Date: April 17, 2025

Make sure system packages are up to date and install the necessary tools:
sudo apt-get update
sudo apt install -y net-tools iputils-ping iptables parted lrzsz vim axel unzip cmake gcc make build-essential ninja-build

Configure the NVMe SSD and LVM for large datasets and model checkpoints:
# Check the disks
fdisk -l
lsblk
# Partition the NVMe disk
parted /dev/nvme0n1
# Commands: mklabel gpt, mkpart primary ext4 0% 100%, set 1 lvm on, quit
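For a repeatable setup, the same partitioning can be scripted non-interactively; a minimal sketch, assuming the target disk really is /dev/nvme0n1 (destructive, double-check the device first):
# Scripted equivalent of the interactive parted session above
sudo parted -s /dev/nvme0n1 mklabel gpt
sudo parted -s /dev/nvme0n1 mkpart primary ext4 0% 100%
sudo parted -s /dev/nvme0n1 set 1 lvm on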
# Format the partition
mkfs.ext4 /dev/nvme0n1p1
# Create the LVM logical volume
sudo lvcreate -n backup-lv -l 100%FREE ubuntu-vg
mkfs.ext4 /dev/ubuntu-vg/backup-lv
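Before building on the new logical volume, the standard read-only LVM status commands confirm the layout:
sudo pvs   # physical volumes
sudo vgs   # volume groups and remaining free extents
sudo lvs   # logical volumes; backup-lv should appear under ubuntu-vg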
# Create the mount points
mkdir /data /backup
# Configure /etc/fstab
sudo vi /etc/fstab
# Add the following entries (use blkid to get the UUID):
# UUID=<nvme0n1p1-uuid> /data ext4 defaults 0 0
# /dev/ubuntu-vg/backup-lv /backup ext4 defaults 0 0
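To avoid copying the UUID by hand, the entries can be appended programmatically; a minimal sketch, assuming the data partition is /dev/nvme0n1p1:
UUID=$(blkid -s UUID -o value /dev/nvme0n1p1)
echo "UUID=$UUID /data ext4 defaults 0 0" | sudo tee -a /etc/fstab
echo "/dev/ubuntu-vg/backup-lv /backup ext4 defaults 0 0" | sudo tee -a /etc/fstab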
# Mount everything
sudo mount -a
Verify:
df -Th
blkid
ls -larth /data/ /backup/

Make sure the network interfaces support high-bandwidth communication:
cd /etc/netplan/
sudo vi 00-installer-config.yaml
# Example configuration (adjust to the actual NICs):
network:
  ethernets:
    enp25s0f0:
      dhcp4: no
      addresses: [10.1.1.10/24]
      gateway4: 10.1.1.1
      nameservers:
        addresses: [8.8.8.8, 8.8.4.4]
  version: 2
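On a remote machine it is safer to test the configuration with automatic rollback before applying it permanently; a minimal sketch (netplan reverts unless the change is confirmed within the timeout):
sudo netplan try --timeout 30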
sudo netplan apply
Verify:
ip addr
ethtool enp25s0f0
ping 8.8.8.8

Install the NVIDIA data center driver suited to the H100:
cd /data/install_deb/
sudo chmod +x NVIDIA-Linux-x86_64-570.124.06.run
sudo ./NVIDIA-Linux-x86_64-570.124.06.run --no-x-check --no-nouveau-check --no-opengl-files
Verify:
nvidia-smi
lsmod | grep nvidia
Version:

Install Fabric Manager on the multi-GPU system to enable NVLink and efficient communication:
cd /data/install_deb/
wget https://developer.download.nvidia.com/compute/cuda/repos/ubuntu2204/x86_64/nvidia-fabricmanager-570_570.124.06-1_amd64.deb
sudo apt-get install ./nvidia-fabricmanager-570_570.124.06-1_amd64.deb
systemctl enable nvidia-fabricmanager
systemctl restart nvidia-fabricmanager
Verify:
systemctl status nvidia-fabricmanager
nvidia-smi -q | grep -i -A 2 Fabric
Version:
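NVLink health can also be checked directly from the driver tools; a short, read-only check (all links should report as active):
nvidia-smi nvlink -s   # per-GPU NVLink link status
nvidia-smi topo -m     # GPU interconnect topology matrix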

Install CUDA 12.4 to support PyTorch and deep learning workloads:
cd /data/install_deb/
wget https://developer.download.nvidia.com/compute/cuda/12.4.0/local_installers/cuda_12.4.0_550.54.14_linux.run
sudo ./cuda_12.4.0_550.54.14_linux.run --no-x-check --no-nouveau-check --no-opengl-files
# Configure environment variables
echo 'export CUDA_HOME=/usr/local/cuda-12.4' >> ~/.bashrc
echo 'export PATH=$CUDA_HOME/bin:$PATH' >> ~/.bashrc
echo 'export LD_LIBRARY_PATH=$CUDA_HOME/lib64:$LD_LIBRARY_PATH' >> ~/.bashrc
source ~/.bashrc
Verify:
nvcc --version
nvidia-smi
Version:

Install cuDNN to improve deep learning performance:
cd /data/install_deb/
wget https://developer.download.nvidia.com/compute/cudnn/9.0.0/local_installers/cudnn-local-repo-ubuntu2204-9.0.0_1.0-1_amd64.deb
sudo dpkg -i cudnn-local-repo-ubuntu2204-9.0.0_1.0-1_amd64.deb
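If the apt-get update below complains about a missing GPG key, the local repo ships a keyring that the dpkg output asks you to copy; the exact directory can differ, so prefer the command printed by dpkg itself:
sudo cp /var/cudnn-local-repo-ubuntu2204-9.0.0/cudnn-*-keyring.gpg /usr/share/keyrings/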
sudo apt-get update
sudo apt-get install -y libcudnn9-cuda-12 libcudnn9-dev-cuda-12
Verify:
cat /usr/include/cudnn_version.h | grep CUDNN_MAJOR -A 2
Version:

Install Anaconda for environment isolation:
cd /data/install_deb/
wget https://repo.anaconda.com/archive/Anaconda3-2024.10-1-Linux-x86_64.sh
bash Anaconda3-2024.10-1-Linux-x86_64.sh
echo 'export PATH="/root/anaconda3/bin:$PATH"' >> ~/.bashrc
source ~/.bashrc
conda init
Verify:
conda --version
Version:

Create the isolated llama_factory environment:
conda create -n llama_factory python=3.12
conda activate llama_factory
Verify:
conda info --envs
python --version

Install PyTorch 2.5.1 with CUDA 12.4 support:
conda activate llama_factory
pip install torch==2.5.1 torchvision==0.20.1 torchaudio==2.5.1 --index-url https://download.pytorch.org/whl/cu124
Verify:
python -c "import torch; print(torch.__version__); print(torch.cuda.is_available()); print(torch.version.cuda); print(torch.cuda.device_count()); print([torch.cuda.get_device_name(i) for i in range(torch.cuda.device_count())])"
Version:

Install FlashAttention to improve Transformer model performance:
pip install flash_attn-2.7.4.post1+cu12torch2.5cxx11abiFALSE-cp312-cp312-linux_x86_64.whl
Verify:
pip show flash-attn
python -c "import flash_attn; print('FlashAttention installed successfully!')"
Version:

Clone and install LLaMA-Factory:
cd /data
git clone https://github.com/hiyouga/LLaMA-Factory.git
cd LLaMA-Factory
pip install llamafactory==0.9.0 -i https://repo.huaweicloud.com/repository/pypi/simple
Verify:
llamafactory-cli --version
Version:

Prepare the datasets and pretrained models:
cd /data
tar xvf checkpoint-214971.tar
tar xvf Qwen2___5-7B-Instruct.tar
mv Qwen2___5-7B-Instruct qwen25_7BI
# Move the datasets
cd /data/SKData
mv data/*.jsonl ./
# Configure dataset_info.json
cd /data/LLaMA-Factory/data
vim dataset_info.json
# Example configuration:
{
  "v5": {
    "file_name": "/data/SKData/new_step_data_20250317_train_ocv.jsonl",
    "columns": {
      "prompt": "prompt",
      "response": "response"
    }
  },
  "ddz_dataset": {
    "file_name": "/data/ddz_dataset/ai_data_training_v1.0.json",
    "columns": {
      "prompt": "prompt",
      "response": "response"
    }
  }
}

Convert the Hugging Face model to GGUF format for llama.cpp inference:
cd /data/llama.cpp
python convert_hf_to_gguf.py /data/checkpoint-214971 --outfile /data/qwen2-model.gguf
Verify:
ls -larth /data/qwen2-model.gguf
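The converted GGUF is full precision; it can optionally be quantized to cut memory use. A minimal sketch using llama.cpp's quantize tool, after building llama.cpp as described further below (the output name and the Q4_K_M type are illustrative):
./build/bin/llama-quantize /data/qwen2-model.gguf /data/qwen2-model-q4_k_m.gguf Q4_K_M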

Fine-tune the model with DeepSpeed and multiple GPUs:
conda activate llama_factory
FORCE_TORCHRUN=1 DISABLE_VERSION_CHECK=1 CUDA_VISIBLE_DEVICES=0,1 llamafactory-cli train examples/qwen2_7b_freeze_sft_ddz_v1.yaml
Example configuration (qwen2_7b_freeze_sft_ddz_v1.yaml):
model_name_or_path: /data/qwen25_7BI
dataset: v5,ddz_dataset
template: qwen
finetuning_type: freeze
use_deepspeed: true
deepspeed: ds_configs/stage3.json
per_device_train_batch_size: 4
gradient_accumulation_steps: 8
learning_rate: 5e-5
num_train_epochs: 3
output_dir: /data/checkpoint
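The referenced ds_configs/stage3.json is not reproduced in these notes; a minimal ZeRO stage-3 sketch using DeepSpeed's documented keys ("auto" defers the value to the trainer, the other values are illustrative):
{
  "train_batch_size": "auto",
  "train_micro_batch_size_per_gpu": "auto",
  "gradient_accumulation_steps": "auto",
  "zero_optimization": {
    "stage": 3,
    "overlap_comm": true,
    "contiguous_gradients": true
  },
  "bf16": {
    "enabled": "auto"
  }
}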
Verify:
nvitop or nvidia-smi
tail -f /data/checkpoint/train.log

Start the API service for inference:
conda activate llama_factory
API_PORT=6000 CUDA_VISIBLE_DEVICES=4,5 llamafactory-cli api examples/test_7b_dcot.yaml
Example configuration (test_7b_dcot.yaml):
model_name_or_path: /data/checkpoint-214971
template: qwen
infer_backend: vllm
vllm_args:
  gpu_memory_utilization: 0.9
  max_model_len: 4096
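Once the service is up it speaks the OpenAI-compatible chat API, so a quick smoke test against the port set above is possible (the "model" value here is a placeholder):
curl http://localhost:6000/v1/chat/completions \
  -H "Content-Type: application/json" \
  -d '{"model": "qwen", "messages": [{"role": "user", "content": "hello"}]}'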

Use llama.cpp for model conversion and lightweight inference:
cd /data
git clone https://github.com/ggerganov/llama.cpp
cd llama.cpp
mkdir build && cd build
cmake .. -DGGML_CUDA=ON -DCMAKE_CUDA_ARCHITECTURES=90
cmake --build . --config Release
Verify:
./build/bin/llama-cli -m /data/qwen2-model.gguf --prompt "What is the capital of France?" -n 256 -t 8 --gpu-layers 28 -c 4096
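The same build can also serve the GGUF model over HTTP; a minimal sketch (the port is arbitrary):
./build/bin/llama-server -m /data/qwen2-model.gguf --port 8080 --gpu-layers 28 -c 4096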

For GPU monitoring:
pip install nvitop
Verify:
nvitop
Version:

The following are the key installation steps that actually worked, filtered and organized from .bash_history:
- NVIDIA driver (NVIDIA-Linux-x86_64-570.124.06.run): sudo ./NVIDIA-Linux-x86_64-570.124.06.run --no-x-check --no-nouveau-check --no-opengl-files; nvidia-smi shows 8 H100 GPUs.
- Fabric Manager (nvidia-fabricmanager-570_570.124.06-1_amd64.deb): sudo apt-get install ./nvidia-fabricmanager-570_570.124.06-1_amd64.deb; systemctl status nvidia-fabricmanager confirms the service is running. Skipping this install can cause errors, see 10.1.
- CUDA (cuda_12.4.0_550.54.14_linux.run): sudo ./cuda_12.4.0_550.54.14_linux.run --no-x-check --no-nouveau-check --no-opengl-files; nvcc --version shows 12.4.0.
- cuDNN (cudnn-local-repo-ubuntu2204-9.0.0_1.0-1_amd64.deb): sudo dpkg -i cudnn-local-repo-ubuntu2204-9.0.0_1.0-1_amd64.deb && sudo apt-get install -y libcudnn9-cuda-12 libcudnn9-dev-cuda-12; cat /usr/include/cudnn_version.h confirms the version.
- FlashAttention (flash_attn-2.7.4.post1+cu12torch2.5cxx11abiFALSE-cp312-cp312-linux_x86_64.whl): pip install flash_attn-2.7.4.post1+cu12torch2.5cxx11abiFALSE-cp312-cp312-linux_x86_64.whl; ninja-build keeps the build dependencies complete, and pip show flash-attn confirms the install.
- PyTorch: pip install torch==2.5.1 torchvision==0.20.1 torchaudio==2.5.1 --index-url https://download.pytorch.org/whl/cu124; python -c "import torch; print(torch.cuda.is_available())" confirms the GPUs are usable.
- LLaMA-Factory stack: pip install llamafactory==0.9.0 transformers==4.46.1 accelerate==0.34.2 deepspeed==0.15.4 vllm==0.8.2 -i https://repo.huaweicloud.com/repository/pypi/simple; DISABLE_VERSION_CHECK=1 works around version conflicts, and vllm provides efficient inference.

Dead ends, trimmed from the steps above:
- Tried nvidia-driver-550, nvidia-driver-535, and other packaged drivers; 570.124.06 was used in the end, the earlier versions did not work.
- Configured Conda channels (conda-forge, pytorch), but PyTorch was ultimately installed via pip, so the channel configuration had little effect.
- Building flash-attn from source (git clone plus python setup.py install) failed because of its complex dependencies; switched to the precompiled wheel.
- conda init llamafactory (invalid; the correct command is conda init bash) and the many ls -larth and ps auxf debugging runs have been pruned from this document.

Useful checks while debugging:
lsmod | grep nvidia
sudo apt purge nvidia*
echo $CUDA_HOME $LD_LIBRARY_PATH
python -c "import torch; print(torch.cuda.is_available())"
ninja --version   # confirm ninja is installed

Troubleshooting:
- Use DISABLE_VERSION_CHECK=1 to bypass the version check; pin transformers==4.46.1.
- For the use_beam_search parameter, modify /root/anaconda3/envs/llama_factory/lib/python3.12/site-packages/llamafactory/chat/vllm_engine.py if necessary.
- Error observed (2025-04-15 17:45:57, 13:10:42): "CUDA initialization: Unexpected error from cudaGetDeviceCount(). Did you run some cuda functions before calling NumCudaDevices() that might have already set an error? Error 802: system not yet initialized". Fixed by reinstalling and restarting Fabric Manager:
systemctl unmask nvidia-fabricmanager.service
sudo rm -f /lib/systemd/system/nvidia-fabricmanager.service
sudo rm -f /etc/systemd/system/nvidia-fabricmanager.service
sudo apt-get remove nvidia-fabricmanager*
sudo apt-get install ./nvidia-fabricmanager-570_570.124.06-1_amd64.deb
sudo systemctl enable nvidia-fabricmanager
sudo systemctl restart nvidia-fabricmanager
python -c "import torch; print(torch.cuda.is_available()); print(torch.cuda.device_count())"
nvidia-smi