Downloading Large Models from a Model Hub

Repositories that require no authentication

Downloading an entire repository

from huggingface_hub import snapshot_download

repo_id = "Qwen/Qwen-14B-Chat-Int4"    # repository name on Hugging Face
local_dir = "Qwen/Qwen-14B-Chat-Int4"  # local directory to store the model
local_dir_use_symlinks = False         # store real files locally instead of symlinks into the blob cache

snapshot_download(
    repo_id=repo_id,
    local_dir=local_dir,
    local_dir_use_symlinks=local_dir_use_symlinks,
)
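
If you are not sure what a repository contains before pulling everything, you can list its files first. A minimal sketch using huggingface_hub's list_repo_files, reusing the same repo_id as above:

from huggingface_hub import list_repo_files

# Print every file path in the repository without downloading anything.
for f in list_repo_files("Qwen/Qwen-14B-Chat-Int4"):
    print(f)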

Downloading a single file

from huggingface_hub import hf_hub_download

repo_id = "bartowski/Qwen2.5-32B-Instruct-GGUF"   # repository name
filename = "Qwen2.5-32B-Instruct-Q6_K_L.gguf"     # name of the file to download
local_dir = "F:/Qwen/Qwen2.5-32B-Instruct-GGUF/"  # local directory to store the file

# Download the file; the return value is the local path of the downloaded copy.
local_file_path = hf_hub_download(
    repo_id=repo_id,
    filename=filename,
    local_dir=local_dir,
)
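
When you want several related files (for example, a quantization split into multiple GGUF shards), snapshot_download with allow_patterns saves you from calling hf_hub_download once per file. A hedged sketch; the glob pattern here is an assumption you should adapt to the files you actually need:

from huggingface_hub import snapshot_download

# Download only files whose names match the glob patterns (assumed pattern; adjust as needed).
snapshot_download(
    repo_id="bartowski/Qwen2.5-32B-Instruct-GGUF",
    local_dir="F:/Qwen/Qwen2.5-32B-Instruct-GGUF/",
    allow_patterns=["Qwen2.5-32B-Instruct-Q6_K_L*.gguf"],
)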

Excluding specific files

from huggingface_hub import snapshot_download

repo_id = "black-forest-labs/FLUX.1-schnell"
local_dir = "D:/black-forest-labs/FLUX.1-schnell"
local_dir_use_symlinks = False

snapshot_download(
    repo_id=repo_id,
    local_dir=local_dir,
    local_dir_use_symlinks=local_dir_use_symlinks,
    ignore_patterns=["flux1-schnell.safetensors"],  # skip this specific file
)
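
ignore_patterns accepts fnmatch-style globs, so you can exclude whole classes of files rather than one exact name. A small sketch; the patterns below are illustrative assumptions, not a recommendation for this particular repository:

from huggingface_hub import snapshot_download

# Skip all standalone .safetensors checkpoints and anything under onnx/ (assumed patterns).
snapshot_download(
    repo_id="black-forest-labs/FLUX.1-schnell",
    local_dir="D:/black-forest-labs/FLUX.1-schnell",
    ignore_patterns=["*.safetensors", "onnx/*"],
)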

Repositories that require authentication

from huggingface_hub import snapshot_download

repo_id = "google/gemma-7b"    # repository name on Hugging Face
local_dir = "google/gemma-7b"  # local directory to store the model
local_dir_use_symlinks = False # store real files locally instead of symlinks into the blob cache
token = '.'                    # your access token, found under your Hugging Face account's access tokens page

# If you need a proxy:
proxies = {
    'http': 'XXXX',
    'https': 'XXXX',
}

snapshot_download(
    repo_id=repo_id,
    local_dir=local_dir,
    local_dir_use_symlinks=local_dir_use_symlinks,
    token=token,
    # proxies=proxies,
)
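
Hardcoding the token in a script is easy to leak. As an alternative, huggingface_hub provides login(), which caches the credential, and it also honors the HF_TOKEN environment variable; once either is in place, the token argument can be omitted. A sketch, assuming the token has been exported as HF_TOKEN:

import os
from huggingface_hub import login, snapshot_download

# Log in once; the token is cached and reused by later hub calls.
login(token=os.environ["HF_TOKEN"])  # assumes HF_TOKEN is set in the environment

# With a cached token (or HF_TOKEN set), no token= argument is needed.
snapshot_download(
    repo_id="google/gemma-7b",
    local_dir="google/gemma-7b",
)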