Used together with the `ollama-model-direct-download` tool, this script lets you download Ollama models in one step.
""" | |
Manually download the ollama model | |
1. You need a software (Linux only): https://github.com/amirrezaDev1378/ollama-model-direct-download | |
2. Use the software to get the download links of the model, it will output 6 links. (If the file downloaded from the release page is not available, you need to install the Go environment and compile it yourself) | |
3. You can manually download them, or use this script to download all at once. | |
""" | |
import argparse
import json
import os
import re
import subprocess

import requests
from tqdm import tqdm

def run_command(command):
    """Run a shell command and return its stdout, raising on failure."""
    result = subprocess.run(command, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE, text=True)
    if result.returncode != 0:
        raise RuntimeError(f"Command execution failed: {result.stderr}")
    return result.stdout

def extract_links(output):
    """Parse the omdd output and return {"manifest": url, "1": url, "2": url, ...}."""
    pattern = r'(?:Manifest download link:|^\d+-)\s*(https?://\S+)'
    matches = re.findall(pattern, output, flags=re.MULTILINE)
    if not matches:
        raise ValueError("Download links not found")
    # The first match is the manifest link; the rest are the numbered blob links.
    result = {"manifest": matches[0]}
    for i, url in enumerate(matches[1:], start=1):
        result[str(i)] = url
    return result

def download_files(links, output_dir="downloads"):
    os.makedirs(output_dir, exist_ok=True)
    for name, url in links.items():
        try:
            response = requests.get(url, stream=True)
            response.raise_for_status()
            # Replace colons with hyphens in the filename
            filename_part = url.split("/")[-1].replace(":", "-")
            filename = os.path.join(output_dir, filename_part)
            # Get total file size
            total_size = int(response.headers.get('content-length', 0))
            # Initialize progress bar
            with tqdm(
                total=total_size,
                unit='B',
                unit_scale=True,
                desc=filename,
                bar_format='{l_bar}{bar} | {n_fmt}/{total_fmt} [{elapsed}<{remaining}, {rate_fmt}]'
            ) as pbar:
                with open(filename, "wb") as f:
                    for chunk in response.iter_content(chunk_size=8192):
                        f.write(chunk)
                        pbar.update(len(chunk))
            print(f"\nDownloaded: {filename}")
        except Exception as e:
            print(f"\nDownload failed {url}: {str(e)}")

def main():
    parser = argparse.ArgumentParser(description='Model downloading tool')
    parser.add_argument('-m', '--model_name', type=str, required=True, help='Name of the model to download')
    parser.add_argument('-p', '--path', type=str, default='downloads', help='Storage path')
    args = parser.parse_args()
    try:
        # Run the command and extract links
        cmd = f'./omdd-linux-amd64 get {args.model_name}'
        output = run_command(cmd)
        print(output)
        links = extract_links(output)
        print(json.dumps(links, indent=4))
        # Download files to the specified path
        download_files(links, args.path)
        print(f"Files saved at: {os.path.abspath(args.path)}")
    except Exception as e:
        print(f"Program execution error: {str(e)}")


if __name__ == "__main__":
    main()
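Usage sketch, under these assumptions: the `omdd-linux-amd64` binary from the project's releases sits next to this script (as the command in `main()` expects), and the script is saved as `download_ollama_model.py`; the filename and the model tag below are only examples, not part of the gist.

`pip install requests tqdm`

`python download_ollama_model.py -m llama3:8b -p ./downloads`

The script prints the raw omdd output, then the extracted links as JSON, and finally downloads the manifest and blobs into the given directory.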