# The LOCAL_HF package preset is copied from this list
# Remember to bump the LOCAL_HF code env preset version when updating this list

vllm==0.9.0.1  # on mac, use arm python

transformers[torch]==4.51.3

# embedding
sentence-transformers==3.0.1
timm==1.0.8

# fine-tuning
peft==0.14.0
trl==0.14.0
datasets==3.2.0 # also pulled by vllm

# transitive dependencies
torch==2.7.0  # on mac, use arm python
tokenizers==0.21.1  # (sentence-transformers, transformers, vllm) version pinned by vllm, so we pin it here to make sure we use the same version with/without vllm
pillow>=10.3,<11  # (sentence-transformers, timm, vllm) security issue on pillow 10.2
scipy>=1.10,<1.15  # (sentence-transformers) security issue with <1.10
cachetools==6.0.0 # pinned explicitly because vLLM leaves this dependency unpinned and 6.0.0 introduced a breaking change; keeping it fixed avoids silent upgrades

# runtime requirements
hf-transfer==0.1.9
accelerate==0.34.2  # multi-gpu support with transformers
bitsandbytes==0.46.0; platform_system == "Linux"  # macOS x86_64 binaries are no longer built for releases >=0.43
protobuf==5.29.5  # required for fine-tuning mistral/llama models on ARM (also pulled by vllm)
sentencepiece==0.2.0
tiktoken==0.7.0
einops==0.8.0

# image generation requirements
diffusers==0.30.1
compel==2.0.3 # used for prompt weighting

# analytics
psutil==7.0.0
py-cpuinfo==9.0.0
