# The LOCAL_HF package preset is copied from this list
# Remember to bump the LOCAL_HF code env preset version when updating this list
# Keep in sync with HuggingFaceKernelClient.VLLM_VERSION static variable
vllm==0.11.2

# Required to avoid a (non-fatal) error when running inference with vLLM
torch-c-dlpack-ext==0.1.3

transformers[torch]==4.57.1

# embedding
sentence-transformers==5.1.0
timm==1.0.19

# fine-tuning
peft==0.17.1
trl==0.21.0
datasets==4.4.1 # also pulled by vllm

# transitive dependencies

# Torch 2.9 comes with CUDA 12.8 support
torch==2.9.0

tokenizers==0.22.1  # (sentence-transformers, transformers, vllm) version pinned by vllm, so we pin it here to make sure we use the same version with/without vllm
pillow==11.3.0
scipy>=1.10,<1.15  # (sentence-transformers) security issue with <1.10
cachetools==6.1.0 # unpinned dependency of vLLM; its 6.0.0 release introduced a breaking change, so we pin explicitly

# runtime requirements
hf-transfer==0.1.9
accelerate==1.10.0  # multi-gpu support with transformers
bitsandbytes==0.48.2; platform_system == "Linux"  # macOS x86_64 binaries are not built anymore for release >=0.43
protobuf==5.29.5  # required for fine-tuning mistral/llama models on ARM (also pulled by vllm)
sentencepiece==0.2.1
tiktoken==0.12.0
einops==0.8.1
fla-core==0.4.0 # required for Kimi models

# image generation requirements
diffusers==0.35.1
compel==2.1.1 # used for prompt weighting

# analytics
psutil==7.1.3
py-cpuinfo==9.0.0
