litellm_config.yaml

litellm_settings:
  drop_params: true
model_list:
  - model_name: 'HuggingFace: Mistral: Mistral 7B Instruct v0.1'
    litellm_params:
      model: huggingface/mistralai/Mistral-7B-Instruct-v0.1
      api_key: os.environ/HF_TOKEN
      max_tokens: 1024
  - model_name: 'HuggingFace: Mistral: Mistral 7B Instruct v0.2'
    litellm_params:
      model: huggingface/mistralai/Mistral-7B-Instruct-v0.2
      api_key: os.environ/HF_TOKEN
      max_tokens: 1024
  - model_name: 'HuggingFace: Meta: Llama 3 8B Instruct'
    litellm_params:
      model: huggingface/meta-llama/Meta-Llama-3-8B-Instruct
      api_key: os.environ/HF_TOKEN
      max_tokens: 2047
  - model_name: 'HuggingFace: Mistral: Mixtral 8x7B Instruct v0.1'
    litellm_params:
      model: huggingface/mistralai/Mixtral-8x7B-Instruct-v0.1
      api_key: os.environ/HF_TOKEN
      max_tokens: 8192
  - model_name: 'HuggingFace: Microsoft: Phi-3 Mini-4K-Instruct'
    litellm_params:
      model: huggingface/microsoft/Phi-3-mini-4k-instruct
      api_key: os.environ/HF_TOKEN
      max_tokens: 1024
  - model_name: 'HuggingFace: Google: Gemma 7B 1.1'
    litellm_params:
      model: huggingface/google/gemma-1.1-7b-it
      api_key: os.environ/HF_TOKEN
      max_tokens: 1024
  - model_name: 'HuggingFace: Yi-1.5 34B Chat'
    litellm_params:
      model: huggingface/01-ai/Yi-1.5-34B-Chat
      api_key: os.environ/HF_TOKEN
      max_tokens: 1024
  - model_name: 'HuggingFace: Nous Research: Nous Hermes 2 Mixtral 8x7B DPO'
    litellm_params:
      model: huggingface/NousResearch/Nous-Hermes-2-Mixtral-8x7B-DPO
      api_key: os.environ/HF_TOKEN
      max_tokens: 2048
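
For context, a minimal usage sketch (not part of the config itself): once the LiteLLM proxy is started with this file, e.g. `litellm --config litellm_config.yaml`, each entry is reachable through the proxy's OpenAI-compatible endpoint under its model_name. The base URL and port (4000 is LiteLLM's documented default) and the placeholder API key below are assumptions, not values taken from this config.

# usage_example.py -- query one of the models defined above through the proxy
from openai import OpenAI

client = OpenAI(
    base_url="http://localhost:4000",  # LiteLLM proxy address (assumed default port)
    api_key="sk-anything",             # placeholder; use your proxy master key if one is configured
)

response = client.chat.completions.create(
    # model must match a model_name string from the config above
    model="HuggingFace: Mistral: Mistral 7B Instruct v0.2",
    messages=[{"role": "user", "content": "Say hello in one sentence."}],
)
print(response.choices[0].message.content)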