// convert_qwen2.go

package convert

import "github.com/ollama/ollama/fs/ggml"

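// qwen2Model holds the Qwen2 hyperparameters unmarshalled from the model's
// Hugging Face config.json, alongside the shared ModelParameters.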
type qwen2Model struct {
	ModelParameters
	MaxPositionEmbeddings uint32  `json:"max_position_embeddings"`
	HiddenSize            uint32  `json:"hidden_size"`
	HiddenLayers          uint32  `json:"num_hidden_layers"`
	IntermediateSize      uint32  `json:"intermediate_size"`
	NumAttentionHeads     uint32  `json:"num_attention_heads"`
	NumKeyValueHeads      uint32  `json:"num_key_value_heads"`
	RopeTheta             float32 `json:"rope_theta"`
	RopeScaling           struct {
		Type                          string     `json:"type"`
		Factor                        ropeFactor `json:"factor"`
		OriginalMaxPositionEmbeddings uint32     `json:"original_max_position_embeddings"`
	} `json:"rope_scaling"`
	RMSNormEPS float32 `json:"rms_norm_eps"`
}

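// Compile-time check that *qwen2Model satisfies the ModelConverter interface.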
var _ ModelConverter = (*qwen2Model)(nil)

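// KV fills in the GGUF key/value metadata for the qwen2 architecture on top
// of the common keys produced by ModelParameters.KV.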
func (q *qwen2Model) KV(t *Tokenizer) ggml.KV {
	kv := q.ModelParameters.KV(t)
	kv["general.architecture"] = "qwen2"
	kv["qwen2.block_count"] = q.HiddenLayers
	kv["qwen2.context_length"] = q.MaxPositionEmbeddings
	kv["qwen2.embedding_length"] = q.HiddenSize
	kv["qwen2.feed_forward_length"] = q.IntermediateSize
	kv["qwen2.attention.head_count"] = q.NumAttentionHeads
	kv["qwen2.attention.head_count_kv"] = q.NumKeyValueHeads
	kv["qwen2.rope.freq_base"] = q.RopeTheta
	kv["qwen2.attention.layer_norm_rms_epsilon"] = q.RMSNormEPS

	switch q.RopeScaling.Type {
	case "":
		// no scaling
	case "yarn":
		kv["qwen2.rope.scaling.type"] = q.RopeScaling.Type
		kv["qwen2.rope.scaling.factor"] = q.RopeScaling.Factor
	default:
		panic("unknown rope scaling type")
	}

	return kv
}

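// Tensors carries the source tensors over one-to-one: name, kind, and shape
// are preserved, and each tensor serves as its own data writer.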
func (q *qwen2Model) Tensors(ts []Tensor) []ggml.Tensor {
	var out []ggml.Tensor
	for _, t := range ts {
		out = append(out, ggml.Tensor{
			Name:     t.Name(),
			Kind:     t.Kind(),
			Shape:    t.Shape(),
			WriterTo: t,
		})
	}

	return out
}

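// Replacements returns old/new substring pairs that map Hugging Face tensor
// names onto their GGUF equivalents, e.g. "model.layers.0.*" becomes "blk.0.*".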
func (q *qwen2Model) Replacements() []string {
	return []string{
		"lm_head", "output",
		"model.embed_tokens", "token_embd",
		"model.layers", "blk",
		"input_layernorm", "attn_norm",
		"self_attn.k_proj", "attn_k",
		"self_attn.v_proj", "attn_v",
		"self_attn.q_proj", "attn_q",
		"self_attn.o_proj", "attn_output",
		"mlp.down_proj", "ffn_down",
		"mlp.gate_proj", "ffn_gate",
		"mlp.up_proj", "ffn_up",
		"post_attention_layernorm", "ffn_norm",
		"model.norm", "output_norm",
	}
}
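
// A minimal sketch of how these pairs can be consumed (an assumption about the
// caller, not something this file defines): strings.NewReplacer reads the
// slice as old/new pairs, so renaming a source tensor would look like:
//
//	r := strings.NewReplacer((&qwen2Model{}).Replacements()...)
//	r.Replace("model.layers.0.self_attn.q_proj.weight") // -> "blk.0.attn_q.weight"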