convert_gemma2.go

package convert

import (
    "github.com/ollama/ollama/llm"
)

// gemma2Model extends gemmaModel with the configuration fields specific to
// Gemma 2: the sliding attention window and the attention/final logit
// soft-capping values.
type gemma2Model struct {
    gemmaModel
    SlidingWindow         uint32  `json:"sliding_window"`
    AttentionLogitSoftcap float32 `json:"attn_logit_softcapping"`
    FinalLogitSoftcap     float32 `json:"final_logit_softcapping"`
}
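// The JSON tags above are presumably matched against the upstream Hugging
// Face config.json when the converter unmarshals it; for Gemma 2 that file
// carries fields along these lines (values illustrative, taken from the
// published 9B config):
//
//	{
//	  "sliding_window": 4096,
//	  "attn_logit_softcapping": 50.0,
//	  "final_logit_softcapping": 30.0
//	}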
// KV builds the GGUF metadata for a Gemma 2 conversion, mapping the parsed
// configuration onto "gemma2."-prefixed keys and pinning the tokenizer's
// end-of-turn and fill-in-the-middle token IDs.
func (p *gemma2Model) KV(t *Tokenizer) llm.KV {
    kv := p.ModelParameters.KV(t)
    kv["general.architecture"] = "gemma2"
    kv["gemma2.context_length"] = p.MaxPositionEmbeddings
    kv["gemma2.embedding_length"] = p.HiddenSize
    kv["gemma2.block_count"] = p.HiddenLayers
    kv["gemma2.feed_forward_length"] = p.IntermediateSize
    kv["gemma2.attention.head_count"] = p.NumAttentionHeads
    kv["gemma2.attention.head_count_kv"] = p.NumKeyValueHeads
    kv["gemma2.attention.layer_norm_rms_epsilon"] = p.RMSNormEPS
    kv["gemma2.attention.key_length"] = p.HeadDim
    kv["gemma2.attention.value_length"] = p.HeadDim
    kv["gemma2.attention.sliding_window"] = p.SlidingWindow
    kv["gemma2.attn_logit_softcapping"] = p.AttentionLogitSoftcap
    kv["gemma2.final_logit_softcapping"] = p.FinalLogitSoftcap

    kv["tokenizer.ggml.eot_token_id"] = uint32(107)
    kv["tokenizer.ggml.middle_token_id"] = uint32(68)
    kv["tokenizer.ggml.prefix_token_id"] = uint32(67)
    kv["tokenizer.ggml.suffix_token_id"] = uint32(69)

    return kv
}
// Replacements returns old/new tensor-name substring pairs, extending the
// base gemma list with the extra layer norms Gemma 2 places around the
// attention and feed-forward blocks.
func (p *gemma2Model) Replacements() []string {
    return append(
        p.gemmaModel.Replacements(),
        "post_attention_layernorm", "post_attention_norm",
        "pre_feedforward_layernorm", "ffn_norm",
        "post_feedforward_layernorm", "post_ffw_norm",
    )
}
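// Usage sketch (illustrative, not part of this file): the surrounding convert
// package presumably feeds Replacements into a strings.Replacer to rename
// source tensors, and writes the KV map into the GGUF header. The final
// writer call (llm.WriteGGUF) is an assumption about that package's API.
//
//	var m gemma2Model // populated by unmarshalling config.json
//	kv := m.KV(tokenizer)
//	r := strings.NewReplacer(m.Replacements()...)
//	name := r.Replace("model.layers.0.post_attention_layernorm.weight")
//	// "post_attention_layernorm" becomes "post_attention_norm"; the
//	// inherited gemma pairs may rewrite other path segments as well.
//	_ = llm.WriteGGUF(ws, kv, tensors) // ws: io.WriteSeeker, tensors: []llm.Tensor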