// ggml.go
  1. package ggml
  2. import (
  3. "encoding/binary"
  4. "errors"
  5. "fmt"
  6. "io"
  7. "log/slog"
  8. "slices"
  9. "strings"
  10. "github.com/ollama/ollama/fs/util/bufioutil"
  11. )
// GGML represents a decoded model file: the container format it was read
// from plus the decoded model metadata and tensor index.
type GGML struct {
	container
	model
}

// model is the minimal view of a decoded model: its key/value metadata
// and its tensor descriptors.
type model interface {
	KV() KV
	Tensors() Tensors
}

// KV holds a model's key/value metadata (e.g. "general.architecture").
// Values are whatever Go representations the container decoder produced.
type KV map[string]any
// Architecture returns the model architecture (e.g. "llama"), or
// "unknown" if the key is absent.
func (kv KV) Architecture() string {
	return kv.String("general.architecture", "unknown")
}

// Kind returns the general model kind, or "unknown" if the key is absent.
func (kv KV) Kind() string {
	return kv.String("general.kind", "unknown")
}

// ParameterCount returns the total number of model parameters.
func (kv KV) ParameterCount() uint64 {
	return keyValue[uint64](kv, "general.parameter_count")
}

// FileType returns the declared quantization file type, or
// fileTypeUnknown when the key is absent or zero.
func (kv KV) FileType() fileType {
	if t := kv.Uint("general.file_type"); t > 0 {
		return fileType(t)
	}

	return fileTypeUnknown
}
// BlockCount returns the number of transformer blocks (layers).
func (kv KV) BlockCount() uint64 {
	return uint64(kv.Uint("block_count"))
}

// EmbeddingLength returns the model's hidden (embedding) dimension.
func (kv KV) EmbeddingLength() uint64 {
	return uint64(kv.Uint("embedding_length"))
}

// HeadCount returns the number of attention heads.
func (kv KV) HeadCount() uint64 {
	return uint64(kv.Uint("attention.head_count"))
}

// HeadCountKV returns the number of key/value heads, defaulting to 1.
func (kv KV) HeadCountKV() uint64 {
	return uint64(kv.Uint("attention.head_count_kv", 1))
}

// EmbeddingHeadCount returns the per-head embedding dimension
// (embedding_length / head_count), or 0 when the head count is unknown.
func (kv KV) EmbeddingHeadCount() uint64 {
	if heads := kv.HeadCount(); heads > 0 {
		return kv.EmbeddingLength() / heads
	}

	return 0
}

// EmbeddingHeadCountK returns the key head dimension, defaulting to the
// general per-head embedding dimension.
func (kv KV) EmbeddingHeadCountK() uint64 {
	return uint64(kv.Uint("attention.key_length", uint32(kv.EmbeddingHeadCount())))
}

// EmbeddingHeadCountV returns the value head dimension, defaulting to the
// general per-head embedding dimension.
func (kv KV) EmbeddingHeadCountV() uint64 {
	return uint64(kv.Uint("attention.value_length", uint32(kv.EmbeddingHeadCount())))
}

// GQA returns the grouped-query-attention factor: query heads per KV head.
func (kv KV) GQA() uint64 {
	return kv.HeadCount() / kv.HeadCountKV()
}

// ContextLength returns the model's trained context length.
func (kv KV) ContextLength() uint64 {
	return uint64(kv.Uint("context_length"))
}

// ChatTemplate returns the tokenizer's chat template, or "" if absent.
func (kv KV) ChatTemplate() string {
	return kv.String("tokenizer.chat_template")
}
// String returns the string value for key, or the optional defaultValue
// ("" if none given) when the key is absent. Keys without a "tokenizer."
// or "general." prefix are namespaced by architecture (see keyValue).
func (kv KV) String(key string, defaultValue ...string) string {
	return keyValue(kv, key, append(defaultValue, "")...)
}

// Uint returns the uint32 value for key, or the optional defaultValue
// (0 if none given) when the key is absent.
func (kv KV) Uint(key string, defaultValue ...uint32) uint32 {
	return keyValue(kv, key, append(defaultValue, 0)...)
}

// Float returns the float32 value for key, or the optional defaultValue
// (0 if none given) when the key is absent.
func (kv KV) Float(key string, defaultValue ...float32) float32 {
	return keyValue(kv, key, append(defaultValue, 0)...)
}
// Strings returns the []string value for key, or an empty slice when the
// key is absent.
//
// NOTE(review): defaultValue is accepted but currently ignored — a
// missing key always yields an empty slice. The element type assertion
// panics if the stored array holds non-string values; confirm whether
// callers rely on that.
func (kv KV) Strings(key string, defaultValue ...[]string) []string {
	r := keyValue(kv, key, &array{})
	s := make([]string, r.size)
	for i := range r.size {
		s[i] = r.values[i].(string)
	}

	return s
}

// Uints returns the []uint32 value for key, or an empty slice when the
// key is absent.
//
// NOTE(review): defaultValue is currently ignored, and elements are
// assumed to be stored as int32 (the GGUF decoder's representation) —
// the assertion panics otherwise.
func (kv KV) Uints(key string, defaultValue ...[]uint32) []uint32 {
	r := keyValue(kv, key, &array{})
	s := make([]uint32, r.size)
	for i := range r.size {
		s[i] = uint32(r.values[i].(int32))
	}

	return s
}
  94. func keyValue[T string | uint32 | uint64 | float32 | *array](kv KV, key string, defaultValue ...T) T {
  95. if !strings.HasPrefix(key, "tokenizer.") && !strings.HasPrefix(key, "general.") {
  96. key = kv.Architecture() + "." + key
  97. }
  98. if val, ok := kv[key]; ok {
  99. return val.(T)
  100. }
  101. slog.Warn("key not found", "key", key, "default", defaultValue[0])
  102. return defaultValue[0]
  103. }
// Tensors is the set of tensor descriptors in a model, together with the
// file offset at which the tensor data section begins.
type Tensors struct {
	Items  []*Tensor
	Offset uint64
}
  108. func (ts Tensors) Layers() map[string]Layer {
  109. layers := make(map[string]Layer)
  110. for _, t := range ts.Items {
  111. parts := strings.Split(t.Name, ".")
  112. if parts[0] == "blk" {
  113. // join first and second part, e.g. blk.%d
  114. parts = append([]string{fmt.Sprintf("%s.%s", parts[0], parts[1])}, parts[2:]...)
  115. }
  116. if _, ok := layers[parts[0]]; !ok {
  117. layers[parts[0]] = make(Layer)
  118. }
  119. layers[parts[0]][strings.Join(parts[1:], ".")] = t
  120. }
  121. return layers
  122. }
// Layer maps a tensor's within-layer name suffix (e.g. "attn_q.weight")
// to its descriptor.
type Layer map[string]*Tensor

// Size returns the total byte size of all tensors in the layer.
func (l Layer) Size() (size uint64) {
	for _, t := range l {
		size += t.Size()
	}

	return size
}
// Tensor describes a single tensor: its name, ggml data type (Kind), its
// offset within the tensor data section, and its shape. The embedded
// WriterTo writes the tensor's raw data.
type Tensor struct {
	Name   string `json:"name"`
	Kind   uint32 `json:"kind"`
	Offset uint64 `json:"-"`

	// Shape is the number of elements in each dimension
	Shape []uint64 `json:"shape"`

	io.WriterTo `json:"-"`
}
// block extracts the layer number from a tensor name of the form
// "blk.%d....", returning -1 if the name does not match that pattern.
func (t Tensor) block() (n int) {
	if _, err := fmt.Sscanf(t.Name, "blk.%d.", &n); err != nil {
		return -1
	}

	return
}
  144. func (t Tensor) blockSize() uint64 {
  145. switch t.Kind {
  146. case 0, 1, 24, 25, 26, 27, 28, 30: // F32, F16, I8, I16, I32, I64, F64, BF16
  147. return 1
  148. case 2, 3, 4, 5, 6, 7, 8, 9, 20: // Q4_0, Q4_1, Q5_0, Q5_1, Q8_0, Q8_1, IQ4_NL
  149. return 32
  150. default: // All others
  151. return 256
  152. }
  153. }
// typeSize returns the size in bytes of one quantization block (or of a
// single element, for scalar types) of this tensor's data type. Each
// expression mirrors the corresponding ggml block struct layout:
// scale/min header fields plus the packed quantized values.
//
// NOTE(review): unhandled kinds return 0 — including 4 and 5, which were
// removed from ggml (Q4_2/Q4_3); callers dividing by blockSize() should
// expect a zero size for those, not a panic.
func (t Tensor) typeSize() uint64 {
	blockSize := t.blockSize()

	switch t.Kind {
	case 0: // FP32
		return 4
	case 1: // FP16
		return 2
	case 2: // Q4_0
		return 2 + blockSize/2
	case 3: // Q4_1
		return 2 + 2 + blockSize/2
	case 6: // Q5_0
		return 2 + 4 + blockSize/2
	case 7: // Q5_1
		return 2 + 2 + 4 + blockSize/2
	case 8: // Q8_0
		return 2 + blockSize
	case 9: // Q8_1
		return 4 + 4 + blockSize
	case 10: // Q2_K
		return blockSize/16 + blockSize/4 + 2 + 2
	case 11: // Q3_K
		return blockSize/8 + blockSize/4 + 12 + 2
	case 12: // Q4_K
		return 2 + 2 + 12 + blockSize/2
	case 13: // Q5_K
		return 2 + 2 + 12 + blockSize/8 + blockSize/2
	case 14: // Q6_K
		return blockSize/2 + blockSize/4 + blockSize/16 + 2
	case 15: // Q8_K
		return 2 + blockSize + 2*blockSize/16
	case 16: // IQ2_XXS
		return 2 + 2*blockSize/8
	case 17: // IQ2_XS
		return 2 + 2*blockSize/8 + blockSize/32
	case 18: // IQ3_XXS
		return 2 + blockSize/4 + blockSize/8
	case 19: // IQ1_S
		return 2 + blockSize/8 + blockSize/16
	case 20: // IQ4_NL
		return 2 + blockSize/2
	case 21: // IQ3_S
		return 2 + blockSize/4 + blockSize/8 + blockSize/32 + 4
	case 22: // IQ2_S
		return 2 + blockSize/4 + blockSize/16
	case 23: // IQ4_XS
		return 2 + 2 + blockSize/2 + blockSize/64
	case 24: // I8
		return 1
	case 25: // I16
		return 2
	case 26: // I32
		return 4
	case 27: // I64
		return 8
	case 28: // F64
		return 8
	case 29: // IQ1_M
		return blockSize/8 + blockSize/16 + blockSize/32
	default:
		return 0
	}
}
  217. func (t Tensor) parameters() uint64 {
  218. var count uint64 = 1
  219. for _, n := range t.Shape {
  220. count *= n
  221. }
  222. return count
  223. }
// Size returns the tensor's storage size in bytes:
// elements * bytes-per-block / elements-per-block.
func (t Tensor) Size() uint64 {
	return t.parameters() * t.typeSize() / t.blockSize()
}
// container abstracts a model file container format: it can identify
// itself by name and decode the model it holds from a reader.
type container interface {
	Name() string
	Decode(io.ReadSeeker) (model, error)
}
  231. const (
  232. // Magic constant for `ggml` files (unversioned).
  233. FILE_MAGIC_GGML = 0x67676d6c
  234. // Magic constant for `ggml` files (versioned, ggmf).
  235. FILE_MAGIC_GGMF = 0x67676d66
  236. // Magic constant for `ggml` files (versioned, ggjt).
  237. FILE_MAGIC_GGJT = 0x67676a74
  238. // Magic constant for `ggla` files (LoRA adapter).
  239. FILE_MAGIC_GGLA = 0x67676C61
  240. // Magic constant for `gguf` files (versioned, gguf)
  241. FILE_MAGIC_GGUF_LE = 0x46554747
  242. FILE_MAGIC_GGUF_BE = 0x47475546
  243. )
  244. var ErrUnsupportedFormat = errors.New("unsupported model format")
  245. func DetectContentType(b []byte) string {
  246. switch binary.LittleEndian.Uint32(b[:4]) {
  247. case FILE_MAGIC_GGML:
  248. return "ggml"
  249. case FILE_MAGIC_GGMF:
  250. return "ggmf"
  251. case FILE_MAGIC_GGJT:
  252. return "ggjt"
  253. case FILE_MAGIC_GGLA:
  254. return "ggla"
  255. case FILE_MAGIC_GGUF_LE, FILE_MAGIC_GGUF_BE:
  256. return "gguf"
  257. default:
  258. return ""
  259. }
  260. }
  261. // Decode decodes a GGML model from the given reader.
  262. //
  263. // It collects array values for arrays with a size less than or equal to
  264. // maxArraySize. If maxArraySize is 0, the default value of 1024 is used. If
  265. // the maxArraySize is negative, all arrays are collected.
  266. func Decode(rs io.ReadSeeker, maxArraySize int) (*GGML, int64, error) {
  267. if maxArraySize == 0 {
  268. maxArraySize = 1024
  269. }
  270. rs = bufioutil.NewBufferedSeeker(rs, 32<<10)
  271. var magic uint32
  272. if err := binary.Read(rs, binary.LittleEndian, &magic); err != nil {
  273. return nil, 0, err
  274. }
  275. var c container
  276. switch magic {
  277. case FILE_MAGIC_GGUF_LE:
  278. c = &containerGGUF{ByteOrder: binary.LittleEndian, maxArraySize: maxArraySize}
  279. case FILE_MAGIC_GGUF_BE:
  280. c = &containerGGUF{ByteOrder: binary.BigEndian, maxArraySize: maxArraySize}
  281. default:
  282. return nil, 0, errors.New("invalid file magic")
  283. }
  284. model, err := c.Decode(rs)
  285. if err != nil {
  286. return nil, 0, err
  287. }
  288. offset, err := rs.Seek(0, io.SeekCurrent)
  289. if err != nil {
  290. return nil, 0, err
  291. }
  292. // final model type
  293. return &GGML{
  294. container: c,
  295. model: model,
  296. }, offset, nil
  297. }
// GraphSize estimates memory requirements for inference: kv is the KV
// cache size in bytes for the given context length and KV cache type;
// partialOffload and fullOffload are per-architecture estimates (in
// bytes) of the compute graph size when the model is partially or fully
// offloaded to an accelerator. Architectures without a case below return
// zero offload estimates.
//
// NOTE(review): the unchecked assertion on "tokenizer.ggml.tokens" below
// panics if that key is missing or not an array — confirm all callers
// decode the tokenizer metadata first.
func (llm GGML) GraphSize(context, batch uint64, kvCacheType string) (kv, partialOffload, fullOffload uint64) {
	embedding := llm.KV().EmbeddingLength()
	heads := llm.KV().HeadCount()
	headsKV := llm.KV().HeadCountKV()
	vocab := uint64(llm.KV()["tokenizer.ggml.tokens"].(*array).size)

	embeddingHeads := llm.KV().EmbeddingHeadCount()
	embeddingHeadsK := llm.KV().EmbeddingHeadCountK()
	embeddingHeadsV := llm.KV().EmbeddingHeadCountV()

	layers := llm.Tensors().Layers()

	bytesPerElement := kvCacheBytesPerElement(kvCacheType)
	// KV cache: per layer, one K row (embeddingHeadsK wide) and one V row
	// (embeddingHeadsV wide) per KV head per context position.
	kv = uint64(float64(context*llm.KV().BlockCount()*(embeddingHeadsK+embeddingHeadsV)*headsKV) * bytesPerElement)

	switch llm.KV().Architecture() {
	case "llama":
		fullOffload = max(
			4*batch*(1+4*embedding+context*(1+heads)),
			4*batch*(embedding+vocab),
		)

		partialOffload = 4 * batch * embedding
		partialOffload += max(
			4*batch*(1+embedding+max(context, embedding))+embedding*embedding*9/16+4*context*(batch*heads+embeddingHeads*headsKV),
			4*batch*(embedding+vocab)+embedding*vocab*105/128,
		)

		if ffnGateExpsWeight, ok := layers["blk.0"]["ffn_gate_exps.weight"]; ok {
			// mixtral 8x22b
			ff := uint64(llm.KV()["llama.feed_forward_length"].(uint32))
			partialOffload = max(
				3*ffnGateExpsWeight.Size()+4*batch*(2*ff+headsKV+embedding+context+embeddingHeads*headsKV),
				4*(context*batch*heads+context*embeddingHeads*headsKV+batch*1024+embeddingHeads*headsKV*batch),
			)
		} else if ffnGateWeight, ok := layers["blk.0"]["ffn_gate.0.weight"]; ok {
			// mixtral 8x7b
			ffnGateWeight1 := ffnGateWeight.Shape[1]
			fullOffload = 4 * batch * (2 + 3*embedding + context*(1+heads) + 2*headsKV + ffnGateWeight1)
			partialOffload = max(
				4*batch*(3+embeddingHeads*headsKV+embedding+context*(1+heads)+ffnGateWeight1)+(embedding*embedding+3*embedding*headsKV*ffnGateWeight1)*9/16,
				4*batch*(1+2*embedding+context*(1+heads))+embedding*(6*context*headsKV/heads+embedding*9/16),
			)
		}
	case "mllama":
		var visionTokens, tiles uint64 = 1601, 4

		// Cross-attention layers cache vision tokens (float32) instead of
		// text context (float16), so recompute kv with that split.
		if crossAttentionLayers, ok := llm.KV()["mllama.attention.cross_attention_layers"].(*array); ok {
			kv = headsKV *
				(embeddingHeadsK + embeddingHeadsV) * // one for K, one for V
				(2* // sizeof(float16)
					(llm.KV().BlockCount()-uint64(crossAttentionLayers.size))* // num non-cross attention layers
					context +
					4* // sizeof(float32)
						uint64(crossAttentionLayers.size)* // num cross attention layers
						visionTokens*
						tiles)
		}

		fullOffload = max(
			4*batch*(2+3*embedding+embeddingHeadsK*heads+context*(1+heads)),
			// vocab graph
			4*batch*(embedding+vocab),
		)

		var ropeFreqsCount uint64
		if ropeFreqs, ok := llm.Tensors().Layers()["rope_freqs"]; ok {
			if ropeFreqsWeights, ok := ropeFreqs["weights"]; ok {
				ropeFreqsCount = ropeFreqsWeights.parameters()
			}
		}

		partialOffload = max(
			4*(batch*
				(2*embedding+1+context*(1+heads)+embeddingHeadsK*heads)+
				ropeFreqsCount+
				embeddingHeadsK*context*headsKV),
			// vocab graph
			4*batch*(embedding+vocab)+embedding*vocab*105/128,
		)
	case "gemma", "gemma2":
		fullOffload = max(
			4*batch*(embedding+vocab),
			4*batch*(2+context+context*heads+2*embedding+2*embeddingHeadsK*heads),
		)

		partialOffload = max(
			4*embedding*batch+embedding*vocab*105/128+4*vocab*batch,
			4*batch*(2*embedding+1+2*embeddingHeadsK*heads+context+context*heads)+
				4*embeddingHeadsK*context*8+
				embedding*embeddingHeadsK*heads*9/16,
		)
	case "command-r":
		fullOffload = max(
			4*batch*(embedding+vocab),
			4*batch*(2+4*embedding+context*(1+heads)),
		)

		partialOffload = max(
			4*batch*(embedding+vocab)+embedding*vocab*105/128,
			4*batch*(1+2*embedding+context*(1+heads))+4*embedding*context+embedding*embedding*9/16,
		)
	case "qwen2":
		fullOffload = max(
			4*batch*(embedding+vocab),
			4*batch*(1+2*embedding+context+context*heads),
		)

		partialOffload = max(
			4*batch*(embedding+vocab)+embedding*vocab*105/128,
			4*(batch*(1+2*embedding+context*(1+heads))+embedding*(1+context)),
		)
	case "phi2":
		fullOffload = max(
			4*batch*(embedding+vocab),
			4*batch*(1+4*embedding+context+context*heads),
		)

		partialOffload = max(
			4*batch*(2*embedding+vocab)+embedding*vocab*105/128,
			4*batch*(2+3*embedding+context+context*heads),
		)
	case "stablelm":
		fullOffload = 4 * batch * (context*(1+heads) + 3*embedding + 2)
		partialOffload = max(
			4*batch*(vocab+2*embedding),
			fullOffload,
		)
	case "deepseek2":
		fullOffload = max(
			4*batch*(3*embedding+vocab),
			4*batch*(3*embedding+2+context*(1+headsKV)+2*embeddingHeadsK*headsKV),
		)

		partialOffload = max(
			4*batch*(3*embedding+vocab)+embedding*vocab*105/128,
			4*batch*(2*embedding+1+2*embeddingHeadsK*headsKV+context+context*headsKV)+4*embeddingHeadsK*context*headsKV+embedding*embeddingHeadsK*headsKV*9/16,
		)
	case "chatglm":
		fullOffload = 4 * batch * (embedding + vocab)
		partialOffload = 4*batch*(embedding+vocab) + embedding*vocab*105/128
		if qkvBias, ok := layers["blk.0"]["attn_qkv.bias"]; ok {
			fullOffload = max(
				fullOffload,
				4*batch*(2+
					2*embedding+
					context+
					context*heads+
					embeddingHeadsK*heads+
					qkvBias.Shape[0]),
			)

			partialOffload = max(
				partialOffload,
				4*batch*(1+
					2*embedding+
					embeddingHeadsK*heads+
					context+
					context*heads)+
					4*embeddingHeadsK*context+
					4*context*embeddingHeadsK+
					4*qkvBias.Shape[0],
			)
		}
	}

	return
}
  449. // SupportsKVCacheType checks if the requested cache type is supported
  450. func (llm GGML) SupportsKVCacheType(cacheType string) bool {
  451. return slices.Contains([]string{"f16", "q8_0", "q4_0"}, cacheType)
  452. }
  453. // SupportsFlashAttention checks if the model supports flash attention
  454. func (llm GGML) SupportsFlashAttention() bool {
  455. _, isEmbedding := llm.KV()[fmt.Sprintf("%s.pooling_type", llm.KV().Architecture())]
  456. if isEmbedding {
  457. return false
  458. }
  459. // Check head counts match and are non-zero
  460. headCountK := llm.KV().EmbeddingHeadCountK()
  461. headCountV := llm.KV().EmbeddingHeadCountV()
  462. return headCountK != 0 && headCountV != 0 && headCountK == headCountV
  463. }
  464. // kvCacheBytesPerElement returns the number of bytes per element for a given KV cache type
  465. func kvCacheBytesPerElement(cacheType string) float64 {
  466. switch cacheType {
  467. case "q8_0":
  468. return 1 // 1/2 of fp16
  469. case "q4_0":
  470. return 0.5 // 1/4 of fp16
  471. default:
  472. return 2 // f16 (default)
  473. }
  474. }