process_text_spm.go

package model

import (
	"iter"
	"strings"

	"github.com/dlclark/regexp2"
	queue "github.com/emirpasic/gods/v2/queues/priorityqueue"

	"github.com/ollama/ollama/logging"
)
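// spmWhitespaceSep is the word-boundary marker used by SentencePiece
// vocabularies (U+2581, "LOWER ONE EIGHTH BLOCK"): spaces are mapped to it
// before vocabulary lookups and mapped back when decoding.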
const spmWhitespaceSep = "▁"

var log = logging.NewLogger()

func replaceWhitespaceBySeparator(s string) string {
	return strings.ReplaceAll(s, " ", spmWhitespaceSep)
}
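// SentencePieceModel is a TextProcessor that tokenizes text with a
// SentencePiece-style vocabulary: a pretokenizer regex splits the input and
// a score-driven merge assembles tokens from individual runes.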
type SentencePieceModel struct {
	maxTokenLen int
	pre         *regexp2.Regexp
	vocab       *Vocabulary
}

var _ TextProcessor = (*SentencePieceModel)(nil)
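// NewSentencePieceModel compiles the pretokenizer pattern and records the
// length of the longest normal, user-defined, or unused token in the
// vocabulary.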
func NewSentencePieceModel(pre string, vocab *Vocabulary) SentencePieceModel {
	log.Debug("Tokens", "num tokens", len(vocab.Values), "vals", vocab.Values[:5], "scores", vocab.Scores[:5], "types", vocab.Types[:5])

	counter := map[int]int{}
	var maxTokenLen int

	for cnt := range vocab.Types {
		switch vocab.Types[cnt] {
		case TOKEN_TYPE_NORMAL, TOKEN_TYPE_USER_DEFINED, TOKEN_TYPE_UNUSED:
			maxTokenLen = max(maxTokenLen, len(vocab.Values[cnt]))
			fallthrough
		default:
			counter[int(vocab.Types[cnt])] += 1
		}
	}

	log.Debug("Token counts", "normal", counter[TOKEN_TYPE_NORMAL], "unknown", counter[TOKEN_TYPE_UNKNOWN], "control", counter[TOKEN_TYPE_CONTROL],
		"user defined", counter[TOKEN_TYPE_USER_DEFINED], "unused", counter[TOKEN_TYPE_UNUSED], "byte", counter[TOKEN_TYPE_BYTE],
		"max token len", maxTokenLen)

	return SentencePieceModel{
		maxTokenLen: maxTokenLen,
		pre:         regexp2.MustCompile(pre, regexp2.Unicode|regexp2.RE2),
		vocab:       vocab,
	}
}
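// Is reports whether token id has the given special role (e.g. BOS or EOS),
// delegating to the vocabulary.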
func (spm SentencePieceModel) Is(id int32, special Special) bool {
	return spm.vocab.Is(id, special)
}
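// split yields successive matches of the pretokenizer regex over s.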
func (spm *SentencePieceModel) split(s string) iter.Seq[string] {
	return func(yield func(string) bool) {
		for m, _ := spm.pre.FindStringMatch(s); m != nil; m, _ = spm.pre.FindNextMatch(m) {
			if !yield(m.String()) {
				break
			}
		}
	}
}
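// Encode converts s to token ids. Special tokens are carved out of the text
// first, each remaining piece is pretokenized and looked up whole, and
// anything still unmatched is assembled rune by rune by repeatedly applying
// the highest-scoring adjacent-pair merge found in the vocabulary. If
// addSpecial is set, BOS/EOS are added per the vocabulary's AddBOS/AddEOS
// flags.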
func (spm SentencePieceModel) Encode(s string, addSpecial bool) ([]int32, error) {
	fragments := []fragment{{value: s}}
	for _, special := range spm.vocab.SpecialVocabulary() {
		// TODO: process special tokens concurrently
		id := spm.vocab.Encode(special)
		for i := 0; i < len(fragments); i++ {
			frag := fragments[i]
			if len(frag.ids) > 0 {
				continue
			}

			var middle []fragment
			switch i := strings.Index(frag.value, special); {
			case i < 0:
				middle = append(middle, frag)
			case i > 0:
				middle = append(middle, fragment{value: frag.value[:i]})
				fallthrough
			default:
				middle = append(middle, fragment{value: special, ids: []int32{id}})
				if rest := frag.value[i+len(special):]; rest != "" {
					middle = append(middle, fragment{value: rest})
				}
			}

			fragments = append(fragments[:i], append(middle, fragments[i+1:]...)...)
		}
	}

	log.Trace("fragments", "frags", fragments)
	var ids []int32
	for _, frag := range fragments {
		if len(frag.ids) > 0 {
			ids = append(ids, frag.ids...)
			continue
		}
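		// Pretokenize the remaining text and map each piece to ids, trying
		// a whole-piece vocabulary hit before falling back to pair merging.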
		for split := range spm.split(frag.value) {
			split = replaceWhitespaceBySeparator(split)

			if id := spm.vocab.Encode(split); id >= 0 {
				ids = append(ids, id)
				continue
			}

			runes := []rune(split)
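			// Rank candidate merges by vocabulary score, breaking ties
			// toward the leftmost pair.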
			pq := queue.NewWith(func(a, b any) int {
				priA := a.(*candidate)
				priB := b.(*candidate)
				if priA.score > priB.score || (priA.score == priB.score && priA.a < priB.a) {
					return -1
				}
				return 1
			})
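			// merges acts as a doubly linked list over the runes of this
			// piece: p and n index the previous and next live entries, and
			// an entry whose runes are nil has been merged away.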
			merges := make([]merge, len(runes))
			for r := range runes {
				merges[r] = merge{
					p:     r - 1,
					n:     r + 1,
					runes: []rune{runes[r]},
				}
			}

			log.Trace("tokenizer", "merges", merges)
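			// pairwise proposes merging entries a and b when their
			// concatenation is a vocabulary token, scored by that token.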
			pairwise := func(a, b int) *candidate {
				if a < 0 || b >= len(runes) {
					return nil
				}

				left, right := string(merges[a].runes), string(merges[b].runes)
				if id := spm.vocab.Encode(left + right); id >= 0 {
					return &candidate{
						a:     a,
						b:     b,
						score: spm.vocab.Scores[id],
					}
				}

				return nil
			}
			for i := range len(runes) - 1 {
				if pair := pairwise(i, i+1); pair != nil {
					pq.Enqueue(pair)
				}
			}

			for _, v := range pq.Values() {
				e := v.(*candidate)
				log.Trace("candidate", "candidate", e)
			}
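			// Greedily apply the best-scoring merge. Entries in the queue
			// can go stale once a neighbor is merged away, so each pair is
			// revalidated before use.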
			for !pq.Empty() {
				v, _ := pq.Dequeue()
				pair := v.(*candidate)
				left, right := merges[pair.a], merges[pair.b]

				log.Trace("pair", "left", left, "right", right)
				if len(left.runes) == 0 || len(right.runes) == 0 {
					continue
				}

				if id := spm.vocab.Encode(string(left.runes) + string(right.runes)); id < 0 {
					continue
				}

				merges[pair.a].runes = append(left.runes, right.runes...)
				merges[pair.b].runes = nil
				merges[pair.a].n = right.n
				if right.n < len(merges) {
					merges[right.n].p = pair.a
				}

				if pair := pairwise(merges[pair.a].p, pair.a); pair != nil {
					pq.Enqueue(pair)
				}

				if pair := pairwise(pair.a, merges[pair.a].n); pair != nil {
					pq.Enqueue(pair)
				}
			}

			log.Trace("merges", "merges", merges)
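			// Flush surviving entries to token ids.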
			for _, merge := range merges {
				if len(merge.runes) > 0 {
					if id := spm.vocab.Encode(string(merge.runes)); id >= 0 {
						ids = append(ids, id)
					} else {
						log.Error("missing token", "token", string(merge.runes))
					}
				}
			}
		}
	}
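	// Optionally bracket the sequence with BOS/EOS; a token that is already
	// present is logged but still added.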
	if addSpecial && len(ids) > 0 {
		if spm.vocab.AddBOS {
			if ids[0] == spm.vocab.BOS {
				log.Warn("adding bos token to prompt which already has it", "id", spm.vocab.BOS)
			}

			log.Debug("adding bos token to prompt", "id", spm.vocab.BOS)
			ids = append([]int32{spm.vocab.BOS}, ids...)
		}

		if spm.vocab.AddEOS {
			if ids[len(ids)-1] == spm.vocab.EOS {
				log.Warn("adding eos token to prompt which already has it", "id", spm.vocab.EOS)
			}

			log.Debug("adding eos token to prompt", "id", spm.vocab.EOS)
			ids = append(ids, spm.vocab.EOS)
		}
	}

	return ids, nil
}
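// candidate is a proposed merge of merges[a] and merges[b], ranked by the
// vocabulary score of the token the pair would form.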
type candidate struct {
	a, b  int
	score float32
}
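// Decode converts token ids back to text, restoring spaces from the
// SentencePiece whitespace marker.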
func (spm SentencePieceModel) Decode(ids []int32) (string, error) {
	var sb strings.Builder
	for _, id := range ids {
		data := spm.vocab.Decode(id)
		data = strings.ReplaceAll(data, spmWhitespaceSep, " ")
		if _, err := sb.WriteString(data); err != nil {
			return "", err
		}
	}

	log.Debug("decoded", "ids", ids, "text", sb.String())
	return sb.String(), nil
}
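// Example wiring (a sketch, not part of the package API: assumes a
// *Vocabulary and a pretokenizer pattern, here called pretokenPattern,
// loaded from a model file elsewhere):
//
//	spm := NewSentencePieceModel(pretokenPattern, vocab)
//	ids, err := spm.Encode("Hello world", true)
//	if err != nil {
//		return err
//	}
//	text, err := spm.Decode(ids)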