process_text_spm.go 5.9 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242
  1. package model
  2. import (
  3. "iter"
  4. "log/slog"
  5. "strings"
  6. "github.com/dlclark/regexp2"
  7. queue "github.com/emirpasic/gods/v2/queues/priorityqueue"
  8. )
  9. const spmWhitespaceSep = "▁"
  10. func replaceWhitespaceBySeperator(s string) string {
  11. return strings.ReplaceAll(s, " ", spmWhitespaceSep)
  12. }
// SentencePieceModel is a TextProcessor that tokenizes text in the
// SentencePiece style: a pretokenizer regex splits the input, then pieces
// are merged greedily by vocabulary score (see Encode).
type SentencePieceModel struct {
	maxTokenLen int             // byte length of the longest mergeable vocabulary token
	pre         *regexp2.Regexp // compiled pretokenizer split pattern
	vocab       *Vocabulary
}

// Compile-time assertion that SentencePieceModel implements TextProcessor.
var _ TextProcessor = (*SentencePieceModel)(nil)
  19. func NewSentencePieceModel(pre string, vocab *Vocabulary) SentencePieceModel {
  20. slog.Debug("Tokens", "num tokens", len(vocab.Values), "vals", vocab.Values[:5], "scores", vocab.Scores[:5], "types", vocab.Types[:5])
  21. counter := map[int]int{}
  22. var maxTokenLen int
  23. for cnt := range vocab.Types {
  24. switch vocab.Types[cnt] {
  25. case TOKEN_TYPE_NORMAL, TOKEN_TYPE_USER_DEFINED, TOKEN_TYPE_UNUSED:
  26. maxTokenLen = max(maxTokenLen, len(vocab.Values[cnt]))
  27. fallthrough
  28. default:
  29. counter[int(vocab.Types[cnt])] += 1
  30. }
  31. }
  32. slog.Debug("Token counts", "normal", counter[TOKEN_TYPE_NORMAL], "unknown", counter[TOKEN_TYPE_UNKNOWN], "control", counter[TOKEN_TYPE_CONTROL],
  33. "user defined", counter[TOKEN_TYPE_USER_DEFINED], "unused", counter[TOKEN_TYPE_UNUSED], "byte", counter[TOKEN_TYPE_BYTE],
  34. "max token len", maxTokenLen)
  35. return SentencePieceModel{
  36. maxTokenLen: maxTokenLen,
  37. pre: regexp2.MustCompile(pre, regexp2.Unicode|regexp2.RE2),
  38. vocab: vocab,
  39. }
  40. }
// Is reports whether token id carries the given special role (e.g. BOS/EOS),
// delegating to the underlying vocabulary.
func (spm SentencePieceModel) Is(id int32, special Special) bool {
	return spm.vocab.Is(id, special)
}
  44. func (spm *SentencePieceModel) split(s string) iter.Seq[string] {
  45. return func(yield func(string) bool) {
  46. for m, _ := spm.pre.FindStringMatch(s); m != nil; m, _ = spm.pre.FindNextMatch(m) {
  47. if !yield(m.String()) {
  48. break
  49. }
  50. }
  51. }
  52. }
// Encode converts s into token IDs. Special tokens are split out verbatim
// first; each remaining text fragment is pretokenized via split, then
// tokenized by repeatedly merging adjacent pieces with the highest-scoring
// vocabulary entries (a priority-queue-driven, BPE-style merge loop).
// If addSpecial is true, BOS/EOS are prepended/appended according to the
// vocabulary's AddBOS/AddEOS flags. The returned error is always nil in
// this implementation.
func (spm SentencePieceModel) Encode(s string, addSpecial bool) ([]int32, error) {
	fragments := []fragment{{value: s}}
	for _, special := range spm.vocab.SpecialVocabulary() {
		// TODO: process special tokens concurrently
		id := spm.vocab.Encode(special)
		for i := 0; i < len(fragments); i++ {
			frag := fragments[i]
			if len(frag.ids) > 0 {
				// Already resolved to IDs (a previously matched special
				// token); never split it again.
				continue
			}
			// Split this fragment around the first occurrence of the special
			// token: up to three pieces — prefix text, the special token
			// (pre-resolved to its ID), and the remaining text. Note the
			// switch's i shadows the loop index only inside the switch.
			var middle []fragment
			switch i := strings.Index(frag.value, special); {
			case i < 0:
				middle = append(middle, frag)
			case i > 0:
				middle = append(middle, fragment{value: frag.value[:i]})
				fallthrough
			default:
				middle = append(middle, fragment{value: special, ids: []int32{id}})
				if rest := frag.value[i+len(special):]; rest != "" {
					middle = append(middle, fragment{value: rest})
				}
			}
			// Splice the replacement pieces in at the loop index; any
			// remainder piece is revisited on a later iteration, so repeated
			// occurrences of the same special token are all found.
			fragments = append(fragments[:i], append(middle, fragments[i+1:]...)...)
		}
	}
	slog.Debug("fragments", "frags", fragments)

	var ids []int32
	for _, frag := range fragments {
		if len(frag.ids) > 0 {
			// Special token: IDs were resolved above.
			ids = append(ids, frag.ids...)
			continue
		}
		for split := range spm.split(frag.value) {
			// SentencePiece vocabularies store spaces as the metaspace
			// character ("▁"); normalize before lookup.
			split = replaceWhitespaceBySeperator(split)

			var sb strings.Builder
			sb.Write([]byte(split))

			// Fast path: the whole piece is a single vocabulary token.
			if id := spm.vocab.Encode(sb.String()); id >= 0 {
				ids = append(ids, id)
				continue
			}

			// Slow path: start from individual runes and greedily merge.
			// Candidates are ordered by score (higher first); ties break
			// toward the leftmost pair.
			runes := []rune(sb.String())
			pq := queue.NewWith(func(a, b any) int {
				priA := a.(*candidate)
				priB := b.(*candidate)
				if priA.score > priB.score || (priA.score == priB.score && priA.a < priB.a) {
					return -1
				}
				return 1
			})

			// merges acts as a doubly linked list over rune positions:
			// p/n are the previous/next indices, runes is the accumulated
			// piece at this position (nil once merged into a left neighbor).
			merges := make([]merge, len(runes))
			for r := range runes {
				merges[r] = merge{
					p:     r - 1,
					n:     r + 1,
					runes: []rune{runes[r]},
				}
			}

			slog.Debug("tokenizer", "merges", merges)

			// pairwise returns a merge candidate for positions a and b if
			// the concatenation of their current pieces is in the vocabulary,
			// scored by that combined token's score; nil otherwise.
			pairwise := func(a, b int) *candidate {
				if a < 0 || b >= len(runes) {
					return nil
				}

				left, right := string(merges[a].runes), string(merges[b].runes)
				if id := spm.vocab.Encode(left + right); id >= 0 {
					return &candidate{
						a:     a,
						b:     b,
						score: spm.vocab.Scores[id],
					}
				}
				return nil
			}

			// Seed the queue with every adjacent rune pair.
			for i := range len(runes) - 1 {
				if pair := pairwise(i, i+1); pair != nil {
					pq.Enqueue(pair)
				}
			}

			pqv := pq.Values()
			for _, v := range pqv {
				e := v.(*candidate)
				slog.Debug("candidate", "candidate", e)
			}

			// Apply the best-scoring merge, then enqueue new candidates for
			// the merged piece with its left and right neighbors. Stale
			// candidates whose pieces were already consumed (nil runes) are
			// skipped. The inner `pair` declarations shadow the dequeued one.
			for !pq.Empty() {
				v, _ := pq.Dequeue()
				pair := v.(*candidate)
				left, right := merges[pair.a], merges[pair.b]

				slog.Debug("pair", "left", left, "right", right)
				if len(left.runes) == 0 || len(right.runes) == 0 {
					continue
				}

				// Absorb the right piece into the left and unlink it.
				merges[pair.a].runes = append(left.runes, right.runes...)
				merges[pair.b].runes = nil
				merges[pair.a].n = right.n
				if right.n < len(merges) {
					merges[right.n].p = pair.a
				}

				if pair := pairwise(merges[pair.a].p, pair.a); pair != nil {
					pq.Enqueue(pair)
				}

				if pair := pairwise(pair.a, merges[pair.a].n); pair != nil {
					pq.Enqueue(pair)
				}
			}

			slog.Debug("merges", "merges", merges)

			// Emit surviving pieces left-to-right. A piece missing from the
			// vocabulary is dropped (debug-logged), not errored.
			for _, merge := range merges {
				if len(merge.runes) > 0 {
					if id := spm.vocab.Encode(string(merge.runes)); id >= 0 {
						ids = append(ids, id)
					} else {
						slog.Debug("missing token", "token", string(merge.runes))
					}
				}
			}
		}
	}

	if addSpecial && len(ids) > 0 {
		if spm.vocab.AddBOS {
			if ids[0] == spm.vocab.BOS {
				slog.Warn("adding bos token to prompt which already has it", "id", spm.vocab.BOS)
			}

			slog.Debug("adding bos token to prompt", "id", spm.vocab.BOS)
			ids = append([]int32{spm.vocab.BOS}, ids...)
		}

		if spm.vocab.AddEOS {
			if ids[len(ids)-1] == spm.vocab.EOS {
				slog.Warn("adding eos token to prompt which already has it", "id", spm.vocab.EOS)
			}

			slog.Debug("adding eos token to prompt", "id", spm.vocab.EOS)
			ids = append(ids, spm.vocab.EOS)
		}
	}

	return ids, nil
}
// candidate is a proposed merge of the pieces at positions a and b
// (b was a's immediate right neighbor when the candidate was enqueued),
// ranked by the vocabulary score of the combined token.
type candidate struct {
	a, b  int
	score float32
}
  191. func (spm SentencePieceModel) Decode(ids []int32) (string, error) {
  192. var sb strings.Builder
  193. for _, id := range ids {
  194. data := spm.vocab.Decode(id)
  195. data = strings.ReplaceAll(data, spmWhitespaceSep, " ")
  196. if _, err := sb.WriteString(data); err != nil {
  197. return "", err
  198. }
  199. }
  200. slog.Debug("decoded", "ids", ids, "text", sb.String())
  201. return sb.String(), nil
  202. }