0008-solar-pro.patch

From 8313ce5f43f11f3d84f352f97f3802792e90e18c Mon Sep 17 00:00:00 2001
From: Michael Yang <mxyng@pm.me>
Date: Mon, 16 Sep 2024 15:53:16 -0700
Subject: [PATCH] add solar-pro support

solar-pro introduces block skip connections where blocks are connected
to other, non-sequential blocks with a scale multiple

this change adds 4 new keys to store the skip connections and one new
tensor to store the scalar. the scalar is implemented as a 1-dimensional
tensor with 2 elements derived from the model's bskcn_tv configuration.
in general, the values are (bskcn_tv, 1 - bskcn_tv)
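
for illustration only, and not part of the change itself: the blend a layer
flagged by block_skip_connection.2 or .3 performs with the stored scalar is
equivalent to the standalone sketch below. the 0.25 is an arbitrary
placeholder for a model's bskcn_tv setting, not a value taken from any
checkpoint.

    // conceptual sketch of the block skip connection mixing
    #include <cstddef>
    #include <cstdio>
    #include <vector>

    int main() {
        // stored in the gguf as a 2-element tensor: (bskcn_tv, 1 - bskcn_tv)
        const float bskcn_tv[2] = {0.25f, 1.0f - 0.25f};

        std::vector<float> saved  = {1.0f, 2.0f, 3.0f}; // output of an earlier, non-sequential block
        std::vector<float> hidden = {4.0f, 5.0f, 6.0f}; // input of the current block

        // the same math build_solar() expresses with ggml_add/ggml_mul and two
        // ggml_view_1d slices into the per-layer bskcn_tv tensor
        for (std::size_t i = 0; i < hidden.size(); ++i) {
            hidden[i] = bskcn_tv[0] * saved[i] + bskcn_tv[1] * hidden[i];
        }

        for (float v : hidden) {
            std::printf("%g\n", v);
        }
        return 0;
    }

the 4 keys expand from "%s.attention.block_skip_connection.%d", i.e.
solar.attention.block_skip_connection.0 through .3, and are read per layer
into n_bskcn_arr to decide which layers save or consume a skip connection.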
---
 src/llama.cpp | 267 +++++++++++++++++++++++++++++++++++++++++++++++---
 1 file changed, 254 insertions(+), 13 deletions(-)

diff --git a/src/llama.cpp b/src/llama.cpp
index f79bd782..b7771f53 100644
--- a/src/llama.cpp
+++ b/src/llama.cpp
@@ -213,6 +213,7 @@ enum llm_arch {
     LLM_ARCH_NEMOTRON,
     LLM_ARCH_EXAONE,
     LLM_ARCH_RWKV6,
+    LLM_ARCH_SOLAR,
     LLM_ARCH_UNKNOWN,
 };
@@ -261,6 +262,7 @@ static const std::map<llm_arch, const char *> LLM_ARCH_NAMES = {
     { LLM_ARCH_NEMOTRON, "nemotron" },
     { LLM_ARCH_EXAONE, "exaone" },
     { LLM_ARCH_RWKV6, "rwkv6" },
+    { LLM_ARCH_SOLAR, "solar" },
     { LLM_ARCH_UNKNOWN, "(unknown)" },
 };
@@ -314,6 +316,7 @@ enum llm_kv {
     LLM_KV_ATTENTION_KV_LORA_RANK,
     LLM_KV_ATTENTION_RELATIVE_BUCKETS_COUNT,
     LLM_KV_ATTENTION_SLIDING_WINDOW,
+    LLM_KV_ATTENTION_BLOCK_SKIP_CONNECTION,
     LLM_KV_ROPE_DIMENSION_COUNT,
     LLM_KV_ROPE_FREQ_BASE,
@@ -405,19 +408,20 @@ static const std::map<llm_kv, const char *> LLM_KV_NAMES = {
     { LLM_KV_TIME_MIX_EXTRA_DIM, "%s.time_mix_extra_dim" },
     { LLM_KV_TIME_DECAY_EXTRA_DIM, "%s.time_decay_extra_dim" },
-    { LLM_KV_ATTENTION_HEAD_COUNT, "%s.attention.head_count" },
-    { LLM_KV_ATTENTION_HEAD_COUNT_KV, "%s.attention.head_count_kv" },
-    { LLM_KV_ATTENTION_MAX_ALIBI_BIAS, "%s.attention.max_alibi_bias" },
-    { LLM_KV_ATTENTION_CLAMP_KQV, "%s.attention.clamp_kqv" },
-    { LLM_KV_ATTENTION_KEY_LENGTH, "%s.attention.key_length" },
-    { LLM_KV_ATTENTION_VALUE_LENGTH, "%s.attention.value_length" },
-    { LLM_KV_ATTENTION_LAYERNORM_EPS, "%s.attention.layer_norm_epsilon" },
-    { LLM_KV_ATTENTION_LAYERNORM_RMS_EPS, "%s.attention.layer_norm_rms_epsilon" },
-    { LLM_KV_ATTENTION_CAUSAL, "%s.attention.causal" },
-    { LLM_KV_ATTENTION_Q_LORA_RANK, "%s.attention.q_lora_rank" },
-    { LLM_KV_ATTENTION_KV_LORA_RANK, "%s.attention.kv_lora_rank" },
-    { LLM_KV_ATTENTION_RELATIVE_BUCKETS_COUNT, "%s.attention.relative_buckets_count" },
-    { LLM_KV_ATTENTION_SLIDING_WINDOW, "%s.attention.sliding_window" },
+    { LLM_KV_ATTENTION_HEAD_COUNT, "%s.attention.head_count" },
+    { LLM_KV_ATTENTION_HEAD_COUNT_KV, "%s.attention.head_count_kv" },
+    { LLM_KV_ATTENTION_MAX_ALIBI_BIAS, "%s.attention.max_alibi_bias" },
+    { LLM_KV_ATTENTION_CLAMP_KQV, "%s.attention.clamp_kqv" },
+    { LLM_KV_ATTENTION_KEY_LENGTH, "%s.attention.key_length" },
+    { LLM_KV_ATTENTION_VALUE_LENGTH, "%s.attention.value_length" },
+    { LLM_KV_ATTENTION_LAYERNORM_EPS, "%s.attention.layer_norm_epsilon" },
+    { LLM_KV_ATTENTION_LAYERNORM_RMS_EPS, "%s.attention.layer_norm_rms_epsilon" },
+    { LLM_KV_ATTENTION_CAUSAL, "%s.attention.causal" },
+    { LLM_KV_ATTENTION_Q_LORA_RANK, "%s.attention.q_lora_rank" },
+    { LLM_KV_ATTENTION_KV_LORA_RANK, "%s.attention.kv_lora_rank" },
+    { LLM_KV_ATTENTION_RELATIVE_BUCKETS_COUNT, "%s.attention.relative_buckets_count" },
+    { LLM_KV_ATTENTION_SLIDING_WINDOW, "%s.attention.sliding_window" },
+    { LLM_KV_ATTENTION_BLOCK_SKIP_CONNECTION, "%s.attention.block_skip_connection.%d" },
     { LLM_KV_ROPE_DIMENSION_COUNT, "%s.rope.dimension_count" },
     { LLM_KV_ROPE_FREQ_BASE, "%s.rope.freq_base" },
@@ -589,6 +593,7 @@ enum llm_tensor {
     LLM_TENSOR_ENC_FFN_DOWN,
     LLM_TENSOR_ENC_FFN_UP,
     LLM_TENSOR_ENC_OUTPUT_NORM,
+    LLM_TENSOR_BSKCN_TV,
 };
 static const std::map<llm_arch, std::map<llm_tensor, std::string>> LLM_TENSOR_NAMES = {
@@ -1408,6 +1413,24 @@ static const std::map<llm_arch, std::map<llm_tensor, std::string>> LLM_TENSOR_NA
             { LLM_TENSOR_CHANNEL_MIX_RECEPTANCE, "blk.%d.channel_mix_receptance" },
         },
     },
+    {
+        LLM_ARCH_SOLAR,
+        {
+            { LLM_TENSOR_TOKEN_EMBD, "token_embd" },
+            { LLM_TENSOR_OUTPUT_NORM, "output_norm" },
+            { LLM_TENSOR_OUTPUT, "output" },
+            { LLM_TENSOR_ATTN_NORM, "blk.%d.attn_norm" },
+            { LLM_TENSOR_ATTN_Q, "blk.%d.attn_q" },
+            { LLM_TENSOR_ATTN_K, "blk.%d.attn_k" },
+            { LLM_TENSOR_ATTN_V, "blk.%d.attn_v" },
+            { LLM_TENSOR_ATTN_OUT, "blk.%d.attn_output" },
+            { LLM_TENSOR_FFN_NORM, "blk.%d.ffn_norm" },
+            { LLM_TENSOR_FFN_GATE, "blk.%d.ffn_gate" },
+            { LLM_TENSOR_FFN_DOWN, "blk.%d.ffn_down" },
+            { LLM_TENSOR_FFN_UP, "blk.%d.ffn_up" },
+            { LLM_TENSOR_BSKCN_TV, "bskcn_tv" },
+        },
+    },
     {
         LLM_ARCH_UNKNOWN,
         {
@@ -2237,6 +2260,7 @@ enum e_model {
     MODEL_15B,
     MODEL_16B,
     MODEL_20B,
+    MODEL_22B,
     MODEL_30B,
     MODEL_34B,
     MODEL_35B,
@@ -2284,6 +2308,8 @@ struct llama_hparams {
     std::array<uint32_t, LLAMA_MAX_LAYERS> n_head_kv_arr;
     std::array<uint32_t, LLAMA_MAX_LAYERS> n_ff_arr;
+    std::array<std::array<uint32_t, LLAMA_MAX_LAYERS>, 4> n_bskcn_arr;
+
     uint32_t n_layer_dense_lead = 0;
     uint32_t n_lora_q = 0;
     uint32_t n_lora_kv = 0;
@@ -2349,6 +2375,7 @@ struct llama_hparams {
         if (this->n_head_arr != other.n_head_arr) return true;
         if (this->n_head_kv_arr != other.n_head_kv_arr) return true;
         if (this->n_ff_arr != other.n_ff_arr) return true;
+        if (this->n_bskcn_arr != other.n_bskcn_arr) return true;
         if (this->n_rel_attn_bkts != other.n_rel_attn_bkts) return true;
         if (this->n_layer_dense_lead != other.n_layer_dense_lead) return true;
@@ -2455,6 +2482,14 @@ struct llama_hparams {
             return ssm_d_state * ssm_d_inner;
         }
     }
+
+    bool n_bskcn(uint32_t n, uint32_t il = 0) const {
+        if (il < n_layer) {
+            return n_bskcn_arr[n][il] > 0;
+        }
+
+        GGML_ABORT("fatal error");
+    }
 };
 static_assert(std::is_trivially_copyable<llama_hparams>::value, "llama_hparams must be trivially copyable");
@@ -2635,6 +2670,8 @@ struct llama_layer {
     struct ggml_tensor * ffn_gate_scale;
     struct ggml_tensor * ffn_up_scale;
     struct ggml_tensor * ffn_down_scale;
+
+    struct ggml_tensor * bskcn_tv;
 };
 // very similar to llama_batch,
@@ -5937,6 +5974,21 @@ static void llm_load_hparams(
                     default: model.type = e_model::MODEL_UNKNOWN;
                 }
             } break;
+        case LLM_ARCH_SOLAR:
+            {
+                ml.get_key(LLM_KV_ATTENTION_LAYERNORM_RMS_EPS, hparams.f_norm_rms_eps);
+
+                for (int i = 0; i < hparams.n_bskcn_arr.max_size(); ++i) {
+                    auto & bskcn = hparams.n_bskcn_arr.at(i);
+                    bskcn.fill(0);
+                    ml.get_key_or_arr(::format(LLM_KV_NAMES.at(LLM_KV_ATTENTION_BLOCK_SKIP_CONNECTION), LLM_ARCH_NAMES.at(ml.llm_kv.arch), i), bskcn, hparams.n_layer, false);
+                }
+
+                switch (hparams.n_layer) {
+                    case 64: model.type = e_model::MODEL_22B; break;
+                    default: model.type = e_model::MODEL_UNKNOWN;
+                }
+            }
         default: (void)0;
     }
@@ -8420,6 +8472,38 @@ static bool llm_load_tensors(
                 }
             } break;
+        case LLM_ARCH_SOLAR:
+            {
+                model.tok_embd = ml.create_tensor(ctx_input, tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab});
+
+                // output
+                {
+                    model.output_norm = ml.create_tensor(ctx_output, tn(LLM_TENSOR_OUTPUT_NORM, "weight"), {n_embd});
+                    model.output = ml.create_tensor(ctx_output_split, tn(LLM_TENSOR_OUTPUT, "weight"), {n_embd, n_vocab}, llama_model_loader::TENSOR_NOT_REQUIRED);
+                }
+
+                for (int i = 0; i < n_layer; ++i) {
+                    ggml_context * ctx_layer = ctx_for_layer(i);
+                    ggml_context * ctx_split = ctx_for_layer_split(i);
+
+                    auto & layer = model.layers[i];
+
+                    layer.attn_norm = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_ATTN_NORM, "weight", i), {n_embd});
+
+                    layer.wq = ml.create_tensor(ctx_split, tn(LLM_TENSOR_ATTN_Q, "weight", i), {n_embd, n_embd_head_k * n_head});
+                    layer.wk = ml.create_tensor(ctx_split, tn(LLM_TENSOR_ATTN_K, "weight", i), {n_embd, n_embd_k_gqa});
+                    layer.wv = ml.create_tensor(ctx_split, tn(LLM_TENSOR_ATTN_V, "weight", i), {n_embd, n_embd_v_gqa});
+                    layer.wo = ml.create_tensor(ctx_split, tn(LLM_TENSOR_ATTN_OUT, "weight", i), {n_embd_head_k * n_head, n_embd});
+
+                    layer.ffn_norm = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_FFN_NORM, "weight", i), {n_embd});
+
+                    layer.bskcn_tv = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_BSKCN_TV, "weight"), {2}, llama_model_loader::TENSOR_NOT_REQUIRED | (i != 0 ? llama_model_loader::TENSOR_DUPLICATED : 0));
+
+                    layer.ffn_gate = ml.create_tensor(ctx_split, tn(LLM_TENSOR_FFN_GATE, "weight", i), {n_embd, n_ff});
+                    layer.ffn_down = ml.create_tensor(ctx_split, tn(LLM_TENSOR_FFN_DOWN, "weight", i), { n_ff, n_embd});
+                    layer.ffn_up = ml.create_tensor(ctx_split, tn(LLM_TENSOR_FFN_UP, "weight", i), {n_embd, n_ff});
+                }
+            } break;
         default:
             throw std::runtime_error("unknown architecture");
     }
@@ -15173,6 +15257,158 @@ struct llm_build_context {
         return gf;
     }
+
+    ggml_cgraph * build_solar() {
+        struct ggml_cgraph * gf = ggml_new_graph_custom(ctx0, llama_model_max_nodes(model), false);
+
+        // mutable variable, needed during the last layer of the computation to skip unused tokens
+        int32_t n_tokens = this->n_tokens;
+
+        const int64_t n_embd_head = hparams.n_embd_head_v;
+        GGML_ASSERT(n_embd_head == hparams.n_embd_head_k);
+        GGML_ASSERT(n_embd_head == hparams.n_rot);
+
+        struct ggml_tensor * cur;
+        struct ggml_tensor * inpL;
+
+        inpL = llm_build_inp_embd(ctx0, lctx, hparams, batch, model.tok_embd, cb);
+
+        // inp_pos - contains the positions
+        struct ggml_tensor * inp_pos = build_inp_pos();
+
+        // KQ_mask (mask for 1 head, it will be broadcasted to all heads)
+        struct ggml_tensor * KQ_mask = build_inp_KQ_mask();
+
+        struct ggml_tensor * bskcn_1;
+        struct ggml_tensor * bskcn_2;
+
+        for (int il = 0; il < n_layer; ++il) {
+            struct ggml_tensor * inpSA = inpL;
+
+            if (hparams.n_bskcn(0, il)) {
+                bskcn_1 = inpSA;
+            }
+
+            if (hparams.n_bskcn(1, il)) {
+                bskcn_2 = inpSA;
+            }
+
+            if (hparams.n_bskcn(2, il)) {
+                inpSA = ggml_add(
+                    ctx0,
+                    ggml_mul(ctx0, bskcn_1, ggml_view_1d(ctx0, model.layers[il].bskcn_tv, 1, 0)),
+                    ggml_mul(ctx0, inpSA, ggml_view_1d(ctx0, model.layers[il].bskcn_tv, 1, ggml_element_size(model.layers[il].bskcn_tv))));
+            }
+
+            if (hparams.n_bskcn(3, il)) {
+                inpSA = ggml_add(
+                    ctx0,
+                    ggml_mul(ctx0, bskcn_2, ggml_view_1d(ctx0, model.layers[il].bskcn_tv, 1, 0)),
+                    ggml_mul(ctx0, inpSA, ggml_view_1d(ctx0, model.layers[il].bskcn_tv, 1, ggml_element_size(model.layers[il].bskcn_tv))));
+            }
+
+            // norm
+            cur = llm_build_norm(ctx0, inpL, hparams,
+                    model.layers[il].attn_norm, NULL,
+                    LLM_NORM_RMS, cb, il);
+            cb(cur, "attn_norm", il);
+
+            // self-attention
+            {
+                // rope freq factors for llama3; may return nullptr for llama2 and other models
+                struct ggml_tensor * rope_factors = build_rope_factors(il);
+
+                // compute Q and K and RoPE them
+                struct ggml_tensor * Qcur = llm_build_lora_mm(lctx, ctx0, model.layers[il].wq, cur);
+                cb(Qcur, "Qcur", il);
+                if (model.layers[il].bq) {
+                    Qcur = ggml_add(ctx0, Qcur, model.layers[il].bq);
+                    cb(Qcur, "Qcur", il);
+                }
+
+                struct ggml_tensor * Kcur = llm_build_lora_mm(lctx, ctx0, model.layers[il].wk, cur);
+                cb(Kcur, "Kcur", il);
+                if (model.layers[il].bk) {
+                    Kcur = ggml_add(ctx0, Kcur, model.layers[il].bk);
+                    cb(Kcur, "Kcur", il);
+                }
+
+                struct ggml_tensor * Vcur = llm_build_lora_mm(lctx, ctx0, model.layers[il].wv, cur);
+                cb(Vcur, "Vcur", il);
+                if (model.layers[il].bv) {
+                    Vcur = ggml_add(ctx0, Vcur, model.layers[il].bv);
+                    cb(Vcur, "Vcur", il);
+                }
+
+                Qcur = ggml_rope_ext(
+                    ctx0, ggml_reshape_3d(ctx0, Qcur, n_embd_head, n_head, n_tokens), inp_pos, rope_factors,
+                    n_rot, rope_type, n_ctx_orig, freq_base, freq_scale,
+                    ext_factor, attn_factor, beta_fast, beta_slow
+                );
+                cb(Qcur, "Qcur", il);
+
+                Kcur = ggml_rope_ext(
+                    ctx0, ggml_reshape_3d(ctx0, Kcur, n_embd_head, n_head_kv, n_tokens), inp_pos, rope_factors,
+                    n_rot, rope_type, n_ctx_orig, freq_base, freq_scale,
+                    ext_factor, attn_factor, beta_fast, beta_slow
+                );
+                cb(Kcur, "Kcur", il);
+
+                cur = llm_build_kv(ctx0, lctx, kv_self, gf,
+                        model.layers[il].wo, model.layers[il].bo,
+                        Kcur, Vcur, Qcur, KQ_mask, n_tokens, kv_head, n_kv, 1.0f/sqrtf(float(n_embd_head)), cb, il);
+            }
+
+            if (il == n_layer - 1) {
+                // skip computing output for unused tokens
+                struct ggml_tensor * inp_out_ids = build_inp_out_ids();
+                n_tokens = n_outputs;
+                cur = ggml_get_rows(ctx0, cur, inp_out_ids);
+                inpSA = ggml_get_rows(ctx0, inpSA, inp_out_ids);
+            }
+
+            struct ggml_tensor * ffn_inp = ggml_add(ctx0, cur, inpSA);
+            cb(ffn_inp, "ffn_inp", il);
+
+            // feed-forward network
+            cur = llm_build_norm(ctx0, ffn_inp, hparams,
+                    model.layers[il].ffn_norm, NULL,
+                    LLM_NORM_RMS, cb, il);
+            cb(cur, "ffn_norm", il);
+
+            cur = llm_build_ffn(ctx0, lctx, cur,
+                    model.layers[il].ffn_up, model.layers[il].ffn_up_b, NULL,
+                    model.layers[il].ffn_gate, model.layers[il].ffn_gate_b, NULL,
+                    model.layers[il].ffn_down, model.layers[il].ffn_down_b, NULL,
+                    NULL,
+                    LLM_FFN_SILU, LLM_FFN_PAR, cb, il);
+            cb(cur, "ffn_out", il);
+
+            cur = ggml_add(ctx0, cur, ffn_inp);
+            cb(cur, "ffn_out", il);
+
+            cur = lctx.cvec.apply_to(ctx0, cur, il);
+            cb(cur, "l_out", il);
+
+            // input for next layer
+            inpL = cur;
+        }
+
+        cur = inpL;
+
+        cur = llm_build_norm(ctx0, cur, hparams,
+                model.output_norm, NULL,
+                LLM_NORM_RMS, cb, -1);
+        cb(cur, "result_norm", -1);
+
+        // lm_head
+        cur = llm_build_lora_mm(lctx, ctx0, model.output, cur);
+        cb(cur, "result_output", -1);
+
+        ggml_build_forward_expand(gf, cur);
+
+        return gf;
+    }
 };
 static struct ggml_cgraph * llama_build_graph_defrag(llama_context & lctx, const std::vector<uint32_t> & ids) {
@@ -15423,6 +15659,10 @@ static struct ggml_cgraph * llama_build_graph(
             {
                 result = llm.build_rwkv6();
             } break;
+        case LLM_ARCH_SOLAR:
+            {
+                result = llm.build_solar();
+            } break;
         default:
             GGML_ABORT("fatal error");
     }
@@ -18503,6 +18743,7 @@ enum llama_rope_type llama_rope_type(const struct llama_model * model) {
         case LLM_ARCH_ARCTIC:
         case LLM_ARCH_DEEPSEEK2:
         case LLM_ARCH_CHATGLM:
+        case LLM_ARCH_SOLAR:
             return LLAMA_ROPE_TYPE_NORM;
         // the pairs of head values are offset by n_rot/2
--
2.46.0