// concurrency_test.go
  1. //go:build integration
  2. package integration
  3. import (
  4. "context"
  5. "log/slog"
  6. "os"
  7. "strconv"
  8. "sync"
  9. "testing"
  10. "time"
  11. "github.com/stretchr/testify/require"
  12. "github.com/ollama/ollama/api"
  13. "github.com/ollama/ollama/format"
  14. )
  15. func TestMultiModelConcurrency(t *testing.T) {
  16. var (
  17. req = [2]api.GenerateRequest{
  18. {
  19. Model: "orca-mini",
  20. Prompt: "why is the ocean blue?",
  21. Stream: &stream,
  22. KeepAlive: &api.Duration{Duration: 10 * time.Second},
  23. Options: map[string]interface{}{
  24. "seed": 42,
  25. "temperature": 0.0,
  26. },
  27. }, {
  28. Model: "tinydolphin",
  29. Prompt: "what is the origin of the us thanksgiving holiday?",
  30. Stream: &stream,
  31. KeepAlive: &api.Duration{Duration: 10 * time.Second},
  32. Options: map[string]interface{}{
  33. "seed": 42,
  34. "temperature": 0.0,
  35. },
  36. },
  37. }
  38. resp = [2][]string{
  39. {"sunlight"},
  40. {"england", "english", "massachusetts", "pilgrims", "british", "festival"},
  41. }
  42. )
  43. var wg sync.WaitGroup
  44. wg.Add(len(req))
  45. ctx, cancel := context.WithTimeout(context.Background(), time.Second*240)
  46. defer cancel()
  47. client, _, cleanup := InitServerConnection(ctx, t)
  48. defer cleanup()
  49. for i := 0; i < len(req); i++ {
  50. require.NoError(t, PullIfMissing(ctx, client, req[i].Model))
  51. }
  52. for i := 0; i < len(req); i++ {
  53. go func(i int) {
  54. defer wg.Done()
  55. DoGenerate(ctx, t, client, req[i], resp[i], 60*time.Second, 10*time.Second)
  56. }(i)
  57. }
  58. wg.Wait()
  59. }
  60. func TestIntegrationConcurrentPredictOrcaMini(t *testing.T) {
  61. req, resp := GenerateRequests()
  62. reqLimit := len(req)
  63. iterLimit := 5
  64. if s := os.Getenv("OLLAMA_MAX_VRAM"); s != "" {
  65. maxVram, err := strconv.ParseUint(s, 10, 64)
  66. require.NoError(t, err)
  67. // Don't hammer on small VRAM cards...
  68. if maxVram < 4*format.GibiByte {
  69. reqLimit = min(reqLimit, 2)
  70. iterLimit = 2
  71. }
  72. }
  73. ctx, cancel := context.WithTimeout(context.Background(), 9*time.Minute)
  74. defer cancel()
  75. client, _, cleanup := InitServerConnection(ctx, t)
  76. defer cleanup()
  77. // Get the server running (if applicable) warm the model up with a single initial request
  78. DoGenerate(ctx, t, client, req[0], resp[0], 60*time.Second, 10*time.Second)
  79. var wg sync.WaitGroup
  80. wg.Add(reqLimit)
  81. for i := 0; i < reqLimit; i++ {
  82. go func(i int) {
  83. defer wg.Done()
  84. for j := 0; j < iterLimit; j++ {
  85. slog.Info("Starting", "req", i, "iter", j)
  86. // On slower GPUs it can take a while to process the concurrent requests
  87. // so we allow a much longer initial timeout
  88. DoGenerate(ctx, t, client, req[i], resp[i], 120*time.Second, 20*time.Second)
  89. }
  90. }(i)
  91. }
  92. wg.Wait()
  93. }
  94. // Stress the system if we know how much VRAM it has, and attempt to load more models than will fit
  95. func TestMultiModelStress(t *testing.T) {
  96. s := os.Getenv("OLLAMA_MAX_VRAM") // TODO - discover actual VRAM
  97. if s == "" {
  98. t.Skip("OLLAMA_MAX_VRAM not specified, can't pick the right models for the stress test")
  99. }
  100. maxVram, err := strconv.ParseUint(s, 10, 64)
  101. if err != nil {
  102. t.Fatal(err)
  103. }
  104. type model struct {
  105. name string
  106. size uint64 // Approximate amount of VRAM they typically use when fully loaded in VRAM
  107. }
  108. smallModels := []model{
  109. {
  110. name: "orca-mini",
  111. size: 2992 * format.MebiByte,
  112. },
  113. {
  114. name: "phi",
  115. size: 2616 * format.MebiByte,
  116. },
  117. {
  118. name: "gemma:2b",
  119. size: 2364 * format.MebiByte,
  120. },
  121. {
  122. name: "stable-code:3b",
  123. size: 2608 * format.MebiByte,
  124. },
  125. {
  126. name: "starcoder2:3b",
  127. size: 2166 * format.MebiByte,
  128. },
  129. }
  130. mediumModels := []model{
  131. {
  132. name: "llama2",
  133. size: 5118 * format.MebiByte,
  134. },
  135. {
  136. name: "mistral",
  137. size: 4620 * format.MebiByte,
  138. },
  139. {
  140. name: "orca-mini:7b",
  141. size: 5118 * format.MebiByte,
  142. },
  143. {
  144. name: "dolphin-mistral",
  145. size: 4620 * format.MebiByte,
  146. },
  147. {
  148. name: "gemma:7b",
  149. size: 5000 * format.MebiByte,
  150. },
  151. {
  152. name: "codellama:7b",
  153. size: 5118 * format.MebiByte,
  154. },
  155. }
  156. // These seem to be too slow to be useful...
  157. // largeModels := []model{
  158. // {
  159. // name: "llama2:13b",
  160. // size: 7400 * format.MebiByte,
  161. // },
  162. // {
  163. // name: "codellama:13b",
  164. // size: 7400 * format.MebiByte,
  165. // },
  166. // {
  167. // name: "orca-mini:13b",
  168. // size: 7400 * format.MebiByte,
  169. // },
  170. // {
  171. // name: "gemma:7b",
  172. // size: 5000 * format.MebiByte,
  173. // },
  174. // {
  175. // name: "starcoder2:15b",
  176. // size: 9100 * format.MebiByte,
  177. // },
  178. // }
  179. var chosenModels []model
  180. switch {
  181. case maxVram < 10000*format.MebiByte:
  182. slog.Info("selecting small models")
  183. chosenModels = smallModels
  184. // case maxVram < 30000*format.MebiByte:
  185. default:
  186. slog.Info("selecting medium models")
  187. chosenModels = mediumModels
  188. // default:
  189. // slog.Info("selecting large models")
  190. // chosenModels = largModels
  191. }
  192. req, resp := GenerateRequests()
  193. for i := range req {
  194. if i > len(chosenModels) {
  195. break
  196. }
  197. req[i].Model = chosenModels[i].name
  198. }
  199. ctx, cancel := context.WithTimeout(context.Background(), 15*time.Minute) // TODO baseline -- 10m too short
  200. defer cancel()
  201. client, _, cleanup := InitServerConnection(ctx, t)
  202. defer cleanup()
  203. // Make sure all the models are pulled before we get started
  204. for _, r := range req {
  205. require.NoError(t, PullIfMissing(ctx, client, r.Model))
  206. }
  207. var wg sync.WaitGroup
  208. consumed := uint64(256 * format.MebiByte) // Assume some baseline usage
  209. for i := 0; i < len(req); i++ {
  210. // Always get at least 2 models, but dont' overshoot VRAM too much or we'll take too long
  211. if i > 1 && consumed > maxVram {
  212. slog.Info("achieved target vram exhaustion", "count", i, "vram", format.HumanBytes2(maxVram), "models", format.HumanBytes2(consumed))
  213. break
  214. }
  215. consumed += chosenModels[i].size
  216. slog.Info("target vram", "count", i, "vram", format.HumanBytes2(maxVram), "models", format.HumanBytes2(consumed))
  217. wg.Add(1)
  218. go func(i int) {
  219. defer wg.Done()
  220. for j := 0; j < 3; j++ {
  221. slog.Info("Starting", "req", i, "iter", j, "model", req[i].Model)
  222. DoGenerate(ctx, t, client, req[i], resp[i], 120*time.Second, 5*time.Second)
  223. }
  224. }(i)
  225. }
  226. go func() {
  227. for {
  228. time.Sleep(2 * time.Second)
  229. select {
  230. case <-ctx.Done():
  231. return
  232. default:
  233. models, err := client.ListRunning(ctx)
  234. if err != nil {
  235. slog.Warn("failed to list running models", "error", err)
  236. continue
  237. }
  238. for _, m := range models.Models {
  239. slog.Info("loaded model snapshot", "model", m)
  240. }
  241. }
  242. }
  243. }()
  244. wg.Wait()
  245. }