# server.py
  1. from aiohttp import web
  2. from ollama import engine
  3. def set_parser(parser):
  4. parser.add_argument('--host', default='127.0.0.1')
  5. parser.add_argument('--port', default=7734)
  6. parser.set_defaults(fn=serve)
  7. def serve(models_home='.', *args, **kwargs):
  8. app = web.Application()
  9. app.add_routes([
  10. web.post('/load', load),
  11. web.post('/unload', unload),
  12. web.post('/generate', generate),
  13. ])
  14. app.update({
  15. 'llms': {},
  16. 'models_home': models_home,
  17. })
  18. web.run_app(app, **kwargs)
  19. async def load(request):
  20. body = await request.json()
  21. model = body.get('model')
  22. if not model:
  23. raise web.HTTPBadRequest()
  24. kwargs = {
  25. 'llms': request.app.get('llms'),
  26. 'models_home': request.app.get('models_home'),
  27. }
  28. engine.load(model, **kwargs)
  29. return web.Response()
  30. async def unload(request):
  31. body = await request.json()
  32. model = body.get('model')
  33. if not model:
  34. raise web.HTTPBadRequest()
  35. engine.unload(model, llms=request.app.get('llms'))
  36. return web.Response()
  37. async def generate(request):
  38. body = await request.json()
  39. model = body.get('model')
  40. if not model:
  41. raise web.HTTPBadRequest()
  42. prompt = body.get('prompt')
  43. if not prompt:
  44. raise web.HTTPBadRequest()
  45. response = web.StreamResponse()
  46. await response.prepare(request)
  47. kwargs = {
  48. 'llms': request.app.get('llms'),
  49. 'models_home': request.app.get('models_home'),
  50. }
  51. for output in engine.generate(model, prompt, **kwargs):
  52. await response.write(output.encode('utf-8'))
  53. await response.write(b'\n')
  54. return response