ggml-metal.h

//go:build darwin

/**
 * llama.cpp - git e782c9e735f93ab4767ffc37462c523b73a17ddc
 *
 * MIT License
 *
 * Copyright (c) 2023 Georgi Gerganov
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in all
 * copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
// An interface for computing a ggml_cgraph with Metal
//
// This is a fully functional interface that extends ggml with GPU support for Apple devices.
// A similar interface can be created for other GPU backends (e.g. Vulkan, CUDA, OpenCL, etc.)
//
// How does it work?
//
// As long as your program can create and evaluate a ggml_cgraph on the CPU, you can use this
// interface to evaluate the same graph on the GPU. Instead of using ggml_graph_compute(), you
// use ggml_metal_graph_compute() (or ggml_vulkan_graph_compute(), etc.)
//
// You only need to make sure that all memory buffers used during graph creation
// are mapped to the device memory with the ggml_metal_add_buffer() function. This mapping is
// used during the graph evaluation to determine the arguments of the compute kernels.
//
// Synchronization between device and host memory (for example, for input and output tensors)
// is done with the ggml_metal_set_tensor() and ggml_metal_get_tensor() functions.
//
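// Example (a minimal usage sketch, not part of the original header): the full
// flow from init to free. `ctx_data`, `gf`, `inp`, and `out` are placeholders
// for a ggml context, a graph built from it, and tensors allocated inside it;
// error handling is omitted.
//
//   struct ggml_metal_context * ctx_metal = ggml_metal_init(1);
//
//   // map the host memory holding the graph's tensors to device memory
//   ggml_metal_add_buffer(ctx_metal, "data",
//           ggml_get_mem_buffer(ctx_data),
//           ggml_get_mem_size  (ctx_data), 0); // 0: assuming no tensor needs a split view
//
//   ggml_metal_set_tensor   (ctx_metal, inp);  // upload input
//   ggml_metal_graph_compute(ctx_metal, &gf);  // evaluate the graph on the GPU
//   ggml_metal_get_tensor   (ctx_metal, out);  // download output
//
//   ggml_metal_free(ctx_metal);
//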
#pragma once

#include <stddef.h>
#include <stdbool.h>

// max memory buffers that can be mapped to the device
#define GGML_METAL_MAX_BUFFERS 16

struct ggml_tensor;
struct ggml_cgraph;

#ifdef __cplusplus
extern "C" {
#endif
struct ggml_metal_context;

// number of command buffers to use
struct ggml_metal_context * ggml_metal_init(int n_cb);
void ggml_metal_free(struct ggml_metal_context * ctx);

// set the number of command buffers to use
void ggml_metal_set_n_cb(struct ggml_metal_context * ctx, int n_cb);
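// Example (sketch): n_cb controls how many Metal command buffers the graph's
// work is encoded into; 1 is the simplest choice for serial evaluation.
//
//   struct ggml_metal_context * ctx_metal = ggml_metal_init(1);
//   ...
//   ggml_metal_set_n_cb(ctx_metal, 4); // e.g. switch to 4 command buffers
//   ...
//   ggml_metal_free(ctx_metal);
//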
// creates a mapping between a host memory buffer and a device memory buffer
// - make sure to map all buffers used in the graph before calling ggml_metal_graph_compute
// - the mapping is used during computation to determine the arguments of the compute kernels
// - you don't need to keep the host memory buffer allocated as it is never accessed by Metal
// - max_size specifies the maximum size of a tensor and is used to create shared views such
//   that it is guaranteed that the tensor will fit in at least one of the views
//
bool ggml_metal_add_buffer(
        struct ggml_metal_context * ctx,
                       const char * name,
                             void * data,
                             size_t size,
                             size_t max_size);
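// Example (sketch): mapping the memory of a ggml context; `ctx_data` is a
// placeholder, and `max_tensor_size` would typically be the size in bytes of
// the largest tensor in the graph (e.g. the largest ggml_nbytes() value).
//
//   if (!ggml_metal_add_buffer(ctx_metal, "data",
//           ggml_get_mem_buffer(ctx_data),
//           ggml_get_mem_size  (ctx_data),
//           max_tensor_size)) {
//       fprintf(stderr, "%s: failed to map buffer\n", __func__);
//   }
//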
// set data from host memory into the device
void ggml_metal_set_tensor(struct ggml_metal_context * ctx, struct ggml_tensor * t);

// get data from the device into host memory
void ggml_metal_get_tensor(struct ggml_metal_context * ctx, struct ggml_tensor * t);
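// Example (sketch): synchronizing tensors around a compute call; `inp` and
// `out` are placeholder tensors living in a buffer that was mapped with
// ggml_metal_add_buffer(), and `src` is host data to evaluate on.
//
//   memcpy(inp->data, src, ggml_nbytes(inp)); // fill the host copy
//   ggml_metal_set_tensor(ctx_metal, inp);    // host -> device
//
//   ggml_metal_graph_compute(ctx_metal, &gf); // declared below
//
//   ggml_metal_get_tensor(ctx_metal, out);    // device -> host
//   const float * result = (const float *) out->data; // assuming an F32 tensor
//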
// same as ggml_graph_compute but uses Metal
// creates gf->n_threads command buffers in parallel
void ggml_metal_graph_compute(struct ggml_metal_context * ctx, struct ggml_cgraph * gf);

#ifdef __cplusplus
}
#endif