@@ -1,3 +1,29 @@
+/**
+ * llama.cpp - git 059031b8c40e1f4ba60586842c5b1ed3ddf61842
+ *
+ * MIT License
+ *
+ * Copyright (c) 2023-2024 The ggml authors
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
 #include "ggml-cuda.h"
 #include "ggml.h"
 #include "ggml-backend-impl.h"
@@ -392,6 +418,10 @@ GGML_CALL static bool ggml_backend_buffer_is_cuda(ggml_backend_buffer_t buffer)
 GGML_CALL static void ggml_backend_cuda_buffer_free_buffer(ggml_backend_buffer_t buffer) {
     ggml_backend_cuda_buffer_context * ctx = (ggml_backend_cuda_buffer_context *)buffer->context;
     delete ctx;
+
+    // TODO: this needs to be freed in cuda and hipblas backends because
+    // the cuda backend implementation compiled with msvc
+    free(buffer);
 }
 
 GGML_CALL static void * ggml_backend_cuda_buffer_get_base(ggml_backend_buffer_t buffer) {
@@ -3028,8 +3058,6 @@ GGML_CALL static ggml_backend_t ggml_backend_reg_cuda_init(const char * params,
     GGML_UNUSED(params);
 }
 
-extern "C" GGML_CALL int ggml_backend_cuda_reg_devices();
-
 GGML_CALL int ggml_backend_cuda_reg_devices() {
     int device_count = ggml_backend_cuda_get_device_count();
     //int device_count = 1; // DEBUG: some tools require delaying CUDA initialization