0003-load_exception.patch

From e43bfd3f607a6dfcaba2d490d35f412a52e55e30 Mon Sep 17 00:00:00 2001
From: Michael Yang <mxyng@pm.me>
Date: Mon, 16 Sep 2024 15:53:12 -0700
Subject: [PATCH] 03-load_exception.diff

---
 src/llama.cpp | 25 ++++++++++++++++---------
 1 file changed, 16 insertions(+), 9 deletions(-)

diff --git a/src/llama.cpp b/src/llama.cpp
index 88355971..926bb71a 100644
--- a/src/llama.cpp
+++ b/src/llama.cpp
@@ -8635,7 +8635,7 @@ static int llama_model_load(const std::string & fname, llama_model & model, llam
         }
     } catch (const std::exception & err) {
         LLAMA_LOG_ERROR("%s: error loading model: %s\n", __func__, err.what());
-        return -1;
+        throw;
     }
 
     return 0;
@@ -18022,16 +18022,23 @@ struct llama_model * llama_load_model_from_file(
         }
         model->rpc_servers.push_back(servers);
     }
-    int status = llama_model_load(path_model, *model, params);
-    GGML_ASSERT(status <= 0);
-    if (status < 0) {
-        if (status == -1) {
-            LLAMA_LOG_ERROR("%s: failed to load model\n", __func__);
-        } else if (status == -2) {
-            LLAMA_LOG_INFO("%s: cancelled model load\n", __func__);
+
+    try {
+        int status = llama_model_load(path_model, *model, params);
+        GGML_ASSERT(status <= 0);
+        if (status < 0) {
+            if (status == -1) {
+                LLAMA_LOG_ERROR("%s: failed to load model\n", __func__);
+            } else if (status == -2) {
+                LLAMA_LOG_INFO("%s: cancelled model load\n", __func__);
+            }
+            delete model;
+            return nullptr;
         }
+    } catch (...) {
+        LLAMA_LOG_ERROR("%s: exception loading model\n", __func__);
         delete model;
-        return nullptr;
+        throw;
     }
 
     return model;
-- 
2.46.0
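
The net effect of the patch: llama_model_load rethrows the caught std::exception instead of collapsing it into a -1 return, and llama_load_model_from_file frees the partially constructed model before propagating the exception to its caller. Below is a minimal caller-side sketch of what that implies, assuming the patch is applied and using the standard llama.h API (llama_model_default_params, llama_load_model_from_file, llama_free_model); it is illustrative only and not part of the patch.

#include <cstdio>
#include <exception>

#include "llama.h"

// Caller-side sketch (assumption: patched library): llama_load_model_from_file
// may now propagate a C++ exception instead of only returning nullptr, so a
// wrapper that needs a plain error path catches and reports it.
int main(int argc, char ** argv) {
    if (argc < 2) {
        fprintf(stderr, "usage: %s <model.gguf>\n", argv[0]);
        return 1;
    }

    llama_model_params params = llama_model_default_params();

    try {
        llama_model * model = llama_load_model_from_file(argv[1], params);
        if (model == nullptr) {
            // status < 0 path: the library already logged the failure and freed the model
            fprintf(stderr, "failed to load model\n");
            return 1;
        }
        llama_free_model(model);
    } catch (const std::exception & err) {
        // exception path added by the patch: the library deletes the model, then rethrows
        fprintf(stderr, "exception while loading model: %s\n", err.what());
        return 1;
    }

    return 0;
}

Because the patched library deletes the model before rethrowing, the caller's catch block only needs to report or translate the error; it must not free the model again.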