03-load_exception.diff 1.7 KB

From 544a2d2e646d39e878d87dfbb3398a356bc560ab Mon Sep 17 00:00:00 2001
From: Michael Yang <mxyng@pm.me>
Date: Thu, 23 May 2024 11:18:45 -0700
Subject: [PATCH] throw exception on load errors

---
 llama.cpp | 25 ++++++++++++++++---------
 1 file changed, 16 insertions(+), 9 deletions(-)

diff --git a/llama.cpp b/llama.cpp
index 15c66077..8ba90b6a 100644
--- a/llama.cpp
+++ b/llama.cpp
@@ -6346,7 +6346,7 @@ static int llama_model_load(const std::string & fname, llama_model & model, llam
         }
     } catch (const std::exception & err) {
         LLAMA_LOG_ERROR("%s: error loading model: %s\n", __func__, err.what());
-        return -1;
+        throw;
     }
 
     return 0;
@@ -15600,16 +15600,23 @@ struct llama_model * llama_load_model_from_file(
         }
         model->rpc_servers.push_back(servers);
     }
-    int status = llama_model_load(path_model, *model, params);
-    GGML_ASSERT(status <= 0);
-    if (status < 0) {
-        if (status == -1) {
-            LLAMA_LOG_ERROR("%s: failed to load model\n", __func__);
-        } else if (status == -2) {
-            LLAMA_LOG_INFO("%s: cancelled model load\n", __func__);
+
+    try {
+        int status = llama_model_load(path_model, *model, params);
+        GGML_ASSERT(status <= 0);
+        if (status < 0) {
+            if (status == -1) {
+                LLAMA_LOG_ERROR("%s: failed to load model\n", __func__);
+            } else if (status == -2) {
+                LLAMA_LOG_INFO("%s: cancelled model load\n", __func__);
+            }
+            delete model;
+            return nullptr;
         }
+    } catch (...) {
+        LLAMA_LOG_ERROR("%s: exception loading model\n", __func__);
         delete model;
-        return nullptr;
+        throw;
     }
 
     return model;
-- 
2.45.1
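
With this patch applied, llama_model_load rethrows the exception it catches instead of swallowing it and returning -1, and llama_load_model_from_file frees the partially constructed model before propagating that exception, rather than only returning nullptr. Below is a minimal caller-side sketch, not part of the patch, showing how consuming code might then handle both failure paths; the model path and error messages are placeholders, and it assumes the llama.h API of this llama.cpp version (llama_model_default_params, llama_load_model_from_file, llama_free_model).

#include <cstdio>
#include <exception>

#include "llama.h"

int main() {
    llama_model_params params = llama_model_default_params();

    llama_model * model = nullptr;
    try {
        // With the patch, load errors can surface here as a C++ exception
        // in addition to the pre-existing nullptr return.
        model = llama_load_model_from_file("model.gguf", params);
    } catch (const std::exception & err) {
        std::fprintf(stderr, "error loading model: %s\n", err.what());
        return 1;
    }

    if (model == nullptr) {
        // Still possible, e.g. when the load is cancelled (status == -2).
        std::fprintf(stderr, "failed to load model\n");
        return 1;
    }

    llama_free_model(model);
    return 0;
}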