03-load_exception.diff 1.4 KB

diff --git a/llama.cpp b/llama.cpp
index 4225f955..7b762f86 100644
--- a/llama.cpp
+++ b/llama.cpp
@@ -4756,7 +4756,7 @@ static int llama_model_load(const std::string & fname, llama_model & model, llam
         }
     } catch (const std::exception & err) {
         LLAMA_LOG_ERROR("%s: error loading model: %s\n", __func__, err.what());
-        return -1;
+        throw;
     }
 
     return 0;
@@ -12102,16 +12102,22 @@ struct llama_model * llama_load_model_from_file(
         };
     }
 
-    int status = llama_model_load(path_model, *model, params);
-    GGML_ASSERT(status <= 0);
-    if (status < 0) {
-        if (status == -1) {
-            LLAMA_LOG_ERROR("%s: failed to load model\n", __func__);
-        } else if (status == -2) {
-            LLAMA_LOG_INFO("%s: cancelled model load\n", __func__);
+    try {
+        int status = llama_model_load(path_model, *model, params);
+        GGML_ASSERT(status <= 0);
+        if (status < 0) {
+            if (status == -1) {
+                LLAMA_LOG_ERROR("%s: failed to load model\n", __func__);
+            } else if (status == -2) {
+                LLAMA_LOG_INFO("%s: cancelled model load\n", __func__);
+            }
+            delete model;
+            return nullptr;
         }
+    } catch (...) {
+        LLAMA_LOG_ERROR("%s: exception loading model\n", __func__);
         delete model;
-        return nullptr;
+        throw;
     }
 
     return model;
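
The net effect of the patch: llama_model_load still logs the loader error, but rethrows it instead of collapsing it into -1, and llama_load_model_from_file frees the partially constructed model before letting the exception propagate to the caller. A minimal caller sketch of what this enables, assuming the llama.cpp C API of the same vintage (llama_model_default_params, llama_load_model_from_file, llama_free_model; backend init/teardown elided for brevity):

// load_check.cpp -- illustrative caller for the patched build above.
#include <cstdio>
#include <exception>

#include "llama.h"

int main(int argc, char ** argv) {
    if (argc < 2) {
        fprintf(stderr, "usage: %s <model.gguf>\n", argv[0]);
        return 1;
    }

    llama_model_params params = llama_model_default_params();

    try {
        llama_model * model = llama_load_model_from_file(argv[1], params);
        if (model == nullptr) {
            // Still reachable: the status < 0 paths kept inside the new
            // try block (e.g. a cancelled load) continue to return nullptr.
            fprintf(stderr, "load failed (no exception)\n");
            return 1;
        }
        llama_free_model(model);
    } catch (const std::exception & err) {
        // With the patch applied, the original loader error (e.g. a failed
        // GGUF read) surfaces here instead of being reduced to nullptr.
        fprintf(stderr, "load threw: %s\n", err.what());
        return 1;
    }

    return 0;
}

Rethrowing after logging keeps the existing LLAMA_LOG_ERROR lines intact while also handing the embedder the concrete err.what() message, and the delete model in the catch-all block avoids leaking the half-built model on the new exit path.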