# loganalysis.py — scan a log file for errors and stream them through a local Ollama model
  1. import sys
  2. import re
  3. import requests
  4. import json
  5. # prelines and postlines represent the number of lines of context to include in the output around the error
  6. prelines = 10
  7. postlines = 10
  8. def find_errors_in_log_file():
  9. if len(sys.argv) < 2:
  10. print("Usage: python loganalysis.py <filename>")
  11. return
  12. log_file_path = sys.argv[1]
  13. with open(log_file_path, 'r') as log_file:
  14. log_lines = log_file.readlines()
  15. error_lines = []
  16. for i, line in enumerate(log_lines):
  17. if re.search('error', line, re.IGNORECASE):
  18. error_lines.append(i)
  19. error_logs = []
  20. for error_line in error_lines:
  21. start_index = max(0, error_line - prelines)
  22. end_index = min(len(log_lines), error_line + postlines)
  23. error_logs.extend(log_lines[start_index:end_index])
  24. return error_logs
  25. error_logs = find_errors_in_log_file()
  26. data = {
  27. "prompt": "\n".join(error_logs),
  28. "model": "mattw/loganalyzer"
  29. }
  30. response = requests.post("http://localhost:11434/api/generate", json=data, stream=True)
  31. for line in response.iter_lines():
  32. if line:
  33. json_data = json.loads(line)
  34. if json_data['done'] == False:
  35. print(json_data['response'], end='', flush=True)