|
@@ -1,5 +1,9 @@
|
|
|
|
+from bs4 import BeautifulSoup
|
|
|
|
+import json
|
|
|
|
+import markdown
|
|
import time
|
|
import time
|
|
|
|
|
|
|
|
+
|
|
from fastapi import FastAPI, Request
|
|
from fastapi import FastAPI, Request
|
|
from fastapi.staticfiles import StaticFiles
|
|
from fastapi.staticfiles import StaticFiles
|
|
from fastapi import HTTPException
|
|
from fastapi import HTTPException
|
|
@@ -16,7 +20,7 @@ from apps.rag.main import app as rag_app
|
|
|
|
|
|
from apps.web.main import app as webui_app
|
|
from apps.web.main import app as webui_app
|
|
|
|
|
|
-from config import ENV, FRONTEND_BUILD_DIR
|
|
|
|
|
|
+from config import ENV, VERSION, FRONTEND_BUILD_DIR
|
|
|
|
|
|
|
|
|
|
class SPAStaticFiles(StaticFiles):
|
|
class SPAStaticFiles(StaticFiles):
|
|
@@ -65,14 +69,85 @@ app.mount("/rag/api/v1", rag_app)
|
|
|
|
|
|
@app.get("/api/config")
async def get_app_config():
    """Report frontend configuration: app version, whether image
    generation is enabled, and the configured model/prompt defaults."""
    return {
        "status": True,
        "version": VERSION,
        "images": images_app.state.ENABLED,
        "default_models": webui_app.state.DEFAULT_MODELS,
        "default_prompt_suggestions": webui_app.state.DEFAULT_PROMPT_SUGGESTIONS,
    }
|
|
|
|
|
|
|
|
|
|
|
|
# Helper for /api/changelog: turn one changelog section's <ul> into JSON.
def parse_section(section):
    """Parse a changelog section's <ul> into a list of item dicts.

    Each <li> becomes ``{"title", "content", "raw"}``: the text before the
    first ``": "`` separator (empty string when no separator is present),
    the remaining text, and the raw HTML of the <li>.

    :param section: BeautifulSoup Tag containing <li> elements, or None.
    :return: list of item dicts; empty when *section* is None or empty.
    """
    # Guard: the caller finds the <ul> via find_next_sibling("ul"), which
    # returns None when a heading has no list beneath it — previously this
    # raised AttributeError here.
    if section is None:
        return []

    items = []
    for li in section.find_all("li"):
        # Keep the raw HTML so the frontend can render inline formatting.
        raw_html = str(li)

        # Plain text with tags stripped.
        text = li.get_text(separator=" ", strip=True)

        # Entries follow a "Title: content" convention; entries without a
        # separator keep the whole text as content and an empty title.
        parts = text.split(": ", 1)
        title = parts[0].strip() if len(parts) > 1 else ""
        content = parts[1].strip() if len(parts) > 1 else text

        items.append({"title": title, "content": content, "raw": raw_html})
    return items
|
|
|
|
+
|
|
|
|
+
|
|
|
|
@app.get("/api/changelog")
async def get_app_changelog():
    """Parse ../CHANGELOG.md (Keep-a-Changelog style) into JSON.

    Returns ``{version: {"date": ..., "added": [...], "fixed": [...], ...}}``
    where each section's items come from :func:`parse_section`.  On failure
    returns ``{"error": ...}`` instead of raising, so the frontend always
    gets a JSON body.
    """
    try:
        # NOTE(review): path is relative to the process CWD — assumes the
        # server is started from the backend directory; confirm at deploy.
        with open("../CHANGELOG.md", "r", encoding="utf-8") as file:
            changelog_content = file.read()

        # Render the markdown to HTML, then walk the heading structure.
        html_content = markdown.markdown(changelog_content)
        soup = BeautifulSoup(html_content, "html.parser")

        changelog_json = {}

        # Each <h2> is one release heading: "[x.y.z] - YYYY-MM-DD".
        for version in soup.find_all("h2"):
            heading = version.get_text().strip()
            parts = heading.split(" - ", 1)
            version_number = parts[0][1:-1]  # strip the surrounding brackets
            # Tolerate headings without a date (e.g. "[Unreleased]") —
            # indexing [1] unconditionally raised IndexError before.
            date = parts[1] if len(parts) > 1 else ""

            version_data = {"date": date}

            # Walk siblings until the next release heading; each <h3> is a
            # section title ("Added", "Fixed", ...) followed by a <ul>.
            current = version.find_next_sibling()
            while current and current.name != "h2":
                if current.name == "h3":
                    section_title = current.get_text().lower()
                    section_items = parse_section(current.find_next_sibling("ul"))
                    version_data[section_title] = section_items
                current = current.find_next_sibling()

            changelog_json[version_number] = version_data

        return changelog_json
    except FileNotFoundError:
        # Fix: error message previously referenced "readme.md".
        return {"error": "CHANGELOG.md not found"}
    except Exception as e:
        # Broad catch is deliberate: this endpoint is best-effort and must
        # not take down the API over a malformed changelog.
        return {"error": f"An error occurred: {e}"}
|
|
|
|
+
|
|
|
|
+
|
|
app.mount(
|
|
app.mount(
|
|
"/",
|
|
"/",
|
|
SPAStaticFiles(directory=FRONTEND_BUILD_DIR, html=True),
|
|
SPAStaticFiles(directory=FRONTEND_BUILD_DIR, html=True),
|