chore: format

Timothy Jaeryang Baek committed 2 months ago
commit 6f7161acf8
49 changed files with 882 additions and 882 deletions
  1. src/lib/i18n/locales/ar-BH/translation.json (+18 -18)
  2. src/lib/i18n/locales/bg-BG/translation.json (+18 -18)
  3. src/lib/i18n/locales/bn-BD/translation.json (+18 -18)
  4. src/lib/i18n/locales/ca-ES/translation.json (+18 -18)
  5. src/lib/i18n/locales/ceb-PH/translation.json (+18 -18)
  6. src/lib/i18n/locales/cs-CZ/translation.json (+18 -18)
  7. src/lib/i18n/locales/da-DK/translation.json (+18 -18)
  8. src/lib/i18n/locales/de-DE/translation.json (+18 -18)
  9. src/lib/i18n/locales/dg-DG/translation.json (+18 -18)
  10. src/lib/i18n/locales/el-GR/translation.json (+18 -18)
  11. src/lib/i18n/locales/en-GB/translation.json (+18 -18)
  12. src/lib/i18n/locales/en-US/translation.json (+18 -18)
  13. src/lib/i18n/locales/es-ES/translation.json (+18 -18)
  14. src/lib/i18n/locales/eu-ES/translation.json (+18 -18)
  15. src/lib/i18n/locales/fa-IR/translation.json (+18 -18)
  16. src/lib/i18n/locales/fi-FI/translation.json (+18 -18)
  17. src/lib/i18n/locales/fr-CA/translation.json (+18 -18)
  18. src/lib/i18n/locales/fr-FR/translation.json (+18 -18)
  19. src/lib/i18n/locales/he-IL/translation.json (+18 -18)
  20. src/lib/i18n/locales/hi-IN/translation.json (+18 -18)
  21. src/lib/i18n/locales/hr-HR/translation.json (+18 -18)
  22. src/lib/i18n/locales/hu-HU/translation.json (+18 -18)
  23. src/lib/i18n/locales/id-ID/translation.json (+18 -18)
  24. src/lib/i18n/locales/ie-GA/translation.json (+18 -18)
  25. src/lib/i18n/locales/it-IT/translation.json (+18 -18)
  26. src/lib/i18n/locales/ja-JP/translation.json (+18 -18)
  27. src/lib/i18n/locales/ka-GE/translation.json (+18 -18)
  28. src/lib/i18n/locales/ko-KR/translation.json (+18 -18)
  29. src/lib/i18n/locales/lt-LT/translation.json (+18 -18)
  30. src/lib/i18n/locales/ms-MY/translation.json (+18 -18)
  31. src/lib/i18n/locales/nb-NO/translation.json (+18 -18)
  32. src/lib/i18n/locales/nl-NL/translation.json (+18 -18)
  33. src/lib/i18n/locales/pa-IN/translation.json (+18 -18)
  34. src/lib/i18n/locales/pl-PL/translation.json (+18 -18)
  35. src/lib/i18n/locales/pt-BR/translation.json (+18 -18)
  36. src/lib/i18n/locales/pt-PT/translation.json (+18 -18)
  37. src/lib/i18n/locales/ro-RO/translation.json (+18 -18)
  38. src/lib/i18n/locales/ru-RU/translation.json (+18 -18)
  39. src/lib/i18n/locales/sk-SK/translation.json (+18 -18)
  40. src/lib/i18n/locales/sr-RS/translation.json (+18 -18)
  41. src/lib/i18n/locales/sv-SE/translation.json (+18 -18)
  42. src/lib/i18n/locales/th-TH/translation.json (+18 -18)
  43. src/lib/i18n/locales/tk-TW/translation.json (+18 -18)
  44. src/lib/i18n/locales/tr-TR/translation.json (+18 -18)
  45. src/lib/i18n/locales/uk-UA/translation.json (+18 -18)
  46. src/lib/i18n/locales/ur-PK/translation.json (+18 -18)
  47. src/lib/i18n/locales/vi-VN/translation.json (+18 -18)
  48. src/lib/i18n/locales/zh-CN/translation.json (+18 -18)
  49. src/lib/i18n/locales/zh-TW/translation.json (+18 -18)
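
The change is mechanical and identical in every file: each English source string that ended in a parenthesized "(Default: ...)" hint becomes a new key with the hint stripped, and, except where the entry was already untranslated, its value is reset to "" so translators can re-localize the reworded string. A minimal sketch of a script that would reproduce this transformation follows; it is hypothetical (the commit was more plausibly produced by re-running the project's i18next key extraction after the source strings changed), and the file name `strip-defaults.ts` is made up:

```typescript
// strip-defaults.ts — hypothetical reproduction of this commit's effect.
// Assumes Node 18+ (run with e.g. `npx tsx strip-defaults.ts`).
import * as fs from "node:fs";
import * as path from "node:path";

const localesDir = "src/lib/i18n/locales";

for (const locale of fs.readdirSync(localesDir)) {
	const file = path.join(localesDir, locale, "translation.json");
	if (!fs.existsSync(file)) continue;

	const entries: Record<string, string> = JSON.parse(fs.readFileSync(file, "utf8"));
	const updated: Record<string, string> = {};

	for (const [key, value] of Object.entries(entries)) {
		// Keys are the English source strings (i18next convention). Strip a
		// trailing "(Default: ...)" hint plus the whitespace before it.
		const newKey = key.replace(/\s*\(default: [^)]*\)$/i, "");
		// A renamed key is effectively a new key: its old translation no
		// longer matches the source string, so it is reset to "".
		updated[newKey] = newKey === key ? value : "";
	}

	// The locale files are tab-indented JSON, as the diffs below show.
	fs.writeFileSync(file, JSON.stringify(updated, null, "\t") + "\n");
}
```

This also explains why the counts match everywhere: the same 18 source strings changed, so every locale file swaps out the same 18 keys (+18 -18 per file, 49 × 18 = 882).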

src/lib/i18n/locales/ar-BH/translation.json (+18 -18)

@@ -64,7 +64,7 @@
 	"Allow Voice Interruption in Call": "",
 	"Allowed Endpoints": "",
 	"Already have an account?": "هل تملك حساب ؟",
-	"Alternative to the top_p, and aims to ensure a balance of quality and variety. The parameter p represents the minimum probability for a token to be considered, relative to the probability of the most likely token. For example, with p=0.05 and the most likely token having a probability of 0.9, logits with a value less than 0.045 are filtered out. (Default: 0.0)": "",
+	"Alternative to the top_p, and aims to ensure a balance of quality and variety. The parameter p represents the minimum probability for a token to be considered, relative to the probability of the most likely token. For example, with p=0.05 and the most likely token having a probability of 0.9, logits with a value less than 0.045 are filtered out.": "",
 	"Always": "",
 	"Amazing": "",
 	"an assistant": "مساعد",
@@ -208,7 +208,7 @@
 	"Confirm your new password": "",
 	"Connect to your own OpenAI compatible API endpoints.": "",
 	"Connections": "اتصالات",
-	"Constrains effort on reasoning for reasoning models. Only applicable to reasoning models from specific providers that support reasoning effort. (Default: medium)": "",
+	"Constrains effort on reasoning for reasoning models. Only applicable to reasoning models from specific providers that support reasoning effort.": "",
 	"Contact Admin for WebUI Access": "",
 	"Content": "الاتصال",
 	"Content Extraction Engine": "",
@@ -218,9 +218,9 @@
 	"Continue with Email": "",
 	"Continue with LDAP": "",
 	"Control how message text is split for TTS requests. 'Punctuation' splits into sentences, 'paragraphs' splits into paragraphs, and 'none' keeps the message as a single string.": "",
-	"Control the repetition of token sequences in the generated text. A higher value (e.g., 1.5) will penalize repetitions more strongly, while a lower value (e.g., 1.1) will be more lenient. At 1, it is disabled. (Default: 1.1)": "",
+	"Control the repetition of token sequences in the generated text. A higher value (e.g., 1.5) will penalize repetitions more strongly, while a lower value (e.g., 1.1) will be more lenient. At 1, it is disabled.": "",
 	"Controls": "",
-	"Controls the balance between coherence and diversity of the output. A lower value will result in more focused and coherent text. (Default: 5.0)": "",
+	"Controls the balance between coherence and diversity of the output. A lower value will result in more focused and coherent text.": "",
 	"Copied": "",
 	"Copied shared chat URL to clipboard!": "تم نسخ عنوان URL للدردشة المشتركة إلى الحافظة",
 	"Copied to clipboard": "",
@@ -358,7 +358,7 @@
 	"Enable Memory Locking (mlock) to prevent model data from being swapped out of RAM. This option locks the model's working set of pages into RAM, ensuring that they will not be swapped out to disk. This can help maintain performance by avoiding page faults and ensuring fast data access.": "",
 	"Enable Memory Mapping (mmap) to load model data. This option allows the system to use disk storage as an extension of RAM by treating disk files as if they were in RAM. This can improve model performance by allowing for faster data access. However, it may not work correctly with all systems and can consume a significant amount of disk space.": "",
 	"Enable Message Rating": "",
-	"Enable Mirostat sampling for controlling perplexity. (Default: 0, 0 = Disabled, 1 = Mirostat, 2 = Mirostat 2.0)": "",
+	"Enable Mirostat sampling for controlling perplexity.": "",
 	"Enable New Sign Ups": "تفعيل عمليات التسجيل الجديدة",
 	"Enabled": "",
 	"Ensure your CSV file includes 4 columns in this order: Name, Email, Password, Role.": "تأكد من أن ملف CSV الخاص بك يتضمن 4 أعمدة بهذا الترتيب: Name, Email, Password, Role.",
@@ -566,7 +566,7 @@
 	"Include": "",
 	"Include `--api-auth` flag when running stable-diffusion-webui": "",
 	"Include `--api` flag when running stable-diffusion-webui": "قم بتضمين علامة `-api` عند تشغيل Stable-diffusion-webui",
-	"Influences how quickly the algorithm responds to feedback from the generated text. A lower learning rate will result in slower adjustments, while a higher learning rate will make the algorithm more responsive. (Default: 0.1)": "",
+	"Influences how quickly the algorithm responds to feedback from the generated text. A lower learning rate will result in slower adjustments, while a higher learning rate will make the algorithm more responsive.": "",
 	"Info": "معلومات",
 	"Input commands": "إدخال الأوامر",
 	"Install from Github URL": "التثبيت من عنوان URL لجيثب",
@@ -809,7 +809,7 @@
 	"Reasoning Effort": "",
 	"Record voice": "سجل صوت",
 	"Redirecting you to Open WebUI Community": "OpenWebUI إعادة توجيهك إلى مجتمع ",
-	"Reduces the probability of generating nonsense. A higher value (e.g. 100) will give more diverse answers, while a lower value (e.g. 10) will be more conservative. (Default: 40)": "",
+	"Reduces the probability of generating nonsense. A higher value (e.g. 100) will give more diverse answers, while a lower value (e.g. 10) will be more conservative.": "",
 	"Refer to yourself as \"User\" (e.g., \"User is learning Spanish\")": "",
 	"References from": "",
 	"Refused when it shouldn't have": "رفض عندما لا ينبغي أن يكون",
@@ -918,11 +918,11 @@
 	"Set the number of worker threads used for computation. This option controls how many threads are used to process incoming requests concurrently. Increasing this value can improve performance under high concurrency workloads but may also consume more CPU resources.": "",
 	"Set Voice": "ضبط الصوت",
 	"Set whisper model": "",
-	"Sets a flat bias against tokens that have appeared at least once. A higher value (e.g., 1.5) will penalize repetitions more strongly, while a lower value (e.g., 0.9) will be more lenient. At 0, it is disabled. (Default: 0)": "",
-	"Sets a scaling bias against tokens to penalize repetitions, based on how many times they have appeared. A higher value (e.g., 1.5) will penalize repetitions more strongly, while a lower value (e.g., 0.9) will be more lenient. At 0, it is disabled. (Default: 1.1)": "",
-	"Sets how far back for the model to look back to prevent repetition. (Default: 64, 0 = disabled, -1 = num_ctx)": "",
-	"Sets the random number seed to use for generation. Setting this to a specific number will make the model generate the same text for the same prompt. (Default: random)": "",
-	"Sets the size of the context window used to generate the next token. (Default: 2048)": "",
+	"Sets a flat bias against tokens that have appeared at least once. A higher value (e.g., 1.5) will penalize repetitions more strongly, while a lower value (e.g., 0.9) will be more lenient. At 0, it is disabled.": "",
+	"Sets a scaling bias against tokens to penalize repetitions, based on how many times they have appeared. A higher value (e.g., 1.5) will penalize repetitions more strongly, while a lower value (e.g., 0.9) will be more lenient. At 0, it is disabled.": "",
+	"Sets how far back for the model to look back to prevent repetition.": "",
+	"Sets the random number seed to use for generation. Setting this to a specific number will make the model generate the same text for the same prompt.": "",
+	"Sets the size of the context window used to generate the next token.": "",
 	"Sets the stop sequences to use. When this pattern is encountered, the LLM will stop generating text and return. Multiple stop patterns may be set by specifying multiple separate stop parameters in a modelfile.": "",
 	"Settings": "الاعدادات",
 	"Settings saved successfully!": "تم حفظ الاعدادات بنجاح",
@@ -964,7 +964,7 @@
 	"System Prompt": "محادثة النظام",
 	"Tags Generation": "",
 	"Tags Generation Prompt": "",
-	"Tail free sampling is used to reduce the impact of less probable tokens from the output. A higher value (e.g., 2.0) will reduce the impact more, while a value of 1.0 disables this setting. (default: 1)": "",
+	"Tail free sampling is used to reduce the impact of less probable tokens from the output. A higher value (e.g., 2.0) will reduce the impact more, while a value of 1.0 disables this setting.": "",
 	"Talk to model": "",
 	"Tap to interrupt": "",
 	"Tasks": "",
@@ -979,7 +979,7 @@
 	"Thanks for your feedback!": "شكرا لملاحظاتك!",
 	"The Application Account DN you bind with for search": "",
 	"The base to search for users": "",
-	"The batch size determines how many text requests are processed together at once. A higher batch size can increase the performance and speed of the model, but it also requires more memory.  (Default: 512)": "",
+	"The batch size determines how many text requests are processed together at once. A higher batch size can increase the performance and speed of the model, but it also requires more memory.": "",
 	"The developers behind this plugin are passionate volunteers from the community. If you find this plugin helpful, please consider contributing to its development.": "",
 	"The evaluation leaderboard is based on the Elo rating system and is updated in real-time.": "",
 	"The LDAP attribute that maps to the mail that users use to sign in.": "",
@@ -988,14 +988,14 @@
 	"The maximum file size in MB. If the file size exceeds this limit, the file will not be uploaded.": "",
 	"The maximum number of files that can be used at once in chat. If the number of files exceeds this limit, the files will not be uploaded.": "",
 	"The score should be a value between 0.0 (0%) and 1.0 (100%).": "يجب أن تكون النتيجة قيمة تتراوح بين 0.0 (0%) و1.0 (100%).",
-	"The temperature of the model. Increasing the temperature will make the model answer more creatively. (Default: 0.8)": "",
+	"The temperature of the model. Increasing the temperature will make the model answer more creatively.": "",
 	"Theme": "الثيم",
 	"Thinking...": "",
 	"This action cannot be undone. Do you wish to continue?": "",
 	"This ensures that your valuable conversations are securely saved to your backend database. Thank you!": "وهذا يضمن حفظ محادثاتك القيمة بشكل آمن في قاعدة بياناتك الخلفية. شكرًا لك!",
 	"This is an experimental feature, it may not function as expected and is subject to change at any time.": "",
-	"This option controls how many tokens are preserved when refreshing the context. For example, if set to 2, the last 2 tokens of the conversation context will be retained. Preserving context can help maintain the continuity of a conversation, but it may reduce the ability to respond to new topics. (Default: 24)": "",
-	"This option sets the maximum number of tokens the model can generate in its response. Increasing this limit allows the model to provide longer answers, but it may also increase the likelihood of unhelpful or irrelevant content being generated.  (Default: 128)": "",
+	"This option controls how many tokens are preserved when refreshing the context. For example, if set to 2, the last 2 tokens of the conversation context will be retained. Preserving context can help maintain the continuity of a conversation, but it may reduce the ability to respond to new topics.": "",
+	"This option sets the maximum number of tokens the model can generate in its response. Increasing this limit allows the model to provide longer answers, but it may also increase the likelihood of unhelpful or irrelevant content being generated.": "",
 	"This option will delete all existing files in the collection and replace them with newly uploaded files.": "",
 	"This response was generated by \"{{model}}\"": "",
 	"This will delete": "",
@@ -1132,7 +1132,7 @@
 	"Why?": "",
 	"Widescreen Mode": "",
 	"Won": "",
-	"Works together with top-k. A higher value (e.g., 0.95) will lead to more diverse text, while a lower value (e.g., 0.5) will generate more focused and conservative text. (Default: 0.9)": "",
+	"Works together with top-k. A higher value (e.g., 0.95) will lead to more diverse text, while a lower value (e.g., 0.5) will generate more focused and conservative text.": "",
 	"Workspace": "مساحة العمل",
 	"Workspace Permissions": "",
 	"Write": "",

src/lib/i18n/locales/bg-BG/translation.json (+18 -18)

@@ -64,7 +64,7 @@
 	"Allow Voice Interruption in Call": "Разреши прекъсване на гласа по време на разговор",
 	"Allowed Endpoints": "Разрешени крайни точки",
 	"Already have an account?": "Вече имате акаунт?",
-	"Alternative to the top_p, and aims to ensure a balance of quality and variety. The parameter p represents the minimum probability for a token to be considered, relative to the probability of the most likely token. For example, with p=0.05 and the most likely token having a probability of 0.9, logits with a value less than 0.045 are filtered out. (Default: 0.0)": "Алтернатива на top_p, която цели да осигури баланс между качество и разнообразие. Параметърът p представлява минималната вероятност за разглеждане на токен, спрямо вероятността на най-вероятния токен. Например, при p=0.05 и най-вероятен токен с вероятност 0.9, логитите със стойност по-малка от 0.045 се филтрират. (По подразбиране: 0.0)",
+	"Alternative to the top_p, and aims to ensure a balance of quality and variety. The parameter p represents the minimum probability for a token to be considered, relative to the probability of the most likely token. For example, with p=0.05 and the most likely token having a probability of 0.9, logits with a value less than 0.045 are filtered out.": "",
 	"Always": "Винаги",
 	"Amazing": "Невероятно",
 	"an assistant": "асистент",
@@ -208,7 +208,7 @@
 	"Confirm your new password": "Потвърдете новата си парола",
 	"Connect to your own OpenAI compatible API endpoints.": "Свържете се със собствени крайни точки на API, съвместими с OpenAI.",
 	"Connections": "Връзки",
-	"Constrains effort on reasoning for reasoning models. Only applicable to reasoning models from specific providers that support reasoning effort. (Default: medium)": "Ограничава усилията за разсъждение при модели за разсъждение. Приложимо само за модели за разсъждение от конкретни доставчици, които поддържат усилия за разсъждение. (По подразбиране: средно)",
+	"Constrains effort on reasoning for reasoning models. Only applicable to reasoning models from specific providers that support reasoning effort.": "",
 	"Contact Admin for WebUI Access": "Свържете се с администратор за достъп до WebUI",
 	"Content": "Съдържание",
 	"Content Extraction Engine": "",
@@ -218,9 +218,9 @@
 	"Continue with Email": "Продължете с имейл",
 	"Continue with LDAP": "Продължете с LDAP",
 	"Control how message text is split for TTS requests. 'Punctuation' splits into sentences, 'paragraphs' splits into paragraphs, and 'none' keeps the message as a single string.": "Контролирайте как текстът на съобщението се разделя за TTS заявки. 'Пунктуация' разделя на изречения, 'параграфи' разделя на параграфи, а 'нищо' запазва съобщението като един низ.",
-	"Control the repetition of token sequences in the generated text. A higher value (e.g., 1.5) will penalize repetitions more strongly, while a lower value (e.g., 1.1) will be more lenient. At 1, it is disabled. (Default: 1.1)": "Контролирайте повторението на последователности от токени в генерирания текст. По-висока стойност (напр. 1.5) ще наказва повторенията по-силно, докато по-ниска стойност (напр. 1.1) ще бъде по-снизходителна. При 1 е изключено. (По подразбиране: 1.1)",
+	"Control the repetition of token sequences in the generated text. A higher value (e.g., 1.5) will penalize repetitions more strongly, while a lower value (e.g., 1.1) will be more lenient. At 1, it is disabled.": "",
 	"Controls": "Контроли",
-	"Controls the balance between coherence and diversity of the output. A lower value will result in more focused and coherent text. (Default: 5.0)": "Контролира баланса между съгласуваност и разнообразие на изхода. По-ниска стойност ще доведе до по-фокусиран и съгласуван текст. (По подразбиране: 5.0)",
+	"Controls the balance between coherence and diversity of the output. A lower value will result in more focused and coherent text.": "",
 	"Copied": "Копирано",
 	"Copied shared chat URL to clipboard!": "Копирана е връзката за споделен чат в клипборда!",
 	"Copied to clipboard": "Копирано в клипборда",
@@ -358,7 +358,7 @@
 	"Enable Memory Locking (mlock) to prevent model data from being swapped out of RAM. This option locks the model's working set of pages into RAM, ensuring that they will not be swapped out to disk. This can help maintain performance by avoiding page faults and ensuring fast data access.": "Активиране на заключване на паметта (mlock), за да се предотврати изваждането на данните на модела от RAM. Тази опция заключва работния набор от страници на модела в RAM, гарантирайки, че няма да бъдат изхвърлени на диска. Това може да помогне за поддържане на производителността, като се избягват грешки в страниците и се осигурява бърз достъп до данните.",
 	"Enable Memory Mapping (mmap) to load model data. This option allows the system to use disk storage as an extension of RAM by treating disk files as if they were in RAM. This can improve model performance by allowing for faster data access. However, it may not work correctly with all systems and can consume a significant amount of disk space.": "Активиране на мапиране на паметта (mmap) за зареждане на данни на модела. Тази опция позволява на системата да използва дисковото пространство като разширение на RAM, третирайки дисковите файлове, сякаш са в RAM. Това може да подобри производителността на модела, като позволява по-бърз достъп до данните. Въпреки това, може да не работи правилно с всички системи и може да консумира значително количество дисково пространство.",
 	"Enable Message Rating": "Активиране на оценяване на съобщения",
-	"Enable Mirostat sampling for controlling perplexity. (Default: 0, 0 = Disabled, 1 = Mirostat, 2 = Mirostat 2.0)": "Активиране на Mirostat семплиране за контрол на перплексията. (По подразбиране: 0, 0 = Деактивирано, 1 = Mirostat, 2 = Mirostat 2.0)",
+	"Enable Mirostat sampling for controlling perplexity.": "",
 	"Enable New Sign Ups": "Включване на нови регистрации",
 	"Enabled": "Активирано",
 	"Ensure your CSV file includes 4 columns in this order: Name, Email, Password, Role.": "Уверете се, че вашият CSV файл включва 4 колони в следния ред: Име, Имейл, Парола, Роля.",
@@ -566,7 +566,7 @@
 	"Include": "Включи",
 	"Include `--api-auth` flag when running stable-diffusion-webui": "",
 	"Include `--api` flag when running stable-diffusion-webui": "",
-	"Influences how quickly the algorithm responds to feedback from the generated text. A lower learning rate will result in slower adjustments, while a higher learning rate will make the algorithm more responsive. (Default: 0.1)": "Влияе върху това колко бързо алгоритъмът реагира на обратната връзка от генерирания текст. По-ниска скорост на обучение ще доведе до по-бавни корекции, докато по-висока скорост на обучение ще направи алгоритъма по-отзивчив. (По подразбиране: 0.1)",
+	"Influences how quickly the algorithm responds to feedback from the generated text. A lower learning rate will result in slower adjustments, while a higher learning rate will make the algorithm more responsive.": "",
 	"Info": "Информация",
 	"Input commands": "Въведете команди",
 	"Install from Github URL": "Инсталиране от URL адреса на Github",
@@ -809,7 +809,7 @@
 	"Reasoning Effort": "Усилие за разсъждение",
 	"Record voice": "Записване на глас",
 	"Redirecting you to Open WebUI Community": "Пренасочване към OpenWebUI общността",
-	"Reduces the probability of generating nonsense. A higher value (e.g. 100) will give more diverse answers, while a lower value (e.g. 10) will be more conservative. (Default: 40)": "Намалява вероятността за генериране на безсмислици. По-висока стойност (напр. 100) ще даде по-разнообразни отговори, докато по-ниска стойност (напр. 10) ще бъде по-консервативна. (По подразбиране: 40)",
+	"Reduces the probability of generating nonsense. A higher value (e.g. 100) will give more diverse answers, while a lower value (e.g. 10) will be more conservative.": "",
 	"Refer to yourself as \"User\" (e.g., \"User is learning Spanish\")": "Отнасяйте се към себе си като \"Потребител\" (напр. \"Потребителят учи испански\")",
 	"References from": "Препратки от",
 	"Refused when it shouldn't have": "Отказано, когато не трябва да бъде",
@@ -918,11 +918,11 @@
 	"Set the number of worker threads used for computation. This option controls how many threads are used to process incoming requests concurrently. Increasing this value can improve performance under high concurrency workloads but may also consume more CPU resources.": "Задайте броя работни нишки, използвани за изчисления. Тази опция контролира колко нишки се използват за едновременна обработка на входящи заявки. Увеличаването на тази стойност може да подобри производителността при високи натоварвания с паралелизъм, но може също да консумира повече CPU ресурси.",
 	"Set Voice": "Задай Глас",
 	"Set whisper model": "Задай модел на шепот",
-	"Sets a flat bias against tokens that have appeared at least once. A higher value (e.g., 1.5) will penalize repetitions more strongly, while a lower value (e.g., 0.9) will be more lenient. At 0, it is disabled. (Default: 0)": "Задава плоско отклонение срещу токени, които са се появили поне веднъж. По-висока стойност (напр. 1.5) ще наказва повторенията по-силно, докато по-ниска стойност (напр. 0.9) ще бъде по-снизходителна. При 0 е деактивирано. (По подразбиране: 0)",
-	"Sets a scaling bias against tokens to penalize repetitions, based on how many times they have appeared. A higher value (e.g., 1.5) will penalize repetitions more strongly, while a lower value (e.g., 0.9) will be more lenient. At 0, it is disabled. (Default: 1.1)": "Задава мащабиращо отклонение срещу токени за наказване на повторения, базирано на това колко пъти са се появили. По-висока стойност (напр. 1.5) ще наказва повторенията по-силно, докато по-ниска стойност (напр. 0.9) ще бъде по-снизходителна. При 0 е деактивирано. (По подразбиране: 1.1)",
-	"Sets how far back for the model to look back to prevent repetition. (Default: 64, 0 = disabled, -1 = num_ctx)": "Задава колко назад моделът да гледа, за да предотврати повторение. (По подразбиране: 64, 0 = деактивирано, -1 = num_ctx)",
-	"Sets the random number seed to use for generation. Setting this to a specific number will make the model generate the same text for the same prompt. (Default: random)": "Задава семето на случайното число, което да се използва за генериране. Задаването на конкретно число ще накара модела да генерира същия текст за същата подкана. (По подразбиране: случайно)",
-	"Sets the size of the context window used to generate the next token. (Default: 2048)": "Задава размера на контекстния прозорец, използван за генериране на следващия токен. (По подразбиране: 2048)",
+	"Sets a flat bias against tokens that have appeared at least once. A higher value (e.g., 1.5) will penalize repetitions more strongly, while a lower value (e.g., 0.9) will be more lenient. At 0, it is disabled.": "",
+	"Sets a scaling bias against tokens to penalize repetitions, based on how many times they have appeared. A higher value (e.g., 1.5) will penalize repetitions more strongly, while a lower value (e.g., 0.9) will be more lenient. At 0, it is disabled.": "",
+	"Sets how far back for the model to look back to prevent repetition.": "",
+	"Sets the random number seed to use for generation. Setting this to a specific number will make the model generate the same text for the same prompt.": "",
+	"Sets the size of the context window used to generate the next token.": "",
 	"Sets the stop sequences to use. When this pattern is encountered, the LLM will stop generating text and return. Multiple stop patterns may be set by specifying multiple separate stop parameters in a modelfile.": "Задава последователностите за спиране, които да се използват. Когато се срещне този модел, LLM ще спре да генерира текст и ще се върне. Множество модели за спиране могат да бъдат зададени чрез определяне на множество отделни параметри за спиране в моделния файл.",
 	"Settings": "Настройки",
 	"Settings saved successfully!": "Настройките са запазени успешно!",
@@ -964,7 +964,7 @@
 	"System Prompt": "Системен Промпт",
 	"Tags Generation": "Генериране на тагове",
 	"Tags Generation Prompt": "Промпт за генериране на тагове",
-	"Tail free sampling is used to reduce the impact of less probable tokens from the output. A higher value (e.g., 2.0) will reduce the impact more, while a value of 1.0 disables this setting. (default: 1)": "Безопашковото семплиране се използва за намаляване на влиянието на по-малко вероятните токени от изхода. По-висока стойност (напр. 2.0) ще намали влиянието повече, докато стойност 1.0 деактивира тази настройка. (по подразбиране: 1)",
+	"Tail free sampling is used to reduce the impact of less probable tokens from the output. A higher value (e.g., 2.0) will reduce the impact more, while a value of 1.0 disables this setting.": "",
 	"Talk to model": "",
 	"Tap to interrupt": "Докоснете за прекъсване",
 	"Tasks": "Задачи",
@@ -979,7 +979,7 @@
 	"Thanks for your feedback!": "Благодарим ви за вашия отзив!",
 	"The Application Account DN you bind with for search": "DN на акаунта на приложението, с който се свързвате за търсене",
 	"The base to search for users": "Базата за търсене на потребители",
-	"The batch size determines how many text requests are processed together at once. A higher batch size can increase the performance and speed of the model, but it also requires more memory.  (Default: 512)": "",
+	"The batch size determines how many text requests are processed together at once. A higher batch size can increase the performance and speed of the model, but it also requires more memory.": "",
 	"The developers behind this plugin are passionate volunteers from the community. If you find this plugin helpful, please consider contributing to its development.": "Разработчиците зад този плъгин са страстни доброволци от общността. Ако намирате този плъгин полезен, моля, обмислете да допринесете за неговото развитие.",
 	"The evaluation leaderboard is based on the Elo rating system and is updated in real-time.": "Класацията за оценка се базира на рейтинговата система Elo и се обновява в реално време.",
 	"The LDAP attribute that maps to the mail that users use to sign in.": "LDAP атрибутът, който съответства на имейла, който потребителите използват за вписване.",
@@ -988,14 +988,14 @@
 	"The maximum file size in MB. If the file size exceeds this limit, the file will not be uploaded.": "Максималният размер на файла в MB. Ако размерът на файла надвишава този лимит, файлът няма да бъде качен.",
 	"The maximum number of files that can be used at once in chat. If the number of files exceeds this limit, the files will not be uploaded.": "Максималният брой файлове, които могат да се използват едновременно в чата. Ако броят на файловете надвишава този лимит, файловете няма да бъдат качени.",
 	"The score should be a value between 0.0 (0%) and 1.0 (100%).": "Резултатът трябва да бъде стойност между 0.0 (0%) и 1.0 (100%).",
-	"The temperature of the model. Increasing the temperature will make the model answer more creatively. (Default: 0.8)": "Температурата на модела. Увеличаването на температурата ще накара модела да отговаря по-креативно. (По подразбиране: 0.8)",
+	"The temperature of the model. Increasing the temperature will make the model answer more creatively.": "",
 	"Theme": "Тема",
 	"Thinking...": "Мисля...",
 	"This action cannot be undone. Do you wish to continue?": "Това действие не може да бъде отменено. Желаете ли да продължите?",
 	"This ensures that your valuable conversations are securely saved to your backend database. Thank you!": "Това гарантира, че ценните ви разговори се запазват сигурно във вашата бекенд база данни. Благодарим ви!",
 	"This is an experimental feature, it may not function as expected and is subject to change at any time.": "Това е експериментална функция, може да не работи според очакванията и подлежи на промяна по всяко време.",
-	"This option controls how many tokens are preserved when refreshing the context. For example, if set to 2, the last 2 tokens of the conversation context will be retained. Preserving context can help maintain the continuity of a conversation, but it may reduce the ability to respond to new topics. (Default: 24)": "Тази опция контролира колко токена се запазват при обновяване на контекста. Например, ако е зададено на 2, последните 2 токена от контекста на разговора ще бъдат запазени. Запазването на контекста може да помогне за поддържане на непрекъснатостта на разговора, но може да намали способността за отговор на нови теми. (По подразбиране: 24)",
-	"This option sets the maximum number of tokens the model can generate in its response. Increasing this limit allows the model to provide longer answers, but it may also increase the likelihood of unhelpful or irrelevant content being generated.  (Default: 128)": "",
+	"This option controls how many tokens are preserved when refreshing the context. For example, if set to 2, the last 2 tokens of the conversation context will be retained. Preserving context can help maintain the continuity of a conversation, but it may reduce the ability to respond to new topics.": "",
+	"This option sets the maximum number of tokens the model can generate in its response. Increasing this limit allows the model to provide longer answers, but it may also increase the likelihood of unhelpful or irrelevant content being generated.": "",
 	"This option will delete all existing files in the collection and replace them with newly uploaded files.": "Тази опция ще изтрие всички съществуващи файлове в колекцията и ще ги замени с новокачени файлове.",
 	"This response was generated by \"{{model}}\"": "Този отговор беше генериран от \"{{model}}\"",
 	"This will delete": "Това ще изтрие",
@@ -1132,7 +1132,7 @@
 	"Why?": "Защо?",
 	"Widescreen Mode": "Широкоекранен режим",
 	"Won": "Спечелено",
-	"Works together with top-k. A higher value (e.g., 0.95) will lead to more diverse text, while a lower value (e.g., 0.5) will generate more focused and conservative text. (Default: 0.9)": "Работи заедно с top-k. По-висока стойност (напр. 0.95) ще доведе до по-разнообразен текст, докато по-ниска стойност (напр. 0.5) ще генерира по-фокусиран и консервативен текст. (По подразбиране: 0.9)",
+	"Works together with top-k. A higher value (e.g., 0.95) will lead to more diverse text, while a lower value (e.g., 0.5) will generate more focused and conservative text.": "",
 	"Workspace": "Работно пространство",
 	"Workspace Permissions": "Разрешения за работното пространство",
 	"Write": "Напиши",

src/lib/i18n/locales/bn-BD/translation.json (+18 -18)

@@ -64,7 +64,7 @@
 	"Allow Voice Interruption in Call": "",
 	"Allowed Endpoints": "",
 	"Already have an account?": "আগে থেকেই একাউন্ট আছে?",
-	"Alternative to the top_p, and aims to ensure a balance of quality and variety. The parameter p represents the minimum probability for a token to be considered, relative to the probability of the most likely token. For example, with p=0.05 and the most likely token having a probability of 0.9, logits with a value less than 0.045 are filtered out. (Default: 0.0)": "",
+	"Alternative to the top_p, and aims to ensure a balance of quality and variety. The parameter p represents the minimum probability for a token to be considered, relative to the probability of the most likely token. For example, with p=0.05 and the most likely token having a probability of 0.9, logits with a value less than 0.045 are filtered out.": "",
 	"Always": "",
 	"Amazing": "",
 	"an assistant": "একটা এসিস্ট্যান্ট",
@@ -208,7 +208,7 @@
 	"Confirm your new password": "",
 	"Connect to your own OpenAI compatible API endpoints.": "",
 	"Connections": "কানেকশনগুলো",
-	"Constrains effort on reasoning for reasoning models. Only applicable to reasoning models from specific providers that support reasoning effort. (Default: medium)": "",
+	"Constrains effort on reasoning for reasoning models. Only applicable to reasoning models from specific providers that support reasoning effort.": "",
 	"Contact Admin for WebUI Access": "",
 	"Content": "বিষয়বস্তু",
 	"Content Extraction Engine": "",
@@ -218,9 +218,9 @@
 	"Continue with Email": "",
 	"Continue with LDAP": "",
 	"Control how message text is split for TTS requests. 'Punctuation' splits into sentences, 'paragraphs' splits into paragraphs, and 'none' keeps the message as a single string.": "",
-	"Control the repetition of token sequences in the generated text. A higher value (e.g., 1.5) will penalize repetitions more strongly, while a lower value (e.g., 1.1) will be more lenient. At 1, it is disabled. (Default: 1.1)": "",
+	"Control the repetition of token sequences in the generated text. A higher value (e.g., 1.5) will penalize repetitions more strongly, while a lower value (e.g., 1.1) will be more lenient. At 1, it is disabled.": "",
 	"Controls": "",
-	"Controls the balance between coherence and diversity of the output. A lower value will result in more focused and coherent text. (Default: 5.0)": "",
+	"Controls the balance between coherence and diversity of the output. A lower value will result in more focused and coherent text.": "",
 	"Copied": "",
 	"Copied shared chat URL to clipboard!": "শেয়ারকৃত কথা-ব্যবহারের URL ক্লিপবোর্ডে কপি করা হয়েছে!",
 	"Copied to clipboard": "",
@@ -358,7 +358,7 @@
 	"Enable Memory Locking (mlock) to prevent model data from being swapped out of RAM. This option locks the model's working set of pages into RAM, ensuring that they will not be swapped out to disk. This can help maintain performance by avoiding page faults and ensuring fast data access.": "",
 	"Enable Memory Mapping (mmap) to load model data. This option allows the system to use disk storage as an extension of RAM by treating disk files as if they were in RAM. This can improve model performance by allowing for faster data access. However, it may not work correctly with all systems and can consume a significant amount of disk space.": "",
 	"Enable Message Rating": "",
-	"Enable Mirostat sampling for controlling perplexity. (Default: 0, 0 = Disabled, 1 = Mirostat, 2 = Mirostat 2.0)": "",
+	"Enable Mirostat sampling for controlling perplexity.": "",
 	"Enable New Sign Ups": "নতুন সাইনআপ চালু করুন",
 	"Enabled": "",
 	"Ensure your CSV file includes 4 columns in this order: Name, Email, Password, Role.": "আপনার সিএসভি ফাইলটিতে এই ক্রমে 4 টি কলাম অন্তর্ভুক্ত রয়েছে তা নিশ্চিত করুন: নাম, ইমেল, পাসওয়ার্ড, ভূমিকা।.",
@@ -566,7 +566,7 @@
 	"Include": "",
 	"Include `--api-auth` flag when running stable-diffusion-webui": "",
 	"Include `--api` flag when running stable-diffusion-webui": "stable-diffusion-webui চালু করার সময় `--api` ফ্ল্যাগ সংযুক্ত করুন",
-	"Influences how quickly the algorithm responds to feedback from the generated text. A lower learning rate will result in slower adjustments, while a higher learning rate will make the algorithm more responsive. (Default: 0.1)": "",
+	"Influences how quickly the algorithm responds to feedback from the generated text. A lower learning rate will result in slower adjustments, while a higher learning rate will make the algorithm more responsive.": "",
 	"Info": "তথ্য",
 	"Input commands": "ইনপুট কমান্ডস",
 	"Install from Github URL": "Github URL থেকে ইনস্টল করুন",
@@ -809,7 +809,7 @@
 	"Reasoning Effort": "",
 	"Record voice": "ভয়েস রেকর্ড করুন",
 	"Redirecting you to Open WebUI Community": "আপনাকে OpenWebUI কমিউনিটিতে পাঠানো হচ্ছে",
-	"Reduces the probability of generating nonsense. A higher value (e.g. 100) will give more diverse answers, while a lower value (e.g. 10) will be more conservative. (Default: 40)": "",
+	"Reduces the probability of generating nonsense. A higher value (e.g. 100) will give more diverse answers, while a lower value (e.g. 10) will be more conservative.": "",
 	"Refer to yourself as \"User\" (e.g., \"User is learning Spanish\")": "",
 	"References from": "",
 	"Refused when it shouldn't have": "যদি উপযুক্ত নয়, তবে রেজিগেনেট করা হচ্ছে",
@@ -918,11 +918,11 @@
 	"Set the number of worker threads used for computation. This option controls how many threads are used to process incoming requests concurrently. Increasing this value can improve performance under high concurrency workloads but may also consume more CPU resources.": "",
 	"Set Voice": "কন্ঠস্বর নির্ধারণ করুন",
 	"Set whisper model": "",
-	"Sets a flat bias against tokens that have appeared at least once. A higher value (e.g., 1.5) will penalize repetitions more strongly, while a lower value (e.g., 0.9) will be more lenient. At 0, it is disabled. (Default: 0)": "",
-	"Sets a scaling bias against tokens to penalize repetitions, based on how many times they have appeared. A higher value (e.g., 1.5) will penalize repetitions more strongly, while a lower value (e.g., 0.9) will be more lenient. At 0, it is disabled. (Default: 1.1)": "",
-	"Sets how far back for the model to look back to prevent repetition. (Default: 64, 0 = disabled, -1 = num_ctx)": "",
-	"Sets the random number seed to use for generation. Setting this to a specific number will make the model generate the same text for the same prompt. (Default: random)": "",
-	"Sets the size of the context window used to generate the next token. (Default: 2048)": "",
+	"Sets a flat bias against tokens that have appeared at least once. A higher value (e.g., 1.5) will penalize repetitions more strongly, while a lower value (e.g., 0.9) will be more lenient. At 0, it is disabled.": "",
+	"Sets a scaling bias against tokens to penalize repetitions, based on how many times they have appeared. A higher value (e.g., 1.5) will penalize repetitions more strongly, while a lower value (e.g., 0.9) will be more lenient. At 0, it is disabled.": "",
+	"Sets how far back for the model to look back to prevent repetition.": "",
+	"Sets the random number seed to use for generation. Setting this to a specific number will make the model generate the same text for the same prompt.": "",
+	"Sets the size of the context window used to generate the next token.": "",
 	"Sets the stop sequences to use. When this pattern is encountered, the LLM will stop generating text and return. Multiple stop patterns may be set by specifying multiple separate stop parameters in a modelfile.": "",
 	"Settings": "সেটিংসমূহ",
 	"Settings saved successfully!": "সেটিংগুলো সফলভাবে সংরক্ষিত হয়েছে",
@@ -964,7 +964,7 @@
 	"System Prompt": "সিস্টেম প্রম্পট",
 	"Tags Generation": "",
 	"Tags Generation Prompt": "",
-	"Tail free sampling is used to reduce the impact of less probable tokens from the output. A higher value (e.g., 2.0) will reduce the impact more, while a value of 1.0 disables this setting. (default: 1)": "",
+	"Tail free sampling is used to reduce the impact of less probable tokens from the output. A higher value (e.g., 2.0) will reduce the impact more, while a value of 1.0 disables this setting.": "",
 	"Talk to model": "",
 	"Tap to interrupt": "",
 	"Tasks": "",
@@ -979,7 +979,7 @@
 	"Thanks for your feedback!": "আপনার মতামত ধন্যবাদ!",
 	"The Application Account DN you bind with for search": "",
 	"The base to search for users": "",
-	"The batch size determines how many text requests are processed together at once. A higher batch size can increase the performance and speed of the model, but it also requires more memory.  (Default: 512)": "",
+	"The batch size determines how many text requests are processed together at once. A higher batch size can increase the performance and speed of the model, but it also requires more memory.": "",
 	"The developers behind this plugin are passionate volunteers from the community. If you find this plugin helpful, please consider contributing to its development.": "",
 	"The evaluation leaderboard is based on the Elo rating system and is updated in real-time.": "",
 	"The LDAP attribute that maps to the mail that users use to sign in.": "",
@@ -988,14 +988,14 @@
 	"The maximum file size in MB. If the file size exceeds this limit, the file will not be uploaded.": "",
 	"The maximum number of files that can be used at once in chat. If the number of files exceeds this limit, the files will not be uploaded.": "",
 	"The score should be a value between 0.0 (0%) and 1.0 (100%).": "স্কোর একটি 0.0 (0%) এবং 1.0 (100%) এর মধ্যে একটি মান হওয়া উচিত।",
-	"The temperature of the model. Increasing the temperature will make the model answer more creatively. (Default: 0.8)": "",
+	"The temperature of the model. Increasing the temperature will make the model answer more creatively.": "",
 	"Theme": "থিম",
 	"Thinking...": "",
 	"This action cannot be undone. Do you wish to continue?": "",
 	"This ensures that your valuable conversations are securely saved to your backend database. Thank you!": "এটা নিশ্চিত করে যে, আপনার গুরুত্বপূর্ণ আলোচনা নিরাপদে আপনার ব্যাকএন্ড ডেটাবেজে সংরক্ষিত আছে। ধন্যবাদ!",
 	"This is an experimental feature, it may not function as expected and is subject to change at any time.": "",
-	"This option controls how many tokens are preserved when refreshing the context. For example, if set to 2, the last 2 tokens of the conversation context will be retained. Preserving context can help maintain the continuity of a conversation, but it may reduce the ability to respond to new topics. (Default: 24)": "",
-	"This option sets the maximum number of tokens the model can generate in its response. Increasing this limit allows the model to provide longer answers, but it may also increase the likelihood of unhelpful or irrelevant content being generated.  (Default: 128)": "",
+	"This option controls how many tokens are preserved when refreshing the context. For example, if set to 2, the last 2 tokens of the conversation context will be retained. Preserving context can help maintain the continuity of a conversation, but it may reduce the ability to respond to new topics.": "",
+	"This option sets the maximum number of tokens the model can generate in its response. Increasing this limit allows the model to provide longer answers, but it may also increase the likelihood of unhelpful or irrelevant content being generated.": "",
 	"This option will delete all existing files in the collection and replace them with newly uploaded files.": "",
 	"This response was generated by \"{{model}}\"": "",
 	"This will delete": "",
@@ -1132,7 +1132,7 @@
 	"Why?": "",
 	"Widescreen Mode": "",
 	"Won": "",
-	"Works together with top-k. A higher value (e.g., 0.95) will lead to more diverse text, while a lower value (e.g., 0.5) will generate more focused and conservative text. (Default: 0.9)": "",
+	"Works together with top-k. A higher value (e.g., 0.95) will lead to more diverse text, while a lower value (e.g., 0.5) will generate more focused and conservative text.": "",
 	"Workspace": "ওয়ার্কস্পেস",
 	"Workspace Permissions": "",
 	"Write": "",

src/lib/i18n/locales/ca-ES/translation.json (+18 -18)

@@ -64,7 +64,7 @@
 	"Allow Voice Interruption in Call": "Permetre la interrupció de la veu en una trucada",
 	"Allowed Endpoints": "Punts d'accés permesos",
 	"Already have an account?": "Ja tens un compte?",
-	"Alternative to the top_p, and aims to ensure a balance of quality and variety. The parameter p represents the minimum probability for a token to be considered, relative to the probability of the most likely token. For example, with p=0.05 and the most likely token having a probability of 0.9, logits with a value less than 0.045 are filtered out. (Default: 0.0)": "Alternativa al top_p, i pretén garantir un equilibri de qualitat i varietat. El paràmetre p representa la probabilitat mínima que es consideri un token, en relació amb la probabilitat del token més probable. Per exemple, amb p=0,05 i el token més probable amb una probabilitat de 0,9, es filtren els logits amb un valor inferior a 0,045. (Per defecte: 0.0)",
+	"Alternative to the top_p, and aims to ensure a balance of quality and variety. The parameter p represents the minimum probability for a token to be considered, relative to the probability of the most likely token. For example, with p=0.05 and the most likely token having a probability of 0.9, logits with a value less than 0.045 are filtered out.": "",
 	"Always": "Sempre",
 	"Amazing": "Al·lucinant",
 	"an assistant": "un assistent",
@@ -208,7 +208,7 @@
 	"Confirm your new password": "Confirma la teva nova contrasenya",
 	"Connect to your own OpenAI compatible API endpoints.": "Connecta als teus propis punts de connexió de l'API compatible amb OpenAI",
 	"Connections": "Connexions",
-	"Constrains effort on reasoning for reasoning models. Only applicable to reasoning models from specific providers that support reasoning effort. (Default: medium)": "Restringeix l'esforç de raonament dels models de raonament. Només aplicable a models de raonament de proveïdors específics que donen suport a l'esforç de raonament. (Per defecte: mitjà)",
+	"Constrains effort on reasoning for reasoning models. Only applicable to reasoning models from specific providers that support reasoning effort.": "",
 	"Contact Admin for WebUI Access": "Posat en contacte amb l'administrador per accedir a WebUI",
 	"Content": "Contingut",
 	"Content Extraction Engine": "",
@@ -218,9 +218,9 @@
 	"Continue with Email": "Continuar amb el correu",
 	"Continue with LDAP": "Continuar amb LDAP",
 	"Control how message text is split for TTS requests. 'Punctuation' splits into sentences, 'paragraphs' splits into paragraphs, and 'none' keeps the message as a single string.": "Controlar com es divideix el text del missatge per a les sol·licituds TTS. 'Puntuació' divideix en frases, 'paràgrafs' divideix en paràgrafs i 'cap' manté el missatge com una cadena única.",
-	"Control the repetition of token sequences in the generated text. A higher value (e.g., 1.5) will penalize repetitions more strongly, while a lower value (e.g., 1.1) will be more lenient. At 1, it is disabled. (Default: 1.1)": "Controlar la repetició de seqüències de tokens en el text generat. Un valor més alt (p. ex., 1,5) penalitzarà les repeticions amb més força, mentre que un valor més baix (p. ex., 1,1) serà més indulgent. A l'1, està desactivat. (Per defecte: 1.1)",
+	"Control the repetition of token sequences in the generated text. A higher value (e.g., 1.5) will penalize repetitions more strongly, while a lower value (e.g., 1.1) will be more lenient. At 1, it is disabled.": "",
 	"Controls": "Controls",
-	"Controls the balance between coherence and diversity of the output. A lower value will result in more focused and coherent text. (Default: 5.0)": "Controlar l'equilibri entre la coherència i la diversitat de la sortida. Un valor més baix donarà lloc a un text més enfocat i coherent. (Per defecte: 5.0)",
+	"Controls the balance between coherence and diversity of the output. A lower value will result in more focused and coherent text.": "",
 	"Copied": "Copiat",
 	"Copied shared chat URL to clipboard!": "S'ha copiat l'URL compartida al porta-retalls!",
 	"Copied to clipboard": "Copiat al porta-retalls",
@@ -358,7 +358,7 @@
 	"Enable Memory Locking (mlock) to prevent model data from being swapped out of RAM. This option locks the model's working set of pages into RAM, ensuring that they will not be swapped out to disk. This can help maintain performance by avoiding page faults and ensuring fast data access.": "Activar el bloqueig de memòria (mlock) per evitar que les dades del model s'intercanviïn fora de la memòria RAM. Aquesta opció bloqueja el conjunt de pàgines de treball del model a la memòria RAM, assegurant-se que no s'intercanviaran al disc. Això pot ajudar a mantenir el rendiment evitant errors de pàgina i garantint un accés ràpid a les dades.",
 	"Enable Memory Mapping (mmap) to load model data. This option allows the system to use disk storage as an extension of RAM by treating disk files as if they were in RAM. This can improve model performance by allowing for faster data access. However, it may not work correctly with all systems and can consume a significant amount of disk space.": "Activar l'assignació de memòria (mmap) per carregar les dades del model. Aquesta opció permet que el sistema utilitzi l'emmagatzematge en disc com a extensió de la memòria RAM tractant els fitxers de disc com si estiguessin a la memòria RAM. Això pot millorar el rendiment del model permetent un accés més ràpid a les dades. Tanmateix, és possible que no funcioni correctament amb tots els sistemes i pot consumir una quantitat important d'espai en disc.",
 	"Enable Message Rating": "Permetre la qualificació de missatges",
-	"Enable Mirostat sampling for controlling perplexity. (Default: 0, 0 = Disabled, 1 = Mirostat, 2 = Mirostat 2.0)": "Activar el mostreig de Mirostat per controlar la perplexitat. (Per defecte: 0, 0 = Inhabilitat, 1 = Mirostat, 2 = Mirostat 2.0)",
+	"Enable Mirostat sampling for controlling perplexity.": "",
 	"Enable New Sign Ups": "Permetre nous registres",
 	"Enabled": "Habilitat",
 	"Ensure your CSV file includes 4 columns in this order: Name, Email, Password, Role.": "Assegura't que els teus fitxers CSV inclouen 4 columnes en aquest ordre: Nom, Correu electrònic, Contrasenya, Rol.",
@@ -566,7 +566,7 @@
 	"Include": "Incloure",
 	"Include `--api-auth` flag when running stable-diffusion-webui": "Inclou `--api-auth` quan executis stable-diffusion-webui",
 	"Include `--api` flag when running stable-diffusion-webui": "Inclou `--api` quan executis stable-diffusion-webui",
-	"Influences how quickly the algorithm responds to feedback from the generated text. A lower learning rate will result in slower adjustments, while a higher learning rate will make the algorithm more responsive. (Default: 0.1)": "Influeix amb la rapidesa amb què l'algoritme respon als comentaris del text generat. Una taxa d'aprenentatge més baixa donarà lloc a ajustos més lents, mentre que una taxa d'aprenentatge més alta farà que l'algorisme sigui més sensible. (Per defecte: 0,1)",
+	"Influences how quickly the algorithm responds to feedback from the generated text. A lower learning rate will result in slower adjustments, while a higher learning rate will make the algorithm more responsive.": "",
 	"Info": "Informació",
 	"Input commands": "Entra comandes",
 	"Install from Github URL": "Instal·lar des de l'URL de Github",
@@ -809,7 +809,7 @@
 	"Reasoning Effort": "Esforç de raonament",
 	"Record voice": "Enregistrar la veu",
 	"Redirecting you to Open WebUI Community": "Redirigint-te a la comunitat OpenWebUI",
-	"Reduces the probability of generating nonsense. A higher value (e.g. 100) will give more diverse answers, while a lower value (e.g. 10) will be more conservative. (Default: 40)": "Redueix la probabilitat de generar ximpleries. Un valor més alt (p. ex. 100) donarà respostes més diverses, mentre que un valor més baix (p. ex. 10) serà més conservador. (Per defecte: 40)",
+	"Reduces the probability of generating nonsense. A higher value (e.g. 100) will give more diverse answers, while a lower value (e.g. 10) will be more conservative.": "",
 	"Refer to yourself as \"User\" (e.g., \"User is learning Spanish\")": "Fes referència a tu mateix com a \"Usuari\" (p. ex., \"L'usuari està aprenent espanyol\")",
 	"References from": "Referències de",
 	"Refused when it shouldn't have": "Refusat quan no hauria d'haver estat",
@@ -918,11 +918,11 @@
 	"Set the number of worker threads used for computation. This option controls how many threads are used to process incoming requests concurrently. Increasing this value can improve performance under high concurrency workloads but may also consume more CPU resources.": "Establir el nombre de fils de treball utilitzats per al càlcul. Aquesta opció controla quants fils s'utilitzen per processar les sol·licituds entrants simultàniament. Augmentar aquest valor pot millorar el rendiment amb càrregues de treball de concurrència elevada, però també pot consumir més recursos de CPU.",
 	"Set Voice": "Establir la veu",
 	"Set whisper model": "Establir el model whisper",
-	"Sets a flat bias against tokens that have appeared at least once. A higher value (e.g., 1.5) will penalize repetitions more strongly, while a lower value (e.g., 0.9) will be more lenient. At 0, it is disabled. (Default: 0)": "Estableix un biaix pla contra tokens que han aparegut almenys una vegada. Un valor més alt (p. ex., 1,5) penalitzarà les repeticions amb més força, mentre que un valor més baix (p. ex., 0,9) serà més indulgent. A 0, està desactivat. (Per defecte: 0)",
-	"Sets a scaling bias against tokens to penalize repetitions, based on how many times they have appeared. A higher value (e.g., 1.5) will penalize repetitions more strongly, while a lower value (e.g., 0.9) will be more lenient. At 0, it is disabled. (Default: 1.1)": "Estableix un biaix d'escala contra tokens per penalitzar les repeticions, en funció de quantes vegades han aparegut. Un valor més alt (p. ex., 1,5) penalitzarà les repeticions amb més força, mentre que un valor més baix (p. ex., 0,9) serà més indulgent. A 0, està desactivat. (Per defecte: 1.1)",
-	"Sets how far back for the model to look back to prevent repetition. (Default: 64, 0 = disabled, -1 = num_ctx)": "Establir fins a quin punt el model mira enrere per evitar la repetició. (Per defecte: 64, 0 = desactivat, -1 = num_ctx)",
-	"Sets the random number seed to use for generation. Setting this to a specific number will make the model generate the same text for the same prompt. (Default: random)": "Establir la llavor del nombre aleatori que s'utilitzarà per a la generació. Establir-ho a un número específic farà que el model generi el mateix text per a la mateixa sol·licitud. (Per defecte: aleatori)",
-	"Sets the size of the context window used to generate the next token. (Default: 2048)": "Estableix la mida de la finestra de context utilitzada per generar el següent token. (Per defecte: 2048)",
+	"Sets a flat bias against tokens that have appeared at least once. A higher value (e.g., 1.5) will penalize repetitions more strongly, while a lower value (e.g., 0.9) will be more lenient. At 0, it is disabled.": "",
+	"Sets a scaling bias against tokens to penalize repetitions, based on how many times they have appeared. A higher value (e.g., 1.5) will penalize repetitions more strongly, while a lower value (e.g., 0.9) will be more lenient. At 0, it is disabled.": "",
+	"Sets how far back for the model to look back to prevent repetition.": "",
+	"Sets the random number seed to use for generation. Setting this to a specific number will make the model generate the same text for the same prompt.": "",
+	"Sets the size of the context window used to generate the next token.": "",
 	"Sets the stop sequences to use. When this pattern is encountered, the LLM will stop generating text and return. Multiple stop patterns may be set by specifying multiple separate stop parameters in a modelfile.": "Establir les seqüències d'aturada a utilitzar. Quan es trobi aquest patró, el LLM deixarà de generar text. Es poden establir diversos patrons de parada especificant diversos paràmetres de parada separats en un fitxer model.",
 	"Settings": "Preferències",
 	"Settings saved successfully!": "Les preferències s'han desat correctament",
@@ -964,7 +964,7 @@
 	"System Prompt": "Indicació del Sistema",
 	"Tags Generation": "Generació d'etiquetes",
 	"Tags Generation Prompt": "Indicació per a la generació d'etiquetes",
-	"Tail free sampling is used to reduce the impact of less probable tokens from the output. A higher value (e.g., 2.0) will reduce the impact more, while a value of 1.0 disables this setting. (default: 1)": "El mostreig sense cua s'utilitza per reduir l'impacte de tokens menys probables de la sortida. Un valor més alt (p. ex., 2,0) reduirà més l'impacte, mentre que un valor d'1,0 desactiva aquesta configuració. (per defecte: 1)",
+	"Tail free sampling is used to reduce the impact of less probable tokens from the output. A higher value (e.g., 2.0) will reduce the impact more, while a value of 1.0 disables this setting.": "",
 	"Talk to model": "",
 	"Tap to interrupt": "Prem per interrompre",
 	"Tasks": "Tasques",
@@ -979,7 +979,7 @@
 	"Thanks for your feedback!": "Gràcies pel teu comentari!",
 	"The Application Account DN you bind with for search": "El DN del compte d'aplicació per realitzar la cerca",
 	"The base to search for users": "La base per cercar usuaris",
-	"The batch size determines how many text requests are processed together at once. A higher batch size can increase the performance and speed of the model, but it also requires more memory.  (Default: 512)": "La mida del lot determina quantes sol·licituds de text es processen alhora. Una mida de lot més gran pot augmentar el rendiment i la velocitat del model, però també requereix més memòria. (Per defecte: 512)",
+	"The batch size determines how many text requests are processed together at once. A higher batch size can increase the performance and speed of the model, but it also requires more memory.": "",
 	"The developers behind this plugin are passionate volunteers from the community. If you find this plugin helpful, please consider contributing to its development.": "Els desenvolupadors d'aquest complement són voluntaris apassionats de la comunitat. Si trobeu útil aquest complement, considereu contribuir al seu desenvolupament.",
 	"The evaluation leaderboard is based on the Elo rating system and is updated in real-time.": "La classificació d'avaluació es basa en el sistema de qualificació Elo i s'actualitza en temps real.",
 	"The LDAP attribute that maps to the mail that users use to sign in.": "L'atribut LDAP que s'associa al correu que els usuaris utilitzen per iniciar la sessió.",
@@ -988,14 +988,14 @@
 	"The maximum file size in MB. If the file size exceeds this limit, the file will not be uploaded.": "La mida màxima del fitxer en MB. Si la mida del fitxer supera aquest límit, el fitxer no es carregarà.",
 	"The maximum number of files that can be used at once in chat. If the number of files exceeds this limit, the files will not be uploaded.": "El nombre màxim de fitxers que es poden utilitzar alhora al xat. Si el nombre de fitxers supera aquest límit, els fitxers no es penjaran.",
 	"The score should be a value between 0.0 (0%) and 1.0 (100%).": "El valor de puntuació hauria de ser entre 0.0 (0%) i 1.0 (100%).",
-	"The temperature of the model. Increasing the temperature will make the model answer more creatively. (Default: 0.8)": "La temperatura del model. Augmentar la temperatura farà que el model respongui de manera més creativa. (Per defecte: 0,8)",
+	"The temperature of the model. Increasing the temperature will make the model answer more creatively.": "",
 	"Theme": "Tema",
 	"Thinking...": "Pensant...",
 	"This action cannot be undone. Do you wish to continue?": "Aquesta acció no es pot desfer. Vols continuar?",
 	"This ensures that your valuable conversations are securely saved to your backend database. Thank you!": "Això assegura que les teves converses valuoses queden desades de manera segura a la teva base de dades. Gràcies!",
 	"This is an experimental feature, it may not function as expected and is subject to change at any time.": "Aquesta és una funció experimental, és possible que no funcioni com s'espera i està subjecta a canvis en qualsevol moment.",
-	"This option controls how many tokens are preserved when refreshing the context. For example, if set to 2, the last 2 tokens of the conversation context will be retained. Preserving context can help maintain the continuity of a conversation, but it may reduce the ability to respond to new topics. (Default: 24)": "Aquesta opció controla quants tokens es conserven en actualitzar el context. Per exemple, si s'estableix en 2, es conservaran els darrers 2 tokens del context de conversa. Preservar el context pot ajudar a mantenir la continuïtat d'una conversa, però pot reduir la capacitat de respondre a nous temes. (Per defecte: 24)",
-	"This option sets the maximum number of tokens the model can generate in its response. Increasing this limit allows the model to provide longer answers, but it may also increase the likelihood of unhelpful or irrelevant content being generated.  (Default: 128)": "Aquesta opció estableix el nombre màxim de tokens que el model pot generar en la seva resposta. Augmentar aquest límit permet que el model proporcioni respostes més llargues, però també pot augmentar la probabilitat que es generi contingut poc útil o irrellevant. (Per defecte: 128)",
+	"This option controls how many tokens are preserved when refreshing the context. For example, if set to 2, the last 2 tokens of the conversation context will be retained. Preserving context can help maintain the continuity of a conversation, but it may reduce the ability to respond to new topics.": "",
+	"This option sets the maximum number of tokens the model can generate in its response. Increasing this limit allows the model to provide longer answers, but it may also increase the likelihood of unhelpful or irrelevant content being generated.": "",
 	"This option will delete all existing files in the collection and replace them with newly uploaded files.": "Aquesta opció eliminarà tots els fitxers existents de la col·lecció i els substituirà per fitxers recentment penjats.",
 	"This response was generated by \"{{model}}\"": "Aquesta resposta l'ha generat el model \"{{model}}\"",
 	"This will delete": "Això eliminarà",
@@ -1132,7 +1132,7 @@
 	"Why?": "Per què?",
 	"Widescreen Mode": "Mode de pantalla ampla",
 	"Won": "Ha guanyat",
-	"Works together with top-k. A higher value (e.g., 0.95) will lead to more diverse text, while a lower value (e.g., 0.5) will generate more focused and conservative text. (Default: 0.9)": "Funciona juntament amb top-k. Un valor més alt (p. ex., 0,95) donarà lloc a un text més divers, mentre que un valor més baix (p. ex., 0,5) generarà un text més concentrat i conservador. (Per defecte: 0,9)",
+	"Works together with top-k. A higher value (e.g., 0.95) will lead to more diverse text, while a lower value (e.g., 0.5) will generate more focused and conservative text.": "",
 	"Workspace": "Espai de treball",
 	"Workspace Permissions": "Permisos de l'espai de treball",
 	"Write": "Escriure",

+ 18 - 18
src/lib/i18n/locales/ceb-PH/translation.json

@@ -64,7 +64,7 @@
 	"Allow Voice Interruption in Call": "",
 	"Allowed Endpoints": "",
 	"Already have an account?": "Naa na kay account ?",
-	"Alternative to the top_p, and aims to ensure a balance of quality and variety. The parameter p represents the minimum probability for a token to be considered, relative to the probability of the most likely token. For example, with p=0.05 and the most likely token having a probability of 0.9, logits with a value less than 0.045 are filtered out. (Default: 0.0)": "",
+	"Alternative to the top_p, and aims to ensure a balance of quality and variety. The parameter p represents the minimum probability for a token to be considered, relative to the probability of the most likely token. For example, with p=0.05 and the most likely token having a probability of 0.9, logits with a value less than 0.045 are filtered out.": "",
 	"Always": "",
 	"Amazing": "",
 	"an assistant": "usa ka katabang",
@@ -208,7 +208,7 @@
 	"Confirm your new password": "",
 	"Connect to your own OpenAI compatible API endpoints.": "",
 	"Connections": "Mga koneksyon",
-	"Constrains effort on reasoning for reasoning models. Only applicable to reasoning models from specific providers that support reasoning effort. (Default: medium)": "",
+	"Constrains effort on reasoning for reasoning models. Only applicable to reasoning models from specific providers that support reasoning effort.": "",
 	"Contact Admin for WebUI Access": "",
 	"Content": "Kontento",
 	"Content Extraction Engine": "",
@@ -218,9 +218,9 @@
 	"Continue with Email": "",
 	"Continue with LDAP": "",
 	"Control how message text is split for TTS requests. 'Punctuation' splits into sentences, 'paragraphs' splits into paragraphs, and 'none' keeps the message as a single string.": "",
-	"Control the repetition of token sequences in the generated text. A higher value (e.g., 1.5) will penalize repetitions more strongly, while a lower value (e.g., 1.1) will be more lenient. At 1, it is disabled. (Default: 1.1)": "",
+	"Control the repetition of token sequences in the generated text. A higher value (e.g., 1.5) will penalize repetitions more strongly, while a lower value (e.g., 1.1) will be more lenient. At 1, it is disabled.": "",
 	"Controls": "",
-	"Controls the balance between coherence and diversity of the output. A lower value will result in more focused and coherent text. (Default: 5.0)": "",
+	"Controls the balance between coherence and diversity of the output. A lower value will result in more focused and coherent text.": "",
 	"Copied": "",
 	"Copied shared chat URL to clipboard!": "",
 	"Copied to clipboard": "",
@@ -358,7 +358,7 @@
 	"Enable Memory Locking (mlock) to prevent model data from being swapped out of RAM. This option locks the model's working set of pages into RAM, ensuring that they will not be swapped out to disk. This can help maintain performance by avoiding page faults and ensuring fast data access.": "",
 	"Enable Memory Mapping (mmap) to load model data. This option allows the system to use disk storage as an extension of RAM by treating disk files as if they were in RAM. This can improve model performance by allowing for faster data access. However, it may not work correctly with all systems and can consume a significant amount of disk space.": "",
 	"Enable Message Rating": "",
-	"Enable Mirostat sampling for controlling perplexity. (Default: 0, 0 = Disabled, 1 = Mirostat, 2 = Mirostat 2.0)": "",
+	"Enable Mirostat sampling for controlling perplexity.": "",
 	"Enable New Sign Ups": "I-enable ang bag-ong mga rehistro",
 	"Enabled": "",
 	"Ensure your CSV file includes 4 columns in this order: Name, Email, Password, Role.": "",
@@ -566,7 +566,7 @@
 	"Include": "",
 	"Include `--api-auth` flag when running stable-diffusion-webui": "",
 	"Include `--api` flag when running stable-diffusion-webui": "Iapil ang `--api` nga bandila kung nagdagan nga stable-diffusion-webui",
-	"Influences how quickly the algorithm responds to feedback from the generated text. A lower learning rate will result in slower adjustments, while a higher learning rate will make the algorithm more responsive. (Default: 0.1)": "",
+	"Influences how quickly the algorithm responds to feedback from the generated text. A lower learning rate will result in slower adjustments, while a higher learning rate will make the algorithm more responsive.": "",
 	"Info": "",
 	"Input commands": "Pagsulod sa input commands",
 	"Install from Github URL": "",
@@ -809,7 +809,7 @@
 	"Reasoning Effort": "",
 	"Record voice": "Irekord ang tingog",
 	"Redirecting you to Open WebUI Community": "Gi-redirect ka sa komunidad sa OpenWebUI",
-	"Reduces the probability of generating nonsense. A higher value (e.g. 100) will give more diverse answers, while a lower value (e.g. 10) will be more conservative. (Default: 40)": "",
+	"Reduces the probability of generating nonsense. A higher value (e.g. 100) will give more diverse answers, while a lower value (e.g. 10) will be more conservative.": "",
 	"Refer to yourself as \"User\" (e.g., \"User is learning Spanish\")": "",
 	"References from": "",
 	"Refused when it shouldn't have": "",
@@ -918,11 +918,11 @@
 	"Set the number of worker threads used for computation. This option controls how many threads are used to process incoming requests concurrently. Increasing this value can improve performance under high concurrency workloads but may also consume more CPU resources.": "",
 	"Set Voice": "Ibutang ang tingog",
 	"Set whisper model": "",
-	"Sets a flat bias against tokens that have appeared at least once. A higher value (e.g., 1.5) will penalize repetitions more strongly, while a lower value (e.g., 0.9) will be more lenient. At 0, it is disabled. (Default: 0)": "",
-	"Sets a scaling bias against tokens to penalize repetitions, based on how many times they have appeared. A higher value (e.g., 1.5) will penalize repetitions more strongly, while a lower value (e.g., 0.9) will be more lenient. At 0, it is disabled. (Default: 1.1)": "",
-	"Sets how far back for the model to look back to prevent repetition. (Default: 64, 0 = disabled, -1 = num_ctx)": "",
-	"Sets the random number seed to use for generation. Setting this to a specific number will make the model generate the same text for the same prompt. (Default: random)": "",
-	"Sets the size of the context window used to generate the next token. (Default: 2048)": "",
+	"Sets a flat bias against tokens that have appeared at least once. A higher value (e.g., 1.5) will penalize repetitions more strongly, while a lower value (e.g., 0.9) will be more lenient. At 0, it is disabled.": "",
+	"Sets a scaling bias against tokens to penalize repetitions, based on how many times they have appeared. A higher value (e.g., 1.5) will penalize repetitions more strongly, while a lower value (e.g., 0.9) will be more lenient. At 0, it is disabled.": "",
+	"Sets how far back for the model to look back to prevent repetition.": "",
+	"Sets the random number seed to use for generation. Setting this to a specific number will make the model generate the same text for the same prompt.": "",
+	"Sets the size of the context window used to generate the next token.": "",
 	"Sets the stop sequences to use. When this pattern is encountered, the LLM will stop generating text and return. Multiple stop patterns may be set by specifying multiple separate stop parameters in a modelfile.": "",
 	"Settings": "Mga setting",
 	"Settings saved successfully!": "Malampuson nga na-save ang mga setting!",
@@ -964,7 +964,7 @@
 	"System Prompt": "Madasig nga Sistema",
 	"Tags Generation": "",
 	"Tags Generation Prompt": "",
-	"Tail free sampling is used to reduce the impact of less probable tokens from the output. A higher value (e.g., 2.0) will reduce the impact more, while a value of 1.0 disables this setting. (default: 1)": "",
+	"Tail free sampling is used to reduce the impact of less probable tokens from the output. A higher value (e.g., 2.0) will reduce the impact more, while a value of 1.0 disables this setting.": "",
 	"Talk to model": "",
 	"Tap to interrupt": "",
 	"Tasks": "",
@@ -979,7 +979,7 @@
 	"Thanks for your feedback!": "",
 	"The Application Account DN you bind with for search": "",
 	"The base to search for users": "",
-	"The batch size determines how many text requests are processed together at once. A higher batch size can increase the performance and speed of the model, but it also requires more memory.  (Default: 512)": "",
+	"The batch size determines how many text requests are processed together at once. A higher batch size can increase the performance and speed of the model, but it also requires more memory.": "",
 	"The developers behind this plugin are passionate volunteers from the community. If you find this plugin helpful, please consider contributing to its development.": "",
 	"The evaluation leaderboard is based on the Elo rating system and is updated in real-time.": "",
 	"The LDAP attribute that maps to the mail that users use to sign in.": "",
@@ -988,14 +988,14 @@
 	"The maximum file size in MB. If the file size exceeds this limit, the file will not be uploaded.": "",
 	"The maximum number of files that can be used at once in chat. If the number of files exceeds this limit, the files will not be uploaded.": "",
 	"The score should be a value between 0.0 (0%) and 1.0 (100%).": "",
-	"The temperature of the model. Increasing the temperature will make the model answer more creatively. (Default: 0.8)": "",
+	"The temperature of the model. Increasing the temperature will make the model answer more creatively.": "",
 	"Theme": "Tema",
 	"Thinking...": "",
 	"This action cannot be undone. Do you wish to continue?": "",
 	"This ensures that your valuable conversations are securely saved to your backend database. Thank you!": "Kini nagsiguro nga ang imong bililhon nga mga panag-istoryahanay luwas nga natipig sa imong backend database. ",
 	"This is an experimental feature, it may not function as expected and is subject to change at any time.": "",
-	"This option controls how many tokens are preserved when refreshing the context. For example, if set to 2, the last 2 tokens of the conversation context will be retained. Preserving context can help maintain the continuity of a conversation, but it may reduce the ability to respond to new topics. (Default: 24)": "",
-	"This option sets the maximum number of tokens the model can generate in its response. Increasing this limit allows the model to provide longer answers, but it may also increase the likelihood of unhelpful or irrelevant content being generated.  (Default: 128)": "",
+	"This option controls how many tokens are preserved when refreshing the context. For example, if set to 2, the last 2 tokens of the conversation context will be retained. Preserving context can help maintain the continuity of a conversation, but it may reduce the ability to respond to new topics.": "",
+	"This option sets the maximum number of tokens the model can generate in its response. Increasing this limit allows the model to provide longer answers, but it may also increase the likelihood of unhelpful or irrelevant content being generated.": "",
 	"This option will delete all existing files in the collection and replace them with newly uploaded files.": "",
 	"This response was generated by \"{{model}}\"": "",
 	"This will delete": "",
@@ -1132,7 +1132,7 @@
 	"Why?": "",
 	"Widescreen Mode": "",
 	"Won": "",
-	"Works together with top-k. A higher value (e.g., 0.95) will lead to more diverse text, while a lower value (e.g., 0.5) will generate more focused and conservative text. (Default: 0.9)": "",
+	"Works together with top-k. A higher value (e.g., 0.95) will lead to more diverse text, while a lower value (e.g., 0.5) will generate more focused and conservative text.": "",
 	"Workspace": "",
 	"Workspace Permissions": "",
 	"Write": "",

+ 18 - 18
src/lib/i18n/locales/cs-CZ/translation.json

@@ -64,7 +64,7 @@
 	"Allow Voice Interruption in Call": "Povolit přerušení hlasu při hovoru",
 	"Allowed Endpoints": "",
 	"Already have an account?": "Už máte účet?",
-	"Alternative to the top_p, and aims to ensure a balance of quality and variety. The parameter p represents the minimum probability for a token to be considered, relative to the probability of the most likely token. For example, with p=0.05 and the most likely token having a probability of 0.9, logits with a value less than 0.045 are filtered out. (Default: 0.0)": "",
+	"Alternative to the top_p, and aims to ensure a balance of quality and variety. The parameter p represents the minimum probability for a token to be considered, relative to the probability of the most likely token. For example, with p=0.05 and the most likely token having a probability of 0.9, logits with a value less than 0.045 are filtered out.": "",
 	"Always": "",
 	"Amazing": "",
 	"an assistant": "asistent",
@@ -208,7 +208,7 @@
 	"Confirm your new password": "",
 	"Connect to your own OpenAI compatible API endpoints.": "",
 	"Connections": "Připojení",
-	"Constrains effort on reasoning for reasoning models. Only applicable to reasoning models from specific providers that support reasoning effort. (Default: medium)": "",
+	"Constrains effort on reasoning for reasoning models. Only applicable to reasoning models from specific providers that support reasoning effort.": "",
 	"Contact Admin for WebUI Access": "Kontaktujte administrátora pro přístup k webovému rozhraní.",
 	"Content": "Obsah",
 	"Content Extraction Engine": "",
@@ -218,9 +218,9 @@
 	"Continue with Email": "",
 	"Continue with LDAP": "",
 	"Control how message text is split for TTS requests. 'Punctuation' splits into sentences, 'paragraphs' splits into paragraphs, and 'none' keeps the message as a single string.": "Řízení, jak se text zprávy rozděluje pro požadavky TTS. 'Punctuation' rozděluje text na věty, 'paragraphs' rozděluje text na odstavce a 'none' udržuje zprávu jako jeden celý řetězec.",
-	"Control the repetition of token sequences in the generated text. A higher value (e.g., 1.5) will penalize repetitions more strongly, while a lower value (e.g., 1.1) will be more lenient. At 1, it is disabled. (Default: 1.1)": "",
+	"Control the repetition of token sequences in the generated text. A higher value (e.g., 1.5) will penalize repetitions more strongly, while a lower value (e.g., 1.1) will be more lenient. At 1, it is disabled.": "",
 	"Controls": "Ovládací prvky",
-	"Controls the balance between coherence and diversity of the output. A lower value will result in more focused and coherent text. (Default: 5.0)": "",
+	"Controls the balance between coherence and diversity of the output. A lower value will result in more focused and coherent text.": "",
 	"Copied": "Zkopírováno",
 	"Copied shared chat URL to clipboard!": "URL sdíleného chatu zkopírován do schránky!",
 	"Copied to clipboard": "Zkopírováno do schránky",
@@ -358,7 +358,7 @@
 	"Enable Memory Locking (mlock) to prevent model data from being swapped out of RAM. This option locks the model's working set of pages into RAM, ensuring that they will not be swapped out to disk. This can help maintain performance by avoiding page faults and ensuring fast data access.": "",
 	"Enable Memory Mapping (mmap) to load model data. This option allows the system to use disk storage as an extension of RAM by treating disk files as if they were in RAM. This can improve model performance by allowing for faster data access. However, it may not work correctly with all systems and can consume a significant amount of disk space.": "",
 	"Enable Message Rating": "Povolit hodnocení zpráv",
-	"Enable Mirostat sampling for controlling perplexity. (Default: 0, 0 = Disabled, 1 = Mirostat, 2 = Mirostat 2.0)": "",
+	"Enable Mirostat sampling for controlling perplexity.": "",
 	"Enable New Sign Ups": "Povolit nové registrace",
 	"Enabled": "Povoleno",
 	"Ensure your CSV file includes 4 columns in this order: Name, Email, Password, Role.": "Ujistěte se, že váš CSV soubor obsahuje 4 sloupce v tomto pořadí: Name, Email, Password, Role.",
@@ -566,7 +566,7 @@
 	"Include": "Zahrnout",
 	"Include `--api-auth` flag when running stable-diffusion-webui": "Zahrňte přepínač `--api-auth` při spuštění stable-diffusion-webui.",
 	"Include `--api` flag when running stable-diffusion-webui": "Při spuštění stable-diffusion-webui zahrňte příznak `--api`.",
-	"Influences how quickly the algorithm responds to feedback from the generated text. A lower learning rate will result in slower adjustments, while a higher learning rate will make the algorithm more responsive. (Default: 0.1)": "",
+	"Influences how quickly the algorithm responds to feedback from the generated text. A lower learning rate will result in slower adjustments, while a higher learning rate will make the algorithm more responsive.": "",
 	"Info": "Info",
 	"Input commands": "Vstupní příkazy",
 	"Install from Github URL": "Instalace z URL adresy Githubu",
@@ -809,7 +809,7 @@
 	"Reasoning Effort": "",
 	"Record voice": "Nahrát hlas",
 	"Redirecting you to Open WebUI Community": "Přesměrování na komunitu OpenWebUI",
-	"Reduces the probability of generating nonsense. A higher value (e.g. 100) will give more diverse answers, while a lower value (e.g. 10) will be more conservative. (Default: 40)": "",
+	"Reduces the probability of generating nonsense. A higher value (e.g. 100) will give more diverse answers, while a lower value (e.g. 10) will be more conservative.": "",
 	"Refer to yourself as \"User\" (e.g., \"User is learning Spanish\")": "Odkazujte na sebe jako na \"uživatele\" (např. \"Uživatel se učí španělsky\").",
 	"References from": "Reference z",
 	"Refused when it shouldn't have": "Odmítnuto, když nemělo být.",
@@ -918,11 +918,11 @@
 	"Set the number of worker threads used for computation. This option controls how many threads are used to process incoming requests concurrently. Increasing this value can improve performance under high concurrency workloads but may also consume more CPU resources.": "",
 	"Set Voice": "Nastavit hlas",
 	"Set whisper model": "Nastavit model whisper",
-	"Sets a flat bias against tokens that have appeared at least once. A higher value (e.g., 1.5) will penalize repetitions more strongly, while a lower value (e.g., 0.9) will be more lenient. At 0, it is disabled. (Default: 0)": "",
-	"Sets a scaling bias against tokens to penalize repetitions, based on how many times they have appeared. A higher value (e.g., 1.5) will penalize repetitions more strongly, while a lower value (e.g., 0.9) will be more lenient. At 0, it is disabled. (Default: 1.1)": "",
-	"Sets how far back for the model to look back to prevent repetition. (Default: 64, 0 = disabled, -1 = num_ctx)": "",
-	"Sets the random number seed to use for generation. Setting this to a specific number will make the model generate the same text for the same prompt. (Default: random)": "",
-	"Sets the size of the context window used to generate the next token. (Default: 2048)": "",
+	"Sets a flat bias against tokens that have appeared at least once. A higher value (e.g., 1.5) will penalize repetitions more strongly, while a lower value (e.g., 0.9) will be more lenient. At 0, it is disabled.": "",
+	"Sets a scaling bias against tokens to penalize repetitions, based on how many times they have appeared. A higher value (e.g., 1.5) will penalize repetitions more strongly, while a lower value (e.g., 0.9) will be more lenient. At 0, it is disabled.": "",
+	"Sets how far back for the model to look back to prevent repetition.": "",
+	"Sets the random number seed to use for generation. Setting this to a specific number will make the model generate the same text for the same prompt.": "",
+	"Sets the size of the context window used to generate the next token.": "",
 	"Sets the stop sequences to use. When this pattern is encountered, the LLM will stop generating text and return. Multiple stop patterns may be set by specifying multiple separate stop parameters in a modelfile.": "",
 	"Settings": "Nastavení",
 	"Settings saved successfully!": "Nastavení byla úspěšně uložena!",
@@ -964,7 +964,7 @@
 	"System Prompt": "Systémový prompt",
 	"Tags Generation": "",
 	"Tags Generation Prompt": "Prompt pro generování značek",
-	"Tail free sampling is used to reduce the impact of less probable tokens from the output. A higher value (e.g., 2.0) will reduce the impact more, while a value of 1.0 disables this setting. (default: 1)": "",
+	"Tail free sampling is used to reduce the impact of less probable tokens from the output. A higher value (e.g., 2.0) will reduce the impact more, while a value of 1.0 disables this setting.": "",
 	"Talk to model": "",
 	"Tap to interrupt": "Klepněte pro přerušení",
 	"Tasks": "",
@@ -979,7 +979,7 @@
 	"Thanks for your feedback!": "Děkujeme za vaši zpětnou vazbu!",
 	"The Application Account DN you bind with for search": "",
 	"The base to search for users": "",
-	"The batch size determines how many text requests are processed together at once. A higher batch size can increase the performance and speed of the model, but it also requires more memory.  (Default: 512)": "",
+	"The batch size determines how many text requests are processed together at once. A higher batch size can increase the performance and speed of the model, but it also requires more memory.": "",
 	"The developers behind this plugin are passionate volunteers from the community. If you find this plugin helpful, please consider contributing to its development.": "Vývojáři stojící za tímto pluginem jsou zapálení dobrovolníci z komunity. Pokud považujete tento plugin za užitečný, zvažte příspěvek k jeho vývoji.",
 	"The evaluation leaderboard is based on the Elo rating system and is updated in real-time.": "Hodnotící žebříček je založen na systému hodnocení Elo a je aktualizován v reálném čase.",
 	"The LDAP attribute that maps to the mail that users use to sign in.": "",
@@ -988,14 +988,14 @@
 	"The maximum file size in MB. If the file size exceeds this limit, the file will not be uploaded.": "Maximální velikost souboru v MB. Pokud velikost souboru překročí tento limit, soubor nebude nahrán.",
 	"The maximum number of files that can be used at once in chat. If the number of files exceeds this limit, the files will not be uploaded.": "Maximální počet souborů, které mohou být použity najednou v chatu. Pokud počet souborů překročí tento limit, soubory nebudou nahrány.",
 	"The score should be a value between 0.0 (0%) and 1.0 (100%).": "Skóre by mělo být hodnotou mezi 0,0 (0%) a 1,0 (100%).",
-	"The temperature of the model. Increasing the temperature will make the model answer more creatively. (Default: 0.8)": "",
+	"The temperature of the model. Increasing the temperature will make the model answer more creatively.": "",
 	"Theme": "Téma",
 	"Thinking...": "Přemýšlím...",
 	"This action cannot be undone. Do you wish to continue?": "Tuto akci nelze vrátit zpět. Přejete si pokračovat?",
 	"This ensures that your valuable conversations are securely saved to your backend database. Thank you!": "To zajišťuje, že vaše cenné konverzace jsou bezpečně uloženy ve vaší backendové databázi. Děkujeme!",
 	"This is an experimental feature, it may not function as expected and is subject to change at any time.": "Jedná se o experimentální funkci, nemusí fungovat podle očekávání a může být kdykoliv změněna.",
-	"This option controls how many tokens are preserved when refreshing the context. For example, if set to 2, the last 2 tokens of the conversation context will be retained. Preserving context can help maintain the continuity of a conversation, but it may reduce the ability to respond to new topics. (Default: 24)": "",
-	"This option sets the maximum number of tokens the model can generate in its response. Increasing this limit allows the model to provide longer answers, but it may also increase the likelihood of unhelpful or irrelevant content being generated.  (Default: 128)": "",
+	"This option controls how many tokens are preserved when refreshing the context. For example, if set to 2, the last 2 tokens of the conversation context will be retained. Preserving context can help maintain the continuity of a conversation, but it may reduce the ability to respond to new topics.": "",
+	"This option sets the maximum number of tokens the model can generate in its response. Increasing this limit allows the model to provide longer answers, but it may also increase the likelihood of unhelpful or irrelevant content being generated.": "",
 	"This option will delete all existing files in the collection and replace them with newly uploaded files.": "Tato volba odstraní všechny existující soubory ve sbírce a nahradí je nově nahranými soubory.",
 	"This response was generated by \"{{model}}\"": "Tato odpověď byla vygenerována pomocí \"{{model}}\"",
 	"This will delete": "Tohle odstraní",
@@ -1132,7 +1132,7 @@
 	"Why?": "Proč?",
 	"Widescreen Mode": "Režim širokoúhlého zobrazení",
 	"Won": "Vyhrál",
-	"Works together with top-k. A higher value (e.g., 0.95) will lead to more diverse text, while a lower value (e.g., 0.5) will generate more focused and conservative text. (Default: 0.9)": "",
+	"Works together with top-k. A higher value (e.g., 0.95) will lead to more diverse text, while a lower value (e.g., 0.5) will generate more focused and conservative text.": "",
 	"Workspace": "",
 	"Workspace Permissions": "",
 	"Write": "",

+ 18 - 18
src/lib/i18n/locales/da-DK/translation.json

@@ -64,7 +64,7 @@
 	"Allow Voice Interruption in Call": "Tillad afbrydelser i stemme i opkald",
 	"Allowed Endpoints": "",
 	"Already have an account?": "Har du allerede en profil?",
-	"Alternative to the top_p, and aims to ensure a balance of quality and variety. The parameter p represents the minimum probability for a token to be considered, relative to the probability of the most likely token. For example, with p=0.05 and the most likely token having a probability of 0.9, logits with a value less than 0.045 are filtered out. (Default: 0.0)": "",
+	"Alternative to the top_p, and aims to ensure a balance of quality and variety. The parameter p represents the minimum probability for a token to be considered, relative to the probability of the most likely token. For example, with p=0.05 and the most likely token having a probability of 0.9, logits with a value less than 0.045 are filtered out.": "",
 	"Always": "",
 	"Amazing": "",
 	"an assistant": "en assistent",
@@ -208,7 +208,7 @@
 	"Confirm your new password": "",
 	"Connect to your own OpenAI compatible API endpoints.": "",
 	"Connections": "Forbindelser",
-	"Constrains effort on reasoning for reasoning models. Only applicable to reasoning models from specific providers that support reasoning effort. (Default: medium)": "",
+	"Constrains effort on reasoning for reasoning models. Only applicable to reasoning models from specific providers that support reasoning effort.": "",
 	"Contact Admin for WebUI Access": "Kontakt din administrator for adgang til WebUI",
 	"Content": "Indhold",
 	"Content Extraction Engine": "",
@@ -218,9 +218,9 @@
 	"Continue with Email": "",
 	"Continue with LDAP": "",
 	"Control how message text is split for TTS requests. 'Punctuation' splits into sentences, 'paragraphs' splits into paragraphs, and 'none' keeps the message as a single string.": "Kontroller hvordan beskedens tekst bliver splittet til TTS requests. 'Punctuation' (tegnsætning) splitter i sætninger, 'paragraphs' splitter i paragraffer, og 'none' beholder beskeden som en samlet streng.",
-	"Control the repetition of token sequences in the generated text. A higher value (e.g., 1.5) will penalize repetitions more strongly, while a lower value (e.g., 1.1) will be more lenient. At 1, it is disabled. (Default: 1.1)": "",
+	"Control the repetition of token sequences in the generated text. A higher value (e.g., 1.5) will penalize repetitions more strongly, while a lower value (e.g., 1.1) will be more lenient. At 1, it is disabled.": "",
 	"Controls": "Indstillinger",
-	"Controls the balance between coherence and diversity of the output. A lower value will result in more focused and coherent text. (Default: 5.0)": "",
+	"Controls the balance between coherence and diversity of the output. A lower value will result in more focused and coherent text.": "",
 	"Copied": "Kopieret",
 	"Copied shared chat URL to clipboard!": "Link til deling kopieret til udklipsholder",
 	"Copied to clipboard": "Kopieret til udklipsholder",
@@ -358,7 +358,7 @@
 	"Enable Memory Locking (mlock) to prevent model data from being swapped out of RAM. This option locks the model's working set of pages into RAM, ensuring that they will not be swapped out to disk. This can help maintain performance by avoiding page faults and ensuring fast data access.": "",
 	"Enable Memory Mapping (mmap) to load model data. This option allows the system to use disk storage as an extension of RAM by treating disk files as if they were in RAM. This can improve model performance by allowing for faster data access. However, it may not work correctly with all systems and can consume a significant amount of disk space.": "",
 	"Enable Message Rating": "Aktiver rating af besked",
-	"Enable Mirostat sampling for controlling perplexity. (Default: 0, 0 = Disabled, 1 = Mirostat, 2 = Mirostat 2.0)": "",
+	"Enable Mirostat sampling for controlling perplexity.": "",
 	"Enable New Sign Ups": "Aktiver nye signups",
 	"Enabled": "Aktiveret",
 	"Ensure your CSV file includes 4 columns in this order: Name, Email, Password, Role.": "Sørg for at din CSV-fil indeholder 4 kolonner in denne rækkefølge: Name, Email, Password, Role.",
@@ -566,7 +566,7 @@
 	"Include": "",
 	"Include `--api-auth` flag when running stable-diffusion-webui": "Inkluder `--api-auth` flag, når du kører stable-diffusion-webui",
 	"Include `--api` flag when running stable-diffusion-webui": "Inkluder `--api` flag, når du kører stable-diffusion-webui",
-	"Influences how quickly the algorithm responds to feedback from the generated text. A lower learning rate will result in slower adjustments, while a higher learning rate will make the algorithm more responsive. (Default: 0.1)": "",
+	"Influences how quickly the algorithm responds to feedback from the generated text. A lower learning rate will result in slower adjustments, while a higher learning rate will make the algorithm more responsive.": "",
 	"Info": "Info",
 	"Input commands": "Inputkommandoer",
 	"Install from Github URL": "Installer fra Github URL",
@@ -809,7 +809,7 @@
 	"Reasoning Effort": "",
 	"Record voice": "Optag stemme",
 	"Redirecting you to Open WebUI Community": "Omdirigerer dig til OpenWebUI Community",
-	"Reduces the probability of generating nonsense. A higher value (e.g. 100) will give more diverse answers, while a lower value (e.g. 10) will be more conservative. (Default: 40)": "",
+	"Reduces the probability of generating nonsense. A higher value (e.g. 100) will give more diverse answers, while a lower value (e.g. 10) will be more conservative.": "",
 	"Refer to yourself as \"User\" (e.g., \"User is learning Spanish\")": "Referer til dig selv som \"Bruger\" (f.eks. \"Bruger lærer spansk\")",
 	"References from": "",
 	"Refused when it shouldn't have": "Afvist, når den ikke burde have været det",
@@ -918,11 +918,11 @@
 	"Set the number of worker threads used for computation. This option controls how many threads are used to process incoming requests concurrently. Increasing this value can improve performance under high concurrency workloads but may also consume more CPU resources.": "",
 	"Set Voice": "Indstil stemme",
 	"Set whisper model": "",
-	"Sets a flat bias against tokens that have appeared at least once. A higher value (e.g., 1.5) will penalize repetitions more strongly, while a lower value (e.g., 0.9) will be more lenient. At 0, it is disabled. (Default: 0)": "",
-	"Sets a scaling bias against tokens to penalize repetitions, based on how many times they have appeared. A higher value (e.g., 1.5) will penalize repetitions more strongly, while a lower value (e.g., 0.9) will be more lenient. At 0, it is disabled. (Default: 1.1)": "",
-	"Sets how far back for the model to look back to prevent repetition. (Default: 64, 0 = disabled, -1 = num_ctx)": "",
-	"Sets the random number seed to use for generation. Setting this to a specific number will make the model generate the same text for the same prompt. (Default: random)": "",
-	"Sets the size of the context window used to generate the next token. (Default: 2048)": "",
+	"Sets a flat bias against tokens that have appeared at least once. A higher value (e.g., 1.5) will penalize repetitions more strongly, while a lower value (e.g., 0.9) will be more lenient. At 0, it is disabled.": "",
+	"Sets a scaling bias against tokens to penalize repetitions, based on how many times they have appeared. A higher value (e.g., 1.5) will penalize repetitions more strongly, while a lower value (e.g., 0.9) will be more lenient. At 0, it is disabled.": "",
+	"Sets how far back for the model to look back to prevent repetition.": "",
+	"Sets the random number seed to use for generation. Setting this to a specific number will make the model generate the same text for the same prompt.": "",
+	"Sets the size of the context window used to generate the next token.": "",
 	"Sets the stop sequences to use. When this pattern is encountered, the LLM will stop generating text and return. Multiple stop patterns may be set by specifying multiple separate stop parameters in a modelfile.": "",
 	"Settings": "Indstillinger",
 	"Settings saved successfully!": "Indstillinger gemt!",
@@ -964,7 +964,7 @@
 	"System Prompt": "Systemprompt",
 	"Tags Generation": "",
 	"Tags Generation Prompt": "",
-	"Tail free sampling is used to reduce the impact of less probable tokens from the output. A higher value (e.g., 2.0) will reduce the impact more, while a value of 1.0 disables this setting. (default: 1)": "",
+	"Tail free sampling is used to reduce the impact of less probable tokens from the output. A higher value (e.g., 2.0) will reduce the impact more, while a value of 1.0 disables this setting.": "",
 	"Talk to model": "",
 	"Tap to interrupt": "Tryk for at afbryde",
 	"Tasks": "",
@@ -979,7 +979,7 @@
 	"Thanks for your feedback!": "Tak for din feedback!",
 	"The Application Account DN you bind with for search": "",
 	"The base to search for users": "",
-	"The batch size determines how many text requests are processed together at once. A higher batch size can increase the performance and speed of the model, but it also requires more memory.  (Default: 512)": "",
+	"The batch size determines how many text requests are processed together at once. A higher batch size can increase the performance and speed of the model, but it also requires more memory.": "",
 	"The developers behind this plugin are passionate volunteers from the community. If you find this plugin helpful, please consider contributing to its development.": "Udviklerne bag dette plugin er passionerede frivillige fra fællesskabet. Hvis du finder dette plugin nyttigt, kan du overveje at bidrage til dets udvikling.",
 	"The evaluation leaderboard is based on the Elo rating system and is updated in real-time.": "",
 	"The LDAP attribute that maps to the mail that users use to sign in.": "",
@@ -988,14 +988,14 @@
 	"The maximum file size in MB. If the file size exceeds this limit, the file will not be uploaded.": "Den maksimale filstørrelse i MB. Hvis filstørrelsen overstiger denne grænse, uploades filen ikke.",
 	"The maximum number of files that can be used at once in chat. If the number of files exceeds this limit, the files will not be uploaded.": "Det maksimale antal filer, der kan bruges på én gang i chatten. Hvis antallet af filer overstiger denne grænse, uploades filerne ikke.",
 	"The score should be a value between 0.0 (0%) and 1.0 (100%).": "Scoren skal være en værdi mellem 0,0 (0%) og 1,0 (100%).",
-	"The temperature of the model. Increasing the temperature will make the model answer more creatively. (Default: 0.8)": "",
+	"The temperature of the model. Increasing the temperature will make the model answer more creatively.": "",
 	"Theme": "Tema",
 	"Thinking...": "Tænker...",
 	"This action cannot be undone. Do you wish to continue?": "Denne handling kan ikke fortrydes. Vil du fortsætte?",
 	"This ensures that your valuable conversations are securely saved to your backend database. Thank you!": "Dette sikrer, at dine værdifulde samtaler gemmes sikkert i din backend-database. Tak!",
 	"This is an experimental feature, it may not function as expected and is subject to change at any time.": "Dette er en eksperimentel funktion, den fungerer muligvis ikke som forventet og kan ændres når som helst.",
-	"This option controls how many tokens are preserved when refreshing the context. For example, if set to 2, the last 2 tokens of the conversation context will be retained. Preserving context can help maintain the continuity of a conversation, but it may reduce the ability to respond to new topics. (Default: 24)": "",
-	"This option sets the maximum number of tokens the model can generate in its response. Increasing this limit allows the model to provide longer answers, but it may also increase the likelihood of unhelpful or irrelevant content being generated.  (Default: 128)": "",
+	"This option controls how many tokens are preserved when refreshing the context. For example, if set to 2, the last 2 tokens of the conversation context will be retained. Preserving context can help maintain the continuity of a conversation, but it may reduce the ability to respond to new topics.": "",
+	"This option sets the maximum number of tokens the model can generate in its response. Increasing this limit allows the model to provide longer answers, but it may also increase the likelihood of unhelpful or irrelevant content being generated.": "",
 	"This option will delete all existing files in the collection and replace them with newly uploaded files.": "Denne indstilling sletter alle eksisterende filer i samlingen og erstatter dem med nyligt uploadede filer.",
 	"This response was generated by \"{{model}}\"": "",
 	"This will delete": "Dette vil slette",
@@ -1132,7 +1132,7 @@
 	"Why?": "",
 	"Widescreen Mode": "Widescreen-tilstand",
 	"Won": "",
-	"Works together with top-k. A higher value (e.g., 0.95) will lead to more diverse text, while a lower value (e.g., 0.5) will generate more focused and conservative text. (Default: 0.9)": "",
+	"Works together with top-k. A higher value (e.g., 0.95) will lead to more diverse text, while a lower value (e.g., 0.5) will generate more focused and conservative text.": "",
 	"Workspace": "Arbejdsområde",
 	"Workspace Permissions": "",
 	"Write": "",

+ 18 - 18
src/lib/i18n/locales/de-DE/translation.json

@@ -64,7 +64,7 @@
 	"Allow Voice Interruption in Call": "Unterbrechung durch Stimme im Anruf zulassen",
 	"Allowed Endpoints": "Erlaubte Endpunkte",
 	"Already have an account?": "Haben Sie bereits einen Account?",
-	"Alternative to the top_p, and aims to ensure a balance of quality and variety. The parameter p represents the minimum probability for a token to be considered, relative to the probability of the most likely token. For example, with p=0.05 and the most likely token having a probability of 0.9, logits with a value less than 0.045 are filtered out. (Default: 0.0)": "Alternative zu top_p und zielt darauf ab, ein Gleichgewicht zwischen Qualität und Vielfalt zu gewährleisten. Der Parameter p repräsentiert die Mindestwahrscheinlichkeit für ein Token, um berücksichtigt zu werden, relativ zur Wahrscheinlichkeit des wahrscheinlichsten Tokens. Zum Beispiel, bei p=0.05 und das wahrscheinlichste Token hat eine Wahrscheinlichkeit von 0.9, werden Logits mit einem Wert von weniger als 0.045 herausgefiltert. (Standard: 0.0)",
+	"Alternative to the top_p, and aims to ensure a balance of quality and variety. The parameter p represents the minimum probability for a token to be considered, relative to the probability of the most likely token. For example, with p=0.05 and the most likely token having a probability of 0.9, logits with a value less than 0.045 are filtered out.": "",
 	"Always": "Immer",
 	"Amazing": "Fantastisch",
 	"an assistant": "ein Assistent",
@@ -208,7 +208,7 @@
 	"Confirm your new password": "Neues Passwort bestätigen",
 	"Connect to your own OpenAI compatible API endpoints.": "",
 	"Connections": "Verbindungen",
-	"Constrains effort on reasoning for reasoning models. Only applicable to reasoning models from specific providers that support reasoning effort. (Default: medium)": "Beschränkt den Aufwand für das Schlussfolgern bei Schlussfolgerungsmodellen. Nur anwendbar auf Schlussfolgerungsmodelle von spezifischen Anbietern, die den Schlussfolgerungsaufwand unterstützen. (Standard: medium)",
+	"Constrains effort on reasoning for reasoning models. Only applicable to reasoning models from specific providers that support reasoning effort.": "",
 	"Contact Admin for WebUI Access": "Kontaktieren Sie den Administrator für den Zugriff auf die Weboberfläche",
 	"Content": "Info",
 	"Content Extraction Engine": "",
@@ -218,9 +218,9 @@
 	"Continue with Email": "Mit Email fortfahren",
 	"Continue with LDAP": "Mit LDAP fortfahren",
 	"Control how message text is split for TTS requests. 'Punctuation' splits into sentences, 'paragraphs' splits into paragraphs, and 'none' keeps the message as a single string.": "Kontrollieren Sie, wie Nachrichtentext für TTS-Anfragen aufgeteilt wird. 'Punctuation' teilt in Sätze auf, 'paragraphs' teilt in Absätze auf und 'none' behält die Nachricht als einzelnen String.",
-	"Control the repetition of token sequences in the generated text. A higher value (e.g., 1.5) will penalize repetitions more strongly, while a lower value (e.g., 1.1) will be more lenient. At 1, it is disabled. (Default: 1.1)": "",
+	"Control the repetition of token sequences in the generated text. A higher value (e.g., 1.5) will penalize repetitions more strongly, while a lower value (e.g., 1.1) will be more lenient. At 1, it is disabled.": "",
 	"Controls": "Steuerung",
-	"Controls the balance between coherence and diversity of the output. A lower value will result in more focused and coherent text. (Default: 5.0)": "Kontrolliert das Gleichgewicht zwischen Kohärenz und Vielfalt des Ausgabetextes. Ein niedrigerer Wert führt zu fokussierterem und kohärenterem Text. (Standard: 5.0)",
+	"Controls the balance between coherence and diversity of the output. A lower value will result in more focused and coherent text.": "",
 	"Copied": "Kopiert",
 	"Copied shared chat URL to clipboard!": "Freigabelink in die Zwischenablage kopiert!",
 	"Copied to clipboard": "In die Zwischenablage kopiert",
@@ -358,7 +358,7 @@
 	"Enable Memory Locking (mlock) to prevent model data from being swapped out of RAM. This option locks the model's working set of pages into RAM, ensuring that they will not be swapped out to disk. This can help maintain performance by avoiding page faults and ensuring fast data access.": "Aktiviere Memory Locking (mlock), um zu verhindern, dass Modelldaten aus dem RAM ausgelagert werden. Diese Option sperrt die Arbeitsseiten des Modells im RAM, um sicherzustellen, dass sie nicht auf die Festplatte ausgelagert werden. Dies kann die Leistung verbessern, indem Page Faults vermieden und ein schneller Datenzugriff sichergestellt werden.",
 	"Enable Memory Mapping (mmap) to load model data. This option allows the system to use disk storage as an extension of RAM by treating disk files as if they were in RAM. This can improve model performance by allowing for faster data access. However, it may not work correctly with all systems and can consume a significant amount of disk space.": "Aktiviere Memory Mapping (mmap), um Modelldaten zu laden. Diese Option ermöglicht es dem System, den Festplattenspeicher als Erweiterung des RAM zu verwenden, indem Festplattendateien so behandelt werden, als ob sie im RAM wären. Dies kann die Modellleistung verbessern, indem ein schnellerer Datenzugriff ermöglicht wird. Es kann jedoch nicht auf allen Systemen korrekt funktionieren und einen erheblichen Teil des Festplattenspeichers beanspruchen.",
 	"Enable Message Rating": "Nachrichtenbewertung aktivieren",
-	"Enable Mirostat sampling for controlling perplexity. (Default: 0, 0 = Disabled, 1 = Mirostat, 2 = Mirostat 2.0)": "Mirostat Sampling zur Steuerung der Perplexität aktivieren. (Standard: 0, 0 = Deaktiviert, 1 = Mirostat, 2 = Mirostat 2.0)",
+	"Enable Mirostat sampling for controlling perplexity.": "",
 	"Enable New Sign Ups": "Registrierung erlauben",
 	"Enabled": "Aktiviert",
 	"Ensure your CSV file includes 4 columns in this order: Name, Email, Password, Role.": "Stellen Sie sicher, dass Ihre CSV-Datei 4 Spalten in dieser Reihenfolge enthält: Name, E-Mail, Passwort, Rolle.",
@@ -566,7 +566,7 @@
 	"Include": "Einschließen",
 	"Include `--api-auth` flag when running stable-diffusion-webui": "Fügen Sie beim Ausführen von stable-diffusion-webui die Option `--api-auth` hinzu",
 	"Include `--api` flag when running stable-diffusion-webui": "Fügen Sie beim Ausführen von stable-diffusion-webui die Option `--api` hinzu",
-	"Influences how quickly the algorithm responds to feedback from the generated text. A lower learning rate will result in slower adjustments, while a higher learning rate will make the algorithm more responsive. (Default: 0.1)": "Beeinflusst, wie schnell der Algorithmus auf Feedback aus dem generierten Text reagiert. Eine niedrigere Lernrate führt zu langsameren Anpassungen, während eine höhere Lernrate den Algorithmus reaktionsschneller macht. (Standard: 0.1)",
+	"Influences how quickly the algorithm responds to feedback from the generated text. A lower learning rate will result in slower adjustments, while a higher learning rate will make the algorithm more responsive.": "",
 	"Info": "Info",
 	"Input commands": "Eingabebefehle",
 	"Install from Github URL": "Installiere von der Github-URL",
@@ -809,7 +809,7 @@
 	"Reasoning Effort": "Schlussfolgerungsaufwand",
 	"Record voice": "Stimme aufnehmen",
 	"Redirecting you to Open WebUI Community": "Sie werden zur OpenWebUI-Community weitergeleitet",
-	"Reduces the probability of generating nonsense. A higher value (e.g. 100) will give more diverse answers, while a lower value (e.g. 10) will be more conservative. (Default: 40)": "Reduziert die Wahrscheinlichkeit, Unsinn zu generieren. Ein höherer Wert (z.B. 100) liefert vielfältigere Antworten, während ein niedrigerer Wert (z.B. 10) konservativer ist. (Standard: 40)",
+	"Reduces the probability of generating nonsense. A higher value (e.g. 100) will give more diverse answers, while a lower value (e.g. 10) will be more conservative.": "",
 	"Refer to yourself as \"User\" (e.g., \"User is learning Spanish\")": "Beziehen Sie sich auf sich selbst als \"Benutzer\" (z. B. \"Benutzer lernt Spanisch\")",
 	"References from": "Referenzen aus",
 	"Refused when it shouldn't have": "Abgelehnt, obwohl es nicht hätte abgelehnt werden sollen",
@@ -918,11 +918,11 @@
 	"Set the number of worker threads used for computation. This option controls how many threads are used to process incoming requests concurrently. Increasing this value can improve performance under high concurrency workloads but may also consume more CPU resources.": "Legt die Anzahl der für die Berechnung verwendeten GPU-Geräte fest. Diese Option steuert, wie viele GPU-Geräte (falls verfügbar) zur Verarbeitung eingehender Anfragen verwendet werden. Eine Erhöhung dieses Wertes kann die Leistung für Modelle, die für GPU-Beschleunigung optimiert sind, erheblich verbessern, kann jedoch auch mehr Strom und GPU-Ressourcen verbrauchen.",
 	"Set Voice": "Stimme festlegen",
 	"Set whisper model": "Whisper-Modell festlegen",
-	"Sets a flat bias against tokens that have appeared at least once. A higher value (e.g., 1.5) will penalize repetitions more strongly, while a lower value (e.g., 0.9) will be more lenient. At 0, it is disabled. (Default: 0)": "",
-	"Sets a scaling bias against tokens to penalize repetitions, based on how many times they have appeared. A higher value (e.g., 1.5) will penalize repetitions more strongly, while a lower value (e.g., 0.9) will be more lenient. At 0, it is disabled. (Default: 1.1)": "",
-	"Sets how far back for the model to look back to prevent repetition. (Default: 64, 0 = disabled, -1 = num_ctx)": "Legt fest, wie weit das Modell zurückblicken soll, um Wiederholungen zu verhindern. (Standard: 64, 0 = deaktiviert, -1 = num_ctx)",
-	"Sets the random number seed to use for generation. Setting this to a specific number will make the model generate the same text for the same prompt. (Default: random)": "Legt den Zufallszahlengenerator-Seed für die Generierung fest. Wenn dieser auf eine bestimmte Zahl gesetzt wird, erzeugt das Modell denselben Text für denselben Prompt. (Standard: zufällig)",
-	"Sets the size of the context window used to generate the next token. (Default: 2048)": "Legt die Größe des Kontextfensters fest, das zur Generierung des nächsten Tokens verwendet wird. (Standard: 2048)",
+	"Sets a flat bias against tokens that have appeared at least once. A higher value (e.g., 1.5) will penalize repetitions more strongly, while a lower value (e.g., 0.9) will be more lenient. At 0, it is disabled.": "",
+	"Sets a scaling bias against tokens to penalize repetitions, based on how many times they have appeared. A higher value (e.g., 1.5) will penalize repetitions more strongly, while a lower value (e.g., 0.9) will be more lenient. At 0, it is disabled.": "",
+	"Sets how far back for the model to look back to prevent repetition.": "",
+	"Sets the random number seed to use for generation. Setting this to a specific number will make the model generate the same text for the same prompt.": "",
+	"Sets the size of the context window used to generate the next token.": "",
 	"Sets the stop sequences to use. When this pattern is encountered, the LLM will stop generating text and return. Multiple stop patterns may be set by specifying multiple separate stop parameters in a modelfile.": "Legt die zu verwendenden Stoppsequenzen fest. Wenn dieses Muster erkannt wird, stoppt das LLM die Textgenerierung und gibt zurück. Mehrere Stoppmuster können festgelegt werden, indem mehrere separate Stopp-Parameter in einer Modelldatei angegeben werden.",
 	"Settings": "Einstellungen",
 	"Settings saved successfully!": "Einstellungen erfolgreich gespeichert!",
@@ -964,7 +964,7 @@
 	"System Prompt": "System-Prompt",
 	"Tags Generation": "Tag-Generierung",
 	"Tags Generation Prompt": "Prompt für Tag-Generierung",
-	"Tail free sampling is used to reduce the impact of less probable tokens from the output. A higher value (e.g., 2.0) will reduce the impact more, while a value of 1.0 disables this setting. (default: 1)": "Tail-Free Sampling wird verwendet, um den Einfluss weniger wahrscheinlicher Tokens auf die Ausgabe zu reduzieren. Ein höherer Wert (z.B. 2.0) reduziert den Einfluss stärker, während ein Wert von 1.0 diese Einstellung deaktiviert. (Standard: 1)",
+	"Tail free sampling is used to reduce the impact of less probable tokens from the output. A higher value (e.g., 2.0) will reduce the impact more, while a value of 1.0 disables this setting.": "",
 	"Talk to model": "",
 	"Tap to interrupt": "Zum Unterbrechen tippen",
 	"Tasks": "",
@@ -979,7 +979,7 @@
 	"Thanks for your feedback!": "Danke für Ihr Feedback!",
 	"The Application Account DN you bind with for search": "Der Anwendungs-Konto-DN, mit dem Sie für die Suche binden",
 	"The base to search for users": "Die Basis, in der nach Benutzern gesucht wird",
-	"The batch size determines how many text requests are processed together at once. A higher batch size can increase the performance and speed of the model, but it also requires more memory.  (Default: 512)": "Die Batch-Größe bestimmt, wie viele Textanfragen gleichzeitig verarbeitet werden. Eine größere Batch-Größe kann die Leistung und Geschwindigkeit des Modells erhöhen, erfordert jedoch auch mehr Speicher. (Standard: 512)",
+	"The batch size determines how many text requests are processed together at once. A higher batch size can increase the performance and speed of the model, but it also requires more memory.": "",
 	"The developers behind this plugin are passionate volunteers from the community. If you find this plugin helpful, please consider contributing to its development.": "Die Entwickler hinter diesem Plugin sind leidenschaftliche Freiwillige aus der Community. Wenn Sie dieses Plugin hilfreich finden, erwägen Sie bitte, zu seiner Entwicklung beizutragen.",
 	"The evaluation leaderboard is based on the Elo rating system and is updated in real-time.": "Die Bewertungs-Bestenliste basiert auf dem Elo-Bewertungssystem und wird in Echtzeit aktualisiert.",
 	"The LDAP attribute that maps to the mail that users use to sign in.": "Das LDAP-Attribut, das der Mail zugeordnet ist, die Benutzer zum Anmelden verwenden.",
@@ -988,14 +988,14 @@
 	"The maximum file size in MB. If the file size exceeds this limit, the file will not be uploaded.": "Die maximale Dateigröße in MB. Wenn die Dateigröße dieses Limit überschreitet, wird die Datei nicht hochgeladen.",
 	"The maximum number of files that can be used at once in chat. If the number of files exceeds this limit, the files will not be uploaded.": "Die maximale Anzahl von Dateien, die gleichzeitig in der Unterhaltung verwendet werden können. Wenn die Anzahl der Dateien dieses Limit überschreitet, werden die Dateien nicht hochgeladen.",
 	"The score should be a value between 0.0 (0%) and 1.0 (100%).": "Die Punktzahl sollte ein Wert zwischen 0,0 (0 %) und 1,0 (100 %) sein.",
-	"The temperature of the model. Increasing the temperature will make the model answer more creatively. (Default: 0.8)": "Die Temperatur des Modells. Eine Erhöhung der Temperatur führt dazu, dass das Modell kreativer antwortet. (Standard: 0,8)",
+	"The temperature of the model. Increasing the temperature will make the model answer more creatively.": "",
 	"Theme": "Design",
 	"Thinking...": "Denke nach...",
 	"This action cannot be undone. Do you wish to continue?": "Diese Aktion kann nicht rückgängig gemacht werden. Möchten Sie fortfahren?",
 	"This ensures that your valuable conversations are securely saved to your backend database. Thank you!": "Dies stellt sicher, dass Ihre wertvollen Unterhaltungen sicher in Ihrer Backend-Datenbank gespeichert werden. Vielen Dank!",
 	"This is an experimental feature, it may not function as expected and is subject to change at any time.": "Dies ist eine experimentelle Funktion, sie funktioniert möglicherweise nicht wie erwartet und kann jederzeit geändert werden.",
-	"This option controls how many tokens are preserved when refreshing the context. For example, if set to 2, the last 2 tokens of the conversation context will be retained. Preserving context can help maintain the continuity of a conversation, but it may reduce the ability to respond to new topics. (Default: 24)": "Diese Option steuert, wie viele Tokens beim Aktualisieren des Kontexts beibehalten werden. Wenn sie beispielsweise auf 2 gesetzt ist, werden die letzten 2 Tokens des Gesprächskontexts beibehalten. Das Beibehalten des Kontexts kann helfen, die Kontinuität eines Gesprächs aufrechtzuerhalten, kann jedoch die Fähigkeit verringern, auf neue Themen zu reagieren. (Standard: 24)",
-	"This option sets the maximum number of tokens the model can generate in its response. Increasing this limit allows the model to provide longer answers, but it may also increase the likelihood of unhelpful or irrelevant content being generated.  (Default: 128)": "Diese Option legt die maximale Anzahl von Tokens fest, die das Modell in seiner Antwort generieren kann. Eine Erhöhung dieses Limits ermöglicht es dem Modell, längere Antworten zu geben, kann jedoch auch die Wahrscheinlichkeit erhöhen, dass unhilfreicher oder irrelevanter Inhalt generiert wird. (Standard: 128)",
+	"This option controls how many tokens are preserved when refreshing the context. For example, if set to 2, the last 2 tokens of the conversation context will be retained. Preserving context can help maintain the continuity of a conversation, but it may reduce the ability to respond to new topics.": "",
+	"This option sets the maximum number of tokens the model can generate in its response. Increasing this limit allows the model to provide longer answers, but it may also increase the likelihood of unhelpful or irrelevant content being generated.": "",
 	"This option will delete all existing files in the collection and replace them with newly uploaded files.": "Diese Option löscht alle vorhandenen Dateien in der Sammlung und ersetzt sie durch neu hochgeladene Dateien.",
 	"This response was generated by \"{{model}}\"": "Diese Antwort wurde von \"{{model}}\" generiert",
 	"This will delete": "Dies löscht",
@@ -1132,7 +1132,7 @@
 	"Why?": "Warum?",
 	"Widescreen Mode": "Breitbildmodus",
 	"Won": "Gewonnen",
-	"Works together with top-k. A higher value (e.g., 0.95) will lead to more diverse text, while a lower value (e.g., 0.5) will generate more focused and conservative text. (Default: 0.9)": "Funktioniert zusammen mit top-k. Ein höherer Wert (z.B. 0,95) führt zu vielfältigerem Text, während ein niedrigerer Wert (z.B. 0,5) fokussierteren und konservativeren Text erzeugt. (Standard: 0,9)",
+	"Works together with top-k. A higher value (e.g., 0.95) will lead to more diverse text, while a lower value (e.g., 0.5) will generate more focused and conservative text.": "",
 	"Workspace": "Arbeitsbereich",
 	"Workspace Permissions": "Arbeitsbereichsberechtigungen",
 	"Write": "Schreiben",

+ 18 - 18
src/lib/i18n/locales/dg-DG/translation.json

@@ -64,7 +64,7 @@
 	"Allow Voice Interruption in Call": "",
 	"Allowed Endpoints": "",
 	"Already have an account?": "Such account exists?",
-	"Alternative to the top_p, and aims to ensure a balance of quality and variety. The parameter p represents the minimum probability for a token to be considered, relative to the probability of the most likely token. For example, with p=0.05 and the most likely token having a probability of 0.9, logits with a value less than 0.045 are filtered out. (Default: 0.0)": "",
+	"Alternative to the top_p, and aims to ensure a balance of quality and variety. The parameter p represents the minimum probability for a token to be considered, relative to the probability of the most likely token. For example, with p=0.05 and the most likely token having a probability of 0.9, logits with a value less than 0.045 are filtered out.": "",
 	"Always": "",
 	"Amazing": "",
 	"an assistant": "such assistant",
@@ -208,7 +208,7 @@
 	"Confirm your new password": "",
 	"Connect to your own OpenAI compatible API endpoints.": "",
 	"Connections": "Connections",
-	"Constrains effort on reasoning for reasoning models. Only applicable to reasoning models from specific providers that support reasoning effort. (Default: medium)": "",
+	"Constrains effort on reasoning for reasoning models. Only applicable to reasoning models from specific providers that support reasoning effort.": "",
 	"Contact Admin for WebUI Access": "",
 	"Content": "Content",
 	"Content Extraction Engine": "",
@@ -218,9 +218,9 @@
 	"Continue with Email": "",
 	"Continue with LDAP": "",
 	"Control how message text is split for TTS requests. 'Punctuation' splits into sentences, 'paragraphs' splits into paragraphs, and 'none' keeps the message as a single string.": "",
-	"Control the repetition of token sequences in the generated text. A higher value (e.g., 1.5) will penalize repetitions more strongly, while a lower value (e.g., 1.1) will be more lenient. At 1, it is disabled. (Default: 1.1)": "",
+	"Control the repetition of token sequences in the generated text. A higher value (e.g., 1.5) will penalize repetitions more strongly, while a lower value (e.g., 1.1) will be more lenient. At 1, it is disabled.": "",
 	"Controls": "",
-	"Controls the balance between coherence and diversity of the output. A lower value will result in more focused and coherent text. (Default: 5.0)": "",
+	"Controls the balance between coherence and diversity of the output. A lower value will result in more focused and coherent text.": "",
 	"Copied": "",
 	"Copied shared chat URL to clipboard!": "",
 	"Copied to clipboard": "",
@@ -358,7 +358,7 @@
 	"Enable Memory Locking (mlock) to prevent model data from being swapped out of RAM. This option locks the model's working set of pages into RAM, ensuring that they will not be swapped out to disk. This can help maintain performance by avoiding page faults and ensuring fast data access.": "",
 	"Enable Memory Mapping (mmap) to load model data. This option allows the system to use disk storage as an extension of RAM by treating disk files as if they were in RAM. This can improve model performance by allowing for faster data access. However, it may not work correctly with all systems and can consume a significant amount of disk space.": "",
 	"Enable Message Rating": "",
-	"Enable Mirostat sampling for controlling perplexity. (Default: 0, 0 = Disabled, 1 = Mirostat, 2 = Mirostat 2.0)": "",
+	"Enable Mirostat sampling for controlling perplexity.": "",
 	"Enable New Sign Ups": "Enable New Bark Ups",
 	"Enabled": "",
 	"Ensure your CSV file includes 4 columns in this order: Name, Email, Password, Role.": "",
@@ -566,7 +566,7 @@
 	"Include": "",
 	"Include `--api-auth` flag when running stable-diffusion-webui": "",
 	"Include `--api` flag when running stable-diffusion-webui": "Include `--api` flag when running stable-diffusion-webui",
-	"Influences how quickly the algorithm responds to feedback from the generated text. A lower learning rate will result in slower adjustments, while a higher learning rate will make the algorithm more responsive. (Default: 0.1)": "",
+	"Influences how quickly the algorithm responds to feedback from the generated text. A lower learning rate will result in slower adjustments, while a higher learning rate will make the algorithm more responsive.": "",
 	"Info": "",
 	"Input commands": "Input commands",
 	"Install from Github URL": "",
@@ -809,7 +809,7 @@
 	"Reasoning Effort": "",
 	"Record voice": "Record Bark",
 	"Redirecting you to Open WebUI Community": "Redirecting you to Open WebUI Community",
-	"Reduces the probability of generating nonsense. A higher value (e.g. 100) will give more diverse answers, while a lower value (e.g. 10) will be more conservative. (Default: 40)": "",
+	"Reduces the probability of generating nonsense. A higher value (e.g. 100) will give more diverse answers, while a lower value (e.g. 10) will be more conservative.": "",
 	"Refer to yourself as \"User\" (e.g., \"User is learning Spanish\")": "",
 	"References from": "",
 	"Refused when it shouldn't have": "",
@@ -918,11 +918,11 @@
 	"Set the number of worker threads used for computation. This option controls how many threads are used to process incoming requests concurrently. Increasing this value can improve performance under high concurrency workloads but may also consume more CPU resources.": "",
 	"Set Voice": "Set Voice so speak",
 	"Set whisper model": "",
-	"Sets a flat bias against tokens that have appeared at least once. A higher value (e.g., 1.5) will penalize repetitions more strongly, while a lower value (e.g., 0.9) will be more lenient. At 0, it is disabled. (Default: 0)": "",
-	"Sets a scaling bias against tokens to penalize repetitions, based on how many times they have appeared. A higher value (e.g., 1.5) will penalize repetitions more strongly, while a lower value (e.g., 0.9) will be more lenient. At 0, it is disabled. (Default: 1.1)": "",
-	"Sets how far back for the model to look back to prevent repetition. (Default: 64, 0 = disabled, -1 = num_ctx)": "",
-	"Sets the random number seed to use for generation. Setting this to a specific number will make the model generate the same text for the same prompt. (Default: random)": "",
-	"Sets the size of the context window used to generate the next token. (Default: 2048)": "",
+	"Sets a flat bias against tokens that have appeared at least once. A higher value (e.g., 1.5) will penalize repetitions more strongly, while a lower value (e.g., 0.9) will be more lenient. At 0, it is disabled.": "",
+	"Sets a scaling bias against tokens to penalize repetitions, based on how many times they have appeared. A higher value (e.g., 1.5) will penalize repetitions more strongly, while a lower value (e.g., 0.9) will be more lenient. At 0, it is disabled.": "",
+	"Sets how far back for the model to look back to prevent repetition.": "",
+	"Sets the random number seed to use for generation. Setting this to a specific number will make the model generate the same text for the same prompt.": "",
+	"Sets the size of the context window used to generate the next token.": "",
 	"Sets the stop sequences to use. When this pattern is encountered, the LLM will stop generating text and return. Multiple stop patterns may be set by specifying multiple separate stop parameters in a modelfile.": "",
 	"Settings": "Settings much settings",
 	"Settings saved successfully!": "Settings saved successfully! Very success!",
@@ -964,7 +964,7 @@
 	"System Prompt": "System Prompt much prompt",
 	"Tags Generation": "",
 	"Tags Generation Prompt": "",
-	"Tail free sampling is used to reduce the impact of less probable tokens from the output. A higher value (e.g., 2.0) will reduce the impact more, while a value of 1.0 disables this setting. (default: 1)": "",
+	"Tail free sampling is used to reduce the impact of less probable tokens from the output. A higher value (e.g., 2.0) will reduce the impact more, while a value of 1.0 disables this setting.": "",
 	"Talk to model": "",
 	"Tap to interrupt": "",
 	"Tasks": "",
@@ -979,7 +979,7 @@
 	"Thanks for your feedback!": "",
 	"The Application Account DN you bind with for search": "",
 	"The base to search for users": "",
-	"The batch size determines how many text requests are processed together at once. A higher batch size can increase the performance and speed of the model, but it also requires more memory.  (Default: 512)": "",
+	"The batch size determines how many text requests are processed together at once. A higher batch size can increase the performance and speed of the model, but it also requires more memory.": "",
 	"The developers behind this plugin are passionate volunteers from the community. If you find this plugin helpful, please consider contributing to its development.": "",
 	"The evaluation leaderboard is based on the Elo rating system and is updated in real-time.": "",
 	"The LDAP attribute that maps to the mail that users use to sign in.": "",
@@ -988,14 +988,14 @@
 	"The maximum file size in MB. If the file size exceeds this limit, the file will not be uploaded.": "",
 	"The maximum number of files that can be used at once in chat. If the number of files exceeds this limit, the files will not be uploaded.": "",
 	"The score should be a value between 0.0 (0%) and 1.0 (100%).": "",
-	"The temperature of the model. Increasing the temperature will make the model answer more creatively. (Default: 0.8)": "",
+	"The temperature of the model. Increasing the temperature will make the model answer more creatively.": "",
 	"Theme": "Theme much theme",
 	"Thinking...": "",
 	"This action cannot be undone. Do you wish to continue?": "",
 	"This ensures that your valuable conversations are securely saved to your backend database. Thank you!": "This ensures that your valuable conversations are securely saved to your backend database. Thank you! Much secure!",
 	"This is an experimental feature, it may not function as expected and is subject to change at any time.": "",
-	"This option controls how many tokens are preserved when refreshing the context. For example, if set to 2, the last 2 tokens of the conversation context will be retained. Preserving context can help maintain the continuity of a conversation, but it may reduce the ability to respond to new topics. (Default: 24)": "",
-	"This option sets the maximum number of tokens the model can generate in its response. Increasing this limit allows the model to provide longer answers, but it may also increase the likelihood of unhelpful or irrelevant content being generated.  (Default: 128)": "",
+	"This option controls how many tokens are preserved when refreshing the context. For example, if set to 2, the last 2 tokens of the conversation context will be retained. Preserving context can help maintain the continuity of a conversation, but it may reduce the ability to respond to new topics.": "",
+	"This option sets the maximum number of tokens the model can generate in its response. Increasing this limit allows the model to provide longer answers, but it may also increase the likelihood of unhelpful or irrelevant content being generated.": "",
 	"This option will delete all existing files in the collection and replace them with newly uploaded files.": "",
 	"This response was generated by \"{{model}}\"": "",
 	"This will delete": "",
@@ -1132,7 +1132,7 @@
 	"Why?": "",
 	"Widescreen Mode": "",
 	"Won": "",
-	"Works together with top-k. A higher value (e.g., 0.95) will lead to more diverse text, while a lower value (e.g., 0.5) will generate more focused and conservative text. (Default: 0.9)": "",
+	"Works together with top-k. A higher value (e.g., 0.95) will lead to more diverse text, while a lower value (e.g., 0.5) will generate more focused and conservative text.": "",
 	"Workspace": "",
 	"Workspace Permissions": "",
 	"Write": "",

+ 18 - 18
src/lib/i18n/locales/el-GR/translation.json

@@ -64,7 +64,7 @@
 	"Allow Voice Interruption in Call": "Επιτρέπεται η Παύση Φωνής στην Κλήση",
 	"Allowed Endpoints": "",
 	"Already have an account?": "Έχετε ήδη λογαριασμό;",
-	"Alternative to the top_p, and aims to ensure a balance of quality and variety. The parameter p represents the minimum probability for a token to be considered, relative to the probability of the most likely token. For example, with p=0.05 and the most likely token having a probability of 0.9, logits with a value less than 0.045 are filtered out. (Default: 0.0)": "Εναλλακτικό στο top_p, και στοχεύει στη διασφάλιση μιας ισορροπίας μεταξύ ποιότητας και ποικιλίας. Η παράμετρος p αντιπροσωπεύει την ελάχιστη πιθανότητα για ένα token να θεωρηθεί, σε σχέση με την πιθανότητα του πιο πιθανού token. Για παράδειγμα, με p=0.05 και το πιο πιθανό token να έχει πιθανότητα 0.9, τα logits με τιμή μικρότερη από 0.045 φιλτράρονται. (Προεπιλογή: 0.0)",
+	"Alternative to the top_p, and aims to ensure a balance of quality and variety. The parameter p represents the minimum probability for a token to be considered, relative to the probability of the most likely token. For example, with p=0.05 and the most likely token having a probability of 0.9, logits with a value less than 0.045 are filtered out.": "",
 	"Always": "",
 	"Amazing": "Καταπληκτικό",
 	"an assistant": "ένας βοηθός",
@@ -208,7 +208,7 @@
 	"Confirm your new password": "",
 	"Connect to your own OpenAI compatible API endpoints.": "",
 	"Connections": "Συνδέσεις",
-	"Constrains effort on reasoning for reasoning models. Only applicable to reasoning models from specific providers that support reasoning effort. (Default: medium)": "",
+	"Constrains effort on reasoning for reasoning models. Only applicable to reasoning models from specific providers that support reasoning effort.": "",
 	"Contact Admin for WebUI Access": "Επικοινωνήστε με τον Διαχειριστή για Πρόσβαση στο WebUI",
 	"Content": "Περιεχόμενο",
 	"Content Extraction Engine": "",
@@ -218,9 +218,9 @@
 	"Continue with Email": "Συνέχεια με Email",
 	"Continue with LDAP": "Συνέχεια με LDAP",
 	"Control how message text is split for TTS requests. 'Punctuation' splits into sentences, 'paragraphs' splits into paragraphs, and 'none' keeps the message as a single string.": "Έλεγχος πώς διαχωρίζεται το κείμενο του μηνύματος για αιτήματα TTS. Το 'Στίξη' διαχωρίζει σε προτάσεις, οι 'παραγράφοι' σε παραγράφους, και το 'κανένα' κρατά το μήνυμα ως μια αλυσίδα.",
-	"Control the repetition of token sequences in the generated text. A higher value (e.g., 1.5) will penalize repetitions more strongly, while a lower value (e.g., 1.1) will be more lenient. At 1, it is disabled. (Default: 1.1)": "",
+	"Control the repetition of token sequences in the generated text. A higher value (e.g., 1.5) will penalize repetitions more strongly, while a lower value (e.g., 1.1) will be more lenient. At 1, it is disabled.": "",
 	"Controls": "Έλεγχοι",
-	"Controls the balance between coherence and diversity of the output. A lower value will result in more focused and coherent text. (Default: 5.0)": "Διαχειρίζεται την ισορροπία μεταξύ συνεκτικότητας και ποικιλίας της εξόδου. Μια χαμηλότερη τιμή θα έχει ως αποτέλεσμα πιο εστιασμένο και συνεκτικό κείμενο. (Προεπιλογή: 5.0)",
+	"Controls the balance between coherence and diversity of the output. A lower value will result in more focused and coherent text.": "",
 	"Copied": "Αντιγράφηκε",
 	"Copied shared chat URL to clipboard!": "Αντιγράφηκε το URL της κοινόχρηστης συνομιλίας στο πρόχειρο!",
 	"Copied to clipboard": "Αντιγράφηκε στο πρόχειρο",
@@ -358,7 +358,7 @@
 	"Enable Memory Locking (mlock) to prevent model data from being swapped out of RAM. This option locks the model's working set of pages into RAM, ensuring that they will not be swapped out to disk. This can help maintain performance by avoiding page faults and ensuring fast data access.": "Ενεργοποίηση Κλείδωσης Μνήμης (mlock) για την αποτροπή της ανταλλαγής δεδομένων του μοντέλου από τη μνήμη RAM. Αυτή η επιλογή κλειδώνει το σύνολο εργασίας των σελίδων του μοντέλου στη μνήμη RAM, διασφαλίζοντας ότι δεν θα ανταλλαχθούν στο δίσκο. Αυτό μπορεί να βοηθήσει στη διατήρηση της απόδοσης αποφεύγοντας σφάλματα σελίδων και διασφαλίζοντας γρήγορη πρόσβαση στα δεδομένα.",
 	"Enable Memory Mapping (mmap) to load model data. This option allows the system to use disk storage as an extension of RAM by treating disk files as if they were in RAM. This can improve model performance by allowing for faster data access. However, it may not work correctly with all systems and can consume a significant amount of disk space.": "Ενεργοποίηση Χαρτογράφησης Μνήμης (mmap) για φόρτωση δεδομένων μοντέλου. Αυτή η επιλογή επιτρέπει στο σύστημα να χρησιμοποιεί αποθήκευση δίσκου ως επέκταση της μνήμης RAM, αντιμετωπίζοντας αρχεία δίσκου σαν να ήταν στη μνήμη RAM. Αυτό μπορεί να βελτιώσει την απόδοση του μοντέλου επιτρέποντας γρηγορότερη πρόσβαση στα δεδομένα. Ωστόσο, μπορεί να μην λειτουργεί σωστά με όλα τα συστήματα και να καταναλώνει σημαντικό χώρο στο δίσκο.",
 	"Enable Message Rating": "Ενεργοποίηση Αξιολόγησης Μηνυμάτων",
-	"Enable Mirostat sampling for controlling perplexity. (Default: 0, 0 = Disabled, 1 = Mirostat, 2 = Mirostat 2.0)": "Ενεργοποίηση δειγματοληψίας Mirostat για έλεγχο της περιπλοκότητας. (Προεπιλογή: 0, 0 = Απενεργοποιημένο, 1 = Mirostat, 2 = Mirostat 2.0)",
+	"Enable Mirostat sampling for controlling perplexity.": "",
 	"Enable New Sign Ups": "Ενεργοποίηση Νέων Εγγραφών",
 	"Enabled": "Ενεργοποιημένο",
 	"Ensure your CSV file includes 4 columns in this order: Name, Email, Password, Role.": "Βεβαιωθείτε ότι το αρχείο CSV σας περιλαμβάνει 4 στήλες με αυτή τη σειρά: Όνομα, Email, Κωδικός, Ρόλος.",
@@ -566,7 +566,7 @@
 	"Include": "Συμπερίληψη",
 	"Include `--api-auth` flag when running stable-diffusion-webui": "Συμπεριλάβετε το flag `--api-auth` όταν τρέχετε το stable-diffusion-webui",
 	"Include `--api` flag when running stable-diffusion-webui": "Συμπεριλάβετε το flag `--api` όταν τρέχετε το stable-diffusion-webui",
-	"Influences how quickly the algorithm responds to feedback from the generated text. A lower learning rate will result in slower adjustments, while a higher learning rate will make the algorithm more responsive. (Default: 0.1)": "Επηρεάζει πόσο γρήγορα ανταποκρίνεται ο αλγόριθμος στην ανατροφοδότηση από το παραγόμενο κείμενο. Μια χαμηλότερη ταχύτητα μάθησης θα έχει ως αποτέλεσμα πιο αργές προσαρμογές, ενώ μια υψηλότερη ταχύτητα μάθησης θα κάνει τον αλγόριθμο πιο ανταποκρινόμενο. (Προεπιλογή: 0.1)",
+	"Influences how quickly the algorithm responds to feedback from the generated text. A lower learning rate will result in slower adjustments, while a higher learning rate will make the algorithm more responsive.": "",
 	"Info": "Πληροφορίες",
 	"Input commands": "Εισαγωγή εντολών",
 	"Install from Github URL": "Εγκατάσταση από URL Github",
@@ -809,7 +809,7 @@
 	"Reasoning Effort": "",
 	"Record voice": "Εγγραφή φωνής",
 	"Redirecting you to Open WebUI Community": "Μετακατεύθυνση στην Κοινότητα OpenWebUI",
-	"Reduces the probability of generating nonsense. A higher value (e.g. 100) will give more diverse answers, while a lower value (e.g. 10) will be more conservative. (Default: 40)": "Μειώνει την πιθανότητα δημιουργίας ανοησιών. Μια υψηλότερη τιμή (π.χ. 100) θα δώσει πιο ποικίλες απαντήσεις, ενώ μια χαμηλότερη τιμή (π.χ. 10) θα δημιουργήσει πιο συντηρητικές απαντήσεις. (Προεπιλογή: 40)",
+	"Reduces the probability of generating nonsense. A higher value (e.g. 100) will give more diverse answers, while a lower value (e.g. 10) will be more conservative.": "",
 	"Refer to yourself as \"User\" (e.g., \"User is learning Spanish\")": "Αναφέρεστε στον εαυτό σας ως \"User\" (π.χ., \"User μαθαίνει Ισπανικά\")",
 	"References from": "Αναφορές από",
 	"Refused when it shouldn't have": "Αρνήθηκε όταν δεν έπρεπε",
@@ -918,11 +918,11 @@
 	"Set the number of worker threads used for computation. This option controls how many threads are used to process incoming requests concurrently. Increasing this value can improve performance under high concurrency workloads but may also consume more CPU resources.": "Ορισμός του αριθμού των νημάτων εργασίας που χρησιμοποιούνται για υπολογισμούς. Αυτή η επιλογή ελέγχει πόσα νήματα χρησιμοποιούνται για την επεξεργασία των εισερχόμενων αιτημάτων ταυτόχρονα. Η αύξηση αυτής της τιμής μπορεί να βελτιώσει την απόδοση σε εργασίες υψηλής συγχρονισμένης φόρτωσης αλλά μπορεί επίσης να καταναλώσει περισσότερους πόρους CPU.",
 	"Set Voice": "Ορισμός Φωνής",
 	"Set whisper model": "Ορισμός μοντέλου whisper",
-	"Sets a flat bias against tokens that have appeared at least once. A higher value (e.g., 1.5) will penalize repetitions more strongly, while a lower value (e.g., 0.9) will be more lenient. At 0, it is disabled. (Default: 0)": "",
-	"Sets a scaling bias against tokens to penalize repetitions, based on how many times they have appeared. A higher value (e.g., 1.5) will penalize repetitions more strongly, while a lower value (e.g., 0.9) will be more lenient. At 0, it is disabled. (Default: 1.1)": "",
-	"Sets how far back for the model to look back to prevent repetition. (Default: 64, 0 = disabled, -1 = num_ctx)": "Ορίζει πόσο πίσω θα κοιτάξει το μοντέλο για να αποτρέψει την επανάληψη. (Προεπιλογή: 64, 0 = απενεργοποιημένο, -1 = num_ctx)",
-	"Sets the random number seed to use for generation. Setting this to a specific number will make the model generate the same text for the same prompt. (Default: random)": "Ορίζει τον τυχαίο σπόρο αριθμού που θα χρησιμοποιηθεί για τη δημιουργία. Ορισμός αυτού σε έναν συγκεκριμένο αριθμό θα κάνει το μοντέλο να δημιουργεί το ίδιο κείμενο για την ίδια προτροπή. (Προεπιλογή: τυχαίο)",
-	"Sets the size of the context window used to generate the next token. (Default: 2048)": "Ορίζει το μέγεθος του παραθύρου πλαισίου που χρησιμοποιείται για τη δημιουργία του επόμενου token. (Προεπιλογή: 2048)",
+	"Sets a flat bias against tokens that have appeared at least once. A higher value (e.g., 1.5) will penalize repetitions more strongly, while a lower value (e.g., 0.9) will be more lenient. At 0, it is disabled.": "",
+	"Sets a scaling bias against tokens to penalize repetitions, based on how many times they have appeared. A higher value (e.g., 1.5) will penalize repetitions more strongly, while a lower value (e.g., 0.9) will be more lenient. At 0, it is disabled.": "",
+	"Sets how far back for the model to look back to prevent repetition.": "",
+	"Sets the random number seed to use for generation. Setting this to a specific number will make the model generate the same text for the same prompt.": "",
+	"Sets the size of the context window used to generate the next token.": "",
 	"Sets the stop sequences to use. When this pattern is encountered, the LLM will stop generating text and return. Multiple stop patterns may be set by specifying multiple separate stop parameters in a modelfile.": "Ορίζει τις σειρές παύσης που θα χρησιμοποιηθούν. Όταν εντοπιστεί αυτό το μοτίβο, το LLM θα σταματήσει να δημιουργεί κείμενο και θα επιστρέψει. Πολλαπλά μοτίβα παύσης μπορούν να οριστούν καθορίζοντας πολλαπλές ξεχωριστές παραμέτρους παύσης σε ένα αρχείο μοντέλου.",
 	"Settings": "Ρυθμίσεις",
 	"Settings saved successfully!": "Οι Ρυθμίσεις αποθηκεύτηκαν με επιτυχία!",
@@ -964,7 +964,7 @@
 	"System Prompt": "Προτροπή Συστήματος",
 	"Tags Generation": "",
 	"Tags Generation Prompt": "Προτροπή Γενιάς Ετικετών",
-	"Tail free sampling is used to reduce the impact of less probable tokens from the output. A higher value (e.g., 2.0) will reduce the impact more, while a value of 1.0 disables this setting. (default: 1)": "Η δειγματοληψία Tail free χρησιμοποιείται για να μειώσει την επίδραση των λιγότερο πιθανών tokens από την έξοδο. Μια υψηλότερη τιμή (π.χ., 2.0) θα μειώσει την επίδραση περισσότερο, ενώ μια τιμή 1.0 απενεργοποιεί αυτή τη ρύθμιση. (προεπιλογή: 1)",
+	"Tail free sampling is used to reduce the impact of less probable tokens from the output. A higher value (e.g., 2.0) will reduce the impact more, while a value of 1.0 disables this setting.": "",
 	"Talk to model": "",
 	"Tap to interrupt": "Πατήστε για παύση",
 	"Tasks": "",
@@ -979,7 +979,7 @@
 	"Thanks for your feedback!": "Ευχαριστούμε για την ανατροφοδότησή σας!",
 	"The Application Account DN you bind with for search": "Το DN του Λογαριασμού Εφαρμογής που συνδέετε για αναζήτηση",
 	"The base to search for users": "Η βάση για αναζήτηση χρηστών",
-	"The batch size determines how many text requests are processed together at once. A higher batch size can increase the performance and speed of the model, but it also requires more memory.  (Default: 512)": "Το μέγεθος παρτίδας καθορίζει πόσες αιτήσεις κειμένου επεξεργάζονται μαζί ταυτόχρονα. Ένα μεγαλύτερο μέγεθος παρτίδας μπορεί να αυξήσει την απόδοση και την ταχύτητα του μοντέλου, αλλά απαιτεί επίσης περισσότερη μνήμη. (Προεπιλογή: 512)",
+	"The batch size determines how many text requests are processed together at once. A higher batch size can increase the performance and speed of the model, but it also requires more memory.": "",
 	"The developers behind this plugin are passionate volunteers from the community. If you find this plugin helpful, please consider contributing to its development.": "Οι προγραμματιστές πίσω από αυτό το plugin είναι παθιασμένοι εθελοντές από την κοινότητα. Αν βρείτε αυτό το plugin χρήσιμο, παρακαλώ σκεφτείτε να συνεισφέρετε στην ανάπτυξή του.",
 	"The evaluation leaderboard is based on the Elo rating system and is updated in real-time.": "Η κατάταξη αξιολόγησης βασίζεται στο σύστημα βαθμολόγησης Elo και ενημερώνεται σε πραγματικό χρόνο.",
 	"The LDAP attribute that maps to the mail that users use to sign in.": "",
@@ -988,14 +988,14 @@
 	"The maximum file size in MB. If the file size exceeds this limit, the file will not be uploaded.": "Το μέγιστο μέγεθος αρχείου σε MB. Αν το μέγεθος του αρχείου υπερβαίνει αυτό το όριο, το αρχείο δεν θα ανεβεί.",
 	"The maximum number of files that can be used at once in chat. If the number of files exceeds this limit, the files will not be uploaded.": "Ο μέγιστος αριθμός αρχείων που μπορούν να χρησιμοποιηθούν ταυτόχρονα στη συνομιλία. Αν ο αριθμός των αρχείων υπερβαίνει αυτό το όριο, τα αρχεία δεν θα ανεβούν.",
 	"The score should be a value between 0.0 (0%) and 1.0 (100%).": "Η βαθμολογία θα πρέπει να είναι μια τιμή μεταξύ 0.0 (0%) και 1.0 (100%).",
-	"The temperature of the model. Increasing the temperature will make the model answer more creatively. (Default: 0.8)": "Η θερμοκρασία του μοντέλου. Η αύξηση της θερμοκρασίας θα κάνει το μοντέλο να απαντά πιο δημιουργικά. (Προεπιλογή: 0.8)",
+	"The temperature of the model. Increasing the temperature will make the model answer more creatively.": "",
 	"Theme": "Θέμα",
 	"Thinking...": "Σκέφτομαι...",
 	"This action cannot be undone. Do you wish to continue?": "Αυτή η ενέργεια δεν μπορεί να αναιρεθεί. Θέλετε να συνεχίσετε;",
 	"This ensures that your valuable conversations are securely saved to your backend database. Thank you!": "Αυτό διασφαλίζει ότι οι πολύτιμες συνομιλίες σας αποθηκεύονται με ασφάλεια στη βάση δεδομένων backend σας. Ευχαριστούμε!",
 	"This is an experimental feature, it may not function as expected and is subject to change at any time.": "Αυτή είναι μια πειραματική λειτουργία, μπορεί να μην λειτουργεί όπως αναμένεται και υπόκειται σε αλλαγές οποιαδήποτε στιγμή.",
-	"This option controls how many tokens are preserved when refreshing the context. For example, if set to 2, the last 2 tokens of the conversation context will be retained. Preserving context can help maintain the continuity of a conversation, but it may reduce the ability to respond to new topics. (Default: 24)": "Αυτή η επιλογή ελέγχει πόσα tokens διατηρούνται κατά την ανανέωση του πλαισίου. Για παράδειγμα, αν οριστεί σε 2, τα τελευταία 2 tokens του πλαισίου συνομιλίας θα διατηρηθούν. Η διατήρηση του πλαισίου μπορεί να βοηθήσει στη διατήρηση της συνέχειας μιας συνομιλίας, αλλά μπορεί να μειώσει την ικανότητα ανταπόκρισης σε νέα θέματα. (Προεπιλογή: 24)",
-	"This option sets the maximum number of tokens the model can generate in its response. Increasing this limit allows the model to provide longer answers, but it may also increase the likelihood of unhelpful or irrelevant content being generated.  (Default: 128)": "Αυτή η επιλογή ορίζει τον μέγιστο αριθμό tokens που μπορεί να δημιουργήσει το μοντέλο στην απάντησή του. Η αύξηση αυτού του ορίου επιτρέπει στο μοντέλο να παρέχει μεγαλύτερες απαντήσεις, αλλά μπορεί επίσης να αυξήσει την πιθανότητα δημιουργίας αχρήσιμου ή άσχετου περιεχομένου. (Προεπιλογή: 128)",
+	"This option controls how many tokens are preserved when refreshing the context. For example, if set to 2, the last 2 tokens of the conversation context will be retained. Preserving context can help maintain the continuity of a conversation, but it may reduce the ability to respond to new topics.": "",
+	"This option sets the maximum number of tokens the model can generate in its response. Increasing this limit allows the model to provide longer answers, but it may also increase the likelihood of unhelpful or irrelevant content being generated.": "",
 	"This option will delete all existing files in the collection and replace them with newly uploaded files.": "Αυτή η επιλογή θα διαγράψει όλα τα υπάρχοντα αρχεία στη συλλογή και θα τα αντικαταστήσει με νέα ανεβασμένα αρχεία.",
 	"This response was generated by \"{{model}}\"": "Αυτή η απάντηση δημιουργήθηκε από \"{{model}}\"",
 	"This will delete": "Αυτό θα διαγράψει",
@@ -1132,7 +1132,7 @@
 	"Why?": "Γιατί?",
 	"Widescreen Mode": "Λειτουργία Οθόνης Ευρείας",
 	"Won": "Κέρδισε",
-	"Works together with top-k. A higher value (e.g., 0.95) will lead to more diverse text, while a lower value (e.g., 0.5) will generate more focused and conservative text. (Default: 0.9)": "Συνεργάζεται μαζί με top-k. Μια υψηλότερη τιμή (π.χ., 0.95) θα οδηγήσει σε πιο ποικίλο κείμενο, ενώ μια χαμηλότερη τιμή (π.χ., 0.5) θα δημιουργήσει πιο εστιασμένο και συντηρητικό κείμενο. (Προεπιλογή: 0.9)",
+	"Works together with top-k. A higher value (e.g., 0.95) will lead to more diverse text, while a lower value (e.g., 0.5) will generate more focused and conservative text.": "",
 	"Workspace": "Χώρος Εργασίας",
 	"Workspace Permissions": "Δικαιώματα Χώρου Εργασίας",
 	"Write": "",

+ 18 - 18
src/lib/i18n/locales/en-GB/translation.json

@@ -64,7 +64,7 @@
 	"Allow Voice Interruption in Call": "",
 	"Allowed Endpoints": "",
 	"Already have an account?": "",
-	"Alternative to the top_p, and aims to ensure a balance of quality and variety. The parameter p represents the minimum probability for a token to be considered, relative to the probability of the most likely token. For example, with p=0.05 and the most likely token having a probability of 0.9, logits with a value less than 0.045 are filtered out. (Default: 0.0)": "",
+	"Alternative to the top_p, and aims to ensure a balance of quality and variety. The parameter p represents the minimum probability for a token to be considered, relative to the probability of the most likely token. For example, with p=0.05 and the most likely token having a probability of 0.9, logits with a value less than 0.045 are filtered out.": "",
 	"Always": "",
 	"Amazing": "",
 	"an assistant": "",
@@ -208,7 +208,7 @@
 	"Confirm your new password": "",
 	"Connect to your own OpenAI compatible API endpoints.": "",
 	"Connections": "",
-	"Constrains effort on reasoning for reasoning models. Only applicable to reasoning models from specific providers that support reasoning effort. (Default: medium)": "",
+	"Constrains effort on reasoning for reasoning models. Only applicable to reasoning models from specific providers that support reasoning effort.": "",
 	"Contact Admin for WebUI Access": "",
 	"Content": "",
 	"Content Extraction Engine": "",
@@ -218,9 +218,9 @@
 	"Continue with Email": "",
 	"Continue with LDAP": "",
 	"Control how message text is split for TTS requests. 'Punctuation' splits into sentences, 'paragraphs' splits into paragraphs, and 'none' keeps the message as a single string.": "",
-	"Control the repetition of token sequences in the generated text. A higher value (e.g., 1.5) will penalize repetitions more strongly, while a lower value (e.g., 1.1) will be more lenient. At 1, it is disabled. (Default: 1.1)": "",
+	"Control the repetition of token sequences in the generated text. A higher value (e.g., 1.5) will penalize repetitions more strongly, while a lower value (e.g., 1.1) will be more lenient. At 1, it is disabled.": "",
 	"Controls": "",
-	"Controls the balance between coherence and diversity of the output. A lower value will result in more focused and coherent text. (Default: 5.0)": "",
+	"Controls the balance between coherence and diversity of the output. A lower value will result in more focused and coherent text.": "",
 	"Copied": "",
 	"Copied shared chat URL to clipboard!": "",
 	"Copied to clipboard": "",
@@ -358,7 +358,7 @@
 	"Enable Memory Locking (mlock) to prevent model data from being swapped out of RAM. This option locks the model's working set of pages into RAM, ensuring that they will not be swapped out to disk. This can help maintain performance by avoiding page faults and ensuring fast data access.": "",
 	"Enable Memory Mapping (mmap) to load model data. This option allows the system to use disk storage as an extension of RAM by treating disk files as if they were in RAM. This can improve model performance by allowing for faster data access. However, it may not work correctly with all systems and can consume a significant amount of disk space.": "",
 	"Enable Message Rating": "",
-	"Enable Mirostat sampling for controlling perplexity. (Default: 0, 0 = Disabled, 1 = Mirostat, 2 = Mirostat 2.0)": "",
+	"Enable Mirostat sampling for controlling perplexity.": "",
 	"Enable New Sign Ups": "",
 	"Enabled": "",
 	"Ensure your CSV file includes 4 columns in this order: Name, Email, Password, Role.": "",
@@ -566,7 +566,7 @@
 	"Include": "",
 	"Include `--api-auth` flag when running stable-diffusion-webui": "",
 	"Include `--api` flag when running stable-diffusion-webui": "",
-	"Influences how quickly the algorithm responds to feedback from the generated text. A lower learning rate will result in slower adjustments, while a higher learning rate will make the algorithm more responsive. (Default: 0.1)": "",
+	"Influences how quickly the algorithm responds to feedback from the generated text. A lower learning rate will result in slower adjustments, while a higher learning rate will make the algorithm more responsive.": "",
 	"Info": "",
 	"Input commands": "",
 	"Install from Github URL": "",
@@ -809,7 +809,7 @@
 	"Reasoning Effort": "",
 	"Record voice": "",
 	"Redirecting you to Open WebUI Community": "",
-	"Reduces the probability of generating nonsense. A higher value (e.g. 100) will give more diverse answers, while a lower value (e.g. 10) will be more conservative. (Default: 40)": "",
+	"Reduces the probability of generating nonsense. A higher value (e.g. 100) will give more diverse answers, while a lower value (e.g. 10) will be more conservative.": "",
 	"Refer to yourself as \"User\" (e.g., \"User is learning Spanish\")": "",
 	"References from": "",
 	"Refused when it shouldn't have": "",
@@ -918,11 +918,11 @@
 	"Set the number of worker threads used for computation. This option controls how many threads are used to process incoming requests concurrently. Increasing this value can improve performance under high concurrency workloads but may also consume more CPU resources.": "",
 	"Set Voice": "",
 	"Set whisper model": "",
-	"Sets a flat bias against tokens that have appeared at least once. A higher value (e.g., 1.5) will penalize repetitions more strongly, while a lower value (e.g., 0.9) will be more lenient. At 0, it is disabled. (Default: 0)": "",
-	"Sets a scaling bias against tokens to penalize repetitions, based on how many times they have appeared. A higher value (e.g., 1.5) will penalize repetitions more strongly, while a lower value (e.g., 0.9) will be more lenient. At 0, it is disabled. (Default: 1.1)": "",
-	"Sets how far back for the model to look back to prevent repetition. (Default: 64, 0 = disabled, -1 = num_ctx)": "",
-	"Sets the random number seed to use for generation. Setting this to a specific number will make the model generate the same text for the same prompt. (Default: random)": "",
-	"Sets the size of the context window used to generate the next token. (Default: 2048)": "",
+	"Sets a flat bias against tokens that have appeared at least once. A higher value (e.g., 1.5) will penalize repetitions more strongly, while a lower value (e.g., 0.9) will be more lenient. At 0, it is disabled.": "",
+	"Sets a scaling bias against tokens to penalize repetitions, based on how many times they have appeared. A higher value (e.g., 1.5) will penalize repetitions more strongly, while a lower value (e.g., 0.9) will be more lenient. At 0, it is disabled.": "",
+	"Sets how far back for the model to look back to prevent repetition.": "",
+	"Sets the random number seed to use for generation. Setting this to a specific number will make the model generate the same text for the same prompt.": "",
+	"Sets the size of the context window used to generate the next token.": "",
 	"Sets the stop sequences to use. When this pattern is encountered, the LLM will stop generating text and return. Multiple stop patterns may be set by specifying multiple separate stop parameters in a modelfile.": "",
 	"Settings": "",
 	"Settings saved successfully!": "",
@@ -964,7 +964,7 @@
 	"System Prompt": "",
 	"Tags Generation": "",
 	"Tags Generation Prompt": "",
-	"Tail free sampling is used to reduce the impact of less probable tokens from the output. A higher value (e.g., 2.0) will reduce the impact more, while a value of 1.0 disables this setting. (default: 1)": "",
+	"Tail free sampling is used to reduce the impact of less probable tokens from the output. A higher value (e.g., 2.0) will reduce the impact more, while a value of 1.0 disables this setting.": "",
 	"Talk to model": "",
 	"Tap to interrupt": "",
 	"Tasks": "",
@@ -979,7 +979,7 @@
 	"Thanks for your feedback!": "",
 	"The Application Account DN you bind with for search": "",
 	"The base to search for users": "",
-	"The batch size determines how many text requests are processed together at once. A higher batch size can increase the performance and speed of the model, but it also requires more memory.  (Default: 512)": "",
+	"The batch size determines how many text requests are processed together at once. A higher batch size can increase the performance and speed of the model, but it also requires more memory.": "",
 	"The developers behind this plugin are passionate volunteers from the community. If you find this plugin helpful, please consider contributing to its development.": "",
 	"The evaluation leaderboard is based on the Elo rating system and is updated in real-time.": "",
 	"The LDAP attribute that maps to the mail that users use to sign in.": "",
@@ -988,14 +988,14 @@
 	"The maximum file size in MB. If the file size exceeds this limit, the file will not be uploaded.": "",
 	"The maximum number of files that can be used at once in chat. If the number of files exceeds this limit, the files will not be uploaded.": "",
 	"The score should be a value between 0.0 (0%) and 1.0 (100%).": "",
-	"The temperature of the model. Increasing the temperature will make the model answer more creatively. (Default: 0.8)": "",
+	"The temperature of the model. Increasing the temperature will make the model answer more creatively.": "",
 	"Theme": "",
 	"Thinking...": "",
 	"This action cannot be undone. Do you wish to continue?": "",
 	"This ensures that your valuable conversations are securely saved to your backend database. Thank you!": "",
 	"This is an experimental feature, it may not function as expected and is subject to change at any time.": "",
-	"This option controls how many tokens are preserved when refreshing the context. For example, if set to 2, the last 2 tokens of the conversation context will be retained. Preserving context can help maintain the continuity of a conversation, but it may reduce the ability to respond to new topics. (Default: 24)": "",
-	"This option sets the maximum number of tokens the model can generate in its response. Increasing this limit allows the model to provide longer answers, but it may also increase the likelihood of unhelpful or irrelevant content being generated.  (Default: 128)": "",
+	"This option controls how many tokens are preserved when refreshing the context. For example, if set to 2, the last 2 tokens of the conversation context will be retained. Preserving context can help maintain the continuity of a conversation, but it may reduce the ability to respond to new topics.": "",
+	"This option sets the maximum number of tokens the model can generate in its response. Increasing this limit allows the model to provide longer answers, but it may also increase the likelihood of unhelpful or irrelevant content being generated.": "",
 	"This option will delete all existing files in the collection and replace them with newly uploaded files.": "",
 	"This response was generated by \"{{model}}\"": "",
 	"This will delete": "",
@@ -1132,7 +1132,7 @@
 	"Why?": "",
 	"Widescreen Mode": "",
 	"Won": "",
-	"Works together with top-k. A higher value (e.g., 0.95) will lead to more diverse text, while a lower value (e.g., 0.5) will generate more focused and conservative text. (Default: 0.9)": "",
+	"Works together with top-k. A higher value (e.g., 0.95) will lead to more diverse text, while a lower value (e.g., 0.5) will generate more focused and conservative text.": "",
 	"Workspace": "",
 	"Workspace Permissions": "",
 	"Write": "",

+ 18 - 18
src/lib/i18n/locales/en-US/translation.json

@@ -64,7 +64,7 @@
 	"Allow Voice Interruption in Call": "",
 	"Allowed Endpoints": "",
 	"Already have an account?": "",
-	"Alternative to the top_p, and aims to ensure a balance of quality and variety. The parameter p represents the minimum probability for a token to be considered, relative to the probability of the most likely token. For example, with p=0.05 and the most likely token having a probability of 0.9, logits with a value less than 0.045 are filtered out. (Default: 0.0)": "",
+	"Alternative to the top_p, and aims to ensure a balance of quality and variety. The parameter p represents the minimum probability for a token to be considered, relative to the probability of the most likely token. For example, with p=0.05 and the most likely token having a probability of 0.9, logits with a value less than 0.045 are filtered out.": "",
 	"Always": "",
 	"Amazing": "",
 	"an assistant": "",
@@ -208,7 +208,7 @@
 	"Confirm your new password": "",
 	"Connect to your own OpenAI compatible API endpoints.": "",
 	"Connections": "",
-	"Constrains effort on reasoning for reasoning models. Only applicable to reasoning models from specific providers that support reasoning effort. (Default: medium)": "",
+	"Constrains effort on reasoning for reasoning models. Only applicable to reasoning models from specific providers that support reasoning effort.": "",
 	"Contact Admin for WebUI Access": "",
 	"Content": "",
 	"Content Extraction Engine": "",
@@ -218,9 +218,9 @@
 	"Continue with Email": "",
 	"Continue with LDAP": "",
 	"Control how message text is split for TTS requests. 'Punctuation' splits into sentences, 'paragraphs' splits into paragraphs, and 'none' keeps the message as a single string.": "",
-	"Control the repetition of token sequences in the generated text. A higher value (e.g., 1.5) will penalize repetitions more strongly, while a lower value (e.g., 1.1) will be more lenient. At 1, it is disabled. (Default: 1.1)": "",
+	"Control the repetition of token sequences in the generated text. A higher value (e.g., 1.5) will penalize repetitions more strongly, while a lower value (e.g., 1.1) will be more lenient. At 1, it is disabled.": "",
 	"Controls": "",
-	"Controls the balance between coherence and diversity of the output. A lower value will result in more focused and coherent text. (Default: 5.0)": "",
+	"Controls the balance between coherence and diversity of the output. A lower value will result in more focused and coherent text.": "",
 	"Copied": "",
 	"Copied shared chat URL to clipboard!": "",
 	"Copied to clipboard": "",
@@ -358,7 +358,7 @@
 	"Enable Memory Locking (mlock) to prevent model data from being swapped out of RAM. This option locks the model's working set of pages into RAM, ensuring that they will not be swapped out to disk. This can help maintain performance by avoiding page faults and ensuring fast data access.": "",
 	"Enable Memory Mapping (mmap) to load model data. This option allows the system to use disk storage as an extension of RAM by treating disk files as if they were in RAM. This can improve model performance by allowing for faster data access. However, it may not work correctly with all systems and can consume a significant amount of disk space.": "",
 	"Enable Message Rating": "",
-	"Enable Mirostat sampling for controlling perplexity. (Default: 0, 0 = Disabled, 1 = Mirostat, 2 = Mirostat 2.0)": "",
+	"Enable Mirostat sampling for controlling perplexity.": "",
 	"Enable New Sign Ups": "",
 	"Enabled": "",
 	"Ensure your CSV file includes 4 columns in this order: Name, Email, Password, Role.": "",
@@ -566,7 +566,7 @@
 	"Include": "",
 	"Include `--api-auth` flag when running stable-diffusion-webui": "",
 	"Include `--api` flag when running stable-diffusion-webui": "",
-	"Influences how quickly the algorithm responds to feedback from the generated text. A lower learning rate will result in slower adjustments, while a higher learning rate will make the algorithm more responsive. (Default: 0.1)": "",
+	"Influences how quickly the algorithm responds to feedback from the generated text. A lower learning rate will result in slower adjustments, while a higher learning rate will make the algorithm more responsive.": "",
 	"Info": "",
 	"Input commands": "",
 	"Install from Github URL": "",
@@ -809,7 +809,7 @@
 	"Reasoning Effort": "",
 	"Record voice": "",
 	"Redirecting you to Open WebUI Community": "",
-	"Reduces the probability of generating nonsense. A higher value (e.g. 100) will give more diverse answers, while a lower value (e.g. 10) will be more conservative. (Default: 40)": "",
+	"Reduces the probability of generating nonsense. A higher value (e.g. 100) will give more diverse answers, while a lower value (e.g. 10) will be more conservative.": "",
 	"Refer to yourself as \"User\" (e.g., \"User is learning Spanish\")": "",
 	"References from": "",
 	"Refused when it shouldn't have": "",
@@ -918,11 +918,11 @@
 	"Set the number of worker threads used for computation. This option controls how many threads are used to process incoming requests concurrently. Increasing this value can improve performance under high concurrency workloads but may also consume more CPU resources.": "",
 	"Set Voice": "",
 	"Set whisper model": "",
-	"Sets a flat bias against tokens that have appeared at least once. A higher value (e.g., 1.5) will penalize repetitions more strongly, while a lower value (e.g., 0.9) will be more lenient. At 0, it is disabled. (Default: 0)": "",
-	"Sets a scaling bias against tokens to penalize repetitions, based on how many times they have appeared. A higher value (e.g., 1.5) will penalize repetitions more strongly, while a lower value (e.g., 0.9) will be more lenient. At 0, it is disabled. (Default: 1.1)": "",
-	"Sets how far back for the model to look back to prevent repetition. (Default: 64, 0 = disabled, -1 = num_ctx)": "",
-	"Sets the random number seed to use for generation. Setting this to a specific number will make the model generate the same text for the same prompt. (Default: random)": "",
-	"Sets the size of the context window used to generate the next token. (Default: 2048)": "",
+	"Sets a flat bias against tokens that have appeared at least once. A higher value (e.g., 1.5) will penalize repetitions more strongly, while a lower value (e.g., 0.9) will be more lenient. At 0, it is disabled.": "",
+	"Sets a scaling bias against tokens to penalize repetitions, based on how many times they have appeared. A higher value (e.g., 1.5) will penalize repetitions more strongly, while a lower value (e.g., 0.9) will be more lenient. At 0, it is disabled.": "",
+	"Sets how far back for the model to look back to prevent repetition.": "",
+	"Sets the random number seed to use for generation. Setting this to a specific number will make the model generate the same text for the same prompt.": "",
+	"Sets the size of the context window used to generate the next token.": "",
 	"Sets the stop sequences to use. When this pattern is encountered, the LLM will stop generating text and return. Multiple stop patterns may be set by specifying multiple separate stop parameters in a modelfile.": "",
 	"Settings": "",
 	"Settings saved successfully!": "",
@@ -964,7 +964,7 @@
 	"System Prompt": "",
 	"Tags Generation": "",
 	"Tags Generation Prompt": "",
-	"Tail free sampling is used to reduce the impact of less probable tokens from the output. A higher value (e.g., 2.0) will reduce the impact more, while a value of 1.0 disables this setting. (default: 1)": "",
+	"Tail free sampling is used to reduce the impact of less probable tokens from the output. A higher value (e.g., 2.0) will reduce the impact more, while a value of 1.0 disables this setting.": "",
 	"Talk to model": "",
 	"Tap to interrupt": "",
 	"Tasks": "",
@@ -979,7 +979,7 @@
 	"Thanks for your feedback!": "",
 	"The Application Account DN you bind with for search": "",
 	"The base to search for users": "",
-	"The batch size determines how many text requests are processed together at once. A higher batch size can increase the performance and speed of the model, but it also requires more memory.  (Default: 512)": "",
+	"The batch size determines how many text requests are processed together at once. A higher batch size can increase the performance and speed of the model, but it also requires more memory.": "",
 	"The developers behind this plugin are passionate volunteers from the community. If you find this plugin helpful, please consider contributing to its development.": "",
 	"The evaluation leaderboard is based on the Elo rating system and is updated in real-time.": "",
 	"The LDAP attribute that maps to the mail that users use to sign in.": "",
@@ -988,14 +988,14 @@
 	"The maximum file size in MB. If the file size exceeds this limit, the file will not be uploaded.": "",
 	"The maximum number of files that can be used at once in chat. If the number of files exceeds this limit, the files will not be uploaded.": "",
 	"The score should be a value between 0.0 (0%) and 1.0 (100%).": "",
-	"The temperature of the model. Increasing the temperature will make the model answer more creatively. (Default: 0.8)": "",
+	"The temperature of the model. Increasing the temperature will make the model answer more creatively.": "",
 	"Theme": "",
 	"Thinking...": "",
 	"This action cannot be undone. Do you wish to continue?": "",
 	"This ensures that your valuable conversations are securely saved to your backend database. Thank you!": "",
 	"This is an experimental feature, it may not function as expected and is subject to change at any time.": "",
-	"This option controls how many tokens are preserved when refreshing the context. For example, if set to 2, the last 2 tokens of the conversation context will be retained. Preserving context can help maintain the continuity of a conversation, but it may reduce the ability to respond to new topics. (Default: 24)": "",
-	"This option sets the maximum number of tokens the model can generate in its response. Increasing this limit allows the model to provide longer answers, but it may also increase the likelihood of unhelpful or irrelevant content being generated.  (Default: 128)": "",
+	"This option controls how many tokens are preserved when refreshing the context. For example, if set to 2, the last 2 tokens of the conversation context will be retained. Preserving context can help maintain the continuity of a conversation, but it may reduce the ability to respond to new topics.": "",
+	"This option sets the maximum number of tokens the model can generate in its response. Increasing this limit allows the model to provide longer answers, but it may also increase the likelihood of unhelpful or irrelevant content being generated.": "",
 	"This option will delete all existing files in the collection and replace them with newly uploaded files.": "",
 	"This response was generated by \"{{model}}\"": "",
 	"This will delete": "",
@@ -1132,7 +1132,7 @@
 	"Why?": "",
 	"Widescreen Mode": "",
 	"Won": "",
-	"Works together with top-k. A higher value (e.g., 0.95) will lead to more diverse text, while a lower value (e.g., 0.5) will generate more focused and conservative text. (Default: 0.9)": "",
+	"Works together with top-k. A higher value (e.g., 0.95) will lead to more diverse text, while a lower value (e.g., 0.5) will generate more focused and conservative text.": "",
 	"Workspace": "",
 	"Workspace Permissions": "",
 	"Write": "",

+ 18 - 18
src/lib/i18n/locales/es-ES/translation.json

@@ -64,7 +64,7 @@
 	"Allow Voice Interruption in Call": "Permitir interrupción de voz en llamada",
 	"Allowed Endpoints": "Endpoints permitidos",
 	"Already have an account?": "¿Ya tienes una cuenta?",
-	"Alternative to the top_p, and aims to ensure a balance of quality and variety. The parameter p represents the minimum probability for a token to be considered, relative to the probability of the most likely token. For example, with p=0.05 and the most likely token having a probability of 0.9, logits with a value less than 0.045 are filtered out. (Default: 0.0)": "Alternativa a top_p, y busca asegurar un equilibrio entre calidad y variedad. El parámetro p representa la probabilidad mínima para que un token sea considerado, en relación con la probabilidad del token más probable. Por ejemplo, con p=0.05 y el token más probable con una probabilidad de 0.9, los logits con un valor menor a 0.045 son filtrados. (Predeterminado: 0.0)",
+	"Alternative to the top_p, and aims to ensure a balance of quality and variety. The parameter p represents the minimum probability for a token to be considered, relative to the probability of the most likely token. For example, with p=0.05 and the most likely token having a probability of 0.9, logits with a value less than 0.045 are filtered out.": "",
 	"Always": "Siempre",
 	"Amazing": "Sorprendente",
 	"an assistant": "un asistente",
@@ -208,7 +208,7 @@
 	"Confirm your new password": "Confirmar tu nueva contraseña",
 	"Connect to your own OpenAI compatible API endpoints.": "",
 	"Connections": "Conexiones",
-	"Constrains effort on reasoning for reasoning models. Only applicable to reasoning models from specific providers that support reasoning effort. (Default: medium)": " Restringe el esfuerzo en la razonamiento para los modelos de razonamiento. Solo aplicable a los modelos de razonamiento de proveedores específicos que admiten el esfuerzo de razonamiento. (Por defecto: medio)",
+	"Constrains effort on reasoning for reasoning models. Only applicable to reasoning models from specific providers that support reasoning effort.": "",
 	"Contact Admin for WebUI Access": "Contacta el administrador para obtener acceso al WebUI",
 	"Content": "Contenido",
 	"Content Extraction Engine": "",
@@ -218,9 +218,9 @@
 	"Continue with Email": "Continuar con email",
 	"Continue with LDAP": "Continuar con LDAP",
 	"Control how message text is split for TTS requests. 'Punctuation' splits into sentences, 'paragraphs' splits into paragraphs, and 'none' keeps the message as a single string.": "Controlar como el texto del mensaje se divide para las solicitudes de TTS. 'Punctuation' divide en oraciones, 'paragraphs' divide en párrafos y 'none' mantiene el mensaje como una sola cadena.",
-	"Control the repetition of token sequences in the generated text. A higher value (e.g., 1.5) will penalize repetitions more strongly, while a lower value (e.g., 1.1) will be more lenient. At 1, it is disabled. (Default: 1.1)": "",
+	"Control the repetition of token sequences in the generated text. A higher value (e.g., 1.5) will penalize repetitions more strongly, while a lower value (e.g., 1.1) will be more lenient. At 1, it is disabled.": "",
 	"Controls": "Controles",
-	"Controls the balance between coherence and diversity of the output. A lower value will result in more focused and coherent text. (Default: 5.0)": " Controlar el equilibrio entre la coherencia y la diversidad de la salida. Un valor más bajo resultará en un texto más enfocado y coherente. (Por defecto: 5.0)",
+	"Controls the balance between coherence and diversity of the output. A lower value will result in more focused and coherent text.": "",
 	"Copied": "Copiado",
 	"Copied shared chat URL to clipboard!": "¡URL de chat compartido copiado al portapapeles!",
 	"Copied to clipboard": "Copiado al portapapeles",
@@ -358,7 +358,7 @@
 	"Enable Memory Locking (mlock) to prevent model data from being swapped out of RAM. This option locks the model's working set of pages into RAM, ensuring that they will not be swapped out to disk. This can help maintain performance by avoiding page faults and ensuring fast data access.": "Habilitar bloqueo de memoria (mlock) para evitar que los datos del modelo se intercambien fuera de la RAM. Esta opción bloquea el conjunto de páginas de trabajo del modelo en la RAM, asegurando que no se intercambiarán fuera del disco. Esto puede ayudar a mantener el rendimiento evitando fallos de página y asegurando un acceso rápido a los datos.",
 	"Enable Memory Mapping (mmap) to load model data. This option allows the system to use disk storage as an extension of RAM by treating disk files as if they were in RAM. This can improve model performance by allowing for faster data access. However, it may not work correctly with all systems and can consume a significant amount of disk space.": "Habilitar asignación de memoria (mmap) para cargar datos del modelo. Esta opción permite al sistema usar el almacenamiento en disco como una extensión de la RAM al tratar los archivos en disco como si estuvieran en la RAM. Esto puede mejorar el rendimiento del modelo permitiendo un acceso más rápido a los datos. Sin embargo, puede no funcionar correctamente con todos los sistemas y puede consumir una cantidad significativa de espacio en disco.",
 	"Enable Message Rating": "Habilitar la calificación de los mensajes",
-	"Enable Mirostat sampling for controlling perplexity. (Default: 0, 0 = Disabled, 1 = Mirostat, 2 = Mirostat 2.0)": "Habilitar muestreo Mirostat para controlar la perplejidad. (Predeterminado: 0, 0 = Deshabilitado, 1 = Mirostat, 2 = Mirostat 2.0)",
+	"Enable Mirostat sampling for controlling perplexity.": "",
 	"Enable New Sign Ups": "Habilitar Nuevos Registros",
 	"Enabled": "Activado",
 	"Ensure your CSV file includes 4 columns in this order: Name, Email, Password, Role.": "Asegúrese de que su archivo CSV incluya 4 columnas en este orden: Nombre, Correo Electrónico, Contraseña, Rol.",
@@ -566,7 +566,7 @@
 	"Include": "Incluir",
 	"Include `--api-auth` flag when running stable-diffusion-webui": "Incluir el indicador `--api-auth` al ejecutar stable-diffusion-webui",
 	"Include `--api` flag when running stable-diffusion-webui": "Incluir el indicador `--api` al ejecutar stable-diffusion-webui",
-	"Influences how quickly the algorithm responds to feedback from the generated text. A lower learning rate will result in slower adjustments, while a higher learning rate will make the algorithm more responsive. (Default: 0.1)": "Influencia en qué medida el algoritmo responde rápidamente a la retroalimentación del texto generado. Una tasa de aprendizaje más baja resultará en ajustes más lentos, mientras que una tasa de aprendizaje más alta hará que el algoritmo sea más receptivo. (Predeterminado: 0.1)",
+	"Influences how quickly the algorithm responds to feedback from the generated text. A lower learning rate will result in slower adjustments, while a higher learning rate will make the algorithm more responsive.": "",
 	"Info": "Información",
 	"Input commands": "Ingresar comandos",
 	"Install from Github URL": "Instalar desde la URL de Github",
@@ -809,7 +809,7 @@
 	"Reasoning Effort": "Esfuerzo de razonamiento",
 	"Record voice": "Grabar voz",
 	"Redirecting you to Open WebUI Community": "Redireccionándote a la comunidad OpenWebUI",
-	"Reduces the probability of generating nonsense. A higher value (e.g. 100) will give more diverse answers, while a lower value (e.g. 10) will be more conservative. (Default: 40)": "Reduce la probabilidad de generar tonterías. Un valor más alto (p.ej. 100) dará respuestas más diversas, mientras que un valor más bajo (p.ej. 10) será más conservador. (Predeterminado: 40)",
+	"Reduces the probability of generating nonsense. A higher value (e.g. 100) will give more diverse answers, while a lower value (e.g. 10) will be more conservative.": "",
 	"Refer to yourself as \"User\" (e.g., \"User is learning Spanish\")": "Referirse a usted mismo como \"Usuario\" (por ejemplo, \"El usuario está aprendiendo Español\")",
 	"References from": "Referencias de",
 	"Refused when it shouldn't have": "Rechazado cuando no debería",
@@ -918,11 +918,11 @@
 	"Set the number of worker threads used for computation. This option controls how many threads are used to process incoming requests concurrently. Increasing this value can improve performance under high concurrency workloads but may also consume more CPU resources.": "Establece el número de hilos de trabajo utilizados para el cálculo. Esta opción controla cuántos hilos se utilizan para procesar las solicitudes entrantes simultáneamente. Aumentar este valor puede mejorar el rendimiento bajo cargas de trabajo de alta concurrencia, pero también puede consumir más recursos de CPU.",
 	"Set Voice": "Establecer la voz",
 	"Set whisper model": "Establecer modelo de whisper",
-	"Sets a flat bias against tokens that have appeared at least once. A higher value (e.g., 1.5) will penalize repetitions more strongly, while a lower value (e.g., 0.9) will be more lenient. At 0, it is disabled. (Default: 0)": "",
-	"Sets a scaling bias against tokens to penalize repetitions, based on how many times they have appeared. A higher value (e.g., 1.5) will penalize repetitions more strongly, while a lower value (e.g., 0.9) will be more lenient. At 0, it is disabled. (Default: 1.1)": "",
-	"Sets how far back for the model to look back to prevent repetition. (Default: 64, 0 = disabled, -1 = num_ctx)": "Establece cuán lejos atrás debe mirar el modelo para evitar la repetición. (Predeterminado: 64, 0 = deshabilitado, -1 = num_ctx)",
-	"Sets the random number seed to use for generation. Setting this to a specific number will make the model generate the same text for the same prompt. (Default: random)": "Establece la semilla de número aleatorio a usar para la generación. Establecer esto en un número específico hará que el modelo genere el mismo texto para el mismo prompt. (Predeterminado: aleatorio)",
-	"Sets the size of the context window used to generate the next token. (Default: 2048)": "Establece el tamaño de la ventana de contexto utilizada para generar el siguiente token. (Predeterminado: 2048)",
+	"Sets a flat bias against tokens that have appeared at least once. A higher value (e.g., 1.5) will penalize repetitions more strongly, while a lower value (e.g., 0.9) will be more lenient. At 0, it is disabled.": "",
+	"Sets a scaling bias against tokens to penalize repetitions, based on how many times they have appeared. A higher value (e.g., 1.5) will penalize repetitions more strongly, while a lower value (e.g., 0.9) will be more lenient. At 0, it is disabled.": "",
+	"Sets how far back for the model to look back to prevent repetition.": "",
+	"Sets the random number seed to use for generation. Setting this to a specific number will make the model generate the same text for the same prompt.": "",
+	"Sets the size of the context window used to generate the next token.": "",
 	"Sets the stop sequences to use. When this pattern is encountered, the LLM will stop generating text and return. Multiple stop patterns may be set by specifying multiple separate stop parameters in a modelfile.": "Establece las secuencias de parada a usar. Cuando se encuentre este patrón, el LLM dejará de generar texto y devolverá. Se pueden establecer varios patrones de parada especificando múltiples parámetros de parada separados en un archivo de modelo.",
 	"Settings": "Configuración",
 	"Settings saved successfully!": "¡Configuración guardada con éxito!",
@@ -964,7 +964,7 @@
 	"System Prompt": "Prompt del sistema",
 	"Tags Generation": "Generación de etiquetas",
 	"Tags Generation Prompt": "Prompt de generación de etiquetas",
-	"Tail free sampling is used to reduce the impact of less probable tokens from the output. A higher value (e.g., 2.0) will reduce the impact more, while a value of 1.0 disables this setting. (default: 1)": "El muestreo libre de cola se utiliza para reducir el impacto de los tokens menos probables en la salida. Un valor más alto (p.ej., 2.0) reducirá el impacto más, mientras que un valor de 1.0 deshabilitará esta configuración. (predeterminado: 1)",
+	"Tail free sampling is used to reduce the impact of less probable tokens from the output. A higher value (e.g., 2.0) will reduce the impact more, while a value of 1.0 disables this setting.": "",
 	"Talk to model": "",
 	"Tap to interrupt": "Toca para interrumpir",
 	"Tasks": "",
@@ -979,7 +979,7 @@
 	"Thanks for your feedback!": "¡Gracias por tu retroalimentación!",
 	"The Application Account DN you bind with for search": "La cuenta de aplicación DN que vincula para la búsqueda",
 	"The base to search for users": "La base para buscar usuarios",
-	"The batch size determines how many text requests are processed together at once. A higher batch size can increase the performance and speed of the model, but it also requires more memory.  (Default: 512)": "",
+	"The batch size determines how many text requests are processed together at once. A higher batch size can increase the performance and speed of the model, but it also requires more memory.": "",
 	"The developers behind this plugin are passionate volunteers from the community. If you find this plugin helpful, please consider contributing to its development.": "Los desarrolladores de este plugin son apasionados voluntarios de la comunidad. Si encuentras este plugin útil, por favor considere contribuir a su desarrollo.",
 	"The evaluation leaderboard is based on the Elo rating system and is updated in real-time.": "El tablero de líderes de evaluación se basa en el sistema de clasificación Elo y se actualiza en tiempo real.",
 	"The LDAP attribute that maps to the mail that users use to sign in.": "El atributo LDAP que se asigna al correo que los usuarios utilizan para iniciar sesión.",
@@ -988,14 +988,14 @@
 	"The maximum file size in MB. If the file size exceeds this limit, the file will not be uploaded.": "El tamaño máximo del archivo en MB. Si el tamaño del archivo supera este límite, el archivo no se subirá.",
 	"The maximum number of files that can be used at once in chat. If the number of files exceeds this limit, the files will not be uploaded.": "El número máximo de archivos que se pueden utilizar a la vez en chat. Si este límite es superado, los archivos no se subirán.",
 	"The score should be a value between 0.0 (0%) and 1.0 (100%).": "La puntuación debe ser un valor entre 0.0 (0%) y 1.0 (100%).",
-	"The temperature of the model. Increasing the temperature will make the model answer more creatively. (Default: 0.8)": "La temperatura del modelo. Aumentar la temperatura hará que el modelo responda de manera más creativa. (Predeterminado: 0.8)",
+	"The temperature of the model. Increasing the temperature will make the model answer more creatively.": "",
 	"Theme": "Tema",
 	"Thinking...": "Pensando...",
 	"This action cannot be undone. Do you wish to continue?": "Esta acción no se puede deshacer. ¿Desea continuar?",
 	"This ensures that your valuable conversations are securely saved to your backend database. Thank you!": "Esto garantiza que sus valiosas conversaciones se guarden de forma segura en su base de datos en el backend. ¡Gracias!",
 	"This is an experimental feature, it may not function as expected and is subject to change at any time.": "Esta es una característica experimental que puede no funcionar como se esperaba y está sujeto a cambios en cualquier momento.",
-	"This option controls how many tokens are preserved when refreshing the context. For example, if set to 2, the last 2 tokens of the conversation context will be retained. Preserving context can help maintain the continuity of a conversation, but it may reduce the ability to respond to new topics. (Default: 24)": "Esta opción controla cuántos tokens se conservan al actualizar el contexto. Por ejemplo, si se establece en 2, se conservarán los últimos 2 tokens del contexto de la conversación. Conservar el contexto puede ayudar a mantener la continuidad de una conversación, pero puede reducir la capacidad de responder a nuevos temas. (Predeterminado: 24)",
-	"This option sets the maximum number of tokens the model can generate in its response. Increasing this limit allows the model to provide longer answers, but it may also increase the likelihood of unhelpful or irrelevant content being generated.  (Default: 128)": "",
+	"This option controls how many tokens are preserved when refreshing the context. For example, if set to 2, the last 2 tokens of the conversation context will be retained. Preserving context can help maintain the continuity of a conversation, but it may reduce the ability to respond to new topics.": "",
+	"This option sets the maximum number of tokens the model can generate in its response. Increasing this limit allows the model to provide longer answers, but it may also increase the likelihood of unhelpful or irrelevant content being generated.": "",
 	"This option will delete all existing files in the collection and replace them with newly uploaded files.": "Esta opción eliminará todos los archivos existentes en la colección y los reemplazará con nuevos archivos subidos.",
 	"This response was generated by \"{{model}}\"": "Esta respuesta fue generada por \"{{model}}\"",
 	"This will delete": "Esto eliminará",
@@ -1132,7 +1132,7 @@
 	"Why?": "¿Por qué?",
 	"Widescreen Mode": "Modo de pantalla ancha",
 	"Won": "Ganado",
-	"Works together with top-k. A higher value (e.g., 0.95) will lead to more diverse text, while a lower value (e.g., 0.5) will generate more focused and conservative text. (Default: 0.9)": "Funciona junto con top-k. Un valor más alto (p.ej., 0.95) dará como resultado un texto más diverso, mientras que un valor más bajo (p.ej., 0.5) generará un texto más enfocado y conservador. (Predeterminado: 0.9)",
+	"Works together with top-k. A higher value (e.g., 0.95) will lead to more diverse text, while a lower value (e.g., 0.5) will generate more focused and conservative text.": "",
 	"Workspace": "Espacio de trabajo",
 	"Workspace Permissions": "Permisos del espacio de trabajo",
 	"Write": "Escribir",

+ 18 - 18
src/lib/i18n/locales/eu-ES/translation.json

@@ -64,7 +64,7 @@
 	"Allow Voice Interruption in Call": "Baimendu Ahots Etena Deietan",
 	"Allowed Endpoints": "",
 	"Already have an account?": "Baduzu kontu bat?",
-	"Alternative to the top_p, and aims to ensure a balance of quality and variety. The parameter p represents the minimum probability for a token to be considered, relative to the probability of the most likely token. For example, with p=0.05 and the most likely token having a probability of 0.9, logits with a value less than 0.045 are filtered out. (Default: 0.0)": "top_p-ren alternatiba, kalitate eta aniztasunaren arteko oreka bermatzea du helburu. p parametroak token bat kontuan hartzeko gutxieneko probabilitatea adierazten du, token probableenaren probabilitatearen arabera. Adibidez, p=0.05 balioarekin eta token probableenaren probabilitatea 0.9 denean, 0.045 baino balio txikiagoko logit-ak baztertzen dira. (Lehenetsia: 0.0)",
+	"Alternative to the top_p, and aims to ensure a balance of quality and variety. The parameter p represents the minimum probability for a token to be considered, relative to the probability of the most likely token. For example, with p=0.05 and the most likely token having a probability of 0.9, logits with a value less than 0.045 are filtered out.": "",
 	"Always": "",
 	"Amazing": "Harrigarria",
 	"an assistant": "laguntzaile bat",
@@ -208,7 +208,7 @@
 	"Confirm your new password": "",
 	"Connect to your own OpenAI compatible API endpoints.": "",
 	"Connections": "Konexioak",
-	"Constrains effort on reasoning for reasoning models. Only applicable to reasoning models from specific providers that support reasoning effort. (Default: medium)": "",
+	"Constrains effort on reasoning for reasoning models. Only applicable to reasoning models from specific providers that support reasoning effort.": "",
 	"Contact Admin for WebUI Access": "Jarri harremanetan Administratzailearekin WebUI Sarbiderako",
 	"Content": "Edukia",
 	"Content Extraction Engine": "",
@@ -218,9 +218,9 @@
 	"Continue with Email": "Jarraitu Posta Elektronikoarekin",
 	"Continue with LDAP": "Jarraitu LDAP-rekin",
 	"Control how message text is split for TTS requests. 'Punctuation' splits into sentences, 'paragraphs' splits into paragraphs, and 'none' keeps the message as a single string.": "Kontrolatu nola banatzen den mezuaren testua TTS eskaeretarako. 'Puntuazioa'-k esaldietan banatzen du, 'paragrafoak'-k paragrafoetan, eta 'bat ere ez'-ek mezua kate bakar gisa mantentzen du.",
-	"Control the repetition of token sequences in the generated text. A higher value (e.g., 1.5) will penalize repetitions more strongly, while a lower value (e.g., 1.1) will be more lenient. At 1, it is disabled. (Default: 1.1)": "",
+	"Control the repetition of token sequences in the generated text. A higher value (e.g., 1.5) will penalize repetitions more strongly, while a lower value (e.g., 1.1) will be more lenient. At 1, it is disabled.": "",
 	"Controls": "Kontrolak",
-	"Controls the balance between coherence and diversity of the output. A lower value will result in more focused and coherent text. (Default: 5.0)": "Irteeraren koherentzia eta aniztasunaren arteko oreka kontrolatzen du. Balio txikiagoak testu zentratuagoa eta koherenteagoa emango du. (Lehenetsia: 5.0)",
+	"Controls the balance between coherence and diversity of the output. A lower value will result in more focused and coherent text.": "",
 	"Copied": "Kopiatuta",
 	"Copied shared chat URL to clipboard!": "Partekatutako txataren URLa arbelera kopiatu da!",
 	"Copied to clipboard": "Arbelera kopiatuta",
@@ -358,7 +358,7 @@
 	"Enable Memory Locking (mlock) to prevent model data from being swapped out of RAM. This option locks the model's working set of pages into RAM, ensuring that they will not be swapped out to disk. This can help maintain performance by avoiding page faults and ensuring fast data access.": "Gaitu Memoria Blokeatzea (mlock) ereduaren datuak RAM memoriatik kanpo ez trukatzeko. Aukera honek ereduaren lan-orri multzoa RAMean blokatzen du, diskora ez direla trukatuko ziurtatuz. Honek errendimendua mantentzen lagun dezake, orri-hutsegiteak saihestuz eta datuen sarbide azkarra bermatuz.",
 	"Enable Memory Mapping (mmap) to load model data. This option allows the system to use disk storage as an extension of RAM by treating disk files as if they were in RAM. This can improve model performance by allowing for faster data access. However, it may not work correctly with all systems and can consume a significant amount of disk space.": "Gaitu Memoria Mapaketa (mmap) ereduaren datuak kargatzeko. Aukera honek sistemari disko-biltegiratzea RAM memoriaren luzapen gisa erabiltzea ahalbidetzen dio, diskoko fitxategiak RAMean baleude bezala tratatuz. Honek ereduaren errendimendua hobe dezake, datuen sarbide azkarragoa ahalbidetuz. Hala ere, baliteke sistema guztietan behar bezala ez funtzionatzea eta disko-espazio handia kontsumitu dezake.",
 	"Enable Message Rating": "Gaitu Mezuen Balorazioa",
-	"Enable Mirostat sampling for controlling perplexity. (Default: 0, 0 = Disabled, 1 = Mirostat, 2 = Mirostat 2.0)": "Gaitu Mirostat laginketa nahasmena kontrolatzeko. (Lehenetsia: 0, 0 = Desgaituta, 1 = Mirostat, 2 = Mirostat 2.0)",
+	"Enable Mirostat sampling for controlling perplexity.": "",
 	"Enable New Sign Ups": "Gaitu Izena Emate Berriak",
 	"Enabled": "Gaituta",
 	"Ensure your CSV file includes 4 columns in this order: Name, Email, Password, Role.": "Ziurtatu zure CSV fitxategiak 4 zutabe dituela ordena honetan: Izena, Posta elektronikoa, Pasahitza, Rola.",
@@ -566,7 +566,7 @@
 	"Include": "Sartu",
 	"Include `--api-auth` flag when running stable-diffusion-webui": "Sartu `--api-auth` bandera stable-diffusion-webui exekutatzean",
 	"Include `--api` flag when running stable-diffusion-webui": "Sartu `--api` bandera stable-diffusion-webui exekutatzean",
-	"Influences how quickly the algorithm responds to feedback from the generated text. A lower learning rate will result in slower adjustments, while a higher learning rate will make the algorithm more responsive. (Default: 0.1)": "Algoritmoak sortutako testutik jasotako feedbackari erantzuteko abiadura zehazten du. Ikasketa-tasa baxuago batek doikuntza motelagoak eragingo ditu, eta ikasketa-tasa altuago batek algoritmoaren erantzuna bizkorragoa egingo du. (Lehenetsia: 0.1)",
+	"Influences how quickly the algorithm responds to feedback from the generated text. A lower learning rate will result in slower adjustments, while a higher learning rate will make the algorithm more responsive.": "",
 	"Info": "Informazioa",
 	"Input commands": "Sartu komandoak",
 	"Install from Github URL": "Instalatu Github URLtik",
@@ -809,7 +809,7 @@
 	"Reasoning Effort": "",
 	"Record voice": "Grabatu ahotsa",
 	"Redirecting you to Open WebUI Community": "OpenWebUI Komunitatera berbideratzen",
-	"Reduces the probability of generating nonsense. A higher value (e.g. 100) will give more diverse answers, while a lower value (e.g. 10) will be more conservative. (Default: 40)": "Zentzugabekeriak sortzeko probabilitatea murrizten du. Balio altuago batek (adib. 100) erantzun anitzagoak emango ditu, balio baxuago batek (adib. 10) kontserbadoreagoa izango den bitartean. (Lehenetsia: 40)",
+	"Reduces the probability of generating nonsense. A higher value (e.g. 100) will give more diverse answers, while a lower value (e.g. 10) will be more conservative.": "",
 	"Refer to yourself as \"User\" (e.g., \"User is learning Spanish\")": "Egin erreferentzia zure buruari \"Erabiltzaile\" gisa (adib., \"Erabiltzailea gaztelania ikasten ari da\")",
 	"References from": "Erreferentziak hemendik",
 	"Refused when it shouldn't have": "Ukatu duenean ukatu behar ez zuenean",
@@ -918,11 +918,11 @@
 	"Set the number of worker threads used for computation. This option controls how many threads are used to process incoming requests concurrently. Increasing this value can improve performance under high concurrency workloads but may also consume more CPU resources.": "Ezarri kalkulurako erabilitako langile harien kopurua. Aukera honek kontrolatzen du zenbat hari erabiltzen diren sarrerako eskaerak aldi berean prozesatzeko. Balio hau handitzeak errendimendua hobetu dezake konkurrentzia altuko lan-kargetan, baina CPU baliabide gehiago kontsumitu ditzake.",
 	"Set Voice": "Ezarri ahotsa",
 	"Set whisper model": "Ezarri whisper modeloa",
-	"Sets a flat bias against tokens that have appeared at least once. A higher value (e.g., 1.5) will penalize repetitions more strongly, while a lower value (e.g., 0.9) will be more lenient. At 0, it is disabled. (Default: 0)": "",
-	"Sets a scaling bias against tokens to penalize repetitions, based on how many times they have appeared. A higher value (e.g., 1.5) will penalize repetitions more strongly, while a lower value (e.g., 0.9) will be more lenient. At 0, it is disabled. (Default: 1.1)": "",
-	"Sets how far back for the model to look back to prevent repetition. (Default: 64, 0 = disabled, -1 = num_ctx)": "Ezartzen du modeloak zenbat atzera begiratu behar duen errepikapenak saihesteko. (Lehenetsia: 64, 0 = desgaituta, -1 = num_ctx)",
-	"Sets the random number seed to use for generation. Setting this to a specific number will make the model generate the same text for the same prompt. (Default: random)": "Ezartzen du sorkuntzarako erabiliko den ausazko zenbakien hazia. Hau zenbaki zehatz batera ezartzeak modeloak testu bera sortzea eragingo du prompt bererako. (Lehenetsia: ausazkoa)",
-	"Sets the size of the context window used to generate the next token. (Default: 2048)": "Ezartzen du hurrengo tokena sortzeko erabilitako testuinguru leihoaren tamaina. (Lehenetsia: 2048)",
+	"Sets a flat bias against tokens that have appeared at least once. A higher value (e.g., 1.5) will penalize repetitions more strongly, while a lower value (e.g., 0.9) will be more lenient. At 0, it is disabled.": "",
+	"Sets a scaling bias against tokens to penalize repetitions, based on how many times they have appeared. A higher value (e.g., 1.5) will penalize repetitions more strongly, while a lower value (e.g., 0.9) will be more lenient. At 0, it is disabled.": "",
+	"Sets how far back for the model to look back to prevent repetition.": "",
+	"Sets the random number seed to use for generation. Setting this to a specific number will make the model generate the same text for the same prompt.": "",
+	"Sets the size of the context window used to generate the next token.": "",
 	"Sets the stop sequences to use. When this pattern is encountered, the LLM will stop generating text and return. Multiple stop patterns may be set by specifying multiple separate stop parameters in a modelfile.": "Ezartzen ditu erabiliko diren gelditzeko sekuentziak. Patroi hau aurkitzen denean, LLMak testua sortzeari utziko dio eta itzuli egingo da. Gelditzeko patroi anitz ezar daitezke modelfile batean gelditzeko parametro anitz zehaztuz.",
 	"Settings": "Ezarpenak",
 	"Settings saved successfully!": "Ezarpenak ongi gorde dira!",
@@ -964,7 +964,7 @@
 	"System Prompt": "Sistema prompta",
 	"Tags Generation": "",
 	"Tags Generation Prompt": "Etiketa sortzeko prompta",
-	"Tail free sampling is used to reduce the impact of less probable tokens from the output. A higher value (e.g., 2.0) will reduce the impact more, while a value of 1.0 disables this setting. (default: 1)": "Isats-libre laginketa erabiltzen da irteran probabilitate txikiagoko tokenen eragina murrizteko. Balio altuago batek (adib., 2.0) eragina gehiago murriztuko du, 1.0 balioak ezarpen hau desgaitzen duen bitartean. (lehenetsia: 1)",
+	"Tail free sampling is used to reduce the impact of less probable tokens from the output. A higher value (e.g., 2.0) will reduce the impact more, while a value of 1.0 disables this setting.": "",
 	"Talk to model": "",
 	"Tap to interrupt": "Ukitu eteteko",
 	"Tasks": "",
@@ -979,7 +979,7 @@
 	"Thanks for your feedback!": "Eskerrik asko zure iritzia emateagatik!",
 	"The Application Account DN you bind with for search": "Bilaketarako lotzen duzun aplikazio kontuaren DN-a",
 	"The base to search for users": "Erabiltzaileak bilatzeko oinarria",
-	"The batch size determines how many text requests are processed together at once. A higher batch size can increase the performance and speed of the model, but it also requires more memory.  (Default: 512)": "Sorta tamainak zehazten du zenbat testu eskaera prozesatzen diren batera aldi berean. Sorta tamaina handiago batek modeloaren errendimendua eta abiadura handitu ditzake, baina memoria gehiago behar du. (Lehenetsia: 512)",
+	"The batch size determines how many text requests are processed together at once. A higher batch size can increase the performance and speed of the model, but it also requires more memory.": "",
 	"The developers behind this plugin are passionate volunteers from the community. If you find this plugin helpful, please consider contributing to its development.": "Plugin honen atzean dauden garatzaileak komunitateko boluntario sutsuak dira. Plugin hau baliagarria iruditzen bazaizu, mesedez kontuan hartu bere garapenean laguntzea.",
 	"The evaluation leaderboard is based on the Elo rating system and is updated in real-time.": "Ebaluazio sailkapena Elo sailkapen sisteman oinarritzen da eta denbora errealean eguneratzen da.",
 	"The LDAP attribute that maps to the mail that users use to sign in.": "",
@@ -988,14 +988,14 @@
 	"The maximum file size in MB. If the file size exceeds this limit, the file will not be uploaded.": "Fitxategiaren gehienezko tamaina MB-tan. Fitxategiaren tamainak muga hau gainditzen badu, fitxategia ez da kargatuko.",
 	"The maximum number of files that can be used at once in chat. If the number of files exceeds this limit, the files will not be uploaded.": "Txatean aldi berean erabili daitezkeen fitxategien gehienezko kopurua. Fitxategi kopuruak muga hau gainditzen badu, fitxategiak ez dira kargatuko.",
 	"The score should be a value between 0.0 (0%) and 1.0 (100%).": "Puntuazioa 0.0 (0%) eta 1.0 (100%) arteko balio bat izan behar da.",
-	"The temperature of the model. Increasing the temperature will make the model answer more creatively. (Default: 0.8)": "Modeloaren tenperatura. Tenperatura handitzeak modeloaren erantzunak sortzaileagoak izatea eragingo du. (Lehenetsia: 0.8)",
+	"The temperature of the model. Increasing the temperature will make the model answer more creatively.": "",
 	"Theme": "Gaia",
 	"Thinking...": "Pentsatzen...",
 	"This action cannot be undone. Do you wish to continue?": "Ekintza hau ezin da desegin. Jarraitu nahi duzu?",
 	"This ensures that your valuable conversations are securely saved to your backend database. Thank you!": "Honek zure elkarrizketa baliotsuak modu seguruan zure backend datu-basean gordeko direla ziurtatzen du. Eskerrik asko!",
 	"This is an experimental feature, it may not function as expected and is subject to change at any time.": "Hau funtzionalitate esperimental bat da, baliteke espero bezala ez funtzionatzea eta edozein unetan aldaketak izatea.",
-	"This option controls how many tokens are preserved when refreshing the context. For example, if set to 2, the last 2 tokens of the conversation context will be retained. Preserving context can help maintain the continuity of a conversation, but it may reduce the ability to respond to new topics. (Default: 24)": "Aukera honek kontrolatzen du zenbat token mantentzen diren testuingurua freskatzean. Adibidez, 2-ra ezarrita badago, elkarrizketaren testuinguruko azken 2 tokenak mantenduko dira. Testuingurua mantentzeak elkarrizketaren jarraitutasuna mantentzen lagun dezake, baina gai berriei erantzuteko gaitasuna murriztu dezake. (Lehenetsia: 24)",
-	"This option sets the maximum number of tokens the model can generate in its response. Increasing this limit allows the model to provide longer answers, but it may also increase the likelihood of unhelpful or irrelevant content being generated.  (Default: 128)": "Aukera honek ereduak bere erantzunean sor dezakeen token kopuru maximoa ezartzen du. Muga hau handitzeak ereduari erantzun luzeagoak emateko aukera ematen dio, baina eduki ez-erabilgarri edo ez-egokia sortzeko probabilitatea ere handitu dezake. (Lehenetsia: 128)",
+	"This option controls how many tokens are preserved when refreshing the context. For example, if set to 2, the last 2 tokens of the conversation context will be retained. Preserving context can help maintain the continuity of a conversation, but it may reduce the ability to respond to new topics.": "",
+	"This option sets the maximum number of tokens the model can generate in its response. Increasing this limit allows the model to provide longer answers, but it may also increase the likelihood of unhelpful or irrelevant content being generated.": "",
 	"This option will delete all existing files in the collection and replace them with newly uploaded files.": "Aukera honek bilduman dauden fitxategi guztiak ezabatuko ditu eta berriki kargatutako fitxategiekin ordezkatuko ditu.",
 	"This response was generated by \"{{model}}\"": "Erantzun hau \"{{model}}\" modeloak sortu du",
 	"This will delete": "Honek ezabatuko du",
@@ -1132,7 +1132,7 @@
 	"Why?": "Zergatik?",
 	"Widescreen Mode": "Pantaila zabaleko modua",
 	"Won": "Irabazi du",
-	"Works together with top-k. A higher value (e.g., 0.95) will lead to more diverse text, while a lower value (e.g., 0.5) will generate more focused and conservative text. (Default: 0.9)": "Top-k-rekin batera lan egiten du. Balio altuago batek (adib., 0.95) testu anitzagoa sortuko du, balio baxuago batek (adib., 0.5) testu fokatu eta kontserbadoreagoa sortuko duen bitartean. (Lehenetsia: 0.9)",
+	"Works together with top-k. A higher value (e.g., 0.95) will lead to more diverse text, while a lower value (e.g., 0.5) will generate more focused and conservative text.": "",
 	"Workspace": "Lan-eremua",
 	"Workspace Permissions": "Lan-eremuaren baimenak",
 	"Write": "",

+ 18 - 18
src/lib/i18n/locales/fa-IR/translation.json

@@ -64,7 +64,7 @@
 	"Allow Voice Interruption in Call": "",
 	"Allowed Endpoints": "",
 	"Already have an account?": "از قبل حساب کاربری دارید؟",
-	"Alternative to the top_p, and aims to ensure a balance of quality and variety. The parameter p represents the minimum probability for a token to be considered, relative to the probability of the most likely token. For example, with p=0.05 and the most likely token having a probability of 0.9, logits with a value less than 0.045 are filtered out. (Default: 0.0)": "",
+	"Alternative to the top_p, and aims to ensure a balance of quality and variety. The parameter p represents the minimum probability for a token to be considered, relative to the probability of the most likely token. For example, with p=0.05 and the most likely token having a probability of 0.9, logits with a value less than 0.045 are filtered out.": "",
 	"Always": "",
 	"Amazing": "",
 	"an assistant": "یک دستیار",
@@ -208,7 +208,7 @@
 	"Confirm your new password": "",
 	"Connect to your own OpenAI compatible API endpoints.": "",
 	"Connections": "ارتباطات",
-	"Constrains effort on reasoning for reasoning models. Only applicable to reasoning models from specific providers that support reasoning effort. (Default: medium)": "",
+	"Constrains effort on reasoning for reasoning models. Only applicable to reasoning models from specific providers that support reasoning effort.": "",
 	"Contact Admin for WebUI Access": "برای دسترسی به WebUI با مدیر تماس بگیرید",
 	"Content": "محتوا",
 	"Content Extraction Engine": "",
@@ -218,9 +218,9 @@
 	"Continue with Email": "",
 	"Continue with LDAP": "",
 	"Control how message text is split for TTS requests. 'Punctuation' splits into sentences, 'paragraphs' splits into paragraphs, and 'none' keeps the message as a single string.": "",
-	"Control the repetition of token sequences in the generated text. A higher value (e.g., 1.5) will penalize repetitions more strongly, while a lower value (e.g., 1.1) will be more lenient. At 1, it is disabled. (Default: 1.1)": "",
+	"Control the repetition of token sequences in the generated text. A higher value (e.g., 1.5) will penalize repetitions more strongly, while a lower value (e.g., 1.1) will be more lenient. At 1, it is disabled.": "",
 	"Controls": "کنترل\u200cها",
-	"Controls the balance between coherence and diversity of the output. A lower value will result in more focused and coherent text. (Default: 5.0)": "",
+	"Controls the balance between coherence and diversity of the output. A lower value will result in more focused and coherent text.": "",
 	"Copied": "کپی شد",
 	"Copied shared chat URL to clipboard!": "URL چت به کلیپ بورد کپی شد!",
 	"Copied to clipboard": "به بریده\u200cدان کپی\u200cشد",
@@ -358,7 +358,7 @@
 	"Enable Memory Locking (mlock) to prevent model data from being swapped out of RAM. This option locks the model's working set of pages into RAM, ensuring that they will not be swapped out to disk. This can help maintain performance by avoiding page faults and ensuring fast data access.": "",
 	"Enable Memory Mapping (mmap) to load model data. This option allows the system to use disk storage as an extension of RAM by treating disk files as if they were in RAM. This can improve model performance by allowing for faster data access. However, it may not work correctly with all systems and can consume a significant amount of disk space.": "",
 	"Enable Message Rating": "",
-	"Enable Mirostat sampling for controlling perplexity. (Default: 0, 0 = Disabled, 1 = Mirostat, 2 = Mirostat 2.0)": "",
+	"Enable Mirostat sampling for controlling perplexity.": "",
 	"Enable New Sign Ups": "فعال کردن ثبت نام\u200cهای جدید",
 	"Enabled": "",
 	"Ensure your CSV file includes 4 columns in this order: Name, Email, Password, Role.": "اطمینان حاصل کنید که فایل CSV شما شامل چهار ستون در این ترتیب است: نام، ایمیل، رمز عبور، نقش.",
@@ -566,7 +566,7 @@
 	"Include": "شامل",
 	"Include `--api-auth` flag when running stable-diffusion-webui": "",
 	"Include `--api` flag when running stable-diffusion-webui": "فلگ `--api` را هنکام اجرای stable-diffusion-webui استفاده کنید.",
-	"Influences how quickly the algorithm responds to feedback from the generated text. A lower learning rate will result in slower adjustments, while a higher learning rate will make the algorithm more responsive. (Default: 0.1)": "",
+	"Influences how quickly the algorithm responds to feedback from the generated text. A lower learning rate will result in slower adjustments, while a higher learning rate will make the algorithm more responsive.": "",
 	"Info": "اطلاعات",
 	"Input commands": "ورودی دستورات",
 	"Install from Github URL": "نصب از ادرس Github",
@@ -809,7 +809,7 @@
 	"Reasoning Effort": "",
 	"Record voice": "ضبط صدا",
 	"Redirecting you to Open WebUI Community": "در حال هدایت به OpenWebUI Community",
-	"Reduces the probability of generating nonsense. A higher value (e.g. 100) will give more diverse answers, while a lower value (e.g. 10) will be more conservative. (Default: 40)": "",
+	"Reduces the probability of generating nonsense. A higher value (e.g. 100) will give more diverse answers, while a lower value (e.g. 10) will be more conservative.": "",
 	"Refer to yourself as \"User\" (e.g., \"User is learning Spanish\")": "",
 	"References from": "",
 	"Refused when it shouldn't have": "رد شده زمانی که باید نباشد",
@@ -918,11 +918,11 @@
 	"Set the number of worker threads used for computation. This option controls how many threads are used to process incoming requests concurrently. Increasing this value can improve performance under high concurrency workloads but may also consume more CPU resources.": "",
 	"Set Voice": "تنظیم صدا",
 	"Set whisper model": "",
-	"Sets a flat bias against tokens that have appeared at least once. A higher value (e.g., 1.5) will penalize repetitions more strongly, while a lower value (e.g., 0.9) will be more lenient. At 0, it is disabled. (Default: 0)": "",
-	"Sets a scaling bias against tokens to penalize repetitions, based on how many times they have appeared. A higher value (e.g., 1.5) will penalize repetitions more strongly, while a lower value (e.g., 0.9) will be more lenient. At 0, it is disabled. (Default: 1.1)": "",
-	"Sets how far back for the model to look back to prevent repetition. (Default: 64, 0 = disabled, -1 = num_ctx)": "",
-	"Sets the random number seed to use for generation. Setting this to a specific number will make the model generate the same text for the same prompt. (Default: random)": "",
-	"Sets the size of the context window used to generate the next token. (Default: 2048)": "",
+	"Sets a flat bias against tokens that have appeared at least once. A higher value (e.g., 1.5) will penalize repetitions more strongly, while a lower value (e.g., 0.9) will be more lenient. At 0, it is disabled.": "",
+	"Sets a scaling bias against tokens to penalize repetitions, based on how many times they have appeared. A higher value (e.g., 1.5) will penalize repetitions more strongly, while a lower value (e.g., 0.9) will be more lenient. At 0, it is disabled.": "",
+	"Sets how far back for the model to look back to prevent repetition.": "",
+	"Sets the random number seed to use for generation. Setting this to a specific number will make the model generate the same text for the same prompt.": "",
+	"Sets the size of the context window used to generate the next token.": "",
 	"Sets the stop sequences to use. When this pattern is encountered, the LLM will stop generating text and return. Multiple stop patterns may be set by specifying multiple separate stop parameters in a modelfile.": "",
 	"Settings": "تنظیمات",
 	"Settings saved successfully!": "تنظیمات با موفقیت ذخیره شد!",
@@ -964,7 +964,7 @@
 	"System Prompt": "پرامپت سیستم",
 	"Tags Generation": "",
 	"Tags Generation Prompt": "",
-	"Tail free sampling is used to reduce the impact of less probable tokens from the output. A higher value (e.g., 2.0) will reduce the impact more, while a value of 1.0 disables this setting. (default: 1)": "",
+	"Tail free sampling is used to reduce the impact of less probable tokens from the output. A higher value (e.g., 2.0) will reduce the impact more, while a value of 1.0 disables this setting.": "",
 	"Talk to model": "",
 	"Tap to interrupt": "",
 	"Tasks": "",
@@ -979,7 +979,7 @@
 	"Thanks for your feedback!": "با تشکر از بازخورد شما!",
 	"The Application Account DN you bind with for search": "",
 	"The base to search for users": "",
-	"The batch size determines how many text requests are processed together at once. A higher batch size can increase the performance and speed of the model, but it also requires more memory.  (Default: 512)": "",
+	"The batch size determines how many text requests are processed together at once. A higher batch size can increase the performance and speed of the model, but it also requires more memory.": "",
 	"The developers behind this plugin are passionate volunteers from the community. If you find this plugin helpful, please consider contributing to its development.": "",
 	"The evaluation leaderboard is based on the Elo rating system and is updated in real-time.": "",
 	"The LDAP attribute that maps to the mail that users use to sign in.": "",
@@ -988,14 +988,14 @@
 	"The maximum file size in MB. If the file size exceeds this limit, the file will not be uploaded.": "",
 	"The maximum number of files that can be used at once in chat. If the number of files exceeds this limit, the files will not be uploaded.": "",
 	"The score should be a value between 0.0 (0%) and 1.0 (100%).": "امتیاز باید یک مقدار بین 0.0 (0%) و 1.0 (100%) باشد.",
-	"The temperature of the model. Increasing the temperature will make the model answer more creatively. (Default: 0.8)": "",
+	"The temperature of the model. Increasing the temperature will make the model answer more creatively.": "",
 	"Theme": "پوسته",
 	"Thinking...": "در حال فکر...",
 	"This action cannot be undone. Do you wish to continue?": "این اقدام قابل بازگردانی نیست. برای ادامه اطمینان دارید؟",
 	"This ensures that your valuable conversations are securely saved to your backend database. Thank you!": "این تضمین می کند که مکالمات ارزشمند شما به طور ایمن در پایگاه داده بکند ذخیره می شود. تشکر!",
 	"This is an experimental feature, it may not function as expected and is subject to change at any time.": "",
-	"This option controls how many tokens are preserved when refreshing the context. For example, if set to 2, the last 2 tokens of the conversation context will be retained. Preserving context can help maintain the continuity of a conversation, but it may reduce the ability to respond to new topics. (Default: 24)": "",
-	"This option sets the maximum number of tokens the model can generate in its response. Increasing this limit allows the model to provide longer answers, but it may also increase the likelihood of unhelpful or irrelevant content being generated.  (Default: 128)": "",
+	"This option controls how many tokens are preserved when refreshing the context. For example, if set to 2, the last 2 tokens of the conversation context will be retained. Preserving context can help maintain the continuity of a conversation, but it may reduce the ability to respond to new topics.": "",
+	"This option sets the maximum number of tokens the model can generate in its response. Increasing this limit allows the model to provide longer answers, but it may also increase the likelihood of unhelpful or irrelevant content being generated.": "",
 	"This option will delete all existing files in the collection and replace them with newly uploaded files.": "",
 	"This response was generated by \"{{model}}\"": "",
 	"This will delete": "",
@@ -1132,7 +1132,7 @@
 	"Why?": "",
 	"Widescreen Mode": "حالت صفحهٔ عریض",
 	"Won": "",
-	"Works together with top-k. A higher value (e.g., 0.95) will lead to more diverse text, while a lower value (e.g., 0.5) will generate more focused and conservative text. (Default: 0.9)": "",
+	"Works together with top-k. A higher value (e.g., 0.95) will lead to more diverse text, while a lower value (e.g., 0.5) will generate more focused and conservative text.": "",
 	"Workspace": "محیط کار",
 	"Workspace Permissions": "",
 	"Write": "",

+ 18 - 18
src/lib/i18n/locales/fi-FI/translation.json

@@ -64,7 +64,7 @@
 	"Allow Voice Interruption in Call": "Salli äänen keskeytys puhelussa",
 	"Allowed Endpoints": "Hyväksytyt päätepisteet",
 	"Already have an account?": "Onko sinulla jo tili?",
-	"Alternative to the top_p, and aims to ensure a balance of quality and variety. The parameter p represents the minimum probability for a token to be considered, relative to the probability of the most likely token. For example, with p=0.05 and the most likely token having a probability of 0.9, logits with a value less than 0.045 are filtered out. (Default: 0.0)": "Vaihtoehto top_p:lle, jolla pyritään varmistamaan laadun ja monipuolisuuden tasapaino. Parametri p edustaa pienintä todennäköisyyttä, jolla token otetaan huomioon suhteessa todennäköisimpään tokeniin. Esimerkiksi p=0.05 ja todennäköisin token todennäköisyydellä 0.9, arvoltaan alle 0.045 olevat logit suodatetaan pois. (Oletus: 0.0)",
+	"Alternative to the top_p, and aims to ensure a balance of quality and variety. The parameter p represents the minimum probability for a token to be considered, relative to the probability of the most likely token. For example, with p=0.05 and the most likely token having a probability of 0.9, logits with a value less than 0.045 are filtered out.": "",
 	"Always": "Aina",
 	"Amazing": "Hämmästyttävä",
 	"an assistant": "avustaja",
@@ -208,7 +208,7 @@
 	"Confirm your new password": "Vahvista uusi salasanasi",
 	"Connect to your own OpenAI compatible API endpoints.": "Yhdistä oma OpenAI yhteensopiva API päätepiste.",
 	"Connections": "Yhteydet",
-	"Constrains effort on reasoning for reasoning models. Only applicable to reasoning models from specific providers that support reasoning effort. (Default: medium)": "",
+	"Constrains effort on reasoning for reasoning models. Only applicable to reasoning models from specific providers that support reasoning effort.": "",
 	"Contact Admin for WebUI Access": "Ota yhteyttä ylläpitäjään WebUI-käyttöä varten",
 	"Content": "Sisältö",
 	"Content Extraction Engine": "",
@@ -218,9 +218,9 @@
 	"Continue with Email": "Jatka sähköpostilla",
 	"Continue with LDAP": "Jatka LDAP:illa",
 	"Control how message text is split for TTS requests. 'Punctuation' splits into sentences, 'paragraphs' splits into paragraphs, and 'none' keeps the message as a single string.": "Säädä, miten viestin teksti jaetaan puhesynteesipyyntöjä varten. 'Välimerkit' jakaa lauseisiin, 'kappaleet' jakaa kappaleisiin ja 'ei mitään' pitää viestin yhtenä merkkijonona.",
-	"Control the repetition of token sequences in the generated text. A higher value (e.g., 1.5) will penalize repetitions more strongly, while a lower value (e.g., 1.1) will be more lenient. At 1, it is disabled. (Default: 1.1)": "",
+	"Control the repetition of token sequences in the generated text. A higher value (e.g., 1.5) will penalize repetitions more strongly, while a lower value (e.g., 1.1) will be more lenient. At 1, it is disabled.": "",
 	"Controls": "Ohjaimet",
-	"Controls the balance between coherence and diversity of the output. A lower value will result in more focused and coherent text. (Default: 5.0)": "Säätelee tulosteen yhtenäisyyden ja monimuotoisuuden välistä tasapainoa. Alhaisempi arvo tuottaa keskittyneempää ja yhtenäisempää tekstiä. (Oletus: 5.0)",
+	"Controls the balance between coherence and diversity of the output. A lower value will result in more focused and coherent text.": "",
 	"Copied": "Kopioitu",
 	"Copied shared chat URL to clipboard!": "Jaettu keskustelulinkki kopioitu leikepöydälle!",
 	"Copied to clipboard": "Kopioitu leikepöydälle",
@@ -358,7 +358,7 @@
 	"Enable Memory Locking (mlock) to prevent model data from being swapped out of RAM. This option locks the model's working set of pages into RAM, ensuring that they will not be swapped out to disk. This can help maintain performance by avoiding page faults and ensuring fast data access.": "Ota Memory Locking (mlock) käyttöön estääksesi mallidatan vaihtamisen pois RAM-muistista. Tämä lukitsee mallin työsivut RAM-muistiin, varmistaen että niitä ei vaihdeta levylle. Tämä voi parantaa suorituskykyä välttämällä sivuvikoja ja varmistamalla nopean tietojen käytön.",
 	"Enable Memory Mapping (mmap) to load model data. This option allows the system to use disk storage as an extension of RAM by treating disk files as if they were in RAM. This can improve model performance by allowing for faster data access. However, it may not work correctly with all systems and can consume a significant amount of disk space.": "Ota Memory Mapping (mmap) käyttöön ladataksesi mallidataa. Tämä vaihtoehto sallii järjestelmän käyttää levytilaa RAM-laajennuksena käsittelemällä levytiedostoja kuin ne olisivat RAM-muistissa. Tämä voi parantaa mallin suorituskykyä sallimalla nopeamman tietojen käytön. Kuitenkin se ei välttämättä toimi oikein kaikissa järjestelmissä ja voi kuluttaa huomattavasti levytilaa.",
 	"Enable Message Rating": "Ota viestiarviointi käyttöön",
-	"Enable Mirostat sampling for controlling perplexity. (Default: 0, 0 = Disabled, 1 = Mirostat, 2 = Mirostat 2.0)": "Ota Mirostat-näytteenotto käyttöön hallinnan monimerkityksellisyydelle. (Oletus: 0, 0 = Ei käytössä, 1 = Mirostat, 2 = Mirostat 2.0)",
+	"Enable Mirostat sampling for controlling perplexity.": "",
 	"Enable New Sign Ups": "Salli uudet rekisteröitymiset",
 	"Enabled": "Käytössä",
 	"Ensure your CSV file includes 4 columns in this order: Name, Email, Password, Role.": "Varmista, että CSV-tiedostossasi on 4 saraketta tässä järjestyksessä: Nimi, Sähköposti, Salasana, Rooli.",
@@ -566,7 +566,7 @@
 	"Include": "Sisällytä",
 	"Include `--api-auth` flag when running stable-diffusion-webui": "Sisällytä `--api-auth`-lippu ajettaessa stable-diffusion-webui",
 	"Include `--api` flag when running stable-diffusion-webui": "Sisällytä `--api`-lippu ajettaessa stable-diffusion-webui",
-	"Influences how quickly the algorithm responds to feedback from the generated text. A lower learning rate will result in slower adjustments, while a higher learning rate will make the algorithm more responsive. (Default: 0.1)": "Vaikuttaa siihen, kuinka nopeasti algoritmi reagoi tuotetusta tekstistä saatuun palautteeseen. Alhaisempi oppimisaste johtaa hitaampiin säätöihin, kun taas korkeampi oppimisaste tekee algoritmista reaktiivisemman. (Oletus: 0.1)",
+	"Influences how quickly the algorithm responds to feedback from the generated text. A lower learning rate will result in slower adjustments, while a higher learning rate will make the algorithm more responsive.": "",
 	"Info": "Tiedot",
 	"Input commands": "Syötekäskyt",
 	"Install from Github URL": "Asenna Github-URL:stä",
@@ -809,7 +809,7 @@
 	"Reasoning Effort": "",
 	"Record voice": "Nauhoita ääntä",
 	"Redirecting you to Open WebUI Community": "Ohjataan sinut OpenWebUI-yhteisöön",
-	"Reduces the probability of generating nonsense. A higher value (e.g. 100) will give more diverse answers, while a lower value (e.g. 10) will be more conservative. (Default: 40)": "Vähentää merkityksetöntä sisältöä tuottavan todennäköisyyttä. Korkeampi arvo (esim. 100) antaa monipuolisempia vastauksia, kun taas alhaisempi arvo (esim. 10) on konservatiivisempi. (Oletus: 40)",
+	"Reduces the probability of generating nonsense. A higher value (e.g. 100) will give more diverse answers, while a lower value (e.g. 10) will be more conservative.": "",
 	"Refer to yourself as \"User\" (e.g., \"User is learning Spanish\")": "Viittaa itseen \"Käyttäjänä\" (esim. \"Käyttäjä opiskelee espanjaa\")",
 	"References from": "Viitteet lähteistä",
 	"Refused when it shouldn't have": "Kieltäytyi, vaikka ei olisi pitänyt",
@@ -918,11 +918,11 @@
 	"Set the number of worker threads used for computation. This option controls how many threads are used to process incoming requests concurrently. Increasing this value can improve performance under high concurrency workloads but may also consume more CPU resources.": "Aseta työntekijäsäikeiden määrä laskentaa varten. Tämä asetus kontrolloi, kuinka monta säiettä käytetään saapuvien pyyntöjen rinnakkaiseen käsittelyyn. Arvon kasvattaminen voi parantaa suorituskykyä suurissa samanaikaisissa työkuormissa, mutta voi myös kuluttaa enemmän keskussuorittimen resursseja.",
 	"Set Voice": "Aseta puheääni",
 	"Set whisper model": "Aseta whisper-malli",
-	"Sets a flat bias against tokens that have appeared at least once. A higher value (e.g., 1.5) will penalize repetitions more strongly, while a lower value (e.g., 0.9) will be more lenient. At 0, it is disabled. (Default: 0)": "",
-	"Sets a scaling bias against tokens to penalize repetitions, based on how many times they have appeared. A higher value (e.g., 1.5) will penalize repetitions more strongly, while a lower value (e.g., 0.9) will be more lenient. At 0, it is disabled. (Default: 1.1)": "",
-	"Sets how far back for the model to look back to prevent repetition. (Default: 64, 0 = disabled, -1 = num_ctx)": "Määrittää, kuinka kauas taaksepäin malli katsoo välttääkseen toistoa. (Oletus: 64, 0 = pois käytöstä, -1 = num_ctx)",
-	"Sets the random number seed to use for generation. Setting this to a specific number will make the model generate the same text for the same prompt. (Default: random)": "Määrittää satunnaislukujen siemenen käytettäväksi generoinnissa. Tämän asettaminen tiettyyn numeroon saa mallin tuottamaan saman tekstin samalle kehoteelle. (Oletus: satunnainen)",
-	"Sets the size of the context window used to generate the next token. (Default: 2048)": "Määrittää kontekstiikkunan koon, jota käytetään seuraavan tokenin tuottamiseen. (Oletus: 2048)",
+	"Sets a flat bias against tokens that have appeared at least once. A higher value (e.g., 1.5) will penalize repetitions more strongly, while a lower value (e.g., 0.9) will be more lenient. At 0, it is disabled.": "",
+	"Sets a scaling bias against tokens to penalize repetitions, based on how many times they have appeared. A higher value (e.g., 1.5) will penalize repetitions more strongly, while a lower value (e.g., 0.9) will be more lenient. At 0, it is disabled.": "",
+	"Sets how far back for the model to look back to prevent repetition.": "",
+	"Sets the random number seed to use for generation. Setting this to a specific number will make the model generate the same text for the same prompt.": "",
+	"Sets the size of the context window used to generate the next token.": "",
 	"Sets the stop sequences to use. When this pattern is encountered, the LLM will stop generating text and return. Multiple stop patterns may be set by specifying multiple separate stop parameters in a modelfile.": "Määrittää käytettävät lopetussekvenssit. Kun tämä kuvio havaitaan, LLM lopettaa tekstin tuottamisen ja palauttaa. Useita lopetuskuvioita voidaan asettaa määrittämällä useita erillisiä lopetusparametreja mallitiedostoon.",
 	"Settings": "Asetukset",
 	"Settings saved successfully!": "Asetukset tallennettu onnistuneesti!",
@@ -964,7 +964,7 @@
 	"System Prompt": "Järjestelmäkehote",
 	"Tags Generation": "Tagien luonti",
 	"Tags Generation Prompt": "Tagien luontikehote",
-	"Tail free sampling is used to reduce the impact of less probable tokens from the output. A higher value (e.g., 2.0) will reduce the impact more, while a value of 1.0 disables this setting. (default: 1)": "Tail-free-otanta käytetään vähentämään vähemmän todennäköisten tokenien vaikutusta tulokseen. Korkeampi arvo (esim. 2,0) vähentää vaikutusta enemmän, kun taas arvo 1,0 poistaa tämän asetuksen käytöstä. (oletus: 1)",
+	"Tail free sampling is used to reduce the impact of less probable tokens from the output. A higher value (e.g., 2.0) will reduce the impact more, while a value of 1.0 disables this setting.": "",
 	"Talk to model": "",
 	"Tap to interrupt": "Napauta keskeyttääksesi",
 	"Tasks": "Tehtävät",
@@ -979,7 +979,7 @@
 	"Thanks for your feedback!": "Kiitos palautteestasi!",
 	"The Application Account DN you bind with for search": "Hakua varten sidottu sovelluksen käyttäjätilin DN",
 	"The base to search for users": "Käyttäjien haun perusta",
-	"The batch size determines how many text requests are processed together at once. A higher batch size can increase the performance and speed of the model, but it also requires more memory.  (Default: 512)": "Erän koko määrittää, kuinka monta tekstipyyntöä käsitellään yhdessä kerralla. Suurempi erän koko voi parantaa mallin suorituskykyä ja nopeutta, mutta se vaatii myös enemmän muistia. (Oletus: 512)",
+	"The batch size determines how many text requests are processed together at once. A higher batch size can increase the performance and speed of the model, but it also requires more memory.": "",
 	"The developers behind this plugin are passionate volunteers from the community. If you find this plugin helpful, please consider contributing to its development.": "Tämän lisäosan takana olevat kehittäjät ovat intohimoisia vapaaehtoisyhteisöstä. Jos koet tämän lisäosan hyödylliseksi, harkitse sen kehittämisen tukemista.",
 	"The evaluation leaderboard is based on the Elo rating system and is updated in real-time.": "Arviointitulosluettelo perustuu Elo-luokitusjärjestelmään ja päivittyy reaaliajassa.",
 	"The LDAP attribute that maps to the mail that users use to sign in.": "",
@@ -988,14 +988,14 @@
 	"The maximum file size in MB. If the file size exceeds this limit, the file will not be uploaded.": "Enimmäistiedostokoko megatavuissa. Jos tiedoston koko ylittää tämän rajan, tiedostoa ei ladata.",
 	"The maximum number of files that can be used at once in chat. If the number of files exceeds this limit, the files will not be uploaded.": "Suurin sallittu tiedostojen määrä käytettäväksi kerralla chatissa. Jos tiedostojen määrä ylittää tämän rajan, niitä ei ladata.",
 	"The score should be a value between 0.0 (0%) and 1.0 (100%).": "Pisteytyksen tulee olla arvo välillä 0,0 (0 %) ja 1,0 (100 %).",
-	"The temperature of the model. Increasing the temperature will make the model answer more creatively. (Default: 0.8)": "Mallin lämpötila. Lämpötilan nostaminen saa mallin vastaamaan luovemmin. (Oletus: 0,8)",
+	"The temperature of the model. Increasing the temperature will make the model answer more creatively.": "",
 	"Theme": "Teema",
 	"Thinking...": "Ajattelee...",
 	"This action cannot be undone. Do you wish to continue?": "Tätä toimintoa ei voi peruuttaa. Haluatko jatkaa?",
 	"This ensures that your valuable conversations are securely saved to your backend database. Thank you!": "Tämä varmistaa, että arvokkaat keskustelusi tallennetaan turvallisesti backend-tietokantaasi. Kiitos!",
 	"This is an experimental feature, it may not function as expected and is subject to change at any time.": "Tämä on kokeellinen ominaisuus, se ei välttämättä toimi odotetulla tavalla ja se voi muuttua milloin tahansa.",
-	"This option controls how many tokens are preserved when refreshing the context. For example, if set to 2, the last 2 tokens of the conversation context will be retained. Preserving context can help maintain the continuity of a conversation, but it may reduce the ability to respond to new topics. (Default: 24)": "Tämä asetus kontrolloi, kuinka monta tokenia säilytetään päivittäessä kontekstia. Esimerkiksi, jos asetetaan arvoksi 2, säilytetään viimeiset 2 keskustelukon-tekstin tokenia. Kontekstin säilyttäminen voi auttaa ylläpitämään keskustelun jatkuvuutta, mutta se voi vähentää kykyä vastata uusiin aiheisiin. (Oletus: 24)",
-	"This option sets the maximum number of tokens the model can generate in its response. Increasing this limit allows the model to provide longer answers, but it may also increase the likelihood of unhelpful or irrelevant content being generated.  (Default: 128)": "Tämä asetus määrittää mallin vastauksen enimmäistokenmäärän. Tämän rajan nostaminen mahdollistaa mallin antavan pidempiä vastauksia, mutta se voi myös lisätä epähyödyllisen tai epärelevantin sisällön todennäköisyyttä. (Oletus: 128)",
+	"This option controls how many tokens are preserved when refreshing the context. For example, if set to 2, the last 2 tokens of the conversation context will be retained. Preserving context can help maintain the continuity of a conversation, but it may reduce the ability to respond to new topics.": "",
+	"This option sets the maximum number of tokens the model can generate in its response. Increasing this limit allows the model to provide longer answers, but it may also increase the likelihood of unhelpful or irrelevant content being generated.": "",
 	"This option will delete all existing files in the collection and replace them with newly uploaded files.": "Tämä vaihtoehto poistaa kaikki kokoelman nykyiset tiedostot ja korvaa ne uusilla ladatuilla tiedostoilla.",
 	"This response was generated by \"{{model}}\"": "Tämän vastauksen tuotti \"{{model}}\"",
 	"This will delete": "Tämä poistaa",
@@ -1132,7 +1132,7 @@
 	"Why?": "Miksi?",
 	"Widescreen Mode": "Laajakuvatila",
 	"Won": "Voitti",
-	"Works together with top-k. A higher value (e.g., 0.95) will lead to more diverse text, while a lower value (e.g., 0.5) will generate more focused and conservative text. (Default: 0.9)": "Toimii yhdessä top-k:n kanssa. Korkeampi arvo (esim. 0,95) tuottaa monipuolisempaa tekstiä, kun taas alhaisempi arvo (esim. 0,5) tuottaa keskittyneempää ja konservatiivisempaa tekstiä. (Oletus: 0,9)",
+	"Works together with top-k. A higher value (e.g., 0.95) will lead to more diverse text, while a lower value (e.g., 0.5) will generate more focused and conservative text.": "",
 	"Workspace": "Työtila",
 	"Workspace Permissions": "Työtilan käyttöoikeudet",
 	"Write": "Kirjoita",

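The fi-FI block above touches the strings that document Ollama-style sampling options (min_p, top_k, top_p, temperature). A hedged sketch of the option shape those descriptions suggest; the numbers are only the defaults the removed suffixes quoted, and the model name and request shape are illustrative:

    // Values are the defaults quoted by the removed "(Default: ...)"
    // suffixes; 'llama3' and the payload shape are illustrative only.
    type SamplingOptions = {
      temperature?: number; // higher => more creative answers
      top_k?: number;       // e.g. 100 = diverse, 10 = conservative
      top_p?: number;       // works together with top_k
      min_p?: number;       // alternative to top_p, filters relative to the top token
    };

    const options: SamplingOptions = {
      temperature: 0.8,
      top_k: 40,
      top_p: 0.9,
      min_p: 0.0
    };

    const body = JSON.stringify({ model: 'llama3', prompt: 'Hello', options });
    console.log(body);
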
+ 18 - 18
src/lib/i18n/locales/fr-CA/translation.json

@@ -64,7 +64,7 @@
 	"Allow Voice Interruption in Call": "Autoriser l'interruption vocale pendant un appel",
 	"Allowed Endpoints": "",
 	"Already have an account?": "Avez-vous déjà un compte ?",
-	"Alternative to the top_p, and aims to ensure a balance of quality and variety. The parameter p represents the minimum probability for a token to be considered, relative to the probability of the most likely token. For example, with p=0.05 and the most likely token having a probability of 0.9, logits with a value less than 0.045 are filtered out. (Default: 0.0)": "",
+	"Alternative to the top_p, and aims to ensure a balance of quality and variety. The parameter p represents the minimum probability for a token to be considered, relative to the probability of the most likely token. For example, with p=0.05 and the most likely token having a probability of 0.9, logits with a value less than 0.045 are filtered out.": "",
 	"Always": "",
 	"Amazing": "",
 	"an assistant": "un assistant",
@@ -208,7 +208,7 @@
 	"Confirm your new password": "",
 	"Connect to your own OpenAI compatible API endpoints.": "",
 	"Connections": "Connexions",
-	"Constrains effort on reasoning for reasoning models. Only applicable to reasoning models from specific providers that support reasoning effort. (Default: medium)": "",
+	"Constrains effort on reasoning for reasoning models. Only applicable to reasoning models from specific providers that support reasoning effort.": "",
 	"Contact Admin for WebUI Access": "Contacter l'administrateur pour l'accès à l'interface Web",
 	"Content": "Contenu",
 	"Content Extraction Engine": "",
@@ -218,9 +218,9 @@
 	"Continue with Email": "",
 	"Continue with LDAP": "",
 	"Control how message text is split for TTS requests. 'Punctuation' splits into sentences, 'paragraphs' splits into paragraphs, and 'none' keeps the message as a single string.": "Contrôle comment le texte des messages est divisé pour les demandes de TTS. 'Ponctuation' divise en phrases, 'paragraphes' divise en paragraphes et 'aucun' garde le message comme une seule chaîne.",
-	"Control the repetition of token sequences in the generated text. A higher value (e.g., 1.5) will penalize repetitions more strongly, while a lower value (e.g., 1.1) will be more lenient. At 1, it is disabled. (Default: 1.1)": "",
+	"Control the repetition of token sequences in the generated text. A higher value (e.g., 1.5) will penalize repetitions more strongly, while a lower value (e.g., 1.1) will be more lenient. At 1, it is disabled.": "",
 	"Controls": "Contrôles",
-	"Controls the balance between coherence and diversity of the output. A lower value will result in more focused and coherent text. (Default: 5.0)": "",
+	"Controls the balance between coherence and diversity of the output. A lower value will result in more focused and coherent text.": "",
 	"Copied": "",
 	"Copied shared chat URL to clipboard!": "URL du chat copiée dans le presse-papiers\u00a0!",
 	"Copied to clipboard": "",
@@ -358,7 +358,7 @@
 	"Enable Memory Locking (mlock) to prevent model data from being swapped out of RAM. This option locks the model's working set of pages into RAM, ensuring that they will not be swapped out to disk. This can help maintain performance by avoiding page faults and ensuring fast data access.": "",
 	"Enable Memory Mapping (mmap) to load model data. This option allows the system to use disk storage as an extension of RAM by treating disk files as if they were in RAM. This can improve model performance by allowing for faster data access. However, it may not work correctly with all systems and can consume a significant amount of disk space.": "",
 	"Enable Message Rating": "",
-	"Enable Mirostat sampling for controlling perplexity. (Default: 0, 0 = Disabled, 1 = Mirostat, 2 = Mirostat 2.0)": "",
+	"Enable Mirostat sampling for controlling perplexity.": "",
 	"Enable New Sign Ups": "Activer les nouvelles inscriptions",
 	"Enabled": "",
 	"Ensure your CSV file includes 4 columns in this order: Name, Email, Password, Role.": "Vérifiez que votre fichier CSV comprenne les 4 colonnes dans cet ordre : Name, Email, Password, Role.",
@@ -566,7 +566,7 @@
 	"Include": "",
 	"Include `--api-auth` flag when running stable-diffusion-webui": "Inclure le drapeau `--api-auth` lors de l'exécution de stable-diffusion-webui",
 	"Include `--api` flag when running stable-diffusion-webui": "Inclure le drapeau `--api` lorsque vous exécutez stable-diffusion-webui",
-	"Influences how quickly the algorithm responds to feedback from the generated text. A lower learning rate will result in slower adjustments, while a higher learning rate will make the algorithm more responsive. (Default: 0.1)": "",
+	"Influences how quickly the algorithm responds to feedback from the generated text. A lower learning rate will result in slower adjustments, while a higher learning rate will make the algorithm more responsive.": "",
 	"Info": "Info",
 	"Input commands": "Entrez les commandes",
 	"Install from Github URL": "Installer depuis l'URL GitHub",
@@ -809,7 +809,7 @@
 	"Reasoning Effort": "",
 	"Record voice": "Enregistrer la voix",
 	"Redirecting you to Open WebUI Community": "Redirection vers la communauté OpenWebUI",
-	"Reduces the probability of generating nonsense. A higher value (e.g. 100) will give more diverse answers, while a lower value (e.g. 10) will be more conservative. (Default: 40)": "",
+	"Reduces the probability of generating nonsense. A higher value (e.g. 100) will give more diverse answers, while a lower value (e.g. 10) will be more conservative.": "",
 	"Refer to yourself as \"User\" (e.g., \"User is learning Spanish\")": "Désignez-vous comme « Utilisateur » (par ex. « L'utilisateur apprend l'espagnol »)",
 	"References from": "",
 	"Refused when it shouldn't have": "Refusé alors qu'il n'aurait pas dû l'être",
@@ -918,11 +918,11 @@
 	"Set the number of worker threads used for computation. This option controls how many threads are used to process incoming requests concurrently. Increasing this value can improve performance under high concurrency workloads but may also consume more CPU resources.": "",
 	"Set Voice": "Définir la voix",
 	"Set whisper model": "",
-	"Sets a flat bias against tokens that have appeared at least once. A higher value (e.g., 1.5) will penalize repetitions more strongly, while a lower value (e.g., 0.9) will be more lenient. At 0, it is disabled. (Default: 0)": "",
-	"Sets a scaling bias against tokens to penalize repetitions, based on how many times they have appeared. A higher value (e.g., 1.5) will penalize repetitions more strongly, while a lower value (e.g., 0.9) will be more lenient. At 0, it is disabled. (Default: 1.1)": "",
-	"Sets how far back for the model to look back to prevent repetition. (Default: 64, 0 = disabled, -1 = num_ctx)": "",
-	"Sets the random number seed to use for generation. Setting this to a specific number will make the model generate the same text for the same prompt. (Default: random)": "",
-	"Sets the size of the context window used to generate the next token. (Default: 2048)": "",
+	"Sets a flat bias against tokens that have appeared at least once. A higher value (e.g., 1.5) will penalize repetitions more strongly, while a lower value (e.g., 0.9) will be more lenient. At 0, it is disabled.": "",
+	"Sets a scaling bias against tokens to penalize repetitions, based on how many times they have appeared. A higher value (e.g., 1.5) will penalize repetitions more strongly, while a lower value (e.g., 0.9) will be more lenient. At 0, it is disabled.": "",
+	"Sets how far back for the model to look back to prevent repetition.": "",
+	"Sets the random number seed to use for generation. Setting this to a specific number will make the model generate the same text for the same prompt.": "",
+	"Sets the size of the context window used to generate the next token.": "",
 	"Sets the stop sequences to use. When this pattern is encountered, the LLM will stop generating text and return. Multiple stop patterns may be set by specifying multiple separate stop parameters in a modelfile.": "",
 	"Settings": "Paramètres",
 	"Settings saved successfully!": "Paramètres enregistrés avec succès !",
@@ -964,7 +964,7 @@
 	"System Prompt": "Prompt du système",
 	"Tags Generation": "",
 	"Tags Generation Prompt": "",
-	"Tail free sampling is used to reduce the impact of less probable tokens from the output. A higher value (e.g., 2.0) will reduce the impact more, while a value of 1.0 disables this setting. (default: 1)": "",
+	"Tail free sampling is used to reduce the impact of less probable tokens from the output. A higher value (e.g., 2.0) will reduce the impact more, while a value of 1.0 disables this setting.": "",
 	"Talk to model": "",
 	"Tap to interrupt": "Appuyez pour interrompre",
 	"Tasks": "",
@@ -979,7 +979,7 @@
 	"Thanks for your feedback!": "Merci pour vos commentaires !",
 	"The Application Account DN you bind with for search": "",
 	"The base to search for users": "",
-	"The batch size determines how many text requests are processed together at once. A higher batch size can increase the performance and speed of the model, but it also requires more memory.  (Default: 512)": "",
+	"The batch size determines how many text requests are processed together at once. A higher batch size can increase the performance and speed of the model, but it also requires more memory.": "",
 	"The developers behind this plugin are passionate volunteers from the community. If you find this plugin helpful, please consider contributing to its development.": "",
 	"The evaluation leaderboard is based on the Elo rating system and is updated in real-time.": "",
 	"The LDAP attribute that maps to the mail that users use to sign in.": "",
@@ -988,14 +988,14 @@
 	"The maximum file size in MB. If the file size exceeds this limit, the file will not be uploaded.": "",
 	"The maximum number of files that can be used at once in chat. If the number of files exceeds this limit, the files will not be uploaded.": "",
 	"The score should be a value between 0.0 (0%) and 1.0 (100%).": "Le score doit être une valeur comprise entre 0,0 (0\u00a0%) et 1,0 (100\u00a0%).",
-	"The temperature of the model. Increasing the temperature will make the model answer more creatively. (Default: 0.8)": "",
+	"The temperature of the model. Increasing the temperature will make the model answer more creatively.": "",
 	"Theme": "Thème",
 	"Thinking...": "En train de réfléchir...",
 	"This action cannot be undone. Do you wish to continue?": "Cette action ne peut pas être annulée. Souhaitez-vous continuer ?",
 	"This ensures that your valuable conversations are securely saved to your backend database. Thank you!": "Cela garantit que vos conversations précieuses soient sauvegardées en toute sécurité dans votre base de données backend. Merci !",
 	"This is an experimental feature, it may not function as expected and is subject to change at any time.": "Il s'agit d'une fonctionnalité expérimentale, elle peut ne pas fonctionner comme prévu et est sujette à modification à tout moment.",
-	"This option controls how many tokens are preserved when refreshing the context. For example, if set to 2, the last 2 tokens of the conversation context will be retained. Preserving context can help maintain the continuity of a conversation, but it may reduce the ability to respond to new topics. (Default: 24)": "",
-	"This option sets the maximum number of tokens the model can generate in its response. Increasing this limit allows the model to provide longer answers, but it may also increase the likelihood of unhelpful or irrelevant content being generated.  (Default: 128)": "",
+	"This option controls how many tokens are preserved when refreshing the context. For example, if set to 2, the last 2 tokens of the conversation context will be retained. Preserving context can help maintain the continuity of a conversation, but it may reduce the ability to respond to new topics.": "",
+	"This option sets the maximum number of tokens the model can generate in its response. Increasing this limit allows the model to provide longer answers, but it may also increase the likelihood of unhelpful or irrelevant content being generated.": "",
 	"This option will delete all existing files in the collection and replace them with newly uploaded files.": "",
 	"This response was generated by \"{{model}}\"": "",
 	"This will delete": "Cela supprimera",
@@ -1132,7 +1132,7 @@
 	"Why?": "",
 	"Widescreen Mode": "Mode Grand Écran",
 	"Won": "",
-	"Works together with top-k. A higher value (e.g., 0.95) will lead to more diverse text, while a lower value (e.g., 0.5) will generate more focused and conservative text. (Default: 0.9)": "",
+	"Works together with top-k. A higher value (e.g., 0.95) will lead to more diverse text, while a lower value (e.g., 0.5) will generate more focused and conservative text.": "",
 	"Workspace": "Espace de travail",
 	"Workspace Permissions": "",
 	"Write": "",

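The three Mirostat strings in the fr-CA block above describe a mode switch, a learning rate, and a coherence/diversity balance; the usual Ollama spellings for these are mirostat, mirostat_eta, and mirostat_tau (assumed here, since the keys do not name the fields). A small sketch with the defaults the old suffixes quoted:

    // Field names assumed; defaults are the removed values (0, 0.1, 5.0).
    type MirostatOptions = {
      mirostat: 0 | 1 | 2;  // 0 = disabled, 1 = Mirostat, 2 = Mirostat 2.0
      mirostat_eta: number; // learning rate: lower = slower adjustments
      mirostat_tau: number; // lower = more focused, coherent output
    };

    const mirostatDefaults: MirostatOptions = {
      mirostat: 0,
      mirostat_eta: 0.1,
      mirostat_tau: 5.0
    };
    console.log(mirostatDefaults);
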
+ 18 - 18
src/lib/i18n/locales/fr-FR/translation.json

@@ -64,7 +64,7 @@
 	"Allow Voice Interruption in Call": "Autoriser l'interruption vocale pendant un appel",
 	"Allowed Endpoints": "Points de terminaison autorisés",
 	"Already have an account?": "Avez-vous déjà un compte ?",
-	"Alternative to the top_p, and aims to ensure a balance of quality and variety. The parameter p represents the minimum probability for a token to be considered, relative to the probability of the most likely token. For example, with p=0.05 and the most likely token having a probability of 0.9, logits with a value less than 0.045 are filtered out. (Default: 0.0)": "Alternative au top_p, visant à assurer un équilibre entre qualité et variété. Le paramètre p représente la probabilité minimale pour qu'un token soit pris en compte, par rapport à la probabilité du token le plus probable. Par exemple, avec p=0.05 et le token le plus probable ayant une probabilité de 0.9, les logits ayant une valeur inférieure à 0.045 sont filtrés. (Par défaut : 0.0)",
+	"Alternative to the top_p, and aims to ensure a balance of quality and variety. The parameter p represents the minimum probability for a token to be considered, relative to the probability of the most likely token. For example, with p=0.05 and the most likely token having a probability of 0.9, logits with a value less than 0.045 are filtered out.": "",
 	"Always": "",
 	"Amazing": "Incroyable",
 	"an assistant": "un assistant",
@@ -208,7 +208,7 @@
 	"Confirm your new password": "Confirmer votre nouveau mot de passe",
 	"Connect to your own OpenAI compatible API endpoints.": "",
 	"Connections": "Connexions",
-	"Constrains effort on reasoning for reasoning models. Only applicable to reasoning models from specific providers that support reasoning effort. (Default: medium)": "Contraint l'effort de raisonnement pour les modèles de raisonnement. Applicable uniquement aux modèles de raisonnement de fournisseurs spécifiques qui prennent en charge l'effort de raisonnement. (Par défaut : medium)",
+	"Constrains effort on reasoning for reasoning models. Only applicable to reasoning models from specific providers that support reasoning effort.": "",
 	"Contact Admin for WebUI Access": "Contacter l'administrateur pour obtenir l'accès à WebUI",
 	"Content": "Contenu",
 	"Content Extraction Engine": "",
@@ -218,9 +218,9 @@
 	"Continue with Email": "Continuer avec l'email",
 	"Continue with LDAP": "Continuer avec LDAP",
 	"Control how message text is split for TTS requests. 'Punctuation' splits into sentences, 'paragraphs' splits into paragraphs, and 'none' keeps the message as a single string.": "Contrôle la façon dont le texte des messages est divisé pour les demandes de Text-to-Speech. « ponctuation » divise en phrases, « paragraphes » divise en paragraphes et « aucun » garde le message en tant que chaîne de texte unique.",
-	"Control the repetition of token sequences in the generated text. A higher value (e.g., 1.5) will penalize repetitions more strongly, while a lower value (e.g., 1.1) will be more lenient. At 1, it is disabled. (Default: 1.1)": "",
+	"Control the repetition of token sequences in the generated text. A higher value (e.g., 1.5) will penalize repetitions more strongly, while a lower value (e.g., 1.1) will be more lenient. At 1, it is disabled.": "",
 	"Controls": "Contrôles",
-	"Controls the balance between coherence and diversity of the output. A lower value will result in more focused and coherent text. (Default: 5.0)": "Contrôle l'équilibre entre la cohérence et la diversité de la sortie. Une valeur plus basse produira un texte plus focalisé et cohérent. (Par défaut : 5.0)",
+	"Controls the balance between coherence and diversity of the output. A lower value will result in more focused and coherent text.": "",
 	"Copied": "Copié",
 	"Copied shared chat URL to clipboard!": "URL du chat copié dans le presse-papiers !",
 	"Copied to clipboard": "Copié dans le presse-papiers",
@@ -358,7 +358,7 @@
 	"Enable Memory Locking (mlock) to prevent model data from being swapped out of RAM. This option locks the model's working set of pages into RAM, ensuring that they will not be swapped out to disk. This can help maintain performance by avoiding page faults and ensuring fast data access.": "Activer le verrouillage de la mémoire (mlock) pour empêcher les données du modèle d'être échangées de la RAM. Cette option verrouille l'ensemble de pages de travail du modèle en RAM, garantissant qu'elles ne seront pas échangées vers le disque. Cela peut aider à maintenir les performances en évitant les défauts de page et en assurant un accès rapide aux données.",
 	"Enable Memory Mapping (mmap) to load model data. This option allows the system to use disk storage as an extension of RAM by treating disk files as if they were in RAM. This can improve model performance by allowing for faster data access. However, it may not work correctly with all systems and can consume a significant amount of disk space.": "Activer le mappage de la mémoire (mmap) pour charger les données du modèle. Cette option permet au système d'utiliser le stockage disque comme une extension de la RAM en traitant les fichiers disque comme s'ils étaient en RAM. Cela peut améliorer les performances du modèle en permettant un accès plus rapide aux données. Cependant, cela peut ne pas fonctionner correctement avec tous les systèmes et peut consommer une quantité significative d'espace disque.",
 	"Enable Message Rating": "Activer l'évaluation des messages",
-	"Enable Mirostat sampling for controlling perplexity. (Default: 0, 0 = Disabled, 1 = Mirostat, 2 = Mirostat 2.0)": "Activer l'échantillonnage Mirostat pour contrôler la perplexité. (Par défaut : 0, 0 = Désactivé, 1 = Mirostat, 2 = Mirostat 2.0)",
+	"Enable Mirostat sampling for controlling perplexity.": "",
 	"Enable New Sign Ups": "Activer les nouvelles inscriptions",
 	"Enabled": "Activé",
 	"Ensure your CSV file includes 4 columns in this order: Name, Email, Password, Role.": "Vérifiez que votre fichier CSV comprenne les 4 colonnes dans cet ordre : Name, Email, Password, Role.",
@@ -566,7 +566,7 @@
 	"Include": "Inclure",
 	"Include `--api-auth` flag when running stable-diffusion-webui": "Inclure le drapeau `--api-auth` lors de l'exécution de stable-diffusion-webui",
 	"Include `--api` flag when running stable-diffusion-webui": "Inclure le drapeau `--api` lorsque vous exécutez stable-diffusion-webui",
-	"Influences how quickly the algorithm responds to feedback from the generated text. A lower learning rate will result in slower adjustments, while a higher learning rate will make the algorithm more responsive. (Default: 0.1)": "Influence la rapidité avec laquelle l'algorithme répond aux retours du texte généré. Un taux d'apprentissage plus bas entraînera des ajustements plus lents, tandis qu'un taux d'apprentissage plus élevé rendra l'algorithme plus réactif. (Par défaut : 0.1)",
+	"Influences how quickly the algorithm responds to feedback from the generated text. A lower learning rate will result in slower adjustments, while a higher learning rate will make the algorithm more responsive.": "",
 	"Info": "Info",
 	"Input commands": "Commandes d'entrée",
 	"Install from Github URL": "Installer depuis une URL GitHub",
@@ -809,7 +809,7 @@
 	"Reasoning Effort": "Effort de raisonnement",
 	"Record voice": "Enregistrer la voix",
 	"Redirecting you to Open WebUI Community": "Redirection vers la communauté OpenWebUI",
-	"Reduces the probability of generating nonsense. A higher value (e.g. 100) will give more diverse answers, while a lower value (e.g. 10) will be more conservative. (Default: 40)": "Réduit la probabilité de générer des non-sens. Une valeur plus élevée (par exemple 100) donnera des réponses plus diversifiées, tandis qu'une valeur plus basse (par exemple 10) sera plus conservatrice. (Par défaut : 40)",
+	"Reduces the probability of generating nonsense. A higher value (e.g. 100) will give more diverse answers, while a lower value (e.g. 10) will be more conservative.": "",
 	"Refer to yourself as \"User\" (e.g., \"User is learning Spanish\")": "Désignez-vous comme « Utilisateur » (par ex. « L'utilisateur apprend l'espagnol »)",
 	"References from": "Références de",
 	"Refused when it shouldn't have": "Refusé alors qu'il n'aurait pas dû l'être",
@@ -918,11 +918,11 @@
 	"Set the number of worker threads used for computation. This option controls how many threads are used to process incoming requests concurrently. Increasing this value can improve performance under high concurrency workloads but may also consume more CPU resources.": "Définir le nombre de threads de travail utilisés pour le calcul. Cette option contrôle combien de threads sont utilisés pour traiter les demandes entrantes simultanément. L'augmentation de cette valeur peut améliorer les performances sous de fortes charges de travail concurrentes mais peut également consommer plus de ressources CPU.",
 	"Set Voice": "Choisir la voix",
 	"Set whisper model": "Choisir le modèle Whisper",
-	"Sets a flat bias against tokens that have appeared at least once. A higher value (e.g., 1.5) will penalize repetitions more strongly, while a lower value (e.g., 0.9) will be more lenient. At 0, it is disabled. (Default: 0)": "",
-	"Sets a scaling bias against tokens to penalize repetitions, based on how many times they have appeared. A higher value (e.g., 1.5) will penalize repetitions more strongly, while a lower value (e.g., 0.9) will be more lenient. At 0, it is disabled. (Default: 1.1)": "",
-	"Sets how far back for the model to look back to prevent repetition. (Default: 64, 0 = disabled, -1 = num_ctx)": "Définit la profondeur de recherche du modèle pour prévenir les répétitions. (Par défaut : 64, 0 = désactivé, -1 = num_ctx)",
-	"Sets the random number seed to use for generation. Setting this to a specific number will make the model generate the same text for the same prompt. (Default: random)": "Définit la graine de nombre aléatoire à utiliser pour la génération. La définition de cette valeur à un nombre spécifique fera que le modèle générera le même texte pour le même prompt. (Par défaut : aléatoire)",
-	"Sets the size of the context window used to generate the next token. (Default: 2048)": "Définit la taille de la fenêtre contextuelle utilisée pour générer le prochain token. (Par défaut : 2048)",
+	"Sets a flat bias against tokens that have appeared at least once. A higher value (e.g., 1.5) will penalize repetitions more strongly, while a lower value (e.g., 0.9) will be more lenient. At 0, it is disabled.": "",
+	"Sets a scaling bias against tokens to penalize repetitions, based on how many times they have appeared. A higher value (e.g., 1.5) will penalize repetitions more strongly, while a lower value (e.g., 0.9) will be more lenient. At 0, it is disabled.": "",
+	"Sets how far back for the model to look back to prevent repetition.": "",
+	"Sets the random number seed to use for generation. Setting this to a specific number will make the model generate the same text for the same prompt.": "",
+	"Sets the size of the context window used to generate the next token.": "",
 	"Sets the stop sequences to use. When this pattern is encountered, the LLM will stop generating text and return. Multiple stop patterns may be set by specifying multiple separate stop parameters in a modelfile.": "Définit les séquences d'arrêt à utiliser. Lorsque ce motif est rencontré, le LLM cessera de générer du texte et retournera. Plusieurs motifs d'arrêt peuvent être définis en spécifiant plusieurs paramètres d'arrêt distincts dans un fichier modèle.",
 	"Settings": "Paramètres",
 	"Settings saved successfully!": "Paramètres enregistrés avec succès !",
@@ -964,7 +964,7 @@
 	"System Prompt": "Prompt système",
 	"Tags Generation": "Génération de tags",
 	"Tags Generation Prompt": "Prompt de génération de tags",
-	"Tail free sampling is used to reduce the impact of less probable tokens from the output. A higher value (e.g., 2.0) will reduce the impact more, while a value of 1.0 disables this setting. (default: 1)": "L'échantillonnage sans queue est utilisé pour réduire l'impact des tokens moins probables dans la sortie. Une valeur plus élevée (par exemple 2.0) réduira davantage l'impact, tandis qu'une valeur de 1.0 désactive ce paramètre. (par défaut : 1)",
+	"Tail free sampling is used to reduce the impact of less probable tokens from the output. A higher value (e.g., 2.0) will reduce the impact more, while a value of 1.0 disables this setting.": "",
 	"Talk to model": "Parler au modèle",
 	"Tap to interrupt": "Appuyez pour interrompre",
 	"Tasks": "Tâches",
@@ -979,7 +979,7 @@
 	"Thanks for your feedback!": "Merci pour vos commentaires !",
 	"The Application Account DN you bind with for search": "Le DN du compte de l'application avec lequel vous vous liez pour la recherche",
 	"The base to search for users": "La base pour rechercher des utilisateurs",
-	"The batch size determines how many text requests are processed together at once. A higher batch size can increase the performance and speed of the model, but it also requires more memory.  (Default: 512)": "La taille de lot détermine combien de demandes de texte sont traitées ensemble en une fois. Une taille de lot plus grande peut augmenter les performances et la vitesse du modèle, mais elle nécessite également plus de mémoire. (Par défaut : 512)",
+	"The batch size determines how many text requests are processed together at once. A higher batch size can increase the performance and speed of the model, but it also requires more memory.": "",
 	"The developers behind this plugin are passionate volunteers from the community. If you find this plugin helpful, please consider contributing to its development.": "Les développeurs de ce plugin sont des bénévoles passionnés issus de la communauté. Si vous trouvez ce plugin utile, merci de contribuer à son développement.",
 	"The evaluation leaderboard is based on the Elo rating system and is updated in real-time.": "Le classement d'évaluation est basé sur le système de notation Elo et est mis à jour en temps réel.",
 	"The LDAP attribute that maps to the mail that users use to sign in.": "L'attribut LDAP qui correspond à l'adresse e-mail que les utilisateurs utilisent pour se connecter.",
@@ -988,14 +988,14 @@
 	"The maximum file size in MB. If the file size exceeds this limit, the file will not be uploaded.": "La taille maximale du fichier en Mo. Si la taille du fichier dépasse cette limite, le fichier ne sera pas téléchargé.",
 	"The maximum number of files that can be used at once in chat. If the number of files exceeds this limit, the files will not be uploaded.": "Le nombre maximal de fichiers pouvant être utilisés en même temps dans la conversation. Si le nombre de fichiers dépasse cette limite, les fichiers ne seront pas téléchargés.",
 	"The score should be a value between 0.0 (0%) and 1.0 (100%).": "Le score doit être une valeur comprise entre 0,0 (0%) et 1,0 (100%).",
-	"The temperature of the model. Increasing the temperature will make the model answer more creatively. (Default: 0.8)": "La température du modèle. Augmenter la température rendra le modèle plus créatif dans ses réponses. (Par défaut : 0.8)",
+	"The temperature of the model. Increasing the temperature will make the model answer more creatively.": "",
 	"Theme": "Thème",
 	"Thinking...": "En train de réfléchir...",
 	"This action cannot be undone. Do you wish to continue?": "Cette action ne peut pas être annulée. Souhaitez-vous continuer ?",
 	"This ensures that your valuable conversations are securely saved to your backend database. Thank you!": "Cela garantit que vos conversations précieuses soient sauvegardées en toute sécurité dans votre base de données backend. Merci !",
 	"This is an experimental feature, it may not function as expected and is subject to change at any time.": "Il s'agit d'une fonctionnalité expérimentale, elle peut ne pas fonctionner comme prévu et est sujette à modification à tout moment.",
-	"This option controls how many tokens are preserved when refreshing the context. For example, if set to 2, the last 2 tokens of the conversation context will be retained. Preserving context can help maintain the continuity of a conversation, but it may reduce the ability to respond to new topics. (Default: 24)": "Cette option contrôle combien de tokens sont conservés lors du rafraîchissement du contexte. Par exemple, si ce paramètre est défini à 2, les 2 derniers tokens du contexte de conversation seront conservés. Préserver le contexte peut aider à maintenir la continuité d'une conversation, mais cela peut réduire la capacité à répondre à de nouveaux sujets. (Par défaut : 24)",
-	"This option sets the maximum number of tokens the model can generate in its response. Increasing this limit allows the model to provide longer answers, but it may also increase the likelihood of unhelpful or irrelevant content being generated.  (Default: 128)": "Cette option définit le nombre maximum de tokens que le modèle peut générer dans sa réponse. Augmenter cette limite permet au modèle de fournir des réponses plus longues, mais cela peut également augmenter la probabilité de générer du contenu inutile ou non pertinent. (Par défaut : 128)",
+	"This option controls how many tokens are preserved when refreshing the context. For example, if set to 2, the last 2 tokens of the conversation context will be retained. Preserving context can help maintain the continuity of a conversation, but it may reduce the ability to respond to new topics.": "",
+	"This option sets the maximum number of tokens the model can generate in its response. Increasing this limit allows the model to provide longer answers, but it may also increase the likelihood of unhelpful or irrelevant content being generated.": "",
 	"This option will delete all existing files in the collection and replace them with newly uploaded files.": "Cette option supprimera tous les fichiers existants dans la collection et les remplacera par les fichiers nouvellement téléchargés.",
 	"This response was generated by \"{{model}}\"": "Cette réponse a été générée par \"{{model}}\"",
 	"This will delete": "Cela supprimera",
@@ -1132,7 +1132,7 @@
 	"Why?": "Pourquoi ?",
 	"Widescreen Mode": "Mode grand écran",
 	"Won": "Victoires",
-	"Works together with top-k. A higher value (e.g., 0.95) will lead to more diverse text, while a lower value (e.g., 0.5) will generate more focused and conservative text. (Default: 0.9)": "Fonctionne avec le top-k. Une valeur plus élevée (par ex. 0.95) donnera un texte plus diversifié, tandis qu'une valeur plus basse (par ex. 0.5) générera un texte plus concentré et conservateur. (Par défaut : 0.9)",
+	"Works together with top-k. A higher value (e.g., 0.95) will lead to more diverse text, while a lower value (e.g., 0.5) will generate more focused and conservative text.": "",
 	"Workspace": "Espace de travail",
 	"Workspace Permissions": "Autorisations de l'espace de travail",
 	"Write": "Écrire",

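The repetition strings in the fr-FR block above appear to document a look-back window, a sequence-repetition penalty, a flat bias, and a scaling bias; the Ollama-style names repeat_last_n, repeat_penalty, presence_penalty, and frequency_penalty are assumed below. A sketch with the removed defaults:

    // Names assumed from the descriptions; defaults are the removed values.
    type RepetitionOptions = {
      repeat_last_n: number;     // look-back window; 0 = disabled, -1 = num_ctx
      repeat_penalty: number;    // sequence repetition penalty; 1 disables it
      presence_penalty: number;  // flat bias against seen tokens; 0 disables it
      frequency_penalty: number; // scaling bias by occurrence count; 0 disables it
    };

    const repetitionDefaults: RepetitionOptions = {
      repeat_last_n: 64,
      repeat_penalty: 1.1,
      presence_penalty: 0,
      frequency_penalty: 1.1
    };
    console.log(repetitionDefaults);
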
+ 18 - 18
src/lib/i18n/locales/he-IL/translation.json

@@ -64,7 +64,7 @@
 	"Allow Voice Interruption in Call": "",
 	"Allowed Endpoints": "",
 	"Already have an account?": "כבר יש לך חשבון?",
-	"Alternative to the top_p, and aims to ensure a balance of quality and variety. The parameter p represents the minimum probability for a token to be considered, relative to the probability of the most likely token. For example, with p=0.05 and the most likely token having a probability of 0.9, logits with a value less than 0.045 are filtered out. (Default: 0.0)": "",
+	"Alternative to the top_p, and aims to ensure a balance of quality and variety. The parameter p represents the minimum probability for a token to be considered, relative to the probability of the most likely token. For example, with p=0.05 and the most likely token having a probability of 0.9, logits with a value less than 0.045 are filtered out.": "",
 	"Always": "",
 	"Amazing": "",
 	"an assistant": "עוזר",
@@ -208,7 +208,7 @@
 	"Confirm your new password": "",
 	"Connect to your own OpenAI compatible API endpoints.": "",
 	"Connections": "חיבורים",
-	"Constrains effort on reasoning for reasoning models. Only applicable to reasoning models from specific providers that support reasoning effort. (Default: medium)": "",
+	"Constrains effort on reasoning for reasoning models. Only applicable to reasoning models from specific providers that support reasoning effort.": "",
 	"Contact Admin for WebUI Access": "",
 	"Content": "תוכן",
 	"Content Extraction Engine": "",
@@ -218,9 +218,9 @@
 	"Continue with Email": "",
 	"Continue with LDAP": "",
 	"Control how message text is split for TTS requests. 'Punctuation' splits into sentences, 'paragraphs' splits into paragraphs, and 'none' keeps the message as a single string.": "",
-	"Control the repetition of token sequences in the generated text. A higher value (e.g., 1.5) will penalize repetitions more strongly, while a lower value (e.g., 1.1) will be more lenient. At 1, it is disabled. (Default: 1.1)": "",
+	"Control the repetition of token sequences in the generated text. A higher value (e.g., 1.5) will penalize repetitions more strongly, while a lower value (e.g., 1.1) will be more lenient. At 1, it is disabled.": "",
 	"Controls": "",
-	"Controls the balance between coherence and diversity of the output. A lower value will result in more focused and coherent text. (Default: 5.0)": "",
+	"Controls the balance between coherence and diversity of the output. A lower value will result in more focused and coherent text.": "",
 	"Copied": "",
 	"Copied shared chat URL to clipboard!": "העתקת כתובת URL של צ'אט משותף ללוח!",
 	"Copied to clipboard": "",
@@ -358,7 +358,7 @@
 	"Enable Memory Locking (mlock) to prevent model data from being swapped out of RAM. This option locks the model's working set of pages into RAM, ensuring that they will not be swapped out to disk. This can help maintain performance by avoiding page faults and ensuring fast data access.": "",
 	"Enable Memory Mapping (mmap) to load model data. This option allows the system to use disk storage as an extension of RAM by treating disk files as if they were in RAM. This can improve model performance by allowing for faster data access. However, it may not work correctly with all systems and can consume a significant amount of disk space.": "",
 	"Enable Message Rating": "",
-	"Enable Mirostat sampling for controlling perplexity. (Default: 0, 0 = Disabled, 1 = Mirostat, 2 = Mirostat 2.0)": "",
+	"Enable Mirostat sampling for controlling perplexity.": "",
 	"Enable New Sign Ups": "אפשר הרשמות חדשות",
 	"Enabled": "",
 	"Ensure your CSV file includes 4 columns in this order: Name, Email, Password, Role.": "ודא שקובץ ה-CSV שלך כולל 4 עמודות בסדר הבא: שם, דוא\"ל, סיסמה, תפקיד.",
@@ -566,7 +566,7 @@
 	"Include": "",
 	"Include `--api-auth` flag when running stable-diffusion-webui": "",
 	"Include `--api` flag when running stable-diffusion-webui": "כלול את הדגל `--api` בעת הרצת stable-diffusion-webui",
-	"Influences how quickly the algorithm responds to feedback from the generated text. A lower learning rate will result in slower adjustments, while a higher learning rate will make the algorithm more responsive. (Default: 0.1)": "",
+	"Influences how quickly the algorithm responds to feedback from the generated text. A lower learning rate will result in slower adjustments, while a higher learning rate will make the algorithm more responsive.": "",
 	"Info": "מידע",
 	"Input commands": "פקודות קלט",
 	"Install from Github URL": "התקן מכתובת URL של Github",
@@ -809,7 +809,7 @@
 	"Reasoning Effort": "",
 	"Record voice": "הקלט קול",
 	"Redirecting you to Open WebUI Community": "מפנה אותך לקהילת OpenWebUI",
-	"Reduces the probability of generating nonsense. A higher value (e.g. 100) will give more diverse answers, while a lower value (e.g. 10) will be more conservative. (Default: 40)": "",
+	"Reduces the probability of generating nonsense. A higher value (e.g. 100) will give more diverse answers, while a lower value (e.g. 10) will be more conservative.": "",
 	"Refer to yourself as \"User\" (e.g., \"User is learning Spanish\")": "",
 	"References from": "",
 	"Refused when it shouldn't have": "נדחה כאשר לא היה צריך",
@@ -918,11 +918,11 @@
 	"Set the number of worker threads used for computation. This option controls how many threads are used to process incoming requests concurrently. Increasing this value can improve performance under high concurrency workloads but may also consume more CPU resources.": "",
 	"Set Voice": "הגדר קול",
 	"Set whisper model": "",
-	"Sets a flat bias against tokens that have appeared at least once. A higher value (e.g., 1.5) will penalize repetitions more strongly, while a lower value (e.g., 0.9) will be more lenient. At 0, it is disabled. (Default: 0)": "",
-	"Sets a scaling bias against tokens to penalize repetitions, based on how many times they have appeared. A higher value (e.g., 1.5) will penalize repetitions more strongly, while a lower value (e.g., 0.9) will be more lenient. At 0, it is disabled. (Default: 1.1)": "",
-	"Sets how far back for the model to look back to prevent repetition. (Default: 64, 0 = disabled, -1 = num_ctx)": "",
-	"Sets the random number seed to use for generation. Setting this to a specific number will make the model generate the same text for the same prompt. (Default: random)": "",
-	"Sets the size of the context window used to generate the next token. (Default: 2048)": "",
+	"Sets a flat bias against tokens that have appeared at least once. A higher value (e.g., 1.5) will penalize repetitions more strongly, while a lower value (e.g., 0.9) will be more lenient. At 0, it is disabled.": "",
+	"Sets a scaling bias against tokens to penalize repetitions, based on how many times they have appeared. A higher value (e.g., 1.5) will penalize repetitions more strongly, while a lower value (e.g., 0.9) will be more lenient. At 0, it is disabled.": "",
+	"Sets how far back for the model to look back to prevent repetition.": "",
+	"Sets the random number seed to use for generation. Setting this to a specific number will make the model generate the same text for the same prompt.": "",
+	"Sets the size of the context window used to generate the next token.": "",
 	"Sets the stop sequences to use. When this pattern is encountered, the LLM will stop generating text and return. Multiple stop patterns may be set by specifying multiple separate stop parameters in a modelfile.": "",
 	"Settings": "הגדרות",
 	"Settings saved successfully!": "ההגדרות נשמרו בהצלחה!",
@@ -964,7 +964,7 @@
 	"System Prompt": "תגובת מערכת",
 	"Tags Generation": "",
 	"Tags Generation Prompt": "",
-	"Tail free sampling is used to reduce the impact of less probable tokens from the output. A higher value (e.g., 2.0) will reduce the impact more, while a value of 1.0 disables this setting. (default: 1)": "",
+	"Tail free sampling is used to reduce the impact of less probable tokens from the output. A higher value (e.g., 2.0) will reduce the impact more, while a value of 1.0 disables this setting.": "",
 	"Talk to model": "",
 	"Tap to interrupt": "",
 	"Tasks": "",
@@ -979,7 +979,7 @@
 	"Thanks for your feedback!": "תודה על המשוב שלך!",
 	"The Application Account DN you bind with for search": "",
 	"The base to search for users": "",
-	"The batch size determines how many text requests are processed together at once. A higher batch size can increase the performance and speed of the model, but it also requires more memory.  (Default: 512)": "",
+	"The batch size determines how many text requests are processed together at once. A higher batch size can increase the performance and speed of the model, but it also requires more memory.": "",
 	"The developers behind this plugin are passionate volunteers from the community. If you find this plugin helpful, please consider contributing to its development.": "",
 	"The evaluation leaderboard is based on the Elo rating system and is updated in real-time.": "",
 	"The LDAP attribute that maps to the mail that users use to sign in.": "",
@@ -988,14 +988,14 @@
 	"The maximum file size in MB. If the file size exceeds this limit, the file will not be uploaded.": "",
 	"The maximum number of files that can be used at once in chat. If the number of files exceeds this limit, the files will not be uploaded.": "",
 	"The score should be a value between 0.0 (0%) and 1.0 (100%).": "ציון צריך להיות ערך בין 0.0 (0%) ל-1.0 (100%)",
-	"The temperature of the model. Increasing the temperature will make the model answer more creatively. (Default: 0.8)": "",
+	"The temperature of the model. Increasing the temperature will make the model answer more creatively.": "",
 	"Theme": "נושא",
 	"Thinking...": "",
 	"This action cannot be undone. Do you wish to continue?": "",
 	"This ensures that your valuable conversations are securely saved to your backend database. Thank you!": "פעולה זו מבטיחה שהשיחות בעלות הערך שלך יישמרו באופן מאובטח במסד הנתונים העורפי שלך. תודה!",
 	"This is an experimental feature, it may not function as expected and is subject to change at any time.": "",
-	"This option controls how many tokens are preserved when refreshing the context. For example, if set to 2, the last 2 tokens of the conversation context will be retained. Preserving context can help maintain the continuity of a conversation, but it may reduce the ability to respond to new topics. (Default: 24)": "",
-	"This option sets the maximum number of tokens the model can generate in its response. Increasing this limit allows the model to provide longer answers, but it may also increase the likelihood of unhelpful or irrelevant content being generated.  (Default: 128)": "",
+	"This option controls how many tokens are preserved when refreshing the context. For example, if set to 2, the last 2 tokens of the conversation context will be retained. Preserving context can help maintain the continuity of a conversation, but it may reduce the ability to respond to new topics.": "",
+	"This option sets the maximum number of tokens the model can generate in its response. Increasing this limit allows the model to provide longer answers, but it may also increase the likelihood of unhelpful or irrelevant content being generated.": "",
 	"This option will delete all existing files in the collection and replace them with newly uploaded files.": "",
 	"This response was generated by \"{{model}}\"": "",
 	"This will delete": "",
@@ -1132,7 +1132,7 @@
 	"Why?": "",
 	"Widescreen Mode": "",
 	"Won": "",
-	"Works together with top-k. A higher value (e.g., 0.95) will lead to more diverse text, while a lower value (e.g., 0.5) will generate more focused and conservative text. (Default: 0.9)": "",
+	"Works together with top-k. A higher value (e.g., 0.95) will lead to more diverse text, while a lower value (e.g., 0.5) will generate more focused and conservative text.": "",
 	"Workspace": "סביבה",
 	"Workspace Permissions": "",
 	"Write": "",

+ 18 - 18
src/lib/i18n/locales/hi-IN/translation.json

@@ -64,7 +64,7 @@
 	"Allow Voice Interruption in Call": "",
 	"Allowed Endpoints": "",
 	"Already have an account?": "क्या आपके पास पहले से एक खाता मौजूद है?",
-	"Alternative to the top_p, and aims to ensure a balance of quality and variety. The parameter p represents the minimum probability for a token to be considered, relative to the probability of the most likely token. For example, with p=0.05 and the most likely token having a probability of 0.9, logits with a value less than 0.045 are filtered out. (Default: 0.0)": "",
+	"Alternative to the top_p, and aims to ensure a balance of quality and variety. The parameter p represents the minimum probability for a token to be considered, relative to the probability of the most likely token. For example, with p=0.05 and the most likely token having a probability of 0.9, logits with a value less than 0.045 are filtered out.": "",
 	"Always": "",
 	"Amazing": "",
 	"an assistant": "एक सहायक",
@@ -208,7 +208,7 @@
 	"Confirm your new password": "",
 	"Connect to your own OpenAI compatible API endpoints.": "",
 	"Connections": "सम्बन्ध",
-	"Constrains effort on reasoning for reasoning models. Only applicable to reasoning models from specific providers that support reasoning effort. (Default: medium)": "",
+	"Constrains effort on reasoning for reasoning models. Only applicable to reasoning models from specific providers that support reasoning effort.": "",
 	"Contact Admin for WebUI Access": "",
 	"Content": "सामग्री",
 	"Content Extraction Engine": "",
@@ -218,9 +218,9 @@
 	"Continue with Email": "",
 	"Continue with LDAP": "",
 	"Control how message text is split for TTS requests. 'Punctuation' splits into sentences, 'paragraphs' splits into paragraphs, and 'none' keeps the message as a single string.": "",
-	"Control the repetition of token sequences in the generated text. A higher value (e.g., 1.5) will penalize repetitions more strongly, while a lower value (e.g., 1.1) will be more lenient. At 1, it is disabled. (Default: 1.1)": "",
+	"Control the repetition of token sequences in the generated text. A higher value (e.g., 1.5) will penalize repetitions more strongly, while a lower value (e.g., 1.1) will be more lenient. At 1, it is disabled.": "",
 	"Controls": "",
-	"Controls the balance between coherence and diversity of the output. A lower value will result in more focused and coherent text. (Default: 5.0)": "",
+	"Controls the balance between coherence and diversity of the output. A lower value will result in more focused and coherent text.": "",
 	"Copied": "",
 	"Copied shared chat URL to clipboard!": "साझा चैट URL को क्लिपबोर्ड पर कॉपी किया गया!",
 	"Copied to clipboard": "",
@@ -358,7 +358,7 @@
 	"Enable Memory Locking (mlock) to prevent model data from being swapped out of RAM. This option locks the model's working set of pages into RAM, ensuring that they will not be swapped out to disk. This can help maintain performance by avoiding page faults and ensuring fast data access.": "",
 	"Enable Memory Mapping (mmap) to load model data. This option allows the system to use disk storage as an extension of RAM by treating disk files as if they were in RAM. This can improve model performance by allowing for faster data access. However, it may not work correctly with all systems and can consume a significant amount of disk space.": "",
 	"Enable Message Rating": "",
-	"Enable Mirostat sampling for controlling perplexity. (Default: 0, 0 = Disabled, 1 = Mirostat, 2 = Mirostat 2.0)": "",
+	"Enable Mirostat sampling for controlling perplexity.": "",
 	"Enable New Sign Ups": "नए साइन अप सक्रिय करें",
 	"Enabled": "",
 	"Ensure your CSV file includes 4 columns in this order: Name, Email, Password, Role.": "सुनिश्चित करें कि आपकी CSV फ़ाइल में इस क्रम में 4 कॉलम शामिल हैं: नाम, ईमेल, पासवर्ड, भूमिका।",
@@ -566,7 +566,7 @@
 	"Include": "",
 	"Include `--api-auth` flag when running stable-diffusion-webui": "",
 	"Include `--api` flag when running stable-diffusion-webui": "stable-diffusion-webui चलाते समय `--api` ध्वज शामिल करें",
-	"Influences how quickly the algorithm responds to feedback from the generated text. A lower learning rate will result in slower adjustments, while a higher learning rate will make the algorithm more responsive. (Default: 0.1)": "",
+	"Influences how quickly the algorithm responds to feedback from the generated text. A lower learning rate will result in slower adjustments, while a higher learning rate will make the algorithm more responsive.": "",
 	"Info": "सूचना-विषयक",
 	"Input commands": "इनपुट क命",
 	"Install from Github URL": "Github URL से इंस्टॉल करें",
@@ -809,7 +809,7 @@
 	"Reasoning Effort": "",
 	"Record voice": "आवाज रिकॉर्ड करना",
 	"Redirecting you to Open WebUI Community": "आपको OpenWebUI समुदाय पर पुनर्निर्देशित किया जा रहा है",
-	"Reduces the probability of generating nonsense. A higher value (e.g. 100) will give more diverse answers, while a lower value (e.g. 10) will be more conservative. (Default: 40)": "",
+	"Reduces the probability of generating nonsense. A higher value (e.g. 100) will give more diverse answers, while a lower value (e.g. 10) will be more conservative.": "",
 	"Refer to yourself as \"User\" (e.g., \"User is learning Spanish\")": "",
 	"References from": "",
 	"Refused when it shouldn't have": "जब ऐसा नहीं होना चाहिए था तो मना कर दिया",
@@ -918,11 +918,11 @@
 	"Set the number of worker threads used for computation. This option controls how many threads are used to process incoming requests concurrently. Increasing this value can improve performance under high concurrency workloads but may also consume more CPU resources.": "",
 	"Set Voice": "आवाज सेट करें",
 	"Set whisper model": "",
-	"Sets a flat bias against tokens that have appeared at least once. A higher value (e.g., 1.5) will penalize repetitions more strongly, while a lower value (e.g., 0.9) will be more lenient. At 0, it is disabled. (Default: 0)": "",
-	"Sets a scaling bias against tokens to penalize repetitions, based on how many times they have appeared. A higher value (e.g., 1.5) will penalize repetitions more strongly, while a lower value (e.g., 0.9) will be more lenient. At 0, it is disabled. (Default: 1.1)": "",
-	"Sets how far back for the model to look back to prevent repetition. (Default: 64, 0 = disabled, -1 = num_ctx)": "",
-	"Sets the random number seed to use for generation. Setting this to a specific number will make the model generate the same text for the same prompt. (Default: random)": "",
-	"Sets the size of the context window used to generate the next token. (Default: 2048)": "",
+	"Sets a flat bias against tokens that have appeared at least once. A higher value (e.g., 1.5) will penalize repetitions more strongly, while a lower value (e.g., 0.9) will be more lenient. At 0, it is disabled.": "",
+	"Sets a scaling bias against tokens to penalize repetitions, based on how many times they have appeared. A higher value (e.g., 1.5) will penalize repetitions more strongly, while a lower value (e.g., 0.9) will be more lenient. At 0, it is disabled.": "",
+	"Sets how far back for the model to look back to prevent repetition.": "",
+	"Sets the random number seed to use for generation. Setting this to a specific number will make the model generate the same text for the same prompt.": "",
+	"Sets the size of the context window used to generate the next token.": "",
 	"Sets the stop sequences to use. When this pattern is encountered, the LLM will stop generating text and return. Multiple stop patterns may be set by specifying multiple separate stop parameters in a modelfile.": "",
 	"Settings": "सेटिंग्स",
 	"Settings saved successfully!": "सेटिंग्स सफलतापूर्वक सहेजी गईं!",
@@ -964,7 +964,7 @@
 	"System Prompt": "सिस्टम प्रॉम्प्ट",
 	"Tags Generation": "",
 	"Tags Generation Prompt": "",
-	"Tail free sampling is used to reduce the impact of less probable tokens from the output. A higher value (e.g., 2.0) will reduce the impact more, while a value of 1.0 disables this setting. (default: 1)": "",
+	"Tail free sampling is used to reduce the impact of less probable tokens from the output. A higher value (e.g., 2.0) will reduce the impact more, while a value of 1.0 disables this setting.": "",
 	"Talk to model": "",
 	"Tap to interrupt": "",
 	"Tasks": "",
@@ -979,7 +979,7 @@
 	"Thanks for your feedback!": "आपकी प्रतिक्रिया के लिए धन्यवाद!",
 	"The Application Account DN you bind with for search": "",
 	"The base to search for users": "",
-	"The batch size determines how many text requests are processed together at once. A higher batch size can increase the performance and speed of the model, but it also requires more memory.  (Default: 512)": "",
+	"The batch size determines how many text requests are processed together at once. A higher batch size can increase the performance and speed of the model, but it also requires more memory.": "",
 	"The developers behind this plugin are passionate volunteers from the community. If you find this plugin helpful, please consider contributing to its development.": "",
 	"The evaluation leaderboard is based on the Elo rating system and is updated in real-time.": "",
 	"The LDAP attribute that maps to the mail that users use to sign in.": "",
@@ -988,14 +988,14 @@
 	"The maximum file size in MB. If the file size exceeds this limit, the file will not be uploaded.": "",
 	"The maximum number of files that can be used at once in chat. If the number of files exceeds this limit, the files will not be uploaded.": "",
 	"The score should be a value between 0.0 (0%) and 1.0 (100%).": "स्कोर का मान 0.0 (0%) और 1.0 (100%) के बीच होना चाहिए।",
-	"The temperature of the model. Increasing the temperature will make the model answer more creatively. (Default: 0.8)": "",
+	"The temperature of the model. Increasing the temperature will make the model answer more creatively.": "",
 	"Theme": "थीम",
 	"Thinking...": "",
 	"This action cannot be undone. Do you wish to continue?": "",
 	"This ensures that your valuable conversations are securely saved to your backend database. Thank you!": "यह सुनिश्चित करता है कि आपकी मूल्यवान बातचीत आपके बैकएंड डेटाबेस में सुरक्षित रूप से सहेजी गई है। धन्यवाद!",
 	"This is an experimental feature, it may not function as expected and is subject to change at any time.": "",
-	"This option controls how many tokens are preserved when refreshing the context. For example, if set to 2, the last 2 tokens of the conversation context will be retained. Preserving context can help maintain the continuity of a conversation, but it may reduce the ability to respond to new topics. (Default: 24)": "",
-	"This option sets the maximum number of tokens the model can generate in its response. Increasing this limit allows the model to provide longer answers, but it may also increase the likelihood of unhelpful or irrelevant content being generated.  (Default: 128)": "",
+	"This option controls how many tokens are preserved when refreshing the context. For example, if set to 2, the last 2 tokens of the conversation context will be retained. Preserving context can help maintain the continuity of a conversation, but it may reduce the ability to respond to new topics.": "",
+	"This option sets the maximum number of tokens the model can generate in its response. Increasing this limit allows the model to provide longer answers, but it may also increase the likelihood of unhelpful or irrelevant content being generated.": "",
 	"This option will delete all existing files in the collection and replace them with newly uploaded files.": "",
 	"This response was generated by \"{{model}}\"": "",
 	"This will delete": "",
@@ -1132,7 +1132,7 @@
 	"Why?": "",
 	"Widescreen Mode": "",
 	"Won": "",
-	"Works together with top-k. A higher value (e.g., 0.95) will lead to more diverse text, while a lower value (e.g., 0.5) will generate more focused and conservative text. (Default: 0.9)": "",
+	"Works together with top-k. A higher value (e.g., 0.95) will lead to more diverse text, while a lower value (e.g., 0.5) will generate more focused and conservative text.": "",
 	"Workspace": "वर्कस्पेस",
 	"Workspace Permissions": "",
 	"Write": "",

+ 18 - 18
src/lib/i18n/locales/hr-HR/translation.json

@@ -64,7 +64,7 @@
 	"Allow Voice Interruption in Call": "",
 	"Allowed Endpoints": "",
 	"Already have an account?": "Već imate račun?",
-	"Alternative to the top_p, and aims to ensure a balance of quality and variety. The parameter p represents the minimum probability for a token to be considered, relative to the probability of the most likely token. For example, with p=0.05 and the most likely token having a probability of 0.9, logits with a value less than 0.045 are filtered out. (Default: 0.0)": "",
+	"Alternative to the top_p, and aims to ensure a balance of quality and variety. The parameter p represents the minimum probability for a token to be considered, relative to the probability of the most likely token. For example, with p=0.05 and the most likely token having a probability of 0.9, logits with a value less than 0.045 are filtered out.": "",
 	"Always": "",
 	"Amazing": "",
 	"an assistant": "asistent",
@@ -208,7 +208,7 @@
 	"Confirm your new password": "",
 	"Connect to your own OpenAI compatible API endpoints.": "",
 	"Connections": "Povezivanja",
-	"Constrains effort on reasoning for reasoning models. Only applicable to reasoning models from specific providers that support reasoning effort. (Default: medium)": "",
+	"Constrains effort on reasoning for reasoning models. Only applicable to reasoning models from specific providers that support reasoning effort.": "",
 	"Contact Admin for WebUI Access": "Kontaktirajte admina za WebUI pristup",
 	"Content": "Sadržaj",
 	"Content Extraction Engine": "",
@@ -218,9 +218,9 @@
 	"Continue with Email": "",
 	"Continue with LDAP": "",
 	"Control how message text is split for TTS requests. 'Punctuation' splits into sentences, 'paragraphs' splits into paragraphs, and 'none' keeps the message as a single string.": "",
-	"Control the repetition of token sequences in the generated text. A higher value (e.g., 1.5) will penalize repetitions more strongly, while a lower value (e.g., 1.1) will be more lenient. At 1, it is disabled. (Default: 1.1)": "",
+	"Control the repetition of token sequences in the generated text. A higher value (e.g., 1.5) will penalize repetitions more strongly, while a lower value (e.g., 1.1) will be more lenient. At 1, it is disabled.": "",
 	"Controls": "",
-	"Controls the balance between coherence and diversity of the output. A lower value will result in more focused and coherent text. (Default: 5.0)": "",
+	"Controls the balance between coherence and diversity of the output. A lower value will result in more focused and coherent text.": "",
 	"Copied": "",
 	"Copied shared chat URL to clipboard!": "URL dijeljenog razgovora kopiran u međuspremnik!",
 	"Copied to clipboard": "",
@@ -358,7 +358,7 @@
 	"Enable Memory Locking (mlock) to prevent model data from being swapped out of RAM. This option locks the model's working set of pages into RAM, ensuring that they will not be swapped out to disk. This can help maintain performance by avoiding page faults and ensuring fast data access.": "",
 	"Enable Memory Mapping (mmap) to load model data. This option allows the system to use disk storage as an extension of RAM by treating disk files as if they were in RAM. This can improve model performance by allowing for faster data access. However, it may not work correctly with all systems and can consume a significant amount of disk space.": "",
 	"Enable Message Rating": "",
-	"Enable Mirostat sampling for controlling perplexity. (Default: 0, 0 = Disabled, 1 = Mirostat, 2 = Mirostat 2.0)": "",
+	"Enable Mirostat sampling for controlling perplexity.": "",
 	"Enable New Sign Ups": "Omogući nove prijave",
 	"Enabled": "",
 	"Ensure your CSV file includes 4 columns in this order: Name, Email, Password, Role.": "Provjerite da vaša CSV datoteka uključuje 4 stupca u ovom redoslijedu: Name, Email, Password, Role.",
@@ -566,7 +566,7 @@
 	"Include": "",
 	"Include `--api-auth` flag when running stable-diffusion-webui": "",
 	"Include `--api` flag when running stable-diffusion-webui": "Uključite zastavicu `--api` prilikom pokretanja stable-diffusion-webui",
-	"Influences how quickly the algorithm responds to feedback from the generated text. A lower learning rate will result in slower adjustments, while a higher learning rate will make the algorithm more responsive. (Default: 0.1)": "",
+	"Influences how quickly the algorithm responds to feedback from the generated text. A lower learning rate will result in slower adjustments, while a higher learning rate will make the algorithm more responsive.": "",
 	"Info": "Informacije",
 	"Input commands": "Unos naredbi",
 	"Install from Github URL": "Instaliraj s Github URL-a",
@@ -809,7 +809,7 @@
 	"Reasoning Effort": "",
 	"Record voice": "Snimanje glasa",
 	"Redirecting you to Open WebUI Community": "Preusmjeravanje na OpenWebUI zajednicu",
-	"Reduces the probability of generating nonsense. A higher value (e.g. 100) will give more diverse answers, while a lower value (e.g. 10) will be more conservative. (Default: 40)": "",
+	"Reduces the probability of generating nonsense. A higher value (e.g. 100) will give more diverse answers, while a lower value (e.g. 10) will be more conservative.": "",
 	"Refer to yourself as \"User\" (e.g., \"User is learning Spanish\")": "Nazivajte se \"Korisnik\" (npr. \"Korisnik uči španjolski\")",
 	"References from": "",
 	"Refused when it shouldn't have": "Odbijen kada nije trebao biti",
@@ -918,11 +918,11 @@
 	"Set the number of worker threads used for computation. This option controls how many threads are used to process incoming requests concurrently. Increasing this value can improve performance under high concurrency workloads but may also consume more CPU resources.": "",
 	"Set Voice": "Postavi glas",
 	"Set whisper model": "",
-	"Sets a flat bias against tokens that have appeared at least once. A higher value (e.g., 1.5) will penalize repetitions more strongly, while a lower value (e.g., 0.9) will be more lenient. At 0, it is disabled. (Default: 0)": "",
-	"Sets a scaling bias against tokens to penalize repetitions, based on how many times they have appeared. A higher value (e.g., 1.5) will penalize repetitions more strongly, while a lower value (e.g., 0.9) will be more lenient. At 0, it is disabled. (Default: 1.1)": "",
-	"Sets how far back for the model to look back to prevent repetition. (Default: 64, 0 = disabled, -1 = num_ctx)": "",
-	"Sets the random number seed to use for generation. Setting this to a specific number will make the model generate the same text for the same prompt. (Default: random)": "",
-	"Sets the size of the context window used to generate the next token. (Default: 2048)": "",
+	"Sets a flat bias against tokens that have appeared at least once. A higher value (e.g., 1.5) will penalize repetitions more strongly, while a lower value (e.g., 0.9) will be more lenient. At 0, it is disabled.": "",
+	"Sets a scaling bias against tokens to penalize repetitions, based on how many times they have appeared. A higher value (e.g., 1.5) will penalize repetitions more strongly, while a lower value (e.g., 0.9) will be more lenient. At 0, it is disabled.": "",
+	"Sets how far back for the model to look back to prevent repetition.": "",
+	"Sets the random number seed to use for generation. Setting this to a specific number will make the model generate the same text for the same prompt.": "",
+	"Sets the size of the context window used to generate the next token.": "",
 	"Sets the stop sequences to use. When this pattern is encountered, the LLM will stop generating text and return. Multiple stop patterns may be set by specifying multiple separate stop parameters in a modelfile.": "",
 	"Settings": "Postavke",
 	"Settings saved successfully!": "Postavke su uspješno spremljene!",
@@ -964,7 +964,7 @@
 	"System Prompt": "Sistemski prompt",
 	"Tags Generation": "",
 	"Tags Generation Prompt": "",
-	"Tail free sampling is used to reduce the impact of less probable tokens from the output. A higher value (e.g., 2.0) will reduce the impact more, while a value of 1.0 disables this setting. (default: 1)": "",
+	"Tail free sampling is used to reduce the impact of less probable tokens from the output. A higher value (e.g., 2.0) will reduce the impact more, while a value of 1.0 disables this setting.": "",
 	"Talk to model": "",
 	"Tap to interrupt": "",
 	"Tasks": "",
@@ -979,7 +979,7 @@
 	"Thanks for your feedback!": "Hvala na povratnim informacijama!",
 	"The Application Account DN you bind with for search": "",
 	"The base to search for users": "",
-	"The batch size determines how many text requests are processed together at once. A higher batch size can increase the performance and speed of the model, but it also requires more memory.  (Default: 512)": "",
+	"The batch size determines how many text requests are processed together at once. A higher batch size can increase the performance and speed of the model, but it also requires more memory.": "",
 	"The developers behind this plugin are passionate volunteers from the community. If you find this plugin helpful, please consider contributing to its development.": "",
 	"The evaluation leaderboard is based on the Elo rating system and is updated in real-time.": "",
 	"The LDAP attribute that maps to the mail that users use to sign in.": "",
@@ -988,14 +988,14 @@
 	"The maximum file size in MB. If the file size exceeds this limit, the file will not be uploaded.": "",
 	"The maximum number of files that can be used at once in chat. If the number of files exceeds this limit, the files will not be uploaded.": "",
 	"The score should be a value between 0.0 (0%) and 1.0 (100%).": "Ocjena treba biti vrijednost između 0,0 (0%) i 1,0 (100%).",
-	"The temperature of the model. Increasing the temperature will make the model answer more creatively. (Default: 0.8)": "",
+	"The temperature of the model. Increasing the temperature will make the model answer more creatively.": "",
 	"Theme": "Tema",
 	"Thinking...": "Razmišljam",
 	"This action cannot be undone. Do you wish to continue?": "",
 	"This ensures that your valuable conversations are securely saved to your backend database. Thank you!": "Ovo osigurava da su vaši vrijedni razgovori sigurno spremljeni u bazu podataka. Hvala vam!",
 	"This is an experimental feature, it may not function as expected and is subject to change at any time.": "Ovo je eksperimentalna značajka, možda neće funkcionirati prema očekivanjima i podložna je promjenama u bilo kojem trenutku.",
-	"This option controls how many tokens are preserved when refreshing the context. For example, if set to 2, the last 2 tokens of the conversation context will be retained. Preserving context can help maintain the continuity of a conversation, but it may reduce the ability to respond to new topics. (Default: 24)": "",
-	"This option sets the maximum number of tokens the model can generate in its response. Increasing this limit allows the model to provide longer answers, but it may also increase the likelihood of unhelpful or irrelevant content being generated.  (Default: 128)": "",
+	"This option controls how many tokens are preserved when refreshing the context. For example, if set to 2, the last 2 tokens of the conversation context will be retained. Preserving context can help maintain the continuity of a conversation, but it may reduce the ability to respond to new topics.": "",
+	"This option sets the maximum number of tokens the model can generate in its response. Increasing this limit allows the model to provide longer answers, but it may also increase the likelihood of unhelpful or irrelevant content being generated.": "",
 	"This option will delete all existing files in the collection and replace them with newly uploaded files.": "",
 	"This response was generated by \"{{model}}\"": "",
 	"This will delete": "",
@@ -1132,7 +1132,7 @@
 	"Why?": "",
 	"Widescreen Mode": "Mod širokog zaslona",
 	"Won": "",
-	"Works together with top-k. A higher value (e.g., 0.95) will lead to more diverse text, while a lower value (e.g., 0.5) will generate more focused and conservative text. (Default: 0.9)": "",
+	"Works together with top-k. A higher value (e.g., 0.95) will lead to more diverse text, while a lower value (e.g., 0.5) will generate more focused and conservative text.": "",
 	"Workspace": "Radna ploča",
 	"Workspace Permissions": "",
 	"Write": "",

+ 18 - 18
src/lib/i18n/locales/hu-HU/translation.json

@@ -64,7 +64,7 @@
 	"Allow Voice Interruption in Call": "Hang megszakítás engedélyezése hívás közben",
 	"Allowed Endpoints": "",
 	"Already have an account?": "Már van fiókod?",
-	"Alternative to the top_p, and aims to ensure a balance of quality and variety. The parameter p represents the minimum probability for a token to be considered, relative to the probability of the most likely token. For example, with p=0.05 and the most likely token having a probability of 0.9, logits with a value less than 0.045 are filtered out. (Default: 0.0)": "",
+	"Alternative to the top_p, and aims to ensure a balance of quality and variety. The parameter p represents the minimum probability for a token to be considered, relative to the probability of the most likely token. For example, with p=0.05 and the most likely token having a probability of 0.9, logits with a value less than 0.045 are filtered out.": "",
 	"Always": "",
 	"Amazing": "",
 	"an assistant": "egy asszisztens",
@@ -208,7 +208,7 @@
 	"Confirm your new password": "",
 	"Connect to your own OpenAI compatible API endpoints.": "",
 	"Connections": "Kapcsolatok",
-	"Constrains effort on reasoning for reasoning models. Only applicable to reasoning models from specific providers that support reasoning effort. (Default: medium)": "",
+	"Constrains effort on reasoning for reasoning models. Only applicable to reasoning models from specific providers that support reasoning effort.": "",
 	"Contact Admin for WebUI Access": "Lépj kapcsolatba az adminnal a WebUI hozzáférésért",
 	"Content": "Tartalom",
 	"Content Extraction Engine": "",
@@ -218,9 +218,9 @@
 	"Continue with Email": "",
 	"Continue with LDAP": "",
 	"Control how message text is split for TTS requests. 'Punctuation' splits into sentences, 'paragraphs' splits into paragraphs, and 'none' keeps the message as a single string.": "Szabályozd, hogyan legyen felosztva az üzenet szövege a TTS kérésekhez. A 'Központozás' mondatokra bontja, a 'Bekezdések' bekezdésekre bontja, a 'Nincs' pedig egyetlen szövegként kezeli az üzenetet.",
-	"Control the repetition of token sequences in the generated text. A higher value (e.g., 1.5) will penalize repetitions more strongly, while a lower value (e.g., 1.1) will be more lenient. At 1, it is disabled. (Default: 1.1)": "",
+	"Control the repetition of token sequences in the generated text. A higher value (e.g., 1.5) will penalize repetitions more strongly, while a lower value (e.g., 1.1) will be more lenient. At 1, it is disabled.": "",
 	"Controls": "Vezérlők",
-	"Controls the balance between coherence and diversity of the output. A lower value will result in more focused and coherent text. (Default: 5.0)": "",
+	"Controls the balance between coherence and diversity of the output. A lower value will result in more focused and coherent text.": "",
 	"Copied": "Másolva",
 	"Copied shared chat URL to clipboard!": "Megosztott beszélgetés URL másolva a vágólapra!",
 	"Copied to clipboard": "Vágólapra másolva",
@@ -358,7 +358,7 @@
 	"Enable Memory Locking (mlock) to prevent model data from being swapped out of RAM. This option locks the model's working set of pages into RAM, ensuring that they will not be swapped out to disk. This can help maintain performance by avoiding page faults and ensuring fast data access.": "",
 	"Enable Memory Mapping (mmap) to load model data. This option allows the system to use disk storage as an extension of RAM by treating disk files as if they were in RAM. This can improve model performance by allowing for faster data access. However, it may not work correctly with all systems and can consume a significant amount of disk space.": "",
 	"Enable Message Rating": "Üzenet értékelés engedélyezése",
-	"Enable Mirostat sampling for controlling perplexity. (Default: 0, 0 = Disabled, 1 = Mirostat, 2 = Mirostat 2.0)": "",
+	"Enable Mirostat sampling for controlling perplexity.": "",
 	"Enable New Sign Ups": "Új regisztrációk engedélyezése",
 	"Enabled": "Engedélyezve",
 	"Ensure your CSV file includes 4 columns in this order: Name, Email, Password, Role.": "Győződj meg róla, hogy a CSV fájl tartalmazza ezt a 4 oszlopot ebben a sorrendben: Név, Email, Jelszó, Szerep.",
@@ -566,7 +566,7 @@
 	"Include": "Tartalmaz",
 	"Include `--api-auth` flag when running stable-diffusion-webui": "Add hozzá a `--api-auth` kapcsolót a stable-diffusion-webui futtatásakor",
 	"Include `--api` flag when running stable-diffusion-webui": "Add hozzá a `--api` kapcsolót a stable-diffusion-webui futtatásakor",
-	"Influences how quickly the algorithm responds to feedback from the generated text. A lower learning rate will result in slower adjustments, while a higher learning rate will make the algorithm more responsive. (Default: 0.1)": "",
+	"Influences how quickly the algorithm responds to feedback from the generated text. A lower learning rate will result in slower adjustments, while a higher learning rate will make the algorithm more responsive.": "",
 	"Info": "Információ",
 	"Input commands": "Beviteli parancsok",
 	"Install from Github URL": "Telepítés Github URL-ről",
@@ -809,7 +809,7 @@
 	"Reasoning Effort": "",
 	"Record voice": "Hang rögzítése",
 	"Redirecting you to Open WebUI Community": "Átirányítás az OpenWebUI közösséghez",
-	"Reduces the probability of generating nonsense. A higher value (e.g. 100) will give more diverse answers, while a lower value (e.g. 10) will be more conservative. (Default: 40)": "",
+	"Reduces the probability of generating nonsense. A higher value (e.g. 100) will give more diverse answers, while a lower value (e.g. 10) will be more conservative.": "",
 	"Refer to yourself as \"User\" (e.g., \"User is learning Spanish\")": "Hivatkozzon magára \"Felhasználó\"-ként (pl. \"A Felhasználó spanyolul tanul\")",
 	"References from": "Hivatkozások innen",
 	"Refused when it shouldn't have": "Elutasítva, amikor nem kellett volna",
@@ -918,11 +918,11 @@
 	"Set the number of worker threads used for computation. This option controls how many threads are used to process incoming requests concurrently. Increasing this value can improve performance under high concurrency workloads but may also consume more CPU resources.": "",
 	"Set Voice": "Hang beállítása",
 	"Set whisper model": "Whisper modell beállítása",
-	"Sets a flat bias against tokens that have appeared at least once. A higher value (e.g., 1.5) will penalize repetitions more strongly, while a lower value (e.g., 0.9) will be more lenient. At 0, it is disabled. (Default: 0)": "",
-	"Sets a scaling bias against tokens to penalize repetitions, based on how many times they have appeared. A higher value (e.g., 1.5) will penalize repetitions more strongly, while a lower value (e.g., 0.9) will be more lenient. At 0, it is disabled. (Default: 1.1)": "",
-	"Sets how far back for the model to look back to prevent repetition. (Default: 64, 0 = disabled, -1 = num_ctx)": "",
-	"Sets the random number seed to use for generation. Setting this to a specific number will make the model generate the same text for the same prompt. (Default: random)": "",
-	"Sets the size of the context window used to generate the next token. (Default: 2048)": "",
+	"Sets a flat bias against tokens that have appeared at least once. A higher value (e.g., 1.5) will penalize repetitions more strongly, while a lower value (e.g., 0.9) will be more lenient. At 0, it is disabled.": "",
+	"Sets a scaling bias against tokens to penalize repetitions, based on how many times they have appeared. A higher value (e.g., 1.5) will penalize repetitions more strongly, while a lower value (e.g., 0.9) will be more lenient. At 0, it is disabled.": "",
+	"Sets how far back for the model to look back to prevent repetition.": "",
+	"Sets the random number seed to use for generation. Setting this to a specific number will make the model generate the same text for the same prompt.": "",
+	"Sets the size of the context window used to generate the next token.": "",
 	"Sets the stop sequences to use. When this pattern is encountered, the LLM will stop generating text and return. Multiple stop patterns may be set by specifying multiple separate stop parameters in a modelfile.": "",
 	"Settings": "Beállítások",
 	"Settings saved successfully!": "Beállítások sikeresen mentve!",
@@ -964,7 +964,7 @@
 	"System Prompt": "Rendszer prompt",
 	"Tags Generation": "",
 	"Tags Generation Prompt": "Címke generálási prompt",
-	"Tail free sampling is used to reduce the impact of less probable tokens from the output. A higher value (e.g., 2.0) will reduce the impact more, while a value of 1.0 disables this setting. (default: 1)": "",
+	"Tail free sampling is used to reduce the impact of less probable tokens from the output. A higher value (e.g., 2.0) will reduce the impact more, while a value of 1.0 disables this setting.": "",
 	"Talk to model": "",
 	"Tap to interrupt": "Koppintson a megszakításhoz",
 	"Tasks": "",
@@ -979,7 +979,7 @@
 	"Thanks for your feedback!": "Köszönjük a visszajelzést!",
 	"The Application Account DN you bind with for search": "",
 	"The base to search for users": "",
-	"The batch size determines how many text requests are processed together at once. A higher batch size can increase the performance and speed of the model, but it also requires more memory.  (Default: 512)": "",
+	"The batch size determines how many text requests are processed together at once. A higher batch size can increase the performance and speed of the model, but it also requires more memory.": "",
 	"The developers behind this plugin are passionate volunteers from the community. If you find this plugin helpful, please consider contributing to its development.": "A bővítmény fejlesztői lelkes önkéntesek a közösségből. Ha hasznosnak találja ezt a bővítményt, kérjük, fontolja meg a fejlesztéséhez való hozzájárulást.",
 	"The evaluation leaderboard is based on the Elo rating system and is updated in real-time.": "Az értékelési ranglista az Elo értékelési rendszeren alapul és valós időben frissül.",
 	"The LDAP attribute that maps to the mail that users use to sign in.": "",
@@ -988,14 +988,14 @@
 	"The maximum file size in MB. If the file size exceeds this limit, the file will not be uploaded.": "A maximális fájlméret MB-ban. Ha a fájlméret meghaladja ezt a limitet, a fájl nem lesz feltöltve.",
 	"The maximum number of files that can be used at once in chat. If the number of files exceeds this limit, the files will not be uploaded.": "A chatben egyszerre használható fájlok maximális száma. Ha a fájlok száma meghaladja ezt a limitet, a fájlok nem lesznek feltöltve.",
 	"The score should be a value between 0.0 (0%) and 1.0 (100%).": "A pontszámnak 0,0 (0%) és 1,0 (100%) közötti értéknek kell lennie.",
-	"The temperature of the model. Increasing the temperature will make the model answer more creatively. (Default: 0.8)": "",
+	"The temperature of the model. Increasing the temperature will make the model answer more creatively.": "",
 	"Theme": "Téma",
 	"Thinking...": "Gondolkodik...",
 	"This action cannot be undone. Do you wish to continue?": "Ez a művelet nem vonható vissza. Szeretné folytatni?",
 	"This ensures that your valuable conversations are securely saved to your backend database. Thank you!": "Ez biztosítja, hogy értékes beszélgetései biztonságosan mentésre kerüljenek a backend adatbázisban. Köszönjük!",
 	"This is an experimental feature, it may not function as expected and is subject to change at any time.": "Ez egy kísérleti funkció, lehet, hogy nem a várt módon működik és bármikor változhat.",
-	"This option controls how many tokens are preserved when refreshing the context. For example, if set to 2, the last 2 tokens of the conversation context will be retained. Preserving context can help maintain the continuity of a conversation, but it may reduce the ability to respond to new topics. (Default: 24)": "",
-	"This option sets the maximum number of tokens the model can generate in its response. Increasing this limit allows the model to provide longer answers, but it may also increase the likelihood of unhelpful or irrelevant content being generated.  (Default: 128)": "",
+	"This option controls how many tokens are preserved when refreshing the context. For example, if set to 2, the last 2 tokens of the conversation context will be retained. Preserving context can help maintain the continuity of a conversation, but it may reduce the ability to respond to new topics.": "",
+	"This option sets the maximum number of tokens the model can generate in its response. Increasing this limit allows the model to provide longer answers, but it may also increase the likelihood of unhelpful or irrelevant content being generated.": "",
 	"This option will delete all existing files in the collection and replace them with newly uploaded files.": "Ez az opció törli az összes meglévő fájlt a gyűjteményben és lecseréli őket az újonnan feltöltött fájlokkal.",
 	"This response was generated by \"{{model}}\"": "Ezt a választ a \"{{model}}\" generálta",
 	"This will delete": "Ez törölni fogja",
@@ -1132,7 +1132,7 @@
 	"Why?": "",
 	"Widescreen Mode": "Szélesvásznú mód",
 	"Won": "Nyert",
-	"Works together with top-k. A higher value (e.g., 0.95) will lead to more diverse text, while a lower value (e.g., 0.5) will generate more focused and conservative text. (Default: 0.9)": "",
+	"Works together with top-k. A higher value (e.g., 0.95) will lead to more diverse text, while a lower value (e.g., 0.5) will generate more focused and conservative text.": "",
 	"Workspace": "Munkaterület",
 	"Workspace Permissions": "",
 	"Write": "",

+ 18 - 18
src/lib/i18n/locales/id-ID/translation.json

@@ -64,7 +64,7 @@
 	"Allow Voice Interruption in Call": "Izinkan Gangguan Suara dalam Panggilan",
 	"Allowed Endpoints": "",
 	"Already have an account?": "Sudah memiliki akun?",
-	"Alternative to the top_p, and aims to ensure a balance of quality and variety. The parameter p represents the minimum probability for a token to be considered, relative to the probability of the most likely token. For example, with p=0.05 and the most likely token having a probability of 0.9, logits with a value less than 0.045 are filtered out. (Default: 0.0)": "",
+	"Alternative to the top_p, and aims to ensure a balance of quality and variety. The parameter p represents the minimum probability for a token to be considered, relative to the probability of the most likely token. For example, with p=0.05 and the most likely token having a probability of 0.9, logits with a value less than 0.045 are filtered out.": "",
 	"Always": "",
 	"Amazing": "",
 	"an assistant": "asisten",
@@ -208,7 +208,7 @@
 	"Confirm your new password": "",
 	"Connect to your own OpenAI compatible API endpoints.": "",
 	"Connections": "Koneksi",
-	"Constrains effort on reasoning for reasoning models. Only applicable to reasoning models from specific providers that support reasoning effort. (Default: medium)": "",
+	"Constrains effort on reasoning for reasoning models. Only applicable to reasoning models from specific providers that support reasoning effort.": "",
 	"Contact Admin for WebUI Access": "Hubungi Admin untuk Akses WebUI",
 	"Content": "Konten",
 	"Content Extraction Engine": "",
@@ -218,9 +218,9 @@
 	"Continue with Email": "",
 	"Continue with LDAP": "",
 	"Control how message text is split for TTS requests. 'Punctuation' splits into sentences, 'paragraphs' splits into paragraphs, and 'none' keeps the message as a single string.": "",
-	"Control the repetition of token sequences in the generated text. A higher value (e.g., 1.5) will penalize repetitions more strongly, while a lower value (e.g., 1.1) will be more lenient. At 1, it is disabled. (Default: 1.1)": "",
+	"Control the repetition of token sequences in the generated text. A higher value (e.g., 1.5) will penalize repetitions more strongly, while a lower value (e.g., 1.1) will be more lenient. At 1, it is disabled.": "",
 	"Controls": "",
-	"Controls the balance between coherence and diversity of the output. A lower value will result in more focused and coherent text. (Default: 5.0)": "",
+	"Controls the balance between coherence and diversity of the output. A lower value will result in more focused and coherent text.": "",
 	"Copied": "",
 	"Copied shared chat URL to clipboard!": "Menyalin URL obrolan bersama ke papan klip!",
 	"Copied to clipboard": "",
@@ -358,7 +358,7 @@
 	"Enable Memory Locking (mlock) to prevent model data from being swapped out of RAM. This option locks the model's working set of pages into RAM, ensuring that they will not be swapped out to disk. This can help maintain performance by avoiding page faults and ensuring fast data access.": "",
 	"Enable Memory Mapping (mmap) to load model data. This option allows the system to use disk storage as an extension of RAM by treating disk files as if they were in RAM. This can improve model performance by allowing for faster data access. However, it may not work correctly with all systems and can consume a significant amount of disk space.": "",
 	"Enable Message Rating": "",
-	"Enable Mirostat sampling for controlling perplexity. (Default: 0, 0 = Disabled, 1 = Mirostat, 2 = Mirostat 2.0)": "",
+	"Enable Mirostat sampling for controlling perplexity.": "",
 	"Enable New Sign Ups": "Aktifkan Pendaftaran Baru",
 	"Enabled": "",
 	"Ensure your CSV file includes 4 columns in this order: Name, Email, Password, Role.": "Pastikan file CSV Anda menyertakan 4 kolom dengan urutan sebagai berikut: Nama, Email, Kata Sandi, Peran.",
@@ -566,7 +566,7 @@
 	"Include": "",
 	"Include `--api-auth` flag when running stable-diffusion-webui": "Sertakan bendera `--api-auth` saat menjalankan stable-diffusion-webui",
 	"Include `--api` flag when running stable-diffusion-webui": "Sertakan bendera `--api` saat menjalankan stable-diffusion-webui",
-	"Influences how quickly the algorithm responds to feedback from the generated text. A lower learning rate will result in slower adjustments, while a higher learning rate will make the algorithm more responsive. (Default: 0.1)": "",
+	"Influences how quickly the algorithm responds to feedback from the generated text. A lower learning rate will result in slower adjustments, while a higher learning rate will make the algorithm more responsive.": "",
 	"Info": "Info",
 	"Input commands": "Perintah masukan",
 	"Install from Github URL": "Instal dari URL Github",
@@ -809,7 +809,7 @@
 	"Reasoning Effort": "",
 	"Record voice": "Rekam suara",
 	"Redirecting you to Open WebUI Community": "Mengarahkan Anda ke Komunitas OpenWebUI",
-	"Reduces the probability of generating nonsense. A higher value (e.g. 100) will give more diverse answers, while a lower value (e.g. 10) will be more conservative. (Default: 40)": "",
+	"Reduces the probability of generating nonsense. A higher value (e.g. 100) will give more diverse answers, while a lower value (e.g. 10) will be more conservative.": "",
 	"Refer to yourself as \"User\" (e.g., \"User is learning Spanish\")": "Merujuk diri Anda sebagai \"Pengguna\" (misalnya, \"Pengguna sedang belajar bahasa Spanyol\")",
 	"References from": "",
 	"Refused when it shouldn't have": "Menolak ketika seharusnya tidak",
@@ -918,11 +918,11 @@
 	"Set the number of worker threads used for computation. This option controls how many threads are used to process incoming requests concurrently. Increasing this value can improve performance under high concurrency workloads but may also consume more CPU resources.": "",
 	"Set Voice": "Mengatur Suara",
 	"Set whisper model": "",
-	"Sets a flat bias against tokens that have appeared at least once. A higher value (e.g., 1.5) will penalize repetitions more strongly, while a lower value (e.g., 0.9) will be more lenient. At 0, it is disabled. (Default: 0)": "",
-	"Sets a scaling bias against tokens to penalize repetitions, based on how many times they have appeared. A higher value (e.g., 1.5) will penalize repetitions more strongly, while a lower value (e.g., 0.9) will be more lenient. At 0, it is disabled. (Default: 1.1)": "",
-	"Sets how far back for the model to look back to prevent repetition. (Default: 64, 0 = disabled, -1 = num_ctx)": "",
-	"Sets the random number seed to use for generation. Setting this to a specific number will make the model generate the same text for the same prompt. (Default: random)": "",
-	"Sets the size of the context window used to generate the next token. (Default: 2048)": "",
+	"Sets a flat bias against tokens that have appeared at least once. A higher value (e.g., 1.5) will penalize repetitions more strongly, while a lower value (e.g., 0.9) will be more lenient. At 0, it is disabled.": "",
+	"Sets a scaling bias against tokens to penalize repetitions, based on how many times they have appeared. A higher value (e.g., 1.5) will penalize repetitions more strongly, while a lower value (e.g., 0.9) will be more lenient. At 0, it is disabled.": "",
+	"Sets how far back for the model to look back to prevent repetition.": "",
+	"Sets the random number seed to use for generation. Setting this to a specific number will make the model generate the same text for the same prompt.": "",
+	"Sets the size of the context window used to generate the next token.": "",
 	"Sets the stop sequences to use. When this pattern is encountered, the LLM will stop generating text and return. Multiple stop patterns may be set by specifying multiple separate stop parameters in a modelfile.": "",
 	"Settings": "Pengaturan",
 	"Settings saved successfully!": "Pengaturan berhasil disimpan!",
@@ -964,7 +964,7 @@
 	"System Prompt": "Permintaan Sistem",
 	"Tags Generation": "",
 	"Tags Generation Prompt": "",
-	"Tail free sampling is used to reduce the impact of less probable tokens from the output. A higher value (e.g., 2.0) will reduce the impact more, while a value of 1.0 disables this setting. (default: 1)": "",
+	"Tail free sampling is used to reduce the impact of less probable tokens from the output. A higher value (e.g., 2.0) will reduce the impact more, while a value of 1.0 disables this setting.": "",
 	"Talk to model": "",
 	"Tap to interrupt": "Ketuk untuk menyela",
 	"Tasks": "",
@@ -979,7 +979,7 @@
 	"Thanks for your feedback!": "Terima kasih atas umpan balik Anda!",
 	"The Application Account DN you bind with for search": "",
 	"The base to search for users": "",
-	"The batch size determines how many text requests are processed together at once. A higher batch size can increase the performance and speed of the model, but it also requires more memory.  (Default: 512)": "",
+	"The batch size determines how many text requests are processed together at once. A higher batch size can increase the performance and speed of the model, but it also requires more memory.": "",
 	"The developers behind this plugin are passionate volunteers from the community. If you find this plugin helpful, please consider contributing to its development.": "",
 	"The evaluation leaderboard is based on the Elo rating system and is updated in real-time.": "",
 	"The LDAP attribute that maps to the mail that users use to sign in.": "",
@@ -988,14 +988,14 @@
 	"The maximum file size in MB. If the file size exceeds this limit, the file will not be uploaded.": "",
 	"The maximum number of files that can be used at once in chat. If the number of files exceeds this limit, the files will not be uploaded.": "",
 	"The score should be a value between 0.0 (0%) and 1.0 (100%).": "Nilai yang diberikan haruslah nilai antara 0,0 (0%) dan 1,0 (100%).",
-	"The temperature of the model. Increasing the temperature will make the model answer more creatively. (Default: 0.8)": "",
+	"The temperature of the model. Increasing the temperature will make the model answer more creatively.": "",
 	"Theme": "Tema",
 	"Thinking...": "Berpikir",
 	"This action cannot be undone. Do you wish to continue?": "Tindakan ini tidak dapat dibatalkan. Apakah Anda ingin melanjutkan?",
 	"This ensures that your valuable conversations are securely saved to your backend database. Thank you!": "Ini akan memastikan bahwa percakapan Anda yang berharga disimpan dengan aman ke basis data backend. Terima kasih!",
 	"This is an experimental feature, it may not function as expected and is subject to change at any time.": "Ini adalah fitur eksperimental, mungkin tidak berfungsi seperti yang diharapkan dan dapat berubah sewaktu-waktu.",
-	"This option controls how many tokens are preserved when refreshing the context. For example, if set to 2, the last 2 tokens of the conversation context will be retained. Preserving context can help maintain the continuity of a conversation, but it may reduce the ability to respond to new topics. (Default: 24)": "",
-	"This option sets the maximum number of tokens the model can generate in its response. Increasing this limit allows the model to provide longer answers, but it may also increase the likelihood of unhelpful or irrelevant content being generated.  (Default: 128)": "",
+	"This option controls how many tokens are preserved when refreshing the context. For example, if set to 2, the last 2 tokens of the conversation context will be retained. Preserving context can help maintain the continuity of a conversation, but it may reduce the ability to respond to new topics.": "",
+	"This option sets the maximum number of tokens the model can generate in its response. Increasing this limit allows the model to provide longer answers, but it may also increase the likelihood of unhelpful or irrelevant content being generated.": "",
 	"This option will delete all existing files in the collection and replace them with newly uploaded files.": "",
 	"This response was generated by \"{{model}}\"": "",
 	"This will delete": "Ini akan menghapus",
@@ -1132,7 +1132,7 @@
 	"Why?": "",
 	"Widescreen Mode": "Mode Layar Lebar",
 	"Won": "",
-	"Works together with top-k. A higher value (e.g., 0.95) will lead to more diverse text, while a lower value (e.g., 0.5) will generate more focused and conservative text. (Default: 0.9)": "",
+	"Works together with top-k. A higher value (e.g., 0.95) will lead to more diverse text, while a lower value (e.g., 0.5) will generate more focused and conservative text.": "",
 	"Workspace": "Ruang Kerja",
 	"Workspace Permissions": "",
 	"Write": "",

+ 18 - 18
src/lib/i18n/locales/ie-GA/translation.json

@@ -64,7 +64,7 @@
 	"Allow Voice Interruption in Call": "Ceadaigh Briseadh Guth i nGlao",
 	"Allowed Endpoints": "Críochphointí Ceadaithe",
 	"Already have an account?": "Tá cuntas agat cheana féin?",
-	"Alternative to the top_p, and aims to ensure a balance of quality and variety. The parameter p represents the minimum probability for a token to be considered, relative to the probability of the most likely token. For example, with p=0.05 and the most likely token having a probability of 0.9, logits with a value less than 0.045 are filtered out. (Default: 0.0)": "Rogha eile seachas an top_p, agus tá sé mar aidhm aige cothromaíocht cáilíochta agus éagsúlachta a chinntiú. Léiríonn an paraiméadar p an dóchúlacht íosta go mbreithneofar comhartha, i gcoibhneas le dóchúlacht an chomhartha is dóichí. Mar shampla, le p=0.05 agus dóchúlacht 0.9 ag an comhartha is dóichí, déantar logits le luach níos lú ná 0.045 a scagadh amach. (Réamhshocrú: 0.0)",
+	"Alternative to the top_p, and aims to ensure a balance of quality and variety. The parameter p represents the minimum probability for a token to be considered, relative to the probability of the most likely token. For example, with p=0.05 and the most likely token having a probability of 0.9, logits with a value less than 0.045 are filtered out.": "",
 	"Always": "I gcónaí",
 	"Amazing": "Iontach",
 	"an assistant": "cúntóir",
@@ -208,7 +208,7 @@
 	"Confirm your new password": "Deimhnigh do phasfhocal nua",
 	"Connect to your own OpenAI compatible API endpoints.": "",
 	"Connections": "Naisc",
-	"Constrains effort on reasoning for reasoning models. Only applicable to reasoning models from specific providers that support reasoning effort. (Default: medium)": "Srianann iarracht ar réasúnaíocht a dhéanamh ar shamhlacha réasúnaíochta. Ní bhaineann ach le samhlacha réasúnaíochta ó sholáthraithe sonracha a thacaíonn le hiarracht réasúnaíochta. (Réamhshocrú: meánach)",
+	"Constrains effort on reasoning for reasoning models. Only applicable to reasoning models from specific providers that support reasoning effort.": "",
 	"Contact Admin for WebUI Access": "Déan teagmháil le Riarachán le haghaidh Rochtana WebUI",
 	"Content": "Ábhar",
 	"Content Extraction Engine": "",
@@ -218,9 +218,9 @@
 	"Continue with Email": "Lean ar aghaidh le Ríomhphost",
 	"Continue with LDAP": "Lean ar aghaidh le LDAP",
 	"Control how message text is split for TTS requests. 'Punctuation' splits into sentences, 'paragraphs' splits into paragraphs, and 'none' keeps the message as a single string.": "Rialú conas a roinntear téacs teachtaireachta d'iarratais TTS. Roinneann 'poncaíocht' ina abairtí, scoilteann 'míreanna' i míreanna, agus coinníonn 'aon' an teachtaireacht mar shreang amháin.",
-	"Control the repetition of token sequences in the generated text. A higher value (e.g., 1.5) will penalize repetitions more strongly, while a lower value (e.g., 1.1) will be more lenient. At 1, it is disabled. (Default: 1.1)": "",
+	"Control the repetition of token sequences in the generated text. A higher value (e.g., 1.5) will penalize repetitions more strongly, while a lower value (e.g., 1.1) will be more lenient. At 1, it is disabled.": "",
 	"Controls": "Rialuithe",
-	"Controls the balance between coherence and diversity of the output. A lower value will result in more focused and coherent text. (Default: 5.0)": "Rialaíonn sé an chothromaíocht idir comhleanúnachas agus éagsúlacht an aschuir. Beidh téacs níos dírithe agus níos soiléire mar thoradh ar luach níos ísle. (Réamhshocrú: 5.0)",
+	"Controls the balance between coherence and diversity of the output. A lower value will result in more focused and coherent text.": "",
 	"Copied": "Cóipeáladh",
 	"Copied shared chat URL to clipboard!": "Cóipeáladh URL an chomhrá roinnte chuig an ngearrthaisce!",
 	"Copied to clipboard": "Cóipeáilte go gear",
@@ -358,7 +358,7 @@
 	"Enable Memory Locking (mlock) to prevent model data from being swapped out of RAM. This option locks the model's working set of pages into RAM, ensuring that they will not be swapped out to disk. This can help maintain performance by avoiding page faults and ensuring fast data access.": "Cumasaigh Glasáil Cuimhne (mlock) chun sonraí samhaltaithe a chosc ó RAM. Glasálann an rogha seo sraith oibre leathanaigh an mhúnla isteach i RAM, ag cinntiú nach ndéanfar iad a mhalartú go diosca. Is féidir leis seo cabhrú le feidhmíocht a choinneáil trí lochtanna leathanaigh a sheachaint agus rochtain tapa ar shonraí a chinntiú.",
 	"Enable Memory Mapping (mmap) to load model data. This option allows the system to use disk storage as an extension of RAM by treating disk files as if they were in RAM. This can improve model performance by allowing for faster data access. However, it may not work correctly with all systems and can consume a significant amount of disk space.": "Cumasaigh Mapáil Cuimhne (mmap) chun sonraí samhla a lódáil. Ligeann an rogha seo don chóras stóráil diosca a úsáid mar leathnú ar RAM trí chomhaid diosca a chóireáil amhail is dá mba i RAM iad. Is féidir leis seo feidhmíocht na samhla a fheabhsú trí rochtain níos tapúla ar shonraí a cheadú. Mar sin féin, d'fhéadfadh sé nach n-oibreoidh sé i gceart le gach córas agus féadfaidh sé méid suntasach spáis diosca a ithe.",
 	"Enable Message Rating": "Cumasaigh Rátáil Teachtai",
-	"Enable Mirostat sampling for controlling perplexity. (Default: 0, 0 = Disabled, 1 = Mirostat, 2 = Mirostat 2.0)": "Cumasaigh sampláil Mirostat chun seachrán a rialú. (Réamhshocrú: 0, 0 = Díchumasaithe, 1 = Mirostat, 2 = Mirostat 2.0)",
+	"Enable Mirostat sampling for controlling perplexity.": "",
 	"Enable New Sign Ups": "Cumasaigh Clárúcháin Nua",
 	"Enabled": "Cumasaithe",
 	"Ensure your CSV file includes 4 columns in this order: Name, Email, Password, Role.": "Déan cinnte go bhfuil 4 cholún san ord seo i do chomhad CSV: Ainm, Ríomhphost, Pasfhocal, Ról.",
@@ -566,7 +566,7 @@
 	"Include": "Cuir san áireamh",
 	"Include `--api-auth` flag when running stable-diffusion-webui": "Cuir bratach `--api-auth` san áireamh agus webui stable-diffusion-reatha á rith",
 	"Include `--api` flag when running stable-diffusion-webui": "Cuir bratach `--api` san áireamh agus webui cobhsaí-scaipthe á rith",
-	"Influences how quickly the algorithm responds to feedback from the generated text. A lower learning rate will result in slower adjustments, while a higher learning rate will make the algorithm more responsive. (Default: 0.1)": "Bíonn tionchar aige ar chomh tapa agus a fhreagraíonn an t-algartam d’aiseolas ón téacs ginte. Beidh coigeartuithe níos moille mar thoradh ar ráta foghlama níos ísle, agus déanfaidh ráta foghlama níos airde an t-algartam níos freagraí. (Réamhshocrú: 0.1)",
+	"Influences how quickly the algorithm responds to feedback from the generated text. A lower learning rate will result in slower adjustments, while a higher learning rate will make the algorithm more responsive.": "",
 	"Info": "Eolas",
 	"Input commands": "Orduithe ionchuir",
 	"Install from Github URL": "Suiteáil ó Github URL",
@@ -809,7 +809,7 @@
 	"Reasoning Effort": "Iarracht Réasúnúcháin",
 	"Record voice": "Taifead guth",
 	"Redirecting you to Open WebUI Community": "Tú a atreorú chuig OpenWebUI Community",
-	"Reduces the probability of generating nonsense. A higher value (e.g. 100) will give more diverse answers, while a lower value (e.g. 10) will be more conservative. (Default: 40)": "Laghdaíonn sé an dóchúlacht go giniúint nonsense. Tabharfaidh luach níos airde (m.sh. 100) freagraí níos éagsúla, agus beidh luach níos ísle (m.sh. 10) níos coimeádaí. (Réamhshocrú: 40)",
+	"Reduces the probability of generating nonsense. A higher value (e.g. 100) will give more diverse answers, while a lower value (e.g. 10) will be more conservative.": "",
 	"Refer to yourself as \"User\" (e.g., \"User is learning Spanish\")": "Tagairt duit féin mar \"Úsáideoir\" (m.sh., \"Tá an úsáideoir ag foghlaim Spáinnis\")",
 	"References from": "Tagairtí ó",
 	"Refused when it shouldn't have": "Diúltaíodh nuair nár chóir dó",
@@ -918,11 +918,11 @@
 	"Set the number of worker threads used for computation. This option controls how many threads are used to process incoming requests concurrently. Increasing this value can improve performance under high concurrency workloads but may also consume more CPU resources.": "Socraigh líon na snáitheanna oibrithe a úsáidtear le haghaidh ríomh. Rialaíonn an rogha seo cé mhéad snáithe a úsáidtear chun iarratais a thagann isteach a phróiseáil i gcomhthráth. D'fhéadfadh méadú ar an luach seo feidhmíocht a fheabhsú faoi ualaí oibre comhairgeadra ard ach féadfaidh sé níos mó acmhainní LAP a úsáid freisin.",
 	"Set Voice": "Socraigh Guth",
 	"Set whisper model": "Socraigh múnla cogar",
-	"Sets a flat bias against tokens that have appeared at least once. A higher value (e.g., 1.5) will penalize repetitions more strongly, while a lower value (e.g., 0.9) will be more lenient. At 0, it is disabled. (Default: 0)": "",
-	"Sets a scaling bias against tokens to penalize repetitions, based on how many times they have appeared. A higher value (e.g., 1.5) will penalize repetitions more strongly, while a lower value (e.g., 0.9) will be more lenient. At 0, it is disabled. (Default: 1.1)": "",
-	"Sets how far back for the model to look back to prevent repetition. (Default: 64, 0 = disabled, -1 = num_ctx)": "Socraíonn sé cé chomh fada siar is atá an tsamhail le breathnú siar chun athrá a chosc. (Réamhshocrú: 64, 0 = díchumasaithe, -1 = num_ctx)",
-	"Sets the random number seed to use for generation. Setting this to a specific number will make the model generate the same text for the same prompt. (Default: random)": "Socraíonn sé an síol uimhir randamach a úsáid le haghaidh giniúna. Má shocraítear é seo ar uimhir shainiúil, ginfidh an tsamhail an téacs céanna don leid céanna. (Réamhshocrú: randamach)",
-	"Sets the size of the context window used to generate the next token. (Default: 2048)": "Socraíonn sé méid na fuinneoige comhthéacs a úsáidtear chun an chéad chomhartha eile a ghiniúint. (Réamhshocrú: 2048)",
+	"Sets a flat bias against tokens that have appeared at least once. A higher value (e.g., 1.5) will penalize repetitions more strongly, while a lower value (e.g., 0.9) will be more lenient. At 0, it is disabled.": "",
+	"Sets a scaling bias against tokens to penalize repetitions, based on how many times they have appeared. A higher value (e.g., 1.5) will penalize repetitions more strongly, while a lower value (e.g., 0.9) will be more lenient. At 0, it is disabled.": "",
+	"Sets how far back for the model to look back to prevent repetition.": "",
+	"Sets the random number seed to use for generation. Setting this to a specific number will make the model generate the same text for the same prompt.": "",
+	"Sets the size of the context window used to generate the next token.": "",
 	"Sets the stop sequences to use. When this pattern is encountered, the LLM will stop generating text and return. Multiple stop patterns may be set by specifying multiple separate stop parameters in a modelfile.": "Socraíonn sé na stadanna le húsáid. Nuair a thagtar ar an bpatrún seo, stopfaidh an LLM ag giniúint téacs agus ag filleadh. Is féidir patrúin stad iolracha a shocrú trí pharaiméadair stadanna iolracha a shonrú i gcomhad samhail.",
 	"Settings": "Socruithe",
 	"Settings saved successfully!": "Socruithe sábhálta go rathúil!",
@@ -964,7 +964,7 @@
 	"System Prompt": "Córas Leid",
 	"Tags Generation": "Giniúint Clibeanna",
 	"Tags Generation Prompt": "Clibeanna Giniúint Leid",
-	"Tail free sampling is used to reduce the impact of less probable tokens from the output. A higher value (e.g., 2.0) will reduce the impact more, while a value of 1.0 disables this setting. (default: 1)": "Úsáidtear sampláil saor ó eireabaill chun tionchar na n-chomharthaí ón aschur nach bhfuil chomh dóchúil céanna a laghdú. Laghdóidh luach níos airde (m.sh., 2.0) an tionchar níos mó, agus díchumasaíonn luach 1.0 an socrú seo. (réamhshocraithe: 1)",
+	"Tail free sampling is used to reduce the impact of less probable tokens from the output. A higher value (e.g., 2.0) will reduce the impact more, while a value of 1.0 disables this setting.": "",
 	"Talk to model": "",
 	"Tap to interrupt": "Tapáil chun cur isteach",
 	"Tasks": "",
@@ -979,7 +979,7 @@
 	"Thanks for your feedback!": "Go raibh maith agat as do chuid aiseolas!",
 	"The Application Account DN you bind with for search": "An Cuntas Feidhmchláir DN a nascann tú leis le haghaidh cuardaigh",
 	"The base to search for users": "An bonn chun cuardach a dhéanamh ar úsáideoirí",
-	"The batch size determines how many text requests are processed together at once. A higher batch size can increase the performance and speed of the model, but it also requires more memory.  (Default: 512)": "Cinneann méid an bhaisc cé mhéad iarratas téacs a phróiseáiltear le chéile ag an am céanna. Is féidir le méid baisc níos airde feidhmíocht agus luas an mhúnla a mhéadú, ach éilíonn sé níos mó cuimhne freisin. (Réamhshocrú: 512)",
+	"The batch size determines how many text requests are processed together at once. A higher batch size can increase the performance and speed of the model, but it also requires more memory.": "",
 	"The developers behind this plugin are passionate volunteers from the community. If you find this plugin helpful, please consider contributing to its development.": "Is deonacha paiseanta ón bpobal iad na forbróirí taobh thiar den bhreiseán seo. Má aimsíonn an breiseán seo cabhrach leat, smaoinigh ar rannchuidiú lena fhorbairt.",
 	"The evaluation leaderboard is based on the Elo rating system and is updated in real-time.": "Tá an clár ceannairí meastóireachta bunaithe ar chóras rátála Elo agus déantar é a nuashonrú i bhfíor-am.",
 	"The LDAP attribute that maps to the mail that users use to sign in.": "An tréith LDAP a mhapálann don ríomhphost a úsáideann úsáideoirí chun síniú isteach.",
@@ -988,14 +988,14 @@
 	"The maximum file size in MB. If the file size exceeds this limit, the file will not be uploaded.": "Uasmhéid an chomhaid i MB. Má sháraíonn méid an chomhaid an teorainn seo, ní uaslódófar an comhad.",
 	"The maximum number of files that can be used at once in chat. If the number of files exceeds this limit, the files will not be uploaded.": "An líon uasta na gcomhaid is féidir a úsáid ag an am céanna i gcomhrá. Má sháraíonn líon na gcomhaid an teorainn seo, ní uaslódófar na comhaid.",
 	"The score should be a value between 0.0 (0%) and 1.0 (100%).": "Ba chóir go mbeadh an scór ina luach idir 0.0 (0%) agus 1.0 (100%).",
-	"The temperature of the model. Increasing the temperature will make the model answer more creatively. (Default: 0.8)": "Teocht an mhúnla. Déanfaidh méadú ar an teocht an freagra múnla níos cruthaithí. (Réamhshocrú: 0.8)",
+	"The temperature of the model. Increasing the temperature will make the model answer more creatively.": "",
 	"Theme": "Téama",
 	"Thinking...": "Ag smaoineamh...",
 	"This action cannot be undone. Do you wish to continue?": "Ní féidir an gníomh seo a chur ar ais. Ar mhaith leat leanúint ar aghaidh?",
 	"This ensures that your valuable conversations are securely saved to your backend database. Thank you!": "Cinntíonn sé seo go sábhálfar do chomhráite luachmhara go daingean i do bhunachar sonraí cúltaca Go raibh maith agat!",
 	"This is an experimental feature, it may not function as expected and is subject to change at any time.": "Is gné turgnamhach í seo, b'fhéidir nach bhfeidhmeoidh sé mar a bhíothas ag súil leis agus tá sé faoi réir athraithe ag am ar bith.",
-	"This option controls how many tokens are preserved when refreshing the context. For example, if set to 2, the last 2 tokens of the conversation context will be retained. Preserving context can help maintain the continuity of a conversation, but it may reduce the ability to respond to new topics. (Default: 24)": "Rialaíonn an rogha seo cé mhéad comhartha a chaomhnaítear agus an comhthéacs á athnuachan. Mar shampla, má shocraítear go 2 é, coinneofar an 2 chomhartha dheireanacha de chomhthéacs an chomhrá. Is féidir le comhthéacs a chaomhnú cabhrú le leanúnachas comhrá a choinneáil, ach d’fhéadfadh sé laghdú a dhéanamh ar an gcumas freagairt do thopaicí nua. (Réamhshocrú: 24)",
-	"This option sets the maximum number of tokens the model can generate in its response. Increasing this limit allows the model to provide longer answers, but it may also increase the likelihood of unhelpful or irrelevant content being generated.  (Default: 128)": "Socraíonn an rogha seo an t-uaslíon comharthaí is féidir leis an tsamhail a ghiniúint ina fhreagra. Tríd an teorainn seo a mhéadú is féidir leis an tsamhail freagraí níos faide a sholáthar, ach d’fhéadfadh go méadódh sé an dóchúlacht go nginfear ábhar neamhchabhrach nó nach mbaineann le hábhar. (Réamhshocrú: 128)",
+	"This option controls how many tokens are preserved when refreshing the context. For example, if set to 2, the last 2 tokens of the conversation context will be retained. Preserving context can help maintain the continuity of a conversation, but it may reduce the ability to respond to new topics.": "",
+	"This option sets the maximum number of tokens the model can generate in its response. Increasing this limit allows the model to provide longer answers, but it may also increase the likelihood of unhelpful or irrelevant content being generated.": "",
 	"This option will delete all existing files in the collection and replace them with newly uploaded files.": "Scriosfaidh an rogha seo gach comhad atá sa bhailiúchán agus cuirfear comhaid nua-uaslódála ina n-ionad.",
 	"This response was generated by \"{{model}}\"": "Gin an freagra seo ag \"{{model}}\"",
 	"This will delete": "Scriosfaidh sé seo",
@@ -1132,7 +1132,7 @@
 	"Why?": "Cén fáth?",
 	"Widescreen Mode": "Mód Leathanscáileán",
 	"Won": "Bhuaigh",
-	"Works together with top-k. A higher value (e.g., 0.95) will lead to more diverse text, while a lower value (e.g., 0.5) will generate more focused and conservative text. (Default: 0.9)": "Oibríonn sé le barr-k. Beidh téacs níos éagsúla mar thoradh ar luach níos airde (m.sh., 0.95), agus ginfidh luach níos ísle (m.sh., 0.5) téacs níos dírithe agus níos coimeádaí. (Réamhshocrú: 0.9)",
+	"Works together with top-k. A higher value (e.g., 0.95) will lead to more diverse text, while a lower value (e.g., 0.5) will generate more focused and conservative text.": "",
 	"Workspace": "Spás oibre",
 	"Workspace Permissions": "Ceadanna Spás Oibre",
 	"Write": "Scríobh",

+ 18 - 18
src/lib/i18n/locales/it-IT/translation.json

@@ -64,7 +64,7 @@
 	"Allow Voice Interruption in Call": "",
 	"Allowed Endpoints": "",
 	"Already have an account?": "Hai già un account?",
-	"Alternative to the top_p, and aims to ensure a balance of quality and variety. The parameter p represents the minimum probability for a token to be considered, relative to the probability of the most likely token. For example, with p=0.05 and the most likely token having a probability of 0.9, logits with a value less than 0.045 are filtered out. (Default: 0.0)": "",
+	"Alternative to the top_p, and aims to ensure a balance of quality and variety. The parameter p represents the minimum probability for a token to be considered, relative to the probability of the most likely token. For example, with p=0.05 and the most likely token having a probability of 0.9, logits with a value less than 0.045 are filtered out.": "",
 	"Always": "",
 	"Amazing": "",
 	"an assistant": "un assistente",
@@ -208,7 +208,7 @@
 	"Confirm your new password": "",
 	"Connect to your own OpenAI compatible API endpoints.": "",
 	"Connections": "Connessioni",
-	"Constrains effort on reasoning for reasoning models. Only applicable to reasoning models from specific providers that support reasoning effort. (Default: medium)": "",
+	"Constrains effort on reasoning for reasoning models. Only applicable to reasoning models from specific providers that support reasoning effort.": "",
 	"Contact Admin for WebUI Access": "",
 	"Content": "Contenuto",
 	"Content Extraction Engine": "",
@@ -218,9 +218,9 @@
 	"Continue with Email": "",
 	"Continue with LDAP": "",
 	"Control how message text is split for TTS requests. 'Punctuation' splits into sentences, 'paragraphs' splits into paragraphs, and 'none' keeps the message as a single string.": "",
-	"Control the repetition of token sequences in the generated text. A higher value (e.g., 1.5) will penalize repetitions more strongly, while a lower value (e.g., 1.1) will be more lenient. At 1, it is disabled. (Default: 1.1)": "",
+	"Control the repetition of token sequences in the generated text. A higher value (e.g., 1.5) will penalize repetitions more strongly, while a lower value (e.g., 1.1) will be more lenient. At 1, it is disabled.": "",
 	"Controls": "",
-	"Controls the balance between coherence and diversity of the output. A lower value will result in more focused and coherent text. (Default: 5.0)": "",
+	"Controls the balance between coherence and diversity of the output. A lower value will result in more focused and coherent text.": "",
 	"Copied": "",
 	"Copied shared chat URL to clipboard!": "URL della chat condivisa copiato negli appunti!",
 	"Copied to clipboard": "",
@@ -358,7 +358,7 @@
 	"Enable Memory Locking (mlock) to prevent model data from being swapped out of RAM. This option locks the model's working set of pages into RAM, ensuring that they will not be swapped out to disk. This can help maintain performance by avoiding page faults and ensuring fast data access.": "",
 	"Enable Memory Mapping (mmap) to load model data. This option allows the system to use disk storage as an extension of RAM by treating disk files as if they were in RAM. This can improve model performance by allowing for faster data access. However, it may not work correctly with all systems and can consume a significant amount of disk space.": "",
 	"Enable Message Rating": "",
-	"Enable Mirostat sampling for controlling perplexity. (Default: 0, 0 = Disabled, 1 = Mirostat, 2 = Mirostat 2.0)": "",
+	"Enable Mirostat sampling for controlling perplexity.": "",
 	"Enable New Sign Ups": "Abilita nuove iscrizioni",
 	"Enabled": "",
 	"Ensure your CSV file includes 4 columns in this order: Name, Email, Password, Role.": "Assicurati che il tuo file CSV includa 4 colonne in questo ordine: Nome, Email, Password, Ruolo.",
@@ -566,7 +566,7 @@
 	"Include": "",
 	"Include `--api-auth` flag when running stable-diffusion-webui": "",
 	"Include `--api` flag when running stable-diffusion-webui": "Includi il flag `--api` quando esegui stable-diffusion-webui",
-	"Influences how quickly the algorithm responds to feedback from the generated text. A lower learning rate will result in slower adjustments, while a higher learning rate will make the algorithm more responsive. (Default: 0.1)": "",
+	"Influences how quickly the algorithm responds to feedback from the generated text. A lower learning rate will result in slower adjustments, while a higher learning rate will make the algorithm more responsive.": "",
 	"Info": "Informazioni",
 	"Input commands": "Comandi di input",
 	"Install from Github URL": "Eseguire l'installazione dall'URL di Github",
@@ -809,7 +809,7 @@
 	"Reasoning Effort": "",
 	"Record voice": "Registra voce",
 	"Redirecting you to Open WebUI Community": "Reindirizzamento alla comunità OpenWebUI",
-	"Reduces the probability of generating nonsense. A higher value (e.g. 100) will give more diverse answers, while a lower value (e.g. 10) will be more conservative. (Default: 40)": "",
+	"Reduces the probability of generating nonsense. A higher value (e.g. 100) will give more diverse answers, while a lower value (e.g. 10) will be more conservative.": "",
 	"Refer to yourself as \"User\" (e.g., \"User is learning Spanish\")": "",
 	"References from": "",
 	"Refused when it shouldn't have": "Rifiutato quando non avrebbe dovuto",
@@ -918,11 +918,11 @@
 	"Set the number of worker threads used for computation. This option controls how many threads are used to process incoming requests concurrently. Increasing this value can improve performance under high concurrency workloads but may also consume more CPU resources.": "",
 	"Set Voice": "Imposta voce",
 	"Set whisper model": "",
-	"Sets a flat bias against tokens that have appeared at least once. A higher value (e.g., 1.5) will penalize repetitions more strongly, while a lower value (e.g., 0.9) will be more lenient. At 0, it is disabled. (Default: 0)": "",
-	"Sets a scaling bias against tokens to penalize repetitions, based on how many times they have appeared. A higher value (e.g., 1.5) will penalize repetitions more strongly, while a lower value (e.g., 0.9) will be more lenient. At 0, it is disabled. (Default: 1.1)": "",
-	"Sets how far back for the model to look back to prevent repetition. (Default: 64, 0 = disabled, -1 = num_ctx)": "",
-	"Sets the random number seed to use for generation. Setting this to a specific number will make the model generate the same text for the same prompt. (Default: random)": "",
-	"Sets the size of the context window used to generate the next token. (Default: 2048)": "",
+	"Sets a flat bias against tokens that have appeared at least once. A higher value (e.g., 1.5) will penalize repetitions more strongly, while a lower value (e.g., 0.9) will be more lenient. At 0, it is disabled.": "",
+	"Sets a scaling bias against tokens to penalize repetitions, based on how many times they have appeared. A higher value (e.g., 1.5) will penalize repetitions more strongly, while a lower value (e.g., 0.9) will be more lenient. At 0, it is disabled.": "",
+	"Sets how far back for the model to look back to prevent repetition.": "",
+	"Sets the random number seed to use for generation. Setting this to a specific number will make the model generate the same text for the same prompt.": "",
+	"Sets the size of the context window used to generate the next token.": "",
 	"Sets the stop sequences to use. When this pattern is encountered, the LLM will stop generating text and return. Multiple stop patterns may be set by specifying multiple separate stop parameters in a modelfile.": "",
 	"Settings": "Impostazioni",
 	"Settings saved successfully!": "Impostazioni salvate con successo!",
@@ -964,7 +964,7 @@
 	"System Prompt": "Prompt di sistema",
 	"Tags Generation": "",
 	"Tags Generation Prompt": "",
-	"Tail free sampling is used to reduce the impact of less probable tokens from the output. A higher value (e.g., 2.0) will reduce the impact more, while a value of 1.0 disables this setting. (default: 1)": "",
+	"Tail free sampling is used to reduce the impact of less probable tokens from the output. A higher value (e.g., 2.0) will reduce the impact more, while a value of 1.0 disables this setting.": "",
 	"Talk to model": "",
 	"Tap to interrupt": "",
 	"Tasks": "",
@@ -979,7 +979,7 @@
 	"Thanks for your feedback!": "Grazie per il tuo feedback!",
 	"The Application Account DN you bind with for search": "",
 	"The base to search for users": "",
-	"The batch size determines how many text requests are processed together at once. A higher batch size can increase the performance and speed of the model, but it also requires more memory.  (Default: 512)": "",
+	"The batch size determines how many text requests are processed together at once. A higher batch size can increase the performance and speed of the model, but it also requires more memory.": "",
 	"The developers behind this plugin are passionate volunteers from the community. If you find this plugin helpful, please consider contributing to its development.": "",
 	"The evaluation leaderboard is based on the Elo rating system and is updated in real-time.": "",
 	"The LDAP attribute that maps to the mail that users use to sign in.": "",
@@ -988,14 +988,14 @@
 	"The maximum file size in MB. If the file size exceeds this limit, the file will not be uploaded.": "",
 	"The maximum number of files that can be used at once in chat. If the number of files exceeds this limit, the files will not be uploaded.": "",
 	"The score should be a value between 0.0 (0%) and 1.0 (100%).": "Il punteggio dovrebbe essere un valore compreso tra 0.0 (0%) e 1.0 (100%).",
-	"The temperature of the model. Increasing the temperature will make the model answer more creatively. (Default: 0.8)": "",
+	"The temperature of the model. Increasing the temperature will make the model answer more creatively.": "",
 	"Theme": "Tema",
 	"Thinking...": "",
 	"This action cannot be undone. Do you wish to continue?": "",
 	"This ensures that your valuable conversations are securely saved to your backend database. Thank you!": "Ciò garantisce che le tue preziose conversazioni siano salvate in modo sicuro nel tuo database backend. Grazie!",
 	"This is an experimental feature, it may not function as expected and is subject to change at any time.": "",
-	"This option controls how many tokens are preserved when refreshing the context. For example, if set to 2, the last 2 tokens of the conversation context will be retained. Preserving context can help maintain the continuity of a conversation, but it may reduce the ability to respond to new topics. (Default: 24)": "",
-	"This option sets the maximum number of tokens the model can generate in its response. Increasing this limit allows the model to provide longer answers, but it may also increase the likelihood of unhelpful or irrelevant content being generated.  (Default: 128)": "",
+	"This option controls how many tokens are preserved when refreshing the context. For example, if set to 2, the last 2 tokens of the conversation context will be retained. Preserving context can help maintain the continuity of a conversation, but it may reduce the ability to respond to new topics.": "",
+	"This option sets the maximum number of tokens the model can generate in its response. Increasing this limit allows the model to provide longer answers, but it may also increase the likelihood of unhelpful or irrelevant content being generated.": "",
 	"This option will delete all existing files in the collection and replace them with newly uploaded files.": "",
 	"This response was generated by \"{{model}}\"": "",
 	"This will delete": "",
@@ -1132,7 +1132,7 @@
 	"Why?": "",
 	"Widescreen Mode": "",
 	"Won": "",
-	"Works together with top-k. A higher value (e.g., 0.95) will lead to more diverse text, while a lower value (e.g., 0.5) will generate more focused and conservative text. (Default: 0.9)": "",
+	"Works together with top-k. A higher value (e.g., 0.95) will lead to more diverse text, while a lower value (e.g., 0.5) will generate more focused and conservative text.": "",
 	"Workspace": "Area di lavoro",
 	"Workspace Permissions": "",
 	"Write": "",

+ 18 - 18
src/lib/i18n/locales/ja-JP/translation.json

@@ -64,7 +64,7 @@
 	"Allow Voice Interruption in Call": "通話中に音声の割り込みを許可",
 	"Allowed Endpoints": "",
 	"Already have an account?": "すでにアカウントをお持ちですか?",
-	"Alternative to the top_p, and aims to ensure a balance of quality and variety. The parameter p represents the minimum probability for a token to be considered, relative to the probability of the most likely token. For example, with p=0.05 and the most likely token having a probability of 0.9, logits with a value less than 0.045 are filtered out. (Default: 0.0)": "",
+	"Alternative to the top_p, and aims to ensure a balance of quality and variety. The parameter p represents the minimum probability for a token to be considered, relative to the probability of the most likely token. For example, with p=0.05 and the most likely token having a probability of 0.9, logits with a value less than 0.045 are filtered out.": "",
 	"Always": "",
 	"Amazing": "",
 	"an assistant": "アシスタント",
@@ -208,7 +208,7 @@
 	"Confirm your new password": "",
 	"Connect to your own OpenAI compatible API endpoints.": "",
 	"Connections": "接続",
-	"Constrains effort on reasoning for reasoning models. Only applicable to reasoning models from specific providers that support reasoning effort. (Default: medium)": "",
+	"Constrains effort on reasoning for reasoning models. Only applicable to reasoning models from specific providers that support reasoning effort.": "",
 	"Contact Admin for WebUI Access": "WEBUIへの接続について管理者に問い合わせ下さい。",
 	"Content": "コンテンツ",
 	"Content Extraction Engine": "",
@@ -218,9 +218,9 @@
 	"Continue with Email": "",
 	"Continue with LDAP": "",
 	"Control how message text is split for TTS requests. 'Punctuation' splits into sentences, 'paragraphs' splits into paragraphs, and 'none' keeps the message as a single string.": "",
-	"Control the repetition of token sequences in the generated text. A higher value (e.g., 1.5) will penalize repetitions more strongly, while a lower value (e.g., 1.1) will be more lenient. At 1, it is disabled. (Default: 1.1)": "",
+	"Control the repetition of token sequences in the generated text. A higher value (e.g., 1.5) will penalize repetitions more strongly, while a lower value (e.g., 1.1) will be more lenient. At 1, it is disabled.": "",
 	"Controls": "コントロール",
-	"Controls the balance between coherence and diversity of the output. A lower value will result in more focused and coherent text. (Default: 5.0)": "",
+	"Controls the balance between coherence and diversity of the output. A lower value will result in more focused and coherent text.": "",
 	"Copied": "コピー",
 	"Copied shared chat URL to clipboard!": "共有チャットURLをクリップボードにコピーしました!",
 	"Copied to clipboard": "クリップボードにコピーしました。",
@@ -358,7 +358,7 @@
 	"Enable Memory Locking (mlock) to prevent model data from being swapped out of RAM. This option locks the model's working set of pages into RAM, ensuring that they will not be swapped out to disk. This can help maintain performance by avoiding page faults and ensuring fast data access.": "",
 	"Enable Memory Mapping (mmap) to load model data. This option allows the system to use disk storage as an extension of RAM by treating disk files as if they were in RAM. This can improve model performance by allowing for faster data access. However, it may not work correctly with all systems and can consume a significant amount of disk space.": "",
 	"Enable Message Rating": "メッセージ評価を有効にする",
-	"Enable Mirostat sampling for controlling perplexity. (Default: 0, 0 = Disabled, 1 = Mirostat, 2 = Mirostat 2.0)": "",
+	"Enable Mirostat sampling for controlling perplexity.": "",
 	"Enable New Sign Ups": "新規登録を有効にする",
 	"Enabled": "有効",
 	"Ensure your CSV file includes 4 columns in this order: Name, Email, Password, Role.": "CSVファイルに4つの列が含まれていることを確認してください: Name, Email, Password, Role.",
@@ -566,7 +566,7 @@
 	"Include": "",
 	"Include `--api-auth` flag when running stable-diffusion-webui": "",
 	"Include `--api` flag when running stable-diffusion-webui": "stable-diffusion-webuiを実行する際に`--api`フラグを含める",
-	"Influences how quickly the algorithm responds to feedback from the generated text. A lower learning rate will result in slower adjustments, while a higher learning rate will make the algorithm more responsive. (Default: 0.1)": "",
+	"Influences how quickly the algorithm responds to feedback from the generated text. A lower learning rate will result in slower adjustments, while a higher learning rate will make the algorithm more responsive.": "",
 	"Info": "情報",
 	"Input commands": "入力コマンド",
 	"Install from Github URL": "Github URLからインストール",
@@ -809,7 +809,7 @@
 	"Reasoning Effort": "",
 	"Record voice": "音声を録音",
 	"Redirecting you to Open WebUI Community": "OpenWebUI コミュニティにリダイレクトしています",
-	"Reduces the probability of generating nonsense. A higher value (e.g. 100) will give more diverse answers, while a lower value (e.g. 10) will be more conservative. (Default: 40)": "",
+	"Reduces the probability of generating nonsense. A higher value (e.g. 100) will give more diverse answers, while a lower value (e.g. 10) will be more conservative.": "",
 	"Refer to yourself as \"User\" (e.g., \"User is learning Spanish\")": "",
 	"References from": "",
 	"Refused when it shouldn't have": "拒否すべきでないのに拒否した",
@@ -918,11 +918,11 @@
 	"Set the number of worker threads used for computation. This option controls how many threads are used to process incoming requests concurrently. Increasing this value can improve performance under high concurrency workloads but may also consume more CPU resources.": "",
 	"Set Voice": "音声を設定",
 	"Set whisper model": "",
-	"Sets a flat bias against tokens that have appeared at least once. A higher value (e.g., 1.5) will penalize repetitions more strongly, while a lower value (e.g., 0.9) will be more lenient. At 0, it is disabled. (Default: 0)": "",
-	"Sets a scaling bias against tokens to penalize repetitions, based on how many times they have appeared. A higher value (e.g., 1.5) will penalize repetitions more strongly, while a lower value (e.g., 0.9) will be more lenient. At 0, it is disabled. (Default: 1.1)": "",
-	"Sets how far back for the model to look back to prevent repetition. (Default: 64, 0 = disabled, -1 = num_ctx)": "",
-	"Sets the random number seed to use for generation. Setting this to a specific number will make the model generate the same text for the same prompt. (Default: random)": "",
-	"Sets the size of the context window used to generate the next token. (Default: 2048)": "",
+	"Sets a flat bias against tokens that have appeared at least once. A higher value (e.g., 1.5) will penalize repetitions more strongly, while a lower value (e.g., 0.9) will be more lenient. At 0, it is disabled.": "",
+	"Sets a scaling bias against tokens to penalize repetitions, based on how many times they have appeared. A higher value (e.g., 1.5) will penalize repetitions more strongly, while a lower value (e.g., 0.9) will be more lenient. At 0, it is disabled.": "",
+	"Sets how far back for the model to look back to prevent repetition.": "",
+	"Sets the random number seed to use for generation. Setting this to a specific number will make the model generate the same text for the same prompt.": "",
+	"Sets the size of the context window used to generate the next token.": "",
 	"Sets the stop sequences to use. When this pattern is encountered, the LLM will stop generating text and return. Multiple stop patterns may be set by specifying multiple separate stop parameters in a modelfile.": "",
 	"Settings": "設定",
 	"Settings saved successfully!": "設定が正常に保存されました!",
@@ -964,7 +964,7 @@
 	"System Prompt": "システムプロンプト",
 	"Tags Generation": "",
 	"Tags Generation Prompt": "",
-	"Tail free sampling is used to reduce the impact of less probable tokens from the output. A higher value (e.g., 2.0) will reduce the impact more, while a value of 1.0 disables this setting. (default: 1)": "",
+	"Tail free sampling is used to reduce the impact of less probable tokens from the output. A higher value (e.g., 2.0) will reduce the impact more, while a value of 1.0 disables this setting.": "",
 	"Talk to model": "",
 	"Tap to interrupt": "",
 	"Tasks": "",
@@ -979,7 +979,7 @@
 	"Thanks for your feedback!": "ご意見ありがとうございます!",
 	"The Application Account DN you bind with for search": "",
 	"The base to search for users": "",
-	"The batch size determines how many text requests are processed together at once. A higher batch size can increase the performance and speed of the model, but it also requires more memory.  (Default: 512)": "",
+	"The batch size determines how many text requests are processed together at once. A higher batch size can increase the performance and speed of the model, but it also requires more memory.": "",
 	"The developers behind this plugin are passionate volunteers from the community. If you find this plugin helpful, please consider contributing to its development.": "",
 	"The evaluation leaderboard is based on the Elo rating system and is updated in real-time.": "",
 	"The LDAP attribute that maps to the mail that users use to sign in.": "",
@@ -988,14 +988,14 @@
 	"The maximum file size in MB. If the file size exceeds this limit, the file will not be uploaded.": "",
 	"The maximum number of files that can be used at once in chat. If the number of files exceeds this limit, the files will not be uploaded.": "",
 	"The score should be a value between 0.0 (0%) and 1.0 (100%).": "スコアは0.0(0%)から1.0(100%)の間の値にしてください。",
-	"The temperature of the model. Increasing the temperature will make the model answer more creatively. (Default: 0.8)": "",
+	"The temperature of the model. Increasing the temperature will make the model answer more creatively.": "",
 	"Theme": "テーマ",
 	"Thinking...": "思考中...",
 	"This action cannot be undone. Do you wish to continue?": "このアクションは取り消し不可です。続けますか?",
 	"This ensures that your valuable conversations are securely saved to your backend database. Thank you!": "これは、貴重な会話がバックエンドデータベースに安全に保存されることを保証します。ありがとうございます!",
 	"This is an experimental feature, it may not function as expected and is subject to change at any time.": "実験的機能であり正常動作しない場合があります。",
-	"This option controls how many tokens are preserved when refreshing the context. For example, if set to 2, the last 2 tokens of the conversation context will be retained. Preserving context can help maintain the continuity of a conversation, but it may reduce the ability to respond to new topics. (Default: 24)": "",
-	"This option sets the maximum number of tokens the model can generate in its response. Increasing this limit allows the model to provide longer answers, but it may also increase the likelihood of unhelpful or irrelevant content being generated.  (Default: 128)": "",
+	"This option controls how many tokens are preserved when refreshing the context. For example, if set to 2, the last 2 tokens of the conversation context will be retained. Preserving context can help maintain the continuity of a conversation, but it may reduce the ability to respond to new topics.": "",
+	"This option sets the maximum number of tokens the model can generate in its response. Increasing this limit allows the model to provide longer answers, but it may also increase the likelihood of unhelpful or irrelevant content being generated.": "",
 	"This option will delete all existing files in the collection and replace them with newly uploaded files.": "",
 	"This response was generated by \"{{model}}\"": "",
 	"This will delete": "",
@@ -1132,7 +1132,7 @@
 	"Why?": "",
 	"Widescreen Mode": "ワイドスクリーンモード",
 	"Won": "",
-	"Works together with top-k. A higher value (e.g., 0.95) will lead to more diverse text, while a lower value (e.g., 0.5) will generate more focused and conservative text. (Default: 0.9)": "",
+	"Works together with top-k. A higher value (e.g., 0.95) will lead to more diverse text, while a lower value (e.g., 0.5) will generate more focused and conservative text.": "",
 	"Workspace": "ワークスペース",
 	"Workspace Permissions": "",
 	"Write": "",

+ 18 - 18
src/lib/i18n/locales/ka-GE/translation.json

@@ -64,7 +64,7 @@
 	"Allow Voice Interruption in Call": "",
 	"Allowed Endpoints": "დაშვებული ბოლოწერტილები",
 	"Already have an account?": "უკვე გაქვთ ანგარიში?",
-	"Alternative to the top_p, and aims to ensure a balance of quality and variety. The parameter p represents the minimum probability for a token to be considered, relative to the probability of the most likely token. For example, with p=0.05 and the most likely token having a probability of 0.9, logits with a value less than 0.045 are filtered out. (Default: 0.0)": "",
+	"Alternative to the top_p, and aims to ensure a balance of quality and variety. The parameter p represents the minimum probability for a token to be considered, relative to the probability of the most likely token. For example, with p=0.05 and the most likely token having a probability of 0.9, logits with a value less than 0.045 are filtered out.": "",
 	"Always": "ყოველთვის",
 	"Amazing": "გადასარევია",
 	"an assistant": "დამხმარე",
@@ -208,7 +208,7 @@
 	"Confirm your new password": "",
 	"Connect to your own OpenAI compatible API endpoints.": "",
 	"Connections": "კავშირები",
-	"Constrains effort on reasoning for reasoning models. Only applicable to reasoning models from specific providers that support reasoning effort. (Default: medium)": "",
+	"Constrains effort on reasoning for reasoning models. Only applicable to reasoning models from specific providers that support reasoning effort.": "",
 	"Contact Admin for WebUI Access": "",
 	"Content": "შემცველობა",
 	"Content Extraction Engine": "",
@@ -218,9 +218,9 @@
 	"Continue with Email": "",
 	"Continue with LDAP": "",
 	"Control how message text is split for TTS requests. 'Punctuation' splits into sentences, 'paragraphs' splits into paragraphs, and 'none' keeps the message as a single string.": "",
-	"Control the repetition of token sequences in the generated text. A higher value (e.g., 1.5) will penalize repetitions more strongly, while a lower value (e.g., 1.1) will be more lenient. At 1, it is disabled. (Default: 1.1)": "",
+	"Control the repetition of token sequences in the generated text. A higher value (e.g., 1.5) will penalize repetitions more strongly, while a lower value (e.g., 1.1) will be more lenient. At 1, it is disabled.": "",
 	"Controls": "მმართველები",
-	"Controls the balance between coherence and diversity of the output. A lower value will result in more focused and coherent text. (Default: 5.0)": "",
+	"Controls the balance between coherence and diversity of the output. A lower value will result in more focused and coherent text.": "",
 	"Copied": "დაკოპირდა",
 	"Copied shared chat URL to clipboard!": "გაზიარებული ჩატის ბმული დაკოპირდა ბუფერში!",
 	"Copied to clipboard": "დაკოპირდა გაცვლის ბაფერში",
@@ -358,7 +358,7 @@
 	"Enable Memory Locking (mlock) to prevent model data from being swapped out of RAM. This option locks the model's working set of pages into RAM, ensuring that they will not be swapped out to disk. This can help maintain performance by avoiding page faults and ensuring fast data access.": "",
 	"Enable Memory Mapping (mmap) to load model data. This option allows the system to use disk storage as an extension of RAM by treating disk files as if they were in RAM. This can improve model performance by allowing for faster data access. However, it may not work correctly with all systems and can consume a significant amount of disk space.": "",
 	"Enable Message Rating": "",
-	"Enable Mirostat sampling for controlling perplexity. (Default: 0, 0 = Disabled, 1 = Mirostat, 2 = Mirostat 2.0)": "",
+	"Enable Mirostat sampling for controlling perplexity.": "",
 	"Enable New Sign Ups": "ახალი რეგისტრაციების ჩართვა",
 	"Enabled": "ჩართულია",
 	"Ensure your CSV file includes 4 columns in this order: Name, Email, Password, Role.": "დარწმუნდით, რომ თქვენი CSV-ფაილი შეიცავს 4 ველს ამ მიმდევრობით: სახელი, ელფოსტა, პაროლი, როლი.",
@@ -566,7 +566,7 @@
 	"Include": "ჩართვა",
 	"Include `--api-auth` flag when running stable-diffusion-webui": "",
 	"Include `--api` flag when running stable-diffusion-webui": "`--api` ალმის ჩასმა stable-diffusion-webui-ის გამოყენებისას",
-	"Influences how quickly the algorithm responds to feedback from the generated text. A lower learning rate will result in slower adjustments, while a higher learning rate will make the algorithm more responsive. (Default: 0.1)": "",
+	"Influences how quickly the algorithm responds to feedback from the generated text. A lower learning rate will result in slower adjustments, while a higher learning rate will make the algorithm more responsive.": "",
 	"Info": "ინფორმაცია",
 	"Input commands": "შეიყვანეთ ბრძანებები",
 	"Install from Github URL": "დაყენება Github-ის ბმულიდან",
@@ -809,7 +809,7 @@
 	"Reasoning Effort": "",
 	"Record voice": "ხმის ჩაწერა",
 	"Redirecting you to Open WebUI Community": "მიმდინარეობს გადამისამართება OpenWebUI-ის საზოგადოების საიტზე",
-	"Reduces the probability of generating nonsense. A higher value (e.g. 100) will give more diverse answers, while a lower value (e.g. 10) will be more conservative. (Default: 40)": "",
+	"Reduces the probability of generating nonsense. A higher value (e.g. 100) will give more diverse answers, while a lower value (e.g. 10) will be more conservative.": "",
 	"Refer to yourself as \"User\" (e.g., \"User is learning Spanish\")": "",
 	"References from": "",
 	"Refused when it shouldn't have": "უარა, როგორც უნდა იყოს",
@@ -918,11 +918,11 @@
 	"Set the number of worker threads used for computation. This option controls how many threads are used to process incoming requests concurrently. Increasing this value can improve performance under high concurrency workloads but may also consume more CPU resources.": "",
 	"Set Voice": "ხმის დაყენება",
 	"Set whisper model": "",
-	"Sets a flat bias against tokens that have appeared at least once. A higher value (e.g., 1.5) will penalize repetitions more strongly, while a lower value (e.g., 0.9) will be more lenient. At 0, it is disabled. (Default: 0)": "",
-	"Sets a scaling bias against tokens to penalize repetitions, based on how many times they have appeared. A higher value (e.g., 1.5) will penalize repetitions more strongly, while a lower value (e.g., 0.9) will be more lenient. At 0, it is disabled. (Default: 1.1)": "",
-	"Sets how far back for the model to look back to prevent repetition. (Default: 64, 0 = disabled, -1 = num_ctx)": "",
-	"Sets the random number seed to use for generation. Setting this to a specific number will make the model generate the same text for the same prompt. (Default: random)": "",
-	"Sets the size of the context window used to generate the next token. (Default: 2048)": "",
+	"Sets a flat bias against tokens that have appeared at least once. A higher value (e.g., 1.5) will penalize repetitions more strongly, while a lower value (e.g., 0.9) will be more lenient. At 0, it is disabled.": "",
+	"Sets a scaling bias against tokens to penalize repetitions, based on how many times they have appeared. A higher value (e.g., 1.5) will penalize repetitions more strongly, while a lower value (e.g., 0.9) will be more lenient. At 0, it is disabled.": "",
+	"Sets how far back for the model to look back to prevent repetition.": "",
+	"Sets the random number seed to use for generation. Setting this to a specific number will make the model generate the same text for the same prompt.": "",
+	"Sets the size of the context window used to generate the next token.": "",
 	"Sets the stop sequences to use. When this pattern is encountered, the LLM will stop generating text and return. Multiple stop patterns may be set by specifying multiple separate stop parameters in a modelfile.": "",
 	"Settings": "მორგება",
 	"Settings saved successfully!": "პარამეტრები შენახვა წარმატებულია!",
@@ -964,7 +964,7 @@
 	"System Prompt": "სისტემური მოთხოვნა",
 	"Tags Generation": "",
 	"Tags Generation Prompt": "",
-	"Tail free sampling is used to reduce the impact of less probable tokens from the output. A higher value (e.g., 2.0) will reduce the impact more, while a value of 1.0 disables this setting. (default: 1)": "",
+	"Tail free sampling is used to reduce the impact of less probable tokens from the output. A higher value (e.g., 2.0) will reduce the impact more, while a value of 1.0 disables this setting.": "",
 	"Talk to model": "",
 	"Tap to interrupt": "",
 	"Tasks": "ამოცანები",
@@ -979,7 +979,7 @@
 	"Thanks for your feedback!": "მადლობა გამოხმაურებისთვის!",
 	"The Application Account DN you bind with for search": "",
 	"The base to search for users": "",
-	"The batch size determines how many text requests are processed together at once. A higher batch size can increase the performance and speed of the model, but it also requires more memory.  (Default: 512)": "",
+	"The batch size determines how many text requests are processed together at once. A higher batch size can increase the performance and speed of the model, but it also requires more memory.": "",
 	"The developers behind this plugin are passionate volunteers from the community. If you find this plugin helpful, please consider contributing to its development.": "",
 	"The evaluation leaderboard is based on the Elo rating system and is updated in real-time.": "",
 	"The LDAP attribute that maps to the mail that users use to sign in.": "",
@@ -988,14 +988,14 @@
 	"The maximum file size in MB. If the file size exceeds this limit, the file will not be uploaded.": "",
 	"The maximum number of files that can be used at once in chat. If the number of files exceeds this limit, the files will not be uploaded.": "",
 	"The score should be a value between 0.0 (0%) and 1.0 (100%).": "რეიტინგი უნდა იყოს მნიშვნელობ შუალედიდან 0.0 (0%) - 1.0 (100%).",
-	"The temperature of the model. Increasing the temperature will make the model answer more creatively. (Default: 0.8)": "",
+	"The temperature of the model. Increasing the temperature will make the model answer more creatively.": "",
 	"Theme": "თემა",
 	"Thinking...": "",
 	"This action cannot be undone. Do you wish to continue?": "",
 	"This ensures that your valuable conversations are securely saved to your backend database. Thank you!": "ეს უზრუნველყოფს, რომ თქვენი ღირებული საუბრები უსაფრთხოდ შეინახება თქვენს უკანაბოლო მონაცემთა ბაზაში. მადლობა!",
 	"This is an experimental feature, it may not function as expected and is subject to change at any time.": "",
-	"This option controls how many tokens are preserved when refreshing the context. For example, if set to 2, the last 2 tokens of the conversation context will be retained. Preserving context can help maintain the continuity of a conversation, but it may reduce the ability to respond to new topics. (Default: 24)": "",
-	"This option sets the maximum number of tokens the model can generate in its response. Increasing this limit allows the model to provide longer answers, but it may also increase the likelihood of unhelpful or irrelevant content being generated.  (Default: 128)": "",
+	"This option controls how many tokens are preserved when refreshing the context. For example, if set to 2, the last 2 tokens of the conversation context will be retained. Preserving context can help maintain the continuity of a conversation, but it may reduce the ability to respond to new topics.": "",
+	"This option sets the maximum number of tokens the model can generate in its response. Increasing this limit allows the model to provide longer answers, but it may also increase the likelihood of unhelpful or irrelevant content being generated.": "",
 	"This option will delete all existing files in the collection and replace them with newly uploaded files.": "",
 	"This response was generated by \"{{model}}\"": "",
 	"This will delete": "",
@@ -1132,7 +1132,7 @@
 	"Why?": "რატომ?",
 	"Widescreen Mode": "",
 	"Won": "ვონი",
-	"Works together with top-k. A higher value (e.g., 0.95) will lead to more diverse text, while a lower value (e.g., 0.5) will generate more focused and conservative text. (Default: 0.9)": "",
+	"Works together with top-k. A higher value (e.g., 0.95) will lead to more diverse text, while a lower value (e.g., 0.5) will generate more focused and conservative text.": "",
 	"Workspace": "სამუშაო სივრცე",
 	"Workspace Permissions": "",
 	"Write": "ჩაწერა",

+ 18 - 18
src/lib/i18n/locales/ko-KR/translation.json

@@ -64,7 +64,7 @@
 	"Allow Voice Interruption in Call": "음성 기능에서 음성 방해 허용",
 	"Allowed Endpoints": "",
 	"Already have an account?": "이미 계정이 있으신가요?",
-	"Alternative to the top_p, and aims to ensure a balance of quality and variety. The parameter p represents the minimum probability for a token to be considered, relative to the probability of the most likely token. For example, with p=0.05 and the most likely token having a probability of 0.9, logits with a value less than 0.045 are filtered out. (Default: 0.0)": "",
+	"Alternative to the top_p, and aims to ensure a balance of quality and variety. The parameter p represents the minimum probability for a token to be considered, relative to the probability of the most likely token. For example, with p=0.05 and the most likely token having a probability of 0.9, logits with a value less than 0.045 are filtered out.": "",
 	"Always": "",
 	"Amazing": "놀라움",
 	"an assistant": "어시스턴트",
@@ -208,7 +208,7 @@
 	"Confirm your new password": "새로운 비밀번호를 한 번 더 입력해 주세요",
 	"Connect to your own OpenAI compatible API endpoints.": "",
 	"Connections": "연결",
-	"Constrains effort on reasoning for reasoning models. Only applicable to reasoning models from specific providers that support reasoning effort. (Default: medium)": "",
+	"Constrains effort on reasoning for reasoning models. Only applicable to reasoning models from specific providers that support reasoning effort.": "",
 	"Contact Admin for WebUI Access": "WebUI 접속을 위해서는 관리자에게 연락에 연락하십시오",
 	"Content": "내용",
 	"Content Extraction Engine": "",
@@ -218,9 +218,9 @@
 	"Continue with Email": "",
 	"Continue with LDAP": "",
 	"Control how message text is split for TTS requests. 'Punctuation' splits into sentences, 'paragraphs' splits into paragraphs, and 'none' keeps the message as a single string.": "TTS 요청에 메시지가 어떻게 나뉘어지는지 제어하십시오. '문장 부호'는 문장으로 나뉘고, '문단'은 문단으로 나뉘고, '없음'은 메세지를 하나의 문자열로 인식합니다.",
-	"Control the repetition of token sequences in the generated text. A higher value (e.g., 1.5) will penalize repetitions more strongly, while a lower value (e.g., 1.1) will be more lenient. At 1, it is disabled. (Default: 1.1)": "",
+	"Control the repetition of token sequences in the generated text. A higher value (e.g., 1.5) will penalize repetitions more strongly, while a lower value (e.g., 1.1) will be more lenient. At 1, it is disabled.": "",
 	"Controls": "제어",
-	"Controls the balance between coherence and diversity of the output. A lower value will result in more focused and coherent text. (Default: 5.0)": "",
+	"Controls the balance between coherence and diversity of the output. A lower value will result in more focused and coherent text.": "",
 	"Copied": "복사됨",
 	"Copied shared chat URL to clipboard!": "채팅 공유 URL이 클립보드에 복사되었습니다!",
 	"Copied to clipboard": "클립보드에 복사되었습니다",
@@ -358,7 +358,7 @@
 	"Enable Memory Locking (mlock) to prevent model data from being swapped out of RAM. This option locks the model's working set of pages into RAM, ensuring that they will not be swapped out to disk. This can help maintain performance by avoiding page faults and ensuring fast data access.": "",
 	"Enable Memory Mapping (mmap) to load model data. This option allows the system to use disk storage as an extension of RAM by treating disk files as if they were in RAM. This can improve model performance by allowing for faster data access. However, it may not work correctly with all systems and can consume a significant amount of disk space.": "",
 	"Enable Message Rating": "메시지 평가 활성화",
-	"Enable Mirostat sampling for controlling perplexity. (Default: 0, 0 = Disabled, 1 = Mirostat, 2 = Mirostat 2.0)": "",
+	"Enable Mirostat sampling for controlling perplexity.": "",
 	"Enable New Sign Ups": "새 회원가입 활성화",
 	"Enabled": "활성화됨",
 	"Ensure your CSV file includes 4 columns in this order: Name, Email, Password, Role.": "CSV 파일에 이름, 이메일, 비밀번호, 역할 4개의 열이 순서대로 포함되어 있는지 확인하세요.",
@@ -566,7 +566,7 @@
 	"Include": "포함",
 	"Include `--api-auth` flag when running stable-diffusion-webui": "stable-diffusion-webui를 실행 시 `--api-auth` 플래그를 포함하세요",
 	"Include `--api` flag when running stable-diffusion-webui": "stable-diffusion-webui를 실행 시 `--api` 플래그를 포함하세요",
-	"Influences how quickly the algorithm responds to feedback from the generated text. A lower learning rate will result in slower adjustments, while a higher learning rate will make the algorithm more responsive. (Default: 0.1)": "",
+	"Influences how quickly the algorithm responds to feedback from the generated text. A lower learning rate will result in slower adjustments, while a higher learning rate will make the algorithm more responsive.": "",
 	"Info": "정보",
 	"Input commands": "명령어 입력",
 	"Install from Github URL": "Github URL에서 설치",
@@ -809,7 +809,7 @@
 	"Reasoning Effort": "",
 	"Record voice": "음성 녹음",
 	"Redirecting you to Open WebUI Community": "OpenWebUI 커뮤니티로 리디렉션 중",
-	"Reduces the probability of generating nonsense. A higher value (e.g. 100) will give more diverse answers, while a lower value (e.g. 10) will be more conservative. (Default: 40)": "",
+	"Reduces the probability of generating nonsense. A higher value (e.g. 100) will give more diverse answers, while a lower value (e.g. 10) will be more conservative.": "",
 	"Refer to yourself as \"User\" (e.g., \"User is learning Spanish\")": "스스로를 \"사용자\" 라고 지칭하세요. (예: \"사용자는 영어를 배우고 있습니다\")",
 	"References from": "출처",
 	"Refused when it shouldn't have": "허용되지 않았지만 허용되어야 합니다.",
@@ -918,11 +918,11 @@
 	"Set the number of worker threads used for computation. This option controls how many threads are used to process incoming requests concurrently. Increasing this value can improve performance under high concurrency workloads but may also consume more CPU resources.": "",
 	"Set Voice": "음성 설정",
 	"Set whisper model": "자막 생성기 모델 설정",
-	"Sets a flat bias against tokens that have appeared at least once. A higher value (e.g., 1.5) will penalize repetitions more strongly, while a lower value (e.g., 0.9) will be more lenient. At 0, it is disabled. (Default: 0)": "",
-	"Sets a scaling bias against tokens to penalize repetitions, based on how many times they have appeared. A higher value (e.g., 1.5) will penalize repetitions more strongly, while a lower value (e.g., 0.9) will be more lenient. At 0, it is disabled. (Default: 1.1)": "",
-	"Sets how far back for the model to look back to prevent repetition. (Default: 64, 0 = disabled, -1 = num_ctx)": "",
-	"Sets the random number seed to use for generation. Setting this to a specific number will make the model generate the same text for the same prompt. (Default: random)": "생성을 위한 무작위 숫자 시드를 설정합니다. 이 값을 특정 숫자로 설정하면 동일한 프롬프트에 대해 동일한 텍스트를 생성합니다. (기본값: 무작위)",
-	"Sets the size of the context window used to generate the next token. (Default: 2048)": "",
+	"Sets a flat bias against tokens that have appeared at least once. A higher value (e.g., 1.5) will penalize repetitions more strongly, while a lower value (e.g., 0.9) will be more lenient. At 0, it is disabled.": "",
+	"Sets a scaling bias against tokens to penalize repetitions, based on how many times they have appeared. A higher value (e.g., 1.5) will penalize repetitions more strongly, while a lower value (e.g., 0.9) will be more lenient. At 0, it is disabled.": "",
+	"Sets how far back for the model to look back to prevent repetition.": "",
+	"Sets the random number seed to use for generation. Setting this to a specific number will make the model generate the same text for the same prompt.": "",
+	"Sets the size of the context window used to generate the next token.": "",
 	"Sets the stop sequences to use. When this pattern is encountered, the LLM will stop generating text and return. Multiple stop patterns may be set by specifying multiple separate stop parameters in a modelfile.": "중단 시퀀스를 설정합니다. 이 패턴이 발생하면 LLM은 텍스트 생성을 중단하고 반환합니다. 여러 중단 패턴은 모델 파일에서 여러 개의 별도 중단 매개변수를 지정하여 설정할 수 있습니다.",
 	"Settings": "설정",
 	"Settings saved successfully!": "설정이 성공적으로 저장되었습니다!",
@@ -964,7 +964,7 @@
 	"System Prompt": "시스템 프롬프트",
 	"Tags Generation": "태그 생성",
 	"Tags Generation Prompt": "태그 생성 프롬프트",
-	"Tail free sampling is used to reduce the impact of less probable tokens from the output. A higher value (e.g., 2.0) will reduce the impact more, while a value of 1.0 disables this setting. (default: 1)": "",
+	"Tail free sampling is used to reduce the impact of less probable tokens from the output. A higher value (e.g., 2.0) will reduce the impact more, while a value of 1.0 disables this setting.": "",
 	"Talk to model": "",
 	"Tap to interrupt": "탭하여 중단",
 	"Tasks": "",
@@ -979,7 +979,7 @@
 	"Thanks for your feedback!": "피드백 감사합니다!",
 	"The Application Account DN you bind with for search": "",
 	"The base to search for users": "",
-	"The batch size determines how many text requests are processed together at once. A higher batch size can increase the performance and speed of the model, but it also requires more memory.  (Default: 512)": "",
+	"The batch size determines how many text requests are processed together at once. A higher batch size can increase the performance and speed of the model, but it also requires more memory.": "",
 	"The developers behind this plugin are passionate volunteers from the community. If you find this plugin helpful, please consider contributing to its development.": "이 플러그인 뒤에 있는 개발자는 커뮤니티에서 활동하는 단순한 열정적인 일반인들입니다. 만약 플러그인이 도움 되었다면, 플러그인 개발에 기여를 고려해주세요!",
 	"The evaluation leaderboard is based on the Elo rating system and is updated in real-time.": "평가 리더보드는 Elo 평가 시스템을 기반으로 하고 실시간으로 업데이트됩니다",
 	"The LDAP attribute that maps to the mail that users use to sign in.": "",
@@ -988,14 +988,14 @@
 	"The maximum file size in MB. If the file size exceeds this limit, the file will not be uploaded.": "최대 파일 크기(MB). 만약 파일 크기가 한도를 초과할 시, 파일은 업로드되지 않습니다",
 	"The maximum number of files that can be used at once in chat. If the number of files exceeds this limit, the files will not be uploaded.": "하나의 채팅에서는 사용가능한 최대 파일 수가 있습니다. 만약 파일 수가 한도를 초과할 시, 파일은 업로드되지 않습니다.",
 	"The score should be a value between 0.0 (0%) and 1.0 (100%).": "점수는 0.0(0%)에서 1.0(100%) 사이의 값이어야 합니다.",
-	"The temperature of the model. Increasing the temperature will make the model answer more creatively. (Default: 0.8)": "모델의 온도. 온도를 높이면 모델이 더 창의적으로 답변합니다. (기본값: 0.8)",
+	"The temperature of the model. Increasing the temperature will make the model answer more creatively.": "",
 	"Theme": "테마",
 	"Thinking...": "생각 중...",
 	"This action cannot be undone. Do you wish to continue?": "이 액션은 되돌릴 수 없습니다. 계속 하시겠습니까?",
 	"This ensures that your valuable conversations are securely saved to your backend database. Thank you!": "이렇게 하면 소중한 대화 내용이 백엔드 데이터베이스에 안전하게 저장됩니다. 감사합니다!",
 	"This is an experimental feature, it may not function as expected and is subject to change at any time.": "이것은 실험적 기능으로, 예상대로 작동하지 않을 수 있으며 언제든지 변경될 수 있습니다.",
-	"This option controls how many tokens are preserved when refreshing the context. For example, if set to 2, the last 2 tokens of the conversation context will be retained. Preserving context can help maintain the continuity of a conversation, but it may reduce the ability to respond to new topics. (Default: 24)": "",
-	"This option sets the maximum number of tokens the model can generate in its response. Increasing this limit allows the model to provide longer answers, but it may also increase the likelihood of unhelpful or irrelevant content being generated.  (Default: 128)": "",
+	"This option controls how many tokens are preserved when refreshing the context. For example, if set to 2, the last 2 tokens of the conversation context will be retained. Preserving context can help maintain the continuity of a conversation, but it may reduce the ability to respond to new topics.": "",
+	"This option sets the maximum number of tokens the model can generate in its response. Increasing this limit allows the model to provide longer answers, but it may also increase the likelihood of unhelpful or irrelevant content being generated.": "",
 	"This option will delete all existing files in the collection and replace them with newly uploaded files.": "이 행동은 컬렉션에 존재하는 모든 파일을 삭제하고 새로 업로드된 파일들로 대체됩니다",
 	"This response was generated by \"{{model}}\"": "\"{{model}}\"이 생성한 응답입니다",
 	"This will delete": "이것은 다음을 삭제합니다.",
@@ -1132,7 +1132,7 @@
 	"Why?": "이유는?",
 	"Widescreen Mode": "와이드스크린 모드",
 	"Won": "승리",
-	"Works together with top-k. A higher value (e.g., 0.95) will lead to more diverse text, while a lower value (e.g., 0.5) will generate more focused and conservative text. (Default: 0.9)": "",
+	"Works together with top-k. A higher value (e.g., 0.95) will lead to more diverse text, while a lower value (e.g., 0.5) will generate more focused and conservative text.": "",
 	"Workspace": "워크스페이스",
 	"Workspace Permissions": "워크스페이스 권한",
 	"Write": "",

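Why the English source strings double as keys: in an i18next-style setup (which this locales/translation.json layout suggests), the full English sentence is the lookup key, so trimming "(Default: 0.8)" from the source string orphans every locale entry keyed to the old text, and an empty or missing translation falls back to the key itself. A minimal lookup sketch, with the t() helper and resource shape assumed for illustration rather than taken from the app's actual code:

// Minimal i18next-style lookup sketch (assumed API shape, not the app's code).
type Resources = Record<string, string>;

const koKR: Resources = {
	// After this commit the key no longer carries the "(Default: ...)" suffix.
	'Enable Mirostat sampling for controlling perplexity.': ''
};

function t(resources: Resources, key: string): string {
	const value = resources[key];
	// Empty or missing translations fall back to the English key itself.
	return value && value.length > 0 ? value : key;
}

console.log(t(koKR, 'Enable Mirostat sampling for controlling perplexity.'));
// -> prints the English text until a translator fills the value back in.

This is why the same 18 key renames repeat in every one of the 49 files: a key that no longer matches the English source would never be looked up again.
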
+ 18 - 18
src/lib/i18n/locales/lt-LT/translation.json

@@ -64,7 +64,7 @@
 	"Allow Voice Interruption in Call": "Leisti pertraukimą skambučio metu",
 	"Allowed Endpoints": "",
 	"Already have an account?": "Ar jau turite paskyrą?",
-	"Alternative to the top_p, and aims to ensure a balance of quality and variety. The parameter p represents the minimum probability for a token to be considered, relative to the probability of the most likely token. For example, with p=0.05 and the most likely token having a probability of 0.9, logits with a value less than 0.045 are filtered out. (Default: 0.0)": "",
+	"Alternative to the top_p, and aims to ensure a balance of quality and variety. The parameter p represents the minimum probability for a token to be considered, relative to the probability of the most likely token. For example, with p=0.05 and the most likely token having a probability of 0.9, logits with a value less than 0.045 are filtered out.": "",
 	"Always": "",
 	"Amazing": "",
 	"an assistant": "assistentas",
@@ -208,7 +208,7 @@
 	"Confirm your new password": "",
 	"Connect to your own OpenAI compatible API endpoints.": "",
 	"Connections": "Ryšiai",
-	"Constrains effort on reasoning for reasoning models. Only applicable to reasoning models from specific providers that support reasoning effort. (Default: medium)": "",
+	"Constrains effort on reasoning for reasoning models. Only applicable to reasoning models from specific providers that support reasoning effort.": "",
 	"Contact Admin for WebUI Access": "Susisiekite su administratoriumi dėl prieigos",
 	"Content": "Turinys",
 	"Content Extraction Engine": "",
@@ -218,9 +218,9 @@
 	"Continue with Email": "",
 	"Continue with LDAP": "",
 	"Control how message text is split for TTS requests. 'Punctuation' splits into sentences, 'paragraphs' splits into paragraphs, and 'none' keeps the message as a single string.": "",
-	"Control the repetition of token sequences in the generated text. A higher value (e.g., 1.5) will penalize repetitions more strongly, while a lower value (e.g., 1.1) will be more lenient. At 1, it is disabled. (Default: 1.1)": "",
+	"Control the repetition of token sequences in the generated text. A higher value (e.g., 1.5) will penalize repetitions more strongly, while a lower value (e.g., 1.1) will be more lenient. At 1, it is disabled.": "",
 	"Controls": "Valdymas",
-	"Controls the balance between coherence and diversity of the output. A lower value will result in more focused and coherent text. (Default: 5.0)": "",
+	"Controls the balance between coherence and diversity of the output. A lower value will result in more focused and coherent text.": "",
 	"Copied": "Nukopijuota",
 	"Copied shared chat URL to clipboard!": "Nukopijavote pokalbio nuorodą",
 	"Copied to clipboard": "",
@@ -358,7 +358,7 @@
 	"Enable Memory Locking (mlock) to prevent model data from being swapped out of RAM. This option locks the model's working set of pages into RAM, ensuring that they will not be swapped out to disk. This can help maintain performance by avoiding page faults and ensuring fast data access.": "",
 	"Enable Memory Mapping (mmap) to load model data. This option allows the system to use disk storage as an extension of RAM by treating disk files as if they were in RAM. This can improve model performance by allowing for faster data access. However, it may not work correctly with all systems and can consume a significant amount of disk space.": "",
 	"Enable Message Rating": "",
-	"Enable Mirostat sampling for controlling perplexity. (Default: 0, 0 = Disabled, 1 = Mirostat, 2 = Mirostat 2.0)": "",
+	"Enable Mirostat sampling for controlling perplexity.": "",
 	"Enable New Sign Ups": "Aktyvuoti naujas registracijas",
 	"Enabled": "Leisti",
 	"Ensure your CSV file includes 4 columns in this order: Name, Email, Password, Role.": "Įsitikinkite, kad CSV failas turi 4 kolonas šiuo eiliškumu: Name, Email, Password, Role.",
@@ -566,7 +566,7 @@
 	"Include": "",
 	"Include `--api-auth` flag when running stable-diffusion-webui": "Įtraukti `--api-auth` flag when running stable-diffusion-webui",
 	"Include `--api` flag when running stable-diffusion-webui": "Pridėti `--api` kai vykdomas stable-diffusion-webui",
-	"Influences how quickly the algorithm responds to feedback from the generated text. A lower learning rate will result in slower adjustments, while a higher learning rate will make the algorithm more responsive. (Default: 0.1)": "",
+	"Influences how quickly the algorithm responds to feedback from the generated text. A lower learning rate will result in slower adjustments, while a higher learning rate will make the algorithm more responsive.": "",
 	"Info": "Informacija",
 	"Input commands": "Įvesties komandos",
 	"Install from Github URL": "Instaliuoti Github nuorodą",
@@ -809,7 +809,7 @@
 	"Reasoning Effort": "",
 	"Record voice": "Įrašyti balsą",
 	"Redirecting you to Open WebUI Community": "Perkeliam Jus į OpenWebUI bendruomenę",
-	"Reduces the probability of generating nonsense. A higher value (e.g. 100) will give more diverse answers, while a lower value (e.g. 10) will be more conservative. (Default: 40)": "",
+	"Reduces the probability of generating nonsense. A higher value (e.g. 100) will give more diverse answers, while a lower value (e.g. 10) will be more conservative.": "",
 	"Refer to yourself as \"User\" (e.g., \"User is learning Spanish\")": "Vadinkite save Naudotoju (pvz. Naudotojas mokosi prancūzų kalbos)",
 	"References from": "",
 	"Refused when it shouldn't have": "Atmesta kai neturėtų būti atmesta",
@@ -918,11 +918,11 @@
 	"Set the number of worker threads used for computation. This option controls how many threads are used to process incoming requests concurrently. Increasing this value can improve performance under high concurrency workloads but may also consume more CPU resources.": "",
 	"Set Voice": "Numatyti balsą",
 	"Set whisper model": "",
-	"Sets a flat bias against tokens that have appeared at least once. A higher value (e.g., 1.5) will penalize repetitions more strongly, while a lower value (e.g., 0.9) will be more lenient. At 0, it is disabled. (Default: 0)": "",
-	"Sets a scaling bias against tokens to penalize repetitions, based on how many times they have appeared. A higher value (e.g., 1.5) will penalize repetitions more strongly, while a lower value (e.g., 0.9) will be more lenient. At 0, it is disabled. (Default: 1.1)": "",
-	"Sets how far back for the model to look back to prevent repetition. (Default: 64, 0 = disabled, -1 = num_ctx)": "",
-	"Sets the random number seed to use for generation. Setting this to a specific number will make the model generate the same text for the same prompt. (Default: random)": "",
-	"Sets the size of the context window used to generate the next token. (Default: 2048)": "",
+	"Sets a flat bias against tokens that have appeared at least once. A higher value (e.g., 1.5) will penalize repetitions more strongly, while a lower value (e.g., 0.9) will be more lenient. At 0, it is disabled.": "",
+	"Sets a scaling bias against tokens to penalize repetitions, based on how many times they have appeared. A higher value (e.g., 1.5) will penalize repetitions more strongly, while a lower value (e.g., 0.9) will be more lenient. At 0, it is disabled.": "",
+	"Sets how far back for the model to look back to prevent repetition.": "",
+	"Sets the random number seed to use for generation. Setting this to a specific number will make the model generate the same text for the same prompt.": "",
+	"Sets the size of the context window used to generate the next token.": "",
 	"Sets the stop sequences to use. When this pattern is encountered, the LLM will stop generating text and return. Multiple stop patterns may be set by specifying multiple separate stop parameters in a modelfile.": "",
 	"Settings": "Nustatymai",
 	"Settings saved successfully!": "Parametrai sėkmingai išsaugoti!",
@@ -964,7 +964,7 @@
 	"System Prompt": "Sistemos užklausa",
 	"Tags Generation": "",
 	"Tags Generation Prompt": "",
-	"Tail free sampling is used to reduce the impact of less probable tokens from the output. A higher value (e.g., 2.0) will reduce the impact more, while a value of 1.0 disables this setting. (default: 1)": "",
+	"Tail free sampling is used to reduce the impact of less probable tokens from the output. A higher value (e.g., 2.0) will reduce the impact more, while a value of 1.0 disables this setting.": "",
 	"Talk to model": "",
 	"Tap to interrupt": "Paspauskite norėdami pertraukti",
 	"Tasks": "",
@@ -979,7 +979,7 @@
 	"Thanks for your feedback!": "Ačiū už atsiliepimus",
 	"The Application Account DN you bind with for search": "",
 	"The base to search for users": "",
-	"The batch size determines how many text requests are processed together at once. A higher batch size can increase the performance and speed of the model, but it also requires more memory.  (Default: 512)": "",
+	"The batch size determines how many text requests are processed together at once. A higher batch size can increase the performance and speed of the model, but it also requires more memory.": "",
 	"The developers behind this plugin are passionate volunteers from the community. If you find this plugin helpful, please consider contributing to its development.": "Šis modulis kuriamas savanorių. Palaikykite jų darbus finansiškai arba prisidėdami kodu.",
 	"The evaluation leaderboard is based on the Elo rating system and is updated in real-time.": "",
 	"The LDAP attribute that maps to the mail that users use to sign in.": "",
@@ -988,14 +988,14 @@
 	"The maximum file size in MB. If the file size exceeds this limit, the file will not be uploaded.": "",
 	"The maximum number of files that can be used at once in chat. If the number of files exceeds this limit, the files will not be uploaded.": "",
 	"The score should be a value between 0.0 (0%) and 1.0 (100%).": "Rezultatas turėtų būti tarp 0.0 (0%) ir 1.0 (100%)",
-	"The temperature of the model. Increasing the temperature will make the model answer more creatively. (Default: 0.8)": "",
+	"The temperature of the model. Increasing the temperature will make the model answer more creatively.": "",
 	"Theme": "Tema",
 	"Thinking...": "Mąsto...",
 	"This action cannot be undone. Do you wish to continue?": "Šis veiksmas negali būti atšauktas. Ar norite tęsti?",
 	"This ensures that your valuable conversations are securely saved to your backend database. Thank you!": "Tai užtikrina, kad Jūsų pokalbiai saugiai saugojami duomenų bazėje. Ačiū!",
 	"This is an experimental feature, it may not function as expected and is subject to change at any time.": "Tai eksperimentinė funkcija ir gali veikti nevisada.",
-	"This option controls how many tokens are preserved when refreshing the context. For example, if set to 2, the last 2 tokens of the conversation context will be retained. Preserving context can help maintain the continuity of a conversation, but it may reduce the ability to respond to new topics. (Default: 24)": "",
-	"This option sets the maximum number of tokens the model can generate in its response. Increasing this limit allows the model to provide longer answers, but it may also increase the likelihood of unhelpful or irrelevant content being generated.  (Default: 128)": "",
+	"This option controls how many tokens are preserved when refreshing the context. For example, if set to 2, the last 2 tokens of the conversation context will be retained. Preserving context can help maintain the continuity of a conversation, but it may reduce the ability to respond to new topics.": "",
+	"This option sets the maximum number of tokens the model can generate in its response. Increasing this limit allows the model to provide longer answers, but it may also increase the likelihood of unhelpful or irrelevant content being generated.": "",
 	"This option will delete all existing files in the collection and replace them with newly uploaded files.": "",
 	"This response was generated by \"{{model}}\"": "",
 	"This will delete": "Tai ištrins",
@@ -1132,7 +1132,7 @@
 	"Why?": "",
 	"Widescreen Mode": "Plataus ekrano rėžimas",
 	"Won": "",
-	"Works together with top-k. A higher value (e.g., 0.95) will lead to more diverse text, while a lower value (e.g., 0.5) will generate more focused and conservative text. (Default: 0.9)": "",
+	"Works together with top-k. A higher value (e.g., 0.95) will lead to more diverse text, while a lower value (e.g., 0.5) will generate more focused and conservative text.": "",
 	"Workspace": "Nuostatos",
 	"Workspace Permissions": "",
 	"Write": "",

+ 18 - 18
src/lib/i18n/locales/ms-MY/translation.json

@@ -64,7 +64,7 @@
 	"Allow Voice Interruption in Call": "Benarkan gangguan suara dalam panggilan",
 	"Allowed Endpoints": "",
 	"Already have an account?": "Telah mempunyai akaun?",
-	"Alternative to the top_p, and aims to ensure a balance of quality and variety. The parameter p represents the minimum probability for a token to be considered, relative to the probability of the most likely token. For example, with p=0.05 and the most likely token having a probability of 0.9, logits with a value less than 0.045 are filtered out. (Default: 0.0)": "",
+	"Alternative to the top_p, and aims to ensure a balance of quality and variety. The parameter p represents the minimum probability for a token to be considered, relative to the probability of the most likely token. For example, with p=0.05 and the most likely token having a probability of 0.9, logits with a value less than 0.045 are filtered out.": "",
 	"Always": "",
 	"Amazing": "",
 	"an assistant": "seorang pembantu",
@@ -208,7 +208,7 @@
 	"Confirm your new password": "",
 	"Connect to your own OpenAI compatible API endpoints.": "",
 	"Connections": "Sambungan",
-	"Constrains effort on reasoning for reasoning models. Only applicable to reasoning models from specific providers that support reasoning effort. (Default: medium)": "",
+	"Constrains effort on reasoning for reasoning models. Only applicable to reasoning models from specific providers that support reasoning effort.": "",
 	"Contact Admin for WebUI Access": "Hubungi admin untuk akses WebUI",
 	"Content": "Kandungan",
 	"Content Extraction Engine": "",
@@ -218,9 +218,9 @@
 	"Continue with Email": "",
 	"Continue with LDAP": "",
 	"Control how message text is split for TTS requests. 'Punctuation' splits into sentences, 'paragraphs' splits into paragraphs, and 'none' keeps the message as a single string.": "",
-	"Control the repetition of token sequences in the generated text. A higher value (e.g., 1.5) will penalize repetitions more strongly, while a lower value (e.g., 1.1) will be more lenient. At 1, it is disabled. (Default: 1.1)": "",
+	"Control the repetition of token sequences in the generated text. A higher value (e.g., 1.5) will penalize repetitions more strongly, while a lower value (e.g., 1.1) will be more lenient. At 1, it is disabled.": "",
 	"Controls": "Kawalan",
-	"Controls the balance between coherence and diversity of the output. A lower value will result in more focused and coherent text. (Default: 5.0)": "",
+	"Controls the balance between coherence and diversity of the output. A lower value will result in more focused and coherent text.": "",
 	"Copied": "Disalin",
 	"Copied shared chat URL to clipboard!": "Menyalin URL sembang kongsi ke papan klip",
 	"Copied to clipboard": "",
@@ -358,7 +358,7 @@
 	"Enable Memory Locking (mlock) to prevent model data from being swapped out of RAM. This option locks the model's working set of pages into RAM, ensuring that they will not be swapped out to disk. This can help maintain performance by avoiding page faults and ensuring fast data access.": "",
 	"Enable Memory Mapping (mmap) to load model data. This option allows the system to use disk storage as an extension of RAM by treating disk files as if they were in RAM. This can improve model performance by allowing for faster data access. However, it may not work correctly with all systems and can consume a significant amount of disk space.": "",
 	"Enable Message Rating": "",
-	"Enable Mirostat sampling for controlling perplexity. (Default: 0, 0 = Disabled, 1 = Mirostat, 2 = Mirostat 2.0)": "",
+	"Enable Mirostat sampling for controlling perplexity.": "",
 	"Enable New Sign Ups": "Benarkan Pendaftaran Baharu",
 	"Enabled": "Dibenarkan",
 	"Ensure your CSV file includes 4 columns in this order: Name, Email, Password, Role.": "astikan fail CSV anda mengandungi 4 lajur dalam susunan ini: Nama, E-mel, Kata Laluan, Peranan.",
@@ -566,7 +566,7 @@
 	"Include": "",
 	"Include `--api-auth` flag when running stable-diffusion-webui": "Sertakan bendera `-- api -auth` semasa menjalankan stable-diffusion-webui ",
 	"Include `--api` flag when running stable-diffusion-webui": "Sertakan bendera `-- api ` semasa menjalankan stable-diffusion-webui",
-	"Influences how quickly the algorithm responds to feedback from the generated text. A lower learning rate will result in slower adjustments, while a higher learning rate will make the algorithm more responsive. (Default: 0.1)": "",
+	"Influences how quickly the algorithm responds to feedback from the generated text. A lower learning rate will result in slower adjustments, while a higher learning rate will make the algorithm more responsive.": "",
 	"Info": "Maklumat",
 	"Input commands": "Masukkan Arahan",
 	"Install from Github URL": "Pasang daripada URL Github",
@@ -809,7 +809,7 @@
 	"Reasoning Effort": "",
 	"Record voice": "Rakam suara",
 	"Redirecting you to Open WebUI Community": "Membawa anda ke Komuniti OpenWebUI",
-	"Reduces the probability of generating nonsense. A higher value (e.g. 100) will give more diverse answers, while a lower value (e.g. 10) will be more conservative. (Default: 40)": "",
+	"Reduces the probability of generating nonsense. A higher value (e.g. 100) will give more diverse answers, while a lower value (e.g. 10) will be more conservative.": "",
 	"Refer to yourself as \"User\" (e.g., \"User is learning Spanish\")": "Rujuk diri anda sebagai \"User\" (cth, \"Pengguna sedang belajar bahasa Sepanyol\")",
 	"References from": "",
 	"Refused when it shouldn't have": "Menolak dimana ia tidak sepatutnya",
@@ -918,11 +918,11 @@
 	"Set the number of worker threads used for computation. This option controls how many threads are used to process incoming requests concurrently. Increasing this value can improve performance under high concurrency workloads but may also consume more CPU resources.": "",
 	"Set Voice": "Tetapan Suara",
 	"Set whisper model": "",
-	"Sets a flat bias against tokens that have appeared at least once. A higher value (e.g., 1.5) will penalize repetitions more strongly, while a lower value (e.g., 0.9) will be more lenient. At 0, it is disabled. (Default: 0)": "",
-	"Sets a scaling bias against tokens to penalize repetitions, based on how many times they have appeared. A higher value (e.g., 1.5) will penalize repetitions more strongly, while a lower value (e.g., 0.9) will be more lenient. At 0, it is disabled. (Default: 1.1)": "",
-	"Sets how far back for the model to look back to prevent repetition. (Default: 64, 0 = disabled, -1 = num_ctx)": "",
-	"Sets the random number seed to use for generation. Setting this to a specific number will make the model generate the same text for the same prompt. (Default: random)": "",
-	"Sets the size of the context window used to generate the next token. (Default: 2048)": "",
+	"Sets a flat bias against tokens that have appeared at least once. A higher value (e.g., 1.5) will penalize repetitions more strongly, while a lower value (e.g., 0.9) will be more lenient. At 0, it is disabled.": "",
+	"Sets a scaling bias against tokens to penalize repetitions, based on how many times they have appeared. A higher value (e.g., 1.5) will penalize repetitions more strongly, while a lower value (e.g., 0.9) will be more lenient. At 0, it is disabled.": "",
+	"Sets how far back for the model to look back to prevent repetition.": "",
+	"Sets the random number seed to use for generation. Setting this to a specific number will make the model generate the same text for the same prompt.": "",
+	"Sets the size of the context window used to generate the next token.": "",
 	"Sets the stop sequences to use. When this pattern is encountered, the LLM will stop generating text and return. Multiple stop patterns may be set by specifying multiple separate stop parameters in a modelfile.": "",
 	"Settings": "Tetapan",
 	"Settings saved successfully!": "Tetapan berjaya disimpan!",
@@ -964,7 +964,7 @@
 	"System Prompt": "Gesaan Sistem",
 	"Tags Generation": "",
 	"Tags Generation Prompt": "",
-	"Tail free sampling is used to reduce the impact of less probable tokens from the output. A higher value (e.g., 2.0) will reduce the impact more, while a value of 1.0 disables this setting. (default: 1)": "",
+	"Tail free sampling is used to reduce the impact of less probable tokens from the output. A higher value (e.g., 2.0) will reduce the impact more, while a value of 1.0 disables this setting.": "",
 	"Talk to model": "",
 	"Tap to interrupt": "Sentuh untuk mengganggu",
 	"Tasks": "",
@@ -979,7 +979,7 @@
 	"Thanks for your feedback!": "Terima kasih atas maklum balas anda!",
 	"The Application Account DN you bind with for search": "",
 	"The base to search for users": "",
-	"The batch size determines how many text requests are processed together at once. A higher batch size can increase the performance and speed of the model, but it also requires more memory.  (Default: 512)": "",
+	"The batch size determines how many text requests are processed together at once. A higher batch size can increase the performance and speed of the model, but it also requires more memory.": "",
 	"The developers behind this plugin are passionate volunteers from the community. If you find this plugin helpful, please consider contributing to its development.": "Pembangun di sebalik 'plugin' ini adalah sukarelawan yang bersemangat daripada komuniti. Jika anda mendapati 'plugin' ini membantu, sila pertimbangkan untuk menyumbang kepada pembangunannya.",
 	"The evaluation leaderboard is based on the Elo rating system and is updated in real-time.": "",
 	"The LDAP attribute that maps to the mail that users use to sign in.": "",
@@ -988,14 +988,14 @@
 	"The maximum file size in MB. If the file size exceeds this limit, the file will not be uploaded.": "",
 	"The maximum number of files that can be used at once in chat. If the number of files exceeds this limit, the files will not be uploaded.": "",
 	"The score should be a value between 0.0 (0%) and 1.0 (100%).": "Skor hendaklah berada diantara 0.0 (0%) dan 1.0 (100%).",
-	"The temperature of the model. Increasing the temperature will make the model answer more creatively. (Default: 0.8)": "",
+	"The temperature of the model. Increasing the temperature will make the model answer more creatively.": "",
 	"Theme": "Tema",
 	"Thinking...": "Berfikir...",
 	"This action cannot be undone. Do you wish to continue?": "Tindakan ini tidak boleh diubah semula kepada asal. Adakah anda ingin teruskan",
 	"This ensures that your valuable conversations are securely saved to your backend database. Thank you!": "Ini akan memastikan bahawa perbualan berharga anda disimpan dengan selamat ke pangkalan data 'backend' anda. Terima kasih!",
 	"This is an experimental feature, it may not function as expected and is subject to change at any time.": "ni adalah ciri percubaan, ia mungkin tidak berfungsi seperti yang diharapkan dan tertakluk kepada perubahan pada bila-bila masa.",
-	"This option controls how many tokens are preserved when refreshing the context. For example, if set to 2, the last 2 tokens of the conversation context will be retained. Preserving context can help maintain the continuity of a conversation, but it may reduce the ability to respond to new topics. (Default: 24)": "",
-	"This option sets the maximum number of tokens the model can generate in its response. Increasing this limit allows the model to provide longer answers, but it may also increase the likelihood of unhelpful or irrelevant content being generated.  (Default: 128)": "",
+	"This option controls how many tokens are preserved when refreshing the context. For example, if set to 2, the last 2 tokens of the conversation context will be retained. Preserving context can help maintain the continuity of a conversation, but it may reduce the ability to respond to new topics.": "",
+	"This option sets the maximum number of tokens the model can generate in its response. Increasing this limit allows the model to provide longer answers, but it may also increase the likelihood of unhelpful or irrelevant content being generated.": "",
 	"This option will delete all existing files in the collection and replace them with newly uploaded files.": "",
 	"This response was generated by \"{{model}}\"": "",
 	"This will delete": "Ini akan memadam",
@@ -1132,7 +1132,7 @@
 	"Why?": "",
 	"Widescreen Mode": "Mod Skrin Lebar",
 	"Won": "",
-	"Works together with top-k. A higher value (e.g., 0.95) will lead to more diverse text, while a lower value (e.g., 0.5) will generate more focused and conservative text. (Default: 0.9)": "",
+	"Works together with top-k. A higher value (e.g., 0.95) will lead to more diverse text, while a lower value (e.g., 0.5) will generate more focused and conservative text.": "",
 	"Workspace": "Ruangan Kerja",
 	"Workspace Permissions": "",
 	"Write": "",

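A change this uniform, repeated across all 49 locale files, is the kind of edit usually applied by a script rather than by hand. A hypothetical Node/TypeScript migration sketch of the mechanics (the stripDefault regex and the migrate helper are assumptions for illustration, not the script actually used for this commit):

// Hypothetical migration sketch: strip "(Default: ...)" suffixes from keys.
// Not the script behind this commit; shown only to illustrate the mechanics.
import { readFileSync, writeFileSync } from 'node:fs';

const SUFFIX = /\s*\([Dd]efault: [^)]*\)$/;

function migrate(path: string): void {
	const data: Record<string, string> = JSON.parse(readFileSync(path, 'utf8'));
	const out: Record<string, string> = {};
	for (const [key, value] of Object.entries(data)) {
		// Re-key the entry; the translated value keeps whatever the file had.
		out[key.replace(SUFFIX, '')] = value;
	}
	writeFileSync(path, JSON.stringify(out, null, '\t') + '\n');
}

migrate('src/lib/i18n/locales/ms-MY/translation.json');

Note that the diff itself goes slightly beyond a mechanical re-key: translations that embedded the old default (e.g. the ko-KR temperature string with "(기본값: 0.8)") are blanked rather than carried over, so they surface for retranslation.
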
+ 18 - 18
src/lib/i18n/locales/nb-NO/translation.json

@@ -64,7 +64,7 @@
 	"Allow Voice Interruption in Call": "Muliggjør taleavbrytelse i samtaler",
 	"Allowed Endpoints": "Tillatte endepunkter",
 	"Already have an account?": "Har du allerede en konto?",
-	"Alternative to the top_p, and aims to ensure a balance of quality and variety. The parameter p represents the minimum probability for a token to be considered, relative to the probability of the most likely token. For example, with p=0.05 and the most likely token having a probability of 0.9, logits with a value less than 0.045 are filtered out. (Default: 0.0)": "Alternativ til top_p, og har som mål å sikre en balanse mellom kvalitet og variasjon. Parameteren p representerer minimumssannsynligheten for at et token skal vurderes, i forhold til sannsynligheten for det mest sannsynlige tokenet. Hvis p for eksempel er 0,05 og det mest sannsynlige tokenet har en sannsynlighet på 0,9, filtreres logits med en verdi på mindre enn 0,045 bort. (Standard: 0,0)",
+	"Alternative to the top_p, and aims to ensure a balance of quality and variety. The parameter p represents the minimum probability for a token to be considered, relative to the probability of the most likely token. For example, with p=0.05 and the most likely token having a probability of 0.9, logits with a value less than 0.045 are filtered out.": "",
 	"Always": "Alltid",
 	"Amazing": "Flott",
 	"an assistant": "en assistent",
@@ -208,7 +208,7 @@
 	"Confirm your new password": "Bekreft det nye passordet ditt",
 	"Connect to your own OpenAI compatible API endpoints.": "Koble til egne OpenAI-kompatible API-endepunkter",
 	"Connections": "Tilkoblinger",
-	"Constrains effort on reasoning for reasoning models. Only applicable to reasoning models from specific providers that support reasoning effort. (Default: medium)": "Begrenser resonneringsinnsatsen for resonneringsmodeller. Gjelder bare for resonneringsmodeller fra bestemte leverandører som har støtte for resonneringsinnsats. (Standard: middels)",
+	"Constrains effort on reasoning for reasoning models. Only applicable to reasoning models from specific providers that support reasoning effort.": "",
 	"Contact Admin for WebUI Access": "Kontakt administrator for å få tilgang til WebUI",
 	"Content": "Innhold",
 	"Content Extraction Engine": "",
@@ -218,9 +218,9 @@
 	"Continue with Email": "Fortsett med e-post",
 	"Continue with LDAP": "Fortsett med LDAP",
 	"Control how message text is split for TTS requests. 'Punctuation' splits into sentences, 'paragraphs' splits into paragraphs, and 'none' keeps the message as a single string.": "Kontrollerer hvordan meldingsteksten deles opp for TTS-forespørsler. 'Punctuation' deler opp i setninger, 'paragraphs' deler opp i avsnitt, og 'none' beholder meldingen som én enkelt streng.",
-	"Control the repetition of token sequences in the generated text. A higher value (e.g., 1.5) will penalize repetitions more strongly, while a lower value (e.g., 1.1) will be more lenient. At 1, it is disabled. (Default: 1.1)": "Kontrollerer repetisjonen av tokensekvenser i den genererte teksten. En høyere verdi (f.eks. 1,5) vil straffe gjentakelser hardere, mens en lavere verdi (f.eks. 1,1) vil være mildere. Ved 1 er den deaktivert. (Standard: 1,1)",
+	"Control the repetition of token sequences in the generated text. A higher value (e.g., 1.5) will penalize repetitions more strongly, while a lower value (e.g., 1.1) will be more lenient. At 1, it is disabled.": "",
 	"Controls": "Kontroller",
-	"Controls the balance between coherence and diversity of the output. A lower value will result in more focused and coherent text. (Default: 5.0)": "Styrer balansen mellom sammenheng og mangfold i utdataene. En lavere verdi gir en mer fokusert og sammenhengende tekst. (Standard: 5.0)",
+	"Controls the balance between coherence and diversity of the output. A lower value will result in more focused and coherent text.": "",
 	"Copied": "Kopiert",
 	"Copied shared chat URL to clipboard!": "Kopierte delt chat-URL til utklippstavlen!",
 	"Copied to clipboard": "Kopier til utklippstaveln",
@@ -358,7 +358,7 @@
 	"Enable Memory Locking (mlock) to prevent model data from being swapped out of RAM. This option locks the model's working set of pages into RAM, ensuring that they will not be swapped out to disk. This can help maintain performance by avoiding page faults and ensuring fast data access.": "Aktiver Memory Locking (mlock) for å forhindre at modelldata byttes ut av RAM. Dette alternativet låser modellens arbeidssett med sider i RAM-minnet, slik at de ikke byttes ut til disk. Dette kan bidra til å opprettholde ytelsen ved å unngå sidefeil og sikre rask datatilgang.",
 	"Enable Memory Mapping (mmap) to load model data. This option allows the system to use disk storage as an extension of RAM by treating disk files as if they were in RAM. This can improve model performance by allowing for faster data access. However, it may not work correctly with all systems and can consume a significant amount of disk space.": "Aktiver Memory Mapping (mmap) for å laste inn modelldata. Med dette alternativet kan systemet bruke disklagring som en utvidelse av RAM ved å behandle diskfiler som om de befant seg i RAM. Dette kan forbedre modellens ytelse ved å gi raskere datatilgang. Det er imidlertid ikke sikkert at det fungerer som det skal på alle systemer, og det kan kreve mye diskplass.",
 	"Enable Message Rating": "Aktivert vurdering av meldinger",
-	"Enable Mirostat sampling for controlling perplexity. (Default: 0, 0 = Disabled, 1 = Mirostat, 2 = Mirostat 2.0)": "Aktiver Mirostat-sampling for kontroll av perpleksitet. (Standard: 0, 0 = deaktivert, 1 = Mirostat, 2 = Mirostat 2.0)",
+	"Enable Mirostat sampling for controlling perplexity.": "",
 	"Enable New Sign Ups": "Aktiver nye registreringer",
 	"Enabled": "Aktivert",
 	"Ensure your CSV file includes 4 columns in this order: Name, Email, Password, Role.": "Sørg for at CSV-filen din inkluderer fire kolonner i denne rekkefølgen: Navn, E-post, Passord, Rolle.",
@@ -566,7 +566,7 @@
 	"Include": "Inkluder",
 	"Include `--api-auth` flag when running stable-diffusion-webui": "Inkluder flagget --api-auth når du kjører stable-diffusion-webui",
 	"Include `--api` flag when running stable-diffusion-webui": "Inkluder flagget --api når du kjører stable-diffusion-webui",
-	"Influences how quickly the algorithm responds to feedback from the generated text. A lower learning rate will result in slower adjustments, while a higher learning rate will make the algorithm more responsive. (Default: 0.1)": "Påvirker hvor raskt algoritmen reagerer på tilbakemeldinger fra den genererte teksten. En lavere læringshastighet vil føre til langsommere justeringer, mens en høyere læringshastighet vil gjøre algoritmen mer responsiv. (Standard: 0,1)",
+	"Influences how quickly the algorithm responds to feedback from the generated text. A lower learning rate will result in slower adjustments, while a higher learning rate will make the algorithm more responsive.": "",
 	"Info": "Info",
 	"Input commands": "Inntast kommandoer",
 	"Install from Github URL": "Installer fra GitHub-URL",
@@ -809,7 +809,7 @@
 	"Reasoning Effort": "Resonneringsinnsats",
 	"Record voice": "Ta opp tale",
 	"Redirecting you to Open WebUI Community": "Omdirigerer deg til OpenWebUI-fellesskapet",
-	"Reduces the probability of generating nonsense. A higher value (e.g. 100) will give more diverse answers, while a lower value (e.g. 10) will be more conservative. (Default: 40)": "Reduserer sannsynligheten for å generere meningsløse svar. En høyere verdi (f.eks. 100) vil gi mer varierte svar, mens en lavere verdi (f.eks. 10) vil være mer konservativ. (Standard: 40)",
+	"Reduces the probability of generating nonsense. A higher value (e.g. 100) will give more diverse answers, while a lower value (e.g. 10) will be more conservative.": "",
 	"Refer to yourself as \"User\" (e.g., \"User is learning Spanish\")": "Omtal deg selv som \"Bruker\" (f.eks. \"Bruker lærer spansk\")",
 	"References from": "Henviser fra",
 	"Refused when it shouldn't have": "Avvist når det ikke burde ha blitt det",
@@ -918,11 +918,11 @@
 	"Set the number of worker threads used for computation. This option controls how many threads are used to process incoming requests concurrently. Increasing this value can improve performance under high concurrency workloads but may also consume more CPU resources.": "Angi antall arbeidstråder som skal brukes til beregning. Dette alternativet kontrollerer hvor mange tråder som brukes til å behandle innkommende forespørsler samtidig. Hvis du øker denne verdien, kan det forbedre ytelsen under arbeidsbelastninger med høy samtidighet, men det kan også føre til økt forbruk av CPU-ressurser.",
 	"Set Voice": "Angi stemme",
 	"Set whisper model": "Angi whisper-modell",
-	"Sets a flat bias against tokens that have appeared at least once. A higher value (e.g., 1.5) will penalize repetitions more strongly, while a lower value (e.g., 0.9) will be more lenient. At 0, it is disabled. (Default: 0)": "Angir en flat bias mot tokener som har blitt vist minst én gang. En høyere verdi (f.eks. 1,5) vil straffe gjentakelser hardere, mens en lavere verdi (f.eks. 0,9) vil være mildere. Ved 0 er den deaktivert. (Standard: 0)",
-	"Sets a scaling bias against tokens to penalize repetitions, based on how many times they have appeared. A higher value (e.g., 1.5) will penalize repetitions more strongly, while a lower value (e.g., 0.9) will be more lenient. At 0, it is disabled. (Default: 1.1)": "Angir en skaleringsbias mot tokener for å straffe gjentakelser, basert på hvor mange ganger de har dukket opp. En høyere verdi (f.eks. 1,5) vil straffe gjentakelser hardere, mens en lavere verdi (f.eks. 0,9) vil være mildere. Ved 0 er den deaktivert. (Standard: 1,1)",
-	"Sets how far back for the model to look back to prevent repetition. (Default: 64, 0 = disabled, -1 = num_ctx)": "Angir hvor langt tilbake modellen skal se for å forhindre repetisjon. (Standard: 64, 0 = deaktivert, -1 = num_ctx)",
-	"Sets the random number seed to use for generation. Setting this to a specific number will make the model generate the same text for the same prompt. (Default: random)": "Angir det tilfeldige tallfrøet som skal brukes til generering. Hvis du setter dette til et bestemt tall, vil modellen generere den samme teksten for den samme ledeteksten (standard: tilfeldig).",
-	"Sets the size of the context window used to generate the next token. (Default: 2048)": "Angir størrelsen på kontekstvinduet som brukes til å generere neste token. (Standard: 2048)",
+	"Sets a flat bias against tokens that have appeared at least once. A higher value (e.g., 1.5) will penalize repetitions more strongly, while a lower value (e.g., 0.9) will be more lenient. At 0, it is disabled.": "",
+	"Sets a scaling bias against tokens to penalize repetitions, based on how many times they have appeared. A higher value (e.g., 1.5) will penalize repetitions more strongly, while a lower value (e.g., 0.9) will be more lenient. At 0, it is disabled.": "",
+	"Sets how far back for the model to look back to prevent repetition.": "",
+	"Sets the random number seed to use for generation. Setting this to a specific number will make the model generate the same text for the same prompt.": "",
+	"Sets the size of the context window used to generate the next token.": "",
 	"Sets the stop sequences to use. When this pattern is encountered, the LLM will stop generating text and return. Multiple stop patterns may be set by specifying multiple separate stop parameters in a modelfile.": "Angir hvilke stoppsekvenser som skal brukes. Når dette mønsteret forekommer, stopper LLM genereringen av tekst og returnerer. Du kan angi flere stoppmønstre ved å spesifisere flere separate stoppparametere i en modellfil.",
 	"Settings": "Innstillinger",
 	"Settings saved successfully!": "Innstillinger lagret!",
@@ -964,7 +964,7 @@
 	"System Prompt": "Systemledetekst",
 	"Tags Generation": "Genering av etiketter",
 	"Tags Generation Prompt": "Ledetekst for genering av etikett",
-	"Tail free sampling is used to reduce the impact of less probable tokens from the output. A higher value (e.g., 2.0) will reduce the impact more, while a value of 1.0 disables this setting. (default: 1)": "Tail free sampling brukes til å redusere innvirkningen av mindre sannsynlige tokens fra utdataene. En høyere verdi (f.eks. 2,0) vil redusere effekten mer, mens en verdi på 1,0 deaktiverer denne innstillingen. (standard: 1)",
+	"Tail free sampling is used to reduce the impact of less probable tokens from the output. A higher value (e.g., 2.0) will reduce the impact more, while a value of 1.0 disables this setting.": "",
 	"Talk to model": "Snakk med modellen",
 	"Tap to interrupt": "Trykk for å avbryte",
 	"Tasks": "Oppgaver",
@@ -979,7 +979,7 @@
 	"Thanks for your feedback!": "Takk for tilbakemeldingen!",
 	"The Application Account DN you bind with for search": "Applikasjonskontoens DN du binder deg med for søking",
 	"The base to search for users": "Basen for å søke etter brukere",
-	"The batch size determines how many text requests are processed together at once. A higher batch size can increase the performance and speed of the model, but it also requires more memory.  (Default: 512)": "Batchstørrelsen avgjør hvor mange tekstforespørsler som behandles samtidig. En høyere batchstørrelse kan øke ytelsen og hastigheten til modellen, men det krever også mer minne. (Standard: 512)",
+	"The batch size determines how many text requests are processed together at once. A higher batch size can increase the performance and speed of the model, but it also requires more memory.": "",
 	"The developers behind this plugin are passionate volunteers from the community. If you find this plugin helpful, please consider contributing to its development.": "Utviklerne bak denne utvidelsen er lidenskapelige frivillige fra fellesskapet. Hvis du finner denne utvidelsen nyttig, vennligst vurder å bidra til utviklingen.",
 	"The evaluation leaderboard is based on the Elo rating system and is updated in real-time.": "Ledertavlens over evalueringer er basert på Elo-rangeringssystemet, og oppdateres i sanntid.",
 	"The LDAP attribute that maps to the mail that users use to sign in.": "LDAP-attributtet som tilsvarer e-posten som brukerne bruker for å logge på.",
@@ -988,14 +988,14 @@
 	"The maximum file size in MB. If the file size exceeds this limit, the file will not be uploaded.": "Den maksimale filstørrelsen i MB. Hvis en filstørrelse overskrider denne grensen, blir ikke filen lastet opp.",
 	"The maximum number of files that can be used at once in chat. If the number of files exceeds this limit, the files will not be uploaded.": "Maksimalt antall filer som kan brukes samtidig i chatten. Hvis antallet filer overskrider denne grensen, blir de ikke lastet opp.",
 	"The score should be a value between 0.0 (0%) and 1.0 (100%).": "Poengsummen skal være en verdi mellom 0,0 (0 %) og 1,0 (100 %).",
-	"The temperature of the model. Increasing the temperature will make the model answer more creatively. (Default: 0.8)": "Temperaturen på modellen. Hvis du øker temperaturen, vil modellen svare mer kreativt. (Standard: 0,8)",
+	"The temperature of the model. Increasing the temperature will make the model answer more creatively.": "",
 	"Theme": "Tema",
 	"Thinking...": "Tenker ...",
 	"This action cannot be undone. Do you wish to continue?": "Denne handlingen kan ikke angres. Vil du fortsette?",
 	"This ensures that your valuable conversations are securely saved to your backend database. Thank you!": "Dette sikrer at de verdifulle samtalene dine lagres sikkert i backend-databasen din. Takk!",
 	"This is an experimental feature, it may not function as expected and is subject to change at any time.": "Dette er en eksperimentell funksjon. Det er mulig den ikke fungerer som forventet, og den kan endres når som helst.",
-	"This option controls how many tokens are preserved when refreshing the context. For example, if set to 2, the last 2 tokens of the conversation context will be retained. Preserving context can help maintain the continuity of a conversation, but it may reduce the ability to respond to new topics. (Default: 24)": "Dette alternativet styrer hvor mange tokens som bevares når konteksten oppdateres. Hvis det for eksempel er angitt til 2, beholdes de to siste symbolene i samtalekonteksten. Bevaring av konteksten kan bidra til å opprettholde kontinuiteten i en samtale, men det kan redusere muligheten til å svare på nye emner. (Standard: 24)",
-	"This option sets the maximum number of tokens the model can generate in its response. Increasing this limit allows the model to provide longer answers, but it may also increase the likelihood of unhelpful or irrelevant content being generated.  (Default: 128)": "Dette alternativet angir det maksimale antallet tokens modellen kan generere i svaret sitt. Hvis du øker denne grensen, kan modellen gi lengre svar, men det kan også øke sannsynligheten for at det genereres uhensiktsmessig eller irrelevant innhold. (Standard: 128)",
+	"This option controls how many tokens are preserved when refreshing the context. For example, if set to 2, the last 2 tokens of the conversation context will be retained. Preserving context can help maintain the continuity of a conversation, but it may reduce the ability to respond to new topics.": "",
+	"This option sets the maximum number of tokens the model can generate in its response. Increasing this limit allows the model to provide longer answers, but it may also increase the likelihood of unhelpful or irrelevant content being generated.": "",
 	"This option will delete all existing files in the collection and replace them with newly uploaded files.": "Dette alternativet sletter alle eksisterende filer i samlingen og erstatter dem med nyopplastede filer.",
 	"This response was generated by \"{{model}}\"": "Dette svaret er generert av \"{{modell}}\"",
 	"This will delete": "Dette sletter",
@@ -1132,7 +1132,7 @@
 	"Why?": "Hvorfor?",
 	"Widescreen Mode": "Bredskjermmodus",
 	"Won": "Vant",
-	"Works together with top-k. A higher value (e.g., 0.95) will lead to more diverse text, while a lower value (e.g., 0.5) will generate more focused and conservative text. (Default: 0.9)": "Fungerer sammen med top-k. En høyere verdi (f.eks. 0,95) vil føre til mer mangfoldig tekst, mens en lavere verdi (f.eks. 0,5) vil generere mer fokusert og konservativ tekst. (Standard: 0,9)",
+	"Works together with top-k. A higher value (e.g., 0.95) will lead to more diverse text, while a lower value (e.g., 0.5) will generate more focused and conservative text.": "",
 	"Workspace": "Arbeidsområde",
 	"Workspace Permissions": "Tillatelser for arbeidsområde",
 	"Write": "Skriv",

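Most of the strings touched here describe the same family of sampler knobs, and the removed suffixes record their defaults. A sketch of how they line up in an Ollama-style options object (field names per Ollama's documented API; the values are the defaults quoted in the "(Default: ...)" suffixes removed above, and the mapping is illustrative, not this app's request code):

// Ollama-style options payload covering the parameters these strings describe.
const options = {
	mirostat: 0,         // 0 = disabled, 1 = Mirostat, 2 = Mirostat 2.0
	mirostat_eta: 0.1,   // learning rate: how fast sampling reacts to feedback
	mirostat_tau: 5.0,   // balance between coherence and diversity
	temperature: 0.8,    // higher = more creative answers
	top_k: 40,           // higher (e.g. 100) = diverse, lower (e.g. 10) = conservative
	top_p: 0.9,          // works with top-k; higher = more diverse text
	min_p: 0.0,          // alternative to top_p, relative to the most likely token
	repeat_penalty: 1.1, // scaling bias against repeated sequences; 1 disables
	repeat_last_n: 64,   // how far back to look to prevent repetition; -1 = num_ctx
	tfs_z: 1,            // tail-free sampling; 1.0 disables
	seed: undefined,     // a fixed number makes output reproducible (default: random)
	num_ctx: 2048,       // context window size
	num_batch: 512,      // batch size: faster, but needs more memory
	num_keep: 24,        // tokens preserved when the context is refreshed
	num_predict: 128     // max tokens the model may generate in a response
};

// e.g. POST /api/generate with { model, prompt, options } against an Ollama server.
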
+ 18 - 18
src/lib/i18n/locales/nl-NL/translation.json

@@ -64,7 +64,7 @@
 	"Allow Voice Interruption in Call": "Stemonderbreking tijdens gesprek toestaan",
 	"Allowed Endpoints": "",
 	"Already have an account?": "Heb je al een account?",
-	"Alternative to the top_p, and aims to ensure a balance of quality and variety. The parameter p represents the minimum probability for a token to be considered, relative to the probability of the most likely token. For example, with p=0.05 and the most likely token having a probability of 0.9, logits with a value less than 0.045 are filtered out. (Default: 0.0)": "Alternatief voor de top_p, en streeft naar een evenwicht tussen kwaliteit en variatie. De parameter p vertegenwoordigt de minimumwaarschijnlijkheid dat een token in aanmerking wordt genomen, in verhouding tot de waarschijnlijkheid van het meest waarschijnlijke token. Bijvoorbeeld, met p=0.05 en de meest waarschijnlijke token met een waarschijnlijkheid van 0.9, worden logits met een waarde kleiner dan 0.045 uitgefilterd. (Standaard: 0,0)",
+	"Alternative to the top_p, and aims to ensure a balance of quality and variety. The parameter p represents the minimum probability for a token to be considered, relative to the probability of the most likely token. For example, with p=0.05 and the most likely token having a probability of 0.9, logits with a value less than 0.045 are filtered out.": "",
 	"Always": "",
 	"Amazing": "Geweldig",
 	"an assistant": "een assistent",
@@ -208,7 +208,7 @@
 	"Confirm your new password": "",
 	"Connect to your own OpenAI compatible API endpoints.": "",
 	"Connections": "Verbindingen",
-	"Constrains effort on reasoning for reasoning models. Only applicable to reasoning models from specific providers that support reasoning effort. (Default: medium)": "",
+	"Constrains effort on reasoning for reasoning models. Only applicable to reasoning models from specific providers that support reasoning effort.": "",
 	"Contact Admin for WebUI Access": "Neem contact op met de beheerder voor WebUI-toegang",
 	"Content": "Inhoud",
 	"Content Extraction Engine": "",
@@ -218,9 +218,9 @@
 	"Continue with Email": "Ga door met E-mail",
 	"Continue with LDAP": "Ga door met LDAP",
 	"Control how message text is split for TTS requests. 'Punctuation' splits into sentences, 'paragraphs' splits into paragraphs, and 'none' keeps the message as a single string.": "Bepaal hoe berichttekst wordt opgesplitst voor TTS-verzoeken. 'Leestekens' splitst op in zinnen, 'alinea's' splitst op in paragrafen en 'geen' houdt het bericht als een enkele string.",
-	"Control the repetition of token sequences in the generated text. A higher value (e.g., 1.5) will penalize repetitions more strongly, while a lower value (e.g., 1.1) will be more lenient. At 1, it is disabled. (Default: 1.1)": "",
+	"Control the repetition of token sequences in the generated text. A higher value (e.g., 1.5) will penalize repetitions more strongly, while a lower value (e.g., 1.1) will be more lenient. At 1, it is disabled.": "",
 	"Controls": "Besturingselementen",
-	"Controls the balance between coherence and diversity of the output. A lower value will result in more focused and coherent text. (Default: 5.0)": "Regelt de balans tussen coherentie en diversiteit van de uitvoer. Een lagere waarde resulteert in meer gerichte en coherente tekst. (Standaard: 5.0)",
+	"Controls the balance between coherence and diversity of the output. A lower value will result in more focused and coherent text.": "",
 	"Copied": "Gekopieerd",
 	"Copied shared chat URL to clipboard!": "URL van gedeelde gesprekspagina gekopieerd naar klembord!",
 	"Copied to clipboard": "Gekopieerd naar klembord",
@@ -358,7 +358,7 @@
 	"Enable Memory Locking (mlock) to prevent model data from being swapped out of RAM. This option locks the model's working set of pages into RAM, ensuring that they will not be swapped out to disk. This can help maintain performance by avoiding page faults and ensuring fast data access.": "Schakel Memory Locking (mlock) in om te voorkomen dat modelgegevens uit het RAM worden verwisseld. Deze optie vergrendelt de werkset pagina's van het model in het RAM, zodat ze niet naar de schijf worden uitgewisseld. Dit kan helpen om de prestaties op peil te houden door paginafouten te voorkomen en snelle gegevenstoegang te garanderen.",
 	"Enable Memory Mapping (mmap) to load model data. This option allows the system to use disk storage as an extension of RAM by treating disk files as if they were in RAM. This can improve model performance by allowing for faster data access. However, it may not work correctly with all systems and can consume a significant amount of disk space.": "Schakel Memory Mapping (mmap) in om modelgegevens te laden. Deze optie laat het systeem schijfopslag gebruiken als een uitbreiding van RAM door schijfbestanden te behandelen alsof ze in RAM zitten. Dit kan de prestaties van het model verbeteren door snellere gegevenstoegang mogelijk te maken. Het is echter mogelijk dat deze optie niet op alle systemen correct werkt en een aanzienlijke hoeveelheid schijfruimte in beslag kan nemen.",
 	"Enable Message Rating": "Schakel berichtbeoordeling in",
-	"Enable Mirostat sampling for controlling perplexity. (Default: 0, 0 = Disabled, 1 = Mirostat, 2 = Mirostat 2.0)": "Mirostat-sampling inschakelen voor het regelen van de perplexiteit. (Standaard: 0, 0 = uitgeschakeld, 1 = Mirostat, 2 = Mirostat 2.0)",
+	"Enable Mirostat sampling for controlling perplexity.": "",
 	"Enable New Sign Ups": "Schakel nieuwe registraties in",
 	"Enabled": "Ingeschakeld",
 	"Ensure your CSV file includes 4 columns in this order: Name, Email, Password, Role.": "Zorg ervoor dat uw CSV-bestand de volgende vier kolommen in deze volgorde bevat: Naam, E-mail, Wachtwoord, Rol.",
@@ -566,7 +566,7 @@
 	"Include": "Voeg toe",
 	"Include `--api-auth` flag when running stable-diffusion-webui": "Voeg '--api-auth` toe bij het uitvoeren van stable-diffusion-webui",
 	"Include `--api` flag when running stable-diffusion-webui": "Voeg `--api` vlag toe bij het uitvoeren van stable-diffusion-webui",
-	"Influences how quickly the algorithm responds to feedback from the generated text. A lower learning rate will result in slower adjustments, while a higher learning rate will make the algorithm more responsive. (Default: 0.1)": "Beïnvloedt hoe snel het algoritme reageert op feedback van de gegenereerde tekst. Een lagere leersnelheid resulteert in langzamere aanpassingen, terwijl een hogere leersnelheid het algoritme gevoeliger maakt. (Standaard: 0,1)",
+	"Influences how quickly the algorithm responds to feedback from the generated text. A lower learning rate will result in slower adjustments, while a higher learning rate will make the algorithm more responsive.": "",
 	"Info": "Info",
 	"Input commands": "Voer commando's in",
 	"Install from Github URL": "Installeren vanaf Github-URL",
@@ -809,7 +809,7 @@
 	"Reasoning Effort": "",
 	"Record voice": "Neem stem op",
 	"Redirecting you to Open WebUI Community": "Je wordt doorgestuurd naar OpenWebUI Community",
-	"Reduces the probability of generating nonsense. A higher value (e.g. 100) will give more diverse answers, while a lower value (e.g. 10) will be more conservative. (Default: 40)": "Vermindert de kans op het genereren van onzin. Een hogere waarde (bijv. 100) zal meer diverse antwoorden geven, terwijl een lagere waarde (bijv. 10) conservatiever zal zijn. (Standaard: 40)",
+	"Reduces the probability of generating nonsense. A higher value (e.g. 100) will give more diverse answers, while a lower value (e.g. 10) will be more conservative.": "",
 	"Refer to yourself as \"User\" (e.g., \"User is learning Spanish\")": "Refereer naar jezelf als \"user\" (bv. \"User is Spaans aan het leren\"",
 	"References from": "Referenties van",
 	"Refused when it shouldn't have": "Geweigerd terwijl het niet had moeten",
@@ -918,11 +918,11 @@
 	"Set the number of worker threads used for computation. This option controls how many threads are used to process incoming requests concurrently. Increasing this value can improve performance under high concurrency workloads but may also consume more CPU resources.": "Stel het aantal threads in dat wordt gebruikt voor berekeningen. Deze optie bepaalt hoeveel threads worden gebruikt om gelijktijdig binnenkomende verzoeken te verwerken. Het verhogen van deze waarde kan de prestaties verbeteren onder hoge concurrency werklasten, maar kan ook meer CPU-bronnen verbruiken.",
 	"Set Voice": "Stel stem in",
 	"Set whisper model": "Stel Whisper-model in",
-	"Sets a flat bias against tokens that have appeared at least once. A higher value (e.g., 1.5) will penalize repetitions more strongly, while a lower value (e.g., 0.9) will be more lenient. At 0, it is disabled. (Default: 0)": "",
-	"Sets a scaling bias against tokens to penalize repetitions, based on how many times they have appeared. A higher value (e.g., 1.5) will penalize repetitions more strongly, while a lower value (e.g., 0.9) will be more lenient. At 0, it is disabled. (Default: 1.1)": "",
-	"Sets how far back for the model to look back to prevent repetition. (Default: 64, 0 = disabled, -1 = num_ctx)": "Stelt in hoe ver het model terug moet kijken om herhaling te voorkomen. (Standaard: 64, 0 = uitgeschakeld, -1 = num_ctx)",
-	"Sets the random number seed to use for generation. Setting this to a specific number will make the model generate the same text for the same prompt. (Default: random)": "Stelt de willekeurigheid in om te gebruiken voor het genereren. Als je dit op een specifiek getal instelt, genereert het model dezelfde tekst voor dezelfde prompt. (Standaard: willekeurig)",
-	"Sets the size of the context window used to generate the next token. (Default: 2048)": "Stelt de grootte in van het contextvenster dat wordt gebruikt om het volgende token te genereren. (Standaard: 2048)",
+	"Sets a flat bias against tokens that have appeared at least once. A higher value (e.g., 1.5) will penalize repetitions more strongly, while a lower value (e.g., 0.9) will be more lenient. At 0, it is disabled.": "",
+	"Sets a scaling bias against tokens to penalize repetitions, based on how many times they have appeared. A higher value (e.g., 1.5) will penalize repetitions more strongly, while a lower value (e.g., 0.9) will be more lenient. At 0, it is disabled.": "",
+	"Sets how far back for the model to look back to prevent repetition.": "",
+	"Sets the random number seed to use for generation. Setting this to a specific number will make the model generate the same text for the same prompt.": "",
+	"Sets the size of the context window used to generate the next token.": "",
 	"Sets the stop sequences to use. When this pattern is encountered, the LLM will stop generating text and return. Multiple stop patterns may be set by specifying multiple separate stop parameters in a modelfile.": "Stelt de te gebruiken stopsequentie in. Als dit patroon wordt gevonden, stopt de LLM met het genereren van tekst en keert terug. Er kunnen meerdere stoppatronen worden ingesteld door meerdere afzonderlijke stopparameters op te geven in een modelbestand.",
 	"Settings": "Instellingen",
 	"Settings saved successfully!": "Instellingen succesvol opgeslagen!",
@@ -964,7 +964,7 @@
 	"System Prompt": "Systeem prompt",
 	"Tags Generation": "",
 	"Tags Generation Prompt": "Prompt voor taggeneratie",
-	"Tail free sampling is used to reduce the impact of less probable tokens from the output. A higher value (e.g., 2.0) will reduce the impact more, while a value of 1.0 disables this setting. (default: 1)": "Tail free sampling wordt gebruikt om de impact van minder waarschijnlijke tokens uit de uitvoer te verminderen. Een hogere waarde (bv. 2,0) zal de impact meer verminderen, terwijl een waarde van 1,0 deze instelling uitschakelt. (standaard: 1)",
+	"Tail free sampling is used to reduce the impact of less probable tokens from the output. A higher value (e.g., 2.0) will reduce the impact more, while a value of 1.0 disables this setting.": "",
 	"Talk to model": "",
 	"Tap to interrupt": "Tik om te onderbreken",
 	"Tasks": "",
@@ -979,7 +979,7 @@
 	"Thanks for your feedback!": "Bedankt voor je feedback!",
 	"The Application Account DN you bind with for search": "Het applicatieaccount DN waarmee je zoekt",
 	"The base to search for users": "De basis om gebruikers te zoeken",
-	"The batch size determines how many text requests are processed together at once. A higher batch size can increase the performance and speed of the model, but it also requires more memory.  (Default: 512)": "De batchgrootte bepaalt hoeveel tekstverzoeken tegelijk worden verwerkt. Een hogere batchgrootte kan de prestaties en snelheid van het model verhogen, maar vereist ook meer geheugen.  (Standaard: 512)",
+	"The batch size determines how many text requests are processed together at once. A higher batch size can increase the performance and speed of the model, but it also requires more memory.": "",
 	"The developers behind this plugin are passionate volunteers from the community. If you find this plugin helpful, please consider contributing to its development.": "De ontwikkelaars achter deze plugin zijn gepassioneerde vrijwilligers uit de gemeenschap. Als je deze plugin nuttig vindt, overweeg dan om bij te dragen aan de ontwikkeling ervan.",
 	"The evaluation leaderboard is based on the Elo rating system and is updated in real-time.": "Het beoordelingsklassement is gebaseerd op het Elo-classificatiesysteem en wordt in realtime bijgewerkt.",
 	"The LDAP attribute that maps to the mail that users use to sign in.": "",
@@ -988,14 +988,14 @@
 	"The maximum file size in MB. If the file size exceeds this limit, the file will not be uploaded.": "De maximale bestandsgrootte in MB. Als het bestand groter is dan deze limiet, wordt het bestand niet geüpload.",
 	"The maximum number of files that can be used at once in chat. If the number of files exceeds this limit, the files will not be uploaded.": "Het maximum aantal bestanden dat in één keer kan worden gebruikt in de chat. Als het aantal bestanden deze limiet overschrijdt, worden de bestanden niet geüpload.",
 	"The score should be a value between 0.0 (0%) and 1.0 (100%).": "Het score moet een waarde zijn tussen 0.0 (0%) en 1.0 (100%).",
-	"The temperature of the model. Increasing the temperature will make the model answer more creatively. (Default: 0.8)": "De temperatuur van het model. Als je de temperatuur verhoogt, zal het model creatiever antwoorden. (Standaard: 0,8)",
+	"The temperature of the model. Increasing the temperature will make the model answer more creatively.": "",
 	"Theme": "Thema",
 	"Thinking...": "Aan het denken...",
 	"This action cannot be undone. Do you wish to continue?": "Deze actie kan niet ongedaan worden gemaakt. Wilt u doorgaan?",
 	"This ensures that your valuable conversations are securely saved to your backend database. Thank you!": "Dit zorgt ervoor dat je waardevolle gesprekken veilig worden opgeslagen in je backend database. Dank je wel!",
 	"This is an experimental feature, it may not function as expected and is subject to change at any time.": "Dit is een experimentele functie, het kan functioneren zoals verwacht en kan op elk moment veranderen.",
-	"This option controls how many tokens are preserved when refreshing the context. For example, if set to 2, the last 2 tokens of the conversation context will be retained. Preserving context can help maintain the continuity of a conversation, but it may reduce the ability to respond to new topics. (Default: 24)": "Deze optie bepaalt hoeveel tokens bewaard blijven bij het verversen van de context. Als deze bijvoorbeeld op 2 staat, worden de laatste 2 tekens van de context van het gesprek bewaard. Het behouden van de context kan helpen om de continuïteit van een gesprek te behouden, maar het kan de mogelijkheid om te reageren op nieuwe onderwerpen verminderen. (Standaard: 24)",
-	"This option sets the maximum number of tokens the model can generate in its response. Increasing this limit allows the model to provide longer answers, but it may also increase the likelihood of unhelpful or irrelevant content being generated.  (Default: 128)": "Deze optie stelt het maximum aantal tokens in dat het model kan genereren in zijn antwoord. Door deze limiet te verhogen, kan het model langere antwoorden geven, maar het kan ook de kans vergroten dat er onbehulpzame of irrelevante inhoud wordt gegenereerd.  (Standaard: 128)",
+	"This option controls how many tokens are preserved when refreshing the context. For example, if set to 2, the last 2 tokens of the conversation context will be retained. Preserving context can help maintain the continuity of a conversation, but it may reduce the ability to respond to new topics.": "",
+	"This option sets the maximum number of tokens the model can generate in its response. Increasing this limit allows the model to provide longer answers, but it may also increase the likelihood of unhelpful or irrelevant content being generated.": "",
 	"This option will delete all existing files in the collection and replace them with newly uploaded files.": "Deze optie verwijdert alle bestaande bestanden in de collectie en vervangt ze door nieuw geüploade bestanden.",
 	"This response was generated by \"{{model}}\"": "Dit antwoord is gegenereerd door  \"{{model}}\"",
 	"This will delete": "Dit zal verwijderen",
@@ -1132,7 +1132,7 @@
 	"Why?": "Waarom?",
 	"Widescreen Mode": "Breedschermmodus",
 	"Won": "Gewonnen",
-	"Works together with top-k. A higher value (e.g., 0.95) will lead to more diverse text, while a lower value (e.g., 0.5) will generate more focused and conservative text. (Default: 0.9)": "Werkt samen met top-k. Een hogere waarde (bijv. 0,95) zal leiden tot meer diverse tekst, terwijl een lagere waarde (bijv. 0,5) meer gerichte en conservatieve tekst zal genereren. (Standaard: 0,9)",
+	"Works together with top-k. A higher value (e.g., 0.95) will lead to more diverse text, while a lower value (e.g., 0.5) will generate more focused and conservative text.": "",
 	"Workspace": "Werkruimte",
 	"Workspace Permissions": "Werkruimtemachtigingen",
 	"Write": "",

+ 18 - 18
src/lib/i18n/locales/pa-IN/translation.json

@@ -64,7 +64,7 @@
 	"Allow Voice Interruption in Call": "",
 	"Allowed Endpoints": "",
 	"Already have an account?": "ਪਹਿਲਾਂ ਹੀ ਖਾਤਾ ਹੈ?",
-	"Alternative to the top_p, and aims to ensure a balance of quality and variety. The parameter p represents the minimum probability for a token to be considered, relative to the probability of the most likely token. For example, with p=0.05 and the most likely token having a probability of 0.9, logits with a value less than 0.045 are filtered out. (Default: 0.0)": "",
+	"Alternative to the top_p, and aims to ensure a balance of quality and variety. The parameter p represents the minimum probability for a token to be considered, relative to the probability of the most likely token. For example, with p=0.05 and the most likely token having a probability of 0.9, logits with a value less than 0.045 are filtered out.": "",
 	"Always": "",
 	"Amazing": "",
 	"an assistant": "ਇੱਕ ਸਹਾਇਕ",
@@ -208,7 +208,7 @@
 	"Confirm your new password": "",
 	"Connect to your own OpenAI compatible API endpoints.": "",
 	"Connections": "ਕਨੈਕਸ਼ਨ",
-	"Constrains effort on reasoning for reasoning models. Only applicable to reasoning models from specific providers that support reasoning effort. (Default: medium)": "",
+	"Constrains effort on reasoning for reasoning models. Only applicable to reasoning models from specific providers that support reasoning effort.": "",
 	"Contact Admin for WebUI Access": "",
 	"Content": "ਸਮੱਗਰੀ",
 	"Content Extraction Engine": "",
@@ -218,9 +218,9 @@
 	"Continue with Email": "",
 	"Continue with LDAP": "",
 	"Control how message text is split for TTS requests. 'Punctuation' splits into sentences, 'paragraphs' splits into paragraphs, and 'none' keeps the message as a single string.": "",
-	"Control the repetition of token sequences in the generated text. A higher value (e.g., 1.5) will penalize repetitions more strongly, while a lower value (e.g., 1.1) will be more lenient. At 1, it is disabled. (Default: 1.1)": "",
+	"Control the repetition of token sequences in the generated text. A higher value (e.g., 1.5) will penalize repetitions more strongly, while a lower value (e.g., 1.1) will be more lenient. At 1, it is disabled.": "",
 	"Controls": "",
-	"Controls the balance between coherence and diversity of the output. A lower value will result in more focused and coherent text. (Default: 5.0)": "",
+	"Controls the balance between coherence and diversity of the output. A lower value will result in more focused and coherent text.": "",
 	"Copied": "",
 	"Copied shared chat URL to clipboard!": "ਸਾਂਝੇ ਕੀਤੇ ਗੱਲਬਾਤ URL ਨੂੰ ਕਲਿੱਪਬੋਰਡ 'ਤੇ ਕਾਪੀ ਕਰ ਦਿੱਤਾ!",
 	"Copied to clipboard": "",
@@ -358,7 +358,7 @@
 	"Enable Memory Locking (mlock) to prevent model data from being swapped out of RAM. This option locks the model's working set of pages into RAM, ensuring that they will not be swapped out to disk. This can help maintain performance by avoiding page faults and ensuring fast data access.": "",
 	"Enable Memory Mapping (mmap) to load model data. This option allows the system to use disk storage as an extension of RAM by treating disk files as if they were in RAM. This can improve model performance by allowing for faster data access. However, it may not work correctly with all systems and can consume a significant amount of disk space.": "",
 	"Enable Message Rating": "",
-	"Enable Mirostat sampling for controlling perplexity. (Default: 0, 0 = Disabled, 1 = Mirostat, 2 = Mirostat 2.0)": "",
+	"Enable Mirostat sampling for controlling perplexity.": "",
 	"Enable New Sign Ups": "ਨਵੇਂ ਸਾਈਨ ਅਪ ਯੋਗ ਕਰੋ",
 	"Enabled": "",
 	"Ensure your CSV file includes 4 columns in this order: Name, Email, Password, Role.": "ਸੁਨਿਸ਼ਚਿਤ ਕਰੋ ਕਿ ਤੁਹਾਡੀ CSV ਫਾਈਲ ਵਿੱਚ ਇਸ ਕ੍ਰਮ ਵਿੱਚ 4 ਕਾਲਮ ਹਨ: ਨਾਮ, ਈਮੇਲ, ਪਾਸਵਰਡ, ਭੂਮਿਕਾ।",
@@ -566,7 +566,7 @@
 	"Include": "",
 	"Include `--api-auth` flag when running stable-diffusion-webui": "",
 	"Include `--api` flag when running stable-diffusion-webui": "ਸਟੇਬਲ-ਡਿਫਿਊਸ਼ਨ-ਵੈਬਯੂਆਈ ਚਲਾਉਣ ਸਮੇਂ `--api` ਝੰਡਾ ਸ਼ਾਮਲ ਕਰੋ",
-	"Influences how quickly the algorithm responds to feedback from the generated text. A lower learning rate will result in slower adjustments, while a higher learning rate will make the algorithm more responsive. (Default: 0.1)": "",
+	"Influences how quickly the algorithm responds to feedback from the generated text. A lower learning rate will result in slower adjustments, while a higher learning rate will make the algorithm more responsive.": "",
 	"Info": "ਜਾਣਕਾਰੀ",
 	"Input commands": "ਇਨਪੁਟ ਕਮਾਂਡਾਂ",
 	"Install from Github URL": "Github URL ਤੋਂ ਇੰਸਟਾਲ ਕਰੋ",
@@ -809,7 +809,7 @@
 	"Reasoning Effort": "",
 	"Record voice": "ਆਵਾਜ਼ ਰਿਕਾਰਡ ਕਰੋ",
 	"Redirecting you to Open WebUI Community": "ਤੁਹਾਨੂੰ ਓਪਨਵੈਬਯੂਆਈ ਕਮਿਊਨਿਟੀ ਵੱਲ ਰੀਡਾਇਰੈਕਟ ਕੀਤਾ ਜਾ ਰਿਹਾ ਹੈ",
-	"Reduces the probability of generating nonsense. A higher value (e.g. 100) will give more diverse answers, while a lower value (e.g. 10) will be more conservative. (Default: 40)": "",
+	"Reduces the probability of generating nonsense. A higher value (e.g. 100) will give more diverse answers, while a lower value (e.g. 10) will be more conservative.": "",
 	"Refer to yourself as \"User\" (e.g., \"User is learning Spanish\")": "",
 	"References from": "",
 	"Refused when it shouldn't have": "ਜਦੋਂ ਇਹ ਨਹੀਂ ਹੋਣਾ ਚਾਹੀਦਾ ਸੀ ਤਾਂ ਇਨਕਾਰ ਕੀਤਾ",
@@ -918,11 +918,11 @@
 	"Set the number of worker threads used for computation. This option controls how many threads are used to process incoming requests concurrently. Increasing this value can improve performance under high concurrency workloads but may also consume more CPU resources.": "",
 	"Set Voice": "ਆਵਾਜ਼ ਸੈੱਟ ਕਰੋ",
 	"Set whisper model": "",
-	"Sets a flat bias against tokens that have appeared at least once. A higher value (e.g., 1.5) will penalize repetitions more strongly, while a lower value (e.g., 0.9) will be more lenient. At 0, it is disabled. (Default: 0)": "",
-	"Sets a scaling bias against tokens to penalize repetitions, based on how many times they have appeared. A higher value (e.g., 1.5) will penalize repetitions more strongly, while a lower value (e.g., 0.9) will be more lenient. At 0, it is disabled. (Default: 1.1)": "",
-	"Sets how far back for the model to look back to prevent repetition. (Default: 64, 0 = disabled, -1 = num_ctx)": "",
-	"Sets the random number seed to use for generation. Setting this to a specific number will make the model generate the same text for the same prompt. (Default: random)": "",
-	"Sets the size of the context window used to generate the next token. (Default: 2048)": "",
+	"Sets a flat bias against tokens that have appeared at least once. A higher value (e.g., 1.5) will penalize repetitions more strongly, while a lower value (e.g., 0.9) will be more lenient. At 0, it is disabled.": "",
+	"Sets a scaling bias against tokens to penalize repetitions, based on how many times they have appeared. A higher value (e.g., 1.5) will penalize repetitions more strongly, while a lower value (e.g., 0.9) will be more lenient. At 0, it is disabled.": "",
+	"Sets how far back for the model to look back to prevent repetition.": "",
+	"Sets the random number seed to use for generation. Setting this to a specific number will make the model generate the same text for the same prompt.": "",
+	"Sets the size of the context window used to generate the next token.": "",
 	"Sets the stop sequences to use. When this pattern is encountered, the LLM will stop generating text and return. Multiple stop patterns may be set by specifying multiple separate stop parameters in a modelfile.": "",
 	"Settings": "ਸੈਟਿੰਗਾਂ",
 	"Settings saved successfully!": "ਸੈਟਿੰਗਾਂ ਸਫਲਤਾਪੂਰਵਕ ਸੰਭਾਲੀਆਂ ਗਈਆਂ!",
@@ -964,7 +964,7 @@
 	"System Prompt": "ਸਿਸਟਮ ਪ੍ਰੰਪਟ",
 	"Tags Generation": "",
 	"Tags Generation Prompt": "",
-	"Tail free sampling is used to reduce the impact of less probable tokens from the output. A higher value (e.g., 2.0) will reduce the impact more, while a value of 1.0 disables this setting. (default: 1)": "",
+	"Tail free sampling is used to reduce the impact of less probable tokens from the output. A higher value (e.g., 2.0) will reduce the impact more, while a value of 1.0 disables this setting.": "",
 	"Talk to model": "",
 	"Tap to interrupt": "",
 	"Tasks": "",
@@ -979,7 +979,7 @@
 	"Thanks for your feedback!": "ਤੁਹਾਡੇ ਫੀਡਬੈਕ ਲਈ ਧੰਨਵਾਦ!",
 	"The Application Account DN you bind with for search": "",
 	"The base to search for users": "",
-	"The batch size determines how many text requests are processed together at once. A higher batch size can increase the performance and speed of the model, but it also requires more memory.  (Default: 512)": "",
+	"The batch size determines how many text requests are processed together at once. A higher batch size can increase the performance and speed of the model, but it also requires more memory.": "",
 	"The developers behind this plugin are passionate volunteers from the community. If you find this plugin helpful, please consider contributing to its development.": "",
 	"The evaluation leaderboard is based on the Elo rating system and is updated in real-time.": "",
 	"The LDAP attribute that maps to the mail that users use to sign in.": "",
@@ -988,14 +988,14 @@
 	"The maximum file size in MB. If the file size exceeds this limit, the file will not be uploaded.": "",
 	"The maximum number of files that can be used at once in chat. If the number of files exceeds this limit, the files will not be uploaded.": "",
 	"The score should be a value between 0.0 (0%) and 1.0 (100%).": "ਸਕੋਰ 0.0 (0%) ਅਤੇ 1.0 (100%) ਦੇ ਵਿਚਕਾਰ ਹੋਣਾ ਚਾਹੀਦਾ ਹੈ।",
-	"The temperature of the model. Increasing the temperature will make the model answer more creatively. (Default: 0.8)": "",
+	"The temperature of the model. Increasing the temperature will make the model answer more creatively.": "",
 	"Theme": "ਥੀਮ",
 	"Thinking...": "",
 	"This action cannot be undone. Do you wish to continue?": "",
 	"This ensures that your valuable conversations are securely saved to your backend database. Thank you!": "ਇਹ ਯਕੀਨੀ ਬਣਾਉਂਦਾ ਹੈ ਕਿ ਤੁਹਾਡੀਆਂ ਕੀਮਤੀ ਗੱਲਾਂ ਤੁਹਾਡੇ ਬੈਕਐਂਡ ਡਾਟਾਬੇਸ ਵਿੱਚ ਸੁਰੱਖਿਅਤ ਤੌਰ 'ਤੇ ਸੰਭਾਲੀਆਂ ਗਈਆਂ ਹਨ। ਧੰਨਵਾਦ!",
 	"This is an experimental feature, it may not function as expected and is subject to change at any time.": "",
-	"This option controls how many tokens are preserved when refreshing the context. For example, if set to 2, the last 2 tokens of the conversation context will be retained. Preserving context can help maintain the continuity of a conversation, but it may reduce the ability to respond to new topics. (Default: 24)": "",
-	"This option sets the maximum number of tokens the model can generate in its response. Increasing this limit allows the model to provide longer answers, but it may also increase the likelihood of unhelpful or irrelevant content being generated.  (Default: 128)": "",
+	"This option controls how many tokens are preserved when refreshing the context. For example, if set to 2, the last 2 tokens of the conversation context will be retained. Preserving context can help maintain the continuity of a conversation, but it may reduce the ability to respond to new topics.": "",
+	"This option sets the maximum number of tokens the model can generate in its response. Increasing this limit allows the model to provide longer answers, but it may also increase the likelihood of unhelpful or irrelevant content being generated.": "",
 	"This option will delete all existing files in the collection and replace them with newly uploaded files.": "",
 	"This response was generated by \"{{model}}\"": "",
 	"This will delete": "",
@@ -1132,7 +1132,7 @@
 	"Why?": "",
 	"Widescreen Mode": "",
 	"Won": "",
-	"Works together with top-k. A higher value (e.g., 0.95) will lead to more diverse text, while a lower value (e.g., 0.5) will generate more focused and conservative text. (Default: 0.9)": "",
+	"Works together with top-k. A higher value (e.g., 0.95) will lead to more diverse text, while a lower value (e.g., 0.5) will generate more focused and conservative text.": "",
 	"Workspace": "ਕਾਰਜਸਥਲ",
 	"Workspace Permissions": "",
 	"Write": "",

+ 18 - 18
src/lib/i18n/locales/pl-PL/translation.json

@@ -64,7 +64,7 @@
 	"Allow Voice Interruption in Call": "Zezwól na przerwanie połączenia głosowego",
 	"Allowed Endpoints": "Dozwolone punkty końcowe",
 	"Already have an account?": "Czy masz już konto?",
-	"Alternative to the top_p, and aims to ensure a balance of quality and variety. The parameter p represents the minimum probability for a token to be considered, relative to the probability of the most likely token. For example, with p=0.05 and the most likely token having a probability of 0.9, logits with a value less than 0.045 are filtered out. (Default: 0.0)": "Alternatywa dla top_p, mająca na celu zapewnienie równowagi między jakością a różnorodnością. Parametr p reprezentuje minimalne prawdopodobieństwo, aby token był brany pod uwagę, względem prawdopodobieństwa najbardziej prawdopodobnego tokena. Na przykład, dla p=0,05 i najbardziej prawdopodobnym tokenem o prawdopodobieństwie 0,9, logity o wartości mniejszej niż 0,045 są wykluczane. (Domyślnie: 0,0)",
+	"Alternative to the top_p, and aims to ensure a balance of quality and variety. The parameter p represents the minimum probability for a token to be considered, relative to the probability of the most likely token. For example, with p=0.05 and the most likely token having a probability of 0.9, logits with a value less than 0.045 are filtered out.": "",
 	"Always": "Zawsze",
 	"Amazing": "Niesamowite",
 	"an assistant": "asystent",
@@ -208,7 +208,7 @@
 	"Confirm your new password": "Potwierdź nowe hasło",
 	"Connect to your own OpenAI compatible API endpoints.": "Połącz się ze swoimi własnymi punktami końcowymi API kompatybilnego z OpenAI.",
 	"Connections": "Połączenia",
-	"Constrains effort on reasoning for reasoning models. Only applicable to reasoning models from specific providers that support reasoning effort. (Default: medium)": "Ogranicza wysiłek rozumowania dla modeli rozumowania. Stosuje się tylko do modeli rozumowania od określonych dostawców, którzy obsługują wysiłek rozumowania. (Domyślnie: średni)",
+	"Constrains effort on reasoning for reasoning models. Only applicable to reasoning models from specific providers that support reasoning effort.": "",
 	"Contact Admin for WebUI Access": "Skontaktuj się z administratorem, aby uzyskać dostęp do WebUI.",
 	"Content": "Treść",
 	"Content Extraction Engine": "",
@@ -218,9 +218,9 @@
 	"Continue with Email": "Kontynuuj z e-mailem",
 	"Continue with LDAP": "Kontynuuj z LDAP",
 	"Control how message text is split for TTS requests. 'Punctuation' splits into sentences, 'paragraphs' splits into paragraphs, and 'none' keeps the message as a single string.": "Kontroluj sposób dzielenia tekstu wiadomości dla żądań TTS. 'Punctuation' dzieli na zdania, 'paragraphs' dzieli na akapity, a 'none' pozostawia wiadomość jako pojedynczy ciąg znaków.",
-	"Control the repetition of token sequences in the generated text. A higher value (e.g., 1.5) will penalize repetitions more strongly, while a lower value (e.g., 1.1) will be more lenient. At 1, it is disabled. (Default: 1.1)": "Kontroluj powtarzanie się sekwencji tokenów w wygenerowanym tekście. Wyższa wartość (np. 1,5) będzie silniej penalizować powtórzenia, podczas gdy niższa wartość (np. 1,1) będzie bardziej pobłażliwa. Na poziomie 1 jest wyłączona. (Domyślnie: 1,1)",
+	"Control the repetition of token sequences in the generated text. A higher value (e.g., 1.5) will penalize repetitions more strongly, while a lower value (e.g., 1.1) will be more lenient. At 1, it is disabled.": "",
 	"Controls": "Ustawienia",
-	"Controls the balance between coherence and diversity of the output. A lower value will result in more focused and coherent text. (Default: 5.0)": "Kontroluje równowagę między spójnością a zróżnicowaniem wyników. Niższa wartość zaowocuje bardziej skoncentrowanym i spójnym tekstem. (Domyślnie: 5,0)",
+	"Controls the balance between coherence and diversity of the output. A lower value will result in more focused and coherent text.": "",
 	"Copied": "Skopiowane",
 	"Copied shared chat URL to clipboard!": "Skopiowano udostępniony URL czatu do schowka!",
 	"Copied to clipboard": "Skopiowane do schowka",
@@ -358,7 +358,7 @@
 	"Enable Memory Locking (mlock) to prevent model data from being swapped out of RAM. This option locks the model's working set of pages into RAM, ensuring that they will not be swapped out to disk. This can help maintain performance by avoiding page faults and ensuring fast data access.": "Włącz blokowanie pamięci (mlock), aby zapobiec swappingowi danych modelu z RAM. Ta opcja blokuje zbiór stron roboczych modelu w RAM, co gwarantuje, że nie będą one wymieniane na dysk. Może to pomóc w utrzymaniu wydajności poprzez unikanie błędów strony i zapewnienie szybkiego dostępu do danych.",
 	"Enable Memory Mapping (mmap) to load model data. This option allows the system to use disk storage as an extension of RAM by treating disk files as if they were in RAM. This can improve model performance by allowing for faster data access. However, it may not work correctly with all systems and can consume a significant amount of disk space.": "Włącz mapowanie pamięci (mmap), aby załadować dane modelu. Ta opcja pozwala systemowi traktować pliki dysku jako rozszerzenie RAM, co może poprawić wydajność modelu przez umożliwienie szybszego dostępu do danych. Należy jednak pamiętać, że ta funkcja może nie działać poprawnie ze wszystkimi systemami i zużywać znaczną ilość przestrzeni dyskowej.",
 	"Enable Message Rating": "Włącz ocenianie wiadomości",
-	"Enable Mirostat sampling for controlling perplexity. (Default: 0, 0 = Disabled, 1 = Mirostat, 2 = Mirostat 2.0)": "Włącz próbkowanie Mirostat w celu kontrolowania perplexity. (Domyślnie: 0, 0 = Wyłączone, 1 = Mirostat, 2 = Mirostat 2.0)",
+	"Enable Mirostat sampling for controlling perplexity.": "",
 	"Enable New Sign Ups": "Włącz nowe rejestracje",
 	"Enabled": "Włączone",
 	"Ensure your CSV file includes 4 columns in this order: Name, Email, Password, Role.": "Upewnij się, że twój plik CSV zawiera dokładnie 4 kolumny w następującej kolejności: Nazwa, Email, Hasło, Rola.",
@@ -566,7 +566,7 @@
 	"Include": "Włączyć",
 	"Include `--api-auth` flag when running stable-diffusion-webui": "Użyj flagi `--api-auth` podczas uruchamiania stable-diffusion-webui",
 	"Include `--api` flag when running stable-diffusion-webui": "Użyj flagi `--api` podczas uruchamiania stable-diffusion-webui.",
-	"Influences how quickly the algorithm responds to feedback from the generated text. A lower learning rate will result in slower adjustments, while a higher learning rate will make the algorithm more responsive. (Default: 0.1)": "Wpływa na to, jak szybko algorytm reaguje na informacje zwrotne z wygenerowanego tekstu. Niższa stopa uczenia się spowoduje wolniejsze dostosowania, podczas gdy wyższa stopa uczenia się sprawi, że algorytm będzie bardziej reaktywny. (Domyślna: 0.1)",
+	"Influences how quickly the algorithm responds to feedback from the generated text. A lower learning rate will result in slower adjustments, while a higher learning rate will make the algorithm more responsive.": "",
 	"Info": "Informacje",
 	"Input commands": "Wprowadź polecenia",
 	"Install from Github URL": "Instalacja z adresu URL serwisu Github",
@@ -809,7 +809,7 @@
 	"Reasoning Effort": "Wysiłek rozumowania",
 	"Record voice": "Nagraj swój głos",
 	"Redirecting you to Open WebUI Community": "Przekierowujemy Cię do społeczności Open WebUI",
-	"Reduces the probability of generating nonsense. A higher value (e.g. 100) will give more diverse answers, while a lower value (e.g. 10) will be more conservative. (Default: 40)": "Zmniejsza prawdopodobieństwo generowania bezsensownych odpowiedzi. Wyższa wartość (np. 100) daje bardziej zróżnicowane odpowiedzi, podczas gdy niższa wartość (np. 10) jest bardziej konserwatywna. (Domyślnie: 40)",
+	"Reduces the probability of generating nonsense. A higher value (e.g. 100) will give more diverse answers, while a lower value (e.g. 10) will be more conservative.": "",
 	"Refer to yourself as \"User\" (e.g., \"User is learning Spanish\")": "Odnosić się do siebie jako \"Użytkownik\" (np. \"Użytkownik uczy się hiszpańskiego\")",
 	"References from": "Odniesienia do",
 	"Refused when it shouldn't have": "Odmówił, gdy nie powinien",
@@ -918,11 +918,11 @@
 	"Set the number of worker threads used for computation. This option controls how many threads are used to process incoming requests concurrently. Increasing this value can improve performance under high concurrency workloads but may also consume more CPU resources.": "Ustaw liczbę wątków pracowników używanych do obliczeń. Ta opcja kontroluje, ile wątków jest używanych do jednoczesnego przetwarzania przychodzących żądań. Zwiększenie tej wartości może poprawić wydajność pod wysokim obciążeniem, ale może również zużywać więcej zasobów CPU.",
 	"Set Voice": "Ustaw głos",
 	"Set whisper model": "Ustaw model szeptu",
-	"Sets a flat bias against tokens that have appeared at least once. A higher value (e.g., 1.5) will penalize repetitions more strongly, while a lower value (e.g., 0.9) will be more lenient. At 0, it is disabled. (Default: 0)": "Ustawia płaską karę za tokeny, które pojawiły się przynajmniej raz. Wyższa wartość (np. 1,5) bardziej surowo karze powtórzenia, podczas gdy niższa wartość (np. 0,9) jest bardziej pobłażliwa. Przy 0 jest wyłączona. (Domyślnie: 0)",
-	"Sets a scaling bias against tokens to penalize repetitions, based on how many times they have appeared. A higher value (e.g., 1.5) will penalize repetitions more strongly, while a lower value (e.g., 0.9) will be more lenient. At 0, it is disabled. (Default: 1.1)": "Ustawia skalowanie przeciwko tokenom, aby penalizować powtórzenia na podstawie liczby ich wystąpień. Wyższa wartość (np. 1,5) bardziej penalizuje powtórzenia, natomiast niższa wartość (np. 0,9) jest bardziej pobłażliwa. Przy 0 jest wyłączona. (Domyślnie: 1,1)",
-	"Sets how far back for the model to look back to prevent repetition. (Default: 64, 0 = disabled, -1 = num_ctx)": "Określa, jak daleko wstecz model powinien sięgać, aby zapobiec powtarzaniu się. (Domyślnie: 64, 0 = wyłączone, -1 = num_ctx)",
-	"Sets the random number seed to use for generation. Setting this to a specific number will make the model generate the same text for the same prompt. (Default: random)": "Ustawia ziarno liczby losowej do użycia podczas generowania. Ustawienie tego na konkretną liczbę spowoduje, że model będzie generował ten sam tekst dla tego samego zapytania. (Domyślnie: losowe)",
-	"Sets the size of the context window used to generate the next token. (Default: 2048)": "Ustawia rozmiar okna kontekstowego używanego do generowania następnego tokenu. (Domyślnie: 2048)",
+	"Sets a flat bias against tokens that have appeared at least once. A higher value (e.g., 1.5) will penalize repetitions more strongly, while a lower value (e.g., 0.9) will be more lenient. At 0, it is disabled.": "",
+	"Sets a scaling bias against tokens to penalize repetitions, based on how many times they have appeared. A higher value (e.g., 1.5) will penalize repetitions more strongly, while a lower value (e.g., 0.9) will be more lenient. At 0, it is disabled.": "",
+	"Sets how far back for the model to look back to prevent repetition.": "",
+	"Sets the random number seed to use for generation. Setting this to a specific number will make the model generate the same text for the same prompt.": "",
+	"Sets the size of the context window used to generate the next token.": "",
 	"Sets the stop sequences to use. When this pattern is encountered, the LLM will stop generating text and return. Multiple stop patterns may be set by specifying multiple separate stop parameters in a modelfile.": "Ustawia sekwencje stopu do użycia. Gdy ten wzorzec zostanie napotkany, LLM przestanie generować tekst i zwróci wynik. Można skonfigurować wiele sekwencji stopu, określając kilka oddzielnych parametrów stopu w pliku modelu.",
 	"Settings": "Ustawienia",
 	"Settings saved successfully!": "Ustawienia zostały zapisane pomyślnie!",
@@ -964,7 +964,7 @@
 	"System Prompt": "Podpowiedź systemowa",
 	"Tags Generation": "Generowanie tagów",
 	"Tags Generation Prompt": "Podpowiedź do generowania tagów",
-	"Tail free sampling is used to reduce the impact of less probable tokens from the output. A higher value (e.g., 2.0) will reduce the impact more, while a value of 1.0 disables this setting. (default: 1)": "Próbkowanie bez ogona jest używane do zmniejszenia wpływu mniej prawdopodobnych tokenów na wyjście. Wyższa wartość (np. 2,0) zmniejszy ten wpływ bardziej, podczas gdy wartość 1,0 wyłącza to ustawienie. (domyślnie: 1)",
+	"Tail free sampling is used to reduce the impact of less probable tokens from the output. A higher value (e.g., 2.0) will reduce the impact more, while a value of 1.0 disables this setting.": "",
 	"Talk to model": "",
 	"Tap to interrupt": "Kliknij, aby przerwać",
 	"Tasks": "Zadania",
@@ -979,7 +979,7 @@
 	"Thanks for your feedback!": "Dziękuję za twoją opinię!",
 	"The Application Account DN you bind with for search": "Konto techniczne w formacie DN, z którym się wiążesz w celu przeszukiwania",
 	"The base to search for users": "Podstawa do wyszukiwania użytkowników",
-	"The batch size determines how many text requests are processed together at once. A higher batch size can increase the performance and speed of the model, but it also requires more memory.  (Default: 512)": "Rozmiar partii określa, ile żądań tekstu jest przetwarzanych razem w jednym czasie. Większy rozmiar partii może zwiększyć wydajność i szybkość modelu, ale wymaga również więcej pamięci. (Domyślnie: 512)",
+	"The batch size determines how many text requests are processed together at once. A higher batch size can increase the performance and speed of the model, but it also requires more memory.": "",
 	"The developers behind this plugin are passionate volunteers from the community. If you find this plugin helpful, please consider contributing to its development.": "Twórcy tego wtyczki to entuzjaści, którzy działają jako wolontariusze ze społeczności. Jeśli uważasz, że ta wtyczka jest pomocna, rozważ wsparcie jej rozwoju.",
 	"The evaluation leaderboard is based on the Elo rating system and is updated in real-time.": "Tablica wyników oceny opiera się na systemie rankingu Elo i jest aktualizowana w czasie rzeczywistym.",
 	"The LDAP attribute that maps to the mail that users use to sign in.": "Atrybut LDAP, który mapuje się na adres e-mail używany przez użytkowników do logowania.",
@@ -988,14 +988,14 @@
 	"The maximum file size in MB. If the file size exceeds this limit, the file will not be uploaded.": "Maksymalny rozmiar pliku w MB. Jeśli rozmiar pliku przekroczy ten limit, plik nie zostanie przesłany.",
 	"The maximum number of files that can be used at once in chat. If the number of files exceeds this limit, the files will not be uploaded.": "Maksymalna liczba plików, które można użyć jednocześnie w czacie. Jeśli liczba plików przekroczy ten limit, pliki nie zostaną przesłane.",
 	"The score should be a value between 0.0 (0%) and 1.0 (100%).": "Wynik powinien być wartością pomiędzy 0,0 (0%) a 1,0 (100%).",
-	"The temperature of the model. Increasing the temperature will make the model answer more creatively. (Default: 0.8)": "Temperatura modelu. Zwiększenie temperatury sprawi, że model będzie odpowiadał w sposób bardziej kreatywny. (Domyślnie: 0.8)",
+	"The temperature of the model. Increasing the temperature will make the model answer more creatively.": "",
 	"Theme": "Motyw",
 	"Thinking...": "Myślę...",
 	"This action cannot be undone. Do you wish to continue?": "Czy na pewno chcesz kontynuować? Ta akcja nie może zostać cofnięta.",
 	"This ensures that your valuable conversations are securely saved to your backend database. Thank you!": "To gwarantuje, że Twoje wartościowe rozmowy są bezpiecznie zapisywane w bazie danych backendowej. Dziękujemy!",
 	"This is an experimental feature, it may not function as expected and is subject to change at any time.": "To jest funkcja eksperymentalna, może nie działać zgodnie z oczekiwaniami i jest podatna na zmiany w dowolnym momencie.",
-	"This option controls how many tokens are preserved when refreshing the context. For example, if set to 2, the last 2 tokens of the conversation context will be retained. Preserving context can help maintain the continuity of a conversation, but it may reduce the ability to respond to new topics. (Default: 24)": "Ta opcja kontroluje, ile tokenów jest zachowanych podczas odświeżania kontekstu. Na przykład, jeśli ustawiona na 2, ostatnie 2 tokeny kontekstu rozmowy zostaną zachowane. Zachowywanie kontekstu może pomóc w utrzymaniu ciągłości rozmowy, ale może zmniejszyć zdolność do odpowiadania na nowe tematy. (Domyślnie: 24)",
-	"This option sets the maximum number of tokens the model can generate in its response. Increasing this limit allows the model to provide longer answers, but it may also increase the likelihood of unhelpful or irrelevant content being generated.  (Default: 128)": "Ta opcja ustawia maksymalną liczbę tokenów, które model może wygenerować w swojej odpowiedzi. Zwiększenie tego limitu pozwala modelowi na dostarczanie dłuższych odpowiedzi, ale może również zwiększyć prawdopodobieństwo generowania nieprzydatnych lub nieistotnych treści. (Domyślnie: 128)",
+	"This option controls how many tokens are preserved when refreshing the context. For example, if set to 2, the last 2 tokens of the conversation context will be retained. Preserving context can help maintain the continuity of a conversation, but it may reduce the ability to respond to new topics.": "",
+	"This option sets the maximum number of tokens the model can generate in its response. Increasing this limit allows the model to provide longer answers, but it may also increase the likelihood of unhelpful or irrelevant content being generated.": "",
 	"This option will delete all existing files in the collection and replace them with newly uploaded files.": "Ta opcja usunie wszystkie istniejące pliki w kolekcji i zastąpi je nowo przesłanymi plikami.",
 	"This response was generated by \"{{model}}\"": "Ta odpowiedź została wygenerowana przez \"{{model}}\".",
 	"This will delete": "To usunie wszystkie pliki z katalogu.",
@@ -1132,7 +1132,7 @@
 	"Why?": "Dlaczego?",
 	"Widescreen Mode": "Tryb panoramiczny",
 	"Won": "Wygrał",
-	"Works together with top-k. A higher value (e.g., 0.95) will lead to more diverse text, while a lower value (e.g., 0.5) will generate more focused and conservative text. (Default: 0.9)": "Działa razem z top-k. Wyższa wartość (np. 0,95) prowadzi do bardziej zróżnicowanego tekstu, podczas gdy niższa wartość (np. 0,5) generuje bardziej skoncentrowany i konserwatywny tekst. (Domyślnie: 0,9)",
+	"Works together with top-k. A higher value (e.g., 0.95) will lead to more diverse text, while a lower value (e.g., 0.5) will generate more focused and conservative text.": "",
 	"Workspace": "Obszar roboczy",
 	"Workspace Permissions": "Uprawnienia do przestrzeni roboczej",
 	"Write": "Napisz",

+ 18 - 18
src/lib/i18n/locales/pt-BR/translation.json

@@ -64,7 +64,7 @@
 	"Allow Voice Interruption in Call": "Permitir Interrupção de Voz na Chamada",
 	"Allowed Endpoints": "",
 	"Already have an account?": "Já tem uma conta?",
-	"Alternative to the top_p, and aims to ensure a balance of quality and variety. The parameter p represents the minimum probability for a token to be considered, relative to the probability of the most likely token. For example, with p=0.05 and the most likely token having a probability of 0.9, logits with a value less than 0.045 are filtered out. (Default: 0.0)": "Alternativa ao 'top_p', e visa garantir um equilíbrio entre qualidade e variedade. O parâmetro 'p' representa a probabilidade mínima para que um token seja considerado, em relação à probabilidade do token mais provável. Por exemplo, com 'p=0.05' e o token mais provável com probabilidade de '0.9', as predições com valor inferior a '0.045' são filtrados. (Default: 0.0)",
+	"Alternative to the top_p, and aims to ensure a balance of quality and variety. The parameter p represents the minimum probability for a token to be considered, relative to the probability of the most likely token. For example, with p=0.05 and the most likely token having a probability of 0.9, logits with a value less than 0.045 are filtered out.": "",
 	"Always": "",
 	"Amazing": "Incrível",
 	"an assistant": "um assistente",
@@ -208,7 +208,7 @@
 	"Confirm your new password": "",
 	"Connect to your own OpenAI compatible API endpoints.": "",
 	"Connections": "Conexões",
-	"Constrains effort on reasoning for reasoning models. Only applicable to reasoning models from specific providers that support reasoning effort. (Default: medium)": "",
+	"Constrains effort on reasoning for reasoning models. Only applicable to reasoning models from specific providers that support reasoning effort.": "",
 	"Contact Admin for WebUI Access": "Contate o Admin para Acesso ao WebUI",
 	"Content": "Conteúdo",
 	"Content Extraction Engine": "",
@@ -218,9 +218,9 @@
 	"Continue with Email": "Continuar com Email",
 	"Continue with LDAP": "Continuar com LDAP",
 	"Control how message text is split for TTS requests. 'Punctuation' splits into sentences, 'paragraphs' splits into paragraphs, and 'none' keeps the message as a single string.": "Controlar como o texto do mensagem é dividido para solicitações TTS. 'Pontuação' dividida em frases, 'parágrafos' divide em parágrafos e 'não' mantém a mensagem como uma cadeia de caracteres.",
-	"Control the repetition of token sequences in the generated text. A higher value (e.g., 1.5) will penalize repetitions more strongly, while a lower value (e.g., 1.1) will be more lenient. At 1, it is disabled. (Default: 1.1)": "",
+	"Control the repetition of token sequences in the generated text. A higher value (e.g., 1.5) will penalize repetitions more strongly, while a lower value (e.g., 1.1) will be more lenient. At 1, it is disabled.": "",
 	"Controls": "Controles",
-	"Controls the balance between coherence and diversity of the output. A lower value will result in more focused and coherent text. (Default: 5.0)": "Controlar o equilibrio entre a coerencia e a diversidade da saída. Um valor mais baixo fará com que o texto seja mais focado e coerente. (Padrão: 5.0)",
+	"Controls the balance between coherence and diversity of the output. A lower value will result in more focused and coherent text.": "",
 	"Copied": "Copiado",
 	"Copied shared chat URL to clipboard!": "URL de chat compartilhado copiado para a área de transferência!",
 	"Copied to clipboard": "Copiado para a área de transferência",
@@ -358,7 +358,7 @@
 	"Enable Memory Locking (mlock) to prevent model data from being swapped out of RAM. This option locks the model's working set of pages into RAM, ensuring that they will not be swapped out to disk. This can help maintain performance by avoiding page faults and ensuring fast data access.": "Habilite o bloqueio de memória (mlock) para evitar que os dados do modelo sejam transferidos da RAM para a área de troca (swap). Essa opção bloqueia o conjunto de páginas em uso pelo modelo na RAM, garantindo que elas não sejam transferidas para o disco. Isso pode ajudar a manter o desempenho, evitando falhas de página e garantindo acesso rápido aos dados.",
 	"Enable Memory Mapping (mmap) to load model data. This option allows the system to use disk storage as an extension of RAM by treating disk files as if they were in RAM. This can improve model performance by allowing for faster data access. However, it may not work correctly with all systems and can consume a significant amount of disk space.": "Habilite o mapeamento de memória (mmap) para carregar dados do modelo. Esta opção permite que o sistema use o armazenamento em disco como uma extensão da RAM, tratando os arquivos do disco como se estivessem na RAM. Isso pode melhorar o desempenho do modelo, permitindo acesso mais rápido aos dados. No entanto, pode não funcionar corretamente com todos os sistemas e consumir uma quantidade significativa de espaço em disco.",
 	"Enable Message Rating": "Ativar Avaliação de Mensagens",
-	"Enable Mirostat sampling for controlling perplexity. (Default: 0, 0 = Disabled, 1 = Mirostat, 2 = Mirostat 2.0)": "Habilite a amostragem Mirostat para controlar a perplexidade. (Padrão: 0, 0 = Desativado, 1 = Mirostat, 2 = Mirostat 2.0)",
+	"Enable Mirostat sampling for controlling perplexity.": "",
 	"Enable New Sign Ups": "Ativar Novos Cadastros",
 	"Enabled": "Ativado",
 	"Ensure your CSV file includes 4 columns in this order: Name, Email, Password, Role.": "Certifique-se de que seu arquivo CSV inclua 4 colunas nesta ordem: Nome, Email, Senha, Função.",
@@ -566,7 +566,7 @@
 	"Include": "Incluir",
 	"Include `--api-auth` flag when running stable-diffusion-webui": "Incluir a flag `--api-auth` ao executar stable-diffusion-webui",
 	"Include `--api` flag when running stable-diffusion-webui": "Incluir a flag `--api` ao executar stable-diffusion-webui",
-	"Influences how quickly the algorithm responds to feedback from the generated text. A lower learning rate will result in slower adjustments, while a higher learning rate will make the algorithm more responsive. (Default: 0.1)": "Define a rapidez com que o algoritmo responde ao feedback do texto gerado. Uma taxa de aprendizado menor resultará em ajustes mais lentos, enquanto uma taxa maior tornará o algoritmo mais responsivo. (Padrão: 0,1)",
+	"Influences how quickly the algorithm responds to feedback from the generated text. A lower learning rate will result in slower adjustments, while a higher learning rate will make the algorithm more responsive.": "",
 	"Info": "Informação",
 	"Input commands": "Comandos de entrada",
 	"Install from Github URL": "Instalar da URL do Github",
@@ -809,7 +809,7 @@
 	"Reasoning Effort": "",
 	"Record voice": "Gravar voz",
 	"Redirecting you to Open WebUI Community": "Redirecionando você para a Comunidade OpenWebUI",
-	"Reduces the probability of generating nonsense. A higher value (e.g. 100) will give more diverse answers, while a lower value (e.g. 10) will be more conservative. (Default: 40)": "Reduz a probabilidade de gerar absurdos. Um valor mais alto (por exemplo, 100) dará respostas mais diversas, enquanto um valor mais baixo (por exemplo, 10) será mais conservador. (Padrão: 40)",
+	"Reduces the probability of generating nonsense. A higher value (e.g. 100) will give more diverse answers, while a lower value (e.g. 10) will be more conservative.": "",
 	"Refer to yourself as \"User\" (e.g., \"User is learning Spanish\")": "Refira-se como \"Usuário\" (por exemplo, \"Usuário está aprendendo espanhol\")",
 	"References from": "Referências de",
 	"Refused when it shouldn't have": "Recusado quando não deveria",
@@ -918,11 +918,11 @@
 	"Set the number of worker threads used for computation. This option controls how many threads are used to process incoming requests concurrently. Increasing this value can improve performance under high concurrency workloads but may also consume more CPU resources.": "Defina o número de threads de trabalho usadas para computação. Esta opção controla quantos threads são usados para processar as solicitações recebidas de forma simultânea. Aumentar esse valor pode melhorar o desempenho em cargas de trabalho de alta concorrência, mas também pode consumir mais recursos da CPU.",
 	"Set Voice": "Definir Voz",
 	"Set whisper model": "Definir modelo Whisper",
-	"Sets a flat bias against tokens that have appeared at least once. A higher value (e.g., 1.5) will penalize repetitions more strongly, while a lower value (e.g., 0.9) will be more lenient. At 0, it is disabled. (Default: 0)": "",
-	"Sets a scaling bias against tokens to penalize repetitions, based on how many times they have appeared. A higher value (e.g., 1.5) will penalize repetitions more strongly, while a lower value (e.g., 0.9) will be more lenient. At 0, it is disabled. (Default: 1.1)": "",
-	"Sets how far back for the model to look back to prevent repetition. (Default: 64, 0 = disabled, -1 = num_ctx)": "Define a distância de retrocesso que o modelo deve olhar para evitar repetições. (Padrão: 64, 0 = desativado, -1 = num_ctx)",
-	"Sets the random number seed to use for generation. Setting this to a specific number will make the model generate the same text for the same prompt. (Default: random)": "Define a semente do número aleatório a ser usada para a geração. Definir isso como um número específico fará com que o modelo gere o mesmo texto para o mesmo prompt. (Padrão: aleatório)",
-	"Sets the size of the context window used to generate the next token. (Default: 2048)": "Define o tamanho da janela de contexto usada para gerar o próximo token. (Padrão: 2048)",
+	"Sets a flat bias against tokens that have appeared at least once. A higher value (e.g., 1.5) will penalize repetitions more strongly, while a lower value (e.g., 0.9) will be more lenient. At 0, it is disabled.": "",
+	"Sets a scaling bias against tokens to penalize repetitions, based on how many times they have appeared. A higher value (e.g., 1.5) will penalize repetitions more strongly, while a lower value (e.g., 0.9) will be more lenient. At 0, it is disabled.": "",
+	"Sets how far back for the model to look back to prevent repetition.": "",
+	"Sets the random number seed to use for generation. Setting this to a specific number will make the model generate the same text for the same prompt.": "",
+	"Sets the size of the context window used to generate the next token.": "",
 	"Sets the stop sequences to use. When this pattern is encountered, the LLM will stop generating text and return. Multiple stop patterns may be set by specifying multiple separate stop parameters in a modelfile.": "Define as sequências de parada a serem usadas. Quando esse padrão for encontrado, o modelo de linguagem (LLM) parará de gerar texto e retornará. Vários padrões de parada podem ser definidos especificando parâmetros de parada separados em um arquivo de modelo.",
 	"Settings": "Configurações",
 	"Settings saved successfully!": "Configurações salvas com sucesso!",
@@ -964,7 +964,7 @@
 	"System Prompt": "Prompt do Sistema",
 	"Tags Generation": "",
 	"Tags Generation Prompt": "Prompt para geração de Tags",
-	"Tail free sampling is used to reduce the impact of less probable tokens from the output. A higher value (e.g., 2.0) will reduce the impact more, while a value of 1.0 disables this setting. (default: 1)": "A amostragem *tail free* é usada para reduzir o impacto de tokens menos prováveis na saída. Um valor mais alto (por exemplo, 2,0) reduzirá mais o impacto, enquanto um valor de 1,0 desativa essa configuração. (Padrão: 1)",
+	"Tail free sampling is used to reduce the impact of less probable tokens from the output. A higher value (e.g., 2.0) will reduce the impact more, while a value of 1.0 disables this setting.": "",
 	"Talk to model": "",
 	"Tap to interrupt": "Toque para interromper",
 	"Tasks": "",
@@ -979,7 +979,7 @@
 	"Thanks for your feedback!": "Obrigado pelo seu comentário!",
 	"The Application Account DN you bind with for search": "O DN (Distinguished Name) da Conta de Aplicação com a qual você se conecta para pesquisa.",
 	"The base to search for users": "Base para pesquisar usuários.",
-	"The batch size determines how many text requests are processed together at once. A higher batch size can increase the performance and speed of the model, but it also requires more memory.  (Default: 512)": "O tamanho do lote (batch size) determina quantas solicitações de texto são processadas juntas de uma vez. Um tamanho de lote maior pode aumentar o desempenho e a velocidade do modelo, mas também requer mais memória. (Padrão: 512)",
+	"The batch size determines how many text requests are processed together at once. A higher batch size can increase the performance and speed of the model, but it also requires more memory.": "",
 	"The developers behind this plugin are passionate volunteers from the community. If you find this plugin helpful, please consider contributing to its development.": "Os desenvolvedores por trás deste plugin são voluntários apaixonados da comunidade. Se você achar este plugin útil, considere contribuir para o seu desenvolvimento.",
 	"The evaluation leaderboard is based on the Elo rating system and is updated in real-time.": "A evolução do ranking de avaliação é baseada no sistema Elo e será atualizada em tempo real.",
 	"The LDAP attribute that maps to the mail that users use to sign in.": "",
@@ -988,14 +988,14 @@
 	"The maximum file size in MB. If the file size exceeds this limit, the file will not be uploaded.": "Máximo tamanho de arquivo em MB. Se o tamanho do arquivo exceder este limite, o arquivo não será enviado.",
 	"The maximum number of files that can be used at once in chat. If the number of files exceeds this limit, the files will not be uploaded.": "O número máximo de arquivos que podem ser utilizados a cada vez em chat. Se o número de arquivos exceder este limite, os arquivos não serão enviados.",
 	"The score should be a value between 0.0 (0%) and 1.0 (100%).": "A pontuação deve ser um valor entre 0.0 (0%) e 1.0 (100%).",
-	"The temperature of the model. Increasing the temperature will make the model answer more creatively. (Default: 0.8)": "Temperatura do modelo. Aumentar a temperatura fará com que o modelo responda de forma mais criativa. (Padrão: 0,8)",
+	"The temperature of the model. Increasing the temperature will make the model answer more creatively.": "",
 	"Theme": "Tema",
 	"Thinking...": "Pensando...",
 	"This action cannot be undone. Do you wish to continue?": "Esta ação não pode ser desfeita. Você deseja continuar?",
 	"This ensures that your valuable conversations are securely saved to your backend database. Thank you!": "Isso garante que suas conversas valiosas sejam salvas com segurança no banco de dados do backend. Obrigado!",
 	"This is an experimental feature, it may not function as expected and is subject to change at any time.": "Esta é uma funcionalidade experimental, pode não funcionar como esperado e está sujeita a alterações a qualquer momento.",
-	"This option controls how many tokens are preserved when refreshing the context. For example, if set to 2, the last 2 tokens of the conversation context will be retained. Preserving context can help maintain the continuity of a conversation, but it may reduce the ability to respond to new topics. (Default: 24)": "Esta opção controla quantos tokens são preservados ao atualizar o contexto. Por exemplo, se definido como 2, os últimos 2 tokens do contexto da conversa serão mantidos. Preservar o contexto pode ajudar a manter a continuidade da conversa, mas pode reduzir a capacidade de responder a novos tópicos. (Padrão: 24)",
-	"This option sets the maximum number of tokens the model can generate in its response. Increasing this limit allows the model to provide longer answers, but it may also increase the likelihood of unhelpful or irrelevant content being generated.  (Default: 128)": "Esta opção define o número máximo de tokens que o modelo pode gerar em sua resposta. Aumentar esse limite permite que o modelo forneça respostas mais longas, mas também pode aumentar a probabilidade de gerar conteúdo irrelevante ou não útil. (Padrão: 128)",
+	"This option controls how many tokens are preserved when refreshing the context. For example, if set to 2, the last 2 tokens of the conversation context will be retained. Preserving context can help maintain the continuity of a conversation, but it may reduce the ability to respond to new topics.": "",
+	"This option sets the maximum number of tokens the model can generate in its response. Increasing this limit allows the model to provide longer answers, but it may also increase the likelihood of unhelpful or irrelevant content being generated.": "",
 	"This option will delete all existing files in the collection and replace them with newly uploaded files.": "Essa opção deletará todos os arquivos existentes na coleção e todos eles serão substituídos.",
 	"This response was generated by \"{{model}}\"": "Esta resposta foi gerada por \"{{model}}\"",
 	"This will delete": "Isso vai excluir",
@@ -1132,7 +1132,7 @@
 	"Why?": "Por que",
 	"Widescreen Mode": "Modo Tela Cheia",
 	"Won": "Ganhou",
-	"Works together with top-k. A higher value (e.g., 0.95) will lead to more diverse text, while a lower value (e.g., 0.5) will generate more focused and conservative text. (Default: 0.9)": "Funciona em conjunto com o top-k. Um valor mais alto (por exemplo, 0,95) levará a um texto mais diversificado, enquanto um valor mais baixo (por exemplo, 0,5) gerará um texto mais focado e conservador. (Padrão: 0,9)",
+	"Works together with top-k. A higher value (e.g., 0.95) will lead to more diverse text, while a lower value (e.g., 0.5) will generate more focused and conservative text.": "",
 	"Workspace": "Espaço de Trabalho",
 	"Workspace Permissions": "Permissões do espaço de trabalho",
 	"Write": "",

+ 18 - 18
src/lib/i18n/locales/pt-PT/translation.json

@@ -64,7 +64,7 @@
 	"Allow Voice Interruption in Call": "",
 	"Allowed Endpoints": "",
 	"Already have an account?": "Já tem uma conta?",
-	"Alternative to the top_p, and aims to ensure a balance of quality and variety. The parameter p represents the minimum probability for a token to be considered, relative to the probability of the most likely token. For example, with p=0.05 and the most likely token having a probability of 0.9, logits with a value less than 0.045 are filtered out. (Default: 0.0)": "",
+	"Alternative to the top_p, and aims to ensure a balance of quality and variety. The parameter p represents the minimum probability for a token to be considered, relative to the probability of the most likely token. For example, with p=0.05 and the most likely token having a probability of 0.9, logits with a value less than 0.045 are filtered out.": "",
 	"Always": "",
 	"Amazing": "",
 	"an assistant": "um assistente",
@@ -208,7 +208,7 @@
 	"Confirm your new password": "",
 	"Connect to your own OpenAI compatible API endpoints.": "",
 	"Connections": "Conexões",
-	"Constrains effort on reasoning for reasoning models. Only applicable to reasoning models from specific providers that support reasoning effort. (Default: medium)": "",
+	"Constrains effort on reasoning for reasoning models. Only applicable to reasoning models from specific providers that support reasoning effort.": "",
 	"Contact Admin for WebUI Access": "Contatar Admin para acesso ao WebUI",
 	"Content": "Conteúdo",
 	"Content Extraction Engine": "",
@@ -218,9 +218,9 @@
 	"Continue with Email": "",
 	"Continue with LDAP": "",
 	"Control how message text is split for TTS requests. 'Punctuation' splits into sentences, 'paragraphs' splits into paragraphs, and 'none' keeps the message as a single string.": "",
-	"Control the repetition of token sequences in the generated text. A higher value (e.g., 1.5) will penalize repetitions more strongly, while a lower value (e.g., 1.1) will be more lenient. At 1, it is disabled. (Default: 1.1)": "",
+	"Control the repetition of token sequences in the generated text. A higher value (e.g., 1.5) will penalize repetitions more strongly, while a lower value (e.g., 1.1) will be more lenient. At 1, it is disabled.": "",
 	"Controls": "",
-	"Controls the balance between coherence and diversity of the output. A lower value will result in more focused and coherent text. (Default: 5.0)": "",
+	"Controls the balance between coherence and diversity of the output. A lower value will result in more focused and coherent text.": "",
 	"Copied": "",
 	"Copied shared chat URL to clipboard!": "URL de Conversa partilhado copiada com sucesso!",
 	"Copied to clipboard": "",
@@ -358,7 +358,7 @@
 	"Enable Memory Locking (mlock) to prevent model data from being swapped out of RAM. This option locks the model's working set of pages into RAM, ensuring that they will not be swapped out to disk. This can help maintain performance by avoiding page faults and ensuring fast data access.": "",
 	"Enable Memory Mapping (mmap) to load model data. This option allows the system to use disk storage as an extension of RAM by treating disk files as if they were in RAM. This can improve model performance by allowing for faster data access. However, it may not work correctly with all systems and can consume a significant amount of disk space.": "",
 	"Enable Message Rating": "",
-	"Enable Mirostat sampling for controlling perplexity. (Default: 0, 0 = Disabled, 1 = Mirostat, 2 = Mirostat 2.0)": "",
+	"Enable Mirostat sampling for controlling perplexity.": "",
 	"Enable New Sign Ups": "Ativar Novas Inscrições",
 	"Enabled": "",
 	"Ensure your CSV file includes 4 columns in this order: Name, Email, Password, Role.": "Confirme que o seu ficheiro CSV inclui 4 colunas nesta ordem: Nome, E-mail, Senha, Função.",
@@ -566,7 +566,7 @@
 	"Include": "",
 	"Include `--api-auth` flag when running stable-diffusion-webui": "",
 	"Include `--api` flag when running stable-diffusion-webui": "Inclua a flag `--api` ao executar stable-diffusion-webui",
-	"Influences how quickly the algorithm responds to feedback from the generated text. A lower learning rate will result in slower adjustments, while a higher learning rate will make the algorithm more responsive. (Default: 0.1)": "",
+	"Influences how quickly the algorithm responds to feedback from the generated text. A lower learning rate will result in slower adjustments, while a higher learning rate will make the algorithm more responsive.": "",
 	"Info": "Informação",
 	"Input commands": "Comandos de entrada",
 	"Install from Github URL": "Instalar a partir do URL do Github",
@@ -809,7 +809,7 @@
 	"Reasoning Effort": "",
 	"Record voice": "Gravar voz",
 	"Redirecting you to Open WebUI Community": "Redirecionando-o para a Comunidade OpenWebUI",
-	"Reduces the probability of generating nonsense. A higher value (e.g. 100) will give more diverse answers, while a lower value (e.g. 10) will be more conservative. (Default: 40)": "",
+	"Reduces the probability of generating nonsense. A higher value (e.g. 100) will give more diverse answers, while a lower value (e.g. 10) will be more conservative.": "",
 	"Refer to yourself as \"User\" (e.g., \"User is learning Spanish\")": "Refera-se a si próprio como \"User\" (por exemplo, \"User está a aprender Espanhol\")",
 	"References from": "",
 	"Refused when it shouldn't have": "Recusado quando não deveria",
@@ -918,11 +918,11 @@
 	"Set the number of worker threads used for computation. This option controls how many threads are used to process incoming requests concurrently. Increasing this value can improve performance under high concurrency workloads but may also consume more CPU resources.": "",
 	"Set Voice": "Definir Voz",
 	"Set whisper model": "",
-	"Sets a flat bias against tokens that have appeared at least once. A higher value (e.g., 1.5) will penalize repetitions more strongly, while a lower value (e.g., 0.9) will be more lenient. At 0, it is disabled. (Default: 0)": "",
-	"Sets a scaling bias against tokens to penalize repetitions, based on how many times they have appeared. A higher value (e.g., 1.5) will penalize repetitions more strongly, while a lower value (e.g., 0.9) will be more lenient. At 0, it is disabled. (Default: 1.1)": "",
-	"Sets how far back for the model to look back to prevent repetition. (Default: 64, 0 = disabled, -1 = num_ctx)": "",
-	"Sets the random number seed to use for generation. Setting this to a specific number will make the model generate the same text for the same prompt. (Default: random)": "",
-	"Sets the size of the context window used to generate the next token. (Default: 2048)": "",
+	"Sets a flat bias against tokens that have appeared at least once. A higher value (e.g., 1.5) will penalize repetitions more strongly, while a lower value (e.g., 0.9) will be more lenient. At 0, it is disabled.": "",
+	"Sets a scaling bias against tokens to penalize repetitions, based on how many times they have appeared. A higher value (e.g., 1.5) will penalize repetitions more strongly, while a lower value (e.g., 0.9) will be more lenient. At 0, it is disabled.": "",
+	"Sets how far back for the model to look back to prevent repetition.": "",
+	"Sets the random number seed to use for generation. Setting this to a specific number will make the model generate the same text for the same prompt.": "",
+	"Sets the size of the context window used to generate the next token.": "",
 	"Sets the stop sequences to use. When this pattern is encountered, the LLM will stop generating text and return. Multiple stop patterns may be set by specifying multiple separate stop parameters in a modelfile.": "",
 	"Settings": "Configurações",
 	"Settings saved successfully!": "Configurações guardadas com sucesso!",
@@ -964,7 +964,7 @@
 	"System Prompt": "Prompt do Sistema",
 	"Tags Generation": "",
 	"Tags Generation Prompt": "",
-	"Tail free sampling is used to reduce the impact of less probable tokens from the output. A higher value (e.g., 2.0) will reduce the impact more, while a value of 1.0 disables this setting. (default: 1)": "",
+	"Tail free sampling is used to reduce the impact of less probable tokens from the output. A higher value (e.g., 2.0) will reduce the impact more, while a value of 1.0 disables this setting.": "",
 	"Talk to model": "",
 	"Tap to interrupt": "",
 	"Tasks": "",
@@ -979,7 +979,7 @@
 	"Thanks for your feedback!": "Obrigado pelo seu feedback!",
 	"The Application Account DN you bind with for search": "",
 	"The base to search for users": "",
-	"The batch size determines how many text requests are processed together at once. A higher batch size can increase the performance and speed of the model, but it also requires more memory.  (Default: 512)": "",
+	"The batch size determines how many text requests are processed together at once. A higher batch size can increase the performance and speed of the model, but it also requires more memory.": "",
 	"The developers behind this plugin are passionate volunteers from the community. If you find this plugin helpful, please consider contributing to its development.": "",
 	"The evaluation leaderboard is based on the Elo rating system and is updated in real-time.": "",
 	"The LDAP attribute that maps to the mail that users use to sign in.": "",
@@ -988,14 +988,14 @@
 	"The maximum file size in MB. If the file size exceeds this limit, the file will not be uploaded.": "",
 	"The maximum number of files that can be used at once in chat. If the number of files exceeds this limit, the files will not be uploaded.": "",
 	"The score should be a value between 0.0 (0%) and 1.0 (100%).": "A pontuação deve ser um valor entre 0.0 (0%) e 1.0 (100%).",
-	"The temperature of the model. Increasing the temperature will make the model answer more creatively. (Default: 0.8)": "",
+	"The temperature of the model. Increasing the temperature will make the model answer more creatively.": "",
 	"Theme": "Tema",
 	"Thinking...": "A pensar...",
 	"This action cannot be undone. Do you wish to continue?": "",
 	"This ensures that your valuable conversations are securely saved to your backend database. Thank you!": "Isto garante que suas conversas valiosas sejam guardadas com segurança na sua base de dados de backend. Obrigado!",
 	"This is an experimental feature, it may not function as expected and is subject to change at any time.": "Isto é um recurso experimental, pode não funcionar conforme o esperado e está sujeito a alterações a qualquer momento.",
-	"This option controls how many tokens are preserved when refreshing the context. For example, if set to 2, the last 2 tokens of the conversation context will be retained. Preserving context can help maintain the continuity of a conversation, but it may reduce the ability to respond to new topics. (Default: 24)": "",
-	"This option sets the maximum number of tokens the model can generate in its response. Increasing this limit allows the model to provide longer answers, but it may also increase the likelihood of unhelpful or irrelevant content being generated.  (Default: 128)": "",
+	"This option controls how many tokens are preserved when refreshing the context. For example, if set to 2, the last 2 tokens of the conversation context will be retained. Preserving context can help maintain the continuity of a conversation, but it may reduce the ability to respond to new topics.": "",
+	"This option sets the maximum number of tokens the model can generate in its response. Increasing this limit allows the model to provide longer answers, but it may also increase the likelihood of unhelpful or irrelevant content being generated.": "",
 	"This option will delete all existing files in the collection and replace them with newly uploaded files.": "",
 	"This response was generated by \"{{model}}\"": "",
 	"This will delete": "",
@@ -1132,7 +1132,7 @@
 	"Why?": "",
 	"Widescreen Mode": "Modo Widescreen",
 	"Won": "",
-	"Works together with top-k. A higher value (e.g., 0.95) will lead to more diverse text, while a lower value (e.g., 0.5) will generate more focused and conservative text. (Default: 0.9)": "",
+	"Works together with top-k. A higher value (e.g., 0.95) will lead to more diverse text, while a lower value (e.g., 0.5) will generate more focused and conservative text.": "",
 	"Workspace": "Espaço de Trabalho",
 	"Workspace Permissions": "",
 	"Write": "",

+ 18 - 18
src/lib/i18n/locales/ro-RO/translation.json

@@ -64,7 +64,7 @@
 	"Allow Voice Interruption in Call": "Permite intreruperea vocii în apel",
 	"Allowed Endpoints": "",
 	"Already have an account?": "Deja ai un cont?",
-	"Alternative to the top_p, and aims to ensure a balance of quality and variety. The parameter p represents the minimum probability for a token to be considered, relative to the probability of the most likely token. For example, with p=0.05 and the most likely token having a probability of 0.9, logits with a value less than 0.045 are filtered out. (Default: 0.0)": "",
+	"Alternative to the top_p, and aims to ensure a balance of quality and variety. The parameter p represents the minimum probability for a token to be considered, relative to the probability of the most likely token. For example, with p=0.05 and the most likely token having a probability of 0.9, logits with a value less than 0.045 are filtered out.": "",
 	"Always": "Întotdeauna",
 	"Amazing": "Uimitor",
 	"an assistant": "un asistent",
@@ -208,7 +208,7 @@
 	"Confirm your new password": "",
 	"Connect to your own OpenAI compatible API endpoints.": "",
 	"Connections": "Conexiuni",
-	"Constrains effort on reasoning for reasoning models. Only applicable to reasoning models from specific providers that support reasoning effort. (Default: medium)": "",
+	"Constrains effort on reasoning for reasoning models. Only applicable to reasoning models from specific providers that support reasoning effort.": "",
 	"Contact Admin for WebUI Access": "Contactează administratorul pentru acces WebUI",
 	"Content": "Conținut",
 	"Content Extraction Engine": "",
@@ -218,9 +218,9 @@
 	"Continue with Email": "",
 	"Continue with LDAP": "",
 	"Control how message text is split for TTS requests. 'Punctuation' splits into sentences, 'paragraphs' splits into paragraphs, and 'none' keeps the message as a single string.": "Controlează modul în care textul mesajului este divizat pentru cererile TTS. 'Punctuation' împarte în propoziții, 'paragraphs' împarte în paragrafe, iar 'none' menține mesajul ca un șir unic.",
-	"Control the repetition of token sequences in the generated text. A higher value (e.g., 1.5) will penalize repetitions more strongly, while a lower value (e.g., 1.1) will be more lenient. At 1, it is disabled. (Default: 1.1)": "",
+	"Control the repetition of token sequences in the generated text. A higher value (e.g., 1.5) will penalize repetitions more strongly, while a lower value (e.g., 1.1) will be more lenient. At 1, it is disabled.": "",
 	"Controls": "Controale",
-	"Controls the balance between coherence and diversity of the output. A lower value will result in more focused and coherent text. (Default: 5.0)": "",
+	"Controls the balance between coherence and diversity of the output. A lower value will result in more focused and coherent text.": "",
 	"Copied": "Copiat",
 	"Copied shared chat URL to clipboard!": "URL-ul conversației partajate a fost copiat în clipboard!",
 	"Copied to clipboard": "Copiat în clipboard",
@@ -358,7 +358,7 @@
 	"Enable Memory Locking (mlock) to prevent model data from being swapped out of RAM. This option locks the model's working set of pages into RAM, ensuring that they will not be swapped out to disk. This can help maintain performance by avoiding page faults and ensuring fast data access.": "",
 	"Enable Memory Mapping (mmap) to load model data. This option allows the system to use disk storage as an extension of RAM by treating disk files as if they were in RAM. This can improve model performance by allowing for faster data access. However, it may not work correctly with all systems and can consume a significant amount of disk space.": "",
 	"Enable Message Rating": "Activează Evaluarea Mesajelor",
-	"Enable Mirostat sampling for controlling perplexity. (Default: 0, 0 = Disabled, 1 = Mirostat, 2 = Mirostat 2.0)": "",
+	"Enable Mirostat sampling for controlling perplexity.": "",
 	"Enable New Sign Ups": "Activează Înscrierile Noi",
 	"Enabled": "Activat",
 	"Ensure your CSV file includes 4 columns in this order: Name, Email, Password, Role.": "Asigurați-vă că fișierul CSV include 4 coloane în această ordine: Nume, Email, Parolă, Rol.",
@@ -566,7 +566,7 @@
 	"Include": "Include",
 	"Include `--api-auth` flag when running stable-diffusion-webui": "Includeți flag-ul `--api-auth` când rulați stable-diffusion-webui",
 	"Include `--api` flag when running stable-diffusion-webui": "Includeți flag-ul `--api` când rulați stable-diffusion-webui",
-	"Influences how quickly the algorithm responds to feedback from the generated text. A lower learning rate will result in slower adjustments, while a higher learning rate will make the algorithm more responsive. (Default: 0.1)": "",
+	"Influences how quickly the algorithm responds to feedback from the generated text. A lower learning rate will result in slower adjustments, while a higher learning rate will make the algorithm more responsive.": "",
 	"Info": "Informații",
 	"Input commands": "Comenzi de intrare",
 	"Install from Github URL": "Instalează de la URL-ul Github",
@@ -809,7 +809,7 @@
 	"Reasoning Effort": "",
 	"Record voice": "Înregistrează vocea",
 	"Redirecting you to Open WebUI Community": "Vă redirecționăm către Comunitatea OpenWebUI",
-	"Reduces the probability of generating nonsense. A higher value (e.g. 100) will give more diverse answers, while a lower value (e.g. 10) will be more conservative. (Default: 40)": "",
+	"Reduces the probability of generating nonsense. A higher value (e.g. 100) will give more diverse answers, while a lower value (e.g. 10) will be more conservative.": "",
 	"Refer to yourself as \"User\" (e.g., \"User is learning Spanish\")": "Referiți-vă la dvs. ca \"Utilizator\" (de ex., \"Utilizatorul învață spaniolă\")",
 	"References from": "Referințe din",
 	"Refused when it shouldn't have": "Refuzat când nu ar fi trebuit",
@@ -918,11 +918,11 @@
 	"Set the number of worker threads used for computation. This option controls how many threads are used to process incoming requests concurrently. Increasing this value can improve performance under high concurrency workloads but may also consume more CPU resources.": "",
 	"Set Voice": "Setează Voce",
 	"Set whisper model": "Setează modelul whisper",
-	"Sets a flat bias against tokens that have appeared at least once. A higher value (e.g., 1.5) will penalize repetitions more strongly, while a lower value (e.g., 0.9) will be more lenient. At 0, it is disabled. (Default: 0)": "",
-	"Sets a scaling bias against tokens to penalize repetitions, based on how many times they have appeared. A higher value (e.g., 1.5) will penalize repetitions more strongly, while a lower value (e.g., 0.9) will be more lenient. At 0, it is disabled. (Default: 1.1)": "",
-	"Sets how far back for the model to look back to prevent repetition. (Default: 64, 0 = disabled, -1 = num_ctx)": "",
-	"Sets the random number seed to use for generation. Setting this to a specific number will make the model generate the same text for the same prompt. (Default: random)": "",
-	"Sets the size of the context window used to generate the next token. (Default: 2048)": "",
+	"Sets a flat bias against tokens that have appeared at least once. A higher value (e.g., 1.5) will penalize repetitions more strongly, while a lower value (e.g., 0.9) will be more lenient. At 0, it is disabled.": "",
+	"Sets a scaling bias against tokens to penalize repetitions, based on how many times they have appeared. A higher value (e.g., 1.5) will penalize repetitions more strongly, while a lower value (e.g., 0.9) will be more lenient. At 0, it is disabled.": "",
+	"Sets how far back for the model to look back to prevent repetition.": "",
+	"Sets the random number seed to use for generation. Setting this to a specific number will make the model generate the same text for the same prompt.": "",
+	"Sets the size of the context window used to generate the next token.": "",
 	"Sets the stop sequences to use. When this pattern is encountered, the LLM will stop generating text and return. Multiple stop patterns may be set by specifying multiple separate stop parameters in a modelfile.": "",
 	"Settings": "Setări",
 	"Settings saved successfully!": "Setările au fost salvate cu succes!",
@@ -964,7 +964,7 @@
 	"System Prompt": "Prompt de Sistem",
 	"Tags Generation": "",
 	"Tags Generation Prompt": "Generarea de Etichete Prompt",
-	"Tail free sampling is used to reduce the impact of less probable tokens from the output. A higher value (e.g., 2.0) will reduce the impact more, while a value of 1.0 disables this setting. (default: 1)": "",
+	"Tail free sampling is used to reduce the impact of less probable tokens from the output. A higher value (e.g., 2.0) will reduce the impact more, while a value of 1.0 disables this setting.": "",
 	"Talk to model": "",
 	"Tap to interrupt": "Apasă pentru a întrerupe",
 	"Tasks": "",
@@ -979,7 +979,7 @@
 	"Thanks for your feedback!": "Mulțumim pentru feedback!",
 	"The Application Account DN you bind with for search": "",
 	"The base to search for users": "",
-	"The batch size determines how many text requests are processed together at once. A higher batch size can increase the performance and speed of the model, but it also requires more memory.  (Default: 512)": "",
+	"The batch size determines how many text requests are processed together at once. A higher batch size can increase the performance and speed of the model, but it also requires more memory.": "",
 	"The developers behind this plugin are passionate volunteers from the community. If you find this plugin helpful, please consider contributing to its development.": "Dezvoltatorii din spatele acestui plugin sunt voluntari pasionați din comunitate. Dacă considerați acest plugin util, vă rugăm să luați în considerare contribuția la dezvoltarea sa.",
 	"The evaluation leaderboard is based on the Elo rating system and is updated in real-time.": "Clasamentul de evaluare se bazează pe sistemul de rating Elo și este actualizat în timp real.",
 	"The LDAP attribute that maps to the mail that users use to sign in.": "",
@@ -988,14 +988,14 @@
 	"The maximum file size in MB. If the file size exceeds this limit, the file will not be uploaded.": "Dimensiunea maximă a fișierului în MB. Dacă dimensiunea fișierului depășește această limită, fișierul nu va fi încărcat.",
 	"The maximum number of files that can be used at once in chat. If the number of files exceeds this limit, the files will not be uploaded.": "Numărul maxim de fișiere care pot fi utilizate simultan în chat. Dacă numărul de fișiere depășește această limită, fișierele nu vor fi încărcate.",
 	"The score should be a value between 0.0 (0%) and 1.0 (100%).": "Scorul ar trebui să fie o valoare între 0.0 (0%) și 1.0 (100%).",
-	"The temperature of the model. Increasing the temperature will make the model answer more creatively. (Default: 0.8)": "",
+	"The temperature of the model. Increasing the temperature will make the model answer more creatively.": "",
 	"Theme": "Temă",
 	"Thinking...": "Gândește...",
 	"This action cannot be undone. Do you wish to continue?": "Această acțiune nu poate fi anulată. Doriți să continuați?",
 	"This ensures that your valuable conversations are securely saved to your backend database. Thank you!": "Acest lucru asigură că conversațiile dvs. valoroase sunt salvate în siguranță în baza de date a backend-ului dvs. Mulțumim!",
 	"This is an experimental feature, it may not function as expected and is subject to change at any time.": "Aceasta este o funcție experimentală, poate să nu funcționeze așa cum vă așteptați și este supusă schimbării în orice moment.",
-	"This option controls how many tokens are preserved when refreshing the context. For example, if set to 2, the last 2 tokens of the conversation context will be retained. Preserving context can help maintain the continuity of a conversation, but it may reduce the ability to respond to new topics. (Default: 24)": "",
-	"This option sets the maximum number of tokens the model can generate in its response. Increasing this limit allows the model to provide longer answers, but it may also increase the likelihood of unhelpful or irrelevant content being generated.  (Default: 128)": "",
+	"This option controls how many tokens are preserved when refreshing the context. For example, if set to 2, the last 2 tokens of the conversation context will be retained. Preserving context can help maintain the continuity of a conversation, but it may reduce the ability to respond to new topics.": "",
+	"This option sets the maximum number of tokens the model can generate in its response. Increasing this limit allows the model to provide longer answers, but it may also increase the likelihood of unhelpful or irrelevant content being generated.": "",
 	"This option will delete all existing files in the collection and replace them with newly uploaded files.": "Această opțiune va șterge toate fișierelor existente din colecție și le va înlocui cu fișierele nou încărcate.",
 	"This response was generated by \"{{model}}\"": "Acest răspuns a fost generat de \"{{model}}\"",
 	"This will delete": "Aceasta va șterge",
@@ -1132,7 +1132,7 @@
 	"Why?": "",
 	"Widescreen Mode": "Mod Ecran Larg",
 	"Won": "Câștigat",
-	"Works together with top-k. A higher value (e.g., 0.95) will lead to more diverse text, while a lower value (e.g., 0.5) will generate more focused and conservative text. (Default: 0.9)": "",
+	"Works together with top-k. A higher value (e.g., 0.95) will lead to more diverse text, while a lower value (e.g., 0.5) will generate more focused and conservative text.": "",
 	"Workspace": "Spațiu de Lucru",
 	"Workspace Permissions": "",
 	"Write": "Scrie",

+ 18 - 18
src/lib/i18n/locales/ru-RU/translation.json

@@ -64,7 +64,7 @@
 	"Allow Voice Interruption in Call": "Разрешить прерывание голоса во время вызова",
 	"Allowed Endpoints": "",
 	"Already have an account?": "У вас уже есть учетная запись?",
-	"Alternative to the top_p, and aims to ensure a balance of quality and variety. The parameter p represents the minimum probability for a token to be considered, relative to the probability of the most likely token. For example, with p=0.05 and the most likely token having a probability of 0.9, logits with a value less than 0.045 are filtered out. (Default: 0.0)": "",
+	"Alternative to the top_p, and aims to ensure a balance of quality and variety. The parameter p represents the minimum probability for a token to be considered, relative to the probability of the most likely token. For example, with p=0.05 and the most likely token having a probability of 0.9, logits with a value less than 0.045 are filtered out.": "",
 	"Always": "",
 	"Amazing": "Удивительный",
 	"an assistant": "ассистент",
@@ -208,7 +208,7 @@
 	"Confirm your new password": "",
 	"Connect to your own OpenAI compatible API endpoints.": "",
 	"Connections": "Соединение",
-	"Constrains effort on reasoning for reasoning models. Only applicable to reasoning models from specific providers that support reasoning effort. (Default: medium)": "",
+	"Constrains effort on reasoning for reasoning models. Only applicable to reasoning models from specific providers that support reasoning effort.": "",
 	"Contact Admin for WebUI Access": "Обратитесь к администратору для получения доступа к WebUI",
 	"Content": "Содержание",
 	"Content Extraction Engine": "",
@@ -218,9 +218,9 @@
 	"Continue with Email": "Продолжить с Email",
 	"Continue with LDAP": "Продолжить с LDAP",
 	"Control how message text is split for TTS requests. 'Punctuation' splits into sentences, 'paragraphs' splits into paragraphs, and 'none' keeps the message as a single string.": "Управляйте разделением текста сообщения для запросов TTS. 'Пунктуация' разделяет на предложения, 'абзацы' - разделяет на абзацы, а 'нет' сохраняет сообщение в виде одной строки.",
-	"Control the repetition of token sequences in the generated text. A higher value (e.g., 1.5) will penalize repetitions more strongly, while a lower value (e.g., 1.1) will be more lenient. At 1, it is disabled. (Default: 1.1)": "",
+	"Control the repetition of token sequences in the generated text. A higher value (e.g., 1.5) will penalize repetitions more strongly, while a lower value (e.g., 1.1) will be more lenient. At 1, it is disabled.": "",
 	"Controls": "Управление",
-	"Controls the balance between coherence and diversity of the output. A lower value will result in more focused and coherent text. (Default: 5.0)": "Управляет балансом между согласованностью и разнообразием выходных данных. Меньшее значение приведет к более сфокусированному и связному тексту. (По умолчанию: 5.0)",
+	"Controls the balance between coherence and diversity of the output. A lower value will result in more focused and coherent text.": "",
 	"Copied": "Скопировано",
 	"Copied shared chat URL to clipboard!": "Копирование в буфер обмена выполнено успешно!",
 	"Copied to clipboard": "Скопировано в буфер обмена",
@@ -358,7 +358,7 @@
 	"Enable Memory Locking (mlock) to prevent model data from being swapped out of RAM. This option locks the model's working set of pages into RAM, ensuring that they will not be swapped out to disk. This can help maintain performance by avoiding page faults and ensuring fast data access.": "Включите блокировку памяти (mlock), чтобы предотвратить выгрузку данных модели из ОЗУ. Эта опция блокирует рабочий набор страниц модели в оперативной памяти, гарантируя, что они не будут выгружены на диск. Это может помочь поддерживать производительность, избегая ошибок страниц и обеспечивая быстрый доступ к данным.",
 	"Enable Memory Mapping (mmap) to load model data. This option allows the system to use disk storage as an extension of RAM by treating disk files as if they were in RAM. This can improve model performance by allowing for faster data access. However, it may not work correctly with all systems and can consume a significant amount of disk space.": "Включите отображение памяти (mmap), чтобы загрузить данные модели. Эта опция позволяет системе использовать дисковое хранилище в качестве расширения оперативной памяти, обрабатывая дисковые файлы так, как если бы они находились в оперативной памяти. Это может улучшить производительность модели за счет более быстрого доступа к данным. Однако он может работать некорректно со всеми системами и занимать значительный объем дискового пространства.",
 	"Enable Message Rating": "Разрешить оценку ответов",
-	"Enable Mirostat sampling for controlling perplexity. (Default: 0, 0 = Disabled, 1 = Mirostat, 2 = Mirostat 2.0)": "",
+	"Enable Mirostat sampling for controlling perplexity.": "",
 	"Enable New Sign Ups": "Разрешить новые регистрации",
 	"Enabled": "Включено",
 	"Ensure your CSV file includes 4 columns in this order: Name, Email, Password, Role.": "Убедитесь, что ваш CSV-файл включает в себя 4 столбца в следующем порядке: Имя, Электронная почта, Пароль, Роль.",
@@ -566,7 +566,7 @@
 	"Include": "Включать",
 	"Include `--api-auth` flag when running stable-diffusion-webui": "Добавьте флаг '--api-auth' при запуске stable-diffusion-webui",
 	"Include `--api` flag when running stable-diffusion-webui": "Добавьте флаг `--api` при запуске stable-diffusion-webui",
-	"Influences how quickly the algorithm responds to feedback from the generated text. A lower learning rate will result in slower adjustments, while a higher learning rate will make the algorithm more responsive. (Default: 0.1)": "",
+	"Influences how quickly the algorithm responds to feedback from the generated text. A lower learning rate will result in slower adjustments, while a higher learning rate will make the algorithm more responsive.": "",
 	"Info": "Информация",
 	"Input commands": "Введите команды",
 	"Install from Github URL": "Установка с URL-адреса Github",
@@ -809,7 +809,7 @@
 	"Reasoning Effort": "",
 	"Record voice": "Записать голос",
 	"Redirecting you to Open WebUI Community": "Перенаправляем вас в сообщество OpenWebUI",
-	"Reduces the probability of generating nonsense. A higher value (e.g. 100) will give more diverse answers, while a lower value (e.g. 10) will be more conservative. (Default: 40)": "",
+	"Reduces the probability of generating nonsense. A higher value (e.g. 100) will give more diverse answers, while a lower value (e.g. 10) will be more conservative.": "",
 	"Refer to yourself as \"User\" (e.g., \"User is learning Spanish\")": "Называйте себя \"User\" (например, \"User is learning Spanish\").",
 	"References from": "",
 	"Refused when it shouldn't have": "Отказано в доступе, когда это не должно было произойти",
@@ -918,11 +918,11 @@
 	"Set the number of worker threads used for computation. This option controls how many threads are used to process incoming requests concurrently. Increasing this value can improve performance under high concurrency workloads but may also consume more CPU resources.": "",
 	"Set Voice": "Установить голос",
 	"Set whisper model": "",
-	"Sets a flat bias against tokens that have appeared at least once. A higher value (e.g., 1.5) will penalize repetitions more strongly, while a lower value (e.g., 0.9) will be more lenient. At 0, it is disabled. (Default: 0)": "",
-	"Sets a scaling bias against tokens to penalize repetitions, based on how many times they have appeared. A higher value (e.g., 1.5) will penalize repetitions more strongly, while a lower value (e.g., 0.9) will be more lenient. At 0, it is disabled. (Default: 1.1)": "",
-	"Sets how far back for the model to look back to prevent repetition. (Default: 64, 0 = disabled, -1 = num_ctx)": "",
-	"Sets the random number seed to use for generation. Setting this to a specific number will make the model generate the same text for the same prompt. (Default: random)": "",
-	"Sets the size of the context window used to generate the next token. (Default: 2048)": "",
+	"Sets a flat bias against tokens that have appeared at least once. A higher value (e.g., 1.5) will penalize repetitions more strongly, while a lower value (e.g., 0.9) will be more lenient. At 0, it is disabled.": "",
+	"Sets a scaling bias against tokens to penalize repetitions, based on how many times they have appeared. A higher value (e.g., 1.5) will penalize repetitions more strongly, while a lower value (e.g., 0.9) will be more lenient. At 0, it is disabled.": "",
+	"Sets how far back for the model to look back to prevent repetition.": "",
+	"Sets the random number seed to use for generation. Setting this to a specific number will make the model generate the same text for the same prompt.": "",
+	"Sets the size of the context window used to generate the next token.": "",
 	"Sets the stop sequences to use. When this pattern is encountered, the LLM will stop generating text and return. Multiple stop patterns may be set by specifying multiple separate stop parameters in a modelfile.": "",
 	"Settings": "Настройки",
 	"Settings saved successfully!": "Настройки успешно сохранены!",
@@ -964,7 +964,7 @@
 	"System Prompt": "Системный промпт",
 	"Tags Generation": "",
 	"Tags Generation Prompt": "",
-	"Tail free sampling is used to reduce the impact of less probable tokens from the output. A higher value (e.g., 2.0) will reduce the impact more, while a value of 1.0 disables this setting. (default: 1)": "",
+	"Tail free sampling is used to reduce the impact of less probable tokens from the output. A higher value (e.g., 2.0) will reduce the impact more, while a value of 1.0 disables this setting.": "",
 	"Talk to model": "",
 	"Tap to interrupt": "Нажмите, чтобы прервать",
 	"Tasks": "",
@@ -979,7 +979,7 @@
 	"Thanks for your feedback!": "Спасибо за вашу обратную связь!",
 	"The Application Account DN you bind with for search": "",
 	"The base to search for users": "",
-	"The batch size determines how many text requests are processed together at once. A higher batch size can increase the performance and speed of the model, but it also requires more memory.  (Default: 512)": "",
+	"The batch size determines how many text requests are processed together at once. A higher batch size can increase the performance and speed of the model, but it also requires more memory.": "",
 	"The developers behind this plugin are passionate volunteers from the community. If you find this plugin helpful, please consider contributing to its development.": "Разработчики этого плагина - увлеченные волонтеры из сообщества. Если вы считаете этот плагин полезным, пожалуйста, подумайте о том, чтобы внести свой вклад в его разработку.",
 	"The evaluation leaderboard is based on the Elo rating system and is updated in real-time.": "",
 	"The LDAP attribute that maps to the mail that users use to sign in.": "",
@@ -988,14 +988,14 @@
 	"The maximum file size in MB. If the file size exceeds this limit, the file will not be uploaded.": "Максимальный размер файла в МБ. Если размер файла превысит это ограничение, файл не будет загружен.",
 	"The maximum number of files that can be used at once in chat. If the number of files exceeds this limit, the files will not be uploaded.": "Максимальное количество файлов, которые могут быть использованы одновременно в чате. Если количество файлов превысит это ограничение, файлы не будут загружены.",
 	"The score should be a value between 0.0 (0%) and 1.0 (100%).": "Оценка должна быть значением между 0,0 (0%) и 1,0 (100%).",
-	"The temperature of the model. Increasing the temperature will make the model answer more creatively. (Default: 0.8)": "Температура модели. Повышение температуры заставит модель отвечать более творчески. (По умолчанию: 0,8)",
+	"The temperature of the model. Increasing the temperature will make the model answer more creatively.": "",
 	"Theme": "Тема",
 	"Thinking...": "Думаю...",
 	"This action cannot be undone. Do you wish to continue?": "Это действие нельзя отменить. Вы хотите продолжить?",
 	"This ensures that your valuable conversations are securely saved to your backend database. Thank you!": "Это обеспечивает сохранение ваших ценных разговоров в безопасной базе данных на вашем сервере. Спасибо!",
 	"This is an experimental feature, it may not function as expected and is subject to change at any time.": "Это экспериментальная функция, она может работать не так, как ожидалось, и может быть изменена в любое время.",
-	"This option controls how many tokens are preserved when refreshing the context. For example, if set to 2, the last 2 tokens of the conversation context will be retained. Preserving context can help maintain the continuity of a conversation, but it may reduce the ability to respond to new topics. (Default: 24)": "Этот параметр определяет, сколько токенов сохраняется при обновлении контекста. Например, если установлено значение 2, будут сохранены два последних токена контекста разговора. Сохранение контекста может помочь сохранить непрерывность разговора, но может снизить способность реагировать на новые темы. (По умолчанию: 24)",
-	"This option sets the maximum number of tokens the model can generate in its response. Increasing this limit allows the model to provide longer answers, but it may also increase the likelihood of unhelpful or irrelevant content being generated.  (Default: 128)": "Этот параметр устанавливает максимальное количество токенов, которые модель может сгенерировать в своем ответе. Увеличение этого предела позволяет модели предоставлять более длинные ответы, но также может увеличить вероятность создания бесполезного или нерелевантного контента. (По умолчанию: 128)",
+	"This option controls how many tokens are preserved when refreshing the context. For example, if set to 2, the last 2 tokens of the conversation context will be retained. Preserving context can help maintain the continuity of a conversation, but it may reduce the ability to respond to new topics.": "",
+	"This option sets the maximum number of tokens the model can generate in its response. Increasing this limit allows the model to provide longer answers, but it may also increase the likelihood of unhelpful or irrelevant content being generated.": "",
 	"This option will delete all existing files in the collection and replace them with newly uploaded files.": "Эта опция удалит все существующие файлы в коллекции и заменит их вновь загруженными файлами.",
 	"This response was generated by \"{{model}}\"": "",
 	"This will delete": "Это приведет к удалению",
@@ -1132,7 +1132,7 @@
 	"Why?": "Почему?",
 	"Widescreen Mode": "Широкоэкранный режим",
 	"Won": "",
-	"Works together with top-k. A higher value (e.g., 0.95) will lead to more diverse text, while a lower value (e.g., 0.5) will generate more focused and conservative text. (Default: 0.9)": "Работает совместно с top-k. Более высокое значение (например, 0,95) приведет к созданию более разнообразного текста, а более низкое значение (например, 0,5) приведет к созданию более сфокусированного и консервативного текста. (По умолчанию: 0,9)",
+	"Works together with top-k. A higher value (e.g., 0.95) will lead to more diverse text, while a lower value (e.g., 0.5) will generate more focused and conservative text.": "",
 	"Workspace": "Рабочее пространство",
 	"Workspace Permissions": "Разрешения для Рабочего пространства",
 	"Write": "",

+ 18 - 18
src/lib/i18n/locales/sk-SK/translation.json

@@ -64,7 +64,7 @@
 	"Allow Voice Interruption in Call": "Povoliť prerušenie hlasu počas hovoru",
 	"Allowed Endpoints": "",
 	"Already have an account?": "Už máte účet?",
-	"Alternative to the top_p, and aims to ensure a balance of quality and variety. The parameter p represents the minimum probability for a token to be considered, relative to the probability of the most likely token. For example, with p=0.05 and the most likely token having a probability of 0.9, logits with a value less than 0.045 are filtered out. (Default: 0.0)": "",
+	"Alternative to the top_p, and aims to ensure a balance of quality and variety. The parameter p represents the minimum probability for a token to be considered, relative to the probability of the most likely token. For example, with p=0.05 and the most likely token having a probability of 0.9, logits with a value less than 0.045 are filtered out.": "",
 	"Always": "",
 	"Amazing": "",
 	"an assistant": "asistent",
@@ -208,7 +208,7 @@
 	"Confirm your new password": "",
 	"Connect to your own OpenAI compatible API endpoints.": "",
 	"Connections": "Pripojenia",
-	"Constrains effort on reasoning for reasoning models. Only applicable to reasoning models from specific providers that support reasoning effort. (Default: medium)": "",
+	"Constrains effort on reasoning for reasoning models. Only applicable to reasoning models from specific providers that support reasoning effort.": "",
 	"Contact Admin for WebUI Access": "Kontaktujte administrátora pre prístup k webovému rozhraniu.",
 	"Content": "Obsah",
 	"Content Extraction Engine": "",
@@ -218,9 +218,9 @@
 	"Continue with Email": "",
 	"Continue with LDAP": "",
 	"Control how message text is split for TTS requests. 'Punctuation' splits into sentences, 'paragraphs' splits into paragraphs, and 'none' keeps the message as a single string.": "Kontrola, ako sa text správy rozdeľuje pre požiadavky TTS. 'Punctuation' rozdeľuje text na vety, 'paragraphs' rozdeľuje text na odseky a 'none' ponecháva správu ako jeden celý reťazec.",
-	"Control the repetition of token sequences in the generated text. A higher value (e.g., 1.5) will penalize repetitions more strongly, while a lower value (e.g., 1.1) will be more lenient. At 1, it is disabled. (Default: 1.1)": "",
+	"Control the repetition of token sequences in the generated text. A higher value (e.g., 1.5) will penalize repetitions more strongly, while a lower value (e.g., 1.1) will be more lenient. At 1, it is disabled.": "",
 	"Controls": "Ovládacie prvky",
-	"Controls the balance between coherence and diversity of the output. A lower value will result in more focused and coherent text. (Default: 5.0)": "",
+	"Controls the balance between coherence and diversity of the output. A lower value will result in more focused and coherent text.": "",
 	"Copied": "Skopírované",
 	"Copied shared chat URL to clipboard!": "URL zdieľanej konverzácie skopírované do schránky!",
 	"Copied to clipboard": "Skopírované do schránky",
@@ -358,7 +358,7 @@
 	"Enable Memory Locking (mlock) to prevent model data from being swapped out of RAM. This option locks the model's working set of pages into RAM, ensuring that they will not be swapped out to disk. This can help maintain performance by avoiding page faults and ensuring fast data access.": "",
 	"Enable Memory Mapping (mmap) to load model data. This option allows the system to use disk storage as an extension of RAM by treating disk files as if they were in RAM. This can improve model performance by allowing for faster data access. However, it may not work correctly with all systems and can consume a significant amount of disk space.": "",
 	"Enable Message Rating": "Povoliť hodnotenie správ",
-	"Enable Mirostat sampling for controlling perplexity. (Default: 0, 0 = Disabled, 1 = Mirostat, 2 = Mirostat 2.0)": "",
+	"Enable Mirostat sampling for controlling perplexity.": "",
 	"Enable New Sign Ups": "Povoliť nové registrácie",
 	"Enabled": "Povolené",
 	"Ensure your CSV file includes 4 columns in this order: Name, Email, Password, Role.": "Uistite sa, že váš CSV súbor obsahuje 4 stĺpce v tomto poradí: Name, Email, Password, Role.",
@@ -566,7 +566,7 @@
 	"Include": "Zahrnúť",
 	"Include `--api-auth` flag when running stable-diffusion-webui": "Zahrňte prepínač `--api-auth` pri spustení stable-diffusion-webui.",
 	"Include `--api` flag when running stable-diffusion-webui": "Pri spustení stable-diffusion-webui zahrňte príznak `--api`.",
-	"Influences how quickly the algorithm responds to feedback from the generated text. A lower learning rate will result in slower adjustments, while a higher learning rate will make the algorithm more responsive. (Default: 0.1)": "",
+	"Influences how quickly the algorithm responds to feedback from the generated text. A lower learning rate will result in slower adjustments, while a higher learning rate will make the algorithm more responsive.": "",
 	"Info": "Info",
 	"Input commands": "Vstupné príkazy",
 	"Install from Github URL": "Inštalácia z URL adresy Githubu",
@@ -809,7 +809,7 @@
 	"Reasoning Effort": "",
 	"Record voice": "Nahrať hlas",
 	"Redirecting you to Open WebUI Community": "Presmerovanie na komunitu OpenWebUI",
-	"Reduces the probability of generating nonsense. A higher value (e.g. 100) will give more diverse answers, while a lower value (e.g. 10) will be more conservative. (Default: 40)": "",
+	"Reduces the probability of generating nonsense. A higher value (e.g. 100) will give more diverse answers, while a lower value (e.g. 10) will be more conservative.": "",
 	"Refer to yourself as \"User\" (e.g., \"User is learning Spanish\")": "Odkazujte na seba ako na \"užívateľa\" (napr. \"Užívateľ sa učí španielsky\").",
 	"References from": "Referencie z",
 	"Refused when it shouldn't have": "Odmietnuté, keď nemalo byť.",
@@ -918,11 +918,11 @@
 	"Set the number of worker threads used for computation. This option controls how many threads are used to process incoming requests concurrently. Increasing this value can improve performance under high concurrency workloads but may also consume more CPU resources.": "",
 	"Set Voice": "Nastaviť hlas",
 	"Set whisper model": "Nastaviť model whisper",
-	"Sets a flat bias against tokens that have appeared at least once. A higher value (e.g., 1.5) will penalize repetitions more strongly, while a lower value (e.g., 0.9) will be more lenient. At 0, it is disabled. (Default: 0)": "",
-	"Sets a scaling bias against tokens to penalize repetitions, based on how many times they have appeared. A higher value (e.g., 1.5) will penalize repetitions more strongly, while a lower value (e.g., 0.9) will be more lenient. At 0, it is disabled. (Default: 1.1)": "",
-	"Sets how far back for the model to look back to prevent repetition. (Default: 64, 0 = disabled, -1 = num_ctx)": "",
-	"Sets the random number seed to use for generation. Setting this to a specific number will make the model generate the same text for the same prompt. (Default: random)": "",
-	"Sets the size of the context window used to generate the next token. (Default: 2048)": "",
+	"Sets a flat bias against tokens that have appeared at least once. A higher value (e.g., 1.5) will penalize repetitions more strongly, while a lower value (e.g., 0.9) will be more lenient. At 0, it is disabled.": "",
+	"Sets a scaling bias against tokens to penalize repetitions, based on how many times they have appeared. A higher value (e.g., 1.5) will penalize repetitions more strongly, while a lower value (e.g., 0.9) will be more lenient. At 0, it is disabled.": "",
+	"Sets how far back for the model to look back to prevent repetition.": "",
+	"Sets the random number seed to use for generation. Setting this to a specific number will make the model generate the same text for the same prompt.": "",
+	"Sets the size of the context window used to generate the next token.": "",
 	"Sets the stop sequences to use. When this pattern is encountered, the LLM will stop generating text and return. Multiple stop patterns may be set by specifying multiple separate stop parameters in a modelfile.": "",
 	"Settings": "Nastavenia",
 	"Settings saved successfully!": "Nastavenia boli úspešne uložené!",
@@ -964,7 +964,7 @@
 	"System Prompt": "Systémový prompt",
 	"Tags Generation": "",
 	"Tags Generation Prompt": "Prompt na generovanie značiek",
-	"Tail free sampling is used to reduce the impact of less probable tokens from the output. A higher value (e.g., 2.0) will reduce the impact more, while a value of 1.0 disables this setting. (default: 1)": "",
+	"Tail free sampling is used to reduce the impact of less probable tokens from the output. A higher value (e.g., 2.0) will reduce the impact more, while a value of 1.0 disables this setting.": "",
 	"Talk to model": "",
 	"Tap to interrupt": "Klepnite na prerušenie",
 	"Tasks": "",
@@ -979,7 +979,7 @@
 	"Thanks for your feedback!": "Ďakujeme za vašu spätnú väzbu!",
 	"The Application Account DN you bind with for search": "",
 	"The base to search for users": "",
-	"The batch size determines how many text requests are processed together at once. A higher batch size can increase the performance and speed of the model, but it also requires more memory.  (Default: 512)": "",
+	"The batch size determines how many text requests are processed together at once. A higher batch size can increase the performance and speed of the model, but it also requires more memory.": "",
 	"The developers behind this plugin are passionate volunteers from the community. If you find this plugin helpful, please consider contributing to its development.": "Vývojári stojaci za týmto pluginom sú zapálení dobrovoľníci z komunity. Ak považujete tento plugin za užitočný, zvážte príspevok na jeho vývoj.",
 	"The evaluation leaderboard is based on the Elo rating system and is updated in real-time.": "Hodnotiaca tabuľka je založená na systéme hodnotenia Elo a aktualizuje sa v reálnom čase.",
 	"The LDAP attribute that maps to the mail that users use to sign in.": "",
@@ -988,14 +988,14 @@
 	"The maximum file size in MB. If the file size exceeds this limit, the file will not be uploaded.": "Maximálna veľkosť súboru v MB. Ak veľkosť súboru presiahne tento limit, súbor nebude nahraný.",
 	"The maximum number of files that can be used at once in chat. If the number of files exceeds this limit, the files will not be uploaded.": "Maximálny počet súborov, ktoré je možné použiť naraz v chate. Ak počet súborov presiahne tento limit, súbory nebudú nahrané.",
 	"The score should be a value between 0.0 (0%) and 1.0 (100%).": "Skóre by malo byť hodnotou medzi 0,0 (0%) a 1,0 (100%).",
-	"The temperature of the model. Increasing the temperature will make the model answer more creatively. (Default: 0.8)": "",
+	"The temperature of the model. Increasing the temperature will make the model answer more creatively.": "",
 	"Theme": "Téma",
 	"Thinking...": "Premýšľam...",
 	"This action cannot be undone. Do you wish to continue?": "Túto akciu nie je možné vrátiť späť. Prajete si pokračovať?",
 	"This ensures that your valuable conversations are securely saved to your backend database. Thank you!": "Týmto je zaistené, že vaše cenné konverzácie sú bezpečne uložené vo vašej backendovej databáze. Ďakujeme!",
 	"This is an experimental feature, it may not function as expected and is subject to change at any time.": "Toto je experimentálna funkcia, nemusí fungovať podľa očakávania a môže byť kedykoľvek zmenená.",
-	"This option controls how many tokens are preserved when refreshing the context. For example, if set to 2, the last 2 tokens of the conversation context will be retained. Preserving context can help maintain the continuity of a conversation, but it may reduce the ability to respond to new topics. (Default: 24)": "",
-	"This option sets the maximum number of tokens the model can generate in its response. Increasing this limit allows the model to provide longer answers, but it may also increase the likelihood of unhelpful or irrelevant content being generated.  (Default: 128)": "",
+	"This option controls how many tokens are preserved when refreshing the context. For example, if set to 2, the last 2 tokens of the conversation context will be retained. Preserving context can help maintain the continuity of a conversation, but it may reduce the ability to respond to new topics.": "",
+	"This option sets the maximum number of tokens the model can generate in its response. Increasing this limit allows the model to provide longer answers, but it may also increase the likelihood of unhelpful or irrelevant content being generated.": "",
 	"This option will delete all existing files in the collection and replace them with newly uploaded files.": "Táto voľba odstráni všetky existujúce súbory v kolekcii a nahradí ich novo nahranými súbormi.",
 	"This response was generated by \"{{model}}\"": "Táto odpoveď bola vygenerovaná pomocou \"{{model}}\"",
 	"This will delete": "Toto odstráni",
@@ -1132,7 +1132,7 @@
 	"Why?": "Prečo?",
 	"Widescreen Mode": "Režim širokouhlého zobrazenia",
 	"Won": "Vyhral",
-	"Works together with top-k. A higher value (e.g., 0.95) will lead to more diverse text, while a lower value (e.g., 0.5) will generate more focused and conservative text. (Default: 0.9)": "",
+	"Works together with top-k. A higher value (e.g., 0.95) will lead to more diverse text, while a lower value (e.g., 0.5) will generate more focused and conservative text.": "",
 	"Workspace": "",
 	"Workspace Permissions": "",
 	"Write": "",

+ 18 - 18
src/lib/i18n/locales/sr-RS/translation.json

@@ -64,7 +64,7 @@
 	"Allow Voice Interruption in Call": "Дозволи прекид гласа у позиву",
 	"Allowed Endpoints": "",
 	"Already have an account?": "Већ имате налог?",
-	"Alternative to the top_p, and aims to ensure a balance of quality and variety. The parameter p represents the minimum probability for a token to be considered, relative to the probability of the most likely token. For example, with p=0.05 and the most likely token having a probability of 0.9, logits with a value less than 0.045 are filtered out. (Default: 0.0)": "",
+	"Alternative to the top_p, and aims to ensure a balance of quality and variety. The parameter p represents the minimum probability for a token to be considered, relative to the probability of the most likely token. For example, with p=0.05 and the most likely token having a probability of 0.9, logits with a value less than 0.045 are filtered out.": "",
 	"Always": "",
 	"Amazing": "Невероватно",
 	"an assistant": "помоћник",
@@ -208,7 +208,7 @@
 	"Confirm your new password": "Потврди нову лозинку",
 	"Connect to your own OpenAI compatible API endpoints.": "",
 	"Connections": "Везе",
-	"Constrains effort on reasoning for reasoning models. Only applicable to reasoning models from specific providers that support reasoning effort. (Default: medium)": "",
+	"Constrains effort on reasoning for reasoning models. Only applicable to reasoning models from specific providers that support reasoning effort.": "",
 	"Contact Admin for WebUI Access": "Пишите админима за приступ на WebUI",
 	"Content": "Садржај",
 	"Content Extraction Engine": "",
@@ -218,9 +218,9 @@
 	"Continue with Email": "Настави са е-адресом",
 	"Continue with LDAP": "Настави са ЛДАП-ом",
 	"Control how message text is split for TTS requests. 'Punctuation' splits into sentences, 'paragraphs' splits into paragraphs, and 'none' keeps the message as a single string.": "",
-	"Control the repetition of token sequences in the generated text. A higher value (e.g., 1.5) will penalize repetitions more strongly, while a lower value (e.g., 1.1) will be more lenient. At 1, it is disabled. (Default: 1.1)": "",
+	"Control the repetition of token sequences in the generated text. A higher value (e.g., 1.5) will penalize repetitions more strongly, while a lower value (e.g., 1.1) will be more lenient. At 1, it is disabled.": "",
 	"Controls": "Контроле",
-	"Controls the balance between coherence and diversity of the output. A lower value will result in more focused and coherent text. (Default: 5.0)": "",
+	"Controls the balance between coherence and diversity of the output. A lower value will result in more focused and coherent text.": "",
 	"Copied": "Копирано",
 	"Copied shared chat URL to clipboard!": "Адреса дељеног ћаскања ископирана у оставу!",
 	"Copied to clipboard": "Копирано у оставу",
@@ -358,7 +358,7 @@
 	"Enable Memory Locking (mlock) to prevent model data from being swapped out of RAM. This option locks the model's working set of pages into RAM, ensuring that they will not be swapped out to disk. This can help maintain performance by avoiding page faults and ensuring fast data access.": "",
 	"Enable Memory Mapping (mmap) to load model data. This option allows the system to use disk storage as an extension of RAM by treating disk files as if they were in RAM. This can improve model performance by allowing for faster data access. However, it may not work correctly with all systems and can consume a significant amount of disk space.": "",
 	"Enable Message Rating": "",
-	"Enable Mirostat sampling for controlling perplexity. (Default: 0, 0 = Disabled, 1 = Mirostat, 2 = Mirostat 2.0)": "",
+	"Enable Mirostat sampling for controlling perplexity.": "",
 	"Enable New Sign Ups": "Омогући нове пријаве",
 	"Enabled": "Омогућено",
 	"Ensure your CSV file includes 4 columns in this order: Name, Email, Password, Role.": "Уверите се да ваша CSV датотека укључује 4 колоне у овом редоследу: Име, Е-пошта, Лозинка, Улога.",
@@ -566,7 +566,7 @@
 	"Include": "",
 	"Include `--api-auth` flag when running stable-diffusion-webui": "",
 	"Include `--api` flag when running stable-diffusion-webui": "Укључи `--api` заставицу при покретању stable-diffusion-webui",
-	"Influences how quickly the algorithm responds to feedback from the generated text. A lower learning rate will result in slower adjustments, while a higher learning rate will make the algorithm more responsive. (Default: 0.1)": "",
+	"Influences how quickly the algorithm responds to feedback from the generated text. A lower learning rate will result in slower adjustments, while a higher learning rate will make the algorithm more responsive.": "",
 	"Info": "Инфо",
 	"Input commands": "Унеси наредбе",
 	"Install from Github URL": "Инсталирај из Гитхуб УРЛ адресе",
@@ -809,7 +809,7 @@
 	"Reasoning Effort": "Јачина размишљања",
 	"Record voice": "Сними глас",
 	"Redirecting you to Open WebUI Community": "Преусмеравање на OpenWebUI заједницу",
-	"Reduces the probability of generating nonsense. A higher value (e.g. 100) will give more diverse answers, while a lower value (e.g. 10) will be more conservative. (Default: 40)": "",
+	"Reduces the probability of generating nonsense. A higher value (e.g. 100) will give more diverse answers, while a lower value (e.g. 10) will be more conservative.": "",
 	"Refer to yourself as \"User\" (e.g., \"User is learning Spanish\")": "",
 	"References from": "Референце од",
 	"Refused when it shouldn't have": "Одбијено када није требало",
@@ -918,11 +918,11 @@
 	"Set the number of worker threads used for computation. This option controls how many threads are used to process incoming requests concurrently. Increasing this value can improve performance under high concurrency workloads but may also consume more CPU resources.": "",
 	"Set Voice": "Подеси глас",
 	"Set whisper model": "",
-	"Sets a flat bias against tokens that have appeared at least once. A higher value (e.g., 1.5) will penalize repetitions more strongly, while a lower value (e.g., 0.9) will be more lenient. At 0, it is disabled. (Default: 0)": "",
-	"Sets a scaling bias against tokens to penalize repetitions, based on how many times they have appeared. A higher value (e.g., 1.5) will penalize repetitions more strongly, while a lower value (e.g., 0.9) will be more lenient. At 0, it is disabled. (Default: 1.1)": "",
-	"Sets how far back for the model to look back to prevent repetition. (Default: 64, 0 = disabled, -1 = num_ctx)": "",
-	"Sets the random number seed to use for generation. Setting this to a specific number will make the model generate the same text for the same prompt. (Default: random)": "",
-	"Sets the size of the context window used to generate the next token. (Default: 2048)": "",
+	"Sets a flat bias against tokens that have appeared at least once. A higher value (e.g., 1.5) will penalize repetitions more strongly, while a lower value (e.g., 0.9) will be more lenient. At 0, it is disabled.": "",
+	"Sets a scaling bias against tokens to penalize repetitions, based on how many times they have appeared. A higher value (e.g., 1.5) will penalize repetitions more strongly, while a lower value (e.g., 0.9) will be more lenient. At 0, it is disabled.": "",
+	"Sets how far back for the model to look back to prevent repetition.": "",
+	"Sets the random number seed to use for generation. Setting this to a specific number will make the model generate the same text for the same prompt.": "",
+	"Sets the size of the context window used to generate the next token.": "",
 	"Sets the stop sequences to use. When this pattern is encountered, the LLM will stop generating text and return. Multiple stop patterns may be set by specifying multiple separate stop parameters in a modelfile.": "",
 	"Settings": "Подешавања",
 	"Settings saved successfully!": "Подешавања успешно сачувана!",
@@ -964,7 +964,7 @@
 	"System Prompt": "Системски упит",
 	"Tags Generation": "Стварање ознака",
 	"Tags Generation Prompt": "Упит стварања ознака",
-	"Tail free sampling is used to reduce the impact of less probable tokens from the output. A higher value (e.g., 2.0) will reduce the impact more, while a value of 1.0 disables this setting. (default: 1)": "",
+	"Tail free sampling is used to reduce the impact of less probable tokens from the output. A higher value (e.g., 2.0) will reduce the impact more, while a value of 1.0 disables this setting.": "",
 	"Talk to model": "",
 	"Tap to interrupt": "",
 	"Tasks": "",
@@ -979,7 +979,7 @@
 	"Thanks for your feedback!": "Хвала на вашем коментару!",
 	"The Application Account DN you bind with for search": "",
 	"The base to search for users": "",
-	"The batch size determines how many text requests are processed together at once. A higher batch size can increase the performance and speed of the model, but it also requires more memory.  (Default: 512)": "",
+	"The batch size determines how many text requests are processed together at once. A higher batch size can increase the performance and speed of the model, but it also requires more memory.": "",
 	"The developers behind this plugin are passionate volunteers from the community. If you find this plugin helpful, please consider contributing to its development.": "",
 	"The evaluation leaderboard is based on the Elo rating system and is updated in real-time.": "",
 	"The LDAP attribute that maps to the mail that users use to sign in.": "",
@@ -988,14 +988,14 @@
 	"The maximum file size in MB. If the file size exceeds this limit, the file will not be uploaded.": "",
 	"The maximum number of files that can be used at once in chat. If the number of files exceeds this limit, the files will not be uploaded.": "",
 	"The score should be a value between 0.0 (0%) and 1.0 (100%).": "Резултат треба да буде вредност између 0.0 (0%) и 1.0 (100%).",
-	"The temperature of the model. Increasing the temperature will make the model answer more creatively. (Default: 0.8)": "",
+	"The temperature of the model. Increasing the temperature will make the model answer more creatively.": "",
 	"Theme": "Тема",
 	"Thinking...": "Размишљам...",
 	"This action cannot be undone. Do you wish to continue?": "Ова радња се не може опозвати. Да ли желите наставити?",
 	"This ensures that your valuable conversations are securely saved to your backend database. Thank you!": "Ово осигурава да су ваши вредни разговори безбедно сачувани у вашој бекенд бази података. Хвала вам!",
 	"This is an experimental feature, it may not function as expected and is subject to change at any time.": "",
-	"This option controls how many tokens are preserved when refreshing the context. For example, if set to 2, the last 2 tokens of the conversation context will be retained. Preserving context can help maintain the continuity of a conversation, but it may reduce the ability to respond to new topics. (Default: 24)": "",
-	"This option sets the maximum number of tokens the model can generate in its response. Increasing this limit allows the model to provide longer answers, but it may also increase the likelihood of unhelpful or irrelevant content being generated.  (Default: 128)": "",
+	"This option controls how many tokens are preserved when refreshing the context. For example, if set to 2, the last 2 tokens of the conversation context will be retained. Preserving context can help maintain the continuity of a conversation, but it may reduce the ability to respond to new topics.": "",
+	"This option sets the maximum number of tokens the model can generate in its response. Increasing this limit allows the model to provide longer answers, but it may also increase the likelihood of unhelpful or irrelevant content being generated.": "",
 	"This option will delete all existing files in the collection and replace them with newly uploaded files.": "",
 	"This response was generated by \"{{model}}\"": "",
 	"This will delete": "Ово ће обрисати",
@@ -1132,7 +1132,7 @@
 	"Why?": "Зашто?",
 	"Widescreen Mode": "Режим широког екрана",
 	"Won": "Победа",
-	"Works together with top-k. A higher value (e.g., 0.95) will lead to more diverse text, while a lower value (e.g., 0.5) will generate more focused and conservative text. (Default: 0.9)": "",
+	"Works together with top-k. A higher value (e.g., 0.95) will lead to more diverse text, while a lower value (e.g., 0.5) will generate more focused and conservative text.": "",
 	"Workspace": "Радни простор",
 	"Workspace Permissions": "",
 	"Write": "Пиши",

+ 18 - 18
src/lib/i18n/locales/sv-SE/translation.json

@@ -64,7 +64,7 @@
 	"Allow Voice Interruption in Call": "Tillåt röstavbrott under samtal",
 	"Allowed Endpoints": "Tillåtna Endpoints",
 	"Already have an account?": "Har du redan ett konto?",
-	"Alternative to the top_p, and aims to ensure a balance of quality and variety. The parameter p represents the minimum probability for a token to be considered, relative to the probability of the most likely token. For example, with p=0.05 and the most likely token having a probability of 0.9, logits with a value less than 0.045 are filtered out. (Default: 0.0)": "Alternativ till top_p, och syftar till att säkerställa en balans mellan kvalitet och variation. Parametern p representerar den minsta sannolikheten för att en token ska beaktas, i förhållande till sannolikheten för den mest sannolika token. Till exempel, med p=0.05 och den mest sannolika token som har en sannolikhet på 0.9, filtreras logiter med ett värde mindre än 0.045 bort. (Standard: 0.0)",
+	"Alternative to the top_p, and aims to ensure a balance of quality and variety. The parameter p represents the minimum probability for a token to be considered, relative to the probability of the most likely token. For example, with p=0.05 and the most likely token having a probability of 0.9, logits with a value less than 0.045 are filtered out.": "",
 	"Always": "Alltid",
 	"Amazing": "Fantastiskt",
 	"an assistant": "en assistent",
@@ -208,7 +208,7 @@
 	"Confirm your new password": "Bekräfta ditt nya lösenord",
 	"Connect to your own OpenAI compatible API endpoints.": "",
 	"Connections": "Anslutningar",
-	"Constrains effort on reasoning for reasoning models. Only applicable to reasoning models from specific providers that support reasoning effort. (Default: medium)": "",
+	"Constrains effort on reasoning for reasoning models. Only applicable to reasoning models from specific providers that support reasoning effort.": "",
 	"Contact Admin for WebUI Access": "Kontakta administratören för att få åtkomst till WebUI",
 	"Content": "Innehåll",
 	"Content Extraction Engine": "",
@@ -218,9 +218,9 @@
 	"Continue with Email": "",
 	"Continue with LDAP": "",
 	"Control how message text is split for TTS requests. 'Punctuation' splits into sentences, 'paragraphs' splits into paragraphs, and 'none' keeps the message as a single string.": "",
-	"Control the repetition of token sequences in the generated text. A higher value (e.g., 1.5) will penalize repetitions more strongly, while a lower value (e.g., 1.1) will be more lenient. At 1, it is disabled. (Default: 1.1)": "",
+	"Control the repetition of token sequences in the generated text. A higher value (e.g., 1.5) will penalize repetitions more strongly, while a lower value (e.g., 1.1) will be more lenient. At 1, it is disabled.": "",
 	"Controls": "",
-	"Controls the balance between coherence and diversity of the output. A lower value will result in more focused and coherent text. (Default: 5.0)": "",
+	"Controls the balance between coherence and diversity of the output. A lower value will result in more focused and coherent text.": "",
 	"Copied": "",
 	"Copied shared chat URL to clipboard!": "Kopierad delad chatt-URL till urklipp!",
 	"Copied to clipboard": "",
@@ -358,7 +358,7 @@
 	"Enable Memory Locking (mlock) to prevent model data from being swapped out of RAM. This option locks the model's working set of pages into RAM, ensuring that they will not be swapped out to disk. This can help maintain performance by avoiding page faults and ensuring fast data access.": "",
 	"Enable Memory Mapping (mmap) to load model data. This option allows the system to use disk storage as an extension of RAM by treating disk files as if they were in RAM. This can improve model performance by allowing for faster data access. However, it may not work correctly with all systems and can consume a significant amount of disk space.": "",
 	"Enable Message Rating": "",
-	"Enable Mirostat sampling for controlling perplexity. (Default: 0, 0 = Disabled, 1 = Mirostat, 2 = Mirostat 2.0)": "",
+	"Enable Mirostat sampling for controlling perplexity.": "",
 	"Enable New Sign Ups": "Aktivera nya registreringar",
 	"Enabled": "",
 	"Ensure your CSV file includes 4 columns in this order: Name, Email, Password, Role.": "Se till att din CSV-fil innehåller fyra kolumner i denna ordning: Name, Email, Password, Role.",
@@ -566,7 +566,7 @@
 	"Include": "",
 	"Include `--api-auth` flag when running stable-diffusion-webui": "",
 	"Include `--api` flag when running stable-diffusion-webui": "Inkludera flaggan `--api` när du kör stable-diffusion-webui",
-	"Influences how quickly the algorithm responds to feedback from the generated text. A lower learning rate will result in slower adjustments, while a higher learning rate will make the algorithm more responsive. (Default: 0.1)": "",
+	"Influences how quickly the algorithm responds to feedback from the generated text. A lower learning rate will result in slower adjustments, while a higher learning rate will make the algorithm more responsive.": "",
 	"Info": "Information",
 	"Input commands": "Indatakommandon",
 	"Install from Github URL": "Installera från Github-URL",
@@ -809,7 +809,7 @@
 	"Reasoning Effort": "",
 	"Record voice": "Spela in röst",
 	"Redirecting you to Open WebUI Community": "Omdirigerar dig till OpenWebUI Community",
-	"Reduces the probability of generating nonsense. A higher value (e.g. 100) will give more diverse answers, while a lower value (e.g. 10) will be more conservative. (Default: 40)": "",
+	"Reduces the probability of generating nonsense. A higher value (e.g. 100) will give more diverse answers, while a lower value (e.g. 10) will be more conservative.": "",
 	"Refer to yourself as \"User\" (e.g., \"User is learning Spanish\")": "Referera till dig själv som \"Användare\" (t.ex. \"Användaren lär sig spanska\")",
 	"References from": "",
 	"Refused when it shouldn't have": "Avvisades när det inte borde ha gjort det",
@@ -918,11 +918,11 @@
 	"Set the number of worker threads used for computation. This option controls how many threads are used to process incoming requests concurrently. Increasing this value can improve performance under high concurrency workloads but may also consume more CPU resources.": "",
 	"Set Voice": "Ange röst",
 	"Set whisper model": "",
-	"Sets a flat bias against tokens that have appeared at least once. A higher value (e.g., 1.5) will penalize repetitions more strongly, while a lower value (e.g., 0.9) will be more lenient. At 0, it is disabled. (Default: 0)": "",
-	"Sets a scaling bias against tokens to penalize repetitions, based on how many times they have appeared. A higher value (e.g., 1.5) will penalize repetitions more strongly, while a lower value (e.g., 0.9) will be more lenient. At 0, it is disabled. (Default: 1.1)": "",
-	"Sets how far back for the model to look back to prevent repetition. (Default: 64, 0 = disabled, -1 = num_ctx)": "",
-	"Sets the random number seed to use for generation. Setting this to a specific number will make the model generate the same text for the same prompt. (Default: random)": "",
-	"Sets the size of the context window used to generate the next token. (Default: 2048)": "",
+	"Sets a flat bias against tokens that have appeared at least once. A higher value (e.g., 1.5) will penalize repetitions more strongly, while a lower value (e.g., 0.9) will be more lenient. At 0, it is disabled.": "",
+	"Sets a scaling bias against tokens to penalize repetitions, based on how many times they have appeared. A higher value (e.g., 1.5) will penalize repetitions more strongly, while a lower value (e.g., 0.9) will be more lenient. At 0, it is disabled.": "",
+	"Sets how far back for the model to look back to prevent repetition.": "",
+	"Sets the random number seed to use for generation. Setting this to a specific number will make the model generate the same text for the same prompt.": "",
+	"Sets the size of the context window used to generate the next token.": "",
 	"Sets the stop sequences to use. When this pattern is encountered, the LLM will stop generating text and return. Multiple stop patterns may be set by specifying multiple separate stop parameters in a modelfile.": "",
 	"Settings": "Inställningar",
 	"Settings saved successfully!": "Inställningar sparades framgångsrikt!",
@@ -964,7 +964,7 @@
 	"System Prompt": "Systeminstruktion",
 	"Tags Generation": "",
 	"Tags Generation Prompt": "",
-	"Tail free sampling is used to reduce the impact of less probable tokens from the output. A higher value (e.g., 2.0) will reduce the impact more, while a value of 1.0 disables this setting. (default: 1)": "Tail free sampling används för att minska effekten av mindre sannolika tokens i resultatet. Ett högre värde (t.ex. 2,0) minskar effekten mer, medan ett värde på 1,0 inaktiverar den här inställningen. (standard: 1)",
+	"Tail free sampling is used to reduce the impact of less probable tokens from the output. A higher value (e.g., 2.0) will reduce the impact more, while a value of 1.0 disables this setting.": "",
 	"Talk to model": "",
 	"Tap to interrupt": "",
 	"Tasks": "",
@@ -979,7 +979,7 @@
 	"Thanks for your feedback!": "Tack för din feedback!",
 	"The Application Account DN you bind with for search": "",
 	"The base to search for users": "",
-	"The batch size determines how many text requests are processed together at once. A higher batch size can increase the performance and speed of the model, but it also requires more memory.  (Default: 512)": "Batchstorleken avgör hur många textförfrågningar som behandlas samtidigt. En högre batchstorlek kan öka modellens prestanda och hastighet, men kräver också mer minne.  (Standard: 512)",
+	"The batch size determines how many text requests are processed together at once. A higher batch size can increase the performance and speed of the model, but it also requires more memory.": "",
 	"The developers behind this plugin are passionate volunteers from the community. If you find this plugin helpful, please consider contributing to its development.": "",
 	"The evaluation leaderboard is based on the Elo rating system and is updated in real-time.": "Utvärderingens topplista är baserad på Elo-betygssystemet och uppdateras i realtid",
 	"The LDAP attribute that maps to the mail that users use to sign in.": "",
@@ -988,14 +988,14 @@
 	"The maximum file size in MB. If the file size exceeds this limit, the file will not be uploaded.": "",
 	"The maximum number of files that can be used at once in chat. If the number of files exceeds this limit, the files will not be uploaded.": "",
 	"The score should be a value between 0.0 (0%) and 1.0 (100%).": "Betyget ska vara ett värde mellan 0.0 (0%) och 1.0 (100%).",
-	"The temperature of the model. Increasing the temperature will make the model answer more creatively. (Default: 0.8)": "Modellens temperatur. Om temperaturen höjs kommer modellen att svara mer kreativt. (Standard: 0,8)",
+	"The temperature of the model. Increasing the temperature will make the model answer more creatively.": "",
 	"Theme": "Tema",
 	"Thinking...": "Tänker...",
 	"This action cannot be undone. Do you wish to continue?": "Denna åtgärd kan inte ångras. Vill du fortsätta?",
 	"This ensures that your valuable conversations are securely saved to your backend database. Thank you!": "Detta säkerställer att dina värdefulla samtal sparas säkert till din backend-databas. Tack!",
 	"This is an experimental feature, it may not function as expected and is subject to change at any time.": "Detta är en experimentell funktion som kanske inte fungerar som förväntat och som kan komma att ändras när som helst.",
-	"This option controls how many tokens are preserved when refreshing the context. For example, if set to 2, the last 2 tokens of the conversation context will be retained. Preserving context can help maintain the continuity of a conversation, but it may reduce the ability to respond to new topics. (Default: 24)": "Det här alternativet styr hur många tokens som ska bevaras när kontexten uppdateras. Om det t.ex. är inställt på 2 bevaras de senaste 2 tokens i konversationskontexten. Att bevara kontexten kan bidra till att upprätthålla kontinuiteten i en konversation, men det kan minska möjligheten att svara på nya ämnen. (Standard: 24)",
-	"This option sets the maximum number of tokens the model can generate in its response. Increasing this limit allows the model to provide longer answers, but it may also increase the likelihood of unhelpful or irrelevant content being generated.  (Default: 128)": "Det här alternativet anger det maximala antalet tokens som modellen kan generera i sitt svar. Om du ökar denna gräns kan modellen ge längre svar, men det kan också öka sannolikheten för att ohjälpsamt eller irrelevant innehåll genereras.  (Standard: 128)",
+	"This option controls how many tokens are preserved when refreshing the context. For example, if set to 2, the last 2 tokens of the conversation context will be retained. Preserving context can help maintain the continuity of a conversation, but it may reduce the ability to respond to new topics.": "",
+	"This option sets the maximum number of tokens the model can generate in its response. Increasing this limit allows the model to provide longer answers, but it may also increase the likelihood of unhelpful or irrelevant content being generated.": "",
 	"This option will delete all existing files in the collection and replace them with newly uploaded files.": "",
 	"This response was generated by \"{{model}}\"": "",
 	"This will delete": "",
@@ -1132,7 +1132,7 @@
 	"Why?": "Varför?",
 	"Widescreen Mode": "Bredbildsläge",
 	"Won": "Vann",
-	"Works together with top-k. A higher value (e.g., 0.95) will lead to more diverse text, while a lower value (e.g., 0.5) will generate more focused and conservative text. (Default: 0.9)": "Fungerar tillsammans med top-k. Ett högre värde (t.ex. 0,95) leder till en mer varierad text, medan ett lägre värde (t.ex. 0,5) ger en mer fokuserad och konservativ text. (Standard: 0,9)",
+	"Works together with top-k. A higher value (e.g., 0.95) will lead to more diverse text, while a lower value (e.g., 0.5) will generate more focused and conservative text.": "",
 	"Workspace": "Arbetsyta",
 	"Workspace Permissions": "",
 	"Write": "Skriv",

+ 18 - 18
src/lib/i18n/locales/th-TH/translation.json

@@ -64,7 +64,7 @@
 	"Allow Voice Interruption in Call": "อนุญาตการแทรกเสียงในสาย",
 	"Allowed Endpoints": "",
 	"Already have an account?": "มีบัญชีอยู่แล้ว?",
-	"Alternative to the top_p, and aims to ensure a balance of quality and variety. The parameter p represents the minimum probability for a token to be considered, relative to the probability of the most likely token. For example, with p=0.05 and the most likely token having a probability of 0.9, logits with a value less than 0.045 are filtered out. (Default: 0.0)": "",
+	"Alternative to the top_p, and aims to ensure a balance of quality and variety. The parameter p represents the minimum probability for a token to be considered, relative to the probability of the most likely token. For example, with p=0.05 and the most likely token having a probability of 0.9, logits with a value less than 0.045 are filtered out.": "",
 	"Always": "",
 	"Amazing": "",
 	"an assistant": "ผู้ช่วย",
@@ -208,7 +208,7 @@
 	"Confirm your new password": "",
 	"Connect to your own OpenAI compatible API endpoints.": "",
 	"Connections": "การเชื่อมต่อ",
-	"Constrains effort on reasoning for reasoning models. Only applicable to reasoning models from specific providers that support reasoning effort. (Default: medium)": "",
+	"Constrains effort on reasoning for reasoning models. Only applicable to reasoning models from specific providers that support reasoning effort.": "",
 	"Contact Admin for WebUI Access": "ติดต่อผู้ดูแลระบบสำหรับการเข้าถึง WebUI",
 	"Content": "เนื้อหา",
 	"Content Extraction Engine": "",
@@ -218,9 +218,9 @@
 	"Continue with Email": "",
 	"Continue with LDAP": "",
 	"Control how message text is split for TTS requests. 'Punctuation' splits into sentences, 'paragraphs' splits into paragraphs, and 'none' keeps the message as a single string.": "",
-	"Control the repetition of token sequences in the generated text. A higher value (e.g., 1.5) will penalize repetitions more strongly, while a lower value (e.g., 1.1) will be more lenient. At 1, it is disabled. (Default: 1.1)": "",
+	"Control the repetition of token sequences in the generated text. A higher value (e.g., 1.5) will penalize repetitions more strongly, while a lower value (e.g., 1.1) will be more lenient. At 1, it is disabled.": "",
 	"Controls": "การควบคุม",
-	"Controls the balance between coherence and diversity of the output. A lower value will result in more focused and coherent text. (Default: 5.0)": "",
+	"Controls the balance between coherence and diversity of the output. A lower value will result in more focused and coherent text.": "",
 	"Copied": "",
 	"Copied shared chat URL to clipboard!": "คัดลอก URL แชทที่แชร์ไปยังคลิปบอร์ดแล้ว!",
 	"Copied to clipboard": "",
@@ -358,7 +358,7 @@
 	"Enable Memory Locking (mlock) to prevent model data from being swapped out of RAM. This option locks the model's working set of pages into RAM, ensuring that they will not be swapped out to disk. This can help maintain performance by avoiding page faults and ensuring fast data access.": "",
 	"Enable Memory Mapping (mmap) to load model data. This option allows the system to use disk storage as an extension of RAM by treating disk files as if they were in RAM. This can improve model performance by allowing for faster data access. However, it may not work correctly with all systems and can consume a significant amount of disk space.": "",
 	"Enable Message Rating": "",
-	"Enable Mirostat sampling for controlling perplexity. (Default: 0, 0 = Disabled, 1 = Mirostat, 2 = Mirostat 2.0)": "",
+	"Enable Mirostat sampling for controlling perplexity.": "",
 	"Enable New Sign Ups": "เปิดใช้งานการสมัครใหม่",
 	"Enabled": "เปิดใช้งาน",
 	"Ensure your CSV file includes 4 columns in this order: Name, Email, Password, Role.": "ตรวจสอบว่าไฟล์ CSV ของคุณมี 4 คอลัมน์ในลำดับนี้: ชื่อ, อีเมล, รหัสผ่าน, บทบาท",
@@ -566,7 +566,7 @@
 	"Include": "",
 	"Include `--api-auth` flag when running stable-diffusion-webui": "รวมแฟลก `--api-auth` เมื่อเรียกใช้ stable-diffusion-webui",
 	"Include `--api` flag when running stable-diffusion-webui": "รวมแฟลก `--api` เมื่อเรียกใช้ stable-diffusion-webui",
-	"Influences how quickly the algorithm responds to feedback from the generated text. A lower learning rate will result in slower adjustments, while a higher learning rate will make the algorithm more responsive. (Default: 0.1)": "",
+	"Influences how quickly the algorithm responds to feedback from the generated text. A lower learning rate will result in slower adjustments, while a higher learning rate will make the algorithm more responsive.": "",
 	"Info": "ข้อมูล",
 	"Input commands": "คำสั่งป้อนข้อมูล",
 	"Install from Github URL": "ติดตั้งจาก URL ของ Github",
@@ -809,7 +809,7 @@
 	"Reasoning Effort": "",
 	"Record voice": "บันทึกเสียง",
 	"Redirecting you to Open WebUI Community": "กำลังเปลี่ยนเส้นทางคุณไปยังชุมชน OpenWebUI",
-	"Reduces the probability of generating nonsense. A higher value (e.g. 100) will give more diverse answers, while a lower value (e.g. 10) will be more conservative. (Default: 40)": "",
+	"Reduces the probability of generating nonsense. A higher value (e.g. 100) will give more diverse answers, while a lower value (e.g. 10) will be more conservative.": "",
 	"Refer to yourself as \"User\" (e.g., \"User is learning Spanish\")": "เรียกตัวเองว่า \"ผู้ใช้\" (เช่น \"ผู้ใช้กำลังเรียนภาษาสเปน\")",
 	"References from": "",
 	"Refused when it shouldn't have": "ปฏิเสธเมื่อไม่ควรทำ",
@@ -918,11 +918,11 @@
 	"Set the number of worker threads used for computation. This option controls how many threads are used to process incoming requests concurrently. Increasing this value can improve performance under high concurrency workloads but may also consume more CPU resources.": "",
 	"Set Voice": "ตั้งค่าเสียง",
 	"Set whisper model": "",
-	"Sets a flat bias against tokens that have appeared at least once. A higher value (e.g., 1.5) will penalize repetitions more strongly, while a lower value (e.g., 0.9) will be more lenient. At 0, it is disabled. (Default: 0)": "",
-	"Sets a scaling bias against tokens to penalize repetitions, based on how many times they have appeared. A higher value (e.g., 1.5) will penalize repetitions more strongly, while a lower value (e.g., 0.9) will be more lenient. At 0, it is disabled. (Default: 1.1)": "",
-	"Sets how far back for the model to look back to prevent repetition. (Default: 64, 0 = disabled, -1 = num_ctx)": "",
-	"Sets the random number seed to use for generation. Setting this to a specific number will make the model generate the same text for the same prompt. (Default: random)": "",
-	"Sets the size of the context window used to generate the next token. (Default: 2048)": "",
+	"Sets a flat bias against tokens that have appeared at least once. A higher value (e.g., 1.5) will penalize repetitions more strongly, while a lower value (e.g., 0.9) will be more lenient. At 0, it is disabled.": "",
+	"Sets a scaling bias against tokens to penalize repetitions, based on how many times they have appeared. A higher value (e.g., 1.5) will penalize repetitions more strongly, while a lower value (e.g., 0.9) will be more lenient. At 0, it is disabled.": "",
+	"Sets how far back for the model to look back to prevent repetition.": "",
+	"Sets the random number seed to use for generation. Setting this to a specific number will make the model generate the same text for the same prompt.": "",
+	"Sets the size of the context window used to generate the next token.": "",
 	"Sets the stop sequences to use. When this pattern is encountered, the LLM will stop generating text and return. Multiple stop patterns may be set by specifying multiple separate stop parameters in a modelfile.": "",
 	"Settings": "การตั้งค่า",
 	"Settings saved successfully!": "บันทึกการตั้งค่าเรียบร้อยแล้ว!",
@@ -964,7 +964,7 @@
 	"System Prompt": "ระบบพรอมต์",
 	"Tags Generation": "",
 	"Tags Generation Prompt": "",
-	"Tail free sampling is used to reduce the impact of less probable tokens from the output. A higher value (e.g., 2.0) will reduce the impact more, while a value of 1.0 disables this setting. (default: 1)": "",
+	"Tail free sampling is used to reduce the impact of less probable tokens from the output. A higher value (e.g., 2.0) will reduce the impact more, while a value of 1.0 disables this setting.": "",
 	"Talk to model": "",
 	"Tap to interrupt": "แตะเพื่อขัดจังหวะ",
 	"Tasks": "",
@@ -979,7 +979,7 @@
 	"Thanks for your feedback!": "ขอบคุณสำหรับความคิดเห็นของคุณ!",
 	"The Application Account DN you bind with for search": "",
 	"The base to search for users": "",
-	"The batch size determines how many text requests are processed together at once. A higher batch size can increase the performance and speed of the model, but it also requires more memory.  (Default: 512)": "",
+	"The batch size determines how many text requests are processed together at once. A higher batch size can increase the performance and speed of the model, but it also requires more memory.": "",
 	"The developers behind this plugin are passionate volunteers from the community. If you find this plugin helpful, please consider contributing to its development.": "นักพัฒนาที่อยู่เบื้องหลังปลั๊กอินนี้เป็นอาสาสมัครที่มีชื่นชอบการแบ่งบัน หากคุณพบว่าปลั๊กอินนี้มีประโยชน์ โปรดพิจารณาสนับสนุนการพัฒนาของเขา",
 	"The evaluation leaderboard is based on the Elo rating system and is updated in real-time.": "",
 	"The LDAP attribute that maps to the mail that users use to sign in.": "",
@@ -988,14 +988,14 @@
 	"The maximum file size in MB. If the file size exceeds this limit, the file will not be uploaded.": "",
 	"The maximum number of files that can be used at once in chat. If the number of files exceeds this limit, the files will not be uploaded.": "",
 	"The score should be a value between 0.0 (0%) and 1.0 (100%).": "คะแนนควรอยู่ระหว่าง 0.0 (0%) ถึง 1.0 (100%)",
-	"The temperature of the model. Increasing the temperature will make the model answer more creatively. (Default: 0.8)": "",
+	"The temperature of the model. Increasing the temperature will make the model answer more creatively.": "",
 	"Theme": "ธีม",
 	"Thinking...": "กำลังคิด...",
 	"This action cannot be undone. Do you wish to continue?": "การกระทำนี้ไม่สามารถย้อนกลับได้ คุณต้องการดำเนินการต่อหรือไม่?",
 	"This ensures that your valuable conversations are securely saved to your backend database. Thank you!": "สิ่งนี้ทำให้มั่นใจได้ว่าการสนทนาที่มีค่าของคุณจะถูกบันทึกอย่างปลอดภัยในฐานข้อมูลแบ็กเอนด์ของคุณ ขอบคุณ!",
 	"This is an experimental feature, it may not function as expected and is subject to change at any time.": "นี่เป็นฟีเจอร์ทดลอง อาจไม่ทำงานตามที่คาดไว้และอาจมีการเปลี่ยนแปลงได้ตลอดเวลา",
-	"This option controls how many tokens are preserved when refreshing the context. For example, if set to 2, the last 2 tokens of the conversation context will be retained. Preserving context can help maintain the continuity of a conversation, but it may reduce the ability to respond to new topics. (Default: 24)": "",
-	"This option sets the maximum number of tokens the model can generate in its response. Increasing this limit allows the model to provide longer answers, but it may also increase the likelihood of unhelpful or irrelevant content being generated.  (Default: 128)": "",
+	"This option controls how many tokens are preserved when refreshing the context. For example, if set to 2, the last 2 tokens of the conversation context will be retained. Preserving context can help maintain the continuity of a conversation, but it may reduce the ability to respond to new topics.": "",
+	"This option sets the maximum number of tokens the model can generate in its response. Increasing this limit allows the model to provide longer answers, but it may also increase the likelihood of unhelpful or irrelevant content being generated.": "",
 	"This option will delete all existing files in the collection and replace them with newly uploaded files.": "",
 	"This response was generated by \"{{model}}\"": "",
 	"This will delete": "สิ่งนี้จะลบ",
@@ -1132,7 +1132,7 @@
 	"Why?": "",
 	"Widescreen Mode": "โหมดหน้าจอกว้าง",
 	"Won": "",
-	"Works together with top-k. A higher value (e.g., 0.95) will lead to more diverse text, while a lower value (e.g., 0.5) will generate more focused and conservative text. (Default: 0.9)": "",
+	"Works together with top-k. A higher value (e.g., 0.95) will lead to more diverse text, while a lower value (e.g., 0.5) will generate more focused and conservative text.": "",
 	"Workspace": "พื้นที่ทำงาน",
 	"Workspace Permissions": "",
 	"Write": "",

+ 18 - 18
src/lib/i18n/locales/tk-TW/translation.json

@@ -64,7 +64,7 @@
 	"Allow Voice Interruption in Call": "",
 	"Allowed Endpoints": "",
 	"Already have an account?": "",
-	"Alternative to the top_p, and aims to ensure a balance of quality and variety. The parameter p represents the minimum probability for a token to be considered, relative to the probability of the most likely token. For example, with p=0.05 and the most likely token having a probability of 0.9, logits with a value less than 0.045 are filtered out. (Default: 0.0)": "",
+	"Alternative to the top_p, and aims to ensure a balance of quality and variety. The parameter p represents the minimum probability for a token to be considered, relative to the probability of the most likely token. For example, with p=0.05 and the most likely token having a probability of 0.9, logits with a value less than 0.045 are filtered out.": "",
 	"Always": "",
 	"Amazing": "",
 	"an assistant": "",
@@ -208,7 +208,7 @@
 	"Confirm your new password": "",
 	"Connect to your own OpenAI compatible API endpoints.": "",
 	"Connections": "",
-	"Constrains effort on reasoning for reasoning models. Only applicable to reasoning models from specific providers that support reasoning effort. (Default: medium)": "",
+	"Constrains effort on reasoning for reasoning models. Only applicable to reasoning models from specific providers that support reasoning effort.": "",
 	"Contact Admin for WebUI Access": "",
 	"Content": "",
 	"Content Extraction Engine": "",
@@ -218,9 +218,9 @@
 	"Continue with Email": "",
 	"Continue with LDAP": "",
 	"Control how message text is split for TTS requests. 'Punctuation' splits into sentences, 'paragraphs' splits into paragraphs, and 'none' keeps the message as a single string.": "",
-	"Control the repetition of token sequences in the generated text. A higher value (e.g., 1.5) will penalize repetitions more strongly, while a lower value (e.g., 1.1) will be more lenient. At 1, it is disabled. (Default: 1.1)": "",
+	"Control the repetition of token sequences in the generated text. A higher value (e.g., 1.5) will penalize repetitions more strongly, while a lower value (e.g., 1.1) will be more lenient. At 1, it is disabled.": "",
 	"Controls": "",
-	"Controls the balance between coherence and diversity of the output. A lower value will result in more focused and coherent text. (Default: 5.0)": "",
+	"Controls the balance between coherence and diversity of the output. A lower value will result in more focused and coherent text.": "",
 	"Copied": "",
 	"Copied shared chat URL to clipboard!": "",
 	"Copied to clipboard": "",
@@ -358,7 +358,7 @@
 	"Enable Memory Locking (mlock) to prevent model data from being swapped out of RAM. This option locks the model's working set of pages into RAM, ensuring that they will not be swapped out to disk. This can help maintain performance by avoiding page faults and ensuring fast data access.": "",
 	"Enable Memory Mapping (mmap) to load model data. This option allows the system to use disk storage as an extension of RAM by treating disk files as if they were in RAM. This can improve model performance by allowing for faster data access. However, it may not work correctly with all systems and can consume a significant amount of disk space.": "",
 	"Enable Message Rating": "",
-	"Enable Mirostat sampling for controlling perplexity. (Default: 0, 0 = Disabled, 1 = Mirostat, 2 = Mirostat 2.0)": "",
+	"Enable Mirostat sampling for controlling perplexity.": "",
 	"Enable New Sign Ups": "",
 	"Enabled": "",
 	"Ensure your CSV file includes 4 columns in this order: Name, Email, Password, Role.": "",
@@ -566,7 +566,7 @@
 	"Include": "",
 	"Include `--api-auth` flag when running stable-diffusion-webui": "",
 	"Include `--api` flag when running stable-diffusion-webui": "",
-	"Influences how quickly the algorithm responds to feedback from the generated text. A lower learning rate will result in slower adjustments, while a higher learning rate will make the algorithm more responsive. (Default: 0.1)": "",
+	"Influences how quickly the algorithm responds to feedback from the generated text. A lower learning rate will result in slower adjustments, while a higher learning rate will make the algorithm more responsive.": "",
 	"Info": "",
 	"Input commands": "",
 	"Install from Github URL": "",
@@ -809,7 +809,7 @@
 	"Reasoning Effort": "",
 	"Record voice": "",
 	"Redirecting you to Open WebUI Community": "",
-	"Reduces the probability of generating nonsense. A higher value (e.g. 100) will give more diverse answers, while a lower value (e.g. 10) will be more conservative. (Default: 40)": "",
+	"Reduces the probability of generating nonsense. A higher value (e.g. 100) will give more diverse answers, while a lower value (e.g. 10) will be more conservative.": "",
 	"Refer to yourself as \"User\" (e.g., \"User is learning Spanish\")": "",
 	"References from": "",
 	"Refused when it shouldn't have": "",
@@ -918,11 +918,11 @@
 	"Set the number of worker threads used for computation. This option controls how many threads are used to process incoming requests concurrently. Increasing this value can improve performance under high concurrency workloads but may also consume more CPU resources.": "",
 	"Set Voice": "",
 	"Set whisper model": "",
-	"Sets a flat bias against tokens that have appeared at least once. A higher value (e.g., 1.5) will penalize repetitions more strongly, while a lower value (e.g., 0.9) will be more lenient. At 0, it is disabled. (Default: 0)": "",
-	"Sets a scaling bias against tokens to penalize repetitions, based on how many times they have appeared. A higher value (e.g., 1.5) will penalize repetitions more strongly, while a lower value (e.g., 0.9) will be more lenient. At 0, it is disabled. (Default: 1.1)": "",
-	"Sets how far back for the model to look back to prevent repetition. (Default: 64, 0 = disabled, -1 = num_ctx)": "",
-	"Sets the random number seed to use for generation. Setting this to a specific number will make the model generate the same text for the same prompt. (Default: random)": "",
-	"Sets the size of the context window used to generate the next token. (Default: 2048)": "",
+	"Sets a flat bias against tokens that have appeared at least once. A higher value (e.g., 1.5) will penalize repetitions more strongly, while a lower value (e.g., 0.9) will be more lenient. At 0, it is disabled.": "",
+	"Sets a scaling bias against tokens to penalize repetitions, based on how many times they have appeared. A higher value (e.g., 1.5) will penalize repetitions more strongly, while a lower value (e.g., 0.9) will be more lenient. At 0, it is disabled.": "",
+	"Sets how far back for the model to look back to prevent repetition.": "",
+	"Sets the random number seed to use for generation. Setting this to a specific number will make the model generate the same text for the same prompt.": "",
+	"Sets the size of the context window used to generate the next token.": "",
 	"Sets the stop sequences to use. When this pattern is encountered, the LLM will stop generating text and return. Multiple stop patterns may be set by specifying multiple separate stop parameters in a modelfile.": "",
 	"Settings": "",
 	"Settings saved successfully!": "",
@@ -964,7 +964,7 @@
 	"System Prompt": "",
 	"Tags Generation": "",
 	"Tags Generation Prompt": "",
-	"Tail free sampling is used to reduce the impact of less probable tokens from the output. A higher value (e.g., 2.0) will reduce the impact more, while a value of 1.0 disables this setting. (default: 1)": "",
+	"Tail free sampling is used to reduce the impact of less probable tokens from the output. A higher value (e.g., 2.0) will reduce the impact more, while a value of 1.0 disables this setting.": "",
 	"Talk to model": "",
 	"Tap to interrupt": "",
 	"Tasks": "",
@@ -979,7 +979,7 @@
 	"Thanks for your feedback!": "",
 	"The Application Account DN you bind with for search": "",
 	"The base to search for users": "",
-	"The batch size determines how many text requests are processed together at once. A higher batch size can increase the performance and speed of the model, but it also requires more memory.  (Default: 512)": "",
+	"The batch size determines how many text requests are processed together at once. A higher batch size can increase the performance and speed of the model, but it also requires more memory.": "",
 	"The developers behind this plugin are passionate volunteers from the community. If you find this plugin helpful, please consider contributing to its development.": "",
 	"The evaluation leaderboard is based on the Elo rating system and is updated in real-time.": "",
 	"The LDAP attribute that maps to the mail that users use to sign in.": "",
@@ -988,14 +988,14 @@
 	"The maximum file size in MB. If the file size exceeds this limit, the file will not be uploaded.": "",
 	"The maximum number of files that can be used at once in chat. If the number of files exceeds this limit, the files will not be uploaded.": "",
 	"The score should be a value between 0.0 (0%) and 1.0 (100%).": "",
-	"The temperature of the model. Increasing the temperature will make the model answer more creatively. (Default: 0.8)": "",
+	"The temperature of the model. Increasing the temperature will make the model answer more creatively.": "",
 	"Theme": "",
 	"Thinking...": "",
 	"This action cannot be undone. Do you wish to continue?": "",
 	"This ensures that your valuable conversations are securely saved to your backend database. Thank you!": "",
 	"This is an experimental feature, it may not function as expected and is subject to change at any time.": "",
-	"This option controls how many tokens are preserved when refreshing the context. For example, if set to 2, the last 2 tokens of the conversation context will be retained. Preserving context can help maintain the continuity of a conversation, but it may reduce the ability to respond to new topics. (Default: 24)": "",
-	"This option sets the maximum number of tokens the model can generate in its response. Increasing this limit allows the model to provide longer answers, but it may also increase the likelihood of unhelpful or irrelevant content being generated.  (Default: 128)": "",
+	"This option controls how many tokens are preserved when refreshing the context. For example, if set to 2, the last 2 tokens of the conversation context will be retained. Preserving context can help maintain the continuity of a conversation, but it may reduce the ability to respond to new topics.": "",
+	"This option sets the maximum number of tokens the model can generate in its response. Increasing this limit allows the model to provide longer answers, but it may also increase the likelihood of unhelpful or irrelevant content being generated.": "",
 	"This option will delete all existing files in the collection and replace them with newly uploaded files.": "",
 	"This response was generated by \"{{model}}\"": "",
 	"This will delete": "",
@@ -1132,7 +1132,7 @@
 	"Why?": "",
 	"Widescreen Mode": "",
 	"Won": "",
-	"Works together with top-k. A higher value (e.g., 0.95) will lead to more diverse text, while a lower value (e.g., 0.5) will generate more focused and conservative text. (Default: 0.9)": "",
+	"Works together with top-k. A higher value (e.g., 0.95) will lead to more diverse text, while a lower value (e.g., 0.5) will generate more focused and conservative text.": "",
 	"Workspace": "",
 	"Workspace Permissions": "",
 	"Write": "",

+ 18 - 18
src/lib/i18n/locales/tr-TR/translation.json

@@ -64,7 +64,7 @@
 	"Allow Voice Interruption in Call": "Aramada Ses Kesintisine İzin Ver",
 	"Allowed Endpoints": "İzin Verilen Uç Noktalar",
 	"Already have an account?": "Zaten bir hesabınız mı var?",
-	"Alternative to the top_p, and aims to ensure a balance of quality and variety. The parameter p represents the minimum probability for a token to be considered, relative to the probability of the most likely token. For example, with p=0.05 and the most likely token having a probability of 0.9, logits with a value less than 0.045 are filtered out. (Default: 0.0)": "top_p'ye bir alternatif ve kalite ile çeşitlilik arasında bir denge sağlamayı amaçlar. p parametresi, en olası tokenin olasılığına göre, bir tokenin dikkate alınması için minimum olasılığı temsil eder. Örneğin, p=0.05 ve en olası tokenin 0.9 olasılığı ile 0.045'ten küçük bir değere sahip logitler filtrelenir. (Varsayılan: 0.0)",
+	"Alternative to the top_p, and aims to ensure a balance of quality and variety. The parameter p represents the minimum probability for a token to be considered, relative to the probability of the most likely token. For example, with p=0.05 and the most likely token having a probability of 0.9, logits with a value less than 0.045 are filtered out.": "",
 	"Always": "",
 	"Amazing": "Harika",
 	"an assistant": "bir asistan",
@@ -208,7 +208,7 @@
 	"Confirm your new password": "Yeni parolanızı onaylayın",
 	"Connect to your own OpenAI compatible API endpoints.": "",
 	"Connections": "Bağlantılar",
-	"Constrains effort on reasoning for reasoning models. Only applicable to reasoning models from specific providers that support reasoning effort. (Default: medium)": "",
+	"Constrains effort on reasoning for reasoning models. Only applicable to reasoning models from specific providers that support reasoning effort.": "",
 	"Contact Admin for WebUI Access": "WebUI Erişimi için Yöneticiyle İletişime Geçin",
 	"Content": "İçerik",
 	"Content Extraction Engine": "",
@@ -218,9 +218,9 @@
 	"Continue with Email": "E-posta ile devam edin",
 	"Continue with LDAP": "LDAP ile devam edin",
 	"Control how message text is split for TTS requests. 'Punctuation' splits into sentences, 'paragraphs' splits into paragraphs, and 'none' keeps the message as a single string.": "Mesaj metninin TTS istekleri için nasıl bölüneceğini kontrol edin. 'Noktalama' cümlelere, 'paragraflar' paragraflara böler ve 'hiçbiri' mesajı tek bir dize olarak tutar.",
-	"Control the repetition of token sequences in the generated text. A higher value (e.g., 1.5) will penalize repetitions more strongly, while a lower value (e.g., 1.1) will be more lenient. At 1, it is disabled. (Default: 1.1)": "",
+	"Control the repetition of token sequences in the generated text. A higher value (e.g., 1.5) will penalize repetitions more strongly, while a lower value (e.g., 1.1) will be more lenient. At 1, it is disabled.": "",
 	"Controls": "Kontroller",
-	"Controls the balance between coherence and diversity of the output. A lower value will result in more focused and coherent text. (Default: 5.0)": "Çıktının tutarlılığı ve çeşitliliği arasındaki dengeyi kontrol eder. Daha düşük bir değer, daha odaklanmış ve tutarlı bir metinle sonuçlanacaktır. (Varsayılan: 5.0)",
+	"Controls the balance between coherence and diversity of the output. A lower value will result in more focused and coherent text.": "",
 	"Copied": "Kopyalandı",
 	"Copied shared chat URL to clipboard!": "Paylaşılan sohbet URL'si panoya kopyalandı!",
 	"Copied to clipboard": "Panoya kopyalandı",
@@ -358,7 +358,7 @@
 	"Enable Memory Locking (mlock) to prevent model data from being swapped out of RAM. This option locks the model's working set of pages into RAM, ensuring that they will not be swapped out to disk. This can help maintain performance by avoiding page faults and ensuring fast data access.": "",
 	"Enable Memory Mapping (mmap) to load model data. This option allows the system to use disk storage as an extension of RAM by treating disk files as if they were in RAM. This can improve model performance by allowing for faster data access. However, it may not work correctly with all systems and can consume a significant amount of disk space.": "",
 	"Enable Message Rating": "Mesaj Değerlendirmeyi Etkinleştir",
-	"Enable Mirostat sampling for controlling perplexity. (Default: 0, 0 = Disabled, 1 = Mirostat, 2 = Mirostat 2.0)": "Perplexity'yi kontrol etmek için Mirostat örnekleme özelliğini etkinleştirin. (Varsayılan: 0, 0 = Devre Dışı, 1 = Mirostat, 2 = Mirostat 2.0)",
+	"Enable Mirostat sampling for controlling perplexity.": "",
 	"Enable New Sign Ups": "Yeni Kayıtları Etkinleştir",
 	"Enabled": "Etkin",
 	"Ensure your CSV file includes 4 columns in this order: Name, Email, Password, Role.": "CSV dosyanızın şu sırayla 4 sütun içerdiğinden emin olun: İsim, E-posta, Şifre, Rol.",
@@ -566,7 +566,7 @@
 	"Include": "Dahil etmek",
 	"Include `--api-auth` flag when running stable-diffusion-webui": "stable-diffusion-webui çalıştırılırken `--api-auth` bayrağını dahil edin",
 	"Include `--api` flag when running stable-diffusion-webui": "stable-diffusion-webui çalıştırılırken `--api` bayrağını dahil edin",
-	"Influences how quickly the algorithm responds to feedback from the generated text. A lower learning rate will result in slower adjustments, while a higher learning rate will make the algorithm more responsive. (Default: 0.1)": "",
+	"Influences how quickly the algorithm responds to feedback from the generated text. A lower learning rate will result in slower adjustments, while a higher learning rate will make the algorithm more responsive.": "",
 	"Info": "Bilgi",
 	"Input commands": "Giriş komutları",
 	"Install from Github URL": "Github URL'sinden yükleyin",
@@ -809,7 +809,7 @@
 	"Reasoning Effort": "",
 	"Record voice": "Ses kaydı yap",
 	"Redirecting you to Open WebUI Community": "OpenWebUI Topluluğuna yönlendiriliyorsunuz",
-	"Reduces the probability of generating nonsense. A higher value (e.g. 100) will give more diverse answers, while a lower value (e.g. 10) will be more conservative. (Default: 40)": "",
+	"Reduces the probability of generating nonsense. A higher value (e.g. 100) will give more diverse answers, while a lower value (e.g. 10) will be more conservative.": "",
 	"Refer to yourself as \"User\" (e.g., \"User is learning Spanish\")": "Kendinizden \"User\" olarak bahsedin (örneğin, \"User İspanyolca öğreniyor\")",
 	"References from": "Referanslar arasından",
 	"Refused when it shouldn't have": "Reddedilmemesi gerekirken reddedildi",
@@ -918,11 +918,11 @@
 	"Set the number of worker threads used for computation. This option controls how many threads are used to process incoming requests concurrently. Increasing this value can improve performance under high concurrency workloads but may also consume more CPU resources.": "",
 	"Set Voice": "Ses Ayarla",
 	"Set whisper model": "Fısıltı modelini ayarla",
-	"Sets a flat bias against tokens that have appeared at least once. A higher value (e.g., 1.5) will penalize repetitions more strongly, while a lower value (e.g., 0.9) will be more lenient. At 0, it is disabled. (Default: 0)": "",
-	"Sets a scaling bias against tokens to penalize repetitions, based on how many times they have appeared. A higher value (e.g., 1.5) will penalize repetitions more strongly, while a lower value (e.g., 0.9) will be more lenient. At 0, it is disabled. (Default: 1.1)": "",
-	"Sets how far back for the model to look back to prevent repetition. (Default: 64, 0 = disabled, -1 = num_ctx)": "Tekrarları önlemek için modelin ne kadar geriye bakacağını ayarlar. (Varsayılan: 64, 0 = devre dışı, -1 = num_ctx)",
-	"Sets the random number seed to use for generation. Setting this to a specific number will make the model generate the same text for the same prompt. (Default: random)": "Oluşturma için kullanılacak rastgele sayı tohumunu(seed) ayarlar. Bunu belirli bir sayıya ayarlamak, modelin aynı prompt için aynı metni oluşturmasını sağlar. (Varsayılan: rastgele)",
-	"Sets the size of the context window used to generate the next token. (Default: 2048)": "Bir sonraki tokenı oluşturmak için kullanılan bağlam penceresinin boyutunu ayarlar. (Varsayılan: 2048)",
+	"Sets a flat bias against tokens that have appeared at least once. A higher value (e.g., 1.5) will penalize repetitions more strongly, while a lower value (e.g., 0.9) will be more lenient. At 0, it is disabled.": "",
+	"Sets a scaling bias against tokens to penalize repetitions, based on how many times they have appeared. A higher value (e.g., 1.5) will penalize repetitions more strongly, while a lower value (e.g., 0.9) will be more lenient. At 0, it is disabled.": "",
+	"Sets how far back for the model to look back to prevent repetition.": "",
+	"Sets the random number seed to use for generation. Setting this to a specific number will make the model generate the same text for the same prompt.": "",
+	"Sets the size of the context window used to generate the next token.": "",
 	"Sets the stop sequences to use. When this pattern is encountered, the LLM will stop generating text and return. Multiple stop patterns may be set by specifying multiple separate stop parameters in a modelfile.": "Kullanılacak durma dizilerini ayarlar. Bu desenle karşılaşıldığında, LLM metin oluşturmayı durduracak ve geri dönecektir. Birden çok durma deseni, bir modelfile'da birden çok ayrı durma parametresi belirterek ayarlanabilir.",
 	"Settings": "Ayarlar",
 	"Settings saved successfully!": "Ayarlar başarıyla kaydedildi!",
@@ -964,7 +964,7 @@
 	"System Prompt": "Sistem Promptu",
 	"Tags Generation": "Etiketler Oluşturma",
 	"Tags Generation Prompt": "Etiketler Oluşturma Promptu",
-	"Tail free sampling is used to reduce the impact of less probable tokens from the output. A higher value (e.g., 2.0) will reduce the impact more, while a value of 1.0 disables this setting. (default: 1)": "",
+	"Tail free sampling is used to reduce the impact of less probable tokens from the output. A higher value (e.g., 2.0) will reduce the impact more, while a value of 1.0 disables this setting.": "",
 	"Talk to model": "",
 	"Tap to interrupt": "Durdurmak için dokunun",
 	"Tasks": "",
@@ -979,7 +979,7 @@
 	"Thanks for your feedback!": "Geri bildiriminiz için teşekkürler!",
 	"The Application Account DN you bind with for search": "Arama için bağlandığınız Uygulama Hesap DN'si",
 	"The base to search for users": "Kullanıcıları aramak için temel",
-	"The batch size determines how many text requests are processed together at once. A higher batch size can increase the performance and speed of the model, but it also requires more memory.  (Default: 512)": "Toplu boyut, kaç metin isteğinin aynı anda işlendiğini belirler. Daha yüksek bir toplu boyut, modelin performansını ve hızını artırabilir, ancak daha fazla bellek gerektirir. (Varsayılan: 512)",
+	"The batch size determines how many text requests are processed together at once. A higher batch size can increase the performance and speed of the model, but it also requires more memory.": "",
 	"The developers behind this plugin are passionate volunteers from the community. If you find this plugin helpful, please consider contributing to its development.": "Bu eklentinin arkasındaki geliştiriciler topluluktan tutkulu gönüllülerdir. Bu eklentinin yararlı olduğunu düşünüyorsanız, gelişimine katkıda bulunmayı düşünün.",
 	"The evaluation leaderboard is based on the Elo rating system and is updated in real-time.": "",
 	"The LDAP attribute that maps to the mail that users use to sign in.": "",
@@ -988,14 +988,14 @@
 	"The maximum file size in MB. If the file size exceeds this limit, the file will not be uploaded.": "MB cinsinden maksimum dosya boyutu. Dosya boyutu bu sınırı aşarsa, dosya yüklenmeyecektir.",
 	"The maximum number of files that can be used at once in chat. If the number of files exceeds this limit, the files will not be uploaded.": "Sohbette aynı anda kullanılabilecek maksimum dosya sayısı. Dosya sayısı bu sınırı aşarsa, dosyalar yüklenmeyecektir.",
 	"The score should be a value between 0.0 (0%) and 1.0 (100%).": "Puan 0.0 (%0) ile 1.0 (%100) arasında bir değer olmalıdır.",
-	"The temperature of the model. Increasing the temperature will make the model answer more creatively. (Default: 0.8)": "",
+	"The temperature of the model. Increasing the temperature will make the model answer more creatively.": "",
 	"Theme": "Tema",
 	"Thinking...": "Düşünüyor...",
 	"This action cannot be undone. Do you wish to continue?": "Bu eylem geri alınamaz. Devam etmek istiyor musunuz?",
 	"This ensures that your valuable conversations are securely saved to your backend database. Thank you!": "Bu, önemli konuşmalarınızın güvenli bir şekilde arkayüz veritabanınıza kaydedildiğini garantiler. Teşekkür ederiz!",
 	"This is an experimental feature, it may not function as expected and is subject to change at any time.": "Bu deneysel bir özelliktir, beklendiği gibi çalışmayabilir ve her an değişiklik yapılabilir.",
-	"This option controls how many tokens are preserved when refreshing the context. For example, if set to 2, the last 2 tokens of the conversation context will be retained. Preserving context can help maintain the continuity of a conversation, but it may reduce the ability to respond to new topics. (Default: 24)": "",
-	"This option sets the maximum number of tokens the model can generate in its response. Increasing this limit allows the model to provide longer answers, but it may also increase the likelihood of unhelpful or irrelevant content being generated.  (Default: 128)": "",
+	"This option controls how many tokens are preserved when refreshing the context. For example, if set to 2, the last 2 tokens of the conversation context will be retained. Preserving context can help maintain the continuity of a conversation, but it may reduce the ability to respond to new topics.": "",
+	"This option sets the maximum number of tokens the model can generate in its response. Increasing this limit allows the model to provide longer answers, but it may also increase the likelihood of unhelpful or irrelevant content being generated.": "",
 	"This option will delete all existing files in the collection and replace them with newly uploaded files.": "Bu seçenek, koleksiyondaki tüm mevcut dosyaları silecek ve bunları yeni yüklenen dosyalarla değiştirecek.",
 	"This response was generated by \"{{model}}\"": "Bu yanıt \"{{model}}\" tarafından oluşturuldu",
 	"This will delete": "Bu silinecek",
@@ -1132,7 +1132,7 @@
 	"Why?": "Neden?",
 	"Widescreen Mode": "Geniş Ekran Modu",
 	"Won": "kazandı",
-	"Works together with top-k. A higher value (e.g., 0.95) will lead to more diverse text, while a lower value (e.g., 0.5) will generate more focused and conservative text. (Default: 0.9)": "",
+	"Works together with top-k. A higher value (e.g., 0.95) will lead to more diverse text, while a lower value (e.g., 0.5) will generate more focused and conservative text.": "",
 	"Workspace": "Çalışma Alanı",
 	"Workspace Permissions": "Çalışma Alanı İzinleri",
 	"Write": "",

+ 18 - 18
src/lib/i18n/locales/uk-UA/translation.json

@@ -64,7 +64,7 @@
 	"Allow Voice Interruption in Call": "Дозволити переривання голосу під час виклику",
 	"Allowed Endpoints": "Дозволені кінцеві точки",
 	"Already have an account?": "Вже є обліковий запис?",
-	"Alternative to the top_p, and aims to ensure a balance of quality and variety. The parameter p represents the minimum probability for a token to be considered, relative to the probability of the most likely token. For example, with p=0.05 and the most likely token having a probability of 0.9, logits with a value less than 0.045 are filtered out. (Default: 0.0)": "Альтернатива параметру top_p, яка має на меті забезпечити баланс якості та різноманітності. Параметр p представляє мінімальну ймовірність для того, щоб токен був врахований, відносно ймовірності найбільш ймовірного токена. Наприклад, при p=0.05 і найбільш імовірному токені з ймовірністю 0.9, логіти зі значенням менше 0.045 будуть відфільтровані. (За замовчуванням: 0.0)",
+	"Alternative to the top_p, and aims to ensure a balance of quality and variety. The parameter p represents the minimum probability for a token to be considered, relative to the probability of the most likely token. For example, with p=0.05 and the most likely token having a probability of 0.9, logits with a value less than 0.045 are filtered out.": "",
 	"Always": "Завжди",
 	"Amazing": "Чудово",
 	"an assistant": "асистента",
@@ -208,7 +208,7 @@
 	"Confirm your new password": "Підтвердіть свій новий пароль",
 	"Connect to your own OpenAI compatible API endpoints.": "Підключіться до своїх власних API-ендпоінтів, сумісних з OpenAI.",
 	"Connections": "З'єднання",
-	"Constrains effort on reasoning for reasoning models. Only applicable to reasoning models from specific providers that support reasoning effort. (Default: medium)": "Обмежує зусилля на міркування для моделей міркування. Застосовується лише до моделей від певних постачальників, які підтримують зусилля на міркування. (За замовчуванням: середній)",
+	"Constrains effort on reasoning for reasoning models. Only applicable to reasoning models from specific providers that support reasoning effort.": "",
 	"Contact Admin for WebUI Access": "Зверніться до адміна для отримання доступу до WebUI",
 	"Content": "Зміст",
 	"Content Extraction Engine": "",
@@ -218,9 +218,9 @@
 	"Continue with Email": "Продовжити з електронною поштою",
 	"Continue with LDAP": "Продовжити з LDAP",
 	"Control how message text is split for TTS requests. 'Punctuation' splits into sentences, 'paragraphs' splits into paragraphs, and 'none' keeps the message as a single string.": "Керування розбиттям тексту повідомлення для TTS-запитів. 'Punctuation' розбиває на речення, 'paragraphs' розбиває на абзаци, а 'none' залишає повідомлення як один рядок.",
-	"Control the repetition of token sequences in the generated text. A higher value (e.g., 1.5) will penalize repetitions more strongly, while a lower value (e.g., 1.1) will be more lenient. At 1, it is disabled. (Default: 1.1)": "",
+	"Control the repetition of token sequences in the generated text. A higher value (e.g., 1.5) will penalize repetitions more strongly, while a lower value (e.g., 1.1) will be more lenient. At 1, it is disabled.": "",
 	"Controls": "Керування",
-	"Controls the balance between coherence and diversity of the output. A lower value will result in more focused and coherent text. (Default: 5.0)": "Контролює баланс між зв'язністю та різноманітністю виходу. Нижче значення призведе до більш зосередженого та зв'язного тексту. (За замовчуванням: 5.0)",
+	"Controls the balance between coherence and diversity of the output. A lower value will result in more focused and coherent text.": "",
 	"Copied": "Скопійовано",
 	"Copied shared chat URL to clipboard!": "Скопійовано URL-адресу спільного чату в буфер обміну!",
 	"Copied to clipboard": "Скопійовано в буфер обміну",
@@ -358,7 +358,7 @@
 	"Enable Memory Locking (mlock) to prevent model data from being swapped out of RAM. This option locks the model's working set of pages into RAM, ensuring that they will not be swapped out to disk. This can help maintain performance by avoiding page faults and ensuring fast data access.": "Увімкнути блокування пам'яті (mlock), щоб запобігти виведенню даних моделі з оперативної пам'яті. Цей параметр блокує робочий набір сторінок моделі в оперативній пам'яті, гарантуючи, що вони не будуть виведені на диск. Це може допомогти підтримувати продуктивність, уникати помилок сторінок та забезпечувати швидкий доступ до даних.",
 	"Enable Memory Mapping (mmap) to load model data. This option allows the system to use disk storage as an extension of RAM by treating disk files as if they were in RAM. This can improve model performance by allowing for faster data access. However, it may not work correctly with all systems and can consume a significant amount of disk space.": "Увімкнути відображення пам'яті (mmap) для завантаження даних моделі. Цей параметр дозволяє системі використовувати дискове сховище як розширення оперативної пам'яті, трактуючи файли на диску, як ніби вони знаходяться в RAM. Це може покращити продуктивність моделі, дозволяючи швидший доступ до даних. Однак, він може не працювати коректно на всіх системах і може споживати значну кількість дискового простору.",
 	"Enable Message Rating": "Увімкнути оцінку повідомлень",
-	"Enable Mirostat sampling for controlling perplexity. (Default: 0, 0 = Disabled, 1 = Mirostat, 2 = Mirostat 2.0)": "Увімкнути вибірку Mirostat для контролю над непередбачуваністю. (За замовчуванням: 0, 0 = Вимкнено, 1 = Mirostat, 2 = Mirostat 2.0)",
+	"Enable Mirostat sampling for controlling perplexity.": "",
 	"Enable New Sign Ups": "Дозволити нові реєстрації",
 	"Enabled": "Увімкнено",
 	"Ensure your CSV file includes 4 columns in this order: Name, Email, Password, Role.": "Переконайтеся, що ваш CSV-файл містить 4 колонки в такому порядку: Ім'я, Email, Пароль, Роль.",
@@ -566,7 +566,7 @@
 	"Include": "Включити",
 	"Include `--api-auth` flag when running stable-diffusion-webui": "Включіть прапорець `--api-auth` під час запуску stable-diffusion-webui",
 	"Include `--api` flag when running stable-diffusion-webui": "Включіть прапор `--api` при запуску stable-diffusion-webui",
-	"Influences how quickly the algorithm responds to feedback from the generated text. A lower learning rate will result in slower adjustments, while a higher learning rate will make the algorithm more responsive. (Default: 0.1)": "Впливає на швидкість, з якою алгоритм реагує на зворотній зв'язок від згенерованого тексту. Нижча швидкість навчання призведе до повільнішої корекції, тоді як вища швидкість навчання зробить алгоритм більш реакційним. (За замовчуванням: 0.1)",
+	"Influences how quickly the algorithm responds to feedback from the generated text. A lower learning rate will result in slower adjustments, while a higher learning rate will make the algorithm more responsive.": "",
 	"Info": "Інфо",
 	"Input commands": "Команди вводу",
 	"Install from Github URL": "Встановіть з URL-адреси Github",
@@ -809,7 +809,7 @@
 	"Reasoning Effort": "Зусилля на міркування",
 	"Record voice": "Записати голос",
 	"Redirecting you to Open WebUI Community": "Перенаправляємо вас до спільноти OpenWebUI",
-	"Reduces the probability of generating nonsense. A higher value (e.g. 100) will give more diverse answers, while a lower value (e.g. 10) will be more conservative. (Default: 40)": "Знижує ймовірність генерації безглуздих відповідей. Вищі значення (напр., 100) призведуть до більш різноманітних відповідей, тоді як нижчі значення (напр., 10) будуть більш обережними. (За замовчуванням: 40)",
+	"Reduces the probability of generating nonsense. A higher value (e.g. 100) will give more diverse answers, while a lower value (e.g. 10) will be more conservative.": "",
 	"Refer to yourself as \"User\" (e.g., \"User is learning Spanish\")": "Називайте себе \"Користувач\" (напр., \"Користувач вивчає іспанську мову\")",
 	"References from": "Посилання з",
 	"Refused when it shouldn't have": "Відмовив, коли не мав би",
@@ -918,11 +918,11 @@
 	"Set the number of worker threads used for computation. This option controls how many threads are used to process incoming requests concurrently. Increasing this value can improve performance under high concurrency workloads but may also consume more CPU resources.": "Встановити кількість робочих потоків, що використовуються для обробки інформації. Ця опція керує кількістю потоків, що використовуються для обробки надходження запитів одночасно. Збільшення цього значення може підвищити продуктивність при великій одночасності робіт, але також може споживати більше ресурсів CPU.",
 	"Set Voice": "Встановити голос",
 	"Set whisper model": "Встановити модель whisper",
-	"Sets a flat bias against tokens that have appeared at least once. A higher value (e.g., 1.5) will penalize repetitions more strongly, while a lower value (e.g., 0.9) will be more lenient. At 0, it is disabled. (Default: 0)": "",
-	"Sets a scaling bias against tokens to penalize repetitions, based on how many times they have appeared. A higher value (e.g., 1.5) will penalize repetitions more strongly, while a lower value (e.g., 0.9) will be more lenient. At 0, it is disabled. (Default: 1.1)": "",
-	"Sets how far back for the model to look back to prevent repetition. (Default: 64, 0 = disabled, -1 = num_ctx)": "Встановлює, на скільки кроків назад модель повинна звертатися, щоб запобігти повторенням. (За замовчуванням: 64, 0 = вимкнено, -1 = num_ctx)",
-	"Sets the random number seed to use for generation. Setting this to a specific number will make the model generate the same text for the same prompt. (Default: random)": "Встановлює насіння випадкового числа для генерації. Вказавши конкретне число, модель буде генерувати той самий текст для одного й того ж запиту. (За замовчуванням: випадкове)",
-	"Sets the size of the context window used to generate the next token. (Default: 2048)": "Встановлює розмір вікна контексту, яке використовується для генерації наступного токена. (За замовчуванням: 2048)",
+	"Sets a flat bias against tokens that have appeared at least once. A higher value (e.g., 1.5) will penalize repetitions more strongly, while a lower value (e.g., 0.9) will be more lenient. At 0, it is disabled.": "",
+	"Sets a scaling bias against tokens to penalize repetitions, based on how many times they have appeared. A higher value (e.g., 1.5) will penalize repetitions more strongly, while a lower value (e.g., 0.9) will be more lenient. At 0, it is disabled.": "",
+	"Sets how far back for the model to look back to prevent repetition.": "",
+	"Sets the random number seed to use for generation. Setting this to a specific number will make the model generate the same text for the same prompt.": "",
+	"Sets the size of the context window used to generate the next token.": "",
 	"Sets the stop sequences to use. When this pattern is encountered, the LLM will stop generating text and return. Multiple stop patterns may be set by specifying multiple separate stop parameters in a modelfile.": "Встановлює послідовності зупинки, які будуть використовуватися. Коли зустрічається така послідовність, LLM припиняє генерацію тексту і повертає результат. Можна встановити кілька послідовностей зупинки, вказавши кілька окремих параметрів зупинки у файлі моделі.",
 	"Settings": "Налаштування",
 	"Settings saved successfully!": "Налаштування успішно збережено!",
@@ -964,7 +964,7 @@
 	"System Prompt": "Системний промт",
 	"Tags Generation": "Генерація тегів",
 	"Tags Generation Prompt": "Підказка для генерації тегів",
-	"Tail free sampling is used to reduce the impact of less probable tokens from the output. A higher value (e.g., 2.0) will reduce the impact more, while a value of 1.0 disables this setting. (default: 1)": "Вибірка з відрізанням хвоста використовується для зменшення впливу малоймовірних токенів на результат. Вищі значення (напр., 2.0) зменшують цей вплив більше, в той час як значення 1.0 вимикає цю настройку. (За замовчуванням: 1)",
+	"Tail free sampling is used to reduce the impact of less probable tokens from the output. A higher value (e.g., 2.0) will reduce the impact more, while a value of 1.0 disables this setting.": "",
 	"Talk to model": "",
 	"Tap to interrupt": "Натисніть, щоб перервати",
 	"Tasks": "",
@@ -979,7 +979,7 @@
 	"Thanks for your feedback!": "Дякуємо за ваш відгук!",
 	"The Application Account DN you bind with for search": "DN облікового запису застосунку, з яким ви здійснюєте прив'язку для пошуку",
 	"The base to search for users": "База для пошуку користувачів",
-	"The batch size determines how many text requests are processed together at once. A higher batch size can increase the performance and speed of the model, but it also requires more memory.  (Default: 512)": "Розмір пакету визначає, скільки текстових запитів обробляється одночасно. Більший розмір пакету може підвищити продуктивність та швидкість моделі, але також вимагає більше пам'яті. (За замовчуванням: 512)",
+	"The batch size determines how many text requests are processed together at once. A higher batch size can increase the performance and speed of the model, but it also requires more memory.": "",
 	"The developers behind this plugin are passionate volunteers from the community. If you find this plugin helpful, please consider contributing to its development.": "Розробники цього плагіна - пристрасні волонтери зі спільноти. Якщо ви вважаєте цей плагін корисним, будь ласка, зробіть свій внесок у його розвиток.",
 	"The evaluation leaderboard is based on the Elo rating system and is updated in real-time.": "Таблиця лідерів оцінки базується на системі рейтингу Ело і оновлюється в реальному часі.",
 	"The LDAP attribute that maps to the mail that users use to sign in.": "LDAP-атрибут, який відповідає за пошту, яку користувачі використовують для входу.",
@@ -988,14 +988,14 @@
 	"The maximum file size in MB. If the file size exceeds this limit, the file will not be uploaded.": "Максимальний розмір файлу в МБ. Якщо розмір файлу перевищує цей ліміт, файл не буде завантажено.",
 	"The maximum number of files that can be used at once in chat. If the number of files exceeds this limit, the files will not be uploaded.": "Максимальна кількість файлів, які можна використати одночасно в чаті. Якщо кількість файлів перевищує цей ліміт, файли не будуть завантажені.",
 	"The score should be a value between 0.0 (0%) and 1.0 (100%).": "Оцінка повинна бути в діапазоні від 0.0 (0%) до 1.0 (100%).",
-	"The temperature of the model. Increasing the temperature will make the model answer more creatively. (Default: 0.8)": "Температура моделі. Збільшення температури зробить відповіді моделі більш креативними. (За замовчуванням: 0.8)",
+	"The temperature of the model. Increasing the temperature will make the model answer more creatively.": "",
 	"Theme": "Тема",
 	"Thinking...": "Думаю...",
 	"This action cannot be undone. Do you wish to continue?": "Цю дію не можна скасувати. Ви бажаєте продовжити?",
 	"This ensures that your valuable conversations are securely saved to your backend database. Thank you!": "Це забезпечує збереження ваших цінних розмов у безпечному бекенд-сховищі. Дякуємо!",
 	"This is an experimental feature, it may not function as expected and is subject to change at any time.": "Це експериментальна функція, вона може працювати не так, як очікувалося, і може бути змінена в будь-який час.",
-	"This option controls how many tokens are preserved when refreshing the context. For example, if set to 2, the last 2 tokens of the conversation context will be retained. Preserving context can help maintain the continuity of a conversation, but it may reduce the ability to respond to new topics. (Default: 24)": "Ця опція керує кількістю токенів, яка зберігається під час оновлення контексту. Наприклад, якщо встановити як 2, останні 2 токени контексту розмови будуть збережені. Збереження контексту може допомогти підтримувати безперервність розмови, але воно також може зменшити здатність відповідати на нові теми. (За замовчуванням: 24)",
-	"This option sets the maximum number of tokens the model can generate in its response. Increasing this limit allows the model to provide longer answers, but it may also increase the likelihood of unhelpful or irrelevant content being generated.  (Default: 128)": "Ця опція встановлює максимальну кількість токенів, які модель може генерувати в своєму відповіді. Збільшення цього обмеження дозволяє моделі надавати довші відповіді, але також може збільшити вірогідність генерації недопоможного чи невідповідного вмісту. (За замовчуванням: 128)",
+	"This option controls how many tokens are preserved when refreshing the context. For example, if set to 2, the last 2 tokens of the conversation context will be retained. Preserving context can help maintain the continuity of a conversation, but it may reduce the ability to respond to new topics.": "",
+	"This option sets the maximum number of tokens the model can generate in its response. Increasing this limit allows the model to provide longer answers, but it may also increase the likelihood of unhelpful or irrelevant content being generated.": "",
 	"This option will delete all existing files in the collection and replace them with newly uploaded files.": "Цей варіант видалить усі існуючі файли в колекції та замінить їх новими завантаженими файлами.",
 	"This response was generated by \"{{model}}\"": "Цю відповідь згенеровано за допомогою \"{{model}}\"",
 	"This will delete": "Це призведе до видалення",
@@ -1132,7 +1132,7 @@
 	"Why?": "Чому?",
 	"Widescreen Mode": "Широкоекранний режим",
 	"Won": "Переможець",
-	"Works together with top-k. A higher value (e.g., 0.95) will lead to more diverse text, while a lower value (e.g., 0.5) will generate more focused and conservative text. (Default: 0.9)": "Працює разом з top-k. Більше значення (напр., 0.95) приведе до більш різноманітного тексту, тоді як менше значення (напр., 0.5) згенерує більш зосереджений і консервативний текст. (За замовчуванням: 0.9)",
+	"Works together with top-k. A higher value (e.g., 0.95) will lead to more diverse text, while a lower value (e.g., 0.5) will generate more focused and conservative text.": "",
 	"Workspace": "Робочий простір",
 	"Workspace Permissions": "Дозволи робочого простору.",
 	"Write": "Писати",

+ 18 - 18
src/lib/i18n/locales/ur-PK/translation.json

@@ -64,7 +64,7 @@
 	"Allow Voice Interruption in Call": "کال میں آواز کی مداخلت کی اجازت دیں",
 	"Allowed Endpoints": "",
 	"Already have an account?": "کیا پہلے سے اکاؤنٹ موجود ہے؟",
-	"Alternative to the top_p, and aims to ensure a balance of quality and variety. The parameter p represents the minimum probability for a token to be considered, relative to the probability of the most likely token. For example, with p=0.05 and the most likely token having a probability of 0.9, logits with a value less than 0.045 are filtered out. (Default: 0.0)": "",
+	"Alternative to the top_p, and aims to ensure a balance of quality and variety. The parameter p represents the minimum probability for a token to be considered, relative to the probability of the most likely token. For example, with p=0.05 and the most likely token having a probability of 0.9, logits with a value less than 0.045 are filtered out.": "",
 	"Always": "",
 	"Amazing": "",
 	"an assistant": "معاون",
@@ -208,7 +208,7 @@
 	"Confirm your new password": "",
 	"Connect to your own OpenAI compatible API endpoints.": "",
 	"Connections": "کنکشنز",
-	"Constrains effort on reasoning for reasoning models. Only applicable to reasoning models from specific providers that support reasoning effort. (Default: medium)": "",
+	"Constrains effort on reasoning for reasoning models. Only applicable to reasoning models from specific providers that support reasoning effort.": "",
 	"Contact Admin for WebUI Access": "ویب یو آئی رسائی کے لیے ایڈمن سے رابطہ کریں",
 	"Content": "مواد",
 	"Content Extraction Engine": "",
@@ -218,9 +218,9 @@
 	"Continue with Email": "",
 	"Continue with LDAP": "",
 	"Control how message text is split for TTS requests. 'Punctuation' splits into sentences, 'paragraphs' splits into paragraphs, and 'none' keeps the message as a single string.": "TTS درخواستوں کے لیے پیغام کے متن کی تقسیم کو کنٹرول کریں 'Punctuation' جملوں میں تقسیم کرتا ہے، 'paragraphs' پیراگراف میں تقسیم کرتا ہے، اور 'none' پیغام کو ایک ہی سٹرنگ کے طور پر رکھتا ہے",
-	"Control the repetition of token sequences in the generated text. A higher value (e.g., 1.5) will penalize repetitions more strongly, while a lower value (e.g., 1.1) will be more lenient. At 1, it is disabled. (Default: 1.1)": "",
+	"Control the repetition of token sequences in the generated text. A higher value (e.g., 1.5) will penalize repetitions more strongly, while a lower value (e.g., 1.1) will be more lenient. At 1, it is disabled.": "",
 	"Controls": "کنٹرولز",
-	"Controls the balance between coherence and diversity of the output. A lower value will result in more focused and coherent text. (Default: 5.0)": "",
+	"Controls the balance between coherence and diversity of the output. A lower value will result in more focused and coherent text.": "",
 	"Copied": "کاپی کیا گیا",
 	"Copied shared chat URL to clipboard!": "مشترکہ چیٹ یو آر ایل کلپ بورڈ میں نقل کر دیا گیا!",
 	"Copied to clipboard": "کلپ بورڈ پر نقل کر دیا گیا",
@@ -358,7 +358,7 @@
 	"Enable Memory Locking (mlock) to prevent model data from being swapped out of RAM. This option locks the model's working set of pages into RAM, ensuring that they will not be swapped out to disk. This can help maintain performance by avoiding page faults and ensuring fast data access.": "",
 	"Enable Memory Mapping (mmap) to load model data. This option allows the system to use disk storage as an extension of RAM by treating disk files as if they were in RAM. This can improve model performance by allowing for faster data access. However, it may not work correctly with all systems and can consume a significant amount of disk space.": "",
 	"Enable Message Rating": "پیغام کی درجہ بندی فعال کریں",
-	"Enable Mirostat sampling for controlling perplexity. (Default: 0, 0 = Disabled, 1 = Mirostat, 2 = Mirostat 2.0)": "",
+	"Enable Mirostat sampling for controlling perplexity.": "",
 	"Enable New Sign Ups": "نئے سائن اپس کو فعال کریں",
 	"Enabled": "فعال کردیا گیا ہے",
 	"Ensure your CSV file includes 4 columns in this order: Name, Email, Password, Role.": "یقینی بنائیں کہ آپ کی CSV فائل میں 4 کالم اس ترتیب میں شامل ہوں: نام، ای میل، پاس ورڈ، کردار",
@@ -566,7 +566,7 @@
 	"Include": "شامل کریں",
 	"Include `--api-auth` flag when running stable-diffusion-webui": "`--api-auth` پرچم کو چلانے کے وقت شامل کریں stable-diffusion-webui",
 	"Include `--api` flag when running stable-diffusion-webui": "اسٹیبل-ڈیفیوژن-ویب یو آئی چلانے کے دوران `--api` فلیگ شامل کریں",
-	"Influences how quickly the algorithm responds to feedback from the generated text. A lower learning rate will result in slower adjustments, while a higher learning rate will make the algorithm more responsive. (Default: 0.1)": "",
+	"Influences how quickly the algorithm responds to feedback from the generated text. A lower learning rate will result in slower adjustments, while a higher learning rate will make the algorithm more responsive.": "",
 	"Info": "معلومات",
 	"Input commands": "کمانڈز داخل کریں",
 	"Install from Github URL": "گِٹ حب یو آر ایل سے انسٹال کریں",
@@ -809,7 +809,7 @@
 	"Reasoning Effort": "",
 	"Record voice": "صوت ریکارڈ کریں",
 	"Redirecting you to Open WebUI Community": "آپ کو اوپن ویب یو آئی کمیونٹی کی طرف ری ڈائریکٹ کیا جا رہا ہے",
-	"Reduces the probability of generating nonsense. A higher value (e.g. 100) will give more diverse answers, while a lower value (e.g. 10) will be more conservative. (Default: 40)": "",
+	"Reduces the probability of generating nonsense. A higher value (e.g. 100) will give more diverse answers, while a lower value (e.g. 10) will be more conservative.": "",
 	"Refer to yourself as \"User\" (e.g., \"User is learning Spanish\")": "خود کو \"صارف\" کے طور پر حوالہ دیں (جیسے، \"صارف ہسپانوی سیکھ رہا ہے\")",
 	"References from": "سے حوالہ جات",
 	"Refused when it shouldn't have": "جب انکار نہیں ہونا چاہیے تھا، انکار کر دیا",
@@ -918,11 +918,11 @@
 	"Set the number of worker threads used for computation. This option controls how many threads are used to process incoming requests concurrently. Increasing this value can improve performance under high concurrency workloads but may also consume more CPU resources.": "",
 	"Set Voice": "آواز کے لئے سیٹ کریں",
 	"Set whisper model": "وِسپر ماڈل مرتب کریں",
-	"Sets a flat bias against tokens that have appeared at least once. A higher value (e.g., 1.5) will penalize repetitions more strongly, while a lower value (e.g., 0.9) will be more lenient. At 0, it is disabled. (Default: 0)": "",
-	"Sets a scaling bias against tokens to penalize repetitions, based on how many times they have appeared. A higher value (e.g., 1.5) will penalize repetitions more strongly, while a lower value (e.g., 0.9) will be more lenient. At 0, it is disabled. (Default: 1.1)": "",
-	"Sets how far back for the model to look back to prevent repetition. (Default: 64, 0 = disabled, -1 = num_ctx)": "",
-	"Sets the random number seed to use for generation. Setting this to a specific number will make the model generate the same text for the same prompt. (Default: random)": "",
-	"Sets the size of the context window used to generate the next token. (Default: 2048)": "",
+	"Sets a flat bias against tokens that have appeared at least once. A higher value (e.g., 1.5) will penalize repetitions more strongly, while a lower value (e.g., 0.9) will be more lenient. At 0, it is disabled.": "",
+	"Sets a scaling bias against tokens to penalize repetitions, based on how many times they have appeared. A higher value (e.g., 1.5) will penalize repetitions more strongly, while a lower value (e.g., 0.9) will be more lenient. At 0, it is disabled.": "",
+	"Sets how far back for the model to look back to prevent repetition.": "",
+	"Sets the random number seed to use for generation. Setting this to a specific number will make the model generate the same text for the same prompt.": "",
+	"Sets the size of the context window used to generate the next token.": "",
 	"Sets the stop sequences to use. When this pattern is encountered, the LLM will stop generating text and return. Multiple stop patterns may be set by specifying multiple separate stop parameters in a modelfile.": "",
 	"Settings": "ترتیبات",
 	"Settings saved successfully!": "ترتیبات کامیابی کے ساتھ محفوظ ہو گئیں!",
@@ -964,7 +964,7 @@
 	"System Prompt": "سسٹم پرومپٹ",
 	"Tags Generation": "",
 	"Tags Generation Prompt": "پرمپٹ کے لیے ٹیگز بنائیں",
-	"Tail free sampling is used to reduce the impact of less probable tokens from the output. A higher value (e.g., 2.0) will reduce the impact more, while a value of 1.0 disables this setting. (default: 1)": "",
+	"Tail free sampling is used to reduce the impact of less probable tokens from the output. A higher value (e.g., 2.0) will reduce the impact more, while a value of 1.0 disables this setting.": "",
 	"Talk to model": "",
 	"Tap to interrupt": "رکنے کے لئے ٹچ کریں",
 	"Tasks": "",
@@ -979,7 +979,7 @@
 	"Thanks for your feedback!": "آپ کی رائے کا شکریہ!",
 	"The Application Account DN you bind with for search": "",
 	"The base to search for users": "",
-	"The batch size determines how many text requests are processed together at once. A higher batch size can increase the performance and speed of the model, but it also requires more memory.  (Default: 512)": "",
+	"The batch size determines how many text requests are processed together at once. A higher batch size can increase the performance and speed of the model, but it also requires more memory.": "",
 	"The developers behind this plugin are passionate volunteers from the community. If you find this plugin helpful, please consider contributing to its development.": "اس پلگ ان کے پیچھے موجود ڈویلپرز کمیونٹی کے پرجوش رضاکار ہیں اگر آپ کو یہ پلگ ان مددگار لگتا ہے تو برائے مہربانی اس کی ترقی میں اپنا حصہ ڈالنے پر غور کریں",
 	"The evaluation leaderboard is based on the Elo rating system and is updated in real-time.": "تشخیصی لیڈربورڈ ایلو ریٹنگ سسٹم پر مبنی ہے اور یہ حقیقی وقت میں اپ ڈیٹ ہوتا ہے",
 	"The LDAP attribute that maps to the mail that users use to sign in.": "",
@@ -988,14 +988,14 @@
 	"The maximum file size in MB. If the file size exceeds this limit, the file will not be uploaded.": "زیادہ سے زیادہ فائل سائز ایم بی میں اگر فائل سائز اس حد سے تجاوز کر جاتا ہے، تو فائل اپ لوڈ نہیں ہوگی",
 	"The maximum number of files that can be used at once in chat. If the number of files exceeds this limit, the files will not be uploaded.": "چیٹ میں ایک وقت میں استعمال ہونے والی فائلوں کی زیادہ سے زیادہ تعداد اگر فائلوں کی تعداد اس حد سے تجاوز کر جائے تو فائلیں اپلوڈ نہیں کی جائیں گی",
 	"The score should be a value between 0.0 (0%) and 1.0 (100%).": "سکور کی قیمت کو 0.0 (0%) اور 1.0 (100%) کے درمیان ہونا چاہیے",
-	"The temperature of the model. Increasing the temperature will make the model answer more creatively. (Default: 0.8)": "",
+	"The temperature of the model. Increasing the temperature will make the model answer more creatively.": "",
 	"Theme": "تھیم",
 	"Thinking...": "سوچ رہا ہے...",
 	"This action cannot be undone. Do you wish to continue?": "یہ عمل واپس نہیں کیا جا سکتا کیا آپ جاری رکھنا چاہتے ہیں؟",
 	"This ensures that your valuable conversations are securely saved to your backend database. Thank you!": "یہ یقینی بناتا ہے کہ آپ کی قیمتی گفتگو محفوظ طریقے سے آپ کے بیک اینڈ ڈیٹا بیس میں محفوظ کی گئی ہیں شکریہ!",
 	"This is an experimental feature, it may not function as expected and is subject to change at any time.": "یہ ایک تجرباتی خصوصیت ہے، یہ متوقع طور پر کام نہ کر سکتی ہو اور کسی بھی وقت تبدیل کی جا سکتی ہے",
-	"This option controls how many tokens are preserved when refreshing the context. For example, if set to 2, the last 2 tokens of the conversation context will be retained. Preserving context can help maintain the continuity of a conversation, but it may reduce the ability to respond to new topics. (Default: 24)": "",
-	"This option sets the maximum number of tokens the model can generate in its response. Increasing this limit allows the model to provide longer answers, but it may also increase the likelihood of unhelpful or irrelevant content being generated.  (Default: 128)": "",
+	"This option controls how many tokens are preserved when refreshing the context. For example, if set to 2, the last 2 tokens of the conversation context will be retained. Preserving context can help maintain the continuity of a conversation, but it may reduce the ability to respond to new topics.": "",
+	"This option sets the maximum number of tokens the model can generate in its response. Increasing this limit allows the model to provide longer answers, but it may also increase the likelihood of unhelpful or irrelevant content being generated.": "",
 	"This option will delete all existing files in the collection and replace them with newly uploaded files.": "اس اختیار سے مجموعہ میں موجود تمام فائلز حذف ہو جائیں گی اور ان کی جگہ نئی اپ لوڈ کردہ فائلز لی جائیں گی",
 	"This response was generated by \"{{model}}\"": "یہ جواب \"{{model}}\" کے ذریعہ تیار کیا گیا",
 	"This will delete": "یہ حذف کر دے گا",
@@ -1132,7 +1132,7 @@
 	"Why?": "",
 	"Widescreen Mode": "وائڈ اسکرین موڈ",
 	"Won": "جیتا",
-	"Works together with top-k. A higher value (e.g., 0.95) will lead to more diverse text, while a lower value (e.g., 0.5) will generate more focused and conservative text. (Default: 0.9)": "",
+	"Works together with top-k. A higher value (e.g., 0.95) will lead to more diverse text, while a lower value (e.g., 0.5) will generate more focused and conservative text.": "",
 	"Workspace": "ورک اسپیس",
 	"Workspace Permissions": "",
 	"Write": "",

+ 18 - 18
src/lib/i18n/locales/vi-VN/translation.json

@@ -64,7 +64,7 @@
 	"Allow Voice Interruption in Call": "Cho phép gián đoạn giọng nói trong cuộc gọi",
 	"Allowed Endpoints": "",
 	"Already have an account?": "Bạn đã có tài khoản?",
-	"Alternative to the top_p, and aims to ensure a balance of quality and variety. The parameter p represents the minimum probability for a token to be considered, relative to the probability of the most likely token. For example, with p=0.05 and the most likely token having a probability of 0.9, logits with a value less than 0.045 are filtered out. (Default: 0.0)": "",
+	"Alternative to the top_p, and aims to ensure a balance of quality and variety. The parameter p represents the minimum probability for a token to be considered, relative to the probability of the most likely token. For example, with p=0.05 and the most likely token having a probability of 0.9, logits with a value less than 0.045 are filtered out.": "",
 	"Always": "",
 	"Amazing": "",
 	"an assistant": "trợ lý",
@@ -208,7 +208,7 @@
 	"Confirm your new password": "",
 	"Connect to your own OpenAI compatible API endpoints.": "",
 	"Connections": "Kết nối",
-	"Constrains effort on reasoning for reasoning models. Only applicable to reasoning models from specific providers that support reasoning effort. (Default: medium)": "",
+	"Constrains effort on reasoning for reasoning models. Only applicable to reasoning models from specific providers that support reasoning effort.": "",
 	"Contact Admin for WebUI Access": "Liên hệ với Quản trị viên để được cấp quyền truy cập",
 	"Content": "Nội dung",
 	"Content Extraction Engine": "",
@@ -218,9 +218,9 @@
 	"Continue with Email": "",
 	"Continue with LDAP": "",
 	"Control how message text is split for TTS requests. 'Punctuation' splits into sentences, 'paragraphs' splits into paragraphs, and 'none' keeps the message as a single string.": "",
-	"Control the repetition of token sequences in the generated text. A higher value (e.g., 1.5) will penalize repetitions more strongly, while a lower value (e.g., 1.1) will be more lenient. At 1, it is disabled. (Default: 1.1)": "",
+	"Control the repetition of token sequences in the generated text. A higher value (e.g., 1.5) will penalize repetitions more strongly, while a lower value (e.g., 1.1) will be more lenient. At 1, it is disabled.": "",
 	"Controls": "",
-	"Controls the balance between coherence and diversity of the output. A lower value will result in more focused and coherent text. (Default: 5.0)": "",
+	"Controls the balance between coherence and diversity of the output. A lower value will result in more focused and coherent text.": "",
 	"Copied": "Đã sao chép",
 	"Copied shared chat URL to clipboard!": "Đã sao chép link chia sẻ trò chuyện vào clipboard!",
 	"Copied to clipboard": "",
@@ -358,7 +358,7 @@
 	"Enable Memory Locking (mlock) to prevent model data from being swapped out of RAM. This option locks the model's working set of pages into RAM, ensuring that they will not be swapped out to disk. This can help maintain performance by avoiding page faults and ensuring fast data access.": "",
 	"Enable Memory Mapping (mmap) to load model data. This option allows the system to use disk storage as an extension of RAM by treating disk files as if they were in RAM. This can improve model performance by allowing for faster data access. However, it may not work correctly with all systems and can consume a significant amount of disk space.": "",
 	"Enable Message Rating": "Cho phép phản hồi, đánh giá",
-	"Enable Mirostat sampling for controlling perplexity. (Default: 0, 0 = Disabled, 1 = Mirostat, 2 = Mirostat 2.0)": "",
+	"Enable Mirostat sampling for controlling perplexity.": "",
 	"Enable New Sign Ups": "Cho phép đăng ký mới",
 	"Enabled": "Đã bật",
 	"Ensure your CSV file includes 4 columns in this order: Name, Email, Password, Role.": "Đảm bảo tệp CSV của bạn bao gồm 4 cột theo thứ tự sau: Name, Email, Password, Role.",
@@ -566,7 +566,7 @@
 	"Include": "",
 	"Include `--api-auth` flag when running stable-diffusion-webui": "",
 	"Include `--api` flag when running stable-diffusion-webui": "Bao gồm flag `--api` khi chạy stable-diffusion-webui",
-	"Influences how quickly the algorithm responds to feedback from the generated text. A lower learning rate will result in slower adjustments, while a higher learning rate will make the algorithm more responsive. (Default: 0.1)": "",
+	"Influences how quickly the algorithm responds to feedback from the generated text. A lower learning rate will result in slower adjustments, while a higher learning rate will make the algorithm more responsive.": "",
 	"Info": "Thông tin",
 	"Input commands": "Nhập các câu lệnh",
 	"Install from Github URL": "Cài đặt từ Github URL",
@@ -809,7 +809,7 @@
 	"Reasoning Effort": "",
 	"Record voice": "Ghi âm",
 	"Redirecting you to Open WebUI Community": "Đang chuyển hướng bạn đến Cộng đồng OpenWebUI",
-	"Reduces the probability of generating nonsense. A higher value (e.g. 100) will give more diverse answers, while a lower value (e.g. 10) will be more conservative. (Default: 40)": "",
+	"Reduces the probability of generating nonsense. A higher value (e.g. 100) will give more diverse answers, while a lower value (e.g. 10) will be more conservative.": "",
 	"Refer to yourself as \"User\" (e.g., \"User is learning Spanish\")": "Hãy coi bản thân mình như \"Người dùng\" (ví dụ: \"Người dùng đang học Tiếng Tây Ban Nha\")",
 	"References from": "",
 	"Refused when it shouldn't have": "Từ chối trả lời mà nhẽ không nên làm vậy",
@@ -918,11 +918,11 @@
 	"Set the number of worker threads used for computation. This option controls how many threads are used to process incoming requests concurrently. Increasing this value can improve performance under high concurrency workloads but may also consume more CPU resources.": "",
 	"Set Voice": "Đặt Giọng nói",
 	"Set whisper model": "",
-	"Sets a flat bias against tokens that have appeared at least once. A higher value (e.g., 1.5) will penalize repetitions more strongly, while a lower value (e.g., 0.9) will be more lenient. At 0, it is disabled. (Default: 0)": "",
-	"Sets a scaling bias against tokens to penalize repetitions, based on how many times they have appeared. A higher value (e.g., 1.5) will penalize repetitions more strongly, while a lower value (e.g., 0.9) will be more lenient. At 0, it is disabled. (Default: 1.1)": "",
-	"Sets how far back for the model to look back to prevent repetition. (Default: 64, 0 = disabled, -1 = num_ctx)": "",
-	"Sets the random number seed to use for generation. Setting this to a specific number will make the model generate the same text for the same prompt. (Default: random)": "",
-	"Sets the size of the context window used to generate the next token. (Default: 2048)": "",
+	"Sets a flat bias against tokens that have appeared at least once. A higher value (e.g., 1.5) will penalize repetitions more strongly, while a lower value (e.g., 0.9) will be more lenient. At 0, it is disabled.": "",
+	"Sets a scaling bias against tokens to penalize repetitions, based on how many times they have appeared. A higher value (e.g., 1.5) will penalize repetitions more strongly, while a lower value (e.g., 0.9) will be more lenient. At 0, it is disabled.": "",
+	"Sets how far back for the model to look back to prevent repetition.": "",
+	"Sets the random number seed to use for generation. Setting this to a specific number will make the model generate the same text for the same prompt.": "",
+	"Sets the size of the context window used to generate the next token.": "",
 	"Sets the stop sequences to use. When this pattern is encountered, the LLM will stop generating text and return. Multiple stop patterns may be set by specifying multiple separate stop parameters in a modelfile.": "",
 	"Settings": "Cài đặt",
 	"Settings saved successfully!": "Cài đặt đã được lưu thành công!",
@@ -964,7 +964,7 @@
 	"System Prompt": "Prompt Hệ thống (System Prompt)",
 	"Tags Generation": "",
 	"Tags Generation Prompt": "",
-	"Tail free sampling is used to reduce the impact of less probable tokens from the output. A higher value (e.g., 2.0) will reduce the impact more, while a value of 1.0 disables this setting. (default: 1)": "",
+	"Tail free sampling is used to reduce the impact of less probable tokens from the output. A higher value (e.g., 2.0) will reduce the impact more, while a value of 1.0 disables this setting.": "",
 	"Talk to model": "",
 	"Tap to interrupt": "Chạm để ngừng",
 	"Tasks": "",
@@ -979,7 +979,7 @@
 	"Thanks for your feedback!": "Cám ơn bạn đã gửi phản hồi!",
 	"The Application Account DN you bind with for search": "",
 	"The base to search for users": "",
-	"The batch size determines how many text requests are processed together at once. A higher batch size can increase the performance and speed of the model, but it also requires more memory.  (Default: 512)": "",
+	"The batch size determines how many text requests are processed together at once. A higher batch size can increase the performance and speed of the model, but it also requires more memory.": "",
 	"The developers behind this plugin are passionate volunteers from the community. If you find this plugin helpful, please consider contributing to its development.": "Các nhà phát triển đằng sau plugin này là những tình nguyện viên nhiệt huyết của cộng đồng. Nếu bạn thấy plugin này hữu ích, vui lòng cân nhắc đóng góp cho sự phát triển của nó.",
 	"The evaluation leaderboard is based on the Elo rating system and is updated in real-time.": "",
 	"The LDAP attribute that maps to the mail that users use to sign in.": "",
@@ -988,14 +988,14 @@
 	"The maximum file size in MB. If the file size exceeds this limit, the file will not be uploaded.": "",
 	"The maximum number of files that can be used at once in chat. If the number of files exceeds this limit, the files will not be uploaded.": "",
 	"The score should be a value between 0.0 (0%) and 1.0 (100%).": "Điểm (score) phải có giá trị từ 0,0 (0%) đến 1,0 (100%).",
-	"The temperature of the model. Increasing the temperature will make the model answer more creatively. (Default: 0.8)": "",
+	"The temperature of the model. Increasing the temperature will make the model answer more creatively.": "",
 	"Theme": "Chủ đề",
 	"Thinking...": "Đang suy luận...",
 	"This action cannot be undone. Do you wish to continue?": "Hành động này không thể được hoàn tác. Bạn có muốn tiếp tục không?",
 	"This ensures that your valuable conversations are securely saved to your backend database. Thank you!": "Điều này đảm bảo rằng các nội dung chat có giá trị của bạn được lưu an toàn vào cơ sở dữ liệu backend của bạn. Cảm ơn bạn!",
 	"This is an experimental feature, it may not function as expected and is subject to change at any time.": "Đây là tính năng thử nghiệm, có thể không hoạt động như mong đợi và có thể thay đổi bất kỳ lúc nào.",
-	"This option controls how many tokens are preserved when refreshing the context. For example, if set to 2, the last 2 tokens of the conversation context will be retained. Preserving context can help maintain the continuity of a conversation, but it may reduce the ability to respond to new topics. (Default: 24)": "",
-	"This option sets the maximum number of tokens the model can generate in its response. Increasing this limit allows the model to provide longer answers, but it may also increase the likelihood of unhelpful or irrelevant content being generated.  (Default: 128)": "",
+	"This option controls how many tokens are preserved when refreshing the context. For example, if set to 2, the last 2 tokens of the conversation context will be retained. Preserving context can help maintain the continuity of a conversation, but it may reduce the ability to respond to new topics.": "",
+	"This option sets the maximum number of tokens the model can generate in its response. Increasing this limit allows the model to provide longer answers, but it may also increase the likelihood of unhelpful or irrelevant content being generated.": "",
 	"This option will delete all existing files in the collection and replace them with newly uploaded files.": "",
 	"This response was generated by \"{{model}}\"": "",
 	"This will delete": "Chat này sẽ bị xóa",
@@ -1132,7 +1132,7 @@
 	"Why?": "",
 	"Widescreen Mode": "Chế độ màn hình rộng",
 	"Won": "",
-	"Works together with top-k. A higher value (e.g., 0.95) will lead to more diverse text, while a lower value (e.g., 0.5) will generate more focused and conservative text. (Default: 0.9)": "",
+	"Works together with top-k. A higher value (e.g., 0.95) will lead to more diverse text, while a lower value (e.g., 0.5) will generate more focused and conservative text.": "",
 	"Workspace": "Workspace",
 	"Workspace Permissions": "",
 	"Write": "",

+ 18 - 18
src/lib/i18n/locales/zh-CN/translation.json

@@ -64,7 +64,7 @@
 	"Allow Voice Interruption in Call": "允许通话中的打断语音",
 	"Allowed Endpoints": "允许的端点",
 	"Already have an account?": "已经拥有账号了?",
-	"Alternative to the top_p, and aims to ensure a balance of quality and variety. The parameter p represents the minimum probability for a token to be considered, relative to the probability of the most likely token. For example, with p=0.05 and the most likely token having a probability of 0.9, logits with a value less than 0.045 are filtered out. (Default: 0.0)": "top_p的替代方法,目标是在质量和多样性之间取得平衡。参数p表示一个Token相对于最有可能的Token所需的最低概率。比如,当p=0.05且最有可能的Token概率为0.9时,概率低于0.045的logits会被排除。(默认值:0.0)",
+	"Alternative to the top_p, and aims to ensure a balance of quality and variety. The parameter p represents the minimum probability for a token to be considered, relative to the probability of the most likely token. For example, with p=0.05 and the most likely token having a probability of 0.9, logits with a value less than 0.045 are filtered out.": "",
 	"Always": "保持",
 	"Amazing": "很棒",
 	"an assistant": "一个助手",
@@ -208,7 +208,7 @@
 	"Confirm your new password": "确认新密码",
 	"Connect to your own OpenAI compatible API endpoints.": "连接到你自己的与 OpenAI 兼容的 API 接口端点。",
 	"Connections": "外部连接",
-	"Constrains effort on reasoning for reasoning models. Only applicable to reasoning models from specific providers that support reasoning effort. (Default: medium)": "限制模型的努力。仅适用于支持努力的特定提供商的模型。(默认值:中等)",
+	"Constrains effort on reasoning for reasoning models. Only applicable to reasoning models from specific providers that support reasoning effort.": "",
 	"Contact Admin for WebUI Access": "请联系管理员以获取访问权限",
 	"Content": "内容",
 	"Content Extraction Engine": "",
@@ -218,9 +218,9 @@
 	"Continue with Email": "使用邮箱登录",
 	"Continue with LDAP": "使用 LDAP 登录",
 	"Control how message text is split for TTS requests. 'Punctuation' splits into sentences, 'paragraphs' splits into paragraphs, and 'none' keeps the message as a single string.": "控制消息文本如何拆分以用于 TTS 请求。“Punctuation”拆分为句子,“paragraphs”拆分为段落,“none”将消息保留为单个字符串。",
-	"Control the repetition of token sequences in the generated text. A higher value (e.g., 1.5) will penalize repetitions more strongly, while a lower value (e.g., 1.1) will be more lenient. At 1, it is disabled. (Default: 1.1)": "控制生成文本中 Token 的重复。较高的值(例如 1.5)会更强烈地惩罚重复,而较低的值(例如 1.1)则更宽松。设置为 1 时,此功能被禁用。(默认值:1.1)",
+	"Control the repetition of token sequences in the generated text. A higher value (e.g., 1.5) will penalize repetitions more strongly, while a lower value (e.g., 1.1) will be more lenient. At 1, it is disabled.": "",
 	"Controls": "对话高级设置",
-	"Controls the balance between coherence and diversity of the output. A lower value will result in more focused and coherent text. (Default: 5.0)": "控制输出的连贯性和多样性之间的平衡。较低的值将导致更集中和连贯的文本。(默认值:5.0)",
+	"Controls the balance between coherence and diversity of the output. A lower value will result in more focused and coherent text.": "",
 	"Copied": "已复制",
 	"Copied shared chat URL to clipboard!": "已复制此对话分享链接至剪贴板!",
 	"Copied to clipboard": "已复制到剪贴板",
@@ -358,7 +358,7 @@
 	"Enable Memory Locking (mlock) to prevent model data from being swapped out of RAM. This option locks the model's working set of pages into RAM, ensuring that they will not be swapped out to disk. This can help maintain performance by avoiding page faults and ensuring fast data access.": "启用内存锁定(mlock)以防止模型数据被交换出RAM。此选项将模型的工作集页面锁定在RAM中,确保它们不会被交换到磁盘。这可以通过避免页面错误和确保快速数据访问来帮助维持性能。",
 	"Enable Memory Mapping (mmap) to load model data. This option allows the system to use disk storage as an extension of RAM by treating disk files as if they were in RAM. This can improve model performance by allowing for faster data access. However, it may not work correctly with all systems and can consume a significant amount of disk space.": "启用内存映射(mmap)以加载模型数据。此选项允许系统通过将磁盘文件视为在RAM中来使用磁盘存储作为RAM的扩展。这可以通过更快的数据访问来提高模型性能。然而,它可能无法在所有系统上正常工作,并且可能会消耗大量磁盘空间。",
 	"Enable Message Rating": "启用回复评价",
-	"Enable Mirostat sampling for controlling perplexity. (Default: 0, 0 = Disabled, 1 = Mirostat, 2 = Mirostat 2.0)": "启用 Mirostat 采样以控制困惑度。(默认值:0,0 = 禁用,1 = Mirostat,2 = Mirostat 2.0)",
+	"Enable Mirostat sampling for controlling perplexity.": "",
 	"Enable New Sign Ups": "允许新用户注册",
 	"Enabled": "启用",
 	"Ensure your CSV file includes 4 columns in this order: Name, Email, Password, Role.": "确保您的 CSV 文件按以下顺序包含 4 列: 姓名、电子邮箱、密码、角色。",
@@ -566,7 +566,7 @@
 	"Include": "包括",
 	"Include `--api-auth` flag when running stable-diffusion-webui": "运行 stable-diffusion-webui 时包含 `--api-auth` 参数",
 	"Include `--api` flag when running stable-diffusion-webui": "运行 stable-diffusion-webui 时包含 `--api` 参数",
-	"Influences how quickly the algorithm responds to feedback from the generated text. A lower learning rate will result in slower adjustments, while a higher learning rate will make the algorithm more responsive. (Default: 0.1)": "影响算法对生成文本反馈的响应速度。较低的学习率会导致调整速度较慢,而较高的学习率会使算法更具响应性。(默认值:0.1)",
+	"Influences how quickly the algorithm responds to feedback from the generated text. A lower learning rate will result in slower adjustments, while a higher learning rate will make the algorithm more responsive.": "",
 	"Info": "信息",
 	"Input commands": "输入命令",
 	"Install from Github URL": "从 Github URL 安装",
@@ -809,7 +809,7 @@
 	"Reasoning Effort": "推理努力",
 	"Record voice": "录音",
 	"Redirecting you to Open WebUI Community": "正在将您重定向到 OpenWebUI 社区",
-	"Reduces the probability of generating nonsense. A higher value (e.g. 100) will give more diverse answers, while a lower value (e.g. 10) will be more conservative. (Default: 40)": "降低产生无意义答案的概率。数值越大(如 100),答案就越多样化,而数值越小(如 10),答案就越保守。(默认值:40)",
+	"Reduces the probability of generating nonsense. A higher value (e.g. 100) will give more diverse answers, while a lower value (e.g. 10) will be more conservative.": "",
 	"Refer to yourself as \"User\" (e.g., \"User is learning Spanish\")": "使用\"User\" (用户) 来指代自己(例如:“User 正在学习西班牙语”)",
 	"References from": "来自",
 	"Refused when it shouldn't have": "无理拒绝",
@@ -918,11 +918,11 @@
 	"Set the number of worker threads used for computation. This option controls how many threads are used to process incoming requests concurrently. Increasing this value can improve performance under high concurrency workloads but may also consume more CPU resources.": "设置用于计算的工作线程数量。该选项可控制并发处理传入请求的线程数量。增加该值可以提高高并发工作负载下的性能,但也可能消耗更多的 CPU 资源。",
 	"Set Voice": "设置音色",
 	"Set whisper model": "设置 whisper 模型",
-	"Sets a flat bias against tokens that have appeared at least once. A higher value (e.g., 1.5) will penalize repetitions more strongly, while a lower value (e.g., 0.9) will be more lenient. At 0, it is disabled. (Default: 0)": "这个设置项用于调整对重复 Token 的抑制强度。当某个 Token 至少出现过一次后,系统会通过 flat bias 参数施加惩罚力度:数值越大(如 1.5),抑制重复的效果越强烈;数值较小(如 0.9)则相对宽容。当设为 0 时,系统会完全关闭这个重复抑制功能(默认值为 0)。",
-	"Sets a scaling bias against tokens to penalize repetitions, based on how many times they have appeared. A higher value (e.g., 1.5) will penalize repetitions more strongly, while a lower value (e.g., 0.9) will be more lenient. At 0, it is disabled. (Default: 1.1)": "这个参数用于通过 scaling bias 机制抑制重复内容:当某些 Token 重复出现时,系统会根据它们已出现的次数自动施加惩罚。数值越大(如 1.5)惩罚力度越强,能更有效减少重复;数值较小(如 0.9)则允许更多重复。当设为 0 时完全关闭该功能,默认值设置为 1.1 保持适度抑制。",
-	"Sets how far back for the model to look back to prevent repetition. (Default: 64, 0 = disabled, -1 = num_ctx)": "设置模型回溯多远以防止重复。(默认值:64,0 = 禁用,-1 = num_ctx)",
-	"Sets the random number seed to use for generation. Setting this to a specific number will make the model generate the same text for the same prompt. (Default: random)": "设置 random number seed 可以控制模型生成文本的随机起点。如果指定一个具体数字,当输入相同的提示语时,模型每次都会生成完全相同的文本内容(默认是随机选取 seed)。",
-	"Sets the size of the context window used to generate the next token. (Default: 2048)": "设置用于生成下一个 Token 的上下文大小。(默认值:2048)",
+	"Sets a flat bias against tokens that have appeared at least once. A higher value (e.g., 1.5) will penalize repetitions more strongly, while a lower value (e.g., 0.9) will be more lenient. At 0, it is disabled.": "",
+	"Sets a scaling bias against tokens to penalize repetitions, based on how many times they have appeared. A higher value (e.g., 1.5) will penalize repetitions more strongly, while a lower value (e.g., 0.9) will be more lenient. At 0, it is disabled.": "",
+	"Sets how far back for the model to look back to prevent repetition.": "",
+	"Sets the random number seed to use for generation. Setting this to a specific number will make the model generate the same text for the same prompt.": "",
+	"Sets the size of the context window used to generate the next token.": "",
 	"Sets the stop sequences to use. When this pattern is encountered, the LLM will stop generating text and return. Multiple stop patterns may be set by specifying multiple separate stop parameters in a modelfile.": "设置要使用的停止序列。遇到这种模式时,大语言模型将停止生成文本并返回。可以通过在模型文件中指定多个单独的停止参数来设置多个停止模式。",
 	"Settings": "设置",
 	"Settings saved successfully!": "设置已成功保存!",
@@ -964,7 +964,7 @@
 	"System Prompt": "系统提示词 (System Prompt)",
 	"Tags Generation": "标签生成",
 	"Tags Generation Prompt": "标签生成提示词",
-	"Tail free sampling is used to reduce the impact of less probable tokens from the output. A higher value (e.g., 2.0) will reduce the impact more, while a value of 1.0 disables this setting. (default: 1)": "Tail free sampling 用于减少输出中可能性较低的Token的影响。数值越大(如 2.0),影响就越小,而数值为 1.0 则会禁用此设置。(默认值:1)",
+	"Tail free sampling is used to reduce the impact of less probable tokens from the output. A higher value (e.g., 2.0) will reduce the impact more, while a value of 1.0 disables this setting.": "",
 	"Talk to model": "与模型交谈",
 	"Tap to interrupt": "点击以中断",
 	"Tasks": "任务",
@@ -979,7 +979,7 @@
 	"Thanks for your feedback!": "感谢您的反馈!",
 	"The Application Account DN you bind with for search": "您所绑定用于搜索的 Application Account DN",
 	"The base to search for users": "搜索用户的 Base",
-	"The batch size determines how many text requests are processed together at once. A higher batch size can increase the performance and speed of the model, but it also requires more memory.  (Default: 512)": "Batch size 决定了同时处理多少个文本请求。Batch size 越大,模型的性能和速度越快,但也需要更多内存。  (默认值:512)",
+	"The batch size determines how many text requests are processed together at once. A higher batch size can increase the performance and speed of the model, but it also requires more memory.": "",
 	"The developers behind this plugin are passionate volunteers from the community. If you find this plugin helpful, please consider contributing to its development.": "本插件的背后开发者是社区中热情的志愿者。如果此插件有帮助到您,烦请考虑一下为它的开发做出贡献。",
 	"The evaluation leaderboard is based on the Elo rating system and is updated in real-time.": "排行榜基于 Elo 评级系统并实时更新。",
 	"The LDAP attribute that maps to the mail that users use to sign in.": "映射到用户登录时使用的邮箱的 LDAP 属性。",
@@ -988,14 +988,14 @@
 	"The maximum file size in MB. If the file size exceeds this limit, the file will not be uploaded.": "最大文件大小(MB)。如果文件大小超过此限制,则无法上传该文件。",
 	"The maximum number of files that can be used at once in chat. If the number of files exceeds this limit, the files will not be uploaded.": "在单次对话中可以使用的最大文件数。如果文件数超过此限制,则文件不会上传。",
 	"The score should be a value between 0.0 (0%) and 1.0 (100%).": "分值应介于 0.0(0%)和 1.0(100%)之间。",
-	"The temperature of the model. Increasing the temperature will make the model answer more creatively. (Default: 0.8)": "模型的温度。提高温度将使模型更具创造性地回答。(默认值:0.8)",
+	"The temperature of the model. Increasing the temperature will make the model answer more creatively.": "",
 	"Theme": "主题",
 	"Thinking...": "正在思考...",
 	"This action cannot be undone. Do you wish to continue?": "此操作无法撤销。是否确认继续?",
 	"This ensures that your valuable conversations are securely saved to your backend database. Thank you!": "这将确保您的宝贵对话被安全地保存到后台数据库中。感谢!",
 	"This is an experimental feature, it may not function as expected and is subject to change at any time.": "这是一个实验功能,可能不会如预期那样工作,而且可能随时发生变化。",
-	"This option controls how many tokens are preserved when refreshing the context. For example, if set to 2, the last 2 tokens of the conversation context will be retained. Preserving context can help maintain the continuity of a conversation, but it may reduce the ability to respond to new topics. (Default: 24)": "该选项控制刷新上下文时保留多少Token。例如,如果设置为 2,就会保留对话上下文的最后 2 个Token。保留上下文有助于保持对话的连续性,但可能会降低回复新话题的能力。(默认值:24)",
-	"This option sets the maximum number of tokens the model can generate in its response. Increasing this limit allows the model to provide longer answers, but it may also increase the likelihood of unhelpful or irrelevant content being generated.  (Default: 128)": "此选项设置了模型在回答中可以生成的最大 Token 数。增加这个限制可以让模型提供更长的答案,但也可能增加生成无用或不相关内容的可能性。  (默认值:128)",
+	"This option controls how many tokens are preserved when refreshing the context. For example, if set to 2, the last 2 tokens of the conversation context will be retained. Preserving context can help maintain the continuity of a conversation, but it may reduce the ability to respond to new topics.": "",
+	"This option sets the maximum number of tokens the model can generate in its response. Increasing this limit allows the model to provide longer answers, but it may also increase the likelihood of unhelpful or irrelevant content being generated.": "",
 	"This option will delete all existing files in the collection and replace them with newly uploaded files.": "此选项将会删除文件集中所有文件,并用新上传的文件替换。",
 	"This response was generated by \"{{model}}\"": "此回复由 \"{{model}}\" 生成",
 	"This will delete": "这将删除",
@@ -1132,7 +1132,7 @@
 	"Why?": "为什么?",
 	"Widescreen Mode": "宽屏模式",
 	"Won": "获胜",
-	"Works together with top-k. A higher value (e.g., 0.95) will lead to more diverse text, while a lower value (e.g., 0.5) will generate more focused and conservative text. (Default: 0.9)": "与 top-k 一起工作。较高的值(例如0.95)将导致更具多样性的文本,而较低的值(例如0.5)将生成更集中和保守的文本。(默认值:0.9)",
+	"Works together with top-k. A higher value (e.g., 0.95) will lead to more diverse text, while a lower value (e.g., 0.5) will generate more focused and conservative text.": "",
 	"Workspace": "工作空间",
 	"Workspace Permissions": "工作空间权限",
 	"Write": "写作",

+ 18 - 18
src/lib/i18n/locales/zh-TW/translation.json

@@ -64,7 +64,7 @@
 	"Allow Voice Interruption in Call": "允許在通話中打斷語音",
 	"Allowed Endpoints": "允許的端點",
 	"Already have an account?": "已經有帳號了嗎?",
-	"Alternative to the top_p, and aims to ensure a balance of quality and variety. The parameter p represents the minimum probability for a token to be considered, relative to the probability of the most likely token. For example, with p=0.05 and the most likely token having a probability of 0.9, logits with a value less than 0.045 are filtered out. (Default: 0.0)": "作為 top_p 的替代方案,旨在確保質量和多樣性的平衡。相對於最可能的 token 機率而言,參數 p 代表一個 token 被考慮在内的最低機率。例如,當 p=0.05 且最可能的 token 機率為 0.9 時,數值低於 0.045 的對數機率會被過濾掉。(預設值:0.0)",
+	"Alternative to the top_p, and aims to ensure a balance of quality and variety. The parameter p represents the minimum probability for a token to be considered, relative to the probability of the most likely token. For example, with p=0.05 and the most likely token having a probability of 0.9, logits with a value less than 0.045 are filtered out.": "",
 	"Always": "總是",
 	"Amazing": "很棒",
 	"an assistant": "一位助手",
@@ -208,7 +208,7 @@
 	"Confirm your new password": "確認您的新密碼",
 	"Connect to your own OpenAI compatible API endpoints.": "連線到您自己的 OpenAI 相容 API 端點。",
 	"Connections": "連線",
-	"Constrains effort on reasoning for reasoning models. Only applicable to reasoning models from specific providers that support reasoning effort. (Default: medium)": "限制用於推理模型的推理程度 。僅適用於特定供應商提供的、支援推理程度設定的推理模型。(預設:中等)",
+	"Constrains effort on reasoning for reasoning models. Only applicable to reasoning models from specific providers that support reasoning effort.": "",
 	"Contact Admin for WebUI Access": "請聯絡管理員以取得 WebUI 存取權限",
 	"Content": "內容",
 	"Content Extraction Engine": "內容擷取引擎",
@@ -218,9 +218,9 @@
 	"Continue with Email": "使用 Email 繼續",
 	"Continue with LDAP": "使用 LDAP 繼續",
 	"Control how message text is split for TTS requests. 'Punctuation' splits into sentences, 'paragraphs' splits into paragraphs, and 'none' keeps the message as a single string.": "控制文字轉語音(TTS)請求中如何分割訊息文字。「標點符號」分割為句子,「段落」分割為段落,「無」則保持訊息為單一字串。",
-	"Control the repetition of token sequences in the generated text. A higher value (e.g., 1.5) will penalize repetitions more strongly, while a lower value (e.g., 1.1) will be more lenient. At 1, it is disabled. (Default: 1.1)": "控制在生成文本中 token 序列的重複程度。 數值越高(例如 1.5)將會更強烈地懲罰重複,而數值越低(例如 1.1)則會較為寬鬆。 若數值為 1,則停用此功能。(預設值:1.1)",
+	"Control the repetition of token sequences in the generated text. A higher value (e.g., 1.5) will penalize repetitions more strongly, while a lower value (e.g., 1.1) will be more lenient. At 1, it is disabled.": "",
 	"Controls": "控制項",
-	"Controls the balance between coherence and diversity of the output. A lower value will result in more focused and coherent text. (Default: 5.0)": "控制輸出的連貫性和多樣性之間的平衡。較低的值會產生更專注和連貫的文字。(預設:5.0)",
+	"Controls the balance between coherence and diversity of the output. A lower value will result in more focused and coherent text.": "",
 	"Copied": "已複製",
 	"Copied shared chat URL to clipboard!": "已複製共用對話 URL 到剪貼簿!",
 	"Copied to clipboard": "已複製到剪貼簿",
@@ -358,7 +358,7 @@
 	"Enable Memory Locking (mlock) to prevent model data from being swapped out of RAM. This option locks the model's working set of pages into RAM, ensuring that they will not be swapped out to disk. This can help maintain performance by avoiding page faults and ensuring fast data access.": "啟用記憶體鎖定(mlock)以防止模型資料被換出 RAM。此選項會將模型的工作頁面集鎖定在 RAM 中,確保它們不會被換出到磁碟。這可以透過避免頁面錯誤和確保快速資料存取來維持效能。",
 	"Enable Memory Mapping (mmap) to load model data. This option allows the system to use disk storage as an extension of RAM by treating disk files as if they were in RAM. This can improve model performance by allowing for faster data access. However, it may not work correctly with all systems and can consume a significant amount of disk space.": "啟用記憶體映射(mmap)以載入模型資料。此選項允許系統使用磁碟儲存作為 RAM 的延伸,透過將磁碟檔案視為在 RAM 中來處理。這可以透過允許更快的資料存取來改善模型效能。然而,它可能無法在所有系統上正常運作,並且可能會消耗大量磁碟空間。",
 	"Enable Message Rating": "啟用訊息評分",
-	"Enable Mirostat sampling for controlling perplexity. (Default: 0, 0 = Disabled, 1 = Mirostat, 2 = Mirostat 2.0)": "啟用 Mirostat 採樣以控制困惑度。(預設:0,0 = 停用,1 = Mirostat,2 = Mirostat 2.0)",
+	"Enable Mirostat sampling for controlling perplexity.": "",
 	"Enable New Sign Ups": "允許新使用者註冊",
 	"Enabled": "已啟用",
 	"Ensure your CSV file includes 4 columns in this order: Name, Email, Password, Role.": "請確認您的 CSV 檔案包含以下 4 個欄位,並按照此順序排列:姓名、電子郵件、密碼、角色。",
@@ -566,7 +566,7 @@
 	"Include": "包含",
 	"Include `--api-auth` flag when running stable-diffusion-webui": "執行 stable-diffusion-webui 時包含 `--api-auth` 參數",
 	"Include `--api` flag when running stable-diffusion-webui": "執行 stable-diffusion-webui 時包含 `--api` 參數",
-	"Influences how quickly the algorithm responds to feedback from the generated text. A lower learning rate will result in slower adjustments, while a higher learning rate will make the algorithm more responsive. (Default: 0.1)": "影響演算法對回饋的反應速度。較低的學習率會導致調整速度較慢,而較高的學習率會使演算法更快回應。(預設:0.1)",
+	"Influences how quickly the algorithm responds to feedback from the generated text. A lower learning rate will result in slower adjustments, while a higher learning rate will make the algorithm more responsive.": "",
 	"Info": "資訊",
 	"Input commands": "輸入命令",
 	"Install from Github URL": "從 GitHub URL 安裝",
@@ -809,7 +809,7 @@
 	"Reasoning Effort": "推理程度",
 	"Record voice": "錄音",
 	"Redirecting you to Open WebUI Community": "正在將您重導向至 OpenWebUI 社群",
-	"Reduces the probability of generating nonsense. A higher value (e.g. 100) will give more diverse answers, while a lower value (e.g. 10) will be more conservative. (Default: 40)": "降低產生無意義內容的機率。較高的值(例如 100)會給出更多樣化的答案,而較低的值(例如 10)會更保守。(預設:40)",
+	"Reduces the probability of generating nonsense. A higher value (e.g. 100) will give more diverse answers, while a lower value (e.g. 10) will be more conservative.": "",
 	"Refer to yourself as \"User\" (e.g., \"User is learning Spanish\")": "以「使用者」稱呼自己(例如:「使用者正在學習西班牙文」)",
 	"References from": "引用來源",
 	"Refused when it shouldn't have": "不應拒絕時拒絕了",
@@ -918,11 +918,11 @@
 	"Set the number of worker threads used for computation. This option controls how many threads are used to process incoming requests concurrently. Increasing this value can improve performance under high concurrency workloads but may also consume more CPU resources.": "設定用於計算的工作執行緒數量。此選項控制使用多少執行緒來同時處理傳入的請求。增加此值可以在高併發工作負載下提升效能,但也可能消耗更多 CPU 資源。",
 	"Set Voice": "設定語音",
 	"Set whisper model": "設定 whisper 模型",
-	"Sets a flat bias against tokens that have appeared at least once. A higher value (e.g., 1.5) will penalize repetitions more strongly, while a lower value (e.g., 0.9) will be more lenient. At 0, it is disabled. (Default: 0)": "針對至少出現一次的 token,設定固定的負面偏見。 數值越高(例如 1.5)將會更強烈地懲罰重複,而數值越低(例如 0.9)則會較為寬鬆。 若數值為 0,則停用此功能。(預設值:0)",
-	"Sets a scaling bias against tokens to penalize repetitions, based on how many times they have appeared. A higher value (e.g., 1.5) will penalize repetitions more strongly, while a lower value (e.g., 0.9) will be more lenient. At 0, it is disabled. (Default: 1.1)": "針對 token 設定比例偏差以懲罰重複,其基於 token 出現的次數。 數值越高(例如 1.5)將會更強烈地懲罰重複,而數值越低(例如 0.9)則會較為寬鬆。 若數值為 0,則停用此功能。(預設值:1.1)",
-	"Sets how far back for the model to look back to prevent repetition. (Default: 64, 0 = disabled, -1 = num_ctx)": "設定模型向後查看以防止重複的距離。(預設:64,0 = 停用,-1 = num_ctx)",
-	"Sets the random number seed to use for generation. Setting this to a specific number will make the model generate the same text for the same prompt. (Default: random)": "設定用於生成的隨機數種子。將其設定為特定數字會使模型對相同的提示詞產生相同的文字。(預設:隨機)",
-	"Sets the size of the context window used to generate the next token. (Default: 2048)": "設定用於生成下一個 token 的上下文視窗大小。(預設:2048)",
+	"Sets a flat bias against tokens that have appeared at least once. A higher value (e.g., 1.5) will penalize repetitions more strongly, while a lower value (e.g., 0.9) will be more lenient. At 0, it is disabled.": "",
+	"Sets a scaling bias against tokens to penalize repetitions, based on how many times they have appeared. A higher value (e.g., 1.5) will penalize repetitions more strongly, while a lower value (e.g., 0.9) will be more lenient. At 0, it is disabled.": "",
+	"Sets how far back for the model to look back to prevent repetition.": "",
+	"Sets the random number seed to use for generation. Setting this to a specific number will make the model generate the same text for the same prompt.": "",
+	"Sets the size of the context window used to generate the next token.": "",
 	"Sets the stop sequences to use. When this pattern is encountered, the LLM will stop generating text and return. Multiple stop patterns may be set by specifying multiple separate stop parameters in a modelfile.": "設定要使用的停止序列。當遇到此模式時,大型語言模型將停止生成文字並返回。可以在模型檔案中指定多個單獨的停止參數來設定多個停止模式。",
 	"Settings": "設定",
 	"Settings saved successfully!": "設定已成功儲存!",
@@ -964,7 +964,7 @@
 	"System Prompt": "系統提示詞",
 	"Tags Generation": "標籤生成",
 	"Tags Generation Prompt": "標籤生成提示詞",
-	"Tail free sampling is used to reduce the impact of less probable tokens from the output. A higher value (e.g., 2.0) will reduce the impact more, while a value of 1.0 disables this setting. (default: 1)": "使用無尾採樣來減少較不可能的 token 對輸出的影響。較高的值(例如 2.0)會減少更多影響,而值為 1.0 則停用此設定。(預設:1)",
+	"Tail free sampling is used to reduce the impact of less probable tokens from the output. A higher value (e.g., 2.0) will reduce the impact more, while a value of 1.0 disables this setting.": "",
 	"Talk to model": "與模型對話",
 	"Tap to interrupt": "點選以中斷",
 	"Tasks": "任務",
@@ -979,7 +979,7 @@
 	"Thanks for your feedback!": "感謝您的回饋!",
 	"The Application Account DN you bind with for search": "您綁定用於搜尋的應用程式帳號 DN",
 	"The base to search for users": "搜尋使用者的基礎",
-	"The batch size determines how many text requests are processed together at once. A higher batch size can increase the performance and speed of the model, but it also requires more memory.  (Default: 512)": "批次大小決定一次處理多少文字請求。較高的批次大小可以提高模型的效能和速度,但也需要更多記憶體。(預設:512)",
+	"The batch size determines how many text requests are processed together at once. A higher batch size can increase the performance and speed of the model, but it also requires more memory.": "",
 	"The developers behind this plugin are passionate volunteers from the community. If you find this plugin helpful, please consider contributing to its development.": "這個外掛背後的開發者是來自社群的熱情志願者。如果您覺得這個外掛很有幫助,請考慮為其開發做出貢獻。",
 	"The evaluation leaderboard is based on the Elo rating system and is updated in real-time.": "評估排行榜基於 Elo 評分系統,並即時更新。",
 	"The LDAP attribute that maps to the mail that users use to sign in.": "映射到使用者用於登入的使用者郵箱的 LDAP 屬性。",
@@ -988,14 +988,14 @@
 	"The maximum file size in MB. If the file size exceeds this limit, the file will not be uploaded.": "檔案大小上限(MB)。如果檔案大小超過此限制,檔案將不會被上傳。",
 	"The maximum number of files that can be used at once in chat. If the number of files exceeds this limit, the files will not be uploaded.": "對話中一次可使用的最大檔案數量。如果檔案數量超過此限制,檔案將不會被上傳。",
 	"The score should be a value between 0.0 (0%) and 1.0 (100%).": "分數應該是介於 0.0(0%)和 1.0(100%)之間的值。",
-	"The temperature of the model. Increasing the temperature will make the model answer more creatively. (Default: 0.8)": "模型的溫度。提高溫度會使模型回答更具創意。(預設:0.8)",
+	"The temperature of the model. Increasing the temperature will make the model answer more creatively.": "",
 	"Theme": "主題",
 	"Thinking...": "正在思考...",
 	"This action cannot be undone. Do you wish to continue?": "此操作無法復原。您確定要繼續進行嗎?",
 	"This ensures that your valuable conversations are securely saved to your backend database. Thank you!": "這確保您寶貴的對話會安全地儲存到您的後端資料庫。謝謝!",
 	"This is an experimental feature, it may not function as expected and is subject to change at any time.": "這是一個實驗性功能,它可能無法如預期運作,並且可能會隨時變更。",
-	"This option controls how many tokens are preserved when refreshing the context. For example, if set to 2, the last 2 tokens of the conversation context will be retained. Preserving context can help maintain the continuity of a conversation, but it may reduce the ability to respond to new topics. (Default: 24)": "此選項控制重新整理上下文時保留多少 token。例如,如果設定為 2,則會保留對話上下文的最後 2 個 token。保留上下文可以幫助維持對話的連續性,但可能會降低回應新主題的能力。(預設:24)",
-	"This option sets the maximum number of tokens the model can generate in its response. Increasing this limit allows the model to provide longer answers, but it may also increase the likelihood of unhelpful or irrelevant content being generated.  (Default: 128)": "此選項設定模型可以在其回應中生成的最大 token 數量。增加此限制允許模型提供更長的答案,但也可能增加產生無用或不相關內容的可能性。(預設:128)",
+	"This option controls how many tokens are preserved when refreshing the context. For example, if set to 2, the last 2 tokens of the conversation context will be retained. Preserving context can help maintain the continuity of a conversation, but it may reduce the ability to respond to new topics.": "",
+	"This option sets the maximum number of tokens the model can generate in its response. Increasing this limit allows the model to provide longer answers, but it may also increase the likelihood of unhelpful or irrelevant content being generated.": "",
 	"This option will delete all existing files in the collection and replace them with newly uploaded files.": "此選項將刪除集合中的所有現有檔案,並用新上傳的檔案取代它們。",
 	"This response was generated by \"{{model}}\"": "此回應由「{{model}}」生成",
 	"This will delete": "這將會刪除",
@@ -1132,7 +1132,7 @@
 	"Why?": "為什麼?",
 	"Widescreen Mode": "寬螢幕模式",
 	"Won": "獲勝",
-	"Works together with top-k. A higher value (e.g., 0.95) will lead to more diverse text, while a lower value (e.g., 0.5) will generate more focused and conservative text. (Default: 0.9)": "與 top-k 一起運作。較高的值(例如 0.95)會產生更多樣化的文字,而較低的值(例如 0.5)會產生更專注和保守的文字。(預設:0.9)",
+	"Works together with top-k. A higher value (e.g., 0.95) will lead to more diverse text, while a lower value (e.g., 0.5) will generate more focused and conservative text.": "",
 	"Workspace": "工作區",
 	"Workspace Permissions": "工作區權限",
 	"Write": "寫入",