{# changedetectionio/blueprint/settings/templates/settings_llm_tab.html #}
{% from '_helpers.html' import render_field %} {% from '_stab.html' import stab_shell, stab_pane %} {# AI / LLM settings tab content — included from settings.html. Requires template context: form, llm_config, llm_env_configured #}
{# NOTE(review): the Usage pane below additionally reads llm_stored, llm_show_costs, llm_token_budget_month, llm_token_budget_month_env, llm_max_input_chars_env and llm_effective_max_input_chars — consider adding them to the context contract comment above. #}
{# TRANSLATORS: 'Usage' here means token consumption/cost stats for the AI provider, not a how-to guide #} {% set _usage_label = pgettext('AI usage stats', 'Usage') %} {% call stab_shell('ai-settings', [{'id': 'overview', 'label': _('Overview'), 'icon': '✦'}, {'id': 'provider', 'label': _('Provider'), 'icon': '⚙'}, {'id': 'prompts', 'label': _('Prompts'), 'icon': '≡'}, {'id': 'behaviour', 'label': _('Behaviour'), 'icon': '⚑'}, {'id': 'usage', 'label': _usage_label, 'icon': '$'},]) %} {# ── Overview ──────────────────────────────────────────────────────────── #} {% call stab_pane('overview') %}
{{ _('Connect an LLM to move from "something changed" to "only the thing you care about changed".') }}
⊞
{{ _('Intent filtering') }}
{{ _('Each watch or tag can carry a plain-text intent — %(ex1)s or %(ex2)s. On every detected change the AI evaluates the diff against it and suppresses irrelevant noise.', ex1=' "notify me only when the price drops"', ex2=' "alert when the item goes out of stock"') | safe }}
≡
{{ _('AI Change Summary') }}
{{ _('Instead of raw diffs, receive plain-language summaries in notifications — %(ex1)s or %(ex2)s. Set a global default prompt here, or override per watch or tag.', ex1=' "Price dropped from $89 to $67"', ex2=' "3 new items added to the listing"') | safe }}
≈
{{ _('Minimal cost') }}
{{ _('The AI sees only a unified diff of what changed — never full page HTML. Low-cost models like %(gpt)s or %(gemini)s handle this well, typically fractions of a cent per check.', gpt='gpt-4o-mini', gemini='Gemini Flash') | safe }}
{# Status line: show the configured model when one exists, otherwise a call-to-action pointing at the Provider pane #}
{% if llm_config and llm_config.get('model') %} ✓ {{ _('AI / LLM configured:') }} {{ llm_config.get('model') }} {% else %} ⚙ {{ _('Configure AI Provider') }} → {% endif %} {% endcall %} {# ── Provider ──────────────────────────────────────────────────────────── #} {% call stab_pane('provider') %}
{{ _('AI Provider') }}
{# Third-party data-transfer consent notice — shown only while no model is configured via env vars or the form #}
{% if not llm_env_configured and not (llm_config and llm_config.get('model')) %}
⚠
{{ _('Third-party data transfer — please read') }}
{{ _('When AI features are active, change data from the websites you monitor — including page diffs and extracted text — is sent to an external AI provider of your choice.') }}
{{ _('I have read and understood the above. I accept full responsibility and indemnify the creator(s) of this software.') }}
{# Environment-variable configuration (LLM_MODEL) takes precedence: show a read-only notice instead of the form fields #}
{% endif %} {% if llm_env_configured %}
{{ _('AI / LLM is configured via environment variables (LLM_MODEL=%(model)s%(api_base)s). Remove the LLM_MODEL environment variable to configure via this form instead.', model=llm_config.get('model', '')|e, api_base=(', LLM_API_BASE=' ~ (llm_config.get('api_base')|e) ~ '') if llm_config.get('api_base') else '') | safe }}
{% else %}
{# Form-based configuration: provider choice, API key, optional base URL, then model selection (populated client-side via 'Load available models') #}
{{ _('Provider') }}— {{ _('select a provider') }} —OpenAIAnthropicGoogle (Gemini)Ollama (local)OpenRouter (200+ models)
{{ render_field(form.llm.form.llm_api_key) }}
{{ render_field(form.llm.form.llm_api_base) }} {{ _('Only needed for Ollama or custom/self-hosted endpoints. Leave blank for cloud providers.') }}
↻ {{ _('Load available models') }}
{{ _('Available models') }}— {{ _('choose a model') }} —
{# Model field stays readonly until a key is supplied and models are loaded; controls for Remove/Test appear once a model is stored #}
{{ render_field(form.llm.form.llm_model, readonly=True, placeholder=_("Enter API key and click 'Load available models'")) }} {% if llm_config and llm_config.get('model') %} ✓ {{ _('AI / LLM configured:') }} {{ llm_config.get('model') }} ✕ {{ _('Remove') }} ▶ {{ _('Test connection') }}
{% endif %}
{{ _("Your API key is stored locally and sent only to your chosen provider. On each detected change, the watch's diff and extracted text are sent to the LLM — no full page HTML.") }}
{# NOTE(review): the trailing `{% if not llm_env_configured ... %}` guard on the next line wraps only the '{# llm-provider-fields #}' placeholder comment and emits nothing — looks like a leftover; confirm nothing (JS/selectors) depends on it before removing #}
{{ _('Cache') }}✕ {{ _('Clear all summary cache') }}{{ _('Removes all cached AI change summaries across all watches. They will be regenerated on the next check.') }} {% endif %}{# llm_env_configured #} {% if not llm_env_configured and not (llm_config and llm_config.get('model')) %} {# llm-provider-fields #} {% endif %} {% endcall %} {# ── Prompts ───────────────────────────────────────────────────────────── #} {% call stab_pane('prompts') %}
{{ _('Default AI Change Summary') }}
{{ render_field(form.llm.form.llm_change_summary_default) }} {{ _('Used for all watches unless overridden by the watch or its tag/group.') }} {{ _('Modify default prompt') }} {% endcall %} {# ── Behaviour ─────────────────────────────────────────────────────────── #} {% call stab_pane('behaviour') %}
{{ _('Behaviour') }}
{# Behaviour toggles are only shown once a model is configured; otherwise a hint is rendered instead #}
{% if llm_config and llm_config.get('model') %}
{{ form.llm.form.llm_override_diff_with_summary() }} {{ form.llm.form.llm_override_diff_with_summary.label.text }} {{ _('When enabled, the %(diff)s notification token shows the AI summary instead of the raw diff. Use %(raw_diff)s to always get the original.', diff='{{diff}}', raw_diff='{{raw_diff}}') | safe }}
{{ form.llm.form.llm_restock_use_fallback_extract() }} {{ form.llm.form.llm_restock_use_fallback_extract.label.text }} {{ _('When enabled, the AI will be used as a last resort to extract price and stock status from product pages where no structured metadata (JSON-LD, microdata, OpenGraph) is found.') }}
{{ form.llm.form.llm_thinking_budget.label.text }} {{ form.llm.form.llm_thinking_budget() }} {{ _('For Gemini 2.5+ models only. Thinking tokens improve reasoning quality but count against the output budget. Set to Off if summaries are being cut short.') }}
{{ form.llm.form.llm_max_summary_tokens.label.text }} {{ form.llm.form.llm_max_summary_tokens() }} {{ _('Upper limit on tokens the AI may use when writing a change summary. Higher values allow longer summaries but cost more.') }}
{{ form.llm.form.llm_budget_action.label.text }} {% for subfield in form.llm.form.llm_budget_action %} {{ subfield() }} {{ subfield.label.text }} {% endfor %}
{% else %}
{{ _('Configure a provider first to unlock behaviour settings.') }}
{% endif %} {% endcall %} {# ── Usage ─────────────────────────────────────────────────────────────── #} {% call stab_pane('usage') %}
{{ _('Token & Cost Tracking') }}
{# Stats branch: rendered when any usage has been recorded (this month or all-time); otherwise the zero-state branch below #}
{% if llm_stored.get('tokens_total_cumulative') or llm_stored.get('tokens_this_month') %}
{{ _('This month') }}
{{ '{:,}'.format(llm_stored.get('tokens_this_month', 0)) }}
{# Cost shown only when llm_show_costs is set and a nonzero cost exists; pct is integer percent of the monthly budget #}
{{ _('tokens') }}{% if llm_show_costs and llm_stored.get('cost_usd_this_month') %} · ≈ ${{ '%.4f'|format(llm_stored.get('cost_usd_this_month', 0)) }}{% endif %} {% if llm_token_budget_month %} {% set pct = (llm_stored.get('tokens_this_month', 0) / llm_token_budget_month * 100)|int %}
{{ _('%(percent)s%% of %(budget)s', percent=pct, budget='{:,}'.format(llm_token_budget_month)) }} {% endif %}
{{ _('All-time total') }}
{{ '{:,}'.format(llm_stored.get('tokens_total_cumulative', 0)) }}
{{ _('tokens') }}{% if llm_show_costs and llm_stored.get('cost_usd_total_cumulative') %} · ≈ ${{ '%.4f'|format(llm_stored.get('cost_usd_total_cumulative', 0)) }}{% endif %}
{% if llm_token_budget_month and llm_stored.get('tokens_this_month', 0) >= llm_token_budget_month %}
⚠ {{ _('Monthly token budget reached. AI summarisation is paused until next month.') }}
{% endif %}
{# Budget field: the LLM_TOKEN_BUDGET_MONTH env var overrides the form field (static value displayed instead of an input) #}
{{ _('Token budget this period') }} {% if llm_token_budget_month_env %} {{ '{:,}'.format(llm_token_budget_month_env) }} {{ _('(set via LLM_TOKEN_BUDGET_MONTH)') | safe }} {% else %} {{ form.llm.form.llm_token_budget_month(placeholder=_('0 = unlimited'), value=llm_stored.get('token_budget_month', 0) or '') }} {{ _('tokens (0 = unlimited)') }} {% endif %}
{% if llm_stored.get('tokens_month_key') %}
{{ _('Current billing period') }}{{ llm_stored.get('tokens_month_key') }}
{% endif %}
{# Max-input field: LLM_MAX_INPUT_CHARS env var renders the field readonly; otherwise editable with the stored value #}
{{ _('Max input characters') }} {% if llm_max_input_chars_env %} {{ form.llm.form.llm_max_input_chars(value=llm_max_input_chars_env, readonly=True, style="width:10em;opacity:0.6;cursor:not-allowed;") }} {{ _('(set via LLM_MAX_INPUT_CHARS)') | safe }} {% else %} {{ form.llm.form.llm_max_input_chars(placeholder='100000', value=llm_stored.get('max_input_chars', 100000) or '') }} {{ _('characters — currently enforcing: %(n)s', n='{:,}'.format(llm_effective_max_input_chars)) }} {% endif %}
{% else %}
{# Zero-state branch: no usage yet, but budget and max-input fields are still configurable (duplicated from the stats branch above) #}
{{ _('No AI usage recorded yet.') }}
{{ _('Token budget') }} {% if llm_token_budget_month_env %} {{ '{:,}'.format(llm_token_budget_month_env) }} {{ _('(set via LLM_TOKEN_BUDGET_MONTH)') | safe }} {% else %} {{ form.llm.form.llm_token_budget_month(placeholder=_('0 = unlimited'), value=llm_stored.get('token_budget_month', 0) or '') }} {{ _('tokens per month (0 = unlimited)') }} {% endif %}
{{ _('Max input characters') }} {% if llm_max_input_chars_env %} {{ form.llm.form.llm_max_input_chars(value=llm_max_input_chars_env, readonly=True, style="width:10em;opacity:0.6;cursor:not-allowed;") }} {{ _('(set via LLM_MAX_INPUT_CHARS)') | safe }} {% else %} {{ form.llm.form.llm_max_input_chars(placeholder='100000', value=llm_stored.get('max_input_chars', 100000) or '') }} {{ _('characters — currently enforcing: %(n)s', n='{:,}'.format(llm_effective_max_input_chars)) }} {% endif %}
{% endif %} {% endcall %} {% endcall %}{# stab_shell #}