Merge pull request #1325 from TriliumNext/ai-llm-integration

[WIP] AI/LLM integration
This commit is contained in:
Elian Doran 2025-04-17 22:57:30 +03:00 committed by GitHub
commit 03e3863b16
No known key found for this signature in database
GPG Key ID: B5690EEEBB952194
224 changed files with 35731 additions and 51 deletions

View File

@ -0,0 +1,46 @@
-- Add tables for vector embeddings storage and management
-- This migration adds embedding support to the main document.db database
-- Store embeddings for notes
CREATE TABLE IF NOT EXISTS "note_embeddings" (
"embedId" TEXT NOT NULL PRIMARY KEY,
"noteId" TEXT NOT NULL,
"providerId" TEXT NOT NULL,
"modelId" TEXT NOT NULL,
"dimension" INTEGER NOT NULL,
"embedding" BLOB NOT NULL,
"version" INTEGER NOT NULL DEFAULT 1,
"dateCreated" TEXT NOT NULL,
"utcDateCreated" TEXT NOT NULL,
"dateModified" TEXT NOT NULL,
"utcDateModified" TEXT NOT NULL
);
CREATE INDEX "IDX_note_embeddings_noteId" ON "note_embeddings" ("noteId");
CREATE INDEX "IDX_note_embeddings_providerId_modelId" ON "note_embeddings" ("providerId", "modelId");
-- Table to track which notes need embedding updates
CREATE TABLE IF NOT EXISTS "embedding_queue" (
"noteId" TEXT NOT NULL PRIMARY KEY,
"operation" TEXT NOT NULL, -- CREATE, UPDATE, DELETE
"dateQueued" TEXT NOT NULL,
"utcDateQueued" TEXT NOT NULL,
"priority" INTEGER NOT NULL DEFAULT 0,
"attempts" INTEGER NOT NULL DEFAULT 0,
"lastAttempt" TEXT NULL,
"error" TEXT NULL,
"failed" INTEGER NOT NULL DEFAULT 0,
"isProcessing" INTEGER NOT NULL DEFAULT 0
);
-- Table to store embedding provider configurations
CREATE TABLE IF NOT EXISTS "embedding_providers" (
"providerId" TEXT NOT NULL PRIMARY KEY,
"name" TEXT NOT NULL,
"priority" INTEGER NOT NULL DEFAULT 0,
"config" TEXT NOT NULL, -- JSON config object
"dateCreated" TEXT NOT NULL,
"utcDateCreated" TEXT NOT NULL,
"dateModified" TEXT NOT NULL,
"utcDateModified" TEXT NOT NULL
);
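The `embedding` column holds the raw vector bytes, with `dimension` recording the vector length. As a minimal sketch of how a vector could be converted to and from that BLOB (assuming little-endian `float32` packing, which is an assumption rather than something the migration itself specifies):

```
// Sketch: pack a vector into a Buffer for the "embedding" BLOB column.
function packEmbedding(vector: number[]): Buffer {
    return Buffer.from(new Float32Array(vector).buffer);
}

// Unpack using the stored "dimension"; copy the exact byte range in case
// the Buffer is an unaligned view into a larger allocation.
function unpackEmbedding(blob: Buffer, dimension: number): Float32Array {
    return new Float32Array(blob.buffer.slice(blob.byteOffset, blob.byteOffset + dimension * 4));
}
```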

View File

@ -145,3 +145,45 @@ CREATE INDEX IDX_attachments_ownerId_role
CREATE INDEX IDX_notes_blobId on notes (blobId);
CREATE INDEX IDX_revisions_blobId on revisions (blobId);
CREATE INDEX IDX_attachments_blobId on attachments (blobId);
CREATE TABLE IF NOT EXISTS "note_embeddings" (
"embedId" TEXT NOT NULL PRIMARY KEY,
"noteId" TEXT NOT NULL,
"providerId" TEXT NOT NULL,
"modelId" TEXT NOT NULL,
"dimension" INTEGER NOT NULL,
"embedding" BLOB NOT NULL,
"version" INTEGER NOT NULL DEFAULT 1,
"dateCreated" TEXT NOT NULL,
"utcDateCreated" TEXT NOT NULL,
"dateModified" TEXT NOT NULL,
"utcDateModified" TEXT NOT NULL
);
CREATE INDEX "IDX_note_embeddings_noteId" ON "note_embeddings" ("noteId");
CREATE INDEX "IDX_note_embeddings_providerId_modelId" ON "note_embeddings" ("providerId", "modelId");
CREATE TABLE IF NOT EXISTS "embedding_queue" (
"noteId" TEXT NOT NULL PRIMARY KEY,
"operation" TEXT NOT NULL,
"dateQueued" TEXT NOT NULL,
"utcDateQueued" TEXT NOT NULL,
"priority" INTEGER NOT NULL DEFAULT 0,
"attempts" INTEGER NOT NULL DEFAULT 0,
"lastAttempt" TEXT NULL,
"error" TEXT NULL,
"failed" INTEGER NOT NULL DEFAULT 0,
"isProcessing" INTEGER NOT NULL DEFAULT 0
);
CREATE TABLE IF NOT EXISTS "embedding_providers" (
"providerId" TEXT NOT NULL PRIMARY KEY,
"name" TEXT NOT NULL,
"isEnabled" INTEGER NOT NULL DEFAULT 0,
"priority" INTEGER NOT NULL DEFAULT 0,
"config" TEXT NOT NULL,
"dateCreated" TEXT NOT NULL,
"utcDateCreated" TEXT NOT NULL,
"dateModified" TEXT NOT NULL,
"utcDateModified" TEXT NOT NULL
);

View File

@ -1,6 +1,6 @@
{
"formatVersion": 2,
"appVersion": "0.92.7",
"appVersion": "0.93.0",
"files": [
{
"isClone": false,

View File

@ -1,6 +1,6 @@
{
"formatVersion": 2,
"appVersion": "0.92.7",
"appVersion": "0.93.0",
"files": [
{
"isClone": false,

View File

@ -1,5 +1,4 @@
# v0.93.0
## 🐞 Bugfixes
* Calendar does not hide when clicking on a note by @JYC333

View File

@ -1,6 +1,6 @@
{
"formatVersion": 2,
"appVersion": "0.92.7",
"appVersion": "0.93.0",
"files": [
{
"isClone": false,
@ -10598,6 +10598,369 @@
}
]
},
{
"isClone": false,
"noteId": "LMAv4Uy3Wk6J",
"notePath": [
"pOsGYCXsbNQG",
"LMAv4Uy3Wk6J"
],
"title": "AI",
"notePosition": 320,
"prefix": null,
"isExpanded": false,
"type": "book",
"mime": "",
"attributes": [
{
"type": "label",
"name": "iconClass",
"value": "bx bx-bot",
"isInheritable": false,
"position": 10
},
{
"type": "label",
"name": "viewType",
"value": "list",
"isInheritable": false,
"position": 20
},
{
"type": "label",
"name": "expanded",
"value": "",
"isInheritable": false,
"position": 30
}
],
"attachments": [],
"dirFileName": "AI",
"children": [
{
"isClone": false,
"noteId": "GBBMSlVSOIGP",
"notePath": [
"pOsGYCXsbNQG",
"LMAv4Uy3Wk6J",
"GBBMSlVSOIGP"
],
"title": "Introduction",
"notePosition": 10,
"prefix": null,
"isExpanded": false,
"type": "text",
"mime": "text/html",
"attributes": [
{
"type": "relation",
"name": "internalLink",
"value": "vvUCN7FDkq7G",
"isInheritable": false,
"position": 10
}
],
"format": "markdown",
"dataFileName": "Introduction.md",
"attachments": [
{
"attachmentId": "4UpXwA3WvbmA",
"title": "image.png",
"role": "image",
"mime": "image/png",
"position": 10,
"dataFileName": "Introduction_image.png"
},
{
"attachmentId": "8Bn5IsE3Bv1k",
"title": "image.png",
"role": "image",
"mime": "image/png",
"position": 10,
"dataFileName": "1_Introduction_image.png"
},
{
"attachmentId": "ABN1rFIIJ8no",
"title": "image.png",
"role": "image",
"mime": "image/png",
"position": 10,
"dataFileName": "2_Introduction_image.png"
},
{
"attachmentId": "CK3z7sYw63XT",
"title": "image.png",
"role": "image",
"mime": "image/png",
"position": 10,
"dataFileName": "3_Introduction_image.png"
},
{
"attachmentId": "E6Y09N2t7vyA",
"title": "image.png",
"role": "image",
"mime": "image/png",
"position": 10,
"dataFileName": "4_Introduction_image.png"
},
{
"attachmentId": "JlIPeTtl5wlV",
"title": "image.png",
"role": "image",
"mime": "image/png",
"position": 10,
"dataFileName": "5_Introduction_image.png"
},
{
"attachmentId": "ur4TDJeRqpUC",
"title": "image.png",
"role": "image",
"mime": "image/png",
"position": 10,
"dataFileName": "6_Introduction_image.png"
},
{
"attachmentId": "UTH83LkQEA8u",
"title": "image.png",
"role": "image",
"mime": "image/png",
"position": 10,
"dataFileName": "7_Introduction_image.png"
},
{
"attachmentId": "V68TCCTUdyl7",
"title": "image.png",
"role": "image",
"mime": "image/png",
"position": 10,
"dataFileName": "8_Introduction_image.png"
},
{
"attachmentId": "YbWoNq58T9kB",
"title": "image.png",
"role": "image",
"mime": "image/png",
"position": 10,
"dataFileName": "9_Introduction_image.png"
}
]
},
{
"isClone": false,
"noteId": "WkM7gsEUyCXs",
"notePath": [
"pOsGYCXsbNQG",
"LMAv4Uy3Wk6J",
"WkM7gsEUyCXs"
],
"title": "AI Provider Information",
"notePosition": 20,
"prefix": null,
"isExpanded": false,
"type": "text",
"mime": "text/html",
"attributes": [
{
"type": "relation",
"name": "internalLink",
"value": "7EdTxPADv95W",
"isInheritable": false,
"position": 10
},
{
"type": "relation",
"name": "internalLink",
"value": "ZavFigBX9AwP",
"isInheritable": false,
"position": 20
},
{
"type": "relation",
"name": "internalLink",
"value": "e0lkirXEiSNc",
"isInheritable": false,
"position": 30
},
{
"type": "label",
"name": "viewType",
"value": "list",
"isInheritable": false,
"position": 10
}
],
"format": "markdown",
"dataFileName": "AI Provider Information.md",
"attachments": [
{
"attachmentId": "BNN9Vv3JEf2X",
"title": "image.png",
"role": "image",
"mime": "image/png",
"position": 10,
"dataFileName": "AI Provider Information_im.png"
},
{
"attachmentId": "diIollN3KEbn",
"title": "image.png",
"role": "image",
"mime": "image/png",
"position": 10,
"dataFileName": "1_AI Provider Information_im.png"
}
],
"dirFileName": "AI Provider Information",
"children": [
{
"isClone": false,
"noteId": "7EdTxPADv95W",
"notePath": [
"pOsGYCXsbNQG",
"LMAv4Uy3Wk6J",
"WkM7gsEUyCXs",
"7EdTxPADv95W"
],
"title": "Ollama",
"notePosition": 10,
"prefix": null,
"isExpanded": false,
"type": "book",
"mime": "",
"attributes": [
{
"type": "label",
"name": "viewType",
"value": "list",
"isInheritable": false,
"position": 10
},
{
"type": "label",
"name": "expanded",
"value": "",
"isInheritable": false,
"position": 20
}
],
"attachments": [],
"dirFileName": "Ollama",
"children": [
{
"isClone": false,
"noteId": "vvUCN7FDkq7G",
"notePath": [
"pOsGYCXsbNQG",
"LMAv4Uy3Wk6J",
"WkM7gsEUyCXs",
"7EdTxPADv95W",
"vvUCN7FDkq7G"
],
"title": "Installing Ollama",
"notePosition": 10,
"prefix": null,
"isExpanded": false,
"type": "text",
"mime": "text/html",
"attributes": [],
"format": "markdown",
"dataFileName": "Installing Ollama.md",
"attachments": [
{
"attachmentId": "CG9q2FfKuEsr",
"title": "image.png",
"role": "image",
"mime": "image/png",
"position": 10,
"dataFileName": "Installing Ollama_image.png"
},
{
"attachmentId": "GEcgXxUE1IDx",
"title": "image.png",
"role": "image",
"mime": "image/png",
"position": 10,
"dataFileName": "1_Installing Ollama_image.png"
},
{
"attachmentId": "OMGDDxjScXCl",
"title": "image.png",
"role": "image",
"mime": "image/png",
"position": 10,
"dataFileName": "2_Installing Ollama_image.png"
},
{
"attachmentId": "Qacg7ibmEBkZ",
"title": "image.png",
"role": "image",
"mime": "image/png",
"position": 10,
"dataFileName": "3_Installing Ollama_image.png"
},
{
"attachmentId": "vSjU929VnBm4",
"title": "image.png",
"role": "image",
"mime": "image/png",
"position": 10,
"dataFileName": "4_Installing Ollama_image.png"
},
{
"attachmentId": "xGrxARTj79Gv",
"title": "image.png",
"role": "image",
"mime": "image/png",
"position": 10,
"dataFileName": "5_Installing Ollama_image.png"
}
]
}
]
},
{
"isClone": false,
"noteId": "ZavFigBX9AwP",
"notePath": [
"pOsGYCXsbNQG",
"LMAv4Uy3Wk6J",
"WkM7gsEUyCXs",
"ZavFigBX9AwP"
],
"title": "OpenAI",
"notePosition": 20,
"prefix": null,
"isExpanded": false,
"type": "text",
"mime": "text/html",
"attributes": [],
"format": "markdown",
"dataFileName": "OpenAI.md",
"attachments": []
},
{
"isClone": false,
"noteId": "e0lkirXEiSNc",
"notePath": [
"pOsGYCXsbNQG",
"LMAv4Uy3Wk6J",
"WkM7gsEUyCXs",
"e0lkirXEiSNc"
],
"title": "Anthropic",
"notePosition": 30,
"prefix": null,
"isExpanded": false,
"type": "text",
"mime": "text/html",
"attributes": [],
"format": "markdown",
"dataFileName": "Anthropic.md",
"attachments": []
}
]
}
]
},
{
"isClone": false,
"noteId": "CdNpE2pqjmI6",
@ -10606,7 +10969,7 @@
"CdNpE2pqjmI6"
],
"title": "Scripting",
"notePosition": 320,
"notePosition": 330,
"prefix": null,
"isExpanded": false,
"type": "text",

Binary files not shown (10 images added: 186 KiB, 168 KiB, 43 KiB, 172 KiB, 167 KiB, 237 KiB, 202 KiB, 49 KiB, 80 KiB, 191 KiB).

View File

@ -0,0 +1,15 @@
# AI Provider Information
Currently, we support the following providers:
* <a class="reference-link" href="AI%20Provider%20Information/Ollama">Ollama</a>
* <a class="reference-link" href="AI%20Provider%20Information/OpenAI.md">OpenAI</a>
* <a class="reference-link" href="AI%20Provider%20Information/Anthropic.md">Anthropic</a>
* Voyage AI
To set your preferred chat model, you'll want to enter the provider's name here:
<figure class="image image_resized" style="width:88.38%;"><img style="aspect-ratio:1884/1267;" src="AI Provider Information_im.png" width="1884" height="1267"></figure>
And to set your preferred embedding provider:
<figure class="image image_resized" style="width:93.47%;"><img style="aspect-ratio:1907/1002;" src="1_AI Provider Information_im.png" width="1907" height="1002"></figure>

Binary files not shown (5 images added: 47 KiB, 270 KiB, 24 KiB, 13 KiB, 89 KiB).

View File

@ -0,0 +1,25 @@
# Installing Ollama
[Ollama](https://ollama.com/) can be installed in a variety of ways, and even runs [within a Docker container](https://hub.docker.com/r/ollama/ollama). Ollama will be noticeably quicker when running on a GPU (Nvidia, AMD, Intel), but it can also run on just the CPU and system RAM. To install Ollama without any other prerequisites, you can follow their [installer](https://ollama.com/download):
<figure class="image image_resized" style="width:50.49%;"><img style="aspect-ratio:785/498;" src="3_Installing Ollama_image.png" width="785" height="498"></figure><figure class="image image_resized" style="width:40.54%;"><img style="aspect-ratio:467/100;" src="Installing Ollama_image.png" width="467" height="100"></figure><figure class="image image_resized" style="width:55.73%;"><img style="aspect-ratio:1296/1011;" src="1_Installing Ollama_image.png" width="1296" height="1011"></figure>
After their installer completes, if you're on Windows, you should see an entry in the start menu to run it:
<figure class="image image_resized" style="width:66.12%;"><img style="aspect-ratio:1161/480;" src="2_Installing Ollama_image.png" width="1161" height="480"></figure>
Also, you should have access to the `ollama` CLI via Powershell or CMD:
<figure class="image image_resized" style="width:86.09%;"><img style="aspect-ratio:1730/924;" src="5_Installing Ollama_image.png" width="1730" height="924"></figure>
After Ollama is installed, you can go ahead and `pull` the models you want to use and run. Here are the commands to pull my favorite tool-compatible model and embedding model as of April 2025:
```
ollama pull llama3.1:8b
ollama pull mxbai-embed-large
```
You can make sure it's running by going to [http://localhost:11434](http://localhost:11434), where you should get the following response (port 11434 is Ollama's default port):
<figure class="image"><img style="aspect-ratio:585/202;" src="4_Installing Ollama_image.png" width="585" height="202"></figure>
Now that you have Ollama up and running and have a few models pulled, you're ready to go ahead and start using Ollama as both a chat provider and an embedding provider!

Binary files not shown (2 images added: 5.3 KiB, 198 KiB).

View File

@ -0,0 +1,89 @@
# Introduction
<figure class="image image_resized" style="width:63.68%;"><img style="aspect-ratio:1363/1364;" src="Introduction_image.png" width="1363" height="1364"><figcaption>An example chat with an LLM</figcaption></figure>
The AI / LLM features within Trilium Notes are designed to allow you to interact with your Notes in a variety of ways, using as many of the major providers as we can support. 
In addition to being able to send chats to LLM providers such as OpenAI, Anthropic, and Ollama, we also support agentic tool calling and embeddings.
The quickest way to get started is to navigate to the “AI/LLM” settings:
<figure class="image image_resized" style="width:74.04%;"><img style="aspect-ratio:1916/1906;" src="5_Introduction_image.png" width="1916" height="1906"></figure>
Enable the feature:
<figure class="image image_resized" style="width:82.82%;"><img style="aspect-ratio:1911/997;" src="1_Introduction_image.png" width="1911" height="997"></figure>
## Embeddings
**Embeddings** are important because they give us a compact AI “summary” (not human-readable text) of each of your Notes, which we can then perform mathematical functions on (such as cosine similarity) to smartly figure out which Notes to send as context to the LLM when you're chatting, among other useful functions.
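As an illustration of the kind of math involved, cosine similarity between two embedding vectors can be computed as below. This is a minimal sketch, not necessarily the exact implementation Trilium uses:

```
// Cosine similarity: values near 1.0 mean the vectors point the same way
// (semantically similar Notes); values near 0 mean they are unrelated.
function cosineSimilarity(a: Float32Array, b: Float32Array): number {
    let dot = 0, normA = 0, normB = 0;
    for (let i = 0; i < a.length; i++) {
        dot += a[i] * b[i];
        normA += a[i] * a[i];
        normB += b[i] * b[i];
    }
    return dot / (Math.sqrt(normA) * Math.sqrt(normB));
}
```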
You will then need to set up the AI “provider” that you wish to use to create the embeddings for your Notes. Currently OpenAI, Voyage AI, and Ollama are supported providers for embedding generation.
In the following example, we're going to use our self-hosted Ollama instance to create the embeddings for our Notes. You can see additional documentation about installing your own Ollama locally in <a class="reference-link" href="AI%20Provider%20Information/Ollama/Installing%20Ollama.md">Installing Ollama</a>.
To see what embedding models Ollama has available, you can check out [this search](https://ollama.com/search?c=embedding) on their website, and then `pull` whichever one you want to try out. As of 4/15/25, my personal favorite is `mxbai-embed-large`.
First, we'll need to select the Ollama provider from the provider tabs, then enter the Base URL for our Ollama. Since our Ollama is running on our local machine, our Base URL is `http://localhost:11434`. We will then hit the “refresh” button to have it fetch our models:
<figure class="image image_resized" style="width:82.28%;"><img style="aspect-ratio:1912/1075;" src="4_Introduction_image.png" width="1912" height="1075"></figure>
When selecting the dropdown for the “Embedding Model”, embedding models should be at the top of the list, separated from the regular chat models by a horizontal line, as seen below:
<figure class="image image_resized" style="width:61.73%;"><img style="aspect-ratio:1232/959;" src="8_Introduction_image.png" width="1232" height="959"></figure>
After selecting an embedding model, embeddings should automatically begin generating; you can confirm this by checking the embedding statistics at the top of the “AI/LLM” settings panel:
<figure class="image image_resized" style="width:67.06%;"><img style="aspect-ratio:1333/499;" src="7_Introduction_image.png" width="1333" height="499"></figure>
If you don't see any embeddings being created, you will want to scroll to the bottom of the settings, and hit “Recreate All Embeddings”:
<figure class="image image_resized" style="width:65.69%;"><img style="aspect-ratio:1337/1490;" src="3_Introduction_image.png" width="1337" height="1490"></figure>
Creating the embeddings will take some time; they will be regenerated whenever a Note is created, updated, or deleted.
If for some reason you choose to change your embedding provider, or the model used, you'll need to recreate all embeddings.
## Tools
Tools are essentially functions that we provide to the various LLM providers, and then LLMs can respond in a specific format that tells us which tool function and parameters they would like to invoke. We then execute these tools and provide the results as additional context in the Chat conversation.
These are the tools that currently exist; they will certainly be made more effective over time, with even more to be added (see the sketch after this list for the definition format):
* `search_notes`
* Semantic search
* `keyword_search`
* Keyword-based search
* `attribute_search`
* Attribute-specific search
* `search_suggestion`
* Search syntax helper
* `read_note`
* Read note content (helps the LLM read Notes)
* `create_note`
* Create a Note
* `update_note`
* Update a Note
* `manage_attributes`
* Manage attributes on a Note
* `manage_relationships`
* Manage the various relationships between Notes
* `extract_content`
* Used to smartly extract content from a Note
* `calendar_integration`
* Used to find date notes, create date notes, get the daily note, etc.
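Under the hood, each of these is described to the provider as a function definition, roughly in the OpenAI function-calling shape sketched below. This is illustrative only: the `query` parameter shown here is hypothetical, and the real definitions live in the server's LLM tool code.

```
// Illustrative tool definition (OpenAI function-calling format);
// the "query" parameter is a hypothetical example.
const searchNotesTool = {
    type: "function",
    function: {
        name: "search_notes",
        description: "Semantic search across the user's Notes",
        parameters: {
            type: "object",
            properties: {
                query: { type: "string", description: "Natural-language search query" }
            },
            required: ["query"]
        }
    }
};
```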
When Tools are executed within your Chat, you'll see output like the following:
<figure class="image image_resized" style="width:66.88%;"><img style="aspect-ratio:1372/1591;" src="6_Introduction_image.png" width="1372" height="1591"></figure>
You don't need to tell the LLM to execute a certain tool; it should “smartly” call tools and automatically execute them as needed.
## Overview
Now that you know about embeddings and tools, you can use the “Chat with Notes” button and start chatting:
<figure class="image image_resized" style="width:60.77%;"><img style="aspect-ratio:1378/539;" src="2_Introduction_image.png" width="1378" height="539"></figure>
If you don't see the “Chat with Notes” button on your side launchbar, you might need to move it from the “Available Launchers” section to the “Visible Launchers” section:
<figure class="image image_resized" style="width:69.81%;"><img style="aspect-ratio:1765/1287;" src="9_Introduction_image.png" width="1765" height="1287"></figure>

Binary file not shown (image added: 175 KiB).

package-lock.json (generated; 157 lines changed)
View File

@ -9,6 +9,7 @@
"version": "0.93.0",
"license": "AGPL-3.0-only",
"dependencies": {
"@anthropic-ai/sdk": "0.39.0",
"@braintree/sanitize-url": "7.1.1",
"@electron/remote": "2.1.2",
"@highlightjs/cdn-assets": "11.11.1",
@ -67,6 +68,8 @@
"multer": "1.4.5-lts.2",
"normalize-strings": "1.1.1",
"normalize.css": "8.0.1",
"ollama": "0.5.14",
"openai": "4.93.0",
"rand-token": "1.0.1",
"safe-compare": "1.1.4",
"sanitize-filename": "1.6.3",
@ -254,6 +257,36 @@
"url": "https://github.com/sponsors/antfu"
}
},
"node_modules/@anthropic-ai/sdk": {
"version": "0.39.0",
"resolved": "https://registry.npmjs.org/@anthropic-ai/sdk/-/sdk-0.39.0.tgz",
"integrity": "sha512-eMyDIPRZbt1CCLErRCi3exlAvNkBtRe+kW5vvJyef93PmNr/clstYgHhtvmkxN82nlKgzyGPCyGxrm0JQ1ZIdg==",
"license": "MIT",
"dependencies": {
"@types/node": "^18.11.18",
"@types/node-fetch": "^2.6.4",
"abort-controller": "^3.0.0",
"agentkeepalive": "^4.2.1",
"form-data-encoder": "1.7.2",
"formdata-node": "^4.3.2",
"node-fetch": "^2.6.7"
}
},
"node_modules/@anthropic-ai/sdk/node_modules/@types/node": {
"version": "18.19.86",
"resolved": "https://registry.npmjs.org/@types/node/-/node-18.19.86.tgz",
"integrity": "sha512-fifKayi175wLyKyc5qUfyENhQ1dCNI1UNjp653d8kuYcPQN5JhX3dGuP/XmvPTg/xRBn1VTLpbmi+H/Mr7tLfQ==",
"license": "MIT",
"dependencies": {
"undici-types": "~5.26.4"
}
},
"node_modules/@anthropic-ai/sdk/node_modules/undici-types": {
"version": "5.26.5",
"resolved": "https://registry.npmjs.org/undici-types/-/undici-types-5.26.5.tgz",
"integrity": "sha512-JlCMO+ehdEIKqlFxk6IfVoAUVmgz7cU7zD/h9XZ0qzeosSHmUJVOzSQvvYSYWXkFXC+IfLKSIffhv0sVZup6pA==",
"license": "MIT"
},
"node_modules/@apidevtools/json-schema-ref-parser": {
"version": "9.1.2",
"resolved": "https://registry.npmjs.org/@apidevtools/json-schema-ref-parser/-/json-schema-ref-parser-9.1.2.tgz",
@ -5260,6 +5293,16 @@
"undici-types": "~6.21.0"
}
},
"node_modules/@types/node-fetch": {
"version": "2.6.12",
"resolved": "https://registry.npmjs.org/@types/node-fetch/-/node-fetch-2.6.12.tgz",
"integrity": "sha512-8nneRWKCg3rMtF69nLQJnOYUcbafYeFSjqkw3jCRLsqkWFlHaoQrr5mXmofFGOx3DKn7UfmBMyov8ySvLRVldA==",
"license": "MIT",
"dependencies": {
"@types/node": "*",
"form-data": "^4.0.0"
}
},
"node_modules/@types/prop-types": {
"version": "15.7.14",
"resolved": "https://registry.npmjs.org/@types/prop-types/-/prop-types-15.7.14.tgz",
@ -6238,7 +6281,6 @@
"version": "4.5.0",
"resolved": "https://registry.npmjs.org/agentkeepalive/-/agentkeepalive-4.5.0.tgz",
"integrity": "sha512-5GG/5IbQQpC9FpkRGsSvZI5QYeSCzlJHdpBQntCsuTOxhKD8lqKhrleg2Yi7yvMIf82Ycmmqln9U8V9qwEiJew==",
"dev": true,
"license": "MIT",
"dependencies": {
"humanize-ms": "^1.2.1"
@ -10299,7 +10341,6 @@
"version": "0.1.13",
"resolved": "https://registry.npmjs.org/encoding/-/encoding-0.1.13.tgz",
"integrity": "sha512-ETBauow1T35Y/WZMkio9jiM0Z5xjHHmJ4XmjZOq1l/dXz3lr2sRn87nJy20RupqSh1F2m3HHPSp8ShIPQJrJ3A==",
"dev": true,
"license": "MIT",
"optional": true,
"dependencies": {
@ -11670,6 +11711,12 @@
"node": ">= 6"
}
},
"node_modules/form-data-encoder": {
"version": "1.7.2",
"resolved": "https://registry.npmjs.org/form-data-encoder/-/form-data-encoder-1.7.2.tgz",
"integrity": "sha512-qfqtYan3rxrnCk1VYaA4H+Ms9xdpPqvLZa6xmMgFvhO32x7/3J/ExcTd6qpxM0vH2GdMI+poehyBZvqfMTto8A==",
"license": "MIT"
},
"node_modules/form-data/node_modules/mime-db": {
"version": "1.52.0",
"resolved": "https://registry.npmjs.org/mime-db/-/mime-db-1.52.0.tgz",
@ -11691,6 +11738,19 @@
"node": ">= 0.6"
}
},
"node_modules/formdata-node": {
"version": "4.4.1",
"resolved": "https://registry.npmjs.org/formdata-node/-/formdata-node-4.4.1.tgz",
"integrity": "sha512-0iirZp3uVDjVGt9p49aTaqjk84TrglENEDuqfdlZQ1roC9CWlPk6Avf8EEnZNcAqPonwkG35x4n3ww/1THYAeQ==",
"license": "MIT",
"dependencies": {
"node-domexception": "1.0.0",
"web-streams-polyfill": "4.0.0-beta.3"
},
"engines": {
"node": ">= 12.20"
}
},
"node_modules/formidable": {
"version": "3.5.2",
"resolved": "https://registry.npmjs.org/formidable/-/formidable-3.5.2.tgz",
@ -12666,7 +12726,6 @@
"version": "1.2.1",
"resolved": "https://registry.npmjs.org/humanize-ms/-/humanize-ms-1.2.1.tgz",
"integrity": "sha512-Fl70vYtsAFb/C06PTS9dZBo7ihau+Tu/DNCk/OyHhea07S+aeMWpFFkUaXRa8fI+ScZbEI8dfSxwY7gxZ9SAVQ==",
"dev": true,
"license": "MIT",
"dependencies": {
"ms": "^2.0.0"
@ -15516,11 +15575,29 @@
"semver": "^7.3.5"
}
},
"node_modules/node-domexception": {
"version": "1.0.0",
"resolved": "https://registry.npmjs.org/node-domexception/-/node-domexception-1.0.0.tgz",
"integrity": "sha512-/jKZoMpw0F8GRwl4/eLROPA3cfcXtLApP0QzLmUT/HuPCZWyB7IY9ZrMeKw2O/nFIqPQB3PVM9aYm0F312AXDQ==",
"funding": [
{
"type": "github",
"url": "https://github.com/sponsors/jimmywarting"
},
{
"type": "github",
"url": "https://paypal.me/jimmywarting"
}
],
"license": "MIT",
"engines": {
"node": ">=10.5.0"
}
},
"node_modules/node-fetch": {
"version": "2.7.0",
"resolved": "https://registry.npmjs.org/node-fetch/-/node-fetch-2.7.0.tgz",
"integrity": "sha512-c4FRfUm/dbcWZ7U+1Wq0AwCyFL+3nt2bEw05wfxSz+DWpWsitgmSgYmy2dQdWyKC1694ELPqMs/YzUSNozLt8A==",
"dev": true,
"license": "MIT",
"dependencies": {
"whatwg-url": "^5.0.0"
@ -15541,21 +15618,18 @@
"version": "0.0.3",
"resolved": "https://registry.npmjs.org/tr46/-/tr46-0.0.3.tgz",
"integrity": "sha512-N3WMsuqV66lT30CrXNbEjx4GEwlow3v6rr4mCcv6prnfwhS01rkgyFdjPNBYd9br7LpXV1+Emh01fHnq2Gdgrw==",
"dev": true,
"license": "MIT"
},
"node_modules/node-fetch/node_modules/webidl-conversions": {
"version": "3.0.1",
"resolved": "https://registry.npmjs.org/webidl-conversions/-/webidl-conversions-3.0.1.tgz",
"integrity": "sha512-2JAn3z8AR6rjK8Sm8orRC0h/bcl/DqL7tRPdGZ4I1CjdF+EaMLmYxBHyXuKL849eucPFhvBoxMsflfOb8kxaeQ==",
"dev": true,
"license": "BSD-2-Clause"
},
"node_modules/node-fetch/node_modules/whatwg-url": {
"version": "5.0.0",
"resolved": "https://registry.npmjs.org/whatwg-url/-/whatwg-url-5.0.0.tgz",
"integrity": "sha512-saE57nupxk6v3HY35+jzBwYa0rKSy0XR8JSxZPwgLr7ys0IBzhGviA1/TUGJLmSVqs8pb9AnvICXEuOHLprYTw==",
"dev": true,
"license": "MIT",
"dependencies": {
"tr46": "~0.0.3",
@ -15851,6 +15925,15 @@
"node": "^10.13.0 || >=12.0.0"
}
},
"node_modules/ollama": {
"version": "0.5.14",
"resolved": "https://registry.npmjs.org/ollama/-/ollama-0.5.14.tgz",
"integrity": "sha512-pvOuEYa2WkkAumxzJP0RdEYHkbZ64AYyyUszXVX7ruLvk5L+EiO2G71da2GqEQ4IAk4j6eLoUbGk5arzFT1wJA==",
"license": "MIT",
"dependencies": {
"whatwg-fetch": "^3.6.20"
}
},
"node_modules/omggif": {
"version": "1.0.10",
"resolved": "https://registry.npmjs.org/omggif/-/omggif-1.0.10.tgz",
@ -15910,6 +15993,51 @@
"dev": true,
"license": "MIT"
},
"node_modules/openai": {
"version": "4.93.0",
"resolved": "https://registry.npmjs.org/openai/-/openai-4.93.0.tgz",
"integrity": "sha512-2kONcISbThKLfm7T9paVzg+QCE1FOZtNMMUfXyXckUAoXRRS/mTP89JSDHPMp8uM5s0bz28RISbvQjArD6mgUQ==",
"license": "Apache-2.0",
"dependencies": {
"@types/node": "^18.11.18",
"@types/node-fetch": "^2.6.4",
"abort-controller": "^3.0.0",
"agentkeepalive": "^4.2.1",
"form-data-encoder": "1.7.2",
"formdata-node": "^4.3.2",
"node-fetch": "^2.6.7"
},
"bin": {
"openai": "bin/cli"
},
"peerDependencies": {
"ws": "^8.18.0",
"zod": "^3.23.8"
},
"peerDependenciesMeta": {
"ws": {
"optional": true
},
"zod": {
"optional": true
}
}
},
"node_modules/openai/node_modules/@types/node": {
"version": "18.19.86",
"resolved": "https://registry.npmjs.org/@types/node/-/node-18.19.86.tgz",
"integrity": "sha512-fifKayi175wLyKyc5qUfyENhQ1dCNI1UNjp653d8kuYcPQN5JhX3dGuP/XmvPTg/xRBn1VTLpbmi+H/Mr7tLfQ==",
"license": "MIT",
"dependencies": {
"undici-types": "~5.26.4"
}
},
"node_modules/openai/node_modules/undici-types": {
"version": "5.26.5",
"resolved": "https://registry.npmjs.org/undici-types/-/undici-types-5.26.5.tgz",
"integrity": "sha512-JlCMO+ehdEIKqlFxk6IfVoAUVmgz7cU7zD/h9XZ0qzeosSHmUJVOzSQvvYSYWXkFXC+IfLKSIffhv0sVZup6pA==",
"license": "MIT"
},
"node_modules/openapi-types": {
"version": "12.1.3",
"resolved": "https://registry.npmjs.org/openapi-types/-/openapi-types-12.1.3.tgz",
@ -21056,6 +21184,15 @@
"defaults": "^1.0.3"
}
},
"node_modules/web-streams-polyfill": {
"version": "4.0.0-beta.3",
"resolved": "https://registry.npmjs.org/web-streams-polyfill/-/web-streams-polyfill-4.0.0-beta.3.tgz",
"integrity": "sha512-QW95TCTaHmsYfHDybGMwO5IJIM93I/6vTRk+daHTWFPhwh+C8Cg7j7XyKrwrj8Ib6vYXe0ocYNrmzY4xAAN6ug==",
"license": "MIT",
"engines": {
"node": ">= 14"
}
},
"node_modules/web-worker": {
"version": "1.5.0",
"resolved": "https://registry.npmjs.org/web-worker/-/web-worker-1.5.0.tgz",
@ -21292,6 +21429,12 @@
"node": ">=18"
}
},
"node_modules/whatwg-fetch": {
"version": "3.6.20",
"resolved": "https://registry.npmjs.org/whatwg-fetch/-/whatwg-fetch-3.6.20.tgz",
"integrity": "sha512-EqhiFU6daOA8kpjOWTL0olhVOF3i7OrFzSYiGsEMB8GcXS+RrzauAERX65xMeNWVqxA6HXH2m69Z9LaKKdisfg==",
"license": "MIT"
},
"node_modules/whatwg-mimetype": {
"version": "4.0.0",
"resolved": "https://registry.npmjs.org/whatwg-mimetype/-/whatwg-mimetype-4.0.0.tgz",

View File

@ -69,6 +69,7 @@
"chore:generate-openapi": "tsx bin/generate-openapi.js"
},
"dependencies": {
"@anthropic-ai/sdk": "0.39.0",
"@braintree/sanitize-url": "7.1.1",
"@electron/remote": "2.1.2",
"@highlightjs/cdn-assets": "11.11.1",
@ -127,6 +128,8 @@
"multer": "1.4.5-lts.2",
"normalize-strings": "1.1.1",
"normalize.css": "8.0.1",
"ollama": "0.5.14",
"openai": "4.93.0",
"rand-token": "1.0.1",
"safe-compare": "1.1.4",
"sanitize-filename": "1.6.3",

View File

@ -18,6 +18,8 @@ import sql_init from "./services/sql_init.js";
import { auth } from "express-openid-connect";
import openID from "./services/open_id.js";
import { t } from "i18next";
import eventService from "./services/events.js";
import log from "./services/log.js";
await import("./services/handlers.js");
await import("./becca/becca_loader.js");
@ -29,6 +31,42 @@ const scriptDir = dirname(fileURLToPath(import.meta.url));
// Initialize DB
sql_init.initializeDb();
// Listen for database initialization event
eventService.subscribe(eventService.DB_INITIALIZED, async () => {
try {
log.info("Database initialized, setting up LLM features");
// Initialize embedding providers
const { initializeEmbeddings } = await import("./services/llm/embeddings/init.js");
await initializeEmbeddings();
// Initialize the index service for LLM functionality
const { default: indexService } = await import("./services/llm/index_service.js");
await indexService.initialize().catch(e => console.error("Failed to initialize index service:", e));
log.info("LLM features initialized successfully");
} catch (error) {
console.error("Error initializing LLM features:", error);
}
});
// Initialize LLM features only if database is already initialized
if (sql_init.isDbInitialized()) {
try {
// Initialize embedding providers
const { initializeEmbeddings } = await import("./services/llm/embeddings/init.js");
await initializeEmbeddings();
// Initialize the index service for LLM functionality
const { default: indexService } = await import("./services/llm/index_service.js");
await indexService.initialize().catch(e => console.error("Failed to initialize index service:", e));
} catch (error) {
console.error("Error initializing LLM features:", error);
}
} else {
console.log("Database not initialized yet. LLM features will be initialized after setup.");
}
// view engine setup
app.set("views", path.join(scriptDir, "views"));
app.set("view engine", "ejs");

View File

@ -0,0 +1,73 @@
import AbstractBeccaEntity from "./abstract_becca_entity.js";
import dateUtils from "../../services/date_utils.js";
import type { NoteEmbeddingRow } from "./rows.js";
/**
* Entity representing a note's vector embedding for semantic search and AI features
*/
class BNoteEmbedding extends AbstractBeccaEntity<BNoteEmbedding> {
static get entityName() {
return "note_embeddings";
}
static get primaryKeyName() {
return "embedId";
}
static get hashedProperties() {
return ["embedId", "noteId", "providerId", "modelId", "dimension", "version"];
}
embedId!: string;
noteId!: string;
providerId!: string;
modelId!: string;
dimension!: number;
embedding!: Buffer;
version!: number;
constructor(row?: NoteEmbeddingRow) {
super();
if (row) {
this.updateFromRow(row);
}
}
updateFromRow(row: NoteEmbeddingRow): void {
this.embedId = row.embedId;
this.noteId = row.noteId;
this.providerId = row.providerId;
this.modelId = row.modelId;
this.dimension = row.dimension;
this.embedding = row.embedding;
this.version = row.version;
this.dateCreated = row.dateCreated;
this.dateModified = row.dateModified;
this.utcDateCreated = row.utcDateCreated;
this.utcDateModified = row.utcDateModified;
}
beforeSaving() {
super.beforeSaving();
this.dateModified = dateUtils.localNowDateTime();
this.utcDateModified = dateUtils.utcNowDateTime();
}
getPojo(): NoteEmbeddingRow {
return {
embedId: this.embedId,
noteId: this.noteId,
providerId: this.providerId,
modelId: this.modelId,
dimension: this.dimension,
embedding: this.embedding,
version: this.version,
dateCreated: this.dateCreated!,
dateModified: this.dateModified!,
utcDateCreated: this.utcDateCreated,
utcDateModified: this.utcDateModified!
};
}
}
export default BNoteEmbedding;
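A hypothetical usage sketch of the entity above (the IDs and values are placeholders, and persistence is assumed to follow the usual becca entity `save()` pattern, reusing the `dateUtils` import from this file):

```
// Hypothetical: build and persist one embedding row for a note.
const row = new BNoteEmbedding({
    embedId: "someEmbedId",     // placeholder
    noteId: "someNoteId",       // placeholder
    providerId: "ollama",
    modelId: "mxbai-embed-large",
    dimension: 1024,
    embedding: Buffer.from(new Float32Array(1024).buffer),
    version: 1,
    dateCreated: dateUtils.localNowDateTime(),
    utcDateCreated: dateUtils.utcNowDateTime(),
    dateModified: dateUtils.localNowDateTime(),
    utcDateModified: dateUtils.utcNowDateTime()
});
row.save();
```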

View File

@ -139,3 +139,17 @@ export interface NoteRow {
utcDateModified: string;
content?: string | Buffer;
}
export interface NoteEmbeddingRow {
embedId: string;
noteId: string;
providerId: string;
modelId: string;
dimension: number;
embedding: Buffer;
version: number;
dateCreated: string;
utcDateCreated: string;
dateModified: string;
utcDateModified: string;
}

View File

@ -6,6 +6,7 @@ import BBlob from "./entities/bblob.js";
import BBranch from "./entities/bbranch.js";
import BEtapiToken from "./entities/betapi_token.js";
import BNote from "./entities/bnote.js";
import BNoteEmbedding from "./entities/bnote_embedding.js";
import BOption from "./entities/boption.js";
import BRecentNote from "./entities/brecent_note.js";
import BRevision from "./entities/brevision.js";
@ -19,6 +20,7 @@ const ENTITY_NAME_TO_ENTITY: Record<string, ConstructorData<any> & EntityClass>
branches: BBranch,
etapi_tokens: BEtapiToken,
notes: BNote,
note_embeddings: BNoteEmbedding,
options: BOption,
recent_notes: BRecentNote,
revisions: BRevision

View File

@ -89,6 +89,8 @@ export type CommandMappings = {
closeHlt: CommandData;
showLaunchBarSubtree: CommandData;
showRevisions: CommandData;
showLlmChat: CommandData;
createAiChat: CommandData;
showOptions: CommandData & {
section: string;
};

View File

@ -7,6 +7,9 @@ import protectedSessionService from "../services/protected_session.js";
import options from "../services/options.js";
import froca from "../services/froca.js";
import utils from "../services/utils.js";
import LlmChatPanel from "../widgets/llm_chat_panel.js";
import toastService from "../services/toast.js";
import noteCreateService from "../services/note_create.js";
export default class RootCommandExecutor extends Component {
editReadOnlyNoteCommand() {
@ -226,4 +229,35 @@ export default class RootCommandExecutor extends Component {
appContext.tabManager.activateNoteContext(tab.ntxId);
}
}
async createAiChatCommand() {
try {
// Create a new AI Chat note at the root level
const rootNoteId = "root";
const result = await noteCreateService.createNote(rootNoteId, {
title: "New AI Chat",
type: "aiChat",
content: JSON.stringify({
messages: [],
title: "New AI Chat"
})
});
if (!result.note) {
toastService.showError("Failed to create AI Chat note");
return;
}
await appContext.tabManager.openTabWithNoteWithHoisting(result.note.noteId, {
activate: true
});
toastService.showMessage("Created new AI Chat note");
}
catch (e) {
console.error("Error creating AI Chat note:", e);
toastService.showError("Failed to create AI Chat note: " + (e as Error).message);
}
}
}

File diff suppressed because one or more lines are too long

Binary files not shown (10 images added: 186 KiB, 168 KiB, 43 KiB, 172 KiB, 167 KiB, 237 KiB, 202 KiB, 49 KiB, 80 KiB, 191 KiB).

View File

@ -0,0 +1,22 @@
<p>Currently, we support the following providers:</p>
<ul>
<li><a class="reference-link" href="#root/_help_7EdTxPADv95W">Ollama</a>
</li>
<li><a class="reference-link" href="#root/_help_ZavFigBX9AwP">OpenAI</a>
</li>
<li><a class="reference-link" href="#root/_help_e0lkirXEiSNc">Anthropic</a>
</li>
<li>Voyage AI</li>
</ul>
<p>To set your preferred chat model, you'll want to enter the provider's
name here:</p>
<figure class="image image_resized" style="width:88.38%;">
<img style="aspect-ratio:1884/1267;" src="AI Provider Information_im.png"
width="1884" height="1267">
</figure>
<p>And to set your preferred embedding provider:</p>
<figure class="image image_resized"
style="width:93.47%;">
<img style="aspect-ratio:1907/1002;" src="1_AI Provider Information_im.png"
width="1907" height="1002">
</figure>

Binary files not shown (5 images added: 47 KiB, 270 KiB, 24 KiB, 13 KiB, 89 KiB).

View File

@ -0,0 +1,45 @@
<p><a href="https://ollama.com/">Ollama</a> can be installed in a variety
of ways, and even runs <a href="https://hub.docker.com/r/ollama/ollama">within a Docker container</a>.
Ollama will be noticeably quicker when running on a GPU (Nvidia, AMD, Intel),
  but it can also run on just the CPU and system RAM. To install Ollama without any other prerequisites,
you can follow their <a href="https://ollama.com/download">installer</a>:</p>
<figure
class="image image_resized" style="width:50.49%;">
<img style="aspect-ratio:785/498;" src="3_Installing Ollama_image.png"
width="785" height="498">
</figure>
<figure class="image image_resized" style="width:40.54%;">
<img style="aspect-ratio:467/100;" src="Installing Ollama_image.png" width="467"
height="100">
</figure>
<figure class="image image_resized" style="width:55.73%;">
<img style="aspect-ratio:1296/1011;" src="1_Installing Ollama_image.png"
width="1296" height="1011">
</figure>
<p>After their installer completes, if you're on Windows, you should see
an entry in the start menu to run it:</p>
<figure class="image image_resized"
style="width:66.12%;">
<img style="aspect-ratio:1161/480;" src="2_Installing Ollama_image.png"
width="1161" height="480">
</figure>
<p>Also, you should have access to the <code>ollama</code> CLI via Powershell
or CMD:</p>
<figure class="image image_resized" style="width:86.09%;">
<img style="aspect-ratio:1730/924;" src="5_Installing Ollama_image.png"
width="1730" height="924">
</figure>
<p>After Ollama is installed, you can go ahead and <code>pull</code> the models
  you want to use and run. Here are the commands to pull my favorite tool-compatible
  model and embedding model as of April 2025:</p><pre><code class="language-text-x-trilium-auto">ollama pull llama3.1:8b
ollama pull mxbai-embed-large</code></pre>
<p>You can make sure it's running by going to <a href="http://localhost:11434">http://localhost:11434</a>, where
  you should get the following response (port 11434 is Ollama's default
  port):</p>
<figure class="image">
<img style="aspect-ratio:585/202;" src="4_Installing Ollama_image.png"
width="585" height="202">
</figure>
<p>Now that you have Ollama up and running and have a few models pulled, you're
  ready to go ahead and start using Ollama as both a chat provider and an
  embedding provider!</p>

Binary files not shown (2 images added: 5.3 KiB, 198 KiB).

View File

@ -0,0 +1,161 @@
<figure class="image image_resized" style="width:63.68%;">
<img style="aspect-ratio:1363/1364;" src="Introduction_image.png" width="1363"
height="1364">
<figcaption>An example chat with an LLM</figcaption>
</figure>
<p>The AI / LLM features within Trilium Notes are designed to allow you to
interact with your Notes in a variety of ways, using as many of the major
providers as we can support.&nbsp;</p>
<p>In addition to being able to send chats to LLM providers such as OpenAI,
  Anthropic, and Ollama, we also support agentic tool calling and embeddings.</p>
<p>The quickest way to get started is to navigate to the “AI/LLM” settings:</p>
<figure
class="image image_resized" style="width:74.04%;">
<img style="aspect-ratio:1916/1906;" src="5_Introduction_image.png" width="1916"
height="1906">
</figure>
<p>Enable the feature:</p>
<figure class="image image_resized" style="width:82.82%;">
<img style="aspect-ratio:1911/997;" src="1_Introduction_image.png" width="1911"
height="997">
</figure>
<h2>Embeddings</h2>
<p><strong>Embeddings</strong> are important because they give us a compact
  AI “summary” (not human-readable text) of each of your Notes, which
  we can then perform mathematical functions on (such as cosine similarity)
  to smartly figure out which Notes to send as context to the LLM when you're
  chatting, among other useful functions.</p>
<p>You will then need to set up the AI “provider” that you wish to use to
create the embeddings for your Notes. Currently OpenAI, Voyage AI, and
Ollama are supported providers for embedding generation.</p>
<p>In the following example, we're going to use our self-hosted Ollama instance
to create the embeddings for our Notes. You can see additional documentation
about installing your own Ollama locally in&nbsp;<a class="reference-link"
href="#root/_help_vvUCN7FDkq7G">Installing Ollama</a>.</p>
<p>To see what embedding models Ollama has available, you can check out
  <a href="https://ollama.com/search?c=embedding">this search</a> on their website, and then <code>pull</code> whichever one
  you want to try out. As of 4/15/25, my personal favorite is <code>mxbai-embed-large</code>.</p>
<p>First, we'll need to select the Ollama provider from the provider tabs,
  then enter the Base URL for our Ollama. Since our Ollama is
  running on our local machine, our Base URL is <code>http://localhost:11434</code>.
  We will then hit the “refresh” button to have it fetch our models:</p>
<figure
class="image image_resized" style="width:82.28%;">
<img style="aspect-ratio:1912/1075;" src="4_Introduction_image.png" width="1912"
height="1075">
</figure>
<p>When selecting the dropdown for the “Embedding Model”, embedding models
  should be at the top of the list, separated from the regular chat models by
  a horizontal line, as seen below:</p>
<figure class="image image_resized"
style="width:61.73%;">
<img style="aspect-ratio:1232/959;" src="8_Introduction_image.png" width="1232"
height="959">
</figure>
<p>After selecting an embedding model, embeddings should automatically begin
  generating; you can confirm this by checking the embedding statistics at the
  top of the “AI/LLM” settings panel:</p>
<figure class="image image_resized" style="width:67.06%;">
<img style="aspect-ratio:1333/499;" src="7_Introduction_image.png" width="1333"
height="499">
</figure>
<p>If you don't see any embeddings being created, you will want to scroll
to the bottom of the settings, and hit “Recreate All Embeddings”:</p>
<figure
class="image image_resized" style="width:65.69%;">
<img style="aspect-ratio:1337/1490;" src="3_Introduction_image.png" width="1337"
height="1490">
</figure>
<p>Creating the embeddings will take some time; they will be regenerated whenever
  a Note is created, updated, or deleted.</p>
<p>If for some reason you choose to change your embedding provider, or the
model used, you'll need to recreate all embeddings.</p>
<h2>Tools</h2>
<p>Tools are essentially functions that we provide to the various LLM providers,
  and then LLMs can respond in a specific format that tells us which tool
  function and parameters they would like to invoke. We then execute these
  tools and provide the results as additional context in the Chat conversation.</p>
<p>These are the tools that currently exist; they will certainly be made more
  effective over time, with even more to be added:</p>
<ul>
<li><code>search_notes</code>
<ul>
<li>Semantic search</li>
</ul>
</li>
<li><code>keyword_search</code>
<ul>
<li>Keyword-based search</li>
</ul>
</li>
<li><code>attribute_search</code>
<ul>
<li>Attribute-specific search</li>
</ul>
</li>
<li><code>search_suggestion</code>
<ul>
<li>Search syntax helper</li>
</ul>
</li>
<li><code>read_note</code>
<ul>
<li>Read note content (helps the LLM read Notes)</li>
</ul>
</li>
<li><code>create_note</code>
<ul>
<li>Create a Note</li>
</ul>
</li>
<li><code>update_note</code>
<ul>
<li>Update a Note</li>
</ul>
</li>
<li><code>manage_attributes</code>
<ul>
<li>Manage attributes on a Note</li>
</ul>
</li>
<li><code>manage_relationships</code>
<ul>
<li>Manage the various relationships between Notes</li>
</ul>
</li>
<li><code>extract_content</code>
<ul>
<li>Used to smartly extract content from a Note</li>
</ul>
</li>
<li><code>calendar_integration</code>
<ul>
<li>Used to find date notes, create date notes, get the daily note, etc.</li>
</ul>
</li>
</ul>
<p>When Tools are executed within your Chat, you'll see output like the following:</p>
<figure
class="image image_resized" style="width:66.88%;">
<img style="aspect-ratio:1372/1591;" src="6_Introduction_image.png" width="1372"
height="1591">
</figure>
<p>You don't need to tell the LLM to execute a certain tool; it should “smartly”
  call tools and automatically execute them as needed.</p>
<h2>Overview</h2>
<p>Now that you know about embeddings and tools, you can use the
  “Chat with Notes” button and start chatting:</p>
<figure
class="image image_resized" style="width:60.77%;">
<img style="aspect-ratio:1378/539;" src="2_Introduction_image.png" width="1378"
height="539">
</figure>
<p>If you don't see the “Chat with Notes” button on your side launchbar,
you might need to move it from the “Available Launchers” section to the
“Visible Launchers” section:</p>
<figure class="image image_resized" style="width:69.81%;">
<img style="aspect-ratio:1765/1287;" src="9_Introduction_image.png" width="1765"
height="1287">
</figure>

Binary file not shown (image added: 175 KiB).

View File

@ -28,7 +28,8 @@ const NOTE_TYPE_ICONS = {
doc: "bx bxs-file-doc",
contentWidget: "bx bxs-widget",
mindMap: "bx bx-sitemap",
geoMap: "bx bx-map-alt"
geoMap: "bx bx-map-alt",
aiChat: "bx bx-bot"
};
/**
@ -36,7 +37,7 @@ const NOTE_TYPE_ICONS = {
* end user. Those types should be used only for checking against, they are
* not for direct use.
*/
export type NoteType = "file" | "image" | "search" | "noteMap" | "launcher" | "doc" | "contentWidget" | "text" | "relationMap" | "render" | "canvas" | "mermaid" | "book" | "webView" | "code" | "mindMap" | "geoMap";
export type NoteType = "file" | "image" | "search" | "noteMap" | "launcher" | "doc" | "contentWidget" | "text" | "relationMap" | "render" | "canvas" | "mermaid" | "book" | "webView" | "code" | "mindMap" | "geoMap" | "aiChat";
export interface NotePathRecord {
isArchived: boolean;

View File

@ -127,6 +127,49 @@ async function handleMessage(event: MessageEvent<any>) {
appContext.triggerEvent("apiLogMessages", { noteId: message.noteId, messages: message.messages });
} else if (message.type === "toast") {
toastService.showMessage(message.message);
} else if (message.type === "llm-stream") {
// ENHANCED LOGGING FOR DEBUGGING
console.log(`[WS-CLIENT] >>> RECEIVED LLM STREAM MESSAGE <<<`);
console.log(`[WS-CLIENT] Message details: sessionId=${message.sessionId}, hasContent=${!!message.content}, contentLength=${message.content ? message.content.length : 0}, hasThinking=${!!message.thinking}, hasToolExecution=${!!message.toolExecution}, isDone=${!!message.done}`);
if (message.content) {
console.log(`[WS-CLIENT] CONTENT PREVIEW: "${message.content.substring(0, 50)}..."`);
}
// Create the event with detailed logging
console.log(`[WS-CLIENT] Creating CustomEvent 'llm-stream-message'`);
const llmStreamEvent = new CustomEvent('llm-stream-message', { detail: message });
// Dispatch to multiple targets to ensure delivery
try {
console.log(`[WS-CLIENT] Dispatching event to window`);
window.dispatchEvent(llmStreamEvent);
console.log(`[WS-CLIENT] Event dispatched to window`);
// Also try document for completeness
console.log(`[WS-CLIENT] Dispatching event to document`);
document.dispatchEvent(new CustomEvent('llm-stream-message', { detail: message }));
console.log(`[WS-CLIENT] Event dispatched to document`);
} catch (err) {
console.error(`[WS-CLIENT] Error dispatching event:`, err);
}
// Debug current listeners (though we can't directly check for specific event listeners)
console.log(`[WS-CLIENT] Active event listeners should receive this message now`);
// Detailed logging based on message type
if (message.content) {
console.log(`[WS-CLIENT] Content message: ${message.content.length} chars`);
} else if (message.thinking) {
console.log(`[WS-CLIENT] Thinking update: "${message.thinking}"`);
} else if (message.toolExecution) {
console.log(`[WS-CLIENT] Tool execution: action=${message.toolExecution.action}, tool=${message.toolExecution.tool || 'unknown'}`);
if (message.toolExecution.result) {
console.log(`[WS-CLIENT] Tool result preview: "${String(message.toolExecution.result).substring(0, 50)}..."`);
}
} else if (message.done) {
console.log(`[WS-CLIENT] Completion signal received`);
}
} else if (message.type === "execute-script") {
// TODO: Remove after porting the file
// @ts-ignore

View File

@ -0,0 +1,26 @@
import type { EventData } from "../../components/app_context.js";
import type FNote from "../../entities/fnote.js";
import options from "../../services/options.js";
import CommandButtonWidget from "./command_button.js";
export default class AiChatButton extends CommandButtonWidget {
constructor(note: FNote) {
super();
this.command("createAiChat")
.title(() => note.title)
.icon(() => note.getIcon())
.class("launcher-button");
}
isEnabled() {
return options.get("aiEnabled") === "true";
}
entitiesReloadedEvent({ loadResults }: EventData<"entitiesReloaded">) {
if (loadResults.isOptionReloaded("aiEnabled")) {
this.refresh();
}
}
}

View File

@ -0,0 +1,27 @@
import { t } from "../../services/i18n.js";
import options from "../../services/options.js";
import CommandButtonWidget from "./command_button.js";
export default class CreateAiChatButton extends CommandButtonWidget {
constructor() {
super();
this.icon("bx bx-bot")
.title(t("ai.create_new_ai_chat"))
.titlePlacement("bottom")
.command("createAiChat")
.class("icon-action");
}
isEnabled() {
return options.get("aiEnabled") === "true";
}
async refreshWithNote() {
if (this.isEnabled()) {
this.$widget.show();
} else {
this.$widget.hide();
}
}
}

View File

@ -13,6 +13,7 @@ import HistoryNavigationButton from "../buttons/history_navigation.js";
import QuickSearchLauncherWidget from "../quick_search_launcher.js";
import type FNote from "../../entities/fnote.js";
import type { CommandNames } from "../../components/app_context.js";
import AiChatButton from "../buttons/ai_chat_button.js";
interface InnerWidget extends BasicWidget {
settings?: {
@ -123,6 +124,8 @@ export default class LauncherWidget extends BasicWidget {
return new TodayLauncher(note);
case "quickSearch":
return new QuickSearchLauncherWidget(this.isHorizontalLayout);
case "aiChatLauncher":
return new AiChatButton(note);
default:
throw new Error(`Unrecognized builtin widget ${builtinWidget} for launcher ${note.noteId} "${note.title}"`);
}

View File

@ -28,7 +28,8 @@ export const byNoteType: Record<Exclude<NoteType, "book">, string | null> = {
render: null,
search: null,
text: null,
webView: null
webView: null,
aiChat: null
};
export const byBookType: Record<ViewTypeOptions, string | null> = {

View File

@ -0,0 +1,495 @@
/**
* Communication functions for LLM Chat
*/
import server from "../../services/server.js";
import type { SessionResponse } from "./types.js";
/**
* Create a new chat session
*/
export async function createChatSession(currentNoteId?: string): Promise<{chatNoteId: string | null, noteId: string | null}> {
try {
const resp = await server.post<SessionResponse>('llm/chat', {
title: 'Note Chat',
currentNoteId: currentNoteId // Pass the current note ID if available
});
if (resp && resp.id) {
// The backend might provide the noteId separately from the chatNoteId
// If noteId is provided, use it; otherwise, we'll need to query for it separately
return {
chatNoteId: resp.id,
noteId: resp.noteId || null
};
}
} catch (error) {
console.error('Failed to create chat session:', error);
}
return {
chatNoteId: null,
noteId: null
};
}
/**
* Check if a session exists
*/
export async function checkSessionExists(chatNoteId: string): Promise<boolean> {
try {
// Validate that we have a proper note ID format, not a session ID
// Note IDs in Trilium are typically longer or in a different format
if (chatNoteId && chatNoteId.length === 16 && /^[A-Za-z0-9]+$/.test(chatNoteId)) {
console.warn(`Invalid note ID format detected: ${chatNoteId} appears to be a legacy session ID`);
return false;
}
const sessionCheck = await server.getWithSilentNotFound<any>(`llm/chat/${chatNoteId}`);
return !!(sessionCheck && sessionCheck.id);
} catch (error: any) {
console.log(`Error checking chat note ${chatNoteId}:`, error);
return false;
}
}
/**
* Set up streaming response via WebSocket
*/
export async function setupStreamingResponse(
chatNoteId: string,
messageParams: any,
onContentUpdate: (content: string, isDone?: boolean) => void,
onThinkingUpdate: (thinking: string) => void,
onToolExecution: (toolData: any) => void,
onComplete: () => void,
onError: (error: Error) => void
): Promise<void> {
// Validate that we have a proper note ID format, not a session ID
if (chatNoteId && chatNoteId.length === 16 && /^[A-Za-z0-9]+$/.test(chatNoteId)) {
console.error(`Invalid note ID format: ${chatNoteId} appears to be a legacy session ID`);
onError(new Error("Invalid note ID format - using a legacy session ID"));
return;
}
return new Promise((resolve, reject) => {
let assistantResponse = '';
let postToolResponse = ''; // Separate accumulator for post-tool execution content
let receivedAnyContent = false;
let receivedPostToolContent = false; // Track if we've started receiving post-tool content
let timeoutId: number | null = null;
let initialTimeoutId: number | null = null;
let cleanupTimeoutId: number | null = null;
let receivedAnyMessage = false;
let toolsExecuted = false; // Flag to track if tools were executed in this session
let toolExecutionCompleted = false; // Flag to track if tool execution is completed
let eventListener: ((event: Event) => void) | null = null;
let lastMessageTimestamp = 0;
// Create a unique identifier for this response process
const responseId = `llm-stream-${Date.now()}-${Math.floor(Math.random() * 1000)}`;
console.log(`[${responseId}] Setting up WebSocket streaming for chat note ${chatNoteId}`);
// Send the initial request to initiate streaming
(async () => {
try {
const streamResponse = await server.post<any>(`llm/chat/${chatNoteId}/messages/stream`, {
content: messageParams.content,
useAdvancedContext: messageParams.useAdvancedContext,
showThinking: messageParams.showThinking,
options: {
temperature: 0.7,
maxTokens: 2000
}
});
if (!streamResponse || !streamResponse.success) {
console.error(`[${responseId}] Failed to initiate streaming`);
reject(new Error('Failed to initiate streaming'));
return;
}
console.log(`[${responseId}] Streaming initiated successfully`);
} catch (error) {
console.error(`[${responseId}] Error initiating streaming:`, error);
reject(error);
return;
}
})();
// Function to safely perform cleanup
const performCleanup = () => {
if (cleanupTimeoutId) {
window.clearTimeout(cleanupTimeoutId);
cleanupTimeoutId = null;
}
console.log(`[${responseId}] Performing final cleanup of event listener`);
cleanupEventListener(eventListener);
onComplete();
resolve();
};
// Function to schedule cleanup with ability to cancel
const scheduleCleanup = (delay: number) => {
// Clear any existing cleanup timeout
if (cleanupTimeoutId) {
window.clearTimeout(cleanupTimeoutId);
}
console.log(`[${responseId}] Scheduling listener cleanup in ${delay}ms`);
// Set new cleanup timeout
cleanupTimeoutId = window.setTimeout(() => {
// Only clean up if no messages received recently (in last 2 seconds)
const timeSinceLastMessage = Date.now() - lastMessageTimestamp;
if (timeSinceLastMessage > 2000) {
performCleanup();
} else {
console.log(`[${responseId}] Received message recently, delaying cleanup`);
// Reschedule cleanup
scheduleCleanup(2000);
}
}, delay);
};
// Create a message handler for CustomEvents
eventListener = (event: Event) => {
const customEvent = event as CustomEvent;
const message = customEvent.detail;
// Only process messages for our chat note
if (!message || message.chatNoteId !== chatNoteId) {
return;
}
// Update last message timestamp
lastMessageTimestamp = Date.now();
// Cancel any pending cleanup when we receive a new message
if (cleanupTimeoutId) {
console.log(`[${responseId}] Cancelling scheduled cleanup due to new message`);
window.clearTimeout(cleanupTimeoutId);
cleanupTimeoutId = null;
}
console.log(`[${responseId}] LLM Stream message received via CustomEvent: chatNoteId=${chatNoteId}, content=${!!message.content}, contentLength=${message.content?.length || 0}, thinking=${!!message.thinking}, toolExecution=${!!message.toolExecution}, done=${!!message.done}, type=${message.type || 'llm-stream'}`);
// Mark first message received
if (!receivedAnyMessage) {
receivedAnyMessage = true;
console.log(`[${responseId}] First message received for chat note ${chatNoteId}`);
// Clear the initial timeout since we've received a message
if (initialTimeoutId !== null) {
window.clearTimeout(initialTimeoutId);
initialTimeoutId = null;
}
}
// Handle specific message types
if (message.type === 'tool_execution_start') {
toolsExecuted = true; // Mark that tools were executed
onThinkingUpdate('Executing tools...');
// Also trigger tool execution UI with a specific format
onToolExecution({
action: 'start',
tool: 'tools',
result: 'Executing tools...'
});
return; // Skip accumulating content from this message
}
if (message.type === 'tool_result' && message.toolExecution) {
toolsExecuted = true; // Mark that tools were executed
console.log(`[${responseId}] Processing tool result: ${JSON.stringify(message.toolExecution)}`);
// If tool execution doesn't have an action, add 'result' as the default
if (!message.toolExecution.action) {
message.toolExecution.action = 'result';
}
// First send a 'start' action to ensure the container is created
onToolExecution({
action: 'start',
tool: 'tools',
result: 'Tool execution initialized'
});
// Then send the actual tool execution data
onToolExecution(message.toolExecution);
// Mark tool execution as completed if this is a result or error
if (message.toolExecution.action === 'result' || message.toolExecution.action === 'complete' || message.toolExecution.action === 'error') {
toolExecutionCompleted = true;
console.log(`[${responseId}] Tool execution completed`);
}
return; // Skip accumulating content from this message
}
if (message.type === 'tool_execution_error' && message.toolExecution) {
toolsExecuted = true; // Mark that tools were executed
toolExecutionCompleted = true; // Mark tool execution as completed
onToolExecution({
...message.toolExecution,
action: 'error',
error: message.toolExecution.error || 'Unknown error during tool execution'
});
return; // Skip accumulating content from this message
}
if (message.type === 'tool_completion_processing') {
toolsExecuted = true; // Mark that tools were executed
toolExecutionCompleted = true; // Tools are done, now processing the result
onThinkingUpdate('Generating response with tool results...');
// Also trigger tool execution UI with a specific format
onToolExecution({
action: 'generating',
tool: 'tools',
result: 'Generating response with tool results...'
});
return; // Skip accumulating content from this message
}
// Handle content updates
if (message.content) {
console.log(`[${responseId}] Received content chunk of length ${message.content.length}, preview: "${message.content.substring(0, 50)}${message.content.length > 50 ? '...' : ''}"`);
// If tools were executed and completed, and we're now getting new content,
// this is likely the final response after tool execution from Anthropic
if (toolsExecuted && toolExecutionCompleted && message.content) {
console.log(`[${responseId}] Post-tool execution content detected`);
// If this is the first post-tool chunk, indicate we're starting a new response
if (!receivedPostToolContent) {
receivedPostToolContent = true;
postToolResponse = ''; // Clear any previous post-tool response
console.log(`[${responseId}] First post-tool content chunk, starting fresh accumulation`);
}
// Accumulate post-tool execution content
postToolResponse += message.content;
console.log(`[${responseId}] Accumulated post-tool content, now ${postToolResponse.length} chars`);
// Update the UI with the accumulated post-tool content
// This replaces the pre-tool content with our accumulated post-tool content
onContentUpdate(postToolResponse, message.done || false);
} else {
// Standard content handling for non-tool cases or initial tool response
// Check if this is a duplicated message containing the same content we already have
if (message.done && assistantResponse.includes(message.content)) {
console.log(`[${responseId}] Ignoring duplicated content in done message`);
} else {
// Add to our accumulated response
assistantResponse += message.content;
}
// Update the UI immediately with each chunk
onContentUpdate(assistantResponse, message.done || false);
}
receivedAnyContent = true;
// Reset timeout since we got content
if (timeoutId !== null) {
window.clearTimeout(timeoutId);
}
// Set new timeout
timeoutId = window.setTimeout(() => {
console.warn(`[${responseId}] Stream timeout for chat note ${chatNoteId}`);
// Clean up
performCleanup();
reject(new Error('Stream timeout'));
}, 30000);
}
// Handle tool execution updates (legacy format and standard format with llm-stream type)
if (message.toolExecution) {
// Only process if we haven't already handled this message via specific message types
if (message.type === 'llm-stream' || !message.type) {
console.log(`[${responseId}] Received tool execution update: action=${message.toolExecution.action || 'unknown'}`);
toolsExecuted = true; // Mark that tools were executed
// Mark tool execution as completed if this is a result or error
if (message.toolExecution.action === 'result' ||
message.toolExecution.action === 'complete' ||
message.toolExecution.action === 'error') {
toolExecutionCompleted = true;
console.log(`[${responseId}] Tool execution completed via toolExecution message`);
}
onToolExecution(message.toolExecution);
}
}
// Handle tool calls from the raw data or direct in message (OpenAI format)
const toolCalls = message.tool_calls || (message.raw && message.raw.tool_calls);
if (toolCalls && Array.isArray(toolCalls)) {
console.log(`[${responseId}] Received tool calls: ${toolCalls.length} tools`);
toolsExecuted = true; // Mark that tools were executed
// First send a 'start' action to ensure the container is created
onToolExecution({
action: 'start',
tool: 'tools',
result: 'Tool execution initialized'
});
// Then process each tool call
for (const toolCall of toolCalls) {
let args = toolCall.function?.arguments || {};
// Try to parse arguments if they're a string
if (typeof args === 'string') {
try {
args = JSON.parse(args);
} catch (e) {
console.warn(`[${responseId}] Could not parse tool arguments as JSON: ${e}`);
args = { raw: args };
}
}
onToolExecution({
action: 'executing',
tool: toolCall.function?.name || 'unknown',
toolCallId: toolCall.id,
args: args
});
}
}
// Handle thinking state updates
if (message.thinking) {
console.log(`[${responseId}] Received thinking update: ${message.thinking.substring(0, 50)}...`);
onThinkingUpdate(message.thinking);
}
// Handle completion
if (message.done) {
console.log(`[${responseId}] Stream completed for chat note ${chatNoteId}, has content: ${!!message.content}, content length: ${message.content?.length || 0}, current response: ${assistantResponse.length} chars`);
// Dump message content to console for debugging
if (message.content) {
console.log(`[${responseId}] CONTENT IN DONE MESSAGE (first 200 chars): "${message.content.substring(0, 200)}..."`);
// Check if the done message contains the exact same content as our accumulated response
// We normalize by trimming surrounding whitespace to avoid false negatives from leading/trailing spacing differences
const normalizedMessage = message.content.trim();
const normalizedResponse = assistantResponse.trim();
if (normalizedMessage === normalizedResponse) {
console.log(`[${responseId}] Final message is identical to accumulated response, no need to update`);
}
// If the done message is longer but contains our accumulated response, use the done message
else if (normalizedMessage.includes(normalizedResponse) && normalizedMessage.length > normalizedResponse.length) {
console.log(`[${responseId}] Final message is more complete than accumulated response, using it`);
assistantResponse = message.content;
}
// If the done message has unique content we haven't accumulated, prefer it over the accumulated response
else if (!normalizedResponse.includes(normalizedMessage) && normalizedMessage.length > 0) {
console.log(`[${responseId}] Final message has unique content, using it`);
assistantResponse = message.content;
}
// Otherwise, we already have the content accumulated, so no need to update
else {
console.log(`[${responseId}] Already have this content accumulated, not updating`);
}
}
// Clear timeout if set
if (timeoutId !== null) {
window.clearTimeout(timeoutId);
timeoutId = null;
}
// Always mark as done when we receive the done flag
onContentUpdate(assistantResponse, true);
// Set a longer delay before cleanup to allow for post-tool execution messages
// Especially important for Anthropic which may send final message after tool execution
const cleanupDelay = toolsExecuted ? 15000 : 1000; // 15 seconds if tools were used, otherwise 1 second
console.log(`[${responseId}] Setting cleanup delay of ${cleanupDelay}ms since toolsExecuted=${toolsExecuted}`);
scheduleCleanup(cleanupDelay);
}
};
// Register event listener for the custom event
try {
window.addEventListener('llm-stream-message', eventListener);
console.log(`[${responseId}] Event listener added for llm-stream-message events`);
} catch (err) {
console.error(`[${responseId}] Error setting up event listener:`, err);
reject(err);
return;
}
// Set initial timeout for receiving any message
initialTimeoutId = window.setTimeout(() => {
console.warn(`[${responseId}] No messages received for initial period in chat note ${chatNoteId}`);
if (!receivedAnyMessage) {
console.error(`[${responseId}] WebSocket connection not established for chat note ${chatNoteId}`);
if (timeoutId !== null) {
window.clearTimeout(timeoutId);
}
// Clean up
cleanupEventListener(eventListener);
// Show error message to user
reject(new Error('WebSocket connection not established'));
}
}, 10000);
});
}
/**
* Clean up an event listener
*/
function cleanupEventListener(listener: ((event: Event) => void) | null): void {
if (listener) {
try {
window.removeEventListener('llm-stream-message', listener);
console.log(`Successfully removed event listener`);
} catch (err) {
console.error(`Error removing event listener:`, err);
}
}
}
/**
* Get a direct response from the server without streaming
*/
export async function getDirectResponse(chatNoteId: string, messageParams: any): Promise<any> {
try {
// Validate that we have a proper note ID format, not a session ID
if (chatNoteId && chatNoteId.length === 16 && /^[A-Za-z0-9]+$/.test(chatNoteId)) {
console.error(`Invalid note ID format: ${chatNoteId} appears to be a legacy session ID`);
throw new Error("Invalid note ID format - using a legacy session ID");
}
const postResponse = await server.post<any>(`llm/chat/${chatNoteId}/messages`, {
message: messageParams.content,
includeContext: messageParams.useAdvancedContext,
options: {
temperature: 0.7,
maxTokens: 2000
}
});
return postResponse;
} catch (error) {
console.error('Error getting direct response:', error);
throw error;
}
}
/**
* Get embedding statistics
*/
export async function getEmbeddingStats(): Promise<any> {
return server.get('llm/embeddings/stats');
}
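/*
 * For reference, the stats endpoint is expected to answer with this shape
 * (illustrative, mirroring the EmbeddingStats interface used by the settings widget):
 *
 *   { success: true, stats: { totalNotesCount, embeddedNotesCount,
 *     queuedNotesCount, failedNotesCount, lastProcessedDate, percentComplete } }
 */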

View File

@ -0,0 +1,6 @@
/**
* LLM Chat Panel Widget Module
*/
import LlmChatPanel from './llm_chat_panel.js';
export default LlmChatPanel;

File diff suppressed because it is too large

View File

@ -0,0 +1,59 @@
/**
* Message processing functions for LLM Chat
*/
import type { ToolExecutionStep } from "./types.js";
/**
* Extract tool execution steps from the DOM that are within the chat flow
*/
export function extractInChatToolSteps(chatMessagesElement: HTMLElement): ToolExecutionStep[] {
const steps: ToolExecutionStep[] = [];
// Look for tool execution in the chat flow
const toolExecutionElement = chatMessagesElement.querySelector('.chat-tool-execution');
if (toolExecutionElement) {
// Find all tool step elements
const stepElements = toolExecutionElement.querySelectorAll('.tool-step');
stepElements.forEach(stepEl => {
const stepHtml = stepEl.innerHTML;
// Determine the step type based on icons or classes present
let type = 'info';
let name: string | undefined;
let content = '';
if (stepHtml.includes('bx-code-block')) {
type = 'executing';
content = 'Executing tools...';
} else if (stepHtml.includes('bx-terminal')) {
type = 'result';
// Extract the tool name from the step
const nameMatch = stepHtml.match(/<span[^>]*>Tool: ([^<]+)<\/span>/);
name = nameMatch ? nameMatch[1] : 'unknown';
// Extract the content from the div with class mt-1 ps-3
const contentEl = stepEl.querySelector('.mt-1.ps-3');
content = contentEl ? contentEl.innerHTML : '';
} else if (stepHtml.includes('bx-error-circle')) {
type = 'error';
const nameMatch = stepHtml.match(/<span[^>]*>Tool: ([^<]+)<\/span>/);
name = nameMatch ? nameMatch[1] : 'unknown';
const contentEl = stepEl.querySelector('.mt-1.ps-3.text-danger');
content = contentEl ? contentEl.innerHTML : '';
} else if (stepHtml.includes('bx-message-dots')) {
type = 'generating';
content = 'Generating response with tool results...';
} else if (stepHtml.includes('bx-loader-alt')) {
// Skip the initializing spinner
return;
}
steps.push({ type, name, content });
});
}
return steps;
}
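/*
 * Sketch of the markup this parser expects, mirroring what
 * renderToolStepsHtml() in ui.ts produces ("search_notes" is a placeholder):
 *
 *   <div class="chat-tool-execution">
 *     <div class="tool-step my-1">
 *       <i class="bx bx-terminal text-success"></i>
 *       <span class="fw-bold">Tool: search_notes</span>
 *       <div class="mt-1 ps-3">...result html...</div>
 *     </div>
 *   </div>
 */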

View File

@ -0,0 +1,55 @@
/**
* Types for LLM Chat Panel
*/
export interface ChatResponse {
id: string;
messages: Array<{role: string; content: string}>;
sources?: Array<{noteId: string; title: string}>;
}
export interface SessionResponse {
id: string;
title: string;
noteId?: string;
}
export interface ToolExecutionStep {
type: string;
name?: string;
content: string;
}
export interface MessageData {
role: string;
content: string;
timestamp?: Date;
}
export interface ChatData {
messages: MessageData[];
chatNoteId: string | null;
noteId?: string | null;
toolSteps: ToolExecutionStep[];
sources?: Array<{
noteId: string;
title: string;
similarity?: number;
content?: string;
}>;
metadata?: {
model?: string;
provider?: string;
temperature?: number;
maxTokens?: number;
lastUpdated?: string;
toolExecutions?: Array<{
id: string;
name: string;
arguments: any;
result: any;
error?: string;
timestamp: string;
}>;
};
}
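/*
 * A minimal ChatData example (all values illustrative):
 *
 *   const data: ChatData = {
 *       messages: [{ role: "user", content: "Summarize this note" }],
 *       chatNoteId: "abc123def456",
 *       toolSteps: [],
 *       metadata: { model: "gpt-4o", lastUpdated: new Date().toISOString() }
 *   };
 */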

View File

@ -0,0 +1,251 @@
/**
* UI-related functions for LLM Chat
*/
import { t } from "../../services/i18n.js";
import type { ToolExecutionStep } from "./types.js";
import { formatMarkdown, applyHighlighting, escapeHtml } from "./utils.js";
// Template for the chat widget
export const TPL = `
<div class="note-context-chat h-100 w-100 d-flex flex-column">
<!-- Move validation warning outside the card with better styling -->
<div class="provider-validation-warning alert alert-warning m-2 border-left border-warning" style="display: none; padding-left: 15px; border-left: 4px solid #ffc107; background-color: rgba(255, 248, 230, 0.9); font-size: 0.9rem; box-shadow: 0 2px 5px rgba(0,0,0,0.05);"></div>
<div class="note-context-chat-container flex-grow-1 overflow-auto p-3">
<div class="note-context-chat-messages"></div>
<div class="loading-indicator" style="display: none;">
<div class="spinner-border spinner-border-sm text-primary" role="status">
<span class="visually-hidden">Loading...</span>
</div>
<span class="ms-2">${t('ai_llm.agent.processing')}</span>
</div>
</div>
<div class="sources-container p-2 border-top" style="display: none;">
<h6 class="m-0 p-1 d-flex align-items-center">
<i class="bx bx-link-alt me-1"></i> ${t('ai_llm.sources')}
<span class="badge bg-primary rounded-pill ms-2 sources-count"></span>
</h6>
<div class="sources-list mt-2"></div>
</div>
<form class="note-context-chat-form d-flex flex-column border-top p-2">
<div class="d-flex chat-input-container mb-2">
<textarea
class="form-control note-context-chat-input"
placeholder="${t('ai_llm.enter_message')}"
rows="2"
></textarea>
<button type="submit" class="btn btn-primary note-context-chat-send-button ms-2 d-flex align-items-center justify-content-center">
<i class="bx bx-send"></i>
</button>
</div>
<div class="d-flex align-items-center context-option-container mt-1 justify-content-end">
<small class="text-muted me-auto fst-italic">Options:</small>
<div class="form-check form-switch me-3 small">
<input class="form-check-input use-advanced-context-checkbox" type="checkbox" id="useEnhancedContext" checked>
<label class="form-check-label small" for="useEnhancedContext" title="${t('ai.enhanced_context_description')}">
${t('ai_llm.use_enhanced_context')}
<i class="bx bx-info-circle small text-muted"></i>
</label>
</div>
<div class="form-check form-switch small">
<input class="form-check-input show-thinking-checkbox" type="checkbox" id="showThinking">
<label class="form-check-label small" for="showThinking" title="${t('ai.show_thinking_description')}">
${t('ai_llm.show_thinking')}
<i class="bx bx-info-circle small text-muted"></i>
</label>
</div>
</div>
</form>
</div>
`;
/**
* Add a message to the chat UI
*/
export function addMessageToChat(messagesContainer: HTMLElement, chatContainer: HTMLElement, role: 'user' | 'assistant', content: string) {
const messageElement = document.createElement('div');
messageElement.className = `chat-message ${role}-message mb-3 d-flex`;
const avatarElement = document.createElement('div');
avatarElement.className = 'message-avatar d-flex align-items-center justify-content-center me-2';
if (role === 'user') {
avatarElement.innerHTML = '<i class="bx bx-user"></i>';
avatarElement.classList.add('user-avatar');
} else {
avatarElement.innerHTML = '<i class="bx bx-bot"></i>';
avatarElement.classList.add('assistant-avatar');
}
const contentElement = document.createElement('div');
contentElement.className = 'message-content p-3 rounded flex-grow-1';
if (role === 'user') {
contentElement.classList.add('user-content', 'bg-light');
} else {
contentElement.classList.add('assistant-content');
}
// Format the content with markdown
contentElement.innerHTML = formatMarkdown(content);
messageElement.appendChild(avatarElement);
messageElement.appendChild(contentElement);
messagesContainer.appendChild(messageElement);
// Apply syntax highlighting to any code blocks in the message
applyHighlighting(contentElement);
// Scroll to bottom
chatContainer.scrollTop = chatContainer.scrollHeight;
}
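/*
 * Usage sketch: addMessageToChat(messagesEl, chatEl, 'assistant', '**Done**')
 * renders the markdown via formatMarkdown(), highlights any code blocks,
 * and scrolls the container to the bottom.
 */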
/**
* Show sources in the UI
*/
export function showSources(
sourcesList: HTMLElement,
sourcesContainer: HTMLElement,
sourcesCount: HTMLElement,
sources: Array<{noteId: string, title: string}>,
onSourceClick: (noteId: string) => void
) {
sourcesList.innerHTML = '';
sourcesCount.textContent = sources.length.toString();
sources.forEach(source => {
const sourceElement = document.createElement('div');
sourceElement.className = 'source-item p-2 mb-1 border rounded d-flex align-items-center';
// Create the direct link to the note (escape the title, since it's user-provided content)
sourceElement.innerHTML = `
<div class="d-flex align-items-center w-100">
<a href="#root/${source.noteId}"
data-note-id="${source.noteId}"
class="source-link text-truncate d-flex align-items-center"
title="Open note: ${escapeHtml(source.title)}">
<i class="bx bx-file-blank me-1"></i>
<span class="source-title">${escapeHtml(source.title)}</span>
</a>
</div>`;
// Add click handler
sourceElement.querySelector('.source-link')?.addEventListener('click', (e) => {
e.preventDefault();
e.stopPropagation();
onSourceClick(source.noteId);
return false;
});
sourcesList.appendChild(sourceElement);
});
sourcesContainer.style.display = 'block';
}
/**
* Hide sources in the UI
*/
export function hideSources(sourcesContainer: HTMLElement) {
sourcesContainer.style.display = 'none';
}
/**
* Show loading indicator
*/
export function showLoadingIndicator(loadingIndicator: HTMLElement) {
const logId = `ui-${Date.now()}`;
console.log(`[${logId}] Showing loading indicator`);
try {
loadingIndicator.style.display = 'flex';
// Read offsetHeight to force a synchronous reflow so the display change paints immediately
void loadingIndicator.offsetHeight;
console.log(`[${logId}] Loading indicator shown`);
} catch (err) {
console.error(`[${logId}] Error showing loading indicator:`, err);
}
}
/**
* Hide loading indicator
*/
export function hideLoadingIndicator(loadingIndicator: HTMLElement) {
const logId = `ui-${Date.now()}`;
console.log(`[${logId}] Hiding loading indicator`);
try {
loadingIndicator.style.display = 'none';
// Read offsetHeight to force a reflow so the indicator disappears immediately
void loadingIndicator.offsetHeight;
console.log(`[${logId}] Loading indicator hidden`);
} catch (err) {
console.error(`[${logId}] Error hiding loading indicator:`, err);
}
}
/**
* Render tool steps as HTML for display in chat
*/
export function renderToolStepsHtml(steps: ToolExecutionStep[]): string {
if (!steps || steps.length === 0) return '';
let html = '';
steps.forEach(step => {
let icon, labelClass, content;
switch (step.type) {
case 'executing':
icon = 'bx-code-block text-primary';
labelClass = '';
content = `<div class="d-flex align-items-center">
<i class="bx ${icon} me-1"></i>
<span>${step.content}</span>
</div>`;
break;
case 'result':
icon = 'bx-terminal text-success';
labelClass = 'fw-bold';
content = `<div class="d-flex align-items-center">
<i class="bx ${icon} me-1"></i>
<span class="${labelClass}">Tool: ${step.name || 'unknown'}</span>
</div>
<div class="mt-1 ps-3">${step.content}</div>`;
break;
case 'error':
icon = 'bx-error-circle text-danger';
labelClass = 'fw-bold text-danger';
content = `<div class="d-flex align-items-center">
<i class="bx ${icon} me-1"></i>
<span class="${labelClass}">Tool: ${step.name || 'unknown'}</span>
</div>
<div class="mt-1 ps-3 text-danger">${step.content}</div>`;
break;
case 'generating':
icon = 'bx-message-dots text-info';
labelClass = '';
content = `<div class="d-flex align-items-center">
<i class="bx ${icon} me-1"></i>
<span>${step.content}</span>
</div>`;
break;
default:
icon = 'bx-info-circle text-muted';
labelClass = '';
content = `<div class="d-flex align-items-center">
<i class="bx ${icon} me-1"></i>
<span>${step.content}</span>
</div>`;
}
html += `<div class="tool-step my-1">${content}</div>`;
});
return html;
}
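/*
 * Round-trip note: the HTML emitted here is what extractInChatToolSteps()
 * in message_processor.ts parses back into steps, e.g. (illustrative):
 *
 *   renderToolStepsHtml([{ type: 'result', name: 'search_notes', content: 'Found 3 notes' }]);
 */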

View File

@ -0,0 +1,93 @@
/**
* Utility functions for LLM Chat
*/
import { marked } from "marked";
import { applySyntaxHighlight } from "../../services/syntax_highlight.js";
/**
* Format markdown content for display
*/
export function formatMarkdown(content: string): string {
if (!content) return '';
// First, extract HTML thinking visualization to protect it from replacements
const thinkingBlocks: string[] = [];
let processedContent = content.replace(/<div class=['"](thinking-process|reasoning-process)['"][\s\S]*?<\/div>/g, (match) => {
const placeholder = `__THINKING_BLOCK_${thinkingBlocks.length}__`;
thinkingBlocks.push(match);
return placeholder;
});
// Use marked library to parse the markdown
const markedContent = marked(processedContent, {
breaks: true, // Convert line breaks to <br>
gfm: true, // Enable GitHub Flavored Markdown
silent: true // Ignore errors
});
// Handle potential promise (though it shouldn't be with our options)
if (typeof markedContent === 'string') {
processedContent = markedContent;
} else {
console.warn('Marked returned a promise unexpectedly');
// Use the original content as fallback
processedContent = content;
}
// Restore thinking visualization blocks
thinkingBlocks.forEach((block, index) => {
processedContent = processedContent.replace(`__THINKING_BLOCK_${index}__`, block);
});
return processedContent;
}
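/*
 * Example (illustrative): formatMarkdown('**Hi**\n<div class="thinking-process">…</div>')
 * swaps the thinking <div> out for a placeholder before marked runs, then
 * restores it verbatim, so only the markdown around it is transformed.
 */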
/**
* Simple HTML escaping for safer content display
*/
export function escapeHtml(text: string): string {
if (typeof text !== 'string') {
text = String(text || '');
}
return text
.replace(/&/g, '&amp;')
.replace(/</g, '&lt;')
.replace(/>/g, '&gt;')
.replace(/"/g, '&quot;')
.replace(/'/g, '&#039;');
}
/**
* Apply syntax highlighting to content
*/
export function applyHighlighting(element: HTMLElement): void {
applySyntaxHighlight($(element));
}
/**
* Format tool arguments for display
*/
export function formatToolArgs(args: any): string {
if (!args || typeof args !== 'object') return '';
return Object.entries(args)
.map(([key, value]) => {
// Format the value based on its type
let displayValue;
if (typeof value === 'string') {
displayValue = value.length > 50 ? `"${value.substring(0, 47)}..."` : `"${value}"`;
} else if (value === null) {
displayValue = 'null';
} else if (Array.isArray(value)) {
displayValue = '[...]'; // Simplified array representation
} else if (typeof value === 'object') {
displayValue = '{...}'; // Simplified object representation
} else {
displayValue = String(value);
}
return `<span class="text-primary">${escapeHtml(key)}</span>: ${escapeHtml(displayValue)}`;
})
.join(', ');
}
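/*
 * Example (illustrative): formatToolArgs({ query: "trilium", limit: 5, tags: ["a"] })
 * renders each key as a highlighted span and quotes string values (truncating
 * past 50 characters), while arrays and objects collapse to "[...]" / "{...}".
 */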

View File

@ -0,0 +1,104 @@
/**
* Validation functions for LLM Chat
*/
import options from "../../services/options.js";
import { getEmbeddingStats } from "./communication.js";
/**
* Validate embedding providers configuration
*/
export async function validateEmbeddingProviders(validationWarning: HTMLElement): Promise<void> {
try {
// Check if AI is enabled
const aiEnabled = options.is('aiEnabled');
if (!aiEnabled) {
validationWarning.style.display = 'none';
return;
}
// Get provider precedence
const precedenceStr = options.get('aiProviderPrecedence') || 'openai,anthropic,ollama';
let precedenceList: string[] = [];
if (precedenceStr) {
if (precedenceStr.startsWith('[') && precedenceStr.endsWith(']')) {
precedenceList = JSON.parse(precedenceStr);
} else if (precedenceStr.includes(',')) {
precedenceList = precedenceStr.split(',').map(p => p.trim());
} else {
precedenceList = [precedenceStr];
}
}
// Get enabled providers - this is a simplification since we don't have direct DB access
// We'll determine enabled status based on the presence of keys or settings
const enabledProviders: string[] = [];
// OpenAI is enabled if API key is set
const openaiKey = options.get('openaiApiKey');
if (openaiKey) {
enabledProviders.push('openai');
}
// Anthropic is enabled if API key is set
const anthropicKey = options.get('anthropicApiKey');
if (anthropicKey) {
enabledProviders.push('anthropic');
}
// Ollama is enabled if base URL is set
const ollamaBaseUrl = options.get('ollamaBaseUrl');
if (ollamaBaseUrl) {
enabledProviders.push('ollama');
}
// Local is always available
enabledProviders.push('local');
// Perform validation checks
const allPrecedenceEnabled = precedenceList.every((p: string) => enabledProviders.includes(p));
// Get embedding queue status
const embeddingStats = await getEmbeddingStats() as {
success: boolean,
stats: {
totalNotesCount: number;
embeddedNotesCount: number;
queuedNotesCount: number;
failedNotesCount: number;
lastProcessedDate: string | null;
percentComplete: number;
}
};
const queuedNotes = embeddingStats?.stats?.queuedNotesCount || 0;
const hasEmbeddingsInQueue = queuedNotes > 0;
// Show warning if there are issues
if (!allPrecedenceEnabled || hasEmbeddingsInQueue) {
let message = '<i class="bx bx-error-circle me-2"></i><strong>AI Provider Configuration Issues</strong>';
message += '<ul class="mb-1 ps-4">';
if (!allPrecedenceEnabled) {
const disabledProviders = precedenceList.filter((p: string) => !enabledProviders.includes(p));
message += `<li>The following providers in your precedence list are not enabled: ${disabledProviders.join(', ')}.</li>`;
}
if (hasEmbeddingsInQueue) {
message += `<li>Currently processing embeddings for ${queuedNotes} notes. Some AI features may produce incomplete results until processing completes.</li>`;
}
message += '</ul>';
message += '<div class="mt-2"><a href="javascript:" class="settings-link btn btn-sm btn-outline-secondary"><i class="bx bx-cog me-1"></i>Open AI Settings</a></div>';
// Update HTML content
validationWarning.innerHTML = message;
validationWarning.style.display = 'block';
} else {
validationWarning.style.display = 'none';
}
} catch (error) {
console.error('Error validating embedding providers:', error);
validationWarning.style.display = 'none';
}
}
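/*
 * Usage sketch (hypothetical wiring): the chat panel calls this on refresh,
 * passing its warning element from the TPL in ui.ts:
 *
 *   const warning = widgetEl.querySelector<HTMLElement>('.provider-validation-warning');
 *   if (warning) await validateEmbeddingProviders(warning);
 */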

View File

@ -0,0 +1,7 @@
/**
* LLM Chat Panel Widget
* This file is preserved for backward compatibility.
* The actual implementation has been moved to the llm_chat/ folder.
*/
import LlmChatPanel from './llm_chat/index.js';
export default LlmChatPanel;

View File

@ -36,6 +36,7 @@ import utils from "../services/utils.js";
import type { NoteType } from "../entities/fnote.js";
import type TypeWidget from "./type_widgets/type_widget.js";
import { MermaidTypeWidget } from "./type_widgets/mermaid.js";
import AiChatTypeWidget from "./type_widgets/ai_chat.js";
const TPL = /*html*/`
<div class="note-detail">
@ -74,6 +75,7 @@ const typeWidgetClasses = {
attachmentList: AttachmentListTypeWidget,
mindMap: MindMapWidget,
geoMap: GeoMapTypeWidget,
aiChat: AiChatTypeWidget,
// Split type editors
mermaid: MermaidTypeWidget
@ -92,7 +94,8 @@ type ExtendedNoteType =
| "editableCode"
| "attachmentDetail"
| "attachmentList"
| "protectedSession";
| "protectedSession"
| "aiChat";
export default class NoteDetailWidget extends NoteContextAwareWidget {
@ -215,12 +218,11 @@ export default class NoteDetailWidget extends NoteContextAwareWidget {
async getWidgetType(): Promise<ExtendedNoteType> {
const note = this.note;
if (!note) {
return "empty";
}
let type: NoteType = note.type;
const type = note.type;
let resultingType: ExtendedNoteType;
const viewScope = this.noteContext?.viewScope;

View File

@ -38,6 +38,7 @@ const NOTE_TYPES: NoteTypeMapping[] = [
// Misc note types
{ type: "render", mime: "", title: t("note_types.render-note"), selectable: true },
{ type: "webView", mime: "", title: t("note_types.web-view"), selectable: true },
{ type: "aiChat", mime: "application/json", title: t("note_types.ai-chat"), selectable: true },
// Code notes
{ type: "code", mime: "text/plain", title: t("note_types.code"), selectable: true },

View File

@ -0,0 +1,53 @@
import BasicWidget from "./basic_widget.js";
/**
* Base class for widgets that need to track the active tab/note
*/
export default class TabAwareWidget extends BasicWidget {
constructor() {
super();
this.noteId = null;
this.noteType = null;
this.notePath = null;
this.isActiveTab = false;
}
/**
* Called when the active note is switched
*
* @param {string} noteId
* @param {string|null} noteType
* @param {string|null} notePath
*/
async noteSwitched(noteId, noteType, notePath) {
this.noteId = noteId;
this.noteType = noteType;
this.notePath = notePath;
}
/**
* Called when the widget's tab becomes active or inactive
*
* @param {boolean} active
*/
activeTabChanged(active) {
this.isActiveTab = active;
}
/**
* Called when entities (notes, attributes, etc.) are reloaded
*/
entitiesReloaded() {}
/**
* Check if this widget is enabled
*/
isEnabled() {
return true;
}
/**
* Refresh widget with current data
*/
async refresh() {}
}
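/*
 * Minimal subclass sketch (illustrative only):
 *
 *   class MyTabWidget extends TabAwareWidget {
 *       async noteSwitched(noteId, noteType, notePath) {
 *           await super.noteSwitched(noteId, noteType, notePath);
 *           await this.refresh();
 *       }
 *   }
 */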

View File

@ -0,0 +1,255 @@
import TypeWidget from "./type_widget.js";
import LlmChatPanel from "../llm_chat_panel.js";
import { type EventData } from "../../components/app_context.js";
import type FNote from "../../entities/fnote.js";
import server from "../../services/server.js";
import toastService from "../../services/toast.js";
export default class AiChatTypeWidget extends TypeWidget {
private llmChatPanel: LlmChatPanel;
private isInitialized: boolean = false;
private initPromise: Promise<void> | null = null;
constructor() {
super();
this.llmChatPanel = new LlmChatPanel();
// Connect the data callbacks
this.llmChatPanel.setDataCallbacks(
(data) => this.saveData(data),
() => this.getData()
);
}
static getType() {
return "aiChat";
}
doRender() {
this.$widget = $('<div class="ai-chat-widget-container" style="height: 100%;"></div>');
this.$widget.append(this.llmChatPanel.render());
return this.$widget;
}
// Override the refreshWithNote method to ensure we get note changes
async refreshWithNote(note: FNote | null | undefined) {
console.log("refreshWithNote called for note:", note?.noteId);
// Always force a refresh when the note changes
if (this.note?.noteId !== note?.noteId) {
console.log(`Note ID changed from ${this.note?.noteId} to ${note?.noteId}, forcing reset`);
this.isInitialized = false;
this.initPromise = null;
// Force refresh the chat panel with the new note
if (note) {
this.llmChatPanel.setCurrentNoteId(note.noteId);
}
}
// Continue with regular doRefresh
await this.doRefresh(note);
}
async doRefresh(note: FNote | null | undefined) {
try {
console.log("doRefresh called for note:", note?.noteId);
// If we're already initializing, wait for that to complete
if (this.initPromise) {
await this.initPromise;
return;
}
// Initialize once or when note changes
if (!this.isInitialized) {
console.log("Initializing AI Chat Panel for note:", note?.noteId);
// Initialize the note content first
if (note) {
try {
const content = await note.getContent();
// Check if content is empty
if (!content || content === '{}') {
// Initialize with empty chat history
await this.saveData({
messages: [],
title: note.title,
noteId: note.noteId // Store the note ID in the data
});
console.log("Initialized empty chat history for new note");
} else {
console.log("Note already has content, will load in LlmChatPanel.refresh()");
}
} catch (e) {
console.error("Error initializing AI Chat note content:", e);
}
}
// Create a promise to track initialization
this.initPromise = (async () => {
try {
// Reset the UI before refreshing
this.llmChatPanel.clearNoteContextChatMessages();
this.llmChatPanel.setMessages([]);
// This will load saved data via the getData callback
await this.llmChatPanel.refresh();
this.isInitialized = true;
} catch (e) {
console.error("Error initializing LlmChatPanel:", e);
toastService.showError("Failed to initialize chat panel. Try reloading.");
}
})();
await this.initPromise;
this.initPromise = null;
}
} catch (e) {
console.error("Error in doRefresh:", e);
toastService.showError("Error refreshing chat. Please try again.");
}
}
async entitiesReloadedEvent(data: EventData<"entitiesReloaded">) {
// We don't need to refresh on entities reloaded for the chat
}
async noteSwitched() {
console.log("Note switched to:", this.noteId);
// Force a full reset when switching notes
this.isInitialized = false;
this.initPromise = null;
if (this.note) {
// Update the chat panel with the new note ID before refreshing
this.llmChatPanel.setCurrentNoteId(this.note.noteId);
// Reset the chat panel UI
this.llmChatPanel.clearNoteContextChatMessages();
this.llmChatPanel.setMessages([]);
this.llmChatPanel.setChatNoteId(null);
}
// Call the parent method to refresh
await super.noteSwitched();
}
async activeContextChangedEvent(data: EventData<"activeContextChanged">) {
if (!this.isActive()) {
return;
}
console.log("Active context changed, refreshing AI Chat Panel");
// Always refresh when we become active - this ensures we load the correct note data
try {
// Reset initialization flag to force a refresh
this.isInitialized = false;
// Make sure the chat panel has the current note ID
if (this.note) {
this.llmChatPanel.setCurrentNoteId(this.note.noteId);
}
this.initPromise = (async () => {
try {
// Reset the UI before refreshing
this.llmChatPanel.clearNoteContextChatMessages();
this.llmChatPanel.setMessages([]);
await this.llmChatPanel.refresh();
this.isInitialized = true;
} catch (e) {
console.error("Error refreshing LlmChatPanel:", e);
}
})();
await this.initPromise;
this.initPromise = null;
} catch (e) {
console.error("Error in activeContextChangedEvent:", e);
}
}
// Save chat data to the note
async saveData(data: any) {
if (!this.note) {
return;
}
try {
console.log(`AiChatTypeWidget: Saving data for note ${this.note.noteId}`);
// Format the data properly - this is the canonical format of the data
const formattedData = {
messages: data.messages || [],
chatNoteId: data.chatNoteId || this.note.noteId,
toolSteps: data.toolSteps || [],
sources: data.sources || [],
metadata: {
...(data.metadata || {}),
lastUpdated: new Date().toISOString()
}
};
// Save the data to the note
await server.put(`notes/${this.note.noteId}/data`, {
content: JSON.stringify(formattedData, null, 2)
});
} catch (e) {
console.error("Error saving AI Chat data:", e);
toastService.showError("Failed to save chat data");
}
}
// Get data from the note
async getData() {
if (!this.note) {
return null;
}
try {
console.log(`AiChatTypeWidget: Getting data for note ${this.note.noteId}`);
const content = await this.note.getContent();
if (!content) {
console.log("Note content is empty");
return null;
}
// Parse the content as JSON
let parsedContent;
try {
parsedContent = JSON.parse(content as string);
console.log("Successfully parsed note content as JSON");
} catch (e) {
console.error("Error parsing chat content as JSON:", e);
return null;
}
// Check if this is a blob response with 'content' property that needs to be parsed again
// This happens when the content is returned from the /blob endpoint
if (parsedContent.content && typeof parsedContent.content === 'string' &&
parsedContent.blobId && parsedContent.contentLength) {
try {
// The actual chat data is inside the 'content' property as a string
console.log("Detected blob response structure, parsing inner content");
const innerContent = JSON.parse(parsedContent.content);
console.log("Successfully parsed blob inner content");
return innerContent;
} catch (innerError) {
console.error("Error parsing inner blob content:", innerError);
return null;
}
}
return parsedContent;
} catch (e) {
console.error("Error loading AI Chat data:", e);
return null;
}
}
}
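/*
 * For reference, saveData() above persists note content of this JSON shape
 * (timestamp illustrative):
 *
 *   {
 *     "messages": [...],
 *     "chatNoteId": "<noteId>",
 *     "toolSteps": [...],
 *     "sources": [...],
 *     "metadata": { "lastUpdated": "2025-04-17T19:57:30.000Z" }
 *   }
 */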

View File

@ -37,6 +37,7 @@ import LocalizationOptions from "./options/i18n/i18n.js";
import CodeBlockOptions from "./options/appearance/code_block.js";
import EditorOptions from "./options/text_notes/editor.js";
import ShareSettingsOptions from "./options/other/share_settings.js";
import AiSettingsOptions from "./options/ai_settings.js";
import type FNote from "../../entities/fnote.js";
import type NoteContextAwareWidget from "../note_context_aware_widget.js";
import { t } from "i18next";
@ -111,6 +112,7 @@ const CONTENT_WIDGETS: Record<string, (typeof NoteContextAwareWidget)[]> = {
_optionsSync: [
SyncOptions
],
_optionsAi: [AiSettingsOptions],
_optionsOther: [
SearchEngineOptions,
TrayOptions,

View File

@ -0,0 +1,2 @@
import AiSettingsWidget from './ai_settings/index.js';
export default AiSettingsWidget;

View File

@ -0,0 +1,510 @@
import OptionsWidget from "../options_widget.js";
import { TPL } from "./template.js";
import { t } from "../../../../services/i18n.js";
import type { OptionDefinitions, OptionMap } from "../../../../../../services/options_interface.js";
import server from "../../../../services/server.js";
import toastService from "../../../../services/toast.js";
import type { EmbeddingStats, FailedEmbeddingNotes } from "./interfaces.js";
import { ProviderService } from "./providers.js";
export default class AiSettingsWidget extends OptionsWidget {
private ollamaModelsRefreshed = false;
private openaiModelsRefreshed = false;
private anthropicModelsRefreshed = false;
private statsRefreshInterval: NodeJS.Timeout | null = null;
private indexRebuildRefreshInterval: NodeJS.Timeout | null = null;
private readonly STATS_REFRESH_INTERVAL = 5000; // 5 seconds
private providerService: ProviderService | null = null;
doRender() {
this.$widget = $(TPL);
this.providerService = new ProviderService(this.$widget);
// Setup event handlers for options
this.setupEventHandlers();
this.refreshEmbeddingStats();
this.fetchFailedEmbeddingNotes();
return this.$widget;
}
/**
* Helper method to set up a change event handler for an option
* @param selector The jQuery selector for the element
* @param optionName The name of the option to update
* @param validateAfter Whether to run validation after the update
* @param isCheckbox Whether the element is a checkbox
*/
setupChangeHandler(selector: string, optionName: keyof OptionDefinitions, validateAfter: boolean = false, isCheckbox: boolean = false) {
if (!this.$widget) return;
const $element = this.$widget.find(selector);
$element.on('change', async () => {
let value: string;
if (isCheckbox) {
value = $element.prop('checked') ? 'true' : 'false';
} else {
value = $element.val() as string;
}
await this.updateOption(optionName, value);
if (validateAfter) {
await this.displayValidationWarnings();
}
});
}
/**
* Set up all event handlers for options
*/
setupEventHandlers() {
if (!this.$widget) return;
// Core AI options
this.setupChangeHandler('.ai-enabled', 'aiEnabled', true, true);
this.setupChangeHandler('.ai-provider-precedence', 'aiProviderPrecedence', true);
this.setupChangeHandler('.ai-temperature', 'aiTemperature');
this.setupChangeHandler('.ai-system-prompt', 'aiSystemPrompt');
// OpenAI options
this.setupChangeHandler('.openai-api-key', 'openaiApiKey', true);
this.setupChangeHandler('.openai-base-url', 'openaiBaseUrl', true);
this.setupChangeHandler('.openai-default-model', 'openaiDefaultModel');
this.setupChangeHandler('.openai-embedding-model', 'openaiEmbeddingModel');
// Anthropic options
this.setupChangeHandler('.anthropic-api-key', 'anthropicApiKey', true);
this.setupChangeHandler('.anthropic-default-model', 'anthropicDefaultModel');
this.setupChangeHandler('.anthropic-base-url', 'anthropicBaseUrl');
// Voyage options
this.setupChangeHandler('.voyage-api-key', 'voyageApiKey');
this.setupChangeHandler('.voyage-embedding-model', 'voyageEmbeddingModel');
// Ollama options
this.setupChangeHandler('.ollama-base-url', 'ollamaBaseUrl');
this.setupChangeHandler('.ollama-default-model', 'ollamaDefaultModel');
this.setupChangeHandler('.ollama-embedding-model', 'ollamaEmbeddingModel');
const $refreshModels = this.$widget.find('.refresh-models');
$refreshModels.on('click', async () => {
this.ollamaModelsRefreshed = await this.providerService?.refreshOllamaModels(true, this.ollamaModelsRefreshed) || false;
});
// Add tab change handler for Ollama tab
const $ollamaTab = this.$widget.find('#nav-ollama-tab');
$ollamaTab.on('shown.bs.tab', async () => {
// Only refresh the models if we haven't done it before
this.ollamaModelsRefreshed = await this.providerService?.refreshOllamaModels(false, this.ollamaModelsRefreshed) || false;
});
// OpenAI models refresh button
const $refreshOpenAIModels = this.$widget.find('.refresh-openai-models');
$refreshOpenAIModels.on('click', async () => {
this.openaiModelsRefreshed = await this.providerService?.refreshOpenAIModels(true, this.openaiModelsRefreshed) || false;
});
// Add tab change handler for OpenAI tab
const $openaiTab = this.$widget.find('#nav-openai-tab');
$openaiTab.on('shown.bs.tab', async () => {
// Only refresh the models if we haven't done it before
this.openaiModelsRefreshed = await this.providerService?.refreshOpenAIModels(false, this.openaiModelsRefreshed) || false;
});
// Anthropic models refresh button
const $refreshAnthropicModels = this.$widget.find('.refresh-anthropic-models');
$refreshAnthropicModels.on('click', async () => {
this.anthropicModelsRefreshed = await this.providerService?.refreshAnthropicModels(true, this.anthropicModelsRefreshed) || false;
});
// Add tab change handler for Anthropic tab
const $anthropicTab = this.$widget.find('#nav-anthropic-tab');
$anthropicTab.on('shown.bs.tab', async () => {
// Only refresh the models if we haven't done it before
this.anthropicModelsRefreshed = await this.providerService?.refreshAnthropicModels(false, this.anthropicModelsRefreshed) || false;
});
// Embedding options event handlers
this.setupChangeHandler('.embedding-auto-update-enabled', 'embeddingAutoUpdateEnabled', false, true);
this.setupChangeHandler('.enable-automatic-indexing', 'enableAutomaticIndexing', false, true);
this.setupChangeHandler('.embedding-similarity-threshold', 'embeddingSimilarityThreshold');
this.setupChangeHandler('.max-notes-per-llm-query', 'maxNotesPerLlmQuery');
this.setupChangeHandler('.embedding-provider-precedence', 'embeddingProviderPrecedence', true);
this.setupChangeHandler('.embedding-dimension-strategy', 'embeddingDimensionStrategy');
this.setupChangeHandler('.embedding-batch-size', 'embeddingBatchSize');
this.setupChangeHandler('.embedding-update-interval', 'embeddingUpdateInterval');
// No sortable behavior needed anymore
// Embedding stats refresh button
const $refreshStats = this.$widget.find('.embedding-refresh-stats');
$refreshStats.on('click', async () => {
await this.refreshEmbeddingStats();
await this.fetchFailedEmbeddingNotes();
});
// Recreate embeddings button
const $recreateEmbeddings = this.$widget.find('.recreate-embeddings');
$recreateEmbeddings.on('click', async () => {
if (confirm(t("ai_llm.recreate_embeddings_confirm") || "Are you sure you want to recreate all embeddings? This may take a long time.")) {
try {
await server.post('llm/embeddings/reprocess');
toastService.showMessage(t("ai_llm.recreate_embeddings_started"));
// Start progress polling
this.pollIndexRebuildProgress();
} catch (e) {
console.error('Error starting embeddings regeneration:', e);
toastService.showError(t("ai_llm.recreate_embeddings_error"));
}
}
});
// Rebuild index button
const $rebuildIndex = this.$widget.find('.rebuild-embeddings-index');
$rebuildIndex.on('click', async () => {
try {
await server.post('llm/embeddings/rebuild-index');
toastService.showMessage(t("ai_llm.rebuild_index_started"));
// Start progress polling
this.pollIndexRebuildProgress();
} catch (e) {
console.error('Error starting index rebuild:', e);
toastService.showError(t("ai_llm.rebuild_index_error"));
}
});
}
/**
* Display warnings for validation issues with providers
*/
async displayValidationWarnings() {
if (!this.$widget) return;
const $warningDiv = this.$widget.find('.provider-validation-warning');
// Check if AI is enabled
const aiEnabled = this.$widget.find('.ai-enabled').prop('checked');
if (!aiEnabled) {
$warningDiv.hide();
return;
}
// Get provider precedence
const providerPrecedence = (this.$widget.find('.ai-provider-precedence').val() as string || '').split(',');
// Check for OpenAI configuration if it's in the precedence list
const openaiWarnings = [];
if (providerPrecedence.includes('openai')) {
const openaiApiKey = this.$widget.find('.openai-api-key').val();
if (!openaiApiKey) {
openaiWarnings.push(t("ai_llm.empty_key_warning.openai"));
}
}
// Check for Anthropic configuration if it's in the precedence list
const anthropicWarnings = [];
if (providerPrecedence.includes('anthropic')) {
const anthropicApiKey = this.$widget.find('.anthropic-api-key').val();
if (!anthropicApiKey) {
anthropicWarnings.push(t("ai_llm.empty_key_warning.anthropic"));
}
}
// Check for Voyage configuration if it's in the precedence list
const voyageWarnings = [];
if (providerPrecedence.includes('voyage')) {
const voyageApiKey = this.$widget.find('.voyage-api-key').val();
if (!voyageApiKey) {
voyageWarnings.push(t("ai_llm.empty_key_warning.voyage"));
}
}
// Check for Ollama configuration if it's in the precedence list
const ollamaWarnings = [];
if (providerPrecedence.includes('ollama')) {
const ollamaBaseUrl = this.$widget.find('.ollama-base-url').val();
if (!ollamaBaseUrl) {
ollamaWarnings.push(t("ai_llm.ollama_no_url"));
}
}
// Similar checks for embeddings
const embeddingWarnings = [];
const embeddingsEnabled = this.$widget.find('.enable-automatic-indexing').prop('checked');
if (embeddingsEnabled) {
const embeddingProviderPrecedence = (this.$widget.find('.embedding-provider-precedence').val() as string || '').split(',');
if (embeddingProviderPrecedence.includes('openai') && !this.$widget.find('.openai-api-key').val()) {
embeddingWarnings.push(t("ai_llm.empty_key_warning.openai"));
}
if (embeddingProviderPrecedence.includes('voyage') && !this.$widget.find('.voyage-api-key').val()) {
embeddingWarnings.push(t("ai_llm.empty_key_warning.voyage"));
}
if (embeddingProviderPrecedence.includes('ollama') && !this.$widget.find('.ollama-base-url').val()) {
embeddingWarnings.push(t("ai_llm.empty_key_warning.ollama"));
}
}
// Combine all warnings
const allWarnings = [
...openaiWarnings,
...anthropicWarnings,
...voyageWarnings,
...ollamaWarnings,
...embeddingWarnings
];
// Show or hide warnings
if (allWarnings.length > 0) {
const warningHtml = '<strong>' + t("ai_llm.configuration_warnings") + '</strong><ul>' +
allWarnings.map(warning => `<li>${warning}</li>`).join('') + '</ul>';
$warningDiv.html(warningHtml).show();
} else {
$warningDiv.hide();
}
}
/**
* Poll for index rebuild progress
*/
pollIndexRebuildProgress() {
if (this.indexRebuildRefreshInterval) {
clearInterval(this.indexRebuildRefreshInterval);
}
// Set up polling interval for index rebuild progress
this.indexRebuildRefreshInterval = setInterval(async () => {
await this.refreshEmbeddingStats();
}, this.STATS_REFRESH_INTERVAL);
// Stop polling after 5 minutes to avoid indefinite polling
setTimeout(() => {
if (this.indexRebuildRefreshInterval) {
clearInterval(this.indexRebuildRefreshInterval);
this.indexRebuildRefreshInterval = null;
}
}, 5 * 60 * 1000);
}
/**
* Refresh embedding statistics
*/
async refreshEmbeddingStats() {
if (!this.$widget) return;
try {
const response = await server.get<EmbeddingStats>('llm/embeddings/stats');
if (response && response.success) {
const stats = response.stats;
// Update stats display
this.$widget.find('.embedding-processed-notes').text(stats.embeddedNotesCount);
this.$widget.find('.embedding-total-notes').text(stats.totalNotesCount);
this.$widget.find('.embedding-queued-notes').text(stats.queuedNotesCount);
this.$widget.find('.embedding-failed-notes').text(stats.failedNotesCount);
if (stats.lastProcessedDate) {
const date = new Date(stats.lastProcessedDate);
this.$widget.find('.embedding-last-processed').text(date.toLocaleString());
} else {
this.$widget.find('.embedding-last-processed').text('-');
}
// Update progress bar
const $progressBar = this.$widget.find('.embedding-progress');
const progressPercent = stats.percentComplete;
$progressBar.css('width', `${progressPercent}%`);
$progressBar.attr('aria-valuenow', progressPercent.toString());
$progressBar.text(`${progressPercent}%`);
// Update status text
let statusText;
if (stats.queuedNotesCount > 0) {
statusText = t("ai_llm.agent.processing", { percentage: progressPercent });
} else if (stats.embeddedNotesCount === 0) {
statusText = t("ai_llm.not_started");
} else if (stats.embeddedNotesCount === stats.totalNotesCount) {
statusText = t("ai_llm.complete");
// Clear polling interval if processing is complete
if (this.indexRebuildRefreshInterval) {
clearInterval(this.indexRebuildRefreshInterval);
this.indexRebuildRefreshInterval = null;
}
} else {
statusText = t("ai_llm.partial", { percentage: progressPercent });
}
this.$widget.find('.embedding-status-text').text(statusText);
}
} catch (e) {
console.error('Error fetching embedding stats:', e);
}
}
/**
* Fetch failed embedding notes
*/
async fetchFailedEmbeddingNotes() {
if (!this.$widget) return;
try {
const response = await server.get<FailedEmbeddingNotes>('llm/embeddings/failed');
if (response && response.success) {
const failedNotes = response.failedNotes || [];
const $failedNotesList = this.$widget.find('.embedding-failed-notes-list');
if (failedNotes.length === 0) {
$failedNotesList.html(`<div class="alert alert-info">${t("ai_llm.no_failed_embeddings")}</div>`);
return;
}
// Create a table with failed notes
let html = `
<table class="table table-sm table-striped">
<thead>
<tr>
<th>${t("ai_llm.note_title")}</th>
<th>${t("ai_llm.error")}</th>
<th>${t("ai_llm.last_attempt")}</th>
<th>${t("ai_llm.actions")}</th>
</tr>
</thead>
<tbody>
`;
for (const note of failedNotes) {
const date = new Date(note.lastAttempt);
const isPermanent = note.isPermanent;
const noteTitle = note.title || note.noteId;
html += `
<tr data-note-id="${note.noteId}">
<td><a href="#" class="open-note">${noteTitle}</a></td>
<td>${note.error}</td>
<td>${date.toLocaleString()}</td>
<td>
<button class="btn btn-sm btn-outline-secondary retry-embedding" ${isPermanent ? 'disabled' : ''}>
${t("ai_llm.retry")}
</button>
</td>
</tr>
`;
}
html += `
</tbody>
</table>
`;
$failedNotesList.html(html);
// Add event handlers for retry buttons
$failedNotesList.find('.retry-embedding').on('click', async function() {
const noteId = $(this).closest('tr').data('note-id');
try {
await server.post('llm/embeddings/retry', { noteId });
toastService.showMessage(t("ai_llm.retry_queued"));
// Remove this row or update status
$(this).closest('tr').remove();
} catch (e) {
console.error('Error retrying embedding:', e);
toastService.showError(t("ai_llm.retry_failed"));
}
});
// Add event handlers for open note links
$failedNotesList.find('.open-note').on('click', function(e) {
e.preventDefault();
const noteId = $(this).closest('tr').data('note-id');
window.open(`#${noteId}`, '_blank');
});
}
} catch (e) {
console.error('Error fetching failed embedding notes:', e);
}
}
/**
* Helper to get display name for providers
*/
getProviderDisplayName(provider: string): string {
switch(provider) {
case 'openai': return 'OpenAI';
case 'anthropic': return 'Anthropic';
case 'ollama': return 'Ollama';
case 'voyage': return 'Voyage';
case 'local': return 'Local';
default: return provider.charAt(0).toUpperCase() + provider.slice(1);
}
}
/**
* Called when the options have been loaded from the server
*/
optionsLoaded(options: OptionMap) {
if (!this.$widget) return;
// AI Options
this.$widget.find('.ai-enabled').prop('checked', options.aiEnabled !== 'false');
this.$widget.find('.ai-temperature').val(options.aiTemperature || '0.7');
this.$widget.find('.ai-system-prompt').val(options.aiSystemPrompt || '');
this.$widget.find('.ai-provider-precedence').val(options.aiProviderPrecedence || 'openai,anthropic,ollama');
// OpenAI Section
this.$widget.find('.openai-api-key').val(options.openaiApiKey || '');
this.$widget.find('.openai-base-url').val(options.openaiBaseUrl || 'https://api.openai.com/v1');
this.$widget.find('.openai-default-model').val(options.openaiDefaultModel || 'gpt-4o');
this.$widget.find('.openai-embedding-model').val(options.openaiEmbeddingModel || 'text-embedding-3-small');
// Anthropic Section
this.$widget.find('.anthropic-api-key').val(options.anthropicApiKey || '');
this.$widget.find('.anthropic-base-url').val(options.anthropicBaseUrl || 'https://api.anthropic.com');
this.$widget.find('.anthropic-default-model').val(options.anthropicDefaultModel || 'claude-3-opus-20240229');
// Voyage Section
this.$widget.find('.voyage-api-key').val(options.voyageApiKey || '');
this.$widget.find('.voyage-embedding-model').val(options.voyageEmbeddingModel || 'voyage-2');
// Ollama Section
this.$widget.find('.ollama-base-url').val(options.ollamaBaseUrl || 'http://localhost:11434');
this.$widget.find('.ollama-default-model').val(options.ollamaDefaultModel || 'llama3');
this.$widget.find('.ollama-embedding-model').val(options.ollamaEmbeddingModel || 'nomic-embed-text');
// Embedding Options
this.$widget.find('.embedding-provider-precedence').val(options.embeddingProviderPrecedence || 'openai,voyage,ollama,local');
this.$widget.find('.embedding-auto-update-enabled').prop('checked', options.embeddingAutoUpdateEnabled !== 'false');
this.$widget.find('.enable-automatic-indexing').prop('checked', options.enableAutomaticIndexing !== 'false');
this.$widget.find('.embedding-similarity-threshold').val(options.embeddingSimilarityThreshold || '0.75');
this.$widget.find('.max-notes-per-llm-query').val(options.maxNotesPerLlmQuery || '3');
this.$widget.find('.embedding-dimension-strategy').val(options.embeddingDimensionStrategy || 'auto');
this.$widget.find('.embedding-batch-size').val(options.embeddingBatchSize || '10');
this.$widget.find('.embedding-update-interval').val(options.embeddingUpdateInterval || '5000');
// Display validation warnings
this.displayValidationWarnings();
}
cleanup() {
// Clear intervals
if (this.statsRefreshInterval) {
clearInterval(this.statsRefreshInterval);
this.statsRefreshInterval = null;
}
if (this.indexRebuildRefreshInterval) {
clearInterval(this.indexRebuildRefreshInterval);
this.indexRebuildRefreshInterval = null;
}
}
}

View File

@ -0,0 +1,2 @@
import AiSettingsWidget from './ai_settings_widget.js';
export default AiSettingsWidget;

View File

@ -0,0 +1,69 @@
// Interface for the Ollama model response
export interface OllamaModelResponse {
success: boolean;
models: Array<{
name: string;
model: string;
details?: {
family?: string;
parameter_size?: string;
}
}>;
}
// Interface for embedding statistics
export interface EmbeddingStats {
success: boolean;
stats: {
totalNotesCount: number;
embeddedNotesCount: number;
queuedNotesCount: number;
failedNotesCount: number;
lastProcessedDate: string | null;
percentComplete: number;
}
}
// Interface for failed embedding notes
export interface FailedEmbeddingNotes {
success: boolean;
failedNotes: Array<{
noteId: string;
title?: string;
operation: string;
attempts: number;
lastAttempt: string;
error: string;
failureType: string;
chunks: number;
isPermanent: boolean;
}>;
}
export interface OpenAIModelResponse {
success: boolean;
chatModels: Array<{
id: string;
name: string;
type: string;
}>;
embeddingModels: Array<{
id: string;
name: string;
type: string;
}>;
}
export interface AnthropicModelResponse {
success: boolean;
chatModels: Array<{
id: string;
name: string;
type: string;
}>;
embeddingModels: Array<{
id: string;
name: string;
type: string;
}>;
}
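// Illustrative only (not part of this diff): a hedged sketch of consuming
// these response shapes with the same `server` helper used elsewhere in this
// commit. The endpoint path below is an assumption for illustration.
//
//   import server from "../../../../services/server.js";
//
//   async function logEmbeddingProgress() {
//       const response = await server.get<EmbeddingStats>("llm/embeddings/stats");
//       if (response.success) {
//           console.log(`${response.stats.percentComplete}% of notes embedded`);
//       }
//   }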

View File

@ -0,0 +1,318 @@
import server from "../../../../services/server.js";
import toastService from "../../../../services/toast.js";
import { t } from "../../../../services/i18n.js";
import options from "../../../../services/options.js";
import type { OpenAIModelResponse, AnthropicModelResponse, OllamaModelResponse } from "./interfaces.js";
export class ProviderService {
constructor(private $widget: JQuery<HTMLElement>) {
// Initialize Voyage models (since they don't have a dynamic refresh yet)
this.initializeVoyageModels();
}
/**
* Initialize Voyage models with default values and ensure proper selection
*/
private initializeVoyageModels() {
setTimeout(() => {
const $voyageModelSelect = this.$widget.find('.voyage-embedding-model');
if ($voyageModelSelect.length > 0) {
const currentValue = $voyageModelSelect.val();
this.ensureSelectedValue($voyageModelSelect, currentValue, 'voyageEmbeddingModel');
}
}, 100); // Small delay to ensure the widget is fully initialized
}
/**
* Ensures the dropdown has the correct value set, prioritizing:
* 1. Current UI value if present
* 2. Value from database options if available
* 3. Falling back to first option if neither is available
*/
private ensureSelectedValue($select: JQuery<HTMLElement>, currentValue: string | number | string[] | undefined | null, optionName: string) {
if (currentValue) {
$select.val(currentValue);
// If the value doesn't exist anymore, select the first option
if (!$select.val()) {
$select.prop('selectedIndex', 0);
}
} else {
// If no current value exists in the dropdown but there's a default in the database
const savedModel = options.get(optionName);
if (savedModel) {
$select.val(savedModel);
// If the saved model isn't in the dropdown, select the first option
if (!$select.val()) {
$select.prop('selectedIndex', 0);
}
}
}
}
/**
* Refreshes the list of OpenAI models
* @param showLoading Whether to show loading indicators and toasts
* @param openaiModelsRefreshed Reference to track if models have been refreshed
* @returns Promise that resolves when the refresh is complete
*/
async refreshOpenAIModels(showLoading: boolean, openaiModelsRefreshed: boolean): Promise<boolean> {
if (!this.$widget) return false;
const $refreshOpenAIModels = this.$widget.find('.refresh-openai-models');
// If we've already refreshed and we're not forcing a refresh, don't do it again
if (openaiModelsRefreshed && !showLoading) {
return openaiModelsRefreshed;
}
if (showLoading) {
$refreshOpenAIModels.prop('disabled', true);
$refreshOpenAIModels.html(`<i class="spinner-border spinner-border-sm"></i>`);
}
try {
const openaiBaseUrl = this.$widget.find('.openai-base-url').val() as string;
const response = await server.get<OpenAIModelResponse>(`llm/providers/openai/models?baseUrl=${encodeURIComponent(openaiBaseUrl)}`);
if (response && response.success) {
// Update the chat models dropdown
if (response.chatModels?.length > 0) {
const $chatModelSelect = this.$widget.find('.openai-default-model');
const currentChatValue = $chatModelSelect.val();
// Clear existing options
$chatModelSelect.empty();
// Sort models by name
const sortedChatModels = [...response.chatModels].sort((a, b) => a.name.localeCompare(b.name));
// Add models to the dropdown
sortedChatModels.forEach(model => {
$chatModelSelect.append(`<option value="${model.id}">${model.name}</option>`);
});
// Try to restore the previously selected value
this.ensureSelectedValue($chatModelSelect, currentChatValue, 'openaiDefaultModel');
}
// Update the embedding models dropdown
if (response.embeddingModels?.length > 0) {
const $embedModelSelect = this.$widget.find('.openai-embedding-model');
const currentEmbedValue = $embedModelSelect.val();
// Clear existing options
$embedModelSelect.empty();
// Sort models by name
const sortedEmbedModels = [...response.embeddingModels].sort((a, b) => a.name.localeCompare(b.name));
// Add models to the dropdown
sortedEmbedModels.forEach(model => {
$embedModelSelect.append(`<option value="${model.id}">${model.name}</option>`);
});
// Try to restore the previously selected value
this.ensureSelectedValue($embedModelSelect, currentEmbedValue, 'openaiEmbeddingModel');
}
if (showLoading) {
// Show success message
const totalModels = (response.chatModels?.length || 0) + (response.embeddingModels?.length || 0);
toastService.showMessage(`${totalModels} OpenAI models found.`);
}
return true;
} else if (showLoading) {
toastService.showError(`No OpenAI models found. Please check your API key and settings.`);
}
return openaiModelsRefreshed;
} catch (e) {
console.error(`Error fetching OpenAI models:`, e);
if (showLoading) {
toastService.showError(`Error fetching OpenAI models: ${e}`);
}
return openaiModelsRefreshed;
} finally {
if (showLoading) {
$refreshOpenAIModels.prop('disabled', false);
$refreshOpenAIModels.html(`<span class="bx bx-refresh"></span>`);
}
}
}
/**
* Refreshes the list of Anthropic models
* @param showLoading Whether to show loading indicators and toasts
* @param anthropicModelsRefreshed Reference to track if models have been refreshed
* @returns Promise that resolves when the refresh is complete
*/
async refreshAnthropicModels(showLoading: boolean, anthropicModelsRefreshed: boolean): Promise<boolean> {
if (!this.$widget) return false;
const $refreshAnthropicModels = this.$widget.find('.refresh-anthropic-models');
// If we've already refreshed and we're not forcing a refresh, don't do it again
if (anthropicModelsRefreshed && !showLoading) {
return anthropicModelsRefreshed;
}
if (showLoading) {
$refreshAnthropicModels.prop('disabled', true);
$refreshAnthropicModels.html(`<i class="spinner-border spinner-border-sm"></i>`);
}
try {
const anthropicBaseUrl = this.$widget.find('.anthropic-base-url').val() as string;
const response = await server.get<AnthropicModelResponse>(`llm/providers/anthropic/models?baseUrl=${encodeURIComponent(anthropicBaseUrl)}`);
if (response && response.success) {
// Update the chat models dropdown
if (response.chatModels?.length > 0) {
const $chatModelSelect = this.$widget.find('.anthropic-default-model');
const currentChatValue = $chatModelSelect.val();
// Clear existing options
$chatModelSelect.empty();
// Sort models by name
const sortedChatModels = [...response.chatModels].sort((a, b) => a.name.localeCompare(b.name));
// Add models to the dropdown
sortedChatModels.forEach(model => {
$chatModelSelect.append(`<option value="${model.id}">${model.name}</option>`);
});
// Try to restore the previously selected value
this.ensureSelectedValue($chatModelSelect, currentChatValue, 'anthropicDefaultModel');
}
// Handle embedding models if they exist
if (response.embeddingModels?.length > 0 && showLoading) {
toastService.showMessage(`Found ${response.embeddingModels.length} Anthropic embedding models.`);
}
if (showLoading) {
// Show success message
const totalModels = (response.chatModels?.length || 0) + (response.embeddingModels?.length || 0);
toastService.showMessage(`${totalModels} Anthropic models found.`);
}
return true;
} else if (showLoading) {
toastService.showError(`No Anthropic models found. Please check your API key and settings.`);
}
return anthropicModelsRefreshed;
} catch (e) {
console.error(`Error fetching Anthropic models:`, e);
if (showLoading) {
toastService.showError(`Error fetching Anthropic models: ${e}`);
}
return anthropicModelsRefreshed;
} finally {
if (showLoading) {
$refreshAnthropicModels.prop('disabled', false);
$refreshAnthropicModels.html(`<span class="bx bx-refresh"></span>`);
}
}
}
/**
* Refreshes the list of Ollama models
* @param showLoading Whether to show loading indicators and toasts
* @param ollamaModelsRefreshed Reference to track if models have been refreshed
* @returns Promise that resolves when the refresh is complete
*/
async refreshOllamaModels(showLoading: boolean, ollamaModelsRefreshed: boolean): Promise<boolean> {
if (!this.$widget) return false;
const $refreshModels = this.$widget.find('.refresh-models');
// If we've already refreshed and we're not forcing a refresh, don't do it again
if (ollamaModelsRefreshed && !showLoading) {
return ollamaModelsRefreshed;
}
if (showLoading) {
$refreshModels.prop('disabled', true);
$refreshModels.text(t("ai_llm.refreshing_models"));
}
try {
const ollamaBaseUrl = this.$widget.find('.ollama-base-url').val() as string;
const response = await server.get<OllamaModelResponse>(`llm/providers/ollama/models?baseUrl=${encodeURIComponent(ollamaBaseUrl)}`);
if (response && response.success && response.models && response.models.length > 0) {
const $embedModelSelect = this.$widget.find('.ollama-embedding-model');
const currentValue = $embedModelSelect.val();
// Clear existing options
$embedModelSelect.empty();
// Add embedding-specific models first
const embeddingModels = response.models.filter(model =>
model.name.includes('embed') || model.name.includes('bert'));
embeddingModels.forEach(model => {
$embedModelSelect.append(`<option value="${model.name}">${model.name}</option>`);
});
if (embeddingModels.length > 0) {
// Add separator if we have embedding models
$embedModelSelect.append(`<option disabled>─────────────</option>`);
}
// Then add general models which can be used for embeddings too
const generalModels = response.models.filter(model =>
!model.name.includes('embed') && !model.name.includes('bert'));
generalModels.forEach(model => {
$embedModelSelect.append(`<option value="${model.name}">${model.name}</option>`);
});
// Try to restore the previously selected value
this.ensureSelectedValue($embedModelSelect, currentValue, 'ollamaEmbeddingModel');
// Also update the LLM model dropdown
const $modelSelect = this.$widget.find('.ollama-default-model');
const currentModelValue = $modelSelect.val();
// Clear existing options
$modelSelect.empty();
// Sort models by name to make them easier to find
const sortedModels = [...response.models].sort((a, b) => a.name.localeCompare(b.name));
// Add all models to the dropdown
sortedModels.forEach(model => {
$modelSelect.append(`<option value="${model.name}">${model.name}</option>`);
});
// Try to restore the previously selected value
this.ensureSelectedValue($modelSelect, currentModelValue, 'ollamaDefaultModel');
if (showLoading) {
toastService.showMessage(`${response.models.length} Ollama models found.`);
}
return true;
} else if (showLoading) {
toastService.showError(`No Ollama models found. Please check if Ollama is running.`);
}
return ollamaModelsRefreshed;
} catch (e) {
console.error(`Error fetching Ollama models:`, e);
if (showLoading) {
toastService.showError(`Error fetching Ollama models: ${e}`);
}
return ollamaModelsRefreshed;
} finally {
if (showLoading) {
$refreshModels.prop('disabled', false);
$refreshModels.html(`<span class="bx bx-refresh"></span>`);
}
}
}
}
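// Illustrative only (not part of this diff): a minimal sketch of how the
// settings widget might wire up ProviderService; the actual wiring lives in
// ai_settings_widget.ts and may differ.
//
//   const providerService = new ProviderService(this.$widget);
//   this.$widget.find('.refresh-openai-models').on('click', async () => {
//       this.openaiModelsRefreshed =
//           await providerService.refreshOpenAIModels(true, this.openaiModelsRefreshed);
//   });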

View File

@ -0,0 +1,305 @@
import { t } from "../../../../services/i18n.js";
export const TPL = `
<div class="options-section">
<h4>${t("ai_llm.title")}</h4>
<!-- Add warning alert div -->
<div class="provider-validation-warning alert alert-warning" style="display: none;"></div>
<div class="form-group">
<label class="tn-checkbox">
<input class="ai-enabled form-check-input" type="checkbox">
${t("ai_llm.enable_ai_features")}
</label>
<div class="form-text">${t("ai_llm.enable_ai_description")}</div>
</div>
</div>
<div class="options-section">
<h4>${t("ai_llm.embedding_statistics")}</h4>
<div class="embedding-stats-container">
<div class="embedding-stats">
<div class="row">
<div class="col-md-6">
<div><strong>${t("ai_llm.processed_notes")}:</strong> <span class="embedding-processed-notes">-</span></div>
<div><strong>${t("ai_llm.total_notes")}:</strong> <span class="embedding-total-notes">-</span></div>
<div><strong>${t("ai_llm.progress")}:</strong> <span class="embedding-status-text">-</span></div>
</div>
<div class="col-md-6">
<div><strong>${t("ai_llm.queued_notes")}:</strong> <span class="embedding-queued-notes">-</span></div>
<div><strong>${t("ai_llm.failed_notes")}:</strong> <span class="embedding-failed-notes">-</span></div>
<div><strong>${t("ai_llm.last_processed")}:</strong> <span class="embedding-last-processed">-</span></div>
</div>
</div>
</div>
<div class="progress mt-1" style="height: 10px;">
<div class="progress-bar embedding-progress" role="progressbar" style="width: 0%;"
aria-valuenow="0" aria-valuemin="0" aria-valuemax="100">0%</div>
</div>
<div class="mt-2">
<button class="btn btn-sm btn-outline-secondary embedding-refresh-stats">
${t("ai_llm.refresh_stats")}
</button>
</div>
</div>
<hr/>
<!-- Failed embeddings section -->
<h5>${t("ai_llm.failed_notes")}</h4>
<div class="form-group mt-4">
<div class="embedding-failed-notes-container">
<div class="embedding-failed-notes-list">
<div class="alert alert-info">${t("ai_llm.no_failed_embeddings")}</div>
</div>
</div>
</div>
</div>
<div class="ai-providers-section options-section">
<h4>${t("ai_llm.provider_configuration")}</h4>
<div class="form-group">
<label>${t("ai_llm.provider_precedence")}</label>
<input type="text" class="ai-provider-precedence form-control" placeholder="openai,anthropic,ollama">
<div class="form-text">${t("ai_llm.provider_precedence_description")}</div>
</div>
<div class="form-group">
<label>${t("ai_llm.temperature")}</label>
<input class="ai-temperature form-control" type="number" min="0" max="2" step="0.1">
<div class="form-text">${t("ai_llm.temperature_description")}</div>
</div>
<div class="form-group">
<label>${t("ai_llm.system_prompt")}</label>
<textarea class="ai-system-prompt form-control" rows="3"></textarea>
<div class="form-text">${t("ai_llm.system_prompt_description")}</div>
</div>
</div>
<nav class="options-section-tabs">
<div class="nav nav-tabs" id="nav-tab" role="tablist">
<button class="nav-link active" id="nav-openai-tab" data-bs-toggle="tab" data-bs-target="#nav-openai" type="button" role="tab" aria-controls="nav-openai" aria-selected="true">${t("ai_llm.openai_tab")}</button>
<button class="nav-link" id="nav-anthropic-tab" data-bs-toggle="tab" data-bs-target="#nav-anthropic" type="button" role="tab" aria-controls="nav-anthropic" aria-selected="false">${t("ai_llm.anthropic_tab")}</button>
<button class="nav-link" id="nav-voyage-tab" data-bs-toggle="tab" data-bs-target="#nav-voyage" type="button" role="tab" aria-controls="nav-voyage" aria-selected="false">${t("ai_llm.voyage_tab")}</button>
<button class="nav-link" id="nav-ollama-tab" data-bs-toggle="tab" data-bs-target="#nav-ollama" type="button" role="tab" aria-controls="nav-ollama" aria-selected="false">${t("ai_llm.ollama_tab")}</button>
</div>
</nav>
<div class="options-section">
<div class="tab-content" id="nav-tabContent">
<div class="tab-pane fade show active" id="nav-openai" role="tabpanel" aria-labelledby="nav-openai-tab">
<div class="card">
<div class="card-header">
<h5>${t("ai_llm.openai_settings")}</h5>
</div>
<div class="card-body">
<div class="form-group">
<label>${t("ai_llm.api_key")}</label>
<input type="password" class="openai-api-key form-control" autocomplete="off" />
<div class="form-text">${t("ai_llm.openai_api_key_description")}</div>
</div>
<div class="form-group">
<label>${t("ai_llm.url")}</label>
<input type="text" class="openai-base-url form-control" />
<div class="form-text">${t("ai_llm.openai_url_description")}</div>
</div>
<div class="form-group">
<label>${t("ai_llm.model")}</label>
<select class="openai-default-model form-control">
<option value="gpt-4o">GPT-4o (recommended)</option>
<option value="gpt-4">GPT-4</option>
<option value="gpt-3.5-turbo">GPT-3.5 Turbo</option>
</select>
<div class="form-text">${t("ai_llm.openai_model_description")}</div>
<button class="btn btn-sm btn-outline-secondary refresh-openai-models">${t("ai_llm.refresh_models")}</button>
</div>
<div class="form-group">
<label>${t("ai_llm.embedding_model")}</label>
<select class="openai-embedding-model form-control">
<option value="text-embedding-3-small">text-embedding-3-small (recommended)</option>
<option value="text-embedding-3-large">text-embedding-3-large</option>
</select>
<div class="form-text">${t("ai_llm.openai_embedding_model_description")}</div>
</div>
</div>
</div>
</div>
<div class="tab-pane fade" id="nav-anthropic" role="tabpanel" aria-labelledby="nav-anthropic-tab">
<div class="card">
<div class="card-header">
<h5>${t("ai_llm.anthropic_settings")}</h5>
</div>
<div class="card-body">
<div class="form-group">
<label>${t("ai_llm.api_key")}</label>
<input type="password" class="anthropic-api-key form-control" autocomplete="off" />
<div class="form-text">${t("ai_llm.anthropic_api_key_description")}</div>
</div>
<div class="form-group">
<label>${t("ai_llm.url")}</label>
<input type="text" class="anthropic-base-url form-control" />
<div class="form-text">${t("ai_llm.anthropic_url_description")}</div>
</div>
<div class="form-group">
<label>${t("ai_llm.model")}</label>
<select class="anthropic-default-model form-control">
<option value="claude-3-opus-20240229">Claude 3 Opus (recommended)</option>
<option value="claude-3-sonnet-20240229">Claude 3 Sonnet</option>
<option value="claude-3-haiku-20240307">Claude 3 Haiku</option>
</select>
<div class="form-text">${t("ai_llm.anthropic_model_description")}</div>
<button class="btn btn-sm btn-outline-secondary refresh-anthropic-models">${t("ai_llm.refresh_models")}</button>
</div>
</div>
</div>
</div>
<div class="tab-pane fade" id="nav-voyage" role="tabpanel" aria-labelledby="nav-voyage-tab">
<div class="card">
<div class="card-header">
<h5>${t("ai_llm.voyage_settings")}</h5>
</div>
<div class="card-body">
<div class="form-group">
<label>${t("ai_llm.api_key")}</label>
<input type="password" class="voyage-api-key form-control" autocomplete="off" />
<div class="form-text">${t("ai_llm.voyage_api_key_description")}</div>
</div>
<div class="form-group">
<label>${t("ai_llm.embedding_model")}</label>
<select class="voyage-embedding-model form-control">
<option value="voyage-2">Voyage-2 (recommended)</option>
<option value="voyage-2-code">Voyage-2-Code</option>
<option value="voyage-large-2">Voyage-Large-2</option>
</select>
<div class="form-text">${t("ai_llm.voyage_embedding_model_description")}</div>
</div>
</div>
</div>
</div>
<div class="tab-pane fade" id="nav-ollama" role="tabpanel" aria-labelledby="nav-ollama-tab">
<div class="card">
<div class="card-header">
<h5>${t("ai_llm.ollama_settings")}</h5>
</div>
<div class="card-body">
<div class="form-group">
<label>${t("ai_llm.url")}</label>
<input type="text" class="ollama-base-url form-control" />
<div class="form-text">${t("ai_llm.ollama_url_description")}</div>
</div>
<div class="form-group">
<label>${t("ai_llm.model")}</label>
<select class="ollama-default-model form-control">
<option value="llama3">llama3 (recommended)</option>
<option value="mistral">mistral</option>
<option value="phi3">phi3</option>
</select>
<div class="form-text">${t("ai_llm.ollama_model_description")}</div>
<button class="btn btn-sm btn-outline-secondary refresh-models"><span class="bx bx-refresh"></span></button>
</div>
<div class="form-group">
<label>${t("ai_llm.embedding_model")}</label>
<select class="ollama-embedding-model form-control">
<option value="nomic-embed-text">nomic-embed-text (recommended)</option>
<option value="all-MiniLM-L6-v2">all-MiniLM-L6-v2</option>
</select>
<div class="form-text">${t("ai_llm.ollama_embedding_model_description")}</div>
</div>
</div>
</div>
</div>
</div>
</div>
<div class="options-section">
<h4>${t("ai_llm.embeddings_configuration")}</h4>
<div class="form-group">
<label class="embedding-provider-label">${t("ai_llm.embedding_provider_precedence")}</label>
<input type="text" class="embedding-provider-precedence form-control" placeholder="openai,voyage,ollama,local">
<div class="form-text">${t("ai_llm.embedding_provider_precedence_description")}</div>
</div>
<div class="form-group">
<label>${t("ai_llm.embedding_dimension_strategy")}</label>
<select class="embedding-dimension-strategy form-control">
<option value="auto">${t("ai_llm.embedding_dimension_auto")}</option>
<option value="fixed-768">${t("ai_llm.embedding_dimension_fixed")} (768)</option>
<option value="fixed-1024">${t("ai_llm.embedding_dimension_fixed")} (1024)</option>
<option value="fixed-1536">${t("ai_llm.embedding_dimension_fixed")} (1536)</option>
</select>
<div class="form-text">${t("ai_llm.embedding_dimension_strategy_description")}</div>
</div>
<div class="form-group">
<label>${t("ai_llm.embedding_similarity_threshold")}</label>
<input class="embedding-similarity-threshold form-control" type="number" min="0" max="1" step="0.01">
<div class="form-text">${t("ai_llm.embedding_similarity_threshold_description")}</div>
</div>
<div class="form-group">
<label>${t("ai_llm.embedding_batch_size")}</label>
<input class="embedding-batch-size form-control" type="number" min="1" max="100" step="1">
<div class="form-text">${t("ai_llm.embedding_batch_size_description")}</div>
</div>
<div class="form-group">
<label>${t("ai_llm.embedding_update_interval")}</label>
<input class="embedding-update-interval form-control" type="number" min="100" max="60000" step="100">
<div class="form-text">${t("ai_llm.embedding_update_interval_description")}</div>
</div>
<div class="form-group">
<label>${t("ai_llm.max_notes_per_llm_query")}</label>
<input class="max-notes-per-llm-query form-control" type="number" min="1" max="20" step="1">
<div class="form-text">${t("ai_llm.max_notes_per_llm_query_description")}</div>
</div>
<div class="form-group">
<label class="tn-checkbox">
<input class="enable-automatic-indexing form-check-input" type="checkbox">
${t("ai_llm.enable_automatic_indexing")}
</label>
<div class="form-text">${t("ai_llm.enable_automatic_indexing_description")}</div>
</div>
<div class="form-group mt-3">
<label class="tn-checkbox">
<input class="embedding-auto-update-enabled form-check-input" type="checkbox">
${t("ai_llm.embedding_auto_update_enabled")}
</label>
<div class="form-text">${t("ai_llm.embedding_auto_update_enabled_description")}</div>
</div>
<!-- Recreate embeddings button -->
<div class="form-group mt-3">
<button class="btn btn-outline-primary recreate-embeddings">
${t("ai_llm.recreate_embeddings")}
</button>
<div class="form-text">${t("ai_llm.recreate_embeddings_description")}</div>
</div>
<!-- Rebuild index button -->
<div class="form-group mt-3">
<button class="btn btn-outline-primary rebuild-embeddings-index">
${t("ai_llm.rebuild_index")}
</button>
<div class="form-text">${t("ai_llm.rebuild_index_description")}</div>
</div>
<!-- Note about embedding provider precedence -->
<div class="form-group mt-3">
<h5>${t("ai_llm.embedding_providers_order")}</h5>
<div class="form-text mt-2">${t("ai_llm.embedding_providers_order_description")}</div>
</div>
</div>`;
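// Illustrative only (not part of this diff): TPL is a plain HTML string, so a
// widget would typically materialize it with jQuery in its render step,
// following the pattern of other option widgets (the actual render code lives
// in ai_settings_widget.ts and is assumed here).
//
//   doRender() {
//       this.$widget = $(TPL);
//       return this.$widget;
//   }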

View File

@ -0,0 +1,275 @@
/* LLM Chat Panel Styles */
.note-context-chat {
background-color: var(--main-background-color);
}
/* Message Styling */
.chat-message {
margin-bottom: 1rem;
}
.message-avatar {
width: 36px;
height: 36px;
border-radius: 50%;
font-size: 1.25rem;
flex-shrink: 0;
}
.user-avatar {
background-color: var(--input-background-color);
color: var(--cmd-button-icon-color);
}
.assistant-avatar {
background-color: var(--subtle-border-color, var(--main-border-color));
color: var(--hover-item-text-color);
}
.message-content {
max-width: calc(100% - 50px);
box-shadow: 0 1px 3px rgba(0, 0, 0, 0.05);
color: var(--main-text-color);
}
.user-content {
border-radius: 0.5rem 0.5rem 0 0.5rem !important;
background-color: var(--input-background-color) !important;
}
.assistant-content {
border-radius: 0.5rem 0.5rem 0.5rem 0 !important;
background-color: var(--main-background-color);
border: 1px solid var(--subtle-border-color, var(--main-border-color));
}
/* Tool Execution Styling */
.tool-execution-info {
margin-top: 0.75rem;
margin-bottom: 1.5rem;
border: 1px solid var(--subtle-border-color);
border-radius: 0.5rem;
overflow: hidden;
box-shadow: 0 1px 4px rgba(0, 0, 0, 0.05);
background-color: var(--main-background-color);
/* Add a subtle transition effect */
transition: all 0.2s ease-in-out;
}
.tool-execution-status {
background-color: var(--accented-background-color, rgba(0, 0, 0, 0.03)) !important;
border-radius: 0 !important;
padding: 0.5rem !important;
max-height: 250px !important;
overflow-y: auto;
}
.tool-execution-status .d-flex {
border-bottom: 1px solid var(--subtle-border-color);
padding-bottom: 0.5rem;
margin-bottom: 0.5rem;
}
.tool-step {
padding: 0.5rem;
margin-bottom: 0.75rem;
border-radius: 0.375rem;
background-color: var(--main-background-color);
border: 1px solid var(--subtle-border-color);
transition: background-color 0.2s ease;
}
.tool-step:hover {
background-color: rgba(0, 0, 0, 0.01);
}
.tool-step:last-child {
margin-bottom: 0;
}
/* Tool step specific styling */
.tool-step.executing {
background-color: rgba(0, 123, 255, 0.05);
border-color: rgba(0, 123, 255, 0.2);
}
.tool-step.result {
background-color: rgba(40, 167, 69, 0.05);
border-color: rgba(40, 167, 69, 0.2);
}
.tool-step.error {
background-color: rgba(220, 53, 69, 0.05);
border-color: rgba(220, 53, 69, 0.2);
}
/* Tool result formatting */
.tool-result pre {
margin: 0.5rem 0;
padding: 0.5rem;
background-color: rgba(0, 0, 0, 0.03);
border-radius: 0.25rem;
overflow: auto;
max-height: 300px;
}
.tool-result code {
font-family: "SFMono-Regular", Consolas, "Liberation Mono", Menlo, monospace;
font-size: 0.9em;
}
.tool-args code {
display: block;
padding: 0.5rem;
background-color: rgba(0, 0, 0, 0.03);
border-radius: 0.25rem;
margin-top: 0.25rem;
font-size: 0.85em;
color: var(--muted-text-color);
white-space: pre-wrap;
overflow: auto;
max-height: 100px;
}
/* Tool Execution in Chat Styling */
.chat-tool-execution {
padding: 0 0 0 36px; /* Aligned with message content, accounting for avatar width */
width: 100%;
margin-bottom: 1rem;
}
.tool-execution-container {
background-color: var(--accented-background-color, rgba(245, 247, 250, 0.7));
border: 1px solid var(--subtle-border-color);
border-radius: 0.375rem;
box-shadow: 0 1px 3px rgba(0, 0, 0, 0.05);
overflow: hidden;
max-width: calc(100% - 20px);
transition: all 0.3s ease;
}
.tool-execution-container.collapsed {
display: none;
}
.tool-execution-header {
background-color: var(--main-background-color);
border-bottom: 1px solid var(--subtle-border-color);
margin-bottom: 0.5rem;
color: var(--muted-text-color);
font-weight: 500;
padding: 0.6rem 0.8rem;
cursor: pointer;
transition: background-color 0.2s ease;
}
.tool-execution-header:hover {
background-color: var(--hover-item-background-color, rgba(0, 0, 0, 0.03));
}
.tool-execution-toggle {
color: var(--muted-text-color) !important;
background: transparent !important;
padding: 0.2rem 0.4rem !important;
transition: transform 0.2s ease;
}
.tool-execution-toggle:hover {
color: var(--main-text-color) !important;
}
.tool-execution-toggle i.bx-chevron-down {
transform: rotate(0deg);
transition: transform 0.3s ease;
}
.tool-execution-toggle i.bx-chevron-right {
transform: rotate(-90deg);
transition: transform 0.3s ease;
}
.tool-execution-chat-steps {
padding: 0.5rem;
max-height: 300px;
overflow-y: auto;
}
/* Make error text more visible */
.text-danger {
color: #dc3545 !important;
}
/* Sources Styling */
.sources-container {
background-color: var(--accented-background-color, var(--main-background-color));
border-top: 1px solid var(--main-border-color);
color: var(--main-text-color);
}
.source-item {
transition: all 0.2s ease;
background-color: var(--main-background-color);
border-color: var(--subtle-border-color, var(--main-border-color)) !important;
}
.source-item:hover {
background-color: var(--link-hover-background, var(--hover-item-background-color));
}
.source-link {
color: var(--link-color, var(--hover-item-text-color));
text-decoration: none;
display: block;
width: 100%;
}
.source-link:hover {
color: var(--link-hover-color, var(--hover-item-text-color));
}
/* Input Area Styling */
.note-context-chat-form {
background-color: var(--main-background-color);
border-top: 1px solid var(--main-border-color);
}
.context-option-container {
padding: 0.5rem 0;
border-bottom: 1px solid var(--subtle-border-color, var(--main-border-color));
color: var(--main-text-color);
}
.chat-input-container {
padding-top: 0.5rem;
}
.note-context-chat-input {
border-color: var(--subtle-border-color, var(--main-border-color));
background-color: var(--input-background-color) !important;
color: var(--input-text-color) !important;
resize: none;
transition: all 0.2s ease;
min-height: 50px;
max-height: 150px;
}
.note-context-chat-input:focus {
border-color: var(--input-focus-outline-color, var(--main-border-color));
box-shadow: 0 0 0 0.25rem var(--input-focus-outline-color, rgba(13, 110, 253, 0.25));
}
.note-context-chat-send-button {
width: 40px;
height: 40px;
align-self: flex-end;
background-color: var(--cmd-button-background-color) !important;
color: var(--cmd-button-text-color) !important;
}
/* Loading Indicator */
.loading-indicator {
align-items: center;
justify-content: center;
padding: 1rem;
color: var(--muted-text-color);
}

View File

@ -1804,6 +1804,187 @@ footer.file-footer button {
margin: 5px;
}
/* AI Chat Widget Styles */
.chat-widget {
display: flex;
flex-direction: column;
height: 100%;
overflow: hidden;
}
.chat-header {
display: flex;
justify-content: space-between;
align-items: center;
padding: 10px 15px;
border-bottom: 1px solid var(--main-border-color);
background-color: var(--accented-background-color);
}
.chat-title {
font-weight: bold;
flex-grow: 1;
overflow: hidden;
text-overflow: ellipsis;
white-space: nowrap;
}
.chat-actions {
display: flex;
gap: 5px;
}
.chat-messages {
flex-grow: 1;
overflow-y: auto;
padding: 15px;
display: flex;
flex-direction: column;
gap: 15px;
}
.chat-message {
display: flex;
gap: 10px;
max-width: 85%;
}
.chat-message-user {
align-self: flex-end;
flex-direction: row-reverse;
}
.chat-message-assistant {
align-self: flex-start;
}
.chat-message-avatar {
flex-shrink: 0;
width: 30px;
height: 30px;
border-radius: 50%;
background-color: var(--accented-background-color);
display: flex;
align-items: center;
justify-content: center;
}
.chat-message-user .chat-message-avatar {
background-color: var(--primary-color);
color: white;
}
.chat-message-assistant .chat-message-avatar {
background-color: var(--muted-text-color);
color: white;
}
.chat-message-content {
flex-grow: 1;
padding: 10px 15px;
border-radius: 12px;
background-color: var(--accented-background-color);
overflow-wrap: break-word;
word-break: break-word;
}
.chat-message-user .chat-message-content {
background-color: var(--primary-color);
color: white;
}
.chat-message-content pre {
background-color: var(--main-background-color);
border-radius: 5px;
padding: 10px;
overflow-x: auto;
margin: 10px 0;
}
.chat-message-user .chat-message-content pre {
background-color: rgba(255, 255, 255, 0.2);
}
.chat-message-content code {
font-family: monospace;
background-color: var(--main-background-color);
padding: 2px 4px;
border-radius: 3px;
}
.chat-message-user .chat-message-content code {
background-color: rgba(255, 255, 255, 0.2);
}
.chat-controls {
display: flex;
flex-direction: column;
padding: 15px;
gap: 10px;
border-top: 1px solid var(--main-border-color);
}
.chat-input-container {
position: relative;
}
.chat-input {
width: 100%;
resize: none;
padding-right: 40px;
}
.chat-buttons {
display: flex;
justify-content: space-between;
}
.chat-loading {
animation: chat-loading 1s infinite;
letter-spacing: 3px;
}
@keyframes chat-loading {
0% { opacity: 0.3; }
50% { opacity: 1; }
100% { opacity: 0.3; }
}
/* Right Pane Tab Styles */
#right-pane-tab-container {
display: flex;
gap: 10px;
}
.right-pane-tab {
padding: 5px 10px;
cursor: pointer;
border-radius: 5px;
white-space: nowrap;
overflow: hidden;
text-overflow: ellipsis;
transition: background-color 0.2s ease;
}
.right-pane-tab:hover {
background-color: var(--hover-item-background-color);
}
.right-pane-tab.active {
background-color: var(--primary-color);
color: white;
}
.right-pane-tab .tab-title {
display: flex;
align-items: center;
gap: 5px;
}
.right-pane-tab .tab-title .bx {
font-size: 1.1em;
}
.admonition {
--accent-color: var(--card-border-color);
border: 1px solid var(--accent-color);
@ -1811,12 +1992,7 @@ footer.file-footer button {
background: var(--card-background-color);
border-radius: 0.5em;
padding: 1em;
margin: 1.25em 0;
position: relative;
padding-left: 2.5em;
overflow: hidden;
}
.admonition p:last-child {
margin-bottom: 0;
}
@ -1835,6 +2011,61 @@ footer.file-footer button {
.admonition.caution { --accent-color: #ff2e2e; }
.admonition.warning { --accent-color: #e2aa03; }
.ck-content .admonition.note::before { content: "\eb21"; }
.ck-content .admonition.tip::before { content: "\ea0d"; }
.ck-content .admonition.important::before { content: "\ea7c"; }
.ck-content .admonition.caution::before { content: "\eac7"; }
.ck-content .admonition.warning::before { content: "\eac5"; }
.chat-options-container {
display: flex;
margin: 5px 0;
align-items: center;
padding: 0 10px;
}
.chat-option {
display: flex;
align-items: center;
font-size: 0.9em;
margin-right: 15px;
cursor: pointer;
}
.chat-option input[type="checkbox"] {
margin-right: 5px;
}
/* Style for thinking process in chat responses */
.thinking-process {
background-color: rgba(0, 0, 0, 0.05);
border-left: 3px solid var(--main-text-color);
padding: 10px;
margin: 10px 0;
border-radius: 4px;
}
.thinking-step {
margin-bottom: 8px;
padding-left: 10px;
}
.thinking-step.observation {
border-left: 2px solid #69c7ff;
}
.thinking-step.hypothesis {
border-left: 2px solid #9839f7;
}
.thinking-step.evidence {
border-left: 2px solid #40c025;
}
.thinking-step.conclusion {
border-left: 2px solid #e2aa03;
font-weight: bold;
}
.admonition.note::before { content: "\eb21"; }
.admonition.tip::before { content: "\ea0d"; }
.admonition.important::before { content: "\ea7c"; }
@ -1875,4 +2106,4 @@ footer.file-footer button {
.bx-tn-toc::before {
content: "\ec24";
transform: rotate(180deg);
}
}

View File

@ -0,0 +1,122 @@
/* LLM Chat Launcher Widget Styles */
.note-context-chat {
display: flex;
flex-direction: column;
height: 100%;
width: 100%;
}
.note-context-chat-container {
flex-grow: 1;
overflow-y: auto;
padding: 15px;
}
.chat-message {
display: flex;
margin-bottom: 15px;
max-width: 85%;
}
.chat-message.user-message {
margin-left: auto;
}
.chat-message.assistant-message {
margin-right: auto;
}
.message-avatar {
width: 32px;
height: 32px;
border-radius: 50%;
display: flex;
align-items: center;
justify-content: center;
margin-right: 8px;
}
.user-message .message-avatar {
background-color: var(--primary-color);
color: white;
}
.assistant-message .message-avatar {
background-color: var(--secondary-color);
color: white;
}
.message-content {
background-color: var(--more-accented-background-color);
border-radius: 12px;
padding: 10px 15px;
max-width: calc(100% - 40px);
}
.user-message .message-content {
background-color: var(--accented-background-color);
}
.message-content pre {
background-color: var(--code-background-color);
border-radius: 5px;
padding: 10px;
overflow-x: auto;
max-width: 100%;
}
.message-content code {
background-color: var(--code-background-color);
padding: 2px 4px;
border-radius: 3px;
}
.loading-indicator {
display: flex;
align-items: center;
margin: 10px 0;
color: var(--muted-text-color);
}
.sources-container {
background-color: var(--accented-background-color);
border-top: 1px solid var(--main-border-color);
padding: 8px;
}
.sources-list {
font-size: 0.9em;
}
.source-item {
padding: 4px 0;
}
.source-link {
color: var(--link-color);
text-decoration: none;
}
.source-link:hover {
text-decoration: underline;
}
.note-context-chat-form {
display: flex;
background-color: var(--main-background-color);
border-top: 1px solid var(--main-border-color);
padding: 10px;
}
.note-context-chat-input {
resize: vertical;
min-height: 44px;
max-height: 200px;
}
/* Responsive adjustments */
@media (max-width: 768px) {
.chat-message {
max-width: 95%;
}
}

View File

@ -257,6 +257,30 @@ div.note-detail-empty {
font-size: .85em;
}
nav.options-section-tabs {
min-width: var(--options-card-min-width);
max-width: var(--options-card-max-width);
margin: auto;
}
nav.options-section-tabs .nav-tabs {
border-bottom: 0;
}
nav.options-section-tabs + .options-section {
border-top-left-radius: 0;
border-top-right-radius: 0;
}
/* Appearance */
.main-font-size-input-group,
.tree-font-size-input-group,
.detail-font-size-input-group,
.monospace-font-size-input-group {
width: fit-content;
}
/* Shortcuts */
.note-detail-content-widget-content:has(.shortcuts-options-section) {

View File

@ -1122,6 +1122,201 @@
"layout-vertical-description": "launcher bar is on the left (default)",
"layout-horizontal-description": "launcher bar is underneath the tab bar, the tab bar is now full width."
},
"ai_llm": {
"embeddings_configuration": "Embeddings Configuration",
"not_started": "Not started",
"title": "AI & Embedding Settings",
"embedding_statistics": "Embedding Statistics",
"processed_notes": "Processed Notes",
"total_notes": "Total Notes",
"progress": "Progress",
"queued_notes": "Queued Notes",
"failed_notes": "Failed Notes",
"last_processed": "Last Processed",
"refresh_stats": "Refresh Statistics",
"no_failed_embeddings": "No failed embeddings found.",
"enable_ai_features": "Enable AI/LLM features",
"enable_ai_description": "Enable AI features like note summarization, content generation, and other LLM capabilities",
"openai_tab": "OpenAI",
"anthropic_tab": "Anthropic",
"voyage_tab": "Voyage AI",
"ollama_tab": "Ollama",
"enable_ai": "Enable AI/LLM features",
"enable_ai_desc": "Enable AI features like note summarization, content generation, and other LLM capabilities",
"provider_configuration": "AI Provider Configuration",
"provider_precedence": "Provider Precedence",
"provider_precedence_description": "Comma-separated list of providers in order of precedence (e.g., 'openai,anthropic,ollama')",
"temperature": "Temperature",
"temperature_description": "Controls randomness in responses (0 = deterministic, 2 = maximum randomness)",
"system_prompt": "System Prompt",
"system_prompt_description": "Default system prompt used for all AI interactions",
"openai_configuration": "OpenAI Configuration",
"openai_settings": "OpenAI Settings",
"api_key": "API Key",
"url": "Base URL",
"model": "Model",
"openai_api_key_description": "Your OpenAI API key for accessing their AI services",
"anthropic_api_key_description": "Your Anthropic API key for accessing Claude models",
"default_model": "Default Model",
"openai_model_description": "Examples: gpt-4o, gpt-4-turbo, gpt-3.5-turbo",
"embedding_model": "Embedding Model",
"openai_embedding_model_description": "Model used for generating embeddings (text-embedding-3-small recommended)",
"base_url": "Base URL",
"openai_url_description": "Default: https://api.openai.com/v1",
"anthropic_settings": "Anthropic Settings",
"anthropic_url_description": "Base URL for the Anthropic API (default: https://api.anthropic.com)",
"anthropic_model_description": "Anthropic Claude models for chat completion",
"voyage_settings": "Voyage AI Settings",
"voyage_api_key_description": "Your Voyage AI API key for accessing embeddings services",
"ollama_settings": "Ollama Settings",
"ollama_url_description": "URL for the Ollama API (default: http://localhost:11434)",
"ollama_model_description": "Ollama model to use for chat completion",
"anthropic_configuration": "Anthropic Configuration",
"voyage_embedding_model_description": "Voyage AI embedding models for text embeddings (voyage-2 recommended)",
"voyage_configuration": "Voyage AI Configuration",
"voyage_url_description": "Default: https://api.voyageai.com/v1",
"ollama_configuration": "Ollama Configuration",
"enable_ollama": "Enable Ollama",
"enable_ollama_description": "Enable Ollama for local AI model usage",
"ollama_url": "Ollama URL",
"ollama_model": "Ollama Model",
"ollama_embedding_model": "Embedding Model",
"ollama_embedding_model_description": "Specialized model for generating embeddings (vector representations)",
"refresh_models": "Refresh Models",
"refreshing_models": "Refreshing...",
"embedding_configuration": "Embeddings Configuration",
"embedding_default_provider": "Default Provider",
"embedding_default_provider_description": "Select the default provider used for generating note embeddings",
"embedding_provider_precedence": "Embedding Provider Precedence",
"embedding_providers_order": "Embedding Provider Order",
"embedding_providers_order_description": "Set the order of embedding providers in comma-separated format (e.g., \"openai,voyage,ollama,local\")",
"enable_automatic_indexing": "Enable Automatic Indexing",
"enable_automatic_indexing_description": "Automatically generate embeddings for new and updated notes",
"embedding_auto_update_enabled": "Auto-update Embeddings",
"embedding_auto_update_enabled_description": "Automatically update embeddings when notes are modified",
"recreate_embeddings": "Recreate All Embeddings",
"recreate_embeddings_description": "Regenerate all note embeddings from scratch (may take a long time for large note collections)",
"recreate_embeddings_started": "Embeddings regeneration started. This may take a long time for large note collections.",
"recreate_embeddings_error": "Error starting embeddings regeneration. Check logs for details.",
"recreate_embeddings_confirm": "Are you sure you want to recreate all embeddings? This may take a long time for large note collections.",
"rebuild_index": "Rebuild Index",
"rebuild_index_description": "Rebuild the vector search index for better performance (much faster than recreating embeddings)",
"rebuild_index_started": "Embedding index rebuild started. This may take several minutes.",
"rebuild_index_error": "Error starting index rebuild. Check logs for details.",
"note_title": "Note Title",
"error": "Error",
"last_attempt": "Last Attempt",
"actions": "Actions",
"retry": "Retry",
"partial": "{{ percentage }}% completed",
"retry_queued": "Note queued for retry",
"retry_failed": "Failed to queue note for retry",
"embedding_provider_precedence_description": "Comma-separated list of providers in order of precedence for embeddings search (e.g., 'openai,ollama,anthropic')",
"embedding_dimension_strategy": "Embedding Dimension Strategy",
"embedding_dimension_auto": "Auto (Recommended)",
"embedding_dimension_fixed": "Fixed",
"embedding_similarity_threshold": "Similarity Threshold",
"embedding_similarity_threshold_description": "Minimum similarity score for notes to be included in search results (0-1)",
"max_notes_per_llm_query": "Max Notes Per Query",
"max_notes_per_llm_query_description": "Maximum number of similar notes to include in AI context",
"embedding_dimension_strategy_description": "Choose how embeddings are handled. 'Native' preserves maximum information by adapting smaller vectors to match larger ones (recommended). 'Regenerate' creates new embeddings with the target model for specific search needs.",
"drag_providers_to_reorder": "Drag providers up or down to set your preferred order for embedding searches",
"active_providers": "Active Providers",
"disabled_providers": "Disabled Providers",
"remove_provider": "Remove provider from search",
"restore_provider": "Restore provider to search",
"embedding_generation_location": "Generation Location",
"embedding_generation_location_description": "Select where embedding generation should happen",
"embedding_generation_location_client": "Client/Server",
"embedding_generation_location_sync_server": "Sync Server",
"enable_auto_update_embeddings": "Auto-update Embeddings",
"enable_auto_update_embeddings_description": "Automatically update embeddings when notes are modified",
"auto_update_embeddings": "Auto-update Embeddings",
"auto_update_embeddings_desc": "Automatically update embeddings when notes are modified",
"similarity_threshold": "Similarity Threshold",
"similarity_threshold_description": "Minimum similarity score (0-1) for notes to be included in context for LLM queries",
"embedding_batch_size": "Batch Size",
"embedding_batch_size_description": "Number of notes to process in a single batch (1-50)",
"embedding_update_interval": "Update Interval (ms)",
"embedding_update_interval_description": "Time between processing batches of embeddings (in milliseconds)",
"embedding_default_dimension": "Default Dimension",
"embedding_default_dimension_description": "Default embedding vector dimension when creating new embeddings",
"reprocess_all_embeddings": "Reprocess All Embeddings",
"reprocess_all_embeddings_description": "Queue all notes for embedding processing. This may take some time depending on your number of notes.",
"reprocessing_embeddings": "Reprocessing...",
"reprocess_started": "Embedding reprocessing started in the background",
"reprocess_error": "Error starting embedding reprocessing",
"reprocess_index": "Rebuild Search Index",
"reprocess_index_description": "Optimize the search index for better performance. This uses existing embeddings without regenerating them (much faster than reprocessing all embeddings).",
"reprocessing_index": "Rebuilding...",
"reprocess_index_started": "Search index optimization started in the background",
"reprocess_index_error": "Error rebuilding search index",
"index_rebuild_progress": "Index Rebuild Progress",
"index_rebuilding": "Optimizing index ({{percentage}}%)",
"index_rebuild_complete": "Index optimization complete",
"index_rebuild_status_error": "Error checking index rebuild status",
"never": "Never",
"processing": "Processing ({{percentage}}%)",
"incomplete": "Incomplete ({{percentage}}%)",
"complete": "Complete (100%)",
"refreshing": "Refreshing...",
"stats_error": "Error fetching embedding statistics",
"auto_refresh_notice": "Auto-refreshes every {{seconds}} seconds",
"note_queued_for_retry": "Note queued for retry",
"failed_to_retry_note": "Failed to retry note",
"all_notes_queued_for_retry": "All failed notes queued for retry",
"failed_to_retry_all": "Failed to retry notes",
"ai_settings": "AI Settings",
"api_key_tooltip": "API key for accessing the service",
"confirm_delete_embeddings": "Are you sure you want to delete all AI embeddings? This will remove all semantic search capabilities until notes are reindexed, which can take a significant amount of time.",
"empty_key_warning": {
"anthropic": "Anthropic API key is empty. Please enter a valid API key.",
"openai": "OpenAI API key is empty. Please enter a valid API key.",
"voyage": "Voyage API key is empty. Please enter a valid API key.",
"ollama": "Ollama API key is empty. Please enter a valid API key."
},
"agent": {
"processing": "Processing...",
"thinking": "Thinking...",
"loading": "Loading...",
"generating": "Generating..."
},
"name": "AI",
"openai": "OpenAI",
"use_enhanced_context": "Use enhanced context",
"enhanced_context_description": "Provides the AI with more context from the note and its related notes for better responses",
"show_thinking": "Show thinking",
"show_thinking_description": "Show the AI's chain of thought process",
"enter_message": "Enter your message...",
"error_contacting_provider": "Error contacting AI provider. Please check your settings and internet connection.",
"error_generating_response": "Error generating AI response",
"index_all_notes": "Index All Notes",
"index_status": "Index Status",
"indexed_notes": "Indexed Notes",
"indexing_stopped": "Indexing stopped",
"indexing_in_progress": "Indexing in progress...",
"last_indexed": "Last Indexed",
"n_notes_queued": "{{ count }} note queued for indexing",
"n_notes_queued_plural": "{{ count }} notes queued for indexing",
"note_chat": "Note Chat",
"notes_indexed": "{{ count }} note indexed",
"notes_indexed_plural": "{{ count }} notes indexed",
"reset_embeddings": "Reset Embeddings",
"sources": "Sources",
"start_indexing": "Start Indexing",
"use_advanced_context": "Use Advanced Context",
"ollama_no_url": "Ollama is not configured. Please enter a valid URL.",
"chat": {
"root_note_title": "AI Chats",
"root_note_content": "This note contains your saved AI chat conversations.",
"new_chat_title": "New Chat",
"create_new_ai_chat": "Create new AI Chat"
},
"create_new_ai_chat": "Create new AI Chat",
"configuration_warnings": "There are some issues with your AI configuration. Please check your settings."
},
"zoom_factor": {
"title": "Zoom Factor (desktop build only)",
"description": "Zooming can be controlled with CTRL+- and CTRL+= shortcuts as well."
@ -1474,6 +1669,7 @@
"confirm-change": "It is not recommended to change note type when note content is not empty. Do you want to continue anyway?",
"geo-map": "Geo Map",
"beta-feature": "Beta",
"ai-chat": "AI Chat",
"task-list": "Task List"
},
"protect_note": {

src/routes/api/anthropic.ts Normal file
View File

@ -0,0 +1,112 @@
import options from "../../services/options.js";
import log from "../../services/log.js";
import type { Request, Response } from "express";
import { PROVIDER_CONSTANTS } from '../../services/llm/constants/provider_constants.js';
import Anthropic from '@anthropic-ai/sdk';
// Interface for Anthropic model entries
interface AnthropicModel {
id: string;
name: string;
type: string;
}
/**
* @swagger
* /api/anthropic/models:
* post:
* summary: List available models from Anthropic
* operationId: anthropic-list-models
* requestBody:
* required: false
* content:
* application/json:
* schema:
* type: object
* properties:
* baseUrl:
* type: string
* description: Optional custom Anthropic API base URL
* responses:
* '200':
* description: List of available Anthropic models
* content:
* application/json:
* schema:
* type: object
* properties:
* success:
* type: boolean
* chatModels:
* type: array
* items:
* type: object
* properties:
* id:
* type: string
* name:
* type: string
* type:
* type: string
* embeddingModels:
* type: array
* items:
* type: object
* properties:
* id:
* type: string
* name:
* type: string
* type:
* type: string
* '500':
* description: Error listing models
* security:
* - session: []
* tags: ["llm"]
*/
async function listModels(req: Request, res: Response) {
try {
const { baseUrl } = req.body;
// Use provided base URL or default from options
const anthropicBaseUrl = baseUrl ||
await options.getOption('anthropicBaseUrl') ||
PROVIDER_CONSTANTS.ANTHROPIC.BASE_URL;
const apiKey = await options.getOption('anthropicApiKey');
if (!apiKey) {
throw new Error('Anthropic API key is not configured');
}
log.info(`Using predefined Anthropic models list (avoiding direct API call)`);
// Instead of using the SDK's built-in models listing which might not work,
// directly use the predefined available models
const chatModels = PROVIDER_CONSTANTS.ANTHROPIC.AVAILABLE_MODELS.map(model => ({
id: model.id,
name: model.name,
type: 'chat'
}));
// Anthropic doesn't currently have embedding models
const embeddingModels: AnthropicModel[] = [];
// Return the models list
return {
success: true,
chatModels,
embeddingModels
};
} catch (error: any) {
log.error(`Error listing Anthropic models: ${error.message || 'Unknown error'}`);
// Properly throw the error to be handled by the global error handler
throw new Error(`Failed to list Anthropic models: ${error.message || 'Unknown error'}`);
}
}
export default {
listModels
};
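// Illustrative only (not part of this diff): a hedged example of invoking the
// route documented above, assuming it is mounted under the /api prefix shown
// in the swagger path.
//
//   const res = await fetch('/api/anthropic/models', {
//       method: 'POST',
//       headers: { 'Content-Type': 'application/json' },
//       body: JSON.stringify({ baseUrl: 'https://api.anthropic.com' })
//   });
//   const { chatModels } = await res.json(); // predefined Claude chat models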

View File

@ -0,0 +1,798 @@
import options from "../../services/options.js";
import vectorStore from "../../services/llm/embeddings/index.js";
import providerManager from "../../services/llm/providers/providers.js";
import indexService from "../../services/llm/index_service.js";
import becca from "../../becca/becca.js";
import type { Request, Response } from "express";
import log from "../../services/log.js";
import sql from "../../services/sql.js";
/**
* @swagger
* /api/llm/embeddings/similar/{noteId}:
* get:
* summary: Find similar notes based on a given note ID
* operationId: embeddings-similar-by-note
* parameters:
* - name: noteId
* in: path
* required: true
* schema:
* type: string
* - name: providerId
* in: query
* required: false
* schema:
* type: string
* default: openai
* description: Embedding provider ID
* - name: modelId
* in: query
* required: false
* schema:
* type: string
* default: text-embedding-3-small
* description: Embedding model ID
* - name: limit
* in: query
* required: false
* schema:
* type: integer
* default: 10
* description: Maximum number of similar notes to return
* - name: threshold
* in: query
* required: false
* schema:
* type: number
* format: float
* default: 0.7
* description: Similarity threshold (0.0-1.0)
* responses:
* '200':
* description: List of similar notes
* content:
* application/json:
* schema:
* type: object
* properties:
* success:
* type: boolean
* similarNotes:
* type: array
* items:
* type: object
* properties:
* noteId:
* type: string
* title:
* type: string
* similarity:
* type: number
* format: float
* '400':
* description: Invalid request parameters
* '404':
* description: Note not found
* security:
* - session: []
* tags: ["llm"]
*/
async function findSimilarNotes(req: Request, res: Response) {
const noteId = req.params.noteId;
const providerId = req.query.providerId as string || 'openai';
const modelId = req.query.modelId as string || 'text-embedding-3-small';
const limit = parseInt(req.query.limit as string || '10', 10);
const threshold = parseFloat(req.query.threshold as string || '0.7');
if (!noteId) {
return [400, {
success: false,
message: "Note ID is required"
}];
}
const embedding = await vectorStore.getEmbeddingForNote(noteId, providerId, modelId);
if (!embedding) {
// If no embedding exists for this note yet, generate one
const note = becca.getNote(noteId);
if (!note) {
return [404, {
success: false,
message: "Note not found"
}];
}
const context = await vectorStore.getNoteEmbeddingContext(noteId);
const provider = providerManager.getEmbeddingProvider(providerId);
if (!provider) {
return [400, {
success: false,
message: `Embedding provider '${providerId}' not found`
}];
}
const newEmbedding = await provider.generateNoteEmbeddings(context);
await vectorStore.storeNoteEmbedding(noteId, providerId, modelId, newEmbedding);
const similarNotes = await vectorStore.findSimilarNotes(
newEmbedding, providerId, modelId, limit, threshold
);
return {
success: true,
similarNotes
};
}
const similarNotes = await vectorStore.findSimilarNotes(
embedding.embedding, providerId, modelId, limit, threshold
);
return {
success: true,
similarNotes
};
}
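// Illustrative only (not part of this diff): an example request against the
// route documented above, with every query parameter at its documented
// default ("abc123" is a placeholder note ID).
//
//   GET /api/llm/embeddings/similar/abc123?providerId=openai&modelId=text-embedding-3-small&limit=10&threshold=0.7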
/**
* @swagger
* /api/llm/embeddings/search:
* post:
* summary: Search for notes similar to provided text
* operationId: embeddings-search-by-text
* parameters:
* - name: providerId
* in: query
* required: false
* schema:
* type: string
* default: openai
* description: Embedding provider ID
* - name: modelId
* in: query
* required: false
* schema:
* type: string
* default: text-embedding-3-small
* description: Embedding model ID
* - name: limit
* in: query
* required: false
* schema:
* type: integer
* default: 10
* description: Maximum number of similar notes to return
* - name: threshold
* in: query
* required: false
* schema:
* type: number
* format: float
* default: 0.7
* description: Similarity threshold (0.0-1.0)
* requestBody:
* required: true
* content:
* application/json:
* schema:
* type: object
* properties:
* text:
* type: string
* description: Text to search with
* responses:
* '200':
* description: List of similar notes
* content:
* application/json:
* schema:
* type: object
* properties:
* success:
* type: boolean
* similarNotes:
* type: array
* items:
* type: object
* properties:
* noteId:
* type: string
* title:
* type: string
* similarity:
* type: number
* format: float
* '400':
* description: Invalid request parameters
* security:
* - session: []
* tags: ["llm"]
*/
async function searchByText(req: Request, res: Response) {
const { text } = req.body;
const providerId = req.query.providerId as string || 'openai';
const modelId = req.query.modelId as string || 'text-embedding-3-small';
const limit = parseInt(req.query.limit as string || '10', 10);
const threshold = parseFloat(req.query.threshold as string || '0.7');
if (!text) {
return [400, {
success: false,
message: "Search text is required"
}];
}
const provider = providerManager.getEmbeddingProvider(providerId);
if (!provider) {
return [400, {
success: false,
message: `Embedding provider '${providerId}' not found`
}];
}
// Generate embedding for the search text
const embedding = await provider.generateEmbeddings(text);
// Find similar notes
const similarNotes = await vectorStore.findSimilarNotes(
embedding, providerId, modelId, limit, threshold
);
return {
success: true,
similarNotes
};
}
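// For orientation, a minimal sketch of the cosine-similarity comparison that the
// vector store is assumed to apply against `threshold`. This is illustrative only;
// the actual scoring lives inside vectorStore.findSimilarNotes and may differ.
function cosineSimilaritySketch(a: Float32Array, b: Float32Array): number {
    let dot = 0, normA = 0, normB = 0;
    for (let i = 0; i < a.length; i++) {
        dot += a[i] * b[i];   // accumulate the dot product
        normA += a[i] * a[i]; // accumulate squared magnitudes
        normB += b[i] * b[i];
    }
    // Guard against zero-length vectors to avoid division by zero
    return dot / (Math.sqrt(normA) * Math.sqrt(normB) || 1);
}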
/**
* @swagger
* /api/llm/embeddings/providers:
* get:
* summary: Get available embedding providers
* operationId: embeddings-get-providers
* responses:
* '200':
* description: List of available embedding providers
* content:
* application/json:
* schema:
* type: object
* properties:
* success:
* type: boolean
* providers:
* type: array
* items:
* type: object
* properties:
* id:
* type: string
* name:
* type: string
* isEnabled:
* type: boolean
* priority:
* type: integer
* config:
* type: object
* security:
* - session: []
* tags: ["llm"]
*/
async function getProviders(req: Request, res: Response) {
const providerConfigs = await providerManager.getEmbeddingProviderConfigs();
return {
success: true,
providers: providerConfigs
};
}
/**
* @swagger
* /api/llm/embeddings/providers/{providerId}:
* patch:
* summary: Update embedding provider configuration
* operationId: embeddings-update-provider
* parameters:
* - name: providerId
* in: path
* required: true
* schema:
* type: string
* description: Provider ID to update
* requestBody:
* required: true
* content:
* application/json:
* schema:
* type: object
* properties:
* isEnabled:
* type: boolean
* description: Whether provider is enabled
* priority:
* type: integer
* description: Priority order (lower is higher priority)
* config:
* type: object
* description: Provider-specific configuration
* responses:
* '200':
* description: Provider updated successfully
* content:
* application/json:
* schema:
* type: object
* properties:
* success:
* type: boolean
* '400':
* description: Invalid provider ID or configuration
* '404':
* description: Provider not found
* security:
* - session: []
* tags: ["llm"]
*/
async function updateProvider(req: Request, res: Response) {
const { providerId } = req.params;
const { isEnabled, priority, config } = req.body;
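// NOTE: `config` is accepted in the request body (see the schema above) but is
// not currently forwarded to updateEmbeddingProviderConfig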
const success = await providerManager.updateEmbeddingProviderConfig(
providerId, isEnabled, priority
);
if (!success) {
return [404, {
success: false,
message: "Provider not found"
}];
}
return {
success: true
};
}
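// Example request (hypothetical values):
//   PATCH /api/llm/embeddings/providers/openai
//   { "isEnabled": true, "priority": 1 }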
/**
* @swagger
* /api/llm/embeddings/reprocess:
* post:
* summary: Reprocess embeddings for all notes
* operationId: embeddings-reprocess-all
* requestBody:
* required: true
* content:
* application/json:
* schema:
* type: object
* properties:
* providerId:
* type: string
* description: Provider ID to use for reprocessing
* modelId:
* type: string
* description: Model ID to use for reprocessing
* forceReprocess:
* type: boolean
* description: Whether to reprocess notes that already have embeddings
* responses:
* '200':
* description: Reprocessing started
* content:
* application/json:
* schema:
* type: object
* properties:
* success:
* type: boolean
* message:
* type: string
* '400':
* description: Invalid provider ID or configuration
* security:
* - session: []
* tags: ["llm"]
*/
async function reprocessAllNotes(req: Request, res: Response) {
// Import cls
const cls = (await import("../../services/cls.js")).default;
// Start the reprocessing operation in the background
setTimeout(async () => {
try {
// Wrap the operation in cls.init to ensure proper context
await cls.init(async () => {
await vectorStore.reprocessAllNotes();
log.info("Embedding reprocessing completed successfully");
});
} catch (error: any) {
log.error(`Error during background embedding reprocessing: ${error.message || "Unknown error"}`);
}
}, 0);
// Return the response data
return {
success: true,
message: "Embedding reprocessing started in the background"
};
}
/**
* @swagger
* /api/llm/embeddings/queue-status:
* get:
* summary: Get status of the embedding processing queue
* operationId: embeddings-queue-status
* parameters:
* - name: jobId
* in: query
* required: false
* schema:
* type: string
* description: Optional job ID to get status for a specific processing job
* responses:
* '200':
* description: Queue status information
* content:
* application/json:
* schema:
* type: object
* properties:
* success:
* type: boolean
* status:
* type: object
* properties:
* queueCount:
* type: integer
* failedCount:
* type: integer
* totalEmbeddingsCount:
* type: integer
* security:
* - session: []
* tags: ["llm"]
*/
async function getQueueStatus(req: Request, res: Response) {
// Gather queue and embedding counts for the status summary
const queueCount = await sql.getValue(
"SELECT COUNT(*) FROM embedding_queue"
);
const failedCount = await sql.getValue(
"SELECT COUNT(*) FROM embedding_queue WHERE attempts > 0"
);
const totalEmbeddingsCount = await sql.getValue(
"SELECT COUNT(*) FROM note_embeddings"
);
return {
success: true,
status: {
queueCount,
failedCount,
totalEmbeddingsCount
}
};
}
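// Example response shape (counts are illustrative):
//   { success: true, status: { queueCount: 12, failedCount: 1, totalEmbeddingsCount: 3450 } }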
/**
* @swagger
* /api/llm/embeddings/stats:
* get:
* summary: Get embedding statistics
* operationId: embeddings-stats
* responses:
* '200':
* description: Embedding statistics
* content:
* application/json:
* schema:
* type: object
* properties:
* success:
* type: boolean
* stats:
* type: object
* properties:
* totalEmbeddings:
* type: integer
* providers:
* type: object
* modelCounts:
* type: object
* lastUpdated:
* type: string
* format: date-time
* security:
* - session: []
* tags: ["llm"]
*/
async function getEmbeddingStats(req: Request, res: Response) {
const stats = await vectorStore.getEmbeddingStats();
return {
success: true,
stats
};
}
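// Example response shape (values are illustrative; see the schema above):
//   { success: true, stats: { totalEmbeddings: 3450, providers: { openai: 3450 }, modelCounts: {...}, lastUpdated: "..." } }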
/**
* @swagger
* /api/llm/embeddings/failed:
* get:
* summary: Get list of notes that failed embedding generation
* operationId: embeddings-failed-notes
* responses:
* '200':
* description: List of failed notes
* content:
* application/json:
* schema:
* type: object
* properties:
* success:
* type: boolean
* failedNotes:
* type: array
* items:
* type: object
* properties:
* noteId:
* type: string
* title:
* type: string
* error:
* type: string
* failedAt:
* type: string
* format: date-time
* security:
* - session: []
* tags: ["llm"]
*/
async function getFailedNotes(req: Request, res: Response) {
const limit = parseInt(req.query.limit as string || '100', 10);
const failedNotes = await vectorStore.getFailedEmbeddingNotes(limit);
// Note titles are already included in the result, so no extra lookup is needed
return {
success: true,
failedNotes
};
}
/**
* @swagger
* /api/llm/embeddings/retry/{noteId}:
* post:
* summary: Retry generating embeddings for a failed note
* operationId: embeddings-retry-note
* parameters:
* - name: noteId
* in: path
* required: true
* schema:
* type: string
* description: Note ID to retry
* - name: providerId
* in: query
* required: false
* schema:
* type: string
* description: Provider ID to use (defaults to configured default)
* - name: modelId
* in: query
* required: false
* schema:
* type: string
* description: Model ID to use (defaults to provider default)
* responses:
* '200':
* description: Retry result
* content:
* application/json:
* schema:
* type: object
* properties:
* success:
* type: boolean
* message:
* type: string
* '400':
* description: Invalid request
* '404':
* description: Note not found
* security:
* - session: []
* tags: ["llm"]
*/
async function retryFailedNote(req: Request, res: Response) {
const { noteId } = req.params;
if (!noteId) {
return [400, {
success: false,
message: "Note ID is required"
}];
}
const success = await vectorStore.retryFailedEmbedding(noteId);
if (!success) {
return [404, {
success: false,
message: "Failed note not found or note is not marked as failed"
}];
}
return {
success: true,
message: "Note queued for retry"
};
}
/**
* @swagger
* /api/llm/embeddings/retry-all-failed:
* post:
* summary: Retry generating embeddings for all failed notes
* operationId: embeddings-retry-all-failed
* requestBody:
* required: false
* content:
* application/json:
* schema:
* type: object
* properties:
* providerId:
* type: string
* description: Provider ID to use (defaults to configured default)
* modelId:
* type: string
* description: Model ID to use (defaults to provider default)
* responses:
* '200':
* description: Retry started
* content:
* application/json:
* schema:
* type: object
* properties:
* success:
* type: boolean
* message:
* type: string
* security:
* - session: []
* tags: ["llm"]
*/
async function retryAllFailedNotes(req: Request, res: Response) {
const count = await vectorStore.retryAllFailedEmbeddings();
return {
success: true,
message: `${count} failed notes queued for retry`
};
}
/**
* @swagger
* /api/llm/embeddings/rebuild-index:
* post:
* summary: Rebuild the vector store index
* operationId: embeddings-rebuild-index
* responses:
* '200':
* description: Rebuild started
* content:
* application/json:
* schema:
* type: object
* properties:
* success:
* type: boolean
* message:
* type: string
* security:
* - session: []
* tags: ["llm"]
*/
async function rebuildIndex(req: Request, res: Response) {
// Start the index rebuilding operation in the background
setTimeout(async () => {
try {
await indexService.startFullIndexing(true);
log.info("Index rebuilding completed successfully");
} catch (error: any) {
log.error(`Error during background index rebuilding: ${error.message || "Unknown error"}`);
}
}, 0);
// Return the response data
return {
success: true,
message: "Index rebuilding started in the background"
};
}
/**
* @swagger
* /api/llm/embeddings/index-rebuild-status:
* get:
* summary: Get status of the vector index rebuild operation
* operationId: embeddings-rebuild-status
* parameters:
* - name: jobId
* in: query
* required: false
* schema:
* type: string
* description: Optional job ID to get status for a specific rebuild job
* responses:
* '200':
* description: Rebuild status information
* content:
* application/json:
* schema:
* type: object
* properties:
* success:
* type: boolean
* status:
* type: string
* enum: [idle, in_progress, completed, failed]
* progress:
* type: number
* format: float
* description: Progress percentage (0-100)
* message:
* type: string
* details:
* type: object
* properties:
* startTime:
* type: string
* format: date-time
* processed:
* type: integer
* total:
* type: integer
* security:
* - session: []
* tags: ["llm"]
*/
async function getIndexRebuildStatus(req: Request, res: Response) {
const status = indexService.getIndexRebuildStatus();
return {
success: true,
status
};
}
export default {
findSimilarNotes,
searchByText,
getProviders,
updateProvider,
reprocessAllNotes,
getQueueStatus,
getEmbeddingStats,
getFailedNotes,
retryFailedNote,
retryAllFailedNotes,
rebuildIndex,
getIndexRebuildStatus
};

944
src/routes/api/llm.ts Normal file
View File

@ -0,0 +1,944 @@
import type { Request, Response } from "express";
import log from "../../services/log.js";
import options from "../../services/options.js";
// Import the index service for knowledge base management
import indexService from "../../services/llm/index_service.js";
import restChatService from "../../services/llm/rest_chat_service.js";
import chatService from '../../services/llm/chat_service.js';
import chatStorageService from '../../services/llm/chat_storage_service.js';
// Define basic interfaces
interface ChatMessage {
role: 'user' | 'assistant' | 'system';
content: string;
timestamp?: Date;
}
/**
* @swagger
* /api/llm/sessions:
* post:
* summary: Create a new LLM chat session
* operationId: llm-create-session
* requestBody:
* required: true
* content:
* application/json:
* schema:
* type: object
* properties:
* title:
* type: string
* description: Title for the chat session
* systemPrompt:
* type: string
* description: System message to set the behavior of the assistant
* temperature:
* type: number
* description: Temperature parameter for the LLM (0.0-1.0)
* maxTokens:
* type: integer
* description: Maximum tokens to generate in responses
* model:
* type: string
* description: Specific model to use (depends on provider)
* provider:
* type: string
* description: LLM provider to use (e.g., 'openai', 'anthropic', 'ollama')
* contextNoteId:
* type: string
* description: Note ID to use as context for the session
* responses:
* '200':
* description: Successfully created session
* content:
* application/json:
* schema:
* type: object
* properties:
* sessionId:
* type: string
* title:
* type: string
* createdAt:
* type: string
* format: date-time
* security:
* - session: []
* tags: ["llm"]
*/
async function createSession(req: Request, res: Response) {
return restChatService.createSession(req, res);
}
/**
* @swagger
* /api/llm/sessions/{sessionId}:
* get:
* summary: Retrieve a specific chat session
* operationId: llm-get-session
* parameters:
* - name: sessionId
* in: path
* required: true
* schema:
* type: string
* responses:
* '200':
* description: Chat session details
* content:
* application/json:
* schema:
* type: object
* properties:
* id:
* type: string
* title:
* type: string
* messages:
* type: array
* items:
* type: object
* properties:
* role:
* type: string
* enum: [user, assistant, system]
* content:
* type: string
* timestamp:
* type: string
* format: date-time
* createdAt:
* type: string
* format: date-time
* lastActive:
* type: string
* format: date-time
* '404':
* description: Session not found
* security:
* - session: []
* tags: ["llm"]
*/
async function getSession(req: Request, res: Response) {
return restChatService.getSession(req, res);
}
/**
* @swagger
* /api/llm/chat/{chatNoteId}:
* patch:
* summary: Update a chat's settings
* operationId: llm-update-chat
* parameters:
* - name: chatNoteId
* in: path
* required: true
* schema:
* type: string
* description: The ID of the chat note (formerly sessionId)
* requestBody:
* required: true
* content:
* application/json:
* schema:
* type: object
* properties:
* title:
* type: string
* description: Updated title for the session
* systemPrompt:
* type: string
* description: Updated system prompt
* temperature:
* type: number
* description: Updated temperature setting
* maxTokens:
* type: integer
* description: Updated maximum tokens setting
* model:
* type: string
* description: Updated model selection
* provider:
* type: string
* description: Updated provider selection
* contextNoteId:
* type: string
* description: Updated note ID for context
* responses:
* '200':
* description: Session successfully updated
* content:
* application/json:
* schema:
* type: object
* properties:
* id:
* type: string
* title:
* type: string
* updatedAt:
* type: string
* format: date-time
* '404':
* description: Session not found
* security:
* - session: []
* tags: ["llm"]
*/
async function updateSession(req: Request, res: Response) {
// Get the chat using ChatService
const chatNoteId = req.params.chatNoteId;
const updates = req.body;
try {
// Get the chat
const session = await chatService.getOrCreateSession(chatNoteId);
// Update title if provided
if (updates.title) {
await chatStorageService.updateChat(chatNoteId, session.messages, updates.title);
}
// Return the updated chat
return {
id: chatNoteId,
title: updates.title || session.title,
updatedAt: new Date()
};
} catch (error) {
log.error(`Error updating chat: ${error}`);
throw new Error(`Failed to update chat: ${error}`);
}
}
/**
* @swagger
* /api/llm/sessions:
* get:
* summary: List all chat sessions
* operationId: llm-list-sessions
* responses:
* '200':
* description: List of chat sessions
* content:
* application/json:
* schema:
* type: object
* properties:
* sessions:
* type: array
* items:
* type: object
* properties:
* id:
* type: string
* title:
* type: string
* createdAt:
* type: string
* format: date-time
* lastActive:
* type: string
* format: date-time
* messageCount:
* type: integer
* security:
* - session: []
* tags: ["llm"]
*/
async function listSessions(req: Request, res: Response) {
// Get all sessions using ChatService
try {
const sessions = await chatService.getAllSessions();
// Format the response
return {
sessions: sessions.map(session => ({
id: session.id,
title: session.title,
createdAt: new Date(), // Since we don't have this in chat sessions
lastActive: new Date(), // Since we don't have this in chat sessions
messageCount: session.messages.length
}))
};
} catch (error) {
log.error(`Error listing sessions: ${error}`);
throw new Error(`Failed to list sessions: ${error}`);
}
}
/**
* @swagger
* /api/llm/sessions/{sessionId}:
* delete:
* summary: Delete a chat session
* operationId: llm-delete-session
* parameters:
* - name: sessionId
* in: path
* required: true
* schema:
* type: string
* responses:
* '200':
* description: Session successfully deleted
* '404':
* description: Session not found
* security:
* - session: []
* tags: ["llm"]
*/
async function deleteSession(req: Request, res: Response) {
return restChatService.deleteSession(req, res);
}
/**
* @swagger
* /api/llm/chat/{chatNoteId}/messages:
* post:
* summary: Send a message to an LLM and get a response
* operationId: llm-send-message
* parameters:
* - name: chatNoteId
* in: path
* required: true
* schema:
* type: string
* description: The ID of the chat note (formerly sessionId)
* requestBody:
* required: true
* content:
* application/json:
* schema:
* type: object
* properties:
* message:
* type: string
* description: The user message to send to the LLM
* options:
* type: object
* description: Optional parameters for this specific message
* properties:
* temperature:
* type: number
* maxTokens:
* type: integer
* model:
* type: string
* provider:
* type: string
* includeContext:
* type: boolean
* description: Whether to include relevant notes as context
* useNoteContext:
* type: boolean
* description: Whether to use the session's context note
* responses:
* '200':
* description: LLM response
* content:
* application/json:
* schema:
* type: object
* properties:
* response:
* type: string
* sources:
* type: array
* items:
* type: object
* properties:
* noteId:
* type: string
* title:
* type: string
* similarity:
* type: number
* sessionId:
* type: string
* '404':
* description: Session not found
* '500':
* description: Error processing request
* security:
* - session: []
* tags: ["llm"]
*/
async function sendMessage(req: Request, res: Response) {
return restChatService.handleSendMessage(req, res);
}
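// Example request/response (IDs and values are hypothetical):
//   POST /api/llm/chat/abc123/messages
//   { "message": "Summarize this note", "options": { "temperature": 0.7 } }
//   -> { response: "...", sources: [{ noteId: "def456", title: "...", similarity: 0.8 }], sessionId: "abc123" }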
/**
* @swagger
* /api/llm/indexes/stats:
* get:
* summary: Get stats about the LLM knowledge base indexing status
* operationId: llm-index-stats
* responses:
* '200':
* description: Index stats successfully retrieved
* security:
* - session: []
* tags: ["llm"]
*/
async function getIndexStats(req: Request, res: Response) {
try {
// Check if AI is enabled
const aiEnabled = await options.getOptionBool('aiEnabled');
if (!aiEnabled) {
return {
success: false,
message: "AI features are disabled"
};
}
// Return indexing stats
const stats = await indexService.getIndexingStats();
return {
success: true,
...stats
};
} catch (error: any) {
log.error(`Error getting index stats: ${error.message || 'Unknown error'}`);
throw new Error(`Failed to get index stats: ${error.message || 'Unknown error'}`);
}
}
/**
* @swagger
* /api/llm/indexes:
* post:
* summary: Start or continue indexing the knowledge base
* operationId: llm-start-indexing
* requestBody:
* required: false
* content:
* application/json:
* schema:
* type: object
* properties:
* force:
* type: boolean
* description: Whether to force reindexing of all notes
* responses:
* '200':
* description: Indexing started successfully
* security:
* - session: []
* tags: ["llm"]
*/
async function startIndexing(req: Request, res: Response) {
try {
// Check if AI is enabled
const aiEnabled = await options.getOptionBool('aiEnabled');
if (!aiEnabled) {
return {
success: false,
message: "AI features are disabled"
};
}
const { force = false } = req.body;
// Start indexing
await indexService.startFullIndexing(force);
return {
success: true,
message: "Indexing started"
};
} catch (error: any) {
log.error(`Error starting indexing: ${error.message || 'Unknown error'}`);
throw new Error(`Failed to start indexing: ${error.message || 'Unknown error'}`);
}
}
/**
* @swagger
* /api/llm/indexes/failed:
* get:
* summary: Get list of notes that failed to index
* operationId: llm-failed-indexes
* parameters:
* - name: limit
* in: query
* required: false
* schema:
* type: integer
* default: 100
* responses:
* '200':
* description: Failed indexes successfully retrieved
* security:
* - session: []
* tags: ["llm"]
*/
async function getFailedIndexes(req: Request, res: Response) {
try {
// Check if AI is enabled
const aiEnabled = await options.getOptionBool('aiEnabled');
if (!aiEnabled) {
return {
success: false,
message: "AI features are disabled"
};
}
const limit = parseInt(req.query.limit as string || "100", 10);
// Get failed indexes
const failed = await indexService.getFailedIndexes(limit);
return {
success: true,
failed
};
} catch (error: any) {
log.error(`Error getting failed indexes: ${error.message || 'Unknown error'}`);
throw new Error(`Failed to get failed indexes: ${error.message || 'Unknown error'}`);
}
}
/**
* @swagger
* /api/llm/indexes/notes/{noteId}:
* put:
* summary: Retry indexing a specific note that previously failed
* operationId: llm-retry-index
* parameters:
* - name: noteId
* in: path
* required: true
* schema:
* type: string
* responses:
* '200':
* description: Index retry successfully initiated
* security:
* - session: []
* tags: ["llm"]
*/
async function retryFailedIndex(req: Request, res: Response) {
try {
// Check if AI is enabled
const aiEnabled = await options.getOptionBool('aiEnabled');
if (!aiEnabled) {
return {
success: false,
message: "AI features are disabled"
};
}
const { noteId } = req.params;
// Retry indexing the note
const result = await indexService.retryFailedNote(noteId);
return {
success: true,
message: result ? "Note queued for indexing" : "Failed to queue note for indexing"
};
} catch (error: any) {
log.error(`Error retrying failed index: ${error.message || 'Unknown error'}`);
throw new Error(`Failed to retry index: ${error.message || 'Unknown error'}`);
}
}
/**
* @swagger
* /api/llm/indexes/failed:
* put:
* summary: Retry indexing all failed notes
* operationId: llm-retry-all-indexes
* responses:
* '200':
* description: Retry of all failed indexes successfully initiated
* security:
* - session: []
* tags: ["llm"]
*/
async function retryAllFailedIndexes(req: Request, res: Response) {
try {
// Check if AI is enabled
const aiEnabled = await options.getOptionBool('aiEnabled');
if (!aiEnabled) {
return {
success: false,
message: "AI features are disabled"
};
}
// Retry all failed notes
const count = await indexService.retryAllFailedNotes();
return {
success: true,
message: `${count} notes queued for reprocessing`
};
} catch (error: any) {
log.error(`Error retrying all failed indexes: ${error.message || 'Unknown error'}`);
throw new Error(`Failed to retry all indexes: ${error.message || 'Unknown error'}`);
}
}
/**
* @swagger
* /api/llm/indexes/notes/similar:
* get:
* summary: Find notes similar to a query string
* operationId: llm-find-similar-notes
* parameters:
* - name: query
* in: query
* required: true
* schema:
* type: string
* - name: contextNoteId
* in: query
* required: false
* schema:
* type: string
* - name: limit
* in: query
* required: false
* schema:
* type: integer
* default: 5
* responses:
* '200':
* description: Similar notes found successfully
* security:
* - session: []
* tags: ["llm"]
*/
async function findSimilarNotes(req: Request, res: Response) {
try {
// Check if AI is enabled
const aiEnabled = await options.getOptionBool('aiEnabled');
if (!aiEnabled) {
return {
success: false,
message: "AI features are disabled"
};
}
const query = req.query.query as string;
const contextNoteId = req.query.contextNoteId as string | undefined;
const limit = parseInt(req.query.limit as string || "5", 10);
if (!query) {
return {
success: false,
message: "Query is required"
};
}
// Find similar notes
const similar = await indexService.findSimilarNotes(query, contextNoteId, limit);
return {
success: true,
similar
};
} catch (error: any) {
log.error(`Error finding similar notes: ${error.message || 'Unknown error'}`);
throw new Error(`Failed to find similar notes: ${error.message || 'Unknown error'}`);
}
}
/**
* @swagger
* /api/llm/indexes/context:
* get:
* summary: Generate context for an LLM query based on the knowledge base
* operationId: llm-generate-context
* parameters:
* - name: query
* in: query
* required: true
* schema:
* type: string
* - name: contextNoteId
* in: query
* required: false
* schema:
* type: string
* - name: depth
* in: query
* required: false
* schema:
* type: integer
* default: 2
* responses:
* '200':
* description: Context generated successfully
* security:
* - session: []
* tags: ["llm"]
*/
async function generateQueryContext(req: Request, res: Response) {
try {
// Check if AI is enabled
const aiEnabled = await options.getOptionBool('aiEnabled');
if (!aiEnabled) {
return {
success: false,
message: "AI features are disabled"
};
}
const query = req.query.query as string;
const contextNoteId = req.query.contextNoteId as string | undefined;
const depth = parseInt(req.query.depth as string || "2", 10);
if (!query) {
return {
success: false,
message: "Query is required"
};
}
// Generate context
const context = await indexService.generateQueryContext(query, contextNoteId, depth);
return {
success: true,
context
};
} catch (error: any) {
log.error(`Error generating query context: ${error.message || 'Unknown error'}`);
throw new Error(`Failed to generate query context: ${error.message || 'Unknown error'}`);
}
}
/**
* @swagger
* /api/llm/indexes/notes/{noteId}:
* post:
* summary: Index a specific note for LLM knowledge base
* operationId: llm-index-note
* parameters:
* - name: noteId
* in: path
* required: true
* schema:
* type: string
* responses:
* '200':
* description: Note indexed successfully
* security:
* - session: []
* tags: ["llm"]
*/
async function indexNote(req: Request, res: Response) {
try {
// Check if AI is enabled
const aiEnabled = await options.getOptionBool('aiEnabled');
if (!aiEnabled) {
return {
success: false,
message: "AI features are disabled"
};
}
const { noteId } = req.params;
if (!noteId) {
return {
success: false,
message: "Note ID is required"
};
}
// Index the note
const result = await indexService.generateNoteIndex(noteId);
return {
success: true,
message: result ? "Note indexed successfully" : "Failed to index note"
};
} catch (error: any) {
log.error(`Error indexing note: ${error.message || 'Unknown error'}`);
throw new Error(`Failed to index note: ${error.message || 'Unknown error'}`);
}
}
/**
* @swagger
* /api/llm/chat/{chatNoteId}/messages/stream:
* post:
* summary: Stream a message to an LLM via WebSocket
* operationId: llm-stream-message
* parameters:
* - name: chatNoteId
* in: path
* required: true
* schema:
* type: string
* description: The ID of the chat note to stream messages to (formerly sessionId)
* requestBody:
* required: true
* content:
* application/json:
* schema:
* type: object
* properties:
* content:
* type: string
* description: The user message to send to the LLM
* useAdvancedContext:
* type: boolean
* description: Whether to use advanced context extraction
* showThinking:
* type: boolean
* description: Whether to show thinking process in the response
* responses:
* '200':
* description: Streaming started successfully
* '404':
* description: Session not found
* '500':
* description: Error processing request
* security:
* - session: []
* tags: ["llm"]
*/
async function streamMessage(req: Request, res: Response) {
log.info("=== Starting streamMessage ===");
try {
const chatNoteId = req.params.chatNoteId;
const { content, useAdvancedContext, showThinking } = req.body;
if (!content || typeof content !== 'string' || content.trim().length === 0) {
throw new Error('Content cannot be empty');
}
// Check if session exists
const session = restChatService.getSessions().get(chatNoteId);
if (!session) {
throw new Error('Chat not found');
}
// Update last active timestamp
session.lastActive = new Date();
// Add user message to the session
session.messages.push({
role: 'user',
content,
timestamp: new Date()
});
// Create a fake request/response pair to pass to the handler
const fakeReq = {
...req,
method: 'GET', // Set to GET to indicate streaming
query: {
stream: 'true', // Set stream param - don't use format: 'stream' to avoid confusion
useAdvancedContext: String(useAdvancedContext === true),
showThinking: String(showThinking === true)
},
params: {
chatNoteId: chatNoteId
},
// Make sure the original content is available to the handler
body: {
content,
useAdvancedContext: useAdvancedContext === true,
showThinking: showThinking === true
}
} as unknown as Request;
// Log to verify correct parameters
log.info(`WebSocket stream settings - useAdvancedContext=${useAdvancedContext === true}, in query=${fakeReq.query.useAdvancedContext}, in body=${fakeReq.body.useAdvancedContext}`);
// Log whether advanced context is enabled for this request
if (useAdvancedContext === true) {
log.info(`Enhanced context IS enabled for this request`);
} else {
log.info(`Enhanced context is NOT enabled for this request`);
}
// Process the request in the background
Promise.resolve().then(async () => {
try {
await restChatService.handleSendMessage(fakeReq, res);
} catch (error) {
log.error(`Background message processing error: ${error}`);
// Import the WebSocket service
const wsService = (await import('../../services/ws.js')).default;
// Define LLMStreamMessage interface
interface LLMStreamMessage {
type: 'llm-stream';
chatNoteId: string;
content?: string;
thinking?: string;
toolExecution?: any;
done?: boolean;
error?: string;
raw?: unknown;
}
// Send error to client via WebSocket
wsService.sendMessageToAllClients({
type: 'llm-stream',
chatNoteId: chatNoteId,
error: `Error processing message: ${error}`,
done: true
} as LLMStreamMessage);
}
});
// Import the WebSocket service
const wsService = (await import('../../services/ws.js')).default;
// Let the client know streaming has started via WebSocket (helps client confirm connection is working)
wsService.sendMessageToAllClients({
type: 'llm-stream',
chatNoteId: chatNoteId,
thinking: 'Initializing streaming LLM response...'
});
// Let the client know streaming has started via HTTP response
return {
success: true,
message: 'Streaming started',
chatNoteId: chatNoteId
};
} catch (error: any) {
log.error(`Error starting message stream: ${error.message}`);
throw error;
}
}
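// Minimal client-side sketch of consuming the `llm-stream` messages emitted above.
// Assumptions: the browser client is already connected to the app's WebSocket endpoint
// and `chatNoteId` identifies the chat being streamed; the real client wiring lives
// in the frontend, so treat this as illustrative only.
function listenForStreamSketch(socket: WebSocket, chatNoteId: string, onDone: (answer: string) => void) {
    let answer = "";
    socket.addEventListener("message", (event) => {
        const msg = JSON.parse(event.data as string);
        if (msg.type !== "llm-stream" || msg.chatNoteId !== chatNoteId) {
            return; // ignore unrelated messages
        }
        if (msg.content) answer += msg.content;  // accumulate streamed tokens
        if (msg.error) console.error(msg.error); // surface streaming errors
        if (msg.done) onDone(answer);            // stream finished
    });
}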
export default {
// Chat session management
createSession,
getSession,
updateSession,
listSessions,
deleteSession,
sendMessage,
streamMessage,
// Knowledge base index management
getIndexStats,
startIndexing,
getFailedIndexes,
retryFailedIndex,
retryAllFailedIndexes,
findSimilarNotes,
generateQueryContext,
indexNote
};

64
src/routes/api/ollama.ts Normal file
View File

@ -0,0 +1,64 @@
import options from "../../services/options.js";
import log from "../../services/log.js";
import type { Request, Response } from "express";
import { Ollama } from "ollama";
/**
* @swagger
* /api/llm/providers/ollama/models:
* get:
* summary: List available models from Ollama
* operationId: ollama-list-models
* parameters:
* - name: baseUrl
* in: query
* required: false
* schema:
* type: string
* description: Optional custom Ollama API base URL
* responses:
* '200':
* description: List of available Ollama models
* content:
* application/json:
* schema:
* type: object
* properties:
* success:
* type: boolean
* models:
* type: array
* items:
* type: object
* '500':
* description: Error listing models
* security:
* - session: []
* tags: ["llm"]
*/
async function listModels(req: Request, res: Response) {
try {
const baseUrl = req.query.baseUrl as string || await options.getOption('ollamaBaseUrl') || 'http://localhost:11434';
// Create Ollama client
const ollama = new Ollama({ host: baseUrl });
// Call Ollama API to get models using the official client
const response = await ollama.list();
// Return the models list
return {
success: true,
models: response.models || []
};
} catch (error: any) {
log.error(`Error listing Ollama models: ${error.message || 'Unknown error'}`);
// Properly throw the error to be handled by the global error handler
throw new Error(`Failed to list Ollama models: ${error.message || 'Unknown error'}`);
}
}
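// Example request (assumes a local Ollama instance; response fields abbreviated):
//   GET /api/llm/providers/ollama/models?baseUrl=http://localhost:11434
//   -> { success: true, models: [{ name: "llama3", ... }, ...] }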
export default {
listModels
};

127
src/routes/api/openai.ts Normal file
View File

@ -0,0 +1,127 @@
import options from "../../services/options.js";
import log from "../../services/log.js";
import type { Request, Response } from "express";
import OpenAI from "openai";
/**
* @swagger
* /api/llm/providers/openai/models:
* get:
* summary: List available models from OpenAI
* operationId: openai-list-models
* parameters:
* - name: baseUrl
* in: query
* required: false
* schema:
* type: string
* description: Optional custom OpenAI API base URL
* responses:
* '200':
* description: List of available OpenAI models
* content:
* application/json:
* schema:
* type: object
* properties:
* success:
* type: boolean
* chatModels:
* type: array
* items:
* type: object
* properties:
* id:
* type: string
* name:
* type: string
* type:
* type: string
* embeddingModels:
* type: array
* items:
* type: object
* properties:
* id:
* type: string
* name:
* type: string
* type:
* type: string
* '500':
* description: Error listing models
* security:
* - session: []
* tags: ["llm"]
*/
async function listModels(req: Request, res: Response) {
try {
// The route is registered as GET, so read the optional base URL from the query string
const openaiBaseUrl = req.query.baseUrl as string || await options.getOption('openaiBaseUrl') || 'https://api.openai.com/v1';
const apiKey = await options.getOption('openaiApiKey');
if (!apiKey) {
throw new Error('OpenAI API key is not configured');
}
// Initialize OpenAI client with the API key and base URL
const openai = new OpenAI({
apiKey,
baseURL: openaiBaseUrl
});
// Call OpenAI API to get models using the SDK
const response = await openai.models.list();
// Filter and categorize models
const allModels = response.data || [];
// Separate models into chat models and embedding models
const chatModels = allModels
.filter((model) =>
// Include GPT models for chat
model.id.includes('gpt') ||
// Include Claude models exposed through OpenAI-compatible endpoints
model.id.includes('claude')
)
.map((model) => ({
id: model.id,
name: model.id,
type: 'chat'
}));
const embeddingModels = allModels
.filter((model) =>
// Only include embedding-specific models
model.id.includes('embedding') ||
model.id.includes('embed')
)
.map((model) => ({
id: model.id,
name: model.id,
type: 'embedding'
}));
// Return the models list
return {
success: true,
chatModels,
embeddingModels
};
} catch (error: any) {
log.error(`Error listing OpenAI models: ${error.message || 'Unknown error'}`);
// Properly throw the error to be handled by the global error handler
throw new Error(`Failed to list OpenAI models: ${error.message || 'Unknown error'}`);
}
}
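// Example request (requires `openaiApiKey` to be configured; model IDs are illustrative):
//   GET /api/llm/providers/openai/models
//   -> { success: true, chatModels: [{ id: "gpt-4o", name: "gpt-4o", type: "chat" }, ...], embeddingModels: [...] }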
export default {
listModels
};

View File

@ -82,7 +82,35 @@ const ALLOWED_OPTIONS = new Set<OptionNames>([
"allowedHtmlTags",
"redirectBareDomain",
"showLoginInShareTheme",
"splitEditorOrientation",
// AI/LLM integration options
"aiEnabled",
"aiTemperature",
"aiSystemPrompt",
"aiProviderPrecedence",
"openaiApiKey",
"openaiBaseUrl",
"openaiDefaultModel",
"openaiEmbeddingModel",
"anthropicApiKey",
"anthropicBaseUrl",
"anthropicDefaultModel",
"voyageApiKey",
"voyageEmbeddingModel",
"ollamaBaseUrl",
"ollamaDefaultModel",
"ollamaEmbeddingModel",
"embeddingAutoUpdateEnabled",
"embeddingDimensionStrategy",
"embeddingProviderPrecedence",
"embeddingSimilarityThreshold",
"embeddingBatchSize",
"embeddingUpdateInterval",
"enableAutomaticIndexing",
"maxNotesPerLlmQuery",
// Embedding options
"embeddingDefaultDimension",
"mfaEnabled",
"mfaMethod"
]);

View File

@ -61,6 +61,11 @@ import etapiTokensApiRoutes from "./api/etapi_tokens.js";
import relationMapApiRoute from "./api/relation-map.js";
import otherRoute from "./api/other.js";
import shareRoutes from "../share/routes.js";
import embeddingsRoute from "./api/embeddings.js";
import ollamaRoute from "./api/ollama.js";
import openaiRoute from "./api/openai.js";
import anthropicRoute from "./api/anthropic.js";
import llmRoute from "./api/llm.js";
import etapiAuthRoutes from "../etapi/auth.js";
import etapiAppInfoRoutes from "../etapi/app_info.js";
@ -387,6 +392,44 @@ function register(app: express.Application) {
etapiSpecRoute.register(router);
etapiBackupRoute.register(router);
// LLM Chat API
apiRoute(PST, "/api/llm/chat", llmRoute.createSession);
apiRoute(GET, "/api/llm/chat", llmRoute.listSessions);
apiRoute(GET, "/api/llm/chat/:sessionId", llmRoute.getSession);
apiRoute(PATCH, "/api/llm/chat/:chatNoteId", llmRoute.updateSession);
apiRoute(DEL, "/api/llm/chat/:chatNoteId", llmRoute.deleteSession);
apiRoute(PST, "/api/llm/chat/:chatNoteId/messages", llmRoute.sendMessage);
apiRoute(PST, "/api/llm/chat/:chatNoteId/messages/stream", llmRoute.streamMessage);
// LLM index management endpoints - reorganized for REST principles
apiRoute(GET, "/api/llm/indexes/stats", llmRoute.getIndexStats);
apiRoute(PST, "/api/llm/indexes", llmRoute.startIndexing); // Create index process
apiRoute(GET, "/api/llm/indexes/failed", llmRoute.getFailedIndexes);
apiRoute(PUT, "/api/llm/indexes/notes/:noteId", llmRoute.retryFailedIndex); // Update index for note
apiRoute(PUT, "/api/llm/indexes/failed", llmRoute.retryAllFailedIndexes); // Update all failed indexes
apiRoute(GET, "/api/llm/indexes/notes/similar", llmRoute.findSimilarNotes); // Get similar notes
apiRoute(GET, "/api/llm/indexes/context", llmRoute.generateQueryContext); // Get context
apiRoute(PST, "/api/llm/indexes/notes/:noteId", llmRoute.indexNote); // Create index for specific note
// LLM embeddings endpoints
apiRoute(GET, "/api/llm/embeddings/similar/:noteId", embeddingsRoute.findSimilarNotes);
apiRoute(PST, "/api/llm/embeddings/search", embeddingsRoute.searchByText);
apiRoute(GET, "/api/llm/embeddings/providers", embeddingsRoute.getProviders);
apiRoute(PATCH, "/api/llm/embeddings/providers/:providerId", embeddingsRoute.updateProvider);
apiRoute(PST, "/api/llm/embeddings/reprocess", embeddingsRoute.reprocessAllNotes);
apiRoute(GET, "/api/llm/embeddings/queue-status", embeddingsRoute.getQueueStatus);
apiRoute(GET, "/api/llm/embeddings/stats", embeddingsRoute.getEmbeddingStats);
apiRoute(GET, "/api/llm/embeddings/failed", embeddingsRoute.getFailedNotes);
apiRoute(PST, "/api/llm/embeddings/retry/:noteId", embeddingsRoute.retryFailedNote);
apiRoute(PST, "/api/llm/embeddings/retry-all-failed", embeddingsRoute.retryAllFailedNotes);
apiRoute(PST, "/api/llm/embeddings/rebuild-index", embeddingsRoute.rebuildIndex);
apiRoute(GET, "/api/llm/embeddings/index-rebuild-status", embeddingsRoute.getIndexRebuildStatus);
// LLM provider endpoints - moved under /api/llm/providers hierarchy
apiRoute(GET, "/api/llm/providers/ollama/models", ollamaRoute.listModels);
apiRoute(GET, "/api/llm/providers/openai/models", openaiRoute.listModels);
apiRoute(GET, "/api/llm/providers/anthropic/models", anthropicRoute.listModels);
// API Documentation
apiDocsRoute.register(app);
@ -500,8 +543,14 @@ function route(method: HttpMethod, path: string, middleware: express.Handler[],
}
function handleResponse(resultHandler: ApiResultHandler, req: express.Request, res: express.Response, result: unknown, start: number) {
// Skip result handling if the response has already been handled
if ((res as any).triliumResponseHandled) {
// Just log the request without additional processing
log.request(req, res, Date.now() - start, 0);
return;
}
const responseLength = resultHandler(req, res, result);
log.request(req, res, Date.now() - start, responseLength);
}

View File

@ -3,8 +3,8 @@ import build from "./build.js";
import packageJson from "../../package.json" with { type: "json" };
import dataDir from "./data_dir.js";
const APP_DB_VERSION = 229;
const SYNC_VERSION = 34;
const APP_DB_VERSION = 230;
const SYNC_VERSION = 35;
const CLIPPER_PROTOCOL_VERSION = "1.0";
export default {

Some files were not shown because too many files have changed in this diff