diff --git a/db/migrations/0230__vector_embeddings.sql b/db/migrations/0230__vector_embeddings.sql new file mode 100644 index 000000000..45f14fddf --- /dev/null +++ b/db/migrations/0230__vector_embeddings.sql @@ -0,0 +1,47 @@ +-- Add tables for vector embeddings storage and management +-- This migration adds embedding support to the main document.db database + +-- Store embeddings for notes +CREATE TABLE IF NOT EXISTS "note_embeddings" ( + "embedId" TEXT NOT NULL PRIMARY KEY, + "noteId" TEXT NOT NULL, + "providerId" TEXT NOT NULL, + "modelId" TEXT NOT NULL, + "dimension" INTEGER NOT NULL, + "embedding" BLOB NOT NULL, + "version" INTEGER NOT NULL DEFAULT 1, + "dateCreated" TEXT NOT NULL, + "utcDateCreated" TEXT NOT NULL, + "dateModified" TEXT NOT NULL, + "utcDateModified" TEXT NOT NULL +); + +CREATE INDEX "IDX_note_embeddings_noteId" ON "note_embeddings" ("noteId"); +CREATE INDEX "IDX_note_embeddings_providerId_modelId" ON "note_embeddings" ("providerId", "modelId"); + +-- Table to track which notes need embedding updates +CREATE TABLE IF NOT EXISTS "embedding_queue" ( + "noteId" TEXT NOT NULL PRIMARY KEY, + "operation" TEXT NOT NULL, -- CREATE, UPDATE, DELETE + "dateQueued" TEXT NOT NULL, + "utcDateQueued" TEXT NOT NULL, + "priority" INTEGER NOT NULL DEFAULT 0, + "attempts" INTEGER NOT NULL DEFAULT 0, + "lastAttempt" TEXT NULL, + "error" TEXT NULL, + "failed" INTEGER NOT NULL DEFAULT 0, + "isProcessing" INTEGER NOT NULL DEFAULT 0 +); + +-- Table to store embedding provider configurations +CREATE TABLE IF NOT EXISTS "embedding_providers" ( + "providerId" TEXT NOT NULL PRIMARY KEY, + "name" TEXT NOT NULL, + "isEnabled" INTEGER NOT NULL DEFAULT 0, + "priority" INTEGER NOT NULL DEFAULT 0, + "config" TEXT NOT NULL, -- JSON config object + "dateCreated" TEXT NOT NULL, + "utcDateCreated" TEXT NOT NULL, + "dateModified" TEXT NOT NULL, + "utcDateModified" TEXT NOT NULL +); \ No newline at end of file diff --git a/db/schema.sql b/db/schema.sql index 8bf9db1e7..29b749d89 100644 --- a/db/schema.sql +++ b/db/schema.sql
@@ -145,3 +145,45 @@ CREATE INDEX IDX_attachments_ownerId_role CREATE INDEX IDX_notes_blobId on notes (blobId); CREATE INDEX IDX_revisions_blobId on revisions (blobId); CREATE INDEX IDX_attachments_blobId on attachments (blobId); + +CREATE TABLE IF NOT EXISTS "note_embeddings" ( + "embedId" TEXT NOT NULL PRIMARY KEY, + "noteId" TEXT NOT NULL, + "providerId" TEXT NOT NULL, + "modelId" TEXT NOT NULL, + "dimension" INTEGER NOT NULL, + "embedding" BLOB NOT NULL, + "version" INTEGER NOT NULL DEFAULT 1, + "dateCreated" TEXT NOT NULL, + "utcDateCreated" TEXT NOT NULL, + "dateModified" TEXT NOT NULL, + "utcDateModified" TEXT NOT NULL +); + +CREATE INDEX "IDX_note_embeddings_noteId" ON "note_embeddings" ("noteId"); +CREATE INDEX "IDX_note_embeddings_providerId_modelId" ON "note_embeddings" ("providerId", "modelId"); + +CREATE TABLE IF NOT EXISTS "embedding_queue" ( + "noteId" TEXT NOT NULL PRIMARY KEY, + "operation" TEXT NOT NULL, + "dateQueued" TEXT NOT NULL, + "utcDateQueued" TEXT NOT NULL, + "priority" INTEGER NOT NULL DEFAULT 0, + "attempts" INTEGER NOT NULL DEFAULT 0, + "lastAttempt" TEXT NULL, + "error" TEXT NULL, + "failed" INTEGER NOT NULL DEFAULT 0, + "isProcessing" INTEGER NOT NULL DEFAULT 0 +); + +CREATE TABLE IF NOT EXISTS "embedding_providers" ( + "providerId" TEXT NOT NULL PRIMARY KEY, + "name" TEXT NOT NULL, + "isEnabled" INTEGER NOT NULL DEFAULT 0, + "priority" INTEGER NOT NULL DEFAULT 0, + "config" TEXT NOT NULL, + "dateCreated" TEXT NOT NULL, + "utcDateCreated" TEXT NOT NULL, + "dateModified" TEXT NOT NULL, + "utcDateModified" TEXT NOT NULL +); diff --git a/docs/Developer Guide/!!!meta.json b/docs/Developer Guide/!!!meta.json index af6ae2063..df3a5208e 100644 --- a/docs/Developer Guide/!!!meta.json +++ b/docs/Developer Guide/!!!meta.json @@ -1,6 +1,6 @@ { "formatVersion": 2, - "appVersion": "0.92.7", + "appVersion": "0.93.0", "files": [ { "isClone": false, diff --git a/docs/Release Notes/!!!meta.json b/docs/Release Notes/!!!meta.json index 
9a5c041f1..7e22a4a32 100644 --- a/docs/Release Notes/!!!meta.json +++ b/docs/Release Notes/!!!meta.json @@ -1,6 +1,6 @@ { "formatVersion": 2, - "appVersion": "0.92.7", + "appVersion": "0.93.0", "files": [ { "isClone": false, diff --git a/docs/Release Notes/Release Notes/v0.93.0.md b/docs/Release Notes/Release Notes/v0.93.0.md index dcbbfc016..43425993f 100644 --- a/docs/Release Notes/Release Notes/v0.93.0.md +++ b/docs/Release Notes/Release Notes/v0.93.0.md @@ -1,5 +1,4 @@ # v0.93.0 - ## 🐞 Bugfixes * Calendar does not hide when clicking on a note by @JYC333 diff --git a/docs/User Guide/!!!meta.json b/docs/User Guide/!!!meta.json index 374334fb7..1b716c2ba 100644 --- a/docs/User Guide/!!!meta.json +++ b/docs/User Guide/!!!meta.json @@ -1,6 +1,6 @@ { "formatVersion": 2, - "appVersion": "0.92.7", + "appVersion": "0.93.0", "files": [ { "isClone": false, @@ -10598,6 +10598,369 @@ } ] }, + { + "isClone": false, + "noteId": "LMAv4Uy3Wk6J", + "notePath": [ + "pOsGYCXsbNQG", + "LMAv4Uy3Wk6J" + ], + "title": "AI", + "notePosition": 320, + "prefix": null, + "isExpanded": false, + "type": "book", + "mime": "", + "attributes": [ + { + "type": "label", + "name": "iconClass", + "value": "bx bx-bot", + "isInheritable": false, + "position": 10 + }, + { + "type": "label", + "name": "viewType", + "value": "list", + "isInheritable": false, + "position": 20 + }, + { + "type": "label", + "name": "expanded", + "value": "", + "isInheritable": false, + "position": 30 + } + ], + "attachments": [], + "dirFileName": "AI", + "children": [ + { + "isClone": false, + "noteId": "GBBMSlVSOIGP", + "notePath": [ + "pOsGYCXsbNQG", + "LMAv4Uy3Wk6J", + "GBBMSlVSOIGP" + ], + "title": "Introduction", + "notePosition": 10, + "prefix": null, + "isExpanded": false, + "type": "text", + "mime": "text/html", + "attributes": [ + { + "type": "relation", + "name": "internalLink", + "value": "vvUCN7FDkq7G", + "isInheritable": false, + "position": 10 + } + ], + "format": "markdown", + "dataFileName": 
"Introduction.md", + "attachments": [ + { + "attachmentId": "4UpXwA3WvbmA", + "title": "image.png", + "role": "image", + "mime": "image/png", + "position": 10, + "dataFileName": "Introduction_image.png" + }, + { + "attachmentId": "8Bn5IsE3Bv1k", + "title": "image.png", + "role": "image", + "mime": "image/png", + "position": 10, + "dataFileName": "1_Introduction_image.png" + }, + { + "attachmentId": "ABN1rFIIJ8no", + "title": "image.png", + "role": "image", + "mime": "image/png", + "position": 10, + "dataFileName": "2_Introduction_image.png" + }, + { + "attachmentId": "CK3z7sYw63XT", + "title": "image.png", + "role": "image", + "mime": "image/png", + "position": 10, + "dataFileName": "3_Introduction_image.png" + }, + { + "attachmentId": "E6Y09N2t7vyA", + "title": "image.png", + "role": "image", + "mime": "image/png", + "position": 10, + "dataFileName": "4_Introduction_image.png" + }, + { + "attachmentId": "JlIPeTtl5wlV", + "title": "image.png", + "role": "image", + "mime": "image/png", + "position": 10, + "dataFileName": "5_Introduction_image.png" + }, + { + "attachmentId": "ur4TDJeRqpUC", + "title": "image.png", + "role": "image", + "mime": "image/png", + "position": 10, + "dataFileName": "6_Introduction_image.png" + }, + { + "attachmentId": "UTH83LkQEA8u", + "title": "image.png", + "role": "image", + "mime": "image/png", + "position": 10, + "dataFileName": "7_Introduction_image.png" + }, + { + "attachmentId": "V68TCCTUdyl7", + "title": "image.png", + "role": "image", + "mime": "image/png", + "position": 10, + "dataFileName": "8_Introduction_image.png" + }, + { + "attachmentId": "YbWoNq58T9kB", + "title": "image.png", + "role": "image", + "mime": "image/png", + "position": 10, + "dataFileName": "9_Introduction_image.png" + } + ] + }, + { + "isClone": false, + "noteId": "WkM7gsEUyCXs", + "notePath": [ + "pOsGYCXsbNQG", + "LMAv4Uy3Wk6J", + "WkM7gsEUyCXs" + ], + "title": "AI Provider Information", + "notePosition": 20, + "prefix": null, + "isExpanded": false, + 
"type": "text", + "mime": "text/html", + "attributes": [ + { + "type": "relation", + "name": "internalLink", + "value": "7EdTxPADv95W", + "isInheritable": false, + "position": 10 + }, + { + "type": "relation", + "name": "internalLink", + "value": "ZavFigBX9AwP", + "isInheritable": false, + "position": 20 + }, + { + "type": "relation", + "name": "internalLink", + "value": "e0lkirXEiSNc", + "isInheritable": false, + "position": 30 + }, + { + "type": "label", + "name": "viewType", + "value": "list", + "isInheritable": false, + "position": 10 + } + ], + "format": "markdown", + "dataFileName": "AI Provider Information.md", + "attachments": [ + { + "attachmentId": "BNN9Vv3JEf2X", + "title": "image.png", + "role": "image", + "mime": "image/png", + "position": 10, + "dataFileName": "AI Provider Information_im.png" + }, + { + "attachmentId": "diIollN3KEbn", + "title": "image.png", + "role": "image", + "mime": "image/png", + "position": 10, + "dataFileName": "1_AI Provider Information_im.png" + } + ], + "dirFileName": "AI Provider Information", + "children": [ + { + "isClone": false, + "noteId": "7EdTxPADv95W", + "notePath": [ + "pOsGYCXsbNQG", + "LMAv4Uy3Wk6J", + "WkM7gsEUyCXs", + "7EdTxPADv95W" + ], + "title": "Ollama", + "notePosition": 10, + "prefix": null, + "isExpanded": false, + "type": "book", + "mime": "", + "attributes": [ + { + "type": "label", + "name": "viewType", + "value": "list", + "isInheritable": false, + "position": 10 + }, + { + "type": "label", + "name": "expanded", + "value": "", + "isInheritable": false, + "position": 20 + } + ], + "attachments": [], + "dirFileName": "Ollama", + "children": [ + { + "isClone": false, + "noteId": "vvUCN7FDkq7G", + "notePath": [ + "pOsGYCXsbNQG", + "LMAv4Uy3Wk6J", + "WkM7gsEUyCXs", + "7EdTxPADv95W", + "vvUCN7FDkq7G" + ], + "title": "Installing Ollama", + "notePosition": 10, + "prefix": null, + "isExpanded": false, + "type": "text", + "mime": "text/html", + "attributes": [], + "format": "markdown", + "dataFileName": 
"Installing Ollama.md", + "attachments": [ + { + "attachmentId": "CG9q2FfKuEsr", + "title": "image.png", + "role": "image", + "mime": "image/png", + "position": 10, + "dataFileName": "Installing Ollama_image.png" + }, + { + "attachmentId": "GEcgXxUE1IDx", + "title": "image.png", + "role": "image", + "mime": "image/png", + "position": 10, + "dataFileName": "1_Installing Ollama_image.png" + }, + { + "attachmentId": "OMGDDxjScXCl", + "title": "image.png", + "role": "image", + "mime": "image/png", + "position": 10, + "dataFileName": "2_Installing Ollama_image.png" + }, + { + "attachmentId": "Qacg7ibmEBkZ", + "title": "image.png", + "role": "image", + "mime": "image/png", + "position": 10, + "dataFileName": "3_Installing Ollama_image.png" + }, + { + "attachmentId": "vSjU929VnBm4", + "title": "image.png", + "role": "image", + "mime": "image/png", + "position": 10, + "dataFileName": "4_Installing Ollama_image.png" + }, + { + "attachmentId": "xGrxARTj79Gv", + "title": "image.png", + "role": "image", + "mime": "image/png", + "position": 10, + "dataFileName": "5_Installing Ollama_image.png" + } + ] + } + ] + }, + { + "isClone": false, + "noteId": "ZavFigBX9AwP", + "notePath": [ + "pOsGYCXsbNQG", + "LMAv4Uy3Wk6J", + "WkM7gsEUyCXs", + "ZavFigBX9AwP" + ], + "title": "OpenAI", + "notePosition": 20, + "prefix": null, + "isExpanded": false, + "type": "text", + "mime": "text/html", + "attributes": [], + "format": "markdown", + "dataFileName": "OpenAI.md", + "attachments": [] + }, + { + "isClone": false, + "noteId": "e0lkirXEiSNc", + "notePath": [ + "pOsGYCXsbNQG", + "LMAv4Uy3Wk6J", + "WkM7gsEUyCXs", + "e0lkirXEiSNc" + ], + "title": "Anthropic", + "notePosition": 30, + "prefix": null, + "isExpanded": false, + "type": "text", + "mime": "text/html", + "attributes": [], + "format": "markdown", + "dataFileName": "Anthropic.md", + "attachments": [] + } + ] + } + ] + }, { "isClone": false, "noteId": "CdNpE2pqjmI6", @@ -10606,7 +10969,7 @@ "CdNpE2pqjmI6" ], "title": "Scripting", - 
"notePosition": 320, + "notePosition": 330, "prefix": null, "isExpanded": false, "type": "text", diff --git a/docs/User Guide/User Guide/AI/1_AI Provider Information_im.png b/docs/User Guide/User Guide/AI/1_AI Provider Information_im.png new file mode 100644 index 000000000..80627e0b0 Binary files /dev/null and b/docs/User Guide/User Guide/AI/1_AI Provider Information_im.png differ diff --git a/docs/User Guide/User Guide/AI/1_Introduction_image.png b/docs/User Guide/User Guide/AI/1_Introduction_image.png new file mode 100644 index 000000000..c0955f2bf Binary files /dev/null and b/docs/User Guide/User Guide/AI/1_Introduction_image.png differ diff --git a/docs/User Guide/User Guide/AI/2_Introduction_image.png b/docs/User Guide/User Guide/AI/2_Introduction_image.png new file mode 100644 index 000000000..3908a5f48 Binary files /dev/null and b/docs/User Guide/User Guide/AI/2_Introduction_image.png differ diff --git a/docs/User Guide/User Guide/AI/3_Introduction_image.png b/docs/User Guide/User Guide/AI/3_Introduction_image.png new file mode 100644 index 000000000..0fe0c8186 Binary files /dev/null and b/docs/User Guide/User Guide/AI/3_Introduction_image.png differ diff --git a/docs/User Guide/User Guide/AI/4_Introduction_image.png b/docs/User Guide/User Guide/AI/4_Introduction_image.png new file mode 100644 index 000000000..ef2ad8a0f Binary files /dev/null and b/docs/User Guide/User Guide/AI/4_Introduction_image.png differ diff --git a/docs/User Guide/User Guide/AI/5_Introduction_image.png b/docs/User Guide/User Guide/AI/5_Introduction_image.png new file mode 100644 index 000000000..ede9d6aae Binary files /dev/null and b/docs/User Guide/User Guide/AI/5_Introduction_image.png differ diff --git a/docs/User Guide/User Guide/AI/6_Introduction_image.png b/docs/User Guide/User Guide/AI/6_Introduction_image.png new file mode 100644 index 000000000..e784df790 Binary files /dev/null and b/docs/User Guide/User Guide/AI/6_Introduction_image.png differ diff --git a/docs/User 
Guide/User Guide/AI/7_Introduction_image.png b/docs/User Guide/User Guide/AI/7_Introduction_image.png new file mode 100644 index 000000000..96d56b39c Binary files /dev/null and b/docs/User Guide/User Guide/AI/7_Introduction_image.png differ diff --git a/docs/User Guide/User Guide/AI/8_Introduction_image.png b/docs/User Guide/User Guide/AI/8_Introduction_image.png new file mode 100644 index 000000000..afb6653ca Binary files /dev/null and b/docs/User Guide/User Guide/AI/8_Introduction_image.png differ diff --git a/docs/User Guide/User Guide/AI/9_Introduction_image.png b/docs/User Guide/User Guide/AI/9_Introduction_image.png new file mode 100644 index 000000000..f50f69553 Binary files /dev/null and b/docs/User Guide/User Guide/AI/9_Introduction_image.png differ diff --git a/docs/User Guide/User Guide/AI/AI Provider Information.md b/docs/User Guide/User Guide/AI/AI Provider Information.md new file mode 100644 index 000000000..333dcd184 --- /dev/null +++ b/docs/User Guide/User Guide/AI/AI Provider Information.md @@ -0,0 +1,15 @@ +# AI Provider Information +Currently, we support the following providers: + +* Ollama +* OpenAI +* Anthropic +* Voyage AI + +To set your preferred chat model, you'll want to enter the provider's name here: + +
+ +And to set your preferred embedding provider: + +
\ No newline at end of file diff --git a/docs/User Guide/User Guide/AI/AI Provider Information/Anthropic.md b/docs/User Guide/User Guide/AI/AI Provider Information/Anthropic.md new file mode 100644 index 000000000..e69de29bb diff --git a/docs/User Guide/User Guide/AI/AI Provider Information/Ollama/1_Installing Ollama_image.png b/docs/User Guide/User Guide/AI/AI Provider Information/Ollama/1_Installing Ollama_image.png new file mode 100644 index 000000000..821966b77 Binary files /dev/null and b/docs/User Guide/User Guide/AI/AI Provider Information/Ollama/1_Installing Ollama_image.png differ diff --git a/docs/User Guide/User Guide/AI/AI Provider Information/Ollama/2_Installing Ollama_image.png b/docs/User Guide/User Guide/AI/AI Provider Information/Ollama/2_Installing Ollama_image.png new file mode 100644 index 000000000..7e585a751 Binary files /dev/null and b/docs/User Guide/User Guide/AI/AI Provider Information/Ollama/2_Installing Ollama_image.png differ diff --git a/docs/User Guide/User Guide/AI/AI Provider Information/Ollama/3_Installing Ollama_image.png b/docs/User Guide/User Guide/AI/AI Provider Information/Ollama/3_Installing Ollama_image.png new file mode 100644 index 000000000..0a239f35c Binary files /dev/null and b/docs/User Guide/User Guide/AI/AI Provider Information/Ollama/3_Installing Ollama_image.png differ diff --git a/docs/User Guide/User Guide/AI/AI Provider Information/Ollama/4_Installing Ollama_image.png b/docs/User Guide/User Guide/AI/AI Provider Information/Ollama/4_Installing Ollama_image.png new file mode 100644 index 000000000..b5c4c2930 Binary files /dev/null and b/docs/User Guide/User Guide/AI/AI Provider Information/Ollama/4_Installing Ollama_image.png differ diff --git a/docs/User Guide/User Guide/AI/AI Provider Information/Ollama/5_Installing Ollama_image.png b/docs/User Guide/User Guide/AI/AI Provider Information/Ollama/5_Installing Ollama_image.png new file mode 100644 index 000000000..0c02db50f Binary files /dev/null and b/docs/User 
Guide/User Guide/AI/AI Provider Information/Ollama/5_Installing Ollama_image.png differ diff --git a/docs/User Guide/User Guide/AI/AI Provider Information/Ollama/Installing Ollama.md b/docs/User Guide/User Guide/AI/AI Provider Information/Ollama/Installing Ollama.md new file mode 100644 index 000000000..650ac76e9 --- /dev/null +++ b/docs/User Guide/User Guide/AI/AI Provider Information/Ollama/Installing Ollama.md @@ -0,0 +1,25 @@ +# Installing Ollama +[Ollama](https://ollama.com/) can be installed in a variety of ways, and even runs [within a Docker container](https://hub.docker.com/r/ollama/ollama). Ollama will be noticeably quicker when running on a GPU (Nvidia, AMD, Intel), but it can run on CPU and RAM. To install Ollama without any other prerequisites, you can follow their [installer](https://ollama.com/download): + +
+ +After their installer completes, if you're on Windows, you should see an entry in the start menu to run it: + +
+ +Also, you should have access to the `ollama` CLI via Powershell or CMD: + +
+ +After Ollama is installed, you can go ahead and `pull` the models you want to use and run. Here's a command to pull my favorite tool-compatible model and embedding model as of April 2025: + +``` +ollama pull llama3.1:8b +ollama pull mxbai-embed-large +``` + +Also, you can make sure it's running by going to [http://localhost:11434](http://localhost:11434) and you should get the following response (port 11434 being the “normal” Ollama port): + +
+ +Now that you have Ollama up and running, have a few models pulled, you're ready to go to go ahead and start using Ollama as both a chat provider, and embedding provider! \ No newline at end of file diff --git a/docs/User Guide/User Guide/AI/AI Provider Information/Ollama/Installing Ollama_image.png b/docs/User Guide/User Guide/AI/AI Provider Information/Ollama/Installing Ollama_image.png new file mode 100644 index 000000000..cf5ee38d4 Binary files /dev/null and b/docs/User Guide/User Guide/AI/AI Provider Information/Ollama/Installing Ollama_image.png differ diff --git a/docs/User Guide/User Guide/AI/AI Provider Information/OpenAI.md b/docs/User Guide/User Guide/AI/AI Provider Information/OpenAI.md new file mode 100644 index 000000000..e69de29bb diff --git a/docs/User Guide/User Guide/AI/AI Provider Information_im.png b/docs/User Guide/User Guide/AI/AI Provider Information_im.png new file mode 100644 index 000000000..aa19e949d Binary files /dev/null and b/docs/User Guide/User Guide/AI/AI Provider Information_im.png differ diff --git a/docs/User Guide/User Guide/AI/Introduction.md b/docs/User Guide/User Guide/AI/Introduction.md new file mode 100644 index 000000000..5591e0834 --- /dev/null +++ b/docs/User Guide/User Guide/AI/Introduction.md @@ -0,0 +1,89 @@ +# Introduction +
An example chat with an LLM
+ +The AI / LLM features within Trilium Notes are designed to allow you to interact with your Notes in a variety of ways, using as many of the major providers as we can support.  + +In addition to being able to send chats to LLM providers such as OpenAI, Anthropic, and Ollama - we also support agentic tool calling, and embeddings. + +The quickest way to get started is to navigate to the “AI/LLM” settings: + +
+ +Enable the feature: + +
+ +## Embeddings + +**Embeddings** are important as it allows us to have an compact AI “summary” (it's not human readable text) of each of your Notes, that we can then perform mathematical functions on (such as cosine similarity) to smartly figure out which Notes to send as context to the LLM when you're chatting, among other useful functions. + +You will then need to set up the AI “provider” that you wish to use to create the embeddings for your Notes. Currently OpenAI, Voyage AI, and Ollama are supported providers for embedding generation. + +In the following example, we're going to use our self-hosted Ollama instance to create the embeddings for our Notes. You can see additional documentation about installing your own Ollama locally in Installing Ollama. + +To see what embedding models Ollama has available, you can check out [this search](https://ollama.com/search?c=embedding)on their website, and then `pull` whichever one you want to try out. As of 4/15/25, my personal favorite is `mxbai-embed-large`. + +First, we'll need to select the Ollama provider from the tabs of providers, then we will enter in the Base URL for our Ollama. Since our Ollama is running on our local machine, our Base URL is `http://localhost:11434`. We will then hit the “refresh” button to have it fetch our models: + +
+ +When selecting the dropdown for the “Embedding Model”, embedding models should be at the top of the list, separated by regular chat models with a horizontal line, as seen below: + +
+ +After selecting an embedding model, embeddings should automatically begin to be generated by checking the embedding statistics at the top of the “AI/LLM” settings panel: + +
+ +If you don't see any embeddings being created, you will want to scroll to the bottom of the settings, and hit “Recreate All Embeddings”: + +
+ +Creating the embeddings will take some time, and will be regenerated when a Note is created, updated, or deleted (removed). + +If for some reason you choose to change your embedding provider, or the model used, you'll need to recreate all embeddings. + +## Tools + +Tools are essentially functions that we provide to the various LLM providers, and then LLMs can respond in a specific format that tells us what tool function and parameters they would like to invoke. We then execute these tools, and provide it as additional context in the Chat conversation.  + +These are the tools that currently exist, and will certainly be updated to be more effectively (and even more to be added!): + +* `search_notes` + * Semantic search +* `keyword_search` + * Keyword-based search +* `attribute_search` + * Attribute-specific search +* `search_suggestion` + * Search syntax helper +* `read_note` + * Read note content (helps the LLM read Notes) +* `create_note` + * Create a Note +* `update_note` + * Update a Note +* `manage_attributes` + * Manage attributes on a Note +* `manage_relationships` + * Manage the various relationships between Notes +* `extract_content` + * Used to smartly extract content from a Note +* `calendar_integration` + * Used to find date notes, create date notes, get the daily note, etc. + +When Tools are executed within your Chat, you'll see output like the following: + +
+ +You don't need to tell the LLM to execute a certain tool, it should “smartly” call tools and automatically execute them as needed. + +## Overview + +Now that you know about embeddings and tools, you can just go ahead and use the “Chat with Notes” button, where you can go ahead and start chatting!: + +
+ +If you don't see the “Chat with Notes” button on your side launchbar, you might need to move it from the “Available Launchers” section to the “Visible Launchers” section: + +
\ No newline at end of file diff --git a/docs/User Guide/User Guide/AI/Introduction_image.png b/docs/User Guide/User Guide/AI/Introduction_image.png new file mode 100644 index 000000000..5a8657faf Binary files /dev/null and b/docs/User Guide/User Guide/AI/Introduction_image.png differ diff --git a/package-lock.json b/package-lock.json index c21f48503..581465dc8 100644 --- a/package-lock.json +++ b/package-lock.json @@ -9,6 +9,7 @@ "version": "0.93.0", "license": "AGPL-3.0-only", "dependencies": { + "@anthropic-ai/sdk": "0.39.0", "@braintree/sanitize-url": "7.1.1", "@electron/remote": "2.1.2", "@highlightjs/cdn-assets": "11.11.1", @@ -67,6 +68,8 @@ "multer": "1.4.5-lts.2", "normalize-strings": "1.1.1", "normalize.css": "8.0.1", + "ollama": "0.5.14", + "openai": "4.93.0", "rand-token": "1.0.1", "safe-compare": "1.1.4", "sanitize-filename": "1.6.3", @@ -254,6 +257,36 @@ "url": "https://github.com/sponsors/antfu" } }, + "node_modules/@anthropic-ai/sdk": { + "version": "0.39.0", + "resolved": "https://registry.npmjs.org/@anthropic-ai/sdk/-/sdk-0.39.0.tgz", + "integrity": "sha512-eMyDIPRZbt1CCLErRCi3exlAvNkBtRe+kW5vvJyef93PmNr/clstYgHhtvmkxN82nlKgzyGPCyGxrm0JQ1ZIdg==", + "license": "MIT", + "dependencies": { + "@types/node": "^18.11.18", + "@types/node-fetch": "^2.6.4", + "abort-controller": "^3.0.0", + "agentkeepalive": "^4.2.1", + "form-data-encoder": "1.7.2", + "formdata-node": "^4.3.2", + "node-fetch": "^2.6.7" + } + }, + "node_modules/@anthropic-ai/sdk/node_modules/@types/node": { + "version": "18.19.86", + "resolved": "https://registry.npmjs.org/@types/node/-/node-18.19.86.tgz", + "integrity": "sha512-fifKayi175wLyKyc5qUfyENhQ1dCNI1UNjp653d8kuYcPQN5JhX3dGuP/XmvPTg/xRBn1VTLpbmi+H/Mr7tLfQ==", + "license": "MIT", + "dependencies": { + "undici-types": "~5.26.4" + } + }, + "node_modules/@anthropic-ai/sdk/node_modules/undici-types": { + "version": "5.26.5", + "resolved": "https://registry.npmjs.org/undici-types/-/undici-types-5.26.5.tgz", + "integrity": 
"sha512-JlCMO+ehdEIKqlFxk6IfVoAUVmgz7cU7zD/h9XZ0qzeosSHmUJVOzSQvvYSYWXkFXC+IfLKSIffhv0sVZup6pA==", + "license": "MIT" + }, "node_modules/@apidevtools/json-schema-ref-parser": { "version": "9.1.2", "resolved": "https://registry.npmjs.org/@apidevtools/json-schema-ref-parser/-/json-schema-ref-parser-9.1.2.tgz", @@ -5260,6 +5293,16 @@ "undici-types": "~6.21.0" } }, + "node_modules/@types/node-fetch": { + "version": "2.6.12", + "resolved": "https://registry.npmjs.org/@types/node-fetch/-/node-fetch-2.6.12.tgz", + "integrity": "sha512-8nneRWKCg3rMtF69nLQJnOYUcbafYeFSjqkw3jCRLsqkWFlHaoQrr5mXmofFGOx3DKn7UfmBMyov8ySvLRVldA==", + "license": "MIT", + "dependencies": { + "@types/node": "*", + "form-data": "^4.0.0" + } + }, "node_modules/@types/prop-types": { "version": "15.7.14", "resolved": "https://registry.npmjs.org/@types/prop-types/-/prop-types-15.7.14.tgz", @@ -6238,7 +6281,6 @@ "version": "4.5.0", "resolved": "https://registry.npmjs.org/agentkeepalive/-/agentkeepalive-4.5.0.tgz", "integrity": "sha512-5GG/5IbQQpC9FpkRGsSvZI5QYeSCzlJHdpBQntCsuTOxhKD8lqKhrleg2Yi7yvMIf82Ycmmqln9U8V9qwEiJew==", - "dev": true, "license": "MIT", "dependencies": { "humanize-ms": "^1.2.1" @@ -10299,7 +10341,6 @@ "version": "0.1.13", "resolved": "https://registry.npmjs.org/encoding/-/encoding-0.1.13.tgz", "integrity": "sha512-ETBauow1T35Y/WZMkio9jiM0Z5xjHHmJ4XmjZOq1l/dXz3lr2sRn87nJy20RupqSh1F2m3HHPSp8ShIPQJrJ3A==", - "dev": true, "license": "MIT", "optional": true, "dependencies": { @@ -11670,6 +11711,12 @@ "node": ">= 6" } }, + "node_modules/form-data-encoder": { + "version": "1.7.2", + "resolved": "https://registry.npmjs.org/form-data-encoder/-/form-data-encoder-1.7.2.tgz", + "integrity": "sha512-qfqtYan3rxrnCk1VYaA4H+Ms9xdpPqvLZa6xmMgFvhO32x7/3J/ExcTd6qpxM0vH2GdMI+poehyBZvqfMTto8A==", + "license": "MIT" + }, "node_modules/form-data/node_modules/mime-db": { "version": "1.52.0", "resolved": "https://registry.npmjs.org/mime-db/-/mime-db-1.52.0.tgz", @@ -11691,6 +11738,19 @@ "node": ">= 0.6" } }, + 
"node_modules/formdata-node": { + "version": "4.4.1", + "resolved": "https://registry.npmjs.org/formdata-node/-/formdata-node-4.4.1.tgz", + "integrity": "sha512-0iirZp3uVDjVGt9p49aTaqjk84TrglENEDuqfdlZQ1roC9CWlPk6Avf8EEnZNcAqPonwkG35x4n3ww/1THYAeQ==", + "license": "MIT", + "dependencies": { + "node-domexception": "1.0.0", + "web-streams-polyfill": "4.0.0-beta.3" + }, + "engines": { + "node": ">= 12.20" + } + }, "node_modules/formidable": { "version": "3.5.2", "resolved": "https://registry.npmjs.org/formidable/-/formidable-3.5.2.tgz", @@ -12666,7 +12726,6 @@ "version": "1.2.1", "resolved": "https://registry.npmjs.org/humanize-ms/-/humanize-ms-1.2.1.tgz", "integrity": "sha512-Fl70vYtsAFb/C06PTS9dZBo7ihau+Tu/DNCk/OyHhea07S+aeMWpFFkUaXRa8fI+ScZbEI8dfSxwY7gxZ9SAVQ==", - "dev": true, "license": "MIT", "dependencies": { "ms": "^2.0.0" @@ -15516,11 +15575,29 @@ "semver": "^7.3.5" } }, + "node_modules/node-domexception": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/node-domexception/-/node-domexception-1.0.0.tgz", + "integrity": "sha512-/jKZoMpw0F8GRwl4/eLROPA3cfcXtLApP0QzLmUT/HuPCZWyB7IY9ZrMeKw2O/nFIqPQB3PVM9aYm0F312AXDQ==", + "funding": [ + { + "type": "github", + "url": "https://github.com/sponsors/jimmywarting" + }, + { + "type": "github", + "url": "https://paypal.me/jimmywarting" + } + ], + "license": "MIT", + "engines": { + "node": ">=10.5.0" + } + }, "node_modules/node-fetch": { "version": "2.7.0", "resolved": "https://registry.npmjs.org/node-fetch/-/node-fetch-2.7.0.tgz", "integrity": "sha512-c4FRfUm/dbcWZ7U+1Wq0AwCyFL+3nt2bEw05wfxSz+DWpWsitgmSgYmy2dQdWyKC1694ELPqMs/YzUSNozLt8A==", - "dev": true, "license": "MIT", "dependencies": { "whatwg-url": "^5.0.0" @@ -15541,21 +15618,18 @@ "version": "0.0.3", "resolved": "https://registry.npmjs.org/tr46/-/tr46-0.0.3.tgz", "integrity": "sha512-N3WMsuqV66lT30CrXNbEjx4GEwlow3v6rr4mCcv6prnfwhS01rkgyFdjPNBYd9br7LpXV1+Emh01fHnq2Gdgrw==", - "dev": true, "license": "MIT" }, 
"node_modules/node-fetch/node_modules/webidl-conversions": { "version": "3.0.1", "resolved": "https://registry.npmjs.org/webidl-conversions/-/webidl-conversions-3.0.1.tgz", "integrity": "sha512-2JAn3z8AR6rjK8Sm8orRC0h/bcl/DqL7tRPdGZ4I1CjdF+EaMLmYxBHyXuKL849eucPFhvBoxMsflfOb8kxaeQ==", - "dev": true, "license": "BSD-2-Clause" }, "node_modules/node-fetch/node_modules/whatwg-url": { "version": "5.0.0", "resolved": "https://registry.npmjs.org/whatwg-url/-/whatwg-url-5.0.0.tgz", "integrity": "sha512-saE57nupxk6v3HY35+jzBwYa0rKSy0XR8JSxZPwgLr7ys0IBzhGviA1/TUGJLmSVqs8pb9AnvICXEuOHLprYTw==", - "dev": true, "license": "MIT", "dependencies": { "tr46": "~0.0.3", @@ -15851,6 +15925,15 @@ "node": "^10.13.0 || >=12.0.0" } }, + "node_modules/ollama": { + "version": "0.5.14", + "resolved": "https://registry.npmjs.org/ollama/-/ollama-0.5.14.tgz", + "integrity": "sha512-pvOuEYa2WkkAumxzJP0RdEYHkbZ64AYyyUszXVX7ruLvk5L+EiO2G71da2GqEQ4IAk4j6eLoUbGk5arzFT1wJA==", + "license": "MIT", + "dependencies": { + "whatwg-fetch": "^3.6.20" + } + }, "node_modules/omggif": { "version": "1.0.10", "resolved": "https://registry.npmjs.org/omggif/-/omggif-1.0.10.tgz", @@ -15910,6 +15993,51 @@ "dev": true, "license": "MIT" }, + "node_modules/openai": { + "version": "4.93.0", + "resolved": "https://registry.npmjs.org/openai/-/openai-4.93.0.tgz", + "integrity": "sha512-2kONcISbThKLfm7T9paVzg+QCE1FOZtNMMUfXyXckUAoXRRS/mTP89JSDHPMp8uM5s0bz28RISbvQjArD6mgUQ==", + "license": "Apache-2.0", + "dependencies": { + "@types/node": "^18.11.18", + "@types/node-fetch": "^2.6.4", + "abort-controller": "^3.0.0", + "agentkeepalive": "^4.2.1", + "form-data-encoder": "1.7.2", + "formdata-node": "^4.3.2", + "node-fetch": "^2.6.7" + }, + "bin": { + "openai": "bin/cli" + }, + "peerDependencies": { + "ws": "^8.18.0", + "zod": "^3.23.8" + }, + "peerDependenciesMeta": { + "ws": { + "optional": true + }, + "zod": { + "optional": true + } + } + }, + "node_modules/openai/node_modules/@types/node": { + "version": "18.19.86", + 
"resolved": "https://registry.npmjs.org/@types/node/-/node-18.19.86.tgz", + "integrity": "sha512-fifKayi175wLyKyc5qUfyENhQ1dCNI1UNjp653d8kuYcPQN5JhX3dGuP/XmvPTg/xRBn1VTLpbmi+H/Mr7tLfQ==", + "license": "MIT", + "dependencies": { + "undici-types": "~5.26.4" + } + }, + "node_modules/openai/node_modules/undici-types": { + "version": "5.26.5", + "resolved": "https://registry.npmjs.org/undici-types/-/undici-types-5.26.5.tgz", + "integrity": "sha512-JlCMO+ehdEIKqlFxk6IfVoAUVmgz7cU7zD/h9XZ0qzeosSHmUJVOzSQvvYSYWXkFXC+IfLKSIffhv0sVZup6pA==", + "license": "MIT" + }, "node_modules/openapi-types": { "version": "12.1.3", "resolved": "https://registry.npmjs.org/openapi-types/-/openapi-types-12.1.3.tgz", @@ -21056,6 +21184,15 @@ "defaults": "^1.0.3" } }, + "node_modules/web-streams-polyfill": { + "version": "4.0.0-beta.3", + "resolved": "https://registry.npmjs.org/web-streams-polyfill/-/web-streams-polyfill-4.0.0-beta.3.tgz", + "integrity": "sha512-QW95TCTaHmsYfHDybGMwO5IJIM93I/6vTRk+daHTWFPhwh+C8Cg7j7XyKrwrj8Ib6vYXe0ocYNrmzY4xAAN6ug==", + "license": "MIT", + "engines": { + "node": ">= 14" + } + }, "node_modules/web-worker": { "version": "1.5.0", "resolved": "https://registry.npmjs.org/web-worker/-/web-worker-1.5.0.tgz", @@ -21292,6 +21429,12 @@ "node": ">=18" } }, + "node_modules/whatwg-fetch": { + "version": "3.6.20", + "resolved": "https://registry.npmjs.org/whatwg-fetch/-/whatwg-fetch-3.6.20.tgz", + "integrity": "sha512-EqhiFU6daOA8kpjOWTL0olhVOF3i7OrFzSYiGsEMB8GcXS+RrzauAERX65xMeNWVqxA6HXH2m69Z9LaKKdisfg==", + "license": "MIT" + }, "node_modules/whatwg-mimetype": { "version": "4.0.0", "resolved": "https://registry.npmjs.org/whatwg-mimetype/-/whatwg-mimetype-4.0.0.tgz", diff --git a/package.json b/package.json index 6803b838d..d766e456d 100644 --- a/package.json +++ b/package.json @@ -69,6 +69,7 @@ "chore:generate-openapi": "tsx bin/generate-openapi.js" }, "dependencies": { + "@anthropic-ai/sdk": "0.39.0", "@braintree/sanitize-url": "7.1.1", "@electron/remote": "2.1.2", 
"@highlightjs/cdn-assets": "11.11.1", @@ -127,6 +128,8 @@ "multer": "1.4.5-lts.2", "normalize-strings": "1.1.1", "normalize.css": "8.0.1", + "ollama": "0.5.14", + "openai": "4.93.0", "rand-token": "1.0.1", "safe-compare": "1.1.4", "sanitize-filename": "1.6.3", diff --git a/src/app.ts b/src/app.ts index 58dfaa4ce..66007a4fd 100644 --- a/src/app.ts +++ b/src/app.ts @@ -18,6 +18,8 @@ import sql_init from "./services/sql_init.js"; import { auth } from "express-openid-connect"; import openID from "./services/open_id.js"; import { t } from "i18next"; +import eventService from "./services/events.js"; +import log from "./services/log.js"; await import("./services/handlers.js"); await import("./becca/becca_loader.js"); @@ -29,6 +31,42 @@ const scriptDir = dirname(fileURLToPath(import.meta.url)); // Initialize DB sql_init.initializeDb(); +// Listen for database initialization event +eventService.subscribe(eventService.DB_INITIALIZED, async () => { + try { + log.info("Database initialized, setting up LLM features"); + + // Initialize embedding providers + const { initializeEmbeddings } = await import("./services/llm/embeddings/init.js"); + await initializeEmbeddings(); + + // Initialize the index service for LLM functionality + const { default: indexService } = await import("./services/llm/index_service.js"); + await indexService.initialize().catch(e => console.error("Failed to initialize index service:", e)); + + log.info("LLM features initialized successfully"); + } catch (error) { + console.error("Error initializing LLM features:", error); + } +}); + +// Initialize LLM features only if database is already initialized +if (sql_init.isDbInitialized()) { + try { + // Initialize embedding providers + const { initializeEmbeddings } = await import("./services/llm/embeddings/init.js"); + await initializeEmbeddings(); + + // Initialize the index service for LLM functionality + const { default: indexService } = await import("./services/llm/index_service.js"); + await 
indexService.initialize().catch(e => console.error("Failed to initialize index service:", e)); + } catch (error) { + console.error("Error initializing LLM features:", error); + } +} else { + console.log("Database not initialized yet. LLM features will be initialized after setup."); +} + // view engine setup app.set("views", path.join(scriptDir, "views")); app.set("view engine", "ejs"); diff --git a/src/becca/entities/bnote_embedding.ts b/src/becca/entities/bnote_embedding.ts new file mode 100644 index 000000000..2f696ee6f --- /dev/null +++ b/src/becca/entities/bnote_embedding.ts @@ -0,0 +1,73 @@ +import AbstractBeccaEntity from "./abstract_becca_entity.js"; +import dateUtils from "../../services/date_utils.js"; +import type { NoteEmbeddingRow } from "./rows.js"; + +/** + * Entity representing a note's vector embedding for semantic search and AI features + */ +class BNoteEmbedding extends AbstractBeccaEntity { + static get entityName() { + return "note_embeddings"; + } + static get primaryKeyName() { + return "embedId"; + } + static get hashedProperties() { + return ["embedId", "noteId", "providerId", "modelId", "dimension", "version"]; + } + + embedId!: string; + noteId!: string; + providerId!: string; + modelId!: string; + dimension!: number; + embedding!: Buffer; + version!: number; + + constructor(row?: NoteEmbeddingRow) { + super(); + + if (row) { + this.updateFromRow(row); + } + } + + updateFromRow(row: NoteEmbeddingRow): void { + this.embedId = row.embedId; + this.noteId = row.noteId; + this.providerId = row.providerId; + this.modelId = row.modelId; + this.dimension = row.dimension; + this.embedding = row.embedding; + this.version = row.version; + this.dateCreated = row.dateCreated; + this.dateModified = row.dateModified; + this.utcDateCreated = row.utcDateCreated; + this.utcDateModified = row.utcDateModified; + } + + beforeSaving() { + super.beforeSaving(); + + this.dateModified = dateUtils.localNowDateTime(); + this.utcDateModified = 
dateUtils.utcNowDateTime(); + } + + getPojo(): NoteEmbeddingRow { + return { + embedId: this.embedId, + noteId: this.noteId, + providerId: this.providerId, + modelId: this.modelId, + dimension: this.dimension, + embedding: this.embedding, + version: this.version, + dateCreated: this.dateCreated!, + dateModified: this.dateModified!, + utcDateCreated: this.utcDateCreated, + utcDateModified: this.utcDateModified! + }; + } +} + +export default BNoteEmbedding; diff --git a/src/becca/entities/rows.ts b/src/becca/entities/rows.ts index 3730ed922..6b7d73a7e 100644 --- a/src/becca/entities/rows.ts +++ b/src/becca/entities/rows.ts @@ -139,3 +139,17 @@ export interface NoteRow { utcDateModified: string; content?: string | Buffer; } + +export interface NoteEmbeddingRow { + embedId: string; + noteId: string; + providerId: string; + modelId: string; + dimension: number; + embedding: Buffer; + version: number; + dateCreated: string; + utcDateCreated: string; + dateModified: string; + utcDateModified: string; +} diff --git a/src/becca/entity_constructor.ts b/src/becca/entity_constructor.ts index 18f7a14c7..882f62492 100644 --- a/src/becca/entity_constructor.ts +++ b/src/becca/entity_constructor.ts @@ -6,6 +6,7 @@ import BBlob from "./entities/bblob.js"; import BBranch from "./entities/bbranch.js"; import BEtapiToken from "./entities/betapi_token.js"; import BNote from "./entities/bnote.js"; +import BNoteEmbedding from "./entities/bnote_embedding.js"; import BOption from "./entities/boption.js"; import BRecentNote from "./entities/brecent_note.js"; import BRevision from "./entities/brevision.js"; @@ -19,6 +20,7 @@ const ENTITY_NAME_TO_ENTITY: Record & EntityClass> branches: BBranch, etapi_tokens: BEtapiToken, notes: BNote, + note_embeddings: BNoteEmbedding, options: BOption, recent_notes: BRecentNote, revisions: BRevision diff --git a/src/public/app/components/app_context.ts b/src/public/app/components/app_context.ts index ff1f3fa50..57811da7e 100644 --- 
a/src/public/app/components/app_context.ts +++ b/src/public/app/components/app_context.ts @@ -89,6 +89,8 @@ export type CommandMappings = { closeHlt: CommandData; showLaunchBarSubtree: CommandData; showRevisions: CommandData; + showLlmChat: CommandData; + createAiChat: CommandData; showOptions: CommandData & { section: string; }; diff --git a/src/public/app/components/root_command_executor.ts b/src/public/app/components/root_command_executor.ts index eb46e3139..1e16fae81 100644 --- a/src/public/app/components/root_command_executor.ts +++ b/src/public/app/components/root_command_executor.ts @@ -7,6 +7,9 @@ import protectedSessionService from "../services/protected_session.js"; import options from "../services/options.js"; import froca from "../services/froca.js"; import utils from "../services/utils.js"; +import LlmChatPanel from "../widgets/llm_chat_panel.js"; +import toastService from "../services/toast.js"; +import noteCreateService from "../services/note_create.js"; export default class RootCommandExecutor extends Component { editReadOnlyNoteCommand() { @@ -226,4 +229,35 @@ export default class RootCommandExecutor extends Component { appContext.tabManager.activateNoteContext(tab.ntxId); } } + + async createAiChatCommand() { + try { + // Create a new AI Chat note at the root level + const rootNoteId = "root"; + + const result = await noteCreateService.createNote(rootNoteId, { + title: "New AI Chat", + type: "aiChat", + content: JSON.stringify({ + messages: [], + title: "New AI Chat" + }) + }); + + if (!result.note) { + toastService.showError("Failed to create AI Chat note"); + return; + } + + await appContext.tabManager.openTabWithNoteWithHoisting(result.note.noteId, { + activate: true + }); + + toastService.showMessage("Created new AI Chat note"); + } + catch (e) { + console.error("Error creating AI Chat note:", e); + toastService.showError("Failed to create AI Chat note: " + (e as Error).message); + } + } } diff --git a/src/public/app/doc_notes/en/User 
Guide/!!!meta.json b/src/public/app/doc_notes/en/User Guide/!!!meta.json index d3e50116c..7bd4fbc8f 100644 --- a/src/public/app/doc_notes/en/User Guide/!!!meta.json +++ b/src/public/app/doc_notes/en/User Guide/!!!meta.json @@ -1 +1 @@ -[{"id":"_help_Otzi9La2YAUX","title":"Installation & Setup","type":"book","attributes":[{"name":"iconClass","value":"bx bx-cog","type":"label"}],"children":[{"id":"_help_poXkQfguuA0U","title":"Desktop Installation","type":"doc","attributes":[{"type":"label","name":"docName","value":"User Guide/User Guide/Installation & Setup/Desktop Installation"},{"name":"iconClass","value":"bx bx-file","type":"label"}]},{"id":"_help_WOcw2SLH6tbX","title":"Server Installation","type":"doc","attributes":[{"type":"label","name":"docName","value":"User Guide/User Guide/Installation & Setup/Server Installation"},{"name":"iconClass","value":"bx bx-file","type":"label"}],"children":[{"id":"_help_Dgg7bR3b6K9j","title":"1. Installing the server","type":"book","attributes":[{"name":"iconClass","value":"bx bx-folder","type":"label"}],"children":[{"id":"_help_3tW6mORuTHnB","title":"Packaged version for Linux","type":"doc","attributes":[{"type":"label","name":"docName","value":"User Guide/User Guide/Installation & Setup/Server Installation/1. Installing the server/Packaged version for Linux"},{"name":"iconClass","value":"bx bxl-tux","type":"label"}]},{"id":"_help_rWX5eY045zbE","title":"Using Docker","type":"doc","attributes":[{"type":"label","name":"docName","value":"User Guide/User Guide/Installation & Setup/Server Installation/1. Installing the server/Using Docker"},{"name":"iconClass","value":"bx bxl-docker","type":"label"}]},{"id":"_help_moVgBcoxE3EK","title":"On NixOS","type":"doc","attributes":[{"type":"label","name":"docName","value":"User Guide/User Guide/Installation & Setup/Server Installation/1. 
Installing the server/On NixOS"},{"name":"iconClass","value":"bx bxl-tux","type":"label"}]},{"id":"_help_J1Bb6lVlwU5T","title":"Manually","type":"doc","attributes":[{"type":"label","name":"docName","value":"User Guide/User Guide/Installation & Setup/Server Installation/1. Installing the server/Manually"},{"name":"iconClass","value":"bx bx-code-alt","type":"label"}]},{"id":"_help_DCmT6e7clMoP","title":"Using Kubernetes","type":"doc","attributes":[{"type":"label","name":"docName","value":"User Guide/User Guide/Installation & Setup/Server Installation/1. Installing the server/Using Kubernetes"},{"name":"iconClass","value":"bx bxl-kubernetes","type":"label"}]},{"id":"_help_klCWNks3ReaQ","title":"Multiple server instances","type":"doc","attributes":[{"type":"label","name":"docName","value":"User Guide/User Guide/Installation & Setup/Server Installation/1. Installing the server/Multiple server instances"},{"name":"iconClass","value":"bx bxs-user-account","type":"label"}]}]},{"id":"_help_vcjrb3VVYPZI","title":"2. Reverse proxy","type":"book","attributes":[{"name":"iconClass","value":"bx bx-folder","type":"label"}],"children":[{"id":"_help_ud6MShXL4WpO","title":"Nginx","type":"doc","attributes":[{"type":"label","name":"docName","value":"User Guide/User Guide/Installation & Setup/Server Installation/2. Reverse proxy/Nginx"},{"name":"iconClass","value":"bx bx-file","type":"label"}]},{"id":"_help_fDLvzOx29Pfg","title":"Apache","type":"doc","attributes":[{"type":"label","name":"docName","value":"User Guide/User Guide/Installation & Setup/Server Installation/2. 
Reverse proxy/Apache"},{"name":"iconClass","value":"bx bx-file","type":"label"}]}]},{"id":"_help_l2VkvOwUNfZj","title":"TLS Configuration","type":"doc","attributes":[{"type":"label","name":"docName","value":"User Guide/User Guide/Installation & Setup/Server Installation/TLS Configuration"},{"name":"iconClass","value":"bx bx-file","type":"label"}]}]},{"id":"_help_cbkrhQjrkKrh","title":"Synchronization","type":"doc","attributes":[{"type":"label","name":"docName","value":"User Guide/User Guide/Installation & Setup/Synchronization"},{"name":"iconClass","value":"bx bx-sync","type":"label"}]},{"id":"_help_RDslemsQ6gCp","title":"Mobile Frontend","type":"doc","attributes":[{"type":"label","name":"docName","value":"User Guide/User Guide/Installation & Setup/Mobile Frontend"},{"name":"iconClass","value":"bx bx-mobile-alt","type":"label"}]},{"id":"_help_MtPxeAWVAzMg","title":"Web Clipper","type":"doc","attributes":[{"type":"label","name":"docName","value":"User Guide/User Guide/Installation & Setup/Web Clipper"},{"name":"iconClass","value":"bx bx-file","type":"label"}]},{"id":"_help_n1lujUxCwipy","title":"Upgrading TriliumNext","type":"doc","attributes":[{"type":"label","name":"docName","value":"User Guide/User Guide/Installation & Setup/Upgrading TriliumNext"},{"name":"iconClass","value":"bx bx-file","type":"label"}]},{"id":"_help_ODY7qQn5m2FT","title":"Backup","type":"doc","attributes":[{"type":"label","name":"docName","value":"User Guide/User Guide/Installation & Setup/Backup"},{"name":"iconClass","value":"bx bx-file","type":"label"}]},{"id":"_help_tAassRL4RSQL","title":"Data directory","type":"doc","attributes":[{"type":"label","name":"docName","value":"User Guide/User Guide/Installation & Setup/Data directory"},{"name":"iconClass","value":"bx bx-folder-open","type":"label"}]}]},{"id":"_help_gh7bpGYxajRS","title":"Basic Concepts and Features","type":"book","attributes":[{"name":"iconClass","value":"bx 
bx-help-circle","type":"label"}],"children":[{"id":"_help_Vc8PjrjAGuOp","title":"UI Elements","type":"book","attributes":[{"name":"iconClass","value":"bx bx-window-alt","type":"label"}],"children":[{"id":"_help_x0JgW8UqGXvq","title":"Vertical and horizontal layout","type":"doc","attributes":[{"type":"label","name":"docName","value":"User Guide/User Guide/Basic Concepts and Features/UI Elements/Vertical and horizontal layout"},{"name":"iconClass","value":"bx bxs-layout","type":"label"}]},{"id":"_help_x3i7MxGccDuM","title":"Global menu","type":"doc","attributes":[{"type":"label","name":"docName","value":"User Guide/User Guide/Basic Concepts and Features/UI Elements/Global menu"},{"name":"iconClass","value":"bx bx-menu","type":"label"}]},{"id":"_help_oPVyFC7WL2Lp","title":"Note Tree","type":"doc","attributes":[{"type":"label","name":"docName","value":"User Guide/User Guide/Basic Concepts and Features/UI Elements/Note Tree"},{"name":"iconClass","value":"bx bxs-tree-alt","type":"label"}],"children":[{"id":"_help_YtSN43OrfzaA","title":"Note tree contextual menu","type":"doc","attributes":[{"type":"label","name":"docName","value":"User Guide/User Guide/Basic Concepts and Features/UI Elements/Note Tree/Note tree contextual menu"},{"name":"iconClass","value":"bx bx-menu","type":"label"}]},{"id":"_help_yTjUdsOi4CIE","title":"Multiple selection","type":"doc","attributes":[{"type":"label","name":"docName","value":"User Guide/User Guide/Basic Concepts and Features/UI Elements/Note Tree/Multiple selection"},{"name":"iconClass","value":"bx bx-list-plus","type":"label"}]}]},{"id":"_help_BlN9DFI679QC","title":"Ribbon","type":"doc","attributes":[{"type":"label","name":"docName","value":"User Guide/User Guide/Basic Concepts and Features/UI Elements/Ribbon"},{"name":"iconClass","value":"bx bx-dots-horizontal","type":"label"}]},{"id":"_help_3seOhtN8uLIY","title":"Tabs","type":"doc","attributes":[{"type":"label","name":"docName","value":"User Guide/User Guide/Basic Concepts and 
Features/UI Elements/Tabs"},{"name":"iconClass","value":"bx bx-dock-top","type":"label"}]},{"id":"_help_xYmIYSP6wE3F","title":"Launch Bar","type":"doc","attributes":[{"type":"label","name":"docName","value":"User Guide/User Guide/Basic Concepts and Features/UI Elements/Launch Bar"},{"name":"iconClass","value":"bx bx-sidebar","type":"label"}]},{"id":"_help_8YBEPzcpUgxw","title":"Note buttons","type":"doc","attributes":[{"type":"label","name":"docName","value":"User Guide/User Guide/Basic Concepts and Features/UI Elements/Note buttons"},{"name":"iconClass","value":"bx bx-dots-vertical-rounded","type":"label"}]},{"id":"_help_4TIF1oA4VQRO","title":"Options","type":"doc","attributes":[{"type":"label","name":"docName","value":"User Guide/User Guide/Basic Concepts and Features/UI Elements/Options"},{"name":"iconClass","value":"bx bx-cog","type":"label"}]},{"id":"_help_luNhaphA37EO","title":"Split View","type":"doc","attributes":[{"type":"label","name":"docName","value":"User Guide/User Guide/Basic Concepts and Features/UI Elements/Split View"},{"name":"iconClass","value":"bx bx-dock-right","type":"label"}]},{"id":"_help_XpOYSgsLkTJy","title":"Floating buttons","type":"doc","attributes":[{"type":"label","name":"docName","value":"User Guide/User Guide/Basic Concepts and Features/UI Elements/Floating buttons"},{"name":"iconClass","value":"bx bx-rectangle","type":"label"}]},{"id":"_help_RnaPdbciOfeq","title":"Right Sidebar","type":"doc","attributes":[{"type":"label","name":"docName","value":"User Guide/User Guide/Basic Concepts and Features/UI Elements/Right Sidebar"},{"name":"iconClass","value":"bx bxs-dock-right","type":"label"}]},{"id":"_help_r5JGHN99bVKn","title":"Recent Changes","type":"doc","attributes":[{"type":"label","name":"docName","value":"User Guide/User Guide/Basic Concepts and Features/UI Elements/Recent Changes"},{"name":"iconClass","value":"bx 
bx-history","type":"label"}]},{"id":"_help_ny318J39E5Z0","title":"Zoom","type":"doc","attributes":[{"type":"label","name":"docName","value":"User Guide/User Guide/Basic Concepts and Features/UI Elements/Zoom"},{"name":"iconClass","value":"bx bx-zoom-in","type":"label"}]}]},{"id":"_help_BFs8mudNFgCS","title":"Notes","type":"doc","attributes":[{"type":"label","name":"docName","value":"User Guide/User Guide/Basic Concepts and Features/Notes"},{"name":"iconClass","value":"bx bx-notepad","type":"label"}],"children":[{"id":"_help_p9kXRFAkwN4o","title":"Note Icons","type":"doc","attributes":[{"type":"label","name":"docName","value":"User Guide/User Guide/Basic Concepts and Features/Notes/Note Icons"},{"name":"iconClass","value":"bx bxs-grid","type":"label"}]},{"id":"_help_0vhv7lsOLy82","title":"Attachments","type":"doc","attributes":[{"type":"label","name":"docName","value":"User Guide/User Guide/Basic Concepts and Features/Notes/Attachments"},{"name":"iconClass","value":"bx bx-paperclip","type":"label"}]},{"id":"_help_IakOLONlIfGI","title":"Cloning Notes","type":"doc","attributes":[{"type":"label","name":"docName","value":"User Guide/User Guide/Basic Concepts and Features/Notes/Cloning Notes"},{"name":"iconClass","value":"bx bx-duplicate","type":"label"}],"children":[{"id":"_help_TBwsyfadTA18","title":"Branch prefix","type":"doc","attributes":[{"type":"label","name":"docName","value":"User Guide/User Guide/Basic Concepts and Features/Notes/Cloning Notes/Branch prefix"},{"name":"iconClass","value":"bx bx-rename","type":"label"}]}]},{"id":"_help_bwg0e8ewQMak","title":"Protected Notes","type":"doc","attributes":[{"type":"label","name":"docName","value":"User Guide/User Guide/Basic Concepts and Features/Notes/Protected Notes"},{"name":"iconClass","value":"bx bx-lock-alt","type":"label"}]},{"id":"_help_MKmLg5x6xkor","title":"Archived Notes","type":"doc","attributes":[{"type":"label","name":"docName","value":"User Guide/User Guide/Basic Concepts and Features/Notes/Archived 
Notes"},{"name":"iconClass","value":"bx bx-box","type":"label"}]},{"id":"_help_vZWERwf8U3nx","title":"Note Revisions","type":"doc","attributes":[{"type":"label","name":"docName","value":"User Guide/User Guide/Basic Concepts and Features/Notes/Note Revisions"},{"name":"iconClass","value":"bx bx-history","type":"label"}]},{"id":"_help_aGlEvb9hyDhS","title":"Sorting Notes","type":"doc","attributes":[{"type":"label","name":"docName","value":"User Guide/User Guide/Basic Concepts and Features/Notes/Sorting Notes"},{"name":"iconClass","value":"bx bx-sort-up","type":"label"}]},{"id":"_help_NRnIZmSMc5sj","title":"Export as PDF","type":"doc","attributes":[{"type":"label","name":"docName","value":"User Guide/User Guide/Basic Concepts and Features/Notes/Export as PDF"},{"name":"iconClass","value":"bx bxs-file-pdf","type":"label"}]},{"id":"_help_CoFPLs3dRlXc","title":"Read-Only Notes","type":"doc","attributes":[{"type":"label","name":"docName","value":"User Guide/User Guide/Basic Concepts and Features/Notes/Read-Only Notes"},{"name":"iconClass","value":"bx bx-edit-alt","type":"label"}]},{"id":"_help_0ESUbbAxVnoK","title":"Note List","type":"doc","attributes":[{"type":"label","name":"docName","value":"User Guide/User Guide/Basic Concepts and Features/Notes/Note List"},{"name":"iconClass","value":"bx bxs-grid","type":"label"}],"children":[{"id":"_help_xWbu3jpNWapp","title":"Calendar View","type":"doc","attributes":[{"type":"label","name":"docName","value":"User Guide/User Guide/Basic Concepts and Features/Notes/Note List/Calendar View"},{"name":"iconClass","value":"bx bx-calendar","type":"label"}]}]}]},{"id":"_help_wArbEsdSae6g","title":"Navigation","type":"book","attributes":[{"name":"iconClass","value":"bx bx-navigation","type":"label"}],"children":[{"id":"_help_kBrnXNG3Hplm","title":"Tree Concepts","type":"doc","attributes":[{"type":"label","name":"docName","value":"User Guide/User Guide/Basic Concepts and Features/Navigation/Tree Concepts"},{"name":"iconClass","value":"bx 
bx-pyramid","type":"label"}]},{"id":"_help_MMiBEQljMQh2","title":"Note Navigation","type":"doc","attributes":[{"type":"label","name":"docName","value":"User Guide/User Guide/Basic Concepts and Features/Navigation/Note Navigation"},{"name":"iconClass","value":"bx bxs-navigation","type":"label"}]},{"id":"_help_Ms1nauBra7gq","title":"Quick search","type":"doc","attributes":[{"type":"label","name":"docName","value":"User Guide/User Guide/Basic Concepts and Features/Navigation/Quick search"},{"name":"iconClass","value":"bx bx-search-alt-2","type":"label"}]},{"id":"_help_F1r9QtzQLZqm","title":"Jump to Note","type":"doc","attributes":[{"type":"label","name":"docName","value":"User Guide/User Guide/Basic Concepts and Features/Navigation/Jump to Note"},{"name":"iconClass","value":"bx bx-send","type":"label"}]},{"id":"_help_eIg8jdvaoNNd","title":"Search","type":"doc","attributes":[{"type":"label","name":"docName","value":"User Guide/User Guide/Basic Concepts and Features/Navigation/Search"},{"name":"iconClass","value":"bx bx-search-alt-2","type":"label"}]},{"id":"_help_u3YFHC9tQlpm","title":"Bookmarks","type":"doc","attributes":[{"type":"label","name":"docName","value":"User Guide/User Guide/Basic Concepts and Features/Navigation/Bookmarks"},{"name":"iconClass","value":"bx bx-bookmarks","type":"label"}]},{"id":"_help_OR8WJ7Iz9K4U","title":"Note Hoisting","type":"doc","attributes":[{"type":"label","name":"docName","value":"User Guide/User Guide/Basic Concepts and Features/Navigation/Note Hoisting"},{"name":"iconClass","value":"bx bxs-chevrons-up","type":"label"}]},{"id":"_help_9sRHySam5fXb","title":"Workspaces","type":"doc","attributes":[{"type":"label","name":"docName","value":"User Guide/User Guide/Basic Concepts and Features/Navigation/Workspaces"},{"name":"iconClass","value":"bx bx-door-open","type":"label"}]},{"id":"_help_xWtq5NUHOwql","title":"Similar Notes","type":"doc","attributes":[{"type":"label","name":"docName","value":"User Guide/User Guide/Basic Concepts and 
Features/Navigation/Similar Notes"},{"name":"iconClass","value":"bx bx-bar-chart","type":"label"}]},{"id":"_help_McngOG2jbUWX","title":"Search in note","type":"doc","attributes":[{"type":"label","name":"docName","value":"User Guide/User Guide/Basic Concepts and Features/Navigation/Search in note"},{"name":"iconClass","value":"bx bx-search-alt-2","type":"label"}]}]},{"id":"_help_A9Oc6YKKc65v","title":"Keyboard Shortcuts","type":"doc","attributes":[{"type":"label","name":"docName","value":"User Guide/User Guide/Basic Concepts and Features/Keyboard Shortcuts"},{"name":"iconClass","value":"bx bxs-keyboard","type":"label"}]},{"id":"_help_Wy267RK4M69c","title":"Themes","type":"doc","attributes":[{"type":"label","name":"docName","value":"User Guide/User Guide/Basic Concepts and Features/Themes"},{"name":"iconClass","value":"bx bx-palette","type":"label"}],"children":[{"id":"_help_VbjZvtUek0Ln","title":"Theme Gallery","type":"doc","attributes":[{"type":"label","name":"docName","value":"User Guide/User Guide/Basic Concepts and Features/Themes/Theme Gallery"},{"name":"iconClass","value":"bx bx-book-reader","type":"label"}]}]},{"id":"_help_mHbBMPDPkVV5","title":"Import & Export","type":"book","attributes":[{"name":"iconClass","value":"bx bx-import","type":"label"}],"children":[{"id":"_help_Oau6X9rCuegd","title":"Markdown","type":"doc","attributes":[{"type":"label","name":"docName","value":"User Guide/User Guide/Basic Concepts and Features/Import & Export/Markdown"},{"name":"iconClass","value":"bx bxl-markdown","type":"label"}]},{"id":"_help_syuSEKf2rUGr","title":"Evernote","type":"doc","attributes":[{"type":"label","name":"docName","value":"User Guide/User Guide/Basic Concepts and Features/Import & Export/Evernote"},{"name":"iconClass","value":"bx bx-file","type":"label"}]},{"id":"_help_GnhlmrATVqcH","title":"OneNote","type":"doc","attributes":[{"type":"label","name":"docName","value":"User Guide/User Guide/Basic Concepts and Features/Import & 
Export/OneNote"},{"name":"iconClass","value":"bx bx-file","type":"label"}]}]},{"id":"_help_rC3pL2aptaRE","title":"Zen mode","type":"doc","attributes":[{"type":"label","name":"docName","value":"User Guide/User Guide/Basic Concepts and Features/Zen mode"},{"name":"iconClass","value":"bx bxs-yin-yang","type":"label"}]}]},{"id":"_help_s3YCWHBfmYuM","title":"Quick Start","type":"doc","attributes":[{"type":"label","name":"docName","value":"User Guide/User Guide/Quick Start"},{"name":"iconClass","value":"bx bx-run","type":"label"}]},{"id":"_help_i6dbnitykE5D","title":"FAQ","type":"doc","attributes":[{"type":"label","name":"docName","value":"User Guide/User Guide/FAQ"},{"name":"iconClass","value":"bx bx-question-mark","type":"label"}]},{"id":"_help_KSZ04uQ2D1St","title":"Note Types","type":"doc","attributes":[{"type":"label","name":"docName","value":"User Guide/User Guide/Note Types"},{"name":"iconClass","value":"bx bx-edit","type":"label"}],"children":[{"id":"_help_iPIMuisry3hd","title":"Text","type":"doc","attributes":[{"type":"label","name":"docName","value":"User Guide/User Guide/Note Types/Text"},{"name":"iconClass","value":"bx bx-note","type":"label"}],"children":[{"id":"_help_NwBbFdNZ9h7O","title":"Block quotes & admonitions","type":"doc","attributes":[{"type":"label","name":"docName","value":"User Guide/User Guide/Note Types/Text/Block quotes & admonitions"},{"name":"iconClass","value":"bx bx-info-circle","type":"label"}]},{"id":"_help_veGu4faJErEM","title":"Content language & Right-to-left support","type":"doc","attributes":[{"type":"label","name":"docName","value":"User Guide/User Guide/Note Types/Text/Content language & Right-to-le"},{"name":"iconClass","value":"bx bx-align-right","type":"label"}]},{"id":"_help_2x0ZAX9ePtzV","title":"Cut to subnote","type":"doc","attributes":[{"type":"label","name":"docName","value":"User Guide/User Guide/Note Types/Text/Cut to subnote"},{"name":"iconClass","value":"bx 
bx-cut","type":"label"}]},{"id":"_help_UYuUB1ZekNQU","title":"Developer-specific formatting","type":"doc","attributes":[{"type":"label","name":"docName","value":"User Guide/User Guide/Note Types/Text/Developer-specific formatting"},{"name":"iconClass","value":"bx bx-code-alt","type":"label"}],"children":[{"id":"_help_QxEyIjRBizuC","title":"Code blocks","type":"doc","attributes":[{"type":"label","name":"docName","value":"User Guide/User Guide/Note Types/Text/Developer-specific formatting/Code blocks"},{"name":"iconClass","value":"bx bx-code","type":"label"}]}]},{"id":"_help_AgjCISero73a","title":"Footnotes","type":"doc","attributes":[{"type":"label","name":"docName","value":"User Guide/User Guide/Note Types/Text/Footnotes"},{"name":"iconClass","value":"bx bx-bracket","type":"label"}]},{"id":"_help_nRhnJkTT8cPs","title":"Formatting toolbar","type":"doc","attributes":[{"type":"label","name":"docName","value":"User Guide/User Guide/Note Types/Text/Formatting toolbar"},{"name":"iconClass","value":"bx bx-text","type":"label"}]},{"id":"_help_Gr6xFaF6ioJ5","title":"General formatting","type":"doc","attributes":[{"type":"label","name":"docName","value":"User Guide/User Guide/Note Types/Text/General formatting"},{"name":"iconClass","value":"bx bx-bold","type":"label"}]},{"id":"_help_AxshuNRegLAv","title":"Highlights list","type":"doc","attributes":[{"type":"label","name":"docName","value":"User Guide/User Guide/Note Types/Text/Highlights list"},{"name":"iconClass","value":"bx bx-highlight","type":"label"}]},{"id":"_help_mT0HEkOsz6i1","title":"Images","type":"doc","attributes":[{"type":"label","name":"docName","value":"User Guide/User Guide/Note Types/Text/Images"},{"name":"iconClass","value":"bx bx-image-alt","type":"label"}],"children":[{"id":"_help_0Ofbk1aSuVRu","title":"Image references","type":"doc","attributes":[{"type":"label","name":"docName","value":"User Guide/User Guide/Note Types/Text/Images/Image references"},{"name":"iconClass","value":"bx 
bxs-file-image","type":"label"}]}]},{"id":"_help_nBAXQFj20hS1","title":"Include Note","type":"doc","attributes":[{"type":"label","name":"docName","value":"User Guide/User Guide/Note Types/Text/Include Note"},{"name":"iconClass","value":"bx bx-file","type":"label"}]},{"id":"_help_CohkqWQC1iBv","title":"Insert buttons","type":"doc","attributes":[{"type":"label","name":"docName","value":"User Guide/User Guide/Note Types/Text/Insert buttons"},{"name":"iconClass","value":"bx bx-plus","type":"label"}]},{"id":"_help_oiVPnW8QfnvS","title":"Keyboard shortcuts","type":"doc","attributes":[{"type":"label","name":"docName","value":"User Guide/User Guide/Note Types/Text/Keyboard shortcuts"},{"name":"iconClass","value":"bx bxs-keyboard","type":"label"}]},{"id":"_help_QEAPj01N5f7w","title":"Links","type":"doc","attributes":[{"type":"label","name":"docName","value":"User Guide/User Guide/Note Types/Text/Links"},{"name":"iconClass","value":"bx bx-link-alt","type":"label"}]},{"id":"_help_S6Xx8QIWTV66","title":"Lists","type":"doc","attributes":[{"type":"label","name":"docName","value":"User Guide/User Guide/Note Types/Text/Lists"},{"name":"iconClass","value":"bx bx-list-ul","type":"label"}]},{"id":"_help_QrtTYPmdd1qq","title":"Markdown-like formatting","type":"doc","attributes":[{"type":"label","name":"docName","value":"User Guide/User Guide/Note Types/Text/Markdown-like formatting"},{"name":"iconClass","value":"bx bxl-markdown","type":"label"}]},{"id":"_help_YfYAtQBcfo5V","title":"Math Equations","type":"doc","attributes":[{"type":"label","name":"docName","value":"User Guide/User Guide/Note Types/Text/Math Equations"},{"name":"iconClass","value":"bx bx-math","type":"label"}]},{"id":"_help_dEHYtoWWi8ct","title":"Other features","type":"doc","attributes":[{"type":"label","name":"docName","value":"User Guide/User Guide/Note Types/Text/Other features"},{"name":"iconClass","value":"bx bxs-grid","type":"label"}]},{"id":"_help_BFvAtE74rbP6","title":"Table of 
contents","type":"doc","attributes":[{"type":"label","name":"docName","value":"User Guide/User Guide/Note Types/Text/Table of contents"},{"name":"iconClass","value":"bx bx-heading","type":"label"}]},{"id":"_help_NdowYOC1GFKS","title":"Tables","type":"doc","attributes":[{"type":"label","name":"docName","value":"User Guide/User Guide/Note Types/Text/Tables"},{"name":"iconClass","value":"bx bx-table","type":"label"}]}]},{"id":"_help_6f9hih2hXXZk","title":"Code","type":"doc","attributes":[{"type":"label","name":"docName","value":"User Guide/User Guide/Note Types/Code"},{"name":"iconClass","value":"bx bx-code","type":"label"}]},{"id":"_help_m523cpzocqaD","title":"Saved Search","type":"doc","attributes":[{"type":"label","name":"docName","value":"User Guide/User Guide/Note Types/Saved Search"},{"name":"iconClass","value":"bx bx-file-find","type":"label"}]},{"id":"_help_iRwzGnHPzonm","title":"Relation Map","type":"doc","attributes":[{"type":"label","name":"docName","value":"User Guide/User Guide/Note Types/Relation Map"},{"name":"iconClass","value":"bx bxs-network-chart","type":"label"}]},{"id":"_help_bdUJEHsAPYQR","title":"Note Map","type":"doc","attributes":[{"type":"label","name":"docName","value":"User Guide/User Guide/Note Types/Note Map"},{"name":"iconClass","value":"bx bxs-network-chart","type":"label"}]},{"id":"_help_HcABDtFCkbFN","title":"Render Note","type":"doc","attributes":[{"type":"label","name":"docName","value":"User Guide/User Guide/Note Types/Render Note"},{"name":"iconClass","value":"bx bx-extension","type":"label"}]},{"id":"_help_GTwFsgaA0lCt","title":"Book","type":"doc","attributes":[{"type":"label","name":"docName","value":"User Guide/User Guide/Note Types/Book"},{"name":"iconClass","value":"bx bx-book","type":"label"}]},{"id":"_help_s1aBHPd79XYj","title":"Mermaid Diagrams","type":"doc","attributes":[{"type":"label","name":"docName","value":"User Guide/User Guide/Note Types/Mermaid Diagrams"},{"name":"iconClass","value":"bx 
bx-selection","type":"label"}],"children":[{"id":"_help_RH6yLjjWJHof","title":"ELK layout","type":"doc","attributes":[{"type":"label","name":"docName","value":"User Guide/User Guide/Note Types/Mermaid Diagrams/ELK layout"},{"name":"iconClass","value":"bx bxs-network-chart","type":"label"}]}]},{"id":"_help_grjYqerjn243","title":"Canvas","type":"doc","attributes":[{"type":"label","name":"docName","value":"User Guide/User Guide/Note Types/Canvas"},{"name":"iconClass","value":"bx bx-pen","type":"label"}]},{"id":"_help_1vHRoWCEjj0L","title":"Web View","type":"doc","attributes":[{"type":"label","name":"docName","value":"User Guide/User Guide/Note Types/Web View"},{"name":"iconClass","value":"bx bx-globe-alt","type":"label"}]},{"id":"_help_gBbsAeiuUxI5","title":"Mind Map","type":"doc","attributes":[{"type":"label","name":"docName","value":"User Guide/User Guide/Note Types/Mind Map"},{"name":"iconClass","value":"bx bx-sitemap","type":"label"}]},{"id":"_help_81SGnPGMk7Xc","title":"Geo Map","type":"doc","attributes":[{"type":"label","name":"docName","value":"User Guide/User Guide/Note Types/Geo Map"},{"name":"iconClass","value":"bx bx-map-alt","type":"label"}]},{"id":"_help_W8vYD3Q1zjCR","title":"File","type":"doc","attributes":[{"type":"label","name":"docName","value":"User Guide/User Guide/Note Types/File"},{"name":"iconClass","value":"bx bx-file","type":"label"}]}]},{"id":"_help_BgmBlOIl72jZ","title":"Troubleshooting","type":"doc","attributes":[{"type":"label","name":"docName","value":"User Guide/User Guide/Troubleshooting"},{"name":"iconClass","value":"bx bx-bug","type":"label"}],"children":[{"id":"_help_wy8So3yZZlH9","title":"Reporting issues","type":"doc","attributes":[{"type":"label","name":"docName","value":"User Guide/User Guide/Troubleshooting/Reporting issues"},{"name":"iconClass","value":"bx bx-file","type":"label"}]},{"id":"_help_x59R8J8KV5Bp","title":"Anonymized Database","type":"doc","attributes":[{"type":"label","name":"docName","value":"User Guide/User 
Guide/Troubleshooting/Anonymized Database"},{"name":"iconClass","value":"bx bx-file","type":"label"}]},{"id":"_help_qzNzp9LYQyPT","title":"Error logs","type":"doc","attributes":[{"type":"label","name":"docName","value":"User Guide/User Guide/Troubleshooting/Error logs"},{"name":"iconClass","value":"bx bx-file","type":"label"}]},{"id":"_help_vdlYGAcpXAgc","title":"Synchronization fails with 504 Gateway Timeout","type":"doc","attributes":[{"type":"label","name":"docName","value":"User Guide/User Guide/Troubleshooting/Synchronization fails with 504"},{"name":"iconClass","value":"bx bx-file","type":"label"}]},{"id":"_help_s8alTXmpFR61","title":"Refreshing the application","type":"doc","attributes":[{"type":"label","name":"docName","value":"User Guide/User Guide/Troubleshooting/Refreshing the application"},{"name":"iconClass","value":"bx bx-file","type":"label"}]}]},{"id":"_help_pKK96zzmvBGf","title":"Theme development","type":"book","attributes":[{"name":"iconClass","value":"bx bx-palette","type":"label"}],"children":[{"id":"_help_7NfNr5pZpVKV","title":"Creating a custom theme","type":"doc","attributes":[{"type":"label","name":"docName","value":"User Guide/User Guide/Theme development/Creating a custom theme"},{"name":"iconClass","value":"bx bx-file","type":"label"}]},{"id":"_help_WFGzWeUK6arS","title":"Customize the Next theme","type":"doc","attributes":[{"type":"label","name":"docName","value":"User Guide/User Guide/Theme development/Customize the Next theme"},{"name":"iconClass","value":"bx bx-file","type":"label"}]},{"id":"_help_WN5z4M8ASACJ","title":"Reference","type":"doc","attributes":[{"type":"label","name":"docName","value":"User Guide/User Guide/Theme development/Reference"},{"name":"iconClass","value":"bx bx-file","type":"label"}]},{"id":"_help_AlhDUqhENtH7","title":"Custom app-wide CSS","type":"doc","attributes":[{"type":"label","name":"docName","value":"User Guide/User Guide/Theme development/Custom app-wide CSS"},{"name":"iconClass","value":"bx 
bx-file","type":"label"}]}]},{"id":"_help_tC7s2alapj8V","title":"Advanced Usage","type":"book","attributes":[{"name":"iconClass","value":"bx bx-rocket","type":"label"}],"children":[{"id":"_help_zEY4DaJG4YT5","title":"Attributes","type":"doc","attributes":[{"type":"label","name":"docName","value":"User Guide/User Guide/Advanced Usage/Attributes"},{"name":"iconClass","value":"bx bx-list-check","type":"label"}],"children":[{"id":"_help_HI6GBBIduIgv","title":"Labels","type":"doc","attributes":[{"type":"label","name":"docName","value":"User Guide/User Guide/Advanced Usage/Attributes/Labels"},{"name":"iconClass","value":"bx bx-hash","type":"label"}]},{"id":"_help_Cq5X6iKQop6R","title":"Relations","type":"doc","attributes":[{"type":"label","name":"docName","value":"User Guide/User Guide/Advanced Usage/Attributes/Relations"},{"name":"iconClass","value":"bx bx-transfer","type":"label"}]},{"id":"_help_bwZpz2ajCEwO","title":"Attribute Inheritance","type":"doc","attributes":[{"type":"label","name":"docName","value":"User Guide/User Guide/Advanced Usage/Attributes/Attribute Inheritance"},{"name":"iconClass","value":"bx bx-list-plus","type":"label"}]},{"id":"_help_OFXdgB2nNk1F","title":"Promoted Attributes","type":"doc","attributes":[{"type":"label","name":"docName","value":"User Guide/User Guide/Advanced Usage/Attributes/Promoted Attributes"},{"name":"iconClass","value":"bx bx-table","type":"label"}]}]},{"id":"_help_KC1HB96bqqHX","title":"Templates","type":"doc","attributes":[{"type":"label","name":"docName","value":"User Guide/User Guide/Advanced Usage/Templates"},{"name":"iconClass","value":"bx bx-copy","type":"label"}]},{"id":"_help_BCkXAVs63Ttv","title":"Note Map (Link map, Tree map)","type":"doc","attributes":[{"type":"label","name":"docName","value":"User Guide/User Guide/Advanced Usage/Note Map (Link map, Tree map)"},{"name":"iconClass","value":"bx 
bxs-network-chart","type":"label"}]},{"id":"_help_R9pX4DGra2Vt","title":"Sharing","type":"doc","attributes":[{"type":"label","name":"docName","value":"User Guide/User Guide/Advanced Usage/Sharing"},{"name":"iconClass","value":"bx bx-share-alt","type":"label"}],"children":[{"id":"_help_Qjt68inQ2bRj","title":"Serving directly the content of a note","type":"doc","attributes":[{"type":"label","name":"docName","value":"User Guide/User Guide/Advanced Usage/Sharing/Serving directly the content o"},{"name":"iconClass","value":"bx bx-file","type":"label"}]}]},{"id":"_help_5668rwcirq1t","title":"Advanced Showcases","type":"doc","attributes":[{"type":"label","name":"docName","value":"User Guide/User Guide/Advanced Usage/Advanced Showcases"},{"name":"iconClass","value":"bx bx-file","type":"label"}],"children":[{"id":"_help_l0tKav7yLHGF","title":"Day Notes","type":"doc","attributes":[{"type":"label","name":"docName","value":"User Guide/User Guide/Advanced Usage/Advanced Showcases/Day Notes"},{"name":"iconClass","value":"bx bx-calendar","type":"label"}]},{"id":"_help_R7abl2fc6Mxi","title":"Weight Tracker","type":"doc","attributes":[{"type":"label","name":"docName","value":"User Guide/User Guide/Advanced Usage/Advanced Showcases/Weight Tracker"},{"name":"iconClass","value":"bx bx-file","type":"label"}]},{"id":"_help_xYjQUYhpbUEW","title":"Task Manager","type":"doc","attributes":[{"type":"label","name":"docName","value":"User Guide/User Guide/Advanced Usage/Advanced Showcases/Task Manager"},{"name":"iconClass","value":"bx bx-calendar-check","type":"label"}]}]},{"id":"_help_J5Ex1ZrMbyJ6","title":"Custom Request Handler","type":"doc","attributes":[{"type":"label","name":"docName","value":"User Guide/User Guide/Advanced Usage/Custom Request Handler"},{"name":"iconClass","value":"bx bx-file","type":"label"}]},{"id":"_help_d3fAXQ2diepH","title":"Custom Resource Providers","type":"doc","attributes":[{"type":"label","name":"docName","value":"User Guide/User Guide/Advanced Usage/Custom 
Resource Providers"},{"name":"iconClass","value":"bx bx-file","type":"label"}]},{"id":"_help_pgxEVkzLl1OP","title":"ETAPI (REST API)","type":"doc","attributes":[{"type":"label","name":"docName","value":"User Guide/User Guide/Advanced Usage/ETAPI (REST API)"},{"name":"iconClass","value":"bx bx-file","type":"label"}],"children":[{"id":"_help_9qPsTWBorUhQ","title":"API Reference","type":"webView","attributes":[{"type":"label","name":"webViewSrc","value":"/etapi/docs"},{"name":"iconClass","value":"bx bx-file","type":"label"}]}]},{"id":"_help_47ZrP6FNuoG8","title":"Default Note Title","type":"doc","attributes":[{"type":"label","name":"docName","value":"User Guide/User Guide/Advanced Usage/Default Note Title"},{"name":"iconClass","value":"bx bx-file","type":"label"}]},{"id":"_help_wX4HbRucYSDD","title":"Database","type":"doc","attributes":[{"type":"label","name":"docName","value":"User Guide/User Guide/Advanced Usage/Database"},{"name":"iconClass","value":"bx bx-data","type":"label"}],"children":[{"id":"_help_oyIAJ9PvvwHX","title":"Manually altering the database","type":"doc","attributes":[{"type":"label","name":"docName","value":"User Guide/User Guide/Advanced Usage/Database/Manually altering the database"},{"name":"iconClass","value":"bx bx-file","type":"label"}],"children":[{"id":"_help_YKWqdJhzi2VY","title":"SQL Console","type":"doc","attributes":[{"type":"label","name":"docName","value":"User Guide/User Guide/Advanced Usage/Database/Manually altering the database/SQL Console"},{"name":"iconClass","value":"bx bx-data","type":"label"}]}]},{"id":"_help_6tZeKvSHEUiB","title":"Demo Notes","type":"doc","attributes":[{"type":"label","name":"docName","value":"User Guide/User Guide/Advanced Usage/Database/Demo Notes"},{"name":"iconClass","value":"bx bx-package","type":"label"}]}]},{"id":"_help_Gzjqa934BdH4","title":"Configuration (config.ini or environment variables)","type":"doc","attributes":[{"type":"label","name":"docName","value":"User Guide/User Guide/Advanced 
Usage/Configuration (config.ini or e"},{"name":"iconClass","value":"bx bx-file","type":"label"}],"children":[{"id":"_help_c5xB8m4g2IY6","title":"Trilium instance","type":"doc","attributes":[{"type":"label","name":"docName","value":"User Guide/User Guide/Advanced Usage/Configuration (config.ini or environment variables)/Trilium instance"},{"name":"iconClass","value":"bx bx-file","type":"label"}]},{"id":"_help_LWtBjFej3wX3","title":"Cross-Origin Resource Sharing (CORS)","type":"doc","attributes":[{"type":"label","name":"docName","value":"User Guide/User Guide/Advanced Usage/Configuration (config.ini or environment variables)/Cross-Origin Resource Sharing "},{"name":"iconClass","value":"bx bx-file","type":"label"}]}]},{"id":"_help_ivYnonVFBxbQ","title":"Bulk Actions","type":"doc","attributes":[{"type":"label","name":"docName","value":"User Guide/User Guide/Advanced Usage/Bulk Actions"},{"name":"iconClass","value":"bx bx-list-plus","type":"label"}]},{"id":"_help_4FahAwuGTAwC","title":"Note source","type":"doc","attributes":[{"type":"label","name":"docName","value":"User Guide/User Guide/Advanced Usage/Note source"},{"name":"iconClass","value":"bx bx-code","type":"label"}]},{"id":"_help_1YeN2MzFUluU","title":"Technologies used","type":"doc","attributes":[{"type":"label","name":"docName","value":"User Guide/User Guide/Advanced Usage/Technologies used"},{"name":"iconClass","value":"bx bxs-component","type":"label"}],"children":[{"id":"_help_MI26XDLSAlCD","title":"CKEditor","type":"doc","attributes":[{"type":"label","name":"docName","value":"User Guide/User Guide/Advanced Usage/Technologies used/CKEditor"},{"name":"iconClass","value":"bx bx-file","type":"label"}]},{"id":"_help_N4IDkixaDG9C","title":"MindElixir","type":"doc","attributes":[{"type":"label","name":"docName","value":"User Guide/User Guide/Advanced Usage/Technologies used/MindElixir"},{"name":"iconClass","value":"bx 
bx-file","type":"label"}]},{"id":"_help_H0mM1lTxF9JI","title":"Excalidraw","type":"doc","attributes":[{"type":"label","name":"docName","value":"User Guide/User Guide/Advanced Usage/Technologies used/Excalidraw"},{"name":"iconClass","value":"bx bx-file","type":"label"}]},{"id":"_help_MQHyy2dIFgxS","title":"Leaflet","type":"doc","attributes":[{"type":"label","name":"docName","value":"User Guide/User Guide/Advanced Usage/Technologies used/Leaflet"},{"name":"iconClass","value":"bx bx-file","type":"label"}]}]},{"id":"_help_m1lbrzyKDaRB","title":"Note ID","type":"doc","attributes":[{"type":"label","name":"docName","value":"User Guide/User Guide/Advanced Usage/Note ID"},{"name":"iconClass","value":"bx bx-hash","type":"label"}]},{"id":"_help_0vTSyvhPTAOz","title":"Internal API","type":"book","attributes":[{"name":"iconClass","value":"bx bx-folder","type":"label"}],"children":[{"id":"_help_z8O2VG4ZZJD7","title":"API Reference","type":"webView","attributes":[{"type":"label","name":"webViewSrc","value":"/api/docs"},{"name":"iconClass","value":"bx bx-file","type":"label"}]}]},{"id":"_help_2mUhVmZK8RF3","title":"Hidden Notes","type":"doc","attributes":[{"type":"label","name":"docName","value":"User Guide/User Guide/Advanced Usage/Hidden Notes"},{"name":"iconClass","value":"bx bx-hide","type":"label"}]}]},{"id":"_help_CdNpE2pqjmI6","title":"Scripting","type":"doc","attributes":[{"type":"label","name":"docName","value":"User Guide/User Guide/Scripting"},{"name":"iconClass","value":"bx bxs-file-js","type":"label"}],"children":[{"id":"_help_yIhgI5H7A2Sm","title":"Frontend Basics","type":"doc","attributes":[{"type":"label","name":"docName","value":"User Guide/User Guide/Scripting/Frontend Basics"},{"name":"iconClass","value":"bx bx-file","type":"label"}]},{"id":"_help_es8OU2GuguFU","title":"Examples","type":"book","attributes":[{"name":"iconClass","value":"bx bx-folder","type":"label"}],"children":[{"id":"_help_TjLYAo3JMO8X","title":"\"New Task\" launcher 
button","type":"doc","attributes":[{"type":"label","name":"docName","value":"User Guide/User Guide/Scripting/Examples/New Task launcher button"},{"name":"iconClass","value":"bx bx-task","type":"label"}]},{"id":"_help_7kZPMD0uFwkH","title":"Downloading responses from Google Forms","type":"doc","attributes":[{"type":"label","name":"docName","value":"User Guide/User Guide/Scripting/Examples/Downloading responses from Goo"},{"name":"iconClass","value":"bx bx-file","type":"label"}]},{"id":"_help_DL92EjAaXT26","title":"Using promoted attributes to configure scripts","type":"doc","attributes":[{"type":"label","name":"docName","value":"User Guide/User Guide/Scripting/Examples/Using promoted attributes to c"},{"name":"iconClass","value":"bx bx-file","type":"label"}]}]},{"id":"_help_GPERMystNGTB","title":"Events","type":"doc","attributes":[{"type":"label","name":"docName","value":"User Guide/User Guide/Scripting/Events"},{"name":"iconClass","value":"bx bx-rss","type":"label"}]},{"id":"_help_MgibgPcfeuGz","title":"Custom Widgets","type":"doc","attributes":[{"type":"label","name":"docName","value":"User Guide/User Guide/Scripting/Custom Widgets"},{"name":"iconClass","value":"bx bx-file","type":"label"}],"children":[{"id":"_help_YNxAqkI5Kg1M","title":"Word count widget","type":"doc","attributes":[{"type":"label","name":"docName","value":"User Guide/User Guide/Scripting/Custom Widgets/Word count widget"},{"name":"iconClass","value":"bx bx-file","type":"label"}]},{"id":"_help_SynTBQiBsdYJ","title":"Widget Basics","type":"doc","attributes":[{"type":"label","name":"docName","value":"User Guide/User Guide/Scripting/Custom Widgets/Widget Basics"},{"name":"iconClass","value":"bx bx-file","type":"label"}]}]},{"id":"_help_GLks18SNjxmC","title":"Script API","type":"doc","attributes":[{"type":"label","name":"docName","value":"User Guide/User Guide/Scripting/Script API"},{"name":"iconClass","value":"bx bx-file","type":"label"}],"children":[{"id":"_help_Q2z6av6JZVWm","title":"Frontend 
API","type":"webView","attributes":[{"type":"label","name":"webViewSrc","value":"https://triliumnext.github.io/Notes/Script%20API/interfaces/Frontend_Script_API.Api.html"},{"name":"iconClass","value":"bx bx-folder","type":"label"}],"children":[{"id":"_help_habiZ3HU8Kw8","title":"FNote","type":"webView","attributes":[{"type":"label","name":"webViewSrc","value":"https://triliumnext.github.io/Notes/Script%20API/classes/Frontend_Script_API.FNote.html"},{"name":"iconClass","value":"bx bx-file","type":"label"}]}]},{"id":"_help_MEtfsqa5VwNi","title":"Backend API","type":"webView","attributes":[{"type":"label","name":"webViewSrc","value":"https://triliumnext.github.io/Notes/Script%20API/interfaces/Backend_Script_API.Api.html"},{"name":"iconClass","value":"bx bx-file","type":"label"}]}]}]}] \ No newline at end of file +[{"id":"_help_Otzi9La2YAUX","title":"Installation & Setup","type":"book","attributes":[{"name":"iconClass","value":"bx bx-cog","type":"label"}],"children":[{"id":"_help_poXkQfguuA0U","title":"Desktop Installation","type":"doc","attributes":[{"type":"label","name":"docName","value":"User Guide/User Guide/Installation & Setup/Desktop Installation"},{"name":"iconClass","value":"bx bx-file","type":"label"}]},{"id":"_help_WOcw2SLH6tbX","title":"Server Installation","type":"doc","attributes":[{"type":"label","name":"docName","value":"User Guide/User Guide/Installation & Setup/Server Installation"},{"name":"iconClass","value":"bx bx-file","type":"label"}],"children":[{"id":"_help_Dgg7bR3b6K9j","title":"1. Installing the server","type":"book","attributes":[{"name":"iconClass","value":"bx bx-folder","type":"label"}],"children":[{"id":"_help_3tW6mORuTHnB","title":"Packaged version for Linux","type":"doc","attributes":[{"type":"label","name":"docName","value":"User Guide/User Guide/Installation & Setup/Server Installation/1. 
Installing the server/Packaged version for Linux"},{"name":"iconClass","value":"bx bxl-tux","type":"label"}]},{"id":"_help_rWX5eY045zbE","title":"Using Docker","type":"doc","attributes":[{"type":"label","name":"docName","value":"User Guide/User Guide/Installation & Setup/Server Installation/1. Installing the server/Using Docker"},{"name":"iconClass","value":"bx bxl-docker","type":"label"}]},{"id":"_help_moVgBcoxE3EK","title":"On NixOS","type":"doc","attributes":[{"type":"label","name":"docName","value":"User Guide/User Guide/Installation & Setup/Server Installation/1. Installing the server/On NixOS"},{"name":"iconClass","value":"bx bxl-tux","type":"label"}]},{"id":"_help_J1Bb6lVlwU5T","title":"Manually","type":"doc","attributes":[{"type":"label","name":"docName","value":"User Guide/User Guide/Installation & Setup/Server Installation/1. Installing the server/Manually"},{"name":"iconClass","value":"bx bx-code-alt","type":"label"}]},{"id":"_help_DCmT6e7clMoP","title":"Using Kubernetes","type":"doc","attributes":[{"type":"label","name":"docName","value":"User Guide/User Guide/Installation & Setup/Server Installation/1. Installing the server/Using Kubernetes"},{"name":"iconClass","value":"bx bxl-kubernetes","type":"label"}]},{"id":"_help_klCWNks3ReaQ","title":"Multiple server instances","type":"doc","attributes":[{"type":"label","name":"docName","value":"User Guide/User Guide/Installation & Setup/Server Installation/1. Installing the server/Multiple server instances"},{"name":"iconClass","value":"bx bxs-user-account","type":"label"}]}]},{"id":"_help_vcjrb3VVYPZI","title":"2. Reverse proxy","type":"book","attributes":[{"name":"iconClass","value":"bx bx-folder","type":"label"}],"children":[{"id":"_help_ud6MShXL4WpO","title":"Nginx","type":"doc","attributes":[{"type":"label","name":"docName","value":"User Guide/User Guide/Installation & Setup/Server Installation/2. 
Reverse proxy/Nginx"},{"name":"iconClass","value":"bx bx-file","type":"label"}]},{"id":"_help_fDLvzOx29Pfg","title":"Apache","type":"doc","attributes":[{"type":"label","name":"docName","value":"User Guide/User Guide/Installation & Setup/Server Installation/2. Reverse proxy/Apache"},{"name":"iconClass","value":"bx bx-file","type":"label"}]}]},{"id":"_help_l2VkvOwUNfZj","title":"TLS Configuration","type":"doc","attributes":[{"type":"label","name":"docName","value":"User Guide/User Guide/Installation & Setup/Server Installation/TLS Configuration"},{"name":"iconClass","value":"bx bx-file","type":"label"}]}]},{"id":"_help_cbkrhQjrkKrh","title":"Synchronization","type":"doc","attributes":[{"type":"label","name":"docName","value":"User Guide/User Guide/Installation & Setup/Synchronization"},{"name":"iconClass","value":"bx bx-sync","type":"label"}]},{"id":"_help_RDslemsQ6gCp","title":"Mobile Frontend","type":"doc","attributes":[{"type":"label","name":"docName","value":"User Guide/User Guide/Installation & Setup/Mobile Frontend"},{"name":"iconClass","value":"bx bx-mobile-alt","type":"label"}]},{"id":"_help_MtPxeAWVAzMg","title":"Web Clipper","type":"doc","attributes":[{"type":"label","name":"docName","value":"User Guide/User Guide/Installation & Setup/Web Clipper"},{"name":"iconClass","value":"bx bx-file","type":"label"}]},{"id":"_help_n1lujUxCwipy","title":"Upgrading TriliumNext","type":"doc","attributes":[{"type":"label","name":"docName","value":"User Guide/User Guide/Installation & Setup/Upgrading TriliumNext"},{"name":"iconClass","value":"bx bx-file","type":"label"}]},{"id":"_help_ODY7qQn5m2FT","title":"Backup","type":"doc","attributes":[{"type":"label","name":"docName","value":"User Guide/User Guide/Installation & Setup/Backup"},{"name":"iconClass","value":"bx bx-file","type":"label"}]},{"id":"_help_tAassRL4RSQL","title":"Data directory","type":"doc","attributes":[{"type":"label","name":"docName","value":"User Guide/User Guide/Installation & Setup/Data 
directory"},{"name":"iconClass","value":"bx bx-folder-open","type":"label"}]}]},{"id":"_help_gh7bpGYxajRS","title":"Basic Concepts and Features","type":"book","attributes":[{"name":"iconClass","value":"bx bx-help-circle","type":"label"}],"children":[{"id":"_help_Vc8PjrjAGuOp","title":"UI Elements","type":"book","attributes":[{"name":"iconClass","value":"bx bx-window-alt","type":"label"}],"children":[{"id":"_help_x0JgW8UqGXvq","title":"Vertical and horizontal layout","type":"doc","attributes":[{"type":"label","name":"docName","value":"User Guide/User Guide/Basic Concepts and Features/UI Elements/Vertical and horizontal layout"},{"name":"iconClass","value":"bx bxs-layout","type":"label"}]},{"id":"_help_x3i7MxGccDuM","title":"Global menu","type":"doc","attributes":[{"type":"label","name":"docName","value":"User Guide/User Guide/Basic Concepts and Features/UI Elements/Global menu"},{"name":"iconClass","value":"bx bx-menu","type":"label"}]},{"id":"_help_oPVyFC7WL2Lp","title":"Note Tree","type":"doc","attributes":[{"type":"label","name":"docName","value":"User Guide/User Guide/Basic Concepts and Features/UI Elements/Note Tree"},{"name":"iconClass","value":"bx bxs-tree-alt","type":"label"}],"children":[{"id":"_help_YtSN43OrfzaA","title":"Note tree contextual menu","type":"doc","attributes":[{"type":"label","name":"docName","value":"User Guide/User Guide/Basic Concepts and Features/UI Elements/Note Tree/Note tree contextual menu"},{"name":"iconClass","value":"bx bx-menu","type":"label"}]},{"id":"_help_yTjUdsOi4CIE","title":"Multiple selection","type":"doc","attributes":[{"type":"label","name":"docName","value":"User Guide/User Guide/Basic Concepts and Features/UI Elements/Note Tree/Multiple selection"},{"name":"iconClass","value":"bx bx-list-plus","type":"label"}]}]},{"id":"_help_BlN9DFI679QC","title":"Ribbon","type":"doc","attributes":[{"type":"label","name":"docName","value":"User Guide/User Guide/Basic Concepts and Features/UI 
Elements/Ribbon"},{"name":"iconClass","value":"bx bx-dots-horizontal","type":"label"}]},{"id":"_help_3seOhtN8uLIY","title":"Tabs","type":"doc","attributes":[{"type":"label","name":"docName","value":"User Guide/User Guide/Basic Concepts and Features/UI Elements/Tabs"},{"name":"iconClass","value":"bx bx-dock-top","type":"label"}]},{"id":"_help_xYmIYSP6wE3F","title":"Launch Bar","type":"doc","attributes":[{"type":"label","name":"docName","value":"User Guide/User Guide/Basic Concepts and Features/UI Elements/Launch Bar"},{"name":"iconClass","value":"bx bx-sidebar","type":"label"}]},{"id":"_help_8YBEPzcpUgxw","title":"Note buttons","type":"doc","attributes":[{"type":"label","name":"docName","value":"User Guide/User Guide/Basic Concepts and Features/UI Elements/Note buttons"},{"name":"iconClass","value":"bx bx-dots-vertical-rounded","type":"label"}]},{"id":"_help_4TIF1oA4VQRO","title":"Options","type":"doc","attributes":[{"type":"label","name":"docName","value":"User Guide/User Guide/Basic Concepts and Features/UI Elements/Options"},{"name":"iconClass","value":"bx bx-cog","type":"label"}]},{"id":"_help_luNhaphA37EO","title":"Split View","type":"doc","attributes":[{"type":"label","name":"docName","value":"User Guide/User Guide/Basic Concepts and Features/UI Elements/Split View"},{"name":"iconClass","value":"bx bx-dock-right","type":"label"}]},{"id":"_help_XpOYSgsLkTJy","title":"Floating buttons","type":"doc","attributes":[{"type":"label","name":"docName","value":"User Guide/User Guide/Basic Concepts and Features/UI Elements/Floating buttons"},{"name":"iconClass","value":"bx bx-rectangle","type":"label"}]},{"id":"_help_RnaPdbciOfeq","title":"Right Sidebar","type":"doc","attributes":[{"type":"label","name":"docName","value":"User Guide/User Guide/Basic Concepts and Features/UI Elements/Right Sidebar"},{"name":"iconClass","value":"bx bxs-dock-right","type":"label"}]},{"id":"_help_r5JGHN99bVKn","title":"Recent 
Changes","type":"doc","attributes":[{"type":"label","name":"docName","value":"User Guide/User Guide/Basic Concepts and Features/UI Elements/Recent Changes"},{"name":"iconClass","value":"bx bx-history","type":"label"}]},{"id":"_help_ny318J39E5Z0","title":"Zoom","type":"doc","attributes":[{"type":"label","name":"docName","value":"User Guide/User Guide/Basic Concepts and Features/UI Elements/Zoom"},{"name":"iconClass","value":"bx bx-zoom-in","type":"label"}]}]},{"id":"_help_BFs8mudNFgCS","title":"Notes","type":"doc","attributes":[{"type":"label","name":"docName","value":"User Guide/User Guide/Basic Concepts and Features/Notes"},{"name":"iconClass","value":"bx bx-notepad","type":"label"}],"children":[{"id":"_help_p9kXRFAkwN4o","title":"Note Icons","type":"doc","attributes":[{"type":"label","name":"docName","value":"User Guide/User Guide/Basic Concepts and Features/Notes/Note Icons"},{"name":"iconClass","value":"bx bxs-grid","type":"label"}]},{"id":"_help_0vhv7lsOLy82","title":"Attachments","type":"doc","attributes":[{"type":"label","name":"docName","value":"User Guide/User Guide/Basic Concepts and Features/Notes/Attachments"},{"name":"iconClass","value":"bx bx-paperclip","type":"label"}]},{"id":"_help_IakOLONlIfGI","title":"Cloning Notes","type":"doc","attributes":[{"type":"label","name":"docName","value":"User Guide/User Guide/Basic Concepts and Features/Notes/Cloning Notes"},{"name":"iconClass","value":"bx bx-duplicate","type":"label"}],"children":[{"id":"_help_TBwsyfadTA18","title":"Branch prefix","type":"doc","attributes":[{"type":"label","name":"docName","value":"User Guide/User Guide/Basic Concepts and Features/Notes/Cloning Notes/Branch prefix"},{"name":"iconClass","value":"bx bx-rename","type":"label"}]}]},{"id":"_help_bwg0e8ewQMak","title":"Protected Notes","type":"doc","attributes":[{"type":"label","name":"docName","value":"User Guide/User Guide/Basic Concepts and Features/Notes/Protected Notes"},{"name":"iconClass","value":"bx 
bx-lock-alt","type":"label"}]},{"id":"_help_MKmLg5x6xkor","title":"Archived Notes","type":"doc","attributes":[{"type":"label","name":"docName","value":"User Guide/User Guide/Basic Concepts and Features/Notes/Archived Notes"},{"name":"iconClass","value":"bx bx-box","type":"label"}]},{"id":"_help_vZWERwf8U3nx","title":"Note Revisions","type":"doc","attributes":[{"type":"label","name":"docName","value":"User Guide/User Guide/Basic Concepts and Features/Notes/Note Revisions"},{"name":"iconClass","value":"bx bx-history","type":"label"}]},{"id":"_help_aGlEvb9hyDhS","title":"Sorting Notes","type":"doc","attributes":[{"type":"label","name":"docName","value":"User Guide/User Guide/Basic Concepts and Features/Notes/Sorting Notes"},{"name":"iconClass","value":"bx bx-sort-up","type":"label"}]},{"id":"_help_NRnIZmSMc5sj","title":"Export as PDF","type":"doc","attributes":[{"type":"label","name":"docName","value":"User Guide/User Guide/Basic Concepts and Features/Notes/Export as PDF"},{"name":"iconClass","value":"bx bxs-file-pdf","type":"label"}]},{"id":"_help_CoFPLs3dRlXc","title":"Read-Only Notes","type":"doc","attributes":[{"type":"label","name":"docName","value":"User Guide/User Guide/Basic Concepts and Features/Notes/Read-Only Notes"},{"name":"iconClass","value":"bx bx-edit-alt","type":"label"}]},{"id":"_help_0ESUbbAxVnoK","title":"Note List","type":"doc","attributes":[{"type":"label","name":"docName","value":"User Guide/User Guide/Basic Concepts and Features/Notes/Note List"},{"name":"iconClass","value":"bx bxs-grid","type":"label"}],"children":[{"id":"_help_xWbu3jpNWapp","title":"Calendar View","type":"doc","attributes":[{"type":"label","name":"docName","value":"User Guide/User Guide/Basic Concepts and Features/Notes/Note List/Calendar View"},{"name":"iconClass","value":"bx bx-calendar","type":"label"}]}]}]},{"id":"_help_wArbEsdSae6g","title":"Navigation","type":"book","attributes":[{"name":"iconClass","value":"bx 
bx-navigation","type":"label"}],"children":[{"id":"_help_kBrnXNG3Hplm","title":"Tree Concepts","type":"doc","attributes":[{"type":"label","name":"docName","value":"User Guide/User Guide/Basic Concepts and Features/Navigation/Tree Concepts"},{"name":"iconClass","value":"bx bx-pyramid","type":"label"}]},{"id":"_help_MMiBEQljMQh2","title":"Note Navigation","type":"doc","attributes":[{"type":"label","name":"docName","value":"User Guide/User Guide/Basic Concepts and Features/Navigation/Note Navigation"},{"name":"iconClass","value":"bx bxs-navigation","type":"label"}]},{"id":"_help_Ms1nauBra7gq","title":"Quick search","type":"doc","attributes":[{"type":"label","name":"docName","value":"User Guide/User Guide/Basic Concepts and Features/Navigation/Quick search"},{"name":"iconClass","value":"bx bx-search-alt-2","type":"label"}]},{"id":"_help_F1r9QtzQLZqm","title":"Jump to Note","type":"doc","attributes":[{"type":"label","name":"docName","value":"User Guide/User Guide/Basic Concepts and Features/Navigation/Jump to Note"},{"name":"iconClass","value":"bx bx-send","type":"label"}]},{"id":"_help_eIg8jdvaoNNd","title":"Search","type":"doc","attributes":[{"type":"label","name":"docName","value":"User Guide/User Guide/Basic Concepts and Features/Navigation/Search"},{"name":"iconClass","value":"bx bx-search-alt-2","type":"label"}]},{"id":"_help_u3YFHC9tQlpm","title":"Bookmarks","type":"doc","attributes":[{"type":"label","name":"docName","value":"User Guide/User Guide/Basic Concepts and Features/Navigation/Bookmarks"},{"name":"iconClass","value":"bx bx-bookmarks","type":"label"}]},{"id":"_help_OR8WJ7Iz9K4U","title":"Note Hoisting","type":"doc","attributes":[{"type":"label","name":"docName","value":"User Guide/User Guide/Basic Concepts and Features/Navigation/Note Hoisting"},{"name":"iconClass","value":"bx bxs-chevrons-up","type":"label"}]},{"id":"_help_9sRHySam5fXb","title":"Workspaces","type":"doc","attributes":[{"type":"label","name":"docName","value":"User Guide/User Guide/Basic 
Concepts and Features/Navigation/Workspaces"},{"name":"iconClass","value":"bx bx-door-open","type":"label"}]},{"id":"_help_xWtq5NUHOwql","title":"Similar Notes","type":"doc","attributes":[{"type":"label","name":"docName","value":"User Guide/User Guide/Basic Concepts and Features/Navigation/Similar Notes"},{"name":"iconClass","value":"bx bx-bar-chart","type":"label"}]},{"id":"_help_McngOG2jbUWX","title":"Search in note","type":"doc","attributes":[{"type":"label","name":"docName","value":"User Guide/User Guide/Basic Concepts and Features/Navigation/Search in note"},{"name":"iconClass","value":"bx bx-search-alt-2","type":"label"}]}]},{"id":"_help_A9Oc6YKKc65v","title":"Keyboard Shortcuts","type":"doc","attributes":[{"type":"label","name":"docName","value":"User Guide/User Guide/Basic Concepts and Features/Keyboard Shortcuts"},{"name":"iconClass","value":"bx bxs-keyboard","type":"label"}]},{"id":"_help_Wy267RK4M69c","title":"Themes","type":"doc","attributes":[{"type":"label","name":"docName","value":"User Guide/User Guide/Basic Concepts and Features/Themes"},{"name":"iconClass","value":"bx bx-palette","type":"label"}],"children":[{"id":"_help_VbjZvtUek0Ln","title":"Theme Gallery","type":"doc","attributes":[{"type":"label","name":"docName","value":"User Guide/User Guide/Basic Concepts and Features/Themes/Theme Gallery"},{"name":"iconClass","value":"bx bx-book-reader","type":"label"}]}]},{"id":"_help_mHbBMPDPkVV5","title":"Import & Export","type":"book","attributes":[{"name":"iconClass","value":"bx bx-import","type":"label"}],"children":[{"id":"_help_Oau6X9rCuegd","title":"Markdown","type":"doc","attributes":[{"type":"label","name":"docName","value":"User Guide/User Guide/Basic Concepts and Features/Import & Export/Markdown"},{"name":"iconClass","value":"bx bxl-markdown","type":"label"}]},{"id":"_help_syuSEKf2rUGr","title":"Evernote","type":"doc","attributes":[{"type":"label","name":"docName","value":"User Guide/User Guide/Basic Concepts and Features/Import & 
Export/Evernote"},{"name":"iconClass","value":"bx bx-file","type":"label"}]},{"id":"_help_GnhlmrATVqcH","title":"OneNote","type":"doc","attributes":[{"type":"label","name":"docName","value":"User Guide/User Guide/Basic Concepts and Features/Import & Export/OneNote"},{"name":"iconClass","value":"bx bx-file","type":"label"}]}]},{"id":"_help_rC3pL2aptaRE","title":"Zen mode","type":"doc","attributes":[{"type":"label","name":"docName","value":"User Guide/User Guide/Basic Concepts and Features/Zen mode"},{"name":"iconClass","value":"bx bxs-yin-yang","type":"label"}]}]},{"id":"_help_s3YCWHBfmYuM","title":"Quick Start","type":"doc","attributes":[{"type":"label","name":"docName","value":"User Guide/User Guide/Quick Start"},{"name":"iconClass","value":"bx bx-run","type":"label"}]},{"id":"_help_i6dbnitykE5D","title":"FAQ","type":"doc","attributes":[{"type":"label","name":"docName","value":"User Guide/User Guide/FAQ"},{"name":"iconClass","value":"bx bx-question-mark","type":"label"}]},{"id":"_help_KSZ04uQ2D1St","title":"Note Types","type":"doc","attributes":[{"type":"label","name":"docName","value":"User Guide/User Guide/Note Types"},{"name":"iconClass","value":"bx bx-edit","type":"label"}],"children":[{"id":"_help_iPIMuisry3hd","title":"Text","type":"doc","attributes":[{"type":"label","name":"docName","value":"User Guide/User Guide/Note Types/Text"},{"name":"iconClass","value":"bx bx-note","type":"label"}],"children":[{"id":"_help_NwBbFdNZ9h7O","title":"Block quotes & admonitions","type":"doc","attributes":[{"type":"label","name":"docName","value":"User Guide/User Guide/Note Types/Text/Block quotes & admonitions"},{"name":"iconClass","value":"bx bx-info-circle","type":"label"}]},{"id":"_help_veGu4faJErEM","title":"Content language & Right-to-left support","type":"doc","attributes":[{"type":"label","name":"docName","value":"User Guide/User Guide/Note Types/Text/Content language & Right-to-le"},{"name":"iconClass","value":"bx 
bx-align-right","type":"label"}]},{"id":"_help_2x0ZAX9ePtzV","title":"Cut to subnote","type":"doc","attributes":[{"type":"label","name":"docName","value":"User Guide/User Guide/Note Types/Text/Cut to subnote"},{"name":"iconClass","value":"bx bx-cut","type":"label"}]},{"id":"_help_UYuUB1ZekNQU","title":"Developer-specific formatting","type":"doc","attributes":[{"type":"label","name":"docName","value":"User Guide/User Guide/Note Types/Text/Developer-specific formatting"},{"name":"iconClass","value":"bx bx-code-alt","type":"label"}],"children":[{"id":"_help_QxEyIjRBizuC","title":"Code blocks","type":"doc","attributes":[{"type":"label","name":"docName","value":"User Guide/User Guide/Note Types/Text/Developer-specific formatting/Code blocks"},{"name":"iconClass","value":"bx bx-code","type":"label"}]}]},{"id":"_help_AgjCISero73a","title":"Footnotes","type":"doc","attributes":[{"type":"label","name":"docName","value":"User Guide/User Guide/Note Types/Text/Footnotes"},{"name":"iconClass","value":"bx bx-bracket","type":"label"}]},{"id":"_help_nRhnJkTT8cPs","title":"Formatting toolbar","type":"doc","attributes":[{"type":"label","name":"docName","value":"User Guide/User Guide/Note Types/Text/Formatting toolbar"},{"name":"iconClass","value":"bx bx-text","type":"label"}]},{"id":"_help_Gr6xFaF6ioJ5","title":"General formatting","type":"doc","attributes":[{"type":"label","name":"docName","value":"User Guide/User Guide/Note Types/Text/General formatting"},{"name":"iconClass","value":"bx bx-bold","type":"label"}]},{"id":"_help_AxshuNRegLAv","title":"Highlights list","type":"doc","attributes":[{"type":"label","name":"docName","value":"User Guide/User Guide/Note Types/Text/Highlights list"},{"name":"iconClass","value":"bx bx-highlight","type":"label"}]},{"id":"_help_mT0HEkOsz6i1","title":"Images","type":"doc","attributes":[{"type":"label","name":"docName","value":"User Guide/User Guide/Note Types/Text/Images"},{"name":"iconClass","value":"bx 
bx-image-alt","type":"label"}],"children":[{"id":"_help_0Ofbk1aSuVRu","title":"Image references","type":"doc","attributes":[{"type":"label","name":"docName","value":"User Guide/User Guide/Note Types/Text/Images/Image references"},{"name":"iconClass","value":"bx bxs-file-image","type":"label"}]}]},{"id":"_help_nBAXQFj20hS1","title":"Include Note","type":"doc","attributes":[{"type":"label","name":"docName","value":"User Guide/User Guide/Note Types/Text/Include Note"},{"name":"iconClass","value":"bx bx-file","type":"label"}]},{"id":"_help_CohkqWQC1iBv","title":"Insert buttons","type":"doc","attributes":[{"type":"label","name":"docName","value":"User Guide/User Guide/Note Types/Text/Insert buttons"},{"name":"iconClass","value":"bx bx-plus","type":"label"}]},{"id":"_help_oiVPnW8QfnvS","title":"Keyboard shortcuts","type":"doc","attributes":[{"type":"label","name":"docName","value":"User Guide/User Guide/Note Types/Text/Keyboard shortcuts"},{"name":"iconClass","value":"bx bxs-keyboard","type":"label"}]},{"id":"_help_QEAPj01N5f7w","title":"Links","type":"doc","attributes":[{"type":"label","name":"docName","value":"User Guide/User Guide/Note Types/Text/Links"},{"name":"iconClass","value":"bx bx-link-alt","type":"label"}]},{"id":"_help_S6Xx8QIWTV66","title":"Lists","type":"doc","attributes":[{"type":"label","name":"docName","value":"User Guide/User Guide/Note Types/Text/Lists"},{"name":"iconClass","value":"bx bx-list-ul","type":"label"}]},{"id":"_help_QrtTYPmdd1qq","title":"Markdown-like formatting","type":"doc","attributes":[{"type":"label","name":"docName","value":"User Guide/User Guide/Note Types/Text/Markdown-like formatting"},{"name":"iconClass","value":"bx bxl-markdown","type":"label"}]},{"id":"_help_YfYAtQBcfo5V","title":"Math Equations","type":"doc","attributes":[{"type":"label","name":"docName","value":"User Guide/User Guide/Note Types/Text/Math Equations"},{"name":"iconClass","value":"bx bx-math","type":"label"}]},{"id":"_help_dEHYtoWWi8ct","title":"Other 
features","type":"doc","attributes":[{"type":"label","name":"docName","value":"User Guide/User Guide/Note Types/Text/Other features"},{"name":"iconClass","value":"bx bxs-grid","type":"label"}]},{"id":"_help_BFvAtE74rbP6","title":"Table of contents","type":"doc","attributes":[{"type":"label","name":"docName","value":"User Guide/User Guide/Note Types/Text/Table of contents"},{"name":"iconClass","value":"bx bx-heading","type":"label"}]},{"id":"_help_NdowYOC1GFKS","title":"Tables","type":"doc","attributes":[{"type":"label","name":"docName","value":"User Guide/User Guide/Note Types/Text/Tables"},{"name":"iconClass","value":"bx bx-table","type":"label"}]}]},{"id":"_help_6f9hih2hXXZk","title":"Code","type":"doc","attributes":[{"type":"label","name":"docName","value":"User Guide/User Guide/Note Types/Code"},{"name":"iconClass","value":"bx bx-code","type":"label"}]},{"id":"_help_m523cpzocqaD","title":"Saved Search","type":"doc","attributes":[{"type":"label","name":"docName","value":"User Guide/User Guide/Note Types/Saved Search"},{"name":"iconClass","value":"bx bx-file-find","type":"label"}]},{"id":"_help_iRwzGnHPzonm","title":"Relation Map","type":"doc","attributes":[{"type":"label","name":"docName","value":"User Guide/User Guide/Note Types/Relation Map"},{"name":"iconClass","value":"bx bxs-network-chart","type":"label"}]},{"id":"_help_bdUJEHsAPYQR","title":"Note Map","type":"doc","attributes":[{"type":"label","name":"docName","value":"User Guide/User Guide/Note Types/Note Map"},{"name":"iconClass","value":"bx bxs-network-chart","type":"label"}]},{"id":"_help_HcABDtFCkbFN","title":"Render Note","type":"doc","attributes":[{"type":"label","name":"docName","value":"User Guide/User Guide/Note Types/Render Note"},{"name":"iconClass","value":"bx bx-extension","type":"label"}]},{"id":"_help_GTwFsgaA0lCt","title":"Book","type":"doc","attributes":[{"type":"label","name":"docName","value":"User Guide/User Guide/Note Types/Book"},{"name":"iconClass","value":"bx 
bx-book","type":"label"}]},{"id":"_help_s1aBHPd79XYj","title":"Mermaid Diagrams","type":"doc","attributes":[{"type":"label","name":"docName","value":"User Guide/User Guide/Note Types/Mermaid Diagrams"},{"name":"iconClass","value":"bx bx-selection","type":"label"}],"children":[{"id":"_help_RH6yLjjWJHof","title":"ELK layout","type":"doc","attributes":[{"type":"label","name":"docName","value":"User Guide/User Guide/Note Types/Mermaid Diagrams/ELK layout"},{"name":"iconClass","value":"bx bxs-network-chart","type":"label"}]}]},{"id":"_help_grjYqerjn243","title":"Canvas","type":"doc","attributes":[{"type":"label","name":"docName","value":"User Guide/User Guide/Note Types/Canvas"},{"name":"iconClass","value":"bx bx-pen","type":"label"}]},{"id":"_help_1vHRoWCEjj0L","title":"Web View","type":"doc","attributes":[{"type":"label","name":"docName","value":"User Guide/User Guide/Note Types/Web View"},{"name":"iconClass","value":"bx bx-globe-alt","type":"label"}]},{"id":"_help_gBbsAeiuUxI5","title":"Mind Map","type":"doc","attributes":[{"type":"label","name":"docName","value":"User Guide/User Guide/Note Types/Mind Map"},{"name":"iconClass","value":"bx bx-sitemap","type":"label"}]},{"id":"_help_81SGnPGMk7Xc","title":"Geo Map","type":"doc","attributes":[{"type":"label","name":"docName","value":"User Guide/User Guide/Note Types/Geo Map"},{"name":"iconClass","value":"bx bx-map-alt","type":"label"}]},{"id":"_help_W8vYD3Q1zjCR","title":"File","type":"doc","attributes":[{"type":"label","name":"docName","value":"User Guide/User Guide/Note Types/File"},{"name":"iconClass","value":"bx bx-file","type":"label"}]}]},{"id":"_help_BgmBlOIl72jZ","title":"Troubleshooting","type":"doc","attributes":[{"type":"label","name":"docName","value":"User Guide/User Guide/Troubleshooting"},{"name":"iconClass","value":"bx bx-bug","type":"label"}],"children":[{"id":"_help_wy8So3yZZlH9","title":"Reporting issues","type":"doc","attributes":[{"type":"label","name":"docName","value":"User Guide/User 
Guide/Troubleshooting/Reporting issues"},{"name":"iconClass","value":"bx bx-file","type":"label"}]},{"id":"_help_x59R8J8KV5Bp","title":"Anonymized Database","type":"doc","attributes":[{"type":"label","name":"docName","value":"User Guide/User Guide/Troubleshooting/Anonymized Database"},{"name":"iconClass","value":"bx bx-file","type":"label"}]},{"id":"_help_qzNzp9LYQyPT","title":"Error logs","type":"doc","attributes":[{"type":"label","name":"docName","value":"User Guide/User Guide/Troubleshooting/Error logs"},{"name":"iconClass","value":"bx bx-file","type":"label"}]},{"id":"_help_vdlYGAcpXAgc","title":"Synchronization fails with 504 Gateway Timeout","type":"doc","attributes":[{"type":"label","name":"docName","value":"User Guide/User Guide/Troubleshooting/Synchronization fails with 504"},{"name":"iconClass","value":"bx bx-file","type":"label"}]},{"id":"_help_s8alTXmpFR61","title":"Refreshing the application","type":"doc","attributes":[{"type":"label","name":"docName","value":"User Guide/User Guide/Troubleshooting/Refreshing the application"},{"name":"iconClass","value":"bx bx-file","type":"label"}]}]},{"id":"_help_pKK96zzmvBGf","title":"Theme development","type":"book","attributes":[{"name":"iconClass","value":"bx bx-palette","type":"label"}],"children":[{"id":"_help_7NfNr5pZpVKV","title":"Creating a custom theme","type":"doc","attributes":[{"type":"label","name":"docName","value":"User Guide/User Guide/Theme development/Creating a custom theme"},{"name":"iconClass","value":"bx bx-file","type":"label"}]},{"id":"_help_WFGzWeUK6arS","title":"Customize the Next theme","type":"doc","attributes":[{"type":"label","name":"docName","value":"User Guide/User Guide/Theme development/Customize the Next theme"},{"name":"iconClass","value":"bx bx-file","type":"label"}]},{"id":"_help_WN5z4M8ASACJ","title":"Reference","type":"doc","attributes":[{"type":"label","name":"docName","value":"User Guide/User Guide/Theme development/Reference"},{"name":"iconClass","value":"bx 
bx-file","type":"label"}]},{"id":"_help_AlhDUqhENtH7","title":"Custom app-wide CSS","type":"doc","attributes":[{"type":"label","name":"docName","value":"User Guide/User Guide/Theme development/Custom app-wide CSS"},{"name":"iconClass","value":"bx bx-file","type":"label"}]}]},{"id":"_help_tC7s2alapj8V","title":"Advanced Usage","type":"book","attributes":[{"name":"iconClass","value":"bx bx-rocket","type":"label"}],"children":[{"id":"_help_zEY4DaJG4YT5","title":"Attributes","type":"doc","attributes":[{"type":"label","name":"docName","value":"User Guide/User Guide/Advanced Usage/Attributes"},{"name":"iconClass","value":"bx bx-list-check","type":"label"}],"children":[{"id":"_help_HI6GBBIduIgv","title":"Labels","type":"doc","attributes":[{"type":"label","name":"docName","value":"User Guide/User Guide/Advanced Usage/Attributes/Labels"},{"name":"iconClass","value":"bx bx-hash","type":"label"}]},{"id":"_help_Cq5X6iKQop6R","title":"Relations","type":"doc","attributes":[{"type":"label","name":"docName","value":"User Guide/User Guide/Advanced Usage/Attributes/Relations"},{"name":"iconClass","value":"bx bx-transfer","type":"label"}]},{"id":"_help_bwZpz2ajCEwO","title":"Attribute Inheritance","type":"doc","attributes":[{"type":"label","name":"docName","value":"User Guide/User Guide/Advanced Usage/Attributes/Attribute Inheritance"},{"name":"iconClass","value":"bx bx-list-plus","type":"label"}]},{"id":"_help_OFXdgB2nNk1F","title":"Promoted Attributes","type":"doc","attributes":[{"type":"label","name":"docName","value":"User Guide/User Guide/Advanced Usage/Attributes/Promoted Attributes"},{"name":"iconClass","value":"bx bx-table","type":"label"}]}]},{"id":"_help_KC1HB96bqqHX","title":"Templates","type":"doc","attributes":[{"type":"label","name":"docName","value":"User Guide/User Guide/Advanced Usage/Templates"},{"name":"iconClass","value":"bx bx-copy","type":"label"}]},{"id":"_help_BCkXAVs63Ttv","title":"Note Map (Link map, Tree 
map)","type":"doc","attributes":[{"type":"label","name":"docName","value":"User Guide/User Guide/Advanced Usage/Note Map (Link map, Tree map)"},{"name":"iconClass","value":"bx bxs-network-chart","type":"label"}]},{"id":"_help_R9pX4DGra2Vt","title":"Sharing","type":"doc","attributes":[{"type":"label","name":"docName","value":"User Guide/User Guide/Advanced Usage/Sharing"},{"name":"iconClass","value":"bx bx-share-alt","type":"label"}],"children":[{"id":"_help_Qjt68inQ2bRj","title":"Serving directly the content of a note","type":"doc","attributes":[{"type":"label","name":"docName","value":"User Guide/User Guide/Advanced Usage/Sharing/Serving directly the content o"},{"name":"iconClass","value":"bx bx-file","type":"label"}]}]},{"id":"_help_5668rwcirq1t","title":"Advanced Showcases","type":"doc","attributes":[{"type":"label","name":"docName","value":"User Guide/User Guide/Advanced Usage/Advanced Showcases"},{"name":"iconClass","value":"bx bx-file","type":"label"}],"children":[{"id":"_help_l0tKav7yLHGF","title":"Day Notes","type":"doc","attributes":[{"type":"label","name":"docName","value":"User Guide/User Guide/Advanced Usage/Advanced Showcases/Day Notes"},{"name":"iconClass","value":"bx bx-calendar","type":"label"}]},{"id":"_help_R7abl2fc6Mxi","title":"Weight Tracker","type":"doc","attributes":[{"type":"label","name":"docName","value":"User Guide/User Guide/Advanced Usage/Advanced Showcases/Weight Tracker"},{"name":"iconClass","value":"bx bx-file","type":"label"}]},{"id":"_help_xYjQUYhpbUEW","title":"Task Manager","type":"doc","attributes":[{"type":"label","name":"docName","value":"User Guide/User Guide/Advanced Usage/Advanced Showcases/Task Manager"},{"name":"iconClass","value":"bx bx-calendar-check","type":"label"}]}]},{"id":"_help_J5Ex1ZrMbyJ6","title":"Custom Request Handler","type":"doc","attributes":[{"type":"label","name":"docName","value":"User Guide/User Guide/Advanced Usage/Custom Request Handler"},{"name":"iconClass","value":"bx 
bx-file","type":"label"}]},{"id":"_help_d3fAXQ2diepH","title":"Custom Resource Providers","type":"doc","attributes":[{"type":"label","name":"docName","value":"User Guide/User Guide/Advanced Usage/Custom Resource Providers"},{"name":"iconClass","value":"bx bx-file","type":"label"}]},{"id":"_help_pgxEVkzLl1OP","title":"ETAPI (REST API)","type":"doc","attributes":[{"type":"label","name":"docName","value":"User Guide/User Guide/Advanced Usage/ETAPI (REST API)"},{"name":"iconClass","value":"bx bx-file","type":"label"}],"children":[{"id":"_help_9qPsTWBorUhQ","title":"API Reference","type":"webView","attributes":[{"type":"label","name":"webViewSrc","value":"/etapi/docs"},{"name":"iconClass","value":"bx bx-file","type":"label"}]}]},{"id":"_help_47ZrP6FNuoG8","title":"Default Note Title","type":"doc","attributes":[{"type":"label","name":"docName","value":"User Guide/User Guide/Advanced Usage/Default Note Title"},{"name":"iconClass","value":"bx bx-file","type":"label"}]},{"id":"_help_wX4HbRucYSDD","title":"Database","type":"doc","attributes":[{"type":"label","name":"docName","value":"User Guide/User Guide/Advanced Usage/Database"},{"name":"iconClass","value":"bx bx-data","type":"label"}],"children":[{"id":"_help_oyIAJ9PvvwHX","title":"Manually altering the database","type":"doc","attributes":[{"type":"label","name":"docName","value":"User Guide/User Guide/Advanced Usage/Database/Manually altering the database"},{"name":"iconClass","value":"bx bx-file","type":"label"}],"children":[{"id":"_help_YKWqdJhzi2VY","title":"SQL Console","type":"doc","attributes":[{"type":"label","name":"docName","value":"User Guide/User Guide/Advanced Usage/Database/Manually altering the database/SQL Console"},{"name":"iconClass","value":"bx bx-data","type":"label"}]}]},{"id":"_help_6tZeKvSHEUiB","title":"Demo Notes","type":"doc","attributes":[{"type":"label","name":"docName","value":"User Guide/User Guide/Advanced Usage/Database/Demo Notes"},{"name":"iconClass","value":"bx 
bx-package","type":"label"}]}]},{"id":"_help_Gzjqa934BdH4","title":"Configuration (config.ini or environment variables)","type":"doc","attributes":[{"type":"label","name":"docName","value":"User Guide/User Guide/Advanced Usage/Configuration (config.ini or e"},{"name":"iconClass","value":"bx bx-file","type":"label"}],"children":[{"id":"_help_c5xB8m4g2IY6","title":"Trilium instance","type":"doc","attributes":[{"type":"label","name":"docName","value":"User Guide/User Guide/Advanced Usage/Configuration (config.ini or environment variables)/Trilium instance"},{"name":"iconClass","value":"bx bx-file","type":"label"}]},{"id":"_help_LWtBjFej3wX3","title":"Cross-Origin Resource Sharing (CORS)","type":"doc","attributes":[{"type":"label","name":"docName","value":"User Guide/User Guide/Advanced Usage/Configuration (config.ini or environment variables)/Cross-Origin Resource Sharing "},{"name":"iconClass","value":"bx bx-file","type":"label"}]}]},{"id":"_help_ivYnonVFBxbQ","title":"Bulk Actions","type":"doc","attributes":[{"type":"label","name":"docName","value":"User Guide/User Guide/Advanced Usage/Bulk Actions"},{"name":"iconClass","value":"bx bx-list-plus","type":"label"}]},{"id":"_help_4FahAwuGTAwC","title":"Note source","type":"doc","attributes":[{"type":"label","name":"docName","value":"User Guide/User Guide/Advanced Usage/Note source"},{"name":"iconClass","value":"bx bx-code","type":"label"}]},{"id":"_help_1YeN2MzFUluU","title":"Technologies used","type":"doc","attributes":[{"type":"label","name":"docName","value":"User Guide/User Guide/Advanced Usage/Technologies used"},{"name":"iconClass","value":"bx bxs-component","type":"label"}],"children":[{"id":"_help_MI26XDLSAlCD","title":"CKEditor","type":"doc","attributes":[{"type":"label","name":"docName","value":"User Guide/User Guide/Advanced Usage/Technologies used/CKEditor"},{"name":"iconClass","value":"bx 
bx-file","type":"label"}]},{"id":"_help_N4IDkixaDG9C","title":"MindElixir","type":"doc","attributes":[{"type":"label","name":"docName","value":"User Guide/User Guide/Advanced Usage/Technologies used/MindElixir"},{"name":"iconClass","value":"bx bx-file","type":"label"}]},{"id":"_help_H0mM1lTxF9JI","title":"Excalidraw","type":"doc","attributes":[{"type":"label","name":"docName","value":"User Guide/User Guide/Advanced Usage/Technologies used/Excalidraw"},{"name":"iconClass","value":"bx bx-file","type":"label"}]},{"id":"_help_MQHyy2dIFgxS","title":"Leaflet","type":"doc","attributes":[{"type":"label","name":"docName","value":"User Guide/User Guide/Advanced Usage/Technologies used/Leaflet"},{"name":"iconClass","value":"bx bx-file","type":"label"}]}]},{"id":"_help_m1lbrzyKDaRB","title":"Note ID","type":"doc","attributes":[{"type":"label","name":"docName","value":"User Guide/User Guide/Advanced Usage/Note ID"},{"name":"iconClass","value":"bx bx-hash","type":"label"}]},{"id":"_help_0vTSyvhPTAOz","title":"Internal API","type":"book","attributes":[{"name":"iconClass","value":"bx bx-folder","type":"label"}],"children":[{"id":"_help_z8O2VG4ZZJD7","title":"API Reference","type":"webView","attributes":[{"type":"label","name":"webViewSrc","value":"/api/docs"},{"name":"iconClass","value":"bx bx-file","type":"label"}]}]},{"id":"_help_2mUhVmZK8RF3","title":"Hidden Notes","type":"doc","attributes":[{"type":"label","name":"docName","value":"User Guide/User Guide/Advanced Usage/Hidden Notes"},{"name":"iconClass","value":"bx bx-hide","type":"label"}]}]},{"id":"_help_LMAv4Uy3Wk6J","title":"AI","type":"book","attributes":[{"name":"iconClass","value":"bx bx-bot","type":"label"}],"children":[{"id":"_help_GBBMSlVSOIGP","title":"Introduction","type":"doc","attributes":[{"type":"label","name":"docName","value":"User Guide/User Guide/AI/Introduction"},{"name":"iconClass","value":"bx bx-file","type":"label"}]},{"id":"_help_WkM7gsEUyCXs","title":"AI Provider 
Information","type":"doc","attributes":[{"type":"label","name":"docName","value":"User Guide/User Guide/AI/AI Provider Information"},{"name":"iconClass","value":"bx bx-file","type":"label"}],"children":[{"id":"_help_7EdTxPADv95W","title":"Ollama","type":"book","attributes":[{"name":"iconClass","value":"bx bx-folder","type":"label"}],"children":[{"id":"_help_vvUCN7FDkq7G","title":"Installing Ollama","type":"doc","attributes":[{"type":"label","name":"docName","value":"User Guide/User Guide/AI/AI Provider Information/Ollama/Installing Ollama"},{"name":"iconClass","value":"bx bx-file","type":"label"}]}]},{"id":"_help_ZavFigBX9AwP","title":"OpenAI","type":"doc","attributes":[{"type":"label","name":"docName","value":"User Guide/User Guide/AI/AI Provider Information/OpenAI"},{"name":"iconClass","value":"bx bx-file","type":"label"}]},{"id":"_help_e0lkirXEiSNc","title":"Anthropic","type":"doc","attributes":[{"type":"label","name":"docName","value":"User Guide/User Guide/AI/AI Provider Information/Anthropic"},{"name":"iconClass","value":"bx bx-file","type":"label"}]}]}]},{"id":"_help_CdNpE2pqjmI6","title":"Scripting","type":"doc","attributes":[{"type":"label","name":"docName","value":"User Guide/User Guide/Scripting"},{"name":"iconClass","value":"bx bxs-file-js","type":"label"}],"children":[{"id":"_help_yIhgI5H7A2Sm","title":"Frontend Basics","type":"doc","attributes":[{"type":"label","name":"docName","value":"User Guide/User Guide/Scripting/Frontend Basics"},{"name":"iconClass","value":"bx bx-file","type":"label"}]},{"id":"_help_es8OU2GuguFU","title":"Examples","type":"book","attributes":[{"name":"iconClass","value":"bx bx-folder","type":"label"}],"children":[{"id":"_help_TjLYAo3JMO8X","title":"\"New Task\" launcher button","type":"doc","attributes":[{"type":"label","name":"docName","value":"User Guide/User Guide/Scripting/Examples/New Task launcher button"},{"name":"iconClass","value":"bx bx-task","type":"label"}]},{"id":"_help_7kZPMD0uFwkH","title":"Downloading responses 
from Google Forms","type":"doc","attributes":[{"type":"label","name":"docName","value":"User Guide/User Guide/Scripting/Examples/Downloading responses from Goo"},{"name":"iconClass","value":"bx bx-file","type":"label"}]},{"id":"_help_DL92EjAaXT26","title":"Using promoted attributes to configure scripts","type":"doc","attributes":[{"type":"label","name":"docName","value":"User Guide/User Guide/Scripting/Examples/Using promoted attributes to c"},{"name":"iconClass","value":"bx bx-file","type":"label"}]}]},{"id":"_help_GPERMystNGTB","title":"Events","type":"doc","attributes":[{"type":"label","name":"docName","value":"User Guide/User Guide/Scripting/Events"},{"name":"iconClass","value":"bx bx-rss","type":"label"}]},{"id":"_help_MgibgPcfeuGz","title":"Custom Widgets","type":"doc","attributes":[{"type":"label","name":"docName","value":"User Guide/User Guide/Scripting/Custom Widgets"},{"name":"iconClass","value":"bx bx-file","type":"label"}],"children":[{"id":"_help_YNxAqkI5Kg1M","title":"Word count widget","type":"doc","attributes":[{"type":"label","name":"docName","value":"User Guide/User Guide/Scripting/Custom Widgets/Word count widget"},{"name":"iconClass","value":"bx bx-file","type":"label"}]},{"id":"_help_SynTBQiBsdYJ","title":"Widget Basics","type":"doc","attributes":[{"type":"label","name":"docName","value":"User Guide/User Guide/Scripting/Custom Widgets/Widget Basics"},{"name":"iconClass","value":"bx bx-file","type":"label"}]}]},{"id":"_help_GLks18SNjxmC","title":"Script API","type":"doc","attributes":[{"type":"label","name":"docName","value":"User Guide/User Guide/Scripting/Script API"},{"name":"iconClass","value":"bx bx-file","type":"label"}],"children":[{"id":"_help_Q2z6av6JZVWm","title":"Frontend API","type":"webView","attributes":[{"type":"label","name":"webViewSrc","value":"https://triliumnext.github.io/Notes/Script%20API/interfaces/Frontend_Script_API.Api.html"},{"name":"iconClass","value":"bx 
bx-folder","type":"label"}],"children":[{"id":"_help_habiZ3HU8Kw8","title":"FNote","type":"webView","attributes":[{"type":"label","name":"webViewSrc","value":"https://triliumnext.github.io/Notes/Script%20API/classes/Frontend_Script_API.FNote.html"},{"name":"iconClass","value":"bx bx-file","type":"label"}]}]},{"id":"_help_MEtfsqa5VwNi","title":"Backend API","type":"webView","attributes":[{"type":"label","name":"webViewSrc","value":"https://triliumnext.github.io/Notes/Script%20API/interfaces/Backend_Script_API.Api.html"},{"name":"iconClass","value":"bx bx-file","type":"label"}]}]}]}] \ No newline at end of file diff --git a/src/public/app/doc_notes/en/User Guide/User Guide/AI/1_AI Provider Information_im.png b/src/public/app/doc_notes/en/User Guide/User Guide/AI/1_AI Provider Information_im.png new file mode 100644 index 000000000..80627e0b0 Binary files /dev/null and b/src/public/app/doc_notes/en/User Guide/User Guide/AI/1_AI Provider Information_im.png differ diff --git a/src/public/app/doc_notes/en/User Guide/User Guide/AI/1_Introduction_image.png b/src/public/app/doc_notes/en/User Guide/User Guide/AI/1_Introduction_image.png new file mode 100644 index 000000000..c0955f2bf Binary files /dev/null and b/src/public/app/doc_notes/en/User Guide/User Guide/AI/1_Introduction_image.png differ diff --git a/src/public/app/doc_notes/en/User Guide/User Guide/AI/2_Introduction_image.png b/src/public/app/doc_notes/en/User Guide/User Guide/AI/2_Introduction_image.png new file mode 100644 index 000000000..3908a5f48 Binary files /dev/null and b/src/public/app/doc_notes/en/User Guide/User Guide/AI/2_Introduction_image.png differ diff --git a/src/public/app/doc_notes/en/User Guide/User Guide/AI/3_Introduction_image.png b/src/public/app/doc_notes/en/User Guide/User Guide/AI/3_Introduction_image.png new file mode 100644 index 000000000..0fe0c8186 Binary files /dev/null and b/src/public/app/doc_notes/en/User Guide/User Guide/AI/3_Introduction_image.png differ diff --git 
a/src/public/app/doc_notes/en/User Guide/User Guide/AI/4_Introduction_image.png b/src/public/app/doc_notes/en/User Guide/User Guide/AI/4_Introduction_image.png new file mode 100644 index 000000000..ef2ad8a0f Binary files /dev/null and b/src/public/app/doc_notes/en/User Guide/User Guide/AI/4_Introduction_image.png differ diff --git a/src/public/app/doc_notes/en/User Guide/User Guide/AI/5_Introduction_image.png b/src/public/app/doc_notes/en/User Guide/User Guide/AI/5_Introduction_image.png new file mode 100644 index 000000000..ede9d6aae Binary files /dev/null and b/src/public/app/doc_notes/en/User Guide/User Guide/AI/5_Introduction_image.png differ diff --git a/src/public/app/doc_notes/en/User Guide/User Guide/AI/6_Introduction_image.png b/src/public/app/doc_notes/en/User Guide/User Guide/AI/6_Introduction_image.png new file mode 100644 index 000000000..e784df790 Binary files /dev/null and b/src/public/app/doc_notes/en/User Guide/User Guide/AI/6_Introduction_image.png differ diff --git a/src/public/app/doc_notes/en/User Guide/User Guide/AI/7_Introduction_image.png b/src/public/app/doc_notes/en/User Guide/User Guide/AI/7_Introduction_image.png new file mode 100644 index 000000000..96d56b39c Binary files /dev/null and b/src/public/app/doc_notes/en/User Guide/User Guide/AI/7_Introduction_image.png differ diff --git a/src/public/app/doc_notes/en/User Guide/User Guide/AI/8_Introduction_image.png b/src/public/app/doc_notes/en/User Guide/User Guide/AI/8_Introduction_image.png new file mode 100644 index 000000000..afb6653ca Binary files /dev/null and b/src/public/app/doc_notes/en/User Guide/User Guide/AI/8_Introduction_image.png differ diff --git a/src/public/app/doc_notes/en/User Guide/User Guide/AI/9_Introduction_image.png b/src/public/app/doc_notes/en/User Guide/User Guide/AI/9_Introduction_image.png new file mode 100644 index 000000000..f50f69553 Binary files /dev/null and b/src/public/app/doc_notes/en/User Guide/User Guide/AI/9_Introduction_image.png differ diff --git 
a/src/public/app/doc_notes/en/User Guide/User Guide/AI/AI Provider Information.html b/src/public/app/doc_notes/en/User Guide/User Guide/AI/AI Provider Information.html new file mode 100644 index 000000000..963837bc1 --- /dev/null +++ b/src/public/app/doc_notes/en/User Guide/User Guide/AI/AI Provider Information.html @@ -0,0 +1,22 @@ +

Currently, we support the following providers:

+ +

To set your preferred chat model, you'll want to enter the provider's + name here:

+
+ +
+

And to set your preferred embedding provider:

+
+ +
\ No newline at end of file diff --git a/src/public/app/doc_notes/en/User Guide/User Guide/AI/AI Provider Information/Anthropic.html b/src/public/app/doc_notes/en/User Guide/User Guide/AI/AI Provider Information/Anthropic.html new file mode 100644 index 000000000..e69de29bb diff --git a/src/public/app/doc_notes/en/User Guide/User Guide/AI/AI Provider Information/Ollama/1_Installing Ollama_image.png b/src/public/app/doc_notes/en/User Guide/User Guide/AI/AI Provider Information/Ollama/1_Installing Ollama_image.png new file mode 100644 index 000000000..821966b77 Binary files /dev/null and b/src/public/app/doc_notes/en/User Guide/User Guide/AI/AI Provider Information/Ollama/1_Installing Ollama_image.png differ diff --git a/src/public/app/doc_notes/en/User Guide/User Guide/AI/AI Provider Information/Ollama/2_Installing Ollama_image.png b/src/public/app/doc_notes/en/User Guide/User Guide/AI/AI Provider Information/Ollama/2_Installing Ollama_image.png new file mode 100644 index 000000000..7e585a751 Binary files /dev/null and b/src/public/app/doc_notes/en/User Guide/User Guide/AI/AI Provider Information/Ollama/2_Installing Ollama_image.png differ diff --git a/src/public/app/doc_notes/en/User Guide/User Guide/AI/AI Provider Information/Ollama/3_Installing Ollama_image.png b/src/public/app/doc_notes/en/User Guide/User Guide/AI/AI Provider Information/Ollama/3_Installing Ollama_image.png new file mode 100644 index 000000000..0a239f35c Binary files /dev/null and b/src/public/app/doc_notes/en/User Guide/User Guide/AI/AI Provider Information/Ollama/3_Installing Ollama_image.png differ diff --git a/src/public/app/doc_notes/en/User Guide/User Guide/AI/AI Provider Information/Ollama/4_Installing Ollama_image.png b/src/public/app/doc_notes/en/User Guide/User Guide/AI/AI Provider Information/Ollama/4_Installing Ollama_image.png new file mode 100644 index 000000000..b5c4c2930 Binary files /dev/null and b/src/public/app/doc_notes/en/User Guide/User Guide/AI/AI Provider 
Information/Ollama/4_Installing Ollama_image.png differ diff --git a/src/public/app/doc_notes/en/User Guide/User Guide/AI/AI Provider Information/Ollama/5_Installing Ollama_image.png b/src/public/app/doc_notes/en/User Guide/User Guide/AI/AI Provider Information/Ollama/5_Installing Ollama_image.png new file mode 100644 index 000000000..0c02db50f Binary files /dev/null and b/src/public/app/doc_notes/en/User Guide/User Guide/AI/AI Provider Information/Ollama/5_Installing Ollama_image.png differ diff --git a/src/public/app/doc_notes/en/User Guide/User Guide/AI/AI Provider Information/Ollama/Installing Ollama.html b/src/public/app/doc_notes/en/User Guide/User Guide/AI/AI Provider Information/Ollama/Installing Ollama.html new file mode 100644 index 000000000..39d9ee00d --- /dev/null +++ b/src/public/app/doc_notes/en/User Guide/User Guide/AI/AI Provider Information/Ollama/Installing Ollama.html @@ -0,0 +1,45 @@ +

Ollama can be installed in a variety + of ways, and even runs within a Docker container. + Ollama will be noticeably quicker when running on a GPU (Nvidia, AMD, Intel), + but it can run on CPU and RAM. To install Ollama without any other prerequisites, + you can follow their installer:

+
+ +
+
+ +
+
+ +
+

After their installer completes, if you're on Windows, you should see + an entry in the start menu to run it:

+
+ +
+

Also, you should have access to the ollama CLI via Powershell + or CMD:

+
+ +
+

After Ollama is installed, you can go ahead and pull the models + you want to use and run. Here's a command to pull my favorite tool-compatible + model and embedding model as of April 2025:

ollama pull llama3.1:8b
+ollama pull mxbai-embed-large
+

Also, you can make sure it's running by going to http://localhost:11434 and + you should get the following response (port 11434 being the “normal” Ollama + port):

+
+ +
+

Now that you have Ollama up and running, have a few models pulled, you're + ready to go ahead and start using Ollama as both a chat provider, + and embedding provider!

\ No newline at end of file diff --git a/src/public/app/doc_notes/en/User Guide/User Guide/AI/AI Provider Information/Ollama/Installing Ollama_image.png b/src/public/app/doc_notes/en/User Guide/User Guide/AI/AI Provider Information/Ollama/Installing Ollama_image.png new file mode 100644 index 000000000..cf5ee38d4 Binary files /dev/null and b/src/public/app/doc_notes/en/User Guide/User Guide/AI/AI Provider Information/Ollama/Installing Ollama_image.png differ diff --git a/src/public/app/doc_notes/en/User Guide/User Guide/AI/AI Provider Information/OpenAI.html b/src/public/app/doc_notes/en/User Guide/User Guide/AI/AI Provider Information/OpenAI.html new file mode 100644 index 000000000..e69de29bb diff --git a/src/public/app/doc_notes/en/User Guide/User Guide/AI/AI Provider Information_im.png b/src/public/app/doc_notes/en/User Guide/User Guide/AI/AI Provider Information_im.png new file mode 100644 index 000000000..aa19e949d Binary files /dev/null and b/src/public/app/doc_notes/en/User Guide/User Guide/AI/AI Provider Information_im.png differ diff --git a/src/public/app/doc_notes/en/User Guide/User Guide/AI/Introduction.html b/src/public/app/doc_notes/en/User Guide/User Guide/AI/Introduction.html new file mode 100644 index 000000000..b873f9ca9 --- /dev/null +++ b/src/public/app/doc_notes/en/User Guide/User Guide/AI/Introduction.html @@ -0,0 +1,161 @@ +
+ +
An example chat with an LLM
+
+

The AI / LLM features within Trilium Notes are designed to allow you to + interact with your Notes in a variety of ways, using as many of the major + providers as we can support. 

+

In addition to being able to send chats to LLM providers such as OpenAI, + Anthropic, and Ollama - we also support agentic tool calling, and embeddings.

+

The quickest way to get started is to navigate to the “AI/LLM” settings:

+
+ +
+

Enable the feature:

+
+ +
+ +

Embeddings

+

Embeddings are important as they allow us to have a compact + AI “summary” (it's not human readable text) of each of your Notes, that + we can then perform mathematical functions on (such as cosine similarity) + to smartly figure out which Notes to send as context to the LLM when you're + chatting, among other useful functions.

+

You will then need to set up the AI “provider” that you wish to use to + create the embeddings for your Notes. Currently OpenAI, Voyage AI, and + Ollama are supported providers for embedding generation.

+

In the following example, we're going to use our self-hosted Ollama instance + to create the embeddings for our Notes. You can see additional documentation + about installing your own Ollama locally in Installing Ollama.

+

To see what embedding models Ollama has available, you can check out + this search on their website, and then pull whichever one + you want to try out. As of 4/15/25, my personal favorite is mxbai-embed-large.

+

First, we'll need to select the Ollama provider from the tabs of providers, + then we will enter in the Base URL for our Ollama. Since our Ollama is + running on our local machine, our Base URL is http://localhost:11434. + We will then hit the “refresh” button to have it fetch our models:

+
+ +
+

When selecting the dropdown for the “Embedding Model”, embedding models + should be at the top of the list, separated by regular chat models with + a horizontal line, as seen below:

+
+ +
+

After selecting an embedding model, embeddings should automatically begin + to be generated by checking the embedding statistics at the top of the + “AI/LLM” settings panel:

+
+ +
+

If you don't see any embeddings being created, you will want to scroll + to the bottom of the settings, and hit “Recreate All Embeddings”:

+
+ +
+

Creating the embeddings will take some time, and will be regenerated when + a Note is created, updated, or deleted (removed).

+

If for some reason you choose to change your embedding provider, or the + model used, you'll need to recreate all embeddings.

+

Tools

+

Tools are essentially functions that we provide to the various LLM providers, + and then LLMs can respond in a specific format that tells us what tool + function and parameters they would like to invoke. We then execute these + tools, and provide it as additional context in the Chat conversation. 

+

These are the tools that currently exist, and will certainly be updated + to be more effective (with even more to be added!):

+ +

When Tools are executed within your Chat, you'll see output like the following:

+
+ +
+

You don't need to tell the LLM to execute a certain tool, it should “smartly” + call tools and automatically execute them as needed.

+

Overview

+

Now that you know about embeddings and tools, you can go ahead and + use the “Chat with Notes” button, where you can start chatting!:

+
+ +
+

If you don't see the “Chat with Notes” button on your side launchbar, + you might need to move it from the “Available Launchers” section to the + “Visible Launchers” section:

+
+ +
\ No newline at end of file diff --git a/src/public/app/doc_notes/en/User Guide/User Guide/AI/Introduction_image.png b/src/public/app/doc_notes/en/User Guide/User Guide/AI/Introduction_image.png new file mode 100644 index 000000000..5a8657faf Binary files /dev/null and b/src/public/app/doc_notes/en/User Guide/User Guide/AI/Introduction_image.png differ diff --git a/src/public/app/entities/fnote.ts b/src/public/app/entities/fnote.ts index 3f09b4878..e968dcae9 100644 --- a/src/public/app/entities/fnote.ts +++ b/src/public/app/entities/fnote.ts @@ -28,7 +28,8 @@ const NOTE_TYPE_ICONS = { doc: "bx bxs-file-doc", contentWidget: "bx bxs-widget", mindMap: "bx bx-sitemap", - geoMap: "bx bx-map-alt" + geoMap: "bx bx-map-alt", + aiChat: "bx bx-bot" }; /** @@ -36,7 +37,7 @@ const NOTE_TYPE_ICONS = { * end user. Those types should be used only for checking against, they are * not for direct use. */ -export type NoteType = "file" | "image" | "search" | "noteMap" | "launcher" | "doc" | "contentWidget" | "text" | "relationMap" | "render" | "canvas" | "mermaid" | "book" | "webView" | "code" | "mindMap" | "geoMap"; +export type NoteType = "file" | "image" | "search" | "noteMap" | "launcher" | "doc" | "contentWidget" | "text" | "relationMap" | "render" | "canvas" | "mermaid" | "book" | "webView" | "code" | "mindMap" | "geoMap" | "aiChat"; export interface NotePathRecord { isArchived: boolean; diff --git a/src/public/app/services/ws.ts b/src/public/app/services/ws.ts index 7f735c458..db79b1a5e 100644 --- a/src/public/app/services/ws.ts +++ b/src/public/app/services/ws.ts @@ -127,6 +127,49 @@ async function handleMessage(event: MessageEvent) { appContext.triggerEvent("apiLogMessages", { noteId: message.noteId, messages: message.messages }); } else if (message.type === "toast") { toastService.showMessage(message.message); + } else if (message.type === "llm-stream") { + // ENHANCED LOGGING FOR DEBUGGING + console.log(`[WS-CLIENT] >>> RECEIVED LLM STREAM MESSAGE <<<`); + 
console.log(`[WS-CLIENT] Message details: sessionId=${message.sessionId}, hasContent=${!!message.content}, contentLength=${message.content ? message.content.length : 0}, hasThinking=${!!message.thinking}, hasToolExecution=${!!message.toolExecution}, isDone=${!!message.done}`); + + if (message.content) { + console.log(`[WS-CLIENT] CONTENT PREVIEW: "${message.content.substring(0, 50)}..."`); + } + + // Create the event with detailed logging + console.log(`[WS-CLIENT] Creating CustomEvent 'llm-stream-message'`); + const llmStreamEvent = new CustomEvent('llm-stream-message', { detail: message }); + + // Dispatch to multiple targets to ensure delivery + try { + console.log(`[WS-CLIENT] Dispatching event to window`); + window.dispatchEvent(llmStreamEvent); + console.log(`[WS-CLIENT] Event dispatched to window`); + + // Also try document for completeness + console.log(`[WS-CLIENT] Dispatching event to document`); + document.dispatchEvent(new CustomEvent('llm-stream-message', { detail: message })); + console.log(`[WS-CLIENT] Event dispatched to document`); + } catch (err) { + console.error(`[WS-CLIENT] Error dispatching event:`, err); + } + + // Debug current listeners (though we can't directly check for specific event listeners) + console.log(`[WS-CLIENT] Active event listeners should receive this message now`); + + // Detailed logging based on message type + if (message.content) { + console.log(`[WS-CLIENT] Content message: ${message.content.length} chars`); + } else if (message.thinking) { + console.log(`[WS-CLIENT] Thinking update: "${message.thinking}"`); + } else if (message.toolExecution) { + console.log(`[WS-CLIENT] Tool execution: action=${message.toolExecution.action}, tool=${message.toolExecution.tool || 'unknown'}`); + if (message.toolExecution.result) { + console.log(`[WS-CLIENT] Tool result preview: "${String(message.toolExecution.result).substring(0, 50)}..."`); + } + } else if (message.done) { + console.log(`[WS-CLIENT] Completion signal received`); + } } 
else if (message.type === "execute-script") { // TODO: Remove after porting the file // @ts-ignore diff --git a/src/public/app/widgets/buttons/ai_chat_button.ts b/src/public/app/widgets/buttons/ai_chat_button.ts new file mode 100644 index 000000000..5ad3f8033 --- /dev/null +++ b/src/public/app/widgets/buttons/ai_chat_button.ts @@ -0,0 +1,26 @@ +import type { EventData } from "../../components/app_context.js"; +import type FNote from "../../entities/fnote.js"; +import options from "../../services/options.js"; +import CommandButtonWidget from "./command_button.js"; + +export default class AiChatButton extends CommandButtonWidget { + + constructor(note: FNote) { + super(); + + this.command("createAiChat") + .title(() => note.title) + .icon(() => note.getIcon()) + .class("launcher-button"); + } + + isEnabled() { + return options.get("aiEnabled") === "true"; + } + + entitiesReloadedEvent({ loadResults }: EventData<"entitiesReloaded">) { + if (loadResults.isOptionReloaded("aiEnabled")) { + this.refresh(); + } + } +} diff --git a/src/public/app/widgets/buttons/create_ai_chat_button.ts b/src/public/app/widgets/buttons/create_ai_chat_button.ts new file mode 100644 index 000000000..1ccd52cda --- /dev/null +++ b/src/public/app/widgets/buttons/create_ai_chat_button.ts @@ -0,0 +1,27 @@ +import { t } from "../../services/i18n.js"; +import options from "../../services/options.js"; +import CommandButtonWidget from "./command_button.js"; + +export default class CreateAiChatButton extends CommandButtonWidget { + constructor() { + super(); + + this.icon("bx bx-bot") + .title(t("ai.create_new_ai_chat")) + .titlePlacement("bottom") + .command("createAiChat") + .class("icon-action"); + } + + isEnabled() { + return options.get("aiEnabled") === "true"; + } + + async refreshWithNote() { + if (this.isEnabled()) { + this.$widget.show(); + } else { + this.$widget.hide(); + } + } +} diff --git a/src/public/app/widgets/containers/launcher.ts b/src/public/app/widgets/containers/launcher.ts index 
86fbabb96..e1bfc5a8b 100644 --- a/src/public/app/widgets/containers/launcher.ts +++ b/src/public/app/widgets/containers/launcher.ts @@ -13,6 +13,7 @@ import HistoryNavigationButton from "../buttons/history_navigation.js"; import QuickSearchLauncherWidget from "../quick_search_launcher.js"; import type FNote from "../../entities/fnote.js"; import type { CommandNames } from "../../components/app_context.js"; +import AiChatButton from "../buttons/ai_chat_button.js"; interface InnerWidget extends BasicWidget { settings?: { @@ -123,6 +124,8 @@ export default class LauncherWidget extends BasicWidget { return new TodayLauncher(note); case "quickSearch": return new QuickSearchLauncherWidget(this.isHorizontalLayout); + case "aiChatLauncher": + return new AiChatButton(note); default: throw new Error(`Unrecognized builtin widget ${builtinWidget} for launcher ${note.noteId} "${note.title}"`); } diff --git a/src/public/app/widgets/floating_buttons/help_button.ts b/src/public/app/widgets/floating_buttons/help_button.ts index 3c6d969eb..ac54b9f8d 100644 --- a/src/public/app/widgets/floating_buttons/help_button.ts +++ b/src/public/app/widgets/floating_buttons/help_button.ts @@ -28,7 +28,8 @@ export const byNoteType: Record, string | null> = { render: null, search: null, text: null, - webView: null + webView: null, + aiChat: null }; export const byBookType: Record = { diff --git a/src/public/app/widgets/llm_chat/communication.ts b/src/public/app/widgets/llm_chat/communication.ts new file mode 100644 index 000000000..bb58a47b2 --- /dev/null +++ b/src/public/app/widgets/llm_chat/communication.ts @@ -0,0 +1,495 @@ +/** + * Communication functions for LLM Chat + */ +import server from "../../services/server.js"; +import type { SessionResponse } from "./types.js"; + +/** + * Create a new chat session + */ +export async function createChatSession(currentNoteId?: string): Promise<{chatNoteId: string | null, noteId: string | null}> { + try { + const resp = await server.post('llm/chat', { + 
title: 'Note Chat', + currentNoteId: currentNoteId // Pass the current note ID if available + }); + + if (resp && resp.id) { + // The backend might provide the noteId separately from the chatNoteId + // If noteId is provided, use it; otherwise, we'll need to query for it separately + return { + chatNoteId: resp.id, + noteId: resp.noteId || null + }; + } + } catch (error) { + console.error('Failed to create chat session:', error); + } + + return { + chatNoteId: null, + noteId: null + }; +} + +/** + * Check if a session exists + */ +export async function checkSessionExists(chatNoteId: string): Promise { + try { + // Validate that we have a proper note ID format, not a session ID + // Note IDs in Trilium are typically longer or in a different format + if (chatNoteId && chatNoteId.length === 16 && /^[A-Za-z0-9]+$/.test(chatNoteId)) { + console.warn(`Invalid note ID format detected: ${chatNoteId} appears to be a legacy session ID`); + return false; + } + + const sessionCheck = await server.getWithSilentNotFound(`llm/chat/${chatNoteId}`); + return !!(sessionCheck && sessionCheck.id); + } catch (error: any) { + console.log(`Error checking chat note ${chatNoteId}:`, error); + return false; + } +} + +/** + * Set up streaming response via WebSocket + */ +export async function setupStreamingResponse( + chatNoteId: string, + messageParams: any, + onContentUpdate: (content: string, isDone?: boolean) => void, + onThinkingUpdate: (thinking: string) => void, + onToolExecution: (toolData: any) => void, + onComplete: () => void, + onError: (error: Error) => void +): Promise { + // Validate that we have a proper note ID format, not a session ID + if (chatNoteId && chatNoteId.length === 16 && /^[A-Za-z0-9]+$/.test(chatNoteId)) { + console.error(`Invalid note ID format: ${chatNoteId} appears to be a legacy session ID`); + onError(new Error("Invalid note ID format - using a legacy session ID")); + return; + } + + return new Promise((resolve, reject) => { + let assistantResponse = ''; + 
let postToolResponse = ''; // Separate accumulator for post-tool execution content + let receivedAnyContent = false; + let receivedPostToolContent = false; // Track if we've started receiving post-tool content + let timeoutId: number | null = null; + let initialTimeoutId: number | null = null; + let cleanupTimeoutId: number | null = null; + let receivedAnyMessage = false; + let toolsExecuted = false; // Flag to track if tools were executed in this session + let toolExecutionCompleted = false; // Flag to track if tool execution is completed + let eventListener: ((event: Event) => void) | null = null; + let lastMessageTimestamp = 0; + + // Create a unique identifier for this response process + const responseId = `llm-stream-${Date.now()}-${Math.floor(Math.random() * 1000)}`; + console.log(`[${responseId}] Setting up WebSocket streaming for chat note ${chatNoteId}`); + + // Send the initial request to initiate streaming + (async () => { + try { + const streamResponse = await server.post(`llm/chat/${chatNoteId}/messages/stream`, { + content: messageParams.content, + useAdvancedContext: messageParams.useAdvancedContext, + showThinking: messageParams.showThinking, + options: { + temperature: 0.7, + maxTokens: 2000 + } + }); + + if (!streamResponse || !streamResponse.success) { + console.error(`[${responseId}] Failed to initiate streaming`); + reject(new Error('Failed to initiate streaming')); + return; + } + + console.log(`[${responseId}] Streaming initiated successfully`); + } catch (error) { + console.error(`[${responseId}] Error initiating streaming:`, error); + reject(error); + return; + } + })(); + + // Function to safely perform cleanup + const performCleanup = () => { + if (cleanupTimeoutId) { + window.clearTimeout(cleanupTimeoutId); + cleanupTimeoutId = null; + } + + console.log(`[${responseId}] Performing final cleanup of event listener`); + cleanupEventListener(eventListener); + onComplete(); + resolve(); + }; + + // Function to schedule cleanup with ability to 
cancel + const scheduleCleanup = (delay: number) => { + // Clear any existing cleanup timeout + if (cleanupTimeoutId) { + window.clearTimeout(cleanupTimeoutId); + } + + console.log(`[${responseId}] Scheduling listener cleanup in ${delay}ms`); + + // Set new cleanup timeout + cleanupTimeoutId = window.setTimeout(() => { + // Only clean up if no messages received recently (in last 2 seconds) + const timeSinceLastMessage = Date.now() - lastMessageTimestamp; + if (timeSinceLastMessage > 2000) { + performCleanup(); + } else { + console.log(`[${responseId}] Received message recently, delaying cleanup`); + // Reschedule cleanup + scheduleCleanup(2000); + } + }, delay); + }; + + // Create a message handler for CustomEvents + eventListener = (event: Event) => { + const customEvent = event as CustomEvent; + const message = customEvent.detail; + + // Only process messages for our chat note + if (!message || message.chatNoteId !== chatNoteId) { + return; + } + + // Update last message timestamp + lastMessageTimestamp = Date.now(); + + // Cancel any pending cleanup when we receive a new message + if (cleanupTimeoutId) { + console.log(`[${responseId}] Cancelling scheduled cleanup due to new message`); + window.clearTimeout(cleanupTimeoutId); + cleanupTimeoutId = null; + } + + console.log(`[${responseId}] LLM Stream message received via CustomEvent: chatNoteId=${chatNoteId}, content=${!!message.content}, contentLength=${message.content?.length || 0}, thinking=${!!message.thinking}, toolExecution=${!!message.toolExecution}, done=${!!message.done}, type=${message.type || 'llm-stream'}`); + + // Mark first message received + if (!receivedAnyMessage) { + receivedAnyMessage = true; + console.log(`[${responseId}] First message received for chat note ${chatNoteId}`); + + // Clear the initial timeout since we've received a message + if (initialTimeoutId !== null) { + window.clearTimeout(initialTimeoutId); + initialTimeoutId = null; + } + } + + // Handle specific message types + if 
(message.type === 'tool_execution_start') { + toolsExecuted = true; // Mark that tools were executed + onThinkingUpdate('Executing tools...'); + // Also trigger tool execution UI with a specific format + onToolExecution({ + action: 'start', + tool: 'tools', + result: 'Executing tools...' + }); + return; // Skip accumulating content from this message + } + + if (message.type === 'tool_result' && message.toolExecution) { + toolsExecuted = true; // Mark that tools were executed + console.log(`[${responseId}] Processing tool result: ${JSON.stringify(message.toolExecution)}`); + + // If tool execution doesn't have an action, add 'result' as the default + if (!message.toolExecution.action) { + message.toolExecution.action = 'result'; + } + + // First send a 'start' action to ensure the container is created + onToolExecution({ + action: 'start', + tool: 'tools', + result: 'Tool execution initialized' + }); + + // Then send the actual tool execution data + onToolExecution(message.toolExecution); + + // Mark tool execution as completed if this is a result or error + if (message.toolExecution.action === 'result' || message.toolExecution.action === 'complete' || message.toolExecution.action === 'error') { + toolExecutionCompleted = true; + console.log(`[${responseId}] Tool execution completed`); + } + + return; // Skip accumulating content from this message + } + + if (message.type === 'tool_execution_error' && message.toolExecution) { + toolsExecuted = true; // Mark that tools were executed + toolExecutionCompleted = true; // Mark tool execution as completed + onToolExecution({ + ...message.toolExecution, + action: 'error', + error: message.toolExecution.error || 'Unknown error during tool execution' + }); + return; // Skip accumulating content from this message + } + + if (message.type === 'tool_completion_processing') { + toolsExecuted = true; // Mark that tools were executed + toolExecutionCompleted = true; // Tools are done, now processing the result + 
onThinkingUpdate('Generating response with tool results...'); + // Also trigger tool execution UI with a specific format + onToolExecution({ + action: 'generating', + tool: 'tools', + result: 'Generating response with tool results...' + }); + return; // Skip accumulating content from this message + } + + // Handle content updates + if (message.content) { + console.log(`[${responseId}] Received content chunk of length ${message.content.length}, preview: "${message.content.substring(0, 50)}${message.content.length > 50 ? '...' : ''}"`); + + // If tools were executed and completed, and we're now getting new content, + // this is likely the final response after tool execution from Anthropic + if (toolsExecuted && toolExecutionCompleted && message.content) { + console.log(`[${responseId}] Post-tool execution content detected`); + + // If this is the first post-tool chunk, indicate we're starting a new response + if (!receivedPostToolContent) { + receivedPostToolContent = true; + postToolResponse = ''; // Clear any previous post-tool response + console.log(`[${responseId}] First post-tool content chunk, starting fresh accumulation`); + } + + // Accumulate post-tool execution content + postToolResponse += message.content; + console.log(`[${responseId}] Accumulated post-tool content, now ${postToolResponse.length} chars`); + + // Update the UI with the accumulated post-tool content + // This replaces the pre-tool content with our accumulated post-tool content + onContentUpdate(postToolResponse, message.done || false); + } else { + // Standard content handling for non-tool cases or initial tool response + + // Check if this is a duplicated message containing the same content we already have + if (message.done && assistantResponse.includes(message.content)) { + console.log(`[${responseId}] Ignoring duplicated content in done message`); + } else { + // Add to our accumulated response + assistantResponse += message.content; + } + + // Update the UI immediately with each chunk 
+ onContentUpdate(assistantResponse, message.done || false); + } + + receivedAnyContent = true; + + // Reset timeout since we got content + if (timeoutId !== null) { + window.clearTimeout(timeoutId); + } + + // Set new timeout + timeoutId = window.setTimeout(() => { + console.warn(`[${responseId}] Stream timeout for chat note ${chatNoteId}`); + + // Clean up + performCleanup(); + reject(new Error('Stream timeout')); + }, 30000); + } + + // Handle tool execution updates (legacy format and standard format with llm-stream type) + if (message.toolExecution) { + // Only process if we haven't already handled this message via specific message types + if (message.type === 'llm-stream' || !message.type) { + console.log(`[${responseId}] Received tool execution update: action=${message.toolExecution.action || 'unknown'}`); + toolsExecuted = true; // Mark that tools were executed + + // Mark tool execution as completed if this is a result or error + if (message.toolExecution.action === 'result' || + message.toolExecution.action === 'complete' || + message.toolExecution.action === 'error') { + toolExecutionCompleted = true; + console.log(`[${responseId}] Tool execution completed via toolExecution message`); + } + + onToolExecution(message.toolExecution); + } + } + + // Handle tool calls from the raw data or direct in message (OpenAI format) + const toolCalls = message.tool_calls || (message.raw && message.raw.tool_calls); + if (toolCalls && Array.isArray(toolCalls)) { + console.log(`[${responseId}] Received tool calls: ${toolCalls.length} tools`); + toolsExecuted = true; // Mark that tools were executed + + // First send a 'start' action to ensure the container is created + onToolExecution({ + action: 'start', + tool: 'tools', + result: 'Tool execution initialized' + }); + + // Then process each tool call + for (const toolCall of toolCalls) { + let args = toolCall.function?.arguments || {}; + + // Try to parse arguments if they're a string + if (typeof args === 'string') { + 
try { + args = JSON.parse(args); + } catch (e) { + console.log(`[${responseId}] Could not parse tool arguments as JSON: ${e}`); + args = { raw: args }; + } + } + + onToolExecution({ + action: 'executing', + tool: toolCall.function?.name || 'unknown', + toolCallId: toolCall.id, + args: args + }); + } + } + + // Handle thinking state updates + if (message.thinking) { + console.log(`[${responseId}] Received thinking update: ${message.thinking.substring(0, 50)}...`); + onThinkingUpdate(message.thinking); + } + + // Handle completion + if (message.done) { + console.log(`[${responseId}] Stream completed for chat note ${chatNoteId}, has content: ${!!message.content}, content length: ${message.content?.length || 0}, current response: ${assistantResponse.length} chars`); + + // Dump message content to console for debugging + if (message.content) { + console.log(`[${responseId}] CONTENT IN DONE MESSAGE (first 200 chars): "${message.content.substring(0, 200)}..."`); + + // Check if the done message contains the exact same content as our accumulated response + // We normalize by removing whitespace to avoid false negatives due to spacing differences + const normalizedMessage = message.content.trim(); + const normalizedResponse = assistantResponse.trim(); + + if (normalizedMessage === normalizedResponse) { + console.log(`[${responseId}] Final message is identical to accumulated response, no need to update`); + } + // If the done message is longer but contains our accumulated response, use the done message + else if (normalizedMessage.includes(normalizedResponse) && normalizedMessage.length > normalizedResponse.length) { + console.log(`[${responseId}] Final message is more complete than accumulated response, using it`); + assistantResponse = message.content; + } + // If the done message is different and not already included, append it to avoid duplication + else if (!normalizedResponse.includes(normalizedMessage) && normalizedMessage.length > 0) { + console.log(`[${responseId}] 
Final message has unique content, using it`); + assistantResponse = message.content; + } + // Otherwise, we already have the content accumulated, so no need to update + else { + console.log(`[${responseId}] Already have this content accumulated, not updating`); + } + } + + // Clear timeout if set + if (timeoutId !== null) { + window.clearTimeout(timeoutId); + timeoutId = null; + } + + // Always mark as done when we receive the done flag + onContentUpdate(assistantResponse, true); + + // Set a longer delay before cleanup to allow for post-tool execution messages + // Especially important for Anthropic which may send final message after tool execution + const cleanupDelay = toolsExecuted ? 15000 : 1000; // 15 seconds if tools were used, otherwise 1 second + console.log(`[${responseId}] Setting cleanup delay of ${cleanupDelay}ms since toolsExecuted=${toolsExecuted}`); + scheduleCleanup(cleanupDelay); + } + }; + + // Register event listener for the custom event + try { + window.addEventListener('llm-stream-message', eventListener); + console.log(`[${responseId}] Event listener added for llm-stream-message events`); + } catch (err) { + console.error(`[${responseId}] Error setting up event listener:`, err); + reject(err); + return; + } + + // Set initial timeout for receiving any message + initialTimeoutId = window.setTimeout(() => { + console.warn(`[${responseId}] No messages received for initial period in chat note ${chatNoteId}`); + if (!receivedAnyMessage) { + console.error(`[${responseId}] WebSocket connection not established for chat note ${chatNoteId}`); + + if (timeoutId !== null) { + window.clearTimeout(timeoutId); + } + + // Clean up + cleanupEventListener(eventListener); + + // Show error message to user + reject(new Error('WebSocket connection not established')); + } + }, 10000); + }); +} + +/** + * Clean up an event listener + */ +function cleanupEventListener(listener: ((event: Event) => void) | null): void { + if (listener) { + try { + 
window.removeEventListener('llm-stream-message', listener); + console.log(`Successfully removed event listener`); + } catch (err) { + console.error(`Error removing event listener:`, err); + } + } +} + +/** + * Get a direct response from the server without streaming + */ +export async function getDirectResponse(chatNoteId: string, messageParams: any): Promise { + try { + // Validate that we have a proper note ID format, not a session ID + if (chatNoteId && chatNoteId.length === 16 && /^[A-Za-z0-9]+$/.test(chatNoteId)) { + console.error(`Invalid note ID format: ${chatNoteId} appears to be a legacy session ID`); + throw new Error("Invalid note ID format - using a legacy session ID"); + } + + const postResponse = await server.post(`llm/chat/${chatNoteId}/messages`, { + message: messageParams.content, + includeContext: messageParams.useAdvancedContext, + options: { + temperature: 0.7, + maxTokens: 2000 + } + }); + + return postResponse; + } catch (error) { + console.error('Error getting direct response:', error); + throw error; + } +} + +/** + * Get embedding statistics + */ +export async function getEmbeddingStats(): Promise { + return server.get('llm/embeddings/stats'); +} diff --git a/src/public/app/widgets/llm_chat/index.ts b/src/public/app/widgets/llm_chat/index.ts new file mode 100644 index 000000000..8f0eb9f2d --- /dev/null +++ b/src/public/app/widgets/llm_chat/index.ts @@ -0,0 +1,6 @@ +/** + * LLM Chat Panel Widget Module + */ +import LlmChatPanel from './llm_chat_panel.js'; + +export default LlmChatPanel; diff --git a/src/public/app/widgets/llm_chat/llm_chat_panel.ts b/src/public/app/widgets/llm_chat/llm_chat_panel.ts new file mode 100644 index 000000000..3554773d7 --- /dev/null +++ b/src/public/app/widgets/llm_chat/llm_chat_panel.ts @@ -0,0 +1,1234 @@ +/** + * LLM Chat Panel Widget + */ +import BasicWidget from "../basic_widget.js"; +import toastService from "../../services/toast.js"; +import appContext from "../../components/app_context.js"; +import server 
from "../../services/server.js"; + +import { TPL, addMessageToChat, showSources, hideSources, showLoadingIndicator, hideLoadingIndicator } from "./ui.js"; +import { formatMarkdown } from "./utils.js"; +import { createChatSession, checkSessionExists, setupStreamingResponse, getDirectResponse } from "./communication.js"; +import { extractInChatToolSteps } from "./message_processor.js"; +import { validateEmbeddingProviders } from "./validation.js"; +import type { MessageData, ToolExecutionStep, ChatData } from "./types.js"; +import { applySyntaxHighlight } from "../../services/syntax_highlight.js"; + +import "../../../stylesheets/llm_chat.css"; + +export default class LlmChatPanel extends BasicWidget { + private noteContextChatMessages!: HTMLElement; + private noteContextChatForm!: HTMLFormElement; + private noteContextChatInput!: HTMLTextAreaElement; + private noteContextChatSendButton!: HTMLButtonElement; + private chatContainer!: HTMLElement; + private loadingIndicator!: HTMLElement; + private sourcesList!: HTMLElement; + private sourcesContainer!: HTMLElement; + private sourcesCount!: HTMLElement; + private useAdvancedContextCheckbox!: HTMLInputElement; + private showThinkingCheckbox!: HTMLInputElement; + private validationWarning!: HTMLElement; + private chatNoteId: string | null = null; + private noteId: string | null = null; // The actual noteId for the Chat Note + private currentNoteId: string | null = null; + private _messageHandlerId: number | null = null; + private _messageHandler: any = null; + + // Callbacks for data persistence + private onSaveData: ((data: any) => Promise) | null = null; + private onGetData: (() => Promise) | null = null; + private messages: MessageData[] = []; + private sources: Array<{noteId: string; title: string; similarity?: number; content?: string}> = []; + private metadata: { + model?: string; + provider?: string; + temperature?: number; + maxTokens?: number; + toolExecutions?: Array<{ + id: string; + name: string; + arguments: 
any; + result: any; + error?: string; + timestamp: string; + }>; + lastUpdated?: string; + usage?: { + promptTokens?: number; + completionTokens?: number; + totalTokens?: number; + }; + } = { + model: 'default', + temperature: 0.7, + toolExecutions: [] + }; + + // Public getters and setters for private properties + public getCurrentNoteId(): string | null { + return this.currentNoteId; + } + + public setCurrentNoteId(noteId: string | null): void { + this.currentNoteId = noteId; + } + + public getMessages(): MessageData[] { + return this.messages; + } + + public setMessages(messages: MessageData[]): void { + this.messages = messages; + } + + public getChatNoteId(): string | null { + return this.chatNoteId; + } + + public setChatNoteId(chatNoteId: string | null): void { + this.chatNoteId = chatNoteId; + } + + public getNoteContextChatMessages(): HTMLElement { + return this.noteContextChatMessages; + } + + public clearNoteContextChatMessages(): void { + this.noteContextChatMessages.innerHTML = ''; + } + + doRender() { + this.$widget = $(TPL); + + const element = this.$widget[0]; + this.noteContextChatMessages = element.querySelector('.note-context-chat-messages') as HTMLElement; + this.noteContextChatForm = element.querySelector('.note-context-chat-form') as HTMLFormElement; + this.noteContextChatInput = element.querySelector('.note-context-chat-input') as HTMLTextAreaElement; + this.noteContextChatSendButton = element.querySelector('.note-context-chat-send-button') as HTMLButtonElement; + this.chatContainer = element.querySelector('.note-context-chat-container') as HTMLElement; + this.loadingIndicator = element.querySelector('.loading-indicator') as HTMLElement; + this.sourcesList = element.querySelector('.sources-list') as HTMLElement; + this.sourcesContainer = element.querySelector('.sources-container') as HTMLElement; + this.sourcesCount = element.querySelector('.sources-count') as HTMLElement; + this.useAdvancedContextCheckbox = 
element.querySelector('.use-advanced-context-checkbox') as HTMLInputElement; + this.showThinkingCheckbox = element.querySelector('.show-thinking-checkbox') as HTMLInputElement; + this.validationWarning = element.querySelector('.provider-validation-warning') as HTMLElement; + + // Set up event delegation for the settings link + this.validationWarning.addEventListener('click', (e) => { + const target = e.target as HTMLElement; + if (target.classList.contains('settings-link') || target.closest('.settings-link')) { + console.log('Settings link clicked, navigating to AI settings URL'); + window.location.href = '#root/_hidden/_options/_optionsAi'; + } + }); + + this.initializeEventListeners(); + + return this.$widget; + } + + cleanup() { + console.log(`LlmChatPanel cleanup called, removing any active WebSocket subscriptions`); + this._messageHandler = null; + this._messageHandlerId = null; + } + + /** + * Set the callbacks for data persistence + */ + setDataCallbacks( + saveDataCallback: (data: any) => Promise, + getDataCallback: () => Promise + ) { + this.onSaveData = saveDataCallback; + this.onGetData = getDataCallback; + } + + /** + * Save current chat data to the note attribute + */ + async saveCurrentData() { + if (!this.onSaveData) { + return; + } + + try { + // Extract current tool execution steps if any exist + const toolSteps = extractInChatToolSteps(this.noteContextChatMessages); + + // Get tool executions from both UI and any cached executions in metadata + let toolExecutions: Array<{ + id: string; + name: string; + arguments: any; + result: any; + error?: string; + timestamp: string; + }> = []; + + // First include any tool executions already in metadata (from streaming events) + if (this.metadata?.toolExecutions && Array.isArray(this.metadata.toolExecutions)) { + toolExecutions = [...this.metadata.toolExecutions]; + console.log(`Including ${toolExecutions.length} tool executions from metadata`); + } + + // Also extract any visible tool steps from the UI + 
const extractedExecutions = toolSteps.map(step => { + // Parse tool execution information + if (step.type === 'tool-execution') { + try { + const content = JSON.parse(step.content); + return { + id: content.toolCallId || `tool-${Date.now()}-${Math.random().toString(36).substring(2, 7)}`, + name: content.tool || 'unknown', + arguments: content.args || {}, + result: content.result || {}, + error: content.error, + timestamp: new Date().toISOString() + }; + } catch (e) { + // If we can't parse it, create a basic record + return { + id: `tool-${Date.now()}-${Math.random().toString(36).substring(2, 7)}`, + name: 'unknown', + arguments: {}, + result: step.content, + timestamp: new Date().toISOString() + }; + } + } else if (step.type === 'result' && step.name) { + // Handle result steps with a name + return { + id: `tool-${Date.now()}-${Math.random().toString(36).substring(2, 7)}`, + name: step.name, + arguments: {}, + result: step.content, + timestamp: new Date().toISOString() + }; + } + return { + id: `tool-${Date.now()}-${Math.random().toString(36).substring(2, 7)}`, + name: 'unknown', + arguments: {}, + result: 'Unrecognized tool step', + timestamp: new Date().toISOString() + }; + }); + + // Merge the tool executions, keeping only unique IDs + const existingIds = new Set(toolExecutions.map((t: {id: string}) => t.id)); + for (const exec of extractedExecutions) { + if (!existingIds.has(exec.id)) { + toolExecutions.push(exec); + existingIds.add(exec.id); + } + } + + const dataToSave: ChatData = { + messages: this.messages, + chatNoteId: this.chatNoteId, + noteId: this.noteId, + toolSteps: toolSteps, + // Add sources if we have them + sources: this.sources || [], + // Add metadata + metadata: { + model: this.metadata?.model || 'default', + provider: this.metadata?.provider || undefined, + temperature: this.metadata?.temperature || 0.7, + lastUpdated: new Date().toISOString(), + // Add tool executions + toolExecutions: toolExecutions + } + }; + + console.log(`Saving chat 
data with chatNoteId: ${this.chatNoteId}, noteId: ${this.noteId}, ${toolSteps.length} tool steps, ${this.sources?.length || 0} sources, ${toolExecutions.length} tool executions`); + + // Save the data to the note attribute via the callback + // This is the ONLY place we should save data, letting the container widget handle persistence + await this.onSaveData(dataToSave); + } catch (error) { + console.error('Error saving chat data:', error); + } + } + + /** + * Load saved chat data from the note attribute + */ + async loadSavedData(): Promise { + if (!this.onGetData) { + return false; + } + + try { + const savedData = await this.onGetData() as ChatData; + + if (savedData?.messages?.length > 0) { + // Load messages + this.messages = savedData.messages; + + // Clear and rebuild the chat UI + this.noteContextChatMessages.innerHTML = ''; + + this.messages.forEach(message => { + const role = message.role as 'user' | 'assistant'; + this.addMessageToChat(role, message.content); + }); + + // Restore tool execution steps if they exist + if (savedData.toolSteps && Array.isArray(savedData.toolSteps) && savedData.toolSteps.length > 0) { + console.log(`Restoring ${savedData.toolSteps.length} saved tool steps`); + this.restoreInChatToolSteps(savedData.toolSteps); + } + + // Load sources if available + if (savedData.sources && Array.isArray(savedData.sources)) { + this.sources = savedData.sources; + console.log(`Loaded ${this.sources.length} sources from saved data`); + + // Show sources in the UI if they exist + if (this.sources.length > 0) { + this.showSources(this.sources); + } + } + + // Load metadata if available + if (savedData.metadata) { + this.metadata = { + ...this.metadata, + ...savedData.metadata + }; + + // Ensure tool executions are loaded + if (savedData.metadata.toolExecutions && Array.isArray(savedData.metadata.toolExecutions)) { + console.log(`Loaded ${savedData.metadata.toolExecutions.length} tool executions from saved data`); + + if 
(!this.metadata.toolExecutions) { + this.metadata.toolExecutions = []; + } + + // Make sure we don't lose any tool executions + this.metadata.toolExecutions = savedData.metadata.toolExecutions; + } + + console.log(`Loaded metadata from saved data:`, this.metadata); + } + + // Load Chat Note ID if available + if (savedData.noteId) { + console.log(`Using noteId as Chat Note ID: ${savedData.noteId}`); + this.chatNoteId = savedData.noteId; + this.noteId = savedData.noteId; + } else { + console.log(`No noteId found in saved data, cannot load chat session`); + return false; + } + + return true; + } + } catch (error) { + console.error('Failed to load saved chat data', error); + } + + return false; + } + + /** + * Restore tool execution steps in the chat UI + */ + private restoreInChatToolSteps(steps: ToolExecutionStep[]) { + if (!steps || steps.length === 0) return; + + // Create the tool execution element + const toolExecutionElement = document.createElement('div'); + toolExecutionElement.className = 'chat-tool-execution mb-3'; + + // Insert before the assistant message if it exists + const assistantMessage = this.noteContextChatMessages.querySelector('.assistant-message:last-child'); + if (assistantMessage) { + this.noteContextChatMessages.insertBefore(toolExecutionElement, assistantMessage); + } else { + // Otherwise append to the end + this.noteContextChatMessages.appendChild(toolExecutionElement); + } + + // Fill with tool execution content + toolExecutionElement.innerHTML = ` +
+ + Tool Execution + +
+
+
+ ${this.renderToolStepsHtml(steps)} +
+
+ `; + + // Add event listener for the toggle button + const toggleButton = toolExecutionElement.querySelector('.tool-execution-toggle'); + if (toggleButton) { + toggleButton.addEventListener('click', () => { + const stepsContainer = toolExecutionElement.querySelector('.tool-execution-container'); + const icon = toggleButton.querySelector('i'); + + if (stepsContainer) { + if (stepsContainer.classList.contains('collapsed')) { + // Expand + stepsContainer.classList.remove('collapsed'); + (stepsContainer as HTMLElement).style.display = 'block'; + if (icon) { + icon.className = 'bx bx-chevron-down'; + } + } else { + // Collapse + stepsContainer.classList.add('collapsed'); + (stepsContainer as HTMLElement).style.display = 'none'; + if (icon) { + icon.className = 'bx bx-chevron-right'; + } + } + } + }); + } + + // Add click handler for the header to toggle expansion as well + const header = toolExecutionElement.querySelector('.tool-execution-header'); + if (header) { + header.addEventListener('click', (e) => { + // Only toggle if the click isn't on the toggle button itself + const target = e.target as HTMLElement; + if (target && !target.closest('.tool-execution-toggle')) { + const toggleButton = toolExecutionElement.querySelector('.tool-execution-toggle'); + toggleButton?.dispatchEvent(new Event('click')); + } + }); + (header as HTMLElement).style.cursor = 'pointer'; + } + } + + /** + * Render HTML for tool execution steps + */ + private renderToolStepsHtml(steps: ToolExecutionStep[]): string { + if (!steps || steps.length === 0) return ''; + + return steps.map(step => { + let icon = 'bx-info-circle'; + let className = 'info'; + let content = ''; + + if (step.type === 'executing') { + icon = 'bx-code-block'; + className = 'executing'; + content = `
${step.content || 'Executing tools...'}
`; + } else if (step.type === 'result') { + icon = 'bx-terminal'; + className = 'result'; + content = ` +
Tool: ${step.name || 'unknown'}
+
${step.content || ''}
+ `; + } else if (step.type === 'error') { + icon = 'bx-error-circle'; + className = 'error'; + content = ` +
Tool: ${step.name || 'unknown'}
+
${step.content || 'Error occurred'}
+ `; + } else if (step.type === 'generating') { + icon = 'bx-message-dots'; + className = 'generating'; + content = `
${step.content || 'Generating response...'}
`; + } + + return ` +
+
+ + ${content} +
+
+ `; + }).join(''); + } + + async refresh() { + if (!this.isVisible()) { + return; + } + + // Check for any provider validation issues when refreshing + await validateEmbeddingProviders(this.validationWarning); + + // Get current note context if needed + const currentActiveNoteId = appContext.tabManager.getActiveContext()?.note?.noteId || null; + + // If we're switching to a different note, we need to reset + if (this.currentNoteId !== currentActiveNoteId) { + console.log(`Note ID changed from ${this.currentNoteId} to ${currentActiveNoteId}, resetting chat panel`); + + // Reset the UI and data + this.noteContextChatMessages.innerHTML = ''; + this.messages = []; + this.chatNoteId = null; + this.noteId = null; // Also reset the chat note ID + this.hideSources(); // Hide any sources from previous note + + // Update our current noteId + this.currentNoteId = currentActiveNoteId; + } + + // Always try to load saved data for the current note + const hasSavedData = await this.loadSavedData(); + + // Only create a new session if we don't have a session or saved data + if (!this.chatNoteId || !this.noteId || !hasSavedData) { + // Create a new chat session + await this.createChatSession(); + } + } + + /** + * Create a new chat session + */ + private async createChatSession() { + try { + // Create a new chat session, passing the current note ID if it exists + const { chatNoteId, noteId } = await createChatSession( + this.currentNoteId ? 
this.currentNoteId : undefined + ); + + if (chatNoteId) { + // If we got back an ID from the API, use it + this.chatNoteId = chatNoteId; + + // For new sessions, the noteId should equal the chatNoteId + // This ensures we're using the note ID consistently + this.noteId = noteId || chatNoteId; + + console.log(`Created new chat session with noteId: ${this.noteId}`); + } else { + throw new Error("Failed to create chat session - no ID returned"); + } + + // Save the note ID as the session identifier + await this.saveCurrentData(); + } catch (error) { + console.error('Error creating chat session:', error); + toastService.showError('Failed to create chat session'); + } + } + + /** + * Handle sending a user message to the LLM service + */ + private async sendMessage(content: string) { + if (!content.trim()) return; + + // Add the user message to the UI and data model + this.addMessageToChat('user', content); + this.messages.push({ + role: 'user', + content: content + }); + + // Save the data immediately after a user message + await this.saveCurrentData(); + + // Clear input and show loading state + this.noteContextChatInput.value = ''; + showLoadingIndicator(this.loadingIndicator); + this.hideSources(); + + try { + const useAdvancedContext = this.useAdvancedContextCheckbox.checked; + const showThinking = this.showThinkingCheckbox.checked; + + // Add logging to verify parameters + console.log(`Sending message with: useAdvancedContext=${useAdvancedContext}, showThinking=${showThinking}, noteId=${this.currentNoteId}, sessionId=${this.chatNoteId}`); + + // Create the message parameters + const messageParams = { + content, + useAdvancedContext, + showThinking + }; + + // Try websocket streaming (preferred method) + try { + await this.setupStreamingResponse(messageParams); + } catch (streamingError) { + console.warn("WebSocket streaming failed, falling back to direct response:", streamingError); + + // If streaming fails, fall back to direct response + const handled = await 
this.handleDirectResponse(messageParams); + if (!handled) { + // If neither method works, show an error + throw new Error("Failed to get response from server"); + } + } + + // Note: We don't need to save here since the streaming completion and direct response methods + // both call saveCurrentData() when they're done + } catch (error) { + console.error('Error processing user message:', error); + toastService.showError('Failed to process message'); + + // Add a generic error message to the UI + this.addMessageToChat('assistant', 'Sorry, I encountered an error processing your message. Please try again.'); + this.messages.push({ + role: 'assistant', + content: 'Sorry, I encountered an error processing your message. Please try again.' + }); + + // Save the data even after error + await this.saveCurrentData(); + } + } + + /** + * Process a new user message - add to UI and save + */ + private async processUserMessage(content: string) { + // Check for validation issues first + await validateEmbeddingProviders(this.validationWarning); + + // Make sure we have a valid session + if (!this.chatNoteId) { + // If no session ID, create a new session + await this.createChatSession(); + + if (!this.chatNoteId) { + // If still no session ID, show error and return + console.error("Failed to create chat session"); + toastService.showError("Failed to create chat session"); + return; + } + } + + // Add user message to messages array if not already added + if (!this.messages.some(msg => msg.role === 'user' && msg.content === content)) { + this.messages.push({ + role: 'user', + content: content + }); + } + + // Clear input and show loading state + this.noteContextChatInput.value = ''; + showLoadingIndicator(this.loadingIndicator); + this.hideSources(); + + try { + const useAdvancedContext = this.useAdvancedContextCheckbox.checked; + const showThinking = this.showThinkingCheckbox.checked; + + // Save current state to the Chat Note before getting a response + await this.saveCurrentData(); 
+ + // Add logging to verify parameters + console.log(`Sending message with: useAdvancedContext=${useAdvancedContext}, showThinking=${showThinking}, noteId=${this.currentNoteId}, sessionId=${this.chatNoteId}`); + + // Create the message parameters + const messageParams = { + content, + useAdvancedContext, + showThinking + }; + + // Try websocket streaming (preferred method) + try { + await this.setupStreamingResponse(messageParams); + } catch (streamingError) { + console.warn("WebSocket streaming failed, falling back to direct response:", streamingError); + + // If streaming fails, fall back to direct response + const handled = await this.handleDirectResponse(messageParams); + if (!handled) { + // If neither method works, show an error + throw new Error("Failed to get response from server"); + } + } + + // Save final state after getting the response + await this.saveCurrentData(); + } catch (error) { + this.handleError(error as Error); + // Make sure we save the current state even on error + await this.saveCurrentData(); + } + } + + /** + * Try to get a direct response from the server + */ + private async handleDirectResponse(messageParams: any): Promise { + try { + if (!this.chatNoteId) return false; + + console.log(`Getting direct response using sessionId: ${this.chatNoteId} (noteId: ${this.noteId})`); + + // Get a direct response from the server + const postResponse = await getDirectResponse(this.chatNoteId, messageParams); + + // If the POST request returned content directly, display it + if (postResponse && postResponse.content) { + // Store metadata from the response + if (postResponse.metadata) { + console.log("Received metadata from response:", postResponse.metadata); + this.metadata = { + ...this.metadata, + ...postResponse.metadata + }; + } + + // Store sources from the response + if (postResponse.sources && postResponse.sources.length > 0) { + console.log(`Received ${postResponse.sources.length} sources from response`); + this.sources = 
postResponse.sources; + this.showSources(postResponse.sources); + } + + // Process the assistant response + this.processAssistantResponse(postResponse.content, postResponse); + + hideLoadingIndicator(this.loadingIndicator); + return true; + } + + return false; + } catch (error) { + console.error("Error with direct response:", error); + return false; + } + } + + /** + * Process an assistant response - add to UI and save + */ + private async processAssistantResponse(content: string, fullResponse?: any) { + // Add the response to the chat UI + this.addMessageToChat('assistant', content); + + // Add to our local message array too + this.messages.push({ + role: 'assistant', + content, + timestamp: new Date() + }); + + // If we received tool execution information, add it to metadata + if (fullResponse?.metadata?.toolExecutions) { + console.log(`Storing ${fullResponse.metadata.toolExecutions.length} tool executions from response`); + // Make sure our metadata has toolExecutions + if (!this.metadata.toolExecutions) { + this.metadata.toolExecutions = []; + } + + // Add new tool executions + this.metadata.toolExecutions = [ + ...this.metadata.toolExecutions, + ...fullResponse.metadata.toolExecutions + ]; + } + + // Save to note + this.saveCurrentData().catch(err => { + console.error("Failed to save assistant response to note:", err); + }); + } + + /** + * Set up streaming response via WebSocket + */ + private async setupStreamingResponse(messageParams: any): Promise { + if (!this.chatNoteId) { + throw new Error("No session ID available"); + } + + console.log(`Setting up streaming response using sessionId: ${this.chatNoteId} (noteId: ${this.noteId})`); + + // Store tool executions captured during streaming + const toolExecutionsCache: Array<{ + id: string; + name: string; + arguments: any; + result: any; + error?: string; + timestamp: string; + }> = []; + + return setupStreamingResponse( + this.chatNoteId, + messageParams, + // Content update handler + (content: string, 
isDone: boolean = false) => { + this.updateStreamingUI(content, isDone); + + // Update session data with additional metadata when streaming is complete + if (isDone) { + // Update our metadata with info from the server + server.get<{ + metadata?: { + model?: string; + provider?: string; + temperature?: number; + maxTokens?: number; + toolExecutions?: Array<{ + id: string; + name: string; + arguments: any; + result: any; + error?: string; + timestamp: string; + }>; + lastUpdated?: string; + usage?: { + promptTokens?: number; + completionTokens?: number; + totalTokens?: number; + }; + }; + sources?: Array<{ + noteId: string; + title: string; + similarity?: number; + content?: string; + }>; + }>(`llm/chat/${this.chatNoteId}`) + .then((sessionData) => { + console.log("Got updated session data:", sessionData); + + // Store metadata + if (sessionData.metadata) { + this.metadata = { + ...this.metadata, + ...sessionData.metadata + }; + } + + // Store sources + if (sessionData.sources && sessionData.sources.length > 0) { + this.sources = sessionData.sources; + this.showSources(sessionData.sources); + } + + // Make sure we include the cached tool executions + if (toolExecutionsCache.length > 0) { + console.log(`Including ${toolExecutionsCache.length} cached tool executions in metadata`); + if (!this.metadata.toolExecutions) { + this.metadata.toolExecutions = []; + } + + // Add any tool executions from our cache that aren't already in metadata + const existingIds = new Set((this.metadata.toolExecutions || []).map((t: {id: string}) => t.id)); + for (const toolExec of toolExecutionsCache) { + if (!existingIds.has(toolExec.id)) { + this.metadata.toolExecutions.push(toolExec); + existingIds.add(toolExec.id); + } + } + } + + // Save the updated data to the note + this.saveCurrentData() + .catch(err => console.error("Failed to save data after streaming completed:", err)); + }) + .catch(err => console.error("Error fetching session data after streaming:", err)); + } + }, + // 
Thinking update handler + (thinking: string) => { + this.showThinkingState(thinking); + }, + // Tool execution handler + (toolData: any) => { + this.showToolExecutionInfo(toolData); + + // Cache tools we see during streaming to include them in the final saved data + if (toolData && toolData.action === 'result' && toolData.tool) { + // Create a tool execution record + const toolExec = { + id: toolData.toolCallId || `tool-${Date.now()}-${Math.random().toString(36).substring(2, 7)}`, + name: toolData.tool, + arguments: toolData.args || {}, + result: toolData.result || {}, + error: toolData.error, + timestamp: new Date().toISOString() + }; + + // Add to both our local cache for immediate saving and to metadata for later saving + toolExecutionsCache.push(toolExec); + + // Initialize toolExecutions array if it doesn't exist + if (!this.metadata.toolExecutions) { + this.metadata.toolExecutions = []; + } + + // Add tool execution to our metadata + this.metadata.toolExecutions.push(toolExec); + + console.log(`Cached tool execution for ${toolData.tool} to be saved later`); + + // Save immediately after receiving a tool execution + // This ensures we don't lose tool execution data if streaming fails + this.saveCurrentData().catch(err => { + console.error("Failed to save tool execution data:", err); + }); + } + }, + // Complete handler + () => { + hideLoadingIndicator(this.loadingIndicator); + }, + // Error handler + (error: Error) => { + this.handleError(error); + } + ); + } + + /** + * Update the UI with streaming content + */ + private updateStreamingUI(assistantResponse: string, isDone: boolean = false) { + // Get the existing assistant message or create a new one + let assistantMessageEl = this.noteContextChatMessages.querySelector('.assistant-message:last-child'); + + if (!assistantMessageEl) { + // If no assistant message yet, create one + assistantMessageEl = document.createElement('div'); + assistantMessageEl.className = 'assistant-message message mb-3'; + 
this.noteContextChatMessages.appendChild(assistantMessageEl); + + // Add assistant profile icon + const profileIcon = document.createElement('div'); + profileIcon.className = 'profile-icon'; + profileIcon.innerHTML = ''; + assistantMessageEl.appendChild(profileIcon); + + // Add message content container + const messageContent = document.createElement('div'); + messageContent.className = 'message-content'; + assistantMessageEl.appendChild(messageContent); + } + + // Update the content + const messageContent = assistantMessageEl.querySelector('.message-content') as HTMLElement; + messageContent.innerHTML = formatMarkdown(assistantResponse); + + // Apply syntax highlighting if this is the final update + if (isDone) { + applySyntaxHighlight($(assistantMessageEl as HTMLElement)); + + // Update message in the data model for storage + // Find the last assistant message to update, or add a new one if none exists + const assistantMessages = this.messages.filter(msg => msg.role === 'assistant'); + const lastAssistantMsgIndex = assistantMessages.length > 0 ? 
+ this.messages.lastIndexOf(assistantMessages[assistantMessages.length - 1]) : -1; + + if (lastAssistantMsgIndex >= 0) { + // Update existing message + this.messages[lastAssistantMsgIndex].content = assistantResponse; + } else { + // Add new message + this.messages.push({ + role: 'assistant', + content: assistantResponse + }); + } + + // Hide loading indicator + hideLoadingIndicator(this.loadingIndicator); + + // Save the final state to the Chat Note + this.saveCurrentData().catch(err => { + console.error("Failed to save assistant response to note:", err); + }); + } + + // Scroll to bottom + this.chatContainer.scrollTop = this.chatContainer.scrollHeight; + } + + /** + * Handle general errors in the send message flow + */ + private handleError(error: Error) { + hideLoadingIndicator(this.loadingIndicator); + toastService.showError('Error sending message: ' + error.message); + } + + private addMessageToChat(role: 'user' | 'assistant', content: string) { + addMessageToChat(this.noteContextChatMessages, this.chatContainer, role, content); + } + + private showSources(sources: Array<{noteId: string, title: string}>) { + showSources( + this.sourcesList, + this.sourcesContainer, + this.sourcesCount, + sources, + (noteId: string) => { + // Open the note in a new tab but don't switch to it + appContext.tabManager.openTabWithNoteWithHoisting(noteId, { activate: false }); + } + ); + } + + private hideSources() { + hideSources(this.sourcesContainer); + } + + /** + * Handle tool execution updates + */ + private showToolExecutionInfo(toolExecutionData: any) { + console.log(`Showing tool execution info: ${JSON.stringify(toolExecutionData)}`); + + // Enhanced debugging for tool execution + if (!toolExecutionData) { + console.error('Tool execution data is missing or undefined'); + return; + } + + // Check for required properties + const actionType = toolExecutionData.action || ''; + const toolName = toolExecutionData.tool || 'unknown'; + console.log(`Tool execution details: 
action=${actionType}, tool=${toolName}, hasResult=${!!toolExecutionData.result}`); + + // Force action to 'result' if missing but result is present + if (!actionType && toolExecutionData.result) { + console.log('Setting missing action to "result" since result is present'); + toolExecutionData.action = 'result'; + } + + // Create or get the tool execution container + let toolExecutionElement = this.noteContextChatMessages.querySelector('.chat-tool-execution'); + if (!toolExecutionElement) { + toolExecutionElement = document.createElement('div'); + toolExecutionElement.className = 'chat-tool-execution mb-3'; + + // Create header with title and dropdown toggle + const header = document.createElement('div'); + header.className = 'tool-execution-header d-flex align-items-center p-2 rounded'; + header.innerHTML = ` + + Tool Execution + + `; + toolExecutionElement.appendChild(header); + + // Create container for tool steps + const stepsContainer = document.createElement('div'); + stepsContainer.className = 'tool-execution-container p-2 rounded mb-2'; + toolExecutionElement.appendChild(stepsContainer); + + // Add to chat messages + this.noteContextChatMessages.appendChild(toolExecutionElement); + + // Add click handler for toggle button + const toggleButton = toolExecutionElement.querySelector('.tool-execution-toggle'); + if (toggleButton) { + toggleButton.addEventListener('click', () => { + const stepsContainer = toolExecutionElement?.querySelector('.tool-execution-container'); + const icon = toggleButton.querySelector('i'); + + if (stepsContainer) { + if (stepsContainer.classList.contains('collapsed')) { + // Expand + stepsContainer.classList.remove('collapsed'); + (stepsContainer as HTMLElement).style.display = 'block'; + if (icon) { + icon.className = 'bx bx-chevron-down'; + } + } else { + // Collapse + stepsContainer.classList.add('collapsed'); + (stepsContainer as HTMLElement).style.display = 'none'; + if (icon) { + icon.className = 'bx bx-chevron-right'; + } + } + } 
+ }); + } + + // Add click handler for the header to toggle expansion as well + header.addEventListener('click', (e) => { + // Only toggle if the click isn't on the toggle button itself + const target = e.target as HTMLElement; + if (target && !target.closest('.tool-execution-toggle')) { + const toggleButton = toolExecutionElement?.querySelector('.tool-execution-toggle'); + toggleButton?.dispatchEvent(new Event('click')); + } + }); + (header as HTMLElement).style.cursor = 'pointer'; + } + + // Get the steps container + const stepsContainer = toolExecutionElement.querySelector('.tool-execution-container'); + if (!stepsContainer) return; + + // Process based on action type + const action = toolExecutionData.action || ''; + + if (action === 'start' || action === 'executing') { + // Tool execution started + const step = document.createElement('div'); + step.className = 'tool-step executing p-2 mb-2 rounded'; + step.innerHTML = ` +
+ + Executing tool: ${toolExecutionData.tool || 'unknown'} +
+ ${toolExecutionData.args ? ` +
+ Args: ${JSON.stringify(toolExecutionData.args || {}, null, 2)} +
` : ''} + `; + stepsContainer.appendChild(step); + } + else if (action === 'result' || action === 'complete') { + // Tool execution completed with results + const step = document.createElement('div'); + step.className = 'tool-step result p-2 mb-2 rounded'; + + let resultDisplay = ''; + + // Special handling for note search tools which have a specific structure + if ((toolExecutionData.tool === 'search_notes' || toolExecutionData.tool === 'keyword_search_notes') && + typeof toolExecutionData.result === 'object' && + toolExecutionData.result.results) { + + const results = toolExecutionData.result.results; + + if (results.length === 0) { + resultDisplay = `
No notes found matching the search criteria.
`; + } else { + resultDisplay = ` +
+
Found ${results.length} notes:
+
    + ${results.map((note: any) => ` +
  • + ${note.title} + ${note.similarity < 1 ? `(similarity: ${(note.similarity * 100).toFixed(0)}%)` : ''} +
  • + `).join('')} +
+
+ `; + } + } + // Format the result based on type for other tools + else if (typeof toolExecutionData.result === 'object') { + // For objects, format as pretty JSON + resultDisplay = `
${JSON.stringify(toolExecutionData.result, null, 2)}
`; + } else { + // For simple values, display as text + resultDisplay = `
${String(toolExecutionData.result)}
`; + } + + step.innerHTML = ` +
+ + Tool: ${toolExecutionData.tool || 'unknown'} +
+
+ ${resultDisplay} +
+ `; + stepsContainer.appendChild(step); + + // Add event listeners for note links if this is a note search result + if (toolExecutionData.tool === 'search_notes' || toolExecutionData.tool === 'keyword_search_notes') { + const noteLinks = step.querySelectorAll('.note-link'); + noteLinks.forEach(link => { + link.addEventListener('click', (e) => { + e.preventDefault(); + const noteId = (e.currentTarget as HTMLElement).getAttribute('data-note-id'); + if (noteId) { + // Open the note in a new tab but don't switch to it + appContext.tabManager.openTabWithNoteWithHoisting(noteId, { activate: false }); + } + }); + }); + } + } + else if (action === 'error') { + // Tool execution failed + const step = document.createElement('div'); + step.className = 'tool-step error p-2 mb-2 rounded'; + step.innerHTML = ` +
+ + Error in tool: ${toolExecutionData.tool || 'unknown'} +
+
+ ${toolExecutionData.error || 'Unknown error'} +
+ `; + stepsContainer.appendChild(step); + } + else if (action === 'generating') { + // Generating final response with tool results + const step = document.createElement('div'); + step.className = 'tool-step generating p-2 mb-2 rounded'; + step.innerHTML = ` +
+ + Generating response with tool results... +
+ `; + stepsContainer.appendChild(step); + } + + // Make sure the loading indicator is shown during tool execution + this.loadingIndicator.style.display = 'flex'; + + // Scroll the chat container to show the tool execution + this.chatContainer.scrollTop = this.chatContainer.scrollHeight; + } + + /** + * Show thinking state in the UI + */ + private showThinkingState(thinkingData: string) { + // Thinking state is now updated via the in-chat UI in updateStreamingUI + // This method is now just a hook for the WebSocket handlers + + // Show the loading indicator + this.loadingIndicator.style.display = 'flex'; + } + + private initializeEventListeners() { + this.noteContextChatForm.addEventListener('submit', (e) => { + e.preventDefault(); + const content = this.noteContextChatInput.value; + this.sendMessage(content); + }); + + // Add auto-resize functionality to the textarea + this.noteContextChatInput.addEventListener('input', () => { + this.noteContextChatInput.style.height = 'auto'; + this.noteContextChatInput.style.height = `${this.noteContextChatInput.scrollHeight}px`; + }); + + // Handle Enter key (send on Enter, new line on Shift+Enter) + this.noteContextChatInput.addEventListener('keydown', (e) => { + if (e.key === 'Enter' && !e.shiftKey) { + e.preventDefault(); + this.noteContextChatForm.dispatchEvent(new Event('submit')); + } + }); + } +} diff --git a/src/public/app/widgets/llm_chat/message_processor.ts b/src/public/app/widgets/llm_chat/message_processor.ts new file mode 100644 index 000000000..139a3d611 --- /dev/null +++ b/src/public/app/widgets/llm_chat/message_processor.ts @@ -0,0 +1,59 @@ +/** + * Message processing functions for LLM Chat + */ +import type { ToolExecutionStep } from "./types.js"; + +/** + * Extract tool execution steps from the DOM that are within the chat flow + */ +export function extractInChatToolSteps(chatMessagesElement: HTMLElement): ToolExecutionStep[] { + const steps: ToolExecutionStep[] = []; + + // Look for tool execution in the 
chat flow + const toolExecutionElement = chatMessagesElement.querySelector('.chat-tool-execution'); + + if (toolExecutionElement) { + // Find all tool step elements + const stepElements = toolExecutionElement.querySelectorAll('.tool-step'); + + stepElements.forEach(stepEl => { + const stepHtml = stepEl.innerHTML; + + // Determine the step type based on icons or classes present + let type = 'info'; + let name: string | undefined; + let content = ''; + + if (stepHtml.includes('bx-code-block')) { + type = 'executing'; + content = 'Executing tools...'; + } else if (stepHtml.includes('bx-terminal')) { + type = 'result'; + // Extract the tool name from the step + const nameMatch = stepHtml.match(/]*>Tool: ([^<]+)<\/span>/); + name = nameMatch ? nameMatch[1] : 'unknown'; + + // Extract the content from the div with class mt-1 ps-3 + const contentEl = stepEl.querySelector('.mt-1.ps-3'); + content = contentEl ? contentEl.innerHTML : ''; + } else if (stepHtml.includes('bx-error-circle')) { + type = 'error'; + const nameMatch = stepHtml.match(/]*>Tool: ([^<]+)<\/span>/); + name = nameMatch ? nameMatch[1] : 'unknown'; + + const contentEl = stepEl.querySelector('.mt-1.ps-3.text-danger'); + content = contentEl ? 
contentEl.innerHTML : ''; + } else if (stepHtml.includes('bx-message-dots')) { + type = 'generating'; + content = 'Generating response with tool results...'; + } else if (stepHtml.includes('bx-loader-alt')) { + // Skip the initializing spinner + return; + } + + steps.push({ type, name, content }); + }); + } + + return steps; +} diff --git a/src/public/app/widgets/llm_chat/types.ts b/src/public/app/widgets/llm_chat/types.ts new file mode 100644 index 000000000..dc19f38d3 --- /dev/null +++ b/src/public/app/widgets/llm_chat/types.ts @@ -0,0 +1,55 @@ +/** + * Types for LLM Chat Panel + */ + +export interface ChatResponse { + id: string; + messages: Array<{role: string; content: string}>; + sources?: Array<{noteId: string; title: string}>; +} + +export interface SessionResponse { + id: string; + title: string; + noteId?: string; +} + +export interface ToolExecutionStep { + type: string; + name?: string; + content: string; +} + +export interface MessageData { + role: string; + content: string; + timestamp?: Date; +} + +export interface ChatData { + messages: MessageData[]; + chatNoteId: string | null; + noteId?: string | null; + toolSteps: ToolExecutionStep[]; + sources?: Array<{ + noteId: string; + title: string; + similarity?: number; + content?: string; + }>; + metadata?: { + model?: string; + provider?: string; + temperature?: number; + maxTokens?: number; + lastUpdated?: string; + toolExecutions?: Array<{ + id: string; + name: string; + arguments: any; + result: any; + error?: string; + timestamp: string; + }>; + }; +} diff --git a/src/public/app/widgets/llm_chat/ui.ts b/src/public/app/widgets/llm_chat/ui.ts new file mode 100644 index 000000000..b4c9c9208 --- /dev/null +++ b/src/public/app/widgets/llm_chat/ui.ts @@ -0,0 +1,251 @@ +/** + * UI-related functions for LLM Chat + */ +import { t } from "../../services/i18n.js"; +import type { ToolExecutionStep } from "./types.js"; +import { formatMarkdown, applyHighlighting } from "./utils.js"; + +// Template for the chat 
widget +export const TPL = ` +
+ + + +
+
+ +
+ + + +
+
+ + +
+
+ Options: +
+ + +
+
+ + +
+
+
+
+`; + +/** + * Add a message to the chat UI + */ +export function addMessageToChat(messagesContainer: HTMLElement, chatContainer: HTMLElement, role: 'user' | 'assistant', content: string) { + const messageElement = document.createElement('div'); + messageElement.className = `chat-message ${role}-message mb-3 d-flex`; + + const avatarElement = document.createElement('div'); + avatarElement.className = 'message-avatar d-flex align-items-center justify-content-center me-2'; + + if (role === 'user') { + avatarElement.innerHTML = ''; + avatarElement.classList.add('user-avatar'); + } else { + avatarElement.innerHTML = ''; + avatarElement.classList.add('assistant-avatar'); + } + + const contentElement = document.createElement('div'); + contentElement.className = 'message-content p-3 rounded flex-grow-1'; + + if (role === 'user') { + contentElement.classList.add('user-content', 'bg-light'); + } else { + contentElement.classList.add('assistant-content'); + } + + // Format the content with markdown + contentElement.innerHTML = formatMarkdown(content); + + messageElement.appendChild(avatarElement); + messageElement.appendChild(contentElement); + + messagesContainer.appendChild(messageElement); + + // Apply syntax highlighting to any code blocks in the message + applyHighlighting(contentElement); + + // Scroll to bottom + chatContainer.scrollTop = chatContainer.scrollHeight; +} + +/** + * Show sources in the UI + */ +export function showSources( + sourcesList: HTMLElement, + sourcesContainer: HTMLElement, + sourcesCount: HTMLElement, + sources: Array<{noteId: string, title: string}>, + onSourceClick: (noteId: string) => void +) { + sourcesList.innerHTML = ''; + sourcesCount.textContent = sources.length.toString(); + + sources.forEach(source => { + const sourceElement = document.createElement('div'); + sourceElement.className = 'source-item p-2 mb-1 border rounded d-flex align-items-center'; + + // Create the direct link to the note + sourceElement.innerHTML = ` + `; + + // Add 
click handler + sourceElement.querySelector('.source-link')?.addEventListener('click', (e) => { + e.preventDefault(); + e.stopPropagation(); + onSourceClick(source.noteId); + return false; + }); + + sourcesList.appendChild(sourceElement); + }); + + sourcesContainer.style.display = 'block'; +} + +/** + * Hide sources in the UI + */ +export function hideSources(sourcesContainer: HTMLElement) { + sourcesContainer.style.display = 'none'; +} + +/** + * Show loading indicator + */ +export function showLoadingIndicator(loadingIndicator: HTMLElement) { + const logId = `ui-${Date.now()}`; + console.log(`[${logId}] Showing loading indicator`); + + try { + loadingIndicator.style.display = 'flex'; + const forceUpdate = loadingIndicator.offsetHeight; + console.log(`[${logId}] Loading indicator initialized`); + } catch (err) { + console.error(`[${logId}] Error showing loading indicator:`, err); + } +} + +/** + * Hide loading indicator + */ +export function hideLoadingIndicator(loadingIndicator: HTMLElement) { + const logId = `ui-${Date.now()}`; + console.log(`[${logId}] Hiding loading indicator`); + + try { + loadingIndicator.style.display = 'none'; + const forceUpdate = loadingIndicator.offsetHeight; + console.log(`[${logId}] Loading indicator hidden`); + } catch (err) { + console.error(`[${logId}] Error hiding loading indicator:`, err); + } +} + +/** + * Render tool steps as HTML for display in chat + */ +export function renderToolStepsHtml(steps: ToolExecutionStep[]): string { + if (!steps || steps.length === 0) return ''; + + let html = ''; + + steps.forEach(step => { + let icon, labelClass, content; + + switch (step.type) { + case 'executing': + icon = 'bx-code-block text-primary'; + labelClass = ''; + content = `
+ + ${step.content} +
`; + break; + + case 'result': + icon = 'bx-terminal text-success'; + labelClass = 'fw-bold'; + content = `
+ + Tool: ${step.name || 'unknown'} +
+
${step.content}
`; + break; + + case 'error': + icon = 'bx-error-circle text-danger'; + labelClass = 'fw-bold text-danger'; + content = `
+ + Tool: ${step.name || 'unknown'} +
+
${step.content}
`; + break; + + case 'generating': + icon = 'bx-message-dots text-info'; + labelClass = ''; + content = `
+ + ${step.content} +
`; + break; + + default: + icon = 'bx-info-circle text-muted'; + labelClass = ''; + content = `
+ + ${step.content} +
`; + } + + html += `
${content}
`; + }); + + return html; +} diff --git a/src/public/app/widgets/llm_chat/utils.ts b/src/public/app/widgets/llm_chat/utils.ts new file mode 100644 index 000000000..f7a880d69 --- /dev/null +++ b/src/public/app/widgets/llm_chat/utils.ts @@ -0,0 +1,93 @@ +/** + * Utility functions for LLM Chat + */ +import { marked } from "marked"; +import { applySyntaxHighlight } from "../../services/syntax_highlight.js"; + +/** + * Format markdown content for display + */ +export function formatMarkdown(content: string): string { + if (!content) return ''; + + // First, extract HTML thinking visualization to protect it from replacements + const thinkingBlocks: string[] = []; + let processedContent = content.replace(/
/g, (match) => { + const placeholder = `__THINKING_BLOCK_${thinkingBlocks.length}__`; + thinkingBlocks.push(match); + return placeholder; + }); + + // Use marked library to parse the markdown + const markedContent = marked(processedContent, { + breaks: true, // Convert line breaks to
+ gfm: true, // Enable GitHub Flavored Markdown + silent: true // Ignore errors + }); + + // Handle potential promise (though it shouldn't be with our options) + if (typeof markedContent === 'string') { + processedContent = markedContent; + } else { + console.warn('Marked returned a promise unexpectedly'); + // Use the original content as fallback + processedContent = content; + } + + // Restore thinking visualization blocks + thinkingBlocks.forEach((block, index) => { + processedContent = processedContent.replace(`__THINKING_BLOCK_${index}__`, block); + }); + + return processedContent; +} + +/** + * Simple HTML escaping for safer content display + */ +export function escapeHtml(text: string): string { + if (typeof text !== 'string') { + text = String(text || ''); + } + + return text + .replace(/&/g, '&') + .replace(//g, '>') + .replace(/"/g, '"') + .replace(/'/g, '''); +} + +/** + * Apply syntax highlighting to content + */ +export function applyHighlighting(element: HTMLElement): void { + applySyntaxHighlight($(element)); +} + +/** + * Format tool arguments for display + */ +export function formatToolArgs(args: any): string { + if (!args || typeof args !== 'object') return ''; + + return Object.entries(args) + .map(([key, value]) => { + // Format the value based on its type + let displayValue; + if (typeof value === 'string') { + displayValue = value.length > 50 ? 
`"${value.substring(0, 47)}..."` : `"${value}"`; + } else if (value === null) { + displayValue = 'null'; + } else if (Array.isArray(value)) { + displayValue = '[...]'; // Simplified array representation + } else if (typeof value === 'object') { + displayValue = '{...}'; // Simplified object representation + } else { + displayValue = String(value); + } + + return `${escapeHtml(key)}: ${escapeHtml(displayValue)}`; + }) + .join(', '); +} diff --git a/src/public/app/widgets/llm_chat/validation.ts b/src/public/app/widgets/llm_chat/validation.ts new file mode 100644 index 000000000..294ae8018 --- /dev/null +++ b/src/public/app/widgets/llm_chat/validation.ts @@ -0,0 +1,104 @@ +/** + * Validation functions for LLM Chat + */ +import options from "../../services/options.js"; +import { getEmbeddingStats } from "./communication.js"; + +/** + * Validate embedding providers configuration + */ +export async function validateEmbeddingProviders(validationWarning: HTMLElement): Promise { + try { + // Check if AI is enabled + const aiEnabled = options.is('aiEnabled'); + if (!aiEnabled) { + validationWarning.style.display = 'none'; + return; + } + + // Get provider precedence + const precedenceStr = options.get('aiProviderPrecedence') || 'openai,anthropic,ollama'; + let precedenceList: string[] = []; + + if (precedenceStr) { + if (precedenceStr.startsWith('[') && precedenceStr.endsWith(']')) { + precedenceList = JSON.parse(precedenceStr); + } else if (precedenceStr.includes(',')) { + precedenceList = precedenceStr.split(',').map(p => p.trim()); + } else { + precedenceList = [precedenceStr]; + } + } + + // Get enabled providers - this is a simplification since we don't have direct DB access + // We'll determine enabled status based on the presence of keys or settings + const enabledProviders: string[] = []; + + // OpenAI is enabled if API key is set + const openaiKey = options.get('openaiApiKey'); + if (openaiKey) { + enabledProviders.push('openai'); + } + + // Anthropic is enabled if 
API key is set + const anthropicKey = options.get('anthropicApiKey'); + if (anthropicKey) { + enabledProviders.push('anthropic'); + } + + // Ollama is enabled if base URL is set + const ollamaBaseUrl = options.get('ollamaBaseUrl'); + if (ollamaBaseUrl) { + enabledProviders.push('ollama'); + } + + // Local is always available + enabledProviders.push('local'); + + // Perform validation checks + const allPrecedenceEnabled = precedenceList.every((p: string) => enabledProviders.includes(p)); + + // Get embedding queue status + const embeddingStats = await getEmbeddingStats() as { + success: boolean, + stats: { + totalNotesCount: number; + embeddedNotesCount: number; + queuedNotesCount: number; + failedNotesCount: number; + lastProcessedDate: string | null; + percentComplete: number; + } + }; + const queuedNotes = embeddingStats?.stats?.queuedNotesCount || 0; + const hasEmbeddingsInQueue = queuedNotes > 0; + + // Show warning if there are issues + if (!allPrecedenceEnabled || hasEmbeddingsInQueue) { + let message = 'AI Provider Configuration Issues'; + + message += '
    '; + + if (!allPrecedenceEnabled) { + const disabledProviders = precedenceList.filter((p: string) => !enabledProviders.includes(p)); + message += `
  • The following providers in your precedence list are not enabled: ${disabledProviders.join(', ')}.
  • `; + } + + if (hasEmbeddingsInQueue) { + message += `
  • Currently processing embeddings for ${queuedNotes} notes. Some AI features may produce incomplete results until processing completes.
  • `; + } + + message += '
'; + message += ''; + + // Update HTML content + validationWarning.innerHTML = message; + validationWarning.style.display = 'block'; + } else { + validationWarning.style.display = 'none'; + } + } catch (error) { + console.error('Error validating embedding providers:', error); + validationWarning.style.display = 'none'; + } +} diff --git a/src/public/app/widgets/llm_chat_panel.ts b/src/public/app/widgets/llm_chat_panel.ts new file mode 100644 index 000000000..fd26850cc --- /dev/null +++ b/src/public/app/widgets/llm_chat_panel.ts @@ -0,0 +1,7 @@ +/** + * LLM Chat Panel Widget + * This file is preserved for backward compatibility. + * The actual implementation has been moved to the llm_chat/ folder. + */ +import LlmChatPanel from './llm_chat/index.js'; +export default LlmChatPanel; diff --git a/src/public/app/widgets/note_detail.ts b/src/public/app/widgets/note_detail.ts index 9341e89c4..238683809 100644 --- a/src/public/app/widgets/note_detail.ts +++ b/src/public/app/widgets/note_detail.ts @@ -36,6 +36,7 @@ import utils from "../services/utils.js"; import type { NoteType } from "../entities/fnote.js"; import type TypeWidget from "./type_widgets/type_widget.js"; import { MermaidTypeWidget } from "./type_widgets/mermaid.js"; +import AiChatTypeWidget from "./type_widgets/ai_chat.js"; const TPL = /*html*/`
@@ -74,6 +75,7 @@ const typeWidgetClasses = { attachmentList: AttachmentListTypeWidget, mindMap: MindMapWidget, geoMap: GeoMapTypeWidget, + aiChat: AiChatTypeWidget, // Split type editors mermaid: MermaidTypeWidget @@ -92,7 +94,8 @@ type ExtendedNoteType = | "editableCode" | "attachmentDetail" | "attachmentList" - | "protectedSession"; + | "protectedSession" + | "aiChat"; export default class NoteDetailWidget extends NoteContextAwareWidget { @@ -215,12 +218,11 @@ export default class NoteDetailWidget extends NoteContextAwareWidget { async getWidgetType(): Promise { const note = this.note; - if (!note) { return "empty"; } - let type: NoteType = note.type; + const type = note.type; let resultingType: ExtendedNoteType; const viewScope = this.noteContext?.viewScope; diff --git a/src/public/app/widgets/note_type.ts b/src/public/app/widgets/note_type.ts index 28983286c..63190f208 100644 --- a/src/public/app/widgets/note_type.ts +++ b/src/public/app/widgets/note_type.ts @@ -38,6 +38,7 @@ const NOTE_TYPES: NoteTypeMapping[] = [ // Misc note types { type: "render", mime: "", title: t("note_types.render-note"), selectable: true }, { type: "webView", mime: "", title: t("note_types.web-view"), selectable: true }, + { type: "aiChat", mime: "application/json", title: t("note_types.ai-chat"), selectable: true }, // Code notes { type: "code", mime: "text/plain", title: t("note_types.code"), selectable: true }, diff --git a/src/public/app/widgets/tab_aware_widget.js b/src/public/app/widgets/tab_aware_widget.js new file mode 100644 index 000000000..c6f8e4450 --- /dev/null +++ b/src/public/app/widgets/tab_aware_widget.js @@ -0,0 +1,53 @@ +import BasicWidget from "./basic_widget.js"; + +/** + * Base class for widgets that need to track the active tab/note + */ +export default class TabAwareWidget extends BasicWidget { + constructor() { + super(); + this.noteId = null; + this.noteType = null; + this.notePath = null; + this.isActiveTab = false; + } + + /** + * Called when the active 
note is switched + * + * @param {string} noteId + * @param {string|null} noteType + * @param {string|null} notePath + */ + async noteSwitched(noteId, noteType, notePath) { + this.noteId = noteId; + this.noteType = noteType; + this.notePath = notePath; + } + + /** + * Called when the widget's tab becomes active or inactive + * + * @param {boolean} active + */ + activeTabChanged(active) { + this.isActiveTab = active; + } + + /** + * Called when entities (notes, attributes, etc.) are reloaded + */ + entitiesReloaded() {} + + /** + * Check if this widget is enabled + */ + isEnabled() { + return true; + } + + /** + * Refresh widget with current data + */ + async refresh() {} +} diff --git a/src/public/app/widgets/type_widgets/ai_chat.ts b/src/public/app/widgets/type_widgets/ai_chat.ts new file mode 100644 index 000000000..e96cf5f20 --- /dev/null +++ b/src/public/app/widgets/type_widgets/ai_chat.ts @@ -0,0 +1,255 @@ +import TypeWidget from "./type_widget.js"; +import LlmChatPanel from "../llm_chat_panel.js"; +import { type EventData } from "../../components/app_context.js"; +import type FNote from "../../entities/fnote.js"; +import server from "../../services/server.js"; +import toastService from "../../services/toast.js"; + +export default class AiChatTypeWidget extends TypeWidget { + private llmChatPanel: LlmChatPanel; + private isInitialized: boolean = false; + private initPromise: Promise | null = null; + + constructor() { + super(); + this.llmChatPanel = new LlmChatPanel(); + + // Connect the data callbacks + this.llmChatPanel.setDataCallbacks( + (data) => this.saveData(data), + () => this.getData() + ); + } + + static getType() { + return "aiChat"; + } + + doRender() { + this.$widget = $('
'); + this.$widget.append(this.llmChatPanel.render()); + + return this.$widget; + } + + // Override the refreshWithNote method to ensure we get note changes + async refreshWithNote(note: FNote | null | undefined) { + console.log("refreshWithNote called for note:", note?.noteId); + + // Always force a refresh when the note changes + if (this.note?.noteId !== note?.noteId) { + console.log(`Note ID changed from ${this.note?.noteId} to ${note?.noteId}, forcing reset`); + this.isInitialized = false; + this.initPromise = null; + + // Force refresh the chat panel with the new note + if (note) { + this.llmChatPanel.setCurrentNoteId(note.noteId); + } + } + + // Continue with regular doRefresh + await this.doRefresh(note); + } + + async doRefresh(note: FNote | null | undefined) { + try { + console.log("doRefresh called for note:", note?.noteId); + + // If we're already initializing, wait for that to complete + if (this.initPromise) { + await this.initPromise; + return; + } + + // Initialize once or when note changes + if (!this.isInitialized) { + console.log("Initializing AI Chat Panel for note:", note?.noteId); + + // Initialize the note content first + if (note) { + try { + const content = await note.getContent(); + // Check if content is empty + if (!content || content === '{}') { + // Initialize with empty chat history + await this.saveData({ + messages: [], + title: note.title, + noteId: note.noteId // Store the note ID in the data + }); + console.log("Initialized empty chat history for new note"); + } else { + console.log("Note already has content, will load in LlmChatPanel.refresh()"); + } + } catch (e) { + console.error("Error initializing AI Chat note content:", e); + } + } + + // Create a promise to track initialization + this.initPromise = (async () => { + try { + // Reset the UI before refreshing + this.llmChatPanel.clearNoteContextChatMessages(); + this.llmChatPanel.setMessages([]); + + // This will load saved data via the getData callback + await 
this.llmChatPanel.refresh(); + this.isInitialized = true; + } catch (e) { + console.error("Error initializing LlmChatPanel:", e); + toastService.showError("Failed to initialize chat panel. Try reloading."); + } + })(); + + await this.initPromise; + this.initPromise = null; + } + } catch (e) { + console.error("Error in doRefresh:", e); + toastService.showError("Error refreshing chat. Please try again."); + } + } + + async entitiesReloadedEvent(data: EventData<"entitiesReloaded">) { + // We don't need to refresh on entities reloaded for the chat + } + + async noteSwitched() { + console.log("Note switched to:", this.noteId); + + // Force a full reset when switching notes + this.isInitialized = false; + this.initPromise = null; + + if (this.note) { + // Update the chat panel with the new note ID before refreshing + this.llmChatPanel.setCurrentNoteId(this.note.noteId); + + // Reset the chat panel UI + this.llmChatPanel.clearNoteContextChatMessages(); + this.llmChatPanel.setMessages([]); + this.llmChatPanel.setChatNoteId(null); + } + + // Call the parent method to refresh + await super.noteSwitched(); + } + + async activeContextChangedEvent(data: EventData<"activeContextChanged">) { + if (!this.isActive()) { + return; + } + + console.log("Active context changed, refreshing AI Chat Panel"); + + // Always refresh when we become active - this ensures we load the correct note data + try { + // Reset initialization flag to force a refresh + this.isInitialized = false; + + // Make sure the chat panel has the current note ID + if (this.note) { + this.llmChatPanel.setCurrentNoteId(this.note.noteId); + } + + this.initPromise = (async () => { + try { + // Reset the UI before refreshing + this.llmChatPanel.clearNoteContextChatMessages(); + this.llmChatPanel.setMessages([]); + + await this.llmChatPanel.refresh(); + this.isInitialized = true; + } catch (e) { + console.error("Error refreshing LlmChatPanel:", e); + } + })(); + + await this.initPromise; + this.initPromise = null; + } 
catch (e) { + console.error("Error in activeContextChangedEvent:", e); + } + } + + // Save chat data to the note + async saveData(data: any) { + if (!this.note) { + return; + } + + try { + console.log(`AiChatTypeWidget: Saving data for note ${this.note.noteId}`); + + // Format the data properly - this is the canonical format of the data + const formattedData = { + messages: data.messages || [], + chatNoteId: data.chatNoteId || this.note.noteId, + toolSteps: data.toolSteps || [], + sources: data.sources || [], + metadata: { + ...(data.metadata || {}), + lastUpdated: new Date().toISOString() + } + }; + + // Save the data to the note + await server.put(`notes/${this.note.noteId}/data`, { + content: JSON.stringify(formattedData, null, 2) + }); + } catch (e) { + console.error("Error saving AI Chat data:", e); + toastService.showError("Failed to save chat data"); + } + } + + // Get data from the note + async getData() { + if (!this.note) { + return null; + } + + try { + console.log(`AiChatTypeWidget: Getting data for note ${this.note.noteId}`); + const content = await this.note.getContent(); + + if (!content) { + console.log("Note content is empty"); + return null; + } + + // Parse the content as JSON + let parsedContent; + try { + parsedContent = JSON.parse(content as string); + console.log("Successfully parsed note content as JSON"); + } catch (e) { + console.error("Error parsing chat content as JSON:", e); + return null; + } + + // Check if this is a blob response with 'content' property that needs to be parsed again + // This happens when the content is returned from the /blob endpoint + if (parsedContent.content && typeof parsedContent.content === 'string' && + parsedContent.blobId && parsedContent.contentLength) { + try { + // The actual chat data is inside the 'content' property as a string + console.log("Detected blob response structure, parsing inner content"); + const innerContent = JSON.parse(parsedContent.content); + console.log("Successfully parsed blob 
inner content"); + return innerContent; + } catch (innerError) { + console.error("Error parsing inner blob content:", innerError); + return null; + } + } + + return parsedContent; + } catch (e) { + console.error("Error loading AI Chat data:", e); + return null; + } + } +} diff --git a/src/public/app/widgets/type_widgets/content_widget.ts b/src/public/app/widgets/type_widgets/content_widget.ts index 6fd3b360a..45af111e0 100644 --- a/src/public/app/widgets/type_widgets/content_widget.ts +++ b/src/public/app/widgets/type_widgets/content_widget.ts @@ -37,6 +37,7 @@ import LocalizationOptions from "./options/i18n/i18n.js"; import CodeBlockOptions from "./options/appearance/code_block.js"; import EditorOptions from "./options/text_notes/editor.js"; import ShareSettingsOptions from "./options/other/share_settings.js"; +import AiSettingsOptions from "./options/ai_settings.js"; import type FNote from "../../entities/fnote.js"; import type NoteContextAwareWidget from "../note_context_aware_widget.js"; import { t } from "i18next"; @@ -111,6 +112,7 @@ const CONTENT_WIDGETS: Record = { _optionsSync: [ SyncOptions ], + _optionsAi: [AiSettingsOptions], _optionsOther: [ SearchEngineOptions, TrayOptions, diff --git a/src/public/app/widgets/type_widgets/options/ai_settings.ts b/src/public/app/widgets/type_widgets/options/ai_settings.ts new file mode 100644 index 000000000..ca8753f2a --- /dev/null +++ b/src/public/app/widgets/type_widgets/options/ai_settings.ts @@ -0,0 +1,2 @@ +import AiSettingsWidget from './ai_settings/index.js'; +export default AiSettingsWidget; \ No newline at end of file diff --git a/src/public/app/widgets/type_widgets/options/ai_settings/ai_settings_widget.ts b/src/public/app/widgets/type_widgets/options/ai_settings/ai_settings_widget.ts new file mode 100644 index 000000000..269b958ba --- /dev/null +++ b/src/public/app/widgets/type_widgets/options/ai_settings/ai_settings_widget.ts @@ -0,0 +1,510 @@ +import OptionsWidget from "../options_widget.js"; +import { 
TPL } from "./template.js"; +import { t } from "../../../../services/i18n.js"; +import type { OptionDefinitions, OptionMap } from "../../../../../../services/options_interface.js"; +import server from "../../../../services/server.js"; +import toastService from "../../../../services/toast.js"; +import type { EmbeddingStats, FailedEmbeddingNotes } from "./interfaces.js"; +import { ProviderService } from "./providers.js"; + +export default class AiSettingsWidget extends OptionsWidget { + private ollamaModelsRefreshed = false; + private openaiModelsRefreshed = false; + private anthropicModelsRefreshed = false; + private statsRefreshInterval: NodeJS.Timeout | null = null; + private indexRebuildRefreshInterval: NodeJS.Timeout | null = null; + private readonly STATS_REFRESH_INTERVAL = 5000; // 5 seconds + private providerService: ProviderService | null = null; + + doRender() { + this.$widget = $(TPL); + this.providerService = new ProviderService(this.$widget); + + // Setup event handlers for options + this.setupEventHandlers(); + + this.refreshEmbeddingStats(); + this.fetchFailedEmbeddingNotes(); + + return this.$widget; + } + + /** + * Helper method to set up a change event handler for an option + * @param selector The jQuery selector for the element + * @param optionName The name of the option to update + * @param validateAfter Whether to run validation after the update + * @param isCheckbox Whether the element is a checkbox + */ + setupChangeHandler(selector: string, optionName: keyof OptionDefinitions, validateAfter: boolean = false, isCheckbox: boolean = false) { + if (!this.$widget) return; + + const $element = this.$widget.find(selector); + $element.on('change', async () => { + let value: string; + + if (isCheckbox) { + value = $element.prop('checked') ? 
'true' : 'false'; + } else { + value = $element.val() as string; + } + + await this.updateOption(optionName, value); + + if (validateAfter) { + await this.displayValidationWarnings(); + } + }); + } + + /** + * Set up all event handlers for options + */ + setupEventHandlers() { + if (!this.$widget) return; + + // Core AI options + this.setupChangeHandler('.ai-enabled', 'aiEnabled', true, true); + this.setupChangeHandler('.ai-provider-precedence', 'aiProviderPrecedence', true); + this.setupChangeHandler('.ai-temperature', 'aiTemperature'); + this.setupChangeHandler('.ai-system-prompt', 'aiSystemPrompt'); + + // OpenAI options + this.setupChangeHandler('.openai-api-key', 'openaiApiKey', true); + this.setupChangeHandler('.openai-base-url', 'openaiBaseUrl', true); + this.setupChangeHandler('.openai-default-model', 'openaiDefaultModel'); + this.setupChangeHandler('.openai-embedding-model', 'openaiEmbeddingModel'); + + // Anthropic options + this.setupChangeHandler('.anthropic-api-key', 'anthropicApiKey', true); + this.setupChangeHandler('.anthropic-default-model', 'anthropicDefaultModel'); + this.setupChangeHandler('.anthropic-base-url', 'anthropicBaseUrl'); + + // Voyage options + this.setupChangeHandler('.voyage-api-key', 'voyageApiKey'); + this.setupChangeHandler('.voyage-embedding-model', 'voyageEmbeddingModel'); + + // Ollama options + this.setupChangeHandler('.ollama-base-url', 'ollamaBaseUrl'); + this.setupChangeHandler('.ollama-default-model', 'ollamaDefaultModel'); + this.setupChangeHandler('.ollama-embedding-model', 'ollamaEmbeddingModel'); + + const $refreshModels = this.$widget.find('.refresh-models'); + $refreshModels.on('click', async () => { + this.ollamaModelsRefreshed = await this.providerService?.refreshOllamaModels(true, this.ollamaModelsRefreshed) || false; + }); + + // Add tab change handler for Ollama tab + const $ollamaTab = this.$widget.find('#nav-ollama-tab'); + $ollamaTab.on('shown.bs.tab', async () => { + // Only refresh the models if we 
haven't done it before + this.ollamaModelsRefreshed = await this.providerService?.refreshOllamaModels(false, this.ollamaModelsRefreshed) || false; + }); + + // OpenAI models refresh button + const $refreshOpenAIModels = this.$widget.find('.refresh-openai-models'); + $refreshOpenAIModels.on('click', async () => { + this.openaiModelsRefreshed = await this.providerService?.refreshOpenAIModels(true, this.openaiModelsRefreshed) || false; + }); + + // Add tab change handler for OpenAI tab + const $openaiTab = this.$widget.find('#nav-openai-tab'); + $openaiTab.on('shown.bs.tab', async () => { + // Only refresh the models if we haven't done it before + this.openaiModelsRefreshed = await this.providerService?.refreshOpenAIModels(false, this.openaiModelsRefreshed) || false; + }); + + // Anthropic models refresh button + const $refreshAnthropicModels = this.$widget.find('.refresh-anthropic-models'); + $refreshAnthropicModels.on('click', async () => { + this.anthropicModelsRefreshed = await this.providerService?.refreshAnthropicModels(true, this.anthropicModelsRefreshed) || false; + }); + + // Add tab change handler for Anthropic tab + const $anthropicTab = this.$widget.find('#nav-anthropic-tab'); + $anthropicTab.on('shown.bs.tab', async () => { + // Only refresh the models if we haven't done it before + this.anthropicModelsRefreshed = await this.providerService?.refreshAnthropicModels(false, this.anthropicModelsRefreshed) || false; + }); + + // Embedding options event handlers + this.setupChangeHandler('.embedding-auto-update-enabled', 'embeddingAutoUpdateEnabled', false, true); + this.setupChangeHandler('.enable-automatic-indexing', 'enableAutomaticIndexing', false, true); + this.setupChangeHandler('.embedding-similarity-threshold', 'embeddingSimilarityThreshold'); + this.setupChangeHandler('.max-notes-per-llm-query', 'maxNotesPerLlmQuery'); + this.setupChangeHandler('.embedding-provider-precedence', 'embeddingProviderPrecedence', true); + 
this.setupChangeHandler('.embedding-dimension-strategy', 'embeddingDimensionStrategy'); + this.setupChangeHandler('.embedding-batch-size', 'embeddingBatchSize'); + this.setupChangeHandler('.embedding-update-interval', 'embeddingUpdateInterval'); + + // No sortable behavior needed anymore + + // Embedding stats refresh button + const $refreshStats = this.$widget.find('.embedding-refresh-stats'); + $refreshStats.on('click', async () => { + await this.refreshEmbeddingStats(); + await this.fetchFailedEmbeddingNotes(); + }); + + // Recreate embeddings button + const $recreateEmbeddings = this.$widget.find('.recreate-embeddings'); + $recreateEmbeddings.on('click', async () => { + if (confirm(t("ai_llm.recreate_embeddings_confirm") || "Are you sure you want to recreate all embeddings? This may take a long time.")) { + try { + await server.post('llm/embeddings/reprocess'); + toastService.showMessage(t("ai_llm.recreate_embeddings_started")); + + // Start progress polling + this.pollIndexRebuildProgress(); + } catch (e) { + console.error('Error starting embeddings regeneration:', e); + toastService.showError(t("ai_llm.recreate_embeddings_error")); + } + } + }); + + // Rebuild index button + const $rebuildIndex = this.$widget.find('.rebuild-embeddings-index'); + $rebuildIndex.on('click', async () => { + try { + await server.post('llm/embeddings/rebuild-index'); + toastService.showMessage(t("ai_llm.rebuild_index_started")); + + // Start progress polling + this.pollIndexRebuildProgress(); + } catch (e) { + console.error('Error starting index rebuild:', e); + toastService.showError(t("ai_llm.rebuild_index_error")); + } + }); + } + + /** + * Display warnings for validation issues with providers + */ + async displayValidationWarnings() { + if (!this.$widget) return; + + const $warningDiv = this.$widget.find('.provider-validation-warning'); + + // Check if AI is enabled + const aiEnabled = this.$widget.find('.ai-enabled').prop('checked'); + if (!aiEnabled) { + $warningDiv.hide(); + 
return; + } + + // Get provider precedence + const providerPrecedence = (this.$widget.find('.ai-provider-precedence').val() as string || '').split(','); + + // Check for OpenAI configuration if it's in the precedence list + const openaiWarnings = []; + if (providerPrecedence.includes('openai')) { + const openaiApiKey = this.$widget.find('.openai-api-key').val(); + if (!openaiApiKey) { + openaiWarnings.push(t("ai_llm.empty_key_warning.openai")); + } + } + + // Check for Anthropic configuration if it's in the precedence list + const anthropicWarnings = []; + if (providerPrecedence.includes('anthropic')) { + const anthropicApiKey = this.$widget.find('.anthropic-api-key').val(); + if (!anthropicApiKey) { + anthropicWarnings.push(t("ai_llm.empty_key_warning.anthropic")); + } + } + + // Check for Voyage configuration if it's in the precedence list + const voyageWarnings = []; + if (providerPrecedence.includes('voyage')) { + const voyageApiKey = this.$widget.find('.voyage-api-key').val(); + if (!voyageApiKey) { + voyageWarnings.push(t("ai_llm.empty_key_warning.voyage")); + } + } + + // Check for Ollama configuration if it's in the precedence list + const ollamaWarnings = []; + if (providerPrecedence.includes('ollama')) { + const ollamaBaseUrl = this.$widget.find('.ollama-base-url').val(); + if (!ollamaBaseUrl) { + ollamaWarnings.push(t("ai_llm.ollama_no_url")); + } + } + + // Similar checks for embeddings + const embeddingWarnings = []; + const embeddingsEnabled = this.$widget.find('.enable-automatic-indexing').prop('checked'); + + if (embeddingsEnabled) { + const embeddingProviderPrecedence = (this.$widget.find('.embedding-provider-precedence').val() as string || '').split(','); + + if (embeddingProviderPrecedence.includes('openai') && !this.$widget.find('.openai-api-key').val()) { + embeddingWarnings.push(t("ai_llm.empty_key_warning.openai")); + } + + if (embeddingProviderPrecedence.includes('voyage') && !this.$widget.find('.voyage-api-key').val()) { + 
embeddingWarnings.push(t("ai_llm.empty_key_warning.voyage")); + } + + if (embeddingProviderPrecedence.includes('ollama') && !this.$widget.find('.ollama-base-url').val()) { + embeddingWarnings.push(t("ai_llm.empty_key_warning.ollama")); + } + } + + // Combine all warnings + const allWarnings = [ + ...openaiWarnings, + ...anthropicWarnings, + ...voyageWarnings, + ...ollamaWarnings, + ...embeddingWarnings + ]; + + // Show or hide warnings + if (allWarnings.length > 0) { + const warningHtml = '' + t("ai_llm.configuration_warnings") + '
    ' + + allWarnings.map(warning => `
  • ${warning}
  • `).join('') + '
'; + $warningDiv.html(warningHtml).show(); + } else { + $warningDiv.hide(); + } + } + + /** + * Poll for index rebuild progress + */ + pollIndexRebuildProgress() { + if (this.indexRebuildRefreshInterval) { + clearInterval(this.indexRebuildRefreshInterval); + } + + // Set up polling interval for index rebuild progress + this.indexRebuildRefreshInterval = setInterval(async () => { + await this.refreshEmbeddingStats(); + }, this.STATS_REFRESH_INTERVAL); + + // Stop polling after 5 minutes to avoid indefinite polling + setTimeout(() => { + if (this.indexRebuildRefreshInterval) { + clearInterval(this.indexRebuildRefreshInterval); + this.indexRebuildRefreshInterval = null; + } + }, 5 * 60 * 1000); + } + + /** + * Refresh embedding statistics + */ + async refreshEmbeddingStats() { + if (!this.$widget) return; + + try { + const response = await server.get('llm/embeddings/stats'); + + if (response && response.success) { + const stats = response.stats; + + // Update stats display + this.$widget.find('.embedding-processed-notes').text(stats.embeddedNotesCount); + this.$widget.find('.embedding-total-notes').text(stats.totalNotesCount); + this.$widget.find('.embedding-queued-notes').text(stats.queuedNotesCount); + this.$widget.find('.embedding-failed-notes').text(stats.failedNotesCount); + + if (stats.lastProcessedDate) { + const date = new Date(stats.lastProcessedDate); + this.$widget.find('.embedding-last-processed').text(date.toLocaleString()); + } else { + this.$widget.find('.embedding-last-processed').text('-'); + } + + // Update progress bar + const $progressBar = this.$widget.find('.embedding-progress'); + const progressPercent = stats.percentComplete; + $progressBar.css('width', `${progressPercent}%`); + $progressBar.attr('aria-valuenow', progressPercent.toString()); + $progressBar.text(`${progressPercent}%`); + + // Update status text + let statusText; + if (stats.queuedNotesCount > 0) { + statusText = t("ai_llm.agent.processing", { percentage: progressPercent }); + } 
else if (stats.embeddedNotesCount === 0) { + statusText = t("ai_llm.not_started"); + } else if (stats.embeddedNotesCount === stats.totalNotesCount) { + statusText = t("ai_llm.complete"); + + // Clear polling interval if processing is complete + if (this.indexRebuildRefreshInterval) { + clearInterval(this.indexRebuildRefreshInterval); + this.indexRebuildRefreshInterval = null; + } + } else { + statusText = t("ai_llm.partial", { percentage: progressPercent }); + } + + this.$widget.find('.embedding-status-text').text(statusText); + } + } catch (e) { + console.error('Error fetching embedding stats:', e); + } + } + + /** + * Fetch failed embedding notes + */ + async fetchFailedEmbeddingNotes() { + if (!this.$widget) return; + + try { + const response = await server.get('llm/embeddings/failed'); + + if (response && response.success) { + const failedNotes = response.failedNotes || []; + const $failedNotesList = this.$widget.find('.embedding-failed-notes-list'); + + if (failedNotes.length === 0) { + $failedNotesList.html(`
${t("ai_llm.no_failed_embeddings")}
`); + return; + } + + // Create a table with failed notes + let html = ` + + + + + + + + + + + `; + + for (const note of failedNotes) { + const date = new Date(note.lastAttempt); + const isPermanent = note.isPermanent; + const noteTitle = note.title || note.noteId; + + html += ` + + + + + + + `; + } + + html += ` + +
${t("ai_llm.note_title")}${t("ai_llm.error")}${t("ai_llm.last_attempt")}${t("ai_llm.actions")}
${noteTitle}${note.error}${date.toLocaleString()} + +
+ `; + + $failedNotesList.html(html); + + // Add event handlers for retry buttons + $failedNotesList.find('.retry-embedding').on('click', async function() { + const noteId = $(this).closest('tr').data('note-id'); + try { + await server.post('llm/embeddings/retry', { noteId }); + toastService.showMessage(t("ai_llm.retry_queued")); + // Remove this row or update status + $(this).closest('tr').remove(); + } catch (e) { + console.error('Error retrying embedding:', e); + toastService.showError(t("ai_llm.retry_failed")); + } + }); + + // Add event handlers for open note links + $failedNotesList.find('.open-note').on('click', function(e) { + e.preventDefault(); + const noteId = $(this).closest('tr').data('note-id'); + window.open(`#${noteId}`, '_blank'); + }); + } + } catch (e) { + console.error('Error fetching failed embedding notes:', e); + } + } + + /** + * Helper to get display name for providers + */ + getProviderDisplayName(provider: string): string { + switch(provider) { + case 'openai': return 'OpenAI'; + case 'anthropic': return 'Anthropic'; + case 'ollama': return 'Ollama'; + case 'voyage': return 'Voyage'; + case 'local': return 'Local'; + default: return provider.charAt(0).toUpperCase() + provider.slice(1); + } + } + + /** + * Called when the options have been loaded from the server + */ + optionsLoaded(options: OptionMap) { + if (!this.$widget) return; + + // AI Options + this.$widget.find('.ai-enabled').prop('checked', options.aiEnabled !== 'false'); + this.$widget.find('.ai-temperature').val(options.aiTemperature || '0.7'); + this.$widget.find('.ai-system-prompt').val(options.aiSystemPrompt || ''); + this.$widget.find('.ai-provider-precedence').val(options.aiProviderPrecedence || 'openai,anthropic,ollama'); + + // OpenAI Section + this.$widget.find('.openai-api-key').val(options.openaiApiKey || ''); + this.$widget.find('.openai-base-url').val(options.openaiBaseUrl || 'https://api.openai.com/v1'); + 
this.$widget.find('.openai-default-model').val(options.openaiDefaultModel || 'gpt-4o'); + this.$widget.find('.openai-embedding-model').val(options.openaiEmbeddingModel || 'text-embedding-3-small'); + + // Anthropic Section + this.$widget.find('.anthropic-api-key').val(options.anthropicApiKey || ''); + this.$widget.find('.anthropic-base-url').val(options.anthropicBaseUrl || 'https://api.anthropic.com'); + this.$widget.find('.anthropic-default-model').val(options.anthropicDefaultModel || 'claude-3-opus-20240229'); + + // Voyage Section + this.$widget.find('.voyage-api-key').val(options.voyageApiKey || ''); + this.$widget.find('.voyage-embedding-model').val(options.voyageEmbeddingModel || 'voyage-2'); + + // Ollama Section + this.$widget.find('.ollama-base-url').val(options.ollamaBaseUrl || 'http://localhost:11434'); + this.$widget.find('.ollama-default-model').val(options.ollamaDefaultModel || 'llama3'); + this.$widget.find('.ollama-embedding-model').val(options.ollamaEmbeddingModel || 'nomic-embed-text'); + + // Embedding Options + this.$widget.find('.embedding-provider-precedence').val(options.embeddingProviderPrecedence || 'openai,voyage,ollama,local'); + this.$widget.find('.embedding-auto-update-enabled').prop('checked', options.embeddingAutoUpdateEnabled !== 'false'); + this.$widget.find('.enable-automatic-indexing').prop('checked', options.enableAutomaticIndexing !== 'false'); + this.$widget.find('.embedding-similarity-threshold').val(options.embeddingSimilarityThreshold || '0.75'); + this.$widget.find('.max-notes-per-llm-query').val(options.maxNotesPerLlmQuery || '3'); + this.$widget.find('.embedding-dimension-strategy').val(options.embeddingDimensionStrategy || 'auto'); + this.$widget.find('.embedding-batch-size').val(options.embeddingBatchSize || '10'); + this.$widget.find('.embedding-update-interval').val(options.embeddingUpdateInterval || '5000'); + + // Display validation warnings + this.displayValidationWarnings(); + } + + cleanup() { + // Clear 
intervals + if (this.statsRefreshInterval) { + clearInterval(this.statsRefreshInterval); + this.statsRefreshInterval = null; + } + + if (this.indexRebuildRefreshInterval) { + clearInterval(this.indexRebuildRefreshInterval); + this.indexRebuildRefreshInterval = null; + } + } +} diff --git a/src/public/app/widgets/type_widgets/options/ai_settings/index.ts b/src/public/app/widgets/type_widgets/options/ai_settings/index.ts new file mode 100644 index 000000000..487abb407 --- /dev/null +++ b/src/public/app/widgets/type_widgets/options/ai_settings/index.ts @@ -0,0 +1,2 @@ +import AiSettingsWidget from './ai_settings_widget.js'; +export default AiSettingsWidget; \ No newline at end of file diff --git a/src/public/app/widgets/type_widgets/options/ai_settings/interfaces.ts b/src/public/app/widgets/type_widgets/options/ai_settings/interfaces.ts new file mode 100644 index 000000000..2a3326ced --- /dev/null +++ b/src/public/app/widgets/type_widgets/options/ai_settings/interfaces.ts @@ -0,0 +1,69 @@ +// Interface for the Ollama model response +export interface OllamaModelResponse { + success: boolean; + models: Array<{ + name: string; + model: string; + details?: { + family?: string; + parameter_size?: string; + } + }>; +} + +// Interface for embedding statistics +export interface EmbeddingStats { + success: boolean; + stats: { + totalNotesCount: number; + embeddedNotesCount: number; + queuedNotesCount: number; + failedNotesCount: number; + lastProcessedDate: string | null; + percentComplete: number; + } +} + +// Interface for failed embedding notes +export interface FailedEmbeddingNotes { + success: boolean; + failedNotes: Array<{ + noteId: string; + title?: string; + operation: string; + attempts: number; + lastAttempt: string; + error: string; + failureType: string; + chunks: number; + isPermanent: boolean; + }>; +} + +export interface OpenAIModelResponse { + success: boolean; + chatModels: Array<{ + id: string; + name: string; + type: string; + }>; + embeddingModels: Array<{ 
+ id: string; + name: string; + type: string; + }>; +} + +export interface AnthropicModelResponse { + success: boolean; + chatModels: Array<{ + id: string; + name: string; + type: string; + }>; + embeddingModels: Array<{ + id: string; + name: string; + type: string; + }>; +} \ No newline at end of file diff --git a/src/public/app/widgets/type_widgets/options/ai_settings/providers.ts b/src/public/app/widgets/type_widgets/options/ai_settings/providers.ts new file mode 100644 index 000000000..c3b35e34d --- /dev/null +++ b/src/public/app/widgets/type_widgets/options/ai_settings/providers.ts @@ -0,0 +1,318 @@ +import server from "../../../../services/server.js"; +import toastService from "../../../../services/toast.js"; +import { t } from "../../../../services/i18n.js"; +import options from "../../../../services/options.js"; +import type { OpenAIModelResponse, AnthropicModelResponse, OllamaModelResponse } from "./interfaces.js"; + +export class ProviderService { + constructor(private $widget: JQuery) { + // Initialize Voyage models (since they don't have a dynamic refresh yet) + this.initializeVoyageModels(); + } + + /** + * Initialize Voyage models with default values and ensure proper selection + */ + private initializeVoyageModels() { + setTimeout(() => { + const $voyageModelSelect = this.$widget.find('.voyage-embedding-model'); + if ($voyageModelSelect.length > 0) { + const currentValue = $voyageModelSelect.val(); + this.ensureSelectedValue($voyageModelSelect, currentValue, 'voyageEmbeddingModel'); + } + }, 100); // Small delay to ensure the widget is fully initialized + } + + /** + * Ensures the dropdown has the correct value set, prioritizing: + * 1. Current UI value if present + * 2. Value from database options if available + * 3. 
Falling back to first option if neither is available + */ + private ensureSelectedValue($select: JQuery, currentValue: string | number | string[] | undefined | null, optionName: string) { + if (currentValue) { + $select.val(currentValue); + // If the value doesn't exist anymore, select the first option + if (!$select.val()) { + $select.prop('selectedIndex', 0); + } + } else { + // If no current value exists in the dropdown but there's a default in the database + const savedModel = options.get(optionName); + if (savedModel) { + $select.val(savedModel); + // If the saved model isn't in the dropdown, select the first option + if (!$select.val()) { + $select.prop('selectedIndex', 0); + } + } + } + } + + /** + * Refreshes the list of OpenAI models + * @param showLoading Whether to show loading indicators and toasts + * @param openaiModelsRefreshed Reference to track if models have been refreshed + * @returns Promise that resolves when the refresh is complete + */ + async refreshOpenAIModels(showLoading: boolean, openaiModelsRefreshed: boolean): Promise { + if (!this.$widget) return false; + + const $refreshOpenAIModels = this.$widget.find('.refresh-openai-models'); + + // If we've already refreshed and we're not forcing a refresh, don't do it again + if (openaiModelsRefreshed && !showLoading) { + return openaiModelsRefreshed; + } + + if (showLoading) { + $refreshOpenAIModels.prop('disabled', true); + $refreshOpenAIModels.html(``); + } + + try { + const openaiBaseUrl = this.$widget.find('.openai-base-url').val() as string; + const response = await server.get(`llm/providers/openai/models?baseUrl=${encodeURIComponent(openaiBaseUrl)}`); + + if (response && response.success) { + // Update the chat models dropdown + if (response.chatModels?.length > 0) { + const $chatModelSelect = this.$widget.find('.openai-default-model'); + const currentChatValue = $chatModelSelect.val(); + + // Clear existing options + $chatModelSelect.empty(); + + // Sort models by name + const 
sortedChatModels = [...response.chatModels].sort((a, b) => a.name.localeCompare(b.name)); + + // Add models to the dropdown + sortedChatModels.forEach(model => { + $chatModelSelect.append(``); + }); + + // Try to restore the previously selected value + this.ensureSelectedValue($chatModelSelect, currentChatValue, 'openaiDefaultModel'); + } + + // Update the embedding models dropdown + if (response.embeddingModels?.length > 0) { + const $embedModelSelect = this.$widget.find('.openai-embedding-model'); + const currentEmbedValue = $embedModelSelect.val(); + + // Clear existing options + $embedModelSelect.empty(); + + // Sort models by name + const sortedEmbedModels = [...response.embeddingModels].sort((a, b) => a.name.localeCompare(b.name)); + + // Add models to the dropdown + sortedEmbedModels.forEach(model => { + $embedModelSelect.append(``); + }); + + // Try to restore the previously selected value + this.ensureSelectedValue($embedModelSelect, currentEmbedValue, 'openaiEmbeddingModel'); + } + + if (showLoading) { + // Show success message + const totalModels = (response.chatModels?.length || 0) + (response.embeddingModels?.length || 0); + toastService.showMessage(`${totalModels} OpenAI models found.`); + } + + return true; + } else if (showLoading) { + toastService.showError(`No OpenAI models found. 
Please check your API key and settings.`); + } + + return openaiModelsRefreshed; + } catch (e) { + console.error(`Error fetching OpenAI models:`, e); + if (showLoading) { + toastService.showError(`Error fetching OpenAI models: ${e}`); + } + return openaiModelsRefreshed; + } finally { + if (showLoading) { + $refreshOpenAIModels.prop('disabled', false); + $refreshOpenAIModels.html(``); + } + } + } + + /** + * Refreshes the list of Anthropic models + * @param showLoading Whether to show loading indicators and toasts + * @param anthropicModelsRefreshed Reference to track if models have been refreshed + * @returns Promise that resolves when the refresh is complete + */ + async refreshAnthropicModels(showLoading: boolean, anthropicModelsRefreshed: boolean): Promise { + if (!this.$widget) return false; + + const $refreshAnthropicModels = this.$widget.find('.refresh-anthropic-models'); + + // If we've already refreshed and we're not forcing a refresh, don't do it again + if (anthropicModelsRefreshed && !showLoading) { + return anthropicModelsRefreshed; + } + + if (showLoading) { + $refreshAnthropicModels.prop('disabled', true); + $refreshAnthropicModels.html(``); + } + + try { + const anthropicBaseUrl = this.$widget.find('.anthropic-base-url').val() as string; + const response = await server.get(`llm/providers/anthropic/models?baseUrl=${encodeURIComponent(anthropicBaseUrl)}`); + + if (response && response.success) { + // Update the chat models dropdown + if (response.chatModels?.length > 0) { + const $chatModelSelect = this.$widget.find('.anthropic-default-model'); + const currentChatValue = $chatModelSelect.val(); + + // Clear existing options + $chatModelSelect.empty(); + + // Sort models by name + const sortedChatModels = [...response.chatModels].sort((a, b) => a.name.localeCompare(b.name)); + + // Add models to the dropdown + sortedChatModels.forEach(model => { + $chatModelSelect.append(``); + }); + + // Try to restore the previously selected value + 
this.ensureSelectedValue($chatModelSelect, currentChatValue, 'anthropicDefaultModel'); + } + + // Handle embedding models if they exist + if (response.embeddingModels?.length > 0 && showLoading) { + toastService.showMessage(`Found ${response.embeddingModels.length} Anthropic embedding models.`); + } + + if (showLoading) { + // Show success message + const totalModels = (response.chatModels?.length || 0) + (response.embeddingModels?.length || 0); + toastService.showMessage(`${totalModels} Anthropic models found.`); + } + + return true; + } else if (showLoading) { + toastService.showError(`No Anthropic models found. Please check your API key and settings.`); + } + + return anthropicModelsRefreshed; + } catch (e) { + console.error(`Error fetching Anthropic models:`, e); + if (showLoading) { + toastService.showError(`Error fetching Anthropic models: ${e}`); + } + return anthropicModelsRefreshed; + } finally { + if (showLoading) { + $refreshAnthropicModels.prop('disabled', false); + $refreshAnthropicModels.html(``); + } + } + } + + /** + * Refreshes the list of Ollama models + * @param showLoading Whether to show loading indicators and toasts + * @param ollamaModelsRefreshed Reference to track if models have been refreshed + * @returns Promise that resolves when the refresh is complete + */ + async refreshOllamaModels(showLoading: boolean, ollamaModelsRefreshed: boolean): Promise { + if (!this.$widget) return false; + + const $refreshModels = this.$widget.find('.refresh-models'); + + // If we've already refreshed and we're not forcing a refresh, don't do it again + if (ollamaModelsRefreshed && !showLoading) { + return ollamaModelsRefreshed; + } + + if (showLoading) { + $refreshModels.prop('disabled', true); + $refreshModels.text(t("ai_llm.refreshing_models")); + } + + try { + const ollamaBaseUrl = this.$widget.find('.ollama-base-url').val() as string; + const response = await server.get(`llm/providers/ollama/models?baseUrl=${encodeURIComponent(ollamaBaseUrl)}`); + + if 
(response && response.success && response.models && response.models.length > 0) { + const $embedModelSelect = this.$widget.find('.ollama-embedding-model'); + const currentValue = $embedModelSelect.val(); + + // Clear existing options + $embedModelSelect.empty(); + + // Add embedding-specific models first + const embeddingModels = response.models.filter(model => + model.name.includes('embed') || model.name.includes('bert')); + + embeddingModels.forEach(model => { + $embedModelSelect.append(``); + }); + + if (embeddingModels.length > 0) { + // Add separator if we have embedding models + $embedModelSelect.append(``); + } + + // Then add general models which can be used for embeddings too + const generalModels = response.models.filter(model => + !model.name.includes('embed') && !model.name.includes('bert')); + + generalModels.forEach(model => { + $embedModelSelect.append(``); + }); + + // Try to restore the previously selected value + this.ensureSelectedValue($embedModelSelect, currentValue, 'ollamaEmbeddingModel'); + + // Also update the LLM model dropdown + const $modelSelect = this.$widget.find('.ollama-default-model'); + const currentModelValue = $modelSelect.val(); + + // Clear existing options + $modelSelect.empty(); + + // Sort models by name to make them easier to find + const sortedModels = [...response.models].sort((a, b) => a.name.localeCompare(b.name)); + + // Add all models to the dropdown + sortedModels.forEach(model => { + $modelSelect.append(``); + }); + + // Try to restore the previously selected value + this.ensureSelectedValue($modelSelect, currentModelValue, 'ollamaDefaultModel'); + + if (showLoading) { + toastService.showMessage(`${response.models.length} Ollama models found.`); + } + + return true; + } else if (showLoading) { + toastService.showError(`No Ollama models found. 
Please check if Ollama is running.`); + } + + return ollamaModelsRefreshed; + } catch (e) { + console.error(`Error fetching Ollama models:`, e); + if (showLoading) { + toastService.showError(`Error fetching Ollama models: ${e}`); + } + return ollamaModelsRefreshed; + } finally { + if (showLoading) { + $refreshModels.prop('disabled', false); + $refreshModels.html(``); + } + } + } +} diff --git a/src/public/app/widgets/type_widgets/options/ai_settings/template.ts b/src/public/app/widgets/type_widgets/options/ai_settings/template.ts new file mode 100644 index 000000000..f8cd79c81 --- /dev/null +++ b/src/public/app/widgets/type_widgets/options/ai_settings/template.ts @@ -0,0 +1,305 @@ +import { t } from "../../../../services/i18n.js"; + +export const TPL = ` +
+

${t("ai_llm.title")}

+ + + + +
+ +
${t("ai_llm.enable_ai_description")}
+
+
+ +
+

${t("ai_llm.embedding_statistics")}

+
+
+
+
+
${t("ai_llm.processed_notes")}: -
+
${t("ai_llm.total_notes")}: -
+
${t("ai_llm.progress")}: -
+
+ +
+
${t("ai_llm.queued_notes")}: -
+
${t("ai_llm.failed_notes")}: -
+
${t("ai_llm.last_processed")}: -
+
+
+
+
+
0%
+
+
+ +
+
+ +
+ +
${t("ai_llm.failed_notes")}
+
+
+
+
${t("ai_llm.no_failed_embeddings")}
+
+
+
+
+ +
+

${t("ai_llm.provider_configuration")}

+ +
+ + +
${t("ai_llm.provider_precedence_description")}
+
+ +
+ + +
${t("ai_llm.temperature_description")}
+
+ +
+ + +
${t("ai_llm.system_prompt_description")}
+
+
+ + +
+ +
+ +
+

${t("ai_llm.embeddings_configuration")}

+ +
+ + +
${t("ai_llm.embedding_provider_precedence_description")}
+
+ +
+ + +
${t("ai_llm.embedding_dimension_strategy_description")}
+
+ +
+ + +
${t("ai_llm.embedding_similarity_threshold_description")}
+
+ +
+ + +
${t("ai_llm.embedding_batch_size_description")}
+
+ +
+ + +
${t("ai_llm.embedding_update_interval_description")}
+
+ +
+ + +
${t("ai_llm.max_notes_per_llm_query_description")}
+
+ +
+ +
${t("ai_llm.enable_automatic_indexing_description")}
+
+ +
+ +
${t("ai_llm.embedding_auto_update_enabled_description")}
+
+ + +
+ +
${t("ai_llm.recreate_embeddings_description")}
+
+ + +
+ +
${t("ai_llm.rebuild_index_description")}
+
+ + +
+
${t("ai_llm.embedding_providers_order")}
+
${t("ai_llm.embedding_providers_order_description")}
+
+
`; diff --git a/src/public/stylesheets/llm_chat.css b/src/public/stylesheets/llm_chat.css new file mode 100644 index 000000000..aacdf543f --- /dev/null +++ b/src/public/stylesheets/llm_chat.css @@ -0,0 +1,275 @@ +/* LLM Chat Panel Styles */ +.note-context-chat { + background-color: var(--main-background-color); +} + +/* Message Styling */ +.chat-message { + margin-bottom: 1rem; +} + +.message-avatar { + width: 36px; + height: 36px; + border-radius: 50%; + font-size: 1.25rem; + flex-shrink: 0; +} + +.user-avatar { + background-color: var(--input-background-color); + color: var(--cmd-button-icon-color); +} + +.assistant-avatar { + background-color: var(--subtle-border-color, var(--main-border-color)); + color: var(--hover-item-text-color); +} + +.message-content { + max-width: calc(100% - 50px); + box-shadow: 0 1px 3px rgba(0, 0, 0, 0.05); + color: var(--main-text-color); +} + +.user-content { + border-radius: 0.5rem 0.5rem 0 0.5rem !important; + background-color: var(--input-background-color) !important; +} + +.assistant-content { + border-radius: 0.5rem 0.5rem 0.5rem 0 !important; + background-color: var(--main-background-color); + border: 1px solid var(--subtle-border-color, var(--main-border-color)); +} + +/* Tool Execution Styling */ +.tool-execution-info { + margin-top: 0.75rem; + margin-bottom: 1.5rem; + border: 1px solid var(--subtle-border-color); + border-radius: 0.5rem; + overflow: hidden; + box-shadow: 0 1px 4px rgba(0, 0, 0, 0.05); + background-color: var(--main-background-color); + /* Add a subtle transition effect */ + transition: all 0.2s ease-in-out; +} + +.tool-execution-status { + background-color: var(--accented-background-color, rgba(0, 0, 0, 0.03)) !important; + border-radius: 0 !important; + padding: 0.5rem !important; + max-height: 250px !important; + overflow-y: auto; +} + +.tool-execution-status .d-flex { + border-bottom: 1px solid var(--subtle-border-color); + padding-bottom: 0.5rem; + margin-bottom: 0.5rem; +} + +.tool-step { + padding: 
0.5rem; + margin-bottom: 0.75rem; + border-radius: 0.375rem; + background-color: var(--main-background-color); + border: 1px solid var(--subtle-border-color); + transition: background-color 0.2s ease; +} + +.tool-step:hover { + background-color: rgba(0, 0, 0, 0.01); +} + +.tool-step:last-child { + margin-bottom: 0; +} + +/* Tool step specific styling */ +.tool-step.executing { + background-color: rgba(0, 123, 255, 0.05); + border-color: rgba(0, 123, 255, 0.2); +} + +.tool-step.result { + background-color: rgba(40, 167, 69, 0.05); + border-color: rgba(40, 167, 69, 0.2); +} + +.tool-step.error { + background-color: rgba(220, 53, 69, 0.05); + border-color: rgba(220, 53, 69, 0.2); +} + +/* Tool result formatting */ +.tool-result pre { + margin: 0.5rem 0; + padding: 0.5rem; + background-color: rgba(0, 0, 0, 0.03); + border-radius: 0.25rem; + overflow: auto; + max-height: 300px; +} + +.tool-result code { + font-family: "SFMono-Regular", Consolas, "Liberation Mono", Menlo, monospace; + font-size: 0.9em; +} + +.tool-args code { + display: block; + padding: 0.5rem; + background-color: rgba(0, 0, 0, 0.03); + border-radius: 0.25rem; + margin-top: 0.25rem; + font-size: 0.85em; + color: var(--muted-text-color); + white-space: pre-wrap; + overflow: auto; + max-height: 100px; +} + +/* Tool Execution in Chat Styling */ +.chat-tool-execution { + padding: 0 0 0 36px; /* Aligned with message content, accounting for avatar width */ + width: 100%; + margin-bottom: 1rem; +} + +.tool-execution-container { + background-color: var(--accented-background-color, rgba(245, 247, 250, 0.7)); + border: 1px solid var(--subtle-border-color); + border-radius: 0.375rem; + box-shadow: 0 1px 3px rgba(0, 0, 0, 0.05); + overflow: hidden; + max-width: calc(100% - 20px); + transition: all 0.3s ease; +} + +.tool-execution-container.collapsed { + display: none; +} + +.tool-execution-header { + background-color: var(--main-background-color); + border-bottom: 1px solid var(--subtle-border-color); + 
margin-bottom: 0.5rem; + color: var(--muted-text-color); + font-weight: 500; + padding: 0.6rem 0.8rem; + cursor: pointer; + transition: background-color 0.2s ease; +} + +.tool-execution-header:hover { + background-color: var(--hover-item-background-color, rgba(0, 0, 0, 0.03)); +} + +.tool-execution-toggle { + color: var(--muted-text-color) !important; + background: transparent !important; + padding: 0.2rem 0.4rem !important; + transition: transform 0.2s ease; +} + +.tool-execution-toggle:hover { + color: var(--main-text-color) !important; +} + +.tool-execution-toggle i.bx-chevron-down { + transform: rotate(0deg); + transition: transform 0.3s ease; +} + +.tool-execution-toggle i.bx-chevron-right { + transform: rotate(-90deg); + transition: transform 0.3s ease; +} + +.tool-execution-chat-steps { + padding: 0.5rem; + max-height: 300px; + overflow-y: auto; +} + +/* Make error text more visible */ +.text-danger { + color: #dc3545 !important; +} + +/* Sources Styling */ +.sources-container { + background-color: var(--accented-background-color, var(--main-background-color)); + border-top: 1px solid var(--main-border-color); + color: var(--main-text-color); +} + +.source-item { + transition: all 0.2s ease; + background-color: var(--main-background-color); + border-color: var(--subtle-border-color, var(--main-border-color)) !important; +} + +.source-item:hover { + background-color: var(--link-hover-background, var(--hover-item-background-color)); +} + +.source-link { + color: var(--link-color, var(--hover-item-text-color)); + text-decoration: none; + display: block; + width: 100%; +} + +.source-link:hover { + color: var(--link-hover-color, var(--hover-item-text-color)); +} + +/* Input Area Styling */ +.note-context-chat-form { + background-color: var(--main-background-color); + border-top: 1px solid var(--main-border-color); +} + +.context-option-container { + padding: 0.5rem 0; + border-bottom: 1px solid var(--subtle-border-color, var(--main-border-color)); + color: 
var(--main-text-color); +} + +.chat-input-container { + padding-top: 0.5rem; +} + +.note-context-chat-input { + border-color: var(--subtle-border-color, var(--main-border-color)); + background-color: var(--input-background-color) !important; + color: var(--input-text-color) !important; + resize: none; + transition: all 0.2s ease; + min-height: 50px; + max-height: 150px; +} + +.note-context-chat-input:focus { + border-color: var(--input-focus-outline-color, var(--main-border-color)); + box-shadow: 0 0 0 0.25rem var(--input-focus-outline-color, rgba(13, 110, 253, 0.25)); +} + +.note-context-chat-send-button { + width: 40px; + height: 40px; + align-self: flex-end; + background-color: var(--cmd-button-background-color) !important; + color: var(--cmd-button-text-color) !important; +} + +/* Loading Indicator */ +.loading-indicator { + align-items: center; + justify-content: center; + padding: 1rem; + color: var(--muted-text-color); +} \ No newline at end of file diff --git a/src/public/stylesheets/style.css b/src/public/stylesheets/style.css index 5673acb8f..41239294f 100644 --- a/src/public/stylesheets/style.css +++ b/src/public/stylesheets/style.css @@ -1804,6 +1804,187 @@ footer.file-footer button { margin: 5px; } +/* AI Chat Widget Styles */ +.chat-widget { + display: flex; + flex-direction: column; + height: 100%; + overflow: hidden; +} + +.chat-header { + display: flex; + justify-content: space-between; + align-items: center; + padding: 10px 15px; + border-bottom: 1px solid var(--main-border-color); + background-color: var(--accented-background-color); +} + +.chat-title { + font-weight: bold; + flex-grow: 1; + overflow: hidden; + text-overflow: ellipsis; + white-space: nowrap; +} + +.chat-actions { + display: flex; + gap: 5px; +} + +.chat-messages { + flex-grow: 1; + overflow-y: auto; + padding: 15px; + display: flex; + flex-direction: column; + gap: 15px; +} + +.chat-message { + display: flex; + gap: 10px; + max-width: 85%; +} + +.chat-message-user { + align-self: 
flex-end; + flex-direction: row-reverse; +} + +.chat-message-assistant { + align-self: flex-start; +} + +.chat-message-avatar { + flex-shrink: 0; + width: 30px; + height: 30px; + border-radius: 50%; + background-color: var(--accented-background-color); + display: flex; + align-items: center; + justify-content: center; +} + +.chat-message-user .chat-message-avatar { + background-color: var(--primary-color); + color: white; +} + +.chat-message-assistant .chat-message-avatar { + background-color: var(--muted-text-color); + color: white; +} + +.chat-message-content { + flex-grow: 1; + padding: 10px 15px; + border-radius: 12px; + background-color: var(--accented-background-color); + overflow-wrap: break-word; + word-break: break-word; +} + +.chat-message-user .chat-message-content { + background-color: var(--primary-color); + color: white; +} + +.chat-message-content pre { + background-color: var(--main-background-color); + border-radius: 5px; + padding: 10px; + overflow-x: auto; + margin: 10px 0; +} + +.chat-message-user .chat-message-content pre { + background-color: rgba(255, 255, 255, 0.2); +} + +.chat-message-content code { + font-family: monospace; + background-color: var(--main-background-color); + padding: 2px 4px; + border-radius: 3px; +} + +.chat-message-user .chat-message-content code { + background-color: rgba(255, 255, 255, 0.2); +} + +.chat-controls { + display: flex; + flex-direction: column; + padding: 15px; + gap: 10px; + border-top: 1px solid var(--main-border-color); +} + +.chat-input-container { + position: relative; +} + +.chat-input { + width: 100%; + resize: none; + padding-right: 40px; +} + +.chat-buttons { + display: flex; + justify-content: space-between; +} + +.chat-loading { + animation: chat-loading 1s infinite; + letter-spacing: 3px; +} + +@keyframes chat-loading { + 0% { opacity: 0.3; } + 50% { opacity: 1; } + 100% { opacity: 0.3; } +} + +/* Right Pane Tab Styles */ +#right-pane-tab-container { + display: flex; + gap: 10px; +} + 
+.right-pane-tab { + padding: 5px 10px; + cursor: pointer; + border-radius: 5px; + white-space: nowrap; + overflow: hidden; + text-overflow: ellipsis; + transition: background-color 0.2s ease; +} + +.right-pane-tab:hover { + background-color: var(--hover-item-background-color); +} + +.right-pane-tab.active { + background-color: var(--primary-color); + color: white; +} + +.right-pane-tab .tab-title { + display: flex; + align-items: center; + gap: 5px; +} + +.right-pane-tab .tab-title .bx { + font-size: 1.1em; +} + .admonition { --accent-color: var(--card-border-color); border: 1px solid var(--accent-color); @@ -1811,12 +1992,7 @@ footer.file-footer button { background: var(--card-background-color); border-radius: 0.5em; padding: 1em; - margin: 1.25em 0; - position: relative; - padding-left: 2.5em; - overflow: hidden; } - .admonition p:last-child { margin-bottom: 0; } @@ -1835,6 +2011,61 @@ footer.file-footer button { .admonition.caution { --accent-color: #ff2e2e; } .admonition.warning { --accent-color: #e2aa03; } +.ck-content .admonition.note::before { content: "\eb21"; } +.ck-content .admonition.tip::before { content: "\ea0d"; } +.ck-content .admonition.important::before { content: "\ea7c"; } +.ck-content .admonition.caution::before { content: "\eac7"; } +.ck-content .admonition.warning::before { content: "\eac5"; } + +.chat-options-container { + display: flex; + margin: 5px 0; + align-items: center; + padding: 0 10px; +} + +.chat-option { + display: flex; + align-items: center; + font-size: 0.9em; + margin-right: 15px; + cursor: pointer; +} + +.chat-option input[type="checkbox"] { + margin-right: 5px; +} + +/* Style for thinking process in chat responses */ +.thinking-process { + background-color: rgba(0, 0, 0, 0.05); + border-left: 3px solid var(--main-text-color); + padding: 10px; + margin: 10px 0; + border-radius: 4px; +} + +.thinking-step { + margin-bottom: 8px; + padding-left: 10px; +} + +.thinking-step.observation { + border-left: 2px solid #69c7ff; +} + 
+.thinking-step.hypothesis { + border-left: 2px solid #9839f7; +} + +.thinking-step.evidence { + border-left: 2px solid #40c025; +} + +.thinking-step.conclusion { + border-left: 2px solid #e2aa03; + font-weight: bold; +} .admonition.note::before { content: "\eb21"; } .admonition.tip::before { content: "\ea0d"; } .admonition.important::before { content: "\ea7c"; } @@ -1875,4 +2106,4 @@ footer.file-footer button { .bx-tn-toc::before { content: "\ec24"; transform: rotate(180deg); -} \ No newline at end of file +} diff --git a/src/public/stylesheets/theme-next/llm-chat.css b/src/public/stylesheets/theme-next/llm-chat.css new file mode 100644 index 000000000..dc3342485 --- /dev/null +++ b/src/public/stylesheets/theme-next/llm-chat.css @@ -0,0 +1,122 @@ +/* LLM Chat Launcher Widget Styles */ +.note-context-chat { + display: flex; + flex-direction: column; + height: 100%; + width: 100%; +} + +.note-context-chat-container { + flex-grow: 1; + overflow-y: auto; + padding: 15px; +} + +.chat-message { + display: flex; + margin-bottom: 15px; + max-width: 85%; +} + +.chat-message.user-message { + margin-left: auto; +} + +.chat-message.assistant-message { + margin-right: auto; +} + +.message-avatar { + width: 32px; + height: 32px; + border-radius: 50%; + display: flex; + align-items: center; + justify-content: center; + margin-right: 8px; +} + +.user-message .message-avatar { + background-color: var(--primary-color); + color: white; +} + +.assistant-message .message-avatar { + background-color: var(--secondary-color); + color: white; +} + +.message-content { + background-color: var(--more-accented-background-color); + border-radius: 12px; + padding: 10px 15px; + max-width: calc(100% - 40px); +} + +.user-message .message-content { + background-color: var(--accented-background-color); +} + +.message-content pre { + background-color: var(--code-background-color); + border-radius: 5px; + padding: 10px; + overflow-x: auto; + max-width: 100%; +} + +.message-content code { + 
background-color: var(--code-background-color); + padding: 2px 4px; + border-radius: 3px; +} + +.loading-indicator { + display: flex; + align-items: center; + margin: 10px 0; + color: var(--muted-text-color); +} + +.sources-container { + background-color: var(--accented-background-color); + border-top: 1px solid var(--main-border-color); + padding: 8px; +} + +.sources-list { + font-size: 0.9em; +} + +.source-item { + padding: 4px 0; +} + +.source-link { + color: var(--link-color); + text-decoration: none; +} + +.source-link:hover { + text-decoration: underline; +} + +.note-context-chat-form { + display: flex; + background-color: var(--main-background-color); + border-top: 1px solid var(--main-border-color); + padding: 10px; +} + +.note-context-chat-input { + resize: vertical; + min-height: 44px; + max-height: 200px; +} + +/* Responsive adjustments */ +@media (max-width: 768px) { + .chat-message { + max-width: 95%; + } +} \ No newline at end of file diff --git a/src/public/stylesheets/theme-next/pages.css b/src/public/stylesheets/theme-next/pages.css index 7cc1ee77b..5a34b9680 100644 --- a/src/public/stylesheets/theme-next/pages.css +++ b/src/public/stylesheets/theme-next/pages.css @@ -257,6 +257,30 @@ div.note-detail-empty { font-size: .85em; } +nav.options-section-tabs { + min-width: var(--options-card-min-width); + max-width: var(--options-card-max-width); + margin: auto; +} + +nav.options-section-tabs .nav-tabs { + border-bottom: 0; +} + +nav.options-section-tabs + .options-section { + border-top-left-radius: 0; + border-top-right-radius: 0; +} + +/* Appearance */ + +.main-font-size-input-group, +.tree-font-size-input-group, +.detail-font-size-input-group, +.monospace-font-size-input-group { + width: fit-content; +} + +/* Shortcuts */ .note-detail-content-widget-content:has(.shortcuts-options-section) { diff --git a/src/public/translations/en/translation.json b/src/public/translations/en/translation.json index ba069a4e9..6ba46d7e2 100644 --- 
a/src/public/translations/en/translation.json +++ b/src/public/translations/en/translation.json @@ -1122,6 +1122,201 @@ "layout-vertical-description": "launcher bar is on the left (default)", "layout-horizontal-description": "launcher bar is underneath the tab bar, the tab bar is now full width." }, + "ai_llm": { + "embeddings_configuration": "Embeddings Configuration", + "not_started": "Not started", + "title": "AI & Embedding Settings", + "embedding_statistics": "Embedding Statistics", + "processed_notes": "Processed Notes", + "total_notes": "Total Notes", + "progress": "Progress", + "queued_notes": "Queued Notes", + "failed_notes": "Failed Notes", + "last_processed": "Last Processed", + "refresh_stats": "Refresh Statistics", + "no_failed_embeddings": "No failed embeddings found.", + "enable_ai_features": "Enable AI/LLM features", + "enable_ai_description": "Enable AI features like note summarization, content generation, and other LLM capabilities", + "openai_tab": "OpenAI", + "anthropic_tab": "Anthropic", + "voyage_tab": "Voyage AI", + "ollama_tab": "Ollama", + "enable_ai": "Enable AI/LLM features", + "enable_ai_desc": "Enable AI features like note summarization, content generation, and other LLM capabilities", + "provider_configuration": "AI Provider Configuration", + "provider_precedence": "Provider Precedence", + "provider_precedence_description": "Comma-separated list of providers in order of precedence (e.g., 'openai,anthropic,ollama')", + "temperature": "Temperature", + "temperature_description": "Controls randomness in responses (0 = deterministic, 2 = maximum randomness)", + "system_prompt": "System Prompt", + "system_prompt_description": "Default system prompt used for all AI interactions", + "openai_configuration": "OpenAI Configuration", + "openai_settings": "OpenAI Settings", + "api_key": "API Key", + "url": "Base URL", + "model": "Model", + "openai_api_key_description": "Your OpenAI API key for accessing their AI services", + 
"anthropic_api_key_description": "Your Anthropic API key for accessing Claude models", + "default_model": "Default Model", + "openai_model_description": "Examples: gpt-4o, gpt-4-turbo, gpt-3.5-turbo", + "embedding_model": "Embedding Model", + "openai_embedding_model_description": "Model used for generating embeddings (text-embedding-3-small recommended)", + "base_url": "Base URL", + "openai_url_description": "Default: https://api.openai.com/v1", + "anthropic_settings": "Anthropic Settings", + "anthropic_url_description": "Base URL for the Anthropic API (default: https://api.anthropic.com)", + "anthropic_model_description": "Anthropic Claude models for chat completion", + "voyage_settings": "Voyage AI Settings", + "voyage_api_key_description": "Your Voyage AI API key for accessing embeddings services", + "ollama_settings": "Ollama Settings", + "ollama_url_description": "URL for the Ollama API (default: http://localhost:11434)", + "ollama_model_description": "Ollama model to use for chat completion", + "anthropic_configuration": "Anthropic Configuration", + "voyage_embedding_model_description": "Voyage AI embedding models for text embeddings (voyage-2 recommended)", + "voyage_configuration": "Voyage AI Configuration", + "voyage_url_description": "Default: https://api.voyageai.com/v1", + "ollama_configuration": "Ollama Configuration", + "enable_ollama": "Enable Ollama", + "enable_ollama_description": "Enable Ollama for local AI model usage", + "ollama_url": "Ollama URL", + "ollama_model": "Ollama Model", + "ollama_embedding_model": "Embedding Model", + "ollama_embedding_model_description": "Specialized model for generating embeddings (vector representations)", + "refresh_models": "Refresh Models", + "refreshing_models": "Refreshing...", + "embedding_configuration": "Embeddings Configuration", + "embedding_default_provider": "Default Provider", + "embedding_default_provider_description": "Select the default provider used for generating note embeddings", + 
"embedding_provider_precedence": "Embedding Provider Precedence", + "embedding_providers_order": "Embedding Provider Order", + "embedding_providers_order_description": "Set the order of embedding providers in comma-separated format (e.g., \"openai,voyage,ollama,local\")", + "enable_automatic_indexing": "Enable Automatic Indexing", + "enable_automatic_indexing_description": "Automatically generate embeddings for new and updated notes", + "embedding_auto_update_enabled": "Auto-update Embeddings", + "embedding_auto_update_enabled_description": "Automatically update embeddings when notes are modified", + "recreate_embeddings": "Recreate All Embeddings", + "recreate_embeddings_description": "Regenerate all note embeddings from scratch (may take a long time for large note collections)", + "recreate_embeddings_started": "Embeddings regeneration started. This may take a long time for large note collections.", + "recreate_embeddings_error": "Error starting embeddings regeneration. Check logs for details.", + "recreate_embeddings_confirm": "Are you sure you want to recreate all embeddings? This may take a long time for large note collections.", + "rebuild_index": "Rebuild Index", + "rebuild_index_description": "Rebuild the vector search index for better performance (much faster than recreating embeddings)", + "rebuild_index_started": "Embedding index rebuild started. This may take several minutes.", + "rebuild_index_error": "Error starting index rebuild. 
Check logs for details.", + "note_title": "Note Title", + "error": "Error", + "last_attempt": "Last Attempt", + "actions": "Actions", + "retry": "Retry", + "partial": "{{ percentage }}% completed", + "retry_queued": "Note queued for retry", + "retry_failed": "Failed to queue note for retry", + "embedding_provider_precedence_description": "Comma-separated list of providers in order of precedence for embeddings search (e.g., 'openai,ollama,anthropic')", + "embedding_dimension_strategy": "Embedding Dimension Strategy", + "embedding_dimension_auto": "Auto (Recommended)", + "embedding_dimension_fixed": "Fixed", + "embedding_similarity_threshold": "Similarity Threshold", + "embedding_similarity_threshold_description": "Minimum similarity score for notes to be included in search results (0-1)", + "max_notes_per_llm_query": "Max Notes Per Query", + "max_notes_per_llm_query_description": "Maximum number of similar notes to include in AI context", + "embedding_dimension_strategy_description": "Choose how embeddings are handled. 'Native' preserves maximum information by adapting smaller vectors to match larger ones (recommended). 
'Regenerate' creates new embeddings with the target model for specific search needs.", + "drag_providers_to_reorder": "Drag providers up or down to set your preferred order for embedding searches", + "active_providers": "Active Providers", + "disabled_providers": "Disabled Providers", + "remove_provider": "Remove provider from search", + "restore_provider": "Restore provider to search", + "embedding_generation_location": "Generation Location", + "embedding_generation_location_description": "Select where embedding generation should happen", + "embedding_generation_location_client": "Client/Server", + "embedding_generation_location_sync_server": "Sync Server", + "enable_auto_update_embeddings": "Auto-update Embeddings", + "enable_auto_update_embeddings_description": "Automatically update embeddings when notes are modified", + "auto_update_embeddings": "Auto-update Embeddings", + "auto_update_embeddings_desc": "Automatically update embeddings when notes are modified", + "similarity_threshold": "Similarity Threshold", + "similarity_threshold_description": "Minimum similarity score (0-1) for notes to be included in context for LLM queries", + "embedding_batch_size": "Batch Size", + "embedding_batch_size_description": "Number of notes to process in a single batch (1-50)", + "embedding_update_interval": "Update Interval (ms)", + "embedding_update_interval_description": "Time between processing batches of embeddings (in milliseconds)", + "embedding_default_dimension": "Default Dimension", + "embedding_default_dimension_description": "Default embedding vector dimension when creating new embeddings", + "reprocess_all_embeddings": "Reprocess All Embeddings", + "reprocess_all_embeddings_description": "Queue all notes for embedding processing. 
This may take some time depending on your number of notes.", + "reprocessing_embeddings": "Reprocessing...", + "reprocess_started": "Embedding reprocessing started in the background", + "reprocess_error": "Error starting embedding reprocessing", + + "reprocess_index": "Rebuild Search Index", + "reprocess_index_description": "Optimize the search index for better performance. This uses existing embeddings without regenerating them (much faster than reprocessing all embeddings).", + "reprocessing_index": "Rebuilding...", + "reprocess_index_started": "Search index optimization started in the background", + "reprocess_index_error": "Error rebuilding search index", + + "index_rebuild_progress": "Index Rebuild Progress", + "index_rebuilding": "Optimizing index ({{percentage}}%)", + "index_rebuild_complete": "Index optimization complete", + "index_rebuild_status_error": "Error checking index rebuild status", + "never": "Never", + "processing": "Processing ({{percentage}}%)", + "incomplete": "Incomplete ({{percentage}}%)", + "complete": "Complete (100%)", + "refreshing": "Refreshing...", + "stats_error": "Error fetching embedding statistics", + "auto_refresh_notice": "Auto-refreshes every {{seconds}} seconds", + "note_queued_for_retry": "Note queued for retry", + "failed_to_retry_note": "Failed to retry note", + "all_notes_queued_for_retry": "All failed notes queued for retry", + "failed_to_retry_all": "Failed to retry notes", + "ai_settings": "AI Settings", + "api_key_tooltip": "API key for accessing the service", + "confirm_delete_embeddings": "Are you sure you want to delete all AI embeddings? This will remove all semantic search capabilities until notes are reindexed, which can take a significant amount of time.", + "empty_key_warning": { + "anthropic": "Anthropic API key is empty. Please enter a valid API key.", + "openai": "OpenAI API key is empty. Please enter a valid API key.", + "voyage": "Voyage API key is empty. 
Please enter a valid API key.", + "ollama": "Ollama API key is empty. Please enter a valid API key." + }, + "agent": { + "processing": "Processing...", + "thinking": "Thinking...", + "loading": "Loading...", + "generating": "Generating..." + }, + "name": "AI", + "openai": "OpenAI", + "use_enhanced_context": "Use enhanced context", + "enhanced_context_description": "Provides the AI with more context from the note and its related notes for better responses", + "show_thinking": "Show thinking", + "show_thinking_description": "Show the AI's chain of thought process", + "enter_message": "Enter your message...", + "error_contacting_provider": "Error contacting AI provider. Please check your settings and internet connection.", + "error_generating_response": "Error generating AI response", + "index_all_notes": "Index All Notes", + "index_status": "Index Status", + "indexed_notes": "Indexed Notes", + "indexing_stopped": "Indexing stopped", + "indexing_in_progress": "Indexing in progress...", + "last_indexed": "Last Indexed", + "n_notes_queued": "{{ count }} note queued for indexing", + "n_notes_queued_plural": "{{ count }} notes queued for indexing", + "note_chat": "Note Chat", + "notes_indexed": "{{ count }} note indexed", + "notes_indexed_plural": "{{ count }} notes indexed", + "reset_embeddings": "Reset Embeddings", + "sources": "Sources", + "start_indexing": "Start Indexing", + "use_advanced_context": "Use Advanced Context", + "ollama_no_url": "Ollama is not configured. Please enter a valid URL.", + "chat": { + "root_note_title": "AI Chats", + "root_note_content": "This note contains your saved AI chat conversations.", + "new_chat_title": "New Chat", + "create_new_ai_chat": "Create new AI Chat" + }, + "create_new_ai_chat": "Create new AI Chat", + "configuration_warnings": "There are some issues with your AI configuration. Please check your settings." 
+ }, "zoom_factor": { "title": "Zoom Factor (desktop build only)", "description": "Zooming can be controlled with CTRL+- and CTRL+= shortcuts as well." @@ -1474,6 +1669,7 @@ "confirm-change": "It is not recommended to change note type when note content is not empty. Do you want to continue anyway?", "geo-map": "Geo Map", "beta-feature": "Beta", + "ai-chat": "AI Chat", "task-list": "Task List" }, "protect_note": { diff --git a/src/routes/api/anthropic.ts b/src/routes/api/anthropic.ts new file mode 100644 index 000000000..900a0b084 --- /dev/null +++ b/src/routes/api/anthropic.ts @@ -0,0 +1,112 @@ +import options from "../../services/options.js"; +import log from "../../services/log.js"; +import type { Request, Response } from "express"; +import { PROVIDER_CONSTANTS } from '../../services/llm/constants/provider_constants.js'; +import Anthropic from '@anthropic-ai/sdk'; + +// Interface for Anthropic model entries +interface AnthropicModel { + id: string; + name: string; + type: string; +} + +/** + * @swagger + * /api/anthropic/models: + * post: + * summary: List available models from Anthropic + * operationId: anthropic-list-models + * requestBody: + * required: false + * content: + * application/json: + * schema: + * type: object + * properties: + * baseUrl: + * type: string + * description: Optional custom Anthropic API base URL + * responses: + * '200': + * description: List of available Anthropic models + * content: + * application/json: + * schema: + * type: object + * properties: + * success: + * type: boolean + * chatModels: + * type: array + * items: + * type: object + * properties: + * id: + * type: string + * name: + * type: string + * type: + * type: string + * embeddingModels: + * type: array + * items: + * type: object + * properties: + * id: + * type: string + * name: + * type: string + * type: + * type: string + * '500': + * description: Error listing models + * security: + * - session: [] + * tags: ["llm"] + */ +async function listModels(req: Request, 
res: Response) { + try { + const { baseUrl } = req.body; + + // Use provided base URL or default from options + const anthropicBaseUrl = baseUrl || + await options.getOption('anthropicBaseUrl') || + PROVIDER_CONSTANTS.ANTHROPIC.BASE_URL; + + const apiKey = await options.getOption('anthropicApiKey'); + + if (!apiKey) { + throw new Error('Anthropic API key is not configured'); + } + + log.info(`Using predefined Anthropic models list (avoiding direct API call)`); + + // Instead of using the SDK's built-in models listing which might not work, + // directly use the predefined available models + const chatModels = PROVIDER_CONSTANTS.ANTHROPIC.AVAILABLE_MODELS.map(model => ({ + id: model.id, + name: model.name, + type: 'chat' + })); + + // Anthropic doesn't currently have embedding models + const embeddingModels: AnthropicModel[] = []; + + // Return the models list + return { + success: true, + chatModels, + embeddingModels + }; + } catch (error: any) { + log.error(`Error listing Anthropic models: ${error.message || 'Unknown error'}`); + + // Properly throw the error to be handled by the global error handler + throw new Error(`Failed to list Anthropic models: ${error.message || 'Unknown error'}`); + } +} + +export default { + listModels +}; diff --git a/src/routes/api/embeddings.ts b/src/routes/api/embeddings.ts new file mode 100644 index 000000000..012a9c82f --- /dev/null +++ b/src/routes/api/embeddings.ts @@ -0,0 +1,798 @@ +import options from "../../services/options.js"; +import vectorStore from "../../services/llm/embeddings/index.js"; +import providerManager from "../../services/llm/providers/providers.js"; +import indexService from "../../services/llm/index_service.js"; +import becca from "../../becca/becca.js"; +import type { Request, Response } from "express"; +import log from "../../services/log.js"; +import sql from "../../services/sql.js"; + +/** + * @swagger + * /api/llm/embeddings/similar/{noteId}: + * get: + * summary: Find similar notes based on a given 
note ID + * operationId: embeddings-similar-by-note + * parameters: + * - name: noteId + * in: path + * required: true + * schema: + * type: string + * - name: providerId + * in: query + * required: false + * schema: + * type: string + * default: openai + * description: Embedding provider ID + * - name: modelId + * in: query + * required: false + * schema: + * type: string + * default: text-embedding-3-small + * description: Embedding model ID + * - name: limit + * in: query + * required: false + * schema: + * type: integer + * default: 10 + * description: Maximum number of similar notes to return + * - name: threshold + * in: query + * required: false + * schema: + * type: number + * format: float + * default: 0.7 + * description: Similarity threshold (0.0-1.0) + * responses: + * '200': + * description: List of similar notes + * content: + * application/json: + * schema: + * type: object + * properties: + * success: + * type: boolean + * similarNotes: + * type: array + * items: + * type: object + * properties: + * noteId: + * type: string + * title: + * type: string + * similarity: + * type: number + * format: float + * '400': + * description: Invalid request parameters + * '404': + * description: Note not found + * security: + * - session: [] + * tags: ["llm"] + */ +async function findSimilarNotes(req: Request, res: Response) { + const noteId = req.params.noteId; + const providerId = req.query.providerId as string || 'openai'; + const modelId = req.query.modelId as string || 'text-embedding-3-small'; + const limit = parseInt(req.query.limit as string || '10', 10); + const threshold = parseFloat(req.query.threshold as string || '0.7'); + + if (!noteId) { + return [400, { + success: false, + message: "Note ID is required" + }]; + } + + const embedding = await vectorStore.getEmbeddingForNote(noteId, providerId, modelId); + + if (!embedding) { + // If no embedding exists for this note yet, generate one + const note = becca.getNote(noteId); + if (!note) { + return 
[404, { + success: false, + message: "Note not found" + }]; + } + + const context = await vectorStore.getNoteEmbeddingContext(noteId); + const provider = providerManager.getEmbeddingProvider(providerId); + + if (!provider) { + return [400, { + success: false, + message: `Embedding provider '${providerId}' not found` + }]; + } + + const newEmbedding = await provider.generateNoteEmbeddings(context); + await vectorStore.storeNoteEmbedding(noteId, providerId, modelId, newEmbedding); + + const similarNotes = await vectorStore.findSimilarNotes( + newEmbedding, providerId, modelId, limit, threshold + ); + + return { + success: true, + similarNotes + }; + } + + const similarNotes = await vectorStore.findSimilarNotes( + embedding.embedding, providerId, modelId, limit, threshold + ); + + return { + success: true, + similarNotes + }; +} + +/** + * @swagger + * /api/llm/embeddings/search: + * post: + * summary: Search for notes similar to provided text + * operationId: embeddings-search-by-text + * parameters: + * - name: providerId + * in: query + * required: false + * schema: + * type: string + * default: openai + * description: Embedding provider ID + * - name: modelId + * in: query + * required: false + * schema: + * type: string + * default: text-embedding-3-small + * description: Embedding model ID + * - name: limit + * in: query + * required: false + * schema: + * type: integer + * default: 10 + * description: Maximum number of similar notes to return + * - name: threshold + * in: query + * required: false + * schema: + * type: number + * format: float + * default: 0.7 + * description: Similarity threshold (0.0-1.0) + * requestBody: + * required: true + * content: + * application/json: + * schema: + * type: object + * properties: + * text: + * type: string + * description: Text to search with + * responses: + * '200': + * description: List of similar notes + * content: + * application/json: + * schema: + * type: object + * properties: + * success: + * type: boolean + * 
similarNotes: + * type: array + * items: + * type: object + * properties: + * noteId: + * type: string + * title: + * type: string + * similarity: + * type: number + * format: float + * '400': + * description: Invalid request parameters + * security: + * - session: [] + * tags: ["llm"] + */ +async function searchByText(req: Request, res: Response) { + const { text } = req.body; + const providerId = req.query.providerId as string || 'openai'; + const modelId = req.query.modelId as string || 'text-embedding-3-small'; + const limit = parseInt(req.query.limit as string || '10', 10); + const threshold = parseFloat(req.query.threshold as string || '0.7'); + + if (!text) { + return [400, { + success: false, + message: "Search text is required" + }]; + } + + const provider = providerManager.getEmbeddingProvider(providerId); + + if (!provider) { + return [400, { + success: false, + message: `Embedding provider '${providerId}' not found` + }]; + } + + // Generate embedding for the search text + const embedding = await provider.generateEmbeddings(text); + + // Find similar notes + const similarNotes = await vectorStore.findSimilarNotes( + embedding, providerId, modelId, limit, threshold + ); + + return { + success: true, + similarNotes + }; +} + +/** + * @swagger + * /api/llm/embeddings/providers: + * get: + * summary: Get available embedding providers + * operationId: embeddings-get-providers + * responses: + * '200': + * description: List of available embedding providers + * content: + * application/json: + * schema: + * type: object + * properties: + * success: + * type: boolean + * providers: + * type: array + * items: + * type: object + * properties: + * id: + * type: string + * name: + * type: string + * isEnabled: + * type: boolean + * priority: + * type: integer + * config: + * type: object + * security: + * - session: [] + * tags: ["llm"] + */ +async function getProviders(req: Request, res: Response) { + const providerConfigs = await 
providerManager.getEmbeddingProviderConfigs();
+
+    return {
+        success: true,
+        providers: providerConfigs
+    };
+}
+
+/**
+ * @swagger
+ * /api/llm/embeddings/providers/{providerId}:
+ *   patch:
+ *     summary: Update embedding provider configuration
+ *     operationId: embeddings-update-provider
+ *     parameters:
+ *       - name: providerId
+ *         in: path
+ *         required: true
+ *         schema:
+ *           type: string
+ *         description: Provider ID to update
+ *     requestBody:
+ *       required: true
+ *       content:
+ *         application/json:
+ *           schema:
+ *             type: object
+ *             properties:
+ *               isEnabled:
+ *                 type: boolean
+ *                 description: Whether provider is enabled
+ *               priority:
+ *                 type: integer
+ *                 description: Priority order (lower is higher priority)
+ *               config:
+ *                 type: object
+ *                 description: Provider-specific configuration (accepted but not applied yet)
+ *     responses:
+ *       '200':
+ *         description: Provider updated successfully
+ *         content:
+ *           application/json:
+ *             schema:
+ *               type: object
+ *               properties:
+ *                 success:
+ *                   type: boolean
+ *       '400':
+ *         description: Invalid provider ID or configuration
+ *       '404':
+ *         description: Provider not found
+ *     security:
+ *       - session: []
+ *     tags: ["llm"]
+ */
+async function updateProvider(req: Request, res: Response) {
+    const { providerId } = req.params;
+    // NOTE(review): the documented `config` object in the request body is
+    // accepted by the API but is not forwarded to providerManager — only the
+    // enabled flag and the priority are persisted. The swagger property name
+    // is `isEnabled`, matching what this handler actually reads.
+    const { isEnabled, priority } = req.body;
+
+    const success = await providerManager.updateEmbeddingProviderConfig(
+        providerId, isEnabled, priority
+    );
+
+    if (!success) {
+        return [404, {
+            success: false,
+            message: "Provider not found"
+        }];
+    }
+
+    return {
+        success: true
+    };
+}
+
+/**
+ * @swagger
+ * /api/llm/embeddings/reprocess:
+ *   post:
+ *     summary: Reprocess embeddings for all notes
+ *     operationId: embeddings-reprocess-all
+ *     requestBody:
+ *       required: true
+ *       content:
+ *         application/json:
+ *           schema:
+ *             type: object
+ *             properties:
+ *               providerId:
+ *                 type: string
+ *                 description: Provider ID to use for reprocessing
+ *               modelId:
+ *                 type: string
+ *                 description: Model ID to use for reprocessing
+ *               forceReprocess:
+ *                 type: boolean
+ *                 description: Whether to
reprocess notes that already have embeddings + * responses: + * '200': + * description: Reprocessing started + * content: + * application/json: + * schema: + * type: object + * properties: + * success: + * type: boolean + * jobId: + * type: string + * message: + * type: string + * '400': + * description: Invalid provider ID or configuration + * security: + * - session: [] + * tags: ["llm"] + */ +async function reprocessAllNotes(req: Request, res: Response) { + // Import cls + const cls = (await import("../../services/cls.js")).default; + + // Start the reprocessing operation in the background + setTimeout(async () => { + try { + // Wrap the operation in cls.init to ensure proper context + cls.init(async () => { + await vectorStore.reprocessAllNotes(); + log.info("Embedding reprocessing completed successfully"); + }); + } catch (error: any) { + log.error(`Error during background embedding reprocessing: ${error.message || "Unknown error"}`); + } + }, 0); + + // Return the response data + return { + success: true, + message: "Embedding reprocessing started in the background" + }; +} + +/** + * @swagger + * /api/llm/embeddings/queue-status: + * get: + * summary: Get status of the embedding processing queue + * operationId: embeddings-queue-status + * parameters: + * - name: jobId + * in: query + * required: false + * schema: + * type: string + * description: Optional job ID to get status for a specific processing job + * responses: + * '200': + * description: Queue status information + * content: + * application/json: + * schema: + * type: object + * properties: + * success: + * type: boolean + * status: + * type: string + * enum: [idle, processing, paused] + * progress: + * type: number + * format: float + * description: Progress percentage (0-100) + * details: + * type: object + * security: + * - session: [] + * tags: ["llm"] + */ +async function getQueueStatus(req: Request, res: Response) { + // Use the imported sql instead of requiring it + const queueCount = await 
sql.getValue( + "SELECT COUNT(*) FROM embedding_queue" + ); + + const failedCount = await sql.getValue( + "SELECT COUNT(*) FROM embedding_queue WHERE attempts > 0" + ); + + const totalEmbeddingsCount = await sql.getValue( + "SELECT COUNT(*) FROM note_embeddings" + ); + + return { + success: true, + status: { + queueCount, + failedCount, + totalEmbeddingsCount + } + }; +} + +/** + * @swagger + * /api/llm/embeddings/stats: + * get: + * summary: Get embedding statistics + * operationId: embeddings-stats + * responses: + * '200': + * description: Embedding statistics + * content: + * application/json: + * schema: + * type: object + * properties: + * success: + * type: boolean + * stats: + * type: object + * properties: + * totalEmbeddings: + * type: integer + * providers: + * type: object + * modelCounts: + * type: object + * lastUpdated: + * type: string + * format: date-time + * security: + * - session: [] + * tags: ["llm"] + */ +async function getEmbeddingStats(req: Request, res: Response) { + const stats = await vectorStore.getEmbeddingStats(); + + return { + success: true, + stats + }; +} + +/** + * @swagger + * /api/llm/embeddings/failed: + * get: + * summary: Get list of notes that failed embedding generation + * operationId: embeddings-failed-notes + * responses: + * '200': + * description: List of failed notes + * content: + * application/json: + * schema: + * type: object + * properties: + * success: + * type: boolean + * failedNotes: + * type: array + * items: + * type: object + * properties: + * noteId: + * type: string + * title: + * type: string + * error: + * type: string + * failedAt: + * type: string + * format: date-time + * security: + * - session: [] + * tags: ["llm"] + */ +async function getFailedNotes(req: Request, res: Response) { + const limit = parseInt(req.query.limit as string || '100', 10); + const failedNotes = await vectorStore.getFailedEmbeddingNotes(limit); + + // No need to fetch note titles here anymore as they're already included in 
the response + return { + success: true, + failedNotes: failedNotes + }; +} + +/** + * @swagger + * /api/llm/embeddings/retry/{noteId}: + * post: + * summary: Retry generating embeddings for a failed note + * operationId: embeddings-retry-note + * parameters: + * - name: noteId + * in: path + * required: true + * schema: + * type: string + * description: Note ID to retry + * - name: providerId + * in: query + * required: false + * schema: + * type: string + * description: Provider ID to use (defaults to configured default) + * - name: modelId + * in: query + * required: false + * schema: + * type: string + * description: Model ID to use (defaults to provider default) + * responses: + * '200': + * description: Retry result + * content: + * application/json: + * schema: + * type: object + * properties: + * success: + * type: boolean + * message: + * type: string + * '400': + * description: Invalid request + * '404': + * description: Note not found + * security: + * - session: [] + * tags: ["llm"] + */ +async function retryFailedNote(req: Request, res: Response) { + const { noteId } = req.params; + + if (!noteId) { + return [400, { + success: false, + message: "Note ID is required" + }]; + } + + const success = await vectorStore.retryFailedEmbedding(noteId); + + if (!success) { + return [404, { + success: false, + message: "Failed note not found or note is not marked as failed" + }]; + } + + return { + success: true, + message: "Note queued for retry" + }; +} + +/** + * @swagger + * /api/llm/embeddings/retry-all-failed: + * post: + * summary: Retry generating embeddings for all failed notes + * operationId: embeddings-retry-all-failed + * requestBody: + * required: false + * content: + * application/json: + * schema: + * type: object + * properties: + * providerId: + * type: string + * description: Provider ID to use (defaults to configured default) + * modelId: + * type: string + * description: Model ID to use (defaults to provider default) + * responses: + * '200': 
+ * description: Retry started + * content: + * application/json: + * schema: + * type: object + * properties: + * success: + * type: boolean + * message: + * type: string + * jobId: + * type: string + * security: + * - session: [] + * tags: ["llm"] + */ +async function retryAllFailedNotes(req: Request, res: Response) { + const count = await vectorStore.retryAllFailedEmbeddings(); + + return { + success: true, + message: `${count} failed notes queued for retry` + }; +} + +/** + * @swagger + * /api/llm/embeddings/rebuild-index: + * post: + * summary: Rebuild the vector store index + * operationId: embeddings-rebuild-index + * responses: + * '200': + * description: Rebuild started + * content: + * application/json: + * schema: + * type: object + * properties: + * success: + * type: boolean + * message: + * type: string + * jobId: + * type: string + * security: + * - session: [] + * tags: ["llm"] + */ +async function rebuildIndex(req: Request, res: Response) { + // Start the index rebuilding operation in the background + setTimeout(async () => { + try { + await indexService.startFullIndexing(true); + log.info("Index rebuilding completed successfully"); + } catch (error: any) { + log.error(`Error during background index rebuilding: ${error.message || "Unknown error"}`); + } + }, 0); + + // Return the response data + return { + success: true, + message: "Index rebuilding started in the background" + }; +} + +/** + * @swagger + * /api/llm/embeddings/index-rebuild-status: + * get: + * summary: Get status of the vector index rebuild operation + * operationId: embeddings-rebuild-status + * parameters: + * - name: jobId + * in: query + * required: false + * schema: + * type: string + * description: Optional job ID to get status for a specific rebuild job + * responses: + * '200': + * description: Rebuild status information + * content: + * application/json: + * schema: + * type: object + * properties: + * success: + * type: boolean + * status: + * type: string + * enum: 
[idle, in_progress, completed, failed] + * progress: + * type: number + * format: float + * description: Progress percentage (0-100) + * message: + * type: string + * details: + * type: object + * properties: + * startTime: + * type: string + * format: date-time + * processed: + * type: integer + * total: + * type: integer + * security: + * - session: [] + * tags: ["llm"] + */ +async function getIndexRebuildStatus(req: Request, res: Response) { + const status = indexService.getIndexRebuildStatus(); + + return { + success: true, + status + }; +} + +export default { + findSimilarNotes, + searchByText, + getProviders, + updateProvider, + reprocessAllNotes, + getQueueStatus, + getEmbeddingStats, + getFailedNotes, + retryFailedNote, + retryAllFailedNotes, + rebuildIndex, + getIndexRebuildStatus +}; diff --git a/src/routes/api/llm.ts b/src/routes/api/llm.ts new file mode 100644 index 000000000..adda7cd26 --- /dev/null +++ b/src/routes/api/llm.ts @@ -0,0 +1,944 @@ +import type { Request, Response } from "express"; +import log from "../../services/log.js"; +import options from "../../services/options.js"; + +// Import the index service for knowledge base management +import indexService from "../../services/llm/index_service.js"; +import restChatService from "../../services/llm/rest_chat_service.js"; +import chatService from '../../services/llm/chat_service.js'; +import chatStorageService from '../../services/llm/chat_storage_service.js'; + +// Define basic interfaces +interface ChatMessage { + role: 'user' | 'assistant' | 'system'; + content: string; + timestamp?: Date; +} + + + +/** + * @swagger + * /api/llm/sessions: + * post: + * summary: Create a new LLM chat session + * operationId: llm-create-session + * requestBody: + * required: true + * content: + * application/json: + * schema: + * type: object + * properties: + * title: + * type: string + * description: Title for the chat session + * systemPrompt: + * type: string + * description: System message to set the 
behavior of the assistant + * temperature: + * type: number + * description: Temperature parameter for the LLM (0.0-1.0) + * maxTokens: + * type: integer + * description: Maximum tokens to generate in responses + * model: + * type: string + * description: Specific model to use (depends on provider) + * provider: + * type: string + * description: LLM provider to use (e.g., 'openai', 'anthropic', 'ollama') + * contextNoteId: + * type: string + * description: Note ID to use as context for the session + * responses: + * '200': + * description: Successfully created session + * content: + * application/json: + * schema: + * type: object + * properties: + * sessionId: + * type: string + * title: + * type: string + * createdAt: + * type: string + * format: date-time + * security: + * - session: [] + * tags: ["llm"] + */ +async function createSession(req: Request, res: Response) { + return restChatService.createSession(req, res); +} + +/** + * @swagger + * /api/llm/sessions/{sessionId}: + * get: + * summary: Retrieve a specific chat session + * operationId: llm-get-session + * parameters: + * - name: sessionId + * in: path + * required: true + * schema: + * type: string + * responses: + * '200': + * description: Chat session details + * content: + * application/json: + * schema: + * type: object + * properties: + * id: + * type: string + * title: + * type: string + * messages: + * type: array + * items: + * type: object + * properties: + * role: + * type: string + * enum: [user, assistant, system] + * content: + * type: string + * timestamp: + * type: string + * format: date-time + * createdAt: + * type: string + * format: date-time + * lastActive: + * type: string + * format: date-time + * '404': + * description: Session not found + * security: + * - session: [] + * tags: ["llm"] + */ +async function getSession(req: Request, res: Response) { + return restChatService.getSession(req, res); +} + +/** + * @swagger + * /api/llm/chat/{chatNoteId}: + * patch: + * summary: Update 
a chat's settings + * operationId: llm-update-chat + * parameters: + * - name: chatNoteId + * in: path + * required: true + * schema: + * type: string + * description: The ID of the chat note (formerly sessionId) + * requestBody: + * required: true + * content: + * application/json: + * schema: + * type: object + * properties: + * title: + * type: string + * description: Updated title for the session + * systemPrompt: + * type: string + * description: Updated system prompt + * temperature: + * type: number + * description: Updated temperature setting + * maxTokens: + * type: integer + * description: Updated maximum tokens setting + * model: + * type: string + * description: Updated model selection + * provider: + * type: string + * description: Updated provider selection + * contextNoteId: + * type: string + * description: Updated note ID for context + * responses: + * '200': + * description: Session successfully updated + * content: + * application/json: + * schema: + * type: object + * properties: + * id: + * type: string + * title: + * type: string + * updatedAt: + * type: string + * format: date-time + * '404': + * description: Session not found + * security: + * - session: [] + * tags: ["llm"] + */ +async function updateSession(req: Request, res: Response) { + // Get the chat using ChatService + const chatNoteId = req.params.chatNoteId; + const updates = req.body; + + try { + // Get the chat + const session = await chatService.getOrCreateSession(chatNoteId); + + // Update title if provided + if (updates.title) { + await chatStorageService.updateChat(chatNoteId, session.messages, updates.title); + } + + // Return the updated chat + return { + id: chatNoteId, + title: updates.title || session.title, + updatedAt: new Date() + }; + } catch (error) { + log.error(`Error updating chat: ${error}`); + throw new Error(`Failed to update chat: ${error}`); + } +} + +/** + * @swagger + * /api/llm/sessions: + * get: + * summary: List all chat sessions + * operationId: 
llm-list-sessions + * responses: + * '200': + * description: List of chat sessions + * content: + * application/json: + * schema: + * type: array + * items: + * type: object + * properties: + * id: + * type: string + * title: + * type: string + * createdAt: + * type: string + * format: date-time + * lastActive: + * type: string + * format: date-time + * messageCount: + * type: integer + * security: + * - session: [] + * tags: ["llm"] + */ +async function listSessions(req: Request, res: Response) { + // Get all sessions using ChatService + try { + const sessions = await chatService.getAllSessions(); + + // Format the response + return { + sessions: sessions.map(session => ({ + id: session.id, + title: session.title, + createdAt: new Date(), // Since we don't have this in chat sessions + lastActive: new Date(), // Since we don't have this in chat sessions + messageCount: session.messages.length + })) + }; + } catch (error) { + log.error(`Error listing sessions: ${error}`); + throw new Error(`Failed to list sessions: ${error}`); + } +} + +/** + * @swagger + * /api/llm/chat/{chatNoteId}: + * delete: + * summary: Delete a chat session + * operationId: llm-delete-session + * parameters: + * - name: chatNoteId + * in: path + * required: true + * schema: + * type: string + * responses: + * '200': + * description: Session successfully deleted + * '404': + * description: Session not found + * security: + * - session: [] + * tags: ["llm"] + */ +async function deleteSession(req: Request, res: Response) { + return restChatService.deleteSession(req, res); +} + +/** + * @swagger + * /api/llm/chat/{chatNoteId}/messages: + * post: + * summary: Send a message to an LLM and get a response + * operationId: llm-send-message + * parameters: + * - name: chatNoteId + * in: path + * required: true + * schema: + * type: string + * description: The ID of the chat note (formerly sessionId) + * requestBody: + * required: true + * content: + * application/json: + * schema: + * type: object + 
* properties: + * message: + * type: string + * description: The user message to send to the LLM + * options: + * type: object + * description: Optional parameters for this specific message + * properties: + * temperature: + * type: number + * maxTokens: + * type: integer + * model: + * type: string + * provider: + * type: string + * includeContext: + * type: boolean + * description: Whether to include relevant notes as context + * useNoteContext: + * type: boolean + * description: Whether to use the session's context note + * responses: + * '200': + * description: LLM response + * content: + * application/json: + * schema: + * type: object + * properties: + * response: + * type: string + * sources: + * type: array + * items: + * type: object + * properties: + * noteId: + * type: string + * title: + * type: string + * similarity: + * type: number + * sessionId: + * type: string + * '404': + * description: Session not found + * '500': + * description: Error processing request + * security: + * - session: [] + * tags: ["llm"] + */ +async function sendMessage(req: Request, res: Response) { + return restChatService.handleSendMessage(req, res); +} + +/** + * @swagger + * /api/llm/indexes/stats: + * get: + * summary: Get stats about the LLM knowledge base indexing status + * operationId: llm-index-stats + * responses: + * '200': + * description: Index stats successfully retrieved + * security: + * - session: [] + * tags: ["llm"] + */ +async function getIndexStats(req: Request, res: Response) { + try { + // Check if AI is enabled + const aiEnabled = await options.getOptionBool('aiEnabled'); + if (!aiEnabled) { + return { + success: false, + message: "AI features are disabled" + }; + } + + // Return indexing stats + const stats = await indexService.getIndexingStats(); + return { + success: true, + ...stats + }; + } catch (error: any) { + log.error(`Error getting index stats: ${error.message || 'Unknown error'}`); + throw new Error(`Failed to get index stats: 
${error.message || 'Unknown error'}`); + } +} + +/** + * @swagger + * /api/llm/indexes: + * post: + * summary: Start or continue indexing the knowledge base + * operationId: llm-start-indexing + * requestBody: + * required: false + * content: + * application/json: + * schema: + * type: object + * properties: + * force: + * type: boolean + * description: Whether to force reindexing of all notes + * responses: + * '200': + * description: Indexing started successfully + * security: + * - session: [] + * tags: ["llm"] + */ +async function startIndexing(req: Request, res: Response) { + try { + // Check if AI is enabled + const aiEnabled = await options.getOptionBool('aiEnabled'); + if (!aiEnabled) { + return { + success: false, + message: "AI features are disabled" + }; + } + + const { force = false } = req.body; + + // Start indexing + await indexService.startFullIndexing(force); + + return { + success: true, + message: "Indexing started" + }; + } catch (error: any) { + log.error(`Error starting indexing: ${error.message || 'Unknown error'}`); + throw new Error(`Failed to start indexing: ${error.message || 'Unknown error'}`); + } +} + +/** + * @swagger + * /api/llm/indexes/failed: + * get: + * summary: Get list of notes that failed to index + * operationId: llm-failed-indexes + * parameters: + * - name: limit + * in: query + * required: false + * schema: + * type: integer + * default: 100 + * responses: + * '200': + * description: Failed indexes successfully retrieved + * security: + * - session: [] + * tags: ["llm"] + */ +async function getFailedIndexes(req: Request, res: Response) { + try { + // Check if AI is enabled + const aiEnabled = await options.getOptionBool('aiEnabled'); + if (!aiEnabled) { + return { + success: false, + message: "AI features are disabled" + }; + } + + const limit = parseInt(req.query.limit as string || "100", 10); + + // Get failed indexes + const failed = await indexService.getFailedIndexes(limit); + + return { + success: true, + failed + 
}; + } catch (error: any) { + log.error(`Error getting failed indexes: ${error.message || 'Unknown error'}`); + throw new Error(`Failed to get failed indexes: ${error.message || 'Unknown error'}`); + } +} + +/** + * @swagger + * /api/llm/indexes/notes/{noteId}: + * put: + * summary: Retry indexing a specific note that previously failed + * operationId: llm-retry-index + * parameters: + * - name: noteId + * in: path + * required: true + * schema: + * type: string + * responses: + * '200': + * description: Index retry successfully initiated + * security: + * - session: [] + * tags: ["llm"] + */ +async function retryFailedIndex(req: Request, res: Response) { + try { + // Check if AI is enabled + const aiEnabled = await options.getOptionBool('aiEnabled'); + if (!aiEnabled) { + return { + success: false, + message: "AI features are disabled" + }; + } + + const { noteId } = req.params; + + // Retry indexing the note + const result = await indexService.retryFailedNote(noteId); + + return { + success: true, + message: result ? 
"Note queued for indexing" : "Failed to queue note for indexing" + }; + } catch (error: any) { + log.error(`Error retrying failed index: ${error.message || 'Unknown error'}`); + throw new Error(`Failed to retry index: ${error.message || 'Unknown error'}`); + } +} + +/** + * @swagger + * /api/llm/indexes/failed: + * put: + * summary: Retry indexing all failed notes + * operationId: llm-retry-all-indexes + * responses: + * '200': + * description: Retry of all failed indexes successfully initiated + * security: + * - session: [] + * tags: ["llm"] + */ +async function retryAllFailedIndexes(req: Request, res: Response) { + try { + // Check if AI is enabled + const aiEnabled = await options.getOptionBool('aiEnabled'); + if (!aiEnabled) { + return { + success: false, + message: "AI features are disabled" + }; + } + + // Retry all failed notes + const count = await indexService.retryAllFailedNotes(); + + return { + success: true, + message: `${count} notes queued for reprocessing` + }; + } catch (error: any) { + log.error(`Error retrying all failed indexes: ${error.message || 'Unknown error'}`); + throw new Error(`Failed to retry all indexes: ${error.message || 'Unknown error'}`); + } +} + +/** + * @swagger + * /api/llm/indexes/notes/similar: + * get: + * summary: Find notes similar to a query string + * operationId: llm-find-similar-notes + * parameters: + * - name: query + * in: query + * required: true + * schema: + * type: string + * - name: contextNoteId + * in: query + * required: false + * schema: + * type: string + * - name: limit + * in: query + * required: false + * schema: + * type: integer + * default: 5 + * responses: + * '200': + * description: Similar notes found successfully + * security: + * - session: [] + * tags: ["llm"] + */ +async function findSimilarNotes(req: Request, res: Response) { + try { + // Check if AI is enabled + const aiEnabled = await options.getOptionBool('aiEnabled'); + if (!aiEnabled) { + return { + success: false, + message: "AI 
features are disabled" + }; + } + + const query = req.query.query as string; + const contextNoteId = req.query.contextNoteId as string | undefined; + const limit = parseInt(req.query.limit as string || "5", 10); + + if (!query) { + return { + success: false, + message: "Query is required" + }; + } + + // Find similar notes + const similar = await indexService.findSimilarNotes(query, contextNoteId, limit); + + return { + success: true, + similar + }; + } catch (error: any) { + log.error(`Error finding similar notes: ${error.message || 'Unknown error'}`); + throw new Error(`Failed to find similar notes: ${error.message || 'Unknown error'}`); + } +} + +/** + * @swagger + * /api/llm/indexes/context: + * get: + * summary: Generate context for an LLM query based on the knowledge base + * operationId: llm-generate-context + * parameters: + * - name: query + * in: query + * required: true + * schema: + * type: string + * - name: contextNoteId + * in: query + * required: false + * schema: + * type: string + * - name: depth + * in: query + * required: false + * schema: + * type: integer + * default: 2 + * responses: + * '200': + * description: Context generated successfully + * security: + * - session: [] + * tags: ["llm"] + */ +async function generateQueryContext(req: Request, res: Response) { + try { + // Check if AI is enabled + const aiEnabled = await options.getOptionBool('aiEnabled'); + if (!aiEnabled) { + return { + success: false, + message: "AI features are disabled" + }; + } + + const query = req.query.query as string; + const contextNoteId = req.query.contextNoteId as string | undefined; + const depth = parseInt(req.query.depth as string || "2", 10); + + if (!query) { + return { + success: false, + message: "Query is required" + }; + } + + // Generate context + const context = await indexService.generateQueryContext(query, contextNoteId, depth); + + return { + success: true, + context + }; + } catch (error: any) { + log.error(`Error generating query context: 
${error.message || 'Unknown error'}`); + throw new Error(`Failed to generate query context: ${error.message || 'Unknown error'}`); + } +} + +/** + * @swagger + * /api/llm/indexes/notes/{noteId}: + * post: + * summary: Index a specific note for LLM knowledge base + * operationId: llm-index-note + * parameters: + * - name: noteId + * in: path + * required: true + * schema: + * type: string + * responses: + * '200': + * description: Note indexed successfully + * security: + * - session: [] + * tags: ["llm"] + */ +async function indexNote(req: Request, res: Response) { + try { + // Check if AI is enabled + const aiEnabled = await options.getOptionBool('aiEnabled'); + if (!aiEnabled) { + return { + success: false, + message: "AI features are disabled" + }; + } + + const { noteId } = req.params; + + if (!noteId) { + return { + success: false, + message: "Note ID is required" + }; + } + + // Index the note + const result = await indexService.generateNoteIndex(noteId); + + return { + success: true, + message: result ? 
"Note indexed successfully" : "Failed to index note" + }; + } catch (error: any) { + log.error(`Error indexing note: ${error.message || 'Unknown error'}`); + throw new Error(`Failed to index note: ${error.message || 'Unknown error'}`); + } +} + +/** + * @swagger + * /api/llm/chat/{chatNoteId}/messages/stream: + * post: + * summary: Stream a message to an LLM via WebSocket + * operationId: llm-stream-message + * parameters: + * - name: chatNoteId + * in: path + * required: true + * schema: + * type: string + * description: The ID of the chat note to stream messages to (formerly sessionId) + * requestBody: + * required: true + * content: + * application/json: + * schema: + * type: object + * properties: + * content: + * type: string + * description: The user message to send to the LLM + * useAdvancedContext: + * type: boolean + * description: Whether to use advanced context extraction + * showThinking: + * type: boolean + * description: Whether to show thinking process in the response + * responses: + * '200': + * description: Streaming started successfully + * '404': + * description: Session not found + * '500': + * description: Error processing request + * security: + * - session: [] + * tags: ["llm"] + */ +async function streamMessage(req: Request, res: Response) { + log.info("=== Starting streamMessage ==="); + try { + const chatNoteId = req.params.chatNoteId; + const { content, useAdvancedContext, showThinking } = req.body; + + if (!content || typeof content !== 'string' || content.trim().length === 0) { + throw new Error('Content cannot be empty'); + } + + // Check if session exists + const session = restChatService.getSessions().get(chatNoteId); + if (!session) { + throw new Error('Chat not found'); + } + + // Update last active timestamp + session.lastActive = new Date(); + + // Add user message to the session + session.messages.push({ + role: 'user', + content, + timestamp: new Date() + }); + + // Create request parameters for the pipeline + const 
requestParams = { + chatNoteId: chatNoteId, + content, + useAdvancedContext: useAdvancedContext === true, + showThinking: showThinking === true, + stream: true // Always stream for this endpoint + }; + + // Create a fake request/response pair to pass to the handler + const fakeReq = { + ...req, + method: 'GET', // Set to GET to indicate streaming + query: { + stream: 'true', // Set stream param - don't use format: 'stream' to avoid confusion + useAdvancedContext: String(useAdvancedContext === true), + showThinking: String(showThinking === true) + }, + params: { + chatNoteId: chatNoteId + }, + // Make sure the original content is available to the handler + body: { + content, + useAdvancedContext: useAdvancedContext === true, + showThinking: showThinking === true + } + } as unknown as Request; + + // Log to verify correct parameters + log.info(`WebSocket stream settings - useAdvancedContext=${useAdvancedContext === true}, in query=${fakeReq.query.useAdvancedContext}, in body=${fakeReq.body.useAdvancedContext}`); + // Extra safety to ensure the parameters are passed correctly + if (useAdvancedContext === true) { + log.info(`Enhanced context IS enabled for this request`); + } else { + log.info(`Enhanced context is NOT enabled for this request`); + } + + // Process the request in the background + Promise.resolve().then(async () => { + try { + await restChatService.handleSendMessage(fakeReq, res); + } catch (error) { + log.error(`Background message processing error: ${error}`); + + // Import the WebSocket service + const wsService = (await import('../../services/ws.js')).default; + + // Define LLMStreamMessage interface + interface LLMStreamMessage { + type: 'llm-stream'; + chatNoteId: string; + content?: string; + thinking?: string; + toolExecution?: any; + done?: boolean; + error?: string; + raw?: unknown; + } + + // Send error to client via WebSocket + wsService.sendMessageToAllClients({ + type: 'llm-stream', + chatNoteId: chatNoteId, + error: `Error processing 
message: ${error}`, + done: true + } as LLMStreamMessage); + } + }); + + // Import the WebSocket service + const wsService = (await import('../../services/ws.js')).default; + + // Let the client know streaming has started via WebSocket (helps client confirm connection is working) + wsService.sendMessageToAllClients({ + type: 'llm-stream', + chatNoteId: chatNoteId, + thinking: 'Initializing streaming LLM response...' + }); + + // Let the client know streaming has started via HTTP response + return { + success: true, + message: 'Streaming started', + chatNoteId: chatNoteId + }; + } catch (error: any) { + log.error(`Error starting message stream: ${error.message}`); + throw error; + } +} + +export default { + // Chat session management + createSession, + getSession, + updateSession, + listSessions, + deleteSession, + sendMessage, + streamMessage, + + // Knowledge base index management + getIndexStats, + startIndexing, + getFailedIndexes, + retryFailedIndex, + retryAllFailedIndexes, + findSimilarNotes, + generateQueryContext, + indexNote +}; diff --git a/src/routes/api/ollama.ts b/src/routes/api/ollama.ts new file mode 100644 index 000000000..e6ab968dc --- /dev/null +++ b/src/routes/api/ollama.ts @@ -0,0 +1,64 @@ +import options from "../../services/options.js"; +import log from "../../services/log.js"; +import type { Request, Response } from "express"; +import { Ollama } from "ollama"; + +/** + * @swagger + * /api/llm/providers/ollama/models: + * get: + * summary: List available models from Ollama + * operationId: ollama-list-models + * parameters: + * - name: baseUrl + * in: query + * required: false + * schema: + * type: string + * description: Optional custom Ollama API base URL + * responses: + * '200': + * description: List of available Ollama models + * content: + * application/json: + * schema: + * type: object + * properties: + * success: + * type: boolean + * models: + * type: array + * items: + * type: object + * '500': + * description: Error listing models 
+ * security: + * - session: [] + * tags: ["llm"] + */ +async function listModels(req: Request, res: Response) { + try { + const baseUrl = req.query.baseUrl as string || await options.getOption('ollamaBaseUrl') || 'http://localhost:11434'; + + // Create Ollama client + const ollama = new Ollama({ host: baseUrl }); + + // Call Ollama API to get models using the official client + const response = await ollama.list(); + + // Return the models list + return { + success: true, + models: response.models || [] + }; + } catch (error: any) { + log.error(`Error listing Ollama models: ${error.message || 'Unknown error'}`); + + // Properly throw the error to be handled by the global error handler + throw new Error(`Failed to list Ollama models: ${error.message || 'Unknown error'}`); + } +} + +export default { + listModels +}; diff --git a/src/routes/api/openai.ts b/src/routes/api/openai.ts new file mode 100644 index 000000000..c78f183cd --- /dev/null +++ b/src/routes/api/openai.ts @@ -0,0 +1,127 @@ +import options from "../../services/options.js"; +import log from "../../services/log.js"; +import type { Request, Response } from "express"; +import OpenAI from "openai"; + +/** + * @swagger + * /api/openai/models: + * post: + * summary: List available models from OpenAI + * operationId: openai-list-models + * requestBody: + * required: false + * content: + * application/json: + * schema: + * type: object + * properties: + * baseUrl: + * type: string + * description: Optional custom OpenAI API base URL + * responses: + * '200': + * description: List of available OpenAI models + * content: + * application/json: + * schema: + * type: object + * properties: + * success: + * type: boolean + * chatModels: + * type: array + * items: + * type: object + * properties: + * id: + * type: string + * name: + * type: string + * type: + * type: string + * embeddingModels: + * type: array + * items: + * type: object + * properties: + * id: + * type: string + * name: + * type: string + * type: + * 
type: string + * '500': + * description: Error listing models + * security: + * - session: [] + * tags: ["llm"] + */ +async function listModels(req: Request, res: Response) { + try { + const { baseUrl } = req.body; + + // Use provided base URL or default from options + const openaiBaseUrl = baseUrl || await options.getOption('openaiBaseUrl') || 'https://api.openai.com/v1'; + const apiKey = await options.getOption('openaiApiKey'); + + if (!apiKey) { + throw new Error('OpenAI API key is not configured'); + } + + // Initialize OpenAI client with the API key and base URL + const openai = new OpenAI({ + apiKey, + baseURL: openaiBaseUrl + }); + + // Call OpenAI API to get models using the SDK + const response = await openai.models.list(); + + // Filter and categorize models + const allModels = response.data || []; + + // Separate models into chat models and embedding models + const chatModels = allModels + .filter((model) => + // Include GPT models for chat + model.id.includes('gpt') || + // Include Claude models via Azure OpenAI + model.id.includes('claude') + ) + .map((model) => ({ + id: model.id, + name: model.id, + type: 'chat' + })); + + const embeddingModels = allModels + .filter((model) => + // Only include embedding-specific models + model.id.includes('embedding') || + model.id.includes('embed') + ) + .map((model) => ({ + id: model.id, + name: model.id, + type: 'embedding' + })); + + // Return the models list + return { + success: true, + chatModels, + embeddingModels + }; + } catch (error: any) { + log.error(`Error listing OpenAI models: ${error.message || 'Unknown error'}`); + + // Properly throw the error to be handled by the global error handler + throw new Error(`Failed to list OpenAI models: ${error.message || 'Unknown error'}`); + } +} + +export default { + listModels +}; + diff --git a/src/routes/api/options.ts b/src/routes/api/options.ts index aeb4e9009..60cfc9bb7 100644 --- a/src/routes/api/options.ts +++ b/src/routes/api/options.ts @@ -82,7 +82,35 @@ 
const ALLOWED_OPTIONS = new Set([ "allowedHtmlTags", "redirectBareDomain", "showLoginInShareTheme", - "splitEditorOrientation", + + // AI/LLM integration options + "aiEnabled", + "aiTemperature", + "aiSystemPrompt", + "aiProviderPrecedence", + "openaiApiKey", + "openaiBaseUrl", + "openaiDefaultModel", + "openaiEmbeddingModel", + "anthropicApiKey", + "anthropicBaseUrl", + "anthropicDefaultModel", + "voyageApiKey", + "voyageEmbeddingModel", + "ollamaBaseUrl", + "ollamaDefaultModel", + "ollamaEmbeddingModel", + "embeddingAutoUpdateEnabled", + "embeddingDimensionStrategy", + "embeddingProviderPrecedence", + "embeddingSimilarityThreshold", + "embeddingBatchSize", + "embeddingUpdateInterval", + "enableAutomaticIndexing", + "maxNotesPerLlmQuery", + + // Embedding options + "embeddingDefaultDimension", "mfaEnabled", "mfaMethod" ]); diff --git a/src/routes/routes.ts b/src/routes/routes.ts index 1c7341503..082e05cd0 100644 --- a/src/routes/routes.ts +++ b/src/routes/routes.ts @@ -61,6 +61,11 @@ import etapiTokensApiRoutes from "./api/etapi_tokens.js"; import relationMapApiRoute from "./api/relation-map.js"; import otherRoute from "./api/other.js"; import shareRoutes from "../share/routes.js"; +import embeddingsRoute from "./api/embeddings.js"; +import ollamaRoute from "./api/ollama.js"; +import openaiRoute from "./api/openai.js"; +import anthropicRoute from "./api/anthropic.js"; +import llmRoute from "./api/llm.js"; import etapiAuthRoutes from "../etapi/auth.js"; import etapiAppInfoRoutes from "../etapi/app_info.js"; @@ -387,6 +392,44 @@ function register(app: express.Application) { etapiSpecRoute.register(router); etapiBackupRoute.register(router); + // LLM Chat API + apiRoute(PST, "/api/llm/chat", llmRoute.createSession); + apiRoute(GET, "/api/llm/chat", llmRoute.listSessions); + apiRoute(GET, "/api/llm/chat/:sessionId", llmRoute.getSession); + apiRoute(PATCH, "/api/llm/chat/:sessionId", llmRoute.updateSession); + apiRoute(DEL, "/api/llm/chat/:chatNoteId", 
llmRoute.deleteSession); + apiRoute(PST, "/api/llm/chat/:chatNoteId/messages", llmRoute.sendMessage); + apiRoute(PST, "/api/llm/chat/:chatNoteId/messages/stream", llmRoute.streamMessage); + + // LLM index management endpoints - reorganized for REST principles + apiRoute(GET, "/api/llm/indexes/stats", llmRoute.getIndexStats); + apiRoute(PST, "/api/llm/indexes", llmRoute.startIndexing); // Create index process + apiRoute(GET, "/api/llm/indexes/failed", llmRoute.getFailedIndexes); + apiRoute(PUT, "/api/llm/indexes/notes/:noteId", llmRoute.retryFailedIndex); // Update index for note + apiRoute(PUT, "/api/llm/indexes/failed", llmRoute.retryAllFailedIndexes); // Update all failed indexes + apiRoute(GET, "/api/llm/indexes/notes/similar", llmRoute.findSimilarNotes); // Get similar notes + apiRoute(GET, "/api/llm/indexes/context", llmRoute.generateQueryContext); // Get context + apiRoute(PST, "/api/llm/indexes/notes/:noteId", llmRoute.indexNote); // Create index for specific note + + // LLM embeddings endpoints + apiRoute(GET, "/api/llm/embeddings/similar/:noteId", embeddingsRoute.findSimilarNotes); + apiRoute(PST, "/api/llm/embeddings/search", embeddingsRoute.searchByText); + apiRoute(GET, "/api/llm/embeddings/providers", embeddingsRoute.getProviders); + apiRoute(PATCH, "/api/llm/embeddings/providers/:providerId", embeddingsRoute.updateProvider); + apiRoute(PST, "/api/llm/embeddings/reprocess", embeddingsRoute.reprocessAllNotes); + apiRoute(GET, "/api/llm/embeddings/queue-status", embeddingsRoute.getQueueStatus); + apiRoute(GET, "/api/llm/embeddings/stats", embeddingsRoute.getEmbeddingStats); + apiRoute(GET, "/api/llm/embeddings/failed", embeddingsRoute.getFailedNotes); + apiRoute(PST, "/api/llm/embeddings/retry/:noteId", embeddingsRoute.retryFailedNote); + apiRoute(PST, "/api/llm/embeddings/retry-all-failed", embeddingsRoute.retryAllFailedNotes); + apiRoute(PST, "/api/llm/embeddings/rebuild-index", embeddingsRoute.rebuildIndex); + apiRoute(GET, 
"/api/llm/embeddings/index-rebuild-status", embeddingsRoute.getIndexRebuildStatus); + + // LLM provider endpoints - moved under /api/llm/providers hierarchy + apiRoute(GET, "/api/llm/providers/ollama/models", ollamaRoute.listModels); + apiRoute(GET, "/api/llm/providers/openai/models", openaiRoute.listModels); + apiRoute(GET, "/api/llm/providers/anthropic/models", anthropicRoute.listModels); + // API Documentation apiDocsRoute.register(app); @@ -500,8 +543,14 @@ function route(method: HttpMethod, path: string, middleware: express.Handler[], } function handleResponse(resultHandler: ApiResultHandler, req: express.Request, res: express.Response, result: unknown, start: number) { - const responseLength = resultHandler(req, res, result); + // Skip result handling if the response has already been handled + if ((res as any).triliumResponseHandled) { + // Just log the request without additional processing + log.request(req, res, Date.now() - start, 0); + return; + } + const responseLength = resultHandler(req, res, result); log.request(req, res, Date.now() - start, responseLength); } diff --git a/src/services/app_info.ts b/src/services/app_info.ts index bd1436912..60b453f9d 100644 --- a/src/services/app_info.ts +++ b/src/services/app_info.ts @@ -3,8 +3,8 @@ import build from "./build.js"; import packageJson from "../../package.json" with { type: "json" }; import dataDir from "./data_dir.js"; -const APP_DB_VERSION = 229; -const SYNC_VERSION = 34; +const APP_DB_VERSION = 230; +const SYNC_VERSION = 35; const CLIPPER_PROTOCOL_VERSION = "1.0"; export default { diff --git a/src/services/entity_changes.ts b/src/services/entity_changes.ts index 66c2613ce..a22ecb11c 100644 --- a/src/services/entity_changes.ts +++ b/src/services/entity_changes.ts @@ -188,6 +188,7 @@ function fillAllEntityChanges() { fillEntityChanges("attributes", "attributeId"); fillEntityChanges("etapi_tokens", "etapiTokenId"); fillEntityChanges("options", "name", "WHERE isSynced = 1"); + 
fillEntityChanges("note_embeddings", "embedId"); }); } diff --git a/src/services/erase.ts b/src/services/erase.ts index 28603e136..61b3f7ce8 100644 --- a/src/services/erase.ts +++ b/src/services/erase.ts @@ -28,6 +28,11 @@ function eraseNotes(noteIdsToErase: string[]) { eraseRevisions(revisionIdsToErase); + // Erase embeddings related to the deleted notes + const embeddingIdsToErase = sql.getManyRows<{ embedId: string }>(`SELECT embedId FROM note_embeddings WHERE noteId IN (???)`, noteIdsToErase).map((row) => row.embedId); + + eraseEmbeddings(embeddingIdsToErase); + log.info(`Erased notes: ${JSON.stringify(noteIdsToErase)}`); } @@ -151,6 +156,13 @@ function eraseNotesWithDeleteId(deleteId: string) { const attachmentIdsToErase = sql.getColumn("SELECT attachmentId FROM attachments WHERE isDeleted = 1 AND deleteId = ?", [deleteId]); eraseAttachments(attachmentIdsToErase); + // Find and erase embeddings for deleted notes + const deletedNoteIds = sql.getColumn("SELECT noteId FROM notes WHERE isDeleted = 1 AND deleteId = ?", [deleteId]); + if (deletedNoteIds.length > 0) { + const embeddingIdsToErase = sql.getColumn("SELECT embedId FROM note_embeddings WHERE noteId IN (???)", deletedNoteIds); + eraseEmbeddings(embeddingIdsToErase); + } + eraseUnusedBlobs(); } @@ -173,6 +185,17 @@ function eraseScheduledAttachments(eraseUnusedAttachmentsAfterSeconds: number | eraseAttachments(attachmentIdsToErase); } +function eraseEmbeddings(embedIdsToErase: string[]) { + if (embedIdsToErase.length === 0) { + return; + } + + sql.executeMany(`DELETE FROM note_embeddings WHERE embedId IN (???)`, embedIdsToErase); + setEntityChangesAsErased(sql.getManyRows(`SELECT * FROM entity_changes WHERE entityName = 'note_embeddings' AND entityId IN (???)`, embedIdsToErase)); + + log.info(`Erased embeddings: ${JSON.stringify(embedIdsToErase)}`); +} + export function startScheduledCleanup() { sqlInit.dbReady.then(() => { // first cleanup kickoff 5 minutes after startup diff --git a/src/services/events.ts 
b/src/services/events.ts index 2b24a5826..5ffc93f82 100644 --- a/src/services/events.ts +++ b/src/services/events.ts @@ -10,6 +10,7 @@ const ENTITY_CHANGE_SYNCED = "ENTITY_CHANGE_SYNCED"; const ENTITY_DELETE_SYNCED = "ENTITY_DELETE_SYNCED"; const CHILD_NOTE_CREATED = "CHILD_NOTE_CREATED"; const NOTE_CONTENT_CHANGE = "NOTE_CONTENT_CHANGED"; +const DB_INITIALIZED = "DB_INITIALIZED"; type EventType = string | string[]; type EventListener = (data: any) => void; @@ -72,5 +73,6 @@ export default { ENTITY_CHANGE_SYNCED, ENTITY_DELETE_SYNCED, CHILD_NOTE_CREATED, - NOTE_CONTENT_CHANGE + NOTE_CONTENT_CHANGE, + DB_INITIALIZED }; diff --git a/src/services/hidden_subtree.ts b/src/services/hidden_subtree.ts index 369b23aea..a66360032 100644 --- a/src/services/hidden_subtree.ts +++ b/src/services/hidden_subtree.ts @@ -1,23 +1,29 @@ import BAttribute from "../becca/entities/battribute.js"; -import type { AttributeType, NoteType } from "../becca/entities/rows.js"; +import type { AttributeType } from "../becca/entities/rows.js"; import becca from "../becca/becca.js"; import noteService from "./notes.js"; import log from "./log.js"; import migrationService from "./migration.js"; +import options from "./options.js"; +import sql from "./sql.js"; import { t } from "i18next"; import { cleanUpHelp, getHelpHiddenSubtreeData } from "./in_app_help.js"; import buildLaunchBarConfig from "./hidden_subtree_launcherbar.js"; const LBTPL_ROOT = "_lbTplRoot"; const LBTPL_BASE = "_lbTplBase"; -const LBTPL_COMMAND = "_lbTplCommandLauncher"; -const LBTPL_NOTE_LAUNCHER = "_lbTplNoteLauncher"; -const LBTPL_SCRIPT = "_lbTplScriptLauncher"; -const LBTPL_BUILTIN_WIDGET = "_lbTplBuiltinWidget"; +const LBTPL_HEADER = "_lbTplHeader"; +const LBTPL_NOTE_LAUNCHER = "_lbTplLauncherNote"; +const LBTPL_WIDGET = "_lbTplLauncherWidget"; +const LBTPL_COMMAND = "_lbTplLauncherCommand"; +const LBTPL_SCRIPT = "_lbTplLauncherScript"; const LBTPL_SPACER = "_lbTplSpacer"; const LBTPL_CUSTOM_WIDGET = "_lbTplCustomWidget"; +// 
Define launcher note types locally +type LauncherNoteType = "launcher" | "search" | "doc" | "noteMap" | "contentWidget" | "book" | "file" | "image" | "text" | "relationMap" | "render" | "canvas" | "mermaid" | "webView" | "code" | "mindMap" | "geoMap"; + interface HiddenSubtreeAttribute { type: AttributeType; name: string; @@ -29,7 +35,7 @@ export interface HiddenSubtreeItem { notePosition?: number; id: string; title: string; - type: NoteType; + type: LauncherNoteType; icon?: string; attributes?: HiddenSubtreeAttribute[]; children?: HiddenSubtreeItem[]; @@ -37,7 +43,17 @@ export interface HiddenSubtreeItem { baseSize?: string; growthFactor?: string; targetNoteId?: "_backendLog" | "_globalNoteMap"; - builtinWidget?: "bookmarks" | "spacer" | "backInHistoryButton" | "forwardInHistoryButton" | "syncStatus" | "protectedSession" | "todayInJournal" | "calendar" | "quickSearch"; + builtinWidget?: + | "todayInJournal" + | "bookmarks" + | "spacer" + | "backInHistoryButton" + | "forwardInHistoryButton" + | "syncStatus" + | "protectedSession" + | "calendar" + | "quickSearch" + | "aiChatLauncher"; command?: keyof typeof Command; } @@ -47,7 +63,8 @@ enum Command { searchNotes, createNoteIntoInbox, showRecentChanges, - showOptions + showOptions, + createAiChat } /* @@ -168,7 +185,7 @@ function buildHiddenSubtreeDefinition(helpSubtree: HiddenSubtreeItem[]): HiddenS ] }, { - id: LBTPL_BUILTIN_WIDGET, + id: LBTPL_WIDGET, title: t("hidden-subtree.built-in-widget-title"), type: "doc", attributes: [ @@ -182,7 +199,7 @@ function buildHiddenSubtreeDefinition(helpSubtree: HiddenSubtreeItem[]): HiddenS type: "doc", icon: "bx-move-vertical", attributes: [ - { type: "relation", name: "template", value: LBTPL_BUILTIN_WIDGET }, + { type: "relation", name: "template", value: LBTPL_WIDGET }, { type: "label", name: "builtinWidget", value: "spacer" }, { type: "label", name: "label:baseSize", value: "promoted,number" }, { type: "label", name: "label:growthFactor", value: "promoted,number" }, @@ 
-275,6 +292,7 @@ function buildHiddenSubtreeDefinition(helpSubtree: HiddenSubtreeItem[]): HiddenS { id: "_optionsEtapi", title: t("hidden-subtree.etapi-title"), type: "contentWidget", icon: "bx-extension" }, { id: "_optionsBackup", title: t("hidden-subtree.backup-title"), type: "contentWidget", icon: "bx-data" }, { id: "_optionsSync", title: t("hidden-subtree.sync-title"), type: "contentWidget", icon: "bx-wifi" }, + { id: "_optionsAi", title: t("hidden-subtree.ai-llm-title"), type: "contentWidget", icon: "bx-bot" }, { id: "_optionsOther", title: t("hidden-subtree.other"), type: "contentWidget", icon: "bx-dots-horizontal" }, { id: "_optionsLocalization", title: t("hidden-subtree.localization"), type: "contentWidget", icon: "bx-world" }, { id: "_optionsAdvanced", title: t("hidden-subtree.advanced-title"), type: "contentWidget" } @@ -359,7 +377,7 @@ function checkHiddenSubtreeRecursively(parentNoteId: string, item: HiddenSubtree attrs.push({ type: "label", name: "baseSize", value: item.baseSize }); attrs.push({ type: "label", name: "growthFactor", value: item.growthFactor }); } else { - attrs.push({ type: "relation", name: "template", value: LBTPL_BUILTIN_WIDGET }); + attrs.push({ type: "relation", name: "template", value: LBTPL_WIDGET }); } attrs.push({ type: "label", name: "builtinWidget", value: item.builtinWidget }); @@ -430,8 +448,8 @@ export default { LBTPL_BASE, LBTPL_COMMAND, LBTPL_NOTE_LAUNCHER, + LBTPL_WIDGET, LBTPL_SCRIPT, - LBTPL_BUILTIN_WIDGET, LBTPL_SPACER, LBTPL_CUSTOM_WIDGET }; diff --git a/src/services/hidden_subtree_launcherbar.ts b/src/services/hidden_subtree_launcherbar.ts index 0ee41f2e6..602d09bb3 100644 --- a/src/services/hidden_subtree_launcherbar.ts +++ b/src/services/hidden_subtree_launcherbar.ts @@ -46,7 +46,7 @@ export default function buildLaunchBarConfig() { const desktopAvailableLaunchers: HiddenSubtreeItem[] = [ { id: "_lbBackInHistory", ...sharedLaunchers.backInHistory }, { id: "_lbForwardInHistory", ...sharedLaunchers.forwardInHistory 
}, - { id: "_lbBackendLog", title: t("hidden-subtree.backend-log-title"), type: "launcher", targetNoteId: "_backendLog", icon: "bx bx-terminal" } + { id: "_lbBackendLog", title: t("hidden-subtree.backend-log-title"), type: "launcher", targetNoteId: "_backendLog", icon: "bx bx-terminal" }, ]; const desktopVisibleLaunchers: HiddenSubtreeItem[] = [ @@ -68,6 +68,16 @@ export default function buildLaunchBarConfig() { attributes: [{ type: "label", name: "desktopOnly" }] }, { id: "_lbNoteMap", title: t("hidden-subtree.note-map-title"), type: "launcher", targetNoteId: "_globalNoteMap", icon: "bx bxs-network-chart" }, + { + id: "_lbLlmChat", + title: t("hidden-subtree.llm-chat-title"), + type: "launcher", + command: "createAiChat", + icon: "bx bx-bot", + attributes: [ + { type: "label", name: "desktopOnly" } + ] + }, { id: "_lbCalendar", ...sharedLaunchers.calendar }, { id: "_lbRecentChanges", ...sharedLaunchers.recentChanges }, { id: "_lbSpacer1", title: t("hidden-subtree.spacer-title"), type: "launcher", builtinWidget: "spacer", baseSize: "50", growthFactor: "0" }, diff --git a/src/services/llm/README.md b/src/services/llm/README.md new file mode 100644 index 000000000..dce7887ee --- /dev/null +++ b/src/services/llm/README.md @@ -0,0 +1,144 @@ +# Trilium Context Service + +This directory contains Trilium's context management services, which are responsible for providing relevant context to LLM models when generating responses. 
+ +## Structure + +The context system has been refactored into a modular architecture: + +``` +context/ + ├── index.ts - Base context extractor + ├── semantic_context.ts - Semantic context utilities + ├── hierarchy.ts - Note hierarchy context utilities + ├── code_handlers.ts - Code-specific context handling + ├── content_chunking.ts - Content chunking utilities + ├── note_content.ts - Note content processing + ├── summarization.ts - Content summarization utilities + ├── modules/ - Modular context services + │ ├── provider_manager.ts - Embedding provider management + │ ├── cache_manager.ts - Caching system + │ ├── semantic_search.ts - Semantic search functionality + │ ├── query_enhancer.ts - Query enhancement + │ ├── context_formatter.ts - Context formatting + │ └── context_service.ts - Main context service + └── README.md - This documentation +``` + +## Main Entry Points + +- `context_service.ts` - Main entry point for modern code +- `semantic_context_service.ts` - Compatibility wrapper for old code (deprecated) +- `trilium_context_service.ts` - Compatibility wrapper for old code (deprecated) + +## Usage + +### For new code: + +```typescript +import aiServiceManager from '../services/llm/ai_service_manager.js'; + +// Get the context service +const contextService = aiServiceManager.getContextService(); + +// Process a query to get relevant context +const result = await contextService.processQuery( + "What are my notes about programming?", + llmService, + currentNoteId, + false // showThinking +); + +// Get semantic context +const context = await contextService.getSemanticContext(noteId, userQuery); + +// Get context that adapts to query complexity +const smartContext = await contextService.getSmartContext(noteId, userQuery); +``` + +### For legacy code (deprecated): + +```typescript +import aiServiceManager from '../services/llm/ai_service_manager.js'; + +// Get the semantic context service (deprecated) +const semanticContext = 
aiServiceManager.getSemanticContextService(); + +// Get context +const context = await semanticContext.getSemanticContext(noteId, userQuery); +``` + +## Modules + +### Provider Manager + +Handles embedding provider selection and management: + +```typescript +import providerManager from './context/modules/provider_manager.js'; + +// Get the preferred embedding provider +const provider = await providerManager.getPreferredEmbeddingProvider(); + +// Generate embeddings for a query +const embedding = await providerManager.generateQueryEmbedding(query); +``` + +### Cache Manager + +Provides caching for context data: + +```typescript +import cacheManager from './context/modules/cache_manager.js'; + +// Get cached data +const cached = cacheManager.getNoteData(noteId, 'content'); + +// Store data in cache +cacheManager.storeNoteData(noteId, 'content', data); + +// Clear caches +cacheManager.clearAllCaches(); +``` + +### Semantic Search + +Handles semantic search functionality: + +```typescript +import semanticSearch from './context/modules/semantic_search.js'; + +// Find relevant notes +const notes = await semanticSearch.findRelevantNotes(query, contextNoteId); + +// Rank notes by relevance +const ranked = await semanticSearch.rankNotesByRelevance(notes, query); +``` + +### Query Enhancer + +Provides query enhancement: + +```typescript +import queryEnhancer from './context/modules/query_enhancer.js'; + +// Generate multiple search queries from a user question +const queries = await queryEnhancer.generateSearchQueries(question, llmService); + +// Estimate query complexity +const complexity = queryEnhancer.estimateQueryComplexity(query); +``` + +### Context Formatter + +Formats context for LLM consumption: + +```typescript +import contextFormatter from './context/modules/context_formatter.js'; + +// Build formatted context from notes +const context = await contextFormatter.buildContextFromNotes(notes, query, providerId); + +// Sanitize note content +const clean = 
contextFormatter.sanitizeNoteContent(content, type, mime); +``` \ No newline at end of file diff --git a/src/services/llm/ai_interface.ts b/src/services/llm/ai_interface.ts new file mode 100644 index 000000000..b8349b5b2 --- /dev/null +++ b/src/services/llm/ai_interface.ts @@ -0,0 +1,262 @@ +import type { ToolCall } from './tools/tool_interfaces.js'; +import type { ModelMetadata } from './providers/provider_options.js'; + +/** + * Interface for chat messages between client and LLM models + */ +export interface Message { + role: 'user' | 'assistant' | 'system' | 'tool'; + content: string; + name?: string; + tool_call_id?: string; + tool_calls?: ToolCall[]; + sessionId?: string; // Optional session ID for WebSocket communication +} + +// Define additional interfaces for tool-related types +export interface ToolChoice { + type: 'none' | 'auto' | 'function'; + function?: { + name: string; + }; +} + +export interface ToolData { + type: 'function'; + function: { + name: string; + description: string; + parameters: Record; + }; +} + +export interface ToolExecutionInfo { + type: 'start' | 'update' | 'complete' | 'error'; + tool: { + name: string; + arguments: Record; + }; + result?: string | Record; +} + +/** + * Interface for streaming response chunks + * + * This is the standardized format for all streaming chunks across + * different providers (OpenAI, Anthropic, Ollama, etc.). + * The original provider-specific chunks are available through + * the extended interface in the stream_manager. + * + * See STREAMING.md for complete documentation on streaming usage. 
+ */ +export interface StreamChunk { + /** The text content in this chunk (may be empty for status updates) */ + text: string; + + /** Whether this is the final chunk in the stream */ + done: boolean; + + /** Optional token usage statistics (rarely available in streaming mode) */ + usage?: { + promptTokens?: number; + completionTokens?: number; + totalTokens?: number; + }; + + /** + * Raw provider-specific data from the original response chunk + * This can include thinking state, tool execution info, etc. + */ + raw?: Record; + + /** + * Tool calls from the LLM (if any) + * These may be accumulated over multiple chunks during streaming + */ + tool_calls?: ToolCall[]; + + /** + * Tool execution information during streaming + * Includes tool name, args, and execution status + */ + toolExecution?: ToolExecutionInfo; +} + +/** + * Tool execution status for feedback to models + */ +export interface ToolExecutionStatus { + toolCallId: string; + name: string; + success: boolean; + result: string; + error?: string; +} + +/** + * Options for chat completion requests + * + * Key properties: + * - stream: If true, the response will be streamed + * - model: Model name to use + * - provider: Provider to use (openai, anthropic, ollama, etc.) + * - enableTools: If true, enables tool support + * + * The stream option is particularly important and should be consistently handled + * throughout the pipeline. It should be explicitly set to true or false. + * + * Streaming supports two approaches: + * 1. Callback-based: Provide a streamCallback to receive chunks directly + * 2. API-based: Use the stream property in the response to process chunks + * + * See STREAMING.md for complete documentation on streaming usage. 
+ */ +export interface ChatCompletionOptions { + model?: string; + temperature?: number; + maxTokens?: number; + topP?: number; + frequencyPenalty?: number; + presencePenalty?: number; + showThinking?: boolean; + systemPrompt?: string; + preserveSystemPrompt?: boolean; // Whether to preserve existing system message + bypassFormatter?: boolean; // Whether to bypass the message formatter entirely + expectsJsonResponse?: boolean; // Whether this request expects a JSON response + + /** + * Whether to stream the response + * When true, response will be delivered incrementally via either: + * - The streamCallback if provided + * - The stream property in the response object + */ + stream?: boolean; + + /** + * Optional callback function for streaming responses + * When provided along with stream:true, this function will be called + * for each chunk of the response. + * + * @param text The text content in this chunk + * @param isDone Whether this is the final chunk + * @param originalChunk Optional original provider-specific chunk for advanced usage + */ + streamCallback?: (text: string, isDone: boolean, originalChunk?: Record) => Promise | void; + + enableTools?: boolean; // Whether to enable tool calling + tools?: ToolData[]; // Tools to provide to the LLM + tool_choice?: ToolChoice; // Tool choice parameter for the LLM + useAdvancedContext?: boolean; // Whether to use advanced context enrichment + toolExecutionStatus?: ToolExecutionStatus[]; // Status information about executed tools for feedback + providerMetadata?: ModelMetadata; // Metadata about the provider and model capabilities + sessionId?: string; // Session ID for storing tool execution results + + /** + * Maximum number of tool execution iterations + * Used to prevent infinite loops in tool execution + */ + maxToolIterations?: number; + + /** + * Current tool execution iteration counter + * Internal use for tracking nested tool executions + */ + currentToolIteration?: number; +} + +/** + * Response from a 
chat completion request + * + * When streaming is used, the behavior depends on how streaming was requested: + * + * 1. With streamCallback: The text field contains the complete response + * collected from all chunks, and the stream property is not present. + * + * 2. Without streamCallback: The text field is initially empty, and the + * stream property provides a function to process chunks and collect + * the complete response. + * + * See STREAMING.md for complete documentation on streaming usage. + */ +export interface ChatResponse { + /** + * The complete text response. + * If streaming was used with streamCallback, this contains the collected response. + * If streaming was used without streamCallback, this is initially empty. + */ + text: string; + + /** The model that generated the response */ + model: string; + + /** The provider that served the request (openai, anthropic, ollama, etc.) */ + provider: string; + + /** Token usage statistics (may not be available when streaming) */ + usage?: { + promptTokens?: number; + completionTokens?: number; + totalTokens?: number; + }; + + /** + * Stream processor function - only present when streaming is enabled + * without a streamCallback. When called with a chunk processor function, + * it returns a Promise that resolves to the complete response text. + * + * @param callback Function to process each chunk of the stream + * @returns Promise resolving to the complete text after stream processing + */ + stream?: (callback: (chunk: StreamChunk) => Promise | void) => Promise; + + /** Tool calls from the LLM (if tools were used and the model supports them) */ + tool_calls?: ToolCall[]; +} + +export interface AIService { + /** + * Generate a chat completion response + */ + generateChatCompletion(messages: Message[], options?: ChatCompletionOptions): Promise; + + /** + * Check if the service can be used (API key is set, etc.) 
+ */ + isAvailable(): boolean; + + /** + * Get the name of the service + */ + getName(): string; +} + +/** + * Interface for the semantic context service, which provides enhanced context retrieval + * for AI conversations based on semantic similarity. + */ +export interface SemanticContextService { + /** + * Initialize the semantic context service + */ + initialize(): Promise; + + /** + * Retrieve semantic context based on relevance to user query + */ + getSemanticContext(noteId: string, userQuery: string, maxResults?: number, messages?: Message[]): Promise; + + /** + * Get progressive context based on depth + */ + getProgressiveContext?(noteId: string, depth?: number): Promise; + + /** + * Get smart context selection that adapts to query complexity + */ + getSmartContext?(noteId: string, userQuery: string): Promise; + + /** + * Enhance LLM context with agent tools + */ + getAgentToolsContext(noteId: string, query: string, showThinking?: boolean): Promise; +} diff --git a/src/services/llm/ai_service_manager.ts b/src/services/llm/ai_service_manager.ts new file mode 100644 index 000000000..c9c0581f3 --- /dev/null +++ b/src/services/llm/ai_service_manager.ts @@ -0,0 +1,709 @@ +import options from '../options.js'; +import type { AIService, ChatCompletionOptions, ChatResponse, Message } from './ai_interface.js'; +import { AnthropicService } from './providers/anthropic_service.js'; +import { ContextExtractor } from './context/index.js'; +import agentTools from './context_extractors/index.js'; +import contextService from './context/services/context_service.js'; +import { getEmbeddingProvider, getEnabledEmbeddingProviders } from './providers/providers.js'; +import indexService from './index_service.js'; +import log from '../log.js'; +import { OllamaService } from './providers/ollama_service.js'; +import { OpenAIService } from './providers/openai_service.js'; + +// Import interfaces +import type { + ServiceProviders, + IAIServiceManager, + ProviderMetadata +} from 
'./interfaces/ai_service_interfaces.js'; +import type { NoteSearchResult } from './interfaces/context_interfaces.js'; + +/** + * Interface representing relevant note context + */ +interface NoteContext { + title: string; + content?: string; + noteId?: string; + summary?: string; + score?: number; +} + +export class AIServiceManager implements IAIServiceManager { + private services: Record = { + openai: new OpenAIService(), + anthropic: new AnthropicService(), + ollama: new OllamaService() + }; + + private providerOrder: ServiceProviders[] = ['openai', 'anthropic', 'ollama']; // Default order + private initialized = false; + + constructor() { + // Initialize provider order immediately + this.updateProviderOrder(); + + // Initialize tools immediately + this.initializeTools().catch(error => { + log.error(`Error initializing LLM tools during AIServiceManager construction: ${error.message || String(error)}`); + }); + } + + /** + * Initialize all LLM tools in one place + */ + private async initializeTools(): Promise { + try { + log.info('Initializing LLM tools during AIServiceManager construction...'); + + // Initialize agent tools + await this.initializeAgentTools(); + log.info("Agent tools initialized successfully"); + + // Initialize LLM tools + const toolInitializer = await import('./tools/tool_initializer.js'); + await toolInitializer.default.initializeTools(); + log.info("LLM tools initialized successfully"); + } catch (error: unknown) { + log.error(`Error initializing tools: ${this.handleError(error)}`); + // Don't throw, just log the error to prevent breaking construction + } + } + + /** + * Update the provider precedence order from saved options + * Returns true if successful, false if options not available yet + */ + updateProviderOrder(): boolean { + if (this.initialized) { + return true; + } + + try { + // Default precedence: openai, anthropic, ollama + const defaultOrder: ServiceProviders[] = ['openai', 'anthropic', 'ollama']; + + // Get custom order from 
options + const customOrder = options.getOption('aiProviderPrecedence'); + + if (customOrder) { + try { + // Try to parse as JSON first + let parsed; + + // Handle both array in JSON format and simple string format + if (customOrder.startsWith('[') && customOrder.endsWith(']')) { + parsed = JSON.parse(customOrder); + } else if (typeof customOrder === 'string') { + // If it's a string with commas, split it + if (customOrder.includes(',')) { + parsed = customOrder.split(',').map(p => p.trim()); + } else { + // If it's a simple string (like "ollama"), convert to single-item array + parsed = [customOrder]; + } + } else { + // Fallback to default + parsed = defaultOrder; + } + + // Validate that all providers are valid + if (Array.isArray(parsed) && + parsed.every(p => Object.keys(this.services).includes(p))) { + this.providerOrder = parsed as ServiceProviders[]; + } else { + log.info('Invalid AI provider precedence format, using defaults'); + this.providerOrder = defaultOrder; + } + } catch (e) { + log.error(`Failed to parse AI provider precedence: ${e}`); + this.providerOrder = defaultOrder; + } + } else { + this.providerOrder = defaultOrder; + } + + this.initialized = true; + + // Remove the validateEmbeddingProviders call since we now do validation on the client + // this.validateEmbeddingProviders(); + + return true; + } catch (error) { + // If options table doesn't exist yet, use defaults + // This happens during initial database creation + this.providerOrder = ['openai', 'anthropic', 'ollama']; + return false; + } + } + + /** + * Validate embedding providers configuration + * - Check if embedding default provider is in provider precedence list + * - Check if all providers in precedence list and default provider are enabled + * + * @returns A warning message if there are issues, or null if everything is fine + */ + async validateEmbeddingProviders(): Promise { + try { + // Check if AI is enabled, if not, skip validation + const aiEnabled = await 
options.getOptionBool('aiEnabled'); + if (!aiEnabled) { + return null; + } + + // Parse provider precedence list (similar to updateProviderOrder) + let precedenceList: string[] = []; + const precedenceOption = await options.getOption('aiProviderPrecedence'); + + if (precedenceOption) { + if (precedenceOption.startsWith('[') && precedenceOption.endsWith(']')) { + precedenceList = JSON.parse(precedenceOption); + } else if (typeof precedenceOption === 'string') { + if (precedenceOption.includes(',')) { + precedenceList = precedenceOption.split(',').map(p => p.trim()); + } else { + precedenceList = [precedenceOption]; + } + } + } + + // Get enabled providers + const enabledProviders = await getEnabledEmbeddingProviders(); + const enabledProviderNames = enabledProviders.map(p => p.name); + + // Check if all providers in precedence list are enabled + const allPrecedenceEnabled = precedenceList.every(p => + enabledProviderNames.includes(p) || p === 'local'); + + // Return warning message if there are issues + if (!allPrecedenceEnabled) { + let message = 'There are issues with your AI provider configuration:'; + + if (!allPrecedenceEnabled) { + const disabledProviders = precedenceList.filter(p => + !enabledProviderNames.includes(p) && p !== 'local'); + message += `\n• The following providers in your precedence list are not enabled: ${disabledProviders.join(', ')}.`; + } + + message += '\n\nPlease check your AI settings.'; + + // Log warning to console + log.error('AI Provider Configuration Warning: ' + message); + + return message; + } + + return null; + } catch (error) { + log.error(`Error validating embedding providers: ${error}`); + return null; + } + } + + /** + * Ensure manager is initialized before using + */ + private ensureInitialized() { + if (!this.initialized) { + this.updateProviderOrder(); + } + } + + /** + * Check if any AI service is available + */ + isAnyServiceAvailable(): boolean { + return Object.values(this.services).some(service => 
service.isAvailable()); + } + + /** + * Get list of available providers + */ + getAvailableProviders(): ServiceProviders[] { + this.ensureInitialized(); + return Object.entries(this.services) + .filter(([_, service]) => service.isAvailable()) + .map(([key, _]) => key as ServiceProviders); + } + + /** + * Generate a chat completion response using the first available AI service + * based on the configured precedence order + */ + async generateChatCompletion(messages: Message[], options: ChatCompletionOptions = {}): Promise { + this.ensureInitialized(); + + log.info(`[AIServiceManager] generateChatCompletion called with options: ${JSON.stringify({ + model: options.model, + stream: options.stream, + enableTools: options.enableTools + })}`); + log.info(`[AIServiceManager] Stream option type: ${typeof options.stream}`); + + if (!messages || messages.length === 0) { + throw new Error('No messages provided for chat completion'); + } + + // Try providers in order of preference + const availableProviders = this.getAvailableProviders(); + + if (availableProviders.length === 0) { + throw new Error('No AI providers are available. 
Please check your AI settings.'); + } + + // Sort available providers by precedence + const sortedProviders = this.providerOrder + .filter(provider => availableProviders.includes(provider)); + + // If a specific provider is requested and available, use it + if (options.model && options.model.includes(':')) { + const [providerName, modelName] = options.model.split(':'); + + if (availableProviders.includes(providerName as ServiceProviders)) { + try { + const modifiedOptions = { ...options, model: modelName }; + log.info(`[AIServiceManager] Using provider ${providerName} from model prefix with modifiedOptions.stream: ${modifiedOptions.stream}`); + return await this.services[providerName as ServiceProviders].generateChatCompletion(messages, modifiedOptions); + } catch (error) { + log.error(`Error with specified provider ${providerName}: ${error}`); + // If the specified provider fails, continue with the fallback providers + } + } + } + + // Try each provider in order until one succeeds + let lastError: Error | null = null; + + for (const provider of sortedProviders) { + try { + log.info(`[AIServiceManager] Trying provider ${provider} with options.stream: ${options.stream}`); + return await this.services[provider].generateChatCompletion(messages, options); + } catch (error) { + log.error(`Error with provider ${provider}: ${error}`); + lastError = error as Error; + // Continue to the next provider + } + } + + // If we get here, all providers failed + throw new Error(`All AI providers failed: ${lastError?.message || 'Unknown error'}`); + } + + setupEventListeners() { + // Setup event listeners for AI services + } + + /** + * Get the context extractor service + * @returns The context extractor instance + */ + getContextExtractor() { + return contextExtractor; + } + + /** + * Get the context service for advanced context management + * @returns The context service instance + */ + getContextService() { + return contextService; + } + + /** + * Get the index service for 
managing knowledge base indexing + * @returns The index service instance + */ + getIndexService() { + return indexService; + } + + /** + * Ensure agent tools are initialized (no-op as they're initialized in constructor) + * Kept for backward compatibility with existing API + */ + async initializeAgentTools(): Promise { + // Agent tools are already initialized in the constructor + // This method is kept for backward compatibility + log.info("initializeAgentTools called, but tools are already initialized in constructor"); + } + + /** + * Get the agent tools manager + * This provides access to all agent tools + */ + getAgentTools() { + return agentTools; + } + + /** + * Get the vector search tool for semantic similarity search + */ + getVectorSearchTool() { + const tools = agentTools.getTools(); + return tools.vectorSearch; + } + + /** + * Get the note navigator tool for hierarchical exploration + */ + getNoteNavigatorTool() { + const tools = agentTools.getTools(); + return tools.noteNavigator; + } + + /** + * Get the query decomposition tool for complex queries + */ + getQueryDecompositionTool() { + const tools = agentTools.getTools(); + return tools.queryDecomposition; + } + + /** + * Get the contextual thinking tool for transparent reasoning + */ + getContextualThinkingTool() { + const tools = agentTools.getTools(); + return tools.contextualThinking; + } + + /** + * Get whether AI features are enabled from options + */ + getAIEnabled(): boolean { + return options.getOptionBool('aiEnabled'); + } + + /** + * Set up embeddings provider for AI features + */ + async setupEmbeddingsProvider(): Promise { + try { + if (!this.getAIEnabled()) { + log.info('AI features are disabled'); + return; + } + + // Get provider precedence list + const precedenceOption = await options.getOption('embeddingProviderPrecedence'); + let precedenceList: string[] = []; + + if (precedenceOption) { + if (precedenceOption.startsWith('[') && precedenceOption.endsWith(']')) { + precedenceList = 
JSON.parse(precedenceOption); + } else if (typeof precedenceOption === 'string') { + if (precedenceOption.includes(',')) { + precedenceList = precedenceOption.split(',').map(p => p.trim()); + } else { + precedenceList = [precedenceOption]; + } + } + } + + // Check if we have enabled providers + const enabledProviders = await getEnabledEmbeddingProviders(); + + if (enabledProviders.length === 0) { + log.info('No embedding providers are enabled'); + return; + } + + // Initialize embedding providers + log.info('Embedding providers initialized successfully'); + } catch (error: any) { + log.error(`Error setting up embedding providers: ${error.message}`); + throw error; + } + } + + /** + * Initialize the AI Service + */ + async initialize(): Promise { + try { + log.info("Initializing AI service..."); + + // Check if AI is enabled in options + const isAIEnabled = this.getAIEnabled(); + + if (!isAIEnabled) { + log.info("AI features are disabled in options"); + return; + } + + // Set up embeddings provider if AI is enabled + await this.setupEmbeddingsProvider(); + + // Initialize index service + await this.getIndexService().initialize(); + + // Tools are already initialized in the constructor + // No need to initialize them again + + this.initialized = true; + log.info("AI service initialized successfully"); + } catch (error: any) { + log.error(`Error initializing AI service: ${error.message}`); + throw error; + } + } + + /** + * Get description of available agent tools + */ + async getAgentToolsDescription(): Promise { + try { + // Get all available tools + const tools = agentTools.getAllTools(); + + if (!tools || tools.length === 0) { + return ""; + } + + // Format tool descriptions + const toolDescriptions = tools.map(tool => + `- ${tool.name}: ${tool.description}` + ).join('\n'); + + return `Available tools:\n${toolDescriptions}`; + } catch (error) { + log.error(`Error getting agent tools description: ${error}`); + return ""; + } + } + + /** + * Get enhanced context 
with available agent tools + * @param noteId - The ID of the note + * @param query - The user's query + * @param showThinking - Whether to show LLM's thinking process + * @param relevantNotes - Optional notes already found to be relevant + * @returns Enhanced context with agent tools information + */ + async getAgentToolsContext( + noteId: string, + query: string, + showThinking: boolean = false, + relevantNotes: NoteSearchResult[] = [] + ): Promise { + try { + // Create agent tools message + const toolsMessage = await this.getAgentToolsDescription(); + + // Agent tools are already initialized in the constructor + // No need to initialize them again + + // If we have notes that were already found to be relevant, use them directly + let contextNotes = relevantNotes; + + // If no notes provided, find relevant ones + if (!contextNotes || contextNotes.length === 0) { + try { + // Get the default LLM service for context enhancement + const provider = this.getPreferredProvider(); + const llmService = this.getService(provider); + + // Find relevant notes + contextNotes = await contextService.findRelevantNotes( + query, + noteId, + { + maxResults: 5, + summarize: true, + llmService + } + ); + + log.info(`Found ${contextNotes.length} relevant notes for context`); + } catch (error) { + log.error(`Failed to find relevant notes: ${this.handleError(error)}`); + // Continue without context notes + contextNotes = []; + } + } + + // Format notes into context string if we have any + let contextStr = ""; + if (contextNotes && contextNotes.length > 0) { + contextStr = "\n\nRelevant context:\n"; + contextNotes.forEach((note, index) => { + contextStr += `[${index + 1}] "${note.title}"\n${note.content || 'No content available'}\n\n`; + }); + } + + // Combine tool message with context + return toolsMessage + contextStr; + } catch (error) { + log.error(`Error getting agent tools context: ${this.handleError(error)}`); + return ""; + } + } + + /** + * Get AI service for the given provider + 
*/ + getService(provider?: string): AIService { + this.ensureInitialized(); + + // If provider is specified, try to use it + if (provider && this.services[provider as ServiceProviders]?.isAvailable()) { + return this.services[provider as ServiceProviders]; + } + + // Otherwise, use the first available provider in the configured order + for (const providerName of this.providerOrder) { + const service = this.services[providerName]; + if (service.isAvailable()) { + return service; + } + } + + // If no provider is available, use first one anyway (it will throw an error) + // This allows us to show a proper error message rather than "provider not found" + return this.services[this.providerOrder[0]]; + } + + /** + * Get the preferred provider based on configuration + */ + getPreferredProvider(): string { + this.ensureInitialized(); + + // Return the first available provider in the order + for (const providerName of this.providerOrder) { + if (this.services[providerName].isAvailable()) { + return providerName; + } + } + + // Return the first provider as fallback + return this.providerOrder[0]; + } + + /** + * Check if a specific provider is available + */ + isProviderAvailable(provider: string): boolean { + return this.services[provider as ServiceProviders]?.isAvailable() ?? 
false; + } + + /** + * Get metadata about a provider + */ + getProviderMetadata(provider: string): ProviderMetadata | null { + const service = this.services[provider as ServiceProviders]; + if (!service) { + return null; + } + + return { + name: provider, + capabilities: { + chat: true, + embeddings: provider !== 'anthropic', // Anthropic doesn't have embeddings + streaming: true, + functionCalling: provider === 'openai' // Only OpenAI has function calling + }, + models: ['default'], // Placeholder, could be populated from the service + defaultModel: 'default' + }; + } + + /** + * Error handler that properly types the error object + */ + private handleError(error: unknown): string { + if (error instanceof Error) { + return error.message || String(error); + } + return String(error); + } +} + +// Don't create singleton immediately, use a lazy-loading pattern +let instance: AIServiceManager | null = null; + +/** + * Get the AIServiceManager instance (creates it if not already created) + */ +function getInstance(): AIServiceManager { + if (!instance) { + instance = new AIServiceManager(); + } + return instance; +} + +export default { + getInstance, + // Also export methods directly for convenience + isAnyServiceAvailable(): boolean { + return getInstance().isAnyServiceAvailable(); + }, + getAvailableProviders() { + return getInstance().getAvailableProviders(); + }, + async generateChatCompletion(messages: Message[], options: ChatCompletionOptions = {}): Promise { + return getInstance().generateChatCompletion(messages, options); + }, + // Add validateEmbeddingProviders method + async validateEmbeddingProviders(): Promise { + return getInstance().validateEmbeddingProviders(); + }, + // Context and index related methods + getContextExtractor() { + return getInstance().getContextExtractor(); + }, + getContextService() { + return getInstance().getContextService(); + }, + getIndexService() { + return getInstance().getIndexService(); + }, + // Agent tools related methods + // 
Tools are now initialized in the constructor + getAgentTools() { + return getInstance().getAgentTools(); + }, + getVectorSearchTool() { + return getInstance().getVectorSearchTool(); + }, + getNoteNavigatorTool() { + return getInstance().getNoteNavigatorTool(); + }, + getQueryDecompositionTool() { + return getInstance().getQueryDecompositionTool(); + }, + getContextualThinkingTool() { + return getInstance().getContextualThinkingTool(); + }, + async getAgentToolsContext( + noteId: string, + query: string, + showThinking: boolean = false, + relevantNotes: NoteSearchResult[] = [] + ): Promise { + return getInstance().getAgentToolsContext( + noteId, + query, + showThinking, + relevantNotes + ); + }, + // New methods + getService(provider?: string): AIService { + return getInstance().getService(provider); + }, + getPreferredProvider(): string { + return getInstance().getPreferredProvider(); + }, + isProviderAvailable(provider: string): boolean { + return getInstance().isProviderAvailable(provider); + }, + getProviderMetadata(provider: string): ProviderMetadata | null { + return getInstance().getProviderMetadata(provider); + } +}; + +// Create an instance of ContextExtractor for backward compatibility +const contextExtractor = new ContextExtractor(); diff --git a/src/services/llm/base_ai_service.ts b/src/services/llm/base_ai_service.ts new file mode 100644 index 000000000..3c6e05bc7 --- /dev/null +++ b/src/services/llm/base_ai_service.ts @@ -0,0 +1,26 @@ +import options from '../options.js'; +import type { AIService, ChatCompletionOptions, ChatResponse, Message } from './ai_interface.js'; +import { DEFAULT_SYSTEM_PROMPT } from './constants/llm_prompt_constants.js'; + +export abstract class BaseAIService implements AIService { + protected name: string; + + constructor(name: string) { + this.name = name; + } + + abstract generateChatCompletion(messages: Message[], options?: ChatCompletionOptions): Promise; + + isAvailable(): boolean { + return 
options.getOptionBool('aiEnabled'); // Base check if AI is enabled globally + } + + getName(): string { + return this.name; + } + + protected getSystemPrompt(customPrompt?: string): string { + // Use prompt from constants file if no custom prompt is provided + return customPrompt || DEFAULT_SYSTEM_PROMPT; + } +} diff --git a/src/services/llm/chat/handlers/context_handler.ts b/src/services/llm/chat/handlers/context_handler.ts new file mode 100644 index 000000000..c5af21116 --- /dev/null +++ b/src/services/llm/chat/handlers/context_handler.ts @@ -0,0 +1,168 @@ +/** + * Handler for LLM context management + */ +import log from "../../../log.js"; +import becca from "../../../../becca/becca.js"; +import vectorStore from "../../embeddings/index.js"; +import providerManager from "../../providers/providers.js"; +import contextService from "../../context/services/context_service.js"; +import type { NoteSource } from "../../interfaces/chat_session.js"; +import { SEARCH_CONSTANTS } from '../../constants/search_constants.js'; + +/** + * Handles context management for LLM chat + */ +export class ContextHandler { + /** + * Find relevant notes based on search query + * @param content The search content + * @param contextNoteId Optional note ID for context + * @param limit Maximum number of results to return + * @returns Array of relevant note sources + */ + static async findRelevantNotes(content: string, contextNoteId: string | null = null, limit = 5): Promise { + try { + // If content is too short, don't bother + if (content.length < 3) { + return []; + } + + // Check if embeddings are available + const enabledProviders = await providerManager.getEnabledEmbeddingProviders(); + if (enabledProviders.length === 0) { + log.info("No embedding providers available, can't find relevant notes"); + return []; + } + + // Get the embedding for the query + const provider = enabledProviders[0]; + const embedding = await provider.generateEmbeddings(content); + + let results; + if 
(contextNoteId) { + // For branch context, get notes specifically from that branch + const contextNote = becca.notes[contextNoteId]; + if (!contextNote) { + return []; + } + + const sql = require("../../../../services/sql.js").default; + const childBranches = await sql.getRows(` + SELECT branches.* FROM branches + WHERE branches.parentNoteId = ? + AND branches.isDeleted = 0 + `, [contextNoteId]); + + const childNoteIds = childBranches.map((branch: any) => branch.noteId); + + // Include the context note itself + childNoteIds.push(contextNoteId); + + // Find similar notes in this context + results = []; + + for (const noteId of childNoteIds) { + const noteEmbedding = await vectorStore.getEmbeddingForNote( + noteId, + provider.name, + provider.getConfig().model + ); + + if (noteEmbedding) { + const similarity = vectorStore.cosineSimilarity( + embedding, + noteEmbedding.embedding + ); + + if (similarity > SEARCH_CONSTANTS.VECTOR_SEARCH.EXACT_MATCH_THRESHOLD) { + results.push({ + noteId, + similarity + }); + } + } + } + + // Sort by similarity + results.sort((a, b) => b.similarity - a.similarity); + results = results.slice(0, limit); + } else { + // General search across all notes + results = await vectorStore.findSimilarNotes( + embedding, + provider.name, + provider.getConfig().model, + limit + ); + } + + // Format the results + const sources: NoteSource[] = []; + + for (const result of results) { + const note = becca.notes[result.noteId]; + if (!note) continue; + + let noteContent: string | undefined = undefined; + if (note.type === 'text') { + const content = note.getContent(); + // Handle both string and Buffer types + noteContent = typeof content === 'string' ? content : + content instanceof Buffer ? 
content.toString('utf8') : undefined; + } + + sources.push({ + noteId: result.noteId, + title: note.title, + content: noteContent, + similarity: result.similarity, + branchId: note.getBranches()[0]?.branchId + }); + } + + return sources; + } catch (error: any) { + log.error(`Error finding relevant notes: ${error.message}`); + return []; + } + } + + /** + * Process enhanced context using the context service + * @param query Query to process + * @param contextNoteId Optional note ID for context + * @param showThinking Whether to show thinking process + */ + static async processEnhancedContext(query: string, llmService: any, options: { + contextNoteId?: string, + showThinking?: boolean + }) { + // Use the Trilium-specific approach + const contextNoteId = options.contextNoteId || null; + const showThinking = options.showThinking || false; + + // Log that we're calling contextService with the parameters + log.info(`Using enhanced context with: noteId=${contextNoteId}, showThinking=${showThinking}`); + + // Call context service for processing + const results = await contextService.processQuery( + query, + llmService, + { + contextNoteId, + showThinking + } + ); + + // Return the generated context and sources + return { + context: results.context, + sources: results.sources.map(source => ({ + noteId: source.noteId, + title: source.title, + content: source.content || undefined, // Convert null to undefined + similarity: source.similarity + })) + }; + } +} diff --git a/src/services/llm/chat/handlers/stream_handler.ts b/src/services/llm/chat/handlers/stream_handler.ts new file mode 100644 index 000000000..3aeb26d83 --- /dev/null +++ b/src/services/llm/chat/handlers/stream_handler.ts @@ -0,0 +1,368 @@ +/** + * Handler for streaming LLM responses + */ +import log from "../../../log.js"; +import type { Response } from "express"; +import type { StreamChunk } from "../../ai_interface.js"; +import type { LLMStreamMessage } from "../../interfaces/chat_ws_messages.js"; +import type 
{ ChatSession } from "../../interfaces/chat_session.js"; + +/** + * Handles streaming of LLM responses via WebSocket + */ +export class StreamHandler { + /** + * Handle streaming response via WebSocket + * + * This method processes LLM responses and sends them incrementally via WebSocket + * to the client, supporting both text content and tool execution status updates. + * + * @param res Express response object + * @param aiMessages Messages to send to the LLM + * @param chatOptions Options for the chat completion + * @param service LLM service to use + * @param session Chat session for storing the response + */ + static async handleStreamingResponse( + res: Response, + aiMessages: any[], + chatOptions: any, + service: any, + session: ChatSession + ): Promise { + // The client receives a success response for their HTTP request, + // but the actual content will be streamed via WebSocket + res.json({ success: true, message: 'Streaming response started' }); + + // Import the WebSocket service + const wsService = (await import('../../../ws.js')).default; + + let messageContent = ''; + const chatNoteId = session.id; + + // Immediately send an initial message to confirm WebSocket connection is working + // This helps prevent timeouts on the client side + wsService.sendMessageToAllClients({ + type: 'llm-stream', + chatNoteId, + thinking: 'Preparing response...' 
+ } as LLMStreamMessage); + + try { + // Import the tool handler + const { ToolHandler } = await import('./tool_handler.js'); + + // Generate the LLM completion with streaming enabled + const response = await service.generateChatCompletion(aiMessages, { + ...chatOptions, + stream: true + }); + + // If the model doesn't support streaming via .stream() method or returns tool calls, + // we'll handle it specially + if (response.tool_calls && response.tool_calls.length > 0) { + // Send thinking state notification via WebSocket + wsService.sendMessageToAllClients({ + type: 'llm-stream', + chatNoteId, + thinking: 'Analyzing tools needed for this request...' + } as LLMStreamMessage); + + try { + // Execute the tools + const toolResults = await ToolHandler.executeToolCalls(response, chatNoteId); + + // For each tool execution, send progress update via WebSocket + for (const toolResult of toolResults) { + wsService.sendMessageToAllClients({ + type: 'llm-stream', + chatNoteId, + toolExecution: { + action: 'complete', + tool: toolResult.name, + result: toolResult.content.substring(0, 100) + (toolResult.content.length > 100 ? '...' 
: '') + } + } as LLMStreamMessage); + } + + // Make follow-up request with tool results + const toolMessages = [...aiMessages, { + role: 'assistant', + content: response.text || '', + tool_calls: response.tool_calls + }, ...toolResults]; + + // Preserve streaming for follow-up if it was enabled in the original request + const followUpOptions = { + ...chatOptions, + // Only disable streaming if it wasn't explicitly requested + stream: chatOptions.stream === true, + // Allow tools but track iterations to prevent infinite loops + enableTools: true, + maxToolIterations: chatOptions.maxToolIterations || 5, + currentToolIteration: 1 // Start counting tool iterations + }; + + const followUpResponse = await service.generateChatCompletion(toolMessages, followUpOptions); + + await this.processStreamedResponse( + followUpResponse, + wsService, + chatNoteId, + session, + toolMessages, + followUpOptions, + service + ); + } catch (toolError: any) { + log.error(`Error executing tools: ${toolError.message}`); + + // Send error via WebSocket with done flag + wsService.sendMessageToAllClients({ + type: 'llm-stream', + chatNoteId, + error: `Error executing tools: ${toolError instanceof Error ? toolError.message : 'Unknown error'}`, + done: true + } as LLMStreamMessage); + } + } else if (response.stream) { + // Handle standard streaming through the stream() method + log.info(`Provider ${service.getName ? service.getName() : 'unknown'} supports streaming via stream() method`); + + // Store information about the model and provider in session metadata + session.metadata.model = response.model || session.metadata.model; + session.metadata.provider = response.provider || session.metadata.provider; + session.metadata.lastUpdated = new Date().toISOString(); + + await this.processStreamedResponse( + response, + wsService, + chatNoteId, + session + ); + } else { + log.info(`Provider ${service.getName ? 
service.getName() : 'unknown'} does not support streaming via stream() method, falling back to single response`); + + // If streaming isn't available, send the entire response at once + messageContent = response.text || ''; + + // Send via WebSocket - include both content and done flag in same message + wsService.sendMessageToAllClients({ + type: 'llm-stream', + chatNoteId, + content: messageContent, + done: true + } as LLMStreamMessage); + + log.info(`Complete response sent`); + + // Store the full response in the session + session.messages.push({ + role: 'assistant', + content: messageContent, + timestamp: new Date() + }); + } + } catch (streamingError: any) { + log.error(`Streaming error: ${streamingError.message}`); + + // Import the WebSocket service directly in case it wasn't imported earlier + const wsService = (await import('../../../ws.js')).default; + + // Send error via WebSocket + wsService.sendMessageToAllClients({ + type: 'llm-stream', + chatNoteId, + error: `Error generating response: ${streamingError instanceof Error ? 
streamingError.message : 'Unknown error'}` + } as LLMStreamMessage); + + // Signal completion + wsService.sendMessageToAllClients({ + type: 'llm-stream', + chatNoteId, + done: true + } as LLMStreamMessage); + } + } + + /** + * Process a streamed response from an LLM + */ + private static async processStreamedResponse( + response: any, + wsService: any, + chatNoteId: string, + session: ChatSession, + toolMessages?: any[], + followUpOptions?: any, + service?: any + ): Promise { + // Import tool handler lazily to avoid circular dependencies + const { ToolHandler } = await import('./tool_handler.js'); + + let messageContent = ''; + + try { + await response.stream(async (chunk: StreamChunk) => { + if (chunk.text) { + messageContent += chunk.text; + + // Enhanced logging for each chunk + log.info(`Received stream chunk with ${chunk.text.length} chars of text, done=${!!chunk.done}`); + + // Send each individual chunk via WebSocket as it arrives + wsService.sendMessageToAllClients({ + type: 'llm-stream', + chatNoteId, + content: chunk.text, + done: !!chunk.done, // Include done flag with each chunk + // Include any raw data from the provider that might contain thinking/tool info + ...(chunk.raw ? { raw: chunk.raw } : {}) + } as LLMStreamMessage); + + // Log the first chunk (useful for debugging) + if (messageContent.length === chunk.text.length) { + log.info(`First stream chunk received: "${chunk.text.substring(0, 50)}${chunk.text.length > 50 ? '...' 
: ''}"`); + } + } + + // If the provider indicates this is "thinking" state, relay that + if (chunk.raw?.thinking) { + wsService.sendMessageToAllClients({ + type: 'llm-stream', + chatNoteId, + thinking: chunk.raw.thinking + } as LLMStreamMessage); + } + + // If the provider indicates tool execution, relay that + if (chunk.raw?.toolExecution) { + wsService.sendMessageToAllClients({ + type: 'llm-stream', + chatNoteId, + toolExecution: chunk.raw.toolExecution + } as LLMStreamMessage); + } + + // Handle direct tool_calls in the response (for OpenAI) + if (chunk.tool_calls && chunk.tool_calls.length > 0) { + log.info(`Detected direct tool_calls in stream chunk: ${chunk.tool_calls.length} tools`); + + // Send tool execution notification + wsService.sendMessageToAllClients({ + type: 'tool_execution_start', + chatNoteId + } as LLMStreamMessage); + + // Process each tool call + for (const toolCall of chunk.tool_calls) { + // Process arguments + let args = toolCall.function?.arguments; + if (typeof args === 'string') { + try { + args = JSON.parse(args); + } catch (e) { + log.info(`Could not parse tool arguments as JSON: ${e}`); + args = { raw: args }; + } + } + + // Format into a standardized tool execution message + wsService.sendMessageToAllClients({ + type: 'tool_result', + chatNoteId, + toolExecution: { + action: 'executing', + tool: toolCall.function?.name || 'unknown', + toolCallId: toolCall.id, + args: args + } + } as LLMStreamMessage); + } + } + + // Signal completion when done + if (chunk.done) { + log.info(`Stream completed, total content: ${messageContent.length} chars`); + + // Check if there are more tool calls to execute (recursive tool calling) + if (service && toolMessages && followUpOptions && + response.tool_calls && response.tool_calls.length > 0 && + followUpOptions.currentToolIteration < followUpOptions.maxToolIterations) { + + log.info(`Found ${response.tool_calls.length} more tool calls in iteration ${followUpOptions.currentToolIteration}`); + + // 
Execute these tool calls in another iteration + const assistantMessage = { + role: 'assistant' as const, + content: messageContent, + tool_calls: response.tool_calls + }; + + // Execute the next round of tools + const nextToolResults = await ToolHandler.executeToolCalls(response, chatNoteId); + + // Create a new messages array with the latest tool results + const nextToolMessages = [...toolMessages, assistantMessage, ...nextToolResults]; + + // Increment the tool iteration counter for the next call + const nextFollowUpOptions = { + ...followUpOptions, + currentToolIteration: followUpOptions.currentToolIteration + 1 + }; + + log.info(`Making another follow-up request (iteration ${nextFollowUpOptions.currentToolIteration}/${nextFollowUpOptions.maxToolIterations})`); + + // Make another follow-up request + const nextResponse = await service.generateChatCompletion(nextToolMessages, nextFollowUpOptions); + + // Process the next response recursively + await this.processStreamedResponse( + nextResponse, + wsService, + chatNoteId, + session, + nextToolMessages, + nextFollowUpOptions, + service + ); + } else { + // Only send final done message if it wasn't already sent with content + // This ensures we don't duplicate the content but still mark completion + if (!chunk.text) { + log.info(`No content in final chunk, sending explicit completion message`); + + // Send final message with done flag only (no content) + wsService.sendMessageToAllClients({ + type: 'llm-stream', + chatNoteId, + done: true + } as LLMStreamMessage); + } + + // Store the full response in the session + session.messages.push({ + role: 'assistant', + content: messageContent, + timestamp: new Date() + }); + } + } + }); + + log.info(`Streaming completed successfully`); + } catch (streamError: any) { + log.error(`Error during streaming: ${streamError.message}`); + + // Report the error to the client + wsService.sendMessageToAllClients({ + type: 'llm-stream', + chatNoteId, + error: `Error during streaming: 
${streamError instanceof Error ? streamError.message : 'Unknown error'}`, + done: true + } as LLMStreamMessage); + + throw streamError; + } + } +} diff --git a/src/services/llm/chat/handlers/tool_handler.ts b/src/services/llm/chat/handlers/tool_handler.ts new file mode 100644 index 000000000..076664f63 --- /dev/null +++ b/src/services/llm/chat/handlers/tool_handler.ts @@ -0,0 +1,181 @@ +/** + * Handler for LLM tool executions + */ +import log from "../../../log.js"; +import type { Message } from "../../ai_interface.js"; +import SessionsStore from "../sessions_store.js"; + +/** + * Handles the execution of LLM tools + */ +export class ToolHandler { + /** + * Execute tool calls from the LLM response + * @param response The LLM response containing tool calls + * @param chatNoteId Optional chat note ID for tracking + */ + static async executeToolCalls(response: any, chatNoteId?: string): Promise { + log.info(`========== TOOL EXECUTION FLOW ==========`); + if (!response.tool_calls || response.tool_calls.length === 0) { + log.info(`No tool calls to execute, returning early`); + return []; + } + + log.info(`Executing ${response.tool_calls.length} tool calls`); + + try { + // Import tool registry directly to avoid circular dependencies + const toolRegistry = (await import('../../tools/tool_registry.js')).default; + + // Check if tools are available + const availableTools = toolRegistry.getAllTools(); + log.info(`Available tools in registry: ${availableTools.length}`); + + if (availableTools.length === 0) { + log.error('No tools available in registry for execution'); + + // Try to initialize tools + try { + // Ensure tools are initialized + const initResult = await this.ensureToolsInitialized(); + if (!initResult) { + throw new Error('Failed to initialize tools'); + } + } catch (error: unknown) { + const errorMessage = error instanceof Error ? 
error.message : String(error); + log.error(`Failed to initialize tools: ${errorMessage}`); + throw new Error('Tool execution failed: No tools available'); + } + } + + // Execute each tool call and collect results + const toolResults = await Promise.all(response.tool_calls.map(async (toolCall: any) => { + try { + log.info(`Executing tool: ${toolCall.function.name}, ID: ${toolCall.id || 'unknown'}`); + + // Get the tool from registry + const tool = toolRegistry.getTool(toolCall.function.name); + if (!tool) { + throw new Error(`Tool not found: ${toolCall.function.name}`); + } + + // Parse arguments + let args; + if (typeof toolCall.function.arguments === 'string') { + try { + args = JSON.parse(toolCall.function.arguments); + } catch (e: unknown) { + log.error(`Failed to parse tool arguments: ${e instanceof Error ? e.message : String(e)}`); + + // Try cleanup and retry + try { + const cleaned = toolCall.function.arguments + .replace(/^['"]|['"]$/g, '') // Remove surrounding quotes + .replace(/\\"/g, '"') // Replace escaped quotes + .replace(/([{,])\s*'([^']+)'\s*:/g, '$1"$2":') // Replace single quotes around property names + .replace(/([{,])\s*(\w+)\s*:/g, '$1"$2":'); // Add quotes around unquoted property names + + args = JSON.parse(cleaned); + } catch (cleanErr) { + // If all parsing fails, use as-is + args = { text: toolCall.function.arguments }; + } + } + } else { + args = toolCall.function.arguments; + } + + // Log what we're about to execute + log.info(`Executing tool with arguments: ${JSON.stringify(args)}`); + + // Execute the tool and get result + const startTime = Date.now(); + const result = await tool.execute(args); + const executionTime = Date.now() - startTime; + + log.info(`Tool execution completed in ${executionTime}ms`); + + // Log the result + const resultPreview = typeof result === 'string' + ? result.substring(0, 100) + (result.length > 100 ? '...' 
: '') + : JSON.stringify(result).substring(0, 100) + '...'; + log.info(`Tool result: ${resultPreview}`); + + // Record tool execution in session if chatNoteId is provided + if (chatNoteId) { + SessionsStore.recordToolExecution(chatNoteId, toolCall, typeof result === 'string' ? result : JSON.stringify(result)); + } + + // Format result as a proper message + return { + role: 'tool', + content: typeof result === 'string' ? result : JSON.stringify(result), + name: toolCall.function.name, + tool_call_id: toolCall.id || `tool-${Date.now()}-${Math.random().toString(36).substring(2, 9)}` + }; + } catch (error: any) { + log.error(`Error executing tool ${toolCall.function.name}: ${error.message}`); + + // Record error in session if chatNoteId is provided + if (chatNoteId) { + SessionsStore.recordToolExecution(chatNoteId, toolCall, '', error.message); + } + + // Return error as tool result + return { + role: 'tool', + content: `Error: ${error.message}`, + name: toolCall.function.name, + tool_call_id: toolCall.id || `tool-${Date.now()}-${Math.random().toString(36).substring(2, 9)}` + }; + } + })); + + log.info(`Completed execution of ${toolResults.length} tools`); + return toolResults; + } catch (error: any) { + log.error(`Error in tool execution handler: ${error.message}`); + throw error; + } + } + + /** + * Ensure LLM tools are initialized + */ + static async ensureToolsInitialized(): Promise { + try { + log.info("Checking LLM tool initialization..."); + + // Import tool registry + const toolRegistry = (await import('../../tools/tool_registry.js')).default; + + // Check if tools are already initialized + const registeredTools = toolRegistry.getAllTools(); + + if (registeredTools.length === 0) { + log.info("No tools found in registry."); + log.info("Note: Tools should be initialized in the AIServiceManager constructor."); + + // Create AI service manager instance to trigger tool initialization + const aiServiceManager = (await import('../../ai_service_manager.js')).default; + 
aiServiceManager.getInstance(); + + // Check again after AIServiceManager instantiation + const tools = toolRegistry.getAllTools(); + log.info(`After AIServiceManager instantiation: ${tools.length} tools available`); + } else { + log.info(`LLM tools already initialized: ${registeredTools.length} tools available`); + } + + // Get all available tools for logging + const availableTools = toolRegistry.getAllTools().map(t => t.definition.function.name); + log.info(`Available tools: ${availableTools.join(', ')}`); + + log.info("LLM tools initialized successfully: " + availableTools.length + " tools available"); + return true; + } catch (error) { + log.error(`Failed to initialize LLM tools: ${error}`); + return false; + } + } +} diff --git a/src/services/llm/chat/index.ts b/src/services/llm/chat/index.ts new file mode 100644 index 000000000..d82554229 --- /dev/null +++ b/src/services/llm/chat/index.ts @@ -0,0 +1,29 @@ +/** + * Chat module export + */ +import restChatService from './rest_chat_service.js'; +import sessionsStore from './sessions_store.js'; +import { ContextHandler } from './handlers/context_handler.js'; +import { ToolHandler } from './handlers/tool_handler.js'; +import { StreamHandler } from './handlers/stream_handler.js'; +import * as messageFormatter from './utils/message_formatter.js'; +import type { ChatSession, ChatMessage, NoteSource } from '../interfaces/chat_session.js'; +import type { LLMStreamMessage } from '../interfaces/chat_ws_messages.js'; + +// Export components +export { + restChatService as default, + sessionsStore, + ContextHandler, + ToolHandler, + StreamHandler, + messageFormatter +}; + +// Export types +export type { + ChatSession, + ChatMessage, + NoteSource, + LLMStreamMessage +}; diff --git a/src/services/llm/chat/rest_chat_service.ts b/src/services/llm/chat/rest_chat_service.ts new file mode 100644 index 000000000..0a400ad91 --- /dev/null +++ b/src/services/llm/chat/rest_chat_service.ts @@ -0,0 +1,680 @@ +/** + * Service to handle 
/**
 * Service to handle chat API interactions.
 *
 * Bridges Express routes to the LLM chat pipeline: resolves/creates in-memory
 * sessions backed by Chat Notes, validates AI availability, executes the
 * pipeline, and relays streaming output over both WebSocket and SSE.
 */
import log from "../../log.js";
import type { Request, Response } from "express";
import type { Message, ChatCompletionOptions } from "../ai_interface.js";
import { AIServiceManager } from "../ai_service_manager.js";
import { ChatPipeline } from "../pipeline/chat_pipeline.js";
import type { ChatPipelineInput } from "../pipeline/interfaces.js";
import options from "../../options.js";
import { SEARCH_CONSTANTS } from '../constants/search_constants.js';

// Import our refactored modules
import { ContextHandler } from "./handlers/context_handler.js";
import { ToolHandler } from "./handlers/tool_handler.js";
import { StreamHandler } from "./handlers/stream_handler.js";
import SessionsStore from "./sessions_store.js";
import * as MessageFormatter from "./utils/message_formatter.js";
import type { NoteSource } from "../interfaces/chat_session.js";
import type { LLMStreamMessage } from "../interfaces/chat_ws_messages.js";
import type { ChatMessage } from '../interfaces/chat_session.js';
import type { ChatSession } from '../interfaces/chat_session.js';

/**
 * Service to handle chat API interactions
 */
class RestChatService {
    /**
     * Check if the database is initialized.
     *
     * @returns true when the 'initialized' option can be read; false when the
     *          read throws (treated as "not yet initialized")
     */
    isDatabaseInitialized(): boolean {
        try {
            options.getOption('initialized');
            return true;
        } catch (error) {
            return false;
        }
    }

    /**
     * Check if AI services are available.
     *
     * Requires an initialized database, then probes a locally-constructed
     * AIServiceManager for any available provider.
     *
     * @returns true when at least one AI service reports available
     */
    safelyUseAIManager(): boolean {
        // Only use AI manager if database is initialized
        if (!this.isDatabaseInitialized()) {
            log.info("AI check failed: Database is not initialized");
            return false;
        }

        // Try to access the manager - will create instance only if needed
        try {
            // Create local instance to avoid circular references
            const aiManager = new AIServiceManager();

            // NOTE(review): `new` never yields a falsy value, so this guard is
            // dead code (a failing constructor would throw into the catch below).
            if (!aiManager) {
                log.info("AI check failed: AI manager module is not available");
                return false;
            }

            const isAvailable = aiManager.isAnyServiceAvailable();
            log.info(`AI service availability check result: ${isAvailable}`);

            if (isAvailable) {
                // Additional diagnostics
                try {
                    const providers = aiManager.getAvailableProviders();
                    log.info(`Available AI providers: ${providers.join(', ')}`);
                } catch (err) {
                    log.info(`Could not get available providers: ${err}`);
                }
            }

            return isAvailable;
        } catch (error) {
            log.error(`Error accessing AI service manager: ${error}`);
            return false;
        }
    }

    /**
     * Handle a message sent to an LLM and get a response.
     *
     * POST: appends the user message to the session, runs the pipeline, and
     * returns `{ content, sources, metadata }` (or `{ error }`).
     * GET (requires `?stream=true`): streams the response via WebSocket + SSE
     * through {@link handleStreamCallback} and returns null.
     *
     * @param req Express request; `req.params.chatNoteId` identifies the chat
     * @param res Express response; only written to directly in streaming mode
     */
    async handleSendMessage(req: Request, res: Response) {
        log.info("=== Starting handleSendMessage ===");
        try {
            // Extract parameters differently based on the request method
            let content, useAdvancedContext, showThinking, chatNoteId;

            if (req.method === 'POST') {
                // For POST requests, get content from the request body
                const requestBody = req.body || {};
                content = requestBody.content;
                useAdvancedContext = requestBody.useAdvancedContext || false;
                showThinking = requestBody.showThinking || false;

                // Add logging for POST requests
                log.info(`LLM POST message: chatNoteId=${req.params.chatNoteId}, useAdvancedContext=${useAdvancedContext}, showThinking=${showThinking}, contentLength=${content ? content.length : 0}`);
            } else if (req.method === 'GET') {
                // For GET (streaming) requests, get parameters from query params and body
                // For streaming requests, we need the content from the body
                useAdvancedContext = req.query.useAdvancedContext === 'true' || (req.body && req.body.useAdvancedContext === true);
                showThinking = req.query.showThinking === 'true' || (req.body && req.body.showThinking === true);
                content = req.body && req.body.content ? req.body.content : '';

                // Add detailed logging for GET requests
                log.info(`LLM GET stream: chatNoteId=${req.params.chatNoteId}, useAdvancedContext=${useAdvancedContext}, showThinking=${showThinking}`);
                log.info(`Parameters from query: useAdvancedContext=${req.query.useAdvancedContext}, showThinking=${req.query.showThinking}`);
                log.info(`Parameters from body: useAdvancedContext=${req.body?.useAdvancedContext}, showThinking=${req.body?.showThinking}, content=${content ? `${content.substring(0, 20)}...` : 'none'}`);
            }

            // Get chatNoteId from URL params
            chatNoteId = req.params.chatNoteId;

            // For GET requests, ensure we have the stream parameter
            if (req.method === 'GET' && req.query.stream !== 'true') {
                throw new Error('Stream parameter must be set to true for GET/streaming requests');
            }

            // For POST requests, validate the content
            if (req.method === 'POST' && (!content || typeof content !== 'string' || content.trim().length === 0)) {
                throw new Error('Content cannot be empty');
            }

            // Get or create session from Chat Note
            let session = await this.getOrCreateSessionFromChatNote(chatNoteId, req.method === 'POST');

            // If no session found and we're not allowed to create one (GET request)
            if (!session && req.method === 'GET') {
                throw new Error('Chat Note not found, cannot create session for streaming');
            }

            // For POST requests, if no Chat Note exists, create a new one
            if (!session && req.method === 'POST') {
                log.info(`No Chat Note found for ${chatNoteId}, creating a new Chat Note and session`);

                // Create a new Chat Note via the storage service
                //const chatStorageService = (await import('../../llm/chat_storage_service.js')).default;
                //const newChat = await chatStorageService.createChat('New Chat');

                // Use the new Chat Note's ID for the session
                session = SessionsStore.createSession({
                    //title: newChat.title,
                    chatNoteId: chatNoteId
                });

                // Update the session ID to match the Chat Note ID
                session.id = chatNoteId;

                log.info(`Created new Chat Note and session with ID: ${session.id}`);

                // Update the parameter to use the new ID
                chatNoteId = session.id;
            }

            // At this point, session should never be null
            // TypeScript doesn't know this, so we'll add a check
            if (!session) {
                // This should never happen due to our logic above
                throw new Error('Failed to create or retrieve session');
            }

            // Update session last active timestamp
            SessionsStore.touchSession(session.id);

            // For POST requests, store the user message
            if (req.method === 'POST' && content && session) {
                // Add message to session
                session.messages.push({
                    role: 'user',
                    content,
                    timestamp: new Date()
                });

                // Log a preview of the message
                log.info(`Processing LLM message: "${content.substring(0, 50)}${content.length > 50 ? '...' : ''}"`);
            }

            // Check if AI services are enabled before proceeding
            const aiEnabled = await options.getOptionBool('aiEnabled');
            log.info(`AI enabled setting: ${aiEnabled}`);
            if (!aiEnabled) {
                log.info("AI services are disabled by configuration");
                return {
                    error: "AI features are disabled. Please enable them in the settings."
                };
            }

            // Check if AI services are available
            log.info("Checking if AI services are available...");
            if (!this.safelyUseAIManager()) {
                log.info("AI services are not available - checking for specific issues");

                try {
                    // Create a direct instance to avoid circular references
                    const aiManager = new AIServiceManager();

                    // NOTE(review): dead guard — see safelyUseAIManager above.
                    if (!aiManager) {
                        log.error("AI service manager is not initialized");
                        return {
                            error: "AI service is not properly initialized. Please check your configuration."
                        };
                    }

                    const availableProviders = aiManager.getAvailableProviders();
                    if (availableProviders.length === 0) {
                        log.error("No AI providers are available");
                        return {
                            error: "No AI providers are configured or available. Please check your AI settings."
                        };
                    }
                } catch (err) {
                    log.error(`Detailed AI service check failed: ${err}`);
                }

                return {
                    error: "AI services are currently unavailable. Please check your configuration."
                };
            }

            // Create direct instance to avoid circular references
            const aiManager = new AIServiceManager();

            // Get the default service - just use the first available one
            const availableProviders = aiManager.getAvailableProviders();

            if (availableProviders.length === 0) {
                log.error("No AI providers are available after manager check");
                return {
                    error: "No AI providers are configured or available. Please check your AI settings."
                };
            }

            // Use the first available provider
            const providerName = availableProviders[0];
            log.info(`Using AI provider: ${providerName}`);

            // We know the manager has a 'services' property from our code inspection,
            // but TypeScript doesn't know that from the interface.
            // This is a workaround to access it
            const service = (aiManager as any).services[providerName];

            if (!service) {
                log.error(`AI service for provider ${providerName} not found`);
                return {
                    error: `Selected AI provider (${providerName}) is not available. Please check your configuration.`
                };
            }

            // Initialize tools
            log.info("Initializing LLM agent tools...");
            // Ensure tools are initialized to prevent tool execution issues
            await ToolHandler.ensureToolsInitialized();

            // Create and use the chat pipeline instead of direct processing
            const pipeline = new ChatPipeline({
                enableStreaming: req.method === 'GET',
                enableMetrics: true,
                maxToolCallIterations: 5
            });

            log.info("Executing chat pipeline...");

            // Create options object for better tracking
            const pipelineOptions = {
                // Force useAdvancedContext to be a boolean, no matter what
                useAdvancedContext: useAdvancedContext === true,
                systemPrompt: session?.messages.find(m => m.role === 'system')?.content,
                temperature: session?.metadata.temperature,
                maxTokens: session?.metadata.maxTokens,
                model: session?.metadata.model,
                // Set stream based on request type, but ensure it's explicitly a boolean value
                // GET requests or format=stream parameter indicates streaming should be used
                stream: !!(req.method === 'GET' || req.query.format === 'stream' || req.query.stream === 'true'),
                // Include chatNoteId for tracking tool executions
                chatNoteId: chatNoteId
            };

            // Log the options to verify what's being sent to the pipeline
            log.info(`Pipeline input options: ${JSON.stringify({
                useAdvancedContext: pipelineOptions.useAdvancedContext,
                stream: pipelineOptions.stream
            })}`);

            // Import the WebSocket service for direct access
            const wsService = await import('../../ws.js');

            // Create a stream callback wrapper
            // This will ensure we properly handle all streaming messages
            // NOTE(review): messageContent is passed by value into
            // handleStreamCallback, so accumulation there never updates this
            // variable — the final session push sees only per-call content.
            // Flagged for a behavior-changing fix in a separate pass.
            let messageContent = '';

            // Prepare the pipeline input
            const pipelineInput: ChatPipelineInput = {
                messages: session.messages.map(msg => ({
                    role: msg.role as 'user' | 'assistant' | 'system',
                    content: msg.content
                })),
                query: content || '', // Ensure query is always a string, even if content is null/undefined
                noteId: session.noteContext ?? undefined,
                showThinking: showThinking,
                options: pipelineOptions,
                streamCallback: req.method === 'GET' ? (data, done, rawChunk) => {
                    try {
                        // Use WebSocket service to send messages
                        this.handleStreamCallback(
                            data, done, rawChunk,
                            wsService.default, chatNoteId,
                            messageContent, session, res
                        );
                    } catch (error) {
                        log.error(`Error in stream callback: ${error}`);

                        // Try to send error message
                        try {
                            wsService.default.sendMessageToAllClients({
                                type: 'llm-stream',
                                chatNoteId: chatNoteId,
                                error: `Stream error: ${error instanceof Error ? error.message : 'Unknown error'}`,
                                done: true
                            });

                            // End the response
                            res.write(`data: ${JSON.stringify({ error: 'Stream error', done: true })}\n\n`);
                            res.end();
                        } catch (e) {
                            log.error(`Failed to send error message: ${e}`);
                        }
                    }
                } : undefined
            };

            // Execute the pipeline
            const response = await pipeline.execute(pipelineInput);

            // Handle the response
            if (req.method === 'POST') {
                // Add assistant message to session
                session.messages.push({
                    role: 'assistant',
                    content: response.text || '',
                    timestamp: new Date()
                });

                // Extract sources if they're available
                const sources = (response as any).sources || [];

                // Store sources in the session metadata if they're present
                if (sources.length > 0) {
                    session.metadata.sources = sources;
                    log.info(`Stored ${sources.length} sources in session metadata`);
                }

                // Return the response with complete metadata
                return {
                    content: response.text || '',
                    sources: sources,
                    metadata: {
                        model: response.model || session.metadata.model,
                        provider: response.provider || session.metadata.provider,
                        temperature: session.metadata.temperature,
                        maxTokens: session.metadata.maxTokens,
                        lastUpdated: new Date().toISOString(),
                        toolExecutions: session.metadata.toolExecutions || []
                    }
                };
            } else {
                // For streaming requests, we've already sent the response
                return null;
            }
        } catch (processingError: any) {
            log.error(`Error processing message: ${processingError}`);
            return {
                error: `Error processing your request: ${processingError.message}`
            };
        }
    }

    /**
     * Handle stream callback for WebSocket communication.
     *
     * Builds an LLMStreamMessage from each pipeline chunk (content, thinking,
     * tool execution), broadcasts it over WebSocket, mirrors it as an SSE
     * `data:` event on the HTTP response, and on `done` pushes the assistant
     * message into the session and ends the response.
     *
     * @param data New text chunk, or null for non-text chunks
     * @param done Whether this is the final chunk of the stream
     * @param rawChunk Provider-specific raw chunk (may carry thinking/toolExecution)
     * @param wsService WebSocket service used to broadcast to clients
     * @param chatNoteId Chat Note ID this stream belongs to
     * @param messageContent Accumulated text so far — NOTE(review): a string
     *        parameter is by-value; `+=` below does not update the caller's copy
     * @param session In-memory session the final message is stored into
     * @param res SSE response for the streaming GET request
     */
    private handleStreamCallback(
        data: string | null,
        done: boolean,
        rawChunk: any,
        wsService: any,
        chatNoteId: string,
        messageContent: string,
        session: any,
        res: Response
    ) {
        // Only accumulate content that's actually text (not tool execution or thinking info)
        if (data) {
            messageContent += data;
        }

        // Create a message object with all necessary fields
        const message: LLMStreamMessage = {
            type: 'llm-stream',
            chatNoteId: chatNoteId
        };

        // Add content if available - either the new chunk or full content on completion
        if (data) {
            message.content = data;
        }

        // Add thinking info if available in the raw chunk
        if (rawChunk && 'thinking' in rawChunk && rawChunk.thinking) {
            message.thinking = rawChunk.thinking as string;
        }

        // Add tool execution info if available in the raw chunk
        if (rawChunk && 'toolExecution' in rawChunk && rawChunk.toolExecution) {
            // Transform the toolExecution to match the expected format
            const toolExec = rawChunk.toolExecution;
            message.toolExecution = {
                // Use optional chaining for all properties
                tool: typeof toolExec.tool === 'string'
                    ? toolExec.tool
                    : toolExec.tool?.name,
                result: toolExec.result,
                // Map arguments to args
                args: 'arguments' in toolExec ?
                    (typeof toolExec.arguments === 'object' ?
                        toolExec.arguments as Record<string, unknown> : {}) : {},
                // Add additional properties if they exist
                action: 'action' in toolExec ? toolExec.action as string : undefined,
                toolCallId: 'toolCallId' in toolExec ? toolExec.toolCallId as string : undefined,
                error: 'error' in toolExec ? toolExec.error as string : undefined
            };
        }

        // Set done flag explicitly
        message.done = done;

        // On final message, include the complete content too
        if (done) {
            // Store the response in the session when done
            session.messages.push({
                role: 'assistant',
                content: messageContent,
                timestamp: new Date()
            });
        }

        // Send message to all clients
        wsService.sendMessageToAllClients(message);

        // Log what was sent (first message and completion)
        if (message.thinking || done) {
            log.info(
                `[WS-SERVER] Sending LLM stream message: chatNoteId=${chatNoteId}, content=${!!message.content}, contentLength=${message.content?.length || 0}, thinking=${!!message.thinking}, toolExecution=${!!message.toolExecution}, done=${done}`
            );
        }

        // For GET requests, also send as server-sent events
        // Prepare response data for JSON event
        const responseData: any = {
            content: data,
            done
        };

        // Add tool execution if available
        if (rawChunk?.toolExecution) {
            responseData.toolExecution = rawChunk.toolExecution;
        }

        // Send the data as a JSON event
        res.write(`data: ${JSON.stringify(responseData)}\n\n`);

        if (done) {
            res.end();
        }
    }

    /**
     * Create a new chat session.
     *
     * Resolves the backing Chat Note ID from `chatNoteId`, then
     * `currentNoteId`, then creates a fresh Chat Note via the storage service.
     *
     * @returns `{ id, title, createdAt, noteId }` for the new session
     * @throws When session creation fails
     */
    async createSession(req: Request, res: Response) {
        try {
            // NOTE(review): this local `options` shadows the imported options
            // module within this method body.
            const options: any = req.body || {};
            const title = options.title || 'Chat Session';

            // Use the currentNoteId as the chatNoteId if provided
            let chatNoteId = options.chatNoteId;

            // If currentNoteId is provided but chatNoteId is not, use currentNoteId
            if (!chatNoteId && options.currentNoteId) {
                chatNoteId = options.currentNoteId;
                log.info(`Using provided currentNoteId ${chatNoteId} as chatNoteId`);
            }

            // If we still don't have a chatNoteId, create a new Chat Note
            if (!chatNoteId) {
                // Create a new Chat Note via the storage service
                const chatStorageService = (await import('../../llm/chat_storage_service.js')).default;
                const newChat = await chatStorageService.createChat(title);
                chatNoteId = newChat.id;
                log.info(`Created new Chat Note with ID: ${chatNoteId}`);
            }

            // Create a new session through our session store
            const session = SessionsStore.createSession({
                chatNoteId,
                title,
                systemPrompt: options.systemPrompt,
                contextNoteId: options.contextNoteId,
                maxTokens: options.maxTokens,
                model: options.model,
                provider: options.provider,
                temperature: options.temperature
            });

            return {
                id: session.id,
                title: session.title,
                createdAt: session.createdAt,
                noteId: chatNoteId // Return the note ID explicitly
            };
        } catch (error: any) {
            log.error(`Error creating LLM session: ${error.message || 'Unknown error'}`);
            throw new Error(`Failed to create LLM session: ${error.message || 'Unknown error'}`);
        }
    }

    /**
     * Get a specific chat session by ID.
     *
     * @returns Session data with metadata, or null after writing a structured
     *          404 JSON body when the session does not exist
     */
    async getSession(req: Request, res: Response) {
        try {
            const { sessionId } = req.params;

            // Check if session exists
            const session = SessionsStore.getSession(sessionId);
            if (!session) {
                // Instead of throwing an error, return a structured 404 response
                // that the frontend can handle gracefully
                res.status(404).json({
                    error: true,
                    message: `Session with ID ${sessionId} not found`,
                    code: 'session_not_found',
                    sessionId
                });
                return null; // Return null to prevent further processing
            }

            // Return session with metadata and additional fields
            return {
                id: session.id,
                title: session.title,
                createdAt: session.createdAt,
                lastActive: session.lastActive,
                messages: session.messages,
                noteContext: session.noteContext,
                // Include additional fields for the frontend
                sources: session.metadata.sources || [],
                metadata: {
                    model: session.metadata.model,
                    provider: session.metadata.provider,
                    temperature: session.metadata.temperature,
                    maxTokens: session.metadata.maxTokens,
                    lastUpdated: session.lastActive.toISOString(),
                    // Include simplified tool executions if available
                    toolExecutions: session.metadata.toolExecutions || []
                }
            };
        } catch (error: any) {
            log.error(`Error getting LLM session: ${error.message || 'Unknown error'}`);
            throw new Error(`Failed to get session: ${error.message || 'Unknown error'}`);
        }
    }

    /**
     * Delete a chat session.
     *
     * @returns `{ success, message }` on success
     * @throws When the session does not exist
     */
    async deleteSession(req: Request, res: Response) {
        try {
            const { sessionId } = req.params;

            // Delete the session
            const success = SessionsStore.deleteSession(sessionId);
            if (!success) {
                throw new Error(`Session with ID ${sessionId} not found`);
            }

            return {
                success: true,
                message: `Session ${sessionId} deleted successfully`
            };
        } catch (error: any) {
            log.error(`Error deleting LLM session: ${error.message || 'Unknown error'}`);
            throw new Error(`Failed to delete session: ${error.message || 'Unknown error'}`);
        }
    }

    /**
     * Get all sessions (the live in-memory map, not a copy).
     */
    getSessions() {
        return SessionsStore.getAllSessions();
    }

    /**
     * Create an in-memory session from a Chat Note.
     * This treats the Chat Note as the source of truth, using its ID as the session ID.
     *
     * @param noteId Chat Note ID to hydrate the session from
     * @returns The created session, or null when the Chat Note is missing or loading fails
     */
    async createSessionFromChatNote(noteId: string): Promise<ChatSession | null> {
        try {
            log.info(`Creating in-memory session for Chat Note ID ${noteId}`);

            // Import chat storage service
            const chatStorageService = (await import('../../llm/chat_storage_service.js')).default;

            // Try to get the Chat Note data
            const chatNote = await chatStorageService.getChat(noteId);

            if (!chatNote) {
                log.error(`Chat Note ${noteId} not found, cannot create session`);
                return null;
            }

            log.info(`Found Chat Note ${noteId}, creating in-memory session`);

            // Convert Message[] to ChatMessage[] by ensuring the role is compatible
            const chatMessages: ChatMessage[] = chatNote.messages.map(msg => ({
                role: msg.role === 'tool' ? 'assistant' : msg.role, // Map 'tool' role to 'assistant'
                content: msg.content,
                timestamp: new Date()
            }));

            // Create a new session with the same ID as the Chat Note
            const session: ChatSession = {
                id: chatNote.id, // Use Chat Note ID as the session ID
                title: chatNote.title,
                messages: chatMessages,
                createdAt: chatNote.createdAt || new Date(),
                lastActive: new Date(),
                metadata: chatNote.metadata || {}
            };

            // Add the session to the in-memory store
            SessionsStore.getAllSessions().set(noteId, session);

            log.info(`Successfully created in-memory session for Chat Note ${noteId}`);
            return session;
        } catch (error) {
            log.error(`Failed to create session from Chat Note: ${error}`);
            return null;
        }
    }

    /**
     * Get an existing session or create a new one from a Chat Note.
     * This treats the Chat Note as the source of truth, using its ID as the session ID.
     *
     * @param noteId Chat Note ID (doubles as the session ID)
     * @param createIfNotFound When false, a cache miss returns null instead of hydrating
     */
    async getOrCreateSessionFromChatNote(noteId: string, createIfNotFound: boolean = true): Promise<ChatSession | null> {
        // First check if we already have this session in memory
        let session = SessionsStore.getSession(noteId);

        if (session) {
            log.info(`Found existing in-memory session for Chat Note ${noteId}`);
            return session;
        }

        // If not in memory, try to create from Chat Note
        log.info(`Session not found in memory for Chat Note ${noteId}, attempting to create it`);

        // Only try to create if allowed
        if (!createIfNotFound) {
            log.info(`Not creating new session for ${noteId} as createIfNotFound=false`);
            return null;
        }

        // Create from Chat Note
        return await this.createSessionFromChatNote(noteId);
    }
}

// Create singleton instance
const restChatService = new RestChatService();
export default restChatService;
sessions + */ +import log from "../../log.js"; +import { LLM_CONSTANTS } from '../constants/provider_constants.js'; +import { SEARCH_CONSTANTS } from '../constants/search_constants.js'; +import { randomString } from "../../utils.js"; +import type { ChatSession, ChatMessage } from '../interfaces/chat_session.js'; + +// In-memory storage for sessions +const sessions = new Map(); + +// Flag to track if cleanup timer has been initialized +let cleanupInitialized = false; + +/** + * Provides methods to manage chat sessions + */ +class SessionsStore { + /** + * Initialize the session cleanup timer to remove old/inactive sessions + */ + initializeCleanupTimer(): void { + if (cleanupInitialized) { + return; + } + + // Clean sessions that have expired based on the constants + function cleanupOldSessions() { + const expiryTime = new Date(Date.now() - LLM_CONSTANTS.SESSION.SESSION_EXPIRY_MS); + for (const [sessionId, session] of sessions.entries()) { + if (session.lastActive < expiryTime) { + sessions.delete(sessionId); + } + } + } + + // Run cleanup at the configured interval + setInterval(cleanupOldSessions, LLM_CONSTANTS.SESSION.CLEANUP_INTERVAL_MS); + cleanupInitialized = true; + log.info("Session cleanup timer initialized"); + } + + /** + * Get all sessions + */ + getAllSessions(): Map { + return sessions; + } + + /** + * Get a specific session by ID + */ + getSession(sessionId: string): ChatSession | undefined { + return sessions.get(sessionId); + } + + /** + * Create a new session + */ + createSession(options: { + chatNoteId: string; + title?: string; + systemPrompt?: string; + contextNoteId?: string; + maxTokens?: number; + model?: string; + provider?: string; + temperature?: number; + }): ChatSession { + this.initializeCleanupTimer(); + + const title = options.title || 'Chat Session'; + const sessionId = options.chatNoteId; + const now = new Date(); + + // Initial system message if provided + const messages: ChatMessage[] = []; + if (options.systemPrompt) { + 
messages.push({ + role: 'system', + content: options.systemPrompt, + timestamp: now + }); + } + + // Create and store the session + const session: ChatSession = { + id: sessionId, + title, + messages, + createdAt: now, + lastActive: now, + noteContext: options.contextNoteId, + metadata: { + temperature: options.temperature || SEARCH_CONSTANTS.TEMPERATURE.DEFAULT, + maxTokens: options.maxTokens, + model: options.model, + provider: options.provider, + sources: [], + toolExecutions: [], + lastUpdated: now.toISOString() + } + }; + + sessions.set(sessionId, session); + log.info(`Created in-memory session for Chat Note ID: ${sessionId}`); + + return session; + } + + /** + * Update a session's last active timestamp + */ + touchSession(sessionId: string): boolean { + const session = sessions.get(sessionId); + if (!session) { + return false; + } + + session.lastActive = new Date(); + return true; + } + + /** + * Delete a session + */ + deleteSession(sessionId: string): boolean { + return sessions.delete(sessionId); + } + + /** + * Record a tool execution in the session metadata + */ + recordToolExecution(chatNoteId: string, tool: any, result: string, error?: string): void { + if (!chatNoteId) return; + + const session = sessions.get(chatNoteId); + if (!session) return; + + try { + const toolExecutions = session.metadata.toolExecutions || []; + + // Format tool execution record + const execution = { + id: tool.id || `tool-${Date.now()}-${Math.random().toString(36).substring(2, 7)}`, + name: tool.function?.name || 'unknown', + arguments: typeof tool.function?.arguments === 'string' + ? 
(() => { try { return JSON.parse(tool.function.arguments); } catch { return tool.function.arguments; } })() + : tool.function?.arguments || {}, + result: result, + error: error, + timestamp: new Date().toISOString() + }; + + // Add to tool executions + toolExecutions.push(execution); + session.metadata.toolExecutions = toolExecutions; + + log.info(`Recorded tool execution for ${execution.name} in session ${chatNoteId}`); + } catch (err) { + log.error(`Failed to record tool execution: ${err}`); + } + } +} + +// Create singleton instance +const sessionsStore = new SessionsStore(); +export default sessionsStore; diff --git a/src/services/llm/chat/utils/message_formatter.ts b/src/services/llm/chat/utils/message_formatter.ts new file mode 100644 index 000000000..30ac9a7da --- /dev/null +++ b/src/services/llm/chat/utils/message_formatter.ts @@ -0,0 +1,121 @@ +/** + * Message formatting utilities for different LLM providers + */ +import type { Message } from "../../ai_interface.js"; + +/** + * Interface for message formatters + */ +interface MessageFormatter { + formatMessages(messages: Message[], systemPrompt?: string, context?: string): Message[]; +} + +/** + * Factory to get the appropriate message formatter for a given provider + */ +export function getFormatter(providerName: string): MessageFormatter { + // Currently we use a simple implementation that works for most providers + // In the future, this could be expanded to have provider-specific formatters + return { + formatMessages(messages: Message[], systemPrompt?: string, context?: string): Message[] { + // Simple implementation that works for most providers + const formattedMessages: Message[] = []; + + // Add system message if context or systemPrompt is provided + if (context || systemPrompt) { + formattedMessages.push({ + role: 'system', + content: systemPrompt || (context ? 
`Use the following context to answer the query: ${context}` : '') + }); + } + + // Add all other messages + for (const message of messages) { + if (message.role === 'system' && formattedMessages.some(m => m.role === 'system')) { + // Skip duplicate system messages + continue; + } + formattedMessages.push(message); + } + + return formattedMessages; + } + }; +} + +/** + * Build messages with context for a specific LLM provider + */ +export async function buildMessagesWithContext( + messages: Message[], + context: string, + llmService: any +): Promise { + try { + if (!messages || messages.length === 0) { + return []; + } + + if (!context || context.trim() === '') { + return messages; + } + + // Get the provider name, handling service classes and raw provider names + let providerName: string; + if (typeof llmService === 'string') { + // If llmService is a string, assume it's the provider name + providerName = llmService; + } else if (llmService.constructor && llmService.constructor.name) { + // Extract provider name from service class name (e.g., OllamaService -> ollama) + providerName = llmService.constructor.name.replace('Service', '').toLowerCase(); + } else { + // Fallback to default + providerName = 'default'; + } + + // Get the appropriate formatter for this provider + const formatter = getFormatter(providerName); + + // Format messages with context using the provider-specific formatter + const formattedMessages = formatter.formatMessages( + messages, + undefined, // No system prompt override - use what's in the messages + context + ); + + return formattedMessages; + } catch (error) { + console.error(`Error building messages with context: ${error}`); + // Fallback to original messages in case of error + return messages; + } +} + +/** + * Build context from a list of note sources and a query + */ +export function buildContextFromNotes(sources: any[], query: string): string { + if (!sources || sources.length === 0) { + return query || ''; + } + + const noteContexts 
= sources + .filter(source => source.content) // Only include sources with content + .map((source) => { + // Format each note with its title as a natural heading and wrap in tags + return `\n### ${source.title}\n${source.content || 'No content available'}\n`; + }) + .join('\n\n'); + + if (!noteContexts) { + return query || ''; + } + + // Import the CONTEXT_PROMPTS constant + const { CONTEXT_PROMPTS } = require('../../constants/llm_prompt_constants.js'); + + // Use the template from the constants file, replacing placeholders + return CONTEXT_PROMPTS.CONTEXT_NOTES_WRAPPER + .replace('{noteContexts}', noteContexts) + .replace('{query}', query); +} diff --git a/src/services/llm/chat_service.ts b/src/services/llm/chat_service.ts new file mode 100644 index 000000000..18bf01251 --- /dev/null +++ b/src/services/llm/chat_service.ts @@ -0,0 +1,595 @@ +import type { Message, ChatCompletionOptions, ChatResponse } from './ai_interface.js'; +import chatStorageService from './chat_storage_service.js'; +import log from '../log.js'; +import { CONTEXT_PROMPTS, ERROR_PROMPTS } from './constants/llm_prompt_constants.js'; +import { ChatPipeline } from './pipeline/chat_pipeline.js'; +import type { ChatPipelineConfig, StreamCallback } from './pipeline/interfaces.js'; +import aiServiceManager from './ai_service_manager.js'; +import type { ChatPipelineInput } from './pipeline/interfaces.js'; +import type { NoteSearchResult } from './interfaces/context_interfaces.js'; + +// Update the ChatCompletionOptions interface to include the missing properties +declare module './ai_interface.js' { + interface ChatCompletionOptions { + pipeline?: string; + noteId?: string; + useAdvancedContext?: boolean; + showThinking?: boolean; + enableTools?: boolean; + } +} + +// Add a type for context extraction result +interface ContextExtractionResult { + context: string; + sources?: NoteSearchResult[]; + thinking?: string; +} + +export interface ChatSession { + id: string; + title: string; + messages: 
Message[]; + isStreaming?: boolean; + options?: ChatCompletionOptions; +} + +/** + * Chat pipeline configurations for different use cases + */ +const PIPELINE_CONFIGS: Record> = { + default: { + enableStreaming: true, + enableMetrics: true + }, + agent: { + enableStreaming: true, + enableMetrics: true, + maxToolCallIterations: 5 + }, + performance: { + enableStreaming: false, + enableMetrics: true + } +}; + +/** + * Service for managing chat interactions and history + */ +export class ChatService { + private sessionCache: Map = new Map(); + private pipelines: Map = new Map(); + + constructor() { + // Initialize pipelines + Object.entries(PIPELINE_CONFIGS).forEach(([name, config]) => { + this.pipelines.set(name, new ChatPipeline(config)); + }); + } + + /** + * Get a pipeline by name, or the default one + */ + private getPipeline(name: string = 'default'): ChatPipeline { + return this.pipelines.get(name) || this.pipelines.get('default')!; + } + + /** + * Create a new chat session + */ + async createSession(title?: string, initialMessages: Message[] = []): Promise { + // Create a new Chat Note as the source of truth + const chat = await chatStorageService.createChat(title || 'New Chat', initialMessages); + + const session: ChatSession = { + id: chat.id, + title: chat.title, + messages: chat.messages, + isStreaming: false + }; + + // Session is just a cache now + this.sessionCache.set(chat.id, session); + return session; + } + + /** + * Get an existing session or create a new one + */ + async getOrCreateSession(sessionId?: string): Promise { + if (sessionId) { + // First check the cache + const cachedSession = this.sessionCache.get(sessionId); + if (cachedSession) { + // Refresh the data from the source of truth + const chat = await chatStorageService.getChat(sessionId); + if (chat) { + // Update the cached session with latest data from the note + cachedSession.title = chat.title; + cachedSession.messages = chat.messages; + return cachedSession; + } + } else { + // Not 
in cache, load from the chat note + const chat = await chatStorageService.getChat(sessionId); + if (chat) { + const session: ChatSession = { + id: chat.id, + title: chat.title, + messages: chat.messages, + isStreaming: false + }; + + this.sessionCache.set(chat.id, session); + return session; + } + } + } + + return this.createSession(); + } + + /** + * Send a message in a chat session and get the AI response + */ + async sendMessage( + sessionId: string, + content: string, + options?: ChatCompletionOptions, + streamCallback?: StreamCallback + ): Promise { + const session = await this.getOrCreateSession(sessionId); + + // Add user message + const userMessage: Message = { + role: 'user', + content + }; + + session.messages.push(userMessage); + session.isStreaming = true; + + try { + // Immediately save the user message + await chatStorageService.updateChat(session.id, session.messages); + + // Log message processing + log.info(`Processing message: "${content.substring(0, 100)}..."`); + + // Select pipeline to use + const pipeline = this.getPipeline(); + + // Include sessionId in the options for tool execution tracking + const pipelineOptions = { + ...(options || session.options || {}), + sessionId: session.id + }; + + // Execute the pipeline + const response = await pipeline.execute({ + messages: session.messages, + options: pipelineOptions, + query: content, + streamCallback + }); + + // Add assistant message + const assistantMessage: Message = { + role: 'assistant', + content: response.text, + tool_calls: response.tool_calls + }; + + session.messages.push(assistantMessage); + session.isStreaming = false; + + // Save metadata about the response + const metadata = { + model: response.model, + provider: response.provider, + usage: response.usage + }; + + // If there are tool calls, make sure they're stored in metadata + if (response.tool_calls && response.tool_calls.length > 0) { + // Let the storage service extract and save tool executions + // The tool results are 
already in the messages + } + + // Save the complete conversation with metadata + await chatStorageService.updateChat(session.id, session.messages, undefined, metadata); + + // If first message, update the title based on content + if (session.messages.length <= 2 && (!session.title || session.title === 'New Chat')) { + const title = this.generateTitleFromMessages(session.messages); + session.title = title; + await chatStorageService.updateChat(session.id, session.messages, title); + } + + return session; + + } catch (error: unknown) { + session.isStreaming = false; + console.error('Error in AI chat:', this.handleError(error)); + + // Add error message + const errorMessage: Message = { + role: 'assistant', + content: ERROR_PROMPTS.USER_ERRORS.GENERAL_ERROR + }; + + session.messages.push(errorMessage); + + // Save the conversation with error + await chatStorageService.updateChat(session.id, session.messages); + + // Notify streaming error if callback provided + if (streamCallback) { + streamCallback(errorMessage.content, true); + } + + return session; + } + } + + /** + * Send a message with context from a specific note + */ + async sendContextAwareMessage( + sessionId: string, + content: string, + noteId: string, + options?: ChatCompletionOptions, + streamCallback?: StreamCallback + ): Promise { + const session = await this.getOrCreateSession(sessionId); + + // Add user message + const userMessage: Message = { + role: 'user', + content + }; + + session.messages.push(userMessage); + session.isStreaming = true; + + try { + // Immediately save the user message + await chatStorageService.updateChat(session.id, session.messages); + + // Log message processing + log.info(`Processing context-aware message: "${content.substring(0, 100)}..."`); + log.info(`Using context from note: ${noteId}`); + + // Get showThinking option if it exists + const showThinking = options?.showThinking === true; + + // Select appropriate pipeline based on whether agent tools are needed + const 
pipelineType = showThinking ? 'agent' : 'default'; + const pipeline = this.getPipeline(pipelineType); + + // Include sessionId in the options for tool execution tracking + const pipelineOptions = { + ...(options || session.options || {}), + sessionId: session.id + }; + + // Execute the pipeline with note context + const response = await pipeline.execute({ + messages: session.messages, + options: pipelineOptions, + noteId, + query: content, + showThinking, + streamCallback + }); + + // Add assistant message + const assistantMessage: Message = { + role: 'assistant', + content: response.text, + tool_calls: response.tool_calls + }; + + session.messages.push(assistantMessage); + session.isStreaming = false; + + // Save metadata about the response + const metadata = { + model: response.model, + provider: response.provider, + usage: response.usage, + contextNoteId: noteId // Store the note ID used for context + }; + + // If there are tool calls, make sure they're stored in metadata + if (response.tool_calls && response.tool_calls.length > 0) { + // Let the storage service extract and save tool executions + // The tool results are already in the messages + } + + // Save the complete conversation with metadata to the Chat Note (the single source of truth) + await chatStorageService.updateChat(session.id, session.messages, undefined, metadata); + + // If first message, update the title + if (session.messages.length <= 2 && (!session.title || session.title === 'New Chat')) { + const title = this.generateTitleFromMessages(session.messages); + session.title = title; + await chatStorageService.updateChat(session.id, session.messages, title); + } + + return session; + + } catch (error: unknown) { + session.isStreaming = false; + console.error('Error in context-aware chat:', this.handleError(error)); + + // Add error message + const errorMessage: Message = { + role: 'assistant', + content: ERROR_PROMPTS.USER_ERRORS.CONTEXT_ERROR + }; + + session.messages.push(errorMessage); + + // 
Save the conversation with error to the Chat Note + await chatStorageService.updateChat(session.id, session.messages); + + // Notify streaming error if callback provided + if (streamCallback) { + streamCallback(errorMessage.content, true); + } + + return session; + } + } + + /** + * Add context from the current note to the chat + * + * @param sessionId - The ID of the chat session + * @param noteId - The ID of the note to add context from + * @param useSmartContext - Whether to use smart context extraction (default: true) + * @returns The updated chat session + */ + async addNoteContext(sessionId: string, noteId: string, useSmartContext = true): Promise { + const session = await this.getOrCreateSession(sessionId); + + // Get the last user message to use as context for semantic search + const lastUserMessage = [...session.messages].reverse() + .find(msg => msg.role === 'user' && msg.content.length > 10)?.content || ''; + + // Use the context extraction stage from the pipeline + const pipeline = this.getPipeline(); + const contextResult = await pipeline.stages.contextExtraction.execute({ + noteId, + query: lastUserMessage, + useSmartContext + }) as ContextExtractionResult; + + const contextMessage: Message = { + role: 'user', + content: CONTEXT_PROMPTS.NOTE_CONTEXT_PROMPT.replace('{context}', contextResult.context) + }; + + session.messages.push(contextMessage); + + // Store the context note id in metadata + const metadata = { + contextNoteId: noteId + }; + + // Check if the context extraction result has sources + if (contextResult.sources && contextResult.sources.length > 0) { + // Convert the sources to match expected format (handling null vs undefined) + const sources = contextResult.sources.map(source => ({ + noteId: source.noteId, + title: source.title, + similarity: source.similarity, + // Replace null with undefined for content + content: source.content === null ? 
undefined : source.content + })); + + // Store these sources in metadata + await chatStorageService.recordSources(session.id, sources); + } + + await chatStorageService.updateChat(session.id, session.messages, undefined, metadata); + + return session; + } + + /** + * Add semantically relevant context from a note based on a specific query + */ + async addSemanticNoteContext(sessionId: string, noteId: string, query: string): Promise { + const session = await this.getOrCreateSession(sessionId); + + // Use the semantic context extraction stage from the pipeline + const pipeline = this.getPipeline(); + const contextResult = await pipeline.stages.semanticContextExtraction.execute({ + noteId, + query + }); + + const contextMessage: Message = { + role: 'user', + content: CONTEXT_PROMPTS.SEMANTIC_NOTE_CONTEXT_PROMPT + .replace('{query}', query) + .replace('{context}', contextResult.context) + }; + + session.messages.push(contextMessage); + + // Store the context note id and query in metadata + const metadata = { + contextNoteId: noteId + }; + + // Check if the semantic context extraction result has sources + const contextSources = (contextResult as ContextExtractionResult).sources || []; + if (contextSources && contextSources.length > 0) { + // Convert the sources to the format expected by recordSources + const sources = contextSources.map((source) => ({ + noteId: source.noteId, + title: source.title, + similarity: source.similarity, + content: source.content === null ? 
undefined : source.content + })); + + // Store these sources in metadata + await chatStorageService.recordSources(session.id, sources); + } + + await chatStorageService.updateChat(session.id, session.messages, undefined, metadata); + + return session; + } + + /** + * Get all user's chat sessions + */ + async getAllSessions(): Promise { + // Always fetch the latest data from notes + const chats = await chatStorageService.getAllChats(); + + // Update the cache with the latest data + return chats.map(chat => { + const cachedSession = this.sessionCache.get(chat.id); + + const session: ChatSession = { + id: chat.id, + title: chat.title, + messages: chat.messages, + isStreaming: cachedSession?.isStreaming || false + }; + + // Update the cache + if (cachedSession) { + cachedSession.title = chat.title; + cachedSession.messages = chat.messages; + } else { + this.sessionCache.set(chat.id, session); + } + + return session; + }); + } + + /** + * Delete a chat session + */ + async deleteSession(sessionId: string): Promise { + this.sessionCache.delete(sessionId); + return chatStorageService.deleteChat(sessionId); + } + + /** + * Get pipeline performance metrics + */ + getPipelineMetrics(pipelineType: string = 'default'): unknown { + const pipeline = this.getPipeline(pipelineType); + return pipeline.getMetrics(); + } + + /** + * Reset pipeline metrics + */ + resetPipelineMetrics(pipelineType: string = 'default'): void { + const pipeline = this.getPipeline(pipelineType); + pipeline.resetMetrics(); + } + + /** + * Generate a title from the first messages in a conversation + */ + private generateTitleFromMessages(messages: Message[]): string { + if (messages.length < 2) { + return 'New Chat'; + } + + // Get the first user message + const firstUserMessage = messages.find(m => m.role === 'user'); + if (!firstUserMessage) { + return 'New Chat'; + } + + // Extract first line or first few words + const firstLine = firstUserMessage.content.split('\n')[0].trim(); + + if (firstLine.length 
<= 30) { + return firstLine; + } + + // Take first 30 chars if too long + return firstLine.substring(0, 27) + '...'; + } + + /** + * Generate a chat completion with a sequence of messages + * @param messages Messages array to send to the AI provider + * @param options Chat completion options + */ + async generateChatCompletion(messages: Message[], options: ChatCompletionOptions = {}): Promise { + log.info(`========== CHAT SERVICE FLOW CHECK ==========`); + log.info(`Entered generateChatCompletion in ChatService`); + log.info(`Using pipeline for chat completion: ${this.getPipeline(options.pipeline).constructor.name}`); + log.info(`Tool support enabled: ${options.enableTools !== false}`); + + try { + // Get AI service + const service = await aiServiceManager.getService(); + if (!service) { + throw new Error('No AI service available'); + } + + log.info(`Using AI service: ${service.getName()}`); + + // Prepare query extraction + const lastUserMessage = [...messages].reverse().find(m => m.role === 'user'); + const query = lastUserMessage ? lastUserMessage.content : undefined; + + // For advanced context processing, use the pipeline + if (options.useAdvancedContext && query) { + log.info(`Using chat pipeline for advanced context with query: ${query.substring(0, 50)}...`); + + // Create a pipeline input with the query and messages + const pipelineInput: ChatPipelineInput = { + messages, + options, + query, + noteId: options.noteId + }; + + // Execute the pipeline + const pipeline = this.getPipeline(options.pipeline); + const response = await pipeline.execute(pipelineInput); + log.info(`Pipeline execution complete, response contains tools: ${response.tool_calls ? 
'yes' : 'no'}`); + if (response.tool_calls) { + log.info(`Tool calls in pipeline response: ${response.tool_calls.length}`); + } + return response; + } + + // If not using advanced context, use direct service call + return await service.generateChatCompletion(messages, options); + } catch (error: unknown) { + console.error('Error in generateChatCompletion:', error); + throw error; + } + } + + /** + * Error handler utility + */ + private handleError(error: unknown): string { + if (error instanceof Error) { + return error.message || String(error); + } + return String(error); + } +} + +// Singleton instance +const chatService = new ChatService(); +export default chatService; diff --git a/src/services/llm/chat_storage_service.ts b/src/services/llm/chat_storage_service.ts new file mode 100644 index 000000000..578f75ab7 --- /dev/null +++ b/src/services/llm/chat_storage_service.ts @@ -0,0 +1,451 @@ +import notes from '../notes.js'; +import sql from '../sql.js'; +import attributes from '../attributes.js'; +import type { Message } from './ai_interface.js'; +import type { ToolCall } from './tools/tool_interfaces.js'; +import { t } from 'i18next'; +import log from '../log.js'; + +interface StoredChat { + id: string; + title: string; + messages: Message[]; + noteId?: string; + createdAt: Date; + updatedAt: Date; + metadata?: ChatMetadata; +} + +interface ChatMetadata { + sources?: Array<{ + noteId: string; + title: string; + similarity?: number; + path?: string; + branchId?: string; + content?: string; + }>; + model?: string; + provider?: string; + contextNoteId?: string; + toolExecutions?: Array; + usage?: { + promptTokens?: number; + completionTokens?: number; + totalTokens?: number; + }; + temperature?: number; + maxTokens?: number; +} + +interface ToolExecution { + id: string; + name: string; + arguments: Record | string; + result: string | Record; + error?: string; + timestamp: Date; + executionTime?: number; +} + +/** + * Service for storing and retrieving chat histories 
+ * Chats are stored as a special type of note + */ +export class ChatStorageService { + private static readonly CHAT_LABEL = 'triliumChat'; + private static readonly CHAT_ROOT_LABEL = 'triliumChatRoot'; + private static readonly CHAT_TYPE = 'code'; + private static readonly CHAT_MIME = 'application/json'; + + /** + * Get or create the root note for all chats + */ + async getOrCreateChatRoot(): Promise { + const existingRoot = await sql.getRow<{noteId: string}>( + `SELECT noteId FROM attributes WHERE name = ? AND value = ?`, + ['label', ChatStorageService.CHAT_ROOT_LABEL] + ); + + if (existingRoot) { + return existingRoot.noteId; + } + + // Create root note for chats + const { note } = notes.createNewNote({ + parentNoteId: 'root', + title: t('ai.chat.root_note_title'), + type: 'text', + content: t('ai.chat.root_note_content') + }); + + attributes.createLabel( + note.noteId, + ChatStorageService.CHAT_ROOT_LABEL, + '' + ); + + return note.noteId; + } + + /** + * Create a new chat + */ + async createChat(title: string, messages: Message[] = [], metadata?: ChatMetadata): Promise { + const rootNoteId = await this.getOrCreateChatRoot(); + const now = new Date(); + + const { note } = notes.createNewNote({ + parentNoteId: rootNoteId, + title: title || t('ai.chat.new_chat_title') + ' ' + now.toLocaleString(), + type: ChatStorageService.CHAT_TYPE, + mime: ChatStorageService.CHAT_MIME, + content: JSON.stringify({ + messages, + metadata: metadata || {}, + createdAt: now, + updatedAt: now + }, null, 2) + }); + + attributes.createLabel( + note.noteId, + ChatStorageService.CHAT_LABEL, + '' + ); + + return { + id: note.noteId, + title: title || t('ai.chat.new_chat_title') + ' ' + now.toLocaleString(), + messages, + noteId: note.noteId, + createdAt: now, + updatedAt: now, + metadata: metadata || {} + }; + } + + /** + * Get all chats + */ + async getAllChats(): Promise { + const chats = await sql.getRows<{noteId: string, title: string, dateCreated: string, dateModified: string, 
content: string}>( + `SELECT notes.noteId, notes.title, notes.dateCreated, notes.dateModified, blobs.content + FROM notes + JOIN blobs ON notes.blobId = blobs.blobId + JOIN attributes ON notes.noteId = attributes.noteId + WHERE attributes.name = ? AND attributes.value = ? + ORDER BY notes.dateModified DESC`, + ['label', ChatStorageService.CHAT_LABEL] + ); + + return chats.map(chat => { + let messages: Message[] = []; + let metadata: ChatMetadata = {}; + let createdAt = new Date(chat.dateCreated); + let updatedAt = new Date(chat.dateModified); + + try { + const content = JSON.parse(chat.content); + messages = content.messages || []; + metadata = content.metadata || {}; + + // Use stored dates if available + if (content.createdAt) { + createdAt = new Date(content.createdAt); + } + if (content.updatedAt) { + updatedAt = new Date(content.updatedAt); + } + } catch (e) { + console.error('Failed to parse chat content:', e); + } + + return { + id: chat.noteId, + title: chat.title, + messages, + noteId: chat.noteId, + createdAt, + updatedAt, + metadata + }; + }); + } + + /** + * Get a specific chat + */ + async getChat(chatId: string): Promise { + const chat = await sql.getRow<{noteId: string, title: string, dateCreated: string, dateModified: string, content: string}>( + `SELECT notes.noteId, notes.title, notes.dateCreated, notes.dateModified, blobs.content + FROM notes + JOIN blobs ON notes.blobId = blobs.blobId + WHERE notes.noteId = ?`, + [chatId] + ); + + if (!chat) { + return null; + } + + let messages: Message[] = []; + let metadata: ChatMetadata = {}; + let createdAt = new Date(chat.dateCreated); + let updatedAt = new Date(chat.dateModified); + + try { + const content = JSON.parse(chat.content); + messages = content.messages || []; + metadata = content.metadata || {}; + + // Use stored dates if available + if (content.createdAt) { + createdAt = new Date(content.createdAt); + } + if (content.updatedAt) { + updatedAt = new Date(content.updatedAt); + } + } catch (e) { + 
console.error('Failed to parse chat content:', e); + } + + return { + id: chat.noteId, + title: chat.title, + messages, + noteId: chat.noteId, + createdAt, + updatedAt, + metadata + }; + } + + /** + * Update messages in a chat + */ + async updateChat( + chatId: string, + messages: Message[], + title?: string, + metadata?: ChatMetadata + ): Promise { + const chat = await this.getChat(chatId); + + if (!chat) { + return null; + } + + const now = new Date(); + const updatedMetadata = {...(chat.metadata || {}), ...(metadata || {})}; + + // Extract and store tool calls from the messages + const toolExecutions = this.extractToolExecutionsFromMessages(messages, updatedMetadata.toolExecutions || []); + if (toolExecutions.length > 0) { + updatedMetadata.toolExecutions = toolExecutions; + } + + // Update content directly using SQL since we don't have a method for this in the notes service + await sql.execute( + `UPDATE blobs SET content = ? WHERE blobId = (SELECT blobId FROM notes WHERE noteId = ?)`, + [JSON.stringify({ + messages, + metadata: updatedMetadata, + createdAt: chat.createdAt, + updatedAt: now + }, null, 2), chatId] + ); + + // Update title if provided + if (title && title !== chat.title) { + await sql.execute( + `UPDATE notes SET title = ? 
WHERE noteId = ?`, + [title, chatId] + ); + } + + return { + ...chat, + title: title || chat.title, + messages, + updatedAt: now, + metadata: updatedMetadata + }; + } + + /** + * Delete a chat + */ + async deleteChat(chatId: string): Promise { + try { + // Mark note as deleted using SQL since we don't have deleteNote in the exports + await sql.execute( + `UPDATE notes SET isDeleted = 1 WHERE noteId = ?`, + [chatId] + ); + + return true; + } catch (e) { + console.error('Failed to delete chat:', e); + return false; + } + } + + /** + * Record a new tool execution + */ + async recordToolExecution( + chatId: string, + toolName: string, + toolId: string, + args: Record | string, + result: string | Record, + error?: string + ): Promise { + try { + const chat = await this.getChat(chatId); + if (!chat) return false; + + const toolExecution: ToolExecution = { + id: toolId, + name: toolName, + arguments: args, + result, + error, + timestamp: new Date(), + executionTime: 0 // Could track this if we passed in a start time + }; + + const currentToolExecutions = chat.metadata?.toolExecutions || []; + currentToolExecutions.push(toolExecution); + + await this.updateChat( + chatId, + chat.messages, + undefined, // Don't change title + { + ...chat.metadata, + toolExecutions: currentToolExecutions + } + ); + + return true; + } catch (e) { + log.error(`Failed to record tool execution: ${e}`); + return false; + } + } + + /** + * Extract tool executions from messages + * This helps maintain a record of all tool calls even if messages are truncated + */ + private extractToolExecutionsFromMessages( + messages: Message[], + existingToolExecutions: ToolExecution[] = [] + ): ToolExecution[] { + const toolExecutions = [...existingToolExecutions]; + const executedToolIds = new Set(existingToolExecutions.map(t => t.id)); + + // Process all messages to find tool calls and their results + const assistantMessages = messages.filter(msg => msg.role === 'assistant' && msg.tool_calls); + const 
toolMessages = messages.filter(msg => msg.role === 'tool'); + + // Create a map of tool responses by tool_call_id + const toolResponseMap = new Map(); + for (const toolMsg of toolMessages) { + if (toolMsg.tool_call_id) { + toolResponseMap.set(toolMsg.tool_call_id, toolMsg.content); + } + } + + // Extract all tool calls and pair with responses + for (const assistantMsg of assistantMessages) { + if (!assistantMsg.tool_calls || !Array.isArray(assistantMsg.tool_calls)) continue; + + for (const toolCall of assistantMsg.tool_calls as ToolCall[]) { + if (!toolCall.id || executedToolIds.has(toolCall.id)) continue; + + const toolResponse = toolResponseMap.get(toolCall.id); + if (!toolResponse) continue; // Skip if no response found + + // We found a tool call with a response, record it + let args: Record | string; + if (typeof toolCall.function.arguments === 'string') { + try { + args = JSON.parse(toolCall.function.arguments); + } catch (e) { + args = toolCall.function.arguments; + } + } else { + args = toolCall.function.arguments; + } + + let result: string | Record = toolResponse; + try { + // Try to parse result as JSON if it starts with { or [ + if (toolResponse.trim().startsWith('{') || toolResponse.trim().startsWith('[')) { + result = JSON.parse(toolResponse); + } + } catch (e) { + // Keep as string if parsing fails + result = toolResponse; + } + + const isError = toolResponse.startsWith('Error:'); + const toolExecution: ToolExecution = { + id: toolCall.id, + name: toolCall.function.name, + arguments: args, + result, + error: isError ? 
toolResponse.substring('Error:'.length).trim() : undefined, + timestamp: new Date() + }; + + toolExecutions.push(toolExecution); + executedToolIds.add(toolCall.id); + } + } + + return toolExecutions; + } + + /** + * Store sources used in a chat + */ + async recordSources( + chatId: string, + sources: Array<{ + noteId: string; + title: string; + similarity?: number; + path?: string; + branchId?: string; + content?: string; + }> + ): Promise { + try { + const chat = await this.getChat(chatId); + if (!chat) return false; + + await this.updateChat( + chatId, + chat.messages, + undefined, // Don't change title + { + ...chat.metadata, + sources + } + ); + + return true; + } catch (e) { + log.error(`Failed to record sources: ${e}`); + return false; + } + } +} + +// Singleton instance +const chatStorageService = new ChatStorageService(); +export default chatStorageService; diff --git a/src/services/llm/constants/embedding_constants.ts b/src/services/llm/constants/embedding_constants.ts new file mode 100644 index 000000000..07d3f83a3 --- /dev/null +++ b/src/services/llm/constants/embedding_constants.ts @@ -0,0 +1,9 @@ +export const EMBEDDING_CONSTANTS = { + exactTitleMatch: 0.3, + titleContainsQuery: 0.2, + partialTitleMatch: 0.1, + sameType: 0.05, + attributeMatch: 0.05, + recentlyCreated: 0.05, + recentlyModified: 0.05 +}; diff --git a/src/services/llm/constants/formatter_constants.ts b/src/services/llm/constants/formatter_constants.ts new file mode 100644 index 000000000..d0e745c1c --- /dev/null +++ b/src/services/llm/constants/formatter_constants.ts @@ -0,0 +1,203 @@ +/** + * Formatter Constants + * + * Constants related to message formatters for different LLM providers. + * This centralizes string formatting patterns, HTML cleaning options, + * and other formatter-specific constants that were previously hardcoded. 
+ */
+
+/**
+ * HTML tag allowlists for different formatter strictness levels
+ */
+export const HTML_ALLOWED_TAGS = {
+    // Standard set used by most formatters
+    STANDARD: ['b', 'i', 'em', 'strong', 'a', 'p', 'br', 'ul', 'ol', 'li', 'h1', 'h2', 'h3', 'h4', 'h5', 'code', 'pre'],
+
+    // Minimal set for providers with limited HTML support
+    MINIMAL: ['b', 'i', 'p', 'br', 'a'],
+
+    // Empty set for plain text only (Ollama)
+    NONE: []
+};
+
+/**
+ * HTML attribute allowlists
+ */
+export const HTML_ALLOWED_ATTRIBUTES = {
+    // Standard set of allowed attributes
+    STANDARD: {
+        'a': ['href']
+    },
+
+    // Empty set for plain text only
+    NONE: {}
+};
+
+/**
+ * HTML tag transformations
+ */
+export const HTML_TRANSFORMS = {
+    // Standard transformations
+    STANDARD: {
+        'h1': 'h2',
+        'h2': 'h3',
+        'div': 'p',
+        'span': 'span'
+    }
+};
+
+/**
+ * RegEx patterns for HTML to Markdown conversion
+ */
+export const HTML_TO_MARKDOWN_PATTERNS = {
+    // Headings
+    HEADING_1: { pattern: /<h1[^>]*>(.*?)<\/h1>/gi, replacement: '# $1\n' },
+    HEADING_2: { pattern: /<h2[^>]*>(.*?)<\/h2>/gi, replacement: '## $1\n' },
+    HEADING_3: { pattern: /<h3[^>]*>(.*?)<\/h3>/gi, replacement: '### $1\n' },
+    HEADING_4: { pattern: /<h4[^>]*>(.*?)<\/h4>/gi, replacement: '#### $1\n' },
+    HEADING_5: { pattern: /<h5[^>]*>(.*?)<\/h5>/gi, replacement: '##### $1\n' },
+
+    // Paragraph and line breaks
+    PARAGRAPH: { pattern: /<p[^>]*>(.*?)<\/p>/gi, replacement: '$1\n\n' },
+    BREAK: { pattern: /<br[^>]*>/gi, replacement: '\n' },
+
+    // Links and formatting
+    LINK: { pattern: /<a[^>]*href=["'](.*?)["'][^>]*>(.*?)<\/a>/gi, replacement: '[$2]($1)' },
+    STRONG: { pattern: /<strong[^>]*>(.*?)<\/strong>/gi, replacement: '**$1**' },
+    BOLD: { pattern: /<b[^>]*>(.*?)<\/b>/gi, replacement: '**$1**' },
+    EMPHASIS: { pattern: /<em[^>]*>(.*?)<\/em>/gi, replacement: '*$1*' },
+    ITALIC: { pattern: /<i[^>]*>(.*?)<\/i>/gi, replacement: '*$1*' },
+
+    // Code
+    INLINE_CODE: { pattern: /<code[^>]*>(.*?)<\/code>/gi, replacement: '`$1`' },
+    CODE_BLOCK: { pattern: /<pre[^>]*>(.*?)<\/pre>/gi, replacement: '```\n$1\n```' },
+
+    // Clean up
+    ANY_REMAINING_TAG: { pattern: /<[^>]*>/g, replacement: '' },
+    EXCESSIVE_NEWLINES: { pattern: /\n{3,}/g, replacement: '\n\n' }
+};
+
+/**
+ * HTML entity replacements
+ */
+export const HTML_ENTITY_REPLACEMENTS = {
+    // Common HTML entities
+    NBSP: { pattern: /&nbsp;/g, replacement: ' ' },
+    LT: { pattern: /&lt;/g, replacement: '<' },
+    GT: { pattern: /&gt;/g, replacement: '>' },
+    AMP: { pattern: /&amp;/g, replacement: '&' },
+    QUOT: { pattern: /&quot;/g, replacement: '"' },
+    APOS: { pattern: /&apos;/g, replacement: "'" },
+    LDQUO: { pattern: /&ldquo;/g, replacement: '"' },
+    RDQUO: { pattern: /&rdquo;/g, replacement: '"' },
+    LSQUO: { pattern: /&lsquo;/g, replacement: "'" },
+    RSQUO: { pattern: /&rsquo;/g, replacement: "'" },
+    MDASH: { pattern: /&mdash;/g, replacement: '—' },
+    NDASH: { pattern: /&ndash;/g, replacement: '–' },
+    HELLIP: { pattern: /&hellip;/g, replacement: '…' }
+};
+
+/**
+ * Encoding issue fixes
+ */
+export const ENCODING_FIXES = {
+    // Common encoding issues
+    BROKEN_QUOTES: { pattern: /Γ\u00c2[\u00a3\u00a5]/g, replacement: '"' },
+
+    // Character replacements for Unicode
+    UNICODE_REPLACEMENTS: {
+        '\u00A0': ' ', // Non-breaking space
+        '\u2018': "'", // Left single quote
+        '\u2019': "'", // Right single quote
+        '\u201C': '"', // Left double quote
+        '\u201D': '"', // Right double quote
+        '\u2013': '-', // En dash
+        '\u2014': '--', // Em dash
+        '\u2022': '*', // Bullet
+        '\u2026': '...' // Ellipsis
+    }
+};
+
+/**
+ * Ollama-specific cleaning patterns
+ */
+export const OLLAMA_CLEANING = {
+    // Replace fancy quotes
+    QUOTES: { pattern: /[""]/g, replacement: '"' },
+    APOSTROPHES: { pattern: /['']/g, replacement: "'" },
+
+    // Replace other Unicode characters
+    DASHES: { pattern: /[–—]/g, replacement: '-' },
+    BULLETS: { pattern: /[•]/g, replacement: '*' },
+    ELLIPSES: { pattern: /[…]/g, replacement: '...'
}, + + // Remove non-ASCII characters + NON_ASCII: { pattern: /[^\x00-\x7F]/g, replacement: '' }, + + // Normalize whitespace + WHITESPACE: { pattern: /\s+/g, replacement: ' ' }, + NEWLINE_WHITESPACE: { pattern: /\n\s+/g, replacement: '\n' } +}; + +/** + * Console log messages for formatters + */ +export const FORMATTER_LOGS = { + ANTHROPIC: { + PROCESSED: (before: number, after: number) => `Anthropic formatter: ${before} messages → ${after} messages` + }, + OPENAI: { + PROCESSED: (before: number, after: number) => `OpenAI formatter: ${before} messages → ${after} messages` + }, + OLLAMA: { + PROCESSED: (before: number, after: number) => `Ollama formatter processed ${before} messages into ${after} messages` + }, + ERROR: { + CONTEXT_CLEANING: (provider: string) => `Error cleaning content for ${provider}:`, + ENCODING: 'Error fixing encoding issues:' + } +}; + +/** + * Message formatter text templates + */ +export const MESSAGE_FORMATTER_TEMPLATES = { + /** + * OpenAI-specific message templates + */ + OPENAI: { + CONTEXT_INSTRUCTION: 'Please use the following context to respond to the user\'s messages:\n\n' + }, + + /** + * Anthropic-specific message templates + */ + ANTHROPIC: { + CONTEXT_START: '\n\n\n', + CONTEXT_END: '\n' + }, + + /** + * Ollama-specific message templates + */ + OLLAMA: { + REFERENCE_INFORMATION: '\n\nReference information:\n' + }, + + /** + * Default formatter message templates + */ + DEFAULT: { + CONTEXT_INSTRUCTION: 'Here is context to help you answer my questions: ' + } +}; + +/** + * Provider identifier constants + */ +export const PROVIDER_IDENTIFIERS = { + OPENAI: 'openai', + ANTHROPIC: 'anthropic', + OLLAMA: 'ollama', + DEFAULT: 'default' +}; diff --git a/src/services/llm/constants/hierarchy_constants.ts b/src/services/llm/constants/hierarchy_constants.ts new file mode 100644 index 000000000..40cbedd1f --- /dev/null +++ b/src/services/llm/constants/hierarchy_constants.ts @@ -0,0 +1,35 @@ +/** + * Hierarchy Context Constants + * + * This 
file centralizes all strings used in the note hierarchy context + * functionality. These strings are used when displaying information about parent-child + * relationships and note relations in the LLM context building process. + */ + +export const HIERARCHY_STRINGS = { + // Parent context strings + PARENT_CONTEXT: { + NO_PARENT_CONTEXT: 'No parent context available.', + CURRENT_NOTE: (title: string) => `${title} (current note)`, + }, + + // Child context strings + CHILD_CONTEXT: { + NO_CHILD_NOTES: 'No child notes.', + CHILD_NOTES_HEADER: (count: number) => `Child notes (${count} total)`, + CHILD_SUMMARY_PREFIX: 'Summary: ', + MORE_CHILDREN: (count: number) => `... and ${count} more child notes not shown`, + ERROR_RETRIEVING: 'Error retrieving child notes.' + }, + + // Linked notes context strings + LINKED_NOTES: { + NO_LINKED_NOTES: 'No linked notes.', + OUTGOING_RELATIONS_HEADER: (count: number) => `Outgoing relations (${count} total)`, + INCOMING_RELATIONS_HEADER: (count: number) => `Incoming relations (${count} total)`, + DEFAULT_RELATION: 'relates to', + MORE_OUTGOING: (count: number) => `... and ${count} more outgoing relations not shown`, + MORE_INCOMING: (count: number) => `... and ${count} more incoming relations not shown`, + ERROR_RETRIEVING: 'Error retrieving linked notes.' + } +}; diff --git a/src/services/llm/constants/llm_prompt_constants.ts b/src/services/llm/constants/llm_prompt_constants.ts new file mode 100644 index 000000000..68081f9c0 --- /dev/null +++ b/src/services/llm/constants/llm_prompt_constants.ts @@ -0,0 +1,298 @@ +/** + * LLM Prompt Constants + * + * This file centralizes all LLM/AI prompts used throughout the application. + * When adding new prompts, please add them here rather than hardcoding them in other files. + * + * Prompts are organized by their usage context (e.g., service, feature, etc.) 
+ */ + +import fs from 'fs'; +import path from 'path'; +import { fileURLToPath } from 'url'; + +// Load system prompt from markdown file +const loadSystemPrompt = (): string => { + try { + const __filename = fileURLToPath(import.meta.url); + const __dirname = path.dirname(__filename); + + const promptPath = path.join(__dirname, '../prompts/base_system_prompt.md'); + const promptContent = fs.readFileSync(promptPath, 'utf8'); + // Strip the markdown title if needed + return promptContent.replace(/^# TriliumNext Base System Prompt\n+/, ''); + } catch (error) { + console.error('Failed to load system prompt from file:', error); + // Return fallback prompt if file can't be loaded + return "You are a helpful assistant embedded in the TriliumNext Notes application. " + + "You can help users with their notes, answer questions, and provide information. " + + "Keep your responses concise and helpful. " + + "You're currently chatting with the user about their notes."; + } +}; + +// Base system prompt loaded from markdown file +export const DEFAULT_SYSTEM_PROMPT = loadSystemPrompt(); + +/** + * System prompts for different use cases + */ +export const SYSTEM_PROMPTS = { + DEFAULT_SYSTEM_PROMPT: + "You are an intelligent AI assistant for Trilium Notes, a hierarchical note-taking application. " + + "Help the user with their notes, knowledge management, and questions. " + + "When referencing their notes, be clear about which note you're referring to. " + + "Be concise but thorough in your responses.", + + AGENT_TOOLS_PROMPT: + "You are an intelligent AI assistant for Trilium Notes with access to special tools. " + + "You can use these tools to search through the user's notes and find relevant information. " + + "Always be helpful, accurate, and respect the user's privacy and security.", + + CONTEXT_AWARE_PROMPT: + "You are an intelligent AI assistant for Trilium Notes. " + + "You have access to the context from the user's notes. 
" + + "Use this context to provide accurate and helpful responses. " + + "Be specific when referencing information from their notes." +}; + +// Context-specific prompts +export const CONTEXT_PROMPTS = { + // Query enhancer prompt for generating better search terms + QUERY_ENHANCER: + `You are an AI assistant that decides what information needs to be retrieved from a user's knowledge base called TriliumNext Notes to answer the user's question. +Given the user's question, generate 3-5 specific search queries that would help find relevant information. +Each query should be focused on a different aspect of the question. +Avoid generating queries that are too broad, vague, or about a user's entire Note database, and make sure they are relevant to the user's question. +Format your answer as a JSON array of strings, with each string being a search query. +Example: ["exact topic mentioned", "related concept 1", "related concept 2"]`, + + // Used to format notes context when providing responses + CONTEXT_NOTES_WRAPPER: + `I'll provide you with relevant information from my notes to help answer your question. + + +{noteContexts} + + +When referring to information from these notes in your response, please cite them by their titles (e.g., "According to your note on [Title]...") rather than using labels like "Note 1" or "Note 2". + +Now, based on the above information, please answer: {query}`, + + // Default fallback when no notes are found + NO_NOTES_CONTEXT: + "I am an AI assistant helping you with your Trilium notes. " + + "I couldn't find any specific notes related to your query, but I'll try to assist you " + + "with general knowledge about Trilium or other topics you're interested in.", + + // Fallback when context building fails + ERROR_FALLBACK_CONTEXT: + "I'm your AI assistant helping with your Trilium notes. 
I'll try to answer based on what I know.", + + // Headers for context (by provider) + CONTEXT_HEADERS: { + ANTHROPIC: (query: string) => + `I'm your AI assistant helping with your Trilium notes database. For your query: "${query}", I found these relevant `, + DEFAULT: (query: string) => + `I've found some relevant information in your notes that may help answer: "${query}"\n\n` + }, + + // Closings for context (by provider) + CONTEXT_CLOSINGS: { + ANTHROPIC: + "\n\nPlease use this information to answer the user's query. If the notes don't contain enough information, you can use your general knowledge as well.", + DEFAULT: + "\n\nBased on this information from the user's notes, please provide a helpful response." + }, + + // Context for index service + INDEX_NO_NOTES_CONTEXT: + "I'm an AI assistant helping with your Trilium notes. I couldn't find specific notes related to your query, but I'll try to assist based on general knowledge.", + + // Prompt for adding note context to chat + NOTE_CONTEXT_PROMPT: `Here is the content of the note I want to discuss: + + +{context} + + +Please help me with this information.`, + + // Prompt for adding semantic note context to chat + SEMANTIC_NOTE_CONTEXT_PROMPT: `Here is the relevant information from my notes based on my query "{query}": + + +{context} + + +Please help me understand this information in relation to my query.`, + + // System message prompt for context-aware chat + CONTEXT_AWARE_SYSTEM_PROMPT: `You are an AI assistant helping with Trilium Notes. Use this context to answer the user's question: + + +{enhancedContext} +`, + + // Error messages + ERROR_MESSAGES: { + GENERAL_ERROR: `Error: Failed to generate response. {errorMessage}`, + CONTEXT_ERROR: `Error: Failed to generate response with note context. 
{errorMessage}` + }, + + // Merged from JS file + AGENT_TOOLS_CONTEXT_PROMPT: + "You have access to the following tools to help answer the user's question: {tools}" +}; + +// Agent tool prompts +export const AGENT_TOOL_PROMPTS = { + // Prompts for query decomposition + QUERY_DECOMPOSITION: { + SUB_QUERY_DIRECT: 'Direct question that can be answered without decomposition', + SUB_QUERY_GENERIC: 'Generic exploration to find related content', + SUB_QUERY_ERROR: 'Error in decomposition, treating as simple query', + SUB_QUERY_DIRECT_ANALYSIS: 'Direct analysis of note details', + ORIGINAL_QUERY: 'Original query' + }, + + // Prompts for contextual thinking tool + CONTEXTUAL_THINKING: { + STARTING_ANALYSIS: (query: string) => `Starting analysis of the query: "${query}"`, + KEY_COMPONENTS: 'What are the key components of this query that need to be addressed?', + BREAKING_DOWN: 'Breaking down the query to understand its requirements and context.' + } +}; + +// Provider-specific prompt modifiers +export const PROVIDER_PROMPTS = { + ANTHROPIC: { + // Anthropic Claude-specific prompt formatting + SYSTEM_WITH_CONTEXT: (context: string) => + ` +${DEFAULT_SYSTEM_PROMPT} + +Use the following information from the user's notes to answer their questions: + + +${context} + + +When responding: +- Focus on the most relevant information from the notes +- Be concise and direct in your answers +- If quoting from notes, mention which note it's from +- If the notes don't contain relevant information, say so clearly +`, + + INSTRUCTIONS_WRAPPER: (instructions: string) => + `\n${instructions}\n`, + + ACKNOWLEDGMENT: "I understand. I'll follow those instructions.", + CONTEXT_ACKNOWLEDGMENT: "I'll help you with your notes based on the context provided.", + CONTEXT_QUERY_ACKNOWLEDGMENT: "I'll help you with your notes based on the context provided. What would you like to know?" 
+ }, + + OPENAI: { + // OpenAI-specific prompt formatting + SYSTEM_WITH_CONTEXT: (context: string) => + ` +You are an AI assistant integrated into TriliumNext Notes. +Use the following information from the user's notes to answer their questions: + + +${context} + + +Focus on relevant information from these notes when answering. +Be concise and informative in your responses. +` + }, + + OLLAMA: { + // Ollama-specific prompt formatting + CONTEXT_INJECTION: (context: string, query: string) => + `Here's information from my notes to help answer the question: + +${context} + +Based on this information, please answer: ${query}` + }, + + // Common prompts across providers + COMMON: { + DEFAULT_ASSISTANT_INTRO: "You are an AI assistant integrated into TriliumNext Notes. Focus on helping users find information in their notes and answering questions based on their knowledge base. Be concise, informative, and direct when responding to queries." + } +}; + +// Constants for formatting context and messages +export const FORMATTING_PROMPTS = { + // Headers for context formatting + CONTEXT_HEADERS: { + SIMPLE: (query: string) => `I'm searching for information about: ${query}\n\nHere are the most relevant notes from my knowledge base:`, + DETAILED: (query: string) => `I'm searching for information about: "${query}"\n\nHere are the most relevant notes from my personal knowledge base:` + }, + + // Closing text for context formatting + CONTEXT_CLOSERS: { + SIMPLE: `\nEnd of notes. Please use this information to answer my question comprehensively.`, + DETAILED: `\nEnd of context information. 
Please use only the above notes to answer my question as comprehensively as possible.` + }, + + // Dividers used in context formatting + DIVIDERS: { + NOTE_SECTION: `------ NOTE INFORMATION ------`, + CONTENT_SECTION: `------ CONTEXT INFORMATION ------`, + NOTE_START: `# Note: `, + CONTENT_START: `Content: ` + }, + + HTML_ALLOWED_TAGS: ['b', 'i', 'em', 'strong', 'a', 'p', 'br', 'ul', 'ol', 'li', 'h1', 'h2', 'h3', 'h4', 'h5', 'code', 'pre'] +}; + +// Prompt templates for chat service +export const CHAT_PROMPTS = { + // Introduction messages for new chats + INTRODUCTIONS: { + NEW_CHAT: "Welcome to TriliumNext AI Assistant. How can I help you with your notes today?", + SEMANTIC_SEARCH: "I'll search through your notes for relevant information. What would you like to know?" + }, + + // Placeholders for various chat scenarios + PLACEHOLDERS: { + NO_CONTEXT: "I don't have any specific note context yet. Would you like me to search your notes for something specific?", + WAITING_FOR_QUERY: "Awaiting your question..." + } +}; + +// Error messages and fallbacks +export const ERROR_PROMPTS = { + // User-facing error messages + USER_ERRORS: { + GENERAL_ERROR: "I encountered an error processing your request. Please try again or rephrase your question.", + CONTEXT_ERROR: "I couldn't retrieve context from your notes. Please check your query or try a different question.", + NETWORK_ERROR: "There was a network error connecting to the AI service. Please check your connection and try again.", + RATE_LIMIT: "The AI service is currently experiencing high demand. Please try again in a moment.", + + // Merged from JS file + PROVIDER_ERROR: + "I'm sorry, but there seems to be an issue with the AI service provider. " + + "Please check your connection and API settings, or try again later." 
+ }, + + // Internal error handling + INTERNAL_ERRORS: { + CONTEXT_PROCESSING: "Error processing context data", + MESSAGE_FORMATTING: "Error formatting messages for LLM", + RESPONSE_PARSING: "Error parsing LLM response" + }, + + // Merged from JS file + SYSTEM_ERRORS: { + NO_PROVIDER_AVAILABLE: + "No AI provider is available. Please check your AI settings and ensure at least one provider is configured properly.", + + UNAUTHORIZED: + "The AI provider returned an authorization error. Please check your API key settings." + } +}; diff --git a/src/services/llm/constants/provider_constants.ts b/src/services/llm/constants/provider_constants.ts new file mode 100644 index 000000000..e1cccecc6 --- /dev/null +++ b/src/services/llm/constants/provider_constants.ts @@ -0,0 +1,215 @@ +export const PROVIDER_CONSTANTS = { + ANTHROPIC: { + API_VERSION: '2023-06-01', + BETA_VERSION: 'messages-2023-12-15', + BASE_URL: 'https://api.anthropic.com', + DEFAULT_MODEL: 'claude-3-haiku-20240307', + // Model mapping for simplified model names to their full versions + MODEL_MAPPING: { + 'claude-3.7-sonnet': 'claude-3-7-sonnet-20250219', + 'claude-3.5-sonnet': 'claude-3-5-sonnet-20241022', + 'claude-3.5-haiku': 'claude-3-5-haiku-20241022', + 'claude-3-opus': 'claude-3-opus-20240229', + 'claude-3-sonnet': 'claude-3-sonnet-20240229', + 'claude-3-haiku': 'claude-3-haiku-20240307', + 'claude-2': 'claude-2.1' + }, + // These are the currently available models from Anthropic + AVAILABLE_MODELS: [ + { + id: 'claude-3-7-sonnet-20250219', + name: 'Claude 3.7 Sonnet', + description: 'Most intelligent model with hybrid reasoning capabilities', + maxTokens: 8192 + }, + { + id: 'claude-3-5-sonnet-20241022', + name: 'Claude 3.5 Sonnet', + description: 'High level of intelligence and capability', + maxTokens: 8192 + }, + { + id: 'claude-3-5-haiku-20241022', + name: 'Claude 3.5 Haiku', + description: 'Fastest model with high intelligence', + maxTokens: 8192 + }, + { + id: 'claude-3-opus-20240229', + name: 
'Claude 3 Opus', + description: 'Most capable model for highly complex tasks', + maxTokens: 8192 + }, + { + id: 'claude-3-sonnet-20240229', + name: 'Claude 3 Sonnet', + description: 'Ideal balance of intelligence and speed', + maxTokens: 8192 + }, + { + id: 'claude-3-haiku-20240307', + name: 'Claude 3 Haiku', + description: 'Fastest and most compact model', + maxTokens: 8192 + }, + { + id: 'claude-2.1', + name: 'Claude 2.1', + description: 'Previous generation model', + maxTokens: 8192 + } + ] + }, + + OPENAI: { + BASE_URL: 'https://api.openai.com/v1', + DEFAULT_MODEL: 'gpt-3.5-turbo', + DEFAULT_EMBEDDING_MODEL: 'text-embedding-ada-002', + CONTEXT_WINDOW: 16000, + EMBEDDING_DIMENSIONS: { + ADA: 1536, + DEFAULT: 1536 + }, + AVAILABLE_MODELS: [ + { + id: 'gpt-4o', + name: 'GPT-4o', + description: 'Most capable multimodal model', + maxTokens: 8192 + }, + { + id: 'gpt-4-turbo', + name: 'GPT-4 Turbo', + description: 'Advanced capabilities with higher token limit', + maxTokens: 8192 + }, + { + id: 'gpt-4', + name: 'GPT-4', + description: 'Original GPT-4 model', + maxTokens: 8192 + }, + { + id: 'gpt-3.5-turbo', + name: 'GPT-3.5 Turbo', + description: 'Fast and efficient model for most tasks', + maxTokens: 8192 + } + ] + }, + + OLLAMA: { + BASE_URL: 'http://localhost:11434', + DEFAULT_MODEL: 'llama2', + BATCH_SIZE: 100, + CHUNKING: { + SIZE: 4000, + OVERLAP: 200 + }, + MODEL_DIMENSIONS: { + default: 8192, + llama2: 8192, + mixtral: 8192, + 'mistral': 8192 + }, + MODEL_CONTEXT_WINDOWS: { + default: 8192, + llama2: 8192, + mixtral: 8192, + 'mistral': 8192 + } + } +} as const; + +// LLM service configuration constants +export const LLM_CONSTANTS = { + // Context window sizes (in characters) + CONTEXT_WINDOW: { + OLLAMA: 8000, + OPENAI: 12000, + ANTHROPIC: 15000, + VOYAGE: 12000, + DEFAULT: 6000 + }, + + // Embedding dimensions (verify these with your actual models) + EMBEDDING_DIMENSIONS: { + OLLAMA: { + DEFAULT: 384, + NOMIC: 768, + MISTRAL: 1024 + }, + OPENAI: { + ADA: 
1536, + DEFAULT: 1536 + }, + ANTHROPIC: { + CLAUDE: 1024, + DEFAULT: 1024 + }, + VOYAGE: { + DEFAULT: 1024 + } + }, + + // Model-specific embedding dimensions for Ollama models + OLLAMA_MODEL_DIMENSIONS: { + "llama3": 8192, + "llama3.1": 8192, + "mistral": 8192, + "nomic": 768, + "mxbai": 1024, + "nomic-embed-text": 768, + "mxbai-embed-large": 1024, + "default": 384 + }, + + // Model-specific context windows for Ollama models + OLLAMA_MODEL_CONTEXT_WINDOWS: { + "llama3": 8192, + "llama3.1": 8192, + "llama3.2": 8192, + "mistral": 8192, + "nomic": 32768, + "mxbai": 32768, + "nomic-embed-text": 32768, + "mxbai-embed-large": 32768, + "default": 8192 + }, + + // Batch size configuration + BATCH_SIZE: { + OPENAI: 10, // OpenAI can handle larger batches efficiently + ANTHROPIC: 5, // More conservative for Anthropic + OLLAMA: 1, // Ollama processes one at a time + DEFAULT: 5 // Conservative default + }, + + // Chunking parameters + CHUNKING: { + DEFAULT_SIZE: 1500, + OLLAMA_SIZE: 1000, + DEFAULT_OVERLAP: 100, + MAX_SIZE_FOR_SINGLE_EMBEDDING: 5000 + }, + + // Search/similarity thresholds + SIMILARITY: { + DEFAULT_THRESHOLD: 0.65, + HIGH_THRESHOLD: 0.75, + LOW_THRESHOLD: 0.5 + }, + + // Session management + SESSION: { + CLEANUP_INTERVAL_MS: 60 * 60 * 1000, // 1 hour + SESSION_EXPIRY_MS: 12 * 60 * 60 * 1000, // 12 hours + MAX_SESSION_MESSAGES: 10 + }, + + // Content limits + CONTENT: { + MAX_NOTE_CONTENT_LENGTH: 1500, + MAX_TOTAL_CONTENT_LENGTH: 10000 + } +}; diff --git a/src/services/llm/constants/query_decomposition_constants.ts b/src/services/llm/constants/query_decomposition_constants.ts new file mode 100644 index 000000000..2c6df4386 --- /dev/null +++ b/src/services/llm/constants/query_decomposition_constants.ts @@ -0,0 +1,95 @@ +/** + * Query Decomposition Constants + * + * This file centralizes all string constants used in the query decomposition tool. + * These constants can be translated for internationalization support. 
+ */ + +export const QUERY_DECOMPOSITION_STRINGS = { + // Log messages + LOG_MESSAGES: { + DECOMPOSING_QUERY: (query: string) => `Decomposing query: "${query.substring(0, 100)}..."`, + EMPTY_QUERY: "Query decomposition called with empty query", + COMPLEXITY_ASSESSMENT: (complexity: number) => `Query complexity assessment: ${complexity}/10`, + SIMPLE_QUERY: (complexity: number) => `Query is simple (complexity ${complexity}), returning as single sub-query`, + DECOMPOSED_INTO: (count: number) => `Decomposed query into ${count} sub-queries`, + SUB_QUERY_LOG: (index: number, text: string, reason: string) => `Sub-query ${index + 1}: "${text}" - Reason: ${reason}`, + ERROR_DECOMPOSING: (error: string) => `Error decomposing query: ${error}`, + AVOIDING_RECURSIVE: (query: string) => `Avoiding recursive subqueries for query "${query.substring(0, 50)}..."`, + ERROR_SYNTHESIZING: (error: string) => `Error synthesizing answer: ${error}` + }, + + // Query identification patterns + QUERY_PATTERNS: { + PROVIDE_DETAILS_ABOUT: "provide details about", + INFORMATION_RELATED_TO: "information related to", + COMPARE: "compare", + DIFFERENCE_BETWEEN: "difference between", + VS: " vs ", + VERSUS: "versus", + HOW_TO: "how to ", + WHY: "why ", + WHAT_IS: "what is ", + WHAT_ARE: "what are " + }, + + // Question words used for complexity assessment + QUESTION_WORDS: ['what', 'how', 'why', 'where', 'when', 'who', 'which'], + + // Conjunctions used for complexity assessment + CONJUNCTIONS: ['and', 'or', 'but', 'as well as'], + + // Comparison terms used for complexity assessment + COMPARISON_TERMS: ['compare', 'versus', 'vs', 'difference', 'similarities'], + + // Analysis terms used for complexity assessment + ANALYSIS_TERMS: ['analyze', 'examine', 'investigate', 'explore', 'explain', 'discuss'], + + // Common stop words for parsing + STOP_WORDS: ['the', 'of', 'and', 'or', 'vs', 'versus', 'between', 'comparison', 'compared', 'to', 'with', 'what', 'is', 'are', 'how', 'why', 'when', 'which'], + + 
// Sub-query templates + SUB_QUERY_TEMPLATES: { + INFORMATION_RELATED: (query: string) => `Information related to ${query}`, + KEY_CHARACTERISTICS: (entity: string) => `What are the key characteristics of ${entity}?`, + COMPARISON_FEATURES: (entities: string[]) => `How do ${entities.join(' and ')} compare in terms of their primary features?`, + STEPS_TO: (topic: string) => `What are the steps to ${topic}?`, + CHALLENGES: (topic: string) => `What are common challenges or pitfalls when trying to ${topic}?`, + CAUSES: (topic: string) => `What are the causes of ${topic}?`, + EVIDENCE: (topic: string) => `What evidence supports explanations for ${topic}?`, + DEFINITION: (topic: string) => `Definition of ${topic}`, + EXAMPLES: (topic: string) => `Examples of ${topic}`, + KEY_INFORMATION: (concept: string) => `Key information about ${concept}` + }, + + // Sub-query reasons + SUB_QUERY_REASONS: { + GETTING_DETAILS: (entity: string) => `Getting details about "${entity}" for comparison`, + DIRECT_COMPARISON: 'Direct comparison of the entities', + FINDING_PROCEDURAL: 'Finding procedural information', + IDENTIFYING_DIFFICULTIES: 'Identifying potential difficulties', + IDENTIFYING_CAUSES: 'Identifying causes', + FINDING_EVIDENCE: 'Finding supporting evidence', + GETTING_DEFINITION: 'Getting basic definition', + FINDING_EXAMPLES: 'Finding examples', + FINDING_INFORMATION: (concept: string) => `Finding information about "${concept}"` + }, + + // Synthesis answer templates + SYNTHESIS_TEMPLATES: { + CANNOT_SYNTHESIZE: "Cannot synthesize answer - not all sub-queries have been answered.", + ANSWER_TO: (query: string) => `Answer to: "${query}"\n\n`, + BASED_ON_INFORMATION: "Based on the information gathered:\n\n", + ERROR_SYNTHESIZING: "Error synthesizing the final answer." 
+ }, + + // Query status templates + STATUS_TEMPLATES: { + PROGRESS: (answered: number, total: number) => `Progress: ${answered}/${total} sub-queries answered\n\n`, + ANSWERED_MARKER: "✓", + UNANSWERED_MARKER: "○", + ANSWER_PREFIX: " Answer: " + } +}; + +export default QUERY_DECOMPOSITION_STRINGS; diff --git a/src/services/llm/constants/search_constants.ts b/src/services/llm/constants/search_constants.ts new file mode 100644 index 000000000..bc1689961 --- /dev/null +++ b/src/services/llm/constants/search_constants.ts @@ -0,0 +1,137 @@ +export const SEARCH_CONSTANTS = { + // Vector search parameters + VECTOR_SEARCH: { + DEFAULT_MAX_RESULTS: 10, + DEFAULT_THRESHOLD: 0.6, + SIMILARITY_THRESHOLD: { + COSINE: 0.6, + HYBRID: 0.3, + DIM_AWARE: 0.1 + }, + EXACT_MATCH_THRESHOLD: 0.65 + }, + + // Context extraction parameters + CONTEXT: { + CONTENT_LENGTH: { + MEDIUM_THRESHOLD: 5000, + HIGH_THRESHOLD: 10000 + }, + MAX_PARENT_DEPTH: 3, + MAX_CHILDREN: 10, + MAX_LINKS: 10, + MAX_SIMILAR_NOTES: 5, + MAX_CONTENT_LENGTH: 2000, + MAX_RELATIONS: 10, + MAX_POINTS: 5 + }, + + // Hierarchy parameters + HIERARCHY: { + DEFAULT_QUERY_DEPTH: 2, + MAX_NOTES_PER_QUERY: 10, + MAX_PATH_LENGTH: 20, + MAX_BREADTH: 100, + MAX_DEPTH: 5, + MAX_PATHS_TO_SHOW: 3 + }, + + // Temperature settings + TEMPERATURE: { + DEFAULT: 0.7, + RELATIONSHIP_TOOL: 0.4, + VECTOR_SEARCH: 0.3, + QUERY_PROCESSOR: 0.3 + }, + + // Token/char limits + LIMITS: { + DEFAULT_NOTE_SUMMARY_LENGTH: 500, + DEFAULT_MAX_TOKENS: 4096, + RELATIONSHIP_TOOL_MAX_TOKENS: 50, + VECTOR_SEARCH_MAX_TOKENS: 500, + QUERY_PROCESSOR_MAX_TOKENS: 300, + MIN_STRING_LENGTH: 3 + }, + + // Tool execution parameters + TOOL_EXECUTION: { + MAX_TOOL_CALL_ITERATIONS: 5, + MAX_FOLLOW_UP_ITERATIONS: 3 + } +}; + +// Model capabilities constants - moved from ./interfaces/model_capabilities.ts +export const MODEL_CAPABILITIES = { + 'gpt-3.5-turbo': { + contextWindowTokens: 8192, + contextWindowChars: 16000 + }, + 'gpt-4': { + contextWindowTokens: 8192 + }, + 
'gpt-4-turbo': { + contextWindowTokens: 8192 + }, + 'claude-3-opus': { + contextWindowTokens: 200000 + }, + 'claude-3-sonnet': { + contextWindowTokens: 180000 + }, + 'claude-3.5-sonnet': { + contextWindowTokens: 200000 + }, + 'default': { + contextWindowTokens: 4096 + } +}; + +// Embedding processing constants +export const EMBEDDING_PROCESSING = { + MAX_TOTAL_PROCESSING_TIME: 5 * 60 * 1000, // 5 minutes + MAX_CHUNK_RETRY_ATTEMPTS: 2, + DEFAULT_MAX_CHUNK_PROCESSING_TIME: 60 * 1000, // 1 minute + OLLAMA_MAX_CHUNK_PROCESSING_TIME: 120 * 1000, // 2 minutes + DEFAULT_EMBEDDING_UPDATE_INTERVAL: 200 +}; + +// Provider-specific embedding capabilities +export const PROVIDER_EMBEDDING_CAPABILITIES = { + VOYAGE: { + MODELS: { + 'voyage-large-2': { + contextWidth: 8192, + dimension: 1536 + }, + 'voyage-2': { + contextWidth: 8192, + dimension: 1024 + }, + 'voyage-lite-02': { + contextWidth: 8192, + dimension: 768 + }, + 'default': { + contextWidth: 8192, + dimension: 1024 + } + } + }, + OPENAI: { + MODELS: { + 'text-embedding-3-small': { + dimension: 1536, + contextWindow: 8191 + }, + 'text-embedding-3-large': { + dimension: 3072, + contextWindow: 8191 + }, + 'default': { + dimension: 1536, + contextWindow: 8192 + } + } + } +}; diff --git a/src/services/llm/context/code_handlers.ts b/src/services/llm/context/code_handlers.ts new file mode 100644 index 000000000..f4b1fca97 --- /dev/null +++ b/src/services/llm/context/code_handlers.ts @@ -0,0 +1,438 @@ +/** + * Helper functions for processing code notes, including language detection and structure extraction + */ + +// Import highlight.js dynamically when needed +let hljs: object | null = null; + +/** + * Attempt to detect the programming language from code content or note attributes + */ +export function detectLanguage(content: string, mime: string): string { + // First check MIME type for hints + if (mime) { + const mimeLower = mime.toLowerCase(); + + // Map of mime types to language names + const mimeMap: {[key: string]: 
string} = { + 'text/javascript': 'javascript', + 'application/javascript': 'javascript', + 'text/typescript': 'typescript', + 'application/typescript': 'typescript', + 'text/x-python': 'python', + 'text/x-java': 'java', + 'text/x-c': 'c', + 'text/x-c++': 'cpp', + 'text/x-csharp': 'csharp', + 'text/x-go': 'go', + 'text/x-ruby': 'ruby', + 'text/x-php': 'php', + 'text/x-rust': 'rust', + 'text/x-swift': 'swift', + 'text/x-kotlin': 'kotlin', + 'text/x-scala': 'scala', + 'text/x-perl': 'perl', + 'text/x-lua': 'lua', + 'text/x-r': 'r', + 'text/x-dart': 'dart', + 'text/html': 'html', + 'text/css': 'css', + 'application/json': 'json', + 'application/xml': 'xml', + 'text/markdown': 'markdown', + 'text/yaml': 'yaml', + 'text/x-sql': 'sql' + }; + + if (mimeMap[mimeLower]) { + return mimeMap[mimeLower]; + } + } + + // Fallback to regex-based detection if highlight.js is not available or fails + // Check for common language patterns in the first few lines + const firstLines = content.split('\n').slice(0, 10).join('\n'); + + // Simple heuristics for common languages + if (firstLines.includes('') || firstLines.includes('')) return 'html'; + if (firstLines.includes('function ') && firstLines.includes('var ') && firstLines.includes('const ')) return 'javascript'; + if (firstLines.includes('interface ') && firstLines.includes('export class ')) return 'typescript'; + if (firstLines.includes('@Component') || firstLines.includes('import { Component }')) return 'typescript'; + + // Default to 'text' if language can't be determined + return 'text'; +} + +/** + * Extract structure from code to create a summary + */ +export function extractCodeStructure(content: string, language: string): string { + // Avoid processing very large code files + if (content.length > 100000) { + return "Code content too large for structure extraction"; + } + + let structure = ""; + + try { + switch (language.toLowerCase()) { + case 'javascript': + case 'typescript': + structure = extractJsStructure(content); + 
break; + + case 'python': + structure = extractPythonStructure(content); + break; + + case 'java': + case 'csharp': + case 'cpp': + structure = extractClassBasedStructure(content); + break; + + case 'go': + structure = extractGoStructure(content); + break; + + case 'rust': + structure = extractRustStructure(content); + break; + + case 'html': + structure = extractHtmlStructure(content); + break; + + default: + // For other languages, just return a summary of the file size and a few lines + const lines = content.split('\n'); + structure = `Code file with ${lines.length} lines.\n`; + + // Add first few non-empty lines that aren't comments + const firstCodeLines = lines.filter(line => + line.trim() !== '' && + !line.trim().startsWith('//') && + !line.trim().startsWith('#') && + !line.trim().startsWith('*') && + !line.trim().startsWith('