Mirror of https://github.com/zadam/trilium.git
This at least works to send responses when there are no tool calls
commit 253dbf92fa
parent 80c29e2a01
@@ -581,6 +581,16 @@ export class ChatPipeline {
         } else if (toolsEnabled) {
             log.info(`========== NO TOOL CALLS DETECTED ==========`);
             log.info(`LLM response did not contain any tool calls, skipping tool execution`);
+
+            // Handle streaming for responses without tool calls
+            if (shouldEnableStream && streamCallback) {
+                log.info(`Sending final streaming response without tool calls: ${currentResponse.text.length} chars`);
+
+                // Send the final response with done=true to complete the streaming
+                await streamCallback(currentResponse.text, true);
+
+                log.info(`Sent final non-tool response with done=true signal`);
+            }
         }
 
         // Process the final response
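For context: the hunk above completes a non-tool-call stream by calling `streamCallback(currentResponse.text, true)`. The callback's exact type is not visible in this diff; the sketch below is a hypothetical consumer, assuming a positional `(text, done, raw?)` signature inferred from that call, which accumulates interim chunks and resolves once on the final `done=true` signal.

    // Hypothetical consumer sketch; StreamCallback's shape is an assumption
    // inferred from `await streamCallback(currentResponse.text, true)` above.
    type StreamCallback = (text: string, done: boolean, raw?: unknown) => Promise<void>;

    function collectStream(): { callback: StreamCallback; result: Promise<string> } {
        let buffer = '';
        let settled = false;
        let resolve!: (full: string) => void;
        const result = new Promise<string>((r) => { resolve = r; });

        const callback: StreamCallback = async (text, done) => {
            buffer += text;
            // Resolve once on the first done=true; stay idempotent in case a
            // provider path ever signals completion more than once.
            if (done && !settled) {
                settled = true;
                resolve(buffer);
            }
        };

        return { callback, result };
    }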
@@ -347,7 +347,7 @@ export class OllamaService extends BaseAIService {
                     // Send the chunk to the caller
                     await callback({
                         text: chunk.message?.content || '',
-                        done: !!chunk.done,
+                        done: false, // Never mark as done during chunk processing
                         raw: chunk // Include the raw chunk for advanced processing
                     });
 
@@ -359,7 +359,7 @@ export class OllamaService extends BaseAIService {
 
                 log.info(`Completed streaming from Ollama: processed ${chunkCount} chunks, total content: ${completeText.length} chars`);
 
-                // Signal completion
+                // Signal completion with a separate final callback after all processing is done
                 await callback({
                     text: '',
                     done: true
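Note: the two Ollama hunks above form one pattern — every chunk is forwarded with done: false even when the provider's chunk carries a done flag, and a single empty done: true callback is emitted only after iteration completes. A minimal stand-alone sketch of that pattern, with assumed chunk and callback types (the real service keeps more state and error handling):

    // Illustrative sketch, not the service's actual code; types are assumed
    // from the fields the diff reads (`chunk.message?.content`, `chunk.done`).
    interface OllamaChunk { message?: { content?: string }; done?: boolean; }
    interface StreamChunk { text: string; done: boolean; raw?: OllamaChunk; }

    async function pumpStream(
        chunks: AsyncIterable<OllamaChunk>,
        callback: (chunk: StreamChunk) => Promise<void>
    ): Promise<string> {
        let completeText = '';
        for await (const chunk of chunks) {
            const text = chunk.message?.content ?? '';
            completeText += text;
            // Never mark as done during chunk processing, even if the
            // provider's chunk says done: completion is signaled separately.
            await callback({ text, done: false, raw: chunk });
        }
        // Signal completion exactly once, after all chunks are processed.
        await callback({ text: '', done: true });
        return completeText;
    }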
@@ -476,8 +476,10 @@ export class OllamaService extends BaseAIService {
                     // Call the callback with the current chunk content
                     if (opts.streamCallback) {
                         try {
-                            // Don't send done:true when tool calls are present to avoid premature completion
-                            const shouldMarkAsDone = !!chunk.done && !responseToolCalls.length;
+                            // Only mark as done on the final chunk if we have actual content
+                            // This ensures consistent behavior with and without tool calls
+                            // We'll send a separate final callback after the loop completes
+                            const shouldMarkAsDone = false; // Never mark as done during chunk processing
 
                             await opts.streamCallback(
                                 chunk.message?.content || '',
@@ -499,6 +501,17 @@ export class OllamaService extends BaseAIService {
                         }
                     }
 
+                    // Send one final callback with done=true after all chunks have been processed
+                    // This ensures we get the complete response regardless of tool calls
+                    if (opts.streamCallback) {
+                        try {
+                            log.info(`Sending final done=true callback after processing all chunks`);
+                            await opts.streamCallback('', true, { done: true });
+                        } catch (finalCallbackError) {
+                            log.error(`Error in final streamCallback: ${finalCallbackError}`);
+                        }
+                    }
+
                     log.info(`Completed direct streaming from Ollama: processed ${chunkCount} chunks, final content: ${completeText.length} chars`);
                 } catch (iterationError) {
                     log.error(`Error iterating through Ollama stream chunks: ${iterationError instanceof Error ? iterationError.message : String(iterationError)}`);
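The direct-streaming hunks mirror the same idea, and additionally wrap the final callback in its own try/catch so a failing consumer cannot abort the loop's bookkeeping. Wiring the two sketches above together shows the intended contract end to end; again hypothetical code, reusing the assumed types and helpers:

    // Hypothetical usage of the sketches above: a fake three-chunk stream.
    async function* fakeChunks(): AsyncIterable<OllamaChunk> {
        yield { message: { content: 'Hello' } };
        yield { message: { content: ', world' } };
        yield { message: { content: '!' }, done: true }; // provider flag is ignored mid-stream
    }

    async function demo(): Promise<void> {
        const { callback, result } = collectStream();
        await pumpStream(fakeChunks(), async ({ text, done }) => callback(text, done));
        console.log(await result); // "Hello, world!" — resolved by the trailing done=true
    }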