Skip to content
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
33 changes: 31 additions & 2 deletions src/cobolt-backend/chat_history.ts
Original file line number Diff line number Diff line change
Expand Up @@ -56,15 +56,44 @@ class ChatHistory {

/**
 * Convert chat history to the format used by the Ollama LLM.
 * Execution-event metadata is stripped from each message's content so it
 * never reaches the AI context.
 * @returns Array of Message objects in Ollama format with clean content
 */
toOllamaMessages(): Message[] {
  return this.messages.map(message => ({
    role: message.role,
    // Strip execution metadata; see cleanContentForAI for the tag patterns removed.
    content: this.cleanContentForAI(message.content),
  }));
}

/**
 * Clean content for AI context - removes ALL execution metadata.
 * This prevents the AI from learning how to fake execution patterns.
 *
 * @param content Raw stored message content, possibly containing metadata tags
 * @returns Content with execution metadata removed and whitespace normalized
 */
private cleanContentForAI(content: string): string {
  let cleaned = content;

  // Remove known paired metadata tags (non-greedy; the s flag lets `.` span newlines).
  cleaned = cleaned.replace(/<execution_event\b[^>]*>.*?<\/execution_event>/gs, '');
  cleaned = cleaned.replace(/<tool_call_position\b[^>]*>/g, '');
  cleaned = cleaned.replace(/<tool_calls_update\b[^>]*>.*?<\/tool_calls_update>/gs, '');
  cleaned = cleaned.replace(/<tool_calls_complete\b[^>]*>.*?<\/tool_calls_complete>/gs, '');
  // Runs after the more specific tool_calls_* patterns; \b keeps this pattern
  // from matching those longer tag names in any case.
  cleaned = cleaned.replace(/<tool_calls\b[^>]*>.*?<\/tool_calls>/gs, '');

  // Remove orphaned closing tags left behind by unpaired openers.
  cleaned = cleaned.replace(/<\/(?:execution_event|tool_calls_update|tool_calls_complete|tool_calls|tool_call_position)>/g, '');

  // Catch-all for other metadata-looking tags. The closing tag is anchored to
  // the SAME tag name via a backreference; the previous version accepted any
  // closing tag (<\/[^>]+>), which could swallow legitimate user content
  // sitting between two unrelated tags.
  cleaned = cleaned.replace(/<([A-Za-z_]*(?:execution|tool_call|metadata|event)[A-Za-z_]*)\b[^>]*>.*?<\/\1>/gs, '');

  // Collapse the blank-line runs the removals leave behind.
  cleaned = cleaned.replace(/\n\s*\n\s*\n/g, '\n\n');
  cleaned = cleaned.replace(/^\s+|\s+$/gm, ''); // Trim each line
  cleaned = cleaned.trim();

  return cleaned;
}

/**
* Clear the chat history
*/
Expand Down
21 changes: 20 additions & 1 deletion src/cobolt-backend/memory.ts
Original file line number Diff line number Diff line change
Expand Up @@ -64,12 +64,27 @@ function initMemory(): void {
* Adds messages to the memory store
* @param messages Array of messages to add to memory
*/
/* Original implementation, kept for reference:
async function addToMemory(messages: Message[]): Promise<void> {
if (!memoryEnabled) {
return;
}
await memory?.add(messages, {userId: "userid"});
}
*/
/**
 * Adds messages to the memory store.
 * No-op when memory is disabled via the module-level flag.
 * @param messages Array of messages to add to memory
 * @throws Rethrows any storage failure after logging it, so callers can react
 */
async function addToMemory(messages: Message[]): Promise<void> {
  if (!memoryEnabled) {
    return;
  }

  try {
    // NOTE(review): hard-coded userId — presumably a single-user app; confirm.
    await memory?.add(messages, { userId: "userid" });
  } catch (error) {
    // Surface the failure but let the caller decide how to recover.
    console.error('[Memory] Memory storage FAILED:', error);
    throw error;
  }
}

/**
* Searches the memory store for relevant memories
Expand Down Expand Up @@ -128,4 +143,8 @@ if (require.main === module) {
}
}

export { addToMemory, searchMemories, clearMemory, listMemories, updateMemoryEnabled };
/**
 * Reports whether the memory store is currently enabled.
 * @returns The current value of the module-level memoryEnabled flag
 */
function isMemoryEnabled(): boolean {
return memoryEnabled;
}

export { addToMemory, searchMemories, clearMemory, listMemories, updateMemoryEnabled, isMemoryEnabled };
41 changes: 9 additions & 32 deletions src/cobolt-backend/ollama_client.ts
Original file line number Diff line number Diff line change
@@ -1,15 +1,12 @@
import { Ollama, Message, ChatResponse } from 'ollama';
import { Ollama, Message, ChatResponse} from 'ollama';
import { exec, spawn } from 'child_process';
import log from 'electron-log/main';
import { FunctionTool } from './ollama_tools';
import * as os from 'os';
import configStore from './data_models/config_store';
import { addToMemory } from './memory';
import { RequestContext, TraceLogger } from './logger';
import { formatDateTime } from './datetime_parser';
import { createQueryWithToolsPrompt } from './prompt_templates';
import { ChatHistory } from './chat_history';
import { MODELS } from './model_manager'
import { MODELS } from './model_manager';
import { FunctionTool } from './ollama_tools';
import { BrowserWindow } from 'electron';

let progressWindow: BrowserWindow | null = null;
Expand Down Expand Up @@ -461,9 +458,12 @@ async function* simpleChatOllamaStream(requestContext: RequestContext,

/**
* Send a simple query to ollama with the specified tools.
* @param messages - a slice of messages objects
* @param toolCalls - the list of FunctionTools to pass with the query
* @returns - The response from the LLM
* Uses the dedicated TOOLS_MODEL for efficient tool calling.
* @param requestContext - Request context with user query
* @param systemPrompt - System prompt for tool selection
* @param toolCalls - List of available tools
* @param memories - User memories for context
* @returns The response from the LLM with tool calls
*/
async function queryOllamaWithTools(requestContext: RequestContext,
Copy link
Copy Markdown
Collaborator

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

We need this to process tool calls

Copy link
Copy Markdown
Contributor Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

This one I'm not sure I follow.. I've just totally run over this function and it was swallowed whole by the new processRagRatQuery().

systemPrompt: string,
Expand Down Expand Up @@ -515,27 +515,4 @@ function logExecOutput(platform: string) {
};
}

if (require.main === module) {
(async () => {
await initOllama();
const toolCalls: FunctionTool[] = [];
const requestContext = {
requestId: '123',
currentDatetime: new Date(),
question: 'Give me all of my calender events since last week from friends',
chatHistory: new ChatHistory(),
};
const toolUserMessage = createQueryWithToolsPrompt(formatDateTime(new Date()).toString())
const response = await queryOllamaWithTools(requestContext, toolUserMessage, toolCalls);
console.log(response)
if (!response.message.tool_calls) {
console.log('No tool calls');
return;
}
for (const toolCall of response.message.tool_calls) {
console.log('Tool call:', toolCall);
}
})();
}

export { initOllama, getOllamaClient, queryOllamaWithTools, simpleChatOllamaStream, stopOllama, setProgressWindow };
Loading