import OpenAI from 'openai';
import { OpenRouter } from 'openrouter-client';

import { ServiceError, LLMConfig, DocumentMetadata } from '../types';
import { store } from '../store';
import { ollamaService } from './ollamaService';

type Message = { role: 'system' | 'user' | 'assistant'; content: string };
// Minimal structural view of the OpenAI SDK client used by this service.
// `create` is left loosely typed so the SDK's overloaded method remains assignable.
interface OpenAIClient {
  chat: {
    completions: {
      create: (...args: any[]) => any;
    };
  };
}
interface OpenRouterStreamResponse {
  success: boolean;
  data?: {
    choices: Array<{ delta?: { content?: string }; message?: { content: string } }>;
  };
  errorCode?: number;
  errorMessage?: string;
}
type OpenRouterConfig = {
  temperature?: number;
  model?: string;
  stream?: boolean;
};

interface OpenRouterClient {
  chat: (messages: Message[], config?: OpenRouterConfig) => Promise<OpenRouterStreamResponse>;
}
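
/**
 * LLMService routes chat-style queries to the configured provider
 * (OpenAI, OpenRouter, or a local Ollama server), streams partial
 * output through an optional onChunk callback, and persists its
 * configuration in the application store under the `llm_config` key.
 */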
export class LLMService {
  #config: LLMConfig;
  #openaiClient: OpenAIClient | null;
  #openrouterClient: OpenRouterClient | null;

  constructor() {
    const storedConfig = store.get('llm_config');
    this.#config = storedConfig || {
      provider: 'ollama',
      model: 'jimscard/blackhat-hacker:v2',
      baseUrl: 'http://localhost:11434',
      temperature: 0.7,
      apiKey: null
    };
    // Ensure config is saved with defaults
    store.set('llm_config', this.#config);
    this.#openaiClient = null;
    this.#openrouterClient = null;
    this.#initializeClient();
  }

  /**
   * Create the provider-specific client for the current configuration.
   * Throws if a hosted provider is selected without an API key.
   */
  #initializeClient() {
    switch (this.#config?.provider) {
      case 'openai':
        if (!this.#config.apiKey) {
          throw new ServiceError('OpenAI API key is required');
        }
        this.#openaiClient = new OpenAI({
          apiKey: this.#config.apiKey,
          baseURL: this.#config.baseUrl,
        });
        break;
      case 'openrouter':
        if (!this.#config.apiKey) {
          throw new ServiceError('OpenRouter API key is required');
        }
        this.#openrouterClient = new OpenRouter(this.#config.apiKey);
        break;
      case 'ollama':
        if (this.#config.baseUrl) {
          ollamaService.updateBaseUrl(this.#config.baseUrl);
        }
        break;
    }
  }
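
  /**
   * Send a single-turn question to the configured provider.
   * @param question Prompt text sent as a user message.
   * @param onChunk Optional callback invoked with each streamed text fragment.
   * @returns The full answer plus supporting document metadata (currently always empty).
   */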
  async query(
    question: string,
    onChunk?: (chunk: string) => void
  ): Promise<{ answer: string; sources: DocumentMetadata[] }> {
    if (!this.#config?.provider) {
      throw new ServiceError('LLM provider not configured');
    }
    try {
      let response: string;
      switch (this.#config.provider) {
        case 'openai': {
          if (!this.#openaiClient) {
            throw new ServiceError('OpenAI client not initialized');
          }
          const openaiResponse = await this.#openaiClient.chat.completions.create({
            model: this.#config.model || 'gpt-3.5-turbo',
            messages: [{ role: 'user', content: question }],
            temperature: this.#config.temperature || 0.7,
            stream: true,
          });
          let openaiText = '';
          for await (const chunk of openaiResponse) {
            const content = chunk.choices[0]?.delta?.content || '';
            if (content) {
              openaiText += content;
              onChunk?.(content);
            }
          }
          response = openaiText;
          break;
        }
        case 'openrouter': {
          if (!this.#openrouterClient) {
            throw new ServiceError('OpenRouter client not initialized');
          }
          const openrouterResponse = await this.#openrouterClient.chat(
            [{ role: 'user', content: question }],
            {
              model: this.#config.model || 'openai/gpt-3.5-turbo',
              temperature: this.#config.temperature || 0.7,
              stream: true,
            }
          );
          if (!openrouterResponse.success) {
            throw new ServiceError(openrouterResponse.errorMessage || 'OpenRouter request failed');
          }
          let routerText = '';
          // The response choices arrive as a resolved array, so a plain for...of is sufficient.
          for (const chunk of openrouterResponse.data?.choices || []) {
            const content = chunk.delta?.content || chunk.message?.content || '';
            if (content) {
              routerText += content;
              onChunk?.(content);
            }
          }
          response = routerText;
          break;
        }
        case 'ollama': {
          const ollamaResponse = await ollamaService.chat({
            model: this.#config.model || 'llama2:7b',
            messages: [{ role: 'user', content: question }],
            temperature: this.#config.temperature,
            onChunk,
          });
          response = ollamaResponse.message.content;
          break;
        }
        default:
          throw new ServiceError(`Unsupported provider: ${this.#config.provider}`);
      }
      const sources: DocumentMetadata[] = []; // TODO: Implement source retrieval from vector store
      return {
        answer: response,
        sources,
      };
    } catch (error) {
      console.error('Error querying LLM:', error);
      throw new ServiceError(
        error instanceof Error ? error.message : 'Unknown error occurred'
      );
    }
  }

  /**
   * Return the currently active LLM configuration.
   */
  getConfig(): LLMConfig {
    return this.#config;
  }

  /**
   * Validate, persist, and apply a new LLM configuration.
   * @param newConfig The new LLM configuration
   */
  async updateConfig(newConfig: LLMConfig): Promise<void> {
    // Validate required fields from schema
    if (!newConfig.provider) {
      throw new ServiceError('Provider is required');
    }
    // Clean config to only include allowed properties from schema
    const cleanConfig = {
      provider: newConfig.provider,
      apiKey: newConfig.apiKey ?? null,
      model: newConfig.model ?? (newConfig.provider === 'ollama' ? 'llama2:7b' : null),
      baseUrl: newConfig.provider === 'ollama'
        ? (newConfig.baseUrl ?? 'http://localhost:11434')
        : (newConfig.baseUrl ?? null),
      temperature: typeof newConfig.temperature === 'number' ? newConfig.temperature : 0.7
    };
    // Validate provider-specific requirements
    if (cleanConfig.provider !== 'ollama' && !cleanConfig.apiKey) {
      throw new ServiceError(`${cleanConfig.provider} requires an API key`);
    }
    try {
      store.set('llm_config', cleanConfig);
      this.#config = cleanConfig;
      this.#initializeClient();
    } catch (error) {
      throw new ServiceError(
        error instanceof Error ? error.message : 'Failed to update config'
      );
    }
  }

  /**
   * Get available models from the Ollama server.
   * @returns List of model names
   */
  async getOllamaModels(): Promise<string[]> {
    try {
      return await ollamaService.getModels();
    } catch (error) {
      console.error('Error fetching Ollama models:', error);
      throw new ServiceError(
        error instanceof Error ? error.message : 'Failed to fetch Ollama models'
      );
    }
  }
}

const llmService = new LLMService();
export { llmService };
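
/*
 * Example usage (illustrative sketch, not part of this module; assumes a
 * reachable provider and an LLMConfig shape matching the defaults above):
 *
 *   await llmService.updateConfig({
 *     provider: 'ollama',
 *     model: 'llama2:7b',
 *     baseUrl: 'http://localhost:11434',
 *     temperature: 0.7,
 *     apiKey: null,
 *   });
 *
 *   const { answer, sources } = await llmService.query(
 *     'Summarize the indexed documents.',
 *     (chunk) => process.stdout.write(chunk) // streamed partial output
 *   );
 *   console.log('\n---\n', answer, sources);
 */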