import { ServiceError, DocumentMetadata } from '../types';
import { ollamaService } from './ollamaService';

/**
 * Thin wrapper around ollamaService that exposes a single question/answer
 * entry point and reports the static model configuration it uses.
 */
export class LLMService {
  constructor() {
    // Point the Ollama client at the default local server.
    ollamaService.updateBaseUrl('http://localhost:11434');
  }

  /**
   * Sends a question to the model and resolves with the full answer.
   * When `onChunk` is supplied, partial output is streamed to it as it
   * arrives from the model.
   */
  async query(
    question: string,
    onChunk?: (chunk: string) => void
  ): Promise<{ answer: string; sources: DocumentMetadata[] }> {
    try {
      const ollamaResponse = await ollamaService.chat({
        model: 'damien113/datahound-gpu:8b',
        messages: [{ role: 'user', content: question }],
        temperature: 0.7,
        onChunk,
      });

      // TODO: Implement source retrieval from vector store.
      const sources: DocumentMetadata[] = [];
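
      // One possible shape for the TODO above, assuming a hypothetical
      // `vectorStoreService.search(query, topK)` helper (not part of this
      // codebase) that returns scored hits carrying DocumentMetadata.
      // Kept commented out as a sketch, not the implemented retrieval path:
      //
      //   const hits = await vectorStoreService.search(question, 5);
      //   const sources = hits.map((hit) => hit.metadata);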

      return {
        answer: ollamaResponse.message.content,
        sources,
      };
    } catch (error) {
      console.error('Error querying LLM:', error);
      // Normalize unknown failures into the service-level error type.
      throw new ServiceError(
        error instanceof Error ? error.message : 'Unknown error occurred'
      );
    }
  }

  /** Returns the static provider configuration used for queries. */
  getConfig() {
    return {
      provider: 'ollama',
      model: 'damien113/datahound-gpu:8b',
      baseUrl: 'http://localhost:11434',
      temperature: 0.7,
    };
  }
}

// Shared singleton instance used across the app.
const llmService = new LLMService();

export { llmService };
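
// Example usage (a sketch): assumes a caller module that can use top-level
// await and that this file resolves as './llmService' (path assumed); the
// question text is illustrative only. The callback receives partial output
// as it streams in, while the resolved value holds the complete answer.
//
//   import { llmService } from './llmService';
//
//   const { answer, sources } = await llmService.query(
//     'Summarize the indexed documents.',
//     (chunk) => process.stdout.write(chunk)
//   );
//   console.log(answer, `sources: ${sources.length}`);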