import { ServiceError } from '../types';
import { net } from 'electron';
import { platform } from 'os';
import { exec } from 'child_process';
import { promisify } from 'util';

const execAsync = promisify(exec);
interface OllamaStatus {
  installed: boolean;
  running: boolean;
}

interface OllamaModel {
  name: string;
  modified_at: string;
  size: number;
  digest: string;
}

interface OllamaModelStatus {
  installed: boolean;
  installing: boolean;
}

interface OllamaListResponse {
  models: Array<{
    name: string;
    model: string;
    modified_at: string;
    size: number;
    digest: string;
  }>;
}

interface OllamaChatResponse {
  message: {
    content: string;
  };
}

interface OllamaChatParams {
  model: string;
  messages: Array<{ role: string; content: string }>;
  temperature?: number;
  onChunk?: (chunk: string) => void;
}
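
// Both streaming endpoints ('/api/chat', '/api/pull') respond with NDJSON:
// one JSON object per line. Illustrative chunk shapes (field sets abridged):
//   chat: {"message":{"role":"assistant","content":"Hel"},"done":false}
//   pull: {"status":"pulling manifest"}, then {"status":"...","total":1000,"completed":250}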
class OllamaService {
  private baseUrl: string = 'http://127.0.0.1:11434';
  private _lastProgress: number | null = null;

  async checkOllamaInstallation(): Promise<OllamaStatus> {
    try {
      // Check if the ollama binary exists on PATH
      const cmd = platform() === 'win32' ? 'where ollama' : 'which ollama';
      await execAsync(cmd);

      // Check if the Ollama server is running by attempting to connect to the API
      try {
        await this.makeRequest<any>('/api/tags');
        return { installed: true, running: true };
      } catch (error) {
        // Server not running, attempt to start it
        try {
          console.log('Attempting to start Ollama server...');
          exec('ollama serve', (error, stdout, stderr) => {
            if (error) {
              console.error('Failed to start Ollama server:', error);
            }
            if (stderr) {
              console.error('Ollama server stderr:', stderr);
            }
            if (stdout) {
              console.log('Ollama server stdout:', stdout);
            }
          });

          // Give the server a moment to start
          await new Promise(resolve => setTimeout(resolve, 2000));

          // Check again whether it is running
          try {
            await this.makeRequest<any>('/api/tags');
            return { installed: true, running: true };
          } catch (retryError) {
            console.error('Server still not responding after start attempt:', retryError);
            return { installed: true, running: false };
          }
        } catch (startError) {
          console.error('Error starting Ollama server:', startError);
          return { installed: true, running: false };
        }
      }
    } catch (error) {
      return { installed: false, running: false };
    }
  }
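
  // Note: `exec('ollama serve')` keeps the spawned server tied to this process
  // and buffers its output until the child exits. A detached spawn is an
  // alternative sketch (assumes the `ollama` binary is on PATH):
  //
  //   import { spawn } from 'child_process';
  //   const child = spawn('ollama', ['serve'], { detached: true, stdio: 'ignore' });
  //   child.unref(); // let the server outlive this process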

  // Sends a request via Electron's net module (Chromium's network stack).
  // Streaming endpoints are parsed line-by-line as NDJSON; everything else is
  // buffered and parsed whole on 'end'.
  private async makeRequest<T>(
    path: string,
    method: string = 'GET',
    body?: any,
    onChunk?: (chunk: string) => void
  ): Promise<T> {
    let accumulatedContent = ''; // Accumulator for streamed chat content
    return new Promise((resolve, reject) => {
      try {
        const url = new URL(path, this.baseUrl);
        const request = net.request({
          url: url.toString(),
          method,
          headers: {
            'Content-Type': 'application/json',
          }
        });

        let responseData = '';
        let streamBuffer = '';

        request.on('response', (response) => {
          if (response.statusCode !== 200) {
            const error = new Error(`HTTP error! status: ${response.statusCode}`);
            console.error('Ollama request failed:', {
              path,
              statusCode: response.statusCode,
              error
            });
            reject(error);
            return;
          }

          response.on('data', (chunk) => {
            try {
              const chunkStr = chunk.toString();

              if (path === '/api/chat' || path === '/api/pull') {
                // Handle streaming responses
                streamBuffer += chunkStr;
                const lines = streamBuffer.split('\n');

                // Process all complete lines except the last one
                for (let i = 0; i < lines.length - 1; i++) {
                  const line = lines[i].trim();
                  if (!line) continue;

                  try {
                    const parsed = JSON.parse(line);
                    if (path === '/api/chat' && parsed.message?.content) {
                      accumulatedContent += parsed.message.content;
                      if (onChunk) {
                        onChunk(parsed.message.content);
                      }
                    } else if (path === '/api/pull' && onChunk) {
                      if (parsed.status === 'success') {
                        onChunk('downloading: 100% complete');
                      } else if (parsed.total && parsed.completed !== undefined) {
                        const percentage = ((parsed.completed / parsed.total) * 100).toFixed(1);
                        onChunk(`downloading: ${percentage}% complete`);
                      } else if (parsed.status) {
                        onChunk(parsed.status);
                      }
                    }
                  } catch (e) {
                    console.warn('Failed to parse chunk:', { line, error: e });
                  }
                }

                // Keep the last, potentially incomplete line
                streamBuffer = lines[lines.length - 1];
              } else {
                // For non-streaming endpoints, accumulate the entire response
                responseData += chunkStr;
              }
            } catch (e) {
              console.error('Error processing chunk:', e);
            }
          });

          response.on('end', () => {
            try {
              if (path === '/api/chat') {
                // Process any remaining data in the buffer for chat
                if (streamBuffer.trim()) {
                  try {
                    const parsed = JSON.parse(streamBuffer);
                    if (parsed.message?.content) {
                      accumulatedContent += parsed.message.content;
                    }
                  } catch (e) {
                    console.warn('Failed to parse final chat chunk:', { buffer: streamBuffer, error: e });
                  }
                }
                // Resolve with the complete accumulated content
                resolve({
                  message: {
                    content: accumulatedContent
                  }
                } as T);
              } else if (path === '/api/pull') {
                // For pull, resolve with success
                resolve({ success: true } as T);
              } else {
                // For non-streaming endpoints, parse the accumulated response
                const trimmedResponse = responseData.trim();
                if (!trimmedResponse) {
                  throw new Error('Empty response received');
                }
                resolve(JSON.parse(trimmedResponse) as T);
              }
            } catch (e) {
              reject(new Error(`Failed to process response: ${e instanceof Error ? e.message : String(e)}`));
            }
          });
        });

        request.on('error', (error) => {
          console.error('Request error:', {
            path,
            error: error.message
          });
          reject(error);
        });

        if (body) {
          const bodyStr = JSON.stringify(body);
          console.log('Sending request:', {
            path,
            method,
            body: bodyStr
          });
          request.write(bodyStr);
        }

        request.end();
      } catch (e) {
        console.error('Failed to make request:', e);
        reject(e);
      }
    });
  }

  async getModels(): Promise<string[]> {
    try {
      const response = await this.makeRequest<OllamaListResponse>('/api/tags');
      return response.models.map(model => model.name);
    } catch (error) {
      console.error('Error fetching Ollama models:', error);
      throw new ServiceError(
        error instanceof Error ? error.message : 'Failed to fetch Ollama models'
      );
    }
  }

  async chat(params: OllamaChatParams): Promise<OllamaChatResponse> {
    if (!params?.model || !params?.messages?.length) {
      throw new ServiceError('Invalid chat parameters: model and messages are required');
    }

    try {
      console.log('Starting chat request:', {
        model: params.model,
        messageCount: params.messages.length
      });

      const { onChunk, temperature, ...requestParams } = params;
      const response = await this.makeRequest<OllamaChatResponse>(
        '/api/chat',
        'POST',
        {
          ...requestParams,
          stream: true,
          // Ollama reads sampling parameters from `options`; a top-level
          // `temperature` field is ignored by the API.
          options: { temperature: temperature ?? 0.7 }
        },
        onChunk
      );

      return response as OllamaChatResponse;
    } catch (error) {
      const errorMessage = error instanceof Error ? error.message : 'Failed to chat with Ollama';
      console.error('Chat error:', {
        error: errorMessage,
        params: {
          model: params.model,
          messageCount: params.messages.length
        }
      });
      throw new ServiceError(errorMessage);
    }
  }

  updateBaseUrl(baseUrl: string) {
    this.baseUrl = baseUrl;
  }

  async checkModel(modelName: string): Promise<OllamaModelStatus> {
    try {
      const models = await this.getModels();
      return {
        installed: models.includes(modelName),
        installing: false
      };
    } catch (error) {
      console.error('Error checking model:', error);
      throw new ServiceError(
        error instanceof Error ? error.message : 'Failed to check model status'
      );
    }
  }

  async pullModel(modelName: string, onProgress?: (status: string) => void): Promise<void> {
    try {
      console.log('Starting model pull for:', modelName);
      this._lastProgress = null; // Reset the progress cache for this pull

      // Make a direct request using net.request to handle streaming properly
      await new Promise<void>((resolve, reject) => {
        const request = net.request({
          url: `${this.baseUrl}/api/pull`,
          method: 'POST',
          headers: {
            'Content-Type': 'application/json',
          }
        });

        request.on('response', (response) => {
          if (response.statusCode !== 200) {
            reject(new Error(`HTTP error! status: ${response.statusCode}`));
            return;
          }

          let buffer = '';
          response.on('data', (chunk) => {
            buffer += chunk.toString();
            const lines = buffer.split('\n');
            buffer = lines.pop() || ''; // Keep the last incomplete line

            for (const line of lines) {
              if (!line.trim()) continue;

              try {
                const data = JSON.parse(line);
                console.log('Pull progress data:', data);

                if (data.status === 'success') {
                  if (onProgress) onProgress('downloading: 100% complete');
                } else if (data.total && typeof data.completed === 'number') {
                  // Round to the nearest whole number to reduce update frequency
                  const percentage = Math.round((data.completed / data.total) * 100);
                  // Cache the last reported progress to avoid duplicate updates;
                  // compare against null so a legitimate 0% still reports once
                  if (onProgress && (this._lastProgress === null || percentage !== this._lastProgress)) {
                    this._lastProgress = percentage;
                    onProgress(`downloading: ${percentage}% complete`);
                  }
                } else if (data.status) {
                  if (onProgress) onProgress(data.status);
                }
              } catch (e) {
                console.warn('Failed to parse progress data:', e);
              }
            }
          });

          response.on('end', () => {
            // Process any remaining data in the buffer
            if (buffer.trim()) {
              try {
                const data = JSON.parse(buffer);
                if (data.status === 'success') {
                  if (onProgress) onProgress('downloading: 100% complete');
                }
              } catch (e) {
                console.warn('Failed to parse final data:', e);
              }
            }
            resolve();
          });

          response.on('error', (error) => {
            console.error('Response error:', error);
            reject(error);
          });
        });

        request.on('error', (error) => {
          console.error('Request error:', error);
          reject(error);
        });

        // Send the request with the model name
        const body = JSON.stringify({ name: modelName });
        console.log('Sending pull request with body:', body);
        request.write(body);
        request.end();
      });

      // After a successful pull, verify the model exists
      console.log('Pull completed, verifying model installation...');

      // Give Ollama some time to process
      await new Promise(resolve => setTimeout(resolve, 2000));

      const models = await this.getModels();
      console.log('Available models:', models);

      if (!models.includes(modelName)) {
        throw new ServiceError('Model pull completed but model is not available in Ollama');
      }
    } catch (error) {
      console.error('Error pulling model:', error);
      throw new ServiceError(
        error instanceof Error ? error.message : 'Failed to pull model'
      );
    }
  }
}

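/**
 * Shared singleton instance. Illustrative usage (the model name is a
 * placeholder):
 *
 *   const status = await ollamaService.checkOllamaInstallation();
 *   if (status.installed && status.running) {
 *     await ollamaService.pullModel('llama3', s => console.log(s));
 *     const reply = await ollamaService.chat({
 *       model: 'llama3',
 *       messages: [{ role: 'user', content: 'Hello' }],
 *       onChunk: chunk => process.stdout.write(chunk),
 *     });
 *     console.log(reply.message.content);
 *   }
 */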
export const ollamaService = new OllamaService();