// omnidev/lib/ai-provider-handler.ts
import { InferenceClient } from "@huggingface/inference";
import { geminiClient, GeminiModelConfig } from "./gemini-client";
import { openRouterClient, OpenRouterModelConfig } from "./openrouter-client";
import { PROVIDERS, MODELS } from "./providers";
import {
PRO_AGENT_DESIGN_EXPERT_PROMPT,
PRO_AGENT_ANIMATION_EXPERT_PROMPT,
PRO_AGENT_INTEGRATION_EXPERT_PROMPT,
PRO_AGENT_DEVELOPER_PROMPT
} from "./prompts";
import type { ProAgentWorkflow, ProAgentStep, ProAgentStepResult, ProAgentConfig } from "../types";
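/** A single chat message in the provider-agnostic format used across all providers. */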
export interface AIMessage {
role: 'system' | 'user' | 'assistant';
content: string;
}
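/** A provider-agnostic generation request, routed to Google, OpenRouter, or Hugging Face. */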
export interface AIGenerationConfig {
model: string;
provider: string;
messages: AIMessage[];
maxTokens?: number;
temperature?: number;
topP?: number;
topK?: number;
stream?: boolean;
}
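/** The normalized response shape returned by every provider. */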
export interface AIResponse {
text: string;
finishReason?: string;
usage?: {
promptTokens: number;
completionTokens: number;
totalTokens: number;
};
}
export class AIProviderHandler {
private huggingFaceClient: InferenceClient;
constructor(token?: string) {
this.huggingFaceClient = new InferenceClient(token);
}
/**
* Generate content using the appropriate provider
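   *
   * @example
   * // A minimal sketch; the model and provider values are illustrative:
   * const res = await aiProviderHandler.generateContent({
   *   model: 'gemini-1.5-flash',
   *   provider: 'google',
   *   messages: [{ role: 'user', content: 'Hello' }]
   * });
   * console.log(res.text);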
*/
async generateContent(config: AIGenerationConfig): Promise<AIResponse> {
const { provider, model, messages, maxTokens, temperature, topP, topK } = config;
if (provider === 'google') {
return this.generateWithGemini(model, messages, {
maxTokens,
temperature,
topP,
topK
});
} else if (provider === 'openrouter') {
return this.generateWithOpenRouter(model, messages, {
maxTokens,
temperature,
topP,
topK
});
} else {
return this.generateWithHuggingFace(config);
}
}
/**
* Generate content stream using the appropriate provider
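   *
   * @example
   * // Consuming the stream; `cfg` is any AIGenerationConfig (values illustrative):
   * for await (const chunk of aiProviderHandler.generateContentStream(cfg)) {
   *   if (!chunk.done) process.stdout.write(chunk.text);
   * }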
*/
async* generateContentStream(config: AIGenerationConfig): AsyncGenerator<{ text: string; done: boolean }, void, unknown> {
const { provider, model, messages, maxTokens, temperature, topP, topK } = config;
if (provider === 'google') {
yield* this.generateStreamWithGemini(model, messages, {
maxTokens,
temperature,
topP,
topK
});
} else if (provider === 'openrouter') {
yield* this.generateStreamWithOpenRouter(model, messages, {
maxTokens,
temperature,
topP,
topK
});
} else {
yield* this.generateStreamWithHuggingFace(config);
}
}
/**
* Generate content using Google Gemini
*/
private async generateWithGemini(
model: string,
messages: AIMessage[],
config?: Partial<GeminiModelConfig>
): Promise<AIResponse> {
try {
// Convert messages to Gemini format
const geminiMessages = messages
.filter(msg => msg.role !== 'system')
.map(msg => ({
role: msg.role as 'user' | 'assistant',
content: msg.content
}));
// Get system instruction if present
const systemMessage = messages.find(msg => msg.role === 'system');
let response;
if (systemMessage && geminiMessages.length > 0) {
// Use system instruction with user prompt
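// Note: only the most recent message is sent with the system instruction; earlier turns are dropped here.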
const userMessage = geminiMessages[geminiMessages.length - 1];
response = await geminiClient.generateWithSystemInstruction(
model,
systemMessage.content,
userMessage.content,
config
);
} else if (geminiMessages.length > 1) {
// Use conversation history
response = await geminiClient.generateWithHistory(
model,
geminiMessages,
config
);
} else if (geminiMessages.length === 1) {
// Simple prompt
response = await geminiClient.generateContent(
model,
geminiMessages[0].content,
config
);
} else {
throw new Error('No valid messages provided');
}
return response;
} catch (error) {
console.error('Gemini generation error:', error);
throw new Error(`Gemini generation failed: ${error instanceof Error ? error.message : 'Unknown error'}`);
}
}
/**
* Generate content using OpenRouter
*/
private async generateWithOpenRouter(
model: string,
messages: AIMessage[],
config?: Partial<OpenRouterModelConfig>
): Promise<AIResponse> {
try {
// Convert messages to OpenRouter format
const openRouterMessages = messages
.filter(msg => msg.role !== 'system')
.map(msg => ({
role: msg.role as 'user' | 'assistant',
content: msg.content
}));
// Get system instruction if present
const systemMessage = messages.find(msg => msg.role === 'system');
let response;
if (systemMessage && openRouterMessages.length > 0) {
// Use system instruction with user prompt
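// As with Gemini above, only the most recent message is sent; earlier turns are dropped.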
const userMessage = openRouterMessages[openRouterMessages.length - 1];
response = await openRouterClient.generateWithSystemInstruction(
model,
systemMessage.content,
userMessage.content,
config
);
} else if (openRouterMessages.length > 1) {
// Use conversation history
response = await openRouterClient.generateWithHistory(
model,
openRouterMessages,
config
);
} else if (openRouterMessages.length === 1) {
// Simple prompt
response = await openRouterClient.generateContent(
model,
openRouterMessages[0].content,
config
);
} else {
throw new Error('No valid messages provided');
}
return response;
} catch (error) {
console.error('OpenRouter generation error:', error);
throw new Error(`OpenRouter generation failed: ${error instanceof Error ? error.message : 'Unknown error'}`);
}
}
/**
* Generate content stream using Google Gemini
*/
private async* generateStreamWithGemini(
model: string,
messages: AIMessage[],
config?: Partial<GeminiModelConfig>
): AsyncGenerator<{ text: string; done: boolean }, void, unknown> {
try {
// For streaming, use only the last user message, prefixed with the system instruction when present
const systemMessage = messages.find(msg => msg.role === 'system');
const userMessages = messages.filter(msg => msg.role === 'user');
if (userMessages.length === 0) {
throw new Error('No user messages provided');
}
const lastUserMessage = userMessages[userMessages.length - 1];
let prompt = lastUserMessage.content;
if (systemMessage) {
prompt = `${systemMessage.content}\n\nUser: ${prompt}\n\nAssistant:`;
}
const generator = geminiClient.generateContentStream(model, prompt, config);
for await (const chunk of generator) {
yield {
text: chunk.text,
done: chunk.done
};
}
} catch (error) {
console.error('Gemini streaming error:', error);
throw new Error(`Gemini streaming failed: ${error instanceof Error ? error.message : 'Unknown error'}`);
}
}
/**
* Generate content stream using OpenRouter
*/
private async* generateStreamWithOpenRouter(
model: string,
messages: AIMessage[],
config?: Partial<OpenRouterModelConfig>
): AsyncGenerator<{ text: string; done: boolean }, void, unknown> {
try {
// For streaming, use only the last user message, prefixed with the system instruction when present
const systemMessage = messages.find(msg => msg.role === 'system');
const userMessages = messages.filter(msg => msg.role === 'user');
if (userMessages.length === 0) {
throw new Error('No user messages provided');
}
const lastUserMessage = userMessages[userMessages.length - 1];
let prompt = lastUserMessage.content;
if (systemMessage) {
prompt = `${systemMessage.content}\n\nUser: ${prompt}\n\nAssistant:`;
}
const generator = openRouterClient.generateContentStream(model, prompt, config);
for await (const chunk of generator) {
yield {
text: chunk.text,
done: chunk.done
};
}
} catch (error) {
console.error('OpenRouter streaming error:', error);
throw new Error(`OpenRouter streaming failed: ${error instanceof Error ? error.message : 'Unknown error'}`);
}
}
/**
* Generate content using Hugging Face Inference
*/
private async generateWithHuggingFace(config: AIGenerationConfig): Promise<AIResponse> {
try {
const { model, provider, messages, maxTokens } = config;
// Google and OpenRouter are handled by their dedicated methods; fail fast if they reach here
if (provider === 'google') {
throw new Error('Google provider should be handled by generateWithGemini method');
}
if (provider === 'openrouter') {
throw new Error('OpenRouter provider should be handled by generateWithOpenRouter method');
}
const selectedProvider = PROVIDERS[provider as keyof typeof PROVIDERS];
if (!selectedProvider) {
throw new Error(`Provider ${provider} not found in supported providers: ${Object.keys(PROVIDERS).join(', ')}`);
}
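// The provider id from the local PROVIDERS table is cast below to the union type the
// InferenceClient expects; the cast assumes the table only contains ids the client supports.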
const response = await this.huggingFaceClient.chatCompletion({
model,
provider: selectedProvider.id as unknown as Parameters<InferenceClient["chatCompletion"]>[0]["provider"],
messages: messages.map(msg => ({
role: msg.role,
content: msg.content
})),
max_tokens: maxTokens ?? selectedProvider.max_tokens,
temperature: config.temperature ?? 0.7, // ?? so an explicit 0 is not silently overridden
});
const content = response.choices[0]?.message?.content || '';
return {
text: content,
finishReason: response.choices[0]?.finish_reason,
usage: {
promptTokens: response.usage?.prompt_tokens || 0,
completionTokens: response.usage?.completion_tokens || 0,
totalTokens: response.usage?.total_tokens || 0,
}
};
} catch (error) {
console.error('Hugging Face generation error:', error);
throw new Error(`Hugging Face generation failed: ${error instanceof Error ? error.message : 'Unknown error'}`);
}
}
/**
* Generate content stream using Hugging Face Inference
*/
private async* generateStreamWithHuggingFace(config: AIGenerationConfig): AsyncGenerator<{ text: string; done: boolean }, void, unknown> {
try {
const { model, provider, messages, maxTokens } = config;
// Google and OpenRouter are handled by their dedicated streaming methods; fail fast if they reach here
if (provider === 'google') {
throw new Error('Google provider should be handled by generateStreamWithGemini method');
}
if (provider === 'openrouter') {
throw new Error('OpenRouter provider should be handled by generateStreamWithOpenRouter method');
}
const selectedProvider = PROVIDERS[provider as keyof typeof PROVIDERS];
if (!selectedProvider) {
throw new Error(`Provider ${provider} not found in supported providers: ${Object.keys(PROVIDERS).join(', ')}`);
}
const chatCompletion = this.huggingFaceClient.chatCompletionStream({
model,
provider: selectedProvider.id as unknown as Parameters<InferenceClient["chatCompletionStream"]>[0]["provider"],
messages: messages.map(msg => ({
role: msg.role,
content: msg.content
})),
max_tokens: maxTokens ?? selectedProvider.max_tokens,
temperature: config.temperature ?? 0.7, // ?? so an explicit 0 is not silently overridden
});
for await (const value of chatCompletion) {
const chunk = value.choices[0]?.delta?.content;
if (chunk) {
yield { text: chunk, done: false };
}
}
yield { text: '', done: true };
} catch (error) {
console.error('Hugging Face streaming error:', error);
throw new Error(`Hugging Face streaming failed: ${error instanceof Error ? error.message : 'Unknown error'}`);
}
}
/**
* Validate if a model is supported by a provider
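   *
   * @example
   * // Illustrative check (the model id is an assumption):
   * AIProviderHandler.isModelSupportedByProvider('gemini-1.5-flash', 'google'); // => boolean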
*/
static isModelSupportedByProvider(model: string, provider: string): boolean {
if (provider === 'google') {
return geminiClient.getAvailableModels().includes(model);
}
if (provider === 'openrouter') {
return openRouterClient.getAvailableModels().includes(model);
}
// For other providers, check the MODELS configuration
const modelConfig = MODELS.find((m: { value: string; providers: string[] }) => m.value === model);
return modelConfig?.providers.includes(provider) || false;
}
/**
* Get the best provider for a model
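   *
   * @example
   * // Returns 'google' or 'openrouter' for their models, otherwise the model's
   * // autoProvider from MODELS, falling back to 'novita' (the id below is illustrative):
   * const provider = AIProviderHandler.getBestProvider('some-model-id');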
*/
static getBestProvider(model: string): string {
if (geminiClient.getAvailableModels().includes(model)) {
return 'google';
}
if (openRouterClient.getAvailableModels().includes(model)) {
return 'openrouter';
}
// For other models, return the auto provider
const modelConfig = MODELS.find((m: { value: string; autoProvider?: string }) => m.value === model);
return modelConfig?.autoProvider || 'novita';
}
/**
* Get model capabilities
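   *
   * @example
   * // Illustrative lookup; unknown models fall back to { maxTokens: 8192, features: ['text'], provider: 'novita' }:
   * const caps = AIProviderHandler.getModelCapabilities('gemini-1.5-flash');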
*/
static getModelCapabilities(model: string): {
maxTokens: number;
features: string[];
provider: string;
} {
if (geminiClient.getAvailableModels().includes(model)) {
const info = geminiClient.getModelInfo(model);
return {
maxTokens: info.maxTokens,
features: info.features,
provider: 'google'
};
}
if (openRouterClient.getAvailableModels().includes(model)) {
const info = openRouterClient.getModelInfo(model);
return {
maxTokens: info.maxTokens,
features: info.features,
provider: 'openrouter'
};
}
// For other models
const modelConfig = MODELS.find((m: { value: string; maxTokens?: number; features?: string[]; autoProvider?: string }) => m.value === model);
const provider = PROVIDERS[modelConfig?.autoProvider as keyof typeof PROVIDERS];
return {
maxTokens: modelConfig?.maxTokens || provider?.max_tokens || 8192,
features: modelConfig?.features || ['text'],
provider: modelConfig?.autoProvider || 'novita'
};
}
/**
 * Pro Agent Workflow - the professional agent system
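   *
   * Runs four sequential expert steps (design, animation, integration, developer),
   * feeding each step's output into the next step's prompt.
   *
   * @example
   * // A minimal sketch; the ProAgentConfig values are illustrative:
   * const wf = await aiProviderHandler.runProAgentWorkflow(
   *   'Build a portfolio site',
   *   { model: 'gemini-1.5-flash', provider: 'google', maxTokens: 4096, temperature: 0.7 },
   *   (step) => console.log(step.id, step.status)
   * );
   * console.log(wf.finalResult);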
*/
async runProAgentWorkflow(
userPrompt: string,
config: ProAgentConfig,
onStepUpdate?: (step: ProAgentStep) => void,
onThinkingUpdate?: (thinking: string, stepId: string) => void
): Promise<ProAgentWorkflow> {
const workflowId = `workflow-${Date.now()}`;
const startTime = new Date();
// Create the workflow steps (names and descriptions are Arabic UI labels: Design Expert, Animation Expert, Integration Expert, Developer)
const steps: ProAgentStep[] = [
{
id: 'design-expert',
name: 'خبير التصميم',
description: 'تحليل المتطلبات وإنشاء مواصفات التصميم',
status: 'pending'
},
{
id: 'animation-expert',
name: 'خبير الأنيميشن',
description: 'تصميم الأنيميشنات والتفاعلات',
status: 'pending'
},
{
id: 'integration-expert',
name: 'خبير التكامل',
description: 'دمج وتحسين جميع المواصفات',
status: 'pending'
},
{
id: 'developer',
name: 'المطور',
description: 'تنفيذ المشروع النهائي',
status: 'pending'
}
];
const workflow: ProAgentWorkflow = {
id: workflowId,
userPrompt,
steps,
currentStepIndex: 0,
status: 'running',
createdAt: startTime
};
try {
console.log("🚀 Starting Pro Agent workflow with config:", config);
// Step 1: design expert
console.log("🎨 Starting design expert step...");
const designResult = await this.executeProAgentStep(
'design-expert',
PRO_AGENT_DESIGN_EXPERT_PROMPT,
userPrompt,
config,
workflow,
onStepUpdate,
onThinkingUpdate
);
console.log("✅ Design expert completed");
// Step 2: animation expert
console.log("✨ Starting animation expert step...");
const animationPrompt = `طلب المستخدم الأصلي: ${userPrompt}\n\nمواصفات التصميم:\n${designResult.result}`;
const animationResult = await this.executeProAgentStep(
'animation-expert',
PRO_AGENT_ANIMATION_EXPERT_PROMPT,
animationPrompt,
config,
workflow,
onStepUpdate,
onThinkingUpdate
);
console.log("✅ Animation expert completed");
// Step 3: integration expert
console.log("🔧 Starting integration expert step...");
const integrationPrompt = `طلب المستخدم الأصلي: ${userPrompt}\n\nمواصفات التصميم:\n${designResult.result}\n\nمواصفات الأنيميشن:\n${animationResult.result}`;
const integrationResult = await this.executeProAgentStep(
'integration-expert',
PRO_AGENT_INTEGRATION_EXPERT_PROMPT,
integrationPrompt,
config,
workflow,
onStepUpdate,
onThinkingUpdate
);
console.log("✅ Integration expert completed");
// Step 4: developer
console.log("💻 Starting developer step...");
const finalPrompt = `${PRO_AGENT_DEVELOPER_PROMPT}\n\nالمواصفات المحسنة والمتكاملة:\n${integrationResult.result}`;
const developerResult = await this.executeProAgentStep(
'developer',
finalPrompt,
'',
config,
workflow,
onStepUpdate,
onThinkingUpdate
);
console.log("✅ Developer completed");
// Finalize the workflow
workflow.status = 'completed';
workflow.finalResult = developerResult.result;
workflow.completedAt = new Date();
workflow.totalDuration = workflow.completedAt.getTime() - startTime.getTime();
return workflow;
} catch (error) {
console.error("❌ Pro Agent workflow error:", error);
workflow.status = 'error';
workflow.completedAt = new Date();
// Mark the current step as failed
if (workflow.currentStepIndex < workflow.steps.length) {
workflow.steps[workflow.currentStepIndex].status = 'error';
workflow.steps[workflow.currentStepIndex].error = error instanceof Error ? error.message : 'Unknown error';
workflow.steps[workflow.currentStepIndex].endTime = new Date();
}
throw error;
}
}
/**
 * Execute a single Pro Agent step
*/
private async executeProAgentStep(
stepId: string,
systemPrompt: string,
userPrompt: string,
config: ProAgentConfig,
workflow: ProAgentWorkflow,
onStepUpdate?: (step: ProAgentStep) => void,
onThinkingUpdate?: (thinking: string, stepId: string) => void
): Promise<ProAgentStepResult> {
const step = workflow.steps.find(s => s.id === stepId);
if (!step) {
throw new Error(`Step ${stepId} not found`);
}
// Start the step
step.status = 'running';
step.startTime = new Date();
workflow.currentStepIndex = workflow.steps.findIndex(s => s.id === stepId);
if (onStepUpdate) {
onStepUpdate(step);
}
try {
const messages: AIMessage[] = [
{
role: 'system',
content: systemPrompt
},
{
role: 'user',
content: userPrompt
}
];
// Use the regular generateContent call, with simulated "thinking" updates streamed to the UI
console.log(`🧠 Starting step: ${stepId}`);
// Simulate step-specific thinking output (one update every 800 ms), customized by step type
if (onThinkingUpdate) {
const thinkingSteps = this.getThinkingStepsForStep(stepId, userPrompt);
for (let i = 0; i < thinkingSteps.length; i++) {
await new Promise(resolve => setTimeout(resolve, 800));
onThinkingUpdate(thinkingSteps.slice(0, i + 1).join('\n'), stepId);
}
}
console.log(`🤖 Calling AI for step ${stepId} with model ${config.model}`);
const response = await this.generateContent({
model: config.model,
provider: config.provider,
messages,
maxTokens: config.maxTokens,
temperature: config.temperature
});
console.log(`✅ AI response received for step ${stepId}, length: ${response.text.length}`);
const completeResponse = response.text;
// Mark the step as completed
step.status = 'completed';
step.result = completeResponse;
step.endTime = new Date();
step.duration = step.endTime.getTime() - (step.startTime?.getTime() || 0);
if (onStepUpdate) {
onStepUpdate(step);
}
return {
stepId,
success: true,
result: completeResponse
};
} catch (error) {
console.error(`❌ Error in step ${stepId}:`, error);
// Mark the step as failed
step.status = 'error';
step.error = error instanceof Error ? error.message : 'Unknown error';
step.endTime = new Date();
step.duration = step.endTime.getTime() - (step.startTime?.getTime() || 0);
if (onStepUpdate) {
onStepUpdate(step);
}
throw error;
}
}
/**
 * Build the simulated "thinking" steps for a given step type and prompt
 * (the returned strings are Arabic-language UI messages)
*/
private getThinkingStepsForStep(stepId: string, userPrompt: string): string[] {
const promptLower = userPrompt.toLowerCase();
switch (stepId) {
case 'design-expert':
if (promptLower.includes('medical') || promptLower.includes('healthcare') || promptLower.includes('x-ray')) {
return [
"تحليل متطلبات المنصة الطبية...",
"دراسة معايير التصميم الطبي والموثوقية...",
"تحديد نظام الألوان المناسب للمجال الطبي (أزرق، أبيض، رمادي)...",
"تصميم واجهة احترافية تناسب الأطباء والمختصين...",
"ضمان سهولة الاستخدام والوضوح في عرض البيانات الطبية...",
"إنهاء مواصفات التصميم الطبي الاحترافي"
];
} else if (promptLower.includes('personal') || promptLower.includes('portfolio')) {
return [
"تحليل متطلبات الموقع الشخصي...",
"تحديد الهوية البصرية المناسبة...",
"اختيار نظام الألوان والخطوط...",
"تصميم تخطيط جذاب ومتميز...",
"إنهاء مواصفات التصميم الشخصي"
];
} else {
return [
"تحليل طبيعة المشروع المطلوب...",
"دراسة أفضل الممارسات في التصميم...",
"تحديد النمط والهوية البصرية المناسبة...",
"تطوير مواصفات التصميم التفصيلية...",
"إنهاء التصميم الاحترافي"
];
}
case 'animation-expert':
return [
"تحليل مواصفات التصميم المقدمة...",
"تحديد الأنيميشنات المناسبة للمشروع...",
"تصميم تفاعلات سلسة ومتطورة...",
"تحسين الأنيميشنات للأداء الأمثل...",
"إنهاء مواصفات الأنيميشن المتقدمة"
];
case 'integration-expert':
return [
"مراجعة جميع المواصفات السابقة...",
"تحليل التوافق بين التصميم والأنيميشن...",
"حل أي تضارب في المتطلبات...",
"تحسين وتكامل جميع العناصر...",
"إنشاء البرومبت النهائي المتكامل"
];
case 'developer':
return [
"تحليل المواصفات النهائية للتطوير...",
"تخطيط هيكل الكود والمكونات...",
"تطوير HTML الدلالي والمنظم...",
"تنفيذ CSS المتقدم والأنيميشنات...",
"إضافة JavaScript للتفاعلات...",
"تحسين الأداء والاستجابة...",
"إنهاء المشروع بأعلى جودة"
];
default:
return [
"بدء تحليل المتطلبات...",
"تطوير الحلول المناسبة...",
"تحسين النتائج...",
"إنهاء المعالجة"
];
}
}
}
// Export singleton instance
export const aiProviderHandler = new AIProviderHandler();