WIP - using deep agent to create dog using workflow
This commit is contained in:
@@ -986,7 +986,7 @@ export class AiAssistantService {
|
||||
].includes(apiName);
|
||||
}
|
||||
|
||||
private async getOpenAiConfig(tenantId: string): Promise<OpenAIConfig | null> {
|
||||
async getOpenAiConfig(tenantId: string): Promise<OpenAIConfig | null> {
|
||||
const resolvedTenantId = await this.tenantDbService.resolveTenantId(tenantId);
|
||||
const centralPrisma = getCentralPrisma();
|
||||
const tenant = await centralPrisma.tenant.findUnique({
|
||||
|
||||
@@ -75,12 +75,20 @@ export const validateGraphDefinition = (
|
||||
}
|
||||
|
||||
const toolRegistry = new ToolRegistry();
|
||||
const allToolNames = toolRegistry.getAllToolNames();
|
||||
|
||||
graph.nodes.forEach((node) => {
|
||||
if (node.type === 'ToolNode') {
|
||||
const toolName = (node.data as { toolName?: string }).toolName;
|
||||
if (!toolName || !toolRegistry.isToolAllowed(tenantId, toolName)) {
|
||||
if (!toolName) {
|
||||
throw new GraphValidationError(
|
||||
`Tool ${toolName ?? 'unknown'} is not allowlisted for tenant.`,
|
||||
`ToolNode ${node.id} missing toolName configuration.`,
|
||||
);
|
||||
}
|
||||
// Validate tool exists in registry (allowlist check happens at runtime)
|
||||
if (!allToolNames.includes(toolName)) {
|
||||
throw new GraphValidationError(
|
||||
`Tool ${toolName} is not registered in the tool registry.`,
|
||||
);
|
||||
}
|
||||
}
|
||||
|
||||
@@ -119,6 +119,7 @@ export class AiProcessesController {
|
||||
}
|
||||
|
||||
@Post('ai-chat/messages')
|
||||
@Post('ai-processes/chat/messages')
|
||||
async sendChatMessage(
|
||||
@TenantId() tenantId: string,
|
||||
@CurrentUser() user: any,
|
||||
@@ -136,6 +137,7 @@ export class AiProcessesController {
|
||||
}
|
||||
|
||||
@Sse('ai-chat/stream')
|
||||
@Sse('ai-processes/stream')
|
||||
streamChat(@Query('sessionId') sessionId: string) {
|
||||
return this.streamService.getStream(sessionId);
|
||||
}
|
||||
|
||||
@@ -5,6 +5,7 @@ import { AiProcessesStreamService } from './ai-processes.stream.service';
|
||||
import { AiAssistantService } from '../ai-assistant/ai-assistant.service';
|
||||
import { TenantDatabaseService } from '../tenant/tenant-database.service';
|
||||
import { AiChatMessage, AiChatSession } from '../models/ai-chat.model';
|
||||
import { DeepAgentOrchestrator } from './deep-agent.orchestrator';
|
||||
|
||||
@Injectable()
|
||||
export class AiProcessesOrchestratorService {
|
||||
@@ -27,7 +28,6 @@ export class AiProcessesOrchestratorService {
|
||||
userId: string,
|
||||
) {
|
||||
return AiChatSession.query(knex).insert({
|
||||
tenantId,
|
||||
userId,
|
||||
});
|
||||
}
|
||||
@@ -54,7 +54,7 @@ export class AiProcessesOrchestratorService {
|
||||
? await AiChatSession.query(knex).findById(sessionId)
|
||||
: await this.createSessionWithContext(knex, resolvedTenantId, userId);
|
||||
|
||||
if (!session || session.tenantId !== resolvedTenantId) {
|
||||
if (!session) {
|
||||
throw new Error('Chat session not found.');
|
||||
}
|
||||
|
||||
@@ -72,18 +72,26 @@ export class AiProcessesOrchestratorService {
|
||||
data: { count: processes.length },
|
||||
});
|
||||
|
||||
// If no processes configured, fallback to standard AI assistant
|
||||
if (!processes.length) {
|
||||
const response = await this.aiAssistantService.handleChat(
|
||||
resolvedTenantId,
|
||||
userId,
|
||||
message,
|
||||
history ?? [],
|
||||
(history ?? []) as any,
|
||||
context ?? {},
|
||||
);
|
||||
this.streamService.emit(session.id, {
|
||||
type: 'final',
|
||||
data: { reply: response.reply, action: response.action },
|
||||
});
|
||||
|
||||
await AiChatMessage.query(knex).insert({
|
||||
sessionId: session.id,
|
||||
role: 'assistant',
|
||||
content: response.reply,
|
||||
});
|
||||
|
||||
return {
|
||||
sessionId: session.id,
|
||||
reply: response.reply,
|
||||
@@ -92,29 +100,113 @@ export class AiProcessesOrchestratorService {
|
||||
};
|
||||
}
|
||||
|
||||
const selectedProcess = processId
|
||||
? processes.find((proc) => proc.id === processId)
|
||||
: processes[0];
|
||||
// Get OpenAI credentials from tenant integrations
|
||||
const credentials = await this.aiAssistantService.getOpenAiConfig(resolvedTenantId);
|
||||
if (!credentials?.apiKey) {
|
||||
throw new Error('OpenAI credentials not configured for this tenant');
|
||||
}
|
||||
|
||||
// Create Deep Agent with tenant's credentials
|
||||
const deepAgent = new DeepAgentOrchestrator(credentials.apiKey, credentials.model);
|
||||
|
||||
// Use Deep Agent to select the best process
|
||||
const processInfos = processes.map((p) => ({
|
||||
id: p.id,
|
||||
name: p.name,
|
||||
description: p.description || undefined,
|
||||
}));
|
||||
|
||||
const selection = await deepAgent.selectProcess(
|
||||
message,
|
||||
processInfos,
|
||||
history as any,
|
||||
);
|
||||
|
||||
// If we need more information or no match, respond with question
|
||||
if (selection.action === 'need_more_info' || selection.action === 'no_match') {
|
||||
const reply = selection.question || selection.reasoning ||
|
||||
'I\'m not sure which process to use. Could you provide more details?';
|
||||
|
||||
this.streamService.emit(session.id, {
|
||||
type: 'final',
|
||||
data: { reply, needsMoreInfo: true },
|
||||
});
|
||||
|
||||
await AiChatMessage.query(knex).insert({
|
||||
sessionId: session.id,
|
||||
role: 'assistant',
|
||||
content: reply,
|
||||
});
|
||||
|
||||
return { sessionId: session.id, reply, needsMoreInfo: true };
|
||||
}
|
||||
|
||||
// Process selected - find it and execute
|
||||
const selectedProcess = processes.find((p) => p.id === selection.processId);
|
||||
if (!selectedProcess) {
|
||||
throw new Error('Process not found.');
|
||||
throw new Error('Selected process not found.');
|
||||
}
|
||||
|
||||
this.streamService.emit(session.id, {
|
||||
type: 'process_selected',
|
||||
processId: selectedProcess.id,
|
||||
version: selectedProcess.latestVersion,
|
||||
data: { processName: selectedProcess.name, reasoning: selection.reasoning },
|
||||
});
|
||||
|
||||
// Extract inputs from the message
|
||||
// For now, we'll use a simple approach - just pass the message as input
|
||||
// In a more sophisticated implementation, we'd use the deep agent to extract structured inputs
|
||||
const startMessage = await deepAgent.generateStartMessage(
|
||||
selectedProcess.name,
|
||||
{ message },
|
||||
);
|
||||
|
||||
this.streamService.emit(session.id, {
|
||||
type: 'agent_message',
|
||||
data: { message: startMessage },
|
||||
});
|
||||
|
||||
await AiChatMessage.query(knex).insert({
|
||||
sessionId: session.id,
|
||||
role: 'assistant',
|
||||
content: startMessage,
|
||||
});
|
||||
|
||||
const { run, result } = await this.processesService.createRun(
|
||||
resolvedTenantId,
|
||||
userId,
|
||||
selectedProcess.id,
|
||||
{ message },
|
||||
{ message, context: context || {} },
|
||||
session.id,
|
||||
(payload) => this.streamService.emit(session.id, payload),
|
||||
);
|
||||
|
||||
// Emit final event
|
||||
this.streamService.emit(session.id, {
|
||||
type: 'final',
|
||||
data: {
|
||||
runId: run.id,
|
||||
status: result.status,
|
||||
output: result.output,
|
||||
message: result.status === 'completed'
|
||||
? '✅ Workflow completed successfully!'
|
||||
: result.status === 'error'
|
||||
? `❌ Workflow failed: ${result.error?.message || 'Unknown error'}`
|
||||
: '⏸️ Workflow paused',
|
||||
},
|
||||
});
|
||||
|
||||
await AiChatMessage.query(knex).insert({
|
||||
sessionId: session.id,
|
||||
role: 'assistant',
|
||||
content: result.status === 'completed'
|
||||
? '✅ Workflow completed successfully!'
|
||||
: result.status === 'error'
|
||||
? `❌ Workflow failed: ${result.error?.message || 'Unknown error'}`
|
||||
: '⏸️ Workflow paused',
|
||||
});
|
||||
|
||||
return { sessionId: session.id, runId: run.id, status: result.status };
|
||||
}
|
||||
}
|
||||
|
||||
@@ -86,13 +86,24 @@ export const runCompiledGraph = async (
|
||||
const argsTemplate = (node.data as { argsTemplate: Record<string, unknown> })
|
||||
.argsTemplate;
|
||||
const resolvedArgs = resolveTemplate(argsTemplate, state);
|
||||
|
||||
// Debug logging
|
||||
console.log(`[ToolNode ${node.id}] Tool: ${toolName}`);
|
||||
console.log(`[ToolNode ${node.id}] State keys:`, Object.keys(state));
|
||||
console.log(`[ToolNode ${node.id}] ArgsTemplate:`, JSON.stringify(argsTemplate));
|
||||
console.log(`[ToolNode ${node.id}] ResolvedArgs:`, JSON.stringify(resolvedArgs));
|
||||
|
||||
const toolResult = await tool(toolContext, {
|
||||
...resolvedArgs,
|
||||
state,
|
||||
});
|
||||
|
||||
console.log(`[ToolNode ${node.id}] ToolResult:`, JSON.stringify(toolResult));
|
||||
|
||||
const outputMapping = (node.data as { outputMapping: Record<string, string> })
|
||||
.outputMapping;
|
||||
Object.entries(outputMapping).forEach(([key, path]) => {
|
||||
console.log(`[ToolNode ${node.id}] Mapping: toolResult['${key}'] = ${toolResult[key]} -> state['${path}']`);
|
||||
state[path] = toolResult[key];
|
||||
});
|
||||
}
|
||||
@@ -203,6 +214,9 @@ const validateNodeOutput = (
|
||||
const ajv = new Ajv({ allErrors: true, strict: false });
|
||||
const validate = ajv.compile(schema);
|
||||
if (!validate(output)) {
|
||||
throw new Error(`LLM output invalid for node ${node.id}.`);
|
||||
const errors = validate.errors?.map(e => `${e.instancePath} ${e.message}`).join(', ');
|
||||
throw new Error(
|
||||
`LLM output invalid for node ${node.id}. Errors: ${errors}. Output: ${JSON.stringify(output)}`
|
||||
);
|
||||
}
|
||||
};
|
||||
|
||||
@@ -15,7 +15,7 @@ const nodeTypes: AiNodeType[] = [
|
||||
'End',
|
||||
];
|
||||
|
||||
export const graphSchema: JSONSchemaType<ProcessGraphDefinition> = {
|
||||
export const graphSchema: any = {
|
||||
type: 'object',
|
||||
required: ['id', 'name', 'nodes', 'edges'],
|
||||
additionalProperties: false,
|
||||
@@ -47,7 +47,7 @@ export const graphSchema: JSONSchemaType<ProcessGraphDefinition> = {
|
||||
target: { type: 'string' },
|
||||
condition: { type: 'object', nullable: true },
|
||||
},
|
||||
} as JSONSchemaType<ProcessGraphEdge>,
|
||||
},
|
||||
processGraphNode: {
|
||||
type: 'object',
|
||||
required: ['id', 'type', 'data'],
|
||||
@@ -67,7 +67,7 @@ export const graphSchema: JSONSchemaType<ProcessGraphDefinition> = {
|
||||
},
|
||||
data: { type: 'object' },
|
||||
},
|
||||
} as JSONSchemaType<ProcessGraphNode>,
|
||||
},
|
||||
},
|
||||
};
|
||||
|
||||
|
||||
@@ -16,6 +16,7 @@ import {
|
||||
ProcessGraphDefinition,
|
||||
} from './ai-processes.types';
|
||||
import { ToolRegistry } from './tools/tool-registry';
|
||||
import { demoTools } from './tools/demo-tools';
|
||||
|
||||
@Injectable()
|
||||
export class AiProcessesService {
|
||||
@@ -31,9 +32,8 @@ export class AiProcessesService {
|
||||
const { knex, tenantId: resolvedTenantId } =
|
||||
await this.getTenantContext(tenantId);
|
||||
return AiProcess.query(knex)
|
||||
.where('tenantId', resolvedTenantId)
|
||||
.withGraphFetched('versions')
|
||||
.orderBy('createdAt', 'desc');
|
||||
.orderBy('created_at', 'desc');
|
||||
}
|
||||
|
||||
async getProcess(tenantId: string, processId: string) {
|
||||
@@ -62,21 +62,20 @@ export class AiProcessesService {
|
||||
|
||||
await AiProcess.query(trx).insert({
|
||||
id: processId,
|
||||
tenantId: resolvedTenantId,
|
||||
name,
|
||||
description,
|
||||
latestVersion: 1,
|
||||
createdBy: userId,
|
||||
});
|
||||
|
||||
await AiProcessVersion.query(trx).insert({
|
||||
await trx('ai_process_versions').insert({
|
||||
id: randomUUID(),
|
||||
tenantId: resolvedTenantId,
|
||||
processId,
|
||||
process_id: processId,
|
||||
version: 1,
|
||||
graphJson: graph,
|
||||
compiledJson: compiled,
|
||||
createdBy: userId,
|
||||
graph_json: JSON.stringify(graph),
|
||||
compiled_json: JSON.stringify(compiled),
|
||||
created_by: userId,
|
||||
created_at: new Date(),
|
||||
});
|
||||
|
||||
return AiProcess.query(trx)
|
||||
@@ -95,7 +94,7 @@ export class AiProcessesService {
|
||||
await this.getTenantContext(tenantId);
|
||||
|
||||
const process = await AiProcess.query(knex).findById(processId);
|
||||
if (!process || process.tenantId !== resolvedTenantId) {
|
||||
if (!process) {
|
||||
throw new Error('Process not found.');
|
||||
}
|
||||
|
||||
@@ -111,14 +110,14 @@ export class AiProcessesService {
|
||||
.patch({ latestVersion: nextVersion });
|
||||
|
||||
const versionId = randomUUID();
|
||||
await AiProcessVersion.query(trx).insert({
|
||||
await trx('ai_process_versions').insert({
|
||||
id: versionId,
|
||||
tenantId: resolvedTenantId,
|
||||
processId,
|
||||
process_id: processId,
|
||||
version: nextVersion,
|
||||
graphJson: graph,
|
||||
compiledJson: compiled,
|
||||
createdBy: userId,
|
||||
graph_json: JSON.stringify(graph),
|
||||
compiled_json: JSON.stringify(compiled),
|
||||
created_by: userId,
|
||||
created_at: new Date(),
|
||||
});
|
||||
|
||||
return AiProcessVersion.query(trx).findById(versionId);
|
||||
@@ -129,7 +128,7 @@ export class AiProcessesService {
|
||||
const { knex, tenantId: resolvedTenantId } =
|
||||
await this.getTenantContext(tenantId);
|
||||
return AiProcessVersion.query(knex)
|
||||
.where({ processId, tenantId: resolvedTenantId })
|
||||
.where({ process_id: processId })
|
||||
.orderBy('version', 'desc');
|
||||
}
|
||||
|
||||
@@ -144,12 +143,12 @@ export class AiProcessesService {
|
||||
const { knex, tenantId: resolvedTenantId } =
|
||||
await this.getTenantContext(tenantId);
|
||||
const process = await AiProcess.query(knex).findById(processId);
|
||||
if (!process || process.tenantId !== resolvedTenantId) {
|
||||
if (!process) {
|
||||
throw new Error('Process not found.');
|
||||
}
|
||||
|
||||
const versionRecord = await AiProcessVersion.query(knex).findOne({
|
||||
processId,
|
||||
process_id: processId,
|
||||
version: process.latestVersion,
|
||||
});
|
||||
|
||||
@@ -160,7 +159,6 @@ export class AiProcessesService {
|
||||
const runId = randomUUID();
|
||||
await AiProcessRun.query(knex).insert({
|
||||
id: runId,
|
||||
tenantId: resolvedTenantId,
|
||||
processId,
|
||||
version: versionRecord.version,
|
||||
status: 'running',
|
||||
@@ -174,16 +172,17 @@ export class AiProcessesService {
|
||||
throw new Error('Run not created.');
|
||||
}
|
||||
|
||||
const compiled = versionRecord.compiledJson as CompiledGraph;
|
||||
const toolRegistry = new ToolRegistry();
|
||||
const compiled = versionRecord.compiledJson as unknown as CompiledGraph;
|
||||
const toolRegistry = new ToolRegistry(demoTools);
|
||||
await toolRegistry.loadTenantAllowlist(resolvedTenantId, knex);
|
||||
|
||||
const emitAndAudit = (event: AiProcessEventPayload) => {
|
||||
emitEvent?.(event);
|
||||
void AiAuditEvent.query(knex).insert({
|
||||
id: randomUUID(),
|
||||
tenantId: resolvedTenantId,
|
||||
runId,
|
||||
eventType: event.type,
|
||||
payloadJson: event,
|
||||
payloadJson: event as any,
|
||||
});
|
||||
};
|
||||
const result = await runCompiledGraph(
|
||||
@@ -191,7 +190,7 @@ export class AiProcessesService {
|
||||
compiledGraph: compiled,
|
||||
input,
|
||||
toolRegistry,
|
||||
toolContext: { tenantId: resolvedTenantId, userId },
|
||||
toolContext: { tenantId: resolvedTenantId, userId, knex },
|
||||
onEvent: (event) => emitAndAudit({ ...event, runId, sessionId }),
|
||||
llmDecision: async (node, state) =>
|
||||
this.mockDecision(node.id, state),
|
||||
@@ -215,28 +214,29 @@ export class AiProcessesService {
|
||||
const { knex, tenantId: resolvedTenantId } =
|
||||
await this.getTenantContext(tenantId);
|
||||
const run = await AiProcessRun.query(knex).findById(runId);
|
||||
if (!run || run.tenantId !== resolvedTenantId) {
|
||||
if (!run) {
|
||||
throw new Error('Run not found.');
|
||||
}
|
||||
const versionRecord = await AiProcessVersion.query(knex).findOne({
|
||||
processId: run.processId,
|
||||
process_id: run.processId,
|
||||
version: run.version,
|
||||
});
|
||||
if (!versionRecord) {
|
||||
throw new Error('Process version not found.');
|
||||
}
|
||||
|
||||
const compiled = versionRecord.compiledJson as CompiledGraph;
|
||||
const toolRegistry = new ToolRegistry();
|
||||
const compiled = versionRecord.compiledJson as unknown as CompiledGraph;
|
||||
const toolRegistry = new ToolRegistry(demoTools);
|
||||
await toolRegistry.loadTenantAllowlist(resolvedTenantId, knex);
|
||||
|
||||
const mergedState = { ...(run.stateJson || {}), ...input };
|
||||
const emitAndAudit = (event: AiProcessEventPayload) => {
|
||||
emitEvent?.(event);
|
||||
void AiAuditEvent.query(knex).insert({
|
||||
id: randomUUID(),
|
||||
tenantId: resolvedTenantId,
|
||||
runId: run.id,
|
||||
eventType: event.type,
|
||||
payloadJson: event,
|
||||
payloadJson: event as any,
|
||||
});
|
||||
};
|
||||
|
||||
@@ -245,7 +245,7 @@ export class AiProcessesService {
|
||||
compiledGraph: compiled,
|
||||
input: mergedState,
|
||||
toolRegistry,
|
||||
toolContext: { tenantId: resolvedTenantId, userId },
|
||||
toolContext: { tenantId: resolvedTenantId, userId, knex },
|
||||
onEvent: (event) =>
|
||||
emitAndAudit({ ...event, runId: run.id, sessionId }),
|
||||
llmDecision: async (node, state) =>
|
||||
@@ -279,6 +279,30 @@ export class AiProcessesService {
|
||||
nodeId: string,
|
||||
state: Record<string, unknown>,
|
||||
) {
|
||||
if (nodeId === 'extract_info') {
|
||||
// Extract pet registration info from the message
|
||||
const message = (state.message as string) || '';
|
||||
|
||||
// Simple extraction (in production, this would use an LLM)
|
||||
const petNameMatch = message.match(/(?:dog|cat|pet)\s+named\s+(\w+)/i);
|
||||
const petTypeMatch = message.match(/(dog|cat)/i);
|
||||
const ownerNameMatch = message.match(/owned\s+by\s+([\w\s]+?)(?:\s*\(|$)/i);
|
||||
const emailMatch = message.match(/\(?([\w\.-]+@[\w\.-]+\.\w+)\)?/i);
|
||||
|
||||
const ownerName = ownerNameMatch?.[1]?.trim() || 'Unknown Owner';
|
||||
const nameParts = ownerName.split(/\s+/);
|
||||
const firstName = nameParts[0] || 'Unknown';
|
||||
const lastName = nameParts.slice(1).join(' ') || 'Owner';
|
||||
|
||||
return {
|
||||
petName: petNameMatch?.[1] || 'Unknown Pet',
|
||||
species: petTypeMatch?.[1]?.toLowerCase() || 'dog',
|
||||
ownerFirstName: firstName,
|
||||
ownerLastName: lastName,
|
||||
ownerEmail: emailMatch?.[1] || null,
|
||||
accountName: `${firstName} ${lastName}`,
|
||||
};
|
||||
}
|
||||
if (nodeId === 'decide_account') {
|
||||
const accountName = (state.accountName as string) ?? 'New Account';
|
||||
const accountAction = state.accountId ? 'find' : 'create';
|
||||
|
||||
@@ -94,6 +94,7 @@ export type AiProcessEventType =
|
||||
| 'agent_started'
|
||||
| 'processes_listed'
|
||||
| 'process_selected'
|
||||
| 'agent_message'
|
||||
| 'node_started'
|
||||
| 'tool_called'
|
||||
| 'node_completed'
|
||||
|
||||
202
backend/src/ai-processes/deep-agent.orchestrator.ts
Normal file
202
backend/src/ai-processes/deep-agent.orchestrator.ts
Normal file
@@ -0,0 +1,202 @@
|
||||
import { ChatOpenAI } from '@langchain/openai';
|
||||
import { JsonOutputParser } from '@langchain/core/output_parsers';
|
||||
import { SystemMessage, HumanMessage } from '@langchain/core/messages';
|
||||
|
||||
export interface ProcessInfo {
|
||||
id: string;
|
||||
name: string;
|
||||
description?: string;
|
||||
}
|
||||
|
||||
export interface ProcessSelectionResult {
|
||||
action: 'select_process' | 'need_more_info' | 'no_match';
|
||||
processId?: string;
|
||||
question?: string;
|
||||
reasoning?: string;
|
||||
}
|
||||
|
||||
export interface InputExtractionResult {
|
||||
hasAllInputs: boolean;
|
||||
extractedInputs: Record<string, unknown>;
|
||||
missingFields?: string[];
|
||||
question?: string;
|
||||
}
|
||||
|
||||
export class DeepAgentOrchestrator {
|
||||
private model: ChatOpenAI;
|
||||
|
||||
constructor(
|
||||
apiKey: string,
|
||||
modelName: string = 'gpt-4o',
|
||||
temperature: number = 0,
|
||||
) {
|
||||
this.model = new ChatOpenAI({
|
||||
apiKey,
|
||||
modelName,
|
||||
temperature,
|
||||
});
|
||||
}
|
||||
|
||||
/**
|
||||
* Step 1: Select the best matching process from available processes
|
||||
*/
|
||||
async selectProcess(
|
||||
userMessage: string,
|
||||
availableProcesses: ProcessInfo[],
|
||||
conversationHistory?: { role: string; text: string }[],
|
||||
): Promise<ProcessSelectionResult> {
|
||||
const processList = availableProcesses
|
||||
.map((p) => `- ${p.name} (ID: ${p.id}): ${p.description || 'No description'}`)
|
||||
.join('\n');
|
||||
|
||||
const historyContext =
|
||||
conversationHistory && conversationHistory.length > 0
|
||||
? `\n\nConversation history:\n${conversationHistory
|
||||
.map((msg) => `${msg.role}: ${msg.text}`)
|
||||
.join('\n')}`
|
||||
: '';
|
||||
|
||||
const systemPrompt = `You are an intelligent process orchestrator. Your task is to select the most appropriate business process based on the user's request.
|
||||
|
||||
Available processes:
|
||||
${processList}
|
||||
|
||||
Rules:
|
||||
1. Select exactly ONE process that best matches the user's intent
|
||||
2. If the request is ambiguous or matches multiple processes, ask for clarification
|
||||
3. If no process matches, indicate no match
|
||||
4. Always provide reasoning for your decision
|
||||
|
||||
Respond with JSON:
|
||||
{
|
||||
"action": "select_process" | "need_more_info" | "no_match",
|
||||
"processId": "selected process ID or null",
|
||||
"question": "clarifying question if needed",
|
||||
"reasoning": "brief explanation of decision"
|
||||
}`;
|
||||
|
||||
const userPrompt = `User request: ${userMessage}${historyContext}`;
|
||||
|
||||
try {
|
||||
const response = await this.model.invoke([
|
||||
new SystemMessage(systemPrompt),
|
||||
new HumanMessage(userPrompt),
|
||||
]);
|
||||
|
||||
const parser = new JsonOutputParser<ProcessSelectionResult>();
|
||||
const content = response.content as string;
|
||||
const jsonMatch = content.match(/\{[\s\S]*\}/);
|
||||
|
||||
if (jsonMatch) {
|
||||
return await parser.parse(jsonMatch[0]);
|
||||
}
|
||||
|
||||
return {
|
||||
action: 'no_match',
|
||||
reasoning: 'Failed to parse LLM response',
|
||||
};
|
||||
} catch (error: any) {
|
||||
console.error('Process selection error:', error);
|
||||
return {
|
||||
action: 'no_match',
|
||||
reasoning: `Error: ${error.message}`,
|
||||
};
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Step 2: Extract required inputs from user message
|
||||
*/
|
||||
async extractInputs(
|
||||
userMessage: string,
|
||||
requiredFields: { name: string; description: string; required: boolean }[],
|
||||
conversationHistory?: { role: string; text: string }[],
|
||||
context?: Record<string, unknown>,
|
||||
): Promise<InputExtractionResult> {
|
||||
const fieldsList = requiredFields
|
||||
.map((f) => `- ${f.name} (${f.required ? 'required' : 'optional'}): ${f.description}`)
|
||||
.join('\n');
|
||||
|
||||
const historyContext =
|
||||
conversationHistory && conversationHistory.length > 0
|
||||
? `\n\nConversation history:\n${conversationHistory
|
||||
.map((msg) => `${msg.role}: ${msg.text}`)
|
||||
.join('\n')}`
|
||||
: '';
|
||||
|
||||
const contextInfo = context ? `\n\nAvailable context: ${JSON.stringify(context)}` : '';
|
||||
|
||||
const systemPrompt = `You are an input extraction assistant. Extract structured data from the user's message and conversation history.
|
||||
|
||||
Required fields for this process:
|
||||
${fieldsList}${contextInfo}
|
||||
|
||||
Rules:
|
||||
1. Extract as many fields as possible from the message and context
|
||||
2. Only mark hasAllInputs=true if ALL required fields are present
|
||||
3. If required fields are missing, generate a natural question to ask the user
|
||||
4. Use context data when available (e.g., current page context)
|
||||
|
||||
Respond with JSON:
|
||||
{
|
||||
"hasAllInputs": true | false,
|
||||
"extractedInputs": { "field1": "value1", ... },
|
||||
"missingFields": ["field1", "field2"] or undefined,
|
||||
"question": "natural language question" or undefined
|
||||
}`;
|
||||
|
||||
const userPrompt = `User message: ${userMessage}${historyContext}`;
|
||||
|
||||
try {
|
||||
const response = await this.model.invoke([
|
||||
new SystemMessage(systemPrompt),
|
||||
new HumanMessage(userPrompt),
|
||||
]);
|
||||
|
||||
const parser = new JsonOutputParser<InputExtractionResult>();
|
||||
const content = response.content as string;
|
||||
const jsonMatch = content.match(/\{[\s\S]*\}/);
|
||||
|
||||
if (jsonMatch) {
|
||||
return await parser.parse(jsonMatch[0]);
|
||||
}
|
||||
|
||||
return {
|
||||
hasAllInputs: false,
|
||||
extractedInputs: {},
|
||||
missingFields: requiredFields.filter((f) => f.required).map((f) => f.name),
|
||||
question: 'I need more information to proceed. Could you provide additional details?',
|
||||
};
|
||||
} catch (error: any) {
|
||||
console.error('Input extraction error:', error);
|
||||
return {
|
||||
hasAllInputs: false,
|
||||
extractedInputs: {},
|
||||
question: 'I encountered an error processing your request. Please try again.',
|
||||
};
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Step 3: Generate a friendly response explaining what will happen
|
||||
*/
|
||||
async generateStartMessage(
|
||||
processName: string,
|
||||
extractedInputs: Record<string, unknown>,
|
||||
): Promise<string> {
|
||||
const systemPrompt = `You are a friendly assistant explaining what process will be executed. Be concise and clear.`;
|
||||
|
||||
const userPrompt = `Generate a brief message (1-2 sentences) confirming that you will execute the "${processName}" process with these inputs: ${JSON.stringify(extractedInputs)}`;
|
||||
|
||||
try {
|
||||
const response = await this.model.invoke([
|
||||
new SystemMessage(systemPrompt),
|
||||
new HumanMessage(userPrompt),
|
||||
]);
|
||||
|
||||
return (response.content as string).trim();
|
||||
} catch (error) {
|
||||
return `I'll execute the ${processName} process with your provided information.`;
|
||||
}
|
||||
}
|
||||
}
|
||||
226
backend/src/ai-processes/tools/demo-tools.ts
Normal file
226
backend/src/ai-processes/tools/demo-tools.ts
Normal file
@@ -0,0 +1,226 @@
|
||||
import { ToolContext, ToolHandler } from './tool-registry';
|
||||
import { Account } from '../../models/account.model';
|
||||
import { Contact } from '../../models/contact.model';
|
||||
import { randomUUID } from 'crypto';
|
||||
|
||||
/**
|
||||
* Demo tools that wrap ObjectService operations
|
||||
* These tools provide structured access to CRM entities
|
||||
*/
|
||||
|
||||
export const findAccount: ToolHandler = async (ctx, args) => {
|
||||
if (!ctx.knex) {
|
||||
throw new Error('Knex connection required for findAccount');
|
||||
}
|
||||
|
||||
const { name } = args as { name?: string };
|
||||
|
||||
if (!name) {
|
||||
return { found: false, accountId: null, message: 'Name required' };
|
||||
}
|
||||
|
||||
try {
|
||||
const query = Account.query(ctx.knex).where('name', 'like', `%${name}%`);
|
||||
|
||||
const account = await query.first();
|
||||
|
||||
if (account) {
|
||||
return {
|
||||
found: true,
|
||||
accountId: account.id,
|
||||
account: {
|
||||
id: account.id,
|
||||
name: account.name,
|
||||
},
|
||||
};
|
||||
}
|
||||
|
||||
return { found: false, accountId: null };
|
||||
} catch (error: any) {
|
||||
return { found: false, error: error.message };
|
||||
}
|
||||
};
|
||||
|
||||
export const createAccount: ToolHandler = async (ctx, args) => {
|
||||
if (!ctx.knex) {
|
||||
throw new Error('Knex connection required for createAccount');
|
||||
}
|
||||
|
||||
const { name, email, phone, industry } = args as {
|
||||
name: string;
|
||||
email?: string;
|
||||
phone?: string;
|
||||
industry?: string;
|
||||
};
|
||||
|
||||
if (!name) {
|
||||
throw new Error('Account name is required');
|
||||
}
|
||||
|
||||
try {
|
||||
const accountId = randomUUID();
|
||||
await ctx.knex('accounts').insert({
|
||||
id: accountId,
|
||||
name,
|
||||
phone,
|
||||
industry,
|
||||
ownerId: ctx.userId,
|
||||
});
|
||||
|
||||
return {
|
||||
success: true,
|
||||
accountId,
|
||||
account: {
|
||||
id: accountId,
|
||||
name,
|
||||
},
|
||||
};
|
||||
} catch (error: any) {
|
||||
return { success: false, error: error.message };
|
||||
}
|
||||
};
|
||||
|
||||
export const findContact: ToolHandler = async (ctx, args) => {
|
||||
if (!ctx.knex) {
|
||||
throw new Error('Knex connection required for findContact');
|
||||
}
|
||||
|
||||
const { firstName, lastName, accountId } = args as {
|
||||
firstName?: string;
|
||||
lastName?: string;
|
||||
accountId?: string;
|
||||
};
|
||||
|
||||
if (!firstName && !lastName) {
|
||||
return {
|
||||
found: false,
|
||||
contactId: null,
|
||||
message: 'First name or last name required',
|
||||
};
|
||||
}
|
||||
|
||||
try {
|
||||
let query = Contact.query(ctx.knex);
|
||||
|
||||
if (firstName) {
|
||||
query = query.where('firstName', 'like', `%${firstName}%`);
|
||||
}
|
||||
if (lastName) {
|
||||
query = query.where('lastName', 'like', `%${lastName}%`);
|
||||
}
|
||||
if (accountId) {
|
||||
query = query.where('accountId', accountId);
|
||||
}
|
||||
|
||||
const contact = await query.first();
|
||||
|
||||
if (contact) {
|
||||
return {
|
||||
found: true,
|
||||
contactId: contact.id,
|
||||
contact: {
|
||||
id: contact.id,
|
||||
firstName: contact.firstName,
|
||||
lastName: contact.lastName,
|
||||
accountId: contact.accountId,
|
||||
},
|
||||
};
|
||||
}
|
||||
|
||||
return { found: false, contactId: null };
|
||||
} catch (error: any) {
|
||||
return { found: false, error: error.message };
|
||||
}
|
||||
};
|
||||
|
||||
export const createContact: ToolHandler = async (ctx, args) => {
|
||||
if (!ctx.knex) {
|
||||
throw new Error('Knex connection required for createContact');
|
||||
}
|
||||
|
||||
const { firstName, lastName, email, phone, accountId } = args as {
|
||||
firstName: string;
|
||||
lastName: string;
|
||||
email?: string;
|
||||
phone?: string;
|
||||
accountId?: string;
|
||||
};
|
||||
|
||||
if (!firstName || !lastName) {
|
||||
throw new Error('First name and last name are required');
|
||||
}
|
||||
|
||||
try {
|
||||
const contactId = randomUUID();
|
||||
await ctx.knex('contacts').insert({
|
||||
id: contactId,
|
||||
firstName,
|
||||
lastName,
|
||||
accountId,
|
||||
ownerId: ctx.userId,
|
||||
});
|
||||
|
||||
return {
|
||||
success: true,
|
||||
contactId,
|
||||
contact: {
|
||||
id: contactId,
|
||||
firstName,
|
||||
lastName,
|
||||
accountId,
|
||||
},
|
||||
};
|
||||
} catch (error: any) {
|
||||
return { success: false, error: error.message };
|
||||
}
|
||||
};
|
||||
|
||||
export const createPet: ToolHandler = async (ctx, args) => {
|
||||
if (!ctx.knex) {
|
||||
throw new Error('Knex connection required for createPet');
|
||||
}
|
||||
|
||||
const { name, species, breed, age, ownerId } = args as {
|
||||
name: string;
|
||||
species: string;
|
||||
breed?: string;
|
||||
age?: number;
|
||||
ownerId: string; // Contact ID
|
||||
};
|
||||
|
||||
if (!name || !ownerId) {
|
||||
throw new Error('Pet name and owner (contact) are required');
|
||||
}
|
||||
|
||||
try {
|
||||
const petId = randomUUID();
|
||||
|
||||
// Get the accountId from the contact
|
||||
const contact = await ctx.knex('contacts').where('id', ownerId).first();
|
||||
|
||||
// Insert into dogs table
|
||||
await ctx.knex('dogs').insert({
|
||||
id: petId,
|
||||
name,
|
||||
ownerId,
|
||||
accountId: contact?.accountId,
|
||||
});
|
||||
|
||||
return {
|
||||
success: true,
|
||||
petId,
|
||||
pet: { id: petId, name, ownerId, accountId: contact?.accountId },
|
||||
};
|
||||
} catch (error: any) {
|
||||
return { success: false, error: error.message };
|
||||
}
|
||||
};
|
||||
|
||||
// Export all demo tools
|
||||
export const demoTools = {
|
||||
findAccount,
|
||||
createAccount,
|
||||
findContact,
|
||||
createContact,
|
||||
createPet,
|
||||
};
|
||||
@@ -1,6 +1,10 @@
|
||||
import { Knex } from 'knex';
|
||||
import { AiToolConfig } from '../../models/ai-process.model';
|
||||
|
||||
// Per-invocation execution context passed to every ToolHandler.
export interface ToolContext {
  tenantId: string;
  // User on whose behalf the tool runs; createContact records it as ownerId.
  userId: string;
  // Tenant-scoped database connection; optional, so handlers that need it
  // must guard (see createPet's explicit check).
  knex?: Knex;
  // NOTE(review): not referenced by the visible handlers — presumably used
  // for authorization checks elsewhere; confirm before relying on it.
  authScopes?: string[];
}
|
||||
|
||||
@@ -9,6 +13,13 @@ export type ToolHandler = (
|
||||
args: Record<string, unknown>,
|
||||
) => Promise<Record<string, unknown>>;
|
||||
|
||||
// Metadata for a registered tool: its name, a human-readable description,
// the handler implementation, and an optional description of its arguments.
export interface ToolDefinition {
  name: string;
  description: string;
  handler: ToolHandler;
  // NOTE(review): appears to be a JSON-Schema-like shape for the tool's
  // arguments — TODO confirm where (or whether) it is enforced.
  inputSchema?: Record<string, unknown>;
}
|
||||
|
||||
const defaultTools: Record<string, ToolHandler> = {
|
||||
findAccount: async () => ({ accountId: null, found: false }),
|
||||
createAccount: async (_ctx, args) => ({ accountId: `acc_${Date.now()}`, args }),
|
||||
@@ -24,6 +35,7 @@ const tenantAllowlist: Record<string, string[]> = {
|
||||
export class ToolRegistry {
|
||||
private tools: Record<string, ToolHandler>;
|
||||
private allowlist: Record<string, string[]>;
|
||||
private dbAllowlistCache: Map<string, Set<string>> = new Map();
|
||||
|
||||
constructor(
|
||||
tools: Record<string, ToolHandler> = defaultTools,
|
||||
@@ -33,7 +45,32 @@ export class ToolRegistry {
|
||||
this.allowlist = allowlist;
|
||||
}
|
||||
|
||||
isToolAllowed(tenantId: string, toolName: string) {
|
||||
registerTool(name: string, handler: ToolHandler) {
|
||||
this.tools[name] = handler;
|
||||
}
|
||||
|
||||
  /**
   * Load the set of enabled tool names from the database and cache it
   * under the tenant ID. Returns the loaded set.
   *
   * NOTE(review): the query filters only on `enabled` — tenant isolation is
   * presumably provided by `knex` being a tenant-scoped connection
   * (AiToolConfig has no tenantId column). Confirm, since the cache is
   * keyed by tenantId. The cache is never invalidated once populated.
   */
  async loadTenantAllowlist(tenantId: string, knex: Knex) {
    const configs = await AiToolConfig.query(knex)
      .where('enabled', true);

    const allowed = new Set(configs.map((c) => c.toolName));
    this.dbAllowlistCache.set(tenantId, allowed);
    return allowed;
  }
|
||||
|
||||
async isToolAllowed(tenantId: string, toolName: string, knex?: Knex) {
|
||||
// Check database cache first
|
||||
if (this.dbAllowlistCache.has(tenantId)) {
|
||||
return this.dbAllowlistCache.get(tenantId)!.has(toolName);
|
||||
}
|
||||
|
||||
// Load from database if knex provided
|
||||
if (knex) {
|
||||
const allowed = await this.loadTenantAllowlist(tenantId, knex);
|
||||
return allowed.has(toolName);
|
||||
}
|
||||
|
||||
// Fallback to static allowlist
|
||||
const allowed = this.allowlist[tenantId] || this.allowlist.default || [];
|
||||
return allowed.includes(toolName);
|
||||
}
|
||||
@@ -45,4 +82,8 @@ export class ToolRegistry {
|
||||
}
|
||||
return tool;
|
||||
}
|
||||
|
||||
  // Names of every registered tool (in registration/key order), regardless
  // of any tenant allowlist.
  getAllToolNames(): string[] {
    return Object.keys(this.tools);
  }
|
||||
}
|
||||
|
||||
@@ -7,7 +7,6 @@ export class AiChatSession extends BaseModel {
|
||||
static columnNameMappers = snakeCaseMappers();
|
||||
|
||||
id!: string;
|
||||
tenantId!: string;
|
||||
userId!: string;
|
||||
createdAt!: Date;
|
||||
|
||||
@@ -25,7 +24,7 @@ export class AiChatSession extends BaseModel {
|
||||
modelClass: AiChatMessage,
|
||||
join: {
|
||||
from: 'ai_chat_sessions.id',
|
||||
to: 'ai_chat_messages.sessionId',
|
||||
to: 'ai_chat_messages.session_id',
|
||||
},
|
||||
},
|
||||
};
|
||||
@@ -55,7 +54,7 @@ export class AiChatMessage extends BaseModel {
|
||||
relation: BaseModel.BelongsToOneRelation,
|
||||
modelClass: AiChatSession,
|
||||
join: {
|
||||
from: 'ai_chat_messages.sessionId',
|
||||
from: 'ai_chat_messages.session_id',
|
||||
to: 'ai_chat_sessions.id',
|
||||
},
|
||||
},
|
||||
|
||||
@@ -7,7 +7,6 @@ export class AiProcess extends BaseModel {
|
||||
static columnNameMappers = snakeCaseMappers();
|
||||
|
||||
id!: string;
|
||||
tenantId!: string;
|
||||
name!: string;
|
||||
description?: string;
|
||||
latestVersion!: number;
|
||||
@@ -27,7 +26,7 @@ export class AiProcess extends BaseModel {
|
||||
modelClass: AiProcessVersion,
|
||||
join: {
|
||||
from: 'ai_processes.id',
|
||||
to: 'ai_process_versions.processId',
|
||||
to: 'ai_process_versions.process_id',
|
||||
},
|
||||
},
|
||||
runs: {
|
||||
@@ -35,7 +34,7 @@ export class AiProcess extends BaseModel {
|
||||
modelClass: AiProcessRun,
|
||||
join: {
|
||||
from: 'ai_processes.id',
|
||||
to: 'ai_process_runs.processId',
|
||||
to: 'ai_process_runs.process_id',
|
||||
},
|
||||
},
|
||||
};
|
||||
@@ -45,9 +44,9 @@ export class AiProcess extends BaseModel {
|
||||
export class AiProcessVersion extends BaseModel {
|
||||
static tableName = 'ai_process_versions';
|
||||
static columnNameMappers = snakeCaseMappers();
|
||||
static jsonAttributes = ['graphJson', 'compiledJson'];
|
||||
|
||||
id!: string;
|
||||
tenantId!: string;
|
||||
processId!: string;
|
||||
version!: number;
|
||||
graphJson!: Record<string, unknown>;
|
||||
@@ -68,7 +67,7 @@ export class AiProcessVersion extends BaseModel {
|
||||
relation: BaseModel.BelongsToOneRelation,
|
||||
modelClass: AiProcess,
|
||||
join: {
|
||||
from: 'ai_process_versions.processId',
|
||||
from: 'ai_process_versions.process_id',
|
||||
to: 'ai_processes.id',
|
||||
},
|
||||
},
|
||||
@@ -79,9 +78,9 @@ export class AiProcessVersion extends BaseModel {
|
||||
export class AiProcessRun extends BaseModel {
|
||||
static tableName = 'ai_process_runs';
|
||||
static columnNameMappers = snakeCaseMappers();
|
||||
static jsonAttributes = ['inputJson', 'outputJson', 'errorJson', 'stateJson'];
|
||||
|
||||
id!: string;
|
||||
tenantId!: string;
|
||||
processId!: string;
|
||||
version!: number;
|
||||
status!: string;
|
||||
@@ -106,7 +105,7 @@ export class AiProcessRun extends BaseModel {
|
||||
relation: BaseModel.BelongsToOneRelation,
|
||||
modelClass: AiProcess,
|
||||
join: {
|
||||
from: 'ai_process_runs.processId',
|
||||
from: 'ai_process_runs.process_id',
|
||||
to: 'ai_processes.id',
|
||||
},
|
||||
},
|
||||
@@ -117,9 +116,9 @@ export class AiProcessRun extends BaseModel {
|
||||
export class AiAuditEvent extends BaseModel {
|
||||
static tableName = 'ai_audit_events';
|
||||
static columnNameMappers = snakeCaseMappers();
|
||||
static jsonAttributes = ['payloadJson'];
|
||||
|
||||
id!: string;
|
||||
tenantId!: string;
|
||||
runId!: string;
|
||||
eventType!: string;
|
||||
payloadJson!: Record<string, unknown>;
|
||||
@@ -138,10 +137,28 @@ export class AiAuditEvent extends BaseModel {
|
||||
relation: BaseModel.BelongsToOneRelation,
|
||||
modelClass: AiProcessRun,
|
||||
join: {
|
||||
from: 'ai_audit_events.runId',
|
||||
from: 'ai_audit_events.run_id',
|
||||
to: 'ai_process_runs.id',
|
||||
},
|
||||
},
|
||||
};
|
||||
}
|
||||
}
|
||||
|
||||
// Row model for the `ai_tool_configs` table. Rows with enabled=true feed the
// tenant tool allowlist (queried by ToolRegistry.loadTenantAllowlist).
export class AiToolConfig extends BaseModel {
  static tableName = 'ai_tool_configs';
  // DB columns are snake_case; model properties are camelCase.
  static columnNameMappers = snakeCaseMappers();
  // Stored as JSON in the database.
  static jsonAttributes = ['configJson'];

  id!: string;
  // Tool handler name this config row refers to.
  toolName!: string;
  // Only enabled rows are included in the allowlist.
  enabled!: boolean;
  // Free-form per-tool configuration; shape not visible here.
  configJson?: Record<string, unknown>;
  createdAt!: Date;
  updatedAt!: Date;

  // Generate an id when the caller did not supply one, then defer to the
  // base model's insert hook.
  $beforeInsert(queryContext: QueryContext) {
    this.id = this.id || randomUUID();
    super.$beforeInsert(queryContext);
  }
}
|
||||
|
||||
@@ -110,8 +110,9 @@ export class TenantDatabaseService {
|
||||
* @deprecated Use getTenantKnexByDomain or getTenantKnexById instead
|
||||
*/
|
||||
async getTenantKnex(tenantIdOrSlug: string): Promise<Knex> {
|
||||
// Assume it's a domain if it contains a dot
|
||||
return this.getTenantKnexByDomain(tenantIdOrSlug);
|
||||
// Resolve tenant ID first, then get connection by ID
|
||||
const tenantId = await this.resolveTenantId(tenantIdOrSlug);
|
||||
return this.getTenantKnexById(tenantId);
|
||||
}
|
||||
|
||||
/**
|
||||
|
||||
Reference in New Issue
Block a user