feat: add chat functionality

Manoj K 2025-07-08 08:26:06 +05:30
parent fa8d2064e1
commit 293927fa06
18 changed files with 2737 additions and 160 deletions

View File

@ -0,0 +1,579 @@
/* eslint-disable @typescript-eslint/no-explicit-any */
import { ActionStatusEnum, LLMMappings } from "@core/types";
import { logger } from "@trigger.dev/sdk/v3";
import {
type CoreMessage,
type DataContent,
jsonSchema,
tool,
type ToolSet,
} from "ai";
import axios from "axios";
import Handlebars from "handlebars";
import { REACT_SYSTEM_PROMPT, REACT_USER_PROMPT } from "./prompt";
import { generate, processTag } from "./stream-utils";
import { type AgentMessage, AgentMessageType, Message } from "./types";
import { type MCP } from "../utils/mcp";
import {
type ExecutionState,
type HistoryStep,
type Resource,
type TotalCost,
} from "../utils/types";
import { flattenObject } from "../utils/utils";
import { searchMemory, addMemory } from "./memory-utils";
interface LLMOutputInterface {
response: AsyncGenerator<
| string
| {
type: string;
toolName: string;
// eslint-disable-next-line @typescript-eslint/no-explicit-any
args?: any;
toolCallId?: string;
message?: string;
},
any,
any
>;
}
const progressUpdateTool = tool({
description:
"Send a progress update to the user about what has been discovered or what will be done next, in a crisp, user-friendly way with no technical terms",
parameters: jsonSchema({
type: "object",
properties: {
message: {
type: "string",
description: "The progress update message to send to the user",
},
},
required: ["message"],
additionalProperties: false,
}),
});
const searchMemoryTool = tool({
description:
"Search the user's memory graph for episodes or statements based on a query",
parameters: jsonSchema({
type: "object",
properties: {
query: {
type: "string",
description: "The search query to find relevant information in memory",
},
spaceId: {
type: "string",
description: "Optional space ID to search within a specific space",
},
sessionId: {
type: "string",
description: "Optional session ID to search within a specific session",
},
},
required: ["query"],
additionalProperties: false,
}),
});
const addMemoryTool = tool({
description: "Add information to the user's memory graph",
parameters: jsonSchema({
type: "object",
properties: {
episodeBody: {
type: "string",
description: "The content/text to add to memory",
},
referenceTime: {
type: "string",
description:
"ISO 8601 timestamp for when this information is relevant (defaults to current time)",
},
source: {
type: "string",
description:
"Source of the information (e.g., 'user', 'chat', 'system')",
},
spaceId: {
type: "string",
description: "Optional space ID to add memory to a specific space",
},
sessionId: {
type: "string",
description: "Optional session ID to associate with a specific session",
},
metadata: {
type: "object",
description: "Optional metadata object for additional context",
},
},
required: ["episodeBody"],
additionalProperties: false,
}),
});
const internalTools = [
"core--progress_update",
"core--search_memory",
"core--add_memory",
];
async function addResources(messages: CoreMessage[], resources: Resource[]) {
const resourcePromises = resources.map(async (resource) => {
// Remove everything before "/api" in the publicURL
if (resource.publicURL) {
const apiIndex = resource.publicURL.indexOf("/api");
if (apiIndex !== -1) {
resource.publicURL = resource.publicURL.substring(apiIndex);
}
}
const response = await axios.get(resource.publicURL, {
responseType: "arraybuffer",
});
if (resource.fileType.startsWith("image/")) {
return {
type: "image",
image: response.data as DataContent,
};
}
return {
type: "file",
data: response.data as DataContent,
mimeType: resource.fileType,
};
});
const content = await Promise.all(resourcePromises);
return [...messages, { role: "user", content } as CoreMessage];
}
function toolToMessage(history: HistoryStep[], messages: CoreMessage[]) {
for (let i = 0; i < history.length; i++) {
const step = history[i];
// Add assistant message with tool calls
if (step.observation && step.skillId) {
messages.push({
role: "assistant",
content: [
{
type: "tool-call",
toolCallId: step.skillId,
toolName: step.skill ?? "",
args:
typeof step.skillInput === "string"
? JSON.parse(step.skillInput)
: step.skillInput,
},
],
});
messages.push({
role: "tool",
content: [
{
type: "tool-result",
toolName: step.skill,
toolCallId: step.skillId,
result: step.observation,
isError: step.isError,
},
],
} as any);
}
// Handle format correction steps (observation exists but no skillId)
else if (step.observation && !step.skillId) {
// Add as a system message for format correction
messages.push({
role: "system",
content: step.observation,
});
}
}
return messages;
}
async function makeNextCall(
executionState: ExecutionState,
TOOLS: ToolSet,
totalCost: TotalCost,
guardLoop: number,
): Promise<LLMOutputInterface> {
const { context, history, previousHistory } = executionState;
const promptInfo = {
USER_MESSAGE: executionState.query,
CONTEXT: context,
USER_MEMORY: executionState.userMemoryContext,
};
let messages: CoreMessage[] = [];
const systemTemplateHandler = Handlebars.compile(REACT_SYSTEM_PROMPT);
let systemPrompt = systemTemplateHandler(promptInfo);
const userTemplateHandler = Handlebars.compile(REACT_USER_PROMPT);
const userPrompt = userTemplateHandler(promptInfo);
// Always start with a system message (this does use tokens but keeps the instructions clear)
messages.push({ role: "system", content: systemPrompt });
// For subsequent queries, include only final responses from previous exchanges if available
if (previousHistory && previousHistory.length > 0) {
messages = [...messages, ...previousHistory];
}
// Add the current user query (much simpler than the full prompt)
messages.push({ role: "user", content: userPrompt });
// Include any steps from the current interaction
if (history.length > 0) {
messages = toolToMessage(history, messages);
}
if (executionState.resources && executionState.resources.length > 0) {
messages = await addResources(messages, executionState.resources);
}
// Get the next action from the LLM
const response = generate(
messages,
guardLoop > 0 && guardLoop % 3 === 0,
(event) => {
const usage = event.usage;
totalCost.inputTokens += usage.promptTokens;
totalCost.outputTokens += usage.completionTokens;
},
TOOLS,
);
return { response };
}
export async function* run(
message: string,
// eslint-disable-next-line @typescript-eslint/no-explicit-any
context: Record<string, any>,
previousHistory: CoreMessage[],
mcp: MCP,
stepHistory: HistoryStep[],
// eslint-disable-next-line @typescript-eslint/no-explicit-any
): AsyncGenerator<AgentMessage, any, any> {
let guardLoop = 0;
let tools = {
"core--progress_update": progressUpdateTool,
"core--search_memory": searchMemoryTool,
"core--add_memory": addMemoryTool,
};
logger.info("Tools have been formed");
let contextText = "";
let resources = [];
if (context) {
// Extract resources and remove from context
resources = context.resources || [];
delete context.resources;
// Process remaining context
contextText = flattenObject(context).join("\n");
}
const executionState: ExecutionState = {
query: message,
context: contextText,
resources,
previousHistory,
history: stepHistory, // Track the full ReAct history
completed: false,
};
const totalCost: TotalCost = { inputTokens: 0, outputTokens: 0, cost: 0 };
try {
while (!executionState.completed && guardLoop < 50) {
logger.info(`Starting the loop: ${guardLoop}`);
const { response: llmResponse } = await makeNextCall(
executionState,
tools,
totalCost,
guardLoop,
);
let toolCallInfo;
const messageState = {
inTag: false,
message: "",
messageEnded: false,
lastSent: "",
};
const questionState = {
inTag: false,
message: "",
messageEnded: false,
lastSent: "",
};
let totalMessage = "";
const toolCalls = [];
// Stream the LLM response, separating tool-call chunks from tagged text
for await (const chunk of llmResponse) {
if (typeof chunk === "object" && chunk.type === "tool-call") {
toolCallInfo = chunk;
toolCalls.push(chunk);
// Tool-call chunks are objects, not text: skip accumulation and tag parsing
continue;
}
totalMessage += chunk;
if (!messageState.messageEnded) {
yield* processTag(
messageState,
totalMessage,
chunk as string,
"<final_response>",
"</final_response>",
{
start: AgentMessageType.MESSAGE_START,
chunk: AgentMessageType.MESSAGE_CHUNK,
end: AgentMessageType.MESSAGE_END,
},
);
}
if (!questionState.messageEnded) {
yield* processTag(
questionState,
totalMessage,
chunk as string,
"<question_response>",
"</question_response>",
{
start: AgentMessageType.MESSAGE_START,
chunk: AgentMessageType.MESSAGE_CHUNK,
end: AgentMessageType.MESSAGE_END,
},
);
}
}
logger.info(`Cost for thought: ${JSON.stringify(totalCost)}`);
// Self-correcting path: if the response has no valid format, ask the model to reformat and retry
if (
!totalMessage.includes("final_response") &&
!totalMessage.includes("question_response") &&
!toolCallInfo
) {
// Log the issue for debugging
logger.info(
`Invalid response format detected. Attempting to get proper format.`,
);
// Extract the raw content from the invalid response
const rawContent = totalMessage
.replace(/(<[^>]*>|<\/[^>]*>)/g, "")
.trim();
// Create a correction step
const stepRecord: HistoryStep = {
thought: "",
skill: "",
skillId: "",
userMessage: "Sol agent error, retrying \n",
isQuestion: false,
isFinal: false,
tokenCount: totalCost,
skillInput: "",
observation: `Your last response was not in a valid format. You must respond with EXACTLY ONE of the required formats: either a tool call, <question_response> tags, or <final_response> tags. Please reformat your previous response using the correct format:\n\n${rawContent}`,
};
yield Message("", AgentMessageType.MESSAGE_START);
yield Message(
stepRecord.userMessage as string,
AgentMessageType.MESSAGE_CHUNK,
);
yield Message("", AgentMessageType.MESSAGE_END);
// Add this step to the history
yield Message(JSON.stringify(stepRecord), AgentMessageType.STEP);
executionState.history.push(stepRecord);
// Log that we're continuing the loop with a correction request
logger.info(`Added format correction request to history.`);
// Don't mark as completed - let the loop continue
guardLoop++; // Still increment to prevent infinite loops
continue;
}
// Record this step in history
const stepRecord: HistoryStep = {
thought: "",
skill: "",
skillId: "",
userMessage: "",
isQuestion: false,
isFinal: false,
tokenCount: totalCost,
skillInput: "",
};
if (totalMessage && totalMessage.includes("final_response")) {
executionState.completed = true;
stepRecord.isFinal = true;
stepRecord.userMessage = messageState.message;
stepRecord.finalTokenCount = totalCost;
stepRecord.skillStatus = ActionStatusEnum.SUCCESS;
yield Message(JSON.stringify(stepRecord), AgentMessageType.STEP);
executionState.history.push(stepRecord);
break;
}
if (totalMessage && totalMessage.includes("question_response")) {
executionState.completed = true;
stepRecord.isQuestion = true;
stepRecord.userMessage = questionState.message;
stepRecord.finalTokenCount = totalCost;
stepRecord.skillStatus = ActionStatusEnum.QUESTION;
yield Message(JSON.stringify(stepRecord), AgentMessageType.STEP);
executionState.history.push(stepRecord);
break;
}
if (toolCalls && toolCalls.length > 0) {
// Execute each tool call from this turn, awaiting each result in order
for (const toolCallInfo of toolCalls) {
const skillName = toolCallInfo.toolName;
const skillId = toolCallInfo.toolCallId;
const skillInput = toolCallInfo.args;
const toolName = skillName.split("--")[1];
const agent = skillName.split("--")[0];
const stepRecord: HistoryStep = {
agent,
thought: "",
skill: skillName,
skillId,
userMessage: "",
isQuestion: false,
isFinal: false,
tokenCount: totalCost,
skillInput: JSON.stringify(skillInput),
};
if (!internalTools.includes(skillName)) {
const skillMessageToSend = `\n<skill id="${skillId}" name="${toolName}" agent="${agent}"></skill>\n`;
stepRecord.userMessage += skillMessageToSend;
yield Message("", AgentMessageType.MESSAGE_START);
yield Message(skillMessageToSend, AgentMessageType.MESSAGE_CHUNK);
yield Message("", AgentMessageType.MESSAGE_END);
}
let result;
try {
// Log skill execution details
logger.info(`Executing skill: ${skillName}`);
logger.info(`Input parameters: ${JSON.stringify(skillInput)}`);
if (!internalTools.includes(skillName)) {
yield Message(
JSON.stringify({ skillId, status: "start" }),
AgentMessageType.SKILL_START,
);
}
// Handle CORE agent tools
if (agent === "core") {
if (toolName === "progress_update") {
yield Message("", AgentMessageType.MESSAGE_START);
yield Message(
skillInput.message,
AgentMessageType.MESSAGE_CHUNK,
);
stepRecord.userMessage += skillInput.message;
yield Message("", AgentMessageType.MESSAGE_END);
result = "Progress update sent successfully";
} else if (toolName === "search_memory") {
try {
result = await searchMemory(skillInput);
} catch (apiError) {
logger.error("Memory utils calls failed for search_memory", {
apiError,
});
result =
"Memory search failed - please check your memory configuration";
}
} else if (toolName === "add_memory") {
try {
result = await addMemory(skillInput);
} catch (apiError) {
logger.error("Memory utils calls failed for add_memory", {
apiError,
});
result =
"Memory storage failed - please check your memory configuration";
}
}
}
// Handle other MCP tools
else {
result = await mcp.callTool(skillName, skillInput);
yield Message(
JSON.stringify({ result, skillId }),
AgentMessageType.SKILL_CHUNK,
);
}
if (!internalTools.includes(skillName)) {
yield Message(
JSON.stringify({ skillId, status: "end" }),
AgentMessageType.SKILL_END,
);
}
stepRecord.skillOutput =
typeof result === "object"
? JSON.stringify(result, null, 2)
: result;
stepRecord.observation = stepRecord.skillOutput;
} catch (e) {
console.log(e);
logger.error(e as string);
stepRecord.skillInput = skillInput;
stepRecord.observation = JSON.stringify(e);
stepRecord.isError = true;
}
logger.info(`Skill step: ${JSON.stringify(stepRecord)}`);
yield Message(JSON.stringify(stepRecord), AgentMessageType.STEP);
executionState.history.push(stepRecord);
}
}
guardLoop++;
}
yield Message("Stream ended", AgentMessageType.STREAM_END);
} catch (e) {
logger.error(e as string);
yield Message((e as Error).message, AgentMessageType.ERROR);
yield Message("Stream ended", AgentMessageType.STREAM_END);
}
}
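For orientation, here is a minimal sketch of consuming this generator directly. This is hypothetical wiring; in this commit the real consumer is the chat task in the next file, which pipes these messages through trigger.dev's metadata.stream.

async function demo(mcp: MCP) {
  for await (const msg of run("What did we ship last week?", { page: [] }, [], mcp, [])) {
    // Print streamed text and stop once the agent signals the end of the stream
    if (msg.type === AgentMessageType.MESSAGE_CHUNK) process.stdout.write(msg.message ?? "");
    if (msg.type === AgentMessageType.STREAM_END) break;
  }
}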

View File

@ -0,0 +1,131 @@
import { PrismaClient } from "@prisma/client";
import { ActionStatusEnum } from "@core/types";
import { logger, metadata, task } from "@trigger.dev/sdk/v3";
import { format } from "date-fns";
import { run } from "./chat-utils";
import { MCP } from "../utils/mcp";
import { type HistoryStep } from "../utils/types";
import {
createConversationHistoryForAgent,
getPreviousExecutionHistory,
init,
type RunChatPayload,
updateConversationHistoryMessage,
updateConversationStatus,
updateExecutionStep,
} from "../utils/utils";
const prisma = new PrismaClient();
/**
* Main chat task that orchestrates the agent workflow
* Handles conversation context, agent selection, and LLM interactions
*/
export const chat = task({
id: "chat",
maxDuration: 3000,
queue: {
name: "chat",
concurrencyLimit: 30,
},
init,
run: async (payload: RunChatPayload, { init }) => {
await updateConversationStatus("running", payload.conversationId);
try {
let creditForChat = 0;
const { previousHistory, ...otherData } = payload.context;
const isContinuation = payload.isContinuation || false;
// Initialise mcp
const mcp = new MCP();
await mcp.init();
// Prepare context with additional metadata
const context = {
// Currently this assumes there is only one page in the context
context: {
...(otherData.page && otherData.page.length > 0
? { page: otherData.page[0] }
: {}),
},
workspaceId: init?.conversation.workspaceId,
resources: otherData.resources,
};
// Extract user's goal from conversation history
const message = init?.conversationHistory?.message;
// Retrieve execution history from previous interactions
const previousExecutionHistory = getPreviousExecutionHistory(
previousHistory ?? [],
);
let agentUserMessage = "";
let agentConversationHistory;
let stepHistory: HistoryStep[] = [];
// Prepare conversation history in agent-compatible format
agentConversationHistory = await createConversationHistoryForAgent(
payload.conversationId,
);
const llmResponse = run(
message as string,
context,
previousExecutionHistory,
mcp,
stepHistory,
);
const stream = await metadata.stream("messages", llmResponse);
let conversationStatus = "success";
for await (const step of stream) {
if (step.type === "STEP") {
creditForChat += 1;
const stepDetails = JSON.parse(step.message as string);
if (stepDetails.skillStatus === ActionStatusEnum.TOOL_REQUEST) {
conversationStatus = "need_approval";
}
if (stepDetails.skillStatus === ActionStatusEnum.QUESTION) {
conversationStatus = "need_attention";
}
await updateExecutionStep(
{ ...stepDetails },
agentConversationHistory.id,
);
agentUserMessage += stepDetails.userMessage;
await updateConversationHistoryMessage(
agentUserMessage,
agentConversationHistory.id,
);
} else if (step.type === "STREAM_END") {
break;
}
}
await updateConversationStatus(
conversationStatus,
payload.conversationId,
);
// await addToMemory(
// init.conversation.id,
// message,
// agentUserMessage,
// init.preferences,
// init.userName,
// );
} catch (e) {
await updateConversationStatus("failed", payload.conversationId);
throw new Error(e as string);
}
},
});
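A hedged sketch of kicking this task off from application code, assuming the standard trigger.dev v3 tasks.trigger API; the "./chat" import path is assumed (file names are not shown in this view), and assembling RunChatPayload is outside this diff.

import { tasks } from "@trigger.dev/sdk/v3";
import { chat } from "./chat";
import { type RunChatPayload } from "../utils/utils";

async function startChat(payload: RunChatPayload) {
  // "chat" matches the task id declared above; the returned handle can be polled later
  const handle = await tasks.trigger<typeof chat>("chat", payload);
  return handle.id;
}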

View File

@ -0,0 +1,48 @@
import { logger } from "@trigger.dev/sdk/v3";
import axios from "axios";
// Memory API helpers. The https://core::memory/* URLs are sentinels that the axios request interceptor registered in init (utils.ts) rewrites to the real memory endpoints and authorizes with the PAT.
export interface SearchMemoryParams {
query: string;
spaceId?: string;
sessionId?: string;
}
export interface AddMemoryParams {
episodeBody: string;
referenceTime?: string;
source?: string;
spaceId?: string;
sessionId?: string;
metadata?: any;
}
export const searchMemory = async (params: SearchMemoryParams) => {
try {
const response = await axios.post("https://core::memory/search", params);
return response.data;
} catch (error) {
logger.error("Memory search failed", { error, params });
return { error: "Memory search failed" };
}
};
export const addMemory = async (params: AddMemoryParams) => {
try {
// Set defaults for required fields
const memoryInput = {
...params,
referenceTime: params.referenceTime || new Date().toISOString(),
source: params.source || "chat",
};
const response = await axios.post(
"https://core::memory/ingest",
memoryInput,
);
return response.data;
} catch (error) {
logger.error("Memory storage failed", { error, params });
return { error: "Memory storage failed" };
}
};
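Usage sketch: both helpers hit the core::memory sentinel host, so they only work after the axios interceptor in init (utils.ts in this commit) has rewritten the URL and attached the PAT.

async function rememberPreference() {
  const hits = await searchMemory({ query: "user email address" });
  await addMemory({
    episodeBody: "User prefers weekly summaries on Friday.",
    source: "chat",
  });
  return hits;
}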

View File

@ -0,0 +1,131 @@
export const REACT_SYSTEM_PROMPT = `
You are a helpful AI assistant with access to user memory. Your primary capabilities are:
1. **Memory-First Approach**: Always check user memory first to understand context and previous interactions
2. **Memory Management**: Help users store, retrieve, and organize information in their memory
3. **Contextual Assistance**: Use memory to provide personalized and contextual responses
<context>
{{CONTEXT}}
</context>
<memory>
- Always check memory FIRST using core--search_memory before any other actions
- Consider this your highest priority for EVERY interaction - as essential as breathing
- Make memory checking your first tool call before any other operations
QUERY FORMATION:
- Write specific factual statements as queries (e.g., "user email address" not "what is the user's email?")
- Create multiple targeted memory queries for complex requests
KEY QUERY AREAS:
- Personal context: user name, location, identity, work context
- Project context: repositories, codebases, current work, team members
- Task context: recent tasks, ongoing projects, deadlines, priorities
- Integration context: GitHub repos, Slack channels, Linear projects, connected services
- Communication patterns: email preferences, notification settings, workflow automation
- Technical context: coding languages, frameworks, development environment
- Collaboration context: team members, project stakeholders, meeting patterns
- Preferences: likes, dislikes, communication style, tool preferences
- History: previous discussions, past requests, completed work, recurring issues
- Automation rules: user-defined workflows, triggers, automation preferences
MEMORY USAGE:
- Execute multiple memory queries in parallel rather than sequentially
- Batch related memory queries when possible
- Prioritize recent information over older memories
- Create comprehensive context-aware queries based on user message/activity content
- Extract and query SEMANTIC CONTENT, not just structural metadata
- Parse titles, descriptions, and content for actual subject matter keywords
- Search internal SOL tasks/conversations that may relate to the same topics
- Query ALL relatable concepts, not just direct keywords or IDs
- Search for similar past situations, patterns, and related work
- Include synonyms, related terms, and contextual concepts in queries
- Query user's historical approach to similar requests or activities
- Search for connected projects, tasks, conversations, and collaborations
- Retrieve workflow patterns and past decision-making context
- Query broader domain context beyond immediate request scope
- Remember: SOL tracks work that external tools don't - search internal content thoroughly
- Blend memory insights naturally into responses
- Verify you've checked relevant memory before finalizing ANY response
If memory access is unavailable, rely only on the current conversation or ask the user
</memory>
<tool_calling>
You have tools at your disposal to assist users:
CORE PRINCIPLES:
- Use tools only when necessary for the task at hand
- Always check memory FIRST before making other tool calls
- Execute multiple operations in parallel whenever possible
- Use sequential calls only when output of one is required for input of another
PARAMETER HANDLING:
- Follow tool schemas exactly with all required parameters
- Only use values that are:
Explicitly provided by the user (use EXACTLY as given)
Reasonably inferred from context
Retrieved from memory or prior tool calls
- Never make up values for required parameters
- Omit optional parameters unless clearly needed
- Analyze user's descriptive terms for parameter clues
TOOL SELECTION:
- Never call tools not provided in this conversation
- Skip tool calls for general questions you can answer directly
- For identical operations on multiple items, use parallel tool calls
- Default to parallel execution (3-5× faster than sequential calls)
- You can always access external service tools by loading them with load_mcp first
TOOL MENTION HANDLING:
When user message contains <mention data-id="tool_name" data-label="tool"></mention>:
- Extract tool_name from data-id attribute
- First check if it's a built-in tool; if not, check EXTERNAL SERVICES TOOLS
- If available: Load it with load_mcp and focus on addressing the request with this tool
- If unavailable: Inform user and suggest alternatives if possible
- For multiple tool mentions: Load all applicable tools in a single load_mcp call
ERROR HANDLING:
- If a tool returns an error, try fixing parameters before retrying
- If you can't resolve an error, explain the issue to the user
- Consider alternative tools when primary tools are unavailable
</tool_calling>
<communication>
Use EXACTLY ONE of these formats for all user-facing communication:
PROGRESS UPDATES - During processing:
- Use the core--progress_update tool to keep users informed
- Update users about what you're discovering or doing next
- Keep messages clear and user-friendly
- Avoid technical jargon
QUESTIONS - When you need information:
<question_response>
<p>[Your question with HTML formatting]</p>
</question_response>
- Ask questions only when you cannot find information through memory or tools
- Be specific about what you need to know
- Provide context for why you're asking
FINAL ANSWERS - When completing tasks:
<final_response>
<p>[Your answer with HTML formatting]</p>
</final_response>
CRITICAL:
- Use ONE format per turn
- Apply proper HTML formatting (<h1>, <h2>, <p>, <ul>, <li>, etc.)
- Never mix communication formats
- Keep responses clear and helpful
</communication>
`;
export const REACT_USER_PROMPT = `
Here is the user message:
<user_message>
{{USER_MESSAGE}}
</user_message>
`;
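These templates are plain Handlebars strings; makeNextCall in chat-utils renders them roughly like this (context values below are illustrative):

import Handlebars from "handlebars";
import { REACT_SYSTEM_PROMPT, REACT_USER_PROMPT } from "./prompt";

const promptInfo = {
  USER_MESSAGE: "Summarize this page",
  CONTEXT: "- page_title: Roadmap",
  USER_MEMORY: "",
};
// Unused keys are simply ignored by Handlebars
const systemPrompt = Handlebars.compile(REACT_SYSTEM_PROMPT)(promptInfo);
const userPrompt = Handlebars.compile(REACT_USER_PROMPT)(promptInfo);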

View File

@ -0,0 +1,263 @@
import fs from "fs";
import path from "node:path";
import { anthropic } from "@ai-sdk/anthropic";
import { google } from "@ai-sdk/google";
import { openai } from "@ai-sdk/openai";
import { logger } from "@trigger.dev/sdk/v3";
import {
type CoreMessage,
type LanguageModelV1,
streamText,
type ToolSet,
} from "ai";
import { createOllama } from "ollama-ai-provider";
import { type AgentMessageType, Message } from "./types";
interface State {
inTag: boolean;
messageEnded: boolean;
message: string;
lastSent: string;
}
export interface ExecutionState {
// eslint-disable-next-line @typescript-eslint/no-explicit-any
agentFlow: any;
userMessage: string;
message: string;
}
export async function* processTag(
state: State,
totalMessage: string,
chunk: string,
startTag: string,
endTag: string,
states: { start: string; chunk: string; end: string },
extraParams: Record<string, string> = {},
) {
let comingFromStart = false;
if (!state.messageEnded) {
if (!state.inTag) {
const startIndex = totalMessage.indexOf(startTag);
if (startIndex !== -1) {
state.inTag = true;
// Send MESSAGE_START when we first enter the tag
yield Message("", states.start as AgentMessageType, extraParams);
const chunkToSend = totalMessage.slice(startIndex + startTag.length);
state.message += chunkToSend;
comingFromStart = true;
}
}
if (state.inTag) {
// Check if chunk contains end tag
const hasEndTag = chunk.includes(endTag);
const hasStartTag = chunk.includes(startTag);
const hasClosingTag = chunk.includes("</");
if (hasClosingTag && !hasStartTag && !hasEndTag) {
// If chunk only has </ but not the full end tag, accumulate it
state.message += chunk;
} else if (hasEndTag || (!hasEndTag && !hasClosingTag)) {
let currentMessage = comingFromStart
? state.message
: state.message + chunk;
const endIndex = currentMessage.indexOf(endTag);
if (endIndex !== -1) {
// For the final chunk before the end tag
currentMessage = currentMessage.slice(0, endIndex).trim();
const messageToSend = currentMessage.slice(
currentMessage.indexOf(state.lastSent) + state.lastSent.length,
);
if (messageToSend) {
yield Message(
messageToSend,
states.chunk as AgentMessageType,
extraParams,
);
}
// Send MESSAGE_END when we reach the end tag
yield Message("", states.end as AgentMessageType, extraParams);
state.message = currentMessage;
state.messageEnded = true;
} else {
const diff = currentMessage.slice(
currentMessage.indexOf(state.lastSent) + state.lastSent.length,
);
// For chunks in between start and end
const messageToSend = comingFromStart ? state.message : diff;
if (messageToSend) {
state.lastSent = messageToSend;
yield Message(
messageToSend,
states.chunk as AgentMessageType,
extraParams,
);
}
}
state.message = currentMessage;
state.lastSent = state.message;
} else {
state.message += chunk;
}
}
}
}
export async function* generate(
messages: CoreMessage[],
isProgressUpdate: boolean = false,
// eslint-disable-next-line @typescript-eslint/no-explicit-any
onFinish?: (event: any) => void,
tools?: ToolSet,
system?: string,
model?: string,
// eslint-disable-next-line @typescript-eslint/no-explicit-any
): AsyncGenerator<
| string
| {
type: string;
toolName: string;
// eslint-disable-next-line @typescript-eslint/no-explicit-any
args?: any;
toolCallId?: string;
message?: string;
}
> {
// Check for API keys
const anthropicKey = process.env.ANTHROPIC_API_KEY;
const googleKey = process.env.GOOGLE_GENERATIVE_AI_API_KEY;
const openaiKey = process.env.OPENAI_API_KEY;
const ollamaUrl = process.env.OLLAMA_URL;
model = model || process.env.MODEL;
let modelInstance;
let modelTemperature = Number(process.env.MODEL_TEMPERATURE) || 1;
// First check if Ollama URL exists and use Ollama
if (ollamaUrl) {
const ollama = createOllama({
baseURL: ollamaUrl,
});
modelInstance = ollama(model || "llama2"); // Default to llama2 if no model specified
} else {
// If no Ollama, check other models
switch (model) {
case "claude-3-7-sonnet-20250219":
case "claude-3-opus-20240229":
case "claude-3-5-haiku-20241022":
if (!anthropicKey) {
throw new Error("No Anthropic API key found. Set ANTHROPIC_API_KEY");
}
modelInstance = anthropic(model);
modelTemperature = 0.5;
break;
case "gemini-2.5-flash-preview-04-17":
case "gemini-2.5-pro-preview-03-25":
case "gemini-2.0-flash":
case "gemini-2.0-flash-lite":
if (!googleKey) {
throw new Error("No Google API key found. Set GOOGLE_GENERATIVE_AI_API_KEY");
}
modelInstance = google(model);
break;
case "gpt-4.1-2025-04-14":
case "gpt-4.1-mini-2025-04-14":
case "gpt-4.1-nano-2025-04-14":
if (!openaiKey) {
throw new Error("No OpenAI API key found. Set OPENAI_API_KEY");
}
modelInstance = openai(model);
break;
default:
break;
}
}
logger.info("starting stream");
// Stream from the resolved model, if one was configured
if (modelInstance) {
try {
const { textStream, fullStream } = streamText({
model: modelInstance as LanguageModelV1,
messages,
temperature: modelTemperature,
maxSteps: 10,
tools,
...(isProgressUpdate
? { toolChoice: { type: "tool", toolName: "core--progress_update" } }
: {}),
toolCallStreaming: true,
onFinish,
...(system ? { system } : {}),
});
for await (const chunk of textStream) {
yield chunk;
}
for await (const fullChunk of fullStream) {
if (fullChunk.type === "tool-call") {
yield {
type: "tool-call",
toolName: fullChunk.toolName,
toolCallId: fullChunk.toolCallId,
args: fullChunk.args,
};
}
if (fullChunk.type === "error") {
// Log the error to a file
const errorLogsDir = path.join(__dirname, "../../../../logs/errors");
// Ensure the directory exists
try {
if (!fs.existsSync(errorLogsDir)) {
fs.mkdirSync(errorLogsDir, { recursive: true });
}
// Create a timestamped error log file
const timestamp = new Date().toISOString().replace(/:/g, "-");
const errorLogPath = path.join(
errorLogsDir,
`llm-error-${timestamp}.json`,
);
// Write the error to the file
fs.writeFileSync(
errorLogPath,
JSON.stringify({
timestamp: new Date().toISOString(),
error: fullChunk.error,
}),
);
logger.error(`LLM error logged to ${errorLogPath}`);
} catch (err) {
logger.error(`Failed to log LLM error: ${err}`);
}
}
}
return;
} catch (e) {
console.log(e);
logger.error(e as string);
}
}
throw new Error("No valid LLM configuration found");
}
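To make the tag-splitting behaviour of processTag concrete, here is a small self-contained trace over a simulated final_response stream, mirroring the call site in chat-utils:

import { processTag } from "./stream-utils";
import { AgentMessageType } from "./types";

async function traceFinalResponse() {
  const state = { inTag: false, message: "", messageEnded: false, lastSent: "" };
  let totalMessage = "";
  for (const chunk of ["<final_res", "ponse><p>Hi", "</p></final_response>"]) {
    totalMessage += chunk;
    for await (const msg of processTag(
      state,
      totalMessage,
      chunk,
      "<final_response>",
      "</final_response>",
      {
        start: AgentMessageType.MESSAGE_START,
        chunk: AgentMessageType.MESSAGE_CHUNK,
        end: AgentMessageType.MESSAGE_END,
      },
    )) {
      // Emits MESSAGE_START, then chunks "<p>Hi" and "</p>", then MESSAGE_END
      console.log(msg.type, JSON.stringify(msg.message));
    }
  }
}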

View File

@ -0,0 +1,46 @@
export interface AgentStep {
agent: string;
goal: string;
reasoning: string;
}
export enum AgentMessageType {
STREAM_START = 'STREAM_START',
STREAM_END = 'STREAM_END',
// Used in ReACT based prompting
THOUGHT_START = 'THOUGHT_START',
THOUGHT_CHUNK = 'THOUGHT_CHUNK',
THOUGHT_END = 'THOUGHT_END',
// Message types
MESSAGE_START = 'MESSAGE_START',
MESSAGE_CHUNK = 'MESSAGE_CHUNK',
MESSAGE_END = 'MESSAGE_END',
// This is used to return action input
SKILL_START = 'SKILL_START',
SKILL_CHUNK = 'SKILL_CHUNK',
SKILL_END = 'SKILL_END',
STEP = 'STEP',
ERROR = 'ERROR',
}
export interface AgentMessage {
message?: string;
type: AgentMessageType;
// eslint-disable-next-line @typescript-eslint/no-explicit-any
metadata: Record<string, any>;
}
export const Message = (
message: string,
type: AgentMessageType,
extraParams: Record<string, string> = {},
): AgentMessage => {
// For all message types, we use the message field
// The type field differentiates how the message should be interpreted
// For STEP and SKILL types, the message can contain JSON data as a string
return { message, type, metadata: extraParams };
};
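Example: a STEP message as emitted by the agent loop wraps a serialized HistoryStep; metadata stays empty unless extraParams is passed.

const stepMessage = Message(
  JSON.stringify({ skill: "core--search_memory", isFinal: false }),
  AgentMessageType.STEP,
);
// => { message: '{"skill":"core--search_memory","isFinal":false}', type: 'STEP', metadata: {} }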

View File

@ -0,0 +1,151 @@
/* eslint-disable @typescript-eslint/no-explicit-any */
import { logger } from "@trigger.dev/sdk/v3";
import { jsonSchema, tool, type ToolSet } from "ai";
import { type MCPTool } from "./types";
export class MCP {
private Client: any;
private clients: Record<string, any> = {};
private StdioTransport: any;
constructor() {}
public async init() {
this.Client = await MCP.importClient();
this.StdioTransport = await MCP.importStdioTransport();
}
private static async importClient() {
const { Client } = await import(
"@modelcontextprotocol/sdk/client/index.js"
);
return Client;
}
async load(agents: string[], mcpConfig: any) {
await Promise.all(
agents.map(async (agent) => {
const mcp = mcpConfig.mcpServers[agent];
return await this.connectToServer(agent, mcp.command, mcp.args, {
...mcp.env,
DATABASE_URL: mcp.env?.DATABASE_URL ?? "",
});
}),
);
}
private static async importStdioTransport() {
const { StdioClientTransport } = await import("./stdio");
return StdioClientTransport;
}
async allTools(): Promise<ToolSet> {
const clientEntries = Object.entries(this.clients);
// Fetch all tools in parallel
const toolsArrays = await Promise.all(
clientEntries.map(async ([clientKey, client]) => {
try {
const { tools } = await client.listTools();
return tools.map(({ name, description, inputSchema }: any) => [
`${clientKey}--${name}`,
tool({
description,
parameters: jsonSchema(inputSchema),
}),
]);
} catch (error) {
logger.error(`Error fetching tools for ${clientKey}:`, { error });
return [];
}
}),
);
// Flatten and convert to object
return Object.fromEntries(toolsArrays.flat());
}
async tools(): Promise<MCPTool[]> {
const allTools: MCPTool[] = [];
for (const clientKey in this.clients) {
const client = this.clients[clientKey];
const { tools: clientTools } = await client.listTools();
for (const tool of clientTools) {
// Add client prefix to tool name
tool.name = `${clientKey}--${tool.name}`;
allTools.push(tool);
}
}
return allTools;
}
async getTool(name: string) {
try {
const clientKey = name.split("--")[0];
const toolName = name.split("--")[1];
const client = this.clients[clientKey];
const { tools: clientTools } = await client.listTools();
const clientTool = clientTools.find((to: any) => to.name === toolName);
return JSON.stringify(clientTool);
} catch (e) {
logger.error((e as string) ?? "Getting tool failed");
throw new Error("Getting tool failed");
}
}
async callTool(name: string, parameters: any) {
const clientKey = name.split("--")[0];
const toolName = name.split("--")[1];
const client = this.clients[clientKey];
const response = await client.callTool({
name: toolName,
arguments: parameters,
});
return response;
}
async connectToServer(
name: string,
command: string,
args: string[],
env: any,
) {
try {
const client = new this.Client(
{
name,
version: "1.0.0",
},
{
capabilities: {},
},
);
// Configure the transport for the MCP server
const transport = new this.StdioTransport({
command,
args,
env,
});
// Connect to the MCP server
await client.connect(transport, { timeout: 60 * 1000 * 5 });
this.clients[name] = client;
logger.info(`Connected to ${name} MCP server`);
} catch (e) {
logger.error(`Failed to connect to ${name} MCP server: `, { e });
throw e;
}
}
}
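A hypothetical mcpConfig illustrating the shape load() reads: mcpServers keyed by agent slug, each entry carrying command, args, and env. The GitHub server package and tool name below are illustrative, not part of this commit.

async function loadGithubTools() {
  const mcpConfig = {
    mcpServers: {
      github: {
        command: "npx",
        args: ["-y", "@modelcontextprotocol/server-github"],
        env: { GITHUB_PERSONAL_ACCESS_TOKEN: "ghp_example" },
      },
    },
  };
  const mcp = new MCP();
  await mcp.init();
  await mcp.load(["github"], mcpConfig);
  // Tool names come back prefixed with the client key, e.g. "github--search_repositories"
  return mcp.allTools();
}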

View File

@ -0,0 +1,256 @@
import { type ChildProcess, type IOType } from "node:child_process";
import process from "node:process";
import { type Stream } from "node:stream";
import { type Transport } from "@modelcontextprotocol/sdk/shared/transport";
import {
type JSONRPCMessage,
JSONRPCMessageSchema,
} from "@modelcontextprotocol/sdk/types.js";
import { execa } from "execa";
/**
* Buffers a continuous stdio stream into discrete JSON-RPC messages.
*/
export class ReadBuffer {
private _buffer?: Buffer;
append(chunk: Buffer): void {
this._buffer = this._buffer ? Buffer.concat([this._buffer, chunk]) : chunk;
}
readMessage(): JSONRPCMessage | null {
if (!this._buffer) {
return null;
}
const index = this._buffer.indexOf("\n");
if (index === -1) {
return null;
}
const line = this._buffer.toString("utf8", 0, index).replace(/\r$/, "");
this._buffer = this._buffer.subarray(index + 1);
return deserializeMessage(line);
}
clear(): void {
this._buffer = undefined;
}
}
export function deserializeMessage(line: string): JSONRPCMessage {
return JSONRPCMessageSchema.parse(JSON.parse(line));
}
export function serializeMessage(message: JSONRPCMessage): string {
return `${JSON.stringify(message)}\n`;
}
export interface StdioServerParameters {
/**
* The executable to run to start the server.
*/
command: string;
/**
* Command line arguments to pass to the executable.
*/
args?: string[];
/**
* The environment to use when spawning the process.
*
* If not specified, the result of getDefaultEnvironment() will be used.
*/
env?: Record<string, string>;
/**
* How to handle stderr of the child process. This matches the semantics of Node's `child_process.spawn`.
*
* The default is "inherit", meaning messages to stderr will be printed to the parent process's stderr.
*/
stderr?: IOType | Stream | number;
/**
* The working directory to use when spawning the process.
*
* If not specified, the current working directory will be inherited.
*/
cwd?: string;
}
/**
* Environment variables to inherit by default, if an environment is not explicitly given.
*/
export const DEFAULT_INHERITED_ENV_VARS =
process.platform === "win32"
? [
"APPDATA",
"HOMEDRIVE",
"HOMEPATH",
"LOCALAPPDATA",
"PATH",
"PROCESSOR_ARCHITECTURE",
"SYSTEMDRIVE",
"SYSTEMROOT",
"TEMP",
"USERNAME",
"USERPROFILE",
]
: /* list inspired by the default env inheritance of sudo */
["HOME", "LOGNAME", "PATH", "SHELL", "TERM", "USER"];
/**
* Returns a default environment object including only environment variables deemed safe to inherit.
*/
export function getDefaultEnvironment(): Record<string, string> {
const env: Record<string, string> = {};
for (const key of DEFAULT_INHERITED_ENV_VARS) {
const value = process.env[key];
if (value === undefined) {
continue;
}
if (value.startsWith("()")) {
// Skip functions, which are a security risk.
continue;
}
env[key] = value;
}
return env;
}
/**
* Client transport for stdio: this will connect to a server by spawning a process and communicating with it over stdin/stdout.
*
* This transport is only available in Node.js environments.
*/
export class StdioClientTransport implements Transport {
private _process?: ChildProcess;
private _abortController: AbortController = new AbortController();
private _readBuffer: ReadBuffer = new ReadBuffer();
private _serverParams: StdioServerParameters;
onclose?: () => void;
onerror?: (error: Error) => void;
onmessage?: (message: JSONRPCMessage) => void;
constructor(server: StdioServerParameters) {
this._serverParams = server;
}
/**
* Starts the server process and prepares to communicate with it.
*/
async start(): Promise<void> {
if (this._process) {
throw new Error(
"StdioClientTransport already started! If using Client class, note that connect() calls start() automatically.",
);
}
return new Promise((resolve, reject) => {
this._process = execa(
this._serverParams.command,
this._serverParams.args ?? [],
{
env: this._serverParams.env ?? getDefaultEnvironment(),
stderr: "inherit",
shell: "/bin/sh",
windowsHide: process.platform === "win32" && isElectron(),
cwd: this._serverParams.cwd,
cancelSignal: this._abortController.signal,
stdin: "pipe",
stdout: "pipe",
},
);
this._process.on("error", (error) => {
if (error.name === "AbortError") {
// Expected when close() is called.
this.onclose?.();
return;
}
reject(error);
this.onerror?.(error);
});
this._process.on("spawn", () => {
resolve();
});
// eslint-disable-next-line @typescript-eslint/no-unused-vars
this._process.on("close", (_code) => {
this._process = undefined;
this.onclose?.();
});
this._process.stdin?.on("error", (error) => {
this.onerror?.(error);
});
this._process.stdout?.on("data", (chunk) => {
this._readBuffer.append(chunk);
this.processReadBuffer();
});
this._process.stdout?.on("error", (error) => {
this.onerror?.(error);
});
});
}
/**
* The stderr stream of the child process, if `StdioServerParameters.stderr` was set to "pipe" or "overlapped".
*
* This is only available after the process has been started.
*/
get stderr(): Stream | null {
return this._process?.stderr ?? null;
}
private processReadBuffer() {
while (true) {
try {
const message = this._readBuffer.readMessage();
if (message === null) {
break;
}
this.onmessage?.(message);
} catch (error) {
this.onerror?.(error as Error);
}
}
}
async close(): Promise<void> {
this._abortController.abort();
this._process = undefined;
this._readBuffer.clear();
}
send(message: JSONRPCMessage): Promise<void> {
return new Promise((resolve) => {
if (!this._process?.stdin) {
throw new Error("Not connected");
}
const json = serializeMessage(message);
if (this._process.stdin.write(json)) {
resolve();
} else {
this._process.stdin.once("drain", resolve);
}
});
}
}
function isElectron() {
return "type" in process;
}
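Standalone usage sketch; normally the MCP Client calls start() itself via connect(). The server command here is illustrative.

async function pingServer() {
  const transport = new StdioClientTransport({
    command: "npx",
    args: ["-y", "@modelcontextprotocol/server-github"],
  });
  transport.onmessage = (message) => console.log("server:", message);
  transport.onerror = (error) => console.error(error);
  await transport.start();
  // Send a JSON-RPC request; responses arrive via onmessage
  await transport.send({ jsonrpc: "2.0", id: 1, method: "ping" });
}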

View File

@ -0,0 +1,123 @@
import { type ActionStatusEnum } from "@core/types";
import { type CoreMessage } from "ai";
// Define types for the MCP tool schema
export interface MCPTool {
name: string;
description: string;
inputSchema: {
type: string;
properties: Record<string, SchemaProperty>;
required?: string[];
additionalProperties: boolean;
$schema: string;
};
}
// Vercel AI SDK Tool Types
export type VercelAITools = Record<
string,
{
type: "function";
description: string;
parameters: {
type: "object";
// eslint-disable-next-line @typescript-eslint/no-explicit-any
properties: Record<string, any>;
required?: string[];
};
}
>;
export type SchemaProperty =
| {
type: string | string[];
minimum?: number;
maximum?: number;
// eslint-disable-next-line @typescript-eslint/no-explicit-any
default?: any;
minLength?: number;
pattern?: string;
enum?: string[];
// eslint-disable-next-line @typescript-eslint/no-explicit-any
items?: any;
properties?: Record<string, SchemaProperty>;
required?: string[];
additionalProperties?: boolean;
description?: string;
}
| {
// eslint-disable-next-line @typescript-eslint/no-explicit-any
anyOf: any[];
};
export interface Resource {
id?: string;
size?: number;
fileType: string;
publicURL: string;
originalName?: string;
}
export interface ExecutionState {
query: string;
// eslint-disable-next-line @typescript-eslint/no-explicit-any
context?: string;
resources: Resource[];
previousHistory?: CoreMessage[];
history: HistoryStep[];
userMemoryContext?: string;
automationContext?: string;
completed: boolean;
}
export interface TokenCount {
inputTokens: number;
outputToken: number;
}
export interface TotalCost {
inputTokens: number;
outputTokens: number;
cost: number;
}
export interface HistoryStep {
agent?: string;
// The agent's reasoning process for this step
thought?: string;
// Indicates if this step contains a question for the user
isQuestion?: boolean;
// Indicates if this is the final response in the conversation
isFinal?: boolean;
isError?: boolean;
// The name of the skill/tool being used in this step
skill?: string;
skillId?: string;
skillInput?: string;
skillOutput?: string;
skillStatus?: ActionStatusEnum;
// This is when the action has run and the output will be put here
observation?: string;
// This is what the user will read
userMessage?: string;
// If the agent has run completely
completed?: boolean;
// Token count
tokenCount: TotalCost;
finalTokenCount?: TotalCost;
}
export interface GenerateResponse {
text: string;
// eslint-disable-next-line @typescript-eslint/no-explicit-any
toolCalls: any[];
}
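For reference, a HistoryStep as the agent loop records it for an external tool call (values illustrative):

const exampleStep: HistoryStep = {
  agent: "github",
  skill: "github--search_repositories",
  skillId: "call_abc123",
  skillInput: '{"query":"sol"}',
  observation: '{"total_count":1}',
  isError: false,
  tokenCount: { inputTokens: 1200, outputTokens: 80, cost: 0 },
};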

View File

@ -0,0 +1,432 @@
import {
type Activity,
type Conversation,
type ConversationHistory,
type IntegrationDefinitionV2,
type Prisma,
PrismaClient,
UserType,
type Workspace,
} from "@prisma/client";
import { logger } from "@trigger.dev/sdk/v3";
import { type CoreMessage } from "ai";
import { type HistoryStep } from "./types";
import axios from "axios";
const prisma = new PrismaClient();
export interface InitChatPayload {
conversationId: string;
conversationHistoryId: string;
// eslint-disable-next-line @typescript-eslint/no-explicit-any
context: any;
pat: string;
}
export class Preferences {
timezone?: string;
// Memory details
memory_host?: string;
memory_api_key?: string;
}
export interface RunChatPayload {
conversationId: string;
conversationHistoryId: string;
// eslint-disable-next-line @typescript-eslint/no-explicit-any
context: any;
conversation: Conversation;
conversationHistory: ConversationHistory;
pat: string;
isContinuation?: boolean;
}
export const init = async (payload: InitChatPayload) => {
logger.info("Loading init");
const conversationHistory = await prisma.conversationHistory.findUnique({
where: { id: payload.conversationHistoryId },
include: { conversation: true },
});
const conversation = conversationHistory?.conversation as Conversation;
const workspace = await prisma.workspace.findUnique({
where: { id: conversation.workspaceId as string },
});
if (!workspace) {
return { conversation, conversationHistory };
}
const pat = await prisma.personalAccessToken.findFirst({
where: { userId: workspace.userId as string, name: "default" },
});
const user = await prisma.user.findFirst({
where: { id: workspace.userId as string },
});
const integrationAccounts = await prisma.integrationAccount.findMany({
where: {
workspaceId: workspace.id,
},
include: { integrationDefinition: true },
});
// Create MCP server configurations for each integration account
// eslint-disable-next-line @typescript-eslint/no-explicit-any
const integrationMCPServers: Record<string, any> = {};
for (const account of integrationAccounts) {
try {
// eslint-disable-next-line @typescript-eslint/no-explicit-any
const spec = account.integrationDefinition?.spec as any;
if (spec.mcp) {
const mcpSpec = spec.mcp;
const configuredMCP = { ...mcpSpec };
// Replace config placeholders in environment variables
if (configuredMCP.env) {
for (const [key, value] of Object.entries(configuredMCP.env)) {
if (typeof value === "string" && value.includes("${config:")) {
// Extract the config key from the placeholder
const configKey = value.match(/\$\{config:(.*?)\}/)?.[1];
if (
configKey &&
account.integrationConfiguration &&
// eslint-disable-next-line @typescript-eslint/no-explicit-any
(account.integrationConfiguration as any)[configKey]
) {
configuredMCP.env[key] = value.replace(
`\${config:${configKey}}`,
// eslint-disable-next-line @typescript-eslint/no-explicit-any
(account.integrationConfiguration as any)[configKey],
);
}
}
if (
typeof value === "string" &&
value.includes("${integrationConfig:")
) {
// Extract the config key from the placeholder
const configKey = value.match(
/\$\{integrationConfig:(.*?)\}/,
)?.[1];
if (
configKey &&
account.integrationDefinition.config &&
// eslint-disable-next-line @typescript-eslint/no-explicit-any
(account.integrationDefinition.config as any)[configKey]
) {
configuredMCP.env[key] = value.replace(
`\${integrationConfig:${configKey}}`,
// eslint-disable-next-line @typescript-eslint/no-explicit-any
(account.integrationDefinition.config as any)[configKey],
);
}
}
}
}
// Add to the MCP servers collection
integrationMCPServers[account.integrationDefinition.slug] =
configuredMCP;
}
} catch (error) {
logger.error(
`Failed to configure MCP for ${account.integrationDefinition?.slug}:`,
{ error },
);
}
}
// Register the core::memory interceptor once, not per integration account
axios.interceptors.request.use((config) => {
if (config.url?.startsWith("https://core::memory")) {
// Handle both search and ingest endpoints
if (config.url.includes("/search")) {
config.url = `${process.env.API_BASE_URL}/search`;
} else if (config.url.includes("/ingest")) {
config.url = `${process.env.API_BASE_URL}/ingest`;
}
config.headers.Authorization = `Bearer ${payload.pat}`;
}
return config;
});
return {
conversation,
conversationHistory,
token: pat?.obfuscatedToken,
userId: user?.id,
userName: user?.name,
};
};
export const createConversationHistoryForAgent = async (
conversationId: string,
) => {
return await prisma.conversationHistory.create({
data: {
conversationId,
message: "Generating...",
userType: "Agent",
thoughts: {},
},
});
};
export const getConversationHistoryFormat = (
// eslint-disable-next-line @typescript-eslint/no-explicit-any
previousHistory: any[],
): string => {
if (previousHistory) {
const historyText = previousHistory
.map((history) => `${history.userType}: \n ${history.message}`)
.join("\n------------\n");
return historyText;
}
return "";
};
export const getPreviousExecutionHistory = (
// eslint-disable-next-line @typescript-eslint/no-explicit-any
previousHistory: any[],
): CoreMessage[] => {
return previousHistory.map((history) => ({
role: history.userType === "User" ? "user" : "assistant",
content: history.message,
}));
};
export const getIntegrationDefinitionsForAgents = (agents: string[]) => {
return prisma.integrationDefinitionV2.findMany({
where: {
slug: {
in: agents,
},
},
});
};
export const getIntegrationConfigForIntegrationDefinition = (
integrationDefinitionId: string,
) => {
return prisma.integrationAccount.findFirst({
where: {
integrationDefinitionId,
},
});
};
export const updateExecutionStep = async (
step: HistoryStep,
conversationHistoryId: string,
) => {
const {
thought,
userMessage,
skillInput,
skillOutput,
skillId,
skillStatus,
...metadata
} = step;
await prisma.conversationExecutionStep.create({
data: {
thought: thought ?? "",
message: userMessage ?? "",
actionInput:
typeof skillInput === "object"
? JSON.stringify(skillInput)
: skillInput,
actionOutput:
typeof skillOutput === "object"
? JSON.stringify(skillOutput)
: skillOutput,
actionId: skillId,
actionStatus: skillStatus,
// eslint-disable-next-line @typescript-eslint/no-explicit-any
metadata: metadata as any,
conversationHistoryId,
},
});
};
export const updateConversationHistoryMessage = async (
userMessage: string,
conversationHistoryId: string,
// eslint-disable-next-line @typescript-eslint/no-explicit-any
thoughts?: Record<string, any>,
) => {
await prisma.conversationHistory.update({
where: {
id: conversationHistoryId,
},
data: {
message: userMessage,
thoughts,
userType: UserType.Agent,
},
});
};
export const getExecutionStepsForConversation = async (
conversationHistoryId: string,
) => {
const lastExecutionSteps = await prisma.conversationExecutionStep.findMany({
where: {
conversationHistoryId,
},
});
return lastExecutionSteps;
};
export const getActivityDetails = async (activityId: string) => {
if (!activityId) {
return {};
}
const activity = await prisma.activity.findFirst({
where: {
id: activityId,
},
});
return {
activityId,
integrationAccountId: activity?.integrationAccountId,
sourceURL: activity?.sourceURL,
};
};
/**
* Generates a random ID of 6 characters
* @returns A random string of 6 characters
*/
export const generateRandomId = (): string => {
// Define characters that can be used in the ID
const characters =
"ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789";
let result = "";
// Generate 6 random characters
for (let i = 0; i < 6; i++) {
const randomIndex = Math.floor(Math.random() * characters.length);
result += characters.charAt(randomIndex);
}
return result.toLowerCase();
};
// eslint-disable-next-line @typescript-eslint/no-explicit-any
export function flattenObject(obj: Record<string, any>, prefix = ""): string[] {
return Object.entries(obj).reduce<string[]>((result, [key, value]) => {
const entryKey = prefix ? `${prefix}_${key}` : key;
if (value !== null && typeof value === "object" && !Array.isArray(value)) {
// For nested objects, flatten them and add to results
return [...result, ...flattenObject(value, entryKey)];
}
// For primitive values or arrays, add directly
return [...result, `- ${entryKey}: ${value}`];
}, []);
}
export const updateConversationStatus = async (
status: string,
conversationId: string,
) => {
const data: Prisma.ConversationUpdateInput = { status, unread: true };
return await prisma.conversation.update({
where: {
id: conversationId,
},
data,
});
};
export const getActivity = async (activityId: string) => {
return await prisma.activity.findUnique({
where: {
id: activityId,
},
include: {
workspace: true,
integrationAccount: {
include: {
integrationDefinition: true,
},
},
},
});
};
export const updateActivity = async (
activityId: string,
rejectionReason: string,
) => {
return await prisma.activity.update({
where: {
id: activityId,
},
data: {
rejectionReason,
},
});
};
export const createConversation = async (
activity: Activity,
workspace: Workspace,
integrationDefinition: IntegrationDefinitionV2,
automationContext: { automations?: string[]; executionPlan: string },
) => {
const conversation = await prisma.conversation.create({
data: {
workspaceId: activity.workspaceId,
userId: workspace.userId as string,
title: activity.text.substring(0, 100),
ConversationHistory: {
create: {
userId: workspace.userId,
message: `Activity from ${integrationDefinition.name} \n Content: ${activity.text}`,
userType: UserType.User,
activityId: activity.id,
thoughts: { ...automationContext },
},
},
},
include: {
ConversationHistory: true,
},
});
return conversation;
};
export async function getContinuationAgentConversationHistory(
conversationId: string,
): Promise<ConversationHistory | null> {
return await prisma.conversationHistory.findFirst({
where: {
conversationId,
userType: "Agent",
deleted: null,
},
orderBy: {
createdAt: "desc",
},
take: 1,
});
}
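flattenObject is what turns the chat context into the CONTEXT lines of the system prompt; for example:

flattenObject({ page: { title: "Roadmap", url: "https://example.com/roadmap" }, workspaceId: "ws_1" });
// => ["- page_title: Roadmap", "- page_url: https://example.com/roadmap", "- workspaceId: ws_1"]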

View File

@ -11,6 +11,8 @@
"typecheck": "tsc"
},
"dependencies": {
"@ai-sdk/anthropic": "^1.2.12",
"@ai-sdk/google": "^1.2.22",
"@ai-sdk/openai": "^1.3.21",
"@coji/remix-auth-google": "^4.2.0",
"@conform-to/react": "^0.6.1",
@ -52,6 +54,7 @@
"@tanstack/react-table": "^8.13.2",
"@trigger.dev/sdk": "^3.3.17",
"ai": "4.3.14",
"axios": "^1.10.0",
"bullmq": "^5.53.2",
"class-variance-authority": "^0.7.1",
"clsx": "^2.1.1",
@ -61,11 +64,13 @@
"date-fns": "^4.1.0",
"dayjs": "^1.11.10",
"emails": "workspace:*",
"execa": "^9.6.0",
"express": "^4.18.1",
"graphology": "^0.26.0",
"graphology-layout-force": "^0.2.4",
"graphology-layout-forceatlas2": "^0.10.1",
"graphology-layout-noverlap": "^0.4.2",
"handlebars": "^4.7.8",
"ioredis": "^5.6.1",
"isbot": "^4.1.0",
"jose": "^5.2.3",
@ -85,6 +90,7 @@
"remix-themes": "^1.3.1",
"remix-typedjson": "0.3.1",
"remix-utils": "^7.7.0",
"sdk": "link:@modelcontextprotocol/sdk",
"sigma": "^3.0.2",
"tailwind-merge": "^2.6.0",
"tailwind-scrollbar-hide": "^2.0.0",

View File

@ -0,0 +1,70 @@
-- CreateEnum
CREATE TYPE "UserType" AS ENUM ('Agent', 'User', 'System');
-- CreateTable
CREATE TABLE "Conversation" (
"id" TEXT NOT NULL,
"createdAt" TIMESTAMP(3) NOT NULL DEFAULT CURRENT_TIMESTAMP,
"updatedAt" TIMESTAMP(3) NOT NULL,
"deleted" TIMESTAMP(3),
"unread" BOOLEAN NOT NULL DEFAULT false,
"title" TEXT,
"userId" TEXT NOT NULL,
"workspaceId" TEXT,
"status" TEXT NOT NULL DEFAULT 'pending',
CONSTRAINT "Conversation_pkey" PRIMARY KEY ("id")
);
-- CreateTable
CREATE TABLE "ConversationHistory" (
"id" TEXT NOT NULL,
"createdAt" TIMESTAMP(3) NOT NULL DEFAULT CURRENT_TIMESTAMP,
"updatedAt" TIMESTAMP(3) NOT NULL,
"deleted" TIMESTAMP(3),
"message" TEXT NOT NULL,
"userType" "UserType" NOT NULL,
"activityId" TEXT,
"context" JSONB,
"thoughts" JSONB,
"userId" TEXT,
"conversationId" TEXT NOT NULL,
CONSTRAINT "ConversationHistory_pkey" PRIMARY KEY ("id")
);
-- CreateTable
CREATE TABLE "ConversationExecutionStep" (
"id" TEXT NOT NULL,
"createdAt" TIMESTAMP(3) NOT NULL DEFAULT CURRENT_TIMESTAMP,
"updatedAt" TIMESTAMP(3) NOT NULL,
"deleted" TIMESTAMP(3),
"thought" TEXT NOT NULL,
"message" TEXT NOT NULL,
"actionId" TEXT,
"actionOutput" TEXT,
"actionInput" TEXT,
"actionStatus" TEXT,
"metadata" JSONB DEFAULT '{}',
"conversationHistoryId" TEXT NOT NULL,
CONSTRAINT "ConversationExecutionStep_pkey" PRIMARY KEY ("id")
);
-- AddForeignKey
ALTER TABLE "Conversation" ADD CONSTRAINT "Conversation_userId_fkey" FOREIGN KEY ("userId") REFERENCES "User"("id") ON DELETE RESTRICT ON UPDATE CASCADE;
-- AddForeignKey
ALTER TABLE "Conversation" ADD CONSTRAINT "Conversation_workspaceId_fkey" FOREIGN KEY ("workspaceId") REFERENCES "Workspace"("id") ON DELETE SET NULL ON UPDATE CASCADE;
-- AddForeignKey
ALTER TABLE "ConversationHistory" ADD CONSTRAINT "ConversationHistory_activityId_fkey" FOREIGN KEY ("activityId") REFERENCES "Activity"("id") ON DELETE SET NULL ON UPDATE CASCADE;
-- AddForeignKey
ALTER TABLE "ConversationHistory" ADD CONSTRAINT "ConversationHistory_userId_fkey" FOREIGN KEY ("userId") REFERENCES "User"("id") ON DELETE SET NULL ON UPDATE CASCADE;
-- AddForeignKey
ALTER TABLE "ConversationHistory" ADD CONSTRAINT "ConversationHistory_conversationId_fkey" FOREIGN KEY ("conversationId") REFERENCES "Conversation"("id") ON DELETE RESTRICT ON UPDATE CASCADE;
-- AddForeignKey
ALTER TABLE "ConversationExecutionStep" ADD CONSTRAINT "ConversationExecutionStep_conversationHistoryId_fkey" FOREIGN KEY ("conversationHistoryId") REFERENCES "ConversationHistory"("id") ON DELETE RESTRICT ON UPDATE CASCADE;
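Once migrated, these tables are read through the Prisma models of the same names; for example, loading a conversation with its ordered history (sketch):

import { PrismaClient } from "@prisma/client";

const prisma = new PrismaClient();

async function loadConversation(conversationId: string) {
  return prisma.conversation.findUnique({
    where: { id: conversationId },
    include: { ConversationHistory: { orderBy: { createdAt: "asc" } } },
  });
}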

View File

@ -10,67 +10,29 @@ generator client {
previewFeatures = ["tracing"]
}
model User {
id String @id @default(cuid())
email String @unique
authenticationMethod AuthenticationMethod
authenticationProfile Json?
authenticationExtraParams Json?
authIdentifier String? @unique
displayName String?
name String?
avatarUrl String?
memoryFilter String? // Adding memory filter instructions
admin Boolean @default(false)
createdAt DateTime @default(now())
updatedAt DateTime @updatedAt
marketingEmails Boolean @default(true)
confirmedBasicDetails Boolean @default(false)
referralSource String?
personalAccessTokens PersonalAccessToken[]
InvitationCode InvitationCode? @relation(fields: [invitationCodeId], references: [id])
invitationCodeId String?
Space Space[]
Workspace Workspace?
IntegrationAccount IntegrationAccount[]
WebhookConfiguration WebhookConfiguration[]
}
model Workspace {
model Activity {
id String @id @default(uuid())
createdAt DateTime @default(now())
updatedAt DateTime @updatedAt
deleted DateTime?
name String
slug String @unique
icon String?
text String
// Used to link the task or activity to external apps
sourceURL String?
integrations String[]
integrationAccount IntegrationAccount? @relation(fields: [integrationAccountId], references: [id])
integrationAccountId String?
userId String? @unique
user User? @relation(fields: [userId], references: [id])
IngestionQueue IngestionQueue[]
IntegrationAccount IntegrationAccount[]
IntegrationDefinitionV2 IntegrationDefinitionV2[]
Activity Activity[]
WebhookConfiguration WebhookConfiguration[]
rejectionReason String?
workspace Workspace @relation(fields: [workspaceId], references: [id])
workspaceId String
WebhookDeliveryLog WebhookDeliveryLog[]
ConversationHistory ConversationHistory[]
}
enum AuthenticationMethod {
GOOGLE
MAGIC_LINK
}
/// Used to generate PersonalAccessTokens, they're one-time use
model AuthorizationCode {
id String @id @default(cuid())
@ -83,63 +45,69 @@ model AuthorizationCode {
updatedAt DateTime @updatedAt
}
// Used by User's to perform API actions
model PersonalAccessToken {
id String @id @default(cuid())
model Conversation {
id String @id @default(uuid())
createdAt DateTime @default(now())
updatedAt DateTime @updatedAt
deleted DateTime?
/// If generated by the CLI this will be "cli", otherwise user-provided
name String
unread Boolean @default(false)
/// This is the token encrypted using the ENCRYPTION_KEY
encryptedToken Json
/// This is shown in the UI, with ********
obfuscatedToken String
/// This is used to find the token in the database
hashedToken String @unique
user User @relation(fields: [userId], references: [id])
title String?
user User @relation(fields: [userId], references: [id])
userId String
revokedAt DateTime?
lastAccessedAt DateTime?
workspace Workspace? @relation(fields: [workspaceId], references: [id])
workspaceId String?
createdAt DateTime @default(now())
updatedAt DateTime @updatedAt
status String @default("pending") // Can be "pending", "running", "completed", "failed", "need_attension"
authorizationCodes AuthorizationCode[]
ConversationHistory ConversationHistory[]
}
model InvitationCode {
id String @id @default(cuid())
code String @unique
model ConversationExecutionStep {
id String @id @default(uuid())
createdAt DateTime @default(now())
updatedAt DateTime @updatedAt
deleted DateTime?
users User[]
thought String
message String
createdAt DateTime @default(now())
actionId String?
actionOutput String?
actionInput String?
actionStatus String?
metadata Json? @default("{}")
conversationHistory ConversationHistory @relation(fields: [conversationHistoryId], references: [id])
conversationHistoryId String
}
// Space model for user workspaces
model Space {
id String @id @default(cuid())
name String
description String?
autoMode Boolean @default(false)
model ConversationHistory {
id String @id @default(uuid())
createdAt DateTime @default(now())
updatedAt DateTime @updatedAt
deleted DateTime?
// Relations
user User @relation(fields: [userId], references: [id])
userId String
message String
userType UserType
// Space's enabled entities
enabledEntities SpaceEntity[]
activity Activity? @relation(fields: [activityId], references: [id])
activityId String?
createdAt DateTime @default(now())
updatedAt DateTime @updatedAt
IngestionQueue IngestionQueue[]
context Json?
thoughts Json?
user User? @relation(fields: [userId], references: [id])
userId String?
conversation Conversation @relation(fields: [conversationId], references: [id])
conversationId String
ConversationExecutionStep ConversationExecutionStep[]
}
// Entity types that can be stored in the memory plane
model Entity {
id String @id @default(cuid())
name String @unique // e.g., "User", "Issue", "Task", "Automation"
@ -152,27 +120,6 @@ model Entity {
updatedAt DateTime @updatedAt
}
// Junction table for Space-Entity relationship (what entities are enabled in each space)
model SpaceEntity {
id String @id @default(cuid())
// Relations
space Space @relation(fields: [spaceId], references: [id])
spaceId String
entity Entity @relation(fields: [entityId], references: [id])
entityId String
// Custom settings for this entity in this space
settings Json?
createdAt DateTime @default(now())
updatedAt DateTime @updatedAt
@@unique([spaceId, entityId])
}
// Queue for processing ingestion tasks
model IngestionQueue {
id String @id @default(cuid())
@ -199,29 +146,6 @@ model IngestionQueue {
processedAt DateTime?
}
// For Integrations
model Activity {
id String @id @default(uuid())
createdAt DateTime @default(now())
updatedAt DateTime @updatedAt
deleted DateTime?
text String
// Used to link the task or activity to external apps
sourceURL String?
integrationAccount IntegrationAccount? @relation(fields: [integrationAccountId], references: [id])
integrationAccountId String?
rejectionReason String?
workspace Workspace @relation(fields: [workspaceId], references: [id])
workspaceId String
WebhookDeliveryLog WebhookDeliveryLog[]
}
model IntegrationAccount {
id String @id @default(uuid())
createdAt DateTime @default(now())
@ -265,6 +189,115 @@ model IntegrationDefinitionV2 {
IntegrationAccount IntegrationAccount[]
}
model InvitationCode {
id String @id @default(cuid())
code String @unique
users User[]
createdAt DateTime @default(now())
}
model PersonalAccessToken {
id String @id @default(cuid())
/// If generated by the CLI this will be "cli", otherwise user-provided
name String
/// This is the token encrypted using the ENCRYPTION_KEY
encryptedToken Json
/// This is shown in the UI, with ********
obfuscatedToken String
/// This is used to find the token in the database
hashedToken String @unique
user User @relation(fields: [userId], references: [id])
userId String
revokedAt DateTime?
lastAccessedAt DateTime?
createdAt DateTime @default(now())
updatedAt DateTime @updatedAt
authorizationCodes AuthorizationCode[]
}
model Space {
id String @id @default(cuid())
name String
description String?
autoMode Boolean @default(false)
// Relations
user User @relation(fields: [userId], references: [id])
userId String
// Space's enabled entities
enabledEntities SpaceEntity[]
createdAt DateTime @default(now())
updatedAt DateTime @updatedAt
IngestionQueue IngestionQueue[]
}
model SpaceEntity {
id String @id @default(cuid())
// Relations
space Space @relation(fields: [spaceId], references: [id])
spaceId String
entity Entity @relation(fields: [entityId], references: [id])
entityId String
// Custom settings for this entity in this space
settings Json?
createdAt DateTime @default(now())
updatedAt DateTime @updatedAt
@@unique([spaceId, entityId])
}
model User {
id String @id @default(cuid())
email String @unique
authenticationMethod AuthenticationMethod
authenticationProfile Json?
authenticationExtraParams Json?
authIdentifier String? @unique
displayName String?
name String?
avatarUrl String?
memoryFilter String? // Adding memory filter instructions
admin Boolean @default(false)
createdAt DateTime @default(now())
updatedAt DateTime @updatedAt
marketingEmails Boolean @default(true)
confirmedBasicDetails Boolean @default(false)
referralSource String?
personalAccessTokens PersonalAccessToken[]
InvitationCode InvitationCode? @relation(fields: [invitationCodeId], references: [id])
invitationCodeId String?
Space Space[]
Workspace Workspace?
IntegrationAccount IntegrationAccount[]
WebhookConfiguration WebhookConfiguration[]
Conversation Conversation[]
ConversationHistory ConversationHistory[]
}
model WebhookConfiguration {
id String @id @default(cuid())
url String
@ -298,9 +331,31 @@ model WebhookDeliveryLog {
createdAt DateTime @default(now())
}
enum WebhookDeliveryStatus {
SUCCESS
FAILED
model Workspace {
id String @id @default(uuid())
createdAt DateTime @default(now())
updatedAt DateTime @updatedAt
deleted DateTime?
name String
slug String @unique
icon String?
integrations String[]
userId String? @unique
user User? @relation(fields: [userId], references: [id])
IngestionQueue IngestionQueue[]
IntegrationAccount IntegrationAccount[]
IntegrationDefinitionV2 IntegrationDefinitionV2[]
Activity Activity[]
WebhookConfiguration WebhookConfiguration[]
Conversation Conversation[]
}
enum AuthenticationMethod {
GOOGLE
MAGIC_LINK
}
enum IngestionStatus {
@ -310,3 +365,14 @@ enum IngestionStatus {
FAILED
CANCELLED
}
enum UserType {
Agent
User
System
}
enum WebhookDeliveryStatus {
SUCCESS
FAILED
}
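A rough sketch (not part of this commit) of how the new conversation models might be driven through the generated Prisma Client; the helper name is hypothetical and error handling is omitted. It creates a Conversation together with its first ConversationHistory entry via the nested relation declared above, leaving status at its "pending" default.
import { PrismaClient, UserType } from "@prisma/client";
const prisma = new PrismaClient();
async function startConversation(userId: string, firstMessage: string) {
  return prisma.conversation.create({
    data: {
      userId,
      title: firstMessage.slice(0, 80), // derive a short title from the first message
      ConversationHistory: {
        create: [{ message: firstMessage, userType: UserType.User }],
      },
    },
    include: { ConversationHistory: true },
  });
}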

View File

@ -0,0 +1,19 @@
export enum ActionStatusEnum {
ACCEPT = "ACCEPT",
DECLINE = "DECLINE",
QUESTION = "QUESTION",
TOOL_REQUEST = "TOOL_REQUEST",
SUCCESS = "SUCCESS",
FAILED = "FAILED",
}
export const ActionStatus = {
ACCEPT: "ACCEPT",
DECLINE: "DECLINE",
QUESTION: "QUESTION",
TOOL_REQUEST: "TOOL_REQUEST",
SUCCESS: "SUCCESS",
FAILED: "FAILED",
};
export type ActionStatus = (typeof ActionStatus)[keyof typeof ActionStatus];
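A short usage sketch (not part of this commit): ActionStatusEnum is a nominal TypeScript enum, while the ActionStatus const/type pair mirrors the same values for plain-object use; note that without `as const` on the object literal, the derived ActionStatus type widens to plain string. The import path and function below are illustrative assumptions.
import { ActionStatus, ActionStatusEnum } from "@core/types"; // path assumed
// ActionStatus (the type) is effectively `string` here, so both enum members
// and raw string literals are accepted.
function isTerminal(status: ActionStatus): boolean {
  return status === ActionStatus.SUCCESS || status === ActionStatus.FAILED;
}
isTerminal(ActionStatusEnum.SUCCESS); // true — string enum members are plain strings at runtime
isTerminal("QUESTION"); // false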

View File

@ -0,0 +1 @@
export * from "./conversation-execution.entity";

View File

@ -1,2 +1,3 @@
export * from "./llm";
export * from "./graph";
export * from "./conversation-execution-step";

pnpm-lock.yaml generated
View File

@ -30,6 +30,12 @@ importers:
apps/webapp:
dependencies:
'@ai-sdk/anthropic':
specifier: ^1.2.12
version: 1.2.12(zod@3.23.8)
'@ai-sdk/google':
specifier: ^1.2.22
version: 1.2.22(zod@3.23.8)
'@ai-sdk/openai':
specifier: ^1.3.21
version: 1.3.22(zod@3.23.8)
@ -153,6 +159,9 @@ importers:
ai:
specifier: 4.3.14
version: 4.3.14(react@18.3.1)(zod@3.23.8)
axios:
specifier: ^1.10.0
version: 1.10.0
bullmq:
specifier: ^5.53.2
version: 5.53.2
@ -180,6 +189,9 @@ importers:
emails:
specifier: workspace:*
version: link:../../packages/emails
execa:
specifier: ^9.6.0
version: 9.6.0
express:
specifier: ^4.18.1
version: 4.21.2
@ -195,6 +207,9 @@ importers:
graphology-layout-noverlap:
specifier: ^0.4.2
version: 0.4.2(graphology-types@0.24.8)
handlebars:
specifier: ^4.7.8
version: 4.7.8
ioredis:
specifier: ^5.6.1
version: 5.6.1
@ -252,6 +267,9 @@ importers:
remix-utils:
specifier: ^7.7.0
version: 7.7.0(@remix-run/node@2.1.0(typescript@5.8.3))(@remix-run/react@2.16.7(react-dom@18.3.1(react@18.3.1))(react@18.3.1)(typescript@5.8.3))(@remix-run/router@1.23.0)(crypto-js@4.2.0)(react@18.3.1)(zod@3.23.8)
sdk:
specifier: link:@modelcontextprotocol/sdk
version: link:@modelcontextprotocol/sdk
sigma:
specifier: ^3.0.2
version: 3.0.2(graphology-types@0.24.8)
@ -463,6 +481,18 @@ importers:
packages:
'@ai-sdk/anthropic@1.2.12':
resolution: {integrity: sha512-YSzjlko7JvuiyQFmI9RN1tNZdEiZxc+6xld/0tq/VkJaHpEzGAb1yiNxxvmYVcjvfu/PcvCxAAYXmTYQQ63IHQ==}
engines: {node: '>=18'}
peerDependencies:
zod: ^3.0.0
'@ai-sdk/google@1.2.22':
resolution: {integrity: sha512-Ppxu3DIieF1G9pyQ5O1Z646GYR0gkC57YdBqXJ82qvCdhEhZHu0TWhmnOoeIWe2olSbuDeoOY+MfJrW8dzS3Hw==}
engines: {node: '>=18'}
peerDependencies:
zod: ^3.0.0
'@ai-sdk/openai@1.3.22':
resolution: {integrity: sha512-QwA+2EkG0QyjVR+7h6FE7iOu2ivNqAVMm9UJZkVxxTk5OIq5fFJDTEI/zICEMuHImTTXR2JjsL6EirJ28Jc4cw==}
engines: {node: '>=18'}
@ -3136,9 +3166,16 @@ packages:
'@rushstack/eslint-patch@1.11.0':
resolution: {integrity: sha512-zxnHvoMQVqewTJr/W4pKjF0bMGiKJv1WX7bSrkl46Hg0QjESbzBROWK0Wg4RphzSOS5Jiy7eFimmM3UgMrMZbQ==}
'@sec-ant/readable-stream@0.4.1':
resolution: {integrity: sha512-831qok9r2t8AlxLko40y2ebgSDhenenCatLVeW/uBtnHPyhHOvG0C7TvfgecV+wHzIm5KUICgzmVpWS+IMEAeg==}
'@selderee/plugin-htmlparser2@0.11.0':
resolution: {integrity: sha512-P33hHGdldxGabLFjPPpaTxVolMrzrcegejx+0GxjrIb9Zv48D8yAIA/QTDR2dFl7Uz7urX8aX6+5bCZslr+gWQ==}
'@sindresorhus/merge-streams@4.0.0':
resolution: {integrity: sha512-tlqY9xq5ukxTUZBmoOp+m61cqwQD5pHJtFY3Mn8CA8ps6yghLH/Hw8UPdqg4OLmFW3IFlcXnQNmo/dh8HzXYIQ==}
engines: {node: '>=18'}
'@smithy/abort-controller@4.0.4':
resolution: {integrity: sha512-gJnEjZMvigPDQWHrW3oPrFhQtkrgqBkyjj3pCIdF3A5M6vsZODG93KNlfJprv6bp4245bdT32fsHK4kkH3KYDA==}
engines: {node: '>=18.0.0'}
@ -4239,6 +4276,9 @@ packages:
resolution: {integrity: sha512-hsU18Ae8CDTR6Kgu9DYf0EbCr/a5iGL0rytQDobUcdpYOKokk8LEjVphnXkDkgpi0wYVsqrXuP0bZxJaTqdgoA==}
engines: {node: '>= 0.4'}
asynckit@0.4.0:
resolution: {integrity: sha512-Oei9OH4tRh0YqU3GxhX79dM/mwVgvbZJaSNaRk+bshkj0S5cfHcgYakreBjrHwatXKbz+IoIdYLxrKim2MjW0Q==}
autoprefixer@10.4.14:
resolution: {integrity: sha512-FQzyfOsTlwVzjHxKEqRIAdJx9niO6VCBCoEwax/VLSoQF29ggECcPuBqUMZ+u8jCZOPSy8b8/8KnuFbp0SaFZQ==}
engines: {node: ^10 || ^12 || >=14}
@ -4265,6 +4305,9 @@ packages:
resolution: {integrity: sha512-Xm7bpRXnDSX2YE2YFfBk2FnF0ep6tmG7xPh8iHee8MIcrgq762Nkce856dYtJYLkuIoYZvGfTs/PbZhideTcEg==}
engines: {node: '>=4'}
axios@1.10.0:
resolution: {integrity: sha512-/1xYAC4MP/HEG+3duIhFr4ZQXR4sQXOIe+o6sdqzeykGLx6Upp/1p8MHqhINOvGeP7xyNHe7tsiJByc4SSVUxw==}
axobject-query@4.1.0:
resolution: {integrity: sha512-qIj0G9wZbMGNLjLmg1PT6v2mE9AH2zlnADJD/2tC6E00hgmhUOfEB6greHPAfLRSufHqROIUTkw6E+M3lH0PTQ==}
engines: {node: '>= 0.4'}
@ -4516,6 +4559,10 @@ packages:
color@3.2.1:
resolution: {integrity: sha512-aBl7dZI9ENN6fUGC7mWpMTPNHmWUSNan9tuWN6ahh5ZLNk9baLJOnSMlrQkHcrfFgz2/RigjUVAjdx36VcemKA==}
combined-stream@1.0.8:
resolution: {integrity: sha512-FQN4MRfuJeHf7cBbBMJFXhKSDq+2kAArBlmRBvcvFE5BB1HZKXtSFASDhdlz9zOYwxh8lDdnvmMOe/+5cdoEdg==}
engines: {node: '>= 0.8'}
comma-separated-tokens@2.0.3:
resolution: {integrity: sha512-Fu4hJdvzeylCfQPp9SGWidpzrMs7tTrlu6Vb8XGaRGck8QSNZJJp538Wrb60Lax4fPwR64ViY468OIUTbRlGZg==}
@ -4925,6 +4972,10 @@ packages:
delaunator@5.0.1:
resolution: {integrity: sha512-8nvh+XBe96aCESrGOqMp/84b13H9cdKbG5P2ejQCh4d4sK9RL4371qou9drQjMhvnPmhWl5hnmqbEE0fXr9Xnw==}
delayed-stream@1.0.0:
resolution: {integrity: sha512-ZySD7Nf91aLB0RxL4KGrKHBXl7Eds1DAmEdcoVawXnLD7SDhpNgtuII2aAkg7a7QS41jxPSZ17p4VdGnMHk3MQ==}
engines: {node: '>=0.4.0'}
denque@2.1.0:
resolution: {integrity: sha512-HVQE3AAb/pxF8fQAoiqpvg9i3evqug3hoiwakOyZAwJm+6vZehbkYXZ0l4JxS+I3QxM97v5aaRNhj8v5oBhekw==}
engines: {node: '>=0.10'}
@ -5423,6 +5474,10 @@ packages:
resolution: {integrity: sha512-VyhnebXciFV2DESc+p6B+y0LjSm0krU4OgJN44qFAhBY0TJ+1V61tYD2+wHusZ6F9n5K+vl8k0sTy7PEfV4qpg==}
engines: {node: '>=16.17'}
execa@9.6.0:
resolution: {integrity: sha512-jpWzZ1ZhwUmeWRhS7Qv3mhpOhLfwI+uAX4e5fOcXqwMR7EcJ0pj2kV1CVzHVMX/LphnKWD3LObjZCoJ71lKpHw==}
engines: {node: ^18.19.0 || >=20.5.0}
exit-hook@2.2.1:
resolution: {integrity: sha512-eNTPlAD67BmP31LDINZ3U7HSF8l57TxOY2PmBJ1shpCvpnxBF93mWCE8YHBnXs8qiUZJc9WDcWIeC3a2HIAMfw==}
engines: {node: '>=6'}
@ -5484,6 +5539,10 @@ packages:
fflate@0.4.8:
resolution: {integrity: sha512-FJqqoDBR00Mdj9ppamLa/Y7vxm+PRmNWA67N846RvsoYVMKB4q3y/de5PA7gUmRMYK/8CMz2GDZQmCRN1wBcWA==}
figures@6.1.0:
resolution: {integrity: sha512-d+l3qxjSesT4V7v2fh+QnmFnUWv9lSpjarhShNTgBOfA0ttejbQUAlHLitbjkoRiDulW0OPoQPYIGhIC8ohejg==}
engines: {node: '>=18'}
file-entry-cache@6.0.1:
resolution: {integrity: sha512-7Gps/XWymbLk2QLYK4NzpMOrYjMhdIxXuIvy2QBsLE6ljuodKvdkWs/cpyJJ3CVIVpH0Oi1Hvg1ovbMzLdFBBg==}
engines: {node: ^10.12.0 || >=12.0.0}
@ -5514,6 +5573,15 @@ packages:
flatted@3.3.3:
resolution: {integrity: sha512-GX+ysw4PBCz0PzosHDepZGANEuFCMLrnRTiEy9McGjmkCQYwRq4A/X786G/fjM/+OjsWSU1ZrY5qyARZmO/uwg==}
follow-redirects@1.15.9:
resolution: {integrity: sha512-gew4GsXizNgdoRyqmyfMHyAmXsZDk6mHkSxZFCzW9gwlbtOW44CDtYavM+y+72qD/Vq2l550kMF52DT8fOLJqQ==}
engines: {node: '>=4.0'}
peerDependencies:
debug: '*'
peerDependenciesMeta:
debug:
optional: true
for-each@0.3.5:
resolution: {integrity: sha512-dKx12eRCVIzqCxFGplyFKJMPvLEWgmNtUrpTiJIR5u97zEhRG8ySrtboPHZXx7daLxQVrl643cTzbab2tkQjxg==}
engines: {node: '>= 0.4'}
@ -5522,6 +5590,10 @@ packages:
resolution: {integrity: sha512-gIXjKqtFuWEgzFRJA9WCQeSJLZDjgJUOMCMzxtvFq/37KojM1BFGufqsCy0r4qSQmYLsZYMeyRqzIWOMup03sw==}
engines: {node: '>=14'}
form-data@4.0.3:
resolution: {integrity: sha512-qsITQPfmvMOSAdeyZ+12I1c+CKSstAFAwu+97zrnWAbIr5u8wfsExUzCesVLC8NgHuRUqNN4Zy6UPWUTRGslcA==}
engines: {node: '>= 6'}
format@0.2.2:
resolution: {integrity: sha512-wzsgA6WOq+09wrU1tsJ09udeR/YZRaeArL9e1wPbFg3GG2yDnC2ldKpxs4xunpFF9DgqCqOIra3bc1HWrJ37Ww==}
engines: {node: '>=0.4.x'}
@ -5624,6 +5696,10 @@ packages:
resolution: {integrity: sha512-VaUJspBffn/LMCJVoMvSAdmscJyS1auj5Zulnn5UoYcY531UWmdwhRWkcGKnGU93m5HSXP9LP2usOryrBtQowA==}
engines: {node: '>=16'}
get-stream@9.0.1:
resolution: {integrity: sha512-kVCxPF3vQM/N0B1PmoqVUqgHP+EeVjmZSQn+1oCRPxd2P21P2F19lIgbR3HBosbB1PUhOAoctJnfEn2GbN2eZA==}
engines: {node: '>=18'}
get-symbol-description@1.1.0:
resolution: {integrity: sha512-w9UMqWwJxHNOvoNzSJ2oPF5wvYcvP7jUvYzhp67yEhTi17ZDBBC1z9pTdGuzjD+EFIqLSYRweZjqfiPzQ06Ebg==}
engines: {node: '>= 0.4'}
@ -5724,6 +5800,11 @@ packages:
resolution: {integrity: sha512-4haO1M4mLO91PW57BMsDFf75UmwoRX0GkdD+Faw+Lr+r/OZrOCS0pIBwOL1xCKQqnQzbNFGgK2V2CpBUPeFNTw==}
hasBin: true
handlebars@4.7.8:
resolution: {integrity: sha512-vafaFqs8MZkRrSX7sFVUdo3ap/eNiLnb4IakshzvP56X5Nr1iGKAIqdX6tMlm6HcNRIkr6AxO5jFEoJzzpT8aQ==}
engines: {node: '>=0.4.7'}
hasBin: true
hard-rejection@2.1.0:
resolution: {integrity: sha512-VIZB+ibDhx7ObhAe7OVtoEbuP4h/MuOTHJ+J8h/eBXotJYl0fBgR72xDFCKgIh22OJZIOVNxBMWuhAr10r8HdA==}
engines: {node: '>=6'}
@ -5798,6 +5879,10 @@ packages:
resolution: {integrity: sha512-AXcZb6vzzrFAUE61HnN4mpLqd/cSIwNQjtNWR0euPm6y0iqx3G4gOXaIDdtdDwZmhwe82LA6+zinmW4UBWVePQ==}
engines: {node: '>=16.17.0'}
human-signals@8.0.1:
resolution: {integrity: sha512-eKCa6bwnJhvxj14kZk5NCPc6Hb6BdsU9DZcOnmQKSnO1VKrfV0zCvtttPZUsBvjmNDn8rpcJfpwSYnHBjc95MQ==}
engines: {node: '>=18.18.0'}
humanize-duration@3.33.0:
resolution: {integrity: sha512-vYJX7BSzn7EQ4SaP2lPYVy+icHDppB6k7myNeI3wrSRfwMS5+BHyGgzpHR0ptqJ2AQ6UuIKrclSg5ve6Ci4IAQ==}
@ -6021,6 +6106,10 @@ packages:
resolution: {integrity: sha512-LnQR4bZ9IADDRSkvpqMGvt/tEJWclzklNgSw48V5EAaAeDd6qGvN8ei6k5p0tvxSR171VmGyHuTiAOfxAbr8kA==}
engines: {node: ^12.20.0 || ^14.13.1 || >=16.0.0}
is-stream@4.0.1:
resolution: {integrity: sha512-Dnz92NInDqYckGEUJv689RbRiTSEHCQ7wOVeALbkOz999YpqT46yMRIGtSNl2iCL1waAZSx40+h59NV/EwzV/A==}
engines: {node: '>=18'}
is-string@1.1.1:
resolution: {integrity: sha512-BtEeSsoaQjlSPBemMQIrY1MY0uM6vnS1g5fmufYOtnxLGUZM2178PKbhsk7Ffv58IX+ZtcvoGwccYsh0PglkAA==}
engines: {node: '>= 0.4'}
@ -6041,6 +6130,10 @@ packages:
resolution: {integrity: sha512-knxG2q4UC3u8stRGyAVJCOdxFmv5DZiRcdlIaAQXAbSfJya+OhopNotLQrstBhququ4ZpuKbDc/8S6mgXgPFPw==}
engines: {node: '>=10'}
is-unicode-supported@2.1.0:
resolution: {integrity: sha512-mE00Gnza5EEB3Ds0HfMyllZzbBrmLOX3vfWoj9A9PEnTfratQ/BcaJOuMhnkhjXvb2+FkY3VuHqtAGpTPmglFQ==}
engines: {node: '>=18'}
is-weakmap@2.0.2:
resolution: {integrity: sha512-K5pXYOm9wqY1RgjpL3YTkF39tni1XajUIkawTLUo9EZEVUFga5gSQJF8nNS7ZwJQ02y+1YCNYcMh+HIf1ZqE+w==}
engines: {node: '>= 0.4'}
@ -6850,6 +6943,10 @@ packages:
resolution: {integrity: sha512-ppwTtiJZq0O/ai0z7yfudtBpWIoxM8yE6nHi1X47eFR2EWORqfbu6CnPlNsjeN683eT0qG6H/Pyf9fCcvjnnnQ==}
engines: {node: ^12.20.0 || ^14.13.1 || >=16.0.0}
npm-run-path@6.0.0:
resolution: {integrity: sha512-9qny7Z9DsQU8Ou39ERsPU4OZQlSTP47ShQzuKZ6PRXpYLtIFgl/DEBYEXKlvcEa+9tHVcK8CF81Y2V72qaZhWA==}
engines: {node: '>=18'}
num2fraction@1.2.2:
resolution: {integrity: sha512-Y1wZESM7VUThYY+4W+X4ySH2maqcA+p7UR+w8VWNWVAd6lwuXXWz/w/Cz43J/dI2I+PS6wD5N+bJUF+gjWvIqg==}
@ -7008,6 +7105,10 @@ packages:
resolution: {integrity: sha512-kHt7kzLoS9VBZfUsiKjv43mr91ea+U05EyKkEtqp7vNbHxmaVuEqN7XxeEVnGrMtYOAxGrDElSi96K7EgO1zCA==}
engines: {node: '>=6'}
parse-ms@4.0.0:
resolution: {integrity: sha512-TXfryirbmq34y8QBwgqCVLi+8oA3oWx2eAnSn62ITyEhEYaWRlVZ2DvMM9eZbMs/RfxPu/PK/aBLyGj4IrqMHw==}
engines: {node: '>=18'}
parseley@0.12.1:
resolution: {integrity: sha512-e6qHKe3a9HWr0oMRVDTRhKce+bRO8VGQR3NyVwcjwrbhMmFCX9KszEV35+rn4AdilFAq9VPxP/Fe1wC9Qjd2lw==}
@ -7363,6 +7464,10 @@ packages:
resolution: {integrity: sha512-973driJZvxiGOQ5ONsFhOF/DtzPMOMtgC11kCpUrPGMTgqp2q/1gwzCquocrN33is0VZ5GFHXZYMM9l6h67v2Q==}
engines: {node: '>=10'}
pretty-ms@9.2.0:
resolution: {integrity: sha512-4yf0QO/sllf/1zbZWYnvWw3NxCQwLXKzIj0G849LSufP15BXKM0rbD2Z3wVnkMfjdn/CB0Dpp444gYAACdsplg==}
engines: {node: '>=18'}
prism-react-renderer@2.1.0:
resolution: {integrity: sha512-I5cvXHjA1PVGbGm1MsWCpvBCRrYyxEri0MC7/JbfIfYfcXAxHyO5PaUjs3A8H5GW6kJcLhTHxxMaOZZpRZD2iQ==}
peerDependencies:
@ -7418,6 +7523,9 @@ packages:
resolution: {integrity: sha512-llQsMLSUDUPT44jdrU/O37qlnifitDP+ZwrmmZcoSKyLKvtZxpyV0n2/bD/N4tBAAZ/gJEdZU7KMraoK1+XYAg==}
engines: {node: '>= 0.10'}
proxy-from-env@1.1.0:
resolution: {integrity: sha512-D+zkORCbA9f1tdWRK0RaCR3GPv50cMxcrz4X8k5LTSUD1Dkw47mKJEZQNunItRTkWwgtaUSo1RVFRIG9ZXiFYg==}
pseudomap@1.0.2:
resolution: {integrity: sha512-b/YwNhb8lk1Zz2+bXXpS/LK9OisiZZ1SNsSLxN1x2OXVEhW2Ckr/7mWE5vrC1ZTiJlD9g19jWszTmJsB+oEpFQ==}
@ -8102,6 +8210,10 @@ packages:
resolution: {integrity: sha512-dOESqjYr96iWYylGObzd39EuNTa5VJxyvVAEm5Jnh7KGo75V43Hk1odPQkNDyXNmUR6k+gEiDVXnjB8HJ3crXw==}
engines: {node: '>=12'}
strip-final-newline@4.0.0:
resolution: {integrity: sha512-aulFJcD6YK8V1G7iRB5tigAP4TsHBZZrOV8pjV++zdUwmeV8uzbY7yn6h9MswN62adStNZFuCIx4haBnRuMDaw==}
engines: {node: '>=18'}
strip-indent@3.0.0:
resolution: {integrity: sha512-laJTa3Jb+VQpaC6DseHhF7dXVqHTfJPCRDaEbid/drOhgitgYku/letMUqOXFoWV0zIIUbjpdH2t+tYj4bQMRQ==}
engines: {node: '>=8'}
@ -8471,6 +8583,11 @@ packages:
ufo@1.6.1:
resolution: {integrity: sha512-9a4/uxlTWJ4+a5i0ooc1rU7C7YOw3wT+UGqdeNNHWnOF9qcMBgLRS+4IYUqbczewFx4mLEig6gawh7X6mFlEkA==}
uglify-js@3.19.3:
resolution: {integrity: sha512-v3Xu+yuwBXisp6QYTcH4UbH+xYJXqnq2m/LtQVWKWzYc1iehYnLixoQDN9FH6/j9/oybfd6W9Ghwkl8+UMKTKQ==}
engines: {node: '>=0.8.0'}
hasBin: true
ulid@2.4.0:
resolution: {integrity: sha512-fIRiVTJNcSRmXKPZtGzFQv9WRrZ3M9eoptl/teFJvjOzmpU+/K/JH6HZ8deBfb5vMEpicJcLn7JmvdknlMq7Zg==}
hasBin: true
@ -8489,6 +8606,10 @@ packages:
resolution: {integrity: sha512-gBLkYIlEnSp8pFbT64yFgGE6UIB9tAkhukC23PmMDCe5Nd+cRqKxSjw5y54MK2AZMgZfJWMaNE4nYUHgi1XEOw==}
engines: {node: '>=18.17'}
unicorn-magic@0.3.0:
resolution: {integrity: sha512-+QBBXBCvifc56fsbuxZQ6Sic3wqqc3WWaqxs58gvJrcOuN83HGTCwz3oS5phzU9LthRNE9VrJCFCLUgHeeFnfA==}
engines: {node: '>=18'}
unified@10.1.2:
resolution: {integrity: sha512-pUSWAi/RAnVy1Pif2kAoeWNBa3JVrx0MId2LASj8G+7AiHWoKZNTomq6LG326T68U7/e263X6fTdcXIy7XnF7Q==}
@ -8784,6 +8905,9 @@ packages:
resolution: {integrity: sha512-BN22B5eaMMI9UMtjrGd5g5eCYPpCPDUy0FJXbYsaT5zYxjFOckS53SQDE3pWkVoWpHXVb3BrYcEN4Twa55B5cA==}
engines: {node: '>=0.10.0'}
wordwrap@1.0.0:
resolution: {integrity: sha512-gvVzJFlPycKc5dZN4yPkP8w7Dc37BtP1yczEneOb4uq34pXZcvrtRTmWV8W+Ume+XCxKgbjM+nevkyFPMybd4Q==}
wrap-ansi@6.2.0:
resolution: {integrity: sha512-r6lPcBGxZXlIcymEu7InxDMhdW0KDxpLgoFLcguasxCaJ/SOIZwINatK9KY/tf+ZrlywOKU0UDj3ATXUBfxJXA==}
engines: {node: '>=8'}
@ -8876,6 +9000,10 @@ packages:
resolution: {integrity: sha512-rVksvsnNCdJ/ohGc6xgPwyN8eheCxsiLM8mxuE/t/mOVqJewPuO1miLpTHQiRgTKCLexL4MeAFVagts7HmNZ2Q==}
engines: {node: '>=10'}
yoctocolors@2.1.1:
resolution: {integrity: sha512-GQHQqAopRhwU8Kt1DDM8NjibDXHC8eoh1erhGAJPEyveY9qqVeXvVikNKrDz69sHowPMorbPUrH/mx8c50eiBQ==}
engines: {node: '>=18'}
zod-error@1.5.0:
resolution: {integrity: sha512-zzopKZ/skI9iXpqCEPj+iLCKl9b88E43ehcU+sbRoHuwGd9F1IDVGQ70TyO6kmfiRL1g4IXkjsXK+g1gLYl4WQ==}
@ -8898,6 +9026,18 @@ packages:
snapshots:
'@ai-sdk/anthropic@1.2.12(zod@3.23.8)':
dependencies:
'@ai-sdk/provider': 1.1.3
'@ai-sdk/provider-utils': 2.2.8(zod@3.23.8)
zod: 3.23.8
'@ai-sdk/google@1.2.22(zod@3.23.8)':
dependencies:
'@ai-sdk/provider': 1.1.3
'@ai-sdk/provider-utils': 2.2.8(zod@3.23.8)
zod: 3.23.8
'@ai-sdk/openai@1.3.22(zod@3.23.8)':
dependencies:
'@ai-sdk/provider': 1.1.3
@ -10588,7 +10728,7 @@ snapshots:
react-dom: 18.3.1(react@18.3.1)
optionalDependencies:
'@types/react': 18.2.47
'@types/react-dom': 18.3.7(@types/react@18.2.69)
'@types/react-dom': 18.3.7(@types/react@18.2.47)
'@radix-ui/react-arrow@1.1.7(@types/react-dom@18.3.7(@types/react@18.2.69))(@types/react@18.2.69)(react-dom@18.3.1(react@18.3.1))(react@18.3.1)':
dependencies:
@ -10642,7 +10782,7 @@ snapshots:
react-dom: 18.3.1(react@18.3.1)
optionalDependencies:
'@types/react': 18.2.47
'@types/react-dom': 18.3.7(@types/react@18.2.69)
'@types/react-dom': 18.3.7(@types/react@18.2.47)
'@radix-ui/react-collapsible@1.1.11(@types/react-dom@18.3.7(@types/react@18.2.69))(@types/react@18.2.69)(react-dom@18.3.1(react@18.3.1))(react@18.3.1)':
dependencies:
@ -10670,7 +10810,7 @@ snapshots:
react-dom: 18.3.1(react@18.3.1)
optionalDependencies:
'@types/react': 18.2.47
'@types/react-dom': 18.3.7(@types/react@18.2.69)
'@types/react-dom': 18.3.7(@types/react@18.2.47)
'@radix-ui/react-collection@1.1.7(@types/react-dom@18.3.7(@types/react@18.2.69))(@types/react@18.2.69)(react-dom@18.3.1(react@18.3.1))(react@18.3.1)':
dependencies:
@ -10760,7 +10900,7 @@ snapshots:
react-dom: 18.3.1(react@18.3.1)
optionalDependencies:
'@types/react': 18.2.47
'@types/react-dom': 18.3.7(@types/react@18.2.69)
'@types/react-dom': 18.3.7(@types/react@18.2.47)
'@radix-ui/react-dismissable-layer@1.1.10(@types/react-dom@18.3.7(@types/react@18.2.69))(@types/react@18.2.69)(react-dom@18.3.1(react@18.3.1))(react@18.3.1)':
dependencies:
@ -10811,7 +10951,7 @@ snapshots:
react-dom: 18.3.1(react@18.3.1)
optionalDependencies:
'@types/react': 18.2.47
'@types/react-dom': 18.3.7(@types/react@18.2.69)
'@types/react-dom': 18.3.7(@types/react@18.2.47)
'@radix-ui/react-focus-scope@1.1.7(@types/react-dom@18.3.7(@types/react@18.2.69))(@types/react@18.2.69)(react-dom@18.3.1(react@18.3.1))(react@18.3.1)':
dependencies:
@ -10898,7 +11038,7 @@ snapshots:
react-remove-scroll: 2.5.7(@types/react@18.2.47)(react@18.3.1)
optionalDependencies:
'@types/react': 18.2.47
'@types/react-dom': 18.3.7(@types/react@18.2.69)
'@types/react-dom': 18.3.7(@types/react@18.2.47)
'@radix-ui/react-popover@1.1.14(@types/react-dom@18.3.7(@types/react@18.2.69))(@types/react@18.2.69)(react-dom@18.3.1(react@18.3.1))(react@18.3.1)':
dependencies:
@ -10939,7 +11079,7 @@ snapshots:
react-dom: 18.3.1(react@18.3.1)
optionalDependencies:
'@types/react': 18.2.47
'@types/react-dom': 18.3.7(@types/react@18.2.69)
'@types/react-dom': 18.3.7(@types/react@18.2.47)
'@radix-ui/react-popper@1.2.7(@types/react-dom@18.3.7(@types/react@18.2.69))(@types/react@18.2.69)(react-dom@18.3.1(react@18.3.1))(react@18.3.1)':
dependencies:
@ -10967,7 +11107,7 @@ snapshots:
react-dom: 18.3.1(react@18.3.1)
optionalDependencies:
'@types/react': 18.2.47
'@types/react-dom': 18.3.7(@types/react@18.2.69)
'@types/react-dom': 18.3.7(@types/react@18.2.47)
'@radix-ui/react-portal@1.1.9(@types/react-dom@18.3.7(@types/react@18.2.69))(@types/react@18.2.69)(react-dom@18.3.1(react@18.3.1))(react@18.3.1)':
dependencies:
@ -10987,7 +11127,7 @@ snapshots:
react-dom: 18.3.1(react@18.3.1)
optionalDependencies:
'@types/react': 18.2.47
'@types/react-dom': 18.3.7(@types/react@18.2.69)
'@types/react-dom': 18.3.7(@types/react@18.2.47)
'@radix-ui/react-presence@1.1.4(@types/react-dom@18.3.7(@types/react@18.2.69))(@types/react@18.2.69)(react-dom@18.3.1(react@18.3.1))(react@18.3.1)':
dependencies:
@ -11006,7 +11146,7 @@ snapshots:
react-dom: 18.3.1(react@18.3.1)
optionalDependencies:
'@types/react': 18.2.47
'@types/react-dom': 18.3.7(@types/react@18.2.69)
'@types/react-dom': 18.3.7(@types/react@18.2.47)
'@radix-ui/react-primitive@2.1.3(@types/react-dom@18.3.7(@types/react@18.2.69))(@types/react@18.2.69)(react-dom@18.3.1(react@18.3.1))(react@18.3.1)':
dependencies:
@ -11032,7 +11172,7 @@ snapshots:
react-dom: 18.3.1(react@18.3.1)
optionalDependencies:
'@types/react': 18.2.47
'@types/react-dom': 18.3.7(@types/react@18.2.69)
'@types/react-dom': 18.3.7(@types/react@18.2.47)
'@radix-ui/react-roving-focus@1.1.10(@types/react-dom@18.3.7(@types/react@18.2.69))(@types/react@18.2.69)(react-dom@18.3.1(react@18.3.1))(react@18.3.1)':
dependencies:
@ -11192,7 +11332,7 @@ snapshots:
react-dom: 18.3.1(react@18.3.1)
optionalDependencies:
'@types/react': 18.2.47
'@types/react-dom': 18.3.7(@types/react@18.2.69)
'@types/react-dom': 18.3.7(@types/react@18.2.47)
'@radix-ui/react-toggle@1.1.0(@types/react-dom@18.3.7(@types/react@18.2.47))(@types/react@18.2.47)(react-dom@18.3.1(react@18.3.1))(react@18.3.1)':
dependencies:
@ -11203,7 +11343,7 @@ snapshots:
react-dom: 18.3.1(react@18.3.1)
optionalDependencies:
'@types/react': 18.2.47
'@types/react-dom': 18.3.7(@types/react@18.2.69)
'@types/react-dom': 18.3.7(@types/react@18.2.47)
'@radix-ui/react-tooltip@1.1.1(@types/react-dom@18.3.7(@types/react@18.2.47))(@types/react@18.2.47)(react-dom@18.3.1(react@18.3.1))(react@18.3.1)':
dependencies:
@ -11223,7 +11363,7 @@ snapshots:
react-dom: 18.3.1(react@18.3.1)
optionalDependencies:
'@types/react': 18.2.47
'@types/react-dom': 18.3.7(@types/react@18.2.69)
'@types/react-dom': 18.3.7(@types/react@18.2.47)
'@radix-ui/react-tooltip@1.2.7(@types/react-dom@18.3.7(@types/react@18.2.69))(@types/react@18.2.69)(react-dom@18.3.1(react@18.3.1))(react@18.3.1)':
dependencies:
@ -11353,7 +11493,7 @@ snapshots:
react-dom: 18.3.1(react@18.3.1)
optionalDependencies:
'@types/react': 18.2.47
'@types/react-dom': 18.3.7(@types/react@18.2.69)
'@types/react-dom': 18.3.7(@types/react@18.2.47)
'@radix-ui/react-visually-hidden@1.2.3(@types/react-dom@18.3.7(@types/react@18.2.69))(@types/react@18.2.69)(react-dom@18.3.1(react@18.3.1))(react@18.3.1)':
dependencies:
@ -11843,11 +11983,15 @@ snapshots:
'@rushstack/eslint-patch@1.11.0': {}
'@sec-ant/readable-stream@0.4.1': {}
'@selderee/plugin-htmlparser2@0.11.0':
dependencies:
domhandler: 5.0.3
selderee: 0.11.0
'@sindresorhus/merge-streams@4.0.0': {}
'@smithy/abort-controller@4.0.4':
dependencies:
'@smithy/types': 4.3.1
@ -12604,6 +12748,10 @@ snapshots:
'@types/range-parser@1.2.7': {}
'@types/react-dom@18.3.7(@types/react@18.2.47)':
dependencies:
'@types/react': 18.2.47
'@types/react-dom@18.3.7(@types/react@18.2.69)':
dependencies:
'@types/react': 18.2.69
@ -13221,6 +13369,8 @@ snapshots:
async-function@1.0.0: {}
asynckit@0.4.0: {}
autoprefixer@10.4.14(postcss@8.4.38):
dependencies:
browserslist: 4.25.0
@ -13257,6 +13407,14 @@ snapshots:
axe-core@4.10.3: {}
axios@1.10.0:
dependencies:
follow-redirects: 1.15.9
form-data: 4.0.3
proxy-from-env: 1.1.0
transitivePeerDependencies:
- debug
axobject-query@4.1.0: {}
bail@2.0.2: {}
@ -13533,6 +13691,10 @@ snapshots:
color-convert: 1.9.3
color-string: 1.9.1
combined-stream@1.0.8:
dependencies:
delayed-stream: 1.0.0
comma-separated-tokens@2.0.3: {}
commander@10.0.1: {}
@ -13940,6 +14102,8 @@ snapshots:
dependencies:
robust-predicates: 3.0.2
delayed-stream@1.0.0: {}
denque@2.1.0: {}
depd@2.0.0: {}
@ -14730,6 +14894,21 @@ snapshots:
signal-exit: 4.1.0
strip-final-newline: 3.0.0
execa@9.6.0:
dependencies:
'@sindresorhus/merge-streams': 4.0.0
cross-spawn: 7.0.6
figures: 6.1.0
get-stream: 9.0.1
human-signals: 8.0.1
is-plain-obj: 4.1.0
is-stream: 4.0.1
npm-run-path: 6.0.0
pretty-ms: 9.2.0
signal-exit: 4.1.0
strip-final-newline: 4.0.0
yoctocolors: 2.1.1
exit-hook@2.2.1: {}
express@4.21.2:
@ -14816,6 +14995,10 @@ snapshots:
fflate@0.4.8: {}
figures@6.1.0:
dependencies:
is-unicode-supported: 2.1.0
file-entry-cache@6.0.1:
dependencies:
flat-cache: 3.2.0
@ -14859,6 +15042,8 @@ snapshots:
flatted@3.3.3: {}
follow-redirects@1.15.9: {}
for-each@0.3.5:
dependencies:
is-callable: 1.2.7
@ -14868,6 +15053,14 @@ snapshots:
cross-spawn: 7.0.6
signal-exit: 4.1.0
form-data@4.0.3:
dependencies:
asynckit: 0.4.0
combined-stream: 1.0.8
es-set-tostringtag: 2.1.0
hasown: 2.0.2
mime-types: 2.1.35
format@0.2.2: {}
forwarded@0.2.0: {}
@ -14964,6 +15157,11 @@ snapshots:
get-stream@8.0.1: {}
get-stream@9.0.1:
dependencies:
'@sec-ant/readable-stream': 0.4.1
is-stream: 4.0.1
get-symbol-description@1.1.0:
dependencies:
call-bound: 1.0.4
@ -15084,6 +15282,15 @@ snapshots:
pumpify: 1.5.1
through2: 2.0.5
handlebars@4.7.8:
dependencies:
minimist: 1.2.8
neo-async: 2.6.2
source-map: 0.6.1
wordwrap: 1.0.0
optionalDependencies:
uglify-js: 3.19.3
hard-rejection@2.1.0: {}
has-bigints@1.1.0: {}
@ -15169,6 +15376,8 @@ snapshots:
human-signals@5.0.0: {}
human-signals@8.0.1: {}
humanize-duration@3.33.0: {}
iconv-lite@0.4.24:
@ -15377,6 +15586,8 @@ snapshots:
is-stream@3.0.0: {}
is-stream@4.0.1: {}
is-string@1.1.1:
dependencies:
call-bound: 1.0.4
@ -15398,6 +15609,8 @@ snapshots:
is-unicode-supported@0.1.0: {}
is-unicode-supported@2.1.0: {}
is-weakmap@2.0.2: {}
is-weakref@1.1.1:
@ -16339,6 +16552,11 @@ snapshots:
dependencies:
path-key: 4.0.0
npm-run-path@6.0.0:
dependencies:
path-key: 4.0.0
unicorn-magic: 0.3.0
num2fraction@1.2.2: {}
object-assign@4.1.1: {}
@ -16515,6 +16733,8 @@ snapshots:
parse-ms@2.1.0: {}
parse-ms@4.0.0: {}
parseley@0.12.1:
dependencies:
leac: 0.6.0
@ -16805,6 +17025,10 @@ snapshots:
dependencies:
parse-ms: 2.1.0
pretty-ms@9.2.0:
dependencies:
parse-ms: 4.0.0
prism-react-renderer@2.1.0(react@18.3.1):
dependencies:
'@types/prismjs': 1.26.5
@ -16864,6 +17088,8 @@ snapshots:
forwarded: 0.2.0
ipaddr.js: 1.9.1
proxy-from-env@1.1.0: {}
pseudomap@1.0.2: {}
pump@2.0.1:
@ -16938,7 +17164,7 @@ snapshots:
'@radix-ui/react-tooltip': 1.1.1(@types/react-dom@18.3.7(@types/react@18.2.47))(@types/react@18.2.47)(react-dom@18.3.1(react@18.3.1))(react@18.3.1)
'@swc/core': 1.3.101(@swc/helpers@0.5.2)
'@types/react': 18.2.47
'@types/react-dom': 18.3.7(@types/react@18.2.69)
'@types/react-dom': 18.3.7(@types/react@18.2.47)
'@types/webpack': 5.28.5(@swc/core@1.3.101(@swc/helpers@0.5.2))(esbuild@0.19.11)
autoprefixer: 10.4.14(postcss@8.4.38)
chalk: 4.1.2
@ -17746,6 +17972,8 @@ snapshots:
strip-final-newline@3.0.0: {}
strip-final-newline@4.0.0: {}
strip-indent@3.0.0:
dependencies:
min-indent: 1.0.1
@ -18147,6 +18375,9 @@ snapshots:
ufo@1.6.1: {}
uglify-js@3.19.3:
optional: true
ulid@2.4.0: {}
unbox-primitive@1.1.0:
@ -18162,6 +18393,8 @@ snapshots:
undici@6.21.3: {}
unicorn-magic@0.3.0: {}
unified@10.1.2:
dependencies:
'@types/unist': 2.0.11
@ -18564,6 +18797,8 @@ snapshots:
word-wrap@1.2.5: {}
wordwrap@1.0.0: {}
wrap-ansi@6.2.0:
dependencies:
ansi-styles: 4.3.0
@ -18639,6 +18874,8 @@ snapshots:
yocto-queue@0.1.0: {}
yoctocolors@2.1.1: {}
zod-error@1.5.0:
dependencies:
zod: 3.23.8

View File

@ -24,7 +24,13 @@ services:
# Only needed for bootstrap
command: sh -c "chown -R node:node /home/node/shared && exec ./scripts/entrypoint.sh"
healthcheck:
test: ["CMD", "node", "-e", "http.get('http://localhost:3000/healthcheck', res => process.exit(res.statusCode === 200 ? 0 : 1)).on('error', () => process.exit(1))"]
test:
[
"CMD",
"node",
"-e",
"http.get('http://localhost:3000/healthcheck', res => process.exit(res.statusCode === 200 ? 0 : 1)).on('error', () => process.exit(1))",
]
interval: 30s
timeout: 10s
retries: 5
@ -76,8 +82,6 @@ services:
image: electricsql/electric:${ELECTRIC_IMAGE_TAG:-1.0.13}
restart: ${RESTART_POLICY:-unless-stopped}
logging: *logging-config
depends_on:
- postgres
networks:
- webapp
environment:
@ -107,7 +111,21 @@ services:
networks:
- webapp
healthcheck:
test: ["CMD", "clickhouse-client", "--host", "localhost", "--port", "9000", "--user", "default", "--password", "password", "--query", "SELECT 1"]
test:
[
"CMD",
"clickhouse-client",
"--host",
"localhost",
"--port",
"9000",
"--user",
"default",
"--password",
"password",
"--query",
"SELECT 1",
]
interval: 5s
timeout: 5s
retries: 5
@ -233,7 +251,6 @@ services:
volumes:
shared:
clickhouse:
shared:
minio:
networks: