Compare commits

...

7 Commits

Author SHA1 Message Date
Harshith Mullapudi
b1569aafce fix: docker docs 2025-10-26 12:25:53 +05:30
Harshith Mullapudi
c1ccb2bb23 fix: streaming 2025-10-26 12:25:51 +05:30
Manoj
6a05ea4f37 refactor: simplify clustered graph query and add stop conditions for AI responses 2025-10-26 12:20:33 +05:30
Harshith Mullapudi
8836849310 1. Remove chat and deep-search from trigger
2. Add ai/sdk for chat UI
3. Added a better model manager
2025-10-26 01:10:28 +05:30
Harshith Mullapudi
cf91a824d1 feat: add logger to bullmq workers 2025-10-25 16:26:12 +05:30
Manoj
a548bae670 feat: add Ollama container and update ingestion status for unchanged documents 2025-10-25 12:53:23 +05:30
Harshith Mullapudi
17b8f9520b fix: telemetry and trigger deployment 2025-10-25 09:12:34 +05:30
74 changed files with 2479 additions and 4864 deletions

View File

@@ -1,4 +1,4 @@
VERSION=0.1.25
VERSION=0.1.26
# Nest run in docker, change host to database container name
DB_HOST=localhost

View File

@@ -72,27 +72,6 @@ export const conversationTitleQueue = new Queue("conversation-title-queue", {
},
});
/**
* Deep search queue
*/
export const deepSearchQueue = new Queue("deep-search-queue", {
connection: getRedisConnection(),
defaultJobOptions: {
attempts: 3,
backoff: {
type: "exponential",
delay: 2000,
},
removeOnComplete: {
age: 3600,
count: 1000,
},
removeOnFail: {
age: 86400,
},
},
});
/**
* Session compaction queue
*/
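For reference, the retry policy on the deep-search queue removed here (and shared by the remaining queues) uses BullMQ's built-in exponential backoff, where the n-th retry is delayed by delay * 2^(n-1). A minimal sketch of the schedule those options produce:

// Sketch: delays for { attempts: 3, backoff: { type: "exponential", delay: 2000 } }.
// BullMQ retries ~2s after the first failure and ~4s after the second,
// then moves the job to "failed" (kept 86400s per removeOnFail above).
const baseDelay = 2000;
const retryDelays = [1, 2].map((retry) => baseDelay * 2 ** (retry - 1));
console.log(retryDelays); // [2000, 4000]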

View File

@@ -13,31 +13,109 @@ import {
ingestWorker,
documentIngestWorker,
conversationTitleWorker,
deepSearchWorker,
sessionCompactionWorker,
closeAllWorkers,
} from "./workers";
import {
ingestQueue,
documentIngestQueue,
conversationTitleQueue,
sessionCompactionQueue,
} from "./queues";
import {
setupWorkerLogging,
startPeriodicMetricsLogging,
} from "./utils/worker-logger";
export async function startWorkers() {}
let metricsInterval: NodeJS.Timeout | null = null;
// Handle graceful shutdown
process.on("SIGTERM", async () => {
logger.log("SIGTERM received, closing workers gracefully...");
/**
* Initialize and start all BullMQ workers with comprehensive logging
*/
export async function initWorkers(): Promise<void> {
// Setup comprehensive logging for all workers
setupWorkerLogging(ingestWorker, ingestQueue, "ingest-episode");
setupWorkerLogging(
documentIngestWorker,
documentIngestQueue,
"ingest-document",
);
setupWorkerLogging(
conversationTitleWorker,
conversationTitleQueue,
"conversation-title",
);
setupWorkerLogging(
sessionCompactionWorker,
sessionCompactionQueue,
"session-compaction",
);
// Start periodic metrics logging (every 60 seconds)
metricsInterval = startPeriodicMetricsLogging(
[
{ worker: ingestWorker, queue: ingestQueue, name: "ingest-episode" },
{
worker: documentIngestWorker,
queue: documentIngestQueue,
name: "ingest-document",
},
{
worker: conversationTitleWorker,
queue: conversationTitleQueue,
name: "conversation-title",
},
{
worker: sessionCompactionWorker,
queue: sessionCompactionQueue,
name: "session-compaction",
},
],
60000, // Log metrics every 60 seconds
);
// Log worker startup
logger.log("\n🚀 Starting BullMQ workers...");
logger.log("─".repeat(80));
logger.log(`✓ Ingest worker: ${ingestWorker.name} (concurrency: 5)`);
logger.log(
`✓ Document ingest worker: ${documentIngestWorker.name} (concurrency: 3)`,
);
logger.log(
`✓ Conversation title worker: ${conversationTitleWorker.name} (concurrency: 10)`,
);
logger.log(
`✓ Session compaction worker: ${sessionCompactionWorker.name} (concurrency: 3)`,
);
logger.log("─".repeat(80));
logger.log("✅ All BullMQ workers started and listening for jobs");
logger.log("📊 Metrics will be logged every 60 seconds\n");
}
/**
* Shutdown all workers gracefully
*/
export async function shutdownWorkers(): Promise<void> {
logger.log("Shutdown signal received, closing workers gracefully...");
if (metricsInterval) {
clearInterval(metricsInterval);
}
await closeAllWorkers();
process.exit(0);
});
}
process.on("SIGINT", async () => {
logger.log("SIGINT received, closing workers gracefully...");
await closeAllWorkers();
process.exit(0);
});
// If running as standalone script, initialize workers
if (import.meta.url === `file://${process.argv[1]}`) {
initWorkers();
// Log worker startup
logger.log("Starting BullMQ workers...");
logger.log(`- Ingest worker: ${ingestWorker.name}`);
logger.log(`- Document ingest worker: ${documentIngestWorker.name}`);
logger.log(`- Conversation title worker: ${conversationTitleWorker.name}`);
logger.log(`- Deep search worker: ${deepSearchWorker.name}`);
logger.log(`- Session compaction worker: ${sessionCompactionWorker.name}`);
logger.log("All BullMQ workers started and listening for jobs");
// Handle graceful shutdown
const shutdown = async () => {
await shutdownWorkers();
process.exit(0);
};
process.on("SIGTERM", shutdown);
process.on("SIGINT", shutdown);
}
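The import.meta.url comparison above is the ESM counterpart of CommonJS require.main === module: initWorkers runs only when this file is the process entry point, not when it is imported. A sketch of a slightly more robust variant of the idiom (not part of this diff), using pathToFileURL to normalize platform-specific paths:

import { pathToFileURL } from "node:url";

// `file://${process.argv[1]}` can misfire on Windows drive letters and
// percent-encoded paths; pathToFileURL handles both.
const isMain = import.meta.url === pathToFileURL(process.argv[1] ?? "").href;
if (isMain) {
  // initWorkers();
}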

View File

@@ -18,7 +18,6 @@ async function getAllQueues() {
ingestQueue,
documentIngestQueue,
conversationTitleQueue,
deepSearchQueue,
sessionCompactionQueue,
} = await import("../queues");
@@ -26,7 +25,6 @@ async function getAllQueues() {
ingestQueue,
documentIngestQueue,
conversationTitleQueue,
deepSearchQueue,
sessionCompactionQueue,
];
}

View File

@@ -0,0 +1,184 @@
/**
* BullMQ Worker Logger
*
* Comprehensive logging utility for tracking worker status, queue metrics,
* and job lifecycle events
*/
import { type Worker, type Queue } from "bullmq";
import { logger } from "~/services/logger.service";
interface WorkerMetrics {
name: string;
concurrency: number;
activeJobs: number;
waitingJobs: number;
delayedJobs: number;
failedJobs: number;
completedJobs: number;
}
/**
* Setup comprehensive logging for a worker
*/
export function setupWorkerLogging(
worker: Worker,
queue: Queue,
workerName: string,
): void {
// Job picked up and started processing
worker.on("active", async (job) => {
const counts = await getQueueCounts(queue);
logger.log(
`[${workerName}] 🔄 Job started: ${job.id} | Queue: ${counts.waiting} waiting, ${counts.active} active, ${counts.delayed} delayed`,
);
});
// Job completed successfully
worker.on("completed", async (job, result) => {
const counts = await getQueueCounts(queue);
const duration = job.finishedOn ? job.finishedOn - job.processedOn! : 0;
logger.log(
`[${workerName}] ✅ Job completed: ${job.id} (${duration}ms) | Queue: ${counts.waiting} waiting, ${counts.active} active`,
);
});
// Job failed
worker.on("failed", async (job, error) => {
const counts = await getQueueCounts(queue);
const attempt = job?.attemptsMade || 0;
const maxAttempts = job?.opts?.attempts || 3;
logger.error(
`[${workerName}] ❌ Job failed: ${job?.id} (attempt ${attempt}/${maxAttempts}) | Error: ${error.message} | Queue: ${counts.waiting} waiting, ${counts.failed} failed`,
);
});
// Job progress update (if job reports progress)
worker.on("progress", async (job, progress) => {
logger.log(`[${workerName}] 📊 Job progress: ${job.id} - ${progress}%`);
});
// Worker stalled (job took too long)
worker.on("stalled", async (jobId) => {
logger.warn(`[${workerName}] ⚠️ Job stalled: ${jobId}`);
});
// Worker error
worker.on("error", (error) => {
logger.error(`[${workerName}] 🔥 Worker error: ${error.message}`);
});
// Worker closed
worker.on("closed", () => {
logger.log(`[${workerName}] 🛑 Worker closed`);
});
}
/**
* Get queue counts for logging
*/
async function getQueueCounts(queue: Queue): Promise<{
waiting: number;
active: number;
delayed: number;
failed: number;
completed: number;
}> {
try {
const counts = await queue.getJobCounts(
"waiting",
"active",
"delayed",
"failed",
"completed",
);
return {
waiting: counts.waiting || 0,
active: counts.active || 0,
delayed: counts.delayed || 0,
failed: counts.failed || 0,
completed: counts.completed || 0,
};
} catch (error) {
return { waiting: 0, active: 0, delayed: 0, failed: 0, completed: 0 };
}
}
/**
* Get metrics for all workers
*/
export async function getAllWorkerMetrics(
workers: Array<{ worker: Worker; queue: Queue; name: string }>,
): Promise<WorkerMetrics[]> {
const metrics = await Promise.all(
workers.map(async ({ worker, queue, name }) => {
const counts = await getQueueCounts(queue);
return {
name,
concurrency: worker.opts.concurrency || 1,
activeJobs: counts.active,
waitingJobs: counts.waiting,
delayedJobs: counts.delayed,
failedJobs: counts.failed,
completedJobs: counts.completed,
};
}),
);
return metrics;
}
/**
* Log worker metrics summary
*/
export function logWorkerMetrics(metrics: WorkerMetrics[]): void {
logger.log("\n📊 BullMQ Worker Metrics:");
logger.log("─".repeat(80));
for (const metric of metrics) {
logger.log(
`[${metric.name.padEnd(25)}] Concurrency: ${metric.concurrency} | ` +
`Active: ${metric.activeJobs} | Waiting: ${metric.waitingJobs} | ` +
`Delayed: ${metric.delayedJobs} | Failed: ${metric.failedJobs} | ` +
`Completed: ${metric.completedJobs}`,
);
}
const totals = metrics.reduce(
(acc, m) => ({
active: acc.active + m.activeJobs,
waiting: acc.waiting + m.waitingJobs,
delayed: acc.delayed + m.delayedJobs,
failed: acc.failed + m.failedJobs,
completed: acc.completed + m.completedJobs,
}),
{ active: 0, waiting: 0, delayed: 0, failed: 0, completed: 0 },
);
logger.log("─".repeat(80));
logger.log(
`[TOTAL] Active: ${totals.active} | Waiting: ${totals.waiting} | ` +
`Delayed: ${totals.delayed} | Failed: ${totals.failed} | ` +
`Completed: ${totals.completed}`,
);
logger.log("─".repeat(80) + "\n");
}
/**
* Start periodic metrics logging
*/
export function startPeriodicMetricsLogging(
workers: Array<{ worker: Worker; queue: Queue; name: string }>,
intervalMs: number = 60000, // Default: 1 minute
): NodeJS.Timeout {
const logMetrics = async () => {
const metrics = await getAllWorkerMetrics(workers);
logWorkerMetrics(metrics);
};
// Log immediately on start
logMetrics();
// Then log periodically
return setInterval(logMetrics, intervalMs);
}
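A minimal sketch of attaching this logger to a single worker/queue pair in isolation (queue name, connection, and import path are illustrative, not from this diff):

import { Queue, Worker } from "bullmq";
import { setupWorkerLogging, startPeriodicMetricsLogging } from "./worker-logger";

const connection = { host: "localhost", port: 6379 }; // assumed local Redis
const demoQueue = new Queue("demo-queue", { connection });
const demoWorker = new Worker("demo-queue", async (job) => job.data, {
  connection,
  concurrency: 2,
});

// Lifecycle events (active/completed/failed/stalled/...) now log with live queue counts.
setupWorkerLogging(demoWorker, demoQueue, "demo");

// Logs one metrics snapshot immediately, then every 30s; clear on shutdown.
const metricsInterval = startPeriodicMetricsLogging(
  [{ worker: demoWorker, queue: demoQueue, name: "demo" }],
  30_000,
);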

View File

@@ -18,10 +18,7 @@ import {
processConversationTitleCreation,
type CreateConversationTitlePayload,
} from "~/jobs/conversation/create-title.logic";
import {
processDeepSearch,
type ProcessDeepSearchPayload,
} from "~/jobs/deep-search/deep-search.logic";
import {
processSessionCompaction,
type SessionCompactionPayload,
@@ -58,14 +55,6 @@ export const ingestWorker = new Worker(
},
);
ingestWorker.on("completed", (job) => {
logger.log(`Job ${job.id} completed`);
});
ingestWorker.on("failed", (job, error) => {
logger.error(`Job ${job?.id} failed: ${error}`);
});
/**
* Document ingestion worker
* Handles document-level ingestion with differential processing
@@ -89,14 +78,6 @@ export const documentIngestWorker = new Worker(
},
);
documentIngestWorker.on("completed", (job) => {
logger.log(`Document job ${job.id} completed`);
});
documentIngestWorker.on("failed", (job, error) => {
logger.error(`Document job ${job?.id} failed: ${error}`);
});
/**
* Conversation title creation worker
*/
@@ -112,37 +93,6 @@ export const conversationTitleWorker = new Worker(
},
);
conversationTitleWorker.on("completed", (job) => {
logger.log(`Conversation title job ${job.id} completed`);
});
conversationTitleWorker.on("failed", (job, error) => {
logger.error(`Conversation title job ${job?.id} failed: ${error}`);
});
/**
* Deep search worker (non-streaming version for BullMQ)
*/
export const deepSearchWorker = new Worker(
"deep-search-queue",
async (job) => {
const payload = job.data as ProcessDeepSearchPayload;
return await processDeepSearch(payload);
},
{
connection: getRedisConnection(),
concurrency: 5, // Process up to 5 searches in parallel
},
);
deepSearchWorker.on("completed", (job) => {
logger.log(`Deep search job ${job.id} completed`);
});
deepSearchWorker.on("failed", (job, error) => {
logger.error(`Deep search job ${job?.id} failed: ${error}`);
});
/**
* Session compaction worker
*/
@@ -158,14 +108,6 @@ export const sessionCompactionWorker = new Worker(
},
);
sessionCompactionWorker.on("completed", (job) => {
logger.log(`Session compaction job ${job.id} completed`);
});
sessionCompactionWorker.on("failed", (job, error) => {
logger.error(`Session compaction job ${job?.id} failed: ${error}`);
});
/**
* Graceful shutdown handler
*/
@@ -174,7 +116,7 @@ export async function closeAllWorkers(): Promise<void> {
ingestWorker.close(),
documentIngestWorker.close(),
conversationTitleWorker.close(),
deepSearchWorker.close(),
sessionCompactionWorker.close(),
]);
logger.log("All BullMQ workers closed");

View File

@@ -1,38 +1,42 @@
import { EditorContent, useEditor } from "@tiptap/react";
import { useEffect, memo } from "react";
import { UserTypeEnum } from "@core/types";
import { type ConversationHistory } from "@core/database";
import { cn } from "~/lib/utils";
import { extensionsForConversation } from "./editor-extensions";
import { skillExtension } from "../editor/skill-extension";
import { type UIMessage } from "ai";
interface AIConversationItemProps {
conversationHistory: ConversationHistory;
message: UIMessage;
}
const ConversationItemComponent = ({
conversationHistory,
}: AIConversationItemProps) => {
const isUser =
conversationHistory.userType === UserTypeEnum.User ||
conversationHistory.userType === UserTypeEnum.System;
function getMessage(message: string) {
let finalMessage = message.replace("<final_response>", "");
finalMessage = finalMessage.replace("</final_response>", "");
finalMessage = finalMessage.replace("<question_response>", "");
finalMessage = finalMessage.replace("</question_response>", "");
const id = `a${conversationHistory.id.replace(/-/g, "")}`;
return finalMessage;
}
const ConversationItemComponent = ({ message }: AIConversationItemProps) => {
const isUser = message.role === "user" || false;
const textPart = message.parts.find((part) => part.type === "text");
const editor = useEditor({
extensions: [...extensionsForConversation, skillExtension],
editable: false,
content: conversationHistory.message,
content: textPart ? getMessage(textPart.text) : "",
});
useEffect(() => {
editor?.commands.setContent(conversationHistory.message);
if (textPart) {
editor?.commands.setContent(getMessage(textPart.text));
}
// eslint-disable-next-line react-hooks/exhaustive-deps
}, [id, conversationHistory.message]);
}, [message]);
if (!conversationHistory.message) {
if (!message) {
return null;
}
@@ -51,10 +55,10 @@ const ConversationItemComponent = ({
};
// Memoize to prevent unnecessary re-renders
export const ConversationItem = memo(ConversationItemComponent, (prevProps, nextProps) => {
// Only re-render if the conversation history ID or message changed
return (
prevProps.conversationHistory.id === nextProps.conversationHistory.id &&
prevProps.conversationHistory.message === nextProps.conversationHistory.message
);
});
export const ConversationItem = memo(
ConversationItemComponent,
(prevProps, nextProps) => {
// Only re-render if the conversation history ID or message changed
return prevProps.message === nextProps.message;
},
);

View File

@@ -13,33 +13,26 @@ import { Form, useSubmit, useActionData } from "@remix-run/react";
interface ConversationTextareaProps {
defaultValue?: string;
conversationId: string;
placeholder?: string;
isLoading?: boolean;
className?: string;
onChange?: (text: string) => void;
disabled?: boolean;
onConversationCreated?: (conversation: any) => void;
onConversationCreated?: (message: string) => void;
stop?: () => void;
}
export function ConversationTextarea({
defaultValue,
isLoading = false,
placeholder,
conversationId,
onChange,
onConversationCreated,
stop,
}: ConversationTextareaProps) {
const [text, setText] = useState(defaultValue ?? "");
const [editor, setEditor] = useState<Editor>();
const submit = useSubmit();
const actionData = useActionData<{ conversation?: any }>();
useEffect(() => {
if (actionData?.conversation && onConversationCreated) {
onConversationCreated(actionData.conversation);
}
}, [actionData]);
const onUpdate = (editor: Editor) => {
setText(editor.getHTML());
@@ -51,134 +44,99 @@ export function ConversationTextarea({
return;
}
const data = isLoading ? {} : { message: text, conversationId };
// When conversationId exists and not stopping, submit to current route
// When isLoading (stopping), submit to the specific conversation route
submit(data as any, {
action: isLoading
? `/home/conversation/${conversationId}`
: conversationId
? `/home/conversation/${conversationId}`
: "/home/conversation",
method: "post",
});
onConversationCreated && onConversationCreated(text);
editor?.commands.clearContent(true);
setText("");
}, [editor, text]);
// Send message to API
const submitForm = useCallback(
async (e: React.FormEvent<HTMLFormElement>) => {
const data = isLoading
? {}
: { message: text, title: text, conversationId };
submit(data as any, {
action: isLoading
? `/home/conversation/${conversationId}`
: conversationId
? `/home/conversation/${conversationId}`
: "/home/conversation",
method: "post",
});
editor?.commands.clearContent(true);
setText("");
e.preventDefault();
},
[text, conversationId],
);
return (
<Form
action="/home/conversation"
method="post"
onSubmit={(e) => submitForm(e)}
className="pt-2"
>
<div className="bg-background-3 rounded-lg border-1 border-gray-300 py-2">
<EditorRoot>
<EditorContent
// eslint-disable-next-line @typescript-eslint/no-explicit-any
initialContent={defaultValue as any}
extensions={[
Document,
Paragraph,
Text,
HardBreak.configure({
keepMarks: true,
}),
<div className="bg-background-3 rounded-lg border-1 border-gray-300 py-2">
<EditorRoot>
<EditorContent
// eslint-disable-next-line @typescript-eslint/no-explicit-any
initialContent={defaultValue as any}
extensions={[
Document,
Paragraph,
Text,
HardBreak.configure({
keepMarks: true,
}),
Placeholder.configure({
placeholder: () => placeholder ?? "Ask sol...",
includeChildren: true,
}),
History,
]}
onCreate={async ({ editor }) => {
setEditor(editor);
await new Promise((resolve) => setTimeout(resolve, 100));
editor.commands.focus("end");
}}
onUpdate={({ editor }) => {
onUpdate(editor);
}}
shouldRerenderOnTransaction={false}
editorProps={{
attributes: {
class: `prose prose-lg dark:prose-invert prose-headings:font-title font-default focus:outline-none max-w-full`,
},
handleKeyDown(view, event) {
if (event.key === "Enter" && !event.shiftKey) {
// eslint-disable-next-line @typescript-eslint/no-explicit-any
const target = event.target as any;
if (target.innerHTML.includes("suggestion")) {
return false;
}
event.preventDefault();
if (text) {
handleSend();
}
return true;
Placeholder.configure({
placeholder: () => placeholder ?? "Ask sol...",
includeChildren: true,
}),
History,
]}
onCreate={async ({ editor }) => {
setEditor(editor);
await new Promise((resolve) => setTimeout(resolve, 100));
editor.commands.focus("end");
}}
onUpdate={({ editor }) => {
onUpdate(editor);
}}
shouldRerenderOnTransaction={false}
editorProps={{
attributes: {
class: `prose prose-lg dark:prose-invert prose-headings:font-title font-default focus:outline-none max-w-full`,
},
handleKeyDown(view, event) {
if (event.key === "Enter" && !event.shiftKey) {
// eslint-disable-next-line @typescript-eslint/no-explicit-any
const target = event.target as any;
if (target.innerHTML.includes("suggestion")) {
return false;
}
event.preventDefault();
if (text) {
handleSend();
}
return true;
}
if (event.key === "Enter" && event.shiftKey) {
view.dispatch(
view.state.tr.replaceSelectionWith(
view.state.schema.nodes.hardBreak.create(),
),
);
return true;
}
return false;
},
}}
immediatelyRender={false}
className={cn(
"editor-container text-md max-h-[400px] min-h-[40px] w-full min-w-full overflow-auto rounded-lg px-3",
)}
/>
</EditorRoot>
<div className="mb-1 flex justify-end px-3">
<Button
variant="default"
className="gap-1 shadow-none transition-all duration-500 ease-in-out"
type="submit"
size="lg"
>
{isLoading ? (
<>
<LoaderCircle size={18} className="mr-1 animate-spin" />
Stop
</>
) : (
<>Chat</>
)}
</Button>
</div>
if (event.key === "Enter" && event.shiftKey) {
view.dispatch(
view.state.tr.replaceSelectionWith(
view.state.schema.nodes.hardBreak.create(),
),
);
return true;
}
return false;
},
}}
immediatelyRender={false}
className={cn(
"editor-container text-md max-h-[400px] min-h-[40px] w-full min-w-full overflow-auto rounded-lg px-3",
)}
/>
</EditorRoot>
<div className="mb-1 flex justify-end px-3">
<Button
variant="default"
className="gap-1 shadow-none transition-all duration-500 ease-in-out"
onClick={() => {
if (!isLoading) {
handleSend();
} else {
stop && stop();
}
}}
size="lg"
>
{isLoading ? (
<>
<LoaderCircle size={18} className="mr-1 animate-spin" />
Stop
</>
) : (
<>Chat</>
)}
</Button>
</div>
</Form>
</div>
);
}

View File

@@ -1,10 +1,4 @@
import { EllipsisVertical, Trash, Copy } from "lucide-react";
import {
DropdownMenu,
DropdownMenuContent,
DropdownMenuItem,
DropdownMenuTrigger,
} from "../ui/dropdown-menu";
import { Trash, Copy, RotateCw } from "lucide-react";
import { Button } from "../ui/button";
import {
AlertDialog,
@@ -22,11 +16,13 @@ import { toast } from "~/hooks/use-toast";
interface LogOptionsProps {
id: string;
status?: string;
}
export const LogOptions = ({ id }: LogOptionsProps) => {
export const LogOptions = ({ id, status }: LogOptionsProps) => {
const [deleteDialogOpen, setDeleteDialogOpen] = useState(false);
const deleteFetcher = useFetcher<{ success: boolean }>();
const retryFetcher = useFetcher<{ success: boolean }>();
const navigate = useNavigate();
const handleDelete = () => {
@@ -58,22 +54,54 @@ export const LogOptions = ({ id }: LogOptionsProps) => {
}
};
const handleRetry = () => {
retryFetcher.submit(
{},
{
method: "POST",
action: `/api/v1/logs/${id}/retry`,
},
);
};
useEffect(() => {
if (deleteFetcher.state === "idle" && deleteFetcher.data?.success) {
navigate(`/home/inbox`);
}
}, [deleteFetcher.state, deleteFetcher.data]);
useEffect(() => {
if (retryFetcher.state === "idle" && retryFetcher.data?.success) {
toast({
title: "Success",
description: "Episode retry initiated",
});
// Reload the page to reflect the new status
window.location.reload();
}
}, [retryFetcher.state, retryFetcher.data]);
return (
<>
<div className="flex items-center gap-2">
{status === "FAILED" && (
<Button
variant="secondary"
size="sm"
className="gap-2 rounded"
onClick={handleRetry}
disabled={retryFetcher.state !== "idle"}
>
<RotateCw size={15} /> Retry
</Button>
)}
<Button
variant="secondary"
size="sm"
className="gap-2 rounded"
onClick={handleCopy}
>
<Copy size={15} /> Copy ID
<Copy size={15} /> Copy Id
</Button>
<Button
variant="secondary"

View File

@@ -74,7 +74,7 @@ export function LogTextCollapse({ text, log }: LogTextCollapseProps) {
<div className={cn("flex w-full min-w-[0px] shrink flex-col")}>
<div className="flex w-full items-center justify-between gap-4">
<div className="inline-flex min-h-[24px] min-w-[0px] shrink items-center justify-start">
<div className={cn("truncate text-left")}>
<div className={cn("truncate text-left text-base")}>
{text.replace(/<[^>]+>/g, "")}
</div>
</div>
@@ -97,7 +97,7 @@
</div>
<div className="flex items-center justify-between">
<div className="flex items-center gap-1">
<div className="flex items-center gap-1 font-light">
{getIconForAuthorise(log.source.toLowerCase(), 12, undefined)}
{log.source.toLowerCase()}
</div>

View File

@@ -99,7 +99,7 @@ export const SpaceOptions = ({ id, name, description }: SpaceOptionsProps) => {
<DropdownMenuContent align="end">
<DropdownMenuItem onClick={handleCopy}>
<Button variant="link" size="sm" className="gap-2 rounded">
<Copy size={15} /> Copy ID
<Copy size={15} /> Copy Id
</Button>
</DropdownMenuItem>
<DropdownMenuItem onClick={() => setEditDialogOpen(true)}>

View File

@@ -17,6 +17,7 @@ import { renderToPipeableStream } from "react-dom/server";
import { initializeStartupServices } from "./utils/startup";
import { handleMCPRequest, handleSessionRequest } from "~/services/mcp.server";
import { authenticateHybridRequest } from "~/services/routeBuilders/apiBuilder.server";
import { trackError } from "~/services/telemetry.server";
const ABORT_DELAY = 5_000;
@@ -27,6 +28,42 @@ async function init() {
init();
/**
* Global error handler for all server-side errors
* This catches errors from loaders, actions, and rendering
* Automatically tracks all errors to telemetry
*/
export function handleError(
error: unknown,
{ request }: { request: Request },
): void {
// Don't track 404s or aborted requests as errors
if (
error instanceof Response &&
(error.status === 404 || error.status === 304)
) {
return;
}
// Track error to telemetry
if (error instanceof Error) {
const url = new URL(request.url);
trackError(error, {
url: request.url,
path: url.pathname,
method: request.method,
userAgent: request.headers.get("user-agent") || "unknown",
referer: request.headers.get("referer") || undefined,
}).catch((trackingError) => {
// If telemetry tracking fails, just log it - don't break the app
console.error("Failed to track error:", trackingError);
});
}
// Always log to console for development/debugging
console.error(error);
}
export default function handleRequest(
request: Request,
responseStatusCode: number,

View File

@@ -3,102 +3,146 @@ import { isValidDatabaseUrl } from "./utils/db";
import { isValidRegex } from "./utils/regex";
import { LLMModelEnum } from "@core/types";
const EnvironmentSchema = z.object({
NODE_ENV: z.union([
z.literal("development"),
z.literal("production"),
z.literal("test"),
]),
POSTGRES_DB: z.string(),
DATABASE_URL: z
.string()
.refine(
isValidDatabaseUrl,
"DATABASE_URL is invalid, for details please check the additional output above this message.",
),
DATABASE_CONNECTION_LIMIT: z.coerce.number().int().default(10),
DATABASE_POOL_TIMEOUT: z.coerce.number().int().default(60),
DATABASE_CONNECTION_TIMEOUT: z.coerce.number().int().default(20),
DIRECT_URL: z
.string()
.refine(
isValidDatabaseUrl,
"DIRECT_URL is invalid, for details please check the additional output above this message.",
),
DATABASE_READ_REPLICA_URL: z.string().optional(),
SESSION_SECRET: z.string(),
ENCRYPTION_KEY: z.string(),
MAGIC_LINK_SECRET: z.string(),
WHITELISTED_EMAILS: z
.string()
.refine(isValidRegex, "WHITELISTED_EMAILS must be a valid regex.")
.optional(),
ADMIN_EMAILS: z
.string()
.refine(isValidRegex, "ADMIN_EMAILS must be a valid regex.")
.optional(),
const EnvironmentSchema = z
.object({
NODE_ENV: z.union([
z.literal("development"),
z.literal("production"),
z.literal("test"),
]),
POSTGRES_DB: z.string(),
DATABASE_URL: z
.string()
.refine(
isValidDatabaseUrl,
"DATABASE_URL is invalid, for details please check the additional output above this message.",
),
DATABASE_CONNECTION_LIMIT: z.coerce.number().int().default(10),
DATABASE_POOL_TIMEOUT: z.coerce.number().int().default(60),
DATABASE_CONNECTION_TIMEOUT: z.coerce.number().int().default(20),
DIRECT_URL: z
.string()
.refine(
isValidDatabaseUrl,
"DIRECT_URL is invalid, for details please check the additional output above this message.",
),
DATABASE_READ_REPLICA_URL: z.string().optional(),
SESSION_SECRET: z.string(),
ENCRYPTION_KEY: z.string(),
MAGIC_LINK_SECRET: z.string(),
WHITELISTED_EMAILS: z
.string()
.refine(isValidRegex, "WHITELISTED_EMAILS must be a valid regex.")
.optional(),
ADMIN_EMAILS: z
.string()
.refine(isValidRegex, "ADMIN_EMAILS must be a valid regex.")
.optional(),
APP_ENV: z.string().default(process.env.NODE_ENV),
LOGIN_ORIGIN: z.string().default("http://localhost:5173"),
APP_ORIGIN: z.string().default("http://localhost:5173"),
POSTHOG_PROJECT_KEY: z.string().default(""),
APP_ENV: z.string().default(process.env.NODE_ENV),
LOGIN_ORIGIN: z.string().default("http://localhost:5173"),
APP_ORIGIN: z.string().default("http://localhost:5173"),
//storage
ACCESS_KEY_ID: z.string().optional(),
SECRET_ACCESS_KEY: z.string().optional(),
BUCKET: z.string().optional(),
// Telemetry
POSTHOG_PROJECT_KEY: z
.string()
.default("phc_SwfGIzzX5gh5bazVWoRxZTBhkr7FwvzArS0NRyGXm1a"),
TELEMETRY_ENABLED: z
.string()
.optional()
.default("true")
.transform((val) => val !== "false" && val !== "0"),
TELEMETRY_ANONYMOUS: z
.string()
.optional()
.default("false")
.transform((val) => val === "true" || val === "1"),
// google auth
AUTH_GOOGLE_CLIENT_ID: z.string().optional(),
AUTH_GOOGLE_CLIENT_SECRET: z.string().optional(),
//storage
ACCESS_KEY_ID: z.string().optional(),
SECRET_ACCESS_KEY: z.string().optional(),
BUCKET: z.string().optional(),
ENABLE_EMAIL_LOGIN: z.coerce.boolean().default(true),
// google auth
AUTH_GOOGLE_CLIENT_ID: z.string().optional(),
AUTH_GOOGLE_CLIENT_SECRET: z.string().optional(),
//Redis
REDIS_HOST: z.string().default("localhost"),
REDIS_PORT: z.coerce.number().default(6379),
REDIS_TLS_DISABLED: z.coerce.boolean().default(true),
ENABLE_EMAIL_LOGIN: z
.string()
.optional()
.default("true")
.transform((val) => val !== "false" && val !== "0"),
//Neo4j
NEO4J_URI: z.string(),
NEO4J_USERNAME: z.string(),
NEO4J_PASSWORD: z.string(),
//Redis
REDIS_HOST: z.string().default("localhost"),
REDIS_PORT: z.coerce.number().default(6379),
REDIS_TLS_DISABLED: z
.string()
.optional()
.default("true")
.transform((val) => val !== "false" && val !== "0"),
//OpenAI
OPENAI_API_KEY: z.string(),
ANTHROPIC_API_KEY: z.string().optional(),
//Neo4j
NEO4J_URI: z.string(),
NEO4J_USERNAME: z.string(),
NEO4J_PASSWORD: z.string(),
EMAIL_TRANSPORT: z.string().optional(),
FROM_EMAIL: z.string().optional(),
REPLY_TO_EMAIL: z.string().optional(),
RESEND_API_KEY: z.string().optional(),
SMTP_HOST: z.string().optional(),
SMTP_PORT: z.coerce.number().optional(),
SMTP_SECURE: z.coerce.boolean().optional(),
SMTP_USER: z.string().optional(),
SMTP_PASSWORD: z.string().optional(),
//OpenAI
OPENAI_API_KEY: z.string().optional(),
ANTHROPIC_API_KEY: z.string().optional(),
GOOGLE_GENERATIVE_AI_API_KEY: z.string().optional(),
//Trigger
TRIGGER_PROJECT_ID: z.string(),
TRIGGER_SECRET_KEY: z.string(),
TRIGGER_API_URL: z.string(),
TRIGGER_DB: z.string().default("trigger"),
EMAIL_TRANSPORT: z.string().optional(),
FROM_EMAIL: z.string().optional(),
REPLY_TO_EMAIL: z.string().optional(),
RESEND_API_KEY: z.string().optional(),
SMTP_HOST: z.string().optional(),
SMTP_PORT: z.coerce.number().optional(),
SMTP_SECURE: z
.string()
.optional()
.transform((val) => val === "true" || val === "1"),
SMTP_USER: z.string().optional(),
SMTP_PASSWORD: z.string().optional(),
// Model envs
MODEL: z.string().default(LLMModelEnum.GPT41),
EMBEDDING_MODEL: z.string().default("mxbai-embed-large"),
EMBEDDING_MODEL_SIZE: z.string().default("1024"),
OLLAMA_URL: z.string().optional(),
COHERE_API_KEY: z.string().optional(),
COHERE_SCORE_THRESHOLD: z.string().default("0.3"),
//Trigger
TRIGGER_PROJECT_ID: z.string().optional(),
TRIGGER_SECRET_KEY: z.string().optional(),
TRIGGER_API_URL: z.string().optional(),
TRIGGER_DB: z.string().default("trigger"),
AWS_ACCESS_KEY_ID: z.string().optional(),
AWS_SECRET_ACCESS_KEY: z.string().optional(),
AWS_REGION: z.string().optional(),
// Model envs
MODEL: z.string().default(LLMModelEnum.GPT41),
EMBEDDING_MODEL: z.string().default("mxbai-embed-large"),
EMBEDDING_MODEL_SIZE: z.string().default("1024"),
OLLAMA_URL: z.string().optional(),
COHERE_API_KEY: z.string().optional(),
COHERE_SCORE_THRESHOLD: z.string().default("0.3"),
// Queue provider
QUEUE_PROVIDER: z.enum(["trigger", "bullmq"]).default("trigger"),
});
AWS_ACCESS_KEY_ID: z.string().optional(),
AWS_SECRET_ACCESS_KEY: z.string().optional(),
AWS_REGION: z.string().optional(),
// Queue provider
QUEUE_PROVIDER: z.enum(["trigger", "bullmq"]).default("trigger"),
})
.refine(
(data) => {
// If QUEUE_PROVIDER is "trigger", then Trigger.dev variables must be present
if (data.QUEUE_PROVIDER === "trigger") {
return !!(
data.TRIGGER_PROJECT_ID &&
data.TRIGGER_SECRET_KEY &&
data.TRIGGER_API_URL
);
}
return true;
},
{
message:
"TRIGGER_PROJECT_ID, TRIGGER_SECRET_KEY, and TRIGGER_API_URL are required when QUEUE_PROVIDER=trigger",
},
);
export type Environment = z.infer<typeof EnvironmentSchema>;
export const env = EnvironmentSchema.parse(process.env);
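One subtlety behind the flag rewrites above (REDIS_TLS_DISABLED, ENABLE_EMAIL_LOGIN, SMTP_SECURE): z.coerce.boolean() applies Boolean() to the raw value, so any non-empty string, including "false", coerces to true. The string-transform pattern the schema now uses handles the usual env spellings explicitly. A small sketch of the difference:

import { z } from "zod";

// Coercion: Boolean("false") === true, almost never what an env flag means.
z.coerce.boolean().parse("false"); // true

// Transform: explicit handling of "false"/"0", as the schema above now does.
const flag = z
  .string()
  .optional()
  .default("true")
  .transform((val) => val !== "false" && val !== "0");

flag.parse("false"); // false
flag.parse("0"); // false
flag.parse(undefined); // true (default applies)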

View File

@@ -6,6 +6,7 @@ import { useOptionalUser, useUserChanged } from "./useUser";
export const usePostHog = (
apiKey?: string,
telemetryEnabled = true,
logging = false,
debug = false,
): void => {
@@ -15,6 +16,8 @@ export const usePostHog = (
//start PostHog once
useEffect(() => {
// Respect telemetry settings
if (!telemetryEnabled) return;
if (apiKey === undefined || apiKey === "") return;
if (postHogInitialized.current === true) return;
if (logging) console.log("Initializing PostHog");
@@ -27,19 +30,26 @@
if (logging) console.log("PostHog loaded");
if (user !== undefined) {
if (logging) console.log("Loaded: Identifying user", user);
posthog.identify(user.id, { email: user.email });
posthog.identify(user.id, {
email: user.email,
name: user.name,
});
}
},
});
postHogInitialized.current = true;
}, [apiKey, logging, user]);
}, [apiKey, telemetryEnabled, logging, user]);
useUserChanged((user) => {
if (postHogInitialized.current === false) return;
if (!telemetryEnabled) return;
if (logging) console.log("User changed");
if (user) {
if (logging) console.log("Identifying user", user);
posthog.identify(user.id, { email: user.email });
posthog.identify(user.id, {
email: user.email,
name: user.name,
});
} else {
if (logging) console.log("Resetting user");
posthog.reset();

View File

@@ -1,8 +1,8 @@
import { LLMMappings } from "@core/types";
import { generate } from "~/trigger/chat/stream-utils";
import { conversationTitlePrompt } from "~/trigger/conversation/prompt";
import { prisma } from "~/trigger/utils/prisma";
import { logger } from "~/services/logger.service";
import { generateText, type LanguageModel } from "ai";
import { getModel } from "~/lib/model.server";
export interface CreateConversationTitlePayload {
conversationId: string;
@@ -24,8 +24,9 @@
): Promise<CreateConversationTitleResult> {
try {
let conversationTitleResponse = "";
const gen = generate(
[
const { text } = await generateText({
model: getModel() as LanguageModel,
messages: [
{
role: "user",
content: conversationTitlePrompt.replace(
@@ -34,24 +35,9 @@
),
},
],
false,
() => {},
undefined,
"",
LLMMappings.GPT41,
);
});
for await (const chunk of gen) {
if (typeof chunk === "string") {
conversationTitleResponse += chunk;
} else if (chunk && typeof chunk === "object" && chunk.message) {
conversationTitleResponse += chunk.message;
}
}
const outputMatch = conversationTitleResponse.match(
/<output>(.*?)<\/output>/s,
);
const outputMatch = text.match(/<output>(.*?)<\/output>/s);
logger.info(`Conversation title data: ${JSON.stringify(outputMatch)}`);

View File

@@ -1,105 +0,0 @@
import { type CoreMessage } from "ai";
import { logger } from "~/services/logger.service";
import { nanoid } from "nanoid";
import {
deletePersonalAccessToken,
getOrCreatePersonalAccessToken,
} from "~/trigger/utils/utils";
import { getReActPrompt } from "~/trigger/deep-search/prompt";
import { type DeepSearchPayload, type DeepSearchResponse } from "~/trigger/deep-search/types";
import { createSearchMemoryTool } from "~/trigger/deep-search/utils";
import { run } from "~/trigger/deep-search/deep-search-utils";
import { AgentMessageType } from "~/trigger/chat/types";
export interface ProcessDeepSearchPayload {
content: string;
userId: string;
metadata?: any;
intentOverride?: string;
}
export interface ProcessDeepSearchResult {
success: boolean;
synthesis?: string;
error?: string;
}
/**
* Core business logic for deep search (non-streaming version for BullMQ)
* This is shared logic, but the streaming happens in Trigger.dev via metadata.stream
*/
export async function processDeepSearch(
payload: ProcessDeepSearchPayload,
): Promise<ProcessDeepSearchResult> {
const { content, userId, metadata: meta, intentOverride } = payload;
const randomKeyName = `deepSearch_${nanoid(10)}`;
// Get or create token for search API calls
const pat = await getOrCreatePersonalAccessToken({
name: randomKeyName,
userId: userId as string,
});
if (!pat?.token) {
return {
success: false,
error: "Failed to create personal access token",
};
}
try {
// Create search tool that agent will use
const searchTool = createSearchMemoryTool(pat.token);
// Build initial messages with ReAct prompt
const initialMessages: CoreMessage[] = [
{
role: "system",
content: getReActPrompt(meta, intentOverride),
},
{
role: "user",
content: `CONTENT TO ANALYZE:\n${content}\n\nPlease search my memory for relevant context and synthesize what you find.`,
},
];
// Run the ReAct loop generator
const llmResponse = run(initialMessages, searchTool);
let synthesis = "";
// For BullMQ: iterate without streaming, just accumulate the final synthesis
for await (const step of llmResponse) {
// MESSAGE_CHUNK: Final synthesis - accumulate
if (step.type === AgentMessageType.MESSAGE_CHUNK) {
synthesis += step.message;
}
// STREAM_END: Loop completed
if (step.type === AgentMessageType.STREAM_END) {
break;
}
}
await deletePersonalAccessToken(pat?.id);
// Clean up any remaining tags
synthesis = synthesis
.replace(/<final_response>/gi, "")
.replace(/<\/final_response>/gi, "")
.trim();
return {
success: true,
synthesis,
};
} catch (error: any) {
await deletePersonalAccessToken(pat?.id);
logger.error(`Deep search error: ${error}`);
return {
success: false,
error: error.message,
};
}
}

View File

@@ -1,15 +1,15 @@
import { type z } from "zod";
import crypto from "crypto";
import { IngestionStatus } from "@core/database";
import { EpisodeTypeEnum } from "@core/types";
import { logger } from "~/services/logger.service";
import { saveDocument } from "~/services/graphModels/document";
import { type IngestBodyRequest } from "~/lib/ingest.server";
import { DocumentVersioningService } from "~/services/documentVersioning.server";
import { DocumentDifferentialService } from "~/services/documentDiffer.server";
import { KnowledgeGraphService } from "~/services/knowledgeGraph.server";
import { prisma } from "~/trigger/utils/prisma";
import { type IngestBodyRequest } from "./ingest-episode.logic";
export interface IngestDocumentPayload {
body: z.infer<typeof IngestBodyRequest>;
@@ -101,6 +101,12 @@
// Early return for unchanged documents
if (differentialDecision.strategy === "skip_processing") {
logger.log("Document content unchanged, skipping processing");
await prisma.ingestionQueue.update({
where: { id: payload.queueId },
data: {
status: IngestionStatus.COMPLETED,
},
});
return {
success: true,
};
@@ -134,9 +140,7 @@
});
}
logger.log(
`Document chunked into ${chunkedDocument.chunks.length} chunks`,
);
logger.log(`Document chunked into ${chunkedDocument.chunks.length} chunks`);
// Step 4: Process chunks based on differential strategy
let chunksToProcess = chunkedDocument.chunks;
@@ -280,10 +284,7 @@
},
});
logger.error(
`Error processing document for user ${payload.userId}:`,
err,
);
logger.error(`Error processing document for user ${payload.userId}:`, err);
return { success: false, error: err.message };
}
}

View File

@@ -9,11 +9,13 @@ import {
enqueueIngestDocument,
enqueueIngestEpisode,
} from "~/lib/queue-adapter.server";
import { trackFeatureUsage } from "~/services/telemetry.server";
export const addToQueue = async (
rawBody: z.infer<typeof IngestBodyRequest>,
userId: string,
activityId?: string,
ingestionQueueId?: string,
) => {
const body = { ...rawBody, source: rawBody.source.toLowerCase() };
const user = await prisma.user.findFirst({
@@ -41,8 +43,18 @@
throw new Error("no credits");
}
const queuePersist = await prisma.ingestionQueue.create({
data: {
// Upsert: update existing or create new ingestion queue entry
const queuePersist = await prisma.ingestionQueue.upsert({
where: {
id: ingestionQueueId || "non-existent-id", // Use provided ID or dummy ID to force create
},
update: {
data: body,
type: body.type,
status: IngestionStatus.PENDING,
error: null,
},
create: {
data: body,
type: body.type,
status: IngestionStatus.PENDING,
@@ -60,6 +72,9 @@
workspaceId: user.Workspace.id,
queueId: queuePersist.id,
});
// Track document ingestion
trackFeatureUsage("document_ingested", userId).catch(console.error);
} else if (body.type === EpisodeType.CONVERSATION) {
handler = await enqueueIngestEpisode({
body,
@@ -67,6 +82,9 @@
workspaceId: user.Workspace.id,
queueId: queuePersist.id,
});
// Track episode ingestion
trackFeatureUsage("episode_ingested", userId).catch(console.error);
}
return { id: handler?.id, publicAccessToken: handler?.token };
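The switch from create to upsert supports the retry flow added elsewhere in this diff (the /api/v1/logs/:id/retry action): when ingestionQueueId is supplied, the existing row is reset to PENDING with its error cleared; when it is absent, the dummy id matches no row and Prisma falls through to the create branch. A reduced sketch of the create-or-reset pattern (relation fields omitted):

// Prisma upsert takes the create branch whenever `where` matches nothing,
// so a known-nonexistent id forces creation; a real id resets the row for retry.
const entry = await prisma.ingestionQueue.upsert({
  where: { id: ingestionQueueId ?? "non-existent-id" },
  update: { status: IngestionStatus.PENDING, error: null },
  create: { data: body, type: body.type, status: IngestionStatus.PENDING },
});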

View File

@@ -1,31 +1,23 @@
import {
type CoreMessage,
type LanguageModelV1,
embed,
generateText,
streamText,
} from "ai";
import { type CoreMessage, embed, generateText, streamText } from "ai";
import { openai } from "@ai-sdk/openai";
import { logger } from "~/services/logger.service";
import { createOllama, type OllamaProvider } from "ollama-ai-provider";
import { createOllama } from "ollama-ai-provider-v2";
import { anthropic } from "@ai-sdk/anthropic";
import { google } from "@ai-sdk/google";
import { createAmazonBedrock } from "@ai-sdk/amazon-bedrock";
import { fromNodeProviderChain } from "@aws-sdk/credential-providers";
export type ModelComplexity = 'high' | 'low';
export type ModelComplexity = "high" | "low";
/**
* Get the appropriate model for a given complexity level.
* HIGH complexity uses the configured MODEL.
* LOW complexity automatically downgrades to cheaper variants if possible.
*/
export function getModelForTask(complexity: ModelComplexity = 'high'): string {
const baseModel = process.env.MODEL || 'gpt-4.1-2025-04-14';
export function getModelForTask(complexity: ModelComplexity = "high"): string {
const baseModel = process.env.MODEL || "gpt-4.1-2025-04-14";
// HIGH complexity - always use the configured model
if (complexity === 'high') {
if (complexity === "high") {
return baseModel;
}
@@ -33,29 +25,73 @@ export function getModelForTask(complexity: ModelComplexity = 'high'): string {
// If already using a cheap model, keep it
const downgrades: Record<string, string> = {
// OpenAI downgrades
'gpt-5-2025-08-07': 'gpt-5-mini-2025-08-07',
'gpt-4.1-2025-04-14': 'gpt-4.1-mini-2025-04-14',
"gpt-5-2025-08-07": "gpt-5-mini-2025-08-07",
"gpt-4.1-2025-04-14": "gpt-4.1-mini-2025-04-14",
// Anthropic downgrades
'claude-sonnet-4-5': 'claude-3-5-haiku-20241022',
'claude-3-7-sonnet-20250219': 'claude-3-5-haiku-20241022',
'claude-3-opus-20240229': 'claude-3-5-haiku-20241022',
"claude-sonnet-4-5": "claude-3-5-haiku-20241022",
"claude-3-7-sonnet-20250219": "claude-3-5-haiku-20241022",
"claude-3-opus-20240229": "claude-3-5-haiku-20241022",
// Google downgrades
'gemini-2.5-pro-preview-03-25': 'gemini-2.5-flash-preview-04-17',
'gemini-2.0-flash': 'gemini-2.0-flash-lite',
"gemini-2.5-pro-preview-03-25": "gemini-2.5-flash-preview-04-17",
"gemini-2.0-flash": "gemini-2.0-flash-lite",
// AWS Bedrock downgrades (keep same model - already cost-optimized)
'us.amazon.nova-premier-v1:0': 'us.amazon.nova-premier-v1:0',
"us.amazon.nova-premier-v1:0": "us.amazon.nova-premier-v1:0",
};
return downgrades[baseModel] || baseModel;
}
export const getModel = (takeModel?: string) => {
let model = takeModel;
const anthropicKey = process.env.ANTHROPIC_API_KEY;
const googleKey = process.env.GOOGLE_GENERATIVE_AI_API_KEY;
const openaiKey = process.env.OPENAI_API_KEY;
let ollamaUrl = process.env.OLLAMA_URL;
model = model || process.env.MODEL || "gpt-4.1-2025-04-14";
let modelInstance;
let modelTemperature = Number(process.env.MODEL_TEMPERATURE) || 1;
ollamaUrl = undefined;
// First check if Ollama URL exists and use Ollama
if (ollamaUrl) {
const ollama = createOllama({
baseURL: ollamaUrl,
});
modelInstance = ollama(model || "llama2"); // Default to llama2 if no model specified
} else {
// If no Ollama, check other models
if (model.includes("claude")) {
if (!anthropicKey) {
throw new Error("No Anthropic API key found. Set ANTHROPIC_API_KEY");
}
modelInstance = anthropic(model);
modelTemperature = 0.5;
} else if (model.includes("gemini")) {
if (!googleKey) {
throw new Error("No Google API key found. Set GOOGLE_API_KEY");
}
modelInstance = google(model);
} else {
if (!openaiKey) {
throw new Error("No OpenAI API key found. Set OPENAI_API_KEY");
}
modelInstance = openai(model);
}
return modelInstance;
}
};
export interface TokenUsage {
promptTokens: number;
completionTokens: number;
totalTokens: number;
promptTokens?: number;
completionTokens?: number;
totalTokens?: number;
}
export async function makeModelCall(
@@ -63,69 +99,13 @@
messages: CoreMessage[],
onFinish: (text: string, model: string, usage?: TokenUsage) => void,
options?: any,
complexity: ModelComplexity = 'high',
complexity: ModelComplexity = "high",
) {
let modelInstance: LanguageModelV1 | undefined;
let model = getModelForTask(complexity);
const ollamaUrl = process.env.OLLAMA_URL;
let ollama: OllamaProvider | undefined;
logger.info(`complexity: ${complexity}, model: ${model}`);
if (ollamaUrl) {
ollama = createOllama({
baseURL: ollamaUrl,
});
}
const bedrock = createAmazonBedrock({
region: process.env.AWS_REGION || 'us-east-1',
credentialProvider: fromNodeProviderChain(),
});
const generateTextOptions: any = {}
logger.info(
`complexity: ${complexity}, model: ${model}`,
);
switch (model) {
case "gpt-4.1-2025-04-14":
case "gpt-4.1-mini-2025-04-14":
case "gpt-5-mini-2025-08-07":
case "gpt-5-2025-08-07":
case "gpt-4.1-nano-2025-04-14":
modelInstance = openai(model, { ...options });
generateTextOptions.temperature = 1
break;
case "claude-3-7-sonnet-20250219":
case "claude-3-opus-20240229":
case "claude-3-5-haiku-20241022":
modelInstance = anthropic(model, { ...options });
break;
case "gemini-2.5-flash-preview-04-17":
case "gemini-2.5-pro-preview-03-25":
case "gemini-2.0-flash":
case "gemini-2.0-flash-lite":
modelInstance = google(model, { ...options });
break;
case "us.meta.llama3-3-70b-instruct-v1:0":
case "us.deepseek.r1-v1:0":
case "qwen.qwen3-32b-v1:0":
case "openai.gpt-oss-120b-1:0":
case "us.mistral.pixtral-large-2502-v1:0":
case "us.amazon.nova-premier-v1:0":
modelInstance = bedrock(`${model}`);
generateTextOptions.maxTokens = 100000
break;
default:
if (ollama) {
modelInstance = ollama(model);
}
logger.warn(`Unsupported model type: ${model}`);
break;
}
const modelInstance = getModel(model);
const generateTextOptions: any = {};
if (!modelInstance) {
throw new Error(`Unsupported model type: ${model}`);
@@ -135,16 +115,21 @@
return streamText({
model: modelInstance,
messages,
...options,
...generateTextOptions,
onFinish: async ({ text, usage }) => {
const tokenUsage = usage ? {
promptTokens: usage.promptTokens,
completionTokens: usage.completionTokens,
totalTokens: usage.totalTokens,
} : undefined;
const tokenUsage = usage
? {
promptTokens: usage.inputTokens,
completionTokens: usage.outputTokens,
totalTokens: usage.totalTokens,
}
: undefined;
if (tokenUsage) {
logger.log(`[${complexity.toUpperCase()}] ${model} - Tokens: ${tokenUsage.totalTokens} (prompt: ${tokenUsage.promptTokens}, completion: ${tokenUsage.completionTokens})`);
logger.log(
`[${complexity.toUpperCase()}] ${model} - Tokens: ${tokenUsage.totalTokens} (prompt: ${tokenUsage.promptTokens}, completion: ${tokenUsage.completionTokens})`,
);
}
onFinish(text, model, tokenUsage);
@@ -158,14 +143,18 @@
...generateTextOptions,
});
const tokenUsage = usage ? {
promptTokens: usage.promptTokens,
completionTokens: usage.completionTokens,
totalTokens: usage.totalTokens,
} : undefined;
const tokenUsage = usage
? {
promptTokens: usage.inputTokens,
completionTokens: usage.outputTokens,
totalTokens: usage.totalTokens,
}
: undefined;
if (tokenUsage) {
logger.log(`[${complexity.toUpperCase()}] ${model} - Tokens: ${tokenUsage.totalTokens} (prompt: ${tokenUsage.promptTokens}, completion: ${tokenUsage.completionTokens})`);
logger.log(
`[${complexity.toUpperCase()}] ${model} - Tokens: ${tokenUsage.totalTokens} (prompt: ${tokenUsage.promptTokens}, completion: ${tokenUsage.completionTokens})`,
);
}
onFinish(text, model, tokenUsage);
@@ -177,19 +166,22 @@
* Determines if a given model is proprietary (OpenAI, Anthropic, Google, Grok)
* or open source (accessed via Bedrock, Ollama, etc.)
*/
export function isProprietaryModel(modelName?: string, complexity: ModelComplexity = 'high'): boolean {
export function isProprietaryModel(
modelName?: string,
complexity: ModelComplexity = "high",
): boolean {
const model = modelName || getModelForTask(complexity);
if (!model) return false;
// Proprietary model patterns
const proprietaryPatterns = [
/^gpt-/, // OpenAI models
/^claude-/, // Anthropic models
/^gemini-/, // Google models
/^grok-/, // xAI models
/^gpt-/, // OpenAI models
/^claude-/, // Anthropic models
/^gemini-/, // Google models
/^grok-/, // xAI models
];
return proprietaryPatterns.some(pattern => pattern.test(model));
return proprietaryPatterns.some((pattern) => pattern.test(model));
}
export async function getEmbedding(text: string) {
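A minimal usage sketch of the new getModel() with AI SDK v5, which also shows the usage-field rename this diff adapts to (promptTokens/completionTokens become inputTokens/outputTokens):

import { generateText, type LanguageModel } from "ai";
import { getModel } from "~/lib/model.server";

// claude-* routes to anthropic(), gemini-* to google(), anything else to
// openai(); the matching API key env var must be set or getModel() throws.
const { text, usage } = await generateText({
  model: getModel() as LanguageModel,
  messages: [{ role: "user", content: "Say hello in five words." }],
});

// v5 usage fields; makeModelCall maps these back to promptTokens/completionTokens.
console.log(text, usage.inputTokens, usage.outputTokens, usage.totalTokens);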

View File

@@ -112,51 +112,31 @@ export const getNodeLinks = async (userId: string) => {
export const getClusteredGraphData = async (userId: string) => {
const session = driver.session();
try {
// Get the simplified graph structure: Episode, Subject, Object with Predicate as edge
// Only include entities that are connected to more than 1 episode
// Get Episode -> Entity graph, only showing entities connected to more than 1 episode
const result = await session.run(
`// Find entities connected to more than 1 episode
MATCH (e:Episode)-[:HAS_PROVENANCE]->(s:Statement {userId: $userId})
MATCH (s)-[:HAS_SUBJECT|HAS_OBJECT|HAS_PREDICATE]->(ent:Entity)
WITH ent, count(DISTINCT e) as episodeCount
MATCH (e:Episode{userId: $userId})-[:HAS_PROVENANCE]->(s:Statement {userId: $userId})-[r:HAS_SUBJECT|HAS_OBJECT|HAS_PREDICATE]->(entity:Entity)
WITH entity, count(DISTINCT e) as episodeCount
WHERE episodeCount > 1
WITH collect(ent.uuid) as validEntityUuids
WITH collect(entity.uuid) as validEntityUuids
// Get statements where all entities are in the valid set
MATCH (e:Episode)-[:HAS_PROVENANCE]->(s:Statement {userId: $userId})
MATCH (s)-[:HAS_SUBJECT]->(subj:Entity)
WHERE subj.uuid IN validEntityUuids
MATCH (s)-[:HAS_PREDICATE]->(pred:Entity)
WHERE pred.uuid IN validEntityUuids
MATCH (s)-[:HAS_OBJECT]->(obj:Entity)
WHERE obj.uuid IN validEntityUuids
// Build relationships
WITH e, s, subj, pred, obj
UNWIND [
// Episode -> Subject
{source: e, sourceType: 'Episode', target: subj, targetType: 'Entity', predicate: null},
// Episode -> Object
{source: e, sourceType: 'Episode', target: obj, targetType: 'Entity', predicate: null},
// Subject -> Object (with Predicate as edge)
{source: subj, sourceType: 'Entity', target: obj, targetType: 'Entity', predicate: pred.name}
] AS rel
// Build Episode -> Entity relationships for valid entities
MATCH (e:Episode{userId: $userId})-[:HAS_PROVENANCE]->(s:Statement {userId: $userId})-[r:HAS_SUBJECT|HAS_OBJECT|HAS_PREDICATE]->(entity:Entity)
WHERE entity.uuid IN validEntityUuids
WITH DISTINCT e, entity, type(r) as relType,
CASE WHEN size(e.spaceIds) > 0 THEN e.spaceIds[0] ELSE null END as clusterId,
s.createdAt as createdAt
RETURN DISTINCT
rel.source.uuid as sourceUuid,
rel.source.name as sourceName,
rel.source.content as sourceContent,
rel.sourceType as sourceNodeType,
rel.target.uuid as targetUuid,
rel.target.name as targetName,
rel.targetType as targetNodeType,
rel.predicate as predicateLabel,
e.uuid as episodeUuid,
e.content as episodeContent,
e.spaceIds as spaceIds,
s.uuid as statementUuid,
s.validAt as validAt,
s.createdAt as createdAt`,
e.uuid as sourceUuid,
e.content as sourceContent,
'Episode' as sourceNodeType,
entity.uuid as targetUuid,
entity.name as targetName,
'Entity' as targetNodeType,
relType as edgeType,
clusterId,
createdAt`,
{ userId },
);
@@ -165,72 +145,29 @@
result.records.forEach((record) => {
const sourceUuid = record.get("sourceUuid");
const sourceName = record.get("sourceName");
const sourceContent = record.get("sourceContent");
const sourceNodeType = record.get("sourceNodeType");
const targetUuid = record.get("targetUuid");
const targetName = record.get("targetName");
const targetNodeType = record.get("targetNodeType");
const predicateLabel = record.get("predicateLabel");
const episodeUuid = record.get("episodeUuid");
const clusterIds = record.get("spaceIds");
const clusterId = clusterIds ? clusterIds[0] : undefined;
const edgeType = record.get("edgeType");
const clusterId = record.get("clusterId");
const createdAt = record.get("createdAt");
// Create unique edge identifier to avoid duplicates
// For Episode->Subject edges, use generic type; for Subject->Object use predicate
const edgeType = predicateLabel || "HAS_SUBJECT";
const edgeKey = `${sourceUuid}-${targetUuid}-${edgeType}`;
if (processedEdges.has(edgeKey)) return;
processedEdges.add(edgeKey);
// Build node attributes based on type
const sourceAttributes =
sourceNodeType === "Episode"
? {
nodeType: "Episode",
content: sourceContent,
episodeUuid: sourceUuid,
clusterId,
}
: {
nodeType: "Entity",
name: sourceName,
clusterId,
};
const targetAttributes =
targetNodeType === "Episode"
? {
nodeType: "Episode",
content: sourceContent,
episodeUuid: targetUuid,
clusterId,
}
: {
nodeType: "Entity",
name: targetName,
clusterId,
};
// Build display name
const sourceDisplayName =
sourceNodeType === "Episode"
? sourceContent || episodeUuid
: sourceName || sourceUuid;
const targetDisplayName =
targetNodeType === "Episode"
? sourceContent || episodeUuid
: targetName || targetUuid;
triplets.push({
sourceNode: {
uuid: sourceUuid,
labels: [sourceNodeType],
attributes: sourceAttributes,
name: sourceDisplayName,
labels: ["Episode"],
attributes: {
nodeType: "Episode",
content: sourceContent,
episodeUuid: sourceUuid,
clusterId,
},
name: sourceContent || sourceUuid,
clusterId,
createdAt: createdAt || "",
},
@@ -243,10 +180,14 @@
},
targetNode: {
uuid: targetUuid,
labels: [targetNodeType],
attributes: targetAttributes,
labels: ["Entity"],
attributes: {
nodeType: "Entity",
name: targetName,
clusterId,
},
name: targetName || targetUuid,
clusterId,
name: targetDisplayName,
createdAt: createdAt || "",
},
});

View File

@@ -0,0 +1,324 @@
import { type StopCondition } from "ai";
export const hasAnswer: StopCondition<any> = ({ steps }) => {
return (
steps.some((step) => step.text?.includes("</final_response>")) ?? false
);
};
export const hasQuestion: StopCondition<any> = ({ steps }) => {
return (
steps.some((step) => step.text?.includes("</question_response>")) ?? false
);
};
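A minimal sketch of how these conditions would plug into an AI SDK v5 multi-step call (the wiring itself is not shown in this diff; import paths assumed): stopWhen accepts an array, so a step-count cap can back up the tag checks.

import { generateText, stepCountIs, type CoreMessage, type LanguageModel } from "ai";
import { getModel } from "~/lib/model.server";
import { hasAnswer, hasQuestion } from "./stop-conditions";

async function runAgent(messages: CoreMessage[]) {
  return generateText({
    model: getModel() as LanguageModel,
    messages,
    // Halt once a closing </final_response> or </question_response> tag
    // appears in any step, or after 10 steps as a safety net.
    stopWhen: [hasAnswer, hasQuestion, stepCountIs(10)],
  });
}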
export const REACT_SYSTEM_PROMPT = `
You are a helpful AI assistant with access to user memory. Your primary capabilities are:
1. **Memory-First Approach**: Always check user memory first to understand context and previous interactions
2. **Intelligent Information Gathering**: Analyze queries to determine if current information is needed
3. **Memory Management**: Help users store, retrieve, and organize information in their memory
4. **Contextual Assistance**: Use memory to provide personalized and contextual responses
<information_gathering>
Follow this intelligent approach for information gathering:
1. **MEMORY FIRST** (Always Required)
- Always check memory FIRST using core--search_memory before any other actions
- Consider this your highest priority for EVERY interaction - as essential as breathing
- Memory provides context, personal preferences, and historical information
- Use memory to understand user's background, ongoing projects, and past conversations
2. **INFORMATION SYNTHESIS** (Combine Sources)
- Use memory to personalize current information based on user preferences
- Always store new useful information in memory using core--add_memory
3. **TRAINING KNOWLEDGE** (Foundation)
- Use your training knowledge as the foundation for analysis and explanation
- Apply training knowledge to interpret and contextualize information from memory
- Indicate when you're using training knowledge vs. live information sources
EXECUTION APPROACH:
- Memory search is mandatory for every interaction
- Always indicate your information sources in responses
</information_gathering>
<memory>
QUERY FORMATION:
- Write specific factual statements as queries (e.g., "user email address" not "what is the user's email?")
- Create multiple targeted memory queries for complex requests
KEY QUERY AREAS:
- Personal context: user name, location, identity, work context
- Project context: repositories, codebases, current work, team members
- Task context: recent tasks, ongoing projects, deadlines, priorities
- Integration context: GitHub repos, Slack channels, Linear projects, connected services
- Communication patterns: email preferences, notification settings, workflow automation
- Technical context: coding languages, frameworks, development environment
- Collaboration context: team members, project stakeholders, meeting patterns
- Preferences: likes, dislikes, communication style, tool preferences
- History: previous discussions, past requests, completed work, recurring issues
- Automation rules: user-defined workflows, triggers, automation preferences
MEMORY USAGE:
- Execute multiple memory queries in parallel rather than sequentially
- Batch related memory queries when possible
- Prioritize recent information over older memories
- Create comprehensive context-aware queries based on user message/activity content
- Extract and query SEMANTIC CONTENT, not just structural metadata
- Parse titles, descriptions, and content for actual subject matter keywords
- Search internal SOL tasks/conversations that may relate to the same topics
- Query ALL relatable concepts, not just direct keywords or IDs
- Search for similar past situations, patterns, and related work
- Include synonyms, related terms, and contextual concepts in queries
- Query user's historical approach to similar requests or activities
- Search for connected projects, tasks, conversations, and collaborations
- Retrieve workflow patterns and past decision-making context
- Query broader domain context beyond immediate request scope
- Remember: SOL tracks work that external tools don't - search internal content thoroughly
- Blend memory insights naturally into responses
- Verify you've checked relevant memory before finalizing ANY response
</memory>
<external_services>
- To use: load_mcp with EXACT integration name from the available list
- Can load multiple at once with an array
- Only load when tools are NOT already available in your current toolset
- If a tool is already available, use it directly without load_mcp
- If requested integration unavailable: inform user politely
</external_services>
<tool_calling>
You have tools at your disposal to assist users:
CORE PRINCIPLES:
- Use tools only when necessary for the task at hand
- Always check memory FIRST before making other tool calls
- Execute multiple operations in parallel whenever possible
- Use sequential calls only when output of one is required for input of another
PARAMETER HANDLING:
- Follow tool schemas exactly with all required parameters
- Only use values that are:
Explicitly provided by the user (use EXACTLY as given)
Reasonably inferred from context
Retrieved from memory or prior tool calls
- Never make up values for required parameters
- Omit optional parameters unless clearly needed
- Analyze user's descriptive terms for parameter clues
TOOL SELECTION:
- Never call tools not provided in this conversation
- Skip tool calls for general questions you can answer directly from memory/knowledge
- For identical operations on multiple items, use parallel tool calls
- Default to parallel execution (3-5× faster than sequential calls)
- You can always access external service tools by loading them with load_mcp first
TOOL MENTION HANDLING:
When user message contains <mention data-id="tool_name" data-label="tool"></mention>:
- Extract tool_name from data-id attribute
- First check if it's a built-in tool; if not, check EXTERNAL SERVICES TOOLS
- If available: Load it with load_mcp and focus on addressing the request with this tool
- If unavailable: Inform user and suggest alternatives if possible
- For multiple tool mentions: Load all applicable tools in a single load_mcp call
ERROR HANDLING:
- If a tool returns an error, try fixing parameters before retrying
- If you can't resolve an error, explain the issue to the user
- Consider alternative tools when primary tools are unavailable
</tool_calling>
<communication>
Use EXACTLY ONE of these formats for all user-facing communication:
PROGRESS UPDATES - During processing:
- Use the core--progress_update tool to keep users informed
- Update users about what you're discovering or doing next
- Keep messages clear and user-friendly
- Avoid technical jargon
QUESTIONS - When you need information:
<question_response>
<p>[Your question with HTML formatting]</p>
</question_response>
- Ask questions only when you cannot find information through memory or tools
- Be specific about what you need to know
- Provide context for why you're asking
FINAL ANSWERS - When completing tasks:
<final_response>
<p>[Your answer with HTML formatting]</p>
</final_response>
CRITICAL:
- Use ONE format per turn
- Apply proper HTML formatting (<h1>, <h2>, <p>, <ul>, <li>, etc.)
- Never mix communication formats
- Keep responses clear and helpful
- Always indicate your information sources (memory and/or knowledge)
</communication>
`;
export function getReActPrompt(
metadata?: { source?: string; url?: string; pageTitle?: string },
intentOverride?: string,
): string {
const contextHints = [];
if (
metadata?.source === "chrome" &&
metadata?.url?.includes("mail.google.com")
) {
contextHints.push("Content is from email - likely reading intent");
}
if (
metadata?.source === "chrome" &&
metadata?.url?.includes("calendar.google.com")
) {
contextHints.push("Content is from calendar - likely meeting prep intent");
}
if (
metadata?.source === "chrome" &&
metadata?.url?.includes("docs.google.com")
) {
contextHints.push(
"Content is from document editor - likely writing intent",
);
}
if (metadata?.source === "obsidian") {
contextHints.push(
"Content is from note editor - likely writing or research intent",
);
}
return `You are a memory research agent analyzing content to find relevant context.
YOUR PROCESS (ReAct Framework):
1. DECOMPOSE: First, break down the content into structured categories
Analyze the content and extract:
a) ENTITIES: Specific people, project names, tools, products mentioned
Example: "John Smith", "Phoenix API", "Redis", "mobile app"
b) TOPICS & CONCEPTS: Key subjects, themes, domains
Example: "authentication", "database design", "performance optimization"
c) TEMPORAL MARKERS: Time references, deadlines, events
Example: "last week's meeting", "Q2 launch", "yesterday's discussion"
d) ACTIONS & TASKS: What's being done, decided, or requested
Example: "implement feature", "review code", "make decision on"
e) USER INTENT: What is the user trying to accomplish?
${intentOverride ? `User specified: "${intentOverride}"` : "Infer from context: reading/writing/meeting prep/research/task tracking/review"}
2. FORM QUERIES: Create targeted search queries from your decomposition
Based on decomposition, form specific queries:
- Search for each entity by name (people, projects, tools)
- Search for topics the user has discussed before
- Search for related work or conversations in this domain
- Use the user's actual terminology, not generic concepts
EXAMPLE - Content: "Email from Sarah about the API redesign we discussed last week"
Decomposition:
- Entities: "Sarah", "API redesign"
- Topics: "API design", "redesign"
- Temporal: "last week"
- Actions: "discussed", "email communication"
- Intent: Reading (email) / meeting prep
Queries to form:
"Sarah" (find past conversations with Sarah)
"API redesign" or "API design" (find project discussions)
"last week" + "Sarah" (find recent context)
"meetings" or "discussions" (find related conversations)
Avoid: "email communication patterns", "API architecture philosophy"
(These are abstract - search what user actually discussed!)
3. SEARCH: Execute your queries using searchMemory tool
- Start with 2-3 core searches based on main entities/topics
- Make each search specific and targeted
- Use actual terms from the content, not rephrased concepts
4. OBSERVE: Evaluate search results
- Did you find relevant episodes? How many unique ones?
- What specific context emerged?
- What new entities/topics appeared in results?
- Are there gaps in understanding?
- Should you search more angles?
Note: Episode counts are automatically deduplicated across searches - overlapping episodes are only counted once.
5. REACT: Decide next action based on observations
STOPPING CRITERIA - Proceed to SYNTHESIZE if ANY of these are true:
- You found 20+ unique episodes across your searches -> ENOUGH CONTEXT
- You performed 5+ searches and found relevant episodes -> SUFFICIENT
- You performed 7+ searches regardless of results -> EXHAUSTED STRATEGIES
- You found strong relevant context from multiple angles -> COMPLETE
System nudges will provide awareness of your progress, but you decide when synthesis quality would be optimal.
If you found little/no context AND searched less than 7 times:
- Try different query angles from your decomposition
- Search broader related topics
- Search user's projects or work areas
- Try alternative terminology
DO NOT search endlessly - if you found relevant episodes, STOP and synthesize!
6. SYNTHESIZE: After gathering sufficient context, provide final answer
- Wrap your synthesis in <final_response> tags
- Present direct factual context from memory - no meta-commentary
- Write as if providing background context to an AI assistant
- Include: facts, decisions, preferences, patterns, timelines
- Note any gaps, contradictions, or evolution in thinking
- Keep it concise and actionable
- DO NOT use phrases like "Previous discussions on", "From conversations", "Past preferences indicate"
- DO NOT use conversational language like "you said" or "you mentioned"
- Present information as direct factual statements
FINAL RESPONSE FORMAT:
<final_response>
[Direct synthesized context - factual statements only]
Good examples:
- "The API redesign focuses on performance and scalability. Key decisions: moving to GraphQL, caching layer with Redis."
- "Project Phoenix launches Q2 2024. Main features: real-time sync, offline mode, collaborative editing."
- "Sarah leads the backend team. Recent work includes authentication refactor and database migration."
Bad examples:
"Previous discussions on the API revealed..."
"From past conversations, it appears that..."
"Past preferences indicate..."
"The user mentioned that..."
Just state the facts directly.
</final_response>
${contextHints.length > 0 ? `\nCONTEXT HINTS:\n${contextHints.join("\n")}` : ""}
CRITICAL REQUIREMENTS:
- ALWAYS start with DECOMPOSE step - extract entities, topics, temporal markers, actions
- Form specific queries from your decomposition - use user's actual terms
- Minimum 3 searches required
- Maximum 10 searches allowed - must synthesize after that
- STOP and synthesize when you hit stopping criteria (20+ episodes, 5+ searches with results, 7+ searches total)
- Each search should target different aspects from decomposition
- Present synthesis directly without meta-commentary
SEARCH QUALITY CHECKLIST:
- Queries use specific terms from content (names, projects, exact phrases)
- Searched multiple angles from decomposition (entities, topics, related areas)
- Stop when you have enough unique context - don't search endlessly
- Tried alternative terminology if initial searches found nothing
- Avoid generic/abstract queries that don't match user's vocabulary
- Don't stop at 3 searches if you found zero unique episodes
- Don't keep searching when you already found 20+ unique episodes
`;
}
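Taken together, the two stop conditions and the prompt's XML-style response tags form the stop protocol for streaming chat. A minimal usage sketch, mirroring how the chat route below wires them up (messages and tools are assumed to be in scope):

import { streamText, stepCountIs, type LanguageModel } from "ai";
import { getModel } from "~/lib/model.server";
import {
  hasAnswer,
  hasQuestion,
  REACT_SYSTEM_PROMPT,
} from "~/lib/prompt.server";

const result = streamText({
  model: getModel() as LanguageModel,
  messages: [{ role: "system", content: REACT_SYSTEM_PROMPT }, ...messages],
  tools,
  // Stop as soon as the model emits a closing </final_response> or
  // </question_response> tag, or after 10 steps as a hard ceiling.
  stopWhen: [stepCountIs(10), hasAnswer, hasQuestion],
});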

View File

@ -14,8 +14,8 @@ import { env } from "~/env.server";
import type { z } from "zod";
import type { IngestBodyRequest } from "~/jobs/ingest/ingest-episode.logic";
import type { CreateConversationTitlePayload } from "~/jobs/conversation/create-title.logic";
import type { ProcessDeepSearchPayload } from "~/jobs/deep-search/deep-search.logic";
import type { SessionCompactionPayload } from "~/jobs/session/session-compaction.logic";
import { type SpaceAssignmentPayload } from "~/trigger/spaces/space-assignment";
type QueueProvider = "trigger" | "bullmq";
@ -113,35 +113,6 @@ export async function enqueueCreateConversationTitle(
}
}
/**
* Enqueue deep search job
*/
export async function enqueueDeepSearch(
payload: ProcessDeepSearchPayload,
): Promise<{ id?: string }> {
const provider = env.QUEUE_PROVIDER as QueueProvider;
if (provider === "trigger") {
const { deepSearch } = await import("~/trigger/deep-search");
const handler = await deepSearch.trigger({
content: payload.content,
userId: payload.userId,
stream: true,
metadata: payload.metadata,
intentOverride: payload.intentOverride,
});
return { id: handler.id };
} else {
// BullMQ
const { deepSearchQueue } = await import("~/bullmq/queues");
const job = await deepSearchQueue.add("deep-search", payload, {
attempts: 3,
backoff: { type: "exponential", delay: 2000 },
});
return { id: job.id };
}
}
/**
* Enqueue session compaction job
*/
@ -175,12 +146,9 @@ export async function enqueueSessionCompaction(
* Enqueue space assignment job
* (Helper for common job logic to call)
*/
export async function enqueueSpaceAssignment(payload: {
userId: string;
workspaceId: string;
mode: "episode";
episodeIds: string[];
}): Promise<void> {
export async function enqueueSpaceAssignment(
payload: SpaceAssignmentPayload,
): Promise<void> {
const provider = env.QUEUE_PROVIDER as QueueProvider;
if (provider === "trigger") {
@ -194,3 +162,7 @@ export async function enqueueSpaceAssignment(payload: {
console.warn("Space assignment not implemented for BullMQ yet");
}
}
export const isTriggerDeployment = () => {
return env.QUEUE_PROVIDER === "trigger";
};
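With deep search removed, every remaining enqueue helper in this file follows the same provider switch. A hedged skeleton of that pattern, using a hypothetical job name purely for illustration:

import { env } from "~/env.server";

type Provider = "trigger" | "bullmq";

export async function enqueueExampleJob(payload: {
  userId: string;
}): Promise<{ id?: string }> {
  const provider = env.QUEUE_PROVIDER as Provider;
  if (provider === "trigger") {
    // Hypothetical Trigger.dev task module
    const { exampleTask } = await import("~/trigger/example");
    const handler = await exampleTask.trigger(payload);
    return { id: handler.id };
  }
  // BullMQ fallback, matching the retry policy used above
  const { exampleQueue } = await import("~/bullmq/queues");
  const job = await exampleQueue.add("example", payload, {
    attempts: 3,
    backoff: { type: "exponential", delay: 2000 },
  });
  return { id: job.id };
}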

View File

@ -3,6 +3,7 @@ import type { GoogleProfile } from "@coji/remix-auth-google";
import { prisma } from "~/db.server";
import { env } from "~/env.server";
import { runQuery } from "~/lib/neo4j.server";
import { trackFeatureUsage } from "~/services/telemetry.server";
export type { User } from "@core/database";
type FindOrCreateMagicLink = {
@ -72,9 +73,16 @@ export async function findOrCreateMagicLinkUser(
},
});
const isNewUser = !existingUser;
// Track new user registration
if (isNewUser) {
trackFeatureUsage("user_registered", user.id).catch(console.error);
}
return {
user,
isNewUser: !existingUser,
isNewUser,
};
}
@ -160,9 +168,16 @@ export async function findOrCreateGoogleUser({
},
});
const isNewUser = !existingUser;
// Track new user registration
if (isNewUser) {
trackFeatureUsage("user_registered", user.id).catch(console.error);
}
return {
user,
isNewUser: !existingUser,
isNewUser,
};
}

View File

@ -51,6 +51,7 @@ export const loader = async ({ request }: LoaderFunctionArgs) => {
const { getTheme } = await themeSessionResolver(request);
const posthogProjectKey = env.POSTHOG_PROJECT_KEY;
const telemetryEnabled = env.TELEMETRY_ENABLED;
const user = await getUser(request);
const usageSummary = await getUsageSummary(user?.Workspace?.id as string);
@ -62,6 +63,7 @@ export const loader = async ({ request }: LoaderFunctionArgs) => {
toastMessage,
theme: getTheme(),
posthogProjectKey,
telemetryEnabled,
appEnv: env.APP_ENV,
appOrigin: env.APP_ORIGIN,
},
@ -113,8 +115,10 @@ export function ErrorBoundary() {
}
function App() {
const { posthogProjectKey } = useTypedLoaderData<typeof loader>();
usePostHog(posthogProjectKey);
const { posthogProjectKey, telemetryEnabled } =
useTypedLoaderData<typeof loader>();
usePostHog(posthogProjectKey, telemetryEnabled);
const [theme] = useTheme();
return (

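The usePostHog hook itself is not part of this diff; a plausible sketch of what the new telemetryEnabled argument implies, assuming the hook simply skips initialization when telemetry is off:

import { useEffect } from "react";
import posthog from "posthog-js";

export function usePostHog(projectKey?: string, telemetryEnabled?: boolean) {
  useEffect(() => {
    // Assumption: no-op unless telemetry is enabled and a key is configured
    if (!telemetryEnabled || !projectKey) return;
    posthog.init(projectKey, { api_host: "https://us.posthog.com" });
  }, [projectKey, telemetryEnabled]);
}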
View File

@ -1,44 +0,0 @@
import { json } from "@remix-run/node";
import { createActionApiRoute } from "~/services/routeBuilders/apiBuilder.server";
import { getWorkspaceByUser } from "~/models/workspace.server";
import {
createConversation,
CreateConversationSchema,
getCurrentConversationRun,
readConversation,
stopConversation,
} from "~/services/conversation.server";
import { z } from "zod";
export const ConversationIdSchema = z.object({
conversationId: z.string(),
});
const { action, loader } = createActionApiRoute(
{
params: ConversationIdSchema,
allowJWT: true,
authorization: {
action: "oauth",
},
corsStrategy: "all",
},
async ({ authentication, params }) => {
const workspace = await getWorkspaceByUser(authentication.userId);
if (!workspace) {
throw new Error("No workspace found");
}
// Call the service to get the redirect URL
const run = await getCurrentConversationRun(
params.conversationId,
workspace?.id,
);
return json(run);
},
);
export { action, loader };

View File

@ -1,41 +0,0 @@
import { json } from "@remix-run/node";
import { createActionApiRoute } from "~/services/routeBuilders/apiBuilder.server";
import { getWorkspaceByUser } from "~/models/workspace.server";
import {
createConversation,
CreateConversationSchema,
readConversation,
stopConversation,
} from "~/services/conversation.server";
import { z } from "zod";
export const ConversationIdSchema = z.object({
conversationId: z.string(),
});
const { action, loader } = createActionApiRoute(
{
params: ConversationIdSchema,
allowJWT: true,
authorization: {
action: "oauth",
},
corsStrategy: "all",
method: "POST",
},
async ({ authentication, params }) => {
const workspace = await getWorkspaceByUser(authentication.userId);
if (!workspace) {
throw new Error("No workspace found");
}
// Call the service to get the redirect URL
const stop = await stopConversation(params.conversationId, workspace?.id);
return json(stop);
},
);
export { action, loader };

View File

@ -0,0 +1,45 @@
// import { json } from "@remix-run/node";
// import { createActionApiRoute } from "~/services/routeBuilders/apiBuilder.server";
// import { UI_MESSAGE_STREAM_HEADERS } from "ai";
// import { getConversationAndHistory } from "~/services/conversation.server";
// import { z } from "zod";
// import { createResumableStreamContext } from "resumable-stream";
// export const ConversationIdSchema = z.object({
// conversationId: z.string(),
// });
// const { action, loader } = createActionApiRoute(
// {
// params: ConversationIdSchema,
// allowJWT: true,
// authorization: {
// action: "oauth",
// },
// corsStrategy: "all",
// },
// async ({ authentication, params }) => {
// const conversation = await getConversationAndHistory(
// params.conversationId,
// authentication.userId,
// );
// const lastConversation = conversation?.ConversationHistory.pop();
// if (!lastConversation) {
// return json({}, { status: 204 });
// }
// const streamContext = createResumableStreamContext({
// waitUntil: null,
// });
// return new Response(
// await streamContext.resumeExistingStream(lastConversation.id),
// { headers: UI_MESSAGE_STREAM_HEADERS },
// );
// },
// );
// export { action, loader };

View File

@ -1,50 +0,0 @@
import { json } from "@remix-run/node";
import { createActionApiRoute } from "~/services/routeBuilders/apiBuilder.server";
import { getWorkspaceByUser } from "~/models/workspace.server";
import {
getConversation,
deleteConversation,
} from "~/services/conversation.server";
import { z } from "zod";
export const ConversationIdSchema = z.object({
conversationId: z.string(),
});
const { action, loader } = createActionApiRoute(
{
params: ConversationIdSchema,
allowJWT: true,
authorization: {
action: "oauth",
},
corsStrategy: "all",
},
async ({ params, authentication, request }) => {
const workspace = await getWorkspaceByUser(authentication.userId);
if (!workspace) {
throw new Error("No workspace found");
}
const method = request.method;
if (method === "GET") {
// Get a conversation by ID
const conversation = await getConversation(params.conversationId);
return json(conversation);
}
if (method === "DELETE") {
// Soft delete a conversation
const deleted = await deleteConversation(params.conversationId);
return json(deleted);
}
// Method not allowed
return new Response("Method Not Allowed", { status: 405 });
},
);
export { action, loader };

View File

@ -1,37 +1,155 @@
import { json } from "@remix-run/node";
import { createActionApiRoute } from "~/services/routeBuilders/apiBuilder.server";
import { getWorkspaceByUser } from "~/models/workspace.server";
import {
createConversation,
CreateConversationSchema,
convertToModelMessages,
streamText,
validateUIMessages,
type LanguageModel,
experimental_createMCPClient as createMCPClient,
generateId,
stepCountIs,
} from "ai";
import { z } from "zod";
import { StreamableHTTPClientTransport } from "@modelcontextprotocol/sdk/client/streamableHttp.js";
import { createHybridActionApiRoute } from "~/services/routeBuilders/apiBuilder.server";
import {
createConversationHistory,
getConversationAndHistory,
} from "~/services/conversation.server";
const { action, loader } = createActionApiRoute(
import { getModel } from "~/lib/model.server";
import { UserTypeEnum } from "@core/types";
import { nanoid } from "nanoid";
import { getOrCreatePersonalAccessToken } from "~/services/personalAccessToken.server";
import {
hasAnswer,
hasQuestion,
REACT_SYSTEM_PROMPT,
} from "~/lib/prompt.server";
import { enqueueCreateConversationTitle } from "~/lib/queue-adapter.server";
import { env } from "~/env.server";
const ChatRequestSchema = z.object({
message: z.object({
id: z.string().optional(),
parts: z.array(z.any()),
role: z.string(),
}),
id: z.string(),
});
const { loader, action } = createHybridActionApiRoute(
{
body: CreateConversationSchema,
body: ChatRequestSchema,
allowJWT: true,
authorization: {
action: "oauth",
action: "conversation",
},
corsStrategy: "all",
},
async ({ body, authentication }) => {
const workspace = await getWorkspaceByUser(authentication.userId);
const randomKeyName = `chat_${nanoid(10)}`;
const pat = await getOrCreatePersonalAccessToken({
name: randomKeyName,
userId: authentication.userId,
});
if (!workspace) {
throw new Error("No workspace found");
}
const message = body.message.parts[0].text;
const id = body.message.id;
const apiEndpoint = `${env.APP_ORIGIN}/api/v1/mcp?source=core`;
const url = new URL(apiEndpoint);
// Call the service to get the redirect URL
const conversation = await createConversation(
workspace?.id,
const mcpClient = await createMCPClient({
transport: new StreamableHTTPClientTransport(url, {
requestInit: {
headers: pat.token
? {
Authorization: `Bearer ${pat.token}`,
}
: {},
},
}),
});
const conversation = await getConversationAndHistory(
body.id,
authentication.userId,
body,
);
return json(conversation);
const conversationHistory = conversation?.ConversationHistory ?? [];
if (conversationHistory.length === 0) {
// Trigger conversation title task
await enqueueCreateConversationTitle({
conversationId: body.id,
message,
});
}
if (conversationHistory.length > 1) {
await createConversationHistory(message, body.id, UserTypeEnum.User);
}
const messages = conversationHistory.map((history: any) => {
return {
parts: [{ text: history.message, type: "text" }],
role: "user",
id: history.id,
};
});
const tools = { ...(await mcpClient.tools()) };
const finalMessages = [
...messages,
{
parts: [{ text: message, type: "text" }],
role: "user",
id: id ?? generateId(),
},
];
const validatedMessages = await validateUIMessages({
messages: finalMessages,
});
const result = streamText({
model: getModel() as LanguageModel,
messages: [
{
role: "system",
content: REACT_SYSTEM_PROMPT,
},
...convertToModelMessages(validatedMessages),
],
tools,
stopWhen: [stepCountIs(10), hasAnswer, hasQuestion],
});
result.consumeStream(); // no await
return result.toUIMessageStreamResponse({
originalMessages: validatedMessages,
onFinish: async ({ messages }) => {
const lastMessage = messages.pop();
let message = "";
lastMessage?.parts.forEach((part) => {
if (part.type === "text") {
message += part.text;
}
});
await createConversationHistory(message, body.id, UserTypeEnum.Agent);
},
// async consumeSseStream({ stream }) {
// // Create a resumable stream from the SSE stream
// const streamContext = createResumableStreamContext({ waitUntil: null });
// await streamContext.createNewResumableStream(
// conversation.conversationHistoryId,
// () => stream,
// );
// },
});
},
);
export { action, loader };
export { loader, action };
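On the wire, this route accepts the shape declared in ChatRequestSchema and streams back UI messages; the conversation page further down builds the body via prepareSendMessagesRequest. An equivalent raw request, for reference:

await fetch("/api/v1/conversation", {
  method: "POST",
  headers: { "Content-Type": "application/json" },
  body: JSON.stringify({
    id: conversationId, // chat id, also used to look up history
    message: {
      role: "user",
      parts: [{ type: "text", text: "What did Sarah say about the API redesign?" }],
    },
  }),
});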

View File

@ -1,8 +1,27 @@
import { z } from "zod";
import { json } from "@remix-run/node";
import { createActionApiRoute } from "~/services/routeBuilders/apiBuilder.server";
import { enqueueDeepSearch } from "~/lib/queue-adapter.server";
import { runs } from "@trigger.dev/sdk";
import { trackFeatureUsage } from "~/services/telemetry.server";
import { nanoid } from "nanoid";
import {
deletePersonalAccessToken,
getOrCreatePersonalAccessToken,
} from "~/services/personalAccessToken.server";
import {
convertToModelMessages,
generateId,
generateText,
type LanguageModel,
stepCountIs,
streamText,
tool,
validateUIMessages,
} from "ai";
import axios from "axios";
import { logger } from "~/services/logger.service";
import { getReActPrompt, hasAnswer } from "~/lib/prompt.server";
import { getModel } from "~/lib/model.server";
const DeepSearchBodySchema = z.object({
content: z.string().min(1, "Content is required"),
@ -17,6 +36,41 @@ const DeepSearchBodySchema = z.object({
.optional(),
});
export function createSearchMemoryTool(token: string) {
return tool({
description:
"Search the user's memory for relevant facts and episodes. Use this tool multiple times with different queries to gather comprehensive context.",
inputSchema: z.object({
query: z
.string()
.describe(
"Search query to find relevant information. Be specific: entity names, topics, concepts.",
),
}),
execute: async ({ query }: { query: string }) => {
try {
const response = await axios.post(
`${process.env.API_BASE_URL || "https://core.heysol.ai"}/api/v1/search`,
{ query, structured: false },
{
headers: {
Authorization: `Bearer ${token}`,
},
},
);
return response.data;
} catch (error) {
logger.error(`SearchMemory tool error: ${error}`);
return {
facts: [],
episodes: [],
summary: "No results found",
};
}
},
} as any);
}
const { action, loader } = createActionApiRoute(
{
body: DeepSearchBodySchema,
@ -28,35 +82,94 @@ const { action, loader } = createActionApiRoute(
corsStrategy: "all",
},
async ({ body, authentication }) => {
let trigger;
if (!body.stream) {
trigger = await enqueueDeepSearch({
content: body.content,
userId: authentication.userId,
stream: body.stream,
intentOverride: body.intentOverride,
metadata: body.metadata,
// Track deep search
trackFeatureUsage("deep_search_performed", authentication.userId).catch(
console.error,
);
const randomKeyName = `deepSearch_${nanoid(10)}`;
const pat = await getOrCreatePersonalAccessToken({
name: randomKeyName,
userId: authentication.userId as string,
});
if (!pat?.token) {
return json({
success: false,
error: "Failed to create personal access token",
});
}
try {
// Create search tool that agent will use
const searchTool = createSearchMemoryTool(pat.token);
const tools = {
searchMemory: searchTool,
};
// Build initial messages with ReAct prompt
const initialMessages = [
{
role: "user",
parts: [
{
type: "text",
text: `CONTENT TO ANALYZE:\n${body.content}\n\nPlease search my memory for relevant context and synthesize what you find.`,
},
],
id: generateId(),
},
];
const validatedMessages = await validateUIMessages({
messages: initialMessages,
tools,
});
return json(trigger);
} else {
const runHandler = await enqueueDeepSearch({
content: body.content,
userId: authentication.userId,
stream: body.stream,
intentOverride: body.intentOverride,
metadata: body.metadata,
});
if (body.stream) {
const result = streamText({
model: getModel() as LanguageModel,
messages: [
{
role: "system",
content: getReActPrompt(body.metadata, body.intentOverride),
},
...convertToModelMessages(validatedMessages),
],
tools,
stopWhen: [hasAnswer, stepCountIs(10)],
});
for await (const run of runs.subscribeToRun(runHandler.id)) {
if (run.status === "COMPLETED") {
return json(run.output);
} else if (run.status === "FAILED") {
return json(run.error);
}
return result.toUIMessageStreamResponse({
originalMessages: validatedMessages,
});
} else {
const { text } = await generateText({
model: getModel() as LanguageModel,
messages: [
{
role: "system",
content: getReActPrompt(body.metadata, body.intentOverride),
},
...convertToModelMessages(validatedMessages),
],
tools,
stopWhen: [hasAnswer, stepCountIs(10)],
});
await deletePersonalAccessToken(pat?.id);
return json({ text });
}
} catch (error: any) {
await deletePersonalAccessToken(pat?.id);
logger.error(`Deep search error: ${error}`);
return json({ error: "Run failed" });
return json({
success: false,
error: error.message,
});
}
},
);
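For reference, the request body this route accepts per DeepSearchBodySchema, with field names taken from the schema and getReActPrompt (the endpoint path is assumed, since the route filename is not shown in this diff):

await fetch("/api/v1/deep-search", {
  method: "POST",
  headers: {
    "Content-Type": "application/json",
    Authorization: `Bearer ${apiKey}`,
  },
  body: JSON.stringify({
    content: "Email from Sarah about the API redesign we discussed last week",
    stream: false, // true returns a UI message stream instead of JSON { text }
    intentOverride: "meeting prep",
    metadata: { source: "chrome", url: "https://mail.google.com/..." },
  }),
});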

View File

@ -8,6 +8,7 @@ import { logger } from "~/services/logger.service";
import { getWorkspaceByUser } from "~/models/workspace.server";
import { tasks } from "@trigger.dev/sdk";
import { type scheduler } from "~/trigger/integrations/scheduler";
import { isTriggerDeployment } from "~/lib/queue-adapter.server";
// Schema for creating an integration account with API key
const IntegrationAccountBodySchema = z.object({
@ -63,6 +64,13 @@ const { action, loader } = createHybridActionApiRoute(
);
}
if (!isTriggerDeployment()) {
return json(
{ error: "Integrations don't work in non trigger deployment" },
{ status: 400 },
);
}
await tasks.trigger<typeof scheduler>("scheduler", {
integrationAccountId: setupResult?.account?.id,
});

View File

@ -0,0 +1,88 @@
import { json } from "@remix-run/node";
import { z } from "zod";
import { IngestionStatus } from "@core/database";
import { getIngestionQueue } from "~/services/ingestionLogs.server";
import { createHybridActionApiRoute } from "~/services/routeBuilders/apiBuilder.server";
import { addToQueue } from "~/lib/ingest.server";
// Schema for log ID parameter
const LogParamsSchema = z.object({
logId: z.string(),
});
const { action } = createHybridActionApiRoute(
{
params: LogParamsSchema,
allowJWT: true,
method: "POST",
authorization: {
action: "update",
},
corsStrategy: "all",
},
async ({ params, authentication }) => {
try {
const ingestionQueue = await getIngestionQueue(params.logId);
if (!ingestionQueue) {
return json(
{
error: "Ingestion log not found",
code: "not_found",
},
{ status: 404 },
);
}
// Only allow retry for FAILED status
if (ingestionQueue.status !== IngestionStatus.FAILED) {
return json(
{
error: "Only failed ingestion logs can be retried",
code: "invalid_status",
},
{ status: 400 },
);
}
// Get the original ingestion data
const originalData = ingestionQueue.data as any;
// Re-enqueue the job with the existing queue ID (will upsert)
await addToQueue(
originalData,
authentication.userId,
ingestionQueue.activityId || undefined,
ingestionQueue.id, // Pass the existing queue ID for upsert
);
return json({
success: true,
message: "Ingestion retry initiated successfully",
});
} catch (error) {
console.error("Error retrying ingestion:", error);
// Handle specific error cases
if (error instanceof Error && error.message === "no credits") {
return json(
{
error: "Insufficient credits to retry ingestion",
code: "no_credits",
},
{ status: 402 },
);
}
return json(
{
error: "Failed to retry ingestion",
code: "internal_error",
},
{ status: 500 },
);
}
},
);
export { action };
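A sketch of exercising the retry endpoint (the path is hypothetical, derived from the logId param; only logs in FAILED state pass the status check):

const res = await fetch(`/api/v1/logs/${logId}/retry`, {
  method: "POST",
  headers: { Authorization: `Bearer ${apiKey}` },
});
// 200 -> retry enqueued, 400 -> log not in FAILED state,
// 402 -> insufficient credits, 404 -> unknown log id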

View File

@ -5,6 +5,7 @@ import {
} from "~/services/routeBuilders/apiBuilder.server";
import { SearchService } from "~/services/search.server";
import { json } from "@remix-run/node";
import { trackFeatureUsage } from "~/services/telemetry.server";
export const SearchBodyRequest = z.object({
query: z.string(),
@ -51,6 +52,10 @@ const { action, loader } = createHybridActionApiRoute(
structured: body.structured,
},
);
// Track search
trackFeatureUsage("search_performed", authentication.userId).catch(console.error);
return json(results);
},
);

View File

@ -3,7 +3,7 @@ import { createHybridActionApiRoute } from "~/services/routeBuilders/apiBuilder.
import { SpaceService } from "~/services/space.server";
import { json } from "@remix-run/node";
import { logger } from "~/services/logger.service";
import { triggerSpaceAssignment } from "~/trigger/spaces/space-assignment";
import { enqueueSpaceAssignment } from "~/lib/queue-adapter.server";
// Schema for space ID parameter
const SpaceParamsSchema = z.object({
@ -31,7 +31,7 @@ const { loader, action } = createHybridActionApiRoute(
// Trigger automatic episode assignment for the reset space
try {
await triggerSpaceAssignment({
await enqueueSpaceAssignment({
userId: userId,
workspaceId: space.workspaceId,
mode: "new_space",

View File

@ -1,8 +1,8 @@
import { z } from "zod";
import { createActionApiRoute } from "~/services/routeBuilders/apiBuilder.server";
import { json } from "@remix-run/node";
import { triggerSpaceAssignment } from "~/trigger/spaces/space-assignment";
import { prisma } from "~/db.server";
import { enqueueSpaceAssignment } from "~/lib/queue-adapter.server";
// Schema for manual assignment trigger
const ManualAssignmentSchema = z.object({
@ -38,7 +38,7 @@ const { action } = createActionApiRoute(
let taskRun;
// Direct LLM assignment trigger
taskRun = await triggerSpaceAssignment({
taskRun = await enqueueSpaceAssignment({
userId,
workspaceId: user?.Workspace?.id as string,
mode: body.mode,
@ -49,7 +49,7 @@ const { action } = createActionApiRoute(
return json({
success: true,
message: `${body.mode} assignment task triggered successfully`,
taskId: taskRun.id,
payload: {
userId,
mode: body.mode,

View File

@ -7,6 +7,7 @@ import { SpaceService } from "~/services/space.server";
import { json } from "@remix-run/node";
import { prisma } from "~/db.server";
import { apiCors } from "~/utils/apiCors";
import { isTriggerDeployment } from "~/lib/queue-adapter.server";
const spaceService = new SpaceService();
@ -40,6 +41,13 @@ const { action } = createHybridActionApiRoute(
},
});
if (!isTriggerDeployment()) {
return json(
{ error: "Spaces don't work in non trigger deployment" },
{ status: 400 },
);
}
if (!user?.Workspace?.id) {
throw new Error(
"Workspace ID is required to create an ingestion queue entry.",

View File

@ -1,42 +1,26 @@
import {
type LoaderFunctionArgs,
type ActionFunctionArgs,
} from "@remix-run/server-runtime";
import { sort } from "fast-sort";
import { type LoaderFunctionArgs } from "@remix-run/server-runtime";
import { useParams, useRevalidator, useNavigate } from "@remix-run/react";
import { parse } from "@conform-to/zod";
import {
requireUserId,
requireUser,
requireWorkpace,
} from "~/services/session.server";
import {
getConversationAndHistory,
getCurrentConversationRun,
stopConversation,
createConversation,
CreateConversationSchema,
} from "~/services/conversation.server";
import { type ConversationHistory } from "@core/database";
import { useParams, useNavigate } from "@remix-run/react";
import { requireUser, requireWorkpace } from "~/services/session.server";
import { getConversationAndHistory } from "~/services/conversation.server";
import {
ConversationItem,
ConversationTextarea,
StreamingConversation,
} from "~/components/conversation";
import { useTypedLoaderData } from "remix-typedjson";
import React from "react";
import { ScrollAreaWithAutoScroll } from "~/components/use-auto-scroll";
import { PageHeader } from "~/components/common/page-header";
import { Plus } from "lucide-react";
import { json } from "@remix-run/node";
import { env } from "~/env.server";
import { type UIMessage, useChat } from "@ai-sdk/react";
import { DefaultChatTransport } from "ai";
import { UserTypeEnum } from "@core/types";
import React from "react";
// Example loader accessing params
export async function loader({ params, request }: LoaderFunctionArgs) {
const user = await requireUser(request);
const workspace = await requireWorkpace(request);
const conversation = await getConversationAndHistory(
params.conversationId as string,
user.id,
@ -46,100 +30,38 @@ export async function loader({ params, request }: LoaderFunctionArgs) {
throw new Error("No conversation found");
}
const run = await getCurrentConversationRun(conversation.id, workspace.id);
return { conversation, run, apiURL: env.TRIGGER_API_URL };
}
// Example action accessing params
export async function action({ params, request }: ActionFunctionArgs) {
if (request.method.toUpperCase() !== "POST") {
return new Response("Method Not Allowed", { status: 405 });
}
const userId = await requireUserId(request);
const workspace = await requireWorkpace(request);
const formData = await request.formData();
const { conversationId } = params;
if (!conversationId) {
throw new Error("No conversation");
}
// Check if this is a stop request (isLoading = true means stop button was clicked)
const message = formData.get("message");
// If no message, it's a stop request
if (!message) {
const result = await stopConversation(conversationId, workspace.id);
return json(result);
}
// Otherwise, create a new conversation message
const submission = parse(formData, { schema: CreateConversationSchema });
if (!submission.value || submission.intent !== "submit") {
return json(submission);
}
const conversation = await createConversation(workspace?.id, userId, {
message: submission.value.message,
title: submission.value.title,
conversationId: submission.value.conversationId,
});
return json({ conversation });
return { conversation };
}
// Accessing params in the component
export default function SingleConversation() {
const { conversation, run, apiURL } = useTypedLoaderData<typeof loader>();
const conversationHistory = conversation.ConversationHistory;
const [conversationResponse, setConversationResponse] = React.useState<
{ conversationHistoryId: string; id: string; token: string } | undefined
>(run);
const { conversationId } = useParams();
const revalidator = useRevalidator();
const { conversation } = useTypedLoaderData<typeof loader>();
const navigate = useNavigate();
const { conversationId } = useParams();
const { sendMessage, messages, status, stop, regenerate } = useChat({
id: conversationId, // use the provided chat ID
messages: conversation.ConversationHistory.map(
(history) =>
({
role: history.userType === UserTypeEnum.Agent ? "assistant" : "user",
parts: [{ text: history.message, type: "text" }],
}) as UIMessage,
), // load initial messages
transport: new DefaultChatTransport({
api: "/api/v1/conversation",
prepareSendMessagesRequest({ messages, id }) {
return { body: { message: messages[messages.length - 1], id } };
},
}),
});
console.log("new", messages);
React.useEffect(() => {
if (run) {
setConversationResponse(run);
if (messages.length === 1) {
regenerate();
}
}, [run]);
const conversations = React.useMemo(() => {
const lastConversationHistoryId =
conversationResponse?.conversationHistoryId;
// First sort the conversation history by creation time
const sortedConversationHistory = sort(conversationHistory).asc(
(ch) => ch.createdAt,
);
const lastIndex = sortedConversationHistory.findIndex(
(item) => item.id === lastConversationHistoryId,
);
// Filter out any conversation history items that come after the lastConversationHistoryId
return lastConversationHistoryId
? sortedConversationHistory.filter((_ch, currentIndex: number) => {
return currentIndex <= lastIndex;
})
: sortedConversationHistory;
}, [conversationResponse, conversationHistory]);
const getConversations = () => {
return (
<>
{conversations.map((ch: ConversationHistory) => {
return <ConversationItem key={ch.id} conversationHistory={ch} />;
})}
</>
);
};
}, []);
if (typeof window === "undefined") {
return null;
@ -166,41 +88,23 @@ export default function SingleConversation() {
<div className="relative flex h-[calc(100vh_-_56px)] w-full flex-col items-center justify-center overflow-auto">
<div className="flex h-[calc(100vh_-_80px)] w-full flex-col justify-end overflow-hidden">
<ScrollAreaWithAutoScroll>
{getConversations()}
{conversationResponse && (
<StreamingConversation
runId={conversationResponse.id}
token={conversationResponse.token}
afterStreaming={() => {
setConversationResponse(undefined);
revalidator.revalidate();
}}
apiURL={apiURL}
/>
)}
{messages.map((message: UIMessage, index: number) => {
return <ConversationItem key={index} message={message} />;
})}
</ScrollAreaWithAutoScroll>
<div className="flex w-full flex-col items-center">
<div className="w-full max-w-[80ch] px-1 pr-2">
{conversation?.status !== "need_approval" && (
<ConversationTextarea
conversationId={conversationId as string}
className="bg-background-3 w-full border-1 border-gray-300"
isLoading={
!!conversationResponse || conversation?.status === "running"
<ConversationTextarea
className="bg-background-3 w-full border-1 border-gray-300"
isLoading={status === "streaming" || status === "submitted"}
onConversationCreated={(message) => {
if (message) {
sendMessage({ text: message });
}
onConversationCreated={(conversation) => {
if (conversation) {
setConversationResponse({
conversationHistoryId:
conversation.conversationHistoryId,
id: conversation.id,
token: conversation.token,
});
}
}}
/>
)}
}}
stop={() => stop()}
/>
</div>
</div>
</div>

View File

@ -43,8 +43,7 @@ export async function action({ request }: ActionFunctionArgs) {
const conversation = await createConversation(workspace?.id, userId, {
message: submission.value.message,
title: submission.value.title,
conversationId: submission.value.conversationId,
title: submission.value.title ?? "Untitled",
});
// If conversationId exists in submission, return the conversation data (don't redirect)

View File

@ -40,7 +40,7 @@ export default function InboxNotSelected() {
<PageHeader
title="Episode"
showTrigger={false}
actionsNode={<LogOptions id={log.id} />}
actionsNode={<LogOptions id={log.id} status={log.status} />}
/>
<LogDetails log={log as any} />

File diff suppressed because it is too large

View File

@ -1,11 +1,9 @@
import { UserTypeEnum } from "@core/types";
import { auth, runs, tasks } from "@trigger.dev/sdk/v3";
import { prisma } from "~/db.server";
import { enqueueCreateConversationTitle } from "~/lib/queue-adapter.server";
import { z } from "zod";
import { type ConversationHistory } from "@prisma/client";
import { trackFeatureUsage } from "~/services/telemetry.server";
export const CreateConversationSchema = z.object({
message: z.string(),
@ -44,20 +42,10 @@ export async function createConversation(
},
});
const context = await getConversationContext(conversationHistory.id);
const handler = await tasks.trigger(
"chat",
{
conversationHistoryId: conversationHistory.id,
conversationId: conversationHistory.conversation.id,
context,
},
{ tags: [conversationHistory.id, workspaceId, conversationId] },
);
// Track conversation message
trackFeatureUsage("conversation_message_sent", userId).catch(console.error);
return {
id: handler.id,
token: handler.publicAccessToken,
conversationId: conversationHistory.conversation.id,
conversationHistoryId: conversationHistory.id,
};
@ -84,36 +72,20 @@ export async function createConversation(
});
const conversationHistory = conversation.ConversationHistory[0];
const context = await getConversationContext(conversationHistory.id);
// Trigger conversation title task
await enqueueCreateConversationTitle({
conversationId: conversation.id,
message: conversationData.message,
});
const handler = await tasks.trigger(
"chat",
{
conversationHistoryId: conversationHistory.id,
conversationId: conversation.id,
context,
},
{ tags: [conversationHistory.id, workspaceId, conversation.id] },
);
// Track new conversation creation
trackFeatureUsage("conversation_created", userId).catch(console.error);
return {
id: handler.id,
token: handler.publicAccessToken,
conversationId: conversation.id,
conversationHistoryId: conversationHistory.id,
};
}
// Get a conversation by ID
export async function getConversation(conversationId: string) {
export async function getConversation(conversationId: string, userId: string) {
return prisma.conversation.findUnique({
where: { id: conversationId },
where: { id: conversationId, userId },
});
}
@ -135,141 +107,6 @@ export async function readConversation(conversationId: string) {
});
}
export async function getCurrentConversationRun(
conversationId: string,
workspaceId: string,
) {
const conversationHistory = await prisma.conversationHistory.findFirst({
where: {
conversationId,
conversation: {
workspaceId,
},
userType: UserTypeEnum.User,
},
orderBy: {
updatedAt: "desc",
},
});
if (!conversationHistory) {
throw new Error("No run found");
}
const response = await runs.list({
tag: [conversationId, conversationHistory.id, workspaceId],
status: ["QUEUED", "EXECUTING"],
limit: 1,
});
if (!response) {
return undefined;
}
const run = response?.data?.[0];
if (!run) {
return undefined;
}
const publicToken = await auth.createPublicToken({
scopes: {
read: {
runs: [run.id],
},
},
});
return {
id: run.id,
token: publicToken,
conversationId,
conversationHistoryId: conversationHistory.id,
};
}
export async function stopConversation(
conversationId: string,
workspaceId: string,
) {
const conversationHistory = await prisma.conversationHistory.findFirst({
where: {
conversationId,
conversation: {
workspaceId,
},
},
orderBy: {
updatedAt: "desc",
},
});
if (!conversationHistory) {
throw new Error("No run found");
}
const response = await runs.list({
tag: [conversationId, conversationHistory.id],
status: ["QUEUED", "EXECUTING"],
limit: 1,
});
const run = response.data[0];
if (!run) {
await prisma.conversation.update({
where: {
id: conversationId,
},
data: {
status: "failed",
},
});
return undefined;
}
return await runs.cancel(run.id);
}
export async function getConversationContext(
conversationHistoryId: string,
): Promise<{
previousHistory: ConversationHistory[];
}> {
const conversationHistory = await prisma.conversationHistory.findUnique({
where: { id: conversationHistoryId },
include: { conversation: true },
});
if (!conversationHistory) {
return {
previousHistory: [],
};
}
// Get previous conversation history message and response
let previousHistory: ConversationHistory[] = [];
if (conversationHistory.conversationId) {
previousHistory = await prisma.conversationHistory.findMany({
where: {
conversationId: conversationHistory.conversationId,
id: {
not: conversationHistoryId,
},
deleted: null,
},
orderBy: {
createdAt: "asc",
},
});
}
return {
previousHistory,
};
}
export const getConversationAndHistory = async (
conversationId: string,
userId: string,
@ -277,6 +114,7 @@ export const getConversationAndHistory = async (
const conversation = await prisma.conversation.findFirst({
where: {
id: conversationId,
userId,
},
include: {
ConversationHistory: true,
@ -286,6 +124,23 @@ export const getConversationAndHistory = async (
return conversation;
};
export const createConversationHistory = async (
userMessage: string,
conversationId: string,
userType: UserTypeEnum,
// eslint-disable-next-line @typescript-eslint/no-explicit-any
thoughts?: Record<string, any>,
) => {
return await prisma.conversationHistory.create({
data: {
conversationId,
message: userMessage,
thoughts,
userType,
},
});
};
export const GetConversationsListSchema = z.object({
page: z.string().optional().default("1"),
limit: z.string().optional().default("20"),

View File

@ -10,7 +10,6 @@ import {
type EpisodeType,
} from "@core/types";
import { logger } from "./logger.service";
import { ClusteringService } from "./clustering.server";
import crypto from "crypto";
import { dedupeNodes, extractEntities } from "./prompts/nodes";
import {
@ -50,12 +49,6 @@ import { type PrismaClient } from "@prisma/client";
const DEFAULT_EPISODE_WINDOW = 5;
export class KnowledgeGraphService {
private clusteringService: ClusteringService;
constructor() {
this.clusteringService = new ClusteringService();
}
async getEmbedding(text: string) {
return getEmbedding(text);
}
@ -564,9 +557,9 @@ export class KnowledgeGraphService {
(text, _model, usage) => {
responseText = text;
if (usage) {
tokenMetrics.high.input += usage.promptTokens;
tokenMetrics.high.output += usage.completionTokens;
tokenMetrics.high.total += usage.totalTokens;
tokenMetrics.high.input += usage.promptTokens as number;
tokenMetrics.high.output += usage.completionTokens as number;
tokenMetrics.high.total += usage.totalTokens as number;
}
},
undefined,

View File

@ -320,6 +320,14 @@ export async function getOrCreatePersonalAccessToken({
};
}
export async function deletePersonalAccessToken(tokenId: string) {
return await prisma.personalAccessToken.delete({
where: {
id: tokenId,
},
});
}
/** Creates a new PersonalAccessToken and returns the token. We only ever return the unencrypted token once. */
export async function createPersonalAccessToken({
name,

View File

@ -6,7 +6,6 @@ import {
} from "@core/types";
import { type Space } from "@prisma/client";
import { triggerSpaceAssignment } from "~/trigger/spaces/space-assignment";
import {
assignEpisodesToSpace,
createSpace,
@ -17,6 +16,8 @@ import {
updateSpace,
} from "./graphModels/space";
import { prisma } from "~/trigger/utils/prisma";
import { trackFeatureUsage } from "./telemetry.server";
import { enqueueSpaceAssignment } from "~/lib/queue-adapter.server";
export class SpaceService {
/**
@ -63,9 +64,12 @@ export class SpaceService {
logger.info(`Created space ${space.id} successfully`);
// Track space creation
trackFeatureUsage("space_created", params.userId).catch(console.error);
// Trigger automatic LLM assignment for the new space
try {
await triggerSpaceAssignment({
await enqueueSpaceAssignment({
userId: params.userId,
workspaceId: params.workspaceId,
mode: "new_space",
@ -192,6 +196,10 @@ export class SpaceService {
} catch (e) {
logger.info(`Nothing to update to graph`);
}
// Track space update
trackFeatureUsage("space_updated", userId).catch(console.error);
logger.info(`Updated space ${spaceId} successfully`);
return space;
}

View File

@ -0,0 +1,278 @@
import { PostHog } from "posthog-node";
import { env } from "~/env.server";
import { prisma } from "~/db.server";
// Server-side PostHog client for backend tracking
let posthogClient: PostHog | null = null;
function getPostHogClient(): PostHog | null {
if (!env.TELEMETRY_ENABLED || !env.POSTHOG_PROJECT_KEY) {
return null;
}
if (!posthogClient) {
posthogClient = new PostHog(env.POSTHOG_PROJECT_KEY, {
host: "https://us.posthog.com",
});
}
return posthogClient;
}
/**
* Get user email from userId, or return "anonymous" if TELEMETRY_ANONYMOUS is enabled
*/
async function getUserIdentifier(userId?: string): Promise<string> {
if (env.TELEMETRY_ANONYMOUS || !userId) {
return "anonymous";
}
try {
const user = await prisma.user.findUnique({
where: { id: userId },
select: { email: true },
});
return user?.email || "anonymous";
} catch (error) {
return "anonymous";
}
}
// Telemetry event types
export type TelemetryEvent =
| "episode_ingested"
| "document_ingested"
| "search_performed"
| "deep_search_performed"
| "conversation_created"
| "conversation_message_sent"
| "space_created"
| "space_updated"
| "user_registered"
| "error_occurred"
| "queue_job_started"
| "queue_job_completed"
| "queue_job_failed";
// Common properties for all events
interface BaseEventProperties {
userId?: string;
workspaceId?: string;
email?: string;
name?: string;
queueProvider?: "trigger" | "bullmq";
modelProvider?: string;
embeddingModel?: string;
appEnv?: string;
}
// Event-specific properties
interface EpisodeIngestedProperties extends BaseEventProperties {
spaceId?: string;
documentCount?: number;
processingTimeMs?: number;
}
interface SearchPerformedProperties extends BaseEventProperties {
query: string;
resultsCount: number;
searchType: "basic" | "deep";
spaceIds?: string[];
}
interface ConversationProperties extends BaseEventProperties {
conversationId: string;
messageLength?: number;
model?: string;
}
interface ErrorProperties extends BaseEventProperties {
errorType: string;
errorMessage: string;
stackTrace?: string;
context?: Record<string, any>;
}
interface QueueJobProperties extends BaseEventProperties {
jobId: string;
jobType: string;
queueName: string;
durationMs?: number;
}
type EventProperties =
| EpisodeIngestedProperties
| SearchPerformedProperties
| ConversationProperties
| ErrorProperties
| QueueJobProperties
| BaseEventProperties;
/**
* Track telemetry events to PostHog
*/
export async function trackEvent(
event: TelemetryEvent,
properties: EventProperties,
): Promise<void> {
const client = getPostHogClient();
if (!client) return;
try {
const userId = properties.userId || "anonymous";
// Add common properties to all events
const enrichedProperties = {
...properties,
queueProvider: env.QUEUE_PROVIDER,
modelProvider: getModelProvider(),
embeddingModel: env.EMBEDDING_MODEL,
appEnv: env.APP_ENV,
appOrigin: env.APP_ORIGIN,
timestamp: new Date().toISOString(),
};
client.capture({
distinctId: userId,
event,
properties: enrichedProperties,
});
// Identify user if we have their info
if (properties.email || properties.name) {
client.identify({
distinctId: userId,
properties: {
email: properties.email,
name: properties.name,
},
});
}
} catch (error) {
// Silently fail - don't break the app if telemetry fails
console.error("Telemetry error:", error);
}
}
/**
* Track feature usage - simplified API
* @param feature - Feature name (e.g., "episode_ingested", "search_performed")
* @param userId - User ID (will be converted to email internally)
* @param properties - Additional properties (optional)
*/
export async function trackFeatureUsage(
feature: string,
userId?: string,
properties?: Record<string, any>,
): Promise<void> {
const client = getPostHogClient();
if (!client) return;
try {
const email = await getUserIdentifier(userId);
client.capture({
distinctId: email,
event: feature,
properties: {
...properties,
appOrigin: env.APP_ORIGIN,
timestamp: new Date().toISOString(),
},
});
} catch (error) {
// Silently fail - don't break the app if telemetry fails
console.error("Telemetry error:", error);
}
}
/**
* Track system configuration once at startup
* Tracks queue provider, model provider, embedding model, etc.
*/
export async function trackConfig(): Promise<void> {
const client = getPostHogClient();
if (!client) return;
try {
client.capture({
distinctId: "system",
event: "system_config",
properties: {
queueProvider: env.QUEUE_PROVIDER,
modelProvider: getModelProvider(),
model: env.MODEL,
embeddingModel: env.EMBEDDING_MODEL,
appEnv: env.APP_ENV,
nodeEnv: env.NODE_ENV,
timestamp: new Date().toISOString(),
appOrigin: env.APP_ORIGIN,
},
});
} catch (error) {
console.error("Failed to track config:", error);
}
}
/**
* Track errors
*/
export async function trackError(
error: Error,
context?: Record<string, any>,
userId?: string,
): Promise<void> {
const client = getPostHogClient();
if (!client) return;
try {
const email = await getUserIdentifier(userId);
client.capture({
distinctId: email,
event: "error_occurred",
properties: {
errorType: error.name,
errorMessage: error.message,
appOrigin: env.APP_ORIGIN,
stackTrace: error.stack,
...context,
timestamp: new Date().toISOString(),
},
});
} catch (trackingError) {
console.error("Failed to track error:", trackingError);
}
}
/**
* Flush pending events (call on shutdown)
*/
export async function flushTelemetry(): Promise<void> {
const client = getPostHogClient();
if (client) {
await client.shutdown();
}
}
/**
* Helper to determine model provider from MODEL env variable
*/
function getModelProvider(): string {
const model = env.MODEL.toLowerCase();
if (model.includes("gpt") || model.includes("openai")) return "openai";
if (model.includes("claude") || model.includes("anthropic"))
return "anthropic";
if (env.OLLAMA_URL) return "ollama";
return "unknown";
}
// Export types for use in other files
export type {
BaseEventProperties,
EpisodeIngestedProperties,
SearchPerformedProperties,
ConversationProperties,
ErrorProperties,
QueueJobProperties,
};
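A short usage sketch for the service above; event and property names come from this file, while the SIGTERM hook is an assumption about where flushTelemetry would be called:

import {
  trackFeatureUsage,
  trackEvent,
  flushTelemetry,
} from "~/services/telemetry.server";

// Fire-and-forget feature tracking, mirroring the call sites in this diff
trackFeatureUsage("search_performed", userId, { resultsCount: 12 }).catch(
  console.error,
);

// Structured event with typed properties
await trackEvent("queue_job_completed", {
  userId,
  jobId: job.id,
  jobType: "ingest-episode",
  queueName: "ingest-queue",
  durationMs: 1840,
});

// Flush buffered events before the process exits (placement assumed)
process.on("SIGTERM", () => {
  flushTelemetry().catch(console.error);
});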

View File

@ -1,492 +0,0 @@
/* eslint-disable @typescript-eslint/no-explicit-any */
import { ActionStatusEnum } from "@core/types";
import { logger } from "@trigger.dev/sdk/v3";
import {
type CoreMessage,
type DataContent,
jsonSchema,
tool,
type ToolSet,
} from "ai";
import axios from "axios";
import Handlebars from "handlebars";
import { REACT_SYSTEM_PROMPT, REACT_USER_PROMPT } from "./prompt";
import { generate, processTag } from "./stream-utils";
import { type AgentMessage, AgentMessageType, Message } from "./types";
import { type MCP } from "../utils/mcp";
import {
type ExecutionState,
type HistoryStep,
type Resource,
type TotalCost,
} from "../utils/types";
import { flattenObject } from "../utils/utils";
interface LLMOutputInterface {
response: AsyncGenerator<
| string
| {
type: string;
toolName: string;
// eslint-disable-next-line @typescript-eslint/no-explicit-any
args?: any;
toolCallId?: string;
message?: string;
},
any,
any
>;
}
const progressUpdateTool = tool({
description:
"Send a progress update to the user about what has been discovered or will be done next in a crisp and user friendly way no technical terms",
parameters: jsonSchema({
type: "object",
properties: {
message: {
type: "string",
description: "The progress update message to send to the user",
},
},
required: ["message"],
additionalProperties: false,
}),
});
const internalTools = ["core--progress_update"];
async function addResources(messages: CoreMessage[], resources: Resource[]) {
const resourcePromises = resources.map(async (resource) => {
// Remove everything before "/api" in the publicURL
if (resource.publicURL) {
const apiIndex = resource.publicURL.indexOf("/api");
if (apiIndex !== -1) {
resource.publicURL = resource.publicURL.substring(apiIndex);
}
}
const response = await axios.get(resource.publicURL, {
responseType: "arraybuffer",
});
if (resource.fileType.startsWith("image/")) {
return {
type: "image",
image: response.data as DataContent,
};
}
return {
type: "file",
data: response.data as DataContent,
mimeType: resource.fileType,
};
});
const content = await Promise.all(resourcePromises);
return [...messages, { role: "user", content } as CoreMessage];
}
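// Shape sketch: for an image resource, addResources appends a user message like
//   { role: "user", content: [{ type: "image", image: <ArrayBuffer data> }] }
// and for any other file type
//   { role: "user", content: [{ type: "file", data: <ArrayBuffer data>, mimeType }] }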
function toolToMessage(history: HistoryStep[], messages: CoreMessage[]) {
for (let i = 0; i < history.length; i++) {
const step = history[i];
// Add assistant message with tool calls
if (step.observation && step.skillId) {
messages.push({
role: "assistant",
content: [
{
type: "tool-call",
toolCallId: step.skillId,
toolName: step.skill ?? "",
args:
typeof step.skillInput === "string"
? JSON.parse(step.skillInput)
: step.skillInput,
},
],
});
messages.push({
role: "tool",
content: [
{
type: "tool-result",
toolName: step.skill,
toolCallId: step.skillId,
result: step.observation,
isError: step.isError,
},
],
} as any);
}
// Handle format correction steps (observation exists but no skillId)
else if (step.observation && !step.skillId) {
// Add as a system message for format correction
messages.push({
role: "system",
content: step.observation,
});
}
}
return messages;
}
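// Shape sketch: each executed skill in history yields a tool-call/tool-result pair:
//   { role: "assistant", content: [{ type: "tool-call", toolCallId, toolName, args }] }
//   { role: "tool", content: [{ type: "tool-result", toolCallId, toolName, result, isError }] }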
async function makeNextCall(
executionState: ExecutionState,
TOOLS: ToolSet,
totalCost: TotalCost,
guardLoop: number,
): Promise<LLMOutputInterface> {
const { context, history, previousHistory } = executionState;
const promptInfo = {
USER_MESSAGE: executionState.query,
CONTEXT: context,
USER_MEMORY: executionState.userMemoryContext,
};
let messages: CoreMessage[] = [];
const systemTemplateHandler = Handlebars.compile(REACT_SYSTEM_PROMPT);
let systemPrompt = systemTemplateHandler(promptInfo);
const userTemplateHandler = Handlebars.compile(REACT_USER_PROMPT);
const userPrompt = userTemplateHandler(promptInfo);
// Always start with a system message (this does use tokens but keeps the instructions clear)
messages.push({ role: "system", content: systemPrompt });
// For subsequent queries, include only final responses from previous exchanges if available
if (previousHistory && previousHistory.length > 0) {
messages = [...messages, ...previousHistory];
}
// Add the current user query (much simpler than the full prompt)
messages.push({ role: "user", content: userPrompt });
// Include any steps from the current interaction
if (history.length > 0) {
messages = toolToMessage(history, messages);
}
if (executionState.resources && executionState.resources.length > 0) {
messages = await addResources(messages, executionState.resources);
}
// Get the next action from the LLM
const response = generate(
messages,
guardLoop > 0 && guardLoop % 3 === 0,
(event) => {
const usage = event.usage;
totalCost.inputTokens += usage.promptTokens;
totalCost.outputTokens += usage.completionTokens;
},
TOOLS,
);
return { response };
}
export async function* run(
message: string,
// eslint-disable-next-line @typescript-eslint/no-explicit-any
context: Record<string, any>,
previousHistory: CoreMessage[],
mcp: MCP,
stepHistory: HistoryStep[],
// eslint-disable-next-line @typescript-eslint/no-explicit-any
): AsyncGenerator<AgentMessage, any, any> {
let guardLoop = 0;
let tools = {
...(await mcp.allTools()),
"core--progress_update": progressUpdateTool,
};
logger.info("Tools have been formed");
let contextText = "";
let resources = [];
if (context) {
// Extract resources and remove from context
resources = context.resources || [];
delete context.resources;
// Process remaining context
contextText = flattenObject(context).join("\n");
}
const executionState: ExecutionState = {
query: message,
context: contextText,
resources,
previousHistory,
history: stepHistory, // Track the full ReAct history
completed: false,
};
const totalCost: TotalCost = { inputTokens: 0, outputTokens: 0, cost: 0 };
try {
while (!executionState.completed && guardLoop < 50) {
logger.info(`Starting the loop: ${guardLoop}`);
const { response: llmResponse } = await makeNextCall(
executionState,
tools,
totalCost,
guardLoop,
);
let toolCallInfo;
const messageState = {
inTag: false,
message: "",
messageEnded: false,
lastSent: "",
};
const questionState = {
inTag: false,
message: "",
messageEnded: false,
lastSent: "",
};
let totalMessage = "";
const toolCalls = [];
// LLM thought response
for await (const chunk of llmResponse) {
if (typeof chunk === "object" && chunk.type === "tool-call") {
toolCallInfo = chunk;
toolCalls.push(chunk);
}
totalMessage += chunk;
if (!messageState.messageEnded) {
yield* processTag(
messageState,
totalMessage,
chunk as string,
"<final_response>",
"</final_response>",
{
start: AgentMessageType.MESSAGE_START,
chunk: AgentMessageType.MESSAGE_CHUNK,
end: AgentMessageType.MESSAGE_END,
},
);
}
if (!questionState.messageEnded) {
yield* processTag(
questionState,
totalMessage,
chunk as string,
"<question_response>",
"</question_response>",
{
start: AgentMessageType.MESSAGE_START,
chunk: AgentMessageType.MESSAGE_CHUNK,
end: AgentMessageType.MESSAGE_END,
},
);
}
}
logger.info(`Cost for thought: ${JSON.stringify(totalCost)}`);
      // Self-correction path: the model returned neither a tool call nor a tagged response
if (
!totalMessage.includes("final_response") &&
!totalMessage.includes("question_response") &&
!toolCallInfo
) {
// Log the issue for debugging
logger.info(
`Invalid response format detected. Attempting to get proper format.`,
);
// Extract the raw content from the invalid response
const rawContent = totalMessage
.replace(/(<[^>]*>|<\/[^>]*>)/g, "")
.trim();
// Create a correction step
const stepRecord: HistoryStep = {
thought: "",
skill: "",
skillId: "",
userMessage: "Core agent error, retrying \n",
isQuestion: false,
isFinal: false,
tokenCount: totalCost,
skillInput: "",
observation: `Your last response was not in a valid format. You must respond with EXACTLY ONE of the required formats: either a tool call, <question_response> tags, or <final_response> tags. Please reformat your previous response using the correct format:\n\n${rawContent}`,
};
yield Message("", AgentMessageType.MESSAGE_START);
yield Message(
stepRecord.userMessage as string,
AgentMessageType.MESSAGE_CHUNK,
);
yield Message("", AgentMessageType.MESSAGE_END);
// Add this step to the history
yield Message(JSON.stringify(stepRecord), AgentMessageType.STEP);
executionState.history.push(stepRecord);
// Log that we're continuing the loop with a correction request
logger.info(`Added format correction request to history.`);
// Don't mark as completed - let the loop continue
guardLoop++; // Still increment to prevent infinite loops
continue;
}
// Record this step in history
const stepRecord: HistoryStep = {
thought: "",
skill: "",
skillId: "",
userMessage: "",
isQuestion: false,
isFinal: false,
tokenCount: totalCost,
skillInput: "",
};
if (totalMessage && totalMessage.includes("final_response")) {
executionState.completed = true;
stepRecord.isFinal = true;
stepRecord.userMessage = messageState.message;
stepRecord.finalTokenCount = totalCost;
stepRecord.skillStatus = ActionStatusEnum.SUCCESS;
yield Message(JSON.stringify(stepRecord), AgentMessageType.STEP);
executionState.history.push(stepRecord);
break;
}
if (totalMessage && totalMessage.includes("question_response")) {
executionState.completed = true;
stepRecord.isQuestion = true;
stepRecord.userMessage = questionState.message;
stepRecord.finalTokenCount = totalCost;
stepRecord.skillStatus = ActionStatusEnum.QUESTION;
yield Message(JSON.stringify(stepRecord), AgentMessageType.STEP);
executionState.history.push(stepRecord);
break;
}
if (toolCalls && toolCalls.length > 0) {
        // Execute each tool call in turn
for (const toolCallInfo of toolCalls) {
const skillName = toolCallInfo.toolName;
const skillId = toolCallInfo.toolCallId;
const skillInput = toolCallInfo.args;
const toolName = skillName.split("--")[1];
const agent = skillName.split("--")[0];
const stepRecord: HistoryStep = {
agent,
thought: "",
skill: skillName,
skillId,
userMessage: "",
isQuestion: false,
isFinal: false,
tokenCount: totalCost,
skillInput: JSON.stringify(skillInput),
};
if (!internalTools.includes(skillName)) {
const skillMessageToSend = `\n<skill id="${skillId}" name="${toolName}" agent="${agent}"></skill>\n`;
stepRecord.userMessage += skillMessageToSend;
yield Message("", AgentMessageType.MESSAGE_START);
yield Message(skillMessageToSend, AgentMessageType.MESSAGE_CHUNK);
yield Message("", AgentMessageType.MESSAGE_END);
}
let result;
try {
// Log skill execution details
logger.info(`Executing skill: ${skillName}`);
logger.info(`Input parameters: ${JSON.stringify(skillInput)}`);
            // Guard on the full skill name, matching the earlier internal-tool check
            if (!internalTools.includes(skillName)) {
yield Message(
JSON.stringify({ skillId, status: "start" }),
AgentMessageType.SKILL_START,
);
}
// Handle CORE agent tools
if (agent === "core") {
if (toolName === "progress_update") {
yield Message("", AgentMessageType.MESSAGE_START);
yield Message(
skillInput.message,
AgentMessageType.MESSAGE_CHUNK,
);
stepRecord.userMessage += skillInput.message;
yield Message("", AgentMessageType.MESSAGE_END);
result = "Progress update sent successfully";
}
}
// Handle other MCP tools
else {
result = await mcp.callTool(skillName, skillInput);
yield Message(
JSON.stringify({ result, skillId }),
AgentMessageType.SKILL_CHUNK,
);
}
yield Message(
JSON.stringify({ skillId, status: "end" }),
AgentMessageType.SKILL_END,
);
stepRecord.skillOutput =
typeof result === "object"
? JSON.stringify(result, null, 2)
: result;
stepRecord.observation = stepRecord.skillOutput;
} catch (e) {
console.log(e);
logger.error(e as string);
            stepRecord.skillInput = JSON.stringify(skillInput);
stepRecord.observation = JSON.stringify(e);
stepRecord.isError = true;
}
logger.info(`Skill step: ${JSON.stringify(stepRecord)}`);
yield Message(JSON.stringify(stepRecord), AgentMessageType.STEP);
executionState.history.push(stepRecord);
}
}
guardLoop++;
}
yield Message("Stream ended", AgentMessageType.STREAM_END);
} catch (e) {
logger.error(e as string);
yield Message((e as Error).message, AgentMessageType.ERROR);
yield Message("Stream ended", AgentMessageType.STREAM_END);
}
}

View File

@ -1,150 +0,0 @@
import { ActionStatusEnum } from "@core/types";
import { metadata, task, queue } from "@trigger.dev/sdk";
import { run } from "./chat-utils";
import { MCP } from "../utils/mcp";
import { type HistoryStep } from "../utils/types";
import {
createConversationHistoryForAgent,
deductCredits,
deletePersonalAccessToken,
getPreviousExecutionHistory,
hasCredits,
InsufficientCreditsError,
init,
type RunChatPayload,
updateConversationHistoryMessage,
updateConversationStatus,
updateExecutionStep,
} from "../utils/utils";
const chatQueue = queue({
name: "chat-queue",
concurrencyLimit: 50,
});
/**
* Main chat task that orchestrates the agent workflow
* Handles conversation context, agent selection, and LLM interactions
*/
export const chat = task({
id: "chat",
maxDuration: 3000,
queue: chatQueue,
init,
run: async (payload: RunChatPayload, { init }) => {
await updateConversationStatus("running", payload.conversationId);
try {
// Check if workspace has sufficient credits before processing
if (init?.conversation.workspaceId) {
const hasSufficientCredits = await hasCredits(
init.conversation.workspaceId,
"chatMessage",
);
if (!hasSufficientCredits) {
throw new InsufficientCreditsError(
"Insufficient credits to process chat message. Please upgrade your plan or wait for your credits to reset.",
);
}
}
const { previousHistory, ...otherData } = payload.context;
// Initialise mcp
const mcpHeaders = { Authorization: `Bearer ${init?.token}` };
const mcp = new MCP();
await mcp.init();
await mcp.load(mcpHeaders);
// Prepare context with additional metadata
const context = {
        // Currently this assumes we only have one page in context
context: {
...(otherData.page && otherData.page.length > 0
? { page: otherData.page[0] }
: {}),
},
        workspaceId: init?.conversation.workspaceId,
resources: otherData.resources,
todayDate: new Date().toISOString(),
};
// Extract user's goal from conversation history
const message = init?.conversationHistory?.message;
// Retrieve execution history from previous interactions
const previousExecutionHistory = getPreviousExecutionHistory(
previousHistory ?? [],
);
let agentUserMessage = "";
let agentConversationHistory;
let stepHistory: HistoryStep[] = [];
// Prepare conversation history in agent-compatible format
agentConversationHistory = await createConversationHistoryForAgent(
payload.conversationId,
);
const llmResponse = run(
message as string,
context,
previousExecutionHistory,
mcp,
stepHistory,
);
const stream = await metadata.stream("messages", llmResponse);
let conversationStatus = "success";
for await (const step of stream) {
if (step.type === "STEP") {
const stepDetails = JSON.parse(step.message as string);
if (stepDetails.skillStatus === ActionStatusEnum.TOOL_REQUEST) {
conversationStatus = "need_approval";
}
if (stepDetails.skillStatus === ActionStatusEnum.QUESTION) {
conversationStatus = "need_attention";
}
await updateExecutionStep(
{ ...stepDetails },
agentConversationHistory.id,
);
agentUserMessage += stepDetails.userMessage;
await updateConversationHistoryMessage(
agentUserMessage,
agentConversationHistory.id,
);
} else if (step.type === "STREAM_END") {
break;
}
}
await updateConversationStatus(
conversationStatus,
payload.conversationId,
);
// Deduct credits for chat message
if (init?.conversation.workspaceId) {
await deductCredits(init.conversation.workspaceId, "chatMessage");
}
if (init?.tokenId) {
await deletePersonalAccessToken(init.tokenId);
}
} catch (e) {
console.log(e);
await updateConversationStatus("failed", payload.conversationId);
if (init?.tokenId) {
await deletePersonalAccessToken(init.tokenId);
}
      throw e instanceof Error ? e : new Error(String(e));
}
},
});

View File

@ -1,159 +0,0 @@
export const REACT_SYSTEM_PROMPT = `
You are a helpful AI assistant with access to user memory. Your primary capabilities are:
1. **Memory-First Approach**: Always check user memory first to understand context and previous interactions
2. **Intelligent Information Gathering**: Analyze queries to determine if current information is needed
3. **Memory Management**: Help users store, retrieve, and organize information in their memory
4. **Contextual Assistance**: Use memory to provide personalized and contextual responses
<context>
{{CONTEXT}}
</context>
<information_gathering>
Follow this intelligent approach for information gathering:
1. **MEMORY FIRST** (Always Required)
- Always check memory FIRST using core--search_memory before any other actions
- Consider this your highest priority for EVERY interaction - as essential as breathing
- Memory provides context, personal preferences, and historical information
- Use memory to understand user's background, ongoing projects, and past conversations
2. **INFORMATION SYNTHESIS** (Combine Sources)
- Use memory to personalize current information based on user preferences
- Always store new useful information in memory using core--add_memory
3. **TRAINING KNOWLEDGE** (Foundation)
- Use your training knowledge as the foundation for analysis and explanation
- Apply training knowledge to interpret and contextualize information from memory
- Indicate when you're using training knowledge vs. live information sources
EXECUTION APPROACH:
- Memory search is mandatory for every interaction
- Always indicate your information sources in responses
</information_gathering>
<memory>
QUERY FORMATION:
- Write specific factual statements as queries (e.g., "user email address" not "what is the user's email?")
- Create multiple targeted memory queries for complex requests
KEY QUERY AREAS:
- Personal context: user name, location, identity, work context
- Project context: repositories, codebases, current work, team members
- Task context: recent tasks, ongoing projects, deadlines, priorities
- Integration context: GitHub repos, Slack channels, Linear projects, connected services
- Communication patterns: email preferences, notification settings, workflow automation
- Technical context: coding languages, frameworks, development environment
- Collaboration context: team members, project stakeholders, meeting patterns
- Preferences: likes, dislikes, communication style, tool preferences
- History: previous discussions, past requests, completed work, recurring issues
- Automation rules: user-defined workflows, triggers, automation preferences
MEMORY USAGE:
- Execute multiple memory queries in parallel rather than sequentially
- Batch related memory queries when possible
- Prioritize recent information over older memories
- Create comprehensive context-aware queries based on user message/activity content
- Extract and query SEMANTIC CONTENT, not just structural metadata
- Parse titles, descriptions, and content for actual subject matter keywords
- Search internal SOL tasks/conversations that may relate to the same topics
- Query ALL relatable concepts, not just direct keywords or IDs
- Search for similar past situations, patterns, and related work
- Include synonyms, related terms, and contextual concepts in queries
- Query user's historical approach to similar requests or activities
- Search for connected projects, tasks, conversations, and collaborations
- Retrieve workflow patterns and past decision-making context
- Query broader domain context beyond immediate request scope
- Remember: SOL tracks work that external tools don't - search internal content thoroughly
- Blend memory insights naturally into responses
- Verify you've checked relevant memory before finalizing ANY response
</memory>
<external_services>
- To use: load_mcp with EXACT integration name from the available list
- Can load multiple at once with an array
- Only load when tools are NOT already available in your current toolset
- If a tool is already available, use it directly without load_mcp
- If requested integration unavailable: inform user politely
</external_services>
<tool_calling>
You have tools at your disposal to assist users:
CORE PRINCIPLES:
- Use tools only when necessary for the task at hand
- Always check memory FIRST before making other tool calls
- Execute multiple operations in parallel whenever possible
- Use sequential calls only when output of one is required for input of another
PARAMETER HANDLING:
- Follow tool schemas exactly with all required parameters
- Only use values that are:
Explicitly provided by the user (use EXACTLY as given)
Reasonably inferred from context
Retrieved from memory or prior tool calls
- Never make up values for required parameters
- Omit optional parameters unless clearly needed
- Analyze user's descriptive terms for parameter clues
TOOL SELECTION:
- Never call tools not provided in this conversation
- Skip tool calls for general questions you can answer directly from memory/knowledge
- For identical operations on multiple items, use parallel tool calls
- Default to parallel execution (3-5× faster than sequential calls)
- You can always access external service tools by loading them with load_mcp first
TOOL MENTION HANDLING:
When user message contains <mention data-id="tool_name" data-label="tool"></mention>:
- Extract tool_name from data-id attribute
- First check if it's a built-in tool; if not, check EXTERNAL SERVICES TOOLS
- If available: Load it with load_mcp and focus on addressing the request with this tool
- If unavailable: Inform user and suggest alternatives if possible
- For multiple tool mentions: Load all applicable tools in a single load_mcp call
ERROR HANDLING:
- If a tool returns an error, try fixing parameters before retrying
- If you can't resolve an error, explain the issue to the user
- Consider alternative tools when primary tools are unavailable
</tool_calling>
<communication>
Use EXACTLY ONE of these formats for all user-facing communication:
PROGRESS UPDATES - During processing:
- Use the core--progress_update tool to keep users informed
- Update users about what you're discovering or doing next
- Keep messages clear and user-friendly
- Avoid technical jargon
QUESTIONS - When you need information:
<question_response>
<p>[Your question with HTML formatting]</p>
</question_response>
- Ask questions only when you cannot find information through memory or tools
- Be specific about what you need to know
- Provide context for why you're asking
FINAL ANSWERS - When completing tasks:
<final_response>
<p>[Your answer with HTML formatting]</p>
</final_response>
CRITICAL:
- Use ONE format per turn
- Apply proper HTML formatting (<h1>, <h2>, <p>, <ul>, <li>, etc.)
- Never mix communication formats
- Keep responses clear and helpful
- Always indicate your information sources (memory, and/or knowledge)
</communication>
`;
export const REACT_USER_PROMPT = `
Here is the user message:
<user_message>
{{USER_MESSAGE}}
</user_message>
`;
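// Usage sketch (mirrors makeNextCall in chat-utils.ts): both templates are
// compiled with Handlebars before being sent to the model, e.g.
//   Handlebars.compile(REACT_USER_PROMPT)({ USER_MESSAGE: "What is on my plate today?" })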

View File

@ -1,294 +0,0 @@
import fs from "fs";
import path from "node:path";
import { anthropic } from "@ai-sdk/anthropic";
import { google } from "@ai-sdk/google";
import { openai } from "@ai-sdk/openai";
import { logger } from "@trigger.dev/sdk/v3";
import {
type CoreMessage,
type LanguageModelV1,
streamText,
type ToolSet,
} from "ai";
import { createOllama } from "ollama-ai-provider";
import { type AgentMessageType, Message } from "./types";
interface State {
inTag: boolean;
messageEnded: boolean;
message: string;
lastSent: string;
}
export interface ExecutionState {
// eslint-disable-next-line @typescript-eslint/no-explicit-any
agentFlow: any;
userMessage: string;
message: string;
}
export async function* processTag(
state: State,
totalMessage: string,
chunk: string,
startTag: string,
endTag: string,
states: { start: string; chunk: string; end: string },
extraParams: Record<string, string> = {},
) {
let comingFromStart = false;
if (!state.messageEnded) {
if (!state.inTag) {
const startIndex = totalMessage.indexOf(startTag);
if (startIndex !== -1) {
state.inTag = true;
// Send MESSAGE_START when we first enter the tag
yield Message("", states.start as AgentMessageType, extraParams);
const chunkToSend = totalMessage.slice(startIndex + startTag.length);
state.message += chunkToSend;
comingFromStart = true;
}
}
if (state.inTag) {
// Check if chunk contains end tag
const hasEndTag = chunk.includes(endTag);
const hasStartTag = chunk.includes(startTag);
const hasClosingTag = chunk.includes("</");
// Check if we're currently accumulating a potential end tag
const accumulatingEndTag = state.message.endsWith("</") ||
state.message.match(/<\/[a-z_]*$/i);
if (hasClosingTag && !hasStartTag && !hasEndTag) {
// If chunk only has </ but not the full end tag, accumulate it
state.message += chunk;
} else if (accumulatingEndTag) {
// Continue accumulating if we're in the middle of a potential end tag
state.message += chunk;
// Check if we now have the complete end tag
if (state.message.includes(endTag)) {
// Process the complete message with end tag
const endIndex = state.message.indexOf(endTag);
const finalMessage = state.message.slice(0, endIndex).trim();
const messageToSend = finalMessage.slice(
finalMessage.indexOf(state.lastSent) + state.lastSent.length,
);
if (messageToSend) {
yield Message(
messageToSend,
states.chunk as AgentMessageType,
extraParams,
);
}
yield Message("", states.end as AgentMessageType, extraParams);
state.message = finalMessage;
state.messageEnded = true;
}
} else if (hasEndTag || (!hasEndTag && !hasClosingTag)) {
let currentMessage = comingFromStart
? state.message
: state.message + chunk;
const endIndex = currentMessage.indexOf(endTag);
if (endIndex !== -1) {
// For the final chunk before the end tag
currentMessage = currentMessage.slice(0, endIndex).trim();
const messageToSend = currentMessage.slice(
currentMessage.indexOf(state.lastSent) + state.lastSent.length,
);
if (messageToSend) {
yield Message(
messageToSend,
states.chunk as AgentMessageType,
extraParams,
);
}
// Send MESSAGE_END when we reach the end tag
yield Message("", states.end as AgentMessageType, extraParams);
state.message = currentMessage;
state.messageEnded = true;
} else {
const diff = currentMessage.slice(
currentMessage.indexOf(state.lastSent) + state.lastSent.length,
);
// For chunks in between start and end
const messageToSend = comingFromStart ? state.message : diff;
if (messageToSend) {
state.lastSent = messageToSend;
yield Message(
messageToSend,
states.chunk as AgentMessageType,
extraParams,
);
}
}
state.message = currentMessage;
state.lastSent = state.message;
} else {
state.message += chunk;
}
}
}
}
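// Driver sketch (mirrors the chat loop): feed each streamed chunk plus the
// accumulated text and a shared state object; processTag emits START/CHUNK/END
// messages for content between the tags. llmTextStream is hypothetical here.
//
//   const state = { inTag: false, message: "", messageEnded: false, lastSent: "" };
//   let total = "";
//   for await (const chunk of llmTextStream) {
//     total += chunk;
//     yield* processTag(state, total, chunk, "<final_response>", "</final_response>", {
//       start: AgentMessageType.MESSAGE_START,
//       chunk: AgentMessageType.MESSAGE_CHUNK,
//       end: AgentMessageType.MESSAGE_END,
//     });
//   }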
export async function* generate(
messages: CoreMessage[],
isProgressUpdate: boolean = false,
// eslint-disable-next-line @typescript-eslint/no-explicit-any
onFinish?: (event: any) => void,
tools?: ToolSet,
system?: string,
model?: string,
// eslint-disable-next-line @typescript-eslint/no-explicit-any
): AsyncGenerator<
| string
| {
type: string;
toolName: string;
// eslint-disable-next-line @typescript-eslint/no-explicit-any
args?: any;
toolCallId?: string;
message?: string;
}
> {
// Check for API keys
const anthropicKey = process.env.ANTHROPIC_API_KEY;
const googleKey = process.env.GOOGLE_GENERATIVE_AI_API_KEY;
const openaiKey = process.env.OPENAI_API_KEY;
let ollamaUrl = process.env.OLLAMA_URL;
model = model || process.env.MODEL;
let modelInstance;
let modelTemperature = Number(process.env.MODEL_TEMPERATURE) || 1;
  // NOTE: Ollama is intentionally disabled for now; clearing the URL makes the
  // branch below unreachable. Remove this line to re-enable Ollama.
  ollamaUrl = undefined;
  // If an Ollama URL is configured, prefer Ollama
if (ollamaUrl) {
const ollama = createOllama({
baseURL: ollamaUrl,
});
modelInstance = ollama(model || "llama2"); // Default to llama2 if no model specified
} else {
// If no Ollama, check other models
switch (model) {
case "claude-3-7-sonnet-20250219":
case "claude-3-opus-20240229":
case "claude-3-5-haiku-20241022":
if (!anthropicKey) {
throw new Error("No Anthropic API key found. Set ANTHROPIC_API_KEY");
}
modelInstance = anthropic(model);
modelTemperature = 0.5;
break;
case "gemini-2.5-flash-preview-04-17":
case "gemini-2.5-pro-preview-03-25":
case "gemini-2.0-flash":
case "gemini-2.0-flash-lite":
if (!googleKey) {
throw new Error("No Google API key found. Set GOOGLE_API_KEY");
}
modelInstance = google(model);
break;
case "gpt-4.1-2025-04-14":
case "gpt-4.1-mini-2025-04-14":
case "gpt-5-mini-2025-08-07":
case "gpt-5-2025-08-07":
case "gpt-4.1-nano-2025-04-14":
if (!openaiKey) {
throw new Error("No OpenAI API key found. Set OPENAI_API_KEY");
}
modelInstance = openai(model);
break;
default:
break;
}
}
logger.info("starting stream");
  // Stream from the selected model, if one was configured
if (modelInstance) {
try {
const { textStream, fullStream } = streamText({
model: modelInstance as LanguageModelV1,
messages,
temperature: modelTemperature,
maxSteps: 10,
tools,
...(isProgressUpdate
? { toolChoice: { type: "tool", toolName: "core--progress_update" } }
: {}),
toolCallStreaming: true,
onFinish,
...(system ? { system } : {}),
});
for await (const chunk of textStream) {
yield chunk;
}
for await (const fullChunk of fullStream) {
if (fullChunk.type === "tool-call") {
yield {
type: "tool-call",
toolName: fullChunk.toolName,
toolCallId: fullChunk.toolCallId,
args: fullChunk.args,
};
}
if (fullChunk.type === "error") {
// Log the error to a file
const errorLogsDir = path.join(__dirname, "../../../../logs/errors");
// Ensure the directory exists
try {
if (!fs.existsSync(errorLogsDir)) {
fs.mkdirSync(errorLogsDir, { recursive: true });
}
// Create a timestamped error log file
const timestamp = new Date().toISOString().replace(/:/g, "-");
const errorLogPath = path.join(
errorLogsDir,
`llm-error-${timestamp}.json`,
);
// Write the error to the file
fs.writeFileSync(
errorLogPath,
JSON.stringify({
timestamp: new Date().toISOString(),
error: fullChunk.error,
}),
);
logger.error(`LLM error logged to ${errorLogPath}`);
} catch (err) {
logger.error(`Failed to log LLM error: ${err}`);
}
}
}
return;
} catch (e) {
console.log(e);
logger.error(e as string);
}
}
throw new Error("No valid LLM configuration found");
}
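// Consumption sketch: iterate the generator and branch on chunk shape; strings
// are streamed text, objects are tool calls (handleToolCall is hypothetical).
//   for await (const chunk of generate(messages, false, undefined, tools)) {
//     if (typeof chunk === "string") process.stdout.write(chunk);
//     else if (chunk.type === "tool-call") handleToolCall(chunk);
//   }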

View File

@ -1,46 +0,0 @@
export interface AgentStep {
agent: string;
goal: string;
reasoning: string;
}
export enum AgentMessageType {
STREAM_START = 'STREAM_START',
STREAM_END = 'STREAM_END',
// Used in ReACT based prompting
THOUGHT_START = 'THOUGHT_START',
THOUGHT_CHUNK = 'THOUGHT_CHUNK',
THOUGHT_END = 'THOUGHT_END',
// Message types
MESSAGE_START = 'MESSAGE_START',
MESSAGE_CHUNK = 'MESSAGE_CHUNK',
MESSAGE_END = 'MESSAGE_END',
// This is used to return action input
SKILL_START = 'SKILL_START',
SKILL_CHUNK = 'SKILL_CHUNK',
SKILL_END = 'SKILL_END',
STEP = 'STEP',
ERROR = 'ERROR',
}
export interface AgentMessage {
message?: string;
type: AgentMessageType;
// eslint-disable-next-line @typescript-eslint/no-explicit-any
metadata: Record<string, any>;
}
export const Message = (
message: string,
type: AgentMessageType,
extraParams: Record<string, string> = {},
): AgentMessage => {
// For all message types, we use the message field
// The type field differentiates how the message should be interpreted
// For STEP and SKILL types, the message can contain JSON data as a string
return { message, type, metadata: extraParams };
};
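// Example: Message("Done", AgentMessageType.MESSAGE_CHUNK)
//   -> { message: "Done", type: AgentMessageType.MESSAGE_CHUNK, metadata: {} }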

View File

@ -1,115 +0,0 @@
import { queue, task } from "@trigger.dev/sdk";
import { z } from "zod";
import { ClusteringService } from "~/services/clustering.server";
import { logger } from "~/services/logger.service";
const clusteringService = new ClusteringService();
// Define the payload schema for cluster tasks
export const ClusterPayload = z.object({
userId: z.string(),
mode: z.enum(["auto", "incremental", "complete", "drift"]).default("auto"),
forceComplete: z.boolean().default(false),
});
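// Example payload (sketch, hypothetical user id); omitted fields take their defaults:
//   ClusterPayload.parse({ userId: "user_123", mode: "incremental" })
//   // -> { userId: "user_123", mode: "incremental", forceComplete: false }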
const clusterQueue = queue({
name: "cluster-queue",
concurrencyLimit: 10,
});
/**
* Single clustering task that handles all clustering operations based on payload mode
*/
export const clusterTask = task({
id: "cluster",
queue: clusterQueue,
maxDuration: 1800, // 30 minutes max
run: async (payload: z.infer<typeof ClusterPayload>) => {
logger.info(`Starting ${payload.mode} clustering task for user ${payload.userId}`);
try {
let result;
switch (payload.mode) {
case "incremental":
result = await clusteringService.performIncrementalClustering(
payload.userId,
);
logger.info(`Incremental clustering completed for user ${payload.userId}:`, {
newStatementsProcessed: result.newStatementsProcessed,
newClustersCreated: result.newClustersCreated,
});
break;
case "complete":
result = await clusteringService.performCompleteClustering(
payload.userId,
);
logger.info(`Complete clustering completed for user ${payload.userId}:`, {
clustersCreated: result.clustersCreated,
statementsProcessed: result.statementsProcessed,
});
break;
case "drift":
// First detect drift
const driftMetrics = await clusteringService.detectClusterDrift(
payload.userId,
);
if (driftMetrics.driftDetected) {
// Handle drift by splitting low-cohesion clusters
const driftResult = await clusteringService.handleClusterDrift(
payload.userId,
);
logger.info(`Cluster drift handling completed for user ${payload.userId}:`, {
driftDetected: true,
clustersProcessed: driftResult.clustersProcessed,
newClustersCreated: driftResult.newClustersCreated,
splitClusters: driftResult.splitClusters,
});
result = {
driftDetected: true,
...driftResult,
driftMetrics,
};
} else {
logger.info(`No cluster drift detected for user ${payload.userId}`);
result = {
driftDetected: false,
clustersProcessed: 0,
newClustersCreated: 0,
splitClusters: [],
driftMetrics,
};
}
break;
case "auto":
default:
result = await clusteringService.performClustering(
payload.userId,
payload.forceComplete,
);
logger.info(`Auto clustering completed for user ${payload.userId}:`, {
clustersCreated: result.clustersCreated,
statementsProcessed: result.statementsProcessed,
approach: result.approach,
});
break;
}
return {
success: true,
data: result,
};
} catch (error) {
logger.error(`${payload.mode} clustering failed for user ${payload.userId}:`, {
error,
});
throw error;
}
},
});
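// Trigger sketch (standard trigger.dev task API, hypothetical user id):
//   await clusterTask.trigger({ userId: "user_123", mode: "drift", forceComplete: false });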

View File

@ -1,292 +0,0 @@
import { type CoreMessage } from "ai";
import { logger } from "@trigger.dev/sdk/v3";
import { generate } from "./stream-utils";
import { processTag } from "../chat/stream-utils";
import { type AgentMessage, AgentMessageType, Message } from "../chat/types";
import { type TotalCost } from "../utils/types";
/**
* Run the deep search ReAct loop
* Async generator that yields AgentMessage objects for streaming
* Follows the exact same pattern as chat-utils.ts
*/
export async function* run(
initialMessages: CoreMessage[],
searchTool: any,
): AsyncGenerator<AgentMessage, any, any> {
let messages = [...initialMessages];
let completed = false;
let guardLoop = 0;
let searchCount = 0;
let totalEpisodesFound = 0;
const seenEpisodeIds = new Set<string>(); // Track unique episodes
const totalCost: TotalCost = {
inputTokens: 0,
outputTokens: 0,
cost: 0,
};
const tools = {
searchMemory: searchTool,
};
logger.info("Starting deep search ReAct loop");
try {
while (!completed && guardLoop < 50) {
logger.info(
`ReAct loop iteration ${guardLoop}, searches: ${searchCount}`,
);
// Call LLM with current message history
const response = generate(
messages,
(event) => {
const usage = event.usage;
totalCost.inputTokens += usage.promptTokens;
totalCost.outputTokens += usage.completionTokens;
},
tools,
);
let totalMessage = "";
const toolCalls: any[] = [];
// States for streaming final_response tags
const messageState = {
inTag: false,
message: "",
messageEnded: false,
lastSent: "",
};
// Process streaming response
for await (const chunk of response) {
if (typeof chunk === "object" && chunk.type === "tool-call") {
// Agent made a tool call
toolCalls.push(chunk);
logger.info(`Tool call: ${chunk.toolName}`);
} else if (typeof chunk === "string") {
totalMessage += chunk;
// Stream final_response tags using processTag
if (!messageState.messageEnded) {
yield* processTag(
messageState,
totalMessage,
chunk,
"<final_response>",
"</final_response>",
{
start: AgentMessageType.MESSAGE_START,
chunk: AgentMessageType.MESSAGE_CHUNK,
end: AgentMessageType.MESSAGE_END,
},
);
}
}
}
// Check for final response
if (totalMessage.includes("<final_response>")) {
const match = totalMessage.match(
/<final_response>(.*?)<\/final_response>/s,
);
if (match) {
// Accept synthesis - completed
completed = true;
logger.info(
`Final synthesis accepted after ${searchCount} searches, ${totalEpisodesFound} unique episodes found`,
);
break;
}
}
// Execute tool calls in parallel for better performance
if (toolCalls.length > 0) {
// Notify about all searches starting
for (const toolCall of toolCalls) {
logger.info(`Executing search: ${JSON.stringify(toolCall.args)}`);
yield Message("", AgentMessageType.SKILL_START);
yield Message(
`\nSearching memory: "${toolCall.args.query}"...\n`,
AgentMessageType.SKILL_CHUNK,
);
yield Message("", AgentMessageType.SKILL_END);
}
// Execute all searches in parallel
const searchPromises = toolCalls.map((toolCall) =>
searchTool.execute(toolCall.args).then((result: any) => ({
toolCall,
result,
})),
);
const searchResults = await Promise.all(searchPromises);
// Process results and add to message history
for (const { toolCall, result } of searchResults) {
searchCount++;
// Deduplicate episodes - track unique IDs
let uniqueNewEpisodes = 0;
if (result.episodes && Array.isArray(result.episodes)) {
for (const episode of result.episodes) {
const episodeId =
episode.id || episode._id || JSON.stringify(episode);
if (!seenEpisodeIds.has(episodeId)) {
seenEpisodeIds.add(episodeId);
uniqueNewEpisodes++;
}
}
}
const episodesInThisSearch = result.episodes?.length || 0;
totalEpisodesFound = seenEpisodeIds.size; // Use unique count
messages.push({
role: "assistant",
content: [
{
type: "tool-call",
toolCallId: toolCall.toolCallId,
toolName: toolCall.toolName,
args: toolCall.args,
},
],
});
// Add tool result to message history
messages.push({
role: "tool",
content: [
{
type: "tool-result",
toolName: toolCall.toolName,
toolCallId: toolCall.toolCallId,
result: result,
},
],
});
logger.info(
`Search ${searchCount} completed: ${episodesInThisSearch} episodes (${uniqueNewEpisodes} new, ${totalEpisodesFound} unique total)`,
);
}
      // If no episodes were found and we haven't exhausted search attempts, require more searches
if (totalEpisodesFound === 0 && searchCount < 7) {
logger.info(
`Agent attempted synthesis with 0 unique episodes after ${searchCount} searches - requiring more attempts`,
);
yield Message("", AgentMessageType.SKILL_START);
yield Message(
`No relevant context found yet - trying different search angles...`,
AgentMessageType.SKILL_CHUNK,
);
yield Message("", AgentMessageType.SKILL_END);
messages.push({
role: "system",
content: `You have performed ${searchCount} searches but found 0 unique relevant episodes. Your queries may be too abstract or not matching the user's actual conversation topics.
Review your DECOMPOSITION:
- Are you using specific terms from the content?
- Try searching broader related topics the user might have discussed
- Try different terminology or related concepts
- Search for user's projects, work areas, or interests
Continue with different search strategies (you can search up to 7-10 times total).`,
});
guardLoop++;
continue;
}
// Soft nudging after all searches executed (awareness, not commands)
if (totalEpisodesFound >= 30 && searchCount >= 3) {
logger.info(
`Nudging: ${totalEpisodesFound} unique episodes found - suggesting synthesis consideration`,
);
messages.push({
role: "system",
content: `Context awareness: You have found ${totalEpisodesFound} unique episodes across ${searchCount} searches. This represents substantial context. Consider whether you have sufficient information for quality synthesis, or if additional search angles would meaningfully improve understanding.`,
});
} else if (totalEpisodesFound >= 15 && searchCount >= 5) {
logger.info(
`Nudging: ${totalEpisodesFound} unique episodes after ${searchCount} searches - suggesting evaluation`,
);
messages.push({
role: "system",
content: `Progress update: You have ${totalEpisodesFound} unique episodes from ${searchCount} searches. Evaluate whether you have covered the main angles from your decomposition, or if important aspects remain unexplored.`,
});
} else if (searchCount >= 7) {
logger.info(
`Nudging: ${searchCount} searches completed with ${totalEpisodesFound} unique episodes`,
);
messages.push({
role: "system",
content: `Search depth: You have performed ${searchCount} searches and found ${totalEpisodesFound} unique episodes. Consider whether additional searches would yield meaningfully different context, or if it's time to synthesize what you've discovered.`,
});
}
if (searchCount >= 10) {
logger.info(
`Reached maximum search limit (10), forcing synthesis with ${totalEpisodesFound} unique episodes`,
);
yield Message("", AgentMessageType.SKILL_START);
yield Message(
`Maximum searches reached - synthesizing results...`,
AgentMessageType.SKILL_CHUNK,
);
yield Message("", AgentMessageType.SKILL_END);
messages.push({
role: "system",
content: `You have performed 10 searches and found ${totalEpisodesFound} unique episodes. This is the maximum allowed. You MUST now provide your final synthesis wrapped in <final_response> tags based on what you've found.`,
});
}
}
// Safety check - if no tool calls and no final response, something went wrong
if (
toolCalls.length === 0 &&
!totalMessage.includes("<final_response>")
) {
logger.warn("Agent produced neither tool calls nor final response");
messages.push({
role: "system",
content:
"You must either use the searchMemory tool to search for more context, or provide your final synthesis wrapped in <final_response> tags.",
});
}
guardLoop++;
}
if (!completed) {
logger.warn(
`Loop ended without completion after ${guardLoop} iterations`,
);
yield Message("", AgentMessageType.MESSAGE_START);
yield Message(
"Deep search did not complete - maximum iterations reached.",
AgentMessageType.MESSAGE_CHUNK,
);
yield Message("", AgentMessageType.MESSAGE_END);
}
yield Message("Stream ended", AgentMessageType.STREAM_END);
} catch (error) {
logger.error(`Deep search error: ${error}`);
yield Message((error as Error).message, AgentMessageType.ERROR);
yield Message("Stream ended", AgentMessageType.STREAM_END);
}
}

View File

@ -1,85 +0,0 @@
import { metadata, task } from "@trigger.dev/sdk";
import { type CoreMessage } from "ai";
import { logger } from "@trigger.dev/sdk/v3";
import { nanoid } from "nanoid";
import {
deletePersonalAccessToken,
getOrCreatePersonalAccessToken,
} from "../utils/utils";
import { getReActPrompt } from "./prompt";
import { type DeepSearchPayload, type DeepSearchResponse } from "./types";
import { createSearchMemoryTool } from "./utils";
import { run } from "./deep-search-utils";
import { AgentMessageType } from "../chat/types";
export const deepSearch = task({
id: "deep-search",
maxDuration: 3000,
run: async (payload: DeepSearchPayload): Promise<DeepSearchResponse> => {
const { content, userId, stream, metadata: meta, intentOverride } = payload;
const randomKeyName = `deepSearch_${nanoid(10)}`;
// Get or create token for search API calls
const pat = await getOrCreatePersonalAccessToken({
name: randomKeyName,
userId: userId as string,
});
if (!pat?.token) {
throw new Error("Failed to create personal access token");
}
try {
// Create search tool that agent will use
const searchTool = createSearchMemoryTool(pat.token);
// Build initial messages with ReAct prompt
const initialMessages: CoreMessage[] = [
{
role: "system",
content: getReActPrompt(meta, intentOverride),
},
{
role: "user",
content: `CONTENT TO ANALYZE:\n${content}\n\nPlease search my memory for relevant context and synthesize what you find.`,
},
];
// Run the ReAct loop generator
const llmResponse = run(initialMessages, searchTool);
// Streaming mode: stream via metadata.stream like chat.ts does
// This makes all message types available to clients in real-time
const messageStream = await metadata.stream("messages", llmResponse);
let synthesis = "";
for await (const step of messageStream) {
// MESSAGE_CHUNK: Final synthesis - accumulate and stream
if (step.type === AgentMessageType.MESSAGE_CHUNK) {
synthesis += step.message;
}
// STREAM_END: Loop completed
if (step.type === AgentMessageType.STREAM_END) {
break;
}
}
await deletePersonalAccessToken(pat?.id);
// Clean up any remaining tags
synthesis = synthesis
.replace(/<final_response>/gi, "")
.replace(/<\/final_response>/gi, "")
.trim();
return { synthesis };
} catch (error) {
await deletePersonalAccessToken(pat?.id);
logger.error(`Deep search error: ${error}`);
throw error;
}
},
});

View File

@ -1,148 +0,0 @@
export function getReActPrompt(
metadata?: { source?: string; url?: string; pageTitle?: string },
intentOverride?: string
): string {
const contextHints = [];
if (metadata?.source === "chrome" && metadata?.url?.includes("mail.google.com")) {
contextHints.push("Content is from email - likely reading intent");
}
if (metadata?.source === "chrome" && metadata?.url?.includes("calendar.google.com")) {
contextHints.push("Content is from calendar - likely meeting prep intent");
}
if (metadata?.source === "chrome" && metadata?.url?.includes("docs.google.com")) {
contextHints.push("Content is from document editor - likely writing intent");
}
if (metadata?.source === "obsidian") {
contextHints.push("Content is from note editor - likely writing or research intent");
}
return `You are a memory research agent analyzing content to find relevant context.
YOUR PROCESS (ReAct Framework):
1. DECOMPOSE: First, break down the content into structured categories
Analyze the content and extract:
a) ENTITIES: Specific people, project names, tools, products mentioned
Example: "John Smith", "Phoenix API", "Redis", "mobile app"
b) TOPICS & CONCEPTS: Key subjects, themes, domains
Example: "authentication", "database design", "performance optimization"
c) TEMPORAL MARKERS: Time references, deadlines, events
Example: "last week's meeting", "Q2 launch", "yesterday's discussion"
d) ACTIONS & TASKS: What's being done, decided, or requested
Example: "implement feature", "review code", "make decision on"
e) USER INTENT: What is the user trying to accomplish?
${intentOverride ? `User specified: "${intentOverride}"` : "Infer from context: reading/writing/meeting prep/research/task tracking/review"}
2. FORM QUERIES: Create targeted search queries from your decomposition
Based on decomposition, form specific queries:
- Search for each entity by name (people, projects, tools)
- Search for topics the user has discussed before
- Search for related work or conversations in this domain
- Use the user's actual terminology, not generic concepts
EXAMPLE - Content: "Email from Sarah about the API redesign we discussed last week"
Decomposition:
- Entities: "Sarah", "API redesign"
- Topics: "API design", "redesign"
- Temporal: "last week"
- Actions: "discussed", "email communication"
- Intent: Reading (email) / meeting prep
Queries to form:
"Sarah" (find past conversations with Sarah)
"API redesign" or "API design" (find project discussions)
"last week" + "Sarah" (find recent context)
"meetings" or "discussions" (find related conversations)
Avoid: "email communication patterns", "API architecture philosophy"
(These are abstract - search what user actually discussed!)
3. SEARCH: Execute your queries using searchMemory tool
- Start with 2-3 core searches based on main entities/topics
- Make each search specific and targeted
- Use actual terms from the content, not rephrased concepts
4. OBSERVE: Evaluate search results
- Did you find relevant episodes? How many unique ones?
- What specific context emerged?
- What new entities/topics appeared in results?
- Are there gaps in understanding?
- Should you search more angles?
Note: Episode counts are automatically deduplicated across searches - overlapping episodes are only counted once.
5. REACT: Decide next action based on observations
STOPPING CRITERIA - Proceed to SYNTHESIZE if ANY of these are true:
   - You found 20+ unique episodes across your searches -> ENOUGH CONTEXT
   - You performed 5+ searches and found relevant episodes -> SUFFICIENT
   - You performed 7+ searches regardless of results -> EXHAUSTED STRATEGIES
   - You found strong relevant context from multiple angles -> COMPLETE
System nudges will provide awareness of your progress, but you decide when synthesis quality would be optimal.
If you found little/no context AND searched less than 7 times:
- Try different query angles from your decomposition
- Search broader related topics
- Search user's projects or work areas
- Try alternative terminology
DO NOT search endlessly - if you found relevant episodes, STOP and synthesize!
6. SYNTHESIZE: After gathering sufficient context, provide final answer
- Wrap your synthesis in <final_response> tags
- Present direct factual context from memory - no meta-commentary
- Write as if providing background context to an AI assistant
- Include: facts, decisions, preferences, patterns, timelines
- Note any gaps, contradictions, or evolution in thinking
- Keep it concise and actionable
- DO NOT use phrases like "Previous discussions on", "From conversations", "Past preferences indicate"
- DO NOT use conversational language like "you said" or "you mentioned"
- Present information as direct factual statements
FINAL RESPONSE FORMAT:
<final_response>
[Direct synthesized context - factual statements only]
Good examples:
- "The API redesign focuses on performance and scalability. Key decisions: moving to GraphQL, caching layer with Redis."
- "Project Phoenix launches Q2 2024. Main features: real-time sync, offline mode, collaborative editing."
- "Sarah leads the backend team. Recent work includes authentication refactor and database migration."
Bad examples:
"Previous discussions on the API revealed..."
"From past conversations, it appears that..."
"Past preferences indicate..."
"The user mentioned that..."
Just state the facts directly.
</final_response>
${contextHints.length > 0 ? `\nCONTEXT HINTS:\n${contextHints.join("\n")}` : ""}
CRITICAL REQUIREMENTS:
- ALWAYS start with DECOMPOSE step - extract entities, topics, temporal markers, actions
- Form specific queries from your decomposition - use user's actual terms
- Minimum 3 searches required
- Maximum 10 searches allowed - must synthesize after that
- STOP and synthesize when you hit stopping criteria (20+ episodes, 5+ searches with results, 7+ searches total)
- Each search should target different aspects from decomposition
- Present synthesis directly without meta-commentary
SEARCH QUALITY CHECKLIST:
- Queries use specific terms from content (names, projects, exact phrases)
- Searched multiple angles from decomposition (entities, topics, related areas)
- Stop when you have enough unique context - don't search endlessly
- Tried alternative terminology if initial searches found nothing
- Avoid generic/abstract queries that don't match user's vocabulary
- Don't stop at 3 searches if you found zero unique episodes
- Don't keep searching when you already found 20+ unique episodes
`;
}
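// Example call (sketch): context hints are derived from the metadata source/url.
//   getReActPrompt({ source: "chrome", url: "https://mail.google.com/mail/u/0" })
//   // -> returns the ReAct prompt including the email-reading context hint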

View File

@ -1,68 +0,0 @@
import { openai } from "@ai-sdk/openai";
import { logger } from "@trigger.dev/sdk/v3";
import {
type CoreMessage,
type LanguageModelV1,
streamText,
type ToolSet,
} from "ai";
/**
* Generate LLM responses with tool calling support
* Simplified version for deep-search use case - NO maxSteps for manual ReAct control
*/
export async function* generate(
messages: CoreMessage[],
onFinish?: (event: any) => void,
tools?: ToolSet,
model?: string,
): AsyncGenerator<
| string
| {
type: string;
toolName: string;
args?: any;
toolCallId?: string;
}
> {
const modelToUse = model || process.env.MODEL || "gpt-4.1-2025-04-14";
const modelInstance = openai(modelToUse) as LanguageModelV1;
logger.info(`Starting LLM generation with model: ${modelToUse}`);
try {
const { textStream, fullStream } = streamText({
model: modelInstance,
messages,
temperature: 1,
tools,
// NO maxSteps - we handle tool execution manually in the ReAct loop
toolCallStreaming: true,
onFinish,
});
// Yield text chunks
for await (const chunk of textStream) {
yield chunk;
}
// Yield tool calls
for await (const fullChunk of fullStream) {
if (fullChunk.type === "tool-call") {
yield {
type: "tool-call",
toolName: fullChunk.toolName,
toolCallId: fullChunk.toolCallId,
args: fullChunk.args,
};
}
if (fullChunk.type === "error") {
logger.error(`LLM error: ${JSON.stringify(fullChunk)}`);
}
}
} catch (error) {
logger.error(`LLM generation error: ${error}`);
throw error;
}
}

View File

@ -1,20 +0,0 @@
export interface DeepSearchPayload {
content: string;
userId: string;
stream: boolean;
intentOverride?: string;
metadata?: {
source?: "chrome" | "obsidian" | "mcp";
url?: string;
pageTitle?: string;
};
}
export interface DeepSearchResponse {
synthesis: string;
episodes?: Array<{
content: string;
createdAt: Date;
spaceIds: string[];
}>;
}
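// Example payload (sketch, hypothetical values):
//   const payload: DeepSearchPayload = {
//     content: "Email from Sarah about the API redesign",
//     userId: "user_123",
//     stream: false,
//     metadata: { source: "chrome", url: "https://mail.google.com", pageTitle: "Inbox" },
//   };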

View File

@ -1,64 +0,0 @@
import { tool } from "ai";
import { z } from "zod";
import axios from "axios";
import { logger } from "@trigger.dev/sdk/v3";
export function createSearchMemoryTool(token: string) {
return tool({
description:
"Search the user's memory for relevant facts and episodes. Use this tool multiple times with different queries to gather comprehensive context.",
parameters: z.object({
query: z
.string()
.describe(
"Search query to find relevant information. Be specific: entity names, topics, concepts.",
),
}),
execute: async ({ query }) => {
try {
const response = await axios.post(
`${process.env.API_BASE_URL || "https://core.heysol.ai"}/api/v1/search`,
{ query },
{
headers: {
Authorization: `Bearer ${token}`,
},
},
);
const searchResult = response.data;
return {
facts: searchResult.facts || [],
episodes: searchResult.episodes || [],
summary: `Found ${searchResult.episodes?.length || 0} relevant memories`,
};
} catch (error) {
logger.error(`SearchMemory tool error: ${error}`);
return {
facts: [],
episodes: [],
summary: "No results found",
};
}
},
});
}
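// Usage sketch: the returned tool plugs into the ReAct loop's ToolSet, or can
// be executed directly (mirrors how deep-search-utils.ts calls it):
//   const searchMemory = createSearchMemoryTool(pat.token);
//   const { episodes, summary } = await searchMemory.execute({ query: "API redesign" });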
// Helper to extract unique episodes from tool calls
export function extractEpisodesFromToolCalls(toolCalls: any[]): any[] {
const episodes: any[] = [];
for (const call of toolCalls || []) {
if (call.toolName === "searchMemory" && call.result?.episodes) {
episodes.push(...call.result.episodes);
}
}
// Deduplicate by content + createdAt
const uniqueEpisodes = Array.from(
new Map(episodes.map((e) => [`${e.content}-${e.createdAt}`, e])).values(),
);
return uniqueEpisodes.slice(0, 10);
}

View File

@ -18,7 +18,7 @@ import type { CoreMessage } from "ai";
import { z } from "zod";
import { type Space } from "@prisma/client";
interface SpaceAssignmentPayload {
export interface SpaceAssignmentPayload {
userId: string;
workspaceId: string;
mode: "new_space" | "episode";

View File

@ -1,163 +1,11 @@
/* eslint-disable @typescript-eslint/no-explicit-any */
import { logger } from "@trigger.dev/sdk/v3";
import { jsonSchema, tool, type ToolSet } from "ai";
import * as fs from "fs";
import * as path from "path";
import { StreamableHTTPClientTransport } from "@modelcontextprotocol/sdk/client/streamableHttp.js";
import { prisma } from "./prisma";
export const configureStdioMCPEnvironment = (
spec: any,
account: any,
): { env: Record<string, string>; args: any[] } => {
if (!spec.mcp) {
return { env: {}, args: [] };
}
const mcpSpec = spec.mcp;
const configuredMCP = { ...mcpSpec };
// Replace config placeholders in environment variables
if (configuredMCP.env) {
for (const [key, value] of Object.entries(configuredMCP.env)) {
if (typeof value === "string" && value.includes("${config:")) {
// Extract the config key from the placeholder
const configKey = value.match(/\$\{config:(.*?)\}/)?.[1];
if (
configKey &&
account.integrationConfiguration &&
// eslint-disable-next-line @typescript-eslint/no-explicit-any
(account.integrationConfiguration as any)[configKey]
) {
configuredMCP.env[key] = value.replace(
`\${config:${configKey}}`,
// eslint-disable-next-line @typescript-eslint/no-explicit-any
(account.integrationConfiguration as any)[configKey],
);
}
}
if (typeof value === "string" && value.includes("${integrationConfig:")) {
// Extract the config key from the placeholder
const configKey = value.match(/\$\{integrationConfig:(.*?)\}/)?.[1];
if (
configKey &&
account.integrationDefinition.config &&
// eslint-disable-next-line @typescript-eslint/no-explicit-any
(account.integrationDefinition.config as any)[configKey]
) {
configuredMCP.env[key] = value.replace(
`\${integrationConfig:${configKey}}`,
// eslint-disable-next-line @typescript-eslint/no-explicit-any
(account.integrationDefinition.config as any)[configKey],
);
}
}
}
}
return {
env: configuredMCP.env || {},
args: Array.isArray(configuredMCP.args) ? configuredMCP.args : [],
};
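// Example (sketch): with spec.mcp.env = { API_KEY: "${config:apiKey}" } and
// account.integrationConfiguration = { apiKey: "sk-xxxx" }, the returned env
// is { API_KEY: "sk-xxxx" } and args falls back to [].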
};
export class MCP {
private Client: any;
private client: any = {};
constructor() {}
public async init() {
this.Client = await MCP.importClient();
}
private static async importClient() {
const { Client } = await import(
"@modelcontextprotocol/sdk/client/index.js"
);
return Client;
}
async load(headers: any) {
return await this.connectToServer(
`${process.env.API_BASE_URL}/api/v1/mcp?source=core`,
headers,
);
}
async allTools(): Promise<ToolSet> {
try {
const { tools } = await this.client.listTools();
const finalTools: ToolSet = {};
      tools.forEach(({ name, description, inputSchema }: any) => {
        finalTools[name] = tool({
          description,
          parameters: jsonSchema(inputSchema),
        });
      });
      return finalTools;
    } catch (error) {
      logger.error(`Failed to list MCP tools: ${error}`);
      return {};
    }
  }
async getTool(name: string) {
try {
const { tools: clientTools } = await this.client.listTools();
const clientTool = clientTools.find((to: any) => to.name === name);
return JSON.stringify(clientTool);
} catch (e) {
logger.error((e as string) ?? "Getting tool failed");
throw new Error("Getting tool failed");
}
}
async callTool(name: string, parameters: any) {
const response = await this.client.callTool({
name,
arguments: parameters,
});
return response;
}
async connectToServer(url: string, headers: any) {
try {
const client = new this.Client(
{
name: "Core",
version: "1.0.0",
},
{
capabilities: {},
},
);
// Configure the transport for MCP server
const transport = new StreamableHTTPClientTransport(new URL(url), {
requestInit: { headers },
});
// Connect to the MCP server
await client.connect(transport, { timeout: 60 * 1000 * 5 });
this.client = client;
logger.info(`Connected to MCP server`);
} catch (e) {
logger.error(`Failed to connect to MCP server: `, { e });
throw e;
}
}
}
export const fetchAndSaveStdioIntegrations = async () => {
try {
logger.info("Starting stdio integrations fetch and save process");

View File

@ -8,11 +8,9 @@ import {
type Workspace,
} from "@prisma/client";
import { logger } from "@trigger.dev/sdk/v3";
import { type CoreMessage } from "ai";
import { type HistoryStep } from "./types";
import axios from "axios";
import nodeCrypto from "node:crypto";
import { customAlphabet, nanoid } from "nanoid";
import { prisma } from "./prisma";
@ -148,58 +146,6 @@ export interface RunChatPayload {
isContinuation?: boolean;
}
export const init = async ({ payload }: { payload: InitChatPayload }) => {
logger.info("Loading init");
const conversationHistory = await prisma.conversationHistory.findUnique({
where: { id: payload.conversationHistoryId },
include: { conversation: true },
});
const conversation = conversationHistory?.conversation as Conversation;
const workspace = await prisma.workspace.findUnique({
where: { id: conversation.workspaceId as string },
});
if (!workspace) {
return { conversation, conversationHistory };
}
const randomKeyName = `chat_${nanoid(10)}`;
const pat = await getOrCreatePersonalAccessToken({
name: randomKeyName,
userId: workspace.userId as string,
});
const user = await prisma.user.findFirst({
where: { id: workspace.userId as string },
});
// Set up axios interceptor for memory operations
axios.interceptors.request.use((config) => {
if (config.url?.startsWith("https://core::memory")) {
// Handle both search and ingest endpoints
config.url = config.url.replace(
"https://core::memory",
process.env.API_BASE_URL ?? "",
);
config.headers.Authorization = `Bearer ${pat.token}`;
}
return config;
});
return {
conversation,
conversationHistory,
tokenId: pat.id,
token: pat.token,
userId: user?.id,
userName: user?.name,
};
};
export const createConversationHistoryForAgent = async (
conversationId: string,
) => {


@ -3,7 +3,6 @@ import { addToQueue } from "~/lib/ingest.server";
import { logger } from "~/services/logger.service";
import { SearchService } from "~/services/search.server";
import { SpaceService } from "~/services/space.server";
import { deepSearch } from "~/trigger/deep-search";
import { IntegrationLoader } from "./integration-loader";
import { hasCredits } from "~/services/billing.server";
import { prisma } from "~/db.server";
@ -229,8 +228,8 @@ export async function callMemoryTool(
return await handleGetIntegrationActions({ ...args });
case "execute_integration_action":
return await handleExecuteIntegrationAction({ ...args });
case "memory_deep_search":
return await handleMemoryDeepSearch({ ...args, userId, source });
// case "memory_deep_search":
// return await handleMemoryDeepSearch({ ...args, userId, source });
default:
throw new Error(`Unknown memory tool: ${toolName}`);
}
@ -596,58 +595,3 @@ async function handleExecuteIntegrationAction(args: any) {
};
}
}
// Handler for memory_deep_search
async function handleMemoryDeepSearch(args: any) {
try {
const { content, intentOverride, userId, source } = args;
if (!content) {
throw new Error("content is required");
}
// Trigger non-streaming deep search task
const handle = await deepSearch.triggerAndWait({
content,
userId,
stream: false, // MCP doesn't need streaming
intentOverride,
metadata: { source },
});
// Wait for task completion
if (handle.ok) {
return {
content: [
{
type: "text",
text: JSON.stringify(handle.output),
},
],
isError: false,
};
} else {
return {
content: [
{
type: "text",
text: `Error performing deep search: ${handle.error instanceof Error ? handle.error.message : String(handle.error)}`,
},
],
isError: true,
};
}
} catch (error) {
logger.error(`MCP deep search error: ${error}`);
return {
content: [
{
type: "text",
text: `Error performing deep search: ${error instanceof Error ? error.message : String(error)}`,
},
],
isError: true,
};
}
}


@ -2,7 +2,8 @@ import { logger } from "~/services/logger.service";
import { fetchAndSaveStdioIntegrations } from "~/trigger/utils/mcp";
import { initNeo4jSchemaOnce } from "~/lib/neo4j.server";
import { env } from "~/env.server";
import { startWorkers } from "~/bullmq/start-workers";
import { initWorkers, shutdownWorkers } from "~/bullmq/start-workers";
import { trackConfig } from "~/services/telemetry.server";
// Global flag to ensure startup only runs once per server process
let startupInitialized = false;
@ -44,20 +45,36 @@ export async function initializeStartupServices() {
if (env.QUEUE_PROVIDER === "trigger") {
try {
const triggerApiUrl = env.TRIGGER_API_URL;
if (triggerApiUrl) {
await waitForTriggerLogin(triggerApiUrl);
await addEnvVariablesInTrigger();
} else {
console.error("TRIGGER_API_URL is not set in environment variables.");
// At this point, env validation should have already ensured these are present
// But we add a runtime check for safety
if (
!triggerApiUrl ||
!env.TRIGGER_PROJECT_ID ||
!env.TRIGGER_SECRET_KEY
) {
console.error(
"TRIGGER_API_URL, TRIGGER_PROJECT_ID, and TRIGGER_SECRET_KEY must be set when QUEUE_PROVIDER=trigger",
);
process.exit(1);
}
await waitForTriggerLogin(triggerApiUrl);
await addEnvVariablesInTrigger();
} catch (e) {
console.error(e);
console.error("Trigger is not configured");
process.exit(1);
}
} else {
await startWorkers();
await initWorkers();
// Handle graceful shutdown
process.on("SIGTERM", async () => {
await shutdownWorkers();
});
process.on("SIGINT", async () => {
await shutdownWorkers();
process.exit(0);
});
}
try {
@ -70,6 +87,10 @@ export async function initializeStartupServices() {
await fetchAndSaveStdioIntegrations();
logger.info("Stdio integrations initialization completed");
// Track system configuration once at startup
await trackConfig();
logger.info("System configuration tracked");
startupInitialized = true;
logger.info("Application initialization completed successfully");
} catch (error) {
@ -126,6 +147,14 @@ export async function addEnvVariablesInTrigger() {
TRIGGER_SECRET_KEY,
} = env;
// These should always be present when this function is called
// but we add a runtime check for type safety
if (!TRIGGER_PROJECT_ID || !TRIGGER_API_URL || !TRIGGER_SECRET_KEY) {
throw new Error(
"TRIGGER_PROJECT_ID, TRIGGER_API_URL, and TRIGGER_SECRET_KEY are required",
);
}
const DATABASE_URL = getDatabaseUrl(POSTGRES_DB);
// Map of key to value from env, replacing 'localhost' as needed


@ -14,11 +14,12 @@
"trigger:deploy": "pnpm dlx trigger.dev@4.0.4 deploy"
},
"dependencies": {
"@ai-sdk/amazon-bedrock": "2.2.12",
"@ai-sdk/anthropic": "^1.2.12",
"@ai-sdk/google": "^1.2.22",
"@ai-sdk/openai": "^1.3.21",
"@anthropic-ai/sdk": "^0.60.0",
"@ai-sdk/amazon-bedrock": "3.0.47",
"@ai-sdk/anthropic": "^2.0.37",
"@ai-sdk/google": "^2.0.23",
"@ai-sdk/openai": "^2.0.53",
"@ai-sdk/react": "2.0.78",
"@anthropic-ai/sdk": "^0.67.0",
"@aws-sdk/client-s3": "3.879.0",
"@aws-sdk/credential-providers": "^3.894.0",
"@aws-sdk/s3-request-presigner": "3.879.0",
@ -81,7 +82,7 @@
"@tiptap/starter-kit": "2.11.9",
"@trigger.dev/react-hooks": "4.0.4",
"@trigger.dev/sdk": "4.0.4",
"ai": "4.3.19",
"ai": "5.0.78",
"axios": "^1.10.0",
"bullmq": "^5.53.2",
"cheerio": "^1.1.2",
@ -117,9 +118,10 @@
"neo4j-driver": "^5.28.1",
"non.geist": "^1.0.2",
"novel": "^1.0.2",
"ollama-ai-provider": "1.2.0",
"ollama-ai-provider-v2": "1.5.1",
"openai": "^5.12.2",
"posthog-js": "^1.116.6",
"posthog-node": "^5.10.3",
"react": "^18.2.0",
"react-calendar-heatmap": "^1.10.0",
"react-dom": "^18.2.0",
@ -127,6 +129,7 @@
"react-resizable-panels": "^1.0.9",
"react-hotkeys-hook": "^4.5.0",
"react-virtualized": "^9.22.6",
"resumable-stream": "2.2.8",
"remix-auth": "^4.2.0",
"remix-auth-oauth2": "^3.4.1",
"remix-themes": "^2.0.4",

docker/Dockerfile.neo4j Normal file

@ -0,0 +1,22 @@
FROM neo4j:5.26.0
# Manual installation of plugins with correct download URLs
# GDS 2.13.2 is compatible with Neo4j 5.26
# APOC 5.26.0 matches Neo4j 5.26
RUN apt-get update && apt-get install -y curl && \
curl -L https://github.com/neo4j/graph-data-science/releases/download/2.13.2/neo4j-graph-data-science-2.13.2.jar \
-o /var/lib/neo4j/plugins/neo4j-graph-data-science.jar && \
curl -L https://github.com/neo4j/apoc/releases/download/5.26.0/apoc-5.26.0-core.jar \
-o /var/lib/neo4j/plugins/apoc-core.jar && \
apt-get clean && \
rm -rf /var/lib/apt/lists/* && \
chown -R neo4j:neo4j /var/lib/neo4j/plugins
# Default configuration for GDS and APOC
ENV NEO4J_dbms_security_procedures_unrestricted=gds.*,apoc.*
ENV NEO4J_dbms_security_procedures_allowlist=gds.*,apoc.*
ENV NEO4J_apoc_export_file_enabled=true
ENV NEO4J_apoc_import_file_enabled=true
ENV NEO4J_apoc_import_file_use_neo4j_config=true
EXPOSE 7474 7687
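# Build and run locally (image tag and password are illustrative):
#   docker build -f docker/Dockerfile.neo4j -t core-neo4j:local .
#   docker run -p 7474:7474 -p 7687:7687 -e NEO4J_AUTH=neo4j/your-password core-neo4j:local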

docs/TELEMETRY.md Normal file

@ -0,0 +1,243 @@
# Telemetry in Core
Core collects minimal usage data to help us understand how the product is being used and to make data-driven improvements. This document explains what we collect, why we collect it, and how to opt out.
## Our Commitment to Privacy
We take your privacy seriously. Telemetry is designed to be:
- **Transparent**: You can see exactly what we collect (listed below)
- **Respectful**: Easy to disable at any time
- **Minimal**: We only collect what helps improve the product
- **Secure**: Data is transmitted securely to PostHog
## What We Collect
### User Information
- **Email address only**: Used to identify unique users (can be anonymized - see below)
- No other personal information is collected
### Feature Usage Events
We track when these features are used (event name only, no additional data):
- **episode_ingested**: When you add a conversation episode
- **document_ingested**: When you add a document
- **search_performed**: When you perform a search
- **deep_search_performed**: When you use deep search
- **conversation_created**: When you start a new AI conversation
- **conversation_message_sent**: When you send a message in a conversation
- **space_created**: When you create a new space
- **space_updated**: When you update a space
- **user_registered**: When a new user signs up
### System Configuration (Tracked Once at Startup)
- **Queue provider**: Whether you're using Trigger.dev or BullMQ
- **Model provider**: Which LLM you're using (OpenAI, Anthropic, Ollama, etc.)
- **Model name**: The specific model configured
- **Embedding model**: Which embedding model is configured
- **App environment**: Development, production, or test
- **Node environment**: Runtime environment
### Errors (Automatic)
- **Error type**: The type of error that occurred
- **Error message**: Brief description of the error
- **Error stack trace**: Technical details for debugging
- **Request context**: URL, method, user agent (for server errors)
### Page Views (Client-Side)
- **Page navigation**: Which pages are visited
- **Session information**: Basic session tracking
## What We DON'T Collect
We explicitly **do not** collect:
- ❌ **Your document content**: None of your ingested documents or notes
- ❌ **Space content**: Your space data remains private
- ❌ **Search queries**: We track that searches happen, not what you searched for
- ❌ **Conversation content**: We never collect the actual messages or responses
- ❌ **User names**: Only email addresses are collected (can be anonymized)
- ❌ **Workspace IDs**: Not tracked
- ❌ **Space IDs**: Not tracked
- ❌ **Conversation IDs**: Not tracked
- ❌ **API keys or secrets**: No sensitive credentials
- ❌ **IP addresses**: Not tracked
- ❌ **File paths or system details**: No filesystem information
- ❌ **Environment variables**: Configuration remains private
**Privacy-First Approach**: We only track the event name and user email. No metadata, no additional properties, no detailed analytics.
## Why We Collect This Data
### Product Improvement
- Understand which features are most valuable
- Identify features that need improvement
- Prioritize development based on actual usage
### Reliability & Performance
- Detect and fix errors before they affect many users
- Identify performance bottlenecks
- Monitor system health across different configurations
### Usage Patterns
- Understand how different deployment types (Docker, manual, cloud) are used
- See which queue providers and models are popular
- Make informed decisions about which integrations to prioritize
## How to Opt-Out
We respect your choice to disable telemetry. There are several ways to control it:
### Option 1: Disable Telemetry Completely
Add to your `.env` file:
```bash
TELEMETRY_ENABLED=false
```
### Option 2: Anonymous Mode
Keep telemetry enabled but send "anonymous" instead of your email:
```bash
TELEMETRY_ANONYMOUS=true
```
### Option 3: Remove PostHog Key
Set the PostHog key to empty:
```bash
POSTHOG_PROJECT_KEY=
```
After making any of these changes, restart your Core instance.
## Environment Variables
```bash
# PostHog project key
POSTHOG_PROJECT_KEY=phc_your_key_here
# Enable/disable telemetry (default: true)
TELEMETRY_ENABLED=true
# Send "anonymous" instead of email (default: false)
TELEMETRY_ANONYMOUS=false
# Industry standard opt-out
DO_NOT_TRACK=1
```
## For Self-Hosted Deployments
### Default Behavior
- Telemetry is **enabled by default** with opt-out
- Sends data to our PostHog instance
- Easy to disable (see options above)
### Using Your Own PostHog Instance
If you prefer to keep all data in-house, you can:
1. Deploy your own PostHog instance (https://posthog.com/docs/self-host)
2. Set `POSTHOG_PROJECT_KEY` to your self-hosted instance's key
3. All telemetry data stays on your infrastructure
### Completely Disable Telemetry
For maximum privacy in self-hosted deployments:
1. Set `TELEMETRY_ENABLED=false` in your `.env`
2. Or set `DO_NOT_TRACK=1`
3. No telemetry data will be sent
### Anonymous Mode
If you want to contribute usage data without identifying yourself:
1. Set `TELEMETRY_ANONYMOUS=true` in your `.env`
2. All events will be tracked as "anonymous" instead of your email
3. Helps us improve the product while maintaining your privacy
## Transparency
### Open Source
Core's telemetry code is completely open source. You can inspect exactly what is being tracked:
**Server-Side Tracking:**
- `apps/webapp/app/services/telemetry.server.ts` - Core telemetry service
- `apps/webapp/app/entry.server.tsx` - Global error tracking
- `apps/webapp/app/lib/ingest.server.ts:66,76` - Episode/document ingestion
- `apps/webapp/app/routes/api.v1.search.tsx:57` - Search tracking
- `apps/webapp/app/routes/api.v1.deep-search.tsx:33` - Deep search tracking
- `apps/webapp/app/services/conversation.server.ts:60,110` - Conversation tracking
- `apps/webapp/app/services/space.server.ts:68,201` - Space tracking
- `apps/webapp/app/models/user.server.ts:80,175` - User registration tracking
- `apps/webapp/app/utils/startup.ts:78` - System config tracking (once at startup)
**Client-Side Tracking:**
- `apps/webapp/app/hooks/usePostHog.ts` - Page views and user identification
- `apps/webapp/app/root.tsx:118-119` - PostHog initialization
### PostHog Key Security
- The PostHog project key (`phc_*`) is safe to expose publicly
- It can only **send** events, not read existing data
- This is standard practice for client-side analytics
### Data Minimization
Our approach prioritizes minimal data collection:
- **Event name only**: Just the feature name (e.g., "search_performed")
- **Email only**: Single identifier (can be anonymized)
- **No metadata**: No counts, times, IDs, or other properties
- **Config once**: System configuration tracked only at startup, not per-event
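As a sketch of what this minimal payload looks like in practice (assuming the `posthog-node` client listed in the dependencies; the helper name and PostHog host below are illustrative, the real service lives in `apps/webapp/app/services/telemetry.server.ts`):
```typescript
import { PostHog } from "posthog-node";

const client = new PostHog(process.env.POSTHOG_PROJECT_KEY ?? "", {
  host: "https://us.i.posthog.com", // assumption: PostHog Cloud US
});

export function trackFeature(event: string, email: string) {
  // Respect the opt-out switches before sending anything.
  if (
    process.env.TELEMETRY_ENABLED === "false" ||
    process.env.DO_NOT_TRACK === "1"
  ) {
    return;
  }
  const distinctId =
    process.env.TELEMETRY_ANONYMOUS === "true" ? "anonymous" : email;
  // Event name + distinct id only; no properties payload.
  client.capture({ distinctId, event });
}
```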
## Questions?
If you have questions about telemetry:
- Open an issue on GitHub: https://github.com/redplanethq/core/issues
- Review the source code to see exactly what's tracked
- Check PostHog's privacy policy: https://posthog.com/privacy
## Summary
**What we track**: Event names + email (e.g., "search_performed" by "user@example.com")
**What we don't track**: Content, queries, messages, IDs, counts, times, or any metadata
**How to opt-out**: `TELEMETRY_ENABLED=false` or `DO_NOT_TRACK=1`
**Anonymous mode**: `TELEMETRY_ANONYMOUS=true` (sends "anonymous" instead of email)
**Default**: Enabled with easy opt-out
### Events Tracked
| Event | Location | When It Fires |
| --------------------------- | ----------------------------------- | -------------------------------- |
| `episode_ingested` | lib/ingest.server.ts:76 | Conversation episode added |
| `document_ingested` | lib/ingest.server.ts:66 | Document added |
| `search_performed` | routes/api.v1.search.tsx:57 | Basic search executed |
| `deep_search_performed` | routes/api.v1.deep-search.tsx:33 | Deep search executed |
| `conversation_created` | services/conversation.server.ts:110 | New conversation started |
| `conversation_message_sent` | services/conversation.server.ts:60 | Message sent in conversation |
| `space_created` | services/space.server.ts:68 | New space created |
| `space_updated` | services/space.server.ts:201 | Space updated |
| `user_registered` | models/user.server.ts:80,175 | New user signs up |
| `error_occurred` | entry.server.tsx:36 | Server error (auto-tracked) |
| `system_config` | utils/startup.ts:78 | App starts (config tracked once) |
We believe in building in public and being transparent about data collection. Thank you for helping make Core better!


@ -32,10 +32,6 @@ To run CORE, you will need:
- 8+ GB RAM
- 20+ GB Storage
**Background Jobs Machine (if running separately):**
- 2+ vCPU
- 4+ GB RAM
- 10+ GB Storage
## Deployment Options
@ -47,7 +43,7 @@ CORE offers two deployment approaches depending on your needs:
### Combined Setup
For self deployment with both CORE and Trigger.dev running together:
For self deployment:
1. Clone core repository
```bash


@ -48,10 +48,16 @@ Environment variables for the CORE webapp container.
| `MODEL` | No | gpt-4-turbo-2024-04-09 | Default language model |
| `EMBEDDING_MODEL` | No | text-embedding-3-small | Model for text embeddings |
| `OLLAMA_URL` | No | http://ollama:11434 | Ollama server URL for local models |
| **Background Jobs - Trigger.dev** | | | |
| `TRIGGER_PROJECT_ID` | Yes | — | Trigger.dev project identifier |
| `TRIGGER_SECRET_KEY` | Yes | — | Trigger.dev authentication secret |
| `TRIGGER_API_URL` | Yes | http://host.docker.internal:8030 | Trigger.dev API endpoint (use localhost:8030 for local, api.trigger.dev for cloud) |
| **Background Jobs** | | | |
| `QUEUE_PROVIDER` | No | trigger | Queue provider: "trigger" for Trigger.dev or "bullmq" for BullMQ (Redis-based) |
| `TRIGGER_PROJECT_ID` | Conditional | — | Trigger.dev project identifier (required only when QUEUE_PROVIDER=trigger) |
| `TRIGGER_SECRET_KEY` | Conditional | — | Trigger.dev authentication secret (required only when QUEUE_PROVIDER=trigger) |
| `TRIGGER_API_URL` | Conditional | http://host.docker.internal:8030 | Trigger.dev API endpoint (required only when QUEUE_PROVIDER=trigger) |
| `TRIGGER_DB` | No | trigger | Database name for Trigger.dev |
| **Telemetry** | | | |
| `POSTHOG_PROJECT_KEY` | No | phc_SwfGIzzX5gh5bazVWoRxZTBhkr7FwvzArS0NRyGXm1a | PostHog project key for usage analytics |
| `TELEMETRY_ENABLED` | No | true | Enable (true) or disable (false) telemetry collection |
| `TELEMETRY_ANONYMOUS` | No | false | Send anonymous telemetry (true) or include user email (false) |
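For example, a minimal BullMQ-based configuration might look like this (values illustrative):
```bash
QUEUE_PROVIDER=bullmq        # Redis-backed workers; no Trigger.dev variables needed
TELEMETRY_ENABLED=true
TELEMETRY_ANONYMOUS=false
```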
## Security Considerations


@ -29,7 +29,6 @@ The self-hosted version of CORE is composed of several containers that you run o
- **Webapp**: The main application container, responsible for serving the user interface and orchestrating memory operations.
- **PostgreSQL**: Stores metadata, user accounts, and configuration data.
- **Neo4j**: Graph database used for storing and querying the memory graph.
- **[Trigger](https://trigger.dev/)**: Manages background jobs and workflows, such as data ingestion and memory formation. We use Trigger to reliably handle all background processing.
- **Redis**: Provides caching and session management.
This modular architecture allows you to scale each service as needed and gives you full control over your deployment.


@ -1,4 +1,4 @@
VERSION=0.1.25
VERSION=0.1.26
# Nest run in docker, change host to database container name
DB_HOST=postgres
@ -43,9 +43,13 @@ NEO4J_PASSWORD=27192e6432564f4788d55c15131bd5ac
NEO4J_AUTH=neo4j/27192e6432564f4788d55c15131bd5ac
OPENAI_API_KEY=
OLLAMA_URL=
OLLAMA_URL=http://ollama:11434
EMBEDDING_MODEL=text-embedding-3-small
MODEL=gpt-4.1-2025-04-14
## for opensource embedding model
# EMBEDDING_MODEL=mxbai-embed-large
QUEUE_PROVIDER=bullmq


@ -42,6 +42,9 @@ services:
- FROM_EMAIL=${FROM_EMAIL}
- RESEND_API_KEY=${RESEND_API_KEY}
- COHERE_API_KEY=${COHERE_API_KEY}
- QUEUE_PROVIDER=${QUEUE_PROVIDER}
- TELEMETRY_ENABLED=${TELEMETRY_ENABLED}
- TELEMETRY_ANONYMOUS=${TELEMETRY_ANONYMOUS}
ports:
- "3033:3000"
depends_on:
@ -84,7 +87,7 @@ services:
neo4j:
container_name: core-neo4j
image: neo4j:5
image: redplanethq/neo4j:0.1.0
environment:
- NEO4J_AUTH=${NEO4J_AUTH}
- NEO4J_dbms_security_procedures_unrestricted=gds.*,apoc.*
@ -108,6 +111,26 @@ services:
retries: 10
start_period: 20s
# Uncomment this if you want to use a local embedding model
# ollama:
# container_name: core-ollama
# image: ollama/ollama:0.12.6
# ports:
# - "11434:11434"
# volumes:
# - ollama_data:/root/.ollama
# - ./scripts/ollama-init.sh:/usr/local/bin/ollama-init.sh:ro
# networks:
# - core
# entrypoint: ["/bin/bash", "/usr/local/bin/ollama-init.sh"]
# healthcheck:
# test: ["CMD", "curl", "-f", "http://localhost:11434/api/tags"]
# interval: 30s
# timeout: 10s
# retries: 5
# start_period: 90s
# restart: unless-stopped
networks:
core:
name: core
@ -117,3 +140,4 @@ volumes:
postgres_data:
neo4j_data:
shared:
ollama_data:


@ -0,0 +1,18 @@
#!/bin/bash
set -e
echo "Starting Ollama server..."
ollama serve &
OLLAMA_PID=$!
echo "Waiting for Ollama server to be ready..."
sleep 5
echo "Pulling mxbai-embed-large model..."
ollama pull mxbai-embed-large
echo "Model pulled successfully!"
echo "Ollama is ready to accept requests."
# Keep the Ollama server running
wait $OLLAMA_PID


@ -1,7 +1,7 @@
{
"name": "core",
"private": true,
"version": "0.1.25",
"version": "0.1.26",
"workspaces": [
"apps/*",
"packages/*"

pnpm-lock.yaml generated

@ -281,20 +281,23 @@ importers:
apps/webapp:
dependencies:
'@ai-sdk/amazon-bedrock':
specifier: 2.2.12
version: 2.2.12(zod@3.25.76)
specifier: 3.0.47
version: 3.0.47(zod@3.25.76)
'@ai-sdk/anthropic':
specifier: ^1.2.12
version: 1.2.12(zod@3.25.76)
specifier: ^2.0.37
version: 2.0.37(zod@3.25.76)
'@ai-sdk/google':
specifier: ^1.2.22
version: 1.2.22(zod@3.25.76)
specifier: ^2.0.23
version: 2.0.23(zod@3.25.76)
'@ai-sdk/openai':
specifier: ^1.3.21
version: 1.3.22(zod@3.25.76)
specifier: ^2.0.53
version: 2.0.53(zod@3.25.76)
'@ai-sdk/react':
specifier: 2.0.78
version: 2.0.78(react@18.3.1)(zod@3.25.76)
'@anthropic-ai/sdk':
specifier: ^0.60.0
version: 0.60.0
specifier: ^0.67.0
version: 0.67.0(zod@3.25.76)
'@aws-sdk/client-s3':
specifier: 3.879.0
version: 3.879.0
@ -480,10 +483,10 @@ importers:
version: 4.0.4(react-dom@18.3.1(react@18.3.1))(react@18.3.1)
'@trigger.dev/sdk':
specifier: 4.0.4
version: 4.0.4(ai@4.3.19(react@18.3.1)(zod@3.25.76))(zod@3.25.76)
version: 4.0.4(ai@5.0.78(zod@3.25.76))(zod@3.25.76)
ai:
specifier: 4.3.19
version: 4.3.19(react@18.3.1)(zod@3.25.76)
specifier: 5.0.78
version: 5.0.78(zod@3.25.76)
axios:
specifier: ^1.10.0
version: 1.10.0
@ -589,15 +592,18 @@ importers:
novel:
specifier: ^1.0.2
version: 1.0.2(@tiptap/extension-code-block@2.11.9(@tiptap/core@2.25.0(@tiptap/pm@2.25.0))(@tiptap/pm@2.25.0))(@types/react-dom@18.3.7(@types/react@18.2.69))(@types/react@18.2.69)(highlight.js@11.11.1)(lowlight@3.3.0)(react-dom@18.3.1(react@18.3.1))(react@18.3.1)
ollama-ai-provider:
specifier: 1.2.0
version: 1.2.0(zod@3.25.76)
ollama-ai-provider-v2:
specifier: 1.5.1
version: 1.5.1(zod@3.25.76)
openai:
specifier: ^5.12.2
version: 5.12.2(ws@8.18.3)(zod@3.25.76)
posthog-js:
specifier: ^1.116.6
version: 1.250.2
posthog-node:
specifier: ^5.10.3
version: 5.10.3
react:
specifier: ^18.2.0
version: 18.3.1
@ -634,6 +640,9 @@ importers:
remix-utils:
specifier: ^7.7.0
version: 7.7.0(@remix-run/node@2.1.0(typescript@5.8.3))(@remix-run/react@2.16.7(react-dom@18.3.1(react@18.3.1))(react@18.3.1)(typescript@5.8.3))(@remix-run/router@1.23.0)(crypto-js@4.2.0)(react@18.3.1)(zod@3.25.76)
resumable-stream:
specifier: 2.2.8
version: 2.2.8
sigma:
specifier: ^3.0.2
version: 3.0.2(graphology-types@0.24.8)
@ -925,56 +934,56 @@ importers:
packages:
'@ai-sdk/amazon-bedrock@2.2.12':
resolution: {integrity: sha512-m8gARnh45pr1s08Uu4J/Pm8913mwJPejPOm59b+kUqMsP9ilhUtH/bp8432Ra/v+vHuMoBrglG2ZvXtctAaH2g==}
'@ai-sdk/amazon-bedrock@3.0.47':
resolution: {integrity: sha512-oTAxTU4k1+EIKP41nvLGN7dWwoK7dg1JptrX6csn7abmSfQSsygDrfeMf8/7Mdnr+frt9i5ogvpQkp1ak0916Q==}
engines: {node: '>=18'}
peerDependencies:
zod: ^3.0.0
zod: ^3.25.76 || ^4.1.8
'@ai-sdk/anthropic@1.2.12':
resolution: {integrity: sha512-YSzjlko7JvuiyQFmI9RN1tNZdEiZxc+6xld/0tq/VkJaHpEzGAb1yiNxxvmYVcjvfu/PcvCxAAYXmTYQQ63IHQ==}
'@ai-sdk/anthropic@2.0.37':
resolution: {integrity: sha512-r2e9BWoobisH9B5b7x3yYG/k9WlsZqa4D94o7gkwktReqrjjv83zNMop4KmlJsh/zBhbsaP8S8SUfiwK+ESxgg==}
engines: {node: '>=18'}
peerDependencies:
zod: ^3.0.0
zod: ^3.25.76 || ^4.1.8
'@ai-sdk/google@1.2.22':
resolution: {integrity: sha512-Ppxu3DIieF1G9pyQ5O1Z646GYR0gkC57YdBqXJ82qvCdhEhZHu0TWhmnOoeIWe2olSbuDeoOY+MfJrW8dzS3Hw==}
'@ai-sdk/gateway@2.0.1':
resolution: {integrity: sha512-vPVIbnP35ZnayS937XLo85vynR85fpBQWHCdUweq7apzqFOTU2YkUd4V3msebEHbQ2Zro60ZShDDy9SMiyWTqA==}
engines: {node: '>=18'}
peerDependencies:
zod: ^3.0.0
zod: ^3.25.76 || ^4.1.8
'@ai-sdk/openai@1.3.22':
resolution: {integrity: sha512-QwA+2EkG0QyjVR+7h6FE7iOu2ivNqAVMm9UJZkVxxTk5OIq5fFJDTEI/zICEMuHImTTXR2JjsL6EirJ28Jc4cw==}
'@ai-sdk/google@2.0.23':
resolution: {integrity: sha512-VbCnKR+6aWUVLkAiSW5gUEtST7KueEmlt+d6qwDikxlLnFG9pzy59je8MiDVeM5G2tuSXbvZQF78PGIfXDBmow==}
engines: {node: '>=18'}
peerDependencies:
zod: ^3.0.0
zod: ^3.25.76 || ^4.1.8
'@ai-sdk/provider-utils@2.2.8':
resolution: {integrity: sha512-fqhG+4sCVv8x7nFzYnFo19ryhAa3w096Kmc3hWxMQfW/TubPOmt3A6tYZhl4mUfQWWQMsuSkLrtjlWuXBVSGQA==}
'@ai-sdk/openai@2.0.53':
resolution: {integrity: sha512-GIkR3+Fyif516ftXv+YPSPstnAHhcZxNoR2s8uSHhQ1yBT7I7aQYTVwpjAuYoT3GR+TeP50q7onj2/nDRbT2FQ==}
engines: {node: '>=18'}
peerDependencies:
zod: ^3.23.8
zod: ^3.25.76 || ^4.1.8
'@ai-sdk/provider@1.1.3':
resolution: {integrity: sha512-qZMxYJ0qqX/RfnuIaab+zp8UAeJn/ygXXAffR5I4N0n1IrvA6qBsjc8hXLmBiMV2zoXlifkacF7sEFnYnjBcqg==}
'@ai-sdk/provider-utils@3.0.12':
resolution: {integrity: sha512-ZtbdvYxdMoria+2SlNarEk6Hlgyf+zzcznlD55EAl+7VZvJaSg2sqPvwArY7L6TfDEDJsnCq0fdhBSkYo0Xqdg==}
engines: {node: '>=18'}
peerDependencies:
zod: ^3.25.76 || ^4.1.8
'@ai-sdk/provider@2.0.0':
resolution: {integrity: sha512-6o7Y2SeO9vFKB8lArHXehNuusnpddKPk7xqL7T2/b+OvXMRIXUO1rR4wcv1hAFUAT9avGZshty3Wlua/XA7TvA==}
engines: {node: '>=18'}
'@ai-sdk/react@1.2.12':
resolution: {integrity: sha512-jK1IZZ22evPZoQW3vlkZ7wvjYGYF+tRBKXtrcolduIkQ/m/sOAVcVeVDUDvh1T91xCnWCdUGCPZg2avZ90mv3g==}
'@ai-sdk/react@2.0.78':
resolution: {integrity: sha512-f5inDBHJyUEzbtNxc9HiTxbcGjtot0uuc//0/khGrl8IZlLxw+yTxO/T1Qq95Rw5QPwTx9/Aw7wIZei3qws9hA==}
engines: {node: '>=18'}
peerDependencies:
react: ^18 || ^19 || ^19.0.0-rc
zod: ^3.23.8
zod: ^3.25.76 || ^4.1.8
peerDependenciesMeta:
zod:
optional: true
'@ai-sdk/ui-utils@1.2.11':
resolution: {integrity: sha512-3zcwCc8ezzFlwp3ZD15wAPjf2Au4s3vAbKsXQVyhxODHcmu0iyPO2Eua6D/vicq/AUm/BAo60r97O6HU+EI0+w==}
engines: {node: '>=18'}
peerDependencies:
zod: ^3.23.8
'@alloc/quick-lru@5.2.0':
resolution: {integrity: sha512-UrcABB+4bUrFABwbluTIBErXwvbsU/V7TZWfmbgJfbkwiBuziS9gxdODUyuiecfdGQ85jglMW6juS3+z5TsKLw==}
engines: {node: '>=10'}
@ -983,9 +992,14 @@ packages:
resolution: {integrity: sha512-30iZtAPgz+LTIYoeivqYo853f02jBYSd5uGnGpkFV0M3xOt9aN73erkgYAmZU43x4VfqcnLxW9Kpg3R5LC4YYw==}
engines: {node: '>=6.0.0'}
'@anthropic-ai/sdk@0.60.0':
resolution: {integrity: sha512-9zu/TXaUy8BZhXedDtt1wT3H4LOlpKDO1/ftiFpeR3N1PCr3KJFKkxxlQWWt1NNp08xSwUNJ3JNY8yhl8av6eQ==}
'@anthropic-ai/sdk@0.67.0':
resolution: {integrity: sha512-Buxbf6jYJ+pPtfCgXe1pcFtZmdXPrbdqhBjiscFt9irS1G0hCsmR/fPA+DwKTk4GPjqeNnnCYNecXH6uVZ4G/A==}
hasBin: true
peerDependencies:
zod: ^3.25.0 || ^4.0.0
peerDependenciesMeta:
zod:
optional: true
'@arr/every@1.0.1':
resolution: {integrity: sha512-UQFQ6SgyJ6LX42W8rHCs8KVc0JS0tzVL9ct4XYedJukskYVWTo49tNiMEK9C2HTyarbNiT/RVIRSY82vH+6sTg==}
@ -3161,6 +3175,9 @@ packages:
'@popperjs/core@2.11.8':
resolution: {integrity: sha512-P1st0aksCrn9sGZhp8GMYwBnQsbvAWsZAX44oXNNvLHGqAOcoVxmjZiohstwQ7SqKnbR47akdNi+uleWD8+g6A==}
'@posthog/core@1.3.1':
resolution: {integrity: sha512-sGKVHituJ8L/bJxVV4KamMFp+IBWAZyCiYunFawJZ4cc59PCtLnKFIMEV6kn7A4eZQcQ6EKV5Via4sF3Z7qMLQ==}
'@prisma/client@5.4.1':
resolution: {integrity: sha512-xyD0DJ3gRNfLbPsC+YfMBBuLJtZKQfy1OD2qU/PZg+HKrr7SO+09174LMeTlWP0YF2wca9LxtVd4HnAiB5ketQ==}
engines: {node: '>=16.13'}
@ -5102,6 +5119,9 @@ packages:
'@socket.io/component-emitter@3.1.2':
resolution: {integrity: sha512-9BCxFwvbGg/RsZK9tjXd8s4UcwR0MWeFQ1XEKIQVVvAGJyINdrqKMcTRyLoK8Rse1GjzLV9cwjWV1olXRWEXVA==}
'@standard-schema/spec@1.0.0':
resolution: {integrity: sha512-m2bOd0f2RT9k8QJx1JN85cZYyH1RqFBdlwtkSlf4tBDYLCiiZnv1fIIwacK6cqwXavOydf0NPToMQgpKq+dVlA==}
'@swc/core-darwin-arm64@1.3.101':
resolution: {integrity: sha512-mNFK+uHNPRXSnfTOG34zJOeMl2waM4hF4a2NY7dkMXrPqw9CoJn4MwTXJcyMiSz1/BnNjjTCHF3Yhj0jPxmkzQ==}
engines: {node: '>=10'}
@ -5679,9 +5699,6 @@ packages:
'@types/debug@4.1.12':
resolution: {integrity: sha512-vIChWdVG3LG1SMxEvI/AK+FWJthlrqlTu7fbrlywTkkaONwk/UAGaULXRlf8vkzFBLVm0zkMdCquhL5aOjhXPQ==}
'@types/diff-match-patch@1.0.36':
resolution: {integrity: sha512-xFdR6tkm0MWvBfO8xXCSsinYxHcqkQUlcHeSpMC2ukzOb6lwQAfDmW+Qt0AvlGd8HpsS28qKsB+oPeJn9I39jg==}
'@types/eslint-scope@3.7.7':
resolution: {integrity: sha512-MzMFlSLBqNF2gcHWO0G1vP/YQyfvrxZ0bF+u7mzUdZ1/xK4A4sru+nraZz5i3iEIk1l1uyicaDVTB4QbbEkAYg==}
@ -6115,6 +6132,10 @@ packages:
'@vanilla-extract/private@1.0.8':
resolution: {integrity: sha512-oRAbUlq1SyTWCo7dQnTVm+xgJMqNl8K1dEempQHXzQvUuyEfBabMt0wNGf+VCHzvKbx/Bzr9p/2wy8WA9+2z2g==}
'@vercel/oidc@3.0.3':
resolution: {integrity: sha512-yNEQvPcVrK9sIe637+I0jD6leluPxzwJKx/Haw6F4H77CdDsszUn5V3o96LPziXkSNE2B83+Z3mjqGKBK/R6Gg==}
engines: {node: '>= 20'}
'@web3-storage/multipart-parser@1.0.0':
resolution: {integrity: sha512-BEO6al7BYqcnfX15W2cnGR+Q566ACXAT9UQykORCWW80lmkpWsnEob6zJS1ZVBKsSJC8+7vJkHwlp+lXG1UCdw==}
@ -6223,15 +6244,11 @@ packages:
resolution: {integrity: sha512-0poP0T7el6Vq3rstR8Mn4V/IQrpBLO6POkUSrN7RhyY+GF/InCFShQzsQ39T25gkHhLgSLByyAz+Kjb+c2L98w==}
engines: {node: '>=12'}
ai@4.3.19:
resolution: {integrity: sha512-dIE2bfNpqHN3r6IINp9znguYdhIOheKW2LDigAMrgt/upT3B8eBGPSCblENvaZGoq+hxaN9fSMzjWpbqloP+7Q==}
ai@5.0.78:
resolution: {integrity: sha512-ec77fmQwJGLduswMrW4AAUGSOiu8dZaIwMmWHHGKsrMUFFS6ugfkTyx0srtuKYHNRRLRC2dT7cPirnUl98VnxA==}
engines: {node: '>=18'}
peerDependencies:
react: ^18 || ^19 || ^19.0.0-rc
zod: ^3.23.8
peerDependenciesMeta:
react:
optional: true
zod: ^3.25.76 || ^4.1.8
ajv-formats@2.1.1:
resolution: {integrity: sha512-Wx0Kx52hxE7C18hkMEggYlEifqWZtYaRgouJor+WMdPnQyEK13vgEWyVNup7SoeeoLMsr4kf5h6dOW11I15MUA==}
@ -7280,9 +7297,6 @@ packages:
didyoumean@1.2.2:
resolution: {integrity: sha512-gxtyfqMg7GKyhQmb056K7M3xszy/myH8w+B4RT+QXBQsvAOdc3XymqDDPHx1BgPgsdAA5SIifona89YtRATDzw==}
diff-match-patch@1.0.5:
resolution: {integrity: sha512-IayShXAgj/QMXgB0IWmKx+rOPuGMhqm5w6jvFxmVenXKIzRqTAAsbBPT3kWQeGANj3jGgvcvv4yK6SxqYmikgw==}
diff@5.2.0:
resolution: {integrity: sha512-uIFDxqpRZGZ6ThOk84hEfqWoHx2devRFvpTZcTHur85vImfaxUbTW9Ryh4CpCuDnToOP1CEtXKIgytHBPVff5A==}
engines: {node: '>=0.3.1'}
@ -7769,6 +7783,10 @@ packages:
resolution: {integrity: sha512-nVpZkTMM9rF6AQ9gPJpFsNAMt48wIzB5TQgiTLdHiuO8XEDhUgZEhqKlZWXbIzo9VmJ/HvysHqEaVeD5v9TPvA==}
engines: {node: '>=20.0.0'}
eventsource-parser@3.0.6:
resolution: {integrity: sha512-Vo1ab+QXPzZ4tCa8SwIHJFaSzy4R6SHf7BY79rFBDf0idraZWAkYrDjDj8uWaSm3S2TK+hJ7/t1CEmZ7jXw+pg==}
engines: {node: '>=18.0.0'}
eventsource@3.0.7:
resolution: {integrity: sha512-CRT1WTyuQoD771GW56XEZFQ/ZoSfWid1alKGDYMmkt2yl8UXrVR4pspqWNEcqKvVIzg6PAltWjxcSSPrboA4iA==}
engines: {node: '>=18.0.0'}
@ -8724,6 +8742,10 @@ packages:
resolution: {integrity: sha512-fi0NG4bPjCHunUJffmLd0gxssIgkNmArMvis4iNah6Owg1MCJjWhEcDLmsK6iGkJq3tHwbDkTlce70/tmXN4cQ==}
engines: {node: ^14.17.0 || ^16.13.0 || >=18.0.0}
json-schema-to-ts@3.1.1:
resolution: {integrity: sha512-+DWg8jCJG2TEnpy7kOm/7/AxaYoaRbjVB4LFZLySZlWn8exGs3A4OLJR966cVvU26N7X9TWxl+Jsw7dzAqKT6g==}
engines: {node: '>=16'}
json-schema-traverse@0.4.1:
resolution: {integrity: sha512-xbbCH5dCYU5T8LcEhhuh7HJ88HXuW3qsI3Y0zOZFKfZEHcpWiHU/Jxzk629Brsab/mMiHQti9wMP+845RPe3Vg==}
@ -8748,11 +8770,6 @@ packages:
jsonc-parser@3.2.1:
resolution: {integrity: sha512-AilxAyFOAcK5wA1+LeaySVBrHsGQvUFCDWXKpZjzaL0PqW+xfBOttn8GNtWKFWqneyMZj41MWF9Kl6iPWLwgOA==}
jsondiffpatch@0.6.0:
resolution: {integrity: sha512-3QItJOXp2AP1uv7waBkao5nCvhEv+QmJAd38Ybq7wNI74Q+BBmnLn4EDKz6yI9xGAIQoUF87qHt+kc1IVxB4zQ==}
engines: {node: ^18.0.0 || >=20.0.0}
hasBin: true
jsonfile@4.0.0:
resolution: {integrity: sha512-m6F1R3z8jjlf2imQHS2Qez5sjKWQzbuuhuJ/FKYFRZvPE3PuHcSMVZzfsLhGVOkfd20obL5SWEBew5ShlquNxg==}
@ -9720,14 +9737,11 @@ packages:
ohash@1.1.6:
resolution: {integrity: sha512-TBu7PtV8YkAZn0tSxobKY2n2aAQva936lhRrj6957aDaCf9IEtqsKbgMzXE/F/sjqYOwmrukeORHNLe5glk7Cg==}
ollama-ai-provider@1.2.0:
resolution: {integrity: sha512-jTNFruwe3O/ruJeppI/quoOUxG7NA6blG3ZyQj3lei4+NnJo7bi3eIRWqlVpRlu/mbzbFXeJSBuYQWF6pzGKww==}
ollama-ai-provider-v2@1.5.1:
resolution: {integrity: sha512-5R3z7Y+mm8VEtoq+rIoIqkEy83oYM3DXX6Nyrn6yofYvYl56BCoJMNwXsPrpmCI0O4fN/gAIDTLpznYMRGzZ5g==}
engines: {node: '>=18'}
peerDependencies:
zod: ^3.0.0
peerDependenciesMeta:
zod:
optional: true
zod: ^4.0.16
on-finished@2.3.0:
resolution: {integrity: sha512-ikqdkGAAyf/X/gPhXGvfgAytDZtDbr+bkNUJ0N9h5MI/dmdgCs3l6hoHrcUv41sRKew3jIwrp4qQDXiK99Utww==}
@ -9917,9 +9931,6 @@ packages:
resolution: {integrity: sha512-CiyeOxFT/JZyN5m0z9PfXw4SCBJ6Sygz1Dpl0wqjlhDEGGBP1GnsUVEL0p63hoG1fcj3fHynXi9NYO4nWOL+qQ==}
engines: {node: '>= 0.8'}
partial-json@0.1.7:
resolution: {integrity: sha512-Njv/59hHaokb/hRUjce3Hdv12wd60MtM9Z5Olmn+nehe0QDAsRtRbJPvJ0Z91TusF0SuZRIvnM+S4l6EIP8leA==}
partysocket@1.1.4:
resolution: {integrity: sha512-jXP7PFj2h5/v4UjDS8P7MZy6NJUQ7sspiFyxL4uc/+oKOL+KdtXzHnTV8INPGxBrLTXgalyG3kd12Qm7WrYc3A==}
@ -10273,6 +10284,10 @@ packages:
rrweb-snapshot:
optional: true
posthog-node@5.10.3:
resolution: {integrity: sha512-pe0P/4MfTSBgM4PWRTeg2iKDSSX6nxnlxAyW+v2+acpCSU50KM2YE5UFJ1Vkq/PtwcJgrt2Ydj66IzuRn2uwFQ==}
engines: {node: '>=20'}
preact@10.26.9:
resolution: {integrity: sha512-SSjF9vcnF27mJK1XyFMNJzFd5u3pQiATFqoaDy03XuN00u4ziveVVEGt5RKJrDR8MHE/wJo9Nnad56RLzS2RMA==}
@ -10913,6 +10928,9 @@ packages:
resolution: {integrity: sha512-l+sSefzHpj5qimhFSE5a8nufZYAM3sBSVMAPtYkmC+4EH2anSGaEMXSD0izRQbu9nfyQ9y5JrVmp7E8oZrUjvA==}
engines: {node: '>=8'}
resumable-stream@2.2.8:
resolution: {integrity: sha512-F9+SLKw/a/p7hRjy2CNwzT66UIlY7aY4D3Sg9xwuZMA7nxVQrVPXCWU27qIGcO4jlauL0T3XkCN2218qi6ugTw==}
retry@0.12.0:
resolution: {integrity: sha512-9LkiTwjUh6rT555DtE9rTX+BKByPfrMzEAtnlEtdEwr3Nkffwiihqe2bWADg+OQRjt9gl6ICdmB/ZFDCGAtSow==}
engines: {node: '>= 4'}
@ -11010,9 +11028,6 @@ packages:
resolution: {integrity: sha512-Gn/JaSk/Mt9gYubxTtSn/QCV4em9mpAPiR1rqy/Ocu19u/G9J5WWdNoUT4SiV6mFC3y6cxyFcFwdzPM3FgxGAQ==}
engines: {node: '>= 10.13.0'}
secure-json-parse@2.7.0:
resolution: {integrity: sha512-6aU+Rwsezw7VR8/nyvKTx8QpWH9FrcYiXXlqC4z5d5XQBDRqtbfsRjnwGyqbi3gddNtWHuEk9OANUotL26qKUw==}
selderee@0.11.0:
resolution: {integrity: sha512-5TF+l7p4+OsnP8BCCvSyZiSPc4x4//p5uPwK8TCnVPJYRmU2aYKMpOXvw8zM5a5JvuuCGN1jmsMwuU2W02ukfA==}
@ -11600,6 +11615,9 @@ packages:
resolution: {integrity: sha512-kr8SKKw94OI+xTGOkfsvwZQ8mWoikZDd2n8XZHjJVZUARZT+4/VV6cacRS6CLsH9bNm+HFIPU1Zx4CnNnb4qlQ==}
engines: {node: '>=6'}
ts-algebra@2.0.0:
resolution: {integrity: sha512-FPAhNPFMrkwz76P7cdjdmiShwMynZYN6SgOujD1urY4oNm80Ou9oMdmbR45LotcKOXoy7wSmHkRFE6Mxbrhefw==}
ts-api-utils@1.4.3:
resolution: {integrity: sha512-i3eMG77UTMD0hZhgRS562pv83RC6ukSAC2GMNWc+9dieh/+jDM5u5YG+NHX6VNDRHQcHwmsTHctP9LhbC3WxVw==}
engines: {node: '>=16'}
@ -12371,61 +12389,62 @@ packages:
snapshots:
'@ai-sdk/amazon-bedrock@2.2.12(zod@3.25.76)':
'@ai-sdk/amazon-bedrock@3.0.47(zod@3.25.76)':
dependencies:
'@ai-sdk/provider': 1.1.3
'@ai-sdk/provider-utils': 2.2.8(zod@3.25.76)
'@ai-sdk/anthropic': 2.0.37(zod@3.25.76)
'@ai-sdk/provider': 2.0.0
'@ai-sdk/provider-utils': 3.0.12(zod@3.25.76)
'@smithy/eventstream-codec': 4.1.1
'@smithy/util-utf8': 4.1.0
aws4fetch: 1.0.20
zod: 3.25.76
'@ai-sdk/anthropic@1.2.12(zod@3.25.76)':
'@ai-sdk/anthropic@2.0.37(zod@3.25.76)':
dependencies:
'@ai-sdk/provider': 1.1.3
'@ai-sdk/provider-utils': 2.2.8(zod@3.25.76)
'@ai-sdk/provider': 2.0.0
'@ai-sdk/provider-utils': 3.0.12(zod@3.25.76)
zod: 3.25.76
'@ai-sdk/google@1.2.22(zod@3.25.76)':
'@ai-sdk/gateway@2.0.1(zod@3.25.76)':
dependencies:
'@ai-sdk/provider': 1.1.3
'@ai-sdk/provider-utils': 2.2.8(zod@3.25.76)
'@ai-sdk/provider': 2.0.0
'@ai-sdk/provider-utils': 3.0.12(zod@3.25.76)
'@vercel/oidc': 3.0.3
zod: 3.25.76
'@ai-sdk/openai@1.3.22(zod@3.25.76)':
'@ai-sdk/google@2.0.23(zod@3.25.76)':
dependencies:
'@ai-sdk/provider': 1.1.3
'@ai-sdk/provider-utils': 2.2.8(zod@3.25.76)
'@ai-sdk/provider': 2.0.0
'@ai-sdk/provider-utils': 3.0.12(zod@3.25.76)
zod: 3.25.76
'@ai-sdk/provider-utils@2.2.8(zod@3.25.76)':
'@ai-sdk/openai@2.0.53(zod@3.25.76)':
dependencies:
'@ai-sdk/provider': 1.1.3
nanoid: 3.3.8
secure-json-parse: 2.7.0
'@ai-sdk/provider': 2.0.0
'@ai-sdk/provider-utils': 3.0.12(zod@3.25.76)
zod: 3.25.76
'@ai-sdk/provider@1.1.3':
'@ai-sdk/provider-utils@3.0.12(zod@3.25.76)':
dependencies:
'@ai-sdk/provider': 2.0.0
'@standard-schema/spec': 1.0.0
eventsource-parser: 3.0.6
zod: 3.25.76
'@ai-sdk/provider@2.0.0':
dependencies:
json-schema: 0.4.0
'@ai-sdk/react@1.2.12(react@18.3.1)(zod@3.25.76)':
'@ai-sdk/react@2.0.78(react@18.3.1)(zod@3.25.76)':
dependencies:
'@ai-sdk/provider-utils': 2.2.8(zod@3.25.76)
'@ai-sdk/ui-utils': 1.2.11(zod@3.25.76)
'@ai-sdk/provider-utils': 3.0.12(zod@3.25.76)
ai: 5.0.78(zod@3.25.76)
react: 18.3.1
swr: 2.3.3(react@18.3.1)
throttleit: 2.1.0
optionalDependencies:
zod: 3.25.76
'@ai-sdk/ui-utils@1.2.11(zod@3.25.76)':
dependencies:
'@ai-sdk/provider': 1.1.3
'@ai-sdk/provider-utils': 2.2.8(zod@3.25.76)
zod: 3.25.76
zod-to-json-schema: 3.24.5(zod@3.25.76)
'@alloc/quick-lru@5.2.0': {}
'@ampproject/remapping@2.3.0':
@ -12433,7 +12452,11 @@ snapshots:
'@jridgewell/gen-mapping': 0.3.8
'@jridgewell/trace-mapping': 0.3.25
'@anthropic-ai/sdk@0.60.0': {}
'@anthropic-ai/sdk@0.67.0(zod@3.25.76)':
dependencies:
json-schema-to-ts: 3.1.1
optionalDependencies:
zod: 3.25.76
'@arr/every@1.0.1': {}
@ -15584,6 +15607,8 @@ snapshots:
'@popperjs/core@2.11.8': {}
'@posthog/core@1.3.1': {}
'@prisma/client@5.4.1(prisma@5.4.1)':
dependencies:
'@prisma/engines-version': 5.4.1-1.2f302df92bd8945e20ad4595a73def5b96afa54f
@ -17977,6 +18002,8 @@ snapshots:
'@socket.io/component-emitter@3.1.2': {}
'@standard-schema/spec@1.0.0': {}
'@swc/core-darwin-arm64@1.3.101':
optional: true
@ -18445,7 +18472,7 @@ snapshots:
- supports-color
- utf-8-validate
'@trigger.dev/sdk@4.0.4(ai@4.3.19(react@18.3.1)(zod@3.25.76))(zod@3.25.76)':
'@trigger.dev/sdk@4.0.4(ai@5.0.78(zod@3.25.76))(zod@3.25.76)':
dependencies:
'@opentelemetry/api': 1.9.0
'@opentelemetry/semantic-conventions': 1.36.0
@ -18461,7 +18488,7 @@ snapshots:
ws: 8.18.3
zod: 3.25.76
optionalDependencies:
ai: 4.3.19(react@18.3.1)(zod@3.25.76)
ai: 5.0.78(zod@3.25.76)
transitivePeerDependencies:
- bufferutil
- supports-color
@ -18623,8 +18650,6 @@ snapshots:
dependencies:
'@types/ms': 2.1.0
'@types/diff-match-patch@1.0.36': {}
'@types/eslint-scope@3.7.7':
dependencies:
'@types/eslint': 9.6.1
@ -19134,6 +19159,8 @@ snapshots:
'@vanilla-extract/private@1.0.8': {}
'@vercel/oidc@3.0.3': {}
'@web3-storage/multipart-parser@1.0.0': {}
'@webassemblyjs/ast@1.14.1':
@ -19265,17 +19292,13 @@ snapshots:
clean-stack: 4.2.0
indent-string: 5.0.0
ai@4.3.19(react@18.3.1)(zod@3.25.76):
ai@5.0.78(zod@3.25.76):
dependencies:
'@ai-sdk/provider': 1.1.3
'@ai-sdk/provider-utils': 2.2.8(zod@3.25.76)
'@ai-sdk/react': 1.2.12(react@18.3.1)(zod@3.25.76)
'@ai-sdk/ui-utils': 1.2.11(zod@3.25.76)
'@ai-sdk/gateway': 2.0.1(zod@3.25.76)
'@ai-sdk/provider': 2.0.0
'@ai-sdk/provider-utils': 3.0.12(zod@3.25.76)
'@opentelemetry/api': 1.9.0
jsondiffpatch: 0.6.0
zod: 3.25.76
optionalDependencies:
react: 18.3.1
ajv-formats@2.1.1(ajv@8.17.1):
optionalDependencies:
@ -20422,8 +20445,6 @@ snapshots:
didyoumean@1.2.2: {}
diff-match-patch@1.0.5: {}
diff@5.2.0: {}
dir-glob@3.0.1:
@ -21213,6 +21234,8 @@ snapshots:
eventsource-parser@3.0.3: {}
eventsource-parser@3.0.6: {}
eventsource@3.0.7:
dependencies:
eventsource-parser: 3.0.3
@ -22262,6 +22285,11 @@ snapshots:
json-parse-even-better-errors@3.0.2: {}
json-schema-to-ts@3.1.1:
dependencies:
'@babel/runtime': 7.27.6
ts-algebra: 2.0.0
json-schema-traverse@0.4.1: {}
json-schema-traverse@1.0.0: {}
@ -22278,12 +22306,6 @@ snapshots:
jsonc-parser@3.2.1: {}
jsondiffpatch@0.6.0:
dependencies:
'@types/diff-match-patch': 1.0.36
chalk: 5.4.1
diff-match-patch: 1.0.5
jsonfile@4.0.0:
optionalDependencies:
graceful-fs: 4.2.11
@ -23585,12 +23607,10 @@ snapshots:
ohash@1.1.6: {}
ollama-ai-provider@1.2.0(zod@3.25.76):
ollama-ai-provider-v2@1.5.1(zod@3.25.76):
dependencies:
'@ai-sdk/provider': 1.1.3
'@ai-sdk/provider-utils': 2.2.8(zod@3.25.76)
partial-json: 0.1.7
optionalDependencies:
'@ai-sdk/provider': 2.0.0
'@ai-sdk/provider-utils': 3.0.12(zod@3.25.76)
zod: 3.25.76
on-finished@2.3.0:
@ -23789,8 +23809,6 @@ snapshots:
parseurl@1.3.3: {}
partial-json@0.1.7: {}
partysocket@1.1.4:
dependencies:
event-target-polyfill: 0.0.4
@ -24116,6 +24134,10 @@ snapshots:
preact: 10.26.9
web-vitals: 4.2.4
posthog-node@5.10.3:
dependencies:
'@posthog/core': 1.3.1
preact@10.26.9: {}
preferred-pm@3.1.4:
@ -24896,6 +24918,8 @@ snapshots:
onetime: 5.1.2
signal-exit: 3.0.7
resumable-stream@2.2.8: {}
retry@0.12.0: {}
retry@0.13.1: {}
@ -25023,8 +25047,6 @@ snapshots:
ajv-formats: 2.1.1(ajv@8.17.1)
ajv-keywords: 5.1.0(ajv@8.17.1)
secure-json-parse@2.7.0: {}
selderee@0.11.0:
dependencies:
parseley: 0.12.1
@ -25778,6 +25800,8 @@ snapshots:
dependencies:
matchit: 1.1.0
ts-algebra@2.0.0: {}
ts-api-utils@1.4.3(typescript@5.8.3):
dependencies:
typescript: 5.8.3
@ -26603,10 +26627,6 @@ snapshots:
dependencies:
zod: 3.23.8
zod-to-json-schema@3.24.5(zod@3.25.76):
dependencies:
zod: 3.25.76
zod-validation-error@1.5.0(zod@3.23.8):
dependencies:
zod: 3.23.8


@ -92,6 +92,8 @@
"PRO_PLAN_CREDITS",
"PRO_OVERAGE_PRICE",
"MAX_PLAN_CREDITS",
"MAX_OVERAGE_PRICE"
"MAX_OVERAGE_PRICE",
"TELEMETRY_ENABLED",
"TELEMETRY_ANONYMOUS"
]
}