1. Added onboarding

2. fix: remove custom tool calls in chat and link directly to mcp
Harshith Mullapudi 2025-10-21 11:15:52 +05:30
parent 170eed76fb
commit 00f983079f
33 changed files with 950 additions and 1288 deletions

View File

@ -1,4 +1,4 @@
Sol License
Core License
GNU AFFERO GENERAL PUBLIC LICENSE
Version 3, 19 November 2007

View File

@ -1,6 +1,6 @@
import { EditorContent, useEditor } from "@tiptap/react";
import { useEffect } from "react";
import { useEffect, memo } from "react";
import { UserTypeEnum } from "@core/types";
import { type ConversationHistory } from "@core/database";
import { cn } from "~/lib/utils";
@ -11,7 +11,7 @@ interface AIConversationItemProps {
conversationHistory: ConversationHistory;
}
export const ConversationItem = ({
const ConversationItemComponent = ({
conversationHistory,
}: AIConversationItemProps) => {
const isUser =
@ -49,3 +49,12 @@ export const ConversationItem = ({
</div>
);
};
// Memoize to prevent unnecessary re-renders
export const ConversationItem = memo(ConversationItemComponent, (prevProps, nextProps) => {
// Only re-render if the conversation history ID or message changed
return (
prevProps.conversationHistory.id === nextProps.conversationHistory.id &&
prevProps.conversationHistory.message === nextProps.conversationHistory.message
);
});
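
A note on the comparator: React.memo consults it on every prop change, and returning true reuses the previous render. A minimal standalone sketch of the same pattern (illustrative only; names are hypothetical, not part of this commit):

import { memo } from "react";

interface ItemProps {
  conversationHistory: { id: string; message: string };
}

const Item = ({ conversationHistory }: ItemProps) => (
  <p>{conversationHistory.message}</p>
);

// Returning true tells React to skip the re-render and reuse the last output;
// any prop difference outside id/message is deliberately ignored.
export const MemoizedItem = memo(
  Item,
  (prev, next) =>
    prev.conversationHistory.id === next.conversationHistory.id &&
    prev.conversationHistory.message === next.conversationHistory.message,
);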

View File

@ -5,11 +5,11 @@ import { Paragraph } from "@tiptap/extension-paragraph";
import { Text } from "@tiptap/extension-text";
import { type Editor } from "@tiptap/react";
import { EditorContent, Placeholder, EditorRoot } from "novel";
import { useCallback, useState } from "react";
import { useCallback, useState, useEffect } from "react";
import { cn } from "~/lib/utils";
import { Button } from "../ui";
import { LoaderCircle } from "lucide-react";
import { Form, useSubmit } from "@remix-run/react";
import { Form, useSubmit, useActionData } from "@remix-run/react";
interface ConversationTextareaProps {
defaultValue?: string;
@ -19,6 +19,7 @@ interface ConversationTextareaProps {
className?: string;
onChange?: (text: string) => void;
disabled?: boolean;
onConversationCreated?: (conversation: any) => void;
}
export function ConversationTextarea({
@ -27,10 +28,18 @@ export function ConversationTextarea({
placeholder,
conversationId,
onChange,
onConversationCreated,
}: ConversationTextareaProps) {
const [text, setText] = useState(defaultValue ?? "");
const [editor, setEditor] = useState<Editor>();
const submit = useSubmit();
const actionData = useActionData<{ conversation?: any }>();
useEffect(() => {
if (actionData?.conversation && onConversationCreated) {
onConversationCreated(actionData.conversation);
}
}, [actionData]);
const onUpdate = (editor: Editor) => {
setText(editor.getHTML());
@ -44,18 +53,19 @@ export function ConversationTextarea({
const data = isLoading ? {} : { message: text, conversationId };
// When stopping (isLoading) or when a conversationId exists, submit to that conversation's route
// Otherwise submit to /home/conversation to create a new conversation
submit(data as any, {
action: isLoading
? `/home/conversation/${conversationId}`
: "/home/conversation",
: conversationId
? `/home/conversation/${conversationId}`
: "/home/conversation",
method: "post",
});
editor?.commands.clearContent(true);
setText("");
editor.commands.clearContent(true);
setText("");
}, [editor, text]);
// Send message to API
@ -68,7 +78,9 @@ export function ConversationTextarea({
submit(data as any, {
action: isLoading
? `/home/conversation/${conversationId}`
: "/home/conversation",
: conversationId
? `/home/conversation/${conversationId}`
: "/home/conversation",
method: "post",
});
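
Both submit sites above now choose the action URL with the same three-way decision. A condensed sketch of that routing (hypothetical helper, not part of the commit): stop requests and messages for an existing conversation go to that conversation's route; everything else goes to the generic route that creates one.

const resolveAction = (isLoading: boolean, conversationId?: string) =>
  isLoading || conversationId
    ? `/home/conversation/${conversationId}`
    : "/home/conversation";

// resolveAction(false, undefined) -> "/home/conversation"     (create new)
// resolveAction(false, "abc")     -> "/home/conversation/abc" (send message)
// resolveAction(true,  "abc")     -> "/home/conversation/abc" (stop request)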

View File

@ -17,7 +17,7 @@ export const StreamingConversation = ({
afterStreaming,
apiURL,
}: StreamingConversationProps) => {
const { message, isEnd } = useTriggerStream(runId, token, apiURL);
const { message } = useTriggerStream(runId, token, apiURL, afterStreaming);
const [loadingText, setLoadingText] = React.useState("Thinking...");
const loadingMessages = [
@ -48,13 +48,6 @@ export const StreamingConversation = ({
// eslint-disable-next-line react-hooks/exhaustive-deps
}, [message]);
React.useEffect(() => {
if (isEnd) {
afterStreaming();
}
// eslint-disable-next-line react-hooks/exhaustive-deps
}, [isEnd]);
React.useEffect(() => {
let currentIndex = 0;
let delay = 5000; // Start with 5 seconds for more thinking time

View File

@ -1,5 +1,5 @@
import { useRealtimeRunWithStreams } from "@trigger.dev/react-hooks";
import React from "react";
import React, { useEffect, useState } from "react";
import { EventSource, type ErrorEvent } from "eventsource";
const getTriggerAPIURL = (apiURL?: string) => {
return (
@ -12,102 +12,53 @@ export const useTriggerStream = (
runId: string,
token: string,
apiURL?: string,
afterStreaming?: (finalMessage: string) => void,
) => {
// Need to fix this later
const baseURL = React.useMemo(() => getTriggerAPIURL(apiURL), [apiURL]);
const [error, setError] = useState<ErrorEvent | null>(null);
const [message, setMessage] = useState("");
const { error, streams, run } = useRealtimeRunWithStreams(runId, {
accessToken: token,
baseURL, // Optional if you are using a self-hosted Trigger.dev instance
});
useEffect(() => {
startStreaming();
}, []);
const isEnd = React.useMemo(() => {
if (error) {
return true;
}
const startStreaming = () => {
const eventSource = new EventSource(
`${baseURL}/realtime/v1/streams/${runId}/messages`,
{
fetch: (input, init) =>
fetch(input, {
...init,
headers: {
...init.headers,
Authorization: `Bearer ${token}`,
},
}),
},
);
if (
run &&
[
"COMPLETED",
"CANCELED",
"FAILED",
"CRASHED",
"INTERRUPTED",
"SYSTEM_FAILURE",
"EXPIRED",
"TIMED_OUT",
].includes(run?.status)
) {
return true;
}
eventSource.onmessage = (event) => {
try {
const eventData = JSON.parse(event.data);
const hasStreamEnd =
streams.messages &&
streams.messages.filter((item) => {
// Check if the item has a type that includes 'STREAM_END'
return item.type?.includes("STREAM_END");
});
if (eventData.type.includes("MESSAGE_")) {
setMessage((prevMessage) => prevMessage + eventData.message);
}
} catch (e) {
console.error("Failed to parse message:", e);
}
};
if (hasStreamEnd && hasStreamEnd.length > 0) {
return true;
}
eventSource.onerror = (err) => {
console.error("EventSource failed:", err);
setError(err);
eventSource.close();
if (afterStreaming) {
afterStreaming(message);
}
};
};
return false;
// eslint-disable-next-line react-hooks/exhaustive-deps
}, [run?.status, error, streams.messages?.length]);
const message = React.useMemo(() => {
if (!streams?.messages) {
return "";
}
// Filter and combine all message chunks
return streams.messages
.filter((item) => {
// Check if the item has a type that includes 'MESSAGE_' and is not empty
return item.type?.includes("MESSAGE_");
})
.map((item) => item.message)
.join("");
// eslint-disable-next-line react-hooks/exhaustive-deps
}, [streams.messages?.length]);
// const actionMessages = React.useMemo(() => {
// if (!streams?.messages) {
// return {};
// }
// // eslint-disable-next-line @typescript-eslint/no-explicit-any
// const messages: Record<string, { isStreaming: boolean; content: any[] }> =
// {};
// streams.messages.forEach((item) => {
// if (item.type?.includes("SKILL_")) {
// try {
// const parsed = JSON.parse(item.message);
// const skillId = parsed.skillId;
// if (!messages[skillId]) {
// messages[skillId] = { isStreaming: true, content: [] };
// }
// if (item.type === "SKILL_END") {
// messages[skillId].isStreaming = false;
// }
// messages[skillId].content.push(parsed);
// } catch (e) {
// console.error("Failed to parse message:", e);
// }
// }
// });
// return messages;
// // eslint-disable-next-line react-hooks/exhaustive-deps
// }, [streams.messages?.length]);
return { isEnd, message, actionMessages: [] };
return { error, message, actionMessages: [] };
};
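
The rewrite swaps useRealtimeRunWithStreams for a direct SSE subscription. A minimal sketch of the authenticated EventSource pattern the hook now uses, assuming the eventsource npm package (its EventSource constructor accepts a custom fetch, as imported above):

import { EventSource } from "eventsource";

function subscribe(
  baseURL: string,
  runId: string,
  token: string,
  onChunk: (text: string) => void,
) {
  const es = new EventSource(
    `${baseURL}/realtime/v1/streams/${runId}/messages`,
    {
      // Inject the bearer token into every underlying request.
      fetch: (input, init) =>
        fetch(input, {
          ...init,
          headers: { ...init?.headers, Authorization: `Bearer ${token}` },
        }),
    },
  );
  es.onmessage = (event) => {
    const data = JSON.parse(event.data);
    // MESSAGE_* events carry text chunks; append them in arrival order.
    if (data.type?.includes("MESSAGE_")) onChunk(data.message);
  };
  // The hook above treats an error event as end-of-stream and fires afterStreaming.
  es.onerror = () => es.close();
  return es;
}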

View File

@ -2,25 +2,20 @@ import { NodeViewWrapper } from "@tiptap/react";
import React from "react";
import { getIcon as iconUtil, type IconType } from "../../icon-utils";
import { ChevronDown, ChevronRight } from "lucide-react";
import StaticLogo from "~/components/logo/logo";
// eslint-disable-next-line @typescript-eslint/no-explicit-any
export const SkillComponent = (props: any) => {
const id = props.node.attrs.id;
const name = props.node.attrs.name;
const agent = props.node.attrs.agent;
const [open, setOpen] = React.useState(false);
if (id === "undefined" || id === undefined || !name) {
return null;
}
const getIcon = () => {
const Icon = iconUtil(agent as IconType);
return <Icon size={18} className="rounded-sm" />;
return <StaticLogo size={18} className="rounded-sm" />;
};
const snakeToTitleCase = (input: string): string => {
@ -46,7 +41,7 @@ export const SkillComponent = (props: any) => {
<>
<div className="bg-grayAlpha-100 text-sm-md mt-0.5 flex w-fit items-center gap-2 rounded p-2">
{getIcon()}
<span className="font-mono text-sm">{snakeToTitleCase(name)}</span>
<span className="font-mono text-sm">{snakeToTitleCase(agent)}</span>
</div>
</>
);

View File

@ -0,0 +1,4 @@
export { OnboardingModal } from "./onboarding-modal";
export { Provider, OnboardingStep } from "./types";
export type { ProviderConfig, OnboardingState } from "./types";
export { PROVIDER_CONFIGS, SUGGESTED_INGESTION_PROMPTS, VERIFICATION_PROMPT } from "./provider-config";

View File

@ -0,0 +1,137 @@
import { useState } from "react";
import { Copy, Check, Loader2, AlertCircle } from "lucide-react";
import { Button } from "../ui";
import { SUGGESTED_INGESTION_PROMPTS } from "./provider-config";
interface IngestionStepProps {
providerName: string;
ingestionStatus: "idle" | "waiting" | "processing" | "complete" | "error";
onStartWaiting: () => void;
error?: string;
}
export function IngestionStep({
providerName,
ingestionStatus,
onStartWaiting,
error,
}: IngestionStepProps) {
const [copiedIndex, setCopiedIndex] = useState<number | null>(null);
const handleCopy = async (text: string, index: number) => {
await navigator.clipboard.writeText(text);
setCopiedIndex(index);
setTimeout(() => setCopiedIndex(null), 2000);
};
return (
<div className="space-y-6">
<div>
<h2 className="mb-2 text-xl font-semibold">
Let's Store Your First Memory
</h2>
<p className="text-muted-foreground text-sm">
Copy one of these prompts and paste it into {providerName} to create
your first memory
</p>
</div>
{ingestionStatus === "idle" && (
<>
<div className="space-y-3">
{SUGGESTED_INGESTION_PROMPTS.map((prompt, index) => (
<div
key={index}
className="group bg-grayAlpha-100 hover:border-primary/50 relative rounded-lg border border-gray-300 p-4 transition-colors"
>
<p className="pr-10 text-sm">{prompt}</p>
<button
onClick={() => handleCopy(prompt, index)}
className="hover:bg-background absolute top-3 right-3 rounded-md p-2 transition-colors"
title="Copy to clipboard"
>
{copiedIndex === index ? (
<Check className="h-4 w-4 text-green-500" />
) : (
<Copy className="text-muted-foreground h-4 w-4" />
)}
</button>
</div>
))}
</div>
<div className="flex items-center justify-between rounded-lg border border-blue-500/20 bg-blue-500/10 p-4">
<div className="flex items-start gap-3">
<AlertCircle className="mt-0.5 h-5 w-5 text-blue-500" />
<div className="text-sm">
<p className="font-medium text-blue-700 dark:text-blue-300">
Important
</p>
<p className="text-blue-600 dark:text-blue-400">
After pasting the prompt in {providerName}, click the button
below to wait for ingestion
</p>
</div>
</div>
</div>
<div className="flex justify-end">
<Button onClick={onStartWaiting} size="lg">
I've Sent the Prompt
</Button>
</div>
</>
)}
{(ingestionStatus === "waiting" || ingestionStatus === "processing") && (
<div className="flex flex-col items-center justify-center space-y-4 py-12">
<Loader2 className="text-primary h-12 w-12 animate-spin" />
<div className="space-y-2 text-center">
<h3 className="text-lg font-medium">
{ingestionStatus === "waiting"
? "Waiting for your first ingestion..."
: "Processing your memory..."}
</h3>
<p className="text-muted-foreground max-w-md text-sm">
{ingestionStatus === "waiting"
? "Make sure you've sent the prompt in your provider app. We're listening for the first memory ingestion."
: "We're storing your information. This usually takes a few seconds."}
</p>
</div>
</div>
)}
{ingestionStatus === "complete" && (
<div className="flex flex-col items-center justify-center space-y-4 py-12">
<div className="flex h-16 w-16 items-center justify-center rounded-full bg-green-500/10">
<Check className="h-8 w-8 text-green-500" />
</div>
<div className="space-y-2 text-center">
<h3 className="text-lg font-medium">Memory stored successfully!</h3>
<p className="text-muted-foreground text-sm">
Your first memory has been ingested. Let's verify it worked.
</p>
</div>
</div>
)}
{ingestionStatus === "error" && (
<div className="flex flex-col items-center justify-center space-y-4 py-12">
<div className="flex h-16 w-16 items-center justify-center rounded-full bg-red-500/10">
<AlertCircle className="h-8 w-8 text-red-500" />
</div>
<div className="space-y-2 text-center">
<h3 className="text-lg font-medium">Something went wrong</h3>
<p className="text-muted-foreground max-w-md text-sm">
{error ||
"We couldn't detect your memory ingestion. Please try again or check your provider connection."}
</p>
</div>
<Button onClick={onStartWaiting} variant="secondary">
Try Again
</Button>
</div>
)}
</div>
);
}

View File

@ -0,0 +1,230 @@
import { useState } from "react";
import { Dialog, DialogContent, DialogHeader, DialogTitle } from "../ui/dialog";
import { type Provider, OnboardingStep } from "./types";
import { ProviderSelectionStep } from "./provider-selection-step";
import { IngestionStep } from "./ingestion-step";
import { VerificationStep } from "./verification-step";
import { PROVIDER_CONFIGS } from "./provider-config";
import { Progress } from "../ui/progress";
interface OnboardingModalProps {
isOpen: boolean;
onClose: () => void;
onComplete: () => void;
}
export function OnboardingModal({
isOpen,
onClose,
onComplete,
}: OnboardingModalProps) {
const [currentStep, setCurrentStep] = useState<OnboardingStep>(
OnboardingStep.PROVIDER_SELECTION,
);
const [selectedProvider, setSelectedProvider] = useState<Provider>();
const [ingestionStatus, setIngestionStatus] = useState<
"idle" | "waiting" | "processing" | "complete" | "error"
>("idle");
const [verificationResult, setVerificationResult] = useState<string>();
const [isCheckingRecall, setIsCheckingRecall] = useState(false);
const [error, setError] = useState<string>();
// Calculate progress
const getProgress = () => {
switch (currentStep) {
case OnboardingStep.PROVIDER_SELECTION:
return 33;
case OnboardingStep.FIRST_INGESTION:
return 66;
case OnboardingStep.VERIFICATION:
return 100;
default:
return 0;
}
};
// Poll for ingestion status
const pollIngestion = async () => {
setIngestionStatus("waiting");
try {
const maxAttempts = 30; // 60 seconds (30 * 2s)
let attempts = 0;
// Store the timestamp when polling starts
const startTime = Date.now();
const poll = async (): Promise<boolean> => {
if (attempts >= maxAttempts) {
throw new Error("Ingestion timeout - please try again");
}
// Fetch the most recent ingestion log
const response = await fetch("/api/v1/logs?limit=1");
const data = await response.json();
// Check if there's a recent ingestion (created after we started polling)
if (data.logs && data.logs.length > 0) {
const latestLog = data.logs[0];
const logTime = new Date(latestLog.time).getTime();
// If the log was created after we started polling, we found a new ingestion
if (logTime >= startTime) {
return true;
}
}
await new Promise((resolve) => setTimeout(resolve, 2000));
attempts++;
return poll();
};
const success = await poll();
if (success) {
setIngestionStatus("complete");
// Auto-advance to verification step after 2 seconds
setTimeout(() => {
setCurrentStep(OnboardingStep.VERIFICATION);
}, 2000);
}
} catch (err) {
setError(err instanceof Error ? err.message : "Unknown error occurred");
setIngestionStatus("error");
}
};
const handleProviderSelect = (provider: Provider) => {
setSelectedProvider(provider);
};
const handleContinueFromProvider = () => {
setCurrentStep(OnboardingStep.FIRST_INGESTION);
};
const handleStartWaiting = () => {
pollIngestion();
};
const handleComplete = () => {
setCurrentStep(OnboardingStep.COMPLETE);
onComplete();
onClose();
};
// Poll for recall logs to detect verification
const pollRecallLogs = async () => {
setIsCheckingRecall(true);
try {
const maxAttempts = 30; // 60 seconds
let attempts = 0;
const startTime = Date.now();
const poll = async (): Promise<string | null> => {
if (attempts >= maxAttempts) {
throw new Error("Verification timeout - please try again");
}
// Check for new recall logs
const response = await fetch("/api/v1/recall-logs?limit=1");
const data = await response.json();
// Check if there's a recent recall (created after we started polling)
if (data.recallLogs && data.recallLogs.length > 0) {
const latestRecall = data.recallLogs[0];
const recallTime = new Date(latestRecall.createdAt).getTime();
// If the recall was created after we started polling
if (recallTime >= startTime) {
// Return the query as verification result
return latestRecall.query || "Recall detected successfully";
}
}
await new Promise((resolve) => setTimeout(resolve, 2000));
attempts++;
return poll();
};
const result = await poll();
if (result) {
setVerificationResult(result);
setIsCheckingRecall(false);
}
} catch (err) {
setError(err instanceof Error ? err.message : "Unknown error occurred");
setIsCheckingRecall(false);
}
};
const getStepTitle = () => {
switch (currentStep) {
case OnboardingStep.PROVIDER_SELECTION:
return "Step 1 of 3";
case OnboardingStep.FIRST_INGESTION:
return "Step 2 of 3";
case OnboardingStep.VERIFICATION:
return "Step 3 of 3";
default:
return "";
}
};
return (
<Dialog open={isOpen} onOpenChange={onClose}>
<DialogContent className="max-h-[90vh] max-w-3xl overflow-y-auto p-4">
<DialogHeader>
<div className="space-y-3">
<DialogTitle className="text-2xl">Welcome to Core</DialogTitle>
<div className="space-y-2">
<div className="flex items-center justify-between">
<p className="text-muted-foreground text-sm">
{getStepTitle()}
</p>
</div>
<Progress
segments={[{ value: getProgress() }]}
className="mb-2"
color="#c15e50"
/>
</div>
</div>
</DialogHeader>
<div>
{currentStep === OnboardingStep.PROVIDER_SELECTION && (
<ProviderSelectionStep
selectedProvider={selectedProvider}
onSelectProvider={handleProviderSelect}
onContinue={handleContinueFromProvider}
/>
)}
{currentStep === OnboardingStep.FIRST_INGESTION &&
selectedProvider && (
<IngestionStep
providerName={PROVIDER_CONFIGS[selectedProvider].name}
ingestionStatus={ingestionStatus}
onStartWaiting={handleStartWaiting}
error={error}
/>
)}
{currentStep === OnboardingStep.VERIFICATION && selectedProvider && (
<VerificationStep
providerName={PROVIDER_CONFIGS[selectedProvider].name}
verificationResult={verificationResult}
isCheckingRecall={isCheckingRecall}
onStartChecking={pollRecallLogs}
onComplete={handleComplete}
/>
)}
</div>
</DialogContent>
</Dialog>
);
}
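
pollIngestion and pollRecallLogs share one shape: fetch the latest record every two seconds, succeed once a record newer than the polling start appears, give up after 30 attempts. A condensed sketch of that shared pattern (hypothetical helper; the endpoints and field names follow the code above):

async function pollForNewRecord<T>(
  url: string,
  pickLatest: (data: unknown) => T | undefined,
  getTimeMs: (item: T) => number,
  maxAttempts = 30, // 30 * 2s = 60 seconds
): Promise<T> {
  const startTime = Date.now();
  for (let attempt = 0; attempt < maxAttempts; attempt++) {
    const response = await fetch(url);
    const data = await response.json();
    const latest = pickLatest(data);
    // Only accept records created after polling began.
    if (latest && getTimeMs(latest) >= startTime) return latest;
    await new Promise((resolve) => setTimeout(resolve, 2000));
  }
  throw new Error("Timeout - please try again");
}

// e.g. ingestion:
// await pollForNewRecord("/api/v1/logs?limit=1",
//   (d: any) => d.logs?.[0], (log: any) => new Date(log.time).getTime());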

View File

@ -0,0 +1,54 @@
import { Provider, type ProviderConfig } from "./types";
export const PROVIDER_CONFIGS: Record<Provider, ProviderConfig> = {
[Provider.CLAUDE_CODE]: {
id: Provider.CLAUDE_CODE,
name: "Claude Code CLI",
description: "Connect your Claude Code CLI to CORE's memory system",
docsUrl: "https://docs.heysol.ai/providers/claude-code",
icon: "claude",
},
[Provider.CLAUDE]: {
id: Provider.CLAUDE,
name: "Claude",
description: "Connect your Claude Desktop app to CORE's memory system",
docsUrl: "https://docs.heysol.ai/providers/claude",
icon: "claude",
},
[Provider.CURSOR]: {
id: Provider.CURSOR,
name: "Cursor",
description: "Connect your Cursor Desktop app to CORE's memory system",
docsUrl: "https://docs.heysol.ai/providers/cursor",
icon: "cursor",
},
[Provider.KILO_CODE]: {
id: Provider.KILO_CODE,
name: "Kilo-Code",
description: "Connect Kilo Code Agent to CORE's memory system via MCP",
docsUrl: "https://docs.heysol.ai/providers/kilo-code",
icon: "kilo-code",
},
[Provider.VSCODE]: {
id: Provider.VSCODE,
name: "VS Code (Github Copilot)",
description: "Connect your VS Code editor to CORE's memory system via MCP",
docsUrl: "https://docs.heysol.ai/providers/vscode",
icon: "vscode",
},
[Provider.ZED]: {
id: Provider.ZED,
name: "Zed",
description: "Connect your Zed editor to CORE's memory system via MCP",
docsUrl: "https://docs.heysol.ai/providers/zed",
icon: "zed",
},
};
export const SUGGESTED_INGESTION_PROMPTS = [
"I'm a full-stack developer working on a React and Node.js application. I prefer TypeScript, functional programming patterns, and writing comprehensive tests.",
"I'm working on a machine learning project using Python and PyTorch. I focus on computer vision and prefer Jupyter notebooks for exploration.",
"I'm a DevOps engineer managing Kubernetes clusters. I work primarily with Terraform, Helm, and CI/CD pipelines using GitHub Actions.",
];
export const VERIFICATION_PROMPT = "Who am I? Tell me what you know about me.";

View File

@ -0,0 +1,89 @@
import { Check, ExternalLink } from "lucide-react";
import { Button } from "../ui";
import { PROVIDER_CONFIGS } from "./provider-config";
import { type Provider } from "./types";
import { getIconForAuthorise } from "../icon-utils";
interface ProviderSelectionStepProps {
selectedProvider?: Provider;
onSelectProvider: (provider: Provider) => void;
onContinue: () => void;
}
export function ProviderSelectionStep({
selectedProvider,
onSelectProvider,
onContinue,
}: ProviderSelectionStepProps) {
const providers = Object.values(PROVIDER_CONFIGS);
return (
<div className="space-y-2">
<div>
<h2 className="mb-2 text-xl font-semibold">Choose Your Provider</h2>
<p className="text-muted-foreground text-sm">
Select the application you'll use to connect with Core
</p>
</div>
<div className="grid grid-cols-1 gap-3 sm:grid-cols-2 lg:grid-cols-3">
{providers.map((provider) => {
const isSelected = selectedProvider === provider.id;
return (
<Button
key={provider.id}
variant="outline"
onClick={() => onSelectProvider(provider.id)}
size="2xl"
className={`relative flex flex-col items-start justify-center gap-1 rounded-lg border-1 border-gray-300 p-4 text-left transition-all ${
isSelected
? "border-primary bg-primary/5"
: "hover:border-primary/50 border-gray-300"
}`}
>
<div className="flex h-full items-center gap-2">
{getIconForAuthorise(provider.icon, 20)}
<div className="flex items-center gap-2">
<h3 className="font-medium">{provider.name}</h3>
</div>
</div>
</Button>
);
})}
</div>
{selectedProvider && (
<div className="bg-grayAlpha-100 space-y-4 rounded-lg p-4">
<div className="space-y-3">
<h3 className="font-medium">Next Steps</h3>
<p className="text-muted-foreground text-sm">
Follow our setup guide to connect{" "}
{PROVIDER_CONFIGS[selectedProvider].name} with Core. Once you've
completed the setup, come back here to continue.
</p>
<a
href={PROVIDER_CONFIGS[selectedProvider].docsUrl}
target="_blank"
rel="noopener noreferrer"
className="bg-primary text-primary-foreground hover:bg-primary/90 inline-flex items-center gap-2 rounded-md px-4 py-2 text-sm font-medium transition-colors"
>
Open Setup Guide
<ExternalLink className="h-4 w-4" />
</a>
</div>
</div>
)}
<div className="flex justify-end">
<Button
onClick={onContinue}
disabled={!selectedProvider}
size="lg"
variant="secondary"
>
Continue to Setup
</Button>
</div>
</div>
);
}

View File

@ -0,0 +1,32 @@
export enum Provider {
CLAUDE_CODE = "claude-code",
CLAUDE = "claude",
CURSOR = "cursor",
KILO_CODE = "kilo-code",
VSCODE = "vscode",
ZED = "zed",
}
export enum OnboardingStep {
PROVIDER_SELECTION = "provider_selection",
FIRST_INGESTION = "first_ingestion",
VERIFICATION = "verification",
COMPLETE = "complete",
}
export interface ProviderConfig {
id: Provider;
name: string;
description: string;
docsUrl: string;
icon: string;
}
export interface OnboardingState {
currentStep: OnboardingStep;
selectedProvider?: Provider;
isConnected: boolean;
ingestionStatus: "idle" | "waiting" | "processing" | "complete" | "error";
verificationResult?: string;
error?: string;
}

View File

@ -0,0 +1,101 @@
import { useState } from "react";
import {
Copy,
Check,
AlertCircle,
ThumbsUp,
ThumbsDown,
Loader2,
} from "lucide-react";
import { Button } from "../ui";
import { VERIFICATION_PROMPT } from "./provider-config";
interface VerificationStepProps {
providerName: string;
verificationResult?: string;
isCheckingRecall?: boolean;
onStartChecking: () => void;
onComplete: () => void;
}
export function VerificationStep({
providerName,
verificationResult,
isCheckingRecall = false,
onStartChecking,
onComplete,
}: VerificationStepProps) {
const [copied, setCopied] = useState(false);
const handleCopy = async () => {
await navigator.clipboard.writeText(VERIFICATION_PROMPT);
setCopied(true);
setTimeout(() => setCopied(false), 2000);
};
return (
<div className="space-y-6">
<div>
<h2 className="mb-2 text-xl font-semibold">Verify Your Memory</h2>
<p className="text-muted-foreground text-sm">
Let's test if your memory is working correctly by asking the AI about
you
</p>
</div>
{!verificationResult && !isCheckingRecall && (
<>
<div className="group bg-grayAlpha-100 relative rounded-lg border border-gray-300 p-4">
<p className="mb-1 text-sm font-medium">Copy this prompt:</p>
<p className="pr-10 text-sm">{VERIFICATION_PROMPT}</p>
<button
onClick={handleCopy}
className="hover:bg-background absolute top-3 right-3 rounded-md p-2 transition-colors"
title="Copy to clipboard"
>
{copied ? (
<Check className="h-4 w-4 text-green-500" />
) : (
<Copy className="text-muted-foreground h-4 w-4" />
)}
</button>
</div>
<div className="flex items-center gap-3 rounded-lg border border-blue-500/20 bg-blue-500/10 p-4">
<AlertCircle className="h-5 w-5 shrink-0 text-blue-500" />
<div className="flex-1 text-sm">
<p className="text-blue-600 dark:text-blue-400">
Paste this prompt in {providerName}. Once you ask, click the
button below to detect the recall.
</p>
</div>
</div>
<div className="flex justify-end gap-3">
<Button onClick={onComplete} variant="ghost" size="lg">
Skip Verification
</Button>
<Button onClick={onStartChecking} size="lg" variant="secondary">
I've Asked the Question
</Button>
</div>
</>
)}
{isCheckingRecall && !verificationResult && (
<div className="flex flex-col items-center justify-center space-y-4 py-12">
<Loader2 className="text-primary h-12 w-12 animate-spin" />
<div className="space-y-2 text-center">
<h3 className="text-lg font-medium">
Waiting for your recall query...
</h3>
<p className="text-muted-foreground max-w-md text-sm">
Make sure you've asked "{VERIFICATION_PROMPT}" in {providerName}.
We're listening for the recall.
</p>
</div>
</div>
)}
</div>
);
}

View File

@ -149,7 +149,7 @@ export const ScrollAreaWithAutoScroll = ({
className?: string;
}) => {
const { scrollRef } = useAutoScroll({
smooth: true,
smooth: false,
content: children,
});
@ -161,7 +161,7 @@ export const ScrollAreaWithAutoScroll = ({
className,
)}
>
<div className="flex h-full w-full max-w-[97ch] flex-col pb-4">
<div className="flex h-full w-full max-w-[80ch] flex-col pb-4">
{children}
</div>
</div>

View File

@ -1,32 +0,0 @@
import { z } from "zod";
import { createActionApiRoute } from "~/services/routeBuilders/apiBuilder.server";
import { json } from "@remix-run/node";
import { extensionSearch } from "~/trigger/extension/search";
export const ExtensionSearchBodyRequest = z.object({
input: z.string().min(1, "Input text is required"),
outputType: z.string().default("markdown"),
});
const { action, loader } = createActionApiRoute(
{
body: ExtensionSearchBodyRequest,
method: "POST",
allowJWT: true,
authorization: {
action: "search",
},
corsStrategy: "all",
},
async ({ body, authentication }) => {
const trigger = await extensionSearch.trigger({
userInput: body.input,
userId: authentication.userId,
outputType: body.outputType,
});
return json(trigger);
},
);
export { action, loader };

View File

@ -1,32 +0,0 @@
import { z } from "zod";
import { createActionApiRoute } from "~/services/routeBuilders/apiBuilder.server";
import { json } from "@remix-run/node";
import { extensionSummary } from "~/trigger/extension/summary";
export const ExtensionSummaryBodyRequest = z.object({
html: z.string().min(1, "HTML content is required"),
url: z.string().url("Valid URL is required"),
title: z.string().optional(),
parseImages: z.boolean().default(false),
});
const { action, loader } = createActionApiRoute(
{
body: ExtensionSummaryBodyRequest,
allowJWT: true,
authorization: {
action: "search",
},
corsStrategy: "all",
},
async ({ body, authentication }) => {
const response = await extensionSummary.trigger({
...body,
apiKey: authentication.apiKey,
});
return json(response);
},
);
export { action, loader };

View File

@ -0,0 +1,84 @@
import { type LoaderFunctionArgs, json } from "@remix-run/node";
import { z } from "zod";
import { prisma } from "~/db.server";
import { createHybridLoaderApiRoute } from "~/services/routeBuilders/apiBuilder.server";
// Schema for recall logs search parameters
const RecallLogsSearchParams = z.object({
page: z.string().optional(),
limit: z.string().optional(),
query: z.string().optional(),
});
export const loader = createHybridLoaderApiRoute(
{
allowJWT: true,
searchParams: RecallLogsSearchParams,
corsStrategy: "all",
findResource: async () => 1,
},
async ({ authentication, searchParams }) => {
const page = parseInt(searchParams.page || "1");
const limit = parseInt(searchParams.limit || "100");
const query = searchParams.query;
const skip = (page - 1) * limit;
// Get user and workspace in one query
const user = await prisma.user.findUnique({
where: { id: authentication.userId },
select: { Workspace: { select: { id: true } } },
});
if (!user?.Workspace) {
throw new Response("Workspace not found", { status: 404 });
}
// Build where clause for filtering
const whereClause: any = {
workspaceId: user.Workspace.id,
deleted: null,
};
if (query) {
whereClause.query = {
contains: query,
mode: "insensitive",
};
}
const [recallLogs, totalCount] = await Promise.all([
prisma.recallLog.findMany({
where: whereClause,
select: {
id: true,
createdAt: true,
accessType: true,
query: true,
targetType: true,
targetId: true,
searchMethod: true,
resultCount: true,
similarityScore: true,
source: true,
},
orderBy: {
createdAt: "desc",
},
skip,
take: limit,
}),
prisma.recallLog.count({
where: whereClause,
}),
]);
return json({
recallLogs,
totalCount,
page,
limit,
hasMore: skip + recallLogs.length < totalCount,
});
},
);
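
A hypothetical client call against this endpoint (the query params match RecallLogsSearchParams, and pagination follows the loader's skip/take math):

const res = await fetch("/api/v1/recall-logs?page=1&limit=20&query=build");
const { recallLogs, totalCount, page, limit, hasMore } = await res.json();
// hasMore is true while (page - 1) * limit + recallLogs.length < totalCount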

View File

@ -5,11 +5,18 @@ import {
import { sort } from "fast-sort";
import { useParams, useRevalidator, useNavigate } from "@remix-run/react";
import { requireUser, requireWorkpace } from "~/services/session.server";
import { parse } from "@conform-to/zod";
import {
requireUserId,
requireUser,
requireWorkpace,
} from "~/services/session.server";
import {
getConversationAndHistory,
getCurrentConversationRun,
stopConversation,
createConversation,
CreateConversationSchema,
} from "~/services/conversation.server";
import { type ConversationHistory } from "@core/database";
import {
@ -49,16 +56,39 @@ export async function action({ params, request }: ActionFunctionArgs) {
if (request.method.toUpperCase() !== "POST") {
return new Response("Method Not Allowed", { status: 405 });
}
const userId = await requireUserId(request);
const workspace = await requireWorkpace(request);
// params.conversationId will be available here
const formData = await request.formData();
const { conversationId } = params;
if (!conversationId) {
throw new Error("No conversation");
}
const result = await stopConversation(conversationId, workspace.id);
return json(result);
// Check if this is a stop request (isLoading = true means stop button was clicked)
const message = formData.get("message");
// If no message, it's a stop request
if (!message) {
const result = await stopConversation(conversationId, workspace.id);
return json(result);
}
// Otherwise, create a new conversation message
const submission = parse(formData, { schema: CreateConversationSchema });
if (!submission.value || submission.intent !== "submit") {
return json(submission);
}
const conversation = await createConversation(workspace?.id, userId, {
message: submission.value.message,
title: submission.value.title,
conversationId: submission.value.conversationId,
});
return json({ conversation });
}
// Accessing params in the component
@ -72,7 +102,6 @@ export default function SingleConversation() {
const { conversationId } = useParams();
const revalidator = useRevalidator();
const navigate = useNavigate();
React.useEffect(() => {
@ -97,20 +126,15 @@ export default function SingleConversation() {
// Filter out any conversation history items that come after the lastConversationHistoryId
const filteredConversationHistory = lastConversationHistoryId
? sortedConversationHistory.filter((_ch, currentIndex: number) => {
// Find the index of the last conversation history
// Only keep items that come before or are the last conversation history
return currentIndex <= lastIndex;
})
: sortedConversationHistory;
return (
<>
{filteredConversationHistory.map(
(ch: ConversationHistory, index: number) => {
return <ConversationItem key={index} conversationHistory={ch} />;
},
)}
{filteredConversationHistory.map((ch: ConversationHistory) => {
return <ConversationItem key={ch.id} conversationHistory={ch} />;
})}
</>
);
};
@ -155,7 +179,7 @@ export default function SingleConversation() {
</ScrollAreaWithAutoScroll>
<div className="flex w-full flex-col items-center">
<div className="w-full max-w-[97ch] px-1 pr-2">
<div className="w-full max-w-[80ch] px-1 pr-2">
{conversation?.status !== "need_approval" && (
<ConversationTextarea
conversationId={conversationId as string}
@ -163,6 +187,16 @@ export default function SingleConversation() {
isLoading={
!!conversationResponse || conversation?.status === "running"
}
onConversationCreated={(conversation) => {
if (conversation) {
setConversationResponse({
conversationHistoryId:
conversation.conversationHistoryId,
id: conversation.id,
token: conversation.token,
});
}
}}
/>
)}
</div>

View File

@ -4,7 +4,7 @@ import {
} from "@remix-run/server-runtime";
import { useTypedLoaderData } from "remix-typedjson";
import { parse } from "@conform-to/zod";
import { redirect, json } from "@remix-run/node";
import {
requireUser,
requireUserId,
@ -16,7 +16,7 @@ import {
createConversation,
CreateConversationSchema,
} from "~/services/conversation.server";
import { json } from "@remix-run/node";
import { PageHeader } from "~/components/common/page-header";
export async function loader({ request }: LoaderFunctionArgs) {
@ -47,17 +47,16 @@ export async function action({ request }: ActionFunctionArgs) {
conversationId: submission.value.conversationId,
});
// Redirect to the conversation page after creation
// conversationId may be in different places depending on createConversation logic
// If conversationId exists in submission, return the conversation data (don't redirect)
if (submission.value.conversationId) {
return json({ conversation });
}
// For new conversations (no conversationId), redirect to the conversation page
const conversationId = conversation?.conversationId;
if (conversationId) {
return new Response(null, {
status: 302,
headers: {
Location: `/home/conversation/${conversationId}`,
},
});
return redirect(`/home/conversation/${conversationId}`);
}
// fallback: just return the conversation object
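
In short, the action's return contract (a sketch of the two branches implemented above): continuing an existing conversation returns JSON, which ConversationTextarea picks up via useActionData and forwards through onConversationCreated; a brand-new conversation gets a 302 to its own route.

if (submission.value.conversationId) {
  return json({ conversation }); // stay on the page; client reads it via useActionData
}
return redirect(`/home/conversation/${conversation?.conversationId}`);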

View File

@ -1,4 +1,4 @@
import { useState } from "react";
import { useEffect, useState } from "react";
import { useLogs } from "~/hooks/use-logs";
import { LogsFilters } from "~/components/logs/logs-filters";
import { VirtualLogsList } from "~/components/logs/virtual-logs-list";
@ -12,11 +12,13 @@ import {
} from "~/components/ui/resizable";
import { Outlet, useParams } from "@remix-run/react";
import { cn } from "~/lib/utils";
import { OnboardingModal } from "~/components/onboarding";
export default function LogsAll() {
const [selectedSource, setSelectedSource] = useState<string | undefined>();
const [selectedStatus, setSelectedStatus] = useState<string | undefined>();
const [selectedType, setSelectedType] = useState<string | undefined>();
const [onboarding, setOnboarding] = useState(false);
const { logId } = useParams();
@ -34,6 +36,12 @@ export default function LogsAll() {
type: selectedType,
});
useEffect(() => {
if (!isLoading && logs && logs.length === 1) {
setOnboarding(true);
}
}, [logs.length, isLoading]);
return (
<>
<ResizablePanelGroup direction="horizontal">
@ -117,6 +125,16 @@ export default function LogsAll() {
<Outlet />
</ResizablePanel>
</ResizablePanelGroup>
<OnboardingModal
isOpen={onboarding}
onClose={() => {
setOnboarding(false);
}}
onComplete={() => {
setOnboarding(false);
}}
/>
</>
);
}

View File

@ -149,6 +149,7 @@ export async function getCurrentConversationRun(
conversation: {
workspaceId,
},
userType: UserTypeEnum.User,
},
orderBy: {
updatedAt: "desc",
@ -160,12 +161,17 @@ export async function getCurrentConversationRun(
}
const response = await runs.list({
tag: [conversationId, conversationHistory.id],
tag: [conversationId, conversationHistory.id, workspaceId],
status: ["QUEUED", "EXECUTING"],
limit: 1,
});
const run = response.data[0];
if (!response) {
return undefined;
}
const run = response?.data?.[0];
if (!run) {
return undefined;
}
@ -290,7 +296,9 @@ export const GetConversationsListSchema = z.object({
search: z.string().optional(),
});
export type GetConversationsListDto = z.infer<typeof GetConversationsListSchema>;
export type GetConversationsListDto = z.infer<
typeof GetConversationsListSchema
>;
export async function getConversationsList(
workspaceId: string,

View File

@ -23,7 +23,6 @@ import {
type TotalCost,
} from "../utils/types";
import { flattenObject } from "../utils/utils";
import { searchMemory, addMemory, searchSpaces } from "./memory-utils";
interface LLMOutputInterface {
response: AsyncGenerator<
@ -57,93 +56,7 @@ const progressUpdateTool = tool({
}),
});
const searchMemoryTool = tool({
description:
"Search the user's memory graph for episodes or statements based on a query",
parameters: jsonSchema({
type: "object",
properties: {
query: {
type: "string",
description: "The search query in third person perspective",
},
validAt: {
type: "string",
description: "The valid at time in ISO format",
},
startTime: {
type: "string",
description: "The start time in ISO format",
},
endTime: {
type: "string",
description: "The end time in ISO format",
},
spaceIds: {
type: "array",
items: {
type: "string",
format: "uuid",
},
description: "Array of strings representing UUIDs of spaces",
},
},
required: ["query"],
additionalProperties: false,
}),
});
const addMemoryTool = tool({
description: "Add information to the user's memory graph",
parameters: jsonSchema({
type: "object",
properties: {
message: {
type: "string",
description: "The content/text to add to memory",
},
},
required: ["message"],
additionalProperties: false,
}),
});
const searchSpacesTool = tool({
description: "Get spaces in memory",
parameters: jsonSchema({
type: "object",
properties: {},
required: [],
additionalProperties: false,
}),
});
const loadMCPTools = tool({
description:
"Load tools for a specific integration. Call this when you need to use a third-party service.",
parameters: jsonSchema({
type: "object",
properties: {
integration: {
type: "array",
items: {
type: "string",
},
description:
'Array of integration names to load (e.g., ["github", "linear", "slack"])',
},
},
required: ["integration"],
additionalProperties: false,
}),
});
const internalTools = [
"core--progress_update",
"core--search_memory",
"core--add_memory",
"core--load_mcp",
];
const internalTools = ["core--progress_update"];
async function addResources(messages: CoreMessage[], resources: Resource[]) {
const resourcePromises = resources.map(async (resource) => {
@ -230,7 +143,6 @@ async function makeNextCall(
TOOLS: ToolSet,
totalCost: TotalCost,
guardLoop: number,
mcpServers: string[],
): Promise<LLMOutputInterface> {
const { context, history, previousHistory } = executionState;
@ -238,7 +150,6 @@ async function makeNextCall(
USER_MESSAGE: executionState.query,
CONTEXT: context,
USER_MEMORY: executionState.userMemoryContext,
AVAILABLE_MCP_TOOLS: mcpServers.join(", "),
};
let messages: CoreMessage[] = [];
@ -291,8 +202,6 @@ export async function* run(
previousHistory: CoreMessage[],
mcp: MCP,
stepHistory: HistoryStep[],
mcpServers: string[],
mcpHeaders: any,
// eslint-disable-next-line @typescript-eslint/no-explicit-any
): AsyncGenerator<AgentMessage, any, any> {
let guardLoop = 0;
@ -300,10 +209,6 @@ export async function* run(
let tools = {
...(await mcp.allTools()),
"core--progress_update": progressUpdateTool,
"core--search_memory": searchMemoryTool,
"core--add_memory": addMemoryTool,
"core--search_spaces": searchSpacesTool,
"core--load_mcp": loadMCPTools,
};
logger.info("Tools have been formed");
@ -339,7 +244,6 @@ export async function* run(
tools,
totalCost,
guardLoop,
mcpServers,
);
let toolCallInfo;
@ -424,7 +328,7 @@ export async function* run(
thought: "",
skill: "",
skillId: "",
userMessage: "Sol agent error, retrying \n",
userMessage: "Core agent error, retrying \n",
isQuestion: false,
isFinal: false,
tokenCount: totalCost,
@ -541,43 +445,6 @@ export async function* run(
stepRecord.userMessage += skillInput.message;
yield Message("", AgentMessageType.MESSAGE_END);
result = "Progress update sent successfully";
} else if (toolName === "search_memory") {
try {
result = await searchMemory(skillInput);
} catch (apiError) {
logger.error("Memory utils calls failed for search_memory", {
apiError,
});
result =
"Memory search failed - please check your memory configuration";
}
} else if (toolName === "add_memory") {
try {
result = await addMemory(skillInput);
} catch (apiError) {
logger.error("Memory utils calls failed for add_memory", {
apiError,
});
result =
"Memory storage failed - please check your memory configuration";
}
} else if (toolName === "search_spaces") {
try {
result = await searchSpaces();
} catch (apiError) {
logger.error("Search spaces call failed", {
apiError,
});
result = "Search spaces call failed";
}
} else if (toolName === "load_mcp") {
// Load MCP integration and update available tools
await mcp.load(skillInput.integration, mcpHeaders);
tools = {
...tools,
...(await mcp.allTools()),
};
result = "MCP integration loaded successfully";
}
}
// Handle other MCP tools

View File

@ -52,12 +52,11 @@ export const chat = task({
const { previousHistory, ...otherData } = payload.context;
const { agents = [] } = payload.context;
// Initialise mcp
const mcpHeaders = { Authorization: `Bearer ${init?.token}` };
const mcp = new MCP();
await mcp.init();
await mcp.load(agents, mcpHeaders);
await mcp.load(mcpHeaders);
// Prepare context with additional metadata
const context = {
@ -93,8 +92,6 @@ export const chat = task({
previousExecutionHistory,
mcp,
stepHistory,
init?.mcpServers ?? [],
mcpHeaders,
);
const stream = await metadata.stream("messages", llmResponse);
@ -142,6 +139,7 @@ export const chat = task({
await deletePersonalAccessToken(init.tokenId);
}
} catch (e) {
console.log(e);
await updateConversationStatus("failed", payload.conversationId);
if (init?.tokenId) {
await deletePersonalAccessToken(init.tokenId);

View File

@ -1,62 +0,0 @@
import { logger } from "@trigger.dev/sdk/v3";
import axios from "axios";
// Memory API functions using axios interceptor
export interface SearchMemoryParams {
query: string;
validAt?: string;
startTime?: string;
endTime?: string;
}
export interface AddMemoryParams {
message: string;
referenceTime?: string;
spaceId?: string;
sessionId?: string;
metadata?: any;
}
export const searchMemory = async (params: SearchMemoryParams) => {
try {
const response = await axios.post(
"https://core::memory/api/v1/search",
params,
);
return response.data;
} catch (error) {
logger.error("Memory search failed", { error, params });
return { error: "Memory search failed" };
}
};
export const addMemory = async (params: AddMemoryParams) => {
try {
// Set defaults for required fields
const memoryInput = {
...params,
episodeBody: params.message,
referenceTime: params.referenceTime || new Date().toISOString(),
source: "CORE",
};
const response = await axios.post(
"https://core::memory/api/v1/add",
memoryInput,
);
return response.data;
} catch (error) {
logger.error("Memory storage failed", { error, params });
return { error: "Memory storage failed" };
}
};
export const searchSpaces = async () => {
try {
const response = await axios.post("https://core::memory/api/v1/spaces");
return response.data;
} catch (error) {
logger.error("Memory storage failed", { error });
return { error: "Memory storage failed" };
}
};

View File

@ -72,7 +72,6 @@ MEMORY USAGE:
</memory>
<external_services>
- Available integrations: {{AVAILABLE_MCP_TOOLS}}
- To use: load_mcp with EXACT integration name from the available list
- Can load multiple at once with an array
- Only load when tools are NOT already available in your current toolset

View File

@ -1,143 +0,0 @@
import { metadata, task } from "@trigger.dev/sdk";
import { streamText, type CoreMessage, tool } from "ai";
import { z } from "zod";
import { openai } from "@ai-sdk/openai";
import { logger } from "~/services/logger.service";
import {
deletePersonalAccessToken,
getOrCreatePersonalAccessToken,
} from "../utils/utils";
import axios from "axios";
import { nanoid } from "nanoid";
export const ExtensionSearchBodyRequest = z.object({
userInput: z.string().min(1, "User input is required"),
userId: z.string().min(1, "User ID is required"),
outputType: z.string().default("markdown"),
context: z
.string()
.optional()
.describe("Additional context about the user's current work"),
});
// Export a singleton instance
export const extensionSearch = task({
id: "extensionSearch",
maxDuration: 3000,
run: async (body: z.infer<typeof ExtensionSearchBodyRequest>) => {
const { userInput, userId, context } =
ExtensionSearchBodyRequest.parse(body);
const outputType = body.outputType;
const randomKeyName = `extensionSearch_${nanoid(10)}`;
const pat = await getOrCreatePersonalAccessToken({
name: randomKeyName,
userId: userId as string,
});
// Define the searchMemory tool that actually calls the search service
const searchMemoryTool = tool({
description:
"Search the user's memory for relevant facts and episodes based on a query",
parameters: z.object({
query: z.string().describe("Search query to find relevant information"),
}),
execute: async ({ query }) => {
try {
const response = await axios.post(
`https://core.heysol.ai/api/v1/search`,
{ query },
{
headers: {
Authorization: `Bearer ${pat.token}`,
},
},
);
const searchResult = response.data;
return {
facts: searchResult.facts || {},
episodes: searchResult.episodes || [],
};
} catch (error) {
logger.error(`SearchMemory tool error: ${error}`);
return {
facts: [],
episodes: [],
};
}
},
});
const messages: CoreMessage[] = [
{
role: "system",
content: `You are a specialized memory search and summarization agent. Your job is to:
1. FIRST: Understand the user's intent and what information they need to achieve their goal
2. THEN: Design a strategic search plan to gather that information from memory
3. Execute multiple targeted searches using the searchMemory tool
4. Format your response in ${outputType} and return exact content from episodes or facts without modification.
SEARCH STRATEGY:
- Analyze the user's query to understand their underlying intent and information needs
- For comparisons: search each entity separately, then look for comparative information
- For "how to" questions: search for procedures, examples, and related concepts
- For troubleshooting: search for error messages, solutions, and similar issues
- For explanations: search for definitions, examples, and context
- Always use multiple targeted searches with different angles rather than one broad search
- Think about what background knowledge would help answer the user's question
EXAMPLES:
- "Graphiti vs CORE comparison" Intent: Compare two systems Search: "Graphiti", "CORE", "Graphiti features", "CORE features"
- "How to implement authentication" Intent: Learn implementation Search: "authentication", "authentication implementation", "login system"
- "Why is my build failing" Intent: Debug issue Search: "build error", "build failure", "deployment issues"
IMPORTANT: Always format your response in ${outputType}. When you find relevant content in episodes or facts, return the exact content as found - preserve lists, code blocks, formatting, and structure exactly as they appear. Present the information clearly organized in ${outputType} format with appropriate headers and structure.
HANDLING PARTIAL RESULTS:
- If you find complete information for the query, present it organized by topic
- If you find partial information, clearly state what you found and what you didn't find
- Always provide helpful related information even if it doesn't directly answer the query
- Example: "I didn't find specific information about X vs Y comparison, but here's what I found about X: [exact content] and about Y: [exact content], which can help you build the comparison"
If no relevant information is found at all, provide a brief statement indicating that in ${outputType} format.`,
},
{
role: "user",
content: `User input: "${userInput}"${context ? `\n\nAdditional context: ${context}` : ""}\n\nPlease search my memory for relevant information and provide the exact content from episodes or facts that relate to this question. Format your response in ${outputType} and do not modify or summarize the found content.`,
},
];
try {
const result = streamText({
model: openai(process.env.MODEL as string),
messages,
tools: {
searchMemory: searchMemoryTool,
},
maxSteps: 5,
temperature: 0.3,
maxTokens: 1000,
});
const stream = await metadata.stream("messages", result.textStream);
let finalText: string = "";
for await (const chunk of stream) {
finalText = finalText + chunk;
}
await deletePersonalAccessToken(pat?.id);
return finalText;
} catch (error) {
await deletePersonalAccessToken(pat?.id);
logger.error(`SearchMemoryAgent error: ${error}`);
return `Context related to: ${userInput}. Looking for relevant background information, previous discussions, and related concepts that would help provide a comprehensive answer.`;
}
},
});

View File

@ -1,526 +0,0 @@
import { metadata, task } from "@trigger.dev/sdk";
import { type CoreMessage } from "ai";
import * as cheerio from "cheerio";
import { z } from "zod";
import { makeModelCall } from "~/lib/model.server";
import { summarizeImage, extractImageUrls } from "./utils";
import { DocumentChunker } from "~/services/documentChunker.server";
export type PageType = "text" | "video";
export const ExtensionSummaryBodyRequest = z.object({
html: z.string().min(1, "HTML content is required"),
url: z.string().url("Valid URL is required"),
title: z.string().optional(),
parseImages: z.boolean().default(false),
apiKey: z.string().optional(),
});
interface ContentExtractionResult {
pageType: PageType;
title: string;
content: string;
images: string[];
metadata: {
url: string;
wordCount: number;
imageCount: number;
};
supported: boolean;
}
/**
* Detect if page contains video content
*/
function isVideoPage(url: string, $: cheerio.CheerioAPI): boolean {
const hostname = new URL(url).hostname.toLowerCase();
// Known video platforms
if (
hostname.includes("youtube.com") ||
hostname.includes("youtu.be") ||
hostname.includes("vimeo.com") ||
hostname.includes("twitch.tv") ||
hostname.includes("tiktok.com")
) {
return true;
}
// Generic video content detection
const videoElements = $("video").length;
const videoPlayers = $(
'.video-player, [class*="video-player"], [data-testid*="video"]',
).length;
// If there are multiple video indicators, likely a video-focused page
return videoElements > 0 || videoPlayers > 2;
}
/**
* Extract all text content and images from any webpage
*/
function extractTextContent(
$: cheerio.CheerioAPI,
url: string,
html: string,
parseImages: boolean = false,
): ContentExtractionResult {
// Extract title from multiple possible locations
const title =
$("title").text() ||
$('meta[property="og:title"]').attr("content") ||
$('meta[name="title"]').attr("content") ||
$("h1").first().text() ||
"Untitled Page";
// Check if this is primarily a video page
const isVideo = isVideoPage(url, $);
const pageType: PageType = isVideo ? "video" : "text";
let content = "";
if (isVideo) {
// For video pages, try to get description/transcript text
content =
$("#description, .video-description, .description").text() ||
$('meta[name="description"]').attr("content") ||
$('[class*="transcript"], [class*="caption"]').text() ||
"Video content detected - text summarization not available";
} else {
// Simple universal text extraction
// Remove non-content elements
$("script, style, noscript, nav, header, footer").remove();
// Get all text content
const allText = $("body").text();
// Split into sentences and filter for meaningful content
const sentences = allText
.split(/[.!?]+/)
.map((s) => s.trim())
.filter((s) => s.length > 20) // Keep sentences with substance
.filter(
(s) =>
!/^(click|menu|button|nav|home|search|login|signup|subscribe)$/i.test(
s.toLowerCase(),
),
) // Remove UI text
.filter((s) => s.split(" ").length > 3); // Keep sentences with multiple words
content = sentences.join(". ");
}
// Clean up whitespace and normalize text
content = content.replace(/\s+/g, " ").trim();
// Extract images if requested
const images = parseImages ? extractImageUrls(html) : [];
const wordCount = content
.split(/\s+/)
.filter((word) => word.length > 0).length;
const supported = !isVideo && (content.length > 50 || images.length > 0);
return {
pageType,
title: title.trim(),
content, // Limit content size for processing
images,
metadata: {
url,
wordCount,
imageCount: images.length,
},
supported,
};
}
/**
* Process images and get their summaries
*/
async function processImages(
images: string[],
apiKey?: string,
): Promise<string[]> {
if (images.length === 0) return [];
const imageSummaries: string[] = [];
for (const imageUrl of images) {
try {
const summary = await summarizeImage(imageUrl, apiKey);
imageSummaries.push(`[Image Description]: ${summary}`);
} catch (error) {
console.error(`Error processing image ${imageUrl}:`, error);
imageSummaries.push(
`[Image Description]: Unable to analyze image at ${imageUrl}`,
);
}
}
return imageSummaries;
}
/**
* Generate summary using LLM with optional image descriptions
*/
async function generateSummary(
title: string,
content: string,
lastSummary: string | null,
imageSummaries: string[] = [],
) {
// Combine content with image descriptions
const contentWithImages =
imageSummaries.length > 0
? `${content}\n\n${imageSummaries.join("\n\n")}`
: content;
const messages: CoreMessage[] = [
{
role: "system",
content: `You are C.O.R.E. (Contextual Observation & Recall Engine), a smart memory enrichment system.
Create ONE enriched sentence that transforms the episode into a contextually-rich memory using SELECTIVE enrichment.
<smart_enrichment_process>
Evaluate the episode and apply enrichment ONLY where it adds significant value:
1. PRIMARY FACTS - always preserve the core information from the episode
2. STRATEGIC ENRICHMENT - add context only for HIGH VALUE cases (see guidelines below)
3. VISUAL CONTENT - capture exact text on signs, objects shown, specific details from images
4. EMOTIONAL PRESERVATION - maintain the tone and feeling of emotional exchanges
5. IDENTITY PRESERVATION - preserve definitional and possessive relationships that establish entity connections
ENRICHMENT DECISION MATRIX:
- Clear, complete statement → minimal enrichment (just temporal + attribution)
- Unclear references → resolve with context
- Emotional support → preserve feeling, avoid historical dumping
- New developments → connect to ongoing narrative
- Visual content → extract specific details as primary facts
</smart_enrichment_process>
<chunk_continuity_rules>
When processing content that appears to be part of a larger document or conversation (indicated by session context):
1. BUILD ON CONTEXT - Use the previous session context to continue the narrative naturally without restating established information
2. MAINTAIN FLOW - Each chunk should add new information while referencing the established context appropriately
3. NO REDUNDANT TEMPORAL ANCHORING - Don't repeat the same date markers in sequential chunks unless the timeframe actually changes
4. FOCUS ON PROGRESSION - Emphasize what's new or developing in the current chunk relative to what's already been established
5. SEAMLESS CONTINUATION - When session context exists, treat the current content as a continuation rather than a standalone episode
</chunk_continuity_rules>
<context_usage_decision>
When related memories/previous episodes are provided, evaluate if they improve understanding:
USE CONTEXT when current episode has:
- Unclear pronouns ("she", "it", "they" without clear antecedent)
- Vague references ("the agency", "the event" without definition in current episode)
- Continuation phrases ("following up", "as we discussed")
- Incomplete information that context clarifies
IGNORE CONTEXT when current episode is:
- Clear and self-contained ("I got a job in New York")
- Simple emotional responses ("Thanks, that's great!")
- Generic encouragement ("You're doing awesome!")
- Complete statements with all necessary information
DECISION RULE: If the current episode can be understood perfectly without context, don't use it. Only use context when it genuinely clarifies or
resolves ambiguity.
</context_usage_decision>
<visual_content_capture>
For episodes with images/photos, EXTRACT:
- Exact text on signs, posters, labels (e.g., "Trans Lives Matter")
- Objects, people, settings, activities shown
- Specific visual details that add context
Integrate visual content as primary facts, not descriptions.
</visual_content_capture>
<strategic_enrichment>
When related memories are provided, apply SELECTIVE enrichment:
HIGH VALUE ENRICHMENT (always include):
- Temporal resolution: "last week" → "June 20, 2023"
- Entity disambiguation: "she" → "Caroline" when unclear
- Missing critical context: "the agency" → "Bright Futures Adoption Agency" (first mention only)
- New developments: connecting current facts to ongoing storylines
- Identity-defining possessives: "my X, Y" → preserve the relationship between person and Y as their X
- Definitional phrases: maintain the defining relationship, not just the entity reference
- Origin/source connections: preserve "from my X" relationships
LOW VALUE ENRICHMENT (usually skip):
- Obvious references: "Thanks, Mel!" doesn't need Melanie's full context
- Support/encouragement statements: emotional exchanges rarely need historical anchoring
- Already clear entities: don't replace pronouns when reference is obvious
- Repetitive context: never repeat the same descriptive phrase within a conversation
- Ongoing conversations: don't re-establish context that's already been set
- Emotional responses: keep supportive statements simple and warm
- Sequential topics: reference previous topics minimally ("recent X" not full description)
ANTI-BLOAT RULES:
- If the original statement is clear and complete, add minimal enrichment
- Never use the same contextual phrase twice in one conversation
- Focus on what's NEW, not what's already established
- Preserve emotional tone - don't bury feelings in facts
- ONE CONTEXT REFERENCE PER TOPIC: Don't keep referencing "the charity race" with full details
- STOP AT CLARITY: If original meaning is clear, don't add backstory
- AVOID COMPOUND ENRICHMENT: Don't chain multiple contextual additions in one sentence
CONTEXT FATIGUE PREVENTION:
- After mentioning a topic once with full context, subsequent references should be minimal
- Use "recent" instead of repeating full details: "recent charity race" not "the May 20, 2023 charity race for mental health"
- Focus on CURRENT episode facts, not historical anchoring
- Don't re-explain what's already been established in the conversation
ENRICHMENT SATURATION RULE:
Once a topic has been enriched with full context in the conversation, subsequent mentions should be minimal:
- First mention: "May 20, 2023 charity race for mental health"
- Later mentions: "the charity race" or "recent race"
- Don't re-explain established context
IDENTITY AND DEFINITIONAL RELATIONSHIP PRESERVATION:
- Preserve possessive phrases that define relationships: "my X, Y" → "Y, [person]'s X"
- Keep origin/source relationships: "from my X" → preserve the X connection
- Preserve family/professional/institutional relationships expressed through possessives
- Don't reduce identity-rich phrases to simple location/entity references
</strategic_enrichment>
<quality_control>
RETURN "NOTHING_TO_SUMMARISE" if content consists ONLY of:
- Pure generic responses without context ("awesome", "thanks", "okay" with no subject)
- Empty pleasantries with no substance ("how are you", "have a good day")
- Standalone acknowledgments without topic reference ("got it", "will do")
- Truly vague encouragement with no specific subject matter ("great job" with no context)
- Already captured information without new connections
- Technical noise or system messages
STORE IN MEMORY if content contains:
- Specific facts, names, dates, or detailed information
- Personal details, preferences, or decisions
- Concrete plans, commitments, or actions
- Visual content with specific details
- Temporal information that can be resolved
- New connections to existing knowledge
- Encouragement that references specific activities or topics
- Statements expressing personal values or beliefs
- Support that's contextually relevant to ongoing conversations
- Responses that reveal relationship dynamics or personal characteristics
MEANINGFUL ENCOURAGEMENT EXAMPLES (STORE these):
- "Taking time for yourself is so important" Shows personal values about self-care
- "You're doing an awesome job looking after yourself and your family" Specific topic reference
- "That charity race sounds great" Contextually relevant support
- "Your future family is gonna be so lucky" Values-based encouragement about specific situation
EMPTY ENCOURAGEMENT EXAMPLES (DON'T STORE these):
- "Great job!" (no context)
- "Awesome!" (no subject)
- "Keep it up!" (no specific reference)
</quality_control>
<enrichment_examples>
HIGH VALUE enrichment:
- Original: "She said yes!"
- Enriched: "Caroline received approval from Bright Futures Agency for her adoption application."
- Why: Resolves unclear pronoun, adds temporal context, identifies the approving entity
MINIMAL enrichment (emotional support):
- Original: "You'll be an awesome mom! Good luck!"
- Enriched: "Melanie encouraged Caroline about her adoption plans, affirming she would be an awesome mother."
- Why: Simple temporal context, preserve emotional tone, no historical dumping
ANTI-BLOAT example (what NOT to do):
- Wrong: "Melanie praised Caroline for her commitment to creating a family for children in need through adoption—supported by the inclusive Adoption Agency whose brochure and signs reading 'new arrival' and 'information and domestic building' Caroline had shared earlier that day—and encouraged her by affirming she would be an awesome mom."
- Right: "Melanie encouraged Caroline about her adoption plans, affirming she would be an awesome mother."
CLEAR REFERENCE (minimal enrichment):
- Original: "Thanks, Caroline! The event was really thought-provoking."
- Enriched: "Melanie thanked Caroline and described the charity race as thought-provoking."
- Why: Clear context doesn't need repetitive anchoring
CONVERSATION FLOW EXAMPLES:
WRONG (context fatigue): "reinforcing their ongoing conversation about mental health following Melanie's participation in the recent charity race for mental health"
RIGHT (minimal reference): "reinforcing their conversation about mental health"
WRONG (compound enrichment): "as she begins the process of turning her dream of giving children a loving home into reality and considers specific adoption agencies"
RIGHT (focused): "as she begins pursuing her adoption plans"
WRONG (over-contextualization): "following her participation in the May 20, 2023 charity race for mental health awareness"
RIGHT (after first mention): "following the recent charity race"
GENERIC IDENTITY PRESERVATION EXAMPLES:
- Original: "my hometown, Boston" Enriched: "Boston, [person]'s hometown"
- Original: "my workplace, Google" Enriched: "Google, [person]'s workplace"
- Original: "my sister, Sarah" Enriched: "Sarah, [person]'s sister"
- Original: "from my university, MIT" Enriched: "from MIT, [person]'s university"
POSSESSIVE + APPOSITIVE PATTERNS (Critical for Relations):
- Original: "my colleague at my office, Microsoft"
- Enriched: "his colleague at Microsoft, David's workplace"
- Why: Preserves both the work relationship AND the employment identity
- Original: "my friend from my university, Stanford"
- Enriched: "her friend from Stanford, Lisa's alma mater"
- Why: Establishes both the friendship and educational institution identity
- Original: "my neighbor in my city, Chicago"
- Enriched: "his neighbor in Chicago, Mark's hometown"
- Why: Maintains both the neighbor relationship and residence identity
WRONG (loses relationships): reduces to just entity names without preserving the defining relationship
RIGHT (preserves identity): maintains the possessive/definitional connection that establishes entity relationships
</enrichment_examples>
OUTPUT FORMAT REQUIREMENTS:
- Provide your response directly in HTML format
- Use appropriate HTML tags for structure and formatting (p, h1-h6, ul, ol, strong, em, etc.)
- Do NOT wrap your response in any special tags like <output>
- If there is nothing worth summarizing, return: NOTHING_TO_SUMMARISE
FORMAT EXAMPLES:
CORRECT: <p>Caroline shared her adoption plans with Melanie, discussing the application process and timeline.</p>
CORRECT: <h3>Italy Trip Planning</h3><p>User explored romantic destinations for their anniversary celebration.</p>
CORRECT: NOTHING_TO_SUMMARISE
WRONG: Plain text without HTML formatting
`,
},
{
role: "user",
content: `Title: ${title}
Content: ${contentWithImages}
<SAME_SESSION_CONTEXT>
${lastSummary || "No previous episodes in this session"}
</SAME_SESSION_CONTEXT>
Please provide a concise summary of this content in HTML format.`,
},
];
return await makeModelCall(
true,
messages,
() => {}, // onFinish callback
{ temperature: 0.3 },
);
}
/**
 * Generate the summary chunk-by-chunk, feeding each chunk's summary back in as session context for the next chunk
 */
async function* generateSummaryWithChunks(
content: string,
title: string,
imageSummaries: string[],
) {
const documentChunker = new DocumentChunker();
const chunks = await documentChunker.chunkDocument(content, title);
let lastSummary = "";
for await (const chunk of chunks.chunks) {
const response = (await generateSummary(
chunk.title || title,
chunk.content,
lastSummary || null,
imageSummaries,
)) as any;
for await (const res of response.textStream) {
lastSummary += res;
yield res;
}
// Use the complete current chunk summary as context for the next chunk
lastSummary = lastSummary.trim();
}
}
export const extensionSummary = task({
id: "extensionSummary",
maxDuration: 3000,
run: async (body: z.infer<typeof ExtensionSummaryBodyRequest>) => {
try {
const $ = cheerio.load(body.html);
// Extract content from any webpage
const extraction = extractTextContent(
$,
body.url,
body.html,
body.parseImages,
);
// Override title if provided
if (body.title) {
extraction.title = body.title;
}
let summary = "";
let imageSummaries: string[] = [];
if (extraction.supported) {
// Process images if requested and available
if (body.parseImages && extraction.images.length > 0) {
imageSummaries = await processImages(extraction.images, body.apiKey);
}
// Generate summary for text content with image descriptions
if (extraction.content.length > 0 || imageSummaries.length > 0) {
const response = generateSummaryWithChunks(
extraction.content,
extraction.title,
imageSummaries,
) as any;
const stream = await metadata.stream("messages", response);
let finalText: string = "";
for await (const chunk of stream) {
finalText = finalText + chunk;
}
summary = finalText;
} else {
summary = "Unable to extract sufficient content for summarization.";
}
} else {
// Handle unsupported content types
if (extraction.pageType === "video") {
summary =
"Video content detected. Text summarization not available for video-focused pages.";
} else {
summary =
"Unable to extract sufficient text content for summarization.";
}
}
const response = {
success: true,
pageType: extraction.pageType,
title: extraction.title,
summary,
content: extraction.content.slice(0, 1000), // Return first 1000 chars of content
images: extraction.images,
imageSummaries: imageSummaries.length > 0 ? imageSummaries : undefined,
supported: extraction.supported,
metadata: extraction.metadata,
};
return response;
} catch (error) {
console.error("Error processing extension summary request:", error);
return {
success: false,
error: "Failed to process page content",
pageType: "text" as PageType,
title: body.title || "Error",
summary: "Unable to process this page content.",
content: "",
images: [],
supported: false,
metadata: {
url: body.url,
wordCount: 0,
imageCount: 0,
},
};
}
},
});
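For orientation, a minimal sketch of how a caller might fire this task with the trigger.dev v3 SDK. Only the task id "extensionSummary" and the payload fields come from the code above; the import path and wrapper function are assumptions.

// Hypothetical caller (not part of this commit). Assumes @trigger.dev/sdk v3
// and the ExtensionSummaryBodyRequest shape used above.
import { tasks } from "@trigger.dev/sdk/v3";
import type { extensionSummary } from "~/trigger/extension-summary"; // assumed path

export async function summarizePage(html: string, url: string) {
  // Fires the task; the returned run id can be used to read the "messages" stream.
  const handle = await tasks.trigger<typeof extensionSummary>("extensionSummary", {
    html,
    url,
    parseImages: true,
  });
  return handle.id;
}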

View File

@@ -1,108 +0,0 @@
import { type DataContent, type CoreMessage } from "ai";
import axios from "axios";
import { makeModelCall } from "~/lib/model.server";
/**
* Summarizes an image by sending it to the model for analysis
* Focuses on describing Figma designs, personal photos, emotions, tone, location, premise,
* and design/art language when applicable
*/
export async function summarizeImage(
imageUrl: string,
apiKey?: string,
): Promise<string> {
const response = await axios.get(imageUrl, {
responseType: "arraybuffer",
headers: {
Authorization: `Bearer ${apiKey}`,
},
});
const messages: CoreMessage[] = [
{
role: "system",
content: `You are a helpful assistant that analyzes images and provides detailed descriptions. When describing images, focus on:
For Figma designs and UI/UX content:
- Design language, visual hierarchy, and layout patterns
- Color palette, typography, and spacing
- User interface elements and interactions
- Design system components and patterns
- Overall design approach and style
For personal photos and general images:
- Setting, location, and environment details
- Emotions, mood, and atmosphere
- People's expressions, body language, and interactions
- Lighting, composition, and visual tone
- Objects, activities, and context
- Time of day or season if apparent
For art and creative content:
- Artistic style, medium, and technique
- Color theory, composition, and visual elements
- Artistic movement or influence
- Emotional impact and artistic intent
- Cultural or historical context if relevant
Provide a comprehensive, detailed description that captures both the visual elements and the underlying meaning or purpose of the image. Be specific and descriptive while maintaining clarity.`,
},
{
role: "user",
content: [
{
type: "text",
text: "Please analyze this image and provide a detailed description following the guidelines above.",
},
{
type: "image",
image: response.data as DataContent,
},
],
},
];
try {
const response = await makeModelCall(
false, // Don't stream for image analysis
messages,
() => {}, // Empty onFinish callback
{ temperature: 0.7 },
);
return response as string;
} catch (error) {
console.error("Error summarizing image:", error);
return "Unable to analyze image content.";
}
}
/**
* Extracts image URLs from HTML content and limits to first 5 images
*/
export function extractImageUrls(html: string): string[] {
// Match img tags with src attributes
const imgRegex = /<img[^>]+src\s*=\s*["']([^"']+)["'][^>]*>/gi;
const imageUrls: string[] = [];
let match;
while ((match = imgRegex.exec(html)) !== null && imageUrls.length < 5) {
const src = match[1];
// Filter out common non-content images
if (
!src.includes("favicon") &&
!src.includes("logo") &&
!src.includes("icon") &&
!src.includes("avatar") &&
!src.endsWith(".svg") && // Often logos/icons
!src.includes("tracking") &&
!src.includes("analytics") &&
src.startsWith("http") // Only external URLs
) {
imageUrls.push(src);
}
}
return imageUrls;
}
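Since this helper is being deleted, a quick illustration of the filtering it performed (inputs are made up) may help reviewers confirm nothing else depended on it:

// Illustrative only -- exercises the removed extractImageUrls helper.
const html = `
  <img src="https://example.com/photo.jpg">
  <img src="https://example.com/logo.png">
  <img src="/relative/pic.jpg">
  <img src="https://example.com/diagram.svg">
`;
// Expected: only the first URL survives -- "logo" is filtered by substring,
// the relative path fails the http-prefix check, and .svg files are skipped.
console.log(extractImageUrls(html)); // ["https://example.com/photo.jpg"]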

View File

@@ -4,7 +4,6 @@ import { jsonSchema, tool, type ToolSet } from "ai";
import * as fs from "fs";
import * as path from "path";
-import { type MCPTool } from "./types";
import { StreamableHTTPClientTransport } from "@modelcontextprotocol/sdk/client/streamableHttp.js";
import { prisma } from "./prisma";
@@ -66,7 +65,7 @@ export const configureStdioMCPEnvironment = (
export class MCP {
private Client: any;
-private clients: Record<string, any> = {};
+private client: any = {};
constructor() {}
@@ -81,68 +80,38 @@ export class MCP {
return Client;
}
-async load(agents: string[], headers: any) {
-await Promise.all(
-agents.map(async (agent) => {
-return await this.connectToServer(
-agent,
-`${process.env.API_BASE_URL}/api/v1/mcp/${agent}`,
-headers,
-);
-}),
+async load(headers: any) {
+return await this.connectToServer(
+`${process.env.API_BASE_URL}/api/v1/mcp?source=core`,
+headers,
+);
}
async allTools(): Promise<ToolSet> {
-const clientEntries = Object.entries(this.clients);
+try {
+const { tools } = await this.client.listTools();
-// Fetch all tools in parallel
-const toolsArrays = await Promise.all(
-clientEntries.map(async ([clientKey, client]) => {
-try {
-const { tools } = await client.listTools();
-return tools.map(({ name, description, inputSchema }: any) => [
-`${clientKey}--${name}`,
-tool({
-description,
-parameters: jsonSchema(inputSchema),
-}),
-]);
-} catch (error) {
-logger.error(`Error fetching tools for ${clientKey}:`, { error });
-return [];
-}
-}),
-);
+const finalTools: ToolSet = {};
-// Flatten and convert to object
-return Object.fromEntries(toolsArrays.flat());
-}
+tools.map(({ name, description, inputSchema }: any) => {
+finalTools[name] = tool({
+description,
+parameters: jsonSchema(inputSchema),
+});
+});
-async tools(): Promise<MCPTool[]> {
-const allTools: MCPTool[] = [];
-for (const clientKey in this.clients) {
-const client = this.clients[clientKey];
-const { tools: clientTools } = await client.listTools();
-for (const tool of clientTools) {
-// Add client prefix to tool name
-tool.name = `${clientKey}--${tool.name}`;
-allTools.push(tool);
-}
+return finalTools;
+} catch (error) {
+return {};
+}
-return allTools;
-// Flatten and convert to object
}
async getTool(name: string) {
try {
-const clientKey = name.split("--")[0];
-const toolName = name.split("--")[1];
-const client = this.clients[clientKey];
-const { tools: clientTools } = await client.listTools();
-const clientTool = clientTools.find((to: any) => to.name === toolName);
+const { tools: clientTools } = await this.client.listTools();
+const clientTool = clientTools.find((to: any) => to.name === name);
return JSON.stringify(clientTool);
} catch (e) {
@@ -152,24 +121,22 @@ export class MCP {
}
async callTool(name: string, parameters: any) {
-const clientKey = name.split("--")[0];
-const toolName = name.split("--")[1];
-const client = this.clients[clientKey];
-const response = await client.callTool({
-name: toolName,
+console.log(name, parameters);
+const response = await this.client.callTool({
+name,
arguments: parameters,
});
+console.log(response);
return response;
}
-async connectToServer(name: string, url: string, headers: any) {
+async connectToServer(url: string, headers: any) {
try {
const client = new this.Client(
{
-name,
+name: "Core",
version: "1.0.0",
},
{
@@ -184,23 +151,16 @@ export class MCP {
// Connect to the MCP server
await client.connect(transport, { timeout: 60 * 1000 * 5 });
-this.clients[name] = client;
+this.client = client;
-logger.info(`Connected to ${name} MCP server`);
+logger.info(`Connected to MCP server`);
} catch (e) {
-logger.error(`Failed to connect to ${name} MCP server: `, { e });
+logger.error(`Failed to connect to MCP server: `, { e });
throw e;
}
}
}
-export const getIntegrationStdioFile = async (
-integrationDefinitionSlug: string,
-) => {
-// If the file is in public/integrations/[slug]/main, it is served at /integrations/[slug]/main
-return `/integrations/${integrationDefinitionSlug}/main`;
-};
export const fetchAndSaveStdioIntegrations = async () => {
try {
logger.info("Starting stdio integrations fetch and save process");

View File

@@ -5,7 +5,6 @@ import {
type IntegrationDefinitionV2,
type Prisma,
UserType,
-type UserUsage,
type Workspace,
} from "@prisma/client";
@@ -16,7 +15,6 @@ import { type HistoryStep } from "./types";
import axios from "axios";
import nodeCrypto from "node:crypto";
import { customAlphabet, nanoid } from "nanoid";
-import { Exa } from "exa-js";
import { prisma } from "./prisma";
import { BILLING_CONFIG, isBillingEnabled } from "~/config/billing.server";
@@ -177,37 +175,20 @@ export const init = async ({ payload }: { payload: InitChatPayload }) => {
where: { id: workspace.userId as string },
});
-const integrationAccounts = await prisma.integrationAccount.findMany({
-where: {
-workspaceId: workspace.id,
-},
-include: { integrationDefinition: true },
-});
// Set up axios interceptor for memory operations
axios.interceptors.request.use((config) => {
if (config.url?.startsWith("https://core::memory")) {
-// Handle both search and ingest endpoints
-if (config.url.includes("/search")) {
-config.url = `${process.env.API_BASE_URL}/api/v1/search`;
-} else if (config.url.includes("/add")) {
-config.url = `${process.env.API_BASE_URL}/api/v1/add`;
-}
+config.url = config.url.replace(
+"https://core::memory",
+process.env.API_BASE_URL ?? "",
+);
config.headers.Authorization = `Bearer ${pat.token}`;
}
return config;
});
-// Create MCP server for each integration account
-const mcpServers: string[] = integrationAccounts
-.map((account) => {
-const integrationConfig = account.integrationConfiguration as any;
-if (integrationConfig.mcp) {
-return account.integrationDefinition.slug;
-}
-return undefined;
-})
-.filter((slug): slug is string => slug !== undefined);
return {
conversation,
@@ -216,7 +197,6 @@ export const init = async ({ payload }: { payload: InitChatPayload }) => {
token: pat.token,
userId: user?.id,
userName: user?.name,
-mcpServers,
};
};
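The interceptor change deserves a worked example: instead of matching `/search` and `/add` individually, a single `replace` now rewrites the `https://core::memory` prefix and keeps whatever path the caller supplied, so memory tool URLs must now carry the full API path themselves. Values below are illustrative:

// Illustrative rewrite mirroring the interceptor above.
const base = "https://app.example.com"; // stand-in for process.env.API_BASE_URL
const url = "https://core::memory/api/v1/search";
console.log(url.replace("https://core::memory", base));
// -> "https://app.example.com/api/v1/search"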

View File

@@ -97,7 +97,7 @@
"date-fns": "^4.1.0",
"dayjs": "^1.11.10",
"emails": "workspace:*",
-"exa-js": "^1.8.20",
+"eventsource": "^4.0.0",
"execa": "^9.6.0",
"express": "^4.18.1",
"fast-sort": "^3.4.0",

View File

@@ -110,7 +110,7 @@ services:
webapp:
container_name: trigger-webapp
-image: ghcr.io/triggerdotdev/trigger.dev@sha256:a19c438f348ac05c939f39ed455ed27b4f189f720b4c9810aef8e71fdc731211
+image: ghcr.io/triggerdotdev/trigger.dev:v4.0.4
restart: ${RESTART_POLICY:-unless-stopped}
logging: *logging-config
ports:
@@ -245,7 +245,7 @@ services:
# Worker related
supervisor:
container_name: trigger-supervisor
-image: ghcr.io/triggerdotdev/supervisor:${TRIGGER_IMAGE_TAG:-v4-beta}
+image: ghcr.io/triggerdotdev/supervisor:v4.0.4
restart: ${RESTART_POLICY:-unless-stopped}
logging: *logging-config
depends_on:

pnpm-lock.yaml generated
View File

@@ -529,6 +529,9 @@ importers:
emails:
specifier: workspace:*
version: link:../../packages/emails
+eventsource:
+specifier: ^4.0.0
+version: 4.0.0
exa-js:
specifier: ^1.8.20
version: 1.8.20(encoding@0.1.13)(ws@8.18.3)
@@ -7780,6 +7783,10 @@ packages:
resolution: {integrity: sha512-CRT1WTyuQoD771GW56XEZFQ/ZoSfWid1alKGDYMmkt2yl8UXrVR4pspqWNEcqKvVIzg6PAltWjxcSSPrboA4iA==}
engines: {node: '>=18.0.0'}
+eventsource@4.0.0:
+resolution: {integrity: sha512-fvIkb9qZzdMxgZrEQDyll+9oJsyaVvY92I2Re+qK0qEJ+w5s0X3dtz+M0VAPOjP1gtU3iqWyjQ0G3nvd5CLZ2g==}
+engines: {node: '>=20.0.0'}
evt@2.5.9:
resolution: {integrity: sha512-GpjX476FSlttEGWHT8BdVMoI8wGXQGbEOtKcP4E+kggg+yJzXBZN2n4x7TS/zPBJ1DZqWI+rguZZApjjzQ0HpA==}
@@ -21231,6 +21238,10 @@ snapshots:
dependencies:
eventsource-parser: 3.0.3
+eventsource@4.0.0:
+dependencies:
+eventsource-parser: 3.0.3
evt@2.5.9:
dependencies:
minimal-polyfills: 2.2.3