mirror of
https://github.com/eliasstepanik/core.git
synced 2026-01-11 17:18:28 +00:00
Feat: add deep search API; Feat: deep search agent; Fix: stream utils for deep search; Fix: deep search. Co-authored-by: Manoj <saimanoj58@gmail.com>
69 lines
1.6 KiB
TypeScript
69 lines
1.6 KiB
TypeScript
import { openai } from "@ai-sdk/openai";
|
|
import { logger } from "@trigger.dev/sdk/v3";
|
|
import {
|
|
type CoreMessage,
|
|
type LanguageModelV1,
|
|
streamText,
|
|
type ToolSet,
|
|
} from "ai";
|
|
|
|
/**
|
|
* Generate LLM responses with tool calling support
|
|
* Simplified version for deep-search use case - NO maxSteps for manual ReAct control
|
|
*/
|
|
export async function* generate(
|
|
messages: CoreMessage[],
|
|
onFinish?: (event: any) => void,
|
|
tools?: ToolSet,
|
|
model?: string,
|
|
): AsyncGenerator<
|
|
| string
|
|
| {
|
|
type: string;
|
|
toolName: string;
|
|
args?: any;
|
|
toolCallId?: string;
|
|
}
|
|
> {
|
|
const modelToUse = model || process.env.MODEL || "gpt-4.1-2025-04-14";
|
|
const modelInstance = openai(modelToUse) as LanguageModelV1;
|
|
|
|
logger.info(`Starting LLM generation with model: ${modelToUse}`);
|
|
|
|
try {
|
|
const { textStream, fullStream } = streamText({
|
|
model: modelInstance,
|
|
messages,
|
|
temperature: 1,
|
|
tools,
|
|
// NO maxSteps - we handle tool execution manually in the ReAct loop
|
|
toolCallStreaming: true,
|
|
onFinish,
|
|
});
|
|
|
|
// Yield text chunks
|
|
for await (const chunk of textStream) {
|
|
yield chunk;
|
|
}
|
|
|
|
// Yield tool calls
|
|
for await (const fullChunk of fullStream) {
|
|
if (fullChunk.type === "tool-call") {
|
|
yield {
|
|
type: "tool-call",
|
|
toolName: fullChunk.toolName,
|
|
toolCallId: fullChunk.toolCallId,
|
|
args: fullChunk.args,
|
|
};
|
|
}
|
|
|
|
if (fullChunk.type === "error") {
|
|
logger.error(`LLM error: ${JSON.stringify(fullChunk)}`);
|
|
}
|
|
}
|
|
} catch (error) {
|
|
logger.error(`LLM generation error: ${error}`);
|
|
throw error;
|
|
}
|
|
}
|