Mirror of https://github.com/eliasstepanik/core.git (synced 2026-01-11 09:08:28 +00:00)

Feat: add support for ollama

This commit is contained in:
parent 892ed79990
commit 3a3055e3cc
@@ -1,6 +1,7 @@
 import { z } from "zod";
 import { isValidDatabaseUrl } from "./utils/db";
 import { isValidRegex } from "./utils/regex";
+import { LLMModelEnum } from "@core/types";

 const EnvironmentSchema = z.object({
   NODE_ENV: z.union([
@@ -69,6 +70,10 @@ const EnvironmentSchema = z.object({
   SMTP_SECURE: z.coerce.boolean().optional(),
   SMTP_USER: z.string().optional(),
   SMTP_PASSWORD: z.string().optional(),
+
+  // Model envs
+  MODEL: z.string().default(LLMModelEnum.GPT41),
+  OLLAMA_URL: z.string().optional(),
 });

 export type Environment = z.infer<typeof EnvironmentSchema>;
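With these two schema fields, model selection becomes environment-driven: MODEL picks the model name (defaulting to GPT-4.1) and OLLAMA_URL, when set, routes calls through a local Ollama instance instead of a hosted provider. A minimal sketch of how the variables might be consumed — the default string, URL, and model name below are illustrative assumptions, not taken from the commit:

```ts
import { z } from "zod";

// Mirrors the two fields added to EnvironmentSchema above.
const ModelEnv = z.object({
  MODEL: z.string().default("gpt-4.1"), // the real schema defaults to LLMModelEnum.GPT41
  OLLAMA_URL: z.string().optional(),    // when set, all LLM calls go through Ollama
});

// e.g. MODEL=llama3 OLLAMA_URL=http://localhost:11434/api node server.js
const env = ModelEnv.parse(process.env);
if (env.OLLAMA_URL) {
  console.log(`Using Ollama at ${env.OLLAMA_URL} with model ${env.MODEL}`);
} else {
  console.log(`Using a hosted provider with model ${env.MODEL}`);
}
```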
@@ -7,48 +7,58 @@ import {
 } from "ai";
 import { openai } from "@ai-sdk/openai";
 import { logger } from "~/services/logger.service";
 import { env } from "~/env.server";
+import { createOllama } from "ollama-ai-provider";

 export async function makeModelCall(
   stream: boolean,
-  model: LLMModelEnum,
   messages: CoreMessage[],
   onFinish: (text: string, model: string) => void,
   options?: any,
 ) {
   let modelInstance;
+  const model = env.MODEL;
   let finalModel: string = "unknown";
+  const ollamaUrl = process.env.OLLAMA_URL;

-  switch (model) {
-    case LLMModelEnum.GPT35TURBO:
-    case LLMModelEnum.GPT4TURBO:
-    case LLMModelEnum.GPT4O:
-    case LLMModelEnum.GPT41:
-    case LLMModelEnum.GPT41MINI:
-    case LLMModelEnum.GPT41NANO:
-      finalModel = LLMMappings[model];
-      modelInstance = openai(finalModel, { ...options });
-      break;
-
-    case LLMModelEnum.CLAUDEOPUS:
-    case LLMModelEnum.CLAUDESONNET:
-    case LLMModelEnum.CLAUDEHAIKU:
-      finalModel = LLMMappings[model];
-      break;
-
-    case LLMModelEnum.GEMINI25FLASH:
-    case LLMModelEnum.GEMINI25PRO:
-    case LLMModelEnum.GEMINI20FLASH:
-    case LLMModelEnum.GEMINI20FLASHLITE:
-      finalModel = LLMMappings[model];
-      break;
-
-    default:
-      logger.warn(`Unsupported model type: ${model}`);
-      break;
-  }
+  if (ollamaUrl) {
+    const ollama = createOllama({
+      baseURL: ollamaUrl,
+    });
+    modelInstance = ollama(model); // use the model name configured via env.MODEL
+  } else {
+    switch (model) {
+      case LLMModelEnum.GPT35TURBO:
+      case LLMModelEnum.GPT4TURBO:
+      case LLMModelEnum.GPT4O:
+      case LLMModelEnum.GPT41:
+      case LLMModelEnum.GPT41MINI:
+      case LLMModelEnum.GPT41NANO:
+        finalModel = LLMMappings[model];
+        modelInstance = openai(finalModel, { ...options });
+        break;
+
+      case LLMModelEnum.CLAUDEOPUS:
+      case LLMModelEnum.CLAUDESONNET:
+      case LLMModelEnum.CLAUDEHAIKU:
+        finalModel = LLMMappings[model];
+        break;
+
+      case LLMModelEnum.GEMINI25FLASH:
+      case LLMModelEnum.GEMINI25PRO:
+      case LLMModelEnum.GEMINI20FLASH:
+      case LLMModelEnum.GEMINI20FLASHLITE:
+        finalModel = LLMMappings[model];
+        break;
+
+      default:
+        logger.warn(`Unsupported model type: ${model}`);
+        break;
+    }
+  }

   if (stream) {
-    return await streamText({
+    return streamText({
       model: modelInstance as LanguageModelV1,
       messages,
       onFinish: async ({ text }) => {
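Stripped of the diff noise, the refactor is one provider-selection branch in front of the existing vendor switch, and makeModelCall no longer takes a per-call model argument. (The `return await streamText(...)` to `return streamText(...)` change is behavior-neutral inside an async function.) A self-contained sketch of the selection logic, assuming the ai, @ai-sdk/openai, and ollama-ai-provider packages; generateText stands in for the non-streaming path, and the model names are illustrative:

```ts
import { generateText, type LanguageModelV1 } from "ai";
import { openai } from "@ai-sdk/openai";
import { createOllama } from "ollama-ai-provider";

// When OLLAMA_URL is set, every call goes through Ollama; otherwise fall back
// to a hosted provider (OpenAI here, standing in for the full vendor switch).
function selectModel(modelName: string, ollamaUrl?: string): LanguageModelV1 {
  if (ollamaUrl) {
    const ollama = createOllama({ baseURL: ollamaUrl });
    return ollama(modelName); // e.g. "llama3" served by a local Ollama daemon
  }
  return openai(modelName); // e.g. "gpt-4.1"
}

async function main() {
  const model = selectModel(process.env.MODEL ?? "gpt-4.1", process.env.OLLAMA_URL);
  const { text } = await generateText({
    model,
    messages: [{ role: "user", content: "Say hello." }],
  });
  console.log(text);
}

main().catch(console.error);
```

One consequence of this design: call sites that previously pinned a specific model (GPT41 everywhere, GPT41NANO for reranking) now all share the single env.MODEL setting, as the hunks below show.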
@@ -196,14 +196,9 @@ export class KnowledgeGraphService {

   let responseText = "";

-  await makeModelCall(
-    false,
-    LLMModelEnum.GPT41,
-    messages as CoreMessage[],
-    (text) => {
-      responseText = text;
-    },
-  );
+  await makeModelCall(false, messages as CoreMessage[], (text) => {
+    responseText = text;
+  });

   // Convert to EntityNode objects
   let entities: EntityNode[] = [];
@@ -258,14 +253,9 @@ export class KnowledgeGraphService {
   const messages = extractStatements(context);

   let responseText = "";
-  await makeModelCall(
-    false,
-    LLMModelEnum.GPT41,
-    messages as CoreMessage[],
-    (text) => {
-      responseText = text;
-    },
-  );
+  await makeModelCall(false, messages as CoreMessage[], (text) => {
+    responseText = text;
+  });

   console.log(responseText);
   const outputMatch = responseText.match(/<output>([\s\S]*?)<\/output>/);
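A pattern repeated in these hunks: the service has the model wrap its answer in <output> tags and then pulls the payload out with a regex. A small self-contained sketch of that contract — the JSON payload shape is an assumption for illustration:

```ts
// Pull the <output>...</output> payload out of a raw LLM response.
function parseOutputBlock(responseText: string): string | null {
  const outputMatch = responseText.match(/<output>([\s\S]*?)<\/output>/);
  return outputMatch ? outputMatch[1].trim() : null;
}

// Any surrounding reasoning text is ignored; only the tagged payload survives.
const raw = 'Let me think...<output>["Alice","Bob"]</output>';
const payload = parseOutputBlock(raw);
const names: string[] = payload ? JSON.parse(payload) : [];
console.log(names); // ["Alice", "Bob"]
```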
@@ -483,14 +473,9 @@ export class KnowledgeGraphService {
   const messages = dedupeNodes(dedupeContext);
   let responseText = "";

-  await makeModelCall(
-    false,
-    LLMModelEnum.GPT41,
-    messages as CoreMessage[],
-    (text) => {
-      responseText = text;
-    },
-  );
+  await makeModelCall(false, messages as CoreMessage[], (text) => {
+    responseText = text;
+  });

   // Step 5: Process LLM response
   const outputMatch = responseText.match(/<output>([\s\S]*?)<\/output>/);
@@ -673,7 +658,7 @@ export class KnowledgeGraphService {
   let responseText = "";

   // Call the LLM to analyze all statements at once
-  await makeModelCall(false, LLMModelEnum.GPT41, messages, (text) => {
+  await makeModelCall(false, messages, (text) => {
     responseText = text;
   });
@@ -804,14 +789,9 @@ export class KnowledgeGraphService {
   let responseText = "";

   // Call the LLM to extract attributes
-  await makeModelCall(
-    false,
-    LLMModelEnum.GPT41,
-    messages as CoreMessage[],
-    (text) => {
-      responseText = text;
-    },
-  );
+  await makeModelCall(false, messages as CoreMessage[], (text) => {
+    responseText = text;
+  });

   try {
     const outputMatch = responseText.match(/<output>([\s\S]*?)<\/output>/);
@@ -864,7 +844,7 @@ export class KnowledgeGraphService {
   };
   const messages = normalizePrompt(context);
   let responseText = "";
-  await makeModelCall(false, LLMModelEnum.GPT41, messages, (text) => {
+  await makeModelCall(false, messages, (text) => {
     responseText = text;
   });
   let normalizedEpisodeBody = "";
@@ -100,7 +100,6 @@ export async function applyCrossEncoderReranking(
   let responseText = "";
   await makeModelCall(
     false,
-    LLMModelEnum.GPT41NANO,
     messages,
     (text) => {
       responseText = text;
@@ -68,6 +68,7 @@
     "nanoid": "3.3.8",
     "neo4j-driver": "^5.28.1",
     "non.geist": "^1.0.2",
+    "ollama-ai-provider": "1.2.0",
     "posthog-js": "^1.116.6",
     "react": "^18.2.0",
     "react-dom": "^18.2.0",
pnpm-lock.yaml (generated; 25 lines changed)
@@ -204,6 +204,9 @@ importers:
       non.geist:
         specifier: ^1.0.2
        version: 1.0.4
+      ollama-ai-provider:
+        specifier: 1.2.0
+        version: 1.2.0(zod@3.23.8)
       posthog-js:
         specifier: ^1.116.6
         version: 1.250.2
@@ -6580,6 +6583,15 @@ packages:
     resolution: {integrity: sha512-gXah6aZrcUxjWg2zR2MwouP2eHlCBzdV4pygudehaKXSGW4v2AsRQUK+lwwXhii6KFZcunEnmSUoYp5CXibxtA==}
     engines: {node: '>= 0.4'}

+  ollama-ai-provider@1.2.0:
+    resolution: {integrity: sha512-jTNFruwe3O/ruJeppI/quoOUxG7NA6blG3ZyQj3lei4+NnJo7bi3eIRWqlVpRlu/mbzbFXeJSBuYQWF6pzGKww==}
+    engines: {node: '>=18'}
+    peerDependencies:
+      zod: ^3.0.0
+    peerDependenciesMeta:
+      zod:
+        optional: true
+
   on-finished@2.3.0:
     resolution: {integrity: sha512-ikqdkGAAyf/X/gPhXGvfgAytDZtDbr+bkNUJ0N9h5MI/dmdgCs3l6hoHrcUv41sRKew3jIwrp4qQDXiK99Utww==}
     engines: {node: '>= 0.8'}
@@ -6685,6 +6697,9 @@ packages:
     resolution: {integrity: sha512-CiyeOxFT/JZyN5m0z9PfXw4SCBJ6Sygz1Dpl0wqjlhDEGGBP1GnsUVEL0p63hoG1fcj3fHynXi9NYO4nWOL+qQ==}
     engines: {node: '>= 0.8'}

+  partial-json@0.1.7:
+    resolution: {integrity: sha512-Njv/59hHaokb/hRUjce3Hdv12wd60MtM9Z5Olmn+nehe0QDAsRtRbJPvJ0Z91TusF0SuZRIvnM+S4l6EIP8leA==}
+
   path-exists@4.0.0:
     resolution: {integrity: sha512-ak9Qy5Q7jYb2Wwcey5Fpvg2KoAc/ZIhLSLOSBmRmygPsGwkVVt0fZa0qrtMz+m6tJTAHfZQ8FnmB4MG4LWy7/w==}
     engines: {node: '>=8'}
@@ -15619,6 +15634,14 @@ snapshots:
       define-properties: 1.2.1
       es-object-atoms: 1.1.1

+  ollama-ai-provider@1.2.0(zod@3.23.8):
+    dependencies:
+      '@ai-sdk/provider': 1.1.3
+      '@ai-sdk/provider-utils': 2.2.8(zod@3.23.8)
+      partial-json: 0.1.7
+    optionalDependencies:
+      zod: 3.23.8
+
   on-finished@2.3.0:
     dependencies:
       ee-first: 1.1.1
@@ -15737,6 +15760,8 @@ snapshots:

   parseurl@1.3.3: {}

+  partial-json@0.1.7: {}
+
   path-exists@4.0.0: {}

   path-is-absolute@1.0.1: {}
@@ -58,6 +58,8 @@
     "NEO4J_PASSWORD",
     "OPENAI_API_KEY",
     "MAGIC_LINK_SECRET",
-    "ENABLE_EMAIL_LOGIN"
+    "ENABLE_EMAIL_LOGIN",
+    "MODEL",
+    "OLLAMA_URL"
   ]
 }