mirror of
https://github.com/eliasstepanik/core.git
synced 2026-01-11 09:28:40 +00:00
feat: integrate Ollama embedding service with mxbai-embed-large model support
This commit is contained in:
parent
bcae1bd4a1
commit
1c2c2acd47
@ -48,7 +48,11 @@ MAGIC_LINK_SECRET=27192e6432564f4788d55c15131bd5ac
|
||||
NEO4J_AUTH=neo4j/27192e6432564f4788d55c15131bd5ac
|
||||
OLLAMA_URL=http://ollama:11434
|
||||
|
||||
EMBEDDING_MODEL=text-embedding-3-small
|
||||
# Embedding model configuration
|
||||
# For self-hosted with Ollama, use: mxbai-embed-large (recommended, 1024 dimensions)
|
||||
# For OpenAI, use: text-embedding-3-small
|
||||
EMBEDDING_MODEL=mxbai-embed-large
|
||||
EMBEDDING_MODEL_SIZE=1024
|
||||
MODEL=gpt-4.1-2025-04-14
|
||||
|
||||
## AWS Bedrock ##
|
||||
|
||||
@ -64,8 +64,9 @@ const EnvironmentSchema = z.object({
|
||||
NEO4J_USERNAME: z.string(),
|
||||
NEO4J_PASSWORD: z.string(),
|
||||
|
||||
//OpenAI
|
||||
//Model API Key
|
||||
OPENAI_API_KEY: z.string(),
|
||||
ANTHROPIC_API_KEY: z.string(),
|
||||
|
||||
EMAIL_TRANSPORT: z.string().optional(),
|
||||
FROM_EMAIL: z.string().optional(),
|
||||
|
||||
@ -99,6 +99,9 @@ export async function makeModelCall(
|
||||
case "claude-3-7-sonnet-20250219":
|
||||
case "claude-3-opus-20240229":
|
||||
case "claude-3-5-haiku-20241022":
|
||||
case "claude-sonnet-4-5":
|
||||
case "claude-haiku-4-5":
|
||||
case "claude-opus-4-1":
|
||||
modelInstance = anthropic(model, { ...options });
|
||||
break;
|
||||
|
||||
@ -206,6 +209,7 @@ export async function getEmbedding(text: string) {
|
||||
});
|
||||
return embedding;
|
||||
}
|
||||
console.log("Using Ollama embedding url: ", ollamaUrl);
|
||||
|
||||
const ollama = createOllama({
|
||||
baseURL: ollamaUrl,
|
||||
|
||||
@ -43,7 +43,8 @@ NEO4J_PASSWORD=27192e6432564f4788d55c15131bd5ac
|
||||
NEO4J_AUTH=neo4j/27192e6432564f4788d55c15131bd5ac
|
||||
|
||||
OPENAI_API_KEY=
|
||||
OLLAMA_URL=
|
||||
ANTHROPIC_API_KEY=
|
||||
OLLAMA_URL=http://core-ollama:11434
|
||||
|
||||
EMBEDDING_MODEL=text-embedding-3-small
|
||||
MODEL=gpt-4.1-2025-04-14
|
||||
|
||||
@ -51,6 +51,8 @@ services:
|
||||
condition: service_started
|
||||
neo4j:
|
||||
condition: service_healthy
|
||||
ollama:
|
||||
condition: service_healthy
|
||||
networks:
|
||||
- core
|
||||
|
||||
@ -108,6 +110,25 @@ services:
|
||||
retries: 10
|
||||
start_period: 20s
|
||||
|
||||
ollama:
|
||||
container_name: core-ollama
|
||||
image: ollama/ollama:0.12.6
|
||||
ports:
|
||||
- "11434:11434"
|
||||
volumes:
|
||||
- ollama_data:/root/.ollama
|
||||
- ./scripts/ollama-init.sh:/usr/local/bin/ollama-init.sh:ro
|
||||
networks:
|
||||
- core
|
||||
entrypoint: ["/bin/bash", "/usr/local/bin/ollama-init.sh"]
|
||||
healthcheck:
|
||||
test: ["CMD", "curl", "-f", "http://localhost:11434/api/tags"]
|
||||
interval: 30s
|
||||
timeout: 10s
|
||||
retries: 5
|
||||
start_period: 90s
|
||||
restart: unless-stopped
|
||||
|
||||
webapp:
|
||||
container_name: trigger-webapp
|
||||
image: ghcr.io/triggerdotdev/trigger.dev@sha256:a19c438f348ac05c939f39ed455ed27b4f189f720b4c9810aef8e71fdc731211
|
||||
@ -351,6 +372,7 @@ networks:
|
||||
volumes:
|
||||
postgres_data:
|
||||
neo4j_data:
|
||||
ollama_data:
|
||||
shared:
|
||||
clickhouse:
|
||||
minio:
|
||||
|
||||
18
hosting/docker/scripts/ollama-init.sh
Executable file
18
hosting/docker/scripts/ollama-init.sh
Executable file
@ -0,0 +1,18 @@
|
||||
#!/bin/bash
# Entrypoint for the core-ollama container: start the Ollama server,
# pre-pull the embedding model the app depends on, then keep serving.
set -e

echo "Starting Ollama server..."
ollama serve &
OLLAMA_PID=$!

# Poll the API for readiness instead of a fixed `sleep 5`: on a cold
# container or slow disk the server can take longer than 5 seconds, and
# with `set -e` a premature `ollama pull` would crash-loop the container.
echo "Waiting for Ollama server to be ready..."
for _ in $(seq 1 60); do
  # `ollama list` succeeds only once the server is accepting requests.
  if ollama list >/dev/null 2>&1; then
    break
  fi
  # Bail out early if the server process died instead of polling forever.
  if ! kill -0 "$OLLAMA_PID" 2>/dev/null; then
    echo "Ollama server exited unexpectedly during startup." >&2
    exit 1
  fi
  sleep 1
done

echo "Pulling mxbai-embed-large model..."
ollama pull mxbai-embed-large

echo "Model pulled successfully!"
echo "Ollama is ready to accept requests."

# Keep the Ollama server running; waiting on the PID keeps the container
# alive and lets the compose healthcheck report readiness.
wait $OLLAMA_PID
|
||||
Loading…
x
Reference in New Issue
Block a user