mirror of
https://github.com/eliasstepanik/core.git
synced 2026-01-10 23:38:27 +00:00
feat: add Ollama container and update ingestion status for unchanged documents
This commit is contained in:
parent
17b8f9520b
commit
a548bae670
@ -101,6 +101,12 @@ export async function processDocumentIngestion(
|
||||
// Early return for unchanged documents
|
||||
if (differentialDecision.strategy === "skip_processing") {
|
||||
logger.log("Document content unchanged, skipping processing");
|
||||
await prisma.ingestionQueue.update({
|
||||
where: { id: payload.queueId },
|
||||
data: {
|
||||
status: IngestionStatus.COMPLETED,
|
||||
},
|
||||
});
|
||||
return {
|
||||
success: true,
|
||||
};
|
||||
|
||||
@ -43,7 +43,7 @@ NEO4J_PASSWORD=27192e6432564f4788d55c15131bd5ac
|
||||
NEO4J_AUTH=neo4j/27192e6432564f4788d55c15131bd5ac
|
||||
|
||||
OPENAI_API_KEY=
|
||||
OLLAMA_URL=
|
||||
OLLAMA_URL=http://ollama:11434
|
||||
|
||||
EMBEDDING_MODEL=text-embedding-3-small
|
||||
MODEL=gpt-4.1-2025-04-14
|
||||
|
||||
@ -108,6 +108,25 @@ services:
|
||||
retries: 10
|
||||
start_period: 20s
|
||||
|
||||
ollama:
|
||||
container_name: core-ollama
|
||||
image: ollama/ollama:0.12.6
|
||||
ports:
|
||||
- "11434:11434"
|
||||
volumes:
|
||||
- ollama_data:/root/.ollama
|
||||
- ./scripts/ollama-init.sh:/usr/local/bin/ollama-init.sh:ro
|
||||
networks:
|
||||
- core
|
||||
entrypoint: ["/bin/bash", "/usr/local/bin/ollama-init.sh"]
|
||||
healthcheck:
|
||||
test: ["CMD", "curl", "-f", "http://localhost:11434/api/tags"]
|
||||
interval: 30s
|
||||
timeout: 10s
|
||||
retries: 5
|
||||
start_period: 90s
|
||||
restart: unless-stopped
|
||||
|
||||
networks:
|
||||
core:
|
||||
name: core
|
||||
@ -117,3 +136,4 @@ volumes:
|
||||
postgres_data:
|
||||
neo4j_data:
|
||||
shared:
|
||||
ollama_data:
|
||||
|
||||
18
hosting/docker/scripts/ollama-init.sh
Normal file
18
hosting/docker/scripts/ollama-init.sh
Normal file
@ -0,0 +1,18 @@
|
||||
#!/bin/bash
# Entrypoint for the core-ollama container: start the Ollama server,
# pre-pull the embedding model, then keep the server as the main process.
set -e

echo "Starting Ollama server..."
ollama serve &
OLLAMA_PID=$!

# Forward termination signals to the background server so `docker stop`
# shuts the container down cleanly instead of timing out.
trap 'kill "$OLLAMA_PID" 2>/dev/null' TERM INT

# Poll for readiness instead of a fixed sleep: a hard-coded 5s races the
# server on slow hosts, and a premature `ollama pull` aborts under `set -e`.
echo "Waiting for Ollama server to be ready..."
for attempt in $(seq 1 30); do
  if ollama list >/dev/null 2>&1; then
    break
  fi
  if [ "$attempt" -eq 30 ]; then
    echo "Ollama server did not become ready in time" >&2
    exit 1
  fi
  sleep 1
done

echo "Pulling mxbai-embed-large model..."
ollama pull mxbai-embed-large

echo "Model pulled successfully!"
echo "Ollama is ready to accept requests."

# Keep the Ollama server running
wait $OLLAMA_PID
|
||||
Loading…
x
Reference in New Issue
Block a user