diff --git a/.env.example b/.env.example index bf9ea11..2694189 100644 --- a/.env.example +++ b/.env.example @@ -41,10 +41,7 @@ NEO4J_USERNAME=neo4j NEO4J_PASSWORD=27192e6432564f4788d55c15131bd5ac OPENAI_API_KEY= - MAGIC_LINK_SECRET=27192e6432564f4788d55c15131bd5ac - - NEO4J_AUTH=neo4j/27192e6432564f4788d55c15131bd5ac OLLAMA_URL=http://ollama:11434 diff --git a/.github/workflows/build-docker-image.yml b/.github/workflows/build-docker-image.yml index 5f53f5b..b784c09 100644 --- a/.github/workflows/build-docker-image.yml +++ b/.github/workflows/build-docker-image.yml @@ -7,32 +7,6 @@ on: workflow_dispatch: jobs: - build-init: - runs-on: ubuntu-latest - - steps: - - uses: actions/checkout@v2 - with: - ref: main - - - name: Set up QEMU - uses: docker/setup-qemu-action@v1 - - - name: Set up Docker Buildx - uses: docker/setup-buildx-action@v1 - - - name: Login to Docker Registry - run: echo "${{ secrets.DOCKER_PASSWORD }}" | docker login -u "${{ secrets.DOCKER_USERNAME }}" --password-stdin - - - name: Build and Push Frontend Docker Image - uses: docker/build-push-action@v2 - with: - context: . - file: ./apps/init/Dockerfile - platforms: linux/amd64,linux/arm64 - push: true - tags: redplanethq/init:${{ github.ref_name }} - build-webapp: runs-on: ubuntu-latest diff --git a/.gitignore b/.gitignore index 0a7d866..e264cdb 100644 --- a/.gitignore +++ b/.gitignore @@ -46,13 +46,14 @@ registry/ .cursor CLAUDE.md +AGENTS.md .claude -.clinerules/byterover-rules.md -.kilocode/rules/byterover-rules.md -.roo/rules/byterover-rules.md -.windsurf/rules/byterover-rules.md -.cursor/rules/byterover-rules.mdc -.kiro/steering/byterover-rules.md -.qoder/rules/byterover-rules.md -.augment/rules/byterover-rules.md \ No newline at end of file +.clinerules +.kilocode +.roo +.windsurf +.cursor +.kiro +.qoder +.augment \ No newline at end of file diff --git a/README.md b/README.md index aeea5a1..5a194eb 100644 --- a/README.md +++ b/README.md @@ -55,7 +55,7 @@ CORE memory achieves **88.24%** average accuracy in Locomo dataset across all re ## Overview -**Problem** +**Problem** Developers waste time re-explaining context to AI tools. Hit token limits in Claude? Start fresh and lose everything. Switch from ChatGPT/Claude to Cursor? Explain your context again. Your conversations, decisions, and insights vanish between sessions. With every new AI tool, the cost of context switching grows. @@ -64,6 +64,7 @@ Developers waste time re-explaining context to AI tools. Hit token limits in Cla CORE is an open-source unified, persistent memory layer for all your AI tools. Your context follows you from Cursor to Claude to ChatGPT to Claude Code. One knowledge graph remembers who said what, when, and why. Connect once, remember everywhere. Stop managing context and start building. ## πŸš€ CORE Self-Hosting + Want to run CORE on your own infrastructure? Self-hosting gives you complete control over your data and deployment. **Quick Deploy Options:** @@ -80,15 +81,20 @@ Want to run CORE on your own infrastructure? Self-hosting gives you complete con ### Setup 1. Clone the repository: + ``` git clone https://github.com/RedPlanetHQ/core.git cd core ``` + 2. Configure environment variables in `core/.env`: + ``` OPENAI_API_KEY=your_openai_api_key ``` + 3. 
Start the service

+
 ```
 docker-compose up -d
 ```

@@ -100,6 +106,7 @@ Once deployed, you can configure your AI providers (OpenAI, Anthropic) and start

 Note: We tried open-source models (e.g. GPT-OSS via Ollama), but fact generation was not good enough. We are still figuring out how to improve that, and will then also support OSS models.

 ## πŸš€ CORE Cloud
+
 **Build your unified memory graph in 5 minutes:**

 Don't want to manage infrastructure? CORE Cloud lets you build your personal memory system instantly - no setup, no servers, just memory that works.

@@ -115,24 +122,24 @@ Don't want to manage infrastructure? CORE Cloud lets you build your personal mem

 ## 🧩 Key Features

-### 🧠 **Unified, Portable Memory**: 
+### 🧠 **Unified, Portable Memory**:
+
 Add and recall your memory across **Cursor, Windsurf, Claude Desktop, Claude Code, Gemini CLI, AWS's Kiro, VS Code, and Roo Code** via MCP

 ![core-claude](https://github.com/user-attachments/assets/56c98288-ee87-4cd0-8b02-860aca1c7f9a)

-
-### πŸ•ΈοΈ **Temporal + Reified Knowledge Graph**: 
+### πŸ•ΈοΈ **Temporal + Reified Knowledge Graph**:

 Remember the story behind every factβ€”track who said what, when, and why with rich relationships and full provenance, not just flat storage

 ![core-memory-graph](https://github.com/user-attachments/assets/5d1ee659-d519-4624-85d1-e0497cbdd60a)

-
-### 🌐 **Browser Extension**: 
+### 🌐 **Browser Extension**:

 Save conversations and content from ChatGPT, Grok, Gemini, Twitter, YouTube, blog posts, and any webpage directly into your CORE memory.

 **How to Use Extension**
+
 1. [Download the Extension](https://chromewebstore.google.com/detail/core-extension/cglndoindnhdbfcbijikibfjoholdjcc) from the Chrome Web Store.
 2. Login to [CORE dashboard](https://core.heysol.ai)
    - Navigate to Settings (bottom left)

@@ -141,13 +148,12 @@ Save conversations and content from ChatGPT, Grok, Gemini, Twitter, YouTube, blo

 https://github.com/user-attachments/assets/6e629834-1b9d-4fe6-ae58-a9068986036a

+### πŸ’¬ **Chat with Memory**:

-### πŸ’¬ **Chat with Memory**: 
 Ask questions like "What are my writing preferences?" with instant insights from your connected knowledge

 ![chat-with-memory](https://github.com/user-attachments/assets/d798802f-bd51-4daf-b2b5-46de7d206f66)

-
 ### ⚑ **Auto-Sync from Apps**:

 Automatically capture relevant context from Linear, Slack, Notion, GitHub and other connected apps into your CORE memory

@@ -156,16 +162,12 @@ Automatically capture relevant context from Linear, Slack, Notion, GitHub and ot

 ![core-slack](https://github.com/user-attachments/assets/d5fefe38-221e-4076-8a44-8ed673960f03)

-
-### πŸ”— **MCP Integration Hub**: 
+### πŸ”— **MCP Integration Hub**:

 Connect Linear, Slack, GitHub, Notion once to COREβ€”then use all their tools in Claude, Cursor, or any MCP client with a single URL

-
 ![core-linear-claude](https://github.com/user-attachments/assets/7d59d92b-8c56-4745-a7ab-9a3c0341aa32)

-
-
 ## How CORE creates memory

 memory-ingest-diagram

@@ -179,7 +181,6 @@ CORE’s ingestion pipeline has four phases designed to capture evolving context

 The Result: Instead of a flat database, CORE gives you a memory that grows and changes with you - preserving context, evolution, and ownership so agents can actually use it.
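+
+For illustration, a single ingested statement might end up stored roughly like this (a simplified sketch, not CORE's actual schema):
+
+```ts
+// A reified fact: the statement plus who said it, when, and where it came
+// from, with temporal validity instead of a flat key-value row.
+const fact = {
+  statement: "Prefers TypeScript for new services",
+  provenance: { speaker: "you", source: "claude-code", sessionId: "session-123" },
+  validAt: "2025-01-15T10:00:00Z",
+  invalidAt: null, // set later if a newer statement supersedes this one
+};
+```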
-
 ![memory-ingest-eg](https://github.com/user-attachments/assets/1d0a8007-153a-4842-9586-f6f4de43e647)

 ## How CORE recalls from memory

@@ -204,7 +205,7 @@ Explore our documentation to get the most out of CORE

 - [Connect Core MCP with Claude](https://docs.heysol.ai/providers/claude)
 - [Connect Core MCP with Cursor](https://docs.heysol.ai/providers/cursor)
 - [Connect Core MCP with Claude Code](https://docs.heysol.ai/providers/claude-code)
-- [Connect Core MCP with Codex](https://docs.heysol.ai/providers/codex) 
+- [Connect Core MCP with Codex](https://docs.heysol.ai/providers/codex)
 - [Basic Concepts](https://docs.heysol.ai/overview)
 - [API Reference](https://docs.heysol.ai/api-reference/get-user-profile)

@@ -249,21 +250,6 @@ Have questions or feedback? We're here to help:

-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
diff --git a/apps/init/.gitignore b/apps/init/.gitignore
deleted file mode 100644
index 3814592..0000000
--- a/apps/init/.gitignore
+++ /dev/null
@@ -1,51 +0,0 @@
-# See https://help.github.com/articles/ignoring-files/ for more about ignoring files.
-
-# Dependencies
-node_modules
-.pnp
-.pnp.js
-
-# Local env files
-.env
-.env.local
-.env.development.local
-.env.test.local
-.env.production.local
-
-# Testing
-coverage
-
-# Turbo
-.turbo
-
-# Vercel
-.vercel
-
-# Build Outputs
-.next/
-out/
-build
-dist
-.tshy/
-.tshy-build/
-
-# Debug
-npm-debug.log*
-yarn-debug.log*
-yarn-error.log*
-
-# Misc
-.DS_Store
-*.pem
-
-docker-compose.dev.yaml
-
-clickhouse/
-.vscode/
-registry/
-
-.cursor
-CLAUDE.md
-
-.claude
-
diff --git a/apps/init/Dockerfile b/apps/init/Dockerfile
deleted file mode 100644
index 46ec445..0000000
--- a/apps/init/Dockerfile
+++ /dev/null
@@ -1,70 +0,0 @@
-ARG NODE_IMAGE=node:20.11.1-bullseye-slim@sha256:5a5a92b3a8d392691c983719dbdc65d9f30085d6dcd65376e7a32e6fe9bf4cbe
-
-FROM ${NODE_IMAGE} AS pruner
-
-WORKDIR /core
-
-COPY --chown=node:node . .
-RUN npx -q turbo@2.5.3 prune --scope=@redplanethq/init --docker
-RUN find . -name "node_modules" -type d -prune -exec rm -rf '{}' +
-
-# Base strategy to have layer caching
-FROM ${NODE_IMAGE} AS base
-RUN apt-get update && apt-get install -y openssl dumb-init postgresql-client
-WORKDIR /core
-COPY --chown=node:node .gitignore .gitignore
-COPY --from=pruner --chown=node:node /core/out/json/ .
-COPY --from=pruner --chown=node:node /core/out/pnpm-lock.yaml ./pnpm-lock.yaml
-COPY --from=pruner --chown=node:node /core/out/pnpm-workspace.yaml ./pnpm-workspace.yaml
-
-## Dev deps
-FROM base AS dev-deps
-WORKDIR /core
-# Corepack is used to install pnpm
-RUN corepack enable
-ENV NODE_ENV development
-RUN pnpm install --ignore-scripts --no-frozen-lockfile
-
-## Production deps
-FROM base AS production-deps
-WORKDIR /core
-# Corepack is used to install pnpm
-RUN corepack enable
-ENV NODE_ENV production
-RUN pnpm install --prod --no-frozen-lockfile
-
-## Builder (builds the init CLI)
-FROM base AS builder
-WORKDIR /core
-# Corepack is used to install pnpm
-RUN corepack enable
-
-COPY --from=pruner --chown=node:node /core/out/full/ .
-COPY --from=dev-deps --chown=node:node /core/ .
-COPY --chown=node:node turbo.json turbo.json
-COPY --chown=node:node .configs/tsconfig.base.json .configs/tsconfig.base.json
-RUN pnpm run build --filter=@redplanethq/init...
- -# Runner -FROM ${NODE_IMAGE} AS runner -RUN apt-get update && apt-get install -y openssl postgresql-client ca-certificates -WORKDIR /core -RUN corepack enable -ENV NODE_ENV production - -COPY --from=base /usr/bin/dumb-init /usr/bin/dumb-init -COPY --from=pruner --chown=node:node /core/out/full/ . -COPY --from=production-deps --chown=node:node /core . -COPY --from=builder --chown=node:node /core/apps/init/dist ./apps/init/dist - -# Copy the trigger dump file -COPY --chown=node:node apps/init/trigger.dump ./apps/init/trigger.dump - -# Copy and set up entrypoint script -COPY --chown=node:node apps/init/entrypoint.sh ./apps/init/entrypoint.sh -RUN chmod +x ./apps/init/entrypoint.sh - -USER node -WORKDIR /core/apps/init -ENTRYPOINT ["dumb-init", "--"] -CMD ["./entrypoint.sh"] \ No newline at end of file diff --git a/apps/init/README.md b/apps/init/README.md deleted file mode 100644 index 9d6d85d..0000000 --- a/apps/init/README.md +++ /dev/null @@ -1,197 +0,0 @@ -# Core CLI - -🧠 **CORE - Contextual Observation & Recall Engine** - -A Command-Line Interface for setting up and managing the Core development environment. - -## Installation - -```bash -npm install -g @redplanethq/core -``` - -## Commands - -### `core init` - -**One-time setup command** - Initializes the Core development environment with full configuration. - -### `core start` - -**Daily usage command** - Starts all Core services (Docker containers). - -### `core stop` - -**Daily usage command** - Stops all Core services (Docker containers). - -## Getting Started - -### Prerequisites - -- **Node.js** (v18.20.0 or higher) -- **Docker** and **Docker Compose** -- **Git** -- **pnpm** package manager - -### Initial Setup - -1. **Clone the Core repository:** - ```bash - git clone https://github.com/redplanethq/core.git - cd core - ``` - -2. **Run the initialization command:** - ```bash - core init - ``` - -3. **The CLI will guide you through the complete setup process:** - -#### Step 1: Prerequisites Check -- The CLI shows a checklist of required tools -- Confirms you're in the Core repository directory -- Exits with instructions if prerequisites aren't met - -#### Step 2: Environment Configuration - -- Copies `.env.example` to `.env` in the root directory -- Copies `trigger/.env.example` to `trigger/.env` -- Skips copying if `.env` files already exist - -#### Step 3: Docker Services Startup - -- Starts main Core services: `docker compose up -d` -- Starts Trigger.dev services: `docker compose up -d` (in trigger/ directory) -- Shows real-time output with progress indicators - -#### Step 4: Database Health Check - -- Verifies PostgreSQL is running on `localhost:5432` -- Retries for up to 60 seconds if needed - -#### Step 5: Trigger.dev Setup (Interactive) - -- **If Trigger.dev is not configured:** - - 1. Prompts you to open http://localhost:8030 - 2. Asks you to login to Trigger.dev - 3. Guides you to create an organization and project - 4. Collects your Project ID and Secret Key - 5. Updates `.env` with your Trigger.dev configuration - 6. 
Restarts Core services with new configuration - -- **If Trigger.dev is already configured:** - - Skips setup and shows "Configuration already exists" message - -#### Step 6: Docker Registry Login - -- Displays docker login command with credentials from `.env` -- Waits for you to complete the login process - -#### Step 7: Trigger.dev Task Deployment - -- Automatically runs: `npx trigger.dev@v4-beta login -a http://localhost:8030` -- Deploys tasks with: `pnpm trigger:deploy` -- Shows manual deployment instructions if automatic deployment fails - -#### Step 8: Setup Complete! - -- Confirms all services are running -- Shows service URLs and connection information - -## Daily Usage - -After initial setup, use these commands for daily development: - -### Start Services - -```bash -core start -``` - -Starts all Docker containers for Core development. - -### Stop Services - -```bash -core stop -``` - -Stops all Docker containers. - -## Service URLs - -After setup, these services will be available: - -- **Core Application**: http://localhost:3033 -- **Trigger.dev**: http://localhost:8030 -- **PostgreSQL**: localhost:5432 - -## Troubleshooting - -### Repository Not Found - -If you run commands outside the Core repository: - -- The CLI will ask you to confirm you're in the Core repository -- If not, it provides instructions to clone the repository -- Navigate to the Core repository directory before running commands again - -### Docker Issues - -- Ensure Docker is running -- Check Docker Compose is installed -- Verify you have sufficient system resources - -### Trigger.dev Setup Issues - -- Check container logs: `docker logs trigger-webapp --tail 50` -- Ensure you can access http://localhost:8030 -- Verify your network allows connections to localhost - -### Environment Variables - -The CLI automatically manages these environment variables: - -- `TRIGGER_PROJECT_ID` - Your Trigger.dev project ID -- `TRIGGER_SECRET_KEY` - Your Trigger.dev secret key -- Docker registry credentials for deployment - -### Manual Trigger.dev Deployment - -If automatic deployment fails, run manually: - -```bash -npx trigger.dev@v4-beta login -a http://localhost:8030 -pnpm trigger:deploy -``` - -## Development Workflow - -1. **First time setup:** `core init` -2. **Daily development:** - - `core start` - Start your development environment - - Do your development work - - `core stop` - Stop services when done - -## Support - -For issues and questions: - -- Check the main Core repository: https://github.com/redplanethq/core -- Review Docker container logs for troubleshooting -- Ensure all prerequisites are properly installed - -## Features - -- πŸš€ **One-command setup** - Complete environment initialization -- πŸ”„ **Smart configuration** - Skips already configured components -- πŸ“± **Real-time feedback** - Live progress indicators and output -- 🐳 **Docker integration** - Full container lifecycle management -- πŸ”§ **Interactive setup** - Guided configuration process -- 🎯 **Error handling** - Graceful failure with recovery instructions - ---- - -**Happy coding with Core!** πŸŽ‰ diff --git a/apps/init/entrypoint.sh b/apps/init/entrypoint.sh deleted file mode 100644 index 86b64f0..0000000 --- a/apps/init/entrypoint.sh +++ /dev/null @@ -1,22 +0,0 @@ -#!/bin/sh - -# Exit on any error -set -e - -echo "Starting init CLI..." - -# Wait for database to be ready -echo "Waiting for database connection..." 
-until pg_isready -h "${DB_HOST:-localhost}" -p "${DB_PORT:-5432}" -U "${POSTGRES_USER:-docker}"; do - echo "Database is unavailable - sleeping" - sleep 2 -done - -echo "Database is ready!" - -# Run the init command -echo "Running init command..." -node ./dist/esm/index.js init - -echo "Init completed successfully!" -exit 0 \ No newline at end of file diff --git a/apps/init/package.json b/apps/init/package.json deleted file mode 100644 index b7c2101..0000000 --- a/apps/init/package.json +++ /dev/null @@ -1,145 +0,0 @@ -{ - "name": "@redplanethq/init", - "version": "0.1.0", - "description": "A init service to create trigger instance", - "type": "module", - "license": "MIT", - "repository": { - "type": "git", - "url": "https://github.com/redplanethq/core", - "directory": "apps/init" - }, - "publishConfig": { - "access": "public" - }, - "keywords": [ - "typescript" - ], - "files": [ - "dist", - "trigger.dump" - ], - "bin": { - "core": "./dist/esm/index.js" - }, - "tshy": { - "selfLink": false, - "main": false, - "module": false, - "dialects": [ - "esm" - ], - "project": "./tsconfig.json", - "exclude": [ - "**/*.test.ts" - ], - "exports": { - "./package.json": "./package.json", - ".": "./src/index.ts" - } - }, - "devDependencies": { - "@epic-web/test-server": "^0.1.0", - "@types/gradient-string": "^1.1.2", - "@types/ini": "^4.1.1", - "@types/object-hash": "3.0.6", - "@types/polka": "^0.5.7", - "@types/react": "^18.2.48", - "@types/resolve": "^1.20.6", - "@types/rimraf": "^4.0.5", - "@types/semver": "^7.5.0", - "@types/source-map-support": "0.5.10", - "@types/ws": "^8.5.3", - "cpy-cli": "^5.0.0", - "execa": "^8.0.1", - "find-up": "^7.0.0", - "rimraf": "^5.0.7", - "ts-essentials": "10.0.1", - "tshy": "^3.0.2", - "tsx": "4.17.0" - }, - "scripts": { - "clean": "rimraf dist .tshy .tshy-build .turbo", - "typecheck": "tsc -p tsconfig.src.json --noEmit", - "build": "tshy", - "test": "vitest", - "test:e2e": "vitest --run -c ./e2e/vitest.config.ts" - }, - "dependencies": { - "@clack/prompts": "^0.10.0", - "@depot/cli": "0.0.1-cli.2.80.0", - "@opentelemetry/api": "1.9.0", - "@opentelemetry/api-logs": "0.52.1", - "@opentelemetry/exporter-logs-otlp-http": "0.52.1", - "@opentelemetry/exporter-trace-otlp-http": "0.52.1", - "@opentelemetry/instrumentation": "0.52.1", - "@opentelemetry/instrumentation-fetch": "0.52.1", - "@opentelemetry/resources": "1.25.1", - "@opentelemetry/sdk-logs": "0.52.1", - "@opentelemetry/sdk-node": "0.52.1", - "@opentelemetry/sdk-trace-base": "1.25.1", - "@opentelemetry/sdk-trace-node": "1.25.1", - "@opentelemetry/semantic-conventions": "1.25.1", - "ansi-escapes": "^7.0.0", - "braces": "^3.0.3", - "c12": "^1.11.1", - "chalk": "^5.2.0", - "chokidar": "^3.6.0", - "cli-table3": "^0.6.3", - "commander": "^9.4.1", - "defu": "^6.1.4", - "dotenv": "^16.4.5", - "dotenv-expand": "^12.0.2", - "esbuild": "^0.23.0", - "eventsource": "^3.0.2", - "evt": "^2.4.13", - "fast-npm-meta": "^0.2.2", - "git-last-commit": "^1.0.1", - "gradient-string": "^2.0.2", - "has-flag": "^5.0.1", - "import-in-the-middle": "1.11.0", - "import-meta-resolve": "^4.1.0", - "ini": "^5.0.0", - "jsonc-parser": "3.2.1", - "magicast": "^0.3.4", - "minimatch": "^10.0.1", - "mlly": "^1.7.1", - "nypm": "^0.5.4", - "nanoid": "3.3.8", - "object-hash": "^3.0.0", - "open": "^10.0.3", - "knex": "3.1.0", - "p-limit": "^6.2.0", - "p-retry": "^6.1.0", - "partysocket": "^1.0.2", - "pkg-types": "^1.1.3", - "polka": "^0.5.2", - "pg": "8.16.3", - "resolve": "^1.22.8", - "semver": "^7.5.0", - "signal-exit": "^4.1.0", - "source-map-support": 
"0.5.21", - "std-env": "^3.7.0", - "supports-color": "^10.0.0", - "tiny-invariant": "^1.2.0", - "tinyexec": "^0.3.1", - "tinyglobby": "^0.2.10", - "uuid": "11.1.0", - "ws": "^8.18.0", - "xdg-app-paths": "^8.3.0", - "zod": "3.23.8", - "zod-validation-error": "^1.5.0" - }, - "engines": { - "node": ">=18.20.0" - }, - "exports": { - "./package.json": "./package.json", - ".": { - "import": { - "types": "./dist/esm/index.d.ts", - "default": "./dist/esm/index.js" - } - } - } -} diff --git a/apps/init/src/cli/index.ts b/apps/init/src/cli/index.ts deleted file mode 100644 index e20545c..0000000 --- a/apps/init/src/cli/index.ts +++ /dev/null @@ -1,14 +0,0 @@ -import { Command } from "commander"; -import { initCommand } from "../commands/init.js"; -import { VERSION } from "./version.js"; - -const program = new Command(); - -program.name("core").description("Core CLI - A Command-Line Interface for Core").version(VERSION); - -program - .command("init") - .description("Initialize Core development environment (run once)") - .action(initCommand); - -program.parse(process.argv); diff --git a/apps/init/src/cli/version.ts b/apps/init/src/cli/version.ts deleted file mode 100644 index 2985a76..0000000 --- a/apps/init/src/cli/version.ts +++ /dev/null @@ -1,3 +0,0 @@ -import { env } from "../utils/env.js"; - -export const VERSION = env.VERSION; diff --git a/apps/init/src/commands/init.ts b/apps/init/src/commands/init.ts deleted file mode 100644 index 83ad673..0000000 --- a/apps/init/src/commands/init.ts +++ /dev/null @@ -1,36 +0,0 @@ -import { intro, outro, note } from "@clack/prompts"; -import { printCoreBrainLogo } from "../utils/ascii.js"; -import { initTriggerDatabase, updateWorkerImage } from "../utils/trigger.js"; - -export async function initCommand() { - // Display the CORE brain logo - printCoreBrainLogo(); - - intro("πŸš€ Core Development Environment Setup"); - - try { - await initTriggerDatabase(); - await updateWorkerImage(); - - note( - [ - "Your services will start running:", - "", - "β€’ Core Application: http://localhost:3033", - "β€’ Trigger.dev: http://localhost:8030", - "β€’ PostgreSQL: localhost:5432", - "", - "You can now start developing with Core!", - "", - "ℹ️ When logging in to the Core Application, you can find the login URL in the Docker container logs:", - " docker logs core-app --tail 50", - ].join("\n"), - "πŸš€ Services Running" - ); - outro("πŸŽ‰ Setup Complete!"); - process.exit(0); - } catch (error: any) { - outro(`❌ Setup failed: ${error.message}`); - process.exit(1); - } -} diff --git a/apps/init/src/index.ts b/apps/init/src/index.ts deleted file mode 100644 index 44007a1..0000000 --- a/apps/init/src/index.ts +++ /dev/null @@ -1,3 +0,0 @@ -#!/usr/bin/env node - -import "./cli/index.js"; diff --git a/apps/init/src/utils/ascii.ts b/apps/init/src/utils/ascii.ts deleted file mode 100644 index 5df2765..0000000 --- a/apps/init/src/utils/ascii.ts +++ /dev/null @@ -1,29 +0,0 @@ -import chalk from "chalk"; -import { VERSION } from "../cli/version.js"; - -export function printCoreBrainLogo(): void { - const brain = ` - β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ•— β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ•— β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ•— β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ•— - β–ˆβ–ˆβ•”β•β•β•β•β•β–ˆβ–ˆβ•”β•β•β•β–ˆβ–ˆβ•—β–ˆβ–ˆβ•”β•β•β–ˆβ–ˆβ•—β–ˆβ–ˆβ•”β•β•β•β•β• - β–ˆβ–ˆβ•‘ β–ˆβ–ˆβ•‘ β–ˆβ–ˆβ•‘β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ•”β•β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ•— - β–ˆβ–ˆβ•‘ β–ˆβ–ˆβ•‘ β–ˆβ–ˆβ•‘β–ˆβ–ˆβ•”β•β•β–ˆβ–ˆβ•—β–ˆβ–ˆβ•”β•β•β• - β•šβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ•—β•šβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ•”β•β–ˆβ–ˆβ•‘ β–ˆβ–ˆβ•‘β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ•— - β•šβ•β•β•β•β•β• 
β•šβ•β•β•β•β•β• β•šβ•β• β•šβ•β•β•šβ•β•β•β•β•β•β• - - o o o - o o---o---o o - o---o o o---o---o - o o---o---o---o o - o---o o o---o---o - o o---o---o o - o o o - - `; - - console.log(chalk.cyan(brain)); - console.log( - chalk.bold.white( - ` 🧠 CORE - Contextual Observation & Recall Engine ${VERSION ? chalk.gray(`(${VERSION})`) : ""}\n` - ) - ); -} diff --git a/apps/init/src/utils/env.ts b/apps/init/src/utils/env.ts deleted file mode 100644 index 458ec7b..0000000 --- a/apps/init/src/utils/env.ts +++ /dev/null @@ -1,24 +0,0 @@ -import { z } from "zod"; - -const EnvironmentSchema = z.object({ - // Version - VERSION: z.string().default("0.1.24"), - - // Database - DB_HOST: z.string().default("localhost"), - DB_PORT: z.string().default("5432"), - TRIGGER_DB: z.string().default("trigger"), - POSTGRES_USER: z.string().default("docker"), - POSTGRES_PASSWORD: z.string().default("docker"), - - // Trigger database - TRIGGER_TASKS_IMAGE: z.string().default("redplanethq/proj_core:latest"), - - // Node environment - NODE_ENV: z - .union([z.literal("development"), z.literal("production"), z.literal("test")]) - .default("development"), -}); - -export type Environment = z.infer; -export const env = EnvironmentSchema.parse(process.env); diff --git a/apps/init/src/utils/trigger.ts b/apps/init/src/utils/trigger.ts deleted file mode 100644 index b8bd389..0000000 --- a/apps/init/src/utils/trigger.ts +++ /dev/null @@ -1,182 +0,0 @@ -import Knex from "knex"; -import path from "path"; -import { fileURLToPath } from "url"; -import { env } from "./env.js"; -import { spinner, note, log } from "@clack/prompts"; - -const __filename = fileURLToPath(import.meta.url); -const __dirname = path.dirname(__filename); - -/** - * Returns a PostgreSQL database URL for the given database name. - * Throws if required environment variables are missing. - */ -export function getDatabaseUrl(dbName: string): string { - const { POSTGRES_USER, POSTGRES_PASSWORD, DB_HOST, DB_PORT } = env; - - if (!POSTGRES_USER || !POSTGRES_PASSWORD || !DB_HOST || !DB_PORT || !dbName) { - throw new Error( - "One or more required environment variables are missing: POSTGRES_USER, POSTGRES_PASSWORD, DB_HOST, DB_PORT, dbName" - ); - } - - return `postgresql://${POSTGRES_USER}:${POSTGRES_PASSWORD}@${DB_HOST}:${DB_PORT}/${dbName}`; -} - -/** - * Checks if the database specified by TRIGGER_DB exists, and creates it if it does not. - * Returns { exists: boolean, created: boolean } - exists indicates success, created indicates if database was newly created. - */ -export async function ensureDatabaseExists(): Promise<{ exists: boolean; created: boolean }> { - const { TRIGGER_DB } = env; - - if (!TRIGGER_DB) { - throw new Error("TRIGGER_DB environment variable is missing"); - } - - // Build a connection string to the default 'postgres' database - const adminDbUrl = getDatabaseUrl("postgres"); - - // Create a Knex instance for the admin connection - const adminKnex = Knex({ - client: "pg", - connection: adminDbUrl, - }); - - const s = spinner(); - s.start("Checking for Trigger.dev database..."); - - try { - // Check if the database exists - const result = await adminKnex.select(1).from("pg_database").where("datname", TRIGGER_DB); - - if (result.length === 0) { - s.message("Database not found. 
Creating..."); - // Database does not exist, create it - await adminKnex.raw(`CREATE DATABASE "${TRIGGER_DB}"`); - s.stop("Database created."); - return { exists: true, created: true }; - } else { - s.stop("Database exists."); - return { exists: true, created: false }; - } - } catch (err) { - s.stop("Failed to ensure database exists."); - log.warning("Failed to ensure database exists: " + (err as Error).message); - return { exists: false, created: false }; - } finally { - await adminKnex.destroy(); - } -} - -// Main initialization function -export async function initTriggerDatabase() { - const { TRIGGER_DB } = env; - - if (!TRIGGER_DB) { - throw new Error("TRIGGER_DB environment variable is missing"); - } - - // Ensure the database exists - const { exists, created } = await ensureDatabaseExists(); - if (!exists) { - throw new Error("Failed to create or verify database exists"); - } - - // Only run pg_restore if the database was newly created - if (!created) { - note("Database already exists, skipping restore from trigger.dump"); - return; - } - - // Run pg_restore with the trigger.dump file - const dumpFilePath = path.join(__dirname, "../../../trigger.dump"); - const connectionString = getDatabaseUrl(TRIGGER_DB); - - const s = spinner(); - s.start("Restoring database from trigger.dump..."); - - try { - // Use execSync and capture stdout/stderr, send to spinner.log - const { spawn } = await import("child_process"); - await new Promise((resolve, reject) => { - const child = spawn( - "pg_restore", - ["--verbose", "--no-acl", "--no-owner", "-d", connectionString, dumpFilePath], - { stdio: ["ignore", "pipe", "pipe"] } - ); - - child.stdout.on("data", (data) => { - s.message(data.toString()); - }); - - child.stderr.on("data", (data) => { - s.message(data.toString()); - }); - - child.on("close", (code) => { - if (code === 0) { - s.stop("Database restored successfully from trigger.dump"); - resolve(); - } else { - s.stop("Failed to restore database."); - log.warning(`Failed to restore database: pg_restore exited with code ${code}`); - reject(new Error(`Database restore failed: pg_restore exited with code ${code}`)); - } - }); - - child.on("error", (err) => { - s.stop("Failed to restore database."); - log.warning("Failed to restore database: " + err.message); - reject(new Error(`Database restore failed: ${err.message}`)); - }); - }); - } catch (error: any) { - s.stop("Failed to restore database."); - log.warning("Failed to restore database: " + error.message); - throw new Error(`Database restore failed: ${error.message}`); - } -} - -export async function updateWorkerImage() { - const { TRIGGER_DB, TRIGGER_TASKS_IMAGE } = env; - - if (!TRIGGER_DB) { - throw new Error("TRIGGER_DB environment variable is missing"); - } - - const connectionString = getDatabaseUrl(TRIGGER_DB); - - const knex = Knex({ - client: "pg", - connection: connectionString, - }); - - const s = spinner(); - s.start("Updating worker image reference..."); - - try { - // Get the first record from WorkerDeployment table - const firstWorkerDeployment = await knex("WorkerDeployment").select("id").first(); - - if (!firstWorkerDeployment) { - s.stop("No WorkerDeployment records found, skipping image update"); - note("No WorkerDeployment records found, skipping image update"); - return; - } - - // Update the imageReference column with the TRIGGER_TASKS_IMAGE value - await knex("WorkerDeployment").where("id", firstWorkerDeployment.id).update({ - imageReference: TRIGGER_TASKS_IMAGE, - updatedAt: new Date(), - }); - - s.stop(`Successfully 
updated worker image reference to: ${TRIGGER_TASKS_IMAGE}`); - } catch (error: any) { - s.stop("Failed to update worker image."); - log.warning("Failed to update worker image: " + error.message); - throw new Error(`Worker image update failed: ${error.message}`); - } finally { - await knex.destroy(); - } -} diff --git a/apps/init/trigger.dump b/apps/init/trigger.dump deleted file mode 100644 index 81a09bc..0000000 Binary files a/apps/init/trigger.dump and /dev/null differ diff --git a/apps/init/tsconfig.json b/apps/init/tsconfig.json deleted file mode 100644 index b01fe1d..0000000 --- a/apps/init/tsconfig.json +++ /dev/null @@ -1,40 +0,0 @@ -{ - "include": ["./src/**/*.ts"], - "exclude": ["./src/**/*.test.ts"], - "compilerOptions": { - "target": "es2022", - "lib": ["ES2022", "DOM", "DOM.Iterable", "DOM.AsyncIterable"], - "module": "NodeNext", - "moduleResolution": "NodeNext", - "moduleDetection": "force", - "verbatimModuleSyntax": false, - "jsx": "react", - - "strict": true, - "alwaysStrict": true, - "strictPropertyInitialization": true, - "skipLibCheck": true, - "forceConsistentCasingInFileNames": true, - "noUnusedLocals": false, - "noUnusedParameters": false, - "noImplicitAny": true, - "noImplicitReturns": true, - "noImplicitThis": true, - - "noFallthroughCasesInSwitch": true, - "resolveJsonModule": true, - - "removeComments": false, - "esModuleInterop": true, - "emitDecoratorMetadata": false, - "experimentalDecorators": false, - "downlevelIteration": true, - "isolatedModules": true, - "noUncheckedIndexedAccess": true, - - "pretty": true, - "isolatedDeclarations": false, - "composite": true, - "sourceMap": true - } -} diff --git a/apps/init/vite.config.ts b/apps/init/vite.config.ts deleted file mode 100644 index aba1cd0..0000000 --- a/apps/init/vite.config.ts +++ /dev/null @@ -1,8 +0,0 @@ -import { configDefaults, defineConfig } from "vitest/config"; - -export default defineConfig({ - test: { - globals: true, - exclude: [...configDefaults.exclude, "e2e/**/*"], - }, -}); diff --git a/apps/webapp/app/bullmq/queues/index.ts b/apps/webapp/app/bullmq/queues/index.ts index ebefc4b..fb627af 100644 --- a/apps/webapp/app/bullmq/queues/index.ts +++ b/apps/webapp/app/bullmq/queues/index.ts @@ -92,3 +92,69 @@ export const sessionCompactionQueue = new Queue("session-compaction-queue", { }, }, }); + +/** + * BERT topic analysis queue + * Handles CPU-intensive topic modeling on user episodes + */ +export const bertTopicQueue = new Queue("bert-topic-queue", { + connection: getRedisConnection(), + defaultJobOptions: { + attempts: 2, // Only 2 attempts due to long runtime + backoff: { + type: "exponential", + delay: 5000, + }, + removeOnComplete: { + age: 7200, // Keep completed jobs for 2 hours + count: 100, + }, + removeOnFail: { + age: 172800, // Keep failed jobs for 48 hours (for debugging) + }, + }, +}); + +/** + * Space assignment queue + * Handles assigning episodes to spaces based on semantic matching + */ +export const spaceAssignmentQueue = new Queue("space-assignment-queue", { + connection: getRedisConnection(), + defaultJobOptions: { + attempts: 3, + backoff: { + type: "exponential", + delay: 2000, + }, + removeOnComplete: { + age: 3600, + count: 1000, + }, + removeOnFail: { + age: 86400, + }, + }, +}); + +/** + * Space summary queue + * Handles generating summaries for spaces + */ +export const spaceSummaryQueue = new Queue("space-summary-queue", { + connection: getRedisConnection(), + defaultJobOptions: { + attempts: 3, + backoff: { + type: "exponential", + delay: 2000, + }, + 
removeOnComplete: {
+      age: 3600,
+      count: 1000,
+    },
+    removeOnFail: {
+      age: 86400,
+    },
+  },
+});
diff --git a/apps/webapp/app/bullmq/start-workers.ts b/apps/webapp/app/bullmq/start-workers.ts
index 1d64b40..b16c597 100644
--- a/apps/webapp/app/bullmq/start-workers.ts
+++ b/apps/webapp/app/bullmq/start-workers.ts
@@ -66,7 +66,6 @@ export async function initWorkers(): Promise<void> {
       queue: conversationTitleQueue,
       name: "conversation-title",
     },
-
     {
       worker: sessionCompactionWorker,
       queue: sessionCompactionQueue,
diff --git a/apps/webapp/app/bullmq/workers/index.ts b/apps/webapp/app/bullmq/workers/index.ts
index e2d930d..3b58656 100644
--- a/apps/webapp/app/bullmq/workers/index.ts
+++ b/apps/webapp/app/bullmq/workers/index.ts
@@ -18,24 +18,39 @@ import {
   processConversationTitleCreation,
   type CreateConversationTitlePayload,
 } from "~/jobs/conversation/create-title.logic";
-
 import {
   processSessionCompaction,
   type SessionCompactionPayload,
 } from "~/jobs/session/session-compaction.logic";
+import {
+  processTopicAnalysis,
+  type TopicAnalysisPayload,
+} from "~/jobs/bert/topic-analysis.logic";
+
 import {
   enqueueIngestEpisode,
   enqueueSpaceAssignment,
   enqueueSessionCompaction,
+  enqueueBertTopicAnalysis,
+  enqueueSpaceSummary,
 } from "~/lib/queue-adapter.server";
 import { logger } from "~/services/logger.service";
+import {
+  processSpaceAssignment,
+  type SpaceAssignmentPayload,
+} from "~/jobs/spaces/space-assignment.logic";
+import {
+  processSpaceSummary,
+  type SpaceSummaryPayload,
+} from "~/jobs/spaces/space-summary.logic";

 /**
  * Episode ingestion worker
- * Processes individual episode ingestion jobs with per-user concurrency
+ * Processes individual episode ingestion jobs with global concurrency
  *
- * Note: Per-user concurrency is achieved by using userId as part of the jobId
- * when adding jobs to the queue, ensuring only one job per user runs at a time
+ * Note: BullMQ uses a global concurrency limit (one job at a time, see below).
+ * Trigger.dev uses per-user concurrency via concurrencyKey.
+ * For most open-source deployments, global concurrency is sufficient.
  */
 export const ingestWorker = new Worker(
   "ingest-queue",
   async (job) => {
@@ -47,11 +62,12 @@
       // Callbacks to enqueue follow-up jobs
       enqueueSpaceAssignment,
       enqueueSessionCompaction,
+      enqueueBertTopicAnalysis,
     );
   },
   {
     connection: getRedisConnection(),
-    concurrency: 5, // Process up to 5 jobs in parallel
+    concurrency: 1, // Global limit: process one job at a time
   },
 );

@@ -108,6 +124,65 @@ export const sessionCompactionWorker = new Worker(
   },
 );

+/**
+ * BERT topic analysis worker
+ * Handles CPU-intensive topic modeling
+ */
+export const bertTopicWorker = new Worker(
+  "bert-topic-queue",
+  async (job) => {
+    const payload = job.data as TopicAnalysisPayload;
+    return await processTopicAnalysis(
+      payload,
+      // Callback to enqueue space summary
+      enqueueSpaceSummary,
+    );
+  },
+  {
+    connection: getRedisConnection(),
+    concurrency: 2, // Process up to 2 analyses in parallel (CPU-intensive)
+  },
+);
+
+/**
+ * Space assignment worker
+ * Handles assigning episodes to spaces based on semantic matching
+ *
+ * Note: Global concurrency of 1 ensures sequential processing.
+ * Trigger.dev uses per-user concurrency via concurrencyKey.
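+ *
+ * A rough sketch (an assumption, not the adapter's actual code) of how the
+ * BullMQ path of enqueueSpaceAssignment in ~/lib/queue-adapter.server could
+ * feed this worker, using a jobId for idempotent deduplication:
+ *
+ *   await spaceAssignmentQueue.add("space-assignment", payload, {
+ *     jobId: `space-assignment-${payload.userId}-${payload.mode}`,
+ *   });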
+ */
+export const spaceAssignmentWorker = new Worker(
+  "space-assignment-queue",
+  async (job) => {
+    const payload = job.data as SpaceAssignmentPayload;
+    return await processSpaceAssignment(
+      payload,
+      // Callback to enqueue space summary
+      enqueueSpaceSummary,
+    );
+  },
+  {
+    connection: getRedisConnection(),
+    concurrency: 1, // Global limit: process one job at a time
+  },
+);
+
+/**
+ * Space summary worker
+ * Handles generating summaries for spaces
+ */
+export const spaceSummaryWorker = new Worker(
+  "space-summary-queue",
+  async (job) => {
+    const payload = job.data as SpaceSummaryPayload;
+    return await processSpaceSummary(payload);
+  },
+  {
+    connection: getRedisConnection(),
+    concurrency: 1, // Process one space summary at a time
+  },
+);
+
 /**
  * Graceful shutdown handler
  */
@@ -116,8 +191,11 @@ export async function closeAllWorkers(): Promise<void> {
     ingestWorker.close(),
     documentIngestWorker.close(),
     conversationTitleWorker.close(),
     sessionCompactionWorker.close(),
+    bertTopicWorker.close(),
+    spaceSummaryWorker.close(),
+    spaceAssignmentWorker.close(),
   ]);
   logger.log("All BullMQ workers closed");
 }
diff --git a/apps/webapp/app/jobs/bert/topic-analysis.logic.ts b/apps/webapp/app/jobs/bert/topic-analysis.logic.ts
new file mode 100644
index 0000000..238d01b
--- /dev/null
+++ b/apps/webapp/app/jobs/bert/topic-analysis.logic.ts
@@ -0,0 +1,250 @@
+import { exec } from "child_process";
+import { promisify } from "util";
+import { identifySpacesForTopics } from "~/jobs/spaces/space-identification.logic";
+import { assignEpisodesToSpace } from "~/services/graphModels/space";
+import { logger } from "~/services/logger.service";
+import { SpaceService } from "~/services/space.server";
+import { prisma } from "~/trigger/utils/prisma";
+
+const execAsync = promisify(exec);
+
+export interface TopicAnalysisPayload {
+  userId: string;
+  workspaceId: string;
+  minTopicSize?: number;
+  nrTopics?: number;
+}
+
+export interface TopicAnalysisResult {
+  topics: {
+    [topicId: string]: {
+      keywords: string[];
+      episodeIds: string[];
+    };
+  };
+}
+
+/**
+ * Run BERT analysis using exec (for BullMQ/Docker)
+ */
+async function runBertWithExec(
+  userId: string,
+  minTopicSize: number,
+  nrTopics?: number,
+): Promise<string> {
+  let command = `python3 /core/apps/webapp/python/main.py ${userId} --json`;
+
+  if (minTopicSize) {
+    command += ` --min-topic-size ${minTopicSize}`;
+  }
+
+  if (nrTopics) {
+    command += ` --nr-topics ${nrTopics}`;
+  }
+
+  console.log(`[BERT Topic Analysis] Executing: ${command}`);
+
+  const { stdout, stderr } = await execAsync(command, {
+    timeout: 300000, // 5 minutes
+    maxBuffer: 10 * 1024 * 1024, // 10MB buffer for large outputs
+  });
+
+  if (stderr) {
+    console.warn(`[BERT Topic Analysis] Warnings:`, stderr);
+  }
+
+  return stdout;
+}
+
+/**
+ * Process BERT topic analysis on user's episodes
+ * This is the common logic shared between Trigger.dev and BullMQ
+ *
+ * NOTE: This function does NOT update workspace.metadata.lastTopicAnalysisAt.
+ * That should be done by the caller BEFORE enqueueing this job, to prevent
+ * race conditions from kicking off duplicate analyses.
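+ *
+ * A sketch of the intended caller pattern (compare ingest-episode.logic.ts;
+ * exact call sites may differ):
+ *
+ *   if (await shouldTriggerTopicAnalysis(userId, workspaceId)) {
+ *     await updateLastTopicAnalysisTime(workspaceId); // claim the run first
+ *     await enqueueBertTopicAnalysis({ userId, workspaceId, minTopicSize: 10 });
+ *   }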
+ */
+export async function processTopicAnalysis(
+  payload: TopicAnalysisPayload,
+  enqueueSpaceSummary?: (params: {
+    spaceId: string;
+    userId: string;
+  }) => Promise<void>,
+  pythonRunner?: (
+    userId: string,
+    minTopicSize: number,
+    nrTopics?: number,
+  ) => Promise<string>,
+): Promise<TopicAnalysisResult> {
+  const { userId, workspaceId, minTopicSize = 10, nrTopics } = payload;
+
+  console.log(`[BERT Topic Analysis] Starting analysis for user: ${userId}`);
+  console.log(
+    `[BERT Topic Analysis] Parameters: minTopicSize=${minTopicSize}, nrTopics=${nrTopics || "auto"}`,
+  );
+
+  try {
+    const startTime = Date.now();
+
+    // Run BERT analysis using provided runner or default exec
+    const runner = pythonRunner || runBertWithExec;
+    const stdout = await runner(userId, minTopicSize, nrTopics);
+
+    const duration = Date.now() - startTime;
+    console.log(`[BERT Topic Analysis] Completed in ${duration}ms`);
+
+    // Parse the JSON output
+    const result: TopicAnalysisResult = JSON.parse(stdout);
+
+    // Log summary
+    const topicCount = Object.keys(result.topics).length;
+    const totalEpisodes = Object.values(result.topics).reduce(
+      (sum, topic) => sum + topic.episodeIds.length,
+      0,
+    );
+
+    console.log(
+      `[BERT Topic Analysis] Found ${topicCount} topics covering ${totalEpisodes} episodes`,
+    );
+
+    // Step 2: Identify spaces for topics using LLM
+    try {
+      logger.info("[BERT Topic Analysis] Starting space identification", {
+        userId,
+        topicCount,
+      });
+
+      const spaceProposals = await identifySpacesForTopics({
+        userId,
+        topics: result.topics,
+      });
+
+      logger.info("[BERT Topic Analysis] Space identification completed", {
+        userId,
+        proposalCount: spaceProposals.length,
+      });
+
+      // Step 3: Create or find spaces and assign episodes
+      // Get existing spaces from PostgreSQL
+      const existingSpacesFromDb = await prisma.space.findMany({
+        where: { workspaceId },
+      });
+      const existingSpacesByName = new Map(
+        existingSpacesFromDb.map((s) => [s.name.toLowerCase(), s]),
+      );
+
+      for (const proposal of spaceProposals) {
+        try {
+          // Check if space already exists (case-insensitive match)
+          let spaceId: string;
+          const existingSpace = existingSpacesByName.get(
+            proposal.name.toLowerCase(),
+          );
+
+          if (existingSpace) {
+            // Use existing space
+            spaceId = existingSpace.id;
+            logger.info("[BERT Topic Analysis] Using existing space", {
+              spaceName: proposal.name,
+              spaceId,
+            });
+          } else {
+            // Create new space (creates in both PostgreSQL and Neo4j)
+            // Skip automatic space assignment since we're manually assigning from BERT topics
+            const spaceService = new SpaceService();
+            const newSpace = await spaceService.createSpace({
+              name: proposal.name,
+              description: proposal.intent,
+              userId,
+              workspaceId,
+            });
+            spaceId = newSpace.id;
+            logger.info("[BERT Topic Analysis] Created new space", {
+              spaceName: proposal.name,
+              spaceId,
+              intent: proposal.intent,
+            });
+          }
+
+          // Collect all episode IDs from the topics in this proposal
+          const episodeIds: string[] = [];
+          for (const topicId of proposal.topics) {
+            const topic = result.topics[topicId];
+            if (topic) {
+              episodeIds.push(...topic.episodeIds);
+            }
+          }
+
+          // Assign all episodes from these topics to the space
+          if (episodeIds.length > 0) {
+            await assignEpisodesToSpace(episodeIds, spaceId, userId);
+            logger.info("[BERT Topic Analysis] Assigned episodes to space", {
+              spaceName: proposal.name,
+              spaceId,
+              episodeCount: episodeIds.length,
+              topics: proposal.topics,
+            });
+
+            // Step 4: Trigger space summary if callback provided
+            if (enqueueSpaceSummary) {
+              await enqueueSpaceSummary({ spaceId, userId });
+              logger.info("[BERT Topic Analysis] Triggered space summary", {
+                spaceName: proposal.name,
+                spaceId,
+              });
+            }
+          }
+        } catch (spaceError) {
+          logger.error(
+            "[BERT Topic Analysis] Failed to process space proposal",
+            {
+              proposal,
+              error: spaceError,
+            },
+          );
+          // Continue with other proposals
+        }
+      }
+    } catch (spaceIdentificationError) {
+      logger.error(
+        "[BERT Topic Analysis] Space identification failed, returning topics only",
+        {
+          error: spaceIdentificationError,
+        },
+      );
+      // Return topics even if space identification fails
+    }
+
+    return result;
+  } catch (error) {
+    console.error(`[BERT Topic Analysis] Error:`, error);
+
+    if (error instanceof Error) {
+      // Check for timeout
+      if (error.message.includes("ETIMEDOUT")) {
+        throw new Error(
+          `Topic analysis timed out after 5 minutes. User may have too many episodes.`,
+        );
+      }
+
+      // Check for Python errors
+      if (error.message.includes("python3: not found")) {
+        throw new Error(`Python 3 is not installed or not available in PATH.`);
+      }
+
+      // Check for Neo4j connection errors
+      if (error.message.includes("Failed to connect to Neo4j")) {
+        throw new Error(
+          `Could not connect to Neo4j. Check NEO4J_URI and credentials.`,
+        );
+      }
+
+      // Check for no episodes
+      if (error.message.includes("No episodes found")) {
+        throw new Error(`No episodes found for userId: ${userId}`);
+      }
+    }
+
+    throw error;
+  }
+}
diff --git a/apps/webapp/app/jobs/ingest/ingest-episode.logic.ts b/apps/webapp/app/jobs/ingest/ingest-episode.logic.ts
index e1b515a..5ddc8ae 100644
--- a/apps/webapp/app/jobs/ingest/ingest-episode.logic.ts
+++ b/apps/webapp/app/jobs/ingest/ingest-episode.logic.ts
@@ -7,6 +7,10 @@ import { prisma } from "~/trigger/utils/prisma";
 import { EpisodeType } from "@core/types";
 import { deductCredits, hasCredits } from "~/trigger/utils/utils";
 import { assignEpisodesToSpace } from "~/services/graphModels/space";
+import {
+  shouldTriggerTopicAnalysis,
+  updateLastTopicAnalysisTime,
+} from "~/services/bertTopicAnalysis.server";

 export const IngestBodyRequest = z.object({
   episodeBody: z.string(),
@@ -55,6 +59,12 @@
     sessionId: string;
     source: string;
   }) => Promise<void>,
+  enqueueBertTopicAnalysis?: (params: {
+    userId: string;
+    workspaceId: string;
+    minTopicSize?: number;
+    nrTopics?: number;
+  }) => Promise<void>,
 ): Promise {
   try {
     logger.log(`Processing job for user ${payload.userId}`);
@@ -250,6 +260,45 @@
       });
     }

+    // Auto-trigger BERT topic analysis if threshold met (20+ new episodes)
+    try {
+      if (
+        currentStatus === IngestionStatus.COMPLETED &&
+        enqueueBertTopicAnalysis
+      ) {
+        const shouldTrigger = await shouldTriggerTopicAnalysis(
+          payload.userId,
+          payload.workspaceId,
+        );
+
+        if (shouldTrigger) {
+          logger.info(
+            `Triggering BERT topic analysis after reaching 20+ new episodes`,
+            {
+              userId: payload.userId,
+              workspaceId: payload.workspaceId,
+            },
+          );
+
+          // Update the last-analysis timestamp BEFORE enqueueing, per the
+          // race-condition note in topic-analysis.logic.ts
+          await updateLastTopicAnalysisTime(payload.workspaceId);
+
+          await enqueueBertTopicAnalysis({
+            userId: payload.userId,
+            workspaceId: payload.workspaceId,
+            minTopicSize: 10,
+          });
+        }
+      }
+    } catch (topicAnalysisError) {
+      // Don't fail the ingestion if topic analysis fails
+      logger.warn(`Failed to trigger topic analysis after ingestion:`, {
+        error: topicAnalysisError,
+        userId: payload.userId,
+      });
+    }
+
     return { success: true, episodeDetails };
   } catch (err: any) {
     await prisma.ingestionQueue.update({
diff --git a/apps/webapp/app/jobs/session/session-compaction.logic.ts b/apps/webapp/app/jobs/session/session-compaction.logic.ts
index 92e242b..ae1b6ca 100644
--- a/apps/webapp/app/jobs/session/session-compaction.logic.ts
+++ b/apps/webapp/app/jobs/session/session-compaction.logic.ts
@@ -36,7 +36,7 @@ export interface SessionCompactionResult {
 }

 // Zod schema for LLM response validation
-const CompactionResultSchema = z.object({
+export const CompactionResultSchema = z.object({
   summary: z.string().describe("Consolidated narrative of the entire session"),
   confidence: z
     .number()
@@ -45,7 +45,7 @@
     .describe("Confidence score of the compaction quality"),
 });

-const CONFIG = {
+export const CONFIG = {
   minEpisodesForCompaction: 5, // Minimum episodes to trigger compaction
   compactionThreshold: 1, // Trigger after N new episodes
   maxEpisodesPerBatch: 50, // Process in batches if needed
diff --git a/apps/webapp/app/jobs/spaces/space-assignment.logic.ts b/apps/webapp/app/jobs/spaces/space-assignment.logic.ts
new file mode 100644
index 0000000..5dbbf81
--- /dev/null
+++ b/apps/webapp/app/jobs/spaces/space-assignment.logic.ts
@@ -0,0 +1,1201 @@
+import { z } from "zod";
+import { logger } from "~/services/logger.service";
+import { SpaceService } from "~/services/space.server";
+import { makeModelCall } from "~/lib/model.server";
+import { createBatch, getBatch } from "~/lib/batch.server";
+import { runQuery } from "~/lib/neo4j.server";
+import {
+  assignEpisodesToSpace,
+  getSpaceEpisodeCount,
+} from "~/services/graphModels/space";
+import {
+  updateMultipleSpaceStatuses,
+  SPACE_STATUS,
+} from "~/trigger/utils/space-status";
+import type { CoreMessage } from "ai";
+import { type Space } from "@prisma/client";
+
+export interface SpaceAssignmentPayload {
+  userId: string;
+  workspaceId: string;
+  mode: "new_space" | "episode";
+  newSpaceId?: string; // For new_space mode
+  episodeIds?: string[]; // For episode mode
+  batchSize?: number; // Processing batch size (defaults per mode, see CONFIG)
+}
+
+interface EpisodeData {
+  uuid: string;
+  content: string;
+  originalContent: string;
+  source: string;
+  createdAt: Date;
+  metadata: any;
+}
+
+interface AssignmentResult {
+  episodeId: string;
+  spaceIds: string[];
+  confidence: number;
+  reasoning?: string;
+}
+
+const CONFIG = {
+  newSpaceMode: {
+    batchSize: 20,
+    confidenceThreshold: 0.75, // Intent-based threshold for new space creation
+    useBatchAPI: true, // Use batch API for new space mode
+    minEpisodesForBatch: 5, // Minimum episodes to use batch API
+  },
+  episodeMode: {
+    batchSize: 20,
+    confidenceThreshold: 0.75, // Intent-based threshold for episode assignment
+    useBatchAPI: true, // Use batch API for episode mode
+    minEpisodesForBatch: 5, // Minimum episodes to use batch API
+  },
+};
+
+// Zod schema for LLM response validation
+const AssignmentResultSchema = z.array(
+  z.object({
+    episodeId: z.string(),
+    addSpaceId: z.array(z.string()),
+    confidence: z.number(),
+    reasoning: z.string(),
+  }),
+);
+
+export interface SpaceAssignmentResult {
+  success: boolean;
+  mode: string;
+  processed: number;
+  assignments: number;
+  batches?: number;
+  spacesAvailable: number;
+  affectedSpaces: number;
+  summaryTriggered: boolean;
+  patternCheckTriggered: boolean;
+}
+
+/**
+ * Core business logic for space assignment
+ * This is shared between Trigger.dev and BullMQ implementations
+ */
+export async function processSpaceAssignment(
+  payload: SpaceAssignmentPayload,
+  // Callback function for triggering space summary
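+  // On the BullMQ path this is wired to enqueueSpaceSummary from
+  // ~/lib/queue-adapter.server (see workers/index.ts), e.g.:
+  //   await processSpaceAssignment(payload, enqueueSpaceSummary);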
+  enqueueSpaceSummary?: (params: {
+    userId: string;
+    workspaceId: string;
+    spaceId: string;
+    triggerSource: "assignment" | "manual" | "scheduled";
+  }) => Promise<void>,
+): Promise<SpaceAssignmentResult> {
+  const {
+    userId,
+    workspaceId,
+    mode,
+    newSpaceId,
+    episodeIds,
+    batchSize = mode === "new_space"
+      ? CONFIG.newSpaceMode.batchSize
+      : CONFIG.episodeMode.batchSize,
+  } = payload;
+
+  logger.info(`Starting space assignment`, {
+    userId,
+    mode,
+    newSpaceId,
+    episodeIds,
+    batchSize,
+  });
+
+  const spaceService = new SpaceService();
+
+  try {
+    // 1. Get user's spaces
+    const spaces = await spaceService.getUserSpaces(userId);
+
+    if (spaces.length === 0) {
+      logger.info(`No spaces found for user ${userId}, skipping assignment`);
+      return {
+        success: true,
+        mode,
+        processed: 0,
+        assignments: 0,
+        spacesAvailable: 0,
+        affectedSpaces: 0,
+        summaryTriggered: false,
+        patternCheckTriggered: false,
+      };
+    }
+
+    // 2. Get episodes to analyze based on mode
+    const episodes = await getEpisodesToAnalyze(userId, mode, {
+      newSpaceId,
+      episodeIds,
+    });
+
+    if (episodes.length === 0) {
+      logger.info(
+        `No episodes to analyze for user ${userId} in ${mode} mode`,
+      );
+      return {
+        success: true,
+        mode,
+        processed: 0,
+        assignments: 0,
+        spacesAvailable: spaces.length,
+        affectedSpaces: 0,
+        summaryTriggered: false,
+        patternCheckTriggered: false,
+      };
+    }
+
+    // 3. Process episodes using batch AI or fallback to sequential
+    const config =
+      mode === "new_space" ? CONFIG.newSpaceMode : CONFIG.episodeMode;
+    const shouldUseBatchAPI = true;
+
+    let totalProcessed = 0;
+    let totalAssignments = 0;
+    let totalBatches = 0;
+    const affectedSpaces = new Set<string>(); // Track spaces that received new episodes
+
+    if (shouldUseBatchAPI) {
+      logger.info(
+        `Using Batch AI processing for ${episodes.length} episodes`,
+        {
+          mode,
+          userId,
+          batchSize,
+        },
+      );
+
+      const batchResult = await processBatchAI(
+        episodes,
+        spaces,
+        userId,
+        mode,
+        newSpaceId,
+        batchSize,
+      );
+      totalProcessed = batchResult.processed;
+      totalAssignments = batchResult.assignments;
+      batchResult.affectedSpaces?.forEach((spaceId) =>
+        affectedSpaces.add(spaceId),
+      );
+    } else {
+      logger.info(
+        `Using sequential processing for ${episodes.length} episodes (below batch threshold)`,
+        {
+          mode,
+          userId,
+          minRequired: config.minEpisodesForBatch,
+        },
+      );
+
+      // Fallback to sequential processing for smaller episode sets
+      totalBatches = Math.ceil(episodes.length / batchSize);
+
+      for (let i = 0; i < totalBatches; i++) {
+        const batch = episodes.slice(i * batchSize, (i + 1) * batchSize);
+
+        logger.info(
+          `Processing batch ${i + 1}/${totalBatches} with ${batch.length} episodes`,
+          {
+            mode,
+            userId,
+          },
+        );
+
+        const batchResult = await processBatch(
+          batch,
+          spaces,
+          userId,
+          mode,
+          newSpaceId,
+        );
+        totalProcessed += batchResult.processed;
+        totalAssignments += batchResult.assignments;
+        batchResult.affectedSpaces?.forEach((spaceId) =>
+          affectedSpaces.add(spaceId),
+        );
+
+        // Add delay between batches to avoid rate limiting
+        if (i < totalBatches - 1) {
+          await new Promise((resolve) => setTimeout(resolve, 1000));
+        }
+      }
+    }
+
+    logger.info(`Completed LLM space assignment`, {
+      userId,
+      mode,
+      totalProcessed,
+      totalAssignments,
+      spacesAvailable: spaces.length,
+      affectedSpaces: affectedSpaces.size,
+    });
+
+    // 4. Update space status to "processing" for affected spaces
+    if (affectedSpaces.size > 0) {
+      try {
+        await updateMultipleSpaceStatuses(
+          Array.from(affectedSpaces),
+          SPACE_STATUS.PROCESSING,
+          {
+            userId,
+            operation: "space-assignment",
+            metadata: { mode, phase: "start_processing" },
+          },
+        );
+      } catch (statusError) {
+        logger.warn(`Failed to update space statuses to processing:`, {
+          error: statusError,
+          userId,
+          mode,
+        });
+      }
+    }
+
+    // 5. Trigger space summaries for affected spaces (fan-out pattern)
+    if (affectedSpaces.size > 0 && enqueueSpaceSummary) {
+      try {
+        logger.info(
+          `Triggering space summaries for ${affectedSpaces.size} affected spaces in parallel`,
+        );
+
+        // Fan out to multiple parallel triggers
+        const summaryPromises = Array.from(affectedSpaces).map((spaceId) =>
+          enqueueSpaceSummary({
+            userId,
+            workspaceId,
+            spaceId,
+            triggerSource: "assignment",
+          }).catch((error) => {
+            logger.warn(`Failed to trigger summary for space ${spaceId}:`, {
+              error,
+            });
+            return { success: false, spaceId, error: error.message };
+          }),
+        );
+
+        const summaryResults = await Promise.allSettled(summaryPromises);
+        const successful = summaryResults.filter(
+          (r) => r.status === "fulfilled",
+        ).length;
+        const failed = summaryResults.filter(
+          (r) => r.status === "rejected",
+        ).length;
+
+        logger.info(`Space summary triggers completed`, {
+          userId,
+          mode,
+          totalSpaces: affectedSpaces.size,
+          successful,
+          failed,
+        });
+      } catch (summaryError) {
+        // Don't fail the assignment if summary generation fails
+        logger.warn(`Failed to trigger space summaries after assignment:`, {
+          error: summaryError,
+          userId,
+          mode,
+          affectedSpaces: Array.from(affectedSpaces),
+        });
+      }
+    }
+
+    // 6. Update space status to "ready" after all processing is complete
+    if (affectedSpaces.size > 0) {
+      try {
+        await updateMultipleSpaceStatuses(
+          Array.from(affectedSpaces),
+          SPACE_STATUS.READY,
+          {
+            userId,
+            operation: "space-assignment",
+            metadata: { mode, phase: "completed_processing" },
+          },
+        );
+      } catch (finalStatusError) {
+        logger.warn(`Failed to update space statuses to ready:`, {
+          error: finalStatusError,
+          userId,
+          mode,
+        });
+      }
+    }
+
+    return {
+      success: true,
+      mode,
+      processed: totalProcessed,
+      assignments: totalAssignments,
+      batches: totalBatches,
+      spacesAvailable: spaces.length,
+      affectedSpaces: affectedSpaces.size,
+      summaryTriggered: affectedSpaces.size > 0,
+      patternCheckTriggered: affectedSpaces.size > 0,
+    };
+  } catch (error) {
+    logger.error(
+      `Error in LLM space assignment for user ${userId}:`,
+      error as Record<string, unknown>,
+    );
+    throw error;
+  }
+}
+
+async function getEpisodesToAnalyze(
+  userId: string,
+  mode: "new_space" | "episode",
+  options: { newSpaceId?: string; episodeIds?: string[] },
+): Promise<EpisodeData[]> {
+  let query: string;
+  let params: any = { userId };
+
+  if (mode === "new_space") {
+    // For new space: analyze all recent episodes
+    query = `
+      MATCH (e:Episode {userId: $userId})
+      RETURN e
+      ORDER BY e.createdAt DESC
+      LIMIT 1000
+    `;
+  } else {
+    // For episode mode: analyze specific episodes
+    query = `
+      UNWIND $episodeIds AS episodeId
+      MATCH (e:Episode {uuid: episodeId, userId: $userId})
+      RETURN e
+      ORDER BY e.createdAt DESC
+    `;
+    params.episodeIds = options.episodeIds;
+  }
+
+  const result = await runQuery(query, params);
+
+  return result.map((record) => {
+    const episode = record.get("e").properties;
+    return {
+      uuid: episode.uuid,
+      content: episode.content,
+      originalContent: episode.originalContent,
+      source: episode.source,
+      createdAt: new Date(episode.createdAt),
Date(episode.createdAt), + metadata: JSON.parse(episode.metadata || "{}"), + }; + }); +} + +async function processBatchAI( + episodes: EpisodeData[], + spaces: Space[], + userId: string, + mode: "new_space" | "episode", + newSpaceId?: string, + batchSize: number = 50, +): Promise<{ + processed: number; + assignments: number; + affectedSpaces?: string[]; +}> { + try { + // Create batches of episodes + const episodeBatches: EpisodeData[][] = []; + for (let i = 0; i < episodes.length; i += batchSize) { + episodeBatches.push(episodes.slice(i, i + batchSize)); + } + + logger.info( + `Creating ${episodeBatches.length} batch AI requests for ${episodes.length} episodes`, + ); + + // Create batch requests with prompts + const batchRequests = await Promise.all( + episodeBatches.map(async (batch, index) => { + const promptMessages = await createLLMPrompt( + batch, + spaces, + mode, + newSpaceId, + userId, + ); + const systemPrompt = + promptMessages.find((m) => m.role === "system")?.content || ""; + const userPrompt = + promptMessages.find((m) => m.role === "user")?.content || ""; + + return { + customId: `episode-space-assignment-${userId}-${mode}-${index}`, + messages: [{ role: "user" as const, content: userPrompt }], + systemPrompt, + }; + }), + ); + + // Submit batch to AI provider + const { batchId } = await createBatch({ + requests: batchRequests, + outputSchema: AssignmentResultSchema, + maxRetries: 3, + timeoutMs: 1200000, // 10 minutes timeout + }); + + logger.info(`Batch AI job created: ${batchId}`, { + userId, + mode, + batchRequests: batchRequests.length, + }); + + // Poll for completion with improved handling + const maxPollingTime = 1200000; // 13 minutes + const pollInterval = 5000; // 5 seconds + const startTime = Date.now(); + + let batch = await getBatch({ batchId }); + + while (batch.status === "processing" || batch.status === "pending") { + const elapsed = Date.now() - startTime; + + if (elapsed > maxPollingTime) { + logger.warn( + `Batch AI job timed out after ${elapsed}ms, processing partial results`, + { + batchId, + status: batch.status, + completed: batch.completedRequests, + total: batch.totalRequests, + failed: batch.failedRequests, + }, + ); + break; // Exit loop to process any available results + } + + logger.info(`Batch AI job status: ${batch.status}`, { + batchId, + completed: batch.completedRequests, + total: batch.totalRequests, + failed: batch.failedRequests, + elapsed: elapsed, + }); + + await new Promise((resolve) => setTimeout(resolve, pollInterval)); + batch = await getBatch({ batchId }); + } + + // Handle different completion scenarios + if (batch.status === "failed") { + logger.error(`Batch AI job failed completely`, { + batchId, + status: batch.status, + }); + throw new Error(`Batch AI job failed with status: ${batch.status}`); + } + + // Log final status regardless of completion state + logger.info(`Batch AI job processing finished`, { + batchId, + status: batch.status, + completed: batch.completedRequests, + total: batch.totalRequests, + failed: batch.failedRequests, + }); + + if (!batch.results || batch.results.length === 0) { + logger.warn(`No results returned from batch AI job ${batchId}`, { + status: batch.status, + completed: batch.completedRequests, + failed: batch.failedRequests, + }); + + // If we have no results but some requests failed, fall back to sequential processing + if (batch.failedRequests && batch.failedRequests > 0) { + logger.info( + `Falling back to sequential processing due to batch failures`, + ); + return await processBatch(episodes, 
spaces, userId, mode, newSpaceId); + } + + return { processed: episodes.length, assignments: 0 }; + } + + logger.info(`Processing batch results`, { + batchId, + status: batch.status, + resultsCount: batch.results.length, + totalRequests: batch.totalRequests, + completedRequests: batch.completedRequests, + failedRequests: batch.failedRequests, + }); + + // Process all batch results + let totalAssignments = 0; + const affectedSpaces = new Set(); + const confidenceThreshold = + mode === "new_space" + ? CONFIG.newSpaceMode.confidenceThreshold + : CONFIG.episodeMode.confidenceThreshold; + + for (const result of batch.results) { + if (result.error) { + logger.warn(`Batch AI request ${result.customId} failed:`, { + error: result.error, + }); + continue; + } + + if (!result.response) { + logger.warn(`No response from batch AI request ${result.customId}`); + continue; + } + + // Parse assignments from this batch result + let assignments: AssignmentResult[] = []; + try { + // Extract episode batch info from customId + const batchIndexMatch = result.customId.match(/-(\d+)$/); + const batchIndex = batchIndexMatch ? parseInt(batchIndexMatch[1]) : 0; + const episodeBatch = episodeBatches[batchIndex]; + + if (Array.isArray(result.response)) { + // Handle direct array response (from structured output) + assignments = result.response.map((a) => ({ + episodeId: a.episodeId, + spaceIds: a.addSpaceId || [], + confidence: a.confidence || 0.75, + reasoning: a.reasoning, + })); + } else if (typeof result.response === "string") { + // Parse from text response with tags (fallback for non-structured output) + assignments = parseLLMResponseWithTags( + result.response, + episodeBatch, + spaces, + ); + } else if (typeof result.response === "object" && result.response) { + // Handle object response that might contain the array directly + try { + let responseData = result.response; + if (responseData.results && Array.isArray(responseData.results)) { + responseData = responseData.results; + } + + if (Array.isArray(responseData)) { + assignments = responseData.map((a) => ({ + episodeId: a.episodeId, + spaceIds: a.addSpaceId || [], + confidence: a.confidence || 0.75, + reasoning: a.reasoning, + })); + } else { + // Fallback parsing + assignments = parseLLMResponse( + JSON.stringify(result.response), + episodeBatch, + spaces, + ); + } + } catch (parseError) { + logger.error( + `Error processing object response ${result.customId}:`, + { error: parseError }, + ); + assignments = []; + } + } else { + // Fallback parsing + assignments = parseLLMResponse( + JSON.stringify(result.response), + episodeBatch, + spaces, + ); + } + } catch (parseError) { + logger.error(`Error parsing batch result ${result.customId}:`, { + error: parseError, + }); + continue; + } + + // Group episodes by space for batch assignment + const spaceToEpisodes = new Map(); + + for (const assignment of assignments) { + if ( + assignment.spaceIds.length > 0 && + assignment.confidence >= confidenceThreshold + ) { + for (const spaceId of assignment.spaceIds) { + if (!spaceToEpisodes.has(spaceId)) { + spaceToEpisodes.set(spaceId, []); + } + spaceToEpisodes.get(spaceId)!.push(assignment.episodeId); + } + } + } + + // Apply batch assignments - one call per space + for (const [spaceId, episodeIds] of spaceToEpisodes) { + try { + const assignmentResult = await assignEpisodesToSpace( + episodeIds, + spaceId, + userId, + ); + + if (assignmentResult.success) { + totalAssignments += episodeIds.length; + affectedSpaces.add(spaceId); + logger.info( + `Batch AI assigned 
${episodeIds.length} episodes to space ${spaceId}`, + { + episodeIds, + mode, + batchId: result.customId, + }, + ); + } + } catch (error) { + logger.warn( + `Failed to assign ${episodeIds.length} episodes to space ${spaceId}:`, + { error, episodeIds }, + ); + } + } + } + + // Log final batch processing results + logger.info(`Batch AI processing completed`, { + batchId, + totalEpisodes: episodes.length, + processedBatches: batch.results.length, + totalAssignments, + affectedSpaces: affectedSpaces.size, + completedRequests: batch.completedRequests, + failedRequests: batch.failedRequests || 0, + }); + + // If we have significant failures, consider fallback processing for remaining episodes + const failureRate = batch.failedRequests + ? batch.failedRequests / batch.totalRequests + : 0; + if (failureRate > 0.5) { + // If more than 50% failed + logger.warn( + `High failure rate (${Math.round(failureRate * 100)}%) in batch processing, consider reviewing prompts or input quality`, + ); + } + + return { + processed: episodes.length, + assignments: totalAssignments, + affectedSpaces: Array.from(affectedSpaces), + }; + } catch (error) { + logger.error("Error in Batch AI processing:", { error }); + throw error; + } +} + +async function processBatch( + episodes: EpisodeData[], + spaces: Space[], + userId: string, + mode: "new_space" | "episode", + newSpaceId?: string, +): Promise<{ + processed: number; + assignments: number; + affectedSpaces?: string[]; +}> { + try { + // Create the LLM prompt based on mode + const prompt = await createLLMPrompt( + episodes, + spaces, + mode, + newSpaceId, + userId, + ); + + // Episode-intent matching is MEDIUM complexity (semantic analysis with intent alignment) + let responseText = ""; + await makeModelCall( + false, + prompt, + (text: string) => { + responseText = text; + }, + undefined, + "high", + ); + + // Parse LLM response + const assignments = parseLLMResponseWithTags( + responseText, + episodes, + spaces, + ); + + // Apply assignments + let totalAssignments = 0; + const affectedSpaces = new Set(); + const confidenceThreshold = + mode === "new_space" + ? 
CONFIG.newSpaceMode.confidenceThreshold + : CONFIG.episodeMode.confidenceThreshold; + + for (const assignment of assignments) { + if ( + assignment.spaceIds.length > 0 && + assignment.confidence >= confidenceThreshold + ) { + // Assign to each space individually to track metadata properly + for (const spaceId of assignment.spaceIds) { + try { + const result = await assignEpisodesToSpace( + [assignment.episodeId], + spaceId, + userId, + ); + + if (result.success) { + totalAssignments++; + affectedSpaces.add(spaceId); + + logger.info( + `LLM assigned episode ${assignment.episodeId} to space ${spaceId}`, + { + confidence: assignment.confidence, + reasoning: assignment.reasoning || "No reasoning", + mode, + } as Record, + ); + } + } catch (error) { + logger.warn( + `Failed to assign episode ${assignment.episodeId} to space ${spaceId}:`, + error as Record, + ); + } + } + } + } + + return { + processed: episodes.length, + assignments: totalAssignments, + affectedSpaces: Array.from(affectedSpaces), + }; + } catch (error) { + logger.error("Error processing batch:", error as Record); + return { processed: 0, assignments: 0, affectedSpaces: [] }; + } +} + +async function createLLMPrompt( + episodes: EpisodeData[], + spaces: Space[], + mode: "new_space" | "episode", + newSpaceId?: string, + userId?: string, +): Promise { + const episodesDescription = episodes + .map( + (ep) => + `ID: ${ep.uuid}\nCONTENT: ${ep.content}\nSOURCE: ${ep.source}\nMETADATA: ${JSON.stringify(ep.metadata)}`, + ) + .join("\n\n"); + + // Get enhanced space information with episode counts + const enhancedSpaces = await Promise.all( + spaces.map(async (space) => { + const currentCount = userId + ? await getSpaceEpisodeCount(space.id, userId) + : 0; + return { + ...space, + currentEpisodeCount: currentCount, + }; + }), + ); + + if (mode === "new_space" && newSpaceId) { + // Focus on the new space for assignment + const newSpace = enhancedSpaces.find((s) => s.id === newSpaceId); + if (!newSpace) { + throw new Error(`New space ${newSpaceId} not found`); + } + + return [ + { + role: "system", + content: `You are analyzing episodes for assignment to a newly created space based on the space's intent and purpose. + +CORE PRINCIPLE: Match episodes based on WHAT THE EPISODE IS FUNDAMENTALLY ABOUT (its primary subject), not just keyword overlap. + +STEP-BY-STEP FILTERING PROCESS: + +Step 1: IDENTIFY PRIMARY SUBJECT +Ask: "Who or what is this episode fundamentally about?" +- Is it about a specific person? (by name, or "I"/"my" = speaker) +- Is it about a system, tool, or organization? +- Is it about a project, event, or activity? +- Is it about a concept, topic, or idea? + +Step 2: HANDLE IMPLICIT SUBJECTS +- "I prefer..." or "My..." β†’ Subject is the SPEAKER (check episode source/metadata for identity) +- "User discussed..." or "Person X said..." β†’ Subject is that specific person +- "We decided..." β†’ Subject is the group/team/project being discussed +- If unclear, identify from context clues in the episode content + +Step 3: CHECK SUBJECT ALIGNMENT +Does the PRIMARY SUBJECT match what the space is about? +- Match the subject identity (right person/thing/concept?) +- Match the subject relationship (is episode ABOUT the subject or just MENTIONING it?) +- Match the intent purpose (does episode serve the space's purpose?) 
+- Check scope constraints: If space description includes scope requirements (e.g., "cross-context", "not app-specific", "broadly useful", "stable for 3+ months"), verify episode meets those constraints + +Step 4: DISTINGUISH SUBJECT vs META +Ask: "Is this episode ABOUT the subject itself, or ABOUT discussing/analyzing the subject?" +- ABOUT subject: Episode contains actual content related to subject +- META-discussion: Episode discusses how to handle/analyze/organize the subject +- Only assign if episode is ABOUT the subject, not meta-discussion + +Step 5: VERIFY CONFIDENCE +Only assign if confidence >= 0.75 based on: +- Subject identity clarity (is subject clearly identified?) +- Subject alignment strength (how well does it match space intent?) +- Content relevance (does episode content serve space purpose?) + +CRITICAL RULE: PRIMARY SUBJECT MATCHING +The episode's PRIMARY SUBJECT must match the space's target subject. +- If space is about Person A, episodes about Person B should NOT match (even if same topic) +- If space is about a specific concept, meta-discussions about that concept should NOT match +- If space is about actual behaviors/facts, process discussions about organizing those facts should NOT match + +EXAMPLES OF CORRECT FILTERING: + +Example 1 - Person Identity: +Space: "Alex's work preferences" +Episode A: "I prefer morning meetings and async updates" (speaker: Alex) β†’ ASSIGN βœ… (primary subject: Alex's preferences) +Episode B: "Jordan prefers afternoon meetings" (speaker: System) β†’ DO NOT ASSIGN ❌ (primary subject: Jordan, not Alex) + +Example 2 - Meta vs Actual: +Space: "Recipe collection" +Episode A: "My lasagna recipe: 3 layers pasta, bΓ©chamel, meat sauce..." β†’ ASSIGN βœ… (primary subject: actual recipe) +Episode B: "We should organize recipes by cuisine type" β†’ DO NOT ASSIGN ❌ (primary subject: organizing system, not recipe) + +Example 3 - Keyword Overlap Without Subject Match: +Space: "Home renovation project" +Episode A: "Installed new kitchen cabinets, chose oak wood" β†’ ASSIGN βœ… (primary subject: home renovation) +Episode B: "Friend asked advice about their kitchen renovation" β†’ DO NOT ASSIGN ❌ (primary subject: friend's project, not this home) + +Example 4 - Scope Constraints: +Space: "Personal identity and preferences (broadly useful across contexts, not app-specific)" +Episode A: "I prefer async communication and morning work hours" β†’ ASSIGN βœ… (cross-context preference, broadly applicable) +Episode B: "Demonstrated knowledge of ProjectX technical stack" β†’ DO NOT ASSIGN ❌ (work/project knowledge, not personal identity) + +RESPONSE FORMAT: +Provide your response inside tags with a valid JSON array: + + +[ + { + "episodeId": "episode-uuid", + "addSpaceId": ["${newSpaceId}"], + "confidence": 0.75, + "reasoning": "Brief explanation of intent match" + } +] + + +IMPORTANT: If an episode doesn't align with the space's intent, use empty addSpaceId array: [] +Example: {"episodeId": "ep-123", "addSpaceId": [], "confidence": 0.0, "reasoning": "No intent alignment"}`, + }, + { + role: "user", + content: `NEW SPACE TO POPULATE: +Name: ${newSpace.name} +Intent/Purpose: ${newSpace.description || "No description"} +Current Episodes: ${newSpace.currentEpisodeCount} + +EPISODES TO EVALUATE: +${episodesDescription} + +ASSIGNMENT TASK: +For each episode above, follow the step-by-step process to determine if it should be assigned to this space. + +Remember: +1. Identify the PRIMARY SUBJECT of each episode (who/what is it about?) +2. 
Check if that PRIMARY SUBJECT matches what this space is about +3. If the episode is ABOUT something else (even if it mentions related keywords), do NOT assign +4. If the episode is a META-discussion about the space's topic (not actual content), do NOT assign +5. Only assign if the episode's primary subject aligns with the space's intent AND confidence >= 0.75 + +Provide your analysis and assignments using the specified JSON format.`, + }, + ]; + } else { + // Episode mode - consider all spaces + const spacesDescription = enhancedSpaces + .map((space) => { + const spaceInfo = [ + `- ${space.name} (${space.id})`, + ` Intent/Purpose: ${space.description || "No description"}`, + ` Current Episodes: ${space.currentEpisodeCount}`, + ]; + + if (space.summary) { + spaceInfo.push(` Summary: ${space.summary}`); + } + + return spaceInfo.join("\n"); + }) + .join("\n\n"); + + return [ + { + role: "system", + content: `You are an expert at organizing episodes into semantic spaces based on the space's intent and purpose. + +CORE PRINCIPLE: Match episodes based on WHAT THE EPISODE IS FUNDAMENTALLY ABOUT (its primary subject), not just keyword overlap. + +STEP-BY-STEP FILTERING PROCESS: + +Step 1: IDENTIFY PRIMARY SUBJECT +Ask: "Who or what is this episode fundamentally about?" +- Is it about a specific person? (by name, or "I"/"my" = speaker) +- Is it about a system, tool, or organization? +- Is it about a project, event, or activity? +- Is it about a concept, topic, or idea? + +Step 2: HANDLE IMPLICIT SUBJECTS +- "I prefer..." or "My..." β†’ Subject is the SPEAKER (check episode source/metadata for identity) +- "User discussed..." or "Person X said..." β†’ Subject is that specific person +- "We decided..." β†’ Subject is the group/team/project being discussed +- If unclear, identify from context clues in the episode content + +Step 3: CHECK SUBJECT ALIGNMENT WITH EACH SPACE +For each available space, does the episode's PRIMARY SUBJECT match what that space is about? +- Match the subject identity (right person/thing/concept?) +- Match the subject relationship (is episode ABOUT the subject or just MENTIONING it?) +- Match the intent purpose (does episode serve the space's purpose?) +- An episode can match multiple spaces if its primary subject serves multiple intents + +Step 4: DISTINGUISH SUBJECT vs META +Ask: "Is this episode ABOUT the subject itself, or ABOUT discussing/analyzing the subject?" +- ABOUT subject: Episode contains actual content related to subject +- META-discussion: Episode discusses how to handle/analyze/organize the subject +- Only assign if episode is ABOUT the subject, not meta-discussion + +Step 5: VERIFY CONFIDENCE +Only assign to a space if confidence >= 0.75 based on: +- Subject identity clarity (is subject clearly identified?) +- Subject alignment strength (how well does it match space intent?) +- Content relevance (does episode content serve space purpose?) + +Step 6: MULTI-SPACE ASSIGNMENT +- An episode can belong to multiple spaces if its primary subject serves multiple intents +- Each space assignment should meet the >= 0.75 confidence threshold independently +- If no spaces match, use empty addSpaceId: [] + +CRITICAL RULE: PRIMARY SUBJECT MATCHING +The episode's PRIMARY SUBJECT must match the space's target subject. 
+- If space is about Person A, episodes about Person B should NOT match (even if same topic) +- If space is about a specific concept, meta-discussions about that concept should NOT match +- If space is about actual behaviors/facts, process discussions about organizing those facts should NOT match + +EXAMPLES OF CORRECT FILTERING: + +Example 1 - Person Identity: +Space: "Alex's work preferences" +Episode A: "I prefer morning meetings and async updates" (speaker: Alex) β†’ ASSIGN βœ… (primary subject: Alex's preferences) +Episode B: "Jordan prefers afternoon meetings" (speaker: System) β†’ DO NOT ASSIGN ❌ (primary subject: Jordan, not Alex) + +Example 2 - Meta vs Actual: +Space: "Recipe collection" +Episode A: "My lasagna recipe: 3 layers pasta, bΓ©chamel, meat sauce..." β†’ ASSIGN βœ… (primary subject: actual recipe) +Episode B: "We should organize recipes by cuisine type" β†’ DO NOT ASSIGN ❌ (primary subject: organizing system, not recipe) + +Example 3 - Keyword Overlap Without Subject Match: +Space: "Home renovation project" +Episode A: "Installed new kitchen cabinets, chose oak wood" β†’ ASSIGN βœ… (primary subject: home renovation) +Episode B: "Friend asked advice about their kitchen renovation" β†’ DO NOT ASSIGN ❌ (primary subject: friend's project, not this home) + +Example 4 - Scope Constraints: +Space: "Personal identity and preferences (broadly useful across contexts, not app-specific)" +Episode A: "I prefer async communication and morning work hours" β†’ ASSIGN βœ… (cross-context preference, broadly applicable) +Episode B: "I format task titles as {verb}: {title} in TaskApp" β†’ DO NOT ASSIGN ❌ (app-specific behavior, fails "not app-specific" constraint) +Episode C: "Demonstrated knowledge of ProjectX technical stack" β†’ DO NOT ASSIGN ❌ (work/project knowledge, not personal identity) + +RESPONSE FORMAT: +Provide your response inside tags with a valid JSON array: + + +[ + { + "episodeId": "episode-uuid", + "addSpaceId": ["space-uuid1", "space-uuid2"], + "confidence": 0.75, + "reasoning": "Brief explanation of intent match" + } +] + + +IMPORTANT: If no spaces' intents align with an episode, use empty addSpaceId array: [] +Example: {"episodeId": "ep-123", "addSpaceId": [], "confidence": 0.0, "reasoning": "No matching space intent"}`, + }, + { + role: "user", + content: `AVAILABLE SPACES (with their intents/purposes): +${spacesDescription} + +EPISODES TO ORGANIZE: +${episodesDescription} + +ASSIGNMENT TASK: +For each episode above, follow the step-by-step process to determine which space(s) it should be assigned to. + +Remember: +1. Identify the PRIMARY SUBJECT of each episode (who/what is it about?) +2. Check if that PRIMARY SUBJECT matches what each space is about +3. If the episode is ABOUT something else (even if it mentions related keywords), do NOT assign to that space +4. If the episode is a META-discussion about a space's topic (not actual content), do NOT assign to that space +5. An episode can be assigned to multiple spaces if its primary subject serves multiple intents +6. 
Only assign if the episode's primary subject aligns with the space's intent AND confidence >= 0.75 for that space + +Provide your analysis and assignments using the specified JSON format.`, + }, + ]; + } +} + +function parseLLMResponseWithTags( + response: string, + episodes: EpisodeData[], + spaces: Space[], +): AssignmentResult[] { + try { + // Extract content from tags + const outputMatch = response.match(/([\s\S]*?)<\/output>/); + if (!outputMatch) { + logger.warn( + "No tags found in LLM response, falling back to full response parsing", + ); + return parseLLMResponse(response, episodes, spaces); + } + + const jsonContent = outputMatch[1].trim(); + const parsed = JSON.parse(jsonContent); + + if (!Array.isArray(parsed)) { + logger.warn( + "Invalid LLM response format - expected array in tags", + ); + return []; + } + + const validSpaceIds = new Set(spaces.map((s) => s.id)); + const validEpisodeIds = new Set(episodes.map((e) => e.uuid)); + + return parsed + .filter((assignment: any) => { + // Validate assignment structure + if ( + !assignment.episodeId || + !validEpisodeIds.has(assignment.episodeId) + ) { + return false; + } + + // Validate spaceIds array + if (!assignment.addSpaceId || !Array.isArray(assignment.addSpaceId)) { + assignment.addSpaceId = []; + } + + // Filter out invalid space IDs + assignment.addSpaceId = assignment.addSpaceId.filter( + (spaceId: string) => validSpaceIds.has(spaceId), + ); + + return true; + }) + .map((assignment: any) => ({ + episodeId: assignment.episodeId, + spaceIds: assignment.addSpaceId, + confidence: assignment.confidence || 0.75, + reasoning: assignment.reasoning, + })); + } catch (error) { + logger.error( + "Error parsing LLM response with tags:", + error as Record, + ); + logger.debug("Raw LLM response:", { response } as Record); + // Fallback to regular parsing + return parseLLMResponse(response, episodes, spaces); + } +} + +function parseLLMResponse( + response: string, + episodes: EpisodeData[], + spaces: Space[], +): AssignmentResult[] { + try { + // Clean the response - remove any markdown formatting + const cleanedResponse = response + .replace(/```json\n?/g, "") + .replace(/```\n?/g, "") + .trim(); + + const parsed = JSON.parse(cleanedResponse); + + if (!parsed.assignments || !Array.isArray(parsed.assignments)) { + logger.warn("Invalid LLM response format - no assignments array"); + return []; + } + + const validSpaceIds = new Set(spaces.map((s) => s.id)); + const validEpisodeIds = new Set(episodes.map((e) => e.uuid)); + + return parsed.assignments + .filter((assignment: any) => { + // Validate assignment structure + if ( + !assignment.episodeId || + !validEpisodeIds.has(assignment.episodeId) + ) { + return false; + } + + if (!assignment.spaceIds || !Array.isArray(assignment.spaceIds)) { + return false; + } + + // Filter out invalid space IDs + assignment.spaceIds = assignment.spaceIds.filter((spaceId: string) => + validSpaceIds.has(spaceId), + ); + + return true; + }) + .map((assignment: any) => ({ + episodeId: assignment.episodeId, + spaceIds: assignment.spaceIds, + confidence: assignment.confidence || 0.75, + reasoning: assignment.reasoning, + })); + } catch (error) { + logger.error( + "Error parsing LLM response:", + error as Record, + ); + logger.debug("Raw LLM response:", { response } as Record); + return []; + } +} diff --git a/apps/webapp/app/jobs/spaces/space-identification.logic.ts b/apps/webapp/app/jobs/spaces/space-identification.logic.ts new file mode 100644 index 0000000..55cfb42 --- /dev/null +++ 
b/apps/webapp/app/jobs/spaces/space-identification.logic.ts @@ -0,0 +1,229 @@ +/** + * Space Identification Logic + * + * Uses LLM to identify appropriate spaces for topics discovered by BERT analysis + */ + +import { makeModelCall } from "~/lib/model.server"; +import { getAllSpacesForUser } from "~/services/graphModels/space"; +import { getEpisode } from "~/services/graphModels/episode"; +import { logger } from "~/services/logger.service"; +import type { SpaceNode } from "@core/types"; + +export interface TopicData { + keywords: string[]; + episodeIds: string[]; +} + +export interface SpaceProposal { + name: string; + intent: string; + confidence: number; + reason: string; + topics: string[]; // Array of topic IDs +} + +interface IdentifySpacesParams { + userId: string; + topics: Record; +} + +/** + * Identify spaces for topics using LLM analysis + * Takes top 10 keywords and top 5 episodes per topic + */ +export async function identifySpacesForTopics( + params: IdentifySpacesParams, +): Promise { + const { userId, topics } = params; + + // Get existing spaces for the user + const existingSpaces = await getAllSpacesForUser(userId); + + // Prepare topic data with top 10 keywords and top 5 episodes + const topicsForAnalysis = await Promise.all( + Object.entries(topics).map(async ([topicId, topicData]) => { + // Take top 10 keywords + const topKeywords = topicData.keywords.slice(0, 10); + + // Take top 5 episodes and fetch their content + const topEpisodeIds = topicData.episodeIds.slice(0, 5); + const episodes = await Promise.all( + topEpisodeIds.map((id) => getEpisode(id)), + ); + + return { + topicId, + keywords: topKeywords, + episodes: episodes + .filter((e) => e !== null) + .map((e) => ({ + content: e!.content.substring(0, 500), // Limit to 500 chars per episode + })), + episodeCount: topicData.episodeIds.length, + }; + }), + ); + + // Build the prompt + const prompt = buildSpaceIdentificationPrompt( + existingSpaces, + topicsForAnalysis, + ); + + logger.info("Identifying spaces for topics", { + userId, + topicCount: Object.keys(topics).length, + existingSpaceCount: existingSpaces.length, + }); + + // Call LLM with structured output + let responseText = ""; + await makeModelCall( + false, // not streaming + [{ role: "user", content: prompt }], + (text) => { + responseText = text; + }, + { + temperature: 0.7, + }, + "high", // Use high complexity for space identification + ); + + // Parse the response + const proposals = parseSpaceProposals(responseText); + + logger.info("Space identification completed", { + userId, + proposalCount: proposals.length, + }); + + return proposals; +} + +/** + * Build the prompt for space identification + */ +function buildSpaceIdentificationPrompt( + existingSpaces: SpaceNode[], + topics: Array<{ + topicId: string; + keywords: string[]; + episodes: Array<{ content: string }>; + episodeCount: number; + }>, +): string { + const existingSpacesSection = + existingSpaces.length > 0 + ? `## Existing Spaces + +The user currently has these spaces: +${existingSpaces.map((s) => `- **${s.name}**: ${s.description || "No description"} (${s.contextCount || 0} episodes)`).join("\n")} + +When identifying new spaces, consider if topics fit into existing spaces or if new spaces are needed.` + : `## Existing Spaces + +The user currently has no spaces defined. This is a fresh start for space organization.`; + + const topicsSection = `## Topics Discovered + +BERT topic modeling has identified ${topics.length} distinct topics from the user's episodes. 
Each topic represents a cluster of semantically related content. + +${topics + .map( + (t, idx) => `### Topic ${idx + 1} (ID: ${t.topicId}) +**Episode Count**: ${t.episodeCount} +**Top Keywords**: ${t.keywords.join(", ")} + +**Sample Episodes** (showing ${t.episodes.length} of ${t.episodeCount}): +${t.episodes.map((e, i) => `${i + 1}. ${e.content}`).join("\n")} +`, + ) + .join("\n")}`; + + return `You are a knowledge organization expert. Your task is to analyze discovered topics and identify appropriate "spaces" (thematic containers) for organizing episodic memories. + +${existingSpacesSection} + +${topicsSection} + +## Task + +Analyze the topics above and identify spaces that would help organize this content meaningfully. For each space: + +1. **Consider existing spaces first**: If topics clearly belong to existing spaces, assign them there +2. **Create new spaces when needed**: If topics represent distinct themes not covered by existing spaces +3. **Group related topics**: Multiple topics can be assigned to the same space if they share a theme +4. **Aim for 20-50 episodes per space**: This is the sweet spot for space cohesion +5. **Focus on user intent**: What would help the user find and understand this content later? + +## Output Format + +Return your analysis as a JSON array of space proposals. Each proposal should have: + +\`\`\`json +[ + { + "name": "Space name (use existing space name if assigning to existing space)", + "intent": "Clear description of what this space represents", + "confidence": 0.85, + "reason": "Brief explanation of why these topics belong together", + "topics": ["topic-id-1", "topic-id-2"] + } +] +\`\`\` + +**Important Guidelines**: +- **confidence**: 0.0-1.0 scale indicating how confident you are this is a good grouping +- **topics**: Array of topic IDs (use the exact IDs from above like "0", "1", "-1", etc.) +- **name**: For existing spaces, use the EXACT name. For new spaces, create a clear, concise name +- Only propose spaces with confidence >= 0.6 +- Each topic should only appear in ONE space proposal +- Topic "-1" is the outlier topic (noise) - only include if it genuinely fits a theme + +Return ONLY the JSON array, no additional text.`; +} + +/** + * Parse space proposals from LLM response + */ +function parseSpaceProposals(responseText: string): SpaceProposal[] { + try { + // Extract JSON from markdown code blocks if present + const jsonMatch = responseText.match(/```(?:json)?\s*(\[[\s\S]*?\])\s*```/); + const jsonText = jsonMatch ? 
jsonMatch[1] : responseText; + + const proposals = JSON.parse(jsonText.trim()); + + if (!Array.isArray(proposals)) { + throw new Error("Response is not an array"); + } + + // Validate and filter proposals + return proposals + .filter((p) => { + return ( + p.name && + p.intent && + typeof p.confidence === "number" && + p.confidence >= 0.6 && + Array.isArray(p.topics) && + p.topics.length > 0 + ); + }) + .map((p) => ({ + name: p.name.trim(), + intent: p.intent.trim(), + confidence: p.confidence, + reason: (p.reason || "").trim(), + topics: p.topics.map((t: any) => String(t)), + })); + } catch (error) { + logger.error("Failed to parse space proposals", { + error, + responseText: responseText.substring(0, 500), + }); + return []; + } +} diff --git a/apps/webapp/app/jobs/spaces/space-summary.logic.ts b/apps/webapp/app/jobs/spaces/space-summary.logic.ts new file mode 100644 index 0000000..0482fa5 --- /dev/null +++ b/apps/webapp/app/jobs/spaces/space-summary.logic.ts @@ -0,0 +1,721 @@ +import { logger } from "~/services/logger.service"; +import { SpaceService } from "~/services/space.server"; +import { makeModelCall } from "~/lib/model.server"; +import { runQuery } from "~/lib/neo4j.server"; +import { updateSpaceStatus, SPACE_STATUS } from "~/trigger/utils/space-status"; +import type { CoreMessage } from "ai"; +import { z } from "zod"; +import { getSpace, updateSpace } from "~/trigger/utils/space-utils"; +import { getSpaceEpisodeCount } from "~/services/graphModels/space"; + +export interface SpaceSummaryPayload { + userId: string; + spaceId: string; // Single space only + triggerSource?: "assignment" | "manual" | "scheduled"; +} + +interface SpaceEpisodeData { + uuid: string; + content: string; + originalContent: string; + source: string; + createdAt: Date; + validAt: Date; + metadata: any; + sessionId: string | null; +} + +interface SpaceSummaryData { + spaceId: string; + spaceName: string; + spaceDescription?: string; + contextCount: number; + summary: string; + keyEntities: string[]; + themes: string[]; + confidence: number; + lastUpdated: Date; + isIncremental: boolean; +} + +// Zod schema for LLM response validation +const SummaryResultSchema = z.object({ + summary: z.string(), + keyEntities: z.array(z.string()), + themes: z.array(z.string()), + confidence: z.number().min(0).max(1), +}); + +const CONFIG = { + maxEpisodesForSummary: 20, // Limit episodes for performance + minEpisodesForSummary: 1, // Minimum episodes to generate summary + summaryEpisodeThreshold: 5, // Minimum new episodes required to trigger summary (configurable) +}; + +export interface SpaceSummaryResult { + success: boolean; + spaceId: string; + triggerSource: string; + summary?: { + statementCount: number; + confidence: number; + themesCount: number; + } | null; + reason?: string; +} + +/** + * Core business logic for space summary generation + * This is shared between Trigger.dev and BullMQ implementations + */ +export async function processSpaceSummary( + payload: SpaceSummaryPayload, +): Promise { + const { userId, spaceId, triggerSource = "manual" } = payload; + + logger.info(`Starting space summary generation`, { + userId, + spaceId, + triggerSource, + }); + + try { + // Update status to processing + await updateSpaceStatus(spaceId, SPACE_STATUS.PROCESSING, { + userId, + operation: "space-summary", + metadata: { triggerSource, phase: "start_summary" }, + }); + + // Generate summary for the single space + const summaryResult = await generateSpaceSummary( + spaceId, + userId, + triggerSource, + ); + + if 
(summaryResult) { + // Store the summary + await storeSummary(summaryResult); + + // Update status to ready after successful completion + await updateSpaceStatus(spaceId, SPACE_STATUS.READY, { + userId, + operation: "space-summary", + metadata: { + triggerSource, + phase: "completed_summary", + contextCount: summaryResult.contextCount, + confidence: summaryResult.confidence, + }, + }); + + logger.info(`Generated summary for space ${spaceId}`, { + statementCount: summaryResult.contextCount, + confidence: summaryResult.confidence, + themes: summaryResult.themes.length, + triggerSource, + }); + + return { + success: true, + spaceId, + triggerSource, + summary: { + statementCount: summaryResult.contextCount, + confidence: summaryResult.confidence, + themesCount: summaryResult.themes.length, + }, + }; + } else { + // No summary generated - this could be due to insufficient episodes or no new episodes + // This is not an error state, so update status to ready + await updateSpaceStatus(spaceId, SPACE_STATUS.READY, { + userId, + operation: "space-summary", + metadata: { + triggerSource, + phase: "no_summary_needed", + reason: "Insufficient episodes or no new episodes to summarize", + }, + }); + + logger.info( + `No summary generated for space ${spaceId} - insufficient or no new episodes`, + ); + return { + success: true, + spaceId, + triggerSource, + summary: null, + reason: "No episodes to summarize", + }; + } + } catch (error) { + // Update status to error on exception + try { + await updateSpaceStatus(spaceId, SPACE_STATUS.ERROR, { + userId, + operation: "space-summary", + metadata: { + triggerSource, + phase: "exception", + error: error instanceof Error ? error.message : "Unknown error", + }, + }); + } catch (statusError) { + logger.warn(`Failed to update status to error for space ${spaceId}`, { + statusError, + }); + } + + logger.error( + `Error in space summary generation for space ${spaceId}:`, + error as Record, + ); + throw error; + } +} + +async function generateSpaceSummary( + spaceId: string, + userId: string, + triggerSource?: "assignment" | "manual" | "scheduled", +): Promise { + try { + // 1. Get space details + const spaceService = new SpaceService(); + const space = await spaceService.getSpace(spaceId, userId); + + if (!space) { + logger.warn(`Space ${spaceId} not found for user ${userId}`); + return null; + } + + // 2. Check episode count threshold (skip for manual triggers) + if (triggerSource !== "manual") { + const currentEpisodeCount = await getSpaceEpisodeCount(spaceId, userId); + const lastSummaryEpisodeCount = space.contextCount || 0; + const episodeDifference = currentEpisodeCount - lastSummaryEpisodeCount; + + if ( + episodeDifference < CONFIG.summaryEpisodeThreshold || + lastSummaryEpisodeCount !== 0 + ) { + logger.info( + `Skipping summary generation for space ${spaceId}: only ${episodeDifference} new episodes (threshold: ${CONFIG.summaryEpisodeThreshold})`, + { + currentEpisodeCount, + lastSummaryEpisodeCount, + episodeDifference, + threshold: CONFIG.summaryEpisodeThreshold, + }, + ); + return null; + } + + logger.info( + `Proceeding with summary generation for space ${spaceId}: ${episodeDifference} new episodes (threshold: ${CONFIG.summaryEpisodeThreshold})`, + { + currentEpisodeCount, + lastSummaryEpisodeCount, + episodeDifference, + }, + ); + } + + // 2. Check for existing summary + const existingSummary = await getExistingSummary(spaceId); + const isIncremental = existingSummary !== null; + + // 3. 
Get episodes (all or new ones based on existing summary) + const episodes = await getSpaceEpisodes( + spaceId, + userId, + isIncremental ? existingSummary?.lastUpdated : undefined, + ); + + // Handle case where no new episodes exist for incremental update + if (isIncremental && episodes.length === 0) { + logger.info( + `No new episodes found for space ${spaceId}, skipping summary update`, + ); + return null; + } + + // Check minimum episode requirement for new summaries only + if (!isIncremental && episodes.length < CONFIG.minEpisodesForSummary) { + logger.info( + `Space ${spaceId} has insufficient episodes (${episodes.length}) for new summary`, + ); + return null; + } + + // 4. Process episodes using unified approach + let summaryResult; + + if (episodes.length > CONFIG.maxEpisodesForSummary) { + logger.info( + `Large space detected (${episodes.length} episodes). Processing in batches.`, + ); + + // Process in batches, each building on previous result + const batches: SpaceEpisodeData[][] = []; + for (let i = 0; i < episodes.length; i += CONFIG.maxEpisodesForSummary) { + batches.push(episodes.slice(i, i + CONFIG.maxEpisodesForSummary)); + } + + let currentSummary = existingSummary?.summary || null; + let currentThemes = existingSummary?.themes || []; + let cumulativeConfidence = 0; + + for (const [batchIndex, batch] of batches.entries()) { + logger.info( + `Processing batch ${batchIndex + 1}/${batches.length} with ${batch.length} episodes`, + ); + + const batchResult = await generateUnifiedSummary( + space.name, + space.description as string, + batch, + currentSummary, + currentThemes, + ); + + if (batchResult) { + currentSummary = batchResult.summary; + currentThemes = batchResult.themes; + cumulativeConfidence += batchResult.confidence; + } else { + logger.warn(`Failed to process batch ${batchIndex + 1}`); + } + + // Small delay between batches + if (batchIndex < batches.length - 1) { + await new Promise((resolve) => setTimeout(resolve, 500)); + } + } + + summaryResult = currentSummary + ? 
{ + summary: currentSummary, + themes: currentThemes, + confidence: Math.min(cumulativeConfidence / batches.length, 1.0), + } + : null; + } else { + logger.info( + `Processing ${episodes.length} episodes with unified approach`, + ); + + // Use unified approach for smaller spaces + summaryResult = await generateUnifiedSummary( + space.name, + space.description as string, + episodes, + existingSummary?.summary || null, + existingSummary?.themes || [], + ); + } + + if (!summaryResult) { + logger.warn(`Failed to generate LLM summary for space ${spaceId}`); + return null; + } + + // Get the actual current counts from Neo4j + const currentEpisodeCount = await getSpaceEpisodeCount(spaceId, userId); + + return { + spaceId: space.uuid, + spaceName: space.name, + spaceDescription: space.description as string, + contextCount: currentEpisodeCount, + summary: summaryResult.summary, + keyEntities: summaryResult.keyEntities || [], + themes: summaryResult.themes, + confidence: summaryResult.confidence, + lastUpdated: new Date(), + isIncremental, + }; + } catch (error) { + logger.error( + `Error generating summary for space ${spaceId}:`, + error as Record, + ); + return null; + } +} + +async function generateUnifiedSummary( + spaceName: string, + spaceDescription: string | undefined, + episodes: SpaceEpisodeData[], + previousSummary: string | null = null, + previousThemes: string[] = [], +): Promise<{ + summary: string; + themes: string[]; + confidence: number; + keyEntities?: string[]; +} | null> { + try { + const prompt = createUnifiedSummaryPrompt( + spaceName, + spaceDescription, + episodes, + previousSummary, + previousThemes, + ); + + // Space summary generation requires HIGH complexity (creative synthesis, narrative generation) + let responseText = ""; + await makeModelCall( + false, + prompt, + (text: string) => { + responseText = text; + }, + undefined, + "high", + ); + + return parseSummaryResponse(responseText); + } catch (error) { + logger.error( + "Error generating unified summary:", + error as Record, + ); + return null; + } +} + +function createUnifiedSummaryPrompt( + spaceName: string, + spaceDescription: string | undefined, + episodes: SpaceEpisodeData[], + previousSummary: string | null, + previousThemes: string[], +): CoreMessage[] { + // If there are no episodes and no previous summary, we cannot generate a meaningful summary + if (episodes.length === 0 && previousSummary === null) { + throw new Error( + "Cannot generate summary without episodes or existing summary", + ); + } + + const episodesText = episodes + .map( + (episode) => + `- ${episode.content} (Source: ${episode.source}, Session: ${episode.sessionId || "N/A"})`, + ) + .join("\n"); + + // Extract key entities and themes from episode content + const contentWords = episodes + .map((ep) => ep.content.toLowerCase()) + .join(" ") + .split(/\s+/) + .filter((word) => word.length > 3); + + const wordFrequency = new Map(); + contentWords.forEach((word) => { + wordFrequency.set(word, (wordFrequency.get(word) || 0) + 1); + }); + + const topEntities = Array.from(wordFrequency.entries()) + .sort(([, a], [, b]) => b - a) + .slice(0, 10) + .map(([word]) => word); + + const isUpdate = previousSummary !== null; + + return [ + { + role: "system", + content: `You are an expert at analyzing and summarizing episodes within semantic spaces based on the space's intent and purpose. Your task is to ${isUpdate ? "update an existing summary by integrating new episodes" : "create a comprehensive summary of episodes"}. + +CRITICAL RULES: +1. 
Base your summary ONLY on insights derived from the actual content/episodes provided +2. Use the space's INTENT/PURPOSE (from description) to guide what to summarize and how to organize it +3. Write in a factual, neutral tone - avoid promotional language ("pivotal", "invaluable", "cutting-edge") +4. Be specific and concrete - reference actual content, patterns, and insights found in the episodes +5. If episodes are insufficient for meaningful insights, state that more data is needed + +INTENT-DRIVEN SUMMARIZATION: +Your summary should SERVE the space's intended purpose. Examples: +- "Learning React" β†’ Summarize React concepts, patterns, techniques learned +- "Project X Updates" β†’ Summarize progress, decisions, blockers, next steps +- "Health Tracking" β†’ Summarize metrics, trends, observations, insights +- "Guidelines for React" β†’ Extract actionable patterns, best practices, rules +- "Evolution of design thinking" β†’ Track how thinking changed over time, decision points +The intent defines WHY this space exists - organize content to serve that purpose. + +INSTRUCTIONS: +${ + isUpdate + ? `1. Review the existing summary and themes carefully +2. Analyze the new episodes for patterns and insights that align with the space's intent +3. Identify connecting points between existing knowledge and new episodes +4. Update the summary to seamlessly integrate new information while preserving valuable existing insights +5. Evolve themes by adding new ones or refining existing ones based on the space's purpose +6. Organize the summary to serve the space's intended use case` + : `1. Analyze the semantic content and relationships within the episodes +2. Identify topics/sections that align with the space's INTENT and PURPOSE +3. Create a coherent summary that serves the space's intended use case +4. Organize the summary based on the space's purpose (not generic frequency-based themes)` +} +${isUpdate ? "7" : "5"}. Assess your confidence in the ${isUpdate ? "updated" : ""} summary quality (0.0-1.0) + +INTENT-ALIGNED ORGANIZATION: +- Organize sections based on what serves the space's purpose +- Topics don't need minimum episode counts - relevance to intent matters most +- Each section should provide value aligned with the space's intended use +- For "guidelines" spaces: focus on actionable patterns +- For "tracking" spaces: focus on temporal patterns and changes +- For "learning" spaces: focus on concepts and insights gained +- Let the space's intent drive the structure, not rigid rules + +${ + isUpdate + ? `CONNECTION FOCUS: +- Entity relationships that span across batches/time +- Theme evolution and expansion +- Temporal patterns and progressions +- Contradictions or confirmations of existing insights +- New insights that complement existing knowledge` + : "" +} + +RESPONSE FORMAT: +Provide your response inside tags with valid JSON. Include both HTML summary and markdown format. + + +{ + "summary": "${isUpdate ? "Updated HTML summary that integrates new insights with existing knowledge. Write factually about what the statements reveal - mention specific entities, relationships, and patterns found in the data. Avoid marketing language. Use HTML tags for structure." : "Factual HTML summary based on patterns found in the statements. Report what the data actually shows - specific entities, relationships, frequencies, and concrete insights. Avoid promotional language. Use HTML tags like
<h3>, <p>, <ul>, <li> for structure. Keep it concise and evidence-based."}", + "keyEntities": ["entity1", "entity2", "entity3"], + "themes": ["${isUpdate ? 'updated_theme1", "new_theme2", "evolved_theme3' : 'theme1", "theme2", "theme3'}"], + "confidence": 0.85 +} +</output> + +JSON FORMATTING RULES: +- HTML content in summary field is allowed and encouraged +- Escape quotes within strings as \" +- Escape HTML angle brackets if needed: &lt; and &gt; +- Use proper HTML tags for structure: <h3>, <h4>, <p>, <ul>, <li>, <strong>
      , etc. +- HTML content should be well-formed and semantic + +GUIDELINES: +${ + isUpdate + ? `- Preserve valuable insights from existing summary +- Integrate new information by highlighting connections +- Themes should evolve naturally, don't replace wholesale +- The updated summary should read as a coherent whole +- Make the summary user-friendly and explain what value this space provides` + : `- Report only what the episodes actually reveal - be specific and concrete +- Cite actual content and patterns found in the episodes +- Avoid generic descriptions that could apply to any space +- Use neutral, factual language - no "comprehensive", "robust", "cutting-edge" etc. +- Themes must be backed by at least 3 supporting episodes with clear evidence +- Better to have fewer, well-supported themes than many weak ones +- Confidence should reflect actual data quality and coverage, not aspirational goals` +}`, + }, + { + role: "user", + content: `SPACE INFORMATION: +Name: "${spaceName}" +Intent/Purpose: ${spaceDescription || "No specific intent provided - organize naturally based on content"} + +${ + isUpdate + ? `EXISTING SUMMARY: +${previousSummary} + +EXISTING THEMES: +${previousThemes.join(", ")} + +NEW EPISODES TO INTEGRATE (${episodes.length} episodes):` + : `EPISODES IN THIS SPACE (${episodes.length} episodes):` +} +${episodesText} + +${ + episodes.length > 0 + ? `TOP WORDS BY FREQUENCY: +${topEntities.join(", ")}` + : "" +} + +${ + isUpdate + ? "Please identify connections between the existing summary and new episodes, then update the summary to integrate the new insights coherently. Organize the summary to SERVE the space's intent/purpose. Remember: only summarize insights from the actual episode content." + : "Please analyze the episodes and provide a comprehensive summary that SERVES the space's intent/purpose. Organize sections based on what would be most valuable for this space's intended use case. If the intent is unclear, organize naturally based on content patterns. Only summarize insights from actual episode content." 
+}`, + }, + ]; +} + +async function getExistingSummary(spaceId: string): Promise<{ + summary: string; + themes: string[]; + lastUpdated: Date; + contextCount: number; +} | null> { + try { + const existingSummary = await getSpace(spaceId); + + if (existingSummary?.summary) { + return { + summary: existingSummary.summary, + themes: existingSummary.themes, + lastUpdated: existingSummary.summaryGeneratedAt || new Date(), + contextCount: existingSummary.contextCount || 0, + }; + } + + return null; + } catch (error) { + logger.warn(`Failed to get existing summary for space ${spaceId}:`, { + error, + }); + return null; + } +} + +async function getSpaceEpisodes( + spaceId: string, + userId: string, + sinceDate?: Date, +): Promise { + // Query episodes directly using Space-[:HAS_EPISODE]->Episode relationships + const params: any = { spaceId, userId }; + + let dateCondition = ""; + if (sinceDate) { + dateCondition = "AND e.createdAt > $sinceDate"; + params.sinceDate = sinceDate.toISOString(); + } + + const query = ` + MATCH (space:Space {uuid: $spaceId, userId: $userId})-[:HAS_EPISODE]->(e:Episode {userId: $userId}) + WHERE e IS NOT NULL ${dateCondition} + RETURN DISTINCT e + ORDER BY e.createdAt DESC + `; + + const result = await runQuery(query, params); + + return result.map((record) => { + const episode = record.get("e").properties; + return { + uuid: episode.uuid, + content: episode.content, + originalContent: episode.originalContent, + source: episode.source, + createdAt: new Date(episode.createdAt), + validAt: new Date(episode.validAt), + metadata: JSON.parse(episode.metadata || "{}"), + sessionId: episode.sessionId, + }; + }); +} + +function parseSummaryResponse(response: string): { + summary: string; + themes: string[]; + confidence: number; + keyEntities?: string[]; +} | null { + try { + // Extract content from tags + const outputMatch = response.match(/([\s\S]*?)<\/output>/); + if (!outputMatch) { + logger.warn("No tags found in LLM summary response"); + logger.debug("Full LLM response:", { response }); + return null; + } + + let jsonContent = outputMatch[1].trim(); + + let parsed; + try { + parsed = JSON.parse(jsonContent); + } catch (jsonError) { + logger.warn("JSON parsing failed, attempting cleanup and retry", { + originalError: jsonError, + jsonContent: jsonContent.substring(0, 500) + "...", // Log first 500 chars + }); + + // More aggressive cleanup for malformed JSON + jsonContent = jsonContent + .replace(/([^\\])"/g, '$1\\"') // Escape unescaped quotes + .replace(/^"/g, '\\"') // Escape quotes at start + .replace(/\\\\"/g, '\\"'); // Fix double-escaped quotes + + parsed = JSON.parse(jsonContent); + } + + // Validate the response structure + const validationResult = SummaryResultSchema.safeParse(parsed); + if (!validationResult.success) { + logger.warn("Invalid LLM summary response format:", { + error: validationResult.error, + parsedData: parsed, + }); + return null; + } + + return validationResult.data; + } catch (error) { + logger.error( + "Error parsing LLM summary response:", + error as Record, + ); + logger.debug("Failed response content:", { response }); + return null; + } +} + +async function storeSummary(summaryData: SpaceSummaryData): Promise { + try { + // Store in PostgreSQL for API access and persistence + await updateSpace(summaryData); + + // Also store in Neo4j for graph-based queries + const query = ` + MATCH (space:Space {uuid: $spaceId}) + SET space.summary = $summary, + space.keyEntities = $keyEntities, + space.themes = $themes, + space.summaryConfidence = 
$confidence, + space.summaryContextCount = $contextCount, + space.summaryLastUpdated = datetime($lastUpdated) + RETURN space + `; + + await runQuery(query, { + spaceId: summaryData.spaceId, + summary: summaryData.summary, + keyEntities: summaryData.keyEntities, + themes: summaryData.themes, + confidence: summaryData.confidence, + contextCount: summaryData.contextCount, + lastUpdated: summaryData.lastUpdated.toISOString(), + }); + + logger.info(`Stored summary for space ${summaryData.spaceId}`, { + themes: summaryData.themes.length, + keyEntities: summaryData.keyEntities.length, + confidence: summaryData.confidence, + }); + } catch (error) { + logger.error( + `Error storing summary for space ${summaryData.spaceId}:`, + error as Record, + ); + throw error; + } +} diff --git a/apps/webapp/app/lib/queue-adapter.server.ts b/apps/webapp/app/lib/queue-adapter.server.ts index af9281d..4a867dc 100644 --- a/apps/webapp/app/lib/queue-adapter.server.ts +++ b/apps/webapp/app/lib/queue-adapter.server.ts @@ -15,7 +15,8 @@ import type { z } from "zod"; import type { IngestBodyRequest } from "~/jobs/ingest/ingest-episode.logic"; import type { CreateConversationTitlePayload } from "~/jobs/conversation/create-title.logic"; import type { SessionCompactionPayload } from "~/jobs/session/session-compaction.logic"; -import { type SpaceAssignmentPayload } from "~/trigger/spaces/space-assignment"; +import type { SpaceAssignmentPayload } from "~/jobs/spaces/space-assignment.logic"; +import type { SpaceSummaryPayload } from "~/jobs/spaces/space-summary.logic"; type QueueProvider = "trigger" | "bullmq"; @@ -144,22 +145,86 @@ export async function enqueueSessionCompaction( /** * Enqueue space assignment job - * (Helper for common job logic to call) */ export async function enqueueSpaceAssignment( payload: SpaceAssignmentPayload, -): Promise { +): Promise<{ id?: string }> { const provider = env.QUEUE_PROVIDER as QueueProvider; if (provider === "trigger") { const { triggerSpaceAssignment } = await import( "~/trigger/spaces/space-assignment" ); - await triggerSpaceAssignment(payload); + const handler = await triggerSpaceAssignment(payload); + return { id: handler.id }; } else { - // For BullMQ, space assignment is not implemented yet - // You can add it later when needed - console.warn("Space assignment not implemented for BullMQ yet"); + // BullMQ + const { spaceAssignmentQueue } = await import("~/bullmq/queues"); + const job = await spaceAssignmentQueue.add("space-assignment", payload, { + jobId: `space-assignment-${payload.userId}-${payload.mode}-${Date.now()}`, + attempts: 3, + backoff: { type: "exponential", delay: 2000 }, + }); + return { id: job.id }; + } +} + +/** + * Enqueue space summary job + */ +export async function enqueueSpaceSummary( + payload: SpaceSummaryPayload, +): Promise<{ id?: string }> { + const provider = env.QUEUE_PROVIDER as QueueProvider; + + if (provider === "trigger") { + const { triggerSpaceSummary } = await import( + "~/trigger/spaces/space-summary" + ); + const handler = await triggerSpaceSummary(payload); + return { id: handler.id }; + } else { + // BullMQ + const { spaceSummaryQueue } = await import("~/bullmq/queues"); + const job = await spaceSummaryQueue.add("space-summary", payload, { + jobId: `space-summary-${payload.spaceId}-${Date.now()}`, + attempts: 3, + backoff: { type: "exponential", delay: 2000 }, + }); + return { id: job.id }; + } +} + +/** + * Enqueue BERT topic analysis job + */ +export async function enqueueBertTopicAnalysis(payload: { + userId: string; + workspaceId: 
string; + minTopicSize?: number; + nrTopics?: number; +}): Promise<{ id?: string }> { + const provider = env.QUEUE_PROVIDER as QueueProvider; + + if (provider === "trigger") { + const { bertTopicAnalysisTask } = await import( + "~/trigger/bert/topic-analysis" + ); + const handler = await bertTopicAnalysisTask.trigger(payload, { + queue: "bert-topic-analysis", + concurrencyKey: payload.userId, + tags: [payload.userId, "bert-analysis"], + }); + return { id: handler.id }; + } else { + // BullMQ + const { bertTopicQueue } = await import("~/bullmq/queues"); + const job = await bertTopicQueue.add("topic-analysis", payload, { + jobId: `bert-${payload.userId}-${Date.now()}`, + attempts: 2, // Only 2 attempts for expensive operations + backoff: { type: "exponential", delay: 5000 }, + }); + return { id: job.id }; } } diff --git a/apps/webapp/app/models/workspace.server.ts b/apps/webapp/app/models/workspace.server.ts index 7bd5bee..7800a19 100644 --- a/apps/webapp/app/models/workspace.server.ts +++ b/apps/webapp/app/models/workspace.server.ts @@ -29,12 +29,6 @@ Exclude: β€’ Anything not explicitly consented to share don't store anything the user did not explicitly consent to share.`; -const githubDescription = `Everything related to my GitHub work - repos I'm working on, projects I contribute to, code I'm writing, PRs I'm reviewing. Basically my coding life on GitHub.`; - -const healthDescription = `My health and wellness stuff - how I'm feeling, what I'm learning about my body, experiments I'm trying, patterns I notice. Whatever matters to me about staying healthy.`; - -const fitnessDescription = `My workouts and training - what I'm doing at the gym, runs I'm going on, progress I'm making, goals I'm chasing. Anything related to physical exercise and getting stronger.`; - export async function createWorkspace( input: CreateWorkspaceDto, ): Promise { @@ -56,32 +50,7 @@ export async function createWorkspace( await ensureBillingInitialized(workspace.id); // Create default spaces - await Promise.all([ - spaceService.createSpace({ - name: "Profile", - description: profileRule, - userId: input.userId, - workspaceId: workspace.id, - }), - spaceService.createSpace({ - name: "GitHub", - description: githubDescription, - userId: input.userId, - workspaceId: workspace.id, - }), - spaceService.createSpace({ - name: "Health", - description: healthDescription, - userId: input.userId, - workspaceId: workspace.id, - }), - spaceService.createSpace({ - name: "Fitness", - description: fitnessDescription, - userId: input.userId, - workspaceId: workspace.id, - }), - ]); + await Promise.all([]); try { const response = await sendEmail({ email: "welcome", to: user.email }); diff --git a/apps/webapp/app/routes/api.v1.conversation._index.tsx b/apps/webapp/app/routes/api.v1.conversation._index.tsx index 8e92a4c..922f02c 100644 --- a/apps/webapp/app/routes/api.v1.conversation._index.tsx +++ b/apps/webapp/app/routes/api.v1.conversation._index.tsx @@ -19,7 +19,10 @@ import { import { getModel } from "~/lib/model.server"; import { UserTypeEnum } from "@core/types"; import { nanoid } from "nanoid"; -import { getOrCreatePersonalAccessToken } from "~/services/personalAccessToken.server"; +import { + deletePersonalAccessToken, + getOrCreatePersonalAccessToken, +} from "~/services/personalAccessToken.server"; import { hasAnswer, hasQuestion, @@ -126,6 +129,7 @@ const { loader, action } = createHybridActionApiRoute( }); result.consumeStream(); // no await + await deletePersonalAccessToken(pat?.id); return 
result.toUIMessageStreamResponse({
       originalMessages: validatedMessages,
diff --git a/apps/webapp/app/routes/api.v1.logs.tsx b/apps/webapp/app/routes/api.v1.logs.tsx
index 0a06c57..78950e7 100644
--- a/apps/webapp/app/routes/api.v1.logs.tsx
+++ b/apps/webapp/app/routes/api.v1.logs.tsx
@@ -1,6 +1,7 @@
 import { json } from "@remix-run/node";
 import { z } from "zod";
 import { prisma } from "~/db.server";
+
 import { createHybridLoaderApiRoute } from "~/services/routeBuilders/apiBuilder.server";
 
 // Schema for logs search parameters
diff --git a/apps/webapp/app/routes/api.v1.spaces.ts b/apps/webapp/app/routes/api.v1.spaces.ts
index a363482..7a195c2 100644
--- a/apps/webapp/app/routes/api.v1.spaces.ts
+++ b/apps/webapp/app/routes/api.v1.spaces.ts
@@ -7,7 +7,10 @@ import { SpaceService } from "~/services/space.server";
 import { json } from "@remix-run/node";
 import { prisma } from "~/db.server";
 import { apiCors } from "~/utils/apiCors";
-import { isTriggerDeployment } from "~/lib/queue-adapter.server";
+import {
+  enqueueSpaceAssignment,
+  isTriggerDeployment,
+} from "~/lib/queue-adapter.server";
 
 const spaceService = new SpaceService();
 
@@ -74,6 +77,14 @@ const { action } = createHybridActionApiRoute(
         workspaceId: user.Workspace.id,
       });
 
+      await enqueueSpaceAssignment({
+        userId: user.id,
+        workspaceId: user.Workspace.id,
+        mode: "new_space",
+        newSpaceId: space.id,
+        batchSize: 25, // Analyze recent episodes for the new space
+      });
+
       return json({ space, success: true });
     }
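Note that on the BullMQ path the adapter only enqueues; a worker has to pick the jobs up. A rough sketch of what such a consumer could look like, assuming the queue registered in `~/bullmq/queues` is named `space-assignment` and reusing the logic-plus-callback wiring this diff establishes (the worker file and Redis connection details are assumptions, not part of this patch):

```
import { Worker } from "bullmq";
import { processSpaceAssignment } from "~/jobs/spaces/space-assignment.logic";
import { enqueueSpaceSummary } from "~/lib/queue-adapter.server";

// Hypothetical worker wiring; the queue name must match the one
// used by spaceAssignmentQueue in ~/bullmq/queues.
const spaceAssignmentWorker = new Worker(
  "space-assignment",
  async (job) => {
    // Delegate to the shared logic; fan out summaries through the adapter.
    return await processSpaceAssignment(job.data, (summaryPayload) =>
      enqueueSpaceSummary(summaryPayload),
    );
  },
  { connection: { host: process.env.REDIS_HOST ?? "localhost", port: 6379 } },
);

spaceAssignmentWorker.on("failed", (job, err) => {
  console.error(`space-assignment job ${job?.id} failed`, err);
});
```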
result[0]?.get("newEpisodeCount")?.toNumber() || 0 + : result[0]?.get("totalEpisodeCount")?.toNumber() || 0; + + logger.info( + `[Topic Analysis Check] User: ${userId}, New episodes: ${episodeCount}, Last analysis: ${lastAnalysisAt || "never"}`, + ); + + // Trigger if 20+ new episodes + return episodeCount >= 20; + } catch (error) { + logger.error( + `[Topic Analysis Check] Error checking episode count:`, + error, + ); + return false; + } +} + +/** + * Update workspace metadata with last topic analysis timestamp + */ +export async function updateLastTopicAnalysisTime( + workspaceId: string, +): Promise { + try { + const workspace = await prisma.workspace.findUnique({ + where: { id: workspaceId }, + select: { metadata: true }, + }); + + if (!workspace) { + logger.warn(`Workspace not found: ${workspaceId}`); + return; + } + + const metadata = (workspace.metadata || {}) as WorkspaceMetadata; + + await prisma.workspace.update({ + where: { id: workspaceId }, + data: { + metadata: { + ...metadata, + lastTopicAnalysisAt: new Date().toISOString(), + }, + }, + }); + + logger.info( + `[Topic Analysis] Updated last analysis timestamp for workspace: ${workspaceId}`, + ); + } catch (error) { + logger.error( + `[Topic Analysis] Error updating last analysis timestamp:`, + error, + ); + } +} diff --git a/apps/webapp/app/services/clustering.server.ts b/apps/webapp/app/services/clustering.server.ts deleted file mode 100644 index e69de29..0000000 diff --git a/apps/webapp/app/services/graphModels/space.ts b/apps/webapp/app/services/graphModels/space.ts index be0fbfb..054af4e 100644 --- a/apps/webapp/app/services/graphModels/space.ts +++ b/apps/webapp/app/services/graphModels/space.ts @@ -45,6 +45,43 @@ export async function createSpace( }; } +/** + * Get all active spaces for a user + */ +export async function getAllSpacesForUser( + userId: string, +): Promise { + const query = ` + MATCH (s:Space {userId: $userId}) + WHERE s.isActive = true + + // Count episodes assigned to each space + OPTIONAL MATCH (s)-[:HAS_EPISODE]->(e:Episode {userId: $userId}) + + WITH s, count(e) as episodeCount + RETURN s, episodeCount + ORDER BY s.createdAt DESC + `; + + const result = await runQuery(query, { userId }); + + return result.map((record) => { + const spaceData = record.get("s").properties; + const episodeCount = record.get("episodeCount") || 0; + + return { + uuid: spaceData.uuid, + name: spaceData.name, + description: spaceData.description, + userId: spaceData.userId, + createdAt: new Date(spaceData.createdAt), + updatedAt: new Date(spaceData.updatedAt), + isActive: spaceData.isActive, + contextCount: Number(episodeCount), + }; + }); +} + /** * Get a specific space by ID */ diff --git a/apps/webapp/app/services/mcp.server.ts b/apps/webapp/app/services/mcp.server.ts index 3800f60..f44fb54 100644 --- a/apps/webapp/app/services/mcp.server.ts +++ b/apps/webapp/app/services/mcp.server.ts @@ -58,6 +58,7 @@ async function createMcpServer( // Handle memory tools and integration meta-tools if ( name.startsWith("memory_") || + name === "get_session_id" || name === "get_integrations" || name === "get_integration_actions" || name === "execute_integration_action" diff --git a/apps/webapp/app/services/sessionCompaction.server.ts b/apps/webapp/app/services/sessionCompaction.server.ts deleted file mode 100644 index 8d4b36b..0000000 --- a/apps/webapp/app/services/sessionCompaction.server.ts +++ /dev/null @@ -1,262 +0,0 @@ -import { logger } from "~/services/logger.service"; -import { - getCompactedSessionBySessionId, - 
getCompactionStats, - getSessionEpisodes, - type CompactedSessionNode, -} from "~/services/graphModels/compactedSession"; -import { enqueueSessionCompaction } from "~/lib/queue-adapter.server"; - -/** - * Configuration for session compaction - */ -export const COMPACTION_CONFIG = { - minEpisodesForCompaction: 5, // Minimum episodes to trigger initial compaction - compactionThreshold: 1, // Trigger update after N new episodes - autoCompactionEnabled: true, // Enable automatic compaction -}; - -/** - * SessionCompactionService - Manages session compaction lifecycle - */ -export class SessionCompactionService { - /** - * Check if a session should be compacted - */ - async shouldCompact(sessionId: string, userId: string): Promise<{ - shouldCompact: boolean; - reason: string; - episodeCount?: number; - newEpisodeCount?: number; - }> { - try { - // Get existing compact - const existingCompact = await getCompactedSessionBySessionId(sessionId, userId); - - if (!existingCompact) { - // No compact exists, check if we have enough episodes - const episodeCount = await this.getSessionEpisodeCount(sessionId, userId); - - if (episodeCount >= COMPACTION_CONFIG.minEpisodesForCompaction) { - return { - shouldCompact: true, - reason: "initial_compaction", - episodeCount, - }; - } - - return { - shouldCompact: false, - reason: "insufficient_episodes", - episodeCount, - }; - } - - // Compact exists, check if we have enough new episodes - const newEpisodeCount = await this.getNewEpisodeCount( - sessionId, - userId, - existingCompact.endTime - ); - - if (newEpisodeCount >= COMPACTION_CONFIG.compactionThreshold) { - return { - shouldCompact: true, - reason: "update_compaction", - newEpisodeCount, - }; - } - - return { - shouldCompact: false, - reason: "insufficient_new_episodes", - newEpisodeCount, - }; - } catch (error) { - logger.error(`Error checking if session should compact`, { - sessionId, - userId, - error: error instanceof Error ? 
error.message : String(error),
-      });
-
-      return {
-        shouldCompact: false,
-        reason: "error",
-      };
-    }
-  }
-
-  /**
-   * Get total episode count for a session
-   */
-  private async getSessionEpisodeCount(
-    sessionId: string,
-    userId: string
-  ): Promise<number> {
-    const episodes = await getSessionEpisodes(sessionId, userId);
-    return episodes.length;
-  }
-
-  /**
-   * Get count of new episodes since last compaction
-   */
-  private async getNewEpisodeCount(
-    sessionId: string,
-    userId: string,
-    afterTime: Date
-  ): Promise<number> {
-    const episodes = await getSessionEpisodes(sessionId, userId, afterTime);
-    return episodes.length;
-  }
-
-  /**
-   * Trigger compaction for a session
-   */
-  async triggerCompaction(
-    sessionId: string,
-    userId: string,
-    source: string,
-    triggerSource: "auto" | "manual" | "threshold" = "auto"
-  ): Promise<{ success: boolean; taskId?: string; error?: string }> {
-    try {
-      // Check if compaction should be triggered
-      const check = await this.shouldCompact(sessionId, userId);
-
-      if (!check.shouldCompact) {
-        logger.info(`Compaction not needed`, {
-          sessionId,
-          userId,
-          reason: check.reason,
-        });
-
-        return {
-          success: false,
-          error: `Compaction not needed: ${check.reason}`,
-        };
-      }
-
-      // Trigger the compaction task
-      logger.info(`Triggering session compaction`, {
-        sessionId,
-        userId,
-        source,
-        triggerSource,
-        reason: check.reason,
-      });
-
-      const handle = await enqueueSessionCompaction({
-        userId,
-        sessionId,
-        source,
-        triggerSource,
-      });
-
-      logger.info(`Session compaction triggered`, {
-        sessionId,
-        userId,
-        taskId: handle.id,
-      });
-
-      return {
-        success: true,
-        taskId: handle.id,
-      };
-    } catch (error) {
-      logger.error(`Failed to trigger compaction`, {
-        sessionId,
-        userId,
-        error: error instanceof Error ? error.message : String(error),
-      });
-
-      return {
-        success: false,
-        error: error instanceof Error ? error.message : "Unknown error",
-      };
-    }
-  }
-
-  /**
-   * Get compacted session for recall
-   */
-  async getCompactForRecall(
-    sessionId: string,
-    userId: string
-  ): Promise<CompactedSessionNode | null> {
-    try {
-      return await getCompactedSessionBySessionId(sessionId, userId);
-    } catch (error) {
-      logger.error(`Error fetching compact for recall`, {
-        sessionId,
-        userId,
-        error: error instanceof Error ? error.message : String(error),
-      });
-      return null;
-    }
-  }
-
-  /**
-   * Get compaction statistics for a user
-   */
-  async getStats(userId: string): Promise<{
-    totalCompacts: number;
-    totalEpisodes: number;
-    averageCompressionRatio: number;
-    mostRecentCompaction: Date | null;
-  }> {
-    try {
-      return await getCompactionStats(userId);
-    } catch (error) {
-      logger.error(`Error fetching compaction stats`, {
-        userId,
-        error: error instanceof Error ? error.message : String(error),
-      });
-
-      return {
-        totalCompacts: 0,
-        totalEpisodes: 0,
-        averageCompressionRatio: 0,
-        mostRecentCompaction: null,
-      };
-    }
-  }
-
-  /**
-   * Auto-trigger compaction after episode ingestion
-   * Called from ingestion pipeline
-   */
-  async autoTriggerAfterIngestion(
-    sessionId: string | null | undefined,
-    userId: string,
-    source: string
-  ): Promise<void> {
-    // Skip if no sessionId or auto-compaction disabled
-    if (!sessionId || !COMPACTION_CONFIG.autoCompactionEnabled) {
-      return;
-    }
-
-    try {
-      const check = await this.shouldCompact(sessionId, userId);
-
-      if (check.shouldCompact) {
-        logger.info(`Auto-triggering compaction after ingestion`, {
-          sessionId,
-          userId,
-          reason: check.reason,
-        });
-
-        // Trigger compaction asynchronously (don't wait)
-        await this.triggerCompaction(sessionId, userId, source, "auto");
-      }
-    } catch (error) {
-      // Log error but don't fail ingestion
-      logger.error(`Error in auto-trigger compaction`, {
-        sessionId,
-        userId,
-        error: error instanceof Error ? error.message : String(error),
-      });
-    }
-  }
-}
-
-// Singleton instance
-export const sessionCompactionService = new SessionCompactionService();
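Before its deletion, the compaction gate above hinged on two thresholds from `COMPACTION_CONFIG`: 5 episodes to create the first compact, then 1 new episode to refresh it. A minimal standalone sketch of that decision logic with illustrative names (this is not the repo's API, just the same thresholds restated):

```
type CompactCheck =
  | { shouldCompact: true; reason: "initial_compaction" | "update_compaction" }
  | { shouldCompact: false; reason: "insufficient_episodes" | "insufficient_new_episodes" };

// Same thresholds as the deleted COMPACTION_CONFIG.
const MIN_EPISODES = 5;
const UPDATE_THRESHOLD = 1;

function checkCompaction(
  hasExistingCompact: boolean,
  episodeCount: number,
  newEpisodesSinceCompact: number,
): CompactCheck {
  if (!hasExistingCompact) {
    // First compaction needs a minimum body of episodes.
    return episodeCount >= MIN_EPISODES
      ? { shouldCompact: true, reason: "initial_compaction" }
      : { shouldCompact: false, reason: "insufficient_episodes" };
  }
  // Afterwards, any new episode past the threshold refreshes the compact.
  return newEpisodesSinceCompact >= UPDATE_THRESHOLD
    ? { shouldCompact: true, reason: "update_compaction" }
    : { shouldCompact: false, reason: "insufficient_new_episodes" };
}
```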
diff --git a/apps/webapp/app/services/space.server.ts b/apps/webapp/app/services/space.server.ts
index dcf213b..72d2a25 100644
--- a/apps/webapp/app/services/space.server.ts
+++ b/apps/webapp/app/services/space.server.ts
@@ -16,8 +16,6 @@ import {
   updateSpace,
 } from "./graphModels/space";
 import { prisma } from "~/trigger/utils/prisma";
-import { trackFeatureUsage } from "./telemetry.server";
-import { enqueueSpaceAssignment } from "~/lib/queue-adapter.server";
 
 export class SpaceService {
   /**
@@ -65,26 +63,7 @@ export class SpaceService {
     logger.info(`Created space ${space.id} successfully`);
 
     // Track space creation
-    trackFeatureUsage("space_created", params.userId).catch(console.error);
-
-    // Trigger automatic LLM assignment for the new space
-    try {
-      await enqueueSpaceAssignment({
-        userId: params.userId,
-        workspaceId: params.workspaceId,
-        mode: "new_space",
-        newSpaceId: space.id,
-        batchSize: 25, // Analyze recent statements for the new space
-      });
-
-      logger.info(`Triggered LLM space assignment for new space ${space.id}`);
-    } catch (error) {
-      // Don't fail space creation if LLM assignment fails
-      logger.warn(
-        `Failed to trigger LLM assignment for space ${space.id}:`,
-        error as Record<string, any>,
-      );
-    }
+    // trackFeatureUsage("space_created", params.userId).catch(console.error);
 
     return space;
   }
@@ -197,9 +176,6 @@ export class SpaceService {
       logger.info(`Nothing to update to graph`);
     }
 
-    // Track space update
-    trackFeatureUsage("space_updated", userId).catch(console.error);
-
     logger.info(`Updated space ${spaceId} successfully`);
     return space;
   }
diff --git a/apps/webapp/app/trigger/bert/topic-analysis.ts b/apps/webapp/app/trigger/bert/topic-analysis.ts
new file mode 100644
index 0000000..afba294
--- /dev/null
+++ b/apps/webapp/app/trigger/bert/topic-analysis.ts
@@ -0,0 +1,53 @@
+import { task } from "@trigger.dev/sdk/v3";
+import { python } from "@trigger.dev/python";
+import {
+  processTopicAnalysis,
+  type TopicAnalysisPayload,
+} from "~/jobs/bert/topic-analysis.logic";
+import { spaceSummaryTask } from "~/trigger/spaces/space-summary";
+
+/**
+ * Python runner for Trigger.dev using python.runScript
+ */
+async function runBertWithTriggerPython(
+  userId: string,
+  minTopicSize: number,
+  nrTopics?: number,
+): Promise<string> {
+  const args = [userId, "--json"];
+
+  if (nrTopics) {
+    args.push("--nr-topics", String(nrTopics));
+  }
+
+ console.log( + `[BERT Topic Analysis] Running with Trigger.dev Python: args=${args.join(" ")}`, + ); + + const result = await python.runScript("./python/main.py", args); + return result.stdout; +} + +/** + * Trigger.dev task for BERT topic analysis + * + * This is a thin wrapper around the common logic in jobs/bert/topic-analysis.logic.ts + */ +export const bertTopicAnalysisTask = task({ + id: "bert-topic-analysis", + queue: { + name: "bert-topic-analysis", + concurrencyLimit: 3, // Max 3 parallel analyses to avoid CPU overload + }, + run: async (payload: TopicAnalysisPayload) => { + return await processTopicAnalysis( + payload, + // Callback to enqueue space summary + async (params) => { + await spaceSummaryTask.trigger(params); + }, + // Python runner for Trigger.dev + runBertWithTriggerPython, + ); + }, +}); diff --git a/apps/webapp/app/trigger/ingest/ingest.ts b/apps/webapp/app/trigger/ingest/ingest.ts index 4a3cd02..a65f672 100644 --- a/apps/webapp/app/trigger/ingest/ingest.ts +++ b/apps/webapp/app/trigger/ingest/ingest.ts @@ -6,6 +6,7 @@ import { } from "~/jobs/ingest/ingest-episode.logic"; import { triggerSpaceAssignment } from "../spaces/space-assignment"; import { triggerSessionCompaction } from "../session/session-compaction"; +import { bertTopicAnalysisTask } from "../bert/topic-analysis"; const ingestionQueue = queue({ name: "ingestion-queue", @@ -32,6 +33,14 @@ export const ingestTask = task({ async (params) => { await triggerSessionCompaction(params); }, + // Callback for BERT topic analysis + async (params) => { + await bertTopicAnalysisTask.trigger(params, { + queue: "bert-topic-analysis", + concurrencyKey: params.userId, + tags: [params.userId, "bert-analysis"], + }); + }, ); }, }); diff --git a/apps/webapp/app/trigger/ingest/retry-no-credits.ts b/apps/webapp/app/trigger/ingest/retry-no-credits.ts index dcc0556..7eab24e 100644 --- a/apps/webapp/app/trigger/ingest/retry-no-credits.ts +++ b/apps/webapp/app/trigger/ingest/retry-no-credits.ts @@ -1,9 +1,9 @@ import { task } from "@trigger.dev/sdk"; import { z } from "zod"; -import { IngestionQueue, IngestionStatus } from "@core/database"; +import { IngestionStatus } from "@core/database"; import { logger } from "~/services/logger.service"; import { prisma } from "../utils/prisma"; -import { IngestBodyRequest, ingestTask } from "./ingest"; +import { type IngestBodyRequest, ingestTask } from "./ingest"; export const RetryNoCreditBodyRequest = z.object({ workspaceId: z.string(), @@ -43,9 +43,7 @@ export const retryNoCreditsTask = task({ }; } - logger.log( - `Found ${noCreditItems.length} NO_CREDITS episodes to retry`, - ); + logger.log(`Found ${noCreditItems.length} NO_CREDITS episodes to retry`); const results = { total: noCreditItems.length, diff --git a/apps/webapp/app/trigger/spaces/space-assignment.ts b/apps/webapp/app/trigger/spaces/space-assignment.ts index 23359d7..bfc815f 100644 --- a/apps/webapp/app/trigger/spaces/space-assignment.ts +++ b/apps/webapp/app/trigger/spaces/space-assignment.ts @@ -1,79 +1,12 @@ import { queue, task } from "@trigger.dev/sdk/v3"; import { logger } from "~/services/logger.service"; -import { SpaceService } from "~/services/space.server"; -import { makeModelCall } from "~/lib/model.server"; -import { createBatch, getBatch } from "~/lib/batch.server"; -import { runQuery } from "~/lib/neo4j.server"; import { - assignEpisodesToSpace, - getSpaceEpisodeCount, -} from "~/services/graphModels/space"; + processSpaceAssignment, + type SpaceAssignmentPayload, +} from 
"~/jobs/spaces/space-assignment.logic"; import { triggerSpaceSummary } from "./space-summary"; -import { triggerSpacePattern } from "./space-pattern"; -import { - updateMultipleSpaceStatuses, - SPACE_STATUS, -} from "../utils/space-status"; -import type { CoreMessage } from "ai"; -import { z } from "zod"; -import { type Space } from "@prisma/client"; -export interface SpaceAssignmentPayload { - userId: string; - workspaceId: string; - mode: "new_space" | "episode"; - newSpaceId?: string; // For new_space mode - episodeIds?: string[]; // For daily_batch mode (default: 1) - batchSize?: number; // Processing batch size -} - -interface EpisodeData { - uuid: string; - content: string; - originalContent: string; - source: string; - createdAt: Date; - metadata: any; -} - -interface SpaceData { - uuid: string; - name: string; - description?: string; - episodeCount: number; -} - -interface AssignmentResult { - episodeId: string; - spaceIds: string[]; - confidence: number; - reasoning?: string; -} - -const CONFIG = { - newSpaceMode: { - batchSize: 20, - confidenceThreshold: 0.75, // Intent-based threshold for new space creation - useBatchAPI: true, // Use batch API for new space mode - minEpisodesForBatch: 5, // Minimum episodes to use batch API - }, - episodeMode: { - batchSize: 20, - confidenceThreshold: 0.75, // Intent-based threshold for episode assignment - useBatchAPI: true, // Use batch API for episode mode - minEpisodesForBatch: 5, // Minimum episodes to use batch API - }, -}; - -// Zod schema for LLM response validation -const AssignmentResultSchema = z.array( - z.object({ - episodeId: z.string(), - addSpaceId: z.array(z.string()), - confidence: z.number(), - reasoning: z.string(), - }), -); +export type { SpaceAssignmentPayload }; const spaceAssignmentQueue = queue({ name: "space-assignment-queue", @@ -85,1110 +18,22 @@ export const spaceAssignmentTask = task({ queue: spaceAssignmentQueue, maxDuration: 1800, // 15 minutes timeout run: async (payload: SpaceAssignmentPayload) => { - const { - userId, - workspaceId, - mode, - newSpaceId, - episodeIds, - batchSize = mode === "new_space" - ? CONFIG.newSpaceMode.batchSize - : CONFIG.episodeMode.batchSize, - } = payload; - - logger.info(`Starting space assignment`, { - userId, - mode, - newSpaceId, - episodeIds, - batchSize, + logger.info(`[Trigger.dev] Starting space assignment task`, { + userId: payload.userId, + mode: payload.mode, }); - const spaceService = new SpaceService(); - - try { - // 1. Get user's spaces - const spaces = await spaceService.getUserSpaces(userId); - - if (spaces.length === 0) { - logger.info(`No spaces found for user ${userId}, skipping assignment`); - return { - success: true, - message: "No spaces to assign to", - processed: 0, - }; - } - - // 2. Get episodes to analyze based on mode - const episodes = await getEpisodesToAnalyze(userId, mode, { - newSpaceId, - episodeIds, - }); - - if (episodes.length === 0) { - logger.info( - `No episodes to analyze for user ${userId} in ${mode} mode`, - ); - return { - success: true, - message: "No episodes to analyze", - processed: 0, - }; - } - - // 3. Process episodes using batch AI or fallback to sequential - const config = - mode === "new_space" ? 
CONFIG.newSpaceMode : CONFIG.episodeMode; - // const shouldUseBatchAPI = - // config.useBatchAPI && episodes.length >= config.minEpisodesForBatch; - const shouldUseBatchAPI = true; - - let totalProcessed = 0; - let totalAssignments = 0; - let totalBatches = 0; - const affectedSpaces = new Set(); // Track spaces that received new episodes - - if (shouldUseBatchAPI) { - logger.info( - `Using Batch AI processing for ${episodes.length} episodes`, - { - mode, - userId, - batchSize, - }, - ); - - const batchResult = await processBatchAI( - episodes, - spaces, - userId, - mode, - newSpaceId, - batchSize, - ); - totalProcessed = batchResult.processed; - totalAssignments = batchResult.assignments; - batchResult.affectedSpaces?.forEach((spaceId) => - affectedSpaces.add(spaceId), - ); - } else { - logger.info( - `Using sequential processing for ${episodes.length} episodes (below batch threshold)`, - { - mode, - userId, - minRequired: config.minEpisodesForBatch, - }, - ); - - // Fallback to sequential processing for smaller episode sets - totalBatches = Math.ceil(episodes.length / batchSize); - - for (let i = 0; i < totalBatches; i++) { - const batch = episodes.slice(i * batchSize, (i + 1) * batchSize); - - logger.info( - `Processing batch ${i + 1}/${totalBatches} with ${batch.length} episodes`, - { - mode, - userId, - }, - ); - - const batchResult = await processBatch( - batch, - spaces, - userId, - mode, - newSpaceId, - ); - totalProcessed += batchResult.processed; - totalAssignments += batchResult.assignments; - batchResult.affectedSpaces?.forEach((spaceId) => - affectedSpaces.add(spaceId), - ); - - // Add delay between batches to avoid rate limiting - if (i < totalBatches - 1) { - await new Promise((resolve) => setTimeout(resolve, 1000)); - } - } - } - - logger.info(`Completed LLM space assignment`, { - userId, - mode, - totalProcessed, - totalAssignments, - spacesAvailable: spaces.length, - affectedSpaces: affectedSpaces.size, - }); - - // 4. Update space status to "processing" for affected spaces - if (affectedSpaces.size > 0) { - try { - await updateMultipleSpaceStatuses( - Array.from(affectedSpaces), - SPACE_STATUS.PROCESSING, - { - userId, - operation: "space-assignment", - metadata: { mode, phase: "start_processing" }, - }, - ); - } catch (statusError) { - logger.warn(`Failed to update space statuses to processing:`, { - error: statusError, - userId, - mode, - }); - } - } - - // 5. 
Trigger space summaries for affected spaces (fan-out pattern) - if (affectedSpaces.size > 0) { - try { - logger.info( - `Triggering space summaries for ${affectedSpaces.size} affected spaces in parallel`, - ); - - // Fan out to multiple parallel triggers - const summaryPromises = Array.from(affectedSpaces).map((spaceId) => - triggerSpaceSummary({ - userId, - workspaceId, - spaceId, - triggerSource: "assignment", - }).catch((error) => { - logger.warn(`Failed to trigger summary for space ${spaceId}:`, { - error, - }); - return { success: false, spaceId, error: error.message }; - }), - ); - - const summaryResults = await Promise.allSettled(summaryPromises); - const successful = summaryResults.filter( - (r) => r.status === "fulfilled", - ).length; - const failed = summaryResults.filter( - (r) => r.status === "rejected", - ).length; - - logger.info(`Space summary triggers completed`, { - userId, - mode, - totalSpaces: affectedSpaces.size, - successful, - failed, - }); - } catch (summaryError) { - // Don't fail the assignment if summary generation fails - logger.warn(`Failed to trigger space summaries after assignment:`, { - error: summaryError, - userId, - mode, - affectedSpaces: Array.from(affectedSpaces), - }); - } - } - - // 6. Update space status to "ready" after all processing is complete - if (affectedSpaces.size > 0) { - try { - await updateMultipleSpaceStatuses( - Array.from(affectedSpaces), - SPACE_STATUS.READY, - { - userId, - operation: "space-assignment", - metadata: { mode, phase: "completed_processing" }, - }, - ); - } catch (finalStatusError) { - logger.warn(`Failed to update space statuses to ready:`, { - error: finalStatusError, - userId, - mode, - }); - } - } - - return { - success: true, - mode, - processed: totalProcessed, - assignments: totalAssignments, - batches: totalBatches, - spacesAvailable: spaces.length, - affectedSpaces: affectedSpaces.size, - summaryTriggered: affectedSpaces.size > 0, - patternCheckTriggered: affectedSpaces.size > 0, - }; - } catch (error) { - logger.error( - `Error in LLM space assignment for user ${userId}:`, - error as Record, - ); - throw error; - } + // Use common business logic with callback for triggering space summaries + return await processSpaceAssignment( + payload, + // Callback to enqueue space summary + async (summaryPayload) => { + return await triggerSpaceSummary(summaryPayload); + }, + ); }, }); -async function getEpisodesToAnalyze( - userId: string, - mode: "new_space" | "episode", - options: { newSpaceId?: string; episodeIds?: string[] }, -): Promise { - let query: string; - let params: any = { userId }; - - if (mode === "new_space") { - // For new space: analyze all recent episodes - query = ` - MATCH (e:Episode {userId: $userId}) - RETURN e - ORDER BY e.createdAt DESC - LIMIT 1000 - `; - } else { - // For episode mode: analyze specific episodes - query = ` - UNWIND $episodeIds AS episodeId - MATCH (e:Episode {uuid: episodeId, userId: $userId}) - RETURN e - ORDER BY e.createdAt DESC - `; - params.episodeIds = options.episodeIds; - } - - const result = await runQuery(query, params); - - return result.map((record) => { - const episode = record.get("e").properties; - return { - uuid: episode.uuid, - content: episode.content, - originalContent: episode.originalContent, - source: episode.source, - createdAt: new Date(episode.createdAt), - metadata: JSON.parse(episode.metadata || "{}"), - }; - }); -} - -async function processBatchAI( - episodes: EpisodeData[], - spaces: Space[], - userId: string, - mode: "new_space" | "episode", - 
newSpaceId?: string, - batchSize: number = 50, -): Promise<{ - processed: number; - assignments: number; - affectedSpaces?: string[]; -}> { - try { - // Create batches of episodes - const episodeBatches: EpisodeData[][] = []; - for (let i = 0; i < episodes.length; i += batchSize) { - episodeBatches.push(episodes.slice(i, i + batchSize)); - } - - logger.info( - `Creating ${episodeBatches.length} batch AI requests for ${episodes.length} episodes`, - ); - - // Create batch requests with prompts - const batchRequests = await Promise.all( - episodeBatches.map(async (batch, index) => { - const promptMessages = await createLLMPrompt( - batch, - spaces, - mode, - newSpaceId, - userId, - ); - const systemPrompt = - promptMessages.find((m) => m.role === "system")?.content || ""; - const userPrompt = - promptMessages.find((m) => m.role === "user")?.content || ""; - - return { - customId: `episode-space-assignment-${userId}-${mode}-${index}`, - messages: [{ role: "user" as const, content: userPrompt }], - systemPrompt, - }; - }), - ); - - // Submit batch to AI provider - const { batchId } = await createBatch({ - requests: batchRequests, - outputSchema: AssignmentResultSchema, - maxRetries: 3, - timeoutMs: 1200000, // 10 minutes timeout - }); - - logger.info(`Batch AI job created: ${batchId}`, { - userId, - mode, - batchRequests: batchRequests.length, - }); - - // Poll for completion with improved handling - const maxPollingTime = 1200000; // 13 minutes - const pollInterval = 5000; // 5 seconds - const startTime = Date.now(); - - let batch = await getBatch({ batchId }); - - while (batch.status === "processing" || batch.status === "pending") { - const elapsed = Date.now() - startTime; - - if (elapsed > maxPollingTime) { - logger.warn( - `Batch AI job timed out after ${elapsed}ms, processing partial results`, - { - batchId, - status: batch.status, - completed: batch.completedRequests, - total: batch.totalRequests, - failed: batch.failedRequests, - }, - ); - break; // Exit loop to process any available results - } - - logger.info(`Batch AI job status: ${batch.status}`, { - batchId, - completed: batch.completedRequests, - total: batch.totalRequests, - failed: batch.failedRequests, - elapsed: elapsed, - }); - - await new Promise((resolve) => setTimeout(resolve, pollInterval)); - batch = await getBatch({ batchId }); - } - - // Handle different completion scenarios - if (batch.status === "failed") { - logger.error(`Batch AI job failed completely`, { - batchId, - status: batch.status, - }); - throw new Error(`Batch AI job failed with status: ${batch.status}`); - } - - // Log final status regardless of completion state - logger.info(`Batch AI job processing finished`, { - batchId, - status: batch.status, - completed: batch.completedRequests, - total: batch.totalRequests, - failed: batch.failedRequests, - }); - - if (!batch.results || batch.results.length === 0) { - logger.warn(`No results returned from batch AI job ${batchId}`, { - status: batch.status, - completed: batch.completedRequests, - failed: batch.failedRequests, - }); - - // If we have no results but some requests failed, fall back to sequential processing - if (batch.failedRequests && batch.failedRequests > 0) { - logger.info( - `Falling back to sequential processing due to batch failures`, - ); - return await processBatch(episodes, spaces, userId, mode, newSpaceId); - } - - return { processed: episodes.length, assignments: 0 }; - } - - logger.info(`Processing batch results`, { - batchId, - status: batch.status, - resultsCount: batch.results.length, 
- totalRequests: batch.totalRequests, - completedRequests: batch.completedRequests, - failedRequests: batch.failedRequests, - }); - - // Process all batch results - let totalAssignments = 0; - const affectedSpaces = new Set(); - const confidenceThreshold = - mode === "new_space" - ? CONFIG.newSpaceMode.confidenceThreshold - : CONFIG.episodeMode.confidenceThreshold; - - for (const result of batch.results) { - if (result.error) { - logger.warn(`Batch AI request ${result.customId} failed:`, { - error: result.error, - }); - continue; - } - - if (!result.response) { - logger.warn(`No response from batch AI request ${result.customId}`); - continue; - } - - // Parse assignments from this batch result - let assignments: AssignmentResult[] = []; - try { - // Extract episode batch info from customId - const batchIndexMatch = result.customId.match(/-(\d+)$/); - const batchIndex = batchIndexMatch ? parseInt(batchIndexMatch[1]) : 0; - const episodeBatch = episodeBatches[batchIndex]; - - if (Array.isArray(result.response)) { - // Handle direct array response (from structured output) - assignments = result.response.map((a) => ({ - episodeId: a.episodeId, - spaceIds: a.addSpaceId || [], - confidence: a.confidence || 0.75, - reasoning: a.reasoning, - })); - } else if (typeof result.response === "string") { - // Parse from text response with tags (fallback for non-structured output) - assignments = parseLLMResponseWithTags( - result.response, - episodeBatch, - spaces, - ); - } else if (typeof result.response === "object" && result.response) { - // Handle object response that might contain the array directly - try { - let responseData = result.response; - if (responseData.results && Array.isArray(responseData.results)) { - responseData = responseData.results; - } - - if (Array.isArray(responseData)) { - assignments = responseData.map((a) => ({ - episodeId: a.episodeId, - spaceIds: a.addSpaceId || [], - confidence: a.confidence || 0.75, - reasoning: a.reasoning, - })); - } else { - // Fallback parsing - assignments = parseLLMResponse( - JSON.stringify(result.response), - episodeBatch, - spaces, - ); - } - } catch (parseError) { - logger.error( - `Error processing object response ${result.customId}:`, - { error: parseError }, - ); - assignments = []; - } - } else { - // Fallback parsing - assignments = parseLLMResponse( - JSON.stringify(result.response), - episodeBatch, - spaces, - ); - } - } catch (parseError) { - logger.error(`Error parsing batch result ${result.customId}:`, { - error: parseError, - }); - continue; - } - - // Group episodes by space for batch assignment - const spaceToEpisodes = new Map(); - - for (const assignment of assignments) { - if ( - assignment.spaceIds.length > 0 && - assignment.confidence >= confidenceThreshold - ) { - for (const spaceId of assignment.spaceIds) { - if (!spaceToEpisodes.has(spaceId)) { - spaceToEpisodes.set(spaceId, []); - } - spaceToEpisodes.get(spaceId)!.push(assignment.episodeId); - } - } - } - - // Apply batch assignments - one call per space - for (const [spaceId, episodeIds] of spaceToEpisodes) { - try { - const assignmentResult = await assignEpisodesToSpace( - episodeIds, - spaceId, - userId, - ); - - if (assignmentResult.success) { - totalAssignments += episodeIds.length; - affectedSpaces.add(spaceId); - logger.info( - `Batch AI assigned ${episodeIds.length} episodes to space ${spaceId}`, - { - episodeIds, - mode, - batchId: result.customId, - }, - ); - } - } catch (error) { - logger.warn( - `Failed to assign ${episodeIds.length} episodes to space 
${spaceId}:`, - { error, episodeIds }, - ); - } - } - } - - // Log final batch processing results - logger.info(`Batch AI processing completed`, { - batchId, - totalEpisodes: episodes.length, - processedBatches: batch.results.length, - totalAssignments, - affectedSpaces: affectedSpaces.size, - completedRequests: batch.completedRequests, - failedRequests: batch.failedRequests || 0, - }); - - // If we have significant failures, consider fallback processing for remaining episodes - const failureRate = batch.failedRequests - ? batch.failedRequests / batch.totalRequests - : 0; - if (failureRate > 0.5) { - // If more than 50% failed - logger.warn( - `High failure rate (${Math.round(failureRate * 100)}%) in batch processing, consider reviewing prompts or input quality`, - ); - } - - return { - processed: episodes.length, - assignments: totalAssignments, - affectedSpaces: Array.from(affectedSpaces), - }; - } catch (error) { - logger.error("Error in Batch AI processing:", { error }); - throw error; - } -} - -async function processBatch( - episodes: EpisodeData[], - spaces: Space[], - userId: string, - mode: "new_space" | "episode", - newSpaceId?: string, -): Promise<{ - processed: number; - assignments: number; - affectedSpaces?: string[]; -}> { - try { - // Create the LLM prompt based on mode - const prompt = await createLLMPrompt( - episodes, - spaces, - mode, - newSpaceId, - userId, - ); - - // Episode-intent matching is MEDIUM complexity (semantic analysis with intent alignment) - let responseText = ""; - await makeModelCall( - false, - prompt, - (text: string) => { - responseText = text; - }, - undefined, - "high", - ); - - // Response text is now set by the callback - - // Parse LLM response - const assignments = parseLLMResponseWithTags( - responseText, - episodes, - spaces, - ); - - // Apply assignments - let totalAssignments = 0; - const affectedSpaces = new Set(); - const confidenceThreshold = - mode === "new_space" - ? 
CONFIG.newSpaceMode.confidenceThreshold - : CONFIG.episodeMode.confidenceThreshold; - - for (const assignment of assignments) { - if ( - assignment.spaceIds.length > 0 && - assignment.confidence >= confidenceThreshold - ) { - // Assign to each space individually to track metadata properly - for (const spaceId of assignment.spaceIds) { - try { - const result = await assignEpisodesToSpace( - [assignment.episodeId], - spaceId, - userId, - ); - - if (result.success) { - totalAssignments++; - affectedSpaces.add(spaceId); - - logger.info( - `LLM assigned episode ${assignment.episodeId} to space ${spaceId}`, - { - confidence: assignment.confidence, - reasoning: assignment.reasoning || "No reasoning", - mode, - } as Record, - ); - } - } catch (error) { - logger.warn( - `Failed to assign episode ${assignment.episodeId} to space ${spaceId}:`, - error as Record, - ); - } - } - } - } - - return { - processed: episodes.length, - assignments: totalAssignments, - affectedSpaces: Array.from(affectedSpaces), - }; - } catch (error) { - logger.error("Error processing batch:", error as Record); - return { processed: 0, assignments: 0, affectedSpaces: [] }; - } -} - -async function createLLMPrompt( - episodes: EpisodeData[], - spaces: Space[], - mode: "new_space" | "episode", - newSpaceId?: string, - userId?: string, -): Promise { - const episodesDescription = episodes - .map( - (ep) => - `ID: ${ep.uuid}\nCONTENT: ${ep.content}\nSOURCE: ${ep.source}\nMETADATA: ${JSON.stringify(ep.metadata)}`, - ) - .join("\n\n"); - - // Get enhanced space information with episode counts - const enhancedSpaces = await Promise.all( - spaces.map(async (space) => { - const currentCount = userId - ? await getSpaceEpisodeCount(space.id, userId) - : 0; - return { - ...space, - currentEpisodeCount: currentCount, - }; - }), - ); - - if (mode === "new_space" && newSpaceId) { - // Focus on the new space for assignment - const newSpace = enhancedSpaces.find((s) => s.id === newSpaceId); - if (!newSpace) { - throw new Error(`New space ${newSpaceId} not found`); - } - - return [ - { - role: "system", - content: `You are analyzing episodes for assignment to a newly created space based on the space's intent and purpose. - -CORE PRINCIPLE: Match episodes based on WHAT THE EPISODE IS FUNDAMENTALLY ABOUT (its primary subject), not just keyword overlap. - -STEP-BY-STEP FILTERING PROCESS: - -Step 1: IDENTIFY PRIMARY SUBJECT -Ask: "Who or what is this episode fundamentally about?" -- Is it about a specific person? (by name, or "I"/"my" = speaker) -- Is it about a system, tool, or organization? -- Is it about a project, event, or activity? -- Is it about a concept, topic, or idea? - -Step 2: HANDLE IMPLICIT SUBJECTS -- "I prefer..." or "My..." β†’ Subject is the SPEAKER (check episode source/metadata for identity) -- "User discussed..." or "Person X said..." β†’ Subject is that specific person -- "We decided..." β†’ Subject is the group/team/project being discussed -- If unclear, identify from context clues in the episode content - -Step 3: CHECK SUBJECT ALIGNMENT -Does the PRIMARY SUBJECT match what the space is about? -- Match the subject identity (right person/thing/concept?) -- Match the subject relationship (is episode ABOUT the subject or just MENTIONING it?) -- Match the intent purpose (does episode serve the space's purpose?) 
-- Check scope constraints: If space description includes scope requirements (e.g., "cross-context", "not app-specific", "broadly useful", "stable for 3+ months"), verify episode meets those constraints - -Step 4: DISTINGUISH SUBJECT vs META -Ask: "Is this episode ABOUT the subject itself, or ABOUT discussing/analyzing the subject?" -- ABOUT subject: Episode contains actual content related to subject -- META-discussion: Episode discusses how to handle/analyze/organize the subject -- Only assign if episode is ABOUT the subject, not meta-discussion - -Step 5: VERIFY CONFIDENCE -Only assign if confidence >= 0.75 based on: -- Subject identity clarity (is subject clearly identified?) -- Subject alignment strength (how well does it match space intent?) -- Content relevance (does episode content serve space purpose?) - -CRITICAL RULE: PRIMARY SUBJECT MATCHING -The episode's PRIMARY SUBJECT must match the space's target subject. -- If space is about Person A, episodes about Person B should NOT match (even if same topic) -- If space is about a specific concept, meta-discussions about that concept should NOT match -- If space is about actual behaviors/facts, process discussions about organizing those facts should NOT match - -EXAMPLES OF CORRECT FILTERING: - -Example 1 - Person Identity: -Space: "Alex's work preferences" -Episode A: "I prefer morning meetings and async updates" (speaker: Alex) β†’ ASSIGN βœ… (primary subject: Alex's preferences) -Episode B: "Jordan prefers afternoon meetings" (speaker: System) β†’ DO NOT ASSIGN ❌ (primary subject: Jordan, not Alex) - -Example 2 - Meta vs Actual: -Space: "Recipe collection" -Episode A: "My lasagna recipe: 3 layers pasta, bΓ©chamel, meat sauce..." β†’ ASSIGN βœ… (primary subject: actual recipe) -Episode B: "We should organize recipes by cuisine type" β†’ DO NOT ASSIGN ❌ (primary subject: organizing system, not recipe) - -Example 3 - Keyword Overlap Without Subject Match: -Space: "Home renovation project" -Episode A: "Installed new kitchen cabinets, chose oak wood" β†’ ASSIGN βœ… (primary subject: home renovation) -Episode B: "Friend asked advice about their kitchen renovation" β†’ DO NOT ASSIGN ❌ (primary subject: friend's project, not this home) - -Example 4 - Scope Constraints: -Space: "Personal identity and preferences (broadly useful across contexts, not app-specific)" -Episode A: "I prefer async communication and morning work hours" β†’ ASSIGN βœ… (cross-context preference, broadly applicable) -Episode B: "Demonstrated knowledge of ProjectX technical stack" β†’ DO NOT ASSIGN ❌ (work/project knowledge, not personal identity) - -RESPONSE FORMAT: -Provide your response inside tags with a valid JSON array: - - -[ - { - "episodeId": "episode-uuid", - "addSpaceId": ["${newSpaceId}"], - "confidence": 0.75, - "reasoning": "Brief explanation of intent match" - } -] - - -IMPORTANT: If an episode doesn't align with the space's intent, use empty addSpaceId array: [] -Example: {"episodeId": "ep-123", "addSpaceId": [], "confidence": 0.0, "reasoning": "No intent alignment"}`, - }, - { - role: "user", - content: `NEW SPACE TO POPULATE: -Name: ${newSpace.name} -Intent/Purpose: ${newSpace.description || "No description"} -Current Episodes: ${newSpace.currentEpisodeCount} - -EPISODES TO EVALUATE: -${episodesDescription} - -ASSIGNMENT TASK: -For each episode above, follow the step-by-step process to determine if it should be assigned to this space. - -Remember: -1. Identify the PRIMARY SUBJECT of each episode (who/what is it about?) -2. 
Check if that PRIMARY SUBJECT matches what this space is about -3. If the episode is ABOUT something else (even if it mentions related keywords), do NOT assign -4. If the episode is a META-discussion about the space's topic (not actual content), do NOT assign -5. Only assign if the episode's primary subject aligns with the space's intent AND confidence >= 0.75 - -Provide your analysis and assignments using the specified JSON format.`, - }, - ]; - } else { - // Episode mode - consider all spaces - const spacesDescription = enhancedSpaces - .map((space) => { - const spaceInfo = [ - `- ${space.name} (${space.id})`, - ` Intent/Purpose: ${space.description || "No description"}`, - ` Current Episodes: ${space.currentEpisodeCount}`, - ]; - - if (space.summary) { - spaceInfo.push(` Summary: ${space.summary}`); - } - - return spaceInfo.join("\n"); - }) - .join("\n\n"); - - return [ - { - role: "system", - content: `You are an expert at organizing episodes into semantic spaces based on the space's intent and purpose. - -CORE PRINCIPLE: Match episodes based on WHAT THE EPISODE IS FUNDAMENTALLY ABOUT (its primary subject), not just keyword overlap. - -STEP-BY-STEP FILTERING PROCESS: - -Step 1: IDENTIFY PRIMARY SUBJECT -Ask: "Who or what is this episode fundamentally about?" -- Is it about a specific person? (by name, or "I"/"my" = speaker) -- Is it about a system, tool, or organization? -- Is it about a project, event, or activity? -- Is it about a concept, topic, or idea? - -Step 2: HANDLE IMPLICIT SUBJECTS -- "I prefer..." or "My..." β†’ Subject is the SPEAKER (check episode source/metadata for identity) -- "User discussed..." or "Person X said..." β†’ Subject is that specific person -- "We decided..." β†’ Subject is the group/team/project being discussed -- If unclear, identify from context clues in the episode content - -Step 3: CHECK SUBJECT ALIGNMENT WITH EACH SPACE -For each available space, does the episode's PRIMARY SUBJECT match what that space is about? -- Match the subject identity (right person/thing/concept?) -- Match the subject relationship (is episode ABOUT the subject or just MENTIONING it?) -- Match the intent purpose (does episode serve the space's purpose?) -- An episode can match multiple spaces if its primary subject serves multiple intents - -Step 4: DISTINGUISH SUBJECT vs META -Ask: "Is this episode ABOUT the subject itself, or ABOUT discussing/analyzing the subject?" -- ABOUT subject: Episode contains actual content related to subject -- META-discussion: Episode discusses how to handle/analyze/organize the subject -- Only assign if episode is ABOUT the subject, not meta-discussion - -Step 5: VERIFY CONFIDENCE -Only assign to a space if confidence >= 0.75 based on: -- Subject identity clarity (is subject clearly identified?) -- Subject alignment strength (how well does it match space intent?) -- Content relevance (does episode content serve space purpose?) - -Step 6: MULTI-SPACE ASSIGNMENT -- An episode can belong to multiple spaces if its primary subject serves multiple intents -- Each space assignment should meet the >= 0.75 confidence threshold independently -- If no spaces match, use empty addSpaceId: [] - -CRITICAL RULE: PRIMARY SUBJECT MATCHING -The episode's PRIMARY SUBJECT must match the space's target subject. 
-- If space is about Person A, episodes about Person B should NOT match (even if same topic)
-- If space is about a specific concept, meta-discussions about that concept should NOT match
-- If space is about actual behaviors/facts, process discussions about organizing those facts should NOT match
-
-EXAMPLES OF CORRECT FILTERING:
-
-Example 1 - Person Identity:
-Space: "Alex's work preferences"
-Episode A: "I prefer morning meetings and async updates" (speaker: Alex) β†’ ASSIGN βœ… (primary subject: Alex's preferences)
-Episode B: "Jordan prefers afternoon meetings" (speaker: System) β†’ DO NOT ASSIGN ❌ (primary subject: Jordan, not Alex)
-
-Example 2 - Meta vs Actual:
-Space: "Recipe collection"
-Episode A: "My lasagna recipe: 3 layers pasta, bΓ©chamel, meat sauce..." β†’ ASSIGN βœ… (primary subject: actual recipe)
-Episode B: "We should organize recipes by cuisine type" β†’ DO NOT ASSIGN ❌ (primary subject: organizing system, not recipe)
-
-Example 3 - Keyword Overlap Without Subject Match:
-Space: "Home renovation project"
-Episode A: "Installed new kitchen cabinets, chose oak wood" β†’ ASSIGN βœ… (primary subject: home renovation)
-Episode B: "Friend asked advice about their kitchen renovation" β†’ DO NOT ASSIGN ❌ (primary subject: friend's project, not this home)
-
-Example 4 - Scope Constraints:
-Space: "Personal identity and preferences (broadly useful across contexts, not app-specific)"
-Episode A: "I prefer async communication and morning work hours" β†’ ASSIGN βœ… (cross-context preference, broadly applicable)
-Episode B: "I format task titles as {verb}: {title} in TaskApp" β†’ DO NOT ASSIGN ❌ (app-specific behavior, fails "not app-specific" constraint)
-Episode C: "Demonstrated knowledge of ProjectX technical stack" β†’ DO NOT ASSIGN ❌ (work/project knowledge, not personal identity)
-
-RESPONSE FORMAT:
-Provide your response inside <output> tags with a valid JSON array:
-
-<output>
-[
-  {
-    "episodeId": "episode-uuid",
-    "addSpaceId": ["space-uuid1", "space-uuid2"],
-    "confidence": 0.75,
-    "reasoning": "Brief explanation of intent match"
-  }
-]
-</output>
-
-IMPORTANT: If no spaces' intents align with an episode, use empty addSpaceId array: []
-Example: {"episodeId": "ep-123", "addSpaceId": [], "confidence": 0.0, "reasoning": "No matching space intent"}`,
-      },
-      {
-        role: "user",
-        content: `AVAILABLE SPACES (with their intents/purposes):
-${spacesDescription}
-
-EPISODES TO ORGANIZE:
-${episodesDescription}
-
-ASSIGNMENT TASK:
-For each episode above, follow the step-by-step process to determine which space(s) it should be assigned to.
-
-Remember:
-1. Identify the PRIMARY SUBJECT of each episode (who/what is it about?)
-2. Check if that PRIMARY SUBJECT matches what each space is about
-3. If the episode is ABOUT something else (even if it mentions related keywords), do NOT assign to that space
-4. If the episode is a META-discussion about a space's topic (not actual content), do NOT assign to that space
-5. An episode can be assigned to multiple spaces if its primary subject serves multiple intents
-6. 
Only assign if the episode's primary subject aligns with the space's intent AND confidence >= 0.75 for that space
-
-Provide your analysis and assignments using the specified JSON format.`,
-      },
-    ];
-  }
-}
-
-function parseLLMResponseWithTags(
-  response: string,
-  episodes: EpisodeData[],
-  spaces: Space[],
-): AssignmentResult[] {
-  try {
-    // Extract content from <output> tags
-    const outputMatch = response.match(/<output>([\s\S]*?)<\/output>/);
-    if (!outputMatch) {
-      logger.warn(
-        "No <output> tags found in LLM response, falling back to full response parsing",
-      );
-      return parseLLMResponse(response, episodes, spaces);
-    }
-
-    const jsonContent = outputMatch[1].trim();
-    const parsed = JSON.parse(jsonContent);
-
-    if (!Array.isArray(parsed)) {
-      logger.warn(
-        "Invalid LLM response format - expected array in <output> tags",
-      );
-      return [];
-    }
-
-    const validSpaceIds = new Set(spaces.map((s) => s.id));
-    const validEpisodeIds = new Set(episodes.map((e) => e.uuid));
-
-    return parsed
-      .filter((assignment: any) => {
-        // Validate assignment structure
-        if (
-          !assignment.episodeId ||
-          !validEpisodeIds.has(assignment.episodeId)
-        ) {
-          return false;
-        }
-
-        // Validate spaceIds array
-        if (!assignment.addSpaceId || !Array.isArray(assignment.addSpaceId)) {
-          assignment.addSpaceId = [];
-        }
-
-        // Filter out invalid space IDs
-        assignment.addSpaceId = assignment.addSpaceId.filter(
-          (spaceId: string) => validSpaceIds.has(spaceId),
-        );
-
-        return true;
-      })
-      .map((assignment: any) => ({
-        episodeId: assignment.episodeId,
-        spaceIds: assignment.addSpaceId,
-        confidence: assignment.confidence || 0.75,
-        reasoning: assignment.reasoning,
-      }));
-  } catch (error) {
-    logger.error(
-      "Error parsing LLM response with tags:",
-      error as Record<string, any>,
-    );
-    logger.debug("Raw LLM response:", { response } as Record<string, any>);
-    // Fallback to regular parsing
-    return parseLLMResponse(response, episodes, spaces);
-  }
-}
-
-function parseLLMResponse(
-  response: string,
-  episodes: EpisodeData[],
-  spaces: Space[],
-): AssignmentResult[] {
-  try {
-    // Clean the response - remove any markdown formatting
-    const cleanedResponse = response
-      .replace(/```json\n?/g, "")
-      .replace(/```\n?/g, "")
-      .trim();
-
-    const parsed = JSON.parse(cleanedResponse);
-
-    if (!parsed.assignments || !Array.isArray(parsed.assignments)) {
-      logger.warn("Invalid LLM response format - no assignments array");
-      return [];
-    }
-
-    const validSpaceIds = new Set(spaces.map((s) => s.id));
-    const validEpisodeIds = new Set(episodes.map((e) => e.uuid));
-
-    return parsed.assignments
-      .filter((assignment: any) => {
-        // Validate assignment structure
-        if (
-          !assignment.episodeId ||
-          !validEpisodeIds.has(assignment.episodeId)
-        ) {
-          return false;
-        }
-
-        if (!assignment.spaceIds || !Array.isArray(assignment.spaceIds)) {
-          return false;
-        }
-
-        // Filter out invalid space IDs
-        assignment.spaceIds = assignment.spaceIds.filter((spaceId: string) =>
-          validSpaceIds.has(spaceId),
-        );
-
-        return true;
-      })
-      .map((assignment: any) => ({
-        episodeId: assignment.episodeId,
-        spaceIds: assignment.spaceIds,
-        confidence: assignment.confidence || 0.75,
-        reasoning: assignment.reasoning,
-      }));
-  } catch (error) {
-    logger.error(
-      "Error parsing LLM response:",
-      error as Record<string, any>,
-    );
-    logger.debug("Raw LLM response:", { response } as Record<string, any>);
-    return [];
-  }
-}
-
 // Helper function to trigger the task
 export async function triggerSpaceAssignment(payload: SpaceAssignmentPayload) {
   return await spaceAssignmentTask.trigger(payload, {
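The net effect of this refactor is that both queue providers funnel into the same `processSpaceAssignment` logic, and this file shrinks to a thin Trigger.dev wrapper. A hedged sketch of a call site, assuming the payload shape is unchanged from the deleted `SpaceAssignmentPayload` interface above (the IDs are placeholders):

```
import { triggerSpaceAssignment } from "~/trigger/spaces/space-assignment";

// Hypothetical call site: re-run assignment for two specific episodes.
async function reassignEpisodes() {
  const handle = await triggerSpaceAssignment({
    userId: "user_123",
    workspaceId: "ws_456",
    mode: "episode",
    episodeIds: ["ep_a", "ep_b"],
    batchSize: 20,
  });
  console.log(`queued space assignment run ${handle.id}`);
}
```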
diff --git a/apps/webapp/app/trigger/spaces/space-pattern.ts b/apps/webapp/app/trigger/spaces/space-pattern.ts
deleted file mode 100644
index 89a6263..0000000
--- a/apps/webapp/app/trigger/spaces/space-pattern.ts
+++ /dev/null
@@ -1,557 +0,0 @@
-import { task } from "@trigger.dev/sdk/v3";
-import { logger } from "~/services/logger.service";
-import { makeModelCall } from "~/lib/model.server";
-import { runQuery } from "~/lib/neo4j.server";
-import type { CoreMessage } from "ai";
-import { z } from "zod";
-import {
-  EXPLICIT_PATTERN_TYPES,
-  IMPLICIT_PATTERN_TYPES,
-  type SpacePattern,
-  type PatternDetectionResult,
-} from "@core/types";
-import { createSpacePattern, getSpace } from "../utils/space-utils";
-
-interface SpacePatternPayload {
-  userId: string;
-  workspaceId: string;
-  spaceId: string;
-  triggerSource?:
-    | "summary_complete"
-    | "manual"
-    | "assignment"
-    | "scheduled"
-    | "new_space"
-    | "growth_threshold"
-    | "ingestion_complete";
-}
-
-interface SpaceStatementData {
-  uuid: string;
-  fact: string;
-  subject: string;
-  predicate: string;
-  object: string;
-  createdAt: Date;
-  validAt: Date;
-  content?: string; // For implicit pattern analysis
-}
-
-interface SpaceThemeData {
-  themes: string[];
-  summary: string;
-}
-
-// Zod schemas for LLM response validation
-const ExplicitPatternSchema = z.object({
-  name: z.string(),
-  type: z.string(),
-  summary: z.string(),
-  evidence: z.array(z.string()),
-  confidence: z.number().min(0).max(1),
-});
-
-const ImplicitPatternSchema = z.object({
-  name: z.string(),
-  type: z.string(),
-  summary: z.string(),
-  evidence: z.array(z.string()),
-  confidence: z.number().min(0).max(1),
-});
-
-const PatternAnalysisSchema = z.object({
-  explicitPatterns: z.array(ExplicitPatternSchema),
-  implicitPatterns: z.array(ImplicitPatternSchema),
-});
-
-const CONFIG = {
-  minStatementsForPatterns: 5,
-  maxPatternsPerSpace: 20,
-  minPatternConfidence: 0.85,
-};
-
-export const spacePatternTask = task({
-  id: "space-pattern",
-  run: async (payload: SpacePatternPayload) => {
-    const { userId, workspaceId, spaceId, triggerSource = "manual" } = payload;
-
-    logger.info(`Starting space pattern detection`, {
-      userId,
-      workspaceId,
-      spaceId,
-      triggerSource,
-    });
-
-    try {
-      // Get space data and check if it has enough content
-      const space = await getSpaceForPatternAnalysis(spaceId);
-      if (!space) {
-        return {
-          success: false,
-          spaceId,
-          error: "Space not found or insufficient data",
-        };
-      }
-
-      // Get statements for pattern analysis
-      const statements = await getSpaceStatementsForPatterns(spaceId, userId);
-
-      if (statements.length < CONFIG.minStatementsForPatterns) {
-        logger.info(
-          `Space ${spaceId} has insufficient statements (${statements.length}) for pattern detection`,
-        );
-        return {
-          success: true,
-          spaceId,
-          triggerSource,
-          patterns: {
-            explicitPatterns: [],
-            implicitPatterns: [],
-            totalPatternsFound: 0,
-          },
-        };
-      }
-
-      // Detect patterns
-      const patternResult = await detectSpacePatterns(space, statements);
-
-      if (patternResult) {
-        // Store patterns
-        await storePatterns(
-          patternResult.explicitPatterns,
-          patternResult.implicitPatterns,
-          spaceId,
-        );
-
-        logger.info(`Generated patterns for space ${spaceId}`, {
-          explicitPatterns: patternResult.explicitPatterns.length,
-          implicitPatterns: patternResult.implicitPatterns.length,
-          totalPatterns: patternResult.totalPatternsFound,
-          triggerSource,
-        });
-
-        return {
-          success: true,
-          spaceId,
-          triggerSource,
-          patterns: {
-            explicitPatterns: patternResult.explicitPatterns.length,
-            implicitPatterns: patternResult.implicitPatterns.length,
-            totalPatternsFound: patternResult.totalPatternsFound,
-          },
-        };
-      } else {
-        logger.warn(`Failed to detect patterns for space ${spaceId}`);
-        return {
-          success: false,
-          spaceId,
-          triggerSource,
-          error: "Failed to detect patterns",
-        };
-      }
-    } catch (error) {
-      logger.error(
-        `Error in space pattern detection for space ${spaceId}:`,
-        error as Record<string, any>,
-      );
-      throw error;
-    }
-  },
-});
-
-async function getSpaceForPatternAnalysis(
-  spaceId: string,
-): Promise<SpaceThemeData | null> {
-  try {
-    const space = await getSpace(spaceId);
-
-    if (!space || !space.themes || space.themes.length === 0) {
-      logger.warn(
-        `Space ${spaceId} not found or has no themes for pattern analysis`,
-      );
-      return null;
-    }
-
-    return {
-      themes: space.themes,
-      summary: space.summary || "",
-    };
-  } catch (error) {
-    logger.error(
-      `Error getting space for pattern analysis:`,
-      error as Record<string, any>,
-    );
-    return null;
-  }
-}
-
-async function getSpaceStatementsForPatterns(
-  spaceId: string,
-  userId: string,
-): Promise<SpaceStatementData[]> {
-  const query = `
-    MATCH (s:Statement)
-    WHERE s.userId = $userId
-      AND s.spaceIds IS NOT NULL
-      AND $spaceId IN s.spaceIds
-      AND s.invalidAt IS NULL
-    MATCH (s)-[:HAS_SUBJECT]->(subj:Entity)
-    MATCH (s)-[:HAS_PREDICATE]->(pred:Entity)
-    MATCH (s)-[:HAS_OBJECT]->(obj:Entity)
-    RETURN s, subj.name as subject, pred.name as predicate, obj.name as object
-    ORDER BY s.createdAt DESC
-  `;
-
-  const result = await runQuery(query, {
-    spaceId,
-    userId,
-  });
-
-  return result.map((record) => {
-    const statement = record.get("s").properties;
-    return {
-      uuid: statement.uuid,
-      fact: statement.fact,
-      subject: record.get("subject"),
-      predicate: record.get("predicate"),
-      object: record.get("object"),
-      createdAt: new Date(statement.createdAt),
-      validAt: new Date(statement.validAt),
-      content: statement.fact, // Use fact as content for implicit analysis
-    };
-  });
-}
-
-async function detectSpacePatterns(
-  space: SpaceThemeData,
-  statements: SpaceStatementData[],
-): Promise<PatternDetectionResult | null> {
-  try {
-    // Extract explicit patterns from themes
-    const explicitPatterns = await extractExplicitPatterns(
-      space.themes,
-      space.summary,
-      statements,
-    );
-
-    // Extract implicit patterns from statement analysis
-    const implicitPatterns = await extractImplicitPatterns(statements);
-
-    return {
-      explicitPatterns,
-      implicitPatterns,
-      totalPatternsFound: explicitPatterns.length + implicitPatterns.length,
-      processingStats: {
-        statementsAnalyzed: statements.length,
-        themesProcessed: space.themes.length,
-        implicitPatternsExtracted: implicitPatterns.length,
-      },
-    };
-  } catch (error) {
-    logger.error(
-      "Error detecting space patterns:",
-      error as Record<string, any>,
-    );
-    return null;
-  }
-}
-
-async function extractExplicitPatterns(
-  themes: string[],
-  summary: string,
-  statements: SpaceStatementData[],
-): Promise<Partial<SpacePattern>[]> {
-  if (themes.length === 0) return [];
-
-  const prompt = createExplicitPatternPrompt(themes, summary, statements);
-
-  // Pattern extraction requires HIGH complexity (insight synthesis, pattern recognition)
-  let responseText = "";
-  await makeModelCall(false, prompt, (text: string) => {
-    responseText = text;
-  }, undefined, 'high');
-
-  const patterns = parseExplicitPatternResponse(responseText);
-
-  return patterns.map((pattern) => ({
-    name: pattern.name || `${pattern.type} pattern`,
-    source: "explicit" as const,
-    type: pattern.type,
-    summary: pattern.summary,
-    evidence: pattern.evidence,
-    confidence: pattern.confidence,
-    userConfirmed: "pending" as const,
- 
})); -} - -async function extractImplicitPatterns( - statements: SpaceStatementData[], -): Promise[]> { - if (statements.length < CONFIG.minStatementsForPatterns) return []; - - const prompt = createImplicitPatternPrompt(statements); - - // Implicit pattern discovery requires HIGH complexity (pattern recognition from statements) - let responseText = ""; - await makeModelCall(false, prompt, (text: string) => { - responseText = text; - }, undefined, 'high'); - - const patterns = parseImplicitPatternResponse(responseText); - - return patterns.map((pattern) => ({ - name: pattern.name || `${pattern.type} pattern`, - source: "implicit" as const, - type: pattern.type, - summary: pattern.summary, - evidence: pattern.evidence, - confidence: pattern.confidence, - userConfirmed: "pending" as const, - })); -} - -function createExplicitPatternPrompt( - themes: string[], - summary: string, - statements: SpaceStatementData[], -): CoreMessage[] { - const statementsText = statements - .map((stmt) => `[${stmt.uuid}] ${stmt.fact}`) - .join("\n"); - - const explicitTypes = Object.values(EXPLICIT_PATTERN_TYPES).join('", "'); - - return [ - { - role: "system", - content: `You are an expert at extracting structured patterns from themes and supporting evidence. - -Your task is to convert high-level themes into explicit patterns with supporting statement evidence. - -INSTRUCTIONS: -1. For each theme, create a pattern that explains what it reveals about the user -2. Give each pattern a short, descriptive name (2-4 words) -3. Find supporting statement IDs that provide evidence for each pattern -4. Assess confidence based on evidence strength and theme clarity -5. Use appropriate pattern types from these guidelines: "${explicitTypes}" - - "theme": High-level thematic content areas - - "topic": Specific subject matter or topics of interest - - "domain": Knowledge or work domains the user operates in - - "interest_area": Areas of personal interest or hobby -6. You may suggest new pattern types if none of the guidelines fit well - -RESPONSE FORMAT: -Provide your response inside tags with valid JSON. - - -{ - "explicitPatterns": [ - { - "name": "Short descriptive name for the pattern", - "type": "theme", - "summary": "Description of what this pattern reveals about the user", - "evidence": ["statement_id_1", "statement_id_2"], - "confidence": 0.85 - } - ] -} -`, - }, - { - role: "user", - content: `THEMES TO ANALYZE: -${themes.map((theme, i) => `${i + 1}. ${theme}`).join("\n")} - -SPACE SUMMARY: -${summary} - -SUPPORTING STATEMENTS: -${statementsText} - -Please extract explicit patterns from these themes and map them to supporting statement evidence.`, - }, - ]; -} - -function createImplicitPatternPrompt( - statements: SpaceStatementData[], -): CoreMessage[] { - const statementsText = statements - .map( - (stmt) => - `[${stmt.uuid}] ${stmt.fact} (${stmt.subject} β†’ ${stmt.predicate} β†’ ${stmt.object})`, - ) - .join("\n"); - - const implicitTypes = Object.values(IMPLICIT_PATTERN_TYPES).join('", "'); - - return [ - { - role: "system", - content: `You are an expert at discovering implicit behavioral patterns from statement analysis. - -Your task is to identify hidden patterns in user behavior, preferences, and habits from statement content. - -INSTRUCTIONS: -1. Analyze statement content for behavioral patterns, not explicit topics -2. Give each pattern a short, descriptive name (2-4 words) -3. Look for recurring behaviors, preferences, and working styles -4. 
Identify how the user approaches tasks, makes decisions, and interacts -5. Use appropriate pattern types from these guidelines: "${implicitTypes}" - - "preference": Personal preferences and choices - - "habit": Recurring behaviors and routines - - "workflow": Work and process patterns - - "communication_style": How user communicates and expresses ideas - - "decision_pattern": Decision-making approaches and criteria - - "temporal_pattern": Time-based behavioral patterns - - "behavioral_pattern": General behavioral tendencies - - "learning_style": How user learns and processes information - - "collaboration_style": How user works with others -6. You may suggest new pattern types if none of the guidelines fit well -7. Focus on what the statements reveal about how the user thinks, works, or behaves - -RESPONSE FORMAT: -Provide your response inside tags with valid JSON. - - -{ - "implicitPatterns": [ - { - "name": "Short descriptive name for the pattern", - "type": "preference", - "summary": "Description of what this behavioral pattern reveals", - "evidence": ["statement_id_1", "statement_id_2"], - "confidence": 0.75 - } - ] -} -`, - }, - { - role: "user", - content: `STATEMENTS TO ANALYZE FOR IMPLICIT PATTERNS: -${statementsText} - -Please identify implicit behavioral patterns, preferences, and habits from these statements.`, - }, - ]; -} - -function parseExplicitPatternResponse(response: string): Array<{ - name: string; - type: string; - summary: string; - evidence: string[]; - confidence: number; -}> { - try { - const outputMatch = response.match(/([\s\S]*?)<\/output>/); - if (!outputMatch) { - logger.warn("No tags found in explicit pattern response"); - return []; - } - - const parsed = JSON.parse(outputMatch[1].trim()); - const validationResult = z - .object({ - explicitPatterns: z.array(ExplicitPatternSchema), - }) - .safeParse(parsed); - - if (!validationResult.success) { - logger.warn("Invalid explicit pattern response format:", { - error: validationResult.error, - }); - return []; - } - - return validationResult.data.explicitPatterns.filter( - (p) => - p.confidence >= CONFIG.minPatternConfidence && p.evidence.length >= 3, // Ensure at least 3 evidence statements - ); - } catch (error) { - logger.error( - "Error parsing explicit pattern response:", - error as Record, - ); - return []; - } -} - -function parseImplicitPatternResponse(response: string): Array<{ - name: string; - type: string; - summary: string; - evidence: string[]; - confidence: number; -}> { - try { - const outputMatch = response.match(/([\s\S]*?)<\/output>/); - if (!outputMatch) { - logger.warn("No tags found in implicit pattern response"); - return []; - } - - const parsed = JSON.parse(outputMatch[1].trim()); - const validationResult = z - .object({ - implicitPatterns: z.array(ImplicitPatternSchema), - }) - .safeParse(parsed); - - if (!validationResult.success) { - logger.warn("Invalid implicit pattern response format:", { - error: validationResult.error, - }); - return []; - } - - return validationResult.data.implicitPatterns.filter( - (p) => - p.confidence >= CONFIG.minPatternConfidence && p.evidence.length >= 3, // Ensure at least 3 evidence statements - ); - } catch (error) { - logger.error( - "Error parsing implicit pattern response:", - error as Record, - ); - return []; - } -} - -async function storePatterns( - explicitPatterns: Omit< - SpacePattern, - "id" | "createdAt" | "updatedAt" | "spaceId" - >[], - implicitPatterns: Omit< - SpacePattern, - "id" | "createdAt" | "updatedAt" | "spaceId" - >[], - spaceId: 
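Both pattern parsers below gate results on the same acceptance rule: confidence at or above `minPatternConfidence` (0.85) and at least three supporting evidence statements. A standalone sketch of that rule, with illustrative candidate data:

```python
MIN_PATTERN_CONFIDENCE = 0.85  # mirrors CONFIG.minPatternConfidence
MIN_EVIDENCE = 3               # at least 3 evidence statements

def accept_pattern(pattern: dict) -> bool:
    """Keep only well-supported patterns."""
    return (
        pattern.get("confidence", 0.0) >= MIN_PATTERN_CONFIDENCE
        and len(pattern.get("evidence", [])) >= MIN_EVIDENCE
    )

candidates = [
    {"name": "Morning deep work", "confidence": 0.9, "evidence": ["s1", "s2", "s3"]},
    {"name": "Weak hunch", "confidence": 0.6, "evidence": ["s1"]},
]
print([p["name"] for p in candidates if accept_pattern(p)])  # ['Morning deep work']
```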
string, -): Promise { - try { - const allPatterns = [...explicitPatterns, ...implicitPatterns]; - - if (allPatterns.length === 0) return; - - // Store in PostgreSQL - await createSpacePattern(spaceId, allPatterns); - - logger.info(`Stored ${allPatterns.length} patterns`, { - explicit: explicitPatterns.length, - implicit: implicitPatterns.length, - }); - } catch (error) { - logger.error("Error storing patterns:", error as Record); - throw error; - } -} - -// Helper function to trigger the task -export async function triggerSpacePattern(payload: SpacePatternPayload) { - return await spacePatternTask.trigger(payload, { - concurrencyKey: `space-pattern-${payload.spaceId}`, // Prevent parallel runs for the same space - tags: [payload.userId, payload.spaceId, payload.triggerSource || "manual"], - }); -} diff --git a/apps/webapp/app/trigger/spaces/space-summary.ts b/apps/webapp/app/trigger/spaces/space-summary.ts index ceafbeb..a21547a 100644 --- a/apps/webapp/app/trigger/spaces/space-summary.ts +++ b/apps/webapp/app/trigger/spaces/space-summary.ts @@ -1,62 +1,11 @@ import { queue, task } from "@trigger.dev/sdk/v3"; import { logger } from "~/services/logger.service"; -import { SpaceService } from "~/services/space.server"; -import { makeModelCall } from "~/lib/model.server"; -import { runQuery } from "~/lib/neo4j.server"; -import { updateSpaceStatus, SPACE_STATUS } from "../utils/space-status"; -import type { CoreMessage } from "ai"; -import { z } from "zod"; -import { triggerSpacePattern } from "./space-pattern"; -import { getSpace, updateSpace } from "../utils/space-utils"; +import { + processSpaceSummary, + type SpaceSummaryPayload, +} from "~/jobs/spaces/space-summary.logic"; -import { EpisodeType } from "@core/types"; -import { getSpaceEpisodeCount } from "~/services/graphModels/space"; -import { addToQueue } from "~/lib/ingest.server"; - -interface SpaceSummaryPayload { - userId: string; - workspaceId: string; - spaceId: string; // Single space only - triggerSource?: "assignment" | "manual" | "scheduled"; -} - -interface SpaceEpisodeData { - uuid: string; - content: string; - originalContent: string; - source: string; - createdAt: Date; - validAt: Date; - metadata: any; - sessionId: string | null; -} - -interface SpaceSummaryData { - spaceId: string; - spaceName: string; - spaceDescription?: string; - contextCount: number; - summary: string; - keyEntities: string[]; - themes: string[]; - confidence: number; - lastUpdated: Date; - isIncremental: boolean; -} - -// Zod schema for LLM response validation -const SummaryResultSchema = z.object({ - summary: z.string(), - keyEntities: z.array(z.string()), - themes: z.array(z.string()), - confidence: z.number().min(0).max(1), -}); - -const CONFIG = { - maxEpisodesForSummary: 20, // Limit episodes for performance - minEpisodesForSummary: 1, // Minimum episodes to generate summary - summaryEpisodeThreshold: 5, // Minimum new episodes required to trigger summary (configurable) -}; +export type { SpaceSummaryPayload }; export const spaceSummaryQueue = queue({ name: "space-summary-queue", @@ -67,735 +16,17 @@ export const spaceSummaryTask = task({ id: "space-summary", queue: spaceSummaryQueue, run: async (payload: SpaceSummaryPayload) => { - const { userId, workspaceId, spaceId, triggerSource = "manual" } = payload; - - logger.info(`Starting space summary generation`, { - userId, - workspaceId, - spaceId, - triggerSource, + logger.info(`[Trigger.dev] Starting space summary task`, { + userId: payload.userId, + spaceId: payload.spaceId, + triggerSource: 
payload.triggerSource, }); - try { - // Update status to processing - await updateSpaceStatus(spaceId, SPACE_STATUS.PROCESSING, { - userId, - operation: "space-summary", - metadata: { triggerSource, phase: "start_summary" }, - }); - - // Generate summary for the single space - const summaryResult = await generateSpaceSummary( - spaceId, - userId, - triggerSource, - ); - - if (summaryResult) { - // Store the summary - await storeSummary(summaryResult); - - // Update status to ready after successful completion - await updateSpaceStatus(spaceId, SPACE_STATUS.READY, { - userId, - operation: "space-summary", - metadata: { - triggerSource, - phase: "completed_summary", - contextCount: summaryResult.contextCount, - confidence: summaryResult.confidence, - }, - }); - - logger.info(`Generated summary for space ${spaceId}`, { - statementCount: summaryResult.contextCount, - confidence: summaryResult.confidence, - themes: summaryResult.themes.length, - triggerSource, - }); - - return { - success: true, - spaceId, - triggerSource, - summary: { - statementCount: summaryResult.contextCount, - confidence: summaryResult.confidence, - themesCount: summaryResult.themes.length, - }, - }; - } else { - // No summary generated - this could be due to insufficient episodes or no new episodes - // This is not an error state, so update status to ready - await updateSpaceStatus(spaceId, SPACE_STATUS.READY, { - userId, - operation: "space-summary", - metadata: { - triggerSource, - phase: "no_summary_needed", - reason: "Insufficient episodes or no new episodes to summarize", - }, - }); - - logger.info( - `No summary generated for space ${spaceId} - insufficient or no new episodes`, - ); - return { - success: true, - spaceId, - triggerSource, - summary: null, - reason: "No episodes to summarize", - }; - } - } catch (error) { - // Update status to error on exception - try { - await updateSpaceStatus(spaceId, SPACE_STATUS.ERROR, { - userId, - operation: "space-summary", - metadata: { - triggerSource, - phase: "exception", - error: error instanceof Error ? error.message : "Unknown error", - }, - }); - } catch (statusError) { - logger.warn(`Failed to update status to error for space ${spaceId}`, { - statusError, - }); - } - - logger.error( - `Error in space summary generation for space ${spaceId}:`, - error as Record, - ); - throw error; - } + // Use common business logic + return await processSpaceSummary(payload); }, }); -async function generateSpaceSummary( - spaceId: string, - userId: string, - triggerSource?: "assignment" | "manual" | "scheduled", -): Promise { - try { - // 1. Get space details - const spaceService = new SpaceService(); - const space = await spaceService.getSpace(spaceId, userId); - - if (!space) { - logger.warn(`Space ${spaceId} not found for user ${userId}`); - return null; - } - - // 2. 
Check episode count threshold (skip for manual triggers) - if (triggerSource !== "manual") { - const currentEpisodeCount = await getSpaceEpisodeCount(spaceId, userId); - const lastSummaryEpisodeCount = space.contextCount || 0; - const episodeDifference = currentEpisodeCount - lastSummaryEpisodeCount; - - if ( - episodeDifference < CONFIG.summaryEpisodeThreshold || - lastSummaryEpisodeCount !== 0 - ) { - logger.info( - `Skipping summary generation for space ${spaceId}: only ${episodeDifference} new episodes (threshold: ${CONFIG.summaryEpisodeThreshold})`, - { - currentEpisodeCount, - lastSummaryEpisodeCount, - episodeDifference, - threshold: CONFIG.summaryEpisodeThreshold, - }, - ); - return null; - } - - logger.info( - `Proceeding with summary generation for space ${spaceId}: ${episodeDifference} new episodes (threshold: ${CONFIG.summaryEpisodeThreshold})`, - { - currentEpisodeCount, - lastSummaryEpisodeCount, - episodeDifference, - }, - ); - } - - // 2. Check for existing summary - const existingSummary = await getExistingSummary(spaceId); - const isIncremental = existingSummary !== null; - - // 3. Get episodes (all or new ones based on existing summary) - const episodes = await getSpaceEpisodes( - spaceId, - userId, - isIncremental ? existingSummary?.lastUpdated : undefined, - ); - - // Handle case where no new episodes exist for incremental update - if (isIncremental && episodes.length === 0) { - logger.info( - `No new episodes found for space ${spaceId}, skipping summary update`, - ); - return null; - } - - // Check minimum episode requirement for new summaries only - if (!isIncremental && episodes.length < CONFIG.minEpisodesForSummary) { - logger.info( - `Space ${spaceId} has insufficient episodes (${episodes.length}) for new summary`, - ); - return null; - } - - // 4. Process episodes using unified approach - let summaryResult; - - if (episodes.length > CONFIG.maxEpisodesForSummary) { - logger.info( - `Large space detected (${episodes.length} episodes). Processing in batches.`, - ); - - // Process in batches, each building on previous result - const batches: SpaceEpisodeData[][] = []; - for (let i = 0; i < episodes.length; i += CONFIG.maxEpisodesForSummary) { - batches.push(episodes.slice(i, i + CONFIG.maxEpisodesForSummary)); - } - - let currentSummary = existingSummary?.summary || null; - let currentThemes = existingSummary?.themes || []; - let cumulativeConfidence = 0; - - for (const [batchIndex, batch] of batches.entries()) { - logger.info( - `Processing batch ${batchIndex + 1}/${batches.length} with ${batch.length} episodes`, - ); - - const batchResult = await generateUnifiedSummary( - space.name, - space.description as string, - batch, - currentSummary, - currentThemes, - ); - - if (batchResult) { - currentSummary = batchResult.summary; - currentThemes = batchResult.themes; - cumulativeConfidence += batchResult.confidence; - } else { - logger.warn(`Failed to process batch ${batchIndex + 1}`); - } - - // Small delay between batches - if (batchIndex < batches.length - 1) { - await new Promise((resolve) => setTimeout(resolve, 500)); - } - } - - summaryResult = currentSummary - ? 
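The batch loop above is a fold: each batch is summarized against the result of the previous batch, so large spaces are processed incrementally. A compact sketch of that shape, with `summarize_batch` standing in for the LLM call (a stub only, not the real model call):

```python
from typing import Callable, Optional

MAX_EPISODES_PER_BATCH = 20  # mirrors CONFIG.maxEpisodesForSummary

def fold_summaries(
    episodes: list,
    summarize_batch: Callable[[list, Optional[str]], str],
    prior_summary: Optional[str] = None,
) -> Optional[str]:
    """Chunk episodes and fold each chunk into the running summary."""
    summary = prior_summary
    for i in range(0, len(episodes), MAX_EPISODES_PER_BATCH):
        batch = episodes[i : i + MAX_EPISODES_PER_BATCH]
        summary = summarize_batch(batch, summary)  # each call sees the prior result
    return summary

# Stub LLM call for illustration: 45 episodes -> 3 batches (20, 20, 5)
print(fold_summaries(list(range(45)), lambda b, s: f"({s}) + {len(b)} episodes"))
```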
{ - summary: currentSummary, - themes: currentThemes, - confidence: Math.min(cumulativeConfidence / batches.length, 1.0), - } - : null; - } else { - logger.info( - `Processing ${episodes.length} episodes with unified approach`, - ); - - // Use unified approach for smaller spaces - summaryResult = await generateUnifiedSummary( - space.name, - space.description as string, - episodes, - existingSummary?.summary || null, - existingSummary?.themes || [], - ); - } - - if (!summaryResult) { - logger.warn(`Failed to generate LLM summary for space ${spaceId}`); - return null; - } - - // Get the actual current counts from Neo4j - const currentEpisodeCount = await getSpaceEpisodeCount(spaceId, userId); - - return { - spaceId: space.uuid, - spaceName: space.name, - spaceDescription: space.description as string, - contextCount: currentEpisodeCount, - summary: summaryResult.summary, - keyEntities: summaryResult.keyEntities || [], - themes: summaryResult.themes, - confidence: summaryResult.confidence, - lastUpdated: new Date(), - isIncremental, - }; - } catch (error) { - logger.error( - `Error generating summary for space ${spaceId}:`, - error as Record, - ); - return null; - } -} - -async function generateUnifiedSummary( - spaceName: string, - spaceDescription: string | undefined, - episodes: SpaceEpisodeData[], - previousSummary: string | null = null, - previousThemes: string[] = [], -): Promise<{ - summary: string; - themes: string[]; - confidence: number; - keyEntities?: string[]; -} | null> { - try { - const prompt = createUnifiedSummaryPrompt( - spaceName, - spaceDescription, - episodes, - previousSummary, - previousThemes, - ); - - // Space summary generation requires HIGH complexity (creative synthesis, narrative generation) - let responseText = ""; - await makeModelCall( - false, - prompt, - (text: string) => { - responseText = text; - }, - undefined, - "high", - ); - - return parseSummaryResponse(responseText); - } catch (error) { - logger.error( - "Error generating unified summary:", - error as Record, - ); - return null; - } -} - -function createUnifiedSummaryPrompt( - spaceName: string, - spaceDescription: string | undefined, - episodes: SpaceEpisodeData[], - previousSummary: string | null, - previousThemes: string[], -): CoreMessage[] { - // If there are no episodes and no previous summary, we cannot generate a meaningful summary - if (episodes.length === 0 && previousSummary === null) { - throw new Error( - "Cannot generate summary without episodes or existing summary", - ); - } - - const episodesText = episodes - .map( - (episode) => - `- ${episode.content} (Source: ${episode.source}, Session: ${episode.sessionId || "N/A"})`, - ) - .join("\n"); - - // Extract key entities and themes from episode content - const contentWords = episodes - .map((ep) => ep.content.toLowerCase()) - .join(" ") - .split(/\s+/) - .filter((word) => word.length > 3); - - const wordFrequency = new Map(); - contentWords.forEach((word) => { - wordFrequency.set(word, (wordFrequency.get(word) || 0) + 1); - }); - - const topEntities = Array.from(wordFrequency.entries()) - .sort(([, a], [, b]) => b - a) - .slice(0, 10) - .map(([word]) => word); - - const isUpdate = previousSummary !== null; - - return [ - { - role: "system", - content: `You are an expert at analyzing and summarizing episodes within semantic spaces based on the space's intent and purpose. Your task is to ${isUpdate ? "update an existing summary by integrating new episodes" : "create a comprehensive summary of episodes"}. - -CRITICAL RULES: -1. 
Base your summary ONLY on insights derived from the actual content/episodes provided -2. Use the space's INTENT/PURPOSE (from description) to guide what to summarize and how to organize it -3. Write in a factual, neutral tone - avoid promotional language ("pivotal", "invaluable", "cutting-edge") -4. Be specific and concrete - reference actual content, patterns, and insights found in the episodes -5. If episodes are insufficient for meaningful insights, state that more data is needed - -INTENT-DRIVEN SUMMARIZATION: -Your summary should SERVE the space's intended purpose. Examples: -- "Learning React" β†’ Summarize React concepts, patterns, techniques learned -- "Project X Updates" β†’ Summarize progress, decisions, blockers, next steps -- "Health Tracking" β†’ Summarize metrics, trends, observations, insights -- "Guidelines for React" β†’ Extract actionable patterns, best practices, rules -- "Evolution of design thinking" β†’ Track how thinking changed over time, decision points -The intent defines WHY this space exists - organize content to serve that purpose. - -INSTRUCTIONS: -${ - isUpdate - ? `1. Review the existing summary and themes carefully -2. Analyze the new episodes for patterns and insights that align with the space's intent -3. Identify connecting points between existing knowledge and new episodes -4. Update the summary to seamlessly integrate new information while preserving valuable existing insights -5. Evolve themes by adding new ones or refining existing ones based on the space's purpose -6. Organize the summary to serve the space's intended use case` - : `1. Analyze the semantic content and relationships within the episodes -2. Identify topics/sections that align with the space's INTENT and PURPOSE -3. Create a coherent summary that serves the space's intended use case -4. Organize the summary based on the space's purpose (not generic frequency-based themes)` -} -${isUpdate ? "7" : "5"}. Assess your confidence in the ${isUpdate ? "updated" : ""} summary quality (0.0-1.0) - -INTENT-ALIGNED ORGANIZATION: -- Organize sections based on what serves the space's purpose -- Topics don't need minimum episode counts - relevance to intent matters most -- Each section should provide value aligned with the space's intended use -- For "guidelines" spaces: focus on actionable patterns -- For "tracking" spaces: focus on temporal patterns and changes -- For "learning" spaces: focus on concepts and insights gained -- Let the space's intent drive the structure, not rigid rules - -${ - isUpdate - ? `CONNECTION FOCUS: -- Entity relationships that span across batches/time -- Theme evolution and expansion -- Temporal patterns and progressions -- Contradictions or confirmations of existing insights -- New insights that complement existing knowledge` - : "" -} - -RESPONSE FORMAT: -Provide your response inside tags with valid JSON. Include both HTML summary and markdown format. - - -{ - "summary": "${isUpdate ? "Updated HTML summary that integrates new insights with existing knowledge. Write factually about what the statements reveal - mention specific entities, relationships, and patterns found in the data. Avoid marketing language. Use HTML tags for structure." : "Factual HTML summary based on patterns found in the statements. Report what the data actually shows - specific entities, relationships, frequencies, and concrete insights. Avoid promotional language. Use HTML tags like

<h3>, <p>, <ul>, <li> for structure. Keep it concise and evidence-based."}", - "keyEntities": ["entity1", "entity2", "entity3"], - "themes": ["${isUpdate ? 'updated_theme1", "new_theme2", "evolved_theme3' : 'theme1", "theme2", "theme3'}"], - "confidence": 0.85 -} -</output> - -JSON FORMATTING RULES: -- HTML content in summary field is allowed and encouraged -- Escape quotes within strings as \" -- Escape HTML angle brackets if needed: < and > -- Use proper HTML tags for structure:

<h3>, <h4>, <p>, <ul>, <li>, <strong>, <em>, etc. -- HTML content should be well-formed and semantic - -GUIDELINES: -${ isUpdate ? `- Preserve valuable insights from existing summary -- Integrate new information by highlighting connections -- Themes should evolve naturally, don't replace wholesale -- The updated summary should read as a coherent whole -- Make the summary user-friendly and explain what value this space provides` : `- Report only what the episodes actually reveal - be specific and concrete -- Cite actual content and patterns found in the episodes -- Avoid generic descriptions that could apply to any space -- Use neutral, factual language - no "comprehensive", "robust", "cutting-edge" etc. -- Themes must be backed by at least 3 supporting episodes with clear evidence -- Better to have fewer, well-supported themes than many weak ones -- Confidence should reflect actual data quality and coverage, not aspirational goals` -}`, - }, - { - role: "user", - content: `SPACE INFORMATION: -Name: "${spaceName}" -Intent/Purpose: ${spaceDescription || "No specific intent provided - organize naturally based on content"} - -${ isUpdate ? `EXISTING SUMMARY: -${previousSummary} - -EXISTING THEMES: -${previousThemes.join(", ")} - -NEW EPISODES TO INTEGRATE (${episodes.length} episodes):` : `EPISODES IN THIS SPACE (${episodes.length} episodes):` } -${episodesText} - -${ episodes.length > 0 ? `TOP WORDS BY FREQUENCY: -${topEntities.join(", ")}` : "" } - -${ isUpdate ? "Please identify connections between the existing summary and new episodes, then update the summary to integrate the new insights coherently. Organize the summary to SERVE the space's intent/purpose. Remember: only summarize insights from the actual episode content." : "Please analyze the episodes and provide a comprehensive summary that SERVES the space's intent/purpose. Organize sections based on what would be most valuable for this space's intended use case. If the intent is unclear, organize naturally based on content patterns. Only summarize insights from actual episode content."
-}`, - }, - ]; -} - -async function getExistingSummary(spaceId: string): Promise<{ - summary: string; - themes: string[]; - lastUpdated: Date; - contextCount: number; -} | null> { - try { - const existingSummary = await getSpace(spaceId); - - if (existingSummary?.summary) { - return { - summary: existingSummary.summary, - themes: existingSummary.themes, - lastUpdated: existingSummary.summaryGeneratedAt || new Date(), - contextCount: existingSummary.contextCount || 0, - }; - } - - return null; - } catch (error) { - logger.warn(`Failed to get existing summary for space ${spaceId}:`, { - error, - }); - return null; - } -} - -async function getSpaceEpisodes( - spaceId: string, - userId: string, - sinceDate?: Date, -): Promise { - // Query episodes directly using Space-[:HAS_EPISODE]->Episode relationships - const params: any = { spaceId, userId }; - - let dateCondition = ""; - if (sinceDate) { - dateCondition = "AND e.createdAt > $sinceDate"; - params.sinceDate = sinceDate.toISOString(); - } - - const query = ` - MATCH (space:Space {uuid: $spaceId, userId: $userId})-[:HAS_EPISODE]->(e:Episode {userId: $userId}) - WHERE e IS NOT NULL ${dateCondition} - RETURN DISTINCT e - ORDER BY e.createdAt DESC - `; - - const result = await runQuery(query, params); - - return result.map((record) => { - const episode = record.get("e").properties; - return { - uuid: episode.uuid, - content: episode.content, - originalContent: episode.originalContent, - source: episode.source, - createdAt: new Date(episode.createdAt), - validAt: new Date(episode.validAt), - metadata: JSON.parse(episode.metadata || "{}"), - sessionId: episode.sessionId, - }; - }); -} - -function parseSummaryResponse(response: string): { - summary: string; - themes: string[]; - confidence: number; - keyEntities?: string[]; -} | null { - try { - // Extract content from tags - const outputMatch = response.match(/([\s\S]*?)<\/output>/); - if (!outputMatch) { - logger.warn("No tags found in LLM summary response"); - logger.debug("Full LLM response:", { response }); - return null; - } - - let jsonContent = outputMatch[1].trim(); - - let parsed; - try { - parsed = JSON.parse(jsonContent); - } catch (jsonError) { - logger.warn("JSON parsing failed, attempting cleanup and retry", { - originalError: jsonError, - jsonContent: jsonContent.substring(0, 500) + "...", // Log first 500 chars - }); - - // More aggressive cleanup for malformed JSON - jsonContent = jsonContent - .replace(/([^\\])"/g, '$1\\"') // Escape unescaped quotes - .replace(/^"/g, '\\"') // Escape quotes at start - .replace(/\\\\"/g, '\\"'); // Fix double-escaped quotes - - parsed = JSON.parse(jsonContent); - } - - // Validate the response structure - const validationResult = SummaryResultSchema.safeParse(parsed); - if (!validationResult.success) { - logger.warn("Invalid LLM summary response format:", { - error: validationResult.error, - parsedData: parsed, - }); - return null; - } - - return validationResult.data; - } catch (error) { - logger.error( - "Error parsing LLM summary response:", - error as Record, - ); - logger.debug("Failed response content:", { response }); - return null; - } -} - -async function storeSummary(summaryData: SpaceSummaryData): Promise { - try { - // Store in PostgreSQL for API access and persistence - await updateSpace(summaryData); - - // Also store in Neo4j for graph-based queries - const query = ` - MATCH (space:Space {uuid: $spaceId}) - SET space.summary = $summary, - space.keyEntities = $keyEntities, - space.themes = $themes, - space.summaryConfidence = 
$confidence, - space.summaryContextCount = $contextCount, - space.summaryLastUpdated = datetime($lastUpdated) - RETURN space - `; - - await runQuery(query, { - spaceId: summaryData.spaceId, - summary: summaryData.summary, - keyEntities: summaryData.keyEntities, - themes: summaryData.themes, - confidence: summaryData.confidence, - contextCount: summaryData.contextCount, - lastUpdated: summaryData.lastUpdated.toISOString(), - }); - - logger.info(`Stored summary for space ${summaryData.spaceId}`, { - themes: summaryData.themes.length, - keyEntities: summaryData.keyEntities.length, - confidence: summaryData.confidence, - }); - } catch (error) { - logger.error( - `Error storing summary for space ${summaryData.spaceId}:`, - error as Record, - ); - throw error; - } -} - -/** - * Process space summary sequentially: ingest document then trigger patterns - */ -async function processSpaceSummarySequentially({ - userId, - workspaceId, - spaceId, - spaceName, - summaryContent, - triggerSource, -}: { - userId: string; - workspaceId: string; - spaceId: string; - spaceName: string; - summaryContent: string; - triggerSource: - | "summary_complete" - | "manual" - | "assignment" - | "scheduled" - | "new_space" - | "growth_threshold" - | "ingestion_complete"; -}): Promise { - // Step 1: Ingest summary as document synchronously - await ingestSpaceSummaryDocument(spaceId, userId, spaceName, summaryContent); - - logger.info( - `Successfully ingested space summary document for space ${spaceId}`, - ); - - // Step 2: Now trigger space patterns (patterns will have access to the ingested summary) - await triggerSpacePattern({ - userId, - workspaceId, - spaceId, - triggerSource, - }); - - logger.info( - `Sequential processing completed for space ${spaceId}: summary ingested β†’ patterns triggered`, - ); -} - -/** - * Ingest space summary as document synchronously - */ -async function ingestSpaceSummaryDocument( - spaceId: string, - userId: string, - spaceName: string, - summaryContent: string, -): Promise { - // Create the ingest body - const ingestBody = { - episodeBody: summaryContent, - referenceTime: new Date().toISOString(), - metadata: { - documentType: "space_summary", - spaceId, - spaceName, - generatedAt: new Date().toISOString(), - }, - source: "space", - spaceId, - sessionId: spaceId, - type: EpisodeType.DOCUMENT, - }; - - // Add to queue - await addToQueue(ingestBody, userId); - - logger.info(`Queued space summary for synchronous ingestion`); - - return; -} - // Helper function to trigger the task export async function triggerSpaceSummary(payload: SpaceSummaryPayload) { return await spaceSummaryTask.trigger(payload, { diff --git a/apps/webapp/app/trigger/utils/space-utils.ts b/apps/webapp/app/trigger/utils/space-utils.ts index 8144c56..fee6c27 100644 --- a/apps/webapp/app/trigger/utils/space-utils.ts +++ b/apps/webapp/app/trigger/utils/space-utils.ts @@ -1,4 +1,3 @@ -import { type SpacePattern } from "@core/types"; import { prisma } from "./prisma"; export const getSpace = async (spaceId: string) => { @@ -11,22 +10,6 @@ export const getSpace = async (spaceId: string) => { return space; }; -export const createSpacePattern = async ( - spaceId: string, - allPatterns: Omit< - SpacePattern, - "id" | "createdAt" | "updatedAt" | "spaceId" - >[], -) => { - return await prisma.spacePattern.createMany({ - data: allPatterns.map((pattern) => ({ - ...pattern, - spaceId, - userConfirmed: pattern.userConfirmed as any, // Temporary cast until Prisma client is regenerated - })), - }); -}; - export const updateSpace = async 
(summaryData: { spaceId: string; summary: string; @@ -41,7 +24,7 @@ export const updateSpace = async (summaryData: { summary: summaryData.summary, themes: summaryData.themes, contextCount: summaryData.contextCount, - summaryGeneratedAt: new Date().toISOString() + summaryGeneratedAt: new Date().toISOString(), }, }); }; diff --git a/apps/webapp/app/utils/mcp/memory.ts b/apps/webapp/app/utils/mcp/memory.ts index 9bc5792..fa441d7 100644 --- a/apps/webapp/app/utils/mcp/memory.ts +++ b/apps/webapp/app/utils/mcp/memory.ts @@ -1,3 +1,4 @@ +import { randomUUID } from "node:crypto"; import { EpisodeTypeEnum } from "@core/types"; import { addToQueue } from "~/lib/ingest.server"; import { logger } from "~/services/logger.service"; @@ -19,24 +20,24 @@ const SearchParamsSchema = { description: "Search query optimized for knowledge graph retrieval. Choose the right query structure based on your search intent:\n\n" + "1. **Entity-Centric Queries** (Best for graph search):\n" + - " - βœ… GOOD: \"User's preferences for code style and formatting\"\n" + - " - βœ… GOOD: \"Project authentication implementation decisions\"\n" + - " - ❌ BAD: \"user code style\"\n" + + ' - βœ… GOOD: "User\'s preferences for code style and formatting"\n' + + ' - βœ… GOOD: "Project authentication implementation decisions"\n' + + ' - ❌ BAD: "user code style"\n' + " - Format: [Person/Project] + [relationship/attribute] + [context]\n\n" + "2. **Multi-Entity Relationship Queries** (Excellent for episode graph):\n" + - " - βœ… GOOD: \"User and team discussions about API design patterns\"\n" + - " - βœ… GOOD: \"relationship between database schema and performance optimization\"\n" + - " - ❌ BAD: \"user team api design\"\n" + + ' - βœ… GOOD: "User and team discussions about API design patterns"\n' + + ' - βœ… GOOD: "relationship between database schema and performance optimization"\n' + + ' - ❌ BAD: "user team api design"\n' + " - Format: [Entity1] + [relationship type] + [Entity2] + [context]\n\n" + "3. **Semantic Question Queries** (Good for vector search):\n" + - " - βœ… GOOD: \"What causes authentication errors in production? What are the security requirements?\"\n" + - " - βœ… GOOD: \"How does caching improve API response times compared to direct database queries?\"\n" + - " - ❌ BAD: \"auth errors production\"\n" + + ' - βœ… GOOD: "What causes authentication errors in production? What are the security requirements?"\n' + + ' - βœ… GOOD: "How does caching improve API response times compared to direct database queries?"\n' + + ' - ❌ BAD: "auth errors production"\n' + " - Format: Complete natural questions with full context\n\n" + "4. **Concept Exploration Queries** (Good for BFS traversal):\n" + - " - βœ… GOOD: \"concepts and ideas related to database indexing and query optimization\"\n" + - " - βœ… GOOD: \"topics connected to user authentication and session management\"\n" + - " - ❌ BAD: \"database indexing concepts\"\n" + + ' - βœ… GOOD: "concepts and ideas related to database indexing and query optimization"\n' + + ' - βœ… GOOD: "topics connected to user authentication and session management"\n' + + ' - ❌ BAD: "database indexing concepts"\n' + " - Format: [concept] + related/connected + [domain/context]\n\n" + "Avoid keyword soup queries - use complete phrases with proper context for best results.", }, @@ -75,6 +76,11 @@ const IngestSchema = { description: "The conversation text to store. Include both what the user asked and what you answered. 
Keep it concise but complete.", }, + sessionId: { + type: "string", + description: + "IMPORTANT: Session ID (UUID) is required to track the conversation session. If you don't have a sessionId in your context, you MUST call the get_session_id tool first to obtain one before calling memory_ingest.", + }, spaceIds: { type: "array", items: { @@ -84,14 +90,14 @@ const IngestSchema = { "Optional: Array of space UUIDs (from memory_get_spaces). Add this to organize the memory by project. Example: If discussing 'core' project, include the 'core' space ID. Leave empty to store in general memory.", }, }, - required: ["message"], + required: ["message", "sessionId"], }; export const memoryTools = [ { name: "memory_ingest", description: - "Store conversation in memory for future reference. USE THIS TOOL: At the END of every conversation after fully answering the user. WHAT TO STORE: 1) User's question or request, 2) Your solution or explanation, 3) Important decisions made, 4) Key insights discovered. HOW TO USE: Put the entire conversation summary in the 'message' field. Optionally add spaceIds array to organize by project. Returns: Success confirmation with storage ID.", + "Store conversation in memory for future reference. USE THIS TOOL: At the END of every conversation after fully answering the user. WHAT TO STORE: 1) User's question or request, 2) Your solution or explanation, 3) Important decisions made, 4) Key insights discovered. HOW TO USE: Put the entire conversation summary in the 'message' field. IMPORTANT: You MUST provide a sessionId - if you don't have one in your context, call get_session_id tool first to obtain it. Optionally add spaceIds array to organize by project. Returns: Success confirmation with storage ID.", inputSchema: IngestSchema, }, { @@ -150,6 +156,20 @@ export const memoryTools = [ }, }, }, + { + name: "get_session_id", + description: + "Get a new session ID for the MCP connection. USE THIS TOOL: When you need a session ID and don't have one yet. This generates a unique UUID to identify your MCP session. IMPORTANT: If any other tool requires a sessionId parameter and you don't have one, call this tool first to get a session ID. 
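The intended calling sequence for the new `sessionId` requirement is: obtain an ID once via `get_session_id`, then reuse it for every `memory_ingest` in the same conversation. A stdlib-only sketch of that sequence (payload values are illustrative; the MCP transport is elided):

```python
import json
import uuid

def get_session_id() -> str:
    """Stand-in for the get_session_id tool: returns a fresh UUID."""
    return str(uuid.uuid4())

session_id = get_session_id()  # call once per conversation

ingest_call = {
    "name": "memory_ingest",
    "arguments": {
        "message": "User asked X; assistant answered Y.",  # illustrative summary
        "sessionId": session_id,  # now required by the schema
        "spaceIds": [],           # optional project organization
    },
}
print(json.dumps(ingest_call, indent=2))
```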
Returns: A UUID string to use as sessionId.", + inputSchema: { + type: "object", + properties: { + new: { + type: "boolean", + description: "Set to true to get a new sessionId.", + }, + }, + }, + }, { name: "get_integrations", description: @@ -243,6 +263,8 @@ export async function callMemoryTool( return await handleUserProfile(userId); case "memory_get_space": return await handleGetSpace({ ...args, userId }); + case "get_session_id": + return await handleGetSessionId(); case "get_integrations": return await handleGetIntegrations({ ...args, userId }); case "get_integration_actions": @@ -334,6 +356,7 @@ async function handleMemoryIngest(args: any) { source: args.source, type: EpisodeTypeEnum.CONVERSATION, spaceIds, + sessionId: args.sessionId, }, args.userId, ); @@ -462,7 +485,7 @@ async function handleGetSpace(args: any) { const spaceDetails = { id: space.id, name: space.name, - description: space.description, + summary: space.summary, }; return { @@ -489,6 +512,35 @@ async function handleGetSpace(args: any) { } } +// Handler for get_session_id +async function handleGetSessionId() { + try { + const sessionId = randomUUID(); + + return { + content: [ + { + type: "text", + text: JSON.stringify({ sessionId }), + }, + ], + isError: false, + }; + } catch (error) { + logger.error(`MCP get session id error: ${error}`); + + return { + content: [ + { + type: "text", + text: `Error generating session ID: ${error instanceof Error ? error.message : String(error)}`, + }, + ], + isError: true, + }; + } +} + // Handler for get_integrations async function handleGetIntegrations(args: any) { try { diff --git a/apps/webapp/package.json b/apps/webapp/package.json index aae6e45..49179ce 100644 --- a/apps/webapp/package.json +++ b/apps/webapp/package.json @@ -44,6 +44,7 @@ "@radix-ui/react-icons": "^1.3.0", "@radix-ui/react-label": "^2.0.2", "@radix-ui/react-popover": "^1.0.7", + "@radix-ui/react-progress": "^1.1.4", "@radix-ui/react-scroll-area": "^1.0.5", "@radix-ui/react-select": "^2.0.0", "@radix-ui/react-separator": "^1.1.7", @@ -53,7 +54,6 @@ "@radix-ui/react-tabs": "^1.0.4", "@radix-ui/react-toast": "^1.1.5", "@radix-ui/react-tooltip": "^1.2.7", - "@radix-ui/react-progress": "^1.1.4", "@remix-run/express": "2.16.7", "@remix-run/node": "2.1.0", "@remix-run/react": "2.16.7", @@ -80,6 +80,7 @@ "@tiptap/pm": "^2.11.9", "@tiptap/react": "^2.11.9", "@tiptap/starter-kit": "2.11.9", + "@trigger.dev/python": "4.0.4", "@trigger.dev/react-hooks": "4.0.4", "@trigger.dev/sdk": "4.0.4", "ai": "5.0.78", @@ -125,25 +126,25 @@ "react": "^18.2.0", "react-calendar-heatmap": "^1.10.0", "react-dom": "^18.2.0", + "react-hotkeys-hook": "^4.5.0", "react-markdown": "10.1.0", "react-resizable-panels": "^1.0.9", - "react-hotkeys-hook": "^4.5.0", "react-virtualized": "^9.22.6", - "resumable-stream": "2.2.8", "remix-auth": "^4.2.0", "remix-auth-oauth2": "^3.4.1", "remix-themes": "^2.0.4", "remix-typedjson": "0.3.1", "remix-utils": "^7.7.0", + "resumable-stream": "2.2.8", "sigma": "^3.0.2", - "stripe": "19.0.0", "simple-oauth2": "^5.1.0", + "stripe": "19.0.0", "tailwind-merge": "^2.6.0", - "tiptap-markdown": "0.9.0", "tailwind-scrollbar-hide": "^2.0.0", "tailwindcss-animate": "^1.0.7", "tailwindcss-textshadow": "^2.1.3", "tiny-invariant": "^1.3.1", + "tiptap-markdown": "0.9.0", "zod": "3.25.76", "zod-error": "1.5.0", "zod-validation-error": "^1.5.0" diff --git a/apps/webapp/python/README.md b/apps/webapp/python/README.md new file mode 100644 index 0000000..f44116b --- /dev/null +++ b/apps/webapp/python/README.md @@ -0,0 +1,299 @@ +# 
BERT Topic Modeling CLI for Echo Episodes + +This CLI tool performs topic modeling on Echo episodes using BERTopic. It connects to Neo4j, retrieves episodes with their pre-computed embeddings for a given user, and discovers thematic clusters using HDBSCAN clustering. + +## Features + +- Connects to Neo4j database to fetch episodes +- Uses pre-computed embeddings (no need to regenerate them) +- Performs semantic topic clustering with BERTopic +- Displays topics with: + - Top keywords per topic + - Episode count per topic + - Sample episodes for each topic +- Configurable minimum topic size +- Environment variable support for easy configuration + +## Prerequisites + +- Python 3.8+ +- Access to Neo4j database with episodes stored +- Pre-computed embeddings stored in Neo4j (in `contentEmbedding` field) + +## Installation + +1. Navigate to the python directory: + +```bash +cd apps/webapp/python +``` + +2. Install dependencies: + +```bash +pip install -r requirements.txt +``` + +## Configuration + +The CLI can read Neo4j connection details from: + +1. **Environment variables** (recommended) - Create a `.env` file or export: + + ```bash + export NEO4J_URI=bolt://localhost:7687 + export NEO4J_USERNAME=neo4j + export NEO4J_PASSWORD=your_password + ``` + +2. **Command-line options** - Pass credentials directly as flags + +3. **From project root** - The tool automatically loads `.env` from the project root + +## Usage + +### Basic Usage + +Using environment variables (most common), pass the user ID as the only argument: + +```bash +python main.py <user-id> +``` + +### Advanced Options + +```bash +python main.py <user-id> [OPTIONS] +``` + +**Options:** + +- `--min-topic-size INTEGER`: Minimum number of episodes per topic (default: 10) +- `--nr-topics INTEGER`: Target number of topics for reduction (optional) +- `--propose-spaces`: Generate space proposals using OpenAI (requires OPENAI_API_KEY) +- `--openai-api-key TEXT`: OpenAI API key for space proposals (or use OPENAI_API_KEY env var) +- `--json`: Output only final results in JSON format (suppresses all other output) +- `--neo4j-uri TEXT`: Neo4j connection URI (default: bolt://localhost:7687) +- `--neo4j-username TEXT`: Neo4j username (default: neo4j) +- `--neo4j-password TEXT`: Neo4j password (required) + +### Examples + +1. **Basic usage with environment variables:** + + ```bash + python main.py user-123 + ``` + +2. **Custom minimum topic size:** + + ```bash + python main.py user-123 --min-topic-size 20 + ``` + +3. **Explicit credentials:** + + ```bash + python main.py user-123 \ + --neo4j-uri bolt://neo4j:7687 \ + --neo4j-username neo4j \ + --neo4j-password mypassword + ``` + +4. **Using Docker Compose Neo4j:** + ```bash + python main.py user-123 \ + --neo4j-uri bolt://localhost:7687 \ + --neo4j-password 27192e6432564f4788d55c15131bd5ac + ``` + +5. **With space proposals:** + ```bash + python main.py user-123 --propose-spaces + ``` + +6. **JSON output mode (for programmatic use):** + ```bash + python main.py user-123 --json + ``` + +7. 
**JSON output with space proposals:** + ```bash + python main.py user-123 --propose-spaces --json + ``` + +### Get Help + +```bash +python main.py --help +``` + +## Output Formats + +### Human-Readable Output (Default) + +The CLI outputs: + +``` +================================================================================ +BERT TOPIC MODELING FOR ECHO EPISODES +================================================================================ +User ID: user-123 +Min Topic Size: 20 +================================================================================ + +βœ“ Connected to Neo4j at bolt://localhost:7687 +βœ“ Fetched 150 episodes with embeddings + +πŸ” Running BERTopic analysis (min_topic_size=20)... +βœ“ Topic modeling complete + +================================================================================ +TOPIC MODELING RESULTS +================================================================================ +Total Topics Found: 5 +Total Episodes: 150 +================================================================================ + +──────────────────────────────────────────────────────────────────────────────── +Topic 0: 45 episodes +──────────────────────────────────────────────────────────────────────────────── +Keywords: authentication, login, user, security, session, password, token, oauth, jwt, credentials + +Sample Episodes (showing up to 3): + 1. [uuid-123] + Discussing authentication flow for the new user login system... + + 2. [uuid-456] + Implementing OAuth2 with JWT tokens for secure sessions... + + 3. [uuid-789] + Password reset functionality with email verification... + +──────────────────────────────────────────────────────────────────────────────── +Topic 1: 32 episodes +──────────────────────────────────────────────────────────────────────────────── +Keywords: database, neo4j, query, graph, cypher, nodes, relationships, index, performance, optimization + +Sample Episodes (showing up to 3): + ... + +Topic -1 (Outliers): 8 episodes + +================================================================================ +βœ“ Analysis complete! +================================================================================ + +βœ“ Neo4j connection closed +``` + +### JSON Output Mode (--json flag) + +When using the `--json` flag, the tool outputs only a clean JSON object with no debug logs: + +```json +{ + "topics": { + "0": { + "keywords": ["authentication", "login", "user", "security", "session"], + "episodeIds": ["uuid-123", "uuid-456", "uuid-789"] + }, + "1": { + "keywords": ["database", "neo4j", "query", "graph", "cypher"], + "episodeIds": ["uuid-abc", "uuid-def"] + } + }, + "spaces": [ + { + "name": "User Authentication", + "intent": "Episodes about user authentication, login systems, and security belong in this space.", + "confidence": 85, + "topics": [0, 3], + "estimatedEpisodes": 120 + } + ] +} +``` + +**JSON Output Structure:** +- `topics`: Dictionary of topic IDs with keywords and episode UUIDs +- `spaces`: Array of space proposals (only if `--propose-spaces` is used) + - `name`: Space name (2-5 words) + - `intent`: Classification intent (1-2 sentences) + - `confidence`: Confidence score (0-100) + - `topics`: Source topic IDs that form this space + - `estimatedEpisodes`: Estimated number of episodes in this space + +**Use Cases for JSON Mode:** +- Programmatic consumption by other tools +- Piping output to jq or other JSON processors +- Integration with CI/CD pipelines +- Automated space creation workflows + +## How It Works + +1. 
**Connection**: Establishes connection to Neo4j database +2. **Data Fetching**: Queries all episodes for the given userId that have: + - Non-null `contentEmbedding` field + - Non-empty content +3. **Topic Modeling**: Runs BERTopic with: + - Pre-computed embeddings (no re-embedding needed) + - HDBSCAN clustering (automatic cluster discovery) + - Keyword extraction via c-TF-IDF +4. **Results**: Displays topics with keywords and sample episodes + +## Neo4j Query + +The tool uses this Cypher query to fetch episodes: + +```cypher +MATCH (e:Episode {userId: $userId}) +WHERE e.contentEmbedding IS NOT NULL + AND size(e.contentEmbedding) > 0 + AND e.content IS NOT NULL + AND e.content <> '' +RETURN e.uuid as uuid, + e.content as content, + e.contentEmbedding as embedding, + e.createdAt as createdAt +ORDER BY e.createdAt DESC +``` + +## Tuning Parameters + +- **`--min-topic-size`**: + - Smaller values (5-10): More granular topics, may include noise + - Larger values (20-30): Broader topics, more coherent but fewer clusters + - Recommended: Start with 20 and adjust based on your data + +## Troubleshooting + +### No episodes found + +- Verify the userId exists in Neo4j +- Check that episodes have `contentEmbedding` populated +- Ensure episodes have non-empty `content` field + +### Connection errors + +- Verify Neo4j is running: `docker ps | grep neo4j` +- Check URI format: should be `bolt://host:port` +- Verify credentials are correct + +### Too few/many topics + +- Adjust `--min-topic-size` parameter +- Need more topics: decrease the value (e.g., `--min-topic-size 10`) +- Need fewer topics: increase the value (e.g., `--min-topic-size 30`) + +## Dependencies + +- `bertopic>=0.16.0` - Topic modeling +- `neo4j>=5.14.0` - Neo4j Python driver +- `click>=8.1.0` - CLI framework +- `numpy>=1.24.0` - Numerical operations +- `python-dotenv>=1.0.0` - Environment variable loading + +## License + +Part of the Echo project. diff --git a/apps/webapp/python/main.py b/apps/webapp/python/main.py new file mode 100644 index 0000000..e4fc22a --- /dev/null +++ b/apps/webapp/python/main.py @@ -0,0 +1,384 @@ +#!/usr/bin/env python3 +""" +BERT Topic Modeling CLI for Echo Episodes + +This CLI tool connects to Neo4j, retrieves episodes with their embeddings for a given userId, +and performs topic modeling using BERTopic to discover thematic clusters. +""" + +import os +import sys +import json +from typing import List, Tuple, Dict, Any +import click +import numpy as np +from neo4j import GraphDatabase +from bertopic import BERTopic +from bertopic.vectorizers import ClassTfidfTransformer +from dotenv import load_dotenv +from sklearn.feature_extraction.text import CountVectorizer +from umap import UMAP +from hdbscan import HDBSCAN + + +class Neo4jConnection: + """Manages Neo4j database connection.""" + + def __init__(self, uri: str, username: str, password: str, quiet: bool = False): + """Initialize Neo4j connection. 
+ + Args: + uri: Neo4j connection URI (e.g., bolt://localhost:7687) + username: Neo4j username + password: Neo4j password + quiet: If True, suppress output messages + """ + self.quiet = quiet + try: + self.driver = GraphDatabase.driver(uri, auth=(username, password)) + # Verify connection + self.driver.verify_connectivity() + if not quiet: + click.echo(f"βœ“ Connected to Neo4j at {uri}") + except Exception as e: + if not quiet: + click.echo(f"βœ— Failed to connect to Neo4j: {e}", err=True) + sys.exit(1) + + def close(self): + """Close the Neo4j connection.""" + if self.driver: + self.driver.close() + if not self.quiet: + click.echo("βœ“ Neo4j connection closed") + + def get_episodes_with_embeddings(self, user_id: str) -> Tuple[List[str], List[str], np.ndarray]: + """Fetch all episodes with their embeddings for a given user. + + Args: + user_id: The user ID to fetch episodes for + + Returns: + Tuple of (episode_uuids, episode_contents, embeddings_array) + """ + query = """ + MATCH (e:Episode {userId: $userId}) + WHERE e.contentEmbedding IS NOT NULL + AND size(e.contentEmbedding) > 0 + AND e.content IS NOT NULL + AND e.content <> '' + RETURN e.uuid as uuid, + e.content as content, + e.contentEmbedding as embedding, + e.createdAt as createdAt + ORDER BY e.createdAt DESC + """ + + with self.driver.session() as session: + result = session.run(query, userId=user_id) + records = list(result) + + if not records: + if not self.quiet: + click.echo(f"βœ— No episodes found for userId: {user_id}", err=True) + sys.exit(1) + + uuids = [] + contents = [] + embeddings = [] + + for record in records: + uuids.append(record["uuid"]) + contents.append(record["content"]) + embeddings.append(record["embedding"]) + + embeddings_array = np.array(embeddings, dtype=np.float32) + + if not self.quiet: + click.echo(f"βœ“ Fetched {len(contents)} episodes with embeddings") + return uuids, contents, embeddings_array + + +def run_bertopic_analysis( + contents: List[str], + embeddings: np.ndarray, + min_topic_size: int = 20, + nr_topics: int = None, + quiet: bool = False +) -> Tuple[BERTopic, List[int], List[float]]: + """Run BERTopic clustering on episode contents with improved configuration. 
+
+
+def run_bertopic_analysis(
+    contents: List[str],
+    embeddings: np.ndarray,
+    min_topic_size: int = 20,
+    nr_topics: Optional[int] = None,
+    quiet: bool = False
+) -> Tuple[BERTopic, List[int], List[float]]:
+    """Run BERTopic clustering on episode contents with improved configuration.
+
+    Args:
+        contents: List of episode content strings
+        embeddings: Pre-computed embeddings for the episodes
+        min_topic_size: Minimum number of documents per topic
+        nr_topics: Target number of topics (optional, for topic reduction)
+        quiet: If True, suppress output messages
+
+    Returns:
+        Tuple of (bertopic_model, topic_assignments, probabilities)
+    """
+    if not quiet:
+        click.echo(f"\n🔍 Running BERTopic analysis (min_topic_size={min_topic_size})...")
+
+    # Step 1: Configure UMAP for dimensionality reduction
+    # More aggressive reduction helps find distinct clusters
+    umap_model = UMAP(
+        n_neighbors=15,   # Balance between local/global structure
+        n_components=5,   # Reduce to 5 dimensions
+        min_dist=0.0,     # Allow tight clusters
+        metric='cosine',  # Use cosine similarity
+        random_state=42
+    )
+
+    # Step 2: Configure HDBSCAN for clustering
+    # Tuned to find more granular topics
+    hdbscan_model = HDBSCAN(
+        min_cluster_size=min_topic_size,  # Minimum episodes per topic
+        min_samples=5,                    # More sensitive to local density
+        metric='euclidean',
+        cluster_selection_method='eom',   # Excess of mass method
+        prediction_data=True
+    )
+
+    # Step 3: Configure vectorizer with stopword removal
+    # Remove common English stopwords that pollute topic keywords
+    vectorizer_model = CountVectorizer(
+        stop_words='english',  # Remove common English words
+        min_df=2,              # Word must appear in at least 2 docs
+        max_df=0.95,           # Ignore words in >95% of docs
+        ngram_range=(1, 2)     # Include unigrams and bigrams
+    )
+
+    # Step 4: Configure c-TF-IDF with better parameters
+    ctfidf_model = ClassTfidfTransformer(
+        reduce_frequent_words=True,  # Further reduce common words
+        bm25_weighting=True          # Use BM25 for better keyword extraction
+    )
+
+    # Step 5: Initialize BERTopic with all custom components
+    model = BERTopic(
+        embedding_model=None,  # Use pre-computed embeddings
+        umap_model=umap_model,
+        hdbscan_model=hdbscan_model,
+        vectorizer_model=vectorizer_model,
+        ctfidf_model=ctfidf_model,
+        top_n_words=15,        # More keywords per topic
+        nr_topics=nr_topics,   # Optional topic reduction
+        calculate_probabilities=True,
+        verbose=(not quiet)
+    )
+
+    # Fit the model with pre-computed embeddings
+    topics, probs = model.fit_transform(contents, embeddings=embeddings)
+
+    # Get topic count (topic -1 holds outliers, so it is not counted)
+    unique_topics = len(set(topics)) - (1 if -1 in topics else 0)
+    if not quiet:
+        click.echo(f"✓ Topic modeling complete - Found {unique_topics} topics")
+
+    return model, topics, probs
+
+
+def print_topic_results(
+    model: BERTopic,
+    topics: List[int],
+    uuids: List[str],
+    contents: List[str]
+):
+    """Print formatted topic results.
+
+    Args:
+        model: Fitted BERTopic model
+        topics: Topic assignments for each episode
+        uuids: Episode UUIDs
+        contents: Episode contents
+    """
+    # Get topic info
+    topic_info = model.get_topic_info()
+    num_topics = len(topic_info) - 1  # Exclude outlier topic (-1)
+
+    click.echo(f"\n{'='*80}")
+    click.echo("TOPIC MODELING RESULTS")
+    click.echo(f"{'='*80}")
+    click.echo(f"Total Topics Found: {num_topics}")
+    click.echo(f"Total Episodes: {len(contents)}")
+    click.echo(f"{'='*80}\n")
+
+    # Print each topic
+    for idx, row in topic_info.iterrows():
+        topic_id = row['Topic']
+        count = row['Count']
+
+        # Skip outlier topic
+        if topic_id == -1:
+            click.echo(f"Topic -1 (Outliers): {count} episodes\n")
+            continue
+
+        # Get top words for this topic
+        topic_words = model.get_topic(topic_id)
+
+        click.echo(f"{'─'*80}")
+        click.echo(f"Topic {topic_id}: {count} episodes")
+        click.echo(f"{'─'*80}")
+
+        # Print top keywords
+        if topic_words:
+            keywords = [word for word, score in topic_words[:10]]
+            click.echo(f"Keywords: {', '.join(keywords)}")
+
+        # Print sample episodes
+        topic_episodes = [(uuid, content) for uuid, content, topic
+                          in zip(uuids, contents, topics) if topic == topic_id]
+
+        click.echo("\nSample Episodes (showing up to 3):")
+        for i, (uuid, content) in enumerate(topic_episodes[:3]):
+            # Truncate content for display
+            truncated = content[:200] + "..." if len(content) > 200 else content
+            click.echo(f"  {i+1}. [{uuid}]")
+            click.echo(f"     {truncated}\n")
+
+        click.echo()
+
+
+def build_json_output(
+    model: BERTopic,
+    topics: List[int],
+    uuids: List[str]
+) -> Dict[str, Any]:
+    """Build JSON output structure.
+
+    Args:
+        model: Fitted BERTopic model
+        topics: Topic assignments for each episode
+        uuids: Episode UUIDs
+
+    Returns:
+        Dictionary with topics data
+    """
+    # Build topics dictionary
+    topics_dict = {}
+    topic_info = model.get_topic_info()
+
+    for idx, row in topic_info.iterrows():
+        topic_id = row['Topic']
+
+        # Skip outlier topic
+        if topic_id == -1:
+            continue
+
+        # Get keywords
+        topic_words = model.get_topic(topic_id)
+        keywords = [word for word, score in topic_words[:10]] if topic_words else []
+
+        # Get episode IDs for this topic
+        episode_ids = [uuid for uuid, topic in zip(uuids, topics) if topic == topic_id]
+
+        topics_dict[int(topic_id)] = {  # int(): json.dumps rejects numpy integer keys
+            "keywords": keywords,
+            "episodeIds": episode_ids
+        }
+
+    return {"topics": topics_dict}
+
+
+@click.command()
+@click.argument('user_id', type=str)
+@click.option(
+    '--min-topic-size',
+    default=10,
+    type=int,
+    help='Minimum number of episodes per topic (default: 10, lower = more granular topics)'
+)
+@click.option(
+    '--nr-topics',
+    default=None,
+    type=int,
+    help='Target number of topics for reduction (optional, e.g., 20 for ~20 topics)'
+)
+@click.option(
+    '--neo4j-uri',
+    envvar='NEO4J_URI',
+    default='bolt://localhost:7687',
+    help='Neo4j connection URI (default: bolt://localhost:7687)'
+)
+@click.option(
+    '--neo4j-username',
+    envvar='NEO4J_USERNAME',
+    default='neo4j',
+    help='Neo4j username (default: neo4j)'
+)
+@click.option(
+    '--neo4j-password',
+    envvar='NEO4J_PASSWORD',
+    required=True,
+    help='Neo4j password (required, can use NEO4J_PASSWORD env var)'
+)
+@click.option(
+    '--json',
+    'json_output',
+    is_flag=True,
+    default=False,
+    help='Output only final results in JSON format (suppresses all other output)'
+)
+def main(user_id: str, min_topic_size: int, nr_topics: Optional[int], neo4j_uri: str, neo4j_username: str, neo4j_password: str, json_output: bool):
+    """
+    Run BERTopic analysis on episodes for a given USER_ID.
+
+    This tool connects to Neo4j, retrieves all episodes with embeddings for the specified user,
+    and performs topic modeling to discover thematic clusters.
+
+    Examples:
+
+        # Using environment variables from .env file
+        python main.py user-123
+
+        # With custom min topic size
+        python main.py user-123 --min-topic-size 10
+
+        # With explicit Neo4j credentials
+        python main.py user-123 --neo4j-uri bolt://localhost:7687 --neo4j-password mypassword
+    """
+    # Print header only if not in JSON mode
+    if not json_output:
+        click.echo(f"\n{'='*80}")
+        click.echo("BERT TOPIC MODELING FOR ECHO EPISODES")
+        click.echo(f"{'='*80}")
+        click.echo(f"User ID: {user_id}")
+        click.echo(f"Min Topic Size: {min_topic_size}")
+        if nr_topics:
+            click.echo(f"Target Topics: ~{nr_topics}")
+        click.echo(f"{'='*80}\n")
+
+    # Connect to Neo4j (quiet mode if JSON output)
+    neo4j_conn = Neo4jConnection(neo4j_uri, neo4j_username, neo4j_password, quiet=json_output)
+
+    try:
+        # Fetch episodes with embeddings
+        uuids, contents, embeddings = neo4j_conn.get_episodes_with_embeddings(user_id)
+
+        # Run BERTopic analysis
+        model, topics, probs = run_bertopic_analysis(contents, embeddings, min_topic_size, nr_topics, quiet=json_output)
+
+        # Output results
+        if json_output:
+            # JSON output mode - only print JSON
+            output = build_json_output(model, topics, uuids)
+            click.echo(json.dumps(output, indent=2))
+        else:
+            # Normal output mode - print formatted results
+            print_topic_results(model, topics, uuids, contents)
+
+            click.echo(f"{'='*80}")
+            click.echo("✓ Analysis complete!")
+            click.echo(f"{'='*80}\n")
+
+    finally:
+        # Always close connection
+        neo4j_conn.close()
+
+
+if __name__ == '__main__':
+    # Load environment variables from .env file if present
+    load_dotenv()
+    main()
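For reviewers who want to poke at the clustering configuration without a populated Neo4j instance, here is a minimal standalone sketch (not part of this patch) that wires the same UMAP -> HDBSCAN -> BERTopic stack against fabricated embeddings. Topic quality is meaningless on random vectors; this only verifies that the components compose with pre-computed embeddings:

```python
import numpy as np
from bertopic import BERTopic
from hdbscan import HDBSCAN
from umap import UMAP

rng = np.random.default_rng(42)
# 60 fake "episodes" with 384-dim vectors standing in for contentEmbedding.
docs = [f"synthetic episode {i} about theme {i % 3}" for i in range(60)]
embeddings = rng.normal(size=(60, 384)).astype(np.float32)

model = BERTopic(
    embedding_model=None,  # embeddings are supplied, nothing is re-embedded
    umap_model=UMAP(n_neighbors=15, n_components=5, min_dist=0.0,
                    metric="cosine", random_state=42),
    hdbscan_model=HDBSCAN(min_cluster_size=5, min_samples=5,
                          prediction_data=True),
)
topics, _ = model.fit_transform(docs, embeddings=embeddings)
print(model.get_topic_info())  # expect mostly outliers (-1) on random data
```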
diff --git a/apps/webapp/python/requirements.txt b/apps/webapp/python/requirements.txt
new file mode 100644
index 0000000..3b9f751
--- /dev/null
+++ b/apps/webapp/python/requirements.txt
@@ -0,0 +1,8 @@
+bertopic>=0.16.0
+neo4j>=5.14.0
+click>=8.1.0
+numpy>=1.24.0
+python-dotenv>=1.0.0
+scikit-learn>=1.3.0
+umap-learn>=0.5.4
+hdbscan>=0.8.33
diff --git a/apps/webapp/trigger.config.ts b/apps/webapp/trigger.config.ts
index 938f4b5..65ed8f1 100644
--- a/apps/webapp/trigger.config.ts
+++ b/apps/webapp/trigger.config.ts
@@ -1,6 +1,7 @@
 import { defineConfig } from "@trigger.dev/sdk/v3";
 import { syncEnvVars } from "@trigger.dev/build/extensions/core";
 import { prismaExtension } from "@trigger.dev/build/extensions/prisma";
+import { pythonExtension } from "@trigger.dev/python/extension";
 
 export default defineConfig({
   project: process.env.TRIGGER_PROJECT_ID as string,
@@ -23,6 +24,9 @@ export default defineConfig({
   dirs: ["./app/trigger"],
   build: {
     extensions: [
+      pythonExtension({
+        scripts: ["./python/*.py"],
+      }),
       syncEnvVars(() => ({
         // ANTHROPIC_API_KEY: process.env.ANTHROPIC_API_KEY as string,
         // API_BASE_URL: process.env.API_BASE_URL as string,
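Since `pythonExtension` only packages the scripts, a cheap way to catch CLI regressions before deploying is click's test runner. A hypothetical smoke test (assuming `apps/webapp/python` is on `sys.path`; parsing `--help` exercises every declared option without touching Neo4j):

```python
from click.testing import CliRunner

from main import main  # assumes apps/webapp/python is on sys.path

runner = CliRunner()
result = runner.invoke(main, ["--help"])
assert result.exit_code == 0
assert "--min-topic-size" in result.output
assert "--json" in result.output
```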
diff --git a/docker/Dockerfile b/docker/Dockerfile
index 8833971..c994713 100644
--- a/docker/Dockerfile
+++ b/docker/Dockerfile
@@ -55,7 +55,16 @@ RUN pnpm run build --filter=webapp...
 
 # Runner
 FROM ${NODE_IMAGE} AS runner
-RUN apt-get update && apt-get install -y openssl netcat-openbsd ca-certificates
+RUN apt-get update && apt-get install -y \
+    openssl \
+    netcat-openbsd \
+    ca-certificates \
+    python3 \
+    python3-pip \
+    python3-venv \
+    gcc \
+    g++ \
+    && rm -rf /var/lib/apt/lists/*
 WORKDIR /core
 RUN corepack enable
 ENV NODE_ENV production
@@ -69,6 +78,13 @@ COPY --from=builder --chown=node:node /core/apps/webapp/build ./apps/webapp/build
 COPY --from=builder --chown=node:node /core/apps/webapp/public ./apps/webapp/public
 COPY --from=builder --chown=node:node /core/scripts ./scripts
 
+# Install BERT Python dependencies
+COPY --chown=node:node apps/webapp/python/requirements.txt ./apps/webapp/python/requirements.txt
+RUN pip3 install --no-cache-dir -r ./apps/webapp/python/requirements.txt
+
+# Copy BERT scripts
+COPY --chown=node:node apps/webapp/python/main.py ./apps/webapp/python/main.py
+
 EXPOSE 3000
 
 USER node
diff --git a/docs/self-hosting/docker.mdx b/docs/self-hosting/docker.mdx
index 2dde599..42143b5 100644
--- a/docs/self-hosting/docker.mdx
+++ b/docs/self-hosting/docker.mdx
@@ -14,9 +14,7 @@ description: "Get started with CORE in 5 minutes"
 
 ## Requirements
 
-These are the minimum requirements for running the webapp and background job components. They can run on the same, or on separate machines.
-
-It's fine to run everything on the same machine for testing. To be able to scale your workers, you will want to run them separately.
+These are the minimum requirements for running CORE.
 
 ### Prerequisites
 
@@ -27,7 +25,6 @@ To run CORE, you will need:
 
 ### System Requirements
 
-**Webapp & Database Machine:**
 - 4+ vCPU
 - 8+ GB RAM
 - 20+ GB Storage
@@ -41,7 +38,7 @@ CORE offers multiple deployment approaches depending on your needs:
 
 For a one-click deployment experience, use Railway:
 
-[![Deploy on Railway](https://railway.com/button.svg)](https://railway.com/deploy/6aEd9C?referralCode=LHvbIb&utm_medium=integration&utm_source=template&utm_campaign=generic)
+[![Deploy on Railway](https://railway.com/button.svg)](https://railway.com/deploy/core?referralCode=LHvbIb&utm_medium=integration&utm_source=template&utm_campaign=generic)
 
 Railway will automatically set up all required services and handle the infrastructure for you.
 
diff --git a/docs/self-hosting/overview.mdx b/docs/self-hosting/overview.mdx
index aea684d..5850fca 100644
--- a/docs/self-hosting/overview.mdx
+++ b/docs/self-hosting/overview.mdx
@@ -16,7 +16,7 @@ We provide version-tagged releases for self-hosted deployments. It's highly advi
 
 For a quick one-click deployment, you can use Railway:
 
-[![Deploy on Railway](https://railway.com/button.svg)](https://railway.com/deploy/6aEd9C?referralCode=LHvbIb&utm_medium=integration&utm_source=template&utm_campaign=generic)
+[![Deploy on Railway](https://railway.com/button.svg)](https://railway.com/deploy/core?referralCode=LHvbIb&utm_medium=integration&utm_source=template&utm_campaign=generic)
 
 Alternatively, you can follow our [Docker deployment guide](/self-hosting/docker) for manual setup.
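One hedged suggestion for the Docker changes above: after building the image, a quick import check confirms that the `pip3 install` step actually produced a usable stack. The script below is illustrative only (not shipped in this patch); run it with `python3` inside the container:

```python
import importlib

# Distribution names in requirements.txt differ from import names for
# python-dotenv (dotenv), scikit-learn (sklearn), and umap-learn (umap).
for name in ("bertopic", "neo4j", "click", "numpy", "dotenv", "sklearn", "umap", "hdbscan"):
    module = importlib.import_module(name)
    print(f"{name} {getattr(module, '__version__', '(version unknown)')}")
```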
diff --git a/hosting/docker/.env b/hosting/docker/.env
index 1d6bf66..56a84f4 100644
--- a/hosting/docker/.env
+++ b/hosting/docker/.env
@@ -52,4 +52,6 @@ MODEL=gpt-4.1-2025-04-14
 ## for opensource embedding model
 # EMBEDDING_MODEL=mxbai-embed-large
 
-QUEUE_PROVIDER=bullmq
\ No newline at end of file
+QUEUE_PROVIDER=bullmq
+
+TELEMETRY_ENABLED=false
\ No newline at end of file
diff --git a/hosting/docker/docker-compose.yaml b/hosting/docker/docker-compose.yaml
index 4487903..3d102ee 100644
--- a/hosting/docker/docker-compose.yaml
+++ b/hosting/docker/docker-compose.yaml
@@ -33,6 +33,7 @@ services:
       - ENABLE_EMAIL_LOGIN=${ENABLE_EMAIL_LOGIN}
       - OLLAMA_URL=${OLLAMA_URL}
       - EMBEDDING_MODEL=${EMBEDDING_MODEL}
+      - EMBEDDING_MODEL_SIZE=${EMBEDDING_MODEL_SIZE}
       - MODEL=${MODEL}
       - TRIGGER_PROJECT_ID=${TRIGGER_PROJECT_ID}
       - TRIGGER_SECRET_KEY=${TRIGGER_SECRET_KEY}
diff --git a/packages/database/prisma/migrations/20251029102022_add_metadata_to_workspace/migration.sql b/packages/database/prisma/migrations/20251029102022_add_metadata_to_workspace/migration.sql
new file mode 100644
index 0000000..fbff632
--- /dev/null
+++ b/packages/database/prisma/migrations/20251029102022_add_metadata_to_workspace/migration.sql
@@ -0,0 +1,2 @@
+-- AlterTable
+ALTER TABLE "Workspace" ADD COLUMN "metadata" JSONB NOT NULL DEFAULT '{}';
diff --git a/packages/database/prisma/schema.prisma b/packages/database/prisma/schema.prisma
index e6f67e0..ae728fa 100644
--- a/packages/database/prisma/schema.prisma
+++ b/packages/database/prisma/schema.prisma
@@ -694,6 +694,8 @@ model Workspace {
   slug String @unique
   icon String?
 
+  metadata Json @default("{}")
+
   integrations String[]
 
   userId String? @unique
diff --git a/pnpm-lock.yaml b/pnpm-lock.yaml index 1a78a8f..0d21cb5 100644 --- a/pnpm-lock.yaml +++ b/pnpm-lock.yaml @@ -31,253 +31,6 @@ importers: specifier: 5.5.4 version: 5.5.4 - apps/init: - dependencies: - '@clack/prompts': - specifier: ^0.10.0 - version: 0.10.1 - '@depot/cli': - specifier: 0.0.1-cli.2.80.0 - version: 0.0.1-cli.2.80.0 - '@opentelemetry/api': - specifier: 1.9.0 - version: 1.9.0 - '@opentelemetry/api-logs': - specifier: 0.52.1 - version: 0.52.1 - '@opentelemetry/exporter-logs-otlp-http': - specifier: 0.52.1 - version: 0.52.1(@opentelemetry/api@1.9.0) - '@opentelemetry/exporter-trace-otlp-http': - specifier: 0.52.1 - version: 0.52.1(@opentelemetry/api@1.9.0) - '@opentelemetry/instrumentation': - specifier: 0.52.1 - version: 0.52.1(@opentelemetry/api@1.9.0)(supports-color@10.0.0) - '@opentelemetry/instrumentation-fetch': - specifier: 0.52.1 - version: 0.52.1(@opentelemetry/api@1.9.0)(supports-color@10.0.0) - '@opentelemetry/resources': - specifier: 1.25.1 - version: 1.25.1(@opentelemetry/api@1.9.0) - '@opentelemetry/sdk-logs': - specifier: 0.52.1 - version: 0.52.1(@opentelemetry/api@1.9.0) - '@opentelemetry/sdk-node': - specifier: 0.52.1 - version: 0.52.1(@opentelemetry/api@1.9.0)(supports-color@10.0.0) - '@opentelemetry/sdk-trace-base': - specifier: 1.25.1 - version: 1.25.1(@opentelemetry/api@1.9.0) - '@opentelemetry/sdk-trace-node': - specifier: 1.25.1 - version: 1.25.1(@opentelemetry/api@1.9.0) - '@opentelemetry/semantic-conventions': - specifier: 1.25.1 - version: 1.25.1 - ansi-escapes: - specifier: ^7.0.0 - version: 7.0.0 - braces: - specifier: ^3.0.3 - version: 3.0.3 - c12: - specifier: ^1.11.1 - version: 1.11.2(magicast@0.3.5) - chalk: - specifier: ^5.2.0 - version: 5.4.1 - chokidar: - specifier: ^3.6.0 - version: 3.6.0 - cli-table3: - specifier: ^0.6.3 - version: 0.6.5 - commander: - specifier: ^9.4.1 - version:
9.5.0 - defu: - specifier: ^6.1.4 - version: 6.1.4 - dotenv: - specifier: ^16.4.5 - version: 16.5.0 - dotenv-expand: - specifier: ^12.0.2 - version: 12.0.2 - esbuild: - specifier: ^0.23.0 - version: 0.23.1 - eventsource: - specifier: ^3.0.2 - version: 3.0.7 - evt: - specifier: ^2.4.13 - version: 2.5.9 - fast-npm-meta: - specifier: ^0.2.2 - version: 0.2.2 - git-last-commit: - specifier: ^1.0.1 - version: 1.0.1 - gradient-string: - specifier: ^2.0.2 - version: 2.0.2 - has-flag: - specifier: ^5.0.1 - version: 5.0.1 - import-in-the-middle: - specifier: 1.11.0 - version: 1.11.0 - import-meta-resolve: - specifier: ^4.1.0 - version: 4.1.0 - ini: - specifier: ^5.0.0 - version: 5.0.0 - jsonc-parser: - specifier: 3.2.1 - version: 3.2.1 - knex: - specifier: 3.1.0 - version: 3.1.0(pg@8.16.3)(supports-color@10.0.0) - magicast: - specifier: ^0.3.4 - version: 0.3.5 - minimatch: - specifier: ^10.0.1 - version: 10.0.2 - mlly: - specifier: ^1.7.1 - version: 1.7.4 - nanoid: - specifier: 3.3.8 - version: 3.3.8 - nypm: - specifier: ^0.5.4 - version: 0.5.4 - object-hash: - specifier: ^3.0.0 - version: 3.0.0 - open: - specifier: ^10.0.3 - version: 10.2.0 - p-limit: - specifier: ^6.2.0 - version: 6.2.0 - p-retry: - specifier: ^6.1.0 - version: 6.2.1 - partysocket: - specifier: ^1.0.2 - version: 1.1.4 - pg: - specifier: 8.16.3 - version: 8.16.3 - pkg-types: - specifier: ^1.1.3 - version: 1.3.1 - polka: - specifier: ^0.5.2 - version: 0.5.2 - resolve: - specifier: ^1.22.8 - version: 1.22.10 - semver: - specifier: ^7.5.0 - version: 7.7.2 - signal-exit: - specifier: ^4.1.0 - version: 4.1.0 - source-map-support: - specifier: 0.5.21 - version: 0.5.21 - std-env: - specifier: ^3.7.0 - version: 3.9.0 - supports-color: - specifier: ^10.0.0 - version: 10.0.0 - tiny-invariant: - specifier: ^1.2.0 - version: 1.3.3 - tinyexec: - specifier: ^0.3.1 - version: 0.3.2 - tinyglobby: - specifier: ^0.2.10 - version: 0.2.14 - uuid: - specifier: 11.1.0 - version: 11.1.0 - ws: - specifier: ^8.18.0 - version: 8.18.3 - xdg-app-paths: - specifier: ^8.3.0 - version: 8.3.0 - zod: - specifier: 3.23.8 - version: 3.23.8 - zod-validation-error: - specifier: ^1.5.0 - version: 1.5.0(zod@3.23.8) - devDependencies: - '@epic-web/test-server': - specifier: ^0.1.0 - version: 0.1.6 - '@types/gradient-string': - specifier: ^1.1.2 - version: 1.1.6 - '@types/ini': - specifier: ^4.1.1 - version: 4.1.1 - '@types/object-hash': - specifier: 3.0.6 - version: 3.0.6 - '@types/polka': - specifier: ^0.5.7 - version: 0.5.7 - '@types/react': - specifier: ^18.2.48 - version: 18.2.69 - '@types/resolve': - specifier: ^1.20.6 - version: 1.20.6 - '@types/rimraf': - specifier: ^4.0.5 - version: 4.0.5 - '@types/semver': - specifier: ^7.5.0 - version: 7.7.0 - '@types/source-map-support': - specifier: 0.5.10 - version: 0.5.10 - '@types/ws': - specifier: ^8.5.3 - version: 8.18.1 - cpy-cli: - specifier: ^5.0.0 - version: 5.0.0 - execa: - specifier: ^8.0.1 - version: 8.0.1 - find-up: - specifier: ^7.0.0 - version: 7.0.0 - rimraf: - specifier: ^5.0.7 - version: 5.0.10 - ts-essentials: - specifier: 10.0.1 - version: 10.0.1(typescript@5.8.3) - tshy: - specifier: ^3.0.2 - version: 3.0.2 - tsx: - specifier: 4.17.0 - version: 4.17.0 - apps/webapp: dependencies: '@ai-sdk/amazon-bedrock': @@ -478,6 +231,9 @@ importers: '@tiptap/starter-kit': specifier: 2.11.9 version: 2.11.9 + '@trigger.dev/python': + specifier: 4.0.4 + version: 4.0.4(@trigger.dev/build@4.0.4(typescript@5.8.3))(@trigger.dev/sdk@4.0.4(ai@5.0.78(zod@3.25.76))(zod@3.25.76)) '@trigger.dev/react-hooks': specifier: 4.0.4 
version: 4.0.4(react-dom@18.3.1(react@18.3.1))(react@18.3.1) @@ -1001,10 +757,6 @@ packages: zod: optional: true - '@arr/every@1.0.1': - resolution: {integrity: sha512-UQFQ6SgyJ6LX42W8rHCs8KVc0JS0tzVL9ct4XYedJukskYVWTo49tNiMEK9C2HTyarbNiT/RVIRSY82vH+6sTg==} - engines: {node: '>=4'} - '@aws-crypto/crc32@5.2.0': resolution: {integrity: sha512-nLbCWqQNgUiwwtFsen1AdzAtvuLRsQS8rYgMuxCrdKf9kOssamGLuPwyTY9wyYblNr9+1XM8v6zoDTPPSIeANg==} engines: {node: '>=16.0.0'} @@ -1702,19 +1454,9 @@ packages: '@changesets/write@0.2.3': resolution: {integrity: sha512-Dbamr7AIMvslKnNYsLFafaVORx4H0pvCA2MHqgtNCySMe1blImEyAEOzDmcgKAkgz4+uwoLz7demIrX+JBr/Xw==} - '@clack/core@0.4.2': - resolution: {integrity: sha512-NYQfcEy8MWIxrT5Fj8nIVchfRFA26yYKJcvBS7WlUIlw2OmQOY9DhGGXMovyI5J5PpxrCPGkgUi207EBrjpBvg==} - - '@clack/prompts@0.10.1': - resolution: {integrity: sha512-Q0T02vx8ZM9XSv9/Yde0jTmmBQufZhPJfYAg2XrrrxWWaZgq1rr8nU8Hv710BQ1dhoP8rtY7YUdpGej2Qza/cw==} - '@coji/remix-auth-google@4.2.0': resolution: {integrity: sha512-H9i3fvVz0GE18GUZHpz7p7FQjuiuloTIBAPjW7cfv7lUEk+mI6WRTVLEHJBLLuTlAF1+0EbzvPRYKutxZiFdfw==} - '@colors/colors@1.5.0': - resolution: {integrity: sha512-ooWCrlZP11i8GImSjTHYHLkvFDP48nS4+204nGb1RiX/WXYHmJA2III9/e2DWVabCESdW7hBAEzHRqUn9OUVvQ==} - engines: {node: '>=0.1.90'} - '@conform-to/dom@0.6.3': resolution: {integrity: sha512-8UbO8vTP0lwpJjzo0TIrQMqWQb65Gj6WpNEVfhHouTEkFTDCP9mND2kO6w1e4TgucRfIDKleYxcFee3lNZsBcw==} @@ -1732,65 +1474,6 @@ packages: '@daybrush/utils@1.13.0': resolution: {integrity: sha512-ALK12C6SQNNHw1enXK+UO8bdyQ+jaWNQ1Af7Z3FNxeAwjYhQT7do+TRE4RASAJ3ObaS2+TJ7TXR3oz2Gzbw0PQ==} - '@depot/cli-darwin-arm64@0.0.1-cli.2.80.0': - resolution: {integrity: sha512-H7tQ0zWXVmdYXGFvt3d/v5fmquMlMM1I9JC8C2yiBZ9En9a20hzSbKoiym92RtcfqjKQFvhXL0DT6vQmJ8bgQA==} - engines: {node: '>=14'} - cpu: [arm64] - os: [darwin] - - '@depot/cli-darwin-x64@0.0.1-cli.2.80.0': - resolution: {integrity: sha512-3RDyybnCC2YSvbJCkHxXBYbKR7adUO7U00O9DHreX8EyS2E/C3kQgET+rFXR+iDdeG0h+h+At2rDng7AxvUhDw==} - engines: {node: '>=14'} - cpu: [x64] - os: [darwin] - - '@depot/cli-linux-arm64@0.0.1-cli.2.80.0': - resolution: {integrity: sha512-Fc1+Guqdsl2WM+b+76FirRjjRvEoYZX0MI4uuIQF9c9gwp6UZSZ5o3U7lAUpdkclVMKXSAW5bsIpgcOfUiJX3w==} - engines: {node: '>=14'} - cpu: [arm64] - os: [linux] - - '@depot/cli-linux-arm@0.0.1-cli.2.80.0': - resolution: {integrity: sha512-95kjKwKxP6RKkAhJoPlO6g3drEYpFWy9AC7QfvQMHDRnI3HXgbxA/BSTp4R3C4TTfp5JUPShVf8Js/NAcrUpUQ==} - engines: {node: '>=14'} - cpu: [arm] - os: [linux] - - '@depot/cli-linux-ia32@0.0.1-cli.2.80.0': - resolution: {integrity: sha512-2KoyIoYqnyIuynPN+mWbugoOB2APiNNvFLKERWOwPa0KYLSfcENT3sYxDW8tDuIw2dftHlmZN73uvXrGFeMcDQ==} - engines: {node: '>=14'} - cpu: [ia32] - os: [linux] - - '@depot/cli-linux-x64@0.0.1-cli.2.80.0': - resolution: {integrity: sha512-B8xDlXFxhYHD2tUk0jUv7SKipLfWwJVlY7ZhDQixyQzatibXakahdqpvZJgE6FLZJd3lgFIEAVsKURZHa1l57A==} - engines: {node: '>=14'} - cpu: [x64] - os: [linux] - - '@depot/cli-win32-arm64@0.0.1-cli.2.80.0': - resolution: {integrity: sha512-CPJX691MKEhnKZX25xS+opWESgPQ73HKkEwkAvvVqdBzahndcHoqAeIxYLv2hoCqrDlkH3YCF+DJleLiEP3blA==} - engines: {node: '>=14'} - cpu: [arm64] - os: [win32] - - '@depot/cli-win32-ia32@0.0.1-cli.2.80.0': - resolution: {integrity: sha512-6eewZ1zPEyNL3zcCwC01s3rZNMqMGVu1iv/EPywj3/uk4CNyUBaTlZ3StQ1BLFHPQnU0vk29yOzA5ZEuRLSyxA==} - engines: {node: '>=14'} - cpu: [ia32] - os: [win32] - - '@depot/cli-win32-x64@0.0.1-cli.2.80.0': - resolution: {integrity: 
sha512-9CRcc7D0/x4UrBkDuc35WVPMQG5gKMD1JckGLEl6VREE0Ppdny6n+hunQ8prwVc8aqzKG134XCC2U4DUjYg18A==} - engines: {node: '>=14'} - cpu: [x64] - os: [win32] - - '@depot/cli@0.0.1-cli.2.80.0': - resolution: {integrity: sha512-KvmOiQdpbamFziqnzzgqBm6RjfGhLJimBBYEOVriTxCPtVJuBIFm34xZllM36OQzPZIWpWBP+2/UnOyRG5smUg==} - engines: {node: '>=14'} - hasBin: true - '@edgefirst-dev/data@0.0.4': resolution: {integrity: sha512-VLhlvEPDJ0Sd0pE6sAYTQkIqZCXVonaWlgRJIQQHzfjTXCadF77qqHj5NxaPSc4wCul0DJO/0MnejVqJAXUiRg==} engines: {node: '>=20.0.0'} @@ -1828,10 +1511,6 @@ packages: '@emotion/memoize@0.7.4': resolution: {integrity: sha512-Ja/Vfqe3HpuzRsG1oBtWTHk2PGZ7GR+2Vz5iYGelAw8dx32K0y7PjVuxK6z1nMpZOqAFsRUPCkK1YjJ56qJlgw==} - '@epic-web/test-server@0.1.6': - resolution: {integrity: sha512-n6+dZKI2I3J8f3vjIFAYh5trgAJl0crji76+K1aoiRJqEYjdsCX/nVgBrkBCwPpBqhv9NZ1XlMqzfOafkqY5EQ==} - engines: {node: '>=20'} - '@esbuild/aix-ppc64@0.19.11': resolution: {integrity: sha512-FnzU0LyE3ySQk7UntJO4+qIiQgI7KoODnZg5xzXIrFJlKd2P2gwHsHY4927xj9y5PJmJSzULiUCWmv7iWnNa7g==} engines: {node: '>=12'} @@ -1844,12 +1523,6 @@ packages: cpu: [ppc64] os: [aix] - '@esbuild/aix-ppc64@0.23.1': - resolution: {integrity: sha512-6VhYk1diRqrhBAqpJEdjASR/+WVRtfjpqKuNw11cLiaWpAT/Uu+nokB+UJnevzy/P9C/ty6AOe0dwueMrGh/iQ==} - engines: {node: '>=18'} - cpu: [ppc64] - os: [aix] - '@esbuild/aix-ppc64@0.25.5': resolution: {integrity: sha512-9o3TMmpmftaCMepOdA5k/yDw8SfInyzWWTjYTFCX3kPSDJMROQTb8jg+h9Cnwnmm1vOzvxN7gIfB5V2ewpjtGA==} engines: {node: '>=18'} @@ -1874,12 +1547,6 @@ packages: cpu: [arm64] os: [android] - '@esbuild/android-arm64@0.23.1': - resolution: {integrity: sha512-xw50ipykXcLstLeWH7WRdQuysJqejuAGPd30vd1i5zSyKK3WE+ijzHmLKxdiCMtH1pHz78rOg0BKSYOSB/2Khw==} - engines: {node: '>=18'} - cpu: [arm64] - os: [android] - '@esbuild/android-arm64@0.25.5': resolution: {integrity: sha512-VGzGhj4lJO+TVGV1v8ntCZWJktV7SGCs3Pn1GRWI1SBFtRALoomm8k5E9Pmwg3HOAal2VDc2F9+PM/rEY6oIDg==} engines: {node: '>=18'} @@ -1904,12 +1571,6 @@ packages: cpu: [arm] os: [android] - '@esbuild/android-arm@0.23.1': - resolution: {integrity: sha512-uz6/tEy2IFm9RYOyvKl88zdzZfwEfKZmnX9Cj1BHjeSGNuGLuMD1kR8y5bteYmwqKm1tj8m4cb/aKEorr6fHWQ==} - engines: {node: '>=18'} - cpu: [arm] - os: [android] - '@esbuild/android-arm@0.25.5': resolution: {integrity: sha512-AdJKSPeEHgi7/ZhuIPtcQKr5RQdo6OO2IL87JkianiMYMPbCtot9fxPbrMiBADOWWm3T2si9stAiVsGbTQFkbA==} engines: {node: '>=18'} @@ -1934,12 +1595,6 @@ packages: cpu: [x64] os: [android] - '@esbuild/android-x64@0.23.1': - resolution: {integrity: sha512-nlN9B69St9BwUoB+jkyU090bru8L0NA3yFvAd7k8dNsVH8bi9a8cUAUSEcEEgTp2z3dbEDGJGfP6VUnkQnlReg==} - engines: {node: '>=18'} - cpu: [x64] - os: [android] - '@esbuild/android-x64@0.25.5': resolution: {integrity: sha512-D2GyJT1kjvO//drbRT3Hib9XPwQeWd9vZoBJn+bu/lVsOZ13cqNdDeqIF/xQ5/VmWvMduP6AmXvylO/PIc2isw==} engines: {node: '>=18'} @@ -1964,12 +1619,6 @@ packages: cpu: [arm64] os: [darwin] - '@esbuild/darwin-arm64@0.23.1': - resolution: {integrity: sha512-YsS2e3Wtgnw7Wq53XXBLcV6JhRsEq8hkfg91ESVadIrzr9wO6jJDMZnCQbHm1Guc5t/CdDiFSSfWP58FNuvT3Q==} - engines: {node: '>=18'} - cpu: [arm64] - os: [darwin] - '@esbuild/darwin-arm64@0.25.5': resolution: {integrity: sha512-GtaBgammVvdF7aPIgH2jxMDdivezgFu6iKpmT+48+F8Hhg5J/sfnDieg0aeG/jfSvkYQU2/pceFPDKlqZzwnfQ==} engines: {node: '>=18'} @@ -1994,12 +1643,6 @@ packages: cpu: [x64] os: [darwin] - '@esbuild/darwin-x64@0.23.1': - resolution: {integrity: sha512-aClqdgTDVPSEGgoCS8QDG37Gu8yc9lTHNAQlsztQ6ENetKEO//b8y31MMu2ZaPbn4kVsIABzVLXYLhCGekGDqw==} - engines: {node: '>=18'} - cpu: 
[x64] - os: [darwin] - '@esbuild/darwin-x64@0.25.5': resolution: {integrity: sha512-1iT4FVL0dJ76/q1wd7XDsXrSW+oLoquptvh4CLR4kITDtqi2e/xwXwdCVH8hVHU43wgJdsq7Gxuzcs6Iq/7bxQ==} engines: {node: '>=18'} @@ -2024,12 +1667,6 @@ packages: cpu: [arm64] os: [freebsd] - '@esbuild/freebsd-arm64@0.23.1': - resolution: {integrity: sha512-h1k6yS8/pN/NHlMl5+v4XPfikhJulk4G+tKGFIOwURBSFzE8bixw1ebjluLOjfwtLqY0kewfjLSrO6tN2MgIhA==} - engines: {node: '>=18'} - cpu: [arm64] - os: [freebsd] - '@esbuild/freebsd-arm64@0.25.5': resolution: {integrity: sha512-nk4tGP3JThz4La38Uy/gzyXtpkPW8zSAmoUhK9xKKXdBCzKODMc2adkB2+8om9BDYugz+uGV7sLmpTYzvmz6Sw==} engines: {node: '>=18'} @@ -2054,12 +1691,6 @@ packages: cpu: [x64] os: [freebsd] - '@esbuild/freebsd-x64@0.23.1': - resolution: {integrity: sha512-lK1eJeyk1ZX8UklqFd/3A60UuZ/6UVfGT2LuGo3Wp4/z7eRTRYY+0xOu2kpClP+vMTi9wKOfXi2vjUpO1Ro76g==} - engines: {node: '>=18'} - cpu: [x64] - os: [freebsd] - '@esbuild/freebsd-x64@0.25.5': resolution: {integrity: sha512-PrikaNjiXdR2laW6OIjlbeuCPrPaAl0IwPIaRv+SMV8CiM8i2LqVUHFC1+8eORgWyY7yhQY+2U2fA55mBzReaw==} engines: {node: '>=18'} @@ -2084,12 +1715,6 @@ packages: cpu: [arm64] os: [linux] - '@esbuild/linux-arm64@0.23.1': - resolution: {integrity: sha512-/93bf2yxencYDnItMYV/v116zff6UyTjo4EtEQjUBeGiVpMmffDNUyD9UN2zV+V3LRV3/on4xdZ26NKzn6754g==} - engines: {node: '>=18'} - cpu: [arm64] - os: [linux] - '@esbuild/linux-arm64@0.25.5': resolution: {integrity: sha512-Z9kfb1v6ZlGbWj8EJk9T6czVEjjq2ntSYLY2cw6pAZl4oKtfgQuS4HOq41M/BcoLPzrUbNd+R4BXFyH//nHxVg==} engines: {node: '>=18'} @@ -2114,12 +1739,6 @@ packages: cpu: [arm] os: [linux] - '@esbuild/linux-arm@0.23.1': - resolution: {integrity: sha512-CXXkzgn+dXAPs3WBwE+Kvnrf4WECwBdfjfeYHpMeVxWE0EceB6vhWGShs6wi0IYEqMSIzdOF1XjQ/Mkm5d7ZdQ==} - engines: {node: '>=18'} - cpu: [arm] - os: [linux] - '@esbuild/linux-arm@0.25.5': resolution: {integrity: sha512-cPzojwW2okgh7ZlRpcBEtsX7WBuqbLrNXqLU89GxWbNt6uIg78ET82qifUy3W6OVww6ZWobWub5oqZOVtwolfw==} engines: {node: '>=18'} @@ -2144,12 +1763,6 @@ packages: cpu: [ia32] os: [linux] - '@esbuild/linux-ia32@0.23.1': - resolution: {integrity: sha512-VTN4EuOHwXEkXzX5nTvVY4s7E/Krz7COC8xkftbbKRYAl96vPiUssGkeMELQMOnLOJ8k3BY1+ZY52tttZnHcXQ==} - engines: {node: '>=18'} - cpu: [ia32] - os: [linux] - '@esbuild/linux-ia32@0.25.5': resolution: {integrity: sha512-sQ7l00M8bSv36GLV95BVAdhJ2QsIbCuCjh/uYrWiMQSUuV+LpXwIqhgJDcvMTj+VsQmqAHL2yYaasENvJ7CDKA==} engines: {node: '>=18'} @@ -2174,12 +1787,6 @@ packages: cpu: [loong64] os: [linux] - '@esbuild/linux-loong64@0.23.1': - resolution: {integrity: sha512-Vx09LzEoBa5zDnieH8LSMRToj7ir/Jeq0Gu6qJ/1GcBq9GkfoEAoXvLiW1U9J1qE/Y/Oyaq33w5p2ZWrNNHNEw==} - engines: {node: '>=18'} - cpu: [loong64] - os: [linux] - '@esbuild/linux-loong64@0.25.5': resolution: {integrity: sha512-0ur7ae16hDUC4OL5iEnDb0tZHDxYmuQyhKhsPBV8f99f6Z9KQM02g33f93rNH5A30agMS46u2HP6qTdEt6Q1kg==} engines: {node: '>=18'} @@ -2204,12 +1811,6 @@ packages: cpu: [mips64el] os: [linux] - '@esbuild/linux-mips64el@0.23.1': - resolution: {integrity: sha512-nrFzzMQ7W4WRLNUOU5dlWAqa6yVeI0P78WKGUo7lg2HShq/yx+UYkeNSE0SSfSure0SqgnsxPvmAUu/vu0E+3Q==} - engines: {node: '>=18'} - cpu: [mips64el] - os: [linux] - '@esbuild/linux-mips64el@0.25.5': resolution: {integrity: sha512-kB/66P1OsHO5zLz0i6X0RxlQ+3cu0mkxS3TKFvkb5lin6uwZ/ttOkP3Z8lfR9mJOBk14ZwZ9182SIIWFGNmqmg==} engines: {node: '>=18'} @@ -2234,12 +1835,6 @@ packages: cpu: [ppc64] os: [linux] - '@esbuild/linux-ppc64@0.23.1': - resolution: {integrity: 
sha512-dKN8fgVqd0vUIjxuJI6P/9SSSe/mB9rvA98CSH2sJnlZ/OCZWO1DJvxj8jvKTfYUdGfcq2dDxoKaC6bHuTlgcw==} - engines: {node: '>=18'} - cpu: [ppc64] - os: [linux] - '@esbuild/linux-ppc64@0.25.5': resolution: {integrity: sha512-UZCmJ7r9X2fe2D6jBmkLBMQetXPXIsZjQJCjgwpVDz+YMcS6oFR27alkgGv3Oqkv07bxdvw7fyB71/olceJhkQ==} engines: {node: '>=18'} @@ -2264,12 +1859,6 @@ packages: cpu: [riscv64] os: [linux] - '@esbuild/linux-riscv64@0.23.1': - resolution: {integrity: sha512-5AV4Pzp80fhHL83JM6LoA6pTQVWgB1HovMBsLQ9OZWLDqVY8MVobBXNSmAJi//Csh6tcY7e7Lny2Hg1tElMjIA==} - engines: {node: '>=18'} - cpu: [riscv64] - os: [linux] - '@esbuild/linux-riscv64@0.25.5': resolution: {integrity: sha512-kTxwu4mLyeOlsVIFPfQo+fQJAV9mh24xL+y+Bm6ej067sYANjyEw1dNHmvoqxJUCMnkBdKpvOn0Ahql6+4VyeA==} engines: {node: '>=18'} @@ -2294,12 +1883,6 @@ packages: cpu: [s390x] os: [linux] - '@esbuild/linux-s390x@0.23.1': - resolution: {integrity: sha512-9ygs73tuFCe6f6m/Tb+9LtYxWR4c9yg7zjt2cYkjDbDpV/xVn+68cQxMXCjUpYwEkze2RcU/rMnfIXNRFmSoDw==} - engines: {node: '>=18'} - cpu: [s390x] - os: [linux] - '@esbuild/linux-s390x@0.25.5': resolution: {integrity: sha512-K2dSKTKfmdh78uJ3NcWFiqyRrimfdinS5ErLSn3vluHNeHVnBAFWC8a4X5N+7FgVE1EjXS1QDZbpqZBjfrqMTQ==} engines: {node: '>=18'} @@ -2324,12 +1907,6 @@ packages: cpu: [x64] os: [linux] - '@esbuild/linux-x64@0.23.1': - resolution: {integrity: sha512-EV6+ovTsEXCPAp58g2dD68LxoP/wK5pRvgy0J/HxPGB009omFPv3Yet0HiaqvrIrgPTBuC6wCH1LTOY91EO5hQ==} - engines: {node: '>=18'} - cpu: [x64] - os: [linux] - '@esbuild/linux-x64@0.25.5': resolution: {integrity: sha512-uhj8N2obKTE6pSZ+aMUbqq+1nXxNjZIIjCjGLfsWvVpy7gKCOL6rsY1MhRh9zLtUtAI7vpgLMK6DxjO8Qm9lJw==} engines: {node: '>=18'} @@ -2360,24 +1937,12 @@ packages: cpu: [x64] os: [netbsd] - '@esbuild/netbsd-x64@0.23.1': - resolution: {integrity: sha512-aevEkCNu7KlPRpYLjwmdcuNz6bDFiE7Z8XC4CPqExjTvrHugh28QzUXVOZtiYghciKUacNktqxdpymplil1beA==} - engines: {node: '>=18'} - cpu: [x64] - os: [netbsd] - '@esbuild/netbsd-x64@0.25.5': resolution: {integrity: sha512-WOb5fKrvVTRMfWFNCroYWWklbnXH0Q5rZppjq0vQIdlsQKuw6mdSihwSo4RV/YdQ5UCKKvBy7/0ZZYLBZKIbwQ==} engines: {node: '>=18'} cpu: [x64] os: [netbsd] - '@esbuild/openbsd-arm64@0.23.1': - resolution: {integrity: sha512-3x37szhLexNA4bXhLrCC/LImN/YtWis6WXr1VESlfVtVeoFJBRINPJ3f0a/6LV8zpikqoUg4hyXw0sFBt5Cr+Q==} - engines: {node: '>=18'} - cpu: [arm64] - os: [openbsd] - '@esbuild/openbsd-arm64@0.25.5': resolution: {integrity: sha512-7A208+uQKgTxHd0G0uqZO8UjK2R0DDb4fDmERtARjSHWxqMTye4Erz4zZafx7Di9Cv+lNHYuncAkiGFySoD+Mw==} engines: {node: '>=18'} @@ -2402,12 +1967,6 @@ packages: cpu: [x64] os: [openbsd] - '@esbuild/openbsd-x64@0.23.1': - resolution: {integrity: sha512-aY2gMmKmPhxfU+0EdnN+XNtGbjfQgwZj43k8G3fyrDM/UdZww6xrWxmDkuz2eCZchqVeABjV5BpildOrUbBTqA==} - engines: {node: '>=18'} - cpu: [x64] - os: [openbsd] - '@esbuild/openbsd-x64@0.25.5': resolution: {integrity: sha512-G4hE405ErTWraiZ8UiSoesH8DaCsMm0Cay4fsFWOOUcz8b8rC6uCvnagr+gnioEjWn0wC+o1/TAHt+It+MpIMg==} engines: {node: '>=18'} @@ -2432,12 +1991,6 @@ packages: cpu: [x64] os: [sunos] - '@esbuild/sunos-x64@0.23.1': - resolution: {integrity: sha512-RBRT2gqEl0IKQABT4XTj78tpk9v7ehp+mazn2HbUeZl1YMdaGAQqhapjGTCe7uw7y0frDi4gS0uHzhvpFuI1sA==} - engines: {node: '>=18'} - cpu: [x64] - os: [sunos] - '@esbuild/sunos-x64@0.25.5': resolution: {integrity: sha512-l+azKShMy7FxzY0Rj4RCt5VD/q8mG/e+mDivgspo+yL8zW7qEwctQ6YqKX34DTEleFAvCIUviCFX1SDZRSyMQA==} engines: {node: '>=18'} @@ -2462,12 +2015,6 @@ packages: cpu: [arm64] os: [win32] - '@esbuild/win32-arm64@0.23.1': - resolution: {integrity: 
sha512-4O+gPR5rEBe2FpKOVyiJ7wNDPA8nGzDuJ6gN4okSA1gEOYZ67N8JPk58tkWtdtPeLz7lBnY6I5L3jdsr3S+A6A==} - engines: {node: '>=18'} - cpu: [arm64] - os: [win32] - '@esbuild/win32-arm64@0.25.5': resolution: {integrity: sha512-O2S7SNZzdcFG7eFKgvwUEZ2VG9D/sn/eIiz8XRZ1Q/DO5a3s76Xv0mdBzVM5j5R639lXQmPmSo0iRpHqUUrsxw==} engines: {node: '>=18'} @@ -2492,12 +2039,6 @@ packages: cpu: [ia32] os: [win32] - '@esbuild/win32-ia32@0.23.1': - resolution: {integrity: sha512-BcaL0Vn6QwCwre3Y717nVHZbAa4UBEigzFm6VdsVdT/MbZ38xoj1X9HPkZhbmaBGUD1W8vxAfffbDe8bA6AKnQ==} - engines: {node: '>=18'} - cpu: [ia32] - os: [win32] - '@esbuild/win32-ia32@0.25.5': resolution: {integrity: sha512-onOJ02pqs9h1iMJ1PQphR+VZv8qBMQ77Klcsqv9CNW2w6yLqoURLcgERAIurY6QE63bbLuqgP9ATqajFLK5AMQ==} engines: {node: '>=18'} @@ -2522,12 +2063,6 @@ packages: cpu: [x64] os: [win32] - '@esbuild/win32-x64@0.23.1': - resolution: {integrity: sha512-BHpFFeslkWrXWyUPnbKm+xYYVYruCinGcftSBaa8zoF9hZO4BcSCFUvHVTtzpIY6YzUnYtuEhZ+C9iEXjxnasg==} - engines: {node: '>=18'} - cpu: [x64] - os: [win32] - '@esbuild/win32-x64@0.25.5': resolution: {integrity: sha512-TXv6YnJ8ZMVdX+SXWVBo/0p8LTcrUYngpWjvm91TMjjBQii7Oz11Lw5lbDV5Y0TzuhSJHwiH4hEtC1I42mMS0g==} engines: {node: '>=18'} @@ -2574,15 +2109,6 @@ packages: resolution: {integrity: sha512-1TUx3KdaU3cN7nfCdNf+UVqA/PSX29Cjcox3fZZBtINlRrXVTmUkQnCKv2MbBUbCopbK4olAT1IHl76uZyCiVA==} engines: {node: '>=14.0.0'} - '@grpc/grpc-js@1.13.4': - resolution: {integrity: sha512-GsFaMXCkMqkKIvwCQjCrwH+GHbPKBjhwo/8ZuUkWHqbI73Kky9I+pQltrlT0+MWpedCoosda53lgjYfyEPgxBg==} - engines: {node: '>=12.10.0'} - - '@grpc/proto-loader@0.7.15': - resolution: {integrity: sha512-tMXdRCfYVixjuFK+Hk0Q1s38gV9zDiDJfWL3h1rv4Qc39oILCu1TRTDt7+fGUI8K4G1Fj125Hx/ru3azECWTyQ==} - engines: {node: '>=6'} - hasBin: true - '@hapi/boom@10.0.1': resolution: {integrity: sha512-ERcCZaEjdH3OgSJlyjVk8pHIFeus91CjKP3v+MpgBNp5IvGzP2l/bRiD78nqYcKPaZdbKkK5vDBVPd2ohHBlsA==} @@ -2601,19 +2127,6 @@ packages: '@hapi/wreck@18.1.0': resolution: {integrity: sha512-0z6ZRCmFEfV/MQqkQomJ7sl/hyxvcZM7LtuVqN3vdAO4vM9eBbowl0kaqQj9EJJQab+3Uuh1GxbGIBFy4NfJ4w==} - '@hono/node-server@1.16.0': - resolution: {integrity: sha512-9LwRb5XOrTFapOABiQjGC50wRVlzUvWZsDHINCnkBniP+Q+LQf4waN0nzk9t+2kqcTsnGnieSmqpHsr6kH2bdw==} - engines: {node: '>=18.14.1'} - peerDependencies: - hono: ^4 - - '@hono/node-ws@1.2.0': - resolution: {integrity: sha512-OBPQ8OSHBw29mj00wT/xGYtB6HY54j0fNSdVZ7gZM3TUeq0So11GXaWtFf1xWxQNfumKIsj0wRuLKWfVsO5GgQ==} - engines: {node: '>=18.14.1'} - peerDependencies: - '@hono/node-server': ^1.11.1 - hono: ^4.6.0 - '@humanwhocodes/config-array@0.13.0': resolution: {integrity: sha512-DZLEEqFWQFiyK6h5YIeynKx7JlvCYWL0cImfSRXZ9l4Sg2efkFGTuFf6vzXjK1cq6IYkU+Eg/JizXw+TD2vRNw==} engines: {node: '>=10.10.0'} @@ -2680,9 +2193,6 @@ packages: '@jridgewell/trace-mapping@0.3.31': resolution: {integrity: sha512-zzNR+SdQSDJzc8joaeP8QQoCQr8NuYx2dIIytl1QeBEZHJ9uW6hebsrYgbz8hJwUQao3TWCMtmfV8Nu1twOLAw==} - '@js-sdsl/ordered-map@4.4.2': - resolution: {integrity: sha512-iUKgm52T8HOE/makSxjqoWhe95ZJA1/G1sYsGev2JDKUSS14KAgg1LHb+Ba+IPow0xflbnSkOsZcO08C7w1gYw==} - '@jsonhero/path@1.0.21': resolution: {integrity: sha512-gVUDj/92acpVoJwsVJ/RuWOaHyG4oFzn898WNGQItLCTQ+hOaVlEaImhwE1WqOTf+l3dGOUkbSiVKlb3q1hd1Q==} @@ -2844,39 +2354,20 @@ packages: '@one-ini/wasm@0.1.1': resolution: {integrity: sha512-XuySG1E38YScSJoMlqovLru4KTUNSjgVTIjyh7qMX6aNN5HY5Ct5LhRJdxO79JtTzKfzV/bnWpz+zquYrISsvw==} - '@open-draft/deferred-promise@2.2.0': - resolution: {integrity: 
sha512-CecwLWx3rhxVQF6V4bAgPS5t+So2sTbPgAzafKkVizyi7tlwpcFpdFqq+wqF2OwNBmqFuu6tOyouTuxgpMfzmA==} - '@opentelemetry/api-logs@0.203.0': resolution: {integrity: sha512-9B9RU0H7Ya1Dx/Rkyc4stuBZSGVQF27WigitInx2QQoj6KUpEFYPKoWjdFTunJYxmXmh17HeBvbMa1EhGyPmqQ==} engines: {node: '>=8.0.0'} - '@opentelemetry/api-logs@0.52.1': - resolution: {integrity: sha512-qnSqB2DQ9TPP96dl8cDubDvrUyWc0/sK81xHTK8eSUspzDM3bsewX903qclQFvVhgStjRWdC5bLb3kQqMkfV5A==} - engines: {node: '>=14'} - '@opentelemetry/api@1.9.0': resolution: {integrity: sha512-3giAOQvZiH5F9bMlMiv8+GSPMeqg0dbaeo58/0SlA9sxSqZhnUtxzX9/2FzyhS9sWQf5S0GJE0AKBrFqjpeYcg==} engines: {node: '>=8.0.0'} - '@opentelemetry/context-async-hooks@1.25.1': - resolution: {integrity: sha512-UW/ge9zjvAEmRWVapOP0qyCvPulWU6cQxGxDbWEFfGOj1VBBZAuOqTo3X6yWmDTD3Xe15ysCZChHncr2xFMIfQ==} - engines: {node: '>=14'} - peerDependencies: - '@opentelemetry/api': '>=1.0.0 <1.10.0' - '@opentelemetry/context-async-hooks@2.0.1': resolution: {integrity: sha512-XuY23lSI3d4PEqKA+7SLtAgwqIfc6E/E9eAQWLN1vlpC53ybO3o6jW4BsXo1xvz9lYyyWItfQDDLzezER01mCw==} engines: {node: ^18.19.0 || >=20.6.0} peerDependencies: '@opentelemetry/api': '>=1.0.0 <1.10.0' - '@opentelemetry/core@1.25.1': - resolution: {integrity: sha512-GeT/l6rBYWVQ4XArluLVB6WWQ8flHbdb6r2FCHC3smtdOAbrJBIv35tpV/yp9bmYUJf+xmZpu9DRTIeJVhFbEQ==} - engines: {node: '>=14'} - peerDependencies: - '@opentelemetry/api': '>=1.0.0 <1.10.0' - '@opentelemetry/core@2.0.1': resolution: {integrity: sha512-MaZk9SJIDgo1peKevlbhP6+IwIiNPNmswNL4AF0WaQJLbHXjr9SrZMgS12+iqr9ToV4ZVosCcc0f8Rg67LXjxw==} engines: {node: ^18.19.0 || >=20.6.0} @@ -2889,108 +2380,30 @@ packages: peerDependencies: '@opentelemetry/api': ^1.3.0 - '@opentelemetry/exporter-logs-otlp-http@0.52.1': - resolution: {integrity: sha512-qKgywId2DbdowPZpOBXQKp0B8DfhfIArmSic15z13Nk/JAOccBUQdPwDjDnjsM5f0ckZFMVR2t/tijTUAqDZoA==} - engines: {node: '>=14'} - peerDependencies: - '@opentelemetry/api': ^1.0.0 - - '@opentelemetry/exporter-trace-otlp-grpc@0.52.1': - resolution: {integrity: sha512-pVkSH20crBwMTqB3nIN4jpQKUEoB0Z94drIHpYyEqs7UBr+I0cpYyOR3bqjA/UasQUMROb3GX8ZX4/9cVRqGBQ==} - engines: {node: '>=14'} - peerDependencies: - '@opentelemetry/api': ^1.0.0 - '@opentelemetry/exporter-trace-otlp-http@0.203.0': resolution: {integrity: sha512-ZDiaswNYo0yq/cy1bBLJFe691izEJ6IgNmkjm4C6kE9ub/OMQqDXORx2D2j8fzTBTxONyzusbaZlqtfmyqURPw==} engines: {node: ^18.19.0 || >=20.6.0} peerDependencies: '@opentelemetry/api': ^1.3.0 - '@opentelemetry/exporter-trace-otlp-http@0.52.1': - resolution: {integrity: sha512-05HcNizx0BxcFKKnS5rwOV+2GevLTVIRA0tRgWYyw4yCgR53Ic/xk83toYKts7kbzcI+dswInUg/4s8oyA+tqg==} - engines: {node: '>=14'} - peerDependencies: - '@opentelemetry/api': ^1.0.0 - - '@opentelemetry/exporter-trace-otlp-proto@0.52.1': - resolution: {integrity: sha512-pt6uX0noTQReHXNeEslQv7x311/F1gJzMnp1HD2qgypLRPbXDeMzzeTngRTUaUbP6hqWNtPxuLr4DEoZG+TcEQ==} - engines: {node: '>=14'} - peerDependencies: - '@opentelemetry/api': ^1.0.0 - - '@opentelemetry/exporter-zipkin@1.25.1': - resolution: {integrity: sha512-RmOwSvkimg7ETwJbUOPTMhJm9A9bG1U8s7Zo3ajDh4zM7eYcycQ0dM7FbLD6NXWbI2yj7UY4q8BKinKYBQksyw==} - engines: {node: '>=14'} - peerDependencies: - '@opentelemetry/api': ^1.0.0 - - '@opentelemetry/instrumentation-fetch@0.52.1': - resolution: {integrity: sha512-EJDQXdv1ZGyBifox+8BK+hP0tg29abNPdScE+lW77bUVrThD5vn2dOo+blAS3Z8Od+eqTUTDzXVDIFjGgTK01w==} - engines: {node: '>=14'} - peerDependencies: - '@opentelemetry/api': ^1.0.0 - '@opentelemetry/instrumentation@0.203.0': resolution: {integrity: 
sha512-ke1qyM+3AK2zPuBPb6Hk/GCsc5ewbLvPNkEuELx/JmANeEp6ZjnZ+wypPAJSucTw0wvCGrUaibDSdcrGFoWxKQ==} engines: {node: ^18.19.0 || >=20.6.0} peerDependencies: '@opentelemetry/api': ^1.3.0 - '@opentelemetry/instrumentation@0.52.1': - resolution: {integrity: sha512-uXJbYU/5/MBHjMp1FqrILLRuiJCs3Ofk0MeRDk8g1S1gD47U8X3JnSwcMO1rtRo1x1a7zKaQHaoYu49p/4eSKw==} - engines: {node: '>=14'} - peerDependencies: - '@opentelemetry/api': ^1.3.0 - '@opentelemetry/otlp-exporter-base@0.203.0': resolution: {integrity: sha512-Wbxf7k+87KyvxFr5D7uOiSq/vHXWommvdnNE7vECO3tAhsA2GfOlpWINCMWUEPdHZ7tCXxw6Epp3vgx3jU7llQ==} engines: {node: ^18.19.0 || >=20.6.0} peerDependencies: '@opentelemetry/api': ^1.3.0 - '@opentelemetry/otlp-exporter-base@0.52.1': - resolution: {integrity: sha512-z175NXOtX5ihdlshtYBe5RpGeBoTXVCKPPLiQlD6FHvpM4Ch+p2B0yWKYSrBfLH24H9zjJiBdTrtD+hLlfnXEQ==} - engines: {node: '>=14'} - peerDependencies: - '@opentelemetry/api': ^1.0.0 - - '@opentelemetry/otlp-grpc-exporter-base@0.52.1': - resolution: {integrity: sha512-zo/YrSDmKMjG+vPeA9aBBrsQM9Q/f2zo6N04WMB3yNldJRsgpRBeLLwvAt/Ba7dpehDLOEFBd1i2JCoaFtpCoQ==} - engines: {node: '>=14'} - peerDependencies: - '@opentelemetry/api': ^1.0.0 - '@opentelemetry/otlp-transformer@0.203.0': resolution: {integrity: sha512-Y8I6GgoCna0qDQ2W6GCRtaF24SnvqvA8OfeTi7fqigD23u8Jpb4R5KFv/pRvrlGagcCLICMIyh9wiejp4TXu/A==} engines: {node: ^18.19.0 || >=20.6.0} peerDependencies: '@opentelemetry/api': ^1.3.0 - '@opentelemetry/otlp-transformer@0.52.1': - resolution: {integrity: sha512-I88uCZSZZtVa0XniRqQWKbjAUm73I8tpEy/uJYPPYw5d7BRdVk0RfTBQw8kSUl01oVWEuqxLDa802222MYyWHg==} - engines: {node: '>=14'} - peerDependencies: - '@opentelemetry/api': '>=1.3.0 <1.10.0' - - '@opentelemetry/propagator-b3@1.25.1': - resolution: {integrity: sha512-p6HFscpjrv7//kE+7L+3Vn00VEDUJB0n6ZrjkTYHrJ58QZ8B3ajSJhRbCcY6guQ3PDjTbxWklyvIN2ojVbIb1A==} - engines: {node: '>=14'} - peerDependencies: - '@opentelemetry/api': '>=1.0.0 <1.10.0' - - '@opentelemetry/propagator-jaeger@1.25.1': - resolution: {integrity: sha512-nBprRf0+jlgxks78G/xq72PipVK+4or9Ypntw0gVZYNTCSK8rg5SeaGV19tV920CMqBD/9UIOiFr23Li/Q8tiA==} - engines: {node: '>=14'} - peerDependencies: - '@opentelemetry/api': '>=1.0.0 <1.10.0' - - '@opentelemetry/resources@1.25.1': - resolution: {integrity: sha512-pkZT+iFYIZsVn6+GzM0kSX+u3MSLCY9md+lIJOoKl/P+gJFfxJte/60Usdp8Ce4rOs8GduUpSPNe1ddGyDT1sQ==} - engines: {node: '>=14'} - peerDependencies: - '@opentelemetry/api': '>=1.0.0 <1.10.0' - '@opentelemetry/resources@2.0.1': resolution: {integrity: sha512-dZOB3R6zvBwDKnHDTB4X1xtMArB/d324VsbiPkX/Yu0Q8T2xceRthoIVFhJdvgVM2QhGVUyX9tzwiNxGtoBJUw==} engines: {node: ^18.19.0 || >=20.6.0} @@ -3003,64 +2416,24 @@ packages: peerDependencies: '@opentelemetry/api': '>=1.4.0 <1.10.0' - '@opentelemetry/sdk-logs@0.52.1': - resolution: {integrity: sha512-MBYh+WcPPsN8YpRHRmK1Hsca9pVlyyKd4BxOC4SsgHACnl/bPp4Cri9hWhVm5+2tiQ9Zf4qSc1Jshw9tOLGWQA==} - engines: {node: '>=14'} - peerDependencies: - '@opentelemetry/api': '>=1.4.0 <1.10.0' - - '@opentelemetry/sdk-metrics@1.25.1': - resolution: {integrity: sha512-9Mb7q5ioFL4E4dDrc4wC/A3NTHDat44v4I3p2pLPSxRvqUbDIQyMVr9uK+EU69+HWhlET1VaSrRzwdckWqY15Q==} - engines: {node: '>=14'} - peerDependencies: - '@opentelemetry/api': '>=1.3.0 <1.10.0' - '@opentelemetry/sdk-metrics@2.0.1': resolution: {integrity: sha512-wf8OaJoSnujMAHWR3g+/hGvNcsC16rf9s1So4JlMiFaFHiE4HpIA3oUh+uWZQ7CNuK8gVW/pQSkgoa5HkkOl0g==} engines: {node: ^18.19.0 || >=20.6.0} peerDependencies: '@opentelemetry/api': '>=1.9.0 <1.10.0' - '@opentelemetry/sdk-node@0.52.1': - resolution: {integrity: 
sha512-uEG+gtEr6eKd8CVWeKMhH2olcCHM9dEK68pe0qE0be32BcCRsvYURhHaD1Srngh1SQcnQzZ4TP324euxqtBOJA==} - engines: {node: '>=14'} - peerDependencies: - '@opentelemetry/api': '>=1.3.0 <1.10.0' - - '@opentelemetry/sdk-trace-base@1.25.1': - resolution: {integrity: sha512-C8k4hnEbc5FamuZQ92nTOp8X/diCY56XUTnMiv9UTuJitCzaNNHAVsdm5+HLCdI8SLQsLWIrG38tddMxLVoftw==} - engines: {node: '>=14'} - peerDependencies: - '@opentelemetry/api': '>=1.0.0 <1.10.0' - '@opentelemetry/sdk-trace-base@2.0.1': resolution: {integrity: sha512-xYLlvk/xdScGx1aEqvxLwf6sXQLXCjk3/1SQT9X9AoN5rXRhkdvIFShuNNmtTEPRBqcsMbS4p/gJLNI2wXaDuQ==} engines: {node: ^18.19.0 || >=20.6.0} peerDependencies: '@opentelemetry/api': '>=1.3.0 <1.10.0' - '@opentelemetry/sdk-trace-node@1.25.1': - resolution: {integrity: sha512-nMcjFIKxnFqoez4gUmihdBrbpsEnAX/Xj16sGvZm+guceYE0NE00vLhpDVK6f3q8Q4VFI5xG8JjlXKMB/SkTTQ==} - engines: {node: '>=14'} - peerDependencies: - '@opentelemetry/api': '>=1.0.0 <1.10.0' - '@opentelemetry/sdk-trace-node@2.0.1': resolution: {integrity: sha512-UhdbPF19pMpBtCWYP5lHbTogLWx9N0EBxtdagvkn5YtsAnCBZzL7SjktG+ZmupRgifsHMjwUaCCaVmqGfSADmA==} engines: {node: ^18.19.0 || >=20.6.0} peerDependencies: '@opentelemetry/api': '>=1.0.0 <1.10.0' - '@opentelemetry/sdk-trace-web@1.25.1': - resolution: {integrity: sha512-SS6JaSkHngcBCNdWGthzcvaKGRnDw2AeP57HyTEileLToJ7WLMeV+064iRlVyoT4+e77MRp2T2dDSrmaUyxoNg==} - engines: {node: '>=14'} - peerDependencies: - '@opentelemetry/api': '>=1.0.0 <1.10.0' - - '@opentelemetry/semantic-conventions@1.25.1': - resolution: {integrity: sha512-ZDjMJJQRlyk8A1KZFCc+bCbsyrn1wTwdNt56F7twdfUfnHUZUq77/WfONCj8p72NZOyP7pNTdUWSTYC3GTbuuQ==} - engines: {node: '>=14'} - '@opentelemetry/semantic-conventions@1.36.0': resolution: {integrity: sha512-TtxJSRD8Ohxp6bKkhrm27JRHAxPczQA7idtcTOMYI+wQRRrfgqxHv1cFbCApcSnNjtXkmzFozn6jQtFrOmbjPQ==} engines: {node: '>=14'} @@ -3169,9 +2542,6 @@ packages: resolution: {integrity: sha512-+1VkjdD0QBLPodGrJUeqarH8VAIvQODIbwh9XpP5Syisf7YoQgsJKPNFoqqLQlu+VQ/tVSshMR6loPMn8U+dPg==} engines: {node: '>=14'} - '@polka/url@0.5.0': - resolution: {integrity: sha512-oZLYFEAzUKyi3SKnXvj32ZCEGH6RDnao7COuCVhDydMS9NrCSVXhM79VaKyP5+Zc33m0QXEd2DN3UkU7OsHcfw==} - '@popperjs/core@2.11.8': resolution: {integrity: sha512-P1st0aksCrn9sGZhp8GMYwBnQsbvAWsZAX44oXNNvLHGqAOcoVxmjZiohstwQ7SqKnbR47akdNi+uleWD8+g6A==} @@ -5556,6 +4926,13 @@ packages: resolution: {integrity: sha512-c5myttkNhqaqvLlEz3ttE1qEsULlD6ILBge5FAfEtMv9HVS/pNlgvMKrdFMefaGO/bE4HoxrNGdJsY683Kq32w==} engines: {node: '>=18.20.0'} + '@trigger.dev/python@4.0.4': + resolution: {integrity: sha512-46eYZZtvMsd+kahHlysOABDip2aLtyQHnR9PK+NqL7UDaKj9Rj67tG5Qb9UWgolc4W4tIDpBBedipexmJaJhtA==} + engines: {node: '>=18.20.0'} + peerDependencies: + '@trigger.dev/build': ^4.0.4 + '@trigger.dev/sdk': ^4.0.4 + '@trigger.dev/react-hooks@4.0.4': resolution: {integrity: sha512-tgyaGKwFTbVaD4QZdR5GBc2R7T/yq+vHpWw506ys75Mo9uEZN0rGmw7g5q1Pe4XJvsdDiVjcxcJ4tK8zwUM5Zg==} engines: {node: '>=18.20.0'} @@ -5726,9 +5103,6 @@ packages: '@types/geojson@7946.0.16': resolution: {integrity: sha512-6C8nqWur3j98U6+lXDfTUWIfgvZU+EumvpHKcYjujKH7woYyLj2sUmff0tRhrqM7BohUw7Pz3ZB1jj2gW9Fvmg==} - '@types/gradient-string@1.1.6': - resolution: {integrity: sha512-LkaYxluY4G5wR1M4AKQUal2q61Di1yVVCw42ImFTuaIoQVgmV0WP1xUaLB8zwb47mp82vWTpePI9JmrjEnJ7nQ==} - '@types/hast@2.3.10': resolution: {integrity: sha512-McWspRw8xx8J9HurkVBfYj0xKoE25tOFlHGdx4MJ5xORQrMGZNqJhVQWaIbm6Oyla5kYOXtDiopzKRJzEOkwJw==} @@ -5738,9 +5112,6 @@ packages: '@types/http-errors@2.0.5': resolution: {integrity: 
sha512-r8Tayk8HJnX0FztbZN7oVqGccWgw98T/0neJphO91KkmOzug1KkofZURD4UaD5uH8AqcFLfdPErnBod0u71/qg==} - '@types/ini@4.1.1': - resolution: {integrity: sha512-MIyNUZipBTbyUNnhvuXJTY7B6qNI78meck9Jbv3wk0OgNwRyOOVEKDutAkOs1snB/tx0FafyR6/SN4Ps0hZPeg==} - '@types/is-ci@3.0.4': resolution: {integrity: sha512-AkCYCmwlXeuH89DagDCzvCAyltI2v9lh3U3DqSg/GrBYoReAaWwxfXCqMx9UV5MajLZ4ZFwZzV4cABGIxk2XRw==} @@ -5810,12 +5181,6 @@ packages: '@types/normalize-package-data@2.4.4': resolution: {integrity: sha512-37i+OaWTh9qeK4LSHPsyRC7NahnGotNuZvjLSgcPzblpHB3rrCJxAOgI5gCdKm7coonsaX1Of0ILiTcnZjbfxA==} - '@types/object-hash@3.0.6': - resolution: {integrity: sha512-fOBV8C1FIu2ELinoILQ+ApxcUKz4ngq+IWUYrxSGjXzzjUALijilampwkMgEtJ+h2njAW3pi853QpzNVCHB73w==} - - '@types/polka@0.5.7': - resolution: {integrity: sha512-TH8CDXM8zoskPCNmWabtK7ziGv9Q21s4hMZLVYK5HFEfqmGXBqq/Wgi7jNELWXftZK/1J/9CezYa06x1RKeQ+g==} - '@types/prismjs@1.26.5': resolution: {integrity: sha512-AUZTa7hQ2KY5L7AmtSiqxlhWxb4ina0yd8hNbl4TWuqnv/pFP0nDMb3YrfSBf4hJVGLh2YEIBfKaBW/9UEl6IQ==} @@ -5845,16 +5210,6 @@ packages: '@types/react@18.2.69': resolution: {integrity: sha512-W1HOMUWY/1Yyw0ba5TkCV+oqynRjG7BnteBB+B7JmAK7iw3l2SW+VGOxL+akPweix6jk2NNJtyJKpn4TkpfK3Q==} - '@types/resolve@1.20.6': - resolution: {integrity: sha512-A4STmOXPhMUtHH+S6ymgE2GiBSMqf4oTvcQZMcHzokuTLVYzXTB8ttjcgxOVaAp2lGwEdzZ0J+cRbbeevQj1UQ==} - - '@types/retry@0.12.2': - resolution: {integrity: sha512-XISRgDJ2Tc5q4TRqvgJtzsRkFYNJzZrhTdtMoGVBttwzzQJkPnS3WWTFc7kuDRoPtPakl+T+OfdEUjYJj7Jbow==} - - '@types/rimraf@4.0.5': - resolution: {integrity: sha512-DTCZoIQotB2SUJnYgrEx43cQIUYOlNZz0AZPbKU4PSLYTUdML5Gox0++z4F9kQocxStrCmRNhi4x5x/UlwtKUA==} - deprecated: This is a stub types definition. rimraf provides its own type definitions, so you do not need this installed. 
- '@types/scheduler@0.26.0': resolution: {integrity: sha512-WFHp9YUJQ6CKshqoC37iOlHnQSmxNc795UhB26CyBBttrN9svdIrUjl/NjnNmfcwtncN0h/0PPAFWv9ovP8mLA==} @@ -5867,24 +5222,12 @@ packages: '@types/serve-static@1.15.8': resolution: {integrity: sha512-roei0UY3LhpOJvjbIP6ZZFngyLKl5dskOtDhxY5THRSpO+ZI+nzJ+m5yUMzGrp89YRa7lvknKkMYjqQFGwA7Sg==} - '@types/shimmer@1.2.0': - resolution: {integrity: sha512-UE7oxhQLLd9gub6JKIAhDq06T0F6FnztwMNRvYgjeQSBeMc1ZG/tA47EwfduvkuQS8apbkM/lpLpWsaCeYsXVg==} - '@types/simple-oauth2@5.0.7': resolution: {integrity: sha512-8JbWVJbiTSBQP/7eiyGKyXWAqp3dKQZpaA+pdW16FCi32ujkzRMG8JfjoAzdWt6W8U591ZNdHcPtP2D7ILTKuA==} '@types/slug@5.0.9': resolution: {integrity: sha512-6Yp8BSplP35Esa/wOG1wLNKiqXevpQTEF/RcL/NV6BBQaMmZh4YlDwCgrrFSoUE4xAGvnKd5c+lkQJmPrBAzfQ==} - '@types/source-map-support@0.5.10': - resolution: {integrity: sha512-tgVP2H469x9zq34Z0m/fgPewGhg/MLClalNOiPIzQlXrSS2YrKu/xCdSCKnEDwkFha51VKEKB6A9wW26/ZNwzA==} - - '@types/tinycolor2@1.4.6': - resolution: {integrity: sha512-iEN8J0BoMnsWBqjVbWH/c0G0Hh7O21lpR2/+PrvAVgWdzL7eexIFm4JN/Wn10PTcmNdtS6U67r499mlWMXOxNw==} - - '@types/trouter@3.1.4': - resolution: {integrity: sha512-4YIL/2AvvZqKBWenjvEpxpblT2KGO6793ipr5QS7/6DpQ3O3SwZGgNGWezxf3pzeYZc24a2pJIrR/+Jxh/wYNQ==} - '@types/unist@2.0.11': resolution: {integrity: sha512-CmBKiL6NNo/OqgmMn95Fk9Whlp2mtvIv+KNpQKN2F4SjvrEesubTRWGYSg+BnWZOnlCaSTU1sMpsBOzgbYhnsA==} @@ -5903,9 +5246,6 @@ packages: '@types/webpack@5.28.5': resolution: {integrity: sha512-wR87cgvxj3p6D0Crt1r5avwqffqPXUkNlnQ1mjU93G7gCuFjufZR4I6j8cz5g1F1tTYpfOOFvly+cmIQwL9wvw==} - '@types/ws@8.18.1': - resolution: {integrity: sha512-ThVF6DCVhA8kUGy+aazFQ4kXQ7E1Ty7A3ypFOe0IcJV8O/M511G99AW24irKrW56Wt44yG9+ij8FaqoBGkuBXg==} - '@typescript-eslint/eslint-plugin@5.62.0': resolution: {integrity: sha512-TiZzBSJja/LbhNPvk6yc0JrX9XqhQ0hdh6M2svYfsHGejaKFIAGd9MQ+ERIMzLGlN/kZoYIgdxFV0PuljTKXag==} engines: {node: ^12.22.0 || ^14.17.0 || >=16.0.0} @@ -6240,10 +5580,6 @@ packages: resolution: {integrity: sha512-4I7Td01quW/RpocfNayFdFVk1qSuoh0E7JrbRJ16nH01HhKFQ88INq9Sd+nd72zqRySlr9BmDA8xlEJ6vJMrYA==} engines: {node: '>=8'} - aggregate-error@4.0.1: - resolution: {integrity: sha512-0poP0T7el6Vq3rstR8Mn4V/IQrpBLO6POkUSrN7RhyY+GF/InCFShQzsQ39T25gkHhLgSLByyAz+Kjb+c2L98w==} - engines: {node: '>=12'} - ai@5.0.78: resolution: {integrity: sha512-ec77fmQwJGLduswMrW4AAUGSOiu8dZaIwMmWHHGKsrMUFFS6ugfkTyx0srtuKYHNRRLRC2dT7cPirnUl98VnxA==} engines: {node: '>=18'} @@ -6273,10 +5609,6 @@ packages: resolution: {integrity: sha512-/6w/C21Pm1A7aZitlI5Ni/2J6FFQN8i1Cvz3kHABAAbw93v/NlvKdVOqz7CCWz/3iv/JplRSEEZ83XION15ovw==} engines: {node: '>=6'} - ansi-escapes@7.0.0: - resolution: {integrity: sha512-GdYO7a61mR0fOlAsvC9/rIHf7L96sBc6dEWzeOu+KAea5bZyQRPIpojrVoI4AXGJS/ycu/fBTdLrUkA4ODrvjw==} - engines: {node: '>=18'} - ansi-regex@5.0.1: resolution: {integrity: sha512-quJQXlTSUGL2LH9SUXo8VwsY4soanhgo6LNSm84E1LBcE8s3O0wpdiRzyR9z/ZZJMlMWv37qOOb9pdJlMUEKFQ==} engines: {node: '>=8'} @@ -6374,10 +5706,6 @@ packages: resolution: {integrity: sha512-3CYzex9M9FGQjCGMGyi6/31c8GJbgb0qGyrx5HWxPd0aCwh4cB2YjMb2Xf9UuoogrMrlO9cTqnB5rI5GHZTcUA==} engines: {node: '>=0.10.0'} - arrify@3.0.0: - resolution: {integrity: sha512-tLkvA81vQG/XqE2mjDkGQHoOINtMHtysSnemrmoGe6PydDPMRbVugqyk4A6V/WDWEfm3l+0d8anA9r8cv/5Jaw==} - engines: {node: '>=12'} - ast-types-flow@0.0.8: resolution: {integrity: sha512-OH/2E5Fg20h2aPrbe+QL8JZQFko0YZaF+j4mnQ7BGhfavO7OpSLa8a0y9sBwomHdSbkhTS8TQNayBfnW5DwbvQ==} @@ -6514,10 +5842,6 @@ packages: bullmq@5.53.2: resolution: {integrity: 
sha512-xHgxrP/yNJHD7VCw1h+eRBh+2TCPBCM39uC9gCyksYc6ufcJP+HTZ/A2lzB2x7qMFWrvsX7tM40AT2BmdkYL/Q==} - bundle-name@4.1.0: - resolution: {integrity: sha512-tjwM5exMg6BGRI+kNmTntNsvdZS1X8BFYS6tnJ2hdH0kVxM6/eVZ2xy+FqStSWvYmtfFMDLIxurorHwDKfDz5Q==} - engines: {node: '>=18'} - bundle-require@5.1.0: resolution: {integrity: sha512-3WrrOuZiyaaZPWiEt4G3+IffISVC9HYlWueJEBWED4ZH4aIAC2PnkdnuRrR94M+w6yGWn4AglWtJtBI8YqvgoA==} engines: {node: ^12.20.0 || ^14.13.1 || >=16.0.0} @@ -6532,14 +5856,6 @@ packages: resolution: {integrity: sha512-/Nf7TyzTx6S3yRJObOAV7956r8cr2+Oj8AC5dt8wSP3BQAoeX58NoHyCU8P8zGkNXStjTSi6fzO6F0pBdcYbEg==} engines: {node: '>= 0.8'} - c12@1.11.2: - resolution: {integrity: sha512-oBs8a4uvSDO9dm8b7OCFW7+dgtVrwmwnrVXYzLm43ta7ep2jCn/0MhoUFygIWtxhyy6+/MG7/agvpY0U1Iemew==} - peerDependencies: - magicast: ^0.3.4 - peerDependenciesMeta: - magicast: - optional: true - cac@6.7.14: resolution: {integrity: sha512-b6Ilus+c3RrdDk+JhLKUAQfzzgLEPy6wcXqS7f/xe1EETvsDP6GORG7SFuOs6cID5YkqchW/LXZbX5bc8j7ZcQ==} engines: {node: '>=8'} @@ -6647,9 +5963,6 @@ packages: resolution: {integrity: sha512-NIxF55hv4nSqQswkAeiOi1r83xy8JldOFDTWiug55KBu9Jnblncd2U6ViHmYgHf01TPZS77NJBhBMKdWj9HQMQ==} engines: {node: '>=8'} - citty@0.1.6: - resolution: {integrity: sha512-tskPPKEs8D2KPafUypv2gxwJP8h/OaJmC82QQGGDQcHvXX43xF2VDACcJVmZ0EuSxkpO9Kc4MlrA3q0+FG58AQ==} - cjs-module-lexer@1.4.3: resolution: {integrity: sha512-9z8TZaGM1pfswYeXrUpzPrkx8UnWYdhJclsiYMm6x/w5+nN+8Tf/LnAgfLGQCm59qAOxU8WwHEq2vNwF6i4j+Q==} @@ -6666,10 +5979,6 @@ packages: resolution: {integrity: sha512-4diC9HaTE+KRAMWhDhrGOECgWZxoevMc5TlkObMqNSsVU62PYzXZ/SMTjzyGAFF1YusgxGcSWTEXBhp0CPwQ1A==} engines: {node: '>=6'} - clean-stack@4.2.0: - resolution: {integrity: sha512-LYv6XPxoyODi36Dp976riBtSY27VmFo+MKqEU9QCCWyTrdEPDog+RWA7xQWHi6Vbp61j5c4cdzzX1NidnwtUWg==} - engines: {node: '>=12'} - cli-cursor@3.1.0: resolution: {integrity: sha512-I/zHAwsKf9FqGoXM4WWRACob9+SNukZTd94DWF57E4toouRulbCxcUh6RKUEOQlYTHJnzkPMySvPNaaSLNfLZw==} engines: {node: '>=8'} @@ -6678,10 +5987,6 @@ packages: resolution: {integrity: sha512-ywqV+5MmyL4E7ybXgKys4DugZbX0FC6LnwrhjuykIjnK9k8OQacQ7axGKnjDXWNhns0xot3bZI5h55H8yo9cJg==} engines: {node: '>=6'} - cli-table3@0.6.5: - resolution: {integrity: sha512-+W/5efTR7y5HRD7gACw9yQjqMVvEMLBHmboM/kPWam+H+Hmyrgjh6YncVKK122YZkXrLudzTuAukUw9FnMf7IQ==} - engines: {node: 10.* || >= 12.*} - client-only@0.0.1: resolution: {integrity: sha512-IV3Ou0jSMzZrd3pZ48nLkT9DA7Ag1pnPzaiQhpW7c3RbcqqzvzzVu+L8gfqMp/8IM2MQtSiqaCxrrcfu8I8rMA==} @@ -6746,9 +6051,6 @@ packages: color@3.2.1: resolution: {integrity: sha512-aBl7dZI9ENN6fUGC7mWpMTPNHmWUSNan9tuWN6ahh5ZLNk9baLJOnSMlrQkHcrfFgz2/RigjUVAjdx36VcemKA==} - colorette@2.0.19: - resolution: {integrity: sha512-3tlv/dIP7FWvj3BsbHrGLJ6l/oKh1O3TcgBqMn+yyCagOxc23fyzDS6HypQbgxWbkpDnf52p1LuR4eWDQ/K9WQ==} - combined-stream@1.0.8: resolution: {integrity: sha512-FQN4MRfuJeHf7cBbBMJFXhKSDq+2kAArBlmRBvcvFE5BB1HZKXtSFASDhdlz9zOYwxh8lDdnvmMOe/+5cdoEdg==} engines: {node: '>= 0.8'} @@ -6787,10 +6089,6 @@ packages: resolution: {integrity: sha512-OkTL9umf+He2DZkUq8f8J9of7yL6RJKI24dVITBmNfZBmri9zYZQrKkuXiKhyfPSu8tUhnVBB1iKXevvnlR4Ww==} engines: {node: '>= 12'} - commander@9.5.0: - resolution: {integrity: sha512-KRs7WVDKg86PWiuAqhDrAQnTXZKraVcCc6vFdL14qrZ/DcWwuRo7VoiYXalXO7S5GKpqYiVEwCbgFDfxNHKJBQ==} - engines: {node: ^12.20.0 || >=14} - compressible@2.0.18: resolution: {integrity: sha512-AF3r7P5dWxL8MxyITRMlORQNaOA2IkAFaTr4k7BUumjPtRpGDTZpl0Pb1XCO6JeDCBdp126Cgs9sMxqSjgYyRg==} engines: {node: '>= 0.6'} @@ -6879,19 +6177,6 @@ packages: typescript: 
optional: true - cp-file@10.0.0: - resolution: {integrity: sha512-vy2Vi1r2epK5WqxOLnskeKeZkdZvTKfFZQCplE3XWsP+SUJyd5XAUFC9lFgTjjXJF2GMne/UML14iEmkAaDfFg==} - engines: {node: '>=14.16'} - - cpy-cli@5.0.0: - resolution: {integrity: sha512-fb+DZYbL9KHc0BC4NYqGRrDIJZPXUmjjtqdw4XRRg8iV8dIfghUX/WiL+q4/B/KFTy3sK6jsbUhBaz0/Hxg7IQ==} - engines: {node: '>=16'} - hasBin: true - - cpy@10.1.0: - resolution: {integrity: sha512-VC2Gs20JcTyeQob6UViBLnyP0bYHkBh6EiKzot9vi2DmeGlFT9Wd7VG3NBrkNx/jYvFBeyDOMMHdHQhbtKLgHQ==} - engines: {node: '>=16'} - crelt@1.0.6: resolution: {integrity: sha512-VQ2MBenTq1fWZUH9DJNGti7kKv6EeAuYr3cLwxUWhIu1baTaXh4Ib5W2CqHVqib4/MqbYGJqiL3Zb8GJZr3l4g==} @@ -7146,15 +6431,6 @@ packages: supports-color: optional: true - debug@4.3.4: - resolution: {integrity: sha512-PRWFHuSU3eDtQJPvnNY7Jcket1j0t5OuOsFzPPzsekD52Zl8qUfFIPEiswXqIvHWGVHOgX+7G/vCNNhehwxfkQ==} - engines: {node: '>=6.0'} - peerDependencies: - supports-color: '*' - peerDependenciesMeta: - supports-color: - optional: true - debug@4.3.7: resolution: {integrity: sha512-Er2nc/H7RrMXZBFCEim6TCmMk02Z8vLC2Rbi1KEBggpo0fS6l0S1nnapwmIi3yW/+GOJap1Krg4w0Hg80oCqgQ==} engines: {node: '>=6.0'} @@ -7215,14 +6491,6 @@ packages: resolution: {integrity: sha512-3sUqbMEc77XqpdNO7FRyRog+eW3ph+GYCbj+rK+uYyRMuwsVy0rMiVtPn+QJlKFvWP/1PYpapqYn0Me2knFn+A==} engines: {node: '>=0.10.0'} - default-browser-id@5.0.0: - resolution: {integrity: sha512-A6p/pu/6fyBcA1TRz/GqWYPViplrftcW2gZC9q79ngNCKAeR/X3gcEdXQHl4KNXV+3wgIJ1CPkJQ3IHM6lcsyA==} - engines: {node: '>=18'} - - default-browser@5.2.1: - resolution: {integrity: sha512-WY/3TUME0x3KPYdRRxEJJvXRHV4PyPoUsxtZa78lwItwRQRHhd2U9xOscaT/YTf8uCXIAjeJOFBVEh/7FtD8Xg==} - engines: {node: '>=18'} - defaults@1.0.4: resolution: {integrity: sha512-eFuaLoy/Rxalv2kr+lqMlUnrDWV+3j4pljOIJgLIhI058IQfWJ7vXhyEIHu+HtC738klGALYxOKDO0bQP3tg8A==} @@ -7230,10 +6498,6 @@ packages: resolution: {integrity: sha512-rBMvIzlpA8v6E+SJZoo++HAYqsLrkg7MSfIinMPFhmkorw7X+dOXVJQs+QT69zGkzMyfDnIMN2Wid1+NbL3T+A==} engines: {node: '>= 0.4'} - define-lazy-prop@3.0.0: - resolution: {integrity: sha512-N+MeXYoqr3pOgn8xfyRPREN7gHakLYjhsHhWGT3fWAiL4IkAt0iDw14QiiEm2bE30c5XX5q0FtAA3CK5f9/BUg==} - engines: {node: '>=12'} - define-properties@1.2.1: resolution: {integrity: sha512-8QmQKqEASLd5nx0U1B1okLElbUuuttJ/AnYmRXbbbGDWh6uS208EjD4Xqq/I9wK7u0v6O08XhTWnt5XtEbR6Dg==} engines: {node: '>= 0.4'} @@ -7241,9 +6505,6 @@ packages: defined@1.0.1: resolution: {integrity: sha512-hsBd2qSVCRE+5PmNdHt1uzyrFu5d3RwmFDKzyNZMFq/EwDNJF7Ee5+D5oEKF0hU6LhtoUF1macFvOe4AskQC1Q==} - defu@6.1.4: - resolution: {integrity: sha512-mEQCMmwJu317oSz8CwdIOdwf3xMif1ttiM8LTufzc3g6kR+9Pe236twL8j3IYT1F7GfRgGcW6MWxzZjLIkuHIg==} - delaunator@5.0.1: resolution: {integrity: sha512-8nvh+XBe96aCESrGOqMp/84b13H9cdKbG5P2ejQCh4d4sK9RL4371qou9drQjMhvnPmhWl5hnmqbEE0fXr9Xnw==} @@ -7263,9 +6524,6 @@ packages: resolution: {integrity: sha512-0je+qPKHEMohvfRTCEo3CrPG6cAzAYgmzKyxRiYSSDkS6eGJdyVJm7WaYA5ECaAD9wLB2T4EEeymA5aFVcYXCA==} engines: {node: '>=6'} - destr@2.0.5: - resolution: {integrity: sha512-ugFTXCtDZunbzasqBxrK93Ik/DRYsO6S/fedkWEMKqt04xZ4csmnmwGDBAb07QWNaGMAmnTIemsYZCksjATwsA==} - destroy@1.2.0: resolution: {integrity: sha512-2sJGJTaXIIaR1w4iJSNoN0hnMY7Gpc/n8D4qSCJw8QqFWXf7cuAgnEHxBpweaVcPevC2l3KpjYCx3NypQQgaJg==} engines: {node: '>= 0.8', npm: 1.2.8000 || >= 1.4.16} @@ -7343,10 +6601,6 @@ packages: resolution: {integrity: sha512-GopVGCpVS1UKH75VKHGuQFqS1Gusej0z4FyQkPdwjil2gNIv+LNsqBlboOzpJFZKVT95GkCyWJbBSdFEFUWI2A==} engines: {node: '>=12'} - dotenv-expand@12.0.2: - resolution: {integrity: 
sha512-lXpXz2ZE1cea1gL4sz2Ipj8y4PiVjytYr3Ij0SWoms1PGxIv7m2CRKuRuCRtHdVuvM/hNJPMxt5PbhboNC4dPQ==} - engines: {node: '>=12'} - dotenv@16.0.3: resolution: {integrity: sha512-7GO6HghkA5fYG9TYnNxi14/7K9f5occMlp3zXAuSxn7CKCxt9xbNWG7yF8hTCSUchlfWSe3uLmlPfigevRItzQ==} engines: {node: '>=12'} @@ -7434,10 +6688,6 @@ packages: resolution: {integrity: sha512-+h1lkLKhZMTYjog1VEpJNG7NZJWcuc2DDk/qsqSTRRCOXiLjeQ1d1/udrUGhqMxUgAlwKNZ0cf2uqan5GLuS2A==} engines: {node: '>=6'} - environment@1.1.0: - resolution: {integrity: sha512-xUtoPkMggbz0MPyPiIWr1Kp4aeWJjDZ6SMvURhimjdZgsRuDplF5/s9hcgGhyXMhs+6vpnuoiZ2kFiu3FMnS8Q==} - engines: {node: '>=18'} - err-code@2.0.3: resolution: {integrity: sha512-2bmlRpNKBxT/CRmPOlyISQpNj+qSeYvcym/uT0Jx2bMOlKLtSy1ZmLuVxSEKKyor/N5yhvp/ZiG1oE3DEYMSFA==} @@ -7507,11 +6757,6 @@ packages: engines: {node: '>=12'} hasBin: true - esbuild@0.23.1: - resolution: {integrity: sha512-VVNz/9Sa0bs5SELtn3f7qhJCDPCF5oMEl5cO9/SSinpE9hbPVvxbd572HH5AKiP7WD8INO53GgfDDhRjkylHEg==} - engines: {node: '>=18'} - hasBin: true - esbuild@0.25.5: resolution: {integrity: sha512-P8OtKZRv/5J5hhz0cUAdu/cLuPIKXpQl1R9pZtvmHWQvrAUVd0UNIPT4IB4W3rNOqVO0rlqHmCIbSwxh/c9yUQ==} engines: {node: '>=18'} @@ -7532,10 +6777,6 @@ packages: resolution: {integrity: sha512-TtpcNJ3XAzx3Gq8sWRzJaVajRs0uVxA2YAkdb1jm2YkPz4G6egUFAyA3n5vtEIZefPk5Wa4UXbKuS5fKkJWdgA==} engines: {node: '>=10'} - escape-string-regexp@5.0.0: - resolution: {integrity: sha512-/veY75JbMK4j1yjvuUxuVsiS/hr/4iHs9FTT6cgTexxdE0Ly/glccBAkloH/DofkjRbZU3bnoj38mOmhkZ0lHw==} - engines: {node: '>=12'} - eslint-config-prettier@8.10.0: resolution: {integrity: sha512-SM8AMJdeQqRYT9O9zguiruQZaN7+z+E4eAP9oiLNGKMtomwaB1E9dcgUD6ZAn/eQAb52USbvezbiljfZUhbJcg==} hasBin: true @@ -7699,10 +6940,6 @@ packages: deprecated: This version is no longer supported. Please see https://eslint.org/version-support for other options. 
hasBin: true - esm@3.2.25: - resolution: {integrity: sha512-U1suiZ2oDVWv4zPO56S0NcR5QriEahGtdN2OR6FiOG4WJvcjBVFB0qI4+eKoWFH483PKGuLuu6V8Z4T5g63UVA==} - engines: {node: '>=6'} - espree@9.6.1: resolution: {integrity: sha512-oruZaFkjorTpF32kDSI5/75ViwGeZginGGy2NoOSg3Q9bnwlnmDm4HLnkl0RE3n+njDXR037aY1+x58Z/zFdwQ==} engines: {node: ^12.22.0 || ^14.17.0 || >=16.0.0} @@ -7768,9 +7005,6 @@ packages: resolution: {integrity: sha512-EzV94NYKoO09GLXGjXj9JIlXijVck4ONSr5wiCWDvhsvj5jxSrzTmRU/9C1DyB6uToszLs8aifA6NQ7lEQdvFw==} engines: {node: '>= 0.8'} - event-target-polyfill@0.0.4: - resolution: {integrity: sha512-Gs6RLjzlLRdT8X9ZipJdIZI/Y6/HhRLyq9RdDlCsnpxr/+Nn6bU2EFGuC94GjxqhM+Nmij2Vcq98yoHrU8uNFQ==} - event-target-shim@5.0.1: resolution: {integrity: sha512-i/2XbnSz/uxRCU6+NdVJgKWDTM427+MqYbkQzD321DuCQJUqOuJKIA0IM2+W2xtYHdKOmZ4dR6fExsd4SXL+WQ==} engines: {node: '>=6'} @@ -7854,9 +7088,6 @@ packages: fast-levenshtein@2.0.6: resolution: {integrity: sha512-DCXu6Ifhqcks7TZKY3Hxp3y6qphY5SJZmrWMDrKcERSOXWQdMhU9Ig/PYrzyw/ul9jOIyh0N4M0tbC5hodg8dw==} - fast-npm-meta@0.2.2: - resolution: {integrity: sha512-E+fdxeaOQGo/CMWc9f4uHFfgUPJRAu7N3uB8GBvB3SDPAIWJK4GKyYhkAGFq+GYrcbKNfQIz5VVQyJnDuPPCrg==} - fast-sort@3.4.1: resolution: {integrity: sha512-76uvGPsF6So53sZAqenP9UVT3p5l7cyTHkLWVCMinh41Y8NDrK1IYXJgaBMfc1gk7nJiSRZp676kddFG2Aa5+A==} @@ -7916,10 +7147,6 @@ packages: resolution: {integrity: sha512-78/PXT1wlLLDgTzDs7sjq9hzz0vXD+zn+7wypEe4fXQxCmdmqfGsEPQxmiCSQI3ajFV91bVSsvNtrJRiW6nGng==} engines: {node: '>=10'} - find-up@7.0.0: - resolution: {integrity: sha512-YyZM99iHrqLKjmt4LJDj58KI+fYyufRLBSYcqycxf//KpBk9FoewoGX0450m9nB44qrZnovzC2oeP5hUibxc/g==} - engines: {node: '>=18'} - find-yarn-workspace-root2@1.2.16: resolution: {integrity: sha512-hr6hb1w8ePMpPVUK39S4RlwJzi+xPLuVuG8XlwXU3KD5Yn3qgBWVfy3AzNlDhWvE1EORCE65/Qm26rFQt3VLVA==} @@ -8058,10 +7285,6 @@ packages: resolution: {integrity: sha512-FJhYRoDaiatfEkUK8HKlicmu/3SGFD51q3itKDGoSTysQJBnfOcxU5GxnhE1E6soB76MbT0MBtnKJuXyAx+96Q==} engines: {node: '>=6'} - get-package-type@0.1.0: - resolution: {integrity: sha512-pjzuKtY64GYfWizNAJ0fr9VqttZkNiK2iS430LtIHzjBEr6bX8Am2zm4sW4Ro5wjWW5cAlRL1qAMTcXbjNAO2Q==} - engines: {node: '>=8.0.0'} - get-port@5.1.1: resolution: {integrity: sha512-g/Q1aTSDOxFpchXC4i8ZWvxA1lnPqx/JHqcpIw0/LX9T8x/GBbi6YnlN5nhaKIFkT8oFsscUKgDJYxfwfS6QsQ==} engines: {node: '>=8'} @@ -8089,16 +7312,6 @@ packages: get-tsconfig@4.10.1: resolution: {integrity: sha512-auHyJ4AgMz7vgS8Hp3N6HXSmlMdUyhSUrfBF16w153rxtLIEOE+HGqaBppczZvnHLqQJfiHotCYpNhl0lUROFQ==} - getopts@2.3.0: - resolution: {integrity: sha512-5eDf9fuSXwxBL6q5HX+dhDj+dslFGWzU5thZ9kNKUkcPtaPdatmUFKwHFrLb/uf/WpA4BHET+AX3Scl56cAjpA==} - - giget@1.2.5: - resolution: {integrity: sha512-r1ekGw/Bgpi3HLV3h1MRBIlSAdHoIMklpaQ3OQLFcRw9PwAj2rqigvIbg+dBUI51OxVI2jsEtDywDBjSiuf7Ug==} - hasBin: true - - git-last-commit@1.0.1: - resolution: {integrity: sha512-FDSgeMqa7GnJDxt/q0AbrxbfeTyxp4ImxEw1e4nw6NUHA5FMhFUq33dTXI4Xdgcj1VQ1q5QLWF6WxFrJ8KCBOg==} - glob-parent@5.1.2: resolution: {integrity: sha512-AOIgSQCepiJYwP3ARnGx+5VnTu2HBYdzbGP45eLw1vr3zB3vZLeyed1sC9hnbcOc9/SrMyM5RPQrkGz4aS9Zow==} engines: {node: '>= 6'} @@ -8144,10 +7357,6 @@ packages: resolution: {integrity: sha512-jhIXaOzy1sb8IyocaruWSn1TjmnBVs8Ayhcy83rmxNJ8q2uWKCAj3CnJY+KpGSXCueAPc0i05kVvVKtP1t9S3g==} engines: {node: '>=10'} - globby@13.2.2: - resolution: {integrity: sha512-Y1zNGV+pzQdh7H39l9zgB4PJqjRNqydvdYCDG4HFXM4XuvSaQQlEc91IU1yALL8gUTDomgBAfz3XJdmUS+oo0w==} - engines: {node: ^12.20.0 || ^14.13.1 || >=16.0.0} - globrex@0.1.2: resolution: {integrity: 
sha512-uHJgbwAMwNFf5mLst7IWLNg14x1CkeqglJb/K3doi4dw6q2IvAAmM/Y81kevy83wP+Sst+nutFTYOGg3d1lsxg==} @@ -8161,10 +7370,6 @@ packages: graceful-fs@4.2.11: resolution: {integrity: sha512-RbJ5/jmFcNNCcDV5o9eTnBLJ/HszWV0P73bc+Ff4nS/rJj+YaS6IGyiOL0VoBYX+l1Wrl3k63h/KrH+nhJ0XvQ==} - gradient-string@2.0.2: - resolution: {integrity: sha512-rEDCuqUQ4tbD78TpzsMtt5OIf0cBCSDWSJtUDaF6JsAh+k0v9r++NzxNEG87oDZx9ZwGhD8DaezR2L/yrw0Jdw==} - engines: {node: '>=10'} - grapheme-splitter@1.0.4: resolution: {integrity: sha512-bzh50DW9kTPM00T8y4o8vQg89Di9oLJVLW/KaOGIXJWP/iqCN6WKYkbNOF04vFLJhwcpYUh9ydh/+5vpOqV4YQ==} @@ -8224,10 +7429,6 @@ packages: resolution: {integrity: sha512-EykJT/Q1KjTWctppgIAgfSO0tKVuZUjhgMr17kqTumMl6Afv3EISleU7qZUzoXDFTAHTDC4NOoG/ZxU3EvlMPQ==} engines: {node: '>=8'} - has-flag@5.0.1: - resolution: {integrity: sha512-CsNUt5x9LUdx6hnk/E2SZLsDyvfqANZSUq4+D3D8RzDJ2M+HDTIkF60ibS1vHaK55vzgiZw1bEPFG9yH7l33wA==} - engines: {node: '>=12'} - has-property-descriptors@1.0.2: resolution: {integrity: sha512-55JNKuIW+vq4Ke1BjOTjM2YctQIvCT7GFzHwmfZPGo5wnrgkid0YQtnAleFSqumZm4az3n2BS+erby5ipJdgrg==} @@ -8263,10 +7464,6 @@ packages: resolution: {integrity: sha512-Xwwo44whKBVCYoliBQwaPvtd/2tYFkRQtXDWj1nackaV2JPXx3L0+Jvd8/qCJ2p+ML0/XVkJ2q+Mr+UVdpJK5w==} engines: {node: '>=12.0.0'} - hono@4.8.5: - resolution: {integrity: sha512-Up2cQbtNz1s111qpnnECdTGqSIUIhZJMLikdKkshebQSEBcoUKq6XJayLGqSZWidiH0zfHRCJqFu062Mz5UuRA==} - engines: {node: '>=16.9.0'} - hosted-git-info@2.8.9: resolution: {integrity: sha512-mxIDAb9Lsm6DoOJ7xH+5+X4y1LU/4Hi50L9C5sIswK3JzULS4bwk1FvjdBgvYR4bzT4tuUQiC15FE2f5HbLvYw==} @@ -8349,9 +7546,6 @@ packages: import-in-the-middle@1.11.0: resolution: {integrity: sha512-5DimNQGoe0pLUHbR9qK84iWaWjjbsxiqXnw6Qz64+azRgleqv9k2kTt5fw7QsOpmaGYtuxxursnPPsnTKEx10Q==} - import-meta-resolve@4.1.0: - resolution: {integrity: sha512-I6fiaX09Xivtk+THaMfAwnA3MVA5Big1WHF1Dfx9hFuvNIWpXnorlkzhcQf6ehrqQiiZECRt1poOAkPmer3ruw==} - imurmurhash@0.1.4: resolution: {integrity: sha512-JmXMZ6wuvDmLiHEml9ykzqO6lwFbof0GG4IkcGaENdCRDDmMVnny7s5HsIgHCbaq0w2MyPhDqkhTUgS2LU2PHA==} engines: {node: '>=0.8.19'} @@ -8360,10 +7554,6 @@ packages: resolution: {integrity: sha512-EdDDZu4A2OyIK7Lr/2zG+w5jmbuk1DVBnEwREQvBzspBJkCEbRa8GxU1lghYcaGJCnRWibjDXlq779X1/y5xwg==} engines: {node: '>=8'} - indent-string@5.0.0: - resolution: {integrity: sha512-m6FAo/spmsW2Ab2fU35JTYwtOKa2yAwXSwgjSv1TJzh4Mh7mC3lzAOVLBprb72XsTrgkEIsl7YrFNAiDiRhIGg==} - engines: {node: '>=12'} - inflight@1.0.6: resolution: {integrity: sha512-k92I/b08q4wvFscXCLvqfsHCrjrF7yiXsQuIVvVE7N82W3+aqpzuUdBbfhWcy/FZR3/4IgflMgKLOsvPDrGCJA==} deprecated: This module is not supported, and leaks memory. Do not use it. Check out lru-cache if you want a good and tested way to coalesce async requests by a key value, which is much more comprehensive and powerful. 
@@ -8374,10 +7564,6 @@ packages: ini@1.3.8: resolution: {integrity: sha512-JV/yugV2uzW5iMRSiZAyDtQd+nxtUnjeLt0acNdw98kKLrvuRVyB80tsREOE7yvGVgalhZ6RNXCmEHkUKBKxew==} - ini@5.0.0: - resolution: {integrity: sha512-+N0ngpO3e7cRUWOJAS7qw0IZIVc6XPrW4MlFBdD066F2L4k1L6ker3hLqSq7iXxU5tgS4WGkIUElWn5vogAEnw==} - engines: {node: ^18.17.0 || >=20.5.0} - inline-style-parser@0.1.1: resolution: {integrity: sha512-7NXolsK4CAS5+xvdj5OMMbI962hU/wvwoxk+LWR9Ek9bVtyuuYScDN6eS0rUm6TxApFpw7CX1o4uJzcd4AyD3Q==} @@ -8392,10 +7578,6 @@ packages: resolution: {integrity: sha512-5Hh7Y1wQbvY5ooGgPbDaL5iYLAPzMTUrjMulskHLH6wnv/A+1q5rgEaiuqEjB+oxGXIVZs1FF+R/KPN3ZSQYYg==} engines: {node: '>=12'} - interpret@2.2.0: - resolution: {integrity: sha512-Ju0Bz/cEia55xDwUWEa8+olFpCiQoypjnQySseKtmjNrnps3P+xfpUmGr90T7yjlVJmOtybRvPXhKMbHr+fWnw==} - engines: {node: '>= 0.10'} - ioredis@5.6.1: resolution: {integrity: sha512-UxC0Yv1Y4WRJiGQxQkP0hfdL0/5/6YvdfOOClRgJ0qppSarkhneSa6UvkMkms0AkdGimSH3Ikqm+6mkMmX7vGA==} engines: {node: '>=12.22.0'} @@ -8473,11 +7655,6 @@ packages: is-deflate@1.0.0: resolution: {integrity: sha512-YDoFpuZWu1VRXlsnlYMzKyVRITXj7Ej/V9gXQ2/pAe7X1J7M/RNOqaIYi6qUn+B7nGyB9pDXrv02dsB58d2ZAQ==} - is-docker@3.0.0: - resolution: {integrity: sha512-eljcgEDlEns/7AXFosB5K/2nCM4P7FQPkGc/DWLy5rmFEWvZayGrik1d9/QIY5nJ4f9YsVvBkA6kJpHn9rISdQ==} - engines: {node: ^12.20.0 || ^14.13.1 || >=16.0.0} - hasBin: true - is-extglob@2.1.1: resolution: {integrity: sha512-SbKbANkN603Vi4jEZv49LeVJMn4yGwsbzZworEoyEiutsN3nJYdbO36zfhGJ6QEDpOZIFkDtnq5JRxmvl3jsoQ==} engines: {node: '>=0.10.0'} @@ -8505,11 +7682,6 @@ packages: is-hexadecimal@2.0.1: resolution: {integrity: sha512-DgZQp241c8oO6cA1SbTEWiXeoxV42vlcJxgH+B3hi1AiqqKruZR3ZGF8In3fj4+/y/7rHvlOZLZtgJ/4ttYGZg==} - is-inside-container@1.0.0: - resolution: {integrity: sha512-KIYLCCJghfHZxqjYBE7rEy0OBuTd5xCHS7tHVgvCLkx7StIoaxwNW3hCALgEUjFfeRk+MG/Qxmp/vtETEF3tRA==} - engines: {node: '>=14.16'} - hasBin: true - is-interactive@1.0.0: resolution: {integrity: sha512-2HvIEKRoqS62guEC+qBjpvRubdX910WCMuJTZ+I9yvqKU2/12eSL549HMwtabb4oupdj2sMP50k+XJfB/8JE6w==} engines: {node: '>=8'} @@ -8522,10 +7694,6 @@ packages: resolution: {integrity: sha512-5KoIu2Ngpyek75jXodFvnafB6DJgr3u8uuK0LEZJjrU19DrMD3EVERaR8sjz8CCGgpZvxPl9SuE1GMVPFHx1mw==} engines: {node: '>= 0.4'} - is-network-error@1.1.0: - resolution: {integrity: sha512-tUdRRAnhT+OtCZR/LxZelH/C7QtjtFrTu5tXCA8pl55eTUElUHT+GPYV8MBMBvea/j+NxQqVt3LbWMRir7Gx9g==} - engines: {node: '>=16'} - is-number-object@1.1.1: resolution: {integrity: sha512-lZhclumE1G6VYD8VHe35wFaIif+CTy5SJIi5+3y4psDgWu4wPDoBhF8NxUOinEc7pHgiTsT6MaBb92rKhhD+Xw==} engines: {node: '>= 0.4'} @@ -8627,10 +7795,6 @@ packages: resolution: {integrity: sha512-eXK1UInq2bPmjyX6e3VHIzMLobc4J94i4AWn+Hpq3OU5KkrRC96OAcR3PRJ/pGu6m8TRnBHP9dkXQVsT/COVIA==} engines: {node: '>=0.10.0'} - is-wsl@3.1.0: - resolution: {integrity: sha512-UcVfVfaK4Sc4m7X3dUSoHoozQGBEFeDC+zVo06t98xe8CzHSZZBekNXH+tu0NalHolcJ/QAGqS46Hef7QXBIMw==} - engines: {node: '>=16'} - isarray@1.0.0: resolution: {integrity: sha512-VLghIWNM6ELQzo7zwmcg0NmTVyWKYjvIeM83yjp0wRDTmUnrM678fQbcKBo6n2CJEF0szoG//ytg+TKla89ALQ==} @@ -8767,9 +7931,6 @@ packages: engines: {node: '>=6'} hasBin: true - jsonc-parser@3.2.1: - resolution: {integrity: sha512-AilxAyFOAcK5wA1+LeaySVBrHsGQvUFCDWXKpZjzaL0PqW+xfBOttn8GNtWKFWqneyMZj41MWF9Kl6iPWLwgOA==} - jsonfile@4.0.0: resolution: {integrity: sha512-m6F1R3z8jjlf2imQHS2Qez5sjKWQzbuuhuJ/FKYFRZvPE3PuHcSMVZzfsLhGVOkfd20obL5SWEBew5ShlquNxg==} @@ -8780,10 +7941,6 @@ packages: resolution: {integrity: 
sha512-ZZow9HBI5O6EPgSJLUb8n2NKgmVWTwCvHGwFuJlMjvLFqlGG6pjirPhtdsseaLZjSibD8eegzmYpUZwoIlj2cQ==} engines: {node: '>=4.0'} - junk@4.0.1: - resolution: {integrity: sha512-Qush0uP+G8ZScpGMZvHUiRfI0YBWuB3gVBYlI0v0vvOJt5FLicco+IkP0a50LqTTQhmts/m6tP5SWE+USyIvcQ==} - engines: {node: '>=12.20'} - katex@0.16.22: resolution: {integrity: sha512-XCHRdUw4lf3SKBaJe4EvgqIuWwkPSo9XoeO8GjQW94Bp7TWv9hNhzZjZ+OH9yf1UmLygb7DIT5GSFQiyt16zYg==} hasBin: true @@ -8805,34 +7962,6 @@ packages: resolution: {integrity: sha512-o+NO+8WrRiQEE4/7nwRJhN1HWpVmJm511pBHUxPLtp0BUISzlBplORYSmTclCnJvQq2tKu/sgl3xVpkc7ZWuQQ==} engines: {node: '>=6'} - knex@3.1.0: - resolution: {integrity: sha512-GLoII6hR0c4ti243gMs5/1Rb3B+AjwMOfjYm97pu0FOQa7JH56hgBxYf5WK2525ceSbBY1cjeZ9yk99GPMB6Kw==} - engines: {node: '>=16'} - hasBin: true - peerDependencies: - better-sqlite3: '*' - mysql: '*' - mysql2: '*' - pg: '*' - pg-native: '*' - sqlite3: '*' - tedious: '*' - peerDependenciesMeta: - better-sqlite3: - optional: true - mysql: - optional: true - mysql2: - optional: true - pg: - optional: true - pg-native: - optional: true - sqlite3: - optional: true - tedious: - optional: true - language-subtag-registry@0.3.23: resolution: {integrity: sha512-0K65Lea881pHotoGEa5gDlMxt3pctLi2RplBb7Ezh4rRdLEOtgi7n4EwK9lamnUCkKBqaeKRVebTq6BAxSkpXQ==} @@ -8968,10 +8097,6 @@ packages: resolution: {integrity: sha512-iPZK6eYjbxRu3uB4/WZ3EsEIMJFMqAoopl3R+zuq0UjcAm/MO6KCweDgPfP3elTztoKP3KtnVHxTn2NHBSDVUw==} engines: {node: '>=10'} - locate-path@7.2.0: - resolution: {integrity: sha512-gvVijfZvn7R+2qyPX8mAuKcFGDf6Nc61GdvGafQsHL0sBIxfKzA+usWn4GFC/bk+QdwPUD4kWFJLhElipq+0VA==} - engines: {node: ^12.20.0 || ^14.13.1 || >=16.0.0} - lodash.camelcase@4.3.0: resolution: {integrity: sha512-TwuEnCnxbc3rAvhf/LbG7tJUDzhqXyFnv3dtzLOPgCG/hODL7WFnsbwktkD7yUV0RrreP/l1PALq/YSg6VvjlA==} @@ -9055,9 +8180,6 @@ packages: magic-string@0.30.17: resolution: {integrity: sha512-sNPKHvyjVf7gyjwS4xGTaW/mCnF8wnjtifKBEhxfZ7E/S8tQ0rssrwGNn6q8JH/ohItJfSQp9mBtQYuTlH5QnA==} - magicast@0.3.5: - resolution: {integrity: sha512-L0WhttDl+2BOsybvEOLK7fW3UA0OQ0IQ2d6Zl2x/a6vVRs3bAY0ECOSHHeL5jD+SbOpOCUEi0y1DgHEn9Qn1AQ==} - make-dir@2.1.0: resolution: {integrity: sha512-LS9X+dc8KLxXCb8dni79fLIIUA5VyZoyjSMCwTluaXA0o27cCK0bhXkpgw+sTXVpPy/lSO57ilRixqk0vDmtRA==} engines: {node: '>=6'} @@ -9086,10 +8208,6 @@ packages: engines: {node: '>= 16'} hasBin: true - matchit@1.1.0: - resolution: {integrity: sha512-+nGYoOlfHmxe5BW5tE0EMJppXEwdSf8uBA1GTZC7Q77kbT35+VKLYJMzVNWCHSsga1ps1tPYFtFyvxvKzWVmMA==} - engines: {node: '>=6'} - math-intrinsics@1.1.0: resolution: {integrity: sha512-/IXtbwEk5HTPyEwyKX6hGkYXxM9nbj64B+ilVJnC/R6B0pH5G4V3b0pVbL7DBj4tkhBAppbQUlf6F6Xl9LHu1g==} engines: {node: '>= 0.4'} @@ -9177,10 +8295,6 @@ packages: resolution: {integrity: sha512-S3UwM3yj5mtUSEfP41UZmt/0SCoVYUcU1rkXv+BQ5Ig8ndL4sPoJNBUJERafdPb5jjHJGuMgytgKvKIf58XNBw==} engines: {node: '>= 0.10.0'} - meow@12.1.1: - resolution: {integrity: sha512-BhXM0Au22RwUneMPwSCnyhTOizdWoIEPU9sp0Aqa1PnDMR5Wv2FGXYDjuzJEIX+Eo2Rb8xuYe5jrnm5QowQFkw==} - engines: {node: '>=16.10'} - meow@6.1.1: resolution: {integrity: sha512-3YffViIt2QWgTy6Pale5QpopX/IvU3LPL03jOTqp6pGj3VjesdO/U8CuHMKpnQr4shCNCM5fd5XFFvIIl6JBHg==} engines: {node: '>=8'} @@ -9496,9 +8610,6 @@ packages: ms@2.0.0: resolution: {integrity: sha512-Tpp60P6IUJDTuOq/5Z8cdskzJujfwqfOTkrwIwj7IRISpnkJnT6SyJ4PCPnGMoFjC9ddhal5KVIYtAt97ix05A==} - ms@2.1.2: - resolution: {integrity: sha512-sGkPx+VjMtmA6MX27oA4FBFELFCZZ4S4XqeGOXCv68tT+jb3vk/RyaKWP0PTKyWtmLSM0b+adUTEvbs1PEaH2w==} - ms@2.1.3: resolution: {integrity: 
sha512-6FlzubTLZG3J2a/NVCAleEhjzq5oxgHyaCU9yYXvcLsvoVaHJq/s5xXI6/XXP6tz7R9xAOtHnSO/tXtF3WRTlA==} @@ -9562,9 +8673,6 @@ packages: neo4j-driver@5.28.1: resolution: {integrity: sha512-jbyBwyM0a3RLGcP43q3hIxPUPxA+1bE04RovOKdNAS42EtBMVCKcPSeOvWiHxgXp1ZFd0a8XqK+7LtguInOLUg==} - nested-error-stacks@2.1.1: - resolution: {integrity: sha512-9iN1ka/9zmX1ZvLV9ewJYEk9h7RyRRtqdK0woXcqohu8EWIerfPUjYJPg0ULy0UqP7cslmdGc8xKDJcojlKiaw==} - next@14.1.4: resolution: {integrity: sha512-1WTaXeSrUwlz/XcnhGTY7+8eiaFvdet5z9u3V2jb+Ek1vFo0VhHKSAIJvDWfQpttWjnyw14kBeq28TPq7bTeEQ==} engines: {node: '>=18.17.0'} @@ -9592,9 +8700,6 @@ packages: node-emoji@1.11.0: resolution: {integrity: sha512-wo2DpQkQp7Sjm2A0cq+sN7EHKO6Sl0ctXeBdFZrL9T9+UywORbufTcTZxom8YqpLQt/FqNMUkOpkZrJVYSKD3A==} - node-fetch-native@1.6.6: - resolution: {integrity: sha512-8Mc2HhqPdlIfedsuZoc3yioPuzp6b+L5jRCRY1QzuWZh2EGJVQrGppC6V6cF0bLdbW0+O2YpqCA25aF/1lvipQ==} - node-fetch@2.7.0: resolution: {integrity: sha512-c4FRfUm/dbcWZ7U+1Wq0AwCyFL+3nt2bEw05wfxSz+DWpWsitgmSgYmy2dQdWyKC1694ELPqMs/YzUSNozLt8A==} engines: {node: 4.x || >=6.0.0} @@ -9685,11 +8790,6 @@ packages: num2fraction@1.2.2: resolution: {integrity: sha512-Y1wZESM7VUThYY+4W+X4ySH2maqcA+p7UR+w8VWNWVAd6lwuXXWz/w/Cz43J/dI2I+PS6wD5N+bJUF+gjWvIqg==} - nypm@0.5.4: - resolution: {integrity: sha512-X0SNNrZiGU8/e/zAB7sCTtdxWTMSIO73q+xuKgglm2Yvzwlo8UoC5FNySQFCvl84uPaeADkqHUZUkWy4aH4xOA==} - engines: {node: ^14.16.0 || >=16.10.0} - hasBin: true - object-assign@4.1.1: resolution: {integrity: sha512-rJgTQnkUnH1sFw8yT6VSU3zD3sWmu6sZhIseY8VX+GRu3P6F7Fu+JNDoXfklElbLJSnc3FUQHVe4cU5hj+BcUg==} engines: {node: '>=0.10.0'} @@ -9734,9 +8834,6 @@ packages: resolution: {integrity: sha512-gXah6aZrcUxjWg2zR2MwouP2eHlCBzdV4pygudehaKXSGW4v2AsRQUK+lwwXhii6KFZcunEnmSUoYp5CXibxtA==} engines: {node: '>= 0.4'} - ohash@1.1.6: - resolution: {integrity: sha512-TBu7PtV8YkAZn0tSxobKY2n2aAQva936lhRrj6957aDaCf9IEtqsKbgMzXE/F/sjqYOwmrukeORHNLe5glk7Cg==} - ollama-ai-provider-v2@1.5.1: resolution: {integrity: sha512-5R3z7Y+mm8VEtoq+rIoIqkEy83oYM3DXX6Nyrn6yofYvYl56BCoJMNwXsPrpmCI0O4fN/gAIDTLpznYMRGzZ5g==} engines: {node: '>=18'} @@ -9766,10 +8863,6 @@ packages: resolution: {integrity: sha512-1FlR+gjXK7X+AsAHso35MnyN5KqGwJRi/31ft6x0M194ht7S+rWAvd7PHss9xSKMzE0asv1pyIHaJYq+BbacAQ==} engines: {node: '>=12'} - open@10.2.0: - resolution: {integrity: sha512-YgBpdJHPyQ2UE5x+hlSXcnejzAvD0b22U2OuAP+8OnlJT+PjWPxtgmGqKKc+RgTM63U9gN0YzrYc71R2WT/hTA==} - engines: {node: '>=18'} - openai@5.12.2: resolution: {integrity: sha512-xqzHHQch5Tws5PcKR2xsZGX9xtch+JQFz5zb14dGqlshmmDAFBFEWmeIpf7wVqWV+w7Emj7jRgkNJakyKE0tYQ==} hasBin: true @@ -9793,10 +8886,6 @@ packages: orderedmap@2.1.1: resolution: {integrity: sha512-TvAWxi0nDe1j/rtMcWcIj94+Ffe6n7zhow33h40SKxmsmozs6dz/e+EajymfoFcHd7sxNn8yHM8839uixMOV6g==} - os-paths@7.4.0: - resolution: {integrity: sha512-Ux1J4NUqC6tZayBqLN1kUlDAEvLiQlli/53sSddU4IN+h+3xxnv2HmRSMpVSvr1hvJzotfMs3ERvETGK+f4OwA==} - engines: {node: '>= 4.0'} - os-tmpdir@1.0.2: resolution: {integrity: sha512-D2FR03Vir7FIu45XBY20mTb+/ZSWB00sjU9jdQXt83gDrI4Ztz5Fs7/yy74g2N5SVQY4xY1qDr4rNddwYRVX0g==} engines: {node: '>=0.10.0'} @@ -9814,18 +8903,10 @@ packages: resolution: {integrity: sha512-qFOyK5PjiWZd+QQIh+1jhdb9LpxTF0qs7Pm8o5QHYZ0M3vKqSqzsZaEB6oWlxZ+q2sJBMI/Ktgd2N5ZwQoRHfg==} engines: {node: '>= 0.4'} - p-event@5.0.1: - resolution: {integrity: sha512-dd589iCQ7m1L0bmC5NLlVYfy3TbBEsMUfWx9PyAgPeIcFZ/E2yaTZ4Rz4MiBmmJShviiftHVXOqfnfzJ6kyMrQ==} - engines: {node: ^12.20.0 || ^14.13.1 || >=16.0.0} - p-filter@2.1.0: resolution: {integrity: 
sha512-ZBxxZ5sL2HghephhpGAQdoskxplTwr7ICaehZwLIlfL6acuVgZPm8yBNuRAFBGEqtD/hmUeq9eqLg2ys9Xr/yw==} engines: {node: '>=8'} - p-filter@3.0.0: - resolution: {integrity: sha512-QtoWLjXAW++uTX67HZQz1dbTpqBfiidsB6VtQUC9iR85S120+s0T5sO6s+B5MLzFcZkrEd/DGMmCjR+f2Qpxwg==} - engines: {node: ^12.20.0 || ^14.13.1 || >=16.0.0} - p-limit@2.3.0: resolution: {integrity: sha512-//88mFWSJx8lxCzwdAABTJL2MyWB12+eIY7MDL2SqLmAkeKU9qxRvWuSyTjm3FUmpBEMuFfckAIqEaVGUDxb6w==} engines: {node: '>=6'} @@ -9834,14 +8915,6 @@ packages: resolution: {integrity: sha512-TYOanM3wGwNGsZN2cVTYPArw454xnXj5qmWF1bEoAc4+cU/ol7GVh7odevjp1FNHduHc3KZMcFduxU5Xc6uJRQ==} engines: {node: '>=10'} - p-limit@4.0.0: - resolution: {integrity: sha512-5b0R4txpzjPWVw/cXXUResoD4hb6U/x9BH08L7nw+GN1sezDzPdxeRvpc9c433fZhBan/wusjbCsqwqm4EIBIQ==} - engines: {node: ^12.20.0 || ^14.13.1 || >=16.0.0} - - p-limit@6.2.0: - resolution: {integrity: sha512-kuUqqHNUqoIWp/c467RI4X6mmyuojY5jGutNU0wVTmEOOfcuwLqyMVoAi9MKi2Ak+5i9+nhmrK4ufZE8069kHA==} - engines: {node: '>=18'} - p-locate@4.1.0: resolution: {integrity: sha512-R79ZZ/0wAxKGu3oYMlz8jy/kbhsNrS7SKZ7PxEHBgJ5+F2mtFW2fK2cOtBh1cHYkQsbzFV7I+EoRKe6Yt0oK7A==} engines: {node: '>=8'} @@ -9850,10 +8923,6 @@ packages: resolution: {integrity: sha512-LaNjtRWUBY++zB5nE/NwcaoMylSPk+S+ZHNB1TzdbMJMny6dynpAGt7X/tl/QYq3TIeE6nxHppbo2LGymrG5Pw==} engines: {node: '>=10'} - p-locate@6.0.0: - resolution: {integrity: sha512-wPrq66Llhl7/4AGC6I+cqxT07LhXvWL08LNXz1fENOw0Ap4sRZZ/gZpTTJ5jpurzzzfS2W/Ge9BY3LgLjCShcw==} - engines: {node: ^12.20.0 || ^14.13.1 || >=16.0.0} - p-map@2.1.0: resolution: {integrity: sha512-y3b8Kpd8OAN444hxfBbFfj1FY/RjtTd8tzYwhUqNYXx0fXx2iX4maP4Qr6qhIKbQXI02wTLAda4fYUbDagTUFw==} engines: {node: '>=6'} @@ -9862,22 +8931,6 @@ packages: resolution: {integrity: sha512-/bjOqmgETBYB5BoEeGVea8dmvHb2m9GLy1E9W43yeyfP6QQCZGFNa+XRceJEuDB6zqr+gKpIAmlLebMpykw/MQ==} engines: {node: '>=10'} - p-map@5.5.0: - resolution: {integrity: sha512-VFqfGDHlx87K66yZrNdI4YGtD70IRyd+zSvgks6mzHPRNkoKy+9EKP4SFC77/vTTQYmRmti7dvqC+m5jBrBAcg==} - engines: {node: '>=12'} - - p-map@6.0.0: - resolution: {integrity: sha512-T8BatKGY+k5rU+Q/GTYgrEf2r4xRMevAN5mtXc2aPc4rS1j3s+vWTaO2Wag94neXuCAUAs8cxBL9EeB5EA6diw==} - engines: {node: '>=16'} - - p-retry@6.2.1: - resolution: {integrity: sha512-hEt02O4hUct5wtwg4H4KcWgDdm+l1bOaEy/hWzd8xtXB9BqxTWBBhb+2ImAtH4Cv4rPjV76xN3Zumqk3k3AhhQ==} - engines: {node: '>=16.17'} - - p-timeout@5.1.0: - resolution: {integrity: sha512-auFDyzzzGZZZdHz3BtET9VEz0SE/uMEAx7uWfGPucfzEwwe/xH0iVeZibQmANYE/hp9T2+UUZT5m+BKyrDp3Ew==} - engines: {node: '>=12'} - p-try@2.2.0: resolution: {integrity: sha512-R4nPAVTAU0B9D35/Gk3uJf/7XYbQcyohSKdvAxIRSNghFl4e71hVoGnBNQz9cWaXxO2I10KTC+3jMdvvoKw6dQ==} engines: {node: '>=6'} @@ -9931,17 +8984,10 @@ packages: resolution: {integrity: sha512-CiyeOxFT/JZyN5m0z9PfXw4SCBJ6Sygz1Dpl0wqjlhDEGGBP1GnsUVEL0p63hoG1fcj3fHynXi9NYO4nWOL+qQ==} engines: {node: '>= 0.8'} - partysocket@1.1.4: - resolution: {integrity: sha512-jXP7PFj2h5/v4UjDS8P7MZy6NJUQ7sspiFyxL4uc/+oKOL+KdtXzHnTV8INPGxBrLTXgalyG3kd12Qm7WrYc3A==} - path-exists@4.0.0: resolution: {integrity: sha512-ak9Qy5Q7jYb2Wwcey5Fpvg2KoAc/ZIhLSLOSBmRmygPsGwkVVt0fZa0qrtMz+m6tJTAHfZQ8FnmB4MG4LWy7/w==} engines: {node: '>=8'} - path-exists@5.0.0: - resolution: {integrity: sha512-RjhtfwJOxzcFmNOi6ltcbcu4Iu+FL3zEj83dk4kAS+fVpTxXLO1b38RvJgT/0QwvV/L3aY9TAnyv0EOqW4GoMQ==} - engines: {node: ^12.20.0 || ^14.13.1 || >=16.0.0} - path-is-absolute@1.0.1: resolution: {integrity: sha512-AVbw3UJ2e9bq64vSaS9Am0fje1Pa8pbGqTTsmXfaIiMpnr5DlDhfJOuLj9Sf95ZPVDAUerDfEk88MPmPe7UCQg==} engines: 
{node: '>=0.10.0'} @@ -9996,49 +9042,9 @@ packages: peek-stream@1.1.3: resolution: {integrity: sha512-FhJ+YbOSBb9/rIl2ZeE/QHEsWn7PqNYt8ARAY3kIgNGOk13g9FGyIY6JIl/xB/3TFRVoTv5as0l11weORrTekA==} - perfect-debounce@1.0.0: - resolution: {integrity: sha512-xCy9V055GLEqoFaHoC1SoLIaLmWctgCUaBaWxDZ7/Zx4CTyX7cJQLJOok/orfjZAh9kEYpjJa4d0KcJmCbctZA==} - periscopic@3.1.0: resolution: {integrity: sha512-vKiQ8RRtkl9P+r/+oefh25C3fhybptkHKCZSPlcXiJux2tJF55GnEj3BVn4A5gKfq9NWWXXrxkHBwVPUfH0opw==} - pg-cloudflare@1.2.7: - resolution: {integrity: sha512-YgCtzMH0ptvZJslLM1ffsY4EuGaU0cx4XSdXLRFae8bPP4dS5xL1tNB3k2o/N64cHJpwU7dxKli/nZ2lUa5fLg==} - - pg-connection-string@2.6.2: - resolution: {integrity: sha512-ch6OwaeaPYcova4kKZ15sbJ2hKb/VP48ZD2gE7i1J+L4MspCtBMAx8nMgz7bksc7IojCIIWuEhHibSMFH8m8oA==} - - pg-connection-string@2.9.1: - resolution: {integrity: sha512-nkc6NpDcvPVpZXxrreI/FOtX3XemeLl8E0qFr6F2Lrm/I8WOnaWNhIPK2Z7OHpw7gh5XJThi6j6ppgNoaT1w4w==} - - pg-int8@1.0.1: - resolution: {integrity: sha512-WCtabS6t3c8SkpDBUlb1kjOs7l66xsGdKpIPZsg4wR+B3+u9UAum2odSsF9tnvxg80h4ZxLWMy4pRjOsFIqQpw==} - engines: {node: '>=4.0.0'} - - pg-pool@3.10.1: - resolution: {integrity: sha512-Tu8jMlcX+9d8+QVzKIvM/uJtp07PKr82IUOYEphaWcoBhIYkoHpLXN3qO59nAI11ripznDsEzEv8nUxBVWajGg==} - peerDependencies: - pg: '>=8.0' - - pg-protocol@1.10.3: - resolution: {integrity: sha512-6DIBgBQaTKDJyxnXaLiLR8wBpQQcGWuAESkRBX/t6OwA8YsqP+iVSiond2EDy6Y/dsGk8rh/jtax3js5NeV7JQ==} - - pg-types@2.2.0: - resolution: {integrity: sha512-qTAAlrEsl8s4OiEQY69wDvcMIdQN6wdz5ojQiOy6YRMuynxenON0O5oCpJI6lshc6scgAY8qvJ2On/p+CXY0GA==} - engines: {node: '>=4'} - - pg@8.16.3: - resolution: {integrity: sha512-enxc1h0jA/aq5oSDMvqyW3q89ra6XIIDZgCX9vkMrnz5DFTw/Ny3Li2lFQ+pt3L6MCgm/5o2o8HW9hiJji+xvw==} - engines: {node: '>= 16.0.0'} - peerDependencies: - pg-native: '>=3.0.1' - peerDependenciesMeta: - pg-native: - optional: true - - pgpass@1.0.5: - resolution: {integrity: sha512-FdW9r/jQZhSeohs1Z3sI1yxFQNFvMcnmfuj4WBMUTxOrAyLMaTcE1aAMBiTlbMNaXvBCQuVi0R7hd8udDSP7ug==} - picocolors@0.2.1: resolution: {integrity: sha512-cMlDqaLEqfSaW8Z7N5Jw+lyIW869EzT73/F5lhtY9cLGoVxSXznfgfXMO0Z5K0o0Q2TkTXq+0KFsdnSe3jDViA==} @@ -10093,13 +9099,6 @@ packages: pkg-types@2.1.0: resolution: {integrity: sha512-wmJwA+8ihJixSoHKxZJRBQG1oY8Yr9pGLzRmSsNms0iNWyHHAlZCa7mmKiFR10YPZuz/2k169JiS/inOjBCZ2A==} - polite-json@5.0.0: - resolution: {integrity: sha512-OLS/0XeUAcE8a2fdwemNja+udKgXNnY6yKVIXqAD2zVRx1KvY6Ato/rZ2vdzbxqYwPW0u6SCNC/bAMPNzpzxbw==} - engines: {node: ^14.17.0 || ^16.13.0 || >=18.0.0} - - polka@0.5.2: - resolution: {integrity: sha512-FVg3vDmCqP80tOrs+OeNlgXYmFppTXdjD5E7I4ET1NjvtNmQrb1/mJibybKkb/d4NA7YWAr1ojxuhpL3FHqdlw==} - possible-typed-array-names@1.1.0: resolution: {integrity: sha512-/+5VFTchJDoVj3bhoqi6UeymcD00DAwb1nJwamzPvHEszJ4FpF6SNNbUbOS8yI56qHzdV8eK0qEfOSiodkTdxg==} engines: {node: '>= 0.4'} @@ -10257,22 +9256,6 @@ packages: resolution: {integrity: sha512-d/jtm+rdNT8tpXuHY5MMtcbJFBkhXE6593XVR9UoGCH8jSFGci7jGvMGH5RYd5PBJW+00NZQt6gf7CbagJCrhg==} engines: {node: ^10 || ^12 || >=14} - postgres-array@2.0.0: - resolution: {integrity: sha512-VpZrUqU5A69eQyW2c5CA1jtLecCsN2U/bD6VilrFDWq5+5UIEVO7nazS3TEcHf1zuPYO/sqGvUvW62g86RXZuA==} - engines: {node: '>=4'} - - postgres-bytea@1.0.0: - resolution: {integrity: sha512-xy3pmLuQqRBZBXDULy7KbaitYqLcmxigw14Q5sj8QBVLqEwXfeybIKVWiqAXTlcvdvb0+xkOtDbfQMOf4lST1w==} - engines: {node: '>=0.10.0'} - - postgres-date@1.0.7: - resolution: {integrity: sha512-suDmjLVQg78nMK2UZ454hAG+OAW+HQPZ6n++TNDUX+L0+uUlLywnoxJKDou51Zm+zTCjrCl0Nq6J9C5hP9vK/Q==} - engines: {node: 
'>=0.10.0'} - - postgres-interval@1.2.0: - resolution: {integrity: sha512-9ZhXKM/rw350N1ovuWHbGxnGh/SNJ4cnxHiM0rxE4VN41wsg8P8zWn9hv/buK00RP4WvlOyr/RBDiptyxVbkZQ==} - engines: {node: '>=0.10.0'} - posthog-js@1.250.2: resolution: {integrity: sha512-g/H9lJhjhsYPnpxntqp36osK7oJ6CFqul2+mVUkaacAJUG4DqCG8iawsSnZvvUmapjapbf2HUA7PCRrpRsl06A==} peerDependencies: @@ -10567,9 +9550,6 @@ packages: resolution: {integrity: sha512-RmkhL8CAyCRPXCE28MMH0z2PNWQBNk2Q09ZdxM9IOOXwxwZbN+qbWaatPkdkWIKL2ZVDImrN/pK5HTRz2PcS4g==} engines: {node: '>= 0.8'} - rc9@2.1.2: - resolution: {integrity: sha512-btXCnMmRIBINM2LDZoEmOogIZU7Qe7zn4BpomSKZ/ykbLObuBdvG+mFq11DL6fjH1DRwHhrlgtYWG96bJiC7Cg==} - react-calendar-heatmap@1.10.0: resolution: {integrity: sha512-e5vcrzMWzKIF710egr1FpjWyuDEFeZm39nvV25muc8Wtqqi8iDOfqREELeQ9Wouqf9hhj939gq0i+iAxo7KdSw==} peerDependencies: @@ -10757,10 +9737,6 @@ packages: resolution: {integrity: sha512-GDhwkLfywWL2s6vEjyhri+eXmfH6j1L7JE27WhqLeYzoh/A3DBaYGEj2H/HFZCn/kMfim73FXxEJTw06WtxQwg==} engines: {node: '>= 14.18.0'} - rechoir@0.8.0: - resolution: {integrity: sha512-/vxpCXddiX8NGfGO/mTafwjq4aFa/71pvamip0++IQk3zG8cbCj0fifNPrjjF1XMXUne91jL9OoxmdykoEtifQ==} - engines: {node: '>= 10.13.0'} - redent@3.0.0: resolution: {integrity: sha512-6tDA8g98We0zd0GvVeMT9arEOnTw9qM03L9cJXaCjrip1OO764RDBLBfrB4cwzNGDj5OA5ioymC9GkizgWJDUg==} engines: {node: '>=8'} @@ -10904,10 +9880,6 @@ packages: resolution: {integrity: sha512-qYg9KP24dD5qka9J47d0aVky0N+b4fTU89LN9iDnjB5waksiC49rvMB0PrUJQGoTmH50XPiqOvAjDfaijGxYZw==} engines: {node: '>=8'} - resolve-import@2.0.0: - resolution: {integrity: sha512-jpKjLibLuc8D1XEV2+7zb0aqN7I8d12u89g/v6IsgCzdVlccMQJq4TKkPw5fbhHdxhm7nbVtN+KvOTnjFf+nEA==} - engines: {node: 20 || >=22} - resolve-pkg-maps@1.0.0: resolution: {integrity: sha512-seS2Tj26TBVOC2NIc2rOe2y2ZO7efxITtLZcGSOnHHNOQ7CkiUBfw0Iw2ck6xkIhPwLhKNLS8BO+hEpngQlqzw==} @@ -10935,10 +9907,6 @@ packages: resolution: {integrity: sha512-9LkiTwjUh6rT555DtE9rTX+BKByPfrMzEAtnlEtdEwr3Nkffwiihqe2bWADg+OQRjt9gl6ICdmB/ZFDCGAtSow==} engines: {node: '>= 4'} - retry@0.13.1: - resolution: {integrity: sha512-XQBQ3I8W1Cge0Seh+6gjj03LbmRFWuoszgK9ooCpwYIrhhoO80pfq4cUkU5DkknwfOfFteRwlZ56PYOGYyFWdg==} - engines: {node: '>= 4'} - reusify@1.1.0: resolution: {integrity: sha512-g6QUff04oZpHs0eG5p83rFLhHeV00ug/Yf9nZM6fLeUrPguBTkTQOdpAWWspMh55TZfVQDPaN3NQJfbVRAxdIw==} engines: {iojs: '>=1.0.0', node: '>=0.10.0'} @@ -10948,10 +9916,6 @@ packages: deprecated: Rimraf versions prior to v4 are no longer supported hasBin: true - rimraf@5.0.10: - resolution: {integrity: sha512-l0OE8wL34P4nJH/H2ffoaniAokM2qSmrtXHmlpvYr5AVVX8msAyW0l8NVJFDxlSK4u3Uh/f41cQheDVdnYijwQ==} - hasBin: true - rimraf@6.0.1: resolution: {integrity: sha512-9dkvaxAsk/xNXSJzMgFqqMCuFgt2+KsOFek3TMLfo8NCPfWpBmqwyNn5Y+NX56QUYfCtsyhF3ayiboEoUmJk/A==} engines: {node: 20 || >=22} @@ -10972,10 +9936,6 @@ packages: resolution: {integrity: sha512-nLTrUKm2UyiL7rlhapu/Zl45FwNgkZGaCpZbIHajDYgwlJCOzLSk+cIPAnsEqV955GjILJnKbdQC1nVPz+gAYQ==} engines: {node: '>= 18'} - run-applescript@7.0.0: - resolution: {integrity: sha512-9by4Ij99JUr/MCFBUkDKLWK3G9HVXmabKz9U5MlIAIuvuzkiOicRYs8XJLxX+xahD+mLiiCYDqF9dKAgtzKP1A==} - engines: {node: '>=18'} - run-exclusive@2.2.19: resolution: {integrity: sha512-K3mdoAi7tjJ/qT7Flj90L7QyPozwUaAG+CVhkdDje4HLKXUYC3N/Jzkau3flHVDLQVhiHBtcimVodMjN9egYbA==} @@ -11107,9 +10067,6 @@ packages: resolution: {integrity: sha512-ObmnIF4hXNg1BqhnHmgbDETF8dLPCggZWBjkQfhZpbszZnYur5DUljTcCHii5LC3J5E0yeO/1LIMyH+UvHQgyw==} engines: {node: '>= 0.4'} - shimmer@1.2.1: - resolution: {integrity: 
sha512-sQTKC1Re/rM6XyFM6fIAGHRPVGvyXfgzIDvzoq608vM+jeyVD0Tu1E6Np0Kc2zAIFWIj963V2800iF/9LPieQw==} - side-channel-list@1.0.0: resolution: {integrity: sha512-FCLHtRD/gnpCiCHEiJLOwdmFP+wzCmDEkc9y7NsYxeF4u7Btsn1ZuwgwJGxImImHicJArLP4R0yX4c2KCrMrTA==} engines: {node: '>= 0.4'} @@ -11142,17 +10099,10 @@ packages: simple-swizzle@0.2.2: resolution: {integrity: sha512-JA//kQgZtbuY83m+xT+tXJkmJncGMTFT+C+g2h2R9uxkYIrE2yy9sgmcLhCnw57/WSD+Eh3J97FPEDFnbXnDUg==} - sisteransi@1.0.5: - resolution: {integrity: sha512-bLGGlR1QxBcynn2d5YmDX4MGjlZvy2MRBDRNHLJ8VI6l6+9FUiyTFNJ0IveOSP0bcXgVDPRcfGqA0pjaqUpfVg==} - slash@3.0.0: resolution: {integrity: sha512-g9Q1haeby36OSStwb4ntCGGGaKsaVSjQ68fBxoQcutl5fS1vuY18H3wSt3jFyFtrkx+Kz0V1G85A4MyAdDMi2Q==} engines: {node: '>=8'} - slash@4.0.0: - resolution: {integrity: sha512-3dOsAHXXUkQTpOYcoAxLIorMTp4gIQr5IW3iVb7A7lFIp0VHhnynm9izx6TssdrIcVIESAlVjtnO2K8bg+Coew==} - engines: {node: '>=12'} - slug@6.1.0: resolution: {integrity: sha512-x6vLHCMasg4DR2LPiyFGI0gJJhywY6DTiGhCrOMzb3SOk/0JVLIaL4UhyFSHu04SD3uAavrKY/K3zZ3i6iRcgA==} @@ -11232,10 +10182,6 @@ packages: spdx-license-ids@3.0.21: resolution: {integrity: sha512-Bvg/8F5XephndSK3JffaRqdT+gyhfqIPwDHpX80tJrF8QQRYMo8sNMeaZ2Dp5+jhwKnUmIOyFFQfHRkjJm5nXg==} - split2@4.2.0: - resolution: {integrity: sha512-UcjcJOWknrNkF6PLX83qcHM6KHgVKNkV62Y8a5uYDVv9ydGQVwAHMKqHdJje1VTWpljG0WYpCDhrCdAOYH4TWg==} - engines: {node: '>= 10.x'} - sprintf-js@1.0.3: resolution: {integrity: sha512-D9cPgkvLlV3t3IzL0D0YLvGA9Ahk4PcvVwUbN0dSGr1aP0Nrt4AEnTUbuGvquEC0mA64Gqt1fzirlRs5ibXx8g==} @@ -11402,10 +10348,6 @@ packages: resolution: {integrity: sha512-5JRxVqC8I8NuOUjzBbvVJAKNM8qoVuH0O77h4WInc/qC2q5IreqKxYwgkga3PfA22OayK2ikceb/B26dztPl+Q==} engines: {node: '>=16'} - supports-color@10.0.0: - resolution: {integrity: sha512-HRVVSbCCMbj7/kdWF9Q+bbckjBHLtHMEoJWlkmYzzdwhYMkjkOwubLM6t7NbWKjgKamGDrWL1++KrjUO1t9oAQ==} - engines: {node: '>=18'} - supports-color@5.5.0: resolution: {integrity: sha512-QjVjwdXIt408MIiAqCX4oUKsgU2EqAGzs2Ppkm4aQYbjm+ZEWEcW4SfFNTr4uMNZma0ey4f5lgLrkB0aX0QMow==} engines: {node: '>=4'} @@ -11431,11 +10373,6 @@ packages: peerDependencies: react: ^16.11.0 || ^17.0.0 || ^18.0.0 || ^19.0.0 - sync-content@2.0.1: - resolution: {integrity: sha512-NI1mo514yFhr8pV/5Etvgh+pSBUIpoAKoiBIUwALVlQQNAwb40bTw8hhPFaip/dvv0GhpHVOq0vq8iY02ppLTg==} - engines: {node: 20 || >=22} - hasBin: true - tailwind-merge@2.2.0: resolution: {integrity: sha512-SqqhhaL0T06SW59+JVNfAqKdqLs0497esifRrZ7jOaefP3o64fdFNDMrAQWZFMxTLJPiHVjRLUywT8uFz1xNWQ==} @@ -11496,10 +10433,6 @@ packages: resolution: {integrity: sha512-5S7Va8hKfV7W5U6g3aYxXmlPoZVAwUMy9AOKyF2fVuZa2UD3qZjg578OrLRt8PcNN1PleVaL/5/yYATNL0ICUw==} engines: {node: '>=18'} - tarn@3.0.2: - resolution: {integrity: sha512-51LAVKUSZSVfI05vjPESNc5vwqqZpbXCsU+/+wxlOrUjk2SnFTt97v9ZgQrD4YmxYW1Px6w2KjaDitCfkvgxMQ==} - engines: {node: '>=8.0.0'} - tdigest@0.1.2: resolution: {integrity: sha512-+G0LLgjjo9BZX2MfdvPfH+MKLCrxlXSYec5DaPYP1fe6Iyhf0/fSmJ0bFiZ1F8BT6cGXl2LpltQptzjXKWEkKA==} @@ -11545,16 +10478,9 @@ packages: through2@2.0.5: resolution: {integrity: sha512-/mrRod8xqpA+IHSLyGCQ2s8SPHiCDEeQJSep1jqLYeEUClOFG2Qsh+4FU6G9VeqpZnGW/Su8LQGc4YKni5rYSQ==} - tildify@2.0.0: - resolution: {integrity: sha512-Cc+OraorugtXNfs50hU9KS369rFXCfgGLpfCfvlc+Ud5u6VWmUQsOAa9HbTvheQdYnrdJqqv1e5oIqXppMYnSw==} - engines: {node: '>=8'} - tiny-invariant@1.3.3: resolution: {integrity: sha512-+FbBPE1o9QAYvviau/qC5SE3caw21q3xkvWKBtja5vgqOWIHHJ3ioaq1VPfn/Szqctz2bU/oYeKd9/z5BL+PVg==} - tinycolor2@1.6.0: - resolution: {integrity: 
sha512-XPaBkWQJdsf3pLKJV9p4qN/S+fm2Oj8AIPo1BTUhg5oxkvm9+SVEGFdhyOz7tTdUTfvxMiAs4sp6/eZO2Ew+pw==} - tinyexec@0.3.2: resolution: {integrity: sha512-KQQR9yN7R5+OSwaK0XQoj22pwHoTlgYqmUscPYoknOoWCWfj/5/ABTMRi69FrKU5ffPVh5QcFikpWJI/P1ocHA==} @@ -11562,9 +10488,6 @@ packages: resolution: {integrity: sha512-tX5e7OM1HnYr2+a2C/4V0htOcSQcoSTH9KgJnVvNm5zm/cyEWKJ7j7YutsH9CxMdtOkkLFy2AHrMci9IM8IPZQ==} engines: {node: '>=12.0.0'} - tinygradient@1.1.5: - resolution: {integrity: sha512-8nIfc2vgQ4TeLnk2lFj4tRLvvJwEfQuabdsmvDdQPT0xlk9TaNtpGd6nNRxXoK6vQhN6RSzj+Cnp5tTQmpxmbw==} - tippy.js@6.3.7: resolution: {integrity: sha512-E1d3oP2emgJ9dRQZdf3Kkn0qJgI6ZLpyS5z6ZkY1DF3kaQaBsGZsndEpHwx+eC+tYM41HaSNvNtLx8tU57FzTQ==} @@ -11611,10 +10534,6 @@ packages: trough@2.2.0: resolution: {integrity: sha512-tmMpK00BjZiUyVyvrBK7knerNgmgvcV/KLVyuma/SC+TQN167GrMRciANTz09+k3zW8L8t60jWO1GpfkZdjTaw==} - trouter@2.0.1: - resolution: {integrity: sha512-kr8SKKw94OI+xTGOkfsvwZQ8mWoikZDd2n8XZHjJVZUARZT+4/VV6cacRS6CLsH9bNm+HFIPU1Zx4CnNnb4qlQ==} - engines: {node: '>=6'} - ts-algebra@2.0.0: resolution: {integrity: sha512-FPAhNPFMrkwz76P7cdjdmiShwMynZYN6SgOujD1urY4oNm80Ou9oMdmbR45LotcKOXoy7wSmHkRFE6Mxbrhefw==} @@ -11624,14 +10543,6 @@ packages: peerDependencies: typescript: '>=4.2.0' - ts-essentials@10.0.1: - resolution: {integrity: sha512-HPH+H2bkkO8FkMDau+hFvv7KYozzned9Zr1Urn7rRPXMF4mZmCKOq+u4AI1AAW+2bofIOXTuSdKo9drQuni2dQ==} - peerDependencies: - typescript: '>=4.5.0' - peerDependenciesMeta: - typescript: - optional: true - ts-interface-checker@0.1.13: resolution: {integrity: sha512-Y/arvbn+rrz3JCKl9C4kVNfTfSm2/mEp5FSz5EsZSANGPSlQrpRI5M4PKF+mJnE52jOO90PnPSc3Ur3bTQw0gA==} @@ -11665,11 +10576,6 @@ packages: resolution: {integrity: sha512-NoZ4roiN7LnbKn9QqE1amc9DJfzvZXxF4xDavcOWt1BPkdx+m+0gJuPM+S0vCe7zTJMYUP0R8pO2XMr+Y8oLIg==} engines: {node: '>=6'} - tshy@3.0.2: - resolution: {integrity: sha512-8GkWnAfmNXxl8iDTZ1o2H4jdaj9H7HeDKkr5qd0ZhQBCNA41D3xqTyg2Ycs51VCfmjJ5e+0v9AUmD6ylAI9Bgw==} - engines: {node: 20 || >=22} - hasBin: true - tslib@1.14.1: resolution: {integrity: sha512-Xni35NKzjgMrwevysHTCArtLDpPvye8zV/0E4EyYn43P7/7qvQwPh9BGkHewbMulVntbigmcT7rdX3BNo9wRJg==} @@ -11701,11 +10607,6 @@ packages: peerDependencies: typescript: '>=2.8.0 || >= 3.2.0-dev || >= 3.3.0-dev || >= 3.4.0-dev || >= 3.5.0-dev || >= 3.6.0-dev || >= 3.6.0-beta || >= 3.7.0-dev || >= 3.7.0-beta' - tsx@4.17.0: - resolution: {integrity: sha512-eN4mnDA5UMKDt4YZixo9tBioibaMBpoxBkD+rIPAjVmYERSG0/dWEY1CEFuV89CgASlKL499q8AhmkMnnjtOJg==} - engines: {node: '>=18.0.0'} - hasBin: true - tsx@4.20.4: resolution: {integrity: sha512-yyxBKfORQ7LuRt/BQKBXrpcq59ZvSW0XxwfjAt3w2/8PmdxaFzijtMhTawprSHhpzeM5BgU2hXHG3lklIERZXg==} engines: {node: '>=18.0.0'} @@ -11859,10 +10760,6 @@ packages: resolution: {integrity: sha512-l+zSMssRqrzDcb3fjMkjjLGmuiiK2pMIcV++mJaAc9vhjSGpvM7h43QgP+OAMb1GImHmbPyG2tBXeuyG5iY4gA==} engines: {node: '>=20.18.1'} - unicorn-magic@0.1.0: - resolution: {integrity: sha512-lRfVq8fE8gz6QMBuDM6a+LO3IAzTi05H6gCVaUpir2E1Rwpo4ZUog45KpNXKC/Mn3Yb9UDuHumeFTo9iV/D9FQ==} - engines: {node: '>=18'} - unicorn-magic@0.3.0: resolution: {integrity: sha512-+QBBXBCvifc56fsbuxZQ6Sic3wqqc3WWaqxs58gvJrcOuN83HGTCwz3oS5phzU9LthRNE9VrJCFCLUgHeeFnfA==} engines: {node: '>=18'} @@ -11982,10 +10879,6 @@ packages: resolution: {integrity: sha512-pMZTvIkT1d+TFGvDOqodOclx0QWkkgi6Tdoa8gC8ffGAAqz9pzPTZWAybbsHHoED/ztMtkv/VoYTYyShUn81hA==} engines: {node: '>= 0.4.0'} - uuid@11.1.0: - resolution: {integrity: sha512-0/A9rDy9P7cJ+8w1c9WD9V//9Wj15Ce2MPz8Ri6032usz+NfePxx5AcN3bN+r6ZL6jEo066/yNYB3tn4pQEx+A==} - 
hasBin: true - uuid@9.0.1: resolution: {integrity: sha512-b+1eJOlsR9K8HJpow9Ok3fiWOWSIcIzXodvv0rQjVoOVNpWMpxf1wZNpt4y9h10odCNrqnYp1OBzRktckBe3sA==} hasBin: true @@ -12122,10 +11015,6 @@ packages: w3c-keyname@2.2.8: resolution: {integrity: sha512-dpojBhNsCNN7T82Tm7k26A6G9ML3NkhDsnw9n/eoxSRlVBB4CEtIQ/KTCLI2Fwf3ataSXRhYFkQi3SlnFwPvPQ==} - walk-up-path@4.0.0: - resolution: {integrity: sha512-3hu+tD8YzSLGuFYtPRb48vdhKMi0KQV5sn+uWr8+7dMEq/2G/dtLrdDinkLjqq5TIbIBjYJ4Ax/n3YiaW7QM8A==} - engines: {node: 20 || >=22} - watchpack@2.4.4: resolution: {integrity: sha512-c5EGNOiyxxV5qmTtAB7rbiXxi1ooX1pQKMLX/MIabJjRA0SJBQOjKF+KSVfHkr9U1cADPon0mRiVe/riyaiDUA==} engines: {node: '>=10.13.0'} @@ -12272,18 +11161,6 @@ packages: utf-8-validate: optional: true - wsl-utils@0.1.0: - resolution: {integrity: sha512-h3Fbisa2nKGPxCpm89Hk33lBLsnaGBvctQopaBSOW/uIs6FTe1ATyAnKFJrzVs9vpGdsTe73WF3V4lIsk4Gacw==} - engines: {node: '>=18'} - - xdg-app-paths@8.3.0: - resolution: {integrity: sha512-mgxlWVZw0TNWHoGmXq+NC3uhCIc55dDpAlDkMQUaIAcQzysb0kxctwv//fvuW61/nAAeUBJMQ8mnZjMmuYwOcQ==} - engines: {node: '>= 4.0'} - - xdg-portable@10.6.0: - resolution: {integrity: sha512-xrcqhWDvtZ7WLmt8G4f3hHy37iK7D2idtosRgkeiSPZEPmBShp0VfmRBLWAPC6zLF48APJ21yfea+RfQMF4/Aw==} - engines: {node: '>= 4.0'} - xmlhttprequest-ssl@2.0.0: resolution: {integrity: sha512-QKxVRxiRACQcVuQEYFsI1hhkrMlrXHPegbbd1yn9UHOmRxY+si12nQYzri3vbzt8VdTTRviqcKxcyllFas5z2A==} engines: {node: '>=0.4.0'} @@ -12341,10 +11218,6 @@ packages: resolution: {integrity: sha512-rVksvsnNCdJ/ohGc6xgPwyN8eheCxsiLM8mxuE/t/mOVqJewPuO1miLpTHQiRgTKCLexL4MeAFVagts7HmNZ2Q==} engines: {node: '>=10'} - yocto-queue@1.2.1: - resolution: {integrity: sha512-AyeEbWOu/TAXdxlV9wmGcR0+yh2j3vYPGOECcIj2S7MkrLyC7ne+oye2BKTItt0ii2PHk4cDy+95+LshzbXnGg==} - engines: {node: '>=12.20'} - yoctocolors@2.1.1: resolution: {integrity: sha512-GQHQqAopRhwU8Kt1DDM8NjibDXHC8eoh1erhGAJPEyveY9qqVeXvVikNKrDz69sHowPMorbPUrH/mx8c50eiBQ==} engines: {node: '>=18'} @@ -12458,8 +11331,6 @@ snapshots: optionalDependencies: zod: 3.25.76 - '@arr/every@1.0.1': {} - '@aws-crypto/crc32@5.2.0': dependencies: '@aws-crypto/util': 5.2.0 @@ -13923,7 +12794,7 @@ snapshots: '@babel/traverse': 7.27.4 '@babel/types': 7.27.6 convert-source-map: 2.0.0 - debug: 4.4.1(supports-color@10.0.0) + debug: 4.4.1 gensync: 1.0.0-beta.2 json5: 2.2.3 semver: 6.3.1 @@ -13943,7 +12814,7 @@ snapshots: '@babel/traverse': 7.27.4 '@babel/types': 7.27.6 convert-source-map: 2.0.0 - debug: 4.4.1(supports-color@10.0.0) + debug: 4.4.1 gensync: 1.0.0-beta.2 json5: 2.2.3 semver: 6.3.1 @@ -14219,7 +13090,7 @@ snapshots: '@babel/parser': 7.27.5 '@babel/template': 7.27.2 '@babel/types': 7.27.6 - debug: 4.4.1(supports-color@10.0.0) + debug: 4.4.1 globals: 11.12.0 transitivePeerDependencies: - supports-color @@ -14410,22 +13281,8 @@ snapshots: human-id: 1.0.2 prettier: 2.8.8 - '@clack/core@0.4.2': - dependencies: - picocolors: 1.1.1 - sisteransi: 1.0.5 - - '@clack/prompts@0.10.1': - dependencies: - '@clack/core': 0.4.2 - picocolors: 1.1.1 - sisteransi: 1.0.5 - '@coji/remix-auth-google@4.2.0': {} - '@colors/colors@1.5.0': - optional: true - '@conform-to/dom@0.6.3': {} '@conform-to/react@0.6.3(react@18.3.1)': @@ -14440,45 +13297,6 @@ snapshots: '@daybrush/utils@1.13.0': {} - '@depot/cli-darwin-arm64@0.0.1-cli.2.80.0': - optional: true - - '@depot/cli-darwin-x64@0.0.1-cli.2.80.0': - optional: true - - '@depot/cli-linux-arm64@0.0.1-cli.2.80.0': - optional: true - - '@depot/cli-linux-arm@0.0.1-cli.2.80.0': - optional: true - - '@depot/cli-linux-ia32@0.0.1-cli.2.80.0': - optional: 
true - - '@depot/cli-linux-x64@0.0.1-cli.2.80.0': - optional: true - - '@depot/cli-win32-arm64@0.0.1-cli.2.80.0': - optional: true - - '@depot/cli-win32-ia32@0.0.1-cli.2.80.0': - optional: true - - '@depot/cli-win32-x64@0.0.1-cli.2.80.0': - optional: true - - '@depot/cli@0.0.1-cli.2.80.0': - optionalDependencies: - '@depot/cli-darwin-arm64': 0.0.1-cli.2.80.0 - '@depot/cli-darwin-x64': 0.0.1-cli.2.80.0 - '@depot/cli-linux-arm': 0.0.1-cli.2.80.0 - '@depot/cli-linux-arm64': 0.0.1-cli.2.80.0 - '@depot/cli-linux-ia32': 0.0.1-cli.2.80.0 - '@depot/cli-linux-x64': 0.0.1-cli.2.80.0 - '@depot/cli-win32-arm64': 0.0.1-cli.2.80.0 - '@depot/cli-win32-ia32': 0.0.1-cli.2.80.0 - '@depot/cli-win32-x64': 0.0.1-cli.2.80.0 - '@edgefirst-dev/data@0.0.4': {} '@egjs/agent@2.4.4': {} @@ -14521,27 +13339,12 @@ snapshots: '@emotion/memoize@0.7.4': optional: true - '@epic-web/test-server@0.1.6': - dependencies: - '@hono/node-server': 1.16.0(hono@4.8.5) - '@hono/node-ws': 1.2.0(@hono/node-server@1.16.0(hono@4.8.5))(hono@4.8.5) - '@open-draft/deferred-promise': 2.2.0 - '@types/ws': 8.18.1 - hono: 4.8.5 - ws: 8.18.3 - transitivePeerDependencies: - - bufferutil - - utf-8-validate - '@esbuild/aix-ppc64@0.19.11': optional: true '@esbuild/aix-ppc64@0.21.5': optional: true - '@esbuild/aix-ppc64@0.23.1': - optional: true - '@esbuild/aix-ppc64@0.25.5': optional: true @@ -14554,9 +13357,6 @@ snapshots: '@esbuild/android-arm64@0.21.5': optional: true - '@esbuild/android-arm64@0.23.1': - optional: true - '@esbuild/android-arm64@0.25.5': optional: true @@ -14569,9 +13369,6 @@ snapshots: '@esbuild/android-arm@0.21.5': optional: true - '@esbuild/android-arm@0.23.1': - optional: true - '@esbuild/android-arm@0.25.5': optional: true @@ -14584,9 +13381,6 @@ snapshots: '@esbuild/android-x64@0.21.5': optional: true - '@esbuild/android-x64@0.23.1': - optional: true - '@esbuild/android-x64@0.25.5': optional: true @@ -14599,9 +13393,6 @@ snapshots: '@esbuild/darwin-arm64@0.21.5': optional: true - '@esbuild/darwin-arm64@0.23.1': - optional: true - '@esbuild/darwin-arm64@0.25.5': optional: true @@ -14614,9 +13405,6 @@ snapshots: '@esbuild/darwin-x64@0.21.5': optional: true - '@esbuild/darwin-x64@0.23.1': - optional: true - '@esbuild/darwin-x64@0.25.5': optional: true @@ -14629,9 +13417,6 @@ snapshots: '@esbuild/freebsd-arm64@0.21.5': optional: true - '@esbuild/freebsd-arm64@0.23.1': - optional: true - '@esbuild/freebsd-arm64@0.25.5': optional: true @@ -14644,9 +13429,6 @@ snapshots: '@esbuild/freebsd-x64@0.21.5': optional: true - '@esbuild/freebsd-x64@0.23.1': - optional: true - '@esbuild/freebsd-x64@0.25.5': optional: true @@ -14659,9 +13441,6 @@ snapshots: '@esbuild/linux-arm64@0.21.5': optional: true - '@esbuild/linux-arm64@0.23.1': - optional: true - '@esbuild/linux-arm64@0.25.5': optional: true @@ -14674,9 +13453,6 @@ snapshots: '@esbuild/linux-arm@0.21.5': optional: true - '@esbuild/linux-arm@0.23.1': - optional: true - '@esbuild/linux-arm@0.25.5': optional: true @@ -14689,9 +13465,6 @@ snapshots: '@esbuild/linux-ia32@0.21.5': optional: true - '@esbuild/linux-ia32@0.23.1': - optional: true - '@esbuild/linux-ia32@0.25.5': optional: true @@ -14704,9 +13477,6 @@ snapshots: '@esbuild/linux-loong64@0.21.5': optional: true - '@esbuild/linux-loong64@0.23.1': - optional: true - '@esbuild/linux-loong64@0.25.5': optional: true @@ -14719,9 +13489,6 @@ snapshots: '@esbuild/linux-mips64el@0.21.5': optional: true - '@esbuild/linux-mips64el@0.23.1': - optional: true - '@esbuild/linux-mips64el@0.25.5': optional: true @@ -14734,9 +13501,6 @@ snapshots: 
'@esbuild/linux-ppc64@0.21.5': optional: true - '@esbuild/linux-ppc64@0.23.1': - optional: true - '@esbuild/linux-ppc64@0.25.5': optional: true @@ -14749,9 +13513,6 @@ snapshots: '@esbuild/linux-riscv64@0.21.5': optional: true - '@esbuild/linux-riscv64@0.23.1': - optional: true - '@esbuild/linux-riscv64@0.25.5': optional: true @@ -14764,9 +13525,6 @@ snapshots: '@esbuild/linux-s390x@0.21.5': optional: true - '@esbuild/linux-s390x@0.23.1': - optional: true - '@esbuild/linux-s390x@0.25.5': optional: true @@ -14779,9 +13537,6 @@ snapshots: '@esbuild/linux-x64@0.21.5': optional: true - '@esbuild/linux-x64@0.23.1': - optional: true - '@esbuild/linux-x64@0.25.5': optional: true @@ -14797,15 +13552,9 @@ snapshots: '@esbuild/netbsd-x64@0.21.5': optional: true - '@esbuild/netbsd-x64@0.23.1': - optional: true - '@esbuild/netbsd-x64@0.25.5': optional: true - '@esbuild/openbsd-arm64@0.23.1': - optional: true - '@esbuild/openbsd-arm64@0.25.5': optional: true @@ -14818,9 +13567,6 @@ snapshots: '@esbuild/openbsd-x64@0.21.5': optional: true - '@esbuild/openbsd-x64@0.23.1': - optional: true - '@esbuild/openbsd-x64@0.25.5': optional: true @@ -14833,9 +13579,6 @@ snapshots: '@esbuild/sunos-x64@0.21.5': optional: true - '@esbuild/sunos-x64@0.23.1': - optional: true - '@esbuild/sunos-x64@0.25.5': optional: true @@ -14848,9 +13591,6 @@ snapshots: '@esbuild/win32-arm64@0.21.5': optional: true - '@esbuild/win32-arm64@0.23.1': - optional: true - '@esbuild/win32-arm64@0.25.5': optional: true @@ -14863,9 +13603,6 @@ snapshots: '@esbuild/win32-ia32@0.21.5': optional: true - '@esbuild/win32-ia32@0.23.1': - optional: true - '@esbuild/win32-ia32@0.25.5': optional: true @@ -14878,9 +13615,6 @@ snapshots: '@esbuild/win32-x64@0.21.5': optional: true - '@esbuild/win32-x64@0.23.1': - optional: true - '@esbuild/win32-x64@0.25.5': optional: true @@ -14894,7 +13628,7 @@ snapshots: '@eslint/eslintrc@2.1.4': dependencies: ajv: 6.12.6 - debug: 4.4.1(supports-color@10.0.0) + debug: 4.4.1 espree: 9.6.1 globals: 13.24.0 ignore: 5.3.2 @@ -14937,18 +13671,6 @@ snapshots: '@google-cloud/precise-date@4.0.0': {} - '@grpc/grpc-js@1.13.4': - dependencies: - '@grpc/proto-loader': 0.7.15 - '@js-sdsl/ordered-map': 4.4.2 - - '@grpc/proto-loader@0.7.15': - dependencies: - lodash.camelcase: 4.3.0 - long: 5.3.2 - protobufjs: 7.5.3 - yargs: 17.7.2 - '@hapi/boom@10.0.1': dependencies: '@hapi/hoek': 11.0.7 @@ -14969,23 +13691,10 @@ snapshots: '@hapi/bourne': 3.0.0 '@hapi/hoek': 11.0.7 - '@hono/node-server@1.16.0(hono@4.8.5)': - dependencies: - hono: 4.8.5 - - '@hono/node-ws@1.2.0(@hono/node-server@1.16.0(hono@4.8.5))(hono@4.8.5)': - dependencies: - '@hono/node-server': 1.16.0(hono@4.8.5) - hono: 4.8.5 - ws: 8.18.3 - transitivePeerDependencies: - - bufferutil - - utf-8-validate - '@humanwhocodes/config-array@0.13.0': dependencies: '@humanwhocodes/object-schema': 2.0.3 - debug: 4.4.1(supports-color@10.0.0) + debug: 4.4.1 minimatch: 3.1.2 transitivePeerDependencies: - supports-color @@ -15065,8 +13774,6 @@ snapshots: '@jridgewell/sourcemap-codec': 1.5.5 optional: true - '@js-sdsl/ordered-map@4.4.2': {} - '@jsonhero/path@1.0.21': {} '@jspm/core@2.1.0': {} @@ -15244,31 +13951,16 @@ snapshots: '@one-ini/wasm@0.1.1': {} - '@open-draft/deferred-promise@2.2.0': {} - '@opentelemetry/api-logs@0.203.0': dependencies: '@opentelemetry/api': 1.9.0 - '@opentelemetry/api-logs@0.52.1': - dependencies: - '@opentelemetry/api': 1.9.0 - '@opentelemetry/api@1.9.0': {} - '@opentelemetry/context-async-hooks@1.25.1(@opentelemetry/api@1.9.0)': - dependencies: - 
'@opentelemetry/api': 1.9.0 - '@opentelemetry/context-async-hooks@2.0.1(@opentelemetry/api@1.9.0)': dependencies: '@opentelemetry/api': 1.9.0 - '@opentelemetry/core@1.25.1(@opentelemetry/api@1.9.0)': - dependencies: - '@opentelemetry/api': 1.9.0 - '@opentelemetry/semantic-conventions': 1.25.1 - '@opentelemetry/core@2.0.1(@opentelemetry/api@1.9.0)': dependencies: '@opentelemetry/api': 1.9.0 @@ -15283,25 +13975,6 @@ snapshots: '@opentelemetry/otlp-transformer': 0.203.0(@opentelemetry/api@1.9.0) '@opentelemetry/sdk-logs': 0.203.0(@opentelemetry/api@1.9.0) - '@opentelemetry/exporter-logs-otlp-http@0.52.1(@opentelemetry/api@1.9.0)': - dependencies: - '@opentelemetry/api': 1.9.0 - '@opentelemetry/api-logs': 0.52.1 - '@opentelemetry/core': 1.25.1(@opentelemetry/api@1.9.0) - '@opentelemetry/otlp-exporter-base': 0.52.1(@opentelemetry/api@1.9.0) - '@opentelemetry/otlp-transformer': 0.52.1(@opentelemetry/api@1.9.0) - '@opentelemetry/sdk-logs': 0.52.1(@opentelemetry/api@1.9.0) - - '@opentelemetry/exporter-trace-otlp-grpc@0.52.1(@opentelemetry/api@1.9.0)': - dependencies: - '@grpc/grpc-js': 1.13.4 - '@opentelemetry/api': 1.9.0 - '@opentelemetry/core': 1.25.1(@opentelemetry/api@1.9.0) - '@opentelemetry/otlp-grpc-exporter-base': 0.52.1(@opentelemetry/api@1.9.0) - '@opentelemetry/otlp-transformer': 0.52.1(@opentelemetry/api@1.9.0) - '@opentelemetry/resources': 1.25.1(@opentelemetry/api@1.9.0) - '@opentelemetry/sdk-trace-base': 1.25.1(@opentelemetry/api@1.9.0) - '@opentelemetry/exporter-trace-otlp-http@0.203.0(@opentelemetry/api@1.9.0)': dependencies: '@opentelemetry/api': 1.9.0 @@ -15311,60 +13984,12 @@ snapshots: '@opentelemetry/resources': 2.0.1(@opentelemetry/api@1.9.0) '@opentelemetry/sdk-trace-base': 2.0.1(@opentelemetry/api@1.9.0) - '@opentelemetry/exporter-trace-otlp-http@0.52.1(@opentelemetry/api@1.9.0)': - dependencies: - '@opentelemetry/api': 1.9.0 - '@opentelemetry/core': 1.25.1(@opentelemetry/api@1.9.0) - '@opentelemetry/otlp-exporter-base': 0.52.1(@opentelemetry/api@1.9.0) - '@opentelemetry/otlp-transformer': 0.52.1(@opentelemetry/api@1.9.0) - '@opentelemetry/resources': 1.25.1(@opentelemetry/api@1.9.0) - '@opentelemetry/sdk-trace-base': 1.25.1(@opentelemetry/api@1.9.0) - - '@opentelemetry/exporter-trace-otlp-proto@0.52.1(@opentelemetry/api@1.9.0)': - dependencies: - '@opentelemetry/api': 1.9.0 - '@opentelemetry/core': 1.25.1(@opentelemetry/api@1.9.0) - '@opentelemetry/otlp-exporter-base': 0.52.1(@opentelemetry/api@1.9.0) - '@opentelemetry/otlp-transformer': 0.52.1(@opentelemetry/api@1.9.0) - '@opentelemetry/resources': 1.25.1(@opentelemetry/api@1.9.0) - '@opentelemetry/sdk-trace-base': 1.25.1(@opentelemetry/api@1.9.0) - - '@opentelemetry/exporter-zipkin@1.25.1(@opentelemetry/api@1.9.0)': - dependencies: - '@opentelemetry/api': 1.9.0 - '@opentelemetry/core': 1.25.1(@opentelemetry/api@1.9.0) - '@opentelemetry/resources': 1.25.1(@opentelemetry/api@1.9.0) - '@opentelemetry/sdk-trace-base': 1.25.1(@opentelemetry/api@1.9.0) - '@opentelemetry/semantic-conventions': 1.25.1 - - '@opentelemetry/instrumentation-fetch@0.52.1(@opentelemetry/api@1.9.0)(supports-color@10.0.0)': - dependencies: - '@opentelemetry/api': 1.9.0 - '@opentelemetry/core': 1.25.1(@opentelemetry/api@1.9.0) - '@opentelemetry/instrumentation': 0.52.1(@opentelemetry/api@1.9.0)(supports-color@10.0.0) - '@opentelemetry/sdk-trace-web': 1.25.1(@opentelemetry/api@1.9.0) - '@opentelemetry/semantic-conventions': 1.25.1 - transitivePeerDependencies: - - supports-color - '@opentelemetry/instrumentation@0.203.0(@opentelemetry/api@1.9.0)': 
     dependencies:
       '@opentelemetry/api': 1.9.0
       '@opentelemetry/api-logs': 0.203.0
       import-in-the-middle: 1.11.0
-      require-in-the-middle: 7.5.2(supports-color@10.0.0)
-    transitivePeerDependencies:
-      - supports-color
-
-  '@opentelemetry/instrumentation@0.52.1(@opentelemetry/api@1.9.0)(supports-color@10.0.0)':
-    dependencies:
-      '@opentelemetry/api': 1.9.0
-      '@opentelemetry/api-logs': 0.52.1
-      '@types/shimmer': 1.2.0
-      import-in-the-middle: 1.11.0
-      require-in-the-middle: 7.5.2(supports-color@10.0.0)
-      semver: 7.7.2
-      shimmer: 1.2.1
+      require-in-the-middle: 7.5.2
     transitivePeerDependencies:
       - supports-color
@@ -15374,20 +13999,6 @@ snapshots:
       '@opentelemetry/core': 2.0.1(@opentelemetry/api@1.9.0)
       '@opentelemetry/otlp-transformer': 0.203.0(@opentelemetry/api@1.9.0)

-  '@opentelemetry/otlp-exporter-base@0.52.1(@opentelemetry/api@1.9.0)':
-    dependencies:
-      '@opentelemetry/api': 1.9.0
-      '@opentelemetry/core': 1.25.1(@opentelemetry/api@1.9.0)
-      '@opentelemetry/otlp-transformer': 0.52.1(@opentelemetry/api@1.9.0)
-
-  '@opentelemetry/otlp-grpc-exporter-base@0.52.1(@opentelemetry/api@1.9.0)':
-    dependencies:
-      '@grpc/grpc-js': 1.13.4
-      '@opentelemetry/api': 1.9.0
-      '@opentelemetry/core': 1.25.1(@opentelemetry/api@1.9.0)
-      '@opentelemetry/otlp-exporter-base': 0.52.1(@opentelemetry/api@1.9.0)
-      '@opentelemetry/otlp-transformer': 0.52.1(@opentelemetry/api@1.9.0)
-
   '@opentelemetry/otlp-transformer@0.203.0(@opentelemetry/api@1.9.0)':
     dependencies:
       '@opentelemetry/api': 1.9.0
@@ -15399,33 +14010,6 @@ snapshots:
       '@opentelemetry/sdk-trace-base': 2.0.1(@opentelemetry/api@1.9.0)
       protobufjs: 7.5.3

-  '@opentelemetry/otlp-transformer@0.52.1(@opentelemetry/api@1.9.0)':
-    dependencies:
-      '@opentelemetry/api': 1.9.0
-      '@opentelemetry/api-logs': 0.52.1
-      '@opentelemetry/core': 1.25.1(@opentelemetry/api@1.9.0)
-      '@opentelemetry/resources': 1.25.1(@opentelemetry/api@1.9.0)
-      '@opentelemetry/sdk-logs': 0.52.1(@opentelemetry/api@1.9.0)
-      '@opentelemetry/sdk-metrics': 1.25.1(@opentelemetry/api@1.9.0)
-      '@opentelemetry/sdk-trace-base': 1.25.1(@opentelemetry/api@1.9.0)
-      protobufjs: 7.5.3
-
-  '@opentelemetry/propagator-b3@1.25.1(@opentelemetry/api@1.9.0)':
-    dependencies:
-      '@opentelemetry/api': 1.9.0
-      '@opentelemetry/core': 1.25.1(@opentelemetry/api@1.9.0)
-
-  '@opentelemetry/propagator-jaeger@1.25.1(@opentelemetry/api@1.9.0)':
-    dependencies:
-      '@opentelemetry/api': 1.9.0
-      '@opentelemetry/core': 1.25.1(@opentelemetry/api@1.9.0)
-
-  '@opentelemetry/resources@1.25.1(@opentelemetry/api@1.9.0)':
-    dependencies:
-      '@opentelemetry/api': 1.9.0
-      '@opentelemetry/core': 1.25.1(@opentelemetry/api@1.9.0)
-      '@opentelemetry/semantic-conventions': 1.25.1
-
   '@opentelemetry/resources@2.0.1(@opentelemetry/api@1.9.0)':
     dependencies:
       '@opentelemetry/api': 1.9.0
@@ -15439,52 +14023,12 @@ snapshots:
       '@opentelemetry/core': 2.0.1(@opentelemetry/api@1.9.0)
       '@opentelemetry/resources': 2.0.1(@opentelemetry/api@1.9.0)

-  '@opentelemetry/sdk-logs@0.52.1(@opentelemetry/api@1.9.0)':
-    dependencies:
-      '@opentelemetry/api': 1.9.0
-      '@opentelemetry/api-logs': 0.52.1
-      '@opentelemetry/core': 1.25.1(@opentelemetry/api@1.9.0)
-      '@opentelemetry/resources': 1.25.1(@opentelemetry/api@1.9.0)
-
-  '@opentelemetry/sdk-metrics@1.25.1(@opentelemetry/api@1.9.0)':
-    dependencies:
-      '@opentelemetry/api': 1.9.0
-      '@opentelemetry/core': 1.25.1(@opentelemetry/api@1.9.0)
-      '@opentelemetry/resources': 1.25.1(@opentelemetry/api@1.9.0)
-      lodash.merge: 4.6.2
-
   '@opentelemetry/sdk-metrics@2.0.1(@opentelemetry/api@1.9.0)':
     dependencies:
       '@opentelemetry/api': 1.9.0
       '@opentelemetry/core': 2.0.1(@opentelemetry/api@1.9.0)
       '@opentelemetry/resources': 2.0.1(@opentelemetry/api@1.9.0)

-  '@opentelemetry/sdk-node@0.52.1(@opentelemetry/api@1.9.0)(supports-color@10.0.0)':
-    dependencies:
-      '@opentelemetry/api': 1.9.0
-      '@opentelemetry/api-logs': 0.52.1
-      '@opentelemetry/core': 1.25.1(@opentelemetry/api@1.9.0)
-      '@opentelemetry/exporter-trace-otlp-grpc': 0.52.1(@opentelemetry/api@1.9.0)
-      '@opentelemetry/exporter-trace-otlp-http': 0.52.1(@opentelemetry/api@1.9.0)
-      '@opentelemetry/exporter-trace-otlp-proto': 0.52.1(@opentelemetry/api@1.9.0)
-      '@opentelemetry/exporter-zipkin': 1.25.1(@opentelemetry/api@1.9.0)
-      '@opentelemetry/instrumentation': 0.52.1(@opentelemetry/api@1.9.0)(supports-color@10.0.0)
-      '@opentelemetry/resources': 1.25.1(@opentelemetry/api@1.9.0)
-      '@opentelemetry/sdk-logs': 0.52.1(@opentelemetry/api@1.9.0)
-      '@opentelemetry/sdk-metrics': 1.25.1(@opentelemetry/api@1.9.0)
-      '@opentelemetry/sdk-trace-base': 1.25.1(@opentelemetry/api@1.9.0)
-      '@opentelemetry/sdk-trace-node': 1.25.1(@opentelemetry/api@1.9.0)
-      '@opentelemetry/semantic-conventions': 1.25.1
-    transitivePeerDependencies:
-      - supports-color
-
-  '@opentelemetry/sdk-trace-base@1.25.1(@opentelemetry/api@1.9.0)':
-    dependencies:
-      '@opentelemetry/api': 1.9.0
-      '@opentelemetry/core': 1.25.1(@opentelemetry/api@1.9.0)
-      '@opentelemetry/resources': 1.25.1(@opentelemetry/api@1.9.0)
-      '@opentelemetry/semantic-conventions': 1.25.1
-
   '@opentelemetry/sdk-trace-base@2.0.1(@opentelemetry/api@1.9.0)':
     dependencies:
       '@opentelemetry/api': 1.9.0
@@ -15492,16 +14036,6 @@ snapshots:
       '@opentelemetry/resources': 2.0.1(@opentelemetry/api@1.9.0)
       '@opentelemetry/semantic-conventions': 1.36.0

-  '@opentelemetry/sdk-trace-node@1.25.1(@opentelemetry/api@1.9.0)':
-    dependencies:
-      '@opentelemetry/api': 1.9.0
-      '@opentelemetry/context-async-hooks': 1.25.1(@opentelemetry/api@1.9.0)
-      '@opentelemetry/core': 1.25.1(@opentelemetry/api@1.9.0)
-      '@opentelemetry/propagator-b3': 1.25.1(@opentelemetry/api@1.9.0)
-      '@opentelemetry/propagator-jaeger': 1.25.1(@opentelemetry/api@1.9.0)
-      '@opentelemetry/sdk-trace-base': 1.25.1(@opentelemetry/api@1.9.0)
-      semver: 7.7.2
-
   '@opentelemetry/sdk-trace-node@2.0.1(@opentelemetry/api@1.9.0)':
     dependencies:
       '@opentelemetry/api': 1.9.0
@@ -15509,15 +14043,6 @@ snapshots:
       '@opentelemetry/core': 2.0.1(@opentelemetry/api@1.9.0)
       '@opentelemetry/sdk-trace-base': 2.0.1(@opentelemetry/api@1.9.0)

-  '@opentelemetry/sdk-trace-web@1.25.1(@opentelemetry/api@1.9.0)':
-    dependencies:
-      '@opentelemetry/api': 1.9.0
-      '@opentelemetry/core': 1.25.1(@opentelemetry/api@1.9.0)
-      '@opentelemetry/sdk-trace-base': 1.25.1(@opentelemetry/api@1.9.0)
-      '@opentelemetry/semantic-conventions': 1.25.1
-
-  '@opentelemetry/semantic-conventions@1.25.1': {}
-
   '@opentelemetry/semantic-conventions@1.36.0': {}

   '@oslojs/asn1@1.0.0':
@@ -15603,8 +14128,6 @@ snapshots:
   '@pkgjs/parseargs@0.11.0':
     optional: true

-  '@polka/url@0.5.0': {}
-
   '@popperjs/core@2.11.8': {}

   '@posthog/core@1.3.1': {}
@@ -18441,7 +16964,7 @@ snapshots:
       '@opentelemetry/semantic-conventions': 1.36.0
       dequal: 2.0.3
       eventsource: 3.0.7
-      eventsource-parser: 3.0.3
+      eventsource-parser: 3.0.6
       execa: 8.0.1
       humanize-duration: 3.33.0
       jose: 5.10.0
@@ -18461,6 +16984,17 @@ snapshots:
       - supports-color
       - utf-8-validate

+  '@trigger.dev/python@4.0.4(@trigger.dev/build@4.0.4(typescript@5.8.3))(@trigger.dev/sdk@4.0.4(ai@5.0.78(zod@3.25.76))(zod@3.25.76))':
+    dependencies:
+      '@trigger.dev/build': 4.0.4(typescript@5.8.3)
+      '@trigger.dev/core': 4.0.4
+      '@trigger.dev/sdk': 4.0.4(ai@5.0.78(zod@3.25.76))(zod@3.25.76)
+      tinyexec: 0.3.2
+    transitivePeerDependencies:
+      - bufferutil
+      - supports-color
+      - utf-8-validate
+
   '@trigger.dev/react-hooks@4.0.4(react-dom@18.3.1(react@18.3.1))(react@18.3.1)':
     dependencies:
       '@trigger.dev/core': 4.0.4
@@ -18686,10 +17220,6 @@ snapshots:
   '@types/geojson@7946.0.16': {}

-  '@types/gradient-string@1.1.6':
-    dependencies:
-      '@types/tinycolor2': 1.4.6
-
   '@types/hast@2.3.10':
     dependencies:
       '@types/unist': 2.0.11
@@ -18700,8 +17230,6 @@ snapshots:
   '@types/http-errors@2.0.5': {}

-  '@types/ini@4.1.1': {}
-
   '@types/is-ci@3.0.4':
     dependencies:
       ci-info: 3.9.0
@@ -18772,15 +17300,6 @@ snapshots:
   '@types/normalize-package-data@2.4.4': {}

-  '@types/object-hash@3.0.6': {}
-
-  '@types/polka@0.5.7':
-    dependencies:
-      '@types/express': 4.17.23
-      '@types/express-serve-static-core': 4.19.6
-      '@types/node': 20.11.5
-      '@types/trouter': 3.1.4
-
   '@types/prismjs@1.26.5': {}

   '@types/prop-types@15.7.15': {}
@@ -18814,14 +17333,6 @@ snapshots:
       '@types/scheduler': 0.26.0
       csstype: 3.1.3

-  '@types/resolve@1.20.6': {}
-
-  '@types/retry@0.12.2': {}
-
-  '@types/rimraf@4.0.5':
-    dependencies:
-      rimraf: 6.0.1
-
   '@types/scheduler@0.26.0': {}

   '@types/semver@7.7.0': {}
@@ -18837,20 +17348,10 @@ snapshots:
       '@types/node': 20.11.5
       '@types/send': 0.17.5

-  '@types/shimmer@1.2.0': {}
-
   '@types/simple-oauth2@5.0.7': {}

   '@types/slug@5.0.9': {}

-  '@types/source-map-support@0.5.10':
-    dependencies:
-      source-map: 0.6.1
-
-  '@types/tinycolor2@1.4.6': {}
-
-  '@types/trouter@3.1.4': {}
-
   '@types/unist@2.0.11': {}

   '@types/unist@3.0.3': {}
@@ -18872,10 +17373,6 @@ snapshots:
       - uglify-js
      - webpack-cli

-  '@types/ws@8.18.1':
-    dependencies:
-      '@types/node': 20.19.7
-
   '@typescript-eslint/eslint-plugin@5.62.0(@typescript-eslint/parser@5.62.0(eslint@8.57.1)(typescript@5.8.3))(eslint@8.57.1)(typescript@5.8.3)':
     dependencies:
       '@eslint-community/regexpp': 4.12.1
@@ -18883,7 +17380,7 @@ snapshots:
       '@typescript-eslint/scope-manager': 5.62.0
       '@typescript-eslint/type-utils': 5.62.0(eslint@8.57.1)(typescript@5.8.3)
       '@typescript-eslint/utils': 5.62.0(eslint@8.57.1)(typescript@5.8.3)
-      debug: 4.4.1(supports-color@10.0.0)
+      debug: 4.4.1
       eslint: 8.57.1
       graphemer: 1.4.0
       ignore: 5.3.2
@@ -18903,7 +17400,7 @@ snapshots:
       '@typescript-eslint/type-utils': 6.21.0(eslint@8.57.1)(typescript@5.8.3)
       '@typescript-eslint/utils': 6.21.0(eslint@8.57.1)(typescript@5.8.3)
       '@typescript-eslint/visitor-keys': 6.21.0
-      debug: 4.4.1(supports-color@10.0.0)
+      debug: 4.4.1
       eslint: 8.57.1
       graphemer: 1.4.0
       ignore: 5.3.2
@@ -18920,7 +17417,7 @@ snapshots:
       '@typescript-eslint/scope-manager': 5.62.0
       '@typescript-eslint/types': 5.62.0
       '@typescript-eslint/typescript-estree': 5.62.0(typescript@5.8.3)
-      debug: 4.4.1(supports-color@10.0.0)
+      debug: 4.4.1
       eslint: 8.57.1
     optionalDependencies:
       typescript: 5.8.3
@@ -18933,7 +17430,7 @@ snapshots:
       '@typescript-eslint/types': 6.21.0
       '@typescript-eslint/typescript-estree': 6.21.0(typescript@5.8.3)
       '@typescript-eslint/visitor-keys': 6.21.0
-      debug: 4.4.1(supports-color@10.0.0)
+      debug: 4.4.1
       eslint: 8.57.1
     optionalDependencies:
       typescript: 5.8.3
@@ -18954,7 +17451,7 @@ snapshots:
     dependencies:
       '@typescript-eslint/typescript-estree': 5.62.0(typescript@5.8.3)
       '@typescript-eslint/utils': 5.62.0(eslint@8.57.1)(typescript@5.8.3)
-      debug: 4.4.1(supports-color@10.0.0)
+      debug: 4.4.1
       eslint: 8.57.1
       tsutils: 3.21.0(typescript@5.8.3)
     optionalDependencies:
@@ -18966,7 +17463,7 @@ snapshots:
     dependencies:
       '@typescript-eslint/typescript-estree': 6.21.0(typescript@5.8.3)
       '@typescript-eslint/utils': 6.21.0(eslint@8.57.1)(typescript@5.8.3)
-      debug: 4.4.1(supports-color@10.0.0)
+      debug: 4.4.1
       eslint: 8.57.1
       ts-api-utils: 1.4.3(typescript@5.8.3)
     optionalDependencies:
@@ -18982,7 +17479,7 @@ snapshots:
     dependencies:
       '@typescript-eslint/types': 5.62.0
       '@typescript-eslint/visitor-keys': 5.62.0
-      debug: 4.4.1(supports-color@10.0.0)
+      debug: 4.4.1
       globby: 11.1.0
       is-glob: 4.0.3
       semver: 7.7.2
@@ -18996,7 +17493,7 @@ snapshots:
     dependencies:
       '@typescript-eslint/types': 6.21.0
       '@typescript-eslint/visitor-keys': 6.21.0
-      debug: 4.4.1(supports-color@10.0.0)
+      debug: 4.4.1
       globby: 11.1.0
       is-glob: 4.0.3
       minimatch: 9.0.3
@@ -19287,11 +17784,6 @@ snapshots:
       clean-stack: 2.2.0
       indent-string: 4.0.0

-  aggregate-error@4.0.1:
-    dependencies:
-      clean-stack: 4.2.0
-      indent-string: 5.0.0
-
   ai@5.0.78(zod@3.25.76):
     dependencies:
       '@ai-sdk/gateway': 2.0.1(zod@3.25.76)
@@ -19325,10 +17817,6 @@ snapshots:
   ansi-colors@4.1.3: {}

-  ansi-escapes@7.0.0:
-    dependencies:
-      environment: 1.1.0
-
   ansi-regex@5.0.1: {}

   ansi-regex@6.1.0: {}
@@ -19449,8 +17937,6 @@ snapshots:
   arrify@1.0.1: {}

-  arrify@3.0.0: {}
-
   ast-types-flow@0.0.8: {}

   astring@1.9.0: {}
@@ -19556,7 +18042,7 @@ snapshots:
     dependencies:
       bytes: 3.1.2
       content-type: 1.0.5
-      debug: 4.4.1(supports-color@10.0.0)
+      debug: 4.4.1
       http-errors: 2.0.0
       iconv-lite: 0.6.3
       on-finished: 2.4.1
@@ -19626,10 +18112,6 @@ snapshots:
     transitivePeerDependencies:
       - supports-color

-  bundle-name@4.1.0:
-    dependencies:
-      run-applescript: 7.0.0
-
   bundle-require@5.1.0(esbuild@0.25.5):
     dependencies:
       esbuild: 0.25.5
@@ -19641,23 +18123,6 @@ snapshots:
   bytes@3.1.2: {}

-  c12@1.11.2(magicast@0.3.5):
-    dependencies:
-      chokidar: 3.6.0
-      confbox: 0.1.8
-      defu: 6.1.4
-      dotenv: 16.5.0
-      giget: 1.2.5
-      jiti: 1.21.7
-      mlly: 1.7.4
-      ohash: 1.1.6
-      pathe: 1.1.2
-      perfect-debounce: 1.0.0
-      pkg-types: 1.3.1
-      rc9: 2.1.2
-    optionalDependencies:
-      magicast: 0.3.5
-
   cac@6.7.14: {}

   cacache@17.1.4:
@@ -19792,10 +18257,6 @@ snapshots:
   ci-info@3.9.0: {}

-  citty@0.1.6:
-    dependencies:
-      consola: 3.4.2
-
   cjs-module-lexer@1.4.3: {}

   class-transformer@0.5.1: {}
@@ -19812,22 +18273,12 @@ snapshots:
   clean-stack@2.2.0: {}

-  clean-stack@4.2.0:
-    dependencies:
-      escape-string-regexp: 5.0.0
-
   cli-cursor@3.1.0:
     dependencies:
       restore-cursor: 3.1.0

   cli-spinners@2.9.2: {}

-  cli-table3@0.6.5:
-    dependencies:
-      string-width: 4.2.3
-    optionalDependencies:
-      '@colors/colors': 1.5.0
-
   client-only@0.0.1: {}

   cliui@6.0.0:
@@ -19913,8 +18364,6 @@ snapshots:
       color-convert: 1.9.3
       color-string: 1.9.1

-  colorette@2.0.19: {}
-
   combined-stream@1.0.8:
     dependencies:
       delayed-stream: 1.0.0
@@ -19937,8 +18386,6 @@ snapshots:
   commander@8.3.0: {}

-  commander@9.5.0: {}
-
   compressible@2.0.18:
     dependencies:
       mime-db: 1.54.0
@@ -20022,28 +18469,6 @@ snapshots:
     optionalDependencies:
       typescript: 5.8.3

-  cp-file@10.0.0:
-    dependencies:
-      graceful-fs: 4.2.11
-      nested-error-stacks: 2.1.1
-      p-event: 5.0.1
-
-  cpy-cli@5.0.0:
-    dependencies:
-      cpy: 10.1.0
-      meow: 12.1.1
-
-  cpy@10.1.0:
-    dependencies:
-      arrify: 3.0.0
-      cp-file: 10.0.0
-      globby: 13.2.2
-      junk: 4.0.1
-      micromatch: 4.0.8
-      nested-error-stacks: 2.1.1
-      p-filter: 3.0.0
-      p-map: 6.0.0
-
   crelt@1.0.6: {}

   cron-parser@4.9.0:
@@ -20319,21 +18744,13 @@ snapshots:
     dependencies:
       ms: 2.1.3

-  debug@4.3.4(supports-color@10.0.0):
-    dependencies:
-      ms: 2.1.2
-    optionalDependencies:
-      supports-color: 10.0.0
-
   debug@4.3.7:
     dependencies:
       ms: 2.1.3

-  debug@4.4.1(supports-color@10.0.0):
+  debug@4.4.1:
     dependencies:
       ms: 2.1.3
-    optionalDependencies:
-      supports-color: 10.0.0

   debug@4.4.3:
     dependencies:
@@ -20379,13 +18796,6 @@ snapshots:
   deepmerge@4.3.1: {}

-  default-browser-id@5.0.0: {}
-
-  default-browser@5.2.1:
-    dependencies:
-      bundle-name: 4.1.0
-      default-browser-id: 5.0.0
-
   defaults@1.0.4:
     dependencies:
       clone: 1.0.4
@@ -20396,8 +18806,6 @@ snapshots:
       es-errors: 1.3.0
       gopd: 1.2.0

-  define-lazy-prop@3.0.0: {}
-
   define-properties@1.2.1:
     dependencies:
       define-data-property: 1.1.4
@@ -20406,8 +18814,6 @@ snapshots:
   defined@1.0.1: {}

-  defu@6.1.4: {}
-
   delaunator@5.0.1:
     dependencies:
       robust-predicates: 3.0.2
@@ -20420,8 +18826,6 @@ snapshots:
   dequal@2.0.3: {}

-  destr@2.0.5: {}
-
   destroy@1.2.0: {}

   detect-indent@6.1.0: {}
@@ -20495,10 +18899,6 @@ snapshots:
   dotenv-expand@10.0.0: {}

-  dotenv-expand@12.0.2:
-    dependencies:
-      dotenv: 16.5.0
-
   dotenv@16.0.3: {}

   dotenv@16.5.0: {}
@@ -20599,8 +18999,6 @@ snapshots:
   env-paths@2.2.1: {}

-  environment@1.1.0: {}
-
   err-code@2.0.3: {}

   errno@0.1.8:
@@ -20811,33 +19209,6 @@ snapshots:
       '@esbuild/win32-ia32': 0.21.5
       '@esbuild/win32-x64': 0.21.5

-  esbuild@0.23.1:
-    optionalDependencies:
-      '@esbuild/aix-ppc64': 0.23.1
-      '@esbuild/android-arm': 0.23.1
-      '@esbuild/android-arm64': 0.23.1
-      '@esbuild/android-x64': 0.23.1
-      '@esbuild/darwin-arm64': 0.23.1
-      '@esbuild/darwin-x64': 0.23.1
-      '@esbuild/freebsd-arm64': 0.23.1
-      '@esbuild/freebsd-x64': 0.23.1
-      '@esbuild/linux-arm': 0.23.1
-      '@esbuild/linux-arm64': 0.23.1
-      '@esbuild/linux-ia32': 0.23.1
-      '@esbuild/linux-loong64': 0.23.1
-      '@esbuild/linux-mips64el': 0.23.1
-      '@esbuild/linux-ppc64': 0.23.1
-      '@esbuild/linux-riscv64': 0.23.1
-      '@esbuild/linux-s390x': 0.23.1
-      '@esbuild/linux-x64': 0.23.1
-      '@esbuild/netbsd-x64': 0.23.1
-      '@esbuild/openbsd-arm64': 0.23.1
-      '@esbuild/openbsd-x64': 0.23.1
-      '@esbuild/sunos-x64': 0.23.1
-      '@esbuild/win32-arm64': 0.23.1
-      '@esbuild/win32-ia32': 0.23.1
-      '@esbuild/win32-x64': 0.23.1
-
   esbuild@0.25.5:
     optionalDependencies:
       '@esbuild/aix-ppc64': 0.25.5
@@ -20874,8 +19245,6 @@ snapshots:
   escape-string-regexp@4.0.0: {}

-  escape-string-regexp@5.0.0: {}
-
   eslint-config-prettier@8.10.0(eslint@8.57.1):
     dependencies:
       eslint: 8.57.1
@@ -20908,7 +19277,7 @@ snapshots:
   eslint-import-resolver-typescript@3.10.1(eslint-plugin-import@2.31.0)(eslint@8.57.1):
     dependencies:
       '@nolyfill/is-core-module': 1.0.39
-      debug: 4.4.1(supports-color@10.0.0)
+      debug: 4.4.1
       eslint: 8.57.1
       get-tsconfig: 4.10.1
       is-bun-module: 2.0.0
@@ -21130,7 +19499,7 @@ snapshots:
       ajv: 6.12.6
       chalk: 4.1.2
       cross-spawn: 7.0.6
-      debug: 4.4.1(supports-color@10.0.0)
+      debug: 4.4.1
       doctrine: 3.0.0
       escape-string-regexp: 4.0.0
       eslint-scope: 7.2.2
@@ -21160,8 +19529,6 @@ snapshots:
     transitivePeerDependencies:
       - supports-color

-  esm@3.2.25: {}
-
   espree@9.6.1:
     dependencies:
       acorn: 8.15.0
@@ -21226,8 +19593,6 @@ snapshots:
       '@types/node': 20.19.7
       require-like: 0.1.2

-  event-target-polyfill@0.0.4: {}
-
   event-target-shim@5.0.1: {}

   events@3.3.0: {}
@@ -21339,7 +19704,7 @@ snapshots:
       content-type: 1.0.5
       cookie: 0.7.2
       cookie-signature: 1.2.2
-      debug: 4.4.1(supports-color@10.0.0)
+      debug: 4.4.1
       encodeurl: 2.0.0
       escape-html: 1.0.3
       etag: 1.8.1
@@ -21389,8 +19754,6 @@ snapshots:
   fast-levenshtein@2.0.6: {}

-  fast-npm-meta@0.2.2: {}
-
   fast-sort@3.4.1: {}

   fast-uri@3.0.6: {}
@@ -21443,7 +19806,7 @@ snapshots:
   finalhandler@2.1.0:
     dependencies:
-      debug: 4.4.1(supports-color@10.0.0)
+      debug: 4.4.1
       encodeurl: 2.0.0
       escape-html: 1.0.3
       on-finished: 2.4.1
@@ -21462,12 +19825,6 @@ snapshots:
       locate-path: 6.0.0
       path-exists: 4.0.0

-  find-up@7.0.0:
-    dependencies:
-      locate-path: 7.2.0
-      path-exists: 5.0.0
-      unicorn-magic: 0.1.0
-
   find-yarn-workspace-root2@1.2.16:
     dependencies:
       micromatch: 4.0.8
@@ -21604,8 +19961,6 @@ snapshots:
   get-nonce@1.0.1: {}

-  get-package-type@0.1.0: {}
-
   get-port@5.1.1: {}

   get-proto@1.0.1:
@@ -21632,20 +19987,6 @@ snapshots:
     dependencies:
       resolve-pkg-maps: 1.0.0

-  getopts@2.3.0: {}
-
-  giget@1.2.5:
-    dependencies:
-      citty: 0.1.6
-      consola: 3.4.2
-      defu: 6.1.4
-      node-fetch-native: 1.6.6
-      nypm: 0.5.4
-      pathe: 2.0.3
-      tar: 6.2.1
-
-  git-last-commit@1.0.1: {}
-
   glob-parent@5.1.2:
     dependencies:
       is-glob: 4.0.3
@@ -21711,14 +20052,6 @@ snapshots:
       merge2: 1.4.1
       slash: 3.0.0

-  globby@13.2.2:
-    dependencies:
-      dir-glob: 3.0.1
-      fast-glob: 3.3.3
-      ignore: 5.3.2
-      merge2: 1.4.1
-      slash: 4.0.0
-
   globrex@0.1.2: {}

   gopd@1.2.0: {}
@@ -21727,11 +20060,6 @@ snapshots:
   graceful-fs@4.2.11: {}

-  gradient-string@2.0.2:
-    dependencies:
-      chalk: 4.1.2
-      tinygradient: 1.1.5
-
   grapheme-splitter@1.0.4: {}

   graphemer@1.4.0: {}
@@ -21788,8 +20116,6 @@ snapshots:
   has-flag@4.0.0: {}

-  has-flag@5.0.1: {}
-
   has-property-descriptors@1.0.2:
     dependencies:
       es-define-property: 1.0.1
@@ -21856,8 +20182,6 @@ snapshots:
   highlight.js@11.11.1: {}

-  hono@4.8.5: {}
-
   hosted-git-info@2.8.9: {}

   hosted-git-info@6.1.3:
@@ -21942,14 +20266,10 @@ snapshots:
       cjs-module-lexer: 1.4.3
       module-details-from-path: 1.0.4

-  import-meta-resolve@4.1.0: {}
-
   imurmurhash@0.1.4: {}

   indent-string@4.0.0: {}

-  indent-string@5.0.0: {}
-
   inflight@1.0.6:
     dependencies:
       once: 1.4.0
@@ -21959,8 +20279,6 @@ snapshots:
   ini@1.3.8: {}

-  ini@5.0.0: {}
-
   inline-style-parser@0.1.1: {}

   inline-style-parser@0.2.4: {}
@@ -21973,13 +20291,11 @@ snapshots:
   internmap@2.0.3: {}

-  interpret@2.2.0: {}
-
   ioredis@5.6.1:
     dependencies:
       '@ioredis/commands': 1.2.0
       cluster-key-slot: 1.1.2
-      debug: 4.4.1(supports-color@10.0.0)
+      debug: 4.4.1
       denque: 2.1.0
       lodash.defaults: 4.2.0
       lodash.isarguments: 3.1.0
@@ -22065,8 +20381,6 @@ snapshots:
   is-deflate@1.0.0: {}

-  is-docker@3.0.0: {}
-
   is-extglob@2.1.1: {}

   is-finalizationregistry@1.1.1:
@@ -22090,18 +20404,12 @@ snapshots:
   is-hexadecimal@2.0.1: {}

-  is-inside-container@1.0.0:
-    dependencies:
-      is-docker: 3.0.0
-
   is-interactive@1.0.0: {}

   is-map@2.0.3: {}

   is-negative-zero@2.0.3: {}

-  is-network-error@1.1.0: {}
-
   is-number-object@1.1.1:
     dependencies:
       call-bound: 1.0.4
@@ -22183,10 +20491,6 @@ snapshots:
   is-windows@1.0.2: {}

-  is-wsl@3.1.0:
-    dependencies:
-      is-inside-container: 1.0.0
-
   isarray@1.0.0: {}

   isarray@2.0.5: {}
@@ -22304,8 +20608,6 @@ snapshots:
   json5@2.2.3: {}

-  jsonc-parser@3.2.1: {}
-
   jsonfile@4.0.0:
     optionalDependencies:
       graceful-fs: 4.2.11
@@ -22323,8 +20625,6 @@ snapshots:
       object.assign: 4.1.7
       object.values: 1.2.1

-  junk@4.0.1: {}
-
   katex@0.16.22:
     dependencies:
       commander: 8.3.0
@@ -22346,27 +20646,6 @@ snapshots:
   kleur@4.1.5: {}

-  knex@3.1.0(pg@8.16.3)(supports-color@10.0.0):
-    dependencies:
-      colorette: 2.0.19
-      commander: 10.0.1
-      debug: 4.3.4(supports-color@10.0.0)
-      escalade: 3.2.0
-      esm: 3.2.25
-      get-package-type: 0.1.0
-      getopts: 2.3.0
-      interpret: 2.2.0
-      lodash: 4.17.21
-      pg-connection-string: 2.6.2
-      rechoir: 0.8.0
-      resolve-from: 5.0.0
-      tarn: 3.0.2
-      tildify: 2.0.0
-    optionalDependencies:
-      pg: 8.16.3
-    transitivePeerDependencies:
-      - supports-color
-
   language-subtag-registry@0.3.23: {}

   language-tags@1.0.9:
@@ -22488,10 +20767,6 @@ snapshots:
     dependencies:
       p-locate: 5.0.0

-  locate-path@7.2.0:
-    dependencies:
-      p-locate: 6.0.0
-
   lodash.camelcase@4.3.0: {}

   lodash.castarray@4.4.0: {}
@@ -22560,12 +20835,6 @@ snapshots:
     dependencies:
       '@jridgewell/sourcemap-codec': 1.5.0

-  magicast@0.3.5:
-    dependencies:
-      '@babel/parser': 7.27.5
-      '@babel/types': 7.27.6
-      source-map-js: 1.2.1
-
   make-dir@2.1.0:
     dependencies:
       pify: 4.0.1
@@ -22591,10 +20860,6 @@ snapshots:
   marked@7.0.4: {}

-  matchit@1.1.0:
-    dependencies:
-      '@arr/every': 1.0.1
-
   math-intrinsics@1.1.0: {}

   md-to-react-email@5.0.2(react@18.3.1):
@@ -22812,8 +21077,6 @@ snapshots:
   memorystream@0.3.1: {}

-  meow@12.1.1: {}
-
   meow@6.1.1:
     dependencies:
       '@types/minimist': 1.2.5
@@ -23143,7 +21406,7 @@ snapshots:
   micromark@3.2.0:
     dependencies:
       '@types/debug': 4.1.12
-      debug: 4.4.1(supports-color@10.0.0)
+      debug: 4.4.3
       decode-named-character-reference: 1.1.0
       micromark-core-commonmark: 1.1.0
       micromark-factory-space: 1.1.0
@@ -23165,7 +21428,7 @@ snapshots:
   micromark@4.0.2:
     dependencies:
       '@types/debug': 4.1.12
-      debug: 4.4.1(supports-color@10.0.0)
+      debug: 4.4.3
       decode-named-character-reference: 1.1.0
       devlop: 1.1.0
       micromark-core-commonmark: 2.0.3
@@ -23305,8 +21568,6 @@ snapshots:
   ms@2.0.0: {}

-  ms@2.1.2: {}
-
   ms@2.1.3: {}

   msgpackr-extract@3.0.3:
@@ -23369,8 +21630,6 @@ snapshots:
       neo4j-driver-core: 5.28.1
       rxjs: 7.8.2

-  nested-error-stacks@2.1.1: {}
-
   next@14.1.4(@babel/core@7.24.5)(@opentelemetry/api@1.9.0)(react-dom@18.2.0(react@18.2.0))(react@18.2.0)(sass@1.89.2):
     dependencies:
       '@next/env': 14.1.4
@@ -23409,8 +21668,6 @@ snapshots:
     dependencies:
       lodash: 4.17.21

-  node-fetch-native@1.6.6: {}
-
   node-fetch@2.7.0(encoding@0.1.13):
     dependencies:
       whatwg-url: 5.0.0
@@ -23545,15 +21802,6 @@ snapshots:
   num2fraction@1.2.2: {}

-  nypm@0.5.4:
-    dependencies:
-      citty: 0.1.6
-      consola: 3.4.2
-      pathe: 2.0.3
-      pkg-types: 1.3.1
-      tinyexec: 0.3.2
-      ufo: 1.6.1
-
   object-assign@4.1.1: {}

   object-hash@2.2.0: {}
@@ -23605,8 +21853,6 @@ snapshots:
       define-properties: 1.2.1
       es-object-atoms: 1.1.1

-  ohash@1.1.6: {}
-
   ollama-ai-provider-v2@1.5.1(zod@3.25.76):
     dependencies:
       '@ai-sdk/provider': 2.0.0
@@ -23635,13 +21881,6 @@ snapshots:
     dependencies:
       mimic-fn: 4.0.0

-  open@10.2.0:
-    dependencies:
-      default-browser: 5.2.1
-      define-lazy-prop: 3.0.0
-      is-inside-container: 1.0.0
-      wsl-utils: 0.1.0
-
   openai@5.12.2(ws@8.18.3)(zod@3.25.76):
     optionalDependencies:
       ws: 8.18.3
@@ -23670,10 +21909,6 @@ snapshots:
   orderedmap@2.1.1: {}

-  os-paths@7.4.0:
-    optionalDependencies:
-      fsevents: 2.3.3
-
   os-tmpdir@1.0.2: {}

   outdent@0.5.0: {}
@@ -23690,18 +21925,10 @@ snapshots:
       object-keys: 1.1.1
       safe-push-apply: 1.0.0

-  p-event@5.0.1:
-    dependencies:
-      p-timeout: 5.1.0
-
   p-filter@2.1.0:
     dependencies:
       p-map: 2.1.0

-  p-filter@3.0.0:
-    dependencies:
-      p-map: 5.5.0
-
   p-limit@2.3.0:
     dependencies:
       p-try: 2.2.0
@@ -23710,14 +21937,6 @@ snapshots:
     dependencies:
       yocto-queue: 0.1.0

-  p-limit@4.0.0:
-    dependencies:
-      yocto-queue: 1.2.1
-
-  p-limit@6.2.0:
-    dependencies:
-      yocto-queue: 1.2.1
-
   p-locate@4.1.0:
     dependencies:
       p-limit: 2.3.0
@@ -23726,30 +21945,12 @@ snapshots:
     dependencies:
       p-limit: 3.1.0

-  p-locate@6.0.0:
-    dependencies:
-      p-limit: 4.0.0
-
   p-map@2.1.0: {}

   p-map@4.0.0:
     dependencies:
       aggregate-error: 3.1.0

-  p-map@5.5.0:
-    dependencies:
-      aggregate-error: 4.0.1
-
-  p-map@6.0.0: {}
-
-  p-retry@6.2.1:
-    dependencies:
-      '@types/retry': 0.12.2
-      is-network-error: 1.1.0
-      retry: 0.13.1
-
-  p-timeout@5.1.0: {}
-
   p-try@2.2.0: {}

   package-json-from-dist@1.0.1: {}
@@ -23809,14 +22010,8 @@ snapshots:
   parseurl@1.3.3: {}

-  partysocket@1.1.4:
-    dependencies:
-      event-target-polyfill: 0.0.4
-
   path-exists@4.0.0: {}

-  path-exists@5.0.0: {}
-
   path-is-absolute@1.0.1: {}

   path-key@2.0.1: {}
@@ -23859,51 +22054,12 @@ snapshots:
       duplexify: 3.7.1
       through2: 2.0.5

-  perfect-debounce@1.0.0: {}
-
   periscopic@3.1.0:
     dependencies:
       '@types/estree': 1.0.8
       estree-walker: 3.0.3
       is-reference: 3.0.3

-  pg-cloudflare@1.2.7:
-    optional: true
-
-  pg-connection-string@2.6.2: {}
-
-  pg-connection-string@2.9.1: {}
-
-  pg-int8@1.0.1: {}
-
-  pg-pool@3.10.1(pg@8.16.3):
-    dependencies:
-      pg: 8.16.3
-
-  pg-protocol@1.10.3: {}
-
-  pg-types@2.2.0:
-    dependencies:
-      pg-int8: 1.0.1
-      postgres-array: 2.0.0
-      postgres-bytea: 1.0.0
-      postgres-date: 1.0.7
-      postgres-interval: 1.2.0
-
-  pg@8.16.3:
-    dependencies:
-      pg-connection-string: 2.9.1
-      pg-pool: 3.10.1(pg@8.16.3)
-      pg-protocol: 1.10.3
-      pg-types: 2.2.0
-      pgpass: 1.0.5
-    optionalDependencies:
-      pg-cloudflare: 1.2.7
-
-  pgpass@1.0.5:
-    dependencies:
-      split2: 4.2.0
-
   picocolors@0.2.1: {}

   picocolors@1.1.1: {}
@@ -23942,13 +22098,6 @@ snapshots:
       exsolve: 1.0.5
       pathe: 2.0.3

-  polite-json@5.0.0: {}
-
-  polka@0.5.2:
-    dependencies:
-      '@polka/url': 0.5.0
-      trouter: 2.0.1
-
   possible-typed-array-names@1.1.0: {}

   postcss-discard-duplicates@5.1.0(postcss@8.5.5):
@@ -24117,16 +22266,6 @@ snapshots:
       picocolors: 1.1.1
       source-map-js: 1.2.1

-  postgres-array@2.0.0: {}
-
-  postgres-bytea@1.0.0: {}
-
-  postgres-date@1.0.7: {}
-
-  postgres-interval@1.2.0:
-    dependencies:
-      xtend: 4.0.2
-
   posthog-js@1.250.2:
     dependencies:
       core-js: 3.43.0
@@ -24414,11 +22553,6 @@ snapshots:
       iconv-lite: 0.6.3
       unpipe: 1.0.0

-  rc9@2.1.2:
-    dependencies:
-      defu: 6.1.4
-      destr: 2.0.5
-
   react-calendar-heatmap@1.10.0(react@18.3.1):
     dependencies:
       memoize-one: 5.2.1
@@ -24741,10 +22875,6 @@ snapshots:
   readdirp@4.1.2: {}

-  rechoir@0.8.0:
-    dependencies:
-      resolve: 1.22.10
-
   redent@3.0.0:
     dependencies:
       indent-string: 4.0.0
@@ -24870,9 +23000,9 @@ snapshots:
   require-from-string@2.0.2: {}

-  require-in-the-middle@7.5.2(supports-color@10.0.0):
+  require-in-the-middle@7.5.2:
     dependencies:
-      debug: 4.4.1(supports-color@10.0.0)
+      debug: 4.4.3
       module-details-from-path: 1.0.4
       resolve: 1.22.10
     transitivePeerDependencies:
@@ -24892,11 +23022,6 @@ snapshots:
   resolve-from@5.0.0: {}

-  resolve-import@2.0.0:
-    dependencies:
-      glob: 11.0.2
-      walk-up-path: 4.0.0
-
   resolve-pkg-maps@1.0.0: {}

   resolve.exports@2.0.3: {}
@@ -24922,18 +23047,12 @@ snapshots:
   retry@0.12.0: {}

-  retry@0.13.1: {}
-
   reusify@1.1.0: {}

   rimraf@3.0.2:
     dependencies:
       glob: 7.2.3

-  rimraf@5.0.10:
-    dependencies:
-      glob: 10.4.5
-
   rimraf@6.0.1:
     dependencies:
       glob: 11.0.2
@@ -24971,7 +23090,7 @@ snapshots:
   router@2.2.0:
     dependencies:
-      debug: 4.4.1(supports-color@10.0.0)
+      debug: 4.4.1
       depd: 2.0.0
       is-promise: 4.0.0
       parseurl: 1.3.3
@@ -24979,8 +23098,6 @@ snapshots:
     transitivePeerDependencies:
       - supports-color

-  run-applescript@7.0.0: {}
-
   run-exclusive@2.2.19:
     dependencies:
       minimal-polyfills: 2.2.3
@@ -25090,7 +23207,7 @@ snapshots:
   send@1.2.0:
     dependencies:
-      debug: 4.4.1(supports-color@10.0.0)
+      debug: 4.4.1
       encodeurl: 2.0.0
       escape-html: 1.0.3
       etag: 1.8.1
@@ -25168,8 +23285,6 @@ snapshots:
   shell-quote@1.8.3: {}

-  shimmer@1.2.1: {}
-
   side-channel-list@1.0.0:
     dependencies:
       es-errors: 1.3.0
@@ -25213,7 +23328,7 @@ snapshots:
     dependencies:
       '@hapi/hoek': 11.0.7
       '@hapi/wreck': 18.1.0
-      debug: 4.4.1(supports-color@10.0.0)
+      debug: 4.4.1
       joi: 17.13.3
     transitivePeerDependencies:
       - supports-color
@@ -25222,12 +23337,8 @@ snapshots:
     dependencies:
       is-arrayish: 0.3.2

-  sisteransi@1.0.5: {}
-
   slash@3.0.0: {}

-  slash@4.0.0: {}
-
   slug@6.1.0: {}

   smartwrap@2.0.2:
@@ -25348,8 +23459,6 @@ snapshots:
   spdx-license-ids@3.0.21: {}

-  split2@4.2.0: {}
-
   sprintf-js@1.0.3: {}

   ssri@10.0.6:
@@ -25532,8 +23641,6 @@ snapshots:
     dependencies:
       copy-anything: 3.0.5

-  supports-color@10.0.0: {}
-
   supports-color@5.5.0:
     dependencies:
       has-flag: 3.0.0
@@ -25558,14 +23665,6 @@ snapshots:
       react: 18.3.1
       use-sync-external-store: 1.5.0(react@18.3.1)

-  sync-content@2.0.1:
-    dependencies:
-      glob: 11.0.2
-      mkdirp: 3.0.1
-      path-scurry: 2.0.0
-      rimraf: 6.0.1
-      tshy: 3.0.2
-
   tailwind-merge@2.2.0:
     dependencies:
       '@babel/runtime': 7.27.6
@@ -25682,8 +23781,6 @@ snapshots:
       mkdirp: 3.0.1
       yallist: 5.0.0

-  tarn@3.0.2: {}
-
   tdigest@0.1.2:
     dependencies:
       bintrees: 1.0.2
@@ -25738,12 +23835,8 @@ snapshots:
       readable-stream: 2.3.8
       xtend: 4.0.2

-  tildify@2.0.0: {}
-
   tiny-invariant@1.3.3: {}

-  tinycolor2@1.6.0: {}
-
   tinyexec@0.3.2: {}

   tinyglobby@0.2.14:
@@ -25751,11 +23844,6 @@ snapshots:
       fdir: 6.4.6(picomatch@4.0.2)
       picomatch: 4.0.2

-  tinygradient@1.1.5:
-    dependencies:
-      '@types/tinycolor2': 1.4.6
-      tinycolor2: 1.6.0
-
   tippy.js@6.3.7:
     dependencies:
       '@popperjs/core': 2.11.8
@@ -25796,20 +23884,12 @@ snapshots:
   trough@2.2.0: {}

-  trouter@2.0.1:
-    dependencies:
-      matchit: 1.1.0
-
   ts-algebra@2.0.0: {}

   ts-api-utils@1.4.3(typescript@5.8.3):
     dependencies:
       typescript: 5.8.3

-  ts-essentials@10.0.1(typescript@5.8.3):
-    optionalDependencies:
-      typescript: 5.8.3
-
   ts-interface-checker@0.1.13: {}

   tsafe@1.8.5: {}
@@ -25835,20 +23915,6 @@ snapshots:
       minimist: 1.2.8
       strip-bom: 3.0.0

-  tshy@3.0.2:
-    dependencies:
-      chalk: 5.4.1
-      chokidar: 3.6.0
-      foreground-child: 3.3.1
-      minimatch: 10.0.2
-      mkdirp: 3.0.1
-      polite-json: 5.0.0
-      resolve-import: 2.0.0
-      rimraf: 6.0.1
-      sync-content: 2.0.1
-      typescript: 5.8.3
-      walk-up-path: 4.0.0
-
   tslib@1.14.1: {}

   tslib@2.8.1: {}
@@ -25859,7 +23925,7 @@ snapshots:
       cac: 6.7.14
       chokidar: 4.0.3
       consola: 3.4.2
-      debug: 4.4.1(supports-color@10.0.0)
+      debug: 4.4.1
       esbuild: 0.25.5
       fix-dts-default-cjs-exports: 1.0.1
       joycon: 3.1.1
@@ -25887,13 +23953,6 @@ snapshots:
       tslib: 1.14.1
       typescript: 5.8.3

-  tsx@4.17.0:
-    dependencies:
-      esbuild: 0.23.1
-      get-tsconfig: 4.10.1
-    optionalDependencies:
-      fsevents: 2.3.3
-
   tsx@4.20.4:
     dependencies:
       esbuild: 0.25.5
@@ -26040,8 +24099,6 @@ snapshots:
   undici@7.13.0: {}

-  unicorn-magic@0.1.0: {}
-
   unicorn-magic@0.3.0: {}

   unified@10.1.2:
@@ -26217,8 +24274,6 @@ snapshots:
   utils-merge@1.0.1: {}

-  uuid@11.1.0: {}
-
   uuid@9.0.1: {}

   uvu@0.5.6:
@@ -26268,7 +24323,7 @@ snapshots:
   vite-node@1.6.1(@types/node@20.19.7)(less@4.4.0)(lightningcss@1.30.1)(sass@1.89.2)(terser@5.42.0):
     dependencies:
       cac: 6.7.14
-      debug: 4.4.1(supports-color@10.0.0)
+      debug: 4.4.1
       pathe: 1.1.2
       picocolors: 1.1.1
       vite: 5.4.19(@types/node@20.19.7)(less@4.4.0)(lightningcss@1.30.1)(sass@1.89.2)(terser@5.42.0)
@@ -26286,7 +24341,7 @@ snapshots:
   vite-node@3.2.3(@types/node@20.19.7)(jiti@2.4.2)(less@4.4.0)(lightningcss@1.30.1)(sass@1.89.2)(terser@5.42.0)(tsx@4.20.4)(yaml@2.8.0):
     dependencies:
       cac: 6.7.14
-      debug: 4.4.1(supports-color@10.0.0)
+      debug: 4.4.1
       es-module-lexer: 1.7.0
       pathe: 2.0.3
       vite: 6.3.5(@types/node@20.19.7)(jiti@2.4.2)(less@4.4.0)(lightningcss@1.30.1)(sass@1.89.2)(terser@5.42.0)(tsx@4.20.4)(yaml@2.8.0)
@@ -26306,7 +24361,7 @@ snapshots:
   vite-tsconfig-paths@4.3.2(typescript@5.8.3)(vite@6.3.5(@types/node@20.19.7)(jiti@2.4.2)(less@4.4.0)(lightningcss@1.30.1)(sass@1.89.2)(terser@5.42.0)(tsx@4.20.4)(yaml@2.8.0)):
     dependencies:
-      debug: 4.4.1(supports-color@10.0.0)
+      debug: 4.4.1
       globrex: 0.1.2
       tsconfck: 3.1.6(typescript@5.8.3)
     optionalDependencies:
@@ -26349,8 +24404,6 @@ snapshots:
   w3c-keyname@2.2.8: {}

-  walk-up-path@4.0.0: {}
-
   watchpack@2.4.4:
     dependencies:
       glob-to-regexp: 0.4.1
@@ -26546,22 +24599,6 @@ snapshots:
   ws@8.18.3: {}

-  wsl-utils@0.1.0:
-    dependencies:
-      is-wsl: 3.1.0
-
-  xdg-app-paths@8.3.0:
-    dependencies:
-      xdg-portable: 10.6.0
-    optionalDependencies:
-      fsevents: 2.3.3
-
-  xdg-portable@10.6.0:
-    dependencies:
-      os-paths: 7.4.0
-    optionalDependencies:
-      fsevents: 2.3.3
-
   xmlhttprequest-ssl@2.0.0: {}

   xtend@4.0.2: {}
@@ -26615,8 +24652,6 @@ snapshots:
   yocto-queue@0.1.0: {}

-  yocto-queue@1.2.1: {}
-
   yoctocolors@2.1.1: {}

   zod-error@1.5.0:
@@ -26627,10 +24662,6 @@ snapshots:
     dependencies:
       zod: 3.23.8

-  zod-validation-error@1.5.0(zod@3.23.8):
-    dependencies:
-      zod: 3.23.8
-
   zod-validation-error@1.5.0(zod@3.25.76):
     dependencies:
       zod: 3.25.76