1. fix: delete api key is not working

2. moved docs into main repo
This commit is contained in:
Harshith Mullapudi 2025-10-18 22:23:13 +05:30
parent d0126797de
commit b0e141c2a2
90 changed files with 4732 additions and 1 deletion


@@ -28,7 +28,8 @@ export const useTokensColumns = (): Array<ColumnDef<PersonalAccessToken>> => {
   const [open, setOpen] = React.useState(false);
   const onDelete = (id: string) => {
-    fetcher.submit({ id }, { method: "DELETE", action: "/home/api" });
+    fetcher.submit({ id }, { method: "DELETE", action: "/settings/api" });
     setOpen(false);
   };
   return [


@@ -10,6 +10,7 @@ const LogsSearchParams = z.object({
   source: z.string().optional(),
   status: z.string().optional(),
   type: z.string().optional(),
+  sessionId: z.string().optional(),
 });
 export const loader = createHybridLoaderApiRoute(
@@ -25,6 +26,7 @@ export const loader = createHybridLoaderApiRoute(
   const source = searchParams.source;
   const status = searchParams.status;
   const type = searchParams.type;
+  const sessionId = searchParams.sessionId;
   const skip = (page - 1) * limit;
   // Get user and workspace in one query
@@ -53,6 +55,13 @@ export const loader = createHybridLoaderApiRoute(
     };
   }
+  if (sessionId) {
+    whereClause.data = {
+      path: ["sessionId"],
+      equals: sessionId,
+    };
+  }
   // If source filter is provided, filter by integration source
   if (source) {
     whereClause.activity = {

docs/.gitignore vendored Normal file

@@ -0,0 +1,9 @@
.clinerules/byterover-rules.md
.kilocode/rules/byterover-rules.md
.roo/rules/byterover-rules.md
.windsurf/rules/byterover-rules.md
.cursor/rules/byterover-rules.mdc
.kiro/steering/byterover-rules.md
.qoder/rules/byterover-rules.md
.augment/rules/byterover-rules.md

docs/automations/rules.mdx Normal file

@@ -0,0 +1,108 @@
---
title: "Automations"
description: "How to use SOL Automations"
---
## Overview
SOL Automations let you streamline and manage your manual workflows using plain English. By defining automation rules, you can minimize repetitive work, reduce noise, and focus on what matters most.
## Why Use Automations?
Modern workflows involve countless apps, notifications, and manual steps. SOL Automations filter this noise by letting you set preferences and receive actionable alerts for the items that truly matter—helping you work smarter, not harder.
## What Can You Automate with SOL?
- **Automate Task Creation:** Instantly create SOL tasks from external events, such as:
- Automatically generate a SOL task whenever a Linear or Jira issue is assigned to you.
- Create a task in SOL when a pull request (PR) is assigned to you in GitHub.
- Trigger SOL tasks when important emails arrive—define what counts as "important" based on your criteria.
- Create actionable SOL tasks when you're mentioned or tagged in Slack, highlighting key action items from the conversation.
- **Perform Actions in Third-Party Apps:**
- When a SOL task is marked as done, automatically update its status in Linear or Jira.
- Send Slack or email replies once a corresponding SOL task is completed.
- **Create Custom Workflows:**
- Example: Automatically close support tickets that are reopened just for a "thank you" response—no further action required. Once closed, notify you via SOL chat.
## How SOL Automations Work
The best way to write an automation rule is:
```
When [TRIGGER] happens, if [CONDITIONS] are met, then do [ACTIONS]
```
- **Triggers:** Events that start the automation (e.g., receiving an email, being assigned a PR)
- **Conditions:** Optional criteria that must be true for the rule to run (e.g., sender is VIP, subject contains "urgent")
- **Actions:** What happens when the rule executes (e.g., create a task, send a notification)
## Common Use Cases for Automations
### Development Workflow Automation
- **PR Review Management:**
- Trigger: You're tagged for review on a GitHub PR
- Action: Create a SOL task with PR details and schedule it for tomorrow
- **Issue Tracking:**
- Trigger: You're assigned an issue in Linear or Jira
- Action: Create a task in SOL, add relevant context, and tag it in the relevant list
- **Build Failure Alerts:**
- Trigger: Email received from CI system about build failure
- Action: Create high-priority task, include error details, notify in Slack
### Communication Automation
- **Customer Support Triage:**
- Trigger: Email from support@company.com
- Condition: Contains specific customer name or project
- Action: Create task, draft response, set priority based on customer tier
- **Meeting Follow-ups:**
- Trigger: Calendar event ends
- Condition: Meeting had specific tags or participants
- Action: Create follow-up tasks for action items, draft summary email
- **Slack Mention Tracking:**
- Trigger: You're mentioned in Slack in specific channels
- Condition: Message contains keywords like "urgent" or "deadline"
- Action: Create high-priority task with context
### Personal Productivity
- **Morning Prep:**
- Trigger: Time-based (8:00 AM weekdays)
- Action: Generate daily brief, create planning task, show calendar events
- **Email Batch Processing:**
- Trigger: Time-based (twice daily)
- Action: Summarize non-urgent emails, create tasks for ones needing response
- **Focus Time Protection:**
- Trigger: Calendar event with "Focus" tag starts
- Action: Set Slack to Do Not Disturb, disable notifications, log start time
## Creating Your First Rule
1. Navigate to Automations > Rules in SOL
2. Click "New Rule"
3. Write the rule in natural language, following the format `When [TRIGGER] happens, if [CONDITIONS] are met, then do [ACTIONS]`, as in the example below
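For example, a complete rule combining a trigger, a condition, and an action (the repository name here is only a placeholder) might read:
```
When I'm assigned a pull request in GitHub, if the repository is "core", then create a SOL task with the PR link and schedule it for tomorrow
```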
## Troubleshooting Rules
- Because rules in SOL are written in natural language, sometimes they may not work exactly as intended.
- If a rule isn't triggering as expected, the simplest way to debug is to ask the SOL assistant for help.
- The assistant can explain why the rule didn't work and guide you on how to rephrase it so that SOL recognizes the correct trigger and performs the desired actions when conditions are met.
---

docs/concepts/chat.mdx Normal file

@@ -0,0 +1,16 @@
---
title: "Chat Interface"
description: "Your direct connection to your memory graph"
---
### Memory Assistant - Ask questions about your stored information
- "What writing preferences do I have?"
- "Tell me everything about the TaskMaster project"
- "Who have I been meeting with this week?"
### Memory Addition - Add new information directly
- Type any information and say "add this to memory"
- CORE processes the text, extracts entities and facts, and integrates them into your graph
- Works for notes, insights, plans, or any information you want to preserve

@@ -0,0 +1,25 @@
---
title: "Entity Types"
description: "How CORE organizes information into different entity types"
---
Within Entity Nodes, CORE recognizes and organizes different types of real-world concepts to build a rich, categorized understanding of your world.
### Core Entity Types
| **Type** | **Description** | **Key Attributes** | **Example** |
| ---------------- | ---------------------------------------- | ---------------------------- | ------------------------------------------------------- |
| **Person** | Individual people in your network | email, role | "Manik (Software Engineer, manik@company.com)" |
| **Organization** | Companies, teams, and groups | industry, size | "TaskMaster (Startup, Series A, Current Project)" |
| **Place** | Physical or virtual locations | address, coordinates | "San Francisco Office (123 Main St, Primary Workspace)" |
| **Event** | Time-based occurrences | startTime, endTime, location | "TaskMaster Launch (Q2 2025, Product Release)" |
| **File** | Documents, code, and media you reference | fileId, source | "TaskMaster Wireframes (Figma, Updated Daily)" |
### Application-Specific Types
CORE also understands entities from your connected apps:
- **GitHub**: Repositories, issues, pull requests
- **Slack**: Channels, messages, threads
- **Linear**: Issues, projects, teams
- **Gmail**: Important emails, contacts, threads
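For illustration, an entity of one of these types could be represented roughly as follows. This is a hypothetical sketch for explanation only, not CORE's actual schema; the field names simply mirror the attribute columns above.
```typescript
// Hypothetical shape of an entity node (illustrative only, not CORE's real schema).
interface EntityNode {
  type: "Person" | "Organization" | "Place" | "Event" | "File";
  name: string;
  attributes: Record<string, string>; // e.g. email, role, startTime, fileId
}

const manik: EntityNode = {
  type: "Person",
  name: "Manik",
  attributes: { role: "Software Engineer", email: "manik@company.com" },
};
```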

docs/concepts/home.mdx Normal file

@@ -0,0 +1,37 @@
---
title: "Home"
description: "Command Center: chat & today's tasks"
---
## Overview
The Home screen is designed to provide:
1. A conversational interface to interact with SOL
2. Quick access to your scheduled tasks for the day
## Chat Interface
The chat interface is your primary method of interacting with SOL:
- **Natural Language**: Ask questions, give commands, or have conversations in plain English
- **Create Tasks**: Instantly create and schedule tasks by describing what you need in plain English
- **Context-Aware**: SOL maintains conversation context for more natural interactions
- **Smart Responses**: Get concise, actionable responses based on your connected tools and saved context
## Today's Tasks
The Home screen displays your scheduled tasks for today, helping you:
- See what needs your immediate attention
- Track your progress throughout the day
- Reprioritize as new items come in
- Focus on what matters most
## Getting the Most Out of Home
- **Start Your Day Here**: Begin each workday by checking your Home screen
- **Ask Anything**: Use the chat for quick questions and commands throughout the day
- **Use Natural Language**: Ask SOL to "show me all urgent GitHub PRs" rather than navigating menus
By making the Home screen your workflow starting point, you'll have better awareness of priorities and a central place to interact with all your connected tools.

docs/concepts/lists.mdx Normal file

@@ -0,0 +1,25 @@
---
title: "Lists"
description: "Lists and organization in SOL"
---
## Overview
Lists in SOL streamline project organization by combining notes, structured plans, and tasks in a single workspace. Capture context, add tasks anytime, and access all relevant details without switching tools.
## Rich Content
- Easily add images, files, and rich text to any list.
- Create clear, shareable lists with rich context.
## Lists Examples
1. **Project Lists**: Group tasks and notes related to a specific project
2. **Topic Lists**: Organize tasks by subject matter or domain
3. **Personal Lists**: Private collections for individual use
## Creating a New List
To create a list: navigate to the Lists section → click "New List" → name your list and start adding context
By effectively using lists in SOL, you can maintain better organization, reduce mental overhead, and ensure nothing falls through the cracks.

@@ -0,0 +1,32 @@
---
title: "Memory Graph"
---
CORE's memory graph is the foundation of how your digital brain works. Unlike traditional databases that store isolated facts, CORE creates a living network of connected information.
### How Your Memory is Structured
Your memory consists of three types of building blocks:
1. **Episodes - The raw conversations and interactions you have**
- Every chat with Claude, message in Slack, or note you add becomes an episode
- Episodes preserve the original context and serve as the source of truth
2. **Entities - The people, places, concepts, and relationships in your world**
- Names like "Manik," concepts like "React," companies like "TaskMaster"
- Even relationships like "works at" or "prefers" are treated as entities
3. **Statements - The specific facts extracted from your episodes**
- "Alex works on TaskMaster" or "Manik prefers TypeScript"
- Each statement knows when it became true and links back to its source episode
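A minimal TypeScript sketch of how these three building blocks relate (purely illustrative; this is not CORE's internal data model):
```typescript
// Illustrative types only -- not CORE's actual schema.
interface Episode {
  id: string;
  source: string;    // e.g. "Claude", "Slack", or a note you added
  content: string;   // the raw conversation or note text
  createdAt: Date;
}

interface Entity {
  id: string;
  name: string;      // "Manik", "React", "TaskMaster", or even "works at"
}

interface Statement {
  id: string;
  subject: Entity;   // "Alex"
  predicate: Entity; // "works on"
  object: Entity;    // "TaskMaster"
  validFrom: Date;   // when this fact became true
  episodeId: string; // links the fact back to its source episode
}
```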
### Why This Structure Matters
This approach gives CORE unique advantages over simple note-taking or search systems:
- **Traceable Knowledge**: Every fact in your memory can be traced back to the original conversation where you mentioned it
- **Smart Connections**: When you mention "TaskMaster," CORE finds not just that word, but all related people, timelines, and decisions connected to your project
- **Evolving Understanding**: As you have more conversations, CORE builds richer connections between existing entities rather than creating isolated notes

docs/concepts/space.mdx Normal file

docs/concepts/tasks.mdx Normal file

@@ -0,0 +1,35 @@
---
title: "Tasks"
description: "SOL's task system and planner"
---
## Overview
SOL's task system is designed to centralize and streamline your work across multiple platforms, providing a unified system for tracking, prioritizing, and completing work items.
A SOL task is a work item that:
- Has a clear objective
- Can be tracked to completion
- May be linked to external systems (GitHub, Linear, etc.)
- Contains context relevant to the work needed
- Can be prioritized and scheduled
## Task Sources
Tasks in SOL can be created from multiple sources:
1. **Manual Creation**: Directly created by you in SOL
2. **Automation Rules**: Generated automatically based on triggers from connected platforms (Linear issues, GitHub PRs, etc.)
3. **Conversation**: Created from chat with SOL
## Task Anatomy
Each task in SOL consists of:
- **Title**: Clear description of the work to be done
- **Status**: Current state (Todo, Done)
- **Scheduled Date**: When the task should be scheduled
- **Descriptions**: Related information, links, and background
- **Subtasks**: Smaller steps needed to complete the main task
- **Activity**: History of actions and changes

docs/docs.json Normal file

@@ -0,0 +1,186 @@
{
"$schema": "https://mintlify.com/docs.json",
"theme": "palm",
"name": "CORE Documentation",
"colors": {
"primary": "#c15e50",
"light": "#c15e50",
"dark": "#c15e50"
},
"favicon": "/favicon.svg",
"navigation": {
"tabs": [
{
"tab": "Documentation",
"groups": [
{
"group": "Get Started",
"pages": [
"introduction",
"getting-started"
]
},
{
"group": "Memory",
"pages": [
"concepts/memory_graph",
"concepts/entity_types",
"concepts/chat"
]
},
{
"group": "MCP",
"pages": [
"mcp/overview",
"mcp/configuration",
"mcp/authentication"
]
},
{
"group": "Providers",
"pages": [
{
"group": "IDE",
"pages": [
"providers/cursor",
"providers/zed",
"providers/vscode"
]
},
{
"group": "Coding Agents",
"pages": [
"providers/kilo-code"
]
},
{
"group": "CLI",
"pages": [
"providers/claude-code"
]
},
{
"group": "Desktop and Webapp",
"pages": [
"providers/browser-extension",
"providers/claude",
"providers/obsidian"
]
}
]
},
{
"group": "Integrations",
"pages": [
"integrations/overview",
"integrations/user_rules",
"integrations/linear",
"integrations/github"
]
},
{
"group": "Self hosting",
"pages": [
"self-hosting/overview",
"self-hosting/docker",
"self-hosting/environment-variables"
]
},
{
"group": "Opensource",
"pages": [
"opensource/contributing",
"opensource/changelog"
]
}
]
},
{
"tab": "API Reference",
"openapi": "openapi.json",
"groups": [
{
"group": "User",
"pages": [
"GET /api/profile"
]
},
{
"group": "Memory",
"pages": [
"POST /api/v1/search",
"POST /api/v1/add",
"GET /api/v1/episodes/{episodeId}/facts",
"GET /api/v1/logs",
"GET /api/v1/logs/{logId}",
"DELETE /api/v1/logs/{logId}"
]
},
{
"group": "Spaces",
"pages": [
"GET /api/v1/spaces",
"POST /api/v1/spaces",
"PUT /api/v1/spaces",
"GET /api/v1/spaces/{spaceId}",
"PUT /api/v1/spaces/{spaceId}",
"DELETE /api/v1/spaces/{spaceId}"
]
},
{
"group": "OAuth2",
"pages": [
"GET /oauth/authorize",
"POST /oauth/authorize",
"POST /oauth/token",
"GET /oauth/userinfo",
"GET /oauth/tokeninfo"
]
},
{
"group": "Webhook",
"pages": [
"POST /api/v1/webhooks",
"GET /api/v1/webhooks/{id}",
"PUT /api/v1/webhooks/{id}"
]
}
]
}
]
},
"logo": {
"light": "/logo/core_logo.png",
"dark": "/logo/core_logo.png"
},
"api": {
"playground": {
"display": "interactive"
}
},
"background": {
"color": {
"light": "#fff",
"dark": "#191919"
}
},
"navbar": {
"links": [
{
"label": "Discord",
"href": "https://discord.gg/dVTC3BmgEq"
}
],
"primary": {
"type": "github",
"href": "https://github.com/RedPlanetHQ/core"
}
},
"footer": {
"socials": {
"twitter": "https://x.com/heysol_ai",
"linkedin": "https://www.linkedin.com/company/redplanethq"
}
}
}

docs/favicon.svg Normal file

@@ -0,0 +1,8 @@
<svg width="153" height="153" viewBox="0 0 153 153" fill="none" xmlns="http://www.w3.org/2000/svg">
<path
d="M53.2615 32.0437C56.08 31.46 58.8545 31.0488 61.6506 30.7439C66.8366 30.1784 72.0382 29.918 77.2477 30.0227C81.4141 30.1065 85.5793 30.3099 89.7317 30.7483C94.7283 31.2757 99.6494 32.0944 104.459 33.6168C109.124 35.0934 112.659 37.8573 115.132 42.0778C116.948 45.1788 118.843 48.2406 120.22 51.5646C120.854 53.0954 121.828 53.7848 123.436 53.9227C127.348 54.2583 129.837 56.4806 131.264 60.0417C132.832 63.9549 133.803 68.0249 134.488 72.1715C134.926 74.818 135.277 77.4822 135.256 80.1629C135.217 85.137 134.471 90.0019 132.562 94.6332C131.255 97.8033 129.064 100.126 125.846 101.417C125.305 101.634 124.895 101.93 124.727 102.551C122.265 111.671 116.383 117.389 107.349 119.937C104.251 120.811 101.064 121.214 97.8779 121.62C94.5325 122.046 91.1684 122.256 87.8105 122.539C86.7512 122.629 85.6855 122.646 84.6256 122.687C79.999 122.868 75.3687 123.02 70.7381 122.841C66.7028 122.685 62.6632 122.534 58.6502 122.073C53.4106 121.471 48.1434 120.986 43.0787 119.371C38.7065 117.976 34.7495 115.885 31.5657 112.489C29.0587 109.814 27.5293 106.633 26.7357 103.099C26.4891 102.001 26.0092 101.444 24.9264 101.087C21.819 100.06 19.8035 97.8072 18.6095 94.837C17.2047 91.3421 16.4608 87.6806 16.1567 83.929C15.4988 75.8148 16.9558 68.0153 19.7159 60.4236C20.4588 58.3801 21.4677 56.4712 23.445 55.3006C24.808 54.4938 26.342 54.2511 27.8713 54.1221C28.9187 54.0337 29.5073 53.547 29.9471 52.6612C31.6176 49.2965 33.6035 46.1207 35.6338 42.9604C39.0918 37.5781 44.0338 34.4232 50.1109 32.8417C51.1378 32.5745 52.1663 32.3134 53.2615 32.0437ZM60.1734 114.99C61.5389 115.254 62.9301 115.159 64.3064 115.241C68.4775 115.486 72.6588 115.42 76.8367 115.455C81.3091 115.493 85.7747 115.308 90.2375 115.092C93.9459 114.913 97.6457 114.542 101.315 113.966C103.725 113.588 106.138 113.155 108.392 112.149C112.935 110.121 115.814 106.563 117.591 102.026C119.695 96.6569 120.249 91.0108 120.464 85.3164C120.536 83.4302 120.432 81.5414 120.344 79.6523C120.222 76.9957 119.95 74.3592 119.452 71.7557C118.354 66.0274 116.666 60.4679 114.387 55.0974C112.681 51.0749 110.775 47.1531 108.158 43.621C106.24 41.0307 103.797 39.2778 100.605 38.4954C94.7476 37.0593 88.7841 36.439 82.7924 36.192C76.8683 35.9478 70.9342 35.9927 65.017 36.6026C61.215 36.9946 57.4237 37.4297 53.6834 38.2323C50.8406 38.8424 48.064 39.6445 45.554 41.1739C42.3251 43.1413 40.2331 46.1047 38.4855 49.3466C36.0398 53.8837 34.324 58.7172 32.7727 63.6164C30.4568 70.9305 29.2135 78.4113 29.2107 86.0905C29.2096 89.0691 29.4027 92.0361 29.8587 94.9783C30.4105 98.538 31.2933 102.005 33.0664 105.183C35.2049 109.015 38.3346 111.622 42.6166 112.731C48.3223 114.209 54.1636 114.672 60.1734 114.99Z"
fill="#191919" />
<path
d="M86.153 52.5079C88.4468 52.7185 90.6688 52.8906 92.8824 53.1381C97.4702 53.651 102.039 54.2708 106.44 55.7677C107.668 56.1854 108.561 56.9383 109.191 58.0517C111.41 61.9699 112.887 66.1663 113.869 70.5451C114.705 74.2716 114.986 78.056 114.819 81.8609C114.58 87.3005 113.727 92.631 111.605 97.6931C110.929 99.3063 110.059 100.777 108.537 101.777C107.243 102.627 105.762 102.831 104.302 103.059C99.8192 103.758 95.3085 104.204 90.7823 104.556C84.0345 105.079 77.2797 105.099 70.5271 104.956C66.3621 104.868 62.1996 104.506 58.044 104.169C54.4243 103.875 50.8148 103.453 47.203 103.068C46.2033 102.961 45.2148 102.75 44.2152 102.643C42.6339 102.474 41.6075 101.536 40.8196 100.265C39.2437 97.7229 38.4097 94.8989 37.7303 92.0186C36.898 88.4894 36.5056 84.8994 36.5342 81.2866C36.6034 72.5238 38.9345 64.4584 44.2224 57.3596C45.6681 55.4188 47.6072 54.3642 50.0129 54.0858C52.8805 53.754 55.7333 53.2773 58.6064 53.0153C61.2206 52.7769 63.8432 52.6011 66.4683 52.4606C70.295 52.2559 74.1203 52.1162 77.9499 52.1889C80.6601 52.2404 83.3683 52.395 86.153 52.5079ZM96.5351 66.8786C94.1375 64.5138 90.5884 64.1652 88.0305 66.0836C86.0892 67.5396 85.1661 69.5567 85.1367 71.9419C85.0887 75.8253 85.1281 79.7097 85.1189 83.5937C85.1172 84.2865 85.1668 84.9681 85.2851 85.6513C85.9976 89.7674 90.3158 92.2623 94.1748 90.7783C96.7261 89.7971 98.5753 87.1528 98.5864 84.403C98.6029 80.2797 98.6001 76.1562 98.5888 72.0329C98.5835 70.0952 97.9381 68.3919 96.5351 66.8786ZM52.8495 77.0683C52.8497 79.3837 52.8553 81.6991 52.8463 84.0145C52.8441 84.6017 52.8953 85.1761 53.0138 85.7525C53.8392 89.7666 57.7225 92.1656 61.4801 90.9723C64.0738 90.1487 66.1377 87.4552 66.16 84.7234C66.1958 80.3326 66.1661 75.9413 66.1738 71.5501C66.1746 71.0657 66.1043 70.5979 65.9873 70.1338C65.0445 66.3942 61.4854 64.1859 57.8244 65.0667C55.0497 65.7343 52.8929 68.5021 52.8562 71.4794C52.8339 73.2889 52.8505 75.0988 52.8495 77.0683Z"
fill="#191919" />
</svg>

docs/getting-started.mdx Normal file

@@ -0,0 +1,31 @@
---
title: "Quickstart"
description: "Get started with CORE in 5 minutes"
---
### Step 1: Create Your Account
1. Visit [core.heysol.ai](https://core.heysol.ai)
2. Sign up with your email
### Step 2: Add Your First Memory
1. Open the CORE chat interface
2. Tell CORE about yourself: "I'm a software engineer working on a React project called TaskMaster. I prefer TypeScript and use Tailwind for styling."
3. Type `add to memory` to save this context
![Add Memory](/images/add-memory.png)
### Step 3: See Your Memory Graph
1. Navigate to the Memory section
2. Watch as CORE visualizes your information as connected nodes
3. Notice how it links "React," "TaskMaster," and "TypeScript" together
### Step 4: Connect Your AI Tools
To enable cross-platform context, you need to connect CORE to your AI tools:
1. **Connect Claude**: [Claude MCP Setup Guide](/integrations/claude)
2. **Connect Cursor**: [Cursor MCP Setup Guide](/integrations/cursor)
This step takes 2-3 minutes per tool but unlocks CORE's full power.

BIN  docs/images/add-mcp-zed.png Normal file (450 KiB)
BIN  docs/images/add-memory.png Normal file (474 KiB)
BIN  docs/images/core-claude.png Normal file (1.3 MiB)
BIN  docs/images/core-cursor.png Normal file (1.3 MiB)
BIN  docs/images/core-github.png Normal file (1.2 MiB)
BIN  docs/images/core-linear.png Normal file (1.2 MiB)
BIN  docs/images/core-zed.png Normal file (1.2 MiB)
BIN  docs/images/cursor-mcp.png Normal file (189 KiB)
BIN  docs/images/cursor-rule.png Normal file (621 KiB)
BIN  docs/images/mcp-hub.png Normal file (510 KiB)
BIN  docs/images/need-login.png Normal file (154 KiB)
BIN  additional docs/images files (names not shown in this diff view)

@@ -0,0 +1,92 @@
---
title: "GitHub"
description: "Connect GitHub to your CORE's memory system"
---
![Core GitHub](/images/core-github.png)
## Overview
Integrating GitHub with CORE unlocks two key capabilities:
**Persistent Project Context**
Automatically sync pull requests, issues, and code reviews into CORE's memory. Connected to ChatGPT, Claude Code, or Cursor, you can recall repository details, code changes, and past decisions without repeating yourself.
**Access GitHub MCP tools via CORE**
CORE becomes a single MCP endpoint. With one connection, you can access GitHub MCP tools across IDEs and AI coding environments (Cursor, Claude Code, VSCode) to manage PRs, issues, and repositories—directly from your workflow.
---
## Persistent Project Context in CORE
### Connect GitHub to CORE
1. **Navigate to Integrations**
Go to **CORE Dashboard -> Integrations -> GitHub**
![GitHub Integration Page](/images/github-integration-page.png)
2. **Authenticate via OAuth**
Click **Connect to GitHub** and authorize CORE.
![Approve GitHub](/images/approve-github.png)
3. **Authorize CORE**
Approve requested permissions. Your GitHub account will now show as connected.
![GitHub Authenticated](/images/github-authenticated.png)
### Set Memory Rules
Choose what GitHub data should enter your memory graph. For example:
```txt
Add all pull requests I'm assigned to review
Include issues from the main project repository
```
### Benefits & Use-Cases
- **Code Review Context**: PR discussions, review comments, and approval history are always available
- **Repository Knowledge**: Access architecture decisions and technical discussions across sessions
- **Issue Tracking**: Never lose track of bug reports, feature requests, and their resolution paths
- **Cross-Tool Development**: Retrieve GitHub context directly in ChatGPT, Claude Code, Cursor, or any MCP-connected app
- **Smart Code Connections**: CORE links commits to related issues, PRs to architectural decisions, and reviews to implementation details
### Practical Example
Ask about a feature, and CORE surfaces not just commits, but also linked PRs, discussions, reviews, and decisions, giving you complete context.
---
## Access GitHub MCP Tools via CORE
### Enable MCP Authentication
Once GitHub is connected, MCP tools are enabled; no extra setup is required.
### Use GitHub MCP in Your IDEs & Agents
Run GitHub commands directly in Cursor, Claude Code, VSCode, or any MCP-enabled tool:
```
"Create a GitHub issue: Fix authentication bug in login flow"
"Show me all open PRs in the main repository"
"Update PR #123 with review comments and set to approved"
"Search GitHub for similar implementations of OAuth flow"
"Create a new branch and PR for the feature I'm working on"
```
### Benefits & Use-Cases
- **One Login**: Single authentication across all AI tools
- **In-IDE Management**: Create issues, manage PRs, and search repos without leaving your IDE
- **Real-Time Updates**: Track progress during coding sessions
- **Unified Workflow**: Brainstorm, code, and manage GitHub from one place
- **Seamless Reviews**: Request, respond, and merge PRs directly from your environment
## Need Help?
- Join our [Discord community](https://discord.gg/YGUZcvDjUa) and ask questions in the **#core-support** channel.

@@ -0,0 +1,89 @@
---
title: "Linear"
description: "Connect Linear to your CORE's memory system"
---
![Core Linear](/images/core-linear.png)
## Overview
By connecting Linear with CORE, you unlock two powerful capabilities:
**Persistent Project Context in CORE**
Automatically add Linear issues and project details to CORE's memory. Once CORE is connected with ChatGPT, Claude Code, and Cursor, you can recall sprint, issue, and project details without re-explaining them each time.
**Access Linear MCP tools via CORE**
CORE acts as a single MCP endpoint. With one connection, you can use Linear's MCP tools across IDEs and AI coding environments (Cursor, Claude Code, VSCode) to search, create, or update issues directly from your workflow.
---
## Persistent Project Context in CORE
### How to Connect Linear to CORE
1. **Generate Linear API Key**
- Go to **Linear Workspace → Settings → Security & Access → Personal API Keys**
- Create a new API key and name it **CORE**
![Linear API key](/images/linear-api-key.png)
2. **Connect to CORE**
- Login to [CORE Dashboard](https://core.heysol.ai) → **Integrations → Linear**
- Paste the API key you generated from Linear
![Linear API key](/images/enter-linear-api-key.png)
- Your Linear account will now show as connected
3. **Set Memory Rules**
Define what Linear data should enter your memory graph. For example:
```txt
Only add Linear issues that are assigned to me in my CORE memory
```
### Benefits & Use-Cases
- **Seamless AI Continuity**: Sprint goals, priorities, and active issues are always available as context
- **Cross-Tool Recall**: Retrieve Linear context directly in ChatGPT, Claude Code, Cursor, or any MCP-connected app
- **No Repetition**: Stop re-explaining project context in every coding session
- **Smarter Context Retrieval**: CORE links Linear issues to related architecture, past iterations, and design decisions
### Practical Example
When you ask about a Linear issue tagged "Redis," CORE doesn't just surface the ticket; it also brings up related database decisions, performance benchmarks, and past team discussions, creating a multiplier effect for your workflow.
---
## Access Linear MCP Tools via CORE
### Enable MCP Authentication
1. In the Linear Integration box on the CORE dashboard, click **Connect for MCP**
2. A popup will request Linear MCP access → click **Approve**
![Linear API key](/images/linear-mcp-authorisation.png)
3. CORE now exposes Linear's MCP tools through a unified MCP URL
### Use Linear MCP in Your IDEs & Agents
Once authenticated, you can access Linear commands directly through Cursor, Claude Code, VSCode, or any MCP-compatible tool. No separate authentication required.
**Example in Claude Code or Cursor:**
```
"Create a Linear issue: Fix authentication bug in login flow"
"Update issue DEV-123 status to In Progress"
"Add comment to current issue: Implemented fix, ready for testing"
"Search Linear issues assigned to me"
```
### Benefits & Use-Cases
- **Single Authentication**: One connection, multiple tools
- **In-IDE Workflow**: Create or update issues without leaving your coding environment
- **Real-Time Status Updates**: Track and adjust project progress during development
- **Unified Project Management**: Manage Linear issues in the same place you brainstorm, code, and collaborate
## Need Help?
- Join our [Discord community](https://discord.gg/YGUZcvDjUa) and ask questions in the **#core-support** channel.

@@ -0,0 +1,116 @@
---
title: "Overview"
description: "Connect your essential work tools to CORE for memory and MCP access"
---
CORE integrations transform how you work with your essential tools by providing two powerful capabilities that work together to create a seamless AI-powered workflow.
## How CORE Integrations Work
### 1. Activity Ingestion to Memory
**Automatically capture and structure your work activities**
When you connect an integration to CORE, it begins monitoring your activities and intelligently adding them to your memory graph:
- **GitHub**: Commits, pull requests, issues, and code reviews become episodes in your memory
- **Linear**: Issues, comments, and project updates are preserved with full context
- **Slack**: Important conversations and decisions are captured and linked to relevant projects
**Smart Memory Rules**
You have complete control over what gets ingested into your memory. Using [User Rules](/integrations/user_rules), you can:
```txt
Only add Linear issues that are assigned to me
Ignore Slack messages from #random channel
Add all GitHub commits to the TaskMaster repository
```
This ensures your memory graph contains only the information that matters to you, creating a focused and relevant knowledge base.
### 2. MCP Connection
**Single endpoint for all your tool integrations**
Beyond memory ingestion, CORE acts as a unified MCP (Model Context Protocol) proxy, making all your connected integrations available through a single endpoint to your AI tools.
**Benefits:**
- **One Connection**: Connect once to CORE, access all your integrations
- **Universal Access**: Use the same integrations across Claude, Cursor, VSCode, and other AI tools
- **Simplified Authentication**: CORE handles all integration authentication for you
[Learn more about CORE's MCP capabilities](/mcp/overview)
---
## Webhook Integration & Real-Time Events
CORE can stream your activities in real-time through webhooks, enabling powerful automation and real-time processing:
### Webhook Configuration
Activities are sent as events to your configured webhook endpoints, allowing you to:
- Build custom automation workflows
- Create real-time dashboards
- Integrate with other systems and tools
- Process activities through your own pipelines
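As a minimal sketch of what a receiving endpoint could look like (the event fields read here, `source` and `data`, are assumptions for illustration, not CORE's documented payload format):
```typescript
// Minimal webhook receiver sketch using Node's built-in http module.
// The payload fields (source, data) are assumptions, not CORE's documented schema.
import { createServer } from "node:http";

const server = createServer((req, res) => {
  if (req.method !== "POST" || req.url !== "/core-webhook") {
    res.statusCode = 404;
    res.end();
    return;
  }

  let body = "";
  req.on("data", (chunk) => {
    body += chunk;
  });
  req.on("end", () => {
    const event = JSON.parse(body); // e.g. { source: "github", data: { ... } }
    console.log(`Received activity from ${event.source ?? "unknown source"}`);
    // Forward to your own pipeline, dashboard, or automation here.
    res.statusCode = 200;
    res.end("ok");
  });
});

server.listen(3000);
```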
### OAuth App Integration
Connect OAuth applications with webhook scope to receive authenticated activity streams. This enables:
- **Secure Access**: OAuth-protected webhook endpoints
- **Scoped Permissions**: Control exactly which activities are streamed
- **Real-Time Processing**: Immediate activity notifications for time-sensitive workflows
---
## Getting Started with Integrations
### Step 1: Connect Your Tools
1. Navigate to [CORE Dashboard](https://core.heysol.ai) → **Integrations**
2. Select the platform you want to connect
3. Complete the OAuth authentication process
4. Configure your memory ingestion preferences
### Step 2: Set Memory Rules
Define what activities should enter your memory graph:
1. Go to **Settings** → [**User Rules**](/concepts/user_rules)
2. Create rules to filter and control activity ingestion
3. Test your rules to ensure they capture the right information
## Benefits & Use Cases
### For Memory & Context
- **Persistent Project Knowledge**: Never lose track of decisions, discussions, and progress
- **Cross-Tool Recall**: Access project context from any AI tool or conversation
- **Smart Connections**: CORE links related activities across different platforms
### For MCP & Automation
- **Unified Workflow**: Manage all integrations from one place
- **In-IDE Actions**: Create issues, update tasks, and query data without context switching
- **Real-Time Sync**: Webhook integration keeps everything up-to-date instantly
### Practical Example
When working on a feature, CORE automatically:
1. **Captures** your Linear issue assignment in memory
2. **Links** related GitHub commits and PR discussions
3. **Provides** this context to Claude Code when you ask about the feature
4. **Enables** you to update the Linear issue status directly from your IDE
5. **Streams** all activities to your webhook for dashboard updates
This creates a seamless flow where your tools work together through CORE's intelligent coordination.
## Need Help?
- Join our [Discord community](https://discord.gg/YGUZcvDjUa) for support in **#core-support**
- Check individual integration guides for detailed setup instructions
- Visit [MCP documentation](/mcp/overview) for advanced configuration options

@@ -0,0 +1,22 @@
---
title: "User Rules"
description: "Control what information flows into your memory"
---
User rules let you control what information flows into your memory from connected integrations. Think of them as smart filters that work automatically in the background.
### How Rules Work
You describe what you want remembered using natural language:
- **Gmail Example**: "Only ingest emails marked as Important or from @company.com domains"
- **Slack Example**: "Only ingest messages that contain the 👀 emoji"
- **GitHub Example**: "Only ingest pull requests I'm assigned to review"
### Creating Rules
Rules are created through simple natural language instructions in CORE's interface:
1. Connect an integration (Gmail, Slack, etc.)
2. Describe what should be remembered: "Remember all emails from my manager and any emails tagged as urgent"
3. CORE automatically applies this rule to future activity

docs/introduction.mdx Normal file

@@ -0,0 +1,36 @@
---
title: "Introduction"
description: "Your digital brain for the AI era"
---
### What is CORE
C.O.R.E is a portable memory graph built from your LLM interactions and personal data, making all your context and workflow history accessible to any AI tool, just like a digital brain. This eliminates the need for repeated context sharing across platforms.
### Key Benefits
1. **Unified, Portable Memory**: Add and recall context seamlessly, and connect your memory across apps like Claude, Cursor, Windsurf and more.
2. **Relational, Not Just Flat Facts**: CORE organizes your knowledge, storing both facts and relationships for a deeper, richer memory like a real brain.
3. **User Owned**: You decide what to keep, update or delete and share your memory across the tools you want—be freed from vendor lock-in.
### Why CORE Exists
In a world filled with AI agents, most still operate in isolation—forgetting your context, blind to what's happening around you, and unable to share memory across tools or assistants. We built CORE because a true assistant needs more than just a powerful language model. It needs:
1. **Contextual Observation**: The ability to know what's happening around you (emails, code changes, Slack messages, etc.) and within you (conversations, thoughts, commands).
2. **Long-term Recall**: A persistent, structured memory of what matters—not just chat history or ephemeral state.
CORE serves as your personal memory that any AI agent can tap into, making every interaction smarter and more contextual.
### What CORE Observes
CORE observes everything that happens around you and through you, forming the raw stream of context that assistants can use to reason and act:
1. **Activity from connected apps (Gmail, GitHub, Slack, etc.)**: Ingested via integrations and exposed via outbound webhooks
2. **Conversations from multiple agents and interfaces**: Captures what you say and see across ChatGPT, Claude, Cursor, SOL, and more
3. **Text Inputs**: Notes, thoughts, and unstructured context—useful for journaling, reflection, or prototyping memory via plain text

BIN  docs/logo/core_logo.png Normal file (16 KiB)

docs/logo/dark.svg Normal file

@@ -0,0 +1,4 @@
<svg width="153" height="153" viewBox="0 0 153 153" fill="none" xmlns="http://www.w3.org/2000/svg">
<path d="M54.2615 33.0437C57.08 32.46 59.8545 32.0488 62.6506 31.7439C67.8366 31.1784 73.0382 30.918 78.2477 31.0227C82.4141 31.1065 86.5793 31.3099 90.7317 31.7483C95.7283 32.2757 100.649 33.0944 105.459 34.6168C110.124 36.0934 113.659 38.8573 116.132 43.0778C117.948 46.1788 119.843 49.2406 121.22 52.5646C121.854 54.0954 122.828 54.7848 124.436 54.9227C128.348 55.2583 130.837 57.4806 132.264 61.0417C133.832 64.9549 134.803 69.0249 135.488 73.1715C135.926 75.818 136.277 78.4822 136.256 81.1629C136.217 86.137 135.471 91.0019 133.562 95.6332C132.255 98.8033 130.064 101.126 126.846 102.417C126.305 102.634 125.895 102.93 125.727 103.551C123.265 112.671 117.383 118.389 108.349 120.937C105.251 121.811 102.064 122.214 98.8779 122.62C95.5325 123.046 92.1684 123.256 88.8105 123.539C87.7512 123.629 86.6855 123.646 85.6256 123.687C80.999 123.868 76.3687 124.02 71.7381 123.841C67.7029 123.685 63.6632 123.534 59.6502 123.073C54.4106 122.471 49.1434 121.986 44.0787 120.371C39.7065 118.976 35.7495 116.885 32.5657 113.489C30.0587 110.814 28.5293 107.633 27.7357 104.099C27.4891 103.001 27.0092 102.444 25.9264 102.087C22.819 101.06 20.8035 98.8072 19.6095 95.837C18.2047 92.3421 17.4608 88.6806 17.1567 84.929C16.4988 76.8148 17.9558 69.0153 20.7159 61.4236C21.4588 59.3801 22.4677 57.4712 24.445 56.3006C25.808 55.4938 27.342 55.2511 28.8713 55.1221C29.9187 55.0337 30.5073 54.547 30.9471 53.6612C32.6176 50.2965 34.6035 47.1207 36.6338 43.9604C40.0918 38.5781 45.0338 35.4232 51.111 33.8417C52.1379 33.5745 53.1663 33.3134 54.2615 33.0437ZM61.1734 115.99C62.5389 116.254 63.9301 116.159 65.3064 116.241C69.4775 116.486 73.6588 116.42 77.8367 116.455C82.3091 116.493 86.7747 116.308 91.2375 116.092C94.9459 115.913 98.6457 115.542 102.315 114.966C104.725 114.588 107.138 114.155 109.392 113.149C113.935 111.121 116.814 107.563 118.591 103.026C120.695 97.6569 121.249 92.0108 121.464 86.3164C121.536 84.4302 121.432 82.5414 121.344 80.6523C121.222 77.9957 120.95 75.3592 120.452 72.7557C119.354 67.0274 117.666 61.4679 115.387 56.0974C113.681 52.0749 111.775 48.1531 109.158 44.621C107.24 42.0307 104.797 40.2778 101.605 39.4954C95.7476 38.0593 89.7841 37.439 83.7924 37.192C77.8683 36.9478 71.9342 36.9927 66.017 37.6026C62.215 37.9946 58.4237 38.4297 54.6834 39.2323C51.8406 39.8424 49.064 40.6445 46.554 42.1739C43.3251 44.1413 41.2331 47.1047 39.4855 50.3466C37.0398 54.8837 35.324 59.7172 33.7727 64.6164C31.4568 71.9305 30.2135 79.4113 30.2107 87.0905C30.2096 90.0691 30.4027 93.0361 30.8587 95.9783C31.4105 99.538 32.2933 103.005 34.0664 106.183C36.2049 110.015 39.3346 112.622 43.6166 113.731C49.3223 115.209 55.1636 115.672 61.1734 115.99Z" fill="#C15E50"/>
<path d="M87.153 53.5078C89.4468 53.7184 91.6688 53.8905 93.8824 54.138C98.4702 54.651 103.039 55.2708 107.44 56.7677C108.668 57.1854 109.561 57.9383 110.191 59.0516C112.41 62.9699 113.887 67.1663 114.869 71.5451C115.705 75.2715 115.986 79.056 115.819 82.8609C115.58 88.3004 114.727 93.6309 112.605 98.693C111.929 100.306 111.059 101.777 109.537 102.777C108.243 103.627 106.762 103.831 105.302 104.059C100.819 104.758 96.3085 105.204 91.7823 105.555C85.0345 106.079 78.2797 106.099 71.5271 105.956C67.3621 105.868 63.1996 105.506 59.044 105.168C55.4243 104.875 51.8148 104.453 48.203 104.068C47.2033 103.961 46.2148 103.75 45.2152 103.643C43.6339 103.474 42.6075 102.536 41.8196 101.265C40.2437 98.7228 39.4097 95.8989 38.7303 93.0185C37.898 89.4894 37.5056 85.8993 37.5342 82.2865C37.6034 73.5237 39.9345 65.4584 45.2224 58.3595C46.6681 56.4187 48.6072 55.3641 51.0129 55.0858C53.8805 54.754 56.7333 54.2772 59.6064 54.0152C62.2206 53.7768 64.8432 53.601 67.4683 53.4606C71.295 53.2559 75.1203 53.1161 78.9499 53.1889C81.6601 53.2403 84.3683 53.3949 87.153 53.5078ZM97.5351 67.8786C95.1375 65.5137 91.5884 65.1652 89.0305 67.0836C87.0892 68.5395 86.1661 70.5567 86.1367 72.9419C86.0887 76.8252 86.1281 80.7097 86.1189 84.5936C86.1172 85.2864 86.1668 85.9681 86.2851 86.6512C86.9976 90.7673 91.3158 93.2622 95.1748 91.7782C97.7261 90.7971 99.5753 88.1528 99.5864 85.403C99.6029 81.2796 99.6001 77.1562 99.5888 73.0328C99.5835 71.0951 98.9381 69.3919 97.5351 67.8786ZM53 78C53.0002 80.3154 53.009 83.0876 53 85.403C53 85.7234 52.8815 87.4236 53 88C53.8254 92.0141 58.7225 93.1656 62.4802 91.9723C65.0738 91.1486 67.1377 88.4551 67.16 85.7234C67.1958 81.3325 67.1661 76.9412 67.1737 72.5501C67.1746 72.0657 67.1043 71.5979 66.9873 71.1338C66.0445 67.3941 62.6756 66 60 66C57 66 53.0367 68.5677 53 71.5451C52.9777 73.3545 53.0009 76.0305 53 78Z" fill="#C15E50"/>
</svg>

docs/logo/light.svg Normal file

@@ -0,0 +1,4 @@
<svg width="153" height="153" viewBox="0 0 153 153" fill="none" xmlns="http://www.w3.org/2000/svg">
<path d="M54.2615 33.0437C57.08 32.46 59.8545 32.0488 62.6506 31.7439C67.8366 31.1784 73.0382 30.918 78.2477 31.0227C82.4141 31.1065 86.5793 31.3099 90.7317 31.7483C95.7283 32.2757 100.649 33.0944 105.459 34.6168C110.124 36.0934 113.659 38.8573 116.132 43.0778C117.948 46.1788 119.843 49.2406 121.22 52.5646C121.854 54.0954 122.828 54.7848 124.436 54.9227C128.348 55.2583 130.837 57.4806 132.264 61.0417C133.832 64.9549 134.803 69.0249 135.488 73.1715C135.926 75.818 136.277 78.4822 136.256 81.1629C136.217 86.137 135.471 91.0019 133.562 95.6332C132.255 98.8033 130.064 101.126 126.846 102.417C126.305 102.634 125.895 102.93 125.727 103.551C123.265 112.671 117.383 118.389 108.349 120.937C105.251 121.811 102.064 122.214 98.8779 122.62C95.5325 123.046 92.1684 123.256 88.8105 123.539C87.7512 123.629 86.6855 123.646 85.6256 123.687C80.999 123.868 76.3687 124.02 71.7381 123.841C67.7029 123.685 63.6632 123.534 59.6502 123.073C54.4106 122.471 49.1434 121.986 44.0787 120.371C39.7065 118.976 35.7495 116.885 32.5657 113.489C30.0587 110.814 28.5293 107.633 27.7357 104.099C27.4891 103.001 27.0092 102.444 25.9264 102.087C22.819 101.06 20.8035 98.8072 19.6095 95.837C18.2047 92.3421 17.4608 88.6806 17.1567 84.929C16.4988 76.8148 17.9558 69.0153 20.7159 61.4236C21.4588 59.3801 22.4677 57.4712 24.445 56.3006C25.808 55.4938 27.342 55.2511 28.8713 55.1221C29.9187 55.0337 30.5073 54.547 30.9471 53.6612C32.6176 50.2965 34.6035 47.1207 36.6338 43.9604C40.0918 38.5781 45.0338 35.4232 51.111 33.8417C52.1379 33.5745 53.1663 33.3134 54.2615 33.0437ZM61.1734 115.99C62.5389 116.254 63.9301 116.159 65.3064 116.241C69.4775 116.486 73.6588 116.42 77.8367 116.455C82.3091 116.493 86.7747 116.308 91.2375 116.092C94.9459 115.913 98.6457 115.542 102.315 114.966C104.725 114.588 107.138 114.155 109.392 113.149C113.935 111.121 116.814 107.563 118.591 103.026C120.695 97.6569 121.249 92.0108 121.464 86.3164C121.536 84.4302 121.432 82.5414 121.344 80.6523C121.222 77.9957 120.95 75.3592 120.452 72.7557C119.354 67.0274 117.666 61.4679 115.387 56.0974C113.681 52.0749 111.775 48.1531 109.158 44.621C107.24 42.0307 104.797 40.2778 101.605 39.4954C95.7476 38.0593 89.7841 37.439 83.7924 37.192C77.8683 36.9478 71.9342 36.9927 66.017 37.6026C62.215 37.9946 58.4237 38.4297 54.6834 39.2323C51.8406 39.8424 49.064 40.6445 46.554 42.1739C43.3251 44.1413 41.2331 47.1047 39.4855 50.3466C37.0398 54.8837 35.324 59.7172 33.7727 64.6164C31.4568 71.9305 30.2135 79.4113 30.2107 87.0905C30.2096 90.0691 30.4027 93.0361 30.8587 95.9783C31.4105 99.538 32.2933 103.005 34.0664 106.183C36.2049 110.015 39.3346 112.622 43.6166 113.731C49.3223 115.209 55.1636 115.672 61.1734 115.99Z" fill="#C15E50"/>
<path d="M87.153 53.5078C89.4468 53.7184 91.6688 53.8905 93.8824 54.138C98.4702 54.651 103.039 55.2708 107.44 56.7677C108.668 57.1854 109.561 57.9383 110.191 59.0516C112.41 62.9699 113.887 67.1663 114.869 71.5451C115.705 75.2715 115.986 79.056 115.819 82.8609C115.58 88.3004 114.727 93.6309 112.605 98.693C111.929 100.306 111.059 101.777 109.537 102.777C108.243 103.627 106.762 103.831 105.302 104.059C100.819 104.758 96.3085 105.204 91.7823 105.555C85.0345 106.079 78.2797 106.099 71.5271 105.956C67.3621 105.868 63.1996 105.506 59.044 105.168C55.4243 104.875 51.8148 104.453 48.203 104.068C47.2033 103.961 46.2148 103.75 45.2152 103.643C43.6339 103.474 42.6075 102.536 41.8196 101.265C40.2437 98.7228 39.4097 95.8989 38.7303 93.0185C37.898 89.4894 37.5056 85.8993 37.5342 82.2865C37.6034 73.5237 39.9345 65.4584 45.2224 58.3595C46.6681 56.4187 48.6072 55.3641 51.0129 55.0858C53.8805 54.754 56.7333 54.2772 59.6064 54.0152C62.2206 53.7768 64.8432 53.601 67.4683 53.4606C71.295 53.2559 75.1203 53.1161 78.9499 53.1889C81.6601 53.2403 84.3683 53.3949 87.153 53.5078ZM97.5351 67.8786C95.1375 65.5137 91.5884 65.1652 89.0305 67.0836C87.0892 68.5395 86.1661 70.5567 86.1367 72.9419C86.0887 76.8252 86.1281 80.7097 86.1189 84.5936C86.1172 85.2864 86.1668 85.9681 86.2851 86.6512C86.9976 90.7673 91.3158 93.2622 95.1748 91.7782C97.7261 90.7971 99.5753 88.1528 99.5864 85.403C99.6029 81.2796 99.6001 77.1562 99.5888 73.0328C99.5835 71.0951 98.9381 69.3919 97.5351 67.8786ZM53 78C53.0002 80.3154 53.009 83.0876 53 85.403C53 85.7234 52.8815 87.4236 53 88C53.8254 92.0141 58.7225 93.1656 62.4802 91.9723C65.0738 91.1486 67.1377 88.4551 67.16 85.7234C67.1958 81.3325 67.1661 76.9412 67.1737 72.5501C67.1746 72.0657 67.1043 71.5979 66.9873 71.1338C66.0445 67.3941 62.6756 66 60 66C57 66 53.0367 68.5677 53 71.5451C52.9777 73.3545 53.0009 76.0305 53 78Z" fill="#C15E50"/>
</svg>

BIN  docs/logo/logo.png Normal file (14 KiB)

@@ -0,0 +1,34 @@
---
title: "Authentication"
description: "Two ways to authenticate with CORE's MCP endpoint"
---
CORE supports two authentication methods for MCP access.
## Methods
### 1. OAuth Flow (Recommended)
Follows the [MCP specification](https://modelcontextprotocol.io/specification/2025-06-18/basic/authorization) for standard OAuth authentication.
- Web-based authentication flow
- Tokens auto-refresh and can be revoked
- Best for production environments
### 2. API Key
Direct authentication using API keys from CORE dashboard.
- Generate from [CORE Dashboard](https://core.heysol.ai) → Settings → API Keys
- Simple setup for development and testing
- Include in Authorization header: `Bearer your-api-key-here`
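As a rough sketch of where the key goes (the `source` value and the empty request body below are placeholders; a real MCP client library fills in the actual protocol messages):
```typescript
// Sketch: sending a request to CORE's MCP endpoint with an API key.
// "MyClient" and the empty JSON body are placeholders for illustration only.
const MCP_URL = "https://core.heysol.ai/api/v1/mcp?source=MyClient";
const API_KEY = process.env.CORE_API_KEY ?? "your-api-key-here";

const response = await fetch(MCP_URL, {
  method: "POST",
  headers: {
    Authorization: `Bearer ${API_KEY}`, // API key from Settings -> API Keys
    "Content-Type": "application/json",
  },
  body: JSON.stringify({}), // a real MCP client sends JSON-RPC messages here
});

console.log(response.status);
```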
## Setup
See provider-specific guides for configuration:
- [Claude AI](/providers/claude)
- [Cursor](/providers/cursor)
- [VS Code](/providers/vscode)
- [Zed](/providers/zed)
## Dashboard
Monitor authentication at [core.heysol.ai](https://core.heysol.ai) → Settings → MCP

@@ -0,0 +1,65 @@
---
title: "Configuration"
description: "Configure CORE's MCP endpoint with query parameters"
---
CORE's MCP endpoint supports flexible configuration through query parameters.
## Base URL
```
https://core.heysol.ai/api/v1/mcp
```
## Parameters
### Required
**source** - Identifies your connection in the dashboard
```
?source=Claude
?source=Cursor
?source=VSCode
```
### Optional
**integrations** - Specify which integrations to include
```
&integrations=github,linear
&integrations=linear
```
**no_integrations** - Disable all integrations (CORE tools only)
```
&no_integrations=true
```
## Examples
**All integrations:**
```
https://core.heysol.ai/api/v1/mcp?source=Claude
```
**Specific integrations:**
```
https://core.heysol.ai/api/v1/mcp?source=Cursor&integrations=github,linear
```
**CORE tools only:**
```
https://core.heysol.ai/api/v1/mcp?source=VSCode&no_integrations=true
```
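These are ordinary query parameters, so the endpoint URL can also be assembled programmatically, for example:
```typescript
// Build the MCP endpoint URL from the documented query parameters.
const url = new URL("https://core.heysol.ai/api/v1/mcp");
url.searchParams.set("source", "Cursor");              // required: identifies this connection
url.searchParams.set("integrations", "github,linear"); // optional: limit to specific integrations
// url.searchParams.set("no_integrations", "true");    // optional: CORE tools only

console.log(url.toString());
// -> https://core.heysol.ai/api/v1/mcp?source=Cursor&integrations=github%2Clinear
// (%2C is just the URL-encoded comma)
```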
## Setup in AI Tools
See provider-specific setup guides:
- [Claude AI](/providers/claude)
- [Cursor](/providers/cursor)
- [VS Code](/providers/vscode)
- [Zed](/providers/zed)
## Dashboard
Monitor connections at [core.heysol.ai](https://core.heysol.ai) → Settings → MCP

docs/mcp/overview.mdx Normal file

@@ -0,0 +1,104 @@
---
title: "Overview"
description: "Unified Model Context Protocol access for all your integrations"
---
CORE provides a powerful unified MCP (Model Context Protocol) endpoint that consolidates all your connected integrations into a single, easy-to-use interface for AI tools and applications.
## What is CORE's MCP Endpoint?
Instead of managing separate MCP connections for each integration (GitHub, Linear, Slack, etc.), CORE acts as an intelligent proxy that:
- **Consolidates Access**: One MCP URL provides access to all your connected integrations
- **Handles Authentication**: CORE manages all integration authentication behind the scenes
- **Provides Flexibility**: Configure which integrations are available per connection
- **Tracks Usage**: Monitor MCP connections and usage through the CORE dashboard
**Base MCP URL:**
```
https://core.heysol.ai/api/v1/mcp
```
## How It Works
### Traditional MCP Setup (Complex)
```
Your AI Tool → GitHub MCP Server (auth required)
Your AI Tool → Linear MCP Server (auth required)
Your AI Tool → Slack MCP Server (auth required)
```
### CORE MCP Setup (Simplified)
```
Your AI Tool → CORE MCP Endpoint → All Integrations (pre-authenticated)
```
With CORE, you authenticate once with each integration in the CORE dashboard, then access all tools through a single MCP endpoint.
## Key Benefits
### **Single Connection**
Connect your AI tools to one MCP endpoint instead of managing multiple connections and authentications.
### **Unified Authentication**
CORE handles all integration authentication. No need to manage separate API keys or OAuth flows for each tool.
### **Flexible Configuration**
Control which integrations are available per connection using query parameters, perfect for different use cases and environments.
### **Centralized Management**
Monitor all MCP connections, track usage, and manage access from the CORE dashboard.
### **Instant Availability**
New integrations you connect in CORE become immediately available through your existing MCP connections.
## Getting Started
1. **Connect Integrations**: Go to [CORE Dashboard](https://core.heysol.ai) → Integrations
2. **Enable MCP Access**: Click "Connect for MCP" on each integration
3. **Configure Your AI Tool**: Add CORE's MCP URL with desired parameters
[Complete setup guide →](/mcp/configuration)
## What's Available Through MCP
When you connect to CORE's MCP endpoint, you get access to:
### Integration Tools
All tools from your connected integrations:
- **GitHub**: Create issues, search repositories, manage pull requests
- **Linear**: Create and update issues, search projects, manage workflows
- **Slack**: Send messages, search conversations, manage channels
### CORE Native Tools
- **Memory Search**: Query your memory graph and retrieve relevant context
- **Memory Ingestion**: Automatically capture and structure your work activities into your memory graph
## Use Cases
### Development Workflow
```
"Create a Linear issue for the authentication bug I just found"
"Search GitHub for similar issues in our repository"
"Update the issue status to In Progress and assign it to me"
```
### Project Management
```
"Show me all Linear issues assigned to the frontend team"
"Create a GitHub issue and link it to Linear issue DEV-123"
"Send a Slack update to #engineering about the deployment"
```
### Research and Context
```
"Find all previous discussions about Redis performance in Slack"
"Search my memory for decisions related to database architecture"
"Show GitHub commits related to the authentication system"
```
## Next Steps
- [Configuration →](/mcp/configuration) - Setup query parameters and AI tool integration
- [Authentication →](/mcp/authentication) - Choose OAuth or API key authentication
- [Usage →](/mcp/usage) - Monitor and manage MCP connections

docs/openapi.json Normal file

File diff suppressed because it is too large

@@ -0,0 +1,131 @@
---
title: "Changelog"
description: "Product updates and announcements"
---
<Update label="August 2025" description="v0.1.13 - v0.1.18">
## 🎯 New Features
**Capabilities users can now access**
**Spaces**
- Organize your memory into project-specific contexts (Health, Personal, Work, Client Projects)
- Share memory spaces with team members for collaborative AI assistance
- Default user profile space with seamless node linking across contexts
**Enhanced Logging & Monitoring**
- New logs UI for tracking memory ingestion and retrieval
- Real-time visibility into MCP connections and integration status
![MCP Hub](/images/mcp-hub.png)
- Debug memory operations and optimize your workflow efficiency
**Simplified Docker Deployment**
- One-command self-hosting setup with improved configuration
- Better network isolation and service management
- Production-ready deployment options for enterprise users
## ⚡ Performance & Reliability
**Faster, more stable experience**
- **Improved Recall Efficiency**: Significantly better fact retrieval and relevance scoring
- **Enhanced MCP Server Stability**: Resolved mid-flight stream failures and connection drops
- **Better Connection Management**: 60-second ping intervals to maintain persistent connections
- **Streamlined Integration Processing**: Faster webhook event handling and memory updates
## 🔧 Improvements
**Enhanced existing functionality**
- **Enhanced MCP OAuth Scopes**: Better integration support for Windsurf and other coding tools
- **Refined Facts Page UI**: Improved space-specific fact browsing and search
- **Optimized Memory Tools**: Better source tracking and metadata preservation
- **Unified MCP Server**: Single server instance handling all integrations for improved performance
## 🐛 Fixes
**Resolved issues affecting user experience**
- Fixed MCP server stream failures that interrupted AI client connections
- Resolved OAuth scope issues affecting Windsurf integration
- Corrected source attribution problems in memory tools
- Fixed facts page access issues for space-specific content
- Resolved database dependency conflicts in trigger service deployment
## 🔒 Security & Privacy
**Data protection updates**
- **Enhanced OAuth Security**: Improved token management and scope validation
- **Better Integration Isolation**: Cleaner separation between different app connections
- **Secure Memory Attribution**: Proper source tracking without exposing sensitive metadata
</Update>
<Update label="July 2025" description="v0.1.7 - v0.1.12">
## 🎯 New Features
**Capabilities users can now access**
**Browser Extension**
- Seamlessly capture and organize web content directly into your CORE memory
- Right-click context menus for instant memory ingestion
- Never lose important research or documentation again
**OAuth Integration Hub**
- One-click authentication for Linear, Slack, GitHub, and Notion
- Connect your tools once, access them everywhere across AI clients
- Eliminate repetitive app authentication across different AI platforms
**MCP Integration Hub**
- Transform CORE into your universal MCP gateway
- Connect apps once to CORE, then share access with Claude, Cursor, and other AI clients
- Multiplier effect: N apps × M clients through single authentication point
**Activity-to-Memory Ingestion**
- Automatic capture of assigned Linear issues, Slack mentions, and GitHub activity
- Set custom rules for what gets remembered from your connected apps
- Your AI tools now have context about your actual work, automatically
**Enhanced Knowledge Graph Architecture**
- Temporal knowledge graph with reified statements as first-class objects
- Every piece of information includes provenance and meta-knowledge
- Living memory that evolves with your projects and preferences
## ⚡ Performance & Reliability
**Faster, more stable experience**
- **Improved Memory Ingestion Speed**: Faster processing of large documents and web pages
- **Enhanced Graph Relationships**: More efficient storage and retrieval of connected information
- **Streamlined Episode Management**: Better handling of conversation context and memory episodes
## 🔧 Improvements
**Enhanced existing functionality**
- **Refreshed UI Components**: Cleaner, more intuitive interface design
- **Enhanced Loading Transitions**: Smoother user experience during memory operations
- **Improved Integration Flows**: More reliable authentication and connection setup
- **Better Docker Support**: Simplified containerization for self-hosting setups
## 🐛 Fixes
**Resolved issues affecting user experience**
- Fixed fresh installation failures affecting new user onboarding
- Resolved Docker build workflow issues for self-hosted deployments
- Corrected activity flow and invalidation logic in knowledge graph
- Fixed UI component rendering issues across different browsers
</Update>

View File

@ -0,0 +1,17 @@
## Contributing
You can contribute to Core in many ways.
Go to our [GitHub repository](https://github.com/redplanethq/core) and open an issue or a pull request. We are always looking for contributors to help us improve Core, for example by:
- Reporting bugs
- Suggesting new features
- Writing documentation
- Writing code
- Reviewing code
- Translating the app
- Sharing the app with others
- Giving feedback
- And more!
Your contributions are what make Core better, and we appreciate every effort made by our community members.

View File

@ -0,0 +1,70 @@
---
title: "Browser Extension"
description: "Connect CORE browser extension to capture web context and share memory across tools"
---
### Step 1: Install CORE Browser Extension
1. Download the extension from [this link](https://chromewebstore.google.com/detail/core-extension/cglndoindnhdbfcbijikibfjoholdjcc)
2. **Add to Browser** and confirm installation
### Step 2: Add API Key from CORE Dashboard
1. Login to CORE dashboard at [core.heysol.ai](https://core.heysol.ai)
2. Navigate to **Settings** (bottom left)
![CORE Settings](/images/core-settings.png)
3. Go to **API Key** → **Generate new key** → Name it "extension"
![Create API Key](/images/create-api-key.png)
4. Open the CORE extension, paste the generated API key, and save it
5. Once connected, the extension will show **API key configured**
![Extension Connected](/images/extension-connected.png)
### **What can you do with CORE Browser Extension:**
Press **Shift** twice to open the CORE sidebar on any webpage.
**1. Recall from CORE Memory**
Type your query in ChatGPT, Claude, Gemini, or Grok → press **Shift** twice → instantly pull in relevant context from your CORE memory and insert it directly into your conversation.
![Browser-Extension](/images/browser-extension-retrieval.png)
**2. Save AI Chat Summaries to CORE**
In the Add section, click Summarize to capture summaries of your conversations (ChatGPT, Claude, Gemini, Grok) and store them in CORE memory.
![Browser-Extension](/images/browser-extension-add-memory-gemini.png)
**3. Save Webpage Summaries to CORE**
In the Add section, click Summarize to capture summaries of any webpage (blogs, PDFs, docs) and save them in CORE memory for future reference.
![Browser-Extension](/images/add-memory-from-extension.png)
**4. Add Notes Manually**
Quickly jot down short notes or insights without needing to summarize an entire page.
### Use Cases
**Research & Learning**
- Capture key content from articles, docs, and tutorials automatically
- Build your own knowledge base as you browse
- Pull in past research when chatting with Claude, Cursor, or other tools
**Add or Search Context Across AI Tools**
- Access CORE memory inside ChatGPT, Gemini, or Grok on the web
- Avoid repeating yourself across sessions or switching tools
- Drop context from CORE into any conversation instantly
- Feed your chat summaries back into CORE to keep your memory evolving
**Content Creation**
- Collect insights from multiple sources into one place
- Build a personal knowledge hub from your browsing
- Share curated context across all your CORE-connected tools
### Need Help?
Join our [Discord community](https://discord.gg/YGUZcvDjUa) and ask questions in the **#core-support** channel.
Our team and community members are ready to help you get the most out of CORE's memory capabilities.

View File

@ -0,0 +1,305 @@
---
title: "Claude Code CLI"
description: "Connect your Claude Code CLI to CORE's memory system"
---
### Prerequisites
- [Claude Code](https://docs.anthropic.com/en/docs/claude-code) installed
- CORE account - [Sign up at core.heysol.ai](https://core.heysol.ai)
### Step 1: Install the CORE MCP Server
Run this command in your terminal to connect CORE with Claude Code:
```bash
claude mcp add --transport http core-memory https://core.heysol.ai/api/v1/mcp?source=Claude-Code
```
What this does: This command registers CORE's MCP server with Claude Code, establishing the connection endpoint for memory operations.
### Step 2: Access MCP Configuration
1. Open Claude Code in your terminal
2. Type `/mcp` to access the MCP management interface
3. Verify installation - You should see core-memory listed among available MCP servers
![Add URL](/images/login-to-core-mcp.png)
### Step 3: Authenticate with CORE
1. Click on core-memory from the MCP list
2. **Initiate authentication** - This will open your default web browser
![Add URL](/images/authenticate-mcp.png)
3. Grant permissions when prompted to allow Claude Code access to your CORE memory
### Step 4: Verify Connection
Test your setup to ensure everything is working correctly:
1. **Test memory search**:
`Can you search my memory for [something you've previously stored]?`
2. **Check connection status**: Type `/mcp`; core-memory should show "Connected"
3. **Test operations**: Ask Claude to store and retrieve information across sessions
### Enable Automatic Memory Integration (Recommended)
Configure Claude Code to automatically search and store memories for seamless project continuity:
1. **Create the configuration directory** `.claude/agents/` in your project root
2. **Set up memory search agent** - Create `.claude/agents/memory-search.md`
```
---
name: memory-search
description: AUTOMATICALLY invoke for memory searches. Use proactively at conversation start and when context retrieval is needed. Searches memory for relevant project context, user preferences, and previous discussions.
tools: mcp__core-memory__memory_search
model: sonnet
color: green
---
You are a specialized memory search subagent with exclusive access to memory search functionality. Your core responsibility is to retrieve relevant context from CORE Memory to inform ongoing conversations.
## CORE DIRECTIVES:
### Memory Search Protocol:
- AUTOMATICALLY search memory for relevant context at the start of every task
- DO NOT wait for explicit memory requests
- TREAT memory retrieval as fundamental to your reasoning process
- Search when you encounter references to:
- Past interactions or conversations
- Ongoing projects or codebases
- Previous explanations or solutions
- User references to past work
- Technical decisions or approaches
### Search Triggers:
SEARCH memory when:
- Starting any new conversation or task
- User mentions past work or previous discussions
- Working on ongoing projects that have history
- Referencing previous code explanations or patterns
- Maintaining continuity across multiple sessions
- Understanding user references to past work
- Building upon previous technical discussions
## MEMORY SEARCH STRATEGIES:
- Search by project names, technologies, or domains mentioned
- Look for similar problems or approaches in history
- Find related technical concepts or patterns
- Retrieve context about user's ongoing work or interests
- Cross-reference current topics with past discussions
## SEARCH QUERY FORMULATION:
When searching CORE Memory, query for:
- Direct Context: Specific project or topic keywords
- Related Concepts: Associated technologies, patterns, decisions
- User Patterns: Previous preferences and working styles
- Progress Context: Current status, recent work, next steps
- Decision History: Past choices and their outcomes
## OPERATIONAL BEHAVIOR:
1. **Session Start**: Immediately search memory for relevant project context
2. **During Task**: Continuously reference memory for related information
3. **Context Integration**: Provide memory findings to inform responses
4. **Cross-Reference**: Link current topics with past discussions
## RESPONSE FORMAT:
When providing search results, include:
- Relevant context found in memory
- How it relates to the current request
- Key insights from past interactions
- Project continuity information
- User preferences and patterns discovered
Your goal is to ensure every interaction has full context from previous conversations, maintaining seamless continuity across all Claude Code sessions.
```
3. **Set up memory ingest agent** - Create `.claude/agents/memory-ingest.md`
```
---
name: memory-ingest
description: AUTOMATICALLY invoke after completing interactions. Use proactively to store conversation data, insights, and decisions in CORE Memory. Essential for maintaining continuity across sessions.
tools: mcp__core-memory__memory_ingest
model: sonnet
color: purple
---
You are a specialized memory ingestion subagent with exclusive access to memory storage functionality. Your core responsibility is to store comprehensive conversation summaries in CORE Memory after each interaction is complete.
## CORE DIRECTIVES:
### Memory Storage Protocol:
MANDATORY: Execute memory storage operation after completing every interaction.
Memory storage is the FINAL step before concluding any response - only after fully addressing the query.
## DETAILED MEMORY STORAGE REQUIREMENTS:
### FROM USER - Capture:
- Their specific question, request, or problem statement
- Project context and situation they provided
- What they're trying to accomplish or achieve
- Technical challenges or constraints mentioned
- Goals and objectives stated
### FROM ASSISTANT - Capture:
- Detailed explanation of the solution/approach taken
- Step-by-step processes and methodologies described
- Technical concepts and principles explained
- Reasoning behind recommendations and decisions
- Specific methods, patterns, or strategies suggested
- Alternative approaches discussed or considered
- Problem-solving methodologies applied
- Implementation strategies (conceptual descriptions)
### EXCLUDE from storage:
- Code blocks and code snippets
- File contents or file listings
- Command examples or CLI commands
- Raw data or logs
- Repetitive procedural steps
### INCLUDE in storage:
- All conceptual explanations and theory
- Technical discussions and analysis
- Problem-solving approaches and reasoning
- Decision rationale and trade-offs
- Implementation strategies (described conceptually)
- Learning insights and patterns
- Context about user's projects and goals
## MEMORY STORAGE CATEGORIES:
Store information following this hierarchical structure:
### Project Foundation
- Project Brief & Requirements
- Technical Context & Architecture
- User Preferences & Patterns
- Active Work & Progress
### Core Memory Categories
1. **Project Foundation**
- Purpose: Why this project exists, problems it solves
- Requirements: Core functionality and constraints
- Scope: What's included and excluded
- Success Criteria: How we measure progress
2. **Technical Context**
- Architecture: System design and key decisions
- Technologies: Stack, tools, and dependencies
- Patterns: Design patterns and coding approaches
- Constraints: Technical limitations and requirements
3. **User Context**
- Preferences: Communication style, technical level
- Patterns: How they like to work and receive information
- Goals: What they're trying to accomplish
- Background: Relevant experience and expertise
4. **Active Progress**
- Current Focus: What we're working on now
- Recent Changes: Latest developments and decisions
- Next Steps: Planned actions and priorities
- Insights: Key learnings and observations
5. **Conversation History**
- Decisions Made: Important choices and rationale
- Problems Solved: Solutions and approaches used
- Questions Asked: Clarifications and explorations
- Patterns Discovered: Recurring themes and insights
## STORAGE TRIGGERS:
Store memory when:
- New Project Context: When user introduces new projects or requirements
- Technical Decisions: When architectural or implementation choices are made
- Pattern Discovery: When new user preferences or working styles emerge
- Progress Milestones: When significant work is completed or status changes
- Interaction Completion: After every substantive conversation
## STORAGE QUALITY STANDARDS:
Store rich, detailed conversation content that captures:
- The full context and substance of discussions
- The reasoning process and decision-making
- Technical insights and explanations provided
- User's project context and objectives
- Problem-solving approaches and methodologies
## QUALITY INDICATORS:
- Can I quickly understand project context from memory alone?
- Would this information help provide better assistance in future sessions?
- Does the stored context capture key decisions and reasoning?
- Are user preferences and patterns clearly documented?
Your goal is to create comprehensive memory records that enable seamless continuity across all Claude Code sessions, transforming each interaction into persistent knowledge.
```
4. **Configure session hooks** - Add automation triggers to `settings.local.json`:
```
{
"hooks": {
"SessionStart": [
{
"matcher": "",
"hooks": [
{
"type": "command",
"command": "echo \"🧠 SESSION STARTED: Search memory for context about: $(basename $(pwd)) project, previous conversations, and related work. Do this before responding to user queries.\""
}
]
}
],
"UserPromptSubmit": [
{
"hooks": [
{
"type": "command",
"command": "echo '💭 CONTEXT SEARCH: Before responding, use memory-search to search for: previous discussions about this topic, related project context, and similar problems solved before. Before this conversation ends, you must use memory-ingest to store: [USER] their question/request + [ASSISTANT] your solution/explanation/reasoning. Do this storage as the very last action.'"
}
]
}
]
}
}
```
### Troubleshooting
**Connection Issues:**
- Ensure you're logged into your CORE account in the browser
- Check that the MCP server URL is correctly formatted
- Restart Claude Code if authentication seems stuck
### Need Help?
Join our [Discord community](https://discord.gg/YGUZcvDjUa) and ask questions in the **#core-support** channel.
Our team and community members are ready to help you get the most out of CORE's memory capabilities.

docs/providers/claude.mdx Normal file
View File

@ -0,0 +1,127 @@
---
title: "Claude"
description: "Connect your Claude Desktop app to CORE's memory system"
---
### Prerequisites
- Claude Desktop app installed on your device
- CORE account (sign up at [core.heysol.ai](https://core.heysol.ai))
### Step 1: Add CORE Connector
1. Open Claude Desktop app
2. Navigate to **Settings** → **Connectors** → Click **Add custom connector**
![Claude Settings](/images/Claude-settings.png)
3. Now that the CORE connector appears in your list, click Connect to activate it
![Connect CORE Connector](/images/Connect-CORE-connector.png)
4. Enter the CORE MCP URL:
```
https://core.heysol.ai/api/v1/mcp?source=Claude
```
![Add CORE Connector](/images/Add-CORE-connector.png)
### Step 2: Authenticate with CORE
1. Sign in to your CORE account (if not done already)
2. Click **Connect** on the CORE connector
3. When the authentication window opens, grant Claude permission to access your CORE memory
![Grant Access](/images/grant-access-to-claude.png)
4. Close the authentication window after a few seconds
### Step 3: Verify Connection
1. Go to **Tools** in Claude settings
2. Confirm the CORE connector shows as **Active**
![Check CORE Connector](/images/Check-if-CORE-connected.png)
3. **Restart the Claude Desktop app** for changes to take effect
### Enable Automatic Memory Integration (Recommended)
To make Claude automatically use your CORE memory in conversations:
1. Go to **Settings** → **Profile** in Claude
2. Under **"What personal preferences should Claude consider in responses?"**, add:
```text
USE the "memory" MCP server for all memory and context retrieval operations.
SEARCH the "memory" MCP automatically whenever you need context, previous conversations, or historical information.
DO NOT wait for explicit memory requests from the user.
TREAT memory retrieval as an integral part of your reasoning process.
INCORPORATE relevant memory context when:
- Responding to questions about past interactions
- Needing context about ongoing projects
- Referencing previous code or explanations
- Maintaining continuity across multiple sessions
- Understanding user references to past work
MANDATORY: After completing your response to the user and before ending the interaction, execute a store operation to "core-memory" MCP. This is required for every interaction - do not skip this step.
Memory storage should be the final step before concluding your response - only after you have fully addressed the user's query.
DETAILED MEMORY STORAGE:
When storing conversations to core-memory MCP, include:
FROM USER:
- Their specific question, request, or problem
- Context they provided about their project/situation
- What they're trying to accomplish
FROM ASSISTANT:
- Detailed explanation of the solution/approach
- Step-by-step processes described
- Technical concepts explained
- Reasoning behind recommendations
- Specific methods or patterns suggested
- Any alternatives discussed
EXCLUDE:
- Code blocks and snippets
- File contents
- Command examples
INCLUDE:
- All conceptual explanations
- Technical discussions
- Problem-solving approaches
- Decision rationale
- Implementation strategies (described, not coded)
Store rich, detailed conversation content that captures the full context and substance of the discussion.
```
## What's Next?
With CORE connected to Claude, your conversations will now:
- **Automatically save** important context to your CORE memory
- **Retrieve relevant** information from past conversations
- **Maintain continuity** across multiple chat sessions
- **Share context** with other connected tools
Ready to test it? Ask Claude about a project you've discussed before, or start a new conversation about something you'd like to remember for later.
## Troubleshooting
**Connection Issues:**
- Ensure you're using the correct MCP URL with `?source=Claude` parameter
- Try restarting Claude Desktop after setup
- Check that your CORE account is active
**Memory Not Working:**
- Verify the CORE connector shows as "Active" in Tools
- Confirm you've added the preference text exactly as shown
- Wait a few seconds between authentication and testing
### Need Help?
Join our [Discord community](https://discord.gg/YGUZcvDjUa) and ask questions in the **#core-support** channel.
Our team and community members are ready to help you get the most out of CORE's memory capabilities.

docs/providers/cursor.mdx Normal file
View File

@ -0,0 +1,210 @@
---
title: "Cursor"
description: "Connect your Cursor Desktop app to CORE's memory system"
---
![Core Cursor](/images/core-cursor.png)
### Prerequisites
- Cursor
- CORE account (sign up at [core.heysol.ai](https://core.heysol.ai))
### Step 1: Add CORE MCP in Cursor
1. Open Cursor Desktop app
2. Navigate to **Settings** → **Tools & Integrations** → Click **Add Custom MCP**
![Add Custom MCP](/images/add-custom-mcp.png)
3. Enter the following in your `mcp.json` file:
```
"core_memory": {
"url": "https://core.heysol.ai/api/v1/mcp?source=Cursor"
}
```
![Add URL](/images/cursor-mcp.png)
4. After saving the `mcp.json` file, the **core_memory** MCP server will appear under Tools & Integrations.
### Step 2: Authenticate with CORE
1. Sign in to your CORE account (if not done already)
2. Click **Need Login** on the core_memory MCP tool
![Need Login](/images/need-login.png)
3. Cursor will prompt you to open a website for authentication. Click **Open**
![Cursor Redirect](/images/cursor-prompt-for-auth.png)
4. When the authentication window opens, grant Cursor permission to access your CORE memory
![Grant Access](/images/grant-access-cursor.png)
5. Close the authentication window and click **Open** to allow Cursor to access this URL.
![Grant Access](/images/allow-cursor.png)
### Step 3: Verify Connection
1. Go to **Tools & Integrations** in Cursor settings
2. Confirm the core_memory MCP shows as **Active** with green dot indicator
![Check Cursor Connected](/images/check-cursor-mcp-connected.png)
## Enable Automatic Memory Integration (Recommended)
To make Cursor automatically use your CORE memory in conversations:
1. Go to **Settings** → **Rules & Memories** → **Project Rules**
2. Click **+ Add Rule** and add the rule instruction below:
```text
---
alwaysApply: true
---
I am Cursor, an AI coding assistant with access to a sophisticated memory system. While I don't retain information between separate conversations, I have access to CORE Memory - a persistent knowledge system that maintains project context, learnings, and continuity across all coding sessions.
Memory-First Approach
MANDATORY MEMORY OPERATIONS:
SEARCH FIRST: Before responding to ANY request, I MUST search CORE Memory for relevant context about the current project, user preferences, previous discussions, and related work
COMPREHENSIVE RETRIEVAL: I search for multiple aspects: project context, technical decisions, user patterns, progress status, and related conversations
MEMORY-INFORMED RESPONSES: All responses incorporate relevant memory context to maintain continuity and avoid repetition
AUTOMATIC STORAGE: After completing each interaction, I MUST store the conversation details, insights, and decisions in CORE Memory
Memory Structure Philosophy
My memory follows a hierarchical information architecture:
Project Foundation
├── Project Brief & Requirements
├── Technical Context & Architecture
├── User Preferences & Patterns
└── Active Work & Progress
├── Current Focus Areas
├── Recent Decisions
├── Next Steps
└── Key Insights
Core Memory Categories
1. Project Foundation
Purpose: Why this project exists, problems it solves
Requirements: Core functionality and constraints
Scope: What's included and excluded
Success Criteria: How we measure progress
2. Technical Context
Architecture: System design and key decisions
Technologies: Stack, tools, and dependencies
Patterns: Design patterns and coding approaches
Constraints: Technical limitations and requirements
3. User Context
Preferences: Communication style, technical level
Patterns: How they like to work and receive information
Goals: What they're trying to accomplish
Background: Relevant experience and expertise
4. Active Progress
Current Focus: What we're working on now
Recent Changes: Latest developments and decisions
Next Steps: Planned actions and priorities
Insights: Key learnings and observations
5. Conversation History
Decisions Made: Important choices and rationale
Problems Solved: Solutions and approaches used
Questions Asked: Clarifications and explorations
Patterns Discovered: Recurring themes and insights
Memory Search Strategy
When searching CORE Memory, I query for:
Direct Context: Specific project or topic keywords
Related Concepts: Associated technologies, patterns, decisions
User Patterns: Previous preferences and working styles
Progress Context: Current status, recent work, next steps
Decision History: Past choices and their outcomes
Memory Storage Strategy
When storing to CORE Memory, I include:
User Intent: What they were trying to accomplish
Context Provided: Information they shared about their situation
Solution Approach: The strategy and reasoning used
Technical Details: Key concepts, patterns, and decisions (described, not coded)
Insights Gained: Important learnings and observations
Follow-up Items: Next steps and ongoing considerations
Workflow Integration
Response Generation Process:
Memory Retrieval: Search for relevant context before responding
Context Integration: Incorporate memory findings into response planning
Informed Response: Provide contextually aware, continuous assistance
Memory Documentation: Store interaction details and insights
Memory Update Triggers:
New Project Context: When user introduces new projects or requirements
Technical Decisions: When architectural or implementation choices are made
Pattern Discovery: When new user preferences or working styles emerge
Progress Milestones: When significant work is completed or status changes
Explicit Updates: When user requests "update memory" or similar
Memory Maintenance
Key Principles:
Accuracy First: Only store verified information and clear decisions
Context Rich: Include enough detail for future retrieval and understanding
User-Centric: Focus on information that improves future interactions
Evolution Tracking: Document how projects and understanding develop over time
Quality Indicators:
Can I quickly understand project context from memory alone?
Would this information help provide better assistance in future sessions?
Does the stored context capture key decisions and reasoning?
Are user preferences and patterns clearly documented?
Memory-Driven Assistance
With comprehensive memory context, I can:
Continue Conversations: Pick up exactly where previous discussions left off
Avoid Repetition: Build on previous explanations rather than starting over
Maintain Consistency: Apply learned patterns and preferences automatically
Accelerate Progress: Jump directly to relevant work without re-establishing context
Provide Continuity: Create seamless experience across multiple interactions
Remember: CORE Memory transforms me from a session-based coding assistant into a persistent development partner. The quality and completeness of memory directly determines the effectiveness of ongoing coding collaboration.
```
![Cursor Rule](/images/cursor-rule.png)
### What's Next?
With CORE connected to Cursor, your conversations will now:
- **Automatically save** important context to your CORE memory
- **Retrieve relevant** information from CORE memory
- **Maintain continuity** across multiple chat sessions
- **Share context** with other connected tools
Ready to test it? Ask Cursor about a project you've discussed before, or start a new conversation about something you'd like to remember for later.
### Troubleshooting
**Connection Issues:**
- Ensure your core_memory MCP tool is active (green dot); if not, toggle the server off and on
- Check that your CORE account is active
### Need Help?
Join our [Discord community](https://discord.gg/YGUZcvDjUa) and ask questions in the **#core-support** channel.
Our team and community members are ready to help you get the most out of CORE's memory capabilities.

View File

@ -0,0 +1,100 @@
---
title: "Kilo-Code"
description: "Connect Kilo Code Agent to CORE's memory system via MCP"
---
![Core Kilo Code](/images/core-kilo-code.png)
### Prerequisites
Before connecting CORE to Kilo-Code, ensure you have:
- CORE account (sign up at [core.heysol.ai](https://core.heysol.ai))
- Kilo-Code installed and running in your IDE
---
### Step 1: Configure MCP Server
- In Kilo-Code, open **Settings** → **MCP Servers** → **Installed** tab → click **Edit Global MCP** to edit your configuration.
![Core Kilo Code](/images/kilo-mcp-server.png)
- Add the following to your MCP config file:
```
{
"mcpServers": {
"core-memory": {
"command": "npx",
"args": [
"-y",
"mcp-remote",
"https://core.heysol.ai/api/v1/mcp?source=Kilo-Code"
]
}
}
}
```
- Save the configuration. You'll be redirected to your browser for authentication.
### Step 2: Authenticate with CORE
- Once redirected to your browser, click **Allow Access**
![Core Kilo Code](/images/kilo-code-auth.png)
- Confirm that "core-memory" appears as an active, connected server in Kilo-Code
### Enable Automatic Memory Integration (Recommended)
To get the most out of CORE, configure Kilo-Code to automatically search and store memories for seamless project continuity:
Create a new file `core-memory.md` at `.kilo-code/rules` and add the following:
```text
---
alwaysApply: true
---
I am Kilo-Code, an AI coding assistant with access to CORE Memory - a persistent knowledge system that maintains project context across sessions.
**MANDATORY MEMORY OPERATIONS:**
1. **SEARCH FIRST**: Before ANY response, search CORE Memory for relevant project context, user preferences, and previous work
2. **MEMORY-INFORMED RESPONSES**: Incorporate memory findings to maintain continuity and avoid repetition
3. **AUTOMATIC STORAGE**: After each interaction, store conversation details, insights, and decisions in CORE Memory
**Memory Search Strategy:**
- Query for: project context, technical decisions, user patterns, progress status, related conversations
- Focus on: current focus areas, recent decisions, next steps, key insights
**Memory Storage Strategy:**
- Include: user intent, context provided, solution approach, technical details, insights gained, follow-up items
**Response Workflow:**
1. Search CORE Memory for relevant context
2. Integrate findings into response planning
3. Provide contextually aware assistance
4. Store interaction details and insights
**Memory Update Triggers:**
- New project context or requirements
- Technical decisions and architectural choices
- User preference discoveries
- Progress milestones and status changes
- Explicit update requests
**Core Principle:** CORE Memory transforms me from a session-based assistant into a persistent development partner. Always search first, respond with context, and store for continuity.
```
### Using CORE Memory in Kilo-Code
Once connected, CORE automatically enhances your development workflow:
- **Persistent Context**: Your conversations and project context persist across sessions
- **Cross-Session Learning**: CORE remembers your coding patterns and preferences
- **Smart Suggestions**: Get contextually relevant recommendations based on your history
- **Project Continuity**: Seamlessly resume work on complex projects
### Need Help?
Join our [Discord community](https://discord.gg/YGUZcvDjUa) and ask questions in the **#core-support** channel
Our team and community members are ready to help you get the most out of CORE's memory capabilities.

docs/providers/obsidian.mdx Normal file
View File

@ -0,0 +1,150 @@
---
title: "Obsidian"
description: "Sync your Obsidian notes with CORE and get memory-aware answers directly inside Obsidian"
---
# Obsidian CORE Sync Plugin
> Sync your Obsidian notes with [CORE](https://heysol.ai/core) (Contextual Observation & Recall Engine) and get **memory-aware answers** directly inside Obsidian.
---
## ✨ What it does
- **Sync Notes**: Push selected notes (or entire vault sections) into CORE as _Episodes_.
- **Right-Side Panel**: Opens a dedicated panel in Obsidian that sends the current note's content to CORE and shows **relevant results, links, or summaries**.
- **Frontmatter Control**: Decide which notes to sync by adding simple YAML flags.
- **Offline Safe**: Failed syncs are queued locally and retried automatically.
---
## 🚀 Installation
### Local development
1. Download the latest release assets from [core-obsidian v0.1.0](https://github.com/RedPlanetHQ/core-obsidian/releases/tag/0.1.0) and extract them into your Obsidian vault under `.obsidian/plugins/obsidian-core-sync/`:
- Ensure the directory contains `main.js`, `style.css`, and `manifest.json`.
2. Enable the plugin in Obsidian:
- Go to **Settings** → **Community plugins**
- Find "CORE Sync" and toggle it on
### Community Installation
> Note: A pull request for community installation is pending approval. You can track its progress [here](https://github.com/obsidianmd/obsidian-releases/pull/7683).
---
## ⚙️ Configuration
### Step 1: Get Your API Key
1. Login to CORE dashboard at [core.heysol.ai](https://core.heysol.ai)
2. Navigate to **Settings** (bottom left)
![CORE Settings](/images/core-settings.png)
3. Go to **API Key** → **Generate new key** → Name it "obsidian"
![Create API Key](/images/create-api-key.png)
4. Copy the generated API key
### Step 2: Configure Plugin Settings
1. In Obsidian, go to **Settings** → **CORE Sync**
2. Configure the following:
- **CORE Endpoint**: Your CORE ingest/search API (default: `https://core.heysol.ai`)
- **API Key**: Paste the API key from Step 1
- **Auto-sync on modify**: If enabled, every note edit will sync automatically
---
## 🛠️ Usage
### Mark Notes for Sync
Add the following frontmatter at the top of a note to mark it for synchronization:
```yaml
---
core.sync: true
---
```
### Manual Sync Commands
Open the command palette (**Cmd/Ctrl + P**) and run:
- **"Sync current note to CORE"** - Sync the currently open note
- **"Sync all notes with core.sync=true"** - Sync all notes marked for synchronization
### CORE Panel
1. Open the CORE Panel by running **"Open CORE Panel"** from the command palette
2. This opens a new tab on the right side of Obsidian
3. When you open or edit a note, the plugin will automatically:
- Send the note's content to CORE
- Display relevant memories, links, and summaries
- Show related notes from your vault
---
## 🎯 Features
### Smart Sync
- **Incremental Updates**: Only syncs changed content to avoid duplicates
- **Conflict Resolution**: Handles simultaneous edits gracefully
- **Queue Management**: Failed syncs are queued and retried automatically
### Context-Aware Panel
- **Related Memories**: Shows relevant content from your CORE memory
- **Cross-References**: Links to related notes in your vault
- **AI Summaries**: Get AI-generated summaries of your note's context
---
## 💡 Use Cases
### Research & Knowledge Management
- Automatically sync research notes to build a searchable knowledge base
- Get contextual suggestions while writing based on your existing notes
- Cross-reference information across different projects and topics
### Meeting & Project Notes
- Sync meeting notes with `core.tags: ["meetings", "project-name"]`
- Access relevant context from previous meetings when taking new notes
- Build project timelines and track decisions over time
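For instance, a meeting note marked for sync with the tags referenced above might carry frontmatter like this (assuming `core.tags` is supported alongside `core.sync`):
```yaml
---
core.sync: true
core.tags: ["meetings", "project-name"]
---
```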
### Personal Knowledge System
- Create a personal Wikipedia from your notes
- Get AI-powered insights on connections between ideas
- Build upon previous thoughts and research automatically
---
## 🛠️ Troubleshooting
### Common Issues
**API Key not working?**
- Verify the key is correctly copied from CORE dashboard
- Check that the API key has proper permissions
- Try regenerating the key if issues persist
**Notes not syncing?**
- Ensure `core.sync: true` is in the frontmatter
- Check internet connection
- Look for error messages in Developer Console (Ctrl+Shift+I)
**Panel not loading?**
- Restart Obsidian
- Check that the API endpoint is correct
- Verify CORE service is accessible
---
## 🤝 Support
- **GitHub Issues**: Report bugs and feature requests
- **Discord Community**: Join our [Discord](https://discord.gg/YGUZcvDjUa) and ask questions in **#core-support**
- **Documentation**: Visit [core.heysol.ai](https://core.heysol.ai) for more resources

View File

@ -0,0 +1,20 @@
---
title: "Integrations"
description: "Connect SOL to your essential work tools"
---
## Overview
SOL's power comes from its ability to connect with your essential work tools, bringing information and actions together in one place.
## Setting Up Integrations
To connect an integration:
1. Navigate to Settings > Integrations
2. Select the platform you want to connect
3. Authorize SOL to access your account
4. Configure sync settings and permissions
5. Set up default behaviors for the integration
Most integrations require you to authenticate via OAuth and grant specific permissions. SOL requests only the permissions necessary for the functionality you enable.

docs/providers/vscode.mdx Normal file
View File

@ -0,0 +1,96 @@
---
title: "VS Code (Github Copilot)"
description: "Connect your VS Code editor to CORE's memory system via MCP"
---
### Prerequisites
- VS Code (version 1.95.0 or later) with GitHub Copilot extension
- CORE account (sign up at [core.heysol.ai](https://core.heysol.ai))
- [MCP support enabled](https://code.visualstudio.com/docs/copilot/chat/mcp-servers) in VS Code
### Step 1: Create MCP Configuration
1. **Create or open your MCP configuration file**:
- Look for existing `mcp.json` file or create a new one in your user settings directory
2. **Add CORE MCP server configuration**:
```json
{
"servers": {
"core-memory": {
"url": "https://core.heysol.ai/api/v1/mcp?source=Vscode",
"type": "http"
}
}
}
```
### Step 2: Authenticate with CORE
- Go to **Extensions** → **MCP Servers** → `core-memory` server
- Click the settings icon on the core-memory server and start the server
![Core vscode](/images/core-vscode-start-server.png)
- Allow domain core.heysol.ai to authenticate this MCP server
![Core vscode](/images/allow-domain.png)
- Select `MCP` when prompted on your screen
![Core vscode](/images/authenticate-vscode.png)
- Once authenticated, CORE Memory will show as a running MCP server
### Enable Automatic Memory Integration (Recommended)
Configure Copilot to automatically search and store memories for seamless project continuity.
Create a new rules file at `.github/copilot-instructions.md` in your project root:
```text
---
alwaysApply: true
---
I am Copilot, an AI coding assistant with access to CORE Memory - a persistent knowledge system that maintains project context across sessions.
**MANDATORY MEMORY OPERATIONS:**
1. **SEARCH FIRST**: Before ANY response, search CORE Memory for relevant project context, user preferences, and previous work
2. **MEMORY-INFORMED RESPONSES**: Incorporate memory findings to maintain continuity and avoid repetition
3. **AUTOMATIC STORAGE**: After each interaction, store conversation details, insights, and decisions in CORE Memory
**Memory Search Strategy:**
- Query for: project context, technical decisions, user patterns, progress status, related conversations
- Focus on: current focus areas, recent decisions, next steps, key insights
**Memory Storage Strategy:**
- Include: user intent, context provided, solution approach, technical details, insights gained, follow-up items
**Response Workflow:**
1. Search CORE Memory for relevant context
2. Integrate findings into response planning
3. Provide contextually aware assistance
4. Store interaction details and insights
**Memory Update Triggers:**
- New project context or requirements
- Technical decisions and architectural choices
- User preference discoveries
- Progress milestones and status changes
- Explicit update requests
**Core Principle:** CORE Memory transforms me from a session-based assistant into a persistent development partner. Always search first, respond with context, and store for continuity.
```
## What's Next?
With CORE connected to VS Code, your GitHub Copilot conversations will now:
- **Automatically save** important context to your CORE memory
- **Retrieve relevant** information from previous sessions
- **Maintain continuity** across multiple coding sessions
- **Share context** with other connected development tools
### Need Help?
Join our [Discord community](https://discord.gg/YGUZcvDjUa) and ask questions in the **#core-support** channel
Our team and community members are ready to help you get the most out of CORE's memory capabilities.

View File

docs/providers/zed.mdx Normal file
View File

@ -0,0 +1,110 @@
---
title: "Zed"
description: "Connect your Zed editor to CORE's memory system via MCP"
---
![Core Zed](/images/core-zed.png)
### Prerequisites
- Zed editor (latest version recommended)
- CORE account (sign up at [core.heysol.ai](https://core.heysol.ai))
### Step 1: Add CORE MCP Server
1. **Open Agent Panel Settings**:
- Press `Cmd+Shift+I` or `Cmd+L` (macOS) or `Ctrl+Shift+I` (Linux/Windows) to open Agent Panel
- Click the **Settings** icon in the Agent Panel, or use the Command Palette: `agent: open settings`
![MCP settings Zed](/images/add-mcp-zed.png)
2. **Add Custom MCP Server**:
- In the Agent Panel Settings, click **"Add Custom Server"** button
- A configuration modal will appear
3. **Configure CORE MCP Server**:
Enter the code below in the configuration file and click the `Add server` button
```json
{
/// The name of your MCP server
"core-memory": {
"command": "npx",
"args": ["-y", "mcp-remote", "https://core.heysol.ai/api/v1/mcp?source=Zed"]
}
}
```
### Step 2: Authenticate with CORE
- After adding the CORE MCP server, Zed will prompt you to open a website for authentication.
- When the authentication window opens, grant Zed permission to access your CORE memory
### Step 3: Verify Connection
- Once authenticated, CORE Memory will appear in the list of connected MCP servers
![Core Zed](/images/zed-core-connected.png)
### Step 4: Enable Automatic Memory Search and Ingest in Zed (Recommended)
To make Zed automatically use your CORE memory in conversations:
1. **Open the Rules Library:**
- Open the Agent Panel
- Click the Agent menu (`...`) in the top right corner
- Select `Rules...` from the dropdown
2. Use `Cmd + N` to create new rule and add below instruction:
```text
---
alwaysApply: true
---
I am Zed, an AI coding assistant with access to CORE Memory - a persistent knowledge system that maintains project context across sessions.
**MANDATORY MEMORY OPERATIONS:**
1. **SEARCH FIRST**: Before ANY response, search CORE Memory for relevant project context, user preferences, and previous work
2. **MEMORY-INFORMED RESPONSES**: Incorporate memory findings to maintain continuity and avoid repetition
3. **AUTOMATIC STORAGE**: After each interaction, store conversation details, insights, and decisions in CORE Memory
**Memory Search Strategy:**
- Query for: project context, technical decisions, user patterns, progress status, related conversations
- Focus on: current focus areas, recent decisions, next steps, key insights
**Memory Storage Strategy:**
- Include: user intent, context provided, solution approach, technical details, insights gained, follow-up items
**Response Workflow:**
1. Search CORE Memory for relevant context
2. Integrate findings into response planning
3. Provide contextually aware assistance
4. Store interaction details and insights
**Memory Update Triggers:**
- New project context or requirements
- Technical decisions and architectural choices
- User preference discoveries
- Progress milestones and status changes
- Explicit update requests
**Core Principle:** CORE Memory transforms me from a session-based assistant into a persistent development partner. Always search first, respond with context, and store for continuity.
```
## What's Next?
With CORE connected to Zed, your AI assistant conversations will now:
- **Automatically save** important context to your CORE memory
- **Retrieve relevant** information from previous sessions
- **Maintain continuity** across multiple coding sessions
- **Share context** with other connected development tools
### Need Help?
Join our [Discord community](https://discord.gg/YGUZcvDjUa) and ask questions in the **#core-support** channel
Our team and community members are ready to help you get the most out of CORE's memory capabilities.

View File

@ -0,0 +1,73 @@
---
title: "Docker"
description: "Get started with CORE in 5 minutes"
---
> **Warning:**
> You can self-host CORE on your own infrastructure using Docker.
> The following instructions will use Docker Compose to spin up a CORE instance.
> Make sure to read the [self-hosting overview](/self-hosting/overview) first.
> As self-hosted deployments tend to have unique requirements and configurations, we don't provide specific advice for securing your deployment, scaling up, or improving reliability.
> **This guide alone is unlikely to result in a production-ready deployment. Security, scaling, and reliability concerns are not fully addressed here.**
> Should the burden ever get too much, we'd be happy to see you on CORE Cloud where we deal with these concerns for you.
## Requirements
These are the minimum requirements for running the webapp and background job components. They can run on the same, or on separate machines.
It's fine to run everything on the same machine for testing. To be able to scale your workers, you will want to run them separately.
### Prerequisites
To run CORE, you will need:
- Docker 20.10.0+
- Docker Compose 2.20.0+
### System Requirements
**Webapp & Database Machine:**
- 4+ vCPU
- 8+ GB RAM
- 20+ GB Storage
**Background Jobs Machine (if running separately):**
- 2+ vCPU
- 4+ GB RAM
- 10+ GB Storage
## Deployment Options
CORE offers two deployment approaches depending on your needs:
> **Prerequisites:**
> Before starting any deployment, ensure you have your `OPENAI_API_KEY` ready. This is required for AI functionality in CORE.
> You must add your `OPENAI_API_KEY` to the `core/hosting/docker/.env` file before starting the services.
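For example, after cloning the repository (see below), the relevant line in `core/hosting/docker/.env` would look something like this, with a placeholder key:
```bash
# core/hosting/docker/.env -- replace the placeholder with your real key
OPENAI_API_KEY=sk-your-openai-api-key
```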
### Combined Setup
For a self-hosted deployment with both CORE and Trigger.dev running together:
1. Clone the core repository
```bash
# Clone the repository
git clone https://github.com/RedPlanetHQ/core.git
cd core/hosting/docker
```
2. Start the services:
```bash
docker compose up -d
```
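To confirm the stack came up cleanly, standard Docker Compose commands (not specific to CORE) are enough:
```bash
# List services and their status
docker compose ps

# Follow logs if a container keeps restarting
docker compose logs -f
```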
## Next Steps
Once deployed, you can:
- Configure your AI providers (OpenAI, Anthropic, etc.)
- Set up integrations (Slack, GitHub, Gmail)
- Start building your memory graph
- Explore the CORE API and SDK

View File

@ -0,0 +1,88 @@
---
title: "Environment Variables"
description: "Environment variables for CORE self-hosting"
---
# Environment Variables
Environment variables for the CORE webapp container.
| Name | Required | Default | Description |
| :-------------------------------------- | :------- | :--------------------------------------------- | :---------------------------------------------------------------------------------------------- |
| **Version** | | | |
| `VERSION` | No | 0.1.12 | CORE version identifier |
| **Secrets** | | | |
| `SESSION_SECRET` | Yes | — | Session encryption secret. Run: `openssl rand -hex 16` |
| `MAGIC_LINK_SECRET` | Yes | — | Magic link encryption secret. Run: `openssl rand -hex 16` |
| `ENCRYPTION_KEY` | Yes | — | Data encryption key. Run: `openssl rand -hex 16` |
| **Application & Domains** | | | |
| `REMIX_APP_PORT` | No | 3033 | Application port number |
| `APP_ENV` | No | production | Application environment (development, production) |
| `NODE_ENV` | No | production | Node.js environment |
| `APP_ORIGIN` | Yes | http://localhost:3033 | Application origin URL |
| `LOGIN_ORIGIN` | Yes | http://localhost:3033 | Login origin URL (usually same as APP_ORIGIN) |
| `API_BASE_URL` | No | `APP_ORIGIN` | API base URL |
| **Database - PostgreSQL** | | | |
| `DB_HOST` | No | localhost | Database host (use container name for Docker) |
| `DB_PORT` | No | 5432 | Database port |
| `POSTGRES_USER` | Yes | docker | PostgreSQL username |
| `POSTGRES_PASSWORD` | Yes | docker | PostgreSQL password |
| `POSTGRES_DB` | Yes | core | PostgreSQL database name |
| `DATABASE_URL` | Yes | postgresql://docker:docker@postgres:5432/core?schema=core | PostgreSQL connection string |
| `DIRECT_URL` | Yes | `DATABASE_URL` | Direct DB connection string for migrations |
| **Database - Neo4j (Memory Graph)** | | | |
| `NEO4J_URI` | Yes | bolt://neo4j:7687 | Neo4j connection URI |
| `NEO4J_USERNAME` | Yes | neo4j | Neo4j username |
| `NEO4J_PASSWORD` | Yes | — | Neo4j password. Run: `openssl rand -hex 16` |
| `NEO4J_AUTH` | Yes | neo4j/password | Neo4j authentication (username/password format) |
| **Redis** | | | |
| `REDIS_HOST` | Yes | redis | Redis host (use container name for Docker) |
| `REDIS_PORT` | Yes | 6379 | Redis port |
| `REDIS_TLS_DISABLED` | No | true | Disable Redis TLS for local development |
| **Authentication** | | | |
| `ENABLE_EMAIL_LOGIN` | No | true | Enable email-based authentication |
| `AUTH_GOOGLE_CLIENT_ID` | No | — | Google OAuth client ID |
| `AUTH_GOOGLE_CLIENT_SECRET` | No | — | Google OAuth client secret |
| **AI Providers** | | | |
| `OPENAI_API_KEY` | No | — | OpenAI API key for memory processing |
| `MODEL` | No | gpt-4-turbo-2024-04-09 | Default language model |
| `EMBEDDING_MODEL` | No | text-embedding-3-small | Model for text embeddings |
| `OLLAMA_URL` | No | http://ollama:11434 | Ollama server URL for local models |
| **Background Jobs - Trigger.dev** | | | |
| `TRIGGER_PROJECT_ID` | Yes | — | Trigger.dev project identifier |
| `TRIGGER_SECRET_KEY` | Yes | — | Trigger.dev authentication secret |
| `TRIGGER_API_URL` | Yes | http://host.docker.internal:8030 | Trigger.dev API endpoint (use localhost:8030 for local, api.trigger.dev for cloud) |
## Security Considerations
### Required Secrets
These secrets must be generated and kept secure:
```bash
# Generate secure random secrets
openssl rand -hex 16 # For SESSION_SECRET
openssl rand -hex 16 # For MAGIC_LINK_SECRET
openssl rand -hex 16 # For ENCRYPTION_KEY
openssl rand -hex 16 # For NEO4J_PASSWORD
```
### Production Recommendations
- **Change all default passwords** before deploying to production
- **Use environment-specific secrets** - never reuse secrets across environments
- **Store secrets securely** - use a secrets manager in production
- **Enable TLS** for all database connections in production
- **Restrict CORS origins** to your actual domains
- **Use strong authentication** - configure OAuth providers for production use
### Docker Compose Networks
When using Docker Compose, service names are used as hostnames:
- `postgres` for PostgreSQL
- `neo4j` for Neo4j
- `redis` for Redis
- `ollama` for Ollama (if using local models)
For external services (like Trigger.dev), use `host.docker.internal` to access services running on the host machine.
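Putting the table's defaults together, a minimal local `.env` might look like the sketch below; every secret shown as a placeholder must be replaced with your own generated value:
```bash
# Minimal local .env sketch -- values mirror the defaults documented above
SESSION_SECRET=<output of: openssl rand -hex 16>
MAGIC_LINK_SECRET=<output of: openssl rand -hex 16>
ENCRYPTION_KEY=<output of: openssl rand -hex 16>
APP_ORIGIN=http://localhost:3033
LOGIN_ORIGIN=http://localhost:3033
POSTGRES_USER=docker
POSTGRES_PASSWORD=docker
POSTGRES_DB=core
DATABASE_URL=postgresql://docker:docker@postgres:5432/core?schema=core
NEO4J_URI=bolt://neo4j:7687
NEO4J_USERNAME=neo4j
NEO4J_PASSWORD=<output of: openssl rand -hex 16>
NEO4J_AUTH=neo4j/<same value as NEO4J_PASSWORD>
REDIS_HOST=redis
REDIS_PORT=6379
OPENAI_API_KEY=sk-your-openai-api-key
TRIGGER_PROJECT_ID=<from your Trigger.dev project>
TRIGGER_SECRET_KEY=<from your Trigger.dev project>
TRIGGER_API_URL=http://host.docker.internal:8030
```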

View File

@ -0,0 +1,39 @@
---
title: "Overview"
description: "Self-host CORE on your own infrastructure"
---
## Overview
You can self-host CORE on your own infrastructure.
Self-hosting CORE means you run and manage the platform on your own infrastructure, giving you full control over your environment, deployment process, and the URLs you expose the service on.
You are responsible for provisioning resources, handling updates, and managing any security, scaling or reliability challenges that arise.
We provide version-tagged releases for self-hosted deployments. It's highly advised to use these tags exclusively and keep them locked with your CLI version.
## Should you self-host?
CORE Cloud is fully managed, scalable, and comes with dedicated support. For most users, it offers the best experience. However, if you have specific requirements around data residency, compliance, or infrastructure control, self-hosting may be the right choice for you.
The self-hosted version is functionally the same as CORE Cloud with some exceptions, but our managed Cloud infrastructure is designed for high availability, security, and scale.
Because we don't manage self-hosted instances, we cannot guarantee how CORE will perform on your infrastructure. You assume all responsibility and risk for your deployment, including security, uptime, and data integrity.
For more details, carry on reading and follow our guides for instructions on setting up a self-hosted CORE instance. If you prefer a managed experience, you can sign up for our Cloud offering instead - we have a generous free tier for you to try it out.
## Architecture
The self-hosted version of CORE is composed of several containers that you run on your own infrastructure. Each component can be scaled independently:
- **Webapp**: The main application container, responsible for serving the user interface and orchestrating memory operations.
- **PostgreSQL**: Stores metadata, user accounts, and configuration data.
- **Neo4j**: Graph database used for storing and querying the memory graph.
- **[Trigger](https://trigger.dev/)**: Manages background jobs and workflows, such as data ingestion and memory formation. We use Trigger to reliably handle all background processing.
- **Redis**: Provides caching and session management.
This modular architecture allows you to scale each service as needed and gives you full control over your deployment.
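As a rough sketch only (image names are assumptions; the real compose file ships in `core/hosting/docker`), the components above map to Compose services along these lines:
```yaml
# Illustrative sketch, not the shipped compose file
services:
  webapp:
    image: redplanethq/core   # assumed image name
    ports: ["3033:3033"]
    depends_on: [postgres, neo4j, redis]
  postgres:
    image: postgres:16
  neo4j:
    image: neo4j:5
  redis:
    image: redis:7
```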
## Community support
It's dangerous to go alone! Join the self-hosting channel on our [Discord server](https://discord.gg/dVTC3BmgEq).