Integration Patterns
🔌 Embed ReasonKit into your applications and workflows.
ReasonKit is designed to integrate seamlessly with your existing tools, pipelines, and applications.
Integration Methods
| Method | Best For | Complexity |
|---|---|---|
| CLI | Scripts, CI/CD, manual use | Low |
| Library | Rust applications | Medium |
| HTTP API | Any language, microservices | Medium |
| MCP Server | AI assistants, Claude | Low |
CLI Integration
Shell Scripts
#!/bin/bash
# decision-helper.sh — run a ReasonKit analysis and act on its confidence.
# Usage: decision-helper.sh "<question>" [profile]
set -euo pipefail

# Fail fast with a usage message if no question was given.
QUESTION="${1:?usage: $0 \"<question>\" [profile]}"
PROFILE="${2:-balanced}"

# Run analysis and capture output
RESULT=$(rk-core think "$QUESTION" --profile "$PROFILE" --output json)

# Parse with jq
CONFIDENCE=$(echo "$RESULT" | jq -r '.confidence')
SYNTHESIS=$(echo "$RESULT" | jq -r '.synthesis')

# Act on result (bc -l handles the floating-point comparison; bash can't)
if (( $(echo "$CONFIDENCE > 0.8" | bc -l) )); then
    echo "High confidence decision: $SYNTHESIS"
else
    echo "Low confidence, consider more research"
fi
CI/CD Integration
GitHub Actions:
name: PR Analysis
on: pull_request

jobs:
  analyze:
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v4

      - name: Install ReasonKit
        run: cargo install reasonkit-core

      - name: Analyze PR
        env:
          ANTHROPIC_API_KEY: ${{ secrets.ANTHROPIC_API_KEY }}
          # The gh CLI needs a token to read the PR.
          GH_TOKEN: ${{ github.token }}
        run: |
          # Get PR description
          PR_BODY=$(gh pr view ${{ github.event.number }} --json body -q .body)
          # Analyze with ReasonKit
          rk-core think "Should this PR be merged? Context: $PR_BODY" \
            --profile balanced \
            --output json > analysis.json

      - name: Post Comment
        env:
          GH_TOKEN: ${{ github.token }}
        run: |
          SYNTHESIS=$(jq -r '.synthesis' analysis.json)
          # printf expands the \n escapes into real newlines; a plain
          # double-quoted bash string would pass them through literally.
          gh pr comment ${{ github.event.number }} \
            --body "$(printf '## ReasonKit Analysis\n\n%s' "$SYNTHESIS")"
GitLab CI:
# GitLab CI job: analyze the merge request description and keep the
# JSON result as a downloadable artifact.
analyze_mr:
  stage: review
  script:
    - cargo install reasonkit-core
    - |
      rk-core think "Review this merge request: $CI_MERGE_REQUEST_DESCRIPTION" \
        --profile balanced \
        --output json > analysis.json
    - cat analysis.json
  artifacts:
    paths:
      - analysis.json
Cron Jobs
# Daily decision review at 09:00.
# NOTE: a crontab entry must be a single line — crontab does not support
# backslash line continuations, so the command cannot be wrapped.
0 9 * * * /usr/local/bin/rk-core think "Review yesterday's decisions" --profile deep --output markdown >> /var/log/daily-review.md
Rust Library Integration
Add Dependency
# Cargo.toml
[dependencies]
reasonkit-core = "0.1"
tokio = { version = "1", features = ["full"] }
Basic Usage
use reasonkit_core::{run_analysis, Config, Profile};

#[tokio::main]
async fn main() -> Result<(), Box<dyn std::error::Error>> {
    // Start from the defaults and override only the reasoning profile.
    let mut config = Config::default();
    config.profile = Profile::Balanced;

    let question = "Should I refactor this module?";
    let analysis = run_analysis(question, &config).await?;

    println!("Confidence: {}", analysis.confidence);
    println!("Synthesis: {}", analysis.synthesis);
    Ok(())
}
Custom ThinkTool Pipeline
#![allow(unused)]
fn main() {
use reasonkit_core::thinktool::{
    GigaThink, LaserLogic, ProofGuard,
    ThinkTool, ToolConfig,
};

// Compose individual ThinkTools by hand instead of running a built-in profile.
// NOTE(review): `CustomResult`, `Depth`, `Result`, and `create_provider` are
// not defined in this snippet — assumed to come from the surrounding crate;
// confirm before copying this example.
async fn custom_analysis(input: &str) -> Result<CustomResult> {
    let provider = create_provider()?;

    // Run specific tools in sequence
    let perspectives = GigaThink::new()
        .with_perspectives(15)
        .execute(input, &provider)
        .await?;

    let logic = LaserLogic::new()
        .with_depth(Depth::Deep)
        .execute(input, &provider)
        .await?;

    // Custom synthesis
    Ok(CustomResult {
        perspectives: perspectives.items,
        logic_issues: logic.flaws,
    })
}
}
Streaming Results
#![allow(unused)]
fn main() {
use reasonkit_core::stream::AnalysisStream;
use futures::StreamExt;
async fn stream_analysis(input: &str) -> Result<()> {
let config = Config::default();
let mut stream = AnalysisStream::new(input, &config);
while let Some(event) = stream.next().await {
match event? {
StreamEvent::ToolStarted(name) => {
println!("Starting {}...", name);
}
StreamEvent::ToolProgress(name, progress) => {
println!("{}: {}%", name, progress);
}
StreamEvent::ToolCompleted(name, result) => {
println!("{} complete: {:?}", name, result);
}
StreamEvent::Synthesis(text) => {
println!("Final: {}", text);
}
}
}
Ok(())
}
}
HTTP API Integration
Running the API Server
# Start ReasonKit as an HTTP server
rk-core serve --port 8080
API Endpoints
POST /v1/analyze
Request:
{
"input": "Should I do X?",
"profile": "balanced",
"options": {
"proofguard_sources": 5
}
}
Response:
{
"id": "analysis_abc123",
"status": "completed",
"confidence": 0.85,
"synthesis": "...",
"tools": [...]
}
GET /v1/analysis/{id}
Returns analysis status and results
GET /v1/profiles
Lists available profiles
GET /v1/health
Health check endpoint
Client Examples
Python:
import requests

def analyze(question: str, profile: str = "balanced", timeout: float = 60.0) -> dict:
    """Submit a question to the local ReasonKit HTTP API and return the JSON result.

    Args:
        question: The question to analyze.
        profile: Analysis profile name (e.g. "balanced", "paranoid").
        timeout: Seconds to wait for the server before giving up.

    Raises:
        requests.HTTPError: On a non-2xx response.
        requests.Timeout: If the server does not answer within ``timeout``.
    """
    response = requests.post(
        "http://localhost:8080/v1/analyze",
        json={
            "input": question,
            "profile": profile,
        },
        headers={"Authorization": f"Bearer {API_KEY}"},
        # Without an explicit timeout, requests will wait forever.
        timeout=timeout,
    )
    response.raise_for_status()
    return response.json()

result = analyze("Should I invest in this stock?", "paranoid")
print(f"Confidence: {result['confidence']}")
JavaScript/TypeScript:
// Shape of the JSON body returned by POST /v1/analyze.
interface AnalysisResult {
  id: string;
  confidence: number;
  synthesis: string;
  tools: ToolResult[];
}

// Submit `input` to the local ReasonKit API under the given profile.
// Throws if the server responds with a non-2xx status.
// NOTE(review): `API_KEY` and `ToolResult` are assumed to be declared
// elsewhere in the consuming module.
async function analyze(
  input: string,
  profile: string = "balanced"
): Promise<AnalysisResult> {
  const response = await fetch("http://localhost:8080/v1/analyze", {
    method: "POST",
    headers: {
      "Content-Type": "application/json",
      "Authorization": `Bearer ${API_KEY}`,
    },
    body: JSON.stringify({ input, profile }),
  });
  if (!response.ok) {
    throw new Error(`Analysis failed: ${response.statusText}`);
  }
  return response.json();
}
curl:
curl -X POST http://localhost:8080/v1/analyze \
-H "Content-Type: application/json" \
-H "Authorization: Bearer $API_KEY" \
-d '{
"input": "Should I accept this job offer?",
"profile": "deep"
}'
MCP Server Integration
ReasonKit can run as an MCP (Model Context Protocol) server for AI assistants.
Setup
# Install MCP server
cargo install reasonkit-mcp
# Configure in Claude Desktop
# ~/.config/claude/claude_desktop_config.json
{
"mcpServers": {
"reasonkit": {
"command": "reasonkit-mcp",
"args": ["--profile", "balanced"],
"env": {
"ANTHROPIC_API_KEY": "your-key"
}
}
}
}
Available Tools
When connected, Claude can use:
- `reasonkit_think` — Full analysis
- `reasonkit_gigathink` — Multi-perspective brainstorm
- `reasonkit_laserlogic` — Logic analysis
- `reasonkit_proofguard` — Fact verification
Webhook Integration
Outgoing Webhooks
# Configure webhook endpoint
rk-core config set webhook.url "https://your-server.com/webhook"
rk-core config set webhook.events "analysis.completed,analysis.failed"
# Webhook payload format:
{
"event": "analysis.completed",
"timestamp": "2025-01-15T10:30:00Z",
"analysis_id": "abc123",
"input_hash": "sha256:...",
"confidence": 0.85,
"profile": "balanced"
}
Incoming Webhooks
# Trigger analysis via webhook
curl -X POST http://localhost:8080/webhook/analyze \
-H "X-Webhook-Secret: your-secret" \
-d '{"input": "Question from external system"}'
Database Integration
SQLite Logging
# Enable SQLite logging
export RK_LOG_DB="$HOME/.local/share/reasonkit/analyses.db"
# Query past analyses
sqlite3 "$RK_LOG_DB" "SELECT * FROM analyses WHERE confidence > 0.8"
Schema
-- One row per completed analysis; full tool output is kept as a JSON blob.
CREATE TABLE analyses (
    id TEXT PRIMARY KEY,
    input_text TEXT NOT NULL,
    input_hash TEXT NOT NULL,   -- content hash, used for dedup/lookup
    profile TEXT NOT NULL,
    confidence REAL,
    synthesis TEXT,
    raw_result TEXT, -- JSON blob
    created_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP,
    duration_ms INTEGER
);

-- Support the common filters: by confidence threshold and by recency.
CREATE INDEX idx_confidence ON analyses(confidence);
CREATE INDEX idx_created_at ON analyses(created_at);
Best Practices
Rate Limiting
#![allow(unused)]
fn main() {
use governor::{DefaultDirectRateLimiter, Quota, RateLimiter};
use std::num::NonZeroU32;

// Allow at most 30 analyses per minute.
let limiter = RateLimiter::direct(Quota::per_minute(NonZeroU32::new(30).unwrap()));

/// Wait until the rate limiter grants a permit, then run the analysis.
///
/// The limiter is passed in rather than captured: nested `fn` items
/// cannot capture local variables from an enclosing scope.
async fn analyze_with_limit(
    limiter: &DefaultDirectRateLimiter,
    input: &str,
) -> Result<Analysis> {
    limiter.until_ready().await;
    run_analysis(input, &Config::default()).await
}
}
Error Handling
#![allow(unused)]
fn main() {
// Branch on the error variant so each failure mode gets its own strategy.
// NOTE(review): the RateLimit and Timeout arms are illustrative placeholders —
// fill in the retry / fallback logic for your application.
match run_analysis(input, &config).await {
    Ok(analysis) => process_result(analysis),
    Err(ReasonKitError::RateLimit(retry_after)) => {
        // The provider told us how long to back off; honor it before retrying.
        tokio::time::sleep(retry_after).await;
        // Retry
    }
    Err(ReasonKitError::Timeout(_)) => {
        // Use cached result or default
    }
    Err(e) => {
        // Unexpected failure: record it and degrade gracefully.
        log::error!("Analysis failed: {}", e);
        return fallback_response();
    }
}
}
Caching
#![allow(unused)]
fn main() {
use moka::sync::Cache;
use std::time::Duration;

// Hold up to 1000 analyses, each for at most one hour.
let cache: Cache<String, Analysis> = Cache::builder()
    .max_capacity(1000)
    .time_to_live(Duration::from_secs(3600))
    .build();

/// Return a cached analysis when available; otherwise run it and cache it.
///
/// The cache is passed in rather than captured: nested `fn` items
/// cannot capture local variables from an enclosing scope.
async fn cached_analysis(
    cache: &Cache<String, Analysis>,
    input: &str,
) -> Result<Analysis> {
    let key = hash(input);
    if let Some(cached) = cache.get(&key) {
        return Ok(cached);
    }
    let result = run_analysis(input, &Config::default()).await?;
    cache.insert(key, result.clone());
    Ok(result)
}
}
Related
- Architecture — Internal design
- LLM Providers — Provider configuration
- API Reference — Output format details