From 796eb83204fc575c5eaccca1f51cb7ac1b4a54fb Mon Sep 17 00:00:00 2001
From: Snider
Date: Mon, 16 Mar 2026 07:35:34 +0000
Subject: [PATCH] feat: add local-agent.sh wrapper + local concurrency config

Co-Authored-By: Virgil

---
 config/agents.yaml     |   1 +
 scripts/local-agent.sh | 109 +++++++++++++++++++++++++++++++++++++++++
 2 files changed, 110 insertions(+)
 create mode 100755 scripts/local-agent.sh

diff --git a/config/agents.yaml b/config/agents.yaml
index 9e6f6e1..3013fbe 100644
--- a/config/agents.yaml
+++ b/config/agents.yaml
@@ -14,6 +14,7 @@ concurrency:
   claude: 1
   gemini: 1
   codex: 1
+  local: 1
 
 # Rate limiting / quota management
 # Controls pacing between task dispatches to stay within daily quotas.

diff --git a/scripts/local-agent.sh b/scripts/local-agent.sh
new file mode 100755
index 0000000..52de97c
--- /dev/null
+++ b/scripts/local-agent.sh
@@ -0,0 +1,109 @@
+#!/bin/bash
+# Local agent wrapper — runs Ollama model on workspace files
+# Usage: local-agent.sh <prompt>
+#
+# Reads CLAUDE.md, PLAN.md, TODO.md, CONTEXT.md, CONSUMERS.md and RECENT.md
+# from the current directory, combines them with <prompt>, sends to Ollama, outputs result.
+
+set -euo pipefail  # exit on error, error on unset vars, fail inside pipelines
+
+PROMPT="${1:?Usage: local-agent.sh <prompt>}"
+MODEL="${LOCAL_MODEL:-hf.co/unsloth/Qwen3-Coder-Next-GGUF:UD-IQ4_NL}"
+CTX_SIZE="${LOCAL_CTX:-16384}"
+
+# Build context from workspace files (each section is skipped when absent)
+CONTEXT=""
+
+if [ -f "CLAUDE.md" ]; then
+  CONTEXT="${CONTEXT}
+
+=== PROJECT CONVENTIONS (CLAUDE.md) ===
+$(cat CLAUDE.md)
+"
+fi
+
+if [ -f "PLAN.md" ]; then
+  CONTEXT="${CONTEXT}
+
+=== WORK PLAN (PLAN.md) ===
+$(cat PLAN.md)
+"
+fi
+
+if [ -f "TODO.md" ]; then
+  CONTEXT="${CONTEXT}
+
+=== TASK (TODO.md) ===
+$(cat TODO.md)
+"
+fi
+
+if [ -f "CONTEXT.md" ]; then  # head -200 caps prior knowledge to protect the context window
+  CONTEXT="${CONTEXT}
+
+=== PRIOR KNOWLEDGE (CONTEXT.md) ===
+$(head -200 CONTEXT.md)
+"
+fi
+
+if [ -f "CONSUMERS.md" ]; then
+  CONTEXT="${CONTEXT}
+
+=== CONSUMERS (CONSUMERS.md) ===
+$(cat CONSUMERS.md)
+"
+fi
+
+if [ -f "RECENT.md" ]; then
+  CONTEXT="${CONTEXT}
+
+=== RECENT CHANGES (RECENT.md) ===
+$(cat RECENT.md)
+"
+fi
+
+# List source files for the model to review; prune vendor/node_modules/.git by
+# path so e.g. "legit.go" is not filtered by a loose ".git" substring match.
+FILES=""
+if [ -d "." ]; then  # always true; kept as a guard placeholder for a future root check
+  FILES=$(find . \( -name "*.go" -o -name "*.php" -o -name "*.ts" \) -not -path "*/vendor/*" -not -path "*/node_modules/*" -not -path "*/.git/*" | sort)
+fi
+
+# Build the full prompt
+FULL_PROMPT="${CONTEXT}
+
+=== INSTRUCTIONS ===
+${PROMPT}
+
+=== SOURCE FILES IN THIS REPO ===
+${FILES}
+
+Review each source file listed above. Read them one at a time and report your findings.
+For each file, use: cat to read it, then analyse it according to the instructions.
+" + +# Call Ollama API (non-streaming for clean output) +RESPONSE=$(curl -s http://localhost:11434/api/generate \ + -d "$(python3 -c " +import json +print(json.dumps({ + 'model': '${MODEL}', + 'prompt': $(python3 -c "import json,sys; print(json.dumps(sys.stdin.read()))" <<< "$FULL_PROMPT"), + 'stream': False, + 'options': { + 'temperature': 0.1, + 'num_ctx': ${CTX_SIZE}, + 'top_p': 0.95, + 'top_k': 40 + } +})) +")" 2>/dev/null) + +# Extract and output the response +echo "$RESPONSE" | python3 -c " +import json, sys +try: + d = json.load(sys.stdin) + print(d.get('response', 'Error: no response')) +except: + print('Error: failed to parse response') +"