Add workspace utility scripts
- llm: Local LLM wrapper for llama-swap
- homelab-status: Quick K8s/cluster health check
- calc: Python/JS REPL for quick calculations
- transcribe: Whisper audio transcription wrapper

Added to fish PATH.

TOOLS.md (+37)
@@ -86,6 +86,43 @@ curl http://127.0.0.1:8080/v1/chat/completions \
---

## 🛠️ Workspace Scripts

Scripts in `~/clawd/scripts/` — add to PATH with:

```bash
export PATH="$HOME/clawd/scripts:$PATH"
```
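
Since the commit adds these to the fish PATH, a minimal fish equivalent is sketched below (assuming fish ≥ 3.2, where `fish_add_path` is available):

```fish
# Persistently prepend the scripts directory to fish's PATH (fish 3.2+)
fish_add_path ~/clawd/scripts
```
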
### llm — Local LLM wrapper

```bash
llm "What is 2+2?"                   # Uses default model (gemma)
llm -m qwen3 "Explain kubernetes"    # Use specific model
llm coder "Write a Python function"  # Short alias
```
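
The wrapper also reads the prompt from stdin and honors the `LLAMA_SWAP_*` environment variables (see `scripts/llm` below). A couple of illustrative invocations; the alternate endpoint is just a placeholder:

```bash
# Pipe text in instead of passing it as an argument
cat notes.md | llm qwen3

# Override the endpoint and token budget for a single call
LLAMA_SWAP_URL="http://127.0.0.1:9090" llm -t 512 "Summarize the homelab status"
```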

### homelab-status — Quick cluster check

```bash
homelab-status          # Quick overview
homelab-status --full   # Include storage details
```
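
For a rough live view, a plain `watch` loop over the same command works (the 5-minute interval is arbitrary):

```bash
watch -c -n 300 homelab-status   # -c keeps the ANSI colors
```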

### calc — Code REPL

```bash
calc "2 + 2"                     # Quick math
calc "sum([1,2,3,4,5])"          # Python functions
calc "math.sqrt(144)"            # With imports
calc -j "[1,2,3].map(x => x*2)"  # JavaScript mode
```
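
Beyond `math`, the script pre-imports `statistics` helpers and `collections.Counter`/`defaultdict` (see `scripts/calc` below), so expressions like these should also work:

```bash
calc "mean([2, 4, 6, 8])"       # statistics.mean, pre-imported
calc "Counter('mississippi')"   # collections.Counter, pre-imported
calc "json.dumps({'a': 1})"     # json is available too
```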

### transcribe — Audio transcription

```bash
transcribe meeting.mp3                # Quick transcription (base model)
transcribe -m medium interview.wav    # Better accuracy
transcribe -l en -f srt podcast.mp3   # English subtitles
```
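
Batching is just a shell loop over the same wrapper (file names here are placeholders):

```bash
# Generate English SRT subtitles for every mp3 in the current directory
for f in *.mp3; do
    transcribe -l en -f srt "$f"
done
```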

---

## 📁 Key Repos

### Homelab GitOps

scripts/calc (new executable file, +76)
@@ -0,0 +1,76 @@
#!/usr/bin/env bash
# Quick code execution for calculations and data processing
# Usage: calc "expression"
#        calc -p "python code"
#        calc -j "javascript code"

set -e

MODE="python"
CODE=""

show_help() {
    echo "Usage: calc [options] \"code\""
    echo ""
    echo "Options:"
    echo "  -p, --python   Python mode (default)"
    echo "  -j, --js       JavaScript mode (Node.js)"
    echo "  -h, --help     Show this help"
    echo ""
    echo "Examples:"
    echo "  calc \"2 + 2\"                     # Quick math"
    echo "  calc \"sum([1,2,3,4,5])\"          # Python functions"
    echo "  calc \"math.sqrt(144)\"            # math is pre-imported"
    echo "  calc -j \"[1,2,3].map(x => x*2)\"  # JavaScript"
    echo ""
    echo "Python mode has these pre-imported:"
    echo "  math, json, re, datetime, random, statistics, Counter, defaultdict"
}

while [[ $# -gt 0 ]]; do
    case $1 in
        -p|--python)
            MODE="python"
            shift
            ;;
        -j|--js)
            MODE="js"
            shift
            ;;
        -h|--help)
            show_help
            exit 0
            ;;
        *)
            if [[ -z "$CODE" ]]; then
                CODE="$1"
            else
                CODE="$CODE $1"
            fi
            shift
            ;;
    esac
done

if [[ -z "$CODE" ]]; then
    show_help
    exit 1
fi

case $MODE in
    python)
        # The code must be a single expression; its value is printed if not None
        python3 -c "
import math, json, re, sys
from datetime import datetime, date, timedelta
from random import random, randint, choice
from statistics import mean, median
from collections import Counter, defaultdict
result = $CODE
if result is not None: print(result)
"
        ;;

    js)
        node -e "console.log($CODE)"
        ;;
esac

scripts/homelab-status (new executable file, +135)
@@ -0,0 +1,135 @@
#!/usr/bin/env bash
# Quick homelab status check
# Usage: homelab-status [--full]

set -e

FULL=false
[[ "$1" == "--full" || "$1" == "-f" ]] && FULL=true

# Colors
RED='\033[0;31m'
GREEN='\033[0;32m'
YELLOW='\033[1;33m'
BLUE='\033[0;34m'
NC='\033[0m' # No Color

ok()   { echo -e "${GREEN}✓${NC} $1"; }
warn() { echo -e "${YELLOW}⚠${NC} $1"; }
fail() { echo -e "${RED}✗${NC} $1"; }
info() { echo -e "${BLUE}ℹ${NC} $1"; }

echo "═══════════════════════════════════════"
echo "  🏠 HOMELAB STATUS"
echo "═══════════════════════════════════════"
echo ""

# K8s Cluster
echo "☸️ KUBERNETES"
echo "───────────────────────────────────────"

if ! kubectl cluster-info &>/dev/null; then
    fail "Cluster unreachable"
else
    # Nodes
    NODES_TOTAL=$(kubectl get nodes --no-headers 2>/dev/null | wc -l)
    NODES_READY=$(kubectl get nodes --no-headers 2>/dev/null | grep -c " Ready" || true)
    if [[ "$NODES_READY" == "$NODES_TOTAL" ]]; then
        ok "Nodes: $NODES_READY/$NODES_TOTAL Ready"
    else
        warn "Nodes: $NODES_READY/$NODES_TOTAL Ready"
    fi

    # Pods (grep -c already prints 0 on no match, so just swallow its exit code)
    PODS_RUNNING=$(kubectl get pods -A --no-headers 2>/dev/null | grep -c "Running" || true)
    PODS_NOT_RUNNING=$(kubectl get pods -A --no-headers 2>/dev/null | grep -cvE "Running|Completed" || true)
    if [[ "$PODS_NOT_RUNNING" == "0" ]]; then
        ok "Pods: $PODS_RUNNING Running"
    else
        warn "Pods: $PODS_RUNNING Running, $PODS_NOT_RUNNING not ready"
    fi

    # Show failed pods if any
    if [[ "$PODS_NOT_RUNNING" != "0" ]] || $FULL; then
        kubectl get pods -A --no-headers 2>/dev/null | grep -E "(Error|CrashLoop|Failed|Pending)" | head -5 | while read -r line; do
            echo "    $line"
        done
    fi
fi
echo ""

# Alertmanager
echo "🚨 ALERTS"
echo "───────────────────────────────────────"
ALERTMANAGER_URL="http://alertmanager.monitoring.192.168.153.240.nip.io"
ALERTS=$(curl -sf "$ALERTMANAGER_URL/api/v2/alerts?active=true&silenced=false" 2>/dev/null || echo "[]")
ALERT_COUNT=$(echo "$ALERTS" | jq 'length' 2>/dev/null || echo "?")

if [[ "$ALERT_COUNT" == "0" ]]; then
    ok "No active alerts"
elif [[ "$ALERT_COUNT" == "?" ]]; then
    warn "Alertmanager unreachable"
else
    warn "$ALERT_COUNT active alert(s)"
    echo "$ALERTS" | jq -r '.[].labels.alertname' 2>/dev/null | head -5 | while read -r alert; do
        echo "  - $alert"
    done
fi
echo ""

# Key Services
echo "🔌 SERVICES"
echo "───────────────────────────────────────"

# Returns non-zero on failure so callers can chain fallbacks with ||
check_service() {
    local name="$1"
    local url="$2"
    if curl -sf --max-time 3 "$url" >/dev/null 2>&1; then
        ok "$name"
        return 0
    fi
    return 1
}

# Print failures at the call site so a down service doesn't abort the script (set -e)
check_service "Grafana" "http://grafana.monitoring.192.168.153.240.nip.io/api/health" || fail "Grafana"
# Try the Tailscale URL first, then the in-cluster service
check_service "ArgoCD" "https://argocd.taildb3494.ts.net" || check_service "ArgoCD" "http://argocd-server.argocd.svc:80" || warn "ArgoCD (via Tailscale only)"
check_service "Longhorn" "http://ui.longhorn-system.192.168.153.240.nip.io" || fail "Longhorn"

# Ollama (homelab)
if curl -sf --max-time 3 "http://100.85.116.57:11434/api/tags" >/dev/null 2>&1; then
    ok "Ollama (homelab)"
else
    warn "Ollama (homelab) - unreachable"
fi
echo ""

# Local LLM
echo "🤖 LOCAL LLM"
echo "───────────────────────────────────────"
if curl -sf "http://127.0.0.1:8080/health" >/dev/null 2>&1; then
    ok "llama-swap running"
    MODELS=$(curl -s "http://127.0.0.1:8080/v1/models" 2>/dev/null | jq -r '[.data[].id] | join(", ")' 2>/dev/null || true)
    info "Models: $MODELS"
else
    warn "llama-swap not running"
    info "Start: systemctl --user start llama-swap"
fi
echo ""

# Storage (if full)
if $FULL; then
    echo "💾 STORAGE"
    echo "───────────────────────────────────────"
    kubectl get pvc -A --no-headers 2>/dev/null | head -10 | while read -r ns name status vol cap mode class age; do
        if [[ "$status" == "Bound" ]]; then
            ok "$ns/$name ($cap)"
        else
            warn "$ns/$name - $status"
        fi
    done
    echo ""
fi

echo "═══════════════════════════════════════"
echo "  $(date '+%Y-%m-%d %H:%M:%S %Z')"
echo "═══════════════════════════════════════"

scripts/llm (new executable file, +99)
@@ -0,0 +1,99 @@
#!/usr/bin/env bash
# Local LLM wrapper for llama-swap
# Usage: llm [model] "prompt"
#        llm -m model "prompt"
#        echo "prompt" | llm [model]

set -e

ENDPOINT="${LLAMA_SWAP_URL:-http://127.0.0.1:8080}"
DEFAULT_MODEL="${LLAMA_SWAP_MODEL:-gemma}"
MAX_TOKENS="${LLAMA_SWAP_MAX_TOKENS:-2048}"

# Parse args
MODEL="$DEFAULT_MODEL"
PROMPT=""

while [[ $# -gt 0 ]]; do
    case $1 in
        -m|--model)
            MODEL="$2"
            shift 2
            ;;
        -t|--tokens)
            MAX_TOKENS="$2"
            shift 2
            ;;
        -h|--help)
            echo "Usage: llm [-m model] [-t max_tokens] \"prompt\""
            echo ""
            echo "Models: gemma, qwen3, coder, glm, reasoning, gpt-oss"
            echo ""
            echo "Examples:"
            echo "  llm \"What is 2+2?\""
            echo "  llm -m coder \"Write a Python hello world\""
            echo "  echo \"Explain this\" | llm qwen3"
            echo ""
            echo "Environment:"
            echo "  LLAMA_SWAP_URL         Endpoint (default: http://127.0.0.1:8080)"
            echo "  LLAMA_SWAP_MODEL       Default model (default: gemma)"
            echo "  LLAMA_SWAP_MAX_TOKENS  Max tokens (default: 2048)"
            exit 0
            ;;
        -*)
            echo "Unknown option: $1" >&2
            exit 1
            ;;
        *)
            if [[ -z "$PROMPT" ]]; then
                # Check if it's a model name
                if [[ "$1" =~ ^(gemma|qwen3|coder|glm|reasoning|gpt-oss)$ ]]; then
                    MODEL="$1"
                else
                    PROMPT="$1"
                fi
            else
                PROMPT="$PROMPT $1"
            fi
            shift
            ;;
    esac
done

# Read from stdin if no prompt
if [[ -z "$PROMPT" ]]; then
    if [[ ! -t 0 ]]; then
        PROMPT=$(cat)
    else
        echo "Error: No prompt provided" >&2
        exit 1
    fi
fi

# Check if llama-swap is running
if ! curl -sf "$ENDPOINT/health" >/dev/null 2>&1; then
    echo "Error: llama-swap not running at $ENDPOINT" >&2
    echo "Start with: systemctl --user start llama-swap" >&2
    exit 1
fi

# Build JSON payload
JSON=$(jq -n \
    --arg model "$MODEL" \
    --arg prompt "$PROMPT" \
    --argjson max_tokens "$MAX_TOKENS" \
    '{model: $model, messages: [{role: "user", content: $prompt}], max_tokens: $max_tokens}')

# Make request and extract response
RESPONSE=$(curl -s "$ENDPOINT/v1/chat/completions" \
    -H "Content-Type: application/json" \
    -d "$JSON")

# Check for error
if echo "$RESPONSE" | jq -e '.error' >/dev/null 2>&1; then
    echo "Error: $(echo "$RESPONSE" | jq -r '.error.message')" >&2
    exit 1
fi

# Extract content
echo "$RESPONSE" | jq -r '.choices[0].message.content'

scripts/transcribe (new executable file, +94)
@@ -0,0 +1,94 @@
#!/usr/bin/env bash
# Transcribe audio files using Whisper
# Usage: transcribe <audio_file> [options]
#        transcribe recording.mp3
#        transcribe -m medium meeting.wav

set -e

MODEL="${WHISPER_MODEL:-base}"
LANGUAGE=""
OUTPUT_FORMAT="txt"
AUDIO_FILE=""

show_help() {
    echo "Usage: transcribe [options] <audio_file>"
    echo ""
    echo "Options:"
    echo "  -m, --model MODEL    Whisper model (tiny, base, small, medium, large)"
    echo "                       Default: base (fast), use medium/large for accuracy"
    echo "  -l, --language LANG  Force language (e.g., en, es, fr)"
    echo "  -f, --format FORMAT  Output format (txt, json, srt, vtt)"
    echo "  -h, --help           Show this help"
    echo ""
    echo "Examples:"
    echo "  transcribe meeting.mp3               # Quick transcription"
    echo "  transcribe -m medium interview.wav   # Better accuracy"
    echo "  transcribe -l en -f srt podcast.mp3  # English subtitles"
    echo ""
    echo "Models (speed vs accuracy):"
    echo "  tiny   - Fastest, lowest accuracy (~1GB VRAM)"
    echo "  base   - Fast, good accuracy (~1GB VRAM) [default]"
    echo "  small  - Balanced (~2GB VRAM)"
    echo "  medium - Better accuracy (~5GB VRAM)"
    echo "  large  - Best accuracy (~10GB VRAM)"
    echo ""
    echo "Environment:"
    echo "  WHISPER_MODEL   Default model (default: base)"
}

while [[ $# -gt 0 ]]; do
    case $1 in
        -m|--model)
            MODEL="$2"
            shift 2
            ;;
        -l|--language)
            LANGUAGE="$2"
            shift 2
            ;;
        -f|--format)
            OUTPUT_FORMAT="$2"
            shift 2
            ;;
        -h|--help)
            show_help
            exit 0
            ;;
        -*)
            echo "Unknown option: $1" >&2
            exit 1
            ;;
        *)
            AUDIO_FILE="$1"
            shift
            ;;
    esac
done

if [[ -z "$AUDIO_FILE" ]]; then
    echo "Error: No audio file provided" >&2
    show_help
    exit 1
fi

if [[ ! -f "$AUDIO_FILE" ]]; then
    echo "Error: File not found: $AUDIO_FILE" >&2
    exit 1
fi

# Build whisper command as an array so file names with spaces survive intact
CMD=(whisper "$AUDIO_FILE" --model "$MODEL" --output_format "$OUTPUT_FORMAT")

if [[ -n "$LANGUAGE" ]]; then
    CMD+=(--language "$LANGUAGE")
fi

# Run transcription
echo "Transcribing: $AUDIO_FILE (model: $MODEL)" >&2
"${CMD[@]}"

# Show output location
BASE=$(basename "$AUDIO_FILE" | sed 's/\.[^.]*$//')
echo "" >&2
echo "Output: ${BASE}.${OUTPUT_FORMAT}" >&2