Files
porthole/internal/collectors/k8s/issues_nodes.go
OpenCode Test · 1421b4659e · feat: implement ControlTower TUI for cluster and host monitoring
Add a complete TUI application for monitoring Kubernetes clusters and host
systems.

Core features:
- Collector framework with concurrent scheduling (sketched after this list)
- Host collectors: disk, memory, load, network
- Kubernetes collectors: pods, nodes, workloads, events with informers
- Issue deduplication, state management, and resolve-after logic
- Bubble Tea TUI with table view, details pane, and filtering
- JSON export functionality
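
A minimal sketch of what the collector framework and its concurrent
scheduler could look like. The Collector interface, Engine type, and every
name below are illustrative assumptions, not the actual tower API:

package collectors

import (
	"context"
	"log"
	"sync"
	"time"
)

// Issue stands in for tower/internal/model.Issue in this sketch.
type Issue struct {
	ID    string
	Title string
}

// Collector is one source of issues (disk, memory, k8s nodes, ...).
type Collector interface {
	Name() string
	Collect(ctx context.Context) ([]Issue, error)
}

// Engine runs every collector concurrently on a fixed interval and fans
// the results into one channel for the store and TUI to consume.
type Engine struct {
	Collectors []Collector
	Interval   time.Duration
}

func (e *Engine) Run(ctx context.Context, out chan<- []Issue) {
	ticker := time.NewTicker(e.Interval)
	defer ticker.Stop()
	for {
		var wg sync.WaitGroup
		for _, c := range e.Collectors {
			wg.Add(1)
			go func(c Collector) {
				defer wg.Done()
				issues, err := c.Collect(ctx)
				if err != nil {
					log.Printf("collector %s: %v", c.Name(), err)
					return
				}
				out <- issues
			}(c)
		}
		wg.Wait()
		select {
		case <-ctx.Done():
			return
		case <-ticker.C:
		}
	}
}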

UX improvements (key handling sketched after this list):
- Help overlay with keybindings
- Priority/category filters with visual indicators
- Direct priority jump (0/1/2/3)
- Bulk acknowledge (Shift+A)
- Clipboard copy (y)
- Theme toggle (T)
- Age format toggle (d)
- Wide title toggle (t)
- Vi-style navigation (j/k)
- Home/End jump (g/G)
- Rollup drill-down in details
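
A hedged sketch of how a few of these keybindings might be wired into a
Bubble Tea update loop. The model fields and the bindings shown are
assumptions; only the tea.Model interface and tea.KeyMsg come from Bubble
Tea itself:

package tui

import tea "github.com/charmbracelet/bubbletea"

type model struct {
	darkTheme bool
	cursor    int
}

func (m model) Init() tea.Cmd { return nil }

func (m model) Update(msg tea.Msg) (tea.Model, tea.Cmd) {
	switch msg := msg.(type) {
	case tea.KeyMsg:
		switch msg.String() {
		case "j": // vi-style down
			m.cursor++
		case "k": // vi-style up
			if m.cursor > 0 {
				m.cursor--
			}
		case "g": // jump to top (Home)
			m.cursor = 0
		case "T": // theme toggle
			m.darkTheme = !m.darkTheme
		case "q", "ctrl+c":
			return m, tea.Quit
		}
	}
	return m, nil
}

func (m model) View() string { return "" }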

Robustness:
- Grace period for unreachable clusters
- Rollups for high-volume issues
- Flap suppression (sketched after this list, together with resolve-after)
- RBAC error handling
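
One possible shape for the resolve-after and flap-suppression bookkeeping.
The field names and thresholds here are assumptions, not the actual store
logic:

package store

import "time"

// trackedIssue is the per-issue bookkeeping the store might keep.
type trackedIssue struct {
	LastSeen     time.Time // last collection that observed the issue
	FlapCount    int       // open/close transitions in the current window
	SuppressedTo time.Time // while in the future, the issue stays hidden
}

const (
	resolveAfter = 2 * time.Minute  // auto-resolve if not re-observed
	flapWindow   = 10 * time.Minute // suppression span for flapping issues
	flapLimit    = 3                // transitions allowed before suppression
)

// shouldResolve reports whether an issue that stopped being observed can
// be marked resolved (the "resolve-after" rule).
func shouldResolve(t trackedIssue, now time.Time) bool {
	return now.Sub(t.LastSeen) >= resolveAfter
}

// recordTransition counts an open/close flip and suppresses the issue
// once it has flapped too often.
func recordTransition(t *trackedIssue, now time.Time) {
	t.FlapCount++
	if t.FlapCount > flapLimit {
		t.SuppressedTo = now.Add(flapWindow)
	}
}

// shouldShow hides issues that are currently suppressed for flapping.
func shouldShow(t trackedIssue, now time.Time) bool {
	return now.After(t.SuppressedTo)
}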

Files: all core application code, with tests for the host collectors,
engine, store, model, and export packages.
2025-12-24 13:29:51 -08:00

80 lines · 2.0 KiB · Go

package k8s

import (
	"fmt"

	corev1 "k8s.io/api/core/v1"

	"tower/internal/model"
)

// IssuesFromNodes applies the PLAN.md node rules.
//
// It is a pure rule function: it never talks to the API server, so it can
// be exercised with plain corev1.Node values.
func IssuesFromNodes(nodes []*corev1.Node) []model.Issue {
	out := make([]model.Issue, 0, 8)
	for _, n := range nodes {
		if n == nil {
			continue
		}

		// Ready / NotReady: any Ready condition that is not True (i.e.
		// False or Unknown) is reported as a P0 issue.
		if cond := findNodeCondition(n, corev1.NodeReady); cond != nil {
			if cond.Status != corev1.ConditionTrue {
				out = append(out, model.Issue{
					ID:       fmt.Sprintf("k8s:node:%s:NotReady", n.Name),
					Category: model.CategoryKubernetes,
					Priority: model.PriorityP0,
					Title:    fmt.Sprintf("Node NotReady: %s", n.Name),
					Details:  cond.Message,
					Evidence: map[string]string{
						"kind":      "Node",
						"reason":    "NotReady",
						"namespace": "",
						"node":      n.Name,
						"status":    string(cond.Status),
					},
					SuggestedFix: "kubectl describe node " + n.Name,
				})
			}
		}

		// Pressure conditions: memory, disk, and PID pressure become P1
		// issues when the condition status is True.
		for _, ctype := range []corev1.NodeConditionType{corev1.NodeMemoryPressure, corev1.NodeDiskPressure, corev1.NodePIDPressure} {
			if cond := findNodeCondition(n, ctype); cond != nil {
				if cond.Status == corev1.ConditionTrue {
					out = append(out, model.Issue{
						ID:       fmt.Sprintf("k8s:node:%s:%s", n.Name, string(ctype)),
						Category: model.CategoryKubernetes,
						Priority: model.PriorityP1,
						Title:    fmt.Sprintf("Node %s: %s", ctype, n.Name),
						Details:  cond.Message,
						Evidence: map[string]string{
							"kind":      "Node",
							"reason":    string(ctype),
							"namespace": "",
							"node":      n.Name,
							"status":    string(cond.Status),
						},
						SuggestedFix: "kubectl describe node " + n.Name,
					})
				}
			}
		}
	}
	return out
}

// findNodeCondition returns a pointer into the node's status for the
// condition of type t, or nil if the node or the condition is absent.
func findNodeCondition(n *corev1.Node, t corev1.NodeConditionType) *corev1.NodeCondition {
	if n == nil {
		return nil
	}
	for i := range n.Status.Conditions {
		c := &n.Status.Conditions[i]
		if c.Type == t {
			return c
		}
	}
	return nil
}
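
Because IssuesFromNodes is pure, it can be driven directly from a test
without a client or fake API server. A minimal sketch (the test name and
assertions are illustrative; the types come from the file above and from
k8s.io/apimachinery):

package k8s

import (
	"testing"

	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func TestIssuesFromNodes_NotReady(t *testing.T) {
	node := &corev1.Node{
		ObjectMeta: metav1.ObjectMeta{Name: "worker-1"},
		Status: corev1.NodeStatus{
			Conditions: []corev1.NodeCondition{
				{
					Type:    corev1.NodeReady,
					Status:  corev1.ConditionFalse,
					Message: "kubelet stopped posting node status",
				},
			},
		},
	}

	issues := IssuesFromNodes([]*corev1.Node{node})
	if len(issues) != 1 {
		t.Fatalf("expected 1 issue, got %d", len(issues))
	}
	if issues[0].ID != "k8s:node:worker-1:NotReady" {
		t.Fatalf("unexpected issue ID: %s", issues[0].ID)
	}
}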