Add complete TUI application for monitoring Kubernetes clusters and host systems. Features include: Core features: - Collector framework with concurrent scheduling - Host collectors: disk, memory, load, network - Kubernetes collectors: pods, nodes, workloads, events with informers - Issue deduplication, state management, and resolve-after logic - Bubble Tea TUI with table view, details pane, and filtering - JSON export functionality UX improvements: - Help overlay with keybindings - Priority/category filters with visual indicators - Direct priority jump (0/1/2/3) - Bulk acknowledge (Shift+A) - Clipboard copy (y) - Theme toggle (T) - Age format toggle (d) - Wide title toggle (t) - Vi-style navigation (j/k) - Home/End jump (g/G) - Rollup drill-down in details Robustness: - Grace period for unreachable clusters - Rollups for high-volume issues - Flap suppression - RBAC error handling Files: All core application code with tests for host collectors, engine, store, model, and export packages.
102 lines
2.4 KiB
Go
package k8s
|
|
|
|
import (
|
|
"fmt"
|
|
"strings"
|
|
"time"
|
|
|
|
corev1 "k8s.io/api/core/v1"
|
|
|
|
"tower/internal/model"
|
|
)
|
|
|
|
// warningEventReasons is the allow-list of Warning event reasons that get
// surfaced as issues; IssuesFromEvents drops any event whose reason is not
// in this set. A map[string]struct{} is used as a zero-width-value set for
// cheap membership checks.
var warningEventReasons = map[string]struct{}{
	"FailedScheduling": {},
	"FailedMount":      {},
	"BackOff":          {},
	"Unhealthy":        {},
	"OOMKilling":       {},
	"FailedPull":       {},
	"Forbidden":        {},
	"ErrImagePull":     {},
	"ImagePullBackOff": {},
}
|
|
|
|
// IssuesFromEvents applies the PLAN.md Event rules.
|
|
//
|
|
// Dedup by (object UID, reason). For v1 Events, this is approximated by
|
|
// (involvedObject.uid, reason).
|
|
func IssuesFromEvents(events []*corev1.Event, now time.Time) []model.Issue {
|
|
_ = now
|
|
out := make([]model.Issue, 0, 16)
|
|
seen := map[string]struct{}{}
|
|
|
|
for _, e := range events {
|
|
if e == nil {
|
|
continue
|
|
}
|
|
if strings.ToLower(e.Type) != strings.ToLower(string(corev1.EventTypeWarning)) {
|
|
continue
|
|
}
|
|
if _, ok := warningEventReasons[e.Reason]; !ok {
|
|
continue
|
|
}
|
|
|
|
uid := string(e.InvolvedObject.UID)
|
|
k := uid + ":" + e.Reason
|
|
if _, ok := seen[k]; ok {
|
|
continue
|
|
}
|
|
seen[k] = struct{}{}
|
|
|
|
ns := e.InvolvedObject.Namespace
|
|
if ns == "" {
|
|
ns = e.Namespace
|
|
}
|
|
|
|
objKey := e.InvolvedObject.Kind + "/" + e.InvolvedObject.Name
|
|
title := fmt.Sprintf("K8s Event %s: %s (%s)", e.Reason, objKey, ns)
|
|
if ns == "" {
|
|
title = fmt.Sprintf("K8s Event %s: %s", e.Reason, objKey)
|
|
}
|
|
|
|
details := strings.TrimSpace(e.Message)
|
|
if details == "" {
|
|
details = "Warning event emitted by Kubernetes."
|
|
}
|
|
|
|
out = append(out, model.Issue{
|
|
ID: fmt.Sprintf("k8s:event:%s:%s", uid, e.Reason),
|
|
Category: model.CategoryKubernetes,
|
|
Priority: model.PriorityP2,
|
|
Title: title,
|
|
Details: details,
|
|
Evidence: map[string]string{
|
|
"kind": e.InvolvedObject.Kind,
|
|
"reason": e.Reason,
|
|
"namespace": ns,
|
|
"name": e.InvolvedObject.Name,
|
|
"uid": uid,
|
|
},
|
|
SuggestedFix: suggestedFixForEvent(ns, e.InvolvedObject.Kind, e.InvolvedObject.Name),
|
|
})
|
|
}
|
|
|
|
return out
|
|
}
|
|
|
|
// suggestedFixForEvent builds a kubectl command for inspecting the object an
// event refers to. The -n flag is included only when a namespace is known and
// the object is not a Node (nodes are cluster-scoped).
func suggestedFixForEvent(ns, kind, name string) string {
	k := strings.ToLower(kind)
	if ns == "" || k == "node" {
		return fmt.Sprintf("kubectl describe %s %s", k, name)
	}
	return fmt.Sprintf("kubectl -n %s describe %s %s", ns, k, name)
}
|