From 44c75b2f5c8def6fa0af1d69e7ea1491a20da62a Mon Sep 17 00:00:00 2001 From: behrooz Date: Sat, 27 Sep 2025 17:01:54 +0330 Subject: [PATCH] add more features --- handler/handler.go | 2588 ++++++++++++++++++++++++++++++++++++++++++- main.go | 47 +- models/workloads.go | 113 +- 3 files changed, 2704 insertions(+), 44 deletions(-) diff --git a/handler/handler.go b/handler/handler.go index a9d581a..c3c90bb 100644 --- a/handler/handler.go +++ b/handler/handler.go @@ -13,19 +13,27 @@ import ( "main/helpers" "main/models" "net/http" + "os" + "os/exec" "strconv" "strings" "time" + "gopkg.in/yaml.v2" appsv1 "k8s.io/api/apps/v1" + batchv1 "k8s.io/api/batch/v1" corev1 "k8s.io/api/core/v1" + resourceapi "k8s.io/apimachinery/pkg/api/resource" "k8s.io/kubectl/pkg/scheme" + syaml "sigs.k8s.io/yaml" // "github.com/gorilla/mux" "go.mongodb.org/mongo-driver/bson" "go.mongodb.org/mongo-driver/bson/primitive" + autoscalingv1 "k8s.io/api/autoscaling/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/types" "k8s.io/client-go/kubernetes" "k8s.io/client-go/rest" "k8s.io/client-go/tools/clientcmd" @@ -173,16 +181,25 @@ func Cluster_namespaces(w http.ResponseWriter, r *http.Request) { log.Fatal("Error getting list of pods: ", err) } - var nslist []string - for _, ns := range listOfnamespaces.Items { - nslist = append(nslist, ns.Name) + Allnamespaces := []models.Namespace{} + + var namespace models.Namespace + now := time.Now() + for _, s := range listOfnamespaces.Items { + namespace.Name = s.Name + namespace.Status = string(s.Status.Phase) + age := now.Sub(s.CreationTimestamp.Time) + namespace.Age = helpers.Human(age) + + Allnamespaces = append(Allnamespaces, namespace) } + //pod_list, err := json.Marshal(Allservice) if err != nil { http.Error(w, "Internal Error", http.StatusInternalServerError) } w.Header().Set("Content-Type", "application/json") - json.NewEncoder(w).Encode(nslist) + json.NewEncoder(w).Encode(Allnamespaces) } @@ -338,6 +355,45 @@ func 
Cluster_daemonsets(w http.ResponseWriter, r *http.Request) { } +func getDeploymentreadtStratus(clientset *kubernetes.Clientset, deploymentName string, namespace string, labels []string) (string, error) { + + _, err := clientset.AppsV1().Deployments(namespace).Get(context.TODO(), deploymentName, metav1.GetOptions{}) + if err != nil { + return "", err + } + + //pod, _ := clientset.CoreV1().Pods(namespace).List(context.TODO(), metav1.ListOptions{}) + //fmt.Print(len(pod.Items)) + + // Get the pods matching the label selector + podList, err := clientset.CoreV1().Pods(namespace).List(context.TODO(), metav1.ListOptions{ + LabelSelector: labels[0], // Ensure correct label selector + }) + + if err != nil { + return "", err + } + + totalPods := len(podList.Items) + readyPods := 0 + + for _, pod := range podList.Items { + readyContainer := 0 + totalContainers := len(pod.Status.ContainerStatuses) + for _, contianer := range pod.Status.ContainerStatuses { + if contianer.Ready { + readyContainer++ + } + } + + if readyContainer == totalContainers { + readyPods++ + } + } + + return fmt.Sprintf("%d/%d", readyPods, totalPods), nil +} + func Cluster_deployments(w http.ResponseWriter, r *http.Request) { Authorization(w, r) @@ -370,6 +426,18 @@ func Cluster_deployments(w http.ResponseWriter, r *http.Request) { deployment.Name = d.Name deployment.Namespace = d.Namespace deployment.Replicas = *d.Spec.Replicas + labels := deployments.Items[0].Spec.Selector.MatchLabels + now := time.Now() + age := now.Sub(d.CreationTimestamp.Time) // same as kubectl AGE + deployment.Age = helpers.Human(age) + deployment.Image = d.Spec.Template.Spec.Containers[0].Image + deployment.Strategy = string(d.Spec.Strategy.Type) + deployment.UpdateToDate = d.Status.UpdatedReplicas + var label_result []string + for k, v := range labels { + label_result = append(label_result, fmt.Sprintf("%s=%s", k, v)) + } + deployment.Ready, _ = getDeploymentreadtStratus(clientset, deployment.Name, deployment.Namespace, 
label_result) for _, c := range d.Status.Conditions { if c.Type == appsv1.DeploymentAvailable { avail = (c.Status == corev1.ConditionTrue) @@ -379,7 +447,6 @@ func Cluster_deployments(w http.ResponseWriter, r *http.Request) { deployment.Reason = reason } } - Alldeployment = append(Alldeployment, deployment) } @@ -439,7 +506,20 @@ func Cluster_pods(w http.ResponseWriter, r *http.Request) { for _, st := range p.Status.ContainerStatuses { restarts += st.RestartCount } - pod.Restart = restartedCount + pod.Restarts = restartedCount + + total := len(p.Status.ContainerStatuses) + ready := 0 + for _, cs := range p.Status.ContainerStatuses { + if cs.Ready { + ready++ + } + } + pod.Ready = fmt.Sprintf("%d/%d", ready, total) + + pod.Ip = p.Status.PodIP + pod.Node = p.Spec.NodeName + pod.Image = p.Status.ContainerStatuses[0].Image Allpod = append(Allpod, pod) } @@ -644,7 +724,6 @@ func Cluster_replicationcontrollers(w http.ResponseWriter, r *http.Request) { } func Cluster_cronjobs(w http.ResponseWriter, r *http.Request) { - Authorization(w, r) clustername := r.URL.Query().Get("Name") namespace := r.URL.Query().Get("Namespace") @@ -655,62 +734,63 @@ func Cluster_cronjobs(w http.ResponseWriter, r *http.Request) { } clientset, _, err := getClientset(w, clustername) - if err != nil { - log.Fatal("Error getting Replicationcontrollers: ", err) + log.Fatal("Error getting cronjobs: ", err) } - replicationcontrollers, err := clientset.CoreV1().ReplicationControllers(namespace).List(context.TODO(), metav1.ListOptions{}) - + cjs, err := clientset.BatchV1().CronJobs(namespace).List(context.TODO(), metav1.ListOptions{}) if err != nil { - log.Fatal("Error getting list of ReplicationController: ", err) + log.Fatal("Error getting list of CronJobs: ", err) } - AllreplicationController := []models.ReplicationController{} - - var ReplicationController models.ReplicationController + allCronJobs := []models.CronJob{} + var item models.CronJob now := time.Now() - for _, d := range 
replicationcontrollers.Items { - ReplicationController.Name = d.Name - ReplicationController.Namespace = d.Namespace - age := now.Sub(d.CreationTimestamp.Time) // same as kubectl AGE - ReplicationController.Age = helpers.Human(age) - - ReplicationController.Desired = *d.Spec.Replicas - ReplicationController.Current = d.Status.Replicas - ReplicationController.Ready = d.Status.ReadyReplicas - - AllreplicationController = append(AllreplicationController, ReplicationController) + for _, cj := range cjs.Items { + item.Name = cj.Name + item.Namespace = cj.Namespace + item.Schedule = cj.Spec.Schedule + if cj.Spec.Suspend != nil { + item.Suspend = *cj.Spec.Suspend + } else { + item.Suspend = false + } + item.Active = len(cj.Status.Active) + if cj.Status.LastScheduleTime != nil { + item.LastScheduleTime = cj.Status.LastScheduleTime.Time.UTC().Format(time.RFC3339) + } + age := now.Sub(cj.CreationTimestamp.Time) + item.Age = helpers.Human(age) + allCronJobs = append(allCronJobs, item) } if err != nil { http.Error(w, "Internal Error", http.StatusInternalServerError) } w.Header().Set("Content-Type", "application/json") - json.NewEncoder(w).Encode(AllreplicationController) - + json.NewEncoder(w).Encode(allCronJobs) } func Pod_logs(w http.ResponseWriter, r *http.Request) { Authorization(w, r) - clustername := r.URL.Query().Get("Name") - namespace := r.URL.Query().Get("Namespace") - podName := r.URL.Query().Get("Pod") + + var podlog models.PodLog + _ = json.NewDecoder(r.Body).Decode(&podlog) // containerName := podName - if clustername == "" { + if podlog.Clustername == "" { http.Error(w, "Missing 'Name' parameter", http.StatusBadRequest) return } - clientset, _, err := getClientset(w, clustername) + clientset, _, err := getClientset(w, podlog.Clustername) if err != nil { log.Fatal("Error getting Replicationcontrollers: ", err) } podLogOpts := corev1.PodLogOptions{} - req := clientset.CoreV1().Pods(namespace).GetLogs(podName, &podLogOpts) + req := 
clientset.CoreV1().Pods(podlog.Namespace).GetLogs(podlog.Podname, &podLogOpts) podLogs, err := req.Stream(context.TODO()) if err != nil { http.Error(w, "an error happend in getting logs", http.StatusBadRequest) @@ -729,6 +809,1975 @@ func Pod_logs(w http.ResponseWriter, r *http.Request) { json.NewEncoder(w).Encode(string(*buf)) } +func Replicaset_manifest(w http.ResponseWriter, r *http.Request) { + Authorization(w, r) + + var podlog models.PodLog + _ = json.NewDecoder(r.Body).Decode(&podlog) + // containerName := podName + if podlog.Clustername == "" { + http.Error(w, "Missing 'Name' parameter", http.StatusBadRequest) + return + } + + clientset, _, err := getClientset(w, podlog.Clustername) + + if err != nil { + log.Fatal("Error getting Replicaset: ", err) + } + + pod, err := clientset.AppsV1().ReplicaSets(podlog.Namespace).Get(context.TODO(), podlog.Replicasetname, metav1.GetOptions{}) + if err != nil { + http.Error(w, "an error happend ", http.StatusBadRequest) + return + } + podYAML, err := yaml.Marshal(pod) + if err != nil { + http.Error(w, "an error happend ", http.StatusBadRequest) + return + } + w.Header().Set("Content-Type", "application/json") + json.NewEncoder(w).Encode(string(podYAML)) +} + +func Jobs_manifest(w http.ResponseWriter, r *http.Request) { + Authorization(w, r) + + // Expecting JSON body: {"Clustername":"...", "Namespace":"...", "Jobsname":"..."} + var req struct { + Clustername string `json:"Clustername"` + Namespace string `json:"Namespace"` + Jobsname string `json:"Jobsname"` + } + if err := json.NewDecoder(r.Body).Decode(&req); err != nil { + http.Error(w, "Invalid JSON body", http.StatusBadRequest) + return + } + if req.Clustername == "" || req.Jobsname == "" { + http.Error(w, "Missing required fields: Clustername and Jobsname", http.StatusBadRequest) + return + } + if req.Namespace == "" { + req.Namespace = "default" + } + + clientset, _, err := getClientset(w, req.Clustername) + if err != nil { + http.Error(w, "Failed to create 
kubernetes client: "+err.Error(), http.StatusInternalServerError) + return + } + + job, err := clientset.BatchV1().Jobs(req.Namespace).Get(context.TODO(), req.Jobsname, metav1.GetOptions{}) + if err != nil { + http.Error(w, "Failed to get job: "+err.Error(), http.StatusBadRequest) + return + } + + jobYAML, err := yaml.Marshal(job) + if err != nil { + http.Error(w, "Failed to marshal job yaml: "+err.Error(), http.StatusInternalServerError) + return + } + + w.Header().Set("Content-Type", "application/json") + json.NewEncoder(w).Encode(string(jobYAML)) +} + +func CronJobs_manifest(w http.ResponseWriter, r *http.Request) { + Authorization(w, r) + + var req struct { + Clustername string `json:"Clustername"` + Namespace string `json:"Namespace"` + Cronjobsname string `json:"Cronjobsname"` + } + if err := json.NewDecoder(r.Body).Decode(&req); err != nil { + http.Error(w, "Invalid JSON body", http.StatusBadRequest) + return + } + if req.Clustername == "" || req.Cronjobsname == "" { + http.Error(w, "Missing required fields: Clustername and CronjobName", http.StatusBadRequest) + return + } + if req.Namespace == "" { + req.Namespace = "default" + } + + clientset, _, err := getClientset(w, req.Clustername) + if err != nil { + http.Error(w, "Failed to create kubernetes client: "+err.Error(), http.StatusInternalServerError) + return + } + + cj, err := clientset.BatchV1().CronJobs(req.Namespace).Get(context.TODO(), req.Cronjobsname, metav1.GetOptions{}) + if err != nil { + http.Error(w, "Failed to get cronjob: "+err.Error(), http.StatusBadRequest) + return + } + + cjYAML, err := yaml.Marshal(cj) + if err != nil { + http.Error(w, "Failed to marshal cronjob yaml: "+err.Error(), http.StatusInternalServerError) + return + } + + w.Header().Set("Content-Type", "application/json") + json.NewEncoder(w).Encode(string(cjYAML)) +} + +func Cronjobs_trigger(w http.ResponseWriter, r *http.Request) { + Authorization(w, r) + + var req struct { + Clustername string `json:"Clustername"` + 
Namespace string `json:"Namespace"` + Cronjobsname string `json:"Cronjobsname"` + } + if err := json.NewDecoder(r.Body).Decode(&req); err != nil { + http.Error(w, "Invalid JSON body", http.StatusBadRequest) + return + } + if req.Clustername == "" || req.Cronjobsname == "" { + http.Error(w, "Missing required fields: Clustername and Cronjobsname", http.StatusBadRequest) + return + } + if req.Namespace == "" { + req.Namespace = "default" + } + + clientset, _, err := getClientset(w, req.Clustername) + if err != nil { + http.Error(w, "Failed to create kubernetes client: "+err.Error(), http.StatusInternalServerError) + return + } + + // Get the CronJob + cj, err := clientset.BatchV1().CronJobs(req.Namespace).Get(context.TODO(), req.Cronjobsname, metav1.GetOptions{}) + if err != nil { + http.Error(w, "Failed to get cronjob: "+err.Error(), http.StatusBadRequest) + return + } + + // Build a Job from the CronJob's job template + job := &batchv1.Job{ + ObjectMeta: metav1.ObjectMeta{ + GenerateName: cj.Name + "-", + Namespace: req.Namespace, + Labels: cj.Spec.JobTemplate.Labels, + Annotations: cj.Spec.JobTemplate.Annotations, + }, + Spec: cj.Spec.JobTemplate.Spec, + } + + // Optionally set owner reference so garbage collection can relate it back + block := true + controller := true + job.OwnerReferences = []metav1.OwnerReference{ + { + APIVersion: "batch/v1", + Kind: "CronJob", + Name: cj.Name, + UID: cj.UID, + Controller: &controller, + BlockOwnerDeletion: &block, + }, + } + + created, err := clientset.BatchV1().Jobs(req.Namespace).Create(context.TODO(), job, metav1.CreateOptions{}) + if err != nil { + http.Error(w, "Failed to create job from cronjob: "+err.Error(), http.StatusInternalServerError) + return + } + + w.Header().Set("Content-Type", "application/json") + _ = json.NewEncoder(w).Encode(map[string]string{ + "Clustername": req.Clustername, + "Cronjobsname": req.Cronjobsname, + "Namespace": req.Namespace, + "JobName": created.Name, + }) +} + +func Cronjobs_suspend(w 
http.ResponseWriter, r *http.Request) { + Authorization(w, r) + + var req struct { + Clustername string `json:"Clustername"` + Namespace string `json:"Namespace"` + Cronjobsname string `json:"Cronjobsname"` + Suspend bool `json:"Suspend"` + } + if err := json.NewDecoder(r.Body).Decode(&req); err != nil { + http.Error(w, "Invalid JSON body", http.StatusBadRequest) + return + } + if req.Clustername == "" || req.Cronjobsname == "" { + http.Error(w, "Missing required fields: Clustername and Cronjobsname", http.StatusBadRequest) + return + } + if req.Namespace == "" { + req.Namespace = "default" + } + + clientset, _, err := getClientset(w, req.Clustername) + if err != nil { + http.Error(w, "Failed to create kubernetes client: "+err.Error(), http.StatusInternalServerError) + return + } + + cj, err := clientset.BatchV1().CronJobs(req.Namespace).Get(context.TODO(), req.Cronjobsname, metav1.GetOptions{}) + if err != nil { + http.Error(w, "Failed to get cronjob: "+err.Error(), http.StatusBadRequest) + return + } + + cj.Spec.Suspend = &req.Suspend + + updated, err := clientset.BatchV1().CronJobs(req.Namespace).Update(context.TODO(), cj, metav1.UpdateOptions{}) + if err != nil { + http.Error(w, "Failed to update cronjob suspend: "+err.Error(), http.StatusInternalServerError) + return + } + + w.Header().Set("Content-Type", "application/json") + _ = json.NewEncoder(w).Encode(map[string]interface{}{ + "Clustername": req.Clustername, + "Cronjobsname": req.Cronjobsname, + "Namespace": req.Namespace, + "Suspend": req.Suspend, + "ResourceVersion": updated.ResourceVersion, + }) +} + +func Jobs_logs(w http.ResponseWriter, r *http.Request) { + Authorization(w, r) + + var req struct { + Clustername string `json:"Clustername"` + Namespace string `json:"Namespace"` + Jobsname string `json:"Jobsname"` + } + if err := json.NewDecoder(r.Body).Decode(&req); err != nil { + http.Error(w, "Invalid JSON body", http.StatusBadRequest) + return + } + if req.Clustername == "" || req.Jobsname == "" { + 
http.Error(w, "Missing required fields: Clustername and Jobsname", http.StatusBadRequest) + return + } + if req.Namespace == "" { + req.Namespace = "default" + } + + clientset, _, err := getClientset(w, req.Clustername) + if err != nil { + http.Error(w, "Failed to create kubernetes client: "+err.Error(), http.StatusInternalServerError) + return + } + + // Get the Job to derive its Pod selector + job, err := clientset.BatchV1().Jobs(req.Namespace).Get(context.TODO(), req.Jobsname, metav1.GetOptions{}) + if err != nil { + http.Error(w, "Failed to get job: "+err.Error(), http.StatusBadRequest) + return + } + + var labelSelector string + if job.Spec.Selector != nil { + if sel, err := metav1.LabelSelectorAsSelector(job.Spec.Selector); err == nil { + labelSelector = sel.String() + } + } + // Fallback commonly used label if selector was empty + if labelSelector == "" { + labelSelector = "job-name=" + req.Jobsname + } + + pods, err := clientset.CoreV1().Pods(req.Namespace).List(context.TODO(), metav1.ListOptions{LabelSelector: labelSelector}) + if err != nil { + http.Error(w, "Failed to list pods for job: "+err.Error(), http.StatusBadRequest) + return + } + + results := map[string]interface{}{} + for _, p := range pods.Items { + podLogOpts := corev1.PodLogOptions{} + // if multiple containers, fetch logs from each + if len(p.Spec.Containers) <= 1 { + reqLog := clientset.CoreV1().Pods(req.Namespace).GetLogs(p.Name, &podLogOpts) + stream, err := reqLog.Stream(context.TODO()) + if err != nil { + results[p.Name] = map[string]string{"error": err.Error()} + continue + } + buf, _ := io.ReadAll(stream) + _ = stream.Close() + results[p.Name] = strings.Split(strings.TrimSpace(string(buf)), "\n") + } else { + containerLogs := map[string][]string{} + for _, c := range p.Spec.Containers { + opts := corev1.PodLogOptions{Container: c.Name} + reqLog := clientset.CoreV1().Pods(req.Namespace).GetLogs(p.Name, &opts) + stream, err := reqLog.Stream(context.TODO()) + if err != nil { + 
containerLogs[c.Name] = []string{"error: " + err.Error()} + continue + } + buf, _ := io.ReadAll(stream) + _ = stream.Close() + containerLogs[c.Name] = strings.Split(strings.TrimSpace(string(buf)), "\n") + } + results[p.Name] = containerLogs + } + } + + w.Header().Set("Content-Type", "application/json") + json.NewEncoder(w).Encode(results) +} + +func Pod_manifest(w http.ResponseWriter, r *http.Request) { + Authorization(w, r) + + var podlog models.PodLog + _ = json.NewDecoder(r.Body).Decode(&podlog) + // containerName := podName + if podlog.Clustername == "" { + http.Error(w, "Missing 'Name' parameter", http.StatusBadRequest) + return + } + + clientset, _, err := getClientset(w, podlog.Clustername) + + if err != nil { + log.Fatal("Error getting Replicationcontrollers: ", err) + } + + pod, err := clientset.CoreV1().Pods(podlog.Namespace).Get(context.TODO(), podlog.Podname, metav1.GetOptions{}) + if err != nil { + http.Error(w, "an error happend ", http.StatusBadRequest) + return + } + podYAML, err := yaml.Marshal(pod) + if err != nil { + http.Error(w, "an error happend ", http.StatusBadRequest) + return + } + w.Header().Set("Content-Type", "application/json") + json.NewEncoder(w).Encode(string(podYAML)) +} + +func Deployment_manifest(w http.ResponseWriter, r *http.Request) { + Authorization(w, r) + + var podlog models.PodLog + _ = json.NewDecoder(r.Body).Decode(&podlog) + // containerName := podName + if podlog.Clustername == "" { + http.Error(w, "Missing 'Name' parameter", http.StatusBadRequest) + return + } + + clientset, _, err := getClientset(w, podlog.Clustername) + + if err != nil { + log.Fatal("Error getting Replicationcontrollers: ", err) + } + + deployment, err := clientset.AppsV1().Deployments(podlog.Namespace).Get(context.TODO(), podlog.Podname, metav1.GetOptions{}) + if err != nil { + http.Error(w, "an error happend ", http.StatusBadRequest) + return + } + deploymentYAML, err := yaml.Marshal(deployment) + if err != nil { + http.Error(w, "an error happend 
", http.StatusBadRequest) + return + } + w.Header().Set("Content-Type", "application/json") + json.NewEncoder(w).Encode(string(deploymentYAML)) +} + +func StatefulSet_manifest(w http.ResponseWriter, r *http.Request) { + Authorization(w, r) + + var podlog models.PodLog + _ = json.NewDecoder(r.Body).Decode(&podlog) + if podlog.Clustername == "" { + http.Error(w, "Missing 'Name' parameter", http.StatusBadRequest) + return + } + + clientset, _, err := getClientset(w, podlog.Clustername) + if err != nil { + log.Fatal("Error getting statefulset: ", err) + } + + ss, err := clientset.AppsV1().StatefulSets(podlog.Namespace).Get(context.TODO(), podlog.Statefulset, metav1.GetOptions{}) + if err != nil { + http.Error(w, "an error happend ", http.StatusBadRequest) + return + } + ssYAML, err := yaml.Marshal(ss) + if err != nil { + http.Error(w, "an error happend ", http.StatusBadRequest) + return + } + w.Header().Set("Content-Type", "application/json") + json.NewEncoder(w).Encode(string(ssYAML)) +} + +func DaemonSet_manifest(w http.ResponseWriter, r *http.Request) { + Authorization(w, r) + + var req models.PodLog + _ = json.NewDecoder(r.Body).Decode(&req) + if req.Clustername == "" { + http.Error(w, "Missing 'Name' parameter", http.StatusBadRequest) + return + } + + clientset, _, err := getClientset(w, req.Clustername) + if err != nil { + log.Fatal("Error getting daemonset: ", err) + } + + // Reuse Podname field to carry DaemonSet name, similar to Deployment_manifest + ds, err := clientset.AppsV1().DaemonSets(req.Namespace).Get(context.TODO(), req.Daemonsetsname, metav1.GetOptions{}) + if err != nil { + http.Error(w, "an error happend ", http.StatusBadRequest) + return + } + dsYAML, err := yaml.Marshal(ds) + if err != nil { + http.Error(w, "an error happend ", http.StatusBadRequest) + return + } + w.Header().Set("Content-Type", "application/json") + json.NewEncoder(w).Encode(string(dsYAML)) +} + +func ReplicaSet_scale(w http.ResponseWriter, r *http.Request) { + Authorization(w, 
r) + + var req models.ReplicasetScaleReq + if err := json.NewDecoder(r.Body).Decode(&req); err != nil { + http.Error(w, "invalid JSON body", http.StatusBadRequest) + return + } + + // Basic validation + if req.Clustername == "" || req.Replicasetname == "" { + http.Error(w, "missing required fields: Clustername and ReplicaSet", http.StatusBadRequest) + return + } + if req.Namespace == "" { + req.Namespace = "default" + } + if req.Replicas < 0 { + http.Error(w, "Replicas must be >= 0", http.StatusBadRequest) + return + } + + clientset, _, err := getClientset(w, req.Clustername) + if err != nil { + http.Error(w, "failed to create kubernetes client: "+err.Error(), http.StatusInternalServerError) + return + } + ctx := context.TODO() + + // Get current scale + scale, err := clientset.AppsV1(). + ReplicaSets(req.Namespace). + GetScale(ctx, req.Replicasetname, metav1.GetOptions{}) + if err != nil { + http.Error(w, "failed to get current scale: "+err.Error(), http.StatusBadRequest) + return + } + + // Update desired replicas + scale.Spec.Replicas = req.Replicas + + // Apply via the scale subresource + _, err = clientset.AppsV1(). + ReplicaSets(req.Namespace). 
+ UpdateScale(ctx, req.Replicasetname, &autoscalingv1.Scale{ + ObjectMeta: metav1.ObjectMeta{ + Name: req.Replicasetname, + Namespace: req.Namespace, + // ResourceVersion optional for concurrency safety + // ResourceVersion: scale.ResourceVersion, + }, + Spec: autoscalingv1.ScaleSpec{ + Replicas: req.Replicas, + }, + }, metav1.UpdateOptions{}) + if err != nil { + http.Error(w, "failed to update scale: "+err.Error(), http.StatusBadRequest) + return + } + + w.Header().Set("Content-Type", "application/json") + json.NewEncoder(w).Encode(models.DeploymentScaleReq{ + Clustername: req.Clustername, + Replicas: req.Replicas, + }) +} + +func Deployment_scale(w http.ResponseWriter, r *http.Request) { + Authorization(w, r) + + var req models.DeploymentScaleReq + if err := json.NewDecoder(r.Body).Decode(&req); err != nil { + http.Error(w, "invalid JSON body", http.StatusBadRequest) + return + } + + // Basic validation + if req.Clustername == "" || req.Deployment == "" { + http.Error(w, "missing required fields: Clustername and Deployment", http.StatusBadRequest) + return + } + if req.Namespace == "" { + req.Namespace = "default" + } + if req.Replicas < 0 { + http.Error(w, "Replicas must be >= 0", http.StatusBadRequest) + return + } + + clientset, _, err := getClientset(w, req.Clustername) + if err != nil { + http.Error(w, "failed to create kubernetes client: "+err.Error(), http.StatusInternalServerError) + return + } + ctx := context.TODO() + + // Get current scale + scale, err := clientset.AppsV1(). + Deployments(req.Namespace). + GetScale(ctx, req.Deployment, metav1.GetOptions{}) + if err != nil { + http.Error(w, "failed to get current scale: "+err.Error(), http.StatusBadRequest) + return + } + + // Update desired replicas + scale.Spec.Replicas = req.Replicas + + // Apply via the scale subresource + _, err = clientset.AppsV1(). + Deployments(req.Namespace). 
+ UpdateScale(ctx, req.Deployment, &autoscalingv1.Scale{ + ObjectMeta: metav1.ObjectMeta{ + Name: req.Deployment, + Namespace: req.Namespace, + // Optionally carry resourceVersion to be strict about concurrency: + // ResourceVersion: scale.ResourceVersion, + }, + Spec: autoscalingv1.ScaleSpec{ + Replicas: req.Replicas, + }, + }, metav1.UpdateOptions{}) + if err != nil { + http.Error(w, "failed to update scale: "+err.Error(), http.StatusBadRequest) + return + } + + w.Header().Set("Content-Type", "application/json") + json.NewEncoder(w).Encode(models.DeploymentScaleReq{ + Clustername: req.Clustername, + Replicas: req.Replicas, + }) +} + +func StatefulSet_scale(w http.ResponseWriter, r *http.Request) { + Authorization(w, r) + + var req models.StatefulsetRolloutReq + if err := json.NewDecoder(r.Body).Decode(&req); err != nil { + http.Error(w, "invalid JSON body", http.StatusBadRequest) + return + } + + if req.Clustername == "" || req.Statefulset == "" { // reuse struct: Deployment field will carry StatefulSet name + http.Error(w, "missing required fields: Clustername and StatefulSet", http.StatusBadRequest) + return + } + if req.Namespace == "" { + req.Namespace = "default" + } + if req.Replicas < 0 { + http.Error(w, "Replicas must be >= 0", http.StatusBadRequest) + return + } + + clientset, _, err := getClientset(w, req.Clustername) + if err != nil { + http.Error(w, "failed to create kubernetes client: "+err.Error(), http.StatusInternalServerError) + return + } + ctx := context.TODO() + + scale, err := clientset.AppsV1().StatefulSets(req.Namespace).GetScale(ctx, req.Statefulset, metav1.GetOptions{}) + if err != nil { + http.Error(w, "failed to get current scale: "+err.Error(), http.StatusBadRequest) + return + } + + scale.Spec.Replicas = req.Replicas + + _, err = clientset.AppsV1().StatefulSets(req.Namespace).UpdateScale(ctx, req.Statefulset, &autoscalingv1.Scale{ + ObjectMeta: metav1.ObjectMeta{ + Name: req.Statefulset, + Namespace: req.Namespace, + }, + Spec: 
autoscalingv1.ScaleSpec{ + Replicas: req.Replicas, + }, + }, metav1.UpdateOptions{}) + if err != nil { + http.Error(w, "failed to update scale: "+err.Error(), http.StatusBadRequest) + return + } + + w.Header().Set("Content-Type", "application/json") + json.NewEncoder(w).Encode(models.DeploymentScaleReq{ + Clustername: req.Clustername, + Replicas: req.Replicas, + }) +} + +func Deployment_rollout(w http.ResponseWriter, r *http.Request) { + Authorization(w, r) + + var req models.DeploymentRolloutReq + if err := json.NewDecoder(r.Body).Decode(&req); err != nil { + http.Error(w, "invalid JSON body", http.StatusBadRequest) + return + } + if req.Clustername == "" || req.Deployment == "" || req.Action == "" { + http.Error(w, "missing required fields: Clustername, Deployment, Action", http.StatusBadRequest) + return + } + if req.Namespace == "" { + req.Namespace = "default" + } + + clientset, _, err := getClientset(w, req.Clustername) + if err != nil { + http.Error(w, "failed to create kubernetes client: "+err.Error(), http.StatusInternalServerError) + return + } + ctx := context.TODO() + + switch req.Action { + case "restart": + // Patch the pod template annotation to force a new ReplicaSet + now := time.Now().UTC().Format(time.RFC3339) + patch := []byte(`{"spec":{"template":{"metadata":{"annotations":{"kubectl.kubernetes.io/restartedAt":"` + now + `"}}}}}`) + _, err := clientset.AppsV1(). + Deployments(req.Namespace). + Patch(ctx, req.Deployment, types.StrategicMergePatchType, patch, metav1.PatchOptions{}) + if err != nil { + http.Error(w, "failed to patch deployment for restart: "+err.Error(), http.StatusBadRequest) + return + } + + w.Header().Set("Content-Type", "application/json") + _ = json.NewEncoder(w).Encode(models.DeploymentRolloutResp{ + Clustername: req.Clustername, + Message: "rollout restart triggered", + }) + return + + case "status": + dep, err := clientset.AppsV1(). + Deployments(req.Namespace). 
+ Get(ctx, req.Deployment, metav1.GetOptions{}) + if err != nil { + http.Error(w, "failed to get deployment: "+err.Error(), http.StatusBadRequest) + return + } + resp := makeStatusSnapshot(dep, req.Namespace) + w.Header().Set("Content-Type", "application/json") + _ = json.NewEncoder(w).Encode(models.DeploymentRolloutResp{ + Clustername: req.Clustername, + Message: "rollout status retrieved", + Status: resp, + }) + return + + default: + http.Error(w, "unsupported Action (use \"restart\" or \"status\")", http.StatusBadRequest) + return + } +} + +func StatefulSet_rollout(w http.ResponseWriter, r *http.Request) { + Authorization(w, r) + + var req models.StatefulsetRolloutReq // reuse struct; Deployment field will carry StatefulSet name + if err := json.NewDecoder(r.Body).Decode(&req); err != nil { + http.Error(w, "invalid JSON body", http.StatusBadRequest) + return + } + if req.Clustername == "" || req.Statefulset == "" || req.Action == "" { + http.Error(w, "missing required fields: Clustername, StatefulSet, Action", http.StatusBadRequest) + return + } + if req.Namespace == "" { + req.Namespace = "default" + } + + clientset, _, err := getClientset(w, req.Clustername) + if err != nil { + http.Error(w, "failed to create kubernetes client: "+err.Error(), http.StatusInternalServerError) + return + } + ctx := context.TODO() + + switch req.Action { + case "restart": + // Force a rolling restart by updating pod template annotation + now := time.Now().UTC().Format(time.RFC3339) + patch := []byte(`{"spec":{"template":{"metadata":{"annotations":{"kubectl.kubernetes.io/restartedAt":"` + now + `"}}}}}`) + _, err := clientset.AppsV1().StatefulSets(req.Namespace).Patch(ctx, req.Statefulset, types.StrategicMergePatchType, patch, metav1.PatchOptions{}) + if err != nil { + http.Error(w, "failed to patch statefulset for restart: "+err.Error(), http.StatusBadRequest) + return + } + w.Header().Set("Content-Type", "application/json") + _ = 
json.NewEncoder(w).Encode(models.DeploymentRolloutResp{ + Clustername: req.Clustername, + Message: "rollout restart triggered", + }) + return + case "status": + ss, err := clientset.AppsV1().StatefulSets(req.Namespace).Get(ctx, req.Statefulset, metav1.GetOptions{}) + if err != nil { + http.Error(w, "failed to get statefulset: "+err.Error(), http.StatusBadRequest) + return + } + unavailable := ss.Status.Replicas - ss.Status.ReadyReplicas + status := &models.DeploymentRolloutStatus{ + Deployment: ss.Name, + Namespace: req.Namespace, + ObservedGeneration: ss.Status.ObservedGeneration, + Replicas: ss.Status.Replicas, + UpdatedReplicas: ss.Status.UpdatedReplicas, + ReadyReplicas: ss.Status.ReadyReplicas, + AvailableReplicas: ss.Status.ReadyReplicas, + UnavailableReplicas: unavailable, + } + w.Header().Set("Content-Type", "application/json") + _ = json.NewEncoder(w).Encode(models.DeploymentRolloutResp{ + Clustername: req.Clustername, + Message: "rollout status retrieved", + Status: status, + }) + return + default: + http.Error(w, "unsupported Action (use \"restart\" or \"status\")", http.StatusBadRequest) + return + } +} + +func DaemonSet_rollout(w http.ResponseWriter, r *http.Request) { + Authorization(w, r) + + var req models.DeamonSetsRolloutReq // reuse struct; Deployment carries DaemonSet name + if err := json.NewDecoder(r.Body).Decode(&req); err != nil { + http.Error(w, "invalid JSON body", http.StatusBadRequest) + return + } + if req.Clustername == "" || req.Daemonsetsname == "" || req.Action == "" { + http.Error(w, "missing required fields: Clustername, DaemonSet, Action", http.StatusBadRequest) + return + } + if req.Namespace == "" { + req.Namespace = "default" + } + + clientset, _, err := getClientset(w, req.Clustername) + if err != nil { + http.Error(w, "failed to create kubernetes client: "+err.Error(), http.StatusInternalServerError) + return + } + ctx := context.TODO() + + switch req.Action { + case "restart": + // Force rolling restart by touching pod 
template annotation + now := time.Now().UTC().Format(time.RFC3339) + patch := []byte(`{"spec":{"template":{"metadata":{"annotations":{"kubectl.kubernetes.io/restartedAt":"` + now + `"}}}}}`) + _, err := clientset.AppsV1().DaemonSets(req.Namespace).Patch(ctx, req.Daemonsetsname, types.StrategicMergePatchType, patch, metav1.PatchOptions{}) + if err != nil { + http.Error(w, "failed to patch daemonset for restart: "+err.Error(), http.StatusBadRequest) + return + } + w.Header().Set("Content-Type", "application/json") + _ = json.NewEncoder(w).Encode(models.DeploymentRolloutResp{ + Clustername: req.Clustername, + Message: "rollout restart triggered", + }) + return + case "status": + ds, err := clientset.AppsV1().DaemonSets(req.Namespace).Get(ctx, req.Daemonsetsname, metav1.GetOptions{}) + if err != nil { + http.Error(w, "failed to get daemonset: "+err.Error(), http.StatusBadRequest) + return + } + unavailable := ds.Status.DesiredNumberScheduled - ds.Status.NumberAvailable + status := &models.DeploymentRolloutStatus{ + Deployment: ds.Name, + Namespace: req.Namespace, + ObservedGeneration: ds.Status.ObservedGeneration, + Replicas: ds.Status.DesiredNumberScheduled, + UpdatedReplicas: ds.Status.UpdatedNumberScheduled, + ReadyReplicas: ds.Status.NumberReady, + AvailableReplicas: ds.Status.NumberAvailable, + UnavailableReplicas: unavailable, + } + w.Header().Set("Content-Type", "application/json") + _ = json.NewEncoder(w).Encode(models.DeploymentRolloutResp{ + Clustername: req.Clustername, + Message: "rollout status retrieved", + Status: status, + }) + return + default: + http.Error(w, "unsupported Action (use \"restart\" or \"status\")", http.StatusBadRequest) + return + } +} + +func makeStatusSnapshot(dep *appsv1.Deployment, ns string) *models.DeploymentRolloutStatus { + var condProgressing, condAvailable string + for _, c := range dep.Status.Conditions { + switch c.Type { + case appsv1.DeploymentProgressing: + condProgressing = string(c.Status) + ": " + c.Reason + case 
appsv1.DeploymentAvailable: + condAvailable = string(c.Status) + ": " + c.Reason + } + } + unavailable := dep.Status.Replicas - dep.Status.AvailableReplicas + return &models.DeploymentRolloutStatus{ + Deployment: dep.Name, + Namespace: ns, + ObservedGeneration: dep.Status.ObservedGeneration, + Replicas: dep.Status.Replicas, + UpdatedReplicas: dep.Status.UpdatedReplicas, + ReadyReplicas: dep.Status.ReadyReplicas, + AvailableReplicas: dep.Status.AvailableReplicas, + UnavailableReplicas: unavailable, + ConditionProgressing: condProgressing, + ConditionAvailable: condAvailable, + } +} + +func Deployment_fromYaml(w http.ResponseWriter, r *http.Request) { + Authorization(w, r) + + var req models.PodLog + if err := json.NewDecoder(r.Body).Decode(&req); err != nil { + http.Error(w, "Invalid JSON body: "+err.Error(), http.StatusBadRequest) + return + } + + if req.Clustername == "" { + http.Error(w, "Missing 'Clustername' parameter", http.StatusBadRequest) + return + } + + decoded, err := base64.StdEncoding.DecodeString(req.Manifest) + if err != nil { + http.Error(w, "Failed to decode base64: "+err.Error(), http.StatusBadRequest) + return + } + + var deployment appsv1.Deployment + if err := syaml.Unmarshal(decoded, &deployment); err != nil { + http.Error(w, "Failed to unmarshal YAML: "+err.Error(), http.StatusBadRequest) + return + } + + // Debug logs for visibility + log.Println("Decoded Deployment Manifest:", deployment.Name, deployment.Namespace) + + if deployment.APIVersion == "" { + deployment.APIVersion = "apps/v1" + } + if deployment.Kind == "" { + deployment.Kind = "Deployment" + } + + if deployment.Name == "" { + http.Error(w, "Missing deployment name", http.StatusBadRequest) + return + } + + // Ensure at least one container exists in the pod template + if len(deployment.Spec.Template.Spec.Containers) == 0 { + http.Error(w, "Deployment must define at least one container", http.StatusBadRequest) + return + } + + if deployment.Namespace == "" { + deployment.Namespace 
= "default" + } + + clientset, _, err := getClientset(w, req.Clustername) + if err != nil { + http.Error(w, "Error getting clientset: "+err.Error(), http.StatusInternalServerError) + return + } + + created, err := clientset.AppsV1().Deployments(deployment.Namespace).Create(context.TODO(), &deployment, metav1.CreateOptions{}) + if err != nil { + http.Error(w, "Failed to create deployment: "+err.Error(), http.StatusInternalServerError) + return + } + + w.Header().Set("Content-Type", "application/json") + json.NewEncoder(w).Encode(map[string]string{ + "name": created.Name, + "namespace": created.Namespace, + }) +} + +func StatefulSet_fromYaml(w http.ResponseWriter, r *http.Request) { + Authorization(w, r) + + var req models.PodLog + if err := json.NewDecoder(r.Body).Decode(&req); err != nil { + http.Error(w, "Invalid JSON body: "+err.Error(), http.StatusBadRequest) + return + } + + if req.Clustername == "" { + http.Error(w, "Missing 'Clustername' parameter", http.StatusBadRequest) + return + } + + decoded, err := base64.StdEncoding.DecodeString(req.Manifest) + if err != nil { + http.Error(w, "Failed to decode base64: "+err.Error(), http.StatusBadRequest) + return + } + + var ss appsv1.StatefulSet + if err := syaml.Unmarshal(decoded, &ss); err != nil { + http.Error(w, "Failed to unmarshal YAML: "+err.Error(), http.StatusBadRequest) + return + } + + // Debug logs for visibility + log.Println("Decoded StatefulSet Manifest:", ss.Name, ss.Namespace) + + if ss.APIVersion == "" { + ss.APIVersion = "apps/v1" + } + if ss.Kind == "" { + ss.Kind = "StatefulSet" + } + + if ss.Name == "" { + http.Error(w, "Missing statefulset name", http.StatusBadRequest) + return + } + + if len(ss.Spec.Template.Spec.Containers) == 0 { + http.Error(w, "StatefulSet must define at least one container", http.StatusBadRequest) + return + } + + if ss.Namespace == "" { + ss.Namespace = "default" + } + + clientset, _, err := getClientset(w, req.Clustername) + if err != nil { + http.Error(w, "Error 
getting clientset: "+err.Error(), http.StatusInternalServerError) + return + } + + created, err := clientset.AppsV1().StatefulSets(ss.Namespace).Create(context.TODO(), &ss, metav1.CreateOptions{}) + if err != nil { + http.Error(w, "Failed to create statefulset: "+err.Error(), http.StatusInternalServerError) + return + } + + w.Header().Set("Content-Type", "application/json") + json.NewEncoder(w).Encode(map[string]string{ + "name": created.Name, + "namespace": created.Namespace, + }) +} +func DaemonSet_fromYaml(w http.ResponseWriter, r *http.Request) { + Authorization(w, r) + + var req models.PodLog + if err := json.NewDecoder(r.Body).Decode(&req); err != nil { + http.Error(w, "Invalid JSON body: "+err.Error(), http.StatusBadRequest) + return + } + if req.Clustername == "" { + http.Error(w, "Missing 'Clustername' parameter", http.StatusBadRequest) + return + } + + decoded, err := base64.StdEncoding.DecodeString(req.Manifest) + if err != nil { + http.Error(w, "Failed to decode base64: "+err.Error(), http.StatusBadRequest) + return + } + + var ds appsv1.DaemonSet + if err := syaml.Unmarshal(decoded, &ds); err != nil { + http.Error(w, "Failed to unmarshal YAML: "+err.Error(), http.StatusBadRequest) + return + } + + log.Println("Decoded DaemonSet Manifest:", ds.Name, ds.Namespace) + if ds.APIVersion == "" { + ds.APIVersion = "apps/v1" + } + if ds.Kind == "" { + ds.Kind = "DaemonSet" + } + if ds.Name == "" { + http.Error(w, "Missing daemonset name", http.StatusBadRequest) + return + } + if len(ds.Spec.Template.Spec.Containers) == 0 { + http.Error(w, "DaemonSet must define at least one container", http.StatusBadRequest) + return + } + if ds.Namespace == "" { + ds.Namespace = "default" + } + + clientset, _, err := getClientset(w, req.Clustername) + if err != nil { + http.Error(w, "Error getting clientset: "+err.Error(), http.StatusInternalServerError) + return + } + created, err := clientset.AppsV1().DaemonSets(ds.Namespace).Create(context.TODO(), &ds, 
metav1.CreateOptions{}) + if err != nil { + http.Error(w, "Failed to create daemonset: "+err.Error(), http.StatusInternalServerError) + return + } + + w.Header().Set("Content-Type", "application/json") + json.NewEncoder(w).Encode(map[string]string{ + "name": created.Name, + "namespace": created.Namespace, + }) +} + +func ReplicationController_fromYaml(w http.ResponseWriter, r *http.Request) { + Authorization(w, r) + + var req models.PodLog + if err := json.NewDecoder(r.Body).Decode(&req); err != nil { + http.Error(w, "Invalid JSON body: "+err.Error(), http.StatusBadRequest) + return + } + if req.Clustername == "" { + http.Error(w, "Missing 'Clustername' parameter", http.StatusBadRequest) + return + } + + decoded, err := base64.StdEncoding.DecodeString(req.Manifest) + if err != nil { + http.Error(w, "Failed to decode base64: "+err.Error(), http.StatusBadRequest) + return + } + + var rc corev1.ReplicationController + if err := syaml.Unmarshal(decoded, &rc); err != nil { + http.Error(w, "Failed to unmarshal YAML: "+err.Error(), http.StatusBadRequest) + return + } + + if rc.APIVersion == "" { + rc.APIVersion = "v1" + } + if rc.Kind == "" { + rc.Kind = "ReplicationController" + } + if rc.Name == "" { + http.Error(w, "Missing replicationcontroller name", http.StatusBadRequest) + return + } + if rc.Spec.Template == nil || len(rc.Spec.Template.Spec.Containers) == 0 { + http.Error(w, "ReplicationController must define at least one container", http.StatusBadRequest) + return + } + if rc.Namespace == "" { + rc.Namespace = "default" + } + + clientset, _, err := getClientset(w, req.Clustername) + if err != nil { + http.Error(w, "Error getting clientset: "+err.Error(), http.StatusInternalServerError) + return + } + + created, err := clientset.CoreV1().ReplicationControllers(rc.Namespace).Create(context.TODO(), &rc, metav1.CreateOptions{}) + if err != nil { + http.Error(w, "Failed to create replicationcontroller: "+err.Error(), http.StatusInternalServerError) + return + } + + 
w.Header().Set("Content-Type", "application/json") + json.NewEncoder(w).Encode(map[string]string{ + "name": created.Name, + "namespace": created.Namespace, + }) +} + +func Replicationcontroller_manifest(w http.ResponseWriter, r *http.Request) { + Authorization(w, r) + + var req struct { + Clustername string `json:"Clustername"` + Namespace string `json:"Namespace"` + Replicationcontroller string `json:"Replicationcontroller"` + } + if err := json.NewDecoder(r.Body).Decode(&req); err != nil { + http.Error(w, "Invalid JSON body", http.StatusBadRequest) + return + } + if req.Clustername == "" || req.Replicationcontroller == "" { + http.Error(w, "Missing required fields: Clustername and Replicationcontroller", http.StatusBadRequest) + return + } + if req.Namespace == "" { + req.Namespace = "default" + } + + clientset, _, err := getClientset(w, req.Clustername) + if err != nil { + http.Error(w, "Failed to create kubernetes client: "+err.Error(), http.StatusInternalServerError) + return + } + + rc, err := clientset.CoreV1().ReplicationControllers(req.Namespace).Get(context.TODO(), req.Replicationcontroller, metav1.GetOptions{}) + if err != nil { + http.Error(w, "Failed to get replicationcontroller: "+err.Error(), http.StatusBadRequest) + return + } + + rcYAML, err := yaml.Marshal(rc) + if err != nil { + http.Error(w, "Failed to marshal replicationcontroller yaml: "+err.Error(), http.StatusInternalServerError) + return + } + + w.Header().Set("Content-Type", "application/json") + json.NewEncoder(w).Encode(string(rcYAML)) +} + +func Replicationcontroller_scale(w http.ResponseWriter, r *http.Request) { + Authorization(w, r) + + var req struct { + Clustername string `json:"Clustername"` + Namespace string `json:"Namespace"` + Replicationcontroller string `json:"Replicationcontroller"` + Replicas int32 `json:"Replicas"` + } + if err := json.NewDecoder(r.Body).Decode(&req); err != nil { + http.Error(w, "invalid JSON body", http.StatusBadRequest) + return + } + if 
req.Clustername == "" || req.Replicationcontroller == "" { + http.Error(w, "missing required fields: Clustername and Replicationcontroller", http.StatusBadRequest) + return + } + if req.Namespace == "" { + req.Namespace = "default" + } + if req.Replicas < 0 { + http.Error(w, "Replicas must be >= 0", http.StatusBadRequest) + return + } + + clientset, _, err := getClientset(w, req.Clustername) + if err != nil { + http.Error(w, "failed to create kubernetes client: "+err.Error(), http.StatusInternalServerError) + return + } + ctx := context.TODO() + + scale, err := clientset.CoreV1().ReplicationControllers(req.Namespace).GetScale(ctx, req.Replicationcontroller, metav1.GetOptions{}) + if err != nil { + http.Error(w, "failed to get current scale: "+err.Error(), http.StatusBadRequest) + return + } + + scale.Spec.Replicas = req.Replicas + + _, err = clientset.CoreV1().ReplicationControllers(req.Namespace).UpdateScale(ctx, req.Replicationcontroller, &autoscalingv1.Scale{ + ObjectMeta: metav1.ObjectMeta{ + Name: req.Replicationcontroller, + Namespace: req.Namespace, + }, + Spec: autoscalingv1.ScaleSpec{ + Replicas: req.Replicas, + }, + }, metav1.UpdateOptions{}) + if err != nil { + http.Error(w, "failed to update scale: "+err.Error(), http.StatusBadRequest) + return + } + + w.Header().Set("Content-Type", "application/json") + json.NewEncoder(w).Encode(map[string]interface{}{ + "Clustername": req.Clustername, + "Replicationcontroller": req.Replicationcontroller, + "Namespace": req.Namespace, + "Replicas": req.Replicas, + }) +} + +func Replicationcontroller_migrate(w http.ResponseWriter, r *http.Request) { + Authorization(w, r) + + var req struct { + Clustername string `json:"Clustername"` + Namespace string `json:"Namespace"` + Replicationcontroller string `json:"Replicationcontroller"` + } + if err := json.NewDecoder(r.Body).Decode(&req); err != nil { + http.Error(w, "Invalid JSON body", http.StatusBadRequest) + return + } + if req.Clustername == "" || 
req.Replicationcontroller == "" { + http.Error(w, "Missing required fields: Clustername and Replicationcontroller", http.StatusBadRequest) + return + } + if req.Namespace == "" { + req.Namespace = "default" + } + + clientset, _, err := getClientset(w, req.Clustername) + if err != nil { + http.Error(w, "Failed to create kubernetes client: "+err.Error(), http.StatusInternalServerError) + return + } + + // Fetch the source ReplicationController + rc, err := clientset.CoreV1().ReplicationControllers(req.Namespace).Get(context.TODO(), req.Replicationcontroller, metav1.GetOptions{}) + if err != nil { + http.Error(w, "Failed to get replicationcontroller: "+err.Error(), http.StatusBadRequest) + return + } + + // Construct a Deployment with equivalent spec + replicas := int32(1) + if rc.Spec.Replicas != nil { + replicas = *rc.Spec.Replicas + } + if rc.Spec.Template == nil { + http.Error(w, "ReplicationController has no pod template", http.StatusBadRequest) + return + } + dep := &appsv1.Deployment{ + ObjectMeta: metav1.ObjectMeta{ + Name: rc.Name, + Namespace: rc.Namespace, + Labels: rc.Labels, + Annotations: rc.Annotations, + }, + Spec: appsv1.DeploymentSpec{ + Replicas: &replicas, + Selector: &metav1.LabelSelector{MatchLabels: rc.Spec.Selector}, + Template: *rc.Spec.Template, + Strategy: appsv1.DeploymentStrategy{Type: appsv1.RollingUpdateDeploymentStrategyType}, + }, + } + + created, err := clientset.AppsV1().Deployments(dep.Namespace).Create(context.TODO(), dep, metav1.CreateOptions{}) + if err != nil { + http.Error(w, "Failed to create deployment: "+err.Error(), http.StatusInternalServerError) + return + } + + w.Header().Set("Content-Type", "application/json") + json.NewEncoder(w).Encode(map[string]string{ + "name": created.Name, + "namespace": created.Namespace, + "from": rc.Name, + "kind": "Deployment", + }) +} + +func Service_fromYaml(w http.ResponseWriter, r *http.Request) { + Authorization(w, r) + + var req models.PodLog + if err := 
json.NewDecoder(r.Body).Decode(&req); err != nil { + http.Error(w, "Invalid JSON body: "+err.Error(), http.StatusBadRequest) + return + } + if req.Clustername == "" { + http.Error(w, "Missing 'Clustername' parameter", http.StatusBadRequest) + return + } + + decoded, err := base64.StdEncoding.DecodeString(req.Manifest) + if err != nil { + http.Error(w, "Failed to decode base64: "+err.Error(), http.StatusBadRequest) + return + } + + var svc corev1.Service + if err := syaml.Unmarshal(decoded, &svc); err != nil { + http.Error(w, "Failed to unmarshal YAML: "+err.Error(), http.StatusBadRequest) + return + } + + if svc.APIVersion == "" { + svc.APIVersion = "v1" + } + if svc.Kind == "" { + svc.Kind = "Service" + } + if svc.Name == "" { + http.Error(w, "Missing service name", http.StatusBadRequest) + return + } + if svc.Namespace == "" { + svc.Namespace = "default" + } + + clientset, _, err := getClientset(w, req.Clustername) + if err != nil { + http.Error(w, "Error getting clientset: "+err.Error(), http.StatusInternalServerError) + return + } + + created, err := clientset.CoreV1().Services(svc.Namespace).Create(context.TODO(), &svc, metav1.CreateOptions{}) + if err != nil { + http.Error(w, "Failed to create service: "+err.Error(), http.StatusInternalServerError) + return + } + + w.Header().Set("Content-Type", "application/json") + json.NewEncoder(w).Encode(map[string]string{ + "name": created.Name, + "namespace": created.Namespace, + }) +} + +func Service_manifest(w http.ResponseWriter, r *http.Request) { + Authorization(w, r) + + var req struct { + Clustername string `json:"Clustername"` + Namespace string `json:"Namespace"` + Servicename string `json:"Servicename"` + } + if err := json.NewDecoder(r.Body).Decode(&req); err != nil { + http.Error(w, "Invalid JSON body", http.StatusBadRequest) + return + } + if req.Clustername == "" || req.Servicename == "" { + http.Error(w, "Missing required fields: Clustername and Service", http.StatusBadRequest) + return + } + if 
req.Namespace == "" { + req.Namespace = "default" + } + + clientset, _, err := getClientset(w, req.Clustername) + if err != nil { + http.Error(w, "Failed to create kubernetes client: "+err.Error(), http.StatusInternalServerError) + return + } + + svc, err := clientset.CoreV1().Services(req.Namespace).Get(context.TODO(), req.Servicename, metav1.GetOptions{}) + if err != nil { + http.Error(w, "Failed to get service: "+err.Error(), http.StatusBadRequest) + return + } + + svcYAML, err := yaml.Marshal(svc) + if err != nil { + http.Error(w, "Failed to marshal service yaml: "+err.Error(), http.StatusInternalServerError) + return + } + + w.Header().Set("Content-Type", "application/json") + json.NewEncoder(w).Encode(string(svcYAML)) +} + +func Cluster_configmap(w http.ResponseWriter, r *http.Request) { + Authorization(w, r) + + clustername := r.URL.Query().Get("Name") + namespace := r.URL.Query().Get("Namespace") + + if clustername == "" { + http.Error(w, "Missing 'Name' parameter", http.StatusBadRequest) + return + } + + clientset, _, err := getClientset(w, clustername) + if err != nil { + http.Error(w, "Error getting clientset", http.StatusInternalServerError) + return + } + + cms, err := clientset.CoreV1().ConfigMaps(namespace).List(context.TODO(), metav1.ListOptions{}) + if err != nil { + http.Error(w, "Error getting list of ConfigMaps", http.StatusInternalServerError) + return + } + + type ConfigMapItem struct { + Name string `json:"Name"` + Namespace string `json:"Namespace"` + DataKeys []string `json:"DataKeys"` + Age string `json:"Age"` + } + + var all []ConfigMapItem + now := time.Now() + for _, cm := range cms.Items { + var keys []string + for k := range cm.Data { + keys = append(keys, k) + } + age := now.Sub(cm.CreationTimestamp.Time) + all = append(all, ConfigMapItem{ + Name: cm.Name, + Namespace: cm.Namespace, + DataKeys: keys, + Age: helpers.Human(age), + }) + } + + w.Header().Set("Content-Type", "application/json") + json.NewEncoder(w).Encode(all) +} + +func 
ConfigMap_fromYaml(w http.ResponseWriter, r *http.Request) { + Authorization(w, r) + + var req models.PodLog + if err := json.NewDecoder(r.Body).Decode(&req); err != nil { + http.Error(w, "Invalid JSON body: "+err.Error(), http.StatusBadRequest) + return + } + if req.Clustername == "" { + http.Error(w, "Missing 'Clustername' parameter", http.StatusBadRequest) + return + } + + decoded, err := base64.StdEncoding.DecodeString(req.Manifest) + if err != nil { + http.Error(w, "Failed to decode base64: "+err.Error(), http.StatusBadRequest) + return + } + + var cm corev1.ConfigMap + if err := syaml.Unmarshal(decoded, &cm); err != nil { + http.Error(w, "Failed to unmarshal YAML: "+err.Error(), http.StatusBadRequest) + return + } + + if cm.APIVersion == "" { + cm.APIVersion = "v1" + } + if cm.Kind == "" { + cm.Kind = "ConfigMap" + } + if cm.Name == "" { + http.Error(w, "Missing configmap name", http.StatusBadRequest) + return + } + if cm.Namespace == "" { + cm.Namespace = "default" + } + + clientset, _, err := getClientset(w, req.Clustername) + if err != nil { + http.Error(w, "Error getting clientset: "+err.Error(), http.StatusInternalServerError) + return + } + + created, err := clientset.CoreV1().ConfigMaps(cm.Namespace).Create(context.TODO(), &cm, metav1.CreateOptions{}) + if err != nil { + http.Error(w, "Failed to create configmap: "+err.Error(), http.StatusInternalServerError) + return + } + + w.Header().Set("Content-Type", "application/json") + json.NewEncoder(w).Encode(map[string]string{ + "name": created.Name, + "namespace": created.Namespace, + }) +} + +func Secret_fromYaml(w http.ResponseWriter, r *http.Request) { + Authorization(w, r) + + var req models.PodLog + if err := json.NewDecoder(r.Body).Decode(&req); err != nil { + http.Error(w, "Invalid JSON body: "+err.Error(), http.StatusBadRequest) + return + } + if req.Clustername == "" { + http.Error(w, "Missing 'Clustername' parameter", http.StatusBadRequest) + return + } + + decoded, err := 
base64.StdEncoding.DecodeString(req.Manifest) + if err != nil { + http.Error(w, "Failed to decode base64: "+err.Error(), http.StatusBadRequest) + return + } + + var secret corev1.Secret + if err := syaml.Unmarshal(decoded, &secret); err != nil { + http.Error(w, "Failed to unmarshal YAML: "+err.Error(), http.StatusBadRequest) + return + } + + if secret.APIVersion == "" { + secret.APIVersion = "v1" + } + if secret.Kind == "" { + secret.Kind = "Secret" + } + if secret.Name == "" { + http.Error(w, "Missing secret name", http.StatusBadRequest) + return + } + if secret.Namespace == "" { + secret.Namespace = "default" + } + + clientset, _, err := getClientset(w, req.Clustername) + if err != nil { + http.Error(w, "Error getting clientset: "+err.Error(), http.StatusInternalServerError) + return + } + + created, err := clientset.CoreV1().Secrets(secret.Namespace).Create(context.TODO(), &secret, metav1.CreateOptions{}) + if err != nil { + http.Error(w, "Failed to create secret: "+err.Error(), http.StatusInternalServerError) + return + } + + w.Header().Set("Content-Type", "application/json") + json.NewEncoder(w).Encode(map[string]string{ + "name": created.Name, + "namespace": created.Namespace, + }) +} + +func ConfigMap_manifest(w http.ResponseWriter, r *http.Request) { + Authorization(w, r) + + var req struct { + Clustername string `json:"Clustername"` + Namespace string `json:"Namespace"` + Configmapname string `json:"Configmapname"` + } + if err := json.NewDecoder(r.Body).Decode(&req); err != nil { + http.Error(w, "Invalid JSON body", http.StatusBadRequest) + return + } + if req.Clustername == "" || req.Configmapname == "" { + http.Error(w, "Missing required fields: Clustername and Configmapname", http.StatusBadRequest) + return + } + if req.Namespace == "" { + req.Namespace = "default" + } + + clientset, _, err := getClientset(w, req.Clustername) + if err != nil { + http.Error(w, "Failed to create kubernetes client: "+err.Error(), http.StatusInternalServerError) + return + 
} + + cm, err := clientset.CoreV1().ConfigMaps(req.Namespace).Get(context.TODO(), req.Configmapname, metav1.GetOptions{}) + if err != nil { + http.Error(w, "Failed to get configmap: "+err.Error(), http.StatusBadRequest) + return + } + + cmYAML, err := yaml.Marshal(cm) + if err != nil { + http.Error(w, "Failed to marshal configmap yaml: "+err.Error(), http.StatusInternalServerError) + return + } + + w.Header().Set("Content-Type", "application/json") + json.NewEncoder(w).Encode(string(cmYAML)) +} + +func Configmap_delete(w http.ResponseWriter, r *http.Request) { + Authorization(w, r) + clustername := r.URL.Query().Get("Name") + namespace := r.URL.Query().Get("Namespace") + configmapName := r.URL.Query().Get("configmapName") + + if clustername == "" || namespace == "" || configmapName == "" { + http.Error(w, "Missing required parameters (Name, Namespace, configmapName)", http.StatusBadRequest) + return + } + + clientset, _, err := getClientset(w, clustername) + if err != nil { + http.Error(w, "Error getting Kubernetes clientset", http.StatusInternalServerError) + return + } + + err = clientset.CoreV1().ConfigMaps(namespace).Delete(context.TODO(), configmapName, metav1.DeleteOptions{}) + if err != nil { + http.Error(w, "Error deleting ConfigMap", http.StatusInternalServerError) + return + } + + w.Header().Set("Content-Type", "application/json") + json.NewEncoder(w).Encode(configmapName + " Has been deleted") +} + +func Cluster_secret(w http.ResponseWriter, r *http.Request) { + Authorization(w, r) + + clustername := r.URL.Query().Get("Name") + namespace := r.URL.Query().Get("Namespace") + + if clustername == "" { + http.Error(w, "Missing 'Name' parameter", http.StatusBadRequest) + return + } + + clientset, _, err := getClientset(w, clustername) + if err != nil { + http.Error(w, "Error getting clientset", http.StatusInternalServerError) + return + } + + secs, err := clientset.CoreV1().Secrets(namespace).List(context.TODO(), metav1.ListOptions{}) + if err != nil { + 
http.Error(w, "Error getting list of Secrets", http.StatusInternalServerError) + return + } + + type SecretItem struct { + Name string `json:"Name"` + Namespace string `json:"Namespace"` + Type string `json:"Type"` + DataKeys []string `json:"DataKeys"` + Age string `json:"Age"` + } + + var all []SecretItem + now := time.Now() + for _, s := range secs.Items { + var keys []string + for k := range s.Data { + keys = append(keys, k) + } + age := now.Sub(s.CreationTimestamp.Time) + all = append(all, SecretItem{ + Name: s.Name, + Namespace: s.Namespace, + Type: string(s.Type), + DataKeys: keys, + Age: helpers.Human(age), + }) + } + + w.Header().Set("Content-Type", "application/json") + json.NewEncoder(w).Encode(all) +} + +func Secret_manifest(w http.ResponseWriter, r *http.Request) { + Authorization(w, r) + + var req struct { + Clustername string `json:"Clustername"` + Namespace string `json:"Namespace"` + Secretname string `json:"Secretname"` + } + if err := json.NewDecoder(r.Body).Decode(&req); err != nil { + http.Error(w, "Invalid JSON body", http.StatusBadRequest) + return + } + if req.Clustername == "" || req.Secretname == "" { + http.Error(w, "Missing required fields: Clustername and Secretname", http.StatusBadRequest) + return + } + if req.Namespace == "" { + req.Namespace = "default" + } + + clientset, _, err := getClientset(w, req.Clustername) + if err != nil { + http.Error(w, "Failed to create kubernetes client: "+err.Error(), http.StatusInternalServerError) + return + } + + s, err := clientset.CoreV1().Secrets(req.Namespace).Get(context.TODO(), req.Secretname, metav1.GetOptions{}) + if err != nil { + http.Error(w, "Failed to get secret: "+err.Error(), http.StatusBadRequest) + return + } + + y, err := yaml.Marshal(s) + if err != nil { + http.Error(w, "Failed to marshal secret yaml: "+err.Error(), http.StatusInternalServerError) + return + } + + w.Header().Set("Content-Type", "application/json") + json.NewEncoder(w).Encode(string(y)) +} + +func Secret_delete(w 
http.ResponseWriter, r *http.Request) { + Authorization(w, r) + + clustername := r.URL.Query().Get("Name") + namespace := r.URL.Query().Get("Namespace") + secretName := r.URL.Query().Get("secretName") + + if clustername == "" || namespace == "" || secretName == "" { + http.Error(w, "Missing required parameters (Name, Namespace, secretName)", http.StatusBadRequest) + return + } + + clientset, _, err := getClientset(w, clustername) + if err != nil { + http.Error(w, "Error getting Kubernetes clientset", http.StatusInternalServerError) + return + } + + err = clientset.CoreV1().Secrets(namespace).Delete(context.TODO(), secretName, metav1.DeleteOptions{}) + if err != nil { + http.Error(w, "Error deleting Secret", http.StatusInternalServerError) + return + } + + w.Header().Set("Content-Type", "application/json") + json.NewEncoder(w).Encode(secretName + " Has been deleted") +} + +func Job_fromYaml(w http.ResponseWriter, r *http.Request) { + Authorization(w, r) + + var req models.PodLog + if err := json.NewDecoder(r.Body).Decode(&req); err != nil { + http.Error(w, "Invalid JSON body: "+err.Error(), http.StatusBadRequest) + return + } + if req.Clustername == "" { + http.Error(w, "Missing 'Clustername' parameter", http.StatusBadRequest) + return + } + + decoded, err := base64.StdEncoding.DecodeString(req.Manifest) + if err != nil { + http.Error(w, "Failed to decode base64: "+err.Error(), http.StatusBadRequest) + return + } + + var job batchv1.Job + if err := syaml.Unmarshal(decoded, &job); err != nil { + http.Error(w, "Failed to unmarshal YAML: "+err.Error(), http.StatusBadRequest) + return + } + + log.Println("Decoded Job Manifest:", job.Name, job.Namespace) + if job.APIVersion == "" { + job.APIVersion = "batch/v1" + } + if job.Kind == "" { + job.Kind = "Job" + } + if job.Name == "" { + http.Error(w, "Missing job name", http.StatusBadRequest) + return + } + if len(job.Spec.Template.Spec.Containers) == 0 { + http.Error(w, "Job must define at least one container", 
http.StatusBadRequest) + return + } + if job.Namespace == "" { + job.Namespace = "default" + } + + clientset, _, err := getClientset(w, req.Clustername) + if err != nil { + http.Error(w, "Error getting clientset: "+err.Error(), http.StatusInternalServerError) + return + } + created, err := clientset.BatchV1().Jobs(job.Namespace).Create(context.TODO(), &job, metav1.CreateOptions{}) + if err != nil { + http.Error(w, "Failed to create job: "+err.Error(), http.StatusInternalServerError) + return + } + + w.Header().Set("Content-Type", "application/json") + json.NewEncoder(w).Encode(map[string]string{ + "name": created.Name, + "namespace": created.Namespace, + }) +} +func CronJob_fromYaml(w http.ResponseWriter, r *http.Request) { + Authorization(w, r) + + var req models.PodLog + if err := json.NewDecoder(r.Body).Decode(&req); err != nil { + http.Error(w, "Invalid JSON body: "+err.Error(), http.StatusBadRequest) + return + } + if req.Clustername == "" { + http.Error(w, "Missing 'Clustername' parameter", http.StatusBadRequest) + return + } + + decoded, err := base64.StdEncoding.DecodeString(req.Manifest) + if err != nil { + http.Error(w, "Failed to decode base64: "+err.Error(), http.StatusBadRequest) + return + } + + var cj batchv1.CronJob + if err := syaml.Unmarshal(decoded, &cj); err != nil { + http.Error(w, "Failed to unmarshal YAML: "+err.Error(), http.StatusBadRequest) + return + } + + log.Println("Decoded CronJob Manifest:", cj.Name, cj.Namespace) + if cj.APIVersion == "" { + cj.APIVersion = "batch/v1" + } + if cj.Kind == "" { + cj.Kind = "CronJob" + } + if cj.Name == "" { + http.Error(w, "Missing cronjob name", http.StatusBadRequest) + return + } + if cj.Spec.JobTemplate.Spec.Template.Spec.Containers == nil || len(cj.Spec.JobTemplate.Spec.Template.Spec.Containers) == 0 { + http.Error(w, "CronJob must define at least one container", http.StatusBadRequest) + return + } + if cj.Namespace == "" { + cj.Namespace = "default" + } + + clientset, _, err := getClientset(w, 
req.Clustername) + if err != nil { + http.Error(w, "Error getting clientset: "+err.Error(), http.StatusInternalServerError) + return + } + created, err := clientset.BatchV1().CronJobs(cj.Namespace).Create(context.TODO(), &cj, metav1.CreateOptions{}) + if err != nil { + http.Error(w, "Failed to create cronjob: "+err.Error(), http.StatusInternalServerError) + return + } + + w.Header().Set("Content-Type", "application/json") + json.NewEncoder(w).Encode(map[string]string{ + "name": created.Name, + "namespace": created.Namespace, + }) +} +func Pod_fromYaml(w http.ResponseWriter, r *http.Request) { + Authorization(w, r) + + var podlog models.PodLog + // Decode the incoming JSON request body to get the podlog structure + if err := json.NewDecoder(r.Body).Decode(&podlog); err != nil { + http.Error(w, "Invalid JSON body: "+err.Error(), http.StatusBadRequest) + return + } + + // Ensure Clustername is provided + if podlog.Clustername == "" { + http.Error(w, "Missing 'Clustername' parameter", http.StatusBadRequest) + return + } + + // Decode the base64-encoded manifest + decoded, err := base64.StdEncoding.DecodeString(podlog.Manifest) + if err != nil { + http.Error(w, "Failed to decode base64: "+err.Error(), http.StatusBadRequest) + return + } + + // Unmarshal the decoded YAML into the pod struct + var pod corev1.Pod + // Use sigs.k8s.io/yaml which respects json tags (maps metadata -> ObjectMeta) + if err := syaml.Unmarshal(decoded, &pod); err != nil { + http.Error(w, "Failed to unmarshal YAML: "+err.Error(), http.StatusBadRequest) + return + } + + // Log the decoded pod structure + log.Println("Decoded Pod Manifest:", pod) + log.Println("Decoded Pod Name:", pod.Name) + log.Println("Decoded Pod Namespace:", pod.Namespace) + log.Println("Decoded Pod Spec:", pod.Spec) + + // Set APIVersion and Kind if they are missing + if pod.APIVersion == "" { + pod.APIVersion = "v1" // Set the default APIVersion + } + if pod.Kind == "" { + pod.Kind = "Pod" // Set the default Kind + } + + // 
Ensure the pod name is set (in metadata.name) + if pod.Name == "" { + http.Error(w, "Missing pod name", http.StatusBadRequest) + return + } + + // Ensure containerPort is set + if len(pod.Spec.Containers) == 0 || len(pod.Spec.Containers[0].Ports) == 0 || pod.Spec.Containers[0].Ports[0].ContainerPort == 0 { + http.Error(w, "Missing containerPort in pod manifest", http.StatusBadRequest) + return + } + + // Use the provided or default namespace + if pod.Namespace == "" { + pod.Namespace = "default" // Use the namespace from request or "default" + } + + // Get clientset for the specified cluster + clientset, _, err := getClientset(w, podlog.Clustername) + if err != nil { + http.Error(w, "Error getting clientset: "+err.Error(), http.StatusInternalServerError) + return + } + + // Create the pod in the Kubernetes cluster + created, err := clientset.CoreV1().Pods(pod.Namespace).Create(context.TODO(), &pod, metav1.CreateOptions{}) + if err != nil { + http.Error(w, "Failed to create pod: "+err.Error(), http.StatusInternalServerError) + return + } + + // Respond with the name and namespace of the created pod + w.Header().Set("Content-Type", "application/json") + json.NewEncoder(w).Encode(map[string]string{ + "name": created.Name, + "namespace": created.Namespace, + }) +} + func Pod_exec(w http.ResponseWriter, r *http.Request) { Authorization(w, r) clustername := r.URL.Query().Get("Name") @@ -1112,3 +3161,474 @@ func ClusterStats(w http.ResponseWriter, r *http.Request) { json.NewEncoder(w).Encode(data) } + +// Cluster_resource_usage returns basic aggregated resource usage across nodes (CPU cores, memory, ephemeral storage). +// It sums capacity and allocatable from Node status. Network is a placeholder (0 total) unless metrics are integrated. 
+func Cluster_resource_usage(w http.ResponseWriter, r *http.Request) { + Authorization(w, r) + + clustername := r.URL.Query().Get("Name") + if clustername == "" { + http.Error(w, "Missing 'Name' parameter", http.StatusBadRequest) + return + } + + clientset, _, err := getClientset(w, clustername) + if err != nil { + http.Error(w, "Failed to create kubernetes client: "+err.Error(), http.StatusInternalServerError) + return + } + + nodes, err := clientset.CoreV1().Nodes().List(context.TODO(), metav1.ListOptions{}) + if err != nil { + http.Error(w, "Failed to list nodes: "+err.Error(), http.StatusInternalServerError) + return + } + + var totalCPU, allocCPU resourceapi.Quantity + var totalMem, allocMem resourceapi.Quantity + var totalStorage, allocStorage resourceapi.Quantity + + for _, n := range nodes.Items { + cap := n.Status.Capacity + alloc := n.Status.Allocatable + + totalCPU.Add(cap[corev1.ResourceCPU]) + allocCPU.Add(alloc[corev1.ResourceCPU]) + + totalMem.Add(cap[corev1.ResourceMemory]) + allocMem.Add(alloc[corev1.ResourceMemory]) + + // Ephemeral storage + if q, ok := cap[corev1.ResourceEphemeralStorage]; ok { + totalStorage.Add(q) + } + if q, ok := alloc[corev1.ResourceEphemeralStorage]; ok { + allocStorage.Add(q) + } + } + + // Convert to human-friendly units + cpuTotalCores := float64(totalCPU.MilliValue()) / 1000.0 + cpuAllocCores := float64(allocCPU.MilliValue()) / 1000.0 + + memTotalGi := float64(totalMem.Value()) / (1024.0 * 1024.0 * 1024.0) + memAllocGi := float64(allocMem.Value()) / (1024.0 * 1024.0 * 1024.0) + + storTotalGi := float64(totalStorage.Value()) / (1024.0 * 1024.0 * 1024.0) + storAllocGi := float64(allocStorage.Value()) / (1024.0 * 1024.0 * 1024.0) + + usage := models.ResourceUsage{ + CPU: models.Usage{ + Used: cpuAllocCores, + Total: cpuTotalCores, + Unit: "cores", + }, + Memory: models.Usage{ + Used: memAllocGi, + Total: memTotalGi, + Unit: "GiB", + }, + Storage: models.Usage{ + Used: storAllocGi, + Total: storTotalGi, + Unit: "GiB", + }, 
+ Network: models.Usage{ + Used: 0, + Total: 0, + Unit: "Gbps", + }, + } + + w.Header().Set("Content-Type", "application/json") + json.NewEncoder(w).Encode(usage) +} + +func Cluster_health(w http.ResponseWriter, r *http.Request) { + Authorization(w, r) + + clustername := r.URL.Query().Get("Name") + if clustername == "" { + http.Error(w, "Missing 'Name' parameter", http.StatusBadRequest) + return + } + + clientset, _, err := getClientset(w, clustername) + if err != nil { + http.Error(w, "Failed to create kubernetes client: "+err.Error(), http.StatusInternalServerError) + return + } + + // Nodes health + nodes, err := clientset.CoreV1().Nodes().List(context.TODO(), metav1.ListOptions{}) + if err != nil { + http.Error(w, "Failed to list nodes: "+err.Error(), http.StatusInternalServerError) + return + } + totalNodes := len(nodes.Items) + healthyNodes := 0 + for _, n := range nodes.Items { + for _, c := range n.Status.Conditions { + if c.Type == corev1.NodeReady && c.Status == corev1.ConditionTrue { + healthyNodes++ + break + } + } + } + + // Pods running + pods, err := clientset.CoreV1().Pods("").List(context.TODO(), metav1.ListOptions{}) + if err != nil { + http.Error(w, "Failed to list pods: "+err.Error(), http.StatusInternalServerError) + return + } + totalPods := len(pods.Items) + runningPods := 0 + failedOrPending := 0 + for _, p := range pods.Items { + switch p.Status.Phase { + case corev1.PodRunning: + runningPods++ + case corev1.PodFailed, corev1.PodPending, corev1.PodUnknown: + failedOrPending++ + } + } + + // Warnings from events (cluster-wide) + events, _ := clientset.CoreV1().Events("").List(context.TODO(), metav1.ListOptions{}) + warningCount := 0 + if events != nil { + for _, e := range events.Items { + if strings.EqualFold(e.Type, "Warning") { + warningCount++ + } + } + } + + alerts := failedOrPending // treat non-running pods as alerts proxy + + status := "Healthy" + if healthyNodes < totalNodes || alerts > 0 || warningCount > 0 { + status = "Degraded" 
+ } + + resp := models.Health{ + Status: status, + NodesHealthy: healthyNodes, + NodesTotal: totalNodes, + PodsRunning: runningPods, + PodsTotal: totalPods, + Alerts: alerts, + Warnings: warningCount, + } + + w.Header().Set("Content-Type", "application/json") + json.NewEncoder(w).Encode(resp) +} + +func Cluster_uptime(w http.ResponseWriter, r *http.Request) { + Authorization(w, r) + + clustername := r.URL.Query().Get("Name") + if clustername == "" { + http.Error(w, "Missing 'Name' parameter", http.StatusBadRequest) + return + } + + clientset, _, err := getClientset(w, clustername) + if err != nil { + http.Error(w, "Failed to create kubernetes client: "+err.Error(), http.StatusInternalServerError) + return + } + + nodes, err := clientset.CoreV1().Nodes().List(context.TODO(), metav1.ListOptions{}) + if err != nil || len(nodes.Items) == 0 { + http.Error(w, "Failed to list nodes", http.StatusInternalServerError) + return + } + + oldest := time.Now() + for _, n := range nodes.Items { + t := n.CreationTimestamp.Time + if t.Before(oldest) { + oldest = t + } + } + + d := time.Since(oldest) + // Format as 15d 8h 32m + days := int(d.Hours()) / 24 + hours := int(d.Hours()) % 24 + minutes := int(d.Minutes()) % 60 + uptimeStr := fmt.Sprintf("%dd %dh %dm", days, hours, minutes) + + resp := models.Uptime{ + ClusterUptime: uptimeStr, + LastMaintenance: "-", + NextMaintenance: "-", + } + + w.Header().Set("Content-Type", "application/json") + json.NewEncoder(w).Encode(resp) +} + +func Cluster_performance(w http.ResponseWriter, r *http.Request) { + Authorization(w, r) + + clustername := r.URL.Query().Get("Name") + if clustername == "" { + http.Error(w, "Missing 'Name' parameter", http.StatusBadRequest) + return + } + + clientset, _, err := getClientset(w, clustername) + if err != nil { + http.Error(w, "Failed to create kubernetes client: "+err.Error(), http.StatusInternalServerError) + return + } + + // API latency: measure a small list call + apiStart := time.Now() + _, _ = 
clientset.CoreV1().Pods("").List(context.TODO(), metav1.ListOptions{Limit: 1}) + apiLatency := time.Since(apiStart) + + // Gather recent pods (cap to avoid heavy calls) + pods, err := clientset.CoreV1().Pods("").List(context.TODO(), metav1.ListOptions{}) + if err != nil { + http.Error(w, "Failed to list pods: "+err.Error(), http.StatusInternalServerError) + return + } + + // Limit sample size + maxSample := 300 + if len(pods.Items) < maxSample { + maxSample = len(pods.Items) + } + + var totalStartup time.Duration + var countStartup int + var totalSched time.Duration + var countSched int + + now := time.Now() + for i := 0; i < maxSample; i++ { + p := pods.Items[i] + + // Skip very old pods to focus on recent behavior (e.g., last 14 days) + if now.Sub(p.CreationTimestamp.Time) > 14*24*time.Hour { + continue + } + + // Scheduler latency: Creation -> PodScheduled=True + var schedTime *time.Time + for _, c := range p.Status.Conditions { + if c.Type == corev1.PodScheduled && c.Status == corev1.ConditionTrue { + t := c.LastTransitionTime.Time + schedTime = &t + break + } + } + if schedTime != nil { + totalSched += schedTime.Sub(p.CreationTimestamp.Time) + countSched++ + } + + // Pod startup time: Creation -> first container StartedAt + // If multiple containers, take the latest StartedAt (worst case) + var startedAt *time.Time + for _, cs := range p.Status.ContainerStatuses { + if cs.State.Running != nil && !cs.State.Running.StartedAt.IsZero() { + t := cs.State.Running.StartedAt.Time + if startedAt == nil || t.After(*startedAt) { + startedAt = &t + } + } + } + if startedAt != nil { + totalStartup += startedAt.Sub(p.CreationTimestamp.Time) + countStartup++ + } + } + + // Format helpers + formatDur := func(d time.Duration) string { + if d < time.Second { + return fmt.Sprintf("%dms", d.Milliseconds()) + } + // show with one decimal up to minutes + if d < time.Minute { + return fmt.Sprintf("%.1fs", d.Seconds()) + } + // mm:ss for longer + return fmt.Sprintf("%dm %ds", 
int(d.Minutes()), int(d.Seconds())%60) + } + + var avgStartup, avgSched time.Duration + if countStartup > 0 { + avgStartup = time.Duration(int64(totalStartup) / int64(countStartup)) + } + if countSched > 0 { + avgSched = time.Duration(int64(totalSched) / int64(countSched)) + } + + perf := models.Performance{ + PodStartupTime: formatDur(avgStartup), + APILatency: formatDur(apiLatency), + EtcdLatency: "-", // etcd latency requires apiserver/etcd metrics; integrate Prometheus to populate + SchedulerLatency: formatDur(avgSched), + } + + w.Header().Set("Content-Type", "application/json") + json.NewEncoder(w).Encode(perf) +} + +func Cluster_events(w http.ResponseWriter, r *http.Request) { + Authorization(w, r) + + clustername := r.URL.Query().Get("Name") + namespace := r.URL.Query().Get("Namespace") + if clustername == "" { + http.Error(w, "Missing 'Name' parameter", http.StatusBadRequest) + return + } + + clientset, _, err := getClientset(w, clustername) + if err != nil { + http.Error(w, "Failed to create kubernetes client: "+err.Error(), http.StatusInternalServerError) + return + } + + evList, err := clientset.CoreV1().Events(namespace).List(context.TODO(), metav1.ListOptions{}) + if err != nil { + http.Error(w, "Failed to list events: "+err.Error(), http.StatusInternalServerError) + return + } + + type EventItem struct { + Type string `json:"type"` + Reason string `json:"reason"` + Message string `json:"message"` + Count int32 `json:"count"` + ObjectKind string `json:"objectKind"` + ObjectName string `json:"objectName"` + Namespace string `json:"namespace"` + FirstSeen string `json:"firstSeen,omitempty"` + LastSeen string `json:"lastSeen,omitempty"` + Age string `json:"age"` + } + + var items []EventItem + now := time.Now() + for _, e := range evList.Items { + // Timestamps (handle both v1 Event fields) + first := e.FirstTimestamp.Time + last := e.LastTimestamp.Time + if last.IsZero() && !e.EventTime.IsZero() { + last = e.EventTime.Time + } + if first.IsZero() { + 
first = last + } + age := "-" + if !last.IsZero() { + age = helpers.Human(now.Sub(last)) + } + + items = append(items, EventItem{ + Type: e.Type, + Reason: e.Reason, + Message: e.Message, + Count: e.Count, + ObjectKind: e.InvolvedObject.Kind, + ObjectName: e.InvolvedObject.Name, + Namespace: e.InvolvedObject.Namespace, + FirstSeen: first.UTC().Format(time.RFC3339), + LastSeen: last.UTC().Format(time.RFC3339), + Age: age, + }) + } + + w.Header().Set("Content-Type", "application/json") + json.NewEncoder(w).Encode(items) +} + +func Helm_install(w http.ResponseWriter, r *http.Request) { + Authorization(w, r) + + var req struct { + Chart string `json:"Chart"` + Clustername string `json:"Clustername"` + Namespace string `json:"Namespace"` + Release string `json:"Release"` + Repo string `json:"Repo"` + } + if err := json.NewDecoder(r.Body).Decode(&req); err != nil { + http.Error(w, "Invalid JSON body", http.StatusBadRequest) + return + } + if req.Chart == "" || req.Clustername == "" || req.Release == "" || req.Repo == "" { + http.Error(w, "Missing required fields: Chart, Clustername, Release, Repo", http.StatusBadRequest) + return + } + if req.Namespace == "" { + req.Namespace = "default" + } + + // Get kubeconfig for the cluster + kubeconfig, err := getClusterConfig(req.Clustername) + if err != nil { + http.Error(w, "Failed to get cluster config: "+err.Error(), http.StatusInternalServerError) + return + } + + // Write kubeconfig to temp file + tmpFile, err := os.CreateTemp("", "kubeconfig-*.yaml") + if err != nil { + http.Error(w, "Failed to create temp file: "+err.Error(), http.StatusInternalServerError) + return + } + defer os.Remove(tmpFile.Name()) + + if _, err := tmpFile.WriteString(kubeconfig); err != nil { + http.Error(w, "Failed to write kubeconfig: "+err.Error(), http.StatusInternalServerError) + return + } + tmpFile.Close() + + // Add repo if not exists + cmd := exec.Command("helm", "repo", "add", "temp-repo", req.Repo) + cmd.Env = append(os.Environ(), 
"KUBECONFIG="+tmpFile.Name()) + output, err := cmd.CombinedOutput() + if err != nil && !strings.Contains(string(output), "already exists") { + http.Error(w, "Failed to add helm repo: "+string(output), http.StatusInternalServerError) + return + } + + // Update repo + cmd = exec.Command("helm", "repo", "update") + cmd.Env = append(os.Environ(), "KUBECONFIG="+tmpFile.Name()) + output, err = cmd.CombinedOutput() + if err != nil { + http.Error(w, "Failed to update helm repo: "+string(output), http.StatusInternalServerError) + return + } + + // Install chart + cmd = exec.Command("helm", "install", req.Release, req.Chart, "--namespace", req.Namespace, "--create-namespace") + cmd.Env = append(os.Environ(), "KUBECONFIG="+tmpFile.Name()) + output, err = cmd.CombinedOutput() + if err != nil { + http.Error(w, "Failed to install helm chart: "+string(output), http.StatusInternalServerError) + return + } + + w.Header().Set("Content-Type", "application/json") + json.NewEncoder(w).Encode(map[string]string{ + "message": "Helm chart installed successfully", + "release": req.Release, + "namespace": req.Namespace, + "chart": req.Chart, + "output": string(output), + }) +} diff --git a/main.go b/main.go index 6f765ce..6a2aecf 100644 --- a/main.go +++ b/main.go @@ -129,10 +129,17 @@ func main() { router.HandleFunc("/deletecluster", handler.Deletecluster) router.HandleFunc("/clusters", handler.ListUserClusters) router.HandleFunc("/cluster_stats", handler.ClusterStats) + router.HandleFunc("/cluster_resource_usage", handler.Cluster_resource_usage) + router.HandleFunc("/cluster_health", handler.Cluster_health) + router.HandleFunc("/cluster_uptime", handler.Cluster_uptime) + router.HandleFunc("/cluster_performance", handler.Cluster_performance) + router.HandleFunc("/cluster_events", handler.Cluster_events) router.HandleFunc("/connect", handler.Connect) // router.HandleFunc("/cluster_nodes", handler.Cluster_nodes) router.HandleFunc("/cluster_namespaces", handler.Cluster_namespaces) 
router.HandleFunc("/cluster_services", handler.Cluster_services) + router.HandleFunc("/cluster_configmap", handler.Cluster_configmap) + router.HandleFunc("/cluster_secret", handler.Cluster_secret) router.HandleFunc("/cluster_deployments", handler.Cluster_deployments) router.HandleFunc("/cluster_pods", handler.Cluster_pods) router.HandleFunc("/cluster_statefulset", handler.Cluster_statefulset) @@ -140,19 +147,55 @@ func main() { router.HandleFunc("/cluster_jobs", handler.Cluster_jobs) router.HandleFunc("/cluster_replicasets", handler.Cluster_replicasets) + router.HandleFunc("/replicaset_delete", handler.Replicaset_delete) router.HandleFunc("/cluster_replicationcontrollers", handler.Cluster_replicationcontrollers) + router.HandleFunc("/deployment_manifest", handler.Deployment_manifest) + router.HandleFunc("/daemonsets_manifest", handler.DaemonSet_manifest) + router.HandleFunc("/statefulset_manifest", handler.StatefulSet_manifest) + router.HandleFunc("/replicationcontroller_manifest", handler.Replicationcontroller_manifest) + router.HandleFunc("/deployment_rollout", handler.Deployment_rollout) + router.HandleFunc("/daemonsets_rollout", handler.DaemonSet_rollout) + router.HandleFunc("/deployment_scale", handler.Deployment_scale) + router.HandleFunc("/replicaset_scale", handler.ReplicaSet_scale) + router.HandleFunc("/statefulset_scale", handler.StatefulSet_scale) + router.HandleFunc("/replicationcontroller_scale", handler.Replicationcontroller_scale) + router.HandleFunc("/statefulset_rollout", handler.StatefulSet_rollout) router.HandleFunc("/pod_logs", handler.Pod_logs) + router.HandleFunc("/pod_manifest", handler.Pod_manifest) + router.HandleFunc("/replicaset_manifest", handler.Replicaset_manifest) + router.HandleFunc("/jobs_manifest", handler.Jobs_manifest) + router.HandleFunc("/cronjobs_manifest", handler.CronJobs_manifest) + router.HandleFunc("/configmap_manifest", handler.ConfigMap_manifest) + router.HandleFunc("/secret_manifest", handler.Secret_manifest) + 
router.HandleFunc("/service_manifest", handler.Service_manifest) + router.HandleFunc("/cronjobs_trigger", handler.Cronjobs_trigger) + router.HandleFunc("/cronjobs_suspend", handler.Cronjobs_suspend) + router.HandleFunc("/jobs_logs", handler.Jobs_logs) + router.HandleFunc("/pod_create", handler.Pod_fromYaml) + router.HandleFunc("/deployment_create", handler.Deployment_fromYaml) + router.HandleFunc("/daemonsets_create", handler.DaemonSet_fromYaml) + router.HandleFunc("/jobs_create", handler.Job_fromYaml) + router.HandleFunc("/statefulset_create", handler.StatefulSet_fromYaml) + router.HandleFunc("/configmap_create", handler.ConfigMap_fromYaml) + router.HandleFunc("/secret_create", handler.Secret_fromYaml) + router.HandleFunc("/service_create", handler.Service_fromYaml) router.HandleFunc("/pod_exec", handler.Pod_exec) router.HandleFunc("/pod_delete", handler.Pod_delete) router.HandleFunc("/service_delete", handler.Service_delete) + router.HandleFunc("/configmap_delete", handler.Configmap_delete) + router.HandleFunc("/secret_delete", handler.Secret_delete) router.HandleFunc("/deployment_delete", handler.Deployment_delete) router.HandleFunc("/statefulSet_delete", handler.StatefulSet_delete) router.HandleFunc("/daemonsets_delete", handler.Daemonsets_delete) router.HandleFunc("/jobsName_delete", handler.JobsName_delete) - router.HandleFunc("/replicaset_delete", handler.Replicaset_delete) router.HandleFunc("/replicationcontroller_delete", handler.Replicationcontroller_delete) - router.HandleFunc("/cronjob_delete", handler.Cronjob_delete) + router.HandleFunc("/cronjobs_delete", handler.Cronjob_delete) + router.HandleFunc("/cronjobs_create", handler.CronJob_fromYaml) + router.HandleFunc("/cluster_cronjobs", handler.Cluster_cronjobs) + router.HandleFunc("/replicationcontroller_create", handler.ReplicationController_fromYaml) + router.HandleFunc("/replicationcontroller_migrate", handler.Replicationcontroller_migrate) + router.HandleFunc("/helm_install", handler.Helm_install) 
 	router.HandleFunc("/worker_nodes_plan", handler.Worker_nodes_plan)
 
 	//handler.RegsiterClusterRoute(router)
 	// Enable CORS
diff --git a/models/workloads.go b/models/workloads.go
index 1d6bb38..25e96e2 100644
--- a/models/workloads.go
+++ b/models/workloads.go
@@ -29,12 +29,32 @@ type Header struct {
 	Authorization string `bson:"token"`
 }
 
+type Namespace struct {
+	Name   string `json:name`
+	Status string `json:status`
+	Age    string `json:age`
+}
+
+type PodLog struct {
+	Podname        string `json:name`
+	Namespace      string `json:namespace`
+	Clustername    string `json:clustername`
+	Manifest       string `json:manifest`
+	Replicasetname string `json:replicasetname`
+	Statefulset    string `json:Statefulset`
+	Daemonsetsname string `json:Daemonsetsname`
+}
+
 type Pod struct {
-	Name      string `json:name`
-	Namespace string `json:name`
-	Status    string `json:status`
-	Restart   int32  `json:restart`
-	Age       string `json:age`
+	Name      string `json:name`
+	Namespace string `json:namespace`
+	Status    string `json:status`
+	Age       string `json:age`
+	Ready     string `json:ready`
+	Restarts  int32  `json:restarts`
+	Ip        string `json:ip`
+	Node      string `json:node`
+	Image     string `json:image`
 }
 
 type Service struct {
@@ -48,12 +68,17 @@ type Service struct {
 }
 
 type Deployment struct {
-	Name      string `json:name`
-	Namespace string `json:namespace`
-	Available string `json:available`
-	Replicas  int32  `json:replicas`
-	Message   string `json:message`
-	Reason    string `json:reason`
+	Name         string `json:name`
+	Namespace    string `json:namespace`
+	Available    string `json:available`
+	Replicas     int32  `json:replicas`
+	Message      string `json:message`
+	Reason       string `json:reason`
+	Ready        string `json:ready`
+	UpdateToDate int32  `json:uptodate`
+	Age          string `json:age`
+	Image        string `json:image`
+	Strategy     string `json:strategy`
 }
 
 type Daemonset struct {
@@ -78,6 +103,16 @@ type Jobs struct {
 	Age string `json:age`
 }
 
+type CronJob struct {
+	Name             string `json:name`
+	Namespace        string `json:namespace`
+	Schedule         string `json:schedule`
+	Suspend          bool   `json:suspend`
+	Active           int    `json:active`
+	LastScheduleTime string `json:lastScheduleTime`
+	Age              string `json:age`
+}
+
 type Replicaset struct {
 	Name    string `json:name`
 	Desired int32  `json:desired`
@@ -158,3 +193,65 @@ type Uptime struct {
 	LastMaintenance string `json:"lastMaintenance"`
 	NextMaintenance string `json:"nextMaintenance"`
 }
+
+type ReplicasetScaleReq struct {
+	Clustername    string `json:"Clustername"`
+	Namespace      string `json:"Namespace"`
+	Replicasetname string `json:"Replicasetname"`
+	Replicas       int32  `json:"Replicas"`
+}
+
+type ReplicasetRolloutReq struct {
+	Clustername    string `json:"Clustername"`
+	Namespace      string `json:"Namespace"`
+	Replicasetname string `json:"Replicasetname"`
+	Action         string `json:"Action"` // "restart" | "status"
+}
+
+type DeploymentScaleReq struct {
+	Clustername string `json:"Clustername"`
+	Namespace   string `json:"Namespace"`
+	Deployment  string `json:"Deployment"`
+	Replicas    int32  `json:"Replicas"`
+}
+
+type DeploymentRolloutReq struct {
+	Clustername string `json:"Clustername"`
+	Namespace   string `json:"Namespace"`
+	Deployment  string `json:"Deployment"`
+	Action      string `json:"Action"` // "restart" | "status"
+}
+
+type DeamonSetsRolloutReq struct {
+	Clustername    string `json:"Clustername"`
+	Namespace      string `json:"Namespace"`
+	Daemonsetsname string `json:"Daemonsetsname"`
+	Action         string `json:"Action"` // "restart" | "status"
+}
+
+type StatefulsetRolloutReq struct {
+	Clustername string `json:"Clustername"`
+	Namespace   string `json:"Namespace"`
+	Statefulset string `json:"Statefulset"`
+	Action      string `json:"Action"` // "restart" | "status"
+	Replicas    int32  `json:"Replicas"`
+}
+
+type DeploymentRolloutStatus struct {
+	Deployment           string `json:"deployment"`
+	Namespace            string `json:"namespace"`
+	ObservedGeneration   int64  `json:"observedGeneration"`
+	Replicas             int32  `json:"replicas"`
+	UpdatedReplicas      int32  `json:"updatedReplicas"`
+	ReadyReplicas        int32  `json:"readyReplicas"`
+	AvailableReplicas    int32  `json:"availableReplicas"`
+	UnavailableReplicas  int32  `json:"unavailableReplicas"`
+	ConditionProgressing string `json:"conditionProgressing,omitempty"`
+	ConditionAvailable   string `json:"conditionAvailable,omitempty"`
+}
+
+type DeploymentRolloutResp struct {
+	Clustername string                   `json:"cluster"`
+	Message     string                   `json:"message"`
+	Status      *DeploymentRolloutStatus `json:"status,omitempty"`
+}