From f46c83dc0707527c4eac2d97be6ef8f9b3758386 Mon Sep 17 00:00:00 2001 From: Pol Nachtergaele Date: Fri, 24 Oct 2025 17:33:14 +0200 Subject: [PATCH 1/3] kube-native aggregator --- actor.go => containers/aggregator/actor.go | 156 +++++++++--------- {auth => containers/aggregator/auth}/auth.go | 103 +++--------- containers/aggregator/auth/config.go | 56 +++++++ .../aggregator/auth}/signed-requests.go | 6 +- {auth => containers/aggregator/auth}/utils.go | 0 .../aggregator/configuration.go | 7 +- go.mod => containers/aggregator/go.mod | 0 go.sum => containers/aggregator/go.sum | 0 main.go => containers/aggregator/main.go | 126 ++++++++------ .../aggregator/proxy}/proxy.go | 0 .../aggregator/resourceRegistration.go | 13 +- k8s/aggregator/aggregator-config.yaml | 16 ++ k8s/aggregator/aggregator-ns.yaml | 7 + k8s/aggregator/aggregator.yaml | 142 ++++++++++++++++ k8s/uma/uma-proxy-ns.yaml | 7 + k8s/uma/uma-proxy-secret.yaml | 10 ++ k8s/uma/uma-proxy.yaml | 62 +++++++ makefile | 153 ++++++++++------- 18 files changed, 586 insertions(+), 278 deletions(-) rename actor.go => containers/aggregator/actor.go (59%) rename {auth => containers/aggregator/auth}/auth.go (91%) create mode 100644 containers/aggregator/auth/config.go rename {auth => containers/aggregator/auth}/signed-requests.go (98%) rename {auth => containers/aggregator/auth}/utils.go (100%) rename configuration_endpoint.go => containers/aggregator/configuration.go (98%) rename go.mod => containers/aggregator/go.mod (100%) rename go.sum => containers/aggregator/go.sum (100%) rename main.go => containers/aggregator/main.go (54%) rename {proxy => containers/aggregator/proxy}/proxy.go (100%) rename resourceRegistration.go => containers/aggregator/resourceRegistration.go (98%) create mode 100644 k8s/aggregator/aggregator-config.yaml create mode 100644 k8s/aggregator/aggregator-ns.yaml create mode 100644 k8s/aggregator/aggregator.yaml create mode 100644 k8s/uma/uma-proxy-ns.yaml create mode 100644 k8s/uma/uma-proxy-secret.yaml create mode 100644 k8s/uma/uma-proxy.yaml diff --git a/actor.go b/containers/aggregator/actor.go similarity index 59% rename from actor.go rename to containers/aggregator/actor.go index 1391141..2bd6a9d 100644 --- a/actor.go +++ b/containers/aggregator/actor.go @@ -3,16 +3,18 @@ package main import ( "aggregator/auth" "fmt" + "io" + "net/http" + "os" + "strings" + "time" + "github.com/google/uuid" "github.com/sirupsen/logrus" "golang.org/x/net/context" - "io" v1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/util/intstr" - "net/http" - "strings" - "time" ) var serverMux *http.ServeMux @@ -30,25 +32,39 @@ type Actor struct { // TODO This needs to be more generic and extensible func createActor(pipelineDescription string) (Actor, error) { id := uuid.New().String() + ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second) + defer cancel() + + namespace := "aggregator-ns" - podScafolding := &v1.Pod{ + // ---------------------------- + // 1. Pod spec + // ---------------------------- + image := os.Getenv("TRANSFORMATION") + if image == "" { + return Actor{}, fmt.Errorf("TRANSFORMATION env var is not set") + } + + podSpec := &v1.Pod{ ObjectMeta: metav1.ObjectMeta{ Name: id, Labels: map[string]string{ - "app": id, // Important for service selector! 
+ "app": id, }, }, Spec: v1.PodSpec{ Containers: []v1.Container{ { Name: "transformation", - Image: "incremunica", + Image: image, ImagePullPolicy: v1.PullNever, Env: []v1.EnvVar{ - {Name: "PIPELINE_DESCRIPTION", Value: fmt.Sprintf("%v", pipelineDescription)}, - {Name: "HTTP_PROXY", Value: "http://uma-proxy-service.default.svc.cluster.local:8080"}, - {Name: "HTTPS_PROXY", Value: "http://uma-proxy-service.default.svc.cluster.local:8443"}, + {Name: "PIPELINE_DESCRIPTION", Value: pipelineDescription}, {Name: "SSL_CERT_FILE", Value: "/key-pair/uma-proxy.crt"}, + {Name: "HTTP_PROXY", Value: "http://uma-proxy.uma-proxy-ns.svc.cluster.local:8080"}, + {Name: "HTTPS_PROXY", Value: "http://uma-proxy.uma-proxy-ns.svc.cluster.local:8443"}, + {Name: "http_proxy", Value: "http://uma-proxy.uma-proxy-ns.svc.cluster.local:8080"}, + {Name: "https_proxy", Value: "http://uma-proxy.uma-proxy-ns.svc.cluster.local:8443"}, {Name: "LOG_LEVEL", Value: LogLevel.String()}, }, Ports: []v1.ContainerPort{ @@ -77,84 +93,73 @@ func createActor(pipelineDescription string) (Actor, error) { }, } - ctx, _ := context.WithTimeout(context.Background(), 5*time.Second) - pod, err := Clientset.CoreV1().Pods("default").Create(ctx, podScafolding, metav1.CreateOptions{}) + // ---------------------------- + // 2. Create Pod + // ---------------------------- + pod, err := Clientset.CoreV1().Pods(namespace).Create(ctx, podSpec, metav1.CreateOptions{}) + if err != nil { + return Actor{}, fmt.Errorf("failed to create pod in namespace %s: %w", namespace, err) + } + // ---------------------------- + // 3. Service spec + // ---------------------------- serviceName := "id-" + id + "-service" - svc := &v1.Service{ + serviceSpec := &v1.Service{ ObjectMeta: metav1.ObjectMeta{ Name: serviceName, }, Spec: v1.ServiceSpec{ - Type: v1.ServiceTypeNodePort, // Or NodePort if you want external access + Type: v1.ServiceTypeClusterIP, Selector: map[string]string{ - "app": id, // Matches pod's label + "app": id, }, Ports: []v1.ServicePort{ { - Port: 80, // The port your client will use - TargetPort: intstr.FromInt(8080), // The port inside the pod + Port: 80, + TargetPort: intstr.FromInt(8080), }, }, }, } - _, err = Clientset.CoreV1().Services("default").Create(ctx, svc, metav1.CreateOptions{}) - + _, err = Clientset.CoreV1().Services(namespace).Create(ctx, serviceSpec, metav1.CreateOptions{}) if err != nil { - return Actor{}, fmt.Errorf("failed to create pod: %v", err) + return Actor{}, fmt.Errorf("failed to create service in namespace %s: %w", namespace, err) } - watcher, _ := Clientset.CoreV1().Pods("default").Watch(ctx, metav1.ListOptions{ + // ---------------------------- + // 4. 
Wait for Pod Running + // ---------------------------- + watcher, err := Clientset.CoreV1().Pods(namespace).Watch(ctx, metav1.ListOptions{ FieldSelector: fmt.Sprintf("metadata.name=%s", id), }) - - nodes, err := Clientset.CoreV1().Nodes().List(ctx, metav1.ListOptions{}) - if err != nil { - logrus.WithFields(logrus.Fields{"err": err}).Error("Failed to list nodes") - return Actor{}, err - } - - nodeIp := "" - for _, node := range nodes.Items { - for _, addr := range node.Status.Addresses { - if addr.Type == v1.NodeExternalIP || addr.Type == v1.NodeInternalIP { - nodeIp = addr.Address - break - } - } - } - - svc, err = Clientset.CoreV1().Services("default").Get(context.Background(), serviceName, metav1.GetOptions{}) if err != nil { return Actor{}, err } + defer watcher.Stop() - nodePort := 0 - for _, port := range svc.Spec.Ports { - if svc.Spec.Type == v1.ServiceTypeNodePort { - nodePort = int(port.NodePort) - } - } - - if nodePort == 0 { - return Actor{}, fmt.Errorf("no NodePort found for service %s", serviceName) - } - +podLoop: for event := range watcher.ResultChan() { - pod := event.Object.(*v1.Pod) - if pod.Status.Phase == v1.PodRunning { - break - } else if pod.Status.Phase == v1.PodFailed { - return Actor{}, fmt.Errorf("pod failed to start: %v", pod.Status.Reason) + p, ok := event.Object.(*v1.Pod) + if !ok { + continue + } + switch p.Status.Phase { + case v1.PodRunning: + break podLoop + case v1.PodFailed: + return Actor{}, fmt.Errorf("pod failed: %v", p.Status.Reason) } } - logrus.WithFields(logrus.Fields{"url": fmt.Sprintf("http://%s:%d", nodeIp, nodePort)}).Info("Pod is running") - - var handleAllRequests = func(w http.ResponseWriter, r *http.Request) { + // ----------------------------- + // 5. Register handler in aggregator + // ----------------------------- + var requestHandler = func(w http.ResponseWriter, r *http.Request) { logrus.WithFields(logrus.Fields{"actor_id": id, "path": r.URL.Path, "method": r.Method}).Info("Received request for actor") if !auth.AuthorizeRequest(w, r, nil) { + http.Error(w, "Unauthorized", http.StatusUnauthorized) return } @@ -163,32 +168,23 @@ func createActor(pipelineDescription string) (Actor, error) { subPath := strings.TrimPrefix(r.URL.Path, actorPrefix) // Construct the target URL with the subpath and query parameters - targetURL := fmt.Sprintf("http://%s:%d/%s", nodeIp, nodePort, subPath) + serviceURL := fmt.Sprintf("http://%s.%s.svc.cluster.local:80/%s", serviceName, namespace, subPath) if r.URL.RawQuery != "" { - targetURL += "?" + r.URL.RawQuery + serviceURL += "?" 
+ r.URL.RawQuery } - // Create a new request with the same method, headers, and body - req, err := http.NewRequest(r.Method, targetURL, r.Body) + req, err := http.NewRequest(r.Method, serviceURL, r.Body) if err != nil { - logrus.WithFields(logrus.Fields{"err": err}).Error("Error creating request") - http.Error(w, "Failed to create request", http.StatusInternalServerError) + http.Error(w, "failed to create request", http.StatusInternalServerError) return } + req.Header = r.Header.Clone() - // Copy headers from original request - for name, values := range r.Header { - for _, value := range values { - req.Header.Add(name, value) - } - } - - // Make the request to the pod - client := &http.Client{} - resp, err := client.Do(req) + // Make the request to the service + resp, err := http.DefaultClient.Do(req) if err != nil { - logrus.WithFields(logrus.Fields{"err": err, "target_url": req.URL.String()}).Error("Error reaching pod service") - http.Error(w, "Failed to reach pod service", http.StatusInternalServerError) + fmt.Println("Failed to reach actor service:", err.Error()) + http.Error(w, "failed to reach actor service", http.StatusBadGateway) return } defer resp.Body.Close() @@ -206,8 +202,8 @@ func createActor(pipelineDescription string) (Actor, error) { // Check if this is a streaming response (like SSE) contentType := resp.Header.Get("Content-Type") isSSE := strings.Contains(contentType, "text/event-stream") - logrus.WithFields(logrus.Fields{"content_type": contentType, "is_sse": isSSE}).Debug("Content-Type response") + if isSSE { // Handle Server-Sent Events streaming flusher, ok := w.(http.Flusher) @@ -237,10 +233,8 @@ func createActor(pipelineDescription string) (Actor, error) { } } - serverMux.HandleFunc("/"+id+"/", handleAllRequests) - - // Also handle exact match without trailing slash for backwards compatibility - serverMux.HandleFunc("/"+id, handleAllRequests) + serverMux.HandleFunc("/"+id+"/", requestHandler) + serverMux.HandleFunc("/"+id, requestHandler) actor := Actor{ Id: id, diff --git a/auth/auth.go b/containers/aggregator/auth/auth.go similarity index 91% rename from auth/auth.go rename to containers/aggregator/auth/auth.go index 84559c0..d36c977 100644 --- a/auth/auth.go +++ b/containers/aggregator/auth/auth.go @@ -9,18 +9,31 @@ import ( "encoding/json" "errors" "fmt" - "github.com/golang-jwt/jwt/v4" - "github.com/sirupsen/logrus" "io" "math/big" "net/http" + "os" "runtime/debug" "strings" + + "github.com/golang-jwt/jwt/v4" + "github.com/sirupsen/logrus" ) -const AS_ISSUER = "http://localhost:4000/uma" +// AS_ISSUER is read from environment variable +var AS_ISSUER string +var AS_DEFINED bool + +func init() { + AS_ISSUER, AS_DEFINED = os.LookupEnv("AS_ISSUER") +} func AuthorizeRequest(response http.ResponseWriter, request *http.Request, extraPermissions []Permission) bool { + // โœ… If AS_ISSUER is not set, always authorize + if !AS_DEFINED { + return true + } + // check if Authorization header is present, if not create ticket if request.Header.Get("Authorization") == "" { logrus.WithFields(logrus.Fields{"method": request.Method, "path": request.URL.Path}).Warn("๐Ÿ” Authorization header missing") @@ -132,14 +145,6 @@ func AuthorizeRequest(response http.ResponseWriter, request *http.Request, extra return false } -type UmaConfig struct { - jwksUri string - issuer string - permissionEndpoint string - introspectionEndpoint string - resourceRegistrationEndpoint string -} - type Permission struct { ResourceID string `json:"resource_id"` ResourceScopes []string 
`json:"resource_scopes"` @@ -174,7 +179,7 @@ func fetchTicket(permissions map[string][]string, issuer string) (string, error) return "", err } - req, err := http.NewRequest("POST", config.permissionEndpoint, bytes.NewBuffer(jsonData)) + req, err := http.NewRequest("POST", config.PermissionEndpoint, bytes.NewBuffer(jsonData)) if err != nil { return "", err } @@ -208,7 +213,7 @@ func fetchTicket(permissions map[string][]string, issuer string) (string, error) "error while retrieving UMA Ticket: Received status %d with message \"%s\" from '%s'", resp.StatusCode, bodyString, - config.permissionEndpoint, + config.PermissionEndpoint, ) } @@ -278,7 +283,7 @@ func verifyTicket(token string, validIssuers []string) ([]Permission, error) { claims := &UmaClaims{} parser := jwt.NewParser() parsedToken, err := parser.ParseWithClaims(token, claims, func(t *jwt.Token) (interface{}, error) { - return fetchAndSelectKey(config.jwksUri, "TODO") + return fetchAndSelectKey(config.JwksUri, "TODO") // Auth server doesn't have kid's so we just return the first key /* @@ -473,70 +478,6 @@ func decodeJwtPayload(tokenString string) (map[string]interface{}, error) { return payload, nil } -var REQUIRED_METADATA = []string{ - "issuer", - "jwks_uri", - "permission_endpoint", - "introspection_endpoint", - "resource_registration_endpoint", -} - -func fetchUmaConfig(issuer string) (UmaConfig, error) { - resp, err := http.Get(issuer + "/.well-known/uma2-configuration") - if err != nil { - return UmaConfig{}, err - } - defer resp.Body.Close() - - if resp.StatusCode >= 400 { - return UmaConfig{}, fmt.Errorf( - "unable to retrieve UMA Configuration for Authorization Server '%s' from '%s'", - issuer, - issuer+"/.well-known/uma2-configuration", - ) - } - - var configuration map[string]interface{} - if err := json.NewDecoder(resp.Body).Decode(&configuration); err != nil { - return UmaConfig{}, err - } - - var umaConfig UmaConfig - for _, value := range REQUIRED_METADATA { - val, ok := configuration[value] - if !ok { - return UmaConfig{}, fmt.Errorf( - "the Authorization Server Metadata of '%s' is missing attributes %s", - issuer, - value, - ) - } - strVal, ok := val.(string) - if !ok { - return UmaConfig{}, fmt.Errorf( - "the Authorization Server Metadata of '%s' should have string attributes %s", - issuer, - value, - ) - } else { - switch value { - case "issuer": - umaConfig.issuer = strVal - case "jwks_uri": - umaConfig.jwksUri = strVal - case "permission_endpoint": - umaConfig.permissionEndpoint = strVal - case "introspection_endpoint": - umaConfig.introspectionEndpoint = strVal - case "resource_registration_endpoint": - umaConfig.resourceRegistrationEndpoint = strVal - } - } - } - - return umaConfig, nil -} - var idIndex = make(map[string]string) // ResourceScope enum-like type for UMA resource scopes @@ -558,7 +499,7 @@ func CreateResource(resourceId string, resourceScopes []ResourceScope) error { } knownUmaId := idIndex[resourceId] - endpoint := config.resourceRegistrationEndpoint + endpoint := config.ResourceRegistrationEndpoint method := "POST" if knownUmaId != "" { endpoint = endpoint + "/" + knownUmaId @@ -650,7 +591,7 @@ func DeleteResource(resourceId string) { req, err := http.NewRequest( "DELETE", - config.resourceRegistrationEndpoint+"/"+authId, + config.ResourceRegistrationEndpoint+"/"+authId, nil, ) if err != nil { @@ -694,7 +635,7 @@ func DeleteAllResources() { req, err := http.NewRequest( "DELETE", - config.resourceRegistrationEndpoint+authId, + config.ResourceRegistrationEndpoint+authId, &bytes.Buffer{}, ) if err 
!= nil { diff --git a/containers/aggregator/auth/config.go b/containers/aggregator/auth/config.go new file mode 100644 index 0000000..e662c2c --- /dev/null +++ b/containers/aggregator/auth/config.go @@ -0,0 +1,56 @@ +package auth + +import ( + "encoding/json" + "fmt" + "io" + "net/http" +) + +type UmaConfig struct { + Issuer string `json:"issuer"` + JwksUri string `json:"jwks_uri"` + PermissionEndpoint string `json:"permission_endpoint"` + IntrospectionEndpoint string `json:"introspection_endpoint"` + ResourceRegistrationEndpoint string `json:"resource_registration_endpoint"` +} + +var requiredFields = map[string]func(UmaConfig) string{ + "issuer": func(c UmaConfig) string { return c.Issuer }, + "jwks_uri": func(c UmaConfig) string { return c.JwksUri }, + "permission_endpoint": func(c UmaConfig) string { return c.PermissionEndpoint }, + "introspection_endpoint": func(c UmaConfig) string { return c.IntrospectionEndpoint }, + "resource_registration_endpoint": func(c UmaConfig) string { return c.ResourceRegistrationEndpoint }, +} + +func fetchUmaConfig(issuer string) (UmaConfig, error) { + url := fmt.Sprintf("%s/.well-known/uma2-configuration", issuer) + + resp, err := http.Get(url) + if err != nil { + return UmaConfig{}, fmt.Errorf("failed GET on UMA config: %w", err) + } + defer resp.Body.Close() + + if resp.StatusCode >= 400 { + return UmaConfig{}, fmt.Errorf("UMA configuration request failed with status %d at %s", resp.StatusCode, url) + } + + body, err := io.ReadAll(resp.Body) + if err != nil { + return UmaConfig{}, err + } + + var cfg UmaConfig + if err := json.Unmarshal(body, &cfg); err != nil { + return UmaConfig{}, fmt.Errorf("invalid UMA configuration JSON: %w", err) + } + + for key, getter := range requiredFields { + if getter(cfg) == "" { + return UmaConfig{}, fmt.Errorf("missing required UMA metadata field: %s", key) + } + } + + return cfg, nil +} diff --git a/auth/signed-requests.go b/containers/aggregator/auth/signed-requests.go similarity index 98% rename from auth/signed-requests.go rename to containers/aggregator/auth/signed-requests.go index a68ea83..5d7ab25 100644 --- a/auth/signed-requests.go +++ b/containers/aggregator/auth/signed-requests.go @@ -10,12 +10,13 @@ import ( "encoding/json" "encoding/pem" "fmt" - "github.com/sirupsen/logrus" "io" "math/big" "net/http" "os" "time" + + "github.com/sirupsen/logrus" ) var ( @@ -116,7 +117,6 @@ func setPrivateKey(keyFilePath string) { logrus.WithFields(logrus.Fields{"err": err}).Error("Error writing RSA key to file") return } - return } // makeJWKFromRSAPrivateKey produces a Jwk containing the public portion @@ -146,7 +146,7 @@ func makeJWKFromRSAPrivateKey(kid string) (Jwk, error) { func doSignedRequest(req *http.Request) (*http.Response, error) { // 1) Put your domain in the Authorization header as cred="..." // The Node server uses that to fetch your JWK from JWKS - req.Header.Set("Authorization", fmt.Sprintf(`HttpSig cred=%q`, "http://localhost:5000")) + req.Header.Set("Authorization", fmt.Sprintf(`HttpSig cred=%q`, "http://localhost:5000")) // What is this hard-coded value? 
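+	// As the function comment above notes, the server dereferences this cred URL
+	// to fetch our JWKS, so it must point at a host the server can reach; in the
+	// cluster this should come from configuration rather than a hard-coded localhost.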
 	label := "sig1"
diff --git a/auth/utils.go b/containers/aggregator/auth/utils.go
similarity index 100%
rename from auth/utils.go
rename to containers/aggregator/auth/utils.go
diff --git a/configuration_endpoint.go b/containers/aggregator/configuration.go
similarity index 98%
rename from configuration_endpoint.go
rename to containers/aggregator/configuration.go
index d981301..7a7ac1f 100644
--- a/configuration_endpoint.go
+++ b/containers/aggregator/configuration.go
@@ -5,11 +5,12 @@ import (
 	"crypto/sha256"
 	"encoding/hex"
 	"fmt"
-	"github.com/sirupsen/logrus"
 	"io"
 	"net/http"
 	"strconv"
 	"strings"
+
+	"github.com/sirupsen/logrus"
 )
 
 type ConfigurationData struct {
@@ -39,7 +40,7 @@ func startConfigurationEndpoint(mux *http.ServeMux) {
 func (data ConfigurationData) HandleFunc(pattern string, handler func(http.ResponseWriter, *http.Request), resourceScopes []auth.ResourceScope) {
 	data.serveMux.HandleFunc(pattern, handler)
 	auth.CreateResource(
-		fmt.Sprintf("%s://%s:%s%s", Protocol, Host, ServerPort, pattern),
+		fmt.Sprintf("%s://%s:%s%s", Protocol, ExternalHost, ExternalPort, pattern),
 		resourceScopes,
 	)
 }
@@ -234,7 +235,7 @@ func (data ConfigurationData) deleteActor(response http.ResponseWriter, _ *http.
 	response.WriteHeader(http.StatusOK)
 
 	auth.DeleteResource(
-		fmt.Sprintf("%s://%s:%s/config/actors/%s", Protocol, Host, ServerPort, actor.Id),
+		fmt.Sprintf("%s://%s:%s/config/actors/%s", Protocol, ExternalHost, ExternalPort, actor.Id),
 	)
 }
 
diff --git a/go.mod b/containers/aggregator/go.mod
similarity index 100%
rename from go.mod
rename to containers/aggregator/go.mod
diff --git a/go.sum b/containers/aggregator/go.sum
similarity index 100%
rename from go.sum
rename to containers/aggregator/go.sum
diff --git a/main.go b/containers/aggregator/main.go
similarity index 54%
rename from main.go
rename to containers/aggregator/main.go
index f9f1207..cf1a484 100644
--- a/main.go
+++ b/containers/aggregator/main.go
@@ -3,86 +3,116 @@ package main
 import (
 	"aggregator/auth"
 	"aggregator/proxy"
-	"flag"
-	"fmt"
-	"github.com/sirupsen/logrus"
-	"golang.org/x/net/context"
-	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
-	"k8s.io/client-go/kubernetes"
-	"k8s.io/client-go/tools/clientcmd"
-	"k8s.io/client-go/util/homedir"
 	"net/http"
 	"os"
 	"os/signal"
-	"path/filepath"
 	"strings"
 	"syscall"
+
+	"github.com/sirupsen/logrus"
+	"golang.org/x/net/context"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/client-go/kubernetes"
+	"k8s.io/client-go/rest"
 )
 
+// Network configuration
 var Protocol = "http"
-var Host = "localhost"
-var ServerPort = "5000"
-var LogLevel = logrus.InfoLevel
+var InternalHost string
+var InternalPort string
+var ExternalHost string
+var ExternalPort string
+
+// OIDC
+var webId string
+var email string
+var password string
+
+// Logging
+var LogLevel logrus.Level
 
 var Clientset *kubernetes.Clientset
 
 func main() {
-	// Define CLI flags for Solid OIDC configuration
-	webId := flag.String("webid", "", "WebID for Solid OIDC authentication")
-	email := flag.String("email", "", "Email for CSS account login")
-	password := flag.String("password", "", "Password for CSS account login")
-	logLevelPtr := flag.String("log-level", "info", "Logging verbosity (debug, info, warn, error)")
-	flag.Parse()
-
-	logLevelValue := strings.ToLower(*logLevelPtr)
-	parsedLevel, err := logrus.ParseLevel(logLevelValue)
+	// ------------------------
+	// Set up logging
+	// ------------------------
+	parsedLevel, err := logrus.ParseLevel(strings.ToLower(os.Getenv("LOG_LEVEL")))
 	if err != nil {
 		parsedLevel = logrus.InfoLevel
 	}
 	LogLevel = parsedLevel
 	logrus.SetLevel(LogLevel)
 	logrus.SetOutput(os.Stdout)
 
-	config, err := clientcmd.BuildConfigFromFlags("", filepath.Join(homedir.HomeDir(), ".kube", "config"))
-	if err != nil {
-		logrus.WithFields(logrus.Fields{"err": err}).Error("Failed to load Kubernetes config")
-		os.Exit(1)
+	// ------------------------
+	// Read host and port from environment variables
+	// ------------------------
+	InternalHost = os.Getenv("AGGREGATOR_HOST")
+	InternalPort = os.Getenv("AGGREGATOR_PORT")
+
+	if InternalHost == "" || InternalPort == "" {
+		logrus.Fatal("Environment variables AGGREGATOR_HOST and AGGREGATOR_PORT must be set")
 	}
-	Clientset, err = kubernetes.NewForConfig(config)
-	if err != nil {
-		logrus.WithFields(logrus.Fields{"err": err}).Error("Failed to create Kubernetes client")
-		os.Exit(1)
+
+	ExternalHost = os.Getenv("AGGREGATOR_EXTERNAL_HOST")
+	ExternalPort = os.Getenv("AGGREGATOR_EXTERNAL_PORT")
+
+	if ExternalHost == "" || ExternalPort == "" {
+		logrus.Fatal("Environment variables AGGREGATOR_EXTERNAL_HOST and AGGREGATOR_EXTERNAL_PORT must be set")
 	}
 
-	// Validate and warn about Solid OIDC configuration
-	if *webId == "" || *email == "" || *password == "" {
+	// ------------------------
+	// Set up OIDC
+	// ------------------------
+	webId = os.Getenv("WEB_ID")
+	email = os.Getenv("EMAIL")
+	password = os.Getenv("PASSWORD")
+
+	if webId == "" || email == "" || password == "" {
 		logrus.Warn("⚠️ WARNING: Solid OIDC configuration incomplete")
-		if *webId == "" {
-			logrus.Warn("⚠️ Missing --webid argument")
+		if webId == "" {
+			logrus.Warn("⚠️ Missing WEB_ID environment variable")
 		}
-		if *email == "" {
-			logrus.Warn("⚠️ Missing --email argument")
+		if email == "" {
+			logrus.Warn("⚠️ Missing EMAIL environment variable")
 		}
-		if *password == "" {
-			logrus.Warn("⚠️ Missing --password argument")
+		if password == "" {
+			logrus.Warn("⚠️ Missing PASSWORD environment variable")
 		}
 		logrus.Warn("⚠️ UMA proxy will run WITHOUT authentication - requests will be passed through as-is")
 	}
 
-	// Setup proxy with Solid OIDC configuration
+	// ------------------------
+	// Load in-cluster kubeConfig
+	// ------------------------
+	kubeConfig, err := rest.InClusterConfig()
+	if err != nil {
+		logrus.Fatalf("Failed to load in-cluster config: %v", err)
+	}
+
+	Clientset, err = kubernetes.NewForConfig(kubeConfig)
+	if err != nil {
+		logrus.Fatalf("Failed to create Kubernetes client: %v", err)
+	}
+
+	// ------------------------
+	// Set up UMA proxy
+	// ------------------------
 	proxyConfig := proxy.ProxyConfig{
-		WebId:    *webId,
-		Email:    *email,
-		Password: *password,
-		LogLevel: logLevelValue,
+		WebId:    webId,
+		Email:    email,
+		Password: password,
+		LogLevel: LogLevel.String(),
 	}
 	proxy.SetupProxy(Clientset, proxyConfig)
 
+	// ------------------------
+	// Start HTTP server
+	// ------------------------
 	serverMux := http.NewServeMux()
-
 	go func() {
-		logrus.WithFields(logrus.Fields{"port": ServerPort}).Info("Server listening")
-		if err := http.ListenAndServe(":"+ServerPort, serverMux); err != nil {
+		logrus.WithFields(logrus.Fields{"port": InternalPort}).Info("Server listening")
+		if err := http.ListenAndServe(":"+InternalPort, serverMux); err != nil {
 			logrus.WithFields(logrus.Fields{"err": err}).Error("HTTP server failed")
 			os.Exit(1)
 		}
@@ -91,7 +121,7 @@ func main() {
 	InitializeKubernetes(serverMux)
 	startConfigurationEndpoint(serverMux)
 	SetupResourceRegistration()
-	InitAuthProxy(serverMux, fmt.Sprintf("%s://%s:%s",
Protocol, Host, ServerPort))
+	// InitAuthProxy(serverMux, fmt.Sprintf("%s://%s:%s", Protocol, Host, ServerPort))
 
 	stop := make(chan os.Signal, 1)
 	signal.Notify(stop, os.Interrupt, syscall.SIGTERM, syscall.SIGQUIT, syscall.SIGHUP)
diff --git a/proxy/proxy.go b/containers/aggregator/proxy/proxy.go
similarity index 100%
rename from proxy/proxy.go
rename to containers/aggregator/proxy/proxy.go
diff --git a/resourceRegistration.go b/containers/aggregator/resourceRegistration.go
similarity index 98%
rename from resourceRegistration.go
rename to containers/aggregator/resourceRegistration.go
index 062e043..400ffaa 100644
--- a/resourceRegistration.go
+++ b/containers/aggregator/resourceRegistration.go
@@ -5,15 +5,16 @@ import (
 	"context"
 	"encoding/json"
 	"fmt"
-	"github.com/sirupsen/logrus"
-	v1 "k8s.io/api/core/v1"
-	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
-	"k8s.io/apimachinery/pkg/util/intstr"
 	"net"
 	"net/http"
 	"os"
 	"strings"
 	"time"
+
+	"github.com/sirupsen/logrus"
+	v1 "k8s.io/api/core/v1"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/util/intstr"
 )
 
 const resourceSubscriptionPort = "4449"
@@ -147,7 +148,7 @@ func handleResourceRegistration(w http.ResponseWriter, r *http.Request) {
 	response := map[string]interface{}{
 		"status":       "success",
 		"message":      fmt.Sprintf("Resource %s successfully", action),
-		"external_url": fmt.Sprintf("%s://%s:%s/%s%s", Protocol, Host, ServerPort, actorID, registration.Endpoint),
+		"external_url": fmt.Sprintf("%s://%s:%s/%s%s", Protocol, ExternalHost, ExternalPort, actorID, registration.Endpoint),
 		"actor_id":     actorID,
 	}
 
@@ -315,7 +316,7 @@ func registerResourceWithUMA(actorID string, registration *ResourceRegistration)
 	}
 
 	// Create the resource ID - this should match the external URL pattern
-	resourceID := fmt.Sprintf("%s://%s:%s/%s%s", Protocol, Host, ServerPort, actorID, registration.Endpoint)
+	resourceID := fmt.Sprintf("%s://%s:%s/%s%s", Protocol, ExternalHost, ExternalPort, actorID, registration.Endpoint)
 
 	// Register the resource with UMA
 	if err := auth.CreateResource(resourceID, resourceScopes); err != nil {
diff --git a/k8s/aggregator/aggregator-config.yaml b/k8s/aggregator/aggregator-config.yaml
new file mode 100644
index 0000000..047c46b
--- /dev/null
+++ b/k8s/aggregator/aggregator-config.yaml
@@ -0,0 +1,16 @@
+# ------------------------
+# 🔹 ConfigMap
+# ------------------------
+apiVersion: v1
+kind: ConfigMap
+metadata:
+  name: aggregator-config
+  namespace: aggregator-ns
+data:
+  aggregator_external_host: "$MINIKUBE_IP" # Substituted automatically by minikube-deploy (envsubst)
+  # Uncomment as_issuer if you need UMA Authorization
+  # as_issuer: "$UMA-IP" # Replace with your auth server if needed
+  web_id: "$WEB-ID" # Replace with your WebID
+  email: "$EMAIL" # Replace with your email
+  password: "$PASSWORD" # Replace with your password
+  log_level: "info" # Set log level (debug, info, warn, error)
diff --git a/k8s/aggregator/aggregator-ns.yaml b/k8s/aggregator/aggregator-ns.yaml
new file mode 100644
index 0000000..aabfe34
--- /dev/null
+++ b/k8s/aggregator/aggregator-ns.yaml
@@ -0,0 +1,7 @@
+# ------------------------
+# 1️⃣ Namespace
+# ------------------------
+apiVersion: v1
+kind: Namespace
+metadata:
+  name: aggregator-ns # All aggregator resources will live in this dedicated namespace
\ No newline at end of file
diff --git a/k8s/aggregator/aggregator.yaml b/k8s/aggregator/aggregator.yaml
new file mode 100644
index 0000000..00c5faf
--- /dev/null
+++ b/k8s/aggregator/aggregator.yaml
@@ -0,0 +1,142 @@
+# 
------------------------ +# 2๏ธโƒฃ Service Account +# ------------------------ +apiVersion: v1 +kind: ServiceAccount +metadata: + name: aggregator-sa + namespace: aggregator-ns +# ServiceAccount is used to give the aggregator Pod credentials to access the Kubernetes API +--- +# ------------------------ +# 3๏ธโƒฃ Role +# ------------------------ +apiVersion: rbac.authorization.k8s.io/v1 +kind: Role +metadata: + name: aggregator-role + namespace: aggregator-ns +rules: + - apiGroups: [""] # "" indicates the core API group + resources: ["pods", "services"] # The aggregator can manage Pods and Services + verbs: ["get", "list", "create", "delete", "watch"] # Allowed actions on these resources +--- +# ------------------------ +# 4๏ธโƒฃ RoleBinding +# ------------------------ +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + name: aggregator-rolebinding + namespace: aggregator-ns +subjects: + - kind: ServiceAccount + name: aggregator-sa + namespace: aggregator-ns +roleRef: + kind: Role + name: aggregator-role + apiGroup: rbac.authorization.k8s.io +# RoleBinding attaches the Role to the aggregator ServiceAccount so it has permissions +--- +# ------------------------ +# 5๏ธโƒฃ Deployment +# ------------------------ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: aggregator + namespace: aggregator-ns +spec: + replicas: 1 # Only one aggregator Pod for now + selector: + matchLabels: + app: aggregator + template: + metadata: + labels: + app: aggregator + spec: + serviceAccountName: aggregator-sa # Use the ServiceAccount with RBAC permissions + containers: + - name: aggregator + image: aggregator + imagePullPolicy: Never + ports: + - containerPort: 5000 # Aggregator HTTP server listens here + env: + # ------------------------ + # Log level configuration + # ------------------------ + - name: LOG_LEVEL + valueFrom: + configMapKeyRef: + name: aggregator-config + key: log_level + # ------------------------ + # WebID and credentials + # ------------------------ + - name: WEB_ID + valueFrom: + configMapKeyRef: + name: aggregator-config + key: web_id + - name: EMAIL + valueFrom: + configMapKeyRef: + name: aggregator-config + key: email + - name: PASSWORD + valueFrom: + configMapKeyRef: + name: aggregator-config + key: password + # ------------------------ + # In-cluster configuration + # ------------------------ + - name: AGGREGATOR_HOST + value: "aggregator.aggregator-ns.svc.cluster.local" # Internal DNS hostname for other Pods + - name: AGGREGATOR_PORT + value: "5000" # Internal port for in-cluster access + # ------------------------ + # External configuration + # ------------------------ + - name: AGGREGATOR_EXTERNAL_HOST + valueFrom: + configMapKeyRef: + name: aggregator-config + key: aggregator_external_host + - name: AGGREGATOR_EXTERNAL_PORT + value: "30500" # NodePort exposed externally + # ------------------------ + # Authorization configuration + # ------------------------ + - name: AS_ISSUER + valueFrom: + configMapKeyRef: + name: aggregator-config + key: as_issuer + optional: true + # ------------------------ + # Transformation configuration + # ------------------------ + - name: TRANSFORMATION + value: "comunica" +--- +# ------------------------ +# 6๏ธโƒฃ Service +# ------------------------ +apiVersion: v1 +kind: Service +metadata: + name: aggregator + namespace: aggregator-ns +spec: + type: NodePort # Exposes the service externally on a specific port + selector: + app: aggregator # Targets Pods with this label + ports: + - port: 5000 # Internal port inside 
cluster
+      targetPort: 5000 # Container port
+      nodePort: 30500 # External port accessible via Minikube IP
+# Clients outside Minikube can reach the aggregator via http://<minikube-ip>:30500
diff --git a/k8s/uma/uma-proxy-ns.yaml b/k8s/uma/uma-proxy-ns.yaml
new file mode 100644
index 0000000..20d29c3
--- /dev/null
+++ b/k8s/uma/uma-proxy-ns.yaml
@@ -0,0 +1,7 @@
+# ------------------------
+# 1️⃣ Namespace
+# ------------------------
+apiVersion: v1
+kind: Namespace
+metadata:
+  name: uma-proxy-ns
\ No newline at end of file
diff --git a/k8s/uma/uma-proxy-secret.yaml b/k8s/uma/uma-proxy-secret.yaml
new file mode 100644
index 0000000..261ca1d
--- /dev/null
+++ b/k8s/uma/uma-proxy-secret.yaml
@@ -0,0 +1,10 @@
+apiVersion: v1
+kind: Secret
+metadata:
+  name: uma-proxy-key-pair
+  namespace: uma-proxy-ns
+type: Opaque
+stringData:
+  # These will be populated dynamically by the Makefile
+  uma-proxy.crt: PLACEHOLDER_CERT
+  uma-proxy.key: PLACEHOLDER_KEY
diff --git a/k8s/uma/uma-proxy.yaml b/k8s/uma/uma-proxy.yaml
new file mode 100644
index 0000000..6f872b1
--- /dev/null
+++ b/k8s/uma/uma-proxy.yaml
@@ -0,0 +1,62 @@
+
+# ------------------------
+# 2️⃣ Service Account (optional, use default if not needed)
+# ------------------------
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+  name: uma-proxy-sa
+  namespace: uma-proxy-ns
+---
+# ------------------------
+# 3️⃣ Pod
+# ------------------------
+apiVersion: v1
+kind: Pod
+metadata:
+  name: uma-proxy
+  namespace: uma-proxy-ns
+  labels:
+    app: uma-proxy
+spec:
+  serviceAccountName: uma-proxy-sa
+  containers:
+    - name: uma-proxy
+      image: uma-proxy
+      imagePullPolicy: Never
+      ports:
+        - containerPort: 8080
+        - containerPort: 8443
+      volumeMounts:
+        - name: key-pair
+          mountPath: /key-pair
+          readOnly: true
+      env:
+        - name: CERT_PATH
+          value: "/key-pair/uma-proxy.crt"
+        - name: KEY_PATH
+          value: "/key-pair/uma-proxy.key"
+  volumes:
+    - name: key-pair
+      secret:
+        secretName: uma-proxy-key-pair
+---
+# ------------------------
+# 4️⃣ Service
+# ------------------------
+apiVersion: v1
+kind: Service
+metadata:
+  name: uma-proxy
+  namespace: uma-proxy-ns
+spec:
+  type: ClusterIP
+  selector:
+    app: uma-proxy
+  ports:
+    - name: http
+      port: 8080
+      targetPort: 8080
+    - name: https
+      port: 8443
+      targetPort: 8443
\ No newline at end of file
diff --git a/makefile b/makefile
index ffedf03..252a80c 100644
--- a/makefile
+++ b/makefile
@@ -1,71 +1,112 @@
-# Declare phony targets so make always runs these commands
-.PHONY: minikube-init minikube-start minikube-clean minikube-dashboard-start containers-all containers-build containers-load run minikube-generate-key-pair
+.PHONY: minikube-init minikube-start minikube-stop minikube-dashboard-start \
+	containers-build containers-load containers-all \
+	minikube-deploy \
+	minikube-clean \
+	expose-aggregator
 
-# 'init-minikube' target: start minikube, build images, then load them into minikube
-minikube-init: minikube-start containers-build containers-load minikube-generate-key-pair minikube-dashboard-start
+# ------------------------
+# Minikube targets
+# ------------------------
 
-# deploy minikube dashboard
-minikube-dashboard-start:
-	@echo "🚀 Starting kubectl proxy for Minikube dashboard..."
-	@minikube dashboard
+# Initialize Minikube, build/load containers, and start the dashboard (deploy manifests with 'make minikube-deploy')
+minikube-init: minikube-start containers-all minikube-dashboard-start
 
-# Set up key pair for uma-proxy
-minikube-generate-key-pair:
-	@echo "🔑 Generating key pair for uma-proxy..."
-	@openssl genrsa -out uma-proxy.key 4096
-	@openssl req -x509 -new -nodes -key uma-proxy.key -sha256 -days 3650 -out uma-proxy.crt -subj "/CN=Aggregator MITM CA"
-	@echo "🗑️ Deleting existing Kubernetes secret for uma-proxy key pair if it exists..."
-	@kubectl delete secret uma-proxy-key-pair -n default --ignore-not-found
-	@echo "🔐 Creating Kubernetes secret for uma-proxy key pair..."
-	@kubectl create secret generic uma-proxy-key-pair --from-file=uma-proxy.crt=uma-proxy.crt --from-file=uma-proxy.key=uma-proxy.key -n default
-	@echo "🗑️ Cleaning up generated key pair files..."
-	@rm uma-proxy.crt uma-proxy.key
-
-# Start minikube with Docker driver
+# Start Minikube with Docker driver
 minikube-start:
 	@echo "🚀 Starting Minikube with Docker driver..."
 	@minikube start --driver=docker
 
-# Stop and delete the minikube cluster (clean up)
-minikube-clean:
-	@echo "🧹 Stopping and deleting Minikube cluster..."
+# Stop and delete the Minikube cluster (clean up)
+minikube-stop:
+	@echo "🧹 Stopping and deleting Minikube..."
 	@minikube stop
 	@minikube delete
 
-# Build and load Docker images for all containers or a specific container
-containers-all: containers-build containers-load
+# Deploy Minikube dashboard
+minikube-dashboard-start:
+	@echo "🚀 Starting kubectl proxy for Minikube dashboard..."
+	@minikube dashboard
 
-# Build Docker images for a specific container or all containers
+# ------------------------
+# Container targets
+# ------------------------
+
+# Build Docker images
 containers-build:
-	@if [ -n "$(name)" ]; then \
-		echo "📦 Building image for container: $(name)"; \
-		docker build containers/$(name) -t $(name); \
-	else \
-		echo "🔨 Building Docker images for all containers..."; \
-		for dir in containers/*; do \
-			if [ -d "$$dir" ]; then \
-				echo "📦 Building image for container: $$(basename $$dir)"; \
-				docker build $$dir -t $$(basename $$dir); \
-			fi; \
-		done; \
-	fi
+	@echo "🔨 Building Docker images for containers..."
+	@for dir in containers/*; do \
+		if [ -d "$$dir" ]; then \
+			name=$$(basename $$dir); \
+			echo "📦 Building $$name..."; \
+			docker build "$$dir" -t "$$name:latest"; \
+		fi \
+	done
 
-# Load Docker images for a specific container or all containers into Minikube
+# Load Docker images into Minikube
 containers-load:
-	@if [ -n "$(name)" ]; then \
-		echo "📥 Loading image: $(name) into Minikube"; \
-		minikube image load $(name); \
-	else \
-		echo "📤 Loading Docker images into Minikube..."; \
-		for dir in containers/*; do \
-			if [ -d "$$dir" ]; then \
-				echo "📥 Loading image: $$(basename $$dir) into Minikube"; \
-				minikube image load $$(basename $$dir); \
-			fi; \
-		done; \
-	fi
+	@echo "📤 Loading container images into Minikube..."
+	@for dir in containers/*; do \
+		if [ -d "$$dir" ]; then \
+			name=$$(basename $$dir); \
+			echo "📥 Loading $$name into Minikube..."; \
+			minikube image load "$$name:latest"; \
+		fi \
+	done
+
+# Build and load all containers
+containers-all: containers-build containers-load
+
+# ------------------------
+# Deploy YAML manifests with temporary key pair for uma-proxy
+# ------------------------
+
+minikube-deploy:
+	@echo "🔑 Generating temporary key pair for uma-proxy..."
+	@openssl genrsa -out uma-proxy.key 4096
+	@openssl req -x509 -new -nodes -key uma-proxy.key -sha256 -days 3650 -out uma-proxy.crt -subj "/CN=Aggregator MITM CA"
+	@echo "📄 Applying namespaces..."
+	@kubectl apply -f k8s/uma/uma-proxy-ns.yaml
+	@kubectl apply -f k8s/aggregator/aggregator-ns.yaml
+	@echo "🔐 Creating Kubernetes secret for uma-proxy..."
+	@kubectl create secret generic uma-proxy-key-pair \
+		--from-file=uma-proxy.crt=uma-proxy.crt \
+		--from-file=uma-proxy.key=uma-proxy.key \
+		-n uma-proxy-ns --dry-run=client -o yaml | kubectl apply -f -
+	@echo "📄 Applying resources..."
+	@export MINIKUBE_IP=$$(minikube ip); \
+	envsubst < k8s/aggregator/aggregator-config.yaml | kubectl apply -f -; \
+	kubectl apply -f k8s/uma/uma-proxy.yaml; \
+	kubectl apply -f k8s/aggregator/aggregator.yaml
+	@echo "🗑️ Cleaning up generated key pair files..."
+	@rm uma-proxy.crt uma-proxy.key
+	@echo "✅ Resources deployed to Minikube"
+
+# ------------------------
+# Cleanup Minikube deployment
+# ------------------------
+
+minikube-clean:
+	@echo "🧹 Deleting aggregator actor pods and services in aggregator-ns..."
+	@kubectl delete pods,services -n aggregator-ns --all --ignore-not-found
+	@echo "🧹 Deleting aggregator deployment, service account, role, rolebinding..."
+	@kubectl delete deployment,serviceaccount,role,rolebinding -n aggregator-ns --all --ignore-not-found
+	@echo "🧹 Deleting aggregator namespace..."
+	@kubectl delete namespace aggregator-ns --ignore-not-found
+	@echo "🧹 Deleting uma-proxy pod and service in uma-proxy-ns..."
+	@kubectl delete pods,services -n uma-proxy-ns --all --ignore-not-found
+	@echo "🧹 Deleting uma-proxy service account..."
+	@kubectl delete serviceaccount -n uma-proxy-ns uma-proxy-sa --ignore-not-found
+	@echo "🧹 Deleting uma-proxy namespace..."
+	@kubectl delete namespace uma-proxy-ns --ignore-not-found
+	@echo "✅ Cleanup complete."
+
+# ------------------------
+# Aggregator port-forward for WSL
+# ------------------------
 
-# Run the Go application
-run:
-	@echo "🏃 Running the Go application..."
-	@go run .
+# Start port-forward
+expose-aggregator:
+	@echo "🚀 Port-forwarding aggregator to localhost:5000..."
+	@kubectl port-forward -n aggregator-ns deployment/aggregator 5000:5000

From 56cc2a9ee2681870c2b0cb358f29010c5a69de98 Mon Sep 17 00:00:00 2001
From: Pol Nachtergaele
Date: Mon, 27 Oct 2025 15:18:20 +0100
Subject: [PATCH 2/3] add dockerfile

---
 containers/aggregator/Dockerfile | 26 ++++++++++++++++++++++++++
 1 file changed, 26 insertions(+)
 create mode 100644 containers/aggregator/Dockerfile

diff --git a/containers/aggregator/Dockerfile b/containers/aggregator/Dockerfile
new file mode 100644
index 0000000..ab68cc1
--- /dev/null
+++ b/containers/aggregator/Dockerfile
@@ -0,0 +1,26 @@
+# ------------------------
+# Stage 1: Build Go aggregator
+# ------------------------
+FROM golang:1.23-alpine AS build
+
+WORKDIR /app
+COPY go.mod go.sum ./
+RUN go mod download
+
+COPY . .
+RUN go build -o aggregator .
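+# NOTE: golang:alpine images ship without a C toolchain; if a cgo dependency is
+# ever introduced, prefer an explicit static build (CGO_ENABLED=0 go build).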
+ +# ------------------------ +# Stage 2: Minimal runtime +# ------------------------ +FROM alpine:latest + +WORKDIR /root/ +# Copy the compiled binary from build stage +COPY --from=build /app/aggregator . + +# Expose internal port +EXPOSE 5000 + +# Run the aggregator +CMD ["./aggregator"] From 724c7695bc3de160249bd0215e6d5448eaa131da Mon Sep 17 00:00:00 2001 From: Pol Nachtergaele Date: Tue, 28 Oct 2025 12:08:37 +0100 Subject: [PATCH 3/3] uma bugfix --- containers/aggregator/main.go | 31 ++++++++-------- makefile | 70 +++++++++++++++++++++++------------ uma-proxy.crt | 30 +++++++++++++++ uma-proxy.key | 52 ++++++++++++++++++++++++++ 4 files changed, 145 insertions(+), 38 deletions(-) create mode 100644 uma-proxy.crt create mode 100644 uma-proxy.key diff --git a/containers/aggregator/main.go b/containers/aggregator/main.go index cf1a484..73f9737 100644 --- a/containers/aggregator/main.go +++ b/containers/aggregator/main.go @@ -2,7 +2,6 @@ package main import ( "aggregator/auth" - "aggregator/proxy" "net/http" "os" "os/signal" @@ -98,13 +97,14 @@ func main() { // ------------------------ // Set up UMA proxy // ------------------------ - proxyConfig := proxy.ProxyConfig{ + /* proxyConfig := proxy.ProxyConfig{ WebId: webId, Email: email, Password: password, LogLevel: LogLevel.String(), } proxy.SetupProxy(Clientset, proxyConfig) + */ // ------------------------ // Start HTTP server @@ -128,42 +128,43 @@ func main() { <-stop // wait for signal logrus.Info("Shutting down gracefully...") - // remove all pods (including proxy?) + + // ------------------------ + // 2. Remove remaining pods + // ------------------------ pods, err := Clientset.CoreV1().Pods("default").List(context.Background(), metav1.ListOptions{}) if err != nil { - logrus.WithFields(logrus.Fields{"err": err}).Error("Failed to list pods during shutdown") - os.Exit(1) + logrus.Fatal(err) } - for _, pod := range pods.Items { err := Clientset.CoreV1().Pods(pod.Namespace).Delete(context.Background(), pod.Name, metav1.DeleteOptions{}) if err != nil { - logrus.WithFields(logrus.Fields{"namespace": pod.Namespace, "name": pod.Name, "err": err}).Error("Failed to delete pod") + logrus.Errorf("Failed to delete pod %s/%s: %v", pod.Namespace, pod.Name, err) } else { - logrus.WithFields(logrus.Fields{"namespace": pod.Namespace, "name": pod.Name}).Info("Deleted pod") + logrus.Infof("Deleted pod: %s/%s", pod.Namespace, pod.Name) } } + // ------------------------ + // 3. Remove remaining services + // ------------------------ services, err := Clientset.CoreV1().Services("default").List(context.Background(), metav1.ListOptions{}) if err != nil { - logrus.WithFields(logrus.Fields{"err": err}).Error("Failed to list services during shutdown") - os.Exit(1) + logrus.Fatal(err) } - for _, svc := range services.Items { - // Skip the critical Kubernetes API service if svc.Name == "kubernetes" { continue } err := Clientset.CoreV1().Services(svc.Namespace).Delete(context.Background(), svc.Name, metav1.DeleteOptions{}) if err != nil { - logrus.WithFields(logrus.Fields{"namespace": svc.Namespace, "name": svc.Name, "err": err}).Error("Failed to delete service") + logrus.Errorf("Failed to delete service %s/%s: %v", svc.Namespace, svc.Name, err) } else { - logrus.WithFields(logrus.Fields{"namespace": svc.Namespace, "name": svc.Name}).Info("Deleted service") + logrus.Infof("Deleted service: %s/%s", svc.Namespace, svc.Name) } } - logrus.Info("Cleanup complete. Exiting.") + logrus.Infof("Cleanup complete. 
Exiting.")
 
 	// let AS know that all resources need to be deleted
 	auth.DeleteAllResources()
diff --git a/makefile b/makefile
index 252a80c..9e10dc7 100644
--- a/makefile
+++ b/makefile
@@ -1,4 +1,4 @@
-.PHONY: minikube-init minikube-start minikube-stop minikube-dashboard-start \
+.PHONY: minikube-init minikube-init-wsl minikube-start minikube-start-wsl minikube-stop minikube-dashboard-start \
 	containers-build containers-load containers-all \
-	minikube-deploy \
-	minikube-clean \
-	expose-aggregator
+	minikube-deploy minikube-deploy-uma \
+	minikube-clean
@@ -12,12 +12,17 @@
 
 # Initialize Minikube, build/load containers, and start the dashboard (deploy manifests with 'make minikube-deploy')
 minikube-init: minikube-start containers-all minikube-dashboard-start
+minikube-init-wsl: minikube-start-wsl containers-all minikube-dashboard-start
 
 # Start Minikube with Docker driver
 minikube-start:
 	@echo "🚀 Starting Minikube with Docker driver..."
 	@minikube start --driver=docker
 
+minikube-start-wsl:
+	@echo "🚀 Starting Minikube with Docker driver..."
+	@minikube start --driver=docker --ports=127.0.0.1:30500:30500
+
 # Stop and delete the Minikube cluster (clean up)
 minikube-stop:
 	@echo "🧹 Stopping and deleting Minikube..."
@@ -36,24 +41,41 @@ minikube-dashboard-start:
 # Build Docker images
 containers-build:
 	@echo "🔨 Building Docker images for containers..."
-	@for dir in containers/*; do \
+	@if [ -n "$(CONTAINER)" ]; then \
+		dir="containers/$(CONTAINER)"; \
 		if [ -d "$$dir" ]; then \
-			name=$$(basename $$dir); \
-			echo "📦 Building $$name..."; \
-			docker build "$$dir" -t "$$name:latest"; \
+			echo "📦 Building $(CONTAINER)..."; \
+			docker build "$$dir" -t "$(CONTAINER):latest"; \
+		else \
+			echo "❌ Container $(CONTAINER) does not exist!"; \
+			exit 1; \
 		fi \
-	done
+	else \
+		for dir in containers/*; do \
+			if [ -d "$$dir" ]; then \
+				name=$$(basename $$dir); \
+				echo "📦 Building $$name..."; \
+				docker build "$$dir" -t "$$name:latest"; \
+			fi \
+		done \
+	fi
 
 # Load Docker images into Minikube
 containers-load:
 	@echo "📤 Loading container images into Minikube..."
-	@for dir in containers/*; do \
-		if [ -d "$$dir" ]; then \
-			name=$$(basename $$dir); \
-			echo "📥 Loading $$name into Minikube..."; \
-			minikube image load "$$name:latest"; \
-		fi \
-	done
+	@if [ -n "$(CONTAINER)" ]; then \
+		name="$(CONTAINER)"; \
+		echo "📥 Loading $$name into Minikube..."; \
+		minikube image load "$$name:latest"; \
+	else \
+		for dir in containers/*; do \
+			if [ -d "$$dir" ]; then \
+				name=$$(basename $$dir); \
+				echo "📥 Loading $$name into Minikube..."; \
+				minikube image load "$$name:latest"; \
+			fi \
+		done \
+	fi
 
 # Build and load all containers
 containers-all: containers-build containers-load
@@ -63,6 +85,18 @@ containers-all: containers-build containers-load
 # ------------------------
 
 minikube-deploy:
+	@echo "📄 Applying namespaces..."
+	@kubectl apply -f k8s/aggregator/aggregator-ns.yaml
+	@echo "📄 Applying resources..."
+	@export MINIKUBE_IP=$$(minikube ip); \
+	envsubst < k8s/aggregator/aggregator-config.yaml | kubectl apply -f -; \
+	kubectl apply -f k8s/aggregator/aggregator.yaml
+	@echo "✅ Resources deployed to Minikube"
+
+minikube-deploy-uma:
 	@echo "🔑 Generating temporary key pair for uma-proxy..."
@openssl genrsa -out uma-proxy.key 4096 @openssl req -x509 -new -nodes -key uma-proxy.key -sha256 -days 3650 -out uma-proxy.crt -subj "/CN=Aggregator MITM CA" @@ -82,7 +116,6 @@ minikube-deploy: @echo "๐Ÿ—‘๏ธ Cleaning up generated key pair files..." @rm uma-proxy.crt uma-proxy.key @echo "โœ… Resources deployed to Minikube" - # ------------------------ # Cleanup Minikube deployment # ------------------------ @@ -101,12 +134,3 @@ minikube-clean: @echo "๐Ÿงน Deleting uma-proxy namespace..." @kubectl delete namespace uma-proxy-ns --ignore-not-found @echo "โœ… Cleanup complete." - -# ------------------------ -# Aggregator port-forward for WSL -# ------------------------ - -# Start port-forward -expose-aggregator: - @echo "๐Ÿš€ Port-forwarding aggregator to localhost:5000..." - @kubectl port-forward -n aggregator-ns deployment/aggregator 5000:5000 diff --git a/uma-proxy.crt b/uma-proxy.crt new file mode 100644 index 0000000..8656bac --- /dev/null +++ b/uma-proxy.crt @@ -0,0 +1,30 @@ +-----BEGIN CERTIFICATE----- +MIIFGzCCAwOgAwIBAgIUSfGbJeAp/V3EKgri2ELeVgD76KkwDQYJKoZIhvcNAQEL +BQAwHTEbMBkGA1UEAwwSQWdncmVnYXRvciBNSVRNIENBMB4XDTI1MTAyNzE1MzAy +OFoXDTM1MTAyNTE1MzAyOFowHTEbMBkGA1UEAwwSQWdncmVnYXRvciBNSVRNIENB +MIICIjANBgkqhkiG9w0BAQEFAAOCAg8AMIICCgKCAgEAtIzXFl54pLjkAEWZU/jh +oRW1AZ3b0Tvvo7F/86tLXR9b57lSHixIECzqLQk22wg8sYwi9tJvVpn9rq4OVnRq +1ZkUyupfIoeU99KCku9cY7O927YGrWj5klzfKl96nSK+RK8DjkEKjx0vJD1HPDSz +nKFN+TWCNgiyGkgeBUd/VQIBu3LXOdgCVErVndNgO7v8owBHanSe2/rV/2ICPGG2 +xmOjc5BNQzpm8K28MD5Bigi4EUxQjuIO4EY18CkBHCTr4gfxbodpPJIAKPQ6Xd/9 +dbYq3F5mTI2U7XLLOBWusb3HItSZKKnKDGdBu9cLgOJq5bmkFhGpZz28twcqfHr6 +sE8hKitsafP/YqRm5kzSDQ0sKLCv/Id5AJJQQ3/fXR5cJ+BvVn0WQy+GsOCW8tDv +gJGytMykb5ysF5cU8h8MqpTtW2WYZPdXtPNzTlP+LkM6iOnI7qRqebC+uCVZERVA +lsnYDaXogaNa68/OCgD+lHey2hatla9TaPkgYHQ7MBhqIm+pULa+RwEjKOKnGlNY +HgrQMIf9X08O6cbdwTAtOliJF+4rTlqWJClXE0oYJnZwgpUw+5dzKMHmm2nvY7M4 +tPzqUqKtETnWxdCF7J7prdI4hwMVU+y+EgkPK889QB6fZ7w2o29VXJlzW5XxuJNF +Q9QS0qKbWI33eEccVtBTQZMCAwEAAaNTMFEwHQYDVR0OBBYEFIvAUwgu8/3buMLN +azUC07gW7MMHMB8GA1UdIwQYMBaAFIvAUwgu8/3buMLNazUC07gW7MMHMA8GA1Ud +EwEB/wQFMAMBAf8wDQYJKoZIhvcNAQELBQADggIBAD1q62eI7zYZzWyq8lPvxko9 +0vk48LBK9qoZwyOyc9INOTaaRiWQgpP3neqwuknjUMx+Qpi9oASLD7V9fx4YU5vg +kfifX514ZYe3kK75t2g8MmJqnqwGmTZqBL4z39AG363+OPIoCiDb4uvnp77NVnrQ +89YK9HrQtPuE6lMQfW16AkIbXjQ8EZI044tIHNNQ+mdNucN0EgnXAbgJxflsT8lV +92wlmlZNOMDQM4sLZOs7fg3A3B9CwRQc+ACunvkCOiH4N7RV8nWFfR8ogtJC7LP9 +x5aMfvP0K7Wlpf+8vCy/TxP8FBVBYgNHr6jhEIe/Uq7ydWpfngzEyjJDjEkqWTAg +OC/pY/THiGAxYA+X/rWJhx+pEI9Nhbg4xyBo4B7yR8D8IvvVHYjb2SMM2DJQ/TRB +5eqojd+sz0Xcmr5XwZR1IhzfE2vUmVh7IOVmHSLzlaKUbxGwPzku7uHble4nZ9Ft +5ty0Du0YlRY1rcXeOnOAkIK8w/fo+nuy/ty1Xs7gMyqq1jYJvZ45XtScg4DhoVrU +AtbqEV8BuUynbAZCwdwMRRrDpkuf6aFF/uVdQmauRA2xdCgmEqZBMwft5YTXGZte +rs91JfwkU4eDFbLsBnhKSdHEbQ0WudXpzqsy8BeOIIGhT7UFg/C6VxxGPakIEd6s +xuuTj3t4+ZXATr5jDt8m +-----END CERTIFICATE----- diff --git a/uma-proxy.key b/uma-proxy.key new file mode 100644 index 0000000..21b003f --- /dev/null +++ b/uma-proxy.key @@ -0,0 +1,52 @@ +-----BEGIN PRIVATE KEY----- +MIIJQgIBADANBgkqhkiG9w0BAQEFAASCCSwwggkoAgEAAoICAQC0jNcWXnikuOQA +RZlT+OGhFbUBndvRO++jsX/zq0tdH1vnuVIeLEgQLOotCTbbCDyxjCL20m9Wmf2u +rg5WdGrVmRTK6l8ih5T30oKS71xjs73btgataPmSXN8qX3qdIr5ErwOOQQqPHS8k +PUc8NLOcoU35NYI2CLIaSB4FR39VAgG7ctc52AJUStWd02A7u/yjAEdqdJ7b+tX/ +YgI8YbbGY6NzkE1DOmbwrbwwPkGKCLgRTFCO4g7gRjXwKQEcJOviB/Fuh2k8kgAo +9Dpd3/11tircXmZMjZTtcss4Fa6xvcci1JkoqcoMZ0G71wuA4mrluaQWEalnPby3 +Byp8evqwTyEqK2xp8/9ipGbmTNINDSwosK/8h3kAklBDf99dHlwn4G9WfRZDL4aw +4Jby0O+AkbK0zKRvnKwXlxTyHwyqlO1bZZhk91e083NOU/4uQzqI6cjupGp5sL64 
+JVkRFUCWydgNpeiBo1rrz84KAP6Ud7LaFq2Vr1No+SBgdDswGGoib6lQtr5HASMo +4qcaU1geCtAwh/1fTw7pxt3BMC06WIkX7itOWpYkKVcTShgmdnCClTD7l3Moweab +ae9jszi0/OpSoq0ROdbF0IXsnumt0jiHAxVT7L4SCQ8rzz1AHp9nvDajb1VcmXNb +lfG4k0VD1BLSoptYjfd4RxxW0FNBkwIDAQABAoICAEFdyDVKCKWiZrGOgR98uLto +L3shWscayvzZs3hVqNhTFSQE3S0dXQ22JSYe8uRDzC1/jd0MfNC+ubB3RauZRGCG +ppd8WOsYaR978FggHrPeDDuyYCKCIfESfJ1Lqgmf3YDiWAg1lgwQRdP594NdlTV8 +1M0HyVekuE55k5zXsMrnCheCUVeY0+cOnN4h+/JEcgfVfwWUqTr9SmLGagbQXarf +wfqB4OcZCtHSJJLWx0WhOHrSxUulnjsmUDrdrZAx8dCIjak1jy0lsrchDMyV3p6y +UDwuVR0cGTIKYHayquegzKrYi4bESelJrOvDxF9qGceH+VcgF9PP7zXYe1JL99wm +5aUs5QrS7dIPi7Izqw79KobVIZ09StEBkBGdrjvgKRcLS+5i/bnUc+gu9sGGOSOj +qUzLXGaoGJY4CNDHMQ4zq1duZpQmhxrZnsUWacQ4ekRJFcRKN+8Zlg/JTMaryIE4 +NUWsj/29vRCK6cGNeoOTN3kpovEM6d3cP3CEMQYnHJ6FHFEwYBl4TBZHXxYngzFc +Ql3ujqJmSdozIc2hdOjCEfJYB947wPlSYCrP/Rf0PJxuHyZs0u7SnEi1rH3zv1Xl +ms+tgGgyPlTOgUiB3lf6hcUXDeJ47qoI48WqU0lxivayIATCs0UEBcZvdHK9oQfv +1yy+WcNpn6dnBRZqVQzxAoIBAQDpgUQZTQHb9rF9rB6Bc4AIkp0kgWX6CP4H/u+j +vRjL+fPXBUcdpz3wUmAd5XKpTyMfj2b7YPGxVPxmovu4LHknUZsLQMFJ0WlfrUop +3/CboOV37rofquoy0ZW9IY5kDeLfaCmeJ3+sAiThYhNsB4eoNvTbRkBcgBUu12GM +BAHcrtnEXQnqMaEIzNQ6YVcIW73ydOVnT3+nG76ZKn8N7X429uLde9v9n+3gQl3D +Ew1hCJdgZAiCPpeGntjNHYUmVeOrrc2Cm4phOrxJSwwzcmDffNQS1Vm/7COXyd4W +ujF+x1bElunT4WcM6nDreCOfwWPnVyWEcESc28nCLKssyZdbAoIBAQDF8Zhhndm/ +67j6/fU8REsMltOBeV2K1jFtSxxntQSrwKbEXMge1hq0f6wrb/4vmnm6HVfjOdMK +0jLzmnfJyLCE+O1xzyXGdMToLtwnzMuvBJtp46ROwXeAX65wUwudR2hZlYlFjGUy +/h8dAdKpVa7pzHz2TqaJcTuIzRI+5EguK/jn8miP9r37pMCqPjckmq3DXFaFpqqz +2Q2VV9ejv432qjWPzcBU1coClqeQc2uQouWvWd/uk1AUTtAh1E0NgNfNaiq6c/jy +oLrlmpbYYvbW5jaf/OTV67fq7FmOQBnCyVYPxg+cz+YQIqZbE7eQEl05L4FA1wUw +RL7xuy+nQUwpAoIBABiAnQG+p+xKBbP3RLg2w3+iyuSSw32qReXW5ZSb+iclUmYE +wsKk0wYNztI0SoTjrf7Y0Giecp42Gco3Xgykdg4fLRLQN7gkRldo2rhsLFZYWtvr +7BH4oFy2y2yuQb/UCTOCPaO/51DIZNBVt04FuEF5itYbmBNcyVxB85V6sitTGbzW +On0duhUvUTHTFwvkfJkdlRjzKaUCb0ypPe3bNvxTidjN73a8unxm2mnm7Od6vE+8 +D3yDTuksb/mOGHO+XYadVUxYP/wV6r4SJvSbEvlAowxa4PEXuWBrKWffH+R6zGsS +nvTB2Utsqm67C5nu9CnJZkFwenKvSYubnlazzLMCggEBAIPncrvaDHiwBBx9jyYR +/FDvURI+Ze9WvKkwHEWfgvVQ89B2Y4To9/LAAWMQxjnCnss/Mongxpm3aOycSqbU +bYyutMvaVjnhJjXyxBJhjnmagFYXtC4nDhMlr4B6VosAQrEvzMHjxKwuW11hEZh1 +2JFECyG3V5LPOfT3cukmBlN+SeeGydblfcRrDVhlDSn/STueFKoLXkVqpnqWhsQ/ +qcJtujOR0FC4Bl2jNxYzU8KWSqiHI4o/8GUX/+oLJy2QZj4kOUT2czZ7lQ/b7BlW +GVcrIcuLIpLK+Uw7I5J/dKswr4DjYxQEFjM6Y9DdWr1O/4BT/7XNk0+PAA7lxeOW +x8ECggEAP1DCxooscHhVc77Qdnnj+Ar/xXKThEk9nYrMdy//CCysBiEa1ZAQ+bzD +i1VWSR+M0wheH+fWuFqfNApb2zaNA7Ke1klmmygA8RmljMDNuUAiw6kGiTIie8k2 +CIgiQZftbkuWkEebZVibf7IT5F7x+ujhBln/j2jXy8TqY7t8LmN35P1yueA3Zoys +xsm8R/XFA/Pfdg2mZ/D1Ogm//CZMkylS8rX0EWFEP9lcqCbL8DdzANoP7E/lXsq1 +6dbde4so4xbbjHbogkTOSib5VQx+gqoc+XBrnwynAh/B0xW6cGrH5/D+NrRT8XFh +yREkKdb9BR17kFFHyjXZCyo2kpuu5w== +-----END PRIVATE KEY-----
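
A quick smoke test once the series is applied and 'make minikube-deploy' has run — a minimal sketch; the namespace and NodePort come from k8s/aggregator/aggregator.yaml, while the exact HTTP path served by the aggregator ("/config" here) is an assumption based on the naming in configuration.go:

    # The aggregator Deployment and Service should be running in aggregator-ns
    kubectl get pods,services -n aggregator-ns
    # The HTTP server is exposed on NodePort 30500 at the Minikube IP
    curl "http://$(minikube ip):30500/config"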