I have been trying to set up Argo CD on EKS with a Helm chart and an AWS ALB in front of a proxy (NGINX ingress), and it is not working at all. After deploying to the EKS cluster, the UI works great if I do port-forwarding, but behind the ALB and NGINX the static files are not being loaded. Here's a breakdown of the setup.
Argo CD:
// Note: the snippet starts mid-signature; "InstallArgoCD" is a placeholder name.
func InstallArgoCD(ctx *pulumi.Context,
iamRoleArn pulumi.StringInput, argoAdminPassword, argocdDomain string,
awsALBControllerRelease, ingressRelease *helmv3.Release) (*helmv3.Release, error) {
// Derive the namespace from the ALB controller release status so the Argo CD
// release waits for the ALB controller and avoids missing endpoints.
argocdNamespace := awsALBControllerRelease.Status.ApplyT(func(status helmv3.ReleaseStatus) string {
if status.Status != "deployed" {
// Returning "" makes the Argo CD release wait until the ALB controller is deployed.
return ""
}
// The namespace the Argo CD release is installed into.
return "argocd"
}).(pulumi.StringOutput)
// Same trick against the nginx ingress release to force ordering, though the
// result is discarded here (the DependsOn below already covers this dependency).
_ = ingressRelease.Status.ApplyT(func(status helmv3.ReleaseStatus) string {
if status.Status != "deployed" {
return ""
}
return "argocd"
}).(pulumi.StringOutput)
hashedPassword, err := bcrypt.GenerateFromPassword([]byte(argoAdminPassword), 10)
if err != nil {
return nil, fmt.Errorf("failed to hash admin password: %w", err)
}
logrus.Infof("ArgoCD Domain being set: %s", argocdDomain)
argoRel, err := helmv3.NewRelease(ctx, "argocd", &helmv3.ReleaseArgs{
Chart: pulumi.String("argo-cd"),
Version: pulumi.String("8.3.7"),
Namespace: argocdNamespace,
Name: pulumi.String("argocd"),
CreateNamespace: pulumi.Bool(true),
WaitForJobs: pulumi.Bool(true),
Timeout: pulumi.Int(1140),
RepositoryOpts: helmv3.RepositoryOptsArgs{
Repo: pulumi.String("https://argoproj.github.io/argo-helm"),
},
Values: pulumi.Map{
"global": pulumi.Map{
"domain": pulumi.String(fmt.Sprintf(argocdDomain)),
},
"controller": pulumi.Map{
"serviceAccount": pulumi.Map{
"create": pulumi.Bool(true),
"name": pulumi.String("argocd-sa"),
"annotations": pulumi.StringMap{
"eks.amazonaws.com/role-arn": iamRoleArn,
},
},
},
"configs": pulumi.Map{
"secret": pulumi.Map{
"argocdServerAdminPassword": pulumi.String(string(hashedPassword)),
},
},
"server": pulumi.Map{
// "service": pulumi.Map{
// "port": pulumi.Int(80),
// "targetPort": pulumi.Int(8080),
// },
"config": pulumi.Map{
"tls": pulumi.Map{
"enabled": pulumi.Bool(false),
},
},
"extraArgs": pulumi.StringArray{
pulumi.String("--insecure"),
},
"insecure": pulumi.Bool(true),
"ingress": pulumi.Map{
"enabled": pulumi.Bool(true),
"ingressClassName": pulumi.String("nginx"),
"hostname": pulumi.String(argocdDomain),
"annotations": pulumi.StringMap{
"kubernetes.io/ingress.class": pulumi.String("nginx"),
"nginx.ingress.kubernetes.io/backend-protocol": pulumi.String("HTTP"),
"nginx.ingress.kubernetes.io/force-ssl-redirect": pulumi.String("false"),
"nginx.ingress.kubernetes.io/ssl-redirect": pulumi.String("false"),
"nginx.ingress.kubernetes.io/proxy-body-size": pulumi.String("10m"),
"nginx.ingress.kubernetes.io/proxy-buffer-size": pulumi.String("4k"),
},
"path": pulumi.String("/"),
"pathType": pulumi.String("Prefix"),
},
"resources": pulumi.Map{
"requests": pulumi.Map{
"cpu": pulumi.String("250m"),
"memory": pulumi.String("512Mi"),
},
"limits": pulumi.Map{
"cpu": pulumi.String("500m"),
"memory": pulumi.String("1Gi"),
},
},
},
},
}, pulumi.DependsOn([]pulumi.Resource{awsALBControllerRelease, ingressRelease}))
if err != nil {
return nil, err
}
return argoRel, nil
}
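For reference, recent argo-cd charts drive the insecure/plain-HTTP behaviour through configs.params."server.insecure" rather than the server-level keys above. Here is a minimal sketch of that values fragment, assuming the configs.params mechanism the chart has had since 5.x (the helper name is hypothetical):

import (
	"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
)

// argoServerParams returns the chart-supported switch for serving the Argo CD
// API server over plain HTTP behind a TLS-terminating proxy. Merged into the
// Values map, it would replace server.extraArgs/--insecure above.
func argoServerParams() pulumi.Map {
	return pulumi.Map{
		"configs": pulumi.Map{
			"params": pulumi.Map{
				// Rendered by the chart into the argocd-cmd-params-cm ConfigMap.
				"server.insecure": pulumi.Bool(true),
			},
		},
	}
}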
And here's how I set up the AWS Load Balancer Controller with a Helm chart as well:
func InstallALBControllerHelm(ctx *pulumi.Context, vpc *awsx.Vpc, albRole *iam.Role, project utils.Project) (*helmv3.Release, error) {
albController, err := helmv3.NewRelease(ctx, "aws-load-balancer-controller", &helmv3.ReleaseArgs{
Chart: pulumi.String("aws-load-balancer-controller"),
Version: pulumi.String("1.13.3"), // or bump to the latest
// Name: pulumi.String("aws-load-balancer-controller"),
Name: pulumi.String("aws-load-balancer-controller"),
Namespace: pulumi.String("kube-system"),
RepositoryOpts: helmv3.RepositoryOptsArgs{
Repo: pulumi.String("https://aws.github.io/eks-charts"),
},
Values: pulumi.Map{
"installCRDs": pulumi.Bool(true),
"ingressClass": pulumi.String("alb"),
"ingressClassResource": pulumi.Map{
"enabled": pulumi.Bool(true),
"default": pulumi.Bool(false),
"name": pulumi.String("alb"),
},
"clusterName": pulumi.String(project.ProjectName),
"region": pulumi.String(project.Region),
"vpcId": vpc.VpcId,
"serviceAccount": pulumi.Map{
"create": pulumi.Bool(true),
"name": pulumi.String("aws-load-balancer-controller"),
"annotations": pulumi.Map{
"eks.amazonaws.com/role-arn": albRole.Arn,
},
},
"replicaCount": pulumi.Int(1),
"resources": pulumi.Map{
"limits": pulumi.Map{
"cpu": pulumi.String("200m"),
"memory": pulumi.String("500Mi"),
},
"requests": pulumi.Map{
"cpu": pulumi.String("100m"),
"memory": pulumi.String("200Mi"),
},
},
"podSecurityContext": pulumi.Map{
"fsGroup": pulumi.Int(65534),
},
"clusterSecretsPermissions": pulumi.Map{
"allowAllSecrets": pulumi.Bool(true),
},
"securityContext": pulumi.Map{
"allowPrivilegeEscalation": pulumi.Bool(false),
"capabilities": pulumi.Map{
"drop": pulumi.ToStringArray([]string{"ALL"}),
},
"readOnlyRootFilesystem": pulumi.Bool(true),
"runAsNonRoot": pulumi.Bool(true),
"runAsUser": pulumi.Int(65534),
},
"serviceMonitor": pulumi.Map{
"enabled": pulumi.Bool(false),
},
"logLevel": pulumi.String("info"),
},
WaitForJobs: pulumi.Bool(true),
Timeout: pulumi.Int(600),
})
if err != nil {
return nil, err
}
logrus.Info("AWS Load Balancer Controller: Helm release success")
return albController, nil
}
And here's the setup of the NGINX ingress controller:
func SetupNginxIngressController(ctx *pulumi.Context, vpcCidr string, awsALBControllerRelease *helmv3.Release) (*helmv3.Release, error) {
// Derive the namespace from the ALB controller release status so nginx waits
// for the ALB controller and avoids missing endpoints.
nginxNamespace := awsALBControllerRelease.Status.ApplyT(func(status helmv3.ReleaseStatus) string {
if status.Status != "deployed" {
// Returning "" makes the nginx release wait until the ALB controller is deployed.
return ""
}
// The namespace the nginx ingress controller is installed into.
return "nginx"
}).(pulumi.StringOutput)
if err := CreateNamespace(ctx, "nginx"); err != nil {
return nil, fmt.Errorf("failed to create namespace: %w", err)
}
nginx, err := helmv3.NewRelease(ctx, "nginx", &helmv3.ReleaseArgs{
Chart: pulumi.String("ingress-nginx"),
Version: pulumi.String("4.12.0"),
Name: pulumi.String("ingress-nginx-controller"),
Namespace: nginxNamespace,
CreateNamespace: pulumi.Bool(true),
RepositoryOpts: helmv3.RepositoryOptsArgs{
Repo: pulumi.String("https://kubernetes.github.io/ingress-nginx"),
},
Timeout: pulumi.Int(600),
WaitForJobs: pulumi.Bool(true),
Values: pulumi.Map{
"controller": pulumi.Map{
"service": pulumi.Map{
"name": pulumi.String("ingress-nginx-controller"),
"annotations": pulumi.Map{
"service.beta.kubernetes.io/aws-load-balancer-type": pulumi.String("nlb"),
"service.beta.kubernetes.io/aws-load-balancer-internal": pulumi.String("true"),
"service.beta.kubernetes.io/aws-load-balancer-scheme": pulumi.String("internal"),
},
},
"ingressClassResource": pulumi.Map{
"default": pulumi.Bool(false),
"name": pulumi.String("nginx"),
"enabled": pulumi.Bool(true),
"controllerValue": pulumi.String("k8s.io/ingress-nginx"),
},
"config": pulumi.Map{
"use-proxy-protocol": pulumi.String("false"),
"use-forwarded-headers": pulumi.String("true"),
"compute-full-forwarded-for": pulumi.String("true"),
"enable-real-ip": pulumi.String("true"),
"proxy-real-ip-cidr": pulumi.String(vpcCidr),
"forwarded-for-header": pulumi.String("X-Forwarded-For"),
"add-headers": pulumi.String("nginx/security-headers"),
"proxy-buffer-size": pulumi.String("16k"),
"proxy-buffers": pulumi.String("4 64k"),
"proxy-busy-buffers-size": pulumi.String("128k"),
"client-body-buffer-size": pulumi.String("128k"),
"http2-max-field-size": pulumi.String("16k"),
"http2-max-header-size": pulumi.String("32k"),
// Ensure proper SSL handling behind ALB
"ssl-redirect": pulumi.String("false"),
"force-ssl-redirect": pulumi.String("false"),
},
"resources": pulumi.Map{
"requests": pulumi.Map{
"cpu": pulumi.String("100m"),
"memory": pulumi.String("90Mi"),
},
"limits": pulumi.Map{
"cpu": pulumi.String("500m"),
"memory": pulumi.String("500Mi"),
},
},
"metrics": pulumi.Map{
"enabled": pulumi.Bool(true),
"serviceMonitor": pulumi.Map{
"enabled": pulumi.Bool(false),
},
"service": pulumi.Map{
"annotations": pulumi.Map{
"prometheus.io/scrape": pulumi.String("true"),
"prometheus.io/port": pulumi.String("10254"),
"prometheus.io/path": pulumi.String("/metrics"),
},
},
},
},
},
}, pulumi.DependsOn([]pulumi.Resource{awsALBControllerRelease}))
if err != nil {
return nil, err
}
logrus.Info("NGINX Controller: Helm release success")
return nginx, nil
}
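One thing worth noting in the config above: add-headers: nginx/security-headers points ingress-nginx at a ConfigMap named security-headers in the nginx namespace, which isn't shown in this post; if it doesn't exist, the controller will complain and the headers won't be applied. A minimal sketch of such a ConfigMap, assuming pulumi-kubernetes v4 import paths (the helper name and header values are placeholders):

import (
	corev1 "github.com/pulumi/pulumi-kubernetes/sdk/v4/go/kubernetes/core/v1"
	metav1 "github.com/pulumi/pulumi-kubernetes/sdk/v4/go/kubernetes/meta/v1"
	"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
)

// createSecurityHeadersConfigMap creates the ConfigMap that the add-headers
// option references; ingress-nginx appends each key/value as a response header.
func createSecurityHeadersConfigMap(ctx *pulumi.Context) (*corev1.ConfigMap, error) {
	return corev1.NewConfigMap(ctx, "security-headers", &corev1.ConfigMapArgs{
		Metadata: &metav1.ObjectMetaArgs{
			Namespace: pulumi.String("nginx"),
			Name:      pulumi.String("security-headers"),
		},
		Data: pulumi.StringMap{
			// Example headers; adjust to your own policy.
			"X-Content-Type-Options": pulumi.String("nosniff"),
			"X-Frame-Options":        pulumi.String("SAMEORIGIN"),
		},
	})
}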
And with that, I create an external ingress for the ALB:
func CreateExternalALBIngress(ctx *pulumi.Context, baseDomain string, project utils.Project, certificateArn pulumi.StringOutput, nginx *helmv3.Release) (*networkingv1.Ingress, error) {
// Get the nginx controller Service name once the Helm release is deployed
nginxServiceName := nginx.Status.ApplyT(func(status helmv3.ReleaseStatus) string {
if status.Status != "deployed" {
// This makes the ingress creation wait until nginx is deployed.
return ""
}
// The chart names the controller Service "<release name>-controller", hence
// the doubled suffix for a release named "ingress-nginx-controller".
return "ingress-nginx-controller-controller"
}).(pulumi.StringOutput)
logrus.WithFields(logrus.Fields{
"BASE_DOMAIN:": baseDomain,
}).Info("CreateExternalALBIngress.info")
ingress, err := networkingv1.NewIngress(ctx, "external-alb-ingress", &networkingv1.IngressArgs{
Metadata: &metav1.ObjectMetaArgs{
Namespace: pulumi.String("nginx"),
Name: pulumi.String(fmt.Sprintf("%s-external-alb", project.ProjectName)),
Annotations: pulumi.StringMap{
"kubernetes.io/ingress.class": pulumi.String("alb"),
"alb.ingress.kubernetes.io/scheme": pulumi.String("internet-facing"),
"alb.ingress.kubernetes.io/target-type": pulumi.String("ip"),
"alb.ingress.kubernetes.io/listen-ports": pulumi.String(`[{"HTTP": 80}, {"HTTPS": 443}]`),
"alb.ingress.kubernetes.io/ssl-redirect": pulumi.String("443"),
"alb.ingress.kubernetes.io/certificate-arn": certificateArn,
"alb.ingress.kubernetes.io/healthcheck-port": pulumi.String("10254"),
"alb.ingress.kubernetes.io/healthcheck-path": pulumi.String("/healthz"),
"alb.ingress.kubernetes.io/tags": pulumi.Sprintf("Environment=%s,ManagedBy=pulumi,ProjectName=%s", project.StackName, project.ProjectName),
"alb.ingress.kubernetes.io/load-balancer-name": pulumi.Sprintf("%s-external-alb", project.ProjectName),
"nginx.ingress.kubernetes.io/ignore": pulumi.String("true"),
"alb.ingress.kubernetes.io/group.name": pulumi.String("external-alb"),
},
},
Spec: &networkingv1.IngressSpecArgs{
Rules: networkingv1.IngressRuleArray{
&networkingv1.IngressRuleArgs{
Host: pulumi.String(fmt.Sprintf("*.%s.%s", project.ProjectName, baseDomain)),
Http: &networkingv1.HTTPIngressRuleValueArgs{
Paths: networkingv1.HTTPIngressPathArray{
&networkingv1.HTTPIngressPathArgs{
Path: pulumi.String("/*"),
PathType: pulumi.String("ImplementationSpecific"),
Backend: &networkingv1.IngressBackendArgs{
Service: &networkingv1.IngressServiceBackendArgs{
Name: nginxServiceName, // Points to Nginx service
Port: &networkingv1.ServiceBackendPortArgs{
Number: pulumi.Int(80),
},
},
},
},
},
},
},
},
},
})
return ingress, err
}
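As an aside, since the doubled -controller suffix comes from the ingress-nginx chart naming its controller Service "<release name>-controller", the name could be derived from the release status instead of hardcoded. A sketch of a drop-in replacement for the lookup above, assuming ReleaseStatus.Name is populated once deployed:

// Derive the controller Service name from the release itself.
nginxServiceName := nginx.Status.ApplyT(func(status helmv3.ReleaseStatus) string {
	if status.Status != "deployed" || status.Name == nil {
		// Keeps the ingress waiting until the release reports as deployed.
		return ""
	}
	// ingress-nginx names its controller Service "<release name>-controller".
	return fmt.Sprintf("%s-controller", *status.Name)
}).(pulumi.StringOutput)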
The intended flow is: traffic --> ALB --> NGINX (distributing internal traffic) --> service A.
I would appreciate any help on this, please. Thank you.
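For context, this is roughly how the functions above are wired together (a sketch: InstallArgoCD is the placeholder name for the truncated function at the top, and the cluster/VPC/IAM/certificate plumbing is elided):

// wireIngressStack wires the pieces in dependency order:
// ALB controller -> nginx -> external ALB ingress -> Argo CD.
func wireIngressStack(ctx *pulumi.Context, vpc *awsx.Vpc, vpcCidr string, albRole *iam.Role,
	project utils.Project, baseDomain string, certificateArn pulumi.StringOutput,
	iamRoleArn pulumi.StringInput, argoAdminPassword, argocdDomain string) error {
	albController, err := InstallALBControllerHelm(ctx, vpc, albRole, project)
	if err != nil {
		return err
	}
	nginx, err := SetupNginxIngressController(ctx, vpcCidr, albController)
	if err != nil {
		return err
	}
	if _, err := CreateExternalALBIngress(ctx, baseDomain, project, certificateArn, nginx); err != nil {
		return err
	}
	_, err = InstallArgoCD(ctx, iamRoleArn, argoAdminPassword, argocdDomain, albController, nginx)
	return err
}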