If you are using Traefik in Kubernetes but want to front it with an AWS ALB (Application Load Balancer), this recipe may work for you. A few important things to note:
- Traefik relies on the underlying Kubernetes provider to create the load balancer for its Service. If nothing is specified, this will be a CLB (Classic Load Balancer). There is a way to make it an NLB (Network Load Balancer), but the AWS provider does not create an ALB, so Traefik can't give you an ALB either. This recipe therefore relies on a NodePort Service and ties the Ingress (ALB) to that NodePort Service via the ingress class annotation. If you do not like or do not want to use NodePort, this is not for you.
- I have yet to confirm whether this recipe works if you did not intentionally install the AWS LBC (Load Balancer Controller), and whether it works on non-EKS or self-managed Kubernetes on AWS.
- I am still looking into why the AWS Target Group health check is not able to use /ping or /dashboard. This may be an issue with my security groups, but for now I manually created an IngressRoute for /<>-health on the Traefik web entrypoint and updated the Target Group health check either programmatically or in the AWS console (see the sketch right after this list).
- I did not want to complicate this with Kubernetes authentication details, so I am using the simplest way for Helm to communicate with the cluster: pointing the provider at the environment's kubeconfig.
- I did some minimal templating to change the Helm release name and the corresponding Kubernetes objects, but for this post I just hard-coded the values for simplicity.
- I commented out running the deployment as a DaemonSet for my testing. You need to decide what is better in your environment: Deployment or DaemonSet.
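The health-check IngressRoute I mentioned is not part of the chart values, so here is a minimal sketch of what it can look like. It assumes the /traefik-pub-health path used by the ALB healthcheck-path annotation further down, the test namespace, and Traefik's built-in ping@internal service (ping is enabled by default in the chart for the pod probes); adjust the name, namespace, path and entrypoint to your setup.

# Sketch only: the name, namespace and path below are assumptions matching the values later in this post.
apiVersion: traefik.containo.us/v1alpha1
kind: IngressRoute
metadata:
  name: traefik-pub-health
  namespace: test
  annotations:
    # needed because the CRD provider is restricted via --providers.kubernetescrd.ingressclass=traefik-pub
    kubernetes.io/ingress.class: traefik-pub
spec:
  entryPoints:
    - web
  routes:
    - match: Path(`/traefik-pub-health`)
      kind: Rule
      services:
        # Traefik's built-in ping service simply answers 200 OK
        - name: ping@internal
          kind: TraefikService

The Target Group health check itself can also be updated programmatically instead of in the console, e.g. aws elbv2 modify-target-group --target-group-arn <target group arn> --health-check-path /traefik-pub-health.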
providers.tf
provider "helm" {
kubernetes {
config_path = "~/.kube/config-eks"
}
}
versions.tf
terraform {
  required_providers {
    helm = {
      source  = "hashicorp/helm"
      version = ">= 2.0.1"
    }
  }

  required_version = ">= 0.15"
}
helm values (values.yml)
additionalArguments:
  - --providers.kubernetescrd.ingressclass=traefik-pub

# deployment:
#   kind: DaemonSet
service:
  enabled: true
  type: NodePort
extraObjects:
  - apiVersion: networking.k8s.io/v1
    kind: Ingress
    metadata:
      name: traefik-pub
      annotations:
        kubernetes.io/ingress.class: alb
        alb.ingress.kubernetes.io/scheme: internet-facing
        alb.ingress.kubernetes.io/security-groups: sg-<eks-sg1>,<eks-sg2>
        # alb.ingress.kubernetes.io/actions.ssl-redirect: '{"Type": "redirect", "RedirectConfig":
        #   { "Protocol": "HTTPS", "Port": "443", "StatusCode": "HTTP_301"}}'
        alb.ingress.kubernetes.io/backend-protocol: HTTP
        alb.ingress.kubernetes.io/certificate-arn: arn:aws:acm:us-east-1:<aws account id>:certificate/b6ead273-66e9-4768-ad25-0924dca35cdb
        alb.ingress.kubernetes.io/healthcheck-path: "/traefik-pub-health"
        alb.ingress.kubernetes.io/healthcheck-port: "traffic-port"
        # alb.ingress.kubernetes.io/healthcheck-protocol: HTTP
        alb.ingress.kubernetes.io/listen-ports: '[{"HTTP": 80}, {"HTTPS": 443}]'
    spec:
      defaultBackend:
        service:
          name: traefik-pub
          port:
            number: 80
ingressClass:
  enabled: true
  isDefaultClass: false
ingressRoute:
  dashboard:
    enabled: true
    # Additional ingressRoute annotations (e.g. for kubernetes.io/ingress.class)
    annotations:
      kubernetes.io/ingress.class: traefik-pub
    # Additional ingressRoute labels (e.g. for filtering IngressRoute by custom labels)
    labels: {}
    entryPoints:
      - traefik
    matchRule: PathPrefix(`/dashboard`) || PathPrefix(`/api`)
    middlewares: []
    tls: {}

rollingUpdate:
  maxUnavailable: 1
  maxSurge: 1
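Once the release is applied, the AWS Load Balancer Controller should pick up the extraObjects Ingress above (because of the kubernetes.io/ingress.class: alb annotation) and provision the ALB with a Target Group that registers the NodePort service. Assuming the release name traefik-pub and the test namespace used in this post, a quick sanity check looks roughly like this:

# Service should be of TYPE NodePort
kubectl -n test get svc traefik-pub
# ADDRESS shows the ALB DNS name once it has been provisioned
kubectl -n test get ingress traefik-pub
# TargetGroupBindings are created by the AWS Load Balancer Controller
kubectl -n test get targetgroupbindings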
variables.tf (shortened for documentation)
variable "traefik_name" {
description = "helm release name"
type = string
default = "traefik-pub"
}
variable "namespace" {
description = "Namespace to install traefik chart into"
type = string
default = "test"
}
variable "traefik_chart_version" {
description = "Version of Traefik chart to install"
type = string
default = "21.2.1"
}
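chart.tf below also references var.timeout_seconds and var.replica_count, which were left out of the shortened variables.tf. They are ordinary declarations along these lines (the defaults here are placeholders, pick whatever suits your environment):

variable "timeout_seconds" {
  description = "Time in seconds to wait for the helm release to install"
  type        = number
  default     = 600
}

variable "replica_count" {
  description = "Number of Traefik replicas (deployment.replicas)"
  type        = number
  default     = 2
}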
chart.tf
resource "helm_release" "traefik" {
namespace = var.namespace
create_namespace = true
name = var.traefik_name
repository = "https://traefik.github.io/charts"
chart = "traefik"
version = var.traefik_chart_version
timeout = var.timeout_seconds
values = [
file("values.yml")
]
set {
name = "deployment.replicas"
value = var.replica_count
}
}
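With those files in one directory, and the kubeconfig from providers.tf pointing at your cluster, the usual Terraform workflow installs the release:

terraform init
terraform plan
terraform apply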