
I am running my Spring Boot application's Docker image on Kubernetes using a Helm chart.

Below are the details of the chart:

templates/deployment.yaml

apiVersion: apps/v1
kind: Deployment
metadata:
  name: {{ include "xyz.fullname" . }}
  labels:
    {{- include "xyz.labels" . | nindent 4 }}
spec:
  {{- if not .Values.autoscaling.enabled }}
  replicas: {{ .Values.replicaCount }}
  {{- end }}
  selector:
    matchLabels:
      {{- include "xyz.selectorLabels" . | nindent 6 }}
  template:
    metadata:
      {{- with .Values.podAnnotations }}
      annotations:
        {{- toYaml . | nindent 8 }}
      {{- end }}
      labels:
        {{- include "xyz.selectorLabels" . | nindent 8 }}
    spec:
      {{- with .Values.imagePullSecrets }}
      imagePullSecrets:
        {{- toYaml . | nindent 8 }}
      {{- end }}
      serviceAccountName: {{ include "xyz.serviceAccountName" . }}
      securityContext:
        {{- toYaml .Values.podSecurityContext | nindent 8 }}
      containers:
        - name: {{ .Chart.Name }}
          securityContext:
            {{- toYaml .Values.securityContext | nindent 12 }}
          image: "{{ .Values.image.repository }}:{{ .Values.image.tag | default .Chart.AppVersion }}"
          imagePullPolicy: {{ .Values.image.pullPolicy }}
          env:
          - name: DB_USER_NAME
            valueFrom:
              secretKeyRef:
                name: customsecret
                key: DB_USER_NAME
          - name: DB_PASSWORD
            valueFrom:
              secretKeyRef:
                name: customsecret
                key: DB_PASSWORD
          - name: DB_URL
            valueFrom:
              secretKeyRef:
                name: customsecret
                key: DB_URL
          - name: TOKEN
            valueFrom:
              secretKeyRef:
                name: customsecret
                key: TOKEN
          ports:
            - name: http
              containerPort: {{ .Values.service.port }}
              protocol: TCP
          livenessProbe:
            httpGet:
              path: {{ .Values.service.liveness }}
              port: http
            initialDelaySeconds: 60
            periodSeconds: 60
          readinessProbe:
            httpGet:
              path: {{ .Values.service.readiness }}
              port: {{ .Values.service.port }}
            initialDelaySeconds: 60
            periodSeconds: 30
          resources:
            {{- toYaml .Values.resources | nindent 12 }}
      {{- with .Values.nodeSelector }}
      nodeSelector:
        {{- toYaml . | nindent 8 }}
      {{- end }}
      {{- with .Values.affinity }}
      affinity:
        {{- toYaml . | nindent 8 }}
      {{- end }}
      {{- with .Values.tolerations }}
      tolerations:
        {{- toYaml . | nindent 8 }}
      {{- end }}


Chart.yaml

apiVersion: v2
name: xyz
description: A Helm chart for Kubernetes

# A chart can be either an 'application' or a 'library' chart.
#
# Application charts are a collection of templates that can be packaged into versioned archives
# to be deployed.
#
# Library charts provide useful utilities or functions for the chart developer. They're included as
# a dependency of application charts to inject those utilities and functions into the rendering
# pipeline. Library charts do not define any templates and therefore cannot be deployed.
type: application

# This is the chart version. This version number should be incremented each time you make changes
# to the chart and its templates, including the app version.
# Versions are expected to follow Semantic Versioning (https://semver.org/)
version: 0.1.0

# This is the version number of the application being deployed. This version number should be
# incremented each time you make changes to the application. Versions are not expected to
# follow Semantic Versioning. They should reflect the version the application is using.
# It is recommended to use it with quotes.
appVersion: <APP_VERSION_PLACEHOLDER>


values.yaml

# This is a YAML-formatted file.
# Declare variables to be passed into your templates.

### - If we want 3 instances, set this to 3 - then 3 pods will be created on the cluster
### - For the staging environment we usually keep 1
replicaCount: 1

image:
### ---> We can also give local image details here
### ---> Or create an image in a Docker repository and use that image URL here
  repository: gcr.io/mgcp-109-xyz-operations/projectname
  pullPolicy: IfNotPresent
  # Overrides the image tag whose default is the chart appVersion.
  tag: ""

imagePullSecrets: []
nameOverride: ""
fullnameOverride: ""

serviceAccount:
  # Specifies whether a service account should be created
  create: true
  # Annotations to add to the service account
  annotations: {}
  # The name of the service account to use.
  # If not set and create is true, a name is generated using the fullname template
  name: "xyz"

podAnnotations: {}

podSecurityContext: {}
  # fsGroup: 2000

securityContext: {}
  # capabilities:
  #   drop:
  #   - ALL
  # readOnlyRootFilesystem: true
  # runAsNonRoot: true
  # runAsUser: 1000

schedule: "*/5 * * * *"

### SMS2-40 - There are two ways to expose the application: 1) LoadBalancer or 2) NodePort
service:
  type: NodePort
  port: 8087
  liveness: /actuator/health/liveness
  readiness: /actuator/health/readiness
###service:
###  type: ClusterIP
###  port: 80

ingress:
  enabled: false
  className: ""
  annotations: {}
    # kubernetes.io/ingress.class: nginx
    # kubernetes.io/tls-acme: "true"
  hosts:
    - host: chart-example.local
      paths:
        - path: /
          pathType: ImplementationSpecific
  tls: []
  #  - secretName: chart-example-tls
  #    hosts:
  #      - chart-example.local

resources: {}
  # We usually recommend not to specify default resources and to leave this as a conscious
  # choice for the user. This also increases chances charts run on environments with little
  # resources, such as Minikube. If you do want to specify resources, uncomment the following
  # lines, adjust them as necessary, and remove the curly braces after 'resources:'.
  # limits:
  #   cpu: 100m
  #   memory: 128Mi
  # requests:
  #   cpu: 100m
  #   memory: 128Mi

autoscaling:
  enabled: false
  minReplicas: 1
  maxReplicas: 100
  targetCPUUtilizationPercentage: 80
  # targetMemoryUtilizationPercentage: 80

nodeSelector: {}

tolerations: []

affinity: {}
#application:
#  configoveride: "config/application.properties"

templates/cronjob.yaml

apiVersion: batch/v1
kind: CronJob
metadata:
  name: {{ include "xyz.fullname" . }}
spec:
  schedule: {{ .Values.schedule }}
  jobTemplate:
    spec:
      backoffLimit: 5
      template:
        spec:
          containers:
            - name: {{ .Chart.Name }}
              securityContext:
                {{- toYaml .Values.securityContext | nindent 12 }}
              image: "{{ .Values.image.repository }}:{{ .Values.image.tag | default .Chart.AppVersion }}"
              imagePullPolicy: {{ .Values.image.pullPolicy }}
              env:
              - name: DB_USER_NAME
                valueFrom:
                  secretKeyRef:
                    name: customsecret
                    key: DB_USER_NAME
              - name: DB_PASSWORD
                valueFrom:
                  secretKeyRef:
                    name: customsecret
                    key: DB_PASSWORD
              - name: DB_URL
                valueFrom:
                  secretKeyRef:
                    name: customsecret
                    key: DB_URL
              - name: TOKEN
                valueFrom:
                  secretKeyRef:
                    name: customsecret
                    key: TOKEN
              ports:
                - name: http
                  containerPort: {{ .Values.service.port }}
                  protocol: TCP
              livenessProbe:
                httpGet:
                  path: {{ .Values.service.liveness }}
                  port: http
                initialDelaySeconds: 60
                periodSeconds: 60
              readinessProbe:
                httpGet:
                  path: {{ .Values.service.readiness }}
                  port: {{ .Values.service.port }}
                initialDelaySeconds: 60
                periodSeconds: 30
              resources:
                {{- toYaml .Values.resources | nindent 12 }}
          {{- with .Values.nodeSelector }}

templates/service.yaml


apiVersion: v1
kind: Service
metadata:
  name: {{ include "xyz.fullname" . }}
  labels:
    {{- include "xyz.labels" . | nindent 4 }}
spec:
  type: {{ .Values.service.type }}
  ports:
    - port: {{ .Values.service.port }}
      targetPort: http
      protocol: TCP
      name: http
  selector:
    {{- include "xyz.selectorLabels" . | nindent 4 }}


I first ran my application without cronjob.yaml.

Once the application was running on Kubernetes, I tried to convert it into a Kubernetes CronJob, so I deleted templates/deployment.yaml and added templates/cronjob.yaml instead.

After I deployed the application it ran, but when I run

kubectl get cronjobs

the output is: No resources found in default namespace.

What am I doing wrong here? I am unable to figure it out.
I use the command below to install my Helm chart: helm upgrade --install chartname
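
For reference, these commands show what Helm actually rendered and installed (assuming the release is named chartname and the chart directory is the current one):

helm template chartname .          # render locally and check that a CronJob manifest is produced
helm get manifest chartname        # show the manifests contained in the installed release
kubectl get cronjobs --all-namespaces   # check whether the CronJob ended up in another namespace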

2 Answers


  1. Chosen as BEST ANSWER

    I was also still deploying deployment.yaml, which was the mistake, so I deleted the deployment.yaml file and kept only the cronjob.yaml file, whose content is given below:

    apiVersion: batch/v1
    kind: CronJob
    metadata:
      name: {{ include "xyz.fullname" . }}
      labels:
        {{ include "xyz.labels" . | nindent 4 }}
    spec:
      schedule: "{{ .Values.schedule }}"
      concurrencyPolicy: Forbid
      failedJobsHistoryLimit: 2
      jobTemplate:
        spec:
          template:
            spec:
              restartPolicy: Never
              containers:
                - name: {{ .Chart.Name }}
                  image: "{{ .Values.image.repository }}:{{ .Values.image.tag | default .Chart.AppVersion }}"
                  imagePullPolicy: {{ .Values.image.pullPolicy }}
                  env:
                  - name: DB_USER_NAME
                    valueFrom:
                      secretKeyRef:
                        name: customsecret
                        key: DB_USER_NAME
                  - name: DB_PASSWORD
                    valueFrom:
                      secretKeyRef:
                        name: customsecret
                        key: DB_PASSWORD
                  - name: DB_URL
                    valueFrom:
                      secretKeyRef:
                        name: customsecret
                        key: DB_URL
                  - name: TOKEN
                    valueFrom:
                      secretKeyRef:
                        name: customsecret
                        key: TOKEN
                  - name: POD_NAME
                    valueFrom:
                      fieldRef:
                        fieldPath: metadata.name
                  - name: DD_AGENT_HOST
                    valueFrom:
                      fieldRef:
                        fieldPath: status.hostIP  
                  - name: DD_ENV
                    value: {{ .Values.datadog.env }}
                  - name: DD_SERVICE
                    value: {{ include "xyz.name" . }}
                  - name: DD_VERSION
                    value: {{ include "xyz.AppVersion" . }}
                  - name: DD_LOGS_INJECTION
                    value: "true"
                  - name: DD_RUNTIME_METRICS_ENABLED
                    value: "true"
                  volumeMounts:
                    - mountPath: /app/config
                      name: logback
                  ports:
                    - name: http
                      containerPort: {{ .Values.service.port }}
                      protocol: TCP
              volumes:
                - configMap:
                    name: {{ include "xyz.name" . }}
                  name: logback
          backoffLimit: 0
        metadata:
          {{- with .Values.podAnnotations }}
          annotations:
            {{- toYaml . | nindent 8 }}
          {{- end }}
          labels:
            {{- include "xyz.selectorLabels" . | nindent 8 }}
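
    After upgrading the release, the CronJob can be checked and triggered manually instead of waiting for the schedule (assuming the rendered name is xyz; substitute the actual name shown by kubectl get cronjobs):

    kubectl get cronjob xyz
    kubectl create job xyz-manual-run --from=cronjob/xyz   # run the CronJob once, immediately
    kubectl logs -l job-name=xyz-manual-run -f             # follow the logs of the pod it creates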
    
    

  2. Not sure if your file got cut off, but it is not ended properly; an EOF error may be raised when the chart is rendered.

    The end of your cronjob.yaml should be:

    {{- with .Values.nodeSelector }}
              nodeSelector:
                {{- toYaml . | nindent 12 }}
              {{- end }} 
    

    The full file should look something like this:

    apiVersion: batch/v1
    kind: CronJob
    metadata:
      name: test
    spec:
      schedule: "{{ .Values.schedule }}"
      jobTemplate:
        spec:
          backoffLimit: 5
          template:
            spec:
              containers:
                - name: {{ .Chart.Name }}
                  securityContext:
                    {{- toYaml .Values.securityContext | nindent 12 }}
                  image: "{{ .Values.image.repository }}:{{ .Values.image.tag | default .Chart.AppVersion }}"
                  imagePullPolicy: {{ .Values.image.pullPolicy }}
                  ports:
                    - name: http
                      containerPort: {{ .Values.service.port }}
                      protocol: TCP
                  livenessProbe:
                    httpGet:
                      path: {{ .Values.service.liveness }}
                      port: http
                    initialDelaySeconds: 60
                    periodSeconds: 60
                  readinessProbe:
                    httpGet:
                      path: {{ .Values.service.readiness }}
                      port: {{ .Values.service.port }}
                    initialDelaySeconds: 60
                    periodSeconds: 30
                  resources:
                    {{- toYaml .Values.resources | nindent 12 }}
              {{- with .Values.nodeSelector }}
              nodeSelector:
                {{- toYaml . | nindent 12 }}
              {{- end }}
    

    I just tested the above and it's working fine.

    Command to render and test the Helm chart templates:

    helm template <chart name> . --output-dir ./yaml
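
    Additionally, helm lint can catch malformed templates and YAML errors (such as a truncated file) before installing, and --debug renders out invalid YAML so the failing template can be located:

    helm lint .
    helm template <chart name> . --debug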
    