feat(chart): Development Pipeline and Standard Container

Update the example ConfigMap data and add an example standardized container
to be used for the gates.

Add Chart tasks to make testing the chart workflow easier.
Remove the triggers and cluster role settings in preparation for moving the
pipeline to a new location.
Add a PipelineRun manifest to allow testing via kubectl create -f.
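
For reference, once the chart and its example ConfigMap are installed, the run can be
exercised the same way the gate script added in this change does (commands run from the
charts/ directory):

  helm upgrade --install development-pipeline -n development-pipeline ./development-pipeline
  kubectl apply -n development-pipeline -f ./development-pipeline/config_map.yaml.example
  kubectl create -n development-pipeline -f ./development-pipeline/pipelinerun-validation.yaml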

Co-authored-by: sshturm@mirantis.com - Combined feat(chart) Chart task in Development Pipeline
Change-Id: Icdb6bfe391e0e30883eeca661668763515a5565a
Signed-off-by: Pete Birley <pete@port.direct>
Stacey Fletcher 2021-01-11 15:59:23 +00:00
parent 85d1116c56
commit 2e8e00f461
67 changed files with 955 additions and 288 deletions

View File

@ -2,85 +2,66 @@ apiVersion: v1
kind: ConfigMap
metadata:
name: deployment-flow
namespace: default
data:
credentials: "Some creds"
kubeconfig.json: "Default kubeconfig.json"
cluster_exists: "true"
production: "false"
development.yaml: |
proxy:
http: ""
https: ""
noproxy: ""
enabled: false
docker_registry: "harbor-core.jarvis.local"
images:
- build_from_source: true
project: "test"
repo: "scratch"
tag: "1.built"
build:
git_repo: "https://review.opendev.org/airship/charts"
checkout_loc: "/src/checkout/scratch"
refspec: "refs/changes/41/770141/7"
version: "refs/changes/*:refs/changes/*"
makefile:
path: "tools/images"
target: "build"
file: "Makefile"
image_name: "scratch"
image_base: "scratch"
- build_from_source: true
repo: "microflow/standard-container"
project: "test"
tag: "1.built"
build:
git_repo: "https://review.opendev.org/airship/charts"
checkout_loc: "/src/checkout/standard-container"
refspec: "refs/changes/41/770141/7"
version: "refs/changes/*:refs/changes/*"
makefile:
path: "tools/images"
target: "build"
file: "Makefile"
image_name: "standard-container"
image_base: "ubuntu:20.04"
- build_from_source: false
remote_registry: "docker.io"
remote_repo: "testing2016/jrunner"
remote_tag: "2.0"
repo: "microflow/standard-container"
project: "test"
tag: "1.existing"
charts:
- name: "tekton-pipelines"
build_from_source: true
build:
git_repo: "https://review.opendev.org/airship/charts"
checkout_loc: "/src/checkout/airship/charts"
refspec: "master"
makefile:
path: "charts"
target: "tekton-pipelines"
file: "Makefile"
- name: "tekton-triggers"
build_from_source: true
build:
git_repo: "https://review.opendev.org/airship/charts"
checkout_loc: "/src/checkout/airship/charts"
refspec: "master"
makefile:
path: "charts"
target: "tekton-triggers"
file: "Makefile"
- name: "tekton-dashboard"
build_from_source: true
build:
git_repo: "https://review.opendev.org/airship/charts"
checkout_loc: "/src/checkout/airship/charts"
refspec: "master"
makefile:
path: "charts"
target: "tekton-dashboard"
file: "Makefile"
cluster.json: |
{
"use_existing_cluster": "true"
}
image.json: |
{
"build_from_source": true,
"image_name": "mongodb",
"project": "mongodb",
"repo": "mongodb",
"tag": "1.built",
"image_fullname": "cnf/mongodb:1.0",
"path": "tools/gate/jarvis/5G-SA-core/mongodb/images/mongodb",
"build": {
"git_repo": "https://review.opendev.org/airship/charts",
"checkout_loc": "/src/checkout/mongodb",
"refspec": "master",
"version": "refs/changes/*:refs/changes/*"
}
}
chart.json: |
{
"chart_name": "mongodb",
"project": "mongodb",
"repo": "mongodb",
"version": "0.1.0",
"build_from_source": true,
"path": "tools/gate/jarvis/5G-SA-core/mongodb/charts",
"build": {
"git_repo": "https://review.opendev.org/airship/charts",
"checkout_loc": "/src/checkout/airship/charts",
"refspec": "master"
},
"namespace": "development-pipeline",
"release_name": "mongodb",
"images": {
"applications": {
"mongodb": {
"tag": "1.0",
"name": "mongodb",
"repo": "mongodb"
}
}
}
}
default.json: |
{
"proxy": {
"http": "",
"https": "",
"noproxy": "",
"enabled": false
},
"chart_registry_url": "harbor-core.jarvis.local/chartrepo",
"chart_repository": "jarvis-harbor",
"docker_registry": "harbor-core.jarvis.local",
"harbor_secret_mounted_path": "/workspace/helm-creds"
}
cleanup.json: |
{
"remove_artifacts": "true"
}
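
Each of the JSON keys above is copied into the shared workspace by the setup-* tasks and
handed to ansible-playbook as extra-vars overrides, so a typical step boils down to an
invocation of this shape (illustrative; the actual playbook path comes from the chart values):

  ansible-playbook -vvv <playbook> -i hosts \
    -e @"$(workspaces.development_pipeline_data.path)/default.json" \
    -e @"$(workspaces.development_pipeline_data.path)/chart.json"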

View File

@ -0,0 +1,21 @@
apiVersion: tekton.dev/v1beta1
kind: PipelineRun
metadata:
generateName: development-pipeline-run
spec:
pipelineRef:
name: development-pipeline
serviceAccountName: sa-development-pipeline
workspaces:
- name: k8s_cluster_data
configMap:
name: deployment-flow
- name: development_pipeline_data
volumeClaimTemplate:
spec:
storageClassName: standard
accessModes:
- ReadWriteOnce
resources:
requests:
storage: 1Gi

View File

@ -1,11 +0,0 @@
{{- if $.Values.clusterRole.create }}
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
name: {{ $.Values.clusterRole.name }}
rules:
# EventListeners need to be able to fetch any clustertriggerbindings
- apiGroups: ["triggers.tekton.dev"]
resources: ["clustertriggerbindings"]
verbs: ["get", "list", "watch"]
{{- end }}

View File

@ -1,14 +0,0 @@
{{- if and ($.Values.serviceAccount.create) ($.Values.clusterRole.bind) }}
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: {{ $.Values.clusterRole.name }}
subjects:
- kind: ServiceAccount
name: {{ $.Values.serviceAccount.name }}
namespace: {{ $.Release.Namespace }}
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: {{ $.Values.clusterRole.name }}
{{- end }}

View File

@ -1,13 +0,0 @@
apiVersion: triggers.tekton.dev/v1alpha1
kind: EventListener
metadata:
name: development-listener
namespace: {{ $.Release.Namespace }}
spec:
serviceAccountName: {{ $.Values.serviceAccount.name }}
triggers:
- name: development-trigger
bindings:
- ref: development-pipeline-binding
template:
name: development-pipeline-template

View File

@ -1,14 +0,0 @@
apiVersion: networking.k8s.io/v1beta1
kind: Ingress
metadata:
name: ingress-development-listener
namespace: {{ .Release.Namespace }}
spec:
rules:
- host: el-development.{{- .Release.Namespace }}.tekton.jarvis.local
http:
paths:
- path: /
backend:
serviceName: el-development-listener
servicePort: 8080

View File

@ -8,7 +8,41 @@ spec:
- name: k8s_cluster_data
- name: development_pipeline_data
tasks:
- name: deploy-k8s
- name: microflow-setup-cluster-config
taskRef:
name: setup-cluster-config
workspaces:
- name: k8s_cluster_data
workspace: k8s_cluster_data
- name: development_pipeline_data
workspace: development_pipeline_data
- name: microflow-setup-image-config
taskRef:
name: setup-image-config
workspaces:
- name: k8s_cluster_data
workspace: k8s_cluster_data
- name: development_pipeline_data
workspace: development_pipeline_data
- name: microflow-setup-chart-config
taskRef:
name: setup-chart-config
workspaces:
- name: k8s_cluster_data
workspace: k8s_cluster_data
- name: development_pipeline_data
workspace: development_pipeline_data
- name: microflow-setup-cleanup-config
taskRef:
name: setup-cleanup-config
workspaces:
- name: k8s_cluster_data
workspace: k8s_cluster_data
- name: development_pipeline_data
workspace: development_pipeline_data
- name: microflow-k8s
runAfter:
- microflow-setup-cluster-config
taskRef:
name: k8s-cluster
workspaces:
@ -16,7 +50,9 @@ spec:
workspace: k8s_cluster_data
- name: development_pipeline_data
workspace: development_pipeline_data
- name: build-image
- name: microflow-images
runAfter:
- microflow-setup-image-config
workspaces:
- name: k8s_cluster_data
workspace: k8s_cluster_data
@ -24,7 +60,9 @@ spec:
workspace: development_pipeline_data
taskRef:
name: build-images
- name: build-chart
- name: microflow-charts
runAfter:
- microflow-setup-chart-config
workspaces:
- name: k8s_cluster_data
workspace: k8s_cluster_data
@ -32,17 +70,20 @@ spec:
workspace: development_pipeline_data
taskRef:
name: build-charts
- name: deploy-artifacts
runAfter: [deploy-k8s,build-image,build-chart]
- name: microflow-deployment-manifests
workspaces:
- name: k8s_cluster_data
workspace: k8s_cluster_data
- name: development_pipeline_data
workspace: development_pipeline_data
taskRef:
name: deployment
- name: functional-testing
runAfter: [deploy-artifacts]
name: deployment-manifests
- name: microflow-functional
runAfter:
- microflow-deployment-manifests
- microflow-k8s
- microflow-images
- microflow-charts
workspaces:
- name: k8s_cluster_data
workspace: k8s_cluster_data
@ -50,8 +91,9 @@ spec:
workspace: development_pipeline_data
taskRef:
name: functional
- name: promote-artifacts
runAfter: [functional-testing]
- name: microflow-promote-artifacts
runAfter:
- microflow-functional
workspaces:
- name: k8s_cluster_data
workspace: k8s_cluster_data
@ -60,7 +102,7 @@ spec:
taskRef:
name: promote
finally:
- name: cleanup
- name: microflow-cleanup
workspaces:
- name: k8s_cluster_data
workspace: k8s_cluster_data

View File

@ -13,10 +13,17 @@ rules:
resources: ["configmaps"]
verbs: ["get", "list", "watch"]
# Permissions to create resources in associated TriggerTemplates
- apiGroups: ["tekton.dev"]
resources: ["pipelineruns", "taskruns"]
verbs: ["create"]
- apiGroups: [""]
resources: ["serviceaccounts"]
verbs: ["impersonate"]
verbs: ["impersonate", "get"]
# Permissions to execute helm dry-run
- apiGroups: [""]
resources: ["secrets", "services"]
verbs: ["get"]
- apiGroups: ["apps"]
resources: ["deployments"]
verbs: ["get"]
- apiGroups: ["rbac.authorization.k8s.io"]
resources: ["roles", "rolebindings"]
verbs: ["get"]
{{- end }}

View File

@ -10,9 +10,57 @@ spec:
- name: k8s_cluster_data
- name: development_pipeline_data
steps:
- name: build-chart
- name: clone
image: {{ $.Values.tasks.chart.buildChartImage }}
script: |
#!/usr/bin/env sh
ansible-playbook -vvv {{ $.Values.tasks.chart.buildPlaybook }} -i hosts -e @"$(workspaces.k8s_cluster_data.path)/development.yaml"
# TODO copy JSON to shared workspace to make it available for other tasks
ansible-playbook -vvv {{ $.Values.tasks.chart.clonePlaybook }} -i hosts -e @"$(workspaces.development_pipeline_data.path)/default.json" -e @"$(workspaces.development_pipeline_data.path)/chart.json"
- name: set-chart-output
image: {{ $.Values.tasks.chart.buildChartImage }}
script: |
#!/usr/bin/env sh
cat "$(workspaces.development_pipeline_data.path)/chart.json"
- name: lint-chart
image: {{ $.Values.tasks.chart.buildChartImage }}
script: |
#!/usr/bin/env sh
ansible-playbook -vvv {{ $.Values.tasks.chart.lintdryrunPlaybook }} -i hosts -e @"$(workspaces.development_pipeline_data.path)/default.json" -e @"$(workspaces.development_pipeline_data.path)/chart.json"
- name: set-chart-output-after-lint-dryrun
image: {{ $.Values.tasks.chart.buildChartImage }}
script: |
#!/usr/bin/env sh
cat "$(workspaces.development_pipeline_data.path)/chart.json"
- name: package-chart
image: {{ $.Values.tasks.chart.buildChartImage }}
script: |
#!/usr/bin/env sh
ansible-playbook -vvv {{ $.Values.tasks.chart.packagePlaybook }} -i hosts -e @"$(workspaces.development_pipeline_data.path)/default.json" -e @"$(workspaces.development_pipeline_data.path)/chart.json"
- name: set-chart-output-after-packaging
image: {{ $.Values.tasks.chart.buildChartImage }}
script: |
#!/usr/bin/env sh
cat "$(workspaces.development_pipeline_data.path)/chart.json"
- name: publish-chart
volumeMounts:
- mountPath: /usr/local/share/ca-certificates/harbor-ca.crt
name: harbor-ca
subPath: harbor-ca
- mountPath: /workspace/helm-creds
name: helm-publish-creds
image: {{ $.Values.tasks.chart.buildChartImage }}
script: |
#!/usr/bin/env sh
update-ca-certificates
ansible-playbook -vvv {{ $.Values.tasks.chart.publishPlaybook }} -i hosts -e @"$(workspaces.development_pipeline_data.path)/default.json" -e @"$(workspaces.development_pipeline_data.path)/chart.json"
- name: set-chart-output-after-publish
image: {{ $.Values.tasks.chart.buildChartImage }}
script: |
#!/usr/bin/env sh
cat "$(workspaces.development_pipeline_data.path)/chart.json"
volumes:
- name: helm-publish-creds
secret:
secretName: harbor-basic-auth
- name: harbor-ca
secret:
secretName: harbor-ca

View File

@ -10,28 +10,19 @@ spec:
- name: k8s_cluster_data
- name: development_pipeline_data
steps:
- name: gather-logs
image: {{ $.Values.tasks.cleanup.cleanupImage }}
script: |
#!/bin/sh
# TODO
echo "gather logs"
echo "publish logs"
echo "notify"
- name: cleanup
image: {{ $.Values.tasks.cleanup.cleanupImage }}
volumeMounts:
- mountPath: $(workspaces.development_pipeline_data.path)/config
name: kubeconfig
script: |
#!/bin/sh
if grep -i "true" "$(workspaces.k8s_cluster_data.path)/production"; then
echo "No cleanup is required. CI was set to false."
exit 0
fi
if grep "true" "$(workspaces.k8s_cluster_data.path)/cluster_exists"; then
# TODO
echo "Cleanup artifacts"
else
# TODO
echo "Teardown k8s cluster"
fi
# TODO Delete sensitive data from shared workspace
rm "$(workspaces.development_pipeline_data.path)/kubeconfig.json"
ansible-playbook -vvv "{{ $.Values.tasks.cleanup.cleanupPlaybook }}" -i hosts \
-e @"$(workspaces.development_pipeline_data.path)/default.json" \
-e @"$(workspaces.development_pipeline_data.path)/chart.json" \
-e @"$(workspaces.development_pipeline_data.path)/image.json" \
-e @"$(workspaces.development_pipeline_data.path)/cluster.json"
volumes:
- name: kubeconfig
secret:
secretName: kubeconfig-secret

View File

@ -0,0 +1,16 @@
apiVersion: tekton.dev/v1beta1
kind: Task
metadata:
name: deployment-manifests
namespace: {{ $.Release.Namespace }}
spec:
description: >-
This task will do any validation of manifests required to deploy the CNF
workspaces:
- name: k8s_cluster_data
- name: development_pipeline_data
steps:
- name: example-validation-step
image: {{ $.Values.tasks.deploymentManifests.deploymentManifestsImage }}
script: |
ansible-playbook -vvv "{{ $.Values.tasks.deploymentManifests.deploymentManifestsPlaybook }}" -i hosts -e @"$(workspaces.development_pipeline_data.path)/default.json"

View File

@ -1,19 +0,0 @@
apiVersion: tekton.dev/v1beta1
kind: Task
metadata:
name: deployment
namespace: {{ $.Release.Namespace }}
spec:
description: >-
This task deploys artifacts produced by previous tasks
workspaces:
- name: k8s_cluster_data
- name: development_pipeline_data
steps:
- name: deploy-artifacts
image: {{ $.Values.tasks.deployment.deployImage }}
script: |
#!/usr/bin/env sh
# Artifacts details are stored in JSON files as an output of previous tasks.
# TODO copy logs and scan results to shared workspace
echo "Deploying artifacts"

View File

@ -5,21 +5,37 @@ metadata:
namespace: {{ $.Release.Namespace }}
spec:
description: >-
This task will validate the k8s cluster using kubeconfig from k8s-cluster task
This task will deploy the CNF and run any tests specified
workspaces:
- name: k8s_cluster_data
- name: development_pipeline_data
steps:
- name: run-functional-tests
image: {{ $.Values.tasks.functional.validateImage }}
- name: deploy-helm-charts
image: {{ $.Values.tasks.functional.functionalDeployImage }}
volumeMounts:
- mountPath: /usr/local/share/ca-certificates/harbor-ca.crt
name: harbor-ca
subPath: harbor-ca
- mountPath: /workspace/helm-creds
name: helm-publish-creds
- mountPath: $(workspaces.development_pipeline_data.path)/config
name: kubeconfig
script: |
#!/bin/sh
if [ -f "$(workspaces.development_pipeline_data.path)/kubeconfig.json" ] ; then
echo "Kubeconfig is found, starting validation"
else
echo "Error: missing kubeconfig.json file"
exit 1
fi
echo "Validating cluster"
echo "ansible-playbook {{ $.Values.tasks.functional.playbook }}"
cat "$(workspaces.development_pipeline_data.path)/kubeconfig.json"
update-ca-certificates
ansible-playbook -vvv "{{ $.Values.tasks.functional.functionalDeployPlaybook }}" -i hosts -e @"$(workspaces.development_pipeline_data.path)/default.json" -e @"$(workspaces.development_pipeline_data.path)/cluster.json" -e @"$(workspaces.development_pipeline_data.path)/chart.json"
- name: run-helm-tests
image: {{ $.Values.tasks.functional.functionalTestImage }}
script: |
#!/bin/sh
ansible-playbook -vvv "{{ $.Values.tasks.functional.functionalTestPlaybook }}" -i hosts -e @"$(workspaces.development_pipeline_data.path)/default.json" -e @"$(workspaces.development_pipeline_data.path)/chart.json"
volumes:
- name: helm-publish-creds
secret:
secretName: harbor-basic-auth
- name: harbor-ca
secret:
secretName: harbor-ca
- name: kubeconfig
secret:
secretName: kubeconfig-secret

View File

@ -10,7 +10,23 @@ spec:
- name: k8s_cluster_data
- name: development_pipeline_data
steps:
- name: build-image
- name: clone
image: {{ $.Values.tasks.image.buildImage }}
volumeMounts:
- mountPath: /certs/client
name: dind-certs
script: |
#!/usr/bin/env sh
ansible-playbook -vvv {{ $.Values.tasks.image.clonePlaybook }} -i hosts -e @"$(workspaces.development_pipeline_data.path)/default.json" -e @"$(workspaces.development_pipeline_data.path)/image.json"
#docker images
# TODO copy JSON file to shared workspace to make it available for other tasks
# TODO copy logs and scan results to shared location
- name: set-image-output
image: {{ $.Values.tasks.image.buildImage }}
script: |
#!/usr/bin/env sh
cat "$(workspaces.development_pipeline_data.path)/image.json"
- name: docker-build
image: {{ $.Values.tasks.image.buildImage }}
volumeMounts:
- mountPath: /certs/client
@ -27,10 +43,54 @@ spec:
value: /certs/client
script: |
#!/usr/bin/env sh
ansible-playbook -vvv {{ $.Values.tasks.image.buildPlaybook }} -i hosts -e @"$(workspaces.k8s_cluster_data.path)/development.yaml"
ansible-playbook -vvv {{ $.Values.tasks.image.buildPlaybook }} -i hosts -e @"$(workspaces.development_pipeline_data.path)/default.json" -e @"$(workspaces.development_pipeline_data.path)/image.json"
docker images
# TODO copy JSON file to shared workspace to make it available for other tasks
# TODO copy logs and scan results to shared location
- name: set-image-build-output-after-build
image: {{ $.Values.tasks.image.buildImage }}
script: |
#!/usr/bin/env sh
cat "$(workspaces.development_pipeline_data.path)/image.json"
- name: publish-and-scan-image
image: {{ $.Values.tasks.image.buildImage }}
volumeMounts:
- mountPath: /certs/client
name: dind-certs
- mountPath: /tekton/home/.docker/config.json
name: image-push-creds
subPath: .dockerconfigjson
env:
# Connect to the sidecar over TCP, with TLS.
- name: DOCKER_HOST
value: tcp://localhost:2376
# Verify TLS.
- name: DOCKER_TLS_VERIFY
value: '1'
# Use the certs generated by the sidecar daemon.
- name: DOCKER_CERT_PATH
value: /certs/client
script: |
#!/usr/bin/env sh
ansible-playbook -vvv {{ $.Values.tasks.image.pushPlaybook }} -i hosts -e @"$(workspaces.development_pipeline_data.path)/default.json" -e @"$(workspaces.development_pipeline_data.path)/image.json"
- name: set-image-output-after-publish-scan
image: {{ $.Values.tasks.image.buildImage }}
script: |
#!/usr/bin/env sh
cat "$(workspaces.development_pipeline_data.path)/image.json"
- name: get-scan-results
image: {{ $.Values.tasks.image.buildImage }}
volumeMounts:
- mountPath: /certs/client
name: dind-certs
script: |
#!/usr/bin/env sh
ansible-playbook -vvv {{ $.Values.tasks.image.getScanResultsPlaybook }} -i hosts -e @"$(workspaces.development_pipeline_data.path)/default.json" -e @"$(workspaces.development_pipeline_data.path)/image.json"
- name: set-image-output-set-scan-results
image: {{ $.Values.tasks.image.buildImage }}
script: |
#!/usr/bin/env sh
cat "$(workspaces.development_pipeline_data.path)/image.json"
sidecars:
- image: {{ $.Values.tasks.image.sidecarServer }}
name: server
@ -57,3 +117,9 @@ spec:
volumes:
- name: dind-certs
emptyDir: {}
- name: image-push-creds
secret:
secretName: harbor-docker-auth
- name: helm-publish-creds
secret:
secretName: harbor-basic-auth

View File

@ -5,26 +5,19 @@ metadata:
namespace: {{ $.Release.Namespace }}
spec:
description: >-
This task will create a k8s cluster if needed and store kubeconfig in shared workspace
This task will create a k8s cluster if needed or verify that an existing cluster is reachable
workspaces:
- name: k8s_cluster_data
- name: development_pipeline_data
steps:
- name: generate-kubeconfig
image: {{ $.Values.tasks.cluster.createClusterImage }}
image: {{ $.Values.tasks.kubernetes.createClusterImage }}
script: |
#!/usr/bin/env sh
# Use provided kubeconfig for existing cluster
if grep -i "true" "$(workspaces.k8s_cluster_data.path)/cluster_exists"; then
echo "Use existing cluster."
cp "$(workspaces.k8s_cluster_data.path)/kubeconfig.json" "$(workspaces.development_pipeline_data.path)/kubeconfig.json"
# Deploy cluster and copy kubeconfig
else
echo "Create a k8s cluster"
# TODO
echo "ansible-playbook -vvv {{ $.Values.tasks.cluster.createClusterPlaybook }} -i hosts -e @\"$(workspaces.k8s_cluster_data.path)/development.yaml\""
# TODO copy generated kubeconfig to shared workspace
echo "New cluster's kubeconfig data" > "$(workspaces.development_pipeline_data.path)/kubeconfig.json"
fi
echo "ansible-playbook -vvv {{ $.Values.tasks.cluster.validateClusterPlaybook }} -i hosts -e @\"$(workspaces.k8s_cluster_data.path)/development.yaml\""
# TODO copy JSON file to shared workspace to make it available for other tasks
ansible-playbook -vvv "{{ $.Values.tasks.kubernetes.getKubeconfigPlaybook }}" -i hosts -e @"$(workspaces.development_pipeline_data.path)/default.json" -e @"$(workspaces.development_pipeline_data.path)/cluster.json"
volumeMounts:
- mountPath: $(workspaces.development_pipeline_data.path)/config
name: kubeconfig
volumes:
- name: kubeconfig
secret:
secretName: kubeconfig-secret

View File

@ -5,14 +5,72 @@ metadata:
namespace: {{ $.Release.Namespace }}
spec:
description: >-
This task builds charts if source is provided
This task will promote images and chart into a non-test repository
workspaces:
- name: k8s_cluster_data
- name: development_pipeline_data
steps:
- name: build-chart
image: {{ $.Values.tasks.promote.promoteArtifacts }}
- name: promote-artifacts
image: {{ $.Values.tasks.promote.promoteImage }}
env:
# Connect to the sidecar over TCP, with TLS.
- name: DOCKER_HOST
value: tcp://localhost:2376
# Verify TLS.
- name: DOCKER_TLS_VERIFY
value: '1'
# Use the certs generated by the sidecar daemon.
- name: DOCKER_CERT_PATH
value: /certs/client
volumeMounts:
- mountPath: /tekton/home/.docker/config.json
name: image-push-creds
subPath: .dockerconfigjson
- mountPath: /certs/client
name: dind-certs
- mountPath: /usr/local/share/ca-certificates/harbor-ca.crt
name: harbor-ca
subPath: harbor-ca
- mountPath: /workspace/helm-creds
name: helm-publish-creds
script: |
#!/usr/bin/env sh
# TODO call playbook to promote artifacts generated in previous tasks
echo "Promote artifacts"
set -ex
update-ca-certificates
ansible-playbook -vvv {{ $.Values.tasks.promote.promoteImagePlaybook }} -i hosts -e @"$(workspaces.development_pipeline_data.path)/default.json" -e @"$(workspaces.development_pipeline_data.path)/image.json"
ansible-playbook -vvv {{ $.Values.tasks.promote.promoteChartPlaybook }} -i hosts -e @"$(workspaces.development_pipeline_data.path)/default.json" -e @"$(workspaces.development_pipeline_data.path)/chart.json"
sidecars:
- image: {{ $.Values.tasks.image.sidecarServer }}
name: server
args:
- --storage-driver=vfs
- --userland-proxy=false
- --debug
- --insecure-registry={{ $.Values.tasks.image.insecureRegistry }}
securityContext:
privileged: true
env:
# Write generated certs to the path shared with the client.
- name: DOCKER_TLS_CERTDIR
value: /certs
volumeMounts:
- mountPath: /certs/client
name: dind-certs
# Wait for the dind daemon to generate the certs it will share with the
# client.
readinessProbe:
periodSeconds: 1
exec:
command: ['ls', '/certs/client/ca.pem']
volumes:
- name: dind-certs
emptyDir: {}
- name: image-push-creds
secret:
secretName: harbor-docker-auth
- name: helm-publish-creds
secret:
secretName: harbor-basic-auth
- name: harbor-ca
secret:
secretName: harbor-ca

View File

@ -0,0 +1,84 @@
apiVersion: tekton.dev/v1beta1
kind: Task
metadata:
name: setup-cluster-config
namespace: {{ $.Release.Namespace }}
spec:
description: >-
This task sets configurations that will be used as overrides to the Ansible tasks.
workspaces:
- name: k8s_cluster_data
- name: development_pipeline_data
steps:
- name: setup-cluster-config
image: {{ $.Values.tasks.setup.setupConfigImage }}
script: |
#!/usr/bin/env sh
cp "$(workspaces.k8s_cluster_data.path)/default.json" "$(workspaces.development_pipeline_data.path)/default.json"
cp "$(workspaces.k8s_cluster_data.path)/cluster.json" "$(workspaces.development_pipeline_data.path)/cluster.json"
jq '.cluster_kubeconfig_path="$(workspaces.development_pipeline_data.path)/config"' "$(workspaces.development_pipeline_data.path)/cluster.json" > "$(workspaces.development_pipeline_data.path)/temp_cluster.json" && mv "$(workspaces.development_pipeline_data.path)/temp_cluster.json" "$(workspaces.development_pipeline_data.path)/cluster.json"
---
apiVersion: tekton.dev/v1beta1
kind: Task
metadata:
name: setup-image-config
namespace: {{ $.Release.Namespace }}
spec:
description: >-
This task sets configurations that will be used as overrides to the Ansible tasks.
workspaces:
- name: k8s_cluster_data
- name: development_pipeline_data
steps:
- name: setup-image-config
image: {{ $.Values.tasks.setup.setupConfigImage }}
script: |
#!/usr/bin/env sh
cp "$(workspaces.k8s_cluster_data.path)/image.json" "$(workspaces.development_pipeline_data.path)/image.json"
echo "Set temporary image name to test/scan-image:$(context.taskRun.uid)"
jq '.image_fullname="test/scan-image:$(context.taskRun.uid)"' "$(workspaces.development_pipeline_data.path)/image.json" > "$(workspaces.development_pipeline_data.path)/temp_image.json" && mv "$(workspaces.development_pipeline_data.path)/temp_image.json" "$(workspaces.development_pipeline_data.path)/image.json"
echo "Set tag to context $(context.taskRun.uid)"
jq '.tag="$(context.taskRun.uid)"' "$(workspaces.development_pipeline_data.path)/image.json" > "$(workspaces.development_pipeline_data.path)/temp_image.json" && mv "$(workspaces.development_pipeline_data.path)/temp_image.json" "$(workspaces.development_pipeline_data.path)/image.json"
echo "Set checkout location for git repository to $(workspaces.development_pipeline_data.path)/$(context.taskRun.uid)"
jq '.build.checkout_loc="$(workspaces.development_pipeline_data.path)/$(context.taskRun.uid)"' "$(workspaces.development_pipeline_data.path)/image.json" > "$(workspaces.development_pipeline_data.path)/temp_image.json" && mv "$(workspaces.development_pipeline_data.path)/temp_image.json" "$(workspaces.development_pipeline_data.path)/image.json"
---
apiVersion: tekton.dev/v1beta1
kind: Task
metadata:
name: setup-chart-config
namespace: {{ $.Release.Namespace }}
spec:
description: >-
This task sets configurations that will be used as overrides to the Ansible tasks.
workspaces:
- name: k8s_cluster_data
- name: development_pipeline_data
steps:
- name: setup-chart-config
image: {{ $.Values.tasks.setup.setupConfigImage }}
script: |
#!/usr/bin/env sh
cp "$(workspaces.k8s_cluster_data.path)/default.json" "$(workspaces.development_pipeline_data.path)/default.json"
cp "$(workspaces.k8s_cluster_data.path)/chart.json" "$(workspaces.development_pipeline_data.path)/chart.json"
echo "Set tag to context $(context.taskRun.uid)"
jq '.tag="$(context.taskRun.uid)"' "$(workspaces.development_pipeline_data.path)/chart.json" > "$(workspaces.development_pipeline_data.path)/temp_chart.json" && mv "$(workspaces.development_pipeline_data.path)/temp_chart.json" "$(workspaces.development_pipeline_data.path)/chart.json"
echo "Set checkout location for git repository to $(workspaces.development_pipeline_data.path)/$(context.taskRun.uid)"
jq '.build.checkout_loc="$(workspaces.development_pipeline_data.path)/$(context.taskRun.uid)"' "$(workspaces.development_pipeline_data.path)/chart.json" > "$(workspaces.development_pipeline_data.path)/temp_chart.json" && mv "$(workspaces.development_pipeline_data.path)/temp_chart.json" "$(workspaces.development_pipeline_data.path)/chart.json"
---
apiVersion: tekton.dev/v1beta1
kind: Task
metadata:
name: setup-cleanup-config
namespace: {{ $.Release.Namespace }}
spec:
description: >-
This task sets configurations that will be used as overrides to the Ansible tasks.
workspaces:
- name: k8s_cluster_data
- name: development_pipeline_data
steps:
- name: setup-cleanup-config
image: {{ $.Values.tasks.setup.setupConfigImage }}
script: |
#!/usr/bin/env sh
cp "$(workspaces.k8s_cluster_data.path)/cleanup.json" "$(workspaces.development_pipeline_data.path)/cleanup.json"

View File

@ -1,5 +0,0 @@
apiVersion: triggers.tekton.dev/v1alpha1
kind: TriggerBinding
metadata:
name: development-pipeline-binding
namespace: {{ $.Release.Namespace }}

View File

@ -1,28 +0,0 @@
apiVersion: triggers.tekton.dev/v1alpha1
kind: TriggerTemplate
metadata:
name: development-pipeline-template
namespace: {{ $.Release.Namespace }}
spec:
resourcetemplates:
- apiVersion: tekton.dev/v1beta1
kind: PipelineRun
metadata:
generateName: development-pipeline-run
spec:
pipelineRef:
name: development-pipeline
serviceAccountName: {{ $.Values.serviceAccount.name }}
workspaces:
- name: k8s_cluster_data
configMap:
name: {{ $.Values.trigger.configMap }}
- name: development_pipeline_data
volumeClaimTemplate:
spec:
storageClassName: {{ $.Values.pvc.storageClass }}
accessModes:
- ReadWriteOnce
resources:
requests:
storage: {{ $.Values.pvc.size }}

View File

@ -7,37 +7,45 @@ role:
name: development-pipeline
create: true
clusterRole:
bind: true
name: development-pipeline
create: true
pvc:
storageClass: standard
size: 1Gi
tasks:
cluster:
createClusterImage: &base_image testing2016/standard-container:1.0
createClusterPlaybook: /playbooks/create_cluster.yaml
validateClusterPlaybook: /playbooks/validate_cluster.yaml
kubernetes:
createClusterImage: &base_image standard-container:1.0
createClusterPlaybook: /playbooks/create-cluster.yaml
getKubeconfigPlaybook: /playbooks/get-kubeconfig.yaml
validateClusterPlaybook: /playbooks/validate-cluster.yaml
setup:
setupConfigImage: *base_image
image:
buildImage: *base_image
sidecarServer: docker:19-dind
buildPlaybook: /playbooks/build_image.yaml
insecureRegistry: harbor-core.jarvis.local
clonePlaybook: /playbooks/clone.yaml
buildPlaybook: /playbooks/build-image.yaml
pushPlaybook: /playbooks/tag-push-image.yaml
getScanResultsPlaybook: /playbooks/get-scan-results.yaml
name: standard-container.yaml
chart:
buildChartImage: *base_image
buildPlaybook: /playbooks/helm_chart.yaml
deployment:
deployImage: *base_image
clonePlaybook: /playbooks/clone.yaml
packagePlaybook: /playbooks/package-chart.yaml
lintdryrunPlaybook: /playbooks/lint-dryrun-chart.yaml
publishPlaybook: /playbooks/publish-chart.yaml
deploymentManifests:
deploymentManifestsImage: *base_image
deploymentManifestsPlaybook: /playbooks/deployment-manifests.yaml
promote:
promoteArtifacts: *base_image
promoteImage: *base_image
promoteImagePlaybook: /playbooks/promote-image.yaml
promoteChartPlaybook: /playbooks/promote-chart.yaml
functional:
validateImage: *base_image
playbook: /playbooks/functional.yaml
functionalDeployImage: *base_image
functionalTestImage: *base_image
functionalDeployPlaybook: /playbooks/functional-deploy.yaml
functionalTestPlaybook: /playbooks/functional-test.yaml
cleanup:
cleanupImage: *base_image
trigger:
configMap: deployment-flow
cleanupPlaybook: /playbooks/cleanup.yaml

View File

@ -0,0 +1,34 @@
#!/bin/bash
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
set -ex
# Default wait timeout is 1000 seconds
end=$(date +%s)
timeout=${3:-1000}
end=$((end + timeout))
while true; do
pipelinerunstatus="$(kubectl get pipelinerun -n $1 $(kubectl get pipelinerun -n $1 -o name | awk -F '/' "/$2/ { print \$NF; exit }") | tail -1 | awk '{ print $2 }')"
[ "${pipelinerunstatus}" == "True" ] && break
[ "${pipelinerunstatus}" == "False" ] && exit 1
sleep 5
now=$(date +%s)
if [ $now -gt $end ] ; then
echo "Pipelinerun failed to complete after $timeout seconds"
echo
kubectl get pipelinerun --namespace $1 -o wide
echo "Some pipelineruns are not complete"
exit 1
fi
done
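
The script expects the namespace as $1, a PipelineRun name filter as $2 and an optional
timeout in seconds as $3 (default 1000). The gate script invokes it from the charts/
directory, for example:

  ../tools/deployment/common/wait-for-pipelinerun.sh development-pipeline development-pipeline
  # with an explicit 30 minute timeout (value chosen for illustration)
  ../tools/deployment/common/wait-for-pipelinerun.sh development-pipeline development-pipeline 1800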

View File

@ -40,6 +40,7 @@ Vagrant.configure("2") do |config|
./tools/gate/jarvis/400-deploy-harbor.sh
./tools/gate/jarvis/500-deploy-gerrit.sh
./tools/gate/jarvis/600-deploy-tekton.sh
./tools/gate/jarvis/650-temporary-setup.sh
./tools/gate/jarvis/700-deploy-jarvis-system.sh
./tools/gate/jarvis/800-deploy-jarvis-projects.sh
SHELL

View File

@ -30,15 +30,6 @@ function validate() {
local chart_dir
chart_dir="$(mktemp -d)"
helm pull jarvis-harbor/library/chartmuseum --destination "${chart_dir}"
#TODO(staceyF) Put this into appropriate jarvis-system tasks
kubectl create ns development-pipeline
kubectl create secret generic harbor-ca --from-file=harbor-ca=/etc/jarvis/certs/ca/ca.pem -n development-pipeline
#NOTE Will not be required once Harbor is backed by LDAP
kubectl create secret generic harbor-basic-auth --from-literal=username='admin' --from-literal=password='Harbor12345' -n development-pipeline
kubectl create secret docker-registry harbor-docker-auth --docker-username=admin --docker-password=Harbor12345 --docker-email=example@gmail.com --docker-server=harbor-core.jarvis.local -n development-pipeline
#TODO(staceyF) Put this into appropriate jarvis-project tasks
curl -X POST "https://harbor-core.jarvis.local/api/v2.0/projects" -H "accept: application/json" -H "X-Request-Id: 12345" -H "authorization: Basic YWRtaW46SGFyYm9yMTIzNDU=" -H "Content-Type: application/json" -d "{ \"project_name\": \"test\", \"public\": true, \"metadata\": { \"auto_scan\": \"true\" }}"
# Tests that we can upload an image
sudo -E docker login harbor-core.jarvis.local --username admin --password Harbor12345
sudo -E docker pull debian:buster-slim

View File

@ -0,0 +1,27 @@
#!/bin/bash
set -ex
#TODO(staceyF) Put this into appropriate jarvis-system tasks
kubectl create ns development-pipeline || true
kubectl create secret generic harbor-ca --from-file=harbor-ca=/etc/jarvis/certs/ca/ca.pem -n development-pipeline || true
kubectl create secret generic kubeconfig-secret --from-file=kubeconfig=$HOME/.kube/config -n development-pipeline || true
#NOTE Will not be required once Harbor is backed by LDAP
kubectl create secret generic harbor-basic-auth --from-literal=username='admin' --from-literal=password='Harbor12345' -n development-pipeline || true
kubectl create secret docker-registry harbor-docker-auth --docker-username=admin --docker-password=Harbor12345 --docker-email=example@gmail.com --docker-server=harbor-core.jarvis.local -n development-pipeline || true
#TODO(staceyF) Put this into appropriate jarvis-project tasks
curl -X POST "https://harbor-core.jarvis.local/api/v2.0/projects" -H "accept: application/json" -H "X-Request-Id: 12345" -H "authorization: Basic YWRtaW46SGFyYm9yMTIzNDU=" -H "Content-Type: application/json" -d "{ \"project_name\": \"mongodb-staging\", \"public\": true, \"metadata\": { \"auto_scan\": \"true\" }}" || true
curl -X POST "https://harbor-core.jarvis.local/api/v2.0/projects" -H "accept: application/json" -H "X-Request-Id: 12345" -H "authorization: Basic YWRtaW46SGFyYm9yMTIzNDU=" -H "Content-Type: application/json" -d "{ \"project_name\": \"mongodb\", \"public\": true, \"metadata\": { \"auto_scan\": \"true\" }}" || true
#NOTE This is temporary, used to trigger and validate that the development-pipeline works prior to being refactored.
cd ./tools/images
sudo make build IMAGE_FULLNAME=standard-container:1.0
cd ../../charts
helm upgrade --install development-pipeline -n development-pipeline ./development-pipeline
kubectl apply -n development-pipeline -f ./development-pipeline/config_map.yaml.example
kubectl create -n development-pipeline -f ./development-pipeline/pipelinerun-validation.yaml
../tools/deployment/common/wait-for-pipelinerun.sh development-pipeline development-pipeline

tools/images/Makefile (new file, 28 lines)
View File

@ -0,0 +1,28 @@
BUILD_DIR := $(shell mktemp -d)
IMAGE_BASE ?= ubuntu:20.04
IMAGE_PREFIX ?= test
IMAGE_REGISTRY ?= core.harbor.domain
IMAGE_TAG ?= 1.0
IMAGE_NAME ?= standard-container
PROXY ?= http://proxy.foo.com:8000
NO_PROXY ?= localhost,127.0.0.1,.svc.cluster.local
USE_PROXY ?= false
# use this variable for image labels added in internal build process
LABEL ?= org.attcomdev.build=community
COMMIT ?= $(shell git rev-parse HEAD)
DISTRO ?= ubuntu_bionic
IMAGE_FULLNAME := ${IMAGE_REGISTRY}/${IMAGE_PREFIX}/${IMAGE_NAME}:${IMAGE_TAG}
.PHONY: help
SHELL:=/bin/bash
.ONESHELL:
help: ## This help.
@awk 'BEGIN {FS = ":.*##"; printf "\nUsage:\n make \033[36m<target>\033[0m\n"} /^[a-zA-Z0-9_-]+:.*?##/ { printf " \033[36m%-15s\033[0m %s\n", $$1, $$2 } /^##@/ { printf "\n\033[1m%s\033[0m\n", substr($$0, 5) } ' $(MAKEFILE_LIST)
build: ## Build the containers.
docker build --tag $(IMAGE_FULLNAME) --build-arg BASE_IMAGE=$(IMAGE_BASE) ./$(IMAGE_NAME)
push: build ## Build and push the containers
docker push $(IMAGE_FULLNAME)
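
The full image name defaults to ${IMAGE_REGISTRY}/${IMAGE_PREFIX}/${IMAGE_NAME}:${IMAGE_TAG},
and any variable, including IMAGE_FULLNAME itself, can be overridden on the make command
line; the gate script above builds the standard container with:

  sudo make build IMAGE_FULLNAME=standard-container:1.0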

View File

@ -0,0 +1,7 @@
ARG BASE_IMAGE
FROM ${BASE_IMAGE}
SHELL ["bash", "-exc"]
ENV DEBIAN_FRONTEND noninteractive
ENTRYPOINT /entrypoint.sh

View File

@ -0,0 +1,53 @@
ARG BASE_IMAGE
FROM ${BASE_IMAGE}
SHELL ["bash", "-exc"]
ENV DEBIAN_FRONTEND noninteractive
# Update distro and install ansible
RUN apt-get update ;\
apt-get dist-upgrade -y ;\
apt-get install -y \
python3-minimal \
python3-pip \
python3-setuptools \
make \
sudo \
git \
jq \
curl \
git-review \
apt-transport-https \
ca-certificates \
gnupg-agent \
software-properties-common \
gettext-base ;\
curl -fsSL https://download.docker.com/linux/ubuntu/gpg | apt-key add - ;\
add-apt-repository "deb [arch=amd64] https://download.docker.com/linux/ubuntu $(lsb_release -cs) stable" ;\
apt-get install -y --no-install-recommends \
docker-ce-cli ;\
pip3 install --upgrade wheel ;\
pip3 install ansible ;\
ansible-galaxy collection install community.kubernetes ;\
pip3 install docker ;\
# Install kubectl
apt-get install -y --no-install-recommends \
apt-transport-https \
gnupg2 ;\
curl -o /usr/bin/kubectl -L "https://dl.k8s.io/release/$(curl -L -s https://dl.k8s.io/release/stable.txt)/bin/linux/amd64/kubectl" ;\
chmod +x /usr/bin/kubectl ;\
# Install Helm
curl -fsSL -o helm-install.tar.gz https://get.helm.sh/helm-v3.4.2-linux-amd64.tar.gz \
&& tar -xvf helm-install.tar.gz \
&& rm helm-install.tar.gz \
&& mv linux-amd64/helm /usr/local/bin/helm \
&& chmod +x /usr/local/bin/helm \
&& helm --help ;\
helm plugin install https://github.com/chartmuseum/helm-push ;\
rm -rf /var/lib/apt/lists/*
COPY assets /opt/assets/
RUN cp -ravf /opt/assets/* / ;\
rm -rf /opt/assets
ENTRYPOINT /entrypoint.sh

View File

@ -0,0 +1,3 @@
#!/bin/bash
ansible --version

View File

@ -0,0 +1,8 @@
- hosts: localhost
become: yes
vars:
image_status: "Success"
tasks:
- name: Build image
include_tasks: ./roles/images/tasks/build-image.yaml
when: "{{ build_from_source }}"

View File

@ -0,0 +1,5 @@
- hosts: localhost
become: yes
tasks:
- name: Clean up release
include_tasks: ./roles/cleanup/tasks/cleanup.yaml

View File

@ -0,0 +1,8 @@
- hosts: localhost
become: yes
vars:
image_status: "Success"
tasks:
- name: Run git clone task
include_tasks: ./roles/common/tasks/git-clone.yaml
when: "{{ build_from_source }}"

View File

@ -0,0 +1,5 @@
- hosts: localhost
become: yes
tasks:
- name: validate-deployment-manifests-config
include_tasks: ./roles/deployment-manifests/tasks/validate-config.yaml

View File

@ -0,0 +1,5 @@
- hosts: localhost
become: yes
tasks:
- name: Deploy CNF
include_tasks: ./roles/functional/tasks/functional-deploy.yaml

View File

@ -0,0 +1,5 @@
- hosts: localhost
become: yes
tasks:
- name: Test deployed CNF
include_tasks: ./roles/functional/tasks/functional-test.yaml

View File

@ -0,0 +1,5 @@
- hosts: localhost
become: yes
tasks:
- name: Get kubeconfig for Kubernetes cluster to deploy CNF
include_tasks: ./roles/kubernetes/tasks/get-kubeconfig.yaml

View File

@ -0,0 +1,7 @@
- hosts: localhost
become: yes
vars:
image_status: "Success"
tasks:
- name: Get Scan Results
include_tasks: ./roles/images/tasks/get-scan-results.yaml

View File

@ -0,0 +1,6 @@
proxy:
http: ""
https: ""
noproxy: ""
enabled: false
docker_registry: "harbor-core.jarvis.local"

View File

@ -0,0 +1 @@
localhost ansible_connection=local

View File

@ -0,0 +1,5 @@
- hosts: localhost
become: yes
tasks:
- name: Chart lint and dry-run
include_tasks: ./roles/charts/tasks/lint-dryrun-chart.yaml

View File

@ -0,0 +1,6 @@
- hosts: localhost
become: yes
tasks:
- name: Package Helm Chart
include_tasks: ./roles/charts/tasks/package-chart.yaml
when: "{{ build_from_source }}"

View File

@ -0,0 +1,5 @@
- hosts: localhost
become: yes
tasks:
- name: Promote charts after testing is successful
include_tasks: ./roles/promote/tasks/promote-chart.yaml

View File

@ -0,0 +1,5 @@
- hosts: localhost
become: yes
tasks:
- name: Promote images after testing is successful
include_tasks: ./roles/promote/tasks/promote-image.yaml

View File

@ -0,0 +1,5 @@
- hosts: localhost
become: yes
tasks:
- name: Publish Helm Chart
include_tasks: ./roles/charts/tasks/publish-chart.yaml

View File

@ -0,0 +1,6 @@
#Helm Chart is already packaged and versioned upstream
- name: Existing helm_chart
block:
- name: Download helm_chart
get_url:
name: "{{ remote_url }}"

View File

@ -0,0 +1,9 @@
- name: Helm lint "{{ chart_name }}"
command: "helm lint {{ chart_name }}"
args:
chdir: "{{ build.checkout_loc }}/{{ path }}"
- name: Helm Dry-run "{{ chart_name }}"
command: "helm install --dry-run {{ chart_name }} {{ chart_name }}"
args:
chdir: "{{ build.checkout_loc }}/{{ path }}"

View File

@ -0,0 +1,4 @@
- name: Package Helm Chart
shell: helm package "{{ chart_name }}"
args:
chdir: "{{ build.checkout_loc }}/{{ path }}"

View File

@ -0,0 +1,15 @@
# TODO: Bring in secrets securely via K8s
- name: Install Plugin
shell: helm plugin update push || helm plugin install https://github.com/chartmuseum/helm-push || true
- name: Get harbor username
shell: cat {{ harbor_secret_mounted_path }}/username
register: harbor_username
- name: Get harbor password
shell: cat {{ harbor_secret_mounted_path }}/password
register: harbor_password
- name: Add Harbor Helm repository and Test repository
shell: helm repo add "{{ chart_repository }}-staging" "https://{{ docker_registry }}/chartrepo/{{ chart_name }}-staging" --username={{ harbor_username.stdout }} --password={{ harbor_password.stdout }}
- name: Push chart "{{ chart_name }}" to Harbor staging registry
command: helm push "{{ chart_name }}-{{ version }}".tgz "{{ chart_repository }}-staging"
args:
chdir: "{{ build.checkout_loc }}/{{ path }}"

View File

@ -0,0 +1,3 @@
#Remove any resources deployed
- name: Remove test release
shell: helm delete --kubeconfig="{{ cluster_kubeconfig_path }}/kubeconfig" "{{ release_name }}" -n "{{ namespace }}"

View File

@ -0,0 +1,6 @@
#Clone the git repository that provides the source for the image/chart build
- git:
repo: "{{ build.git_repo }}"
dest: "{{ build.checkout_loc }}"
version: "{{ build.refspec }}"
refspec: "refs/changes/*:refs/changes/*"

View File

@ -0,0 +1,4 @@
- name: This task is to validate deployment manifests
command: echo "It can do anything you want, just put it right here."
args:
chdir: "/workspace"

View File

@ -0,0 +1,13 @@
#Deploy CNF
- name: Get harbor username
shell: cat {{ harbor_secret_mounted_path }}/username
register: harbor_username
- name: Get harbor password
shell: cat {{ harbor_secret_mounted_path }}/password
register: harbor_password
#TODO dex-aio doesn't install, look into another test chart
- name: Add Harbor Helm repository and Test repository
shell: helm repo add "{{ chart_repository }}-staging" "https://{{ docker_registry }}/chartrepo/{{ project }}-staging" --username={{ harbor_username.stdout }} --password={{ harbor_password.stdout }}
- name: Deploy chart "{{ chart_name }}"
#shell: echo "Deploy after mongodb"
shell: helm upgrade --install --kubeconfig="{{ cluster_kubeconfig_path }}/kubeconfig" "{{ release_name }}" "{{ chart_repository }}-staging/{{ chart_name }}" --version="{{ version }}" --namespace="{{ namespace }}" --username="{{ harbor_username.stdout }}" --password="{{ harbor_password.stdout }}" --create-namespace

View File

@ -0,0 +1,4 @@
#Test Deployed CNF
- name: Chart has no Helm Tests, echo for now
shell: echo "There are no helm tests yet"
# shell: echo 'helm test --kubeconfig="{{ cluster_kubeconfig_path }}/kubeconfig" "{{ name }}"'

View File

@ -0,0 +1,5 @@
#Build the docker image from the previously cloned source
- name: Build Docker Image for "{{ image_name }}"
shell: docker build -t "{{ image_fullname }}" .
args:
chdir: "{{ build.checkout_loc }}/{{ path }}"

View File

@ -0,0 +1,25 @@
#Scan results may take some time; retry with a delay until the scan reports a finished status
- name: output the request
shell: echo "https://{{ docker_registry }}/api/v2.0/projects/{{ project }}-staging/repositories/{{ repo | replace('/','%2F') }}/artifacts/{{ tag }}?page=1&page_size=10&with_tag=true&with_label=false&with_scan_overview=true&with_signature=false&with_immutable_status=false"
- name: Get Scan Results
uri:
validate_certs: false
url: "https://{{ docker_registry }}/api/v2.0/projects/{{ project }}-staging/repositories/{{ repo | replace('/','%2F') }}/artifacts/{{ tag }}?page=1&page_size=10&with_tag=true&with_label=false&with_scan_overview=true&with_signature=false&with_immutable_status=false"
method: GET
body_format: "json"
headers:
accept: "application/json"
X-Request-Id: "12345"
#TODO: take the base64-encoded credentials from a ConfigMap/Secret instead of hardcoding them
authorization: "Basic YWRtaW46SGFyYm9yMTIzNDU="
register: result
until: result.json.scan_overview["application/vnd.scanner.adapter.vuln.report.harbor+json; version=1.0"].scan_status == "Success"
retries: 5
delay: 30
- name: Check Scan Results Summary for High and Critical CVE
#shell: echo '{{ result.json.scan_overview["application/vnd.scanner.adapter.vuln.report.harbor+json; version=1.0"] }}'
set_fact:
image_status: "Vulnerable"
when: result.json.scan_overview["application/vnd.scanner.adapter.vuln.report.harbor+json; version=1.0"].severity in ("High","Critical")

View File

@ -0,0 +1,4 @@
- name: Tag and push to promotion repository
docker_image:
pull: true
name: "{{ docker_registry }}/{{ project }}-staging/{{ project }}/{{ repo }}:{{ tag }}"

View File

@ -0,0 +1,6 @@
- name: Tag and push to internal test repository for vulnerability scanning
docker_image:
push: true
name: "{{ image_fullname }}"
repository: "{{ docker_registry }}/{{ project }}-staging/{{ repo }}"
tag: "{{ tag }}"

View File

@ -0,0 +1,15 @@
build_from_source: true
project: "test"
repo: "scratch"
tag: "1.built"
build:
git_repo: "https://review.opendev.org/airship/charts"
checkout_loc: "/src/checkout/scratch"
refspec: "refs/changes/41/770141/7"
version: "refs/changes/*:refs/changes/*"
makefile:
path: "tools/images"
target: "build"
file: "Makefile"
image_name: "scratch"
image_base: "scratch"

View File

@ -0,0 +1,16 @@
build_from_source: true
project: "test"
repo: "scratch"
tag: "1.built"
build:
git_repo: "https://review.opendev.org/airship/charts"
checkout_loc: "/src/checkout/scratch"
refspec: "refs/changes/41/770141/7"
version: "refs/changes/*:refs/changes/*"
makefile:
path: "tools/images"
target: "build"
file: "Makefile"
image_name: "scratch"
image_base: "scratch"

View File

@ -0,0 +1,15 @@
build_from_source: true
repo: "microflow/standard-container"
project: "test"
tag: "1.built"
build:
git_repo: "https://review.opendev.org/airship/charts"
checkout_loc: "/src/checkout/standard-container"
refspec: "refs/changes/41/770141/7"
version: "refs/changes/*:refs/changes/*"
makefile:
path: "tools/images"
target: "build"
file: "Makefile"
image_name: "standard-container"
image_base: "ubuntu:20.04"

View File

@ -0,0 +1,7 @@
build_from_source: false
remote_registry: "docker.io"
remote_repo: "testing2016/jrunner"
remote_tag: "2.0"
repo: "microflow/standard-container"
project: "test"
tag: "1.existing"

View File

@ -0,0 +1,10 @@
#Validate Kubernetes cluster is accessible.
- set_fact:
existing_cluster: "{{ use_existing_cluster }}"
- name: Get kubeconfig
shell: echo "Retrieve kubeconfig from mounted secret"
when: existing_cluster
- name: Validate kubeconfig
shell: kubectl --kubeconfig="{{ cluster_kubeconfig_path }}/kubeconfig" get pods -n development-pipeline
- name: Find existing service accounts
shell: kubectl --kubeconfig="{{ cluster_kubeconfig_path }}/kubeconfig" get serviceaccounts -n development-pipeline

View File

@ -0,0 +1,16 @@
- name: Install Plugin
shell: helm plugin update push || helm plugin install https://github.com/chartmuseum/helm-push || true
- name: Get harbor username
shell: cat {{ harbor_secret_mounted_path }}/username
register: harbor_username
- name: Get harbor password
shell: cat {{ harbor_secret_mounted_path }}/password
register: harbor_password
- name: Install Plugin
shell: helm plugin update push || helm plugin install https://github.com/chartmuseum/helm-push
- name: Add Harbor Helm repository and Test repository
shell: helm repo add "{{ chart_repository }}-staging" "https://{{ chart_registry_url }}/{{ chart_name }}-staging" --username="{{ harbor_username.stdout }}" --password="{{ harbor_password.stdout }}" && helm repo add "{{ chart_repository }}" "https://{{ chart_registry_url }}/{{ chart_name }}" --username="{{ harbor_username.stdout }}" --password="{{ harbor_password.stdout }}"
- name: Pull down Helm Chart
shell: helm pull "{{ chart_repository }}-staging/{{ chart_name }}" --version="{{ version }}"
- name: Push chart "{{ chart_name }}" to Helm registry
command: helm push "{{ chart_name }}-{{ version }}".tgz "{{ chart_repository }}"

View File

@ -0,0 +1,10 @@
- name: Tag and push to promotion repository
docker_image:
pull: true
name: "{{ docker_registry }}/{{ project }}-staging/{{ repo }}:{{ tag }}"
- name: Tag and push to promotion repository
docker_image:
push: true
name: "{{ docker_registry }}/{{ project }}-staging/{{ repo }}"
repository: "{{ docker_registry }}/{{ project }}/{{ repo }}"
tag: "{{ tag }}"

View File

@ -0,0 +1,7 @@
- hosts: localhost
become: yes
vars:
image_status: "Success"
tasks:
- name: Push Image for scanning to Docker Repository
include_tasks: ./roles/images/tasks/tag-push-image.yaml

View File

@ -0,0 +1,5 @@
#!/bin/bash
cp "$(workspaces.k8s_cluster_data.path)/default.json" "$(workspaces.development_pipeline_data.path)/default.json"
cp "$(workspaces.k8s_cluster_data.path)/cluster.json" "$(workspaces.development_pipeline_data.path)/cluster.json"
jq '.cluster_kubeconfig_path="$(workspaces.development_pipeline_data.path)/config"' "$(workspaces.development_pipeline_data.path)/cluster.json" > "$(workspaces.development_pipeline_data.path)/temp_cluster.json" && mv "$(workspaces.development_pipeline_data.path)/temp_cluster.json" "$(workspaces.development_pipeline_data.path)/cluster.json"

View File

@ -0,0 +1,9 @@
apiVersion: v1
kind: Secret
metadata:
name: harbor-helm-creds
namespace: development-pipeline
type: kubernetes.io/basic-auth
stringData:
username: admin
password: Harbor12345

View File

@ -38,6 +38,7 @@
- ./tools/gate/jarvis/400-deploy-harbor.sh
- ./tools/gate/jarvis/500-deploy-gerrit.sh
- ./tools/gate/jarvis/600-deploy-tekton.sh
- ./tools/gate/jarvis/650-temporary-setup.sh
- ./tools/gate/jarvis/700-deploy-jarvis-system.sh
- ./tools/gate/jarvis/800-deploy-jarvis-projects.sh