Browse --- Chat --- Wekan

Verified Commit fba3a79c authored by kc's avatar kc
Browse files

General updates, fixes, and notes

Shell runners for build server only
Sunset addClusterToGitlab.sh
Gitlab 15.3 grafana disabled by default, enable on install

Changelog: changed
parent c8a1811f
Showing with 178 additions and 214 deletions
+178 -214
......@@ -63,9 +63,10 @@ locals {
cidr_blocks = {
"default" = "10.1.0.0/16"
}
## If only 1 machine, 4 runners
## TODO: Unity builder inside pods
## This is for shell runners on build machines only
default_runners_per_machine = {
"default" = 2
"default" = 1
}
gitlab_runner_tokens_list = {
"default" = { service = "" }
......@@ -98,7 +99,7 @@ locals {
server_name_prefix = lookup(local.server_name_prefixes, local.env)
cidr_block = lookup(local.cidr_blocks, local.env)
num_runners_per_machine = lookup(local.default_runners_per_machine, local.env, 2)
num_runners_per_machine = lookup(local.default_runners_per_machine, local.env, 1)
# Configures sendgrid SMTP for gitlab using verified domain sender identity
# TODO: Maybe put in wiki some links to setup and get apikey from sendgrid
......
......@@ -362,10 +362,9 @@ module "cirunners" {
gitlab_runner_tokens = local.gitlab_runner_tokens
lead_servers = local.lead_servers
build_servers = local.build_servers
runners_per_machine = local.runners_per_machine
runners_per_machine = local.num_runners_per_machine
root_domain_name = local.root_domain_name
}
......@@ -590,7 +589,6 @@ locals {
ansible_hosts = module.cloud.ansible_hosts
gitlab_runner_tokens = var.import_gitlab ? local.gitlab_runner_registration_tokens : {service = ""}
runners_per_machine = local.lead_servers + local.build_servers == 1 ? 4 : local.num_runners_per_machine
gitlab_kube_matrix = {
"14.4.2-ce.0" = "1.20.11-00"
......
......@@ -70,7 +70,7 @@ variable "mongo_version" { default = "4.4.6" } ## Not fully implemented - consul
##! NOTE: Packer config variables - Software baked into image
##! kubernetes_version options: Valid version, recent version gitlab supports, or latest
##! ex "1.22.3-00" | "gitlab" | "" (empty/latest uses latest)
##! ex "1.24.7-00" | "gitlab" | "" (empty/latest uses latest)
variable "kubernetes_version" { default = "gitlab" }
variable "gitlab_version" { default = "15.5.3-ce.0" }
variable "docker_version" { default = "20.10.21" }
......
......@@ -4,7 +4,6 @@ variable "predestroy_hostfile" {}
variable "gitlab_runner_tokens" {}
variable "lead_servers" {}
variable "build_servers" {}
variable "runners_per_machine" {}
......@@ -14,7 +13,7 @@ locals {
public_ips = flatten([
for role, hosts in var.ansible_hosts: [
for HOST in hosts: HOST.ip
if contains(HOST.roles, "lead") || contains(HOST.roles, "build")
if contains(HOST.roles, "build")
]
])
}
......@@ -36,9 +35,10 @@ locals {
## If anything since we can't register runners until we have a working cluster, we can refactor this
## a bit and provision kubernetes runners after cluster is up
resource "null_resource" "provision" {
count = var.build_servers > 0 ? 1 : 0
triggers = {
num_machines = sum([var.lead_servers + var.build_servers])
num_runners = sum([var.lead_servers + var.build_servers]) * var.runners_per_machine
num_machines = var.build_servers
num_runners = var.build_servers * var.runners_per_machine
}
provisioner "local-exec" {
command = <<-EOF
......
......@@ -67,7 +67,7 @@
- name: INSTALL CIRUNNERS
hosts: "{{ (groups.admin | length > 0) | ternary(groups.lead+groups.build, []) }}"
hosts: "{{ (groups.admin | length > 0) | ternary(groups.build, []) }}"
remote_user: root
gather_facts: false
tasks:
......@@ -112,7 +112,7 @@
### TODO: Loop through `gitlab_runner_tokens` and register multiple types of runners
- name: Handle runners
hosts: "{{ (groups.admin | length > 0) | ternary(groups.lead+groups.build, []) }}"
hosts: "{{ (groups.admin | length > 0) | ternary(groups.build, []) }}"
remote_user: root
#gather_facts: false
vars:
......@@ -138,8 +138,6 @@
if [ {{ item }} = 1 ]; then
case "$MACHINE_NAME" in
*build*) TAG=${TAG},unity ;;
*admin*) TAG=${TAG},prod ;;
*lead*) TAG=${TAG},prod ;;
*) TAG=${TAG} ;;
esac
fi
......
......@@ -108,6 +108,7 @@ resource "digitalocean_droplet" "main" {
region = var.config.region
size = each.value.cfg.server.size
ssh_keys = [var.config.do_ssh_fingerprint]
## Maybe start tagging with just root_domain_name
tags = compact(flatten([
each.value.role == "admin" ? "gitlab-${replace(var.config.root_domain_name, ".", "-")}" : "",
each.value.cfg.server.roles,
......@@ -138,8 +139,19 @@ resource "digitalocean_droplet" "main" {
ssh-keygen -R ${self.ipv4_address};
if [ "${terraform.workspace}" != "default" ]; then
${contains(self.tags, "admin") ? "ssh-keygen -R \"${replace(regex("gitlab-[a-z]+-[a-z]+", join(",", self.tags)), "-", ".")}\"" : ""}
echo "Not default"
echo "Not default namespace, removing hostnames from known_hosts"
if [ "${contains(self.tags, "admin")}" = "true" ]; then
echo "Has admin tag"
DOMAIN=${replace(regex("gitlab-[a-z]+-[a-z]+", join(",", self.tags)), "-", ".")};
ROOT_DOMAIN=$(echo "$DOMAIN" | sed "s/gitlab\.//");
echo "Removing $DOMAIN from known_hosts";
ssh-keygen -R "$DOMAIN";
echo "Removing $ROOT_DOMAIN from known_hosts";
ssh-keygen -R "$ROOT_DOMAIN";
else
echo "Does not have admin tag"
fi
fi
exit 0;
EOF
......
......@@ -128,7 +128,8 @@ resource "null_resource" "install_gitlab" {
sed -i "s|# letsencrypt\['auto_renew_day_of_month'\]|letsencrypt\['auto_renew_day_of_month'\]|" /etc/gitlab/gitlab.rb
sed -i "s|# nginx\['custom_nginx_config'\]|nginx\['custom_nginx_config'\]|" /etc/gitlab/gitlab.rb
sed -i "s|\"include /etc/nginx/conf\.d/example\.conf;\"|\"include /etc/nginx/conf\.d/\*\.conf;\"|" /etc/gitlab/gitlab.rb
sed -i "s|# gitlab_kas['enable'] = true|gitlab_kas['enable'] = true|" /etc/gitlab/gitlab.rb
sed -i "s|# gitlab_kas\['enable'\] = true|gitlab_kas\['enable'\] = true|" /etc/gitlab/gitlab.rb
sed -i "s|# grafana\['enable'\] = false|grafana\['enable'\] = true|" /etc/gitlab/gitlab.rb
CONFIG="prometheus['scrape_configs'] = [
{
......@@ -256,8 +257,13 @@ resource "null_resource" "restore_gitlab" {
# Change known_hosts to new imported ssh keys from gitlab restore
provisioner "local-exec" {
command = <<-EOF
ssh-keygen -f ~/.ssh/known_hosts -R ${element(local.admin_public_ips, 0)}
ssh-keyscan -H ${element(local.admin_public_ips, 0)} >> ~/.ssh/known_hosts
ssh-keygen -f ~/.ssh/known_hosts -R "${element(local.admin_public_ips, 0)}"
ssh-keygen -f ~/.ssh/known_hosts -R "gitlab.${var.root_domain_name}"
ssh-keygen -f ~/.ssh/known_hosts -R "${var.root_domain_name}"
ssh-keyscan -H "${element(local.admin_public_ips, 0)}" >> ~/.ssh/known_hosts
ssh-keyscan -H "gitlab.${var.root_domain_name}" >> ~/.ssh/known_hosts
ssh-keyscan -H "${var.root_domain_name}" >> ~/.ssh/known_hosts
EOF
}
}
......
......@@ -66,7 +66,7 @@
- name: download cli tools
shell: |
curl -L clidot.net | bash
curl -L https://raw.githubusercontent.com/codeopensrc/os-cli-config/master/setup.sh | bash
sed -i --follow-symlinks "s/use_remote_colors=false/use_remote_colors=true/" $HOME/.tmux.conf
when: not file_data.stat.exists
......
......@@ -17,6 +17,8 @@
### TODO: Maybe launch the kubernetes cluster services from the first 'lead' server
### nginxProxy (soon-to-be deprecated), buildkitd, gitlab-runner/gitlab-agents
- name: HANDLE KUBERNETES ADMIN
hosts: "{{ (groups.admin + groups.lead) | first }}"
remote_user: root
......@@ -142,19 +144,9 @@
rescue:
- name: create cluster serviceaccounts
command: bash $HOME/code/scripts/kube/createClusterAccounts.sh -v {{ runner_img_version }} -d {{ root_domain_name }} \
-a {{ private_ip }} -l {{ root_domain_name }} -b buildkitd-0 {{ (gitlab_tokens['service'] != '') | ternary(kube_tokens, '') }} -u
-a {{ private_ip }} -l {{ root_domain_name }} -b buildkitd-0 {{ (gitlab_tokens['service'] != '') | ternary(kube_tokens, '') }} -u -o
when: admin_servers | int > 0
### TODO: New agent based connection
#- name: add cluster to gitlab
# block:
# - name: check cluster added
# command: consul kv get kube/gitlab_integrated
# changed_when: false
# rescue:
# - name: add cluster
# command: bash $HOME/code/scripts/kube/addClusterToGitlab.sh -d {{ root_domain_name }} -u -r
# when: admin_servers | int > 0
- name: add agents to gitlab
block:
- name: check agents added
......
#!/bin/bash
AGENT_NAME_DEFAULTS=( "review" "dev" "beta" "production" )
###! Discussion/brainstorming regarding agents/runners/service accounts moved to:
###! https://gitlab.codeopensrc.com/os/workbench/-/issues/45
## For now a single agent works - going to use 1 until we have a reason not to
AGENT_NAME_DEFAULTS=( "review" )
#AGENT_NAME_DEFAULTS=( "review" "dev" "beta" "production" )
GL_CLUSTER_AGENT_ROLE_NAME=gitlab-agent-clusterrole
GL_AGENT_FILE_LOCATION=$HOME/.kube/gitlab-agent-service-account.yaml
## For convenience and so it is not required to keep a PAT available, we create a temp
## PAT and revoke it at the bottom
......@@ -145,11 +153,37 @@ fi
helm repo add gitlab https://charts.gitlab.io
helm repo update
###! Discussion/brainstorming regarding agents/runners/service accounts moved to:
###! https://gitlab.codeopensrc.com/os/workbench/-/issues/45
#cat <<-EOF > $GL_AGENT_FILE_LOCATION
#apiVersion: rbac.authorization.k8s.io/v1
#kind: ClusterRole
#metadata:
# name: $GL_CLUSTER_AGENT_ROLE_NAME
#rules:
#- apiGroups: [""]
# resources: ["pods"]
# verbs: ["get", "list", "watch", "create", "delete"]
#- apiGroups: [""]
# resources: ["pods/exec", "pods/attach"]
# verbs: ["create", "patch", "delete"]
#- apiGroups: [""]
# resources: ["pods/log"]
# verbs: ["get"]
#- apiGroups: [""]
# resources: ["secrets"]
# verbs: ["get", "create", "update", "delete"]
#- apiGroups: [""]
# resources: ["configmaps"]
# verbs: ["create", "update", "delete"]
#---
#EOF
## Register multiple agents and install via helm
for AGENT_NAME in ${AGENT_NAMES[@]}; do
## Register agent
echo "AGENT_NAME: $AGENT_NAME"
## Register agent
AGENT_ID=$(curl --silent -X POST -H "PRIVATE-TOKEN: ${TOKEN_UUID}" \
-H "Content-Type: application/json" \
--data '{ "name": "'${AGENT_NAME}'" }' \
......@@ -164,6 +198,8 @@ for AGENT_NAME in ${AGENT_NAMES[@]}; do
--url "$GL_API_URL/projects/$GITLAB_AGENT_PROJECT_ID/cluster_agents/$AGENT_ID/tokens" \
| jq -r .token)
###! Discussion/brainstorming regarding agents/runners/service accounts moved to:
###! https://gitlab.codeopensrc.com/os/workbench/-/issues/45
## By default the agent has cluster-admin ClusterRoleBinding
## set rbac.create=false to not attach the cluster role
## We'll create and attach our own roles to each agent like we do runners
......@@ -174,136 +210,53 @@ for AGENT_NAME in ${AGENT_NAMES[@]}; do
NAMESPACE=$AGENT_NAME
## TODO: Review role/rolebinding for agents
if [[ $AGENT_NAME = "review" ]]; then
SERVICE_ACCOUNT=gitlab-review-agent
NAMESPACE=gitlab-review-agent
SERVICE_ACCOUNT=default
NAMESPACE=gitlab-agent
else
SERVICE_ACCOUNT=gitlab-deploy-agent;
NAMESPACE=gitlab-deploy-agent;
fi
## TODO: Mixing agents and runners aint great, but also mixing environments aint great
## Having a NS per runner and per agent seems like overkill but according to the rolebindings docs
## just having edit access in the namespace allows access to any other service account in the NS
## Runners need to be able to access secrets for the registry and agents need to be able to at least create
## pods/runners.. so we're in a bit of a pickle.
## If agents and runners are in their own namespace, they need to be able to create pods in another namespace
## Maybe a compromise is just "review/feature" agent namespace and "dev,beta,prod" agent namespace
## Then the same for runners "review/feature" runner namespace and "dev,beta,prod" runner namespace (we kinda do this with 'review' and 'deploy')
## Then do what we're doing with a namespace for each tier/stage of apps review, dev, beta, and prod
## https://kubernetes.io/docs/reference/access-authn-authz/rbac/#user-facing-roles
## Time to revisit hierarchical namespaces - agent > runner-that-can-create-HNS > HNS
#https://github.com/kubernetes-sigs/hierarchical-namespaces/releases
###### Long winded brainstorm write-up/vomit
## The agent can create/delete runner pods etc.
## Give the runner the ability to create dynamic hierarchical namespace but not delete namespaces
## Then just have a cronjob schedule deletes on HNS's however we see fit
## This gives isolation of pods/resources (kubectl get pods --all -n HNS), nothing actually running in the runner NS to worry about
## (can probably limit ability to create pods in its own NS along with anything else, maybe a setup step to create a SA for that HNS),
## then subsequent pipeline steps use this newly setup SA with admin access of this very isolated namespace, which gives runners
## full ability to manage that HNS's resources, and gives us a way to clean up the namespace without accidentally
## deleting important namespace/resources (we can label these namespaces)
## Agent in NS1 creates runner pod in RUNNER_NS1 which can only create HNS in RUNNER_NS1
## POD in RUNNER_NS1 creates DYNAMIC_HNS1 and a service account for DYNAMIC_HNS1 with admin access
## Now followup pipeline steps use (admin) service account in DYNAMIC_HNS1 to create pods/ingress/secrets etc freely but limited to
## DYNAMIC_HNS1 without being able to impersonate another service account or worry about other apps/resources
## Cleanup job deletes HNS if its been active/inactive for 3 days or something idk
## The only thing I think that needs to be figured out is dynamically creating a service account with admin for this new HNS then
## having follow up pipeline steps use that service account.
## I think thats how and why we would use the following settings in gitlab
## - bearer_token_overwrite_allowed
## - namespace_overwrite_allowed
## - service_account_overwrite_allowed
## All resources would be something like review-hns-MY_COOL_NAMESPACE etc and pipeline steps then are allowed to overwrite to use
## our new unique namespace/resources based on our branch name
## Hopefully in allowing the creation of HNS we're allowed to create service accounts for it, that seems to be a bottleneck
## review-runner-ns
## Hell hopefully we can allow creating HNS only within that namespace and not anywhere else etc.
## Answer to the "only within the HNS", the answer is YES - subnamespaces:
## https://github.com/kubernetes-sigs/multi-tenancy/blob/master/incubator/hnc/docs/user-guide/concepts.md#basic-subns
## kubectl hns create review-runner -n review-agent
## kubectl -n review-runner create serviceaccount review-runner
## kubectl -n review-runner create role create-sa-for-apps --verb=create,delete,update --resource=serviceaccounts (or clusterrole)
## kubectl -n review-runner create rolebinding create-sa-review-runner --role create-sa-for-apps --serviceaccount=review-agent:review-runner
## Now that the create-sa-review-runner SA has the ability to create rolebindings in review-runner, how can that be abused in CI
## I can now create a rolebinding that allows cool-new-app-sa in cool-new-app to get/create/delete secrets in thatguys-cool-new-app which is bad
## I have project-A with access to review-runner
## He has project-B with access to review-runner
## With access to review-runner, he can create a rolebind for his-cool-app-sa in project-B to access my-cool-app secrets in project-A
## stuck again
## but rolebindings cant cross namespaces tho now that I remember... hmmmm
########
########
########
########
## On push, create a NS and service account for branch that has nothing to do with .gitlab-ci.yaml file, and having the user use
## our shared runner (that becomes the appeal of using it) that can only interact with that NS with that service account
## This is harder than it looks and we're still stuck there
## Letting people use runners for OUR project is not a problem anymore, but new projects to use our shared runner is...
########
## Can we limit a service account to ONLY ALLOW CREATING SUBNAMESPACES to our runner, and only get the service account
## token for the subnamespace we just created
## Can we make like a kubernetes hook to like on `kubectl hns create` or `kubectl create ns` to grab the service account
## token and have it stored in a variable
## Maybe we force the user to provide a KUBE_SERVICE_TOKEN CI variable, auto assign it to this newly created namespace's
## service account, then you overwrite the bearer_token with this variable to use this new namespace or it just fails
## Forcing the user to provide a unique token to assign to the unique namespace's service account...??
## Basically the review-runner can only create namespaces, which means you cant do a deploy of any kind,
## so for the following steps to be able to deploy/delete/push etc, the runner must have a token provided to overwrite the default token
## for the new namespace, then use bearer_token_overwrite, using the new namespace, allows the runner to get/create/update as the default account
## of that namespace
## Can we manually populate this service token based on like the
########
########
########
## review-agent
## |-- review-runner
## AS service account review-runner (with limited access) can
## kubectl hns create review-cool-new-app -n review-runner
## kubectl -n review-cool-new-app create serviceaccount cool-app-sa
## Dont need to create admin as its a clusterrole already
## kubectl -n review-cool-new-app create rolebinding cool-app-admin --role admin --serviceaccount=review-runner:cool-app-sa
## review-agent
## |-- review-runner
## |-- review-cool-new-app
## now AS service-account cool-app-admin further down in pipelines
## kubectl get deploy,secrets,svc,pod -n review-agent
## NOPE
## kubectl get deploy,secrets,svc,pod -n review-runner
## NOPE
## kubectl get deploy,secrets,svc,pod -n review-cool-new-app
## review-cool-new-app-deploy-1
## review-cool-new-app-secret-1
## review-cool-new-app-svc-1
## review-cool-new-app-pod-1
## Allows us to let runners create unique namespaces and limited service accounts on the fly without giving
## them full access to create/manage namespaces/serviceaccounts cluster-wide
## Is there a way to allow the review-runner service account to make service accounts and rolebindings for dynamic/new namespaces
## Allowing the review-runner SA to create namespaces (as long as it cant delete them) isnt that big of a deal
## But that review-runner SA now needs to be able to now at least be able to create a SA and rolebinding in that new namespace
## Then we have the SA/namespace override to the newly made/dynamic NS/service account that needs to have admin access
## Feels like this can work if we get over those obstacles
#https://github.com/kubernetes-sigs/hierarchical-namespaces/releases
###! Discussion/brainstorming regarding agents/runners/service accounts moved to:
###! https://gitlab.codeopensrc.com/os/workbench/-/issues/45
### For now we'll just mimic runner permissions until we determine exact permissions needed
### Agent names mimic our runner namespaces
#if [[ $AGENT_NAME = "review" ]] || [[ $AGENT_NAME = "dev" ]]; then
#if [[ $AGENT_NAME = "review" ]]; then
# cat <<-EOF >> $GL_AGENT_FILE_LOCATION
# apiVersion: v1
# kind: ServiceAccount
# metadata:
# name: $SERVICE_ACCOUNT
# namespace: $NAMESPACE
# ---
# EOF
#fi
###! Discussion/brainstorming regarding agents/runners/service accounts moved to:
###! https://gitlab.codeopensrc.com/os/workbench/-/issues/45
### Going to try one agent in one namespace with a rolebinding in its own namespace and see if we
### can use our runners with service accounts to deploy to all namespaces based on runner service account permissions
#if [[ $AGENT_NAME = "review" ]]; then
# cat <<-EOF >> $GL_AGENT_FILE_LOCATION
# apiVersion: rbac.authorization.k8s.io/v1
# kind: RoleBinding
# metadata:
# name: gitlab-agent-rolebinding
# namespace: $NAMESPACE
# subjects:
# - kind: ServiceAccount
# name:
# namespace: $NAMESPACE
# roleRef:
# kind: ClusterRole
# name: $GL_CLUSTER_AGENT_ROLE_NAME
# apiGroup: rbac.authorization.k8s.io
# ---
# EOF
#fi
#kubectl apply -f $GL_AGENT_FILE_LOCATION
helm upgrade --install $AGENT_NAME gitlab/gitlab-agent \
--namespace $NAMESPACE \
......
#!/bin/bash
#######
####### SCRIPT SUNSET AS OF 11/20/22
# Gitlab deprecated cert-based k8s cluster integration @ 15.0.0
# Below are some docs revolving around it
# In order to enable the cert based integration until 17 when its removed, in the rails console
# Feature.enable(:certificate_based_clusters)
#https://docs.gitlab.com/ee/update/deprecations.html#self-managed-certificate-based-integration-with-kubernetes
#https://gitlab.com/groups/gitlab-org/configure/-/epics/8#sunsetting-timeline-plan
#https://docs.gitlab.com/ee/administration/feature_flags.html#enable-or-disable-the-feature
#######
CLUSTER_NAME_DEFAULTS=( "review" "dev" "beta" "production" )
## For convenience and so it is not required to keep a PAT available, we create a temp
......@@ -50,15 +62,32 @@ fi
## Admin > AppSettings > Network > Outbound requests
CLUSTER_API_ADDR="https://$(kubectl get --raw /api | jq -r '.serverAddressByClientCIDRs[].serverAddress')"
SECRET=$(kubectl get secrets | grep default-token | cut -d " " -f1)
CERT=$(kubectl get secret ${SECRET} -o jsonpath="{['data']['ca\.crt']}" | base64 --decode)
CERT=$(kubectl config view --raw --minify --flatten -o jsonpath='{.clusters[].cluster.certificate-authority-data}' | base64 -d)
## Gitlab accepts \r\n for newlines in cert pem
FORMATTED_CERT=${CERT//$'\n'/'\r\n'}
## NOTE: This requires the gitlab service account in kubernetes before running
SERVICE_TOKEN_1=$(kubectl -n kube-system describe secret $(kubectl -n kube-system get secret | grep gitlab | awk '{print $1}'))
SERVICE_TOKEN=$(echo "$SERVICE_TOKEN_1" | sed -nr "s/token:\s+(.*)/\1/p")
## NOTE: Adding the service account and clusterrolebinding was removed from createClusterAccounts.sh
## I have not tried adding the service account and clusterrolebinding this way but it should work
kubectl create serviceaccount gitlab --namespace=kube-system
kubectl create clusterrolebinding gitlab-admin --clusterrole=cluster-admin --serviceaccount=kube-system:gitlab
GL_ADMIN_FILE_LOCATION=$HOME/.kube/gitlab-admin-service-account.yaml
cat <<EOF > $GL_ADMIN_FILE_LOCATION
---
apiVersion: v1
kind: Secret
metadata:
name: gitlab-secret
namespace: kube-system
annotations:
kubernetes.io/service-account.name: gitlab
type: kubernetes.io/service-account-token
EOF
kubectl apply -f $GL_ADMIN_FILE_LOCATION
sleep 10;
SERVICE_TOKEN_TXT=$(kubectl -n kube-system describe secret gitlab-secret)
SERVICE_TOKEN=$(echo "$SERVICE_TOKEN_TXT" | sed -nr "s/token:\s+(.*)/\1/p")
for CLUSTER_NAME in ${CLUSTER_NAMES[@]}; do
......
......@@ -75,7 +75,6 @@ BUILDKITD_POD_NAME="buildkitd-0"
GL_DEPLOY_FILE_LOCATION=$HOME/.kube/gitlab-deploy-service-account.yaml
GL_REVIEW_FILE_LOCATION=$HOME/.kube/gitlab-review-service-account.yaml
GL_ADMIN_FILE_LOCATION=$HOME/.kube/gitlab-admin-service-account.yaml
GL_NAMESPACE_FILE_LOCATION=$HOME/.kube/gitlab-namespaces.yaml
GL_BUILDKIT_FILE_LOCATION=$HOME/.kube/gitlab-buildkit-service-account.yaml
GL_BUILDKIT_CLUSTER_ROLE_NAME="buildkit-access-clusterrole"
......@@ -89,7 +88,7 @@ RUNNER_TOKEN=""
KUBE_API_HOST_URL=""
KUBE_VERSION="latest"
while getopts "a:b:d:h:i:l:n:t:v:ru" flag; do
while getopts "a:b:d:h:i:l:n:t:v:ou" flag; do
# These become set during 'getopts' --- $OPTIND $OPTARG
case "$flag" in
a) KUBE_API_HOST_URL=$OPTARG;;
......@@ -101,7 +100,7 @@ while getopts "a:b:d:h:i:l:n:t:v:ru" flag; do
n) NAMESPACE=$OPTARG;;
t) RUNNER_TOKEN=$OPTARG;;
v) KUBE_VERSION=$OPTARG;;
r) REGISTER="true";;
o) OPEN_RUNNERS="true";;
u) USE_NAMESPACES_DEFAULTS="true";;
esac
done
......@@ -109,10 +108,12 @@ done
## TODO: Need to convert a string "dev,beta,production" into array
if [[ -z "$RUNNER_TOKEN" ]]; then RUNNER_TOKEN=$(consul kv get gitlab/runner_token); exit; fi
if [[ -z "$RUNNER_TOKEN" ]]; then
## Get tmp root password if using fresh instance. Otherwise this fails like it should
TMP_ROOT_PW=$(sed -rn "s|Password: (.*)|\1|p" /etc/gitlab/initial_root_password)
RUNNER_TOKEN=$(bash $HOME/code/scripts/misc/getRunnerToken.sh -u root -p $TMP_ROOT_PW -d $RUNNER_HOST_DOMAIN)
consul kv put gitlab/runner_token $RUNNER_TOKEN
fi
if [[ -z "$RUNNER_TOKEN" ]]; then echo "Runner token not provided. Use -t"; exit; fi
......@@ -140,44 +141,17 @@ PORT_REG=":[0-9]{2,5}$"
if [[ ! $KUBE_API_HOST_URL =~ "https://" ]]; then KUBE_API_HOST_URL="https://${KUBE_API_HOST_URL}"; fi
if [[ ! $KUBE_API_HOST_URL =~ $PORT_REG ]]; then KUBE_API_HOST_URL="${KUBE_API_HOST_URL}:6443"; fi
### We're deprecating shell runners completely eventually, only k8s runners going forward
### The _only_ reason we have a shell runner is for unity builds on a build machine until
### we create special runners to build unity assets inside pods
### TODO: Use helm charts to install runners into cluster instead of binary on machine
### TODO: Eventually minio credentials inside helm runners as well
curl -L "https://packages.gitlab.com/install/repositories/runner/gitlab-runner/script.deb.sh" | sudo bash
apt-cache madison gitlab-runner
sudo apt-get install gitlab-runner jq -y
sudo usermod -aG docker gitlab-runner
sed -i "s|concurrent = 1|concurrent = 3|" /etc/gitlab-runner/config.toml
# 1.
## Create gitlab service account clusterwide
### New agent based approach should no longer need this
### TODO: Would like to solve the dynamic namespace per branch/project type approach but
### for now would just like the original functionality to work (minus dynamic namespaces)
#cat <<EOF > $GL_ADMIN_FILE_LOCATION
#apiVersion: v1
#kind: ServiceAccount
#metadata:
# name: gitlab
# namespace: kube-system
#---
#apiVersion: rbac.authorization.k8s.io/v1
#kind: ClusterRoleBinding
#metadata:
# name: gitlab-admin
#subjects:
# - kind: ServiceAccount
# name: gitlab
# namespace: kube-system
#roleRef:
# kind: ClusterRole
# name: cluster-admin
# apiGroup: rbac.authorization.k8s.io
#EOF
#
#kubectl apply -f $GL_ADMIN_FILE_LOCATION
# 2.
## Create namespaces
cat <<EOF > $GL_NAMESPACE_FILE_LOCATION
apiVersion: v1
......@@ -272,12 +246,10 @@ EOF
kubectl apply -f $GL_BUILDKIT_FILE_LOCATION
### CA_FILE
### TODO: Theres no longer a secret/default token to place at the pub-ca.crt location
#SECRET=$(kubectl get secrets | grep default-token | cut -d " " -f1)
#CERT=$(kubectl get secret ${SECRET} -o jsonpath="{['data']['ca\.crt']}" | base64 --decode)
## Another way
CERT=$(kubectl config view --raw --minify --flatten -o jsonpath='{.clusters[].cluster.certificate-authority-data}' | base64 -d)
#### We can probably just do the above straight into > $PUB_CA_FILE_LOCATION but for now
#### keeping it cause it works and illustrates an interesting workaround
## When storing the decoded cert into a bash var it screws with newlines
## Our best alternative is using printf with newlines, but it splits the space between BEGIN/END CERT
## This illustrates how to deal with it - would like a better way
......@@ -314,7 +286,6 @@ for NAMESPACE in ${NAMESPACES[@]}; do
BUILDER_TAG_LIST="${BUILDER_TAG_LIST/kubernetes_builder/kubernetes_builder_prod}"
fi
# 3.
## Create service account(s) (Currently doing per NS)
## Create a RoleBinding with the ClusterRole cluster-admin/admin privileges in each of the namespaces
......@@ -469,7 +440,12 @@ for NAMESPACE in ${NAMESPACES[@]}; do
BUILDER_SERVICE_TOKEN=$(echo "$BUILDER_SERVICE_TOKEN_TXT" | sed -nr "s/token:\s+(.*)/\1/p")
sudo gitlab-runner unregister --name "${BUILDER_ACCOUNT_NAME}-kube-builder"
## TODO: These will be default - for testing they are shared atm
if [[ -n $OPEN_RUNNERS ]]; then
PAUSED_AND_LOCKED_ARGS="--locked='false'"
else
PAUSED_AND_LOCKED_ARGS="--paused --locked"
fi
sudo gitlab-runner register \
--url "$DEFAULT_RUNNER_HOST_URL" \
--registration-token "$RUNNER_TOKEN" \
......@@ -477,6 +453,7 @@ for NAMESPACE in ${NAMESPACES[@]}; do
--executor kubernetes \
--tag-list "$TAG_LIST" \
--run-untagged="false" \
${PAUSED_AND_LOCKED_ARGS} \
--access-level="$ACCESS_LEVEL" \
--name "${DEPLOY_ACCOUNT_NAME}-kube-runner" \
--kubernetes-image "$DEFAULT_KUBE_IMAGE" \
......@@ -485,10 +462,7 @@ for NAMESPACE in ${NAMESPACES[@]}; do
--kubernetes-namespace "$DEPLOY_FROM_NAMESPACE_NAME" \
--kubernetes-service-account "$DEPLOY_ACCOUNT_NAME" \
--kubernetes-bearer_token "$DEPLOY_SERVICE_TOKEN" \
--kubernetes-ca-file "$PUB_CA_FILE_LOCATION" \
--paused \
--locked
#--locked="false"
--kubernetes-ca-file "$PUB_CA_FILE_LOCATION"
sudo gitlab-runner register \
--url "$DEFAULT_RUNNER_HOST_URL" \
......@@ -497,6 +471,7 @@ for NAMESPACE in ${NAMESPACES[@]}; do
--executor kubernetes \
--tag-list "${BUILDER_TAG_LIST}" \
--run-untagged="false" \
${PAUSED_AND_LOCKED_ARGS} \
--access-level="$ACCESS_LEVEL" \
--name "${BUILDER_ACCOUNT_NAME}-kube-builder" \
--kubernetes-image "$DEFAULT_KUBE_IMAGE" \
......@@ -505,10 +480,7 @@ for NAMESPACE in ${NAMESPACES[@]}; do
--kubernetes-namespace "$BUILD_FROM_NAMESPACE_NAME" \
--kubernetes-service-account "$BUILDER_ACCOUNT_NAME" \
--kubernetes-bearer_token "$BUILDER_SERVICE_TOKEN" \
--kubernetes-ca-file "$PUB_CA_FILE_LOCATION" \
--paused \
--locked
#--locked="false"
--kubernetes-ca-file "$PUB_CA_FILE_LOCATION"
#--kubernetes-namespace "$NAMESPACE" \
......
......@@ -34,6 +34,9 @@ done
## Encrypt
#gpg --encrypt -r KEY -r KEY FILE
#gpg --encrypt --compress-algo 0 -z 0 "${RECIPIENTS[@]}" -o ${FILE}.gpg $FILE
echo "== Attempting to remove any previous ${FILE}.gpg"
rm ${FILE}.gpg
echo "== Encrypting ${FILE} --> ${FILE}.gpg"
gpg --encrypt -z 0 "${RECIPIENTS[@]}" -o ${FILE}.gpg $FILE
Supports Markdown
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment