update: remove obsolete traefik service configuration and add playbook for copying K3s config
This commit is contained in:
parent
514e05a1f6
commit
eba7f8322d
4 changed files with 79 additions and 57 deletions
@@ -130,45 +130,6 @@ EOF
     helm install traefik traefik/traefik --namespace kube-system -f /tmp/traefik-values.yaml
 fi
-
-
-
-# cat > traefik-turn-service.yaml << EOF
-# apiVersion: v1
-# kind: Service
-# metadata:
-#   name: traefik-turn
-#   namespace: kube-system
-#   labels:
-#     app.kubernetes.io/instance: traefik-traefik
-#     app.kubernetes.io/name: traefik
-# spec:
-#   type: LoadBalancer
-#   ports:
-#     - name: turn-tcp
-#       port: 1194
-#       protocol: TCP
-#       targetPort: turn-tcp
-#     - name: turn-udp
-#       port: 1194
-#       protocol: UDP
-#       targetPort: turn-udp
-#   selector:
-#     app.kubernetes.io/instance: traefik-traefik
-#     app.kubernetes.io/name: traefik
-# EOF
-
-# kubectl apply -f traefik-turn-service.yaml
-
-# rm -f traefik-turn-service.yaml
-
-
-
-
-# echo "Don't forget to create TCP and UDP ingress routes for the TURN server with:"
-# echo "kubectl apply -f k8s-manifests/galene/ingressroute-tcp.yaml"
-# echo "kubectl apply -f k8s-manifests/galene/ingressroute-udp.yaml"
-# echo ""
 echo "To access the dashboard:"
 echo "kubectl port-forward -n kube-system \$(kubectl get pods -n kube-system -l \"app.kubernetes.io/name=traefik\" -o name) 9000:9000"
 echo "Then visit http://localhost:9000/dashboard/ in your browser"
vagrant/dev/ubuntu/ansible/copy_k8s_config.yaml (new file, 70 lines)
@@ -0,0 +1,70 @@
+---
+- name: Copy K3s configuration from vm1 to localhost
+  hosts: localhost
+  connection: local
+  become: true
+  become_user: root
+
+  tasks:
+    - name: Ensure .kube directory exists
+      ansible.builtin.file:
+        path: /home/vagrant/.kube
+        state: directory
+        owner: vagrant
+        group: vagrant
+        mode: "0700"
+
+    - name: Copy kubeconfig using scp directly
+      ansible.builtin.command: >
+        scp -i ~/.ssh/vm1_key -o StrictHostKeyChecking=no
+        vagrant@192.168.56.80:/home/vagrant/.kube/config
+        /home/vagrant/.kube/config
+      become: false
+
+    - name: Ensure proper ownership
+      ansible.builtin.file:
+        path: /home/vagrant/.kube/config
+        owner: vagrant
+        group: vagrant
+        mode: "0600"
+      become: false
+
+    - name: Copy k3s binary to /usr/local/bin/k3s
+      ansible.builtin.command: >
+        scp -i ~/.ssh/vm1_key -o StrictHostKeyChecking=no
+        vagrant@192.168.56.80:/usr/local/bin/k3s
+        /usr/local/bin/k3s
+      become: true
+
+    - name: Symlink k3s to kubectl
+      ansible.builtin.file:
+        src: /usr/local/bin/k3s
+        dest: /usr/local/bin/kubectl
+        state: link
+      become: true
+
+    - name: Ensure KUBECONFIG is set in vagrant .bashrc
+      ansible.builtin.lineinfile:
+        path: /home/vagrant/.bashrc
+        line: "export KUBECONFIG=~/.kube/config"
+        state: present
+        insertafter: EOF
+        owner: vagrant
+        group: vagrant
+        mode: "0644"
+
+    - name: Ensure kubectl completion is sourced in vagrant .bashrc
+      ansible.builtin.lineinfile:
+        path: /home/vagrant/.bashrc
+        line: "source <(kubectl completion bash)"
+        state: present
+        insertafter: EOF
+        owner: vagrant
+        group: vagrant
+        mode: "0644"
+
+    - name: Replace 127.0.0.1:6443 with 192.168.56.80:6443 in .kube/config
+      ansible.builtin.replace:
+        path: /home/vagrant/.kube/config
+        regexp: "127.0.0.1:6443"
+        replace: "192.168.56.80:6443"
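A quick way to verify the result once this playbook has run, sketched here under the assumption that the kubeconfig and k3s binary land in the paths used above and that the API server answers at 192.168.56.80:6443:

    # use the kubeconfig copied and rewritten by the playbook above
    export KUBECONFIG=/home/vagrant/.kube/config
    kubectl get nodes -o wide          # the K3s nodes should report Ready
    kubectl get pods -n kube-system    # traefik and core components should be Running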
@@ -103,23 +103,6 @@ else
     echo "Provisioning block already present in $BASHRC"
 fi
 
-echo ""
-echo "user id is $(id)"
-echo "group id is $(groups)"
-
-ls -al /home/vagrant/ansible
-echo ""
-cat /vagrant/ansible/ansible_inventory.ini
-echo ""
-
-echo "root keys"
-
-ls -al ~/.ssh/vm*_key
-
-echo "vagrant keys"
-
-ls -al /home/vagrant/.ssh/vm*_key
-
 ANSIBLE_HOST_KEY_CHECKING=False ansible --inventory-file /home/vagrant/ansible/ansible_inventory.ini -m ping all
 
 if [ $? -ne 0 ]; then
@@ -136,9 +119,17 @@ fi
 echo "Keepalived installation completed."
 
 # install_k3s_3node.yaml
-ANSIBLE_HOST_KEY_CHECKING=False ansible-playbook install_k3s_3node.yaml --inventory-file ansible_inventory.ini | cat
+ANSIBLE_HOST_KEY_CHECKING=False ansible-playbook install_k3s_3node.yaml --inventory-file ansible_inventory.ini
 if [ $? -ne 0 ]; then
     echo "Ansible playbook failed. Please check your Vagrant VMs and network configuration."
     exit 1
 fi
 
+# copy_k8s_config.yaml
+ANSIBLE_HOST_KEY_CHECKING=False ansible-playbook copy_k8s_config.yaml --inventory-file ansible_inventory.ini
+if [ $? -ne 0 ]; then
+    echo "Ansible playbook failed. Please check your Vagrant VMs and network configuration."
+    exit 1
+fi