diff --git a/app/directory_generators/ansible_generator.py b/app/directory_generators/ansible_generator.py index e3965501..06b13344 100644 --- a/app/directory_generators/ansible_generator.py +++ b/app/directory_generators/ansible_generator.py @@ -1,816 +1,87 @@ import os project_name = "app/media/MyAnsible" -ansible_dir = project_name -group_vars_dir = os.path.join(ansible_dir, "group_vars") -host_vars_dir = os.path.join(ansible_dir, "host_vars") -roles_dir = os.path.join(ansible_dir, "roles") # Create project directories -os.makedirs(group_vars_dir, exist_ok=True) -os.makedirs(host_vars_dir, exist_ok=True) -os.makedirs(roles_dir, exist_ok=True) - -preinstall_dir = os.path.join(roles_dir, "preinstall") -k8s_dir = os.path.join(roles_dir, "k8s") -init_k8s_dir = os.path.join(roles_dir, "init_k8s") -join_master_dir = os.path.join(roles_dir, "join_master") -join_worker_dir = os.path.join(roles_dir, "join_worker") - -os.makedirs(preinstall_dir, exist_ok=True) -os.makedirs(k8s_dir, exist_ok=True) -os.makedirs(init_k8s_dir, exist_ok=True) -os.makedirs(join_master_dir, exist_ok=True) -os.makedirs(join_worker_dir, exist_ok=True) +os.makedirs(os.path.join(project_name, "group_vars"), exist_ok=True) +os.makedirs(os.path.join(project_name, "host_vars"), exist_ok=True) +os.makedirs(os.path.join(project_name, "roles", "install_docker", "defaults"), exist_ok=True) +os.makedirs(os.path.join(project_name, "roles", "install_docker", "files"), exist_ok=True) +os.makedirs(os.path.join(project_name, "roles", "install_docker", "handlers"), exist_ok=True) +os.makedirs(os.path.join(project_name, "roles", "install_docker", "tasks"), exist_ok=True) +os.makedirs(os.path.join(project_name, "roles", "install_docker", "templates"), exist_ok=True) +os.makedirs(os.path.join(project_name, "roles", "install_docker", "vars"), exist_ok=True) # Create ansible.cfg -with open(os.path.join(ansible_dir, "ansible.cfg"), "w") as ansible_cfg_file: - ansible_cfg_file.write("[defaults]\nhost_key_checking=false\n") - -# Create group_vars/all -with open(os.path.join(group_vars_dir, "all"), "w") as group_vars_file: - group_vars_file.write("""# General -install_ansible_modules: "true" -disable_transparent_huge_pages: "true" - -setup_interface: "false" - -# Network Calico see here for more details https://github.com/projectcalico/calico/releases -calico_operator_url: "https://raw.githubusercontent.com/projectcalico/calico/v3.29.0/manifests/tigera-operator.yaml" -calico_crd_url: "https://raw.githubusercontent.com/projectcalico/calico/v3.29.0/manifests/custom-resources.yaml" -pod_network_cidr: "192.168.0.0/16" - -# DNS -resolv_nameservers: [8.8.8.8, 4.2.2.4] # 403.online - -# Sanction shekan -use_iran: "true" # change it to "false" if you are outside of iran - -# Docker -docker_gpg_key_url: "https://download.docker.com/linux/ubuntu/gpg" -docker_gpg_key_path: "/etc/apt/keyrings/docker.gpg" -docker_apt_repo: "https://download.docker.com/linux/ubuntu" - -# Kubernetes -kubernetes_gpg_keyring_path: "/etc/apt/keyrings/kubernetes-apt-keyring.gpg" -kubernetes_gpg_key_url: "https://pkgs.k8s.io/core:/stable:/v1.31/deb/Release.key" -kubernetes_apt_repo: "https://pkgs.k8s.io/core:/stable:/v1.31/deb/" -k8s_version: "1.31.2" # see here https://kubernetes.io/releases/patch-releases/ and https://github.com/kubernetes/kubernetes/releases +with open(os.path.join(project_name, "ansible.cfg"), "w") as ansible_cfg: + ansible_cfg.write("[defaults]\n") + ansible_cfg.write("host_key_checking=false\n") -# CRI -cri_socket: unix:///var/run/containerd/containerd.sock - 
-# Ansible Connection -ansible_user: root -ansible_port: 22 -ansible_python_interpreter: "/usr/bin/python3" -domain: "devopsgpt.com" -apiserver_url: "devopsgpt.com" -""") +# Create group_vars/docker_nodes +with open(os.path.join(project_name, "group_vars", "docker_nodes"), "w") as docker_nodes: + docker_nodes.write("ansible_port: 22\n") + docker_nodes.write("ansible_user: root\n") # Create hosts -with open(os.path.join(ansible_dir, "hosts"), "w") as hosts_file: - hosts_file.write("""[all] -string private_ip=x.x.x.x -string private_ip=x.x.x.x - -[k8s] -string -string - -[k8s_masters] -string - -[k8s_workers] -string -""") - -# Create kubernetes_playbook.yml -with open(os.path.join(ansible_dir, "kubernetes_playbook.yml"), "w") as playbook_file: - playbook_file.write("""- hosts: all - roles: - - role: preinstall - gather_facts: yes - any_errors_fatal: true - tags: [preinstall] - -- hosts: k8s - roles: - - role: k8s - gather_facts: yes - any_errors_fatal: true - tags: [k8s] - -- hosts: k8s - roles: - - role: init_k8s - gather_facts: yes - any_errors_fatal: true - tags: [init_k8s] - -- hosts: k8s_masters - roles: - - role: preinstall - - role: k8s - - role: join_master - gather_facts: yes - any_errors_fatal: true - tags: [join_master] - -- hosts: k8s_workers - roles: - - role: preinstall - - role: k8s - - role: join_worker - gather_facts: yes - any_errors_fatal: true - tags: [join_worker] -""") - -# Create preinstall files -preinstall_defaults_dir = os.path.join(preinstall_dir, "defaults") -preinstall_files_dir = os.path.join(preinstall_dir, "files") -preinstall_handlers_dir = os.path.join(preinstall_dir, "handlers") -preinstall_tasks_dir = os.path.join(preinstall_dir, "tasks") -preinstall_templates_dir = os.path.join(preinstall_dir, "templates") -preinstall_vars_dir = os.path.join(preinstall_dir, "vars") - -os.makedirs(preinstall_defaults_dir, exist_ok=True) -os.makedirs(preinstall_files_dir, exist_ok=True) -os.makedirs(preinstall_handlers_dir, exist_ok=True) -os.makedirs(preinstall_tasks_dir, exist_ok=True) -os.makedirs(preinstall_templates_dir, exist_ok=True) -os.makedirs(preinstall_vars_dir, exist_ok=True) - -with open(os.path.join(preinstall_defaults_dir, "main.yml"), "w") as defaults_file: - defaults_file.write("") - -with open(os.path.join(preinstall_files_dir, "sample.sh"), "w") as files_file: - files_file.write("") - -with open(os.path.join(preinstall_handlers_dir, "main.yml"), "w") as handlers_file: - handlers_file.write("") - -with open(os.path.join(preinstall_tasks_dir, "basic.yml"), "w") as basic_tasks_file: - basic_tasks_file.write("""- name: Set timezone to UTC - timezone: - name: Etc/UTC - -- name: Set hostname - command: hostnamectl set-hostname {{ inventory_hostname }} - -- name: Remove symlink resolve.conf - file: - path: "/etc/resolv.conf" - state: absent - ignore_errors: true - when: use_iran == "true" - -- name: Configure resolv.conf - template: - src: "resolv.conf.j2" - dest: "/etc/resolv.conf" - mode: "0644" - when: use_iran == "true" - -- name: Add hostname - lineinfile: - path: /etc/hosts - regexp: '^127\\.0\\.0\\.1' - line: "127.0.0.1 {{ inventory_hostname }} localhost" - owner: root - group: root - mode: 0644 - -- name: Install necessary tools - apt: - state: latest - update_cache: true - name: - - vim - - sudo - - wget - - curl - - telnet - - nload - - s3cmd - - cron - - ipset - - lvm2 - - python3 - - python3-setuptools - - python3-pip - - python3-apt - - intel-microcode - - htop - - tcpdump - - net-tools - - screen - - tmux - - byobu - - iftop - - bmon - - iperf - 
- sysstat - - ethtool - - plocate - - thin-provisioning-tools - - conntrack - - stress - - cpufrequtils - - rsync - - xz-utils - - build-essential - - apt-transport-https - - ca-certificates - - software-properties-common - - gnupg-agent - - iptables-persistent - - open-iscsi - - nfs-common - - tzdata - - tree - -- name: Fix broken packages - apt: - state: fixed -""") - -with open(os.path.join(preinstall_tasks_dir, "main.yml"), "w") as tasks_main_file: - tasks_main_file.write("""--- -- name: basic setup - include_tasks: basic.yml -""") - -# Create k8s files -k8s_defaults_dir = os.path.join(k8s_dir, "defaults") -k8s_files_dir = os.path.join(k8s_dir, "files") -k8s_handlers_dir = os.path.join(k8s_dir, "handlers") -k8s_tasks_dir = os.path.join(k8s_dir, "tasks") -k8s_templates_dir = os.path.join(k8s_dir, "templates") -k8s_vars_dir = os.path.join(k8s_dir, "vars") - -os.makedirs(k8s_defaults_dir, exist_ok=True) -os.makedirs(k8s_files_dir, exist_ok=True) -os.makedirs(k8s_handlers_dir, exist_ok=True) -os.makedirs(k8s_tasks_dir, exist_ok=True) -os.makedirs(k8s_templates_dir, exist_ok=True) -os.makedirs(k8s_vars_dir, exist_ok=True) - -with open(os.path.join(k8s_defaults_dir, "main.yml"), "w") as k8s_defaults_file: - k8s_defaults_file.write("") - -with open(os.path.join(k8s_files_dir, "sample.sh"), "w") as k8s_files_file: - k8s_files_file.write("") - -with open(os.path.join(k8s_handlers_dir, "main.yml"), "w") as k8s_handlers_file: - k8s_handlers_file.write("""--- -# handlers file for k8s - -- name: Remove temporary GPG key file - file: - path: "/tmp/docker.list" - state: absent - -- name: Restart kubelet - service: - name: kubelet - state: restarted -""") - -with open(os.path.join(k8s_tasks_dir, "k8s.yml"), "w") as k8s_tasks_k8s_file: - k8s_tasks_k8s_file.write("""- name: Disable SWAP since kubernetes can't work with swap enabled - shell: | - swapoff -a - -- name: Disable SWAP in fstab since kubernetes can't work with swap enabled - replace: - path: /etc/fstab - regexp: '^([^#].*?\\sswap\\s+sw\\s+.*)$' - replace: '# \\1' - -- name: Check if ufw is installed - package_facts: - manager: "auto" - -- name: Disable ufw # just in Ubuntu - ufw: - state: disabled - when: "'ufw' in ansible_facts.packages" - -- name: Ensure kernel modules for containerd are enabled - lineinfile: - path: /etc/modules-load.d/containerd.conf - line: "{{ item }}" - create: yes - state: present - loop: - - overlay - - br_netfilter - -- name: Load kernel modules - command: - cmd: "modprobe {{ item }}" - loop: - - overlay - - br_netfilter - -- name: Ensure sysctl settings for Kubernetes are present - blockinfile: - path: /etc/sysctl.d/kubernetes.conf - block: | - net.bridge.bridge-nf-call-ip6tables = 1 - net.bridge.bridge-nf-call-iptables = 1 - net.ipv4.ip_forward = 1 - create: yes - marker: "# {mark} ANSIBLE MANAGED BLOCK" - owner: root - group: root - mode: '0644' - -- name: Reload sysctl settings - command: - cmd: sysctl --system - -- name: Update apt cache - apt: - update_cache: yes - -- name: Install required packages - apt: - pkg: - - ca-certificates - - curl - - gnupg - - lsb-release - - gpg - state: present - update_cache: yes - -- name: Ensure the /etc/apt/keyrings directory exists - file: - path: /etc/apt/keyrings - state: directory - mode: '0755' # Adjust the permissions as necessary - owner: root # Set the owner, if required - group: root - -- name: Remove existing Docker GPG key if it exists - file: - path: '{{ docker_gpg_key_path }}' - state: absent - -- name: Download Docker GPG key - shell: | - curl -fsSL {{ 
docker_gpg_key_url }} | gpg --dearmor -o {{ docker_gpg_key_path }} - -- name: Determine the architecture - command: dpkg --print-architecture - register: architecture - -- name: Determine the distribution codename - command: lsb_release -cs - register: distribution_codename - -- name: Add Docker APT repository - lineinfile: - path: /etc/apt/sources.list.d/docker.list - create: yes - line: "deb [arch={{ architecture.stdout }} signed-by={{ docker_gpg_key_path }}] {{ docker_apt_repo }} {{ distribution_codename.stdout }} stable" - state: present - -- name: Update apt cache - apt: - update_cache: yes - -- name: Install required packages (containerd) - apt: - pkg: - - containerd.io - state: present - -- name: Generate default containerd configuration - shell: - cmd: containerd config default > /etc/containerd/config.toml - -- name: Replace SystemdCgroup from false to true in containerd config - replace: - path: /etc/containerd/config.toml - regexp: 'SystemdCgroup = false' - replace: 'SystemdCgroup = true' - -- name: Restart containerd service - systemd: - name: containerd - state: restarted - daemon_reload: yes - -- name: Enable containerd service - systemd: - name: containerd - enabled: yes - -- name: Delete the existing Kubernetes APT keyring file if it exists - file: - path: '{{ kubernetes_gpg_keyring_path }}' - state: absent - -- name: Download Kubernetes GPG key - shell: | - curl -fsSL '{{ kubernetes_gpg_key_url }}' | gpg --dearmor -o '{{ kubernetes_gpg_keyring_path }}' - -- name: Add Kubernetes repo - apt_repository: - repo: "deb [signed-by={{ kubernetes_gpg_keyring_path }}] {{ kubernetes_apt_repo }} /" - state: present - filename: kubernetes.list - -- name: Update apt cache - apt: - update_cache: yes - -- name: Install Kubernetes packages - apt: - name: "{{ item }}" - state: present - loop: - - kubeadm=1.31.2-1.1 - - kubelet=1.31.2-1.1 - - kubectl=1.31.2-1.1 - -- name: Hold Kubernetes packages - dpkg_selections: - name: "{{ item }}" - selection: hold - loop: - - kubeadm - - kubelet - - kubectl - - containerd.io - -- name: Configure node ip - lineinfile: - path: /etc/default/kubelet - line: KUBELET_EXTRA_ARGS=--node-ip={{ private_ip }} - create: yes - state: present - notify: Restart kubelet - -- name: Add hosts to /etc/hosts - lineinfile: - path: /etc/hosts - line: "{{ hostvars[item].private_ip }} {{ item }} {{ item }}.{{ domain }}" - state: present - create: no - loop: "{{ groups['all'] }}" - when: hostvars[item].private_ip is defined - -- name: Add apiserver_url to point to the masters temporary - lineinfile: - dest: /etc/hosts - line: "{{ hostvars[groups['k8s_masters'][0]].private_ip }} {{ apiserver_url }}" - state: present - -- name: Pull Kubernetes images | If you got error check your dns and sanction - command: - cmd: kubeadm config images pull -""") - -with open(os.path.join(k8s_tasks_dir, "main.yml"), "w") as k8s_tasks_main_file: - k8s_tasks_main_file.write("""--- -- name: Install kubernetes packages - include_tasks: k8s.yml -""") - -# Create init_k8s files -init_k8s_defaults_dir = os.path.join(init_k8s_dir, "defaults") -init_k8s_files_dir = os.path.join(init_k8s_dir, "files") -init_k8s_handlers_dir = os.path.join(init_k8s_dir, "handlers") -init_k8s_tasks_dir = os.path.join(init_k8s_dir, "tasks") -init_k8s_templates_dir = os.path.join(init_k8s_dir, "templates") -init_k8s_vars_dir = os.path.join(init_k8s_dir, "vars") - -os.makedirs(init_k8s_defaults_dir, exist_ok=True) -os.makedirs(init_k8s_files_dir, exist_ok=True) -os.makedirs(init_k8s_handlers_dir, exist_ok=True) 
-os.makedirs(init_k8s_tasks_dir, exist_ok=True) -os.makedirs(init_k8s_templates_dir, exist_ok=True) -os.makedirs(init_k8s_vars_dir, exist_ok=True) - -with open(os.path.join(init_k8s_defaults_dir, "main.yml"), "w") as init_k8s_defaults_file: - init_k8s_defaults_file.write("") - -with open(os.path.join(init_k8s_files_dir, "sample.sh"), "w") as init_k8s_files_file: - init_k8s_files_file.write("") - -with open(os.path.join(init_k8s_handlers_dir, "main.yml"), "w") as init_k8s_handlers_file: - init_k8s_handlers_file.write("") - -with open(os.path.join(init_k8s_tasks_dir, "cni.yml"), "w") as init_k8s_tasks_cni_file: - init_k8s_tasks_cni_file.write("""- block: - - name: Check if Calico CRDs exist - command: kubectl get crd felixconfigurations.crd.projectcalico.org - register: calico_crd_check - ignore_errors: true - delegate_to: "{{ groups['k8s_masters'][0] }}" - -- block: - - name: Apply CNI plugin (Calico) - command: kubectl create -f {{ calico_operator_url }} - retries: 3 - delay: 3 - - - name: Apply CNI plugin (Calico) - command: kubectl create -f {{ calico_crd_url }} - retries: 3 - delay: 3 - delegate_to: "{{ groups['k8s_masters'][0] }}" - when: calico_crd_check.rc != 0 - run_once: true -""") - -with open(os.path.join(init_k8s_tasks_dir, "initk8s.yml"), "w") as init_k8s_tasks_initk8s_file: - init_k8s_tasks_initk8s_file.write("""- name: Init cluster | Check if kubeadm has already run - stat: - path: "/var/lib/kubelet/config.yaml" - register: kubeadm_already_run - when: inventory_hostname == groups['k8s_masters'][0] - delegate_to: "{{ groups['k8s_masters'][0] }}" - -- block: - - name: Init cluster | Copy kubeadmcnf.yaml - template: - src: kubeadmcnf.yml.j2 - dest: /root/kubeadmcnf.yaml - - - name: Init cluster | Initiate cluster on node groups['kube_master'][0] - shell: kubeadm init --config=/root/kubeadmcnf.yaml - register: kubeadm_init - # Retry is because upload config sometimes fails - until: kubeadm_init is succeeded or "field is immutable" in kubeadm_init.stderr - notify: Restart kubelet - - when: inventory_hostname == groups['k8s_masters'][0] and not kubeadm_already_run.stat.exists - delegate_to: "{{ groups['k8s_masters'][0] }}" - -- block: - - name: Create kubectl directory - file: - path: /root/.kube - state: directory - - - name: Configure kubectl - copy: - src: /etc/kubernetes/admin.conf - dest: /root/.kube/config - remote_src: yes - - - name: Fetch kubeconfig - fetch: - src: /etc/kubernetes/admin.conf - dest: kubeconfig/ - flat: yes - when: inventory_hostname == groups['k8s_masters'][0] - delegate_to: "{{ groups['k8s_masters'][0] }}" - -- name: Sleep for 300 seconds and reboot the Master1 server - wait_for: - timeout: 300 - delegate_to: localhost - -- name: Reboot the servers - command: reboot - async: 1 - poll: 0 - # ignore_errors: yes - delegate_to: "{{ groups['k8s_masters'][0] }}" - -- name: Sleep for 300 seconds to Master1 up and running - wait_for: - timeout: 300 - delegate_to: localhost - # when: use_iran == "true" - -- name: Example Task After Reboot - debug: - msg: "Server back online and ready for tasks." 
-""") - -with open(os.path.join(init_k8s_tasks_dir, "main.yml"), "w") as init_k8s_tasks_main_file: - init_k8s_tasks_main_file.write("""--- -# tasks file for init_k8s - -- name: Initialize kubernetes cluster - include_tasks: initk8s.yml - -- name: Initialize Calico CNI - include_tasks: cni.yml -""") - -# Create join_master files -join_master_defaults_dir = os.path.join(join_master_dir, "defaults") -join_master_files_dir = os.path.join(join_master_dir, "files") -join_master_handlers_dir = os.path.join(join_master_dir, "handlers") -join_master_tasks_dir = os.path.join(join_master_dir, "tasks") -join_master_templates_dir = os.path.join(join_master_dir, "templates") -join_master_vars_dir = os.path.join(join_master_dir, "vars") - -os.makedirs(join_master_defaults_dir, exist_ok=True) -os.makedirs(join_master_files_dir, exist_ok=True) -os.makedirs(join_master_handlers_dir, exist_ok=True) -os.makedirs(join_master_tasks_dir, exist_ok=True) -os.makedirs(join_master_templates_dir, exist_ok=True) -os.makedirs(join_master_vars_dir, exist_ok=True) - -with open(os.path.join(join_master_defaults_dir, "main.yml"), "w") as join_master_defaults_file: - join_master_defaults_file.write("") - -with open(os.path.join(join_master_files_dir, "join-command"), "w") as join_master_files_file: - join_master_files_file.write("") - -with open(os.path.join(join_master_handlers_dir, "main.yml"), "w") as join_master_handlers_file: - join_master_handlers_file.write("") - -with open(os.path.join(join_master_tasks_dir, "join_master.yml"), "w") as join_master_tasks_join_master_file: - join_master_tasks_join_master_file.write("""- name: Init cluster | Check if kubeadm has already run - stat: - path: "/var/lib/kubelet/config.yaml" - register: kubeadm_already_run - -- block: - - name: Generate join command - command: kubeadm token create --print-join-command - register: join_command - - - name: Print join command - debug: - msg: "{{ join_command.stdout_lines[0] }}" - - - name: Copy join command to local file - become: false - local_action: copy content="{{ join_command.stdout_lines[0] }} $@" dest="roles/join_master/files/join-command" - - - name: copy kubeadmcnf.yaml - template: - src: kubeadmcnf-join.yml.j2 - dest: /root/kubeadm-config.yaml - - when: - - inventory_hostname == groups['k8s_masters'][0] - delegate_to: "{{ groups['k8s_masters'][0] }}" - -- block: - - name: Copy the join command to server location - copy: - src: roles/join_master/files/join-command - dest: /root/join-command.sh - mode: "0777" - - when: - - inventory_hostname != groups['k8s_masters'][0] - - inventory_hostname in groups['k8s_masters'] - - not kubeadm_already_run.stat.exists - -- block: - - name: get certificate key - shell: kubeadm init phase upload-certs --upload-certs --config=/root/kubeadm-config.yaml - register: kubeadm_cert_key - - - name: Print certificate key - debug: - msg: "{{ kubeadm_cert_key.stdout_lines[2] }}" - - - name: register the cert key - set_fact: - control_plane_certkey: "{{ kubeadm_cert_key.stdout_lines[2] }}" - - when: - - inventory_hostname in groups['k8s_masters'][0] - delegate_to: "{{ groups['k8s_masters'][0] }}" - run_once: false - delegate_facts: true - -- name: Join | Join control-plane to cluster - command: "sh /root/join-command.sh --control-plane --certificate-key={{ hostvars[groups['k8s_masters'][0]].control_plane_certkey }} --cri-socket={{ cri_socket }}" - when: - - inventory_hostname != groups['k8s_masters'][0] - - inventory_hostname in groups['k8s_masters'] - - not kubeadm_already_run.stat.exists - -- block: - - name: 
Create kubectl directory - file: - path: /root/.kube - state: directory - - - name: Configure kubectl - copy: - src: /etc/kubernetes/admin.conf - dest: /root/.kube/config - remote_src: yes - - - name: Fetch kubeconfig - fetch: - src: /etc/kubernetes/admin.conf - dest: kubeconfig/ - flat: yes - when: - - inventory_hostname != groups['k8s_masters'][0] - - inventory_hostname in groups['k8s_masters'] - - not kubeadm_already_run.stat.exists - -- name: remove apiserver_url to point to the masters temporary - lineinfile: - dest: /etc/hosts - line: "{{ hostvars[groups['k8s_masters'][0]].private_ip }} {{ apiserver_url }}" - state: absent - -- name: Add apiserver_url to point to the masters - lineinfile: - dest: /etc/hosts - line: "{{ private_ip }} {{ apiserver_url }}" - state: present - when: - - inventory_hostname in groups['k8s_masters'] -""") - -with open(os.path.join(join_master_tasks_dir, "main.yml"), "w") as join_master_tasks_main_file: - join_master_tasks_main_file.write("""--- -# tasks file for join_master - -- name: Join master(s) node to cluster - include_tasks: join_master.yml -""") - -# Create join_worker files -join_worker_defaults_dir = os.path.join(join_worker_dir, "defaults") -join_worker_files_dir = os.path.join(join_worker_dir, "files") -join_worker_handlers_dir = os.path.join(join_worker_dir, "handlers") -join_worker_tasks_dir = os.path.join(join_worker_dir, "tasks") -join_worker_templates_dir = os.path.join(join_worker_dir, "templates") -join_worker_vars_dir = os.path.join(join_worker_dir, "vars") - -os.makedirs(join_worker_defaults_dir, exist_ok=True) -os.makedirs(join_worker_files_dir, exist_ok=True) -os.makedirs(join_worker_handlers_dir, exist_ok=True) -os.makedirs(join_worker_tasks_dir, exist_ok=True) -os.makedirs(join_worker_templates_dir, exist_ok=True) -os.makedirs(join_worker_vars_dir, exist_ok=True) - -with open(os.path.join(join_worker_defaults_dir, "main.yml"), "w") as join_worker_defaults_file: - join_worker_defaults_file.write("") - -with open(os.path.join(join_worker_files_dir, "join-command"), "w") as join_worker_files_file: - join_worker_files_file.write("") - -with open(os.path.join(join_worker_handlers_dir, "main.yml"), "w") as join_worker_handlers_file: - join_worker_handlers_file.write("") - -with open(os.path.join(join_worker_tasks_dir, "join_worker.yml"), "w") as join_worker_tasks_join_worker_file: - join_worker_tasks_join_worker_file.write("""- name: Init cluster | Check if kubeadm has already run - stat: - path: "/var/lib/kubelet/config.yaml" - register: kubeadm_already_run - -- block: - - name: Generate join command - command: kubeadm token create --print-join-command - register: join_command - - - name: Print join command - debug: - msg: "{{ join_command.stdout_lines[0] }}" - - - name: Copy join command to local file - become: false - local_action: copy content="{{ join_command.stdout_lines[0] }} $@" dest="roles/join_worker/files/join-command" - - when: - - inventory_hostname not in groups['k8s_masters'][0] - delegate_to: "{{ groups['k8s_masters'][0] }}" - -- block: - - name: Copy the join command to server location - copy: - src: roles/join_worker/files/join-command - dest: /root/join-command.sh - mode: "0777" - - when: - - inventory_hostname not in groups['k8s_masters'] - - not kubeadm_already_run.stat.exists - -- name: Join | Join worker nodes to the cluster - command: sh /root/join-command.sh - when: - - inventory_hostname not in groups['k8s_masters'] - - not kubeadm_already_run.stat.exists -""") - -with open(os.path.join(join_worker_tasks_dir, 
"main.yml"), "w") as join_worker_tasks_main_file: - join_worker_tasks_main_file.write("""--- -# tasks file for join_worker - -- name: Join worker(s) node to cluster - include_tasks: join_worker.yml -""") \ No newline at end of file +with open(os.path.join(project_name, "hosts"), "w") as hosts_file: + hosts_file.write("[docker_nodes]\n") + hosts_file.write("www.example.com\n") + +# Create docker_playbook.yml +with open(os.path.join(project_name, "docker_playbook.yml"), "w") as playbook: + playbook.write("- hosts: all\n") + playbook.write(" roles:\n") + playbook.write(" - install_docker\n") + +# Create install_docker/tasks/main.yml +with open(os.path.join(project_name, "roles", "install_docker", "tasks", "main.yml"), "w") as tasks_file: + tasks_file.write("---\n") + tasks_file.write("- name: Install prerequisite packages\n") + tasks_file.write(" apt:\n") + tasks_file.write(" name: \"{{ item }}\"\n") + tasks_file.write(" state: present\n") + tasks_file.write(" loop: \"{{ prerequisite_packages }}\"\n") + tasks_file.write("- name: Create directory for Docker keyrings\n") + tasks_file.write(" file:\n") + tasks_file.write(" path: /etc/apt/keyrings\n") + tasks_file.write(" state: directory\n") + tasks_file.write(" mode: '0755'\n") + tasks_file.write("- name: Download Docker's official GPG key\n") + tasks_file.write(" get_url:\n") + tasks_file.write(" url: https://download.docker.com/linux/ubuntu/gpg\n") + tasks_file.write(" dest: /etc/apt/keyrings/docker.asc\n") + tasks_file.write(" mode: '0644'\n") + tasks_file.write("- name: Add Docker repository to apt sources\n") + tasks_file.write(" copy:\n") + tasks_file.write(" content: |\n") + tasks_file.write(" deb [arch={{ ansible_architecture }} signed-by=/etc/apt/keyrings/docker.asc] https://download.docker.com/linux/ubuntu {{ ansible_distribution_release }} stable\n") + tasks_file.write(" dest: /etc/apt/sources.list.d/docker.list\n") + tasks_file.write("- name: Update apt cache after adding Docker repo\n") + tasks_file.write(" apt:\n") + tasks_file.write(" update_cache: yes\n") + tasks_file.write("- name: Install Docker packages\n") + tasks_file.write(" apt:\n") + tasks_file.write(" name: \"{{ item }}\"\n") + tasks_file.write(" state: present\n") + tasks_file.write(" loop: \"{{ docker_packages }}\"\n") + tasks_file.write("- name: Ensure Docker and containerd services are started and enabled\n") + tasks_file.write(" service:\n") + tasks_file.write(" name: \"{{ item }}\"\n") + tasks_file.write(" state: started\n") + tasks_file.write(" enabled: yes\n") + tasks_file.write(" loop: \"{{ docker_services }}\"\n") + +# Create install_docker/vars/main.yml +with open(os.path.join(project_name, "roles", "install_docker", "vars", "main.yml"), "w") as vars_file: + vars_file.write("prerequisite_packages:\n") + vars_file.write(" - ca-certificates\n") + vars_file.write(" - curl\n\n") + vars_file.write("docker_services:\n") + vars_file.write(" - docker\n") + vars_file.write(" - containerd\n\n") + vars_file.write("docker_packages:\n") + vars_file.write(" - docker-ce\n") + vars_file.write(" - docker-ce-cli\n") + vars_file.write(" - containerd.io\n") + vars_file.write(" - docker-buildx-plugin\n") + vars_file.write(" - docker-compose-plugin\n") \ No newline at end of file diff --git a/app/media/MyAnsible/group_vars/all b/app/media/MyAnsible/group_vars/all index 03bf2832..eb422e93 100644 --- a/app/media/MyAnsible/group_vars/all +++ b/app/media/MyAnsible/group_vars/all @@ -24,7 +24,7 @@ docker_apt_repo: "https://download.docker.com/linux/ubuntu" kubernetes_gpg_keyring_path: 
"/etc/apt/keyrings/kubernetes-apt-keyring.gpg" kubernetes_gpg_key_url: "https://pkgs.k8s.io/core:/stable:/v1.31/deb/Release.key" kubernetes_apt_repo: "https://pkgs.k8s.io/core:/stable:/v1.31/deb/" -k8s_version: "1.31.2" # see here https://kubernetes.io/releases/patch-releases/ and https://github.com/kubernetes/kubernetes/releases +k8s_version: 1.31 # see here https://kubernetes.io/releases/patch-releases/ and https://github.com/kubernetes/kubernetes/releases # CRI cri_socket: unix:///var/run/containerd/containerd.sock @@ -35,3 +35,4 @@ ansible_port: 22 ansible_python_interpreter: "/usr/bin/python3" domain: "devopsgpt.com" apiserver_url: "devopsgpt.com" + \ No newline at end of file diff --git a/app/media/MyAnsible/hosts b/app/media/MyAnsible/hosts index 79eace5b..dec5110a 100644 --- a/app/media/MyAnsible/hosts +++ b/app/media/MyAnsible/hosts @@ -10,4 +10,4 @@ string string [k8s_workers] -string +string \ No newline at end of file diff --git a/app/media/MyAnsible/kubernetes_playbook.yml b/app/media/MyAnsible/kubernetes_playbook.yml index ea5f7985..d674b26f 100644 --- a/app/media/MyAnsible/kubernetes_playbook.yml +++ b/app/media/MyAnsible/kubernetes_playbook.yml @@ -1,3 +1,4 @@ + - hosts: all roles: - role: preinstall @@ -36,3 +37,5 @@ gather_facts: yes any_errors_fatal: true tags: [join_worker] + + \ No newline at end of file diff --git a/app/media/MyAnsible/roles/init_k8s/tasks/cni.yml b/app/media/MyAnsible/roles/init_k8s/tasks/cni.yml index 516dbff3..c12926c6 100644 --- a/app/media/MyAnsible/roles/init_k8s/tasks/cni.yml +++ b/app/media/MyAnsible/roles/init_k8s/tasks/cni.yml @@ -18,3 +18,5 @@ delegate_to: "{{ groups['k8s_masters'][0] }}" when: calico_crd_check.rc != 0 run_once: true + + \ No newline at end of file diff --git a/app/media/MyAnsible/roles/init_k8s/tasks/initk8s.yml b/app/media/MyAnsible/roles/init_k8s/tasks/initk8s.yml index a1836485..3d616552 100644 --- a/app/media/MyAnsible/roles/init_k8s/tasks/initk8s.yml +++ b/app/media/MyAnsible/roles/init_k8s/tasks/initk8s.yml @@ -62,3 +62,4 @@ - name: Example Task After Reboot debug: msg: "Server back online and ready for tasks." 
+ \ No newline at end of file diff --git a/app/media/MyAnsible/roles/init_k8s/tasks/main.yml b/app/media/MyAnsible/roles/init_k8s/tasks/main.yml index bb40ddec..10fa230e 100644 --- a/app/media/MyAnsible/roles/init_k8s/tasks/main.yml +++ b/app/media/MyAnsible/roles/init_k8s/tasks/main.yml @@ -6,3 +6,4 @@ - name: Initialize Calico CNI include_tasks: cni.yml + \ No newline at end of file diff --git a/app/media/MyAnsible/roles/join_master/tasks/join_master.yml b/app/media/MyAnsible/roles/join_master/tasks/join_master.yml index f82dbee0..b6855cbf 100644 --- a/app/media/MyAnsible/roles/join_master/tasks/join_master.yml +++ b/app/media/MyAnsible/roles/join_master/tasks/join_master.yml @@ -98,3 +98,4 @@ state: present when: - inventory_hostname in groups['k8s_masters'] + \ No newline at end of file diff --git a/app/media/MyAnsible/roles/join_master/tasks/main.yml b/app/media/MyAnsible/roles/join_master/tasks/main.yml index 316b5b1d..a5bf581f 100644 --- a/app/media/MyAnsible/roles/join_master/tasks/main.yml +++ b/app/media/MyAnsible/roles/join_master/tasks/main.yml @@ -3,3 +3,4 @@ - name: Join master(s) node to cluster include_tasks: join_master.yml + \ No newline at end of file diff --git a/app/media/MyAnsible/roles/join_worker/tasks/join_worker.yml b/app/media/MyAnsible/roles/join_worker/tasks/join_worker.yml index b9b94947..899a0522 100644 --- a/app/media/MyAnsible/roles/join_worker/tasks/join_worker.yml +++ b/app/media/MyAnsible/roles/join_worker/tasks/join_worker.yml @@ -36,3 +36,4 @@ when: - inventory_hostname not in groups['k8s_masters'] - not kubeadm_already_run.stat.exists + \ No newline at end of file diff --git a/app/media/MyAnsible/roles/join_worker/tasks/main.yml b/app/media/MyAnsible/roles/join_worker/tasks/main.yml index a43175cc..2cf615b3 100644 --- a/app/media/MyAnsible/roles/join_worker/tasks/main.yml +++ b/app/media/MyAnsible/roles/join_worker/tasks/main.yml @@ -3,3 +3,4 @@ - name: Join worker(s) node to cluster include_tasks: join_worker.yml + \ No newline at end of file diff --git a/app/media/MyAnsible/roles/k8s/handlers/main.yml b/app/media/MyAnsible/roles/k8s/handlers/main.yml index de036f51..989212ab 100644 --- a/app/media/MyAnsible/roles/k8s/handlers/main.yml +++ b/app/media/MyAnsible/roles/k8s/handlers/main.yml @@ -10,3 +10,4 @@ service: name: kubelet state: restarted + \ No newline at end of file diff --git a/app/media/MyAnsible/roles/k8s/tasks/k8s.yml b/app/media/MyAnsible/roles/k8s/tasks/k8s.yml index 4620eef3..d91bbf41 100644 --- a/app/media/MyAnsible/roles/k8s/tasks/k8s.yml +++ b/app/media/MyAnsible/roles/k8s/tasks/k8s.yml @@ -193,3 +193,4 @@ - name: Pull Kubernetes images | If you got error check your dns and sanction command: cmd: kubeadm config images pull + \ No newline at end of file diff --git a/app/media/MyAnsible/roles/k8s/tasks/main.yml b/app/media/MyAnsible/roles/k8s/tasks/main.yml index a0ac6054..ddd388cb 100644 --- a/app/media/MyAnsible/roles/k8s/tasks/main.yml +++ b/app/media/MyAnsible/roles/k8s/tasks/main.yml @@ -1,3 +1,4 @@ --- - name: Install kubernetes packages include_tasks: k8s.yml + \ No newline at end of file diff --git a/app/media/MyAnsible/roles/preinstall/tasks/basic.yml b/app/media/MyAnsible/roles/preinstall/tasks/basic.yml index 43fae8cd..46c59409 100644 --- a/app/media/MyAnsible/roles/preinstall/tasks/basic.yml +++ b/app/media/MyAnsible/roles/preinstall/tasks/basic.yml @@ -80,3 +80,4 @@ - name: Fix broken packages apt: state: fixed + \ No newline at end of file diff --git a/app/media/MyAnsible/roles/preinstall/tasks/main.yml 
b/app/media/MyAnsible/roles/preinstall/tasks/main.yml index 56a88e66..38be7807 100644 --- a/app/media/MyAnsible/roles/preinstall/tasks/main.yml +++ b/app/media/MyAnsible/roles/preinstall/tasks/main.yml @@ -1,3 +1,4 @@ --- - name: basic setup include_tasks: basic.yml + \ No newline at end of file diff --git a/app/routes/ansible.py b/app/routes/ansible.py index f1db3387..367f543c 100644 --- a/app/routes/ansible.py +++ b/app/routes/ansible.py @@ -8,43 +8,54 @@ from app.template_generators.ansible.install.main import ansible_install_template import os +import shutil @app.post("/api/ansible-install/nginx/") async def ansible_install_generation_nginx(request:AnsibleInstallNginx) -> Output: + + if os.environ.get("TEST"): return Output(output='output') - generated_prompt = ansible_install_template(request,"nginx") + + dir = 'app/media/MyAnsible' + if os.path.exists(dir): + shutil.rmtree(dir) + + ansible_install_template(request,"nginx") - output = gpt_service(generated_prompt) - edit_directory_generator("ansible_generator",output) - execute_pythonfile("MyAnsible","ansible_generator") + return Output(output='output') @app.post("/api/ansible-install/docker/") async def ansible_install_generation_docker(request:AnsibleInstallDocker) -> Output: + if os.environ.get("TEST"): return Output(output='output') - generated_prompt = ansible_install_template(request,"docker") + + dir = 'app/media/MyAnsible' + if os.path.exists(dir): + shutil.rmtree(dir) + + ansible_install_template(request,"docker") - output = gpt_service(generated_prompt) - edit_directory_generator("ansible_generator",output) - execute_pythonfile("MyAnsible","ansible_generator") return Output(output='output') @app.post("/api/ansible-install/kuber/") async def ansible_install_generation_kuber(request:AnsibleInstallKuber) -> Output: - + + if os.environ.get("TEST"): return Output(output='output') - generated_prompt = ansible_install_template(request,"kuber") - - output = gpt_service(generated_prompt) - edit_directory_generator("ansible_generator",output) - execute_pythonfile("MyAnsible","ansible_generator") + + dir = 'app/media/MyAnsible' + if os.path.exists(dir): + shutil.rmtree(dir) + + ansible_install_template(request,"kuber") add_files_to_folder(files = ['app/media/kuber_configs/resolv.conf.j2'] , folder='app/media/MyAnsible/roles/preinstall/templates/') add_files_to_folder(files = ['app/media/kuber_configs/kubeadmcnf.yml.j2'] , folder='app/media/MyAnsible/roles/init_k8s/templates/') add_files_to_folder(files = ['app/media/kuber_configs/kubeadmcnf-join.yml.j2'] , folder='app/media/MyAnsible/roles/join_master/templates/') diff --git a/app/template_generators/ansible/install/docker.py b/app/template_generators/ansible/install/docker.py index 74e362df..7445b3f3 100644 --- a/app/template_generators/ansible/install/docker.py +++ b/app/template_generators/ansible/install/docker.py @@ -1,3 +1,4 @@ +import os def ansible_docker_install(input): docker_hosts = input.hosts @@ -13,131 +14,91 @@ def ansible_docker_install(input): - prompt = f""" - Generate a Python code to generate an Ansible project (project name is app/media/MyAnsible) - that dynamically provisions Ansible resources ensuring a modular, flexible structure. Only provide - Python code, no explanations or markdown formatting, without ```python entry. 
- The project should be organized as follows: + + - The structure of this project must be as follows: - ``` - ├── ansible.cfg - ├── group_vars - │   |── docker_nodes - │   - ├── hosts - ├── host_vars - ├── docker_playbook.yml - └── roles - └── install_docker - ├── defaults - │   └── main.yml - ├── files - │   └── sample.sh - ├── handlers - │   └── main.yml - ├── tasks - │   └── main.yml - ├── templates - │   └── sample.j2 - └── vars - └── main.yml - ``` - - The content of ansible.cfg must be as follows: - ``` - [defaults] - host_key_checking=false - ``` - - group_vars directory includes a single file called "docker_nodes" and the content of this file must be as follows: - ``` - ansible_port: {docker_ansible_port} - ansible_user: {docker_ansible_user} - ``` - - there is file called "hosts" which its content must be as follows: - ``` - {docker_inventory} - ``` - - There is an empty directory called "host_vars" with no files included - - There is a file called "docker_playbook.yml" which its content must be as follows: - ``` - - hosts: all - roles: - - install_docker - ``` - - There is a directory called "roles" which a sub-directory called "install_docker" (roles/install_docker) - "install_docker" has multiple sub-directories, so let's dive deeper into each its sub-directories: - - (install_docker/tasks): This path has a file called "main.yml" which its content must be as follows: - ``` - --- - - name: Install prerequisite packages - apt: - name: "{docker_items_in_task}" - state: present - loop: "{docker_prerequisite_packages_in_task}"" - - name: Create directory for Docker keyrings - file: - path: /etc/apt/keyrings - state: directory - mode: '0755' - - name: Download Docker's official GPG key - get_url: - url: https://download.docker.com/linux/ubuntu/gpg - dest: /etc/apt/keyrings/docker.asc - mode: '0644' - - name: Add Docker repository to apt sources - copy: - content: | - deb [arch={ansible_architecture_in_task} signed-by=/etc/apt/keyrings/docker.asc] https://download.docker.com/linux/ubuntu {ansible_distribution_release_in_task} stable - dest: /etc/apt/sources.list.d/docker.list - - name: Update apt cache after adding Docker repo - apt: - update_cache: yes - - name: Install Docker packages - apt: - name: "{docker_items_in_task}" - state: present - loop: "{docker_packages_in_task}"" - - name: Ensure Docker and containerd services are started and enabled - service: - name: "{docker_items_in_task}" - state: started - enabled: yes - loop: "{docker_services_in_task}"" - ``` - - (install_docker/vars): This path has a file called "main.yml" which its content must be as follows: - ``` - prerequisite_packages: - - ca-certificates - - curl + project_name = "app/media/MyAnsible" - docker_services: - - docker - - containerd + # Create project directories + os.makedirs(os.path.join(project_name, "group_vars"), exist_ok=True) + os.makedirs(os.path.join(project_name, "host_vars"), exist_ok=True) + os.makedirs(os.path.join(project_name, "roles", "install_docker", "defaults"), exist_ok=True) + os.makedirs(os.path.join(project_name, "roles", "install_docker", "files"), exist_ok=True) + os.makedirs(os.path.join(project_name, "roles", "install_docker", "handlers"), exist_ok=True) + os.makedirs(os.path.join(project_name, "roles", "install_docker", "tasks"), exist_ok=True) + os.makedirs(os.path.join(project_name, "roles", "install_docker", "templates"), exist_ok=True) + os.makedirs(os.path.join(project_name, "roles", "install_docker", "vars"), exist_ok=True) - docker_packages: - - docker-ce - - docker-ce-cli 
- - containerd.io - - docker-buildx-plugin - - docker-compose-plugin - ``` + # Create ansible.cfg + with open(os.path.join(project_name, "ansible.cfg"), "w") as ansible_cfg: + ansible_cfg.write("[defaults]\n") + ansible_cfg.write("host_key_checking=false\n") - finally just give me a python code without any note that can generate a project folder with the - given schema without ```python entry. and we dont need any base directory in the python code. - the final ansible template must work very well without any error! + # Create group_vars/docker_nodes + with open(os.path.join(project_name, "group_vars", "docker_nodes"), "w") as docker_nodes: + docker_nodes.write(f"ansible_port: {docker_ansible_port}\n") + docker_nodes.write(f"ansible_user: {docker_ansible_user}\n") - the python code you give me, must have structure like that: + # Create hosts + with open(os.path.join(project_name, "hosts"), "w") as hosts_file: + hosts_file.write(f"{docker_inventory}\n") + - import os - project_name = "app/media/MyAnsible" - foo_dir = os.path.join(project_name, "bar") - x_dir = os.path.join(modules_dir, "y") + # Create docker_playbook.yml + with open(os.path.join(project_name, "docker_playbook.yml"), "w") as playbook: + playbook.write("- hosts: all\n") + playbook.write(" roles:\n") + playbook.write(" - install_docker\n") - # Create project directories - os.makedirs(ansible_dir, exist_ok=True) + # Create install_docker/tasks/main.yml + with open(os.path.join(project_name, "roles", "install_docker", "tasks", "main.yml"), "w") as tasks_file: + tasks_file.write("---\n") + tasks_file.write("- name: Install prerequisite packages\n") + tasks_file.write(" apt:\n") + tasks_file.write(" name: \"{{ item }}\"\n") + tasks_file.write(" state: present\n") + tasks_file.write(" loop: \"{{ prerequisite_packages }}\"\n") + tasks_file.write("- name: Create directory for Docker keyrings\n") + tasks_file.write(" file:\n") + tasks_file.write(" path: /etc/apt/keyrings\n") + tasks_file.write(" state: directory\n") + tasks_file.write(" mode: '0755'\n") + tasks_file.write("- name: Download Docker's official GPG key\n") + tasks_file.write(" get_url:\n") + tasks_file.write(" url: https://download.docker.com/linux/ubuntu/gpg\n") + tasks_file.write(" dest: /etc/apt/keyrings/docker.asc\n") + tasks_file.write(" mode: '0644'\n") + tasks_file.write("- name: Add Docker repository to apt sources\n") + tasks_file.write(" copy:\n") + tasks_file.write(" content: |\n") + tasks_file.write(" deb [arch={{ ansible_architecture }} signed-by=/etc/apt/keyrings/docker.asc] https://download.docker.com/linux/ubuntu {{ ansible_distribution_release }} stable\n") + tasks_file.write(" dest: /etc/apt/sources.list.d/docker.list\n") + tasks_file.write("- name: Update apt cache after adding Docker repo\n") + tasks_file.write(" apt:\n") + tasks_file.write(" update_cache: yes\n") + tasks_file.write("- name: Install Docker packages\n") + tasks_file.write(" apt:\n") + tasks_file.write(" name: \"{{ item }}\"\n") + tasks_file.write(" state: present\n") + tasks_file.write(" loop: \"{{ docker_packages }}\"\n") + tasks_file.write("- name: Ensure Docker and containerd services are started and enabled\n") + tasks_file.write(" service:\n") + tasks_file.write(" name: \"{{ item }}\"\n") + tasks_file.write(" state: started\n") + tasks_file.write(" enabled: yes\n") + tasks_file.write(" loop: \"{{ docker_services }}\"\n") - # Create main.tf - with open(os.path.join(project_name, "main.tf"), "w") as main_file: - # any thing you need - """ - return prompt + # Create 
install_docker/vars/main.yml + with open(os.path.join(project_name, "roles", "install_docker", "vars", "main.yml"), "w") as vars_file: + vars_file.write("prerequisite_packages:\n") + vars_file.write(" - ca-certificates\n") + vars_file.write(" - curl\n\n") + vars_file.write("docker_services:\n") + vars_file.write(" - docker\n") + vars_file.write(" - containerd\n\n") + vars_file.write("docker_packages:\n") + vars_file.write(" - docker-ce\n") + vars_file.write(" - docker-ce-cli\n") + vars_file.write(" - containerd.io\n") + vars_file.write(" - docker-buildx-plugin\n") + vars_file.write(" - docker-compose-plugin\n") diff --git a/app/template_generators/ansible/install/kuber.py b/app/template_generators/ansible/install/kuber.py index 5c01e4d0..9bcf8362 100644 --- a/app/template_generators/ansible/install/kuber.py +++ b/app/template_generators/ansible/install/kuber.py @@ -1,4 +1,4 @@ - +import os def ansible_kuber_install(input): kubernetes_ansible_port = input.ansible_port @@ -14,826 +14,809 @@ def ansible_kuber_install(input): } kubernetes_inventory = "\n\n".join(f"{section}\n" + "\n".join(entries) for section, entries in sections.items()) - inventory_hostname = "{{ inventory_hostname }}" - item_in_task = "{{ item }}" - ufw_in_task = "'ufw'" - docker_gpg_key_path_in_task = "{{ docker_gpg_key_path }}" - docker_gpg_key_url_in_task = "{{ docker_gpg_key_url }}" - architecture_stdout_in_task = "{{ architecture.stdout }}" - docker_apt_repo_in_task = "{{ docker_apt_repo }}" - distribution_codename_stdout_in_task = "{{ distribution_codename.stdout }}" - kubernetes_gpg_keyring_path_in_task = "{{ kubernetes_gpg_keyring_path }}" - kubernetes_gpg_key_url_in_task = "{{ kubernetes_gpg_key_url }}" - kubernetes_apt_repo_in_task = "{{ kubernetes_apt_repo }}" - private_ip_in_task = "{{ private_ip }}" - hostvars_private_ip_in_task = "{{ hostvars[item].private_ip }}" - domain_in_task = "{{ domain }}" - groups_all_in_task = "{{ groups['all'] }}" - hostvars_groups_k8s_masters_private_ip_in_task = "{{ hostvars[groups['k8s_masters'][0]].private_ip }}" - apiserver_url_in_task = "{{ apiserver_url }}" - groups_k8s_masters_in_task = "{{ groups['k8s_masters'][0] }}" - calico_operator_url_in_task = "{{ calico_operator_url }}" - calico_crd_url_in_task = "{{ calico_crd_url }}" - join_command_stdout_lines_in_task = "{{ join_command.stdout_lines[0] }}" - kubeadm_cert_key_stdout_lines_in_task = "{{ kubeadm_cert_key.stdout_lines[2] }}" - hostvars_k8s_masters_control_plane_certkey_in_task = "{{ hostvars[groups['k8s_masters'][0]].control_plane_certkey }}" - cri_socket_in_task = "{{ cri_socket }}" - - - - prompt = f""" - Generate a Python code to generate an Ansible project (project name is app/media/MyAnsible) - that dynamically provisions Ansible resources ensuring a modular, flexible structure. Only provide - Python code, no explanations or markdown formatting, without ```python entry. 
- The project should be organized as follows: - - The structure of this project must be as follows: - ``` - ├── ansible.cfg - ├── group_vars - │   |── all - │   - ├── hosts - ├── host_vars - ├── kubernetes_playbook.yml - └── roles - └── preinstall - ├── defaults - │   └── main.yml - ├── files - │   └── sample.sh - ├── handlers - │   └── main.yml - ├── tasks - │   └── basic.yml - │   └── main.yml - ├── templates - │   └── resolv.conf.j2 - └── vars - | └── main.yml - k8s - ├── defaults - │   └── main.yml - ├── files - │   └── sample.sh - ├── handlers - │   └── main.yml - ├── tasks - │   └── k8s.yml - │   └── main.yml - ├── templates - │   └── sample.j2 - └── vars - | └── main.yml - init_k8s - ├── defaults - │   └── main.yml - ├── files - │   └── sample.sh - ├── handlers - │   └── main.yml - ├── tasks - │   └── cni.yml - │   └── initk8s.yml - │   └── main.yml - ├── templates - │   └── kubeadmcnf.yml.j2 - └── vars - | └── main.yml - join_master - ├── defaults - │   └── main.yml - ├── files - │   └── sample.sh - ├── handlers - │   └── main.yml - ├── tasks - │   └── join_master.yml - │   └── main.yml - ├── templates - │   └── kubeadmcnf-join.yml.j2 - └── vars - | └── main.yml - join_worker - ├── defaults - │   └── main.yml - ├── files - │   └── sample.sh - ├── handlers - │   └── main.yml - ├── tasks - │   └── join_worker.yml - │   └── main.yml - ├── templates - │   └── sample.j2 - └── vars - └── main.yml - ``` - - The content of ansible.cfg must be as follows: - ``` - [defaults] - host_key_checking=false - ``` - - group_vars directory includes a single file called "all" and the content of this file must be as follows: - ``` - # General - install_ansible_modules: "true" - disable_transparent_huge_pages: "true" - - setup_interface: "false" - - # Network Calico see here for more details https://github.com/projectcalico/calico/releases - calico_operator_url: "https://raw.githubusercontent.com/projectcalico/calico/v3.29.0/manifests/tigera-operator.yaml" - calico_crd_url: "https://raw.githubusercontent.com/projectcalico/calico/v3.29.0/manifests/custom-resources.yaml" - pod_network_cidr: "192.168.0.0/16" - - # DNS - resolv_nameservers: [8.8.8.8, 4.2.2.4] # 403.online - - # Sanction shekan - use_iran: "true" # change it to "false" if you are outside of iran - - # Docker - docker_gpg_key_url: "https://download.docker.com/linux/ubuntu/gpg" - docker_gpg_key_path: "/etc/apt/keyrings/docker.gpg" - docker_apt_repo: "https://download.docker.com/linux/ubuntu" - - # Kubernetes - kubernetes_gpg_keyring_path: "/etc/apt/keyrings/kubernetes-apt-keyring.gpg" - kubernetes_gpg_key_url: "https://pkgs.k8s.io/core:/stable:/v{k8s_version}/deb/Release.key" - kubernetes_apt_repo: "https://pkgs.k8s.io/core:/stable:/v{k8s_version}/deb/" - k8s_version: "{k8s_version}.2" # see here https://kubernetes.io/releases/patch-releases/ and https://github.com/kubernetes/kubernetes/releases - - # CRI - cri_socket: unix:///var/run/containerd/containerd.sock - - # Ansible Connection - - ansible_user: {kubernetes_ansible_user} - ansible_port: {kubernetes_ansible_port} - ansible_python_interpreter: "/usr/bin/python3" - domain: "devopsgpt.com" - apiserver_url: "devopsgpt.com" - ``` - - there is file called "hosts" which its content must be as follows: - ``` - {kubernetes_inventory} - ``` - - There is an empty directory called "host_vars" with no files included - - There is a file called "kubernetes_playbook.yml" which its content must be as follows: - ``` - - hosts: all - roles: - - role: preinstall - gather_facts: yes - any_errors_fatal: true 
- tags: [preinstall] - - - hosts: k8s - roles: - - role: k8s - gather_facts: yes - any_errors_fatal: true - tags: [k8s] - - - hosts: k8s - roles: - - role: init_k8s - gather_facts: yes - any_errors_fatal: true - tags: [init_k8s] - - - hosts: k8s_masters - roles: - - role: preinstall - - role: k8s - - role: join_master - gather_facts: yes - any_errors_fatal: true - tags: [join_master] - - - hosts: k8s_workers - roles: - - role: preinstall - - role: k8s - - role: join_worker - gather_facts: yes - any_errors_fatal: true - tags: [join_worker] - ``` - - There is a directory called "roles" which a sub-directory called "preinstall" (roles/preinstall): - "preinstall" has multiple sub-directories, so let's dive deeper into each its sub-directories: - - (preinstall/tasks): This path has two files called "basic.yml" and "main.yml". - - 1. Create "preinstall/tasks/basic.yml" and it must be as follows:" - ``` - - name: Set timezone to UTC - timezone: - name: Etc/UTC - - - name: Set hostname - command: hostnamectl set-hostname {inventory_hostname} - - - name: Remove symlink resolve.conf - file: - path: "/etc/resolv.conf" - state: absent - ignore_errors: true - when: use_iran == "true" - - - name: Configure resolv.conf - template: - src: "resolv.conf.j2" - dest: "/etc/resolv.conf" - mode: "0644" - when: use_iran == "true" - - - name: Add hostname - lineinfile: - path: /etc/hosts - regexp: '^127\.0\.0\.1' - line: "127.0.0.1 {inventory_hostname} localhost" - owner: root - group: root - mode: 0644 - - - name: Install necessary tools - apt: - state: latest - update_cache: true - name: - - vim - - sudo - - wget - - curl - - telnet - - nload - - s3cmd - - cron - - ipset - - lvm2 - - python3 - - python3-setuptools - - python3-pip - - python3-apt - - intel-microcode - - htop - - tcpdump - - net-tools - - screen - - tmux - - byobu - - iftop - - bmon - - iperf - - sysstat - - ethtool - - plocate - - thin-provisioning-tools - - conntrack - - stress - - cpufrequtils - - rsync - - xz-utils - - build-essential - - apt-transport-https - - ca-certificates - - software-properties-common - - gnupg-agent - - iptables-persistent - - open-iscsi - - nfs-common - - tzdata - - tree - - - name: Fix broken packages - apt: - state: fixed - ``` - - 2. Create preinstall/tasks/main.yml and it must be as follows:" - ``` - --- - - name: basic setup - include_tasks: basic.yml - ``` - - There is a directory called "roles" which a sub-directory called "k8s" (roles/k8s): - "k8s" has multiple sub-directories, so let's dive deeper into each its sub-directories: - - (k8s/tasks): This path has two files called "k8s.yml" and "main.yml". - - 1. 
Create k8s/tasks/k8s.yml and it must be as follows:" - ``` - - name: Disable SWAP since kubernetes can't work with swap enabled - shell: | - swapoff -a - - - name: Disable SWAP in fstab since kubernetes can't work with swap enabled - replace: - path: /etc/fstab - regexp: '^([^#].*?\sswap\s+sw\s+.*)$' - replace: '# \\1' - - - name: Check if ufw is installed - package_facts: - manager: "auto" - - - name: Disable ufw # just in Ubuntu - ufw: - state: disabled - when: "{ufw_in_task} in ansible_facts.packages" - - - name: Ensure kernel modules for containerd are enabled - lineinfile: - path: /etc/modules-load.d/containerd.conf - line: "{item_in_task}" - create: yes - state: present - loop: - - overlay - - br_netfilter - - - name: Load kernel modules - command: - cmd: "modprobe {item_in_task}" - loop: - - overlay - - br_netfilter - - - name: Ensure sysctl settings for Kubernetes are present - blockinfile: - path: /etc/sysctl.d/kubernetes.conf - block: | - net.bridge.bridge-nf-call-ip6tables = 1 - net.bridge.bridge-nf-call-iptables = 1 - net.ipv4.ip_forward = 1 - create: yes - marker: "# {{mark}} ANSIBLE MANAGED BLOCK" - owner: root - group: root - mode: '0644' - - - name: Reload sysctl settings - command: - cmd: sysctl --system - - - name: Update apt cache - apt: - update_cache: yes - - - name: Install required packages - apt: - pkg: - - ca-certificates - - curl - - gnupg - - lsb-release - - gpg - - state: present - update_cache: yes - - - name: Ensure the /etc/apt/keyrings directory exists - file: - path: /etc/apt/keyrings - state: directory - mode: '0755' # Adjust the permissions as necessary - owner: root # Set the owner, if required - group: root - - - name: Remove existing Docker GPG key if it exists - file: - path: '{docker_gpg_key_path_in_task}' - state: absent - - - name: Download Docker GPG key - shell: | - curl -fsSL {docker_gpg_key_url_in_task} | gpg --dearmor -o {docker_gpg_key_path_in_task} - - - name: Determine the architecture - command: dpkg --print-architecture - register: architecture - - - name: Determine the distribution codename - command: lsb_release -cs - register: distribution_codename - - - name: Add Docker APT repository - lineinfile: - path: /etc/apt/sources.list.d/docker.list - create: yes - line: "deb [arch={architecture_stdout_in_task} signed-by={docker_gpg_key_path_in_task}] {docker_apt_repo_in_task} {distribution_codename_stdout_in_task} stable" - state: present - - - name: Update apt cache - apt: - update_cache: yes - - - name: Install required packages (containerd) - apt: - pkg: - - containerd.io - state: present - - - name: Generate default containerd configuration - shell: - cmd: containerd config default > /etc/containerd/config.toml - - - name: Replace SystemdCgroup from false to true in containerd config - replace: - path: /etc/containerd/config.toml - regexp: 'SystemdCgroup = false' - replace: 'SystemdCgroup = true' - - - name: Restart containerd service - systemd: - name: containerd - state: restarted - daemon_reload: yes - - - name: Enable containerd service - systemd: - name: containerd - enabled: yes - - name: Delete the existing Kubernetes APT keyring file if it exists - file: - path: '{kubernetes_gpg_keyring_path_in_task}' - state: absent - - - name: Download Kubernetes GPG key - shell: | - curl -fsSL '{kubernetes_gpg_key_url_in_task}' | gpg --dearmor -o '{kubernetes_gpg_keyring_path_in_task}' - - - name: Add Kubernetes repo - apt_repository: - repo: "deb [signed-by={kubernetes_gpg_keyring_path_in_task}] {kubernetes_apt_repo_in_task} /" - state: 
present - filename: kubernetes.list - - - name: Update apt cache - apt: - update_cache: yes - - - name: Install Kubernetes packages - apt: - name: "{item_in_task}" - state: present - loop: - - kubeadm={k8s_version}.2-1.1 - - kubelet={k8s_version}.2-1.1 - - kubectl={k8s_version}.2-1.1 - - - name: Hold Kubernetes packages - dpkg_selections: - name: "{item_in_task}" - selection: hold - loop: - - kubeadm - - kubelet - - kubectl - - containerd.io - - - name: Configure node ip - lineinfile: - path: /etc/default/kubelet - line: KUBELET_EXTRA_ARGS=--node-ip={private_ip_in_task} - create: yes - state: present - notify: Restart kubelet - - - name: Add hosts to /etc/hosts - lineinfile: - path: /etc/hosts - line: "{hostvars_private_ip_in_task} {item_in_task} {item_in_task}.{domain_in_task}" - state: present - create: no - loop: "{groups_all_in_task}" - when: hostvars[item].private_ip is defined - - - name: Add apiserver_url to point to the masters temporary" - lineinfile: - dest: /etc/hosts - line: "{hostvars_groups_k8s_masters_private_ip_in_task} {apiserver_url_in_task}" - state: present - - - name: Pull Kubernetes images | If you got error check your dns and sanction - command: - cmd: kubeadm config images pull - ``` - 2. Create k8s/tasks/main.yml and it must be as follows:" - ``` - --- - - name: Install kubernetes packages - include_tasks: k8s.yml - ``` - - (k8s/handlers): This path has a file called "main.yml". - - 3. Create k8s/handlers/main.yml and it must be as follows:" - ``` - --- - # handlers file for k8s - - - name: Remove temporary GPG key file - file: - path: "/tmp/docker.list" - state: absent - - - name: Restart kubelet - service: - name: kubelet - state: restarted - ``` - - There is a directory called "roles" which a sub-directory called "init_k8s" (roles/init_k8s): - "init_k8s" has multiple sub-directories, so let's dive deeper into each its sub-directories: - - (init_k8s/tasks): This path has three files called "cni.yml", "initk8s.yml" and "main.yml". - - 1. Create init_k8s/tasks/cni.yml and it must be as follows:" - ``` - - block: - - name: Check if Calico CRDs exist - command: kubectl get crd felixconfigurations.crd.projectcalico.org - register: calico_crd_check - ignore_errors: true - delegate_to: "{groups_k8s_masters_in_task}" - - - block: - - name: Apply CNI plugin (Calico) - command: kubectl create -f {calico_operator_url_in_task} - retries: 3 - delay: 3 - - - name: Apply CNI plugin (Calico) - command: kubectl create -f {calico_crd_url_in_task} - retries: 3 - delay: 3 - delegate_to: "{groups_k8s_masters_in_task}" - when: calico_crd_check.rc != 0 - run_once: true - ``` - 2. 
Create init_k8s/tasks/initk8s.yml and it must be as follows:" - ``` - - name: Init cluster | Check if kubeadm has already run - stat: - path: "/var/lib/kubelet/config.yaml" - register: kubeadm_already_run - when: inventory_hostname == groups['k8s_masters'][0] - delegate_to: "{groups_k8s_masters_in_task}" - - - block: - - name: Init cluster | Copy kubeadmcnf.yaml - template: - src: kubeadmcnf.yml.j2 - dest: /root/kubeadmcnf.yaml - - - name: Init cluster | Initiate cluster on node groups['kube_master'][0] - shell: kubeadm init --config=/root/kubeadmcnf.yaml - register: kubeadm_init - # Retry is because upload config sometimes fails - until: kubeadm_init is succeeded or "field is immutable" in kubeadm_init.stderr - notify: Restart kubelet - - when: inventory_hostname == groups['k8s_masters'][0] and not kubeadm_already_run.stat.exists - delegate_to: "{groups_k8s_masters_in_task}" - - - block: - - name: Create kubectl directory - file: - path: /root/.kube - state: directory - - - name: Configure kubectl - copy: - src: /etc/kubernetes/admin.conf - dest: /root/.kube/config - remote_src: yes - - - name: Fetch kubeconfig - fetch: - src: /etc/kubernetes/admin.conf - dest: kubeconfig/ - flat: yes - when: inventory_hostname == groups['k8s_masters'][0] - delegate_to: "{groups_k8s_masters_in_task}" - - - name: Sleep for 300 seconds and reboot the Master1 server - wait_for: - timeout: 300 - delegate_to: localhost - - - name: Reboot the servers - command: reboot - async: 1 - poll: 0 - # ignore_errors: yes - delegate_to: "{groups_k8s_masters_in_task}" - - - name: Sleep for 300 seconds to Master1 up and running - wait_for: - timeout: 300 - delegate_to: localhost - # when: use_iran == "true" - - - name: Example Task After Reboot - debug: - msg: "Server back online and ready for tasks." - ``` - 3. Create init_k8s/tasks/main.yml and it must be as follows:" - ``` - --- - # tasks file for init_k8s - - - name: Initialize kubernetes cluster - include_tasks: initk8s.yml - - - name: Initialize Calico CNI - include_tasks: cni.yml - ``` - - There is a directory called "roles" which a sub-directory called "join_master" (roles/join_master): - "join_master" has multiple sub-directories, so let's dive deeper into each its sub-directories: - - (join_master/tasks): This path has two files called "join_master.yml" and "main.yml". - - 1. 
Create "join_master/tasks/join_master.yml" and it must be as follows:" - ``` - - name: Init cluster | Check if kubeadm has already run - stat: - path: "/var/lib/kubelet/config.yaml" - register: kubeadm_already_run - - - block: - - name: Generate join command - command: kubeadm token create --print-join-command - register: join_command - - - name: Print join command - debug: - msg: "{join_command_stdout_lines_in_task}" - - - name: Copy join command to local file - become: false - local_action: copy content="{join_command_stdout_lines_in_task} $@" dest="roles/join_master/files/join-command" - - - name: copy kubeadmcnf.yaml - template: - src: kubeadmcnf-join.yml.j2 - dest: /root/kubeadm-config.yaml - - when: - - inventory_hostname == groups['k8s_masters'][0] - delegate_to: "{groups_k8s_masters_in_task}" - - - block: - - name: Copy the join command to server location - copy: - src: roles/join_master/files/join-command - dest: /root/join-command.sh - mode: "0777" - - when: - - inventory_hostname != groups['k8s_masters'][0] - - inventory_hostname in groups['k8s_masters'] - - not kubeadm_already_run.stat.exists - - - block: - - name: get certificate key - shell: kubeadm init phase upload-certs --upload-certs --config=/root/kubeadm-config.yaml - register: kubeadm_cert_key - - - name: Print certificate key - debug: - msg: "{kubeadm_cert_key_stdout_lines_in_task}" - - - name: register the cert key - set_fact: - control_plane_certkey: "{kubeadm_cert_key_stdout_lines_in_task}" - - when: - - inventory_hostname in groups['k8s_masters'][0] - delegate_to: "{groups_k8s_masters_in_task}" - run_once: false - delegate_facts: true - - - name: Join | Join control-plane to cluster - command: "sh /root/join-command.sh --control-plane --certificate-key={hostvars_k8s_masters_control_plane_certkey_in_task} --cri-socket={cri_socket_in_task}" - when: - - inventory_hostname != groups['k8s_masters'][0] - - inventory_hostname in groups['k8s_masters'] - - not kubeadm_already_run.stat.exists - - - block: - - name: Create kubectl directory - file: - path: /root/.kube - state: directory - - - name: Configure kubectl - copy: - src: /etc/kubernetes/admin.conf - dest: /root/.kube/config - remote_src: yes - - - name: Fetch kubeconfig - fetch: - src: /etc/kubernetes/admin.conf - dest: kubeconfig/ - flat: yes - when: - - inventory_hostname != groups['k8s_masters'][0] - - inventory_hostname in groups['k8s_masters'] - - not kubeadm_already_run.stat.exists - - - name: remove apiserver_url to point to the masters temporary - lineinfile: - dest: /etc/hosts - line: "{hostvars_groups_k8s_masters_private_ip_in_task} {apiserver_url_in_task}" - state: absent - - - - name: Add apiserver_url to point to the masters" - lineinfile: - dest: /etc/hosts - line: "{private_ip_in_task} {apiserver_url_in_task}" - state: present - when: - - inventory_hostname in groups['k8s_masters'] - ``` - 2. Create join_master/tasks/main.yml and it must be as follows:" - ``` - --- - # tasks file for join_master - - - name: Join master(s) node to cluster - include_tasks: join_master.yml - - ``` - - There is a directory called "roles" which a sub-directory called "join_worker" (roles/join_worker): - "join_worker" has multiple sub-directories, so let's dive deeper into each its sub-directories: - - (join_worker/tasks): This path has two files called "join_worker.yml" and "main.yml". - - 1. 
Create "join_worker/tasks/join_worker.yml" and it must be as follows:" - ``` - - name: Init cluster | Check if kubeadm has already run - stat: - path: "/var/lib/kubelet/config.yaml" - register: kubeadm_already_run - - - block: - - name: Generate join command - command: kubeadm token create --print-join-command - register: join_command - - - name: Print join command - debug: - msg: "{join_command_stdout_lines_in_task}" - - - name: Copy join command to local file - become: false - local_action: copy content="{join_command_stdout_lines_in_task} $@" dest="roles/join_worker/files/join-command" - - when: - - inventory_hostname not in groups['k8s_masters'][0] - delegate_to: "{groups_k8s_masters_in_task}" - - - block: - - name: Copy the join command to server location - copy: - src: roles/join_worker/files/join-command - dest: /root/join-command.sh - mode: "0777" - - when: - - inventory_hostname not in groups['k8s_masters'] - - not kubeadm_already_run.stat.exists - - - name: Join | Join worker nodes to the cluster - command: sh /root/join-command.sh - when: - - inventory_hostname not in groups['k8s_masters'] - - not kubeadm_already_run.stat.exists - ``` - 2. Create join_worker/tasks/main.yml and it must be as follows:" - ``` - --- - # tasks file for join_worker - - - name: Join worker(s) node to cluster - include_tasks: join_worker.yml - ``` - finally just give me a python code without any note that can generate a project folder with the - given schema without ```python entry. and we dont need any base directory in the python code. - the final ansible template must work very well without any error! - - the python code you give me, must have structure like that: - - import os - project_name = "app/media/MyAnsible" - foo_dir = os.path.join(project_name, "bar") - x_dir = os.path.join(modules_dir, "y") - - # Create project directories - os.makedirs(ansible_dir, exist_ok=True) - - # Create main.tf - with open(os.path.join(project_name, "main.tf"), "w") as main_file: - # any thing you need - """ - return prompt + + project_name = "app/media/MyAnsible" + ansible_dir = project_name + group_vars_dir = os.path.join(ansible_dir, "group_vars") + host_vars_dir = os.path.join(ansible_dir, "host_vars") + roles_dir = os.path.join(ansible_dir, "roles") + + # Create project directories + os.makedirs(group_vars_dir, exist_ok=True) + os.makedirs(host_vars_dir, exist_ok=True) + os.makedirs(roles_dir, exist_ok=True) + + preinstall_dir = os.path.join(roles_dir, "preinstall") + k8s_dir = os.path.join(roles_dir, "k8s") + init_k8s_dir = os.path.join(roles_dir, "init_k8s") + join_master_dir = os.path.join(roles_dir, "join_master") + join_worker_dir = os.path.join(roles_dir, "join_worker") + + os.makedirs(preinstall_dir, exist_ok=True) + os.makedirs(k8s_dir, exist_ok=True) + os.makedirs(init_k8s_dir, exist_ok=True) + os.makedirs(join_master_dir, exist_ok=True) + os.makedirs(join_worker_dir, exist_ok=True) + + # Create ansible.cfg + with open(os.path.join(ansible_dir, "ansible.cfg"), "w") as ansible_cfg_file: + ansible_cfg_file.write("[defaults]\nhost_key_checking=false\n") + + # Create group_vars/all + with open(os.path.join(group_vars_dir, "all"), "w") as group_vars_file: + group_vars_file.write(f"""# General +install_ansible_modules: "true" +disable_transparent_huge_pages: "true" + +setup_interface: "false" + +# Network Calico see here for more details https://github.com/projectcalico/calico/releases +calico_operator_url: "https://raw.githubusercontent.com/projectcalico/calico/v3.29.0/manifests/tigera-operator.yaml" 
+calico_crd_url: "https://raw.githubusercontent.com/projectcalico/calico/v3.29.0/manifests/custom-resources.yaml" +pod_network_cidr: "192.168.0.0/16" + +# DNS +resolv_nameservers: [8.8.8.8, 4.2.2.4] # 403.online + +# Sanction shekan +use_iran: "true" # change it to "false" if you are outside of iran + +# Docker +docker_gpg_key_url: "https://download.docker.com/linux/ubuntu/gpg" +docker_gpg_key_path: "/etc/apt/keyrings/docker.gpg" +docker_apt_repo: "https://download.docker.com/linux/ubuntu" + +# Kubernetes +kubernetes_gpg_keyring_path: "/etc/apt/keyrings/kubernetes-apt-keyring.gpg" +kubernetes_gpg_key_url: "https://pkgs.k8s.io/core:/stable:/v1.31/deb/Release.key" +kubernetes_apt_repo: "https://pkgs.k8s.io/core:/stable:/v1.31/deb/" +k8s_version: {k8s_version} # see here https://kubernetes.io/releases/patch-releases/ and https://github.com/kubernetes/kubernetes/releases + +# CRI +cri_socket: unix:///var/run/containerd/containerd.sock + +# Ansible Connection +ansible_user: {kubernetes_ansible_user} +ansible_port: {kubernetes_ansible_port} +ansible_python_interpreter: "/usr/bin/python3" +domain: "devopsgpt.com" +apiserver_url: "devopsgpt.com" + """) + + # Create hosts + with open(os.path.join(ansible_dir, "hosts"), "w") as hosts_file: + hosts_file.write(f"""{kubernetes_inventory}""") + + # Create kubernetes_playbook.yml + with open(os.path.join(ansible_dir, "kubernetes_playbook.yml"), "w") as playbook_file: + playbook_file.write(""" +- hosts: all + roles: + - role: preinstall + gather_facts: yes + any_errors_fatal: true + tags: [preinstall] + +- hosts: k8s + roles: + - role: k8s + gather_facts: yes + any_errors_fatal: true + tags: [k8s] + +- hosts: k8s + roles: + - role: init_k8s + gather_facts: yes + any_errors_fatal: true + tags: [init_k8s] + +- hosts: k8s_masters + roles: + - role: preinstall + - role: k8s + - role: join_master + gather_facts: yes + any_errors_fatal: true + tags: [join_master] + +- hosts: k8s_workers + roles: + - role: preinstall + - role: k8s + - role: join_worker + gather_facts: yes + any_errors_fatal: true + tags: [join_worker] + + """) + + # Create preinstall files + preinstall_defaults_dir = os.path.join(preinstall_dir, "defaults") + preinstall_files_dir = os.path.join(preinstall_dir, "files") + preinstall_handlers_dir = os.path.join(preinstall_dir, "handlers") + preinstall_tasks_dir = os.path.join(preinstall_dir, "tasks") + preinstall_templates_dir = os.path.join(preinstall_dir, "templates") + preinstall_vars_dir = os.path.join(preinstall_dir, "vars") + + os.makedirs(preinstall_defaults_dir, exist_ok=True) + os.makedirs(preinstall_files_dir, exist_ok=True) + os.makedirs(preinstall_handlers_dir, exist_ok=True) + os.makedirs(preinstall_tasks_dir, exist_ok=True) + os.makedirs(preinstall_templates_dir, exist_ok=True) + os.makedirs(preinstall_vars_dir, exist_ok=True) + + with open(os.path.join(preinstall_defaults_dir, "main.yml"), "w") as defaults_file: + defaults_file.write("") + + with open(os.path.join(preinstall_files_dir, "sample.sh"), "w") as files_file: + files_file.write("") + + with open(os.path.join(preinstall_handlers_dir, "main.yml"), "w") as handlers_file: + handlers_file.write("") + + with open(os.path.join(preinstall_tasks_dir, "basic.yml"), "w") as basic_tasks_file: + basic_tasks_file.write("""- name: Set timezone to UTC + timezone: + name: Etc/UTC + +- name: Set hostname + command: hostnamectl set-hostname {{ inventory_hostname }} + +- name: Remove symlink resolve.conf + file: + path: "/etc/resolv.conf" + state: absent + ignore_errors: true + when: use_iran 
== "true" + +- name: Configure resolv.conf + template: + src: "resolv.conf.j2" + dest: "/etc/resolv.conf" + mode: "0644" + when: use_iran == "true" + +- name: Add hostname + lineinfile: + path: /etc/hosts + regexp: '^127\\.0\\.0\\.1' + line: "127.0.0.1 {{ inventory_hostname }} localhost" + owner: root + group: root + mode: 0644 + +- name: Install necessary tools + apt: + state: latest + update_cache: true + name: + - vim + - sudo + - wget + - curl + - telnet + - nload + - s3cmd + - cron + - ipset + - lvm2 + - python3 + - python3-setuptools + - python3-pip + - python3-apt + - intel-microcode + - htop + - tcpdump + - net-tools + - screen + - tmux + - byobu + - iftop + - bmon + - iperf + - sysstat + - ethtool + - plocate + - thin-provisioning-tools + - conntrack + - stress + - cpufrequtils + - rsync + - xz-utils + - build-essential + - apt-transport-https + - ca-certificates + - software-properties-common + - gnupg-agent + - iptables-persistent + - open-iscsi + - nfs-common + - tzdata + - tree + +- name: Fix broken packages + apt: + state: fixed + """) + + with open(os.path.join(preinstall_tasks_dir, "main.yml"), "w") as tasks_main_file: + tasks_main_file.write("""--- +- name: basic setup + include_tasks: basic.yml + """) + + # Create k8s files + k8s_defaults_dir = os.path.join(k8s_dir, "defaults") + k8s_files_dir = os.path.join(k8s_dir, "files") + k8s_handlers_dir = os.path.join(k8s_dir, "handlers") + k8s_tasks_dir = os.path.join(k8s_dir, "tasks") + k8s_templates_dir = os.path.join(k8s_dir, "templates") + k8s_vars_dir = os.path.join(k8s_dir, "vars") + + os.makedirs(k8s_defaults_dir, exist_ok=True) + os.makedirs(k8s_files_dir, exist_ok=True) + os.makedirs(k8s_handlers_dir, exist_ok=True) + os.makedirs(k8s_tasks_dir, exist_ok=True) + os.makedirs(k8s_templates_dir, exist_ok=True) + os.makedirs(k8s_vars_dir, exist_ok=True) + + with open(os.path.join(k8s_defaults_dir, "main.yml"), "w") as k8s_defaults_file: + k8s_defaults_file.write("") + + with open(os.path.join(k8s_files_dir, "sample.sh"), "w") as k8s_files_file: + k8s_files_file.write("") + + with open(os.path.join(k8s_handlers_dir, "main.yml"), "w") as k8s_handlers_file: + k8s_handlers_file.write("""--- +# handlers file for k8s + +- name: Remove temporary GPG key file + file: + path: "/tmp/docker.list" + state: absent + +- name: Restart kubelet + service: + name: kubelet + state: restarted + """) + + with open(os.path.join(k8s_tasks_dir, "k8s.yml"), "w") as k8s_tasks_k8s_file: + k8s_tasks_k8s_file.write("""- name: Disable SWAP since kubernetes can't work with swap enabled + shell: | + swapoff -a + +- name: Disable SWAP in fstab since kubernetes can't work with swap enabled + replace: + path: /etc/fstab + regexp: '^([^#].*?\\sswap\\s+sw\\s+.*)$' + replace: '# \\1' + +- name: Check if ufw is installed + package_facts: + manager: "auto" + +- name: Disable ufw # just in Ubuntu + ufw: + state: disabled + when: "'ufw' in ansible_facts.packages" + +- name: Ensure kernel modules for containerd are enabled + lineinfile: + path: /etc/modules-load.d/containerd.conf + line: "{{ item }}" + create: yes + state: present + loop: + - overlay + - br_netfilter + +- name: Load kernel modules + command: + cmd: "modprobe {{ item }}" + loop: + - overlay + - br_netfilter + +- name: Ensure sysctl settings for Kubernetes are present + blockinfile: + path: /etc/sysctl.d/kubernetes.conf + block: | + net.bridge.bridge-nf-call-ip6tables = 1 + net.bridge.bridge-nf-call-iptables = 1 + net.ipv4.ip_forward = 1 + create: yes + marker: "# {mark} ANSIBLE MANAGED BLOCK" + owner: 
root + group: root + mode: '0644' + +- name: Reload sysctl settings + command: + cmd: sysctl --system + +- name: Update apt cache + apt: + update_cache: yes + +- name: Install required packages + apt: + pkg: + - ca-certificates + - curl + - gnupg + - lsb-release + - gpg + state: present + update_cache: yes + +- name: Ensure the /etc/apt/keyrings directory exists + file: + path: /etc/apt/keyrings + state: directory + mode: '0755' # Adjust the permissions as necessary + owner: root # Set the owner, if required + group: root + +- name: Remove existing Docker GPG key if it exists + file: + path: '{{ docker_gpg_key_path }}' + state: absent + +- name: Download Docker GPG key + shell: | + curl -fsSL {{ docker_gpg_key_url }} | gpg --dearmor -o {{ docker_gpg_key_path }} + +- name: Determine the architecture + command: dpkg --print-architecture + register: architecture + +- name: Determine the distribution codename + command: lsb_release -cs + register: distribution_codename + +- name: Add Docker APT repository + lineinfile: + path: /etc/apt/sources.list.d/docker.list + create: yes + line: "deb [arch={{ architecture.stdout }} signed-by={{ docker_gpg_key_path }}] {{ docker_apt_repo }} {{ distribution_codename.stdout }} stable" + state: present + +- name: Update apt cache + apt: + update_cache: yes + +- name: Install required packages (containerd) + apt: + pkg: + - containerd.io + state: present + +- name: Generate default containerd configuration + shell: + cmd: containerd config default > /etc/containerd/config.toml + +- name: Replace SystemdCgroup from false to true in containerd config + replace: + path: /etc/containerd/config.toml + regexp: 'SystemdCgroup = false' + replace: 'SystemdCgroup = true' + +- name: Restart containerd service + systemd: + name: containerd + state: restarted + daemon_reload: yes + +- name: Enable containerd service + systemd: + name: containerd + enabled: yes + +- name: Delete the existing Kubernetes APT keyring file if it exists + file: + path: '{{ kubernetes_gpg_keyring_path }}' + state: absent + +- name: Download Kubernetes GPG key + shell: | + curl -fsSL '{{ kubernetes_gpg_key_url }}' | gpg --dearmor -o '{{ kubernetes_gpg_keyring_path }}' + +- name: Add Kubernetes repo + apt_repository: + repo: "deb [signed-by={{ kubernetes_gpg_keyring_path }}] {{ kubernetes_apt_repo }} /" + state: present + filename: kubernetes.list + +- name: Update apt cache + apt: + update_cache: yes + +- name: Install Kubernetes packages + apt: + name: "{{ item }}" + state: present + loop: + - kubeadm=1.31.2-1.1 + - kubelet=1.31.2-1.1 + - kubectl=1.31.2-1.1 + +- name: Hold Kubernetes packages + dpkg_selections: + name: "{{ item }}" + selection: hold + loop: + - kubeadm + - kubelet + - kubectl + - containerd.io + +- name: Configure node ip + lineinfile: + path: /etc/default/kubelet + line: KUBELET_EXTRA_ARGS=--node-ip={{ private_ip }} + create: yes + state: present + notify: Restart kubelet + +- name: Add hosts to /etc/hosts + lineinfile: + path: /etc/hosts + line: "{{ hostvars[item].private_ip }} {{ item }} {{ item }}.{{ domain }}" + state: present + create: no + loop: "{{ groups['all'] }}" + when: hostvars[item].private_ip is defined + +- name: Add apiserver_url to point to the masters temporary + lineinfile: + dest: /etc/hosts + line: "{{ hostvars[groups['k8s_masters'][0]].private_ip }} {{ apiserver_url }}" + state: present + +- name: Pull Kubernetes images | If you got error check your dns and sanction + command: + cmd: kubeadm config images pull + """) + + with 
open(os.path.join(k8s_tasks_dir, "main.yml"), "w") as k8s_tasks_main_file: + k8s_tasks_main_file.write("""--- +- name: Install kubernetes packages + include_tasks: k8s.yml + """) + + # Create init_k8s files + init_k8s_defaults_dir = os.path.join(init_k8s_dir, "defaults") + init_k8s_files_dir = os.path.join(init_k8s_dir, "files") + init_k8s_handlers_dir = os.path.join(init_k8s_dir, "handlers") + init_k8s_tasks_dir = os.path.join(init_k8s_dir, "tasks") + init_k8s_templates_dir = os.path.join(init_k8s_dir, "templates") + init_k8s_vars_dir = os.path.join(init_k8s_dir, "vars") + + os.makedirs(init_k8s_defaults_dir, exist_ok=True) + os.makedirs(init_k8s_files_dir, exist_ok=True) + os.makedirs(init_k8s_handlers_dir, exist_ok=True) + os.makedirs(init_k8s_tasks_dir, exist_ok=True) + os.makedirs(init_k8s_templates_dir, exist_ok=True) + os.makedirs(init_k8s_vars_dir, exist_ok=True) + + with open(os.path.join(init_k8s_defaults_dir, "main.yml"), "w") as init_k8s_defaults_file: + init_k8s_defaults_file.write("") + + with open(os.path.join(init_k8s_files_dir, "sample.sh"), "w") as init_k8s_files_file: + init_k8s_files_file.write("") + + with open(os.path.join(init_k8s_handlers_dir, "main.yml"), "w") as init_k8s_handlers_file: + init_k8s_handlers_file.write("") + + with open(os.path.join(init_k8s_tasks_dir, "cni.yml"), "w") as init_k8s_tasks_cni_file: + init_k8s_tasks_cni_file.write("""- block: + - name: Check if Calico CRDs exist + command: kubectl get crd felixconfigurations.crd.projectcalico.org + register: calico_crd_check + ignore_errors: true + delegate_to: "{{ groups['k8s_masters'][0] }}" + +- block: + - name: Apply CNI plugin (Calico) + command: kubectl create -f {{ calico_operator_url }} + retries: 3 + delay: 3 + + - name: Apply CNI plugin (Calico) + command: kubectl create -f {{ calico_crd_url }} + retries: 3 + delay: 3 + delegate_to: "{{ groups['k8s_masters'][0] }}" + when: calico_crd_check.rc != 0 + run_once: true + + """) + + with open(os.path.join(init_k8s_tasks_dir, "initk8s.yml"), "w") as init_k8s_tasks_initk8s_file: + init_k8s_tasks_initk8s_file.write("""- name: Init cluster | Check if kubeadm has already run + stat: + path: "/var/lib/kubelet/config.yaml" + register: kubeadm_already_run + when: inventory_hostname == groups['k8s_masters'][0] + delegate_to: "{{ groups['k8s_masters'][0] }}" + +- block: + - name: Init cluster | Copy kubeadmcnf.yaml + template: + src: kubeadmcnf.yml.j2 + dest: /root/kubeadmcnf.yaml + + - name: Init cluster | Initiate cluster on node groups['kube_master'][0] + shell: kubeadm init --config=/root/kubeadmcnf.yaml + register: kubeadm_init + # Retry is because upload config sometimes fails + until: kubeadm_init is succeeded or "field is immutable" in kubeadm_init.stderr + notify: Restart kubelet + + when: inventory_hostname == groups['k8s_masters'][0] and not kubeadm_already_run.stat.exists + delegate_to: "{{ groups['k8s_masters'][0] }}" + +- block: + - name: Create kubectl directory + file: + path: /root/.kube + state: directory + + - name: Configure kubectl + copy: + src: /etc/kubernetes/admin.conf + dest: /root/.kube/config + remote_src: yes + + - name: Fetch kubeconfig + fetch: + src: /etc/kubernetes/admin.conf + dest: kubeconfig/ + flat: yes + when: inventory_hostname == groups['k8s_masters'][0] + delegate_to: "{{ groups['k8s_masters'][0] }}" + +- name: Sleep for 300 seconds and reboot the Master1 server + wait_for: + timeout: 300 + delegate_to: localhost + +- name: Reboot the servers + command: reboot + async: 1 + poll: 0 + # ignore_errors: yes + delegate_to: 
"{{ groups['k8s_masters'][0] }}" + +- name: Sleep for 300 seconds to Master1 up and running + wait_for: + timeout: 300 + delegate_to: localhost + # when: use_iran == "true" + +- name: Example Task After Reboot + debug: + msg: "Server back online and ready for tasks." + """) + + with open(os.path.join(init_k8s_tasks_dir, "main.yml"), "w") as init_k8s_tasks_main_file: + init_k8s_tasks_main_file.write("""--- +# tasks file for init_k8s + +- name: Initialize kubernetes cluster + include_tasks: initk8s.yml + +- name: Initialize Calico CNI + include_tasks: cni.yml + """) + + # Create join_master files + join_master_defaults_dir = os.path.join(join_master_dir, "defaults") + join_master_files_dir = os.path.join(join_master_dir, "files") + join_master_handlers_dir = os.path.join(join_master_dir, "handlers") + join_master_tasks_dir = os.path.join(join_master_dir, "tasks") + join_master_templates_dir = os.path.join(join_master_dir, "templates") + join_master_vars_dir = os.path.join(join_master_dir, "vars") + + os.makedirs(join_master_defaults_dir, exist_ok=True) + os.makedirs(join_master_files_dir, exist_ok=True) + os.makedirs(join_master_handlers_dir, exist_ok=True) + os.makedirs(join_master_tasks_dir, exist_ok=True) + os.makedirs(join_master_templates_dir, exist_ok=True) + os.makedirs(join_master_vars_dir, exist_ok=True) + + with open(os.path.join(join_master_defaults_dir, "main.yml"), "w") as join_master_defaults_file: + join_master_defaults_file.write("") + + with open(os.path.join(join_master_files_dir, "join-command"), "w") as join_master_files_file: + join_master_files_file.write("") + + with open(os.path.join(join_master_handlers_dir, "main.yml"), "w") as join_master_handlers_file: + join_master_handlers_file.write("") + + with open(os.path.join(join_master_tasks_dir, "join_master.yml"), "w") as join_master_tasks_join_master_file: + join_master_tasks_join_master_file.write("""- name: Init cluster | Check if kubeadm has already run + stat: + path: "/var/lib/kubelet/config.yaml" + register: kubeadm_already_run + +- block: + - name: Generate join command + command: kubeadm token create --print-join-command + register: join_command + + - name: Print join command + debug: + msg: "{{ join_command.stdout_lines[0] }}" + + - name: Copy join command to local file + become: false + local_action: copy content="{{ join_command.stdout_lines[0] }} $@" dest="roles/join_master/files/join-command" + + - name: copy kubeadmcnf.yaml + template: + src: kubeadmcnf-join.yml.j2 + dest: /root/kubeadm-config.yaml + + when: + - inventory_hostname == groups['k8s_masters'][0] + delegate_to: "{{ groups['k8s_masters'][0] }}" + +- block: + - name: Copy the join command to server location + copy: + src: roles/join_master/files/join-command + dest: /root/join-command.sh + mode: "0777" + + when: + - inventory_hostname != groups['k8s_masters'][0] + - inventory_hostname in groups['k8s_masters'] + - not kubeadm_already_run.stat.exists + +- block: + - name: get certificate key + shell: kubeadm init phase upload-certs --upload-certs --config=/root/kubeadm-config.yaml + register: kubeadm_cert_key + + - name: Print certificate key + debug: + msg: "{{ kubeadm_cert_key.stdout_lines[2] }}" + + - name: register the cert key + set_fact: + control_plane_certkey: "{{ kubeadm_cert_key.stdout_lines[2] }}" + + when: + - inventory_hostname in groups['k8s_masters'][0] + delegate_to: "{{ groups['k8s_masters'][0] }}" + run_once: false + delegate_facts: true + +- name: Join | Join control-plane to cluster + command: "sh /root/join-command.sh 
--control-plane --certificate-key={{ hostvars[groups['k8s_masters'][0]].control_plane_certkey }} --cri-socket={{ cri_socket }}" + when: + - inventory_hostname != groups['k8s_masters'][0] + - inventory_hostname in groups['k8s_masters'] + - not kubeadm_already_run.stat.exists + +- block: + - name: Create kubectl directory + file: + path: /root/.kube + state: directory + + - name: Configure kubectl + copy: + src: /etc/kubernetes/admin.conf + dest: /root/.kube/config + remote_src: yes + + - name: Fetch kubeconfig + fetch: + src: /etc/kubernetes/admin.conf + dest: kubeconfig/ + flat: yes + when: + - inventory_hostname != groups['k8s_masters'][0] + - inventory_hostname in groups['k8s_masters'] + - not kubeadm_already_run.stat.exists + +- name: remove apiserver_url to point to the masters temporary + lineinfile: + dest: /etc/hosts + line: "{{ hostvars[groups['k8s_masters'][0]].private_ip }} {{ apiserver_url }}" + state: absent + +- name: Add apiserver_url to point to the masters + lineinfile: + dest: /etc/hosts + line: "{{ private_ip }} {{ apiserver_url }}" + state: present + when: + - inventory_hostname in groups['k8s_masters'] + """) + + with open(os.path.join(join_master_tasks_dir, "main.yml"), "w") as join_master_tasks_main_file: + join_master_tasks_main_file.write("""--- +# tasks file for join_master + +- name: Join master(s) node to cluster + include_tasks: join_master.yml + """) + + # Create join_worker files + join_worker_defaults_dir = os.path.join(join_worker_dir, "defaults") + join_worker_files_dir = os.path.join(join_worker_dir, "files") + join_worker_handlers_dir = os.path.join(join_worker_dir, "handlers") + join_worker_tasks_dir = os.path.join(join_worker_dir, "tasks") + join_worker_templates_dir = os.path.join(join_worker_dir, "templates") + join_worker_vars_dir = os.path.join(join_worker_dir, "vars") + + os.makedirs(join_worker_defaults_dir, exist_ok=True) + os.makedirs(join_worker_files_dir, exist_ok=True) + os.makedirs(join_worker_handlers_dir, exist_ok=True) + os.makedirs(join_worker_tasks_dir, exist_ok=True) + os.makedirs(join_worker_templates_dir, exist_ok=True) + os.makedirs(join_worker_vars_dir, exist_ok=True) + + with open(os.path.join(join_worker_defaults_dir, "main.yml"), "w") as join_worker_defaults_file: + join_worker_defaults_file.write("") + + with open(os.path.join(join_worker_files_dir, "join-command"), "w") as join_worker_files_file: + join_worker_files_file.write("") + + with open(os.path.join(join_worker_handlers_dir, "main.yml"), "w") as join_worker_handlers_file: + join_worker_handlers_file.write("") + + with open(os.path.join(join_worker_tasks_dir, "join_worker.yml"), "w") as join_worker_tasks_join_worker_file: + join_worker_tasks_join_worker_file.write("""- name: Init cluster | Check if kubeadm has already run + stat: + path: "/var/lib/kubelet/config.yaml" + register: kubeadm_already_run + +- block: + - name: Generate join command + command: kubeadm token create --print-join-command + register: join_command + + - name: Print join command + debug: + msg: "{{ join_command.stdout_lines[0] }}" + + - name: Copy join command to local file + become: false + local_action: copy content="{{ join_command.stdout_lines[0] }} $@" dest="roles/join_worker/files/join-command" + + when: + - inventory_hostname not in groups['k8s_masters'][0] + delegate_to: "{{ groups['k8s_masters'][0] }}" + +- block: + - name: Copy the join command to server location + copy: + src: roles/join_worker/files/join-command + dest: /root/join-command.sh + mode: "0777" + + when: + - 
inventory_hostname not in groups['k8s_masters'] + - not kubeadm_already_run.stat.exists + +- name: Join | Join worker nodes to the cluster + command: sh /root/join-command.sh + when: + - inventory_hostname not in groups['k8s_masters'] + - not kubeadm_already_run.stat.exists + """) + + with open(os.path.join(join_worker_tasks_dir, "main.yml"), "w") as join_worker_tasks_main_file: + join_worker_tasks_main_file.write("""--- +# tasks file for join_worker + +- name: Join worker(s) node to cluster + include_tasks: join_worker.yml + """) \ No newline at end of file diff --git a/app/template_generators/ansible/install/nginx.py b/app/template_generators/ansible/install/nginx.py index 38175395..aed87f43 100644 --- a/app/template_generators/ansible/install/nginx.py +++ b/app/template_generators/ansible/install/nginx.py @@ -1,3 +1,4 @@ +import os def ansible_nginx_install_ubuntu(input): nginx_hosts = input.hosts @@ -10,120 +11,92 @@ def ansible_nginx_install_ubuntu(input): nginx_version_in_task = "nginx={{ nginx_version }}~{{ ansible_distribution_release }}" - prompt = f""" - Generate a Python code to generate an Ansible project (project name is app/media/MyAnsible) - that dynamically provisions Ansible resources ensuring a modular, flexible structure. Only provide - Python code, no explanations or markdown formatting, without ```python entry. - The project should be organized as follows: + + + project_name = "app/media/MyAnsible" + ansible_dir = project_name + group_vars_dir = os.path.join(ansible_dir, "group_vars") + host_vars_dir = os.path.join(ansible_dir, "host_vars") + roles_dir = os.path.join(ansible_dir, "roles") + install_nginx_dir = os.path.join(roles_dir, "install_nginx") + tasks_dir = os.path.join(install_nginx_dir, "tasks") + vars_dir = os.path.join(install_nginx_dir, "vars") + defaults_dir = os.path.join(install_nginx_dir, "defaults") + files_dir = os.path.join(install_nginx_dir, "files") + handlers_dir = os.path.join(install_nginx_dir, "handlers") + templates_dir = os.path.join(install_nginx_dir, "templates") - The structure of this project must be as follows: - ``` - ├── ansible.cfg - ├── group_vars - │   |── nginx_nodes - │   - ├── hosts - ├── host_vars - ├── nginx_playbook.yml - └── roles - └── install_nginx - ├── defaults - │   └── main.yml - ├── files - │   └── sample.sh - ├── handlers - │   └── main.yml - ├── tasks - │   └── main.yml - ├── templates - │   └── sample.j2 - └── vars - └── main.yml - ``` - - The content of ansible.cfg must be as follows: - ``` - [defaults] - host_key_checking=false - ``` - - group_vars directory includes a single file called "nginx_nodes" and the content of this file must be as follows: - ``` - ansible_port: {nginx_ansible_port} - ansible_user: {nginx_ansible_user} - ``` - - there is file called "hosts" which its content must be as follows: - ``` - {nginx_inventory} - ``` - - There is an empty directory called "host_vars" with no files included - - There is a file called "nginx_playbook.yml" which its content must be as follows: - ``` - - hosts: all - roles: - - install_nginx - ``` - - There is a directory called "roles" which a sub-directory called "install_nginx" (roles/install_nginx) - "install_nginx" has multiple sub-directories, so let's dive deeper into each its sub-directories: - - (install_nginx/tasks): This path has a file called "main.yml" which its content must be as follows: - ``` - --- - - name: Install CA certificates to ensure HTTPS connections work - apt: - name: ca-certificates - state: present + # Create project directories + 
os.makedirs(group_vars_dir, exist_ok=True) + os.makedirs(host_vars_dir, exist_ok=True) + os.makedirs(roles_dir, exist_ok=True) + os.makedirs(install_nginx_dir, exist_ok=True) + os.makedirs(tasks_dir, exist_ok=True) + os.makedirs(vars_dir, exist_ok=True) + os.makedirs(defaults_dir, exist_ok=True) + os.makedirs(files_dir, exist_ok=True) + os.makedirs(handlers_dir, exist_ok=True) + os.makedirs(templates_dir, exist_ok=True) - - name: Add Nginx signing key - apt_key: - url: "{nginx_repo_key_in_task}" - state: present + # Create ansible.cfg + with open(os.path.join(ansible_dir, "ansible.cfg"), "w") as ansible_cfg: + ansible_cfg.write("[defaults]\n") + ansible_cfg.write("host_key_checking=false\n") - - name: Add Nginx repository - apt_repository: - repo: "{nginx_repo_in_task}" - state: present - filename: nginx + # Create group_vars/nginx_nodes + with open(os.path.join(group_vars_dir, "nginx_nodes"), "w") as nginx_nodes: + nginx_nodes.write(f"ansible_port : {nginx_ansible_port}\n") + nginx_nodes.write(f"ansible_user : {nginx_ansible_user}\n") - - name: Update apt cache - apt: - update_cache: yes + # Create hosts + with open(os.path.join(ansible_dir, "hosts"), "w") as hosts_file: + + hosts_file.write(f"{nginx_inventory}") + - - name: Install specific version of Nginx - apt: - name: "{nginx_version_in_task}" - state: present + # Create empty host_vars directory (already created) - - name: Ensure Nginx service is running and enabled - service: - name: nginx - state: started - enabled: yes - ``` - - (install_nginx/vars): This path has a file called "main.yml" which its content must be as follows: - ``` - nginx_repo_key_url: "https://nginx.org/keys/nginx_signing.key" - nginx_repo_url: "http://nginx.org/packages/mainline/ubuntu/" - nginx_version: "{nginx_version}" - ``` - - finally just give me a python code without any note that can generate a project folder with the - given schema without ```python entry. and we dont need any base directory in the python code. - the final ansible template must work very well without any error! 
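# Illustrative smoke-test sketch (a hedged editorial example, not part of the generator in this diff): it assumes the
# `ansible-playbook` CLI is installed and on PATH, and it only parses the generated playbook and roles with
# --syntax-check; it never connects to the target hosts.
import subprocess

def syntax_check_generated_project(project_dir="app/media/MyAnsible", playbook="nginx_playbook.yml"):
    # "-i hosts" points at the generated inventory file; "--syntax-check" validates the playbook
    # and role YAML without executing any task.
    return subprocess.run(
        ["ansible-playbook", "-i", "hosts", "--syntax-check", playbook],
        cwd=project_dir,
        check=False,
    )

# Usage: syntax_check_generated_project() for the nginx project, or
# syntax_check_generated_project(playbook="kubernetes_playbook.yml") for the Kubernetes project.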
- - the python code you give me, must have structure like that: - - import os - project_name = "app/media/MyAnsible" - foo_dir = os.path.join(project_name, "bar") - x_dir = os.path.join(modules_dir, "y") + # Create nginx_playbook.yml + with open(os.path.join(ansible_dir, "nginx_playbook.yml"), "w") as playbook: + playbook.write("- hosts: all\n") + playbook.write(" roles:\n") + playbook.write(" - install_nginx\n") - # Create project directories - os.makedirs(ansible_dir, exist_ok=True) + # Create install_nginx/tasks/main.yml + with open(os.path.join(tasks_dir, "main.yml"), "w") as tasks_file: + tasks_file.write("---\n") + tasks_file.write("- name: Install CA certificates to ensure HTTPS connections work\n") + tasks_file.write(" apt:\n") + tasks_file.write(" name: ca-certificates\n") + tasks_file.write(" state: present\n\n") + tasks_file.write("- name: Add Nginx signing key\n") + tasks_file.write(" apt_key:\n") + tasks_file.write(" url: \"{{ nginx_repo_key_url }}\"\n") + tasks_file.write(" state: present\n\n") + tasks_file.write("- name: Add Nginx repository\n") + tasks_file.write(" apt_repository:\n") + tasks_file.write(" repo: \"deb {{ nginx_repo_url }} {{ ansible_distribution_release }} nginx\"\n") + tasks_file.write(" state: present\n") + tasks_file.write(" filename: nginx\n\n") + tasks_file.write("- name: Update apt cache\n") + tasks_file.write(" apt:\n") + tasks_file.write(" update_cache: yes\n\n") + tasks_file.write("- name: Install specific version of Nginx\n") + tasks_file.write(" apt:\n") + tasks_file.write(" name: \"nginx={{ nginx_version }}~{{ ansible_distribution_release }}\"\n") + tasks_file.write(" state: present\n\n") + tasks_file.write("- name: Ensure Nginx service is running and enabled\n") + tasks_file.write(" service:\n") + tasks_file.write(" name: nginx\n") + tasks_file.write(" state: started\n") + tasks_file.write(" enabled: yes\n") - # Create main.tf - with open(os.path.join(project_name, "main.tf"), "w") as main_file: - # any thing you need - """ - return prompt - + # Create install_nginx/vars/main.yml + with open(os.path.join(vars_dir, "main.yml"), "w") as vars_file: + vars_file.write("nginx_repo_key_url: \"https://nginx.org/keys/nginx_signing.key\"\n") + vars_file.write("nginx_repo_url: \"http://nginx.org/packages/mainline/ubuntu/\"\n") + vars_file.write(f"nginx_version: \"{nginx_version}\"\n") + def ansible_nginx_install(input):