---
# Main playbook for LLM Labs deployment
# This playbook orchestrates all installation and configuration tasks

- name: Deploy LLM Labs Environment
  hosts: localhost
  gather_facts: true
  vars:
    # Plain defaults: values passed via extra-vars (-e) still override these
    # by variable precedence. Do NOT write the self-referential form
    # (platform: "{{ platform | default('linux-cpu') }}") — defining a play
    # var in terms of itself raises Ansible's
    # "recursive loop detected in template string" error.
    platform: "linux-cpu"
    gpu_type: "none"
    # Home of the user the play runs as.
    # NOTE(review): under `become`, ansible_env.HOME is the become-user's
    # (typically root's) home, not the invoking user's — confirm intent.
    user_home: "{{ ansible_env.HOME }}"
    llmlab_base: "{{ ansible_env.HOME | default('/home/' + ansible_user_id) }}"

  tasks:
    - name: Display platform information
      ansible.builtin.debug:
        msg: "Deploying on platform: {{ platform }}"

    - name: Include common setup
      ansible.builtin.import_role:
        name: common

    # GPU role only applies on Linux/WSL GPU platforms.
    - name: Include GPU setup (Linux/WSL)
      ansible.builtin.import_role:
        name: linux-gpu
      when: platform in ['linux-gpu', 'linux-amd', 'wsl']

    - name: Include Ollama setup
      ansible.builtin.import_role:
        name: ollama

    - name: Include Netron setup
      ansible.builtin.import_role:
        name: netron

    - name: Include Lab 1 asset setup
      ansible.builtin.import_role:
        name: lab1_assets

    - name: Include llama.cpp setup
      ansible.builtin.import_role:
        name: llama-cpp

    - name: Include Unsloth Studio setup
      ansible.builtin.import_role:
        name: unsloth

    - name: Include Open WebUI setup
      ansible.builtin.import_role:
        name: open-webui

    - name: Include ChunkViz setup
      ansible.builtin.import_role:
        name: chunkviz

    - name: Include Promptfoo setup
      ansible.builtin.import_role:
        name: promptfoo

    - name: Include lab scripts setup
      ansible.builtin.import_role:
        name: lab-scripts

    - name: Display completion message
      ansible.builtin.debug:
        msg: "LLM Labs deployment completed successfully!"