Initial snapshot before transformerlab recovery
---
# llama.cpp installation and build
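#
# Overall flow: detect the platform and GPU (WSL, NVIDIA CUDA, Apple Metal),
# clone llama.cpp, configure CMake with the matching backend, build with Ninja,
# and put the resulting build/bin directory on the user's PATH.
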
- name: Check if running on WSL
  ansible.builtin.command: grep -qi microsoft /proc/version
  register: wsl_check
  changed_when: false
  failed_when: false

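# On WSL the kernel string in /proc/version contains "microsoft", which is what
# the grep above keys on; rc 0 therefore means WSL.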
- name: Set WSL fact
  ansible.builtin.set_fact:
    is_wsl: "{{ wsl_check.rc == 0 }}"

- name: Detect GPU on Linux/WSL
  ansible.builtin.command: nvidia-smi
  register: nvidia_smi_output
  changed_when: false
  failed_when: false
  when: ansible_os_family == "Debian" or is_wsl | default(false)

- name: Set GPU type for WSL/Linux
  ansible.builtin.set_fact:
    gpu_type: "{{ 'nvidia' if nvidia_smi_output.rc == 0 else 'none' }}"
  when: is_wsl | default(false) or ansible_os_family == "Debian"

- name: Check for Metal GPU on macOS
  ansible.builtin.command: system_profiler SPDisplaysDataType
  register: metal_check
  changed_when: false
  failed_when: false
  when: ansible_os_family == "Darwin"

- name: Set GPU type for macOS
  ansible.builtin.set_fact:
    gpu_type: "metal"
  when: ansible_os_family == "Darwin" and metal_check.rc == 0

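# At this point gpu_type is 'nvidia' or 'none' on Debian/WSL hosts and 'metal'
# on macOS; on anything else it stays unset and is treated as 'none' below.
# Nothing here sets gpu_type to 'amd', so the ROCm configure task further down
# only applies when gpu_type is supplied externally (e.g. via extra vars).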
- name: Display detected GPU type
  ansible.builtin.debug:
    msg: "llama.cpp GPU type: {{ gpu_type | default('none') }}"

- name: Check if llama.cpp already exists
  ansible.builtin.stat:
    path: "{{ llmlab_base }}/lab2/llama.cpp"
  register: llama_cpp_stat

- name: Check existing build config
  # Shell operators (&&, ||, 2>) need the shell module rather than command.
  # The match is case-insensitive and ignores the cache entry type, since CMake
  # may record the flag as BOOL or UNINITIALIZED depending on how it was set.
  ansible.builtin.shell:
    cmd: grep -qiE "^GGML_CUDA:[^=]*=ON" "{{ llmlab_base }}/lab2/llama.cpp/build/CMakeCache.txt" 2>/dev/null && echo "cuda" || echo "none"
  register: existing_gpu_check
  changed_when: false
  failed_when: false
  when: llama_cpp_stat.stat.exists

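# The CMakeCache probe above only tells CUDA apart from everything else, so an
# existing checkout is always rebuilt when gpu_type is 'metal' or 'amd'.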
- name: Determine if rebuild needed
  ansible.builtin.set_fact:
    needs_rebuild: >-
      {{
        not llama_cpp_stat.stat.exists or
        (gpu_type == 'nvidia' and existing_gpu_check.stdout != 'cuda') or
        (gpu_type == 'metal' and existing_gpu_check.stdout != 'metal') or
        (gpu_type == 'amd' and existing_gpu_check.stdout != 'amd')
      }}

- name: Clean build directory for rebuild
  ansible.builtin.file:
    path: "{{ llmlab_base }}/lab2/llama.cpp/build"
    state: absent
  become: no
  # needs_rebuild comes from a folded scalar, so it is a string; cast it.
  when: needs_rebuild | default(false) | bool

- name: Clone llama.cpp repository
  ansible.builtin.git:
    repo: https://github.com/ggerganov/llama.cpp
    dest: "{{ llmlab_base }}/lab2/llama.cpp"
    version: master
    update: no
  become: no
  when: not llama_cpp_stat.stat.exists

- name: Create build directory
  ansible.builtin.file:
    path: "{{ llmlab_base }}/lab2/llama.cpp/build"
    state: directory
    mode: '0755'
  become: no

- name: Check whether llama.cpp is already configured
  ansible.builtin.command:
    cmd: test -f CMakeCache.txt
  args:
    chdir: "{{ llmlab_base }}/lab2/llama.cpp/build"
  register: cmake_configured
  changed_when: false
  failed_when: false

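# At most one of the four cmake configure tasks below runs: it is selected by
# gpu_type and skipped entirely when a CMakeCache.txt already exists
# (cmake_configured.rc == 0).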
- name: Configure llama.cpp with CUDA (NVIDIA GPU)
  ansible.builtin.command:
    argv:
      - cmake
      - ..
      # -G and the generator name are separate argv entries so cmake sees the
      # generator as exactly "Ninja".
      - -G
      - Ninja
      - -DCMAKE_BUILD_TYPE=Release
      - -DGGML_CUDA=on
  args:
    chdir: "{{ llmlab_base }}/lab2/llama.cpp/build"
  when: gpu_type == 'nvidia' and cmake_configured.rc != 0
  become: no

- name: Configure llama.cpp for AMD (ROCm)
  ansible.builtin.command:
    argv:
      - cmake
      - ..
      - -G
      - Ninja
      - -DCMAKE_BUILD_TYPE=Release
      # GGML_HIP is the ROCm/HIP flag in current llama.cpp (older checkouts
      # used GGML_HIPBLAS).
      - -DGGML_HIP=on
  args:
    chdir: "{{ llmlab_base }}/lab2/llama.cpp/build"
  when: gpu_type == 'amd' and cmake_configured.rc != 0
  become: no

- name: Configure llama.cpp for Metal (macOS)
  ansible.builtin.command:
    argv:
      - cmake
      - ..
      - -G
      - Ninja
      - -DCMAKE_BUILD_TYPE=Release
      - -DGGML_METAL=on
  args:
    chdir: "{{ llmlab_base }}/lab2/llama.cpp/build"
  when: gpu_type == 'metal' and cmake_configured.rc != 0
  become: no

- name: Configure llama.cpp for CPU only
  ansible.builtin.command:
    argv:
      - cmake
      - ..
      - -G
      - Ninja
      - -DCMAKE_BUILD_TYPE=Release
  args:
    chdir: "{{ llmlab_base }}/lab2/llama.cpp/build"
  when: gpu_type | default('none') == 'none' and cmake_configured.rc != 0
  become: no

- name: Build llama.cpp
  ansible.builtin.command:
    argv:
      - ninja
  args:
    chdir: "{{ llmlab_base }}/lab2/llama.cpp/build"
  become: no
  register: build_output

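# Ninja picks a parallel job count from the CPU count on its own; the captured
# output is trimmed to its last ten lines by the debug task below.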
- name: Display build output
  ansible.builtin.debug:
    msg: "{{ build_output.stdout_lines[-10:] }}"
  when: build_output.stdout_lines is defined

- name: Add llama.cpp to user PATH
  ansible.builtin.lineinfile:
    path: "{{ llmlab_base }}/.bashrc"
    # Point at the same location the repository was cloned to above.
    line: 'export PATH="{{ llmlab_base }}/lab2/llama.cpp/build/bin:$PATH"'
    state: present
    insertafter: EOF
  notify: Shell updated

- name: Display llama.cpp installation
  ansible.builtin.debug:
    msg: "llama.cpp installed to {{ llmlab_base }}/lab2/llama.cpp"
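
# Quick manual check after a run (this assumes the binaries land in build/bin,
# which is where current llama.cpp CMake builds place them):
#   <llmlab_base>/lab2/llama.cpp/build/bin/llama-cli --help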