Align installer with updated lab models

OpenCode
2026-04-24 20:08:56 -06:00
parent 7360cd040a
commit e915d87ec6
6 changed files with 16 additions and 52 deletions
+6 -5
@@ -23,15 +23,16 @@ This project builds a student-friendly local lab environment for the courseware
 Lab 1 is now provisioned directly by the installer:
-- The `Qwen3-0.6B-Q8_0.gguf` and `Llama-3.2-1B.Q4_K_M.gguf` files are mirrored into `state/models/lab1/`.
-- The Qwen GGUF is pre-registered in Ollama as `lab1-qwen3-0.6b-q8_0`.
-- The wiki serves same-host download links for both GGUFs through `/api/lab1/models/...`.
+- The `Llama-3.2-1B.Q4_K_M.gguf` file is mirrored into `state/models/lab1/`.
+- The Lab 1 confidence widget uses the pre-pulled Gemma 4 E2B Q4 Ollama model, `batiai/gemma4-e2b:q4`.
+- The wiki serves a same-host download link for the Llama GGUF through `/api/lab1/models/...`.
 - Lab 1 confidence visualization requires Ollama `0.12.11` or newer because it depends on logprobs.
 
 ## Lab 2 Defaults
 
-`./labctl up` now pre-pulls the Lab 2 Gemma 4 E2B Ollama variants used by the wiki widget:
+`./labctl up` now pre-pulls the Gemma 4 E2B Ollama variants used by the wiki widgets:
+- `cajina/gemma4_e2b-q2_k_xl:v01`
 - `batiai/gemma4-e2b:q4`
 - `batiai/gemma4-e2b:q6`
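For reference, the new Lab 1 defaults can be sanity-checked after `./labctl up`. This is a rough sketch, not installer code; it assumes Ollama is on `PATH` and that you run it from the repo root.

```bash
# Sketch: verify the Lab 1 defaults described above (assumes repo root, ollama on PATH).
ollama --version                                     # should report 0.12.11 or newer for logprobs
ollama list | grep -i 'gemma4-e2b'                   # batiai/gemma4-e2b:q4 should be pre-pulled
ls -lh state/models/lab1/Llama-3.2-1B.Q4_K_M.gguf    # mirrored GGUF served via /api/lab1/models/...
```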
@@ -97,7 +98,7 @@ If CUDA is already mounted or preinstalled outside `PATH`, the installer detects
 - The default deployment is centered on Ollama-backed local inference and browser-based tools such as Netron and the wiki.
 - Netron is installed into a managed Python virtual environment and served locally instead of being provisioned as a desktop package.
-- Lab 1 model downloads are mirrored locally during `./labctl up`, so students do not have to fetch them manually from the original source.
+- Lab 1's Llama GGUF download is mirrored locally during `./labctl up`, so students do not have to fetch it manually from the original source.
 - WhiteRabbitNeo assets remain a separate Lab 2 flow and are still handled outside the default `./labctl up` run.
 - Run `./labctl assets lab2` when you want to populate repo-local Lab 2 assets in `assets/lab2/` from Hugging Face.
 - After base setup, run `state/lab2/download_whiterabbitneo-gguf.sh` to fetch only the `Q4_K_M`, `Q8_0`, and `IQ2_M` files from `bartowski/WhiteRabbitNeo_WhiteRabbitNeo-V3-7B-GGUF` and register local Ollama models `WhiteRabbitNeo`, `WhiteRabbitNeo-Q4`, `WhiteRabbitNeo-Q8`, and `WhiteRabbitNeo-IQ2`.
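The Lab 2 flow stays separate, as the bullets above note. A hedged sketch of the full sequence (the ordering of the two asset steps is an assumption):

```bash
# Sketch of the Lab 2 asset flow described above (run from the repo root).
./labctl up                                  # base setup; pre-pulls the Gemma 4 E2B variants
./labctl assets lab2                         # optional: populate repo-local assets in assets/lab2/
state/lab2/download_whiterabbitneo-gguf.sh   # fetch Q4_K_M, Q8_0, IQ2_M and register the models
ollama list | grep -i whiterabbitneo         # WhiteRabbitNeo, -Q4, -Q8, -IQ2 should now be listed
```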
+3 -4
@@ -50,10 +50,7 @@ courseware_wiki_repo: "https://git.zuccaro.me/bzuccaro/LLM-Labs.git"
 courseware_open_webui_spec: "open-webui"
 courseware_embedding_atlas_spec: "embedding-atlas"
-courseware_lab1_qwen_filename: "Qwen3-0.6B-Q8_0.gguf"
-courseware_lab1_qwen_download_url: "https://huggingface.co/Qwen/Qwen3-0.6B-GGUF/resolve/main/Qwen3-0.6B-Q8_0.gguf?download=true"
-courseware_lab1_qwen_local_path: "{{ courseware_lab1_models_dir }}/{{ courseware_lab1_qwen_filename }}"
-courseware_lab1_qwen_model_alias: "lab1-qwen3-0.6b-q8_0"
+courseware_lab1_ollama_model_alias: "batiai/gemma4-e2b:q4"
 courseware_lab1_llama_filename: "Llama-3.2-1B.Q4_K_M.gguf"
 courseware_lab1_llama_download_url: "https://huggingface.co/DevQuasar-3/meta-llama.Llama-3.2-1B-GGUF/resolve/main/Llama-3.2-1B.Q4_K_M.gguf?download=true"
 courseware_lab1_llama_local_path: "{{ courseware_lab1_models_dir }}/{{ courseware_lab1_llama_filename }}"
@@ -74,6 +71,8 @@ courseware_white_rabbit_variants:
quant: "IQ2_M" quant: "IQ2_M"
filename: "WhiteRabbitNeo_WhiteRabbitNeo-V3-7B-IQ2_M.gguf" filename: "WhiteRabbitNeo_WhiteRabbitNeo-V3-7B-IQ2_M.gguf"
courseware_lab2_ollama_models: courseware_lab2_ollama_models:
- label: "Gemma 4 E2B Q2 XL"
value: "cajina/gemma4_e2b-q2_k_xl:v01"
- label: "Gemma 4 E2B Q4" - label: "Gemma 4 E2B Q4"
value: "batiai/gemma4-e2b:q4" value: "batiai/gemma4-e2b:q4"
- label: "Gemma 4 E2B Q6" - label: "Gemma 4 E2B Q6"
-33
@@ -31,41 +31,8 @@
     - courseware_lab1_ollama_semver | length == 0
       or not (courseware_lab1_ollama_semver is version(courseware_ollama_min_version, '>='))
 
-- name: Download mirrored Lab 1 Qwen model
-  get_url:
-    url: "{{ courseware_lab1_qwen_download_url }}"
-    dest: "{{ courseware_lab1_qwen_local_path }}"
-    mode: "0644"
-
 - name: Download mirrored Lab 1 Llama model
   get_url:
     url: "{{ courseware_lab1_llama_download_url }}"
     dest: "{{ courseware_lab1_llama_local_path }}"
     mode: "0644"
-
-- name: Write Lab 1 Ollama Modelfile
-  copy:
-    dest: "{{ courseware_lab1_dir }}/Modelfile.{{ courseware_lab1_qwen_model_alias }}"
-    mode: "0644"
-    content: |
-      FROM {{ courseware_lab1_qwen_local_path }}
-
-- name: Start Ollama before Lab 1 model registration
-  command:
-    argv:
-      - "{{ courseware_root }}/scripts/service_manager.sh"
-      - start
-      - ollama
-  changed_when: false
-
-- name: Register Lab 1 Qwen model with Ollama
-  command:
-    argv:
-      - "{{ courseware_ollama_bin }}"
-      - create
-      - "{{ courseware_lab1_qwen_model_alias }}"
-      - -f
-      - "{{ courseware_lab1_dir }}/Modelfile.{{ courseware_lab1_qwen_model_alias }}"
-  environment:
-    OLLAMA_HOST: "{{ courseware_bind_host }}:{{ courseware_ports.ollama }}"
-    OLLAMA_MODELS: "{{ courseware_ollama_models_dir }}"
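The removed tasks above boil down to a Modelfile plus `ollama create`. If you still want the old Qwen GGUF registered locally, a rough shell equivalent looks like this; the paths, host, and port are assumptions, and the GGUF is no longer mirrored by the installer, so download it yourself first.

```bash
# Manual equivalent of the removed Qwen registration (paths/host/port are assumptions).
GGUF="state/models/lab1/Qwen3-0.6B-Q8_0.gguf"          # download this yourself; no longer mirrored
ALIAS="lab1-qwen3-0.6b-q8_0"

printf 'FROM %s\n' "$GGUF" > "state/lab1/Modelfile.$ALIAS"
scripts/service_manager.sh start ollama                 # same helper the removed task invoked
OLLAMA_HOST="127.0.0.1:11434" ollama create "$ALIAS" -f "state/lab1/Modelfile.$ALIAS"
```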
+1 -2
@@ -26,9 +26,8 @@ EMBEDDING_ATLAS_VENV="{{ courseware_venvs_dir }}/embedding-atlas"
 TTPS_DATASET_PATH="{{ courseware_datasets_dir }}/ttps_dataset.parquet"
 WIKI_TEST_RAW_PATH="{{ courseware_datasets_dir }}/wiki.test.raw"
 COURSEWARE_OLLAMA_BASE_URL="http://{{ courseware_url_host }}:{{ courseware_ports.ollama }}"
-COURSEWARE_LAB1_QWEN_MODEL_PATH="{{ courseware_lab1_qwen_local_path }}"
 COURSEWARE_LAB1_LLAMA_MODEL_PATH="{{ courseware_lab1_llama_local_path }}"
-COURSEWARE_LAB1_OLLAMA_MODEL_ALIAS="{{ courseware_lab1_qwen_model_alias }}"
+COURSEWARE_LAB1_OLLAMA_MODEL_ALIAS="{{ courseware_lab1_ollama_model_alias }}"
 UNSLOTH_BIN="{{ ansible_env.HOME }}/.local/bin/unsloth"
 PROMPTFOO_DIR="{{ courseware_promptfoo_dir }}"
 PROMPTFOO_BIN="{{ courseware_tools_dir }}/promptfoo/node_modules/.bin/promptfoo"
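A quick way to confirm the renamed alias variable landed in the rendered runtime env; the env file path below is a placeholder assumption, so point it at wherever the installer writes this template.

```bash
# Sketch: check the rendered runtime env (ENV_FILE path is an assumption).
ENV_FILE="state/courseware-runtime.env"
grep -E '^COURSEWARE_LAB1_(LLAMA_MODEL_PATH|OLLAMA_MODEL_ALIAS)=' "$ENV_FILE"
! grep -q '^COURSEWARE_LAB1_QWEN_MODEL_PATH=' "$ENV_FILE"   # the Qwen path should be gone
```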
+4 -4
@@ -84,8 +84,8 @@ WARNING: THIS SCRIPT WILL CONFIGURE YOUR ENVIRONMENT WILL THE FOLLOWING SOFTWARE
 - Unsloth Studio
 - Kiln Desktop
 - Course-specific support assets for lab 1, lab 2, and lab 4
-- Pre-pulled Lab 2 Ollama models for Q4 and Q6 Gemma 4 E2B
-- A pre-registered Lab 1 Ollama model (requires Ollama ${min_ollama}+)
+- Pre-pulled Gemma 4 E2B Ollama models for Lab 1 and Lab 2
+- Lab 1 confidence support through Gemma 4 E2B Q4 (requires Ollama ${min_ollama}+)
 
 IT IS RECOMMENDED TO RUN THIS IN AN ISLOATED ENVIRONMENT (Dedicated WSL, VM, etc.)
@@ -109,8 +109,8 @@ WARNING: THIS SCRIPT WILL CONFIGURE YOUR ENVIRONMENT WILL THE FOLLOWING SOFTWARE
 - Unsloth Studio
 - Kiln Desktop
 - Course-specific support assets for lab 1, lab 2, and lab 4
-- Pre-pulled Lab 2 Ollama models for Q4 and Q6 Gemma 4 E2B
-- A pre-registered Lab 1 Ollama model (requires Ollama ${min_ollama}+)
+- Pre-pulled Gemma 4 E2B Ollama models for Lab 1 and Lab 2
+- Lab 1 confidence support through Gemma 4 E2B Q4 (requires Ollama ${min_ollama}+)
 
 IT IS RECOMMENDED TO RUN THIS IN AN ISLOATED ENVIRONMENT (Dedicated WSL, VM, etc.)
+2 -4
@@ -28,9 +28,8 @@ load_runtime_env() {
: "${WIKI_DIR:=$COURSEWARE_STATE_DIR/repos/LLM-Labs}" : "${WIKI_DIR:=$COURSEWARE_STATE_DIR/repos/LLM-Labs}"
: "${WIKI_RUNTIME_CONFIG_PATH:=$WIKI_DIR/public/courseware-runtime.json}" : "${WIKI_RUNTIME_CONFIG_PATH:=$WIKI_DIR/public/courseware-runtime.json}"
: "${COURSEWARE_OLLAMA_BASE_URL:=http://$COURSEWARE_URL_HOST:$COURSEWARE_OLLAMA_PORT}" : "${COURSEWARE_OLLAMA_BASE_URL:=http://$COURSEWARE_URL_HOST:$COURSEWARE_OLLAMA_PORT}"
: "${COURSEWARE_LAB1_QWEN_MODEL_PATH:=$COURSEWARE_STATE_DIR/models/lab1/Qwen3-0.6B-Q8_0.gguf}"
: "${COURSEWARE_LAB1_LLAMA_MODEL_PATH:=$COURSEWARE_STATE_DIR/models/lab1/Llama-3.2-1B.Q4_K_M.gguf}" : "${COURSEWARE_LAB1_LLAMA_MODEL_PATH:=$COURSEWARE_STATE_DIR/models/lab1/Llama-3.2-1B.Q4_K_M.gguf}"
: "${COURSEWARE_LAB1_OLLAMA_MODEL_ALIAS:=lab1-qwen3-0.6b-q8_0}" : "${COURSEWARE_LAB1_OLLAMA_MODEL_ALIAS:=batiai/gemma4-e2b:q4}"
: "${LLAMA_CPP_BIN_DIR:=$COURSEWARE_STATE_DIR/repos/llama.cpp/build/bin}" : "${LLAMA_CPP_BIN_DIR:=$COURSEWARE_STATE_DIR/repos/llama.cpp/build/bin}"
if [ -n "${OLLAMA_BIN:-}" ] && [[ "$OLLAMA_BIN" != */* ]] && command -v "$OLLAMA_BIN" >/dev/null 2>&1; then if [ -n "${OLLAMA_BIN:-}" ] && [[ "$OLLAMA_BIN" != */* ]] && command -v "$OLLAMA_BIN" >/dev/null 2>&1; then
@@ -149,11 +148,10 @@ service_command() {
"$COURSEWARE_PROMPTFOO_PORT" "$COURSEWARE_PROMPTFOO_PORT"
;; ;;
wiki) wiki)
printf 'cd "%s" && PATH="%s:$PATH" exec env COURSEWARE_OLLAMA_BASE_URL="%s" COURSEWARE_LAB1_QWEN_MODEL_PATH="%s" COURSEWARE_LAB1_LLAMA_MODEL_PATH="%s" COURSEWARE_LAB1_OLLAMA_MODEL_ALIAS="%s" "./node_modules/.bin/next" start --hostname %s --port %s' \ printf 'cd "%s" && PATH="%s:$PATH" exec env COURSEWARE_OLLAMA_BASE_URL="%s" COURSEWARE_LAB1_LLAMA_MODEL_PATH="%s" COURSEWARE_LAB1_OLLAMA_MODEL_ALIAS="%s" "./node_modules/.bin/next" start --hostname %s --port %s' \
"$WIKI_DIR" \ "$WIKI_DIR" \
"$NODE_RUNTIME_BIN_DIR" \ "$NODE_RUNTIME_BIN_DIR" \
"$COURSEWARE_OLLAMA_BASE_URL" \ "$COURSEWARE_OLLAMA_BASE_URL" \
"$COURSEWARE_LAB1_QWEN_MODEL_PATH" \
"$COURSEWARE_LAB1_LLAMA_MODEL_PATH" \ "$COURSEWARE_LAB1_LLAMA_MODEL_PATH" \
"$COURSEWARE_LAB1_OLLAMA_MODEL_ALIAS" \ "$COURSEWARE_LAB1_OLLAMA_MODEL_ALIAS" \
"$COURSEWARE_BIND_HOST" \ "$COURSEWARE_BIND_HOST" \