OpenCode
2026-04-23 14:48:06 -06:00
parent 30f80fe058
commit 78676ece59
5 changed files with 48 additions and 7 deletions
+12 -1
@@ -4,7 +4,7 @@ This project builds a student-friendly local lab environment for the courseware
- `./deploy-courseware.sh` installs and configures the environment, then starts every managed service.
- `./destroy-courseware.sh` stops the managed services, uninstalls courseware-managed Ollama, and removes the project-owned lab state.
-- `./labctl` provides day-two controls such as `assets lab2`, `start`, `stop`, `status`, `urls`, `logs`, and `open kiln`.
+- `./labctl` provides day-two controls such as `assets lab2`, `ollama_models`, `start`, `stop`, `status`, `urls`, `logs`, and `open kiln`.
## What It Installs
@@ -28,6 +28,16 @@ Lab 1 is now provisioned directly by the installer:
- The wiki serves same-host download links for both GGUFs through `/api/lab1/models/...`.
- Lab 1 confidence visualization requires Ollama `0.12.11` or newer because it depends on logprobs.
## Lab 2 Defaults
`./labctl up` now pre-pulls the Lab 2 Gemma 4 E2B Ollama variants used by the wiki widget:
- `cajina/gemma4_e2b-q2_k_xl:v01`
- `batiai/gemma4-e2b:q4`
- `bjoernb/gemma4-e2b-fast:latest`
If you want to re-pull just those managed Ollama models later, run `./labctl ollama_models`.
## Supported Host Profiles
This build intentionally avoids the reference VM's hardware workarounds.
@@ -126,6 +136,7 @@ The deployment will:
- `./labctl up` installs the environment and then starts every managed service.
- `./labctl versions` shows the pinned Netron version, minimum Ollama version, and Ansible runtime version used by this workspace.
- `./labctl assets lab2` is a separate manual step that clones the base WhiteRabbitNeo repo into `assets/lab2/WhiteRabbitNeo-V3-7B` and downloads the supported `Q4_K_M`, `Q8_0`, and `IQ2_M` GGUFs into `assets/lab2/WhiteRabbitNeo_WhiteRabbitNeo-V3-7B-GGUF`.
- `./labctl ollama_models` re-pulls the managed Lab 2 Gemma 4 E2B Ollama model set without rerunning the full installer.
- `./labctl start core` starts only `ollama` and `open-webui`.
- `./labctl start all` starts every managed web service.
- `./labctl open kiln` launches the Kiln desktop app installed into the project state.
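The `labctl` flow described above can be exercised end to end with a few commands. A minimal usage sketch, assuming the repository root as the working directory and that the courseware-managed `ollama` binary is reachable on `PATH` after provisioning (neither assumption is spelled out in this diff):

```bash
# One-time provisioning: installs everything and starts all managed services,
# including the pre-pull of the three Lab 2 Gemma 4 E2B variants.
./labctl up

# Day two: re-pull only the managed Lab 2 Ollama models, without a full reinstall.
./labctl ollama_models

# Optional sanity check: all three variants should appear in the local model list.
ollama list | grep -i gemma4
```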
+9 -6
@@ -73,12 +73,15 @@ courseware_white_rabbit_variants:
  - ollama_model: "WhiteRabbitNeo-IQ2"
    quant: "IQ2_M"
    filename: "WhiteRabbitNeo_WhiteRabbitNeo-V3-7B-IQ2_M.gguf"
-courseware_ollama_models:
-  - "llama3.2"
-  - "qwen3.5:4b"
-  - "gemma3n:e2b"
-courseware_optional_ollama_models:
-  - "gemma3:12b-it-qat"
+courseware_lab2_ollama_models:
+  - label: "Gemma 4 E2B Q2"
+    value: "cajina/gemma4_e2b-q2_k_xl:v01"
+  - label: "Gemma 4 E2B Q4"
+    value: "batiai/gemma4-e2b:q4"
+  - label: "Gemma 4 E2B Q8"
+    value: "bjoernb/gemma4-e2b-fast:latest"
+courseware_ollama_models: "{{ courseware_lab2_ollama_models | map(attribute='value') | list }}"
+courseware_optional_ollama_models: []
courseware_install_optional_heavy_models: false
courseware_wsl_cuda_pin_url: "https://developer.download.nvidia.com/compute/cuda/repos/wsl-ubuntu/x86_64/cuda-wsl-ubuntu.pin"
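The `ollama_models` role that consumes these variables is not included in this commit, so the following is only a sketch of what pulling the `value` entries from `courseware_lab2_ollama_models` amounts to, assuming the role ultimately shells out to the managed `ollama` CLI:

```bash
# Illustrative only: pull each Lab 2 model tag through the Ollama CLI.
# The real work is performed by the ollama_models Ansible role (not shown here).
for model in \
  "cajina/gemma4_e2b-q2_k_xl:v01" \
  "batiai/gemma4-e2b:q4" \
  "bjoernb/gemma4-e2b-fast:latest"; do
  ollama pull "$model"
done
```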
+1
@@ -15,6 +15,7 @@
- open_webui
- chunkviz
- promptfoo
- { role: ollama_models, tags: ["ollama_models"] }
- wiki
- kiln
- unsloth
@@ -1,4 +1,13 @@
{
  "lab1NetronUrl": "http://{{ courseware_url_host }}:{{ courseware_ports.netron }}",
  "lab2OllamaUrl": "http://{{ courseware_url_host }}:{{ courseware_ports.ollama }}",
  "lab2OllamaModels": [
{% for model in courseware_lab2_ollama_models %}
    {
      "label": "{{ model.label }}",
      "value": "{{ model.value }}"
    }{% if not loop.last %},{% endif %}
{% endfor %}
  ],
  "lab3TerminalUrl": "http://{{ courseware_url_host }}:{{ courseware_ports.wetty }}{{ courseware_wetty_base_path }}"
}
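For reference, a rendered wiki config produced by this template might look roughly like the preview below. The host and port values are made-up placeholders (only the model labels and tags come from the defaults above); piping through `jq` simply confirms the loop emits well-formed JSON with correct comma placement:

```bash
# Hypothetical rendered output; localhost and the port numbers are placeholders.
cat <<'EOF' | jq .
{
  "lab1NetronUrl": "http://localhost:18080",
  "lab2OllamaUrl": "http://localhost:11434",
  "lab2OllamaModels": [
    { "label": "Gemma 4 E2B Q2", "value": "cajina/gemma4_e2b-q2_k_xl:v01" },
    { "label": "Gemma 4 E2B Q4", "value": "batiai/gemma4-e2b:q4" },
    { "label": "Gemma 4 E2B Q8", "value": "bjoernb/gemma4-e2b-fast:latest" }
  ],
  "lab3TerminalUrl": "http://localhost:13000/wetty"
}
EOF
```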
+17
@@ -18,6 +18,7 @@ usage() {
Usage:
./labctl up
./labctl down
./labctl ollama_models
./labctl preflight
./labctl versions
./labctl assets lab2 [--refresh]
@@ -82,6 +83,7 @@ WARNING: THIS SCRIPT WILL CONFIGURE YOUR ENVIRONMENT WITH THE FOLLOWING SOFTWARE
- Unsloth Studio
- Kiln Desktop
- Course-specific support assets for lab 1, lab 2, and lab 4
- Pre-pulled Lab 2 Ollama models for Q2, Q4, and Q8 Gemma 4 E2B
- A pre-registered Lab 1 Ollama model (requires Ollama ${min_ollama}+)
IT IS RECOMMENDED TO RUN THIS IN AN ISOLATED ENVIRONMENT (Dedicated WSL, VM, etc.)
@@ -106,6 +108,7 @@ WARNING: THIS SCRIPT WILL CONFIGURE YOUR ENVIRONMENT WITH THE FOLLOWING SOFTWARE
- Unsloth Studio
- Kiln Desktop
- Course-specific support assets for lab 1, lab 2, and lab 4
- Pre-pulled Lab 2 Ollama models for Q2, Q4, and Q8 Gemma 4 E2B
- A pre-registered Lab 1 Ollama model (requires Ollama ${min_ollama}+)
IT IS RECOMMENDED TO RUN THIS IN AN ISOLATED ENVIRONMENT (Dedicated WSL, VM, etc.)
@@ -536,6 +539,17 @@ handle_assets_command() {
esac
}
refresh_ollama_models() {
if [ ! -f "$ROOT_DIR/state/runtime.env" ]; then
cat <<'EOF' >&2
Missing state/runtime.env. Run ./labctl up first so the managed Ollama service is configured before pulling models.
EOF
exit 1
fi
run_playbook up.yml --tags ollama_models
}
main() {
local cmd=${1:-}
shift || true
@@ -546,6 +560,9 @@ main() {
run_playbook up.yml
run_project_script "$ROOT_DIR/scripts/service_manager.sh" start all
;;
ollama_models)
refresh_ollama_models
;;
down)
run_project_script "$ROOT_DIR/scripts/service_manager.sh" stop all || true
run_playbook down.yml
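Combined with the `{ role: ollama_models, tags: ["ollama_models"] }` entry added to the playbook, the new subcommand is roughly equivalent to the sketch below, assuming `run_playbook` is a thin wrapper around `ansible-playbook` (its definition is not part of this diff):

```bash
# Rough equivalent of `./labctl ollama_models`, for illustration only.
# The guard mirrors refresh_ollama_models(): refuse to run until `./labctl up`
# has written the runtime environment for the managed Ollama service.
if [ ! -f state/runtime.env ]; then
  echo "Missing state/runtime.env. Run ./labctl up first." >&2
  exit 1
fi

# --tags ollama_models limits the play to the newly tagged role, so only the
# Lab 2 Gemma 4 E2B model pulls run, not the full installer.
ansible-playbook up.yml --tags ollama_models
```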