{
  "name": "LocalAGI",
  "slug": "localagi",
  "categories": [
    20,
    3
  ],
  "date_created": "2026-03-03",
  "type": "ct",
  "updateable": true,
  "privileged": false,
  "interface_port": 8080,
  "documentation": "https://github.com/mudler/LocalAGI#installation-options",
  "website": "https://github.com/mudler/LocalAGI",
  "logo": "https://github.com/mudler/LocalAGI/raw/main/webui/react-ui/public/logo_1.png",
  "config_path": "/opt/localagi/docker-compose.yaml",
  "description": "LocalAGI is a self-hostable AI agent platform with a web UI, OpenAI-compatible APIs, and local-first model orchestration.",
  "install_methods": [
    {
      "type": "default",
      "script": "ct/localagi.sh",
      "resources": {
        "cpu": 4,
        "ram": 8192,
        "hdd": 30,
        "os": "Debian",
        "version": "13"
      }
    }
  ],
  "default_credentials": {
    "username": null,
    "password": null
  },
  "notes": [
    {
      "text": "GPU passthrough can be enabled during CT creation. Backend auto-selects (`cu128` for NVIDIA, `rocm7.2` for AMD) and falls back to CPU.",
      "type": "info"
    },
    {
      "text": "Set `var_localagi_backend=cpu|cu128|rocm7.2` (or `var_torch_backend`) to force a specific backend profile.",
      "type": "info"
    }
  ]
}