{
  "$schema": "../schema.json",
  "id": "ollama-nvidia",
  "name": "Ollama - Nvidia",
  "description": "Get up and running with Llama 3, Mistral, Gemma, and other large language models.",
  "short_desc": "LLMs inference server with OpenAI compatible API",
  "author": "ollama",
  "source": "https://github.com/ollama/ollama",
  "website": "https://ollama.com",
  "categories": ["ai"],
  "version": "0.1.46",
  "tipi_version": 10,
  "port": 11435,
  "available": true,
  "exposable": true,
  "form_fields": [],
  "supported_architectures": ["arm64", "amd64"]
}