{ "$schema": "../schema.json", "name": "Ollama - CPU", "available": true, "exposable": true, "port": 11436, "id": "ollama-cpu", "tipi_version": 10, "version": "0.1.46", "categories": ["ai"], "description": "Get up and running with Llama 3, Mistral, Gemma, and other large language models.", "short_desc": "LLMs inference server with OpenAI compatible API", "author": "ollama", "source": "https://github.com/ollama/ollama", "website": "https://ollama.com", "form_fields": [], "supported_architectures": ["arm64", "amd64"] }