diff --git a/apps/ollama-amd/config.json b/apps/ollama-amd/config.json
index b0493d93..8a6945d9 100755
--- a/apps/ollama-amd/config.json
+++ b/apps/ollama-amd/config.json
@@ -5,19 +5,14 @@
   "exposable": true,
   "port": 11434,
   "id": "ollama-amd",
-  "tipi_version": 2,
-  "version": "0.1.37-rocm",
-  "categories": [
-    "ai"
-  ],
+  "tipi_version": 3,
+  "version": "0.1.38-rocm",
+  "categories": ["ai"],
   "description": "Get up and running with Llama 3, Mistral, Gemma, and other large language models.",
   "short_desc": "LLMs inference server with OpenAI compatible API",
   "author": "ollama",
   "source": "https://github.com/ollama/ollama",
   "website": "https://ollama.com",
   "form_fields": [],
-  "supported_architectures": [
-    "arm64",
-    "amd64"
-  ]
+  "supported_architectures": ["arm64", "amd64"]
 }
diff --git a/apps/ollama-amd/docker-compose.yml b/apps/ollama-amd/docker-compose.yml
index 3a794977..1d17141b 100755
--- a/apps/ollama-amd/docker-compose.yml
+++ b/apps/ollama-amd/docker-compose.yml
@@ -2,7 +2,7 @@ version: '3.7'
 
 services:
   ollama-amd:
-    image: ollama/ollama:0.1.37-rocm
+    image: ollama/ollama:0.1.38-rocm
     restart: unless-stopped
     container_name: ollama-amd
     environment:
diff --git a/apps/ollama-cpu/config.json b/apps/ollama-cpu/config.json
index 0a27653c..83ee32a4 100755
--- a/apps/ollama-cpu/config.json
+++ b/apps/ollama-cpu/config.json
@@ -5,19 +5,14 @@
   "exposable": true,
   "port": 11436,
   "id": "ollama-cpu",
-  "tipi_version": 2,
-  "version": "0.1.37",
-  "categories": [
-    "ai"
-  ],
+  "tipi_version": 3,
+  "version": "0.1.38",
+  "categories": ["ai"],
   "description": "Get up and running with Llama 3, Mistral, Gemma, and other large language models.",
   "short_desc": "LLMs inference server with OpenAI compatible API",
   "author": "ollama",
   "source": "https://github.com/ollama/ollama",
   "website": "https://ollama.com",
   "form_fields": [],
-  "supported_architectures": [
-    "arm64",
-    "amd64"
-  ]
+  "supported_architectures": ["arm64", "amd64"]
 }
diff --git a/apps/ollama-cpu/docker-compose.yml b/apps/ollama-cpu/docker-compose.yml
index 77db3dde..93c8c1a5 100755
--- a/apps/ollama-cpu/docker-compose.yml
+++ b/apps/ollama-cpu/docker-compose.yml
@@ -2,7 +2,7 @@ version: '3.7'
 
 services:
   ollama-cpu:
-    image: ollama/ollama:0.1.37
+    image: ollama/ollama:0.1.38
     restart: unless-stopped
     container_name: ollama-cpu
     ports:
diff --git a/apps/ollama-nvidia/config.json b/apps/ollama-nvidia/config.json
index 43cd9fcc..4d984571 100755
--- a/apps/ollama-nvidia/config.json
+++ b/apps/ollama-nvidia/config.json
@@ -5,19 +5,14 @@
   "exposable": true,
   "port": 11435,
   "id": "ollama-nvidia",
-  "tipi_version": 2,
-  "version": "0.1.37",
-  "categories": [
-    "ai"
-  ],
+  "tipi_version": 3,
+  "version": "0.1.38",
+  "categories": ["ai"],
   "description": "Get up and running with Llama 3, Mistral, Gemma, and other large language models.",
   "short_desc": "LLMs inference server with OpenAI compatible API",
   "author": "ollama",
   "source": "https://github.com/ollama/ollama",
   "website": "https://ollama.com",
   "form_fields": [],
-  "supported_architectures": [
-    "arm64",
-    "amd64"
-  ]
+  "supported_architectures": ["arm64", "amd64"]
 }
diff --git a/apps/ollama-nvidia/docker-compose.yml b/apps/ollama-nvidia/docker-compose.yml
index 22912ccb..be8da216 100755
--- a/apps/ollama-nvidia/docker-compose.yml
+++ b/apps/ollama-nvidia/docker-compose.yml
@@ -2,7 +2,7 @@ version: '3.7'
 
 services:
   ollama-nvidia:
-    image: ollama/ollama:0.1.37
+    image: ollama/ollama:0.1.38
     restart: unless-stopped
     container_name: ollama-nvidia
     ports:
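Review note: since all three compose files pin the same upstream tags, a quick pre-merge sanity check is to confirm that `0.1.38` and `0.1.38-rocm` are actually published. The sketch below is a reviewer convenience, not part of the diff; it assumes the public Docker Hub v2 tags endpoint (`hub.docker.com/v2/repositories/<namespace>/<repo>/tags/<tag>`) and its `images` response field, which are not guaranteed by this repo.

```python
# Hypothetical pre-merge check: verify the bumped ollama/ollama tags exist
# on Docker Hub. Uses only the standard library, so it runs anywhere.
import json
import urllib.error
import urllib.request

# Tags referenced by the updated docker-compose.yml files.
TAGS = ["0.1.38", "0.1.38-rocm"]

for tag in TAGS:
    # Assumed Docker Hub v2 endpoint; returns 200 with tag metadata if the
    # tag exists, 404 otherwise.
    url = f"https://hub.docker.com/v2/repositories/ollama/ollama/tags/{tag}"
    try:
        with urllib.request.urlopen(url) as resp:
            info = json.load(resp)
        # Each entry in "images" describes one platform variant of the tag.
        archs = sorted({img["architecture"] for img in info.get("images", [])})
        print(f"{tag}: OK (architectures: {', '.join(archs) or 'unknown'})")
    except urllib.error.HTTPError as err:
        print(f"{tag}: MISSING (HTTP {err.code})")
```

Listing the architectures also helps double-check the `supported_architectures` arrays in the config.json files against what the upstream images really ship.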