diff --git a/apps/ollama-amd/config.json b/apps/ollama-amd/config.json
index 85933c15..10eb47d2 100755
--- a/apps/ollama-amd/config.json
+++ b/apps/ollama-amd/config.json
@@ -5,8 +5,8 @@
   "exposable": true,
   "port": 11434,
   "id": "ollama-amd",
-  "tipi_version": 4,
-  "version": "0.1.39-rocm",
+  "tipi_version": 5,
+  "version": "0.1.41-rocm",
   "categories": ["ai"],
   "description": "Get up and running with Llama 3, Mistral, Gemma, and other large language models.",
   "short_desc": "LLMs inference server with OpenAI compatible API",
diff --git a/apps/ollama-amd/docker-compose.yml b/apps/ollama-amd/docker-compose.yml
index 21c127b7..acd1fcb4 100755
--- a/apps/ollama-amd/docker-compose.yml
+++ b/apps/ollama-amd/docker-compose.yml
@@ -2,7 +2,7 @@ version: '3.7'
 
 services:
   ollama-amd:
-    image: ollama/ollama:0.1.39-rocm
+    image: ollama/ollama:0.1.41-rocm
     restart: unless-stopped
     container_name: ollama-amd
     environment:
diff --git a/apps/ollama-cpu/config.json b/apps/ollama-cpu/config.json
index 5a2e91b0..728216d7 100755
--- a/apps/ollama-cpu/config.json
+++ b/apps/ollama-cpu/config.json
@@ -5,8 +5,8 @@
   "exposable": true,
   "port": 11436,
   "id": "ollama-cpu",
-  "tipi_version": 4,
-  "version": "0.1.39",
+  "tipi_version": 5,
+  "version": "0.1.41",
   "categories": ["ai"],
   "description": "Get up and running with Llama 3, Mistral, Gemma, and other large language models.",
   "short_desc": "LLMs inference server with OpenAI compatible API",
diff --git a/apps/ollama-cpu/docker-compose.yml b/apps/ollama-cpu/docker-compose.yml
index 39dc9c10..6c017ce4 100755
--- a/apps/ollama-cpu/docker-compose.yml
+++ b/apps/ollama-cpu/docker-compose.yml
@@ -2,7 +2,7 @@ version: '3.7'
 
 services:
   ollama-cpu:
-    image: ollama/ollama:0.1.39
+    image: ollama/ollama:0.1.41
     restart: unless-stopped
     container_name: ollama-cpu
     ports:
diff --git a/apps/ollama-nvidia/config.json b/apps/ollama-nvidia/config.json
index 3439841e..d9fa057b 100755
--- a/apps/ollama-nvidia/config.json
+++ b/apps/ollama-nvidia/config.json
@@ -5,8 +5,8 @@
   "exposable": true,
   "port": 11435,
   "id": "ollama-nvidia",
-  "tipi_version": 4,
-  "version": "0.1.39",
+  "tipi_version": 5,
+  "version": "0.1.41",
   "categories": ["ai"],
   "description": "Get up and running with Llama 3, Mistral, Gemma, and other large language models.",
   "short_desc": "LLMs inference server with OpenAI compatible API",
diff --git a/apps/ollama-nvidia/docker-compose.yml b/apps/ollama-nvidia/docker-compose.yml
index 442fb599..9d12389d 100755
--- a/apps/ollama-nvidia/docker-compose.yml
+++ b/apps/ollama-nvidia/docker-compose.yml
@@ -2,7 +2,7 @@ version: '3.7'
 
 services:
   ollama-nvidia:
-    image: ollama/ollama:0.1.39
+    image: ollama/ollama:0.1.41
     restart: unless-stopped
     container_name: ollama-nvidia
     ports: