diff --git a/apps/ollama-amd/config.json b/apps/ollama-amd/config.json
index 28d46b15..201fe292 100755
--- a/apps/ollama-amd/config.json
+++ b/apps/ollama-amd/config.json
@@ -5,8 +5,8 @@
   "exposable": true,
   "port": 11434,
   "id": "ollama-amd",
-  "tipi_version": 9,
-  "version": "0.1.45-rocm",
+  "tipi_version": 10,
+  "version": "0.1.46-rocm",
   "categories": ["ai"],
   "description": "Get up and running with Llama 3, Mistral, Gemma, and other large language models.",
   "short_desc": "LLMs inference server with OpenAI compatible API",
diff --git a/apps/ollama-amd/docker-compose.yml b/apps/ollama-amd/docker-compose.yml
index fe04eee2..f2728773 100755
--- a/apps/ollama-amd/docker-compose.yml
+++ b/apps/ollama-amd/docker-compose.yml
@@ -2,7 +2,7 @@ version: '3.7'
 
 services:
   ollama-amd:
-    image: ollama/ollama:0.1.45-rocm
+    image: ollama/ollama:0.1.46-rocm
     restart: unless-stopped
     container_name: ollama-amd
     environment:
diff --git a/apps/ollama-cpu/config.json b/apps/ollama-cpu/config.json
index b271b0fa..a76b2214 100755
--- a/apps/ollama-cpu/config.json
+++ b/apps/ollama-cpu/config.json
@@ -5,8 +5,8 @@
   "exposable": true,
   "port": 11436,
   "id": "ollama-cpu",
-  "tipi_version": 9,
-  "version": "0.1.45",
+  "tipi_version": 10,
+  "version": "0.1.46",
   "categories": ["ai"],
   "description": "Get up and running with Llama 3, Mistral, Gemma, and other large language models.",
   "short_desc": "LLMs inference server with OpenAI compatible API",
diff --git a/apps/ollama-cpu/docker-compose.yml b/apps/ollama-cpu/docker-compose.yml
index ba9c3d04..51f2b1f4 100755
--- a/apps/ollama-cpu/docker-compose.yml
+++ b/apps/ollama-cpu/docker-compose.yml
@@ -2,7 +2,7 @@ version: '3.7'
 
 services:
   ollama-cpu:
-    image: ollama/ollama:0.1.45
+    image: ollama/ollama:0.1.46
     restart: unless-stopped
     container_name: ollama-cpu
     ports:
diff --git a/apps/ollama-nvidia/config.json b/apps/ollama-nvidia/config.json
index c2e770ef..c20acfe3 100755
--- a/apps/ollama-nvidia/config.json
+++ b/apps/ollama-nvidia/config.json
@@ -5,8 +5,8 @@
   "exposable": true,
   "port": 11435,
   "id": "ollama-nvidia",
-  "tipi_version": 9,
-  "version": "0.1.45",
+  "tipi_version": 10,
+  "version": "0.1.46",
   "categories": ["ai"],
   "description": "Get up and running with Llama 3, Mistral, Gemma, and other large language models.",
   "short_desc": "LLMs inference server with OpenAI compatible API",
diff --git a/apps/ollama-nvidia/docker-compose.yml b/apps/ollama-nvidia/docker-compose.yml
index 29787f56..a59f1847 100755
--- a/apps/ollama-nvidia/docker-compose.yml
+++ b/apps/ollama-nvidia/docker-compose.yml
@@ -2,7 +2,7 @@ version: '3.7'
 
 services:
   ollama-nvidia:
-    image: ollama/ollama:0.1.45
+    image: ollama/ollama:0.1.46
     restart: unless-stopped
     container_name: ollama-nvidia
     ports: