chore(deps): update ollama/ollama docker tag to v0.1.45 (#3851)

renovate[bot] 2024-06-21 21:48:10 +00:00 committed by GitHub
parent 9d45fc1c23
commit cdb541f3bb
No known key found for this signature in database
GPG Key ID: B5690EEEBB952194
6 changed files with 9 additions and 9 deletions

@@ -5,8 +5,8 @@
   "exposable": true,
   "port": 11434,
   "id": "ollama-amd",
-  "tipi_version": 8,
-  "version": "0.1.44-rocm",
+  "tipi_version": 9,
+  "version": "0.1.45-rocm",
   "categories": ["ai"],
   "description": "Get up and running with Llama 3, Mistral, Gemma, and other large language models.",
   "short_desc": "LLMs inference server with OpenAI compatible API",

@@ -2,7 +2,7 @@ version: '3.7'
 services:
   ollama-amd:
-    image: ollama/ollama:0.1.44-rocm
+    image: ollama/ollama:0.1.45-rocm
     restart: unless-stopped
     container_name: ollama-amd
     environment:

@@ -5,8 +5,8 @@
   "exposable": true,
   "port": 11436,
   "id": "ollama-cpu",
-  "tipi_version": 8,
-  "version": "0.1.44",
+  "tipi_version": 9,
+  "version": "0.1.45",
   "categories": ["ai"],
   "description": "Get up and running with Llama 3, Mistral, Gemma, and other large language models.",
   "short_desc": "LLMs inference server with OpenAI compatible API",

@@ -2,7 +2,7 @@ version: '3.7'
 services:
   ollama-cpu:
-    image: ollama/ollama:0.1.44
+    image: ollama/ollama:0.1.45
     restart: unless-stopped
     container_name: ollama-cpu
     ports:

@@ -5,8 +5,8 @@
   "exposable": true,
   "port": 11435,
   "id": "ollama-nvidia",
-  "tipi_version": 8,
-  "version": "0.1.44",
+  "tipi_version": 9,
+  "version": "0.1.45",
   "categories": ["ai"],
   "description": "Get up and running with Llama 3, Mistral, Gemma, and other large language models.",
   "short_desc": "LLMs inference server with OpenAI compatible API",

@@ -2,7 +2,7 @@ version: '3.7'
 services:
   ollama-nvidia:
-    image: ollama/ollama:0.1.44
+    image: ollama/ollama:0.1.45
     restart: unless-stopped
     container_name: ollama-nvidia
     ports: