chore(deps): update ollama/ollama docker tag to v0.1.38 (#3522)

This commit is contained in:
renovate[bot] 2024-05-20 17:02:24 +00:00 committed by GitHub
parent 7c97f7fded
commit e27f30e1a5
No known key found for this signature in database
GPG Key ID: B5690EEEBB952194
6 changed files with 15 additions and 30 deletions

View File

@@ -5,19 +5,14 @@
"exposable": true,
"port": 11434,
"id": "ollama-amd",
"tipi_version": 2,
"version": "0.1.37-rocm",
"categories": [
"ai"
],
"tipi_version": 3,
"version": "0.1.38-rocm",
"categories": ["ai"],
"description": "Get up and running with Llama 3, Mistral, Gemma, and other large language models.",
"short_desc": "LLMs inference server with OpenAI compatible API",
"author": "ollama",
"source": "https://github.com/ollama/ollama",
"website": "https://ollama.com",
"form_fields": [],
"supported_architectures": [
"arm64",
"amd64"
]
"supported_architectures": ["arm64", "amd64"]
}

View File

@@ -2,7 +2,7 @@ version: '3.7'
services:
ollama-amd:
image: ollama/ollama:0.1.37-rocm
image: ollama/ollama:0.1.38-rocm
restart: unless-stopped
container_name: ollama-amd
environment:

View File

@@ -5,19 +5,14 @@
"exposable": true,
"port": 11436,
"id": "ollama-cpu",
"tipi_version": 2,
"version": "0.1.37",
"categories": [
"ai"
],
"tipi_version": 3,
"version": "0.1.38",
"categories": ["ai"],
"description": "Get up and running with Llama 3, Mistral, Gemma, and other large language models.",
"short_desc": "LLMs inference server with OpenAI compatible API",
"author": "ollama",
"source": "https://github.com/ollama/ollama",
"website": "https://ollama.com",
"form_fields": [],
"supported_architectures": [
"arm64",
"amd64"
]
"supported_architectures": ["arm64", "amd64"]
}

View File

@@ -2,7 +2,7 @@ version: '3.7'
services:
ollama-cpu:
image: ollama/ollama:0.1.37
image: ollama/ollama:0.1.38
restart: unless-stopped
container_name: ollama-cpu
ports:

View File

@@ -5,19 +5,14 @@
"exposable": true,
"port": 11435,
"id": "ollama-nvidia",
"tipi_version": 2,
"version": "0.1.37",
"categories": [
"ai"
],
"tipi_version": 3,
"version": "0.1.38",
"categories": ["ai"],
"description": "Get up and running with Llama 3, Mistral, Gemma, and other large language models.",
"short_desc": "LLMs inference server with OpenAI compatible API",
"author": "ollama",
"source": "https://github.com/ollama/ollama",
"website": "https://ollama.com",
"form_fields": [],
"supported_architectures": [
"arm64",
"amd64"
]
"supported_architectures": ["arm64", "amd64"]
}

View File

@@ -2,7 +2,7 @@ version: '3.7'
services:
ollama-nvidia:
image: ollama/ollama:0.1.37
image: ollama/ollama:0.1.38
restart: unless-stopped
container_name: ollama-nvidia
ports: