diff --git a/apps/ollama-cpu/config.json b/apps/ollama-cpu/config.json
new file mode 100755
index 00000000..102666a8
--- /dev/null
+++ b/apps/ollama-cpu/config.json
@@ -0,0 +1,18 @@
+{
+  "$schema": "../schema.json",
+  "name": "Ollama - CPU",
+  "available": true,
+  "exposable": true,
+  "port": 11436,
+  "id": "ollama-cpu",
+  "tipi_version": 1,
+  "version": "0.1.32",
+  "categories": ["ai"],
+  "description": "Get up and running with Llama 3, Mistral, Gemma, and other large language models.",
+  "short_desc": "LLM inference server with OpenAI-compatible API",
+  "author": "ollama",
+  "source": "https://github.com/ollama/ollama",
+  "website": "https://ollama.com",
+  "form_fields": [],
+  "supported_architectures": ["arm64", "amd64"]
+}
diff --git a/apps/ollama-cpu/docker-compose.yml b/apps/ollama-cpu/docker-compose.yml
new file mode 100755
index 00000000..7622c833
--- /dev/null
+++ b/apps/ollama-cpu/docker-compose.yml
@@ -0,0 +1,40 @@
+version: '3.7'
+
+services:
+  ollama-cpu:
+    image: ollama/ollama
+    restart: unless-stopped
+    container_name: ollama-cpu
+    environment:
+      - PORT=11436
+    ports:
+      - '${APP_PORT}:11436'
+    networks:
+      - tipi_main_network
+    volumes:
+      - ${APP_DATA_DIR}/.ollama:/root/.ollama
+    labels:
+      # Main
+      traefik.enable: true
+      traefik.http.middlewares.ollama-cpu-web-redirect.redirectscheme.scheme: https
+      traefik.http.services.ollama-cpu.loadbalancer.server.port: 11436
+      # Web
+      traefik.http.routers.ollama-cpu-insecure.rule: Host(`${APP_DOMAIN}`)
+      traefik.http.routers.ollama-cpu-insecure.entrypoints: web
+      traefik.http.routers.ollama-cpu-insecure.service: ollama-cpu
+      traefik.http.routers.ollama-cpu-insecure.middlewares: ollama-cpu-web-redirect
+      # Websecure
+      traefik.http.routers.ollama-cpu.rule: Host(`${APP_DOMAIN}`)
+      traefik.http.routers.ollama-cpu.entrypoints: websecure
+      traefik.http.routers.ollama-cpu.service: ollama-cpu
+      traefik.http.routers.ollama-cpu.tls.certresolver: myresolver
+      # Local domain
+      traefik.http.routers.ollama-cpu-local-insecure.rule: Host(`ollama-cpu.${LOCAL_DOMAIN}`)
+      traefik.http.routers.ollama-cpu-local-insecure.entrypoints: web
+      traefik.http.routers.ollama-cpu-local-insecure.service: ollama-cpu
+      traefik.http.routers.ollama-cpu-local-insecure.middlewares: ollama-cpu-web-redirect
+      # Local domain secure
+      traefik.http.routers.ollama-cpu-local.rule: Host(`ollama-cpu.${LOCAL_DOMAIN}`)
+      traefik.http.routers.ollama-cpu-local.entrypoints: websecure
+      traefik.http.routers.ollama-cpu-local.service: ollama-cpu
+      traefik.http.routers.ollama-cpu-local.tls: true
diff --git a/apps/ollama-cpu/metadata/description.md b/apps/ollama-cpu/metadata/description.md
new file mode 100755
index 00000000..41a38af2
--- /dev/null
+++ b/apps/ollama-cpu/metadata/description.md
@@ -0,0 +1,75 @@
+# Ollama - CPU
+[Ollama](https://github.com/ollama/ollama) allows you to run open-source large language models, such as Llama 3, Mistral, and Gemma, locally. Ollama bundles model weights, configuration, and data into a single package, defined by a Modelfile.
+
+---
+
+## Usage
+
+### Use with a frontend
+- [LobeChat](https://github.com/lobehub/lobe-chat)
+- [LibreChat](https://github.com/danny-avila/LibreChat)
+- [OpenWebUI](https://github.com/open-webui/open-webui)
+- [And more ...](https://github.com/ollama/ollama)
+
+---
+
+### Try the REST API
+Ollama has a REST API for running and managing models. This app listens on port 11436 (the default `APP_PORT`) rather than Ollama's usual 11434.
+
+**Generate a response**
+```sh
+curl http://localhost:11436/api/generate -d '{
+  "model": "llama3",
+  "prompt": "Why is the sky blue?"
+}'
+```
+
+**Chat with a model**
+```sh
+curl http://localhost:11436/api/chat -d '{
+  "model": "llama3",
+  "messages": [
+    { "role": "user", "content": "why is the sky blue?" }
+  ]
+}'
+```
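+
+**Pull a model**
+
+The calls above assume the model is already downloaded. If it is not, you can fetch it through the same API first; a minimal sketch using `llama3` and the default `APP_PORT` (swap in any model from the library below):
+```sh
+# Assumes the default APP_PORT (11436); pull progress streams back as JSON status objects.
+curl http://localhost:11436/api/pull -d '{
+  "name": "llama3"
+}'
+```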
+---
+
+### Try in terminal
+```sh
+docker exec -it ollama-cpu ollama run llama3 --verbose
+```
+
+---
+
+## Model library
+Ollama supports a list of models available at [ollama.com/library](https://ollama.com/library 'ollama model library').
+
+Here are some example models that can be downloaded:
+
+| Model              | Parameters | Size  | Download                       |
+| ------------------ | ---------- | ----- | ------------------------------ |
+| Llama 3            | 8B         | 4.7GB | `ollama run llama3`            |
+| Llama 3            | 70B        | 40GB  | `ollama run llama3:70b`        |
+| Phi-3              | 3.8B       | 2.3GB | `ollama run phi3`              |
+| Mistral            | 7B         | 4.1GB | `ollama run mistral`           |
+| Neural Chat        | 7B         | 4.1GB | `ollama run neural-chat`       |
+| Starling           | 7B         | 4.1GB | `ollama run starling-lm`       |
+| Code Llama         | 7B         | 3.8GB | `ollama run codellama`         |
+| Llama 2 Uncensored | 7B         | 3.8GB | `ollama run llama2-uncensored` |
+| LLaVA              | 7B         | 4.5GB | `ollama run llava`             |
+| Gemma              | 2B         | 1.4GB | `ollama run gemma:2b`          |
+| Gemma              | 7B         | 4.8GB | `ollama run gemma:7b`          |
+| Solar              | 10.7B      | 6.1GB | `ollama run solar`             |
+
+> Note: You should have at least 8 GB of RAM available to run the 7B models, 16 GB to run the 13B models, and 32 GB to run the 33B models.
\ No newline at end of file
diff --git a/apps/ollama-cpu/metadata/logo.jpg b/apps/ollama-cpu/metadata/logo.jpg
new file mode 100644
index 00000000..4f75a79c
Binary files /dev/null and b/apps/ollama-cpu/metadata/logo.jpg differ