From 17fcdea698283ce6734cfde9c822d9f453edce1a Mon Sep 17 00:00:00 2001
From: Jeffrey Morgan
Date: Sun, 12 Jan 2025 22:45:47 -0800
Subject: [PATCH 01/68] readme: move discord link

---
 README.md | 7 +++++--
 1 file changed, 5 insertions(+), 2 deletions(-)

diff --git a/README.md b/README.md
index 908d31eb..caf7cf7f 100644
--- a/README.md
+++ b/README.md
@@ -6,8 +6,6 @@
 # Ollama
 
-[Discord](https://discord.gg/ollama)
-
 Get up and running with large language models.
 
 ### macOS
 
@@ -35,6 +33,11 @@ The official [Ollama Docker image](https://hub.docker.com/r/ollama/ollama) `olla
 - [ollama-python](https://github.com/ollama/ollama-python)
 - [ollama-js](https://github.com/ollama/ollama-js)
 
+### Community
+
+- [Discord](https://discord.gg/ollama)
+- [Reddit](https://reddit.com/r/ollama)
+
 ## Quickstart
 
 To run and chat with [Llama 3.2](https://ollama.com/library/llama3.2):

From 84a2314463a2ba0e7863b43a20a59d6564c21124 Mon Sep 17 00:00:00 2001
From: Parth Sareen
Date: Mon, 13 Jan 2025 11:26:22 -0800
Subject: [PATCH 02/68] examples: remove codified examples (#8267)

---
 api/examples/README.md | 17 +
 .../go-chat => api/examples/chat}/main.go | 0
 .../examples/generate-streaming}/main.go | 0
 .../examples/generate}/main.go | 0
 .../examples/multimodal}/main.go | 0
 .../examples/pull-progress}/main.go | 0
 examples/README.md => docs/examples.md | 6 +
 examples/.gitignore | 174 -
 examples/flyio/.gitignore | 1 -
 examples/flyio/README.md | 67 -
 examples/go-http-generate/main.go | 29 -
 examples/jupyter-notebook/README.md | 5 -
 examples/jupyter-notebook/ollama.ipynb | 102 -
 examples/kubernetes/README.md | 38 -
 examples/kubernetes/cpu.yaml | 42 -
 examples/kubernetes/gpu.yaml | 58 -
 .../langchain-python-rag-document/README.md | 29 -
 .../langchain-python-rag-document/main.py | 61 -
 .../requirements.txt | 109 -
 .../.gitignore | 170 -
 .../langchain-python-rag-privategpt/LICENSE | 201 -
 .../langchain-python-rag-privategpt/README.md | 91 -
 .../constants.py | 11 -
 .../langchain-python-rag-privategpt/ingest.py | 170 -
 .../poetry.lock | 3833 -----
 .../privateGPT.py | 74 -
 .../pyproject.toml | 26 -
 .../requirements.txt | 15 -
 .../langchain-python-rag-websummary/README.md | 23 -
 .../langchain-python-rag-websummary/main.py | 12 -
 .../requirements.txt | 1 -
 examples/langchain-python-simple/README.md | 23 -
 examples/langchain-python-simple/main.py | 6 -
 .../langchain-python-simple/requirements.txt | 1 -
 .../langchain-typescript-simple/README.md | 23 -
 examples/langchain-typescript-simple/main.ts | 25 -
 .../package-lock.json | 997 -----
 .../langchain-typescript-simple/package.json | 13 -
 examples/modelfile-mario/Modelfile | 5 -
 examples/modelfile-mario/logo.png | Bin 456296 -> 0 bytes
 examples/modelfile-mario/readme.md | 43 -
 examples/python-dockerit/Modelfile | 20 -
 examples/python-dockerit/README.md | 31 -
 examples/python-dockerit/dockerit.py | 17 -
 examples/python-dockerit/requirements.txt | 1 -
 .../README.md | 93 -
 .../main.py | 137 -
 .../requirements.txt | 8 -
 .../main.py | 53 -
 .../readme.md | 54 -
 .../requirements.txt | 1 -
 .../predefinedschema.py | 31 -
 .../randomaddresses.py | 31 -
 examples/python-json-datagenerator/readme.md | 60 -
 .../requirements.txt | 1 -
 examples/python-loganalysis/Modelfile | 8 -
 examples/python-loganalysis/loganalysis.py | 41 -
 examples/python-loganalysis/logtest.logfile | 32 -
 examples/python-loganalysis/readme.md | 72 -
 examples/python-loganalysis/requirements.txt | 1 -
 examples/python-rag-newssummary/README.md | 35 -
 .../python-rag-newssummary/requirements.txt | 9 -
 examples/python-rag-newssummary/summ.py | 86 -
 examples/python-rag-newssummary/utils.py | 108 -
 examples/python-simplechat/client.py | 48 -
 examples/python-simplechat/readme.md | 44 -
 examples/python-simplechat/requirements.txt | 1 -
 examples/python-simplegenerate/README.md | 29 -
 examples/python-simplegenerate/client.py | 40 -
 .../python-simplegenerate/requirements.txt | 1 -
 .../extractemail.ts | 118 -
 .../typescript-functioncalling/extractwp.ts | 38 -
 examples/typescript-functioncalling/info.txt | 17 -
 .../package-lock.json | 519 ---
 .../typescript-functioncalling/package.json | 9 -
 examples/typescript-functioncalling/readme.md | 28 -
 examples/typescript-functioncalling/wp.txt | 183 -
 examples/typescript-mentors/.gitignore | 2 -
 examples/typescript-mentors/README.md | 65 -
 .../typescript-mentors/character-generator.ts | 26 -
 examples/typescript-mentors/mentors.ts | 60 -
 examples/typescript-mentors/package.json | 15 -
 examples/typescript-simplechat/client.ts | 77 -
 examples/typescript-simplechat/package.json | 12 -
 examples/typescript-simplechat/readme.md | 35 -
 85 files changed, 23 insertions(+), 8775 deletions(-)
 create mode 100644 api/examples/README.md
 rename {examples/go-chat => api/examples/chat}/main.go (100%)
 rename {examples/go-generate-streaming => api/examples/generate-streaming}/main.go (100%)
 rename {examples/go-generate => api/examples/generate}/main.go (100%)
 rename {examples/go-multimodal => api/examples/multimodal}/main.go (100%)
 rename {examples/go-pull-progress => api/examples/pull-progress}/main.go (100%)
 rename examples/README.md => docs/examples.md (69%)
 delete mode 100644 examples/.gitignore
 delete mode 100644 examples/flyio/.gitignore
 delete mode 100644 examples/flyio/README.md
 delete mode 100644 examples/go-http-generate/main.go
 delete mode 100644 examples/jupyter-notebook/README.md
 delete mode 100644 examples/jupyter-notebook/ollama.ipynb
 delete mode 100644 examples/kubernetes/README.md
 delete mode 100644 examples/kubernetes/cpu.yaml
 delete mode 100644 examples/kubernetes/gpu.yaml
 delete mode 100644 examples/langchain-python-rag-document/README.md
 delete mode 100644 examples/langchain-python-rag-document/main.py
 delete mode 100644 examples/langchain-python-rag-document/requirements.txt
 delete mode 100644 examples/langchain-python-rag-privategpt/.gitignore
 delete mode 100644 examples/langchain-python-rag-privategpt/LICENSE
 delete mode 100644 examples/langchain-python-rag-privategpt/README.md
 delete mode 100644 examples/langchain-python-rag-privategpt/constants.py
 delete mode 100755 examples/langchain-python-rag-privategpt/ingest.py
 delete mode 100644 examples/langchain-python-rag-privategpt/poetry.lock
 delete mode 100755 examples/langchain-python-rag-privategpt/privateGPT.py
 delete mode 100644 examples/langchain-python-rag-privategpt/pyproject.toml
 delete mode 100644 examples/langchain-python-rag-privategpt/requirements.txt
 delete mode 100644 examples/langchain-python-rag-websummary/README.md
 delete mode 100644 examples/langchain-python-rag-websummary/main.py
 delete mode 100644 examples/langchain-python-rag-websummary/requirements.txt
 delete mode 100644 examples/langchain-python-simple/README.md
 delete mode 100644 examples/langchain-python-simple/main.py
 delete mode 100644 examples/langchain-python-simple/requirements.txt
 delete mode 100644 examples/langchain-typescript-simple/README.md
 delete mode 100644 examples/langchain-typescript-simple/main.ts
 delete mode 100644 examples/langchain-typescript-simple/package-lock.json
 delete mode 100644 examples/langchain-typescript-simple/package.json
 delete mode 100644 examples/modelfile-mario/Modelfile
 delete mode 100644 examples/modelfile-mario/logo.png
 delete mode 100644 examples/modelfile-mario/readme.md
 delete mode 100644 examples/python-dockerit/Modelfile
 delete mode 100644 examples/python-dockerit/README.md
 delete mode 100644 examples/python-dockerit/dockerit.py
 delete mode 100644 examples/python-dockerit/requirements.txt
 delete mode 100644 examples/python-grounded-factuality-rag-check/README.md
 delete mode 100644 examples/python-grounded-factuality-rag-check/main.py
 delete mode 100644 examples/python-grounded-factuality-rag-check/requirements.txt
 delete mode 100644 examples/python-grounded-factuality-simple-check/main.py
 delete mode 100644 examples/python-grounded-factuality-simple-check/readme.md
 delete mode 100644 examples/python-grounded-factuality-simple-check/requirements.txt
 delete mode 100644 examples/python-json-datagenerator/predefinedschema.py
 delete mode 100644 examples/python-json-datagenerator/randomaddresses.py
 delete mode 100644 examples/python-json-datagenerator/readme.md
 delete mode 100644 examples/python-json-datagenerator/requirements.txt
 delete mode 100644 examples/python-loganalysis/Modelfile
 delete mode 100644 examples/python-loganalysis/loganalysis.py
 delete mode 100644 examples/python-loganalysis/logtest.logfile
 delete mode 100644 examples/python-loganalysis/readme.md
 delete mode 100644 examples/python-loganalysis/requirements.txt
 delete mode 100644 examples/python-rag-newssummary/README.md
 delete mode 100644 examples/python-rag-newssummary/requirements.txt
 delete mode 100644 examples/python-rag-newssummary/summ.py
 delete mode 100644 examples/python-rag-newssummary/utils.py
 delete mode 100644 examples/python-simplechat/client.py
 delete mode 100644 examples/python-simplechat/readme.md
 delete mode 100644 examples/python-simplechat/requirements.txt
 delete mode 100644 examples/python-simplegenerate/README.md
 delete mode 100644 examples/python-simplegenerate/client.py
 delete mode 100644 examples/python-simplegenerate/requirements.txt
 delete mode 100644 examples/typescript-functioncalling/extractemail.ts
 delete mode 100644 examples/typescript-functioncalling/extractwp.ts
 delete mode 100644 examples/typescript-functioncalling/info.txt
 delete mode 100644 examples/typescript-functioncalling/package-lock.json
 delete mode 100644 examples/typescript-functioncalling/package.json
 delete mode 100644 examples/typescript-functioncalling/readme.md
 delete mode 100644 examples/typescript-functioncalling/wp.txt
 delete mode 100644 examples/typescript-mentors/.gitignore
 delete mode 100644 examples/typescript-mentors/README.md
 delete mode 100644 examples/typescript-mentors/character-generator.ts
 delete mode 100644 examples/typescript-mentors/mentors.ts
 delete mode 100644 examples/typescript-mentors/package.json
 delete mode 100644 examples/typescript-simplechat/client.ts
 delete mode 100644 examples/typescript-simplechat/package.json
 delete mode 100644 examples/typescript-simplechat/readme.md

diff --git a/api/examples/README.md b/api/examples/README.md
new file mode 100644
index 00000000..b5a8917f
--- /dev/null
+++ b/api/examples/README.md
@@ -0,0 +1,17 @@
+# Ollama API Examples
+
+Run the examples in this directory with:
+
+```
+go run example_name/main.go
+```
+## Chat - Chat with a model
+- [chat/main.go](chat/main.go)
+
+## Generate - Generate text from a model
+- [generate/main.go](generate/main.go)
+- [generate-streaming/main.go](generate-streaming/main.go)
+
+## Pull - Pull a model
+- [pull-progress/main.go](pull-progress/main.go)
+
diff --git a/examples/go-chat/main.go b/api/examples/chat/main.go
similarity index 100%
rename from examples/go-chat/main.go
rename to api/examples/chat/main.go
diff --git a/examples/go-generate-streaming/main.go b/api/examples/generate-streaming/main.go
similarity index 100%
rename from examples/go-generate-streaming/main.go
rename to api/examples/generate-streaming/main.go
diff --git a/examples/go-generate/main.go b/api/examples/generate/main.go
similarity index 100%
rename from examples/go-generate/main.go
rename to api/examples/generate/main.go
diff --git a/examples/go-multimodal/main.go b/api/examples/multimodal/main.go
similarity index 100%
rename from examples/go-multimodal/main.go
rename to api/examples/multimodal/main.go
diff --git a/examples/go-pull-progress/main.go b/api/examples/pull-progress/main.go
similarity index 100%
rename from examples/go-pull-progress/main.go
rename to api/examples/pull-progress/main.go
diff --git a/examples/README.md b/docs/examples.md
similarity index 69%
rename from examples/README.md
rename to docs/examples.md
index 7f349f72..25f6563a 100644
--- a/examples/README.md
+++ b/docs/examples.md
@@ -12,3 +12,9 @@ Ollama JavaScript examples at [ollama-js/examples](https://github.com/ollama/oll
 ## OpenAI compatibility examples
 
 Ollama OpenAI compatibility examples at [ollama/examples/openai](../docs/openai.md)
+
+
+## Community examples
+
+- [LangChain Ollama Python](https://python.langchain.com/docs/integrations/chat/ollama/)
+- [LangChain Ollama JS](https://js.langchain.com/docs/integrations/chat/ollama/)
diff --git a/examples/.gitignore b/examples/.gitignore
deleted file mode 100644
index b60652b6..00000000
--- a/examples/.gitignore
+++ /dev/null
@@ -1,174 +0,0 @@
-node_modules
-bun.lockb
-.vscode
-# OSX
-.DS_STORE
-
-
-# Models
-models/
-
-# Local Chroma db
-.chroma/
-db/
-
-# Byte-compiled / optimized / DLL files
-__pycache__/
-*.py[cod]
-*$py.class
-
-# C extensions
-*.so
-
-# Distribution / packaging
-.Python
-build/
-develop-eggs/
-dist/
-downloads/
-eggs/
-.eggs/
-lib/
-lib64/
-parts/
-sdist/
-var/
-wheels/
-share/python-wheels/
-*.egg-info/
-.installed.cfg
-*.egg
-MANIFEST
-
-# PyInstaller
-# Usually these files are written by a python script from a template
-# before PyInstaller builds the exe, so as to inject date/other infos into it.
-*.manifest
-*.spec
-
-# Installer logs
-pip-log.txt
-pip-delete-this-directory.txt
-
-# Unit test / coverage reports
-htmlcov/
-.tox/
-.nox/
-.coverage
-.coverage.*
-.cache
-nosetests.xml
-coverage.xml
-*.cover
-*.py,cover
-.hypothesis/
-.pytest_cache/
-cover/
-
-# Translations
-*.mo
-*.pot
-
-# Django stuff:
-*.log
-local_settings.py
-db.sqlite3
-db.sqlite3-journal
-
-# Flask stuff:
-instance/
-.webassets-cache
-
-# Scrapy stuff:
-.scrapy
-
-# Sphinx documentation
-docs/_build/
-
-# PyBuilder
-.pybuilder/
-target/
-
-# Jupyter Notebook
-.ipynb_checkpoints
-
-# IPython
-profile_default/
-ipython_config.py
-
-# pyenv
-# For a library or package, you might want to ignore these files since the code is
-# intended to run in multiple environments; otherwise, check them in:
-# .python-version
-
-# pipenv
-# According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control.
-# However, in case of collaboration, if having platform-specific dependencies or dependencies
-# having no cross-platform support, pipenv may install dependencies that don't work, or not
-# install all needed dependencies.
-#Pipfile.lock
-
-# poetry
-# Similar to Pipfile.lock, it is generally recommended to include poetry.lock in version control.
-# This is especially recommended for binary packages to ensure reproducibility, and is more
-# commonly ignored for libraries.
-# https://python-poetry.org/docs/basic-usage/#commit-your-poetrylock-file-to-version-control
-#poetry.lock
-
-# pdm
-# Similar to Pipfile.lock, it is generally recommended to include pdm.lock in version control.
-#pdm.lock
-# pdm stores project-wide configurations in .pdm.toml, but it is recommended to not include it
-# in version control.
-# https://pdm.fming.dev/#use-with-ide
-.pdm.toml
-
-# PEP 582; used by e.g. github.com/David-OConnor/pyflow and github.com/pdm-project/pdm
-__pypackages__/
-
-# Celery stuff
-celerybeat-schedule
-celerybeat.pid
-
-# SageMath parsed files
-*.sage.py
-
-# Environments
-.env
-.venv
-env/
-venv/
-ENV/
-env.bak/
-venv.bak/
-
-# Spyder project settings
-.spyderproject
-.spyproject
-
-# Rope project settings
-.ropeproject
-
-# mkdocs documentation
-/site
-
-# mypy
-.mypy_cache/
-.dmypy.json
-dmypy.json
-
-# Pyre type checker
-.pyre/
-
-# pytype static type analyzer
-.pytype/
-
-# Cython debug symbols
-cython_debug/
-
-# PyCharm
-# JetBrains specific template is maintained in a separate JetBrains.gitignore that can
-# be found at https://github.com/github/gitignore/blob/main/Global/JetBrains.gitignore
-# and can be added to the global gitignore or merged into this file. For a more nuclear
-# option (not recommended) you can uncomment the following to ignore the entire idea folder.
-#.idea/
diff --git a/examples/flyio/.gitignore b/examples/flyio/.gitignore
deleted file mode 100644
index 0501d092..00000000
--- a/examples/flyio/.gitignore
+++ /dev/null
@@ -1 +0,0 @@
-fly.toml
diff --git a/examples/flyio/README.md b/examples/flyio/README.md
deleted file mode 100644
index 09b90aad..00000000
--- a/examples/flyio/README.md
+++ /dev/null
@@ -1,67 +0,0 @@
-# Deploy Ollama to Fly.io
-
-> Note: this example exposes a public endpoint and does not configure authentication. Use with care.
-
-## Prerequisites
-
-- Ollama: https://ollama.com/download
-- Fly.io account. Sign up for a free account: https://fly.io/app/sign-up
-
-## Steps
-
-1. Login to Fly.io
-
-   ```bash
-   fly auth login
-   ```
-
-1. Create a new Fly app
-
-   ```bash
-   fly launch --name <name> --image ollama/ollama --internal-port 11434 --vm-size shared-cpu-8x --now
-   ```
-
-1. Pull and run `orca-mini:3b`
-
-   ```bash
-   OLLAMA_HOST=https://<name>.fly.dev ollama run orca-mini:3b
-   ```
-
-`shared-cpu-8x` is a free-tier eligible machine type. For better performance, switch to a `performance` or `dedicated` machine type, or attach a GPU for hardware acceleration (see below).
-
-## (Optional) Persistent Volume
-
-By default, Fly Machines use ephemeral storage, which is problematic if you want to use the same model across restarts without pulling it again. Create and attach a persistent volume to store the downloaded models:
-
-1. Create the Fly Volume
-
-   ```bash
-   fly volume create ollama
-   ```
-
-1. Update `fly.toml` and add `[mounts]`
-
-   ```toml
-   [mounts]
-     source = "ollama"
-     destination = "/mnt/ollama/models"
-   ```
-
-1. Update `fly.toml` and add `[env]`
-
-   ```toml
-   [env]
-     OLLAMA_MODELS = "/mnt/ollama/models"
-   ```
-
-1. Deploy your app
-
-   ```bash
-   fly deploy
-   ```
-
-## (Optional) Hardware Acceleration
-
-Fly.io GPU access is currently waitlist-only. Sign up for the waitlist: https://fly.io/gpu
-
-Once you've been accepted, create the app with the additional flag `--vm-gpu-kind a100-pcie-40gb` or `--vm-gpu-kind a100-pcie-80gb`.
diff --git a/examples/go-http-generate/main.go b/examples/go-http-generate/main.go
deleted file mode 100644
index e5b64348..00000000
--- a/examples/go-http-generate/main.go
+++ /dev/null
@@ -1,29 +0,0 @@
-package main
-
-import (
-	"bytes"
-	"fmt"
-	"io"
-	"log"
-	"net/http"
-	"os"
-)
-
-func main() {
-	body := []byte(`{"model":"mistral"}`)
-	resp, err := http.Post("http://localhost:11434/api/generate", "application/json", bytes.NewBuffer(body))
-
-	if err != nil {
-		fmt.Print(err.Error())
-		os.Exit(1)
-	}
-
-	defer resp.Body.Close()
-
-	responseData, err := io.ReadAll(resp.Body)
-	if err != nil {
-		log.Fatal(err)
-	}
-	fmt.Println(string(responseData))
-
-}
diff --git a/examples/jupyter-notebook/README.md b/examples/jupyter-notebook/README.md
deleted file mode 100644
index fba6802f..00000000
--- a/examples/jupyter-notebook/README.md
+++ /dev/null
@@ -1,5 +0,0 @@
-# Ollama Jupyter Notebook
-
-This example downloads and installs Ollama in a Jupyter instance such as Google Colab. It will start the Ollama service and expose an endpoint using `ngrok` which can be used to communicate with the Ollama instance remotely.
-
-For best results, use an instance with a GPU accelerator.
diff --git a/examples/jupyter-notebook/ollama.ipynb b/examples/jupyter-notebook/ollama.ipynb
deleted file mode 100644
index bee353cb..00000000
--- a/examples/jupyter-notebook/ollama.ipynb
+++ /dev/null
@@ -1,102 +0,0 @@
-{
- "cells": [
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "id": "93f59dcb-c588-41b8-a792-55d88ade739c",
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "# Download and run the Ollama Linux install script\n",
-    "!curl -fsSL https://ollama.com/install.sh | sh\n",
-    "!command -v systemctl >/dev/null && sudo systemctl stop ollama"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "id": "658c147e-c7f8-490e-910e-62b80f577dda",
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "!pip install aiohttp pyngrok\n",
-    "\n",
-    "import os\n",
-    "import asyncio\n",
-    "from aiohttp import ClientSession\n",
-    "\n",
-    "# Set LD_LIBRARY_PATH so the system NVIDIA library becomes preferred\n",
-    "# over the built-in library. This is particularly important for \n",
-    "# Google Colab which installs older drivers\n",
-    "os.environ.update({'LD_LIBRARY_PATH': '/usr/lib64-nvidia'})\n",
-    "\n",
-    "async def run(cmd):\n",
-    "  '''\n",
-    "  run is a helper function to run subcommands asynchronously.\n",
-    "  '''\n",
-    "  print('>>> starting', *cmd)\n",
-    "  p = await asyncio.subprocess.create_subprocess_exec(\n",
-    "    *cmd,\n",
-    "    stdout=asyncio.subprocess.PIPE,\n",
-    "    stderr=asyncio.subprocess.PIPE,\n",
-    "  )\n",
-    "\n",
-    "  async def pipe(lines):\n",
-    "    async for line in lines:\n",
-    "      print(line.strip().decode('utf-8'))\n",
-    "\n",
-    "  await asyncio.gather(\n",
-    "    pipe(p.stdout),\n",
-    "    pipe(p.stderr),\n",
-    "  )\n",
-    "\n",
-    "\n",
-    "await asyncio.gather(\n",
-    "  run(['ollama', 'serve']),\n",
-    "  run(['ngrok', 'http', '--log', 'stderr', '11434']),\n",
-    ")"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "id": "e7735a55-9aad-4caf-8683-52e2163ba53b",
-   "metadata": {},
-   "source": [
-    "The previous cell starts two processes, `ollama` and `ngrok`. The log output will show a line like the following which describes the external address.\n",
-    "\n",
-    "```\n",
-    "t=2023-11-12T22:55:56+0000 lvl=info msg=\"started tunnel\" obj=tunnels name=command_line addr=http://localhost:11434 url=https://8249-34-125-179-11.ngrok.io\n",
-    "```\n",
-    "\n",
-    "The external address in this case is `https://8249-34-125-179-11.ngrok.io` which can be passed into `OLLAMA_HOST` to access this instance.\n",
-    "\n",
-    "```bash\n",
-    "export OLLAMA_HOST=https://8249-34-125-179-11.ngrok.io\n",
-    "ollama list\n",
-    "ollama run mistral\n",
-    "```"
-   ]
-  }
- ],
- "metadata": {
-  "kernelspec": {
-   "display_name": "Python 3 (ipykernel)",
-   "language": "python",
-   "name": "python3"
-  },
-  "language_info": {
-   "codemirror_mode": {
-    "name": "ipython",
-    "version": 3
-   },
-   "file_extension": ".py",
-   "mimetype": "text/x-python",
-   "name": "python",
-   "nbconvert_exporter": "python",
-   "pygments_lexer": "ipython3",
-   "version": "3.11.6"
-  }
- },
- "nbformat": 4,
- "nbformat_minor": 5
-}
diff --git a/examples/kubernetes/README.md b/examples/kubernetes/README.md
deleted file mode 100644
index 2e2444c7..00000000
--- a/examples/kubernetes/README.md
+++ /dev/null
@@ -1,38 +0,0 @@
-# Deploy Ollama to Kubernetes
-
-## Prerequisites
-
-- Ollama: https://ollama.com/download
-- Kubernetes cluster. This example will use Google Kubernetes Engine.
-
-## Steps
-
-1. Create the Ollama namespace, deployment, and service
-
-   ```bash
-   kubectl apply -f cpu.yaml
-   ```
-
-## (Optional) Hardware Acceleration
-
-Hardware acceleration in Kubernetes requires NVIDIA's [`k8s-device-plugin`](https://github.com/NVIDIA/k8s-device-plugin), which is deployed in Kubernetes in the form of a DaemonSet. Follow the link for more details.
-
-Once configured, create a GPU-enabled Ollama deployment.
-
-```bash
-kubectl apply -f gpu.yaml
-```
-
-## Test
-
-1. Port forward the Ollama service to connect and use it locally
-
-   ```bash
-   kubectl -n ollama port-forward service/ollama 11434:80
-   ```
-
-1. Pull and run a model, for example `orca-mini:3b`
-
-   ```bash
-   ollama run orca-mini:3b
-   ```
\ No newline at end of file
diff --git a/examples/kubernetes/cpu.yaml b/examples/kubernetes/cpu.yaml
deleted file mode 100644
index b8ddcdde..00000000
--- a/examples/kubernetes/cpu.yaml
+++ /dev/null
@@ -1,42 +0,0 @@
----
-apiVersion: v1
-kind: Namespace
-metadata:
-  name: ollama
----
-apiVersion: apps/v1
-kind: Deployment
-metadata:
-  name: ollama
-  namespace: ollama
-spec:
-  selector:
-    matchLabels:
-      name: ollama
-  template:
-    metadata:
-      labels:
-        name: ollama
-    spec:
-      containers:
-      - name: ollama
-        image: ollama/ollama:latest
-        ports:
-        - name: http
-          containerPort: 11434
-          protocol: TCP
----
-apiVersion: v1
-kind: Service
-metadata:
-  name: ollama
-  namespace: ollama
-spec:
-  type: ClusterIP
-  selector:
-    name: ollama
-  ports:
-  - port: 80
-    name: http
-    targetPort: http
-    protocol: TCP
diff --git a/examples/kubernetes/gpu.yaml b/examples/kubernetes/gpu.yaml
deleted file mode 100644
index ba90abb6..00000000
--- a/examples/kubernetes/gpu.yaml
+++ /dev/null
@@ -1,58 +0,0 @@
----
-apiVersion: v1
-kind: Namespace
-metadata:
-  name: ollama
----
-apiVersion: apps/v1
-kind: Deployment
-metadata:
-  name: ollama
-  namespace: ollama
-spec:
-  strategy:
-    type: Recreate
-  selector:
-    matchLabels:
-      name: ollama
-  template:
-    metadata:
-      labels:
-        name: ollama
-    spec:
-      containers:
-      - name: ollama
-        image: ollama/ollama:latest
-        env:
-        - name: PATH
-          value: /usr/local/nvidia/bin:/usr/local/cuda/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin
-        - name: LD_LIBRARY_PATH
-          value: /usr/local/nvidia/lib:/usr/local/nvidia/lib64
-        - name: NVIDIA_DRIVER_CAPABILITIES
-          value: compute,utility
-        ports:
-        - name: http
-          containerPort: 11434
-          protocol: TCP
-        resources:
-          limits:
-            nvidia.com/gpu: 1
-      tolerations:
-      - key: nvidia.com/gpu
-        operator: Exists
-        effect: NoSchedule
----
-apiVersion: v1
-kind: Service
-metadata:
-  name: ollama
-  namespace: ollama
-spec:
-  type: ClusterIP
-  selector:
-    name: ollama
-  ports:
-  - port: 80
-    name: http
-    targetPort: http
-    protocol: TCP
diff --git a/examples/langchain-python-rag-document/README.md b/examples/langchain-python-rag-document/README.md
deleted file mode 100644
index d37afc9d..00000000
--- a/examples/langchain-python-rag-document/README.md
+++ /dev/null
@@ -1,29 +0,0 @@
-# LangChain Document QA
-
-This example provides an interface for asking questions to a PDF document.
-
-## Setup
-
-1. Ensure you have the `llama3.2` model installed:
-
-```
-ollama pull llama3.2
-```
-
-2. Install the Python requirements.
-
-```
-pip install -r requirements.txt
-```
-
-## Run
-
-```
-python main.py
-```
-
-A prompt will appear, where questions may be asked:
-
-```
-Query: How many locations does WeWork have?
-``` diff --git a/examples/langchain-python-rag-document/main.py b/examples/langchain-python-rag-document/main.py deleted file mode 100644 index b93828f8..00000000 --- a/examples/langchain-python-rag-document/main.py +++ /dev/null @@ -1,61 +0,0 @@ -from langchain_community.document_loaders import OnlinePDFLoader -from langchain_community.vectorstores import Chroma -from langchain_community.embeddings import GPT4AllEmbeddings -from langchain_core.prompts import PromptTemplate -from langchain_community.llms import Ollama -from langchain.callbacks.manager import CallbackManager -from langchain.callbacks.streaming_stdout import StreamingStdOutCallbackHandler -from langchain.chains import RetrievalQA -import sys -import os - -class SuppressStdout: - def __enter__(self): - self._original_stdout = sys.stdout - self._original_stderr = sys.stderr - sys.stdout = open(os.devnull, 'w') - sys.stderr = open(os.devnull, 'w') - - def __exit__(self, exc_type, exc_val, exc_tb): - sys.stdout.close() - sys.stdout = self._original_stdout - sys.stderr = self._original_stderr - -# load the pdf and split it into chunks -loader = OnlinePDFLoader("https://d18rn0p25nwr6d.cloudfront.net/CIK-0001813756/975b3e9b-268e-4798-a9e4-2a9a7c92dc10.pdf") -data = loader.load() - -from langchain.text_splitter import RecursiveCharacterTextSplitter -text_splitter = RecursiveCharacterTextSplitter(chunk_size=500, chunk_overlap=0) -all_splits = text_splitter.split_documents(data) - -with SuppressStdout(): - vectorstore = Chroma.from_documents(documents=all_splits, embedding=GPT4AllEmbeddings()) - -while True: - query = input("\nQuery: ") - if query == "exit": - break - if query.strip() == "": - continue - - # Prompt - template = """Use the following pieces of context to answer the question at the end. - If you don't know the answer, just say that you don't know, don't try to make up an answer. - Use three sentences maximum and keep the answer as concise as possible. 
- {context} - Question: {question} - Helpful Answer:""" - QA_CHAIN_PROMPT = PromptTemplate( - input_variables=["context", "question"], - template=template, - ) - - llm = Ollama(model="llama3.2", callback_manager=CallbackManager([StreamingStdOutCallbackHandler()])) - qa_chain = RetrievalQA.from_chain_type( - llm, - retriever=vectorstore.as_retriever(), - chain_type_kwargs={"prompt": QA_CHAIN_PROMPT}, - ) - - result = qa_chain({"query": query}) diff --git a/examples/langchain-python-rag-document/requirements.txt b/examples/langchain-python-rag-document/requirements.txt deleted file mode 100644 index 09a54191..00000000 --- a/examples/langchain-python-rag-document/requirements.txt +++ /dev/null @@ -1,109 +0,0 @@ -absl-py==1.4.0 -aiohttp==3.8.5 -aiosignal==1.3.1 -anyio==3.7.1 -astunparse==1.6.3 -async-timeout==4.0.3 -attrs==23.1.0 -backoff==2.2.1 -beautifulsoup4==4.12.2 -bs4==0.0.1 -cachetools==5.3.1 -certifi==2023.7.22 -cffi==1.15.1 -chardet==5.2.0 -charset-normalizer==3.2.0 -Chroma==0.2.0 -chroma-hnswlib==0.7.2 -chromadb==0.4.5 -click==8.1.6 -coloredlogs==15.0.1 -cryptography==41.0.3 -dataclasses-json==0.5.14 -fastapi==0.99.1 -filetype==1.2.0 -flatbuffers==23.5.26 -frozenlist==1.4.0 -gast==0.4.0 -google-auth==2.22.0 -google-auth-oauthlib==1.0.0 -google-pasta==0.2.0 -gpt4all==1.0.8 -grpcio==1.57.0 -h11==0.14.0 -h5py==3.9.0 -httptools==0.6.0 -humanfriendly==10.0 -idna==3.4 -importlib-resources==6.0.1 -joblib==1.3.2 -keras==2.13.1 -langchain==0.0.261 -langsmith==0.0.21 -libclang==16.0.6 -lxml==4.9.3 -Markdown==3.4.4 -MarkupSafe==2.1.3 -marshmallow==3.20.1 -monotonic==1.6 -mpmath==1.3.0 -multidict==6.0.4 -mypy-extensions==1.0.0 -nltk==3.8.1 -numexpr==2.8.5 -numpy==1.24.3 -oauthlib==3.2.2 -onnxruntime==1.15.1 -openapi-schema-pydantic==1.2.4 -opt-einsum==3.3.0 -overrides==7.4.0 -packaging==23.1 -pdf2image==1.16.3 -pdfminer==20191125 -pdfminer.six==20221105 -Pillow==10.0.0 -posthog==3.0.1 -protobuf==4.24.0 -pulsar-client==3.2.0 -pyasn1==0.5.0 -pyasn1-modules==0.3.0 -pycparser==2.21 -pycryptodome==3.18.0 -pydantic==1.10.12 -PyPika==0.48.9 -python-dateutil==2.8.2 -python-dotenv==1.0.0 -python-magic==0.4.27 -PyYAML==6.0.1 -regex==2023.8.8 -requests==2.31.0 -requests-oauthlib==1.3.1 -rsa==4.9 -six==1.16.0 -sniffio==1.3.0 -soupsieve==2.4.1 -SQLAlchemy==2.0.19 -starlette==0.27.0 -sympy==1.12 -tabulate==0.9.0 -tenacity==8.2.2 -tensorboard==2.13.0 -tensorboard-data-server==0.7.1 -tensorflow==2.13.0 -tensorflow-estimator==2.13.0 -tensorflow-hub==0.14.0 -tensorflow-macos==2.13.0 -termcolor==2.3.0 -tokenizers==0.13.3 -tqdm==4.66.1 -typing-inspect==0.9.0 -typing_extensions==4.5.0 -unstructured==0.9.2 -urllib3==1.26.16 -uvicorn==0.23.2 -uvloop==0.17.0 -watchfiles==0.19.0 -websockets==11.0.3 -Werkzeug==2.3.6 -wrapt==1.15.0 -yarl==1.9.2 diff --git a/examples/langchain-python-rag-privategpt/.gitignore b/examples/langchain-python-rag-privategpt/.gitignore deleted file mode 100644 index 240b29e5..00000000 --- a/examples/langchain-python-rag-privategpt/.gitignore +++ /dev/null @@ -1,170 +0,0 @@ -# OSX -.DS_STORE - -# Models -models/ - -# Local Chroma db -.chroma/ -db/ - -# Byte-compiled / optimized / DLL files -__pycache__/ -*.py[cod] -*$py.class - -# C extensions -*.so - -# Distribution / packaging -.Python -build/ -develop-eggs/ -dist/ -downloads/ -eggs/ -.eggs/ -lib/ -lib64/ -parts/ -sdist/ -var/ -wheels/ -share/python-wheels/ -*.egg-info/ -.installed.cfg -*.egg -MANIFEST - -# PyInstaller -# Usually these files are written by a python script from a template -# before PyInstaller builds the exe, so as to inject 
date/other infos into it. -*.manifest -*.spec - -# Installer logs -pip-log.txt -pip-delete-this-directory.txt - -# Unit test / coverage reports -htmlcov/ -.tox/ -.nox/ -.coverage -.coverage.* -.cache -nosetests.xml -coverage.xml -*.cover -*.py,cover -.hypothesis/ -.pytest_cache/ -cover/ - -# Translations -*.mo -*.pot - -# Django stuff: -*.log -local_settings.py -db.sqlite3 -db.sqlite3-journal - -# Flask stuff: -instance/ -.webassets-cache - -# Scrapy stuff: -.scrapy - -# Sphinx documentation -docs/_build/ - -# PyBuilder -.pybuilder/ -target/ - -# Jupyter Notebook -.ipynb_checkpoints - -# IPython -profile_default/ -ipython_config.py - -# pyenv -# For a library or package, you might want to ignore these files since the code is -# intended to run in multiple environments; otherwise, check them in: -# .python-version - -# pipenv -# According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control. -# However, in case of collaboration, if having platform-specific dependencies or dependencies -# having no cross-platform support, pipenv may install dependencies that don't work, or not -# install all needed dependencies. -#Pipfile.lock - -# poetry -# Similar to Pipfile.lock, it is generally recommended to include poetry.lock in version control. -# This is especially recommended for binary packages to ensure reproducibility, and is more -# commonly ignored for libraries. -# https://python-poetry.org/docs/basic-usage/#commit-your-poetrylock-file-to-version-control -#poetry.lock - -# pdm -# Similar to Pipfile.lock, it is generally recommended to include pdm.lock in version control. -#pdm.lock -# pdm stores project-wide configurations in .pdm.toml, but it is recommended to not include it -# in version control. -# https://pdm.fming.dev/#use-with-ide -.pdm.toml - -# PEP 582; used by e.g. github.com/David-OConnor/pyflow and github.com/pdm-project/pdm -__pypackages__/ - -# Celery stuff -celerybeat-schedule -celerybeat.pid - -# SageMath parsed files -*.sage.py - -# Environments -.env -.venv -env/ -venv/ -ENV/ -env.bak/ -venv.bak/ - -# Spyder project settings -.spyderproject -.spyproject - -# Rope project settings -.ropeproject - -# mkdocs documentation -/site - -# mypy -.mypy_cache/ -.dmypy.json -dmypy.json - -# Pyre type checker -.pyre/ - -# pytype static type analyzer -.pytype/ - -# Cython debug symbols -cython_debug/ - -# PyCharm -# JetBrains specific template is maintained in a separate JetBrains.gitignore that can -# be found at https://github.com/github/gitignore/blob/main/Global/JetBrains.gitignore -# and can be added to the global gitignore or merged into this file. For a more nuclear -# option (not recommended) you can uncomment the following to ignore the entire idea folder. -#.idea/ diff --git a/examples/langchain-python-rag-privategpt/LICENSE b/examples/langchain-python-rag-privategpt/LICENSE deleted file mode 100644 index 261eeb9e..00000000 --- a/examples/langchain-python-rag-privategpt/LICENSE +++ /dev/null @@ -1,201 +0,0 @@ - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. 
- - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. 
Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. 
This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - - Copyright [yyyy] [name of copyright owner] - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. 
diff --git a/examples/langchain-python-rag-privategpt/README.md b/examples/langchain-python-rag-privategpt/README.md
deleted file mode 100644
index 0133fd88..00000000
--- a/examples/langchain-python-rag-privategpt/README.md
+++ /dev/null
@@ -1,91 +0,0 @@
-# PrivateGPT with Llama 2 uncensored
-
-https://github.com/ollama/ollama/assets/3325447/20cf8ec6-ff25-42c6-bdd8-9be594e3ce1b
-
-> Note: this example is a slightly modified version of PrivateGPT using models such as Llama 2 Uncensored. All credit for PrivateGPT goes to its creator, Iván Martínez; you can find his GitHub repo [here](https://github.com/imartinez/privateGPT).
-
-### Setup
-
-Set up a virtual environment (optional):
-
-```
-python3 -m venv .venv
-source .venv/bin/activate
-```
-
-Install the Python dependencies:
-
-```shell
-pip install -r requirements.txt
-```
-
-Pull the model you'd like to use:
-
-```
-ollama pull llama2-uncensored
-```
-
-### Getting WeWork's latest quarterly earnings report (10-Q)
-
-```
-mkdir source_documents
-curl https://d18rn0p25nwr6d.cloudfront.net/CIK-0001813756/975b3e9b-268e-4798-a9e4-2a9a7c92dc10.pdf -o source_documents/wework.pdf
-```
-
-### Ingesting files
-
-```shell
-python ingest.py
-```
-
-Output should look like this:
-
-```shell
-Creating new vectorstore
-Loading documents from source_documents
-Loading new documents: 100%|██████████████████████| 1/1 [00:01<00:00, 1.73s/it]
-Loaded 1 new documents from source_documents
-Split into 90 chunks of text (max. 500 tokens each)
-Creating embeddings. May take some minutes...
-Using embedded DuckDB with persistence: data will be stored in: db
-Ingestion complete! You can now run privateGPT.py to query your documents
-```
-
-### Ask questions
-
-```shell
-python privateGPT.py
-
-Enter a query: How many locations does WeWork have?
-
-> Answer (took 17.7 s.):
-As of June 2023, WeWork has 777 locations worldwide, including 610 Consolidated Locations (as defined in the section entitled Key Performance Indicators).
-``` - -### Try a different model: - -``` -ollama pull llama2:13b -MODEL=llama2:13b python privateGPT.py -``` - -## Adding more files - -Put any and all your files into the `source_documents` directory - -The supported extensions are: - -- `.csv`: CSV, -- `.docx`: Word Document, -- `.doc`: Word Document, -- `.enex`: EverNote, -- `.eml`: Email, -- `.epub`: EPub, -- `.html`: HTML File, -- `.md`: Markdown, -- `.msg`: Outlook Message, -- `.odt`: Open Document Text, -- `.pdf`: Portable Document Format (PDF), -- `.pptx` : PowerPoint Document, -- `.ppt` : PowerPoint Document, -- `.txt`: Text file (UTF-8), diff --git a/examples/langchain-python-rag-privategpt/constants.py b/examples/langchain-python-rag-privategpt/constants.py deleted file mode 100644 index 56dda795..00000000 --- a/examples/langchain-python-rag-privategpt/constants.py +++ /dev/null @@ -1,11 +0,0 @@ -import os -from chromadb.config import Settings - -# Define the folder for storing database -PERSIST_DIRECTORY = os.environ.get('PERSIST_DIRECTORY', 'db') - -# Define the Chroma settings -CHROMA_SETTINGS = Settings( - persist_directory=PERSIST_DIRECTORY, - anonymized_telemetry=False -) diff --git a/examples/langchain-python-rag-privategpt/ingest.py b/examples/langchain-python-rag-privategpt/ingest.py deleted file mode 100755 index 0f71ccf0..00000000 --- a/examples/langchain-python-rag-privategpt/ingest.py +++ /dev/null @@ -1,170 +0,0 @@ -#!/usr/bin/env python3 -import os -import glob -from typing import List -from multiprocessing import Pool -from tqdm import tqdm - -from langchain.document_loaders import ( - CSVLoader, - EverNoteLoader, - PyMuPDFLoader, - TextLoader, - UnstructuredEmailLoader, - UnstructuredEPubLoader, - UnstructuredHTMLLoader, - UnstructuredMarkdownLoader, - UnstructuredODTLoader, - UnstructuredPowerPointLoader, - UnstructuredWordDocumentLoader, -) - -from langchain.text_splitter import RecursiveCharacterTextSplitter -from langchain.vectorstores import Chroma -from langchain.embeddings import HuggingFaceEmbeddings -from langchain.docstore.document import Document -from constants import CHROMA_SETTINGS - - -# Load environment variables -persist_directory = os.environ.get('PERSIST_DIRECTORY', 'db') -source_directory = os.environ.get('SOURCE_DIRECTORY', 'source_documents') -embeddings_model_name = os.environ.get('EMBEDDINGS_MODEL_NAME', 'all-MiniLM-L6-v2') -chunk_size = 500 -chunk_overlap = 50 - -# Custom document loaders -class MyElmLoader(UnstructuredEmailLoader): - """Wrapper to fallback to text/plain when default does not work""" - - def load(self) -> List[Document]: - """Wrapper adding fallback for elm without html""" - try: - try: - doc = UnstructuredEmailLoader.load(self) - except ValueError as e: - if 'text/html content not found in email' in str(e): - # Try plain text - self.unstructured_kwargs["content_source"]="text/plain" - doc = UnstructuredEmailLoader.load(self) - else: - raise - except Exception as e: - # Add file_path to exception message - raise type(e)(f"{self.file_path}: {e}") from e - - return doc - - -# Map file extensions to document loaders and their arguments -LOADER_MAPPING = { - ".csv": (CSVLoader, {}), - # ".docx": (Docx2txtLoader, {}), - ".doc": (UnstructuredWordDocumentLoader, {}), - ".docx": (UnstructuredWordDocumentLoader, {}), - ".enex": (EverNoteLoader, {}), - ".eml": (MyElmLoader, {}), - ".epub": (UnstructuredEPubLoader, {}), - ".html": (UnstructuredHTMLLoader, {}), - ".md": (UnstructuredMarkdownLoader, {}), - ".odt": (UnstructuredODTLoader, {}), - ".pdf": (PyMuPDFLoader, {}), - ".ppt": 
(UnstructuredPowerPointLoader, {}), - ".pptx": (UnstructuredPowerPointLoader, {}), - ".txt": (TextLoader, {"encoding": "utf8"}), - # Add more mappings for other file extensions and loaders as needed -} - - -def load_single_document(file_path: str) -> List[Document]: - if os.path.getsize(file_path) != 0: - filename, ext = os.path.splitext(file_path) - if ext in LOADER_MAPPING: - loader_class, loader_args = LOADER_MAPPING[ext] - try: - loader = loader_class(file_path, **loader_args) - if loader: - return loader.load() - except: - print(f"Corrupted file {file_path}. Ignoring it.") - else: - print(f"Unsupported file {file_path}. Ignoring it.") - else: - print(f"Empty file {file_path}. Ignoring it.") - - -def load_documents(source_dir: str, ignored_files: List[str] = []) -> List[Document]: - """ - Loads all documents from the source documents directory, ignoring specified files - """ - all_files = [] - for ext in LOADER_MAPPING: - all_files.extend( - glob.glob(os.path.join(source_dir, f"**/*{ext}"), recursive=True) - ) - filtered_files = [file_path for file_path in all_files if file_path not in ignored_files] - - with Pool(processes=os.cpu_count()) as pool: - results = [] - with tqdm(total=len(filtered_files), desc='Loading new documents', ncols=80) as pbar: - for i, docs in enumerate(pool.imap_unordered(load_single_document, filtered_files)): - if docs: - results.extend(docs) - pbar.update() - - return results - -def process_documents(ignored_files: List[str] = []) -> List[Document]: - """ - Load documents and split in chunks - """ - print(f"Loading documents from {source_directory}") - documents = load_documents(source_directory, ignored_files) - if not documents: - print("No new documents to load") - exit(0) - print(f"Loaded {len(documents)} new documents from {source_directory}") - text_splitter = RecursiveCharacterTextSplitter(chunk_size=chunk_size, chunk_overlap=chunk_overlap) - texts = text_splitter.split_documents(documents) - print(f"Split into {len(texts)} chunks of text (max. {chunk_size} tokens each)") - return texts - -def does_vectorstore_exist(persist_directory: str) -> bool: - """ - Checks if vectorstore exists - """ - if os.path.exists(os.path.join(persist_directory, 'index')): - if os.path.exists(os.path.join(persist_directory, 'chroma-collections.parquet')) and os.path.exists(os.path.join(persist_directory, 'chroma-embeddings.parquet')): - list_index_files = glob.glob(os.path.join(persist_directory, 'index/*.bin')) - list_index_files += glob.glob(os.path.join(persist_directory, 'index/*.pkl')) - # At least 3 documents are needed in a working vectorstore - if len(list_index_files) > 3: - return True - return False - -def main(): - # Create embeddings - embeddings = HuggingFaceEmbeddings(model_name=embeddings_model_name) - - if does_vectorstore_exist(persist_directory): - # Update and store locally vectorstore - print(f"Appending to existing vectorstore at {persist_directory}") - db = Chroma(persist_directory=persist_directory, embedding_function=embeddings, client_settings=CHROMA_SETTINGS) - collection = db.get() - texts = process_documents([metadata['source'] for metadata in collection['metadatas']]) - print(f"Creating embeddings. May take some minutes...") - db.add_documents(texts) - else: - # Create and store locally vectorstore - print("Creating new vectorstore") - texts = process_documents() - print(f"Creating embeddings. 
May take some minutes...") - db = Chroma.from_documents(texts, embeddings, persist_directory=persist_directory) - db.persist() - db = None - - print(f"Ingestion complete! You can now run privateGPT.py to query your documents") - - -if __name__ == "__main__": - main() diff --git a/examples/langchain-python-rag-privategpt/poetry.lock b/examples/langchain-python-rag-privategpt/poetry.lock deleted file mode 100644 index f02b1c5b..00000000 --- a/examples/langchain-python-rag-privategpt/poetry.lock +++ /dev/null @@ -1,3833 +0,0 @@ -# This file is automatically @generated by Poetry 1.5.1 and should not be changed by hand. - -[[package]] -name = "aiohttp" -version = "3.8.4" -description = "Async http client/server framework (asyncio)" -optional = false -python-versions = ">=3.6" -files = [ - {file = "aiohttp-3.8.4-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:5ce45967538fb747370308d3145aa68a074bdecb4f3a300869590f725ced69c1"}, - {file = "aiohttp-3.8.4-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:b744c33b6f14ca26b7544e8d8aadff6b765a80ad6164fb1a430bbadd593dfb1a"}, - {file = "aiohttp-3.8.4-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:1a45865451439eb320784918617ba54b7a377e3501fb70402ab84d38c2cd891b"}, - {file = "aiohttp-3.8.4-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a86d42d7cba1cec432d47ab13b6637bee393a10f664c425ea7b305d1301ca1a3"}, - {file = "aiohttp-3.8.4-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:ee3c36df21b5714d49fc4580247947aa64bcbe2939d1b77b4c8dcb8f6c9faecc"}, - {file = "aiohttp-3.8.4-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:176a64b24c0935869d5bbc4c96e82f89f643bcdf08ec947701b9dbb3c956b7dd"}, - {file = "aiohttp-3.8.4-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c844fd628851c0bc309f3c801b3a3d58ce430b2ce5b359cd918a5a76d0b20cb5"}, - {file = "aiohttp-3.8.4-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:5393fb786a9e23e4799fec788e7e735de18052f83682ce2dfcabaf1c00c2c08e"}, - {file = "aiohttp-3.8.4-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:e4b09863aae0dc965c3ef36500d891a3ff495a2ea9ae9171e4519963c12ceefd"}, - {file = "aiohttp-3.8.4-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:adfbc22e87365a6e564c804c58fc44ff7727deea782d175c33602737b7feadb6"}, - {file = "aiohttp-3.8.4-cp310-cp310-musllinux_1_1_ppc64le.whl", hash = "sha256:147ae376f14b55f4f3c2b118b95be50a369b89b38a971e80a17c3fd623f280c9"}, - {file = "aiohttp-3.8.4-cp310-cp310-musllinux_1_1_s390x.whl", hash = "sha256:eafb3e874816ebe2a92f5e155f17260034c8c341dad1df25672fb710627c6949"}, - {file = "aiohttp-3.8.4-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:c6cc15d58053c76eacac5fa9152d7d84b8d67b3fde92709195cb984cfb3475ea"}, - {file = "aiohttp-3.8.4-cp310-cp310-win32.whl", hash = "sha256:59f029a5f6e2d679296db7bee982bb3d20c088e52a2977e3175faf31d6fb75d1"}, - {file = "aiohttp-3.8.4-cp310-cp310-win_amd64.whl", hash = "sha256:fe7ba4a51f33ab275515f66b0a236bcde4fb5561498fe8f898d4e549b2e4509f"}, - {file = "aiohttp-3.8.4-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:3d8ef1a630519a26d6760bc695842579cb09e373c5f227a21b67dc3eb16cfea4"}, - {file = "aiohttp-3.8.4-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:5b3f2e06a512e94722886c0827bee9807c86a9f698fac6b3aee841fab49bbfb4"}, - {file = "aiohttp-3.8.4-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:3a80464982d41b1fbfe3154e440ba4904b71c1a53e9cd584098cd41efdb188ef"}, - 
{file = "aiohttp-3.8.4-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:8b631e26df63e52f7cce0cce6507b7a7f1bc9b0c501fcde69742130b32e8782f"}, - {file = "aiohttp-3.8.4-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:3f43255086fe25e36fd5ed8f2ee47477408a73ef00e804cb2b5cba4bf2ac7f5e"}, - {file = "aiohttp-3.8.4-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:4d347a172f866cd1d93126d9b239fcbe682acb39b48ee0873c73c933dd23bd0f"}, - {file = "aiohttp-3.8.4-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a3fec6a4cb5551721cdd70473eb009d90935b4063acc5f40905d40ecfea23e05"}, - {file = "aiohttp-3.8.4-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:80a37fe8f7c1e6ce8f2d9c411676e4bc633a8462844e38f46156d07a7d401654"}, - {file = "aiohttp-3.8.4-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:d1e6a862b76f34395a985b3cd39a0d949ca80a70b6ebdea37d3ab39ceea6698a"}, - {file = "aiohttp-3.8.4-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:cd468460eefef601ece4428d3cf4562459157c0f6523db89365202c31b6daebb"}, - {file = "aiohttp-3.8.4-cp311-cp311-musllinux_1_1_ppc64le.whl", hash = "sha256:618c901dd3aad4ace71dfa0f5e82e88b46ef57e3239fc7027773cb6d4ed53531"}, - {file = "aiohttp-3.8.4-cp311-cp311-musllinux_1_1_s390x.whl", hash = "sha256:652b1bff4f15f6287550b4670546a2947f2a4575b6c6dff7760eafb22eacbf0b"}, - {file = "aiohttp-3.8.4-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:80575ba9377c5171407a06d0196b2310b679dc752d02a1fcaa2bc20b235dbf24"}, - {file = "aiohttp-3.8.4-cp311-cp311-win32.whl", hash = "sha256:bbcf1a76cf6f6dacf2c7f4d2ebd411438c275faa1dc0c68e46eb84eebd05dd7d"}, - {file = "aiohttp-3.8.4-cp311-cp311-win_amd64.whl", hash = "sha256:6e74dd54f7239fcffe07913ff8b964e28b712f09846e20de78676ce2a3dc0bfc"}, - {file = "aiohttp-3.8.4-cp36-cp36m-macosx_10_9_x86_64.whl", hash = "sha256:880e15bb6dad90549b43f796b391cfffd7af373f4646784795e20d92606b7a51"}, - {file = "aiohttp-3.8.4-cp36-cp36m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:bb96fa6b56bb536c42d6a4a87dfca570ff8e52de2d63cabebfd6fb67049c34b6"}, - {file = "aiohttp-3.8.4-cp36-cp36m-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:4a6cadebe132e90cefa77e45f2d2f1a4b2ce5c6b1bfc1656c1ddafcfe4ba8131"}, - {file = "aiohttp-3.8.4-cp36-cp36m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:f352b62b45dff37b55ddd7b9c0c8672c4dd2eb9c0f9c11d395075a84e2c40f75"}, - {file = "aiohttp-3.8.4-cp36-cp36m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7ab43061a0c81198d88f39aaf90dae9a7744620978f7ef3e3708339b8ed2ef01"}, - {file = "aiohttp-3.8.4-cp36-cp36m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:c9cb1565a7ad52e096a6988e2ee0397f72fe056dadf75d17fa6b5aebaea05622"}, - {file = "aiohttp-3.8.4-cp36-cp36m-musllinux_1_1_aarch64.whl", hash = "sha256:1b3ea7edd2d24538959c1c1abf97c744d879d4e541d38305f9bd7d9b10c9ec41"}, - {file = "aiohttp-3.8.4-cp36-cp36m-musllinux_1_1_i686.whl", hash = "sha256:7c7837fe8037e96b6dd5cfcf47263c1620a9d332a87ec06a6ca4564e56bd0f36"}, - {file = "aiohttp-3.8.4-cp36-cp36m-musllinux_1_1_ppc64le.whl", hash = "sha256:3b90467ebc3d9fa5b0f9b6489dfb2c304a1db7b9946fa92aa76a831b9d587e99"}, - {file = "aiohttp-3.8.4-cp36-cp36m-musllinux_1_1_s390x.whl", hash = "sha256:cab9401de3ea52b4b4c6971db5fb5c999bd4260898af972bf23de1c6b5dd9d71"}, - {file = "aiohttp-3.8.4-cp36-cp36m-musllinux_1_1_x86_64.whl", hash = 
"sha256:d1f9282c5f2b5e241034a009779e7b2a1aa045f667ff521e7948ea9b56e0c5ff"}, - {file = "aiohttp-3.8.4-cp36-cp36m-win32.whl", hash = "sha256:5e14f25765a578a0a634d5f0cd1e2c3f53964553a00347998dfdf96b8137f777"}, - {file = "aiohttp-3.8.4-cp36-cp36m-win_amd64.whl", hash = "sha256:4c745b109057e7e5f1848c689ee4fb3a016c8d4d92da52b312f8a509f83aa05e"}, - {file = "aiohttp-3.8.4-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:aede4df4eeb926c8fa70de46c340a1bc2c6079e1c40ccf7b0eae1313ffd33519"}, - {file = "aiohttp-3.8.4-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:4ddaae3f3d32fc2cb4c53fab020b69a05c8ab1f02e0e59665c6f7a0d3a5be54f"}, - {file = "aiohttp-3.8.4-cp37-cp37m-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:c4eb3b82ca349cf6fadcdc7abcc8b3a50ab74a62e9113ab7a8ebc268aad35bb9"}, - {file = "aiohttp-3.8.4-cp37-cp37m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:9bcb89336efa095ea21b30f9e686763f2be4478f1b0a616969551982c4ee4c3b"}, - {file = "aiohttp-3.8.4-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6c08e8ed6fa3d477e501ec9db169bfac8140e830aa372d77e4a43084d8dd91ab"}, - {file = "aiohttp-3.8.4-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:c6cd05ea06daca6ad6a4ca3ba7fe7dc5b5de063ff4daec6170ec0f9979f6c332"}, - {file = "aiohttp-3.8.4-cp37-cp37m-musllinux_1_1_aarch64.whl", hash = "sha256:b7a00a9ed8d6e725b55ef98b1b35c88013245f35f68b1b12c5cd4100dddac333"}, - {file = "aiohttp-3.8.4-cp37-cp37m-musllinux_1_1_i686.whl", hash = "sha256:de04b491d0e5007ee1b63a309956eaed959a49f5bb4e84b26c8f5d49de140fa9"}, - {file = "aiohttp-3.8.4-cp37-cp37m-musllinux_1_1_ppc64le.whl", hash = "sha256:40653609b3bf50611356e6b6554e3a331f6879fa7116f3959b20e3528783e699"}, - {file = "aiohttp-3.8.4-cp37-cp37m-musllinux_1_1_s390x.whl", hash = "sha256:dbf3a08a06b3f433013c143ebd72c15cac33d2914b8ea4bea7ac2c23578815d6"}, - {file = "aiohttp-3.8.4-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:854f422ac44af92bfe172d8e73229c270dc09b96535e8a548f99c84f82dde241"}, - {file = "aiohttp-3.8.4-cp37-cp37m-win32.whl", hash = "sha256:aeb29c84bb53a84b1a81c6c09d24cf33bb8432cc5c39979021cc0f98c1292a1a"}, - {file = "aiohttp-3.8.4-cp37-cp37m-win_amd64.whl", hash = "sha256:db3fc6120bce9f446d13b1b834ea5b15341ca9ff3f335e4a951a6ead31105480"}, - {file = "aiohttp-3.8.4-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:fabb87dd8850ef0f7fe2b366d44b77d7e6fa2ea87861ab3844da99291e81e60f"}, - {file = "aiohttp-3.8.4-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:91f6d540163f90bbaef9387e65f18f73ffd7c79f5225ac3d3f61df7b0d01ad15"}, - {file = "aiohttp-3.8.4-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:d265f09a75a79a788237d7f9054f929ced2e69eb0bb79de3798c468d8a90f945"}, - {file = "aiohttp-3.8.4-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:3d89efa095ca7d442a6d0cbc755f9e08190ba40069b235c9886a8763b03785da"}, - {file = "aiohttp-3.8.4-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:4dac314662f4e2aa5009977b652d9b8db7121b46c38f2073bfeed9f4049732cd"}, - {file = "aiohttp-3.8.4-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:fe11310ae1e4cd560035598c3f29d86cef39a83d244c7466f95c27ae04850f10"}, - {file = "aiohttp-3.8.4-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6ddb2a2026c3f6a68c3998a6c47ab6795e4127315d2e35a09997da21865757f8"}, - {file = 
"aiohttp-3.8.4-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:e75b89ac3bd27d2d043b234aa7b734c38ba1b0e43f07787130a0ecac1e12228a"}, - {file = "aiohttp-3.8.4-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:6e601588f2b502c93c30cd5a45bfc665faaf37bbe835b7cfd461753068232074"}, - {file = "aiohttp-3.8.4-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:a5d794d1ae64e7753e405ba58e08fcfa73e3fad93ef9b7e31112ef3c9a0efb52"}, - {file = "aiohttp-3.8.4-cp38-cp38-musllinux_1_1_ppc64le.whl", hash = "sha256:a1f4689c9a1462f3df0a1f7e797791cd6b124ddbee2b570d34e7f38ade0e2c71"}, - {file = "aiohttp-3.8.4-cp38-cp38-musllinux_1_1_s390x.whl", hash = "sha256:3032dcb1c35bc330134a5b8a5d4f68c1a87252dfc6e1262c65a7e30e62298275"}, - {file = "aiohttp-3.8.4-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:8189c56eb0ddbb95bfadb8f60ea1b22fcfa659396ea36f6adcc521213cd7b44d"}, - {file = "aiohttp-3.8.4-cp38-cp38-win32.whl", hash = "sha256:33587f26dcee66efb2fff3c177547bd0449ab7edf1b73a7f5dea1e38609a0c54"}, - {file = "aiohttp-3.8.4-cp38-cp38-win_amd64.whl", hash = "sha256:e595432ac259af2d4630008bf638873d69346372d38255774c0e286951e8b79f"}, - {file = "aiohttp-3.8.4-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:5a7bdf9e57126dc345b683c3632e8ba317c31d2a41acd5800c10640387d193ed"}, - {file = "aiohttp-3.8.4-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:22f6eab15b6db242499a16de87939a342f5a950ad0abaf1532038e2ce7d31567"}, - {file = "aiohttp-3.8.4-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:7235604476a76ef249bd64cb8274ed24ccf6995c4a8b51a237005ee7a57e8643"}, - {file = "aiohttp-3.8.4-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ea9eb976ffdd79d0e893869cfe179a8f60f152d42cb64622fca418cd9b18dc2a"}, - {file = "aiohttp-3.8.4-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:92c0cea74a2a81c4c76b62ea1cac163ecb20fb3ba3a75c909b9fa71b4ad493cf"}, - {file = "aiohttp-3.8.4-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:493f5bc2f8307286b7799c6d899d388bbaa7dfa6c4caf4f97ef7521b9cb13719"}, - {file = "aiohttp-3.8.4-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0a63f03189a6fa7c900226e3ef5ba4d3bd047e18f445e69adbd65af433add5a2"}, - {file = "aiohttp-3.8.4-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:10c8cefcff98fd9168cdd86c4da8b84baaa90bf2da2269c6161984e6737bf23e"}, - {file = "aiohttp-3.8.4-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:bca5f24726e2919de94f047739d0a4fc01372801a3672708260546aa2601bf57"}, - {file = "aiohttp-3.8.4-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:03baa76b730e4e15a45f81dfe29a8d910314143414e528737f8589ec60cf7391"}, - {file = "aiohttp-3.8.4-cp39-cp39-musllinux_1_1_ppc64le.whl", hash = "sha256:8c29c77cc57e40f84acef9bfb904373a4e89a4e8b74e71aa8075c021ec9078c2"}, - {file = "aiohttp-3.8.4-cp39-cp39-musllinux_1_1_s390x.whl", hash = "sha256:03543dcf98a6619254b409be2d22b51f21ec66272be4ebda7b04e6412e4b2e14"}, - {file = "aiohttp-3.8.4-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:17b79c2963db82086229012cff93ea55196ed31f6493bb1ccd2c62f1724324e4"}, - {file = "aiohttp-3.8.4-cp39-cp39-win32.whl", hash = "sha256:34ce9f93a4a68d1272d26030655dd1b58ff727b3ed2a33d80ec433561b03d67a"}, - {file = "aiohttp-3.8.4-cp39-cp39-win_amd64.whl", hash = "sha256:41a86a69bb63bb2fc3dc9ad5ea9f10f1c9c8e282b471931be0268ddd09430b04"}, - {file = "aiohttp-3.8.4.tar.gz", hash = 
"sha256:bf2e1a9162c1e441bf805a1fd166e249d574ca04e03b34f97e2928769e91ab5c"}, -] - -[package.dependencies] -aiosignal = ">=1.1.2" -async-timeout = ">=4.0.0a3,<5.0" -attrs = ">=17.3.0" -charset-normalizer = ">=2.0,<4.0" -frozenlist = ">=1.1.1" -multidict = ">=4.5,<7.0" -yarl = ">=1.0,<2.0" - -[package.extras] -speedups = ["Brotli", "aiodns", "cchardet"] - -[[package]] -name = "aiosignal" -version = "1.3.1" -description = "aiosignal: a list of registered asynchronous callbacks" -optional = false -python-versions = ">=3.7" -files = [ - {file = "aiosignal-1.3.1-py3-none-any.whl", hash = "sha256:f8376fb07dd1e86a584e4fcdec80b36b7f81aac666ebc724e2c090300dd83b17"}, - {file = "aiosignal-1.3.1.tar.gz", hash = "sha256:54cd96e15e1649b75d6c87526a6ff0b6c1b0dd3459f43d9ca11d48c339b68cfc"}, -] - -[package.dependencies] -frozenlist = ">=1.1.0" - -[[package]] -name = "anyio" -version = "3.7.1" -description = "High level compatibility layer for multiple asynchronous event loop implementations" -optional = false -python-versions = ">=3.7" -files = [ - {file = "anyio-3.7.1-py3-none-any.whl", hash = "sha256:91dee416e570e92c64041bd18b900d1d6fa78dff7048769ce5ac5ddad004fbb5"}, - {file = "anyio-3.7.1.tar.gz", hash = "sha256:44a3c9aba0f5defa43261a8b3efb97891f2bd7d804e0e1f56419befa1adfc780"}, -] - -[package.dependencies] -exceptiongroup = {version = "*", markers = "python_version < \"3.11\""} -idna = ">=2.8" -sniffio = ">=1.1" - -[package.extras] -doc = ["Sphinx", "packaging", "sphinx-autodoc-typehints (>=1.2.0)", "sphinx-rtd-theme (>=1.2.2)", "sphinxcontrib-jquery"] -test = ["anyio[trio]", "coverage[toml] (>=4.5)", "hypothesis (>=4.0)", "mock (>=4)", "psutil (>=5.9)", "pytest (>=7.0)", "pytest-mock (>=3.6.1)", "trustme", "uvloop (>=0.17)"] -trio = ["trio (<0.22)"] - -[[package]] -name = "argilla" -version = "0.0.1" -description = "" -optional = false -python-versions = "*" -files = [ - {file = "argilla-0.0.1-py3-none-any.whl", hash = "sha256:8bdc3c505bcfb47ba4b91f5658034eae53bf7d4f9317980397605c0c55817396"}, - {file = "argilla-0.0.1.tar.gz", hash = "sha256:5017854754e89f573b31af25b25b803f51cea9ca1fa0bcf00505dee1f45cf7c9"}, -] - -[[package]] -name = "async-timeout" -version = "4.0.2" -description = "Timeout context manager for asyncio programs" -optional = false -python-versions = ">=3.6" -files = [ - {file = "async-timeout-4.0.2.tar.gz", hash = "sha256:2163e1640ddb52b7a8c80d0a67a08587e5d245cc9c553a74a847056bc2976b15"}, - {file = "async_timeout-4.0.2-py3-none-any.whl", hash = "sha256:8ca1e4fcf50d07413d66d1a5e416e42cfdf5851c981d679a09851a6853383b3c"}, -] - -[[package]] -name = "attrs" -version = "23.1.0" -description = "Classes Without Boilerplate" -optional = false -python-versions = ">=3.7" -files = [ - {file = "attrs-23.1.0-py3-none-any.whl", hash = "sha256:1f28b4522cdc2fb4256ac1a020c78acf9cba2c6b461ccd2c126f3aa8e8335d04"}, - {file = "attrs-23.1.0.tar.gz", hash = "sha256:6279836d581513a26f1bf235f9acd333bc9115683f14f7e8fae46c98fc50e015"}, -] - -[package.extras] -cov = ["attrs[tests]", "coverage[toml] (>=5.3)"] -dev = ["attrs[docs,tests]", "pre-commit"] -docs = ["furo", "myst-parser", "sphinx", "sphinx-notfound-page", "sphinxcontrib-towncrier", "towncrier", "zope-interface"] -tests = ["attrs[tests-no-zope]", "zope-interface"] -tests-no-zope = ["cloudpickle", "hypothesis", "mypy (>=1.1.1)", "pympler", "pytest (>=4.3.0)", "pytest-mypy-plugins", "pytest-xdist[psutil]"] - -[[package]] -name = "backoff" -version = "2.2.1" -description = "Function decoration for backoff and retry" -optional = false -python-versions = 
">=3.7,<4.0" -files = [ - {file = "backoff-2.2.1-py3-none-any.whl", hash = "sha256:63579f9a0628e06278f7e47b7d7d5b6ce20dc65c5e96a6f3ca99a6adca0396e8"}, - {file = "backoff-2.2.1.tar.gz", hash = "sha256:03f829f5bb1923180821643f8753b0502c3b682293992485b0eef2807afa5cba"}, -] - -[[package]] -name = "beautifulsoup4" -version = "4.12.2" -description = "Screen-scraping library" -optional = false -python-versions = ">=3.6.0" -files = [ - {file = "beautifulsoup4-4.12.2-py3-none-any.whl", hash = "sha256:bd2520ca0d9d7d12694a53d44ac482d181b4ec1888909b035a3dbf40d0f57d4a"}, - {file = "beautifulsoup4-4.12.2.tar.gz", hash = "sha256:492bbc69dca35d12daac71c4db1bfff0c876c00ef4a2ffacce226d4638eb72da"}, -] - -[package.dependencies] -soupsieve = ">1.2" - -[package.extras] -html5lib = ["html5lib"] -lxml = ["lxml"] - -[[package]] -name = "certifi" -version = "2023.5.7" -description = "Python package for providing Mozilla's CA Bundle." -optional = false -python-versions = ">=3.6" -files = [ - {file = "certifi-2023.5.7-py3-none-any.whl", hash = "sha256:c6c2e98f5c7869efca1f8916fed228dd91539f9f1b444c314c06eef02980c716"}, - {file = "certifi-2023.5.7.tar.gz", hash = "sha256:0f0d56dc5a6ad56fd4ba36484d6cc34451e1c6548c61daad8c320169f91eddc7"}, -] - -[[package]] -name = "cffi" -version = "1.15.1" -description = "Foreign Function Interface for Python calling C code." -optional = false -python-versions = "*" -files = [ - {file = "cffi-1.15.1-cp27-cp27m-macosx_10_9_x86_64.whl", hash = "sha256:a66d3508133af6e8548451b25058d5812812ec3798c886bf38ed24a98216fab2"}, - {file = "cffi-1.15.1-cp27-cp27m-manylinux1_i686.whl", hash = "sha256:470c103ae716238bbe698d67ad020e1db9d9dba34fa5a899b5e21577e6d52ed2"}, - {file = "cffi-1.15.1-cp27-cp27m-manylinux1_x86_64.whl", hash = "sha256:9ad5db27f9cabae298d151c85cf2bad1d359a1b9c686a275df03385758e2f914"}, - {file = "cffi-1.15.1-cp27-cp27m-win32.whl", hash = "sha256:b3bbeb01c2b273cca1e1e0c5df57f12dce9a4dd331b4fa1635b8bec26350bde3"}, - {file = "cffi-1.15.1-cp27-cp27m-win_amd64.whl", hash = "sha256:e00b098126fd45523dd056d2efba6c5a63b71ffe9f2bbe1a4fe1716e1d0c331e"}, - {file = "cffi-1.15.1-cp27-cp27mu-manylinux1_i686.whl", hash = "sha256:d61f4695e6c866a23a21acab0509af1cdfd2c013cf256bbf5b6b5e2695827162"}, - {file = "cffi-1.15.1-cp27-cp27mu-manylinux1_x86_64.whl", hash = "sha256:ed9cb427ba5504c1dc15ede7d516b84757c3e3d7868ccc85121d9310d27eed0b"}, - {file = "cffi-1.15.1-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:39d39875251ca8f612b6f33e6b1195af86d1b3e60086068be9cc053aa4376e21"}, - {file = "cffi-1.15.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:285d29981935eb726a4399badae8f0ffdff4f5050eaa6d0cfc3f64b857b77185"}, - {file = "cffi-1.15.1-cp310-cp310-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:3eb6971dcff08619f8d91607cfc726518b6fa2a9eba42856be181c6d0d9515fd"}, - {file = "cffi-1.15.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:21157295583fe8943475029ed5abdcf71eb3911894724e360acff1d61c1d54bc"}, - {file = "cffi-1.15.1-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:5635bd9cb9731e6d4a1132a498dd34f764034a8ce60cef4f5319c0541159392f"}, - {file = "cffi-1.15.1-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:2012c72d854c2d03e45d06ae57f40d78e5770d252f195b93f581acf3ba44496e"}, - {file = "cffi-1.15.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:dd86c085fae2efd48ac91dd7ccffcfc0571387fe1193d33b6394db7ef31fe2a4"}, - {file = 
"cffi-1.15.1-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:fa6693661a4c91757f4412306191b6dc88c1703f780c8234035eac011922bc01"}, - {file = "cffi-1.15.1-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:59c0b02d0a6c384d453fece7566d1c7e6b7bae4fc5874ef2ef46d56776d61c9e"}, - {file = "cffi-1.15.1-cp310-cp310-win32.whl", hash = "sha256:cba9d6b9a7d64d4bd46167096fc9d2f835e25d7e4c121fb2ddfc6528fb0413b2"}, - {file = "cffi-1.15.1-cp310-cp310-win_amd64.whl", hash = "sha256:ce4bcc037df4fc5e3d184794f27bdaab018943698f4ca31630bc7f84a7b69c6d"}, - {file = "cffi-1.15.1-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:3d08afd128ddaa624a48cf2b859afef385b720bb4b43df214f85616922e6a5ac"}, - {file = "cffi-1.15.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:3799aecf2e17cf585d977b780ce79ff0dc9b78d799fc694221ce814c2c19db83"}, - {file = "cffi-1.15.1-cp311-cp311-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:a591fe9e525846e4d154205572a029f653ada1a78b93697f3b5a8f1f2bc055b9"}, - {file = "cffi-1.15.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:3548db281cd7d2561c9ad9984681c95f7b0e38881201e157833a2342c30d5e8c"}, - {file = "cffi-1.15.1-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:91fc98adde3d7881af9b59ed0294046f3806221863722ba7d8d120c575314325"}, - {file = "cffi-1.15.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:94411f22c3985acaec6f83c6df553f2dbe17b698cc7f8ae751ff2237d96b9e3c"}, - {file = "cffi-1.15.1-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:03425bdae262c76aad70202debd780501fabeaca237cdfddc008987c0e0f59ef"}, - {file = "cffi-1.15.1-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:cc4d65aeeaa04136a12677d3dd0b1c0c94dc43abac5860ab33cceb42b801c1e8"}, - {file = "cffi-1.15.1-cp311-cp311-win32.whl", hash = "sha256:a0f100c8912c114ff53e1202d0078b425bee3649ae34d7b070e9697f93c5d52d"}, - {file = "cffi-1.15.1-cp311-cp311-win_amd64.whl", hash = "sha256:04ed324bda3cda42b9b695d51bb7d54b680b9719cfab04227cdd1e04e5de3104"}, - {file = "cffi-1.15.1-cp36-cp36m-macosx_10_9_x86_64.whl", hash = "sha256:50a74364d85fd319352182ef59c5c790484a336f6db772c1a9231f1c3ed0cbd7"}, - {file = "cffi-1.15.1-cp36-cp36m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e263d77ee3dd201c3a142934a086a4450861778baaeeb45db4591ef65550b0a6"}, - {file = "cffi-1.15.1-cp36-cp36m-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:cec7d9412a9102bdc577382c3929b337320c4c4c4849f2c5cdd14d7368c5562d"}, - {file = "cffi-1.15.1-cp36-cp36m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:4289fc34b2f5316fbb762d75362931e351941fa95fa18789191b33fc4cf9504a"}, - {file = "cffi-1.15.1-cp36-cp36m-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:173379135477dc8cac4bc58f45db08ab45d228b3363adb7af79436135d028405"}, - {file = "cffi-1.15.1-cp36-cp36m-manylinux_2_5_x86_64.manylinux1_x86_64.whl", hash = "sha256:6975a3fac6bc83c4a65c9f9fcab9e47019a11d3d2cf7f3c0d03431bf145a941e"}, - {file = "cffi-1.15.1-cp36-cp36m-win32.whl", hash = "sha256:2470043b93ff09bf8fb1d46d1cb756ce6132c54826661a32d4e4d132e1977adf"}, - {file = "cffi-1.15.1-cp36-cp36m-win_amd64.whl", hash = "sha256:30d78fbc8ebf9c92c9b7823ee18eb92f2e6ef79b45ac84db507f52fbe3ec4497"}, - {file = "cffi-1.15.1-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:198caafb44239b60e252492445da556afafc7d1e3ab7a1fb3f0584ef6d742375"}, - {file = 
"cffi-1.15.1-cp37-cp37m-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:5ef34d190326c3b1f822a5b7a45f6c4535e2f47ed06fec77d3d799c450b2651e"}, - {file = "cffi-1.15.1-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:8102eaf27e1e448db915d08afa8b41d6c7ca7a04b7d73af6514df10a3e74bd82"}, - {file = "cffi-1.15.1-cp37-cp37m-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:5df2768244d19ab7f60546d0c7c63ce1581f7af8b5de3eb3004b9b6fc8a9f84b"}, - {file = "cffi-1.15.1-cp37-cp37m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:a8c4917bd7ad33e8eb21e9a5bbba979b49d9a97acb3a803092cbc1133e20343c"}, - {file = "cffi-1.15.1-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0e2642fe3142e4cc4af0799748233ad6da94c62a8bec3a6648bf8ee68b1c7426"}, - {file = "cffi-1.15.1-cp37-cp37m-win32.whl", hash = "sha256:e229a521186c75c8ad9490854fd8bbdd9a0c9aa3a524326b55be83b54d4e0ad9"}, - {file = "cffi-1.15.1-cp37-cp37m-win_amd64.whl", hash = "sha256:a0b71b1b8fbf2b96e41c4d990244165e2c9be83d54962a9a1d118fd8657d2045"}, - {file = "cffi-1.15.1-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:320dab6e7cb2eacdf0e658569d2575c4dad258c0fcc794f46215e1e39f90f2c3"}, - {file = "cffi-1.15.1-cp38-cp38-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:1e74c6b51a9ed6589199c787bf5f9875612ca4a8a0785fb2d4a84429badaf22a"}, - {file = "cffi-1.15.1-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a5c84c68147988265e60416b57fc83425a78058853509c1b0629c180094904a5"}, - {file = "cffi-1.15.1-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:3b926aa83d1edb5aa5b427b4053dc420ec295a08e40911296b9eb1b6170f6cca"}, - {file = "cffi-1.15.1-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:87c450779d0914f2861b8526e035c5e6da0a3199d8f1add1a665e1cbc6fc6d02"}, - {file = "cffi-1.15.1-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:4f2c9f67e9821cad2e5f480bc8d83b8742896f1242dba247911072d4fa94c192"}, - {file = "cffi-1.15.1-cp38-cp38-win32.whl", hash = "sha256:8b7ee99e510d7b66cdb6c593f21c043c248537a32e0bedf02e01e9553a172314"}, - {file = "cffi-1.15.1-cp38-cp38-win_amd64.whl", hash = "sha256:00a9ed42e88df81ffae7a8ab6d9356b371399b91dbdf0c3cb1e84c03a13aceb5"}, - {file = "cffi-1.15.1-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:54a2db7b78338edd780e7ef7f9f6c442500fb0d41a5a4ea24fff1c929d5af585"}, - {file = "cffi-1.15.1-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:fcd131dd944808b5bdb38e6f5b53013c5aa4f334c5cad0c72742f6eba4b73db0"}, - {file = "cffi-1.15.1-cp39-cp39-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:7473e861101c9e72452f9bf8acb984947aa1661a7704553a9f6e4baa5ba64415"}, - {file = "cffi-1.15.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:6c9a799e985904922a4d207a94eae35c78ebae90e128f0c4e521ce339396be9d"}, - {file = "cffi-1.15.1-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:3bcde07039e586f91b45c88f8583ea7cf7a0770df3a1649627bf598332cb6984"}, - {file = "cffi-1.15.1-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:33ab79603146aace82c2427da5ca6e58f2b3f2fb5da893ceac0c42218a40be35"}, - {file = "cffi-1.15.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:5d598b938678ebf3c67377cdd45e09d431369c3b1a5b331058c338e201f12b27"}, - {file = 
"cffi-1.15.1-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:db0fbb9c62743ce59a9ff687eb5f4afbe77e5e8403d6697f7446e5f609976f76"}, - {file = "cffi-1.15.1-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:98d85c6a2bef81588d9227dde12db8a7f47f639f4a17c9ae08e773aa9c697bf3"}, - {file = "cffi-1.15.1-cp39-cp39-win32.whl", hash = "sha256:40f4774f5a9d4f5e344f31a32b5096977b5d48560c5592e2f3d2c4374bd543ee"}, - {file = "cffi-1.15.1-cp39-cp39-win_amd64.whl", hash = "sha256:70df4e3b545a17496c9b3f41f5115e69a4f2e77e94e1d2a8e1070bc0c38c8a3c"}, - {file = "cffi-1.15.1.tar.gz", hash = "sha256:d400bfb9a37b1351253cb402671cea7e89bdecc294e8016a707f6d1d8ac934f9"}, -] - -[package.dependencies] -pycparser = "*" - -[[package]] -name = "chardet" -version = "5.1.0" -description = "Universal encoding detector for Python 3" -optional = false -python-versions = ">=3.7" -files = [ - {file = "chardet-5.1.0-py3-none-any.whl", hash = "sha256:362777fb014af596ad31334fde1e8c327dfdb076e1960d1694662d46a6917ab9"}, - {file = "chardet-5.1.0.tar.gz", hash = "sha256:0d62712b956bc154f85fb0a266e2a3c5913c2967e00348701b32411d6def31e5"}, -] - -[[package]] -name = "charset-normalizer" -version = "3.2.0" -description = "The Real First Universal Charset Detector. Open, modern and actively maintained alternative to Chardet." -optional = false -python-versions = ">=3.7.0" -files = [ - {file = "charset-normalizer-3.2.0.tar.gz", hash = "sha256:3bb3d25a8e6c0aedd251753a79ae98a093c7e7b471faa3aa9a93a81431987ace"}, - {file = "charset_normalizer-3.2.0-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:0b87549028f680ca955556e3bd57013ab47474c3124dc069faa0b6545b6c9710"}, - {file = "charset_normalizer-3.2.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:7c70087bfee18a42b4040bb9ec1ca15a08242cf5867c58726530bdf3945672ed"}, - {file = "charset_normalizer-3.2.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:a103b3a7069b62f5d4890ae1b8f0597618f628b286b03d4bc9195230b154bfa9"}, - {file = "charset_normalizer-3.2.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:94aea8eff76ee6d1cdacb07dd2123a68283cb5569e0250feab1240058f53b623"}, - {file = "charset_normalizer-3.2.0-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:db901e2ac34c931d73054d9797383d0f8009991e723dab15109740a63e7f902a"}, - {file = "charset_normalizer-3.2.0-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:b0dac0ff919ba34d4df1b6131f59ce95b08b9065233446be7e459f95554c0dc8"}, - {file = "charset_normalizer-3.2.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:193cbc708ea3aca45e7221ae58f0fd63f933753a9bfb498a3b474878f12caaad"}, - {file = "charset_normalizer-3.2.0-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:09393e1b2a9461950b1c9a45d5fd251dc7c6f228acab64da1c9c0165d9c7765c"}, - {file = "charset_normalizer-3.2.0-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:baacc6aee0b2ef6f3d308e197b5d7a81c0e70b06beae1f1fcacffdbd124fe0e3"}, - {file = "charset_normalizer-3.2.0-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:bf420121d4c8dce6b889f0e8e4ec0ca34b7f40186203f06a946fa0276ba54029"}, - {file = "charset_normalizer-3.2.0-cp310-cp310-musllinux_1_1_ppc64le.whl", hash = "sha256:c04a46716adde8d927adb9457bbe39cf473e1e2c2f5d0a16ceb837e5d841ad4f"}, - {file = "charset_normalizer-3.2.0-cp310-cp310-musllinux_1_1_s390x.whl", hash = "sha256:aaf63899c94de41fe3cf934601b0f7ccb6b428c6e4eeb80da72c58eab077b19a"}, - {file = 
"charset_normalizer-3.2.0-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:d62e51710986674142526ab9f78663ca2b0726066ae26b78b22e0f5e571238dd"}, - {file = "charset_normalizer-3.2.0-cp310-cp310-win32.whl", hash = "sha256:04e57ab9fbf9607b77f7d057974694b4f6b142da9ed4a199859d9d4d5c63fe96"}, - {file = "charset_normalizer-3.2.0-cp310-cp310-win_amd64.whl", hash = "sha256:48021783bdf96e3d6de03a6e39a1171ed5bd7e8bb93fc84cc649d11490f87cea"}, - {file = "charset_normalizer-3.2.0-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:4957669ef390f0e6719db3613ab3a7631e68424604a7b448f079bee145da6e09"}, - {file = "charset_normalizer-3.2.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:46fb8c61d794b78ec7134a715a3e564aafc8f6b5e338417cb19fe9f57a5a9bf2"}, - {file = "charset_normalizer-3.2.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:f779d3ad205f108d14e99bb3859aa7dd8e9c68874617c72354d7ecaec2a054ac"}, - {file = "charset_normalizer-3.2.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f25c229a6ba38a35ae6e25ca1264621cc25d4d38dca2942a7fce0b67a4efe918"}, - {file = "charset_normalizer-3.2.0-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:2efb1bd13885392adfda4614c33d3b68dee4921fd0ac1d3988f8cbb7d589e72a"}, - {file = "charset_normalizer-3.2.0-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:1f30b48dd7fa1474554b0b0f3fdfdd4c13b5c737a3c6284d3cdc424ec0ffff3a"}, - {file = "charset_normalizer-3.2.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:246de67b99b6851627d945db38147d1b209a899311b1305dd84916f2b88526c6"}, - {file = "charset_normalizer-3.2.0-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:9bd9b3b31adcb054116447ea22caa61a285d92e94d710aa5ec97992ff5eb7cf3"}, - {file = "charset_normalizer-3.2.0-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:8c2f5e83493748286002f9369f3e6607c565a6a90425a3a1fef5ae32a36d749d"}, - {file = "charset_normalizer-3.2.0-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:3170c9399da12c9dc66366e9d14da8bf7147e1e9d9ea566067bbce7bb74bd9c2"}, - {file = "charset_normalizer-3.2.0-cp311-cp311-musllinux_1_1_ppc64le.whl", hash = "sha256:7a4826ad2bd6b07ca615c74ab91f32f6c96d08f6fcc3902ceeedaec8cdc3bcd6"}, - {file = "charset_normalizer-3.2.0-cp311-cp311-musllinux_1_1_s390x.whl", hash = "sha256:3b1613dd5aee995ec6d4c69f00378bbd07614702a315a2cf6c1d21461fe17c23"}, - {file = "charset_normalizer-3.2.0-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:9e608aafdb55eb9f255034709e20d5a83b6d60c054df0802fa9c9883d0a937aa"}, - {file = "charset_normalizer-3.2.0-cp311-cp311-win32.whl", hash = "sha256:f2a1d0fd4242bd8643ce6f98927cf9c04540af6efa92323e9d3124f57727bfc1"}, - {file = "charset_normalizer-3.2.0-cp311-cp311-win_amd64.whl", hash = "sha256:681eb3d7e02e3c3655d1b16059fbfb605ac464c834a0c629048a30fad2b27489"}, - {file = "charset_normalizer-3.2.0-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:c57921cda3a80d0f2b8aec7e25c8aa14479ea92b5b51b6876d975d925a2ea346"}, - {file = "charset_normalizer-3.2.0-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:41b25eaa7d15909cf3ac4c96088c1f266a9a93ec44f87f1d13d4a0e86c81b982"}, - {file = "charset_normalizer-3.2.0-cp37-cp37m-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:f058f6963fd82eb143c692cecdc89e075fa0828db2e5b291070485390b2f1c9c"}, - {file = "charset_normalizer-3.2.0-cp37-cp37m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = 
"sha256:a7647ebdfb9682b7bb97e2a5e7cb6ae735b1c25008a70b906aecca294ee96cf4"}, - {file = "charset_normalizer-3.2.0-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:eef9df1eefada2c09a5e7a40991b9fc6ac6ef20b1372abd48d2794a316dc0449"}, - {file = "charset_normalizer-3.2.0-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:e03b8895a6990c9ab2cdcd0f2fe44088ca1c65ae592b8f795c3294af00a461c3"}, - {file = "charset_normalizer-3.2.0-cp37-cp37m-musllinux_1_1_aarch64.whl", hash = "sha256:ee4006268ed33370957f55bf2e6f4d263eaf4dc3cfc473d1d90baff6ed36ce4a"}, - {file = "charset_normalizer-3.2.0-cp37-cp37m-musllinux_1_1_i686.whl", hash = "sha256:c4983bf937209c57240cff65906b18bb35e64ae872da6a0db937d7b4af845dd7"}, - {file = "charset_normalizer-3.2.0-cp37-cp37m-musllinux_1_1_ppc64le.whl", hash = "sha256:3bb7fda7260735efe66d5107fb7e6af6a7c04c7fce9b2514e04b7a74b06bf5dd"}, - {file = "charset_normalizer-3.2.0-cp37-cp37m-musllinux_1_1_s390x.whl", hash = "sha256:72814c01533f51d68702802d74f77ea026b5ec52793c791e2da806a3844a46c3"}, - {file = "charset_normalizer-3.2.0-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:70c610f6cbe4b9fce272c407dd9d07e33e6bf7b4aa1b7ffb6f6ded8e634e3592"}, - {file = "charset_normalizer-3.2.0-cp37-cp37m-win32.whl", hash = "sha256:a401b4598e5d3f4a9a811f3daf42ee2291790c7f9d74b18d75d6e21dda98a1a1"}, - {file = "charset_normalizer-3.2.0-cp37-cp37m-win_amd64.whl", hash = "sha256:c0b21078a4b56965e2b12f247467b234734491897e99c1d51cee628da9786959"}, - {file = "charset_normalizer-3.2.0-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:95eb302ff792e12aba9a8b8f8474ab229a83c103d74a750ec0bd1c1eea32e669"}, - {file = "charset_normalizer-3.2.0-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:1a100c6d595a7f316f1b6f01d20815d916e75ff98c27a01ae817439ea7726329"}, - {file = "charset_normalizer-3.2.0-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:6339d047dab2780cc6220f46306628e04d9750f02f983ddb37439ca47ced7149"}, - {file = "charset_normalizer-3.2.0-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e4b749b9cc6ee664a3300bb3a273c1ca8068c46be705b6c31cf5d276f8628a94"}, - {file = "charset_normalizer-3.2.0-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:a38856a971c602f98472050165cea2cdc97709240373041b69030be15047691f"}, - {file = "charset_normalizer-3.2.0-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:f87f746ee241d30d6ed93969de31e5ffd09a2961a051e60ae6bddde9ec3583aa"}, - {file = "charset_normalizer-3.2.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:89f1b185a01fe560bc8ae5f619e924407efca2191b56ce749ec84982fc59a32a"}, - {file = "charset_normalizer-3.2.0-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:e1c8a2f4c69e08e89632defbfabec2feb8a8d99edc9f89ce33c4b9e36ab63037"}, - {file = "charset_normalizer-3.2.0-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:2f4ac36d8e2b4cc1aa71df3dd84ff8efbe3bfb97ac41242fbcfc053c67434f46"}, - {file = "charset_normalizer-3.2.0-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:a386ebe437176aab38c041de1260cd3ea459c6ce5263594399880bbc398225b2"}, - {file = "charset_normalizer-3.2.0-cp38-cp38-musllinux_1_1_ppc64le.whl", hash = "sha256:ccd16eb18a849fd8dcb23e23380e2f0a354e8daa0c984b8a732d9cfaba3a776d"}, - {file = "charset_normalizer-3.2.0-cp38-cp38-musllinux_1_1_s390x.whl", hash = "sha256:e6a5bf2cba5ae1bb80b154ed68a3cfa2fa00fde979a7f50d6598d3e17d9ac20c"}, - 
{file = "charset_normalizer-3.2.0-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:45de3f87179c1823e6d9e32156fb14c1927fcc9aba21433f088fdfb555b77c10"}, - {file = "charset_normalizer-3.2.0-cp38-cp38-win32.whl", hash = "sha256:1000fba1057b92a65daec275aec30586c3de2401ccdcd41f8a5c1e2c87078706"}, - {file = "charset_normalizer-3.2.0-cp38-cp38-win_amd64.whl", hash = "sha256:8b2c760cfc7042b27ebdb4a43a4453bd829a5742503599144d54a032c5dc7e9e"}, - {file = "charset_normalizer-3.2.0-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:855eafa5d5a2034b4621c74925d89c5efef61418570e5ef9b37717d9c796419c"}, - {file = "charset_normalizer-3.2.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:203f0c8871d5a7987be20c72442488a0b8cfd0f43b7973771640fc593f56321f"}, - {file = "charset_normalizer-3.2.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:e857a2232ba53ae940d3456f7533ce6ca98b81917d47adc3c7fd55dad8fab858"}, - {file = "charset_normalizer-3.2.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5e86d77b090dbddbe78867a0275cb4df08ea195e660f1f7f13435a4649e954e5"}, - {file = "charset_normalizer-3.2.0-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:c4fb39a81950ec280984b3a44f5bd12819953dc5fa3a7e6fa7a80db5ee853952"}, - {file = "charset_normalizer-3.2.0-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:2dee8e57f052ef5353cf608e0b4c871aee320dd1b87d351c28764fc0ca55f9f4"}, - {file = "charset_normalizer-3.2.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8700f06d0ce6f128de3ccdbc1acaea1ee264d2caa9ca05daaf492fde7c2a7200"}, - {file = "charset_normalizer-3.2.0-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:1920d4ff15ce893210c1f0c0e9d19bfbecb7983c76b33f046c13a8ffbd570252"}, - {file = "charset_normalizer-3.2.0-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:c1c76a1743432b4b60ab3358c937a3fe1341c828ae6194108a94c69028247f22"}, - {file = "charset_normalizer-3.2.0-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:f7560358a6811e52e9c4d142d497f1a6e10103d3a6881f18d04dbce3729c0e2c"}, - {file = "charset_normalizer-3.2.0-cp39-cp39-musllinux_1_1_ppc64le.whl", hash = "sha256:c8063cf17b19661471ecbdb3df1c84f24ad2e389e326ccaf89e3fb2484d8dd7e"}, - {file = "charset_normalizer-3.2.0-cp39-cp39-musllinux_1_1_s390x.whl", hash = "sha256:cd6dbe0238f7743d0efe563ab46294f54f9bc8f4b9bcf57c3c666cc5bc9d1299"}, - {file = "charset_normalizer-3.2.0-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:1249cbbf3d3b04902ff081ffbb33ce3377fa6e4c7356f759f3cd076cc138d020"}, - {file = "charset_normalizer-3.2.0-cp39-cp39-win32.whl", hash = "sha256:6c409c0deba34f147f77efaa67b8e4bb83d2f11c8806405f76397ae5b8c0d1c9"}, - {file = "charset_normalizer-3.2.0-cp39-cp39-win_amd64.whl", hash = "sha256:7095f6fbfaa55defb6b733cfeb14efaae7a29f0b59d8cf213be4e7ca0b857b80"}, - {file = "charset_normalizer-3.2.0-py3-none-any.whl", hash = "sha256:8e098148dd37b4ce3baca71fb394c81dc5d9c7728c95df695d2dca218edf40e6"}, -] - -[[package]] -name = "chromadb" -version = "0.3.26" -description = "Chroma." 
-optional = false -python-versions = ">=3.7" -files = [ - {file = "chromadb-0.3.26-py3-none-any.whl", hash = "sha256:45a7848ee3ed8b694ca5789e5fd723406b76a13fa46f9a9a769f93317f29894c"}, - {file = "chromadb-0.3.26.tar.gz", hash = "sha256:a9b596d507f081993f2e32a7dcacabbbec2f6aebc2b6defe524442b07e265296"}, -] - -[package.dependencies] -clickhouse-connect = ">=0.5.7" -duckdb = ">=0.7.1" -fastapi = ">=0.85.1" -hnswlib = ">=0.7" -numpy = ">=1.21.6" -onnxruntime = ">=1.14.1" -overrides = ">=7.3.1" -pandas = ">=1.3" -posthog = ">=2.4.0" -pulsar-client = ">=3.1.0" -pydantic = ">=1.9" -requests = ">=2.28" -tokenizers = ">=0.13.2" -tqdm = ">=4.65.0" -typing-extensions = ">=4.5.0" -uvicorn = {version = ">=0.18.3", extras = ["standard"]} - -[[package]] -name = "click" -version = "8.1.4" -description = "Composable command line interface toolkit" -optional = false -python-versions = ">=3.7" -files = [ - {file = "click-8.1.4-py3-none-any.whl", hash = "sha256:2739815aaa5d2c986a88f1e9230c55e17f0caad3d958a5e13ad0797c166db9e3"}, - {file = "click-8.1.4.tar.gz", hash = "sha256:b97d0c74955da062a7d4ef92fadb583806a585b2ea81958a81bd72726cbb8e37"}, -] - -[package.dependencies] -colorama = {version = "*", markers = "platform_system == \"Windows\""} - -[[package]] -name = "clickhouse-connect" -version = "0.6.6" -description = "ClickHouse Database Core Driver for Python, Pandas, and Superset" -optional = false -python-versions = "~=3.7" -files = [ - {file = "clickhouse-connect-0.6.6.tar.gz", hash = "sha256:28d261b95fe9818f4d8bc4ad48087cbff3c9f0b6574ff04d234ed5bca6619474"}, - {file = "clickhouse_connect-0.6.6-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:31187a9947f5771c9e2a4c5d5c33d8c42f1c0f83b1223277c8faf47da0fcd1dc"}, - {file = "clickhouse_connect-0.6.6-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:1e1713d1f9f294c0cf05ded6f7eff227dde2b19f0d19423fbbeb05fbf5d7c484"}, - {file = "clickhouse_connect-0.6.6-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:961c463de6f0de93fc11f1c1f81efc1ec5b5895481cfdf79b3f832e0e242e7e1"}, - {file = "clickhouse_connect-0.6.6-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:18900f1a13b3b120252fc3583ca1e0fc4d3a33ea98fcf63d33d168a469561056"}, - {file = "clickhouse_connect-0.6.6-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:f4cbbea1a943e742ea649c82f85109b9a9928e61b038923de2813977966acd76"}, - {file = "clickhouse_connect-0.6.6-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:2714ab61f063a65419278b97f8785ce2440fdb1ef46d9a6703cef9cd38517521"}, - {file = "clickhouse_connect-0.6.6-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:823756569f6bea58ff9286cf494abaca5db8652e33ee4a6e7ecb40efbf945088"}, - {file = "clickhouse_connect-0.6.6-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:11aff145aacfae92b941b95ec5943fb62ea241ec2225b8ecefc4cadadf699893"}, - {file = "clickhouse_connect-0.6.6-cp310-cp310-win32.whl", hash = "sha256:4f5f9e3dcece211dc711088a5b264e66e8198b878bdf99619a3a7c54976c118d"}, - {file = "clickhouse_connect-0.6.6-cp310-cp310-win_amd64.whl", hash = "sha256:8268927ef8d476ef4c81d9562d049f38bc534c4d1d441e072cf8428f08ff6eaa"}, - {file = "clickhouse_connect-0.6.6-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:5f9cb2ebe0deaa78c942888aad32fa42beb4e75c2377e8784baf3d737c23e5f1"}, - {file = "clickhouse_connect-0.6.6-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:d502b7f35008facf2774f411eed6b35010923acaac254a8c5683fdf8a11abd62"}, - {file = 
"clickhouse_connect-0.6.6-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:87e0f2afe464be0947947d98482eb12b25be8857ae1a31c1aaa17a67f616174d"}, - {file = "clickhouse_connect-0.6.6-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:69f2c517943eeb7663a9d42bd9b737b8ec5513ddcf58f2372f8b2074a315bae2"}, - {file = "clickhouse_connect-0.6.6-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:aa6c2b488cf9558c2b71a2599d812fe4368d5199edaa011731a8bc7bfe019751"}, - {file = "clickhouse_connect-0.6.6-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:df9e80d0b3f5614d38026e7e2e7e7412dec942df8d765c082177879b37e678e2"}, - {file = "clickhouse_connect-0.6.6-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:a20351fb2ae47aac1ae9b1de0585949616baedd6dbdee5272f466a2aea6ec4dd"}, - {file = "clickhouse_connect-0.6.6-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:af40eaa20998d96198563748a6fd9796843b6f22e9e95b2136aabd917db33fff"}, - {file = "clickhouse_connect-0.6.6-cp311-cp311-win32.whl", hash = "sha256:9591a9bfa58ace467544227f83226b22a1554e2db4cfcf658f25f43c9d94e960"}, - {file = "clickhouse_connect-0.6.6-cp311-cp311-win_amd64.whl", hash = "sha256:3b6f6159f8eddb0cad4d7e0cbad5944e97e0146ee9f416fc663f7bd3d4e9ea46"}, - {file = "clickhouse_connect-0.6.6-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:8b941c85fe9ddd5e5edf6fc7458563d9e51ad900d95fe0b87b0458be166693a1"}, - {file = "clickhouse_connect-0.6.6-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c642696a758fa726c86ca624dd40acded100d79a9f4bd9f5b56ba0ea4dc44099"}, - {file = "clickhouse_connect-0.6.6-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:57b6b36b316451c1bdc4450f9418c017af84af57d52d03cd4deb85480819a934"}, - {file = "clickhouse_connect-0.6.6-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:17cfb1d103b47350c3ba824641fb5ba730e6e29274077a6f8975a3394a1abadb"}, - {file = "clickhouse_connect-0.6.6-cp37-cp37m-musllinux_1_1_aarch64.whl", hash = "sha256:d52c7e7560666b93c078bf082e4ed87689fd283e6295a6d8d1dd491d4d7b6072"}, - {file = "clickhouse_connect-0.6.6-cp37-cp37m-musllinux_1_1_i686.whl", hash = "sha256:0a6d498b689aa09e9d1b0051480a04ecc3509002f54bfb82998d030b4675bb24"}, - {file = "clickhouse_connect-0.6.6-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:28c876f7a4713662af2ded7350a0262756ec4da9262bb76cc85cfe2e88015b74"}, - {file = "clickhouse_connect-0.6.6-cp37-cp37m-win32.whl", hash = "sha256:74bf0a95c7c5644948be0ba9c0abcad7615b806fd2545501862526dbe684db71"}, - {file = "clickhouse_connect-0.6.6-cp37-cp37m-win_amd64.whl", hash = "sha256:0aaa4194d11cb7513de69b791911ff60b3ad8b86f125446a37347208e9b9ae6d"}, - {file = "clickhouse_connect-0.6.6-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:3b873d138dfedbe761f2d66ad1257ea253394c4f8dcffd6ff34dfb990f13a18b"}, - {file = "clickhouse_connect-0.6.6-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:7140705d05a05ac39eecf86727ab55985e5dba9d1734df8921cc417853a18b7f"}, - {file = "clickhouse_connect-0.6.6-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:69085fa0f4e5da5cef4ae5249e19f10d91e57ae78628e49e8853b71b6003dbae"}, - {file = "clickhouse_connect-0.6.6-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7e6ec081d87cc37be3ecf60b88002c58add76a72b4124525cb5cd28539e7d488"}, - {file = 
"clickhouse_connect-0.6.6-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:1afe04eb239b72bc9fa4f1999cd292f82af507cbe1f07546f26a3332c50a294b"}, - {file = "clickhouse_connect-0.6.6-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:244bbf7ad92f1f030378412358c47cd377aa6d469b548dba2406a7894c8da2ab"}, - {file = "clickhouse_connect-0.6.6-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:69e91bdb25166b6fa4eb55601d86fa57dee82070bce9b97a858c8973615ab8b8"}, - {file = "clickhouse_connect-0.6.6-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:d2627c8a9625e1c9058cfb5b231a0d0180ed9215d901b601d367de598f27a90d"}, - {file = "clickhouse_connect-0.6.6-cp38-cp38-win32.whl", hash = "sha256:87fb937b34b561703eaba5781404736120bab691f4525096d5dfb4b99d4890a6"}, - {file = "clickhouse_connect-0.6.6-cp38-cp38-win_amd64.whl", hash = "sha256:366c5765e6b7863b3a8d565d5a3b27f9f8731f6f4b016048fa172c6ad6485594"}, - {file = "clickhouse_connect-0.6.6-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:0c1b0d8bee6399f5b68bb0832fae51fd0f5e4bcb539bae2df36d8433b6e38a0b"}, - {file = "clickhouse_connect-0.6.6-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:f3f7e3ead1429ec82b9cd0cf7b807bacf69d895042f75276f63d732378344376"}, - {file = "clickhouse_connect-0.6.6-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:36df02ebfbfa4dbe3667bf5b3402ff0193d0f682b9aa09d71469c15745473d8e"}, - {file = "clickhouse_connect-0.6.6-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:aa636b0cbbff52c9fafe287d1d818fc9947feaa840c951b8bfd8f8d4d1ee45a0"}, - {file = "clickhouse_connect-0.6.6-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:af4968b6b48baae43d62c241bee9e1c8f680ee3d054254e3959c2d2fb7d370ee"}, - {file = "clickhouse_connect-0.6.6-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:a4156de52fe1f9b19f8c3a820d57c012a55644c56a87c8d31ecff89115959d60"}, - {file = "clickhouse_connect-0.6.6-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:fccbe34878e6202ff5715284cbe57e748d36f4c8ad6217f9c80f84a086013fb9"}, - {file = "clickhouse_connect-0.6.6-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:70bfe48c0e4340ccf234b691fbd52f32db74649cb84ca28b98a211cc3e30b30c"}, - {file = "clickhouse_connect-0.6.6-cp39-cp39-win32.whl", hash = "sha256:9f80b64e2268293a918721e1c122c54e2a1592bb74824fdd70e9add9fbcea31a"}, - {file = "clickhouse_connect-0.6.6-cp39-cp39-win_amd64.whl", hash = "sha256:04a5030b76ee930b18eb3aeb7847146c2fa29da0feb0ec7dd3a0564a3de944f1"}, - {file = "clickhouse_connect-0.6.6-pp37-pypy37_pp73-macosx_10_9_x86_64.whl", hash = "sha256:75e84c827c8180d5dc66b0e99dba422a3ffd2c7d8ee5ba80e00b9c942dff8a36"}, - {file = "clickhouse_connect-0.6.6-pp37-pypy37_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:6e627061336142d02e9c900a96bcd87372e88f05755bf19b158e68472b99a921"}, - {file = "clickhouse_connect-0.6.6-pp37-pypy37_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:194f72e8f3f24c207aa87113b8d11674dab12b35232fd8b7b19b97257796be45"}, - {file = "clickhouse_connect-0.6.6-pp37-pypy37_pp73-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:bf755b46089ee6a7f1ab3e24fc6fbacefc54cfefceb0ed81ebf198abf6937dac"}, - {file = "clickhouse_connect-0.6.6-pp37-pypy37_pp73-win_amd64.whl", hash = "sha256:39e58756a13872a24304b1987fafb7d5112ea88469eb55303b1183ebdd7a0be5"}, - {file = 
"clickhouse_connect-0.6.6-pp38-pypy38_pp73-macosx_10_9_x86_64.whl", hash = "sha256:1e29de1264ffa26eb822e57c5715974c9818ae8e16bb114e54352d66947cdf7f"}, - {file = "clickhouse_connect-0.6.6-pp38-pypy38_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a74ed74427aaf10d2e8f7697b8ec53479f6068287ea695a5f3d3927db40be3c3"}, - {file = "clickhouse_connect-0.6.6-pp38-pypy38_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:abc910b0f6c93d0d703809fd92cf19b71dcaf8c6d5f328deddae1709061a0aa2"}, - {file = "clickhouse_connect-0.6.6-pp38-pypy38_pp73-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:23b17236e08da8b5d737ccd983db56a2d2222955a49c4b312b12e4a2b4a06c9b"}, - {file = "clickhouse_connect-0.6.6-pp38-pypy38_pp73-win_amd64.whl", hash = "sha256:d4d76560d0ce84d0ba550918433dd1f8da6983edabe2685cd84679cd7a90c179"}, - {file = "clickhouse_connect-0.6.6-pp39-pypy39_pp73-macosx_10_9_x86_64.whl", hash = "sha256:815bd0d5f40174716ffdf1adab066cd0e36c82c81b227224fb7281bdf8734eb6"}, - {file = "clickhouse_connect-0.6.6-pp39-pypy39_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:82abd319ba51e0c5c2d123e2cf30b1604b0d46f4de694096aa911ddd63701f60"}, - {file = "clickhouse_connect-0.6.6-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:aa3eea5dac3a7cd52523b556ecd05940c4710c96b6e39ec5a05ed7859bddc7f6"}, - {file = "clickhouse_connect-0.6.6-pp39-pypy39_pp73-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:0bbc28cdf903b4b2805199ce7d4580814a8b9bb4766ddd835cab46a81e6fcd63"}, - {file = "clickhouse_connect-0.6.6-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:5fc4deda5a97e672135b4330d81109b443266aa948b09a24a02db58c0fc96bc1"}, -] - -[package.dependencies] -certifi = "*" -importlib-metadata = "*" -lz4 = "*" -pytz = "*" -urllib3 = ">=1.26" -zstandard = "*" - -[package.extras] -arrow = ["pyarrow"] -numpy = ["numpy"] -orjson = ["orjson"] -pandas = ["pandas"] -sqlalchemy = ["sqlalchemy (>1.3.21,<2.0)"] - -[[package]] -name = "colorama" -version = "0.4.6" -description = "Cross-platform colored terminal text." -optional = false -python-versions = "!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*,!=3.4.*,!=3.5.*,!=3.6.*,>=2.7" -files = [ - {file = "colorama-0.4.6-py2.py3-none-any.whl", hash = "sha256:4f1d9991f5acc0ca119f9d443620b77f9d6b33703e51011c16baf57afb285fc6"}, - {file = "colorama-0.4.6.tar.gz", hash = "sha256:08695f5cb7ed6e0531a20572697297273c47b8cae5a63ffc6d6ed5c201be6e44"}, -] - -[[package]] -name = "colorclass" -version = "2.2.2" -description = "Colorful worry-free console applications for Linux, Mac OS X, and Windows." 
-optional = false -python-versions = ">=2.6" -files = [ - {file = "colorclass-2.2.2-py2.py3-none-any.whl", hash = "sha256:6f10c273a0ef7a1150b1120b6095cbdd68e5cf36dfd5d0fc957a2500bbf99a55"}, - {file = "colorclass-2.2.2.tar.gz", hash = "sha256:6d4fe287766166a98ca7bc6f6312daf04a0481b1eda43e7173484051c0ab4366"}, -] - -[[package]] -name = "coloredlogs" -version = "15.0.1" -description = "Colored terminal output for Python's logging module" -optional = false -python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*" -files = [ - {file = "coloredlogs-15.0.1-py2.py3-none-any.whl", hash = "sha256:612ee75c546f53e92e70049c9dbfcc18c935a2b9a53b66085ce9ef6a6e5c0934"}, - {file = "coloredlogs-15.0.1.tar.gz", hash = "sha256:7c991aa71a4577af2f82600d8f8f3a89f936baeaf9b50a9c197da014e5bf16b0"}, -] - -[package.dependencies] -humanfriendly = ">=9.1" - -[package.extras] -cron = ["capturer (>=2.4)"] - -[[package]] -name = "compressed-rtf" -version = "1.0.6" -description = "Compressed Rich Text Format (RTF) compression and decompression package" -optional = false -python-versions = "*" -files = [ - {file = "compressed_rtf-1.0.6.tar.gz", hash = "sha256:c1c827f1d124d24608981a56e8b8691eb1f2a69a78ccad6440e7d92fde1781dd"}, -] - -[[package]] -name = "cryptography" -version = "41.0.1" -description = "cryptography is a package which provides cryptographic recipes and primitives to Python developers." -optional = false -python-versions = ">=3.7" -files = [ - {file = "cryptography-41.0.1-cp37-abi3-macosx_10_12_universal2.whl", hash = "sha256:f73bff05db2a3e5974a6fd248af2566134d8981fd7ab012e5dd4ddb1d9a70699"}, - {file = "cryptography-41.0.1-cp37-abi3-macosx_10_12_x86_64.whl", hash = "sha256:1a5472d40c8f8e91ff7a3d8ac6dfa363d8e3138b961529c996f3e2df0c7a411a"}, - {file = "cryptography-41.0.1-cp37-abi3-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:7fa01527046ca5facdf973eef2535a27fec4cb651e4daec4d043ef63f6ecd4ca"}, - {file = "cryptography-41.0.1-cp37-abi3-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b46e37db3cc267b4dea1f56da7346c9727e1209aa98487179ee8ebed09d21e43"}, - {file = "cryptography-41.0.1-cp37-abi3-manylinux_2_28_aarch64.whl", hash = "sha256:d198820aba55660b4d74f7b5fd1f17db3aa5eb3e6893b0a41b75e84e4f9e0e4b"}, - {file = "cryptography-41.0.1-cp37-abi3-manylinux_2_28_x86_64.whl", hash = "sha256:948224d76c4b6457349d47c0c98657557f429b4e93057cf5a2f71d603e2fc3a3"}, - {file = "cryptography-41.0.1-cp37-abi3-musllinux_1_1_aarch64.whl", hash = "sha256:059e348f9a3c1950937e1b5d7ba1f8e968508ab181e75fc32b879452f08356db"}, - {file = "cryptography-41.0.1-cp37-abi3-musllinux_1_1_x86_64.whl", hash = "sha256:b4ceb5324b998ce2003bc17d519080b4ec8d5b7b70794cbd2836101406a9be31"}, - {file = "cryptography-41.0.1-cp37-abi3-win32.whl", hash = "sha256:8f4ab7021127a9b4323537300a2acfb450124b2def3756f64dc3a3d2160ee4b5"}, - {file = "cryptography-41.0.1-cp37-abi3-win_amd64.whl", hash = "sha256:1fee5aacc7367487b4e22484d3c7e547992ed726d14864ee33c0176ae43b0d7c"}, - {file = "cryptography-41.0.1-pp38-pypy38_pp73-macosx_10_12_x86_64.whl", hash = "sha256:9a6c7a3c87d595608a39980ebaa04d5a37f94024c9f24eb7d10262b92f739ddb"}, - {file = "cryptography-41.0.1-pp38-pypy38_pp73-manylinux_2_28_aarch64.whl", hash = "sha256:5d092fdfedaec4cbbffbf98cddc915ba145313a6fdaab83c6e67f4e6c218e6f3"}, - {file = "cryptography-41.0.1-pp38-pypy38_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:1a8e6c2de6fbbcc5e14fd27fb24414507cb3333198ea9ab1258d916f00bc3039"}, - {file = "cryptography-41.0.1-pp38-pypy38_pp73-win_amd64.whl", hash = 
"sha256:cb33ccf15e89f7ed89b235cff9d49e2e62c6c981a6061c9c8bb47ed7951190bc"}, - {file = "cryptography-41.0.1-pp39-pypy39_pp73-macosx_10_12_x86_64.whl", hash = "sha256:5f0ff6e18d13a3de56f609dd1fd11470918f770c6bd5d00d632076c727d35485"}, - {file = "cryptography-41.0.1-pp39-pypy39_pp73-manylinux_2_28_aarch64.whl", hash = "sha256:7bfc55a5eae8b86a287747053140ba221afc65eb06207bedf6e019b8934b477c"}, - {file = "cryptography-41.0.1-pp39-pypy39_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:eb8163f5e549a22888c18b0d53d6bb62a20510060a22fd5a995ec8a05268df8a"}, - {file = "cryptography-41.0.1-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:8dde71c4169ec5ccc1087bb7521d54251c016f126f922ab2dfe6649170a3b8c5"}, - {file = "cryptography-41.0.1.tar.gz", hash = "sha256:d34579085401d3f49762d2f7d6634d6b6c2ae1242202e860f4d26b046e3a1006"}, -] - -[package.dependencies] -cffi = ">=1.12" - -[package.extras] -docs = ["sphinx (>=5.3.0)", "sphinx-rtd-theme (>=1.1.1)"] -docstest = ["pyenchant (>=1.6.11)", "sphinxcontrib-spelling (>=4.0.1)", "twine (>=1.12.0)"] -nox = ["nox"] -pep8test = ["black", "check-sdist", "mypy", "ruff"] -sdist = ["build"] -ssh = ["bcrypt (>=3.1.5)"] -test = ["pretend", "pytest (>=6.2.0)", "pytest-benchmark", "pytest-cov", "pytest-xdist"] -test-randomorder = ["pytest-randomly"] - -[[package]] -name = "dataclasses-json" -version = "0.5.9" -description = "Easily serialize dataclasses to and from JSON" -optional = false -python-versions = ">=3.6" -files = [ - {file = "dataclasses-json-0.5.9.tar.gz", hash = "sha256:e9ac87b73edc0141aafbce02b44e93553c3123ad574958f0fe52a534b6707e8e"}, - {file = "dataclasses_json-0.5.9-py3-none-any.whl", hash = "sha256:1280542631df1c375b7bc92e5b86d39e06c44760d7e3571a537b3b8acabf2f0c"}, -] - -[package.dependencies] -marshmallow = ">=3.3.0,<4.0.0" -marshmallow-enum = ">=1.5.1,<2.0.0" -typing-inspect = ">=0.4.0" - -[package.extras] -dev = ["flake8", "hypothesis", "ipython", "mypy (>=0.710)", "portray", "pytest (>=7.2.0)", "setuptools", "simplejson", "twine", "types-dataclasses", "wheel"] - -[[package]] -name = "duckdb" -version = "0.8.1" -description = "DuckDB embedded database" -optional = false -python-versions = "*" -files = [ - {file = "duckdb-0.8.1-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:14781d21580ee72aba1f5dcae7734674c9b6c078dd60470a08b2b420d15b996d"}, - {file = "duckdb-0.8.1-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:f13bf7ab0e56ddd2014ef762ae4ee5ea4df5a69545ce1191b8d7df8118ba3167"}, - {file = "duckdb-0.8.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:e4032042d8363e55365bbca3faafc6dc336ed2aad088f10ae1a534ebc5bcc181"}, - {file = "duckdb-0.8.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:31a71bd8f0b0ca77c27fa89b99349ef22599ffefe1e7684ae2e1aa2904a08684"}, - {file = "duckdb-0.8.1-cp310-cp310-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:24568d6e48f3dbbf4a933109e323507a46b9399ed24c5d4388c4987ddc694fd0"}, - {file = "duckdb-0.8.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:297226c0dadaa07f7c5ae7cbdb9adba9567db7b16693dbd1b406b739ce0d7924"}, - {file = "duckdb-0.8.1-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:5792cf777ece2c0591194006b4d3e531f720186102492872cb32ddb9363919cf"}, - {file = "duckdb-0.8.1-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:12803f9f41582b68921d6b21f95ba7a51e1d8f36832b7d8006186f58c3d1b344"}, - {file = "duckdb-0.8.1-cp310-cp310-win32.whl", hash = "sha256:d0953d5a2355ddc49095e7aef1392b7f59c5be5cec8cdc98b9d9dc1f01e7ce2b"}, - {file = 
"duckdb-0.8.1-cp310-cp310-win_amd64.whl", hash = "sha256:6e6583c98a7d6637e83bcadfbd86e1f183917ea539f23b6b41178f32f813a5eb"}, - {file = "duckdb-0.8.1-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:fad7ed0d4415f633d955ac24717fa13a500012b600751d4edb050b75fb940c25"}, - {file = "duckdb-0.8.1-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:81ae602f34d38d9c48dd60f94b89f28df3ef346830978441b83c5b4eae131d08"}, - {file = "duckdb-0.8.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:7d75cfe563aaa058d3b4ccaaa371c6271e00e3070df5de72361fd161b2fe6780"}, - {file = "duckdb-0.8.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:8dbb55e7a3336f2462e5e916fc128c47fe1c03b6208d6bd413ac11ed95132aa0"}, - {file = "duckdb-0.8.1-cp311-cp311-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:a6df53efd63b6fdf04657385a791a4e3c4fb94bfd5db181c4843e2c46b04fef5"}, - {file = "duckdb-0.8.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:1b188b80b70d1159b17c9baaf541c1799c1ce8b2af4add179a9eed8e2616be96"}, - {file = "duckdb-0.8.1-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:5ad481ee353f31250b45d64b4a104e53b21415577943aa8f84d0af266dc9af85"}, - {file = "duckdb-0.8.1-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:d1d1b1729993611b1892509d21c21628917625cdbe824a61ce891baadf684b32"}, - {file = "duckdb-0.8.1-cp311-cp311-win32.whl", hash = "sha256:2d8f9cc301e8455a4f89aa1088b8a2d628f0c1f158d4cf9bc78971ed88d82eea"}, - {file = "duckdb-0.8.1-cp311-cp311-win_amd64.whl", hash = "sha256:07457a43605223f62d93d2a5a66b3f97731f79bbbe81fdd5b79954306122f612"}, - {file = "duckdb-0.8.1-cp36-cp36m-macosx_10_9_x86_64.whl", hash = "sha256:d2c8062c3e978dbcd80d712ca3e307de8a06bd4f343aa457d7dd7294692a3842"}, - {file = "duckdb-0.8.1-cp36-cp36m-win32.whl", hash = "sha256:fad486c65ae944eae2de0d590a0a4fb91a9893df98411d66cab03359f9cba39b"}, - {file = "duckdb-0.8.1-cp36-cp36m-win_amd64.whl", hash = "sha256:86fa4506622c52d2df93089c8e7075f1c4d0ba56f4bf27faebde8725355edf32"}, - {file = "duckdb-0.8.1-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:60e07a62782f88420046e30cc0e3de842d0901c4fd5b8e4d28b73826ec0c3f5e"}, - {file = "duckdb-0.8.1-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f18563675977f8cbf03748efee0165b4c8ef64e0cbe48366f78e2914d82138bb"}, - {file = "duckdb-0.8.1-cp37-cp37m-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:16e179443832bea8439ae4dff93cf1e42c545144ead7a4ef5f473e373eea925a"}, - {file = "duckdb-0.8.1-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a413d5267cb41a1afe69d30dd6d4842c588256a6fed7554c7e07dad251ede095"}, - {file = "duckdb-0.8.1-cp37-cp37m-musllinux_1_1_i686.whl", hash = "sha256:3784680df59eadd683b0a4c2375d451a64470ca54bd171c01e36951962b1d332"}, - {file = "duckdb-0.8.1-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:67a1725c2b01f9b53571ecf3f92959b652f60156c1c48fb35798302e39b3c1a2"}, - {file = "duckdb-0.8.1-cp37-cp37m-win32.whl", hash = "sha256:197d37e2588c5ad063e79819054eedb7550d43bf1a557d03ba8f8f67f71acc42"}, - {file = "duckdb-0.8.1-cp37-cp37m-win_amd64.whl", hash = "sha256:3843feb79edf100800f5037c32d5d5a5474fb94b32ace66c707b96605e7c16b2"}, - {file = "duckdb-0.8.1-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:624c889b0f2d656794757b3cc4fc58030d5e285f5ad2ef9fba1ea34a01dab7fb"}, - {file = "duckdb-0.8.1-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:fcbe3742d77eb5add2d617d487266d825e663270ef90253366137a47eaab9448"}, - {file = 
"duckdb-0.8.1-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:47516c9299d09e9dbba097b9fb339b389313c4941da5c54109df01df0f05e78c"}, - {file = "duckdb-0.8.1-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:cf1ba718b7522d34399446ebd5d4b9fcac0b56b6ac07bfebf618fd190ec37c1d"}, - {file = "duckdb-0.8.1-cp38-cp38-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:e36e35d38a9ae798fe8cf6a839e81494d5b634af89f4ec9483f4d0a313fc6bdb"}, - {file = "duckdb-0.8.1-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:23493313f88ce6e708a512daacad13e83e6d1ea0be204b175df1348f7fc78671"}, - {file = "duckdb-0.8.1-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:1fb9bf0b6f63616c8a4b9a6a32789045e98c108df100e6bac783dc1e36073737"}, - {file = "duckdb-0.8.1-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:12fc13ecd5eddd28b203b9e3999040d3a7374a8f4b833b04bd26b8c5685c2635"}, - {file = "duckdb-0.8.1-cp38-cp38-win32.whl", hash = "sha256:a12bf4b18306c9cb2c9ba50520317e6cf2de861f121d6f0678505fa83468c627"}, - {file = "duckdb-0.8.1-cp38-cp38-win_amd64.whl", hash = "sha256:e4e809358b9559c00caac4233e0e2014f3f55cd753a31c4bcbbd1b55ad0d35e4"}, - {file = "duckdb-0.8.1-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:7acedfc00d97fbdb8c3d120418c41ef3cb86ef59367f3a9a30dff24470d38680"}, - {file = "duckdb-0.8.1-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:99bfe264059cdc1e318769103f656f98e819cd4e231cd76c1d1a0327f3e5cef8"}, - {file = "duckdb-0.8.1-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:538b225f361066231bc6cd66c04a5561de3eea56115a5dd773e99e5d47eb1b89"}, - {file = "duckdb-0.8.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ae0be3f71a18cd8492d05d0fc1bc67d01d5a9457b04822d025b0fc8ee6efe32e"}, - {file = "duckdb-0.8.1-cp39-cp39-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:cd82ba63b58672e46c8ec60bc9946aa4dd7b77f21c1ba09633d8847ad9eb0d7b"}, - {file = "duckdb-0.8.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:780a34559aaec8354e83aa4b7b31b3555f1b2cf75728bf5ce11b89a950f5cdd9"}, - {file = "duckdb-0.8.1-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:01f0d4e9f7103523672bda8d3f77f440b3e0155dd3b2f24997bc0c77f8deb460"}, - {file = "duckdb-0.8.1-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:31f692decb98c2d57891da27180201d9e93bb470a3051fcf413e8da65bca37a5"}, - {file = "duckdb-0.8.1-cp39-cp39-win32.whl", hash = "sha256:e7fe93449cd309bbc67d1bf6f6392a6118e94a9a4479ab8a80518742e855370a"}, - {file = "duckdb-0.8.1-cp39-cp39-win_amd64.whl", hash = "sha256:81d670bc6807672f038332d9bf587037aabdd741b0810de191984325ed307abd"}, - {file = "duckdb-0.8.1.tar.gz", hash = "sha256:a54d37f4abc2afc4f92314aaa56ecf215a411f40af4bffe1e86bd25e62aceee9"}, -] - -[[package]] -name = "easygui" -version = "0.98.3" -description = "EasyGUI is a module for very simple, very easy GUI programming in Python. EasyGUI is different from other GUI generators in that EasyGUI is NOT event-driven. Instead, all GUI interactions are invoked by simple function calls." 
-optional = false -python-versions = "*" -files = [ - {file = "easygui-0.98.3-py2.py3-none-any.whl", hash = "sha256:33498710c68b5376b459cd3fc48d1d1f33822139eb3ed01defbc0528326da3ba"}, - {file = "easygui-0.98.3.tar.gz", hash = "sha256:d653ff79ee1f42f63b5a090f2f98ce02335d86ad8963b3ce2661805cafe99a04"}, -] - -[[package]] -name = "ebcdic" -version = "1.1.1" -description = "Additional EBCDIC codecs" -optional = false -python-versions = "*" -files = [ - {file = "ebcdic-1.1.1-py2.py3-none-any.whl", hash = "sha256:33b4cb729bc2d0bf46cc1847b0e5946897cb8d3f53520c5b9aa5fa98d7e735f1"}, -] - -[[package]] -name = "et-xmlfile" -version = "1.1.0" -description = "An implementation of lxml.xmlfile for the standard library" -optional = false -python-versions = ">=3.6" -files = [ - {file = "et_xmlfile-1.1.0-py3-none-any.whl", hash = "sha256:a2ba85d1d6a74ef63837eed693bcb89c3f752169b0e3e7ae5b16ca5e1b3deada"}, - {file = "et_xmlfile-1.1.0.tar.gz", hash = "sha256:8eb9e2bc2f8c97e37a2dc85a09ecdcdec9d8a396530a6d5a33b30b9a92da0c5c"}, -] - -[[package]] -name = "exceptiongroup" -version = "1.1.2" -description = "Backport of PEP 654 (exception groups)" -optional = false -python-versions = ">=3.7" -files = [ - {file = "exceptiongroup-1.1.2-py3-none-any.whl", hash = "sha256:e346e69d186172ca7cf029c8c1d16235aa0e04035e5750b4b95039e65204328f"}, - {file = "exceptiongroup-1.1.2.tar.gz", hash = "sha256:12c3e887d6485d16943a309616de20ae5582633e0a2eda17f4e10fd61c1e8af5"}, -] - -[package.extras] -test = ["pytest (>=6)"] - -[[package]] -name = "extract-msg" -version = "0.41.5" -description = "Extracts emails and attachments saved in Microsoft Outlook's .msg files" -optional = false -python-versions = ">=3.8" -files = [ - {file = "extract_msg-0.41.5-py2.py3-none-any.whl", hash = "sha256:ad70dcdab3701b0fae554168c9642ad4ebef7f2ec283313c55e895a6518911e5"}, - {file = "extract_msg-0.41.5.tar.gz", hash = "sha256:99d4fdc0c0912c836370bf9fbb6e77558bb978499c1b5fdd31634684e323885c"}, -] - -[package.dependencies] -beautifulsoup4 = ">=4.11.1,<4.13" -chardet = ">=4.0.0,<6" -compressed-rtf = ">=1.0.6,<2" -ebcdic = ">=1.1.1,<2" -imapclient = ">=2.3.0,<3" -olefile = "0.46" -red-black-tree-mod = "1.20" -RTFDE = "0.0.2" -tzlocal = ">=4.2,<6" - -[package.extras] -all = ["extract-msg[mime]"] -mime = ["python-magic (>=0.4.27,<0.5)"] - -[[package]] -name = "fastapi" -version = "0.100.0" -description = "FastAPI framework, high performance, easy to learn, fast to code, ready for production" -optional = false -python-versions = ">=3.7" -files = [ - {file = "fastapi-0.100.0-py3-none-any.whl", hash = "sha256:271662daf986da8fa98dc2b7c7f61c4abdfdccfb4786d79ed8b2878f172c6d5f"}, - {file = "fastapi-0.100.0.tar.gz", hash = "sha256:acb5f941ea8215663283c10018323ba7ea737c571b67fc7e88e9469c7eb1d12e"}, -] - -[package.dependencies] -pydantic = ">=1.7.4,<1.8 || >1.8,<1.8.1 || >1.8.1,<2.0.0 || >2.0.0,<2.0.1 || >2.0.1,<3.0.0" -starlette = ">=0.27.0,<0.28.0" -typing-extensions = ">=4.5.0" - -[package.extras] -all = ["email-validator (>=2.0.0)", "httpx (>=0.23.0)", "itsdangerous (>=1.1.0)", "jinja2 (>=2.11.2)", "orjson (>=3.2.1)", "pydantic-extra-types (>=2.0.0)", "pydantic-settings (>=2.0.0)", "python-multipart (>=0.0.5)", "pyyaml (>=5.3.1)", "ujson (>=4.0.1,!=4.0.2,!=4.1.0,!=4.2.0,!=4.3.0,!=5.0.0,!=5.1.0)", "uvicorn[standard] (>=0.12.0)"] - -[[package]] -name = "filelock" -version = "3.12.2" -description = "A platform independent file lock." 
-optional = false -python-versions = ">=3.7" -files = [ - {file = "filelock-3.12.2-py3-none-any.whl", hash = "sha256:cbb791cdea2a72f23da6ac5b5269ab0a0d161e9ef0100e653b69049a7706d1ec"}, - {file = "filelock-3.12.2.tar.gz", hash = "sha256:002740518d8aa59a26b0c76e10fb8c6e15eae825d34b6fdf670333fd7b938d81"}, -] - -[package.extras] -docs = ["furo (>=2023.5.20)", "sphinx (>=7.0.1)", "sphinx-autodoc-typehints (>=1.23,!=1.23.4)"] -testing = ["covdefaults (>=2.3)", "coverage (>=7.2.7)", "diff-cover (>=7.5)", "pytest (>=7.3.1)", "pytest-cov (>=4.1)", "pytest-mock (>=3.10)", "pytest-timeout (>=2.1)"] - -[[package]] -name = "filetype" -version = "1.2.0" -description = "Infer file type and MIME type of any file/buffer. No external dependencies." -optional = false -python-versions = "*" -files = [ - {file = "filetype-1.2.0-py2.py3-none-any.whl", hash = "sha256:7ce71b6880181241cf7ac8697a2f1eb6a8bd9b429f7ad6d27b8db9ba5f1c2d25"}, - {file = "filetype-1.2.0.tar.gz", hash = "sha256:66b56cd6474bf41d8c54660347d37afcc3f7d1970648de365c102ef77548aadb"}, -] - -[[package]] -name = "flatbuffers" -version = "23.5.26" -description = "The FlatBuffers serialization format for Python" -optional = false -python-versions = "*" -files = [ - {file = "flatbuffers-23.5.26-py2.py3-none-any.whl", hash = "sha256:c0ff356da363087b915fde4b8b45bdda73432fc17cddb3c8157472eab1422ad1"}, - {file = "flatbuffers-23.5.26.tar.gz", hash = "sha256:9ea1144cac05ce5d86e2859f431c6cd5e66cd9c78c558317c7955fb8d4c78d89"}, -] - -[[package]] -name = "frozenlist" -version = "1.3.3" -description = "A list-like structure which implements collections.abc.MutableSequence" -optional = false -python-versions = ">=3.7" -files = [ - {file = "frozenlist-1.3.3-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:ff8bf625fe85e119553b5383ba0fb6aa3d0ec2ae980295aaefa552374926b3f4"}, - {file = "frozenlist-1.3.3-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:dfbac4c2dfcc082fcf8d942d1e49b6aa0766c19d3358bd86e2000bf0fa4a9cf0"}, - {file = "frozenlist-1.3.3-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:b1c63e8d377d039ac769cd0926558bb7068a1f7abb0f003e3717ee003ad85530"}, - {file = "frozenlist-1.3.3-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:7fdfc24dcfce5b48109867c13b4cb15e4660e7bd7661741a391f821f23dfdca7"}, - {file = "frozenlist-1.3.3-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:2c926450857408e42f0bbc295e84395722ce74bae69a3b2aa2a65fe22cb14b99"}, - {file = "frozenlist-1.3.3-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:1841e200fdafc3d51f974d9d377c079a0694a8f06de2e67b48150328d66d5483"}, - {file = "frozenlist-1.3.3-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:f470c92737afa7d4c3aacc001e335062d582053d4dbe73cda126f2d7031068dd"}, - {file = "frozenlist-1.3.3-cp310-cp310-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:783263a4eaad7c49983fe4b2e7b53fa9770c136c270d2d4bbb6d2192bf4d9caf"}, - {file = "frozenlist-1.3.3-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:924620eef691990dfb56dc4709f280f40baee568c794b5c1885800c3ecc69816"}, - {file = "frozenlist-1.3.3-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:ae4dc05c465a08a866b7a1baf360747078b362e6a6dbeb0c57f234db0ef88ae0"}, - {file = "frozenlist-1.3.3-cp310-cp310-musllinux_1_1_ppc64le.whl", hash = "sha256:bed331fe18f58d844d39ceb398b77d6ac0b010d571cba8267c2e7165806b00ce"}, - {file = 
"frozenlist-1.3.3-cp310-cp310-musllinux_1_1_s390x.whl", hash = "sha256:02c9ac843e3390826a265e331105efeab489ffaf4dd86384595ee8ce6d35ae7f"}, - {file = "frozenlist-1.3.3-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:9545a33965d0d377b0bc823dcabf26980e77f1b6a7caa368a365a9497fb09420"}, - {file = "frozenlist-1.3.3-cp310-cp310-win32.whl", hash = "sha256:d5cd3ab21acbdb414bb6c31958d7b06b85eeb40f66463c264a9b343a4e238642"}, - {file = "frozenlist-1.3.3-cp310-cp310-win_amd64.whl", hash = "sha256:b756072364347cb6aa5b60f9bc18e94b2f79632de3b0190253ad770c5df17db1"}, - {file = "frozenlist-1.3.3-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:b4395e2f8d83fbe0c627b2b696acce67868793d7d9750e90e39592b3626691b7"}, - {file = "frozenlist-1.3.3-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:14143ae966a6229350021384870458e4777d1eae4c28d1a7aa47f24d030e6678"}, - {file = "frozenlist-1.3.3-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:5d8860749e813a6f65bad8285a0520607c9500caa23fea6ee407e63debcdbef6"}, - {file = "frozenlist-1.3.3-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:23d16d9f477bb55b6154654e0e74557040575d9d19fe78a161bd33d7d76808e8"}, - {file = "frozenlist-1.3.3-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:eb82dbba47a8318e75f679690190c10a5e1f447fbf9df41cbc4c3afd726d88cb"}, - {file = "frozenlist-1.3.3-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:9309869032abb23d196cb4e4db574232abe8b8be1339026f489eeb34a4acfd91"}, - {file = "frozenlist-1.3.3-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:a97b4fe50b5890d36300820abd305694cb865ddb7885049587a5678215782a6b"}, - {file = "frozenlist-1.3.3-cp311-cp311-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c188512b43542b1e91cadc3c6c915a82a5eb95929134faf7fd109f14f9892ce4"}, - {file = "frozenlist-1.3.3-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:303e04d422e9b911a09ad499b0368dc551e8c3cd15293c99160c7f1f07b59a48"}, - {file = "frozenlist-1.3.3-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:0771aed7f596c7d73444c847a1c16288937ef988dc04fb9f7be4b2aa91db609d"}, - {file = "frozenlist-1.3.3-cp311-cp311-musllinux_1_1_ppc64le.whl", hash = "sha256:66080ec69883597e4d026f2f71a231a1ee9887835902dbe6b6467d5a89216cf6"}, - {file = "frozenlist-1.3.3-cp311-cp311-musllinux_1_1_s390x.whl", hash = "sha256:41fe21dc74ad3a779c3d73a2786bdf622ea81234bdd4faf90b8b03cad0c2c0b4"}, - {file = "frozenlist-1.3.3-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:f20380df709d91525e4bee04746ba612a4df0972c1b8f8e1e8af997e678c7b81"}, - {file = "frozenlist-1.3.3-cp311-cp311-win32.whl", hash = "sha256:f30f1928162e189091cf4d9da2eac617bfe78ef907a761614ff577ef4edfb3c8"}, - {file = "frozenlist-1.3.3-cp311-cp311-win_amd64.whl", hash = "sha256:a6394d7dadd3cfe3f4b3b186e54d5d8504d44f2d58dcc89d693698e8b7132b32"}, - {file = "frozenlist-1.3.3-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:8df3de3a9ab8325f94f646609a66cbeeede263910c5c0de0101079ad541af332"}, - {file = "frozenlist-1.3.3-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0693c609e9742c66ba4870bcee1ad5ff35462d5ffec18710b4ac89337ff16e27"}, - {file = "frozenlist-1.3.3-cp37-cp37m-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:cd4210baef299717db0a600d7a3cac81d46ef0e007f88c9335db79f8979c0d3d"}, - {file = "frozenlist-1.3.3-cp37-cp37m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = 
"sha256:394c9c242113bfb4b9aa36e2b80a05ffa163a30691c7b5a29eba82e937895d5e"}, - {file = "frozenlist-1.3.3-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:6327eb8e419f7d9c38f333cde41b9ae348bec26d840927332f17e887a8dcb70d"}, - {file = "frozenlist-1.3.3-cp37-cp37m-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:2e24900aa13212e75e5b366cb9065e78bbf3893d4baab6052d1aca10d46d944c"}, - {file = "frozenlist-1.3.3-cp37-cp37m-musllinux_1_1_aarch64.whl", hash = "sha256:3843f84a6c465a36559161e6c59dce2f2ac10943040c2fd021cfb70d58c4ad56"}, - {file = "frozenlist-1.3.3-cp37-cp37m-musllinux_1_1_i686.whl", hash = "sha256:84610c1502b2461255b4c9b7d5e9c48052601a8957cd0aea6ec7a7a1e1fb9420"}, - {file = "frozenlist-1.3.3-cp37-cp37m-musllinux_1_1_ppc64le.whl", hash = "sha256:c21b9aa40e08e4f63a2f92ff3748e6b6c84d717d033c7b3438dd3123ee18f70e"}, - {file = "frozenlist-1.3.3-cp37-cp37m-musllinux_1_1_s390x.whl", hash = "sha256:efce6ae830831ab6a22b9b4091d411698145cb9b8fc869e1397ccf4b4b6455cb"}, - {file = "frozenlist-1.3.3-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:40de71985e9042ca00b7953c4f41eabc3dc514a2d1ff534027f091bc74416401"}, - {file = "frozenlist-1.3.3-cp37-cp37m-win32.whl", hash = "sha256:180c00c66bde6146a860cbb81b54ee0df350d2daf13ca85b275123bbf85de18a"}, - {file = "frozenlist-1.3.3-cp37-cp37m-win_amd64.whl", hash = "sha256:9bbbcedd75acdfecf2159663b87f1bb5cfc80e7cd99f7ddd9d66eb98b14a8411"}, - {file = "frozenlist-1.3.3-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:034a5c08d36649591be1cbb10e09da9f531034acfe29275fc5454a3b101ce41a"}, - {file = "frozenlist-1.3.3-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:ba64dc2b3b7b158c6660d49cdb1d872d1d0bf4e42043ad8d5006099479a194e5"}, - {file = "frozenlist-1.3.3-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:47df36a9fe24054b950bbc2db630d508cca3aa27ed0566c0baf661225e52c18e"}, - {file = "frozenlist-1.3.3-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:008a054b75d77c995ea26629ab3a0c0d7281341f2fa7e1e85fa6153ae29ae99c"}, - {file = "frozenlist-1.3.3-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:841ea19b43d438a80b4de62ac6ab21cfe6827bb8a9dc62b896acc88eaf9cecba"}, - {file = "frozenlist-1.3.3-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:e235688f42b36be2b6b06fc37ac2126a73b75fb8d6bc66dd632aa35286238703"}, - {file = "frozenlist-1.3.3-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:ca713d4af15bae6e5d79b15c10c8522859a9a89d3b361a50b817c98c2fb402a2"}, - {file = "frozenlist-1.3.3-cp38-cp38-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9ac5995f2b408017b0be26d4a1d7c61bce106ff3d9e3324374d66b5964325448"}, - {file = "frozenlist-1.3.3-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:a4ae8135b11652b08a8baf07631d3ebfe65a4c87909dbef5fa0cdde440444ee4"}, - {file = "frozenlist-1.3.3-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:4ea42116ceb6bb16dbb7d526e242cb6747b08b7710d9782aa3d6732bd8d27649"}, - {file = "frozenlist-1.3.3-cp38-cp38-musllinux_1_1_ppc64le.whl", hash = "sha256:810860bb4bdce7557bc0febb84bbd88198b9dbc2022d8eebe5b3590b2ad6c842"}, - {file = "frozenlist-1.3.3-cp38-cp38-musllinux_1_1_s390x.whl", hash = "sha256:ee78feb9d293c323b59a6f2dd441b63339a30edf35abcb51187d2fc26e696d13"}, - {file = "frozenlist-1.3.3-cp38-cp38-musllinux_1_1_x86_64.whl", hash = 
"sha256:0af2e7c87d35b38732e810befb9d797a99279cbb85374d42ea61c1e9d23094b3"}, - {file = "frozenlist-1.3.3-cp38-cp38-win32.whl", hash = "sha256:899c5e1928eec13fd6f6d8dc51be23f0d09c5281e40d9cf4273d188d9feeaf9b"}, - {file = "frozenlist-1.3.3-cp38-cp38-win_amd64.whl", hash = "sha256:7f44e24fa70f6fbc74aeec3e971f60a14dde85da364aa87f15d1be94ae75aeef"}, - {file = "frozenlist-1.3.3-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:2b07ae0c1edaa0a36339ec6cce700f51b14a3fc6545fdd32930d2c83917332cf"}, - {file = "frozenlist-1.3.3-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:ebb86518203e12e96af765ee89034a1dbb0c3c65052d1b0c19bbbd6af8a145e1"}, - {file = "frozenlist-1.3.3-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:5cf820485f1b4c91e0417ea0afd41ce5cf5965011b3c22c400f6d144296ccbc0"}, - {file = "frozenlist-1.3.3-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5c11e43016b9024240212d2a65043b70ed8dfd3b52678a1271972702d990ac6d"}, - {file = "frozenlist-1.3.3-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:8fa3c6e3305aa1146b59a09b32b2e04074945ffcfb2f0931836d103a2c38f936"}, - {file = "frozenlist-1.3.3-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:352bd4c8c72d508778cf05ab491f6ef36149f4d0cb3c56b1b4302852255d05d5"}, - {file = "frozenlist-1.3.3-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:65a5e4d3aa679610ac6e3569e865425b23b372277f89b5ef06cf2cdaf1ebf22b"}, - {file = "frozenlist-1.3.3-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b1e2c1185858d7e10ff045c496bbf90ae752c28b365fef2c09cf0fa309291669"}, - {file = "frozenlist-1.3.3-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:f163d2fd041c630fed01bc48d28c3ed4a3b003c00acd396900e11ee5316b56bb"}, - {file = "frozenlist-1.3.3-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:05cdb16d09a0832eedf770cb7bd1fe57d8cf4eaf5aced29c4e41e3f20b30a784"}, - {file = "frozenlist-1.3.3-cp39-cp39-musllinux_1_1_ppc64le.whl", hash = "sha256:8bae29d60768bfa8fb92244b74502b18fae55a80eac13c88eb0b496d4268fd2d"}, - {file = "frozenlist-1.3.3-cp39-cp39-musllinux_1_1_s390x.whl", hash = "sha256:eedab4c310c0299961ac285591acd53dc6723a1ebd90a57207c71f6e0c2153ab"}, - {file = "frozenlist-1.3.3-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:3bbdf44855ed8f0fbcd102ef05ec3012d6a4fd7c7562403f76ce6a52aeffb2b1"}, - {file = "frozenlist-1.3.3-cp39-cp39-win32.whl", hash = "sha256:efa568b885bca461f7c7b9e032655c0c143d305bf01c30caf6db2854a4532b38"}, - {file = "frozenlist-1.3.3-cp39-cp39-win_amd64.whl", hash = "sha256:cfe33efc9cb900a4c46f91a5ceba26d6df370ffddd9ca386eb1d4f0ad97b9ea9"}, - {file = "frozenlist-1.3.3.tar.gz", hash = "sha256:58bcc55721e8a90b88332d6cd441261ebb22342e238296bb330968952fbb3a6a"}, -] - -[[package]] -name = "fsspec" -version = "2023.6.0" -description = "File-system specification" -optional = false -python-versions = ">=3.8" -files = [ - {file = "fsspec-2023.6.0-py3-none-any.whl", hash = "sha256:1cbad1faef3e391fba6dc005ae9b5bdcbf43005c9167ce78c915549c352c869a"}, - {file = "fsspec-2023.6.0.tar.gz", hash = "sha256:d0b2f935446169753e7a5c5c55681c54ea91996cc67be93c39a154fb3a2742af"}, -] - -[package.extras] -abfs = ["adlfs"] -adl = ["adlfs"] -arrow = ["pyarrow (>=1)"] -dask = ["dask", "distributed"] -devel = ["pytest", "pytest-cov"] -dropbox = ["dropbox", "dropboxdrivefs", "requests"] -full = ["adlfs", "aiohttp (!=4.0.0a0,!=4.0.0a1)", "dask", "distributed", "dropbox", "dropboxdrivefs", 
"fusepy", "gcsfs", "libarchive-c", "ocifs", "panel", "paramiko", "pyarrow (>=1)", "pygit2", "requests", "s3fs", "smbprotocol", "tqdm"] -fuse = ["fusepy"] -gcs = ["gcsfs"] -git = ["pygit2"] -github = ["requests"] -gs = ["gcsfs"] -gui = ["panel"] -hdfs = ["pyarrow (>=1)"] -http = ["aiohttp (!=4.0.0a0,!=4.0.0a1)", "requests"] -libarchive = ["libarchive-c"] -oci = ["ocifs"] -s3 = ["s3fs"] -sftp = ["paramiko"] -smb = ["smbprotocol"] -ssh = ["paramiko"] -tqdm = ["tqdm"] - -[[package]] -name = "gpt4all" -version = "1.0.3" -description = "Python bindings for GPT4All" -optional = false -python-versions = ">=3.8" -files = [ - {file = "gpt4all-1.0.3-py3-none-macosx_10_9_universal2.whl", hash = "sha256:11bbc8bdb183b100b57e3e8e0c67650cd84e49d9b875dd15c8bb26cfcf72988d"}, - {file = "gpt4all-1.0.3-py3-none-manylinux1_x86_64.whl", hash = "sha256:75248b2f160bd7834b807196395d51792cd3e41969c870a418566d1007ec5c83"}, - {file = "gpt4all-1.0.3-py3-none-win_amd64.whl", hash = "sha256:dcb4901f6320d938bc267ee0b28eaee8676221d22ecbf31b00246d0b4973a31a"}, -] - -[package.dependencies] -requests = "*" -tqdm = "*" - -[package.extras] -dev = ["black", "isort", "mkautodoc", "mkdocs-jupyter", "mkdocs-material", "mkdocstrings[python]", "pytest", "setuptools", "twine", "wheel"] - -[[package]] -name = "greenlet" -version = "2.0.2" -description = "Lightweight in-process concurrent programming" -optional = false -python-versions = ">=2.7,!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*,!=3.4.*" -files = [ - {file = "greenlet-2.0.2-cp27-cp27m-macosx_10_14_x86_64.whl", hash = "sha256:bdfea8c661e80d3c1c99ad7c3ff74e6e87184895bbaca6ee8cc61209f8b9b85d"}, - {file = "greenlet-2.0.2-cp27-cp27m-manylinux2010_x86_64.whl", hash = "sha256:9d14b83fab60d5e8abe587d51c75b252bcc21683f24699ada8fb275d7712f5a9"}, - {file = "greenlet-2.0.2-cp27-cp27m-win32.whl", hash = "sha256:6c3acb79b0bfd4fe733dff8bc62695283b57949ebcca05ae5c129eb606ff2d74"}, - {file = "greenlet-2.0.2-cp27-cp27m-win_amd64.whl", hash = "sha256:283737e0da3f08bd637b5ad058507e578dd462db259f7f6e4c5c365ba4ee9343"}, - {file = "greenlet-2.0.2-cp27-cp27mu-manylinux2010_x86_64.whl", hash = "sha256:d27ec7509b9c18b6d73f2f5ede2622441de812e7b1a80bbd446cb0633bd3d5ae"}, - {file = "greenlet-2.0.2-cp310-cp310-macosx_11_0_x86_64.whl", hash = "sha256:30bcf80dda7f15ac77ba5af2b961bdd9dbc77fd4ac6105cee85b0d0a5fcf74df"}, - {file = "greenlet-2.0.2-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:26fbfce90728d82bc9e6c38ea4d038cba20b7faf8a0ca53a9c07b67318d46088"}, - {file = "greenlet-2.0.2-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:9190f09060ea4debddd24665d6804b995a9c122ef5917ab26e1566dcc712ceeb"}, - {file = "greenlet-2.0.2-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d75209eed723105f9596807495d58d10b3470fa6732dd6756595e89925ce2470"}, - {file = "greenlet-2.0.2-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:3a51c9751078733d88e013587b108f1b7a1fb106d402fb390740f002b6f6551a"}, - {file = "greenlet-2.0.2-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:76ae285c8104046b3a7f06b42f29c7b73f77683df18c49ab5af7983994c2dd91"}, - {file = "greenlet-2.0.2-cp310-cp310-win_amd64.whl", hash = "sha256:2d4686f195e32d36b4d7cf2d166857dbd0ee9f3d20ae349b6bf8afc8485b3645"}, - {file = "greenlet-2.0.2-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:c4302695ad8027363e96311df24ee28978162cdcdd2006476c43970b384a244c"}, - {file = "greenlet-2.0.2-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = 
"sha256:c48f54ef8e05f04d6eff74b8233f6063cb1ed960243eacc474ee73a2ea8573ca"}, - {file = "greenlet-2.0.2-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:a1846f1b999e78e13837c93c778dcfc3365902cfb8d1bdb7dd73ead37059f0d0"}, - {file = "greenlet-2.0.2-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3a06ad5312349fec0ab944664b01d26f8d1f05009566339ac6f63f56589bc1a2"}, - {file = "greenlet-2.0.2-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:eff4eb9b7eb3e4d0cae3d28c283dc16d9bed6b193c2e1ace3ed86ce48ea8df19"}, - {file = "greenlet-2.0.2-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:5454276c07d27a740c5892f4907c86327b632127dd9abec42ee62e12427ff7e3"}, - {file = "greenlet-2.0.2-cp311-cp311-win_amd64.whl", hash = "sha256:7cafd1208fdbe93b67c7086876f061f660cfddc44f404279c1585bbf3cdc64c5"}, - {file = "greenlet-2.0.2-cp35-cp35m-macosx_10_14_x86_64.whl", hash = "sha256:910841381caba4f744a44bf81bfd573c94e10b3045ee00de0cbf436fe50673a6"}, - {file = "greenlet-2.0.2-cp35-cp35m-manylinux2010_x86_64.whl", hash = "sha256:18a7f18b82b52ee85322d7a7874e676f34ab319b9f8cce5de06067384aa8ff43"}, - {file = "greenlet-2.0.2-cp35-cp35m-win32.whl", hash = "sha256:03a8f4f3430c3b3ff8d10a2a86028c660355ab637cee9333d63d66b56f09d52a"}, - {file = "greenlet-2.0.2-cp35-cp35m-win_amd64.whl", hash = "sha256:4b58adb399c4d61d912c4c331984d60eb66565175cdf4a34792cd9600f21b394"}, - {file = "greenlet-2.0.2-cp36-cp36m-macosx_10_14_x86_64.whl", hash = "sha256:703f18f3fda276b9a916f0934d2fb6d989bf0b4fb5a64825260eb9bfd52d78f0"}, - {file = "greenlet-2.0.2-cp36-cp36m-manylinux2010_x86_64.whl", hash = "sha256:32e5b64b148966d9cccc2c8d35a671409e45f195864560829f395a54226408d3"}, - {file = "greenlet-2.0.2-cp36-cp36m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:2dd11f291565a81d71dab10b7033395b7a3a5456e637cf997a6f33ebdf06f8db"}, - {file = "greenlet-2.0.2-cp36-cp36m-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:e0f72c9ddb8cd28532185f54cc1453f2c16fb417a08b53a855c4e6a418edd099"}, - {file = "greenlet-2.0.2-cp36-cp36m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:cd021c754b162c0fb55ad5d6b9d960db667faad0fa2ff25bb6e1301b0b6e6a75"}, - {file = "greenlet-2.0.2-cp36-cp36m-musllinux_1_1_aarch64.whl", hash = "sha256:3c9b12575734155d0c09d6c3e10dbd81665d5c18e1a7c6597df72fd05990c8cf"}, - {file = "greenlet-2.0.2-cp36-cp36m-musllinux_1_1_x86_64.whl", hash = "sha256:b9ec052b06a0524f0e35bd8790686a1da006bd911dd1ef7d50b77bfbad74e292"}, - {file = "greenlet-2.0.2-cp36-cp36m-win32.whl", hash = "sha256:dbfcfc0218093a19c252ca8eb9aee3d29cfdcb586df21049b9d777fd32c14fd9"}, - {file = "greenlet-2.0.2-cp36-cp36m-win_amd64.whl", hash = "sha256:9f35ec95538f50292f6d8f2c9c9f8a3c6540bbfec21c9e5b4b751e0a7c20864f"}, - {file = "greenlet-2.0.2-cp37-cp37m-macosx_10_15_x86_64.whl", hash = "sha256:d5508f0b173e6aa47273bdc0a0b5ba055b59662ba7c7ee5119528f466585526b"}, - {file = "greenlet-2.0.2-cp37-cp37m-manylinux2010_x86_64.whl", hash = "sha256:f82d4d717d8ef19188687aa32b8363e96062911e63ba22a0cff7802a8e58e5f1"}, - {file = "greenlet-2.0.2-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c9c59a2120b55788e800d82dfa99b9e156ff8f2227f07c5e3012a45a399620b7"}, - {file = "greenlet-2.0.2-cp37-cp37m-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:2780572ec463d44c1d3ae850239508dbeb9fed38e294c68d19a24d925d9223ca"}, - {file = "greenlet-2.0.2-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = 
"sha256:937e9020b514ceedb9c830c55d5c9872abc90f4b5862f89c0887033ae33c6f73"}, - {file = "greenlet-2.0.2-cp37-cp37m-musllinux_1_1_aarch64.whl", hash = "sha256:36abbf031e1c0f79dd5d596bfaf8e921c41df2bdf54ee1eed921ce1f52999a86"}, - {file = "greenlet-2.0.2-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:18e98fb3de7dba1c0a852731c3070cf022d14f0d68b4c87a19cc1016f3bb8b33"}, - {file = "greenlet-2.0.2-cp37-cp37m-win32.whl", hash = "sha256:3f6ea9bd35eb450837a3d80e77b517ea5bc56b4647f5502cd28de13675ee12f7"}, - {file = "greenlet-2.0.2-cp37-cp37m-win_amd64.whl", hash = "sha256:7492e2b7bd7c9b9916388d9df23fa49d9b88ac0640db0a5b4ecc2b653bf451e3"}, - {file = "greenlet-2.0.2-cp38-cp38-macosx_10_15_x86_64.whl", hash = "sha256:b864ba53912b6c3ab6bcb2beb19f19edd01a6bfcbdfe1f37ddd1778abfe75a30"}, - {file = "greenlet-2.0.2-cp38-cp38-manylinux2010_x86_64.whl", hash = "sha256:ba2956617f1c42598a308a84c6cf021a90ff3862eddafd20c3333d50f0edb45b"}, - {file = "greenlet-2.0.2-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:fc3a569657468b6f3fb60587e48356fe512c1754ca05a564f11366ac9e306526"}, - {file = "greenlet-2.0.2-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:8eab883b3b2a38cc1e050819ef06a7e6344d4a990d24d45bc6f2cf959045a45b"}, - {file = "greenlet-2.0.2-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:acd2162a36d3de67ee896c43effcd5ee3de247eb00354db411feb025aa319857"}, - {file = "greenlet-2.0.2-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:0bf60faf0bc2468089bdc5edd10555bab6e85152191df713e2ab1fcc86382b5a"}, - {file = "greenlet-2.0.2-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:b0ef99cdbe2b682b9ccbb964743a6aca37905fda5e0452e5ee239b1654d37f2a"}, - {file = "greenlet-2.0.2-cp38-cp38-win32.whl", hash = "sha256:b80f600eddddce72320dbbc8e3784d16bd3fb7b517e82476d8da921f27d4b249"}, - {file = "greenlet-2.0.2-cp38-cp38-win_amd64.whl", hash = "sha256:4d2e11331fc0c02b6e84b0d28ece3a36e0548ee1a1ce9ddde03752d9b79bba40"}, - {file = "greenlet-2.0.2-cp39-cp39-macosx_11_0_x86_64.whl", hash = "sha256:88d9ab96491d38a5ab7c56dd7a3cc37d83336ecc564e4e8816dbed12e5aaefc8"}, - {file = "greenlet-2.0.2-cp39-cp39-manylinux2010_x86_64.whl", hash = "sha256:561091a7be172ab497a3527602d467e2b3fbe75f9e783d8b8ce403fa414f71a6"}, - {file = "greenlet-2.0.2-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:971ce5e14dc5e73715755d0ca2975ac88cfdaefcaab078a284fea6cfabf866df"}, - {file = "greenlet-2.0.2-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:be4ed120b52ae4d974aa40215fcdfde9194d63541c7ded40ee12eb4dda57b76b"}, - {file = "greenlet-2.0.2-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:94c817e84245513926588caf1152e3b559ff794d505555211ca041f032abbb6b"}, - {file = "greenlet-2.0.2-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:1a819eef4b0e0b96bb0d98d797bef17dc1b4a10e8d7446be32d1da33e095dbb8"}, - {file = "greenlet-2.0.2-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:7efde645ca1cc441d6dc4b48c0f7101e8d86b54c8530141b09fd31cef5149ec9"}, - {file = "greenlet-2.0.2-cp39-cp39-win32.whl", hash = "sha256:ea9872c80c132f4663822dd2a08d404073a5a9b5ba6155bea72fb2a79d1093b5"}, - {file = "greenlet-2.0.2-cp39-cp39-win_amd64.whl", hash = "sha256:db1a39669102a1d8d12b57de2bb7e2ec9066a6f2b3da35ae511ff93b01b5d564"}, - {file = "greenlet-2.0.2.tar.gz", hash = "sha256:e7c8dc13af7db097bed64a051d2dd49e9f0af495c26995c00a9ee842690d34c0"}, -] - -[package.extras] -docs = ["Sphinx", "docutils (<0.18)"] -test = ["objgraph", 
"psutil"] - -[[package]] -name = "h11" -version = "0.14.0" -description = "A pure-Python, bring-your-own-I/O implementation of HTTP/1.1" -optional = false -python-versions = ">=3.7" -files = [ - {file = "h11-0.14.0-py3-none-any.whl", hash = "sha256:e3fe4ac4b851c468cc8363d500db52c2ead036020723024a109d37346efaa761"}, - {file = "h11-0.14.0.tar.gz", hash = "sha256:8f19fbbe99e72420ff35c00b27a34cb9937e902a8b810e2c88300c6f0a3b699d"}, -] - -[[package]] -name = "hnswlib" -version = "0.7.0" -description = "hnswlib" -optional = false -python-versions = "*" -files = [ - {file = "hnswlib-0.7.0.tar.gz", hash = "sha256:bc459668e7e44bb7454b256b90c98c5af750653919d9a91698dafcf416cf64c4"}, -] - -[package.dependencies] -numpy = "*" - -[[package]] -name = "httptools" -version = "0.6.0" -description = "A collection of framework independent HTTP protocol utils." -optional = false -python-versions = ">=3.5.0" -files = [ - {file = "httptools-0.6.0-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:818325afee467d483bfab1647a72054246d29f9053fd17cc4b86cda09cc60339"}, - {file = "httptools-0.6.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:72205730bf1be875003692ca54a4a7c35fac77b4746008966061d9d41a61b0f5"}, - {file = "httptools-0.6.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:33eb1d4e609c835966e969a31b1dedf5ba16b38cab356c2ce4f3e33ffa94cad3"}, - {file = "httptools-0.6.0-cp310-cp310-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6bdc6675ec6cb79d27e0575750ac6e2b47032742e24eed011b8db73f2da9ed40"}, - {file = "httptools-0.6.0-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:463c3bc5ef64b9cf091be9ac0e0556199503f6e80456b790a917774a616aff6e"}, - {file = "httptools-0.6.0-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:82f228b88b0e8c6099a9c4757ce9fdbb8b45548074f8d0b1f0fc071e35655d1c"}, - {file = "httptools-0.6.0-cp310-cp310-win_amd64.whl", hash = "sha256:0781fedc610293a2716bc7fa142d4c85e6776bc59d617a807ff91246a95dea35"}, - {file = "httptools-0.6.0-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:721e503245d591527cddd0f6fd771d156c509e831caa7a57929b55ac91ee2b51"}, - {file = "httptools-0.6.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:274bf20eeb41b0956e34f6a81f84d26ed57c84dd9253f13dcb7174b27ccd8aaf"}, - {file = "httptools-0.6.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:259920bbae18740a40236807915def554132ad70af5067e562f4660b62c59b90"}, - {file = "httptools-0.6.0-cp311-cp311-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:03bfd2ae8a2d532952ac54445a2fb2504c804135ed28b53fefaf03d3a93eb1fd"}, - {file = "httptools-0.6.0-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:f959e4770b3fc8ee4dbc3578fd910fab9003e093f20ac8c621452c4d62e517cb"}, - {file = "httptools-0.6.0-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:6e22896b42b95b3237eccc42278cd72c0df6f23247d886b7ded3163452481e38"}, - {file = "httptools-0.6.0-cp311-cp311-win_amd64.whl", hash = "sha256:38f3cafedd6aa20ae05f81f2e616ea6f92116c8a0f8dcb79dc798df3356836e2"}, - {file = "httptools-0.6.0-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:47043a6e0ea753f006a9d0dd076a8f8c99bc0ecae86a0888448eb3076c43d717"}, - {file = "httptools-0.6.0-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:35a541579bed0270d1ac10245a3e71e5beeb1903b5fbbc8d8b4d4e728d48ff1d"}, - {file = 
"httptools-0.6.0-cp37-cp37m-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:65d802e7b2538a9756df5acc062300c160907b02e15ed15ba035b02bce43e89c"}, - {file = "httptools-0.6.0-cp37-cp37m-musllinux_1_1_aarch64.whl", hash = "sha256:26326e0a8fe56829f3af483200d914a7cd16d8d398d14e36888b56de30bec81a"}, - {file = "httptools-0.6.0-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:e41ccac9e77cd045f3e4ee0fc62cbf3d54d7d4b375431eb855561f26ee7a9ec4"}, - {file = "httptools-0.6.0-cp37-cp37m-win_amd64.whl", hash = "sha256:4e748fc0d5c4a629988ef50ac1aef99dfb5e8996583a73a717fc2cac4ab89932"}, - {file = "httptools-0.6.0-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:cf8169e839a0d740f3d3c9c4fa630ac1a5aaf81641a34575ca6773ed7ce041a1"}, - {file = "httptools-0.6.0-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:5dcc14c090ab57b35908d4a4585ec5c0715439df07be2913405991dbb37e049d"}, - {file = "httptools-0.6.0-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0d0b0571806a5168013b8c3d180d9f9d6997365a4212cb18ea20df18b938aa0b"}, - {file = "httptools-0.6.0-cp38-cp38-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0fb4a608c631f7dcbdf986f40af7a030521a10ba6bc3d36b28c1dc9e9035a3c0"}, - {file = "httptools-0.6.0-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:93f89975465133619aea8b1952bc6fa0e6bad22a447c6d982fc338fbb4c89649"}, - {file = "httptools-0.6.0-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:73e9d66a5a28b2d5d9fbd9e197a31edd02be310186db423b28e6052472dc8201"}, - {file = "httptools-0.6.0-cp38-cp38-win_amd64.whl", hash = "sha256:22c01fcd53648162730a71c42842f73b50f989daae36534c818b3f5050b54589"}, - {file = "httptools-0.6.0-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:3f96d2a351b5625a9fd9133c95744e8ca06f7a4f8f0b8231e4bbaae2c485046a"}, - {file = "httptools-0.6.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:72ec7c70bd9f95ef1083d14a755f321d181f046ca685b6358676737a5fecd26a"}, - {file = "httptools-0.6.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:b703d15dbe082cc23266bf5d9448e764c7cb3fcfe7cb358d79d3fd8248673ef9"}, - {file = "httptools-0.6.0-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:82c723ed5982f8ead00f8e7605c53e55ffe47c47465d878305ebe0082b6a1755"}, - {file = "httptools-0.6.0-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:b0a816bb425c116a160fbc6f34cece097fd22ece15059d68932af686520966bd"}, - {file = "httptools-0.6.0-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:dea66d94e5a3f68c5e9d86e0894653b87d952e624845e0b0e3ad1c733c6cc75d"}, - {file = "httptools-0.6.0-cp39-cp39-win_amd64.whl", hash = "sha256:23b09537086a5a611fad5696fc8963d67c7e7f98cb329d38ee114d588b0b74cd"}, - {file = "httptools-0.6.0.tar.gz", hash = "sha256:9fc6e409ad38cbd68b177cd5158fc4042c796b82ca88d99ec78f07bed6c6b796"}, -] - -[package.extras] -test = ["Cython (>=0.29.24,<0.30.0)"] - -[[package]] -name = "huggingface-hub" -version = "0.16.4" -description = "Client library to download and publish models, datasets and other repos on the huggingface.co hub" -optional = false -python-versions = ">=3.7.0" -files = [ - {file = "huggingface_hub-0.16.4-py3-none-any.whl", hash = "sha256:0d3df29932f334fead024afc7cb4cc5149d955238b8b5e42dcf9740d6995a349"}, - {file = "huggingface_hub-0.16.4.tar.gz", hash = "sha256:608c7d4f3d368b326d1747f91523dbd1f692871e8e2e7a4750314a2dd8b63e14"}, -] - -[package.dependencies] 
-filelock = "*" -fsspec = "*" -packaging = ">=20.9" -pyyaml = ">=5.1" -requests = "*" -tqdm = ">=4.42.1" -typing-extensions = ">=3.7.4.3" - -[package.extras] -all = ["InquirerPy (==0.3.4)", "Jinja2", "Pillow", "aiohttp", "black (>=23.1,<24.0)", "gradio", "jedi", "mypy (==0.982)", "numpy", "pydantic", "pytest", "pytest-asyncio", "pytest-cov", "pytest-env", "pytest-vcr", "pytest-xdist", "ruff (>=0.0.241)", "soundfile", "types-PyYAML", "types-requests", "types-simplejson", "types-toml", "types-tqdm", "types-urllib3", "urllib3 (<2.0)"] -cli = ["InquirerPy (==0.3.4)"] -dev = ["InquirerPy (==0.3.4)", "Jinja2", "Pillow", "aiohttp", "black (>=23.1,<24.0)", "gradio", "jedi", "mypy (==0.982)", "numpy", "pydantic", "pytest", "pytest-asyncio", "pytest-cov", "pytest-env", "pytest-vcr", "pytest-xdist", "ruff (>=0.0.241)", "soundfile", "types-PyYAML", "types-requests", "types-simplejson", "types-toml", "types-tqdm", "types-urllib3", "urllib3 (<2.0)"] -fastai = ["fastai (>=2.4)", "fastcore (>=1.3.27)", "toml"] -inference = ["aiohttp", "pydantic"] -quality = ["black (>=23.1,<24.0)", "mypy (==0.982)", "ruff (>=0.0.241)"] -tensorflow = ["graphviz", "pydot", "tensorflow"] -testing = ["InquirerPy (==0.3.4)", "Jinja2", "Pillow", "aiohttp", "gradio", "jedi", "numpy", "pydantic", "pytest", "pytest-asyncio", "pytest-cov", "pytest-env", "pytest-vcr", "pytest-xdist", "soundfile", "urllib3 (<2.0)"] -torch = ["torch"] -typing = ["pydantic", "types-PyYAML", "types-requests", "types-simplejson", "types-toml", "types-tqdm", "types-urllib3"] - -[[package]] -name = "humanfriendly" -version = "10.0" -description = "Human friendly output for text interfaces using Python" -optional = false -python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*" -files = [ - {file = "humanfriendly-10.0-py2.py3-none-any.whl", hash = "sha256:1697e1a8a8f550fd43c2865cd84542fc175a61dcb779b6fee18cf6b6ccba1477"}, - {file = "humanfriendly-10.0.tar.gz", hash = "sha256:6b0b831ce8f15f7300721aa49829fc4e83921a9a301cc7f606be6686a2288ddc"}, -] - -[package.dependencies] -pyreadline3 = {version = "*", markers = "sys_platform == \"win32\" and python_version >= \"3.8\""} - -[[package]] -name = "idna" -version = "3.4" -description = "Internationalized Domain Names in Applications (IDNA)" -optional = false -python-versions = ">=3.5" -files = [ - {file = "idna-3.4-py3-none-any.whl", hash = "sha256:90b77e79eaa3eba6de819a0c442c0b4ceefc341a7a2ab77d7562bf49f425c5c2"}, - {file = "idna-3.4.tar.gz", hash = "sha256:814f528e8dead7d329833b91c5faa87d60bf71824cd12a7530b5526063d02cb4"}, -] - -[[package]] -name = "imapclient" -version = "2.3.1" -description = "Easy-to-use, Pythonic and complete IMAP client library" -optional = false -python-versions = "*" -files = [ - {file = "IMAPClient-2.3.1-py2.py3-none-any.whl", hash = "sha256:057f28025d2987c63e065afb0e4370b0b850b539b0e1494cea0427e88130108c"}, - {file = "IMAPClient-2.3.1.zip", hash = "sha256:26ea995664fae3a88b878ebce2aff7402931697b86658b7882043ddb01b0e6ba"}, -] - -[package.dependencies] -six = "*" - -[package.extras] -doc = ["sphinx"] -test = ["mock (>=1.3.0)"] - -[[package]] -name = "importlib-metadata" -version = "6.8.0" -description = "Read metadata from Python packages" -optional = false -python-versions = ">=3.8" -files = [ - {file = "importlib_metadata-6.8.0-py3-none-any.whl", hash = "sha256:3ebb78df84a805d7698245025b975d9d67053cd94c79245ba4b3eb694abe68bb"}, - {file = "importlib_metadata-6.8.0.tar.gz", hash = "sha256:dbace7892d8c0c4ac1ad096662232f831d4e64f4c4545bd53016a3e9d4654743"}, -] - 
-[package.dependencies] -zipp = ">=0.5" - -[package.extras] -docs = ["furo", "jaraco.packaging (>=9)", "jaraco.tidelift (>=1.4)", "rst.linker (>=1.9)", "sphinx (>=3.5)", "sphinx-lint"] -perf = ["ipython"] -testing = ["flufl.flake8", "importlib-resources (>=1.3)", "packaging", "pyfakefs", "pytest (>=6)", "pytest-black (>=0.3.7)", "pytest-checkdocs (>=2.4)", "pytest-cov", "pytest-enabler (>=2.2)", "pytest-mypy (>=0.9.1)", "pytest-perf (>=0.9.2)", "pytest-ruff"] - -[[package]] -name = "jinja2" -version = "3.1.2" -description = "A very fast and expressive template engine." -optional = false -python-versions = ">=3.7" -files = [ - {file = "Jinja2-3.1.2-py3-none-any.whl", hash = "sha256:6088930bfe239f0e6710546ab9c19c9ef35e29792895fed6e6e31a023a182a61"}, - {file = "Jinja2-3.1.2.tar.gz", hash = "sha256:31351a702a408a9e7595a8fc6150fc3f43bb6bf7e319770cbc0db9df9437e852"}, -] - -[package.dependencies] -MarkupSafe = ">=2.0" - -[package.extras] -i18n = ["Babel (>=2.7)"] - -[[package]] -name = "joblib" -version = "1.3.1" -description = "Lightweight pipelining with Python functions" -optional = false -python-versions = ">=3.7" -files = [ - {file = "joblib-1.3.1-py3-none-any.whl", hash = "sha256:89cf0529520e01b3de7ac7b74a8102c90d16d54c64b5dd98cafcd14307fdf915"}, - {file = "joblib-1.3.1.tar.gz", hash = "sha256:1f937906df65329ba98013dc9692fe22a4c5e4a648112de500508b18a21b41e3"}, -] - -[[package]] -name = "langchain" -version = "0.0.261" -description = "Building applications with LLMs through composability" -optional = false -python-versions = ">=3.8.1,<4.0" -files = [ - {file = "langchain-0.0.261-py3-none-any.whl", hash = "sha256:d2aa7c48c62e9febd7440d06eb067066ce2623db6d8b367f2742c9a78c315ce8"}, - {file = "langchain-0.0.261.tar.gz", hash = "sha256:1ec501b8323811bf8fa2db10b7703a654c57235646344eefdd0bae764c9e4335"}, -] - -[package.dependencies] -aiohttp = ">=3.8.3,<4.0.0" -async-timeout = {version = ">=4.0.0,<5.0.0", markers = "python_version < \"3.11\""} -dataclasses-json = ">=0.5.7,<0.6.0" -langsmith = ">=0.0.11,<0.1.0" -numexpr = ">=2.8.4,<3.0.0" -numpy = ">=1,<2" -openapi-schema-pydantic = ">=1.2,<2.0" -pydantic = ">=1,<2" -PyYAML = ">=5.3" -requests = ">=2,<3" -SQLAlchemy = ">=1.4,<3" -tenacity = ">=8.1.0,<9.0.0" - -[package.extras] -all = ["O365 (>=2.0.26,<3.0.0)", "aleph-alpha-client (>=2.15.0,<3.0.0)", "amadeus (>=8.1.0)", "anthropic (>=0.3,<0.4)", "arxiv (>=1.4,<2.0)", "atlassian-python-api (>=3.36.0,<4.0.0)", "awadb (>=0.3.9,<0.4.0)", "azure-ai-formrecognizer (>=3.2.1,<4.0.0)", "azure-ai-vision (>=0.11.1b1,<0.12.0)", "azure-cognitiveservices-speech (>=1.28.0,<2.0.0)", "azure-cosmos (>=4.4.0b1,<5.0.0)", "azure-identity (>=1.12.0,<2.0.0)", "beautifulsoup4 (>=4,<5)", "clarifai (>=9.1.0)", "clickhouse-connect (>=0.5.14,<0.6.0)", "cohere (>=4,<5)", "deeplake (>=3.6.8,<4.0.0)", "docarray[hnswlib] (>=0.32.0,<0.33.0)", "duckduckgo-search (>=3.8.3,<4.0.0)", "elasticsearch (>=8,<9)", "esprima (>=4.0.1,<5.0.0)", "faiss-cpu (>=1,<2)", "google-api-python-client (==2.70.0)", "google-auth (>=2.18.1,<3.0.0)", "google-search-results (>=2,<3)", "gptcache (>=0.1.7)", "html2text (>=2020.1.16,<2021.0.0)", "huggingface_hub (>=0,<1)", "jina (>=3.14,<4.0)", "jinja2 (>=3,<4)", "jq (>=1.4.1,<2.0.0)", "lancedb (>=0.1,<0.2)", "langkit (>=0.0.6,<0.1.0)", "lark (>=1.1.5,<2.0.0)", "libdeeplake (>=0.0.60,<0.0.61)", "librosa (>=0.10.0.post2,<0.11.0)", "lxml (>=4.9.2,<5.0.0)", "manifest-ml (>=0.0.1,<0.0.2)", "marqo (>=0.11.0,<0.12.0)", "momento (>=1.5.0,<2.0.0)", "nebula3-python (>=3.4.0,<4.0.0)", "neo4j (>=5.8.1,<6.0.0)", "networkx 
(>=2.6.3,<3.0.0)", "nlpcloud (>=1,<2)", "nltk (>=3,<4)", "nomic (>=1.0.43,<2.0.0)", "octoai-sdk (>=0.1.1,<0.2.0)", "openai (>=0,<1)", "openlm (>=0.0.5,<0.0.6)", "opensearch-py (>=2.0.0,<3.0.0)", "pdfminer-six (>=20221105,<20221106)", "pexpect (>=4.8.0,<5.0.0)", "pgvector (>=0.1.6,<0.2.0)", "pinecone-client (>=2,<3)", "pinecone-text (>=0.4.2,<0.5.0)", "psycopg2-binary (>=2.9.5,<3.0.0)", "pymongo (>=4.3.3,<5.0.0)", "pyowm (>=3.3.0,<4.0.0)", "pypdf (>=3.4.0,<4.0.0)", "pytesseract (>=0.3.10,<0.4.0)", "python-arango (>=7.5.9,<8.0.0)", "pyvespa (>=0.33.0,<0.34.0)", "qdrant-client (>=1.3.1,<2.0.0)", "rdflib (>=6.3.2,<7.0.0)", "redis (>=4,<5)", "requests-toolbelt (>=1.0.0,<2.0.0)", "sentence-transformers (>=2,<3)", "singlestoredb (>=0.7.1,<0.8.0)", "spacy (>=3,<4)", "steamship (>=2.16.9,<3.0.0)", "tensorflow-text (>=2.11.0,<3.0.0)", "tigrisdb (>=1.0.0b6,<2.0.0)", "tiktoken (>=0.3.2,<0.4.0)", "torch (>=1,<3)", "transformers (>=4,<5)", "weaviate-client (>=3,<4)", "wikipedia (>=1,<2)", "wolframalpha (==5.0.0)", "xinference (>=0.0.6,<0.0.7)"] -azure = ["azure-ai-formrecognizer (>=3.2.1,<4.0.0)", "azure-ai-vision (>=0.11.1b1,<0.12.0)", "azure-cognitiveservices-speech (>=1.28.0,<2.0.0)", "azure-core (>=1.26.4,<2.0.0)", "azure-cosmos (>=4.4.0b1,<5.0.0)", "azure-identity (>=1.12.0,<2.0.0)", "azure-search-documents (==11.4.0b6)", "openai (>=0,<1)"] -clarifai = ["clarifai (>=9.1.0)"] -cohere = ["cohere (>=4,<5)"] -docarray = ["docarray[hnswlib] (>=0.32.0,<0.33.0)"] -embeddings = ["sentence-transformers (>=2,<3)"] -extended-testing = ["amazon-textract-caller (<2)", "anthropic (>=0.3,<0.4)", "atlassian-python-api (>=3.36.0,<4.0.0)", "beautifulsoup4 (>=4,<5)", "bibtexparser (>=1.4.0,<2.0.0)", "cassio (>=0.0.7,<0.0.8)", "chardet (>=5.1.0,<6.0.0)", "esprima (>=4.0.1,<5.0.0)", "feedparser (>=6.0.10,<7.0.0)", "geopandas (>=0.13.1,<0.14.0)", "gitpython (>=3.1.32,<4.0.0)", "gql (>=3.4.1,<4.0.0)", "html2text (>=2020.1.16,<2021.0.0)", "jinja2 (>=3,<4)", "jq (>=1.4.1,<2.0.0)", "lxml (>=4.9.2,<5.0.0)", "mwparserfromhell (>=0.6.4,<0.7.0)", "mwxml (>=0.3.3,<0.4.0)", "newspaper3k (>=0.2.8,<0.3.0)", "openai (>=0,<1)", "pandas (>=2.0.1,<3.0.0)", "pdfminer-six (>=20221105,<20221106)", "pgvector (>=0.1.6,<0.2.0)", "psychicapi (>=0.8.0,<0.9.0)", "py-trello (>=0.19.0,<0.20.0)", "pymupdf (>=1.22.3,<2.0.0)", "pypdf (>=3.4.0,<4.0.0)", "pypdfium2 (>=4.10.0,<5.0.0)", "pyspark (>=3.4.0,<4.0.0)", "rank-bm25 (>=0.2.2,<0.3.0)", "rapidfuzz (>=3.1.1,<4.0.0)", "requests-toolbelt (>=1.0.0,<2.0.0)", "scikit-learn (>=1.2.2,<2.0.0)", "streamlit (>=1.18.0,<2.0.0)", "sympy (>=1.12,<2.0)", "telethon (>=1.28.5,<2.0.0)", "tqdm (>=4.48.0)", "xata (>=1.0.0a7,<2.0.0)", "xinference (>=0.0.6,<0.0.7)", "xmltodict (>=0.13.0,<0.14.0)", "zep-python (>=0.32)"] -javascript = ["esprima (>=4.0.1,<5.0.0)"] -llms = ["anthropic (>=0.3,<0.4)", "clarifai (>=9.1.0)", "cohere (>=4,<5)", "huggingface_hub (>=0,<1)", "manifest-ml (>=0.0.1,<0.0.2)", "nlpcloud (>=1,<2)", "openai (>=0,<1)", "openllm (>=0.1.19)", "openlm (>=0.0.5,<0.0.6)", "torch (>=1,<3)", "transformers (>=4,<5)", "xinference (>=0.0.6,<0.0.7)"] -openai = ["openai (>=0,<1)", "tiktoken (>=0.3.2,<0.4.0)"] -qdrant = ["qdrant-client (>=1.3.1,<2.0.0)"] -scheduled-testing = ["openai (>=0,<1)"] -text-helpers = ["chardet (>=5.1.0,<6.0.0)"] - -[[package]] -name = "langsmith" -version = "0.0.21" -description = "Client library to connect to the LangSmith LLM Tracing and Evaluation Platform." 
-optional = false -python-versions = ">=3.8.1,<4.0" -files = [ - {file = "langsmith-0.0.21-py3-none-any.whl", hash = "sha256:a04c6eb3b4fc6205b15a559705f726fd0114ee2b3bd8668a0bd11cf29d5c5992"}, - {file = "langsmith-0.0.21.tar.gz", hash = "sha256:ec90ddab6beee6c344cf0ed8ae7d68948740cf98e119dd97c571f3190555644e"}, -] - -[package.dependencies] -pydantic = ">=1,<2" -requests = ">=2,<3" - -[[package]] -name = "lark-parser" -version = "0.12.0" -description = "a modern parsing library" -optional = false -python-versions = "*" -files = [ - {file = "lark-parser-0.12.0.tar.gz", hash = "sha256:15967db1f1214013dca65b1180745047b9be457d73da224fcda3d9dd4e96a138"}, - {file = "lark_parser-0.12.0-py2.py3-none-any.whl", hash = "sha256:0eaf30cb5ba787fe404d73a7d6e61df97b21d5a63ac26c5008c78a494373c675"}, -] - -[package.extras] -atomic-cache = ["atomicwrites"] -nearley = ["js2py"] -regex = ["regex"] - -[[package]] -name = "lxml" -version = "4.9.3" -description = "Powerful and Pythonic XML processing library combining libxml2/libxslt with the ElementTree API." -optional = false -python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, != 3.4.*" -files = [ - {file = "lxml-4.9.3-cp27-cp27m-macosx_11_0_x86_64.whl", hash = "sha256:b0a545b46b526d418eb91754565ba5b63b1c0b12f9bd2f808c852d9b4b2f9b5c"}, - {file = "lxml-4.9.3-cp27-cp27m-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:075b731ddd9e7f68ad24c635374211376aa05a281673ede86cbe1d1b3455279d"}, - {file = "lxml-4.9.3-cp27-cp27m-manylinux_2_5_x86_64.manylinux1_x86_64.whl", hash = "sha256:1e224d5755dba2f4a9498e150c43792392ac9b5380aa1b845f98a1618c94eeef"}, - {file = "lxml-4.9.3-cp27-cp27m-win32.whl", hash = "sha256:2c74524e179f2ad6d2a4f7caf70e2d96639c0954c943ad601a9e146c76408ed7"}, - {file = "lxml-4.9.3-cp27-cp27m-win_amd64.whl", hash = "sha256:4f1026bc732b6a7f96369f7bfe1a4f2290fb34dce00d8644bc3036fb351a4ca1"}, - {file = "lxml-4.9.3-cp27-cp27mu-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:c0781a98ff5e6586926293e59480b64ddd46282953203c76ae15dbbbf302e8bb"}, - {file = "lxml-4.9.3-cp27-cp27mu-manylinux_2_5_x86_64.manylinux1_x86_64.whl", hash = "sha256:cef2502e7e8a96fe5ad686d60b49e1ab03e438bd9123987994528febd569868e"}, - {file = "lxml-4.9.3-cp310-cp310-macosx_11_0_x86_64.whl", hash = "sha256:b86164d2cff4d3aaa1f04a14685cbc072efd0b4f99ca5708b2ad1b9b5988a991"}, - {file = "lxml-4.9.3-cp310-cp310-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_24_i686.whl", hash = "sha256:42871176e7896d5d45138f6d28751053c711ed4d48d8e30b498da155af39aebd"}, - {file = "lxml-4.9.3-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_24_x86_64.whl", hash = "sha256:ae8b9c6deb1e634ba4f1930eb67ef6e6bf6a44b6eb5ad605642b2d6d5ed9ce3c"}, - {file = "lxml-4.9.3-cp310-cp310-manylinux_2_28_aarch64.whl", hash = "sha256:411007c0d88188d9f621b11d252cce90c4a2d1a49db6c068e3c16422f306eab8"}, - {file = "lxml-4.9.3-cp310-cp310-manylinux_2_28_x86_64.whl", hash = "sha256:cd47b4a0d41d2afa3e58e5bf1f62069255aa2fd6ff5ee41604418ca925911d76"}, - {file = "lxml-4.9.3-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:0e2cb47860da1f7e9a5256254b74ae331687b9672dfa780eed355c4c9c3dbd23"}, - {file = "lxml-4.9.3-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:1247694b26342a7bf47c02e513d32225ededd18045264d40758abeb3c838a51f"}, - {file = "lxml-4.9.3-cp310-cp310-win32.whl", hash = "sha256:cdb650fc86227eba20de1a29d4b2c1bfe139dc75a0669270033cb2ea3d391b85"}, - {file = "lxml-4.9.3-cp310-cp310-win_amd64.whl", hash = "sha256:97047f0d25cd4bcae81f9ec9dc290ca3e15927c192df17331b53bebe0e3ff96d"}, - {file 
= "lxml-4.9.3-cp311-cp311-macosx_11_0_universal2.whl", hash = "sha256:1f447ea5429b54f9582d4b955f5f1985f278ce5cf169f72eea8afd9502973dd5"}, - {file = "lxml-4.9.3-cp311-cp311-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_24_i686.whl", hash = "sha256:57d6ba0ca2b0c462f339640d22882acc711de224d769edf29962b09f77129cbf"}, - {file = "lxml-4.9.3-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_24_x86_64.whl", hash = "sha256:9767e79108424fb6c3edf8f81e6730666a50feb01a328f4a016464a5893f835a"}, - {file = "lxml-4.9.3-cp311-cp311-manylinux_2_28_aarch64.whl", hash = "sha256:71c52db65e4b56b8ddc5bb89fb2e66c558ed9d1a74a45ceb7dcb20c191c3df2f"}, - {file = "lxml-4.9.3-cp311-cp311-manylinux_2_28_x86_64.whl", hash = "sha256:d73d8ecf8ecf10a3bd007f2192725a34bd62898e8da27eb9d32a58084f93962b"}, - {file = "lxml-4.9.3-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:0a3d3487f07c1d7f150894c238299934a2a074ef590b583103a45002035be120"}, - {file = "lxml-4.9.3-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:9e28c51fa0ce5674be9f560c6761c1b441631901993f76700b1b30ca6c8378d6"}, - {file = "lxml-4.9.3-cp311-cp311-win32.whl", hash = "sha256:0bfd0767c5c1de2551a120673b72e5d4b628737cb05414f03c3277bf9bed3305"}, - {file = "lxml-4.9.3-cp311-cp311-win_amd64.whl", hash = "sha256:25f32acefac14ef7bd53e4218fe93b804ef6f6b92ffdb4322bb6d49d94cad2bc"}, - {file = "lxml-4.9.3-cp312-cp312-macosx_11_0_universal2.whl", hash = "sha256:d3ff32724f98fbbbfa9f49d82852b159e9784d6094983d9a8b7f2ddaebb063d4"}, - {file = "lxml-4.9.3-cp312-cp312-manylinux_2_28_aarch64.whl", hash = "sha256:48d6ed886b343d11493129e019da91d4039826794a3e3027321c56d9e71505be"}, - {file = "lxml-4.9.3-cp312-cp312-manylinux_2_28_x86_64.whl", hash = "sha256:9a92d3faef50658dd2c5470af249985782bf754c4e18e15afb67d3ab06233f13"}, - {file = "lxml-4.9.3-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:b4e4bc18382088514ebde9328da057775055940a1f2e18f6ad2d78aa0f3ec5b9"}, - {file = "lxml-4.9.3-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:fc9b106a1bf918db68619fdcd6d5ad4f972fdd19c01d19bdb6bf63f3589a9ec5"}, - {file = "lxml-4.9.3-cp312-cp312-win_amd64.whl", hash = "sha256:d37017287a7adb6ab77e1c5bee9bcf9660f90ff445042b790402a654d2ad81d8"}, - {file = "lxml-4.9.3-cp35-cp35m-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:56dc1f1ebccc656d1b3ed288f11e27172a01503fc016bcabdcbc0978b19352b7"}, - {file = "lxml-4.9.3-cp35-cp35m-manylinux_2_5_x86_64.manylinux1_x86_64.whl", hash = "sha256:578695735c5a3f51569810dfebd05dd6f888147a34f0f98d4bb27e92b76e05c2"}, - {file = "lxml-4.9.3-cp35-cp35m-win32.whl", hash = "sha256:704f61ba8c1283c71b16135caf697557f5ecf3e74d9e453233e4771d68a1f42d"}, - {file = "lxml-4.9.3-cp35-cp35m-win_amd64.whl", hash = "sha256:c41bfca0bd3532d53d16fd34d20806d5c2b1ace22a2f2e4c0008570bf2c58833"}, - {file = "lxml-4.9.3-cp36-cp36m-macosx_11_0_x86_64.whl", hash = "sha256:64f479d719dc9f4c813ad9bb6b28f8390360660b73b2e4beb4cb0ae7104f1c12"}, - {file = "lxml-4.9.3-cp36-cp36m-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_24_i686.whl", hash = "sha256:dd708cf4ee4408cf46a48b108fb9427bfa00b9b85812a9262b5c668af2533ea5"}, - {file = "lxml-4.9.3-cp36-cp36m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5c31c7462abdf8f2ac0577d9f05279727e698f97ecbb02f17939ea99ae8daa98"}, - {file = "lxml-4.9.3-cp36-cp36m-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_24_x86_64.whl", hash = "sha256:e3cd95e10c2610c360154afdc2f1480aea394f4a4f1ea0a5eacce49640c9b190"}, - {file = "lxml-4.9.3-cp36-cp36m-manylinux_2_28_x86_64.whl", hash = 
"sha256:4930be26af26ac545c3dffb662521d4e6268352866956672231887d18f0eaab2"}, - {file = "lxml-4.9.3-cp36-cp36m-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:4aec80cde9197340bc353d2768e2a75f5f60bacda2bab72ab1dc499589b3878c"}, - {file = "lxml-4.9.3-cp36-cp36m-manylinux_2_5_x86_64.manylinux1_x86_64.whl", hash = "sha256:14e019fd83b831b2e61baed40cab76222139926b1fb5ed0e79225bc0cae14584"}, - {file = "lxml-4.9.3-cp36-cp36m-musllinux_1_1_aarch64.whl", hash = "sha256:0c0850c8b02c298d3c7006b23e98249515ac57430e16a166873fc47a5d549287"}, - {file = "lxml-4.9.3-cp36-cp36m-musllinux_1_1_x86_64.whl", hash = "sha256:aca086dc5f9ef98c512bac8efea4483eb84abbf926eaeedf7b91479feb092458"}, - {file = "lxml-4.9.3-cp36-cp36m-win32.whl", hash = "sha256:50baa9c1c47efcaef189f31e3d00d697c6d4afda5c3cde0302d063492ff9b477"}, - {file = "lxml-4.9.3-cp36-cp36m-win_amd64.whl", hash = "sha256:bef4e656f7d98aaa3486d2627e7d2df1157d7e88e7efd43a65aa5dd4714916cf"}, - {file = "lxml-4.9.3-cp37-cp37m-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_24_i686.whl", hash = "sha256:46f409a2d60f634fe550f7133ed30ad5321ae2e6630f13657fb9479506b00601"}, - {file = "lxml-4.9.3-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.manylinux_2_24_aarch64.whl", hash = "sha256:4c28a9144688aef80d6ea666c809b4b0e50010a2aca784c97f5e6bf143d9f129"}, - {file = "lxml-4.9.3-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_24_x86_64.whl", hash = "sha256:141f1d1a9b663c679dc524af3ea1773e618907e96075262726c7612c02b149a4"}, - {file = "lxml-4.9.3-cp37-cp37m-manylinux_2_28_x86_64.whl", hash = "sha256:53ace1c1fd5a74ef662f844a0413446c0629d151055340e9893da958a374f70d"}, - {file = "lxml-4.9.3-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:17a753023436a18e27dd7769e798ce302963c236bc4114ceee5b25c18c52c693"}, - {file = "lxml-4.9.3-cp37-cp37m-manylinux_2_5_x86_64.manylinux1_x86_64.whl", hash = "sha256:7d298a1bd60c067ea75d9f684f5f3992c9d6766fadbc0bcedd39750bf344c2f4"}, - {file = "lxml-4.9.3-cp37-cp37m-musllinux_1_1_aarch64.whl", hash = "sha256:081d32421db5df44c41b7f08a334a090a545c54ba977e47fd7cc2deece78809a"}, - {file = "lxml-4.9.3-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:23eed6d7b1a3336ad92d8e39d4bfe09073c31bfe502f20ca5116b2a334f8ec02"}, - {file = "lxml-4.9.3-cp37-cp37m-win32.whl", hash = "sha256:1509dd12b773c02acd154582088820893109f6ca27ef7291b003d0e81666109f"}, - {file = "lxml-4.9.3-cp37-cp37m-win_amd64.whl", hash = "sha256:120fa9349a24c7043854c53cae8cec227e1f79195a7493e09e0c12e29f918e52"}, - {file = "lxml-4.9.3-cp38-cp38-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_24_i686.whl", hash = "sha256:4d2d1edbca80b510443f51afd8496be95529db04a509bc8faee49c7b0fb6d2cc"}, - {file = "lxml-4.9.3-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.manylinux_2_24_aarch64.whl", hash = "sha256:8d7e43bd40f65f7d97ad8ef5c9b1778943d02f04febef12def25f7583d19baac"}, - {file = "lxml-4.9.3-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_24_x86_64.whl", hash = "sha256:71d66ee82e7417828af6ecd7db817913cb0cf9d4e61aa0ac1fde0583d84358db"}, - {file = "lxml-4.9.3-cp38-cp38-manylinux_2_28_x86_64.whl", hash = "sha256:6fc3c450eaa0b56f815c7b62f2b7fba7266c4779adcf1cece9e6deb1de7305ce"}, - {file = "lxml-4.9.3-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:65299ea57d82fb91c7f019300d24050c4ddeb7c5a190e076b5f48a2b43d19c42"}, - {file = "lxml-4.9.3-cp38-cp38-manylinux_2_5_x86_64.manylinux1_x86_64.whl", hash = "sha256:eadfbbbfb41b44034a4c757fd5d70baccd43296fb894dba0295606a7cf3124aa"}, - {file = 
"lxml-4.9.3-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:3e9bdd30efde2b9ccfa9cb5768ba04fe71b018a25ea093379c857c9dad262c40"}, - {file = "lxml-4.9.3-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:fcdd00edfd0a3001e0181eab3e63bd5c74ad3e67152c84f93f13769a40e073a7"}, - {file = "lxml-4.9.3-cp38-cp38-win32.whl", hash = "sha256:57aba1bbdf450b726d58b2aea5fe47c7875f5afb2c4a23784ed78f19a0462574"}, - {file = "lxml-4.9.3-cp38-cp38-win_amd64.whl", hash = "sha256:92af161ecbdb2883c4593d5ed4815ea71b31fafd7fd05789b23100d081ecac96"}, - {file = "lxml-4.9.3-cp39-cp39-macosx_11_0_x86_64.whl", hash = "sha256:9bb6ad405121241e99a86efff22d3ef469024ce22875a7ae045896ad23ba2340"}, - {file = "lxml-4.9.3-cp39-cp39-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_24_i686.whl", hash = "sha256:8ed74706b26ad100433da4b9d807eae371efaa266ffc3e9191ea436087a9d6a7"}, - {file = "lxml-4.9.3-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_24_x86_64.whl", hash = "sha256:fbf521479bcac1e25a663df882c46a641a9bff6b56dc8b0fafaebd2f66fb231b"}, - {file = "lxml-4.9.3-cp39-cp39-manylinux_2_28_aarch64.whl", hash = "sha256:303bf1edce6ced16bf67a18a1cf8339d0db79577eec5d9a6d4a80f0fb10aa2da"}, - {file = "lxml-4.9.3-cp39-cp39-manylinux_2_28_x86_64.whl", hash = "sha256:5515edd2a6d1a5a70bfcdee23b42ec33425e405c5b351478ab7dc9347228f96e"}, - {file = "lxml-4.9.3-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:690dafd0b187ed38583a648076865d8c229661ed20e48f2335d68e2cf7dc829d"}, - {file = "lxml-4.9.3-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.whl", hash = "sha256:b6420a005548ad52154c8ceab4a1290ff78d757f9e5cbc68f8c77089acd3c432"}, - {file = "lxml-4.9.3-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:bb3bb49c7a6ad9d981d734ef7c7193bc349ac338776a0360cc671eaee89bcf69"}, - {file = "lxml-4.9.3-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:d27be7405547d1f958b60837dc4c1007da90b8b23f54ba1f8b728c78fdb19d50"}, - {file = "lxml-4.9.3-cp39-cp39-win32.whl", hash = "sha256:8df133a2ea5e74eef5e8fc6f19b9e085f758768a16e9877a60aec455ed2609b2"}, - {file = "lxml-4.9.3-cp39-cp39-win_amd64.whl", hash = "sha256:4dd9a263e845a72eacb60d12401e37c616438ea2e5442885f65082c276dfb2b2"}, - {file = "lxml-4.9.3-pp310-pypy310_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:6689a3d7fd13dc687e9102a27e98ef33730ac4fe37795d5036d18b4d527abd35"}, - {file = "lxml-4.9.3-pp37-pypy37_pp73-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_24_i686.whl", hash = "sha256:f6bdac493b949141b733c5345b6ba8f87a226029cbabc7e9e121a413e49441e0"}, - {file = "lxml-4.9.3-pp37-pypy37_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_24_x86_64.whl", hash = "sha256:05186a0f1346ae12553d66df1cfce6f251589fea3ad3da4f3ef4e34b2d58c6a3"}, - {file = "lxml-4.9.3-pp37-pypy37_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:c2006f5c8d28dee289f7020f721354362fa304acbaaf9745751ac4006650254b"}, - {file = "lxml-4.9.3-pp38-pypy38_pp73-macosx_11_0_x86_64.whl", hash = "sha256:5c245b783db29c4e4fbbbfc9c5a78be496c9fea25517f90606aa1f6b2b3d5f7b"}, - {file = "lxml-4.9.3-pp38-pypy38_pp73-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_24_i686.whl", hash = "sha256:4fb960a632a49f2f089d522f70496640fdf1218f1243889da3822e0a9f5f3ba7"}, - {file = "lxml-4.9.3-pp38-pypy38_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_24_x86_64.whl", hash = "sha256:50670615eaf97227d5dc60de2dc99fb134a7130d310d783314e7724bf163f75d"}, - {file = "lxml-4.9.3-pp38-pypy38_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:9719fe17307a9e814580af1f5c6e05ca593b12fb7e44fe62450a5384dbf61b4b"}, - {file = 
"lxml-4.9.3-pp38-pypy38_pp73-win_amd64.whl", hash = "sha256:3331bece23c9ee066e0fb3f96c61322b9e0f54d775fccefff4c38ca488de283a"}, - {file = "lxml-4.9.3-pp39-pypy39_pp73-macosx_11_0_x86_64.whl", hash = "sha256:ed667f49b11360951e201453fc3967344d0d0263aa415e1619e85ae7fd17b4e0"}, - {file = "lxml-4.9.3-pp39-pypy39_pp73-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_24_i686.whl", hash = "sha256:8b77946fd508cbf0fccd8e400a7f71d4ac0e1595812e66025bac475a8e811694"}, - {file = "lxml-4.9.3-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_24_x86_64.whl", hash = "sha256:e4da8ca0c0c0aea88fd46be8e44bd49716772358d648cce45fe387f7b92374a7"}, - {file = "lxml-4.9.3-pp39-pypy39_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:fe4bda6bd4340caa6e5cf95e73f8fea5c4bfc55763dd42f1b50a94c1b4a2fbd4"}, - {file = "lxml-4.9.3-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:f3df3db1d336b9356dd3112eae5f5c2b8b377f3bc826848567f10bfddfee77e9"}, - {file = "lxml-4.9.3.tar.gz", hash = "sha256:48628bd53a426c9eb9bc066a923acaa0878d1e86129fd5359aee99285f4eed9c"}, -] - -[package.extras] -cssselect = ["cssselect (>=0.7)"] -html5 = ["html5lib"] -htmlsoup = ["BeautifulSoup4"] -source = ["Cython (>=0.29.35)"] - -[[package]] -name = "lz4" -version = "4.3.2" -description = "LZ4 Bindings for Python" -optional = false -python-versions = ">=3.7" -files = [ - {file = "lz4-4.3.2-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:1c4c100d99eed7c08d4e8852dd11e7d1ec47a3340f49e3a96f8dfbba17ffb300"}, - {file = "lz4-4.3.2-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:edd8987d8415b5dad25e797043936d91535017237f72fa456601be1479386c92"}, - {file = "lz4-4.3.2-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f7c50542b4ddceb74ab4f8b3435327a0861f06257ca501d59067a6a482535a77"}, - {file = "lz4-4.3.2-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0f5614d8229b33d4a97cb527db2a1ac81308c6e796e7bdb5d1309127289f69d5"}, - {file = "lz4-4.3.2-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:8f00a9ba98f6364cadda366ae6469b7b3568c0cced27e16a47ddf6b774169270"}, - {file = "lz4-4.3.2-cp310-cp310-win32.whl", hash = "sha256:b10b77dc2e6b1daa2f11e241141ab8285c42b4ed13a8642495620416279cc5b2"}, - {file = "lz4-4.3.2-cp310-cp310-win_amd64.whl", hash = "sha256:86480f14a188c37cb1416cdabacfb4e42f7a5eab20a737dac9c4b1c227f3b822"}, - {file = "lz4-4.3.2-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:7c2df117def1589fba1327dceee51c5c2176a2b5a7040b45e84185ce0c08b6a3"}, - {file = "lz4-4.3.2-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:1f25eb322eeb24068bb7647cae2b0732b71e5c639e4e4026db57618dcd8279f0"}, - {file = "lz4-4.3.2-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:8df16c9a2377bdc01e01e6de5a6e4bbc66ddf007a6b045688e285d7d9d61d1c9"}, - {file = "lz4-4.3.2-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f571eab7fec554d3b1db0d666bdc2ad85c81f4b8cb08906c4c59a8cad75e6e22"}, - {file = "lz4-4.3.2-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:7211dc8f636ca625abc3d4fb9ab74e5444b92df4f8d58ec83c8868a2b0ff643d"}, - {file = "lz4-4.3.2-cp311-cp311-win32.whl", hash = "sha256:867664d9ca9bdfce840ac96d46cd8838c9ae891e859eb98ce82fcdf0e103a947"}, - {file = "lz4-4.3.2-cp311-cp311-win_amd64.whl", hash = "sha256:a6a46889325fd60b8a6b62ffc61588ec500a1883db32cddee9903edfba0b7584"}, - {file = "lz4-4.3.2-cp37-cp37m-macosx_10_9_x86_64.whl", hash = 
"sha256:3a85b430138882f82f354135b98c320dafb96fc8fe4656573d95ab05de9eb092"}, - {file = "lz4-4.3.2-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:65d5c93f8badacfa0456b660285e394e65023ef8071142e0dcbd4762166e1be0"}, - {file = "lz4-4.3.2-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6b50f096a6a25f3b2edca05aa626ce39979d63c3b160687c8c6d50ac3943d0ba"}, - {file = "lz4-4.3.2-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:200d05777d61ba1ff8d29cb51c534a162ea0b4fe6d3c28be3571a0a48ff36080"}, - {file = "lz4-4.3.2-cp37-cp37m-win32.whl", hash = "sha256:edc2fb3463d5d9338ccf13eb512aab61937be50aa70734bcf873f2f493801d3b"}, - {file = "lz4-4.3.2-cp37-cp37m-win_amd64.whl", hash = "sha256:83acfacab3a1a7ab9694333bcb7950fbeb0be21660d236fd09c8337a50817897"}, - {file = "lz4-4.3.2-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:7a9eec24ec7d8c99aab54de91b4a5a149559ed5b3097cf30249b665689b3d402"}, - {file = "lz4-4.3.2-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:31d72731c4ac6ebdce57cd9a5cabe0aecba229c4f31ba3e2c64ae52eee3fdb1c"}, - {file = "lz4-4.3.2-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:83903fe6db92db0be101acedc677aa41a490b561567fe1b3fe68695b2110326c"}, - {file = "lz4-4.3.2-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:926b26db87ec8822cf1870efc3d04d06062730ec3279bbbd33ba47a6c0a5c673"}, - {file = "lz4-4.3.2-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:e05afefc4529e97c08e65ef92432e5f5225c0bb21ad89dee1e06a882f91d7f5e"}, - {file = "lz4-4.3.2-cp38-cp38-win32.whl", hash = "sha256:ad38dc6a7eea6f6b8b642aaa0683253288b0460b70cab3216838747163fb774d"}, - {file = "lz4-4.3.2-cp38-cp38-win_amd64.whl", hash = "sha256:7e2dc1bd88b60fa09b9b37f08553f45dc2b770c52a5996ea52b2b40f25445676"}, - {file = "lz4-4.3.2-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:edda4fb109439b7f3f58ed6bede59694bc631c4b69c041112b1b7dc727fffb23"}, - {file = "lz4-4.3.2-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:0ca83a623c449295bafad745dcd399cea4c55b16b13ed8cfea30963b004016c9"}, - {file = "lz4-4.3.2-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d5ea0e788dc7e2311989b78cae7accf75a580827b4d96bbaf06c7e5a03989bd5"}, - {file = "lz4-4.3.2-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a98b61e504fb69f99117b188e60b71e3c94469295571492a6468c1acd63c37ba"}, - {file = "lz4-4.3.2-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:4931ab28a0d1c133104613e74eec1b8bb1f52403faabe4f47f93008785c0b929"}, - {file = "lz4-4.3.2-cp39-cp39-win32.whl", hash = "sha256:ec6755cacf83f0c5588d28abb40a1ac1643f2ff2115481089264c7630236618a"}, - {file = "lz4-4.3.2-cp39-cp39-win_amd64.whl", hash = "sha256:4caedeb19e3ede6c7a178968b800f910db6503cb4cb1e9cc9221157572139b49"}, - {file = "lz4-4.3.2.tar.gz", hash = "sha256:e1431d84a9cfb23e6773e72078ce8e65cad6745816d4cbf9ae67da5ea419acda"}, -] - -[package.extras] -docs = ["sphinx (>=1.6.0)", "sphinx-bootstrap-theme"] -flake8 = ["flake8"] -tests = ["psutil", "pytest (!=3.3.0)", "pytest-cov"] - -[[package]] -name = "markdown" -version = "3.4.3" -description = "Python implementation of John Gruber's Markdown." 
-optional = false -python-versions = ">=3.7" -files = [ - {file = "Markdown-3.4.3-py3-none-any.whl", hash = "sha256:065fd4df22da73a625f14890dd77eb8040edcbd68794bcd35943be14490608b2"}, - {file = "Markdown-3.4.3.tar.gz", hash = "sha256:8bf101198e004dc93e84a12a7395e31aac6a9c9942848ae1d99b9d72cf9b3520"}, -] - -[package.extras] -testing = ["coverage", "pyyaml"] - -[[package]] -name = "markupsafe" -version = "2.1.3" -description = "Safely add untrusted strings to HTML/XML markup." -optional = false -python-versions = ">=3.7" -files = [ - {file = "MarkupSafe-2.1.3-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:cd0f502fe016460680cd20aaa5a76d241d6f35a1c3350c474bac1273803893fa"}, - {file = "MarkupSafe-2.1.3-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:e09031c87a1e51556fdcb46e5bd4f59dfb743061cf93c4d6831bf894f125eb57"}, - {file = "MarkupSafe-2.1.3-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:68e78619a61ecf91e76aa3e6e8e33fc4894a2bebe93410754bd28fce0a8a4f9f"}, - {file = "MarkupSafe-2.1.3-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:65c1a9bcdadc6c28eecee2c119465aebff8f7a584dd719facdd9e825ec61ab52"}, - {file = "MarkupSafe-2.1.3-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:525808b8019e36eb524b8c68acdd63a37e75714eac50e988180b169d64480a00"}, - {file = "MarkupSafe-2.1.3-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:962f82a3086483f5e5f64dbad880d31038b698494799b097bc59c2edf392fce6"}, - {file = "MarkupSafe-2.1.3-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:aa7bd130efab1c280bed0f45501b7c8795f9fdbeb02e965371bbef3523627779"}, - {file = "MarkupSafe-2.1.3-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:c9c804664ebe8f83a211cace637506669e7890fec1b4195b505c214e50dd4eb7"}, - {file = "MarkupSafe-2.1.3-cp310-cp310-win32.whl", hash = "sha256:10bbfe99883db80bdbaff2dcf681dfc6533a614f700da1287707e8a5d78a8431"}, - {file = "MarkupSafe-2.1.3-cp310-cp310-win_amd64.whl", hash = "sha256:1577735524cdad32f9f694208aa75e422adba74f1baee7551620e43a3141f559"}, - {file = "MarkupSafe-2.1.3-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:ad9e82fb8f09ade1c3e1b996a6337afac2b8b9e365f926f5a61aacc71adc5b3c"}, - {file = "MarkupSafe-2.1.3-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:3c0fae6c3be832a0a0473ac912810b2877c8cb9d76ca48de1ed31e1c68386575"}, - {file = "MarkupSafe-2.1.3-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:b076b6226fb84157e3f7c971a47ff3a679d837cf338547532ab866c57930dbee"}, - {file = "MarkupSafe-2.1.3-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:bfce63a9e7834b12b87c64d6b155fdd9b3b96191b6bd334bf37db7ff1fe457f2"}, - {file = "MarkupSafe-2.1.3-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:338ae27d6b8745585f87218a3f23f1512dbf52c26c28e322dbe54bcede54ccb9"}, - {file = "MarkupSafe-2.1.3-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:e4dd52d80b8c83fdce44e12478ad2e85c64ea965e75d66dbeafb0a3e77308fcc"}, - {file = "MarkupSafe-2.1.3-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:df0be2b576a7abbf737b1575f048c23fb1d769f267ec4358296f31c2479db8f9"}, - {file = "MarkupSafe-2.1.3-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:5bbe06f8eeafd38e5d0a4894ffec89378b6c6a625ff57e3028921f8ff59318ac"}, - {file = "MarkupSafe-2.1.3-cp311-cp311-win32.whl", hash = "sha256:dd15ff04ffd7e05ffcb7fe79f1b98041b8ea30ae9234aed2a9168b5797c3effb"}, - {file = 
"MarkupSafe-2.1.3-cp311-cp311-win_amd64.whl", hash = "sha256:134da1eca9ec0ae528110ccc9e48041e0828d79f24121a1a146161103c76e686"}, - {file = "MarkupSafe-2.1.3-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:8e254ae696c88d98da6555f5ace2279cf7cd5b3f52be2b5cf97feafe883b58d2"}, - {file = "MarkupSafe-2.1.3-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:cb0932dc158471523c9637e807d9bfb93e06a95cbf010f1a38b98623b929ef2b"}, - {file = "MarkupSafe-2.1.3-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9402b03f1a1b4dc4c19845e5c749e3ab82d5078d16a2a4c2cd2df62d57bb0707"}, - {file = "MarkupSafe-2.1.3-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:ca379055a47383d02a5400cb0d110cef0a776fc644cda797db0c5696cfd7e18e"}, - {file = "MarkupSafe-2.1.3-cp37-cp37m-musllinux_1_1_aarch64.whl", hash = "sha256:b7ff0f54cb4ff66dd38bebd335a38e2c22c41a8ee45aa608efc890ac3e3931bc"}, - {file = "MarkupSafe-2.1.3-cp37-cp37m-musllinux_1_1_i686.whl", hash = "sha256:c011a4149cfbcf9f03994ec2edffcb8b1dc2d2aede7ca243746df97a5d41ce48"}, - {file = "MarkupSafe-2.1.3-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:56d9f2ecac662ca1611d183feb03a3fa4406469dafe241673d521dd5ae92a155"}, - {file = "MarkupSafe-2.1.3-cp37-cp37m-win32.whl", hash = "sha256:8758846a7e80910096950b67071243da3e5a20ed2546e6392603c096778d48e0"}, - {file = "MarkupSafe-2.1.3-cp37-cp37m-win_amd64.whl", hash = "sha256:787003c0ddb00500e49a10f2844fac87aa6ce977b90b0feaaf9de23c22508b24"}, - {file = "MarkupSafe-2.1.3-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:2ef12179d3a291be237280175b542c07a36e7f60718296278d8593d21ca937d4"}, - {file = "MarkupSafe-2.1.3-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:2c1b19b3aaacc6e57b7e25710ff571c24d6c3613a45e905b1fde04d691b98ee0"}, - {file = "MarkupSafe-2.1.3-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:8afafd99945ead6e075b973fefa56379c5b5c53fd8937dad92c662da5d8fd5ee"}, - {file = "MarkupSafe-2.1.3-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8c41976a29d078bb235fea9b2ecd3da465df42a562910f9022f1a03107bd02be"}, - {file = "MarkupSafe-2.1.3-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:d080e0a5eb2529460b30190fcfcc4199bd7f827663f858a226a81bc27beaa97e"}, - {file = "MarkupSafe-2.1.3-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:69c0f17e9f5a7afdf2cc9fb2d1ce6aabdb3bafb7f38017c0b77862bcec2bbad8"}, - {file = "MarkupSafe-2.1.3-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:504b320cd4b7eff6f968eddf81127112db685e81f7e36e75f9f84f0df46041c3"}, - {file = "MarkupSafe-2.1.3-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:42de32b22b6b804f42c5d98be4f7e5e977ecdd9ee9b660fda1a3edf03b11792d"}, - {file = "MarkupSafe-2.1.3-cp38-cp38-win32.whl", hash = "sha256:ceb01949af7121f9fc39f7d27f91be8546f3fb112c608bc4029aef0bab86a2a5"}, - {file = "MarkupSafe-2.1.3-cp38-cp38-win_amd64.whl", hash = "sha256:1b40069d487e7edb2676d3fbdb2b0829ffa2cd63a2ec26c4938b2d34391b4ecc"}, - {file = "MarkupSafe-2.1.3-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:8023faf4e01efadfa183e863fefde0046de576c6f14659e8782065bcece22198"}, - {file = "MarkupSafe-2.1.3-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:6b2b56950d93e41f33b4223ead100ea0fe11f8e6ee5f641eb753ce4b77a7042b"}, - {file = "MarkupSafe-2.1.3-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = 
"sha256:9dcdfd0eaf283af041973bff14a2e143b8bd64e069f4c383416ecd79a81aab58"}, - {file = "MarkupSafe-2.1.3-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:05fb21170423db021895e1ea1e1f3ab3adb85d1c2333cbc2310f2a26bc77272e"}, - {file = "MarkupSafe-2.1.3-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:282c2cb35b5b673bbcadb33a585408104df04f14b2d9b01d4c345a3b92861c2c"}, - {file = "MarkupSafe-2.1.3-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:ab4a0df41e7c16a1392727727e7998a467472d0ad65f3ad5e6e765015df08636"}, - {file = "MarkupSafe-2.1.3-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:7ef3cb2ebbf91e330e3bb937efada0edd9003683db6b57bb108c4001f37a02ea"}, - {file = "MarkupSafe-2.1.3-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:0a4e4a1aff6c7ac4cd55792abf96c915634c2b97e3cc1c7129578aa68ebd754e"}, - {file = "MarkupSafe-2.1.3-cp39-cp39-win32.whl", hash = "sha256:fec21693218efe39aa7f8599346e90c705afa52c5b31ae019b2e57e8f6542bb2"}, - {file = "MarkupSafe-2.1.3-cp39-cp39-win_amd64.whl", hash = "sha256:3fd4abcb888d15a94f32b75d8fd18ee162ca0c064f35b11134be77050296d6ba"}, - {file = "MarkupSafe-2.1.3.tar.gz", hash = "sha256:af598ed32d6ae86f1b747b82783958b1a4ab8f617b06fe68795c7f026abbdcad"}, -] - -[[package]] -name = "marshmallow" -version = "3.19.0" -description = "A lightweight library for converting complex datatypes to and from native Python datatypes." -optional = false -python-versions = ">=3.7" -files = [ - {file = "marshmallow-3.19.0-py3-none-any.whl", hash = "sha256:93f0958568da045b0021ec6aeb7ac37c81bfcccbb9a0e7ed8559885070b3a19b"}, - {file = "marshmallow-3.19.0.tar.gz", hash = "sha256:90032c0fd650ce94b6ec6dc8dfeb0e3ff50c144586462c389b81a07205bedb78"}, -] - -[package.dependencies] -packaging = ">=17.0" - -[package.extras] -dev = ["flake8 (==5.0.4)", "flake8-bugbear (==22.10.25)", "mypy (==0.990)", "pre-commit (>=2.4,<3.0)", "pytest", "pytz", "simplejson", "tox"] -docs = ["alabaster (==0.7.12)", "autodocsumm (==0.2.9)", "sphinx (==5.3.0)", "sphinx-issues (==3.0.1)", "sphinx-version-warning (==1.1.2)"] -lint = ["flake8 (==5.0.4)", "flake8-bugbear (==22.10.25)", "mypy (==0.990)", "pre-commit (>=2.4,<3.0)"] -tests = ["pytest", "pytz", "simplejson"] - -[[package]] -name = "marshmallow-enum" -version = "1.5.1" -description = "Enum field for Marshmallow" -optional = false -python-versions = "*" -files = [ - {file = "marshmallow-enum-1.5.1.tar.gz", hash = "sha256:38e697e11f45a8e64b4a1e664000897c659b60aa57bfa18d44e226a9920b6e58"}, - {file = "marshmallow_enum-1.5.1-py2.py3-none-any.whl", hash = "sha256:57161ab3dbfde4f57adeb12090f39592e992b9c86d206d02f6bd03ebec60f072"}, -] - -[package.dependencies] -marshmallow = ">=2.0.0" - -[[package]] -name = "monotonic" -version = "1.6" -description = "An implementation of time.monotonic() for Python 2 & < 3.3" -optional = false -python-versions = "*" -files = [ - {file = "monotonic-1.6-py2.py3-none-any.whl", hash = "sha256:68687e19a14f11f26d140dd5c86f3dba4bf5df58003000ed467e0e2a69bca96c"}, - {file = "monotonic-1.6.tar.gz", hash = "sha256:3a55207bcfed53ddd5c5bae174524062935efed17792e9de2ad0205ce9ad63f7"}, -] - -[[package]] -name = "mpmath" -version = "1.3.0" -description = "Python library for arbitrary-precision floating-point arithmetic" -optional = false -python-versions = "*" -files = [ - {file = "mpmath-1.3.0-py3-none-any.whl", hash = "sha256:a0b2b9fe80bbcd81a6647ff13108738cfb482d481d826cc0e02f5b35e5c88d2c"}, - {file = "mpmath-1.3.0.tar.gz", hash = 
"sha256:7a28eb2a9774d00c7bc92411c19a89209d5da7c4c9a9e227be8330a23a25b91f"}, -] - -[package.extras] -develop = ["codecov", "pycodestyle", "pytest (>=4.6)", "pytest-cov", "wheel"] -docs = ["sphinx"] -gmpy = ["gmpy2 (>=2.1.0a4)"] -tests = ["pytest (>=4.6)"] - -[[package]] -name = "msg-parser" -version = "1.2.0" -description = "This module enables reading, parsing and converting Microsoft Outlook MSG E-Mail files." -optional = false -python-versions = ">=3.4" -files = [ - {file = "msg_parser-1.2.0-py2.py3-none-any.whl", hash = "sha256:d47a2f0b2a359cb189fad83cc991b63ea781ecc70d91410324273fbf93e95375"}, - {file = "msg_parser-1.2.0.tar.gz", hash = "sha256:0de858d4fcebb6c8f6f028da83a17a20fe01cdce67c490779cf43b3b0162aa66"}, -] - -[package.dependencies] -olefile = ">=0.46" - -[package.extras] -rtf = ["compressed-rtf (>=1.0.5)"] - -[[package]] -name = "msoffcrypto-tool" -version = "5.0.1" -description = "Python tool and library for decrypting MS Office files with passwords or other keys" -optional = false -python-versions = ">=3.7,<4.0" -files = [ - {file = "msoffcrypto_tool-5.0.1-py3-none-any.whl", hash = "sha256:2b489c8a2b13bec07b94c8f5ce9054111dec3223ff8bedfd486cae3c299be54b"}, - {file = "msoffcrypto_tool-5.0.1.tar.gz", hash = "sha256:9efd0ef5cc3e086e2d175e7a5d7b2b8cb59836c896b8a486d362bbca166db645"}, -] - -[package.dependencies] -cryptography = ">=35.0" -olefile = ">=0.46" - -[[package]] -name = "multidict" -version = "6.0.4" -description = "multidict implementation" -optional = false -python-versions = ">=3.7" -files = [ - {file = "multidict-6.0.4-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:0b1a97283e0c85772d613878028fec909f003993e1007eafa715b24b377cb9b8"}, - {file = "multidict-6.0.4-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:eeb6dcc05e911516ae3d1f207d4b0520d07f54484c49dfc294d6e7d63b734171"}, - {file = "multidict-6.0.4-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:d6d635d5209b82a3492508cf5b365f3446afb65ae7ebd755e70e18f287b0adf7"}, - {file = "multidict-6.0.4-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c048099e4c9e9d615545e2001d3d8a4380bd403e1a0578734e0d31703d1b0c0b"}, - {file = "multidict-6.0.4-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:ea20853c6dbbb53ed34cb4d080382169b6f4554d394015f1bef35e881bf83547"}, - {file = "multidict-6.0.4-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:16d232d4e5396c2efbbf4f6d4df89bfa905eb0d4dc5b3549d872ab898451f569"}, - {file = "multidict-6.0.4-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:36c63aaa167f6c6b04ef2c85704e93af16c11d20de1d133e39de6a0e84582a93"}, - {file = "multidict-6.0.4-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:64bdf1086b6043bf519869678f5f2757f473dee970d7abf6da91ec00acb9cb98"}, - {file = "multidict-6.0.4-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:43644e38f42e3af682690876cff722d301ac585c5b9e1eacc013b7a3f7b696a0"}, - {file = "multidict-6.0.4-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:7582a1d1030e15422262de9f58711774e02fa80df0d1578995c76214f6954988"}, - {file = "multidict-6.0.4-cp310-cp310-musllinux_1_1_ppc64le.whl", hash = "sha256:ddff9c4e225a63a5afab9dd15590432c22e8057e1a9a13d28ed128ecf047bbdc"}, - {file = "multidict-6.0.4-cp310-cp310-musllinux_1_1_s390x.whl", hash = "sha256:ee2a1ece51b9b9e7752e742cfb661d2a29e7bcdba2d27e66e28a99f1890e4fa0"}, - {file = "multidict-6.0.4-cp310-cp310-musllinux_1_1_x86_64.whl", hash = 
"sha256:a2e4369eb3d47d2034032a26c7a80fcb21a2cb22e1173d761a162f11e562caa5"}, - {file = "multidict-6.0.4-cp310-cp310-win32.whl", hash = "sha256:574b7eae1ab267e5f8285f0fe881f17efe4b98c39a40858247720935b893bba8"}, - {file = "multidict-6.0.4-cp310-cp310-win_amd64.whl", hash = "sha256:4dcbb0906e38440fa3e325df2359ac6cb043df8e58c965bb45f4e406ecb162cc"}, - {file = "multidict-6.0.4-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:0dfad7a5a1e39c53ed00d2dd0c2e36aed4650936dc18fd9a1826a5ae1cad6f03"}, - {file = "multidict-6.0.4-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:64da238a09d6039e3bd39bb3aee9c21a5e34f28bfa5aa22518581f910ff94af3"}, - {file = "multidict-6.0.4-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:ff959bee35038c4624250473988b24f846cbeb2c6639de3602c073f10410ceba"}, - {file = "multidict-6.0.4-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:01a3a55bd90018c9c080fbb0b9f4891db37d148a0a18722b42f94694f8b6d4c9"}, - {file = "multidict-6.0.4-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:c5cb09abb18c1ea940fb99360ea0396f34d46566f157122c92dfa069d3e0e982"}, - {file = "multidict-6.0.4-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:666daae833559deb2d609afa4490b85830ab0dfca811a98b70a205621a6109fe"}, - {file = "multidict-6.0.4-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:11bdf3f5e1518b24530b8241529d2050014c884cf18b6fc69c0c2b30ca248710"}, - {file = "multidict-6.0.4-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:7d18748f2d30f94f498e852c67d61261c643b349b9d2a581131725595c45ec6c"}, - {file = "multidict-6.0.4-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:458f37be2d9e4c95e2d8866a851663cbc76e865b78395090786f6cd9b3bbf4f4"}, - {file = "multidict-6.0.4-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:b1a2eeedcead3a41694130495593a559a668f382eee0727352b9a41e1c45759a"}, - {file = "multidict-6.0.4-cp311-cp311-musllinux_1_1_ppc64le.whl", hash = "sha256:7d6ae9d593ef8641544d6263c7fa6408cc90370c8cb2bbb65f8d43e5b0351d9c"}, - {file = "multidict-6.0.4-cp311-cp311-musllinux_1_1_s390x.whl", hash = "sha256:5979b5632c3e3534e42ca6ff856bb24b2e3071b37861c2c727ce220d80eee9ed"}, - {file = "multidict-6.0.4-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:dcfe792765fab89c365123c81046ad4103fcabbc4f56d1c1997e6715e8015461"}, - {file = "multidict-6.0.4-cp311-cp311-win32.whl", hash = "sha256:3601a3cece3819534b11d4efc1eb76047488fddd0c85a3948099d5da4d504636"}, - {file = "multidict-6.0.4-cp311-cp311-win_amd64.whl", hash = "sha256:81a4f0b34bd92df3da93315c6a59034df95866014ac08535fc819f043bfd51f0"}, - {file = "multidict-6.0.4-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:67040058f37a2a51ed8ea8f6b0e6ee5bd78ca67f169ce6122f3e2ec80dfe9b78"}, - {file = "multidict-6.0.4-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:853888594621e6604c978ce2a0444a1e6e70c8d253ab65ba11657659dcc9100f"}, - {file = "multidict-6.0.4-cp37-cp37m-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:39ff62e7d0f26c248b15e364517a72932a611a9b75f35b45be078d81bdb86603"}, - {file = "multidict-6.0.4-cp37-cp37m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:af048912e045a2dc732847d33821a9d84ba553f5c5f028adbd364dd4765092ac"}, - {file = "multidict-6.0.4-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b1e8b901e607795ec06c9e42530788c45ac21ef3aaa11dbd0c69de543bfb79a9"}, - {file = 
"multidict-6.0.4-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:62501642008a8b9871ddfccbf83e4222cf8ac0d5aeedf73da36153ef2ec222d2"}, - {file = "multidict-6.0.4-cp37-cp37m-musllinux_1_1_aarch64.whl", hash = "sha256:99b76c052e9f1bc0721f7541e5e8c05db3941eb9ebe7b8553c625ef88d6eefde"}, - {file = "multidict-6.0.4-cp37-cp37m-musllinux_1_1_i686.whl", hash = "sha256:509eac6cf09c794aa27bcacfd4d62c885cce62bef7b2c3e8b2e49d365b5003fe"}, - {file = "multidict-6.0.4-cp37-cp37m-musllinux_1_1_ppc64le.whl", hash = "sha256:21a12c4eb6ddc9952c415f24eef97e3e55ba3af61f67c7bc388dcdec1404a067"}, - {file = "multidict-6.0.4-cp37-cp37m-musllinux_1_1_s390x.whl", hash = "sha256:5cad9430ab3e2e4fa4a2ef4450f548768400a2ac635841bc2a56a2052cdbeb87"}, - {file = "multidict-6.0.4-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:ab55edc2e84460694295f401215f4a58597f8f7c9466faec545093045476327d"}, - {file = "multidict-6.0.4-cp37-cp37m-win32.whl", hash = "sha256:5a4dcf02b908c3b8b17a45fb0f15b695bf117a67b76b7ad18b73cf8e92608775"}, - {file = "multidict-6.0.4-cp37-cp37m-win_amd64.whl", hash = "sha256:6ed5f161328b7df384d71b07317f4d8656434e34591f20552c7bcef27b0ab88e"}, - {file = "multidict-6.0.4-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:5fc1b16f586f049820c5c5b17bb4ee7583092fa0d1c4e28b5239181ff9532e0c"}, - {file = "multidict-6.0.4-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:1502e24330eb681bdaa3eb70d6358e818e8e8f908a22a1851dfd4e15bc2f8161"}, - {file = "multidict-6.0.4-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:b692f419760c0e65d060959df05f2a531945af31fda0c8a3b3195d4efd06de11"}, - {file = "multidict-6.0.4-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:45e1ecb0379bfaab5eef059f50115b54571acfbe422a14f668fc8c27ba410e7e"}, - {file = "multidict-6.0.4-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:ddd3915998d93fbcd2566ddf9cf62cdb35c9e093075f862935573d265cf8f65d"}, - {file = "multidict-6.0.4-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:59d43b61c59d82f2effb39a93c48b845efe23a3852d201ed2d24ba830d0b4cf2"}, - {file = "multidict-6.0.4-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:cc8e1d0c705233c5dd0c5e6460fbad7827d5d36f310a0fadfd45cc3029762258"}, - {file = "multidict-6.0.4-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:d6aa0418fcc838522256761b3415822626f866758ee0bc6632c9486b179d0b52"}, - {file = "multidict-6.0.4-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:6748717bb10339c4760c1e63da040f5f29f5ed6e59d76daee30305894069a660"}, - {file = "multidict-6.0.4-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:4d1a3d7ef5e96b1c9e92f973e43aa5e5b96c659c9bc3124acbbd81b0b9c8a951"}, - {file = "multidict-6.0.4-cp38-cp38-musllinux_1_1_ppc64le.whl", hash = "sha256:4372381634485bec7e46718edc71528024fcdc6f835baefe517b34a33c731d60"}, - {file = "multidict-6.0.4-cp38-cp38-musllinux_1_1_s390x.whl", hash = "sha256:fc35cb4676846ef752816d5be2193a1e8367b4c1397b74a565a9d0389c433a1d"}, - {file = "multidict-6.0.4-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:4b9d9e4e2b37daddb5c23ea33a3417901fa7c7b3dee2d855f63ee67a0b21e5b1"}, - {file = "multidict-6.0.4-cp38-cp38-win32.whl", hash = "sha256:e41b7e2b59679edfa309e8db64fdf22399eec4b0b24694e1b2104fb789207779"}, - {file = "multidict-6.0.4-cp38-cp38-win_amd64.whl", hash = "sha256:d6c254ba6e45d8e72739281ebc46ea5eb5f101234f3ce171f0e9f5cc86991480"}, - {file = 
"multidict-6.0.4-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:16ab77bbeb596e14212e7bab8429f24c1579234a3a462105cda4a66904998664"}, - {file = "multidict-6.0.4-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:bc779e9e6f7fda81b3f9aa58e3a6091d49ad528b11ed19f6621408806204ad35"}, - {file = "multidict-6.0.4-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:4ceef517eca3e03c1cceb22030a3e39cb399ac86bff4e426d4fc6ae49052cc60"}, - {file = "multidict-6.0.4-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:281af09f488903fde97923c7744bb001a9b23b039a909460d0f14edc7bf59706"}, - {file = "multidict-6.0.4-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:52f2dffc8acaba9a2f27174c41c9e57f60b907bb9f096b36b1a1f3be71c6284d"}, - {file = "multidict-6.0.4-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:b41156839806aecb3641f3208c0dafd3ac7775b9c4c422d82ee2a45c34ba81ca"}, - {file = "multidict-6.0.4-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d5e3fc56f88cc98ef8139255cf8cd63eb2c586531e43310ff859d6bb3a6b51f1"}, - {file = "multidict-6.0.4-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:8316a77808c501004802f9beebde51c9f857054a0c871bd6da8280e718444449"}, - {file = "multidict-6.0.4-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:f70b98cd94886b49d91170ef23ec5c0e8ebb6f242d734ed7ed677b24d50c82cf"}, - {file = "multidict-6.0.4-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:bf6774e60d67a9efe02b3616fee22441d86fab4c6d335f9d2051d19d90a40063"}, - {file = "multidict-6.0.4-cp39-cp39-musllinux_1_1_ppc64le.whl", hash = "sha256:e69924bfcdda39b722ef4d9aa762b2dd38e4632b3641b1d9a57ca9cd18f2f83a"}, - {file = "multidict-6.0.4-cp39-cp39-musllinux_1_1_s390x.whl", hash = "sha256:6b181d8c23da913d4ff585afd1155a0e1194c0b50c54fcfe286f70cdaf2b7176"}, - {file = "multidict-6.0.4-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:52509b5be062d9eafc8170e53026fbc54cf3b32759a23d07fd935fb04fc22d95"}, - {file = "multidict-6.0.4-cp39-cp39-win32.whl", hash = "sha256:27c523fbfbdfd19c6867af7346332b62b586eed663887392cff78d614f9ec313"}, - {file = "multidict-6.0.4-cp39-cp39-win_amd64.whl", hash = "sha256:33029f5734336aa0d4c0384525da0387ef89148dc7191aae00ca5fb23d7aafc2"}, - {file = "multidict-6.0.4.tar.gz", hash = "sha256:3666906492efb76453c0e7b97f2cf459b0682e7402c0489a95484965dbc1da49"}, -] - -[[package]] -name = "mypy-extensions" -version = "1.0.0" -description = "Type system extensions for programs checked with the mypy type checker." 
-optional = false -python-versions = ">=3.5" -files = [ - {file = "mypy_extensions-1.0.0-py3-none-any.whl", hash = "sha256:4392f6c0eb8a5668a69e23d168ffa70f0be9ccfd32b5cc2d26a34ae5b844552d"}, - {file = "mypy_extensions-1.0.0.tar.gz", hash = "sha256:75dbf8955dc00442a438fc4d0666508a9a97b6bd41aa2f0ffe9d2f2725af0782"}, -] - -[[package]] -name = "networkx" -version = "3.1" -description = "Python package for creating and manipulating graphs and networks" -optional = false -python-versions = ">=3.8" -files = [ - {file = "networkx-3.1-py3-none-any.whl", hash = "sha256:4f33f68cb2afcf86f28a45f43efc27a9386b535d567d2127f8f61d51dec58d36"}, - {file = "networkx-3.1.tar.gz", hash = "sha256:de346335408f84de0eada6ff9fafafff9bcda11f0a0dfaa931133debb146ab61"}, -] - -[package.extras] -default = ["matplotlib (>=3.4)", "numpy (>=1.20)", "pandas (>=1.3)", "scipy (>=1.8)"] -developer = ["mypy (>=1.1)", "pre-commit (>=3.2)"] -doc = ["nb2plots (>=0.6)", "numpydoc (>=1.5)", "pillow (>=9.4)", "pydata-sphinx-theme (>=0.13)", "sphinx (>=6.1)", "sphinx-gallery (>=0.12)", "texext (>=0.6.7)"] -extra = ["lxml (>=4.6)", "pydot (>=1.4.2)", "pygraphviz (>=1.10)", "sympy (>=1.10)"] -test = ["codecov (>=2.1)", "pytest (>=7.2)", "pytest-cov (>=4.0)"] - -[[package]] -name = "nltk" -version = "3.8.1" -description = "Natural Language Toolkit" -optional = false -python-versions = ">=3.7" -files = [ - {file = "nltk-3.8.1-py3-none-any.whl", hash = "sha256:fd5c9109f976fa86bcadba8f91e47f5e9293bd034474752e92a520f81c93dda5"}, - {file = "nltk-3.8.1.zip", hash = "sha256:1834da3d0682cba4f2cede2f9aad6b0fafb6461ba451db0efb6f9c39798d64d3"}, -] - -[package.dependencies] -click = "*" -joblib = "*" -regex = ">=2021.8.3" -tqdm = "*" - -[package.extras] -all = ["matplotlib", "numpy", "pyparsing", "python-crfsuite", "requests", "scikit-learn", "scipy", "twython"] -corenlp = ["requests"] -machine-learning = ["numpy", "python-crfsuite", "scikit-learn", "scipy"] -plot = ["matplotlib"] -tgrep = ["pyparsing"] -twitter = ["twython"] - -[[package]] -name = "numexpr" -version = "2.8.4" -description = "Fast numerical expression evaluator for NumPy" -optional = false -python-versions = ">=3.7" -files = [ - {file = "numexpr-2.8.4-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:a75967d46b6bd56455dd32da6285e5ffabe155d0ee61eef685bbfb8dafb2e484"}, - {file = "numexpr-2.8.4-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:db93cf1842f068247de631bfc8af20118bf1f9447cd929b531595a5e0efc9346"}, - {file = "numexpr-2.8.4-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:7bca95f4473b444428061d4cda8e59ac564dc7dc6a1dea3015af9805c6bc2946"}, - {file = "numexpr-2.8.4-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9e34931089a6bafc77aaae21f37ad6594b98aa1085bb8b45d5b3cd038c3c17d9"}, - {file = "numexpr-2.8.4-cp310-cp310-win32.whl", hash = "sha256:f3a920bfac2645017110b87ddbe364c9c7a742870a4d2f6120b8786c25dc6db3"}, - {file = "numexpr-2.8.4-cp310-cp310-win_amd64.whl", hash = "sha256:6931b1e9d4f629f43c14b21d44f3f77997298bea43790cfcdb4dd98804f90783"}, - {file = "numexpr-2.8.4-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:9400781553541f414f82eac056f2b4c965373650df9694286b9bd7e8d413f8d8"}, - {file = "numexpr-2.8.4-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:6ee9db7598dd4001138b482342b96d78110dd77cefc051ec75af3295604dde6a"}, - {file = "numexpr-2.8.4-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ff5835e8af9a212e8480003d731aad1727aaea909926fd009e8ae6a1cba7f141"}, - {file = 
"numexpr-2.8.4-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:655d84eb09adfee3c09ecf4a89a512225da153fdb7de13c447404b7d0523a9a7"}, - {file = "numexpr-2.8.4-cp311-cp311-win32.whl", hash = "sha256:5538b30199bfc68886d2be18fcef3abd11d9271767a7a69ff3688defe782800a"}, - {file = "numexpr-2.8.4-cp311-cp311-win_amd64.whl", hash = "sha256:3f039321d1c17962c33079987b675fb251b273dbec0f51aac0934e932446ccc3"}, - {file = "numexpr-2.8.4-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:c867cc36cf815a3ec9122029874e00d8fbcef65035c4a5901e9b120dd5d626a2"}, - {file = "numexpr-2.8.4-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:059546e8f6283ccdb47c683101a890844f667fa6d56258d48ae2ecf1b3875957"}, - {file = "numexpr-2.8.4-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:845a6aa0ed3e2a53239b89c1ebfa8cf052d3cc6e053c72805e8153300078c0b1"}, - {file = "numexpr-2.8.4-cp37-cp37m-win32.whl", hash = "sha256:a38664e699526cb1687aefd9069e2b5b9387da7feac4545de446141f1ef86f46"}, - {file = "numexpr-2.8.4-cp37-cp37m-win_amd64.whl", hash = "sha256:eaec59e9bf70ff05615c34a8b8d6c7bd042bd9f55465d7b495ea5436f45319d0"}, - {file = "numexpr-2.8.4-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:b318541bf3d8326682ebada087ba0050549a16d8b3fa260dd2585d73a83d20a7"}, - {file = "numexpr-2.8.4-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:b076db98ca65eeaf9bd224576e3ac84c05e451c0bd85b13664b7e5f7b62e2c70"}, - {file = "numexpr-2.8.4-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:90f12cc851240f7911a47c91aaf223dba753e98e46dff3017282e633602e76a7"}, - {file = "numexpr-2.8.4-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6c368aa35ae9b18840e78b05f929d3a7b3abccdba9630a878c7db74ca2368339"}, - {file = "numexpr-2.8.4-cp38-cp38-win32.whl", hash = "sha256:b96334fc1748e9ec4f93d5fadb1044089d73fb08208fdb8382ed77c893f0be01"}, - {file = "numexpr-2.8.4-cp38-cp38-win_amd64.whl", hash = "sha256:a6d2d7740ae83ba5f3531e83afc4b626daa71df1ef903970947903345c37bd03"}, - {file = "numexpr-2.8.4-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:77898fdf3da6bb96aa8a4759a8231d763a75d848b2f2e5c5279dad0b243c8dfe"}, - {file = "numexpr-2.8.4-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:df35324666b693f13a016bc7957de7cc4d8801b746b81060b671bf78a52b9037"}, - {file = "numexpr-2.8.4-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:17ac9cfe6d0078c5fc06ba1c1bbd20b8783f28c6f475bbabd3cad53683075cab"}, - {file = "numexpr-2.8.4-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:df3a1f6b24214a1ab826e9c1c99edf1686c8e307547a9aef33910d586f626d01"}, - {file = "numexpr-2.8.4-cp39-cp39-win32.whl", hash = "sha256:7d71add384adc9119568d7e9ffa8a35b195decae81e0abf54a2b7779852f0637"}, - {file = "numexpr-2.8.4-cp39-cp39-win_amd64.whl", hash = "sha256:9f096d707290a6a00b6ffdaf581ee37331109fb7b6c8744e9ded7c779a48e517"}, - {file = "numexpr-2.8.4.tar.gz", hash = "sha256:d5432537418d18691b9115d615d6daa17ee8275baef3edf1afbbf8bc69806147"}, -] - -[package.dependencies] -numpy = ">=1.13.3" - -[[package]] -name = "numpy" -version = "1.25.1" -description = "Fundamental package for array computing in Python" -optional = false -python-versions = ">=3.9" -files = [ - {file = "numpy-1.25.1-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:77d339465dff3eb33c701430bcb9c325b60354698340229e1dff97745e6b3efa"}, - {file = "numpy-1.25.1-cp310-cp310-macosx_11_0_arm64.whl", hash = 
"sha256:d736b75c3f2cb96843a5c7f8d8ccc414768d34b0a75f466c05f3a739b406f10b"}, - {file = "numpy-1.25.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:4a90725800caeaa160732d6b31f3f843ebd45d6b5f3eec9e8cc287e30f2805bf"}, - {file = "numpy-1.25.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6c6c9261d21e617c6dc5eacba35cb68ec36bb72adcff0dee63f8fbc899362588"}, - {file = "numpy-1.25.1-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:0def91f8af6ec4bb94c370e38c575855bf1d0be8a8fbfba42ef9c073faf2cf19"}, - {file = "numpy-1.25.1-cp310-cp310-win32.whl", hash = "sha256:fd67b306320dcadea700a8f79b9e671e607f8696e98ec255915c0c6d6b818503"}, - {file = "numpy-1.25.1-cp310-cp310-win_amd64.whl", hash = "sha256:c1516db588987450b85595586605742879e50dcce923e8973f79529651545b57"}, - {file = "numpy-1.25.1-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:6b82655dd8efeea69dbf85d00fca40013d7f503212bc5259056244961268b66e"}, - {file = "numpy-1.25.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:e8f6049c4878cb16960fbbfb22105e49d13d752d4d8371b55110941fb3b17800"}, - {file = "numpy-1.25.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:41a56b70e8139884eccb2f733c2f7378af06c82304959e174f8e7370af112e09"}, - {file = "numpy-1.25.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d5154b1a25ec796b1aee12ac1b22f414f94752c5f94832f14d8d6c9ac40bcca6"}, - {file = "numpy-1.25.1-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:38eb6548bb91c421261b4805dc44def9ca1a6eef6444ce35ad1669c0f1a3fc5d"}, - {file = "numpy-1.25.1-cp311-cp311-win32.whl", hash = "sha256:791f409064d0a69dd20579345d852c59822c6aa087f23b07b1b4e28ff5880fcb"}, - {file = "numpy-1.25.1-cp311-cp311-win_amd64.whl", hash = "sha256:c40571fe966393b212689aa17e32ed905924120737194b5d5c1b20b9ed0fb171"}, - {file = "numpy-1.25.1-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:3d7abcdd85aea3e6cdddb59af2350c7ab1ed764397f8eec97a038ad244d2d105"}, - {file = "numpy-1.25.1-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:1a180429394f81c7933634ae49b37b472d343cccb5bb0c4a575ac8bbc433722f"}, - {file = "numpy-1.25.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d412c1697c3853c6fc3cb9751b4915859c7afe6a277c2bf00acf287d56c4e625"}, - {file = "numpy-1.25.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:20e1266411120a4f16fad8efa8e0454d21d00b8c7cee5b5ccad7565d95eb42dd"}, - {file = "numpy-1.25.1-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:f76aebc3358ade9eacf9bc2bb8ae589863a4f911611694103af05346637df1b7"}, - {file = "numpy-1.25.1-cp39-cp39-win32.whl", hash = "sha256:247d3ffdd7775bdf191f848be8d49100495114c82c2bd134e8d5d075fb386a1c"}, - {file = "numpy-1.25.1-cp39-cp39-win_amd64.whl", hash = "sha256:1d5d3c68e443c90b38fdf8ef40e60e2538a27548b39b12b73132456847f4b631"}, - {file = "numpy-1.25.1-pp39-pypy39_pp73-macosx_10_9_x86_64.whl", hash = "sha256:35a9527c977b924042170a0887de727cd84ff179e478481404c5dc66b4170009"}, - {file = "numpy-1.25.1-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0d3fe3dd0506a28493d82dc3cf254be8cd0d26f4008a417385cbf1ae95b54004"}, - {file = "numpy-1.25.1-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:012097b5b0d00a11070e8f2e261128c44157a8689f7dedcf35576e525893f4fe"}, - {file = "numpy-1.25.1.tar.gz", hash = "sha256:9a3a9f3a61480cc086117b426a8bd86869c213fc4072e606f01c4e4b66eb92bf"}, -] - -[[package]] -name = "olefile" -version = "0.46" -description = "Python package to 
parse, read and write Microsoft OLE2 files (Structured Storage or Compound Document, Microsoft Office)" -optional = false -python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*" -files = [ - {file = "olefile-0.46.zip", hash = "sha256:133b031eaf8fd2c9399b78b8bc5b8fcbe4c31e85295749bb17a87cba8f3c3964"}, -] - -[[package]] -name = "oletools" -version = "0.60.1" -description = "Python tools to analyze security characteristics of MS Office and OLE files (also called Structured Storage, Compound File Binary Format or Compound Document File Format), for Malware Analysis and Incident Response #DFIR" -optional = false -python-versions = "*" -files = [ - {file = "oletools-0.60.1-py2.py3-none-any.whl", hash = "sha256:edef92374e688989a39269eb9a11142fb20a023629c23538c849c14d1d1144ea"}, - {file = "oletools-0.60.1.zip", hash = "sha256:67a796da4c4b8e2feb9a6b2495bef8798a3323a75512de4e5669d9dc9d1fae31"}, -] - -[package.dependencies] -colorclass = "*" -easygui = "*" -msoffcrypto-tool = {version = "*", markers = "platform_python_implementation != \"PyPy\" or python_version >= \"3\" and (platform_system != \"Windows\" and platform_system != \"Darwin\")"} -olefile = ">=0.46" -pcodedmp = ">=1.2.5" -pyparsing = ">=2.1.0,<3" - -[package.extras] -full = ["XLMMacroDeobfuscator"] - -[[package]] -name = "onnxruntime" -version = "1.15.1" -description = "ONNX Runtime is a runtime accelerator for Machine Learning models" -optional = false -python-versions = "*" -files = [ - {file = "onnxruntime-1.15.1-cp310-cp310-macosx_10_15_x86_64.whl", hash = "sha256:baad59e6a763237fa39545325d29c16f98b8a45d2dfc524c67631e2e3ba44d16"}, - {file = "onnxruntime-1.15.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:568c2db848f619a0a93e843c028e9fb4879929d40b04bd60f9ba6eb8d2e93421"}, - {file = "onnxruntime-1.15.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:69088d7784bb04dedfd9e883e2c96e4adf8ae0451acdd0abb78d68f59ecc6d9d"}, - {file = "onnxruntime-1.15.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3cef43737b2cd886d5d718d100f56ec78c9c476c5db5f8f946e95024978fe754"}, - {file = "onnxruntime-1.15.1-cp310-cp310-win32.whl", hash = "sha256:79d7e65abb44a47c633ede8e53fe7b9756c272efaf169758c482c983cca98d7e"}, - {file = "onnxruntime-1.15.1-cp310-cp310-win_amd64.whl", hash = "sha256:8bc4c47682933a7a2c79808688aad5f12581305e182be552de50783b5438e6bd"}, - {file = "onnxruntime-1.15.1-cp311-cp311-macosx_10_15_x86_64.whl", hash = "sha256:652b2cb777f76446e3cc41072dd3d1585a6388aeff92b9de656724bc22e241e4"}, - {file = "onnxruntime-1.15.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:89b86dbed15740abc385055a29c9673a212600248d702737ce856515bdeddc88"}, - {file = "onnxruntime-1.15.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ed5cdd9ee748149a57f4cdfa67187a0d68f75240645a3c688299dcd08742cc98"}, - {file = "onnxruntime-1.15.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:2f748cce6a70ed38c19658615c55f4eedb9192765a4e9c4bd2682adfe980698d"}, - {file = "onnxruntime-1.15.1-cp311-cp311-win32.whl", hash = "sha256:e0312046e814c40066e7823da58075992d51364cbe739eeeb2345ec440c3ac59"}, - {file = "onnxruntime-1.15.1-cp311-cp311-win_amd64.whl", hash = "sha256:f0980969689cb956c22bd1318b271e1be260060b37f3ddd82c7d63bd7f2d9a79"}, - {file = "onnxruntime-1.15.1-cp38-cp38-macosx_10_15_x86_64.whl", hash = "sha256:345986cfdbd6f4b20a89b6a6cd9abd3e2ced2926ae0b6e91fefa8149f95c0f09"}, - {file = "onnxruntime-1.15.1-cp38-cp38-macosx_11_0_arm64.whl", hash = 
"sha256:a4d7b3ad75e040f1e95757f69826a11051737b31584938a26d466a0234c6de98"}, - {file = "onnxruntime-1.15.1-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:3603d07b829bcc1c14963a76103e257aade8861eb208173b300cc26e118ec2f8"}, - {file = "onnxruntime-1.15.1-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d3df0625b9295daf1f7409ea55f72e1eeb38d54f5769add53372e79ddc3cf98d"}, - {file = "onnxruntime-1.15.1-cp38-cp38-win32.whl", hash = "sha256:f68b47fdf1a0406c0292f81ac993e2a2ae3e8b166b436d590eb221f64e8e187a"}, - {file = "onnxruntime-1.15.1-cp38-cp38-win_amd64.whl", hash = "sha256:52d762d297cc3f731f54fa65a3e329b813164970671547bef6414d0ed52765c9"}, - {file = "onnxruntime-1.15.1-cp39-cp39-macosx_10_15_x86_64.whl", hash = "sha256:99228f9f03dc1fc8af89a28c9f942e8bd3e97e894e263abe1a32e4ddb1f6363b"}, - {file = "onnxruntime-1.15.1-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:45db7f96febb0cf23e3af147f35c4f8de1a37dd252d1cef853c242c2780250cd"}, - {file = "onnxruntime-1.15.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:2bafc112a36db25c821b90ab747644041cb4218f6575889775a2c12dd958b8c3"}, - {file = "onnxruntime-1.15.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:985693d18f2d46aa34fd44d7f65ff620660b2c8fa4b8ec365c2ca353f0fbdb27"}, - {file = "onnxruntime-1.15.1-cp39-cp39-win32.whl", hash = "sha256:708eb31b0c04724bf0f01c1309a9e69bbc09b85beb750e5662c8aed29f1ff9fd"}, - {file = "onnxruntime-1.15.1-cp39-cp39-win_amd64.whl", hash = "sha256:73d6de4c42dfde1e9dbea04773e6dc23346c8cda9c7e08c6554fafc97ac60138"}, -] - -[package.dependencies] -coloredlogs = "*" -flatbuffers = "*" -numpy = ">=1.21.6" -packaging = "*" -protobuf = "*" -sympy = "*" - -[[package]] -name = "openapi-schema-pydantic" -version = "1.2.4" -description = "OpenAPI (v3) specification schema as pydantic class" -optional = false -python-versions = ">=3.6.1" -files = [ - {file = "openapi-schema-pydantic-1.2.4.tar.gz", hash = "sha256:3e22cf58b74a69f752cc7e5f1537f6e44164282db2700cbbcd3bb99ddd065196"}, - {file = "openapi_schema_pydantic-1.2.4-py3-none-any.whl", hash = "sha256:a932ecc5dcbb308950282088956e94dea069c9823c84e507d64f6b622222098c"}, -] - -[package.dependencies] -pydantic = ">=1.8.2" - -[[package]] -name = "openpyxl" -version = "3.1.2" -description = "A Python library to read/write Excel 2010 xlsx/xlsm files" -optional = false -python-versions = ">=3.6" -files = [ - {file = "openpyxl-3.1.2-py2.py3-none-any.whl", hash = "sha256:f91456ead12ab3c6c2e9491cf33ba6d08357d802192379bb482f1033ade496f5"}, - {file = "openpyxl-3.1.2.tar.gz", hash = "sha256:a6f5977418eff3b2d5500d54d9db50c8277a368436f4e4f8ddb1be3422870184"}, -] - -[package.dependencies] -et-xmlfile = "*" - -[[package]] -name = "overrides" -version = "7.3.1" -description = "A decorator to automatically detect mismatch when overriding a method." 
-optional = false -python-versions = ">=3.6" -files = [ - {file = "overrides-7.3.1-py3-none-any.whl", hash = "sha256:6187d8710a935d09b0bcef8238301d6ee2569d2ac1ae0ec39a8c7924e27f58ca"}, - {file = "overrides-7.3.1.tar.gz", hash = "sha256:8b97c6c1e1681b78cbc9424b138d880f0803c2254c5ebaabdde57bb6c62093f2"}, -] - -[[package]] -name = "packaging" -version = "23.1" -description = "Core utilities for Python packages" -optional = false -python-versions = ">=3.7" -files = [ - {file = "packaging-23.1-py3-none-any.whl", hash = "sha256:994793af429502c4ea2ebf6bf664629d07c1a9fe974af92966e4b8d2df7edc61"}, - {file = "packaging-23.1.tar.gz", hash = "sha256:a392980d2b6cffa644431898be54b0045151319d1e7ec34f0cfed48767dd334f"}, -] - -[[package]] -name = "pandas" -version = "2.0.3" -description = "Powerful data structures for data analysis, time series, and statistics" -optional = false -python-versions = ">=3.8" -files = [ - {file = "pandas-2.0.3-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:e4c7c9f27a4185304c7caf96dc7d91bc60bc162221152de697c98eb0b2648dd8"}, - {file = "pandas-2.0.3-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:f167beed68918d62bffb6ec64f2e1d8a7d297a038f86d4aed056b9493fca407f"}, - {file = "pandas-2.0.3-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ce0c6f76a0f1ba361551f3e6dceaff06bde7514a374aa43e33b588ec10420183"}, - {file = "pandas-2.0.3-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ba619e410a21d8c387a1ea6e8a0e49bb42216474436245718d7f2e88a2f8d7c0"}, - {file = "pandas-2.0.3-cp310-cp310-win32.whl", hash = "sha256:3ef285093b4fe5058eefd756100a367f27029913760773c8bf1d2d8bebe5d210"}, - {file = "pandas-2.0.3-cp310-cp310-win_amd64.whl", hash = "sha256:9ee1a69328d5c36c98d8e74db06f4ad518a1840e8ccb94a4ba86920986bb617e"}, - {file = "pandas-2.0.3-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:b084b91d8d66ab19f5bb3256cbd5ea661848338301940e17f4492b2ce0801fe8"}, - {file = "pandas-2.0.3-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:37673e3bdf1551b95bf5d4ce372b37770f9529743d2498032439371fc7b7eb26"}, - {file = "pandas-2.0.3-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:b9cb1e14fdb546396b7e1b923ffaeeac24e4cedd14266c3497216dd4448e4f2d"}, - {file = "pandas-2.0.3-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d9cd88488cceb7635aebb84809d087468eb33551097d600c6dad13602029c2df"}, - {file = "pandas-2.0.3-cp311-cp311-win32.whl", hash = "sha256:694888a81198786f0e164ee3a581df7d505024fbb1f15202fc7db88a71d84ebd"}, - {file = "pandas-2.0.3-cp311-cp311-win_amd64.whl", hash = "sha256:6a21ab5c89dcbd57f78d0ae16630b090eec626360085a4148693def5452d8a6b"}, - {file = "pandas-2.0.3-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:9e4da0d45e7f34c069fe4d522359df7d23badf83abc1d1cef398895822d11061"}, - {file = "pandas-2.0.3-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:32fca2ee1b0d93dd71d979726b12b61faa06aeb93cf77468776287f41ff8fdc5"}, - {file = "pandas-2.0.3-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:258d3624b3ae734490e4d63c430256e716f488c4fcb7c8e9bde2d3aa46c29089"}, - {file = "pandas-2.0.3-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9eae3dc34fa1aa7772dd3fc60270d13ced7346fcbcfee017d3132ec625e23bb0"}, - {file = "pandas-2.0.3-cp38-cp38-win32.whl", hash = "sha256:f3421a7afb1a43f7e38e82e844e2bca9a6d793d66c1a7f9f0ff39a795bbc5e02"}, - {file = "pandas-2.0.3-cp38-cp38-win_amd64.whl", hash = 
"sha256:69d7f3884c95da3a31ef82b7618af5710dba95bb885ffab339aad925c3e8ce78"}, - {file = "pandas-2.0.3-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:5247fb1ba347c1261cbbf0fcfba4a3121fbb4029d95d9ef4dc45406620b25c8b"}, - {file = "pandas-2.0.3-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:81af086f4543c9d8bb128328b5d32e9986e0c84d3ee673a2ac6fb57fd14f755e"}, - {file = "pandas-2.0.3-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1994c789bf12a7c5098277fb43836ce090f1073858c10f9220998ac74f37c69b"}, - {file = "pandas-2.0.3-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:5ec591c48e29226bcbb316e0c1e9423622bc7a4eaf1ef7c3c9fa1a3981f89641"}, - {file = "pandas-2.0.3-cp39-cp39-win32.whl", hash = "sha256:04dbdbaf2e4d46ca8da896e1805bc04eb85caa9a82e259e8eed00254d5e0c682"}, - {file = "pandas-2.0.3-cp39-cp39-win_amd64.whl", hash = "sha256:1168574b036cd8b93abc746171c9b4f1b83467438a5e45909fed645cf8692dbc"}, - {file = "pandas-2.0.3.tar.gz", hash = "sha256:c02f372a88e0d17f36d3093a644c73cfc1788e876a7c4bcb4020a77512e2043c"}, -] - -[package.dependencies] -numpy = [ - {version = ">=1.21.0", markers = "python_version >= \"3.10\""}, - {version = ">=1.23.2", markers = "python_version >= \"3.11\""}, -] -python-dateutil = ">=2.8.2" -pytz = ">=2020.1" -tzdata = ">=2022.1" - -[package.extras] -all = ["PyQt5 (>=5.15.1)", "SQLAlchemy (>=1.4.16)", "beautifulsoup4 (>=4.9.3)", "bottleneck (>=1.3.2)", "brotlipy (>=0.7.0)", "fastparquet (>=0.6.3)", "fsspec (>=2021.07.0)", "gcsfs (>=2021.07.0)", "html5lib (>=1.1)", "hypothesis (>=6.34.2)", "jinja2 (>=3.0.0)", "lxml (>=4.6.3)", "matplotlib (>=3.6.1)", "numba (>=0.53.1)", "numexpr (>=2.7.3)", "odfpy (>=1.4.1)", "openpyxl (>=3.0.7)", "pandas-gbq (>=0.15.0)", "psycopg2 (>=2.8.6)", "pyarrow (>=7.0.0)", "pymysql (>=1.0.2)", "pyreadstat (>=1.1.2)", "pytest (>=7.3.2)", "pytest-asyncio (>=0.17.0)", "pytest-xdist (>=2.2.0)", "python-snappy (>=0.6.0)", "pyxlsb (>=1.0.8)", "qtpy (>=2.2.0)", "s3fs (>=2021.08.0)", "scipy (>=1.7.1)", "tables (>=3.6.1)", "tabulate (>=0.8.9)", "xarray (>=0.21.0)", "xlrd (>=2.0.1)", "xlsxwriter (>=1.4.3)", "zstandard (>=0.15.2)"] -aws = ["s3fs (>=2021.08.0)"] -clipboard = ["PyQt5 (>=5.15.1)", "qtpy (>=2.2.0)"] -compression = ["brotlipy (>=0.7.0)", "python-snappy (>=0.6.0)", "zstandard (>=0.15.2)"] -computation = ["scipy (>=1.7.1)", "xarray (>=0.21.0)"] -excel = ["odfpy (>=1.4.1)", "openpyxl (>=3.0.7)", "pyxlsb (>=1.0.8)", "xlrd (>=2.0.1)", "xlsxwriter (>=1.4.3)"] -feather = ["pyarrow (>=7.0.0)"] -fss = ["fsspec (>=2021.07.0)"] -gcp = ["gcsfs (>=2021.07.0)", "pandas-gbq (>=0.15.0)"] -hdf5 = ["tables (>=3.6.1)"] -html = ["beautifulsoup4 (>=4.9.3)", "html5lib (>=1.1)", "lxml (>=4.6.3)"] -mysql = ["SQLAlchemy (>=1.4.16)", "pymysql (>=1.0.2)"] -output-formatting = ["jinja2 (>=3.0.0)", "tabulate (>=0.8.9)"] -parquet = ["pyarrow (>=7.0.0)"] -performance = ["bottleneck (>=1.3.2)", "numba (>=0.53.1)", "numexpr (>=2.7.1)"] -plot = ["matplotlib (>=3.6.1)"] -postgresql = ["SQLAlchemy (>=1.4.16)", "psycopg2 (>=2.8.6)"] -spss = ["pyreadstat (>=1.1.2)"] -sql-other = ["SQLAlchemy (>=1.4.16)"] -test = ["hypothesis (>=6.34.2)", "pytest (>=7.3.2)", "pytest-asyncio (>=0.17.0)", "pytest-xdist (>=2.2.0)"] -xml = ["lxml (>=4.6.3)"] - -[[package]] -name = "pandoc" -version = "2.3" -description = "Pandoc Documents for Python" -optional = false -python-versions = "*" -files = [ - {file = "pandoc-2.3.tar.gz", hash = "sha256:e772c2c6d871146894579828dbaf1efd538eb64fc7e71d4a6b3a11a18baef90d"}, -] - -[package.dependencies] -plumbum = "*" 
-ply = "*" - -[[package]] -name = "pcodedmp" -version = "1.2.6" -description = "A VBA p-code disassembler" -optional = false -python-versions = "*" -files = [ - {file = "pcodedmp-1.2.6-py2.py3-none-any.whl", hash = "sha256:4441f7c0ab4cbda27bd4668db3b14f36261d86e5059ce06c0828602cbe1c4278"}, - {file = "pcodedmp-1.2.6.tar.gz", hash = "sha256:025f8c809a126f45a082ffa820893e6a8d990d9d7ddb68694b5a9f0a6dbcd955"}, -] - -[package.dependencies] -oletools = ">=0.54" -win-unicode-console = {version = "*", markers = "platform_system == \"Windows\" and platform_python_implementation != \"PyPy\""} - -[[package]] -name = "pdf2image" -version = "1.16.3" -description = "A wrapper around the pdftoppm and pdftocairo command line tools to convert PDF to a PIL Image list." -optional = false -python-versions = "*" -files = [ - {file = "pdf2image-1.16.3-py3-none-any.whl", hash = "sha256:b6154164af3677211c22cbb38b2bd778b43aca02758e962fe1e231f6d3b0e380"}, - {file = "pdf2image-1.16.3.tar.gz", hash = "sha256:74208810c2cef4d9e347769b8e62a52303982ddb4f2dfd744c7ab4b940ae287e"}, -] - -[package.dependencies] -pillow = "*" - -[[package]] -name = "pdfminer-six" -version = "20221105" -description = "PDF parser and analyzer" -optional = false -python-versions = ">=3.6" -files = [ - {file = "pdfminer.six-20221105-py3-none-any.whl", hash = "sha256:1eaddd712d5b2732f8ac8486824533514f8ba12a0787b3d5fe1e686cd826532d"}, - {file = "pdfminer.six-20221105.tar.gz", hash = "sha256:8448ab7b939d18b64820478ecac5394f482d7a79f5f7eaa7703c6c959c175e1d"}, -] - -[package.dependencies] -charset-normalizer = ">=2.0.0" -cryptography = ">=36.0.0" - -[package.extras] -dev = ["black", "mypy (==0.931)", "nox", "pytest"] -docs = ["sphinx", "sphinx-argparse"] -image = ["Pillow"] - -[[package]] -name = "pillow" -version = "10.0.0" -description = "Python Imaging Library (Fork)" -optional = false -python-versions = ">=3.8" -files = [ - {file = "Pillow-10.0.0-cp310-cp310-macosx_10_10_x86_64.whl", hash = "sha256:1f62406a884ae75fb2f818694469519fb685cc7eaff05d3451a9ebe55c646891"}, - {file = "Pillow-10.0.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:d5db32e2a6ccbb3d34d87c87b432959e0db29755727afb37290e10f6e8e62614"}, - {file = "Pillow-10.0.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:edf4392b77bdc81f36e92d3a07a5cd072f90253197f4a52a55a8cec48a12483b"}, - {file = "Pillow-10.0.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:520f2a520dc040512699f20fa1c363eed506e94248d71f85412b625026f6142c"}, - {file = "Pillow-10.0.0-cp310-cp310-manylinux_2_28_aarch64.whl", hash = "sha256:8c11160913e3dd06c8ffdb5f233a4f254cb449f4dfc0f8f4549eda9e542c93d1"}, - {file = "Pillow-10.0.0-cp310-cp310-manylinux_2_28_x86_64.whl", hash = "sha256:a74ba0c356aaa3bb8e3eb79606a87669e7ec6444be352870623025d75a14a2bf"}, - {file = "Pillow-10.0.0-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:d5d0dae4cfd56969d23d94dc8e89fb6a217be461c69090768227beb8ed28c0a3"}, - {file = "Pillow-10.0.0-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:22c10cc517668d44b211717fd9775799ccec4124b9a7f7b3635fc5386e584992"}, - {file = "Pillow-10.0.0-cp310-cp310-win_amd64.whl", hash = "sha256:dffe31a7f47b603318c609f378ebcd57f1554a3a6a8effbc59c3c69f804296de"}, - {file = "Pillow-10.0.0-cp311-cp311-macosx_10_10_x86_64.whl", hash = "sha256:9fb218c8a12e51d7ead2a7c9e101a04982237d4855716af2e9499306728fb485"}, - {file = "Pillow-10.0.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:d35e3c8d9b1268cbf5d3670285feb3528f6680420eafe35cccc686b73c1e330f"}, - {file = 
"Pillow-10.0.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:3ed64f9ca2f0a95411e88a4efbd7a29e5ce2cea36072c53dd9d26d9c76f753b3"}, - {file = "Pillow-10.0.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0b6eb5502f45a60a3f411c63187db83a3d3107887ad0d036c13ce836f8a36f1d"}, - {file = "Pillow-10.0.0-cp311-cp311-manylinux_2_28_aarch64.whl", hash = "sha256:c1fbe7621c167ecaa38ad29643d77a9ce7311583761abf7836e1510c580bf3dd"}, - {file = "Pillow-10.0.0-cp311-cp311-manylinux_2_28_x86_64.whl", hash = "sha256:cd25d2a9d2b36fcb318882481367956d2cf91329f6892fe5d385c346c0649629"}, - {file = "Pillow-10.0.0-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:3b08d4cc24f471b2c8ca24ec060abf4bebc6b144cb89cba638c720546b1cf538"}, - {file = "Pillow-10.0.0-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:d737a602fbd82afd892ca746392401b634e278cb65d55c4b7a8f48e9ef8d008d"}, - {file = "Pillow-10.0.0-cp311-cp311-win_amd64.whl", hash = "sha256:3a82c40d706d9aa9734289740ce26460a11aeec2d9c79b7af87bb35f0073c12f"}, - {file = "Pillow-10.0.0-cp311-cp311-win_arm64.whl", hash = "sha256:bc2ec7c7b5d66b8ec9ce9f720dbb5fa4bace0f545acd34870eff4a369b44bf37"}, - {file = "Pillow-10.0.0-cp312-cp312-macosx_10_10_x86_64.whl", hash = "sha256:d80cf684b541685fccdd84c485b31ce73fc5c9b5d7523bf1394ce134a60c6883"}, - {file = "Pillow-10.0.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:76de421f9c326da8f43d690110f0e79fe3ad1e54be811545d7d91898b4c8493e"}, - {file = "Pillow-10.0.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:81ff539a12457809666fef6624684c008e00ff6bf455b4b89fd00a140eecd640"}, - {file = "Pillow-10.0.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ce543ed15570eedbb85df19b0a1a7314a9c8141a36ce089c0a894adbfccb4568"}, - {file = "Pillow-10.0.0-cp312-cp312-manylinux_2_28_aarch64.whl", hash = "sha256:685ac03cc4ed5ebc15ad5c23bc555d68a87777586d970c2c3e216619a5476223"}, - {file = "Pillow-10.0.0-cp312-cp312-manylinux_2_28_x86_64.whl", hash = "sha256:d72e2ecc68a942e8cf9739619b7f408cc7b272b279b56b2c83c6123fcfa5cdff"}, - {file = "Pillow-10.0.0-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:d50b6aec14bc737742ca96e85d6d0a5f9bfbded018264b3b70ff9d8c33485551"}, - {file = "Pillow-10.0.0-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:00e65f5e822decd501e374b0650146063fbb30a7264b4d2744bdd7b913e0cab5"}, - {file = "Pillow-10.0.0-cp312-cp312-win_amd64.whl", hash = "sha256:f31f9fdbfecb042d046f9d91270a0ba28368a723302786c0009ee9b9f1f60199"}, - {file = "Pillow-10.0.0-cp312-cp312-win_arm64.whl", hash = "sha256:1ce91b6ec08d866b14413d3f0bbdea7e24dfdc8e59f562bb77bc3fe60b6144ca"}, - {file = "Pillow-10.0.0-cp38-cp38-macosx_10_10_x86_64.whl", hash = "sha256:349930d6e9c685c089284b013478d6f76e3a534e36ddfa912cde493f235372f3"}, - {file = "Pillow-10.0.0-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:3a684105f7c32488f7153905a4e3015a3b6c7182e106fe3c37fbb5ef3e6994c3"}, - {file = "Pillow-10.0.0-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:b4f69b3700201b80bb82c3a97d5e9254084f6dd5fb5b16fc1a7b974260f89f43"}, - {file = "Pillow-10.0.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3f07ea8d2f827d7d2a49ecf1639ec02d75ffd1b88dcc5b3a61bbb37a8759ad8d"}, - {file = "Pillow-10.0.0-cp38-cp38-manylinux_2_28_aarch64.whl", hash = "sha256:040586f7d37b34547153fa383f7f9aed68b738992380ac911447bb78f2abe530"}, - {file = "Pillow-10.0.0-cp38-cp38-manylinux_2_28_x86_64.whl", hash = 
"sha256:f88a0b92277de8e3ca715a0d79d68dc82807457dae3ab8699c758f07c20b3c51"}, - {file = "Pillow-10.0.0-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:c7cf14a27b0d6adfaebb3ae4153f1e516df54e47e42dcc073d7b3d76111a8d86"}, - {file = "Pillow-10.0.0-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:3400aae60685b06bb96f99a21e1ada7bc7a413d5f49bce739828ecd9391bb8f7"}, - {file = "Pillow-10.0.0-cp38-cp38-win_amd64.whl", hash = "sha256:dbc02381779d412145331789b40cc7b11fdf449e5d94f6bc0b080db0a56ea3f0"}, - {file = "Pillow-10.0.0-cp39-cp39-macosx_10_10_x86_64.whl", hash = "sha256:9211e7ad69d7c9401cfc0e23d49b69ca65ddd898976d660a2fa5904e3d7a9baa"}, - {file = "Pillow-10.0.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:faaf07ea35355b01a35cb442dd950d8f1bb5b040a7787791a535de13db15ed90"}, - {file = "Pillow-10.0.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c9f72a021fbb792ce98306ffb0c348b3c9cb967dce0f12a49aa4c3d3fdefa967"}, - {file = "Pillow-10.0.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9f7c16705f44e0504a3a2a14197c1f0b32a95731d251777dcb060aa83022cb2d"}, - {file = "Pillow-10.0.0-cp39-cp39-manylinux_2_28_aarch64.whl", hash = "sha256:76edb0a1fa2b4745fb0c99fb9fb98f8b180a1bbceb8be49b087e0b21867e77d3"}, - {file = "Pillow-10.0.0-cp39-cp39-manylinux_2_28_x86_64.whl", hash = "sha256:368ab3dfb5f49e312231b6f27b8820c823652b7cd29cfbd34090565a015e99ba"}, - {file = "Pillow-10.0.0-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:608bfdee0d57cf297d32bcbb3c728dc1da0907519d1784962c5f0c68bb93e5a3"}, - {file = "Pillow-10.0.0-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:5c6e3df6bdd396749bafd45314871b3d0af81ff935b2d188385e970052091017"}, - {file = "Pillow-10.0.0-cp39-cp39-win_amd64.whl", hash = "sha256:7be600823e4c8631b74e4a0d38384c73f680e6105a7d3c6824fcf226c178c7e6"}, - {file = "Pillow-10.0.0-pp310-pypy310_pp73-macosx_10_10_x86_64.whl", hash = "sha256:92be919bbc9f7d09f7ae343c38f5bb21c973d2576c1d45600fce4b74bafa7ac0"}, - {file = "Pillow-10.0.0-pp310-pypy310_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8f8182b523b2289f7c415f589118228d30ac8c355baa2f3194ced084dac2dbba"}, - {file = "Pillow-10.0.0-pp310-pypy310_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:38250a349b6b390ee6047a62c086d3817ac69022c127f8a5dc058c31ccef17f3"}, - {file = "Pillow-10.0.0-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:88af2003543cc40c80f6fca01411892ec52b11021b3dc22ec3bc9d5afd1c5334"}, - {file = "Pillow-10.0.0-pp39-pypy39_pp73-macosx_10_10_x86_64.whl", hash = "sha256:c189af0545965fa8d3b9613cfdb0cd37f9d71349e0f7750e1fd704648d475ed2"}, - {file = "Pillow-10.0.0-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ce7b031a6fc11365970e6a5686d7ba8c63e4c1cf1ea143811acbb524295eabed"}, - {file = "Pillow-10.0.0-pp39-pypy39_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:db24668940f82321e746773a4bc617bfac06ec831e5c88b643f91f122a785684"}, - {file = "Pillow-10.0.0-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:efe8c0681042536e0d06c11f48cebe759707c9e9abf880ee213541c5b46c5bf3"}, - {file = "Pillow-10.0.0.tar.gz", hash = "sha256:9c82b5b3e043c7af0d95792d0d20ccf68f61a1fec6b3530e718b688422727396"}, -] - -[package.extras] -docs = ["furo", "olefile", "sphinx (>=2.4)", "sphinx-copybutton", "sphinx-inline-tabs", "sphinx-removed-in", "sphinxext-opengraph"] -tests = ["check-manifest", "coverage", "defusedxml", "markdown2", "olefile", "packaging", "pyroma", "pytest", "pytest-cov", "pytest-timeout"] - -[[package]] -name = "plumbum" -version = "1.8.2" 
-description = "Plumbum: shell combinators library" -optional = false -python-versions = ">=3.6" -files = [ - {file = "plumbum-1.8.2-py3-none-any.whl", hash = "sha256:3ad9e5f56c6ec98f6f7988f7ea8b52159662ea9e915868d369dbccbfca0e367e"}, - {file = "plumbum-1.8.2.tar.gz", hash = "sha256:9e6dc032f4af952665f32f3206567bc23b7858b1413611afe603a3f8ad9bfd75"}, -] - -[package.dependencies] -pywin32 = {version = "*", markers = "platform_system == \"Windows\" and platform_python_implementation != \"PyPy\""} - -[package.extras] -dev = ["paramiko", "psutil", "pytest (>=6.0)", "pytest-cov", "pytest-mock", "pytest-timeout"] -docs = ["sphinx (>=4.0.0)", "sphinx-rtd-theme (>=1.0.0)"] -ssh = ["paramiko"] - -[[package]] -name = "ply" -version = "3.11" -description = "Python Lex & Yacc" -optional = false -python-versions = "*" -files = [ - {file = "ply-3.11-py2.py3-none-any.whl", hash = "sha256:096f9b8350b65ebd2fd1346b12452efe5b9607f7482813ffca50c22722a807ce"}, - {file = "ply-3.11.tar.gz", hash = "sha256:00c7c1aaa88358b9c765b6d3000c6eec0ba42abca5351b095321aef446081da3"}, -] - -[[package]] -name = "posthog" -version = "3.0.1" -description = "Integrate PostHog into any python application." -optional = false -python-versions = "*" -files = [ - {file = "posthog-3.0.1-py2.py3-none-any.whl", hash = "sha256:9c7f92fecc713257d4b2710d05b456569c9156fbdd3e85655ba7ba5ba6c7b3ae"}, - {file = "posthog-3.0.1.tar.gz", hash = "sha256:57d2791ff5752ce56ba0f9bb8876faf3ca9208f1c2c6ceaeb5a2504c34493767"}, -] - -[package.dependencies] -backoff = ">=1.10.0" -monotonic = ">=1.5" -python-dateutil = ">2.1" -requests = ">=2.7,<3.0" -six = ">=1.5" - -[package.extras] -dev = ["black", "flake8", "flake8-print", "isort", "pre-commit"] -sentry = ["django", "sentry-sdk"] -test = ["coverage", "flake8", "freezegun (==0.3.15)", "mock (>=2.0.0)", "pylint", "pytest"] - -[[package]] -name = "protobuf" -version = "4.23.4" -description = "" -optional = false -python-versions = ">=3.7" -files = [ - {file = "protobuf-4.23.4-cp310-abi3-win32.whl", hash = "sha256:5fea3c64d41ea5ecf5697b83e41d09b9589e6f20b677ab3c48e5f242d9b7897b"}, - {file = "protobuf-4.23.4-cp310-abi3-win_amd64.whl", hash = "sha256:7b19b6266d92ca6a2a87effa88ecc4af73ebc5cfde194dc737cf8ef23a9a3b12"}, - {file = "protobuf-4.23.4-cp37-abi3-macosx_10_9_universal2.whl", hash = "sha256:8547bf44fe8cec3c69e3042f5c4fb3e36eb2a7a013bb0a44c018fc1e427aafbd"}, - {file = "protobuf-4.23.4-cp37-abi3-manylinux2014_aarch64.whl", hash = "sha256:fee88269a090ada09ca63551bf2f573eb2424035bcf2cb1b121895b01a46594a"}, - {file = "protobuf-4.23.4-cp37-abi3-manylinux2014_x86_64.whl", hash = "sha256:effeac51ab79332d44fba74660d40ae79985901ac21bca408f8dc335a81aa597"}, - {file = "protobuf-4.23.4-cp37-cp37m-win32.whl", hash = "sha256:c3e0939433c40796ca4cfc0fac08af50b00eb66a40bbbc5dee711998fb0bbc1e"}, - {file = "protobuf-4.23.4-cp37-cp37m-win_amd64.whl", hash = "sha256:9053df6df8e5a76c84339ee4a9f5a2661ceee4a0dab019e8663c50ba324208b0"}, - {file = "protobuf-4.23.4-cp38-cp38-win32.whl", hash = "sha256:e1c915778d8ced71e26fcf43c0866d7499891bca14c4368448a82edc61fdbc70"}, - {file = "protobuf-4.23.4-cp38-cp38-win_amd64.whl", hash = "sha256:351cc90f7d10839c480aeb9b870a211e322bf05f6ab3f55fcb2f51331f80a7d2"}, - {file = "protobuf-4.23.4-cp39-cp39-win32.whl", hash = "sha256:6dd9b9940e3f17077e820b75851126615ee38643c2c5332aa7a359988820c720"}, - {file = "protobuf-4.23.4-cp39-cp39-win_amd64.whl", hash = "sha256:0a5759f5696895de8cc913f084e27fd4125e8fb0914bb729a17816a33819f474"}, - {file = "protobuf-4.23.4-py3-none-any.whl", hash = 
"sha256:e9d0be5bf34b275b9f87ba7407796556abeeba635455d036c7351f7c183ef8ff"}, - {file = "protobuf-4.23.4.tar.gz", hash = "sha256:ccd9430c0719dce806b93f89c91de7977304729e55377f872a92465d548329a9"}, -] - -[[package]] -name = "pulsar-client" -version = "3.2.0" -description = "Apache Pulsar Python client library" -optional = false -python-versions = "*" -files = [ - {file = "pulsar_client-3.2.0-cp310-cp310-macosx_10_15_universal2.whl", hash = "sha256:da53bbe1903026ca1253d36a67bde0ae88513497091658aee8c5514c3e567483"}, - {file = "pulsar_client-3.2.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ec595a71b7a25f1a72a1350efd6680a511b53253c3cac1911ba3d6c4d71fa64c"}, - {file = "pulsar_client-3.2.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e3557c65463d74ec8d2864752389beb06761ab591dd134a164e0b1303c66719b"}, - {file = "pulsar_client-3.2.0-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:d51dc76fec48217489bde95754ad58288c9389361de42f5a27d64e19840d27fb"}, - {file = "pulsar_client-3.2.0-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:9ef2baf85311e0fe1b98342fdafbb93a1818a08ef999eaa524234fedf6f3b941"}, - {file = "pulsar_client-3.2.0-cp310-cp310-win_amd64.whl", hash = "sha256:0928b02beda0c98e77178f4e30e962ddb8ee8c3320e4c7304a78b0796e976523"}, - {file = "pulsar_client-3.2.0-cp311-cp311-macosx_10_15_universal2.whl", hash = "sha256:584f44b03474a69906be711a597a4d516263a55be31e49fc07be503dc8406821"}, - {file = "pulsar_client-3.2.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a637b9a3b30860c61e68a7b8ea650e0987d89e82f73b6a3df1ab662a6438fdda"}, - {file = "pulsar_client-3.2.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b4a187fdc5febcf16f725179dcf2c476f31eeebd8353794d91754a3202dd5072"}, - {file = "pulsar_client-3.2.0-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:5ff879f868cf1fd29db99f39fdb22b3ec3e749c648aca28526689756d922d1c5"}, - {file = "pulsar_client-3.2.0-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:4a5f85d0cc414f739a5b51d843f213b54b2cd768c3a34f7c27cca410712b1f81"}, - {file = "pulsar_client-3.2.0-cp311-cp311-win_amd64.whl", hash = "sha256:4fe748283848d829a80c0323558faeebea4c240d69fa58314ac90344f6999d17"}, - {file = "pulsar_client-3.2.0-cp37-cp37m-macosx_10_15_universal2.whl", hash = "sha256:06b91c26def86dbbc35be15257999fd8a2afbadf32983916ea3eef44f4d4cab4"}, - {file = "pulsar_client-3.2.0-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:39ec897bc8d232e6b118793378fc662a844334b829a28a1b4ad1c5fe8d019135"}, - {file = "pulsar_client-3.2.0-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:aa37c96c25c1b5aff3bad0fd0194b385ec190b2c67a2f439ac91577f81ae18d3"}, - {file = "pulsar_client-3.2.0-cp37-cp37m-musllinux_1_1_aarch64.whl", hash = "sha256:d49cdd4d1b7fc2e80d100acf14e6fd3898f6e099e403fc56ed22a690245b2fec"}, - {file = "pulsar_client-3.2.0-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:0058ca3191fd24528ccf94dba6f12e4093831454a2597166f96900d0717271bf"}, - {file = "pulsar_client-3.2.0-cp37-cp37m-win_amd64.whl", hash = "sha256:cb69b0411008e0b56df51de0aab20aa1c1a12aef3019b9ceba89afbae1f07fe2"}, - {file = "pulsar_client-3.2.0-cp38-cp38-macosx_10_15_universal2.whl", hash = "sha256:f7d33e99602352df7a30707eab4e5781654602212fb618928bffb5523f2bcf35"}, - {file = "pulsar_client-3.2.0-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ad1ac15a175ca90555c681a4d0134568771c6346b97a172f3ef14006556a50ae"}, 
- {file = "pulsar_client-3.2.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:369e08ef1d5cb196dd9271039928800f90b4701a9c9df90bc068b44260d2fb11"}, - {file = "pulsar_client-3.2.0-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:a52ba2b6736a2ebeed31b590e75d417dda149e333461655860efa84d898a3eb4"}, - {file = "pulsar_client-3.2.0-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:5c801334b3b569b23976481a2922bcea0c6dd990fc26544658dd9e9c8f78ca36"}, - {file = "pulsar_client-3.2.0-cp38-cp38-win_amd64.whl", hash = "sha256:cd01fd419280e9013d1655bc53662248be2656b623b1506480e1a985aa7dadd2"}, - {file = "pulsar_client-3.2.0-cp39-cp39-macosx_10_15_universal2.whl", hash = "sha256:0abe54d84db76435a6cd88ce27610352cabc7efae9fa3e7f874e032ec2ca0b3f"}, - {file = "pulsar_client-3.2.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:b9a1b6a806eb4819d8cbab1c4ae44ebf2110a94204a46c365f5757e1455252f2"}, - {file = "pulsar_client-3.2.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:34ea2a6b75ae0e303d522e5b57c75a4ff03dc18b9bfc14151fb14dfaf5866f17"}, - {file = "pulsar_client-3.2.0-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:be6d3a9b2e1db3b6d1a7db5e13f7b4ed420674cf072cdb520fb004c4cd54c0af"}, - {file = "pulsar_client-3.2.0-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:b6b733e6239ffb505f7084df0175baf9d0215f14d0a02e9bbd1fdf71a2d6ea17"}, - {file = "pulsar_client-3.2.0-cp39-cp39-win_amd64.whl", hash = "sha256:edc2135d02b4793efb086edca0ffaa6e8ac9133961c2cdc17ae487e0a53da481"}, -] - -[package.dependencies] -certifi = "*" - -[package.extras] -all = ["apache-bookkeeper-client (>=4.16.1)", "fastavro (==1.7.3)", "grpcio (>=1.8.2)", "prometheus-client", "protobuf (>=3.6.1,<=3.20.3)", "ratelimit"] -avro = ["fastavro (==1.7.3)"] -functions = ["apache-bookkeeper-client (>=4.16.1)", "grpcio (>=1.8.2)", "prometheus-client", "protobuf (>=3.6.1,<=3.20.3)", "ratelimit"] - -[[package]] -name = "pycparser" -version = "2.21" -description = "C parser in Python" -optional = false -python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*" -files = [ - {file = "pycparser-2.21-py2.py3-none-any.whl", hash = "sha256:8ee45429555515e1f6b185e78100aea234072576aa43ab53aefcae078162fca9"}, - {file = "pycparser-2.21.tar.gz", hash = "sha256:e644fdec12f7872f86c58ff790da456218b10f863970249516d60a5eaca77206"}, -] - -[[package]] -name = "pydantic" -version = "1.10.11" -description = "Data validation and settings management using python type hints" -optional = false -python-versions = ">=3.7" -files = [ - {file = "pydantic-1.10.11-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:ff44c5e89315b15ff1f7fdaf9853770b810936d6b01a7bcecaa227d2f8fe444f"}, - {file = "pydantic-1.10.11-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:a6c098d4ab5e2d5b3984d3cb2527e2d6099d3de85630c8934efcfdc348a9760e"}, - {file = "pydantic-1.10.11-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:16928fdc9cb273c6af00d9d5045434c39afba5f42325fb990add2c241402d151"}, - {file = "pydantic-1.10.11-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:0588788a9a85f3e5e9ebca14211a496409cb3deca5b6971ff37c556d581854e7"}, - {file = "pydantic-1.10.11-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:e9baf78b31da2dc3d3f346ef18e58ec5f12f5aaa17ac517e2ffd026a92a87588"}, - {file = "pydantic-1.10.11-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:373c0840f5c2b5b1ccadd9286782852b901055998136287828731868027a724f"}, - {file = 
"pydantic-1.10.11-cp310-cp310-win_amd64.whl", hash = "sha256:c3339a46bbe6013ef7bdd2844679bfe500347ac5742cd4019a88312aa58a9847"}, - {file = "pydantic-1.10.11-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:08a6c32e1c3809fbc49debb96bf833164f3438b3696abf0fbeceb417d123e6eb"}, - {file = "pydantic-1.10.11-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:a451ccab49971af043ec4e0d207cbc8cbe53dbf148ef9f19599024076fe9c25b"}, - {file = "pydantic-1.10.11-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:5b02d24f7b2b365fed586ed73582c20f353a4c50e4be9ba2c57ab96f8091ddae"}, - {file = "pydantic-1.10.11-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:3f34739a89260dfa420aa3cbd069fbcc794b25bbe5c0a214f8fb29e363484b66"}, - {file = "pydantic-1.10.11-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:e297897eb4bebde985f72a46a7552a7556a3dd11e7f76acda0c1093e3dbcf216"}, - {file = "pydantic-1.10.11-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:d185819a7a059550ecb85d5134e7d40f2565f3dd94cfd870132c5f91a89cf58c"}, - {file = "pydantic-1.10.11-cp311-cp311-win_amd64.whl", hash = "sha256:4400015f15c9b464c9db2d5d951b6a780102cfa5870f2c036d37c23b56f7fc1b"}, - {file = "pydantic-1.10.11-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:2417de68290434461a266271fc57274a138510dca19982336639484c73a07af6"}, - {file = "pydantic-1.10.11-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:331c031ba1554b974c98679bd0780d89670d6fd6f53f5d70b10bdc9addee1713"}, - {file = "pydantic-1.10.11-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:8268a735a14c308923e8958363e3a3404f6834bb98c11f5ab43251a4e410170c"}, - {file = "pydantic-1.10.11-cp37-cp37m-musllinux_1_1_i686.whl", hash = "sha256:44e51ba599c3ef227e168424e220cd3e544288c57829520dc90ea9cb190c3248"}, - {file = "pydantic-1.10.11-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:d7781f1d13b19700b7949c5a639c764a077cbbdd4322ed505b449d3ca8edcb36"}, - {file = "pydantic-1.10.11-cp37-cp37m-win_amd64.whl", hash = "sha256:7522a7666157aa22b812ce14c827574ddccc94f361237ca6ea8bb0d5c38f1629"}, - {file = "pydantic-1.10.11-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:bc64eab9b19cd794a380179ac0e6752335e9555d214cfcb755820333c0784cb3"}, - {file = "pydantic-1.10.11-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:8dc77064471780262b6a68fe67e013298d130414d5aaf9b562c33987dbd2cf4f"}, - {file = "pydantic-1.10.11-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:fe429898f2c9dd209bd0632a606bddc06f8bce081bbd03d1c775a45886e2c1cb"}, - {file = "pydantic-1.10.11-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:192c608ad002a748e4a0bed2ddbcd98f9b56df50a7c24d9a931a8c5dd053bd3d"}, - {file = "pydantic-1.10.11-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:ef55392ec4bb5721f4ded1096241e4b7151ba6d50a50a80a2526c854f42e6a2f"}, - {file = "pydantic-1.10.11-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:41e0bb6efe86281623abbeeb0be64eab740c865388ee934cd3e6a358784aca6e"}, - {file = "pydantic-1.10.11-cp38-cp38-win_amd64.whl", hash = "sha256:265a60da42f9f27e0b1014eab8acd3e53bd0bad5c5b4884e98a55f8f596b2c19"}, - {file = "pydantic-1.10.11-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:469adf96c8e2c2bbfa655fc7735a2a82f4c543d9fee97bd113a7fb509bf5e622"}, - {file = "pydantic-1.10.11-cp39-cp39-macosx_11_0_arm64.whl", hash = 
"sha256:e6cbfbd010b14c8a905a7b10f9fe090068d1744d46f9e0c021db28daeb8b6de1"}, - {file = "pydantic-1.10.11-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:abade85268cc92dff86d6effcd917893130f0ff516f3d637f50dadc22ae93999"}, - {file = "pydantic-1.10.11-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:e9738b0f2e6c70f44ee0de53f2089d6002b10c33264abee07bdb5c7f03038303"}, - {file = "pydantic-1.10.11-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:787cf23e5a0cde753f2eabac1b2e73ae3844eb873fd1f5bdbff3048d8dbb7604"}, - {file = "pydantic-1.10.11-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:174899023337b9fc685ac8adaa7b047050616136ccd30e9070627c1aaab53a13"}, - {file = "pydantic-1.10.11-cp39-cp39-win_amd64.whl", hash = "sha256:1954f8778489a04b245a1e7b8b22a9d3ea8ef49337285693cf6959e4b757535e"}, - {file = "pydantic-1.10.11-py3-none-any.whl", hash = "sha256:008c5e266c8aada206d0627a011504e14268a62091450210eda7c07fabe6963e"}, - {file = "pydantic-1.10.11.tar.gz", hash = "sha256:f66d479cf7eb331372c470614be6511eae96f1f120344c25f3f9bb59fb1b5528"}, -] - -[package.dependencies] -typing-extensions = ">=4.2.0" - -[package.extras] -dotenv = ["python-dotenv (>=0.10.4)"] -email = ["email-validator (>=1.0.3)"] - -[[package]] -name = "pymupdf" -version = "1.22.5" -description = "Python bindings for the PDF toolkit and renderer MuPDF" -optional = false -python-versions = ">=3.7" -files = [ - {file = "PyMuPDF-1.22.5-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:640b8e4cb116dd87a3c854e49808a4f63625e663a7bc5b1efc971db5b4775367"}, - {file = "PyMuPDF-1.22.5-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:17efbbf0e2d99d24cfc302fac512928eb294f10b7b67d597d04dafd012812e4e"}, - {file = "PyMuPDF-1.22.5-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:9bc9b9bf0f2beea3911750d2d66247608be8cbad33b7a050cacec9e4c105a1ca"}, - {file = "PyMuPDF-1.22.5-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e7734a32a91eea4b502b8f9d2915cdba0a372226e14fb983876d763110dcefef"}, - {file = "PyMuPDF-1.22.5-cp310-cp310-win32.whl", hash = "sha256:c2fd70ca9961f7871810dce1b7d0a42a69eb8ff2d786621123952bd505a6867e"}, - {file = "PyMuPDF-1.22.5-cp310-cp310-win_amd64.whl", hash = "sha256:add310c96df6933cfb4ce3821c9c7b5c133e8aa609a4c9416e1c7af546163488"}, - {file = "PyMuPDF-1.22.5-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:017aaba511526facfc928e9d95d2c10d28a2821b05b9039bf422031a7da8584e"}, - {file = "PyMuPDF-1.22.5-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:6fe5e44a14864d921fb96669a82f9635846806176f77f1d73c61feb84ebf4d84"}, - {file = "PyMuPDF-1.22.5-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:2e74d766f79e41e10c51865233042ab2cc4612ca7942812dca0603f4d0f8f73d"}, - {file = "PyMuPDF-1.22.5-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:fe8175452fcc99a0af6429d8acd87682a3a70c5879d73532c7327f71ce508a35"}, - {file = "PyMuPDF-1.22.5-cp311-cp311-win32.whl", hash = "sha256:42f59f4999d7f8b35c850050bd965e98c081a7d9b92d5f9dcf30203b30d06876"}, - {file = "PyMuPDF-1.22.5-cp311-cp311-win_amd64.whl", hash = "sha256:3d71c47aa14b73f2df7d03be8c547a05df6c6898d8c63a0f752b26f206eefd3c"}, - {file = "PyMuPDF-1.22.5-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:4bcad7ea4b3ab82c46fe8da27ec738d38c213ed9935ef67d98ed09574d9a234e"}, - {file = "PyMuPDF-1.22.5-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = 
"sha256:7b04a83ddcb3f7c935c75a1f7f6050c85fe4062a2ea64c47ee6bda788d037761"}, - {file = "PyMuPDF-1.22.5-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d02ee28663077f15d529b04d27588b174fa937daf73a294df279bbf70c468f5c"}, - {file = "PyMuPDF-1.22.5-cp37-cp37m-win32.whl", hash = "sha256:411fc35f6dae16ec940b6b0406e84be6ff29f93b30908ea1427e2a4bd594d4ba"}, - {file = "PyMuPDF-1.22.5-cp37-cp37m-win_amd64.whl", hash = "sha256:7c8c0f686865e330de90b93d53b100f7f07c2f10f5449ceb721121f459f7cc4a"}, - {file = "PyMuPDF-1.22.5-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:64ae9f81b8fe0a3e6386a24887a92736793479c5918ecac3b7deac2d02abf1f2"}, - {file = "PyMuPDF-1.22.5-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:7562436dadf8382e59ac3739fbbf9d5b2d807fafc7f28cb884863430e0de6505"}, - {file = "PyMuPDF-1.22.5-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d0c22046e5f2cf0d72f9809a967340db1b238fefe58322896bc7c3f3d1d10b42"}, - {file = "PyMuPDF-1.22.5-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:efa601dc4116c17a6b09255b031b5a1891e3ac18b50ec536452a725a6b75db8d"}, - {file = "PyMuPDF-1.22.5-cp38-cp38-win32.whl", hash = "sha256:3d0fe749e648f5245059d5f771fb50c1a988a1d2e82268b56377b2176a9fee5d"}, - {file = "PyMuPDF-1.22.5-cp38-cp38-win_amd64.whl", hash = "sha256:4fbc5bfe6ecc53929e3fd0db9846fb7da084ddb4b1fc1063857245fa783974d9"}, - {file = "PyMuPDF-1.22.5-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:87b36e0797ab7fbb7ef594c7a6e0febc7ffb4101a42ea796726a8288391a3769"}, - {file = "PyMuPDF-1.22.5-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:01119edb7e4c3dd8c154d237b8ac927bd359eea8d31468f9a89aa308b5bca04e"}, - {file = "PyMuPDF-1.22.5-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:fde02fcb387863873b56730f4b9f65515d87c92c12299f0f0a74b3ccdfe35062"}, - {file = "PyMuPDF-1.22.5-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:30c55814bbf6461aef9b34cb524d1d14857d5ec6ccfbb78ecfb1d07dfc40eeb8"}, - {file = "PyMuPDF-1.22.5-cp39-cp39-win32.whl", hash = "sha256:0542178c3a399282903705a8cc298e7f33f4770605e0a9db344aff5d375bcf0b"}, - {file = "PyMuPDF-1.22.5-cp39-cp39-win_amd64.whl", hash = "sha256:f8ca46a6987e14f58ec8dfda2d2376bacd113c1fec5f58bebf90838bb4408ab9"}, - {file = "PyMuPDF-1.22.5.tar.gz", hash = "sha256:5ec8d5106752297529d0d68d46cfc4ce99914aabd99be843f1599a1842d63fe9"}, -] - -[[package]] -name = "pypandoc" -version = "1.11" -description = "Thin wrapper for pandoc." -optional = false -python-versions = ">=3.6" -files = [ - {file = "pypandoc-1.11-py3-none-any.whl", hash = "sha256:b260596934e9cfc6513056110a7c8600171d414f90558bf4407e68b209be8007"}, - {file = "pypandoc-1.11.tar.gz", hash = "sha256:7f6d68db0e57e0f6961bec2190897118c4d305fc2d31c22cd16037f22ee084a5"}, -] - -[[package]] -name = "pyparsing" -version = "2.4.7" -description = "Python parsing module" -optional = false -python-versions = ">=2.6, !=3.0.*, !=3.1.*, !=3.2.*" -files = [ - {file = "pyparsing-2.4.7-py2.py3-none-any.whl", hash = "sha256:ef9d7589ef3c200abe66653d3f1ab1033c3c419ae9b9bdb1240a85b024efc88b"}, - {file = "pyparsing-2.4.7.tar.gz", hash = "sha256:c203ec8783bf771a155b207279b9bccb8dea02d8f0c9e5f8ead507bc3246ecc1"}, -] - -[[package]] -name = "pyreadline3" -version = "3.4.1" -description = "A python implementation of GNU readline." 
-optional = false -python-versions = "*" -files = [ - {file = "pyreadline3-3.4.1-py3-none-any.whl", hash = "sha256:b0efb6516fd4fb07b45949053826a62fa4cb353db5be2bbb4a7aa1fdd1e345fb"}, - {file = "pyreadline3-3.4.1.tar.gz", hash = "sha256:6f3d1f7b8a31ba32b73917cefc1f28cc660562f39aea8646d30bd6eff21f7bae"}, -] - -[[package]] -name = "python-dateutil" -version = "2.8.2" -description = "Extensions to the standard Python datetime module" -optional = false -python-versions = "!=3.0.*,!=3.1.*,!=3.2.*,>=2.7" -files = [ - {file = "python-dateutil-2.8.2.tar.gz", hash = "sha256:0123cacc1627ae19ddf3c27a5de5bd67ee4586fbdd6440d9748f8abb483d3e86"}, - {file = "python_dateutil-2.8.2-py2.py3-none-any.whl", hash = "sha256:961d03dc3453ebbc59dbdea9e4e11c5651520a876d0f4db161e8674aae935da9"}, -] - -[package.dependencies] -six = ">=1.5" - -[[package]] -name = "python-docx" -version = "0.8.11" -description = "Create and update Microsoft Word .docx files." -optional = false -python-versions = "*" -files = [ - {file = "python-docx-0.8.11.tar.gz", hash = "sha256:1105d233a0956dd8dd1e710d20b159e2d72ac3c301041b95f4d4ceb3e0ebebc4"}, -] - -[package.dependencies] -lxml = ">=2.3.2" - -[[package]] -name = "python-dotenv" -version = "1.0.0" -description = "Read key-value pairs from a .env file and set them as environment variables" -optional = false -python-versions = ">=3.8" -files = [ - {file = "python-dotenv-1.0.0.tar.gz", hash = "sha256:a8df96034aae6d2d50a4ebe8216326c61c3eb64836776504fcca410e5937a3ba"}, - {file = "python_dotenv-1.0.0-py3-none-any.whl", hash = "sha256:f5971a9226b701070a4bf2c38c89e5a3f0d64de8debda981d1db98583009122a"}, -] - -[package.extras] -cli = ["click (>=5.0)"] - -[[package]] -name = "python-magic" -version = "0.4.27" -description = "File type identification using libmagic" -optional = false -python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*" -files = [ - {file = "python-magic-0.4.27.tar.gz", hash = "sha256:c1ba14b08e4a5f5c31a302b7721239695b2f0f058d125bd5ce1ee36b9d9d3c3b"}, - {file = "python_magic-0.4.27-py2.py3-none-any.whl", hash = "sha256:c212960ad306f700aa0d01e5d7a325d20548ff97eb9920dcd29513174f0294d3"}, -] - -[[package]] -name = "python-pptx" -version = "0.6.21" -description = "Generate and manipulate Open XML PowerPoint (.pptx) files" -optional = false -python-versions = "*" -files = [ - {file = "python-pptx-0.6.21.tar.gz", hash = "sha256:7798a2aaf89563565b3c7120c0acfe9aff775db0db3580544e3bf4840c2e378f"}, -] - -[package.dependencies] -lxml = ">=3.1.0" -Pillow = ">=3.3.2" -XlsxWriter = ">=0.5.7" - -[[package]] -name = "pytz" -version = "2023.3" -description = "World timezone definitions, modern and historical" -optional = false -python-versions = "*" -files = [ - {file = "pytz-2023.3-py2.py3-none-any.whl", hash = "sha256:a151b3abb88eda1d4e34a9814df37de2a80e301e68ba0fd856fb9b46bfbbbffb"}, - {file = "pytz-2023.3.tar.gz", hash = "sha256:1d8ce29db189191fb55338ee6d0387d82ab59f3d00eac103412d64e0ebd0c588"}, -] - -[[package]] -name = "pywin32" -version = "306" -description = "Python for Window Extensions" -optional = false -python-versions = "*" -files = [ - {file = "pywin32-306-cp310-cp310-win32.whl", hash = "sha256:06d3420a5155ba65f0b72f2699b5bacf3109f36acbe8923765c22938a69dfc8d"}, - {file = "pywin32-306-cp310-cp310-win_amd64.whl", hash = "sha256:84f4471dbca1887ea3803d8848a1616429ac94a4a8d05f4bc9c5dcfd42ca99c8"}, - {file = "pywin32-306-cp311-cp311-win32.whl", hash = "sha256:e65028133d15b64d2ed8f06dd9fbc268352478d4f9289e69c190ecd6818b6407"}, - {file = 
"pywin32-306-cp311-cp311-win_amd64.whl", hash = "sha256:a7639f51c184c0272e93f244eb24dafca9b1855707d94c192d4a0b4c01e1100e"}, - {file = "pywin32-306-cp311-cp311-win_arm64.whl", hash = "sha256:70dba0c913d19f942a2db25217d9a1b726c278f483a919f1abfed79c9cf64d3a"}, - {file = "pywin32-306-cp312-cp312-win32.whl", hash = "sha256:383229d515657f4e3ed1343da8be101000562bf514591ff383ae940cad65458b"}, - {file = "pywin32-306-cp312-cp312-win_amd64.whl", hash = "sha256:37257794c1ad39ee9be652da0462dc2e394c8159dfd913a8a4e8eb6fd346da0e"}, - {file = "pywin32-306-cp312-cp312-win_arm64.whl", hash = "sha256:5821ec52f6d321aa59e2db7e0a35b997de60c201943557d108af9d4ae1ec7040"}, - {file = "pywin32-306-cp37-cp37m-win32.whl", hash = "sha256:1c73ea9a0d2283d889001998059f5eaaba3b6238f767c9cf2833b13e6a685f65"}, - {file = "pywin32-306-cp37-cp37m-win_amd64.whl", hash = "sha256:72c5f621542d7bdd4fdb716227be0dd3f8565c11b280be6315b06ace35487d36"}, - {file = "pywin32-306-cp38-cp38-win32.whl", hash = "sha256:e4c092e2589b5cf0d365849e73e02c391c1349958c5ac3e9d5ccb9a28e017b3a"}, - {file = "pywin32-306-cp38-cp38-win_amd64.whl", hash = "sha256:e8ac1ae3601bee6ca9f7cb4b5363bf1c0badb935ef243c4733ff9a393b1690c0"}, - {file = "pywin32-306-cp39-cp39-win32.whl", hash = "sha256:e25fd5b485b55ac9c057f67d94bc203f3f6595078d1fb3b458c9c28b7153a802"}, - {file = "pywin32-306-cp39-cp39-win_amd64.whl", hash = "sha256:39b61c15272833b5c329a2989999dcae836b1eed650252ab1b7bfbe1d59f30f4"}, -] - -[[package]] -name = "pyyaml" -version = "6.0" -description = "YAML parser and emitter for Python" -optional = false -python-versions = ">=3.6" -files = [ - {file = "PyYAML-6.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:d4db7c7aef085872ef65a8fd7d6d09a14ae91f691dec3e87ee5ee0539d516f53"}, - {file = "PyYAML-6.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:9df7ed3b3d2e0ecfe09e14741b857df43adb5a3ddadc919a2d94fbdf78fea53c"}, - {file = "PyYAML-6.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:77f396e6ef4c73fdc33a9157446466f1cff553d979bd00ecb64385760c6babdc"}, - {file = "PyYAML-6.0-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:a80a78046a72361de73f8f395f1f1e49f956c6be882eed58505a15f3e430962b"}, - {file = "PyYAML-6.0-cp310-cp310-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:f84fbc98b019fef2ee9a1cb3ce93e3187a6df0b2538a651bfb890254ba9f90b5"}, - {file = "PyYAML-6.0-cp310-cp310-win32.whl", hash = "sha256:2cd5df3de48857ed0544b34e2d40e9fac445930039f3cfe4bcc592a1f836d513"}, - {file = "PyYAML-6.0-cp310-cp310-win_amd64.whl", hash = "sha256:daf496c58a8c52083df09b80c860005194014c3698698d1a57cbcfa182142a3a"}, - {file = "PyYAML-6.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:d4b0ba9512519522b118090257be113b9468d804b19d63c71dbcf4a48fa32358"}, - {file = "PyYAML-6.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:81957921f441d50af23654aa6c5e5eaf9b06aba7f0a19c18a538dc7ef291c5a1"}, - {file = "PyYAML-6.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:afa17f5bc4d1b10afd4466fd3a44dc0e245382deca5b3c353d8b757f9e3ecb8d"}, - {file = "PyYAML-6.0-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:dbad0e9d368bb989f4515da330b88a057617d16b6a8245084f1b05400f24609f"}, - {file = "PyYAML-6.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:432557aa2c09802be39460360ddffd48156e30721f5e8d917f01d31694216782"}, - {file = "PyYAML-6.0-cp311-cp311-win32.whl", hash = 
"sha256:bfaef573a63ba8923503d27530362590ff4f576c626d86a9fed95822a8255fd7"}, - {file = "PyYAML-6.0-cp311-cp311-win_amd64.whl", hash = "sha256:01b45c0191e6d66c470b6cf1b9531a771a83c1c4208272ead47a3ae4f2f603bf"}, - {file = "PyYAML-6.0-cp36-cp36m-macosx_10_9_x86_64.whl", hash = "sha256:897b80890765f037df3403d22bab41627ca8811ae55e9a722fd0392850ec4d86"}, - {file = "PyYAML-6.0-cp36-cp36m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:50602afada6d6cbfad699b0c7bb50d5ccffa7e46a3d738092afddc1f9758427f"}, - {file = "PyYAML-6.0-cp36-cp36m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:48c346915c114f5fdb3ead70312bd042a953a8ce5c7106d5bfb1a5254e47da92"}, - {file = "PyYAML-6.0-cp36-cp36m-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:98c4d36e99714e55cfbaaee6dd5badbc9a1ec339ebfc3b1f52e293aee6bb71a4"}, - {file = "PyYAML-6.0-cp36-cp36m-win32.whl", hash = "sha256:0283c35a6a9fbf047493e3a0ce8d79ef5030852c51e9d911a27badfde0605293"}, - {file = "PyYAML-6.0-cp36-cp36m-win_amd64.whl", hash = "sha256:07751360502caac1c067a8132d150cf3d61339af5691fe9e87803040dbc5db57"}, - {file = "PyYAML-6.0-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:819b3830a1543db06c4d4b865e70ded25be52a2e0631ccd2f6a47a2822f2fd7c"}, - {file = "PyYAML-6.0-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:473f9edb243cb1935ab5a084eb238d842fb8f404ed2193a915d1784b5a6b5fc0"}, - {file = "PyYAML-6.0-cp37-cp37m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:0ce82d761c532fe4ec3f87fc45688bdd3a4c1dc5e0b4a19814b9009a29baefd4"}, - {file = "PyYAML-6.0-cp37-cp37m-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:231710d57adfd809ef5d34183b8ed1eeae3f76459c18fb4a0b373ad56bedcdd9"}, - {file = "PyYAML-6.0-cp37-cp37m-win32.whl", hash = "sha256:c5687b8d43cf58545ade1fe3e055f70eac7a5a1a0bf42824308d868289a95737"}, - {file = "PyYAML-6.0-cp37-cp37m-win_amd64.whl", hash = "sha256:d15a181d1ecd0d4270dc32edb46f7cb7733c7c508857278d3d378d14d606db2d"}, - {file = "PyYAML-6.0-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:0b4624f379dab24d3725ffde76559cff63d9ec94e1736b556dacdfebe5ab6d4b"}, - {file = "PyYAML-6.0-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:213c60cd50106436cc818accf5baa1aba61c0189ff610f64f4a3e8c6726218ba"}, - {file = "PyYAML-6.0-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:9fa600030013c4de8165339db93d182b9431076eb98eb40ee068700c9c813e34"}, - {file = "PyYAML-6.0-cp38-cp38-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:277a0ef2981ca40581a47093e9e2d13b3f1fbbeffae064c1d21bfceba2030287"}, - {file = "PyYAML-6.0-cp38-cp38-win32.whl", hash = "sha256:d4eccecf9adf6fbcc6861a38015c2a64f38b9d94838ac1810a9023a0609e1b78"}, - {file = "PyYAML-6.0-cp38-cp38-win_amd64.whl", hash = "sha256:1e4747bc279b4f613a09eb64bba2ba602d8a6664c6ce6396a4d0cd413a50ce07"}, - {file = "PyYAML-6.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:055d937d65826939cb044fc8c9b08889e8c743fdc6a32b33e2390f66013e449b"}, - {file = "PyYAML-6.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:e61ceaab6f49fb8bdfaa0f92c4b57bcfbea54c09277b1b4f7ac376bfb7a7c174"}, - {file = "PyYAML-6.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d67d839ede4ed1b28a4e8909735fc992a923cdb84e618544973d7dfc71540803"}, - {file = "PyYAML-6.0-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = 
"sha256:cba8c411ef271aa037d7357a2bc8f9ee8b58b9965831d9e51baf703280dc73d3"}, - {file = "PyYAML-6.0-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:40527857252b61eacd1d9af500c3337ba8deb8fc298940291486c465c8b46ec0"}, - {file = "PyYAML-6.0-cp39-cp39-win32.whl", hash = "sha256:b5b9eccad747aabaaffbc6064800670f0c297e52c12754eb1d976c57e4f74dcb"}, - {file = "PyYAML-6.0-cp39-cp39-win_amd64.whl", hash = "sha256:b3d267842bf12586ba6c734f89d1f5b871df0273157918b0ccefa29deb05c21c"}, - {file = "PyYAML-6.0.tar.gz", hash = "sha256:68fb519c14306fec9720a2a5b45bc9f0c8d1b9c72adf45c37baedfcd949c35a2"}, -] - -[[package]] -name = "red-black-tree-mod" -version = "1.20" -description = "Flexible python implementation of red black trees" -optional = false -python-versions = "*" -files = [ - {file = "red-black-tree-mod-1.20.tar.gz", hash = "sha256:2448e6fc9cbf1be204c753f352c6ee49aa8156dbf1faa57dfc26bd7705077e0a"}, -] - -[[package]] -name = "regex" -version = "2023.6.3" -description = "Alternative regular expression module, to replace re." -optional = false -python-versions = ">=3.6" -files = [ - {file = "regex-2023.6.3-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:824bf3ac11001849aec3fa1d69abcb67aac3e150a933963fb12bda5151fe1bfd"}, - {file = "regex-2023.6.3-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:05ed27acdf4465c95826962528f9e8d41dbf9b1aa8531a387dee6ed215a3e9ef"}, - {file = "regex-2023.6.3-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0b49c764f88a79160fa64f9a7b425620e87c9f46095ef9c9920542ab2495c8bc"}, - {file = "regex-2023.6.3-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:8e3f1316c2293e5469f8f09dc2d76efb6c3982d3da91ba95061a7e69489a14ef"}, - {file = "regex-2023.6.3-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:43e1dd9d12df9004246bacb79a0e5886b3b6071b32e41f83b0acbf293f820ee8"}, - {file = "regex-2023.6.3-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:4959e8bcbfda5146477d21c3a8ad81b185cd252f3d0d6e4724a5ef11c012fb06"}, - {file = "regex-2023.6.3-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:af4dd387354dc83a3bff67127a124c21116feb0d2ef536805c454721c5d7993d"}, - {file = "regex-2023.6.3-cp310-cp310-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:2239d95d8e243658b8dbb36b12bd10c33ad6e6933a54d36ff053713f129aa536"}, - {file = "regex-2023.6.3-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:890e5a11c97cf0d0c550eb661b937a1e45431ffa79803b942a057c4fb12a2da2"}, - {file = "regex-2023.6.3-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:a8105e9af3b029f243ab11ad47c19b566482c150c754e4c717900a798806b222"}, - {file = "regex-2023.6.3-cp310-cp310-musllinux_1_1_ppc64le.whl", hash = "sha256:25be746a8ec7bc7b082783216de8e9473803706723b3f6bef34b3d0ed03d57e2"}, - {file = "regex-2023.6.3-cp310-cp310-musllinux_1_1_s390x.whl", hash = "sha256:3676f1dd082be28b1266c93f618ee07741b704ab7b68501a173ce7d8d0d0ca18"}, - {file = "regex-2023.6.3-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:10cb847aeb1728412c666ab2e2000ba6f174f25b2bdc7292e7dd71b16db07568"}, - {file = "regex-2023.6.3-cp310-cp310-win32.whl", hash = "sha256:dbbbfce33cd98f97f6bffb17801b0576e653f4fdb1d399b2ea89638bc8d08ae1"}, - {file = "regex-2023.6.3-cp310-cp310-win_amd64.whl", hash = "sha256:c5f8037000eb21e4823aa485149f2299eb589f8d1fe4b448036d230c3f4e68e0"}, - {file = 
"regex-2023.6.3-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:c123f662be8ec5ab4ea72ea300359023a5d1df095b7ead76fedcd8babbedf969"}, - {file = "regex-2023.6.3-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:9edcbad1f8a407e450fbac88d89e04e0b99a08473f666a3f3de0fd292badb6aa"}, - {file = "regex-2023.6.3-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:dcba6dae7de533c876255317c11f3abe4907ba7d9aa15d13e3d9710d4315ec0e"}, - {file = "regex-2023.6.3-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:29cdd471ebf9e0f2fb3cac165efedc3c58db841d83a518b082077e612d3ee5df"}, - {file = "regex-2023.6.3-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:12b74fbbf6cbbf9dbce20eb9b5879469e97aeeaa874145517563cca4029db65c"}, - {file = "regex-2023.6.3-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0c29ca1bd61b16b67be247be87390ef1d1ef702800f91fbd1991f5c4421ebae8"}, - {file = "regex-2023.6.3-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:d77f09bc4b55d4bf7cc5eba785d87001d6757b7c9eec237fe2af57aba1a071d9"}, - {file = "regex-2023.6.3-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:ea353ecb6ab5f7e7d2f4372b1e779796ebd7b37352d290096978fea83c4dba0c"}, - {file = "regex-2023.6.3-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:10590510780b7541969287512d1b43f19f965c2ece6c9b1c00fc367b29d8dce7"}, - {file = "regex-2023.6.3-cp311-cp311-musllinux_1_1_ppc64le.whl", hash = "sha256:e2fbd6236aae3b7f9d514312cdb58e6494ee1c76a9948adde6eba33eb1c4264f"}, - {file = "regex-2023.6.3-cp311-cp311-musllinux_1_1_s390x.whl", hash = "sha256:6b2675068c8b56f6bfd5a2bda55b8accbb96c02fd563704732fd1c95e2083461"}, - {file = "regex-2023.6.3-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:74419d2b50ecb98360cfaa2974da8689cb3b45b9deff0dcf489c0d333bcc1477"}, - {file = "regex-2023.6.3-cp311-cp311-win32.whl", hash = "sha256:fb5ec16523dc573a4b277663a2b5a364e2099902d3944c9419a40ebd56a118f9"}, - {file = "regex-2023.6.3-cp311-cp311-win_amd64.whl", hash = "sha256:09e4a1a6acc39294a36b7338819b10baceb227f7f7dbbea0506d419b5a1dd8af"}, - {file = "regex-2023.6.3-cp36-cp36m-macosx_10_9_x86_64.whl", hash = "sha256:0654bca0cdf28a5956c83839162692725159f4cda8d63e0911a2c0dc76166525"}, - {file = "regex-2023.6.3-cp36-cp36m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:463b6a3ceb5ca952e66550a4532cef94c9a0c80dc156c4cc343041951aec1697"}, - {file = "regex-2023.6.3-cp36-cp36m-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:87b2a5bb5e78ee0ad1de71c664d6eb536dc3947a46a69182a90f4410f5e3f7dd"}, - {file = "regex-2023.6.3-cp36-cp36m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:6343c6928282c1f6a9db41f5fd551662310e8774c0e5ebccb767002fcf663ca9"}, - {file = "regex-2023.6.3-cp36-cp36m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b6192d5af2ccd2a38877bfef086d35e6659566a335b1492786ff254c168b1693"}, - {file = "regex-2023.6.3-cp36-cp36m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:74390d18c75054947e4194019077e243c06fbb62e541d8817a0fa822ea310c14"}, - {file = "regex-2023.6.3-cp36-cp36m-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:742e19a90d9bb2f4a6cf2862b8b06dea5e09b96c9f2df1779e53432d7275331f"}, - {file = "regex-2023.6.3-cp36-cp36m-musllinux_1_1_aarch64.whl", hash = "sha256:8abbc5d54ea0ee80e37fef009e3cec5dafd722ed3c829126253d3e22f3846f1e"}, - 
{file = "regex-2023.6.3-cp36-cp36m-musllinux_1_1_i686.whl", hash = "sha256:c2b867c17a7a7ae44c43ebbeb1b5ff406b3e8d5b3e14662683e5e66e6cc868d3"}, - {file = "regex-2023.6.3-cp36-cp36m-musllinux_1_1_ppc64le.whl", hash = "sha256:d831c2f8ff278179705ca59f7e8524069c1a989e716a1874d6d1aab6119d91d1"}, - {file = "regex-2023.6.3-cp36-cp36m-musllinux_1_1_s390x.whl", hash = "sha256:ee2d1a9a253b1729bb2de27d41f696ae893507c7db224436abe83ee25356f5c1"}, - {file = "regex-2023.6.3-cp36-cp36m-musllinux_1_1_x86_64.whl", hash = "sha256:61474f0b41fe1a80e8dfa70f70ea1e047387b7cd01c85ec88fa44f5d7561d787"}, - {file = "regex-2023.6.3-cp36-cp36m-win32.whl", hash = "sha256:0b71e63226e393b534105fcbdd8740410dc6b0854c2bfa39bbda6b0d40e59a54"}, - {file = "regex-2023.6.3-cp36-cp36m-win_amd64.whl", hash = "sha256:bbb02fd4462f37060122e5acacec78e49c0fbb303c30dd49c7f493cf21fc5b27"}, - {file = "regex-2023.6.3-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:b862c2b9d5ae38a68b92e215b93f98d4c5e9454fa36aae4450f61dd33ff48487"}, - {file = "regex-2023.6.3-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:976d7a304b59ede34ca2921305b57356694f9e6879db323fd90a80f865d355a3"}, - {file = "regex-2023.6.3-cp37-cp37m-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:83320a09188e0e6c39088355d423aa9d056ad57a0b6c6381b300ec1a04ec3d16"}, - {file = "regex-2023.6.3-cp37-cp37m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:9427a399501818a7564f8c90eced1e9e20709ece36be701f394ada99890ea4b3"}, - {file = "regex-2023.6.3-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7178bbc1b2ec40eaca599d13c092079bf529679bf0371c602edaa555e10b41c3"}, - {file = "regex-2023.6.3-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:837328d14cde912af625d5f303ec29f7e28cdab588674897baafaf505341f2fc"}, - {file = "regex-2023.6.3-cp37-cp37m-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:2d44dc13229905ae96dd2ae2dd7cebf824ee92bc52e8cf03dcead37d926da019"}, - {file = "regex-2023.6.3-cp37-cp37m-musllinux_1_1_aarch64.whl", hash = "sha256:d54af539295392611e7efbe94e827311eb8b29668e2b3f4cadcfe6f46df9c777"}, - {file = "regex-2023.6.3-cp37-cp37m-musllinux_1_1_i686.whl", hash = "sha256:7117d10690c38a622e54c432dfbbd3cbd92f09401d622902c32f6d377e2300ee"}, - {file = "regex-2023.6.3-cp37-cp37m-musllinux_1_1_ppc64le.whl", hash = "sha256:bb60b503ec8a6e4e3e03a681072fa3a5adcbfa5479fa2d898ae2b4a8e24c4591"}, - {file = "regex-2023.6.3-cp37-cp37m-musllinux_1_1_s390x.whl", hash = "sha256:65ba8603753cec91c71de423a943ba506363b0e5c3fdb913ef8f9caa14b2c7e0"}, - {file = "regex-2023.6.3-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:271f0bdba3c70b58e6f500b205d10a36fb4b58bd06ac61381b68de66442efddb"}, - {file = "regex-2023.6.3-cp37-cp37m-win32.whl", hash = "sha256:9beb322958aaca059f34975b0df135181f2e5d7a13b84d3e0e45434749cb20f7"}, - {file = "regex-2023.6.3-cp37-cp37m-win_amd64.whl", hash = "sha256:fea75c3710d4f31389eed3c02f62d0b66a9da282521075061ce875eb5300cf23"}, - {file = "regex-2023.6.3-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:8f56fcb7ff7bf7404becdfc60b1e81a6d0561807051fd2f1860b0d0348156a07"}, - {file = "regex-2023.6.3-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:d2da3abc88711bce7557412310dfa50327d5769a31d1c894b58eb256459dc289"}, - {file = "regex-2023.6.3-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a99b50300df5add73d307cf66abea093304a07eb017bce94f01e795090dea87c"}, - {file = 
"regex-2023.6.3-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:5708089ed5b40a7b2dc561e0c8baa9535b77771b64a8330b684823cfd5116036"}, - {file = "regex-2023.6.3-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:687ea9d78a4b1cf82f8479cab23678aff723108df3edeac098e5b2498879f4a7"}, - {file = "regex-2023.6.3-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:4d3850beab9f527f06ccc94b446c864059c57651b3f911fddb8d9d3ec1d1b25d"}, - {file = "regex-2023.6.3-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:e8915cc96abeb8983cea1df3c939e3c6e1ac778340c17732eb63bb96247b91d2"}, - {file = "regex-2023.6.3-cp38-cp38-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:841d6e0e5663d4c7b4c8099c9997be748677d46cbf43f9f471150e560791f7ff"}, - {file = "regex-2023.6.3-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:9edce5281f965cf135e19840f4d93d55b3835122aa76ccacfd389e880ba4cf82"}, - {file = "regex-2023.6.3-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:b956231ebdc45f5b7a2e1f90f66a12be9610ce775fe1b1d50414aac1e9206c06"}, - {file = "regex-2023.6.3-cp38-cp38-musllinux_1_1_ppc64le.whl", hash = "sha256:36efeba71c6539d23c4643be88295ce8c82c88bbd7c65e8a24081d2ca123da3f"}, - {file = "regex-2023.6.3-cp38-cp38-musllinux_1_1_s390x.whl", hash = "sha256:cf67ca618b4fd34aee78740bea954d7c69fdda419eb208c2c0c7060bb822d747"}, - {file = "regex-2023.6.3-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:b4598b1897837067a57b08147a68ac026c1e73b31ef6e36deeeb1fa60b2933c9"}, - {file = "regex-2023.6.3-cp38-cp38-win32.whl", hash = "sha256:f415f802fbcafed5dcc694c13b1292f07fe0befdb94aa8a52905bd115ff41e88"}, - {file = "regex-2023.6.3-cp38-cp38-win_amd64.whl", hash = "sha256:d4f03bb71d482f979bda92e1427f3ec9b220e62a7dd337af0aa6b47bf4498f72"}, - {file = "regex-2023.6.3-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:ccf91346b7bd20c790310c4147eee6ed495a54ddb6737162a36ce9dbef3e4751"}, - {file = "regex-2023.6.3-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:b28f5024a3a041009eb4c333863d7894d191215b39576535c6734cd88b0fcb68"}, - {file = "regex-2023.6.3-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e0bb18053dfcfed432cc3ac632b5e5e5c5b7e55fb3f8090e867bfd9b054dbcbf"}, - {file = "regex-2023.6.3-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:9a5bfb3004f2144a084a16ce19ca56b8ac46e6fd0651f54269fc9e230edb5e4a"}, - {file = "regex-2023.6.3-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:5c6b48d0fa50d8f4df3daf451be7f9689c2bde1a52b1225c5926e3f54b6a9ed1"}, - {file = "regex-2023.6.3-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:051da80e6eeb6e239e394ae60704d2b566aa6a7aed6f2890a7967307267a5dc6"}, - {file = "regex-2023.6.3-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:a4c3b7fa4cdaa69268748665a1a6ff70c014d39bb69c50fda64b396c9116cf77"}, - {file = "regex-2023.6.3-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:457b6cce21bee41ac292d6753d5e94dcbc5c9e3e3a834da285b0bde7aa4a11e9"}, - {file = "regex-2023.6.3-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:aad51907d74fc183033ad796dd4c2e080d1adcc4fd3c0fd4fd499f30c03011cd"}, - {file = "regex-2023.6.3-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:0385e73da22363778ef2324950e08b689abdf0b108a7d8decb403ad7f5191938"}, - 
{file = "regex-2023.6.3-cp39-cp39-musllinux_1_1_ppc64le.whl", hash = "sha256:c6a57b742133830eec44d9b2290daf5cbe0a2f1d6acee1b3c7b1c7b2f3606df7"}, - {file = "regex-2023.6.3-cp39-cp39-musllinux_1_1_s390x.whl", hash = "sha256:3e5219bf9e75993d73ab3d25985c857c77e614525fac9ae02b1bebd92f7cecac"}, - {file = "regex-2023.6.3-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:e5087a3c59eef624a4591ef9eaa6e9a8d8a94c779dade95d27c0bc24650261cd"}, - {file = "regex-2023.6.3-cp39-cp39-win32.whl", hash = "sha256:20326216cc2afe69b6e98528160b225d72f85ab080cbdf0b11528cbbaba2248f"}, - {file = "regex-2023.6.3-cp39-cp39-win_amd64.whl", hash = "sha256:bdff5eab10e59cf26bc479f565e25ed71a7d041d1ded04ccf9aee1d9f208487a"}, - {file = "regex-2023.6.3.tar.gz", hash = "sha256:72d1a25bf36d2050ceb35b517afe13864865268dfb45910e2e17a84be6cbfeb0"}, -] - -[[package]] -name = "requests" -version = "2.31.0" -description = "Python HTTP for Humans." -optional = false -python-versions = ">=3.7" -files = [ - {file = "requests-2.31.0-py3-none-any.whl", hash = "sha256:58cd2187c01e70e6e26505bca751777aa9f2ee0b7f4300988b709f44e013003f"}, - {file = "requests-2.31.0.tar.gz", hash = "sha256:942c5a758f98d790eaed1a29cb6eefc7ffb0d1cf7af05c3d2791656dbd6ad1e1"}, -] - -[package.dependencies] -certifi = ">=2017.4.17" -charset-normalizer = ">=2,<4" -idna = ">=2.5,<4" -urllib3 = ">=1.21.1,<3" - -[package.extras] -socks = ["PySocks (>=1.5.6,!=1.5.7)"] -use-chardet-on-py3 = ["chardet (>=3.0.2,<6)"] - -[[package]] -name = "rtfde" -version = "0.0.2" -description = "A library for extracting HTML content from RTF encapsulated HTML as commonly found in the exchange MSG email format." -optional = false -python-versions = ">=3.6" -files = [ - {file = "RTFDE-0.0.2-py3-none-any.whl", hash = "sha256:18386e4f060cee12a2a8035b0acf0cc99689f5dff1bf347bab7e92351860a21d"}, - {file = "RTFDE-0.0.2.tar.gz", hash = "sha256:b86b5d734950fe8745a5b89133f50554252dbd67c6d1b9265e23ee140e7ea8a2"}, -] - -[package.dependencies] -lark-parser = ">=0.11" -oletools = ">=0.56" - -[package.extras] -dev = ["lxml (>=4.6)"] -msg-parse = ["extract-msg (>=0.27)"] - -[[package]] -name = "safetensors" -version = "0.3.2" -description = "Fast and Safe Tensor serialization" -optional = false -python-versions = "*" -files = [ - {file = "safetensors-0.3.2-cp310-cp310-macosx_11_0_x86_64.whl", hash = "sha256:b6a66989075c2891d743153e8ba9ca84ee7232c8539704488f454199b8b8f84d"}, - {file = "safetensors-0.3.2-cp310-cp310-macosx_12_0_arm64.whl", hash = "sha256:670d6bc3a3b377278ce2971fa7c36ebc0a35041c4ea23b9df750a39380800195"}, - {file = "safetensors-0.3.2-cp310-cp310-macosx_13_0_arm64.whl", hash = "sha256:564f42838721925b5313ae864ba6caa6f4c80a9fbe63cf24310c3be98ab013cd"}, - {file = "safetensors-0.3.2-cp310-cp310-macosx_13_0_x86_64.whl", hash = "sha256:7f80af7e4ab3188daaff12d43d078da3017a90d732d38d7af4eb08b6ca2198a5"}, - {file = "safetensors-0.3.2-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ec30d78f20f1235b252d59cbb9755beb35a1fde8c24c89b3c98e6a1804cfd432"}, - {file = "safetensors-0.3.2-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:16063d94d8f600768d3c331b1e97964b1bf3772e19710105fe24ec5a6af63770"}, - {file = "safetensors-0.3.2-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:cbb44e140bf2aeda98d9dde669dbec15f7b77f96a9274469b91a6cf4bcc5ec3b"}, - {file = "safetensors-0.3.2-cp310-cp310-win32.whl", hash = "sha256:2961c1243fd0da46aa6a1c835305cc4595486f8ac64632a604d0eb5f2de76175"}, - {file = 
"safetensors-0.3.2-cp310-cp310-win_amd64.whl", hash = "sha256:c813920482c337d1424d306e1b05824a38e3ef94303748a0a287dea7a8c4f805"}, - {file = "safetensors-0.3.2-cp311-cp311-macosx_10_11_universal2.whl", hash = "sha256:707df34bd9b9047e97332136ad98e57028faeccdb9cfe1c3b52aba5964cc24bf"}, - {file = "safetensors-0.3.2-cp311-cp311-macosx_12_0_arm64.whl", hash = "sha256:becc5bb85b2947eae20ed23b407ebfd5277d9a560f90381fe2c42e6c043677ba"}, - {file = "safetensors-0.3.2-cp311-cp311-macosx_13_0_arm64.whl", hash = "sha256:30a75707be5cc9686490bde14b9a371cede4af53244ea72b340cfbabfffdf58a"}, - {file = "safetensors-0.3.2-cp311-cp311-macosx_13_0_universal2.whl", hash = "sha256:54ad6af663e15e2b99e2ea3280981b7514485df72ba6d014dc22dae7ba6a5e6c"}, - {file = "safetensors-0.3.2-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:37764b3197656ef507a266c453e909a3477dabc795962b38e3ad28226f53153b"}, - {file = "safetensors-0.3.2-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:4939067736783acd8391d83cd97d6c202f94181951ce697d519f9746381b6a39"}, - {file = "safetensors-0.3.2-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ada0fac127ff8fb04834da5c6d85a8077e6a1c9180a11251d96f8068db922a17"}, - {file = "safetensors-0.3.2-cp311-cp311-win32.whl", hash = "sha256:155b82dbe2b0ebff18cde3f76b42b6d9470296e92561ef1a282004d449fa2b4c"}, - {file = "safetensors-0.3.2-cp311-cp311-win_amd64.whl", hash = "sha256:a86428d196959619ce90197731be9391b5098b35100a7228ef4643957648f7f5"}, - {file = "safetensors-0.3.2-cp37-cp37m-macosx_11_0_x86_64.whl", hash = "sha256:c1f8ab41ed735c5b581f451fd15d9602ff51aa88044bfa933c5fa4b1d0c644d1"}, - {file = "safetensors-0.3.2-cp37-cp37m-macosx_13_0_x86_64.whl", hash = "sha256:bc9cfb3c9ea2aec89685b4d656f9f2296f0f0d67ecf2bebf950870e3be89b3db"}, - {file = "safetensors-0.3.2-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ace5d471e3d78e0d93f952707d808b5ab5eac77ddb034ceb702e602e9acf2be9"}, - {file = "safetensors-0.3.2-cp37-cp37m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:de3e20a388b444381bcda1a3193cce51825ddca277e4cf3ed1fe8d9b2d5722cd"}, - {file = "safetensors-0.3.2-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3d7d70d48585fe8df00725aa788f2e64fd24a4c9ae07cd6be34f6859d0f89a9c"}, - {file = "safetensors-0.3.2-cp37-cp37m-win32.whl", hash = "sha256:6ff59bc90cdc857f68b1023be9085fda6202bbe7f2fd67d06af8f976d6adcc10"}, - {file = "safetensors-0.3.2-cp37-cp37m-win_amd64.whl", hash = "sha256:8b05c93da15fa911763a89281906ca333ed800ab0ef1c7ce53317aa1a2322f19"}, - {file = "safetensors-0.3.2-cp38-cp38-macosx_11_0_x86_64.whl", hash = "sha256:8969cfd9e8d904e8d3c67c989e1bd9a95e3cc8980d4f95e4dcd43c299bb94253"}, - {file = "safetensors-0.3.2-cp38-cp38-macosx_13_0_x86_64.whl", hash = "sha256:f54148ac027556eb02187e9bc1556c4d916c99ca3cb34ca36a7d304d675035c1"}, - {file = "safetensors-0.3.2-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:caec25fedbcf73f66c9261984f07885680f71417fc173f52279276c7f8a5edd3"}, - {file = "safetensors-0.3.2-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:50224a1d99927ccf3b75e27c3d412f7043280431ab100b4f08aad470c37cf99a"}, - {file = "safetensors-0.3.2-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:aa98f49e95f02eb750d32c4947e7d5aa43883149ebd0414920866446525b70f0"}, - {file = "safetensors-0.3.2-cp38-cp38-win32.whl", hash = "sha256:33409df5e28a83dc5cc5547a3ac17c0f1b13a1847b1eb3bc4b3be0df9915171e"}, 
- {file = "safetensors-0.3.2-cp38-cp38-win_amd64.whl", hash = "sha256:e04a7cbbb3856159ab99e3adb14521544f65fcb8548cce773a1435a0f8d78d27"}, - {file = "safetensors-0.3.2-cp39-cp39-macosx_11_0_x86_64.whl", hash = "sha256:7c864cf5dcbfb608c5378f83319c60cc9c97263343b57c02756b7613cd5ab4dd"}, - {file = "safetensors-0.3.2-cp39-cp39-macosx_12_0_arm64.whl", hash = "sha256:14e8c19d6dc51d4f70ee33c46aff04c8ba3f95812e74daf8036c24bc86e75cae"}, - {file = "safetensors-0.3.2-cp39-cp39-macosx_13_0_arm64.whl", hash = "sha256:042a60f633c3c7009fdf6a7c182b165cb7283649d2a1e9c7a4a1c23454bd9a5b"}, - {file = "safetensors-0.3.2-cp39-cp39-macosx_13_0_x86_64.whl", hash = "sha256:fafd95e5ef41e8f312e2a32b7031f7b9b2a621b255f867b221f94bb2e9f51ae8"}, - {file = "safetensors-0.3.2-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:8ed77cf358abce2307f03634694e0b2a29822e322a1623e0b1aa4b41e871bf8b"}, - {file = "safetensors-0.3.2-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:5d344e8b2681a33aafc197c90b0def3229b3317d749531c72fa6259d0caa5c8c"}, - {file = "safetensors-0.3.2-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:87ff0024ef2e5722a79af24688ce4a430f70601d0cf712a744105ed4b8f67ba5"}, - {file = "safetensors-0.3.2-cp39-cp39-win32.whl", hash = "sha256:827af9478b78977248ba93e2fd97ea307fb63f463f80cef4824460f8c2542a52"}, - {file = "safetensors-0.3.2-cp39-cp39-win_amd64.whl", hash = "sha256:9b09f27c456efa301f98681ea14b12f81f2637889f6336223ccab71e42c34541"}, - {file = "safetensors-0.3.2.tar.gz", hash = "sha256:2dbd34554ed3b99435a0e84df077108f5334c8336b5ed9cb8b6b98f7b10da2f6"}, -] - -[package.extras] -all = ["black (==22.3)", "click (==8.0.4)", "flake8 (>=3.8.3)", "flax (>=0.6.3)", "h5py (>=3.7.0)", "huggingface-hub (>=0.12.1)", "isort (>=5.5.4)", "jax (>=0.3.25)", "jaxlib (>=0.3.25)", "numpy (>=1.21.6)", "paddlepaddle (>=2.4.1)", "pytest (>=7.2.0)", "pytest-benchmark (>=4.0.0)", "setuptools-rust (>=1.5.2)", "tensorflow (==2.11.0)", "torch (>=1.10)"] -dev = ["black (==22.3)", "click (==8.0.4)", "flake8 (>=3.8.3)", "flax (>=0.6.3)", "h5py (>=3.7.0)", "huggingface-hub (>=0.12.1)", "isort (>=5.5.4)", "jax (>=0.3.25)", "jaxlib (>=0.3.25)", "numpy (>=1.21.6)", "paddlepaddle (>=2.4.1)", "pytest (>=7.2.0)", "pytest-benchmark (>=4.0.0)", "setuptools-rust (>=1.5.2)", "tensorflow (==2.11.0)", "torch (>=1.10)"] -jax = ["flax (>=0.6.3)", "jax (>=0.3.25)", "jaxlib (>=0.3.25)"] -numpy = ["numpy (>=1.21.6)"] -paddlepaddle = ["paddlepaddle (>=2.4.1)"] -pinned-tf = ["tensorflow (==2.11.0)"] -quality = ["black (==22.3)", "click (==8.0.4)", "flake8 (>=3.8.3)", "isort (>=5.5.4)"] -tensorflow = ["tensorflow (>=2.11.0)"] -testing = ["h5py (>=3.7.0)", "huggingface-hub (>=0.12.1)", "numpy (>=1.21.6)", "pytest (>=7.2.0)", "pytest-benchmark (>=4.0.0)", "setuptools-rust (>=1.5.2)"] -torch = ["torch (>=1.10)"] - -[[package]] -name = "scikit-learn" -version = "1.3.0" -description = "A set of python modules for machine learning and data mining" -optional = false -python-versions = ">=3.8" -files = [ - {file = "scikit-learn-1.3.0.tar.gz", hash = "sha256:8be549886f5eda46436b6e555b0e4873b4f10aa21c07df45c4bc1735afbccd7a"}, - {file = "scikit_learn-1.3.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:981287869e576d42c682cf7ca96af0c6ac544ed9316328fd0d9292795c742cf5"}, - {file = "scikit_learn-1.3.0-cp310-cp310-macosx_12_0_arm64.whl", hash = "sha256:436aaaae2c916ad16631142488e4c82f4296af2404f480e031d866863425d2a2"}, - {file = 
"scikit_learn-1.3.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c7e28d8fa47a0b30ae1bd7a079519dd852764e31708a7804da6cb6f8b36e3630"}, - {file = "scikit_learn-1.3.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ae80c08834a473d08a204d966982a62e11c976228d306a2648c575e3ead12111"}, - {file = "scikit_learn-1.3.0-cp310-cp310-win_amd64.whl", hash = "sha256:552fd1b6ee22900cf1780d7386a554bb96949e9a359999177cf30211e6b20df6"}, - {file = "scikit_learn-1.3.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:79970a6d759eb00a62266a31e2637d07d2d28446fca8079cf9afa7c07b0427f8"}, - {file = "scikit_learn-1.3.0-cp311-cp311-macosx_12_0_arm64.whl", hash = "sha256:850a00b559e636b23901aabbe79b73dc604b4e4248ba9e2d6e72f95063765603"}, - {file = "scikit_learn-1.3.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ee04835fb016e8062ee9fe9074aef9b82e430504e420bff51e3e5fffe72750ca"}, - {file = "scikit_learn-1.3.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9d953531f5d9f00c90c34fa3b7d7cfb43ecff4c605dac9e4255a20b114a27369"}, - {file = "scikit_learn-1.3.0-cp311-cp311-win_amd64.whl", hash = "sha256:151ac2bf65ccf363664a689b8beafc9e6aae36263db114b4ca06fbbbf827444a"}, - {file = "scikit_learn-1.3.0-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:6a885a9edc9c0a341cab27ec4f8a6c58b35f3d449c9d2503a6fd23e06bbd4f6a"}, - {file = "scikit_learn-1.3.0-cp38-cp38-macosx_12_0_arm64.whl", hash = "sha256:9877af9c6d1b15486e18a94101b742e9d0d2f343d35a634e337411ddb57783f3"}, - {file = "scikit_learn-1.3.0-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c470f53cea065ff3d588050955c492793bb50c19a92923490d18fcb637f6383a"}, - {file = "scikit_learn-1.3.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:fd6e2d7389542eae01077a1ee0318c4fec20c66c957f45c7aac0c6eb0fe3c612"}, - {file = "scikit_learn-1.3.0-cp38-cp38-win_amd64.whl", hash = "sha256:3a11936adbc379a6061ea32fa03338d4ca7248d86dd507c81e13af428a5bc1db"}, - {file = "scikit_learn-1.3.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:998d38fcec96584deee1e79cd127469b3ad6fefd1ea6c2dfc54e8db367eb396b"}, - {file = "scikit_learn-1.3.0-cp39-cp39-macosx_12_0_arm64.whl", hash = "sha256:ded35e810438a527e17623ac6deae3b360134345b7c598175ab7741720d7ffa7"}, - {file = "scikit_learn-1.3.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0e8102d5036e28d08ab47166b48c8d5e5810704daecf3a476a4282d562be9a28"}, - {file = "scikit_learn-1.3.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7617164951c422747e7c32be4afa15d75ad8044f42e7d70d3e2e0429a50e6718"}, - {file = "scikit_learn-1.3.0-cp39-cp39-win_amd64.whl", hash = "sha256:1d54fb9e6038284548072df22fd34777e434153f7ffac72c8596f2d6987110dd"}, -] - -[package.dependencies] -joblib = ">=1.1.1" -numpy = ">=1.17.3" -scipy = ">=1.5.0" -threadpoolctl = ">=2.0.0" - -[package.extras] -benchmark = ["matplotlib (>=3.1.3)", "memory-profiler (>=0.57.0)", "pandas (>=1.0.5)"] -docs = ["Pillow (>=7.1.2)", "matplotlib (>=3.1.3)", "memory-profiler (>=0.57.0)", "numpydoc (>=1.2.0)", "pandas (>=1.0.5)", "plotly (>=5.14.0)", "pooch (>=1.6.0)", "scikit-image (>=0.16.2)", "seaborn (>=0.9.0)", "sphinx (>=6.0.0)", "sphinx-copybutton (>=0.5.2)", "sphinx-gallery (>=0.10.1)", "sphinx-prompt (>=1.3.0)", "sphinxext-opengraph (>=0.4.2)"] -examples = ["matplotlib (>=3.1.3)", "pandas (>=1.0.5)", "plotly (>=5.14.0)", "pooch (>=1.6.0)", "scikit-image (>=0.16.2)", "seaborn (>=0.9.0)"] 
-tests = ["black (>=23.3.0)", "matplotlib (>=3.1.3)", "mypy (>=1.3)", "numpydoc (>=1.2.0)", "pandas (>=1.0.5)", "pooch (>=1.6.0)", "pyamg (>=4.0.0)", "pytest (>=7.1.2)", "pytest-cov (>=2.9.0)", "ruff (>=0.0.272)", "scikit-image (>=0.16.2)"] - -[[package]] -name = "scipy" -version = "1.9.3" -description = "Fundamental algorithms for scientific computing in Python" -optional = false -python-versions = ">=3.8" -files = [ - {file = "scipy-1.9.3-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:1884b66a54887e21addf9c16fb588720a8309a57b2e258ae1c7986d4444d3bc0"}, - {file = "scipy-1.9.3-cp310-cp310-macosx_12_0_arm64.whl", hash = "sha256:83b89e9586c62e787f5012e8475fbb12185bafb996a03257e9675cd73d3736dd"}, - {file = "scipy-1.9.3-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1a72d885fa44247f92743fc20732ae55564ff2a519e8302fb7e18717c5355a8b"}, - {file = "scipy-1.9.3-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d01e1dd7b15bd2449c8bfc6b7cc67d630700ed655654f0dfcf121600bad205c9"}, - {file = "scipy-1.9.3-cp310-cp310-win_amd64.whl", hash = "sha256:68239b6aa6f9c593da8be1509a05cb7f9efe98b80f43a5861cd24c7557e98523"}, - {file = "scipy-1.9.3-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:b41bc822679ad1c9a5f023bc93f6d0543129ca0f37c1ce294dd9d386f0a21096"}, - {file = "scipy-1.9.3-cp311-cp311-macosx_12_0_arm64.whl", hash = "sha256:90453d2b93ea82a9f434e4e1cba043e779ff67b92f7a0e85d05d286a3625df3c"}, - {file = "scipy-1.9.3-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:83c06e62a390a9167da60bedd4575a14c1f58ca9dfde59830fc42e5197283dab"}, - {file = "scipy-1.9.3-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:abaf921531b5aeaafced90157db505e10345e45038c39e5d9b6c7922d68085cb"}, - {file = "scipy-1.9.3-cp311-cp311-win_amd64.whl", hash = "sha256:06d2e1b4c491dc7d8eacea139a1b0b295f74e1a1a0f704c375028f8320d16e31"}, - {file = "scipy-1.9.3-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:5a04cd7d0d3eff6ea4719371cbc44df31411862b9646db617c99718ff68d4840"}, - {file = "scipy-1.9.3-cp38-cp38-macosx_12_0_arm64.whl", hash = "sha256:545c83ffb518094d8c9d83cce216c0c32f8c04aaf28b92cc8283eda0685162d5"}, - {file = "scipy-1.9.3-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0d54222d7a3ba6022fdf5773931b5d7c56efe41ede7f7128c7b1637700409108"}, - {file = "scipy-1.9.3-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:cff3a5295234037e39500d35316a4c5794739433528310e117b8a9a0c76d20fc"}, - {file = "scipy-1.9.3-cp38-cp38-win_amd64.whl", hash = "sha256:2318bef588acc7a574f5bfdff9c172d0b1bf2c8143d9582e05f878e580a3781e"}, - {file = "scipy-1.9.3-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:d644a64e174c16cb4b2e41dfea6af722053e83d066da7343f333a54dae9bc31c"}, - {file = "scipy-1.9.3-cp39-cp39-macosx_12_0_arm64.whl", hash = "sha256:da8245491d73ed0a994ed9c2e380fd058ce2fa8a18da204681f2fe1f57f98f95"}, - {file = "scipy-1.9.3-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:4db5b30849606a95dcf519763dd3ab6fe9bd91df49eba517359e450a7d80ce2e"}, - {file = "scipy-1.9.3-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c68db6b290cbd4049012990d7fe71a2abd9ffbe82c0056ebe0f01df8be5436b0"}, - {file = "scipy-1.9.3-cp39-cp39-win_amd64.whl", hash = "sha256:5b88e6d91ad9d59478fafe92a7c757d00c59e3bdc3331be8ada76a4f8d683f58"}, - {file = "scipy-1.9.3.tar.gz", hash = "sha256:fbc5c05c85c1a02be77b1ff591087c83bc44579c6d2bd9fb798bb64ea5e1a027"}, -] - 
-[package.dependencies] -numpy = ">=1.18.5,<1.26.0" - -[package.extras] -dev = ["flake8", "mypy", "pycodestyle", "typing_extensions"] -doc = ["matplotlib (>2)", "numpydoc", "pydata-sphinx-theme (==0.9.0)", "sphinx (!=4.1.0)", "sphinx-panels (>=0.5.2)", "sphinx-tabs"] -test = ["asv", "gmpy2", "mpmath", "pytest", "pytest-cov", "pytest-xdist", "scikit-umfpack", "threadpoolctl"] - -[[package]] -name = "sentence-transformers" -version = "2.2.2" -description = "Multilingual text embeddings" -optional = false -python-versions = ">=3.6.0" -files = [ - {file = "sentence-transformers-2.2.2.tar.gz", hash = "sha256:dbc60163b27de21076c9a30d24b5b7b6fa05141d68cf2553fa9a77bf79a29136"}, -] - -[package.dependencies] -huggingface-hub = ">=0.4.0" -nltk = "*" -numpy = "*" -scikit-learn = "*" -scipy = "*" -sentencepiece = "*" -torch = ">=1.6.0" -torchvision = "*" -tqdm = "*" -transformers = ">=4.6.0,<5.0.0" - -[[package]] -name = "sentencepiece" -version = "0.1.99" -description = "SentencePiece python wrapper" -optional = false -python-versions = "*" -files = [ - {file = "sentencepiece-0.1.99-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:0eb528e70571b7c02723e5804322469b82fe7ea418c96051d0286c0fa028db73"}, - {file = "sentencepiece-0.1.99-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:77d7fafb2c4e4659cbdf303929503f37a26eabc4ff31d3a79bf1c5a1b338caa7"}, - {file = "sentencepiece-0.1.99-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:be9cf5b9e404c245aeb3d3723c737ba7a8f5d4ba262ef233a431fa6c45f732a0"}, - {file = "sentencepiece-0.1.99-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:baed1a26464998f9710d20e52607c29ffd4293e7c71c6a1f83f51ad0911ec12c"}, - {file = "sentencepiece-0.1.99-cp310-cp310-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:9832f08bb372d4c8b567612f8eab9e36e268dff645f1c28f9f8e851be705f6d1"}, - {file = "sentencepiece-0.1.99-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:019e7535108e309dae2b253a75834fc3128240aa87c00eb80732078cdc182588"}, - {file = "sentencepiece-0.1.99-cp310-cp310-win32.whl", hash = "sha256:fa16a830416bb823fa2a52cbdd474d1f7f3bba527fd2304fb4b140dad31bb9bc"}, - {file = "sentencepiece-0.1.99-cp310-cp310-win_amd64.whl", hash = "sha256:14b0eccb7b641d4591c3e12ae44cab537d68352e4d3b6424944f0c447d2348d5"}, - {file = "sentencepiece-0.1.99-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:6d3c56f24183a1e8bd61043ff2c58dfecdc68a5dd8955dc13bab83afd5f76b81"}, - {file = "sentencepiece-0.1.99-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:ed6ea1819fd612c989999e44a51bf556d0ef6abfb553080b9be3d347e18bcfb7"}, - {file = "sentencepiece-0.1.99-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:a2a0260cd1fb7bd8b4d4f39dc2444a8d5fd4e0a0c4d5c899810ef1abf99b2d45"}, - {file = "sentencepiece-0.1.99-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:8a1abff4d1ff81c77cac3cc6fefa34fa4b8b371e5ee51cb7e8d1ebc996d05983"}, - {file = "sentencepiece-0.1.99-cp311-cp311-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:004e6a621d4bc88978eecb6ea7959264239a17b70f2cbc348033d8195c9808ec"}, - {file = "sentencepiece-0.1.99-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:db361e03342c41680afae5807590bc88aa0e17cfd1a42696a160e4005fcda03b"}, - {file = "sentencepiece-0.1.99-cp311-cp311-win32.whl", hash = "sha256:2d95e19168875b70df62916eb55428a0cbcb834ac51d5a7e664eda74def9e1e0"}, - {file = "sentencepiece-0.1.99-cp311-cp311-win_amd64.whl", hash = 
"sha256:f90d73a6f81248a909f55d8e6ef56fec32d559e1e9af045f0b0322637cb8e5c7"}, - {file = "sentencepiece-0.1.99-cp36-cp36m-macosx_10_9_x86_64.whl", hash = "sha256:62e24c81e74bd87a6e0d63c51beb6527e4c0add67e1a17bac18bcd2076afcfeb"}, - {file = "sentencepiece-0.1.99-cp36-cp36m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:57efcc2d51caff20d9573567d9fd3f854d9efe613ed58a439c78c9f93101384a"}, - {file = "sentencepiece-0.1.99-cp36-cp36m-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:6a904c46197993bd1e95b93a6e373dca2f170379d64441041e2e628ad4afb16f"}, - {file = "sentencepiece-0.1.99-cp36-cp36m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d89adf59854741c0d465f0e1525b388c0d174f611cc04af54153c5c4f36088c4"}, - {file = "sentencepiece-0.1.99-cp36-cp36m-win32.whl", hash = "sha256:47c378146928690d1bc106fdf0da768cebd03b65dd8405aa3dd88f9c81e35dba"}, - {file = "sentencepiece-0.1.99-cp36-cp36m-win_amd64.whl", hash = "sha256:9ba142e7a90dd6d823c44f9870abdad45e6c63958eb60fe44cca6828d3b69da2"}, - {file = "sentencepiece-0.1.99-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:b7b1a9ae4d7c6f1f867e63370cca25cc17b6f4886729595b885ee07a58d3cec3"}, - {file = "sentencepiece-0.1.99-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d0f644c9d4d35c096a538507b2163e6191512460035bf51358794a78515b74f7"}, - {file = "sentencepiece-0.1.99-cp37-cp37m-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:c8843d23a0f686d85e569bd6dcd0dd0e0cbc03731e63497ca6d5bacd18df8b85"}, - {file = "sentencepiece-0.1.99-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:33e6f690a1caebb4867a2e367afa1918ad35be257ecdb3455d2bbd787936f155"}, - {file = "sentencepiece-0.1.99-cp37-cp37m-win32.whl", hash = "sha256:8a321866c2f85da7beac74a824b4ad6ddc2a4c9bccd9382529506d48f744a12c"}, - {file = "sentencepiece-0.1.99-cp37-cp37m-win_amd64.whl", hash = "sha256:c42f753bcfb7661c122a15b20be7f684b61fc8592c89c870adf52382ea72262d"}, - {file = "sentencepiece-0.1.99-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:85b476406da69c70586f0bb682fcca4c9b40e5059814f2db92303ea4585c650c"}, - {file = "sentencepiece-0.1.99-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:cfbcfe13c69d3f87b7fcd5da168df7290a6d006329be71f90ba4f56bc77f8561"}, - {file = "sentencepiece-0.1.99-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:445b0ec381af1cd4eef95243e7180c63d9c384443c16c4c47a28196bd1cda937"}, - {file = "sentencepiece-0.1.99-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c6890ea0f2b4703f62d0bf27932e35808b1f679bdb05c7eeb3812b935ba02001"}, - {file = "sentencepiece-0.1.99-cp38-cp38-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:fb71af492b0eefbf9f2501bec97bcd043b6812ab000d119eaf4bd33f9e283d03"}, - {file = "sentencepiece-0.1.99-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:27b866b5bd3ddd54166bbcbf5c8d7dd2e0b397fac8537991c7f544220b1f67bc"}, - {file = "sentencepiece-0.1.99-cp38-cp38-win32.whl", hash = "sha256:b133e8a499eac49c581c3c76e9bdd08c338cc1939e441fee6f92c0ccb5f1f8be"}, - {file = "sentencepiece-0.1.99-cp38-cp38-win_amd64.whl", hash = "sha256:0eaf3591dd0690a87f44f4df129cf8d05d8a4029b5b6709b489b8e27f9a9bcff"}, - {file = "sentencepiece-0.1.99-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:38efeda9bbfb55052d482a009c6a37e52f42ebffcea9d3a98a61de7aee356a28"}, - {file = "sentencepiece-0.1.99-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:6c030b081dc1e1bcc9fadc314b19b740715d3d566ad73a482da20d7d46fd444c"}, - {file = 
"sentencepiece-0.1.99-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:84dbe53e02e4f8a2e45d2ac3e430d5c83182142658e25edd76539b7648928727"}, - {file = "sentencepiece-0.1.99-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0b0f55d0a0ee1719b4b04221fe0c9f0c3461dc3dabd77a035fa2f4788eb3ef9a"}, - {file = "sentencepiece-0.1.99-cp39-cp39-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:18e800f206cd235dc27dc749299e05853a4e4332e8d3dfd81bf13d0e5b9007d9"}, - {file = "sentencepiece-0.1.99-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:2ae1c40cda8f9d5b0423cfa98542735c0235e7597d79caf318855cdf971b2280"}, - {file = "sentencepiece-0.1.99-cp39-cp39-win32.whl", hash = "sha256:c84ce33af12ca222d14a1cdd37bd76a69401e32bc68fe61c67ef6b59402f4ab8"}, - {file = "sentencepiece-0.1.99-cp39-cp39-win_amd64.whl", hash = "sha256:350e5c74d739973f1c9643edb80f7cc904dc948578bcb1d43c6f2b173e5d18dd"}, - {file = "sentencepiece-0.1.99.tar.gz", hash = "sha256:189c48f5cb2949288f97ccdb97f0473098d9c3dcf5a3d99d4eabe719ec27297f"}, -] - -[[package]] -name = "six" -version = "1.16.0" -description = "Python 2 and 3 compatibility utilities" -optional = false -python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*" -files = [ - {file = "six-1.16.0-py2.py3-none-any.whl", hash = "sha256:8abb2f1d86890a2dfb989f9a77cfcfd3e47c2a354b01111771326f8aa26e0254"}, - {file = "six-1.16.0.tar.gz", hash = "sha256:1e61c37477a1626458e36f7b1d82aa5c9b094fa4802892072e49de9c60c4c926"}, -] - -[[package]] -name = "sniffio" -version = "1.3.0" -description = "Sniff out which async library your code is running under" -optional = false -python-versions = ">=3.7" -files = [ - {file = "sniffio-1.3.0-py3-none-any.whl", hash = "sha256:eecefdce1e5bbfb7ad2eeaabf7c1eeb404d7757c379bd1f7e5cce9d8bf425384"}, - {file = "sniffio-1.3.0.tar.gz", hash = "sha256:e60305c5e5d314f5389259b7f22aaa33d8f7dee49763119234af3755c55b9101"}, -] - -[[package]] -name = "soupsieve" -version = "2.4.1" -description = "A modern CSS selector implementation for Beautiful Soup." 
-optional = false -python-versions = ">=3.7" -files = [ - {file = "soupsieve-2.4.1-py3-none-any.whl", hash = "sha256:1c1bfee6819544a3447586c889157365a27e10d88cde3ad3da0cf0ddf646feb8"}, - {file = "soupsieve-2.4.1.tar.gz", hash = "sha256:89d12b2d5dfcd2c9e8c22326da9d9aa9cb3dfab0a83a024f05704076ee8d35ea"}, -] - -[[package]] -name = "sqlalchemy" -version = "2.0.18" -description = "Database Abstraction Library" -optional = false -python-versions = ">=3.7" -files = [ - {file = "SQLAlchemy-2.0.18-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:7ddd6d35c598af872f9a0a5bce7f7c4a1841684a72dab3302e3df7f17d1b5249"}, - {file = "SQLAlchemy-2.0.18-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:00aa050faf24ce5f2af643e2b86822fa1d7149649995f11bc1e769bbfbf9010b"}, - {file = "SQLAlchemy-2.0.18-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:b52c6741073de5a744d27329f9803938dcad5c9fee7e61690c705f72973f4175"}, - {file = "SQLAlchemy-2.0.18-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7db97eabd440327c35b751d5ebf78a107f505586485159bcc87660da8bb1fdca"}, - {file = "SQLAlchemy-2.0.18-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:589aba9a35869695b319ed76c6f673d896cd01a7ff78054be1596df7ad9b096f"}, - {file = "SQLAlchemy-2.0.18-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:9da4ee8f711e077633730955c8f3cd2485c9abf5ea0f80aac23221a3224b9a8c"}, - {file = "SQLAlchemy-2.0.18-cp310-cp310-win32.whl", hash = "sha256:5dd574a37be388512c72fe0d7318cb8e31743a9b2699847a025e0c08c5bf579d"}, - {file = "SQLAlchemy-2.0.18-cp310-cp310-win_amd64.whl", hash = "sha256:6852cd34d96835e4c9091c1e6087325efb5b607b75fd9f7075616197d1c4688a"}, - {file = "SQLAlchemy-2.0.18-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:10e001a84f820fea2640e4500e12322b03afc31d8f4f6b813b44813b2a7c7e0d"}, - {file = "SQLAlchemy-2.0.18-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:bffd6cd47c2e68970039c0d3e355c9ed761d3ca727b204e63cd294cad0e3df90"}, - {file = "SQLAlchemy-2.0.18-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:8b7b3ebfa9416c8eafaffa65216e229480c495e305a06ba176dcac32710744e6"}, - {file = "SQLAlchemy-2.0.18-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:79228a7b90d95957354f37b9d46f2cc8926262ae17b0d3ed8f36c892f2a37e06"}, - {file = "SQLAlchemy-2.0.18-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:ba633b51835036ff0f402c21f3ff567c565a22ff0a5732b060a68f4660e2a38f"}, - {file = "SQLAlchemy-2.0.18-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:8da677135eff43502b7afab5a1e641edfb2dc734ba7fc146e9b1b86817a728e2"}, - {file = "SQLAlchemy-2.0.18-cp311-cp311-win32.whl", hash = "sha256:82edf3a6090554a83942cec79151d6b5eb96e63d143e80e4cf6671e5d772f6be"}, - {file = "SQLAlchemy-2.0.18-cp311-cp311-win_amd64.whl", hash = "sha256:69ae0e9509c43474e33152abe1385b8954922544616426bf793481e1a37e094f"}, - {file = "SQLAlchemy-2.0.18-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:09397a18733fa2a4c7680b746094f980060666ee549deafdb5e102a99ce4619b"}, - {file = "SQLAlchemy-2.0.18-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:45b07470571bda5ee7f5ec471271bbde97267cc8403fce05e280c36ea73f4754"}, - {file = "SQLAlchemy-2.0.18-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:1aac42a21a7fa6c9665392c840b295962992ddf40aecf0a88073bc5c76728117"}, - {file = "SQLAlchemy-2.0.18-cp37-cp37m-musllinux_1_1_aarch64.whl", hash = 
"sha256:da46beef0ce882546d92b7b2e8deb9e04dbb8fec72945a8eb28b347ca46bc15a"}, - {file = "SQLAlchemy-2.0.18-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:a6f1d8256d06f58e6ece150fbe05c63c7f9510df99ee8ac37423f5476a2cebb4"}, - {file = "SQLAlchemy-2.0.18-cp37-cp37m-win32.whl", hash = "sha256:67fbb40db3985c0cfb942fe8853ad94a5e9702d2987dec03abadc2f3b6a24afb"}, - {file = "SQLAlchemy-2.0.18-cp37-cp37m-win_amd64.whl", hash = "sha256:afb322ca05e2603deedbcd2e9910f11a3fd2f42bdeafe63018e5641945c7491c"}, - {file = "SQLAlchemy-2.0.18-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:908c850b98cac1e203ababd4ba76868d19ae0d7172cdc75d3f1b7829b16837d2"}, - {file = "SQLAlchemy-2.0.18-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:10514adc41fc8f5922728fbac13d401a1aefcf037f009e64ca3b92464e33bf0e"}, - {file = "SQLAlchemy-2.0.18-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:2b791577c546b6bbd7b43953565fcb0a2fec63643ad605353dd48afbc3c48317"}, - {file = "SQLAlchemy-2.0.18-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:420bc6d06d4ae7fb6921524334689eebcbea7bf2005efef070a8562cc9527a37"}, - {file = "SQLAlchemy-2.0.18-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:ebdd2418ab4e2e26d572d9a1c03877f8514a9b7436729525aa571862507b3fea"}, - {file = "SQLAlchemy-2.0.18-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:556dc18e39b6edb76239acfd1c010e37395a54c7fde8c57481c15819a3ffb13e"}, - {file = "SQLAlchemy-2.0.18-cp38-cp38-win32.whl", hash = "sha256:7b8cba5a25e95041e3413d91f9e50616bcfaec95afa038ce7dc02efefe576745"}, - {file = "SQLAlchemy-2.0.18-cp38-cp38-win_amd64.whl", hash = "sha256:0f7fdcce52cd882b559a57b484efc92e108efeeee89fab6b623aba1ac68aad2e"}, - {file = "SQLAlchemy-2.0.18-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:d7a2c1e711ce59ac9d0bba780318bcd102d2958bb423209f24c6354d8c4da930"}, - {file = "SQLAlchemy-2.0.18-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:5c95e3e7cc6285bf7ff263eabb0d3bfe3def9a1ff98124083d45e5ece72f4579"}, - {file = "SQLAlchemy-2.0.18-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:fc44e50f9d5e96af1a561faa36863f9191f27364a4df3eb70bca66e9370480b6"}, - {file = "SQLAlchemy-2.0.18-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:bfa1a0f83bdf8061db8d17c2029454722043f1e4dd1b3d3d3120d1b54e75825a"}, - {file = "SQLAlchemy-2.0.18-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:194f2d5a7cb3739875c4d25b3fe288ab0b3dc33f7c857ba2845830c8c51170a0"}, - {file = "SQLAlchemy-2.0.18-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:4ebc542d2289c0b016d6945fd07a7e2e23f4abc41e731ac8ad18a9e0c2fd0ec2"}, - {file = "SQLAlchemy-2.0.18-cp39-cp39-win32.whl", hash = "sha256:774bd401e7993452ba0596e741c0c4d6d22f882dd2a798993859181dbffadc62"}, - {file = "SQLAlchemy-2.0.18-cp39-cp39-win_amd64.whl", hash = "sha256:2756485f49e7df5c2208bdc64263d19d23eba70666f14ad12d6d8278a2fff65f"}, - {file = "SQLAlchemy-2.0.18-py3-none-any.whl", hash = "sha256:6c5bae4c288bda92a7550fe8de9e068c0a7cd56b1c5d888aae5b40f0e13b40bd"}, - {file = "SQLAlchemy-2.0.18.tar.gz", hash = "sha256:1fb792051db66e09c200e7bc3bda3b1eb18a5b8eb153d2cedb2b14b56a68b8cb"}, -] - -[package.dependencies] -greenlet = {version = "!=0.4.17", markers = "platform_machine == \"win32\" or platform_machine == \"WIN32\" or platform_machine == \"AMD64\" or platform_machine == \"amd64\" or platform_machine == \"x86_64\" or platform_machine == \"ppc64le\" or platform_machine == \"aarch64\""} -typing-extensions = ">=4.2.0" - -[package.extras] -aiomysql = ["aiomysql", 
"greenlet (!=0.4.17)"] -aiosqlite = ["aiosqlite", "greenlet (!=0.4.17)", "typing-extensions (!=3.10.0.1)"] -asyncio = ["greenlet (!=0.4.17)"] -asyncmy = ["asyncmy (>=0.2.3,!=0.2.4,!=0.2.6)", "greenlet (!=0.4.17)"] -mariadb-connector = ["mariadb (>=1.0.1,!=1.1.2,!=1.1.5)"] -mssql = ["pyodbc"] -mssql-pymssql = ["pymssql"] -mssql-pyodbc = ["pyodbc"] -mypy = ["mypy (>=0.910)"] -mysql = ["mysqlclient (>=1.4.0)"] -mysql-connector = ["mysql-connector-python"] -oracle = ["cx-oracle (>=7)"] -oracle-oracledb = ["oracledb (>=1.0.1)"] -postgresql = ["psycopg2 (>=2.7)"] -postgresql-asyncpg = ["asyncpg", "greenlet (!=0.4.17)"] -postgresql-pg8000 = ["pg8000 (>=1.29.1)"] -postgresql-psycopg = ["psycopg (>=3.0.7)"] -postgresql-psycopg2binary = ["psycopg2-binary"] -postgresql-psycopg2cffi = ["psycopg2cffi"] -postgresql-psycopgbinary = ["psycopg[binary] (>=3.0.7)"] -pymysql = ["pymysql"] -sqlcipher = ["sqlcipher3-binary"] - -[[package]] -name = "starlette" -version = "0.27.0" -description = "The little ASGI library that shines." -optional = false -python-versions = ">=3.7" -files = [ - {file = "starlette-0.27.0-py3-none-any.whl", hash = "sha256:918416370e846586541235ccd38a474c08b80443ed31c578a418e2209b3eef91"}, - {file = "starlette-0.27.0.tar.gz", hash = "sha256:6a6b0d042acb8d469a01eba54e9cda6cbd24ac602c4cd016723117d6a7e73b75"}, -] - -[package.dependencies] -anyio = ">=3.4.0,<5" - -[package.extras] -full = ["httpx (>=0.22.0)", "itsdangerous", "jinja2", "python-multipart", "pyyaml"] - -[[package]] -name = "sympy" -version = "1.12" -description = "Computer algebra system (CAS) in Python" -optional = false -python-versions = ">=3.8" -files = [ - {file = "sympy-1.12-py3-none-any.whl", hash = "sha256:c3588cd4295d0c0f603d0f2ae780587e64e2efeedb3521e46b9bb1d08d184fa5"}, - {file = "sympy-1.12.tar.gz", hash = "sha256:ebf595c8dac3e0fdc4152c51878b498396ec7f30e7a914d6071e674d49420fb8"}, -] - -[package.dependencies] -mpmath = ">=0.19" - -[[package]] -name = "tabulate" -version = "0.9.0" -description = "Pretty-print tabular data" -optional = false -python-versions = ">=3.7" -files = [ - {file = "tabulate-0.9.0-py3-none-any.whl", hash = "sha256:024ca478df22e9340661486f85298cff5f6dcdba14f3813e8830015b9ed1948f"}, - {file = "tabulate-0.9.0.tar.gz", hash = "sha256:0095b12bf5966de529c0feb1fa08671671b3368eec77d7ef7ab114be2c068b3c"}, -] - -[package.extras] -widechars = ["wcwidth"] - -[[package]] -name = "tenacity" -version = "8.2.2" -description = "Retry code until it succeeds" -optional = false -python-versions = ">=3.6" -files = [ - {file = "tenacity-8.2.2-py3-none-any.whl", hash = "sha256:2f277afb21b851637e8f52e6a613ff08734c347dc19ade928e519d7d2d8569b0"}, - {file = "tenacity-8.2.2.tar.gz", hash = "sha256:43af037822bd0029025877f3b2d97cc4d7bb0c2991000a3d59d71517c5c969e0"}, -] - -[package.extras] -doc = ["reno", "sphinx", "tornado (>=4.5)"] - -[[package]] -name = "threadpoolctl" -version = "3.2.0" -description = "threadpoolctl" -optional = false -python-versions = ">=3.8" -files = [ - {file = "threadpoolctl-3.2.0-py3-none-any.whl", hash = "sha256:2b7818516e423bdaebb97c723f86a7c6b0a83d3f3b0970328d66f4d9104dc032"}, - {file = "threadpoolctl-3.2.0.tar.gz", hash = "sha256:c96a0ba3bdddeaca37dc4cc7344aafad41cdb8c313f74fdfe387a867bba93355"}, -] - -[[package]] -name = "tokenizers" -version = "0.13.3" -description = "Fast and Customizable Tokenizers" -optional = false -python-versions = "*" -files = [ - {file = "tokenizers-0.13.3-cp310-cp310-macosx_10_11_x86_64.whl", hash = 
"sha256:f3835c5be51de8c0a092058a4d4380cb9244fb34681fd0a295fbf0a52a5fdf33"}, - {file = "tokenizers-0.13.3-cp310-cp310-macosx_12_0_arm64.whl", hash = "sha256:4ef4c3e821730f2692489e926b184321e887f34fb8a6b80b8096b966ba663d07"}, - {file = "tokenizers-0.13.3-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c5fd1a6a25353e9aa762e2aae5a1e63883cad9f4e997c447ec39d071020459bc"}, - {file = "tokenizers-0.13.3-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:ee0b1b311d65beab83d7a41c56a1e46ab732a9eed4460648e8eb0bd69fc2d059"}, - {file = "tokenizers-0.13.3-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:5ef4215284df1277dadbcc5e17d4882bda19f770d02348e73523f7e7d8b8d396"}, - {file = "tokenizers-0.13.3-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a4d53976079cff8a033f778fb9adca2d9d69d009c02fa2d71a878b5f3963ed30"}, - {file = "tokenizers-0.13.3-cp310-cp310-win32.whl", hash = "sha256:1f0e3b4c2ea2cd13238ce43548959c118069db7579e5d40ec270ad77da5833ce"}, - {file = "tokenizers-0.13.3-cp310-cp310-win_amd64.whl", hash = "sha256:89649c00d0d7211e8186f7a75dfa1db6996f65edce4b84821817eadcc2d3c79e"}, - {file = "tokenizers-0.13.3-cp311-cp311-macosx_10_11_universal2.whl", hash = "sha256:56b726e0d2bbc9243872b0144515ba684af5b8d8cd112fb83ee1365e26ec74c8"}, - {file = "tokenizers-0.13.3-cp311-cp311-macosx_12_0_arm64.whl", hash = "sha256:cc5c022ce692e1f499d745af293ab9ee6f5d92538ed2faf73f9708c89ee59ce6"}, - {file = "tokenizers-0.13.3-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f55c981ac44ba87c93e847c333e58c12abcbb377a0c2f2ef96e1a266e4184ff2"}, - {file = "tokenizers-0.13.3-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:f247eae99800ef821a91f47c5280e9e9afaeed9980fc444208d5aa6ba69ff148"}, - {file = "tokenizers-0.13.3-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:4b3e3215d048e94f40f1c95802e45dcc37c5b05eb46280fc2ccc8cd351bff839"}, - {file = "tokenizers-0.13.3-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9ba2b0bf01777c9b9bc94b53764d6684554ce98551fec496f71bc5be3a03e98b"}, - {file = "tokenizers-0.13.3-cp311-cp311-win32.whl", hash = "sha256:cc78d77f597d1c458bf0ea7c2a64b6aa06941c7a99cb135b5969b0278824d808"}, - {file = "tokenizers-0.13.3-cp311-cp311-win_amd64.whl", hash = "sha256:ecf182bf59bd541a8876deccf0360f5ae60496fd50b58510048020751cf1724c"}, - {file = "tokenizers-0.13.3-cp37-cp37m-macosx_10_11_x86_64.whl", hash = "sha256:0527dc5436a1f6bf2c0327da3145687d3bcfbeab91fed8458920093de3901b44"}, - {file = "tokenizers-0.13.3-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:07cbb2c307627dc99b44b22ef05ff4473aa7c7cc1fec8f0a8b37d8a64b1a16d2"}, - {file = "tokenizers-0.13.3-cp37-cp37m-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:4560dbdeaae5b7ee0d4e493027e3de6d53c991b5002d7ff95083c99e11dd5ac0"}, - {file = "tokenizers-0.13.3-cp37-cp37m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:64064bd0322405c9374305ab9b4c07152a1474370327499911937fd4a76d004b"}, - {file = "tokenizers-0.13.3-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b8c6e2ab0f2e3d939ca66aa1d596602105fe33b505cd2854a4c1717f704c51de"}, - {file = "tokenizers-0.13.3-cp37-cp37m-win32.whl", hash = "sha256:6cc29d410768f960db8677221e497226e545eaaea01aa3613fa0fdf2cc96cff4"}, - {file = "tokenizers-0.13.3-cp37-cp37m-win_amd64.whl", hash = 
"sha256:fc2a7fdf864554a0dacf09d32e17c0caa9afe72baf9dd7ddedc61973bae352d8"}, - {file = "tokenizers-0.13.3-cp38-cp38-macosx_10_11_x86_64.whl", hash = "sha256:8791dedba834c1fc55e5f1521be325ea3dafb381964be20684b92fdac95d79b7"}, - {file = "tokenizers-0.13.3-cp38-cp38-macosx_12_0_arm64.whl", hash = "sha256:d607a6a13718aeb20507bdf2b96162ead5145bbbfa26788d6b833f98b31b26e1"}, - {file = "tokenizers-0.13.3-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:3791338f809cd1bf8e4fee6b540b36822434d0c6c6bc47162448deee3f77d425"}, - {file = "tokenizers-0.13.3-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:c2f35f30e39e6aab8716f07790f646bdc6e4a853816cc49a95ef2a9016bf9ce6"}, - {file = "tokenizers-0.13.3-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:310204dfed5aa797128b65d63538a9837cbdd15da2a29a77d67eefa489edda26"}, - {file = "tokenizers-0.13.3-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a0f9b92ea052305166559f38498b3b0cae159caea712646648aaa272f7160963"}, - {file = "tokenizers-0.13.3-cp38-cp38-win32.whl", hash = "sha256:9a3fa134896c3c1f0da6e762d15141fbff30d094067c8f1157b9fdca593b5806"}, - {file = "tokenizers-0.13.3-cp38-cp38-win_amd64.whl", hash = "sha256:8e7b0cdeace87fa9e760e6a605e0ae8fc14b7d72e9fc19c578116f7287bb873d"}, - {file = "tokenizers-0.13.3-cp39-cp39-macosx_10_11_x86_64.whl", hash = "sha256:00cee1e0859d55507e693a48fa4aef07060c4bb6bd93d80120e18fea9371c66d"}, - {file = "tokenizers-0.13.3-cp39-cp39-macosx_12_0_arm64.whl", hash = "sha256:a23ff602d0797cea1d0506ce69b27523b07e70f6dda982ab8cf82402de839088"}, - {file = "tokenizers-0.13.3-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:70ce07445050b537d2696022dafb115307abdffd2a5c106f029490f84501ef97"}, - {file = "tokenizers-0.13.3-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:280ffe95f50eaaf655b3a1dc7ff1d9cf4777029dbbc3e63a74e65a056594abc3"}, - {file = "tokenizers-0.13.3-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:97acfcec592f7e9de8cadcdcda50a7134423ac8455c0166b28c9ff04d227b371"}, - {file = "tokenizers-0.13.3-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:dd7730c98a3010cd4f523465867ff95cd9d6430db46676ce79358f65ae39797b"}, - {file = "tokenizers-0.13.3-cp39-cp39-win32.whl", hash = "sha256:48625a108029cb1ddf42e17a81b5a3230ba6888a70c9dc14e81bc319e812652d"}, - {file = "tokenizers-0.13.3-cp39-cp39-win_amd64.whl", hash = "sha256:bc0a6f1ba036e482db6453571c9e3e60ecd5489980ffd95d11dc9f960483d783"}, - {file = "tokenizers-0.13.3.tar.gz", hash = "sha256:2e546dbb68b623008a5442353137fbb0123d311a6d7ba52f2667c8862a75af2e"}, -] - -[package.extras] -dev = ["black (==22.3)", "datasets", "numpy", "pytest", "requests"] -docs = ["setuptools-rust", "sphinx", "sphinx-rtd-theme"] -testing = ["black (==22.3)", "datasets", "numpy", "pytest", "requests"] - -[[package]] -name = "torch" -version = "2.0.1" -description = "Tensors and Dynamic neural networks in Python with strong GPU acceleration" -optional = false -python-versions = ">=3.8.0" -files = [ - {file = "torch-2.0.1-cp310-cp310-manylinux1_x86_64.whl", hash = "sha256:8ced00b3ba471856b993822508f77c98f48a458623596a4c43136158781e306a"}, - {file = "torch-2.0.1-cp310-cp310-manylinux2014_aarch64.whl", hash = "sha256:359bfaad94d1cda02ab775dc1cc386d585712329bb47b8741607ef6ef4950747"}, - {file = "torch-2.0.1-cp310-cp310-win_amd64.whl", hash = "sha256:7c84e44d9002182edd859f3400deaa7410f5ec948a519cc7ef512c2f9b34d2c4"}, - {file 
= "torch-2.0.1-cp310-none-macosx_10_9_x86_64.whl", hash = "sha256:567f84d657edc5582d716900543e6e62353dbe275e61cdc36eda4929e46df9e7"}, - {file = "torch-2.0.1-cp310-none-macosx_11_0_arm64.whl", hash = "sha256:787b5a78aa7917465e9b96399b883920c88a08f4eb63b5a5d2d1a16e27d2f89b"}, - {file = "torch-2.0.1-cp311-cp311-manylinux1_x86_64.whl", hash = "sha256:e617b1d0abaf6ced02dbb9486803abfef0d581609b09641b34fa315c9c40766d"}, - {file = "torch-2.0.1-cp311-cp311-manylinux2014_aarch64.whl", hash = "sha256:b6019b1de4978e96daa21d6a3ebb41e88a0b474898fe251fd96189587408873e"}, - {file = "torch-2.0.1-cp311-cp311-win_amd64.whl", hash = "sha256:dbd68cbd1cd9da32fe5d294dd3411509b3d841baecb780b38b3b7b06c7754434"}, - {file = "torch-2.0.1-cp311-none-macosx_10_9_x86_64.whl", hash = "sha256:ef654427d91600129864644e35deea761fb1fe131710180b952a6f2e2207075e"}, - {file = "torch-2.0.1-cp311-none-macosx_11_0_arm64.whl", hash = "sha256:25aa43ca80dcdf32f13da04c503ec7afdf8e77e3a0183dd85cd3e53b2842e527"}, - {file = "torch-2.0.1-cp38-cp38-manylinux1_x86_64.whl", hash = "sha256:5ef3ea3d25441d3957348f7e99c7824d33798258a2bf5f0f0277cbcadad2e20d"}, - {file = "torch-2.0.1-cp38-cp38-manylinux2014_aarch64.whl", hash = "sha256:0882243755ff28895e8e6dc6bc26ebcf5aa0911ed81b2a12f241fc4b09075b13"}, - {file = "torch-2.0.1-cp38-cp38-win_amd64.whl", hash = "sha256:f66aa6b9580a22b04d0af54fcd042f52406a8479e2b6a550e3d9f95963e168c8"}, - {file = "torch-2.0.1-cp38-none-macosx_10_9_x86_64.whl", hash = "sha256:1adb60d369f2650cac8e9a95b1d5758e25d526a34808f7448d0bd599e4ae9072"}, - {file = "torch-2.0.1-cp38-none-macosx_11_0_arm64.whl", hash = "sha256:1bcffc16b89e296826b33b98db5166f990e3b72654a2b90673e817b16c50e32b"}, - {file = "torch-2.0.1-cp39-cp39-manylinux1_x86_64.whl", hash = "sha256:e10e1597f2175365285db1b24019eb6f04d53dcd626c735fc502f1e8b6be9875"}, - {file = "torch-2.0.1-cp39-cp39-manylinux2014_aarch64.whl", hash = "sha256:423e0ae257b756bb45a4b49072046772d1ad0c592265c5080070e0767da4e490"}, - {file = "torch-2.0.1-cp39-cp39-win_amd64.whl", hash = "sha256:8742bdc62946c93f75ff92da00e3803216c6cce9b132fbca69664ca38cfb3e18"}, - {file = "torch-2.0.1-cp39-none-macosx_10_9_x86_64.whl", hash = "sha256:c62df99352bd6ee5a5a8d1832452110435d178b5164de450831a3a8cc14dc680"}, - {file = "torch-2.0.1-cp39-none-macosx_11_0_arm64.whl", hash = "sha256:671a2565e3f63b8fe8e42ae3e36ad249fe5e567435ea27b94edaa672a7d0c416"}, -] - -[package.dependencies] -filelock = "*" -jinja2 = "*" -networkx = "*" -sympy = "*" -typing-extensions = "*" - -[package.extras] -opt-einsum = ["opt-einsum (>=3.3)"] - -[[package]] -name = "torchvision" -version = "0.15.2" -description = "image and video datasets and models for torch deep learning" -optional = false -python-versions = ">=3.8" -files = [ - {file = "torchvision-0.15.2-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:7754088774e810c5672b142a45dcf20b1bd986a5a7da90f8660c43dc43fb850c"}, - {file = "torchvision-0.15.2-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:37eb138e13f6212537a3009ac218695483a635c404b6cc1d8e0d0d978026a86d"}, - {file = "torchvision-0.15.2-cp310-cp310-manylinux1_x86_64.whl", hash = "sha256:54143f7cc0797d199b98a53b7d21c3f97615762d4dd17ad45a41c7e80d880e73"}, - {file = "torchvision-0.15.2-cp310-cp310-manylinux2014_aarch64.whl", hash = "sha256:1eefebf5fbd01a95fe8f003d623d941601c94b5cec547b420da89cb369d9cf96"}, - {file = "torchvision-0.15.2-cp310-cp310-win_amd64.whl", hash = "sha256:96fae30c5ca8423f4b9790df0f0d929748e32718d88709b7b567d2f630c042e3"}, - {file = "torchvision-0.15.2-cp311-cp311-macosx_10_9_x86_64.whl", hash = 
"sha256:5f35f6bd5bcc4568e6522e4137fa60fcc72f4fa3e615321c26cd87e855acd398"}, - {file = "torchvision-0.15.2-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:757505a0ab2be7096cb9d2bf4723202c971cceddb72c7952a7e877f773de0f8a"}, - {file = "torchvision-0.15.2-cp311-cp311-manylinux1_x86_64.whl", hash = "sha256:012ad25cfd9019ff9b0714a168727e3845029be1af82296ff1e1482931fa4b80"}, - {file = "torchvision-0.15.2-cp311-cp311-manylinux2014_aarch64.whl", hash = "sha256:b02a7ffeaa61448737f39a4210b8ee60234bda0515a0c0d8562f884454105b0f"}, - {file = "torchvision-0.15.2-cp311-cp311-win_amd64.whl", hash = "sha256:10be76ceded48329d0a0355ac33da131ee3993ff6c125e4a02ab34b5baa2472c"}, - {file = "torchvision-0.15.2-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:8f12415b686dba884fb086f53ac803f692be5a5cdd8a758f50812b30fffea2e4"}, - {file = "torchvision-0.15.2-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:31211c01f8b8ec33b8a638327b5463212e79a03e43c895f88049f97af1bd12fd"}, - {file = "torchvision-0.15.2-cp38-cp38-manylinux1_x86_64.whl", hash = "sha256:c55f9889e436f14b4f84a9c00ebad0d31f5b4626f10cf8018e6c676f92a6d199"}, - {file = "torchvision-0.15.2-cp38-cp38-manylinux2014_aarch64.whl", hash = "sha256:9a192f2aa979438f23c20e883980b23d13268ab9f819498774a6d2eb021802c2"}, - {file = "torchvision-0.15.2-cp38-cp38-win_amd64.whl", hash = "sha256:c07071bc8d02aa8fcdfe139ab6a1ef57d3b64c9e30e84d12d45c9f4d89fb6536"}, - {file = "torchvision-0.15.2-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:4790260fcf478a41c7ecc60a6d5200a88159fdd8d756e9f29f0f8c59c4a67a68"}, - {file = "torchvision-0.15.2-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:987ab62225b4151a11e53fd06150c5258ced24ac9d7c547e0e4ab6fbca92a5ce"}, - {file = "torchvision-0.15.2-cp39-cp39-manylinux1_x86_64.whl", hash = "sha256:63df26673e66cba3f17e07c327a8cafa3cce98265dbc3da329f1951d45966838"}, - {file = "torchvision-0.15.2-cp39-cp39-manylinux2014_aarch64.whl", hash = "sha256:b85f98d4cc2f72452f6792ab4463a3541bc5678a8cdd3da0e139ba2fe8b56d42"}, - {file = "torchvision-0.15.2-cp39-cp39-win_amd64.whl", hash = "sha256:07c462524cc1bba5190c16a9d47eac1fca024d60595a310f23c00b4ffff18b30"}, -] - -[package.dependencies] -numpy = "*" -pillow = ">=5.3.0,<8.3.dev0 || >=8.4.dev0" -requests = "*" -torch = "2.0.1" - -[package.extras] -scipy = ["scipy"] - -[[package]] -name = "tqdm" -version = "4.65.0" -description = "Fast, Extensible Progress Meter" -optional = false -python-versions = ">=3.7" -files = [ - {file = "tqdm-4.65.0-py3-none-any.whl", hash = "sha256:c4f53a17fe37e132815abceec022631be8ffe1b9381c2e6e30aa70edc99e9671"}, - {file = "tqdm-4.65.0.tar.gz", hash = "sha256:1871fb68a86b8fb3b59ca4cdd3dcccbc7e6d613eeed31f4c332531977b89beb5"}, -] - -[package.dependencies] -colorama = {version = "*", markers = "platform_system == \"Windows\""} - -[package.extras] -dev = ["py-make (>=0.1.0)", "twine", "wheel"] -notebook = ["ipywidgets (>=6)"] -slack = ["slack-sdk"] -telegram = ["requests"] - -[[package]] -name = "transformers" -version = "4.31.0" -description = "State-of-the-art Machine Learning for JAX, PyTorch and TensorFlow" -optional = false -python-versions = ">=3.8.0" -files = [ - {file = "transformers-4.31.0-py3-none-any.whl", hash = "sha256:8487aab0195ce1c2a5ae189305118b9720daddbc7b688edb09ccd79e3b149f6b"}, - {file = "transformers-4.31.0.tar.gz", hash = "sha256:4302fba920a1c24d3a429a29efff6a63eac03f3f3cf55b55927fc795d01cb273"}, -] - -[package.dependencies] -filelock = "*" -huggingface-hub = ">=0.14.1,<1.0" -numpy = ">=1.17" -packaging = ">=20.0" -pyyaml = ">=5.1" -regex = "!=2019.12.17" 
-requests = "*" -safetensors = ">=0.3.1" -tokenizers = ">=0.11.1,<0.11.3 || >0.11.3,<0.14" -tqdm = ">=4.27" - -[package.extras] -accelerate = ["accelerate (>=0.20.3)"] -agents = ["Pillow (<10.0.0)", "accelerate (>=0.20.3)", "datasets (!=2.5.0)", "diffusers", "opencv-python", "sentencepiece (>=0.1.91,!=0.1.92)", "torch (>=1.9,!=1.12.0)"] -all = ["Pillow (<10.0.0)", "accelerate (>=0.20.3)", "av (==9.2.0)", "codecarbon (==1.2.0)", "decord (==0.6.0)", "flax (>=0.4.1,<=0.7.0)", "jax (>=0.2.8,!=0.3.2,<=0.4.13)", "jaxlib (>=0.1.65,<=0.4.13)", "kenlm", "keras-nlp (>=0.3.1)", "librosa", "onnxconverter-common", "optax (>=0.0.8,<=0.1.4)", "optuna", "phonemizer", "protobuf", "pyctcdecode (>=0.4.0)", "ray[tune]", "sentencepiece (>=0.1.91,!=0.1.92)", "sigopt", "tensorflow (>=2.6,<2.14)", "tensorflow-text (<2.14)", "tf2onnx", "timm", "tokenizers (>=0.11.1,!=0.11.3,<0.14)", "torch (>=1.9,!=1.12.0)", "torchaudio", "torchvision"] -audio = ["kenlm", "librosa", "phonemizer", "pyctcdecode (>=0.4.0)"] -codecarbon = ["codecarbon (==1.2.0)"] -deepspeed = ["accelerate (>=0.20.3)", "deepspeed (>=0.9.3)"] -deepspeed-testing = ["GitPython (<3.1.19)", "accelerate (>=0.20.3)", "beautifulsoup4", "black (>=23.1,<24.0)", "cookiecutter (==1.7.3)", "datasets (!=2.5.0)", "deepspeed (>=0.9.3)", "dill (<0.3.5)", "evaluate (>=0.2.0)", "faiss-cpu", "hf-doc-builder (>=0.3.0)", "nltk", "optuna", "parameterized", "protobuf", "psutil", "pytest (>=7.2.0)", "pytest-timeout", "pytest-xdist", "rjieba", "rouge-score (!=0.0.7,!=0.0.8,!=0.1,!=0.1.1)", "sacrebleu (>=1.4.12,<2.0.0)", "sacremoses", "sentencepiece (>=0.1.91,!=0.1.92)", "timeout-decorator"] -dev = ["GitPython (<3.1.19)", "Pillow (<10.0.0)", "accelerate (>=0.20.3)", "av (==9.2.0)", "beautifulsoup4", "black (>=23.1,<24.0)", "codecarbon (==1.2.0)", "cookiecutter (==1.7.3)", "datasets (!=2.5.0)", "decord (==0.6.0)", "dill (<0.3.5)", "evaluate (>=0.2.0)", "faiss-cpu", "flax (>=0.4.1,<=0.7.0)", "fugashi (>=1.0)", "hf-doc-builder", "hf-doc-builder (>=0.3.0)", "ipadic (>=1.0.0,<2.0)", "isort (>=5.5.4)", "jax (>=0.2.8,!=0.3.2,<=0.4.13)", "jaxlib (>=0.1.65,<=0.4.13)", "kenlm", "keras-nlp (>=0.3.1)", "librosa", "nltk", "onnxconverter-common", "optax (>=0.0.8,<=0.1.4)", "optuna", "parameterized", "phonemizer", "protobuf", "psutil", "pyctcdecode (>=0.4.0)", "pytest (>=7.2.0)", "pytest-timeout", "pytest-xdist", "ray[tune]", "rhoknp (>=1.1.0,<1.3.1)", "rjieba", "rouge-score (!=0.0.7,!=0.0.8,!=0.1,!=0.1.1)", "ruff (>=0.0.241,<=0.0.259)", "sacrebleu (>=1.4.12,<2.0.0)", "sacremoses", "scikit-learn", "sentencepiece (>=0.1.91,!=0.1.92)", "sigopt", "sudachidict-core (>=20220729)", "sudachipy (>=0.6.6)", "tensorflow (>=2.6,<2.14)", "tensorflow-text (<2.14)", "tf2onnx", "timeout-decorator", "timm", "tokenizers (>=0.11.1,!=0.11.3,<0.14)", "torch (>=1.9,!=1.12.0)", "torchaudio", "torchvision", "unidic (>=1.0.2)", "unidic-lite (>=1.0.7)", "urllib3 (<2.0.0)"] -dev-tensorflow = ["GitPython (<3.1.19)", "Pillow (<10.0.0)", "beautifulsoup4", "black (>=23.1,<24.0)", "cookiecutter (==1.7.3)", "datasets (!=2.5.0)", "dill (<0.3.5)", "evaluate (>=0.2.0)", "faiss-cpu", "hf-doc-builder", "hf-doc-builder (>=0.3.0)", "isort (>=5.5.4)", "kenlm", "keras-nlp (>=0.3.1)", "librosa", "nltk", "onnxconverter-common", "onnxruntime (>=1.4.0)", "onnxruntime-tools (>=1.4.2)", "parameterized", "phonemizer", "protobuf", "psutil", "pyctcdecode (>=0.4.0)", "pytest (>=7.2.0)", "pytest-timeout", "pytest-xdist", "rjieba", "rouge-score (!=0.0.7,!=0.0.8,!=0.1,!=0.1.1)", "ruff (>=0.0.241,<=0.0.259)", "sacrebleu (>=1.4.12,<2.0.0)", 
"sacremoses", "scikit-learn", "sentencepiece (>=0.1.91,!=0.1.92)", "tensorflow (>=2.6,<2.14)", "tensorflow-text (<2.14)", "tf2onnx", "timeout-decorator", "tokenizers (>=0.11.1,!=0.11.3,<0.14)", "urllib3 (<2.0.0)"] -dev-torch = ["GitPython (<3.1.19)", "Pillow (<10.0.0)", "accelerate (>=0.20.3)", "beautifulsoup4", "black (>=23.1,<24.0)", "codecarbon (==1.2.0)", "cookiecutter (==1.7.3)", "datasets (!=2.5.0)", "dill (<0.3.5)", "evaluate (>=0.2.0)", "faiss-cpu", "fugashi (>=1.0)", "hf-doc-builder", "hf-doc-builder (>=0.3.0)", "ipadic (>=1.0.0,<2.0)", "isort (>=5.5.4)", "kenlm", "librosa", "nltk", "onnxruntime (>=1.4.0)", "onnxruntime-tools (>=1.4.2)", "optuna", "parameterized", "phonemizer", "protobuf", "psutil", "pyctcdecode (>=0.4.0)", "pytest (>=7.2.0)", "pytest-timeout", "pytest-xdist", "ray[tune]", "rhoknp (>=1.1.0,<1.3.1)", "rjieba", "rouge-score (!=0.0.7,!=0.0.8,!=0.1,!=0.1.1)", "ruff (>=0.0.241,<=0.0.259)", "sacrebleu (>=1.4.12,<2.0.0)", "sacremoses", "scikit-learn", "sentencepiece (>=0.1.91,!=0.1.92)", "sigopt", "sudachidict-core (>=20220729)", "sudachipy (>=0.6.6)", "timeout-decorator", "timm", "tokenizers (>=0.11.1,!=0.11.3,<0.14)", "torch (>=1.9,!=1.12.0)", "torchaudio", "torchvision", "unidic (>=1.0.2)", "unidic-lite (>=1.0.7)", "urllib3 (<2.0.0)"] -docs = ["Pillow (<10.0.0)", "accelerate (>=0.20.3)", "av (==9.2.0)", "codecarbon (==1.2.0)", "decord (==0.6.0)", "flax (>=0.4.1,<=0.7.0)", "hf-doc-builder", "jax (>=0.2.8,!=0.3.2,<=0.4.13)", "jaxlib (>=0.1.65,<=0.4.13)", "kenlm", "keras-nlp (>=0.3.1)", "librosa", "onnxconverter-common", "optax (>=0.0.8,<=0.1.4)", "optuna", "phonemizer", "protobuf", "pyctcdecode (>=0.4.0)", "ray[tune]", "sentencepiece (>=0.1.91,!=0.1.92)", "sigopt", "tensorflow (>=2.6,<2.14)", "tensorflow-text (<2.14)", "tf2onnx", "timm", "tokenizers (>=0.11.1,!=0.11.3,<0.14)", "torch (>=1.9,!=1.12.0)", "torchaudio", "torchvision"] -docs-specific = ["hf-doc-builder"] -fairscale = ["fairscale (>0.3)"] -flax = ["flax (>=0.4.1,<=0.7.0)", "jax (>=0.2.8,!=0.3.2,<=0.4.13)", "jaxlib (>=0.1.65,<=0.4.13)", "optax (>=0.0.8,<=0.1.4)"] -flax-speech = ["kenlm", "librosa", "phonemizer", "pyctcdecode (>=0.4.0)"] -ftfy = ["ftfy"] -integrations = ["optuna", "ray[tune]", "sigopt"] -ja = ["fugashi (>=1.0)", "ipadic (>=1.0.0,<2.0)", "rhoknp (>=1.1.0,<1.3.1)", "sudachidict-core (>=20220729)", "sudachipy (>=0.6.6)", "unidic (>=1.0.2)", "unidic-lite (>=1.0.7)"] -modelcreation = ["cookiecutter (==1.7.3)"] -natten = ["natten (>=0.14.6)"] -onnx = ["onnxconverter-common", "onnxruntime (>=1.4.0)", "onnxruntime-tools (>=1.4.2)", "tf2onnx"] -onnxruntime = ["onnxruntime (>=1.4.0)", "onnxruntime-tools (>=1.4.2)"] -optuna = ["optuna"] -quality = ["GitPython (<3.1.19)", "black (>=23.1,<24.0)", "datasets (!=2.5.0)", "hf-doc-builder (>=0.3.0)", "isort (>=5.5.4)", "ruff (>=0.0.241,<=0.0.259)", "urllib3 (<2.0.0)"] -ray = ["ray[tune]"] -retrieval = ["datasets (!=2.5.0)", "faiss-cpu"] -sagemaker = ["sagemaker (>=2.31.0)"] -sentencepiece = ["protobuf", "sentencepiece (>=0.1.91,!=0.1.92)"] -serving = ["fastapi", "pydantic (<2)", "starlette", "uvicorn"] -sigopt = ["sigopt"] -sklearn = ["scikit-learn"] -speech = ["kenlm", "librosa", "phonemizer", "pyctcdecode (>=0.4.0)", "torchaudio"] -testing = ["GitPython (<3.1.19)", "beautifulsoup4", "black (>=23.1,<24.0)", "cookiecutter (==1.7.3)", "datasets (!=2.5.0)", "dill (<0.3.5)", "evaluate (>=0.2.0)", "faiss-cpu", "hf-doc-builder (>=0.3.0)", "nltk", "parameterized", "protobuf", "psutil", "pytest (>=7.2.0)", "pytest-timeout", "pytest-xdist", "rjieba", "rouge-score 
(!=0.0.7,!=0.0.8,!=0.1,!=0.1.1)", "sacrebleu (>=1.4.12,<2.0.0)", "sacremoses", "timeout-decorator"] -tf = ["keras-nlp (>=0.3.1)", "onnxconverter-common", "tensorflow (>=2.6,<2.14)", "tensorflow-text (<2.14)", "tf2onnx"] -tf-cpu = ["keras-nlp (>=0.3.1)", "onnxconverter-common", "tensorflow-cpu (>=2.6,<2.14)", "tensorflow-text (<2.14)", "tf2onnx"] -tf-speech = ["kenlm", "librosa", "phonemizer", "pyctcdecode (>=0.4.0)"] -timm = ["timm"] -tokenizers = ["tokenizers (>=0.11.1,!=0.11.3,<0.14)"] -torch = ["accelerate (>=0.20.3)", "torch (>=1.9,!=1.12.0)"] -torch-speech = ["kenlm", "librosa", "phonemizer", "pyctcdecode (>=0.4.0)", "torchaudio"] -torch-vision = ["Pillow (<10.0.0)", "torchvision"] -torchhub = ["filelock", "huggingface-hub (>=0.14.1,<1.0)", "importlib-metadata", "numpy (>=1.17)", "packaging (>=20.0)", "protobuf", "regex (!=2019.12.17)", "requests", "sentencepiece (>=0.1.91,!=0.1.92)", "tokenizers (>=0.11.1,!=0.11.3,<0.14)", "torch (>=1.9,!=1.12.0)", "tqdm (>=4.27)"] -video = ["av (==9.2.0)", "decord (==0.6.0)"] -vision = ["Pillow (<10.0.0)"] - -[[package]] -name = "typing-extensions" -version = "4.7.1" -description = "Backported and Experimental Type Hints for Python 3.7+" -optional = false -python-versions = ">=3.7" -files = [ - {file = "typing_extensions-4.7.1-py3-none-any.whl", hash = "sha256:440d5dd3af93b060174bf433bccd69b0babc3b15b1a8dca43789fd7f61514b36"}, - {file = "typing_extensions-4.7.1.tar.gz", hash = "sha256:b75ddc264f0ba5615db7ba217daeb99701ad295353c45f9e95963337ceeeffb2"}, -] - -[[package]] -name = "typing-inspect" -version = "0.9.0" -description = "Runtime inspection utilities for typing module." -optional = false -python-versions = "*" -files = [ - {file = "typing_inspect-0.9.0-py3-none-any.whl", hash = "sha256:9ee6fc59062311ef8547596ab6b955e1b8aa46242d854bfc78f4f6b0eff35f9f"}, - {file = "typing_inspect-0.9.0.tar.gz", hash = "sha256:b23fc42ff6f6ef6954e4852c1fb512cdd18dbea03134f91f856a95ccc9461f78"}, -] - -[package.dependencies] -mypy-extensions = ">=0.3.0" -typing-extensions = ">=3.7.4" - -[[package]] -name = "tzdata" -version = "2023.3" -description = "Provider of IANA time zone data" -optional = false -python-versions = ">=2" -files = [ - {file = "tzdata-2023.3-py2.py3-none-any.whl", hash = "sha256:7e65763eef3120314099b6939b5546db7adce1e7d6f2e179e3df563c70511eda"}, - {file = "tzdata-2023.3.tar.gz", hash = "sha256:11ef1e08e54acb0d4f95bdb1be05da659673de4acbd21bf9c69e94cc5e907a3a"}, -] - -[[package]] -name = "tzlocal" -version = "5.0.1" -description = "tzinfo object for the local timezone" -optional = false -python-versions = ">=3.7" -files = [ - {file = "tzlocal-5.0.1-py3-none-any.whl", hash = "sha256:f3596e180296aaf2dbd97d124fe76ae3a0e3d32b258447de7b939b3fd4be992f"}, - {file = "tzlocal-5.0.1.tar.gz", hash = "sha256:46eb99ad4bdb71f3f72b7d24f4267753e240944ecfc16f25d2719ba89827a803"}, -] - -[package.dependencies] -tzdata = {version = "*", markers = "platform_system == \"Windows\""} - -[package.extras] -devenv = ["black", "check-manifest", "flake8", "pyroma", "pytest (>=4.3)", "pytest-cov", "pytest-mock (>=3.3)", "zest.releaser"] - -[[package]] -name = "unstructured" -version = "0.8.0" -description = "A library that prepares raw documents for downstream ML tasks." 
-optional = false -python-versions = ">=3.7.0" -files = [ - {file = "unstructured-0.8.0-py3-none-any.whl", hash = "sha256:d6f574327f6b371f8bbe8d2c6861d8b40114ead9b920054c3601449ec72c3e42"}, - {file = "unstructured-0.8.0.tar.gz", hash = "sha256:528b9140ef56bee3f7eabd23e7203f3f6890c1a72188d640985f86a3f842c565"}, -] - -[package.dependencies] -argilla = "*" -chardet = "*" -filetype = "*" -lxml = "*" -markdown = "*" -msg-parser = "*" -nltk = "*" -openpyxl = "*" -pandas = "*" -pdf2image = "*" -"pdfminer.six" = "*" -pillow = "*" -pypandoc = "*" -python-docx = "*" -python-magic = "*" -python-pptx = "*" -requests = "*" -tabulate = "*" -xlrd = "*" - -[package.extras] -azure = ["adlfs", "fsspec"] -discord = ["discord-py"] -dropbox = ["dropboxdrivefs", "fsspec"] -elasticsearch = ["elasticsearch", "jq"] -gcs = ["fsspec", "gcsfs"] -github = ["pygithub (==1.58.2)"] -gitlab = ["python-gitlab"] -google-drive = ["google-api-python-client"] -huggingface = ["langdetect", "sacremoses", "sentencepiece", "torch", "transformers"] -local-inference = ["unstructured-inference (==0.5.5)"] -reddit = ["praw"] -s3 = ["fsspec", "s3fs"] -slack = ["slack-sdk"] -wikipedia = ["wikipedia"] - -[[package]] -name = "urllib3" -version = "2.0.3" -description = "HTTP library with thread-safe connection pooling, file post, and more." -optional = false -python-versions = ">=3.7" -files = [ - {file = "urllib3-2.0.3-py3-none-any.whl", hash = "sha256:48e7fafa40319d358848e1bc6809b208340fafe2096f1725d05d67443d0483d1"}, - {file = "urllib3-2.0.3.tar.gz", hash = "sha256:bee28b5e56addb8226c96f7f13ac28cb4c301dd5ea8a6ca179c0b9835e032825"}, -] - -[package.extras] -brotli = ["brotli (>=1.0.9)", "brotlicffi (>=0.8.0)"] -secure = ["certifi", "cryptography (>=1.9)", "idna (>=2.0.0)", "pyopenssl (>=17.1.0)", "urllib3-secure-extra"] -socks = ["pysocks (>=1.5.6,!=1.5.7,<2.0)"] -zstd = ["zstandard (>=0.18.0)"] - -[[package]] -name = "uvicorn" -version = "0.22.0" -description = "The lightning-fast ASGI server." 
-optional = false -python-versions = ">=3.7" -files = [ - {file = "uvicorn-0.22.0-py3-none-any.whl", hash = "sha256:e9434d3bbf05f310e762147f769c9f21235ee118ba2d2bf1155a7196448bd996"}, - {file = "uvicorn-0.22.0.tar.gz", hash = "sha256:79277ae03db57ce7d9aa0567830bbb51d7a612f54d6e1e3e92da3ef24c2c8ed8"}, -] - -[package.dependencies] -click = ">=7.0" -colorama = {version = ">=0.4", optional = true, markers = "sys_platform == \"win32\" and extra == \"standard\""} -h11 = ">=0.8" -httptools = {version = ">=0.5.0", optional = true, markers = "extra == \"standard\""} -python-dotenv = {version = ">=0.13", optional = true, markers = "extra == \"standard\""} -pyyaml = {version = ">=5.1", optional = true, markers = "extra == \"standard\""} -uvloop = {version = ">=0.14.0,<0.15.0 || >0.15.0,<0.15.1 || >0.15.1", optional = true, markers = "(sys_platform != \"win32\" and sys_platform != \"cygwin\") and platform_python_implementation != \"PyPy\" and extra == \"standard\""} -watchfiles = {version = ">=0.13", optional = true, markers = "extra == \"standard\""} -websockets = {version = ">=10.4", optional = true, markers = "extra == \"standard\""} - -[package.extras] -standard = ["colorama (>=0.4)", "httptools (>=0.5.0)", "python-dotenv (>=0.13)", "pyyaml (>=5.1)", "uvloop (>=0.14.0,!=0.15.0,!=0.15.1)", "watchfiles (>=0.13)", "websockets (>=10.4)"] - -[[package]] -name = "uvloop" -version = "0.17.0" -description = "Fast implementation of asyncio event loop on top of libuv" -optional = false -python-versions = ">=3.7" -files = [ - {file = "uvloop-0.17.0-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:ce9f61938d7155f79d3cb2ffa663147d4a76d16e08f65e2c66b77bd41b356718"}, - {file = "uvloop-0.17.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:68532f4349fd3900b839f588972b3392ee56042e440dd5873dfbbcd2cc67617c"}, - {file = "uvloop-0.17.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0949caf774b9fcefc7c5756bacbbbd3fc4c05a6b7eebc7c7ad6f825b23998d6d"}, - {file = "uvloop-0.17.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ff3d00b70ce95adce264462c930fbaecb29718ba6563db354608f37e49e09024"}, - {file = "uvloop-0.17.0-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:a5abddb3558d3f0a78949c750644a67be31e47936042d4f6c888dd6f3c95f4aa"}, - {file = "uvloop-0.17.0-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:8efcadc5a0003d3a6e887ccc1fb44dec25594f117a94e3127954c05cf144d811"}, - {file = "uvloop-0.17.0-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:3378eb62c63bf336ae2070599e49089005771cc651c8769aaad72d1bd9385a7c"}, - {file = "uvloop-0.17.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:6aafa5a78b9e62493539456f8b646f85abc7093dd997f4976bb105537cf2635e"}, - {file = "uvloop-0.17.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c686a47d57ca910a2572fddfe9912819880b8765e2f01dc0dd12a9bf8573e539"}, - {file = "uvloop-0.17.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:864e1197139d651a76c81757db5eb199db8866e13acb0dfe96e6fc5d1cf45fc4"}, - {file = "uvloop-0.17.0-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:2a6149e1defac0faf505406259561bc14b034cdf1d4711a3ddcdfbaa8d825a05"}, - {file = "uvloop-0.17.0-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:6708f30db9117f115eadc4f125c2a10c1a50d711461699a0cbfaa45b9a78e376"}, - {file = "uvloop-0.17.0-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:23609ca361a7fc587031429fa25ad2ed7242941adec948f9d10c045bfecab06b"}, - {file = 
"uvloop-0.17.0-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:2deae0b0fb00a6af41fe60a675cec079615b01d68beb4cc7b722424406b126a8"}, - {file = "uvloop-0.17.0-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:45cea33b208971e87a31c17622e4b440cac231766ec11e5d22c76fab3bf9df62"}, - {file = "uvloop-0.17.0-cp37-cp37m-musllinux_1_1_aarch64.whl", hash = "sha256:9b09e0f0ac29eee0451d71798878eae5a4e6a91aa275e114037b27f7db72702d"}, - {file = "uvloop-0.17.0-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:dbbaf9da2ee98ee2531e0c780455f2841e4675ff580ecf93fe5c48fe733b5667"}, - {file = "uvloop-0.17.0-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:a4aee22ece20958888eedbad20e4dbb03c37533e010fb824161b4f05e641f738"}, - {file = "uvloop-0.17.0-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:307958f9fc5c8bb01fad752d1345168c0abc5d62c1b72a4a8c6c06f042b45b20"}, - {file = "uvloop-0.17.0-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:3ebeeec6a6641d0adb2ea71dcfb76017602ee2bfd8213e3fcc18d8f699c5104f"}, - {file = "uvloop-0.17.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:1436c8673c1563422213ac6907789ecb2b070f5939b9cbff9ef7113f2b531595"}, - {file = "uvloop-0.17.0-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:8887d675a64cfc59f4ecd34382e5b4f0ef4ae1da37ed665adba0c2badf0d6578"}, - {file = "uvloop-0.17.0-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:3db8de10ed684995a7f34a001f15b374c230f7655ae840964d51496e2f8a8474"}, - {file = "uvloop-0.17.0-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:7d37dccc7ae63e61f7b96ee2e19c40f153ba6ce730d8ba4d3b4e9738c1dccc1b"}, - {file = "uvloop-0.17.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:cbbe908fda687e39afd6ea2a2f14c2c3e43f2ca88e3a11964b297822358d0e6c"}, - {file = "uvloop-0.17.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:3d97672dc709fa4447ab83276f344a165075fd9f366a97b712bdd3fee05efae8"}, - {file = "uvloop-0.17.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f1e507c9ee39c61bfddd79714e4f85900656db1aec4d40c6de55648e85c2799c"}, - {file = "uvloop-0.17.0-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:c092a2c1e736086d59ac8e41f9c98f26bbf9b9222a76f21af9dfe949b99b2eb9"}, - {file = "uvloop-0.17.0-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:30babd84706115626ea78ea5dbc7dd8d0d01a2e9f9b306d24ca4ed5796c66ded"}, - {file = "uvloop-0.17.0.tar.gz", hash = "sha256:0ddf6baf9cf11a1a22c71487f39f15b2cf78eb5bde7e5b45fbb99e8a9d91b9e1"}, -] - -[package.extras] -dev = ["Cython (>=0.29.32,<0.30.0)", "Sphinx (>=4.1.2,<4.2.0)", "aiohttp", "flake8 (>=3.9.2,<3.10.0)", "mypy (>=0.800)", "psutil", "pyOpenSSL (>=22.0.0,<22.1.0)", "pycodestyle (>=2.7.0,<2.8.0)", "pytest (>=3.6.0)", "sphinx-rtd-theme (>=0.5.2,<0.6.0)", "sphinxcontrib-asyncio (>=0.3.0,<0.4.0)"] -docs = ["Sphinx (>=4.1.2,<4.2.0)", "sphinx-rtd-theme (>=0.5.2,<0.6.0)", "sphinxcontrib-asyncio (>=0.3.0,<0.4.0)"] -test = ["Cython (>=0.29.32,<0.30.0)", "aiohttp", "flake8 (>=3.9.2,<3.10.0)", "mypy (>=0.800)", "psutil", "pyOpenSSL (>=22.0.0,<22.1.0)", "pycodestyle (>=2.7.0,<2.8.0)"] - -[[package]] -name = "watchfiles" -version = "0.19.0" -description = "Simple, modern and high performance file watching and code reload in python." 
-optional = false -python-versions = ">=3.7" -files = [ - {file = "watchfiles-0.19.0-cp37-abi3-macosx_10_7_x86_64.whl", hash = "sha256:91633e64712df3051ca454ca7d1b976baf842d7a3640b87622b323c55f3345e7"}, - {file = "watchfiles-0.19.0-cp37-abi3-macosx_11_0_arm64.whl", hash = "sha256:b6577b8c6c8701ba8642ea9335a129836347894b666dd1ec2226830e263909d3"}, - {file = "watchfiles-0.19.0-cp37-abi3-manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:18b28f6ad871b82df9542ff958d0c86bb0d8310bb09eb8e87d97318a3b5273af"}, - {file = "watchfiles-0.19.0-cp37-abi3-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:fac19dc9cbc34052394dbe81e149411a62e71999c0a19e1e09ce537867f95ae0"}, - {file = "watchfiles-0.19.0-cp37-abi3-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:09ea3397aecbc81c19ed7f025e051a7387feefdb789cf768ff994c1228182fda"}, - {file = "watchfiles-0.19.0-cp37-abi3-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:c0376deac92377817e4fb8f347bf559b7d44ff556d9bc6f6208dd3f79f104aaf"}, - {file = "watchfiles-0.19.0-cp37-abi3-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:9c75eff897786ee262c9f17a48886f4e98e6cfd335e011c591c305e5d083c056"}, - {file = "watchfiles-0.19.0-cp37-abi3-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:cb5d45c4143c1dd60f98a16187fd123eda7248f84ef22244818c18d531a249d1"}, - {file = "watchfiles-0.19.0-cp37-abi3-musllinux_1_1_aarch64.whl", hash = "sha256:79c533ff593db861ae23436541f481ec896ee3da4e5db8962429b441bbaae16e"}, - {file = "watchfiles-0.19.0-cp37-abi3-musllinux_1_1_x86_64.whl", hash = "sha256:3d7d267d27aceeeaa3de0dd161a0d64f0a282264d592e335fff7958cc0cbae7c"}, - {file = "watchfiles-0.19.0-cp37-abi3-win32.whl", hash = "sha256:176a9a7641ec2c97b24455135d58012a5be5c6217fc4d5fef0b2b9f75dbf5154"}, - {file = "watchfiles-0.19.0-cp37-abi3-win_amd64.whl", hash = "sha256:945be0baa3e2440151eb3718fd8846751e8b51d8de7b884c90b17d271d34cae8"}, - {file = "watchfiles-0.19.0-cp37-abi3-win_arm64.whl", hash = "sha256:0089c6dc24d436b373c3c57657bf4f9a453b13767150d17284fc6162b2791911"}, - {file = "watchfiles-0.19.0-pp38-pypy38_pp73-macosx_10_7_x86_64.whl", hash = "sha256:cae3dde0b4b2078f31527acff6f486e23abed307ba4d3932466ba7cdd5ecec79"}, - {file = "watchfiles-0.19.0-pp38-pypy38_pp73-macosx_11_0_arm64.whl", hash = "sha256:7f3920b1285a7d3ce898e303d84791b7bf40d57b7695ad549dc04e6a44c9f120"}, - {file = "watchfiles-0.19.0-pp38-pypy38_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:9afd0d69429172c796164fd7fe8e821ade9be983f51c659a38da3faaaaac44dc"}, - {file = "watchfiles-0.19.0-pp38-pypy38_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:68dce92b29575dda0f8d30c11742a8e2b9b8ec768ae414b54f7453f27bdf9545"}, - {file = "watchfiles-0.19.0-pp39-pypy39_pp73-macosx_10_7_x86_64.whl", hash = "sha256:5569fc7f967429d4bc87e355cdfdcee6aabe4b620801e2cf5805ea245c06097c"}, - {file = "watchfiles-0.19.0-pp39-pypy39_pp73-macosx_11_0_arm64.whl", hash = "sha256:5471582658ea56fca122c0f0d0116a36807c63fefd6fdc92c71ca9a4491b6b48"}, - {file = "watchfiles-0.19.0-pp39-pypy39_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:b538014a87f94d92f98f34d3e6d2635478e6be6423a9ea53e4dd96210065e193"}, - {file = "watchfiles-0.19.0-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:20b44221764955b1e703f012c74015306fb7e79a00c15370785f309b1ed9aa8d"}, - {file = "watchfiles-0.19.0.tar.gz", hash = "sha256:d9b073073e048081e502b6c6b0b88714c026a1a4c890569238d04aca5f9ca74b"}, -] - 
-[package.dependencies] -anyio = ">=3.0.0" - -[[package]] -name = "websockets" -version = "11.0.3" -description = "An implementation of the WebSocket Protocol (RFC 6455 & 7692)" -optional = false -python-versions = ">=3.7" -files = [ - {file = "websockets-11.0.3-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:3ccc8a0c387629aec40f2fc9fdcb4b9d5431954f934da3eaf16cdc94f67dbfac"}, - {file = "websockets-11.0.3-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:d67ac60a307f760c6e65dad586f556dde58e683fab03323221a4e530ead6f74d"}, - {file = "websockets-11.0.3-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:84d27a4832cc1a0ee07cdcf2b0629a8a72db73f4cf6de6f0904f6661227f256f"}, - {file = "websockets-11.0.3-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ffd7dcaf744f25f82190856bc26ed81721508fc5cbf2a330751e135ff1283564"}, - {file = "websockets-11.0.3-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:7622a89d696fc87af8e8d280d9b421db5133ef5b29d3f7a1ce9f1a7bf7fcfa11"}, - {file = "websockets-11.0.3-cp310-cp310-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:bceab846bac555aff6427d060f2fcfff71042dba6f5fca7dc4f75cac815e57ca"}, - {file = "websockets-11.0.3-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:54c6e5b3d3a8936a4ab6870d46bdd6ec500ad62bde9e44462c32d18f1e9a8e54"}, - {file = "websockets-11.0.3-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:41f696ba95cd92dc047e46b41b26dd24518384749ed0d99bea0a941ca87404c4"}, - {file = "websockets-11.0.3-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:86d2a77fd490ae3ff6fae1c6ceaecad063d3cc2320b44377efdde79880e11526"}, - {file = "websockets-11.0.3-cp310-cp310-win32.whl", hash = "sha256:2d903ad4419f5b472de90cd2d40384573b25da71e33519a67797de17ef849b69"}, - {file = "websockets-11.0.3-cp310-cp310-win_amd64.whl", hash = "sha256:1d2256283fa4b7f4c7d7d3e84dc2ece74d341bce57d5b9bf385df109c2a1a82f"}, - {file = "websockets-11.0.3-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:e848f46a58b9fcf3d06061d17be388caf70ea5b8cc3466251963c8345e13f7eb"}, - {file = "websockets-11.0.3-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:aa5003845cdd21ac0dc6c9bf661c5beddd01116f6eb9eb3c8e272353d45b3288"}, - {file = "websockets-11.0.3-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:b58cbf0697721120866820b89f93659abc31c1e876bf20d0b3d03cef14faf84d"}, - {file = "websockets-11.0.3-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:660e2d9068d2bedc0912af508f30bbeb505bbbf9774d98def45f68278cea20d3"}, - {file = "websockets-11.0.3-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:c1f0524f203e3bd35149f12157438f406eff2e4fb30f71221c8a5eceb3617b6b"}, - {file = "websockets-11.0.3-cp311-cp311-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:def07915168ac8f7853812cc593c71185a16216e9e4fa886358a17ed0fd9fcf6"}, - {file = "websockets-11.0.3-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:b30c6590146e53149f04e85a6e4fcae068df4289e31e4aee1fdf56a0dead8f97"}, - {file = "websockets-11.0.3-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:619d9f06372b3a42bc29d0cd0354c9bb9fb39c2cbc1a9c5025b4538738dbffaf"}, - {file = "websockets-11.0.3-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:01f5567d9cf6f502d655151645d4e8b72b453413d3819d2b6f1185abc23e82dd"}, - {file = "websockets-11.0.3-cp311-cp311-win32.whl", hash 
= "sha256:e1459677e5d12be8bbc7584c35b992eea142911a6236a3278b9b5ce3326f282c"}, - {file = "websockets-11.0.3-cp311-cp311-win_amd64.whl", hash = "sha256:e7837cb169eca3b3ae94cc5787c4fed99eef74c0ab9506756eea335e0d6f3ed8"}, - {file = "websockets-11.0.3-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:9f59a3c656fef341a99e3d63189852be7084c0e54b75734cde571182c087b152"}, - {file = "websockets-11.0.3-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:2529338a6ff0eb0b50c7be33dc3d0e456381157a31eefc561771ee431134a97f"}, - {file = "websockets-11.0.3-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:34fd59a4ac42dff6d4681d8843217137f6bc85ed29722f2f7222bd619d15e95b"}, - {file = "websockets-11.0.3-cp37-cp37m-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:332d126167ddddec94597c2365537baf9ff62dfcc9db4266f263d455f2f031cb"}, - {file = "websockets-11.0.3-cp37-cp37m-musllinux_1_1_aarch64.whl", hash = "sha256:6505c1b31274723ccaf5f515c1824a4ad2f0d191cec942666b3d0f3aa4cb4007"}, - {file = "websockets-11.0.3-cp37-cp37m-musllinux_1_1_i686.whl", hash = "sha256:f467ba0050b7de85016b43f5a22b46383ef004c4f672148a8abf32bc999a87f0"}, - {file = "websockets-11.0.3-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:9d9acd80072abcc98bd2c86c3c9cd4ac2347b5a5a0cae7ed5c0ee5675f86d9af"}, - {file = "websockets-11.0.3-cp37-cp37m-win32.whl", hash = "sha256:e590228200fcfc7e9109509e4d9125eace2042fd52b595dd22bbc34bb282307f"}, - {file = "websockets-11.0.3-cp37-cp37m-win_amd64.whl", hash = "sha256:b16fff62b45eccb9c7abb18e60e7e446998093cdcb50fed33134b9b6878836de"}, - {file = "websockets-11.0.3-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:fb06eea71a00a7af0ae6aefbb932fb8a7df3cb390cc217d51a9ad7343de1b8d0"}, - {file = "websockets-11.0.3-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:8a34e13a62a59c871064dfd8ffb150867e54291e46d4a7cf11d02c94a5275bae"}, - {file = "websockets-11.0.3-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:4841ed00f1026dfbced6fca7d963c4e7043aa832648671b5138008dc5a8f6d99"}, - {file = "websockets-11.0.3-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1a073fc9ab1c8aff37c99f11f1641e16da517770e31a37265d2755282a5d28aa"}, - {file = "websockets-11.0.3-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:68b977f21ce443d6d378dbd5ca38621755f2063d6fdb3335bda981d552cfff86"}, - {file = "websockets-11.0.3-cp38-cp38-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e1a99a7a71631f0efe727c10edfba09ea6bee4166a6f9c19aafb6c0b5917d09c"}, - {file = "websockets-11.0.3-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:bee9fcb41db2a23bed96c6b6ead6489702c12334ea20a297aa095ce6d31370d0"}, - {file = "websockets-11.0.3-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:4b253869ea05a5a073ebfdcb5cb3b0266a57c3764cf6fe114e4cd90f4bfa5f5e"}, - {file = "websockets-11.0.3-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:1553cb82942b2a74dd9b15a018dce645d4e68674de2ca31ff13ebc2d9f283788"}, - {file = "websockets-11.0.3-cp38-cp38-win32.whl", hash = "sha256:f61bdb1df43dc9c131791fbc2355535f9024b9a04398d3bd0684fc16ab07df74"}, - {file = "websockets-11.0.3-cp38-cp38-win_amd64.whl", hash = "sha256:03aae4edc0b1c68498f41a6772d80ac7c1e33c06c6ffa2ac1c27a07653e79d6f"}, - {file = "websockets-11.0.3-cp39-cp39-macosx_10_9_universal2.whl", hash = 
"sha256:777354ee16f02f643a4c7f2b3eff8027a33c9861edc691a2003531f5da4f6bc8"}, - {file = "websockets-11.0.3-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:8c82f11964f010053e13daafdc7154ce7385ecc538989a354ccc7067fd7028fd"}, - {file = "websockets-11.0.3-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:3580dd9c1ad0701169e4d6fc41e878ffe05e6bdcaf3c412f9d559389d0c9e016"}, - {file = "websockets-11.0.3-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:6f1a3f10f836fab6ca6efa97bb952300b20ae56b409414ca85bff2ad241d2a61"}, - {file = "websockets-11.0.3-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:df41b9bc27c2c25b486bae7cf42fccdc52ff181c8c387bfd026624a491c2671b"}, - {file = "websockets-11.0.3-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:279e5de4671e79a9ac877427f4ac4ce93751b8823f276b681d04b2156713b9dd"}, - {file = "websockets-11.0.3-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:1fdf26fa8a6a592f8f9235285b8affa72748dc12e964a5518c6c5e8f916716f7"}, - {file = "websockets-11.0.3-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:69269f3a0b472e91125b503d3c0b3566bda26da0a3261c49f0027eb6075086d1"}, - {file = "websockets-11.0.3-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:97b52894d948d2f6ea480171a27122d77af14ced35f62e5c892ca2fae9344311"}, - {file = "websockets-11.0.3-cp39-cp39-win32.whl", hash = "sha256:c7f3cb904cce8e1be667c7e6fef4516b98d1a6a0635a58a57528d577ac18a128"}, - {file = "websockets-11.0.3-cp39-cp39-win_amd64.whl", hash = "sha256:c792ea4eabc0159535608fc5658a74d1a81020eb35195dd63214dcf07556f67e"}, - {file = "websockets-11.0.3-pp37-pypy37_pp73-macosx_10_9_x86_64.whl", hash = "sha256:f2e58f2c36cc52d41f2659e4c0cbf7353e28c8c9e63e30d8c6d3494dc9fdedcf"}, - {file = "websockets-11.0.3-pp37-pypy37_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:de36fe9c02995c7e6ae6efe2e205816f5f00c22fd1fbf343d4d18c3d5ceac2f5"}, - {file = "websockets-11.0.3-pp37-pypy37_pp73-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:0ac56b661e60edd453585f4bd68eb6a29ae25b5184fd5ba51e97652580458998"}, - {file = "websockets-11.0.3-pp37-pypy37_pp73-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e052b8467dd07d4943936009f46ae5ce7b908ddcac3fda581656b1b19c083d9b"}, - {file = "websockets-11.0.3-pp37-pypy37_pp73-win_amd64.whl", hash = "sha256:42cc5452a54a8e46a032521d7365da775823e21bfba2895fb7b77633cce031bb"}, - {file = "websockets-11.0.3-pp38-pypy38_pp73-macosx_10_9_x86_64.whl", hash = "sha256:e6316827e3e79b7b8e7d8e3b08f4e331af91a48e794d5d8b099928b6f0b85f20"}, - {file = "websockets-11.0.3-pp38-pypy38_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:8531fdcad636d82c517b26a448dcfe62f720e1922b33c81ce695d0edb91eb931"}, - {file = "websockets-11.0.3-pp38-pypy38_pp73-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:c114e8da9b475739dde229fd3bc6b05a6537a88a578358bc8eb29b4030fac9c9"}, - {file = "websockets-11.0.3-pp38-pypy38_pp73-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e063b1865974611313a3849d43f2c3f5368093691349cf3c7c8f8f75ad7cb280"}, - {file = "websockets-11.0.3-pp38-pypy38_pp73-win_amd64.whl", hash = "sha256:92b2065d642bf8c0a82d59e59053dd2fdde64d4ed44efe4870fa816c1232647b"}, - {file = "websockets-11.0.3-pp39-pypy39_pp73-macosx_10_9_x86_64.whl", 
hash = "sha256:0ee68fe502f9031f19d495dae2c268830df2760c0524cbac5d759921ba8c8e82"}, - {file = "websockets-11.0.3-pp39-pypy39_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:dcacf2c7a6c3a84e720d1bb2b543c675bf6c40e460300b628bab1b1efc7c034c"}, - {file = "websockets-11.0.3-pp39-pypy39_pp73-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:b67c6f5e5a401fc56394f191f00f9b3811fe843ee93f4a70df3c389d1adf857d"}, - {file = "websockets-11.0.3-pp39-pypy39_pp73-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:1d5023a4b6a5b183dc838808087033ec5df77580485fc533e7dab2567851b0a4"}, - {file = "websockets-11.0.3-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:ed058398f55163a79bb9f06a90ef9ccc063b204bb346c4de78efc5d15abfe602"}, - {file = "websockets-11.0.3-py3-none-any.whl", hash = "sha256:6681ba9e7f8f3b19440921e99efbb40fc89f26cd71bf539e45d8c8a25c976dc6"}, - {file = "websockets-11.0.3.tar.gz", hash = "sha256:88fc51d9a26b10fc331be344f1781224a375b78488fc343620184e95a4b27016"}, -] - -[[package]] -name = "win-unicode-console" -version = "0.5" -description = "Enable Unicode input and display when running Python from Windows console." -optional = false -python-versions = "*" -files = [ - {file = "win_unicode_console-0.5.zip", hash = "sha256:d4142d4d56d46f449d6f00536a73625a871cba040f0bc1a2e305a04578f07d1e"}, -] - -[[package]] -name = "xlrd" -version = "2.0.1" -description = "Library for developers to extract data from Microsoft Excel (tm) .xls spreadsheet files" -optional = false -python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*, !=3.5.*" -files = [ - {file = "xlrd-2.0.1-py2.py3-none-any.whl", hash = "sha256:6a33ee89877bd9abc1158129f6e94be74e2679636b8a205b43b85206c3f0bbdd"}, - {file = "xlrd-2.0.1.tar.gz", hash = "sha256:f72f148f54442c6b056bf931dbc34f986fd0c3b0b6b5a58d013c9aef274d0c88"}, -] - -[package.extras] -build = ["twine", "wheel"] -docs = ["sphinx"] -test = ["pytest", "pytest-cov"] - -[[package]] -name = "xlsxwriter" -version = "3.1.2" -description = "A Python module for creating Excel XLSX files." 
-optional = false -python-versions = ">=3.6" -files = [ - {file = "XlsxWriter-3.1.2-py3-none-any.whl", hash = "sha256:331508ff39d610ecdaf979e458840bc1eab6e6a02cfd5d08f044f0f73636236f"}, - {file = "XlsxWriter-3.1.2.tar.gz", hash = "sha256:78751099a770273f1c98b8d6643351f68f98ae8e6acf9d09d37dc6798f8cd3de"}, -] - -[[package]] -name = "yarl" -version = "1.9.2" -description = "Yet another URL library" -optional = false -python-versions = ">=3.7" -files = [ - {file = "yarl-1.9.2-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:8c2ad583743d16ddbdf6bb14b5cd76bf43b0d0006e918809d5d4ddf7bde8dd82"}, - {file = "yarl-1.9.2-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:82aa6264b36c50acfb2424ad5ca537a2060ab6de158a5bd2a72a032cc75b9eb8"}, - {file = "yarl-1.9.2-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:c0c77533b5ed4bcc38e943178ccae29b9bcf48ffd1063f5821192f23a1bd27b9"}, - {file = "yarl-1.9.2-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ee4afac41415d52d53a9833ebae7e32b344be72835bbb589018c9e938045a560"}, - {file = "yarl-1.9.2-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:9bf345c3a4f5ba7f766430f97f9cc1320786f19584acc7086491f45524a551ac"}, - {file = "yarl-1.9.2-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:2a96c19c52ff442a808c105901d0bdfd2e28575b3d5f82e2f5fd67e20dc5f4ea"}, - {file = "yarl-1.9.2-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:891c0e3ec5ec881541f6c5113d8df0315ce5440e244a716b95f2525b7b9f3608"}, - {file = "yarl-1.9.2-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:c3a53ba34a636a256d767c086ceb111358876e1fb6b50dfc4d3f4951d40133d5"}, - {file = "yarl-1.9.2-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:566185e8ebc0898b11f8026447eacd02e46226716229cea8db37496c8cdd26e0"}, - {file = "yarl-1.9.2-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:2b0738fb871812722a0ac2154be1f049c6223b9f6f22eec352996b69775b36d4"}, - {file = "yarl-1.9.2-cp310-cp310-musllinux_1_1_ppc64le.whl", hash = "sha256:32f1d071b3f362c80f1a7d322bfd7b2d11e33d2adf395cc1dd4df36c9c243095"}, - {file = "yarl-1.9.2-cp310-cp310-musllinux_1_1_s390x.whl", hash = "sha256:e9fdc7ac0d42bc3ea78818557fab03af6181e076a2944f43c38684b4b6bed8e3"}, - {file = "yarl-1.9.2-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:56ff08ab5df8429901ebdc5d15941b59f6253393cb5da07b4170beefcf1b2528"}, - {file = "yarl-1.9.2-cp310-cp310-win32.whl", hash = "sha256:8ea48e0a2f931064469bdabca50c2f578b565fc446f302a79ba6cc0ee7f384d3"}, - {file = "yarl-1.9.2-cp310-cp310-win_amd64.whl", hash = "sha256:50f33040f3836e912ed16d212f6cc1efb3231a8a60526a407aeb66c1c1956dde"}, - {file = "yarl-1.9.2-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:646d663eb2232d7909e6601f1a9107e66f9791f290a1b3dc7057818fe44fc2b6"}, - {file = "yarl-1.9.2-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:aff634b15beff8902d1f918012fc2a42e0dbae6f469fce134c8a0dc51ca423bb"}, - {file = "yarl-1.9.2-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:a83503934c6273806aed765035716216cc9ab4e0364f7f066227e1aaea90b8d0"}, - {file = "yarl-1.9.2-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:b25322201585c69abc7b0e89e72790469f7dad90d26754717f3310bfe30331c2"}, - {file = "yarl-1.9.2-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:22a94666751778629f1ec4280b08eb11815783c63f52092a5953faf73be24191"}, - {file = 
"yarl-1.9.2-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:8ec53a0ea2a80c5cd1ab397925f94bff59222aa3cf9c6da938ce05c9ec20428d"}, - {file = "yarl-1.9.2-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:159d81f22d7a43e6eabc36d7194cb53f2f15f498dbbfa8edc8a3239350f59fe7"}, - {file = "yarl-1.9.2-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:832b7e711027c114d79dffb92576acd1bd2decc467dec60e1cac96912602d0e6"}, - {file = "yarl-1.9.2-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:95d2ecefbcf4e744ea952d073c6922e72ee650ffc79028eb1e320e732898d7e8"}, - {file = "yarl-1.9.2-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:d4e2c6d555e77b37288eaf45b8f60f0737c9efa3452c6c44626a5455aeb250b9"}, - {file = "yarl-1.9.2-cp311-cp311-musllinux_1_1_ppc64le.whl", hash = "sha256:783185c75c12a017cc345015ea359cc801c3b29a2966c2655cd12b233bf5a2be"}, - {file = "yarl-1.9.2-cp311-cp311-musllinux_1_1_s390x.whl", hash = "sha256:b8cc1863402472f16c600e3e93d542b7e7542a540f95c30afd472e8e549fc3f7"}, - {file = "yarl-1.9.2-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:822b30a0f22e588b32d3120f6d41e4ed021806418b4c9f0bc3048b8c8cb3f92a"}, - {file = "yarl-1.9.2-cp311-cp311-win32.whl", hash = "sha256:a60347f234c2212a9f0361955007fcf4033a75bf600a33c88a0a8e91af77c0e8"}, - {file = "yarl-1.9.2-cp311-cp311-win_amd64.whl", hash = "sha256:be6b3fdec5c62f2a67cb3f8c6dbf56bbf3f61c0f046f84645cd1ca73532ea051"}, - {file = "yarl-1.9.2-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:38a3928ae37558bc1b559f67410df446d1fbfa87318b124bf5032c31e3447b74"}, - {file = "yarl-1.9.2-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ac9bb4c5ce3975aeac288cfcb5061ce60e0d14d92209e780c93954076c7c4367"}, - {file = "yarl-1.9.2-cp37-cp37m-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:3da8a678ca8b96c8606bbb8bfacd99a12ad5dd288bc6f7979baddd62f71c63ef"}, - {file = "yarl-1.9.2-cp37-cp37m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:13414591ff516e04fcdee8dc051c13fd3db13b673c7a4cb1350e6b2ad9639ad3"}, - {file = "yarl-1.9.2-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:bf74d08542c3a9ea97bb8f343d4fcbd4d8f91bba5ec9d5d7f792dbe727f88938"}, - {file = "yarl-1.9.2-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:6e7221580dc1db478464cfeef9b03b95c5852cc22894e418562997df0d074ccc"}, - {file = "yarl-1.9.2-cp37-cp37m-musllinux_1_1_aarch64.whl", hash = "sha256:494053246b119b041960ddcd20fd76224149cfea8ed8777b687358727911dd33"}, - {file = "yarl-1.9.2-cp37-cp37m-musllinux_1_1_i686.whl", hash = "sha256:52a25809fcbecfc63ac9ba0c0fb586f90837f5425edfd1ec9f3372b119585e45"}, - {file = "yarl-1.9.2-cp37-cp37m-musllinux_1_1_ppc64le.whl", hash = "sha256:e65610c5792870d45d7b68c677681376fcf9cc1c289f23e8e8b39c1485384185"}, - {file = "yarl-1.9.2-cp37-cp37m-musllinux_1_1_s390x.whl", hash = "sha256:1b1bba902cba32cdec51fca038fd53f8beee88b77efc373968d1ed021024cc04"}, - {file = "yarl-1.9.2-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:662e6016409828ee910f5d9602a2729a8a57d74b163c89a837de3fea050c7582"}, - {file = "yarl-1.9.2-cp37-cp37m-win32.whl", hash = "sha256:f364d3480bffd3aa566e886587eaca7c8c04d74f6e8933f3f2c996b7f09bee1b"}, - {file = "yarl-1.9.2-cp37-cp37m-win_amd64.whl", hash = "sha256:6a5883464143ab3ae9ba68daae8e7c5c95b969462bbe42e2464d60e7e2698368"}, - {file = "yarl-1.9.2-cp38-cp38-macosx_10_9_universal2.whl", hash = 
"sha256:5610f80cf43b6202e2c33ba3ec2ee0a2884f8f423c8f4f62906731d876ef4fac"}, - {file = "yarl-1.9.2-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:b9a4e67ad7b646cd6f0938c7ebfd60e481b7410f574c560e455e938d2da8e0f4"}, - {file = "yarl-1.9.2-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:83fcc480d7549ccebe9415d96d9263e2d4226798c37ebd18c930fce43dfb9574"}, - {file = "yarl-1.9.2-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5fcd436ea16fee7d4207c045b1e340020e58a2597301cfbcfdbe5abd2356c2fb"}, - {file = "yarl-1.9.2-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:84e0b1599334b1e1478db01b756e55937d4614f8654311eb26012091be109d59"}, - {file = "yarl-1.9.2-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:3458a24e4ea3fd8930e934c129b676c27452e4ebda80fbe47b56d8c6c7a63a9e"}, - {file = "yarl-1.9.2-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:838162460b3a08987546e881a2bfa573960bb559dfa739e7800ceeec92e64417"}, - {file = "yarl-1.9.2-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:f4e2d08f07a3d7d3e12549052eb5ad3eab1c349c53ac51c209a0e5991bbada78"}, - {file = "yarl-1.9.2-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:de119f56f3c5f0e2fb4dee508531a32b069a5f2c6e827b272d1e0ff5ac040333"}, - {file = "yarl-1.9.2-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:149ddea5abf329752ea5051b61bd6c1d979e13fbf122d3a1f9f0c8be6cb6f63c"}, - {file = "yarl-1.9.2-cp38-cp38-musllinux_1_1_ppc64le.whl", hash = "sha256:674ca19cbee4a82c9f54e0d1eee28116e63bc6fd1e96c43031d11cbab8b2afd5"}, - {file = "yarl-1.9.2-cp38-cp38-musllinux_1_1_s390x.whl", hash = "sha256:9b3152f2f5677b997ae6c804b73da05a39daa6a9e85a512e0e6823d81cdad7cc"}, - {file = "yarl-1.9.2-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:5415d5a4b080dc9612b1b63cba008db84e908b95848369aa1da3686ae27b6d2b"}, - {file = "yarl-1.9.2-cp38-cp38-win32.whl", hash = "sha256:f7a3d8146575e08c29ed1cd287068e6d02f1c7bdff8970db96683b9591b86ee7"}, - {file = "yarl-1.9.2-cp38-cp38-win_amd64.whl", hash = "sha256:63c48f6cef34e6319a74c727376e95626f84ea091f92c0250a98e53e62c77c72"}, - {file = "yarl-1.9.2-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:75df5ef94c3fdc393c6b19d80e6ef1ecc9ae2f4263c09cacb178d871c02a5ba9"}, - {file = "yarl-1.9.2-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:c027a6e96ef77d401d8d5a5c8d6bc478e8042f1e448272e8d9752cb0aff8b5c8"}, - {file = "yarl-1.9.2-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:f3b078dbe227f79be488ffcfc7a9edb3409d018e0952cf13f15fd6512847f3f7"}, - {file = "yarl-1.9.2-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:59723a029760079b7d991a401386390c4be5bfec1e7dd83e25a6a0881859e716"}, - {file = "yarl-1.9.2-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:b03917871bf859a81ccb180c9a2e6c1e04d2f6a51d953e6a5cdd70c93d4e5a2a"}, - {file = "yarl-1.9.2-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:c1012fa63eb6c032f3ce5d2171c267992ae0c00b9e164efe4d73db818465fac3"}, - {file = "yarl-1.9.2-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a74dcbfe780e62f4b5a062714576f16c2f3493a0394e555ab141bf0d746bb955"}, - {file = "yarl-1.9.2-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:8c56986609b057b4839968ba901944af91b8e92f1725d1a2d77cbac6972b9ed1"}, - {file = "yarl-1.9.2-cp39-cp39-musllinux_1_1_aarch64.whl", hash = 
"sha256:2c315df3293cd521033533d242d15eab26583360b58f7ee5d9565f15fee1bef4"}, - {file = "yarl-1.9.2-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:b7232f8dfbd225d57340e441d8caf8652a6acd06b389ea2d3222b8bc89cbfca6"}, - {file = "yarl-1.9.2-cp39-cp39-musllinux_1_1_ppc64le.whl", hash = "sha256:53338749febd28935d55b41bf0bcc79d634881195a39f6b2f767870b72514caf"}, - {file = "yarl-1.9.2-cp39-cp39-musllinux_1_1_s390x.whl", hash = "sha256:066c163aec9d3d073dc9ffe5dd3ad05069bcb03fcaab8d221290ba99f9f69ee3"}, - {file = "yarl-1.9.2-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:8288d7cd28f8119b07dd49b7230d6b4562f9b61ee9a4ab02221060d21136be80"}, - {file = "yarl-1.9.2-cp39-cp39-win32.whl", hash = "sha256:b124e2a6d223b65ba8768d5706d103280914d61f5cae3afbc50fc3dfcc016623"}, - {file = "yarl-1.9.2-cp39-cp39-win_amd64.whl", hash = "sha256:61016e7d582bc46a5378ffdd02cd0314fb8ba52f40f9cf4d9a5e7dbef88dee18"}, - {file = "yarl-1.9.2.tar.gz", hash = "sha256:04ab9d4b9f587c06d801c2abfe9317b77cdf996c65a90d5e84ecc45010823571"}, -] - -[package.dependencies] -idna = ">=2.0" -multidict = ">=4.0" - -[[package]] -name = "zipp" -version = "3.16.0" -description = "Backport of pathlib-compatible object wrapper for zip files" -optional = false -python-versions = ">=3.8" -files = [ - {file = "zipp-3.16.0-py3-none-any.whl", hash = "sha256:5dadc3ad0a1f825fe42ce1bce0f2fc5a13af2e6b2d386af5b0ff295bc0a287d3"}, - {file = "zipp-3.16.0.tar.gz", hash = "sha256:1876cb065531855bbe83b6c489dcf69ecc28f1068d8e95959fe8bbc77774c941"}, -] - -[package.extras] -docs = ["furo", "jaraco.packaging (>=9.3)", "jaraco.tidelift (>=1.4)", "rst.linker (>=1.9)", "sphinx (>=3.5)", "sphinx-lint"] -testing = ["big-O", "jaraco.functools", "jaraco.itertools", "more-itertools", "pytest (>=6)", "pytest-black (>=0.3.7)", "pytest-checkdocs (>=2.4)", "pytest-cov", "pytest-enabler (>=2.2)", "pytest-mypy (>=0.9.1)", "pytest-ruff"] - -[[package]] -name = "zstandard" -version = "0.21.0" -description = "Zstandard bindings for Python" -optional = false -python-versions = ">=3.7" -files = [ - {file = "zstandard-0.21.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:649a67643257e3b2cff1c0a73130609679a5673bf389564bc6d4b164d822a7ce"}, - {file = "zstandard-0.21.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:144a4fe4be2e747bf9c646deab212666e39048faa4372abb6a250dab0f347a29"}, - {file = "zstandard-0.21.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:b72060402524ab91e075881f6b6b3f37ab715663313030d0ce983da44960a86f"}, - {file = "zstandard-0.21.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8257752b97134477fb4e413529edaa04fc0457361d304c1319573de00ba796b1"}, - {file = "zstandard-0.21.0-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:c053b7c4cbf71cc26808ed67ae955836232f7638444d709bfc302d3e499364fa"}, - {file = "zstandard-0.21.0-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:2769730c13638e08b7a983b32cb67775650024632cd0476bf1ba0e6360f5ac7d"}, - {file = "zstandard-0.21.0-cp310-cp310-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:7d3bc4de588b987f3934ca79140e226785d7b5e47e31756761e48644a45a6766"}, - {file = "zstandard-0.21.0-cp310-cp310-win32.whl", hash = "sha256:67829fdb82e7393ca68e543894cd0581a79243cc4ec74a836c305c70a5943f07"}, - {file = "zstandard-0.21.0-cp310-cp310-win_amd64.whl", hash = 
"sha256:e6048a287f8d2d6e8bc67f6b42a766c61923641dd4022b7fd3f7439e17ba5a4d"}, - {file = "zstandard-0.21.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:7f2afab2c727b6a3d466faee6974a7dad0d9991241c498e7317e5ccf53dbc766"}, - {file = "zstandard-0.21.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:ff0852da2abe86326b20abae912d0367878dd0854b8931897d44cfeb18985472"}, - {file = "zstandard-0.21.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d12fa383e315b62630bd407477d750ec96a0f438447d0e6e496ab67b8b451d39"}, - {file = "zstandard-0.21.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f1b9703fe2e6b6811886c44052647df7c37478af1b4a1a9078585806f42e5b15"}, - {file = "zstandard-0.21.0-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:df28aa5c241f59a7ab524f8ad8bb75d9a23f7ed9d501b0fed6d40ec3064784e8"}, - {file = "zstandard-0.21.0-cp311-cp311-win32.whl", hash = "sha256:0aad6090ac164a9d237d096c8af241b8dcd015524ac6dbec1330092dba151657"}, - {file = "zstandard-0.21.0-cp311-cp311-win_amd64.whl", hash = "sha256:48b6233b5c4cacb7afb0ee6b4f91820afbb6c0e3ae0fa10abbc20000acdf4f11"}, - {file = "zstandard-0.21.0-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:e7d560ce14fd209db6adacce8908244503a009c6c39eee0c10f138996cd66d3e"}, - {file = "zstandard-0.21.0-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1e6e131a4df2eb6f64961cea6f979cdff22d6e0d5516feb0d09492c8fd36f3bc"}, - {file = "zstandard-0.21.0-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e1e0c62a67ff425927898cf43da2cf6b852289ebcc2054514ea9bf121bec10a5"}, - {file = "zstandard-0.21.0-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:1545fb9cb93e043351d0cb2ee73fa0ab32e61298968667bb924aac166278c3fc"}, - {file = "zstandard-0.21.0-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:fe6c821eb6870f81d73bf10e5deed80edcac1e63fbc40610e61f340723fd5f7c"}, - {file = "zstandard-0.21.0-cp37-cp37m-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:ddb086ea3b915e50f6604be93f4f64f168d3fc3cef3585bb9a375d5834392d4f"}, - {file = "zstandard-0.21.0-cp37-cp37m-win32.whl", hash = "sha256:57ac078ad7333c9db7a74804684099c4c77f98971c151cee18d17a12649bc25c"}, - {file = "zstandard-0.21.0-cp37-cp37m-win_amd64.whl", hash = "sha256:1243b01fb7926a5a0417120c57d4c28b25a0200284af0525fddba812d575f605"}, - {file = "zstandard-0.21.0-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:ea68b1ba4f9678ac3d3e370d96442a6332d431e5050223626bdce748692226ea"}, - {file = "zstandard-0.21.0-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:8070c1cdb4587a8aa038638acda3bd97c43c59e1e31705f2766d5576b329e97c"}, - {file = "zstandard-0.21.0-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:4af612c96599b17e4930fe58bffd6514e6c25509d120f4eae6031b7595912f85"}, - {file = "zstandard-0.21.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:cff891e37b167bc477f35562cda1248acc115dbafbea4f3af54ec70821090965"}, - {file = "zstandard-0.21.0-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:a9fec02ce2b38e8b2e86079ff0b912445495e8ab0b137f9c0505f88ad0d61296"}, - {file = "zstandard-0.21.0-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = 
"sha256:0bdbe350691dec3078b187b8304e6a9c4d9db3eb2d50ab5b1d748533e746d099"}, - {file = "zstandard-0.21.0-cp38-cp38-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:b69cccd06a4a0a1d9fb3ec9a97600055cf03030ed7048d4bcb88c574f7895773"}, - {file = "zstandard-0.21.0-cp38-cp38-win32.whl", hash = "sha256:9980489f066a391c5572bc7dc471e903fb134e0b0001ea9b1d3eff85af0a6f1b"}, - {file = "zstandard-0.21.0-cp38-cp38-win_amd64.whl", hash = "sha256:0e1e94a9d9e35dc04bf90055e914077c80b1e0c15454cc5419e82529d3e70728"}, - {file = "zstandard-0.21.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:d2d61675b2a73edcef5e327e38eb62bdfc89009960f0e3991eae5cc3d54718de"}, - {file = "zstandard-0.21.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:25fbfef672ad798afab12e8fd204d122fca3bc8e2dcb0a2ba73bf0a0ac0f5f07"}, - {file = "zstandard-0.21.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:62957069a7c2626ae80023998757e27bd28d933b165c487ab6f83ad3337f773d"}, - {file = "zstandard-0.21.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:14e10ed461e4807471075d4b7a2af51f5234c8f1e2a0c1d37d5ca49aaaad49e8"}, - {file = "zstandard-0.21.0-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:9cff89a036c639a6a9299bf19e16bfb9ac7def9a7634c52c257166db09d950e7"}, - {file = "zstandard-0.21.0-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:52b2b5e3e7670bd25835e0e0730a236f2b0df87672d99d3bf4bf87248aa659fb"}, - {file = "zstandard-0.21.0-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:b1367da0dde8ae5040ef0413fb57b5baeac39d8931c70536d5f013b11d3fc3a5"}, - {file = "zstandard-0.21.0-cp39-cp39-win32.whl", hash = "sha256:db62cbe7a965e68ad2217a056107cc43d41764c66c895be05cf9c8b19578ce9c"}, - {file = "zstandard-0.21.0-cp39-cp39-win_amd64.whl", hash = "sha256:a8d200617d5c876221304b0e3fe43307adde291b4a897e7b0617a61611dfff6a"}, - {file = "zstandard-0.21.0.tar.gz", hash = "sha256:f08e3a10d01a247877e4cb61a82a319ea746c356a3786558bed2481e6c405546"}, -] - -[package.dependencies] -cffi = {version = ">=1.11", markers = "platform_python_implementation == \"PyPy\""} - -[package.extras] -cffi = ["cffi (>=1.11)"] - -[metadata] -lock-version = "2.0" -python-versions = "^3.10" -content-hash = "92c090ae111eaa2821badd95cf3880b678fcbf647241c9edc8f789009acf76c8" diff --git a/examples/langchain-python-rag-privategpt/privateGPT.py b/examples/langchain-python-rag-privategpt/privateGPT.py deleted file mode 100755 index 7d97a567..00000000 --- a/examples/langchain-python-rag-privategpt/privateGPT.py +++ /dev/null @@ -1,74 +0,0 @@ -#!/usr/bin/env python3 -from langchain.chains import RetrievalQA -from langchain.embeddings import HuggingFaceEmbeddings -from langchain.callbacks.streaming_stdout import StreamingStdOutCallbackHandler -from langchain.vectorstores import Chroma -from langchain.llms import Ollama -import chromadb -import os -import argparse -import time - -model = os.environ.get("MODEL", "llama2-uncensored") -# For embeddings model, the example uses a sentence-transformers model -# https://www.sbert.net/docs/pretrained_models.html -# "The all-mpnet-base-v2 model provides the best quality, while all-MiniLM-L6-v2 is 5 times faster and still offers good quality." 
-embeddings_model_name = os.environ.get("EMBEDDINGS_MODEL_NAME", "all-MiniLM-L6-v2") -persist_directory = os.environ.get("PERSIST_DIRECTORY", "db") -target_source_chunks = int(os.environ.get('TARGET_SOURCE_CHUNKS',4)) - -from constants import CHROMA_SETTINGS - -def main(): - # Parse the command line arguments - args = parse_arguments() - embeddings = HuggingFaceEmbeddings(model_name=embeddings_model_name) - - db = Chroma(persist_directory=persist_directory, embedding_function=embeddings) - - retriever = db.as_retriever(search_kwargs={"k": target_source_chunks}) - # activate/deactivate the streaming StdOut callback for LLMs - callbacks = [] if args.mute_stream else [StreamingStdOutCallbackHandler()] - - llm = Ollama(model=model, callbacks=callbacks) - - qa = RetrievalQA.from_chain_type(llm=llm, chain_type="stuff", retriever=retriever, return_source_documents= not args.hide_source) - # Interactive questions and answers - while True: - query = input("\nEnter a query: ") - if query == "exit": - break - if query.strip() == "": - continue - - # Get the answer from the chain - start = time.time() - res = qa(query) - answer, docs = res['result'], [] if args.hide_source else res['source_documents'] - end = time.time() - - # Print the result - print("\n\n> Question:") - print(query) - print(answer) - - # Print the relevant sources used for the answer - for document in docs: - print("\n> " + document.metadata["source"] + ":") - print(document.page_content) - -def parse_arguments(): - parser = argparse.ArgumentParser(description='privateGPT: Ask questions to your documents without an internet connection, ' - 'using the power of LLMs.') - parser.add_argument("--hide-source", "-S", action='store_true', - help='Use this flag to disable printing of source documents used for answers.') - - parser.add_argument("--mute-stream", "-M", - action='store_true', - help='Use this flag to disable the streaming StdOut callback for LLMs.') - - return parser.parse_args() - - -if __name__ == "__main__": - main() diff --git a/examples/langchain-python-rag-privategpt/pyproject.toml b/examples/langchain-python-rag-privategpt/pyproject.toml deleted file mode 100644 index fa65a737..00000000 --- a/examples/langchain-python-rag-privategpt/pyproject.toml +++ /dev/null @@ -1,26 +0,0 @@ -[tool.poetry] -name = "privategpt" -version = "0.1.0" -description = "" -authors = ["Ivan Martinez "] -license = "Apache Version 2.0" -readme = "README.md" - -[tool.poetry.dependencies] -python = "^3.10" -langchain = "0.0.261" -gpt4all = "^1.0.3" -chromadb = "^0.3.26" -PyMuPDF = "^1.22.5" -python-dotenv = "^1.0.0" -unstructured = "^0.8.0" -extract-msg = "^0.41.5" -tabulate = "^0.9.0" -pandoc = "^2.3" -pypandoc = "^1.11" -tqdm = "^4.65.0" -sentence-transformers = "^2.2.2" - -[build-system] -requires = ["poetry-core"] -build-backend = "poetry.core.masonry.api" diff --git a/examples/langchain-python-rag-privategpt/requirements.txt b/examples/langchain-python-rag-privategpt/requirements.txt deleted file mode 100644 index 4f2cee25..00000000 --- a/examples/langchain-python-rag-privategpt/requirements.txt +++ /dev/null @@ -1,15 +0,0 @@ -langchain==0.0.274 -gpt4all==1.0.8 -chromadb==0.5.0 -llama-cpp-python==0.1.81 -urllib3==2.0.4 -PyMuPDF==1.23.5 -python-dotenv==1.0.0 -unstructured==0.10.8 -extract-msg==0.45.0 -tabulate==0.9.0 -pandoc==2.3 -pypandoc==1.11 -tqdm==4.66.1 -sentence_transformers==2.2.2 -numpy>=1.22.2 # not directly required, pinned by Snyk to avoid a vulnerability diff --git a/examples/langchain-python-rag-websummary/README.md 
b/examples/langchain-python-rag-websummary/README.md deleted file mode 100644 index 746c47ab..00000000 --- a/examples/langchain-python-rag-websummary/README.md +++ /dev/null @@ -1,23 +0,0 @@ -# LangChain Web Summarization - -This example summarizes the website, [https://ollama.com/blog/run-llama2-uncensored-locally](https://ollama.com/blog/run-llama2-uncensored-locally) - -## Running the Example - -1. Ensure you have the `llama3.2` model installed: - - ```bash - ollama pull llama3.2 - ``` - -2. Install the Python Requirements. - - ```bash - pip install -r requirements.txt - ``` - -3. Run the example: - - ```bash - python main.py - ``` diff --git a/examples/langchain-python-rag-websummary/main.py b/examples/langchain-python-rag-websummary/main.py deleted file mode 100644 index 56f8bd24..00000000 --- a/examples/langchain-python-rag-websummary/main.py +++ /dev/null @@ -1,12 +0,0 @@ -from langchain_community.llms import Ollama -from langchain_community.document_loaders import WebBaseLoader -from langchain.chains.summarize import load_summarize_chain - -loader = WebBaseLoader("https://ollama.com/blog/run-llama2-uncensored-locally") -docs = loader.load() - -llm = Ollama(model="llama3.2") -chain = load_summarize_chain(llm, chain_type="stuff") - -result = chain.invoke(docs) -print(result) diff --git a/examples/langchain-python-rag-websummary/requirements.txt b/examples/langchain-python-rag-websummary/requirements.txt deleted file mode 100644 index 33cf51b3..00000000 --- a/examples/langchain-python-rag-websummary/requirements.txt +++ /dev/null @@ -1 +0,0 @@ -langchain==0.0.259 diff --git a/examples/langchain-python-simple/README.md b/examples/langchain-python-simple/README.md deleted file mode 100644 index 680ab560..00000000 --- a/examples/langchain-python-simple/README.md +++ /dev/null @@ -1,23 +0,0 @@ -# LangChain - -This example is a basic "hello world" of using LangChain with Ollama. - -## Running the Example - -1. Ensure you have the `llama3.2` model installed: - - ```bash - ollama pull llama3.2 - ``` - -2. Install the Python Requirements. - - ```bash - pip install -r requirements.txt - ``` - -3. Run the example: - - ```bash - python main.py - ``` diff --git a/examples/langchain-python-simple/main.py b/examples/langchain-python-simple/main.py deleted file mode 100644 index dafff827..00000000 --- a/examples/langchain-python-simple/main.py +++ /dev/null @@ -1,6 +0,0 @@ -from langchain.llms import Ollama - -input = input("What is your question?\n> ") -llm = Ollama(model="llama3.2") -res = llm.invoke(input) -print (res) diff --git a/examples/langchain-python-simple/requirements.txt b/examples/langchain-python-simple/requirements.txt deleted file mode 100644 index 33cf51b3..00000000 --- a/examples/langchain-python-simple/requirements.txt +++ /dev/null @@ -1 +0,0 @@ -langchain==0.0.259 diff --git a/examples/langchain-typescript-simple/README.md b/examples/langchain-typescript-simple/README.md deleted file mode 100644 index 7c65ccfa..00000000 --- a/examples/langchain-typescript-simple/README.md +++ /dev/null @@ -1,23 +0,0 @@ -# LangChain - -This example is a basic "hello world" of using LangChain with Ollama using Node.js and Typescript. - -## Running the Example - -1. Install the prerequisites: - - ```bash - npm install - ``` - -2. Ensure the `mistral` model is available: - - ```bash - ollama pull mistral - ``` - -3. 
Run the example: - - ```bash - npm start - ``` diff --git a/examples/langchain-typescript-simple/main.ts b/examples/langchain-typescript-simple/main.ts deleted file mode 100644 index 53a58371..00000000 --- a/examples/langchain-typescript-simple/main.ts +++ /dev/null @@ -1,25 +0,0 @@ -import { Ollama } from 'langchain/llms/ollama'; -import * as readline from "readline"; - -async function main() { - const ollama = new Ollama({ - model: 'mistral' - // other parameters can be found at https://js.langchain.com/docs/api/llms_ollama/classes/Ollama - }); - - const rl = readline.createInterface({ - input: process.stdin, - output: process.stdout, - }); - - rl.question("What is your question: \n", async (user_input) => { - const stream = await ollama.stream(user_input); - - for await (const chunk of stream) { - process.stdout.write(chunk); - } - rl.close(); - }) -} - -main(); \ No newline at end of file diff --git a/examples/langchain-typescript-simple/package-lock.json b/examples/langchain-typescript-simple/package-lock.json deleted file mode 100644 index 90587d20..00000000 --- a/examples/langchain-typescript-simple/package-lock.json +++ /dev/null @@ -1,997 +0,0 @@ -{ - "name": "langchain-typescript-simple", - "lockfileVersion": 3, - "requires": true, - "packages": { - "": { - "dependencies": { - "langchain": "^0.0.165" - }, - "devDependencies": { - "typescript": "^5.2.2" - } - }, - "node_modules/@anthropic-ai/sdk": { - "version": "0.6.2", - "resolved": "https://registry.npmjs.org/@anthropic-ai/sdk/-/sdk-0.6.2.tgz", - "integrity": "sha512-fB9PUj9RFT+XjkL+E9Ol864ZIJi+1P8WnbHspN3N3/GK2uSzjd0cbVIKTGgf4v3N8MwaQu+UWnU7C4BG/fap/g==", - "dependencies": { - "@types/node": "^18.11.18", - "@types/node-fetch": "^2.6.4", - "abort-controller": "^3.0.0", - "agentkeepalive": "^4.2.1", - "digest-fetch": "^1.3.0", - "form-data-encoder": "1.7.2", - "formdata-node": "^4.3.2", - "node-fetch": "^2.6.7" - } - }, - "node_modules/@types/node": { - "version": "18.18.4", - "resolved": "https://registry.npmjs.org/@types/node/-/node-18.18.4.tgz", - "integrity": "sha512-t3rNFBgJRugIhackit2mVcLfF6IRc0JE4oeizPQL8Zrm8n2WY/0wOdpOPhdtG0V9Q2TlW/axbF1MJ6z+Yj/kKQ==" - }, - "node_modules/@types/node-fetch": { - "version": "2.6.6", - "resolved": "https://registry.npmjs.org/@types/node-fetch/-/node-fetch-2.6.6.tgz", - "integrity": "sha512-95X8guJYhfqiuVVhRFxVQcf4hW/2bCuoPwDasMf/531STFoNoWTT7YDnWdXHEZKqAGUigmpG31r2FE70LwnzJw==", - "dependencies": { - "@types/node": "*", - "form-data": "^4.0.0" - } - }, - "node_modules/@types/retry": { - "version": "0.12.0", - "resolved": "https://registry.npmjs.org/@types/retry/-/retry-0.12.0.tgz", - "integrity": "sha512-wWKOClTTiizcZhXnPY4wikVAwmdYHp8q6DmC+EJUzAMsycb7HB32Kh9RN4+0gExjmPmZSAQjgURXIGATPegAvA==" - }, - "node_modules/@types/uuid": { - "version": "9.0.5", - "resolved": "https://registry.npmjs.org/@types/uuid/-/uuid-9.0.5.tgz", - "integrity": "sha512-xfHdwa1FMJ082prjSJpoEI57GZITiQz10r3vEJCHa2khEFQjKy91aWKz6+zybzssCvXUwE1LQWgWVwZ4nYUvHQ==" - }, - "node_modules/abort-controller": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/abort-controller/-/abort-controller-3.0.0.tgz", - "integrity": "sha512-h8lQ8tacZYnR3vNQTgibj+tODHI5/+l06Au2Pcriv/Gmet0eaj4TwWH41sO9wnHDiQsEj19q0drzdWdeAHtweg==", - "dependencies": { - "event-target-shim": "^5.0.0" - }, - "engines": { - "node": ">=6.5" - } - }, - "node_modules/agentkeepalive": { - "version": "4.5.0", - "resolved": "https://registry.npmjs.org/agentkeepalive/-/agentkeepalive-4.5.0.tgz", - "integrity": 
"sha512-5GG/5IbQQpC9FpkRGsSvZI5QYeSCzlJHdpBQntCsuTOxhKD8lqKhrleg2Yi7yvMIf82Ycmmqln9U8V9qwEiJew==", - "dependencies": { - "humanize-ms": "^1.2.1" - }, - "engines": { - "node": ">= 8.0.0" - } - }, - "node_modules/ansi-styles": { - "version": "5.2.0", - "resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-5.2.0.tgz", - "integrity": "sha512-Cxwpt2SfTzTtXcfOlzGEee8O+c+MmUgGrNiBcXnuWxuFJHe6a5Hz7qwhwe5OgaSYI0IJvkLqWX1ASG+cJOkEiA==", - "engines": { - "node": ">=10" - }, - "funding": { - "url": "https://github.com/chalk/ansi-styles?sponsor=1" - } - }, - "node_modules/argparse": { - "version": "2.0.1", - "resolved": "https://registry.npmjs.org/argparse/-/argparse-2.0.1.tgz", - "integrity": "sha512-8+9WqebbFzpX9OR+Wa6O29asIogeRMzcGtAINdpMHHyAg10f05aSFVBbcEqGf/PXw1EjAZ+q2/bEBg3DvurK3Q==" - }, - "node_modules/asynckit": { - "version": "0.4.0", - "resolved": "https://registry.npmjs.org/asynckit/-/asynckit-0.4.0.tgz", - "integrity": "sha512-Oei9OH4tRh0YqU3GxhX79dM/mwVgvbZJaSNaRk+bshkj0S5cfHcgYakreBjrHwatXKbz+IoIdYLxrKim2MjW0Q==" - }, - "node_modules/base-64": { - "version": "0.1.0", - "resolved": "https://registry.npmjs.org/base-64/-/base-64-0.1.0.tgz", - "integrity": "sha512-Y5gU45svrR5tI2Vt/X9GPd3L0HNIKzGu202EjxrXMpuc2V2CiKgemAbUUsqYmZJvPtCXoUKjNZwBJzsNScUbXA==" - }, - "node_modules/base64-js": { - "version": "1.5.1", - "resolved": "https://registry.npmjs.org/base64-js/-/base64-js-1.5.1.tgz", - "integrity": "sha512-AKpaYlHn8t4SVbOHCy+b5+KKgvR4vrsD8vbvrbiQJps7fKDTkjkDry6ji0rUJjC0kzbNePLwzxq8iypo41qeWA==", - "funding": [ - { - "type": "github", - "url": "https://github.com/sponsors/feross" - }, - { - "type": "patreon", - "url": "https://www.patreon.com/feross" - }, - { - "type": "consulting", - "url": "https://feross.org/support" - } - ] - }, - "node_modules/binary-extensions": { - "version": "2.2.0", - "resolved": "https://registry.npmjs.org/binary-extensions/-/binary-extensions-2.2.0.tgz", - "integrity": "sha512-jDctJ/IVQbZoJykoeHbhXpOlNBqGNcwXJKJog42E5HDPUwQTSdjCHdihjj0DlnheQ7blbT6dHOafNAiS8ooQKA==", - "engines": { - "node": ">=8" - } - }, - "node_modules/binary-search": { - "version": "1.3.6", - "resolved": "https://registry.npmjs.org/binary-search/-/binary-search-1.3.6.tgz", - "integrity": "sha512-nbE1WxOTTrUWIfsfZ4aHGYu5DOuNkbxGokjV6Z2kxfJK3uaAb8zNK1muzOeipoLHZjInT4Br88BHpzevc681xA==" - }, - "node_modules/camelcase": { - "version": "6.3.0", - "resolved": "https://registry.npmjs.org/camelcase/-/camelcase-6.3.0.tgz", - "integrity": "sha512-Gmy6FhYlCY7uOElZUSbxo2UCDH8owEk996gkbrpsgGtrJLM3J7jGxl9Ic7Qwwj4ivOE5AWZWRMecDdF7hqGjFA==", - "engines": { - "node": ">=10" - }, - "funding": { - "url": "https://github.com/sponsors/sindresorhus" - } - }, - "node_modules/charenc": { - "version": "0.0.2", - "resolved": "https://registry.npmjs.org/charenc/-/charenc-0.0.2.tgz", - "integrity": "sha512-yrLQ/yVUFXkzg7EDQsPieE/53+0RlaWTs+wBrvW36cyilJ2SaDWfl4Yj7MtLTXleV9uEKefbAGUPv2/iWSooRA==", - "engines": { - "node": "*" - } - }, - "node_modules/combined-stream": { - "version": "1.0.8", - "resolved": "https://registry.npmjs.org/combined-stream/-/combined-stream-1.0.8.tgz", - "integrity": "sha512-FQN4MRfuJeHf7cBbBMJFXhKSDq+2kAArBlmRBvcvFE5BB1HZKXtSFASDhdlz9zOYwxh8lDdnvmMOe/+5cdoEdg==", - "dependencies": { - "delayed-stream": "~1.0.0" - }, - "engines": { - "node": ">= 0.8" - } - }, - "node_modules/commander": { - "version": "10.0.1", - "resolved": "https://registry.npmjs.org/commander/-/commander-10.0.1.tgz", - "integrity": 
"sha512-y4Mg2tXshplEbSGzx7amzPwKKOCGuoSRP/CjEdwwk0FOGlUbq6lKuoyDZTNZkmxHdJtp54hdfY/JUrdL7Xfdug==", - "engines": { - "node": ">=14" - } - }, - "node_modules/crypt": { - "version": "0.0.2", - "resolved": "https://registry.npmjs.org/crypt/-/crypt-0.0.2.tgz", - "integrity": "sha512-mCxBlsHFYh9C+HVpiEacem8FEBnMXgU9gy4zmNC+SXAZNB/1idgp/aulFJ4FgCi7GPEVbfyng092GqL2k2rmow==", - "engines": { - "node": "*" - } - }, - "node_modules/decamelize": { - "version": "1.2.0", - "resolved": "https://registry.npmjs.org/decamelize/-/decamelize-1.2.0.tgz", - "integrity": "sha512-z2S+W9X73hAUUki+N+9Za2lBlun89zigOyGrsax+KUQ6wKW4ZoWpEYBkGhQjwAjjDCkWxhY0VKEhk8wzY7F5cA==", - "engines": { - "node": ">=0.10.0" - } - }, - "node_modules/delayed-stream": { - "version": "1.0.0", - "resolved": "https://registry.npmjs.org/delayed-stream/-/delayed-stream-1.0.0.tgz", - "integrity": "sha512-ZySD7Nf91aLB0RxL4KGrKHBXl7Eds1DAmEdcoVawXnLD7SDhpNgtuII2aAkg7a7QS41jxPSZ17p4VdGnMHk3MQ==", - "engines": { - "node": ">=0.4.0" - } - }, - "node_modules/digest-fetch": { - "version": "1.3.0", - "resolved": "https://registry.npmjs.org/digest-fetch/-/digest-fetch-1.3.0.tgz", - "integrity": "sha512-CGJuv6iKNM7QyZlM2T3sPAdZWd/p9zQiRNS9G+9COUCwzWFTs0Xp8NF5iePx7wtvhDykReiRRrSeNb4oMmB8lA==", - "dependencies": { - "base-64": "^0.1.0", - "md5": "^2.3.0" - } - }, - "node_modules/event-target-shim": { - "version": "5.0.1", - "resolved": "https://registry.npmjs.org/event-target-shim/-/event-target-shim-5.0.1.tgz", - "integrity": "sha512-i/2XbnSz/uxRCU6+NdVJgKWDTM427+MqYbkQzD321DuCQJUqOuJKIA0IM2+W2xtYHdKOmZ4dR6fExsd4SXL+WQ==", - "engines": { - "node": ">=6" - } - }, - "node_modules/eventemitter3": { - "version": "4.0.7", - "resolved": "https://registry.npmjs.org/eventemitter3/-/eventemitter3-4.0.7.tgz", - "integrity": "sha512-8guHBZCwKnFhYdHr2ysuRWErTwhoN2X8XELRlrRwpmfeY2jjuUN4taQMsULKUVo1K4DvZl+0pgfyoysHxvmvEw==" - }, - "node_modules/expr-eval": { - "version": "2.0.2", - "resolved": "https://registry.npmjs.org/expr-eval/-/expr-eval-2.0.2.tgz", - "integrity": "sha512-4EMSHGOPSwAfBiibw3ndnP0AvjDWLsMvGOvWEZ2F96IGk0bIVdjQisOHxReSkE13mHcfbuCiXw+G4y0zv6N8Eg==" - }, - "node_modules/flat": { - "version": "5.0.2", - "resolved": "https://registry.npmjs.org/flat/-/flat-5.0.2.tgz", - "integrity": "sha512-b6suED+5/3rTpUBdG1gupIl8MPFCAMA0QXwmljLhvCUKcUvdE4gWky9zpuGCcXHOsz4J9wPGNWq6OKpmIzz3hQ==", - "bin": { - "flat": "cli.js" - } - }, - "node_modules/form-data": { - "version": "4.0.0", - "resolved": "https://registry.npmjs.org/form-data/-/form-data-4.0.0.tgz", - "integrity": "sha512-ETEklSGi5t0QMZuiXoA/Q6vcnxcLQP5vdugSpuAyi6SVGi2clPPp+xgEhuMaHC+zGgn31Kd235W35f7Hykkaww==", - "dependencies": { - "asynckit": "^0.4.0", - "combined-stream": "^1.0.8", - "mime-types": "^2.1.12" - }, - "engines": { - "node": ">= 6" - } - }, - "node_modules/form-data-encoder": { - "version": "1.7.2", - "resolved": "https://registry.npmjs.org/form-data-encoder/-/form-data-encoder-1.7.2.tgz", - "integrity": "sha512-qfqtYan3rxrnCk1VYaA4H+Ms9xdpPqvLZa6xmMgFvhO32x7/3J/ExcTd6qpxM0vH2GdMI+poehyBZvqfMTto8A==" - }, - "node_modules/formdata-node": { - "version": "4.4.1", - "resolved": "https://registry.npmjs.org/formdata-node/-/formdata-node-4.4.1.tgz", - "integrity": "sha512-0iirZp3uVDjVGt9p49aTaqjk84TrglENEDuqfdlZQ1roC9CWlPk6Avf8EEnZNcAqPonwkG35x4n3ww/1THYAeQ==", - "dependencies": { - "node-domexception": "1.0.0", - "web-streams-polyfill": "4.0.0-beta.3" - }, - "engines": { - "node": ">= 12.20" - } - }, - "node_modules/humanize-ms": { - "version": "1.2.1", - "resolved": 
"https://registry.npmjs.org/humanize-ms/-/humanize-ms-1.2.1.tgz", - "integrity": "sha512-Fl70vYtsAFb/C06PTS9dZBo7ihau+Tu/DNCk/OyHhea07S+aeMWpFFkUaXRa8fI+ScZbEI8dfSxwY7gxZ9SAVQ==", - "dependencies": { - "ms": "^2.0.0" - } - }, - "node_modules/is-any-array": { - "version": "2.0.1", - "resolved": "https://registry.npmjs.org/is-any-array/-/is-any-array-2.0.1.tgz", - "integrity": "sha512-UtilS7hLRu++wb/WBAw9bNuP1Eg04Ivn1vERJck8zJthEvXCBEBpGR/33u/xLKWEQf95803oalHrVDptcAvFdQ==" - }, - "node_modules/is-buffer": { - "version": "1.1.6", - "resolved": "https://registry.npmjs.org/is-buffer/-/is-buffer-1.1.6.tgz", - "integrity": "sha512-NcdALwpXkTm5Zvvbk7owOUSvVvBKDgKP5/ewfXEznmQFfs4ZRmanOeKBTjRVjka3QFoN6XJ+9F3USqfHqTaU5w==" - }, - "node_modules/js-tiktoken": { - "version": "1.0.7", - "resolved": "https://registry.npmjs.org/js-tiktoken/-/js-tiktoken-1.0.7.tgz", - "integrity": "sha512-biba8u/clw7iesNEWLOLwrNGoBP2lA+hTaBLs/D45pJdUPFXyxD6nhcDVtADChghv4GgyAiMKYMiRx7x6h7Biw==", - "dependencies": { - "base64-js": "^1.5.1" - } - }, - "node_modules/js-yaml": { - "version": "4.1.0", - "resolved": "https://registry.npmjs.org/js-yaml/-/js-yaml-4.1.0.tgz", - "integrity": "sha512-wpxZs9NoxZaJESJGIZTyDEaYpl0FKSA+FB9aJiyemKhMwkxQg63h4T1KJgUGHpTqPDNRcmmYLugrRjJlBtWvRA==", - "dependencies": { - "argparse": "^2.0.1" - }, - "bin": { - "js-yaml": "bin/js-yaml.js" - } - }, - "node_modules/jsonpointer": { - "version": "5.0.1", - "resolved": "https://registry.npmjs.org/jsonpointer/-/jsonpointer-5.0.1.tgz", - "integrity": "sha512-p/nXbhSEcu3pZRdkW1OfJhpsVtW1gd4Wa1fnQc9YLiTfAjn0312eMKimbdIQzuZl9aa9xUGaRlP9T/CJE/ditQ==", - "engines": { - "node": ">=0.10.0" - } - }, - "node_modules/langchain": { - "version": "0.0.165", - "resolved": "https://registry.npmjs.org/langchain/-/langchain-0.0.165.tgz", - "integrity": "sha512-CpbNpjwaE+9lzjdw+pZz0VgnRrFivEgr7CVp9dDaAb5JpaJAA4V2v6uQ9ZPN+TSqupTQ79HFn2sfyZVEl2EG7Q==", - "dependencies": { - "@anthropic-ai/sdk": "^0.6.2", - "ansi-styles": "^5.0.0", - "binary-extensions": "^2.2.0", - "camelcase": "6", - "decamelize": "^1.2.0", - "expr-eval": "^2.0.2", - "flat": "^5.0.2", - "js-tiktoken": "^1.0.7", - "js-yaml": "^4.1.0", - "jsonpointer": "^5.0.1", - "langchainhub": "~0.0.6", - "langsmith": "~0.0.31", - "ml-distance": "^4.0.0", - "object-hash": "^3.0.0", - "openai": "~4.4.0", - "openapi-types": "^12.1.3", - "p-queue": "^6.6.2", - "p-retry": "4", - "uuid": "^9.0.0", - "yaml": "^2.2.1", - "zod": "^3.22.3", - "zod-to-json-schema": "^3.20.4" - }, - "engines": { - "node": ">=18" - }, - "peerDependencies": { - "@aws-crypto/sha256-js": "^5.0.0", - "@aws-sdk/client-bedrock-runtime": "^3.422.0", - "@aws-sdk/client-dynamodb": "^3.310.0", - "@aws-sdk/client-kendra": "^3.352.0", - "@aws-sdk/client-lambda": "^3.310.0", - "@aws-sdk/client-s3": "^3.310.0", - "@aws-sdk/client-sagemaker-runtime": "^3.310.0", - "@aws-sdk/client-sfn": "^3.310.0", - "@aws-sdk/credential-provider-node": "^3.388.0", - "@azure/storage-blob": "^12.15.0", - "@clickhouse/client": "^0.0.14", - "@cloudflare/ai": "^1.0.12", - "@elastic/elasticsearch": "^8.4.0", - "@getmetal/metal-sdk": "*", - "@getzep/zep-js": "^0.7.0", - "@gomomento/sdk": "^1.23.0", - "@google-ai/generativelanguage": "^0.2.1", - "@google-cloud/storage": "^6.10.1", - "@huggingface/inference": "^1.5.1", - "@mozilla/readability": "*", - "@notionhq/client": "^2.2.10", - "@opensearch-project/opensearch": "*", - "@pinecone-database/pinecone": "^1.1.0", - "@planetscale/database": "^1.8.0", - "@qdrant/js-client-rest": "^1.2.0", - "@raycast/api": "^1.55.2", - 
"@smithy/eventstream-codec": "^2.0.5", - "@smithy/protocol-http": "^3.0.6", - "@smithy/signature-v4": "^2.0.10", - "@smithy/util-utf8": "^2.0.0", - "@supabase/postgrest-js": "^1.1.1", - "@supabase/supabase-js": "^2.10.0", - "@tensorflow-models/universal-sentence-encoder": "*", - "@tensorflow/tfjs-converter": "*", - "@tensorflow/tfjs-core": "*", - "@upstash/redis": "^1.20.6", - "@vercel/postgres": "^0.5.0", - "@writerai/writer-sdk": "^0.40.2", - "@xata.io/client": "^0.25.1", - "@xenova/transformers": "^2.5.4", - "@zilliz/milvus2-sdk-node": ">=2.2.7", - "apify-client": "^2.7.1", - "axios": "*", - "cassandra-driver": "^4.6.4", - "cheerio": "^1.0.0-rc.12", - "chromadb": "*", - "cohere-ai": ">=6.0.0", - "d3-dsv": "^2.0.0", - "epub2": "^3.0.1", - "faiss-node": "^0.3.0", - "fast-xml-parser": "^4.2.7", - "firebase-admin": "^11.9.0", - "google-auth-library": "^8.9.0", - "googleapis": "^126.0.1", - "hnswlib-node": "^1.4.2", - "html-to-text": "^9.0.5", - "ignore": "^5.2.0", - "ioredis": "^5.3.2", - "jsdom": "*", - "llmonitor": "*", - "lodash": "^4.17.21", - "mammoth": "*", - "mongodb": "^5.2.0", - "mysql2": "^3.3.3", - "neo4j-driver": "*", - "node-llama-cpp": "*", - "notion-to-md": "^3.1.0", - "pdf-parse": "1.1.1", - "peggy": "^3.0.2", - "pg": "^8.11.0", - "pg-copy-streams": "^6.0.5", - "pickleparser": "^0.1.0", - "playwright": "^1.32.1", - "portkey-ai": "^0.1.11", - "puppeteer": "^19.7.2", - "redis": "^4.6.4", - "replicate": "^0.18.0", - "sonix-speech-recognition": "^2.1.1", - "srt-parser-2": "^1.2.2", - "typeorm": "^0.3.12", - "typesense": "^1.5.3", - "usearch": "^1.1.1", - "vectordb": "^0.1.4", - "voy-search": "0.6.2", - "weaviate-ts-client": "^1.4.0", - "web-auth-library": "^1.0.3", - "youtube-transcript": "^1.0.6", - "youtubei.js": "^5.8.0" - }, - "peerDependenciesMeta": { - "@aws-crypto/sha256-js": { - "optional": true - }, - "@aws-sdk/client-bedrock-runtime": { - "optional": true - }, - "@aws-sdk/client-dynamodb": { - "optional": true - }, - "@aws-sdk/client-kendra": { - "optional": true - }, - "@aws-sdk/client-lambda": { - "optional": true - }, - "@aws-sdk/client-s3": { - "optional": true - }, - "@aws-sdk/client-sagemaker-runtime": { - "optional": true - }, - "@aws-sdk/client-sfn": { - "optional": true - }, - "@aws-sdk/credential-provider-node": { - "optional": true - }, - "@azure/storage-blob": { - "optional": true - }, - "@clickhouse/client": { - "optional": true - }, - "@cloudflare/ai": { - "optional": true - }, - "@elastic/elasticsearch": { - "optional": true - }, - "@getmetal/metal-sdk": { - "optional": true - }, - "@getzep/zep-js": { - "optional": true - }, - "@gomomento/sdk": { - "optional": true - }, - "@google-ai/generativelanguage": { - "optional": true - }, - "@google-cloud/storage": { - "optional": true - }, - "@huggingface/inference": { - "optional": true - }, - "@mozilla/readability": { - "optional": true - }, - "@notionhq/client": { - "optional": true - }, - "@opensearch-project/opensearch": { - "optional": true - }, - "@pinecone-database/pinecone": { - "optional": true - }, - "@planetscale/database": { - "optional": true - }, - "@qdrant/js-client-rest": { - "optional": true - }, - "@raycast/api": { - "optional": true - }, - "@smithy/eventstream-codec": { - "optional": true - }, - "@smithy/protocol-http": { - "optional": true - }, - "@smithy/signature-v4": { - "optional": true - }, - "@smithy/util-utf8": { - "optional": true - }, - "@supabase/postgrest-js": { - "optional": true - }, - "@supabase/supabase-js": { - "optional": true - }, - 
"@tensorflow-models/universal-sentence-encoder": { - "optional": true - }, - "@tensorflow/tfjs-converter": { - "optional": true - }, - "@tensorflow/tfjs-core": { - "optional": true - }, - "@upstash/redis": { - "optional": true - }, - "@vercel/postgres": { - "optional": true - }, - "@writerai/writer-sdk": { - "optional": true - }, - "@xata.io/client": { - "optional": true - }, - "@xenova/transformers": { - "optional": true - }, - "@zilliz/milvus2-sdk-node": { - "optional": true - }, - "apify-client": { - "optional": true - }, - "axios": { - "optional": true - }, - "cassandra-driver": { - "optional": true - }, - "cheerio": { - "optional": true - }, - "chromadb": { - "optional": true - }, - "cohere-ai": { - "optional": true - }, - "d3-dsv": { - "optional": true - }, - "epub2": { - "optional": true - }, - "faiss-node": { - "optional": true - }, - "fast-xml-parser": { - "optional": true - }, - "firebase-admin": { - "optional": true - }, - "google-auth-library": { - "optional": true - }, - "googleapis": { - "optional": true - }, - "hnswlib-node": { - "optional": true - }, - "html-to-text": { - "optional": true - }, - "ignore": { - "optional": true - }, - "ioredis": { - "optional": true - }, - "jsdom": { - "optional": true - }, - "llmonitor": { - "optional": true - }, - "lodash": { - "optional": true - }, - "mammoth": { - "optional": true - }, - "mongodb": { - "optional": true - }, - "mysql2": { - "optional": true - }, - "neo4j-driver": { - "optional": true - }, - "node-llama-cpp": { - "optional": true - }, - "notion-to-md": { - "optional": true - }, - "pdf-parse": { - "optional": true - }, - "peggy": { - "optional": true - }, - "pg": { - "optional": true - }, - "pg-copy-streams": { - "optional": true - }, - "pickleparser": { - "optional": true - }, - "playwright": { - "optional": true - }, - "portkey-ai": { - "optional": true - }, - "puppeteer": { - "optional": true - }, - "redis": { - "optional": true - }, - "replicate": { - "optional": true - }, - "sonix-speech-recognition": { - "optional": true - }, - "srt-parser-2": { - "optional": true - }, - "typeorm": { - "optional": true - }, - "typesense": { - "optional": true - }, - "usearch": { - "optional": true - }, - "vectordb": { - "optional": true - }, - "voy-search": { - "optional": true - }, - "weaviate-ts-client": { - "optional": true - }, - "web-auth-library": { - "optional": true - }, - "youtube-transcript": { - "optional": true - }, - "youtubei.js": { - "optional": true - } - } - }, - "node_modules/langchainhub": { - "version": "0.0.6", - "resolved": "https://registry.npmjs.org/langchainhub/-/langchainhub-0.0.6.tgz", - "integrity": "sha512-SW6105T+YP1cTe0yMf//7kyshCgvCTyFBMTgH2H3s9rTAR4e+78DA/BBrUL/Mt4Q5eMWui7iGuAYb3pgGsdQ9w==" - }, - "node_modules/langsmith": { - "version": "0.0.42", - "resolved": "https://registry.npmjs.org/langsmith/-/langsmith-0.0.42.tgz", - "integrity": "sha512-sFuN+e7E+pPBIRaRgFqZh/BRBWNHTZNAwi6uj4kydQawooCZYoJmM5snOkiQrhVSvAhgu6xFhLvmfvkPcKzD7w==", - "dependencies": { - "@types/uuid": "^9.0.1", - "commander": "^10.0.1", - "p-queue": "^6.6.2", - "p-retry": "4", - "uuid": "^9.0.0" - }, - "bin": { - "langsmith": "dist/cli/main.cjs" - } - }, - "node_modules/md5": { - "version": "2.3.0", - "resolved": "https://registry.npmjs.org/md5/-/md5-2.3.0.tgz", - "integrity": "sha512-T1GITYmFaKuO91vxyoQMFETst+O71VUPEU3ze5GNzDm0OWdP8v1ziTaAEPUr/3kLsY3Sftgz242A1SetQiDL7g==", - "dependencies": { - "charenc": "0.0.2", - "crypt": "0.0.2", - "is-buffer": "~1.1.6" - } - }, - "node_modules/mime-db": { - "version": "1.52.0", - "resolved": 
"https://registry.npmjs.org/mime-db/-/mime-db-1.52.0.tgz", - "integrity": "sha512-sPU4uV7dYlvtWJxwwxHD0PuihVNiE7TyAbQ5SWxDCB9mUYvOgroQOwYQQOKPJ8CIbE+1ETVlOoK1UC2nU3gYvg==", - "engines": { - "node": ">= 0.6" - } - }, - "node_modules/mime-types": { - "version": "2.1.35", - "resolved": "https://registry.npmjs.org/mime-types/-/mime-types-2.1.35.tgz", - "integrity": "sha512-ZDY+bPm5zTTF+YpCrAU9nK0UgICYPT0QtT1NZWFv4s++TNkcgVaT0g6+4R2uI4MjQjzysHB1zxuWL50hzaeXiw==", - "dependencies": { - "mime-db": "1.52.0" - }, - "engines": { - "node": ">= 0.6" - } - }, - "node_modules/ml-array-mean": { - "version": "1.1.6", - "resolved": "https://registry.npmjs.org/ml-array-mean/-/ml-array-mean-1.1.6.tgz", - "integrity": "sha512-MIdf7Zc8HznwIisyiJGRH9tRigg3Yf4FldW8DxKxpCCv/g5CafTw0RRu51nojVEOXuCQC7DRVVu5c7XXO/5joQ==", - "dependencies": { - "ml-array-sum": "^1.1.6" - } - }, - "node_modules/ml-array-sum": { - "version": "1.1.6", - "resolved": "https://registry.npmjs.org/ml-array-sum/-/ml-array-sum-1.1.6.tgz", - "integrity": "sha512-29mAh2GwH7ZmiRnup4UyibQZB9+ZLyMShvt4cH4eTK+cL2oEMIZFnSyB3SS8MlsTh6q/w/yh48KmqLxmovN4Dw==", - "dependencies": { - "is-any-array": "^2.0.0" - } - }, - "node_modules/ml-distance": { - "version": "4.0.1", - "resolved": "https://registry.npmjs.org/ml-distance/-/ml-distance-4.0.1.tgz", - "integrity": "sha512-feZ5ziXs01zhyFUUUeZV5hwc0f5JW0Sh0ckU1koZe/wdVkJdGxcP06KNQuF0WBTj8FttQUzcvQcpcrOp/XrlEw==", - "dependencies": { - "ml-array-mean": "^1.1.6", - "ml-distance-euclidean": "^2.0.0", - "ml-tree-similarity": "^1.0.0" - } - }, - "node_modules/ml-distance-euclidean": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/ml-distance-euclidean/-/ml-distance-euclidean-2.0.0.tgz", - "integrity": "sha512-yC9/2o8QF0A3m/0IXqCTXCzz2pNEzvmcE/9HFKOZGnTjatvBbsn4lWYJkxENkA4Ug2fnYl7PXQxnPi21sgMy/Q==" - }, - "node_modules/ml-tree-similarity": { - "version": "1.0.0", - "resolved": "https://registry.npmjs.org/ml-tree-similarity/-/ml-tree-similarity-1.0.0.tgz", - "integrity": "sha512-XJUyYqjSuUQkNQHMscr6tcjldsOoAekxADTplt40QKfwW6nd++1wHWV9AArl0Zvw/TIHgNaZZNvr8QGvE8wLRg==", - "dependencies": { - "binary-search": "^1.3.5", - "num-sort": "^2.0.0" - } - }, - "node_modules/ms": { - "version": "2.1.3", - "resolved": "https://registry.npmjs.org/ms/-/ms-2.1.3.tgz", - "integrity": "sha512-6FlzubTLZG3J2a/NVCAleEhjzq5oxgHyaCU9yYXvcLsvoVaHJq/s5xXI6/XXP6tz7R9xAOtHnSO/tXtF3WRTlA==" - }, - "node_modules/node-domexception": { - "version": "1.0.0", - "resolved": "https://registry.npmjs.org/node-domexception/-/node-domexception-1.0.0.tgz", - "integrity": "sha512-/jKZoMpw0F8GRwl4/eLROPA3cfcXtLApP0QzLmUT/HuPCZWyB7IY9ZrMeKw2O/nFIqPQB3PVM9aYm0F312AXDQ==", - "funding": [ - { - "type": "github", - "url": "https://github.com/sponsors/jimmywarting" - }, - { - "type": "github", - "url": "https://paypal.me/jimmywarting" - } - ], - "engines": { - "node": ">=10.5.0" - } - }, - "node_modules/node-fetch": { - "version": "2.7.0", - "resolved": "https://registry.npmjs.org/node-fetch/-/node-fetch-2.7.0.tgz", - "integrity": "sha512-c4FRfUm/dbcWZ7U+1Wq0AwCyFL+3nt2bEw05wfxSz+DWpWsitgmSgYmy2dQdWyKC1694ELPqMs/YzUSNozLt8A==", - "dependencies": { - "whatwg-url": "^5.0.0" - }, - "engines": { - "node": "4.x || >=6.0.0" - }, - "peerDependencies": { - "encoding": "^0.1.0" - }, - "peerDependenciesMeta": { - "encoding": { - "optional": true - } - } - }, - "node_modules/num-sort": { - "version": "2.1.0", - "resolved": "https://registry.npmjs.org/num-sort/-/num-sort-2.1.0.tgz", - "integrity": 
"sha512-1MQz1Ed8z2yckoBeSfkQHHO9K1yDRxxtotKSJ9yvcTUUxSvfvzEq5GwBrjjHEpMlq/k5gvXdmJ1SbYxWtpNoVg==", - "engines": { - "node": ">=8" - }, - "funding": { - "url": "https://github.com/sponsors/sindresorhus" - } - }, - "node_modules/object-hash": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/object-hash/-/object-hash-3.0.0.tgz", - "integrity": "sha512-RSn9F68PjH9HqtltsSnqYC1XXoWe9Bju5+213R98cNGttag9q9yAOTzdbsqvIa7aNm5WffBZFpWYr2aWrklWAw==", - "engines": { - "node": ">= 6" - } - }, - "node_modules/openai": { - "version": "4.4.0", - "resolved": "https://registry.npmjs.org/openai/-/openai-4.4.0.tgz", - "integrity": "sha512-JN0t628Kh95T0IrXl0HdBqnlJg+4Vq0Bnh55tio+dfCnyzHvMLiWyCM9m726MAJD2YkDU4/8RQB6rNbEq9ct2w==", - "dependencies": { - "@types/node": "^18.11.18", - "@types/node-fetch": "^2.6.4", - "abort-controller": "^3.0.0", - "agentkeepalive": "^4.2.1", - "digest-fetch": "^1.3.0", - "form-data-encoder": "1.7.2", - "formdata-node": "^4.3.2", - "node-fetch": "^2.6.7" - }, - "bin": { - "openai": "bin/cli" - } - }, - "node_modules/openapi-types": { - "version": "12.1.3", - "resolved": "https://registry.npmjs.org/openapi-types/-/openapi-types-12.1.3.tgz", - "integrity": "sha512-N4YtSYJqghVu4iek2ZUvcN/0aqH1kRDuNqzcycDxhOUpg7GdvLa2F3DgS6yBNhInhv2r/6I0Flkn7CqL8+nIcw==" - }, - "node_modules/p-finally": { - "version": "1.0.0", - "resolved": "https://registry.npmjs.org/p-finally/-/p-finally-1.0.0.tgz", - "integrity": "sha512-LICb2p9CB7FS+0eR1oqWnHhp0FljGLZCWBE9aix0Uye9W8LTQPwMTYVGWQWIw9RdQiDg4+epXQODwIYJtSJaow==", - "engines": { - "node": ">=4" - } - }, - "node_modules/p-queue": { - "version": "6.6.2", - "resolved": "https://registry.npmjs.org/p-queue/-/p-queue-6.6.2.tgz", - "integrity": "sha512-RwFpb72c/BhQLEXIZ5K2e+AhgNVmIejGlTgiB9MzZ0e93GRvqZ7uSi0dvRF7/XIXDeNkra2fNHBxTyPDGySpjQ==", - "dependencies": { - "eventemitter3": "^4.0.4", - "p-timeout": "^3.2.0" - }, - "engines": { - "node": ">=8" - }, - "funding": { - "url": "https://github.com/sponsors/sindresorhus" - } - }, - "node_modules/p-retry": { - "version": "4.6.2", - "resolved": "https://registry.npmjs.org/p-retry/-/p-retry-4.6.2.tgz", - "integrity": "sha512-312Id396EbJdvRONlngUx0NydfrIQ5lsYu0znKVUzVvArzEIt08V1qhtyESbGVd1FGX7UKtiFp5uwKZdM8wIuQ==", - "dependencies": { - "@types/retry": "0.12.0", - "retry": "^0.13.1" - }, - "engines": { - "node": ">=8" - } - }, - "node_modules/p-timeout": { - "version": "3.2.0", - "resolved": "https://registry.npmjs.org/p-timeout/-/p-timeout-3.2.0.tgz", - "integrity": "sha512-rhIwUycgwwKcP9yTOOFK/AKsAopjjCakVqLHePO3CC6Mir1Z99xT+R63jZxAT5lFZLa2inS5h+ZS2GvR99/FBg==", - "dependencies": { - "p-finally": "^1.0.0" - }, - "engines": { - "node": ">=8" - } - }, - "node_modules/retry": { - "version": "0.13.1", - "resolved": "https://registry.npmjs.org/retry/-/retry-0.13.1.tgz", - "integrity": "sha512-XQBQ3I8W1Cge0Seh+6gjj03LbmRFWuoszgK9ooCpwYIrhhoO80pfq4cUkU5DkknwfOfFteRwlZ56PYOGYyFWdg==", - "engines": { - "node": ">= 4" - } - }, - "node_modules/tr46": { - "version": "0.0.3", - "resolved": "https://registry.npmjs.org/tr46/-/tr46-0.0.3.tgz", - "integrity": "sha512-N3WMsuqV66lT30CrXNbEjx4GEwlow3v6rr4mCcv6prnfwhS01rkgyFdjPNBYd9br7LpXV1+Emh01fHnq2Gdgrw==" - }, - "node_modules/typescript": { - "version": "5.2.2", - "resolved": "https://registry.npmjs.org/typescript/-/typescript-5.2.2.tgz", - "integrity": "sha512-mI4WrpHsbCIcwT9cF4FZvr80QUeKvsUsUvKDoR+X/7XHQH98xYD8YHZg7ANtz2GtZt/CBq2QJ0thkGJMHfqc1w==", - "dev": true, - "bin": { - "tsc": "bin/tsc", - "tsserver": "bin/tsserver" - }, - "engines": { - "node": ">=14.17" - 
}
-    },
-    "node_modules/uuid": {
-      "version": "9.0.1",
-      "resolved": "https://registry.npmjs.org/uuid/-/uuid-9.0.1.tgz",
-      "integrity": "sha512-b+1eJOlsR9K8HJpow9Ok3fiWOWSIcIzXodvv0rQjVoOVNpWMpxf1wZNpt4y9h10odCNrqnYp1OBzRktckBe3sA==",
-      "funding": [
-        "https://github.com/sponsors/broofa",
-        "https://github.com/sponsors/ctavan"
-      ],
-      "bin": {
-        "uuid": "dist/bin/uuid"
-      }
-    },
-    "node_modules/web-streams-polyfill": {
-      "version": "4.0.0-beta.3",
-      "resolved": "https://registry.npmjs.org/web-streams-polyfill/-/web-streams-polyfill-4.0.0-beta.3.tgz",
-      "integrity": "sha512-QW95TCTaHmsYfHDybGMwO5IJIM93I/6vTRk+daHTWFPhwh+C8Cg7j7XyKrwrj8Ib6vYXe0ocYNrmzY4xAAN6ug==",
-      "engines": {
-        "node": ">= 14"
-      }
-    },
-    "node_modules/webidl-conversions": {
-      "version": "3.0.1",
-      "resolved": "https://registry.npmjs.org/webidl-conversions/-/webidl-conversions-3.0.1.tgz",
-      "integrity": "sha512-2JAn3z8AR6rjK8Sm8orRC0h/bcl/DqL7tRPdGZ4I1CjdF+EaMLmYxBHyXuKL849eucPFhvBoxMsflfOb8kxaeQ=="
-    },
-    "node_modules/whatwg-url": {
-      "version": "5.0.0",
-      "resolved": "https://registry.npmjs.org/whatwg-url/-/whatwg-url-5.0.0.tgz",
-      "integrity": "sha512-saE57nupxk6v3HY35+jzBwYa0rKSy0XR8JSxZPwgLr7ys0IBzhGviA1/TUGJLmSVqs8pb9AnvICXEuOHLprYTw==",
-      "dependencies": {
-        "tr46": "~0.0.3",
-        "webidl-conversions": "^3.0.0"
-      }
-    },
-    "node_modules/yaml": {
-      "version": "2.3.2",
-      "resolved": "https://registry.npmjs.org/yaml/-/yaml-2.3.2.tgz",
-      "integrity": "sha512-N/lyzTPaJasoDmfV7YTrYCI0G/3ivm/9wdG0aHuheKowWQwGTsK0Eoiw6utmzAnI6pkJa0DUVygvp3spqqEKXg==",
-      "engines": {
-        "node": ">= 14"
-      }
-    },
-    "node_modules/zod": {
-      "version": "3.22.4",
-      "resolved": "https://registry.npmjs.org/zod/-/zod-3.22.4.tgz",
-      "integrity": "sha512-iC+8Io04lddc+mVqQ9AZ7OQ2MrUKGN+oIQyq1vemgt46jwCwLfhq7/pwnBnNXXXZb8VTVLKwp9EDkx+ryxIWmg==",
-      "funding": {
-        "url": "https://github.com/sponsors/colinhacks"
-      }
-    },
-    "node_modules/zod-to-json-schema": {
-      "version": "3.21.4",
-      "resolved": "https://registry.npmjs.org/zod-to-json-schema/-/zod-to-json-schema-3.21.4.tgz",
-      "integrity": "sha512-fjUZh4nQ1s6HMccgIeE0VP4QG/YRGPmyjO9sAh890aQKPEk3nqbfUXhMFaC+Dr5KvYBm8BCyvfpZf2jY9aGSsw==",
-      "peerDependencies": {
-        "zod": "^3.21.4"
-      }
-    }
-  }
-}
diff --git a/examples/langchain-typescript-simple/package.json b/examples/langchain-typescript-simple/package.json
deleted file mode 100644
index 5d6a5b88..00000000
--- a/examples/langchain-typescript-simple/package.json
+++ /dev/null
@@ -1,13 +0,0 @@
-{
-  "scripts": {
-    "start": "tsx main.ts"
-  },
-  "devDependencies": {
-    "tsx": "^4.6.2",
-    "typescript": "^5.3.3"
-  },
-  "dependencies": {
-    "langchain": "^0.0.165",
-    "readline": "^1.3.0"
-  }
-}
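Together, the two deleted files above defined the TypeScript example's toolchain: the lockfile pinned `langchain@0.0.165` and its transitive dependencies, and package.json's `start` script ran `main.ts` through `tsx`. A sketch of the usual invocation, from `examples/langchain-typescript-simple/` in a checkout predating this patch:

```bash
npm install   # resolves dependencies from the package-lock.json above
npm start     # runs "tsx main.ts" per the "scripts" entry in package.json
```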
-""" diff --git a/examples/modelfile-mario/logo.png b/examples/modelfile-mario/logo.png deleted file mode 100644 index 1ef2564623051121def212c7f366e55ebff0ca72..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 456296 zcmV)3K+C_0P)PyA07*naRCr#ry$76SRh2!w?!7NouIe1t_ z2T)N*L5!%Vpcp_w8B~IT==`1ej|o8(QOVsjG^uk}S66sdFWvk7)(+?1SB=j1oB6&{ zpz4Kt&pl!9wbx!}pXf`!x-N@L8M0=ED2h<6l`%Uzhd4=4t5wl#G!aJ;+U*Vo2K&%$ zcQHHLL|bWOcF@<~kC~|%!~mj_{M-MJZnujp%}}q^fH*^^ z-377?olX~heKn-rE?UhNs@1Bzo~B(yQH&&s5y!yXYzvi28Leg$w3{N$x~SJ{NZF=V8(9QYN@aAjF0w2`xtt)5Vsx_<-ENAQP2|7qhrjW2Q5+-U zA1V8XZ$|n$e~Tjhj{aYs3lX1J|K{h}_DsH>g1_)P{9B(bMU=#HJzhVFBcy&EK>A&- z#e2wi=q?zI&n!GyWdkr9;@>NKiqHq|5K9o#cOGvqRdqwj$(t)OM)9Q5(+Z z5qrsZGQCGc!(-zlw%h!W+c&j>`L*S~g%#{;nSEMi8$N%_H5uXslnxU~Y=QBS!yV9dk2tG9Uv({g|DZMW@+8t*@d^ zHi-}?5jw4|I?h^2uEpHZw2pAeuv(o-jB|%(@19j*s?*FCXnndA?xb%xL-|um#A^30@PBmID?x zq6pITcbx-1CulSk45X-4D){-YZ^K8=ya3ZXC-tHQRP%RF-w&TXnNcmH(VvHce1&6N1D;F(7t-~OOK|^c+$^d|aV6h!JsGu{U{pn;~Y(x_$-LW2< z=X7lkEnbQ{ZoL)ztywKjbT5#~>0!^;gM)33MNx)kyMvQXIvGE>{yGH_S&CD}`tg~y zYh|7#QG!@(6Wd#G2?an*C(dV5`=x`Sb1etJIcjkGW`kx^ULXL0kUSc%N9PW-x?RMz zDt@+Y8-BTC7oNOe7<<}HT)KM~9`AIJA_1ZY7_JyX}JV4YIxO2TM%dOHM0!6lQJ%Od=swNwjI;*U}|du zP?L3|1C;XDgVtc_@h6}$uv9=_lCl5Q9*NcWv2@35J|G7rnaq{&Lw9FEs62aDUXVG- ztGat9kXO5I#@2%$yHIGam>q(a3@QKn`Cnd>B?J}L$?dro`Um^ffe;FTPP!=*p;D=e zL!X{*2um<9&@Z21B;K5BN#wp{$wG|p+>LsF4eeGJ?M@2={r%FoiSbF)`l`YjG-etS z@zX)i%uJ(PE}^g9hq>9=fUvm0`2A|7ikJ@;XM%$E?3qUYKn)QR%r+WwO)?LhZFKx( z8EE{Z;zE41sA*@?|Juo0mc$>f-a*-V`n&rFZ<>d zc*_}Y!Ca?_r4ew!GY-LtOBNz(chG8gQSGatO=p#qaKj_paMm67V6!n%2ONALuKmt; zaLALN64)e~6raET3`OwffUjr5^*n9Q4C`4RHmid^EJ*Q7=BJviFz zK)AJIUGHyW0mS(mz$PPMAu@h9HI3^Z-G;v!8$`F;!ex^axUbo;Ud&L*GF-5HHU56p za+HvwE3B&maA)ScUm0vkdwarp2Vx3j)T3KyVlOzG>MU-UI8JxS#3Z^`Bh*bw*d9mt z zs>md8_L6g`TuSnQQYMK$y9A~^Ghe?~>?`jeA6s2_7NI3EqKkiXeU_v#=B8#*8>pgO zE@Nt95~Wf}oM@-jL0?~=&^mPR64m$hq1|fBwFd|KF*7}bPP>D0wIq|PUawNPN4doXpF7~dM&{_;5WU(A z!lmvcDI+fY1P^Z9fae{33?6^*VN~NT&Rnws?>TTahRaN8G13D{l?pnY6j7XF zHm>4}cWuC@?%xEOT>h+GcL2Wq&2Qo9yax>>uWx`V%vD1@IrQ^UE{xYav=xl2M~_7> z;#AsRvvO>;zX$aZdX||O%th2;j{DgupTY7$SAlJ0Xr&Xyz=(D zuztFsyj2|IyVqWi6OTLA47XD17Sx6%5zJ2y9l!@Br5)qKi!Q>u-}#O(_f8%d#^(+` z0A=1Z@a7JTbNgogN1T`X7H1#LE~s%(-(-!DyV{tO5+}+j#KZz1I$WPoBq<3U>KONS zn)t^0P5A4@OMzws-<#Qk-!vx0=*p<4DNY|-iSrL!g?iSup||KciqJJ%CxCzqOIIDe zFm%pt6azCe7eb|5w9he0!o0NLrZ9z^^^nv~3Uq8*K9ZJj-h=D$4A8w7?3uBlq;KbeG2Vgc=0AsWr|Nt%n%JhZxNr>zxTs%^<5*U;&{ zB#QRZZJZ6B(^l%Ra)%PuTrqOD_nkfPdj||yaO&>ft#Lh3y0=f7C96NUUR&h)np5Lp z(Fff@o{A`XI`zwHVB8J|G#cimvn|#s1(f5U!g^Sur96+|sJ7Tzr{#+ouG}ecp*ejS zfQdUFw$q}E0KDaIr}MZt`oJKvjxkcUm|LwmobjeJ z@Xaf}jii*|B_nlQ^yGuFvfpX^MACFhQPV-G@z|ckIPdoR@QsaI(2`iBi+xtEz*jH* zDqec@(MADt1WfnR6JHcZHlM2s6Scpd=%@Gnc@ax*_)~cc{ zq3d&iZ8M{wT~WtR_VJtF{3b5A-~v4M*yH)NX%n&x$M)56?g8s?@BnR0iZ+dMoG4f- zJh$hka}|hC0P4u4r!z#IF7{L^IN=ZX;;xyQ&|{zf@>g)?KfOtfd;s6Q&?oPGqr3%F z>4WmIzjpa$_@_7hlWL~246hj+z^C^=0JX9)W$Jc9ew^*1U31&!Q6*XLD2NW`rDSLz$XzMp%e>f`A1>7(M=#hz>Yd(?w(scs=UPf*v6cyo%15 z&n4rK0cEorak-?dvYADNoTk^1IcX`vV0p~08-pS?J-5utx)5Ys^o#Fjm4t#g6&KOT zk1iO&p7A|GWzfZ0`Jvk+>504IG_pS+4D`m~WQ$W#biSS}*a0rdYv@j3#{yN&twnY3C71sNz zB23)6Wjn?ej-k_TquFd?cz77ixjBiB2vC?_5Eh|a5}+XLL`A!j?G(eL9@lqM6ei(VML-`Wg>v-HfWN#6*I2vm$k^(KB<5^MYnG zU&Z!Gwn8Xz6A#%0ap&xh;DW;*CajPLEc${#iO;{EBh&&2$h#{DW*?)ewQ%IC%OT$`GI6H{o?H{G7Zu|&$Hw}P7uEJn=%i@7)OmM(@SK=Qi}xZ zr9NEr$* z!uxK!4_7_91#N3w-{2rV^5Ku*ZEraP)tc&TitgMahT!l*lwq^IXF=rAU~w%xF{SU& z!Q9UiV+?KOJRH!2m%{=oF;tmE##`eI7v1*&4qdPq!_5Y6MGIGq z?+`~sr^(MRtJm@E0}sN}2dd&n0M6$;UCNlB;9FCNZZUIa(9^k2gL_C4^Z|uV4fdrF 
z#Bvqa6G!oK0Fp>sr*b%$PvMt`_v5=myRo}Ykt&C06DOl*BKfb5`I!dZGH)T?(Z3J_ zWoj+dkY)WpTaZ$SEYk=Ll0IMl%qJI9_=v<@PL^Y542?@Ri%PMC{h17Yx?>BzyL$&V zO--XF23~9KIn*&98JuU!ntO$C1V20f0RL2?(s_90~-M$V7%l6A(XQ zbnj7c;Wrt0Gv+{>?2C2H=nK|BQl4-_rGQZ(|Dl18>>9*$_H0wEZMOD-MW0j^|bJ_*moGId#nY$LpXx!0Ihp|FGb0Q1R$#el<*tHX1 z-}#uLo^nqfQy3wz-axLAMuozCuRjaDdh77P>qnsCJ~VipRIqI-7AzTGXtmy7zHSVFlBIxG5sYX zqF9R+*c{523^{U){G4j}tAlojDD312D(6e{UVC1LGU#pdH)ZL`VoIECvZg3* zb9}Cx{8+~MZ~qV;yz{G!8AA_k$60T`9Cz*Ck9pY?{%Of-yeO4N zxi%$^lct0vL}sSy`|UY-0277oiU{bnnVpTG^$2Ef3HP1MsDrzl_^0Mw8%_N^MT}ub z1Fai?=cE*|Owu!?L69wT3Nrh$ z0p8uc2=lnsGY#a(kI4~zQ%4}JqkK=6)H#Ql=$Cfj*l;l${VT1!%);{p?5G-4yJ{Bvo6?AvBjuiLTPVkKMbk*qBJ@eqSqsc0UF= z>!l{g^q@v#e-lUZmfsL>c|dICnDB^{1E72y2Iy1*m=dD_f%*>Pq7D0tnxohHIXT3bX{^*!)1H}dy z%GqQjqo?b+oBe*S@Gt~OCVCewvnV`twAzCKM_1gD=qrsw3+Z$)vLpR8hfCdCt>OKT zK7dCiXAGZlbk|C}W`p>A(x5FCXK$HUNrU{M5dZTxmX|x@aje}o5d6- z`58Nvj=oUH$wnsBe(1n3Iy*Zg+n<@9MtfVEssNIVY@>wvdv*(y%QNWg>=c$h9W1;OkR>K5MnOeVqjumuo`Vq~ z5^j^snDHI$+OZ$!z4=`1+p()907#<^X>*AI>?ie++{a<()pHRH#C zfOU2~uW8`hOOnsZ=uQ37Sp+E=#Yw7?$Y5nkXryKcbE%mkD#d|XNMjRfxP0g_+%i6j zX^Hxi{LVuT>7Y;5(Vb13*fvE#S z$P~L!TXO>N)Dw_delpS-YIUTL$&o0W1IU(LonS%{9lk>5a$3dxz=V+^1q58=OaR7qs1#gN5T6w%h5%9KaKI)TidL@4g8 zTRVMXClW(|VUHxPQg6lfS8`waEIbkMK+A(^6?gO<=tqrH-y{ig4wE3*d^Kp`1KS5N zI5CN@uUL+GJze6&nf(3xu6_8_j-4t8JV$pXiGXMI4B!iESE7&YY?5qvLKg)tQE=zB zF1T~EkLpL7TPnl-+5SvIiU{|M(Sz9Tn7@$ZCnP9b2Wm6LMLQ2bN`CS%?D^~35C8Av zW9bM`dqnAD9S>c{R62)?cI?2{_U*)sWEx2cQ(vbBLK)i1t_~F5@J7_0dKxMdQ|M~z zMsZ{Usi`W^QAFx%H(uGuvDJ@kd}{X-sgcZ@<9{nUu}>B-;Y;@DPolTKN7VZ$Y{JFC zw!q2pNofUCCtk6^Kq)C9jEzm8y{#nef8-1J^gcR&b_q(*Fok?P2#yo=O5@+7DKFra}VZ*bJ zQk53cXzMh=fGEq~dU=cQh~t7anD=1d!I?VF+wuq=n4Hwh)09=Ww%sZ_0(d8xQ%*wXouN*Wk!W{2w^dcC)Cb%V`--V03ZNKL_t(8 z0Fw3y)l&~(bYxUESVvcf1OzM*Fk3H{N~$kzwRc(M4RzrBy~qUT^Rg-Ez^A5W1l}j2 zz+Qg+d0vQyq6k>vz@Qlu^C9<1XSoYnP^$_Dfsv!M6sFTCPtWMaXNzE10iGS(cvls0 z53eeDJ@&$*tUiH(9ZpWN1mBJ<6^IY=wiC<#;(Q{B zb~p^md$h8bn3Hq{Bgo*ssR?{%&lddZz(I`q zyccuUkt9>!LZ)U}K1qs5u^M^&YI79Ns!!l?rE2IJ5P>KfKG~D1HK<{QIyB`<7Xiau43sDWhmgz{PQ zV=EI<9a&6xkEMwMTS0tUo@Ow79e*dVC+SmE+#)x?2LCu|PYi-}75F)NJDfv9;%Gu` z+cGBeddeEl>cG1Hqu{_M1nMGjkKv0Z#FsJXV+JJ-4s?ZvMdllMVBv<(@V|%X|J1om zc70j+?=V_8a#Iq<7imDqK0X37r<5JS+SJW#ET@x7 z#pqDC!Z)mFpBrbv&WT7Jp-q+xWH^ijBqaD~a3cTpQmi14JC_&m;e&hdaHkyri~gsn zj*U?KZ;nmi4@OsEoXVZ(pjs=7GH20XW|0{&XRjrX%vpJc8OjERnmC#xWe@2u(+PJlo8i4Aohc z|BE@NnG4K*bI~ynG4z8;NJFmBp##~$HB8aKaYS8MEQc$?8P_?9@N8RuYcMLqP8`C zlMx7`HZ}Wolwx{m(blkdpD28CujCvU(Vyzz^^a~xr=8*-wrs%={fcx(^sqne;j;T4 zz>nvY4-=eObP4qYc*n-gc=_sapi4ryCr)l(`%5cvnNPwW`iA(ChVSEF+Vux0-^GLU z`c7$C7wc^S={>hK?lVl5!x_jJl^^P}7^zh7D;Vo(&(A(j`U zKYD3dh=)ENSZn%;3@p&BU*TIH{KG)NI|CS~Frbj`0VxSkIEVoQ36C9l*q*&+&Qj}f z@|Yu}N89L(S?Tp~gK4D-8ujG+vBLiHM&7VkE#?!OlEFE)mqQgc5LU96n?rD&0Ry+9 z03+KP8I12)h@Vf0J}o;4@RGsXAtH_7!jnG2?t2y>*tOUIATIgbl z&e6v4D!gV9vPZo7OJ!TT)iZ-B%ybJhr5?*^b{tAYLMb^WxdgRB0p%ozdZCDVE-(Kz z_;0a@O2T%olN0lV#Ym&jUsrie9uZcZ*YUz9xsb!XNqznA_>@j@w&`#WOViwCD8p(2H z87`HQ$4DOd<&A6bvP&<;lh&?9ZTN5yC{r|Vr6n2^5>p~Wb2toh8sw#4eE>{ z5r2?JcX0+g@45%?{l`z@8{hp2cAq$A^3R!*iL*5ZZR?Q3Gn*_8OyQhDS*9SSNmpA3 zC0nFNHu&7ofPQaaS#2Ld(1T$M$fF203jnO`PauEB8jhG{-8*8(0w+L+qp6dQAf?P7 zge@-n)HiG7c}^YYhg}^=tQqJx;PA~^>pQH(V%O%=@xtB~24ALslL-;2iw=QC2te#U zn#?n85uZVkrKO~YKi>5)%3a{yTehHDqTrk&dMo)n-m+sSK5^)X>OO?MovnQm0e^dJ z75?`0^(b{(Qko9vM0n4@a39Zg;_k1A@a=JuYHIjThS8e6)PIyK=Fd)F8b??eiY!=@ z1wbZckp&JCgnLAw>p8oJu~?Jc11rA>O2q_%ojfTpTP)$NkL<)J4;(;8`i$-TqxGw8 zy#Us%LjGm10M0lIIC>Pdd=>e66Uhf|{d`(3Q8&LJj+lOoPLMTtGW{v-%OK$Ru^9}H z3<)GbB|jQ>nIF*Anm^nA=u`>k&(6$9Ilwjf=|Bko6YivQCl`RPrK%q{1~YyJ7liid 
zyys@;w47khUow9hGRsi-8or+a1G@)YwwQ>4IB;tfQaFfhh1v=zT>*AXU-;pHM)oxj zM55)8EsM;-Xx8tGBF^5kO>O?ypQLU20c-9^K?njY_4Jo{CJ*UZWwc2`#lVdjUu;=zxm?BdANw5M`;LFI(V*Eec?FkWxEb$z z-Lvs)fB7Hy{^11_%D^;->8x}lgI>y$DNm$_0iHTozE8)iW(LA@4u$J8D=86wv?q1_ zWaLjb;8`#fDlPgs_89qj3>-MRwIYZ?9yQ8q=5rDlj2225%@;6|D`2ctLNn2S%QE92 zk8(~Yzwze+b-IW`FgPHw)!J2ALRcbSz~rY_(gZtmJzR5WFK(MTamqRAo=Xr3cww!9 zS52+O)JhAjq=V*29b+TI8YYPly9vIyrIB)kaZW5LVj0cv3C)v~Ac&5LhJp5SOUFQ( z!@*nuUp=r7pV_xtN(40UvhB>z2wx**f}h}N8#dw9Pq-MDZkk5DHIKsLvS=8fo=11p zB(P~M@~54H;`D0tSl;81lDuX)-Q^V=zVD~_%*VchfB(X_@Zg~X1`WpsrJXsc{0P9CEg@2jF@Om_1?0N@b>G z7cy3NT4w*G^k(H0+h{6h5hXqj8W-0>jBdpZ2==F?CpD(6J{M{{}H zc;p!Vc>BXxlCiia2LW_w7d4vr_<3hys!L3b_gNuyZ@TjU7McLq^4c|$N2PV;xf zL<0y32)H+dn`SddOy5J3x@>Jkv6jd3o+kXLtr~fLF=llk8%k#*u^iUj`K=Q%w z-KevO%os4sW?h>yy!@V}0oLuE4A7b?mzrv}2 z9U<#g%4N|E;1~4+_&Rb9eEemv%Tk0YJ<&KyfWUgG1QB{pd@VD69!rTE78$Y5lqVw4 zmOw-6@+$Na5Afm5QDLDB+HvGU(T2>yx|8om*zG}v2r!-t-~gIMc3L0M)z;_WBPSLj z&*(jN9&sq(;fB_XEQIk>^~Do$3*tyMAOPmEWtJ1+kf{K{Wkhr8!ju&T-oZdhbz zJ7oAWo9d*f2;`MRN1mx>SIk-2!bX-od&65CPL=Y%E*3dZs@uU#r;Yhu5A*G|)ZLX5 zX1Y6OnDmsQ>y6IRVymEx+Gmc!{#Hj9f`w?#JK?tqg{;y<(1YPZ5tF47CUZrsu9PrU zC}J$1M>AK%i0mm=G;^YN-)VKx%IC2=m&f&o_u%GZhecs8Gc>+3R84URz%!~t_`Qi~ zOfvIITNrCLFg8+0jzd`366AxhgoNm^o;@%lsrkyH?gH;W$G7Eqmxb7vqSvNK5G4e1 z=oV|(xzfU?4(-Kf4(-LGiz|}YIASEeja?j_9>xm^o_X3vTz1LDxM*rU8tfuUPavtb zM6bL%I)>!5(@{A69He7Spj?%i2x*aJ8zL%s9NN1d*MI7ZxaN~z$35HkpiSbR_cg{A z*q+TABzV^FC|)?c8mH&W=yY4ME=4sljNtI_F~$>oo`h%VeDcl-Hwf5*LxunO-1JoV zGuNudN@`bW!(_6X1(mdCdXFM~kC26Dk*%-4ED#v9BcAkSRwA=Z;+3PY#aC}c1hv0s z_Rd>h`{?Nz!OYP<-!o=JG|}?S710<)y+%udyyxTqETmoh)t((PDe{lku9x9JJgkhi z?WNe&>*BZXzYp6N*&m}y z&m@)5`1nb7fq)>p^_`$Z^@@A+GUY%sT=|B3!>7h09)r+(-?*`4&o3t5pY}*WwgC z2n;`WVpbe*qh7vH`g7j`*R=h8bp8$4wdZ8-s_XDLS4+an%7$+!kt8^{=7q2YmuO3e3={4 zjpVG_0Kdo3wteUzWZy?9;sTA8ui|awf9tD%WPAl@6$jrqkUd)QM2SohG^njj4$lMS zLK&aB`g;7!-@Ui54UsP*X#V<3HsUW|`XoI4uWrBt%gea1QNeEyuSQjJWg7-(yPruc zjfAt0mGEPCO`=^T-ML;sC()9@Qkr6>-NNy13;SD(IM!?9XnR>iX;#QDa6}-oG7*yI zFHq2JbEI&{IuUy1bsfe|e3f5H`SX*nL(oMiQ z9Vdz@zIfy?Zr;Byv>HZnie^Dcib@LnO0|kV8e4-^X^Q2vgOQ;s)=Z6ox|npt(j^f} zmExg|L196x)ktN|f_FR+z2g~6R!bIpB9k~J+b=#C(3#J)*0vG4=*?eTbotnI&#YXYa(HqTRVdM#tq0#Paz$yp-19eAw{lO zMUoUTw|_sr_1T;7!E3*c@87>23t4E_e_KRf09c>P;e{g;c-GJ)rVDJB}a z0T4Z=6!5kOa7k?huU@xKkREAq%@;JFPYU=y+qUBqM-STZ2LpfvMWlG!x^;NX<~2y$ zoNlMYN>xF0l42CZIX5pZvBdYszjwc2$&_-8AX@xPd=o*EyHcVeG1-%--~-&YN7#k` zO8)ipb1X9iP;SSpoEjXtbv?PIqavn}&81|>*7{rtpPD_6zkc`uEOmT%lm!Cz;rxd| zd#6-F;RP>5;nGX6yts@)V^oI$mF4DlRlDClK>GbD2xP5(Yz<`g%>_>9Pa>d>Va#m!QbF_{I-&2qY+lK;2?crN^G z?Fwi~WG#FGDOn`a^DjNmyg?Rm3xs3POkVq0-}LyF3Kb5gOqcc*qL8hC2{}glN9cl4aSAf3RojnL+92 zN`!ZKrsEE65SR;#-T`TS=Dfsmp@L6+^m@GiUGE!^0cKR4;L69Z!C(IFlktoI-C6+kQCTLESA;+`CalAPodF!2vyz86 z|M|3yW1SVuc9*f(ZDE$PFS>0kr5!AH85|IvH#UD{K*IeC_d$z5x`jhX65UqI4x$_~ zGo}3skIp0DQL2SJCdwsjsWq^s(m->(hMjW@xOvB;!ihNfOVKVG$6qehaQVb)tY_h& zwTz)!5mQscDDy@#QxddJQuVAq+43#Tp4^yH3P{asniP~#@8U*8)p!zur1b>SKUS$o zr@$xnJ%S${I{~T|YtpE9MkIgP9@hoVAFARH&U-vweAby5<7XCD&}Fb*%ps|jkT!>q zTeAx3`c=SG6A5hw;bIDc6-yFC^_GC!zjQOM`||hjr8|C#Lo4&AyfZva3JRT-FW}i@ ztMJUm1lDwwkS0n3GGD^;wTvXE+gG2iXnwf9gtN6HzL_|D$^Lb*%B%|h-PWoE0`_`U z%QPj#IEpvN6VQQZjRi-dp2Td_K#N`!ruI3f81)Z<>B*UOwzgwWIZ%cWBCz=~xjhE+ z%Y(Li7h3Af{mU!|m2^Iy_VCVK593**6ZrM937JVR=x)kR9|pxaeEGz2{Nc`ru+-J= zoY)@vIlgdg4F7QER@7S*jnne5*67LA%F7g4d7u5>19tCq@MoKmN$?3ENrPH=#7aWU?q}c!u-~zrFhg%uf#9Y7qF|!qRHd?~7nFmU#*RrpM%OveiKXeAtY?IW&psNCWU#Z`p2O9NEhgSXx{Woq$@cg5yVz zOS_*i_X`CXE<|A=*7J*$9=4eT(lsDKhhB+HE;DFfditHUEW|yM05vL;p+mwg(Afs= 
zp?x9Ai+o3U0uToR=3Mli1#CTw&a+Zc*hN}f5)Z1a4bPno|477uMqO&tw##I57zd|(a1^3k<8BhQjv8?{OvYo|7aSi*0OOyQ}GaWvR&De}!aU6LFDuVahpFtMR_wSrwm1)SsKsxUp{EkOrof3_7rWaXY%1q4B=X!W<7lIzvMZYk%Bfy+20?0}#hSSS|p&jnb2GCT8mfz9o|&D+P;&@}4;?|H(GX~TVSXNC zBO_?FTbP?&!0_;pIA*Ock}Q>zt^t~_;pQ-o9X%$c1&#rxt^oN091|?F1SFf+vH*|G z41Z;L$+!V+=>m`mfJHAAbR_CxyP}LAF0&1h@QkJ!GIqF7uzEj#RzoKnjb#H11~eLv zBY^M-GxaTV{vLez^L1xyzCM&4`r!AFmHVsWM{LG5?8AXbM4q>&oYDJ@WarUcR*Uan zK<>{6BU_DQtacUZpAkPH%3rgXz&}4bn*J?1dK64$P@rB@IbXq7ZoCOsz44vstgwO4 zzcg5Q<(9Sh=imEzyzrf0$Gsik1yfTvqg7S7*$gA!A(^B6&RYKlN;t|^&c+Z6d5sP$ z=YfD8PFNpuYfbpWBLk7!`S==KkT0Ru zZArOddc1*VwW1Jk3n{EHR6&{tIy~_D z3FNm-BAFgVTCXD~h(wB%r2}SVL&HF!j(zv;#QQ(+d3@%3cj3|b<3AG-r~|mPRKs&e z#_`1RsOT!Jblag`Etw-*J2}Wkf&h+J)?rJzZck<;R7xyQ>WE_A873#PQmsTO%9U0U z;QADiD3(y@JiHYwNLzdS9XkVeDwel~sC^(q4~w9$rKxXi#BqM#3iUr&6zQuo*g4_& z=ze!+m$kx0cGHj&u^8rKl_Y5BkKH#;{rk}F9eDAowK%s@6LzgmOVsiz*T{UnfIolu zVSM)J5snWyi37m3(@1)_YRhK4WNJ+G?WL$Ft)5(G+JY%jjtVkZxiPSC#cwgiS;)V|Tr(_k%!;zbt&&wn1$<^+-t-|^*? z@~X_%H6zap0>JB{L#A_O%|niqGP`E$h4$Ce0SiN)Wr5`t>Hk;tJ+^3e1tB1TP%{TE ze)j$MOzDqKpY4Eb^;4aH*815wz|SvN%YryiEQrKFY7UV7Ph-W-gBTL$-@KJVe(w)O zNst;nMH#y87u`)B2)L|80OO5y*`Hl!Tc_Vi>hFUqkBZ1%FVA2*dzELi!7cb^%AEe- zM(xWJe8akcP~SEka7a<`e-c6v0SbD4ZSitfzPU&-^!=H1T(17vySLSNg!sr z{Cuz630n_D1AHgS-@NV9NJ=_@h0as!

vZh4I0q6Zqk=gV?{kD4GO)5qn>j)c3^x zmmf03=zjrYPr?~^mSXIfm4@;|oY?67B@Df}RKjJ$QSYYd001BWNklunZsB0)^lg5!t&&8-^n{e z=Y^LD`+bHiLo3Bei)*)w$I>$1v}YT>dGKglpUDCVtNBV9VPXvFt1d%v_3237_wCQ7 zRl;gEDT$dsVSLfB7Djyw!_5&aE>fM3Dt>t^E-fj9&-cg_K85q?+yx;R8A5vlx@anAbK3{=HGz3NbPct218Hm{=xZX=WsrCv=|u=ozva&yy4L#j zXFF_I+5HKO2aVVl5PUQP~+b+Lr`)M0^nOmER2WH^6Pd+6Hog;nvd_x|h`bgV-&`pt*tsd8%}8&yk#yjWMao9DnNQldZ3fg;*i@;})X+y>maVdjBVJ<9+ww2uG0rj4yuf(`&hB}z&<>G-D->{#8G@u0)tNtMj0C2yU@mEyY9oo^Ni?D^_5^_xrmQlcpe_l z{J+E507QkX3~6goY(WgD4jf8u&!9svFDz3{d#XnI&0}-;{NW?`lXcVBLfsqTc9^=X zmwfvp+lV}&wnn=|;+Y(w027gpLs(f_mQ0(@`N-j;G7N|}gie>= z(-|);E@HSjB#xVlg&9190Zt2K@Ibcz+{`>i*w&K*^YaU+QUkzr}PqGzLLx%+*QS$(wS@#P8e4;B zmKqqPbLzDP?OQ!Df_kNd9>*@5f~5f2l9gq3W**n+abm=@NkDTY%hH5XAy&@TmrcGC zDrYW_xk3p)SXjcpJ+d9Q96cuRuKOT8I*;2!sRvwGt>6!~oQY>`T8%Mgb}S(k66DAY zND4v_O@_)y&RUP+nQPH5EFmp+WXf5-SOZE8kPX4I06In7w|zg}^^uR`3wQoE7Eh8@ z(g#Otvbm7MZ`3C7lE(E-BY8hiru}WNc*m1+VZpt`}*WklvBKRWZ9`e>kdw-Co zb?6}rR(-E)@N3&%YCf4-od+8ZU;94f&?{L|X^LG7OZfi2L->sit1u=ifU3|avmfc4 zc#p^#>lN{aM|R-ThY$UXU6liig>N}yD_*^NHHwS#(rPIGDS5+)fu2$K`+3c}!%}rF zW9EqExhY~zA+66HIEpVHK7u!IUWYBEigp1Ea>ODscNDwH2W$CN&nqCUAqcbI=q9r- z4bWsaMNypIdqtA?oyv`qh@aqxX%DZx|8DGER)xe2=gtnvdn-cP7oUyt)1E~gfG?z- z71!a{Av|=Ly^hwy>J;?hsNZ4qS$TPvv7*?*~Ic9|`n&r2O&8UEVaZ#F;|H9G}* zDVoippe{&<-RiWpm*47OQc74_5>gBqkszjgYcVf@i%Sx_ zSde!h=;v!ME>GB{Q8Vb&vOaxEArsb`c@!Lk03s?I_=}=aLTKah9T;kB5@LU0h4$R;Um%knl-TS~m0Fc9mN)_L}{3875C)@Bh-#d(7TYowx7by&0lKVo_ zU?H1s43D(!bNv~HEn|c~H$0k$>H9+aljgg@Ky<8MTOsc*>rgn5uHe=~k6_2bY|xO; z(uk9`)z5VMx<%uFM0!=_^(4ej43A;`)M~6>y$%!2F>GA32E(-m%2m$X;M7dj^k?g! zI1PT8!&&aMaD3(%zJB|y_|{$jEdhLFI>L_EcQI(~VJw%&bH`WVS4%@!O9TWxl)62v zn;5~6X>ZtumCTp&xk|mTTCS-Y>h06@>zn{tw>G#*o{KT&Sqh{xWbiEzY_WvHl`5{= zvj?BtwF{4OkH=1hpevncnqoKwp1EcOuibJEE*_mg!mb~s4Ip2sAXm<#o9m(5C?a>x z6bh$rLeiK*PILoU!|9>7vZQKpV;g|t5N^5UJ9yW}ug5L-K6q+=0Bv3hoL?y8*N3KX zacKzkGy!?Z#2;DLCXy-FZe1xl1(clxv{|%$w1YsiQ5G(Sj5?O7rMe(EVO8Pf46Tj# zmDnuvw2H>~B06QNnpt;!c>Syh9%m7f7wCIJ0;xVGE#>fmM;^m1$7k@C zjcagjW7rT+4`6+mkZ<^qwq!oV?U&rIff7JaCyv85<3{+Vwi_fPex>ZFQJ8#^m0b>& zi+JPqhw-(;Cv22=#v*V-6BdFw*aIR>1Sb>41AGnlT5BnXxBdBBaO>A^ z>2s6>u9~?VzVedCq0*kmYi`(y^QX4rjD$7QXiA-bUUU+|jt}jZQIr-1BW^QQ(N2RC z--FH(pam8PO$PQT>T{^q#!W|eV)w$Fi+*M>>PeY@RwBs|m*_I(0~vU#)321u*t&5O z9)H?-IA_Zln4X+KwLlflygD}4npxL3(wUIYe3+2b^b`6PxejTMBov=7;QM#~2seEH zCLBJZ*^(y`!KhlNHc5B2d=4)fUWMlst60TOnIuK0-NSgJjCB*EVDFxk&`9jFb)r## z*0r;40n=3%u@r@ICA^mNHOE40W2$(5D9?IZl{$Vpzkm-vv>o4^If9u1G(aD>9%pMI z!5^Kz70+9>0n^B#z;Ve1Fe98;~&k8c|R@YHGz&#$k>W@dfdn?2U^bLg?v)Ph`|hxWDT2H^T^ z_u3*J-HQ@HUtoFFc`?3M|f-=V3*TnWJAeQFDy{i!n2HP>V5g9KRvTE!o?;+lEbdWMI39d;NsD7q#gXu-T#BVi&~BuFsE`(Q^gejdhUgIdc6X4R)h!Ob6&hNOUQth zNChHz6jSvf0ql@56pI~kbhh&e@aBhh;GX#_v4BRsE=F#Ceh$qhTLe4$dBuXMX65Ab7)a2_5ls*UCbsdBgwrBX zkfaM{`Z^y#B~%sOmo`L$`PCWf9qLyA^u?XwY*8{Szf=bqZ7jRN0qSwvF28s3iyhaO zXS@Pp1i_U0?qnV~1_aHx_!h`EgGY98(jTKQSqq|}O9SWU5QOp*`XR`^ zz{fXW^g(79!q)WrB$CT+btqlI z&4(Vr?&Udwd(GtjuXX!S((!v3B>+5|=I{ufbpFrbDHlBv8>UvH$c@?Vio`JoRf&d$ zWz=G%S(=9G z%~fa!6T2sVJ@l(Hj^2=?6-=@c($1t|13GkP{{CC z+aH!yDDc1&2&$10Vi0@eLDy`-if|?*urtZvVgdIYK8nS54;N34qDEpT9kv;)K)mnt zWSt+cl<>NT?!#9P9T)VaAH>&dWn1TEtJmXAr)@xGX-W1Dkt;DjsvjO^V0h`u%eYxk zYPZk$2huu7J3(h2yi^C&EjN%{OmA@-Mf1 zHf8UnY5?RhKQ}L}eH_EP(rROLbVS-*|$U@*bc}C>H~lnERgw6!^vI5 z-zx_|I||e}X!hjNu?|}GkpaEt-`72pkp;7YTxOK~+4`n&Cn*2M!CK@{Jwrd$rq$;g zL28-RjxFIpRnMJy{Jtz=bmthCFa{%2hrb5*vD*M&j{d&b;l{^`f>^unj!Wq|R?9>9 z{B>WzRd1#iKz~;+{m1Wboy0#r?_B)pNBuO5;f;qg3c2XThK~H^m091`}Hs4TX)`KwZtNML1Jl*ID4_XNJ(dJA^vVdaPK>wSX|?0FB5XrwY0e0QVllNlLCRZn z#7Q12xjgpd6a4#*o%q6m$8eaf0v<5iJ?G)L-6wMiUNyNMFWayI8;m3<;$;lvL`0~J 
z9?Kb1<0zc97P-j=P%nwBu=eorq?tJs$tDJBlep)ed-0ZkxgKA+`(CuzF%bDnf8-26 z&aGDP;^rzmAsv#L4k)QIT#q(~pDzQRsNSfao9j+^8%QzRT12kU!rfNPNACDi%4myDo-Z3})N%CaQJMZnsQt*1BN!PT5uqTF2(Hy| z;`j*}IZBemv17-CG)P0mRzMm)b_j?JZ$c7ZNrDQ#mu-Di3FPkxue95A2+oi+jdvT^ zBXiP)cd(3FttJ<_z)pl>QEO-vd1L1+GhiQ&#X%3G%{>RH&hlx7_sIgG$1pJS;pRP4SYdJv>Q1s_no>Z z;vmOi?2|(JzAW3rl)h66nRo^dN-*QCiAjn=p@ciX^CP_Rb$^A0nYoi(=o2SL@%0y8 zh)>;o4?go)55F)vjasXVu0Y4xa2naXVwg3H>vPDFv)8K#G%dNsI(JW7<3#ku7~C|f zM6QmYgYV2dhKJ@(MB{g}mZwv>{_xz5Wh|c>1gVO6-05fIH=g!voV9r?($2DG{9@R; zC~yZ0L`O0!oxT}+mcKYN-DI_UqFtdzOXs=Ub=WjNB?7DXOt;%YqgX(tR=|IJ?JKzP zwy#Ocq8bbDT(Iw%btTNI6Fj*-gy#%RV_m0*N}-FP;VR~j&tbY!#k$cEWFQl%&1f%9x(nhTtLr+e`20bnxo??#BK_vhg!3s^6(qtDuL!+Il7~Ups;7%A%AN zgq+DNJtzd)J8U9jxJ(Cgz01n#^9FuN)OqLJ9A0zZeJJFLc;~8VJb7Z$boexLlqNDEM!BK8$=&=9WAN)`c)&Ky+8fkH|z!L@ZyOJs?X&|!p_v?DjItH?5Ymcx$V$h`;9V|~Ps<}+>_Z|oqb?rIQ;Eq14 zeMjrsG7$^BWN%U(E{8*VkKxM8uEefwI|mGFf>pHy-~81JaO~&|{`#)NxL|ZWR=0hU zm1c}8!J1{^{gJlcSVsY()Y0pHf&l2S4Z2|_p7Qv536|3o-=5oxpUfRK6|JmYO55DD z-Afn7Ktvzzq1M3(%}R{AYZLvcaq@RT|02Yp@Z1pZt1)iKZdb6 zIA(b=2RwiMDqKFj1!tFPlC??cg6L71^CWsm>J=nYb>!BK0;@)mG|PaEFPDp_h6qss zq5+Lj9Ghw3->`W7wUk^<+HOL$>pH7=67m~5Q%JDKYqy*F<)Zm&3(ijyxi+yrq@BeTo$6GAZrrE zW&ye;FkTGl;ZU)HD;~TL-#Bu#F967_pni8ry@HQkcp)}{#BmZ%wH;8Tuf?qDjpE*N z)Xi(fj)qFwAZV7}2%cY3F5{~wj^WDtA4EN0!c}Y6;wj@}g5D@WTSFKLkHk)7Bx&8c zUXs@H)W?(sSfO*mY>VF3xOT5AUr{vM1%0#t3-0;Z-JYNTcjbZC-FH78qbSfxIvU7`(_=VwSGG1g~fSM_+yDc+5{_AwAvg8tW)|()Fa!U zus+KGyt$&)FO!TYHl#3_&k7(PpjNAjkzb+cPs(~(P2_xed}jQVJERyPj&q4Ozhet$JW=qV2f1uK{P(s$U1JD z0tDvhmfyS>2Il5&JZ1%9zPg5Mfhi>+NnRajm~ zrHwVQ063aXu_l_uJdz$eulRmdz)(Gb$bQ&hp%5Dk zfV=J>vVSw<;=U=AszOkfA&)`if50ZWOOp4BJ#f~GB?6gWGXLkHohA>ml#>*T?KWmt zR!}LH1g&EGxxsp&f8X0yMRS6mRjuNK2lnG_JGP-i@~0C_1G=gAXE;gmwoRMyyPO|E z4jG9SB5$qaSveLhYIvmwF0Yh2qP4gf6T6EY=_ z)$L$^v4FqWu>-dpJ}!GH4*latD`l&H{q7BErBG1#K1on1>5v^}*37U^oS4Dr$S_Wv zn3MKDsvohRzh0}!O{LSPGiF~t|D|f5_UtD@8YHWKc4h{3BLWglVEKR#l68KvJsB9N zXpd#<79i;eUzPrqse$bl%B7-^3_XA#nxH~N7GFh5fEqd3LywRE9}VmqMYCLawQcSw z+7cqM#pA9k9m8>!{b&>!kM|62IRoPRo^KRF$4|vxm*Z>zs(S~e&V?rB83Y9_)ANjzw`0zix zKQQ9;_%sfwXtIC#H_yX6zjhyfGGD^aH^-2)=wwtOOjm(zCxKETf}UsjayWxh!;{p< zDp{FR`DjF~{pg;hqqyb7KFmqiN%lOrv7J~r7;p{RM!t({{K*&o5?=bE-^5U2iuDA$SJ0y&RTT+9(Z|1g>wdEF(`LnWO`b&}dYOs1`XSs|ONw ztEIfC&9N>i?XZ;HZj-3iLu)0)iKLFJ|KoGGblqC~!bPWJrBuLuM^4~9*L)1y4(-?b zChetqgEUuDfIXK_F`O^s*CwX%OKAl|-9?Nx%INkwSXypjqFBc16O%HUxy9+B-Vbjh znw1W%C5O;uA&c{qC+u~xT zKxpen@TR1PHMs;YU$YUHZQ6iM`6|#}(W#*}zddEyuv`GB9WXtNWc4sGF^Z(3Lyjmk z1ipqMRHZynYogOF;&WgAHs0~E>v8|lqo;6-I>vrWK984;OykM5Nfek2*(?+3-xe(d z20%t0Oo3&Jl@$tD?DR0#<#~3nd8C2$wK7Vf@6+{4wD#{vkj!xin^S+Etz1JTUFv)w zqgMpSJ-;U;25ecZp=-n0d$v#vg$jw3WY*b!jCs>;MftCn-pD24PwljW#a;)cTmdx_ z^+b(Y*Mh{~M!c24$ESVf3;02=gWvt>T{yI2C{*8*@Q|>FCpT*N$VC@m17~|IFUxEJ z7Y5?11R)mQjS|SS_Ik7LQFboDDc5?rjJNFGiH|?FAEPOBpefOlrGJDx0R?C<6Zx0g8Ck-&f!qt0ts5Du%iwpobkK?1qPvAW}SO(zw zarM#|!Bl&heB|~or6gyGe)kGl{T0dX1%fZKw%8RmKcOrRNfD*MrA3qkMpLMkt%Dp5 zOo(0{yumCK<&hdaBW;otS}jrfJ8^7AS_P$ppshgw)dK0r`B_WLJXTS^=W*%aMfMb^ zPz|h27PJxvWlp?k1t=0AB?7DaWF#7!&24IjSvDP9nGv~SN2?Z6kCl}P)X!)c!JTiP z6SinVT7%44MPNg4!uk}W&vT<3IeR^nZ^9Tg$Q+%+SN#Xp_c$M55NrrX^iA)uoAs#s z3UgGzli$IRC6L0h2+BUwNMZ&-R`M0x{=FaL|6K8Bm_I(-w|$v8PnAme`ct=HWoZ%b z_`wX$8e4+~olL?$ZxwY4odA&$)8~@aI^FnqfFt*x40^kdI!r^^*}GbcxcTTF9PWyC zta?NblDyq7jS5%X1%PM)hY(S^m*%RN0`)r3ScOL)K7hA>;6HHv z9Y4hEz$8yic~gvVu+h*cUNF2yq+$!a24nM+W5v77u8o8U2LnRR9!PUoVEMDRg2m+p ztQ)G~w9z395si=#lz}6YBNCha{;6B>J?(*ku6;wve5A9R5BK?o&iC(Qb3Rt%m!BMN zcHe3nD{OP;2t?>a1XlC~(;DQf%H|4NBU%2+6;P4x5nChf@Pnbzc5u~vNO7cCz#ra! 
zFTQnrCX|Z$19LuuJcg1E-gf5Mc=@VH6qXmXrI1;OOcd*iG=#)9*1DPn`M$TKP)cMZ zQp^;Jc+Ix^ar4m=GB4sSYu4hqtJa`gU{{el>u5oSLwL#(TVEp@M45juhEY&pwKhXj ztSk|1wMFg`*Fysk6aJw_4uIRcDXx6zLF}Dh4yZ|=;q_Dg&~0B#dyHC{T@;E~URV+_ zA7=V<^9wTB@A&cKXx1CTQs!u0P6uQSo&+X3;guB<^(gDx5kz2kWZ239oEoTPF8(VS zf1i4vgne=h?ce8f;silLz#S<;FsLAVy2!S|LJ^B|3xZ7WAxfJDLvUN5vRR~eQ=LX| z&}J?C*|^h)e-1m!oq;do2qxF#sS{8VlVZ*>)RWb(9KOjGl{<7fimU~7IuW3zhRvY& z@Pnmm8o0#KYH(ee#Tn$PV1WJX9~M9TK7wLD%;+x*L5*K*AU{Tsp<7|I4B>t{2~TcM zib9gd;(Urfx%>*;{UbV|@F{Wu%#`1J+7$l$7uVwTH|)f6VGO60D(JN#gD(0{-5tJN zy9WV%FVp8F1A***3M#=MGMC_B+QO|z_hQ$|f-(LrSc)_Mz!%Xq8lCZAJcre**5S(6 zya7`~4dj^76P_kK-(}5_?0S{~$bv4QloZfjTEt>|25To1jFk%*u9Z+o6OZwf)8$fe4C<)nb!`%mCc{_$FzHN6_Id*L%xS+88cp|pgL-t;xxaOaGj8*pi*scomsHffLYiO3ks z9_1_(CP_?+lAHuI3uzCt?IqNU3C58qiorioo2x5zE6T_Y_nw>P2bfQl=Q(+?#wf1Ba6|C zcgEz7{BeMd!udq?a-*eYn>Hpix>t-mYn<)-mXO3Pmj8%Y#C<&mVS!sV;h6zLcuf^i zNUHe9xBnA9{jtyVIdm74J%6}~o1eZJx9&QOYad?0xkJ+^t#rhxFcl10YP@lyjg@6Y z*?p$UA2)oF>xctMXk^jD_h$Cvo`vJkxmkVTh^#r8BAMX=ST# zi%3a-BBYabxCklG0%0-7KF?kDy-s$Mk>lLU92!QnuiC&57nktv2k*n}bMs)+ojAW_?tR6zrYzXQ=CE=u4 zM4B%lY1DzrC?4B$1n+p?r*Ylw|Bd;+>Z)1wT#9id_^pX)Jg>11Q!OeT_M`<)$fQY5 zkkl^QK}iLXao5_$xZLgGcxM@{r6p__uVd@PFq(=T$k<#_uryRrdh3ZML{Bedw3#Mv z<}F<~%gvtL?<}@eGh-!K_ZXQM8X&-$t;+JDuwOD~KJdpJyCGCuT87D4d#|r$nQ`Kb zH4AQJJ=p&L-a-q%|L}b{xZKW07(_{$F5?8vLW;L-I31TwjcJP}_mb=Ob8kstO++H# zC3X}`=EPpMSm z>sm_KBk7JKdpRYH3VkF14h;_F zT!>weqFQR;`?ubSKmY&!42$zieJ+Q0d8ho7>hS!AVg0i zkTqmujgO4tk6v{J&RD$>T^cDFW@LKd`3|~RK{1!2o^(MzT62gQMq60?^q(|{Qfl>} zFKXX%4vLmyY4}aw!X&c2OvS9bQK{m_+waHUf9i8Mf6Xeq`DHIhl{Sr9_Cz?UO?>r# z?!rG`{}~*en+rCD+~WK7fydn7&5 z$eThX+wmyro5f5@YV)#GpLkP1X)Kg=TV>8p&>Ry?BCN|&%E4+;bOWy6yAMaKe?HqW z((pANrbm&i8bWTQjFe(z zxdM`M9pDg^N*yb`5U8kSX%F+g4i0qYF;vUplCd$Yk`ihgg*@8{^JueVBa{oXYp#jm zLB4fqs*7`j6=eXfM{mD!a-SNSEGU$a!yU6*bN9Z8(M6bCv*T(RQ;+JkA>R=+{ zQkqerA`7LpRK=gAT#&=fvopB-!TWKdlY%<9f3|83es}FUG`NI<2$1d?MYWtXZ5V@` zU>_usC2n#u*=!lHV*#D@Cl`gc5!oV;4*3{NXi22F832&Hn-=i>UI(we|6Uwern;xv zxoka{uln$9H>U076_iFLec8Nh6XPMw4# zLY8#g#bOD^jvT?*_?TQX-%oWvI&8j&Xaei}s_n0}Egplx0FOs40x2c5I|}=kODGqq zKDa2o|K(CyhM&}Hb-56}o$8ufj5?Q?30ts11%IxTEYrdFGxuyZ%HeWc(4W z))!-zn7VVZS34>Y?VCXO@N4jmW-M!u44u!QL>z~g{N1`4J^#*v20J4|IrLOB-oZ)b z1mpgkENBsJv#2B@pJV6K7hRh3lI}CRX7WH*7TUPt6|cj+ciofuYCR+AHGj$I7(V>8 z)9|q$?!}K!B-mIT5hgB~+RAm%=Nw3Sp4Ef^&$~aeHX32|vgVr4VQ*&z-#xw;kG440 z*Pg6wxM=KY?0C732yGLMIw=T<467})r~Ldc;T11?vAa?_prHgRxfCa6+j!mkKY_jb z4&rSueK{^Xdp$Z`cIqUea9F6;@x2Ehz`H;BX*_)BNN`~8Kt1f0=aTQ?v{D1lYmVd0 zG(jcbMOrTDtb$G(jc$T1qazsO9qO=_Z>1NW5BIqlK>i~+y|vH^4C@ZZaudN2EbHT+ z#6B7lpnp@jWEmSGUo>Ce{xz&>4Lajnx2Me*ilh?!xnRd}(pvBn8F1inc4&XHq`F@mRX84A zaLEX{zM)-e*Vnej(pr9#jux5AlGoWR2GpwU&M;Kjy%yZCn}qA2Td3h*4(-Q#c5g>Z zhDyYR*LW``sOEZj>&DY?*_vt8J4-Txnp`45lr4Z3#YUon(k`e;q9Py+614lMTPWa@ z2M*#dwr>Yh&N_fMOs>W&*R4a9cE}SES<99*h`5vCqgex)*L*{e$on9j2|u$<(2TL? 
z?cTX#sF?>WzTPDOF?&dYA`#C$r2_tR`-8as_&nO|stOoDwic2Pe)ooyusUz2XzJ4` zw^{;AvKFrnNU>^W1mpmalt+>dWrF!vw&T$`vjwnTt72|;P9S>r{IlPm5Is8sczhu; z8B29>VM!dm!_OQ7#OZ;2Jv9KxCvaFwC;6GSfDLTmYltAIhJh<0>GqE~0G3w-gVR^j zjUAeF5LgeZj}Nxvh5$Dht}Gz%r5l%e;w9-0IRps4TRfT$Ml0m?de7ZJ249tB;rcK& zqw;D;T(^(6M~JTbYX5#$bn*K21>Z98T0NEQ^ZU=l?|_}ZM5?y2%#stzt(Avx^}l@r zAH3@QO3Vo`yDs+0Vg;Z3xeeIVY~Y=@?LlL70*#K1&vbcCpUf9dAj@3?VbUgo`F-Ku zS;_%Fm_Ljk%^kr?V8!YQcmSX%ku#iifYh1|Td?oZ-4XZImYrS#)kNcl`#B8C!Z-aDNf(e4XxSIf$9^>_DY0vTfpfh7&Wd+;1d;WX}N}bAA1aM{pcs~z+;&n{y@m8 zm82Z7k!^zYaa>fcpiW2BZKFbky+RJ{l@)9*)UdTNjDiRp@k&^|)r>?9ti>4~ftwA` zc^am2=P{nGGZ7Kes)h!)vP`JbmP3cV^@NnGHQe9r;_B_Y@SjHyN9~-J;RtNr!)OY; zXnYKRxM?%aE;l83p<xl$9^q+FIEx6ttJbbTQ*;3OD|xE4gn) zz8@JK#mtEr$<+CK!u{0uCqthtFEwfbP`bCxjGRG%_fn@~IAyG&O$y`#=CC1l12}ph z(*uPOtVWObM^J)pYC%O^qPKRZg>tbhpT*CRIRMf^$cR#*TV)(azREnYLh8n(k16Hy zTFt8U#_GSB1C7$HxFT-EiG+&>#F{Fk|34$R<%0ymGmM9S`rpm9Km~4(!=C zU@BR{N%6ArF}(NEvv9+`dvNC*FjZ=z#J+R8DhVwDIug8s+&m3dl_@7vv&;asatR(@ zp2ck^_Tvbdh4vLcS+1LMy^fXTMU~gJK;xoIo`_DXgP;E7N70DoQ&@SkB9A})ak%pL zUW=(x2g8L9hJ@G0UOmm+1+6mfjIL7-0?vYB$8pRW8adWrLJNX?g$jZtV`%d_7S!sN zMR4xMGA?`HM{wd;8*h2Zb8-IZn^7z(%3usuSE?3^7It#A@MMeUM+`YgpphoST%(F^psAp6De{|Hzb*G3_rN* zA-wUUU&bxlcMVipwH#5&0Z$wr!z(FTw3HxSngd4+wmYdv2}?pB?2b-POepe{FJNE3 zi*3uZSe~E5&rglv((!Q&3xA^}DoY)ia>_K>iNC6VCob0_r=SfG43F-WKD5tZdoR;? zK&s9?vu?pQN;1blKcuN5zi;ZhVc3v&6^LI5$BB{#M{fKh+*b;LlY&Yr+I6r{!W$mG z7dOqE5Cp|T6t{7{W*JfxlOCQoJc4(fc^0Ncf1-m<&xopWTn{?gnW5)Tv`VxBWQ2SU z^TjgW@ZffQ{@{M$noth<^;2u`sIWSvPu#PkGBl&$T4#O=ugE5dK5-bcH6*-g~v zV($ru4+wz0-ncUX`sFHwh=M8Xxk#YsRJ}R)1L{_ng zf|2j6E$Jjaz`#b}E%x$2fSx@oiJ(i{U6w^=k#iKDWo5F+yoOB>oVoq64>pMY7#V*5 zu&{k3Vpd&M-#-pl$F90BIEk`*zfak)5qq~d3ilV@KVzgs532M=Wq^tWQ=TO2|F^vKauO=&e?;we_AQJ=9_ zuY-2}|Gt==zT`R2$L~M)nJCU2!AP|rd!*H);+=~2P!?7v!m(CIDACqu$P83X(QT!7 zn`i{9x)d6q=zSoJNmV{Net7}sX%wpXr?1|OPk!q=cqkX}2da_`0 zIUH}b@z?);Ex!3bcb{}=ePJ;<3_0NGL*sbH$S6j8DY{7)ZEIOfS6Y}r5vPxgprM_Q zqymJVe_5$9glTC^$b>v24e)L@=GpZsuADp_=NHT9aCl9-rvf*U`74Kl z0gq>Kym{>eSS%#iv9f?|3o{t47xBw$R^!}q2~{E?VC$(u+A^t-St!-;j4LAhBD&Y| zy{$D>y)!CqaKKq8764KwZP;t1)@@|DkU05gM9xeCb~}hVzBn}rzBAk>3rpNB>OSDQ zp^#RoimUhS!#f_`iA8cbLg~nDuLlJ>0(eLyf;Zu{>o=k}H;XRINhE+uX;O)x+$%-n zs}Vw%+)(N=Jshc2aM@4p#T_R&j7UoWubWtn%hs;N7~25d^x5qa+{;@i-K58cx6cUc zo3cBdt(AZp^qDBN%kJy~c3|S0@)v_7_%_iHE#&c|P7i;wZ5tk#nLi1^6m#;S+ipxL zN<_?w=g*#H0_07*naR6G_Jg^Wa%VMKGH4-8DC=_-e6uz83=o%Ci4F@{$?s*5ADh$E*Ny?dOnpZd@b4>ZDa z4-P(Z&E8&T&P;pPGk8C&^62r|KFjLj@uuo~zXv{1peaw?xC2qWJM1`Hhk}@dfjbW2 zW-C~sx+wbfSirB*Aw^*;d81hm~n`4;TkxA%XG0Wdy1jJN;s>u|xk2^1C?JXer%%&}NE zZRw)NlDYLFg=>&h)aI$EltQBF_cLqY%4XU|N{B&&;9OUMcYYBm+j(RLZ@%_ZIPlm( zyz5n$;i4@Y&`sI zis!Ui^4xeG`Cf|hN*5ceRjh9gqr&GUrN0y%i_jS8E8MCE&96*8$M%(GpK2mKPtsC? z&I2H$UxI!baM3@Z02u{+DtI(s#6N7`hW|Qp1T1>QGEd5+?_qrcyliqcUcF`$)|aX2 zPw1buemfiKPjn;D69=7Rw-GxB8hL>7!jnz(Mu$Ym=l?SI9^jUqWx4RZdfmNGZ<(1S zBtQZpAVm=n1@S+M=%I+9fPfSsG({AoA0QCAV4+JBY#=I02!xPcCz(vm%$`2G@4b3C z*Zq|D`_`HvqW^XN^KT_Hv-jF-ec$`N?^EvQe(s0b1<9cyAU}z3Uwbe9_zQoJ+m4^? z3juiDU2IKd@WL%y@x<&9GAmWoTW!@)NMx*Dt$Ul41eqm3 zxZE|0yll@U@ve(5#Zw1M=r&b3mM8_gix_axQL?3vS5Mob@V{!f989P2>$lv7!

zsv>|_PR!s18#Z8qVpY;HXZPZERk{ImBga=c`n$c}N{{{+!s@wk_a_vNz0UCSf6AZU zfoA(e;yU|WnSIr2;aD<_KY8FF{`urw2p{4HJn^ybf4xg-QIhaPT*nA{bojyr$fU#J z%I348{lB=dB)#~aZD%>Or_q$xGa#U&mI;A^5STu{i1=`BKvPN!BpTA7S5_+mxrs`X zM65~a`DguKN(a`SNC(bADa`owAu2gaS^p{+;;^mi0GM$J3u;b2{_ha+>cNbRta3Vg z!EUQT#@gcat*l1f2XFO8#c{V2t93aX8^UZyFJ$Ly25v$`@%QtGtR)kDZ@pdPFgSP+ z0)xI{l^+=Y{YYCPpegZ9;_DifN_vAIf306joLs3vg2Qb(`ZU!k;E!MPXZY?v{!8zM z76;SCP&$K8Zr_B*jg|1e+fJZb7{Ev(gRH2j+0lxL^@O-sShQBTG~y8l%-&ErgF7nA znqBpM`GLw73K*T7#Oc}D5CHI{7hH5PF1!3P2`H*7Yq<7%-^1b}rF9?t>xqwe1pe&z zUxv$%CUmBmnBMYO*G^rlimVRsAORn9;XflmW#Q}^L^60ScEy6g|VxVx4oiU<24o*tc zWqVnznVK{)s(x;)ENw}ElvIp>#cNVQ!35SyMSSka5q#?CERI{dTr7kV^}uo$_?6K? zylL~r*v)I?_-T=ijR=Cm!w67D0tu1?vq^NQ-(O6jJ5fM}%7OzWw9{RrOPoHq4u{Sz z;N5@qb$t2yyRaf9uwIWlm;iordK%ANzX|z;2I`f%jKHRTRJ{Y4m`A*bP|jV? zDV@L#YfHFqZ55-%6rMCafyWIFVpIg!>avx`(a3w<20u}4Ks?0*h-C5JOs__^R5FPh z8V&sBEq7o^5`u)30A4jdgWp*AP=zL0wz9k?bo~Id|sR~p9@7A zn6;Axn8{@%%VyhFI!(u%!Scc~#wJFk1yCV)u7r~F=)74PD3*$7HB`%=?0?}GNZnIh zl>`VR1d{E~)<6O;LN3f_kxgf$d!Q(~0Gu#Lg}}T_4x`YJkWAQ*%yz|QQ+WU~T2tTO z$%1ib6r5++(EaUw4t2*PUSDC)mU|ve540Zwhu~0S2Ot(B>UX2eI(S3KHkOSiUauku64Nm^fLZ_1sKQeHBzTa8j;;X3HfjO|^^oT#}zcYGDEzv?SETR(z#zVyY|y?H%a?J7EHisR7XrA?9* z=rC|El<=2d{}+7XYu`Yv<(hlyi=$lKExBf?KQ}ajpP!h*Xsd}lo75p9%v<89a~)QTR5FcLHi^62b=+88!dk5=C9|haP2=I^ z0*Vc`ao6SZgo?=y1_m|47B!T}Hj&S32&kh+Nr8}O`%APq>{QtK_A{cXrSVJLxtpzb zkiTb-|AccBf)5LNdFH5|sGS2D<2WR6W21qW-*+bt@;-Z13FF)+FK#B0MjO96K7c>l zeG#?^QIE>$XS`TmaD3S9=mj)=agA+hQWEIP2W!yt|Pl5f|Rqztem zl03a3K;q-qeWTl`kf~30Yf5YU9Olz$OWPlRVPs?&Yb%uhP0NrTq5`5xok&Q=zqGh) z?R{FeXAr=EfcydeT_8~Q`ct@^&{meiGAQEH7REoFHX&G+9Ebu?CYXo~4sZYCcjD_`_(o5Qon#t`u8^kQIWmFY+B}XA z?OVdhbO!5EIdn;yGUl$3=Vb96!qdSQ0)2M6s3tnNY2`F-Yc8YKo6rW1ho6;!!4V9N zjpF!`Bk07O`?d>r<6#edn3f$3V%xiS5AMG0+~wU;g`Vg*Lwa)S)*Z0Cwinc*fLv zY)fa6Wnj*1gcCZm33O{sY|Ip}V{jNHNldhmBe1b3^Qn}U+H%ceqV_1Q{i|uGkc%U3 zUM91ta-8%ggTV}r?sSpPrO`>|FpDlevVR}Gdgc_CV=dXbI}_3>cu_iuH*VXFr%g{H zM`tEBMVaHWyWHuKXh%CEqX$`z(3ZqD5rJ|RoxuV!6VpfxQ%9hL#PBd~yY()-5|2?yG%KrV$xkB#BihQ_dguW#1?k}+A@V26m&_C+k#Cxc44+|U(s+V0DBeGE}^ z31oLp9<=pgcnbT+3Rb<%1$Nv56cDXNtn3hy3A;8)?YIs~1;c7KkM|xqj8C6DhI+jE z8w)-{2aws5T(^toPEX*^c5TB5333bqk_-)$NGoT6K@Q7E95Y|e<2@%&;eXwCz;rW# zJOhB?Dg4gN3`Ru)JIXeM+Y&=$?UEy6atMgxk6$7uUFBFQIPxQ*Ptt(Fm`}6vik^Ju zKPjTQu z)e?XX3R1>D9~7^b9R?kFcnn@>Bse69r34RE^{n+V;(hN07{(u!$FoL1b_6nPaDOb3 zk=EU82th1}lUKxuc5%nh|1(CpYFo|?6ZExaF#=t|K3|@)5%ofSEgFH`8!%X6W%m?` zP1rq$&xpn8=8yzYiPfXed0%_a;Jn;WhJ6XWS|Zo2*koIZ8xydkCUq+BYESN!&~ z@Z4Yec~s|4Bb!Z#EHZij;y-j&U@#of9`Ns};TKY?wMQu=9!t}BE?7gB9&!#))Xo?p z3~8j0NwG_(h-#vax4ieGc>Ztv7ItnLlfgs$IS~MzVwXfJU&7I~Cf@&*uj1R+d|&2t zsIaf~=llLSClpraJPhE;)024Az=+OrVAjfWolZ$sKGIELN3n?ER0e4h_heuj``%?z zixvXHefkg+J)e#~2;^(NSt=f;3n9Ro*$EGh*zVZm41;tAOQ|eAd+a#=>cn9@AfwEc z_~vmToy;_Vmv7mO7i`*w5m%9ufI+qV`TqD;fduj)X!G1TCJnS&IF0@h;ouB zsKFjl3l0q;{jix%V=e*Q*jU8f)VFANu(1RD^2{_IH9UY4C#X5+Ia23dvI4(TU&drxW#LxEUCN?pxABWC7Y-5s_i>4PE7_5u9F0XlN?n}>!1nw%>-5rlEGMZeQo%Cvm(e$;d9~Y8QgDX9z?zKk zIRQ}A`*eU0g92vp%I;Sbf-Q;u8I%d;p=(k5U!rdXe*>H$i)n_@33O&8ofy(@sofDr z$Ix>cGp^@x%Apox+s!iw_4l!V$(!3s_q1=sq6E93epakQaq1UhL>v3J{#xHt^v@0~ z{Mg;b_J?uHd&nvBZg#Kdh9GuN4hO~w2XP{y9ng4i5Z=pcy`r$CgI*+a>U5CLmGQ}| zK8ugN`@=nXL;$oY4m4cE`!3#vzq@r7`!hL=Wy(mKdL*xo5xkciWC|SvK~Bd@0DI~S z_+fPsRnw60Rk8A1&z2~b2Qe}}j#I~G(P~D}H&KDdKJE#U&8h;S240JE^SJT)9|}74 zU|$z+Ux#nkSUi_d3b}=--ca49J_g-_+{MLK`7SEx~ z-!P#SI*fcSjeMbqZ~XK3aO!jw&w199(5lx27N%_^qEZ+h!Tl#sq!B=Kiv>#1p181}NNRNc#D(aaWb!}1C~GP@r)tVS}zy1wd; zhZ4YZXV&4BJGW!HlSG2gmqC@BMOQiIpc*5uNxPk8aVj$>Btv0jvksBZBAG3so6BJ} zna4Nx?#IXPy$g4(tq9@Iyo;<|ISKqfTh`-wQ(IA3Yhr$GRn-32B|rz2BKf+-BnKiW 
zm6PYEs({sQ2e(yMa9^{A1=`S73zwG4c=qHtwo(A9UeRYQYi%4UIy^ytTv1TbmL4Yi zIS)M|PWLzxS%R1YwCCV9!P5Y5DRhJVgECHDE1Kz~|?W;r&Ms zU@ef~^bC2hX#Ufl4n==B=c0?>-Mj&>-@E~1lxe0$0LL^ZSt?*Tpd&+mW(x)U`pviF zzLkpZA)cdd7q1?h!tYMcV5~$kh;3MlhMfH8&s0!>@YTPNE`4n`Zk#o}0e!80Zmd<- z{7O7g(DjXY2#V(D_xOM9P7}3E1|OU~j!(=UL%na9RpO)9exuu_qFzx&f7o(IRX-j~ z8aEy)4g@08pV2(uoPGOj6{O?NWwWTPRm6FVK!>0MK(UmU`2qYQiooAvJ)f^(#!eVt zlmHu4|0_zsK&3#*@NL2$0}{SJ+X89es4+l9fZPEF6Py;vdlyL2f&hJ1Ho-4!hiE!o zZ?4_f1rQ!q?7_uq+45tDYGTjRo!xmed5biH=6a})$Li$%l;vBhvK837{&{gk!dt1_ zS$Mg_v=sd6JyG-x!xu{Xt;u}X z`Sv<(vcBBGOP~KL+pjlKnu>AR9LB~cu)MT@>KbRX=~uRp$Im_Pi5M8tP8LO>xPA#7 z*t-|^?%DI8?_9_v@T%W>Hh%31k4B=lBIT`2ilwZ!mMW~`qWhQMX79G+roDvGIICLw z#FYuJ!LLtO^HP8!0hx3c87dIw^Eh*Q37`A&ck#mKKOJd8+|0gNZW$fJj}IQho3H#R z_C9d5H_t!MJ`ejVX52h?u~Bl^oJr%!Gwbk>Oaa+i9ocjj?Ib&WWZDrnWU|;@8bY~c z#McCs;;3e(Es`1lMCVP+r1V{xSdw>Z@s&jEt;JWwfjl5(z{#wfrN(Ru8RoqyGU*fe z;GzBa%G_C*#uxwMM^Y2@851M8V%x>IkPo}n)D9_$01Vv|X1)?B^~9R0oJ8(U+%oLK zGz`dT>J&>jP;cUWcioAvoH>Q6tgj!suEY#a8XLunF1QE-^DT7ODyTPPsI8eT{+{N3F->N#kGXD@?x zwS!J8fmtN*_9J`oos~sZ2eib<_o5b-BD6t}0M6q;2mf=!BwoGc0&KOp3~XoBpt&pK zp#Qbn#EX7>2P#g6Wx1OO(eNaGcilRS=CrF!%2lCrC34syZZRhw7E&C%-Jdyhb1RBo zimrk?^>`PBkzM`$v?^#QPPBg{d6I})@kP1FObXwcU&Iv$4q}7r zSe##g72Tk?u!?(Ck3-!FMhmLMI{R{`87m*es$Bud2|td>n)Z8%9w885al`7yX7jMLYQx$w;p@EU&APP;E zJX2ERl0ZN!#PWLtJ&!jlxUO;rCwp4|?0#ZS&7TX^-Nq*-v9`8~)n&FPM)!Wn!!E;? zZCiC%PnTJf1|5XaZ@%efk-vVhuZy>D!~0+RG7PjSoCV~vB_YUp0HF1KT@Qt&BQ_{6 z)d`5K`6GT1MVh=W9iV8JQ9_Ph#E*ep0Be}zkIzG z0!&R%e&~if5|;5t3UZygzqN}QaU<`5`4aTS1}@jKmV`$~%$tKaLweawv68^`;Ft!M1 ziUd|poXptV8|^OOdDrrh1vzm~?k3VAg8<`)*P3nBBt6`Z%++u^WnM8(lvDFy5Pv+$ zkp<(Ayr!*ZLR1gxEp%Jd*2v=KdJ}IwdN=OlSnnQ|d(4Kr&9aoblhoF~r;Lx{PcGPj zholNfb}Fb54IsiWSin2(pT#E*J`joL2m%1SY-9qzJF^ZGrL;)~o0$yj6Se=vwpFY! z5SL;PH*7}JPyyA?ww3Pj^ok5PG1ik)%l5sTpv^{#9*bY^bZ~d0ix=*_4@W5H9}fVe z4KS?&KDk03#X=r)bMvC~S8vn>!snNc^-YM1-@ss5x&WA=OGZS^bN=1%NP!+WFeH@} z9)KwKb-F4rMA={t{2}3wM5J6!rU&xxg`!RYq+=&hkQqL^6Zm^tdu>T~f*v;|m&qcB zE`SJ@)76WHS-2aQ`k*s>#nU+9$eokLkG5H3zCKupC?GHh*B(_bx)80hzdl~)kM@g3 zd-@uymGJ>PIV`l9SZybYE*$tbRIaWtSO5RDyLjhaJF*TX|lqD%B* zdB2zk@Un~tP7dR$+-fA`BRbl$k(0=!OL*_QKZ4JF;xkcE)>egvof`Jf9w_2X7mVX` z_s-#Xs)Vs_Ng{H2PX+*HWH{husoTcA%~jl3nG3Vy<0yys2=^R>WD-L|BeD;P0>s&} zBV*%u!LPPvxl0ygh0yK@{SIv@}(MLe^%C3D3L)I^=1d_H%~3%000j!dQ$?qLG8QURYoei9!#au~C8eD?VC>NOO5 zSTT>cUHA|@E?+{jR?)Uxg2mE}Bxt|o|3oje%q8Uk>wHl{qY5waGC7q^+CNiC)Cwhh z>&!{K>%d;zU$5%u&uo*`CSIg5)>w7I9Ow~jdqRV zKt=rB(n);a#33xnd+K?JI#$$MI1m}Enq-MBJfxVx?{C|IU!I)81ZONX>Nt~6;(0gU ziCgBD)xVg>p(kVj@WSaCOqDq*GWhKP*GDA}`@L|aLb`4Vo0ePjRq71_*)(QG;;4z5 z@)&X(98u(9B+`thQG$vykbFH{c7y_<*b0LWcGCMEE_PRVXGcVgwoefo;(cn6a?l&jsRp@iqK8g9UO27+km>fa(BP%Z@ow zX`K<`QlCweb<)EUs#QJNyf2F_kvQU5Bxw<*>X*kHax7~P4mSpCdBhggXL_HoqX_XV zIywb0j5roBN~U8_kxQ)V`^Vnfc?2;Btv}aq^zsjSLi`>>WZOX*I}H+=u5o>G9FX%texJ2y<=vRn>-eYk>TVF2Ta zY!EeJu*poBa?z_<;72Q`aet$#2tn+Ndn3oz)K!ANQ7*!laMbPC*ceWoJQ+$H{C+Z( z#$}gZj*00RYZ277lC6<2%>hh*$s4Gm#a7 zD=&jV3hFx^2uqlv>{B8i>lK};s{ z;5<=Kve}-gknoJwof6!W*sJYIA(2I>gF7?fgP0A_HZSK}PVSmsQf{{TzJhTZV< z)qHeO&zEsqeGMPD|8D%->XMSQVitNj3p{_v1$fHL4C=?1v9eafQj4~*fO0yGVoQb? 
zisB-LfctQ2m*a5`KGT10KI=60_A+ zJm&{@V1e@*N`aeso|{Cgg;u7751cxV4;?(zv&R!3|H0pP6LhxZtWf{I zR+HAg)#Viw%O!ETZ1W>IkjAr7=Xl;hR4bJb(Xl6=S#@P?6@vou*wz9S+PM+i-W&;cWgbZXGpU47lfCFSCvNho!t~57k+`%&#kk1#gB6ZPw^K4$1 zZ!dLV#t$&37!L;2m>T>QYTr>79dAuiXRQw-?sR>_cwW})WG&aXy&j^Uk5z~iH& zBR}GDW&{x+>sF^?WwJ#axN|@L;6<;-nG>h`suwB{v^AZ^hc{2+$f*Xt*X&@rFpPZ5 zhYm4YVdmJv@nj2Eub#miOICeuIa4TLX4@7V*>j%_t%+VhLhI=G1eO*TWuM2thR4Qm z`6C`ByahdYVYY~nVQ;?i$5>um{0Tuo0=aYouY2C}@%YOw#b5vRm+%MAeI`Z+i^lLb z1VD@~|6eM;DpAXX)j;yR45t!!(&!{EFArg$O;kds+sQzrdK;U{C0sBtgi@o8lyoR|#X*UE zlFzZO0WqG;2dzQSNwQ`DrQD6LZ1$b;`7^pX}SZkruWD=CaP&$V)gW66TS@IpEd5c%y zRzh1cfl4Zk2U=C!vbKst9iZB%VYJ=BuWZ*8jvM88zT!JZ+lHg}_PxX>-|41Mt#`0iRW(bF zc+Mo#cmPSf=foj=Z*|_Z1Jv4SJmtxWu8lX5=)V~BXsw*HBDRkek!>Y#_fl0#Q1P9> zvi~bbCh)v<>oHxV{*9q_#z*Q6iX4g1MRVJ19z*#=@eV61d8m{ z_Ve>YMaR6`K~1vwf@I|)zLS7W2jp^Dy}!{=t$%8gmpNcXYkw3Is#R(z4-_rKhm;XW z@)Nn?EFC$(DS<>3q~@a|C^W)DB}f|2V0sC(+S zTfVrV=4B5(3IKY{oc$x_K*IxzMfP#hh+2HT=8){iBZOECdLHJzqA7jH$a|$@pRHhl zZh5#Vu;{0<3TGgFv2REV*=zUpe6*vVFnW(6WK!s{p^4S8u^AA_! zEw6qv8V$|GqP#$ZfM3X^@U|@*@TGg_aJE#yhU}0SI_rKxv)jb|?JBOV&fC<#==tzd z2PY=6Vfz-`f6E{$5Wo_+X*NAAS?-E)q~USw zOlOhjpdQK+F(F8_QA7e4mqxHLmj{SSl6=fy$kf_IH$c2PgCrZHYr9NieD4r4xKt1T zE~1}2RMEE9MadII zf1oYdKd)9eDKcqLLk=f|^Gvz^zI@L}aRdeCb9sFJ_;Fk{yB}w~>D{hjI-S5D?AnD# zjSK^4*KlUFE(4I#-2}$71q>1SF>xs(4}4oGHJH#16Q_A}amUIE_P1&{-f3c`S;b|; zB|LA#dTe3WRlTO@4Tm|I^A?0vf?XxM$t64c%xr7S#Hv`6kMgxrRB&1%oe;N;u_P+S z>kyM@$(2}@;9ym=VztteAb|W3QI3@kjU4XiHt~)J?!le)n*1!#|I~PeP8)e#F@6*G zM$|c=&Nq@5jtXqtm#-Th$FI+5~vOHa;fjp;D=7J0RcRg%)h7 z_`aD8oqQ9yd|n<9KZE50mLCXVantg>$q`^_fE@wMw)ne3Y?27qAx9Gs38;a&B&)Bw z^K;nTqc_x-)qRHw{6Gtrgb=U_q|v;d*&RMN0v1?C$%bjs(Tr;=}Z0p9SQk7B8Ip3${ESDwP=9lJ0xGcC1k-aDu6k@ukLn@!w$^}nOOvSL#XmB1IA zLFg)pf>U)IzBfBET;-92WjuO#0$Y+9AqjHOK#L`!dIwvx1?(CaM3K;RlcUOYT_|zu z&WLZP$HfC_%lZOZ<^h7mA3h;35W^G+U_CsU&R{uT!WWO9#7B=E!111u)w)k9;rj>c zHsfU*H)4!aDmzVd$V@lMV3rlsSUV+E`x|}awiI!sx})bv+BSLl0+pvw&E)Z&<$1jC z$bGnn^2xEU(G>6-n>OON$7WDjuHlGqtEfMe$4st>Y^^DQl%gH#KsYE#wF~%BDVg4Q zy4k|MZWVW|uHbZa4db0Ap1FBFetu#MB>*7LT;_c+IN=)dFZn!v)UM;bv-jhE z1~XFX>7f;wsrTS9ekZ<9?bBU+%j_^MeF;gtX=nt$Iyr->(K5OuwZ;$zzMB)))uk9d z5)jd7jr@Mp(ud6&p~}|7FF}AO{!tF+n>mI)M@Y5biI=Jwy;b)9(@CQ%NWsA1fVBRp8lMj2VJ5U>^W_WXUB;WLu-EQuut{*Li#Rbfz~KW05mK?HOib(tBBD&yZGE{isaQixM@ z5~BQEm$jz^5YkC=`i2IGBQ)4W{%YFw?!$R#D2Hms{|h!+dFCuwOKj3)rerImmTHQ=f0<3NpxuaHD2*>NKdj)wnAC@ z9*@2ejdAp^E#;X0x?-Ppp36dG#@yx}!(#&!R8P2VLd3P5=BrR&DAqn5#~^J^%M z3?R`-AwlDn>f-wPBJOLfodf@~mcQ+x4@GHs2y18OaNw@HdTRZ$KFOqL=(C-b}>G?Bo1+pV`^NeFfQm#%y4Sx0p3Ns3cB)06>iHWI=SYgLbv4CtYC#48&$IapNvBTJN z!;ks~GC656JmsjQX5Z2g!Dh*f1T~pT;>p8fxV%u7F{{jA6P((HB#PY*c8(5VQ>uVG zRUoO?Z#wRT&(#9=;5eFz`9Am36U!%V+wH6hq9D?+ic0XuBy3i*d3s= z6}tt{KM4i@`pgvmc;j|#;QPw>VU?oRa)SnM8VKl_;yqDIKx)xa641}q*xJ5lQz2{V z6n<2%;hhiMhnv}b5WBsh4Dh^lTkwqO8LTZ-uvBSaB>_y7N|;DykgxMzSUS~rr@n$- zi=quWB0C9&e@AK!++C~S?)oauHmi8VSQ)=PIgLw7oZV2DQjeHaX@TT56AcK9Xxxj4 zs2Vb0B67B#0eN)CA45;T!(u5{kNpbYPhdEa!dr&N@YIP(OpR**p!Q370J7rJ zr4rjOF_Rko#H^k`c2Vbx{M`Zyx6;~TXy?+7O(>4UOu2IaqHDMM7u7Lkk9APZ=5W=a zqxit=QRzexd-w4le4|^)W`!lni|5)iyA~Y)PU90oUK80YIgnj}@Yx1ft7x5{fdGR8 zX6vfkpA{n|5vTNfQ`%Q2{_*>bMja)A^cl^ykfr`O)d$l$Cb&_p3ENsm3s)3fU;sf! 
zPGvH76^QY(-c{!V_!K?Of}(aaf84T&R;vzKgJJKwz~(4JP@pwsAH^C<06x(wK<`l( zfo38xEI&Mr2)@zFhyoaYiC;tb6_pB;|LGt4mwvcDScj0Y$L=F+9eWRJ5fs>37o>*< z*IW6IxZKgE>SLhVGj_xy99YjrLs6g?L6K#W%rzDvtNgl@GcxG{jvPFN*S_)!?7QQ> z-ZFsTcO8^d3A}809GS%~PNh0%X0j->6C(fnAGJlywtL61@-wrgGIm^knTXPm34Y+t zJFq-A*XOCJPr&_3p{xD?Ku;XY1Q%X>F$M>Rf>@2hp1{)5GVZwb7CAHL`&vIefj@c4 zi!i(QKK#a$9xsG6af(vL5EH7;#cFvhPmTRTpC1i>`-#Y7q zEx3&N?)uU9v3T;N_CZUA+3SQOlNEJ0=rN0R352uK79+Jgm&MPIjbnGJgd!)~aYb@T zG&(J8&J}R!$OH~#M>lbQgyG}MyA_B ziq+#FO>t%@0nN~yg z%ch}03{*NYpCQp{$sTbsq#}6c#Q8>oJfyIcOk!`RiCdb>IJh*2k!};u-nw3x7oSzwylEct$~=pJqp!b%)w{5L+G#M(pf|z{bv#Jhe~`iP#Go^hTIT zFVbq>L@tX&RV6dFa#?(Lei^Ucw-*bh2ax#4)nDyW6I>MhIPgHG{VnQLzidXz0EGMJ z&d!VDrlaQfq_$5@et~~=8L1w~K|V59K&A?IrIzoGoDN=UmHe5>m$F2_0D-N43=o** zc!EGS_z29hST_|kfgA*LLh7h>K_n;$3#5yoKkm)SBWD}2+vWO#V~V0% zB^UaZuuT&y3q-D2H$e1icm5tZTSdG_U?&O#;-6rTFOUPGAjS?@xTjdRLaYYrfrh?& ztSdq-aqMoQK*PTuH|Ozd3Z(`sb3~;Py<<6EvE}TEX=9}nH`MxCtI-LqO5Z1!AHa`) zcr#w}lGkGS>_U92>)+E|Y)B;WoS|`~7Fsx-V(mMNg+vEe*B7yP9tT{Sn84OcFOm0W z?{s}>5%=DFt3dd@UmOmUmjFN{vCs2W8W_Z`UAr+fIxPFv1+2K|XOAAn{=Mff1<-lE z@x{M~QnQYyTz07h*up80y{WBdegK$lNuZk`N@ioPnZ>2pLPwXOaIZhJM52Y#-~itG z<*#COv4+3=i&t?*Fy^aWyyW%o!hhU+`+4uhze$qc&I>QZz~C_Eg#Y&b%;)j$*?sup+(~hK{-r^{Mm%M53azCEPF0#H4;8VkkVkj1 zA+rJ)OlWtA0B1&j(?FY=_v8-zgWsI*r zZv|LrGn`ArJ`4s}olG9LH>-I6iG8@WR@J70cz!Ln;AwE=JK~qy9{_AjXYjVcG5pNP z7{AI&_m)2af*@#P zK1xd6R|8&r-yZBSj#J`O*MGCi`V$>04W2XV>5yfZ52y5H2;D2=m2f{DI-5&(%15b75ZpF~mu2Rd_ppXCBNay~%m{U=edO8HQ#cBwdp zO#Y*hWC=iENSl^xgg-aFVLo$gRpQ8iw759bZ(d5rCtB>_1VVd8L1(}=iY3kJqQYlH zjx>U^&Dex&-?yGUq@LgRP}#}}e$d1i*yvcQxsb|auQvG2lajpx*itbSa$CQ!H+cGgRO<1>Md(dggr&2h4 z?|nFPZ1x=QmC7j6#Mv0+ISQ)*lcT44vltvB6@TD4ziPT%RtkNa6X z{qjrj@aZXxjZdLPQ5`BIO5n)Mjngip=a$JOkj=3?#y;hiK>DH`)9Run=`OSK6w>Sg z%N6mm4}KCm#wPHNH@z73Tm~s|#l%(W3ME{@8AcS<+zJC zDdZ|-;5R|Vyk=Wb7pcCOs;AomZ*S9}%d_oBpC#BasYK}ccU!|}AcNf;A@yzu21ng4 zR#s|)vPfqF%Vb8xrK%;{ViV-6(?>N_E*Y5|?nMh9IlE3p{;eGU z$pzU0-aa&nO9w|VHa3V(hY7HRk}>-i`oHwP{0;g%wI{hDEHQ(vzY`xZulRunu+qce zzw9VN2tHzbw$Kjg$e{*;%iS!lIJgi0aO#YeqV_$(z8zp!mt~a5hj^W zy9UI`XVYN&d?S&I37a;>_K@B ztD8Wl3!XP3^uWV~Z9MY`S+Qr-y-&S&n|b~dhs$(3=uhI!D{&(V&+- zJcuz31Ifm$-6lGPc1<3B#>ygkNp3)GRYF$fs}%33i1-G+{*DVqv8jiwnu+MSry*9g zJ^s6t9mJo%`Q7;P-~OGhO$f$}shjHJk?B01kS$}i+Qj~36W2AWsGlcbn%sT?#y4(| zte$&_0ckdoz->Rc9<5rhLYAIGh1VbKE1k(;dS)FqZ`~TYVt5b7XOCn5-uuq`t86uV z$4h??+p-;O+BA)3oppBV6J$`#0l6GWwN9AST@!*WAw-FfK&#n7tq#;G{5SjE+US%A z@T^z96Hj~eC3xfOUWEVk>CfZcANwkr@jm~)t11i*V8`y=7#W?w*@by5%r9el-8#`l zAa6nx3KL0W@@X8o|9%{}`wqRHVBjBYg;&H4V~QF^0Y6Vn|5vV2((J;h58r>{!pK0s*R|D1WyJ zfNVqLHPXq~$_0FL;ViB^cnF6`{)-<00Gp^4uyYrlJ~)i@8f(LhZz}w!_Tw(;&YO4MoKSTWcZmvTI?W^&f-wAjlVp31m9U+5bi@5K4i8wLW+LEJ~n?r zA**+dj^V;`8KYwZqPw9a;CQg(UcWc{cb^?Rj$MSCP(S0kB9=_JJ?yc=rWN8kGh!R8 z>>a8P+IH%;l1QQOjJUbeMSSe!3B3E@0Y#kH1&~Xpg##d@KKA-I8Y1)~Qi2rSNu@+2 zh<}n8MRK1^4rI#{-!q*?Cd1!lOJ7x{|M4r8NeMY)ZEa100%q@=wa(9x@jaie*V5Oc6SulLryDCBcLXhz*@`@Nel3Kn{y`gjHvwwfZl^wR(U zAOJ~3K~(V;1YXmtRY1T>6uQamXEEp?DpUA9#;(COeW+RWoehZ*;w?SSt8^WP7^yNk z5;3U3VhQ_XTjS`P>R;%nvuLcf@!D6C6nJep-u@x#E{$iSjHS`I9;9_pxtQWbTflr zeZl+jir;@Gwr?K7Z@%KsF<I2GP@q%K zB#>&h@W@S**ibB@wNll!BGJpogUi0x_GKmJJMB(67k(}jZ#7s^GGui!0&g=|q>W2< z&?yvfORa|Y9@vlTD>_U_KEnQFzdk4Wy?AUr3bhKFLiblfqD(53AfqLL0JB%NJ&JUa z&7=3{?k!#d&vX2+W$W!?9^YA7!rNx|;V?;h9s~e*=JX_fZSxi^&DW5rHn6o=M2@P1 z;$S6J4;eqp3_>)O{EV%NZ9yRdHqoKx0?D zz2%Kf97uyyA0;ddeG3G{Nt!KE-xk&X$!qa}R(v-fSSa>NB?d1ZBp;AaD1>VMADu5g;-T0z(4Tiy!#91Pc@hf^p9pM-JkfIyPQ5 z+<#O?h)i0%^k6FyBBkic!?)r=K@VCGtwCs+>_;SG@P(_%`E2VCb;Z-tzk2!+SsSar|)a5i1X!>-@>?-*w5w7#yavPvPk7 zQ6c_q*t7*4T%#pBgL@Oe?Kj?lWc@wY@}PqU#o&qyf+H9 
zT$(B1S4$(z3r@N83$+IK%cO7Ny;gp;$`G-!8!vo1AK5*~={&DH7WR&q0V5{D1Hf+WVre;u}X1&1qe7aw}+=H9fq7H#vlI4JDJblw9q>oiltaY%i zTo4&(DKCg!>ETdQHQUcp2ugY~5%CW?7vr7KG# zK}BQ9Ptidv45j6M4Xu?}&ek9hswKeEKuiD+mO^CjE6h(W#ag|ITD>JDFZc4Y9io34 zCeY_K-EVlSb}E6DYzF_nwv5l7cmM~uPkOel+BXXed){4qW4DW+Etc_?kqKM*E-q;NXUfFpKN?)_Je~1(vwd{C*^-npFAV2e|&JvY-Bp% z@`}{(nep>ABs0y=FJNGxEJQu&(U*+9gev>LZ5I1R*Z!BXeIQTJ()yjJQ?0DYxHxqHeT_;uVY|j41fLke>BH(u9Hq>v$){G-58&ml5<+A z)^PaHEXF6MF*Gu4S{5n*!}~dK-@Q0?aBncS>Q;M$(g$Csz72Vj4 zTkfbnO#vneK-yN8B4nS;U@o7=CyzdWzddylZ2I&dgMXvG!3)++M%$XDgklcCp>PSQtPHO2sWp2Ugl2jXi9{OrW?K03Bm41#TC_ejTS}=S#hk|#z&nEo58{9cjj#LNqRrOi{NP4!p|^KS;|S*J8e|!b&L+= zFp$flT*#2Mpm4jA0!(hnCNY}l*3VKTMs2aBR1)LpL+7*SJreR~^^5MadSaj#B1lwwN73O6FYc+MYNWILmTN&GnJ&^lP;e z>86S{UvS{D;4@eRuT{l?11rDiTeypGMyr=P{E#_P6GhQs|4TIM?!~nRlE|lvxb8o$ z$DdyDR-8H+7x9S}Y%G&P1xZx7r~1CeHgCc7mW^gG^?kYjGRc&%+q;bhZock^XgB^d zk(>v2bwABdi~BLb@v%%;;gcH|)Lzw1tE zlk@%Y)8+mBPCt%L#W!~J;=u0nObs5wYrm>JsWMidKJ~+Ib_o%% z$G%W37$nmb(jT*Yu2`{9l(ImfSQJ=(VSW)qLjyV+fP;W?L;@8gtx^>O ztFpcw5m(_C0|Q=^hya;$;!W{&gn$@iup@vDp1(s9AyEL9BZv;LOu^Ta45=DIbYyW|_|M_2FVPUZk#gR-YSB_!CT({s4{$2WzRsh`G~|94))^YONqKO1j&?h}w` zRZR%Up+n2ZY%N1ICFaaEYajxU5ho#Vb|#7WMgqU_`cLEP8}Iswmo+*$i5(YSAWEFs zTtQlVXOEu3%%;s&<|CgcAt>F#=~KA-wwuunZIwTr>;KQ!ByOT(hS$$cc4&$`b<=u0P@YI--n-d^pPR$UttfJ3AGX0o?l@rc;Ht1MqcSL!u610nh)(du-u+GwFtWBV;Hh+HsUb0nqgkG^Mr zdGzAjNd8m=LpYQ{5Y9*neD(r2Wbr__3Ai}wKHN%;Z1 zVRRZ(=?o@E%1C91)^MtAuRT{0Do+G^?YTR3^Hdfo`5sq9nin1HynBARakFoBe*w5~ zv8&)qdTnfADA@UFWK#GbC8rM@V*v2?UFp3}r9|b=AwOo_LcGhQu{ggZx&V@Gw>uab z9+ZB6GWcDupU#_bKhXhl1eom;nx``_7W2}+#0;NV{HkaLC{a+60?KMAQoz3p`(DU@ zO(_BRpdcD-cf4W<%(3S(8Bt}Eer#$iO9ZNjg=}yl3${r1Q#C2UTl(U<8eM;FGe$WH zYwK~I!B@sE_6*H}*1l-f1CIDpjiOccmG%KW+!BqT`@iBQ)PF79Q{RgNiP7wLFO8||48%uR?g~s|2XPlC`$-v>@%Y0#6Ea66hcNzWKhVK@rA$o zJG|@9-y?hEoOL_A^ zEA%FR<*^UNm)`qwl&~gaV#RC)f>g9-zLrL@4q2x2)^z$CkjNyl@6;Ne_KFYV@bUgK ziC%0bpTR{By%dFV8JQF_!vyy3*^gpr7^6a9Oh~YuO{K81G><*E-->39?To#9_P>4W z5BiI-9UVR{!vr=CmCz;{P}Nc5F4z>6 zz!d#d`>LvOO4>6wN2f0aIco2cWYyLDNIFcUa5$aDdk*c#zb((9Awjd=00Wd~3=QIq zn=Zgcit;vV!mpt7l)Aoj9N6c_vNjE33G@xrXD7si`lTyXI?Ma_2L-tYqX4vvV#tr9Zhr`6VNI zn=Mo-6$}jMfFC|k{++r2kpduf`WXzct3YLf>s2N7F(7cbo^X)2^T~!5ooQVT5ekxN zNnL9pOo)S*mO*eJ78L>0LgHd$2!jxrV@(FAwbHPBpfjkIt*`4A)yPBD&b*yJ2!#Y~ z5%r{m))oycEG00c!Ic6%Id&(m&|%NxwJXq#tTn)>RqlLtF(3}-x;W2?%s})za6c}6 zSePCBSlK8xNXP;ji(JI%+w}*cmoc;BsKU8BFt&E*xv6kv2E{s=GT@BHFQu!>Vw{FA8P`qcBs1bw>8_N|;oe(r z$MOF}HShne6Wlg6ihus}o3U$@$e9iW664{{$8cB8V59qY=vvJTC%|zECXgtU@#TNN z6VH3oC;G@t(Uq>-vJM+JZ{v+i>+X>Uj$&!PikS_YkYdYiDvfL=g|($Q+;`WVXjVBC z>HiDo-@lsCXNIzrzE3|(2zuiD<)Et_ux=I^{wfF{YvJrhS8||avHbj01}F0ceCX&teC^aJ=&)(s zu|xu%Hc-ZEwqAq_`2Mw;Br2i_;L)9^0lMSRV1vX#-n%H0+URWR1hf-c7pKUhj7;xR zFert0&K|};F3zE5Dw~B=0?*yF5&xqwiR@ZM>eUp$@?gf{eRs6M|Hu)J+Y~L2h&p8m zQuOY`{+W%WTyI+>3p>bXQz#d6GNCY=(coQMY8Am@yi4MDl^H*!eaUAL!lR994#p-WY$TD>mNJ^S|w(Q_zJr9xBkh2>Y!Db?J0Z6^jh_&w z7=$#M5ZdIV{Ce>#3TeQzyfK*2zvhm;pXl-;3)Y&et`H84;f zoi4sd-y!qvYh&@l65cD0CfI{egVEc&9?#IdAJ$U=`zb0+=FvYl3nDwz(X^6JauRcTN6nXDs60<9LCJp04nn<=r%i2-ryE9Hy_SV z*l&KGEEv!LOrdDqt2S(h+#~C`rbAyTmGPAm$MBis2XI0)i7W^Jer}+EH*UKK59wym zX-Y@B@}&%&Oj>)Mq#((zhy|;TfAraM0z~*ogI4_?)%lV*Q%vDQ2lwGCOAFGXNM|#i zN#Nz1cH)V-0VI|g0BN@h--7{%?kRU2zPsF@M8T2R7k{n&*h{Vf_(DY~(QTqdId4alZEpyF%y?4dOK85Q zgT5V4QuV{gZb#^!K9k;%-xtCD?8*53>BLBe!LG*xD&^*JV(9IiF0Qy`KN26i_G?|j zeSAMrzv4`2A?5LqWF#n9m|Ku?0581J&_I9)04Ia#@1z87&lj>N91TWg~@%~@O08X2uP<5=Eay%@|V*Fb-bvK0@lf6>aZAyxbOs@ooe;dV5?-Xj#eeC zrW?+O)kI`CkBoE?<@-js3cc6XJrFjO_wmQ9Upy*{2NRwtMVYfc+)yjGO=-!mdF?2_ zlGoX$4CsLU&N*0uUr!+J#Z9HZbzss(KU516h{$UB{Kx+KK14yIi)cw=d(c)g+@r=X 
zR$loD9wUBm{q<^yHJb6LSszjiB@xYhg%tT8?uS;TgDcj-};QZ5{kS@b%^w{4(D0if5u#U6$ya4@NV; zNNP3gRX2#1Nr_t!#yBSccCp$i;ul|hC2qNQ|9P*pI8egwOD+=PA7y_ZI6RAbwTY3j zNjd9uii>AYN!mD{P%4}e6|>PcRU^nrR%o*tjBKevL+sF)EUpmgB+E*noh#yd zEA#l^?0($Kghq~=HYNV-KoM`>b}@E$(h>yl>V=PQ-Ax=RENy|Jbmv2SB1qZU@-|Cb zuSAFkBnPB0pUvXR<45tev&RL6V6mO3$m_T5!Xw&QBpRHrqKd3CLC;VOM?IYN?5JPp zjxdfu0JP6Zcr*}kR7!XjJTq+JHs2Bjug*m<*1R=EsLq83SNd8j;y;L||H(g!8K3hd zXWofnt%<%7DbU2vJ&iWrdE^27ut8-in2x$nr1UPLRL<(=d z}aET1b7&*qPzW`~kxJ&iW4lg=#zV^9z(lBhXTB3T#G)&+pU8lOI5?f=5oO z8=$bi%&!hgWkx!&BvLbN&v-4eb?DojS8#f95r?Q2rHibgfbK6##ZCR|!8fLvt@Ak5 z$f4TV3|YPJD(%beard!nj`oB{4&GuD4~cs-a?QZ|LyVYh4nc<1n!Q!E_a#=2uvj5R zn2cL_U8<<?a%!h zotnh<9XpUO=7nhWz@b@`ie(hagYx2qvnO%t*iq>n|Nrxsqaq&8%Uj3=QJqMjE^FMO->Dg1mGSwb*)ca;c$(Vpa*5P5Vkw=cbPC~kq|dGO8^WO7$ERH7!|VwAc}&iH0m;+KotNP zJ*kwWE~3;ogh0Z~zlF3sB!$=brK-2_$$bXDSUT^A%MYnO6nCPbz!FM!zOkZB7qt{c zKl9YuVA|Mwy6=zNZZEbKQAhmn==I0`73<~KPt-e(t~vhNh(H#9pQs;K-yu4Varj<; z6r%|{Jlc0e<8XC+edO9qe?5K&p$y?y9S%bOZ%6m%ftJIIJa7_N)`2VE^HF^6Gham4 zd!l|yFP6$>uyN~lOs!vsOgWEop@3amHeu7`1cnC(kw|5Q8_;eiMZ`*m1hv>MTgSrL zIo!K<4}N^ptvEKTGX>(I)_-;h+a`zc@1MQ`TPI60cfbh!!jh(Ah_V7c18+1l^D1^! zGuA{gk1u@tF1+xqe~H$4Iv{u*<1;hZvTdu3_C0m_4CYQRVsvy&*w+hlC$Tce*2-S= z=Rdpt|Jy(JJvTEtgoj^zIllA5Yb79xzVw7rH-C3Fhs|Jb6p;V`AOJ~3K~#wh9+Jr7 z@zax-$>xwEiB!~1HJIw^!t2DlBn^Ko;PP@yj0>Z&tf`iYUmzl?6aL5xIGjx2Lnn^l zJ9B4Hm8Innr#2pu&*KeScH?I=1+?o`lRws;b(L{d-F`-uz4qDmI-w$=R{FAee4k(F>*HXb?kM!@iS?!a$^|# zr@m9nG09)!!9^4p#m~<9KI{B@?WcdG-)H=D+_pLTs2EZg$C7QlefB7BuB#8EJ9$B2 z49^=LN0y0dw}WCXg>^$k1@_%RJ3eJqS|4)c`ZdA7cqu3Rp3bfaU$KBOJn5J_3bB-N z&D_3+QiKONf|@E(R#682(ZX7;h)>^r9Enf==v!S9LnsC85VXMbt(MdS*^alov?9)* znL6K01b3{ZgxR=H{9|_jKY{IoEFE}TAocuJp3_*MB1=tHD!HO0gS1SnN8x)3SVRY! zv(EJp@c-!O`97{<#lI_S+2*6O&zAKXiF8Pe(&$QFC+FLGE_;Z58ntOX%Ne1ja>C^p|4isP{A7sA$w+ zujQ*1)*%wGx>UoTT=6zs^X>2bm;Dm6@1gNgT>3MQ#LjKov1{8#R2G+U{P+pX&&}c3 zu@jhESi=1BlC-|Eiy%|TVQg#^+cs~<_Dx$+ugu}P>u<#U_Ys1RT6=!-#UxP3Wbn23 z{~@0GGuzRruW2Unzp48VFiWbc3m9Hib#v%AJvqS)8Ob;jBuAAfB7z{IVgeHqBqQM^ zi)0uKARv;X1OY)MD2OPC#9zLEcb{(ubtdOF*2^A$hCUH3eI-#z}wn6oge3@Hzo8i`?j$HlIk?AG3a zo`!>%9kpjU-tK6gk#mc2KMjj#!21lA(b!55GuKUei<}~UVSh+A|t^Ir0@B3Rn z^aO(7MV-Izh*}QdA3yBq*1DelivXbISR%L>oRTTW=%fC~VF2(E{#hzY0KnN?-%R!3 z3mx4$Yry5RtzmLDi7XWt*)$krk4ELsh}D^4Q@VkTBes8$Wu=o^aFfwB9SnxDVR2^}mn6zFOw%cK6WIB5A_M890o3Fiw(#YCHg)Tb&V4U`?&!LtZ zGSAcGVjq6@a8)UR%J&icp~N?5#ji4b=y z`2xUcg#Vv?=^|!F!`)Y2f!Xa5JoMCGaP_?pVa4d+xL3tbv_%|rI3707X0T6NH@5N; zXsfYzpR90-k{J@0K;0 zNPzu~qY<3D-bUCV)`nVyp~|2$v)aA8Om##AMTpYXxt+y`@x#Qa3<~o@7Jq1f5}I9tIY9JK+FYx znCig+9o?oKS;8`;qAt2KDR@p@OE!{mYdJ^0)y#{r2!Iei#j!WzrQsmTw=sS|VSf@T zM;l!Qdk<_ZmM) z?Bid4)MebL3HqBIopGi%1GV$SeW3|mXr{#NM|9$An^RkHz?Nv^2f`s|?|*M7GY>Cu zan#{eGv53B1!Oi{NI|N#L!sFfhqjjQ$So$NziTRwXv`G6LdGM^Wo^oAP57g6kE$fv z)3x0W!2xag!2}Lws+wP?G;OjRRa>igIO#hl;rS;y{`UX+($-SeiMghDg2{LsQ>OM{ z=G5t!HhB`-(h0OB<48wby#LOJSk^a;)TAl+#1>nkR2#+PPyZQj{`2)Up2!hfTv&2BIM!H>X=hL;R;yp~>S;sf&0a^M^9YrUCSIPxk zzW6=-hi%llk+4bB!5K3*#4hm;G)j3{bGEH4=#(*ab1Uhpk%d9RRCZ7KPZp`v^pR&X z5ce>D#fSJ!e=iEIgD%g(*Qd2yG!wEz<#c0~@{$EUWWVK7e z2J>&vQ(nWaB1OQ1F zI4wO9d$sG3ED@fgHc6v~_GAoQZ7ExWHT8S7YeWhyq8&*4sZ|Whyk<|2lQdZ$(RxFD z`)w0D!Qc#A1^DC(}vM=%)kDv}MuX z+b@#8s_(DEf~X2eG6|;xc6N4Rpsyd9Y+7oPgM)*}X0wt-kt~?WWPAzVbSjBri5db8 zK^u6JhzjsTt(Jf#>FIzWog+Ymfx(CLiwa_4vUdaVGeF>lv7--`URTt9c>V`>b|x!1zKXa7ARdmuKxse^w4L&*=H2w*-sn~0h4+h^L znTwo|K@-gR7?)@5Z1Au|WlROV`R&z;hx1Om2#-JTlt|b9?_Y*j(x@SwiecXNTVju$ zcEl&Q*c2OXFcT??WHE@X)lje0WF&1PUq!KZ5W_2avGn7GNVLUKXz#*ZPyGqMd*lx& zg);5%s`*G*sHA*yQI1>I}(8_36SW>>5v=Q^G4QIdg9fyumz8+j}&B2 z_!kTwJX9%yLni@8^h$V~iW|WjjXJJc_#gZ&PlmaE|K^bhPMfnac8sQgg3d${;MV#{ 
z$>?pTjOk1;J?tVxL%V1Mi)tdi!zq6z2&gsCpNitjFRuO@bkbko3HgDa^I?%x7f1cc*h)f-L83Q z>NVP~1B1p|U4*Iw{HsyM`O7}WM^f$sQZ8_A#}w?C>5%8n5*?UCWk8m%SxPeH z8ryAUQ2-0^i^P7H^}EeSpv0rNE%ax8wjm!H+zm^YW$HKL4SqA zfc|oSLxxO<@vI8TZ+Lh_3>-rT>ix6rj}U?o{?aK41bCIwmS-e>NzRiYKM~~(kod>! zp4vbE!u!}@52MWIK-%49hv5NRwK9>Hx-K81&0coD40*4x#ROm>Z4aN3++%Hq|7GECunJq-~Ef$EF+7j`t2D zcGf;w2(lUS^v&1U=h))#eR~?7`(S=(7DlTdU*`Z|9Tt&AD+EWOV>E&}9E1E?t%&oM zFUB$wa~WW|z(t+Yuw5o=5*XJ2o~upegh>2z9VhwIQFh4ec++eGe{PB@uNNhZV36Ut}(p2uLuWSs~D10IW! zHIWDH@75fJ$F>s!MIp^xkVI0F8S3I^z#*Iik`s-Sl-4O~7{oj|A0%-5sbkz_g$ki15+EDBIVBJ7EXIgg80;{h&n0Uc-0NK@v2)@74pA+1sCwS{rAEd zr+gRFdfHJa4xvIgkVGrahG+X}Gzq8fBElN)NCCquhT!J8!4il^l4y^`v1-+S;AUe$ zEQ>oI{R3`!_)!#EAnAzXVE*Yxv z@8|%Q_4lK%a@?vB#&JY7ngNS!=~|+qPZrS^hKJ`KsFUe z2Rq=*up)&?4w?@RKBf;W+@_7?1gw3TN9fzUeVDYGdQ3oP#6*)))(aX3_4TzdP zG5(?S49r^fIcd$YM(8Y?&mmkNXbxWVI)nc9pnujHdwUJC1&lsGz<@U`Gxj{p&gXk0 zkvPsd^{06Fjz`zcw}szj0**-3!MDC~48D2Hmyn2(Yf=^nSDmtpDD@~Bb>v0)vJQvN zq)wuMgJSOpO2b9uhI9Dv^>;8S6~#td%)w}W1ktV@{Q8lnF#n;4MCC2~`oR&q;*uX9 zgW6!9%;nHQKdz2S4XKBzAQ7`W)m}$Dj_3dJArAP#^(dNP`Md>RA6|Wt2o*;b=gMLbQJ1+9&3RVpCwrqc z!84UnT)ccKMyl-c2PSfW&V(758%vn!LV9M>f^PI1*m>5TN}?m1lGaj5{A4qz4Ysb@ zZ}&4d!M_(4Rq}%qVa)6N#ik{k1CE+uDQ~&5NLu?`Q2lot7KHY@K6-r53_=K{HUO zR45AkFM0yzc+xgwpm;{7?DOY$F+iX*XL-O60>p9GCAc8uPg0?j5jb1G)LeKYh%gY9 z;P+-SY13D!3s7py#^=%b$wLdXYJEo&3JuLe6KBK?Zli5i>pXTt1P?Ie0Bz5IXdeWQ zxjEtuYWDhh{fMQB=x^tD=468pIUno9bnL3Y_2wnWj14kE3p%(H#(^my4)ZD{!I?s`)Y&J^xRPe(*M?PU=R-ob^$vMRDbC@5cRq`HPN!HDUd{ zjb`HaH=mBQTa4*i{@%)JP1JmX~SvDsWL z%a~xD-%p#y`^bN3SK?j@{lz@)_`|cf_xa~AC^hirm);YW)#^w#fX(9xe5P|EcFA<0 zhe05Rz|ek)ffp>sXYMl?;B^pE7C_w?c4qCqwgc~z1PTq5;&D7S+K20wEX2~Xsu*q) zjp5AM8{iXOLdH~!9=%CUR25qdzBThcS|-;W1OjRjv=ku70b|t$kWWW(Tkj&=wThZ| z4i4#>fUosTLo~+`%4|DSBtW|g%(pZdx*#}EGTnHua35Q901&b!tGV~q2Cm_`H~ngL z$6xKq1rFKRP0i+ke8k1W`62v#_Wko`gsh}Zkm`U|w}1nP zqOORlFkq3=gK!mUDh+Hp0&D;fc51e+7k&WN{nGh!45!>rYH#|z!Nye?f2*8=(9f*% z3q^PGTc33sxVmtBZP!G=6|fKAIMeWEY|7q))ubf=qa5hhEj7FI<9EXz^`(%FQ3-FG zG0>8ULiI9&erVfmYLBls%Ur90qqP0WUzI+r6%K3y6eSdP*jOgqJzNXP<(N=B$rubrchNdXR4KK{P>f zYfOk;_T25>5cN6HtN=y|dGxL7MSp)k%9Vobp|lsps=vRAS04BacHe(TBx6yaJ%jID za3x-R>m5I+>dM6N^tGpAvkiMtE3vd?91Hh_=b(Sp5aRJDGFeXLR4eW@YAD9r@RieU z!~=hN^M4ia*tIAd)MR|KrcT8HyY7uWw%Qimu^5I%hLO(=BT=i$w8VHeEgA^43qmI=I zSU*4cN7!$hO;8wN0H9g6K=0;oME}HoaSgIg>qha$f(5wc!H4nk+wV|TVKpyD1cBNd zU|w4r4sM?;(0Q5z%!mjWA|Me4nWk!ln1sJ+iFLxhHo>?@DIjtMbWwY?2CA_b{!$sm zRUdtT|5PgkQ?Nxcjvr2+i54Kthw@tUof4FY{y&2N-Lut% ziw9ON#xDnZQFUA#K6x@e-`0cFa1~{?6q?p*pCD`>ix{tUd^T%&TdR9EZq$Dct9Re# zEv?57eIWQ__$c-R_Gh|YC0pmcs@IW=dAM_=AM<;c!zzGpnuz0nCeFlUk0Xjr<{e68 z1aLKGjU$3t4(K8hz&&MD{ltHnuT@Q&aF0~8;%rmyBEH3s2K@CbBa=Lx3?SE zi#_}@_)(PtG@1S5O6N5@>58kp#XpFyV%FL~)RhHa9XV7fB^UVv*8PW0=)S7E^%^jjDpxL&Bik zV&8#VNJX8fW!P%{M|l9s2MFA@5A(|^woyQ6ue3OQ-_8lbVC|l60UM0yDVhgB#0ol_{7@B9;7KvvWPKx=cUgwUK|o7Cf%)La~U- z=pb^#1ERH`EalMu?jpF2GMs7!8+>9Dy#B#reE-7Bv8caa`?%|M+;q~>`07!+qC7Mx z&tJ+k(rVPek_Ag;A55IlfqK;hvTg$(4L7j&v6o=s%KkN8-?+cj1@Eub4W`e+fqU(P zowwQok!l^4fnh{gs>`O))|uAvr_}2ZWUz>Gsfy9j94h%dqLl{R8U>kRQb*2aQpl#` zh$UlEGOIK+NasjqyVl8lh|6AziqzVNOP1p5yYIy_Z@#Iqhz%Oo;mWvnFaN;_ICO_? 
zkQ>QK03gLKI%yLqGDu{GMw`L}g$|Xiqm{ac$Nu&*Zhh(rEL=YR7-ie0%0OUxB#K?y z+p%}YBy19iAyz3%=|hkigH-7_TRM6J6`Qh+lq`AQ-H2q?!dI%(g4R%uM)9Uw#+4s0 zz^kJZTMZOYgWjca%O^)b$G z?Wq{Tu~-e`p$$F50F(OM^ibMaGZ=Mo^S~-RzCqmp03ZNKL_t*CGuZ1BSa(RLaBj~G zbXaTV+j85UsW!=|MhH*^292PoVNp?)|peETI&v4hw93f-&Y$u2T zc0(%A+AU;~Ez4*%fvAr>NRV&PMK6Lf5dd-k7!?va&$MO4fm&#tt${Q;g?!Nr9f3&n z3X?xV2;Rf!$f!7OI$%2EbSfnZQ#4Y%7|FOzZH0Ay!f|Bt^JS3%b)0O0BD-HZ2)IyP z7U2wh&-e0o_LwpWO^#7hg+eI-L=+;WxKP9s1R^3Tgj$cWH^P!gEXT|m^pMZ?qb%PM z%Ga5Ck4f_F!wcSHM;ZrH0GnVRa}G_;)gl9}?Hu;x?1Bh|cuh5V13iiJ4HfD2$GRf? z4BmIJ;f-p$0gOswld%n_?F;Oy19kpuVJ z6K9-rDyB@Hh-5mA?g^8`DY3Cj#%CH1LQtGB21W{u6qzcd`@H*nV8AN?&-RN)=!ZF{=xoLv{j{flXj6x&hKKK$4gz432O(7+%%yE^61GD0|=GSb1!*rUSGwQYdBbF87-834=>ICw~;R^^G-yE<56~i_gcD8Pn00 z%^=%8QCZw3;PYde7VBY40OCbm5BW+3zrEuwJaF%C@&3Y(;Kico>Y0Fz=FY(<=WKvY zrp-dNe;K+WISel8#ff9vPsPNpHtjuYRE7M-U}(h$ zA0yMAK{89?EM09KjZ`~+eED6t{+{R8#dbEmCju86uQwN;-F+|YwDmSXsfuDQkG2%0 zP2(t)3mEDfz^Z{k43$erwHs?H%n>qVt16I)D1edL2UIb*uKo)yAjmfI9`7H1I&Ny5&ZML z_g4GWw)B;D8rU+C#$FR9V4G|Qrn3}UtRP0ISQ!G-kRX6-SqeymZOueBAzypsT2-{P z$>%D}aTiMxz!l3D;F+O8Ar$VCNZ`j)=V1MM6mG4CnxLCn61B8R%M{Y3YawRiRZM-G z`-*EvksnhIWKF4+?p(e|Z0;AP%)pNEHn`=AAO|ue$+SpYNZWURHiHe>IPHiQ5cdB7zr7U7kAUIG9v&%?7__-@Y> z#6(*pI9{ur77J#1PLl33V5lSNHqe$yBAt$lyuB(l>U*tJz21aS`<}J00ukuHCa07C z%UY2@TGyJ)$6+~A%I4aEpfhu{{3J?}{Il#;7d58-*Z=vx2B9ch*Jp-a=7vwFgs{iK zA!Hu2m!IVT&IM@e$V!Wy4oI=311bc%5=hwgJUVc~_B?mZ>{S=2L1 z?-*3D1i<&_NAtoL&}&y9nSlUH0CXJG39xO1%IYX$^YQN zTOS%@QZ`tB7JhcsCD>}~tcWqOAMm^XIR>BE z#kPOyJ#ueC$u9PPpCG1#KrQ_8b0&w%kl4zY~6efuF_=unN$>;ZoCQh z`qUnnxA7;Cr~#w-g6NxPvTexcav1FAJoySb+R~UjsT-3gb_t9rXa~_X$@N*=*FJZl zBt%>pLPWA3GgOk+gmXhg&Z#3eI*64YE=6USdozwyIw^a*!a&u7kO`d%TY zOYXQG_dNfCY35veDtXFRzKHK0{5cHw4axZleJe--Ep%GS!2Mrrp>w9AYyd^fL@c*{DP9x+indPEn9?lN-6@xBH;1u6LIu}NpK18 z*v`jr$P$<>gU{>Q62WIWWCj8<+Xk}F)5$nuQO|D?w8T#FO{P3c=S@8eqUj`N@*hFA z1OcStt?ow&1bigO!lGKHl={EDHZxpGkO<);xNvTI>A^Dw$S!!A2d)ctRX74X~*AS;y4icwglg5aK6C zKx_P*9S{-6&d)?BBvMtzz7F}(44(l4vp@zMBp~vooB1H6%yXmv25lzc$sB_1kN2-m(_b zl8xevr~5n1-)tIWXohC*jnrw)^O6jQ|JWJqq5B(TA!c-@YzY%QahMfR1A#mpoN(Na z@$$2OZ8}NvPcFXjY<%v(12K8>6lAkq(#mB^MIn38gkwqx!`vP+r3Z^;9RICv?do-c^Qsl+lm{QFr94J@o!hev&)`=hyPfAx1pQG#6P#VplfALZjh6hliWOytNhxK^rX*VSVX6ZEt zk$42XBRPEUnj7%9HDu!9hkD}S2jb)-zktz!5p{6HA~=WF8VP1Y1_h+ivcp&?%Eh`4 z%T!#Klv~$4_h3V?B&B1!uMkt!4NSf(&Ua#g99 z_Y@q|*&||O$`xpd2m|-fY~Mnv1>$EU<5zbr=P8qH0}hzrpf&~w9#ZgS@~lk|D?c?? z+l26%?tqYu52aO#Jcv}a3+FoF0H~ps7LjUI`u3$)pH7?HfDGIGSYIs)Jj9W{Tr!0I z+uE{PpD?=Q9N*j7)h^0?EFaKuOTWH_Z~c{TCIgy`SVsG@okukPwIhIzpXCBk6C~@O z*+0txWoG^wIPg3~fso|@8eG%sFPVsqR=G)J0-;V1^q)`|$}v9<_s1Jw-Q+m@#bum! 
zc(H}wuK__o&u?9n5*rw^!T(8{j%hQrLPNS<$7bFkh(I%tAOz9vVS+CGc+iLJAI~U^ z`9w&AuIQ%bCO5_5P=*jD-ZibuF?-5Rv$KLHVZDK1m)L|}?}DlrYK&_ftgm5U40ssC zFk(QqY@o!>fK0>n$Zsdqwqiv;zH->{So&eJ6ztyn?1qajI2*IqpNk3Ilh9}|&JUSI zgLK&7&`+Su`SH=_s#W~()RXaxpIxo`@2NE0>9Y{YbirZk8OdmU{iu&{C`|<}%9!lM zaNb!bVBb$|hW-y%V8X;S+9yoHjgLN!lP|jk-#v5}Ty@$}sE-W8i9}^@m4@=DQD7_U ziSwwiJkT}?-#+J7-1gMVkhJFl$t;rb zD7rHdBx9;EP^HW<%Pt%&BNwkjL=tXH5DhPqfRjlgS1ICOufKt3UwjU4yuA>8{R1eJ zi_#g8Nv1G+=5%bj@rIbU-PYJ{i!G3G>nINNVd?veQ0FLK1_vDP&B)2s3|W?+(?BF1 z!*g%FjUWH~S_~A%w(2!)OlD~Z?vK-tJrcRWA-Qi1=tRfNc8-{C5pGS|0aYwCif*jo zOKX;9Y679kUWg%ItK;d{Uc+yG{}?`6+AOvd1os*!i4*mJj01cknZd!G6R>?k6$N?h zT7|aFpd@wLI|mLYoRXO@h=dttM)ZXHmn9L`#S4W#T()3I$NjcBndvd02N zTcIO+^ia!m_&ZvkQP}p5&t4e+6M@5Bd$3@S5H?NY{aHJoY%8V0A}p;uTg&6pr5|IM z**1WL2b?uwI`(es626Go+|X(Ge{6Lv7AX*gW&Nd}4mMpzsFY#+Leq1!kI=NX`gYyg zeH#OWPb3`sg`SZm3fd4vg@E8l%boQ6$KxWVBei?h&&4T6j5a_@0s=zxTriaGWy>Ek z_F}1o_VzXj5TxF~34rmK3R= z3FNxj;#VR9z+poQ@v{UVa7a~$1JMCUOOgXYBp56(FyRkK01V`PgF3d&%>5hfGbK!` zj-R<(*gm{4Elh77s##qYLGTd)`@n4+VBaShiD}WZVnWi^rIMX8-`#gWw)_)%qCUzK zkar@Ni}f2VUG%w&a738f)q=&sM{Jb%V}us$q#s%$iXTL>%aU( z*-TampP1T`<|QAL2Q;&{)2(D41#vV}F7veqLLCH(yg(*VzWCCsIN|6YN^iJ*#i9{h za@hqq^z#Q}`qWu)9HK_L*5odliwgjgKz-02qXY-%UU)t(J?m@`6i;ux6EgcBjM2;_ z_MJ+`i)satfmNuz{RT$={u1i_%YpnTrge4WhO*#FpbvHSMZ zal!YGf|DQ7nkxg}Tv_%;BEtZn1}7HBzn2uT@3*eP3W2A`_OCG<{^XWhj@@rBoOAqFP#7Y4OLZy8n6F9=+oanf867I` zNu6Hzg6v5%3SDdqOBCg*pr39mh9&*Oxc$i|@aW4g%S6T1xTlsNGRCzf;);EmM+B7d=Z~%%i^1p=Aff! z*c9Zzn5AoGN!k*BOOZ|kBlRh|>-b-PIGNQ?a2%N)_{6{pygS&7oq8tWlbJ5KMa5P~ zu(ag{f9u*CvW`AFD5b6S(fYUAe#TmhH7>ellkGnjn^)cy!`~{AqaUnAx(Ya=o*27#SWGJ^^pOjt5Rk5TNaToH$q!F%i)npkbl% z7Bd3__4DVV3fQPCA(5>$Wu0T70zx`HkmoF9I3fBe5zrCZ{0OEFd->KRK@h32c$_8tQf}{E+qs=M!3RY2uL=C|O3& z)!ubmLNU^}ekn=2=EAfFON7$0znSMC%9aV-_N#kv@fjEE;575vcAL#{?G0C9lTEio zHq&J^vki`7<1D+&$Asca()#VAPvJXXJOslWjJ55dXglPKC~`oM6JhUw7)Wy1*&c{W z4z&;7M)A2v;VxT@YJLC*e0m-(`q6R7IwfhBOJzIp$EW{>m5biO5nn$Dl~K0fxiTWQ znkyp8@*9Vedv#RfX`Fb`o%r<=ua2Aj&sl#KjymKE*lNQ~1O<_)nRSw8J;W)X#ZlTZ z&WGojN(UYy9-RQG__|M+L9-m^i7Fi1+~V+JF(lgB@aNb5flIEx9B;6g`q9uVMe{lmsWQ&#=hFq zvdNgCRRl18>kuE~gjznXhiSSE76NEqe9ZxXB4O&I^nJp@y7u|I8+1N&zGUf-jEtb8 zqa9T5^O^S^4F|gf*kkWVNr3P^Yh{Gzxk1EnmrJq$IS!baKQ|9gg1!H|VRYt^sFLuk z9zPei$eYsOM5EH5D*gSezmU!Ec@jX-k(bIPQ4!-6FbE)wCvyhGfUy>7MhrLDF+fB> zGg=G#2a^QNjWU}}h!hq!x0$!8(O6=f_~URIZC0x#Cu$i!kl2-tXlmPf*F&yN0f5l) ztRuI@Al@}Qyg|>^4_I0ZjD{5odF#L;4M1!FpuaKUhxuNckib^mtS-<*apX8{-OcV~JU&uZEC`wB{M5bQB`KO(L!*<>r z$w(c^Y(}#EW$*t7(>9z6)GDYKijujPM@JD)Mpe8f-Gw`!{5wv(_*Uc%N@g#Pt!D>+ zW`FFn`@ZPTw!^KOKt38l%8Mb%{~PBZD72#`G;L2)u`L;$P5uqZvJ~!WMC7|mku==S zIDY-egSh~?mMIR{dY0k z-;0QoM8k{m%f;|U8bkr=sKwK`{GPjT%Ok%ZQ^5^C*dzAd6X$>XDCGMo2X36dXz~Uy zuQ1wu&3=90N-))RB^@WyF{D8z%M;{9$zMb_fXPG>D{}?h_1quvz@MJSz|d&Ru3Y_J z83=5XN@4f54$Mn;W3sdXD$=S#Q*^EzVipCY9a?6$iS)DrJuXILE}qOU$G?^@#~$4i zF|Tbh+%lgZ`}}P*Z&$MM6+u3s``HG%;&_c%jnPTbSrdk|%il-ic&}E(-+Py1LNbQ! 
[GIT binary patch: base85-encoded data for a deleted binary image file omitted — not human-readable]
z!s%W#V;NlauRp;1KK(5th(>-T!0@&$nAmy(;u*Fonx$9J!S|-r^wN@g?3WJBpjEFb zfEVJxJxBzCL&P!}Br{n}e$C|a$PE^e>?6U^_(+_BsRa}Fib##ek_fB&HL?Pep-+0M zWx`_o1DPO>nO(cE_YZ%tQOZ4!bRxh6$Hg~0m|I%XfHuGHZGUmN5OCV275L!mUxG&* zzZRYHw2dkzA4s-0rqXiYCPfNW)bvhAI*cTFLbOsT^lv)>iGFqgxB*ix;jviT5?GU& zCL$TbSO4SJc)=^K>Jkjy$qN#|L?VL=hSp$EBwmX(1+_0r^tcA8}>FmXTyjp;QV@J~Wq1B9~5}LIfa@(U?*a^wO~q>&AyA?Rd|w2hcY#j37b9 zz_JF&Fh`Z%zwbF#AYgL}xZCLbEda4O3a0F5?%FbZE4&g(&`+(9tpJ1f-WxmTzJ#hU zxMJ`$Ff_TIl-wszT8>XEZ# z@bj4M+wsb0JOis~_^MUG{!P{{-PA5A68q1UBq~u0N;TUdZlPgqTWurrglv&gPGr65 zauUwL(@+W`CWK4W#kB#_sIrAf?Vw^H_rTunuy+FhcRfloU2QM2o?R1TpJD8H84j*< zK!k`evK!>22?89=*~Uk|^-cWf*T3rmJ1$qw#$&krxlhMM=bnvNr*6oBL`gzWDp*8n z1(oc0iJ@LEWBAx%jGwwiL`%<%?;-$^r8h@xUd5evx7r>uRLhYeDpu^Yw7|pj4z%oo zL~4695~R<;qX5c7x~^r*VkCE0qb%I-P}^tS%*9}Jm1=}EXq1ft#0JLD ziRa0FLQtMX=k8kpe&&kRh>xxXIQ6o+fOu&ZNGH)EYS0YTr4$Brg!5F*Yawrn}Hy~;bl+P(!Mu4}{TOlbC8l}Q9k7%_FX z$QH6BD7!**BaR2ATDb64@4}z%dSKZRO=L4zdGcxKU$@FR!ayjM)>Z^yb40qc49MJ+ zMv*JsWK;$Mt~1O8lf(-kQe)&f1EefK*JW!Dn&yV{b*@z9*2rCmBh~w9<*S*@cZxVmnxf8#<=|=qVp8Kq2(FOGU)HOqW_}H6Xh9{q~ z1z~x{f-6tX6$C_rrHeqFHj6bbm;xj3Ycrj}@QDvoS;rixgYBb=hP9}QKuxWOIPmSC z{vH><^eVI(L@qoAMiT=0 z;6Xl5$aDb!xfrbuC!PW~)TVG#Wg2ygrV#;PJ=~ANrPJpKUJlzK6i-Jcm{+Y{Q~|6D z!erLU+Wgq~m|FAvq&_-7lJZp3Q)?Y1ce8TNLI9{BSgt4-VBMV$@(Tp>XVO@hos-iS zjNWQV0=MiC2-f<2Prjz=u|AjJ`*6@uRldHE%c*ohsDo{SJV0cxHw8HvJdVoJB68_C(wa4314p=F%N0uT77Q{M>z&YeTSQ0^ z8J&8fuaB{yynGkMrENpQuN4uX3m#enZ!P3bjKgN1}}Za-{PcW zHY3f0MzIwVp2Z}#Y@Zp7rX|WN&?ynpG{-7fw{5x&tT_{bi%EggWu*>bar6btHT{%4 zkKQXF@al1{Zto?M#Z{xB^6hTsosBo-oRzX`1}F}$)Y=&z#UxrS=ir!Am3Me)sT2;? ztN8d=zKm;r=f}62^Yo7=oUj$|e#MKBO>>Ws9LmwJj`>bFSg1H_))p95D+?IkHh~qV z9B1rujR@>v|3?PHxmU;7v&7s#PwuAcpsflU)&4Brbb)~!y2C~y=kF|t%k~X=apQZP zZoe<$M&H)0t0x#m-ajfQIDATu!bo7`-W;4-#8aRCBHVh*ofsPK$9Yda8!vh33-IvM zPEsJyX;U=J$f8!lGDS(>CE%Ezx*XQ_wJ)@99JWPv7#ELy`yA?XXgAt(lSmG)L)bU0 zX^rFpbY>5z#-A({kSO-ECSwW_EDv-VXtn8plVX53<`1GXIVj&FA}hi*ZeoV+WG z8Xu`U%cG~^(>=!ou~aT|?Q_etZs>d+0^pj0kVXtM_uh*=H!iRFQzPRA=bwico&Pr& zO2^SA8brh)oQ=xQ~2k%z8vR2>Ugx5rlS@E z1B~2|DOakH;FphLoX-dg1q`2f5@&Qfl*OSHIto^+jCeHUk0b*8$F(=&nJ;{&*4N*B zCVO=F^kPr!Ux^da{jz(eVCfa$ySBR5zDCsnit@E$>*zgQB89Dn5Td%`X&$N1~? 
z^74xzaBF=If2{ z(Q%TvH#2K0fz28TWo_vQ%+D_kLduAyh}*c)c@$Ln=L}pBz(m* zl!)zUGM@@?bQ!y0c^hkVL+-%*sE*8v&mocVSt}G)U@X2LPus z+QBgYl1e4;tuK8S@BYV+nD+l+55MWmPWTP%qGXO^F68j6r#=}Mo%L7@vmCTkGwpF@ z_f~V`Itb;EVxpZ<6B4nXlaSa@Wt~$~tqh>(*$L6eIlH|rV`G3x1nxWJ-ieeDQ87^5 zM!x^(avU=%HV;SX?*;vU&HTiy**mDUT(*h zH7juC3!aY?)=o$PPDWkri?nInhjj(Rl8p`$+T-OVVE?3Cl$xnO?F5B@EJo1dwkVrBRsF_5tvS?2( z^BaJH8uu)Tf^-OU_%@+3B>9eei{~gLIj@eO&9USzC=xQlMJ3C>12jtLaBMj%j`^&} zd7bH!<^aS7`b`#i=@7z02ZT<>M^^&r0da-Gr8&e~AzH}-f=mKvECSUs!dObASdS-B zT07QZ`KyCiDrtkegcceq#!`SZCCDAmjV%j10~?OulWFeee<2m zPAoeb#*g2Ie$ETvAU7r#GG|V7%0clKx4BLkHPbq0?ueems(97}UAuiFQ7nNvt~^EUWpC7IS#>3opSFPklII47!>o>Z4ObMM7Q& z?*rQ@V`=>E?%VJ$-}o+W`X-nfRsaAX07*naRC8C4f6Z_npL_F5@z_(=qgGvVT9S66 znY7Zza55v4F`q^4E8kd2g`}MTSO4^S{N2ll0CW)y zE736d3GwK{7#@)yRg0Y!j*oQRy+7$q+K-3<-0B|w4iOr&Z1p|7V}Z7IxD#Bma-N|u zwN8jzt8=)aK8Ga^&9W1W-~-owJIp31P-1L%BfW8?ZNmh4M5HkGEVFnj0dndeNB$B@ zrZXlfi}e@&Y<6Z=`u+UmrE(d?z5+_6CAAZh6gNG?lEN4k=I1qOkU<4Y2rLJ11F;;y z4b1;0QVC~}&8bsQU~Xncs(mz)L?IXm&`IYQIIwi#l-k(R!nQ*Go`gY`D#%9=i#y5J zG3c>2J^oSeJ{iUCaH@RZTFv-fSmLm_%GED51Sx5dKLaC&?+f%49BILWwWQf{E0}N` z1kZRjw!Y_!`ca~`!Y)^{|N3aax4HNtzu+c&Z|Mp+2%XAxzQKfdxzcys# zlOO&9KKaql9a%qeo6GodCfBRa)^~9I)~X?fICa}`c)>-_z{wkqMT|zVT1T7^eS^KU z%=+w_cx@Ew(LGT*&7KFu`g>-rSq^JPV*$mUX2BFQw)x5vqyqT>g@B( zf5&C_YvhY_Zltjn^-DadwltMD+UMfmSt0jzpwp=+5NcB)E_5xVsAAn&n~`5NZnC8MGEPSAa06nsBMx-Jo0UPEQPfml`o@W#cgpIduVOtdR-*`y$0gPOY!AF*|l z8ZT5tr;teJ@TIT(2rt_4R)p+~h|<_l=l6p19*38`_)?s4#wkc=iAvV2h0bk%l|Q7x zU~PnvWVZp;KPw251t3vCM^djG*SmmVZZ|sf6(q(c(8|!Ruv4Xq=79$gEH#lD zE(n=vPR}9D^HLZfi&;(?)e69Car>uTT0*C?h*)tD!Qcd990)(ZAF;U-g2D(wN@CNg zbqHS*rA(W~lb?msX-kx*#g;*$S#fHpP#(RZavAM`H0{+cN=bBU-D=0P_}sUCgExKf zi>L@1?>@34V;FtdiJCh=-d2oa_0EA=xy_d!dyagU8g67`7Y-WQ>ktC3Ywu9W?*1{D zTO*BKvSchwBM7I$0Q+zM6Q*vX=1BLvY#SZM6)(ON+mAaIo!XKO#R{D`6Ya71HzwS} zR$n2goJrGY;Gci+V|?YO*PvEw%N}nV>Br~)@rBs7X#&kk2|F7k_-W~h|8Zd3krf%9AEv>Z*b`=-i=nnE0H=IE&mbX0jLRY6x0>y56XLWZ5chkEoLtKM?J5wG-JG4xJsi>Ky`tNAndA^K73v~YWE9yga~ zQF1=a5`h502e183nBxszD(eOn`tlmcL!- zFX3wT>QnuX2USqFH2$fj%`vrX!(%5wLIygSq##f-BGQozm?r_n!#UayZIJ_IsNB>% zLhyv>Inakvz;x&uPSju2L_vPOccSXqkH?jDQN6E8L7{j8efb>9wVKobiDpqTMAFYo{2G05Mnq1eS96f)}@9nZC%>qcKQ3*Tv z#{c>mzV_oEV$n^?Jles%_{mSgW#>Nu$qvs5+aujPVI>gEe0d?{6<8NUtdl^j*+eVe zz=m_TAu~E?nhG-dh7?79I3hpVy^k_}7bqB7=&>ZHJE2_}^+`DKR!2;A@FMvaLR5X2 z>6TgliL8&S`$yWY_9X2hH)hyBe|dJ|(Hk?W%H);ZrEuQ^b9l@XpNmNaQ#}jn-kG-J zoU_ls^Pm4rJofA}F*3|doEajO>zD}}f5tLW3z(#-P+dZt(rhG{B{GPm^9W-m10HnB z!2WyCyk`$Gn>V6G(XwP3AvpmD_MtOBhve80LJI#>77#A7Tp1uaG79t!qXX*ohiFyi zY)<|B9FRyO%ncz_$&QG?0%Ftq)IktX36PmH`7yjssqIlfh)jAD*^$^-+k|#vsT_iI zR&0IKqBp3Hs(f@}k_;xg5l`aYeGRt(ZuqF|hS`6jrT7CYMEus)NoZ zMd#Pm@BO^%UVf5R)wPfLgvIk_w8lOWP*BJgR%)bJGQ?57wr7YcP>89C{kPwaxj)~v zY%iaB?0Q`J!oS0@qbtyER)iS(ohu@<-&>A1MJLaP4QvB4#UcFUH#g#4U;7#+7Zy}v zd(3eY_}4eS5Gw~WXx3|FBw|BHM!7b!|5dYkKc?qyE-V{9Bo`P7Lmx7zOS_r z2I(|D_wAqKm6yF=$V}IPJMwG*9GfcODFbU!AdgA6rHjBFzRQnhLwbJ}9lvfI(zOSX zVQ_zOL58I`p`^x{Uy(!ALcCaXHZbPU$s(0A)7;`R#h-FSS*MbNYbN(c^rHr`Cko4GDwc>V>(mv z4>T2wLOxQ{GQyvC+Lkc6<#RI&6jj;@GRL5n6V}vwoP=}WHd5FY)<}ac^Va*ikvPuN z`3?dL06aAAw~o^ih{*ir`0f2;NW_wO>t*l2e}4N%N9;v9!eC$+orPJ%*r)DbuHMhH z3v_jK$MQe+m^FCHS&zY)4?7tv2L}90;8NQsR~ZM~v7PRHI6)`T=Twt7?SapvW`7}l66;6kW4uERsit`Iw%YqppGa;(oMjqL*lzRT{DS^&^I$PgM&NBUV?*!C= z!43$)=uSD!)DXJaydwcd1s54f1GNnSv;EXtrZ9A%cmMj4!Jb)Vh+3_c!Ly!!8Gihq zKRb#Y?CxKlt>aGEj0-P(JkEd8;AG zh>7oIojl=>pXHG^hc87+dJ+C6JKF77>P6+^PIrUl4c>DNSDTy11Gn6Q%KrV!*7r## z9gjCW_gPr0j;$tIa_S`68rq%q{$LKP2JR)U@e|R^M2fL{{wjE7ru`yaNfW=47w6aR9x$^;d)IJfcM1Nhn_Z*bmk(lhy^MM;DKT*g%Ks5kGxUr-O8c zwRcyWRbPi0X9@3c#HC|S^NpIEx_GB 
z;Ipt$5&@49J~Ma{23b#5&7LgyVo_ZL{R2hy^fS}v&ZgrhazSUzMWLhT_vM@dN>|cb zu=Z-*4F-YxsaziEB$B`lLrwy@3k*cq$sq9{LG-MhldNKcZFoIamnX!RP(WcKHLQP` zGZM`29KQZ4Vw^osYv5V--bVIn{`=SZY};Po?yt+p9XcNwYGs2!3MQwg5OfQu2!{7ZkzVowdF-=L>zv6N1nl<>$Ti$?C zZbvtyN_A;BM_oius>j7!NM%yUC3A>Z=YipVjGb{DVyt&k13+vgKB9&7E4u6|Jc-LY zb-&WOKfWE(FS{q1x&3D3$KHPbWjp0?Q)JFam*gct9Zaepx*%4c5vdkMJ1xeiS9O~EEkdT3qL7aa2iMZf`C*ZM1?bKIjf=?8>~oMnZL7D5ry zn&?z#(VCh@r`kX)n?*oEAvs42_aQ#FPqpnZQ$#Sd5s1|gOzlBXZcEvbqC#XFs3nn3 zj`|N8w;Ku#k-cbCjW-lC6x7bR@tKGm$`P^^wngG!8?7*dZ~o$Dyy~AmgK~wO?(QQ! zJcfZ2Pe3|fM5kUuUw;7!r}`Hq@PcAouUPZNnJyqV8)hM=N|$b&Z-?^U z9bjSKKJ2^YHnf+zt;L+;_tZz6jMqQ=nHXX~%wUF2(8;Bf+{ac(5f>eSPsxwtIte`$ z;jr8O%PEF?56$9TU-&An-?<0Hz6@S}{+W3B`Hx1(QjM~3lLcpp^X@Jr82jYM}Yl3 zC%g-^9dYVsv9r&AWMo7o0TOZ$mPUMs8M7k!{@aWd{{J02V*cPVMD3SY2kYycL&|jQP4?{ zFCYYf-2%zDWGppTpk8;K4;}s7Hj@$gNc5{WYAT(WU9|DUe25q)Q49~)T2RUNi29tp zmbS}qMXEu}N-*wW1VuNt))@^una&+3gK)mYCu8c4VcT62FS~dLuK)FNZEIa0|8Hm* zgHO8{wcBq(<%a834zM0y?HYsj%?tStGo>vDMROR+nzgHO)@cvNna4j2n^ue?OXya^ zNbE3bKf3F-8Ax3Mj;lVpI=4Ic+P__5U~h=y53I6fo_H70%L(&c=!lIs7S%b8gTWTX zB#?|@zE;66Znz2GzUCLWd+%;{aE}&qA9h*+WD;?_<3%sVlTSMi@vwnJjI46D-87M) zEx&u-gbk5(k>n#gCHS5C7 zt>6$Io{v-pckG)tlnfeP;XeVN57lsiqn=ehC zG8udB$t$2Cv+`o6UXe5~X94iq`C$LLk{PO5deBEf0$-BAuWsLuORxAO_Uv1J|B{6P z6d!g9GMv3qngbdw3~gMgJubY)?Hh4!HAT<%N>p%)&a}1TDrR%OLqUsI`g4wx**mjy zB7HM=--Sc>+zn0Sau2fOpMUxz@TzBDgj^~vo)~M?HeAf4d6`_6^?EPjFO z%wdqg*(@|tM5qtVE#eIasL&2XaX^l7s$?xn~%d&T*wkL9U`4#_yFMj#EN8PQose=pv`qyJ3X33?;$Cih; zz3s9`3w)QqeK(Kmv9<1HO30{Q&sd83Ym!0kpn?#8ZZF~H@*&K$>V7Pdv(pYuRZK@g zyOf&-7>{dE52HElWee}ILhv3(}MFmK}V&8W}TbEgnXh=kR-wQ z;WlpLvvWan0otL#b}8#w*MOp?AsN(9;J@Wg-0XpBrJ~?xXmGGAX5#YK(&B>1kepiR zX94g496B_G(UDPWU38yMqfDLf2OhW|>o=^ofJ34}K8PWk)1eAN54x>k@`I!nIlTeP zkzCER(ZS0uehqH=!!14aEVoA-l$U$zvyeIIbS&QdOEjv>|)Qyj0uDXQZ{^?eH|JrMD+s<7N3im&l zuR8ODWAV|~?m)i9k<~_;OC~$$%eHOk5t-+lJ;9)WEl)v+^vS0nxsn7xPa2{_b;DL8 zB|iVD-m!ZlVi~m`8f2V4yFs4dWB0@EqXA02hQ!*!e1ad*NZcy>q`_L$TuVfBcWmyt zbshMcs2~6AvJ8vrthnG%9ClgooG&%fc=~f+gP;GeUpzzr(DUnv9BF&4(3i&cN1Tc` zUA_a4JNwZ{1WmMS^QPHYtAWn29ni=p5ofJ^b~l<-kjteJW{aj6*sdes1i*zdn)~iW zJRL)P-LXI{hk(D&XVIz8p|gLd)-))NAvQ3Au#@c$#U*+b)X=HTYfNoWtEnuI?#rQG zCaM}&B2#BsG@V5(olz~!hVL*?i2)p1qw&)|1Las@yPrfHRc<7N&FO-T79J>d@Pfbp zH2!dN&lEkf(Q`%gowQx0%JS}eP@kE@x--u}s=v<#jop5CZUtH`N%}fRX#M>b2U#rd zXF?dD9cdWYF0*TzDPhm;w;loc^BGS)`4qhUlINh9?x0bl%?;2duR)`OO&@`lfXIv) zYuh{0FK|GGYt*tJl#<;_vWSd8om2-7EaENy_I2EF|2{ly)gV5!i_rLstA0sed{y?`1=adcic5lv7UZBr`~^T4BS0TD7L&dqUr(`2`dQiV6TY1c>c_gy#E;11Obj zNDq75N&ag#7^ybpY)QxhYjaY5;%#Tf!*T(~y{3!{5RW0HDR>mVA#K7sEvc5s(a6fqT}w>F z@LwKZ3$2(h%_|5g7K%}iGXtrG1u7o)>vxe+CPc{Kqur46mjYKbWf1D1*nemrMg~UE zmoF?cru-f(lg!UfW98~qJ^X{1YVt3A&JGs<9BG4T7!ng3ka))5A+=%@!R^;!;lHj$ zXJ*FSgPe(t0rfR%E2;S0-Pf?YQ7-qw#K;Ja+q3~EZ`p>e>o;QM$S^Xgq&f#;n%w33 zo9W=)zzP{f!lAwOWqzzD2&uKx0s$gIg8H1UJe^}ydU48etjAXyRor)A64%~(D}Hg) z@3DKYC#3y#8UN9|Ryq;GhhMt`=by3}oys(_g)|C-8KiQYkl9A9R6*EauQws<7J|Vc zq&3zx<$8w6Tw-v-DP%6wUcREv?0QasbLLU0$VFj3bMl-KX#m*DXkYme$8I8+-)uc$ zMf9m2Y_I^J%c-zxvIPLXyQO#+uHK9e+4}7= zI;|zd>usR2B!nV1G@`b@*xU>-yb_)M6$lq+Bx~Kt77*6w5zZ|stBws(d?!F__6}fi z4vE4r!qfoTrCDX~?N~yiu-VTqzDdI}%?<+q&cR^fz~Mh!lTO=|5$WWpJV?ljT_Y4A zQl|`U9dV@)2Npv-`wgGOZ+_1JzWYcN$C2N<6|tpxEZlj!Y{%-eAA|mtD|}_akVpA% z%Ld(CO3yfPD4uihE(+v*5n3cUYaElb*pStv`R=uytdf_hKF_cHk~coHABqS zWB8j(-iAN?{*O@-RVf8s@BY5A>uMX2OwzF|m zt+nmV&Ce^k%Jc8Dvzqp|SXz{0Rsc|5LONxVzf|DkW@47^2k|gsty4<3-9fRxPrdr& z3~)3s-(P}1bMq+l^{GvdkUotzTM>(WdHswq7?A**>SqRrt$MXeO#~MX^q~|mZw71i zoJl~P0BQ}8xX3_(SqjSv3;>7>%q=eH;E{_^t(K9`WF!zo$G>IGI_#UA!cwJ*W7e(J zbkMtZ?NJK~4Nf){2bJEag)LHq~{zobIil#hh(Jp)cU-Il7 
z_~T7~I&4dt1LPGk;7M#h6R~rihA@{#;=o=se*RNb@4AC(=Ag`^zgnAq=!27P&9M9C zy@3JOF@@oQeykcF!>09XaLn4ZST{C-@%};Na~WhrRJ08*sbz8}d~o4w+=8E7u%DXwUE;ha)&tXL)Vk2NPy4{a$5Bpi2()`^=Y# zyjYBl0uB*N7xAAzzX{KO-pjC9vOfL)^9RolaU@mQ@roBe2UmUYA5qK)Xmf-$Yvzr* zN-iBKQDzWln;vxm3L`)wi*{H8s#EApA40q^j83iqytsod)1q_WLY?Kk>kTKCp;vx+uGZ}Q6)@B{gr6mz9 z1kAOf8Vk)yQ8QQ|w}3>=C=<8tA_mfXOKn`V(=Jcahm9tHV{WFG6E zbPm!3oE6}RxQ-<213y0ujJy!7mm}jTKu~EpKtlnzRWWF?+Lf|oeP{38iLm~c=M40x z68Pk6UXAU?Z9=(JGNo4uLlH@3Fu~ax)(z&sJvYAGY#sNioGJU;Rnp}A6yp-5**&HK zYBeNdG5r2d2l4K&{}|JyMZD&M?RehfPm_wDz=ci%op=hx%^Q#yDmrw5r3HtUIs2iJ z=K7AVC6K{KBEX;b%;E{pcry;DWaVwJ&%v$jF&7VTL2(^6#H^jYE2(zq?DYbb7rmgGWETUJBQu8Vp1mFM zvmb|KUq6Bm{`dF7d@3cW^PJRT#R%_X#F{9(>2CdZC;^+o( z=t6up?BLM2>vQ+y9!-UFngEUnXha}$1l=IQNuN4FqTbE3tWB^dY_P1&P!M}BXU?5K z4%d-mXkU5BW%&K?{&W2?*%$H}`ni)Zf>AoVyM~1O#Yz0=2jA6ydFvbRl(4Xr=k-}6IG?EsB8;vZ;)QCuy zs4y^Vqrst5jk+dLPR`BZz5@qv@18x_xo0;H&CH_0dTjLgzt8xO{EnvN|IsU6fLFcn zkqBzj2-`Wd!=x0>Y*vGWF3_A^1XgT9a_w4koTfrz+y%FPWcQ=)4*Q3-(^(eWy>b_Z zxx?6g?bB#Y*C!0Bom7k<u3+i#vC8r*Bn8^pZZVizy%NPIJO=60 zPC#W~8130Ag7^IFTVb4#X(FyH`oO;Z7#$l`=2%)RW98}zEXdIdGiUrJ0Piq`7S@w(= z2^&#>hzJ6M2s%m%4pDxXpKpwQyKN>XoYH_H8cd=Xa(LA4c3GijV7r^8`mz&@@!1R+ z;)2?w&_&XcBhVOgM+760CP|DFBxCJ;%HAAlPlTl&r*DKkZ-kw5_A38ndx+C7^r**v6~`oHu+%U)*p ze)EQL{O1?{5u4X!(4xv5#cC+?%AAMDQH|m@9bnZa1cm+{JqTB>=n(?)&R#34+u5z> zcR$+hvz-B-opg6&#@-a!vLPD_(aMesFCM{BJR#EvfS*xi#yt{JWxUhLxQfx1gw?g6}auKab&| zL1pmd7O+IXX@6WSzBg}>SMW0+-2`N~IoMLjF~GE+%*d29Xo}Ch;%R@Vf0C8j->-d&LoJ`t0-jiNQBIpP4!OK$S9RY zLbV!$*EIFd@e>#z_z8X{0c_51nLT!!!)Ir9zAWq2gHV$MFCgxK0+6V+#kaFKr9s^W z%QTZn;B9YsC%%96kvaffw`?DvK7IZzRYd%_6VcjqBC^Fk)OY;}^_zYPEcsai#^UW_ zS$|ys@K;#>|3ObtF9a@s*@bxb6)!>(Qzi%8B-|JX3D92XBoGda0%YO#^!)oy2(7Z4 z1ZRkjg=1OXkI8gp}GuJib>mQNXJ&|0jI$Q&&Ie`Z5Wyk*zk_?Z{o_0~WjYg?s{^ z{pYLjoaa0Zt@@1498;zal4y0d#qq#|t$CIP2hpNa+kH0@%_`ans`X_M&-Noxn7}pH-H7ME z_T6~prBBD}cbtzb7SX02KyFw{h3fXuP#xpHhyWTV>}|u?RIgPg*p_z2QoDdF-v32> z?khjiOwHwvsCyd=E7xIS`)Sr{*J+AdK!K`$svEk1yQ=>IEEkd7}N&FP;NGm3b@}|;;scB zyzV<;yV{V0HRr>=_M~(HXe8La$KZe@H}AxZco#>hw20BM5l!pkh9w7pQosxj)P|>s z)>PMcBO03SR}j;mpVU9lkJ;%tG5vW^$pMg-KOwSYLhnz`0EhX^&e%Kxz7L%@9Uh;v z!0v*AaTACXXj3^wJ*p5=*odD)qMteDK;$_ZBkKBdNhwOf)O&EkxiyF z@h(UZZi=DFV1+0Kr)F`wQlpL>hY8W4J15Tj!}dv{A>=JYUOk!+#mrh^y_synT#h-f zAi;07D4Mg)BEp^Z@I=h+UNE@Vs2hCd6a*E~yJ(rS0Agu;=-vNHs>`ifa@6ALOV1KfY8>%>2RUuf$^?wF#}tBG6&oouz=d1V}<^ zoD)`ND~>FL0~B2VXtvks`pZ~FQNS`I#`;!m48k~05V*ljAoPyD=M&i+4 zWb7@JCtaj7%Rr~v(lqG2PzpSL*xch$Yp`t00YPdDDy5f+$DD+ z{kl9lXr=JW>kr^lU%v)hR;IDzIp<+)%?MJfM-VR-)cM0tq7tJ;L+)C1#*9oOZi$4; zI=Ju9JRbkdw_w-3eroFS5Y+5)0FHw=zi%Bj#n|iXMa(P^x8S%d2xtFtXT~{vbCIU= z;_pX2Y-6_5!ro>Dds<~oHmj&H8SM@9{jqd6T=O0H9`O`1$819JoU<@=$|)!XR47d$ zPTw7CVzKTz3PH)44T=3jGRn66K`CS=w7!k)!_;Q3eyo9yM^hb}vpV_-zI z!w2ReGRZ8y{P}O;{qOjIjm~_ihl^l+VA2J)Tv*{YVOK5iq2BQS&%ZY(bnY2j@s-cK z7vsZ8IS%$6Q(icXakf0=N-cx$mLR=ogGm+}x!+|8?si4@w`biwIwUjZpdpVs$^7*4 z2%O=F@`WBUp*K|Bm2rkK0vT)rcgqqU$Fyup;e(F?dc9K*DrDDa<}XP1<6l1iRlNSP zcNr1#C=am}sF-)=IcOfZAGq@_>lQgW0PXfY!__vPd*S2o>Ce0e`E=6)fYKCV%VIPO4rZRcly=Naj`IWDWf7AOI%4hI_SM0#U)~`Xl zRaOv0RDxN&+OaheoJcJB6dVKx6pCr&3UOrmNcbaHmOO zW*YeR&u+)9*WZMvpY?D&?xM5Mw`HYEfo#7L;SZh{+xNS^2mXx;2bSDPlYv53!$ZiPd@?4^JqPizaa3Dv)az};!vr{mu@i4< zK7crN@A~Q2MFP|yosc7dv#RH_x1E~O%b`u&di%^dpc5s6%7sGgY^ovXrvPA z`{eJ)@Ml24=Q9XcDlsD`>8~x8I;Z=Q;78-e_b2y&lEZ}9sZGGwGt;M&=QXj%o@0s` z_3<_Q67`xP7(@;j74oyJEzNrU`MKOwlrN?MB3eMrZEE84XUzPWsdH11+rYkEwg@r^ zU<()LKTzDJ-Dw~hwo#BmAxj=Mk|1+N8V)qAeIY;{*lzI;F4{v!1uDs$>8GQCS zhanAJ+FI1865~mK?%fr+yY=h_SuS{t;79wsNVEK~8omXa-5{_F9{8WR|1Lutd@a%< z2A}k6xxG`jP1YSJ>@pwAT70$BjFYv8M#+!zTUZY0Q+>Bql*;cC3@wO6WKceDqs 
ziso`W=VByRtwZD2*Q4?K-ylZk_MqD*Kqd)%;^Xher7wI6I<>OdCboseGiap;Oh~Lg zkNCk`(OIejn;woP*O+Oh?)z5$6^aZu6p-FKok zIfdbE$6;jCW^_7^L?)^A7H00c6N`86LSb}Rni$#PL1ae<40djXXfX4?q!3XG)Vh}9~{ zFnME>3eN$Zc%p-RF@|hDiN3)Mk{Rx}hPuKMB3@HESu{e~JjP($S8Ode!AlCZad=V| zx7~RNU;5mC;f#~E;=)UxioVrFO^wuKPCnP`A^Y*hvZXG_)qAypYyxk2*JttJk6-Nq z-=h_vjFgypE@zfVE)q{t@CY2W?E&tJv49U|+j}`Bp>O zzMYJt1js#S+=zt%+UYFvryh^>Pk9o`1EW|Z5k8qlCP-qYG^ciU&AbUaSPa|9r4tA~ z{LAl!%xL*bs@n|>4k)W_IW^{B6L?x-@)RH>i?!J9v-MW?Q)H&z<*8aXzbsM|s#^L$i zE9KYS_f|M{&Ue1O_u6Z(#VG;$3otnIeWb4=TK{nrr)ESN;BD!G`Ad`-5FM_xz)vWD zEG?Qj^%;{4iWSY4$I;)rP%Ul|AM+5iqu4$&feYS#5jL#b{IVi(h&Y8+l+jTO@Zj~M;3Op3uGu0bW2vvsg z=Z+UbAJLoyL;YTp90g)-!;0td?VJ7sAG-MRjsoV3FDN&9iv+&Y-V9LUwpvBy&R+qK zJtlU+yMCx|zVFKA_|<=Y8}sMWjywX}h{1PD0&z>C6)~h5wQ7@S$NS*h4!|C6vx;Ei z8K6qv4+qit{j56?Oc!A$5^_Go`8sODblNZ{X&_XBPxP>7zBti|&cEA$Z^iKSn{US_ zKmT1B`&+swgO9%N%{c1S`=cilM>gRgXtT1lQE9l?GCGY%pW28!?tcRJJ^U2LCrG&4 z`NK$LkYBnS*=2jnxSH3fpgFM(#V6OI*_c6L>2eGne6X~f7-*zeqw3}65llS#INWj- zOJ98$$}=@Y({aQLY1z}ljhf1EJW&AU*}}bcy6EASAK!x|!wYfJc_$#zmqp;Sjm>Mr;#ECl zN=5!u=&5pnHDE>!l$tG^_O?&szCWyaiM6J*1oF0xH^moXn3B_>4nU}Q7jDrsT2Gd}n%cO6LDr)w1$TXVRkSpCR#! z3}#W*bI_yO@cg``y%p_%gMr`Mn9!H8C3cGSjn^3PUwQK zCG;<9d{V8Fu}f)SHOfcvABqIA(&pc1XJ-W+5CVwTmHmZOmA=eZTc!vH zk;unmVxmC8ANxQ=GpH&^ra9$|MKMmaX4tb;{sde))3Y;j9BC)s zVPQ{9jUnkIMCw?KjkHZB6O8eRaYStky*<4Z&V~$WksykH{M42B@sED-HwmQwfBg_~5=f!T7W3Hk;x*C-4@e%y<_is{G^K%~O9(xQfJn;w&WMk-AkU=sVMG$3+wxxuBBsy?5btikN zXa*?6NrI%-zz{V<26t|f;=UQj?RP(pwvoW;?>SX*+yZ6b*3nB*GqURb5uT~WmAI6n z79M=!Ih=alr!YA+m(%d#3!}z!hfD`=NH0c@WupZTEn*^;fu`@F=C@JzT$EZZRE#!e z+D(-GHmZIbEqwIEb?6-KMjj>Zh}sCv%jU4Vn9__~TAOn5wLv z)7iAN29%Lc6+OLu64ZGT8Nf)g<3eH}CE3J@C*<-I%8_d$1Q-T=!unCPnyv(Wso0B? z;uXN)%XwM42eKc>U{0u>&|NGZP90FcfR?}^pz{r?l z=kC#Gs6a_l)4SewD!%gNPa)~lq$)H(|M!TrWp+SPG;}U7ekT~KzpLvb^Pd`*pR);u z>w#!RNI21--WeI`%pQeI!S3}|qk^25Y;BVng=^*0$v$S)4^p|{}*n&`B!2iJh*5W&3Xkz z>JLcnq-B(M!G#dQApKRcdBk&BL}`51Y@sqejppnOylPz>1H-!^I4 zEEtT6s$VOB7y(h;ut|Gm&yc;(H=}S36FJCO|>nLE%8QNp}CXvDo|9UGv_77hd{RQ1;-Q}G>7)fAR zJddW^L^beG@!F`NjRwWom<;HV@s1Otyi>^j-Sze>!dacPoB9nF3?~jO976i|qu}g& z0L)ZYTGiFe>DtjUQfG5#TOkC6S?_HhvjVt--VmTefCKPc5cv$aAY zi_y_>6bgCymP{Nd`%CDao&gMUoQ0)05aIY#DvnZdR+122SUx|W&!Ie1MkbdM7>x;r zjQCM%n5=wq2H4|oHO0(`@59}P2}-3hE6-(>&jrLJK{;)iQn?GSpw5JW3zrEClZ~hl z6PdioxL<&dgCKiQd@t#_hz_{Y>!+=;f@QUSy)NLw;H*}Tn(-$(!re%1yxO;3&+CWb zo%b!DtA#sN0jH(7)W)ntJlGmZr;?JW&F%eiAC|EiO+-{kiDDJ=21>@$SifdH-uLbg zVr0|k9)-03PdzCNG~p=Bi}COO)xm~niVBkE;sK;q?1S0+AAmv4ikFox8HhzpeCeu7 z@ZpQkgWo8s`(6+LLXvaPn@}r4Utd`<9fY6*19VUWl_V&%C=BPSDhg8+UE%29uQ339 zX7snvQC>m+njj_nooWk&W2@o%4Us9umR5X~j^?R86=P;QEYnB3Fbv;_!6sR`*`g~Ne9MQ?D8rxF1iMj1 zK-{`-0A8ekpy?xMjKgfrNI%_(IAZ^sN+-}N&%lajaQowvcd46C{G+qmM$fF^Q?Q<5+&cYEiE%7mFBu@+oCOypzX3 zwt%l){t4_ooI9~HyKmRc2``^;ytWvj9W=8_r8rtpnR zPQuBjz8Yq;4X@?HWu;HPhZIVRPezWIh!jyYjUsgsmAujqfp65Q=0hb=S&xbk$M3y) zOA#BVS~%{klVrc-&vZYwbB`!#U4=jF3FIt2qkrK9} zE3+6dE#zVmj8GAmpGUcA*;B~S@Nw$0#du405bOLZZdmgi);ByDU%vCFf!9)|I|Adk zNWycZ7cT5_j;H8(Oa?l_NbHx8A5bY*#UzM&0pt&mfI-3_p?X%yP9lL)v4}(>DZ!Wj zQOJexJc)K}4QwCXE=qx75>l-qpUa5X0KEd#xSl16348qfGGz8MfneoND2I?fNrZ`X zQi7$D7)}W^F4enU93&EwxF;%ytnw{W_ySZSs<^L*f z{!L!o?M?G0V~dE_ir1i1h5oOPqniLo9hpyI-kVOu?6WVR^2k%d+}$~CZD${>D$)M_ z3~u_@FXIiz9)fmrT05(8)Qj2~s+tiSQ-L?b+FRK$OKq+z^w45g!Iq&#udaqP46g#S z797#UCPfqqje_(_2@4y&Y&yYx&gX}fd?(WCRzbMbRBwN-EpRuX5zdL^1nPXsn41~E zily-TKR$~yPJfrQ-FEem0op&pGu%j)z8)}jM23) zD|G~k9E|=2@Dq7O=Jczu>(lV7MVTIGacq%fx>}9?+gLN^;DRe|#4}GlD__@+M6vAP zLu531>vQYTs?AEmBQ2-Ur(Uk1uZxgwS9phjp-*09a1i~g_C<2u06aq#R6@r~TyL!D>k_;7+<^ZzXOw*rBu>N5GX@qo))#tT?x(< 
z17ZcwfLy1$UrzzW>7oSRW=oL`q^gBt<=5x~&}wULKvQ7(azzDfs{PH#s9yx-YE^`S zM0U9C$vEN6^bE3vG%B-I>E$!~W#DBCpi!#{Y%0BPy6{D#Xf;|g#>ewV&H$@(K93nB z1%G&2$zNbwfcC{qKsq85bc-UC&o)6KE)PM`l zrplp`1ZL0`?{{%8)WSZxQj#DnTIk(YZny{~z=uEA)lz3q77Jw6mJm^s7eTemBb4q$ z*xOa;fJ`Omg|0GUZqzbWtgA5t@n{Nn{QCF!#3i3XWhNB#`CmuR{|DbdZ+$wzEg0y* zDW{%<@BQ${GUlQluTU#PXYDd@nT(iu@%OLC zs=bEbx67gcNAn-a_Su@!Z>EwxRb7XE2c3Yc`=Rm-u6cplLqRO<^W8}y6w_E*5#ffX z|E(&1eY(QM*ZF+?qxaNUI(1>t!Prw+s?UfZCCNLZ)+5RSRZk_-SRgHM0{8~CR$ zee*BLPIFGEU?js!VV(I7v>aPZqdeO}bbJi0n{NVYvwP5vF%gfM_{B}v;kXlCC9Ore z0~+NqnfEr*eekor@LUt9&_cHXGvgp=Ov9YmCQ6R&WFA3cK!|pc+60WM2cIs2$$~KV zjd}@wa|UKWiCZ6LK7&?i8jXpHD3cmy6E&o9&3FF`U;p-hb-p3ba&~wzdKN81wNgTH z;|4U@p72SW+ub^*Y_AO~n?cX;Qe>7a6l6ih4OFET^(F|eF2h!$qHZN$X$>tFKok9^gExb7nt!Y-GPumWUaagj!L9qL<{ zaK(Q@A)T}UPuM`a(L$~_1HV<4NeQo764~EKn#4;^0H`&`u|T25DyqCJQ6iU0eqBkh zbMNPRQ~RMlPa0zH%-aVBN|h$iGl0PrD>W#_f-vN#=ysVT3}J_jH)0z`B7q>L|;WP^TsQo`$R)BOV3v1(^Ky7{gu%Dk4OZ04h-^4d;~S15}zbMdgIpczd}pY#u%oNl9`8gx}pxK zWQ3*4)7h(#QRD|Ii6H+#*!$uVrW*hq(qESt7Ul+O>8V6XZX#U;iff)Ez-mFq>=>rU zN;v;r7vY)5cQh8+v%vPZeU^ML@k9@B(DG&Y(q)%nbaomSUj7+cpQ~(pE)D(_I6Q%_ zX^eHuk+2TfAMFhzsQ&sk1T^(|$?)~!4_COsDX%{WH~#Zi&|ip(wtUA8jlo{}o_ZAt zj2KobUDQKCmfcakaAOHkq0nPEto}5JU-@k{TQrvBrq#ZBWv!A-~99(_P$I+}8VLAqEBL%1EJtD$}4g zp>Q&h=@uAAG?znia310V1K`lSYbmSp7#ZHPWlTK#3})9pzk_n0-ck@P3}NMQuff`T z?nQNc0)@rH$S)u-p^lNKp9Y%TyLa_45RW;y`tLu8GhcfE`eH6@&qcH4iusXPUPDcL zQM#7|YN8<2Nny*jO`|4i`qLl5GT{1cWzlRa6+DteL&<1?^Fzc*_){wIi*ABeb(Q^4 zCjlw|`%)FN5UcwJ%>ISQ3@lUuH=O;~fqI8xO4dE!tAVzY!iPWeb^OaWZc#+--LA5( zt#HROIox~0c%v2iB#yCamv#g12QG34DCCgcZv~P=3ov}dYtV2Km}>cGMolb>`#2|? z$31H&aDStQ8uvQSMLc531cvC!SS_SX7yW^SEna|%8G|JQ4NAg|2uqYmglVAGYM|Qm z5hHQ^>-YaWptd#pEmA3%hA8+kX!Gx^gjrRyH_qV59(|YqkSjp{0FvXTre@I7lM|Rm z=IxmQQi3s&l**d(qiO_D$>B115`w2{6lI5r4iXL=9O%W=bWs8@69Y~Pa5kSoFq=!E zTq+A1z{?_JJgr~AAkRIE+yjmminxS^^tqX-pxqYD0I~m#J1W7bR3*65CYX|x?8Onf z=TreJZ=M6CipckfqA6~K)Dhsu#`#sn1)xz)m<6gzUqMoQy=no3Y|}6UozwdjqMp_U z&{Dpt!qe}geFHiHP7(|iDLUQXQ1C_fj8%}(h{K$TvTz7i5d#e*oiuK^;YNJzs{bPu z>FyW%D-c!xinj=-AGqb7e(dXT+4=9mKqQ8T);^E-eBmloB?FK<`>%NSoxMaSPntZL zI(#2Qk31HZYr}i^Q5669M+9EuWqg|2eo16|$2q6qYu8_nltXxzlPwBL?GW+8Ir^~g zqpNmx4s?{Zz8p-M&qfR{4 z)~XX-b?!waQYZzXs1ZFg=wm=9z#Oi)@>{t2OE>7Z-#MjH4#_lOAAKTl$dT|Y#Wdik zojp2=_K$B;L3VdnTcbrs9=sC2{OJuy#a(zDJ&O;b<=XH^A4imP|FabY@jR?W`-@Yc z$OkvJ!QZ|HR?A0_>;(!75d^jf|DZksw=xYY+Y7T+gS&kjU}q5YEkmS`huJE@Zy1{*^6p zkj+Sv=qz1`#PVSniGH|#1pTWPBcI727E5EISVgH;!$hr!{buaowFi3EVoF{V8SA zAxeNfsLfUoiA7}Ietc{Ky}iBCi*Hc*FO!uDm`Q`GG9ihA@CdX;urU2?OEdub`};+4 zkG+42+0aWsNQM*`5)0zAShliH&j1@wvL_#4c9u56Nnx}z7)yJhR+CXZ235YEUIe^6 ztjsyr-*P!hqB2k>CLLwJR->~a@#66F*|&E+SFAIWi3D1nl9^-zz=;6z5O|V^0Px%p zp`b|zlMN;l{OV1*7=nRR*+8<-NfM7>PTGP~Hf~)m$@P#vzn1*fB)#dopV|sMD z%OBg_#`-t>eHi>xmW@l^awgt!%E>|qGh7#=%_dI&#O0W#pwTN_{j1Yy%K9^?seQ^B z@UsQP$}=e4bvGL8UJ!cC?jLipcD`SL4`1|FeD1R!LW)XtEpCM5+NjS{+5;SeB@`O+ zKU4%)6sK+rQ+>AL4=6UdKAKLzp967*NT(zlp__&Z_llhyzE}q){T?BwpaVj;GPGb##_$25aZ*MyC7GoV~}7Ua}YV>T=)x@BjD@# zHjxVYa3ai>KkWNFL?~H>E}3 zTXFc2IMA#p;UQYtQpl-VLL`}lNs*RD1Lz+T9uTG{$;p%PH=qHlT|~r6V5Vllig=hA z-GE;_xE}xcz*gLU`@Lwnp|jkJl7JBSrgUpO2`iI@na;qZ)=46XXeuQFWD$vW=>Vr5eH5-d z|6B}b4Mgbnr#UR1%;$3*7&0YbxGev!<2@6&tD#0n_ zV@bd#iprp`$=j-Brq;R=gd;+#5=OamWFIi$+Hs_p?+e38%l##*K}%7q1VE{9Nc&1a zQBNTFyZ`VQ&N%C%sFn%;E42BgQ-E%v)e-0l_hH^!yr0u);j;vNeOPtgnTS&_B9TPP zF)=k>!sNP5QsB<(p*N0)*@lOD!$qx8gM}7W<`ObZ_k6`A=!A6OA;L*HwtXl8Z2>0^ z0ctd&2rBd63Bg zIRQi$*lI|o5)zoHH$cW_Jn1Ok0D2O{*`qH>LcnTXDtC`O0LliFFsM!z)%~Pucd087 z74$%=U7KP@>hCIx^c9aFW(yC2(*dP&S&p9wi0~Tdnk5p)yh+%~s3Mb03y**a4Uq%7 z&2cei5|kWfOMs)3L?*)XrWFAJ%oK})O7ss7h~pg@qY4f2F*dm!xpY>JMUn`89|}3L 
zQkC9iZ-3vMwyL@@#2Q&3pb!xV`#wq@ONff>m381`1(Q*3l&o4FD@UTPxcp1_$@l(a zcPo8Qc86E=Ibr5MFPFmQ@B09bKWH`lDj`LMxD*2er(ODKY@eQb(QUf>+jdV5lp5_6 z&Ip_sGH*B)?t(?2K38UX2Bo`iM{Q*D&SX1w@z!DRrtEfrkALVLxcrmn!-*OQ+D%o- zk?q}7KJg*1Mi~gHi4jGr??U+E+@|GQ!_BSR)YPkJj=G=%5Y#B4N=p?!FF)5r)o|_U zf=1^BiqWr{0Nt5?wX5n01+k%4L01!*6_r8WJBo?P8s7QdOL6Dj_jhHpU%a*I%O=}( z-w^CKpNCd5s|dE-wve3nXfr920NL8 z-M5$+w>f)L;=R4Ll78QX$Sar3ubE^-p1#Fcn((IQUoniS`yTi2^tlp z$q`GypB+VedMlz%94#}2Si-@*cRhp)uDu1rhrAkFpIe7@Yo2<^u%hv&E=fbwRxRoK z^G_iP`z;tQom6)`3v*Y#s{Pb8h%6k&;@2ICEss5g+L|?bq;*fu-_DmakL2Aa9fQwb z^iHHvhAZ~OY!{K4?sG~KF40YL0XXt%`C^LYQ%{Bjw7^Bou@Orp&}`KZ$;DwfEXr71 zsnKW#eNq1ta-<=ZCNHH)>)SxlA6s7#I{lZ;|fDj_n;Q*CN2*ix~R;Md-Hg1vV@JLy0R2DEJPzF-2ne1 zNW~ms-m_w7@1My4PZOQ;Nc>~KtI)+NYD@La1E6RS8T3@?uUcX#S_I#*j=d}@u#x&o6L7UfyQjVUG|40UjI+PS{$!5YH5_JL?keR52f<2OT z222DPbjjijB>tOY(h0>_>bERFc3MQ65h^o$ZYG2~2M) zL?Xp(Ls~+ktZf5WMU$fN_P`D5inBO zG(VR&St60fpZ@SDK6b&S7~97A-&ZDh?n!_RYqXu+VqxXK|KbI>>Z0?pa&RFU4O+Ys zUbG}xt69KXF8>0a9|?Ws!*_axzJtCeJlBP_a5+*Zyb(1zTLvED+qYoix4(s}EL1@{vP_|S!C;qp&?1et^hzgbp$4K0NA^e+)9x=_meGD)1;2&IE6Y2t@eNg?v6 zTR%z}QiXsWI|m{5sT3d!sh8G_vX2F)=o zAJ<=X87_S98{ySv5Xtnwon<00jo89fX!>y>>zR#7_@nC)*fCgrOT;u44bJ{I0Ivw6 zT!)t$fE!D}EKb2_%)sIdAzfz82qHbp(4HPaP#K5A_GOz0Kn#yQG=jHX@hwccCXRo@ z33%p-HCXfTLmgF#x!B9xbuA{{ih~cA-!Su%UC_eHS43E|A z?%r|ERm=>XD-$aarKT&cFqzbK- zDuRggE}aL{QT-~#Ur}5rK~^=%RQlbWR+u_IK>;*+C8;U*+Q5TR7>Ii}3{Ee_gchetm`QJ9WniI-#TtVL1*?yzDc`9ep^SeD2R! zyM7B6u3CX}_I(w`CM)>m#^*6QH37?QVD&%>Z{2ql*3T4i`=+fJtw6+@`Wzc4&(FZ9 z)o|Zf4Q1cQNdpP|{b8$c@x2@HM5Qc6N>oaI57V4zv1~Z-uy5AJdGm&FK>r}V^5k>4 zV{%5?&7RLzw*lk2yM7vYO;?!o43uR2&l?)V)WoD*1my=%Lq3-i1E5B|hOAb&Yqi?a z(ILg4F?n;6{B4TZ}hI}v80vn<; zz+g;-fv$ftS6gq$M!+#dsqh<36w5Q{$ra#4BJvu6j{+Wtjfkbx^0?H+Oh+(q5+axt z(iyNRJOU;o!o=2OCvq7$rr2yY;F1_<+wz^{xtzOflhK2sH(TJT|OB@FmsWfijixz71|o_i5I(!FZw$kJ4z-BzYd zc0@)QJMW{}wX#2llL>}}m>tKLuec81{q_$fOW&i-VNWCc%YI&Ww>jlCufnD8dN&50 z7+Q5@VJa0Yr$HEql1Y5v`fG6iL#(J?p`btKcnwx~?8EZ2qcLPpcmvu?_l0Npa0nT$ zUx(sdcfhZf_hdVr^Ug}%!!|seclJs6(p8sYUVjE|ok@VI_GSB)KDx52mPAo=MY@|( z2=QyiamWPKpQOY3j#uDC+=A{e_2N_5{tn_mp@5V+u5P$eXsa&s#n6gyt-8;IKSi}T zFN!O__$~a?*S;;^W9QeYd+j6@j;G{D2{?Oa;#V@=AZI0Y!7opXT!7Oh<)CtgPP|?N~voAC)QGYaI0Z^NPH@XEzvj)S7 zAc*#&O)fw*Ky-XPyvZ#}Up}6J>+~Ypw+yIF!7q=%t4^cYG>|a%k#zt7AOJ~3K~zZR zQENH4`L;jdAFliktbutr^z|p8I6j6a?|%SZX}Zf84==_X*@u^X`{=+Qrf`vEi!uMu zLs4A61v8I40!#Yry9jF`nxj9I#4*Q-h&v(uUFt%l4lkj)W<&z6J^yUH=k(*@H!JXX z4jBn_U}hZW(qPgq_O{ad_hE9R)AWTumB^-1sm;QQTZpBjO0z@|PSvWEzl(ixR|z%j z_;q7qjg^OqX7_Xa$T)1;2Xc86X-*Upk35 zE$YR&doM*I9>FiyZo$viPM{V9SnQa1{ZJAQkBs8sij7vpK*9EK#KkBBhn+}#1Qf(+gB;? 
zk(3dWzC8nfGO1?k#m$J)x~(R%*}NnS?ETZ!XLNK_jm`0SG67(Vm`=WsN4Z>)f4fcU z_$LsL#W6Ll#D!SZvodF8t!QuRVn<>Q>V$hK5FtV~p7ibM3BYQeCz&dLeDFDSdKj3N zRR0Xz!WAF^Mf?HOKvX!Qr*z%b964J7@-L2-$T6tC5~mM{XpnS6M1zZv#2mI9n)QYp zBNGM&U~&+`{-;cHb96=bslrN(GpaHsNHsuiXp%f>f?(L650L=9Pj<Cgx zjDpQ!%jzH~6{bM{Y7vJ$z!3gYDwv8@&+i?NC9rM%R=nq}7h&UiR@1M1Fx|6_Onq*| zFmdtOr{g`Rya^G{mvuw&93n1?^}wRbLOT}6$8P)v?!5oOSNNDUL~fagq%-h1?ZWHU zKZwjJXQ7%(!#6xQ+})mh9MuON5TY05QB@CecMnycDU_p)z_7;eA|~CcU+?h}*SkBW znotP&EP70liLfLT)YsrR7b`U7b6`f|s5MMneeJh!-B)kara!w{SL$;@nO7{^2hrnB zhntQoW_KhG-s#Ps{S@uzAC-!-OVHlk`BtC#(euy3m#;bxL9GlcGavOJ58L)(S2v-( zWmLEdvQMPu!_N=EinUA9F+5+aqv?7$yf1?{?=>Xql0SK2 z40n%J(1L*!d>oLqG2UunW0Rs(;vtAgOvUO*+eSXd*0qi85|@;tEeqTjT*L#4S=rcJ z_7H`Q-l&P$wu^ehfN{+ow+7i1rF{v*xDv>z<$1Lr(s~VIqSaH-0da& zJPNbOXrh5cEQ6cB^HW^)*{jhIJ;_e0zq@Gm3jAI}b3uUN!5)0(-S5V0SM3Xz8%di% zMkT52;VE=S&N%FJ4qyAPoAK=*|Mx4K04NHDY(UY0e)t^6nwgP*#}7La&f!O*D#9ZH z93neUK84x)e-EF1+1=L+-3TkztK4in3=j9?bC-V*XPS-VklR9Tyf<;<6Hms1KCIRB&n3G&5pr7@iZ9AR*F-a5sSdF9e5AjhvuEXg-LT% zeZYHEq3J8~iA&ysYp;1fDr=vE70n{BaxiG(WwwEdF~sdAyh;sTya#?^5O6G5F7*J$ zDV;-seUZ4OPQ1@~@#0nlr73vPJgnS&1WpPDT(rkGNnoIti-YdUxk)i$A+h#Kyl+{ph^e6JtgFI@6z!4()=f~U;*YGa0uEh zAEQq^hG5%zwTE@+$a8L4{ld6q;^>19z?)Be9S&W&82Lm51~n5%z$1jtO4^E{-1f0< z+XU`@>}mY!p(n6$^EN?-dF@`eaxa{J^6}Vz*&vea?_>79F`h;zP(8*%wr*P(woD@SA7EgEA~RucJSM$ zp2eCi(^#=!5J%?HIBcj7TWd}H&-yVuTcUh?fc*!v7@3;HhB{XgHES%ubJ4>y=C{y{ zq$DYjJs)lKTOO8kY}NHJWe3#Ht)y>5b1 z!2cOQvLFN0t`}9kL_z{H`*-Xm2+T}V4$Bf{zkEI~#147{$Vdu_AAu)kKI8=q4D<_I zzq4J)OnyY{Bh}2ls>BK7vy$d~DBn+@Yq8c~WNW`gt+V8u=cL0))j0#qtB zo*J8+3IGPvvce5!EP>~aFve2T=>2r3ZU7rgpK@$i7PL^8b7-EKWJ(_=S}zgCU|+z z>_W6WtEV0HT3mSUn=wC`hTEW;Ha8w+flQQ$bH(iC5sBi4!j&-Icrsd12b9B&aRly#4QSl<6Br}~hn3lj;Od=U*)Fnh=?C9|EC2Dm z2x`+nqXd6+3`VX8=HdfnPT#Q`Fi2!;G~p*wXw&YSV~1WvP#4ejW%?v&Hh^rON`Txt z+6_w9x-eo4Tn_wN9d30DX0ia7IiOuZ#IM7s6){r=uDj)SeEna4CN|@|x2zJbkS6IeGfhI+e& zs9Q!MaFC0}kx3>nFs}fc$fsn}nj}iIAksI<2V*Q~&=Agt6*rI^OpC4WoUBvx01R;) zoU5S{j#;a5R~yZ?4>#%{vtSt_`FTo|Nl#maYeT|*D03W+wlbNETKMH}?!`Ih{sTDN z@p802!xStm_EcLez^bEPgZE$lAtZ7cICcc(dVqV@Y(Z~6hZQjc^AZm3-ZF-}wiZ#2 z#9uODGZkxjR*Yg-+RZY_9u_*^1T)Yxvb^nHqZ44g zDX4-Z5$(2A`6TpFMp%#wkMKVGOdgyB6$S}cLAVAyu}c4-iQYm%j-QAKiID0w3XYN;WFS<7QVN3VS7DD-)k|R#5Z=J52OiFxcAwJ$ za&^@2whp=waJMiFS+CkeBb2~qbsI}zYI_kMzWC$#(>=S=Y2U*jy3419=b&4WEFJ8_ z#pj-f;}1R%2GIvfF|z`Ys$W6dzCbIin0bHkBv^>2;&^!TI=uIaE77DDz$^J+;z-F+ zPOThz6zXgK2&1GdrLF!UqOunvf?Dsb=VhV@`S)bG9xR&_=(zPs+ls&4F#=G2TcLACA|m%~VDhW#_bRs1!07hWW3IGUHis zOYj#3zzXuYJc>oi0CVh(V}cEl3ubVoQWFC@AsPl>R@pqiqQ)1AAe+ykNW0-wQZ{lC z5=ti#kHw@P#7dskH8qaeMxdX4yWNtJLH6c}N+gmT`J*x)c?ynH=;SU)tD#&)I_b#d zK{$FSEPlh104vi4t(Lrongd)|g+fk_j{#qp%YpJ2Ac7DVS>w7)0`O-%2JtgUB;d7M zD9_9YL`5e-n^jn-`X`(Ot;4KzABD-S`u#)>)D(eYxGG4SHo?hw68U_lJJT=sYA7Vc z%hS~k2(wMTPY8+e>0wJiIcBMDpCk<-r$EI}#W9z0vaUoh;bx*fNhgVoYe(?O4_%H2 z|Md9FDP{L;!w>Ci10a)(;q=$O2JbrcOblc)XjUnOswxhN{<_s9H?x$OJa$&hmh?Ge z*{E1zzTutk#ikA6B*2~>#;%{$WyxEC0gozL`>jI6j-mR*@7_D%m6uKV(LP%3hKSL((9O{;rb%Rpk zEVtk`>Zp!whr4YX+T&vgno0}c#rR#f^awyA{!y!!n^<79aL(Qf;ZciV+cefz zU68(g^k5vv4#d#(n)vzp79O2xBcSdL&z~8f>9^p=EG)}9c-O)L9@$jF&nFt9KR{HS zdzhNr;mKC&w@p7lsX{ZLyM7wDwWb6|W_6VOW$(RSt&4Og`|q5QCj`z1D{5waB-Vw7 zLFyFvGK$9ee3Am`_LoYhl@zGV@0t=!8JsEf!})x&;Twe0r0TX1PmpZa7VBOr2vYq| zCIK>Oc@961tC}P{zK+QXneOBa^!N9wUpOsdHRZjMAI#01E;WCV}zPu>9A1tbdc zsPWJmU_M078zp_B#Bz|_4X*DqU(x~5ZfD;h+imhlAlwp&7zPk$af59 zN$Ib!apUt?xpF@>Zlx*ze6Qm4jEo+pGfb?)%~?q)m&)`AuVBIa1zMiB}_&)Br@4=4$Kxebwg9O0fJ=C`dZlND2O~IO(66U>6C|XXAB1E#f z3LW3}Y(=W}S~lh2*Z+PU4m+R+v!ff3S#&5`$is-m0KW)qeFmo6f;~7NUabn$OA-{@ zRTqA~Pb6gxl8mXa7ct;*?3UKQ)UKc0093|du`r1wfyD<4OdhO#7;QSM4y}M^Wzihl 
z0Ea4sj)j^V$GuN%z@^vRgcr6=bhQ-b1a7%s=?-W*B1cX2KknYDLMMvR0Nf`0HVMk1 zMAVKayL*nh>nWFv;eyj%k9VASEV41`kg%)FvP7*y8PwoNcF>l<&7UgqvOpnko`JTo z#@QzG1;&@jD}HD0<>ExDz-OUUbKex+mHM41ui9*%+KLd(58z%(5@ZaB=i_jas?DX4 z9<`FJh%Kmyv`TqgL4cXrI)ZF3a?4jEno7&w!Qd#vUP>LPLtN!KQrtiNhSWnbT?Q0< z;_|QKYuEh%24BB(n&eBaVpYh=zyT7ydA$Ad%dqO8Ls4QJpp|_LxaG;U`14d-+UCpq z3#c@j*syg>j&UfJzzGWrI4B<#Zp8MQfqO=#v9(smUg-!98FKL0_9E_^GEk)~CR_G4 zm4q!Jo_bLWR?x;W$H!E?i3y)=0u`41Ru` z=;`SZQU`nUw8bUCkG~^{Z+d!K*#D)Ol4ODTLID%oCj@dQTrba!LeN9ARvXXG$$=L)&e@agv=eKY>v76ayELXa?vx zR~3H@G+K2@SO~SL(L*&R#t93`6lc;IJszi?&-d7}Z7Z_rw48gfoRwdxJ{(&bOz3E% z%&MO^x$qWvlZir<(u7IG98^n9-0+Q?@Wb!jjM`LJUUbg_Y*>SY&sN*fU_OIyec=mO zT(yQteB{L2#?CXs(}c0!lPz{dg0=HZfa&&06@?8D}}v@%UmhYPDk z@~~JPli*6RvM`7$F9K1eyWvVYbFKJB)U!)gB@`zimT=mT1D(3e&L zi=7z9J0m6{sVLfhi%Uz)fkZ<^vJpMO#b9QUix!aT&xkz|?!Huxd>+RFSq;;9j}gOL_tjIpAxPaU})J zRzYu1K^7;4gBYBdB=9>F@>!{l8JJm7PftxtqQc|lrbc2QWqwt~1N0X1@^6}1&?A7K zpUq@Mjf!mqA`qNdNYSuDHbM$0L?!F?`Ft8E#i$FQ$cEg=*izsGL7JO}MotvLk6Ev3 zBak#GOD*i@>#9u5XEb=n*DymSnnIFe2S?6cbcjI)xDsZuguOh zqh6}GaGXpF5~&*?>=dd*%eE7^?XG)q!!_T;nunj>LxOTwf~Na!>gJ-_J4;)p}`!+!hkhb4=LkWR(B z=2n3#l_Dorlq#eigVN?y3Mt@q3j3X=06juV6-GaUf>KeVHUvQLj252%^9b&|=TG?g zZ+?r19)AL@Hn}i*F~95$bnX`oSN_~hIOK`!369czukhL8gcf*s=Ud-^Z+`U)aO?nX zWgM6t0g5w-q;v2J{V+}L4>pXXgTNAtCKyxeDOxK&03Ho;h$IrxnVo@OZ^Glm(C{L5 z{#9G5?joQzgtQ3TFevOqIpk;pCS6LmZvrY!#4Q@+RD_t-@)OuNR>ar7{|o%=u7^-< zHupFNz4(m081h%^SoO*imW^}X@LF8-cgJBUlaT_6Om(6?5zXajiTbIfh%c;`g~_T! zsASSalefb_PF+DJ>!-tuXESC%LpH6cH^E zV;o<63%>cyAL8SmxDGxm>phb84%b;XHDjy83j!3D4&(D*xgL$)9^ClgLs*oJ;lu^= zF^?WPu7N*qub|y-$S7VeV!%p7u%_bSq0LhgtPkjmJX`w!03ZNKL_t)`pw@Kpz^sRD zjfhCh$Bh;;?J`Pr7rq%+Q*Cw&hxKKUPR8)?bOmLq1~yvgw;~ufJxn(}X%P&^44giH zAu?$T|N8V=Y;9T6s?WEK3prLsmHhPcofSC)J>PUqkxhjV7yL zE^-<`s8PH|qb2kG&3aQR2)9{BoQ!`$WI9Mz%QZRid>(_RLb5zz+HyREJ#bdrZksI$ zTec+$qR2uUdc`qH+L9({|Pg_tBPeDHe_qURV3MT7VnE;{~P z{0rhSnLr?#z${kA1L=rZts5tfUSYtOzw4P+4dg16 zS0@xZKDUbmgplvtr7X#ZwAmGTMrjMN`sV0k66@A(!etkK4v+qkz^@;x^0~d2&Hud&mr=v3TD_$jmP& z!x?bHRh1@3y8iwnSK(j&?Up$*voEI#-C0-;xd4iINI28Yrjb1Ectn~ms`uXkucZ3% zo@pbo->VQi_%PIoF0tonTgXr?a_zGi``z#0S7vl#p(^n`-syKZw7*1ENJ1_2^XH-T3Io zFHr3bJAm0PBG~W(>_iI9ffX_@Zk8utw5qT(dHC#ox2o`?EN+^x8?z#+3r6H_qjbp-7OVj0@#rsciLH4i`j zpFiNAZ@mrAZXA6@J8r9o2XV>SC*%0TSHljJNDqAte3I%|(^Okhfye6(bIc3ce9U}=)8~2C`Z_8EcvyP33!|I4WOl&VG zvl{;27BtF`pwEimj~9?zx)cc_H8h3O64}C8kVBh`|AYvds)|%Hq`nh_KluYAVWC6;pj4py8y9t0)PLxPvWp+kH^CkqxkLVhtRai(&n?DU@hi zZ3fsc2VA`Ga`fbS@Q;r_hc#1G^x07?O{K7=JcAiMC2(Zg#(S47gYQIeHPt8U+*5&Q z%g0cE2F6z(_(kAW1TT=FP6A+|ry%2Wgw0u9a@?UNS;L{Tw5VL=m-Z@f~2$XL7`Xk0d#&Ip#Sf{2*#cthe9 z1GQA&`sWg!w9{3GDWmhK zuhXN4`lbzqf>g7Nfl9SW_^)U@g{!Z)0pI=F_jVR7J2fEpk6bo^BUkQ&GmqF$;QC9y z{v(W5s@l#N$fe`>(RE+NeELa@O{ywi{Jq&?LhEN8)&li30Av;~~81tP9X=veJDyH}bB&jyk&ctr}9{B^T z(d`NeAPai=N~DiC3e8LwuCK;NQ_(muz712qyB+S8@As$0RnykNi6)J;!CaJL(tm&31ay&fx9E)|V>Gm}T#Xu+D9 zgz1&g$}B*;ZNjWu7burj!H+9O7KxAS~k{?OyTBV-iQCZ z<56tfGAdQ&U;D_!Y@B`KQFzZ8Z^YtW2VRr5>k7Q5dZ8N_Xg2~xq&LpJkP`-789@d} z*+;mxX??B;A#&Zr{V1%urEt(iiWDTe6`*)1T2)dbl}s>5+$Ir^(*Yd!wrvk)+=r8i z!=iF2Teb9vsW#E(8qE(NURZ!=@38mp%6T*o~dUqIfD;?TZ9IF5;5 zkBs4WYsZARx1f;5$t&hzeX)$+Y^=J_@~f6sm&%4!A1YMt zQ*y2ygBNWREP4R?#!+if6See)t8!UAIS}@^I(ENv6M?YG?`|8N(Zdj4mn311c7RS0 z)Hl#KH&+B1r1HJgRC{1Was4&l!S}!VT_vWZxd{EeJvey(ebIL7m_I*{0V|F(PCgp* zA{JuJq98zD`|+K)?iat8sy=ibd)MpVgv&p8A;vbXlj|-Qg4J2fLb5l3{-Hkf3>6S7 zq=eP$vEt{5k*AftB)L-R22{%{&iFX)P4C3xPd~Q{gyOH+O1r^G*1k*?88DVFhJECb z@LN9cz#VWmjdj!(t=wO;XV>4P z8%B5a9O`Tc+9T)^O>nr8a)bBfUQx;DPBy}@_w?7C#=q%n)IA_;4IcjPgv0QiZ+r&n zqy@KDf}QI}kjNpR;8=AFe2yBLX|=|y4cQaCS`k5|2`dp7*F2w^{)_uzCLEx`vEe4# 
z)f$X)5q2_%pl?t#8-kGyFla!=F=&bj<~Ps0Ne7=+P)r&X$;B~;! zYivRl;T2@*3imHM2b3}`yV1(Dg~8v%)btYm`l5@l zVcQlluCt63I>fZK>Dbsn6Ar#LwTK%JugHDgTFl`YTgK4PJpAC`3Kp6Ua#|N>_2+PE ze;zm2+xXE;1*0k8&o}ksgsg>~D;-=m<%m&G-f!dRv=UIdqt?L^5dp4;J|;x$((C-5 z16XbsM&O{Og>Xq!)ye1sjw~fn&|XjPT-&-~(8K{MEMM$d|XvmLg~ zY30cv&qXBYb<*?%jYz#^)wXBCkVsOa-Vk>|%J(wUYqh)bXL)@}0Q-KV^5?0U3{lAB zkCJj?E4BgE16h%C2V^Np9FRk5p;2$5+i@|_Uy?vYxncf(KA)8Yf=Pg->!^yjkiXo08&FG zSOr_S*A%aiM2c%le7rebSL$9uY+>R+L@*Jt_5eQcp)cS)AN<54&f_2YU|qehqHMp>Q1S>1_ zgzPD!#nP+WseCZ>9gJ=Kg zS#Vb7g_V&gmXyqFy@p_MzaY|GBLl2E60VtsWp$xf4!}Qf5Xr#;ysnN`T^ELcS?X88 zEoufN@bFu!-{OGJ!i=o1F310*l=7gUP@0Q`LC~}s zvntxDI)UUYA=p}k+oCR~RQ-`Ug{W03Wj0F8ul-r-2hY`qC3axluGxkI+0R^d!dH#{fkNghDn$b9wo&u(Y4}RdY zc<%@PP}wT$oWo|fd` zjEOOfIu3jzM7vbLbh(XwI}j>h((RybrLnuK%joHx;XINl6JNh)4tKd(p%Gf_nuHFz z>}u;KJkvnN&@qv;G23#`&~0=G79_5NvDR}0%RvC4Z3KAsQR5iO=b?S%+HZ!OjdSL% zjDMQaLR6eh6~GwKnW2iVv!Lsli9xvkR1cJ3&Y5{3k9xI^OfD__P9oGfk}2j3Xc5#$ zX+kR@0iBs&CY6Tc_)^vJyJ3K3CCD$DvwBwCqO)(A@?Pz(E1Cc*;Zz#;qUEeBjws*9 zq19vKf!i~*=C;LGPCfKTBGD1xCD5E%^IWN{na zP9}t{(5hDjELg}FU@!v7bM`N9B1f;a~g#R=MH5WFqQ9C?O+-z!xNKgk?}8 z3$jE~xc1uH@TQl)5p9l*BJkL{u`&GOr&nOzpn=x%A#|ta1%TqU+i2CQ@VZT8l39H9 zCwJp-{_|=y9JZ1bNBmEpd^)~*$)}Mmu_NQCyHMjT}hoxRtCsZiu=)|f2T zeLU@~<~z~DN7BZGDWT6;+;BqnnRj1)qr22|!?bL|>Mbg3DU zCa~2*ccqG;)`Xo)A=Jq9NyDT2pzaF5nTNNjF_0T#v)gq8wA+q!MG3qnkdN*3RKgUT z3RzA(OJ)Cxlm@{oY~^bR99GVeAaHbc(hc0POdOb;L!X^Pf}H~2m2+RzS(U*=Z}!g$ zUODqoj$0KnaR-1ulg>g^^oUSYEZH0xM6n2z-5XNX(}0~dVH7L`X&ZW`05d-hEjuU< zk_w_q{Q#A4E90I>V^hGL%2TB$>MQwTyaM9Bno3CG=|!mfFcT@o8!-u74deZm^Ceju>&B9dO5y2wT9>Yde z%#WN@25rYg(GD@7xxyg3z1GFeWe3U7!BK%HBlz3eA*Ma9lTe)gd}`;kT>%776chNv z{sM-w8GK>KUd)7wGHO!01EC}XYW(e^nAGvIqqm|o*az)H*Ipjd8IQAi(lLm{XUm>d zJre=3$*tFwFpwXJq95Dud@)kuSV=RZBmZ8ZFCiAR?M6#zW~}lF{zy`BOQnsnF^P`n z-}8b9s_VG?;2tzDKp~fwjI+hx<#$T)oh-)&40fJ?@(5~5CsPst$c5)4m!`3g(kbA| zR5HFElCg?myCbXuBJ^qR%Fjkk0eT>`N!bu-Ujw;pM%WIE%SyB;#Sw?0V`gqzsD*v~ z10pEI>*{y}MpDL6&99j_RBI~&Jj|vu(Hva`er{$JU)N|^%i2?dE@~OBY5hl)J}Y45 zT|kTDXfyDDi`)g7R0--}gQ zTeWO3aZLuD1q988RCf08I)sS=!cGO=@+36M;t~gxFUfbRp$;^vaF!~ti~T&9;8d1j z);a=w3z9k#rJ{^loRv0oUx!DjRxLm(TY%qXtJ;T8rBUunrW4fTmwH46GsW(SWSJvl zGYPno5ECoK>mcBnOeuEK*e+IW@HjbOmz3H9RolRY@B9dko!E?5J?C7cy{6!5*g0Tr zAuOW^kkeWCO zdLjp1&qVxx^|_V*O8mK1P*+_}Nxysg5b79P^#9*HdexuHw;5_y13%rY3yn0D5MjZU~<%~;IPBleo+kZ z#&r){(i&z18wV;4Y|UGE?Y8wu+ev)z_It2{I$S}3g6AS*hFH=}I2`E&AquvEQ^tlc zJUR&NeOF%^mS_gV8F-{cPg!18z2wao`%8j;r3jA_YGF30f*rO0Dc$QhUBL@bHkfLC zbavvGM(`go0!$PNg`DXAlY`z@q5+RCN`Pj`RR=<~ z?aA0^G-?UR=y%AgrYz%Iih*VE!)M9BH-ime;%u zKl|Yg(Xp>w2S4$_cjBUpUIe$vgK`D#>@vE`RG0K%7#{rk5-h{U9rrHd6(9R5_AXYG zYDVbe4^Mj>zIFL$kk0bHwN;|Tb-=kaC4r@NXsQkf|0qljP1>ah8DiC#MUo>cjXQ4M zgLBWn0E;V33?FTF|J$P4T6TassJOYcts4;>yIp9r#&QMD-kk_17eJPcx(d~a5p4?j zcs%G$gcHL^Y&{0!o7cmsEnsoSPPF$Py2pGd@V_o(|E?D#m>YQ!WueS<^|sgj z1wQz!>f3oFMmNJZ$Wmg0Nk&o`+P;Gb+aC1(JhYJs=?v15k%l&Y zu7>;<;hT zf2%ery$XCKU_`Y)$Cd6C*KCj|;r(B_6#w>>?<1w_;^_CBC!C0ocn12W!4hRVMF>)X zk_xJEy&*+kRM032V5D+NY?O3Am9({VY$K&oJ})qRn0L8MZ~ zCO}aiK-B#Bf!5HZpMP*Pk)*~=0|fj#B+xb z+5Q~<>RoTd$xnI`Zl0QkV^EE+grbqat#fnu_T6{lfW_CdRWit~|Vq$*zS#-NX6$EK2D#uBtb1Pg%uSVlUWD&8IAA zANt8R!cMJ?Y#}3j_NB#T5&vO}yKkTm%gbe{?2`n(QH3v^#O%x*`uh4Je|x2?&mhQ! 
zAyU3#_!FYKQbj726huB>muP+-=1!Lql7@iwXu`ubI@_v!xx576N__>HY&Jrm8*)&J z3KWC6?}@k#dH>WS;D+bxbJoSGonO4lOcj5Xod+fXs(KF5V$xt}$Yz;Dbd>0g1b4a& zTD=V?3;-QSnI>9|8dj<`xj%&*X^2X}Dv)YaYCEY=Ndsg%E>=y9XXJ4PTdP*l>Nc@) z-G-h_ue9ZL9g~xja;;-yBg$_e2I_%y<+3DOysuG&k17Ef(g^-ytyL!9m)UU)Oq3Q~ zG}mSI!{1_3rL1E~<)r2T7lzeCysvJJsF%p(Ou7JReCz|C!59AN3v1474HvxVIr!(# zeh6W=A#$pyEg@{T(P`A6Q?7KSEF9{<%i)Tj--);U`xU6Rqfr2WFMjH+c-c#z2e(^` z2nZaDC^=EyW648g?~x9vIz0S2dET+u5)TRL(g%7D7rycR_}8y|^HFZ{hZ&BfouhCX zIuA7l_*4e|_z1$04X}$D7)}@A@)W$uIfS()G?(m#$SyZdp$J$daD3x9jQ#;JoN4bp z1fLZryFU-}a{k}b*!AQ**vHeKd@BCq^B=+BKq#`aB7eF(FMBoAatKZGshOX4p`|G~ z+XfEoL(q1imj-~5afG1>k3~m9hvs+S&o2lQ$mDs41QHVy2;f0qo`FBTNE#E2{(gi` zfbRUVbbZWH0rtqK2)byLxq}C9c@}!Bf{;|^{1CiU7G^FD&2`|_mJu}Du(L(+86ZQ8 z-Mzrm1Q}0p9R^b>WD>|m@T;74RHpPi)iD*lel=_7Sx9+a?sFv#9^pXz0G-gnXTNqe zKKh^EgqSrVz~+*TPrvz#Qt0;hcP|DgN-EVvxxudorG+Dgy+$PDEqY9-4+VdTVG#1$Qj_Uc|GLOfa@)wbi znHo7nod9v!R0R?B84TOTC6`=$IC9)CV-<@R)5g zC#5W9DU*``R&6vTmi1+a?3Oy%N?gKs9V}KB zF<2UsMa-LOS%$Q>m<$nTpw#_h2};$aR~U;(T{kKwdCyqr(t%J-zEsOf4ynlQS5?6A zq?i@C3d;ARl*cXVIOsrN-1V0It&q0Vt-kWIYw+$j z{+-MyjP+XW!SKF7q%Xt zJGTU{O6_@1XlcT>hz@3z(ye{08YVqfSN;E|*LkoSE$*h=pQ*sTqUOTK9=#cte(r-f z?)Z&x+w<@{bef}(7CqrZgjNb(X9>Z~Vd$jBjSd01QRpq6bs8MX|0ZA5^Y0`e*i0Sy{fTE+&U z(^QhXq|Y(QZL^>W6hYYze2x~gh0-BCgnWHqRye5eNv=oaM{tzStxBk63Ou+5z_53q_i`@R-fF) zu_zDsmJ(U(olCJCb#Ea@(!O!39ATWE$8hTz~zqasCTlhq;AXWcc%FTK_`l z!ciw4jaR<@LKHWSqS)V$rpE@43!M&!?D+XU(z%kXN2gYir01@c4sJNG04E4=%4i9v z_hsQEleoTI$JA08#}u>Jp3jK{_qCI=xU1@;l(O-(kpwm(z#T1MXIIC@gpbE3Ox(28 z#(#MhT+v}LktAM^o;FG4*DweN&mJgX(dpuYK+Twk5#R@#EtW=RDppQmI1RhdrAeWUHI5!5Xt5P8^%ZNz{fqqP4#5}|@gqBDV zB9SZB2_{U6wWSw><*3rvsv*zWAE(vaxRUI!$_oTvz#GK(;o&2;m?A~2=3pwuvjeai z=#{aLGWF?=2%_LC0bJz;tEzwzB`}`Z_g+@U^RXIW1k{P=`mvBuZ$~M0K=r$;fI_9x z)sxefOq!-gJMTJxSH0l1SXtueKwVI&n8i=7xdJC1w-Ii&476rhlZbkpj1@cwezgXT z8tcR%1U4@H(M@>gXRpF?%YhBxpFVH_UUlKi;PE?PwGm0#v&tdeMJ4y#&`QpkYBe%f zjM{`kPZAFSK)3sF@drPPi$DHZ_=1XB8&)4p>+fNN&B#JH)-#HKgLhQA0}N11+ALOd zq%$gC`&i#T8VE(d_P~1QVZ8tE^pw@s=IYBl*x%hhIr+_`>ao7JYhUldL%&J_6;1h= zZ4>y{&wK=@owOcabs2VoqdY)QrVwykMEP5;C9e!~dN&NmM0aQ%P#RZND62<~UbGhc z#Y6DtE6|JmFo)IysjQ-$(Tqt9WjfFRfk1x0q%wAZBsp2!wFZ1r1IZGgA}FPVvq|X1 z0{qz-1kD<(;Y~35H$k@v3hcl;vZ75?trBEs7mr(i?t|$f*s3U824a7>JlJKn2NRZ=8ws~}D$j4HZKgTI|?c9g+ zp8q=BckjVSDD06`{%UL|r=i9vPaj_Ot~cPgQ%-@ygppb?MCLOp5lcr$L`}zDj(mI% z>xu8uTr3$W+_us|wcW%qr3{XA9Axu3RMRQUEmd(GJ4nqIW@;6b(`hmNA^83Hq={O! 
zfm=MFldy5lND^n~)3~kX;ff^>O-FU369V`%;6$QMhTBDd&_%~EvEXz3Ly;Al4^gmZm0C@% zp9_fKLMA(nWixQv&Zxv9* z$f8F9j3)ju*d>s~dZ z<18GV7SL`88Xk!>Zn}OqzWbA3;E2;szDOBb&usZqe-_iq4yboLYR5AVMS z$8KK-uRRN`6~MHZ@OPn0)$GHs&PprY%vjI}xJ>rL>%%oPf_xSi2f^ zsd8c_L_lCLeE>$gEfhVge+aG~!cLj!9-Kj#&chm+fLV-+%v*#dQ@RB#;j@zie@n{5p zB3%iYXK4827yTuk_S|P8kxq(=;h}mP-@I!V29imfcg%JSTQ>GL>iF!gJ+K20&s{%( z(P9R>>P`G~s*DzA_yc+TK`8^nvaqYsz~sy_P97@a$b1S*jW%v=I@r_b%6c0O$RJ2y zijKCq6wWGHI5!R4zR<>{i(LVauD3MgG!2Kk0p=Zc6*VLYwD;Pe%$~#ZH1JW-d>m8A zVaW?{c(DWR1J`~t^xK{|KXJ~^FLH2r05j9GVzfiU9`fsXsFId9HZq9$r3wn!EOc>W zY6zE}-)O7R5NjF$;P$yuJi3eNubmX?Fgl8q- zlSbBQD@_1tZ~4d*oT&MzRvSnfrfB`MB}H){(@vn2%fQk#R9g*cEpcwnE&~@1gEJF@ zMy&?Y_&5R}dw{cW8E-_6hr}RIJ&H@Zn!vL< z?(a!oN-&CmLP|n-RR}14y(kx071fB81SQLls5epU72WiAz2W`%&ey+vzc_NiOJ0D_ zeePoT%?k9OLDVDsPE-D^DsUa9ZXxW};dkmV$&JtS!wW4K#iGm!16sA}wum%I8#AIK z3EhvQDmeop02Dz%l5|Cqr9xX&_d*iQL=iXKa5tXw!q;G_*7*J60q%Dv;zQ~FVODi2 z?%%k6_<{J5KQmqxQe4Hq-a9-WED3tx-#sAc$C!smg@He1k0%D;J+^_rcfr4rV643p z>^2{DX+gC=RE_tR2P1m=Sv88xeA?3Y&!{~Zv_TT{Mb1EF$*n}s*o9!gF)ig(C|x+x zT0I{fuZ!&F^~jz0c%e_S%2!1}G!~6xd9-gM(%u8ccJVj9%5sW#k>DK!^cS6tG0T~N zL_2}|_DH$mr?6F$cH|*r*o)<7A7rSsD?wtYIq&YwnE&?5qvVl>o*Q#z)0Xz zd*`sD9ZHf*_6pa8hk#O0)RV_iL4XUkZ9pTD!xaZ-p}qI&uXDO1+Vu5yQ`+Toi%SwL zOZlt>c3L&_In8bdsgzAUHn?pENyCzP2GR2r+o9a3W!k8d5~#BUopYe*P2Qt6y16uK^DcXhE`m1Y4u6%6KLa3qF5H1M&-WQb0HqH0L-AB&v@U&a>vIl+9KMH4GP zsvyCJ47R*RCO^W1rBDXjTWq%posTdhw(!(EOiVQ*%z%K!qV{{Nv%r>@2m?jiGX8s2 z#YZ4Bw%zyzN)<2RO)Yo9lz^E?2#$j|0)aUZA9NzP>e-KWgewP8P6Ced<s)b<4g^S!w29jEX()t`*`FsDU6K|VSau|#)0hm^$!f<)W;oj+%H&h{Y)2`+BHTP};ykJ&e@=yl6wSXL6QXM#EnFq#M(vk2;47)0b}DI^xa zEcT(>rRZuCL?JWEtPM_9k5Zy&q!N?){9tzcu^h_>u|yOJDdbp$qeq$HY4+ zP?~Mj5AMhE-unP*v~YgUf23p2Vulz#Itl9VLRZAFG8;FbZ`)A_?Y#012p#sH6-7~F z;t+KRV+T}m?u*j^jt8O@)f)q-)v2h65v7`ERfIzjl|P=TRvJ5YAHYjrc@b{Cd9QL7 zl~40XTK_RmKqU^t>(}9;kG>bViE+3c7p7~WznI3}Rs)~@)t%Thl*3Bm~t; zSEX#)7BxU|;S&)Gg_fn(JdyrnHE`@fW1oCBC+$LZ00?#@OMw0Y42lO2AHw+fI(bk2 zjEGoLC0m`gp!3DFLb6)L_n(`cMt`A%OfDlqnC-G?pQr%KFGuESYC#KMZ!v+rj?T7pAFC!=~ z{?S*jJ zp*#bNo)^f^9QlH5`~6&Dcr!IkMbbG-_42-6m3c64^WxAs;mYLOmA?8gP4s zo%0nuZYYP-1_w}W2DrE3z)D%zk})tEcvuJn+|<%B?UG&BgysaOGM=RpNO~Qdk~YzH zlDN(FMfYjY@Uh9%+7YgNQ z8W<;BQq3qIN%uQa!kA-fngnT=ADHo=Q-P^vP$-O|)~%rH)S-8D6f#A)aA71(;nN$g zfu*jGUmv;`Pua2+rCb_qx^`&{+%2KVkh4!(36YV~@^3XIc#G?xr+|R0=DE_!qDBCQoESIrVU%{qz8-?nk%zn5>T+GhSNJ2qwy&Mt| zNQWE9A{a+FylkPVPJu_T41pvx;D{SwqIaQTVQx+W`M}_SN?;>jZeAbp3ByCfY9tT^ zbm}rp&Q4-tY#oxUaC-6?6m_ae$AM0V%KeW{gl)_%&0}@s_$cgc zC&C{n!s~Vfh$>(%c6xZ#R`>cPX~yFArP0~%gEQlPbGj%5?~uQm4V%Ep>xA8 z;NJLC=w2&wi+HTZ8!AmZ>4c+j&37(Cslff!m1Y7Fx!cwGlh-G{YtJ9JWxrL2z9)i{txN~kOvcHVp&uKeDQv1k80_8pkPzJs$U zSF3X7JlqH6-QRT4%kh>szXq8sO?njbK)}j9kCQc(xI{(j7_`xznL_o@VL0`=bbyR( z2HCMuee=t>arYjK4v*l> zO&c&A>M;BOjUYrN>>_JuXmxz-ZdRpOUYAJ1=GnSA* zv`j24&Exi}gL~Qr8bJrg77{qSm;j^Gb&Fl>t~oFqAH&eluuW8L8);3$h@oLmvyF=TbdMr?sQ zD5Mg&XLbS8jW$l-GLDpP%Z!b{xh9P;vK$eRebB~WAt&0{6^C;R7lYXp463H-8kR_* zG1H>#2j9f?`wwCF+%#T#%E_W`RBcoQCy>sjP;YjnVosRKn%JVuFNJ(yppeZemOvy8 ztg0RwVHgVi-)xBo0Ry3!^k^C~i3G~k6|mx@cn|{)*#TrOWJQrs6Sp){8!5EFg(@w7 z=0?(5ha?wFc*wd~HDgf-{DTMfi#Sh~`Ug_(My+^VN4Z=UgQ8r4K+I?YATuu|J*mb4 zaX?gUZehd;T_?^a6!>LkdK#H@3WGy~u_uq>2#f2VQrwFZ5c)JYF2_D;(FpK4Xpy8( zhu<;ql0SPTcHB;Y-Rk3+=bnYjF8?y%ws7Ch-MHqOYw`V`U5{P64xmzP%glJ~gU3PI zGB7dHhm*EW;JHsf11Ft&G)6bAhn*x!xDB_-cV>H^VkTJ_9K{g0D#kU@C?M*nNP-Yi zRfS<89S|LQvWTDk{8qf+Rd2!E!cx=;Tx~Tz+{5yJ;&~6JJMr6}qYh9GmMD=NWQ#Di z9S3dmk??YgUoJiukztB}WaT#$hJv=hN&!~QYsJAbVE_rGE#IA*LvZJ>5$w2Cs^0h< ze>m6u$nKwd#iyTpI)3W^tx>W?7IyBQkxHOBf z-hn4DsX-I}}(M||e+288K; z88aAu6W-jc(4Z6Je4P}iQWgd+=(Gf8b<1;L=^%8Mgl-MK%?`>Ygk&^1Z2<+ExePEl 
z4^-v>d?jqANVA+{T4*%7_|&Jqh>!i#=jG2mRP=8} zJv{V1JLaSl@P?0F3_U}IS|2|=JdG>vxEJY678jmz0*)~gur&=;UBhy>gQVu;_UU3~eZ)S#Cmm&37*i{ceDPd{!#%&UgXw}JW1%AC$!;V@9gNA`8uNy|upcxNaLmKW`Si-)= z79O`@043X&0821Sr|l!OG?W`n?4O>;iJQhTl*?nbx&lL7m2?zzpjxS*s~JcUyyFG3 z$aa^Pu~@F-iR&iNpH3*oKs5KJ?J((LY-h10&rt;1Xsj9t3}m~=vJ*1b=cu9{jW!gs zL07GEFF+=nM!i-Oxl1Y?IvrvRGSU%HfI6;wwz`QU;JA(MMV6za3P}XYuKee+bv#_)}c<-5+D2 zOz_>qfAGi~AIRgZ6OX|8e{?2JK6M*LHg6Ubx8H3j0Tj7)szOy1u-J`E-A`#PGV>Qj zu?QE)KiCC)<*VPv>)-M|RM-xWa236|%EQ0R|FLJrL3K5_sxRla0}A73t0}H@Lm149 z1Hi-vXk+VOj*J0X0wKpM(vD?tFo>eC)&TM(9<5fLI_?f@j$7AdxRpi&l~rWGJJnM1M&C!onQFdJ92m z1W4o&b}P^pcR?eoM6;leZHG^Q9w~qGd*IiaKq3P>KM0>QeChx!AA~luUybkb0|;`X zO5dS;0LDs1iStnKCO@H+_m-yx2Y_N9+3&#OegW(ed1MWZBD6}723C6+ZlwynP!#Qb zqvMNbdzc@ARhn2+-*z@G001BWNkl(Z9xqCO>{+Fi8OYkrNnn35M&>n`d(ta6%E@x+Q= zyxijXJX3Z?d5+~|eMLW5b7>DgnON<@t?dh_`IN#1D|WNO8pO%$^x`7k{H71!8<$@# z`(IFXy}r~#-H`|TxqA29KpyY;#D`JXv=I#|-exk`+il_2`B@aK1kM~BML$(&yB?Md zzzRYn9S`$-&cZA<_V;5;CI{QnG1v9*jeG9JbhC-49dQJnGCTk~Y2)XWDsJC9i6bow zXKfzGU3CXHEmIsQz<{aYDeDGt&;Cj5b1V#IfYS;dPVLL$mPH>|mr2>rNE~fC1k0;J zFRgb`&>hgH>QEDCkqH6~2{*)OQpfnlK~(E4XfL|z(vX{qmqe)v1=$aSE{C_Y_P#=fjkgBj= zW5SVyp0?3!bwzE-i5vq-+hbQDUn1ZyK%?G3CT+`7V4O%NlBm}jBDO%509)`})C_jY z3Ym(32?^e{Y7MzuMt~g6MoX%5&bds|5M5saKLL&sWT~@E`$viorMSZtXPsFv6}bpz zli<&mD#xudwx*bm5)hIuMC6w(v-#d;Q=3k0zcpIdioe&>6NssQ@kWt0uskKI?qWtk zY(5lI=VEq1oQ4R1P$`Hh`Crb#6vS4%$$8CTh^(E*zx?AD@X_~w@;7!*!cL;=L>QS%3j403i4$TP6s1h6=0t%eOM9xuaWV)?DfuH;mF8=sGqeT-K5fl2~ zlmf&7e)X!>Rz~V3J#?^D&xqED=*g>!O9-&K1wCJcMqYcN57zJ?>|zO4rXYYJ&NM}& zs8vVUtRtA62Ii&^)K_}%tgh+z&ud^u>YqRPe!Sx4=OJ`gqQhTxx%}o549iDYTM<-# zm@2?0@7_!*?tRCB&%I8T64!aR49x6+;e|k^4}S4TAW=f-*AYzbgkG5!03ft1oX~_Z zu>pR!24ikFZ0^kz$DT>C`3+X4kXR{0OK1r5X`p`#yhat_!F!>laxjnJYY)R;oT36R z0(Pa?R%tSUQJVQdwT6IVIK>Q{S{?S_D74%-^i)ATmqAo$%S>n#AW!ym+^`V@9f7ea zG@4!9_N&`)-3_cSan4bx!`~zIg@w3A-_{HQbo^kAE9G5N1&#kmAXFlceDL38G4BTC*A~1Dq z&t@^A1#op8x3s&s<>0K$?9U)LFrCIN)dpP6z{Z3DtJcCV=jyo2x6z+y;;Dnc*~3Ff z1V!9g^Py)`xN_G%l%->2iLOW(bdk1vY|U8MS7~E9v}EL+bv%?(CfWwU?*_ELyyjcP zjiS?NA(u^Jx!uCP*+pdRB%Zu^7#mC#0>D4B)x<+cs(A);d9LD`5;wq+eM0*D zvMq!a!7&1}UkUWlf~6+4B&^Z_Q2$8KmV`hPP$VlZ5nGbBU!)@-@1=aTd-J$>FGit2 z*>S3XFMBacAXLXKrVK_p2+{AVnnGpf%91!f(pO(6t^(M_;eDcBB-a>CaFTWwS6_84 z{`$4=RHh~OYqQ%QN?8);FjMLqCURG!4OXv^En+eXyQwHEXbopwBe1NI@Gs@`p&uYez2`9+*?>_!A` zo9!rJkO=tGO2Q922p4A%Rx1*q#TpeJ62m#T^dzafFtTa5l;NduS*HTcbp_`T(0B$! z*c1q;;CoUBRfC5R8S&z)? 
zS1bNr;E#%csBy3MdxRvFL|eLU@i2%#AQvhcO-5F|aW^vl>ZPL*zf5#aKG?D#gGiDM z5&lw?-_T5;)@tEz-u4k(a`{zg)+yc;*&zOoEq}>EOdWrG-m~zci(Z2TuOVuCC9?pV z?yGJWuEYM0hQQKsZ?%ET?m2)%jWV9GbrYU8woYO~)1di}1HE>9YXToP2$ z3Cqi98yaS^Su|V^{jE0g#66K&mPm+CPLiHC)i!Qyhq$fnBWF7}Bi+R_Mn>Q|N&Ikj z8FN91Lro9#APqD->gf4cpAGTK4JGWWc=*c9ih#I}Po^;2?O=b`N1D>_=YQ|=uxQyx zc0J@$HV!sh_|@TQG}YNWJ(^(SRw|!M3%%1tZvtp%3WDqLfi{k4U+{R(w%hOvlA)^ zo2IxLnv@2P^!P*{odcz~$gGhlV|jL@S4(A49yYt~*o#-Z z@U^IxDYyIJkIh>);&Y$-44(XyvtVdUA^aX?Czdwm;iVf%d9TH}1^oKgH{-G^zKiQ` z`~{{DPdy;H00#1T{Lz`m&wmXu(w z3_LA^PkrV~`1?=(E9$LQ6l{B?)kB7-$La~aN?l$sVucXosRzjP=g@3-5j3cYuzGB& z=f&DVYZI0SCIt`p?==zp{`FWSbaKBBr8Fz<56$X9P=4nl_Tk>YbI(2v-@N3rLLVke zL}+~F1HvOW2akYj6reGvH&&ogS0a&x+o(a)>d>_g!g>w5XQ3O~FbBt=S$zn-4uY^L zJbhwSQsWbVRfNwjQqq7oyB|jAz!)9|!W6=ejnJ?Zwbi82S=3LlGa@VOvpW(xViLp* zD(E!fw-ndkD)zz4tu$`Z&ZPaqb`mp zX(>!k%;+JqU-@6fnjUeFPzhzlf@=yOr1Uh_w&4{$H-0FDzLEsyZ%1qzdE2#N{HR3s++&^?FjXuSpuz>#CT)cKyctFZvz%#Wm@wRQ)Ppsns03`Hk_v`79h?!>*Mw=38xSD&{c}Sh(i!5~flq`Igp)9h}Fw8P8*wmPvIV?jUm%SGG_~DQ1?kM)9~Y|72Lg4 zN7rrP^pQSnFBXwa+Tx}Con3pd+;Q>8#~g{{`g3Sevp5WqNTsmcXyTqjvuFn)jvpDo zcqSzs0F&(xL3C@VbzD&=q!JZ*l>F9h6_25NAuQWKhZ(m#+oDV^-H z_#x(I=WyK(zr^KNejh)(?s`RlrV7kIBttyE77t+7qj=*XH5Z&X@M=oXI+*3}+ z1uuR9?wg&*dp`CFRO)qgs7D8`D!?A<+8$7||CTc{e&*Vz#Oizhvz7nq_sc(h`eX0J zg|B`QJg2Fop(#qUJPFIIimkGbjMAphW)TQT(GZ0{q67`vsJbxIj46lXno`G^97*20B`@Dvi>n~6#T*v zaQJqF^%aE4J|MpyT6Y;(o>Y8y+60#d;94n|L01IJT#Ek~IoR$Jg4#UvWDa3=3>t04 zB@8Qmy##H=YhAOwRCO)PjTl%Lb+>x)vLygzZ{l8I)_ps$Jdqmp(3Rov; zVNZc^)x9(h`f`v&3r5B2ILODUJaO*+->uWNY3ORum%oiF7FpwQ_f7z+x@a6+{C6M0 zM?dkURR_o(Qt?ql{ob!cU8nVSeIsXUA^E zcG|v6!R%`83Y3$X=~CUAW*N{YD+~WagbpB}9st%mU7`85o_e zgJdo#=yYmuGYOCm8V!c150FkPEI_o**@{yRcJZ!Jv_VCYQSC7CG!PV%Lb-BrsT_M1 z03_{NXu_Ikb{k?wL|~!v$5si2+BiyWP#J5ecAV17k5I&hAwiNAvpg@-OHdsPb>U(j zBFTv<2w3^YC1o*<8?O5mUVp(w=(N~;`pw5X-t=00@Z*0k7Qe!ZASQxW$v>~QA0qy~ z((qS0C#oGO_y{Wluj}F9zJvJQ4}OHNU4A8QyYp_LcCJTkZ)>cJdS zs2(a+c*x8BKk$U@Ti4?U-~KmDj1R&m9av@)94iU|K;>)o1)<}G#UW_y5>WZGMd{yu zG4Rp6Hp1#*xC;lNC)3bIw~APf#+%KyKf#P<61ts(@9N^jrxDagIq}9koT+=H^B1HC z5RM%siD|O37yi@~f`Ro2hbLgS7om46@T>y7>?Y{#8JM+0LZ@>*4PkZ!VI~hyB3nx$ z92^pf-(YqRG$tnb5$NMvVc5}GBJ=;qa3{`2<7&7U#8qw11VP#Hs!^f{e?_3!nE$V& z(4%fbuX{+2zXBIWOR!gM$D={@_o`Y@qs2JhucnF0BgG;;(18ABd zp1O4l23rm~rjB|xje`qI*qliUfc^&urm?F*%VYxsnv0ijS&wQE;@7o0x_*efVcx(L;ZlrFWgH)WGJeISdBX$cLm?rXSdVFoqI;C5U*@#sw$qWM6#jgAgnH($eK zUCq#+xor#^LZC(|SS^97<6z?vqgcFm62nXY3?0j@Hd28vy!^YCma%(g22a>HhLgtD zVX@i9a`-_uVQE@Gk-1sR`ShyLlO%r1yUB6AVCIIju~`Y zgXXFkv}r~3D?`WZ%rx@3JhDUtN5Pk!305(&X8<>ET>VoAfI*ym|J6!=RsB!!cMROB z0FdYK^U^`lGL@D={673Q13kMRGRKccdSpZB#5Yjfh}PpS>QyB|vADQ^xw*2)J-3@J zblPp@Bo>+&9O%RN#5$}S8^Pev5K6@&(uo}2{r30Zk}rMv0Y*D2pyjg({P25U#p9p! 
z2O=mWy5z(~>*~-Fkf8we_?eVlsY+Os7KLd3_dI|}cT5WBy3=UlmYeUu7r%NruKw}$ zn46x7_PsiUN|_X%arOy#%}bt-Q=jm7*rpGkI024B{V zAOAAGeC3ts$eN8M5Px5xGZum3eE=p(d7Sr@vvJ) zuRro}9Gai)S*5CTchz5Dwbi-`B7Ha_Fu(i3`TKc=54`8~c;{PR3E$};^c(PMWrS`A zW~Kx^Hz53ctvL%#%fd?+p>%M?fDix*fjXfHrp!X2*fcyEj z!Rpjq-Q_sBkC*)_w&#HzbA<&`!CI}7C~(Koe9aJ`7vX!)`|sLCYlHnmjm*{t^atx$ zt?dA;7Dj77ha?!prSP1yv$*m*KgG*m{wAzco4=E^Z*Rp$2~IrjI`dgi#b3PTuVE#U zumV~~11%c-Bn-vo2t$;$B-)0KWTTCQ?;$B8I~{%|DU!QXd2}gBZw5$)IxMO_YAg`b z0)+Qc28_0gyx_((q)T~)@e@u3I7R;;xieZP!wH_YNT8)CP zRsLAz1p%BeM77(N^>FIg0M5*ta#-$PuA!O9V#m@V@&xxSF5{HV6G)f|BoJc74e`^t zB`kUYsvQTX4&`vPWx-46*t5KXIhmuIc*@uqMl2002(VCTAQ1-WD-@BY?lyqt(h?G; z2Ih8ReaV0;s%|>EZWmd$;JaOTY(*zcw8%s5xX2|E@B$yru7`AzEq)#4N=@X9sW8~4 z5>XH!m&u~usEJ}>p^z8rV)8i|@EoUufq?;OJ@aQ$VTB>44jmMmWMTw3!xsROs9FKj zGlwzQ*C(&RwmlOHZqdcXc@&F9Np50SJxMe?Ph17%wMCMRN_(oE$4Wy~_aa9?*}j^F z$*IHWEA@$Zk3t8p&c-<7S6~`d0Z8zVwz}YkBY~h~A|=hmO1X;3gOk{C=MLO+_q~|> z|CoCZc+1N2T>M#m?OsklXL^BQ=!k$MNKw%Q6$@%&i5iVYELRevu_U>c#GAyZ z2r78NMibO1ARtKZZJ6FpFT3x(`u#ocxAr+RBl*W0HTfR}X3p7X*Kd8_`#$ee_Uyy_ z+ya&sdH4ruez>&E%Xh>~aPl;m0y#1~jE$Q%V$JGx`1ZH1#JqH+zxZ|TS*POatG|wH zxhQf^Xe2o*L`EcMWT5gr5J9)3H7I0&1psd&KGv!eS8_!17&Mr)VK(^w8JIk95I6qY z&A9SA*W%}QJSg(c{#U`aaQ1JXi1)qy)j0FHrwCz`7bcM#ku=hhn92;%ZZiNTi>B}3 zJJciVTJ`#uJTKPyG${Er6%zn<6fo4OZuz?^pcMqK@^FJol1AjKO; zZGp_y8L(qQ&(Np825sRWlIj`?o1t;8e{jD9P8k^+v5|$Itw?80+f{N}V)PIk*nxOS z-H%ot0|v$bD?qsSNiprAH94xI(1z9_@Ix403uaVDSepSn1NzVg#C&{`W3&#i=W(Fk z0*1zs46cJ_M9{lw21a32EMO@c)Tees^EIGcLQ)x*;*(@HeVDc`YD}j>a{4~E>eE~% z1Io`1IW+sN`(ep=sW-?nik{B!`*Psra#<08Qyl7u5A%?A|FCxbh=%*H^Lu&NpWfFq z6wdv5{<4(t5PDBP#IJ96p4JYc&=PmQ3?8`eNxb^?@5HV>lgl|L=}hcbT8w>l%$Adm z$6NmTL%`60?D3kHz;QH@muGby4v}PC!xOy-J3CE`+BU`=2PHXNHQY5dBO}+X`65gf zah8EnCWD68MHqR|g9w>?0iJDRW_AICM2sy9Q(=IqS{-AygHhXp?>Km}*@3U=7>omK zHWIW94ZFPryQY`0CR@N}%HERzk2O8)3N;MnTnt+#b~ZZbh6&1Xh>4<$`x|Xg7^Ca- zafwlhWBk@w84oWuuw$kH?Tl|;o(Q=lj^K)<#6q3XykUg3IUCQ(n^@9xT>I#5*n^zU z>o~jU!1V$wxeoTwO)m)GX*RYmb>YV}yfCoI)p7RV5SIKFni&^2KE4yJ1UR)?#<7JA z8omcJPB2i&!Lw{MIvxfc7cOmYwFq7k!f_cML5ZDW=*s~?HoXKrkoBq|<5&U_dr<&S zbo9)wR(f?74YUFFNR$XYWZaCf#@WqNODtBymMLUGjsOb9LOS=S8BdIW(nuOxXbKVV zX(Yt(3mMZXM>X3vgcZ+>d!-~`2al8wpkjgL5RUjWWx3op`eoIp2t^A17lnW%?)2e* zRl&3HQAz}RnU*Ce0eQFd9uRGR%D7d-eJOT`R`7Xudp>sU+J&FqbSHjx(_PrRYd5-e z+VTEM*UkUuFTeMXe-EGfhfgZfPD0LxFBb=A^K6@>!xz0S-=k1TQh3oxA;2Pm2R=P>!4dV~6V#$kW%f9%WLkhJYM zbUFOOef7ss{W9RtuD@?O@=LC#L&EO;(5q5 zJNW7J6b`~hm}sb4z{|$+*y7lDq+{Z{(@nI=L(>g-Q3yW@Q85ynn6+`x2PR_;tsp^| z_&CM_-h0YXxN6%BZrVEw?X)YeNGQi6OWo097>0!)PUMaXwu#g7Sv)wufCpxpqKV&H zH1LXvD%>c*4&B79Gi@9{TE?Awr!d=&kT*;et9ewS0OyPi37l$_2l4$UccP<3D0I3w zcl{cbBqawm@Myb@2M^3*%(n2dt(!4y8))`CDMD!C!vmog$CzGP!201)=^W=eZ44Fi zQV=aUm zIpGJS0YUs(tJ#q5otuhu>0&Gt%I_1cW}2e1@>S(4{q=M_?)iz|YpV(6>QO~wN>7fJn0 zB|bSVY$7tjm-!4t7WleN7*QZMgonAL6lnPomx??5I)=S2mt>hPvGM z9J2czfui!ahMWRUJANxJI_DfbYvU&9W`Mv~1&KhQ^vx4&Llk1&&iW`#)V`w{P& zguzgt>M(pShUU$~DGvb!x~A3zkv^bhbO&--V(tHYT4hd$Jyznmgb@~u8PaOknW@_F}p zFsc_ykk#j){@zj{B;IQzFTANjbyHUq!u z;oQ}mrTE^txF|FAiEIItQWn!*gqt4TfzvmQVNbJfr$}DcMu1>-L&N5#w&YIc*ItkMz*&O>7|3Xt3^hEw(2_aI zsg*pA&y~>dJ0hi{RqM}cOL$=a0tOrlFWbBZ>q(|aLNxtIWf~_5dWbQzw1A^V#$`d> zvwJtTt^jI4mA~I0R=j(rCUNTeb;#(HhtSB!g5$bkOqDFPT}T&vYZMUc-IFg-nuLOzdnw~cPMhrvo!lmj_BAl94ccx_~}Ihn1?8D%K5 zAMxBb46zNC9C8LMEiOqSVa9cpcRdvV)UJmyn$0?jg@Otn5@Lzm5y&neKzn@FTB16K{FR z>Bwny==lL8#R~LF5!rR?pl62>`7{(_2TR8}5wNCkyR!EsUqJbw(?eUuF0&9+vCI72 z6uDD3)a+0;OU~j9+63D;ux}cdU2y}xc&b`l^nv|Y>NF7eJ%n_jJ7P1EuAN1l zM5G$xWwCZ>1kc*M1!tdl0=BFl7l<(Id5BWN7D=j7!LkQ&@10NMBcJ#e-16vy()kL$&DVdJt~%X(e^Vdyul0UaFZTNvya=DT`2AS3 zDoc5fbmmdJE-MfwVe@djR 
[GIT binary patch: base85-encoded data for the deleted examples/modelfile-mario/logo.png omitted]
zhD&qJ$7dEWol6NjWm!idorLZ6(e3t-B?=k0Wj&ak9){Ct&e)Sl1Mxt@x@lpqSw~Fv zus54#mBNL-Fk#uN8z>v(;-Uz2weLX# zzv*7=n=T=i%D{FNXqhzhu@Rm%x_y%?RgwWVM)Q!gEHBu0rvYbv7K+>BS-oD`#dCUY zVfJcld=un{#C*TC1&6%ub&Yx7l0n8bNd{N4gPQEaY1ZH@Q}U36R?Yx~B_NE5qG@os({4R2M}Zo!0rQHo3*VZ+lOE2kkldD=Q2^E)ne4Xl1jkskvd&W zh|iGhh)x#CRBee=n=GYS=9Nk-iMZ7s+?EAT)|eab^-T1aDp1L0TQ{JUCLobBpj`u` z9wYYq>n=LWTSym*NQ~y;B`ipWg#O6}>gPySIFIyYd(dmFL23q2ObNErhiehw$biSy zdY{3VRE-hWKTyahiR73iPR0n`{pg7k`0C72Jhr}$#g$E-2_Kl8#JzVtA2;ni%qV>l zRbs9+cK~ugjlc6k+9zr9C$jj$Bah)jfA!~B-XwRp?NTFZ6Hg8m@qstL8aM4P!*`od zRJwj~_!=2HQMZOHsU^gcNo>^uyyruIkI&tI6nFi|jrgs%y#kqG0{GH;p)s2#f{BYt ze21<{*a;ZuoJ0lnp#QwxeG~~qI6fGZks)r0LB$b9Agb!N8BpR^k$FwXapI+%l4ciy zk;dD8>u>PB_xK3eTZ&{aM|B+dx&kKz%IUE z`&tgdaFoYw50?kYU$iwJXBd2=Od}*^3h8IUJ5VyjBWJpoMCXz^aZA31kF1`-9Ek-{ z(i9%)_V;h^0Bc+j#vvYue#7-B+xH)X&uR9spvV%)M(?RD|6>7j1>2y7DvhH|)dY?u-MZOcYX zk%TO;q9aZAz;O+mo{#m722+jEO&p4M>ves*k((Pb1^Wa3z&z|0zv_X_O9i%fUR61=KMhq>QSI1<0Kt>6W(IuWo6AdMxw!5sN z-LawT0ooF<*0DI?#o}?K;wsX<%t?f;k{mCoaOwEk-+mgO`{0LJL*PO$N=IJ%BRAqb zZ+#hZB^9=4E9a1XJ-7ZCy*}>1LHzC;)^w9$3d>T%-i|)c4P;aqvnUc;SzHb_8 zCJ8BU&|6+X^W-_mJq7(#7Ws=0z$gR=%nmw_Z=hA7#(jX|Wo1Z)0;^(?Pd5i>vt~05 zl)j%-;Cfw$>-c(CXJQcWxO^oIQtc&dlJkCud=j4uGs-dT0VKxaoS_e!~?g z=h9--z^Hto&%lzpG=hjFvDUKiC;$6X_`>~Pg3Y2f5mhVP&i65%&*NQhcn$8nasol8 zftW#l4rI8bz*R{LLYP0PS{${Ok9WTBQ~2To-^Sf9x)pDG%a3EEMAwu(6&X5Afs?C! zrieu^0rZR7^I|+4w)O|@`ei>aXtodh0R~V!UL3>JBJ3oHj}|67%;zV# zxe{Q#ZsN{+-hvb4cM!hre9qPQ)Sv$bblGR2B*O4;fh4$tVVvFmez6u4BA+@p;gM1x zi$Vv9dcooGM|FLJpXuR|;Q=Ot-Y3Qc+f+K{)e~leznLhLv~v!xm1pKEc;#FEJ03gx z9YBBLaB)zJ;mu?Fu@^cl9&k$3LlBz(VGxZ58Un>1#{DoR2x{N<_1PJBqT?H|IHJIR z9;Rb(y2Pmq^Aynm7@!41(4go5P!Jx>=UFI=2uUJtop3t%=-OE}5Q-A`DD7jM9(R`n zi%DQLWcZmhlD9txnHz3mkf6`D@dEP}h5o3UA)q7Fev1--L7u#`1Mq(*0f;ih?>KFU z(xLP6!3zKdBhjFNmVn59go9Ib(Lz*fWVArz0Y5-4pG2$DgyoTdgoKNSa@aC^s9Fw4 z|CQ$XFBr<9W?J|}tqIF@k@ta1M@twbK|B@c`T;EJ1awV)AV$(jjHe8)8v1=wpL4O$ z@8MrJnyhCnn-=cbJH^|9=FgtZ?ba^21@l$5j$`LG z&~y8k$Ru%SxPXn0iIXc^=-Pc;JUYza#Pw+FVj zV5|(qv~YZL12b)t8qpX@#_{5Vdr?$0%q?$WqieI2u%YRwY}QfA$1qeV!XcnqzmL3T zAO&E#ZQ&a<|1$#IFj5lqGa5w+Gp`h8e3B}@TaS4N;o7>?;kc?v9< z&Zon?@IHLl^#=azSAGNMp14pKK-?guWD*~E^Gk5g^Nv8zWnq)a%)$cnLP-Z*s(X#x(WiUMFrZOe2FGPSsAu04MisCJ>I)4=qYShQx=;E-9+R2K5sFg&{t z|MVQBS|93g0n&6CzL8`XKI`ln?A037xPth;T@2hK;FaHQKqd2@Y#xEt0^A-zZhchU z%Q0xlB4o{hTW!LgZ*coxAJ4KfU_kQBq->=0V6LygU$>yd@{ke+G}5RzwhD8tj?CU6 z#1BpIJw-y<{f!znk1as9fz0j_5(g)R_eOOS-Nj|NZ6HZzJ(EKqE+;iA$;-f8uEMo6 z7`rARWiybq4piGg|7-(|O_N6ydOiWyArYMzvuntBs^78EB7vwFS!q+XXyNE%b9n6J zS$J6&^R;y>ELG9zIB+Q;1dvN6@#33r!wasu8dK$AP8QiAo~?g1A@(Jyh$XVY2 zI*7Z)jFS*p7%R7FK|?N(*JzkO8ox zQz?ENjR?X(J8092?U?}{Quzd#ECl+6orFOk4zW!U8=Wx>%t;F%#|(VxAMVFbz3$zx z1uqLMVlc*%cr*VSA!b~m~Rc5Wh z2^3rAlG7?7`y+|=XcQ8z!9w$gK!o9G(qyb|S@_TI_yGR#fhWUJiD>_anmyt56#;v9 zAmdGw`*5|EW?`ugIlmH4ipw~$#HQS;4ouP5i~iJetB}U_kHVU5QS9 zL^7k=oI6I)64MPU7HXhFKf8i>N)!?OO?|WVUni|4=v+HPiDil&{ z>sz=p7e_Hu#3whaFf9uQ(@9)EHHL1j2053+zs=1f9ZMjU$}tvTv);jT4otxyU%+s9 zx#srq;N}+GzJ!CB1g6k4u_7F>L5wSubY;+kC}=$JOXIdcxv zIT<(a+l7LjM1qCUB-C67Us_y4*C8Ekm$~{kl*<^6$626e)$id1$<(HjxO~?L&aAZX z`0^$?u8S)s%Xsee7#=>mh=-~bc&d+_F5@MK4`DbF$LF3nixZVL;);u*bODD-MeNP! 
zApGh#mN&3ft79UOzztJlY%(;{s$)2=<3J*XFHxIYR`9HeGUA$w#iot9txfc0u+rdU zocQT7mRc4T>pc!WSCmGORSZ;o;p7!FRCH<4QWZS$<@@lz-uuUJ$VqPS+IhzWxMnhk z54`2IIC6L|x{Wo&@=55q5qPo>Ej+^lCegW}l}DhL#^7#L z;8YsWhEtG6^YAHQdVCQ!J%r^9wB1t(j5z$+1xTA!$mJrWUE@sC11a0BtpF=krU|MO zQwWk-^s7r~o_GwgbQZ}2hY)B*cr^khR?(S#5>j9xd(}0NOC|Uo4K+O|UY+fTiOxry zbGJj@>g1S5@KZ8%QsLMQ2J+db77(;dD8)Qv0wZcVR8kb|_0Tvr3#q9>8%-lIT@n?T z6ay&vaj2@*ASH;O*I`va0s~rG9W*yvP%{Z+Cnn%37LRaS$^TCy`o0hZak)u+<}vpT__E;urAfiQ^&YYsa0yA4{|80OR=r-t(XC#T}QFppe=j znK}g|^hB*^GKuk_BxUIF1U4!*e(k*_r)gA+4U$l)uW`tLY$(I{NgVtobSpvwF-kw=8>d9fp5 zdpwMkoFp(BJvT91kpBUR3HaY(^c^H z-~Jo?$=`oj=)Fb>&|v%U`BnlHeF?u<+Kt;&LkJv`l^vS_&?eL}B@9CDKUDn;4{K*r zeW&hCcx}#00CtcC#55;T2OLaH!Y+(J+z*lf9w&zok1*j1^F1+>CpAm9bP+fU8q3kT z=g^3bA;>f}k@3&%4Sa&IK4C*9iu;TYh_bH`)Gm@E8S>Z|@;~+y2&Q+7sSb4)x!bVK zPT;rF1dWtS$A9O8+nk4;%V>1M!}C4JzMc|vzhi*@0i*^`{rvMf1J9TMgn8}&*chcv z(a$5i<5T7vVatZPx@6}_#yY8lfkRmx&zdaZ z`nH&9ma|FRx_cJ_nz>RNKAS+NM{0Nh)CBOgQ;S$@bzyoEcIVP~$^I#{`!4RAUqILE z<4`t*I}aX&Y1{bbRt1l4)LGdsC26=}tc0sZ@>uM2@!7c*EVny|$vW=XKZPrY(tu?l z?UEOPg3qt6!cu+QI9Y;gNcgwKEvz=XELL<$zKCmwv&a}a&NVHZ+@jy)KsKXaX2*J>UZ#|_x&jz{`7qU5OaYffCx9wKeQL`eZx;-@1-M1jE}OHyj8D4 zk_||-P?1p%IU;&CJhKaH?i~Ck8PEW7DF=Ccm^sCyZWtsrAg%!IK7un#@W?o6qzJh* z#H}~9ByM6u>Y2PSL(Rg|WAGXq=$@Q`EC)y(nnI8sgJ)}y>^6e> zCcJJJ@<;}9GJ(LQp*aERx{OIsDd6`UG-nndTM`oE!_fDXSjyMywIIjIUf73hkxqlp zCNJdnXN;ub#Z;yuN_Gq$Q^b5Y^_mdrNfD6IfJ0*p*@A1f*ujsI6k18Gtu{KF9mFz2 zs8_2jEuAS9xWenTP56?AR@=c+vxyC>jfWmPiN{Z##JTki7ImVa9leAnya+HkMZ?XP zUygg8bq5aY8WW4ROOnlW?lh<*!K|s=k!gA!9z1appZd}l@x-wwSq^*WtbfOv%kgXf z$T;5f+MmSD2Zx}j7Rzs|WE90kkk5c%>H=u-G#Whx@BD8c!DCB{xc60e^G-}~f2r&$N`>{Fv$V-0-)kbfycR%r>JMghTej8-FB_!5aBUhj)nLWtt zfgRK~!7&}c=y)|9^tz+9m6cZDSPr`FHd5InCv5y&l5-x-d<4QTqzelC1COPc@9(o1 z)}MXstN7*L`8ay)`>>r14N!Y@%yNLNAK>*PW&C7nj8!!$_!DffFH7hUZ8S`VY?Z)v zAz&};Hi%3t9KD2A%n^oSzk#IdUr2rq|QmM}EjcnNP%zRT!#rqP$s0f+`UJF@3zNC3`zxSsyDA3(^z@JH-; zG|qzp46692yls#;oR>j}e>@d4I2Z?rJ7a*po_w2{vyvApL*=DJ=E+*lN^R#HgH#qeLPvo`gp?o0#3IB9+v63^AS5 zVYn_5few!rnRpDg+lNm02*t;@HyT)LPy!HOI-SOmp)|5F1CA2MQkx_)edHA#2}8xY z)xo(+lN}KA$vD!9ICP>6E^gu@?|dhgPMjTZZN)tiWrw6>54`f`i}CJXdku>D417&P zdwmtng*tZKejPA80-pk^YXN2xLA3#0O>n!H5*2kQ0grg@jtVJ3HopnRSI9I!R(od2K_(DU{y zuEdL;eH(U-jstxc4l!a#e1~GD5r-0wbGve*-oS%T&fu#Le-kH9pJsEdr@eMA6acQ? 
zb1{DLRWHYtlSyb!4Vi+0KqJEy;+PAIZn{5TzfH5?~aTMb!mgJ+P&tb$pQ+X^?D0cn(t2_5%D|VG^$#9!J9JG7g6rQ$(xpO8|)+Q%}(IB_2y| zyCO!|uV_dXnwGLlAd8eNOz zY!w_^TjBuSClHwIVMvML#k+SSBdKt73HhNcHoIMzJsU~iL0<{bBS%CRP|1wOv$1Je ztPVI*%<(wE?ex*GiD>8{8PnnF2Kuhe->vk0#8og`;B4E3O-g@>B-0R6ngUfb&~_A5 zTO9-{RS6DCxj0PIM%4vU$t2PQ#$-gcj7Hx^t#6{*>mZ#-ATDc&CrOP^#}i-r8b1De ze}sOm#odHKSm3Hgq<0C)$Im?Xa{S^?-Gz+dAvuvlx1U0CI0reKfgd9i9l%+dgKvA# z%cIbeDY3x%Hst0O%=rd9J3zdUVs|cSco+ex3AR=dG&d2L8fV??Ua+wSYqbJT3J@PD zz!)jPO~ja1Ms9&5Mx+EX)I=UiJ`2xpz*%2|x7mV`C_ta7V+7#<03ZNKL_t)ThD*}J z)OMD8@HaMKui4NF8E6wl)+pe1eb)-T5lBQR!NM;KUsuSEE{aQn`nrX~J zAiS>4)K#lm=emVv@22Y`IW)rl4dzA#hN8ibr7?eM1D>HGHxWlZs{Vru)$uUt&W?blq3yPtIjriO~l#-Qqvo;NaLB9JI4n>Aet zPs}gk>&G6!qfb7G&FbbeBEk9F#3TV46g=MU4-;9iiIzvxJDTGwqywUI zP&eXf%&#`_n}6_8q^I+E)9YW31Jk3B$g6?1L5QZMaDp%R-c(VAigsc3G?=Xp7LgqQ zzQM{ZDy=~neVUT3sOX}VUW@?3qD2fF1|x^<@ug7EqZzW$ucyEw zw{bGJ-f$3~`J3NEE=iLqVY=blmdV{88m~~pfVxp8F_vTrEpm}%C&@558C3cgs9Nb{hSf32Qi)g~Mi=5&0;nh| zaKY5zGZS{NsDfwj6Y6wCE`aRiL#PN^9R>T0C5}VaNnMhlN)nQagtROpLHDC1A)~2C zN&yUFp(rZ4l7v59UB&ws7hut`aMK~|OGLg3bjz^zgHX{V#*Y_|LA&+YC_ML0VWJoL z1kfmr4v-QY^=Tb|3t#}Ak}N&-k&cqQANDBVyJizl*@394zi=l&928%U9ZE^j)P^$b zke$j4fa5vP`XY|Gwlvm`+bb& zGyMLQ<#h}fvg{^!dcB5C$HGJ@kLA@mj2Q5ogVUHfxq#Jr6X|psL&+2_8z09~y@GFV ztieqX$J@rR7Q?M0C1w&lG=C08F2QrBD@tYT%VzNK@;sg(aU<2hjr(^Yp{w}xqsJMD zS4bsrQ@MVv5a4(AJq`978d37#Yq18u(nfMTgJ3AlI{uQ|XMm#JrAbE& zcD({UpF*$iqk5)-_PIL7E}n)oN)1S|;8D<9Utx7UwU~uXa?Y(T`!r};nirUMw~w_- z2VZ&UaXdVG46_T%FdbU%wyTs2J+n!}z_YHr7B9Z-7EG7N+1ZbD`&ms+(_u?8TJ0tl z7FKX{<|H0FeG2Q9P1rs`Kfl8(8utZ2DxSbkKKIA*yvwe{XtIULDH%g)!Z)}q1xz4c zYO-iC6-vy&17AOmzy0J_aPwU^;+|K&5Lq?gnKF&7$jpZu%2aT%J#9EKC5hIMUUuGI zg#bVnHj1jWXu*!0{B~9!QOOwkxkY1yorMTfy3V6>iQppK=|t<95#=;fX6aa$Stwur z;)8g}&%O(7QrZ-?H$L#*H{y-|=|yllEhwZE$Pq=f+XdhMsVis5sUqQ?5Szeje#8b~ zPlX6R5b!vKx`re>O_Jr2z6^aTX>!mgg%b;A5KtE;h7 zVxI`3q58m&C5Q0Fy$3NwN|=tvh<}Ffs!yt2ZR3v~R1Kvpq@Vg`cJEvOzo&fxRJqd`Cm8b0NGmQfxs^In+-4@*tsMydE z4nA(4&Y@=6I90RRvi8TPi!9~)`PB~2T9%kYx;}Qrbxim!_U$U;iFyYM-7bR)hxItF zDdsSoNMJee@pz?)&2|^EY@^+C;W_~h7ZZ5FW495<{_#=#>+u<^IXw&~ zLv^eh*5?wZ82$Hp+Tv4PLd&oOWP)&sjR6w~pA<7cpL^%;eI z_uffdn#$l*wSi5~#bhClde_E@<#k*#If8xZG&Z&xsK*t|HT&4?nkeW83xD=eJpA#0f<$z@ofRlwOEG8F0}n5|<}lv%+LvN%Xb82r8Kf_phCH$xHpx6U zYiw&9o8HY#4u5MEQs02D55wuL!)Ea zd7c80|SWMOZB#`Jo)tw1n8y zFt_Ys%BVfMj=rQIbxEGGDfkTwIvMgf9-FBIa*UlJYdr^xn{7OH@+2O3^l{8BE}=b$0f0 zzVkPPF^j6;OQ-kZp69(3L-7nU{wAJ#^B7_o1ybK-JK;#1-y@FD|?5XsztTE`dwS1@ZB zr$ZD@+E)*{k-+SkCSLQae}D&$&4yOVBB(L2H46Spz#9M0j;HV|qx&!w(_r^|)BuA^ zEgc?#Q|%t0A6KzpJ1i+qGjX1E4}1VbavFBS2cvO;6QP_*BoqPmX&!01}sDl$B z@kO_hzlIVUKj0vrBAk*YAww84QaM#+#2~SN8=L&GY_AZmr1$YYMqtu#q(of}y7n}( zp+xO?-@{vGPvGHpOK5sTT*C;gN%sajR*BE!TZ8VwiBXKa>{XDa_OkW@{Zh!`2aq)F zJLv#?PvL)XTmQRe0y{AW7r-BU*JR^6ZE%53!1)<^^sh)GIm~be?_=yK{O~~})Mz@g!-n#0(TPA8oS-TQhhe)fE*(x{7Cy7BS*mXt)j@G99d#9&E>B6}(GF zGO&FQ%ch5TCJCowLYI6r+a^+(EE2GBAfLrTy^9l@Rj4r)myQkLqF54V8!g>Lyeti>uI*!twT{u0r47=UIt^4+3Bo=4zA>kBg z!8%&0;~Q%g3=QYt^c=*gLf=(JqHCg%P2j=RRh&7uj;kg{aP8h*_{QuxoY<(KoXg;@ zgZp?v{ktbmqu~>DRAonaM$57-gMWqarq@<$X>G>c6yvU0NuG+ z1g$<|6XOWtfWNc=zvCcr;8J)aBfTwYG;b&#;zEPV_I-Zzcz(ccVaCBxC)mHnvR{H#u zDGEyEaU9&U2ia7b$5hp=H4gk%uLq07hx!(5YU75D%^e~0ds+sHxgzem?MHCSl{dj@ z_2IOZ@WMNHBcfjp>J&wtX#&P8>S0f&mSP@Xnt%YPQ1GJc` zCl-G}9~O;ZZpKHx?Hu_j@I~$Yu$9kAK{yu{h_*rhd1r+mmfxb;Ojwynzij|=4$n#$ z_?ffLr7cy{lspLOVk!m*KTh({Gf%GIuAh1b&aROdl6d{n8(x9meb>(+u$mko7`Deb z$2RYuk1Rs{v)Y}Y{0Wm8aeQ1mGjA`95nR=iv@a#B;klvF8J!dI24Hp-sn`kZ1=&4o zvWA7lI$rnIKf+fZKNBW5A~6zYEu>YdfbROZw>*w}i=)^h1?=8Rbm)%d!1W|(Gy-rv zc)EryJ-`-?E~(PxqCHGVb~ZM`go~fEFmj1g8k!J<0LZA5$jKY2_OW4+$ROAzn#K&& 
z##gCzuOO={$Z0aNx(bp7iEu0;zoK&;S|*DYN|+~#?v0R-4zmenjS!_v+yLz0DI54w zr-8T5Jb@L5l8q405EZK8B!}yqz!XHC0O`=hD81rdxS1ReQ%D$z?wRwI0QufLe+2No zrU2)~-V3z@c6OL9kZKC5#rO2wKjX&XIll1cJuOMt$-@;0SK*6Icn=9N)yEsJxCm!z zO?-ZK8Ir0oSffga_k@OoVF-y-*T$=^KFmD&)9p6?@%S7jM@D&;ss}E1<>ENmXy9bi zK`|Z2^}}ggmP_I6W}m@-4ZDM4A%kkqMzw3RdlV@E{>+{|tm*!_brA$rSwc$HaHKTC zv(v7wV58n)p8l2PVbohKtn_=B@0&QYSz}kVy~RAPD~%vUz`Aq-J=ei(y^F7&KFdZz zSBy-c@0mDWU&l(VfgATtqLj`cttH@gJID}+Jf6UMy@NCbT~$TZ?&8yD&SKegF`Y`{ z75nyJh&bAsg0}BqscoX(ZR2ox6a`IZnNSjh@nXP3D@!<2>$71HjTbH*9>R5_1t=O> z7q`)OEXE7ObpgsT5+PIY0|wH$4Pi0!oY!N*y?zE_6SE&GinuHul!4C|^=WU-9AE5|W7+Qy%+0 z4;}Kc*EOsxtz)g;!pzzV9-f)S$uslVY<9kL3;u#nq7ca;-S_j8@C(s*zQ53)(BioH zvg`5uTkgPcp@iC|h1SLrUiRYsD33|#Hfk)7N-wrfC6F5FNvH}+laokhR3U=H^0XrO ziUprs{REwi)(WP-f7(y5;5dNO#v61TC`3UQ6Jpd6@qkKS|4-s5kU?EARPpcwsOvW|R2mPy8;fxMUiEWkVs} zxX6sdbw5ldqWuafh#Z(Wz=!C4W=hfd5F2s=GRS7c5peFuXnCI#0J;aLn?w(VB-406 z$Kpy0um6<~;foKQ6qSF-jb}I`x3DSl5j#Z2i*iN$LU{^9gb$%^pD)99eORUghbwPk zaa~aqERkO+=|hkZ4y)~nlgK^5@V4XA9P!okt3E{Ip~cW<=y%9+I-c z0l%PY7>cVPRya{%HBIKA&yA1}>%sR`bWaU%W|1$%4r%AXLM0FnJnm<3C!o0m^7hcv zbo|xo8s0lUhZfKKx5oj|rQ{A0f0h9H(U`%N73eqLgyN6>IBY!*l{C32^E;0P@U#Tr zdkz2FC;EF0;n8#TgG>M})QNcJ1ZDd!*?uOYmL6RW7nhfENEX(+ zy3=}&!&Sy)Tw_P@(r_Mb&qj+>?MU^_aj`$GW2x`qxM}eXc)9N5wuup}*(z7}eK?3| z66^hwa1be35$NSd3wfktam)rD(&-pdM4Li_lY!5jIF3%Jz&B>3aL4X4Ov}S!+d{=A z8e$J#zlTe9jiVS(;pnMZ_>`$8;<#jT435=7(5U0u=>tstdwg??gX*Et5>#1-MM!&# zeCzuh^plw^EXRk{>!MuD;nDRq%-3t!T`uwLzu)VjkW3;$dh@o2rrCp{KoaZ?%=BAt~i>icxlA46qzTkSi;dL*; zP&oy;-+;eqq3vr>#t%R^Fj5Zrbh_NjSzY7B|J(~vWyC*K1s5)g8P z@3(BgHM>yV4%|uuR<(tAF$H~k6oHb2M*0I9H3nmF&n>|^HxD(Nf>JK<=P`C^vkT9n zMWqilt3isz*u_zDESM+P&~Lav!iSbkLQkaO)Z4J?CgVBOv<4UwdK(q^9h24m>_CED z>p*2{l#W)rhgdofo181VeavsRadL4P4;=kAPRuW$(K4S&ef@iV#Q)>l?|gV^gfGcB zvhOhNdG>R0aB?>*Dt5XG>KhGQe`OArA2Hx`t9&n!L!~c?*=!;aM~tYP1Q6ryJt+>7 z_!s>{hC=jBvR9=VRMkbNgZ{(RvXCAZ>ejOcJ|_ub8W2{FJA-~C{=!K`)G`m#(#SzB z3VLFH!U~)@`66i*K{h;vye}bXAPi`N_piYx#*X5m*|qSBpZ|S)@&2O&ROG$)+=)N? 
z(3=^9LZ(nWt|Gt~Q877Y3rd?vbXX-uBnBZB&!e_Ae;+sJ!!nq*3-t}c>UPI;BQ$B) z9*+>vmkOCPD^0xN9e<4bAAB-wTZ?YpKz^9mSCWgIFX0z<@5Md&5{4Y|5*3|G(dDMm zLI9gwDap%7)A3l#LW^viAwy-Fq5u$!aZc3b2(Qjy?jJQ~#0h44p8yT91_RL~wZKPO zQ&Gwo$jT~L_z80ArFJ@B3jzLPoqnzwL;VRcOWGzO?DP{jPiG;_48pd)Xx|6y<3WX} zOj;Y^ZP7B)_|3C3`0Ld*7B`B7lm-HKY&b@Vw@~|2=nnu}(-6PwN0GVxN8n17n2}oP z&Jh6b>W&V;AeZ~zp55=Kfd64908iVe!4IOeE6NdtX8_;QJ6aLE827u}QV9|no)e}# z2ZrRKs~Sx0p!QeXP*G52TvpB@OTkMq@b&c?s$Kxq32=%s2EAeI5Jwm$>kc>`xYz&7}0gyy>~aBSYN}pwrZSZ?n%UORcRC*$HV>t*YBA|(eUx)S`BAv4NT-y3ZK{CV{2edxnQ zxa}USxf&8opTy#31+5P0 zuKz#w+VPl@$<{@?_v6kRZo%b;5AirBaD2FuhK;(7sqq-Dy&?(2Z-9iBS+tiDa5DCZ z2_;GnH)7@FMO&tJKU2Gef=lS&%dUX5wW1lKOHYlA$T2irBL+zK?Iq+qn0W*;W+Z%6 zXYhD&dn6z#(#UTwuF9ucEM~CQkF>(-Puu!EROaX)`P`E9pClp|!POX?9gw-bDVnQ@v z5U?7g=%lECXF--62G5?Hso=G5|08_kD9!vuwI6k!X=YCrwR8>kY8rlid;)jpiqLI~ z11&SN!Z5{^dDzvV=E7WK>kbPS#XO!#xhobx_h2G(8{3 zx)zc#joWiX^*Z&)!}!7<`~`XoYoXDTxT@Qj0P)cC!xMNIj;VOzzI}MfUw~)-WHOd1r0U{r<~eC(D$Jg(unV#fRf9=>9*i3Y@)MLhe`Si#XRzpB^ZSS zx3v2m2mP%M%z6WIF~*(^O1lTU(L$FzC^9KX)Gqa8v?>nzfdZ=M*P1OHKXn!-7tZ0# z$}-MvkX$s);QwQCUxfAE_Kaf zs5Psc@I3F%BS>lu*o_LSO+r*ovrhc>xxyw3tteIX_1m*7ld65}I9X z2w4qB1Yr^(6xl>l5lzRbA{I13vAApxHpF@|NI8D+O5K5`s5p6Q8Lxlq|G+m-o*Q5z1|=tb z2Ky@bIH1Mwt|OP?vOq@M=>pUOR~6>@a~~%151_S7lF;;lV?;@oG$^Eu9i@5U{GazL zJipP=QI*ea`T^k3g+}OnO+vv?S-mu^(x_9Q=eeyKq8r4QfDF{4fE!l(QKyFo zM9(nOj_!`_`w@;61|U^& z-wDrv0RBh`l}thT#w0~J}x$Meg1bUO~_yM5$`GZ>HScx?V0R%H!xEY50!B`?6m z{mHWoOjyzTX03ZNKL_t(>>qr4^KfwKSJl(_dc1=Jt zZ9Gz0L29TBx7o%1ddBGtfc zNlo#f22gz;`wDry?xnZm)+-NTa(W1MyLZ8>beQsF6jKbQ@ks=T zoD?fn*o`)fL;|U?FVP& zIm}nqac*mi1){zia{s%9Zqi>EA1UGBu01$%_%IIanTBh-IQqm%oLV}ExwSninxa}gOR2vFx zXmS8mC+T1aidH~Mf~1s2U1vp@r6>v~bA(Y4-TY9zM~R`nSwr9MV|4ep5Ut@{F>J4L z;VUZPsKSq+<>#+);Ve6Z_3`o@fxSdoEna-<3AxtD{Douk&f;aq2aBHz5Eui?WZ|?& zNzL-oCSLry_u}Zx0)Moo0e|~v@5ViMUIo9`36mT(bpCV&0zItaBMv+h3q!U8>-eh_ zQhDadT@emABuo=h&=@5jw!j(e=7$`9+8#D^Erus&mhk#t{SdzST_&VI(XrLg=!gO)VGmS6r7=xOu?VAG68brBko(U%?`3d z6L4e|c}2#k1RNaA;WM*K*r3ImL?3jS?RnQ)ZS))og_MH8?PH^6qD7UzBx7G8#rpp1 zU5Ay=#^O3&aoK*7$;Y2Q^f>BJVQ3l-B$K#hdIH}*vxr5zgYnTK%j@PdN&Mr(N1@Wp z+4Es}4taL5;7&On$92==AdK4SUL7lK6IR>AHKREU=M#8fvxZ(Ep#b38OaWJClUVSG z-C&~WJDAX9Tsu@ki{{FyBpSYpx%G9N+1kQHCWGfqOrRqvSaU4sz8Gf^reW2!@U2Q6 zzN+D}Tn-l{l4#p~D4LFkSJ$xAclnqOm6Eu!n1j^;KEJq(ir2*p%VS7+GXCe2$1t{c zAL9x#wu{_o9%ouj%*`+2@}XhuD~-XHJXrlcW@k>|^MCnKG>* zem1EE2;fjDix)rZ2wwP{Yfy@3;Iu7_9xn4jW7m7U5OgNEbrlXW=#0I75iZPP-vZK2V1P-)b$xVnN<^NTpQv4OREgH_+R1+`uiE0qm)kF3@k z=-Wi6C9XCFDS}YZ7|1}eP{e3{1V;`Yz>)p?P)r(vI%PXdg=Bs`&3;LMNhX)e0NqX< z&%S9KBWMx{r70y70bU^d!s*My9gv0ffU7d z;eOL+(wx{ZaNp|tt>AU<_z;erS`uA?@I8DM zs5Q^s77tgY(s<{d!?>EHs{3sO)IlJ6qeo^!WY{Fm3MJ6u!YbIbeH?F^Jc_056$0V0 zmNAVogpd&91|$qL_38O1vG%k=f!{zWp(3x-o(g)R06o%sA^{^B_|tg{(SqBIK!Ldn zXYShxU34A8%NZfk!)tqfxY-eN!)Zv=c@b1fy3#_Ri4-3xn^k;qFLh)UA6Q(#A1*C{ zSEKM!47yDG(SpSieHN(>l6Hb}?d7=Yb^jUC_-}t;XKuM;e)V1J_3wWrd8T*%usQ(I z_wCH6ckaV^8QadZTx8lb=NV1uI5?EUgNqw*Bn5}ZN3gQAjJB#EP&F2!AjLh!cNo=9 z6-BQv@r|3N%(T-+Ib&c;*0J8Qcp=Nd#S8b0BVnX)Y_)>on2!5q7ErUObE&b4R9=^n zhm5YT!E&0B=B$zmy~yQ~5Yf)*5IzMCns;IF`Vn@iPAL z_!;yx6`8n(o2JKcWnmPRS_fKO#kZH{G2gN!XU?MLxY%7vNi=}GSbg|NFqt>@@_0$*=kg?geP>LBSk(r3?pd-a`CJr=uHS94ZWa0^Y zW#t^w=@br z8{I`Wpa5Nws+KDGDCJVPXuOPPUv?30zx4=4hB8d2%NFBk&Yxu;^Yqjn1hF*wuF7=E zex-@6nPr%*CMK`D6zb?0WUmLW?qKWm2I8Xz;)Nu*uW8YUz(uRphLYBy7#afChGUXO zy3CGsROM>16q;tAC7Jt@jP*(#^UIq!v$BSh^NU!o)KFZ8sk<7X-$|4@{;d6eb#cP<(xpJZr~!J#Npa1+NO`LyNMfb+Ks6qp}|R4 zLx!wUi(BIbn<_amypK5S6mUZORjMcni@~5b>JAvFR~tBfVis3ib2&1F3@c!=7K1EA zsKnelswSgKPNbA!n;@jZMJ@b6FIHTHrw|~viD;rL6e1J1fp$A6fnonS>^+ONI~@yq 
z0kHp_f_J_5&+*&;gSh|1Ay@H{-}^6k#mjGlXPJ;(VLCI=yXQH%NLnO06#9vR6)%HA zh=x_>81c^~rNAB#)<#4ck;ssk6a8Fvp)jqG93kU)_>l#??$`eiPn=#Ip#8TEeuSPq z1^=rP3B3LAMYxWHmmHG;&?J}3t!vh^7n00ue8+wQ3QS4HBh@adK6Spxp^-T55|8`n z+H+@ws$Qzp6=9mB`!0%#jH#rCQj8G%lmZD}z5rp0fG)-ZqEkUS5aFRd?Lp%664DI^ z7dRZzFb$G#gYE9}Q= zIKs9J63I};Iot)lcc}!DeWb6v{Qt28Abdcg|LyEYeE&<>Q{MR$|DT-~P5k}*Z>7wg zq<|;0FM%+teM0WoL7F4QnkAfvodKQU#;MBqY)a z)aw=!vW#p@P(8c01kaOk)p#1m7PqkMI$RA?)|rxlAtgY?13T%_`^aLooQ&bXa2h?A z)XcIBgiFL_JXEbQz=QP53aW~+Yzixt1{{qPvn-Sq4KF`(DK<9i$P}{p^yyPL-RdE! z#h{rMuAQ2~!IK*m=HXM9 zosp8`^XHhX_kXIcg5YO8?g)2%qKHiEi;cp%}1~;y9JMTa)iQA^f z(K2nsNYRbj-iCoi)5bUITby9rn$98S0b9NcRgqC7u(IpJlvLDd)zAV==_-SNmYe`9 zjV{sz+Rzkq`dxhO?>>R!pZrIl-Z_sU@btxbXEjEQqyRBRMp9F7czP6vcaP)x%cohF zz>rmxr>BsJ#aRR0&^&ZDHqqMHqOkRUxO)#UyUyxd_}%U7ecJTiC5^gexe6QG7(2lb zz!(S^OdOIx%1uHj;pPIlaUd56iEn`1OHCZ&Mkpqx+Zb$YFmBkEY^zvpG#Y7opEIYQ z-Tr5-@B8-IBgrl0K6(C007-MoF5mvvTJL(_cOlu?j$~UGpJRiSW+b7=n)8s$5^5jO z8le%< zi4`T(rcLLps$%iXu{|s^r-c}fHh+?0kv*`TAfjtPVj6^AfST)JDWAs$=eA?h)@B5y zCDGp|jGud^G!0J#UmlCfygRYeDwZ@s)ohc>=s+6tlPh@o$$fam>wgsnj~|eJpcG8F z+EvxE+$m&bEQoO=9To`zPs)?Iq%iWS*YzS8NMKn>3x(!DA_Aw9#)SJE6OkjPydW>K z=?D#*5CPGc;diOKER94*@Exi@|S4LN(Fw z<6$=Nu``>&$F`n@^9%!t>MAJ8dm0&3(@R4BGu=6pq_e!41~5$r2gxSALg!+$R$ehQ zL^>2k)A{H#i0Ip9>*!C?bXNmull1V^R2OT4R3K5%muL{94J_kRb@AGwII34q;d{he z1Prjmg8Z83qWbj_?h`Rm#B)Q43n!ey(p)$wDrAL52UBWo;mJY{e|qEq_7W1uy2qGS zLnU--J7fNY-y7G31{wO0w%b07vUxcxm>5}>|2>c5Fw$(4krfo8Cj zU18clmAyv}S4;$vtXkK{S&eC2(cglcTfh%S=214144ZK#tA}W7%W?o4Of@23E#c|K z87#RjY$J(t8+Av=L{1&TN-ddM+WyzPGTV>!yAM9;}nQCq&0N3v}3j$ zpgg~X&3(;yHebX^sw6`Lmae0t*+#iaazK~+bZw@ET^k0lvb>7^))wrZo5sPJMYcI6 zB3CMDBEPVJx1Y5c=Qnn8>*4O%aXfNzf_?wn8j|>}otxmED4?3s@kD+GV@m}zBvNoo zHC(-I7{l$&JP}WKJ=`=pf+KmK$!`s2h}Uiy#Ez_ka=z?U^*XJKNYXyrCrx+fLB5Z+?gyNy{Gu zqW`FAsN}0KZ3~4$1^IFjD}@qHOfF(#E|0}Z5#tN=GE2xqEnfj?07B9Hu zU(5>{(n)L?8pLJi?ZWw6x1+PU89Mb@t5rt0=+r47lam;0!I=Oh{MHH1r@+si=ceE$ zaF3c<)}jYXQZlO0$r+FWhFhh;tid9SG$OK+eJej##8Cm#i6mSaW@PvXJ(_Arm)~M( zXwjR&mMxvwG}r)c%xMD7CPEyW&CoMSBtX8Aq9PUEULsgC$%^G>l*FVkJ-&j69@>px zdF`cW>Sz@1bv{+bN*X7&ZVtS1#nk(_%ELd#DNz|EL*DhPFDgvYCJ-$E^&^v$sKczF z+d3mWQV~jXA=cQNG>60mOb3tbeipBN`-f31(}e;)`RDJ%hu(87f^uGB`8FpGZKJ^uh&*ueE zk`p9xj?Db$)^vbN(;58t?b~r4+0qi>4?axh1aPaHzmtf##N)6+pJ94xVJw6u9$n0% zrrT@;M6kc8jHTe8ICJ!UzK0gez@U>rPnul%=usz`r%cxP+~_7N^@H>;P>%a-pIhf+ zME9O6`#646Zz;uze<~4Fp-MSNta*(rIyn(l=fFvipymqWC!rz{iMH_j=l`QwH8Ajv zsVV%$_$U{O%qNk{O3_ml-BpsHB%(p%fMYEHnGZ&9eKDKHf7)vlWbp!D5M@*^F&Q|W zr&0faI6p_UnhaE{HQ0{D8tQU|h%FYgtSb~)b)73e-Se<95#Z{x29c|{`2LYeEWzgY z^9`W|Od}wxF8UhMXh@~-%>2CQ_d`_sS8VLT#N;w&OD@XQ0$$P6fOo%q2Yz~Z4tF1( z#)1cS!5U~yu|N7;xr!A};|W=ptZPT3p_B z%VlgG=zvoKvO2J#GYLKI;Ld}SIJQuP7SwQle>*PdZpDEUGib;paCuK3@}(MXJ8%S3 z9`L4Z>(Q*6`0>$Wm~yKu)w!Ul6}yIeFyYqlz|j%(nMqvHKg1Q?vkN&MRXTU_I3E1r z-8gdJW5D97oWLzslXwsn!Wq|ZD}bopDu^B|>6pg;--OiIHWPNz*%Xoqi#ZzaIP$EOq+ybu1eOkK)!Oa%KlXT$-g`{9l#96B7DH-5NY5WXO zwJ6|YN*$0r?t<3?|0r@WZ=)hS$FFwdmW_fv{3x{{RX! z2IqY}wSs`;E}g3y*rGMi5MMvA($~l$f(R z7csm>K^da*6nTM6pt#BwGC>No^oLSG!%bhm51;tbx3F9g^AzNp>QR*Py&%Bltxfog zE!(k$Wzs}U6~lAr5UKKJJ-#AMQb6`Zo3*5y_~m>alQ7_D7Ho-R@JL-AF)TP+4O)nS zREi1BO(r22o;-g!_fTYa0wv;emlnMob>&%#jIAyq<|r(82kAB%)de!)jpuY~o$yuU z&y++!i3-IN!N^vM+bnds7}S@?CcZPmk7#Z4<3+`##&PCTCy(IP=~;NvD}fQQn4z{% zz=#9kKM}gn^C>|Bluu_pyN#!<|7lVF#eMJ##&)$vV@Ll&pR8_(x*+k_^QD%bsfa_Y z0w8l6x5}FX)fRNoDg^hDwXW3EX33tjRzbqH*=?)}i^jNfwwoZr6v#rjij++-l!1~T zaxav0$n>Fhf;{Ajs3vS#(~g1erWDUupI)xQ$z;%CnYgH@6}xAqaf}$>dIAaEKy$*! 
zTQ>Ef(`vxaN5(N8xX30QbUPVTDl2GgNa5+(9F|=ltDcYcOcHM#=s;W5!`@N__m0oQ zwJ2Hf(348>WUA4V^JwmB#rnZkRA-jaY-?!g=*AOAjzcqbT(ofj>q7(8vWJJ}mT@q@ zh^5ej6&QF!cL&aDPUFbbD$Ir?mhuIx6qnJru@eVoOE|Gu#H-i$VK9?Lnl@vyi7q>h1>eNud!EL=@B9#jeTRTbMP|-wh>2kO zsvjY@k(+RARI2(tKc2jJ#D*&RrlMl16LjjefLKD3GofoU6r{j0 zO$>E);+!qpaQ^mf*fg{bS;yhQLhPC+4M~{k?uh`(s0H;xDd0#_98Z$t7JF;p31m&!J}^jq`@vVOcd% zh4E%+@#Ze#qz7I4j1Q{%DfCp zo_QDx^@y4DR`(8&jbhRFmpyQ0WK-W_C2Axg^7P#9h7WAnj6rfRB%Tf(mP8wLLW-!5Pe~nn*R#{HK%ijaNXf%fg(@4eSuPR! z#4sM8T0fLrz(bb_Y|JFknM}aok=?@eKqx{)UtY{W5IhLp=OZWGdc=tbcNHr{>^MCt z#UC?)?!>KaRY0lx^w%*T6>sXYLE+9E-58M&MC3`)LLs!D5&&xD5Gr@1TEPdNJ%C4O z1_|GA;!eo4MsinV*plW}lmJK-fH=7*6HxWfziq?$PkNWSz^81XDo2gbnHTf*(d~;e zf%v({P(E?xxd%uaArX;mlEqXjoS}MFLTF_r>B(XZtj5WLPlHt0+g-E($jynaak1)g z@06Sai9STVttw5tqkcO{P+%|%ojlA%FH3B&z8A80d{0vfZ`-;7_dRn2`&Y|wsj}2` zB&`rv4)x&Nu5R4);M17VD$J%&xH?`r(27e2yYZP{?8k~3Lbps-9BfY*xPHqgE6@A@2ingSY zyvIk5s&yWyVYBDKB3XYLAqF<~VtR22nb5@s-G&~TI8pO4T_ualDt4w*SjXy$DyB>g zPb}q7sd?Dg(Te+?d>WVabl|LXD{9@%*uT7r-1IbNpLrUGe)<6N2TwBbP?!g)M^xqB z62erF-J0H=l6b}BJ#m>>Y*9v=ek>zjgA+gB0zM^}ivaiB?^gLm%CWGaw;$(j-Hz?U zn=sJbgN9U+d!r<0B*>k&L^*+tU|pVmM`KoNVn7b9Lkb9la1aTL)-OZJ5*8PvCpp=V z#yHFKV|0wFd4l*QgiQa&ll4SN!!0(#e7R{r!I6fbSO&^PLPSJQvydyIz1_gBR}R7o z3PO9%`IW4ln9!vX4C!Mpt#c`HA;_GW)I^sv9L&sm_{`tkhHI|*Rb2Ed+i5;7Yse{~ zCjuEX>Y3Mt)k2~drGpA2mQ|U8^Obv^xG56UjlE4;2CLu1HU6=n!k?z3VzGaZ;RAx) zNU_&6ap2GiT=DB4#==Sg*-R2Yy!kJ2>8=g%N@T*5XXW>C~3t z1VOe3NFK@6AtwVOI`Tc`e2$U?`ZJOK(ilCqfREgG6TW@_ZYePpK~?6zg2SMN$OgdM zhlcQn1B2+UR+#X|2^x9r^CAn`tDaj%QfA2VUFH}VSP3-TJGY9eMUrR}CLvP+?VIA5 zQgG3qNnm4R3TaAsK*?TQ^`WA?f@?%_Ds!7P4^sWI$FlGk!lRx%0TR?f_j(|bu7gpG z5lMl-UF46d8DvtiX7Zyd;W=#7U&xY6wkQ^|M9OLy!LclScX|PTK5`gytb`EC04_w7 zVN-l>sUrYlu|*KXi;1BBgo3K%oy|a!4@TPa{$i#JN1{)ob^yW={y|}oq z6JLDtag4geI@VZDb4_mt`kR|@$I+9RDOFI>fVPx_O9p!}Ham}_##}{`|=W+vPpDn1_qk47|9j!y@R7X_=f_+x`7UC zOr>yPCd1eL-th@MGQWbFP9{J)wxn!aKG=hkOBFo5uz*C?!bM%(*p_L+gCnCDY;mwk zLx+kh2-8`vBFH*Fzp{iw4?c~t2OmXge4H%ve%CrQ%Ji+j!PA| z==XAv;G|Q)`C`_%7~@Kt0(0lS001BWNklz|MM7D=mnB)HfEN zttRG0g^iTxQA>al1gd8ESb~7ZW)soRtC2>Z2!Lf|8+E+w+%DK@8t_A7glS_ZcQ#?e zDk@aLO%UNgBsI+HX2KUu^`rloDCQk}>XWx)=lSdKrr&rKdk2t@f{>}vYw5Wqhd_F- zAjd`UJ|AVg!V{aiY8=JWg!;cxy(!aydQP#XWgxyoCYfAQ$mwno@T=Zmm+oowU|I=$ z{aZi9yFd5^p8Ij-CFkM0|9k@y8hMaVb(r91l>&CO^kjgDNQ<)6WqyPVSR!T!LSx_y z^@KnRJm{eZ-LMdZX*}}yUVQjd|A0rIos6a#asnd_ZMZF@2k6u^yl-F_Z=va*wX*OP zV2PRt%7QOak3~v8NdvGbYbl319?zF?%+rw499Sf=6%!B;5?UH~*yI>k*XST+Ql(8s zL1Y)jroBu!RD?<<)*58KRV@sGV z)sS{f3^ydvl(2AgVHL|n`eGL>4dkv#4@=FhVS`&lQ!0aFhK-t`!Epn$CUh*9OYq6D z%yjsnV${L+aOsA2T-ct)SDqNfOobj^(f_jikSp^%2Xvs-v2l4%J1*~P$Nk4A@DPpc zH4PAKywx!A#?3?c#q>O$oSlcBurb(>!o|%QOf2TGr&NPug>YOSeaRFuDIIr=O=Hy{ z9t4;e_=<*Ryt1;J3WV7P2=D9ITm)bWl=H`m@E{LwKZJT(}maf z^mBZe;Tsrxb_`QT4q<-(Gnjwo2>g{*z++K< zBm;|az9NW$2FbLfj+!b|F(x&H?}l9UTeitI)T_%YaGV-mcF6#mS|}-~B0=wE zaTt+ueg017adOopk{4#kbK)$>iF{6^EY)p%@v}cbwd&*jAGr>;Meahxvr5Q-L@fky zUd(&ogh*_O5!wFB+>Y_toY*({|LOC@I9}>wQ}MLPPUC}{Hsdwf zG;ES2vc<3_hJ!JxBuIrq6i5ME9t&5ygxHukTy*j9Y8j@Tg5?X;lN~)m4H@0TSq%=> zr7f0X(>_g{1`u2zQp^_;(3f>Qp84#Wej}4B3Y!wTn{uHsaEm+XAj}YS{a`7ZBR)=ol7L& zcX|Tw|0ZbvM}Er-#rHGB^tE4q+6Smhu2L?+7-F|7F2h}9EDhzVhpLg_o~K7Js%gUX zJTyBx+MAL%wjeBjnWRKjdg!9B(Z;H4W4TNuDW8XTaIcLFR00hxW{9_(Gk_MS5nuWF ze&pOLhI?9230zE9eUz&ql7^0#4YcFX%nT-7aBzO>x^Aq?rf|o>BkUrmnGRPk4yW%f zHg;uk(Qq4n^w>CZWfykJgl7aOR0Fyk z1G`$f&_ug%=wl{d#CTZ6s$WH8XktrCGwvUs#GJ1){M+L=_=6q&7?~;Kdnd-R;szMZ z+St+DiXO{CPossTp<%j4mB9)|XP1!nUA$&p7aB|*dnzs_ssT!+3R;5@ujp(>o8_QT z((#0!!$VVxSl?`*u~LPhr!Y9wfn~RbqZ4^_bvD5(6tFx#j;X!-F?ZlFisO@Ta|MJY zvb&WG-FVQJ>uBvpu(pbirwMAqj6;TE50M#vl!ns>>pV8tNZH7?wj$Hsf!5w0^bHJR 
z>$>&$!5!bgp@$!iOjhCxoL2Ae{QBGQo3H&fRF;FYRA$`2~Jk|4N zR=&IsN~5lhOC?GS#T%mP*E}gj)MzLCbz-=c&4z&%?wOdw zpN)-TT(+d?vsDVJk|0hT1PSo}A1#0vl~kyUdMd`TCJ8t-0iX+9m+tUyY4{82uSvsA zzmhCz>Yq0`RO7hFdj?#MbSK0C5N0NR;KE2GMYYAl9Sl^dkH$No4^#JPv^zA-!u$&L zW;G7>=dbU>cb^!+Oumf1mL!@Qb?l#5LYZVRI`I0wc3icg2VZ>TSsbfYdD0WD`6Ttf zYA*KxEjwhgUMr1I-Pnh=R0@XuacpS?Zn=VuX&dJ^x1u-GhCO6G>m=BKJnONE;cu(S?+hN zP=RFz=xj}6zD)gPpalW8HK(z&F^QTH;)X}}V%E1{+f)_k=xT8g4vHQzn#(0DjZb3n#7QiT97B125taF6 z1f>!}l3wu>t#WL|-#;~&pSnubHXv2ZbOQ*ccapFhn&7lIBiqx9_Wl7ljV);IXhBz3 z7mw|AGHDKf4e1Q_KJWm3{P&-Qx40Zz+4U`^A?@IkAN&Y*u3Lxl(yBl*SQ9SceUkT4 zRZWxCq|K{v5EJJ_8jm@_t4P`G{3kudbtUy&@eF zM#Ky{A5pKDrp{wesSqVLY4=GSHX6{SQ8gYu@=OR*&5Hq2ItCz3bNy zR4XtIa^Dc`5vA|vAS0_^4mxq9NmifSJmGjuEfz5?k=p?psR4rW=;sB212Dl&mFC;s&3)u z3ppJ1Ns66fj}RulK}REeY;I0rD51edO$_}bw>v@ySZWxhOcLB+C_pm)*4|IPEGh|G zgOJFOt~d=5ico*l#4U`YD)QNrXpm|J@fANc(95qC_RI2P;t3jmtok5v>~TbjTVUi@ zL;p*pNDZiJHvV>W6rZ0Z-x}dw8N(9lrdCk`5R*T>s6hH(#7e^_L9_Hk~@wTA`yDHa6>Q3wHgkL&+*7uPT&XyP#klqb*8PxvMp4}qLZ>Y z={?PGEH8e8ozEOwn9x*Qi>ow;MtO!MRE;z%07zut01qv;hNTMi)O93G4Yp>&)(JHr zMgnpBX})y;$1t#cT?a}t%h=>N2$M?GprnHTVQzmM9%f_x#K6X92hMBBRDXVwy?5Pz(8{|9Gm`}!(gc6 z;{G1=H>5CMaB=^_1ojt}*hk^w&K}rF8+)f`pj!@`Kc&)1E-%XYB7&fX)um+==a*2P zokca5gST2lVR0FrUqz)@=E}pX*0`D_Uj}BP)64_+mJ@mao`3%oMH(N-F^#3@AQ(kERJ1=-|Bme3p$$5IL+e+xHn`njNMfOp!R|ei_{87bhTr~gZ^YHFc^Lw~ z%ySlWQdFcB6vC`N9FjUxDJApN9!*%4Se~Z@4}yb>K1H2YaQ;o`3AIQESCNe^`8!l?j6lP z;Nuf`u`~e^($0thM?G^jB3Ry2=1s7G!XyJt+;P`~`1s#{4Wm;zX(22ElOSXYXg$;e zbm=|d?mm!%5)gbgRu zcmkxO2WSX1Y)K~2-)JL6oDd?kQpGQgeAu5rJ_droDWj{b>P31hD{px(>Mn)#a4sem ziuV$-AU-qR?hw$C$e-#-z?sm#s8b}@RFs%0qogRjh-_GRR>&WPp2>U2j}y0OXr@De zkOrh#CJg>JV65ihkDfh*N2(=wJnp()jPUhcLjpWS|E#_=eiCwxpOWdv{`!)m@qbJf z@shswgi~S{s!MQqrHGpze;PTmq$F~n>meEX@JQmQYv^|p_{~et!flW2V;z2l zxCQjkt`_lzzAm`wEbg8eN5!LY$_b=G;L723c>KsoR2&oIxm7f#6WE$;#FqYc{Agqn zb6%B)^jy-@g-eIJaO>V2%R4$yGj#l9VIE6m7a3j0 zU_&GNn`}l+JaX$xTF8^*ASFeVz|>aZB+^)+;9?lKthXP(J+u|eg(ALrVgyHXd2}Tc z=xuN1;9n?|k?v{6#PSLj=kjQ7Y(zs-Gb;#I=a%5>K3BGl$qWx(s?^G8uoGBb&GYoe zgq1`~M+ZtCjY|nIyReMj;Xb576YE;C@VzRY9XSarV_|e=4h|U(C2YKLcnEXzd3@vG zAry@=&L7@@jBcaBv@pK+3EcOYFQGa!Cnaifmyr)HZMuJa-8=Ee*S`&3egU;&2_BJ1 zYcyd|7_5jjDXM)+Qpo5LMGze1)zK5A|r>o&GvOzg(?Ls+6yEngA_*M+{Jltib4nxr5a8WCr+&3)1SWs zw|(z^MqLUQaLZg z0@<3HT^baM!-)6e>%mSCTTJNSHd;jyjVGR-xQ8=O?F04J}vQf!`rDcUNfW=E zSc2g=T)8F#pNI4Kfq^}fi|}m+Cc*d#4Vg>|Q^hL7Ztc2;Um5Je&&FqQ(j&`TTEG^P zx{kiIiMDhSa|<~%SvnfbH0CuOrKEw$`DOGrIar!mM8>e-x*8gLnmFMg7}l-&Xwfw6 z=t$z3l`@aH?bCIf z#!0V?`P?j?`rZ$*_gmkG=F!NfSo$uDpO(f9KKDOAh}WDy43t;-+LB35q2OYr6rfZn z!mVk%=7@|({Y0v^*(uI-xn)J`oHlE6pkt$;h)ylF;=H%3YJ*%e)dV#;e1;~MDOWwR zJ9PuNwHk~-RB!q60)~e(*to3|hE)TPHk5X#EJ@>xTlmzIv`=sV2$c{#7UICNClNc! 
z7+6};@u3@TL0}sA+Z%o#&Fw_k4N$2Vn3|c!z`7oGW8}?~kPZ&U6s&l_LDT{e!BY}J zrDRe__2k8^7y3~kRrQ9g-aLoyBDCCRcF#Y-k+!s?e#O1Lsga~YD}u_Thg8IzM%CggJf_E=}{ zx*6Q{vq$mq&)v7`P;z#vgAS#-*7oj9OVLD1Rze*QtgPZ>K*}hak*!7(=z)%N(ckkpV^0Q`Jp;^6(MdL<7ngHInw$s-0190j!Z4 zMzF)FhjCi$e8CsIQ274$|NbQ>0IDQ^uCI=v0QJ0P?YFG`o5+-!H~KuDNg+KixNu6ZsNCa$w_iA*!*a+5)^p z6Uruu>u6E>JOb4sl>mui*)Q9Nks#H8(7`e>D+oZ=_#SQWgjmqbca}38V z7h^Ra74lGUeGDZM$T%jRSX_Z)*x2Ei_`~f3u&pG%dUP6(%&)?$6wzVpScL}i7RxvZ zT-x1^-#dF7X6Kf0^Rq{ApyolRx!<0KO9uLJIJbbkGc)i_i`yEFrh)aAjU-iMNdqTJ zWgJ*s;cGvfZNQKPOqVNoWU-9(?TvU{XB)0)>tGw^uT73()pK!SUmvy+cOqBBqxn@F zEtb)kvazG56I&7~G`j(w$t~gjQU%#`3XNfa_M`!`SjIAu^+{*%892AU1GXOEWPSk} z?XVRSvsD}OK@lzK054B8V_l{lM`{(Uc&k{Pp1@sy{{<|Nu~8&LR102I*_U5%4sQSJ z_o6wZ76n!IV$5xoTJnB?TFt=Rd>Qle6@(#4y+qYbj=h9XP9=h;nrW1Dmthw6zr`&HnXC@+X-1#3c|9my)vy zyIjIMMIu6DlLeuS=E_qax)vny7azM5dk>D{#*e)dJGS>B)LouMF?w(cP9}+=&Ap=9 zrC`CXa#Bs8JOm;Iklf?d?kmZFLQ^k25aiP@5Mu=$OG%&FG?dIt@&UxBN;0F`JyZfE zeehLuVLi9RC^XB&m%s3@_?ypv3!nPvyYconT?De?hn|GzRt5P@+95}33~6I7djq03 zOam6^Sa^nlAoLk#5|fh!-1xls%aRuOq|`3MN>%0 zC!u%pvstn#)`j4-DU3ae%27~V1FJ{>DsGM<1!O)82@92SiKTLGIY2cqp%I%;vQhP*hKw1&4IGpS%9EQ8 zRr;BPhF$C1@$kWE6g6T)>##H*77-I^sElvGvJCi|(81CJ$UYhYkfOvOX`o2Yy(BKu zmy!z`&1cDK0WKZrMm}H0gHwxeNk$|$BkI+X6*B$RGBmuZuM1bSwBhl|1^mnS3Aoe- z@Iz!A3$N{N#~b?AMngcoNDa8Urys9f*N=ZWG=hf^sAeXZ|>>9WIn)O965+m z;Nh(Uop^O~Gdw+kPaGP>)5|Lu&Svn&-Y%SNr`e+Po~b$9HZy}AttnjB-vhhq;%M2! zSLbFiUMQkfPvVcZ3}Az9!3~S3_#y6{E#lspRRoCu$&`WZb_#>AF;jJsOzQA#6MKJt zKc4!^*Wj+K%1&zhA=Mcr`ow###~;4=G8olmCQy>~utu^wX_!OSzNMUx@u^kJtdJv~ z!xR8&NzvSPx{sVtQlFW44I~TXVJ#62OiJiPWk!i3ZP>(qWjX){Y0el~W=6Dzixcc0*$RtIjBXdhvNZCId@sTpf}jG;%HpfHK90}ceh)tW z;oroouGq*-7)?)NX|;gRuc522Q}7QWy`F3_$lX^B^AW#S5;++)hzlX-3aCg?@7Ttm zQNH}5#H{Q5BivTx3H;Hjji5}Wq!T5hh$6@kmqmCUP;1S=pML1?@yPBY_{Nt$j4hkm z;gK&1*@#J(TW%XDUIDf9*8U!J5*+V)Ebk+J1;=5zF3872lKmrx&AJ;OT^nNSQzSQV ztm@%Mg$fGf_80hQ(?je?B(dFbkTi%iEj0bEBoqq7wO)k=ZG%$kv; zJv9cyPDqPXG4aX^0`xj2-haU+99<~m*8QWXk{cmUxueQQn00twjcr^Ln$IwMU?iBN zMGsPf5)@)>(}qG$ZRH9s>}*9-Qxm>_Xp9Y!2DN1KOOQ(A6vk;r&!?w_6hRCNa>?u@GbTy;{4Lch$7)+VyN;`NwSHgoc^VpQh;H`b_ z*p^9PUj_K=(Gysz_( zMA#D_$XEpJZ7Nw%EOd8*~I3lBnT= z{uGu13(rm!5K`|?xtAvT%z_-WbQCO&NBYu&A+#YyW*QXi zZHtf1_Iz|E9k%J6%U7AuL&ujkG-UM<3q=n}JINje*|dpTrHtODBvwiuPRtizPy#@M zVYC1ywLj7hmfQd-D+${T&=h(c)FvxFJkDH&vsF_{M{Q{pwS@|LS`w%=XV}Pyyxo(a zAxJI=Y$WL%Eoy?&*99)}riLXg$tVXI)C3k8DbbixojDLmVv)Ngv^Cqv<;$=%2P?%2 z^h^@VOJ%gTG$55W5tQ=KwFGOO{aO_*j*cX8G4&8LWgXQpM2gL8s<52|@*({`fp)5r z;lZM|PQt>fZ=g{2*q6NO*N`?XRG=a60S$(MjW#ft)G(##*gapOA;JhuAB}o|vuV1Z zVCF>S#%)u)ZaYkxCVJotR+>P-BCG)=UQB$Z_oc(ic%UF&^UrsAot# z3%`B!uj23i^f%xHd0JFVGNmp(C4js^5PD=8@B$MvQ+Z6BD8dbrEEQ#IU$VNUvs4u( z`_of}D%LEoX_n?uc1aI`80Sg}fS3%Jpl^d0mXS@Du;r`{WLj;m%)^>!FK~N{zK}O8 z$?BH7E&n?`FFt*z4WC*XdI4r4fg=Yi_^l7zhV}hD`0KxV6IwDltk8yMIQYRmKgF&K z*JJ(GVR=}DK-6L5x-7b0`6(i>3f@rwPz(UJ2B7CukmE#XqaOj;v%8|iM?@Tz$1bv{ z7`h-*B{6-kK9fRxk_ZT_Co(L|%+BG{H+~t{UjGK{x?nRNMeEhSw;DKKr;Fh6;EJT|{5uK%>QWJoyTU+t2?tToX z60mfV?$LBQLC>IskYs%d)u7{voDOADNEYU97=UQf)a@a=-%4QO$GI||)M!k$fg!Jk zOHvsOCJZDDvIirZaFSjNO_exGVF^u%kpL^>o3?85$29gbM6l9@qq>Cf`NLGTE$+0U zlnQV`lxt#_mc9k_H@R=uhw9WT$0!=D38ayfKmkK_hQ4G)a&YG%0B}H$znOw$D-ek| z$0kZ6V`zXpN98}kD3A_~_-0lLzsMEwmm>$T*QK$+Qj}Bo0f>VmYg+)~bmFDHO2On= zNoZT4A*V+E7aMC|Y>0m;uY1n#ez6Dj9Ird$KT9GB9iN&_&B-J`G-6?@Ybh*Nh1a^w zD|t`g->bz!50TtBiQ&w-pql5*v!X?nB;ZeK{*e@qAWUit&?Zd89G@o_u9Su2mZj6p zr%i!oXa{)prf%%&>&B-advu0Lm(M^4_n=OF9zci1-m=anzR_YR%J?fVZv zZ%B#k+xHkj&`6`--i(!{5|#pw+ZMJXX1E((fc|V6{mCpkTC#X%Y8uCjW#nkiyJ>N6 z|N5amJd<0&-P1Fu(ZoWU?`>*0yR`x5HZW@ho#1IKwvoh4zso~p6Y-@Hvx~TC&l}$z~WF};WFi75KTF4q2&g*W*c`c2g 
zF!$tq9=}}7qY%1qwE!8-Kw1khU-F=t7Breo%! zft_`bF@a0Fo3SgML2t^&lQRYU^Vk$8Bj@yYBIzV}O64SP{S$wJS6{pZetsD=fQPCBT5ELOc=IvDx`A$CZla9i$BXc(1f!Kl_nox-jB1H0 zpV|28@60g~e&x)Akw_E|6{6~`O6}(o64@$toZXG4b_;=9X5uQ{B|c)Vbi|e`c6YH6 zMim!B%z~^K7{aI~UkvcU8}7gp$7k>lH@*j3dmR|n8p5y@-@orkn8^UwUU!+`32^cv zs^R+Q3pzN5a2^CC+Fq(KR1B=Od|*`}$|PPs#0r5HWWU(XC94Al)T4@1onM`4E{0JF z6ig%$EN+Kk8(3VJ#>4kLj7zV0C7N5vn9JoMMid-{i*Nwn71J7{9Xw`75YlKJJB^i9 zA2;1{7d~_AcQ8w&+xQP(?s|ekZy!pS_}%_Nytb_!onejFKf}o;&3qt$$Y;4Sf3Gx2 z;)j+3C7+@ozcHzfA}dY#<3UpiJXEaWC$$QjUJbY*E>9(~Ihg>pL@7z5B7(_mtnUw4 zPAJkunJmV(&o!qJ6|3@Jxu%kB7DDYfu;sf~DkhPIAOzdAQvc5DE_!uVB zol1zYT#@_yZrMedCLuZrIGT^EHV+`k*YJ~(ad>v3ZqMp@IIAg*-`TMhXZH`{OONlx zx5tj6Kx@zvaQJ60-i2xrxZ#n9FsG4D*g%JF;LSq=NThAtvi~3!y&7yYflR33&BH_3 z-jHFn!S}`|SV!Ne1-NEYA1>=@#i3jg|M=8lQ2UxZ4QLGUpq<1-zJ&cJXVKHygwcE% zxf*Sd;sNoBWt`L1gv+-M;^^@S3^Zr4tFsM1IJAhnrzUxD5om+86L@uBGX@<8U)y&a zp+#dy9Wz#- zP@whADZI6-gJJRS&Mso4LU3(}zKo5PN|C+L14@AO5NEeH;n!Q6u^Bp^m@MO)D~qTa zKCWzQ!Oo5b?Du>;HF6mDfBK8a?LERqIPnypf;>3@UU}XyZu#3kL%Uhyh|G*^?lTi# zgAzEQF{Z=@NML!siqVlplwDeRV$hXG(6&OvJyn&-<+6d-1t%+>0AN z@>aaV|w-TUt+g9j? z$ImW#_EYw+R2KYMG%v=dVPYQl`-L%30)F7&mj_1h@z36j`yV*~#syfHA7v! zYo8aD(D8T6^(|o$jpVZu);~3Qs?YP0YeJ687OxY>8O2O>JnH-;ipJ|Kst`C6t%ms2@#DC2X&zPIj3oeTT#nX)D9;&Ya zb|n1JXrlKeHy9=p(E{N&$O&pRG)2ylXBisL5fE7}_r1v>i(%T34SZyXeT2fs?38>N4K58riKg-uH->3;Jyj88#TOfXaJ8* z=W(=DP<(GbJ{q9Z;D>nC#w{3`S-_qgk^gjfL@ErbxU8WGyE=ODy<;bk(*vYJ1H(;e z40kl)o}(vl+^zB6l+tyi0lZ^VFJ3p;kME7lZ$i%E7y~4`Ot_gl~?F zNtH*)uWt%8Wb6cH{182v1l~Q|kM@j>?;oGW&u4NdA%vs3=ykx5^m56^eA!1=V+wB@ z>c%;l7S{jYIX;D-&E%kI0e-bRgLiD`;mz*z`(|-CU&YRhgT7P}NBklV=T@2cRizmY zT8N9&NnG00h@VcEu)Def*9fr9ws2`Og@r^CdzPk=KXeRxZ@L|omBk2HiXPXH2!R1U z`bY1?pT6@dgoQ;Zt0TsJ$dWj!@+tB%q{{|DCYI)0jGb74=TT>uv{+=qBK-3y(C`{z z0~vZhNeiUKFu~6hh${4Y8kSYVrY$Y#9&})tgiKMwL4c>&#fZPad>FM&%1uxVuOk$I zAO>33WQ=N6%b z7DL_iEQyiFqLr!Od9kNbVlKgEW?4y?UmlA}+4&fyj4f&x#9UvZ6kttqpr`{1QB{aX zTxu+J;Btc(FuLg9u1?Irv=d0TwZN;EVNxrRf@Dqf^a+ordq`uIg)ERu3cyKWexZz8 zZv8R7`1K!RYKh3A@uWqLAXX|8Jw&Hr;w>HRcxO)^)|2l@SdECuR7`LoL&bpX_w$|; zK_rxzNCZmyDhSJIk;u^&RSpb%vrxfdJw#U!;FX4rooNSJSYwolnoX26NFgaF8oYDz zH5U^g8O1qm!;ix*YKNDFRP@^V=B}FAYXZ6?XsT_w-VTugT;3c0CjKZD)a#2x1ZaVL za7xI>Dw{&_pvS)#$!J|lE2N(ysaDVcy-?!#its3tirAeL>s5_+rWUM`#GXPC|Ks3( z943#CUO5aX7IR*FxQ%74^*7Z6p-OU23zqey?X>&#AC&;8tm-)vfHUMQXFTyJJ&XNT z{@a-n8*%@YagfR@*H#7f+^@c1qD*$pL7cL9W1q|4L0`rb-GmKBRDICKPQUX#ka&lP zbb?qYAt`d=BC=Fyzz8***WZFGwr{}y`N?A_(_*J7Xdp?UfSUwi2&nij4BC9W02zcB zXl=wqq0E&jJ^W2!h@D=DiBQ9|t)b|V`<(`x`;Z|jJfB)r1&(RK^bIkYtyW~K;F8`s;b^&xgUcb(wuui8_h7Trf@w3rKkR)Lvos+u2-!Wd zH|wC;Okj+zRp6qg>F72!T&x)wD;IFg2;n5MSoqP8FmmVJ2z?r-6kk)~>DbcSjcm#xhz1QFbfRad3g@iOr zhWHm6c_h$Xq6vSi=pRU9U{ePS+hci{jsyaYxE$0^mJDZx=w+;+gJp6oorBcpb8xQk zS)G`y;`;a9gca0q^T*zU!486sX)sJ0w|x74pfQ0D{N8JL@*ka#4kC`)(&ARO7+8)a zNO*Oqm(UOCA1`)q@suj$RN;0}#V$4%5uC)+uDCq{;gbWA_$y@)k;#Iv=aoNF8YdD8 z`QJ)OKy3-KY&K|cIPoc{VN8e#J!8}+BcBfsMndBWr1WmXc2Fp%@tvRj694C)?!Z%f zM;YxA(Nk3Ye0nNFltjOsj*a*9_2ZSTEpX(l2r*bmc$1J%1D_Qm4@6&(lE`Qi7WYuv z-3!SmdlHZbftc3!V8z9ct3KMb5SJMmb~U6qIjs?Yi2wphF8H!4gQV!HM)tqzT1Py9 zs3%>&q){7YO+O}9zbbj7WNbtMD%$Kt;wfDLqBWw>AikS2VNl4u=-DUxJcXrpTso=0cQUs5{CtY((K`RFr;-n#$VIe?T()x<&mJ1ZQjOXNLWw}fk}-hu`O#PA0HQ6R=<)LA`#>N2h6^!_lxY-9UQ@S7$HcFanUH7r0 zqXFl4G~-CVfQOIIGCzU{TN$!X&e&{xl+V?WNG5q?DzzWN&_#_*YBUqB?lW*ckWDdv zVKVRXM%rQm8ypj(g({|MK5FDcUp$ zGJ1Mb7~av3Aj}K?fG@ZN0*aTCE@|Sz`e=T>WDt}#!z_(sc)kW#PvZT5^aH>A-Q?exe(Fs%^e-M zuCE{4EPBd>MKrAkY9sjEgQuiVTt6|HPlhxZ@P*Z_Ky)}D(t$HdB=r82tUTid_(myD zKBl-zH}Q&03RY0&*VCFNb1r7XOM@R_?jx@qeK#kbr#LH~!Uc$eaMb5MrMOa&J9c;C zdoTGfNVk_r=PxRLg#t($VA-ZsB<=_`kcpt$#-rpi;sHc=UtG*sJc$tDx<6&)RLD_V z+1MjIx 
z!AlLwXF8Tsla4j7iJ?aS=I=Tcd5SmyXZXZ5nU1;|vFI)jF8-VNofSz-jFTMo|0Q>T z^}XaUD5}IVg&VcGgc@2555N8L?fBRI6Bu2}BSG$i1n=v_8YbsLou_cUVM9On)W3Cf z3{}DcXrl>yY)&Qc-b*jUO%LwDzQQv6M1mi_Zd1oKTZS-^&*Pr4DU_HEZ{YHdR{UD` zI{fRA!+30Q9&l((F>NM7Ms&qM7Z#U`czPudkEf3Twx(l!#zE-SaCEf-Pfm4nG#wp! zfSebgOvf5}NF`{Ng2o#8o<+d;MBa>YQi6)x;+ zL0Bo^@tHhtL^LgMU#^Vte3=72S>clJJZ*B!I@VoW(%pm|sWir`W&CJj9u=3am(XRq zL{4@AiyZAN4ZV#9u36uU#j=ZsCgw1mcLB12CM3nH@dg;`N#+H-e6R^uw6)-leaCUQ zRAVA!!t}A;^3aj8Fq^MpwxppNT1Z33*5));{2ITOX!%y)B9TZUYnpgPYX)x_-hhLZ z5dSX%=DkG}s}*qV!kk;K$o zfRFs|FXK0_-G%Glei;J41S6z?XtIidB@J@-6nqBpACUyeYCYb^m%&WHv;tHUJ=(g4 zKXSTJ&_%SHE7_n78W9Ot!opnD)!}L76VKJZOd_aNM9&WoKcvrk1`UA|)=VisO&Rawd6@G*@FtEik@td99xTdQU9n>$RSs&6| zz*N@Hv^MG_djW4#awu<`yi{W1q|~GPH1Leh(G!-&<1q2fLLQT~8ZNVwcwIJw1{xt8 zk}Nvpf(Wa8&bqZ46)ZyXrrHeRj6_@%*vVSvRsC;@xz9P9n(8P6s%imAc8gpm zG9gsTF1dCT0w7kclofK)Q~!QG7EWemhN#$|qJI$)BzZ+C4+4c82p0(10#FxNB_Jw+ zmH!*HqC|eeEiUF0&?(UU2~c*DcywZ6Z33`1jW}aiUz?rq+EKtu{UD-hpEJI-(eb$w zfO=+Bht1UyJYN;RCha-B0rWg4aXM|`^f3JVZ;yU^h6F%x|6b_GMIR{oiSmUrzfTrH zv4g6k2ufef?+imUSUwN$`2W~@4|q?i@(lQynSZ~%?cH8kItq$l0c#LJ5+#Tw62TH% zf?!E8`DsuhK@BQO1f^IIR6vTTpeTrfA|goJg=Lprw%o0E_xAfQQ@-C@&diy)|67)z zU-JFFO=0ienKNh3ob%S_eO?L+i%~DTni}I&q5z{&4z^_~DbAP|JIaukeBh zZMldW>rew85<+Oy&~SWA_<1aAE#SQ44#kaYcj0Fn2hi|<4$nbH!Na~<1HM;ek7q?L z!W#};$@I%tuUUsd;;1pLHc1%;B;-X29@byN2M%8jH{Xh@9(@{*?i}SjeJKab^GAK; zO#lEO07*naR9p;=O`?hdf}DdxyDKf`CYQ{h9Z>@6q15|@7bU8sb^@Edh5HRIk9+7yTd6E#+%-W6J79@KIe*ffOgJ8GO# zB0?Y0R@B3tDhM2IKTQ5&qdtj_o*WK4W*PED3QLnfET|%l0+9Z5MOp5Fi%#LW?BoZJ@aESz#T(6cr zYTj2=v($xi5+O%Sc;LoWyiBAx8Q4e+fF;z!7To&dKj6YEuf;=aHs}Ph5;%#t(z}sy z`O@|_ynpdx9Mj&0Qb_HWDbS#m5_IONvOp=#V-28=2Zfmhrz9y!HjWJ_kV_jSjZ<`h zx>v+a;}iJx$S__~YQbN&wxNTv{p1KJQy`m~TAgGRyh&8>I%~^`B+PDU60@F`E1ymQ znD40unB{i!Dp{XTzL%|~G}UW~@U*y>o6|0!RZgh}gq`PClgbXIEy+MGMat?>C zoR2>~wu$=-u<$hg6odaL&Yp)KdT0bSuH|oNuAeyysNOxQqEsY>lIQXqV@*@ej`OpG zIHpZVJn@ms+yq1_&TU`WQxL-=7u1=bzZ!W=>6!ODEb~e@YF-cS-MgDH1k@Fdsfvjt zh5&A%kK_Bg5x~bidq*)5km2IsxW&Ermt&5`zuoye{9$k}N@0MdZ6yphfWbOtmK#`B z&SQCJD;^x!i7_V+-*dSw>CwHt=|cCQ0$t>-@|pY+!3O#JH1Zj`Cj5#iEWJc8*suFx=oa z(dAqnFYc_Mv#kRU>>0-%3O(mS98Su9(IWh2>mJ-cup2eP8$~VNUo8 z#zw{%??F5aJ-c^d&E?-lZD2ch(z7%d3!RQ)9^d@bIXLMR2LZJa4)b%3zfwq34+HkS zTxsnqC~o@JdFImXdl&}f`9(cTk!QC{Q@6*&*}J^R~-Y7N(u92-2SU4 zaLIMw$G?8`EjZ(pmmq44vw=e2n+zL`nZ*cyAyYA4x|~Gj*)Wa;rG!jEK%Jl1zhUoD zHIli5I|`b#1#3Pn-8GTpqy=*9ld_IbT3FR^CIG>F_X-lDUboa~oGL=18bwa1IhS;A z^vW&RuyqU(lqZ$w4Ws^jUtJ#z;*HF&R(3hWFe=c$T;W77c=;Y;X=e= zWNt7ZPmMJLBM@MlMnt%RLsYeCC&e_t6iTPaOW@+Mz{U562hmsX@bNQ1oy^660+U#b`9xwxi0xr`3luwIBJi4Yd_C$poT< zCq8ci;Cf8XCLq8Bn{K(qQx$u>Yf`^5i;_`Ov9&Z+E!)c9d{p~WK6eIyJ*VWU#*m0V z=YClh&E7ZB{C0h^ZPLV#$UMs)Ej=$8Q-n&h3Cf*9tRjFT&ht>UfL}+sl1HsdjCx4} z5TRccHd8_oJ99Y^P>y)wiK^BUic8*(2hyEHoiRg3tWs5j?tcH`*!ROO2qoqKeiZXv^g>GDZeeh)Ow+?v^6Pe)k8g{nj;b$U}}r zs($n+!U-=r1mC#eBj_nqxqmxhA*3%m)$|kk+cd+WPfNG}37I!0B5dC{gk9Umx%UJi zIUH_5>ch#`(9!MSfTNegEs_UGMqwVjDB3SWz;EIi3(J{u*>ZvoRdoX>s}hacF5D?WEN`a2@&N6&;ox}TNJ zFm6T6=S-ED@#fW=6|8|82P}(?dzd+!T7oCus>1#3@?F*Dv7BJcgQd9Q^Dcy7Of{=U zM9c=t7zhn-bBd3CipIDLv!&lsZK(u#Gz*iV^V3@sWmZ_RD_VZQm5>4yI|jMpalU= zecmdpYMX~k?|Be=xTc#708*+74C8QRh|WB)qOT2)4h&-=^jO&vgP=po7ZK`#FqG9o z!UyM2U|ws=dp5XDu3w-AHPrTqY#Aaqk|s5E>N1xT&UrEph#aPl2wkp^zns^L;ar5@ zY#+jS)kokJ;S-|RCxQ`jR2{-5!Xsc93l$&p@($*eau{m37?>aqGKz*cXi$h1#9&4q zvctmikd6WlnP0}np#~lr7(>l1AR=}&YM?}ndJNVgJVtGPLI9J)C*O^Ran&2sb_Xq!P7&`x&hJ=C?5M;A091w9lx6Qqjdl|M(u9dGd>Z z>X7t)ps(Sp?<>XlcGR4~o~}ok;SzRj-HY8jhES_QcI z_|2Q~&{I$0YoB~GUVqXH0e_N#f0D{lprN`}qTI#XkBuTVs1ql_G=a&4{`H)(BO{h% z;)Gi5USqG-*bOwWaO1@5nK-j@#>KqX_2w|Tc;ZBm#dGGA3AH;ez|B>#e#;;}`}ON^ 
z^$mBT#!Z2U^4c^RN`yolE_WhyL=N|?IBUUT9NbbwDWXar>Ly5sRq+T~=EspGp0@xV zaVQ6BPBu3f!JT6h<`xCZLMmO`ARnO0OK{EruHf?nNwhff}^&H+}mI=mh zq&X&`rH~V8z(`3jN|~fssj0wWuYo+fDi(J+n3w=|O?uon-496l=kd{F4#I`MegwN2 zpapdK4gBq4D=`>4xPI+Za0>+nu?6Y@O>8N1W*bv6qi=?PVfCBT=Q#hFr z^;`^&rDYFi9(e%n+pq_}-m-`D>0PaPj8to=>ve5;7yaQRo}L&-RA^_~Q92A{ICOj5 z9tQhrKJrA85>Qh*C2w+oiOC5*`9wfO9m+UdO*{(R6^~HlvSyKAMUab7ps;z};ckAF ze1w-R>gFa(4-D+ZNW(+ORRKPtT8))v;1GX7gd=)daQ4d&=Przw{r*XAg-QpxLdHW_ zLs%#w^eC?@eC|tI9k>M-TZd~rFA_YIe+>!q5QKQ`;`#XaOJBmNkaK?a3q%DMg?s^C zYX$yeYw*lBzJqXRZ>(T!)BK17SKx+=K8mH?KuGUT%3YD)TrSTjbZYZUikTuX!o|=YTzB6OLt%*)r4(wX2mH73HQr zr-|=dV6`|kL93&5uB^xTM#T^i?*av6+!Dru0&f2Cz4-iBzlX;+ZlBicPbwG0fM1m_ z;NA0=;N<>!n1>p71*HZu1&SuQ!T`x0zm~OeC|@HKj0>^U1TIU`xB$kkO_%^PrCahWt9fhY0ekhDoG$gvj9oTAg$Sqab5O4%-n(Oi^n zNm&p3v#P>22bP%Ls2wbo+R+B51|CIXnyYwNI~HJ^dkG1V&+?WWd+Y-fMB(eA5QV4_ zrT{LeL6H!$P?;Smx&nH0F5Y|8YTUnZFMhRi7eXSI5wZf*!jSO*)cUxT571U28BxTJ zNdmPMxE{DVu@C$5RDn?B(xJ{u0S7JU!DAZ+P$kkMYN4A4UbeIsk8T~s#?b~y*&o`G z#}lI?7^c!PFOQrbVrgFmTZV=?2khkpiNCs|6=xi<4BN&7+<5Sc1 zC>l`6p-%ZzDv2b7yXT_fML4Rn4TpDk;g{P6uwgQQuc+R-6QV%GCZUHawL~t4c<(_A z(OE3x`VCvLZtnzFI*>#qJ_9cxJcfkT={=5G*oJB&#QodHFpeB@PJqL@TF_Sp){IVI z*EqFx7FN2TK1u0WI3dMIJmd-#F$kGk-D}kGS4$S)ZATx4do~Z?zkdG^w_@dZZ%YpS z1rIxK`4RT~>@Mj6U|dgDWYnxD!YAH$8a{UB>yWPvX^vddG!%Xpg-k|Iz?cnDf@%zq zBKO62Bb-6Gr=68JRdg^|jf@q}EtA1wKz{~sC%6I=@|HZkw1%Y=PZU-Gr^Xc;8=h(4 zw14;t1_vkcrSsp9RrA{5SAn%VhVUuem>y^1`KZy;Q`D z@)flDb+*Gy%c>Z!rwzy$7OZs8Qk|i4SiRy{D#yzzQeMrgq>>~fzU3g>dT0@r~hNhR$}a;<{=zn)MF6@TvJbFM5YD1SmTtFC*WWn ztuE24L>QyY0F{o$9|L9Y22PUeQU|%#8vb0#NZ@;w`m)CT8n5YwqVoXC~6)+ zxc^RSIT{eyPZ;OkLg)~BXBf8K1rC2Hg4hBRKulFThowc{d7_^YEoB9*3^!d2t#o z*3PllY|3nn-E4B)aZy>^I7a^^4(TURBbmH1_{GMR@pQz3i2nQ%H$5Ahs!)*9GSuVF={ z63W&x+LAsa0!?NqF<>$Vg}GOBeLA8rWh`A3 zQ}!1NSWB5FIq#`yag#tQm?J^oAiytk`xEmXb?>Va;~!~nkW6sk1tA&)N#MgT#H*{y9?m%O z5M1%tdJK$Da+LrX6dh3w)w+v`JmrvGR)EBKMj2>w^m>a9-g)>+{C4wR+_!x<0ur1` z{1euRQP6j7qPff#2j=xfFlcHi+Aw z*^Y)oECdb#yV?8BA%_}8RnWj|7PaH(B|Z50ra}B+%RV%SRgie^ng5-xP@;>RINTCu@YcD###EY>KT* zwDvzy{<7J4K3d-`dzuJ)LO8_|e)I5loN>;@WIf=rPo0b2VvgAzZ~M)Yxa5Xg;d=pY z{>q1O!b?}cZ&VRBq`M*Wp(|^Rtz_Bj*E}+Xnx$+?PFTDqHs>4vO41S1r9X-rEN${|jE<+Haoh)!exI=&$r@O_Ft6j&K{y1*HSFTskIR$TVbIy_aCzJr7ppe_m}M*h0+y$FZ& zmT~5ROL70UA>6cK01dj1DkR(hr6@up^0;j+QR%itlbmiDW!{?tlim``-pbYrPFT5| zxf<4NAH*F`Z|0~0vA>B=<)gc|6;ZX0uo`e>#_;GE4r^(_@kbtkJ2!5_uAN)4_VO!G zTepElG4Cb@j`1pB@uG#e_CG(0V^>vxDwPvbEjSr7ObH{cWlb-4wH*v!DVKYxLLY|i*{n~tk#e(Xr9s_+O*U)2?rBzNxPFt&3y9S#!yL%Q~k;O zqvqXHA$e8!G9wK)Se|tJ+D5@z_=Z>~OraW1w4MlQ6Qjrqds7x@qVh>izdSCH@v@xy zb5`Oj2s0*(G7%CvMwzRP<~PI1f&G)D0CcA`cU?1kwuxajN9~=ybB_9ShC^d*syWUe zby@6W&pea(w^oRYzwwmM)oh$5W=2dTWO*N!)tUsc5_2I2G3f?kU>6~bnNNUdUx8MK z1XLZ|>{fwK>ZGvp%yFX@7Y(Kk2D)+)-tznv%oBg*Zy!ZaD6wZzbV4-z5Y?KGN-08T zDa2U4ASm}JKnL}`ZT*R9*YEtm^jAwgh!ABXmL;h^pw4DT7nq62&JqX#!&C-En=5@p!GK0=d|-* z93c=7+4Q2P>IZrsCKgfygBP6FUYog?XcKRJq1hA0+5nDIgIB5G7x!+#+t0ZKot0L6 z`TP%|wNSv8?IZZ&cW=T28#bV&;^L|cKZH|{KLk;ALRwa*>i+daDC*xlDt6pr#cfWc zIjBmgWk&`gsp}VxdeJhmo-nQ!uYyWsKC1%mGz=%V&A@MrfZ*mHd%4~d<4RE@W!q_ylwGP96*?v#t4^@mMayBa?rDg z#7n1707+?qoKc{bsEz*`_kl@p45Gdh0A4H~)gu)krCqk95elo$-Q+=xIi8Y&s?=hy zJY#$CN?TP4yz2Tv5|yj@1v@p9R4yh4sm3l66>j#8GWNwlclo?20HBE^*akq#Hi(1% z7E__@6T^QY_NOr_P1+^-5OGzBrUA@8Hr~CI(i)|r@o6IaK3Nq*I_!ER83J-*p1y37t*eq;6OeiKL)Qtwr|I zC#y9?L}*kC@YWX{jNwKVKYVBdd-jxfpqke9fXWf;Xi!+3WE&R}OGxzjeH}RaMF-(4 z_dJ4!hieQ1BtjGFC`NqugghpROI~^_5o81*#(bh7av1nW<4}N|#-eU#i&|Wqys{tn zY}<>+_f81k0hKLsYgAIcBuIrhvA5*n4M!}%W822@=&liNGDP&c)C+;Ud1_)r;s4HD zgfpMN46C|2ap@nP!c(Kv+n!oFGfNQsi5d%ipU{?+-*2>FTFx08vUV z`sVfFs!#s|PB^>=Zex@~`dmXCMog8=`%V>jk~bGcBrR8khBQWvtA|6C1QM=JYX40L 
z)(COl2Za*nrbE;)>6P(!XMYQ~+;>0TfBNZo+v{G6J-bJ7-B0hvwRhdgQLNV=cQ~%P z;2d<7YJxd1bi3>lYej+avTUJjH|~i6Y@*JjXhhO0#QhgFZqmDu;&-s3#Ql{`65p0Q}N@*KApvcNc*T)9S8OKrsD2a}r#_3FC zsLY%9gjj~6;9~L|S&zgK%A`}wTTk_rbiq|78joGJVDcwM7h3&S{w)NfpAki2^) z18b%7lTV>hql{B$-Zv$!b^exlnMypE*=YbOt>aY7i@r#VG_gHRm_vh6yv|kv!xV$* zA`58|V*r|00b~L_Gw7wPLQbvvtWkie>fAn+)2ZOO`O9-trq9(?)(3emhMdg~nssGW zOk|cSmD`KC&duSIKHdmxIHaSD9m5mY7Zjx?SUn@D3fJ%v(@RSESJbGX=GWlmOHA@2 z^qqvhFJkw;I+NHqo-loJw?D%rz&Tm9bk#YKpBzTHlcH(V0~9MoR41$GF6R+=4u&QI zDSM;FLU3{XqWN5pfA7E!;+W%xB|@bdV0B*?UbVChKVGvPTdUF(gQ^i)b1s(jcVN@* z5scJ*w0IF-bKn9@*7CS(%|k~V%m#_tdk#MNvTZnp{=0KGQIk*h8sKE;w$ay(bxn$50fFfYeG`dH5TVmw|HZ3t@kdTacWD?N zwP)qVDCEgYm{UTX=Br5_n?wFatP-yS3uZ|R80BK})ofVt^(xC5rLXOIBM*`1!z&eW z@4e69%^&(2T07cs(Z~J{eU&19_2@=?_S$PORG&oN^>FFGza8&5^%Za$t*vEVak->zZpk?`Y>csK zrA?FKZ*2^KtV*4`0g&DtdW2)4KD#coBLJySHZ_Ma%V2ZRnX^yle|$v(xO-0LGs|FE zv&(FrcyC$n6&qc~8fC$y8;%VC^2&)WGA9(lrrnncbatkg=UWJPJTK2>UW_CrfkNIL zMGYKc>ZA6x#Emb&GQlhn=t(?4W*RdNkRA==HBvu#zah( zLU&vo-CDvCi~DfRnhmHCk%*F)&_1TfK*^(UY5@TXY*>&dPXx!UEqM%%5)hEyGe8Tq zPYqp+O!_G2J@j;xF*r1a29-7%4zygz#3jD(qV7;^fY=ym9}_uH#Fz!jmD8UBYUnID zh$5<=eJ%2=rP8Y782*{O%f3Y*WB*7fVzloB@&D7WH? zC->m<{{UM+q`&<=+;Hc8T){!*lMJGa4GYy&Z19Cl0`;PDD?T`HIgab@g+ICv`$mV* z-qwbmt~Rd4qm&zwe=#bb2clNry7rv%N=t9mZd%TlC>J9?jLKu>7JqOJiD!%|QK@Pu z8YO6~3#-ebnSoJvX>puUu;!Qijcsjd)UA%9=f&u_LNC=TsB1Ki%gK&Y?&KA_iCs8MI0OIv$q8Orbbvd^w^}KYS zhyp>Bi!kJ8NWUw_4v?_U7d+H!#4D73RjFrs*=t&Wk;jVWkGGfpg}eC~Uyud3?iKDhD0mc?QbukAu2;gE zHRE{G-+uw9mvG@H-ibod!S`;zAJ_f*F4UQuAj0|YdM!TkzBhr)O$H}Y^nliZk$^(j z!|4c(VzKIulc_2LP3WGY6?#xaJj;EQS>pHy0ruozi0ouJ&e}LmZLlH3dxlE+6rnGH zSH51begtxq0Sbj8h9`>n#;teZOW(W+&urPtUcV?B;lx)nUH_xU+XjoJU}JTw-_00baYcibI~VITCH-R@ z17iRZgDI;bV)dMC5#@|Xj0RK7Z=N6u+@vz5N@+1pOjzISgpB2-CL$ZU+#xY*)M=jy zLr)?lx5{q+I+cK-mGR#M7UX)B%TsmBU+%}$qK7a<70jif41X$Qn!II?*aNnaF;1Wfef^^6@i2NEF z#P44yidV}aKFWW(%=*WSet~#KzJv%!>K$|mXZfD;yWGWWnwH*AAj zF7SGi3L#{D5Y$nl&VG)E?(R196iV3iYwn5H!k*(;l=w5^2l1 zB|P-d2Ap^4Ex7%64{8cY_Np|*F`l)e>)i+yCGhGZPPHie!(SS55p_fPh3DrSCF+uCbG{9#XT;saX}B za<`ih6J<2DMqJcHvGinI`Y*W(O<5RAK!vPr*@$LVkIuC7O}YA+P=JgfwvFx`=2=N{ z@HI`2zgZ>0A`7HB_SATj-alhW|Ph(6`Tai)XiU4*T_QFa9 z7l(JZ;fY2%!z2CGX$`%X{$P#=WRk116ObrV+}wx*RHJ zWIK6w1PWv&CQ%KVxfl9)-s<_-x37k+dx-Nu!n~~h_yO)C)sY3H@cDn}j4 z-5sd@=C>I7&JVbM!Bo?{h?gGoN?dcn-(gX42M|`_dPHI*SP$MU{QX2z**;fVd?f~^ z(7Pt2IqYMz0ie>y37MgC*RYS%KJX=M-a3v8&v`dGD;~c1gIjR-nsuC(p)YvatB%4I z=bed`7EllDgi(;O1t$7r8v2Mfmq&|HAYeqTTwq}@rHGtKQVsz!8pUCj&Xp1kBE`-u z4!UE6zL(FCc{eUQ6>^=}ETKj>9z}C17#k1qof~h%e_Z)P>=>L#2${GnP)P{&97ViQ zfklps)7$6aZ@O1taUqZ0)e)>4+<^u2Td{gUKir81<6ju*ufWjco2C_xyip>fCHtc5 zV(U4*#9YhbO=Vm>VysY25;-fhGA4pmdY6p|B#9)6KuVOw()`MVY6Saq;?Wo%u>oPJ zo`m@^D~VN4#R@!G2ys@D)M#p5No#h zj{SMO{W__GHtX}JUSNX&r@ft7%!~n$g#RU1s@PIvsd6rpJn1^R%MRXr@G9K&;Ck$) zJUr!ywZ5M6__PrTp+wB(+hH*~fCX7z&)mG9g*obDHEg(WA%{C7Pou7mWmP z2scN#2^YCaHy-}ucAR(d)%eM8{>Y`Su}f-b|8)<^Tq5qqd2~w$J~)3Rj`oUZb6o5m z8^M;zVXR!*jrkodaBB^o7n5F*iKH7NZg#}4JoO95C3D2ZPtgME{i=G+d_;Fv&=;Hhi*QDCN`baICot~7l4UI4pq%{Q<}h3 z5YTYU_D4B-21SXNY#TN5qy&`3hn{lv8lG=y$)m=d`O*;z3i=Z(9eK7!vC&9GAKYEa zbIm<)3HYhJa1kLUOcZ0-+y8o z_69<)+})bPysj2J@ys460rQAIA;gibIh=9y;rQbHk7LgSQ8|lOZwP0o=Q&rEcOKps6@QOv$f(~4B%iZ|&SFgvGUBiO#H<$<` zsExcP%!nfh|2K6mz^PqJu_B-2lG-PB@5IE&D2`ga7=7&}gw>j)*xX3)CbYK*lxyDA zC|cW*Hkb%CAPsIKF4?nXjPdyGG(i>+pG4N1OQ$LFJTaXLDrJRz(3=Bh2{ zs0@>1KFtlYJjBd`y(8 zRNAYO24>n734iCQ!H^4wQT&GKEJ-Y}1Ozs!}8%LTk~(fd?(Y!;fzOf}C`teSd2K#~ic}KYnNf0+(v}jg@ZKzMIZ)A(pne7#;`q`eI1snI72Zl0orM zLpb?5V<>7hQrd)ImlVdlN&%aPYY2&yC?bx(2IiN19JOpd?s{@3BCb7<@VpKKKl z&VAooaK{4=;j){5h{=GA15pT!vbZG>Hlr+cxDN4rfHyNa~oV4edoe8-@1sk8)hx0GK 
z9^bp;KI&_z5d~G!tZZ{>M11JeVB{_aefFz&7TZ$?n+CjlX-wk&kUu!8|mXi!uHT=#a-Z=7MD`#$eK^ z%3e(hIaBfd^5z6~&OWxeR!T4kS&7_SU5TQP4OIQuc5;sAH6;aL?1TL>0PGLhR7Iu! zaYp5DhKe^;lku8q&;8MEnIW4l;B-d7bkCX5Xqe`gnSYL7IWgjz?}cgp)!LiQPv9)3 znhmnCr#AId$iGl%p6OmS+)Cs!Tmo1k=05T`BM^vAuGZ_C&fP*Ir9xzCnNxn-Bk)>) zqUV6iPxslcOq{Ag^9fef~dfC^b9c5aGQxe(AhO(F+qxLwX2b0+In({ zB766Bbn^iMcwegjT8$xr2Vqp!>@=%qS>4Mf%~Lai*^ z06I_R5=cUrg85L7+M_iiPKgSBd;ey9;GC=QmXltGBadE%Ph5Ew9@)B8bN@hl$-(z8 zJQpV(e+c}^VeVByC5mj^vGF0%BhzIte!jNtH6eU15j5fcIM%GdIm1+_%fMhdH6{dm zA;u(cH$M_OW{!Lj<+1cgAZqNuDdNUo`~jc5_!_L=Iw;msT%sl%1p<%ME#*XLj~txR z(}%b9t;7n~LrWN97!5qIbp!f4%XsPHMJRAZLCD{enlCZ6HMgBMZocH96La2DbO}t~ zG61FuSWCyp3ZBPjD@e2pU!&(`$}zX|Q+&Cdgh*+2OAMujtwaP2FTpfTY;`)xh>4kF zfQ=@?Xh!F~tNJc6cSI3M4u2}5CC;hytLW3>hbB>>ByVAoWU0_g+;<|0Q0Q9P0$7bM zTRx|Bs9J6*M=<*Vi~(Ro?Q~TDae!dIQULpKS$T zY>#+027oap&DhraTIb42M9nq{-M%`PWa}qG%*9T!{Ex)@w+GW{FoPL$n6oV@wfzLX zV@gv}KvX9#;4~&2&7}U~A+Ip_%IRU0mY|pjUE}}&uj>;BGo#8*mcy9G3Sw}T0sDTVn3+Cquc}BevgcwN-H)F zjN=QJ-H31B`fH3gr0rGWDyz^p;73mYgv;^;yl2UQIHA&s-f9h{mR9VjPU7K>>#=HS zAC6eq19xJAbrIoKnEX=vTrh@0BNNO~U@3Ydb}TbM$&u3i?PG6WPlzo$VufY>qwzV@ z9%@FpXGEAi2ea()`QuwH)V&GP#0Sh)25gbB;hYx5)@f!ekv`G-XQ`LBlsKO;>B}Hj zt9()Cn5BT~abskOF(l_R5$oFfAfBeML&nCEmg*V7n8IZkgT(f}bUhL;-k_xP0a=}x zg8rKbhjGU=UHGSq08E*A{OMEHOPuepcE)U4${u=dI&r_R+HYgP%&F;(`!U*d@3tR9 zG<{hUai3)+;>YNQwqMI+%3?*J27vMIxu!YrC9JL`SY|v4MafTTTSA(@Br;b&K3&SS z(^8`4lNDU^(Lo*V@*XDZ6z#RxiENl0Dl#r{6KVq2ekU#7Y=ZRe`qTrRGHh zva8oY=n3j!&@wWDf$MLEzxGjylI-W7m%ZpPeEaWV_NgRnL2Jr&C5W6lH=?T6T5#THZo-WfJcs}QAOJ~3K~(T?gp=O*T72=_ z-^On?tcx88y6rP(y$&Bc`*iMZM~wr>z>(5SY5!}C&g59BNM~wa0bZHk89a(1CSvXY z7TA)X=g&#iE%eDk$;;FK5HO#NAWG2?{R8nVkV%%Wl+mcS;`*Q7gA2ZKBi25>GcH3l zb$t>E5|uzY$R|=C0IzQA!aJ6)#-S*nwLw@57wZRyFfh0iFFtT77Pgk*1P#V^5OpUqS#$=G_zSjic2t-49pC{6(+X7yB&E(%G52Pvh^ zcbSlTEEv>-P7O45RCz7q#>Sb6Sn1M(q}yZS^qWYOt%_t%h*2TnPIHg^%*&$ZF;&87 zV+Pc)GzwozjMMnKCc%Tm~uHvwV$7eOTs|`QG|lW32y~K!9oOm^J{K zyv1w=fPKsV0%Je~e+uiCQF8mCy=Qx$S%=En)X8G;&@Ap4dPrdQs?f6$^i&Qf8DtRVrvoT0bZMtdSDcmmGm0awO(g zp!f!P*sg;*M+YL5$xsL#)QH|F=b;h#%&0j2%q~pc`3uzla38mt-TyztS&+wKYT4- zb<(RbRIK9jmwy8zaqb`JDi(0#=RSnvUwSZt`lNW`22^E7FqL&G192wrK^`>L4i0ZCZ!}XCxlnb z$6#+~EpIhvG%-uIO zL$w;@KbfsVD=Y>8-MMvZUD?kKJEG_#J;PWEzhu}+-i5!q={ z`({ndzO@&{8Y98xn3YL?&v_(i$fRMeA`2e<2q86qJgPBPPzdALD z2@z3OK97RhMvF~x`}H<`j;RK)#+Hz9vfatE^BWU*(bC0uV*5^v6Z;++09`Gog5MBBW!3z<5Vi4K_tP=(w`k;1I}9IyMxEI+@c40emtnsQO@V5L>Rf z9$2>qE~t9o+5U0x))QZeOFn)&+H!S-I~R?;_=&1CMXm zh>qS84nJTqDwO^Td~W(fJDXX84FQq8I$CxsF19&tiRO-DSurX+!Dqy>%jD^;{clc8 zO6{l-E-9~MM;l3!^=Z-$fzjq!g*LTNQN~FQnNgt0Ys<`IV<+aN$cVJ~%xbv8t~$~| z<7XM2XvsxZ;X+-i`@^h;g``Xt(J0rLWz@`9?n=uPTd&9=i8Zn%Gp)5s$@8?@2?U;#kW5ISc@+=xeH?X`*|ct$6yg-#6vU*#`M6T6cdYJEx)SjWEro$c|%l zaaQ@KRdq5`ec#lEib51~5!%XS43173NVwU@vfcH$smC{YCqtxS(Cu8mU!O!@(Zz6; zSnLX%?=@bw7y;sYu)--wGo4^;6gh$qxCN&CC68GYs}U9QaPuBbRQxnwNA56k))us8 z43RU&v((ZeP75p0FtfIXdx6ej+#@M#JE?^!d<2rmV^%%`>fsQ&bXKJ2lV@1>B9sbw z1RDpi_m-a^SobiO*ztO0rSpF5#reJS@vZaTi&wvJ5u%7_3aJT_(C#wDyg2{*29nM( zVRz0^Yo|aZSCITD6Z15Pwg^A|$y)sMo@a36%U^(h{?f%5sS@BY`SY>UUx^F;=`Gxd ziVO@wzsEccCK6!pEt_;kJ#t|$l>H!SU}=TS!qG%$r0GR$Eb*GFN5#p1Pa->Bu>-sJ zM!4Xz@8OD@e~C!~gT`PUd*-U7coRlwcRjqVe+5qKS%8IHH4~s*EMe#97}gIAVAbLt ztmyBQ^cwdAph^(gWr9&i6t9Go6w{!QA^8NhA)p^!IXz}Ni>bYwmCxoZ8$-!@#W`)O zX=Pc0ex^`R>0{&rpGUJ6CKX_CAo2W@?_&%ObC$)3Ox9ce zt_x%3Q#5d(hP#?PS}~$CPE`fOxGP%#O`%KBU&jBjE;gyGRHP|=m{X>izWe`+5rAn# z?c62seu%0+<25=;i6uL^TwfOz4 zEo8Klyg!$FQ43&3(GwqlU;sEjOkwN12XFIs?78j+VEsBveZ*FiCHgWxi8B2)+dxIP zgb)4o>+!Brj=<^z=W|(ONQMQ!1Mx0szLX;rLW(0fZ}MFQ+d-jJ%B!=1N6{Kiu7j&9 zF2C}A)SNzS8r_Vq|KP@yY1&e7@!e0o6DPgu`DjcC(Gg)SsQ(0GN(6+@hLNO562nWV 
ziRXN|&#n0!zrFh|ezA2O?@l;s!NfBJ#4^y>&w!2IB0j(V2p-7hP?UQZ_bR@En2!nQ zs$yVU2w-c6CRL z7BProM+F-l>2XTXcn!F}?!C7_ckpICH7`L{T0?E=Y=THYTJa_T9*Zm?G3~eXI3nJ# z=g$%&(kAhmYH`|{&ARY~eZpcmVV^=VwS$jrf&d?N1t{Ytmsz>bUJG&{ukAPTHt4U8 zE*7N)h-JB@@KhI{NV=&zn)7SQETj~;kG(N#6|AeQIdhmxe2({KdFmBA7xhY zd7Ebx%a+zC@WL@u1YO)bpoZe(VAZS=GTf3n%WzMI)Q$E5A(XWN+cX>tWI&_kfjX22 zJ0@fJ!!C9#8b0#Era^i7H?>=rzB6iuxsX5k-L-$Go~EWxtYD^X?gMLK27C|uZ;9YZ zESO%3D3jRCPz+l$UfFf?X9DXI!@Ndz<0{^I?s?>U`+OXbT3)(a@3ZTt@YH>$f%hlq z&1V=85OXD7iejfUH`;YqSUxB05Z6RV5(#`z0a*)O?>4qY$I27naI zG4Y2!3elaD28JOS0aA-VxPx=5^9*;gK3=$T1HZbpjcb_5T>(_PFQgUClX|!xz}Fso z7>{SiQQjS4zbr8#S3Zx~1Y1^hi9u1|qo-D|G2cUfXBTy5E+68W0Csbw{IW*z8xCcK z$8{F`_NM`ows|bbEkfe|9QNdNqUxucr6Vv`jZ?1 - -# Example character: Mario - -This example shows how to create a basic character using Llama 3.2 as the base model. - -To run this example: - -1. Download the Modelfile -2. `ollama pull llama3.2` to get the base model used in the model file. -3. `ollama create NAME -f ./Modelfile` -4. `ollama run NAME` - -Ask it some questions like "Who are you?" or "Is Peach in trouble again?" - -## Editing this file - -What the model file looks like: - -``` -FROM llama3.2 -PARAMETER temperature 1 -SYSTEM """ -You are Mario from Super Mario Bros, acting as an assistant. -""" -``` - -What if you want to change its behaviour? - -- Try changing the prompt -- Try changing the parameters [Docs](https://github.com/ollama/ollama/blob/main/docs/modelfile.md) -- Try changing the model (e.g. An uncensored model by `FROM wizard-vicuna` this is the wizard-vicuna uncensored model ) - -Once the changes are made, - -1. `ollama create NAME -f ./Modelfile` -2. `ollama run NAME` -3. Iterate until you are happy with the results. - -Notes: - -- This example is for research purposes only. There is no affiliation with any entity. -- When using an uncensored model, please be aware that it may generate offensive content. diff --git a/examples/python-dockerit/Modelfile b/examples/python-dockerit/Modelfile deleted file mode 100644 index acd63644..00000000 --- a/examples/python-dockerit/Modelfile +++ /dev/null @@ -1,20 +0,0 @@ -FROM mistral -SYSTEM """ -You are an experienced Devops engineer focused on docker. When given specifications for a particular need or application you know the best way to host that within a docker container. For instance if someone tells you they want an nginx server to host files located at /web you will answer as follows - ----start -FROM nginx:alpine -COPY /myweb /usr/share/nginx/html -EXPOSE 80 ----end - -Notice that the answer you should give is just the contents of the dockerfile with no explanation and there are three dashes and the word start at the beginning and 3 dashes and the word end. The full output can be piped into a file and run as is. Here is another example. The user will ask to launch a Postgres server with a password of abc123. And the response should be - ----start -FROM postgres:latest -ENV POSTGRES_PASSWORD=abc123 -EXPOSE 5432 ----end - -Again it's just the contents of the dockerfile and nothing else. -""" diff --git a/examples/python-dockerit/README.md b/examples/python-dockerit/README.md deleted file mode 100644 index 2ba00ce2..00000000 --- a/examples/python-dockerit/README.md +++ /dev/null @@ -1,31 +0,0 @@ -# DockerIt - -DockerIt is a tool to help you build and run your application in a Docker container. It consists of a model that defines the system prompt and model weights to use, along with a python script to then build the container and run the image automatically. 
- -## Running the Example - -1. Ensure you have the `mattw/dockerit` model installed: - - ```bash - ollama pull mattw/dockerit - ``` - -2. Make sure Docker is running on your machine. - -3. Install the Python Requirements. - - ```bash - pip install -r requirements.txt - ``` - -4. Run the example: - - ```bash - python dockerit.py "simple postgres server with admin password set to 123" - ``` - -5. Enter the name you would like to use for your container image. - -## Caveats - -This is a simple example. It assumes the generated Dockerfile content is going to work. In many cases, even with simple web servers, it fails when trying to copy files that don't exist. It is simply an example of what you could do. diff --git a/examples/python-dockerit/dockerit.py b/examples/python-dockerit/dockerit.py deleted file mode 100644 index 6a288d90..00000000 --- a/examples/python-dockerit/dockerit.py +++ /dev/null @@ -1,17 +0,0 @@ -import requests, json, docker, io, sys -inputDescription = " ".join(sys.argv[1:]) -imageName = input("Enter the name of the image: ") -client = docker.from_env() -s = requests.Session() -output = "" -with s.post('http://localhost:11434/api/generate', json={'model': 'mattw/dockerit', 'prompt': inputDescription}, stream=True) as r: - for line in r.iter_lines(): - if line: - j = json.loads(line) - if "response" in j: - output = output + j["response"] -output = output[output.find("---start")+9:output.find("---end")-1] -f = io.BytesIO(bytes(output, 'utf-8')) -client.images.build(fileobj=f, tag=imageName) -container = client.containers.run(imageName, detach=True) -print("Container named", container.name, "started with id:", container.id) diff --git a/examples/python-dockerit/requirements.txt b/examples/python-dockerit/requirements.txt deleted file mode 100644 index 6d0eac4b..00000000 --- a/examples/python-dockerit/requirements.txt +++ /dev/null @@ -1 +0,0 @@ -docker \ No newline at end of file diff --git a/examples/python-grounded-factuality-rag-check/README.md b/examples/python-grounded-factuality-rag-check/README.md deleted file mode 100644 index 868b1623..00000000 --- a/examples/python-grounded-factuality-rag-check/README.md +++ /dev/null @@ -1,93 +0,0 @@ -# RAG Hallucination Checker using Bespoke-Minicheck - -This example allows the user to ask questions related to a document, which can be specified via an article URL. Relevant chunks are retrieved from the document and given to `llama3.2` as context to answer the question. Then each sentence in the answer is checked against the retrieved chunks using `bespoke-minicheck` to ensure that the answer does not contain hallucinations. - -## Running the Example - -1. Ensure the `all-minilm` (embedding), `llama3.2` (chat), and `bespoke-minicheck` (check) models are installed: - - ```bash - ollama pull all-minilm - ollama pull llama3.2 - ollama pull bespoke-minicheck - ``` - -2. Install the dependencies. - - ```bash - pip install -r requirements.txt - ``` - -3. Run the example: - - ```bash - python main.py - ``` - -## Expected Output - -```text -Enter the URL of an article you want to chat with, or press Enter for default example: - -Loaded, chunked, and embedded text from https://www.theverge.com/2024/9/12/24242439/openai-o1-model-reasoning-strawberry-chatgpt. - -Enter your question or type quit: Who is the CEO of openai? - -Retrieved chunks: -OpenAI is releasing a new model called o1 , the first in a planned series of “ reasoning ” models that have been trained to answer more complex questions , faster than a human can .
It ’ s being released alongside o1-mini , a smaller , cheaper version . And yes , if you ’ re steeped in AI rumors : this is , in fact , the extremely hyped Strawberry model . For OpenAI , o1 represents a step toward its broader goal of human-like artificial intelligence . - -OpenAI is releasing a new model called o1 , the first in a planned series of “ reasoning ” models that have been trained to answer more complex questions , faster than a human can . It ’ s being released alongside o1-mini , a smaller , cheaper version . And yes , if you ’ re steeped in AI rumors : this is , in fact , the extremely hyped Strawberry model . For OpenAI , o1 represents a step toward its broader goal of human-like artificial intelligence . More practically , it does a better job at writing code and solving multistep problems than previous models . But it ’ s also more expensive and slower to use than GPT-4o . OpenAI is calling this release of o1 a “ preview ” to emphasize how nascent it is . ChatGPT Plus and Team users get access to both o1-preview and o1-mini starting today , while Enterprise and Edu users will get access early next week . - -More practically , it does a better job at writing code and solving multistep problems than previous models . But it ’ s also more expensive and slower to use than GPT-4o . OpenAI is calling this release of o1 a “ preview ” to emphasize how nascent it is . ChatGPT Plus and Team users get access to both o1-preview and o1-mini starting today , while Enterprise and Edu users will get access early next week . OpenAI says it plans to bring o1-mini access to all the free users of ChatGPT but hasn ’ t set a release date yet . Developer access to o1 is really expensive : In the API , o1-preview is $ 15 per 1 million input tokens , or chunks of text parsed by the model , and $ 60 per 1 million output tokens . For comparison , GPT-4o costs $ 5 per 1 million input tokens and $ 15 per 1 million output tokens . - -OpenAI says it plans to bring o1-mini access to all the free users of ChatGPT but hasn ’ t set a release date yet . Developer access to o1 is really expensive : In the API , o1-preview is $ 15 per 1 million input tokens , or chunks of text parsed by the model , and $ 60 per 1 million output tokens . For comparison , GPT-4o costs $ 5 per 1 million input tokens and $ 15 per 1 million output tokens . The training behind o1 is fundamentally different from its predecessors , OpenAI ’ s research lead , Jerry Tworek , tells me , though the company is being vague about the exact details . He says o1 “ has been trained using a completely new optimization algorithm and a new training dataset specifically tailored for it. ” Image : OpenAI OpenAI taught previous GPT models to mimic patterns from its training data . - -LLM Answer: -The text does not mention the CEO of OpenAI. It only discusses the release of a new model called o1 and some details about it, but does not provide information on the company's leadership. - -LLM Claim: The text does not mention the CEO of OpenAI. -Is this claim supported by the context according to bespoke-minicheck? Yes - -LLM Claim: It only discusses the release of a new model called o1 and some details about it, but does not provide information on the company's leadership. -Is this claim supported by the context according to bespoke-minicheck? No -``` - -The second claim is unsupported since the text mentions the research lead. - -Another tricky example: - -```text - -Enter your question or type quit: what sets o1 apart from gpt-4o? 
- -Retrieved chunks: -OpenAI says it plans to bring o1-mini access to all the free users of ChatGPT but hasn ’ t set a release date yet . Developer access to o1 is really expensive : In the API , o1-preview is $ 15 per 1 million input tokens , or chunks of text parsed by the model , and $ 60 per 1 million output tokens . For comparison , GPT-4o costs $ 5 per 1 million input tokens and $ 15 per 1 million output tokens . The training behind o1 is fundamentally different from its predecessors , OpenAI ’ s research lead , Jerry Tworek , tells me , though the company is being vague about the exact details . He says o1 “ has been trained using a completely new optimization algorithm and a new training dataset specifically tailored for it. ” Image : OpenAI OpenAI taught previous GPT models to mimic patterns from its training data . - -He says OpenAI also tested o1 against a qualifying exam for the International Mathematics Olympiad , and while GPT-4o only correctly solved only 13 percent of problems , o1 scored 83 percent . “ We can ’ t say we solved hallucinations ” In online programming contests known as Codeforces competitions , this new model reached the 89th percentile of participants , and OpenAI claims the next update of this model will perform “ similarly to PhD students on challenging benchmark tasks in physics , chemistry and biology. ” At the same time , o1 is not as capable as GPT-4o in a lot of areas . It doesn ’ t do as well on factual knowledge about the world . - -More practically , it does a better job at writing code and solving multistep problems than previous models . But it ’ s also more expensive and slower to use than GPT-4o . OpenAI is calling this release of o1 a “ preview ” to emphasize how nascent it is . ChatGPT Plus and Team users get access to both o1-preview and o1-mini starting today , while Enterprise and Edu users will get access early next week . OpenAI says it plans to bring o1-mini access to all the free users of ChatGPT but hasn ’ t set a release date yet . Developer access to o1 is really expensive : In the API , o1-preview is $ 15 per 1 million input tokens , or chunks of text parsed by the model , and $ 60 per 1 million output tokens . For comparison , GPT-4o costs $ 5 per 1 million input tokens and $ 15 per 1 million output tokens . - -“ We can ’ t say we solved hallucinations ” In online programming contests known as Codeforces competitions , this new model reached the 89th percentile of participants , and OpenAI claims the next update of this model will perform “ similarly to PhD students on challenging benchmark tasks in physics , chemistry and biology. ” At the same time , o1 is not as capable as GPT-4o in a lot of areas . It doesn ’ t do as well on factual knowledge about the world . It also doesn ’ t have the ability to browse the web or process files and images . Still , the company believes it represents a brand-new class of capabilities . It was named o1 to indicate “ resetting the counter back to 1. ” “ I ’ m gon na be honest : I think we ’ re terrible at naming , traditionally , ” McGrew says . -LLM Answer: According to the text, several things set o1 apart from GPT-4o: - -* In online programming contests (Codeforces competitions), o1 scored 83% correct solutions compared to GPT-4o's 13%. -* The training behind o1 is "fundamentally different" from its predecessors, including a completely new optimization algorithm and a new training dataset specifically tailored for it. 
-* o1 has been shown to perform similarly to PhD students on challenging benchmark tasks in physics, chemistry, and biology, while GPT-4o does not have this level of performance. -* However, o1 is also less capable than GPT-4o in some areas, particularly with regard to factual knowledge about the world. - -LLM Claim: According to the text, several things set o1 apart from GPT-4o: - -* In online programming contests (Codeforces competitions), o1 scored 83% correct solutions compared to GPT-4o's 13%. -Is this claim supported by the context according to bespoke-minicheck? Yes - -LLM Claim: * The training behind o1 is "fundamentally different" from its predecessors, including a completely new optimization algorithm and a new training dataset specifically tailored for it. -Is this claim supported by the context according to bespoke-minicheck? Yes - -LLM Claim: * o1 has been shown to perform similarly to PhD students on challenging benchmark tasks in physics, chemistry, and biology, while GPT-4o does not have this level of performance. -Is this claim supported by the context according to bespoke-minicheck? No - -LLM Claim: * However, o1 is also less capable than GPT-4o in some areas, particularly with regard to factual knowledge about the world. -Is this claim supported by the context according to bespoke-minicheck? Yes -``` - -We see that the third claim "* o1 has been shown to perform similarly to PhD students on challenging benchmark tasks in physics, chemistry, and biology, while GPT-4o does not have this level of performance." is not supported by the context. This is because the context only mentions that o1 "is claimed to perform" which is different from "has been shown to perform". diff --git a/examples/python-grounded-factuality-rag-check/main.py b/examples/python-grounded-factuality-rag-check/main.py deleted file mode 100644 index dd18f3ef..00000000 --- a/examples/python-grounded-factuality-rag-check/main.py +++ /dev/null @@ -1,137 +0,0 @@ -import ollama -import warnings -from mattsollamatools import chunker -from newspaper import Article -import numpy as np -from sklearn.neighbors import NearestNeighbors -import nltk - -warnings.filterwarnings( - "ignore", category=FutureWarning, module="transformers.tokenization_utils_base" -) -nltk.download("punkt_tab", quiet=True) - - -def getArticleText(url): - """Gets the text of an article from a URL. - - Often there are a bunch of ads and menus on pages for a news article. - This uses newspaper3k to get just the text of just the article. - """ - article = Article(url) - article.download() - article.parse() - return article.text - - -def knn_search(question_embedding, embeddings, k=5): - """Performs K-nearest neighbors (KNN) search""" - X = np.array( - [item["embedding"] for article in embeddings for item in article["embeddings"]] - ) - source_texts = [ - item["source"] for article in embeddings for item in article["embeddings"] - ] - - # Fit a KNN model on the embeddings - knn = NearestNeighbors(n_neighbors=k, metric="cosine") - knn.fit(X) - - # Find the indices and distances of the k-nearest neighbors. - _, indices = knn.kneighbors(question_embedding, n_neighbors=k) - - # Get the indices and source texts of the best matches - best_matches = [(indices[0][i], source_texts[indices[0][i]]) for i in range(k)] - - return best_matches - - -def check(document, claim): - """Checks if the claim is supported by the document by calling bespoke-minicheck. - - Returns Yes/yes if the claim is supported by the document, No/no otherwise. 
- Support for logits will be added in the future. - - bespoke-minicheck's system prompt is defined as: - 'Determine whether the provided claim is consistent with the corresponding - document. Consistency in this context implies that all information presented in the claim - is substantiated by the document. If not, it should be considered inconsistent. Please - assess the claim's consistency with the document by responding with either "Yes" or "No".' - - bespoke-minicheck's user prompt is defined as: - "Document: {document}\nClaim: {claim}" - """ - prompt = f"Document: {document}\nClaim: {claim}" - response = ollama.generate( - model="bespoke-minicheck", prompt=prompt, options={"num_predict": 2, "temperature": 0.0} - ) - return response["response"].strip() - - -if __name__ == "__main__": - allEmbeddings = [] - default_url = "https://www.theverge.com/2024/9/12/24242439/openai-o1-model-reasoning-strawberry-chatgpt" - user_input = input( - "Enter the URL of an article you want to chat with, or press Enter for default example: " - ) - article_url = user_input.strip() if user_input.strip() else default_url - article = {} - article["embeddings"] = [] - article["url"] = article_url - text = getArticleText(article_url) - chunks = chunker(text) - - # Embed (batch) chunks using ollama - embeddings = ollama.embed(model="all-minilm", input=chunks)["embeddings"] - - for chunk, embedding in zip(chunks, embeddings): - item = {} - item["source"] = chunk - item["embedding"] = embedding - item["sourcelength"] = len(chunk) - article["embeddings"].append(item) - - allEmbeddings.append(article) - - print(f"\nLoaded, chunked, and embedded text from {article_url}.\n") - - while True: - # Input a question from the user - # For example, "Who is the chief research officer?" - question = input("Enter your question or type quit: ") - - if question.lower() == "quit": - break - - # Embed the user's question using ollama.embed - question_embedding = ollama.embed(model="all-minilm", input=question)[ - "embeddings" - ] - - # Perform KNN search to find the best matches (indices and source text) - best_matches = knn_search(question_embedding, allEmbeddings, k=4) - - sourcetext = "\n\n".join([source_text for (_, source_text) in best_matches]) - - print(f"\nRetrieved chunks: \n{sourcetext}\n") - - # Give the retrieved chunks and question to the chat model - system_prompt = f"Only use the following information to answer the question. Do not use anything else: {sourcetext}" - - ollama_response = ollama.generate( - model="llama3.2", - prompt=question, - system=system_prompt, - options={"stream": False}, - ) - - answer = ollama_response["response"] - print(f"LLM Answer:\n{answer}\n") - - # Check each sentence in the response for grounded factuality - if answer: - for claim in nltk.sent_tokenize(answer): - print(f"LLM Claim: {claim}") - print( - f"Is this claim supported by the context according to bespoke-minicheck? 
{check(sourcetext, claim)}\n" - ) diff --git a/examples/python-grounded-factuality-rag-check/requirements.txt b/examples/python-grounded-factuality-rag-check/requirements.txt deleted file mode 100644 index d4bd6df3..00000000 --- a/examples/python-grounded-factuality-rag-check/requirements.txt +++ /dev/null @@ -1,8 +0,0 @@ -ollama -lxml==5.3.0 -lxml_html_clean==0.2.2 -mattsollamatools==0.0.25 -newspaper3k==0.2.8 -nltk==3.9.1 -numpy==1.26.4 -scikit-learn==1.5.2 \ No newline at end of file diff --git a/examples/python-grounded-factuality-simple-check/main.py b/examples/python-grounded-factuality-simple-check/main.py deleted file mode 100644 index 0204f3b3..00000000 --- a/examples/python-grounded-factuality-simple-check/main.py +++ /dev/null @@ -1,53 +0,0 @@ -"""Simple example to demonstrate how to use the bespoke-minicheck model.""" - -import ollama - -# NOTE: ollama must be running for this to work, start the ollama app or run `ollama serve` - - -def check(document, claim): - """Checks if the claim is supported by the document by calling bespoke-minicheck. - - Returns Yes/yes if the claim is supported by the document, No/no otherwise. - Support for logits will be added in the future. - - bespoke-minicheck's system prompt is defined as: - 'Determine whether the provided claim is consistent with the corresponding - document. Consistency in this context implies that all information presented in the claim - is substantiated by the document. If not, it should be considered inconsistent. Please - assess the claim's consistency with the document by responding with either "Yes" or "No".' - - bespoke-minicheck's user prompt is defined as: - "Document: {document}\nClaim: {claim}" - """ - prompt = f"Document: {document}\nClaim: {claim}" - response = ollama.generate( - model="bespoke-minicheck", prompt=prompt, options={"num_predict": 2, "temperature": 0.0} - ) - return response["response"].strip() - - -def get_user_input(prompt): - user_input = input(prompt) - if not user_input: - exit() - print() - return user_input - - -def main(): - while True: - # Get a document from the user (e.g. "Ryan likes running and biking.") - document = get_user_input("Enter a document: ") - # Get a claim from the user (e.g. "Ryan likes to run.") - claim = get_user_input("Enter a claim: ") - # Check if the claim is supported by the document - grounded_factuality_check = check(document, claim) - print( - f"Is the claim supported by the document according to bespoke-minicheck? {grounded_factuality_check}" - ) - print("\n\n") - - -if __name__ == "__main__": - main() diff --git a/examples/python-grounded-factuality-simple-check/readme.md b/examples/python-grounded-factuality-simple-check/readme.md deleted file mode 100644 index b164b5eb..00000000 --- a/examples/python-grounded-factuality-simple-check/readme.md +++ /dev/null @@ -1,54 +0,0 @@ -# Simple Bespoke-Minicheck Example - -`bespoke-minicheck` is a model for checking if a claim is supported by a document. It is used through the **generate** endpoint, which is called in this example with a `prompt` that includes the expected formatting of the user input. - -## Running the Example - -1. Ensure you have the `bespoke-minicheck` model installed: - - ```bash - ollama pull bespoke-minicheck - ``` - -2. Install the dependencies: - - ```bash - pip install -r requirements.txt - ``` - -3. Run the program: - - ```bash - python main.py - ``` - -4. Enter a document and a claim when prompted: - - ```bash - Enter a document: Roses are red. - - Enter a claim: Roses are blue. 
- ``` - - The claim and document are then given to `bespoke-minicheck` as inputs, which then generates a response (Yes or No) indicating whether the claim is supported by the document. - - ```bash - Is the claim supported by the document according to bespoke-minicheck? No - ``` - -## More Examples - -Document ([source](https://en.wikipedia.org/wiki/Apple_I)): ->The Apple Computer 1 (Apple-1[a]), later known predominantly as the Apple I (written with a Roman numeral),[b] is an 8-bit motherboard-only personal computer designed by Steve Wozniak[5][6] and released by the Apple Computer Company (now Apple Inc.) in 1976. The company was initially formed to sell the Apple I – its first product – and would later become the world's largest technology company.[7] The idea of starting a company and selling the computer came from Wozniak's friend and Apple co-founder Steve Jobs.[8][9] One of the main innovations of the Apple I was that it included video display terminal circuitry on its circuit board, allowing it to connect to a low-cost composite video monitor or television, instead of an expensive computer terminal, compared to most existing computers at the time. - -Claim: ->The Apple I is a 16-bit computer. - -Expected output: ->Is the claim supported by the document according to bespoke-minicheck? **No** - -Claim: ->Apple was originally called the Apple Computer Company. - -Expected output: ->Is the claim supported by the document according to bespoke-minicheck? **Yes** diff --git a/examples/python-grounded-factuality-simple-check/requirements.txt b/examples/python-grounded-factuality-simple-check/requirements.txt deleted file mode 100644 index 403abba6..00000000 --- a/examples/python-grounded-factuality-simple-check/requirements.txt +++ /dev/null @@ -1 +0,0 @@ -ollama diff --git a/examples/python-json-datagenerator/predefinedschema.py b/examples/python-json-datagenerator/predefinedschema.py deleted file mode 100644 index 91463760..00000000 --- a/examples/python-json-datagenerator/predefinedschema.py +++ /dev/null @@ -1,31 +0,0 @@ -import requests -import json -import random - -model = "llama3.2" -template = { - "firstName": "", - "lastName": "", - "address": { - "street": "", - "city": "", - "state": "", - "zipCode": "" - }, - "phoneNumber": "" -} - -prompt = f"generate one realistically believable sample data set of a person's first name, last name, address in the US, and phone number. \nUse the following template: {json.dumps(template)}." - -data = { - "prompt": prompt, - "model": model, - "format": "json", - "stream": False, - "options": {"temperature": 2.5, "top_p": 0.99, "top_k": 100}, -} - -print("Generating a sample user") -response = requests.post("http://localhost:11434/api/generate", json=data, stream=False) -json_data = json.loads(response.text) -print(json.dumps(json.loads(json_data["response"]), indent=2)) diff --git a/examples/python-json-datagenerator/randomaddresses.py b/examples/python-json-datagenerator/randomaddresses.py deleted file mode 100644 index 3df59d32..00000000 --- a/examples/python-json-datagenerator/randomaddresses.py +++ /dev/null @@ -1,31 +0,0 @@ -import requests -import json -import random - -countries = [ - "United States", - "United Kingdom", - "the Netherlands", - "Germany", - "Mexico", - "Canada", - "France", -] -country = random.choice(countries) -model = "llama3.2" - -prompt = f"generate one realistically believable sample data set of a person's first name, last name, address in {country}, and phone number. Do not use common names. Respond using JSON.
Key names should have no backslashes, values should use plain ascii with no special characters." - -data = { - "prompt": prompt, - "model": model, - "format": "json", - "stream": False, - "options": {"temperature": 2.5, "top_p": 0.99, "top_k": 100}, -} - -print(f"Generating a sample user in {country}") -response = requests.post("http://localhost:11434/api/generate", json=data, stream=False) -json_data = json.loads(response.text) - -print(json.dumps(json.loads(json_data["response"]), indent=2)) diff --git a/examples/python-json-datagenerator/readme.md b/examples/python-json-datagenerator/readme.md deleted file mode 100644 index a551e1dd..00000000 --- a/examples/python-json-datagenerator/readme.md +++ /dev/null @@ -1,60 +0,0 @@ -# JSON Output Example - -![llmjson 2023-11-10 15_31_31](https://github.com/ollama/ollama/assets/633681/e599d986-9b4a-4118-81a4-4cfe7e22da25) - -There are two Python scripts in this example. `randomaddresses.py` generates random addresses from different countries. `predefinedschema.py` sets a template for the model to fill in. - -## Running the Example - -1. Ensure you have the `llama3.2` model installed: - - ```bash - ollama pull llama3.2 - ``` - -2. Install the Python Requirements. - - ```bash - pip install -r requirements.txt - ``` - -3. Run the Random Addresses example: - - ```bash - python randomaddresses.py - ``` - -4. Run the Predefined Schema example: - - ```bash - python predefinedschema.py - ``` - -## Review the Code - -Both programs are basically the same, with a different prompt for each, demonstrating two different ideas. The key part of getting JSON out of a model is to state in the prompt or system prompt that it should respond using JSON, and to specify the `format` as `json` in the data body. - -```python -prompt = f"generate one realistically believable sample data set of a person's first name, last name, address in {country}, and phone number. Do not use common names. Respond using JSON. Key names should have no backslashes, values should use plain ascii with no special characters." - -data = { - "prompt": prompt, - "model": model, - "format": "json", - "stream": False, - "options": {"temperature": 2.5, "top_p": 0.99, "top_k": 100}, -} -``` - -When running `randomaddresses.py` you will see that the schema changes and adapts to the chosen country. - -In `predefinedschema.py`, a template has been specified in the prompt as well. It's been defined as JSON and then dumped into the prompt string to make it easier to work with. - -Both examples turn streaming off so that we end up with the completed JSON all at once. We need to convert the `response.text` to JSON so that when we output it as a string we can set the indent spacing to make the output easy to read. - -```python -response = requests.post("http://localhost:11434/api/generate", json=data, stream=False) -json_data = json.loads(response.text) - -print(json.dumps(json.loads(json_data["response"]), indent=2)) -``` diff --git a/examples/python-json-datagenerator/requirements.txt b/examples/python-json-datagenerator/requirements.txt deleted file mode 100644 index 9688b8ec..00000000 --- a/examples/python-json-datagenerator/requirements.txt +++ /dev/null @@ -1 +0,0 @@ -Requests==2.31.0 diff --git a/examples/python-loganalysis/Modelfile b/examples/python-loganalysis/Modelfile deleted file mode 100644 index b28aa0c0..00000000 --- a/examples/python-loganalysis/Modelfile +++ /dev/null @@ -1,8 +0,0 @@ -FROM codebooga:latest - -SYSTEM """ -You are a log file analyzer.
You will receive a set of lines from a log file for some software application, find the errors and other interesting aspects of the logs, and explain them so a new user can understand what they mean. If there are any steps they can do to resolve them, list the steps in your answer. -""" - -PARAMETER temperature 0.3 - diff --git a/examples/python-loganalysis/loganalysis.py b/examples/python-loganalysis/loganalysis.py deleted file mode 100644 index 4c7eccbd..00000000 --- a/examples/python-loganalysis/loganalysis.py +++ /dev/null @@ -1,41 +0,0 @@ -import sys -import re -import requests -import json - -# prelines and postlines represent the number of lines of context to include in the output around the error -prelines = 10 -postlines = 10 - -def find_errors_in_log_file(): - if len(sys.argv) < 2: - print("Usage: python loganalysis.py ") - return - - log_file_path = sys.argv[1] - with open(log_file_path, 'r') as log_file: - log_lines = log_file.readlines() - - error_logs = [] - for i, line in enumerate(log_lines): - if "error" in line.lower(): - start_index = max(0, i - prelines) - end_index = min(len(log_lines), i + postlines + 1) - error_logs.extend(log_lines[start_index:end_index]) - - return error_logs - -error_logs = find_errors_in_log_file() - -data = { - "prompt": "\n".join(error_logs), - "model": "mattw/loganalyzer" -} - -response = requests.post("http://localhost:11434/api/generate", json=data, stream=True) -for line in response.iter_lines(): - if line: - json_data = json.loads(line) - if json_data['done'] == False: - print(json_data['response'], end='', flush=True) - diff --git a/examples/python-loganalysis/logtest.logfile b/examples/python-loganalysis/logtest.logfile deleted file mode 100644 index e4181bfe..00000000 --- a/examples/python-loganalysis/logtest.logfile +++ /dev/null @@ -1,32 +0,0 @@ -2023-11-10 07:17:40 /docker-entrypoint.sh: /docker-entrypoint.d/ is not empty, will attempt to perform configuration -2023-11-10 07:17:40 /docker-entrypoint.sh: Looking for shell scripts in /docker-entrypoint.d/ -2023-11-10 07:17:40 /docker-entrypoint.sh: Launching /docker-entrypoint.d/10-listen-on-ipv6-by-default.sh -2023-11-10 07:17:40 10-listen-on-ipv6-by-default.sh: info: Getting the checksum of /etc/nginx/conf.d/default.conf -2023-11-10 07:17:40 10-listen-on-ipv6-by-default.sh: info: Enabled listen on IPv6 in /etc/nginx/conf.d/default.conf -2023-11-10 07:17:40 /docker-entrypoint.sh: Sourcing /docker-entrypoint.d/15-local-resolvers.envsh -2023-11-10 07:17:40 /docker-entrypoint.sh: Launching /docker-entrypoint.d/20-envsubst-on-templates.sh -2023-11-10 07:17:40 /docker-entrypoint.sh: Launching /docker-entrypoint.d/30-tune-worker-processes.sh -2023-11-10 07:17:40 /docker-entrypoint.sh: Configuration complete; ready for start up -2023-11-10 07:17:40 2023/11/10 13:17:40 [notice] 1#1: using the "epoll" event method -2023-11-10 07:17:40 2023/11/10 13:17:40 [notice] 1#1: nginx/1.25.3 -2023-11-10 07:17:40 2023/11/10 13:17:40 [notice] 1#1: built by gcc 12.2.0 (Debian 12.2.0-14) -2023-11-10 07:17:40 2023/11/10 13:17:40 [notice] 1#1: OS: Linux 6.4.16-linuxkit -2023-11-10 07:17:40 2023/11/10 13:17:40 [notice] 1#1: getrlimit(RLIMIT_NOFILE): 1048576:1048576 -2023-11-10 07:17:40 2023/11/10 13:17:40 [notice] 1#1: start worker processes -2023-11-10 07:17:40 2023/11/10 13:17:40 [notice] 1#1: start worker process 29 -2023-11-10 07:17:40 2023/11/10 13:17:40 [notice] 1#1: start worker process 30 -2023-11-10 07:17:40 2023/11/10 13:17:40 [notice] 1#1: start worker process 31 -2023-11-10 07:17:40 2023/11/10 
13:17:40 [notice] 1#1: start worker process 32 -2023-11-10 07:17:40 2023/11/10 13:17:40 [notice] 1#1: start worker process 33 -2023-11-10 07:17:40 2023/11/10 13:17:40 [notice] 1#1: start worker process 34 -2023-11-10 07:17:40 2023/11/10 13:17:40 [notice] 1#1: start worker process 35 -2023-11-10 07:17:40 2023/11/10 13:17:40 [notice] 1#1: start worker process 36 -2023-11-10 07:17:40 2023/11/10 13:17:40 [notice] 1#1: start worker process 37 -2023-11-10 07:17:40 2023/11/10 13:17:40 [notice] 1#1: start worker process 38 -2023-11-10 07:17:44 192.168.65.1 - - [10/Nov/2023:13:17:43 +0000] "GET / HTTP/1.1" 200 615 "-" "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/119.0.0.0 Safari/537.36" "-" -2023-11-10 07:17:44 2023/11/10 13:17:44 [error] 29#29: *1 open() "/usr/share/nginx/html/favicon.ico" failed (2: No such file or directory), client: 192.168.65.1, server: localhost, request: "GET /favicon.ico HTTP/1.1", host: "localhost:8080", referrer: "http://localhost:8080/" -2023-11-10 07:17:44 192.168.65.1 - - [10/Nov/2023:13:17:44 +0000] "GET /favicon.ico HTTP/1.1" 404 555 "http://localhost:8080/" "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/119.0.0.0 Safari/537.36" "-" -2023-11-10 07:17:50 2023/11/10 13:17:50 [error] 29#29: *1 open() "/usr/share/nginx/html/ahstat" failed (2: No such file or directory), client: 192.168.65.1, server: localhost, request: "GET /ahstat HTTP/1.1", host: "localhost:8080" -2023-11-10 07:17:50 192.168.65.1 - - [10/Nov/2023:13:17:50 +0000] "GET /ahstat HTTP/1.1" 404 555 "-" "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/119.0.0.0 Safari/537.36" "-" -2023-11-10 07:18:53 2023/11/10 13:18:53 [error] 29#29: *1 open() "/usr/share/nginx/html/ahstat" failed (2: No such file or directory), client: 192.168.65.1, server: localhost, request: "GET /ahstat HTTP/1.1", host: "localhost:8080" -2023-11-10 07:18:53 192.168.65.1 - - [10/Nov/2023:13:18:53 +0000] "GET /ahstat HTTP/1.1" 404 555 "-" "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/119.0.0.0 Safari/537.36" "-" diff --git a/examples/python-loganalysis/readme.md b/examples/python-loganalysis/readme.md deleted file mode 100644 index 03bab672..00000000 --- a/examples/python-loganalysis/readme.md +++ /dev/null @@ -1,72 +0,0 @@ -# Log Analysis example - -![loganalyzer 2023-11-10 08_53_29](https://github.com/ollama/ollama/assets/633681/ad30f1fc-321f-4953-8914-e30e24db9921) - -This example shows one possible way to create a log file analyzer. It uses the model **mattw/loganalyzer** which is based on **codebooga**, a 34b parameter model. - -To use it, run: - -`python loganalysis.py ` - -You can try this with the `logtest.logfile` file included in this directory. - -## Running the Example - -1. Ensure you have the `mattw/loganalyzer` model installed: - - ```bash - ollama pull mattw/loganalyzer - ``` - -2. Install the Python Requirements. - - ```bash - python3 -m venv .venv - source .venv/bin/activate - pip install -r requirements.txt - ``` - -3. Run the example: - - ```bash - python loganalysis.py logtest.logfile - ``` - -## Review the code - -The first part of this example is a Modelfile that takes `codebooga` and applies a new System Prompt: - -```plaintext -SYSTEM """ -You are a log file analyzer. 
You will receive a set of lines from a log file for some software application. Find the errors and other interesting aspects of the logs, and explain them so a new user can understand what they mean. If there are any steps they can do to resolve them, list the steps in your answer. -""" -``` - -This model is available at https://ollama.com/mattw/loganalyzer. You can customize it and add it to your own namespace using the command `ollama create <namespace/modelname> -f <modelfile>` then `ollama push <namespace/modelname>`. - -Then loganalysis.py scans all the lines in the given log file and searches for the word 'error'. When the word is found, the 10 lines before and after are set as the prompt for a call to the Generate API. - -```python -data = { - "prompt": "\n".join(error_logs), - "model": "mattw/loganalyzer" -} -``` - -Finally, the streamed output is parsed and the response field in the output is printed to the screen. - -```python -response = requests.post("http://localhost:11434/api/generate", json=data, stream=True) -for line in response.iter_lines(): - if line: - json_data = json.loads(line) - if not json_data['done']: - print(json_data['response'], end='', flush=True) -``` - -## Next Steps - -There is a lot more that can be done here. This is a simple way to detect errors: looking for the word 'error'. Perhaps it would be interesting to find anomalous activity in the logs instead. You could create embeddings for each line and compare them, looking for similar lines, or apply a Levenshtein distance algorithm to find similar lines and help identify the anomalous ones. - -Try different models and different prompts to analyze the data. You could consider adding retrieval augmented generation (RAG) to this to help understand newer log formats. diff --git a/examples/python-loganalysis/requirements.txt b/examples/python-loganalysis/requirements.txt deleted file mode 100644 index e7cb17ef..00000000 --- a/examples/python-loganalysis/requirements.txt +++ /dev/null @@ -1 +0,0 @@ -Requests>=2.32.3 diff --git a/examples/python-rag-newssummary/README.md b/examples/python-rag-newssummary/README.md deleted file mode 100644 index 51a68be1..00000000 --- a/examples/python-rag-newssummary/README.md +++ /dev/null @@ -1,35 +0,0 @@ -# News Summarizer - -This example goes through a series of steps: - - 1. You choose a topic area (e.g., "news", "NVidia", "music", etc.). - 2. Gets the most recent articles on that topic from various sources. - 3. Uses Ollama to summarize each article. - 4. Creates chunks of sentences from each article. - 5. Uses Sentence Transformers to generate embeddings for each of those chunks. - 6. You enter a question regarding the summaries shown. - 7. Uses Sentence Transformers to generate an embedding for that question. - 8. Uses the embedded question to find the most similar chunks. - 9. Feeds all that to Ollama to generate a good answer to your question based on these news articles. - -This example lets you pick from a few different topic areas, then summarizes the most recent *x* articles for that topic. It then creates chunks of sentences from each article and generates embeddings for each of those chunks. - -## Running the Example - -1. Ensure you have the `mistral-openorca` model installed: - - ```bash - ollama pull mistral-openorca - ``` - -2. Install the Python Requirements. - - ```bash - pip install -r requirements.txt - ``` - -3.
Run the example: - - ```bash - python summ.py - ``` diff --git a/examples/python-rag-newssummary/requirements.txt b/examples/python-rag-newssummary/requirements.txt deleted file mode 100644 index 1a92729a..00000000 --- a/examples/python-rag-newssummary/requirements.txt +++ /dev/null @@ -1,9 +0,0 @@ -beautifulsoup4==4.12.2 -feedparser==6.0.10 -mattsollamatools==0.0.8 -newspaper3k==0.2.8 -nltk==3.8.1 -numpy==1.24.3 -Requests==2.31.0 -scikit_learn==1.3.0 -sentence_transformers==2.2.2 diff --git a/examples/python-rag-newssummary/summ.py b/examples/python-rag-newssummary/summ.py deleted file mode 100644 index 4993cfca..00000000 --- a/examples/python-rag-newssummary/summ.py +++ /dev/null @@ -1,86 +0,0 @@ -import curses -import json -from utils import get_url_for_topic, topic_urls, menu, getUrls, get_summary, getArticleText, knn_search -import requests -from sentence_transformers import SentenceTransformer -from mattsollamatools import chunker - -if __name__ == "__main__": - feed_url = curses.wrapper(menu) - print("Here is your news summary:\n") - urls = getUrls(feed_url, n=5) - model = SentenceTransformer('all-MiniLM-L6-v2') - allEmbeddings = [] - - for url in urls: - article = {} - article['embeddings'] = [] - article['url'] = url - text = getArticleText(url) - summary = get_summary(text) - chunks = chunker(text) # Split the article text into sentence chunks with mattsollamatools - embeddings = model.encode(chunks) - for (chunk, embedding) in zip(chunks, embeddings): - item = {} - item['source'] = chunk - item['embedding'] = embedding.tolist() # Convert NumPy array to list - item['sourcelength'] = len(chunk) - article['embeddings'].append(item) - - allEmbeddings.append(article) - - print(f"{summary}\n") - - - while True: - context = [] - # Input a question from the user - question = input("Enter your question about the news, or type quit: ") - - if question.lower() == 'quit': - break - - # Embed the user's question - question_embedding = model.encode([question]) - - # Perform KNN search to find the best matches (indices and source text) - best_matches = knn_search(question_embedding, allEmbeddings, k=10) - - - sourcetext = "" - for i, (index, source_text) in enumerate(best_matches, start=1): - sourcetext += f"{i}. Index: {index}, Source Text: {source_text}\n" - - systemPrompt = f"Only use the following information to answer the question.
Do not use anything else: {sourcetext}" - - url = "http://localhost:11434/api/generate" - - payload = { - "model": "mistral-openorca", - "prompt": question, - "system": systemPrompt, - "stream": False, - "context": context - } - - # Convert the payload to a JSON string - payload_json = json.dumps(payload) - - # Set the headers to specify JSON content - headers = { - "Content-Type": "application/json" - } - - # Send the POST request - response = requests.post(url, data=payload_json, headers=headers) - - # Check the response - if response.status_code == 200: - output = json.loads(response.text) - context = output['context'] - print(output['response'] + "\n") - - - else: - print(f"Request failed with status code {response.status_code}") - diff --git a/examples/python-rag-newssummary/utils.py b/examples/python-rag-newssummary/utils.py deleted file mode 100644 index 0bce011b..00000000 --- a/examples/python-rag-newssummary/utils.py +++ /dev/null @@ -1,108 +0,0 @@ -import curses -import feedparser -import requests -import unicodedata -import json -from newspaper import Article -from bs4 import BeautifulSoup -from nltk.tokenize import sent_tokenize, word_tokenize -import numpy as np -from sklearn.neighbors import NearestNeighbors -from mattsollamatools import chunker - -# Create a dictionary to store topics and their URLs -topic_urls = { - "Mac": "https://9to5mac.com/guides/mac/feed", - "News": "http://www.npr.org/rss/rss.php?id=1001", - "Nvidia": "https://nvidianews.nvidia.com/releases.xml", - "Raspberry Pi": "https://www.raspberrypi.com/news/feed/", - "Music": "https://www.billboard.com/c/music/music-news/feed/" -} - -# Use curses to create a menu of topics and return the chosen feed URL -def menu(stdscr): - chosen_url = get_url_for_topic(stdscr) - - stdscr.addstr(len(topic_urls) + 3, 0, f"Selected feed URL: {chosen_url}") - stdscr.refresh() - - return chosen_url - -# You have chosen a topic. Now return the url for that topic -def get_url_for_topic(stdscr): - curses.curs_set(0) # Hide the cursor - stdscr.clear() - - stdscr.addstr(0, 0, "Choose a topic using the arrow keys (Press Enter to select):") - - # Create a list of topics - topics = list(topic_urls.keys()) - current_topic = 0 - - while True: - for i, topic in enumerate(topics): - if i == current_topic: - stdscr.addstr(i + 2, 2, f"> {topic}") - else: - stdscr.addstr(i + 2, 2, f" {topic}") - - stdscr.refresh() - - key = stdscr.getch() - - if key == curses.KEY_DOWN and current_topic < len(topics) - 1: - current_topic += 1 - elif key == curses.KEY_UP and current_topic > 0: - current_topic -= 1 - elif key == 10: # Enter key - return topic_urls[topics[current_topic]] - -# Get the last N URLs from an RSS feed -def getUrls(feed_url, n=20): - feed = feedparser.parse(feed_url) - entries = feed.entries[-n:] - urls = [entry.link for entry in entries] - return urls - -# Often there are a bunch of ads and menus on pages for a news article. This uses newspaper3k to get just the text of the article. -def getArticleText(url): - article = Article(url) - article.download() - article.parse() - return article.text - -def get_summary(text): - systemPrompt = "Write a concise summary of the text, return your responses with 5 lines that cover the key points of the text given."
- prompt = text - - url = "http://localhost:11434/api/generate" - - payload = { - "model": "mistral-openorca", - "prompt": prompt, - "system": systemPrompt, - "stream": False - } - payload_json = json.dumps(payload) - headers = {"Content-Type": "application/json"} - response = requests.post(url, data=payload_json, headers=headers) - - return json.loads(response.text)["response"] - -# Perform K-nearest neighbors (KNN) search -def knn_search(question_embedding, embeddings, k=5): - X = np.array([item['embedding'] for article in embeddings for item in article['embeddings']]) - source_texts = [item['source'] for article in embeddings for item in article['embeddings']] - - # Fit a KNN model on the embeddings - knn = NearestNeighbors(n_neighbors=k, metric='cosine') - knn.fit(X) - - # Find the indices and distances of the k-nearest neighbors - distances, indices = knn.kneighbors(question_embedding, n_neighbors=k) - - # Get the indices and source texts of the best matches - best_matches = [(indices[0][i], source_texts[indices[0][i]]) for i in range(k)] - - return best_matches diff --git a/examples/python-simplechat/client.py b/examples/python-simplechat/client.py deleted file mode 100644 index 6ef14ffc..00000000 --- a/examples/python-simplechat/client.py +++ /dev/null @@ -1,48 +0,0 @@ -import json -import requests - -# NOTE: ollama must be running for this to work, start the ollama app or run `ollama serve` -model = "llama3.2" # TODO: update this for whatever model you wish to use - - -def chat(messages): - r = requests.post( - "http://0.0.0.0:11434/api/chat", - json={"model": model, "messages": messages, "stream": True}, - stream=True - ) - r.raise_for_status() - output = "" - - for line in r.iter_lines(): - body = json.loads(line) - if "error" in body: - raise Exception(body["error"]) - if body.get("done") is False: - message = body.get("message", "") - content = message.get("content", "") - output += content - # the response streams one token at a time, print that as we receive it - print(content, end="", flush=True) - - if body.get("done", False): - message["content"] = output - return message - - -def main(): - messages = [] - - while True: - user_input = input("Enter a prompt: ") - if not user_input: - exit() - print() - messages.append({"role": "user", "content": user_input}) - message = chat(messages) - messages.append(message) - print("\n\n") - - -if __name__ == "__main__": - main() diff --git a/examples/python-simplechat/readme.md b/examples/python-simplechat/readme.md deleted file mode 100644 index a4a2dfc1..00000000 --- a/examples/python-simplechat/readme.md +++ /dev/null @@ -1,44 +0,0 @@ -# Simple Chat Example - -The **chat** endpoint is one of two ways to generate text from an LLM with Ollama, and is introduced in version 0.1.14. At a high level, you provide the endpoint an array of objects with a role and content specified. Then with each output and prompt, you add more of those role/content objects, which builds up the history. - -## Running the Example - -1. Ensure you have the `llama3.2` model installed: - - ```bash - ollama pull llama3.2 - ``` - -2. Install the Python Requirements. - - ```bash - pip install -r requirements.txt - ``` - -3. 
Run the example: - - ```bash - python client.py - ``` - -## Review the Code - -You can see in the **chat** function that calling the endpoint is as simple as: - -```python -r = requests.post( - "http://0.0.0.0:11434/api/chat", - json={"model": model, "messages": messages, "stream": True}, - stream=True, -) -``` - -With the **generate** endpoint, you need to provide a `prompt`. But with **chat**, you provide `messages`. And the resulting stream of responses includes a `message` object with a `content` field. - -The final JSON object doesn't provide the full content, so you will need to build the content yourself. - -In the **main** function, we collect `user_input`, add it to the list of messages, and pass that list to the chat function. When the LLM is done responding, the output is appended to the list as another message. - -## Next Steps - -In this example, all generations are kept. You might want to experiment with summarizing everything older than the last 10 messages to enable a longer history while using less context. diff --git a/examples/python-simplechat/requirements.txt b/examples/python-simplechat/requirements.txt deleted file mode 100644 index 9688b8ec..00000000 --- a/examples/python-simplechat/requirements.txt +++ /dev/null @@ -1 +0,0 @@ -Requests==2.31.0 diff --git a/examples/python-simplegenerate/README.md b/examples/python-simplegenerate/README.md deleted file mode 100644 index a9175207..00000000 --- a/examples/python-simplegenerate/README.md +++ /dev/null @@ -1,29 +0,0 @@ -# Simple Generate Example - -This is a simple example using the **Generate** endpoint. - -## Running the Example - -1. Ensure you have the `stablelm-zephyr` model installed: - - ```bash - ollama pull stablelm-zephyr - ``` - -2. Install the Python Requirements. - - ```bash - pip install -r requirements.txt - ``` - -3. Run the example: - - ```bash - python client.py - ``` - -## Review the Code - -The **main** function simply asks for input, then passes that to the generate function. The context returned by generate is then passed back in on the next run, which is what keeps the model aware of the earlier conversation. - -The **generate** function uses `requests.post` to call `/api/generate`, passing the model, prompt, and context. The `generate` endpoint returns a stream of JSON objects; the code iterates through them, printing each `response` value as it arrives. The final JSON object includes the full context of the conversation so far, and that is the return value from the function.
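Since the context round-trip is the heart of this example, here is a minimal non-streaming sketch of the same idea. It is not part of the original example code: it assumes an Ollama server on localhost:11434, reuses the `stablelm-zephyr` model pulled above, and relies on the fact that with `"stream": False` the endpoint returns a single JSON object whose `context` field can be fed into the next request.

```python
# Minimal sketch of the /api/generate context round-trip, without streaming.
# Assumes an Ollama server is running locally and the model has been pulled.
import requests

model = "stablelm-zephyr"  # any pulled model should work here

def ask(prompt, context):
    r = requests.post(
        "http://localhost:11434/api/generate",
        json={"model": model, "prompt": prompt, "context": context, "stream": False},
    )
    r.raise_for_status()
    body = r.json()
    print(body["response"])
    return body["context"]  # pass this back in to continue the conversation

context = []  # an empty context starts a fresh conversation
context = ask("Name three things you might find in a log file.", context)
context = ask("Summarize that in one sentence.", context)  # sees the prior turn
```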
diff --git a/examples/python-simplegenerate/client.py b/examples/python-simplegenerate/client.py deleted file mode 100644 index 7b5cf810..00000000 --- a/examples/python-simplegenerate/client.py +++ /dev/null @@ -1,40 +0,0 @@ -import json -import requests - -# NOTE: ollama must be running for this to work, start the ollama app or run `ollama serve` -model = 'stablelm-zephyr' # TODO: update this for whatever model you wish to use - -def generate(prompt, context): - r = requests.post('http://localhost:11434/api/generate', - json={ - 'model': model, - 'prompt': prompt, - 'context': context, - }, - stream=True) - r.raise_for_status() - - for line in r.iter_lines(): - body = json.loads(line) - response_part = body.get('response', '') - # the response streams one token at a time, print that as we receive it - print(response_part, end='', flush=True) - - if 'error' in body: - raise Exception(body['error']) - - if body.get('done', False): - return body['context'] - -def main(): - context = [] # the context stores a conversation history, you can use this to make the model more context aware - while True: - user_input = input("Enter a prompt: ") - if not user_input: - exit() - print() - context = generate(user_input, context) - print() - -if __name__ == "__main__": - main() diff --git a/examples/python-simplegenerate/requirements.txt b/examples/python-simplegenerate/requirements.txt deleted file mode 100644 index 9688b8ec..00000000 --- a/examples/python-simplegenerate/requirements.txt +++ /dev/null @@ -1 +0,0 @@ -Requests==2.31.0 diff --git a/examples/typescript-functioncalling/extractemail.ts b/examples/typescript-functioncalling/extractemail.ts deleted file mode 100644 index a2f0b2d1..00000000 --- a/examples/typescript-functioncalling/extractemail.ts +++ /dev/null @@ -1,118 +0,0 @@ -import { Ollama } from "ollama-node"; -import { readFile } from "fs/promises"; - -// function to be called on events -function reportEvents(name: string, date: string, location: string) { - const nameString = name ? `${name}` : `an event`; - const dateString = date ? ` on ${date}` : ``; - const locationString = location ? ` at ${location}` : ``; - console.log(`You have an event: ${nameString}${dateString}${locationString}`) -} - -// function to be called on addresses -function reportAddresses(address) { - for (const field in address) { - if (address[field]) { - if (field === "city") { - const city = address.city; - const state = address.state ? `, ${address.state}` : ''; - const zip = address.zip ? ` ${address.zip}` : ''; - console.log(`${city}${state}${zip}`); - break; - } else { - console.log(`${address[field]}`); - } - } - } - console.log(``); -} - -async function main() { - - const ollama = new Ollama(); - - const systemprompt = `You will be given a text along with a prompt and a schema. You will have to extract the information requested in the prompt from the text and generate output in JSON observing the schema provided. If the schema shows a type of integer or number, you must only show a integer for that field. A string should always be a valid string. If a value is unknown, leave it empty. 
Output the JSON with extra spaces to ensure that it pretty prints.` - - const schema = { - "eventsQuantity": { - "type": "integer", - "description": "The number of events in the source text" - }, - "addressesQuantity": { - "type": "integer", - "description": "The number of addresses in the source text" - }, - "events": [{ - name: { - "type": "string", - description: "Name of the event" - }, - "date": { - "type": "string", - "description": "Date of the event" - }, - "location": { - "type": "string", - "description": "Location of the event" - }, - "extraInfo": { - "type": "string", - "description": "Any extra information that is provided about the event." - } - }], - "people": [{ - "name": { - "type": "string", - "description": "Name of the person" - }, - "company": { - "type": "string", - "description": "Name of the company where they work" - }, - "street": { - "type": "string", - "description": "Street address of the person or company. This is only the street name and the numerical address. Do not include city, state, or zip of the address in this field." - }, - "city": { - "type": "string", - "description": "City portion of the address of the person or company" - }, - "state": { - "type": "string", - "description": "State portion of the address of the person or company" - }, - "zip": { - "type": "string", - "description": "Zip code of the person or company" - }, - "extraInfo": { - "type": "string", - "description": "Any extra information that is provided about the location." - } - }] - } - - const textcontent = await readFile("./info.txt", "utf-8").then((text) => text.split(" ").slice(0, 2000).join(" ")); - - const prompt = `The source text is a series of emails that have been put into a single file. They are separated by three dashes. Review the source text and determine the full address of the person sending each of the emails as well as any events that we need to track. If they provide a company address use that. If any extra info is provided, such as a description of the place, or a floor, add it to extraInfo. The first field in the address JSON is quantity of events and should be set to the number of events tracked and the second field should be set to the number of addresses tracked in the file. Don't stuff an event into the output that isn't an event. Only add data to the mostly appropriate field. Don't make up fields that aren't in the schema. If there isn't a value for a field, use null. 
Output should be in JSON.\n\nSchema: \n${JSON.stringify(schema, null, 2)}\n\nSource Text:\n${textcontent}` - - await ollama.setModel("neural-chat"); - ollama.setSystemPrompt(systemprompt); - ollama.setJSONFormat(true); - const data = await ollama.generate(prompt); - const output = JSON.parse(data.output); - const events = output.events; - const addresses = output.people; - - console.log(`Here are your ${output.eventsQuantity} events:`); - for (const event of events) { - reportEvents(event.name, event.date, event.location); - } - - console.log(`\n\nHere are your ${output.addressesQuantity} addresses:`); - for (const address of addresses) { - reportAddresses(address); - } -} - -main(); \ No newline at end of file diff --git a/examples/typescript-functioncalling/extractwp.ts b/examples/typescript-functioncalling/extractwp.ts deleted file mode 100644 index b199607d..00000000 --- a/examples/typescript-functioncalling/extractwp.ts +++ /dev/null @@ -1,38 +0,0 @@ -import { Ollama } from "ollama-node"; -import { readFile } from "fs/promises"; - -async function main() { - - const ollama = new Ollama(); - - // Set the system prompt to prepare the model to receive a prompt and a schema and set some rules for the output. - const systemprompt = `You will be given a text along with a prompt and a schema. You will have to extract the information requested in the prompt from the text and generate output in JSON observing the schema provided. If the schema shows a type of integer or number, you must only show a integer for that field. A string should always be a valid string. If a value is unknown, leave it empty. Output the JSON with extra spaces to ensure that it pretty prints.` - - const schema = { - "people": [{ - "name": { - "type": "string", - "description": "Name of the person" - }, - "title": { - "type": "string", - "description": "Title of the person" - } - }], - } - - // Depending on the model chosen, you may be limited by the size of the context window, so limit the context to 2000 words. - const textcontent = await readFile("./wp.txt", "utf-8").then((text) => text.split(" ").slice(0, 2000).join(" ")); - - // Specific instructions for this task - const prompt = `Review the source text and determine the 10 most important people to focus on. Then extract the name and title for those people. Output should be in JSON.\n\nSchema: \n${JSON.stringify(schema, null, 2)}\n\nSource Text:\n${textcontent}` - - await ollama.setModel("neural-chat"); - ollama.setSystemPrompt(systemprompt); - - // setJSONFormat is the equivalent of setting 'format: json' in the API - ollama.setJSONFormat(true); - await ollama.streamingGenerate(prompt, (word) => { process.stdout.write(word) }) -} - -main(); \ No newline at end of file diff --git a/examples/typescript-functioncalling/info.txt b/examples/typescript-functioncalling/info.txt deleted file mode 100644 index 4fe0e1ca..00000000 --- a/examples/typescript-functioncalling/info.txt +++ /dev/null @@ -1,17 +0,0 @@ ---- -Hi matt, - -thanks for letting me know that you are going to come today, November 16, for my tea party. My address is 123 Falk St on Bainbridge Island. I live in the house with the red door. I will be home all day so just come by whenever you want. - -Fred - ---- -Great, send the check to our office at 1917 1st St, Seattle, WA 98101. I will let you know when we receive it. - -Mark Richardson -Big Corp ---- -We are looking forward to seeing you at our Local AI Meetup. It will be held on December 3. It will be at the offices of Enormous Co. 
Our address is 344 1st Ave, Seattle, WA 98101. We will be meeting in the conference room on the 3rd floor. - -Barbara Reilly -Enormous Co. \ No newline at end of file diff --git a/examples/typescript-functioncalling/package-lock.json b/examples/typescript-functioncalling/package-lock.json deleted file mode 100644 index c555ddae..00000000 --- a/examples/typescript-functioncalling/package-lock.json +++ /dev/null @@ -1,519 +0,0 @@ -{ - "name": "typescript-functioncalling", - "lockfileVersion": 3, - "requires": true, - "packages": { - "": { - "dependencies": { - "ollama-node": "^0.1.27" - }, - "devDependencies": { - "tsx": "^4.1.2", - "typescript": "^5.2.2" - } - }, - "node_modules/@esbuild/android-arm": { - "version": "0.18.20", - "resolved": "https://registry.npmjs.org/@esbuild/android-arm/-/android-arm-0.18.20.tgz", - "integrity": "sha512-fyi7TDI/ijKKNZTUJAQqiG5T7YjJXgnzkURqmGj13C6dCqckZBLdl4h7bkhHt/t0WP+zO9/zwroDvANaOqO5Sw==", - "cpu": [ - "arm" - ], - "dev": true, - "optional": true, - "os": [ - "android" - ], - "engines": { - "node": ">=12" - } - }, - "node_modules/@esbuild/android-arm64": { - "version": "0.18.20", - "resolved": "https://registry.npmjs.org/@esbuild/android-arm64/-/android-arm64-0.18.20.tgz", - "integrity": "sha512-Nz4rJcchGDtENV0eMKUNa6L12zz2zBDXuhj/Vjh18zGqB44Bi7MBMSXjgunJgjRhCmKOjnPuZp4Mb6OKqtMHLQ==", - "cpu": [ - "arm64" - ], - "dev": true, - "optional": true, - "os": [ - "android" - ], - "engines": { - "node": ">=12" - } - }, - "node_modules/@esbuild/android-x64": { - "version": "0.18.20", - "resolved": "https://registry.npmjs.org/@esbuild/android-x64/-/android-x64-0.18.20.tgz", - "integrity": "sha512-8GDdlePJA8D6zlZYJV/jnrRAi6rOiNaCC/JclcXpB+KIuvfBN4owLtgzY2bsxnx666XjJx2kDPUmnTtR8qKQUg==", - "cpu": [ - "x64" - ], - "dev": true, - "optional": true, - "os": [ - "android" - ], - "engines": { - "node": ">=12" - } - }, - "node_modules/@esbuild/darwin-arm64": { - "version": "0.18.20", - "resolved": "https://registry.npmjs.org/@esbuild/darwin-arm64/-/darwin-arm64-0.18.20.tgz", - "integrity": "sha512-bxRHW5kHU38zS2lPTPOyuyTm+S+eobPUnTNkdJEfAddYgEcll4xkT8DB9d2008DtTbl7uJag2HuE5NZAZgnNEA==", - "cpu": [ - "arm64" - ], - "dev": true, - "optional": true, - "os": [ - "darwin" - ], - "engines": { - "node": ">=12" - } - }, - "node_modules/@esbuild/darwin-x64": { - "version": "0.18.20", - "resolved": "https://registry.npmjs.org/@esbuild/darwin-x64/-/darwin-x64-0.18.20.tgz", - "integrity": "sha512-pc5gxlMDxzm513qPGbCbDukOdsGtKhfxD1zJKXjCCcU7ju50O7MeAZ8c4krSJcOIJGFR+qx21yMMVYwiQvyTyQ==", - "cpu": [ - "x64" - ], - "dev": true, - "optional": true, - "os": [ - "darwin" - ], - "engines": { - "node": ">=12" - } - }, - "node_modules/@esbuild/freebsd-arm64": { - "version": "0.18.20", - "resolved": "https://registry.npmjs.org/@esbuild/freebsd-arm64/-/freebsd-arm64-0.18.20.tgz", - "integrity": "sha512-yqDQHy4QHevpMAaxhhIwYPMv1NECwOvIpGCZkECn8w2WFHXjEwrBn3CeNIYsibZ/iZEUemj++M26W3cNR5h+Tw==", - "cpu": [ - "arm64" - ], - "dev": true, - "optional": true, - "os": [ - "freebsd" - ], - "engines": { - "node": ">=12" - } - }, - "node_modules/@esbuild/freebsd-x64": { - "version": "0.18.20", - "resolved": "https://registry.npmjs.org/@esbuild/freebsd-x64/-/freebsd-x64-0.18.20.tgz", - "integrity": "sha512-tgWRPPuQsd3RmBZwarGVHZQvtzfEBOreNuxEMKFcd5DaDn2PbBxfwLcj4+aenoh7ctXcbXmOQIn8HI6mCSw5MQ==", - "cpu": [ - "x64" - ], - "dev": true, - "optional": true, - "os": [ - "freebsd" - ], - "engines": { - "node": ">=12" - } - }, - "node_modules/@esbuild/linux-arm": { - "version": "0.18.20", - "resolved": 
"https://registry.npmjs.org/@esbuild/linux-arm/-/linux-arm-0.18.20.tgz", - "integrity": "sha512-/5bHkMWnq1EgKr1V+Ybz3s1hWXok7mDFUMQ4cG10AfW3wL02PSZi5kFpYKrptDsgb2WAJIvRcDm+qIvXf/apvg==", - "cpu": [ - "arm" - ], - "dev": true, - "optional": true, - "os": [ - "linux" - ], - "engines": { - "node": ">=12" - } - }, - "node_modules/@esbuild/linux-arm64": { - "version": "0.18.20", - "resolved": "https://registry.npmjs.org/@esbuild/linux-arm64/-/linux-arm64-0.18.20.tgz", - "integrity": "sha512-2YbscF+UL7SQAVIpnWvYwM+3LskyDmPhe31pE7/aoTMFKKzIc9lLbyGUpmmb8a8AixOL61sQ/mFh3jEjHYFvdA==", - "cpu": [ - "arm64" - ], - "dev": true, - "optional": true, - "os": [ - "linux" - ], - "engines": { - "node": ">=12" - } - }, - "node_modules/@esbuild/linux-ia32": { - "version": "0.18.20", - "resolved": "https://registry.npmjs.org/@esbuild/linux-ia32/-/linux-ia32-0.18.20.tgz", - "integrity": "sha512-P4etWwq6IsReT0E1KHU40bOnzMHoH73aXp96Fs8TIT6z9Hu8G6+0SHSw9i2isWrD2nbx2qo5yUqACgdfVGx7TA==", - "cpu": [ - "ia32" - ], - "dev": true, - "optional": true, - "os": [ - "linux" - ], - "engines": { - "node": ">=12" - } - }, - "node_modules/@esbuild/linux-loong64": { - "version": "0.18.20", - "resolved": "https://registry.npmjs.org/@esbuild/linux-loong64/-/linux-loong64-0.18.20.tgz", - "integrity": "sha512-nXW8nqBTrOpDLPgPY9uV+/1DjxoQ7DoB2N8eocyq8I9XuqJ7BiAMDMf9n1xZM9TgW0J8zrquIb/A7s3BJv7rjg==", - "cpu": [ - "loong64" - ], - "dev": true, - "optional": true, - "os": [ - "linux" - ], - "engines": { - "node": ">=12" - } - }, - "node_modules/@esbuild/linux-mips64el": { - "version": "0.18.20", - "resolved": "https://registry.npmjs.org/@esbuild/linux-mips64el/-/linux-mips64el-0.18.20.tgz", - "integrity": "sha512-d5NeaXZcHp8PzYy5VnXV3VSd2D328Zb+9dEq5HE6bw6+N86JVPExrA6O68OPwobntbNJ0pzCpUFZTo3w0GyetQ==", - "cpu": [ - "mips64el" - ], - "dev": true, - "optional": true, - "os": [ - "linux" - ], - "engines": { - "node": ">=12" - } - }, - "node_modules/@esbuild/linux-ppc64": { - "version": "0.18.20", - "resolved": "https://registry.npmjs.org/@esbuild/linux-ppc64/-/linux-ppc64-0.18.20.tgz", - "integrity": "sha512-WHPyeScRNcmANnLQkq6AfyXRFr5D6N2sKgkFo2FqguP44Nw2eyDlbTdZwd9GYk98DZG9QItIiTlFLHJHjxP3FA==", - "cpu": [ - "ppc64" - ], - "dev": true, - "optional": true, - "os": [ - "linux" - ], - "engines": { - "node": ">=12" - } - }, - "node_modules/@esbuild/linux-riscv64": { - "version": "0.18.20", - "resolved": "https://registry.npmjs.org/@esbuild/linux-riscv64/-/linux-riscv64-0.18.20.tgz", - "integrity": "sha512-WSxo6h5ecI5XH34KC7w5veNnKkju3zBRLEQNY7mv5mtBmrP/MjNBCAlsM2u5hDBlS3NGcTQpoBvRzqBcRtpq1A==", - "cpu": [ - "riscv64" - ], - "dev": true, - "optional": true, - "os": [ - "linux" - ], - "engines": { - "node": ">=12" - } - }, - "node_modules/@esbuild/linux-s390x": { - "version": "0.18.20", - "resolved": "https://registry.npmjs.org/@esbuild/linux-s390x/-/linux-s390x-0.18.20.tgz", - "integrity": "sha512-+8231GMs3mAEth6Ja1iK0a1sQ3ohfcpzpRLH8uuc5/KVDFneH6jtAJLFGafpzpMRO6DzJ6AvXKze9LfFMrIHVQ==", - "cpu": [ - "s390x" - ], - "dev": true, - "optional": true, - "os": [ - "linux" - ], - "engines": { - "node": ">=12" - } - }, - "node_modules/@esbuild/linux-x64": { - "version": "0.18.20", - "resolved": "https://registry.npmjs.org/@esbuild/linux-x64/-/linux-x64-0.18.20.tgz", - "integrity": "sha512-UYqiqemphJcNsFEskc73jQ7B9jgwjWrSayxawS6UVFZGWrAAtkzjxSqnoclCXxWtfwLdzU+vTpcNYhpn43uP1w==", - "cpu": [ - "x64" - ], - "dev": true, - "optional": true, - "os": [ - "linux" - ], - "engines": { - "node": ">=12" - } - }, - "node_modules/@esbuild/netbsd-x64": { - 
"version": "0.18.20", - "resolved": "https://registry.npmjs.org/@esbuild/netbsd-x64/-/netbsd-x64-0.18.20.tgz", - "integrity": "sha512-iO1c++VP6xUBUmltHZoMtCUdPlnPGdBom6IrO4gyKPFFVBKioIImVooR5I83nTew5UOYrk3gIJhbZh8X44y06A==", - "cpu": [ - "x64" - ], - "dev": true, - "optional": true, - "os": [ - "netbsd" - ], - "engines": { - "node": ">=12" - } - }, - "node_modules/@esbuild/openbsd-x64": { - "version": "0.18.20", - "resolved": "https://registry.npmjs.org/@esbuild/openbsd-x64/-/openbsd-x64-0.18.20.tgz", - "integrity": "sha512-e5e4YSsuQfX4cxcygw/UCPIEP6wbIL+se3sxPdCiMbFLBWu0eiZOJ7WoD+ptCLrmjZBK1Wk7I6D/I3NglUGOxg==", - "cpu": [ - "x64" - ], - "dev": true, - "optional": true, - "os": [ - "openbsd" - ], - "engines": { - "node": ">=12" - } - }, - "node_modules/@esbuild/sunos-x64": { - "version": "0.18.20", - "resolved": "https://registry.npmjs.org/@esbuild/sunos-x64/-/sunos-x64-0.18.20.tgz", - "integrity": "sha512-kDbFRFp0YpTQVVrqUd5FTYmWo45zGaXe0X8E1G/LKFC0v8x0vWrhOWSLITcCn63lmZIxfOMXtCfti/RxN/0wnQ==", - "cpu": [ - "x64" - ], - "dev": true, - "optional": true, - "os": [ - "sunos" - ], - "engines": { - "node": ">=12" - } - }, - "node_modules/@esbuild/win32-arm64": { - "version": "0.18.20", - "resolved": "https://registry.npmjs.org/@esbuild/win32-arm64/-/win32-arm64-0.18.20.tgz", - "integrity": "sha512-ddYFR6ItYgoaq4v4JmQQaAI5s7npztfV4Ag6NrhiaW0RrnOXqBkgwZLofVTlq1daVTQNhtI5oieTvkRPfZrePg==", - "cpu": [ - "arm64" - ], - "dev": true, - "optional": true, - "os": [ - "win32" - ], - "engines": { - "node": ">=12" - } - }, - "node_modules/@esbuild/win32-ia32": { - "version": "0.18.20", - "resolved": "https://registry.npmjs.org/@esbuild/win32-ia32/-/win32-ia32-0.18.20.tgz", - "integrity": "sha512-Wv7QBi3ID/rROT08SABTS7eV4hX26sVduqDOTe1MvGMjNd3EjOz4b7zeexIR62GTIEKrfJXKL9LFxTYgkyeu7g==", - "cpu": [ - "ia32" - ], - "dev": true, - "optional": true, - "os": [ - "win32" - ], - "engines": { - "node": ">=12" - } - }, - "node_modules/@esbuild/win32-x64": { - "version": "0.18.20", - "resolved": "https://registry.npmjs.org/@esbuild/win32-x64/-/win32-x64-0.18.20.tgz", - "integrity": "sha512-kTdfRcSiDfQca/y9QIkng02avJ+NCaQvrMejlsB3RRv5sE9rRoeBPISaZpKxHELzRxZyLvNts1P27W3wV+8geQ==", - "cpu": [ - "x64" - ], - "dev": true, - "optional": true, - "os": [ - "win32" - ], - "engines": { - "node": ">=12" - } - }, - "node_modules/@types/node": { - "version": "20.9.0", - "resolved": "https://registry.npmjs.org/@types/node/-/node-20.9.0.tgz", - "integrity": "sha512-nekiGu2NDb1BcVofVcEKMIwzlx4NjHlcjhoxxKBNLtz15Y1z7MYf549DFvkHSId02Ax6kGwWntIBPC3l/JZcmw==", - "dependencies": { - "undici-types": "~5.26.4" - } - }, - "node_modules/buffer-from": { - "version": "1.1.2", - "resolved": "https://registry.npmjs.org/buffer-from/-/buffer-from-1.1.2.tgz", - "integrity": "sha512-E+XQCRwSbaaiChtv6k6Dwgc+bx+Bs6vuKJHHl5kox/BaKbhiXzqQOwK4cO22yElGp2OCmjwVhT3HmxgyPGnJfQ==", - "dev": true - }, - "node_modules/esbuild": { - "version": "0.18.20", - "resolved": "https://registry.npmjs.org/esbuild/-/esbuild-0.18.20.tgz", - "integrity": "sha512-ceqxoedUrcayh7Y7ZX6NdbbDzGROiyVBgC4PriJThBKSVPWnnFHZAkfI1lJT8QFkOwH4qOS2SJkS4wvpGl8BpA==", - "dev": true, - "hasInstallScript": true, - "bin": { - "esbuild": "bin/esbuild" - }, - "engines": { - "node": ">=12" - }, - "optionalDependencies": { - "@esbuild/android-arm": "0.18.20", - "@esbuild/android-arm64": "0.18.20", - "@esbuild/android-x64": "0.18.20", - "@esbuild/darwin-arm64": "0.18.20", - "@esbuild/darwin-x64": "0.18.20", - "@esbuild/freebsd-arm64": "0.18.20", - "@esbuild/freebsd-x64": "0.18.20", - 
"@esbuild/linux-arm": "0.18.20", - "@esbuild/linux-arm64": "0.18.20", - "@esbuild/linux-ia32": "0.18.20", - "@esbuild/linux-loong64": "0.18.20", - "@esbuild/linux-mips64el": "0.18.20", - "@esbuild/linux-ppc64": "0.18.20", - "@esbuild/linux-riscv64": "0.18.20", - "@esbuild/linux-s390x": "0.18.20", - "@esbuild/linux-x64": "0.18.20", - "@esbuild/netbsd-x64": "0.18.20", - "@esbuild/openbsd-x64": "0.18.20", - "@esbuild/sunos-x64": "0.18.20", - "@esbuild/win32-arm64": "0.18.20", - "@esbuild/win32-ia32": "0.18.20", - "@esbuild/win32-x64": "0.18.20" - } - }, - "node_modules/fsevents": { - "version": "2.3.3", - "resolved": "https://registry.npmjs.org/fsevents/-/fsevents-2.3.3.tgz", - "integrity": "sha512-5xoDfX+fL7faATnagmWPpbFtwh/R77WmMMqqHGS65C3vvB0YHrgF+B1YmZ3441tMj5n63k0212XNoJwzlhffQw==", - "dev": true, - "hasInstallScript": true, - "optional": true, - "os": [ - "darwin" - ], - "engines": { - "node": "^8.16.0 || ^10.6.0 || >=11.0.0" - } - }, - "node_modules/get-tsconfig": { - "version": "4.7.2", - "resolved": "https://registry.npmjs.org/get-tsconfig/-/get-tsconfig-4.7.2.tgz", - "integrity": "sha512-wuMsz4leaj5hbGgg4IvDU0bqJagpftG5l5cXIAvo8uZrqn0NJqwtfupTN00VnkQJPcIRrxYrm1Ue24btpCha2A==", - "dev": true, - "dependencies": { - "resolve-pkg-maps": "^1.0.0" - }, - "funding": { - "url": "https://github.com/privatenumber/get-tsconfig?sponsor=1" - } - }, - "node_modules/ollama-node": { - "version": "0.1.27", - "resolved": "https://registry.npmjs.org/ollama-node/-/ollama-node-0.1.27.tgz", - "integrity": "sha512-tFABPf5P0sXCR5USA31E3tqbge5h/4uf/t5j8/rPvHDo0SDwXeN0kah2J7hIqqkYlO1vLRs0uLC1/Mprgv9t2g==", - "dependencies": { - "@types/node": "^20.8.4" - } - }, - "node_modules/resolve-pkg-maps": { - "version": "1.0.0", - "resolved": "https://registry.npmjs.org/resolve-pkg-maps/-/resolve-pkg-maps-1.0.0.tgz", - "integrity": "sha512-seS2Tj26TBVOC2NIc2rOe2y2ZO7efxITtLZcGSOnHHNOQ7CkiUBfw0Iw2ck6xkIhPwLhKNLS8BO+hEpngQlqzw==", - "dev": true, - "funding": { - "url": "https://github.com/privatenumber/resolve-pkg-maps?sponsor=1" - } - }, - "node_modules/source-map": { - "version": "0.6.1", - "resolved": "https://registry.npmjs.org/source-map/-/source-map-0.6.1.tgz", - "integrity": "sha512-UjgapumWlbMhkBgzT7Ykc5YXUT46F0iKu8SGXq0bcwP5dz/h0Plj6enJqjz1Zbq2l5WaqYnrVbwWOWMyF3F47g==", - "dev": true, - "engines": { - "node": ">=0.10.0" - } - }, - "node_modules/source-map-support": { - "version": "0.5.21", - "resolved": "https://registry.npmjs.org/source-map-support/-/source-map-support-0.5.21.tgz", - "integrity": "sha512-uBHU3L3czsIyYXKX88fdrGovxdSCoTGDRZ6SYXtSRxLZUzHg5P/66Ht6uoUlHu9EZod+inXhKo3qQgwXUT/y1w==", - "dev": true, - "dependencies": { - "buffer-from": "^1.0.0", - "source-map": "^0.6.0" - } - }, - "node_modules/tsx": { - "version": "4.1.2", - "resolved": "https://registry.npmjs.org/tsx/-/tsx-4.1.2.tgz", - "integrity": "sha512-1spM1bFV6MP2s4tO4tDC7g52fsaFdtEWdO4GfGdqi20qUgPbnAJqixOyIAvCSx1DDj3YIUB4CD06owTWUsOAuQ==", - "dev": true, - "dependencies": { - "esbuild": "~0.18.20", - "get-tsconfig": "^4.7.2", - "source-map-support": "^0.5.21" - }, - "bin": { - "tsx": "dist/cli.mjs" - }, - "engines": { - "node": ">=18.0.0" - }, - "optionalDependencies": { - "fsevents": "~2.3.3" - } - }, - "node_modules/typescript": { - "version": "5.2.2", - "resolved": "https://registry.npmjs.org/typescript/-/typescript-5.2.2.tgz", - "integrity": "sha512-mI4WrpHsbCIcwT9cF4FZvr80QUeKvsUsUvKDoR+X/7XHQH98xYD8YHZg7ANtz2GtZt/CBq2QJ0thkGJMHfqc1w==", - "dev": true, - "bin": { - "tsc": "bin/tsc", - "tsserver": "bin/tsserver" - }, - "engines": { - 
"node": ">=14.17" - } - }, - "node_modules/undici-types": { - "version": "5.26.5", - "resolved": "https://registry.npmjs.org/undici-types/-/undici-types-5.26.5.tgz", - "integrity": "sha512-JlCMO+ehdEIKqlFxk6IfVoAUVmgz7cU7zD/h9XZ0qzeosSHmUJVOzSQvvYSYWXkFXC+IfLKSIffhv0sVZup6pA==" - } - } -} diff --git a/examples/typescript-functioncalling/package.json b/examples/typescript-functioncalling/package.json deleted file mode 100644 index 4e8fff8c..00000000 --- a/examples/typescript-functioncalling/package.json +++ /dev/null @@ -1,9 +0,0 @@ -{ - "dependencies": { - "ollama-node": "^0.1.27" - }, - "devDependencies": { - "tsx": "^4.1.2", - "typescript": "^5.2.2" - } -} diff --git a/examples/typescript-functioncalling/readme.md b/examples/typescript-functioncalling/readme.md deleted file mode 100644 index d29379a0..00000000 --- a/examples/typescript-functioncalling/readme.md +++ /dev/null @@ -1,28 +0,0 @@ -# Function calling - -![function calling 2023-11-16 16_12_58](https://github.com/ollama/ollama/assets/633681/a0acc247-9746-45ab-b325-b65dfbbee4fb) - -One of the features added to some models is 'function calling'. It's a bit of a confusing name. It's understandable if you think that means the model can call functions, but that's not what it means. Function calling simply means that the output of the model is formatted in JSON, using a preconfigured schema, and uses the expected types. Then your code can use the output of the model and call functions with it. Using the JSON format in Ollama, you can use any model for function calling. - -The two examples provided can extract information out of the provided texts. The first example uses the first couple of chapters from War and Peace by Lev Nikolayevich Tolstoy, and extracts the names and titles of the characters introduced in the story. The second example uses a more complicated schema to pull out addresses and event information from a series of emails. - -## Running the examples - -1. Clone this repo and navigate to the `examples/typescript-functioncalling` directory. -2. Install the dependencies with `npm install`. -3. Review the `wp.txt` file. -4. Run `tsx extractwp.ts`. -5. Review the `info.txt` file. -6. Run `tsx extractemail.ts`. - -## Review the Code - -Both examples do roughly the same thing with different source material. They both use the same system prompt, which tells the model to expect some instructions and a schema. Then we inject the schema into the prompt and generate an answer. - -The first example, `extractwp.ts`, outputs the resulting JSON to the console, listing the characters introduced at the start of War and Peace. The second example, `extractemail.ts`, is a bit more complicated, extracting two different types of information: addresses and events. It outputs the results to a JSON blob, then the addresses are handed off to one function called `reportAddresses` and the events are handed off to another function called `reportEvents`. - -Notice that both examples are using the model from Intel called `neural-chat`. This is not a model tuned for function calling, yet it performs very well at this task. - -## Next Steps - -Try exporting some of your real emails to the input file and seeing how well the model does. Try pointing the first example at other books. You could even have it cycle through all the sections and maybe add up the number of times any character is seen throughout the book, determining the most important characters. You can also try out different models. 
diff --git a/examples/typescript-functioncalling/wp.txt b/examples/typescript-functioncalling/wp.txt deleted file mode 100644 index 0ef84867..00000000 --- a/examples/typescript-functioncalling/wp.txt +++ /dev/null @@ -1,183 +0,0 @@ -"Well, Prince, so Genoa and Lucca are now just family estates of the Buonapartes. But I warn you, if you don't tell me that this means war, if you still try to defend the infamies and horrors perpetrated by that Antichrist - I really believe he is Antichrist - I will have nothing more to do with you and you are no longer my friend, no longer my 'faithful slave,' as you call yourself! But how do you do? I see I have frightened you - sit down and tell me all the news." - -It was in July, 1805, and the speaker was the well-known Anna Pavlovna Scherer, maid of honor and favorite of the Empress Marya Fedorovna. With these words she greeted Prince Vasili Kuragin, a man of high rank and importance, who was the first to arrive at her reception. Anna Pavlovna had had a cough for some days. She was, as she said, suffering from la grippe; grippe being then a new word in St. Petersburg, used only by the elite. - -All her invitations without exception, written in French, and delivered by a scarlet-liveried footman that morning, ran as follows: - -"If you have nothing better to do, Count (or Prince), and if the prospect of spending an evening with a poor invalid is not too terrible, I shall be very charmed to see you tonight between 7 and 10 - Annette Scherer." - -"Heavens! what a virulent attack!" replied the prince, not in the least disconcerted by this reception. He had just entered, wearing an embroidered court uniform, knee breeches, and shoes, and had stars on his breast and a serene expression on his flat face. He spoke in that refined French in which our grandfathers not only spoke but thought, and with the gentle, patronizing intonation natural to a man of importance who had grown old in society and at court. He went up to Anna Pavlovna, kissed her hand, presenting to her his bald, scented, and shining head, and complacently seated himself on the sofa. - -"First of all, dear friend, tell me how you are. Set your friend's mind at rest," said he without altering his tone, beneath the politeness and affected sympathy of which indifference and even irony could be discerned. - -"Can one be well while suffering morally? Can one be calm in times like these if one has any feeling?" said Anna Pavlovna. "You are staying the whole evening, I hope?" - -"And the fete at the English ambassador's? Today is Wednesday. I must put in an appearance there," said the prince. "My daughter is coming for me to take me there." - -"I thought today's fete had been canceled. I confess all these festivities and fireworks are becoming wearisome." - -"If they had known that you wished it, the entertainment would have been put off," said the prince, who, like a wound-up clock, by force of habit said things he did not even wish to be believed. - -"Don't tease! Well, and what has been decided about Novosiltsev's dispatch? You know everything." - -"What can one say about it?" replied the prince in a cold, listless tone. "What has been decided? They have decided that Buonaparte has burnt his boats, and I believe that we are ready to burn ours." - -Prince Vasili always spoke languidly, like an actor repeating a stale part. Anna Pavlovna Scherer on the contrary, despite her forty years, overflowed with animation and impulsiveness. 
To be an enthusiast had become her social vocation and, sometimes even when she did not feel like it, she became enthusiastic in order not to disappoint the expectations of those who knew her. The subdued smile which, though it did not suit her faded features, always played round her lips expressed, as in a spoiled child, a continual consciousness of her charming defect, which she neither wished, nor could, nor considered it necessary, to correct. - -In the midst of a conversation on political matters Anna Pavlovna burst out: - -"Oh, don't speak to me of Austria. Perhaps I don't understand things, but Austria never has wished, and does not wish, for war. She is betraying us! Russia alone must save Europe. Our gracious sovereign recognizes his high vocation and will be true to it. That is the one thing I have faith in! Our good and wonderful sovereign has to perform the noblest role on earth, and he is so virtuous and noble that God will not forsake him. He will fulfill his vocation and crush the hydra of revolution, which has become more terrible than ever in the person of this murderer and villain! We alone must avenge the blood of the just one.... Whom, I ask you, can we rely on?... England with her commercial spirit will not and cannot understand the Emperor Alexander's loftiness of soul. She has refused to evacuate Malta. She wanted to find, and still seeks, some secret motive in our actions. What answer did Novosiltsev get? None. The English have not understood and cannot understand the self-abnegation of our Emperor who wants nothing for himself, but only desires the good of mankind. And what have they promised? Nothing! And what little they have promised they will not perform! Prussia has always declared that Buonaparte is invincible, and that all Europe is powerless before him.... And I don't believe a word that Hardenburg says, or Haugwitz either. This famous Prussian neutrality is just a trap. I have faith only in God and the lofty destiny of our adored monarch. He will save Europe!" - -She suddenly paused, smiling at her own impetuosity. - -"I think," said the prince with a smile, "that if you had been sent instead of our dear Wintzingerode you would have captured the King of Prussia's consent by assault. You are so eloquent. Will you give me a cup of tea?" - -"In a moment. A propos," she added, becoming calm again, "I am expecting two very interesting men tonight, le Vicomte de Mortemart, who is connected with the Montmorencys through the Rohans, one of the best French families. He is one of the genuine emigres, the good ones. And also the Abbe Morio. Do you know that profound thinker? He has been received by the Emperor. Had you heard?" - -"I shall be delighted to meet them," said the prince. "But tell me," he added with studied carelessness as if it had only just occurred to him, though the question he was about to ask was the chief motive of his visit, "is it true that the Dowager Empress wants Baron Funke to be appointed first secretary at Vienna? The baron by all accounts is a poor creature." - -Prince Vasili wished to obtain this post for his son, but others were trying through the Dowager Empress Marya Fedorovna to secure it for the baron. - -Anna Pavlovna almost closed her eyes to indicate that neither she nor anyone else had a right to criticize what the Empress desired or was pleased with. - -"Baron Funke has been recommended to the Dowager Empress by her sister," was all she said, in a dry and mournful tone. 
- -As she named the Empress, Anna Pavlovna's face suddenly assumed an expression of profound and sincere devotion and respect mingled with sadness, and this occurred every time she mentioned her illustrious patroness. She added that Her Majesty had deigned to show Baron Funke beaucoup d'estime, and again her face clouded over with sadness. - -The prince was silent and looked indifferent. But, with the womanly and courtierlike quickness and tact habitual to her, Anna Pavlovna wished both to rebuke him (for daring to speak as he had done of a man recommended to the Empress) and at the same time to console him, so she said: - -"Now about your family. Do you know that since your daughter came out everyone has been enraptured by her? They say she is amazingly beautiful." - -The prince bowed to signify his respect and gratitude. - -"I often think," she continued after a short pause, drawing nearer to the prince and smiling amiably at him as if to show that political and social topics were ended and the time had come for intimate conversation - "I often think how unfairly sometimes the joys of life are distributed. Why has fate given you two such splendid children? I don't speak of Anatole, your youngest. I don't like him," she added in a tone admitting of no rejoinder and raising her eyebrows. "Two such charming children. And really you appreciate them less than anyone, and so you don't deserve to have them." - -And she smiled her ecstatic smile. - -"I can't help it," said the prince. "Lavater would have said I lack the bump of paternity." - -"Don't joke; I mean to have a serious talk with you. Do you know I am dissatisfied with your younger son? Between ourselves" (and her face assumed its melancholy expression), "he was mentioned at Her Majesty's and you were pitied...." - -The prince answered nothing, but she looked at him significantly, awaiting a reply. He frowned. - -"What would you have me do?" he said at last. "You know I did all a father could for their education, and they have both turned out fools. Hippolyte is at least a quiet fool, but Anatole is an active one. That is the only difference between them." He said this smiling in a way more natural and animated than usual, so that the wrinkles round his mouth very clearly revealed something unexpectedly coarse and unpleasant. - -"And why are children born to such men as you? If you were not a father there would be nothing I could reproach you with," said Anna Pavlovna, looking up pensively. - -"I am your faithful slave and to you alone I can confess that my children are the bane of my life. It is the cross I have to bear. That is how I explain it to myself. It can't be helped!" - -He said no more, but expressed his resignation to cruel fate by a gesture. Anna Pavlovna meditated. - -"Have you never thought of marrying your prodigal son Anatole?" she asked. "They say old maids have a mania for matchmaking, and though I don't feel that weakness in myself as yet, I know a little person who is very unhappy with her father. She is a relation of yours, Princess Mary Bolkonskaya." - -Prince Vasili did not reply, though, with the quickness of memory and perception befitting a man of the world, he indicated by a movement of the head that he was considering this information. - -"Do you know," he said at last, evidently unable to check the sad current of his thoughts, "that Anatole is costing me forty thousand rubles a year? And," he went on after a pause, "what will it be in five years, if he goes on like this?" 
Presently he added: "That's what we fathers have to put up with.... Is this princess of yours rich?" - -"Her father is very rich and stingy. He lives in the country. He is the well-known Prince Bolkonski who had to retire from the army under the late Emperor, and was nicknamed 'the King of Prussia.' He is very clever but eccentric, and a bore. The poor girl is very unhappy. She has a brother; I think you know him, he married Lise Meinen lately. He is an aide-de-camp of Kutuzov's and will be here tonight." - -"Listen, dear Annette," said the prince, suddenly taking Anna Pavlovna's hand and for some reason drawing it downwards. "Arrange that affair for me and I shall always be your most devoted slave-slafe with an f, as a village elder of mine writes in his reports. She is rich and of good family and that's all I want." - -And with the familiarity and easy grace peculiar to him, he raised the maid of honor's hand to his lips, kissed it, and swung it to and fro as he lay back in his armchair, looking in another direction. - -"Attendez," said Anna Pavlovna, reflecting, "I'll speak to Lise, young Bolkonski's wife, this very evening, and perhaps the thing can be arranged. It shall be on your family's behalf that I'll start my apprenticeship as old maid." - -Anna Pavlovna's drawing room was gradually filling. The highest Petersburg society was assembled there: people differing widely in age and character but alike in the social circle to which they belonged. Prince Vasili's daughter, the beautiful Helene, came to take her father to the ambassador's entertainment; she wore a ball dress and her badge as maid of honor. The youthful little Princess Bolkonskaya, known as la femme la plus seduisante de Petersbourg, * was also there. She had been married during the previous winter, and being pregnant did not go to any large gatherings, but only to small receptions. Prince Vasili's son, Hippolyte, had come with Mortemart, whom he introduced. The Abbe Morio and many others had also come. - -* The most fascinating woman in Petersburg. - -To each new arrival Anna Pavlovna said, "You have not yet seen my aunt," or "You do not know my aunt?" and very gravely conducted him or her to a little old lady, wearing large bows of ribbon in her cap, who had come sailing in from another room as soon as the guests began to arrive; and slowly turning her eyes from the visitor to her aunt, Anna Pavlovna mentioned each one's name and then left them. - -Each visitor performed the ceremony of greeting this old aunt whom not one of them knew, not one of them wanted to know, and not one of them cared about; Anna Pavlovna observed these greetings with mournful and solemn interest and silent approval. The aunt spoke to each of them in the same words, about their health and her own, and the health of Her Majesty, "who, thank God, was better today." And each visitor, though politeness prevented his showing impatience, left the old woman with a sense of relief at having performed a vexatious duty and did not return to her the whole evening. - -The young Princess Bolkonskaya had brought some work in a gold-embroidered velvet bag. Her pretty little upper lip, on which a delicate dark down was just perceptible, was too short for her teeth, but it lifted all the more sweetly, and was especially charming when she occasionally drew it down to meet the lower lip. 
As is always the case with a thoroughly attractive woman, her defect - the shortness of her upper lip and her half-open mouth - seemed to be her own special and peculiar form of beauty. Everyone brightened at the sight of this pretty young woman, so soon to become a mother, so full of life and health, and carrying her burden so lightly. Old men and dull dispirited young ones who looked at her, after being in her company and talking to her a little while, felt as if they too were becoming, like her, full of life and health. All who talked to her, and at each word saw her bright smile and the constant gleam of her white teeth, thought that they were in a specially amiable mood that day. - -The little princess went round the table with quick, short, swaying steps, her workbag on her arm, and gaily spreading out her dress sat down on a sofa near the silver samovar, as if all she was doing was a pleasure to herself and to all around her. "I have brought my work," said she in French, displaying her bag and addressing all present. "Mind, Annette, I hope you have not played a wicked trick on me," she added, turning to her hostess. "You wrote that it was to be quite a small reception, and just see how badly I am dressed." And she spread out her arms to show her short-waisted, lace-trimmed, dainty gray dress, girdled with a broad ribbon just below the breast. - -"Soyez tranquille, Lise, you will always be prettier than anyone else," replied Anna Pavlovna. - -"You know," said the princess in the same tone of voice and still in French, turning to a general, "my husband is deserting me? He is going to get himself killed. Tell me what this wretched war is for?" she added, addressing Prince Vasili, and without waiting for an answer she turned to speak to his daughter, the beautiful Helene. - -"What a delightful woman this little princess is!" said Prince Vasili to Anna Pavlovna. - -One of the next arrivals was a stout, heavily built young man with close-cropped hair, spectacles, the light-colored breeches fashionable at that time, a very high ruffle, and a brown dress coat. This stout young man was an illegitimate son of Count Bezukhov, a well-known grandee of Catherine's time who now lay dying in Moscow. The young man had not yet entered either the military or civil service, as he had only just returned from abroad where he had been educated, and this was his first appearance in society. Anna Pavlovna greeted him with the nod she accorded to the lowest hierarchy in her drawing room. But in spite of this lowest-grade greeting, a look of anxiety and fear, as at the sight of something too large and unsuited to the place, came over her face when she saw Pierre enter. Though he was certainly rather bigger than the other men in the room, her anxiety could only have reference to the clever though shy, but observant and natural, expression which distinguished him from everyone else in that drawing room. - -"It is very good of you, Monsieur Pierre, to come and visit a poor invalid," said Anna Pavlovna, exchanging an alarmed glance with her aunt as she conducted him to her. - -Pierre murmured something unintelligible, and continued to look round as if in search of something. On his way to the aunt he bowed to the little princess with a pleased smile, as to an intimate acquaintance. - -Anna Pavlovna's alarm was justified, for Pierre turned away from the aunt without waiting to hear her speech about Her Majesty's health. Anna Pavlovna in dismay detained him with the words: "Do you know the Abbe Morio? 
He is a most interesting man." - -"Yes, I have heard of his scheme for perpetual peace, and it is very interesting but hardly feasible." - -"You think so?" rejoined Anna Pavlovna in order to say something and get away to attend to her duties as hostess. But Pierre now committed a reverse act of impoliteness. First he had left a lady before she had finished speaking to him, and now he continued to speak to another who wished to get away. With his head bent, and his big feet spread apart, he began explaining his reasons for thinking the abbe's plan chimerical. - -"We will talk of it later," said Anna Pavlovna with a smile. - -And having got rid of this young man who did not know how to behave, she resumed her duties as hostess and continued to listen and watch, ready to help at any point where the conversation might happen to flag. As the foreman of a spinning mill, when he has set the hands to work, goes round and notices here a spindle that has stopped or there one that creaks or makes more noise than it should, and hastens to check the machine or set it in proper motion, so Anna Pavlovna moved about her drawing room, approaching now a silent, now a too-noisy group, and by a word or slight rearrangement kept the conversational machine in steady, proper, and regular motion. But amid these cares her anxiety about Pierre was evident. She kept an anxious watch on him when he approached the group round Mortemart to listen to what was being said there, and again when he passed to another group whose center was the abbe. - -Pierre had been educated abroad, and this reception at Anna Pavlovna's was the first he had attended in Russia. He knew that all the intellectual lights of Petersburg were gathered there and, like a child in a toyshop, did not know which way to look, afraid of missing any clever conversation that was to be heard. Seeing the self-confident and refined expression on the faces of those present he was always expecting to hear something very profound. At last he came up to Morio. Here the conversation seemed interesting and he stood waiting for an opportunity to express his own views, as young people are fond of doing. - -CHAPTER III -Anna Pavlovna's reception was in full swing. The spindles hummed steadily and ceaselessly on all sides. With the exception of the aunt, beside whom sat only one elderly lady, who with her thin careworn face was rather out of place in this brilliant society, the whole company had settled into three groups. One, chiefly masculine, had formed round the abbe. Another, of young people, was grouped round the beautiful Princess Helene, Prince Vasili's daughter, and the little Princess Bolkonskaya, very pretty and rosy, though rather too plump for her age. The third group was gathered round Mortemart and Anna Pavlovna. - -The vicomte was a nice-looking young man with soft features and polished manners, who evidently considered himself a celebrity but out of politeness modestly placed himself at the disposal of the circle in which he found himself. Anna Pavlovna was obviously serving him up as a treat to her guests. As a clever maitre d'hotel serves up as a specially choice delicacy a piece of meat that no one who had seen it in the kitchen would have cared to eat, so Anna Pavlovna served up to her guests, first the vicomte and then the abbe, as peculiarly choice morsels. The group about Mortemart immediately began discussing the murder of the Duc d'Enghien. 
The vicomte said that the Duc d'Enghien had perished by his own magnanimity, and that there were particular reasons for Buonaparte's hatred of him. - -"Ah, yes! Do tell us all about it, Vicomte," said Anna Pavlovna, with a pleasant feeling that there was something A la Louis XV in the sound of that sentence: "Contez nous cela, Vicomte." - -The vicomte bowed and smiled courteously in token of his willingness to comply. Anna Pavlovna arranged a group round him, inviting everyone to listen to his tale. - -"The vicomte knew the duc personally," whispered Anna Pavlovna to one of the guests. "The vicomte is a wonderful raconteur," said she to another. "How evidently he belongs to the best society," said she to a third; and the vicomte was served up to the company in the choicest and most advantageous style, like a well-garnished joint of roast beef on a hot dish. - -The vicomte wished to begin his story and gave a subtle smile. - -"Come over here, Helene, dear," said Anna Pavlovna to the beautiful young princess who was sitting some way off, the center of another group. - -The princess smiled. She rose with the same unchanging smile with which she had first entered the room - the smile of a perfectly beautiful woman. With a slight rustle of her white dress trimmed with moss and ivy, with a gleam of white shoulders, glossy hair, and sparkling diamonds, she passed between the men who made way for her, not looking at any of them but smiling on all, as if graciously allowing each the privilege of admiring her beautiful figure and shapely shoulders, back, and bosom - which in the fashion of those days were very much exposed - and she seemed to bring the glamour of a ballroom with her as she moved toward Anna Pavlovna. Helene was so lovely that not only did she not show any trace of coquetry, but on the contrary she even appeared shy of her unquestionable and all too victorious beauty. She seemed to wish, but to be unable, to diminish its effect. - -"How lovely!" said everyone who saw her; and the vicomte lifted his shoulders and dropped his eyes as if startled by something extraordinary when she took her seat opposite and beamed upon him also with her unchanging smile. - -"Madame, I doubt my ability before such an audience," said he, smilingly inclining his head. - -The princess rested her bare round arm on a little table and considered a reply unnecessary. She smilingly waited. All the time the story was being told she sat upright, glancing now at her beautiful round arm, altered in shape by its pressure on the table, now at her still more beautiful bosom, on which she readjusted a diamond necklace. From time to time she smoothed the folds of her dress, and whenever the story produced an effect she glanced at Anna Pavlovna, at once adopted just the expression she saw on the maid of honor's face, and again relapsed into her radiant smile. - -The little princess had also left the tea table and followed Helene. - -"Wait a moment, I'll get my work.... Now then, what are you thinking of?" she went on, turning to Prince Hippolyte. "Fetch me my workbag." - -There was a general movement as the princess, smiling and talking merrily to everyone at once, sat down and gaily arranged herself in her seat. - -"Now I am all right," she said, and asking the vicomte to begin, she took up her work. - -Prince Hippolyte, having brought the workbag, joined the circle and moving a chair close to hers seated himself beside her. 
- -Le charmant Hippolyte was surprising by his extraordinary resemblance to his beautiful sister, but yet more by the fact that in spite of this resemblance he was exceedingly ugly. His features were like his sister's, but while in her case everything was lit up by a joyous, self-satisfied, youthful, and constant smile of animation, and by the wonderful classic beauty of her figure, his face on the contrary was dulled by imbecility and a constant expression of sullen self-confidence, while his body was thin and weak. His eyes, nose, and mouth all seemed puckered into a vacant, wearied grimace, and his arms and legs always fell into unnatural positions. - -"It's not going to be a ghost story?" said he, sitting down beside the princess and hastily adjusting his lorgnette, as if without this instrument he could not begin to speak. - -"Why no, my dear fellow," said the astonished narrator, shrugging his shoulders. - -"Because I hate ghost stories," said Prince Hippolyte in a tone which showed that he only understood the meaning of his words after he had uttered them. - -He spoke with such self-confidence that his hearers could not be sure whether what he said was very witty or very stupid. He was dressed in a dark-green dress coat, knee breeches of the color of cuisse de nymphe effrayee, as he called it, shoes, and silk stockings. - -The vicomte told his tale very neatly. It was an anecdote, then current, to the effect that the Duc d'Enghien had gone secretly to Paris to visit Mademoiselle George; that at her house he came upon Bonaparte, who also enjoyed the famous actress' favors, and that in his presence Napoleon happened to fall into one of the fainting fits to which he was subject, and was thus at the duc's mercy. The latter spared him, and this magnanimity Bonaparte subsequently repaid by death. - -The story was very pretty and interesting, especially at the point where the rivals suddenly recognized one another; and the ladies looked agitated. - -"Charming!" said Anna Pavlovna with an inquiring glance at the little princess. - -"Charming!" whispered the little princess, sticking the needle into her work as if to testify that the interest and fascination of the story prevented her from going on with it. - -The vicomte appreciated this silent praise and smiling gratefully prepared to continue, but just then Anna Pavlovna, who had kept a watchful eye on the young man who so alarmed her, noticed that he was talking too loudly and vehemently with the abbe, so she hurried to the rescue. Pierre had managed to start a conversation with the abbe about the balance of power, and the latter, evidently interested by the young man's simple-minded eagerness, was explaining his pet theory. Both were talking and listening too eagerly and too naturally, which was why Anna Pavlovna disapproved. - -"The means are ... the balance of power in Europe and the rights of the people," the abbe was saying. "It is only necessary for one powerful nation like Russia - barbaric as she is said to be - to place herself disinterestedly at the head of an alliance having for its object the maintenance of the balance of power of Europe, and it would save the world!" - -"But how are you to get that balance?" Pierre was beginning. - -At that moment Anna Pavlovna came up and, looking severely at Pierre, asked the Italian how he stood Russian climate. The Italian's face instantly changed and assumed an offensively affected, sugary expression, evidently habitual to him when conversing with women. 
- -"I am so enchanted by the brilliancy of the wit and culture of the society, more especially of the feminine society, in which I have had the honor of being received, that I have not yet had time to think of the climate," said he. - -Not letting the abbe and Pierre escape, Anna Pavlovna, the more conveniently to keep them under observation, brought them into the larger circle. - diff --git a/examples/typescript-mentors/.gitignore b/examples/typescript-mentors/.gitignore deleted file mode 100644 index d5f19d89..00000000 --- a/examples/typescript-mentors/.gitignore +++ /dev/null @@ -1,2 +0,0 @@ -node_modules -package-lock.json diff --git a/examples/typescript-mentors/README.md b/examples/typescript-mentors/README.md deleted file mode 100644 index d3611a5e..00000000 --- a/examples/typescript-mentors/README.md +++ /dev/null @@ -1,65 +0,0 @@ -# Ask the Mentors - -This example demonstrates how one would create a set of 'mentors' you can have a conversation with. The mentors are generated using the `character-generator.ts` file. This will use **Stable Beluga 70b** to create a bio and list of verbal ticks and common phrases used by each person. Then `mentors.ts` will take a question, and choose three of the 'mentors' and start a conversation with them. Occasionally, they will talk to each other, and other times they will just deliver a set of monologues. It's fun to see what they do and say. - -## Usage - -1. Add llama3 to have the mentors ask your questions: - - ```bash - ollama pull llama3 - ``` - -2. Install prerequisites: - - ```bash - npm install - ``` - -3. Ask a question: - - ```bash - npm start "what is a jackalope" - ``` - -You can also add your own character to be chosen at random when you ask a question. - -1. Make sure you have the right model installed: - - ```bash - ollama pull stablebeluga2:70b-q4_K_M - ``` - -2. Create a new character: - - ```bash - npm run charactergen "Lorne Greene" - ``` - - You can choose any well-known person you like. This example will create `lornegreene/Modelfile`. - -3. Now you can create a model with this command: - - ```bash - ollama create /lornegreene -f lornegreene/Modelfile - ``` - - `username` is whatever name you set up when you signed up at [https://ollama.com/signup](https://ollama.com/signup). - -4. To add this to your mentors, you will have to update the code as follows. On line 8 of `mentors.ts`, add an object to the array, replacing `` with the username you used above. - - ```bash - {ns: "", char: "Lorne Greene"} - ``` - -## Review the Code - -There are two scripts you can run in this example. The first is the main script to ask the mentors a question. The other one lets you generate a character to add to the mentors. Both scripts are mostly about adjusting the prompts at each inference stage. - -### mentors.ts - -In the **main** function, it starts by generating a list of mentors. This chooses 3 from a list of interesting characters. Then we ask for a question, and then things get interesting. We set the prompt for each of the 3 mentors a little differently. And the 2nd and 3rd mentors see what the previous folks said. The other functions in mentors sets the prompts for each mentor. - -### character-generator.ts - -**Character Generator** simply customizes the prompt to build a character profile for any famous person. And most of the script is just tweaking the prompt. This uses Stable Beluga 2 70b parameters. 
The 70b models tend to do better writing a bio about a character than smaller models, and Stable Beluga seemed to do better than Llama 2. Since this is used at development time for the characters, it doesn't affect the runtime of asking the mentors for their input. diff --git a/examples/typescript-mentors/character-generator.ts b/examples/typescript-mentors/character-generator.ts deleted file mode 100644 index dc5d2f5e..00000000 --- a/examples/typescript-mentors/character-generator.ts +++ /dev/null @@ -1,26 +0,0 @@ -import { Ollama } from 'ollama-node' -import fs from 'fs'; -import path from 'path'; - -async function characterGenerator() { - const character = process.argv[2]; - console.log(`You are creating a character for ${character}.`); - const foldername = character.replace(/\s/g, '').toLowerCase(); - const directory = path.join(__dirname, foldername); - if (!fs.existsSync(directory)) { - fs.mkdirSync(directory, { recursive: true }); - } - - const ollama = new Ollama(); - ollama.setModel("stablebeluga2:70b-q4_K_M"); - const bio = await ollama.generate(`create a bio of ${character} in a single long paragraph. Instead of saying '${character} is...' or '${character} was...' use language like 'You are...' or 'You were...'. Then create a paragraph describing the speaking mannerisms and style of ${character}. Don't include anything about how ${character} looked or what they sounded like, just focus on the words they said. Instead of saying '${character} would say...' use language like 'You should say...'. If you use quotes, always use single quotes instead of double quotes. If there are any specific words or phrases you used a lot, show how you used them. `); - - const thecontents = `FROM llama3\nSYSTEM """\n${bio.response.replace(/(\r\n|\n|\r)/gm, " ").replace('would', 'should')} All answers to questions should be related back to what you are most known for.\n"""`; - - fs.writeFile(path.join(directory, 'Modelfile'), thecontents, (err: any) => { - if (err) throw err; - console.log('The file has been saved!'); - }); -} - -characterGenerator(); diff --git a/examples/typescript-mentors/mentors.ts b/examples/typescript-mentors/mentors.ts deleted file mode 100644 index 17d70476..00000000 --- a/examples/typescript-mentors/mentors.ts +++ /dev/null @@ -1,60 +0,0 @@ -import { Ollama } from 'ollama-node'; - -const mentorCount = 3; -const ollama = new Ollama(); -type Mentor = { ns: string, char: string }; - -function getMentors(): Mentor[] { - const mentors = [{ ns: 'mattw', char: 'Gary Vaynerchuk' }, { ns: 'mattw', char: 'Kanye West'}, {ns: 'mattw', char: 'Martha Stewart'}, {ns: 'mattw', char: 'Neil deGrasse Tyson'}, {ns: 'mattw', char: 'Owen Wilson'}, {ns: 'mattw', char: 'Ronald Reagan'}, {ns: 'mattw', char: 'Donald Trump'}, {ns: 'mattw', char: 'Barack Obama'}, {ns: 'mattw', char: 'Jeff Bezos'}]; - const chosenMentors: Mentor[] = []; - for (let i = 0; i < mentorCount; i++) { - const mentor = mentors[Math.floor(Math.random() * mentors.length)]; - chosenMentors.push(mentor); - mentors.splice(mentors.indexOf(mentor), 1); - } - return chosenMentors; -} - -function getMentorFileName(mentor: Mentor): string { - const model = mentor.char.toLowerCase().replace(/\s/g, ''); - return `${mentor.ns}/${model}`; -} - -async function getSystemPrompt(mentor: Mentor, isLast: boolean, question: string): Promise<string> { - ollama.setModel(getMentorFileName(mentor)); - const info = await ollama.showModelInfo() - let SystemPrompt = info.system || ''; - SystemPrompt += ` You should continue the conversation as if you were
${mentor} and acknowledge the people before you in the conversation. You should adopt their mannerisms and tone, but also not use language they wouldn't use. If they are not known to know about the concept in the question, don't offer an answer. Your answer should be no longer than 1 paragraph. And definitely try not to sound like anyone else. Don't repeat any slang or phrases already used. And if it is a question the original ${mentor} wouldn't have known the answer to, just say that you don't know, in the style of ${mentor}. And think about the time the person lived. Don't use terminology that they wouldn't have used.` - - if (isLast) { - SystemPrompt += ` End your answer with something like I hope our answers help you out`; - } else { - SystemPrompt += ` Remember, this is a conversation, so you don't need a conclusion, but end your answer with a question related to the first question: "${question}".`; - } - return SystemPrompt; -} - -async function main() { - const mentors = getMentors(); - const question = process.argv[2]; - let theConversation = `Here is the conversation so far.\nYou: ${question}\n` - - for await (const mentor of mentors) { - const SystemPrompt = await getSystemPrompt(mentor, mentor === mentors[mentorCount - 1], question); - ollama.setModel(getMentorFileName(mentor)); - ollama.setSystemPrompt(SystemPrompt); - let output = ''; - process.stdout.write(`\n${mentor.char}: `); - for await (const chunk of ollama.streamingGenerate(theConversation + `Continue the conversation as if you were ${mentor.char} on the question "${question}".`)) { - if (chunk.response) { - output += chunk.response; - process.stdout.write(chunk.response); - } else { - process.stdout.write('\n'); - } - } - theConversation += `${mentor.char}: ${output}\n\n` - } -} - -main(); \ No newline at end of file diff --git a/examples/typescript-mentors/package.json b/examples/typescript-mentors/package.json deleted file mode 100644 index 537f3df1..00000000 --- a/examples/typescript-mentors/package.json +++ /dev/null @@ -1,15 +0,0 @@ -{ - "scripts": { - "charactergen": "tsx character-generator.ts", - "start": "tsx mentors.ts" - }, - "dependencies": { - "fs": "^0.0.1-security", - "ollama-node": "^0.0.3", - "path": "^0.12.7" - }, - "devDependencies": { - "tsx": "^4.6.2", - "typescript": "^5.3.3" - } -} diff --git a/examples/typescript-simplechat/client.ts b/examples/typescript-simplechat/client.ts deleted file mode 100644 index d8faaa1b..00000000 --- a/examples/typescript-simplechat/client.ts +++ /dev/null @@ -1,77 +0,0 @@ -import * as readline from "readline"; - -const model = "llama3.2"; -type Message = { - role: "assistant" | "user" | "system"; - content: string; -} -const messages: Message[] = [{ - role: "system", - content: "You are a helpful AI agent."
-}] - -const rl = readline.createInterface({ - input: process.stdin, - output: process.stdout -}) - -async function chat(messages: Message[]): Promise<Message> { - const body = { - model: model, - messages: messages - } - - const response = await fetch("http://localhost:11434/api/chat", { - method: "POST", - body: JSON.stringify(body) - }) - - const reader = response.body?.getReader() - if (!reader) { - throw new Error("Failed to read response body") - } - let content = "" - while (true) { - const { done, value } = await reader.read() - if (done) { - break; - } - const rawjson = new TextDecoder().decode(value); - const json = JSON.parse(rawjson) - - if (json.done === false) { - process.stdout.write(json.message.content); - content += json.message.content - } - - } - return { role: "assistant", content: content }; -} - -async function askQuestion(): Promise<void> { - return new Promise<void>((resolve) => { - rl.question("\n\nAsk a question: (press enter alone to quit)\n\n", async (user_input) => { - if (user_input.trim() === "") { - rl.close(); - console.log("Thank you. Goodbye.\n") - console.log("=======\nHere is the message history that was used in this conversation.\n=======\n") - messages.forEach(message => { - console.log(message) - }) - resolve(); - } else { - console.log(); - messages.push({ role: "user", content: user_input }); - messages.push(await chat(messages)); - await askQuestion(); // Ask the next question - } - }); - }); -} - -async function main() { - await askQuestion(); - -} - -main(); diff --git a/examples/typescript-simplechat/package.json b/examples/typescript-simplechat/package.json deleted file mode 100644 index 6ae8c1aa..00000000 --- a/examples/typescript-simplechat/package.json +++ /dev/null @@ -1,12 +0,0 @@ -{ - "scripts": { - "start": "tsx client.ts" - }, - "dependencies": { - "@types/node": "^20.10.4", - "prompt-sync": "^4.2.0", - "readline": "^1.3.0", - "tsx": "^4.6.2", - "typescript": "^5.3.3" - } - } \ No newline at end of file diff --git a/examples/typescript-simplechat/readme.md b/examples/typescript-simplechat/readme.md deleted file mode 100644 index 5635b9d2..00000000 --- a/examples/typescript-simplechat/readme.md +++ /dev/null @@ -1,35 +0,0 @@ -# Simple Chat Example - -The **chat** endpoint, available as of v0.1.14, is one of two ways to generate text from an LLM with Ollama. At a high level, you provide the endpoint an array of message objects with a role and content specified. Then with each output and prompt, you add more messages, which builds up the history. - -## Run the Example - -`npm start` - -## Review the Code - -You can see in the **chat** function that calling the endpoint is simply done with: - -```typescript -const body = { - model: model, - messages: messages -} - -const response = await fetch("http://localhost:11434/api/chat", { - method: "POST", - body: JSON.stringify(body) -}) -``` - -With the **generate** endpoint, you need to provide a `prompt`. But with **chat**, you provide `messages`. And the resulting stream of responses includes a `message` object with a `content` field. - -The final JSON object doesn't provide the full content, so you will need to build the content yourself. In this example, **chat** takes the full array of messages and outputs the resulting message from this call of the chat endpoint. - -In the **askQuestion** function, we collect `user_input` and add it as a message to our messages, and that is passed to the chat function. When the LLM is done responding, the output is added as another message to the messages array.
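If you want to poke at the endpoint directly before reading further, you can send the same request the **chat** function builds with curl. This is a minimal sketch, assuming a local Ollama server on the default port with `llama3.2` pulled; the user question is illustrative:

```shell
# Same body shape the chat function sends: a model name plus the message history.
curl http://localhost:11434/api/chat -d '{
  "model": "llama3.2",
  "messages": [
    { "role": "system", "content": "You are a helpful AI agent." },
    { "role": "user", "content": "Why is the sky blue?" }
  ]
}'
```

Each streamed line carries a `message` object whose `content` field holds the next fragment of the reply; concatenating those fragments while `done` is `false` is exactly what the loop in `chat` does.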
- -At the end, you will see a printout of all the messages. - -## Next Steps - -In this example, all generations are kept. You might want to experiment with summarizing everything older than 10 conversations to enable longer history with less context being used. From ab39872cb471bed8e0795b097b248c3930faa440 Mon Sep 17 00:00:00 2001 From: Patrick Devine Date: Mon, 13 Jan 2025 17:30:24 -0800 Subject: [PATCH 03/68] add new create api doc (#8388) --- docs/api.md | 110 +++++++++++++++++++++++++++++++++++++++++++--------- 1 file changed, 91 insertions(+), 19 deletions(-) diff --git a/docs/api.md b/docs/api.md index 50402ecc..ede6446f 100644 --- a/docs/api.md +++ b/docs/api.md @@ -928,14 +928,25 @@ A single JSON object is returned: POST /api/create ``` -Create a model from a [`Modelfile`](./modelfile.md). It is recommended to set `modelfile` to the content of the Modelfile rather than just set `path`. This is a requirement for remote create. Remote model creation must also create any file blobs, fields such as `FROM` and `ADAPTER`, explicitly with the server using [Create a Blob](#create-a-blob) and the value to the path indicated in the response. +Create a model from: + * another model; + * a safetensors directory; or + * a GGUF file. + +If you are creating a model from a safetensors directory or from a GGUF file, you must [create a blob](#create-a-blob) for each of the files and then use the file name and SHA256 digest associated with each blob in the `files` field. ### Parameters - `model`: name of the model to create -- `modelfile` (optional): contents of the Modelfile +- `from`: (optional) name of an existing model to create the new model from +- `files`: (optional) a dictionary of file names to SHA256 digests of blobs to create the model from +- `adapters`: (optional) a dictionary of file names to SHA256 digests of blobs for LoRA adapters +- `template`: (optional) the prompt template for the model +- `license`: (optional) a string or list of strings containing the license or licenses for the model +- `system`: (optional) a string containing the system prompt for the model +- `parameters`: (optional) a dictionary of parameters for the model (see [Modelfile](./modelfile.md#valid-parameters-and-values) for a list of parameters) +- `messages`: (optional) a list of message objects used to create a conversation - `stream`: (optional) if `false` the response will be returned as a single response object, rather than a stream of objects -- `path` (optional): path to the Modelfile - `quantize` (optional): quantize a non-quantized (e.g. float16) model #### Quantization types @@ -961,14 +972,15 @@ #### Create a new model -Create a new model from a `Modelfile`. +Create a new model from an existing model. ##### Request ```shell curl http://localhost:11434/api/create -d '{ "model": "mario", - "modelfile": "FROM llama3\nSYSTEM You are mario from Super Mario Bros." + "from": "llama3.2", + "system": "You are Mario from Super Mario Bros." }' ``` @@ -999,7 +1011,7 @@ Quantize a non-quantized model. ```shell curl http://localhost:11434/api/create -d '{ "model": "llama3.1:quantized", - "modelfile": "FROM llama3.1:8b-instruct-fp16", + "from": "llama3.1:8b-instruct-fp16", "quantize": "q4_K_M" }' ``` @@ -1019,52 +1031,112 @@ A stream of JSON objects is returned: {"status":"success"} ``` +#### Create a model from GGUF -### Check if a Blob Exists +Create a model from a GGUF file.
The `files` parameter should be filled out with the file name and SHA256 digest of the GGUF file you wish to use. Use [/api/blobs/:digest](#push-a-blob) to push the GGUF file to the server before calling this API. + + +##### Request + +```shell +curl http://localhost:11434/api/create -d '{ + "model": "my-gguf-model", + "files": { + "test.gguf": "sha256:432f310a77f4650a88d0fd59ecdd7cebed8d684bafea53cbff0473542964f0c3" + } +}' +``` + +##### Response + +A stream of JSON objects is returned: + +``` +{"status":"parsing GGUF"} +{"status":"using existing layer sha256:432f310a77f4650a88d0fd59ecdd7cebed8d684bafea53cbff0473542964f0c3"} +{"status":"writing manifest"} +{"status":"success"} +``` + + +#### Create a model from a Safetensors directory + +The `files` parameter should include a dictionary of the files for the safetensors model, mapping each file name to its SHA256 digest. Use [/api/blobs/:digest](#push-a-blob) to first push each of the files to the server before calling this API. Files will remain in the cache until the Ollama server is restarted. + +##### Request + +```shell +curl http://localhost:11434/api/create -d '{ + "model": "fred", + "files": { + "config.json": "sha256:dd3443e529fb2290423a0c65c2d633e67b419d273f170259e27297219828e389", + "generation_config.json": "sha256:88effbb63300dbbc7390143fbbdd9d9fa50587b37e8bfd16c8c90d4970a74a36", + "special_tokens_map.json": "sha256:b7455f0e8f00539108837bfa586c4fbf424e31f8717819a6798be74bef813d05", + "tokenizer.json": "sha256:bbc1904d35169c542dffbe1f7589a5994ec7426d9e5b609d07bab876f32e97ab", + "tokenizer_config.json": "sha256:24e8a6dc2547164b7002e3125f10b415105644fcf02bf9ad8b674c87b1eaaed6", + "model.safetensors": "sha256:1ff795ff6a07e6a68085d206fb84417da2f083f68391c2843cd2b8ac6df8538f" + } +}' +``` + +##### Response + +A stream of JSON objects is returned: + +```shell +{"status":"converting model"} +{"status":"creating new layer sha256:05ca5b813af4a53d2c2922933936e398958855c44ee534858fcfd830940618b6"} +{"status":"using autodetected template llama3-instruct"} +{"status":"using existing layer sha256:56bb8bd477a519ffa694fc449c2413c6f0e1d3b1c88fa7e3c9d88d3ae49d4dcb"} +{"status":"writing manifest"} +{"status":"success"} +``` + +## Check if a Blob Exists ```shell HEAD /api/blobs/:digest ``` -Ensures that the file blob used for a FROM or ADAPTER field exists on the server. This is checking your Ollama server and not ollama.com. +Ensures that the file blob (Binary Large Object) used when creating a model exists on the server. This checks your Ollama server and not ollama.com. -#### Query Parameters +### Query Parameters - `digest`: the SHA256 digest of the blob -#### Examples +### Examples -##### Request +#### Request ```shell curl -I http://localhost:11434/api/blobs/sha256:29fdb92e57cf0827ded04ae6461b5931d01fa595843f55d36f5b275a52087dd2 ``` -##### Response +#### Response Return 200 OK if the blob exists, 404 Not Found if it does not. -### Create a Blob +## Push a Blob ```shell POST /api/blobs/:digest ``` -Create a blob from a file on the server. Returns the server file path. +Push a file to the Ollama server to create a "blob" (Binary Large Object).
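Putting the blob endpoints together with the create examples above, the full round trip for a GGUF file looks roughly like the sketch below. It assumes a local server and `sha256sum` from GNU coreutils; `model.gguf` and `my-gguf-model` are illustrative names:

```shell
# Compute the digest the server will verify the upload against.
DIGEST="sha256:$(sha256sum model.gguf | cut -d ' ' -f 1)"

# Push the file to create the blob.
curl -T model.gguf -X POST "http://localhost:11434/api/blobs/$DIGEST"

# Reference the same digest in the files field when creating the model.
curl http://localhost:11434/api/create -d '{
  "model": "my-gguf-model",
  "files": { "model.gguf": "'"$DIGEST"'" }
}'
```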
-#### Query Parameters +### Query Parameters - `digest`: the expected SHA256 digest of the file -#### Examples +### Examples -##### Request +#### Request ```shell -curl -T model.bin -X POST http://localhost:11434/api/blobs/sha256:29fdb92e57cf0827ded04ae6461b5931d01fa595843f55d36f5b275a52087dd2 +curl -T model.gguf -X POST http://localhost:11434/api/blobs/sha256:29fdb92e57cf0827ded04ae6461b5931d01fa595843f55d36f5b275a52087dd2 ``` -##### Response +#### Response Return 201 Created if the blob was successfully created, 400 Bad Request if the digest used is not expected. From 6982e9cc96bb2d1e05e58c8bff1d7c9eded032a9 Mon Sep 17 00:00:00 2001 From: Jeffrey Morgan Date: Mon, 13 Jan 2025 18:56:31 -0800 Subject: [PATCH 04/68] readme: remove link to missing page --- README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README.md b/README.md index caf7cf7f..5eb59dbc 100644 --- a/README.md +++ b/README.md @@ -137,7 +137,7 @@ ollama run mario Hello! It's your friend Mario. ``` -For more examples, see the [examples](examples) directory. For more information on working with a Modelfile, see the [Modelfile](docs/modelfile.md) documentation. +For more information on working with a Modelfile, see the [Modelfile](docs/modelfile.md) documentation. ## CLI Reference From 74ea4fb6046d1ae3454dfd00254259f45263318b Mon Sep 17 00:00:00 2001 From: Jeffrey Morgan Date: Tue, 14 Jan 2025 09:30:34 -0800 Subject: [PATCH 05/68] remove .prettierrc.json (#8413) --- .prettierrc.json | 10 ----- macapp/package-lock.json | 91 ---------------------------------------- macapp/package.json | 6 +-- 3 files changed, 1 insertion(+), 106 deletions(-) delete mode 100644 .prettierrc.json diff --git a/.prettierrc.json b/.prettierrc.json deleted file mode 100644 index 0b3312d5..00000000 --- a/.prettierrc.json +++ /dev/null @@ -1,10 +0,0 @@ -{ - "trailingComma": "es5", - "tabWidth": 2, - "useTabs": false, - "semi": false, - "singleQuote": true, - "jsxSingleQuote": true, - "printWidth": 120, - "arrowParens": "avoid" -} diff --git a/macapp/package-lock.json b/macapp/package-lock.json index d5e55393..bacc2a37 100644 --- a/macapp/package-lock.json +++ b/macapp/package-lock.json @@ -55,8 +55,6 @@ "postcss-import": "^15.1.0", "postcss-loader": "^7.3.3", "postcss-preset-env": "^8.5.1", - "prettier": "^2.8.8", - "prettier-plugin-tailwindcss": "^0.3.0", "style-loader": "^3.3.3", "svg-inline-loader": "^0.8.2", "tailwindcss": "^3.3.2", @@ -13248,95 +13246,6 @@ "node": ">= 0.8.0" } }, - "node_modules/prettier": { - "version": "2.8.8", - "resolved": "https://registry.npmjs.org/prettier/-/prettier-2.8.8.tgz", - "integrity": "sha512-tdN8qQGvNjw4CHbY+XXk0JgCXn9QiF21a55rBe5LJAU+kDyC4WQn4+awm2Xfk2lQMk5fKup9XgzTZtGkjBdP9Q==", - "dev": true, - "bin": { - "prettier": "bin-prettier.js" - }, - "engines": { - "node": ">=10.13.0" - }, - "funding": { - "url": "https://github.com/prettier/prettier?sponsor=1" - } - }, - "node_modules/prettier-plugin-tailwindcss": { - "version": "0.3.0", - "resolved": "https://registry.npmjs.org/prettier-plugin-tailwindcss/-/prettier-plugin-tailwindcss-0.3.0.tgz", - "integrity": "sha512-009/Xqdy7UmkcTBpwlq7jsViDqXAYSOMLDrHAdTMlVZOrKfM2o9Ci7EMWTMZ7SkKBFTG04UM9F9iM2+4i6boDA==", - "dev": true, - "engines": { - "node": ">=12.17.0" - }, - "peerDependencies": { - "@ianvs/prettier-plugin-sort-imports": "*", - "@prettier/plugin-pug": "*", - "@shopify/prettier-plugin-liquid": "*", - "@shufo/prettier-plugin-blade": "*", - "@trivago/prettier-plugin-sort-imports": "*", - "prettier": ">=2.2.0", - "prettier-plugin-astro": "*", - 
"prettier-plugin-css-order": "*", - "prettier-plugin-import-sort": "*", - "prettier-plugin-jsdoc": "*", - "prettier-plugin-marko": "*", - "prettier-plugin-organize-attributes": "*", - "prettier-plugin-organize-imports": "*", - "prettier-plugin-style-order": "*", - "prettier-plugin-svelte": "*", - "prettier-plugin-twig-melody": "*" - }, - "peerDependenciesMeta": { - "@ianvs/prettier-plugin-sort-imports": { - "optional": true - }, - "@prettier/plugin-pug": { - "optional": true - }, - "@shopify/prettier-plugin-liquid": { - "optional": true - }, - "@shufo/prettier-plugin-blade": { - "optional": true - }, - "@trivago/prettier-plugin-sort-imports": { - "optional": true - }, - "prettier-plugin-astro": { - "optional": true - }, - "prettier-plugin-css-order": { - "optional": true - }, - "prettier-plugin-import-sort": { - "optional": true - }, - "prettier-plugin-jsdoc": { - "optional": true - }, - "prettier-plugin-marko": { - "optional": true - }, - "prettier-plugin-organize-attributes": { - "optional": true - }, - "prettier-plugin-organize-imports": { - "optional": true - }, - "prettier-plugin-style-order": { - "optional": true - }, - "prettier-plugin-svelte": { - "optional": true - }, - "prettier-plugin-twig-melody": { - "optional": true - } - } - }, "node_modules/pretty-error": { "version": "4.0.0", "resolved": "https://registry.npmjs.org/pretty-error/-/pretty-error-4.0.0.tgz", diff --git a/macapp/package.json b/macapp/package.json index d264d8f5..088ec0b1 100644 --- a/macapp/package.json +++ b/macapp/package.json @@ -11,9 +11,7 @@ "make": "electron-forge make --arch universal", "make:sign": "SIGN=1 electron-forge make --arch universal", "publish": "SIGN=1 electron-forge publish", - "lint": "eslint --ext .ts,.tsx .", - "format": "prettier --check . --ignore-path .gitignore", - "format:fix": "prettier --write . --ignore-path .gitignore" + "lint": "eslint --ext .ts,.tsx ." }, "keywords": [], "author": { @@ -55,8 +53,6 @@ "postcss-import": "^15.1.0", "postcss-loader": "^7.3.3", "postcss-preset-env": "^8.5.1", - "prettier": "^2.8.8", - "prettier-plugin-tailwindcss": "^0.3.0", "style-loader": "^3.3.3", "svg-inline-loader": "^0.8.2", "tailwindcss": "^3.3.2", From a30f347201b8812ff0437aae63a7e5125ff897f8 Mon Sep 17 00:00:00 2001 From: Steve Berdy <86739818+steveberdy@users.noreply.github.com> Date: Tue, 14 Jan 2025 12:37:35 -0500 Subject: [PATCH 06/68] readme: add LangChain for .NET to community integrations (#8352) --- README.md | 1 + 1 file changed, 1 insertion(+) diff --git a/README.md b/README.md index 5eb59dbc..1aaae620 100644 --- a/README.md +++ b/README.md @@ -441,6 +441,7 @@ See the [API documentation](./docs/api.md) for all endpoints. 
- [LangChainGo](https://github.com/tmc/langchaingo/) with [example](https://github.com/tmc/langchaingo/tree/main/examples/ollama-completion-example) - [LangChain4j](https://github.com/langchain4j/langchain4j) with [example](https://github.com/langchain4j/langchain4j-examples/tree/main/ollama-examples/src/main/java) - [LangChainRust](https://github.com/Abraxas-365/langchain-rust) with [example](https://github.com/Abraxas-365/langchain-rust/blob/main/examples/llm_ollama.rs) +- [LangChain for .NET](https://github.com/tryAGI/LangChain) with [example](https://github.com/tryAGI/LangChain/blob/main/examples/LangChain.Samples.OpenAI/Program.cs) - [LLPhant](https://github.com/theodo-group/LLPhant?tab=readme-ov-file#ollama) - [LlamaIndex](https://docs.llamaindex.ai/en/stable/examples/llm/ollama/) and [LlamaIndexTS](https://ts.llamaindex.ai/modules/llms/available_llms/ollama) - [LiteLLM](https://github.com/BerriAI/litellm) From f6f3713001d8afd0c443ba9eaee2116deab540e5 Mon Sep 17 00:00:00 2001 From: Bruce MacDonald Date: Tue, 14 Jan 2025 10:34:37 -0800 Subject: [PATCH 07/68] convert: qwen2 from safetensors (#8408) Add native support for converting Qwen2 family models (including Qwen2.5) from safetensors to gguf format so we can run it. --- convert/convert.go | 2 + convert/convert_qwen2.go | 78 +++++ convert/convert_test.go | 1 + convert/testdata/Qwen2.5-0.5B-Instruct.json | 314 ++++++++++++++++++++ convert/tokenizer.go | 2 + 5 files changed, 397 insertions(+) create mode 100644 convert/convert_qwen2.go create mode 100644 convert/testdata/Qwen2.5-0.5B-Instruct.json diff --git a/convert/convert.go b/convert/convert.go index 44783b6e..639e6ad4 100644 --- a/convert/convert.go +++ b/convert/convert.go @@ -187,6 +187,8 @@ func ConvertModel(fsys fs.FS, ws io.WriteSeeker) error { conv = &gemma2Model{} case "Phi3ForCausalLM": conv = &phi3Model{} + case "Qwen2ForCausalLM": + conv = &qwen2Model{} case "BertModel": conv = &bertModel{} default: diff --git a/convert/convert_qwen2.go b/convert/convert_qwen2.go new file mode 100644 index 00000000..94f14c51 --- /dev/null +++ b/convert/convert_qwen2.go @@ -0,0 +1,78 @@ +package convert + +import "github.com/ollama/ollama/llm" + +type qwen2Model struct { + ModelParameters + MaxPositionEmbeddings uint32 `json:"max_position_embeddings"` + HiddenSize uint32 `json:"hidden_size"` + HiddenLayers uint32 `json:"num_hidden_layers"` + IntermediateSize uint32 `json:"intermediate_size"` + NumAttentionHeads uint32 `json:"num_attention_heads"` + NumKeyValueHeads uint32 `json:"num_key_value_heads"` + RopeTheta float32 `json:"rope_theta"` + RopeScaling struct { + Type string `json:"type"` + Factor ropeFactor `json:"factor"` + OriginalMaxPositionEmbeddings uint32 `json:"original_max_position_embeddings"` + } `json:"rope_scaling"` + RMSNormEPS float32 `json:"rms_norm_eps"` +} + +var _ ModelConverter = (*qwen2Model)(nil) + +func (q *qwen2Model) KV(t *Tokenizer) llm.KV { + kv := q.ModelParameters.KV(t) + kv["general.architecture"] = "qwen2" + kv["qwen2.block_count"] = q.HiddenLayers + kv["qwen2.context_length"] = q.MaxPositionEmbeddings + kv["qwen2.embedding_length"] = q.HiddenSize + kv["qwen2.feed_forward_length"] = q.IntermediateSize + kv["qwen2.attention.head_count"] = q.NumAttentionHeads + kv["qwen2.attention.head_count_kv"] = q.NumKeyValueHeads + kv["qwen2.rope.freq_base"] = q.RopeTheta + kv["qwen2.attention.layer_norm_rms_epsilon"] = q.RMSNormEPS + + switch q.RopeScaling.Type { + case "": + // no scaling + case "yarn": + kv["qwen2.rope.scaling.type"] = q.RopeScaling.Type + 
kv["qwen2.rope.scaling.factor"] = q.RopeScaling.Factor + default: + panic("unknown rope scaling type") + } + return kv +} + +func (q *qwen2Model) Tensors(ts []Tensor) []llm.Tensor { + var out []llm.Tensor + for _, t := range ts { + out = append(out, llm.Tensor{ + Name: t.Name(), + Kind: t.Kind(), + Shape: t.Shape(), + WriterTo: t, + }) + } + + return out +} + +func (p *qwen2Model) Replacements() []string { + return []string{ + "lm_head", "output", + "model.embed_tokens", "token_embd", + "model.layers", "blk", + "input_layernorm", "attn_norm", + "self_attn.k_proj", "attn_k", + "self_attn.v_proj", "attn_v", + "self_attn.q_proj", "attn_q", + "self_attn.o_proj", "attn_output", + "mlp.down_proj", "ffn_down", + "mlp.gate_proj", "ffn_gate", + "mlp.up_proj", "ffn_up", + "post_attention_layernorm", "ffn_norm", + "model.norm", "output_norm", + } +} diff --git a/convert/convert_test.go b/convert/convert_test.go index 48a2b1d4..a98b956a 100644 --- a/convert/convert_test.go +++ b/convert/convert_test.go @@ -108,6 +108,7 @@ func TestConvertModel(t *testing.T) { "Phi-3-mini-128k-instruct", "all-MiniLM-L6-v2", "gemma-2-9b-it", + "Qwen2.5-0.5B-Instruct", } for i := range cases { diff --git a/convert/testdata/Qwen2.5-0.5B-Instruct.json b/convert/testdata/Qwen2.5-0.5B-Instruct.json new file mode 100644 index 00000000..74f1956c --- /dev/null +++ b/convert/testdata/Qwen2.5-0.5B-Instruct.json @@ -0,0 +1,314 @@ +{ + "general.architecture": "qwen2", + "general.file_type": "1", + "general.parameter_count": "494032768", + "general.quantization_version": "2", + "output_norm.weight": "93a01a6db3419e85320a244bbf8ae81c43033b1d10c342bea3797ff2ce348390", + "qwen2.attention.head_count": "14", + "qwen2.attention.head_count_kv": "2", + "qwen2.attention.layer_norm_rms_epsilon": "1e-06", + "qwen2.block_count": "24", + "qwen2.context_length": "32768", + "qwen2.embedding_length": "896", + "qwen2.feed_forward_length": "4864", + "qwen2.rope.freq_base": "1e+06", + "token_embd.weight": "d74257dc547b48be5ae7b93f1c9af072c0c42dbbb85503078e25c59cd09e68d0", + "tokenizer.ggml.add_eos_token": "false", + "tokenizer.ggml.add_padding_token": "false", + "tokenizer.ggml.eos_token_id": "151645", + "tokenizer.ggml.merges": "6b1b1c58f1223d74f9095929d3e6416cdd74784440221a5507b87b8197f2bfd2", + "tokenizer.ggml.model": "gpt2", + "tokenizer.ggml.padding_token_id": "151643", + "tokenizer.ggml.pre": "qwen2", + "tokenizer.ggml.scores": "94e247e531e8b0fa3d248f3de09c9beae0c87da8106208a8edfaac0b8ec4b53d", + "tokenizer.ggml.token_type": "b178dbc9d1b2e08f84d02918e00fc2de2619a250e6c188c91a6605f701860055", + "tokenizer.ggml.tokens": "1d93f6679b23a1152b725f7f473792d54d53c1040c5250d3e46b42f81e0a1a34", + "blk.0.attn_k.bias": "5ce6617845f66c34515978d23d52e729c298d8bffa28c356a0428bef17142cf1", + "blk.0.attn_k.weight": "a960832a9e0e83e4d95402e5d1a01cc74300fcca0c381237162126330e1a7af8", + "blk.0.attn_norm.weight": "32c7d51cd0958f1f1771174192db341f9770516d7595a2f0fd18a4d78bd5aba3", + "blk.0.attn_output.weight": "c67e6e7e868354a11bf9121c70ee56c140b20eec611a8955e7dfe54a21d40a98", + "blk.0.attn_q.bias": "3e9e994eb1f03bccfc82f8bb3c324c920d42d547e07de5be83be12c428645063", + "blk.0.attn_q.weight": "dc12132f789b97cfa1e3f5775ceb835247fa67aa47400fd09c8f9f3769208583", + "blk.0.attn_v.bias": "a3fd0757b31fdc78af5ec320332d239c1a79d34e8804df06c5454e86955e8cc9", + "blk.0.attn_v.weight": "f43094a2134c7ee2dcc52aac3c8b7d9d64fb0295a8adb94cabfd49213f017b84", + "blk.0.ffn_down.weight": "18c2aec92db14f21976838a8c35d5575f80d0e4b1e05ccc0d8388d5877e80147", + "blk.0.ffn_gate.weight": 
"a3a1c4ef38f8f750eabadfe3d83bbb0f77941eec1cc1a388e51852e99c8691f6", + "blk.0.ffn_norm.weight": "b59b779c42d44b5c4cec41e39b4eb61e0491a07c1b3e946ccb5b8d5c657eda3f", + "blk.0.ffn_up.weight": "db64f09987ea59449e90abae5a2ffcc20efd9203f0eebec77a6aacb5809d6cff", + "blk.1.attn_k.bias": "a5c8c5671703ec0aa0143ff70a20ffdd67b5d5790ca1dfa5bba4e87e4071ed9f", + "blk.1.attn_k.weight": "835c7c7cc95b3cb2e55bd9cac585aa0760a033896621d3e06421f3378c540f7d", + "blk.1.attn_norm.weight": "f4c36fb6c14fce721fab0de78cc118d6f66e3a3d3ea0017bb14aade24c3c5434", + "blk.1.attn_output.weight": "cc1e80310c97cef068e48e40b7096f32fa2138519d6209c6a1a9994985999016", + "blk.1.attn_q.bias": "bc332780e66b0aac80ec5e63ac32344919a840db2fcc8f87bcef16a43a54138e", + "blk.1.attn_q.weight": "d766f06c925cce38d4b31b2165b3448e1fb49a7d561985f95d9cd2fcba52367a", + "blk.1.attn_v.bias": "9f486626fb6ed9ac84970a71e9b9818dd2758501fd3f61bb1c08540dcc7a8631", + "blk.1.attn_v.weight": "e873d1e5bd4f4d6abfd47c0f55119c2c111105838753ee273a03c5ccea25ce5c", + "blk.1.ffn_down.weight": "b3ce82b093f187344de04284b1783a452de1b72640914609b8f830dc81580521", + "blk.1.ffn_gate.weight": "5cd44ad237edaca525a28a3ac13975d1b565f576d6a8003237a341ae0d156f2e", + "blk.1.ffn_norm.weight": "4ac774ee8afaee119610c46aa1ff89fc6c9084a29d226075bc4aa4d2f15f746c", + "blk.1.ffn_up.weight": "042d81ab5f1983d85c81213232f3bfc05a9302d9dfaa98d931ebba326b6058b8", + "blk.10.attn_k.bias": "767ecfeacd60a2c2221ac4d76c357190849dd9cdf64ced418d9d0c7949101401", + "blk.10.attn_k.weight": "a9f3df343227537636be8202303453086375091944e498bad11e0b91e45e8c71", + "blk.10.attn_norm.weight": "01acd0e7b3e363f873dbfde6f0995ffcce83f5aaa10ff91c31dbf775035f6d5a", + "blk.10.attn_output.weight": "a531fe660769604ab869f01b203eb115e025cad4c0baeacdd1bcca99cf6d0264", + "blk.10.attn_q.bias": "356a02c9163dd660c1340fbe1e049b335ac6178891e00996131bba9ab4cb3e59", + "blk.10.attn_q.weight": "81be0cfb227339d83f954cd8dcf35828441211c6e1d184060e3eb76085041e2f", + "blk.10.attn_v.bias": "ed0450653284b62f8bf2c2db19c0ff7a6cf3cda1324d0a044c5e3db7bb692bd3", + "blk.10.attn_v.weight": "c1247ff7092babd2ed979883095b9aa022b2996cab1c77fb9e6176ddc1498d16", + "blk.10.ffn_down.weight": "fda7544965dc9af874f1062c22151c6cefc8ba08cbe15dc67aa89979e77b2de4", + "blk.10.ffn_gate.weight": "9f2632b1dee7304d10c70bd38d85bb1f148a628a8468f894f57975b8a2f1d945", + "blk.10.ffn_norm.weight": "94f8cbd6b17a4d5aabd93fa32930a687db3b11f086142f1cd71c535c11adcad4", + "blk.10.ffn_up.weight": "8dc2f8db0474939a277a3d89db34c3bcc3381cfea57bd05a8426a164634d9112", + "blk.11.attn_k.bias": "3b8e5a662b19411e3f6530714b766aad2ee41eebc8161bec9db0bc82d383a6e0", + "blk.11.attn_k.weight": "2c29f1ed1ce53ce9604e9ea3663c2c373157e909a0d6064a8920005f6d15dad9", + "blk.11.attn_norm.weight": "48f68a99c3da4ab4c9e492677b606d1b8e0e3de1fdbf6a977523f97b8c21ec31", + "blk.11.attn_output.weight": "5859f3838a94898b020c23040941ed88f4fcb132db400d0849f30a01f62c0f1c", + "blk.11.attn_q.bias": "c5ad89a5628f2bd81252ef44ef6bbcbff15c33ad16fba66435509b959c2af6d3", + "blk.11.attn_q.weight": "d102104e5d61c1e3219564f1d0149fd593db6c6daa9f3872460c84403323cfef", + "blk.11.attn_v.bias": "8653f7d48c5f75a5b55630819f99ecf01c932f12d33fd1a3ee634613e70edde8", + "blk.11.attn_v.weight": "e0a7c7d89b9f2d0d781ce85330022229126e130a8600a09d4a5f920f0bbd50b2", + "blk.11.ffn_down.weight": "4a22b3361eba8bbe1d9a6fda1812618e894c49f13bcacb505defa9badb6b96a6", + "blk.11.ffn_gate.weight": "484698b206760d3fd8df68b252a3c5bae65c8bf6392fb53a5261b021b6f39144", + "blk.11.ffn_norm.weight": 
"da69e96338cbe30882cf5a9544004387f5bbc0bcb6038e61ba2baabbd2623bac", + "blk.11.ffn_up.weight": "26ec74f1f504d1281715680dfbcc321db4e9900c53932fa40955daceb891b9aa", + "blk.12.attn_k.bias": "f94b49ec3e498f14f6bc3ebefe1f82018935bbe594df03253bfffae36bc20751", + "blk.12.attn_k.weight": "ae6323d0bbcfcea01f598d308993d1a7530317e78c1f64923e36d4b1649e9e73", + "blk.12.attn_norm.weight": "3784536a7611a839a42a29a5cc538c74ee4f9793092e5efe1b227b48f8c4d37f", + "blk.12.attn_output.weight": "46826c00b066829355db78293ab216e890f5eaaed3a70499ee68785189a6b0d9", + "blk.12.attn_q.bias": "b14db2d327ce0deec97beda7d3965a56c43e1e63dc9181840fb176b114cf643a", + "blk.12.attn_q.weight": "30f67df52ced06f76b6c85531657584276a454d6ec9bb7d0c7d2ca8f067f5551", + "blk.12.attn_v.bias": "57ab4b7e43f4fc5853bca7bfbb2702f8c2c391a49252a760abbb7b26330dc4aa", + "blk.12.attn_v.weight": "3ccd9da0cfe241cd33a63310f3ca6d81c5bc5a50d200bfea6612ac376166aca2", + "blk.12.ffn_down.weight": "a095774413198a83c549ce132d7c9684c0baef33145eaa889be370ef9c881c81", + "blk.12.ffn_gate.weight": "bb3b2bbdfb065d2a0a795909c53beec327781a4a7e974bf9f99c436cea459991", + "blk.12.ffn_norm.weight": "3b486c6cd97eb4b17967d9d6c0cc3821a1a6ad73d96b4d8fbf980101b32b8dab", + "blk.12.ffn_up.weight": "d020b82dd39a5d5a9d3881397bf53a567790a07f395284e6eb0f5fe0fef53de3", + "blk.13.attn_k.bias": "69381f8254586eba3623eceb18697fe79f9b4d8f2c30136acb10d5926e3ba1d0", + "blk.13.attn_k.weight": "c4d7a31495d71269f81b586203a50abea3a9e2985667faf258c9306ec6030f1d", + "blk.13.attn_norm.weight": "907da11075d16eda668dabe548af3cfd794df26b8ab53939af1344d91bec6fba", + "blk.13.attn_output.weight": "ca01cf6d2b8ece2fb3b0f56f1eb76194471ac27b54fe264f99c909f5eb7fef4a", + "blk.13.attn_q.bias": "2f5ecebafe03b1d485b93c41cff756ca57fb65b02e9d8336f14a3d26ab5d159a", + "blk.13.attn_q.weight": "f557f8acad7f0fa62da06b5da134182fe04a5bed8bdb269e316f970c9cc440fb", + "blk.13.attn_v.bias": "a492a88ae131e95714b092545a8752eaea7c7d2f9cb77852628ca8296c415525", + "blk.13.attn_v.weight": "d1220b1fe9f1cc0a5a88ee239d65fec900f5eaf6c448b6c2cbe74c81e15ed333", + "blk.13.ffn_down.weight": "53184e33440b49848a896304eb16a983efbc6b8bee0b93de8c8de716e1585fcb", + "blk.13.ffn_gate.weight": "684bf8896f148c851506c62717e45c426921b93c10d536ecdeb0fb28259a106d", + "blk.13.ffn_norm.weight": "6cb4e547ad8665eb7c174855c08afe1e5490fece66122522c1e9e8132d9064eb", + "blk.13.ffn_up.weight": "c64107897e38c06727075aba4ea7940b2cdd0e278b5c555dffb2790ef553bb57", + "blk.14.attn_k.bias": "2814ca9b160b16ae39557c9b629482fbe3a7592d372c1e1bf1ac59a2d578fde1", + "blk.14.attn_k.weight": "3377177396463afba667742972920ebb45dfdc37e9950e1f0e1d60a2f936b27d", + "blk.14.attn_norm.weight": "5cae870477d51dd35a6d22aaeacfce4dff218ffba693820ede6a4e11f02afd6d", + "blk.14.attn_output.weight": "3cfe9ccf3d48ae9e95b93a132a1c6240189a277d764f58590fb36fdbb714cad0", + "blk.14.attn_q.bias": "6a75acc2f090b2e67bfc26f7fca080ae8bd7c7aa090ec252e694be66b8b8f038", + "blk.14.attn_q.weight": "5ef45c86d7dda1df585aa1b827b89823adf679a6bb9c164bd0f97b2aa6eb96f1", + "blk.14.attn_v.bias": "5534480443e10ed72c31a917f3d104b0f49df5e6dbfa58d0eb5e7318120e3aee", + "blk.14.attn_v.weight": "58f45cf3240c4623626ec415c7d5441eaa8d2fb184f101aba973f222989422d1", + "blk.14.ffn_down.weight": "2dc82a0f20c05b77512458738130d8d05ce150cc078680ae7ee6dd7ed68d955d", + "blk.14.ffn_gate.weight": "d4a6c6f0fcccddfd1fcaa074846622f4a74cb22b9a654ab497abdc1d0dde9450", + "blk.14.ffn_norm.weight": "777e444932a0212ff3feac98442444e17bd8a98cb758ea3356697d0846d12c56", + "blk.14.ffn_up.weight": 
"6b75f6bd00195198447b69a417ed9d98f8ca28b3cb8be82f4bad908be0777d57", + "blk.15.attn_k.bias": "2d07211a58e6c2f23aa3a6dc03c80a7d135dfb28726b60b0e0fdd0f35ea5c37b", + "blk.15.attn_k.weight": "e77f3c0075a1810e70df956cc51fd08612f576cc09b6de8708dcae5daedb0739", + "blk.15.attn_norm.weight": "379a10d90609a5d5ba67d633803eda1424fc61ba5cca8d3bffe70c8b18b58ebf", + "blk.15.attn_output.weight": "402751c12ee9dbc9db5e3bf66a7b23ebe7d36c0500e0be67be4c8b1c4357fa62", + "blk.15.attn_q.bias": "acb37fc409ee725ceedf7a3a41b40106086abc47b76780728f781942c5120208", + "blk.15.attn_q.weight": "89cd3047a09b46ed2bb57c69dd687f67a1f0235149b30376fa31b525898e4a55", + "blk.15.attn_v.bias": "f081a37289cbe811978feb4da3ef543bdeb7355414d476f44e09b498da10cb2c", + "blk.15.attn_v.weight": "8404f242a11e6d512c9ead9b2f083cda031e9b269f8a0a83f57ee4c56934764e", + "blk.15.ffn_down.weight": "93438f43ee8cc4f1a7fd3840a6afdd5f02123e76db4f0d9474430c0100d148fc", + "blk.15.ffn_gate.weight": "ff935a2698843e87fad9dbf7125f53e460190ec71ee128b650b3fc027fe37bfc", + "blk.15.ffn_norm.weight": "4be80f199841cba831982e988451e1833c3c938a4d6ca1169319087bf0bd723e", + "blk.15.ffn_up.weight": "ee9ba63c66d71053e33551ddd519878bb30b88eeb03cfe047119c5c4000fb0a6", + "blk.16.attn_k.bias": "3f5fbabed4510c620b99d9d542739295fa6a262a7157f3a00a4889253f8341b8", + "blk.16.attn_k.weight": "8ca6eb139b281c257324cddea97a8e9aa7c048b53075cf00153123b967c27ee5", + "blk.16.attn_norm.weight": "290157f005e5aa7dddf4bd60100e7ee7b0baa7f11ec5c2cea5e0ead2aad3a4c6", + "blk.16.attn_output.weight": "b1f4d80a7447f08f1c331712527f750d00147f35c042442ade96fd029dadc5a1", + "blk.16.attn_q.bias": "e3e4e442ad4416791b468cad8de0d0d2d68c7e7df8d06002f4d49b4da9cb25e4", + "blk.16.attn_q.weight": "cc7392fa5bb1107d3816e7e7363de252d37efd4165d065e258806291ce0a147b", + "blk.16.attn_v.bias": "a7629830f2f6293e018916849614636d40b1bcd11245f75dbc34d38abae8f324", + "blk.16.attn_v.weight": "b6c7856c7d594437630929c8cf3b31d476e817875daf1095334ec08e40c5e355", + "blk.16.ffn_down.weight": "f9c0a777a00170990a4982d5a06717511bf9b0dd08aeaab64d9040d59bcbebba", + "blk.16.ffn_gate.weight": "ed88f11bc3176c9f22004e3559ccb9830a278b75edd05e11971d51c014bd5cd2", + "blk.16.ffn_norm.weight": "ab24abdcc4957895e434c6bb3a5237a71ff5044efb9f76c1a9e76e280c128410", + "blk.16.ffn_up.weight": "99f594dc8db37f554efa606e71d215fbc3907aa464a54038d6e40e9229a547ff", + "blk.17.attn_k.bias": "f236625676f9b2faa6781c7184d12d84c089c130d2a9350a6cf70210990f6bf1", + "blk.17.attn_k.weight": "c2a4f20cd3e98538308a13afe9cc5880bdd90d543449c6072dedd694b511ee1a", + "blk.17.attn_norm.weight": "5a9da4ee168311f487a79fc9d065a035432c6cafa8adb963a84954cf32f57a2a", + "blk.17.attn_output.weight": "d5df7031e354186ce65dc09d6f8a92eb721c0319816f8596b0c8a5d148ed0a2a", + "blk.17.attn_q.bias": "3212d5eeaa7ed7fac93cc99e16544de93c01bb681ae9391256ed4a8671fc6b00", + "blk.17.attn_q.weight": "d18cd9aa7ee10c551cb705549fa1ae974aea233f86471c9a19022dc29b63d0d5", + "blk.17.attn_v.bias": "a74ad11a1f8357742f80e2a0c0b3a2578fc8bbaf14c8223000767e07a5d79703", + "blk.17.attn_v.weight": "da18ac0e90884436a1cb0ad6a067f97a37f321b03c70b8b03bf481339fef5c80", + "blk.17.ffn_down.weight": "81a8a5d7a194fb53d976558e0347efbe9fdb1effffde9634c70162e1a20eff51", + "blk.17.ffn_gate.weight": "72870d83ab62f2dcd45f593924e291a45e4ae1b87f804b5b88aa34cfd76dd15e", + "blk.17.ffn_norm.weight": "cae39ac69b9bdaeefab7533796fdf11dbb7a4bdbdeed601e20f209503aafe008", + "blk.17.ffn_up.weight": "e7cb40b0842468507cec0e502bbed8a86428b51d439e3466bc12f44b2754e28f", + "blk.18.attn_k.bias": 
"8bfc02b94f9587aa125e2d8bbc2b15f0a5eb8f378d8b3e64a8150ae0a8ca3df2", + "blk.18.attn_k.weight": "434bc3b3332ea48afee890aa689eb458a75c50bc783492b0cbf64d42db40e8ad", + "blk.18.attn_norm.weight": "d6ffc09396c42a70d1f0e97d81113eee704d3bfc9eeae2bed022075a5dd08075", + "blk.18.attn_output.weight": "133f001f81f3b082468a7de67cb2e7a76508fce34bcc4dee7f0858e06eee082c", + "blk.18.attn_q.bias": "758d0e28bf5e660b3090aafb70e2a3191b4f3bb218d65e9139a086ceacaf599f", + "blk.18.attn_q.weight": "12d7b86fc1b09b9fa7f8b7ed43d8a410892cec8672d0c752f8346f6193343696", + "blk.18.attn_v.bias": "9efd15bab0519462431d6c6e8a5b7dd4e151dc449468097ee0ddca369c0ecc2e", + "blk.18.attn_v.weight": "f631231a79d4a2e9730fb2e386d8c18621eb3fb7900fbfdff5e6d52cc42db122", + "blk.18.ffn_down.weight": "874a2dddf456f3ab56b958b0860d71c8c680a6f89322c9bf6b2f32a113592300", + "blk.18.ffn_gate.weight": "4549ef8976c345a511df4a7133bdaf6fe387335f52dfd8a4605a8ae3f728c403", + "blk.18.ffn_norm.weight": "80c258a2536a860e19bfcbd9f29afa13214fbb4c34bde0d4da51287d354e9a59", + "blk.18.ffn_up.weight": "8b03308a581457a3c038b7a086f3cdf14941d7ad4107c4bd6d9d6b062fd00d73", + "blk.19.attn_k.bias": "e77f7b0c8e3e0a9b0d61918cd88371047752a1b02b1576936f4ec807d4d870ee", + "blk.19.attn_k.weight": "a2a318e93355230c0d0f95c441b080bf9c4914507255f363fb67a5e771d4d1e6", + "blk.19.attn_norm.weight": "9a4bdeb3970be21ac74a94c2c81eb36986533db81b78db6edec48d9802910d59", + "blk.19.attn_output.weight": "2369b103dd3947e2cef02b2669b405af5957fb3a7f9d0ff40646078c4b4317ad", + "blk.19.attn_q.bias": "e20bf427bef69059ae84a5d9f98f7d688489627f198fb6153def018ff9fd2e34", + "blk.19.attn_q.weight": "45a3bb3bdfd2f29dd76e5f78ddae73678b9a2a85dfaf609e460240ef5b7be2ad", + "blk.19.attn_v.bias": "a441f58a3e02ed86ee1819eefc9bd4e8b70d11b864a929d58a2c2ac0aeb8203d", + "blk.19.attn_v.weight": "30b0b04480c510450a7abb2ce9fa05c65b150a3cc4dc76f8916bf8d013f1b6be", + "blk.19.ffn_down.weight": "eebb9ab8fdb6a6efcfff8cf383adac9ec2d64aeeff703d16ed60d3621f86c395", + "blk.19.ffn_gate.weight": "3fef1493029298378886586478410b3d2e4e879f6aa83c07e210a7ce6481817f", + "blk.19.ffn_norm.weight": "e1be99ea1e8fb9678f7b8ba200f3f37e03878f3574d65d57bcd3a9fd796e2112", + "blk.19.ffn_up.weight": "f07cf25e09394fb69fe3ef324bdc0df9a4cecf3dc53070b8acc39e6d1689bf82", + "blk.2.attn_k.bias": "b29baa8221f125eff6b8ac1a950fa1d7cfc1bce7bdc636bf3df7d4065ab6466c", + "blk.2.attn_k.weight": "4bd0c179bced8bc37a09f5748c394e0cf50273942fb38a866e5cf50b6c96c437", + "blk.2.attn_norm.weight": "07b3edc6a6325c3428aa12f29bcae0be0de363ce61a6af487bc5c93fb8c468d9", + "blk.2.attn_output.weight": "056b5b31dbc81087c81b9d41c25960aa66c7190004c842ba343979644d7f4d88", + "blk.2.attn_q.bias": "479b6212401e097767c9d52b12a1adb8961c0fce9fcaaab81f202a9d85744376", + "blk.2.attn_q.weight": "f89196076f446a6dd8a9eee017f303504f9c03094c326449cee5a7fc0a97fade", + "blk.2.attn_v.bias": "ef9b1b986dbd9d7291027a88b67dc31434435b20e76e4f1e9d6273ebd31224f0", + "blk.2.attn_v.weight": "9322f4f00e85f8c0936845c51ca64b202a93df104f36886986a8452a8e4967a5", + "blk.2.ffn_down.weight": "7beac0d2440dc49af33ededb85a6cc3ba23ab33ad3ffa5760714b2ef84d94f6e", + "blk.2.ffn_gate.weight": "818a93864a5890c1f4dc66429004fad07645a50142350e9bff9a68fe24608a52", + "blk.2.ffn_norm.weight": "152c924d5514942ad274aafb8cc91b35c1db3627c3d973d92f60ff75f3daf9ba", + "blk.2.ffn_up.weight": "9c9579e600f209546db6015c9acfeda4f51b6d3cca6e8db4d20a04285fe61a37", + "blk.20.attn_k.bias": "fd22bfeffb63d818ce2ff1ea2ace0db5d940f7a9489b6bfc1ec4a5398848d7fe", + "blk.20.attn_k.weight": 
"f74439bc74c2f9252130c9c28384fd7352368b58bb7ce3f2444cf0288dfff861", + "blk.20.attn_norm.weight": "5c15d2613df87be6495fb7546b7dcedd2801d12fa5ecc02c877df889330e8f37", + "blk.20.attn_output.weight": "6731a39286a67f6859832f96695732e579e14e0c36956eccd1edce3db11595b8", + "blk.20.attn_q.bias": "04466e5a3f454a19b9b433fc2585396feac780027ece7ccb4e4bb3e406fc14d8", + "blk.20.attn_q.weight": "ead4c71daaeb17bf20d014a34c88b97f238456488e815ae0f281a5daf6fc99b8", + "blk.20.attn_v.bias": "adcc848e043025de9bd55ccb14dd8fb6343e8b5185ed07e12964be41d0faf99f", + "blk.20.attn_v.weight": "81bfc23f83526386a4761c2c16b6a93cd0bbf9d846c1a51b82c71f1474a465f1", + "blk.20.ffn_down.weight": "9bf660af3bafad919d03173c89a65fc9c89440a76c42c9e55e4d171076f3c17f", + "blk.20.ffn_gate.weight": "c04b4f3ccce44917ee228b998e2c19dd702aef10a43413afb152e808b5ac5c42", + "blk.20.ffn_norm.weight": "3d5b555d7746a71220143c6b8fff5ce4eb63283d9d9c772f1233d848f69f4ff4", + "blk.20.ffn_up.weight": "d7a196505c39e5469dfc7c6958bdbb54e93629ac1a047a6663ed96b318753094", + "blk.21.attn_k.bias": "4db1f48e5c6a3bc5720a5da813bbef08283e6269e12d83f8a9c54e52715d8011", + "blk.21.attn_k.weight": "c687b2f0e132a5e220a2a059b61aa2a537f37d8a674d7709f87880637b263b31", + "blk.21.attn_norm.weight": "ec23b0ff847a4b45585ab8e04f10fc20bb1637c5f1fbcdc4d73f336bcb5d1bd0", + "blk.21.attn_output.weight": "01255390576316c1731ef201e32c6e934eba356c28438cd06d9027ac6a3ff84f", + "blk.21.attn_q.bias": "3098f37205a15418e1681e407c82b7ce7c6fda6c6826b0590a13e1b68a38a1ea", + "blk.21.attn_q.weight": "30ea62cbb702a5359229dc96819df17ee535e2e9988d044b005c73ea536e1005", + "blk.21.attn_v.bias": "7bbedb2c22a04737f21993115701d4a06b985b7ca3b64681f53cd1be8d7ea39e", + "blk.21.attn_v.weight": "e11905e63579e36fbee978062af7599339ae29633765a4835628d79a795ec8df", + "blk.21.ffn_down.weight": "84def2ffd8aca766f9ce12ed9ac76919ab81eb34bdeae44fa4224417c38af527", + "blk.21.ffn_gate.weight": "4e99f05377b4a0b8d875045530a5c59dee6a46ac8a45597f6579f6fdfa800787", + "blk.21.ffn_norm.weight": "af48f13d03fba38ff8794a5f5005e666e501f971ca2e30bbded2777a8096f37d", + "blk.21.ffn_up.weight": "a29541c39a6acbc364be86994632a5bf55d701027cb7f23320f8c6d55ee42c91", + "blk.22.attn_k.bias": "c97f84db6c75422df6ef5768676d4e9abefaa3b8337aa2730ff260f8fc350480", + "blk.22.attn_k.weight": "af9a0c56f68779513e95be11611b7be6175ddae27d48bee9dd72fdbf05f6cbfa", + "blk.22.attn_norm.weight": "1c7518eb5bcff4a202c6f4a2827f14abd76f9bcc64ce75fe9db60b69437a5c9c", + "blk.22.attn_output.weight": "1abcf1f3caa2f59dd018646b93f9cf8fd30d49e98a473e6a8704419a751be46f", + "blk.22.attn_q.bias": "7221e01cb692faf2f7f8c2eb6e2fac38a1b751a9c9fdb6a21a0a936eb0bf4b96", + "blk.22.attn_q.weight": "faaf8fb7b6c19f343d47f3ea6b57151fb46c787e0b3bd2c292fd327d3d4d8e35", + "blk.22.attn_v.bias": "3ec05942e82d735de99dfd0d8228d8425e63e2fc584da98b3326bdef89ecb2e5", + "blk.22.attn_v.weight": "42e7b0ad06db76227837da9d4e74b2db97f3df4050ecb3a87cb9b55e08dfcb42", + "blk.22.ffn_down.weight": "87ef98ad2d0e824b0fa5ad8aa18787162922e527c9b1b721a99bc07d3bf97c82", + "blk.22.ffn_gate.weight": "562d6e5a1654b03aaa0e33864d23c10297fd4bcaa72d30fac69fb771ee1df9d6", + "blk.22.ffn_norm.weight": "f8a405dee467749d59427ce05cdd4b9c11bb18934a89258ea461f013b7d251f5", + "blk.22.ffn_up.weight": "90e1f4ae4062649d4d838399eb353e8bb8d56a49982b6a7f64aa3945377f7187", + "blk.23.attn_k.bias": "9ad22178a85f3be7e25f5aff462f31627466364f2f5e92f265cc91db0da9a8a8", + "blk.23.attn_k.weight": "d813beffb10f03278f5b58eea0f9d73cdcb7b5b4045ae025c379592e854f7dfd", + "blk.23.attn_norm.weight": 
"f583c9836044bdb056d6f8911088ac28add68e500043ae1f97b5d9158fe3d769", + "blk.23.attn_output.weight": "02789911ac3b97f6b761e958b7dd6dc7da61a46a1be92bd0b346039ca7ecd2b2", + "blk.23.attn_q.bias": "38c4970fb9b4f7e4a139258a45639d848653814b4bc89ea9849709b13f16414b", + "blk.23.attn_q.weight": "eb694be9a5ab5858b8dab064ee4cce247dc757424e65282989bd4d015b8580ce", + "blk.23.attn_v.bias": "0a25f6533aa7e7a152a4b198cf6c411c2408a34afa4f161bb4d5ffba2f74e33f", + "blk.23.attn_v.weight": "187e1bac6b70f74e6364de226565aa8275ee2854d09cbe5895451a689596049e", + "blk.23.ffn_down.weight": "88880dd9ba7ee80ade972927f810b5d2c30a69520c615190b27f9daabc0a8c5a", + "blk.23.ffn_gate.weight": "5abec63197935ab3eb8e6de0a5307396ec46cdb1cc5de25d87c845f3c4a3e887", + "blk.23.ffn_norm.weight": "60e1f5e6310c3a531c554a6bb7cd883aed58db1e51853f739436ea461c1843d7", + "blk.23.ffn_up.weight": "3d7f502771743f4a634188dfcd8b8a384fb07467ca8528366aee59ddb25b7bce", + "blk.3.attn_k.bias": "0b6b442ebbac29c8c4b67e8e3876d0382dd2dc52efdf4ab0ebbc6f71b6252393", + "blk.3.attn_k.weight": "480f40584fbda692c26f2cee45f5923780b236f8b4e8ec7bbee0237777a0918d", + "blk.3.attn_norm.weight": "39872be2af31bc9cd6b583ebba6fb759f621d586d66e5a2fc0b85991615a8923", + "blk.3.attn_output.weight": "924b2c80d8513bf637f8ebb3756a340d9cf2243de723fd08d7f5dccd46b3f8b6", + "blk.3.attn_q.bias": "863c9d848156847a3fe9bbc44415a4395245b5d13e95673c014fdb71e494ab0a", + "blk.3.attn_q.weight": "bff73ee5de92fba8f6c089bbb19ce57e17ab3c9c29295712804bb752711b882e", + "blk.3.attn_v.bias": "e1b6fea126e86189112fcdfee79ffc66a087461527bc9c2dc52dc80f3b7de95e", + "blk.3.attn_v.weight": "7812b7f5133636f06cdbb4dcc48ef7803206538641b6c960777b37f60a8e6752", + "blk.3.ffn_down.weight": "00b393d6a7e3ad9b5224211ccdbc54a96aae151f24ed631764ac224972a6bc82", + "blk.3.ffn_gate.weight": "cfd63fa3a038af05dc53c6eeb3c192f1602f26ff24cb840bcf1510fcb37b5513", + "blk.3.ffn_norm.weight": "7389fc240a282949580ea2f5b0d7973ac79f32f76dc0155b537bb6b751f8e27a", + "blk.3.ffn_up.weight": "2a945f47090df9cb16f92f1f06c520f156f8e232182eaaed09f257b8947a2a62", + "blk.4.attn_k.bias": "62533c31f0de498187593f238c6597503fef2a92e920cd540a96bc5311b3b2a0", + "blk.4.attn_k.weight": "93e829868bffd980a8e589b9c4566cd81e6ce4296a5f357a2ae93febe1284156", + "blk.4.attn_norm.weight": "9e0aaa4bbdd1389890f8abec20533f3ab16d61b872b1a8dbd623023921c660a9", + "blk.4.attn_output.weight": "74467d6f44357d67f452ac49da861468b38e98057017bd38bc9a449f9d3538e6", + "blk.4.attn_q.bias": "8e6d9026fd69b314c1773c5946be2e11daf806ef22a5d91d744344fd30c58c59", + "blk.4.attn_q.weight": "e5bfbafd94a4d530f3769f5edbba8cc08d9b5bee8f66ebf4cb54e69bc0b7f63b", + "blk.4.attn_v.bias": "20c570f92022d9905eb85c0e41d1fdb30db22007a9628b51f512f8268d6c34a2", + "blk.4.attn_v.weight": "9638d459d61da03c9dd34dad985e03c43b4f8a5bc9701a82153478329b0517e0", + "blk.4.ffn_down.weight": "9d91b06e89d52f4365dece7eaeec50f81e52cb2407b333248a81e6e2f84c05b8", + "blk.4.ffn_gate.weight": "bf6350a79c6a6ee9146edfd788b88d4a4c2b54db1aa0adcc1464dbba8a84b646", + "blk.4.ffn_norm.weight": "11a70a6b9f7ce336292f4e3a2c6c92d366d4ee4306ad4fdb1870fde107e9cc31", + "blk.4.ffn_up.weight": "64f23f493d02b147a72a59605e6b7dd1c4c74f6813a38a2a60818bd66f697347", + "blk.5.attn_k.bias": "f6c2c279c0ed686f298ad1e5514b5cd882199341f896abbb2c2129d4c64ce9c5", + "blk.5.attn_k.weight": "0e682f75870abf9efaca10dac5f04c580f42820ecf4e234d43af967019acb86f", + "blk.5.attn_norm.weight": "01efae7653705e741932fcd79dff3be643d7e97f4b5719b887835dffe44b3a82", + "blk.5.attn_output.weight": "69e841d00d196acc489cd70bc5ffbbb63530ac5fabb169d40c4fb3a32ebb8ed8", + 
"blk.5.attn_q.bias": "f3304d76ccd44fed887565857c8e513b1211d89a5d3e81782de507ab3f6fc045", + "blk.5.attn_q.weight": "98612a6b7920a247853ada95c240807d4ca8e43604279e7a2fc9bb265ae40469", + "blk.5.attn_v.bias": "39940a9b353ceed3edfd4a39b985c9520490aa1b9f11749c94fdf6d879d1a259", + "blk.5.attn_v.weight": "839f84b828cf83aecf479a0dc7bc86cce05145ef77dcf29916dc3e0680f5b665", + "blk.5.ffn_down.weight": "1f48cbb0960f15e06ab8a3754ade792995a655856389ddbca629c07e89d1b114", + "blk.5.ffn_gate.weight": "33d8219fce3189e1aab376039896eebd4ad36ebd26a8278cd19b26e4357e4f81", + "blk.5.ffn_norm.weight": "0f4a0f83d37127fa4483f2905cb4f38ef6ddc71584b6cb05632c62a9af313dda", + "blk.5.ffn_up.weight": "22a64a11e5f0a1ff45ca327bf9e1efa258f085ff6a96edc398b7474f725b4514", + "blk.6.attn_k.bias": "baa91df99d4df2d25e8d590bca4e334b97f2d9aa3df8e748fedc8a6188499111", + "blk.6.attn_k.weight": "121f3b9f4b9491996499392e2688a929cafe102a67920b4cb2a039349c43d8eb", + "blk.6.attn_norm.weight": "b4cf987e923d71f2f84c58d20ea8af7576b225bf61952145b489fdd395e3d411", + "blk.6.attn_output.weight": "a112642150a138d54b2a4038042fd33619035a35694771e966f3575856c635d6", + "blk.6.attn_q.bias": "a97ea10469cdfa3fdddf8bad6de683ef99f6170eb8d29d15dcf6bf4bce37c5a3", + "blk.6.attn_q.weight": "d80c787019317a87361de6bbc7df6701357216bdd9b404522cede34a719a5500", + "blk.6.attn_v.bias": "d846269db9cd77ae28da26ba0914cace1b6754bd5301af9c44607085dfcbd2d7", + "blk.6.attn_v.weight": "06567c433e8a391647633291b50828a076ad7c2436106bb9278c60a3f8fccb3b", + "blk.6.ffn_down.weight": "f15f66f56b3c474eac8c6315c5fff07c3e29c6e483d7efd4d303c7f43814be91", + "blk.6.ffn_gate.weight": "47768f89c6da8eefb29adb766ff4eb38c9dfd79320bbc1386248319fcbcf567f", + "blk.6.ffn_norm.weight": "7f8195e6b148212967145fc9d86ce36b699cff0de026042245c2d344f1ef8510", + "blk.6.ffn_up.weight": "53d7707ae4347aadb445289f9f87a008b72df5cb855b00080a605442fdd8edf3", + "blk.7.attn_k.bias": "63e274df3217dde25b8369a383e480fe4f6b403a74385f15ac0b5db71dce2744", + "blk.7.attn_k.weight": "f6fce88602f5945eee09767acbcad387d132614e6da39ae359f2bbf380d94b1f", + "blk.7.attn_norm.weight": "bbf5dc7336c0f9a511afef6bf5efeffd78f1b83940850c3eb7eb20c621b75656", + "blk.7.attn_output.weight": "d9fb907a138396a859cecbfcb377927308dc93c24c7fb52dba5eb59265feadec", + "blk.7.attn_q.bias": "f02ba1318346af77e309f40aee716e2de7ee8cab67e67b17636db9bf40894fb0", + "blk.7.attn_q.weight": "54a691e824be287a61c35c172edc01922ed792d2addeee029afc17ba6c7e11b9", + "blk.7.attn_v.bias": "3a4f182f51e84ce862d558fb2751b91802b65d74596bb14d624808513a8a83ec", + "blk.7.attn_v.weight": "a142fe6e106d3ab484e2dc6f9c72b8fc0a385279dde08deb1ad1fd05ac25deb1", + "blk.7.ffn_down.weight": "8daf7e8c430d183a4d6ab3eb575fafa4b5e31689f68b290c8b370411ad9d0f12", + "blk.7.ffn_gate.weight": "a2a786b45eb660994254b48e2aaf22f3e9821cfb383dee0ba04cc4350a2f8e72", + "blk.7.ffn_norm.weight": "73828bbc8c9610cc139fcf03e96272648cdc291263251fe3a67367408deb69e1", + "blk.7.ffn_up.weight": "e85dd0f63fed449ce16893c5795ea6a050a2d7a66d9534410a227e22c905dafa", + "blk.8.attn_k.bias": "91a752a6e2c364e5ee6a015770fe289aece4911ae6c6bbfe74ac52f465465f93", + "blk.8.attn_k.weight": "99c069e92c43a2efb74e23188256b3cabbbe06399878e681ce203a05d5da378a", + "blk.8.attn_norm.weight": "c76d36d3cc06aa2a9edb1abf9f602bb7ed61ac9d61f8ef7ed736a1e619abe717", + "blk.8.attn_output.weight": "ee5ff156a2625e1f203f65e69b514f9df04bd9a5e82b28e3876e16cf1c6f65c5", + "blk.8.attn_q.bias": "8fbd868a93b330c8b0418b488c5301f42a7eb0c58445a4e515d56777f1d96ed5", + "blk.8.attn_q.weight": "9f20ef86e80098ba52a3a31ebcc315bea3a614dac9cba7ac1db02f156db9b577", 
+ "blk.8.attn_v.bias": "c4813571d5d618742183a7890c0b89cd7f18e210c758f63aad564659bc38a26d", + "blk.8.attn_v.weight": "ea88e1a4cf8bd56e9a88ada427d2b0cd352234827640757ee2a9ed594fb67a53", + "blk.8.ffn_down.weight": "b0d1a7495811580b189aaa3e20ea871d6d01ed7b6c23e59825078ef786944ff2", + "blk.8.ffn_gate.weight": "0a17c0caa0b06721c49b59b2a63a5dcbf744dd1cffa55962b404ba910c658a62", + "blk.8.ffn_norm.weight": "f15f109d4a8e9d1ff7c71fa5bc6373df7ee80c5f7d1de3fa0d4849d747e36bcb", + "blk.8.ffn_up.weight": "bbf4c5c4c5c8a0f9ae8b88e3cc8b86f81b98148722d5a350995af176c0b774f2", + "blk.9.attn_k.bias": "a7f60d962686b8ca60f69643e0e0fa8614688be738fb0b1c6bd54de35c2beb5e", + "blk.9.attn_k.weight": "dd80ce4adb00e338fc04b307e4c18a27071f4ba4397184a24d765e6e4a268ef4", + "blk.9.attn_norm.weight": "721e6487547e2b3986ab4b4e2500ceade59d908bccf4436e1e8031f246deb2bd", + "blk.9.attn_output.weight": "5a800af39107b363861e5f5173483cdcd644d8ac3b0c8a443b9c759d71285db8", + "blk.9.attn_q.bias": "0a19b4925ea8ca8067acc909b058adc327de3874cfc94cc9eb4a106d3f370123", + "blk.9.attn_q.weight": "93e84906684c0c7ede79967236d9fc8344da84a9f1daa04e8295c2c9b6b26a24", + "blk.9.attn_v.bias": "615421f812f821e230ecde4e6da35d868823248355ce7e4e51e2d650ead565f9", + "blk.9.attn_v.weight": "7f4913e289aefd9ceecbdaf9767b1e95303f5d59dd67ecb2cc15768477f4d08e", + "blk.9.ffn_down.weight": "95d1b3933221e87dc4af70dd566daec9498bf358070b8d26f1fc70766a84a152", + "blk.9.ffn_gate.weight": "530f2d04f6a1fbffaaa5f2fbc3a328ebed7b330e3af14b4fc7d8a51b13ad8d42", + "blk.9.ffn_norm.weight": "28077de416217ea1df94b96017bef4cc562ab62e51b1a03a671c70abc29ce52a", + "blk.9.ffn_up.weight": "b87b6190778aaee4695938e24ac6c90dbbee6dce7c5c2ab5bc26ba4564581822" + } diff --git a/convert/tokenizer.go b/convert/tokenizer.go index e7be8e40..74e2efed 100644 --- a/convert/tokenizer.go +++ b/convert/tokenizer.go @@ -100,6 +100,8 @@ func parseTokenizer(fsys fs.FS, specialTokenTypes []string) (*Tokenizer, error) t.Pre = "deepseek-llm" case "21cde974d587f0d54dc8d56b183cc1e6239600172035c68fbd6d4b9f8da0576e": t.Pre = "deepseek-coder" + case "1ff7f41064896984db5d1bb6ff64fa4bc29007d08c1b439e505b7392777a319e": + t.Pre = "qwen2" case "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855": // noop, empty pretokenizer default: From 61676fb5066fe42f4586d8f951548fcb55cd75bf Mon Sep 17 00:00:00 2001 From: Jeffrey Morgan Date: Tue, 14 Jan 2025 12:55:45 -0800 Subject: [PATCH 08/68] llama: move grammar tests to llama_test.go (#8411) --- llama/grammar/grammar_test.go | 107 ---------------------------------- llama/llama_test.go | 104 +++++++++++++++++++++++++++++++++ 2 files changed, 104 insertions(+), 107 deletions(-) delete mode 100644 llama/grammar/grammar_test.go diff --git a/llama/grammar/grammar_test.go b/llama/grammar/grammar_test.go deleted file mode 100644 index 373652ab..00000000 --- a/llama/grammar/grammar_test.go +++ /dev/null @@ -1,107 +0,0 @@ -package grammar - -import ( - "bufio" - "bytes" - "strings" - "testing" - - "github.com/ollama/ollama/llama" -) - -// https://github.com/ollama/ollama/issues/7978 -const issue7978JSONSchema = `{ - "type": "object", - "properties": { - "steps": { - "type": "array", - "items": { - "type": "object", - "properties": { - "explanation": { "type": "string" }, - "output": { "type": "string" }, - "nested": { - "type": "object", - "properties": { - "deep": { "type": "string" } - } - } - }, - "required": ["explanation", "output"], - "additionalProperties": false - } - }, - "final_answer": { "type": "string" }, - "01_numbered_key": { "type": "string" }, - "numbers": 
{ - "type": "array", - "items": { "type": "number" } - }, - "booleans": { - "type": "array", - "items": { "type": "boolean" } - }, - "mixed": { - "type": "array", - "items": { - "oneOf": [ - { "type": "string" }, - { "type": "number" }, - { "type": "boolean" } - ] - } - } - }, - "required": ["steps", "final_answer"], - "additionalProperties": false -}` - -func TestIssue7978(t *testing.T) { - g := llama.SchemaToGrammar([]byte(issue7978JSONSchema)) - if g == nil { - t.Fatal("failed to convert JSON schema to grammar") - } - - t.Logf("grammar:\n%s", g) - t.Log() - - var got string - s := bufio.NewScanner(bytes.NewReader(g)) - for s.Scan() { - line := strings.TrimSpace(s.Text()) - step, _, _ := strings.Cut(line, " ::= ") - step = strings.TrimSpace(step) - if step == "root" { - got = line - } - } - - want := `root ::= "{" space steps-kv "," space final-answer-kv ( "," space ( 01-numbered-key-kv 01-numbered-key-rest | numbers-kv numbers-rest | booleans-kv booleans-rest | mixed-kv ) )? "}" space` - if got != want { - t.Errorf("root =\n%qwant:\n%q", got, want) - } -} - -func TestSchemaToGrammer(t *testing.T) { - cases := []struct { - schema string - prefix []byte // nil is check as nil - }{ - {`invalid`, nil}, - - // Simple heuristic/smoke test - {`{"type":"object"}`, []byte("root ::= object")}, - } - - for _, c := range cases { - t.Run("x", func(t *testing.T) { - g := llama.SchemaToGrammar([]byte(c.schema)) - if c.prefix == nil && g != nil { - t.Fatalf("grammar = %v, want nil", g) - } - if !bytes.HasPrefix(g, c.prefix) { - t.Errorf("grammar = %q, want %q", g, c.prefix) - } - }) - } -} diff --git a/llama/llama_test.go b/llama/llama_test.go index 5f835d68..b550d1d8 100644 --- a/llama/llama_test.go +++ b/llama/llama_test.go @@ -1 +1,105 @@ package llama + +import ( + "bufio" + "bytes" + "strings" + "testing" +) + +// https://github.com/ollama/ollama/issues/7978 +const issue7978JSONSchema = `{ + "type": "object", + "properties": { + "steps": { + "type": "array", + "items": { + "type": "object", + "properties": { + "explanation": { "type": "string" }, + "output": { "type": "string" }, + "nested": { + "type": "object", + "properties": { + "deep": { "type": "string" } + } + } + }, + "required": ["explanation", "output"], + "additionalProperties": false + } + }, + "final_answer": { "type": "string" }, + "01_numbered_key": { "type": "string" }, + "numbers": { + "type": "array", + "items": { "type": "number" } + }, + "booleans": { + "type": "array", + "items": { "type": "boolean" } + }, + "mixed": { + "type": "array", + "items": { + "oneOf": [ + { "type": "string" }, + { "type": "number" }, + { "type": "boolean" } + ] + } + } + }, + "required": ["steps", "final_answer"], + "additionalProperties": false +}` + +func TestIssue7978(t *testing.T) { + g := SchemaToGrammar([]byte(issue7978JSONSchema)) + if g == nil { + t.Fatal("failed to convert JSON schema to grammar") + } + + t.Logf("grammar:\n%s", g) + t.Log() + + var got string + s := bufio.NewScanner(bytes.NewReader(g)) + for s.Scan() { + line := strings.TrimSpace(s.Text()) + step, _, _ := strings.Cut(line, " ::= ") + step = strings.TrimSpace(step) + if step == "root" { + got = line + } + } + + want := `root ::= "{" space steps-kv "," space final-answer-kv ( "," space ( 01-numbered-key-kv 01-numbered-key-rest | numbers-kv numbers-rest | booleans-kv booleans-rest | mixed-kv ) )? 
"}" space` + if got != want { + t.Errorf("root =\n%qwant:\n%q", got, want) + } +} + +func TestSchemaToGrammer(t *testing.T) { + cases := []struct { + schema string + prefix []byte // nil is check as nil + }{ + {`invalid`, nil}, + + // Simple heuristic/smoke test + {`{"type":"object"}`, []byte("root ::= object")}, + } + + for _, c := range cases { + t.Run("x", func(t *testing.T) { + g := SchemaToGrammar([]byte(c.schema)) + if c.prefix == nil && g != nil { + t.Fatalf("grammar = %v, want nil", g) + } + if !bytes.HasPrefix(g, c.prefix) { + t.Errorf("grammar = %q, want %q", g, c.prefix) + } + }) + } +} From 2539f2dbf99ec1b8f44ece884bf2c8678fca3127 Mon Sep 17 00:00:00 2001 From: Patrick Devine Date: Tue, 14 Jan 2025 19:01:24 -0800 Subject: [PATCH 09/68] Fix absolute path names + gguf detection (#8428) --- parser/expandpath_test.go | 114 +++++++++++++++++++++++++++-------- parser/parser.go | 4 +- server/create.go | 29 ++++++++- server/routes_create_test.go | 98 ++++++++++++++++++++++++++++++ 4 files changed, 217 insertions(+), 28 deletions(-) diff --git a/parser/expandpath_test.go b/parser/expandpath_test.go index d27626b0..845f919c 100644 --- a/parser/expandpath_test.go +++ b/parser/expandpath_test.go @@ -4,6 +4,7 @@ import ( "os" "os/user" "path/filepath" + "runtime" "testing" ) @@ -11,14 +12,29 @@ func TestExpandPath(t *testing.T) { mockCurrentUser := func() (*user.User, error) { return &user.User{ Username: "testuser", - HomeDir: "/home/testuser", + HomeDir: func() string { + if os.PathSeparator == '\\' { + return filepath.FromSlash("D:/home/testuser") + } + return "/home/testuser" + }(), }, nil } mockLookupUser := func(username string) (*user.User, error) { fakeUsers := map[string]string{ - "testuser": "/home/testuser", - "anotheruser": "/home/anotheruser", + "testuser": func() string { + if os.PathSeparator == '\\' { + return filepath.FromSlash("D:/home/testuser") + } + return "/home/testuser" + }(), + "anotheruser": func() string { + if os.PathSeparator == '\\' { + return filepath.FromSlash("D:/home/anotheruser") + } + return "/home/anotheruser" + }(), } if homeDir, ok := fakeUsers[username]; ok { @@ -30,30 +46,78 @@ func TestExpandPath(t *testing.T) { return nil, os.ErrNotExist } - tests := []struct { - path string - relativeDir string - expected string - windowsExpected string - shouldErr bool - }{ - {"~", "", "/home/testuser", "D:\\home\\testuser", false}, - {"~/myfolder/myfile.txt", "", "/home/testuser/myfolder/myfile.txt", "D:\\home\\testuser\\myfolder\\myfile.txt", false}, - {"~anotheruser/docs/file.txt", "", "/home/anotheruser/docs/file.txt", "D:\\home\\anotheruser\\docs\\file.txt", false}, - {"~nonexistentuser/file.txt", "", "", "", true}, - {"relative/path/to/file", "", filepath.Join(os.Getenv("PWD"), "relative/path/to/file"), "relative\\path\\to\\file", false}, - {"/absolute/path/to/file", "", "/absolute/path/to/file", "D:\\absolute\\path\\to\\file", false}, - {".", os.Getenv("PWD"), "", os.Getenv("PWD"), false}, - {"somefile", "somedir", filepath.Join(os.Getenv("PWD"), "somedir", "somefile"), "somedir\\somefile", false}, + pwd, err := os.Getwd() + if err != nil { + t.Fatal(err) } - for _, test := range tests { - result, err := expandPathImpl(test.path, test.relativeDir, mockCurrentUser, mockLookupUser) - if (err != nil) != test.shouldErr { - t.Errorf("expandPathImpl(%q) returned error: %v, expected error: %v", test.path, err != nil, test.shouldErr) + t.Run("unix tests", func(t *testing.T) { + if runtime.GOOS == "windows" { + return } - if result != test.expected && result != 
test.windowsExpected && !test.shouldErr { - t.Errorf("expandPathImpl(%q) = %q, want %q", test.path, result, test.expected) + + tests := []struct { + path string + relativeDir string + expected string + shouldErr bool + }{ + {"~", "", "/home/testuser", false}, + {"~/myfolder/myfile.txt", "", "/home/testuser/myfolder/myfile.txt", false}, + {"~anotheruser/docs/file.txt", "", "/home/anotheruser/docs/file.txt", false}, + {"~nonexistentuser/file.txt", "", "", true}, + {"relative/path/to/file", "", filepath.Join(pwd, "relative/path/to/file"), false}, + {"/absolute/path/to/file", "", "/absolute/path/to/file", false}, + {"/absolute/path/to/file", "someotherdir/", "/absolute/path/to/file", false}, + {".", pwd, pwd, false}, + {".", "", pwd, false}, + {"somefile", "somedir", filepath.Join(pwd, "somedir", "somefile"), false}, } - } + + for _, test := range tests { + result, err := expandPathImpl(test.path, test.relativeDir, mockCurrentUser, mockLookupUser) + if (err != nil) != test.shouldErr { + t.Errorf("expandPathImpl(%q) returned error: %v, expected error: %v", test.path, err != nil, test.shouldErr) + } + + if result != test.expected && !test.shouldErr { + t.Errorf("expandPathImpl(%q) = %q, want %q", test.path, result, test.expected) + } + } + }) + + t.Run("windows tests", func(t *testing.T) { + if runtime.GOOS != "windows" { + return + } + + tests := []struct { + path string + relativeDir string + expected string + shouldErr bool + }{ + {"~", "", "D:\\home\\testuser", false}, + {"~/myfolder/myfile.txt", "", "D:\\home\\testuser\\myfolder\\myfile.txt", false}, + {"~anotheruser/docs/file.txt", "", "D:\\home\\anotheruser\\docs\\file.txt", false}, + {"~nonexistentuser/file.txt", "", "", true}, + {"relative\\path\\to\\file", "", filepath.Join(pwd, "relative\\path\\to\\file"), false}, + {"D:\\absolute\\path\\to\\file", "", "D:\\absolute\\path\\to\\file", false}, + {"D:\\absolute\\path\\to\\file", "someotherdir/", "D:\\absolute\\path\\to\\file", false}, + {".", pwd, pwd, false}, + {".", "", pwd, false}, + {"somefile", "somedir", filepath.Join(pwd, "somedir", "somefile"), false}, + } + + for _, test := range tests { + result, err := expandPathImpl(test.path, test.relativeDir, mockCurrentUser, mockLookupUser) + if (err != nil) != test.shouldErr { + t.Errorf("expandPathImpl(%q) returned error: %v, expected error: %v", test.path, err != nil, test.shouldErr) + } + + if result != test.expected && !test.shouldErr { + t.Errorf("expandPathImpl(%q) = %q, want %q", test.path, result, test.expected) + } + } + }) } diff --git a/parser/parser.go b/parser/parser.go index 40acf3e5..d5df479a 100644 --- a/parser/parser.go +++ b/parser/parser.go @@ -564,7 +564,9 @@ func isValidCommand(cmd string) bool { } func expandPathImpl(path, relativeDir string, currentUserFunc func() (*user.User, error), lookupUserFunc func(string) (*user.User, error)) (string, error) { - if strings.HasPrefix(path, "~") { + if filepath.IsAbs(path) || strings.HasPrefix(path, "\\") || strings.HasPrefix(path, "/") { + return filepath.Abs(path) + } else if strings.HasPrefix(path, "~") { var homeDir string if path == "~" || strings.HasPrefix(path, "~/") { diff --git a/server/create.go b/server/create.go index 5856b595..6120c705 100644 --- a/server/create.go +++ b/server/create.go @@ -178,12 +178,37 @@ func convertModelFromFiles(files map[string]string, baseLayers []*layerGGML, isA } func detectModelTypeFromFiles(files map[string]string) string { - // todo make this more robust by actually introspecting the files for fn := range files { if 
strings.HasSuffix(fn, ".safetensors") { return "safetensors" - } else if strings.HasSuffix(fn, ".bin") || strings.HasSuffix(fn, ".gguf") { + } else if strings.HasSuffix(fn, ".gguf") { return "gguf" + } else { + // try to see if we can find a gguf file even without the file extension + blobPath, err := GetBlobsPath(files[fn]) + if err != nil { + slog.Error("error getting blobs path", "file", fn) + return "" + } + + f, err := os.Open(blobPath) + if err != nil { + slog.Error("error reading file", "error", err) + return "" + } + defer f.Close() + + buf := make([]byte, 4) + _, err = f.Read(buf) + if err != nil { + slog.Error("error reading file", "error", err) + return "" + } + + ct := llm.DetectGGMLType(buf) + if ct == "gguf" { + return "gguf" + } } } diff --git a/server/routes_create_test.go b/server/routes_create_test.go index 9c85eb9d..92b9e4aa 100644 --- a/server/routes_create_test.go +++ b/server/routes_create_test.go @@ -3,6 +3,7 @@ package server import ( "bytes" "cmp" + "crypto/sha256" "encoding/json" "fmt" "io" @@ -710,3 +711,100 @@ func TestCreateDetectTemplate(t *testing.T) { }) }) } + +func TestDetectModelTypeFromFiles(t *testing.T) { + t.Run("gguf file", func(t *testing.T) { + _, digest := createBinFile(t, nil, nil) + files := map[string]string{ + "model.gguf": digest, + } + + modelType := detectModelTypeFromFiles(files) + if modelType != "gguf" { + t.Fatalf("expected model type 'gguf', got %q", modelType) + } + }) + + t.Run("gguf file w/o extension", func(t *testing.T) { + _, digest := createBinFile(t, nil, nil) + files := map[string]string{ + fmt.Sprintf("%x", digest): digest, + } + + modelType := detectModelTypeFromFiles(files) + if modelType != "gguf" { + t.Fatalf("expected model type 'gguf', got %q", modelType) + } + }) + + t.Run("safetensors file", func(t *testing.T) { + files := map[string]string{ + "model.safetensors": "sha256:abc123", + } + + modelType := detectModelTypeFromFiles(files) + if modelType != "safetensors" { + t.Fatalf("expected model type 'safetensors', got %q", modelType) + } + }) + + t.Run("unsupported file type", func(t *testing.T) { + p := t.TempDir() + t.Setenv("OLLAMA_MODELS", p) + + data := []byte("12345678") + digest := fmt.Sprintf("sha256:%x", sha256.Sum256(data)) + if err := os.MkdirAll(filepath.Join(p, "blobs"), 0o755); err != nil { + t.Fatal(err) + } + + f, err := os.Create(filepath.Join(p, "blobs", fmt.Sprintf("sha256-%s", strings.TrimPrefix(digest, "sha256:")))) + if err != nil { + t.Fatal(err) + } + defer f.Close() + + if _, err := f.Write(data); err != nil { + t.Fatal(err) + } + + files := map[string]string{ + "model.bin": digest, + } + + modelType := detectModelTypeFromFiles(files) + if modelType != "" { + t.Fatalf("expected empty model type for unsupported file, got %q", modelType) + } + }) + + t.Run("file with less than 4 bytes", func(t *testing.T) { + p := t.TempDir() + t.Setenv("OLLAMA_MODELS", p) + + data := []byte("123") + digest := fmt.Sprintf("sha256:%x", sha256.Sum256(data)) + if err := os.MkdirAll(filepath.Join(p, "blobs"), 0o755); err != nil { + t.Fatal(err) + } + + f, err := os.Create(filepath.Join(p, "blobs", fmt.Sprintf("sha256-%s", strings.TrimPrefix(digest, "sha256:")))) + if err != nil { + t.Fatal(err) + } + defer f.Close() + + if _, err := f.Write(data); err != nil { + t.Fatal(err) + } + + files := map[string]string{ + "noext": digest, + } + + modelType := detectModelTypeFromFiles(files) + if modelType != "" { + t.Fatalf("expected empty model type for small file, got %q", modelType) + } + }) +} From 
a041b4df7cd8045b1410cdd6988c660427de12ad Mon Sep 17 00:00:00 2001 From: Gloryjaw <108608120+Gloryjaw@users.noreply.github.com> Date: Thu, 16 Jan 2025 01:19:12 +0530 Subject: [PATCH 10/68] docs: fix path to examples (#8438) --- docs/README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/README.md b/docs/README.md index b6221041..4d3b7140 100644 --- a/docs/README.md +++ b/docs/README.md @@ -2,7 +2,7 @@ ### Getting Started * [Quickstart](../README.md#quickstart) -* [Examples](../examples) +* [Examples](./examples.md) * [Importing models](./import.md) * [Linux Documentation](./linux.md) * [Windows Documentation](./windows.md) From 93a8daf285af45ed71544e79aae0cb15245e75f4 Mon Sep 17 00:00:00 2001 From: Josh <76125168+joshyan1@users.noreply.github.com> Date: Wed, 15 Jan 2025 16:31:22 -0800 Subject: [PATCH 11/68] convert: import support for command-r models from safetensors (#6063) --------- Co-authored-by: Patrick Devine --- convert/convert.go | 2 + convert/convert_commandr.go | 76 ++++ convert/convert_test.go | 1 + convert/testdata/c4ai-command-r-v01.json | 344 ++++++++++++++++++ template/command-r.gotmpl | 67 ++++ template/command-r.json | 6 + template/index.json | 4 + .../system-user-assistant-user | 1 + template/testdata/command-r.gotmpl/user | 1 + .../command-r.gotmpl/user-assistant-user | 1 + 10 files changed, 503 insertions(+) create mode 100644 convert/convert_commandr.go create mode 100644 convert/testdata/c4ai-command-r-v01.json create mode 100644 template/command-r.gotmpl create mode 100644 template/command-r.json create mode 100644 template/testdata/command-r.gotmpl/system-user-assistant-user create mode 100644 template/testdata/command-r.gotmpl/user create mode 100644 template/testdata/command-r.gotmpl/user-assistant-user diff --git a/convert/convert.go b/convert/convert.go index 639e6ad4..fe559234 100644 --- a/convert/convert.go +++ b/convert/convert.go @@ -191,6 +191,8 @@ func ConvertModel(fsys fs.FS, ws io.WriteSeeker) error { conv = &qwen2Model{} case "BertModel": conv = &bertModel{} + case "CohereForCausalLM": + conv = &commandrModel{} default: return errors.New("unsupported architecture") } diff --git a/convert/convert_commandr.go b/convert/convert_commandr.go new file mode 100644 index 00000000..a74cae96 --- /dev/null +++ b/convert/convert_commandr.go @@ -0,0 +1,76 @@ +package convert + +import ( + "cmp" + + "github.com/ollama/ollama/llm" +) + +type commandrModel struct { + ModelParameters + MaxPositionEmbeddings uint32 `json:"max_position_embeddings"` + HiddenSize uint32 `json:"hidden_size"` + HiddenLayers uint32 `json:"num_hidden_layers"` + IntermediateSize uint32 `json:"intermediate_size"` + NumAttentionHeads uint32 `json:"num_attention_heads"` + NumKeyValueHeads uint32 `json:"num_key_value_heads"` + LayerNormEPS float32 `json:"layer_norm_eps"` + RopeTheta float32 `json:"rope_theta"` + UseQKNorm bool `json:"use_qk_norm"` + MaxLength uint32 `json:"model_max_length"` + LogitScale float32 `json:"logit_scale"` + NCtx uint32 `json:"n_ctx"` +} + +var _ ModelConverter = (*commandrModel)(nil) + +func (p *commandrModel) KV(t *Tokenizer) llm.KV { + kv := p.ModelParameters.KV(t) + kv["general.architecture"] = "command-r" + kv["general.name"] = "command-r" + kv["command-r.context_length"] = cmp.Or(p.MaxLength, p.MaxPositionEmbeddings, p.NCtx) + kv["command-r.embedding_length"] = p.HiddenSize + kv["command-r.block_count"] = p.HiddenLayers + kv["command-r.feed_forward_length"] = p.IntermediateSize + kv["command-r.attention.head_count"] = p.NumAttentionHeads + 
kv["command-r.attention.head_count_kv"] = p.NumKeyValueHeads + kv["command-r.attention.layer_norm_epsilon"] = p.LayerNormEPS + kv["command-r.rope.freq_base"] = p.RopeTheta + kv["command-r.max_position_embeddings"] = cmp.Or(p.MaxLength, p.MaxPositionEmbeddings) + kv["command-r.logit_scale"] = p.LogitScale + kv["command-r.rope.scaling.type"] = "none" + + return kv +} + +func (p *commandrModel) Tensors(ts []Tensor) []llm.Tensor { + var out []llm.Tensor + for _, t := range ts { + out = append(out, llm.Tensor{ + Name: t.Name(), + Kind: t.Kind(), + Shape: t.Shape(), + WriterTo: t, + }) + } + + return out +} + +func (p *commandrModel) Replacements() []string { + return []string{ + "self_attn.q_norm", "attn_q_norm", + "self_attn.k_norm", "attn_k_norm", + "model.layers", "blk", + "input_layernorm", "attn_norm", + "mlp.down_proj", "ffn_down", + "mlp.gate_proj", "ffn_gate", + "mlp.up_proj", "ffn_up", + "self_attn.k_proj", "attn_k", + "self_attn.o_proj", "attn_output", + "self_attn.q_proj", "attn_q", + "self_attn.v_proj", "attn_v", + "model.norm", "output_norm", + "model.embed_tokens", "token_embd", + } +} diff --git a/convert/convert_test.go b/convert/convert_test.go index a98b956a..bb213ce2 100644 --- a/convert/convert_test.go +++ b/convert/convert_test.go @@ -109,6 +109,7 @@ func TestConvertModel(t *testing.T) { "all-MiniLM-L6-v2", "gemma-2-9b-it", "Qwen2.5-0.5B-Instruct", + "c4ai-command-r-v01", } for i := range cases { diff --git a/convert/testdata/c4ai-command-r-v01.json b/convert/testdata/c4ai-command-r-v01.json new file mode 100644 index 00000000..935ec35a --- /dev/null +++ b/convert/testdata/c4ai-command-r-v01.json @@ -0,0 +1,344 @@ +{ + "general.architecture": "command-r", + "general.name": "command-r", + "command-r.attention.head_count": "64", + "command-r.attention.head_count_kv": "64", + "command-r.attention.layer_norm_epsilon": "1e-05", + "command-r.block_count": "40", + "command-r.context_length": "131072", + "command-r.embedding_length": "8192", + "command-r.feed_forward_length": "22528", + "command-r.logit_scale": "0.0625", + "command-r.rope.freq_base": "8e+06", + "command-r.rope.scaling.type": "none", + "tokenizer.ggml.add_bos_token": "true", + "tokenizer.ggml.add_eos_token": "false", + "tokenizer.ggml.bos_token_id": "5", + "tokenizer.ggml.eos_token_id": "255001", + "tokenizer.ggml.merges": "902a060cac8884a5793d2a857dd2e53a259de46c8d08c4deb243c239671e1350", + "tokenizer.ggml.model": "gpt2", + "tokenizer.ggml.padding_token_id": "0", + "tokenizer.ggml.token_type": "b7a352ccd1c99d4413bcf452c2db707b0526d0e1216616b865560fab80296462", + "tokenizer.ggml.tokens": "815ac90ff23565081522d7258f46648c8a0619eb847a9c7c31b238a9b984e4ae", + "blk.0.attn_k.weight": "6fcfdb466f9ceb1229404ce4ec4e480751b8d00da12707a11783dad7256cb864", + "blk.0.attn_norm.weight": "6063317f731371864049c7704a70772f1eb632194201ebdc2ed0f8e483507c72", + "blk.0.attn_output.weight": "920f49716a1e2fc73b6794ec777947f1c122701e63ed302422ac89e90f06e9da", + "blk.0.attn_q.weight": "ddbcd7cde197e632564ac58e4f25d9e3a8ca52917329eeb6081eb41a797932ab", + "blk.0.attn_v.weight": "318fc02a189d87420f0cbf57f47f11e00c21ec1ed472ce0a2a895b44f7fa0fca", + "blk.0.ffn_down.weight": "aa71975b6eb1f4c77b03d2ac4a194cf8d95718efac741bb12f0f3ff79a27f9bc", + "blk.0.ffn_gate.weight": "42967702fa0bc738b88dc50007ace26dbe74a5a9e0978124dd093f818241a9e1", + "blk.0.ffn_up.weight": "5282c8788b086bd30f46525e7995a17464882a72703fd27165491afdd8bfd4af", + "blk.1.attn_k.weight": "cd248882e64fd2c3402c44790ebe12440133dc671b6893fdad0564c461973adc", + "blk.1.attn_norm.weight": 
"ba84e1c8fd30af6ec94208db4078befac8c921aad3acb887812887f3282ea2be", + "blk.1.attn_output.weight": "2efa3ef7c5666ccceb05e339b83ad680cc0d2c3ec78203f5da5959f23a80e14f", + "blk.1.attn_q.weight": "5106f2e255358a1303c22e8b5f0ec044852bb30a866c52cabefd30017a7a6b7d", + "blk.1.attn_v.weight": "a211a634a1a5df1d5f973645438be0461dd922210f9747c6b04e386c7f1ebe95", + "blk.1.ffn_down.weight": "37093afe48d32c578ec956c9ed85242cd000d6aa979e60526aafa10c822dbb10", + "blk.1.ffn_gate.weight": "469860819e9159caefb1aad0bc66db790f3393f05fd87b08e52256a7ed256543", + "blk.1.ffn_up.weight": "736742c97d35d1a011f9cafd3c0ce947ad559bb2fba6da73c816f6bfd0fa9aeb", + "blk.2.attn_k.weight": "92c219d92804d832ab404bd6dc7339c90877bb7cf405dd030c121f8b27757739", + "blk.2.attn_norm.weight": "61e4466069474b76b6d1e702566420eb669faf3556b00ff7b824784aca13a2d6", + "blk.2.attn_output.weight": "d2fb38a2b2171fd91caf037faa585a62225819aa232d86fd4f7f9d2c3c8a45e9", + "blk.2.attn_q.weight": "f6faf5cc6844e3daa4f9f68d90f5458c64879de68a7728860e38374e30c3429d", + "blk.2.attn_v.weight": "f340ef8f7341d987a6f37c0e9afe0aef5be67be00c0ce5f57612daf73319cce1", + "blk.2.ffn_down.weight": "c7be61a701d779860b621b143fb6365b607bf99ec7c0f153b07908ac8120885a", + "blk.2.ffn_gate.weight": "b64f0878187bd3392abfa4c3e8ad2f8b4c133903e54246747ff8f3b4639ad83e", + "blk.2.ffn_up.weight": "50b11c712652e90ee7428dbb45cffebb80662ac982bc72bd9eafff361b5eb5a8", + "blk.3.attn_k.weight": "2b7bcbe9ee5c9c630c8c8d7483887e78b73581016f4cbb6933db2a147a25f431", + "blk.3.attn_norm.weight": "0181dac7f4eee7252980323e8032cf339bef2046ce0a16c0fd72af7c98a8a37b", + "blk.3.attn_output.weight": "aef8843b636ce231da9e7c9acbee197883cc15df0e2887709324c6a50f16da7b", + "blk.3.attn_q.weight": "55404130fa10e81322d33eb378aa0de31a92990ce7730f1338c0ace0406bb1b1", + "blk.3.attn_v.weight": "76f7fb8040d82b957d689ce34fea2302a6640ad5bbaa0052ad2b7ebce270c33d", + "blk.3.ffn_down.weight": "648628933eff3b357c3729c33c5b1ae51c28e59b9c19acd1601a2ff7c5d5d9a5", + "blk.3.ffn_gate.weight": "6a588885d16e98d5f50ebed05af089154f680085ca9c97691e5b489088630a4a", + "blk.3.ffn_up.weight": "e12455a1d702f4986e1a663493e3d5102b367af74d45557522002a35d63ecac2", + "blk.4.attn_k.weight": "40d943380a8a85e4eab147934bf6e16f23cc8ab753f6636526382c074d182288", + "blk.4.attn_norm.weight": "4ab2c098983d4599fe540eef624c4df954adb7473faebda7471ef0ba4134814c", + "blk.4.attn_output.weight": "d14b91e40f58bf4a3c8c2eca0b12bb541de406574af39027d56f6c588a147082", + "blk.4.attn_q.weight": "e1224960a3562107488589f883fa32414bae41712fa8dbd47c5f3e3a7801452f", + "blk.4.attn_v.weight": "063f297bc4aa6e709fc32c4c32e35af7d07d80e83cb939b76adbba858006c03d", + "blk.4.ffn_down.weight": "f88a18020c5e1caaa29596895eb348e76ee5bfad27ed57651a86cd8cd1f9b5aa", + "blk.4.ffn_gate.weight": "48e7e1eed3fb52e92e61d3557dd0ec002418327090e034ce4322fd68542266f8", + "blk.4.ffn_up.weight": "1ca8a7aa17355b6ce0d9ad5539fdad3899fa47fd359c285fbfb31f19f47bf073", + "blk.5.attn_k.weight": "2bdf15f8e73d068d972380f25d207004cf0bf3b5bfa46946803ba6fba07d9175", + "blk.5.attn_norm.weight": "60448d7cde6e1b6467aa31bdea012e39cdb08c88081cee7d102dca4f93f766ef", + "blk.5.attn_output.weight": "f9f687d7c457537f9fca8a4087a59f1c3bebfaf5537b94e42c831a13224f7799", + "blk.5.attn_q.weight": "987db7a2ad68657a92625e1980effbb1f79697c2183f2b9f3b3a0570c51b0ab9", + "blk.5.attn_v.weight": "cf696891148f3e4783ad1d20f93462ae091eb8651c656bba9b662253b6263e02", + "blk.5.ffn_down.weight": "c0662b0bd0929136005fb9d691fdd9b2c33867d9ce9622339a6a456b720b059a", + "blk.5.ffn_gate.weight": 
"200bbdfab615d7a3a84719b6ced7751e3ce52757ef212d96f87798bc1de5e987", + "blk.5.ffn_up.weight": "df5d23e7e035fb1b9d163da7ddfdfe38da6a37e86e96534dc02ad20f011b55b3", + "blk.6.attn_k.weight": "c0dae2d272a7c5a2fa004bbb8475dbab362fc1f6d008e73d5a4434a9382ac6ba", + "blk.6.attn_norm.weight": "51c57ac8b55e04354d5dca6bb9c0cf4177639d3b038e80209e33036209688f64", + "blk.6.attn_output.weight": "229d97892c62f85bcdf431675250e01c976ad69ffa450b01fb543bf88f14a2fb", + "blk.6.attn_q.weight": "c20e49621821bd46ed156e6823864a5bda4f317750e71ab8dc54e44eb48cf7c2", + "blk.6.attn_v.weight": "53ceb1a2ee43fce3c7b5b33c58a9fc5ee7f44dc1c6f29bc9dbefc37582102dc9", + "blk.6.ffn_down.weight": "7923c943b7629d560a032d1efa210d1d75c6692140f1be94464ee7ed24f44ed0", + "blk.6.ffn_gate.weight": "57593d350361af753a6a39f53b066282634c0fb44f396f6f2966a574b01d8f8c", + "blk.6.ffn_up.weight": "327b6a7a387098b8899d3ded04a4d4e7c658ca61b80d4e7b17594be232721602", + "blk.7.attn_k.weight": "9ca48b87a10116fd8868e62b76f211d4bb91f166096be9061439ee2e1c3a5c20", + "blk.7.attn_norm.weight": "cd56cfcc4e2ad6b96e23ea7b0d32b4caf236107d99a0b22c56760b62e63c8cfd", + "blk.7.attn_output.weight": "7352b509a03cae2491ffc060e577d189341a0f861233f18c96f9d275dc4234bf", + "blk.7.attn_q.weight": "2b3791c8c008c33ddbe12bedba8191322ceea2dcce5cf0eb7a93d40ad254e672", + "blk.7.attn_v.weight": "3ae721d52466487a3d48150581e57f6d64ea1e83ab929f23b28c3d777422eeb6", + "blk.7.ffn_down.weight": "3b6fa8ececdb3c34af3a5363863d6f94289c1c95bf47fce3a3ddcf184c5f0848", + "blk.7.ffn_gate.weight": "dbd7df6c5ae5eb4adb859f0d36453813a4e289a359a1ba8f72d67fcbf21c3e22", + "blk.7.ffn_up.weight": "de68380a334b4c5cfd4c318b0e9854aec59bd79aa0f0c30af3f56414f83482b0", + "blk.8.attn_k.weight": "7303c4e4480abc72a7ee271811311199245fb5c2ea27a2bd3b8cad3a53a03c27", + "blk.8.attn_norm.weight": "2e3d1921898d1b943ce1a1b6818546c8b471d6d542da24f51a8b514b8c3dd4ef", + "blk.8.attn_output.weight": "30421520887b66bf97a18dbcdc283bc8d0b60590b612fd638a319a6eae923227", + "blk.8.attn_q.weight": "73e064d5433c9b500068a1c31744dbd53f4ade298fb450a0e8c97f62cf1f8a8d", + "blk.8.attn_v.weight": "27e21f8b9a9a8533e8178ca34a72aa1d786393d57302b7806dcdf3e51de511a8", + "blk.8.ffn_down.weight": "bf694bd8e00047982108000e7b3dee7b225db8b19abc595e5697b6bbefd92e7c", + "blk.8.ffn_gate.weight": "d55fdbf8606d9141b774b0500c58944fd1253b9e69d1f765eaa9a680b9f2ca40", + "blk.8.ffn_up.weight": "1ae3f580655e7c8e8dd6c34fa4ac574fdfc5e3f1a8536da0c5442d3a2976f0e7", + "blk.9.attn_k.weight": "b18080626012d8aabcf78542d6c7bf31c712bf55a70172fbfe173fcf34481036", + "blk.9.attn_norm.weight": "2e3620620dc09998c6d3063a7d5de5433fbbae8c11e5b00d13f145d39140e162", + "blk.9.attn_output.weight": "69c3c0e27ef1c0fc933eeb7b612b70909f18cde238873c0d576a2ba9714ef174", + "blk.9.attn_q.weight": "68330e5aa28a28873c9a6e67f032186ef651df2df5844e0f27094ba349fbe4ab", + "blk.9.attn_v.weight": "3df8d45a102be082d0793a51cb82aa62a43cd0e9d047ba4115ca0f2414b39325", + "blk.9.ffn_down.weight": "1d6cc162b73745b135b4f040a0aac3c06d5135a3dc5b2421e7ee2af48662fd7f", + "blk.9.ffn_gate.weight": "034a9d40fb1e32b534b45f4bccd65cbe43c4a6a3f5d01132bd245ca0005de5fc", + "blk.9.ffn_up.weight": "c838c38d0e1a0ac0da17eb2a66023ed31929f07d8fcfe1cc546df26096c91f0c", + "blk.10.attn_k.weight": "a78507cb72f744b86ceaa032596e74e5571c822d0226d334881169addb32cbd5", + "blk.10.attn_norm.weight": "35f48d0b28ee0e6b4cad4e983925737562d64824be5b168b3e26df3d6b260cf1", + "blk.10.attn_output.weight": "53712db06796de39b131323e7abf9a58551b6d52da6db66a471580386d396252", + "blk.10.attn_q.weight": 
"efe08429ba196026b81cd1c471e1c7418afd9e966659feb3936b674aa0803b58", + "blk.10.attn_v.weight": "7ec6055e134f89da0cbe79ec9f13ef2e442ac584b1f03c3e13e7d0cdad0078bd", + "blk.10.ffn_down.weight": "37e66af4bcd1f3079e841e892255b8255070655901864ea3a8c602a7f681a640", + "blk.10.ffn_gate.weight": "1825282bc34830d371c6edcc3c1e73e6ecc1e10f4aea0122dbb7acc1d6f7b1bc", + "blk.10.ffn_up.weight": "819b3b276a4d4c14a35ed6682d5ef18a5e8ed468e5ce3f12e8c75ec18ac20ec4", + "blk.11.attn_k.weight": "5327e6a2af82dfff0619a14971f5864a15553c36fead84e1af42c7630f2729c6", + "blk.11.attn_norm.weight": "fec363b3c4a43036d2c635fb8aa9e122dd87ee79811839f2f6cd955be3373e7b", + "blk.11.attn_output.weight": "ccf7b38f18ee8798b8a6a35018e2df3eb3e007de62876befb68025dd66c79763", + "blk.11.attn_q.weight": "da8c4a1c824ffe174e39f126cd72f7ef83c56aff1259d452a1212de80f98f5e9", + "blk.11.attn_v.weight": "d17ae6bb77f03982b55d341eb67acb5969e9ad3da5994b96eafc09793dcfe3a0", + "blk.11.ffn_down.weight": "a6bac521e2791345f22c57205fa1c2f2f687794dfd24d0e98d50ae0d0eb6088a", + "blk.11.ffn_gate.weight": "5ed902c488cb51ba5635f3df08258c5f84f31a679a00211ea5f9d8b824ef6d9d", + "blk.11.ffn_up.weight": "ee9f1437eb890d2cf9df2574afa1cecf20aafdd847cd75b152d7eb74419afd34", + "blk.12.attn_k.weight": "5a069c06e1019b0f889088e67458f7a11ec77fa190ada6069e46211f62219947", + "blk.12.attn_norm.weight": "194d7e5fcc8c49aea62daf1940532419cf3c505afdce6be377286b677db5db8f", + "blk.12.attn_output.weight": "6534995fd4d6fecb55e317add4b1723aba4d825e1e9471d0b08813dfdc247176", + "blk.12.attn_q.weight": "4ab51ca519b5995581fa34f846276feca3b907ef2b51f192f6cc0b3263c3f5a2", + "blk.12.attn_v.weight": "5652ca3fa81ef9a1ac1543d71fc6813f8517f8ec54b25c701f6f98061614830f", + "blk.12.ffn_down.weight": "4b2c263f54c88516b8eb273bb8d9615b01c5c8b484dc70358adb91b50b300edd", + "blk.12.ffn_gate.weight": "8f50c3c3e3e8568991d6c1b0e74b500cf4f208e7700bbb8e87c3f6a6d359b6b5", + "blk.12.ffn_up.weight": "1c1a581fec1fbe959e1427fa513f400100b5e1ee9d83932630be9905fb49c231", + "blk.13.attn_k.weight": "efd7a38c46f08d8376d82974f33c644e3a02220e142d63b1704718699a8a884c", + "blk.13.attn_norm.weight": "d28fa4f1bd75abbd063b0e622e08f579c89cd0c0c5ce63c1952ec9f944f8ee13", + "blk.13.attn_output.weight": "71e0068a639288718bdb70a6cfdefd50bc8b3ec3993347a65129e70001ca5827", + "blk.13.attn_q.weight": "b97077adc92cff07a2e07d80ee38f214ad8713571c69cd5c70ebd43dc501ac87", + "blk.13.attn_v.weight": "79b3e2749ab4b459c81e96e322b215f1e8af645eb346e176c326bd00cf6ed2fd", + "blk.13.ffn_down.weight": "9f8687d11effa1db7cfecf7bec5631734bcf2962aad74a9f519144491e08ec85", + "blk.13.ffn_gate.weight": "7d14dfa0543852e7777fe8fff29ca533744cbcf1ebcf10067e5adfc4eb345e65", + "blk.13.ffn_up.weight": "852b9527b97fdab211ff3f832a660ee1d93ccb56906144c50f01319a6e8ee615", + "blk.14.attn_k.weight": "79e926b20f36f66d58226cb358881f2f68ae7b468787d33cafae5110287a14a0", + "blk.14.attn_norm.weight": "97d481b63deb0df6142c2c6cd23043720c62eb609e390f47a7113751c79974ec", + "blk.14.attn_output.weight": "aa6e94d7176d5c79fbb89b96e5f13ce75702ce3dd23ee52986446da436a6c3d6", + "blk.14.attn_q.weight": "214becb6d1bb460da9fb8ace0f99b9a5afa9edf7aa7acc19606c7401b11d6305", + "blk.14.attn_v.weight": "488b0e6d7f1a7a2ed0972aaa6d10ef9c775ee5373460324efcf5b3e3da9311df", + "blk.14.ffn_down.weight": "29c7ad16cf9542e30996a1a01ab95b844533b28051f04cc7949c371afb796471", + "blk.14.ffn_gate.weight": "b7ef208f2b054803665b377f5a5980c122c026841809cf855c6ba06d1c3a885a", + "blk.14.ffn_up.weight": "76a5cc28100748d79c4398ce7b9176aab4d661548b6293a82f99144812e5b70e", + "blk.15.attn_k.weight": 
"a6b8f9e98ab878fa7ebc5d080978ebf2d050acc2ab2fa8ea9188eb10e27702c8", + "blk.15.attn_norm.weight": "a26d07a9752d6dccb68e3a8a2a49fd0752cdd0a415e05547819bc37d9ba63d5e", + "blk.15.attn_output.weight": "c63616c69048ccbee801e05be4f56d21fda21aa0cc470f41d57c31b4d9283a4d", + "blk.15.attn_q.weight": "fd595a67bf96c6ba16eb148a9d02fa52fa3c1d33ed10be28a08f851409fd6e64", + "blk.15.attn_v.weight": "1c5c9d33fa07c05d5f4ed0032c6c4aa83d863f0d31c94a66109d239dcd03cea3", + "blk.15.ffn_down.weight": "585ea62ab8aff7d7d212ea5c1a03226fda6b68370c890b776834af70c948dcbc", + "blk.15.ffn_gate.weight": "a13c63f86f879b03a573d5dd2a25cfd1f4dc73e8132e6454ecc23e538b4cdf6f", + "blk.15.ffn_up.weight": "f7112450f57c12fcd511f049e0dc0b541625a107a7901c3261ed9e984299f65c", + "blk.16.attn_k.weight": "2d2c8b11dd71fba6d1c106aa1673c113a5448653cca7eab897c8739212ed5003", + "blk.16.attn_norm.weight": "95c2ec7be9469690e18a9a1779684acb3e9da44b13e263a0da840305646fbf8a", + "blk.16.attn_output.weight": "31a65046e677f54dae654ded4e733479fcc0f7283d83076b7dc7cbcae8528230", + "blk.16.attn_q.weight": "bfc6292b9c6d49b7118d08060242a138182eb182d136ba5dfaf469437c16081d", + "blk.16.attn_v.weight": "68f81d037340217d87c7853ff4d6edfbc46d9e827ee6d5bff7c3f6238e3a95ad", + "blk.16.ffn_down.weight": "bbd6629691950cef4d5113e1c6670e91b216a9b872cb92cee02dfda4d6c4f7b8", + "blk.16.ffn_gate.weight": "63cb56f282b7401ed6c76e5bb6fdf1bf68a64f9af0c82c014209b55bcb5191d0", + "blk.16.ffn_up.weight": "b54f39a2541063cbfb6f713aa81c3b69a04100e999aa2ebbeec195dc382eceec", + "blk.17.attn_k.weight": "3d9ba49799cc56664ec30a002bcad61eb651294212a68c3ddb573eb042aef5a4", + "blk.17.attn_norm.weight": "42ee0db4b9d63257bca0012a30b12737ead1caafeb5ed3d93c8f48ffec4b46de", + "blk.17.attn_output.weight": "a38fd100f05c9041c592bc739e287de0b10d08ef2bda41a879225bdca9002f71", + "blk.17.attn_q.weight": "8a3bee285b0180a9eb35662e449ee4cbe16d992bdd48fb3a94bc4a347728cfa2", + "blk.17.attn_v.weight": "d7f8f1b8b863494ed4392a1656775912e9b264ad36016547b12e832a1d6757d6", + "blk.17.ffn_down.weight": "bb7ee58f61da8630972e25b621996fbe8ec06f4dc9ab1e268ab5b120c526ca28", + "blk.17.ffn_gate.weight": "6b652dbf167fee09a45ebfd78d500ff6548fb2756dbe5343ffec3f7e6207179f", + "blk.17.ffn_up.weight": "3b67f727e55e742715de978fab80457781e7a3762bc48f79d13b45dcb8de664c", + "blk.18.attn_k.weight": "ff7fe57c57b90c6fcc0aefc39ec24593c3a7d1ea1c23770480075a015450e0f5", + "blk.18.attn_norm.weight": "1d40faca082d2633ef0ccf19e121870dd6c7c3e2154607c7f3543fa96e99cb2d", + "blk.18.attn_output.weight": "9adfecaaa397a92db4687efd5fcabfa0daef9e6b0493763b7ff5ebc185c43a6c", + "blk.18.attn_q.weight": "ad1803eb9b291948639277afe981e666b07167eb3fcae903ba5b73bf86d8f50b", + "blk.18.attn_v.weight": "308cf23399adccf27401a4ab60d74dac6fb9d4cd4b9c5940d9145118d1881b34", + "blk.18.ffn_down.weight": "7de4ac9a561fb580619b745687dfd7ca8a69ef70471dee978741b80e9ff7bead", + "blk.18.ffn_gate.weight": "0c66970f696b33bd5ee8f1f2fbcb41fd78fa5ccabdc927e11a4d5a4089f19c69", + "blk.18.ffn_up.weight": "66a42e988e8a1f468fabf976c48e9e4bb045eaac6916ef16555ac101cd674abc", + "blk.19.attn_k.weight": "a928ab50390bacbcebe2e4b66922498134ce22d7b93beaa87d6cf4ab52eb7174", + "blk.19.attn_norm.weight": "b4a02c55b46c2a96aec9c64a254087cf48e6c1d4b6f31782c77a46fc4daebad1", + "blk.19.attn_output.weight": "b768319c641dff1eac5d1f8ceb960c9899c795bf2b24c1d6bf70aa24fda45f77", + "blk.19.attn_q.weight": "79ef3f57d187d3954a26362096e1b6c222d76f537dff73e034d6e9999935b8bc", + "blk.19.attn_v.weight": "ce13d6b13e24fcb2d5bc6a2662e5bd295b31b12db10a6d0307f86cf29b8d5001", + "blk.19.ffn_down.weight": 
"cf90d7e2137482cfd50934a8223ad774621d08554969da80a9712df5e6227eb0", + "blk.19.ffn_gate.weight": "71ce30150f003b6eeb3bf7464e05b6ae615f135110d8e47f0a47fd973e537c0f", + "blk.19.ffn_up.weight": "7f92aca0cc29866633feec701ec01a85a8ee2fd4e2b9630173a6cffb1d9d50ee", + "blk.20.attn_k.weight": "a2df23159d6fb74ef28e14b61028fe8b00a693a2fc9234a980be74f20b958682", + "blk.20.attn_norm.weight": "c6cd5f1b096fc5efa4eb59ca1c8c4bd28730f3dcedd59a63601663eccc6724ed", + "blk.20.attn_output.weight": "896a8a166d0f006d4b09867ae4345426303cbc3fb13a18d3d4e1bde00f16dbdf", + "blk.20.attn_q.weight": "01eb79588fe61baea0da43e99f4dc5939590e1bafd01e12dadb8326f102bfea2", + "blk.20.attn_v.weight": "bd39630fdd5a7c859ac1addaf53e63faf524c3f32f5f4896d86b6e746b1d5c06", + "blk.20.ffn_down.weight": "0304a5d39957a0e3f031c4bcc4549a135d396c8d97c8d276fd1c823ce86560c2", + "blk.20.ffn_gate.weight": "117b79d595b1dca0c8b37586beaecc4d84411507276212dc286cde7fc36c9bef", + "blk.20.ffn_up.weight": "6e799346db145c125f01783539749d3828fcc451cd4f10c5352f047a47e28714", + "blk.21.attn_k.weight": "1c37e4c0664147e775bb006b226b9553e3421140cd96288ea755f81731ab80ba", + "blk.21.attn_norm.weight": "00ae783a29000ccda5e4bdbff03df0752fb82805dc3f9b987500ebd80714476e", + "blk.21.attn_output.weight": "7588b84f9fb19f15095b5265c60b4a4e7ae74bcc47d4607dfa5d0bfab6f136cb", + "blk.21.attn_q.weight": "a65f1c0dd06d45bb97532d3e932689c1eecfe7359089b39174a96a149335cbc1", + "blk.21.attn_v.weight": "4220b77e7d5e8709b4eef33a679b5dad11f297085ef44c9977f9e54ef08f7a2d", + "blk.21.ffn_down.weight": "b8c082a0530d4b5328e67db0df84c5498f2af956de23c639fa0198ffea853950", + "blk.21.ffn_gate.weight": "cd1b656ee72d00e9835ef667c19ef89a88de261eb8eb7c0e936e0f9ddf83ef9f", + "blk.21.ffn_up.weight": "dc445f73e36ec7a3bd86884186b728f8e0187f32848c3b8b69d4d41f8571bf31", + "blk.22.attn_k.weight": "e37cf0b893ec8b9ee8c78dd139b8d9c45cb997a3bc0c3d93a70ca1c3f6af8859", + "blk.22.attn_norm.weight": "248a27838d3c46cc03a5c312facc84e2e0e2c990ef8401e93da25918497f88d1", + "blk.22.attn_output.weight": "fc191a18f6d18332c66761f7ab28008bfe295dd1f5c8741a2488442f9e00d0f5", + "blk.22.attn_q.weight": "4b193a2ab8bc2b085db18f2bf3eeba26e02b537b2cdd738160c8f14b165d0f5a", + "blk.22.attn_v.weight": "7a60ce5ccac7e045e55ba1e1e85bd2a0f93f8c781daee96c5223665e22f0c666", + "blk.22.ffn_down.weight": "e0a34fb4244e2c7168f3dbaa1904c15d339ec39999cdf27128bbaf619ee0a237", + "blk.22.ffn_gate.weight": "8bac872d4b8549c8812f927efa309f1792b524f33601095fff61b826de5a5615", + "blk.22.ffn_up.weight": "b67fa2b94dd901b6ec64c0853ce8ca2d86fe9cb1cc6d2f15fbbbe0e691c0c648", + "blk.23.attn_k.weight": "2c32e66ad01942b819ac09a197c71579fe66f02226a264fdd72ad1e02c67a27e", + "blk.23.attn_norm.weight": "825fdc94deb439cb93c713eeb077c1052b90ed658d6d464fc4ad3d611e911d48", + "blk.23.attn_output.weight": "95ca6707a95b8750b0c7c5d379d368f0f2e7ebef631954e7d4d8ec0f41f13a3a", + "blk.23.attn_q.weight": "6eccc84faca5fac015d1b26e2854501edcfd292a302228fe14cf99f5eb59a34b", + "blk.23.attn_v.weight": "b343ac3d226040f1033ee049668aa1d89b1774bc18431965682e5dbdce78ccdc", + "blk.23.ffn_down.weight": "9fc599befea8d3b1e342d564a110074f66d2542df406c4b90b6bdc5828fbb2b2", + "blk.23.ffn_gate.weight": "488556c1b0c9f0b20b0c99b4bac2e0f4046b81edb601d7b91e7e5b3bab47d667", + "blk.23.ffn_up.weight": "1088e291d7008dd9c7c2dd6830af686a8a84b724d123a016209bd5156d6898f1", + "blk.24.attn_k.weight": "a923fbe35e61e009a53927d7828818e0592bb737d6a1106c4b0b5a1efc367e07", + "blk.24.attn_norm.weight": "9b51aaaa939cefafdd9b13a7e5b74ac7fa2d603427e55a16a909d6f3f353750a", + "blk.24.attn_output.weight": 
"1beb2baba56f8409466434b037771248c2f620ec5f53e15f44c271d5a2d9ecf4", + "blk.24.attn_q.weight": "4b0194fe5bfae0c6bf6131dcf8cb6e2b994f6ea10b27cb03574f0f4f8cc0c950", + "blk.24.attn_v.weight": "6ac34b1ab0f66226d85bca1194a7c212cd93d384ecbc8b8395de48aec0970a61", + "blk.24.ffn_down.weight": "5508f74cb732a662c2936b32ac5e90742d172b9f961a747b0e5cba0e5906a89d", + "blk.24.ffn_gate.weight": "095e39b8584403835f9bb1ac33e0e81f54175575e4800273d281b845bff381e7", + "blk.24.ffn_up.weight": "2d43ec21637dda12973de367b0113ee9840b0d815bf6fce042f7c3f270b0b530", + "blk.25.attn_k.weight": "9e2aee029f3d2c7f67dfc7926e72c8228fb978382c8e5a4701bbf82c93801419", + "blk.25.attn_norm.weight": "220cd7164fb4cdbe22d26058e4153b26c27c7b5ce2bec8e95bf2c0ea08d23103", + "blk.25.attn_output.weight": "a17f4a5dc6aa51f03dbd75602d98e9491767c205cdc2c3a5f8667fc54bbf7c64", + "blk.25.attn_q.weight": "f60827496835c440c794bf57ce9780704d10a59d8229886bf75ebb18900ba4ef", + "blk.25.attn_v.weight": "9cac217e9e9f4f4c85f14ee51165a77c580165bd4a34b202389169bbe61a1ced", + "blk.25.ffn_down.weight": "a0f36949b663e80849581dfb71e7babcc73580793bbcb0c80ab26d5a6e000359", + "blk.25.ffn_gate.weight": "df4d1be4d50d6afe5ad3ef0d0e0fac76a33e85c963dea769641d612dd53e7d13", + "blk.25.ffn_up.weight": "992da76be762632e25ebc5ef4d03728eece1b43f7c4e31827df19ca724aea694", + "blk.26.attn_k.weight": "34199ff856ac32a500c754539d070258574192a34ecba87a182897cb59fdff52", + "blk.26.attn_norm.weight": "a8e9dfb2dae5d22b5c0aec5f3675991c0e3c3e6a44153db2579136b73f456e00", + "blk.26.attn_output.weight": "1c4f257ffb0d7db0f11cfb275e38b4af736917b43ad82de1badce3f1d227da4d", + "blk.26.attn_q.weight": "33d55786274c2e718cf61e8fbecf3dfa5ee0c208f0b716d42b061f55459acb3c", + "blk.26.attn_v.weight": "684b636939cd4ffcfec5a6238a0790ffa43d853c95783af9b9e8275e74071a7a", + "blk.26.ffn_down.weight": "89d0bf066db154e6d312b5433aed1714f6a28b40f4c52e3e1530ee07703303c8", + "blk.26.ffn_gate.weight": "393d649bebe5e2940e1b043649f6c860b4b8b9f380f30e9da1744a830f358156", + "blk.26.ffn_up.weight": "179edc85ababd9d8440cc6093eecd1004290aa1cb96434b26ecf7585b6cca17b", + "blk.27.attn_k.weight": "334841445a7f1e14731b08f56eb0b1f0938c63823d28bc6d078c4c5f05b36f19", + "blk.27.attn_norm.weight": "57344471bbda2e9deffdfdb2dd05a07aa47f8761e24de53525588639145bf551", + "blk.27.attn_output.weight": "506126af9ee54b535d49f97e36f630e74834f480329f098d6d62e96246d8d65a", + "blk.27.attn_q.weight": "dd984df1acb4783849e25ba7ae378bfd385cd9efc540fb798cd5bdd873f0118f", + "blk.27.attn_v.weight": "b4b3fe9a4455d34c297ff20a2f537b647cef424741d840a747b265f23d320ac0", + "blk.27.ffn_down.weight": "621fdb185ba0d35ba5476dae73d2c81ec1482a0e878d5bfd5c3b29fe837af013", + "blk.27.ffn_gate.weight": "e4fbab45f2ec506fa374103251a0bdb7baa6f576080bdd796f3e9db92098e08f", + "blk.27.ffn_up.weight": "a0c57e463e988002bbd6a6c6792baa21a65e6f89ae303a2c301951b0ae6e4bbe", + "blk.28.attn_k.weight": "bac36cbd52ec5056841663865e1291ddab4b47ef9a2544dd285d4503bfb0e4a0", + "blk.28.attn_norm.weight": "5774a9df2bbb2e86d1f70179c7b92d81e1f401160148b3328fb64db6646a5425", + "blk.28.attn_output.weight": "e8712622d1569557000c75f26c3f55fad267fd300463c2c2cfe3afbfa1c8f908", + "blk.28.attn_q.weight": "11677751fddee52cc739699c02836f7be54d96038be4240be5d4f53d00161608", + "blk.28.attn_v.weight": "e5ee459b8958d65e1445997b9aa1e90e2f5d17761ebcf5357313119a45322507", + "blk.28.ffn_down.weight": "3934518f9f85292da8475fe38a8edcbfc4e24ac56c351b472d6351f98750871e", + "blk.28.ffn_gate.weight": "6ba735d57e98d0847e487f25ffaa25256deaa8abec76f428cb70bd9774279d83", + "blk.28.ffn_up.weight": 
"977fae6e1e5353114fc645dd98429464749758765cbc6e6457593d596e57850c", + "blk.29.attn_k.weight": "8122a457307d580ad6f1e0acea09a2f593d97f595ba0d6737f5fea16d2433642", + "blk.29.attn_norm.weight": "d626f721e05aa1202439b01027031d4caf1adace61ed37870a277cb6297c77cc", + "blk.29.attn_output.weight": "7fb7122ab1b6b1e6615ca746897da27bc52c92cb70d3147183cdde61795b72b3", + "blk.29.attn_q.weight": "be43e94ff6b6e391024dc824101efa0ddf4005d5b002ac26cb03765c0c73c2fa", + "blk.29.attn_v.weight": "af93c85ebff908f74f9935b81bde0516ca487c84139868a1ce079c3ae20036b1", + "blk.29.ffn_down.weight": "39dae12340ed3120bd19c495fe0872b559613641e41fde69d02d8631900b84c0", + "blk.29.ffn_gate.weight": "36fd482439840ef197c9f3b8905d86acfcea49bcf018544106ca465d4bf8d5c7", + "blk.29.ffn_up.weight": "5243fbdfdc1e2a1dd84b6210a9869d18a014db9088897e345240cdc99990bd5d", + "blk.30.attn_k.weight": "948f263616bd3788b2b968baafd69b9c5bd1b77578665f096c4b7e247b4cea42", + "blk.30.attn_norm.weight": "e168df981e744874ff303faf2eb470e5f6868c2040ba5f383f6c5148669975e7", + "blk.30.attn_output.weight": "4cf0ccca04b792573b756655a24fc89cfb1f272da8305633f0bc66ef14990b93", + "blk.30.attn_q.weight": "21e07d6cba6c50d65350289258209717174a13c42be57e8141d69712cbaf32c1", + "blk.30.attn_v.weight": "65a8ca29c7237b3182ccf03e2fc94e84f9a53d0e160fb679ab401c853170dd9c", + "blk.30.ffn_down.weight": "8b00500a6d00d84058f6658ee1d6f06fb4fcae2f90d4341792259362923b3c13", + "blk.30.ffn_gate.weight": "5bc0e19ab7a31b50ac2118ad1b36e31055271a322cd8ff661d47c3ac0210703c", + "blk.30.ffn_up.weight": "f37a0561955725bd59ee2d064fa9f4e00a12a1b620b624db3bc3add5330bc321", + "blk.31.attn_k.weight": "9a5663edda227f5d87533897146764f8e8a7481b9e71fae197c39204f8463221", + "blk.31.attn_norm.weight": "060a4f438a1ee5e220b5b5278ad2f5c085a428bf38c515766781815597c87529", + "blk.31.attn_output.weight": "6ada5d3cad9dea4780ffbb43302bb6ccc2f24eddd0fc4f5f84c9ce0fc0c6e5dd", + "blk.31.attn_q.weight": "bb5d08c08603907981ad388d5d8b70fcc9b98034ba264b8474c8890cc0297af0", + "blk.31.attn_v.weight": "e01b4252ea9c6a889c32b21144b441a347464d04536ef4f6572425be55759796", + "blk.31.ffn_down.weight": "8ba4d679c36e93ba65ba03180385ef35ea86b3b7cdf2fded9df59369f1c09630", + "blk.31.ffn_gate.weight": "e5b41dc93645f8b5e8eebae3ada3ea43a18f97ce2654228655170b07b463ccb0", + "blk.31.ffn_up.weight": "25b88cdddc8b547af294ed107d3d1312e90b983cae87936fa6062ecd8ea02539", + "blk.32.attn_k.weight": "4bcf86dc0858c8ca2fbdf6aa76674d43eb698f78979fdc1a38f556a7af1facc4", + "blk.32.attn_norm.weight": "cdcc12f3b8b9773c6722736bfb748a2729230b21478cbcc4104859d3148df815", + "blk.32.attn_output.weight": "d43f1196822995ed89a9365c97054753a8b30ce20b6e273c8edcc42673a1e141", + "blk.32.attn_q.weight": "ebf2972bb3865cbc5be4840113a322089752038344beab2a0122c7cb4fb399b6", + "blk.32.attn_v.weight": "714db81704ff34fa137512903c1013acee7877467473e46600728b9240582eb7", + "blk.32.ffn_down.weight": "2cde3da1258bb170a79d5d3cdfe10c86a71eb34b77da46b74c5ed71e7f4fe274", + "blk.32.ffn_gate.weight": "c7e1ed792532613ff9d4e5834b6536e2e0f47df2303bc0fdaa90aac0c1f4e8db", + "blk.32.ffn_up.weight": "d8d6f13fe66a716e28f79101a29817f0c0d6f99969a6f017d51bafd1a16c600c", + "blk.33.attn_k.weight": "a0a28f6cbca88da00cab2ca37094d9b0503bf9defdae77b91895b911c408cbb6", + "blk.33.attn_norm.weight": "0251200c24cc8445607ace6dc8c5aa0566567997262b7cca53a11ac23cc564b2", + "blk.33.attn_output.weight": "b2423205bdf6a1096d43c44d8d12f1a84fcd4e1bb70fcf6dc8542b8b8a71a13c", + "blk.33.attn_q.weight": "00b425c3ef71065ce5e0234e702bf38143b4952da78a85f52ab2c2e3073d97ab", + "blk.33.attn_v.weight": 
"035edd2335df816c42c765a5e66b9d9b9e15a822a8dc1863508145499c942c14", + "blk.33.ffn_down.weight": "4894a923a3db75bae4496ba3ce5f28796ad31fe33996a066271fb8654964310e", + "blk.33.ffn_gate.weight": "8f6c819b8bbfbe3357fae89e1ac5a3d58be85b3b04be3bacf7b62775869046ff", + "blk.33.ffn_up.weight": "257c3544b5b544fd5d839665bf5caf107a329b59dbc3751efcaa24ae63c56179", + "blk.34.attn_k.weight": "b6cd8bba892e38dac4a2ebc3ba1bce49e71b967fc436fde30c6d76f54a18935f", + "blk.34.attn_norm.weight": "2b3c8e60a064cba9955752bbbbdd92c71ba5c2f1bd721097bdbe88b5abc68787", + "blk.34.attn_output.weight": "8cc272551c9aaca9db5a660c6927bab94a0243d74a30b2bc165f06bd577714ea", + "blk.34.attn_q.weight": "74b561eb4792484e6a94b58fe2583848c3ae28ff2f1bf3d02939a0cfdfa49990", + "blk.34.attn_v.weight": "dba19e24ff05154dc5a1f55c023729303a583d13d68732ce22ea74d4410dc8f0", + "blk.34.ffn_down.weight": "76eca5dfeb274c35774e0bf9f22ee420ed9085c8e99aa2cd5a236e4918b44c61", + "blk.34.ffn_gate.weight": "9af0862d5fcbc24732846488e653db8242a467765c0cdbc00332b3a40256b4a6", + "blk.34.ffn_up.weight": "2a03126bf73587eaba99ece2066103d12e47bcd4ce30ff6c17b2f383b81d40df", + "blk.35.attn_k.weight": "52513fc0cd4e997a842729af7d21dd09399bce0a339558374738be266d0fa2f0", + "blk.35.attn_norm.weight": "e5281fa911964263ccf1630b14762edbd41d0b9472d6ec695fc600fed4892c35", + "blk.35.attn_output.weight": "b391d6705d5dc6f48326b5fd16573f679edf64109d86fb729a498819676590ca", + "blk.35.attn_q.weight": "d16446921966db9b0e0539626ad22a2511ace780e59379d6a4162d8c5441440b", + "blk.35.attn_v.weight": "9d8cdf23ffdb0c5c74106843390b94b24c9f33ef0eb9998d39f78c73390101ea", + "blk.35.ffn_down.weight": "938eb6301f7bbf162d7dd965682a5ed11d0a4a530c6fedd7e5469ce80012fc17", + "blk.35.ffn_gate.weight": "5ad84f5a0c8edcfea1ecf1a3e3d21d85ceda0c4ad9e3c6ca68885eeff8ed3c2f", + "blk.35.ffn_up.weight": "1c4330d9dc71bf4c98812c34356c51f520f47610a534152aa6d29284b758090d", + "blk.36.attn_k.weight": "ef720655e5ca2465f13db2dfc4732fb4ef2c9d53acde52f514fd4f301e974081", + "blk.36.attn_norm.weight": "88f4b9310b3c8c2644e3029160cd35678c79dfa59280430e03f5c29a6fe84a58", + "blk.36.attn_output.weight": "aec6f915fffd7bb72cd783273e871b4f09605950089d45e72059d1316b6c4b01", + "blk.36.attn_q.weight": "72f9408a2405d42f8db6ce5fcf1d26a3660b6f225fc60e77d0277109cfcb82ed", + "blk.36.attn_v.weight": "0f3b3d851dc44b3893ef53f6cca5b4acc9658bacfe1cc2d13c3d704ddd409b67", + "blk.36.ffn_down.weight": "470aec48ce8c5129a6654d9fd26fcae72776f9fc1429a8bb05818072a876475d", + "blk.36.ffn_gate.weight": "7f5f296d09cf55679767b5d15de3eff489c456782119f25204be4b1647f18dcf", + "blk.36.ffn_up.weight": "b7ef74a1f7ffb4982711d93f1787be3a70edc3d2358d5203c41d8900508037d4", + "blk.37.attn_k.weight": "c4ffa5412e4ff2dcfe1aed991c1f54169fd171a4c7638e4b9f21a1ca64c5e1d6", + "blk.37.attn_norm.weight": "4eb6c888d841cccfacf5b963f8611120f6ff24b84af0b5714fd9ab36dcda422f", + "blk.37.attn_output.weight": "db2a7bbf9682f9f6eea672dae8e150738f1bf74dbc80edc7022017a3f040c8ac", + "blk.37.attn_q.weight": "e38c0462aff139afcbab289189823527e453abc9e541154adde5e7af88cacf0b", + "blk.37.attn_v.weight": "952eb2492ed452a72f96bcc12d4b2affad9dfdf46ee39ce4a5d7b57a5dc301e5", + "blk.37.ffn_down.weight": "25f23a8fbc44febf6dc4848fd7fe03a580e2822bd3b3b5a51f4990826bfe3e4e", + "blk.37.ffn_gate.weight": "707da5eb40118b035305d3262444382351f170a20a537386a70e90c5a83a7817", + "blk.37.ffn_up.weight": "d2d2ba5cfc4ef47338dd7384219e22bf030a5a2209e0354d88f5bbaaafd20e87", + "blk.38.attn_k.weight": "abc4bb189dedf7ce661e79028427623a4f91ac091c2cd60e31b58bc62b1cda71", + "blk.38.attn_norm.weight": 
"9f4803a7d03fd40fcb83d85f84eb1d5682ea4e5bb084f210c02850675d804c3d", + "blk.38.attn_output.weight": "77cb66007f1a41df7135d0e7f900ceb499c2f667dfc3f1a6ac01a3203bbd3ccf", + "blk.38.attn_q.weight": "d94a8b26cd375bf2bcaa76597e314aa8268ee50a479d00931e5e0e021feadb5d", + "blk.38.attn_v.weight": "660c907888bc5016dc69b7d35fe6f55c7ded697c93be0e2d332a2f17aff88758", + "blk.38.ffn_down.weight": "6f06173bae5b00ffaf88ef383619a8b9c6a8d0d5c6494695d17f6c1de1a68a13", + "blk.38.ffn_gate.weight": "89f99be149d03f116527bfcabe073c50001c874de40fb6e817f6619027f3cd05", + "blk.38.ffn_up.weight": "8d57557c8d5e2d2688b73f01dddf1ce8d5194990cda6358153320aea88aac7f8", + "blk.39.attn_k.weight": "21be09c988b46c8393e6c2ec9230f3b5136eb7607dd1953ba92d0811c2f0dd75", + "blk.39.attn_norm.weight": "ba7c1912dd1c4e2d16917201f62396fd0600e4a451137eaddff255548c209abd", + "blk.39.attn_output.weight": "acfaf4abb3fd27fd899b5563c3877f176b597d8f6cdb2f2fd3f3a0bd4da15ed6", + "blk.39.attn_q.weight": "e8adbc140d4c8f0db2a27ca584c5531d5b1e080555fe627e34d80d0814a92bed", + "blk.39.attn_v.weight": "92f96b0e1f724e73a0f90a76c145654418844c04a6d4b14c05eb5af8a62bf8dc", + "blk.39.ffn_down.weight": "4d9ee7c65fc16fe95d10c47b79ac6a525741947600a64b5fcea5d300a82c50de", + "blk.39.ffn_gate.weight": "7e18507989f39b32191133d2657c2ee3b74f42f070579204d727eb72215793d1", + "blk.39.ffn_up.weight": "22cda752269c9757ba918abede1df95bb0f83a5c772dea13c8deea3d5f2723d9", + "output_norm.weight": "2858cf0e39d32caf52b7861378ace076000241e147f10b9eb21d8a5cd149e3cb" +} \ No newline at end of file diff --git a/template/command-r.gotmpl b/template/command-r.gotmpl new file mode 100644 index 00000000..ea5d5528 --- /dev/null +++ b/template/command-r.gotmpl @@ -0,0 +1,67 @@ +{{- if or .Tools .System }}<|START_OF_TURN_TOKEN|><|SYSTEM_TOKEN|> +{{- if .Tools }}# Safety Preamble +The instructions in this section override those in the task description and style guide sections. Don't answer questions that are harmful or immoral. + +# System Preamble +## Basic Rules +You are a powerful conversational AI trained by Cohere to help people. You are augmented by a number of tools, and your job is to use and consume the output of these tools to best help the user. You will see a conversation history between yourself and a user, ending with an utterance from the user. You will then see a specific instruction instructing you what kind of response to generate. When you answer the user's requests, you cite your sources in your answers, according to those instructions. + +{{ if .System }}# User Preamble +{{ .System }} +{{- end }} + +## Available Tools +Here is a list of tools that you have available to you: +{{- range .Tools }} + +```python +def {{ .Function.Name }}( +{{- range $name, $property := .Function.Parameters.Properties }}{{ $name }}: {{ $property.Type }}, {{ end }}) -> List[Dict]: + '''{{ .Function.Description }} + +{{- if .Function.Parameters.Properties }} + + Args: +{{- range $name, $property := .Function.Parameters.Properties }} + {{ $name }} ({{ $property.Type }}): {{ $property.Description }} +{{- end }} +{{- end }} + ''' + pass +``` +{{- end }} +{{- else if .System }}{{ .System }} +{{- end }}<|END_OF_TURN_TOKEN|> +{{- end }} +{{- range .Messages }} +{{- if eq .Role "system" }} +{{- continue }} +{{- end }}<|START_OF_TURN_TOKEN|> +{{- if eq .Role "user" }}<|USER_TOKEN|>{{ .Content }} +{{- if $.Tools }}<|START_OF_TURN_TOKEN|><|SYSTEM_TOKEN|>Write 'Action:' followed by a json-formatted list of actions that you want to perform in order to produce a good response to the user's last input. 
You can use any of the supplied tools any number of times, but you should aim to execute the minimum number of necessary actions for the input. You should use the `directly-answer` tool if calling the other tools is unnecessary. The list of actions you want to call should be formatted as a list of json objects, for example: +```json +[ + { + "tool_name": title of the tool in the specification, + "parameters": a dict of parameters to input into the tool as they are defined in the specs, or {} if it takes no parameters + } +]``` +{{- end }} +{{- else if eq .Role "assistant" }}<|CHATBOT_TOKEN|> +{{- if .Content }}{{ .Content }} +{{- else if .ToolCalls }} +Action: ```json +[ +{{- range .ToolCalls }} + { + "tool_name": "{{ .Function.Name }}", + "parameters": {{ .Function.Arguments }} + } +{{- end }} +]``` +{{- end }} +{{- else if eq .Role "tool" }}<|SYSTEM_TOKEN|> +console_output: {{ .Content }} + +{{- end }}<|END_OF_TURN_TOKEN|> +{{- end }}<|END_OF_TURN_TOKEN|><|START_OF_TURN_TOKEN|><|CHATBOT_TOKEN|> diff --git a/template/command-r.json b/template/command-r.json new file mode 100644 index 00000000..4ae789b6 --- /dev/null +++ b/template/command-r.json @@ -0,0 +1,6 @@ +{ + "stop": [ + "<|START_OF_TURN_TOKEN|>", + "<|END_OF_TURN_TOKEN|>" + ] +} diff --git a/template/index.json b/template/index.json index 0ce6ac0f..7a27747c 100644 --- a/template/index.json +++ b/template/index.json @@ -138,5 +138,9 @@ { "template": "{% for message in messages %}{% if message['role'] == 'system' %}{% if message['content']%}{{'### System:\n' + message['content']+'\n\n'}}{% endif %}{% elif message['role'] == 'user' %}{{'### User:\n' + message['content']+'\n\n'}}{% elif message['role'] == 'assistant' %}{{'### Assistant:\n' + message['content']}}{% endif %}{% if loop.last and add_generation_prompt %}{{ '### Assistant:\n' }}{% endif %}{% endfor %}", "name": "solar-instruct" + }, + { + "template": "{{ bos_token }}{% if messages[0]['role'] == 'system' %}{% set loop_messages = messages[1:] %}{% set system_message = messages[0]['content'] %}{% elif false == true %}{% set loop_messages = messages %}{% set system_message = 'You are Command-R, a brilliant, sophisticated, AI-assistant trained to assist human users by providing thorough responses. You are trained by Cohere.' 
%}{% else %}{% set loop_messages = messages %}{% set system_message = false %}{% endif %}{% if system_message != false %}{{ '<|START_OF_TURN_TOKEN|><|SYSTEM_TOKEN|>' + system_message + '<|END_OF_TURN_TOKEN|>' }}{% endif %}{% for message in loop_messages %}{% if (message['role'] == 'user') != (loop.index0 % 2 == 0) %}{{ raise_exception('Conversation roles must alternate user/assistant/user/assistant/...') }}{% endif %}{% set content = message['content'] %}{% if message['role'] == 'user' %}{{ '<|START_OF_TURN_TOKEN|><|USER_TOKEN|>' + content.strip() + '<|END_OF_TURN_TOKEN|>' }}{% elif message['role'] == 'assistant' %}{{ '<|START_OF_TURN_TOKEN|><|CHATBOT_TOKEN|>' + content.strip() + '<|END_OF_TURN_TOKEN|>' }}{% endif %}{% endfor %}{% if add_generation_prompt %}{{ '<|START_OF_TURN_TOKEN|><|CHATBOT_TOKEN|>' }}{% endif %}", + "name": "command-r" } ] diff --git a/template/testdata/command-r.gotmpl/system-user-assistant-user b/template/testdata/command-r.gotmpl/system-user-assistant-user new file mode 100644 index 00000000..83dea095 --- /dev/null +++ b/template/testdata/command-r.gotmpl/system-user-assistant-user @@ -0,0 +1 @@ +<|START_OF_TURN_TOKEN|><|SYSTEM_TOKEN|>You are a helpful assistant.<|END_OF_TURN_TOKEN|><|START_OF_TURN_TOKEN|><|USER_TOKEN|>Hello, how are you?<|END_OF_TURN_TOKEN|><|START_OF_TURN_TOKEN|><|CHATBOT_TOKEN|>I'm doing great. How can I help you today?<|END_OF_TURN_TOKEN|><|START_OF_TURN_TOKEN|><|USER_TOKEN|>I'd like to show off how chat templating works!<|END_OF_TURN_TOKEN|><|END_OF_TURN_TOKEN|><|START_OF_TURN_TOKEN|><|CHATBOT_TOKEN|> diff --git a/template/testdata/command-r.gotmpl/user b/template/testdata/command-r.gotmpl/user new file mode 100644 index 00000000..b148e2d6 --- /dev/null +++ b/template/testdata/command-r.gotmpl/user @@ -0,0 +1 @@ +<|START_OF_TURN_TOKEN|><|USER_TOKEN|>Hello, how are you?<|END_OF_TURN_TOKEN|><|END_OF_TURN_TOKEN|><|START_OF_TURN_TOKEN|><|CHATBOT_TOKEN|> diff --git a/template/testdata/command-r.gotmpl/user-assistant-user b/template/testdata/command-r.gotmpl/user-assistant-user new file mode 100644 index 00000000..ae76cb12 --- /dev/null +++ b/template/testdata/command-r.gotmpl/user-assistant-user @@ -0,0 +1 @@ +<|START_OF_TURN_TOKEN|><|USER_TOKEN|>Hello, how are you?<|END_OF_TURN_TOKEN|><|START_OF_TURN_TOKEN|><|CHATBOT_TOKEN|>I'm doing great. 
How can I help you today?<|END_OF_TURN_TOKEN|><|START_OF_TURN_TOKEN|><|USER_TOKEN|>I'd like to show off how chat templating works!<|END_OF_TURN_TOKEN|><|END_OF_TURN_TOKEN|><|START_OF_TURN_TOKEN|><|CHATBOT_TOKEN|> From 42cf4db6017a949c2decd1bb9b6cac2468aceeaf Mon Sep 17 00:00:00 2001 From: Jeffrey Morgan Date: Thu, 16 Jan 2025 00:14:04 -0800 Subject: [PATCH 12/68] parser: fix parsing Modelfiles with multiple FROM commands (#8449) --- parser/parser.go | 8 +++++++- parser/parser_test.go | 11 ++++++++--- 2 files changed, 15 insertions(+), 4 deletions(-) diff --git a/parser/parser.go b/parser/parser.go index d5df479a..6832351f 100644 --- a/parser/parser.go +++ b/parser/parser.go @@ -62,7 +62,13 @@ func (f Modelfile) CreateRequest(relativeDir string) (*api.CreateRequest, error) return nil, err } - req.Files = digestMap + if req.Files == nil { + req.Files = digestMap + } else { + for k, v := range digestMap { + req.Files[k] = v + } + } case "adapter": path, err := expandPath(c.Args, relativeDir) if err != nil { diff --git a/parser/parser_test.go b/parser/parser_test.go index 429bdc64..eb52ee67 100644 --- a/parser/parser_test.go +++ b/parser/parser_test.go @@ -793,15 +793,20 @@ func createBinFile(t *testing.T, kv map[string]any, ti []llm.Tensor) (string, st } func TestCreateRequestFiles(t *testing.T) { - name, digest := createBinFile(t, nil, nil) + n1, d1 := createBinFile(t, nil, nil) + n2, d2 := createBinFile(t, map[string]any{"foo": "bar"}, nil) cases := []struct { input string expected *api.CreateRequest }{ { - fmt.Sprintf("FROM %s", name), - &api.CreateRequest{Files: map[string]string{name: digest}}, + fmt.Sprintf("FROM %s", n1), + &api.CreateRequest{Files: map[string]string{n1: d1}}, + }, + { + fmt.Sprintf("FROM %s\nFROM %s", n1, n2), + &api.CreateRequest{Files: map[string]string{n1: d1, n2: d2}}, }, } From a420a453b4783841e3e79c248ef0fe9548df6914 Mon Sep 17 00:00:00 2001 From: Patrick Devine Date: Thu, 16 Jan 2025 01:14:04 -0800 Subject: [PATCH 13/68] fix default modelfile for create (#8452) --- cmd/cmd.go | 2 +- cmd/cmd_test.go | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/cmd/cmd.go b/cmd/cmd.go index cfefa35c..17c60717 100644 --- a/cmd/cmd.go +++ b/cmd/cmd.go @@ -59,7 +59,7 @@ func getModelfileName(cmd *cobra.Command) (string, error) { _, err = os.Stat(absName) if err != nil { - return filename, err + return "", err } return absName, nil diff --git a/cmd/cmd_test.go b/cmd/cmd_test.go index 069428be..c8963280 100644 --- a/cmd/cmd_test.go +++ b/cmd/cmd_test.go @@ -279,7 +279,7 @@ func TestGetModelfileName(t *testing.T) { name: "no modelfile specified, no modelfile exists", modelfileName: "", fileExists: false, - expectedName: "Modelfile", + expectedName: "", expectedErr: os.ErrNotExist, }, { @@ -293,7 +293,7 @@ func TestGetModelfileName(t *testing.T) { name: "modelfile specified, no modelfile exists", modelfileName: "crazyfile", fileExists: false, - expectedName: "crazyfile", + expectedName: "", expectedErr: os.ErrNotExist, }, { From 021817e59ace5e351b35b2e6881f83a09f038546 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jannik=20Maierh=C3=B6fer?= <48529566+jannikmaierhoefer@users.noreply.github.com> Date: Fri, 17 Jan 2025 07:41:12 +0100 Subject: [PATCH 14/68] readme: add link to Langfuse (#8455) --- README.md | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/README.md b/README.md index 1aaae620..4b47ee74 100644 --- a/README.md +++ b/README.md @@ -539,4 +539,5 @@ See the [API documentation](./docs/api.md) for all endpoints. 
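The parser change in PATCH 12 above merges the file digests gathered for each `FROM` command into `req.Files` instead of overwriting the map, so a Modelfile with several `FROM` lines keeps every file. A minimal, self-contained sketch of that merge behavior, assuming made-up file names and digest values (illustrative only, not the repository's code):

```go
package main

import "fmt"

// mergeFiles mirrors the fixed CreateRequest logic: digests gathered for each
// FROM command are merged into the accumulated map instead of replacing it.
func mergeFiles(files, digestMap map[string]string) map[string]string {
	if files == nil {
		return digestMap
	}
	for k, v := range digestMap {
		files[k] = v
	}
	return files
}

func main() {
	var files map[string]string
	files = mergeFiles(files, map[string]string{"model-a.gguf": "sha256:aaa"}) // first FROM
	files = mergeFiles(files, map[string]string{"model-b.gguf": "sha256:bbb"}) // second FROM
	fmt.Println(files) // map[model-a.gguf:sha256:aaa model-b.gguf:sha256:bbb]
}
```

This is what the new test case asserts: a Modelfile with two `FROM` lines yields a `Files` map containing both digests.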
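PATCH 13 above tightens the contract of `getModelfileName`: when the stat fails, the caller now gets an empty name along with the error, rather than a filename that was never verified on disk. A simplified standalone sketch of the corrected behavior (the real function reads the name from a `*cobra.Command` flag; "crazyfile" is the hypothetical missing file used in the updated test):

```go
package main

import (
	"errors"
	"fmt"
	"os"
	"path/filepath"
)

// getModelfileName sketches the corrected contract: on a failed stat it
// returns "" with the error, not the unverified input filename.
func getModelfileName(filename string) (string, error) {
	absName, err := filepath.Abs(filename)
	if err != nil {
		return "", err
	}
	if _, err := os.Stat(absName); err != nil {
		return "", err // previously: return filename, err
	}
	return absName, nil
}

func main() {
	name, err := getModelfileName("crazyfile") // assumed not to exist
	fmt.Printf("name=%q notExist=%v\n", name, errors.Is(err, os.ErrNotExist))
}
```

This matches the updated test table, where `expectedName` for a missing Modelfile is now `""` rather than the name that was passed in.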
### Observability - [OpenLIT](https://github.com/openlit/openlit) is an OpenTelemetry-native tool for monitoring Ollama Applications & GPUs using traces and metrics. -- [HoneyHive](https://docs.honeyhive.ai/integrations/ollama) is an AI observability and evaluation platform for AI agents. Use HoneyHive to evaluate agent performance, interrogate failures, and monitor quality in production. +- [HoneyHive](https://docs.honeyhive.ai/integrations/ollama) is an AI observability and evaluation platform for AI agents. Use HoneyHive to evaluate agent performance, interrogate failures, and monitor quality in production. +- [Langfuse](https://langfuse.com/docs/integrations/ollama) is an open source LLM observability platform that enables teams to collaboratively monitor, evaluate and debug AI applications. From 7bb356c6807c8dd2f06a28645cfac307e1e19a50 Mon Sep 17 00:00:00 2001 From: EndoTheDev Date: Mon, 20 Jan 2025 09:45:35 +0700 Subject: [PATCH 15/68] docs: update suspend header in gpu.md (#8487) --- docs/gpu.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/gpu.md b/docs/gpu.md index 691746d0..39933002 100644 --- a/docs/gpu.md +++ b/docs/gpu.md @@ -38,7 +38,7 @@ Numeric IDs may be used, however ordering may vary, so UUIDs are more reliable. You can discover the UUID of your GPUs by running `nvidia-smi -L` If you want to ignore the GPUs and force CPU usage, use an invalid GPU ID (e.g., "-1") -### Laptop Suspend Resume +### Linux Suspend Resume On linux, after a suspend/resume cycle, sometimes Ollama will fail to discover your NVIDIA GPU, and fallback to running on the CPU. You can workaround this From 294b6f5a220e8678c2b08fd2ab783a99e25c5215 Mon Sep 17 00:00:00 2001 From: frob Date: Tue, 21 Jan 2025 18:28:59 +0100 Subject: [PATCH 16/68] docs: remove tfs_z option from documentation (#8515) --- docs/modelfile.md | 1 - parser/parser_test.go | 1 - 2 files changed, 2 deletions(-) diff --git a/docs/modelfile.md b/docs/modelfile.md index b1c4e8a3..5fcde7ed 100644 --- a/docs/modelfile.md +++ b/docs/modelfile.md @@ -155,7 +155,6 @@ PARAMETER | temperature | The temperature of the model. Increasing the temperature will make the model answer more creatively. (Default: 0.8) | float | temperature 0.7 | | seed | Sets the random number seed to use for generation. Setting this to a specific number will make the model generate the same text for the same prompt. (Default: 0) | int | seed 42 | | stop | Sets the stop sequences to use. When this pattern is encountered the LLM will stop generating text and return. Multiple stop patterns may be set by specifying multiple separate `stop` parameters in a modelfile. | string | stop "AI assistant:" | -| tfs_z | Tail free sampling is used to reduce the impact of less probable tokens from the output. A higher value (e.g., 2.0) will reduce the impact more, while a value of 1.0 disables this setting. (default: 1) | float | tfs_z 1 | | num_predict | Maximum number of tokens to predict when generating text. (Default: -1, infinite generation) | int | num_predict 42 | | top_k | Reduces the probability of generating nonsense. A higher value (e.g. 100) will give more diverse answers, while a lower value (e.g. 10) will be more conservative. (Default: 40) | int | top_k 40 | | top_p | Works together with top-k. A higher value (e.g., 0.95) will lead to more diverse text, while a lower value (e.g., 0.5) will generate more focused and conservative text. 
(Default: 0.9) | float | top_p 0.9 | diff --git a/parser/parser_test.go b/parser/parser_test.go index eb52ee67..94f338ed 100644 --- a/parser/parser_test.go +++ b/parser/parser_test.go @@ -490,7 +490,6 @@ func TestParseFileParameters(t *testing.T) { "top_k 1": {"top_k", "1"}, "top_p 1.0": {"top_p", "1.0"}, "min_p 0.05": {"min_p", "0.05"}, - "tfs_z 1.0": {"tfs_z", "1.0"}, "typical_p 1.0": {"typical_p", "1.0"}, "repeat_last_n 1": {"repeat_last_n", "1"}, "temperature 1.0": {"temperature", "1.0"}, From ca2f9843c8c71491d5abf626c73508e5a1685cea Mon Sep 17 00:00:00 2001 From: Daniel Jalkut Date: Thu, 23 Jan 2025 01:52:15 -0500 Subject: [PATCH 17/68] docs: remove reference to the deleted examples folder (#8524) --- docs/modelfile.md | 2 -- 1 file changed, 2 deletions(-) diff --git a/docs/modelfile.md b/docs/modelfile.md index 5fcde7ed..cc2115b3 100644 --- a/docs/modelfile.md +++ b/docs/modelfile.md @@ -67,8 +67,6 @@ To use this: 3. `ollama run choose-a-model-name` 4. Start using the model! -More examples are available in the [examples directory](../examples). - To view the Modelfile of a given model, use the `ollama show --modelfile` command. ```bash From 453e4d090bd00692c59b79a483c355ec1e57065c Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Mat=C4=9Bj=20=C5=A0t=C3=A1gl?= Date: Sat, 25 Jan 2025 10:04:07 +0100 Subject: [PATCH 18/68] readme: add LlmTornado to community integrations (#8551) --- README.md | 1 + 1 file changed, 1 insertion(+) diff --git a/README.md b/README.md index 4b47ee74..e2ce87ba 100644 --- a/README.md +++ b/README.md @@ -481,6 +481,7 @@ See the [API documentation](./docs/api.md) for all endpoints. - [GoLamify](https://github.com/prasad89/golamify) - [Ollama for Haskell](https://github.com/tusharad/ollama-haskell) - [multi-llm-ts](https://github.com/nbonamy/multi-llm-ts) (A Typescript/JavaScript library allowing access to different LLM in unified API) +- [LlmTornado](https://github.com/lofcz/llmtornado) (C# library providing a unified interface for major FOSS & Commercial inference APIs) ### Mobile From 2ef3c803a151a0a9b1776c9ebe6a7e86b3971660 Mon Sep 17 00:00:00 2001 From: Xiaofu Huang Date: Mon, 27 Jan 2025 16:36:23 +0800 Subject: [PATCH 19/68] readme: add AI Toolkit for VSCode to community integrations (#8604) --- README.md | 1 + 1 file changed, 1 insertion(+) diff --git a/README.md b/README.md index e2ce87ba..f864f99f 100644 --- a/README.md +++ b/README.md @@ -369,6 +369,7 @@ See the [API documentation](./docs/api.md) for all endpoints. - [Minima](https://github.com/dmayboroda/minima) (RAG with on-premises or fully local workflow) - [aidful-ollama-model-delete](https://github.com/AidfulAI/aidful-ollama-model-delete) (User interface for simplified model cleanup) - [Perplexica](https://github.com/ItzCrazyKns/Perplexica) (An AI-powered search engine & an open-source alternative to Perplexity AI) +- [AI Toolkit for Visual Studio Code](https://aka.ms/ai-tooklit/ollama-docs) (Microsoft-official VSCode extension to chat, test, evaluate models with Ollama support, and use them in your AI applications.) 
### Cloud From dcfb7a105c455ae8d44a06b3380731d8b1ffcc22 Mon Sep 17 00:00:00 2001 From: Michael Yang Date: Wed, 29 Jan 2025 15:03:38 -0800 Subject: [PATCH 20/68] next build (#8539) * add build to .dockerignore * test: only build one arch * add build to .gitignore * fix ccache path * filter amdgpu targets * only filter if autodetecting * Don't clobber gpu list for default runner This ensures the GPU-specific environment variables are set properly * explicitly set CXX compiler for HIP * Update build_windows.ps1 This isn't complete, but is close. Dependencies are missing, and it only builds the "default" preset. * build: add ollama subdir * add .git to .dockerignore * docs: update development.md * update build_darwin.sh * remove unused scripts * llm: add cwd and build/lib/ollama to library paths * default DYLD_LIBRARY_PATH to LD_LIBRARY_PATH in runner on macOS * add additional cmake output vars for msvc * interim edits to make server detection logic work with dll directories like lib/ollama/cuda_v12 * remove unnecessary filepath.Dir, cleanup * add hardware-specific directory to path * use absolute server path * build: linux arm * cmake install targets * remove unused files * ml: visit each library path once * build: skip cpu variants on arm * build: install cpu targets * build: fix workflow * shorter names * fix rocblas install * docs: clean up development.md * consistent build dir removal in development.md * silence -Wimplicit-function-declaration build warnings in ggml-cpu * update readme * update development readme * llm: update library lookup logic now that there is one runner (#8587) * tweak development.md * update docs * add windows cuda/rocm tests --------- Co-authored-by: jmorganca Co-authored-by: Daniel Hiltgen --- .dockerignore | 4 +- .gitattributes | 9 + .github/workflows/release.yaml | 929 ++++++------ .github/workflows/test.yaml | 358 ++----- .gitignore | 5 +- CMakeLists.txt | 112 +++ CMakePresets.json | 110 +++ Dockerfile | 281 ++---- Makefile | 103 -- Makefile.sync | 56 ++ discover/amd_common.go | 13 +- discover/amd_linux.go | 6 +- discover/amd_windows.go | 10 +- discover/gpu.go | 88 +- discover/gpu_darwin.go | 3 - discover/path.go | 53 + discover/types.go | 3 +- docs/development.md | 205 ++-- envconfig/config.go | 9 - go.mod | 3 +- go.sum | 2 + llama/README.md | 127 +-- llama/amx.h | 34 - llama/ggml-blas.h | 51 - llama/ggml-cpu-aarch64.h | 34 - llama/ggml-cpu-traits.h | 64 -- llama/ggml-cuda/acc.cuh | 31 - llama/ggml-cuda/arange.cu | 60 -- llama/ggml-cuda/arange.cuh | 31 - llama/ggml-cuda/argmax.cuh | 29 - llama/ggml-cuda/argsort.cuh | 29 - llama/ggml-cuda/binbcast.cuh | 35 - llama/ggml-cuda/clamp.cu | 60 -- llama/ggml-cuda/clamp.cuh | 31 - llama/ggml-cuda/concat.cuh | 31 - llama/ggml-cuda/conv-transpose-1d.cuh | 31 - llama/ggml-cuda/convert.cuh | 39 - llama/ggml-cuda/count-equal.cuh | 31 - llama/ggml-cuda/cpy.cuh | 35 - llama/ggml-cuda/cross-entropy-loss.cuh | 33 - llama/ggml-cuda/diagmask.cuh | 31 - llama/ggml-cuda/fattn-tile-f16.cuh | 29 - llama/ggml-cuda/fattn-tile-f32.cuh | 29 - llama/ggml-cuda/fattn.cuh | 29 - llama/ggml-cuda/getrows.cuh | 31 - llama/ggml-cuda/im2col.cuh | 31 - llama/ggml-cuda/mmv.cuh | 38 - llama/ggml-cuda/mmvq.cuh | 35 - llama/ggml-cuda/norm.cuh | 33 - llama/ggml-cuda/opt-step-adamw.cuh | 31 - llama/ggml-cuda/out-prod.cuh | 29 - llama/ggml-cuda/pad.cuh | 32 - llama/ggml-cuda/pool2d.cuh | 31 - llama/ggml-cuda/quantize.cuh | 50 - llama/ggml-cuda/rope.cuh | 31 - llama/ggml-cuda/scale.cu | 57 -- llama/ggml-cuda/scale.cuh | 31 -
llama/ggml-cuda/softmax.cuh | 31 - llama/ggml-cuda/sum.cuh | 31 - llama/ggml-cuda/sumrows.cu | 65 -- llama/ggml-cuda/sumrows.cuh | 31 - .../fattn-vec-f16-instance-hs128-f16-f16.cu | 31 - .../fattn-vec-f16-instance-hs128-f16-q4_0.cu | 31 - .../fattn-vec-f16-instance-hs128-f16-q4_1.cu | 31 - .../fattn-vec-f16-instance-hs128-f16-q5_0.cu | 31 - .../fattn-vec-f16-instance-hs128-f16-q5_1.cu | 31 - .../fattn-vec-f16-instance-hs128-f16-q8_0.cu | 31 - .../fattn-vec-f16-instance-hs128-q4_0-f16.cu | 31 - .../fattn-vec-f16-instance-hs128-q4_0-q4_0.cu | 31 - .../fattn-vec-f16-instance-hs128-q4_0-q4_1.cu | 31 - .../fattn-vec-f16-instance-hs128-q4_0-q5_0.cu | 31 - .../fattn-vec-f16-instance-hs128-q4_0-q5_1.cu | 31 - .../fattn-vec-f16-instance-hs128-q4_0-q8_0.cu | 31 - .../fattn-vec-f16-instance-hs128-q4_1-f16.cu | 31 - .../fattn-vec-f16-instance-hs128-q4_1-q4_0.cu | 31 - .../fattn-vec-f16-instance-hs128-q4_1-q4_1.cu | 31 - .../fattn-vec-f16-instance-hs128-q4_1-q5_0.cu | 31 - .../fattn-vec-f16-instance-hs128-q4_1-q5_1.cu | 31 - .../fattn-vec-f16-instance-hs128-q4_1-q8_0.cu | 31 - .../fattn-vec-f16-instance-hs128-q5_0-f16.cu | 31 - .../fattn-vec-f16-instance-hs128-q5_0-q4_0.cu | 31 - .../fattn-vec-f16-instance-hs128-q5_0-q4_1.cu | 31 - .../fattn-vec-f16-instance-hs128-q5_0-q5_0.cu | 31 - .../fattn-vec-f16-instance-hs128-q5_0-q5_1.cu | 31 - .../fattn-vec-f16-instance-hs128-q5_0-q8_0.cu | 31 - .../fattn-vec-f16-instance-hs128-q5_1-f16.cu | 31 - .../fattn-vec-f16-instance-hs128-q5_1-q4_0.cu | 31 - .../fattn-vec-f16-instance-hs128-q5_1-q4_1.cu | 31 - .../fattn-vec-f16-instance-hs128-q5_1-q5_0.cu | 31 - .../fattn-vec-f16-instance-hs128-q5_1-q5_1.cu | 31 - .../fattn-vec-f16-instance-hs128-q5_1-q8_0.cu | 31 - .../fattn-vec-f16-instance-hs128-q8_0-f16.cu | 31 - .../fattn-vec-f16-instance-hs128-q8_0-q4_0.cu | 31 - .../fattn-vec-f16-instance-hs128-q8_0-q4_1.cu | 31 - .../fattn-vec-f16-instance-hs128-q8_0-q5_0.cu | 31 - .../fattn-vec-f16-instance-hs128-q8_0-q5_1.cu | 31 - .../fattn-vec-f16-instance-hs128-q8_0-q8_0.cu | 31 - .../fattn-vec-f16-instance-hs256-f16-f16.cu | 31 - .../fattn-vec-f16-instance-hs64-f16-f16.cu | 31 - .../fattn-vec-f16-instance-hs64-f16-q4_0.cu | 31 - .../fattn-vec-f16-instance-hs64-f16-q4_1.cu | 31 - .../fattn-vec-f16-instance-hs64-f16-q5_0.cu | 31 - .../fattn-vec-f16-instance-hs64-f16-q5_1.cu | 31 - .../fattn-vec-f16-instance-hs64-f16-q8_0.cu | 31 - .../fattn-vec-f32-instance-hs128-f16-f16.cu | 31 - .../fattn-vec-f32-instance-hs128-f16-q4_0.cu | 31 - .../fattn-vec-f32-instance-hs128-f16-q4_1.cu | 31 - .../fattn-vec-f32-instance-hs128-f16-q5_0.cu | 31 - .../fattn-vec-f32-instance-hs128-f16-q5_1.cu | 31 - .../fattn-vec-f32-instance-hs128-f16-q8_0.cu | 31 - .../fattn-vec-f32-instance-hs128-q4_0-f16.cu | 31 - .../fattn-vec-f32-instance-hs128-q4_0-q4_0.cu | 31 - .../fattn-vec-f32-instance-hs128-q4_0-q4_1.cu | 31 - .../fattn-vec-f32-instance-hs128-q4_0-q5_0.cu | 31 - .../fattn-vec-f32-instance-hs128-q4_0-q5_1.cu | 31 - .../fattn-vec-f32-instance-hs128-q4_0-q8_0.cu | 31 - .../fattn-vec-f32-instance-hs128-q4_1-f16.cu | 31 - .../fattn-vec-f32-instance-hs128-q4_1-q4_0.cu | 31 - .../fattn-vec-f32-instance-hs128-q4_1-q4_1.cu | 31 - .../fattn-vec-f32-instance-hs128-q4_1-q5_0.cu | 31 - .../fattn-vec-f32-instance-hs128-q4_1-q5_1.cu | 31 - .../fattn-vec-f32-instance-hs128-q4_1-q8_0.cu | 31 - .../fattn-vec-f32-instance-hs128-q5_0-f16.cu | 31 - .../fattn-vec-f32-instance-hs128-q5_0-q4_0.cu | 31 - .../fattn-vec-f32-instance-hs128-q5_0-q4_1.cu | 31 - .../fattn-vec-f32-instance-hs128-q5_0-q5_0.cu | 31 - 
.../fattn-vec-f32-instance-hs128-q5_0-q5_1.cu | 31 - .../fattn-vec-f32-instance-hs128-q5_0-q8_0.cu | 31 - .../fattn-vec-f32-instance-hs128-q5_1-f16.cu | 31 - .../fattn-vec-f32-instance-hs128-q5_1-q4_0.cu | 31 - .../fattn-vec-f32-instance-hs128-q5_1-q4_1.cu | 31 - .../fattn-vec-f32-instance-hs128-q5_1-q5_0.cu | 31 - .../fattn-vec-f32-instance-hs128-q5_1-q5_1.cu | 31 - .../fattn-vec-f32-instance-hs128-q5_1-q8_0.cu | 31 - .../fattn-vec-f32-instance-hs128-q8_0-f16.cu | 31 - .../fattn-vec-f32-instance-hs128-q8_0-q4_0.cu | 31 - .../fattn-vec-f32-instance-hs128-q8_0-q4_1.cu | 31 - .../fattn-vec-f32-instance-hs128-q8_0-q5_0.cu | 31 - .../fattn-vec-f32-instance-hs128-q8_0-q5_1.cu | 31 - .../fattn-vec-f32-instance-hs128-q8_0-q8_0.cu | 31 - .../fattn-vec-f32-instance-hs256-f16-f16.cu | 31 - .../fattn-vec-f32-instance-hs64-f16-f16.cu | 31 - .../fattn-vec-f32-instance-hs64-f16-q4_0.cu | 31 - .../fattn-vec-f32-instance-hs64-f16-q4_1.cu | 31 - .../fattn-vec-f32-instance-hs64-f16-q5_0.cu | 31 - .../fattn-vec-f32-instance-hs64-f16-q5_1.cu | 31 - .../fattn-vec-f32-instance-hs64-f16-q8_0.cu | 31 - .../fattn-wmma-f16-instance-kqfloat-cpb16.cu | 36 - .../fattn-wmma-f16-instance-kqfloat-cpb32.cu | 35 - .../fattn-wmma-f16-instance-kqhalf-cpb16.cu | 36 - .../fattn-wmma-f16-instance-kqhalf-cpb32.cu | 36 - .../fattn-wmma-f16-instance-kqhalf-cpb8.cu | 34 - .../template-instances/mmq-instance-iq1_s.cu | 31 - .../template-instances/mmq-instance-iq2_s.cu | 31 - .../template-instances/mmq-instance-iq2_xs.cu | 31 - .../mmq-instance-iq2_xxs.cu | 31 - .../template-instances/mmq-instance-iq3_s.cu | 31 - .../mmq-instance-iq3_xxs.cu | 31 - .../template-instances/mmq-instance-iq4_nl.cu | 31 - .../template-instances/mmq-instance-iq4_xs.cu | 31 - .../template-instances/mmq-instance-q2_k.cu | 31 - .../template-instances/mmq-instance-q3_k.cu | 31 - .../template-instances/mmq-instance-q4_0.cu | 31 - .../template-instances/mmq-instance-q4_1.cu | 31 - .../template-instances/mmq-instance-q4_k.cu | 31 - .../template-instances/mmq-instance-q5_0.cu | 31 - .../template-instances/mmq-instance-q5_1.cu | 31 - .../template-instances/mmq-instance-q5_k.cu | 31 - .../template-instances/mmq-instance-q6_k.cu | 31 - .../template-instances/mmq-instance-q8_0.cu | 31 - llama/ggml-cuda/tsembd.cuh | 31 - llama/ggml-cuda/upscale.cuh | 31 - llama/ggml-cuda/vendors/cuda.h | 41 - llama/ggml-cuda/wkv6.cuh | 31 - llama/ggml-threading.cpp | 38 - llama/ggml-threading.h | 40 - llama/json-schema-to-grammar.h | 34 - llama/llama-cparams.cpp | 27 - llama/llama-cparams.h | 64 -- llama/llama-cpp.h | 56 -- llama/llama-quant.h | 27 - llama/llama.cpp/.rsync-filter | 22 + llama/llama.cpp/LICENSE | 21 + llama/{ => llama.cpp/common}/base64.hpp | 0 llama/{ => llama.cpp/common}/common.cpp | 26 - llama/llama.cpp/common/common.go | 6 + llama/{ => llama.cpp/common}/common.h | 26 - .../common}/json-schema-to-grammar.cpp | 26 - .../llama.cpp/common/json-schema-to-grammar.h | 8 + llama/{ => llama.cpp/common}/json.hpp | 0 llama/{ => llama.cpp/common}/log.cpp | 26 - llama/{ => llama.cpp/common}/log.h | 26 - llama/{ => llama.cpp/common}/sampling.cpp | 26 - llama/{ => llama.cpp/common}/sampling.h | 26 - llama/{ => llama.cpp/common}/stb_image.h | 0 llama/{ => llama.cpp/examples/llava}/clip.cpp | 26 - llama/{ => llama.cpp/examples/llava}/clip.h | 26 - .../{ => llama.cpp/examples/llava}/llava.cpp | 26 - llama/llama.cpp/examples/llava/llava.go | 6 + llama/{ => llama.cpp/examples/llava}/llava.h | 26 - llama/llama.cpp/include/llama-cpp.h | 30 + llama/{ => llama.cpp/include}/llama.h | 26 - 
llama/{ => llama.cpp/src}/llama-adapter.cpp | 26 - llama/{ => llama.cpp/src}/llama-adapter.h | 26 - llama/{ => llama.cpp/src}/llama-arch.cpp | 26 - llama/{ => llama.cpp/src}/llama-arch.h | 26 - llama/{ => llama.cpp/src}/llama-batch.cpp | 26 - llama/{ => llama.cpp/src}/llama-batch.h | 26 - llama/{ => llama.cpp/src}/llama-chat.cpp | 26 - llama/{ => llama.cpp/src}/llama-chat.h | 26 - llama/{ => llama.cpp/src}/llama-context.cpp | 26 - llama/{ => llama.cpp/src}/llama-context.h | 26 - llama/llama.cpp/src/llama-cparams.cpp | 1 + llama/llama.cpp/src/llama-cparams.h | 38 + llama/{ => llama.cpp/src}/llama-grammar.cpp | 26 - llama/{ => llama.cpp/src}/llama-grammar.h | 26 - llama/{ => llama.cpp/src}/llama-hparams.cpp | 26 - llama/{ => llama.cpp/src}/llama-hparams.h | 26 - llama/{ => llama.cpp/src}/llama-impl.cpp | 26 - llama/{ => llama.cpp/src}/llama-impl.h | 26 - llama/{ => llama.cpp/src}/llama-kv-cache.cpp | 26 - llama/{ => llama.cpp/src}/llama-kv-cache.h | 26 - llama/{ => llama.cpp/src}/llama-mmap.cpp | 26 - llama/{ => llama.cpp/src}/llama-mmap.h | 26 - .../src}/llama-model-loader.cpp | 26 - .../{ => llama.cpp/src}/llama-model-loader.h | 26 - llama/{ => llama.cpp/src}/llama-model.cpp | 26 - llama/{ => llama.cpp/src}/llama-model.h | 26 - llama/{ => llama.cpp/src}/llama-quant.cpp | 26 - llama/llama.cpp/src/llama-quant.h | 1 + llama/{ => llama.cpp/src}/llama-sampling.cpp | 26 - llama/{ => llama.cpp/src}/llama-sampling.h | 26 - llama/{ => llama.cpp/src}/llama-vocab.cpp | 26 - llama/{ => llama.cpp/src}/llama-vocab.h | 26 - llama/{ => llama.cpp/src}/llama.cpp | 26 - llama/llama.cpp/src/llama.go | 8 + llama/{ => llama.cpp/src}/unicode-data.cpp | 26 - llama/llama.cpp/src/unicode-data.h | 20 + llama/{ => llama.cpp/src}/unicode.cpp | 26 - llama/{ => llama.cpp/src}/unicode.h | 26 - llama/llama.go | 74 +- llama/mmq.h | 36 - llama/patches/0001-cuda.patch | 39 +- llama/patches/0006-conditional-fattn.patch | 4 +- ...rt.patch => 0007-add-mllama-support.patch} | 0 llama/patches/0007-blas.patch | 26 - ...or.patch => 0008-add-unpad-operator.patch} | 12 +- ... 
=> 0009-fix-deepseek-deseret-regex.patch} | 0 ...tain-ordering-for-rules-for-grammar.patch} | 0 ...ing-arg-in-static-assert-on-windows.patch} | 0 .../patches/0011-relative-include-paths.patch | 51 - ...sure-KV-cache-is-fully-defragmented.patch} | 0 ...atch => 0013-re-enable-gpu-for-clip.patch} | 0 .../patches/0014-sort-devices-by-score.patch | 82 ++ ...target-ggml-cpu-for-all-cpu-variants.patch | 29 + llama/sgemm.h | 14 - llama/unicode-data.h | 46 - llm/server.go | 138 +-- macapp/forge.config.ts | 6 +- make/Makefile.cpu | 40 - make/Makefile.cuda_v11 | 13 - make/Makefile.cuda_v12 | 13 - make/Makefile.ollama | 19 - make/Makefile.rocm | 119 --- make/Makefile.sync | 250 ----- make/Makefile.test | 19 - make/common-defs.make | 91 -- make/cuda-v11-defs.make | 17 - make/cuda-v12-defs.make | 17 - make/cuda.make | 56 -- make/gpu.make | 89 -- make/rocm-defs.make | 9 - ml/backend/ggml/ggml/.rsync-filter | 22 + ml/backend/ggml/ggml/LICENSE | 21 + .../backend/ggml/ggml/include}/ggml-alloc.h | 26 - .../backend/ggml/ggml/include}/ggml-backend.h | 26 - ml/backend/ggml/ggml/include/ggml-blas.h | 25 + ml/backend/ggml/ggml/include/ggml-cann.h | 123 +++ .../backend/ggml/ggml/include}/ggml-cpp.h | 26 - .../backend/ggml/ggml/include}/ggml-cpu.h | 26 - .../backend/ggml/ggml/include}/ggml-cuda.h | 26 - ml/backend/ggml/ggml/include/ggml-kompute.h | 50 + .../backend/ggml/ggml/include}/ggml-metal.h | 26 - ml/backend/ggml/ggml/include/ggml-opencl.h | 26 + ml/backend/ggml/ggml/include/ggml-opt.h | 216 ++++ ml/backend/ggml/ggml/include/ggml-rpc.h | 28 + ml/backend/ggml/ggml/include/ggml-sycl.h | 49 + ml/backend/ggml/ggml/include/ggml-vulkan.h | 31 + .../backend/ggml/ggml/include}/ggml.h | 26 - ml/backend/ggml/ggml/src/CMakeLists.txt | 340 +++++++ .../backend/ggml/ggml/src}/ggml-alloc.c | 26 - .../ggml/ggml/src}/ggml-backend-impl.h | 26 - .../ggml/ggml/src}/ggml-backend-reg.cpp | 47 +- .../backend/ggml/ggml/src}/ggml-backend.cpp | 32 - .../ggml/ggml/src/ggml-blas/CMakeLists.txt | 87 ++ ml/backend/ggml/ggml/src/ggml-blas/blas.go | 10 + .../ggml/ggml/src/ggml-blas}/ggml-blas.cpp | 30 - .../backend/ggml/ggml/src}/ggml-common.h | 26 - .../ggml/ggml/src/ggml-cpu/CMakeLists.txt | 346 +++++++ .../ggml/ggml/src/ggml-cpu/amx}/amx.cpp | 26 - ml/backend/ggml/ggml/src/ggml-cpu/amx/amx.h | 8 + .../ggml/ggml/src/ggml-cpu/amx/common.h | 91 ++ .../ggml/ggml/src/ggml-cpu/amx}/mmq.cpp | 26 - ml/backend/ggml/ggml/src/ggml-cpu/amx/mmq.h | 10 + .../ggml/ggml/src/ggml-cpu/cpu-feats-x86.cpp | 323 ++++++ ml/backend/ggml/ggml/src/ggml-cpu/cpu.go | 11 + .../ggml/src/ggml-cpu}/ggml-cpu-aarch64.cpp | 26 - .../ggml/ggml/src/ggml-cpu/ggml-cpu-aarch64.h | 8 + .../ggml/ggml/src/ggml-cpu/ggml-cpu-hbm.cpp | 55 ++ .../ggml/ggml/src/ggml-cpu/ggml-cpu-hbm.h | 8 + .../ggml/ggml/src/ggml-cpu}/ggml-cpu-impl.h | 26 - .../ggml/ggml/src/ggml-cpu}/ggml-cpu-quants.c | 26 - .../ggml/ggml/src/ggml-cpu}/ggml-cpu-quants.h | 26 - .../ggml/src/ggml-cpu}/ggml-cpu-traits.cpp | 26 - .../ggml/ggml/src/ggml-cpu/ggml-cpu-traits.h | 38 + .../ggml/ggml/src/ggml-cpu}/ggml-cpu.c | 28 +- .../ggml/ggml/src/ggml-cpu}/ggml-cpu.cpp | 29 +- .../ggml/src/ggml-cpu/llamafile/llamafile.go | 5 + .../ggml/src/ggml-cpu/llamafile}/sgemm.cpp | 0 .../ggml/ggml/src/ggml-cpu}/llamafile/sgemm.h | 0 .../ggml/ggml/src/ggml-cuda/CMakeLists.txt | 152 +++ .../backend/ggml/ggml/src}/ggml-cuda/acc.cu | 26 - ml/backend/ggml/ggml/src/ggml-cuda/acc.cuh | 5 + ml/backend/ggml/ggml/src/ggml-cuda/arange.cu | 34 + ml/backend/ggml/ggml/src/ggml-cuda/arange.cuh | 5 + .../ggml/ggml/src}/ggml-cuda/argmax.cu | 26 - 
ml/backend/ggml/ggml/src/ggml-cuda/argmax.cuh | 3 + .../ggml/ggml/src}/ggml-cuda/argsort.cu | 26 - .../ggml/ggml/src/ggml-cuda/argsort.cuh | 3 + .../ggml/ggml/src}/ggml-cuda/binbcast.cu | 26 - .../ggml/ggml/src/ggml-cuda/binbcast.cuh | 9 + ml/backend/ggml/ggml/src/ggml-cuda/clamp.cu | 34 + ml/backend/ggml/ggml/src/ggml-cuda/clamp.cuh | 5 + .../ggml/ggml/src}/ggml-cuda/common.cuh | 26 - .../ggml/ggml/src}/ggml-cuda/concat.cu | 26 - ml/backend/ggml/ggml/src/ggml-cuda/concat.cuh | 5 + .../ggml/src}/ggml-cuda/conv-transpose-1d.cu | 26 - .../ggml/src/ggml-cuda/conv-transpose-1d.cuh | 5 + .../ggml/ggml/src}/ggml-cuda/convert.cu | 26 - .../ggml/ggml/src/ggml-cuda/convert.cuh | 13 + .../ggml/ggml/src}/ggml-cuda/count-equal.cu | 26 - .../ggml/ggml/src/ggml-cuda/count-equal.cuh | 5 + .../backend/ggml/ggml/src}/ggml-cuda/cpy.cu | 26 - ml/backend/ggml/ggml/src/ggml-cuda/cpy.cuh | 9 + .../ggml/src}/ggml-cuda/cross-entropy-loss.cu | 26 - .../ggml/src/ggml-cuda/cross-entropy-loss.cuh | 7 + .../ggml/ggml/src}/ggml-cuda/dequantize.cuh | 26 - .../ggml/ggml/src}/ggml-cuda/diagmask.cu | 26 - .../ggml/ggml/src/ggml-cuda/diagmask.cuh | 5 + .../ggml/ggml/src}/ggml-cuda/fattn-common.cuh | 26 - .../ggml/src}/ggml-cuda/fattn-tile-f16.cu | 26 - .../ggml/src/ggml-cuda/fattn-tile-f16.cuh | 3 + .../ggml/src}/ggml-cuda/fattn-tile-f32.cu | 26 - .../ggml/src/ggml-cuda/fattn-tile-f32.cuh | 3 + .../ggml/src}/ggml-cuda/fattn-vec-f16.cuh | 26 - .../ggml/src}/ggml-cuda/fattn-vec-f32.cuh | 26 - .../ggml/src}/ggml-cuda/fattn-wmma-f16.cuh | 26 - .../backend/ggml/ggml/src}/ggml-cuda/fattn.cu | 26 - ml/backend/ggml/ggml/src/ggml-cuda/fattn.cuh | 3 + .../ggml/ggml/src}/ggml-cuda/getrows.cu | 26 - .../ggml/ggml/src/ggml-cuda/getrows.cuh | 5 + .../ggml/ggml/src}/ggml-cuda/ggml-cuda.cu | 31 +- .../ggml/ggml/src}/ggml-cuda/im2col.cu | 26 - ml/backend/ggml/ggml/src/ggml-cuda/im2col.cuh | 5 + .../backend/ggml/ggml/src}/ggml-cuda/mma.cuh | 26 - .../backend/ggml/ggml/src}/ggml-cuda/mmq.cu | 26 - .../backend/ggml/ggml/src}/ggml-cuda/mmq.cuh | 26 - .../backend/ggml/ggml/src}/ggml-cuda/mmv.cu | 26 - ml/backend/ggml/ggml/src/ggml-cuda/mmv.cuh | 12 + .../backend/ggml/ggml/src}/ggml-cuda/mmvq.cu | 26 - ml/backend/ggml/ggml/src/ggml-cuda/mmvq.cuh | 9 + .../backend/ggml/ggml/src}/ggml-cuda/norm.cu | 26 - ml/backend/ggml/ggml/src/ggml-cuda/norm.cuh | 7 + .../ggml/src}/ggml-cuda/opt-step-adamw.cu | 26 - .../ggml/src/ggml-cuda/opt-step-adamw.cuh | 5 + .../ggml/ggml/src}/ggml-cuda/out-prod.cu | 26 - .../ggml/ggml/src/ggml-cuda/out-prod.cuh | 3 + .../backend/ggml/ggml/src}/ggml-cuda/pad.cu | 26 - ml/backend/ggml/ggml/src/ggml-cuda/pad.cuh | 6 + .../ggml/ggml/src}/ggml-cuda/pool2d.cu | 26 - ml/backend/ggml/ggml/src/ggml-cuda/pool2d.cuh | 5 + .../ggml/ggml/src}/ggml-cuda/quantize.cu | 26 - .../ggml/ggml/src/ggml-cuda/quantize.cuh | 24 + .../backend/ggml/ggml/src}/ggml-cuda/rope.cu | 26 - ml/backend/ggml/ggml/src/ggml-cuda/rope.cuh | 5 + ml/backend/ggml/ggml/src/ggml-cuda/scale.cu | 31 + ml/backend/ggml/ggml/src/ggml-cuda/scale.cuh | 5 + .../ggml/ggml/src}/ggml-cuda/softmax.cu | 26 - .../ggml/ggml/src/ggml-cuda/softmax.cuh | 5 + .../backend/ggml/ggml/src}/ggml-cuda/sum.cu | 26 - ml/backend/ggml/ggml/src/ggml-cuda/sum.cuh | 5 + ml/backend/ggml/ggml/src/ggml-cuda/sumrows.cu | 39 + .../ggml/ggml/src/ggml-cuda/sumrows.cuh | 5 + .../fattn-vec-f16-instance-hs128-f16-f16.cu | 5 + .../fattn-vec-f16-instance-hs128-f16-q4_0.cu | 5 + .../fattn-vec-f16-instance-hs128-f16-q4_1.cu | 5 + .../fattn-vec-f16-instance-hs128-f16-q5_0.cu | 5 + 
.../fattn-vec-f16-instance-hs128-f16-q5_1.cu | 5 + .../fattn-vec-f16-instance-hs128-f16-q8_0.cu | 5 + .../fattn-vec-f16-instance-hs128-q4_0-f16.cu | 5 + .../fattn-vec-f16-instance-hs128-q4_0-q4_0.cu | 5 + .../fattn-vec-f16-instance-hs128-q4_0-q4_1.cu | 5 + .../fattn-vec-f16-instance-hs128-q4_0-q5_0.cu | 5 + .../fattn-vec-f16-instance-hs128-q4_0-q5_1.cu | 5 + .../fattn-vec-f16-instance-hs128-q4_0-q8_0.cu | 5 + .../fattn-vec-f16-instance-hs128-q4_1-f16.cu | 5 + .../fattn-vec-f16-instance-hs128-q4_1-q4_0.cu | 5 + .../fattn-vec-f16-instance-hs128-q4_1-q4_1.cu | 5 + .../fattn-vec-f16-instance-hs128-q4_1-q5_0.cu | 5 + .../fattn-vec-f16-instance-hs128-q4_1-q5_1.cu | 5 + .../fattn-vec-f16-instance-hs128-q4_1-q8_0.cu | 5 + .../fattn-vec-f16-instance-hs128-q5_0-f16.cu | 5 + .../fattn-vec-f16-instance-hs128-q5_0-q4_0.cu | 5 + .../fattn-vec-f16-instance-hs128-q5_0-q4_1.cu | 5 + .../fattn-vec-f16-instance-hs128-q5_0-q5_0.cu | 5 + .../fattn-vec-f16-instance-hs128-q5_0-q5_1.cu | 5 + .../fattn-vec-f16-instance-hs128-q5_0-q8_0.cu | 5 + .../fattn-vec-f16-instance-hs128-q5_1-f16.cu | 5 + .../fattn-vec-f16-instance-hs128-q5_1-q4_0.cu | 5 + .../fattn-vec-f16-instance-hs128-q5_1-q4_1.cu | 5 + .../fattn-vec-f16-instance-hs128-q5_1-q5_0.cu | 5 + .../fattn-vec-f16-instance-hs128-q5_1-q5_1.cu | 5 + .../fattn-vec-f16-instance-hs128-q5_1-q8_0.cu | 5 + .../fattn-vec-f16-instance-hs128-q8_0-f16.cu | 5 + .../fattn-vec-f16-instance-hs128-q8_0-q4_0.cu | 5 + .../fattn-vec-f16-instance-hs128-q8_0-q4_1.cu | 5 + .../fattn-vec-f16-instance-hs128-q8_0-q5_0.cu | 5 + .../fattn-vec-f16-instance-hs128-q8_0-q5_1.cu | 5 + .../fattn-vec-f16-instance-hs128-q8_0-q8_0.cu | 5 + .../fattn-vec-f16-instance-hs256-f16-f16.cu | 5 + .../fattn-vec-f16-instance-hs64-f16-f16.cu | 5 + .../fattn-vec-f16-instance-hs64-f16-q4_0.cu | 5 + .../fattn-vec-f16-instance-hs64-f16-q4_1.cu | 5 + .../fattn-vec-f16-instance-hs64-f16-q5_0.cu | 5 + .../fattn-vec-f16-instance-hs64-f16-q5_1.cu | 5 + .../fattn-vec-f16-instance-hs64-f16-q8_0.cu | 5 + .../fattn-vec-f32-instance-hs128-f16-f16.cu | 5 + .../fattn-vec-f32-instance-hs128-f16-q4_0.cu | 5 + .../fattn-vec-f32-instance-hs128-f16-q4_1.cu | 5 + .../fattn-vec-f32-instance-hs128-f16-q5_0.cu | 5 + .../fattn-vec-f32-instance-hs128-f16-q5_1.cu | 5 + .../fattn-vec-f32-instance-hs128-f16-q8_0.cu | 5 + .../fattn-vec-f32-instance-hs128-q4_0-f16.cu | 5 + .../fattn-vec-f32-instance-hs128-q4_0-q4_0.cu | 5 + .../fattn-vec-f32-instance-hs128-q4_0-q4_1.cu | 5 + .../fattn-vec-f32-instance-hs128-q4_0-q5_0.cu | 5 + .../fattn-vec-f32-instance-hs128-q4_0-q5_1.cu | 5 + .../fattn-vec-f32-instance-hs128-q4_0-q8_0.cu | 5 + .../fattn-vec-f32-instance-hs128-q4_1-f16.cu | 5 + .../fattn-vec-f32-instance-hs128-q4_1-q4_0.cu | 5 + .../fattn-vec-f32-instance-hs128-q4_1-q4_1.cu | 5 + .../fattn-vec-f32-instance-hs128-q4_1-q5_0.cu | 5 + .../fattn-vec-f32-instance-hs128-q4_1-q5_1.cu | 5 + .../fattn-vec-f32-instance-hs128-q4_1-q8_0.cu | 5 + .../fattn-vec-f32-instance-hs128-q5_0-f16.cu | 5 + .../fattn-vec-f32-instance-hs128-q5_0-q4_0.cu | 5 + .../fattn-vec-f32-instance-hs128-q5_0-q4_1.cu | 5 + .../fattn-vec-f32-instance-hs128-q5_0-q5_0.cu | 5 + .../fattn-vec-f32-instance-hs128-q5_0-q5_1.cu | 5 + .../fattn-vec-f32-instance-hs128-q5_0-q8_0.cu | 5 + .../fattn-vec-f32-instance-hs128-q5_1-f16.cu | 5 + .../fattn-vec-f32-instance-hs128-q5_1-q4_0.cu | 5 + .../fattn-vec-f32-instance-hs128-q5_1-q4_1.cu | 5 + .../fattn-vec-f32-instance-hs128-q5_1-q5_0.cu | 5 + .../fattn-vec-f32-instance-hs128-q5_1-q5_1.cu | 5 + .../fattn-vec-f32-instance-hs128-q5_1-q8_0.cu | 5 + 
.../fattn-vec-f32-instance-hs128-q8_0-f16.cu | 5 + .../fattn-vec-f32-instance-hs128-q8_0-q4_0.cu | 5 + .../fattn-vec-f32-instance-hs128-q8_0-q4_1.cu | 5 + .../fattn-vec-f32-instance-hs128-q8_0-q5_0.cu | 5 + .../fattn-vec-f32-instance-hs128-q8_0-q5_1.cu | 5 + .../fattn-vec-f32-instance-hs128-q8_0-q8_0.cu | 5 + .../fattn-vec-f32-instance-hs256-f16-f16.cu | 5 + .../fattn-vec-f32-instance-hs64-f16-f16.cu | 5 + .../fattn-vec-f32-instance-hs64-f16-q4_0.cu | 5 + .../fattn-vec-f32-instance-hs64-f16-q4_1.cu | 5 + .../fattn-vec-f32-instance-hs64-f16-q5_0.cu | 5 + .../fattn-vec-f32-instance-hs64-f16-q5_1.cu | 5 + .../fattn-vec-f32-instance-hs64-f16-q8_0.cu | 5 + .../fattn-wmma-f16-instance-kqfloat-cpb16.cu | 10 + .../fattn-wmma-f16-instance-kqfloat-cpb32.cu | 9 + .../fattn-wmma-f16-instance-kqhalf-cpb16.cu | 10 + .../fattn-wmma-f16-instance-kqhalf-cpb32.cu | 10 + .../fattn-wmma-f16-instance-kqhalf-cpb8.cu | 8 + .../template-instances/generate_cu_files.py | 77 ++ .../template-instances/mmq-instance-iq1_s.cu | 5 + .../template-instances/mmq-instance-iq2_s.cu | 5 + .../template-instances/mmq-instance-iq2_xs.cu | 5 + .../mmq-instance-iq2_xxs.cu | 5 + .../template-instances/mmq-instance-iq3_s.cu | 5 + .../mmq-instance-iq3_xxs.cu | 5 + .../template-instances/mmq-instance-iq4_nl.cu | 5 + .../template-instances/mmq-instance-iq4_xs.cu | 5 + .../template-instances/mmq-instance-q2_k.cu | 5 + .../template-instances/mmq-instance-q3_k.cu | 5 + .../template-instances/mmq-instance-q4_0.cu | 5 + .../template-instances/mmq-instance-q4_1.cu | 5 + .../template-instances/mmq-instance-q4_k.cu | 5 + .../template-instances/mmq-instance-q5_0.cu | 5 + .../template-instances/mmq-instance-q5_1.cu | 5 + .../template-instances/mmq-instance-q5_k.cu | 5 + .../template-instances/mmq-instance-q6_k.cu | 5 + .../template-instances/mmq-instance-q8_0.cu | 5 + .../ggml/ggml/src}/ggml-cuda/tsembd.cu | 26 - ml/backend/ggml/ggml/src/ggml-cuda/tsembd.cuh | 5 + .../backend/ggml/ggml/src}/ggml-cuda/unary.cu | 26 - .../ggml/ggml/src}/ggml-cuda/unary.cuh | 26 - .../ggml/ggml/src}/ggml-cuda/upscale.cu | 26 - .../ggml/ggml/src/ggml-cuda/upscale.cuh | 5 + .../ggml/ggml/src}/ggml-cuda/vecdotq.cuh | 26 - .../ggml/ggml/src/ggml-cuda/vendors/cuda.h | 14 + .../ggml/ggml/src}/ggml-cuda/vendors/hip.h | 26 - .../ggml/ggml/src}/ggml-cuda/vendors/musa.h | 26 - .../backend/ggml/ggml/src}/ggml-cuda/wkv6.cu | 26 - ml/backend/ggml/ggml/src/ggml-cuda/wkv6.cuh | 5 + .../ggml/ggml/src/ggml-hip/CMakeLists.txt | 104 ++ .../backend/ggml/ggml/src}/ggml-impl.h | 26 - .../ggml/ggml/src/ggml-metal/CMakeLists.txt | 121 +++ .../src/ggml-metal}/ggml-metal-embed.metal | 79 +- .../ggml/src/ggml-metal/ggml-metal-embed.s | 2 +- .../ggml/src/ggml-metal}/ggml-metal-impl.h | 26 - .../ggml/ggml/src/ggml-metal/ggml-metal.m | 27 +- .../ggml/src/ggml-metal}/ggml-metal.metal | 26 - ml/backend/ggml/ggml/src/ggml-metal/metal.go | 9 + ml/backend/ggml/ggml/src/ggml-opt.cpp | 854 ++++++++++++++++ .../backend/ggml/ggml/src}/ggml-quants.c | 28 +- .../backend/ggml/ggml/src}/ggml-quants.h | 26 - ml/backend/ggml/ggml/src/ggml-threading.cpp | 12 + ml/backend/ggml/ggml/src/ggml-threading.h | 14 + {llama => ml/backend/ggml/ggml/src}/ggml.c | 26 - ml/backend/ggml/ggml/src/ggml.go | 81 ++ ml/backend/ggml/ggml/src/ggml_darwin_arm64.go | 10 + ml/backend/ggml/ggml_debug.go | 6 + runners/common.go | 207 ---- scripts/build.sh | 21 - scripts/build_darwin.sh | 127 ++- scripts/build_linux.sh | 4 +- scripts/build_windows.ps1 | 51 +- scripts/fast.sh | 20 - scripts/publish.sh | 25 - scripts/rh_linux_deps.sh | 78 
-- server/routes.go | 9 - 542 files changed, 5796 insertions(+), 11469 deletions(-) create mode 100644 CMakeLists.txt create mode 100644 CMakePresets.json delete mode 100644 Makefile create mode 100644 Makefile.sync create mode 100644 discover/path.go delete mode 100644 llama/amx.h delete mode 100644 llama/ggml-blas.h delete mode 100644 llama/ggml-cpu-aarch64.h delete mode 100644 llama/ggml-cpu-traits.h delete mode 100644 llama/ggml-cuda/acc.cuh delete mode 100644 llama/ggml-cuda/arange.cu delete mode 100644 llama/ggml-cuda/arange.cuh delete mode 100644 llama/ggml-cuda/argmax.cuh delete mode 100644 llama/ggml-cuda/argsort.cuh delete mode 100644 llama/ggml-cuda/binbcast.cuh delete mode 100644 llama/ggml-cuda/clamp.cu delete mode 100644 llama/ggml-cuda/clamp.cuh delete mode 100644 llama/ggml-cuda/concat.cuh delete mode 100644 llama/ggml-cuda/conv-transpose-1d.cuh delete mode 100644 llama/ggml-cuda/convert.cuh delete mode 100644 llama/ggml-cuda/count-equal.cuh delete mode 100644 llama/ggml-cuda/cpy.cuh delete mode 100644 llama/ggml-cuda/cross-entropy-loss.cuh delete mode 100644 llama/ggml-cuda/diagmask.cuh delete mode 100644 llama/ggml-cuda/fattn-tile-f16.cuh delete mode 100644 llama/ggml-cuda/fattn-tile-f32.cuh delete mode 100644 llama/ggml-cuda/fattn.cuh delete mode 100644 llama/ggml-cuda/getrows.cuh delete mode 100644 llama/ggml-cuda/im2col.cuh delete mode 100644 llama/ggml-cuda/mmv.cuh delete mode 100644 llama/ggml-cuda/mmvq.cuh delete mode 100644 llama/ggml-cuda/norm.cuh delete mode 100644 llama/ggml-cuda/opt-step-adamw.cuh delete mode 100644 llama/ggml-cuda/out-prod.cuh delete mode 100644 llama/ggml-cuda/pad.cuh delete mode 100644 llama/ggml-cuda/pool2d.cuh delete mode 100644 llama/ggml-cuda/quantize.cuh delete mode 100644 llama/ggml-cuda/rope.cuh delete mode 100644 llama/ggml-cuda/scale.cu delete mode 100644 llama/ggml-cuda/scale.cuh delete mode 100644 llama/ggml-cuda/softmax.cuh delete mode 100644 llama/ggml-cuda/sum.cuh delete mode 100644 llama/ggml-cuda/sumrows.cu delete mode 100644 llama/ggml-cuda/sumrows.cuh delete mode 100644 llama/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-f16-f16.cu delete mode 100644 llama/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-f16-q4_0.cu delete mode 100644 llama/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-f16-q4_1.cu delete mode 100644 llama/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-f16-q5_0.cu delete mode 100644 llama/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-f16-q5_1.cu delete mode 100644 llama/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-f16-q8_0.cu delete mode 100644 llama/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-q4_0-f16.cu delete mode 100644 llama/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-q4_0-q4_0.cu delete mode 100644 llama/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-q4_0-q4_1.cu delete mode 100644 llama/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-q4_0-q5_0.cu delete mode 100644 llama/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-q4_0-q5_1.cu delete mode 100644 llama/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-q4_0-q8_0.cu delete mode 100644 llama/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-q4_1-f16.cu delete mode 100644 llama/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-q4_1-q4_0.cu delete mode 100644 llama/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-q4_1-q4_1.cu delete mode 100644 
llama/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-q4_1-q5_0.cu delete mode 100644 llama/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-q4_1-q5_1.cu delete mode 100644 llama/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-q4_1-q8_0.cu delete mode 100644 llama/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-q5_0-f16.cu delete mode 100644 llama/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-q5_0-q4_0.cu delete mode 100644 llama/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-q5_0-q4_1.cu delete mode 100644 llama/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-q5_0-q5_0.cu delete mode 100644 llama/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-q5_0-q5_1.cu delete mode 100644 llama/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-q5_0-q8_0.cu delete mode 100644 llama/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-q5_1-f16.cu delete mode 100644 llama/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-q5_1-q4_0.cu delete mode 100644 llama/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-q5_1-q4_1.cu delete mode 100644 llama/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-q5_1-q5_0.cu delete mode 100644 llama/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-q5_1-q5_1.cu delete mode 100644 llama/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-q5_1-q8_0.cu delete mode 100644 llama/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-q8_0-f16.cu delete mode 100644 llama/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-q8_0-q4_0.cu delete mode 100644 llama/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-q8_0-q4_1.cu delete mode 100644 llama/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-q8_0-q5_0.cu delete mode 100644 llama/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-q8_0-q5_1.cu delete mode 100644 llama/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-q8_0-q8_0.cu delete mode 100644 llama/ggml-cuda/template-instances/fattn-vec-f16-instance-hs256-f16-f16.cu delete mode 100644 llama/ggml-cuda/template-instances/fattn-vec-f16-instance-hs64-f16-f16.cu delete mode 100644 llama/ggml-cuda/template-instances/fattn-vec-f16-instance-hs64-f16-q4_0.cu delete mode 100644 llama/ggml-cuda/template-instances/fattn-vec-f16-instance-hs64-f16-q4_1.cu delete mode 100644 llama/ggml-cuda/template-instances/fattn-vec-f16-instance-hs64-f16-q5_0.cu delete mode 100644 llama/ggml-cuda/template-instances/fattn-vec-f16-instance-hs64-f16-q5_1.cu delete mode 100644 llama/ggml-cuda/template-instances/fattn-vec-f16-instance-hs64-f16-q8_0.cu delete mode 100644 llama/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-f16-f16.cu delete mode 100644 llama/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-f16-q4_0.cu delete mode 100644 llama/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-f16-q4_1.cu delete mode 100644 llama/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-f16-q5_0.cu delete mode 100644 llama/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-f16-q5_1.cu delete mode 100644 llama/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-f16-q8_0.cu delete mode 100644 llama/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-q4_0-f16.cu delete mode 100644 llama/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-q4_0-q4_0.cu delete mode 100644 llama/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-q4_0-q4_1.cu delete mode 100644 
llama/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-q4_0-q5_0.cu delete mode 100644 llama/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-q4_0-q5_1.cu delete mode 100644 llama/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-q4_0-q8_0.cu delete mode 100644 llama/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-q4_1-f16.cu delete mode 100644 llama/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-q4_1-q4_0.cu delete mode 100644 llama/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-q4_1-q4_1.cu delete mode 100644 llama/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-q4_1-q5_0.cu delete mode 100644 llama/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-q4_1-q5_1.cu delete mode 100644 llama/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-q4_1-q8_0.cu delete mode 100644 llama/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-q5_0-f16.cu delete mode 100644 llama/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-q5_0-q4_0.cu delete mode 100644 llama/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-q5_0-q4_1.cu delete mode 100644 llama/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-q5_0-q5_0.cu delete mode 100644 llama/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-q5_0-q5_1.cu delete mode 100644 llama/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-q5_0-q8_0.cu delete mode 100644 llama/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-q5_1-f16.cu delete mode 100644 llama/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-q5_1-q4_0.cu delete mode 100644 llama/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-q5_1-q4_1.cu delete mode 100644 llama/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-q5_1-q5_0.cu delete mode 100644 llama/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-q5_1-q5_1.cu delete mode 100644 llama/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-q5_1-q8_0.cu delete mode 100644 llama/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-q8_0-f16.cu delete mode 100644 llama/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-q8_0-q4_0.cu delete mode 100644 llama/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-q8_0-q4_1.cu delete mode 100644 llama/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-q8_0-q5_0.cu delete mode 100644 llama/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-q8_0-q5_1.cu delete mode 100644 llama/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-q8_0-q8_0.cu delete mode 100644 llama/ggml-cuda/template-instances/fattn-vec-f32-instance-hs256-f16-f16.cu delete mode 100644 llama/ggml-cuda/template-instances/fattn-vec-f32-instance-hs64-f16-f16.cu delete mode 100644 llama/ggml-cuda/template-instances/fattn-vec-f32-instance-hs64-f16-q4_0.cu delete mode 100644 llama/ggml-cuda/template-instances/fattn-vec-f32-instance-hs64-f16-q4_1.cu delete mode 100644 llama/ggml-cuda/template-instances/fattn-vec-f32-instance-hs64-f16-q5_0.cu delete mode 100644 llama/ggml-cuda/template-instances/fattn-vec-f32-instance-hs64-f16-q5_1.cu delete mode 100644 llama/ggml-cuda/template-instances/fattn-vec-f32-instance-hs64-f16-q8_0.cu delete mode 100644 llama/ggml-cuda/template-instances/fattn-wmma-f16-instance-kqfloat-cpb16.cu delete mode 100644 llama/ggml-cuda/template-instances/fattn-wmma-f16-instance-kqfloat-cpb32.cu delete mode 100644 llama/ggml-cuda/template-instances/fattn-wmma-f16-instance-kqhalf-cpb16.cu delete mode 100644 
 delete mode 100644 llama/ggml-cuda/template-instances/fattn-wmma-f16-instance-kqhalf-cpb32.cu
 delete mode 100644 llama/ggml-cuda/template-instances/fattn-wmma-f16-instance-kqhalf-cpb8.cu
 delete mode 100644 llama/ggml-cuda/template-instances/mmq-instance-iq1_s.cu
 delete mode 100644 llama/ggml-cuda/template-instances/mmq-instance-iq2_s.cu
 delete mode 100644 llama/ggml-cuda/template-instances/mmq-instance-iq2_xs.cu
 delete mode 100644 llama/ggml-cuda/template-instances/mmq-instance-iq2_xxs.cu
 delete mode 100644 llama/ggml-cuda/template-instances/mmq-instance-iq3_s.cu
 delete mode 100644 llama/ggml-cuda/template-instances/mmq-instance-iq3_xxs.cu
 delete mode 100644 llama/ggml-cuda/template-instances/mmq-instance-iq4_nl.cu
 delete mode 100644 llama/ggml-cuda/template-instances/mmq-instance-iq4_xs.cu
 delete mode 100644 llama/ggml-cuda/template-instances/mmq-instance-q2_k.cu
 delete mode 100644 llama/ggml-cuda/template-instances/mmq-instance-q3_k.cu
 delete mode 100644 llama/ggml-cuda/template-instances/mmq-instance-q4_0.cu
 delete mode 100644 llama/ggml-cuda/template-instances/mmq-instance-q4_1.cu
 delete mode 100644 llama/ggml-cuda/template-instances/mmq-instance-q4_k.cu
 delete mode 100644 llama/ggml-cuda/template-instances/mmq-instance-q5_0.cu
 delete mode 100644 llama/ggml-cuda/template-instances/mmq-instance-q5_1.cu
 delete mode 100644 llama/ggml-cuda/template-instances/mmq-instance-q5_k.cu
 delete mode 100644 llama/ggml-cuda/template-instances/mmq-instance-q6_k.cu
 delete mode 100644 llama/ggml-cuda/template-instances/mmq-instance-q8_0.cu
 delete mode 100644 llama/ggml-cuda/tsembd.cuh
 delete mode 100644 llama/ggml-cuda/upscale.cuh
 delete mode 100644 llama/ggml-cuda/vendors/cuda.h
 delete mode 100644 llama/ggml-cuda/wkv6.cuh
 delete mode 100644 llama/ggml-threading.cpp
 delete mode 100644 llama/ggml-threading.h
 delete mode 100644 llama/json-schema-to-grammar.h
 delete mode 100644 llama/llama-cparams.cpp
 delete mode 100644 llama/llama-cparams.h
 delete mode 100644 llama/llama-cpp.h
 delete mode 100644 llama/llama-quant.h
 create mode 100644 llama/llama.cpp/.rsync-filter
 create mode 100644 llama/llama.cpp/LICENSE
 rename llama/{ => llama.cpp/common}/base64.hpp (100%)
 rename llama/{ => llama.cpp/common}/common.cpp (98%)
 create mode 100644 llama/llama.cpp/common/common.go
 rename llama/{ => llama.cpp/common}/common.h (95%)
 rename llama/{ => llama.cpp/common}/json-schema-to-grammar.cpp (97%)
 create mode 100644 llama/llama.cpp/common/json-schema-to-grammar.h
 rename llama/{ => llama.cpp/common}/json.hpp (100%)
 rename llama/{ => llama.cpp/common}/log.cpp (89%)
 rename llama/{ => llama.cpp/common}/log.h (77%)
 rename llama/{ => llama.cpp/common}/sampling.cpp (93%)
 rename llama/{ => llama.cpp/common}/sampling.h (78%)
 rename llama/{ => llama.cpp/common}/stb_image.h (100%)
 rename llama/{ => llama.cpp/examples/llava}/clip.cpp (98%)
 rename llama/{ => llama.cpp/examples/llava}/clip.h (74%)
 rename llama/{ => llama.cpp/examples/llava}/llava.cpp (95%)
 create mode 100644 llama/llama.cpp/examples/llava/llava.go
 rename llama/{ => llama.cpp/examples/llava}/llava.h (59%)
 create mode 100644 llama/llama.cpp/include/llama-cpp.h
 rename llama/{ => llama.cpp/include}/llama.h (98%)
 rename llama/{ => llama.cpp/src}/llama-adapter.cpp (90%)
 rename llama/{ => llama.cpp/src}/llama-adapter.h (55%)
 rename llama/{ => llama.cpp/src}/llama-arch.cpp (98%)
 rename llama/{ => llama.cpp/src}/llama-arch.h (89%)
 rename llama/{ => llama.cpp/src}/llama-batch.cpp (91%)
 rename llama/{ => llama.cpp/src}/llama-batch.h (67%)
 rename llama/{ => llama.cpp/src}/llama-chat.cpp (95%)
 rename llama/{ => llama.cpp/src}/llama-chat.h (54%)
 rename llama/{ => llama.cpp/src}/llama-context.cpp (98%)
 rename llama/{ => llama.cpp/src}/llama-context.h (80%)
 create mode 100644 llama/llama.cpp/src/llama-cparams.cpp
 create mode 100644 llama/llama.cpp/src/llama-cparams.h
 rename llama/{ => llama.cpp/src}/llama-grammar.cpp (97%)
 rename llama/{ => llama.cpp/src}/llama-grammar.h (78%)
 rename llama/{ => llama.cpp/src}/llama-hparams.cpp (61%)
 rename llama/{ => llama.cpp/src}/llama-hparams.h (78%)
 rename llama/{ => llama.cpp/src}/llama-impl.cpp (82%)
 rename llama/{ => llama.cpp/src}/llama-impl.h (58%)
 rename llama/{ => llama.cpp/src}/llama-kv-cache.cpp (95%)
 rename llama/{ => llama.cpp/src}/llama-kv-cache.h (84%)
 rename llama/{ => llama.cpp/src}/llama-mmap.cpp (93%)
 rename llama/{ => llama.cpp/src}/llama-mmap.h (52%)
 rename llama/{ => llama.cpp/src}/llama-model-loader.cpp (97%)
 rename llama/{ => llama.cpp/src}/llama-model-loader.h (81%)
 rename llama/{ => llama.cpp/src}/llama-model.cpp (98%)
 rename llama/{ => llama.cpp/src}/llama-model.h (91%)
 rename llama/{ => llama.cpp/src}/llama-quant.cpp (97%)
 create mode 100644 llama/llama.cpp/src/llama-quant.h
 rename llama/{ => llama.cpp/src}/llama-sampling.cpp (98%)
 rename llama/{ => llama.cpp/src}/llama-sampling.h (54%)
 rename llama/{ => llama.cpp/src}/llama-vocab.cpp (98%)
 rename llama/{ => llama.cpp/src}/llama-vocab.h (84%)
 rename llama/{ => llama.cpp/src}/llama.cpp (99%)
 create mode 100644 llama/llama.cpp/src/llama.go
 rename llama/{ => llama.cpp/src}/unicode-data.cpp (99%)
 create mode 100644 llama/llama.cpp/src/unicode-data.h
 rename llama/{ => llama.cpp/src}/unicode.cpp (96%)
 rename llama/{ => llama.cpp/src}/unicode.h (63%)
 delete mode 100644 llama/mmq.h
 rename llama/patches/{0008-add-mllama-support.patch => 0007-add-mllama-support.patch} (100%)
 delete mode 100644 llama/patches/0007-blas.patch
 rename llama/patches/{0009-add-unpad-operator.patch => 0008-add-unpad-operator.patch} (97%)
 rename llama/patches/{0010-fix-deepseek-deseret-regex.patch => 0009-fix-deepseek-deseret-regex.patch} (100%)
 rename llama/patches/{0012-Maintain-ordering-for-rules-for-grammar.patch => 0010-Maintain-ordering-for-rules-for-grammar.patch} (100%)
 rename llama/patches/{0013-fix-missing-arg-in-static-assert-on-windows.patch => 0011-fix-missing-arg-in-static-assert-on-windows.patch} (100%)
 delete mode 100644 llama/patches/0011-relative-include-paths.patch
 rename llama/patches/{0014-llama-Ensure-KV-cache-is-fully-defragmented.patch => 0012-llama-Ensure-KV-cache-is-fully-defragmented.patch} (100%)
 rename llama/patches/{0015-re-enable-gpu-for-clip.patch => 0013-re-enable-gpu-for-clip.patch} (100%)
 create mode 100644 llama/patches/0014-sort-devices-by-score.patch
 create mode 100644 llama/patches/0015-add-phony-target-ggml-cpu-for-all-cpu-variants.patch
 delete mode 100644 llama/sgemm.h
 delete mode 100644 llama/unicode-data.h
 delete mode 100644 make/Makefile.cpu
 delete mode 100644 make/Makefile.cuda_v11
 delete mode 100644 make/Makefile.cuda_v12
 delete mode 100644 make/Makefile.ollama
 delete mode 100644 make/Makefile.rocm
 delete mode 100644 make/Makefile.sync
 delete mode 100644 make/Makefile.test
 delete mode 100644 make/common-defs.make
 delete mode 100644 make/cuda-v11-defs.make
 delete mode 100644 make/cuda-v12-defs.make
 delete mode 100644 make/cuda.make
 delete mode 100644 make/gpu.make
 delete mode 100644 make/rocm-defs.make
 create mode 100644 ml/backend/ggml/ggml/.rsync-filter
 create mode 100644 ml/backend/ggml/ggml/LICENSE
 rename {llama => ml/backend/ggml/ggml/include}/ggml-alloc.h (70%)
 rename {llama => ml/backend/ggml/ggml/include}/ggml-backend.h (94%)
 create mode 100644 ml/backend/ggml/ggml/include/ggml-blas.h
 create mode 100644 ml/backend/ggml/ggml/include/ggml-cann.h
 rename {llama => ml/backend/ggml/ggml/include}/ggml-cpp.h (56%)
 rename {llama => ml/backend/ggml/ggml/include}/ggml-cpu.h (84%)
 rename {llama => ml/backend/ggml/ggml/include}/ggml-cuda.h (56%)
 create mode 100644 ml/backend/ggml/ggml/include/ggml-kompute.h
 rename {llama => ml/backend/ggml/ggml/include}/ggml-metal.h (66%)
 create mode 100644 ml/backend/ggml/ggml/include/ggml-opencl.h
 create mode 100644 ml/backend/ggml/ggml/include/ggml-opt.h
 create mode 100644 ml/backend/ggml/ggml/include/ggml-rpc.h
 create mode 100644 ml/backend/ggml/ggml/include/ggml-sycl.h
 create mode 100644 ml/backend/ggml/ggml/include/ggml-vulkan.h
 rename {llama => ml/backend/ggml/ggml/include}/ggml.h (98%)
 create mode 100644 ml/backend/ggml/ggml/src/CMakeLists.txt
 rename {llama => ml/backend/ggml/ggml/src}/ggml-alloc.c (96%)
 rename {llama => ml/backend/ggml/ggml/src}/ggml-backend-impl.h (90%)
 rename {llama => ml/backend/ggml/ggml/src}/ggml-backend-reg.cpp (90%)
 rename {llama => ml/backend/ggml/ggml/src}/ggml-backend.cpp (98%)
 create mode 100644 ml/backend/ggml/ggml/src/ggml-blas/CMakeLists.txt
 create mode 100644 ml/backend/ggml/ggml/src/ggml-blas/blas.go
 rename {llama => ml/backend/ggml/ggml/src/ggml-blas}/ggml-blas.cpp (92%)
 rename {llama => ml/backend/ggml/ggml/src}/ggml-common.h (99%)
 create mode 100644 ml/backend/ggml/ggml/src/ggml-cpu/CMakeLists.txt
 rename {llama => ml/backend/ggml/ggml/src/ggml-cpu/amx}/amx.cpp (86%)
 create mode 100644 ml/backend/ggml/ggml/src/ggml-cpu/amx/amx.h
 create mode 100644 ml/backend/ggml/ggml/src/ggml-cpu/amx/common.h
 rename {llama => ml/backend/ggml/ggml/src/ggml-cpu/amx}/mmq.cpp (98%)
 create mode 100644 ml/backend/ggml/ggml/src/ggml-cpu/amx/mmq.h
 create mode 100644 ml/backend/ggml/ggml/src/ggml-cpu/cpu-feats-x86.cpp
 create mode 100644 ml/backend/ggml/ggml/src/ggml-cpu/cpu.go
 rename {llama => ml/backend/ggml/ggml/src/ggml-cpu}/ggml-cpu-aarch64.cpp (99%)
 create mode 100644 ml/backend/ggml/ggml/src/ggml-cpu/ggml-cpu-aarch64.h
 create mode 100644 ml/backend/ggml/ggml/src/ggml-cpu/ggml-cpu-hbm.cpp
 create mode 100644 ml/backend/ggml/ggml/src/ggml-cpu/ggml-cpu-hbm.h
 rename {llama => ml/backend/ggml/ggml/src/ggml-cpu}/ggml-cpu-impl.h (87%)
 rename {llama => ml/backend/ggml/ggml/src/ggml-cpu}/ggml-cpu-quants.c (99%)
 rename {llama => ml/backend/ggml/ggml/src/ggml-cpu}/ggml-cpu-quants.h (80%)
 rename {llama => ml/backend/ggml/ggml/src/ggml-cpu}/ggml-cpu-traits.cpp (50%)
 create mode 100644 ml/backend/ggml/ggml/src/ggml-cpu/ggml-cpu-traits.h
 rename {llama => ml/backend/ggml/ggml/src/ggml-cpu}/ggml-cpu.c (99%)
 rename {llama => ml/backend/ggml/ggml/src/ggml-cpu}/ggml-cpu.cpp (94%)
 create mode 100644 ml/backend/ggml/ggml/src/ggml-cpu/llamafile/llamafile.go
 rename {llama => ml/backend/ggml/ggml/src/ggml-cpu/llamafile}/sgemm.cpp (100%)
 rename {llama => ml/backend/ggml/ggml/src/ggml-cpu}/llamafile/sgemm.h (100%)
 create mode 100644 ml/backend/ggml/ggml/src/ggml-cuda/CMakeLists.txt
 rename {llama => ml/backend/ggml/ggml/src}/ggml-cuda/acc.cu (61%)
 create mode 100644 ml/backend/ggml/ggml/src/ggml-cuda/acc.cuh
 create mode 100644 ml/backend/ggml/ggml/src/ggml-cuda/arange.cu
 create mode 100644 ml/backend/ggml/ggml/src/ggml-cuda/arange.cuh
 rename {llama => ml/backend/ggml/ggml/src}/ggml-cuda/argmax.cu (69%)
 create mode 100644 ml/backend/ggml/ggml/src/ggml-cuda/argmax.cuh
 rename {llama => ml/backend/ggml/ggml/src}/ggml-cuda/argsort.cu (73%)
 create mode 100644 ml/backend/ggml/ggml/src/ggml-cuda/argsort.cuh
 rename {llama => ml/backend/ggml/ggml/src}/ggml-cuda/binbcast.cu (91%)
 create mode 100644 ml/backend/ggml/ggml/src/ggml-cuda/binbcast.cuh
 create mode 100644 ml/backend/ggml/ggml/src/ggml-cuda/clamp.cu
 create mode 100644 ml/backend/ggml/ggml/src/ggml-cuda/clamp.cuh
 rename {llama => ml/backend/ggml/ggml/src}/ggml-cuda/common.cuh (94%)
 rename {llama => ml/backend/ggml/ggml/src}/ggml-cuda/concat.cu (85%)
 create mode 100644 ml/backend/ggml/ggml/src/ggml-cuda/concat.cuh
 rename {llama => ml/backend/ggml/ggml/src}/ggml-cuda/conv-transpose-1d.cu (72%)
 create mode 100644 ml/backend/ggml/ggml/src/ggml-cuda/conv-transpose-1d.cuh
 rename {llama => ml/backend/ggml/ggml/src}/ggml-cuda/convert.cu (95%)
 create mode 100644 ml/backend/ggml/ggml/src/ggml-cuda/convert.cuh
 rename {llama => ml/backend/ggml/ggml/src}/ggml-cuda/count-equal.cu (62%)
 create mode 100644 ml/backend/ggml/ggml/src/ggml-cuda/count-equal.cuh
 rename {llama => ml/backend/ggml/ggml/src}/ggml-cuda/cpy.cu (94%)
 create mode 100644 ml/backend/ggml/ggml/src/ggml-cuda/cpy.cuh
 rename {llama => ml/backend/ggml/ggml/src}/ggml-cuda/cross-entropy-loss.cu (82%)
 create mode 100644 ml/backend/ggml/ggml/src/ggml-cuda/cross-entropy-loss.cuh
 rename {llama => ml/backend/ggml/ggml/src}/ggml-cuda/dequantize.cuh (68%)
 rename {llama => ml/backend/ggml/ggml/src}/ggml-cuda/diagmask.cu (58%)
 create mode 100644 ml/backend/ggml/ggml/src/ggml-cuda/diagmask.cuh
 rename {llama => ml/backend/ggml/ggml/src}/ggml-cuda/fattn-common.cuh (95%)
 rename {llama => ml/backend/ggml/ggml/src}/ggml-cuda/fattn-tile-f16.cu (91%)
 create mode 100644 ml/backend/ggml/ggml/src/ggml-cuda/fattn-tile-f16.cuh
 rename {llama => ml/backend/ggml/ggml/src}/ggml-cuda/fattn-tile-f32.cu (91%)
 create mode 100644 ml/backend/ggml/ggml/src/ggml-cuda/fattn-tile-f32.cuh
 rename {llama => ml/backend/ggml/ggml/src}/ggml-cuda/fattn-vec-f16.cuh (93%)
 rename {llama => ml/backend/ggml/ggml/src}/ggml-cuda/fattn-vec-f32.cuh (92%)
 rename {llama => ml/backend/ggml/ggml/src}/ggml-cuda/fattn-wmma-f16.cuh (94%)
 rename {llama => ml/backend/ggml/ggml/src}/ggml-cuda/fattn.cu (92%)
 create mode 100644 ml/backend/ggml/ggml/src/ggml-cuda/fattn.cuh
 rename {llama => ml/backend/ggml/ggml/src}/ggml-cuda/getrows.cu (84%)
 create mode 100644 ml/backend/ggml/ggml/src/ggml-cuda/getrows.cuh
 rename {llama => ml/backend/ggml/ggml/src}/ggml-cuda/ggml-cuda.cu (98%)
 rename {llama => ml/backend/ggml/ggml/src}/ggml-cuda/im2col.cu (78%)
 create mode 100644 ml/backend/ggml/ggml/src/ggml-cuda/im2col.cuh
 rename {llama => ml/backend/ggml/ggml/src}/ggml-cuda/mma.cuh (86%)
 rename {llama => ml/backend/ggml/ggml/src}/ggml-cuda/mmq.cu (80%)
 rename {llama => ml/backend/ggml/ggml/src}/ggml-cuda/mmq.cuh (98%)
 rename {llama => ml/backend/ggml/ggml/src}/ggml-cuda/mmv.cu (89%)
 create mode 100644 ml/backend/ggml/ggml/src/ggml-cuda/mmv.cuh
 rename {llama => ml/backend/ggml/ggml/src}/ggml-cuda/mmvq.cu (93%)
 create mode 100644 ml/backend/ggml/ggml/src/ggml-cuda/mmvq.cuh
 rename {llama => ml/backend/ggml/ggml/src}/ggml-cuda/norm.cu (85%)
 create mode 100644 ml/backend/ggml/ggml/src/ggml-cuda/norm.cuh
 rename {llama => ml/backend/ggml/ggml/src}/ggml-cuda/opt-step-adamw.cu (70%)
 create mode 100644 ml/backend/ggml/ggml/src/ggml-cuda/opt-step-adamw.cuh
 rename {llama => ml/backend/ggml/ggml/src}/ggml-cuda/out-prod.cu (57%)
 create mode 100644 ml/backend/ggml/ggml/src/ggml-cuda/out-prod.cuh
 rename {llama => ml/backend/ggml/ggml/src}/ggml-cuda/pad.cu (74%)
 create mode 100644 ml/backend/ggml/ggml/src/ggml-cuda/pad.cuh
 rename {llama => ml/backend/ggml/ggml/src}/ggml-cuda/pool2d.cu (72%)
 create mode 100644 ml/backend/ggml/ggml/src/ggml-cuda/pool2d.cuh
 rename {llama => ml/backend/ggml/ggml/src}/ggml-cuda/quantize.cu (81%)
 create mode 100644 ml/backend/ggml/ggml/src/ggml-cuda/quantize.cuh
 rename {llama => ml/backend/ggml/ggml/src}/ggml-cuda/rope.cu (94%)
 create mode 100644 ml/backend/ggml/ggml/src/ggml-cuda/rope.cuh
 create mode 100644 ml/backend/ggml/ggml/src/ggml-cuda/scale.cu
 create mode 100644 ml/backend/ggml/ggml/src/ggml-cuda/scale.cuh
 rename {llama => ml/backend/ggml/ggml/src}/ggml-cuda/softmax.cu (86%)
 create mode 100644 ml/backend/ggml/ggml/src/ggml-cuda/softmax.cuh
 rename {llama => ml/backend/ggml/ggml/src}/ggml-cuda/sum.cu (54%)
 create mode 100644 ml/backend/ggml/ggml/src/ggml-cuda/sum.cuh
 create mode 100644 ml/backend/ggml/ggml/src/ggml-cuda/sumrows.cu
 create mode 100644 ml/backend/ggml/ggml/src/ggml-cuda/sumrows.cuh
 create mode 100644 ml/backend/ggml/ggml/src/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-f16-f16.cu
 create mode 100644 ml/backend/ggml/ggml/src/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-f16-q4_0.cu
 create mode 100644 ml/backend/ggml/ggml/src/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-f16-q4_1.cu
 create mode 100644 ml/backend/ggml/ggml/src/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-f16-q5_0.cu
 create mode 100644 ml/backend/ggml/ggml/src/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-f16-q5_1.cu
 create mode 100644 ml/backend/ggml/ggml/src/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-f16-q8_0.cu
 create mode 100644 ml/backend/ggml/ggml/src/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-q4_0-f16.cu
 create mode 100644 ml/backend/ggml/ggml/src/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-q4_0-q4_0.cu
 create mode 100644 ml/backend/ggml/ggml/src/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-q4_0-q4_1.cu
 create mode 100644 ml/backend/ggml/ggml/src/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-q4_0-q5_0.cu
 create mode 100644 ml/backend/ggml/ggml/src/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-q4_0-q5_1.cu
 create mode 100644 ml/backend/ggml/ggml/src/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-q4_0-q8_0.cu
 create mode 100644 ml/backend/ggml/ggml/src/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-q4_1-f16.cu
 create mode 100644 ml/backend/ggml/ggml/src/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-q4_1-q4_0.cu
 create mode 100644 ml/backend/ggml/ggml/src/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-q4_1-q4_1.cu
 create mode 100644 ml/backend/ggml/ggml/src/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-q4_1-q5_0.cu
 create mode 100644 ml/backend/ggml/ggml/src/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-q4_1-q5_1.cu
 create mode 100644 ml/backend/ggml/ggml/src/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-q4_1-q8_0.cu
 create mode 100644 ml/backend/ggml/ggml/src/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-q5_0-f16.cu
 create mode 100644 ml/backend/ggml/ggml/src/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-q5_0-q4_0.cu
 create mode 100644 ml/backend/ggml/ggml/src/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-q5_0-q4_1.cu
 create mode 100644 ml/backend/ggml/ggml/src/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-q5_0-q5_0.cu
 create mode 100644 ml/backend/ggml/ggml/src/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-q5_0-q5_1.cu
 create mode 100644 ml/backend/ggml/ggml/src/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-q5_0-q8_0.cu
 create mode 100644 ml/backend/ggml/ggml/src/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-q5_1-f16.cu
 create mode 100644 ml/backend/ggml/ggml/src/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-q5_1-q4_0.cu
 create mode 100644 ml/backend/ggml/ggml/src/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-q5_1-q4_1.cu
 create mode 100644 ml/backend/ggml/ggml/src/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-q5_1-q5_0.cu
 create mode 100644 ml/backend/ggml/ggml/src/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-q5_1-q5_1.cu
 create mode 100644 ml/backend/ggml/ggml/src/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-q5_1-q8_0.cu
 create mode 100644 ml/backend/ggml/ggml/src/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-q8_0-f16.cu
 create mode 100644 ml/backend/ggml/ggml/src/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-q8_0-q4_0.cu
 create mode 100644 ml/backend/ggml/ggml/src/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-q8_0-q4_1.cu
 create mode 100644 ml/backend/ggml/ggml/src/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-q8_0-q5_0.cu
 create mode 100644 ml/backend/ggml/ggml/src/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-q8_0-q5_1.cu
 create mode 100644 ml/backend/ggml/ggml/src/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-q8_0-q8_0.cu
 create mode 100644 ml/backend/ggml/ggml/src/ggml-cuda/template-instances/fattn-vec-f16-instance-hs256-f16-f16.cu
 create mode 100644 ml/backend/ggml/ggml/src/ggml-cuda/template-instances/fattn-vec-f16-instance-hs64-f16-f16.cu
 create mode 100644 ml/backend/ggml/ggml/src/ggml-cuda/template-instances/fattn-vec-f16-instance-hs64-f16-q4_0.cu
 create mode 100644 ml/backend/ggml/ggml/src/ggml-cuda/template-instances/fattn-vec-f16-instance-hs64-f16-q4_1.cu
 create mode 100644 ml/backend/ggml/ggml/src/ggml-cuda/template-instances/fattn-vec-f16-instance-hs64-f16-q5_0.cu
 create mode 100644 ml/backend/ggml/ggml/src/ggml-cuda/template-instances/fattn-vec-f16-instance-hs64-f16-q5_1.cu
 create mode 100644 ml/backend/ggml/ggml/src/ggml-cuda/template-instances/fattn-vec-f16-instance-hs64-f16-q8_0.cu
 create mode 100644 ml/backend/ggml/ggml/src/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-f16-f16.cu
 create mode 100644 ml/backend/ggml/ggml/src/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-f16-q4_0.cu
 create mode 100644 ml/backend/ggml/ggml/src/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-f16-q4_1.cu
 create mode 100644 ml/backend/ggml/ggml/src/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-f16-q5_0.cu
 create mode 100644 ml/backend/ggml/ggml/src/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-f16-q5_1.cu
 create mode 100644 ml/backend/ggml/ggml/src/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-f16-q8_0.cu
 create mode 100644 ml/backend/ggml/ggml/src/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-q4_0-f16.cu
 create mode 100644 ml/backend/ggml/ggml/src/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-q4_0-q4_0.cu
 create mode 100644 ml/backend/ggml/ggml/src/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-q4_0-q4_1.cu
 create mode 100644 ml/backend/ggml/ggml/src/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-q4_0-q5_0.cu
 create mode 100644 ml/backend/ggml/ggml/src/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-q4_0-q5_1.cu
 create mode 100644 ml/backend/ggml/ggml/src/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-q4_0-q8_0.cu
 create mode 100644 ml/backend/ggml/ggml/src/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-q4_1-f16.cu
 create mode 100644 ml/backend/ggml/ggml/src/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-q4_1-q4_0.cu
 create mode 100644 ml/backend/ggml/ggml/src/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-q4_1-q4_1.cu
 create mode 100644 ml/backend/ggml/ggml/src/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-q4_1-q5_0.cu
 create mode 100644 ml/backend/ggml/ggml/src/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-q4_1-q5_1.cu
 create mode 100644 ml/backend/ggml/ggml/src/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-q4_1-q8_0.cu
 create mode 100644 ml/backend/ggml/ggml/src/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-q5_0-f16.cu
 create mode 100644 ml/backend/ggml/ggml/src/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-q5_0-q4_0.cu
 create mode 100644 ml/backend/ggml/ggml/src/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-q5_0-q4_1.cu
 create mode 100644 ml/backend/ggml/ggml/src/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-q5_0-q5_0.cu
 create mode 100644 ml/backend/ggml/ggml/src/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-q5_0-q5_1.cu
 create mode 100644 ml/backend/ggml/ggml/src/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-q5_0-q8_0.cu
 create mode 100644 ml/backend/ggml/ggml/src/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-q5_1-f16.cu
 create mode 100644 ml/backend/ggml/ggml/src/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-q5_1-q4_0.cu
 create mode 100644 ml/backend/ggml/ggml/src/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-q5_1-q4_1.cu
 create mode 100644 ml/backend/ggml/ggml/src/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-q5_1-q5_0.cu
 create mode 100644 ml/backend/ggml/ggml/src/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-q5_1-q5_1.cu
 create mode 100644 ml/backend/ggml/ggml/src/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-q5_1-q8_0.cu
 create mode 100644 ml/backend/ggml/ggml/src/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-q8_0-f16.cu
 create mode 100644 ml/backend/ggml/ggml/src/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-q8_0-q4_0.cu
 create mode 100644 ml/backend/ggml/ggml/src/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-q8_0-q4_1.cu
 create mode 100644 ml/backend/ggml/ggml/src/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-q8_0-q5_0.cu
 create mode 100644 ml/backend/ggml/ggml/src/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-q8_0-q5_1.cu
 create mode 100644 ml/backend/ggml/ggml/src/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-q8_0-q8_0.cu
 create mode 100644 ml/backend/ggml/ggml/src/ggml-cuda/template-instances/fattn-vec-f32-instance-hs256-f16-f16.cu
 create mode 100644 ml/backend/ggml/ggml/src/ggml-cuda/template-instances/fattn-vec-f32-instance-hs64-f16-f16.cu
 create mode 100644 ml/backend/ggml/ggml/src/ggml-cuda/template-instances/fattn-vec-f32-instance-hs64-f16-q4_0.cu
 create mode 100644 ml/backend/ggml/ggml/src/ggml-cuda/template-instances/fattn-vec-f32-instance-hs64-f16-q4_1.cu
 create mode 100644 ml/backend/ggml/ggml/src/ggml-cuda/template-instances/fattn-vec-f32-instance-hs64-f16-q5_0.cu
 create mode 100644 ml/backend/ggml/ggml/src/ggml-cuda/template-instances/fattn-vec-f32-instance-hs64-f16-q5_1.cu
 create mode 100644 ml/backend/ggml/ggml/src/ggml-cuda/template-instances/fattn-vec-f32-instance-hs64-f16-q8_0.cu
 create mode 100644 ml/backend/ggml/ggml/src/ggml-cuda/template-instances/fattn-wmma-f16-instance-kqfloat-cpb16.cu
 create mode 100644 ml/backend/ggml/ggml/src/ggml-cuda/template-instances/fattn-wmma-f16-instance-kqfloat-cpb32.cu
 create mode 100644 ml/backend/ggml/ggml/src/ggml-cuda/template-instances/fattn-wmma-f16-instance-kqhalf-cpb16.cu
 create mode 100644 ml/backend/ggml/ggml/src/ggml-cuda/template-instances/fattn-wmma-f16-instance-kqhalf-cpb32.cu
 create mode 100644 ml/backend/ggml/ggml/src/ggml-cuda/template-instances/fattn-wmma-f16-instance-kqhalf-cpb8.cu
 create mode 100755 ml/backend/ggml/ggml/src/ggml-cuda/template-instances/generate_cu_files.py
 create mode 100644 ml/backend/ggml/ggml/src/ggml-cuda/template-instances/mmq-instance-iq1_s.cu
 create mode 100644 ml/backend/ggml/ggml/src/ggml-cuda/template-instances/mmq-instance-iq2_s.cu
 create mode 100644 ml/backend/ggml/ggml/src/ggml-cuda/template-instances/mmq-instance-iq2_xs.cu
 create mode 100644 ml/backend/ggml/ggml/src/ggml-cuda/template-instances/mmq-instance-iq2_xxs.cu
 create mode 100644 ml/backend/ggml/ggml/src/ggml-cuda/template-instances/mmq-instance-iq3_s.cu
 create mode 100644 ml/backend/ggml/ggml/src/ggml-cuda/template-instances/mmq-instance-iq3_xxs.cu
 create mode 100644 ml/backend/ggml/ggml/src/ggml-cuda/template-instances/mmq-instance-iq4_nl.cu
 create mode 100644 ml/backend/ggml/ggml/src/ggml-cuda/template-instances/mmq-instance-iq4_xs.cu
 create mode 100644 ml/backend/ggml/ggml/src/ggml-cuda/template-instances/mmq-instance-q2_k.cu
 create mode 100644 ml/backend/ggml/ggml/src/ggml-cuda/template-instances/mmq-instance-q3_k.cu
 create mode 100644 ml/backend/ggml/ggml/src/ggml-cuda/template-instances/mmq-instance-q4_0.cu
 create mode 100644 ml/backend/ggml/ggml/src/ggml-cuda/template-instances/mmq-instance-q4_1.cu
 create mode 100644 ml/backend/ggml/ggml/src/ggml-cuda/template-instances/mmq-instance-q4_k.cu
 create mode 100644 ml/backend/ggml/ggml/src/ggml-cuda/template-instances/mmq-instance-q5_0.cu
 create mode 100644 ml/backend/ggml/ggml/src/ggml-cuda/template-instances/mmq-instance-q5_1.cu
 create mode 100644 ml/backend/ggml/ggml/src/ggml-cuda/template-instances/mmq-instance-q5_k.cu
 create mode 100644 ml/backend/ggml/ggml/src/ggml-cuda/template-instances/mmq-instance-q6_k.cu
 create mode 100644 ml/backend/ggml/ggml/src/ggml-cuda/template-instances/mmq-instance-q8_0.cu
 rename {llama => ml/backend/ggml/ggml/src}/ggml-cuda/tsembd.cu (59%)
 create mode 100644 ml/backend/ggml/ggml/src/ggml-cuda/tsembd.cuh
 rename {llama => ml/backend/ggml/ggml/src}/ggml-cuda/unary.cu (92%)
 rename {llama => ml/backend/ggml/ggml/src}/ggml-cuda/unary.cuh (58%)
 rename {llama => ml/backend/ggml/ggml/src}/ggml-cuda/upscale.cu (63%)
 create mode 100644 ml/backend/ggml/ggml/src/ggml-cuda/upscale.cuh
 rename {llama => ml/backend/ggml/ggml/src}/ggml-cuda/vecdotq.cuh (96%)
 create mode 100644 ml/backend/ggml/ggml/src/ggml-cuda/vendors/cuda.h
 rename {llama => ml/backend/ggml/ggml/src}/ggml-cuda/vendors/hip.h (85%)
 rename {llama => ml/backend/ggml/ggml/src}/ggml-cuda/vendors/musa.h (83%)
 rename {llama => ml/backend/ggml/ggml/src}/ggml-cuda/wkv6.cu (71%)
 create mode 100644 ml/backend/ggml/ggml/src/ggml-cuda/wkv6.cuh
 create mode 100644 ml/backend/ggml/ggml/src/ggml-hip/CMakeLists.txt
 rename {llama => ml/backend/ggml/ggml/src}/ggml-impl.h (93%)
 create mode 100644 ml/backend/ggml/ggml/src/ggml-metal/CMakeLists.txt
 rename {llama => ml/backend/ggml/ggml/src/ggml-metal}/ggml-metal-embed.metal (99%)
 rename llama/ggml-metal-embed_darwin_arm64.s => ml/backend/ggml/ggml/src/ggml-metal/ggml-metal-embed.s (87%)
 rename {llama => ml/backend/ggml/ggml/src/ggml-metal}/ggml-metal-impl.h (81%)
 rename llama/ggml-metal_darwin_arm64.m => ml/backend/ggml/ggml/src/ggml-metal/ggml-metal.m (99%)
 rename {llama => ml/backend/ggml/ggml/src/ggml-metal}/ggml-metal.metal (99%)
 create mode 100644 ml/backend/ggml/ggml/src/ggml-metal/metal.go
 create mode 100644 ml/backend/ggml/ggml/src/ggml-opt.cpp
 rename {llama => ml/backend/ggml/ggml/src}/ggml-quants.c (99%)
 rename {llama => ml/backend/ggml/ggml/src}/ggml-quants.h (87%)
 create mode 100644 ml/backend/ggml/ggml/src/ggml-threading.cpp
 create mode 100644 ml/backend/ggml/ggml/src/ggml-threading.h
 rename {llama => ml/backend/ggml/ggml/src}/ggml.c (99%)
 create mode 100644 ml/backend/ggml/ggml/src/ggml.go
 create mode 100644 ml/backend/ggml/ggml/src/ggml_darwin_arm64.go
 create mode 100644 ml/backend/ggml/ggml_debug.go
 delete mode 100644 runners/common.go
 delete mode 100644 scripts/build.sh
 delete mode 100755 scripts/fast.sh
 delete mode 100755 scripts/publish.sh
 delete mode 100644 scripts/rh_linux_deps.sh

diff --git a/.dockerignore b/.dockerignore
index 76704c36..02d796fe 100644
--- a/.dockerignore
+++ b/.dockerignore
@@ -3,7 +3,9 @@ ollama
 app
 macapp
 dist
+build
 .env
 .cache
 test_data
-llama/build
+.git
+

diff --git a/.gitattributes b/.gitattributes
index 51635caa..4bcd95b0 100644
--- a/.gitattributes
+++ b/.gitattributes
@@ -7,5 +7,14 @@ llama/**/*.cuh linguist-vendored
 llama/**/*.m linguist-vendored
 llama/**/*.metal linguist-vendored
+ml/backend/**/*.c linguist-vendored
+ml/backend/**/*.h linguist-vendored
+ml/backend/**/*.cpp linguist-vendored
+ml/backend/**/*.hpp linguist-vendored
+ml/backend/**/*.cu linguist-vendored
+ml/backend/**/*.cuh linguist-vendored
+ml/backend/**/*.m linguist-vendored
+ml/backend/**/*.metal linguist-vendored
+
 
 * text=auto
 *.go text eol=lf

diff --git a/.github/workflows/release.yaml b/.github/workflows/release.yaml
index 50177050..f9ab533a 100644
--- a/.github/workflows/release.yaml
+++ b/.github/workflows/release.yaml
@@ -1,31 +1,62 @@
 name: release
-env:
-  ROCM_WINDOWS_URL: https://download.amd.com/developer/eula/rocm-hub/AMD-Software-PRO-Edition-24.Q3-WinSvr2022-For-HIP.exe
-  MSYS2_URL: https://github.com/msys2/msys2-installer/releases/download/2024-07-27/msys2-x86_64-20240727.exe
-
 on:
   push:
     tags:
       - 'v*'
 
 jobs:
-  # Full build of the Mac assets
-  build-darwin:
-    runs-on: macos-13
+  setup-environment:
+    runs-on: ubuntu-latest
     environment: release
+    outputs:
+      GOFLAGS: ${{ steps.goflags.outputs.GOFLAGS }}
     steps:
       - uses: actions/checkout@v4
-      - name: Set Version
-        shell: bash
+      - name: Set environment
+        id: goflags
         run: |
-          echo "VERSION=${GITHUB_REF_NAME#v}" >> $GITHUB_ENV
-          echo "RELEASE_VERSION=$(echo ${GITHUB_REF_NAME} | cut -f1 -d-)" >> $GITHUB_ENV
-      - name: key
+          echo GOFLAGS="'-ldflags=-w -s \"-X=github.com/ollama/ollama/version.Version=${GITHUB_REF_NAME#v}\" \"-X=github.com/ollama/ollama/server.mode=release\"'" >>$GITHUB_OUTPUT
+
+  darwin-build:
+    runs-on: macos-13
+    environment: release
+    needs: setup-environment
+    strategy:
+      matrix:
+        os: [darwin]
+        arch: [amd64, arm64]
+    env:
+      GOFLAGS: ${{ needs.setup-environment.outputs.GOFLAGS }}
+    steps:
+      - uses: actions/checkout@v4
+      - uses: actions/setup-go@v5
+        with:
+          go-version-file: go.mod
+      - run: |
+          go build -o dist/ .
         env:
-          MACOS_SIGNING_KEY: ${{ secrets.MACOS_SIGNING_KEY }}
-          MACOS_SIGNING_KEY_PASSWORD: ${{ secrets.MACOS_SIGNING_KEY_PASSWORD }}
+          GOOS: ${{ matrix.os }}
+          GOARCH: ${{ matrix.arch }}
+          CGO_ENABLED: 1
+          CGO_CPPFLAGS: '-mmacosx-version-min=11.3'
+      - if: matrix.arch == 'amd64'
         run: |
+          cmake --preset CPU -DCMAKE_OSX_DEPLOYMENT_TARGET=11.3 -DCMAKE_SYSTEM_PROCESSOR=x86_64 -DCMAKE_OSX_ARCHITECTURES=x86_64
+          cmake --build --parallel --preset CPU
+          cmake --install build --component CPU --strip --parallel 8
+      - uses: actions/upload-artifact@v4
+        with:
+          name: build-${{ matrix.os }}-${{ matrix.arch }}
+          path: dist/*
+
+  darwin-sign:
+    runs-on: macos-13
+    environment: release
+    needs: darwin-build
+    steps:
+      - uses: actions/checkout@v4
+      - run: |
           echo $MACOS_SIGNING_KEY | base64 --decode > certificate.p12
           security create-keychain -p password build.keychain
           security default-keychain -s build.keychain
@@ -33,11 +64,20 @@ jobs:
           security import certificate.p12 -k build.keychain -P $MACOS_SIGNING_KEY_PASSWORD -T /usr/bin/codesign
           security set-key-partition-list -S apple-tool:,apple:,codesign: -s -k password build.keychain
           security set-keychain-settings -lut 3600 build.keychain
-      - uses: actions/setup-go@v5
+        env:
+          MACOS_SIGNING_KEY: ${{ secrets.MACOS_SIGNING_KEY }}
+          MACOS_SIGNING_KEY_PASSWORD: ${{ secrets.MACOS_SIGNING_KEY_PASSWORD }}
+      - uses: actions/download-artifact@v4
         with:
-          go-version-file: go.mod
-          cache: true
-      - name: Build Darwin
+          name: build-darwin-amd64
+          path: dist/darwin-amd64
+      - uses: actions/download-artifact@v4
+        with:
+          name: build-darwin-arm64
+          path: dist/darwin-arm64
+      - run: |
+          export VERSION=${GITHUB_REF_NAME#v}
+          ./scripts/build_darwin.sh macapp sign
         env:
           APPLE_IDENTITY: ${{ secrets.APPLE_IDENTITY }}
           APPLE_PASSWORD: ${{ secrets.APPLE_PASSWORD }}
@@ -45,684 +85,269 @@ jobs:
           APPLE_ID: ${{ vars.APPLE_ID }}
           SDKROOT: /Applications/Xcode_14.1.0.app/Contents/Developer/Platforms/MacOSX.platform/Developer/SDKs/MacOSX.sdk
           DEVELOPER_DIR: /Applications/Xcode_14.1.0.app/Contents/Developer
-        run: |
-          ./scripts/build_darwin.sh
       - uses: actions/upload-artifact@v4
         with:
           name: dist-darwin
           path: |
             dist/Ollama-darwin.zip
-            dist/ollama-darwin
+            dist/ollama-darwin.tgz
 
-  # Windows builds take a long time to both install the dependencies and build, so parallelize
-  # CPU generation step
-  generate-windows-cpu:
-    environment: release
-    runs-on: windows
-    env:
-      KEY_CONTAINER: ${{ vars.KEY_CONTAINER }}
-    steps:
-      - uses: actions/checkout@v4
-      - name: Set make jobs default
-        run: |
-          echo "MAKEFLAGS=--jobs=$((Get-ComputerInfo -Property CsProcessors).CsProcessors.NumberOfCores)" | Out-File -FilePath $env:GITHUB_ENV -Encoding utf8 -Append
-      - name: Set Version
-        shell: bash
-        run: echo "VERSION=${GITHUB_REF_NAME#v}" >> $GITHUB_ENV
-      - name: Add msys paths
-        run: |
-          echo "c:\msys64\usr\bin" | Out-File -FilePath $env:GITHUB_PATH -Encoding utf8 -Append
-          echo "C:\msys64\clang64\bin" | Out-File -FilePath $env:GITHUB_PATH -Encoding utf8 -Append
-      - name: Install msys2 tools
-        run: |
-          Start-Process "c:\msys64\usr\bin\pacman.exe" -ArgumentList @("-S", "--noconfirm", "mingw-w64-clang-x86_64-gcc-compat", "mingw-w64-clang-x86_64-clang") -NoNewWindow -Wait
-      - uses: actions/setup-go@v5
-        with:
-          go-version-file: go.mod
-          cache: true
-      - run: |
-          import-module 'C:\Program Files (x86)\Microsoft Visual Studio\2019\Enterprise\Common7\Tools\Microsoft.VisualStudio.DevShell.dll'
-          Enter-VsDevShell -vsinstallpath 'C:\Program Files (x86)\Microsoft Visual Studio\2019\Enterprise' -skipautomaticlocation -DevCmdArguments '-arch=x64 -no_logo'
-          if (!(gcc --version | select-string -quiet clang)) { throw "wrong gcc compiler detected - must be clang" }
-          make dist
-        name: make
-      - uses: actions/upload-artifact@v4
-        with:
-          name: generate-windows-cpu
-          path: |
-            dist/windows-amd64/**
-
-  # ROCm generation step
-  generate-windows-rocm:
-    environment: release
-    runs-on: windows
-    env:
-      KEY_CONTAINER: ${{ vars.KEY_CONTAINER }}
-    steps:
-      - uses: actions/checkout@v4
-      - name: Set make jobs default
-        run: |
-          echo "MAKEFLAGS=--jobs=$((Get-ComputerInfo -Property CsProcessors).CsProcessors.NumberOfCores)" | Out-File -FilePath $env:GITHUB_ENV -Encoding utf8 -Append
-      - name: Set Version
-        shell: bash
-        run: echo "VERSION=${GITHUB_REF_NAME#v}" >> $GITHUB_ENV
-      - name: Add msys paths
-        run: |
-          echo "c:\msys64\usr\bin" | Out-File -FilePath $env:GITHUB_PATH -Encoding utf8 -Append
-          echo "C:\msys64\clang64\bin" | Out-File -FilePath $env:GITHUB_PATH -Encoding utf8 -Append
-      - name: Install msys2 tools
-        run: |
-          Start-Process "c:\msys64\usr\bin\pacman.exe" -ArgumentList @("-S", "--noconfirm", "mingw-w64-clang-x86_64-gcc-compat", "mingw-w64-clang-x86_64-clang") -NoNewWindow -Wait
-      - uses: actions/setup-go@v5
-        with:
-          go-version-file: go.mod
-          cache: true
-      # ROCM installation steps
-      - name: 'Cache ROCm installer'
-        id: cache-rocm
-        uses: actions/cache@v4
-        with:
-          path: rocm-install.exe
-          key: ${{ env.ROCM_WINDOWS_URL }}
-      - name: 'Conditionally Download ROCm'
-        if: steps.cache-rocm.outputs.cache-hit != 'true'
-        run: |
-          $ErrorActionPreference = "Stop"
-          Invoke-WebRequest -Uri "${env:ROCM_WINDOWS_URL}" -OutFile "rocm-install.exe"
-      - name: 'Install ROCm'
-        run: |
-          Start-Process "rocm-install.exe" -ArgumentList '-install' -NoNewWindow -Wait
-      - name: 'Verify ROCm'
-        run: |
-          & 'C:\Program Files\AMD\ROCm\*\bin\clang.exe' --version
-          echo "HIP_PATH=$(Resolve-Path 'C:\Program Files\AMD\ROCm\*\bin\clang.exe' | split-path | split-path | select -first 1)" | Out-File -FilePath $env:GITHUB_ENV -Encoding utf8 -Append
-      - name: make rocm runner
-        run: |
-          import-module 'C:\Program Files (x86)\Microsoft Visual Studio\2019\Enterprise\Common7\Tools\Microsoft.VisualStudio.DevShell.dll'
-          Enter-VsDevShell -vsinstallpath 'C:\Program Files (x86)\Microsoft Visual Studio\2019\Enterprise' -skipautomaticlocation -DevCmdArguments '-arch=x64 -no_logo'
-          if (!(gcc --version | select-string -quiet clang)) { throw "wrong gcc compiler detected - must be clang" }
-          make help-runners
-          make dist_rocm
-      - uses: actions/upload-artifact@v4
-        with:
-          name: generate-windows-rocm
-          path: |
-            dist/windows-amd64/**
-
-  # CUDA generation step
-  generate-windows-cuda:
-    environment: release
-    runs-on: windows
+  windows-depends:
     strategy:
       matrix:
-        cuda:
-          - version: "11.3"
-            url: https://developer.download.nvidia.com/compute/cuda/11.3.1/local_installers/cuda_11.3.1_465.89_win10.exe
-          - version: "12.4"
-            url: https://developer.download.nvidia.com/compute/cuda/12.4.0/local_installers/cuda_12.4.0_551.61_windows.exe
+        os: [windows]
+        arch: [amd64]
+        preset: ['CPU']
+        include:
+          - os: windows
+            arch: amd64
+            preset: 'CUDA 11'
+            install: https://developer.download.nvidia.com/compute/cuda/11.3.1/local_installers/cuda_11.3.1_465.89_win10.exe
+            cuda-version: '11.3'
+          - os: windows
+            arch: amd64
+            preset: 'CUDA 12'
+            install: https://developer.download.nvidia.com/compute/cuda/12.4.0/local_installers/cuda_12.4.0_551.61_windows.exe
+            cuda-version: '12.4'
+          - os: windows
+            arch: amd64
+            preset: 'ROCm 6'
+            install: https://download.amd.com/developer/eula/rocm-hub/AMD-Software-PRO-Edition-24.Q3-WinSvr2022-For-HIP.exe
+            rocm-version: '6.1'
+    runs-on: ${{ matrix.arch == 'arm64' && format('{0}-{1}', matrix.os, matrix.arch) || matrix.os }}
+    environment: release
     env:
-      KEY_CONTAINER: ${{ vars.KEY_CONTAINER }}
+      GOFLAGS: ${{ needs.setup-environment.outputs.GOFLAGS }}
    steps:
-      - uses: actions/checkout@v4
-      - name: Set make jobs default
+      - name: Install system dependencies
        run: |
-          echo "MAKEFLAGS=--jobs=$((Get-ComputerInfo -Property CsProcessors).CsProcessors.NumberOfCores)" | Out-File -FilePath $env:GITHUB_ENV -Encoding utf8 -Append
-      - name: Set Version
-        shell: bash
-        run: echo "VERSION=${GITHUB_REF_NAME#v}" >> $GITHUB_ENV
-      - name: Install msys2
-        run: |
-          $msys2_url="https://github.com/msys2/msys2-installer/releases/download/2024-07-27/msys2-x86_64-20240727.exe"
-          write-host "Downloading msys2"
-          Invoke-WebRequest -Uri "${msys2_url}" -OutFile "${env:RUNNER_TEMP}\msys2.exe"
-          write-host "Installing msys2"
-          Start-Process "${env:RUNNER_TEMP}\msys2.exe" -ArgumentList @("in", "--confirm-command", "--accept-messages", "--root", "C:/msys64") -NoNewWindow -Wait
-          echo "c:\msys64\usr\bin" | Out-File -FilePath $env:GITHUB_PATH -Encoding utf8 -Append
-      - name: Install msys2 tools
-        run: |
-          Start-Process "c:\msys64\usr\bin\pacman.exe" -ArgumentList @("-S", "--noconfirm", "mingw-w64-clang-x86_64-gcc-compat", "mingw-w64-clang-x86_64-clang", "make") -NoNewWindow -Wait
-          echo "C:\msys64\clang64\bin" | Out-File -FilePath $env:GITHUB_PATH -Encoding utf8 -Append
-      - name: verify tools
-        run: |
-          get-command gcc
-          gcc --version
-          get-command make
-          make --version
-      - uses: actions/setup-go@v5
+          choco install -y --no-progress ccache ninja
+          ccache -o cache_dir=${{ github.workspace }}\.ccache
+      - if: startsWith(matrix.preset, 'CUDA ') || startsWith(matrix.preset, 'ROCm ')
+        id: cache-install
+        uses: actions/cache/restore@v4
        with:
-          go-version-file: go.mod
-          cache: true
-      # CUDA installation steps
-      - name: 'Cache CUDA installer'
-        id: cache-cuda
-        uses: actions/cache@v4
-        with:
-          path: cuda-install.exe
-          key: ${{ matrix.cuda.url }}
-      - name: 'Conditionally Download CUDA'
-        if: steps.cache-cuda.outputs.cache-hit != 'true'
+          path: |
+            C:\Program Files\NVIDIA GPU Computing Toolkit\CUDA
+            C:\Program Files\AMD\ROCm
+          key: ${{ matrix.install }}
+      - if: startsWith(matrix.preset, 'CUDA ')
+        name: Install CUDA ${{ matrix.cuda-version }}
        run: |
          $ErrorActionPreference = "Stop"
-          Invoke-WebRequest -Uri "${{ matrix.cuda.url }}" -OutFile "cuda-install.exe"
-      - name: 'Install CUDA'
-        run: |
-          $subpackages = @("cudart", "nvcc", "cublas", "cublas_dev") | foreach-object {"${_}_${{ matrix.cuda.version }}"}
-          Start-Process "cuda-install.exe" -ArgumentList (@("-s") + $subpackages) -NoNewWindow -Wait
-      - name: 'Verify CUDA'
-        run: |
-          & (resolve-path "c:\Program Files\NVIDIA*\CUDA\v*\bin\nvcc.exe")[0] --version
-          $cudaPath=((resolve-path "c:\Program Files\NVIDIA*\CUDA\v*\bin\nvcc.exe")[0].path | split-path | split-path)
-          $cudaVer=($cudaPath | split-path -leaf ) -replace 'v(\d+).(\d+)', '$1_$2'
+          if ("${{ steps.cache-install.outputs.cache-hit }}" -ne 'true') {
+            Invoke-WebRequest -Uri "${{ matrix.install }}" -OutFile "install.exe"
+            $subpackages = @("cudart", "nvcc", "cublas", "cublas_dev") | Foreach-Object {"${_}_${{ matrix.cuda-version }}"}
+            Start-Process -FilePath .\install.exe -ArgumentList (@("-s") + $subpackages) -NoNewWindow -Wait
+          }
+
+          $cudaPath = (Resolve-Path "C:\Program Files\NVIDIA GPU Computing Toolkit\CUDA\*").path
"$cudaPath\bin" | Out-File -FilePath $env:GITHUB_PATH -Encoding utf8 -Append - echo "CUDA_PATH=$cudaPath" | Out-File -FilePath $env:GITHUB_ENV -Encoding utf8 -Append - echo "CUDA_PATH_V${cudaVer}=$cudaPath" | Out-File -FilePath $env:GITHUB_ENV -Encoding utf8 -Append - echo "CUDA_PATH_VX_Y=CUDA_PATH_V${cudaVer}" | Out-File -FilePath $env:GITHUB_ENV -Encoding utf8 -Append - - - name: make cuda runner + - if: startsWith(matrix.preset, 'ROCm') + name: Install ROCm ${{ matrix.rocm-version }} run: | - import-module 'C:\Program Files (x86)\Microsoft Visual Studio\2019\Enterprise\Common7\Tools\Microsoft.VisualStudio.DevShell.dll' - Enter-VsDevShell -vsinstallpath 'C:\Program Files (x86)\Microsoft Visual Studio\2019\Enterprise' -skipautomaticlocation -DevCmdArguments '-arch=x64 -no_logo' - if (!(gcc --version | select-string -quiet clang)) { throw "wrong gcc compiler detected - must be clang" } - make dist_cuda_v$(($env:CUDA_PATH | split-path -leaf) -replace 'v(\d+).*', '$1') + $ErrorActionPreference = "Stop" + if ("${{ steps.cache-install.outputs.cache-hit }}" -ne 'true') { + Invoke-WebRequest -Uri "${{ matrix.install }}" -OutFile "install.exe" + Start-Process -FilePath .\install.exe -ArgumentList '-install' -NoNewWindow -Wait + } + + $hipPath = (Resolve-Path "C:\Program Files\AMD\ROCm\*").path + echo "$hipPath\bin" | Out-File -FilePath $env:GITHUB_PATH -Encoding utf8 -Append + echo "CC=$hipPath\bin\clang.exe" | Out-File -FilePath $env:GITHUB_ENV -Append + echo "CXX=$hipPath\bin\clang++.exe" | Out-File -FilePath $env:GITHUB_ENV -Append + - if: ${{ !cancelled() && steps.cache-install.outputs.cache-hit != 'true' }} + uses: actions/cache/save@v4 + with: + path: | + C:\Program Files\NVIDIA GPU Computing Toolkit\CUDA + C:\Program Files\AMD\ROCm + key: ${{ matrix.install }} + - uses: actions/checkout@v4 + - uses: actions/cache@v4 + with: + path: ${{ github.workspace }}\.ccache + key: ccache-${{ matrix.os }}-${{ matrix.arch }}-${{ matrix.preset }} + - name: Build target "${{ matrix.preset }}" + run: | + Import-Module 'C:\Program Files (x86)\Microsoft Visual Studio\2019\Enterprise\Common7\Tools\Microsoft.VisualStudio.DevShell.dll' + Enter-VsDevShell -VsInstallPath 'C:\Program Files (x86)\Microsoft Visual Studio\2019\Enterprise' -SkipAutomaticLocation -DevCmdArguments '-arch=x64 -no_logo' + cmake --preset "${{ matrix.preset }}" + cmake --build --parallel --preset "${{ matrix.preset }}" + cmake --install build --component "${{ startsWith(matrix.preset, 'CUDA ') && 'CUDA' || startsWith(matrix.preset, 'ROCm ') && 'HIP' || 'CPU' }}" --strip --parallel 8 + env: + CMAKE_GENERATOR: Ninja - uses: actions/upload-artifact@v4 with: - name: generate-windows-cuda-${{ matrix.cuda.version }} - path: | - dist/windows-amd64/** + name: depends-${{ matrix.os }}-${{ matrix.arch }}-${{ matrix.preset }} + path: dist\* - # windows arm64 generate, go build, and zip file (no installer) - # Output of this build is aggregated into the final x86 build - # for a unified windows installer - windows-arm64: - runs-on: windows-arm64 + windows-build: + strategy: + matrix: + os: [windows] + arch: [amd64, arm64] + runs-on: ${{ matrix.arch == 'arm64' && format('{0}-{1}', matrix.os, matrix.arch) || matrix.os }} environment: release + needs: [setup-environment] env: - KEY_CONTAINER: ${{ vars.KEY_CONTAINER }} + GOFLAGS: ${{ needs.setup-environment.outputs.GOFLAGS }} steps: - # The current Windows arm64 beta image has effectively zero dev tools installed... 
- - name: Install git and gzip + - name: Install system dependencies run: | - Set-ExecutionPolicy Bypass -Scope Process -Force - [System.Net.ServicePointManager]::SecurityProtocol = [System.Net.ServicePointManager]::SecurityProtocol -bor 3072 - iex ((New-Object System.Net.WebClient).DownloadString('https://community.chocolatey.org/install.ps1')) - choco install -y --no-progress git gzip - echo "C:\Program Files\Git\cmd" | Out-File -FilePath $env:GITHUB_PATH -Encoding utf8 -Append - echo "C:\ProgramData\chocolatey\bin" | Out-File -FilePath $env:GITHUB_PATH -Encoding utf8 -Append - # pacman is buggy on win arm64, so we avoid using it, but rely on the binary artifacts - # we download the sfx (7zip bundle) which isn't fully set up, but the binaries we need to build work - - name: Install msys2 x64 - run: | - $url="https://github.com/msys2/msys2-installer/releases/download/2024-07-27/msys2-base-x86_64-20240727.sfx.exe" - write-host "Downloading MSYS2" - Invoke-WebRequest -Uri "$url" -outfile "${env:RUNNER_TEMP}\msys2.exe" - write-host "Installing msys2" - Start-Process "${env:RUNNER_TEMP}\msys2.exe" -ArgumentList @( - '-y', '-oC:\' - ) -NoNewWindow -Wait - echo "c:\msys64\usr\bin" | Out-File -FilePath $env:GITHUB_PATH -Encoding utf8 -Append - # since pacman isn't reliable, we just download the tar file and extract directly - - name: Downloading and extracting msys2 make tar file - run: | - $url="https://mirror.msys2.org/msys/x86_64/make-4.4.1-2-x86_64.pkg.tar.zst" - write-host "Downloading make" - Invoke-WebRequest -Uri "$url" -outfile c:\msys64\make.tar.zst - cd c:\msys64; tar -xf make.tar.zst - rm c:\msys64\make.tar.zst - - name: Verify Make works properly - run: | - echo $env:PATH - make --version - - name: Install Visual Studio 2022 - run: | - $components = @( - "Microsoft.VisualStudio.Component.CoreEditor", - "Microsoft.VisualStudio.Workload.CoreEditor", - "Microsoft.VisualStudio.Component.Roslyn.Compiler", - "Microsoft.Component.MSBuild", - "Microsoft.VisualStudio.Component.TextTemplating", - "Microsoft.VisualStudio.Component.Debugger.JustInTime", - "Microsoft.VisualStudio.Component.VC.CoreIde", - "Microsoft.VisualStudio.Component.VC.Tools.x86.x64", - "Microsoft.VisualStudio.Component.Windows11SDK.22621", - "Microsoft.VisualStudio.Component.VC.Tools.ARM64EC", - "Microsoft.VisualStudio.Component.VC.Tools.ARM64", - "Microsoft.VisualStudio.Component.VC.ATL", - "Microsoft.VisualStudio.Component.VC.ATL.ARM64", - "Microsoft.VisualStudio.Component.Graphics", - "Microsoft.VisualStudio.Component.VC.Redist.14.Latest", - "Microsoft.VisualStudio.ComponentGroup.NativeDesktop.Core", - "Microsoft.VisualStudio.Component.Windows11Sdk.WindowsPerformanceToolkit", - "Microsoft.VisualStudio.Component.CppBuildInsights", - "Microsoft.VisualStudio.Component.VC.DiagnosticTools", - "Microsoft.VisualStudio.ComponentGroup.WebToolsExtensions.CMake", - "Microsoft.VisualStudio.Component.VC.CMake.Project", - "Microsoft.VisualStudio.Component.VC.ASAN", - "Microsoft.VisualStudio.Component.Vcpkg", - "Microsoft.VisualStudio.Workload.NativeDesktop" - ) - $config = @{ - "version" = "1.0" - "components" = $components - "extensions" = @() - } - $configPath = "${env:RUNNER_TEMP}\vsconfig" - $config | ConvertTo-Json | Out-File -FilePath $configPath - $bootstrapperFilePath = "${env:RUNNER_TEMP}\vs_community.exe" - write-host "Downloading Visual Studio 2022" - Invoke-WebRequest -Uri "https://aka.ms/vs/17/release/vs_community.exe" -outfile $bootstrapperFilePath - $bootstrapperArgumentList = ('/c', $bootstrapperFilePath, '--config', 
$configPath, '--quiet', '--wait' ) - write-host "Installing Visual Studio 2022" - $process = Start-Process -FilePath cmd.exe -ArgumentList $bootstrapperArgumentList -Wait -PassThru - $exitCode = $process.ExitCode - write-host $exitCode - # pacman in mingw/msys2 is ~broken on windows arm right now - hangs consistently during attempts to install - # so we'll use this alternative GCC binary - - name: Install llvm-mingw GCC - run: | - $gcc_url="https://github.com/mstorsjo/llvm-mingw/releases/download/20240619/llvm-mingw-20240619-ucrt-aarch64.zip" - write-host "Downloading llvm-mingw" - Invoke-WebRequest -Uri "${gcc_url}" -OutFile "${env:RUNNER_TEMP}\gcc.zip" - write-host "Unpacking llvm-mingw" - expand-archive -path "${env:RUNNER_TEMP}\gcc.zip" -destinationpath "c:\" - mv c:\llvm-mingw-* c:\llvm-mingw - echo "c:\llvm-mingw\bin" | Out-File -FilePath $env:GITHUB_PATH -Encoding utf8 -Append - - name: Verify GCC - run: | - echo $env:PATH - gcc --version + $ErrorActionPreference = "Stop" + if ("${{ matrix.arch }}" -eq 'amd64') { + Start-Process "C:\msys64\usr\bin\pacman.exe" -ArgumentList @("-S", "--noconfirm", "mingw-w64-clang-x86_64-gcc-compat", "mingw-w64-clang-x86_64-clang") -NoNewWindow -Wait + echo "C:\msys64\usr\bin" | Out-File -FilePath $env:GITHUB_PATH -Encoding utf8 -Append + echo "C:\msys64\clang64\bin" | Out-File -FilePath $env:GITHUB_PATH -Encoding utf8 -Append + } elseif ("${{ matrix.arch }}" -eq 'arm64') { + Set-ExecutionPolicy Bypass -Scope Process -Force + [System.Net.ServicePointManager]::SecurityProtocol = [System.Net.ServicePointManager]::SecurityProtocol -bor 3072 + iex ((New-Object System.Net.WebClient).DownloadString('https://community.chocolatey.org/install.ps1')) + echo "C:\ProgramData\chocolatey\bin" | Out-File -FilePath $env:GITHUB_PATH -Encoding utf8 -Append + + choco install -y --no-progress git gzip + echo "C:\Program Files\Git\cmd" | Out-File -FilePath $env:GITHUB_PATH -Encoding utf8 -Append + + Invoke-WebRequest -Uri "https://github.com/mstorsjo/llvm-mingw/releases/download/20240619/llvm-mingw-20240619-ucrt-aarch64.zip" -OutFile "${{ runner.temp }}\llvm-mingw-ucrt-aarch64.zip" + Expand-Archive -Path ${{ runner.temp }}\llvm-mingw-ucrt-aarch64.zip -DestinationPath "C:\Program Files\" + $installPath=(Resolve-Path -Path "C:\Program Files\llvm-mingw-*-ucrt-aarch64").path + echo $installPath\bin | Out-File -FilePath $env:GITHUB_PATH -Encoding utf8 -Append + } - uses: actions/checkout@v4 - - name: Set Version - run: | - $ver=${env:GITHUB_REF_NAME}.trim("v") - echo VERSION=$ver | Out-File -FilePath ${env:GITHUB_ENV} -Encoding utf8 -Append - - uses: 'google-github-actions/auth@v2' - with: - project_id: 'ollama' - credentials_json: '${{ secrets.GOOGLE_SIGNING_CREDENTIALS }}' - - run: echo "${{ vars.OLLAMA_CERT }}" | Out-File -FilePath ollama_inc.crt -Encoding utf8 - - name: install Windows SDK 8.1 to get signtool - run: | - $ErrorActionPreference = "Stop" - write-host "downloading SDK" - Invoke-WebRequest -Uri "https://go.microsoft.com/fwlink/p/?LinkId=323507" -OutFile "${env:RUNNER_TEMP}\sdksetup.exe" - Start-Process "${env:RUNNER_TEMP}\sdksetup.exe" -ArgumentList @("/q") -NoNewWindow -Wait - write-host "Win SDK 8.1 installed" - gci -path 'C:\Program Files (x86)\Windows Kits\' -r -fi 'signtool.exe' - - name: install signing plugin - run: | - $ErrorActionPreference = "Stop" - write-host "downloading plugin" - Invoke-WebRequest -Uri "https://github.com/GoogleCloudPlatform/kms-integrations/releases/download/cng-v1.0/kmscng-1.0-windows-amd64.zip" -OutFile 
"${env:RUNNER_TEMP}\plugin.zip" - Expand-Archive -Path "${env:RUNNER_TEMP}\plugin.zip" -DestinationPath ${env:RUNNER_TEMP}\plugin\ - write-host "Installing plugin" - & "${env:RUNNER_TEMP}\plugin\*\kmscng.msi" /quiet - write-host "plugin installed" - uses: actions/setup-go@v5 with: go-version-file: go.mod - cache: true - - run: go get ./... - run: | - $gopath=(get-command go).source | split-path -parent - $gccpath=(get-command gcc).source | split-path -parent - import-module 'C:\Program Files\Microsoft Visual Studio\2022\Community\Common7\Tools\Microsoft.VisualStudio.DevShell.dll' - Enter-VsDevShell -Arch arm64 -vsinstallpath 'C:\Program Files\Microsoft Visual Studio\2022\Community' -skipautomaticlocation - $env:PATH="$gopath;$gccpath;$env:PATH" - echo $env:PATH - $env:ARCH="arm64" - .\scripts\build_windows.ps1 buildOllama buildApp gatherDependencies sign distZip - name: 'Windows Build' + go build -o dist/${{ matrix.os }}-${{ matrix.arch }}/ . + - run: | + $env:VERSION='${{ github.ref_name }}' -Replace "v(.*)", '$1' + & .\scripts\build_windows.ps1 buildApp + env: + VCToolsRedistDir: stub - uses: actions/upload-artifact@v4 with: - name: windows-arm64 + name: build-${{ matrix.os }}-${{ matrix.arch }} path: | - dist/windows-arm64/** - dist/windows-arm64-app.exe - dist/ollama-windows-arm64.zip + dist\${{ matrix.os }}-${{ matrix.arch }}\*.exe + dist\${{ matrix.os }}-${{ matrix.arch }}-app.exe - # Import the prior generation steps plus the full arm64 build, and build the final windows assets - build-windows: - environment: release + windows-sign: runs-on: windows - needs: - - generate-windows-cuda - - generate-windows-rocm - - generate-windows-cpu - - windows-arm64 - env: - KEY_CONTAINER: ${{ vars.KEY_CONTAINER }} + environment: release + needs: [windows-depends, windows-build] steps: - uses: actions/checkout@v4 + - uses: google-github-actions/auth@v2 with: - submodules: recursive - - name: Set Version - shell: bash - run: echo "VERSION=${GITHUB_REF_NAME#v}" >> $GITHUB_ENV - - uses: 'google-github-actions/auth@v2' - with: - project_id: 'ollama' - credentials_json: '${{ secrets.GOOGLE_SIGNING_CREDENTIALS }}' - - run: echo "${{ vars.OLLAMA_CERT }}" > ollama_inc.crt - - name: install Windows SDK 8.1 to get signtool - run: | - $ErrorActionPreference = "Stop" - write-host "downloading SDK" - Invoke-WebRequest -Uri "https://go.microsoft.com/fwlink/p/?LinkId=323507" -OutFile "${env:RUNNER_TEMP}\sdksetup.exe" - Start-Process "${env:RUNNER_TEMP}\sdksetup.exe" -ArgumentList @("/q") -NoNewWindow -Wait - write-host "Win SDK 8.1 installed" - gci -path 'C:\Program Files (x86)\Windows Kits\' -r -fi 'signtool.exe' - - name: install signing plugin - run: | - $ErrorActionPreference = "Stop" - write-host "downloading plugin" - Invoke-WebRequest -Uri "https://github.com/GoogleCloudPlatform/kms-integrations/releases/download/cng-v1.0/kmscng-1.0-windows-amd64.zip" -OutFile "${env:RUNNER_TEMP}\plugin.zip" - Expand-Archive -Path "${env:RUNNER_TEMP}\plugin.zip" -DestinationPath ${env:RUNNER_TEMP}\plugin\ - write-host "Installing plugin" - & "${env:RUNNER_TEMP}\plugin\*\kmscng.msi" /quiet - write-host "plugin installed" - - name: Install msys2 - run: | - $msys2_url="https://github.com/msys2/msys2-installer/releases/download/2024-07-27/msys2-x86_64-20240727.exe" - write-host "Downloading msys2" - Invoke-WebRequest -Uri "${msys2_url}" -OutFile "${env:RUNNER_TEMP}\msys2.exe" - write-host "Installing msys2" - Start-Process "${env:RUNNER_TEMP}\msys2.exe" -ArgumentList @("in", "--confirm-command", "--accept-messages", "--root", 
"C:/msys64") -NoNewWindow -Wait - echo "c:\msys64\usr\bin" | Out-File -FilePath $env:GITHUB_PATH -Encoding utf8 -Append - - name: Install msys2 tools - run: | - Start-Process "c:\msys64\usr\bin\pacman.exe" -ArgumentList @("-S", "--noconfirm", "mingw-w64-clang-x86_64-gcc-compat", "mingw-w64-clang-x86_64-clang", "make") -NoNewWindow -Wait - echo "C:\msys64\clang64\bin" | Out-File -FilePath $env:GITHUB_PATH -Encoding utf8 -Append - - name: verify tools - run: | - get-command gcc - gcc --version - get-command make - make --version - - uses: actions/setup-go@v5 - with: - go-version-file: go.mod - cache: true - - run: go get - - uses: actions/download-artifact@v4 - with: - name: generate-windows-cpu - path: dist/windows-amd64/ - - uses: actions/download-artifact@v4 - with: - name: generate-windows-cuda-11.3 - path: dist/windows-amd64/ - - uses: actions/download-artifact@v4 - with: - name: generate-windows-cuda-12.4 - path: dist/windows-amd64/ - - uses: actions/download-artifact@v4 - with: - name: generate-windows-rocm - path: dist/windows-amd64/ - - uses: actions/download-artifact@v4 - with: - name: windows-arm64 - path: dist + project_id: ollama + credentials_json: ${{ secrets.GOOGLE_SIGNING_CREDENTIALS }} - run: | - import-module 'C:\Program Files (x86)\Microsoft Visual Studio\2019\Enterprise\Common7\Tools\Microsoft.VisualStudio.DevShell.dll' - Enter-VsDevShell -vsinstallpath 'C:\Program Files (x86)\Microsoft Visual Studio\2019\Enterprise' -skipautomaticlocation -DevCmdArguments '-arch=x64 -no_logo' - $env:OLLAMA_SKIP_GENERATE="1" - $env:ARCH="amd64" - if (!(gcc --version | select-string -quiet clang)) { throw "wrong gcc compiler detected - must be clang" } - & .\scripts\build_windows.ps1 + $ErrorActionPreference = "Stop" + Invoke-WebRequest -Uri "https://go.microsoft.com/fwlink/p/?LinkId=323507" -OutFile "${{ runner.temp }}\sdksetup.exe" + Start-Process "${{ runner.temp }}\sdksetup.exe" -ArgumentList @("/q") -NoNewWindow -Wait + + Invoke-WebRequest -Uri "https://github.com/GoogleCloudPlatform/kms-integrations/releases/download/cng-v1.0/kmscng-1.0-windows-amd64.zip" -OutFile "${{ runner.temp }}\plugin.zip" + Expand-Archive -Path "${{ runner.temp }}\plugin.zip" -DestinationPath "${{ runner.temp }}\plugin\" + & "${{ runner.temp }}\plugin\*\kmscng.msi" /quiet + + echo "${{ vars.OLLAMA_CERT }}" >ollama_inc.crt + - uses: actions/download-artifact@v4 + with: + name: build-windows-* + path: dist\ + merge-multiple: true + - uses: actions/download-artifact@v4 + with: + name: depends-windows-amd64-* + path: dist\windows-amd64\ + merge-multiple: true + - run: | + & .\scripts\build_windows.ps1 gatherDependencies sign buildInstaller distZip - uses: actions/upload-artifact@v4 with: name: dist-windows path: | - dist/OllamaSetup.exe - dist/ollama-windows-*.zip + dist\OllamaSetup.exe + dist\ollama-windows-*.zip - # Linux x86 assets built using the container based build - build-linux-amd64: - environment: release - runs-on: linux - env: - PLATFORM: linux/amd64 - steps: - - uses: actions/checkout@v4 - with: - submodules: recursive - - name: Set Version - shell: bash - run: echo "VERSION=${GITHUB_REF_NAME#v}" >> $GITHUB_ENV - - run: | - ./scripts/build_linux.sh - - uses: actions/upload-artifact@v4 - with: - name: dist-linux-amd64 - path: | - dist/*linux* - !dist/*-cov - - # Linux ARM assets built using the container based build - # (at present, docker isn't pre-installed on arm ubunutu images) - build-linux-arm64: - environment: release - runs-on: linux-arm64 - env: - PLATFORM: linux/arm64 - steps: - - uses: 
actions/checkout@v4 - with: - submodules: recursive - - name: Set Version - shell: bash - run: echo "VERSION=${GITHUB_REF_NAME#v}" >> $GITHUB_ENV - - name: 'Install Docker' - run: | - # Add Docker's official GPG key: - env - uname -a - sudo apt-get update - sudo apt-get install -y ca-certificates curl - sudo install -m 0755 -d /etc/apt/keyrings - sudo curl -fsSL https://download.docker.com/linux/ubuntu/gpg -o /etc/apt/keyrings/docker.asc - sudo chmod a+r /etc/apt/keyrings/docker.asc - - # Add the repository to Apt sources: - echo \ - "deb [arch=$(dpkg --print-architecture) signed-by=/etc/apt/keyrings/docker.asc] https://download.docker.com/linux/ubuntu \ - $(. /etc/os-release && echo "$VERSION_CODENAME") stable" | \ - sudo tee /etc/apt/sources.list.d/docker.list > /dev/null - sudo apt-get update - sudo apt-get install -y docker-ce docker-ce-cli containerd.io - sudo usermod -aG docker $USER - sudo apt-get install acl - sudo setfacl --modify user:$USER:rw /var/run/docker.sock - - run: | - ./scripts/build_linux.sh - - uses: actions/upload-artifact@v4 - with: - name: dist-linux-arm64 - path: | - dist/*linux* - !dist/*-cov - - # Container image build - build-container-image: - environment: release + linux-build: strategy: matrix: - runner: - - linux - - linux-arm64 - runs-on: ${{ matrix.runner }} - env: - FINAL_IMAGE_REPO: ollama/ollama - steps: - - uses: actions/checkout@v4 - with: - submodules: recursive - - name: 'Install Docker' - if: ${{ startsWith(matrix.runner, 'linux-arm64') }} - run: | - sudo apt-get update - sudo apt-get install -y ca-certificates curl - sudo install -m 0755 -d /etc/apt/keyrings - sudo curl -fsSL https://download.docker.com/linux/ubuntu/gpg -o /etc/apt/keyrings/docker.asc - sudo chmod a+r /etc/apt/keyrings/docker.asc - echo "deb [arch=$(dpkg --print-architecture) signed-by=/etc/apt/keyrings/docker.asc] https://download.docker.com/linux/ubuntu \ - $(. /etc/os-release && echo "$VERSION_CODENAME") stable" | \ - sudo tee /etc/apt/sources.list.d/docker.list > /dev/null - sudo apt-get update - sudo apt-get install -y docker-ce docker-ce-cli containerd.io - sudo usermod -aG docker $USER - sudo apt-get install acl - sudo setfacl --modify user:$USER:rw /var/run/docker.sock - - name: Docker meta - id: meta - uses: docker/metadata-action@v5 - with: - images: ${{ env.FINAL_IMAGE_REPO }} - flavor: | - latest=false - tags: | - type=ref,enable=true,priority=600,prefix=0.0.0-pr,suffix=,event=pr - type=semver,pattern={{version}} - - name: Set Version - shell: bash - run: | - machine=$(uname -m) - case ${machine} in - x86_64) echo ARCH=amd64; echo PLATFORM_PAIR=linux-amd64 ;; - aarch64) echo ARCH=arm64; echo PLATFORM_PAIR=linux-arm64 ;; - esac >>$GITHUB_ENV - echo GOFLAGS="'-ldflags=-w -s \"-X=github.com/ollama/ollama/version.Version=${{ env.DOCKER_METADATA_OUTPUT_VERSION }}\" \"-X=github.com/ollama/ollama/server.mode=release\"'" >>$GITHUB_ENV - - name: Set up Docker Buildx - uses: docker/setup-buildx-action@v3 - - name: Login to Docker Hub - uses: docker/login-action@v3 - with: - username: ${{ vars.DOCKER_USER }} - password: ${{ secrets.DOCKER_ACCESS_TOKEN }} - - name: Build and push by digest - id: build - uses: docker/build-push-action@v6 - with: - context: "." 
- platforms: linux/${{ env.ARCH }} - build-args: | - GOFLAGS - outputs: type=image,name=${{ env.FINAL_IMAGE_REPO }},push-by-digest=true,name-canonical=true,push=true - - name: Export digest - run: | - mkdir -p /tmp/digests - digest="${{ steps.build.outputs.digest }}" - touch "/tmp/digests/${digest#sha256:}" - - name: Upload digest - uses: actions/upload-artifact@v4 - with: - name: digests-${{ env.PLATFORM_PAIR }} - path: /tmp/digests/* - if-no-files-found: error - retention-days: 1 - merge: + include: + - os: linux + arch: amd64 + targets: 'archive rocm' + - os: linux + arch: arm64 + targets: archive + runs-on: ${{ matrix.arch == 'arm64' && format('{0}-{1}', matrix.os, matrix.arch) || matrix.os }} environment: release - runs-on: linux - needs: - - build-container-image + needs: setup-environment env: - FINAL_IMAGE_REPO: ollama/ollama + GOFLAGS: ${{ needs.setup-environment.outputs.GOFLAGS }} steps: - uses: actions/checkout@v4 + - uses: docker/setup-buildx-action@v3 + - run: | + apt-get update && apt-get install pigz + for TARGET in ${{ matrix.targets }}; do docker buildx build --platform $PLATFORM --target $TARGET --output type=local,dest=dist/$PLATFORM .; done + tar c -C dist/$PLATFORM . | pigz -9cv >dist/ollama-${PLATFORM//\//-}.tgz + env: + PLATFORM: ${{ matrix.os }}/${{ matrix.arch }} + - uses: actions/upload-artifact@v4 with: - submodules: recursive - - name: Download digests - uses: actions/download-artifact@v4 - with: - path: /tmp/digests - pattern: digests-* - merge-multiple: true - - name: Set up Docker Buildx - uses: docker/setup-buildx-action@v3 - - name: Docker meta - id: meta - uses: docker/metadata-action@v5 - with: - images: ${{ env.FINAL_IMAGE_REPO }} - flavor: | - latest=false - tags: | - type=ref,enable=true,priority=600,prefix=0.0.0-pr,suffix=,event=pr - type=semver,pattern={{version}} - - name: Set Version - shell: bash - run: | - machine=$(uname -m) - case ${machine} in - x86_64) echo ARCH=amd64; echo PLATFORM_PAIR=linux-amd64 ;; - aarch64) echo ARCH=arm64; echo PLATFORM_PAIR=linux-arm64 ;; - esac >>$GITHUB_ENV - echo GOFLAGS="'-ldflags=-w -s \"-X=github.com/ollama/ollama/version.Version=${{ env.DOCKER_METADATA_OUTPUT_VERSION }}\" \"-X=github.com/ollama/ollama/server.mode=release\"'" >>$GITHUB_ENV - - name: Login to Docker Hub - uses: docker/login-action@v3 - with: - username: ${{ vars.DOCKER_USER }} - password: ${{ secrets.DOCKER_ACCESS_TOKEN }} - - name: Create manifest list and push - working-directory: /tmp/digests - run: | - docker buildx imagetools create $(jq -cr '.tags | map("-t " + .) 
| join(" ")' <<< "$DOCKER_METADATA_OUTPUT_JSON") \ - $(printf '${{ env.FINAL_IMAGE_REPO }}@sha256:%s ' *) - - name: Inspect image - run: | - docker buildx imagetools inspect ${{ env.FINAL_IMAGE_REPO }}:${{ steps.meta.outputs.version }} - build-container-image-rocm: + name: dist-${{ matrix.os }}-${{ matrix.arch }} + path: | + dist/ollama-${{ matrix.os }}-${{ matrix.arch }}.tgz + + docker-build: + strategy: + matrix: + include: + - flavor: 'latest=false' + platforms: linux/amd64,linux/arm64 + build-args: | + GOFLAGS=${{ needs.setup-environment.outputs.GOFLAGS }} + - flavor: 'latest=false,suffix=rocm' + platforms: linux/amd64 + build-args: | + GOFLAGS=${{ needs.setup-environment.outputs.GOFLAGS }} + FLAVOR=rocm + runs-on: linux environment: release - runs-on: linux - env: - FINAL_IMAGE_REPO: ollama/ollama - ARCH: amd64 - PLATFORM_PAIR: linux-amd64 + needs: setup-environment steps: - uses: actions/checkout@v4 - with: - submodules: recursive - - name: Docker meta - id: meta - uses: docker/metadata-action@v5 - with: - images: ${{ env.FINAL_IMAGE_REPO }} - flavor: | - latest=false - tags: | - type=ref,enable=true,priority=600,prefix=0.0.0-pr,suffix=,event=pr - type=semver,pattern={{version}} - - name: Set Version - shell: bash - run: | - echo GOFLAGS="'-ldflags=-w -s \"-X=github.com/ollama/ollama/version.Version=${{ env.DOCKER_METADATA_OUTPUT_VERSION }}\" \"-X=github.com/ollama/ollama/server.mode=release\"'" >>$GITHUB_ENV - - name: Set up Docker Buildx - uses: docker/setup-buildx-action@v3 - - name: Login to Docker Hub - uses: docker/login-action@v3 + - uses: docker/setup-qemu-action@v2 + - uses: docker/setup-buildx-action@v2 + - uses: docker/login-action@v3 with: username: ${{ vars.DOCKER_USER }} password: ${{ secrets.DOCKER_ACCESS_TOKEN }} - - name: Build and push by digest - id: build - uses: docker/build-push-action@v6 + - id: metadata + uses: docker/metadata-action@v4 with: - context: "." - target: runtime-rocm - build-args: | - GOFLAGS - tags: ${{ env.FINAL_IMAGE_REPO }}:${{ env.DOCKER_METADATA_OUTPUT_VERSION}}-rocm + flavor: ${{ matrix.flavor }} + images: | + ollama/ollama + tags: | + type=semver,pattern={{version}} + - uses: docker/build-push-action@v6 + with: + context: . push: true + platforms: ${{ matrix.platforms }} + build-args: ${{ matrix.build-args }} + tags: ${{ steps.metadata.outputs.tags }} + labels: ${{ steps.metadata.outputs.labels }} + cache-from: type=registry,ref=ollama/ollama:latest + cache-to: type=inline + provenance: false # Aggregate all the assets and ship a release release: - needs: - - build-darwin - - build-windows - - build-linux-amd64 - - build-linux-arm64 + needs: [darwin-sign, windows-sign, linux-build] runs-on: linux environment: release permissions: @@ -734,14 +359,22 @@ jobs: - name: Set Version shell: bash run: | - echo "VERSION=${GITHUB_REF_NAME#v}" >> $GITHUB_ENV - echo "RELEASE_VERSION=$(echo ${GITHUB_REF_NAME} | cut -f1 -d-)" >> $GITHUB_ENV - - name: Retrieve built artifact - uses: actions/download-artifact@v4 + - uses: actions/download-artifact@v4 with: path: dist - pattern: dist-* - merge-multiple: true + pattern: dist-darwin + - uses: actions/download-artifact@v4 + with: + path: dist + pattern: dist-windows + - uses: actions/download-artifact@v4 + with: + path: dist + pattern: dist-linux-* + - uses: actions/download-artifact@v4 + with: + path: dist + pattern: dist-windows - run: | ls -lh dist/ (cd dist; find . 
-type f | xargs sha256sum > ../sha256sum.txt) @@ -749,15 +382,17 @@ jobs: cat dist/sha256sum.txt - name: Create or update Release run: | - echo "Looking for existing release for ${{ env.RELEASE_VERSION }}" - OLD_TAG=$(gh release ls --json name,tagName | jq -r ".[] | select(.name == \"${{ env.RELEASE_VERSION }}\") | .tagName") + RELEASE_VERSION="$(echo ${GITHUB_REF_NAME} | cut -f1 -d-)" + + echo "Looking for existing release for ${RELEASE_VERSION}" + OLD_TAG=$(gh release ls --json name,tagName | jq -r ".[] | select(.name == \"${RELEASE_VERSION}\") | .tagName") if [ -n "$OLD_TAG" ]; then - echo "Updating release ${{ env.RELEASE_VERSION }} to point to new tag ${GITHUB_REF_NAME}" + echo "Updating release ${RELEASE_VERSION} to point to new tag ${GITHUB_REF_NAME}" gh release edit ${OLD_TAG} --tag ${GITHUB_REF_NAME} else - echo "Creating new release ${{ env.RELEASE_VERSION }} pointing to tag ${GITHUB_REF_NAME}" + echo "Creating new release ${RELEASE_VERSION} pointing to tag ${GITHUB_REF_NAME}" gh release create ${GITHUB_REF_NAME} \ - --title ${{ env.RELEASE_VERSION }} \ + --title ${RELEASE_VERSION} \ --draft \ --generate-notes \ --prerelease diff --git a/.github/workflows/test.yaml b/.github/workflows/test.yaml index 8dcc506b..f8e1cadf 100644 --- a/.github/workflows/test.yaml +++ b/.github/workflows/test.yaml @@ -1,11 +1,5 @@ name: test -env: - ROCM_WINDOWS_URL: https://download.amd.com/developer/eula/rocm-hub/AMD-Software-PRO-Edition-24.Q3-WinSvr2022-For-HIP.exe - MSYS2_URL: https://github.com/msys2/msys2-installer/releases/download/2024-07-27/msys2-x86_64-20240727.exe - CUDA_12_WINDOWS_URL: https://developer.download.nvidia.com/compute/cuda/12.4.0/local_installers/cuda_12.4.0_551.61_windows.exe - CUDA_12_WINDOWS_VER: 12.4 - concurrency: # For PRs, later CI runs preempt previous ones. e.g. a force push on a PR # cancels running CI jobs and starts all new ones.
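For reference, the new release step above derives the release name by cutting the pushed tag at the first '-', so a prerelease tag such as v0.5.8-rc1 updates the v0.5.8 release. A minimal Go sketch of the same transformation (the releaseVersion helper is ours, not part of the patch):

package main

import (
	"fmt"
	"strings"
)

// releaseVersion mirrors `echo ${GITHUB_REF_NAME} | cut -f1 -d-`:
// keep everything before the first '-' so release-candidate tags
// resolve to the release they belong to.
func releaseVersion(refName string) string {
	version, _, _ := strings.Cut(refName, "-")
	return version
}

func main() {
	for _, tag := range []string{"v0.5.7", "v0.5.8-rc1"} {
		fmt.Printf("%s -> %s\n", tag, releaseVersion(tag))
	}
}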
@@ -27,7 +21,7 @@ jobs: changes: runs-on: ubuntu-latest outputs: - RUNNERS: ${{ steps.changes.outputs.RUNNERS }} + changed: ${{ steps.changes.outputs.changed }} steps: - uses: actions/checkout@v4 with: @@ -35,309 +29,139 @@ jobs: - id: changes run: | changed() { - git diff-tree -r --no-commit-id --name-only \ - $(git merge-base ${{ github.event.pull_request.base.sha }} ${{ github.event.pull_request.head.sha }}) \ - ${{ github.event.pull_request.head.sha }} \ + local BASE=${{ github.event.pull_request.base.sha }} + local HEAD=${{ github.event.pull_request.head.sha }} + local MERGE_BASE=$(git merge-base $BASE $HEAD) + git diff-tree -r --no-commit-id --name-only "$MERGE_BASE" "$HEAD" \ | xargs python3 -c "import sys; from pathlib import Path; print(any(Path(x).match(glob) for x in sys.argv[1:] for glob in '$*'.split(' ')))" } - { - echo RUNNERS=$(changed 'llama/**') - } >>$GITHUB_OUTPUT + echo changed=$(changed 'llama/llama.cpp/**' 'ml/backend/ggml/ggml/**') | tee -a $GITHUB_OUTPUT - runners-linux-cuda: + linux: needs: [changes] - if: ${{ needs.changes.outputs.RUNNERS == 'True' }} + if: needs.changes.outputs.changed == 'True' strategy: matrix: - cuda-version: - - '11.8.0' + include: + - preset: CPU + - preset: CUDA + container: nvidia/cuda:11.8.0-devel-ubuntu22.04 + flags: '-DCMAKE_CUDA_ARCHITECTURES=87' + - preset: ROCm + container: rocm/dev-ubuntu-22.04:6.1.2 + extra-packages: rocm-libs + flags: '-DAMDGPU_TARGETS=gfx1010 -DCMAKE_PREFIX_PATH=/opt/rocm' runs-on: linux - container: nvidia/cuda:${{ matrix.cuda-version }}-devel-ubuntu20.04 + container: ${{ matrix.container }} steps: + - uses: actions/checkout@v4 - run: | - apt-get update && apt-get install -y git build-essential curl + [ -n "${{ matrix.container }}" ] || sudo=sudo + $sudo apt-get update + $sudo apt-get install -y cmake ccache ${{ matrix.extra-packages }} env: DEBIAN_FRONTEND: noninteractive - - uses: actions/checkout@v4 - - uses: actions/setup-go@v4 + - uses: actions/cache@v4 with: - go-version-file: go.mod - cache: true - - run: go get ./... + path: /github/home/.cache/ccache + key: ccache-${{ runner.os }}-${{ runner.arch }}-${{ matrix.preset }} - run: | - git config --global --add safe.directory /__w/ollama/ollama - cores=$(grep '^core id' /proc/cpuinfo |sort -u|wc -l) - make -j $cores cuda_v11 - runners-linux-rocm: + cmake --preset ${{ matrix.preset }} ${{ matrix.flags }} + cmake --build --preset ${{ matrix.preset }} --parallel + + windows: needs: [changes] - if: ${{ needs.changes.outputs.RUNNERS == 'True' }} + if: needs.changes.outputs.changed == 'True' strategy: matrix: - rocm-version: - - '6.1.2' - runs-on: linux - container: rocm/dev-ubuntu-20.04:${{ matrix.rocm-version }} - steps: - - run: | - apt-get update && apt-get install -y git build-essential curl rocm-libs - env: - DEBIAN_FRONTEND: noninteractive - - uses: actions/checkout@v4 - - uses: actions/setup-go@v4 - with: - go-version-file: go.mod - cache: true - - run: go get ./... 
- - run: | - git config --global --add safe.directory /__w/ollama/ollama - cores=$(grep '^core id' /proc/cpuinfo |sort -u|wc -l) - make -j $cores rocm - - # ROCm generation step - runners-windows-rocm: - needs: [changes] - if: ${{ needs.changes.outputs.RUNNERS == 'True' }} + include: + - preset: CPU + - preset: CUDA + install: https://developer.download.nvidia.com/compute/cuda/11.8.0/local_installers/cuda_11.8.0_522.06_windows.exe + flags: '-DCMAKE_CUDA_ARCHITECTURES=87' + - preset: ROCm + install: https://download.amd.com/developer/eula/rocm-hub/AMD-Software-PRO-Edition-24.Q3-WinSvr2022-For-HIP.exe + flags: '-DAMDGPU_TARGETS=gfx1010' runs-on: windows steps: - - uses: actions/checkout@v4 - - uses: actions/setup-go@v5 + - run: | + choco install -y --no-progress ccache ninja + ccache -o cache_dir=${{ github.workspace }}\.ccache + - if: matrix.preset == 'CUDA' || matrix.preset == 'ROCm' + id: cache-install + uses: actions/cache/restore@v4 with: - go-version-file: go.mod - cache: true - - name: Set make jobs default - run: | - echo "MAKEFLAGS=--jobs=$((Get-ComputerInfo -Property CsProcessors).CsProcessors.NumberOfCores)" | Out-File -FilePath $env:GITHUB_ENV -Encoding utf8 -Append - - # ROCM installation steps - - name: 'Cache ROCm installer' - id: cache-rocm - uses: actions/cache@v4 - with: - path: rocm-install.exe - key: ${{ env.ROCM_WINDOWS_URL }} - - name: 'Conditionally Download ROCm' - if: steps.cache-rocm.outputs.cache-hit != 'true' + path: | + C:\Program Files\NVIDIA GPU Computing Toolkit\CUDA + C:\Program Files\AMD\ROCm + key: ${{ matrix.install }} + - if: matrix.preset == 'CUDA' + name: Install CUDA ${{ matrix.cuda-version }} run: | $ErrorActionPreference = "Stop" - Invoke-WebRequest -Uri "${env:ROCM_WINDOWS_URL}" -OutFile "rocm-install.exe" - - name: 'Install ROCm' - run: | - Start-Process "rocm-install.exe" -ArgumentList '-install' -NoNewWindow -Wait - - name: 'Verify ROCm' - run: | - & 'C:\Program Files\AMD\ROCm\*\bin\clang.exe' --version - echo "HIP_PATH=$(Resolve-Path 'C:\Program Files\AMD\ROCm\*\bin\clang.exe' | split-path | split-path | select -first 1)" | Out-File -FilePath $env:GITHUB_ENV -Encoding utf8 -Append + if ("${{ steps.cache-install.outputs.cache-hit }}" -ne 'true') { + Invoke-WebRequest -Uri "${{ matrix.install }}" -OutFile "install.exe" + Start-Process -FilePath .\install.exe -ArgumentList (@("-s", "cudart_11.8", "nvcc_11.8", "cublas_11.8", "cublas_dev_11.8")) -NoNewWindow -Wait + } - - name: Add msys paths - run: | - echo "c:\msys64\usr\bin" | Out-File -FilePath $env:GITHUB_PATH -Encoding utf8 -Append - echo "C:\msys64\clang64\bin" | Out-File -FilePath $env:GITHUB_PATH -Encoding utf8 -Append - - name: Install msys2 tools - run: | - Start-Process "c:\msys64\usr\bin\pacman.exe" -ArgumentList @("-S", "--noconfirm", "mingw-w64-clang-x86_64-gcc-compat", "mingw-w64-clang-x86_64-clang") -NoNewWindow -Wait - - - name: make rocm runner - run: | - import-module 'C:\Program Files (x86)\Microsoft Visual Studio\2019\Enterprise\Common7\Tools\Microsoft.VisualStudio.DevShell.dll' - Enter-VsDevShell -vsinstallpath 'C:\Program Files (x86)\Microsoft Visual Studio\2019\Enterprise' -skipautomaticlocation -DevCmdArguments '-arch=x64 -no_logo' - if (!(gcc --version | select-string -quiet clang)) { throw "wrong gcc compiler detected - must be clang" } - make -C llama print-HIP_PATH print-HIP_LIB_DIR - make rocm - - # CUDA generation step - runners-windows-cuda: - needs: [changes] - if: ${{ needs.changes.outputs.RUNNERS == 'True' }} - runs-on: windows - steps: - - uses: 
actions/checkout@v4 - - uses: actions/setup-go@v5 - with: - go-version-file: go.mod - cache: true - - name: Set make jobs default - run: | - echo "MAKEFLAGS=--jobs=$((Get-ComputerInfo -Property CsProcessors).CsProcessors.NumberOfCores)" | Out-File -FilePath $env:GITHUB_ENV -Encoding utf8 -Append - - # CUDA installation steps - - name: 'Cache CUDA installer' - id: cache-cuda - uses: actions/cache@v4 - with: - path: cuda-install.exe - key: ${{ env.CUDA_12_WINDOWS_URL }} - - name: 'Conditionally Download CUDA' - if: steps.cache-cuda.outputs.cache-hit != 'true' - run: | - $ErrorActionPreference = "Stop" - Invoke-WebRequest -Uri "${env:CUDA_12_WINDOWS_URL}" -OutFile "cuda-install.exe" - - name: 'Install CUDA' - run: | - $subpackages = @("cudart", "nvcc", "cublas", "cublas_dev") | foreach-object {"${_}_${{ env.CUDA_12_WINDOWS_VER }}"} - Start-Process "cuda-install.exe" -ArgumentList (@("-s") + $subpackages) -NoNewWindow -Wait - - name: 'Verify CUDA' - run: | - & (resolve-path "c:\Program Files\NVIDIA*\CUDA\v*\bin\nvcc.exe")[0] --version - $cudaPath=((resolve-path "c:\Program Files\NVIDIA*\CUDA\v*\bin\nvcc.exe")[0].path | split-path | split-path) - $cudaVer=($cudaPath | split-path -leaf ) -replace 'v(\d+).(\d+)', '$1_$2' + $cudaPath = (Resolve-Path "C:\Program Files\NVIDIA GPU Computing Toolkit\CUDA\*").path echo "$cudaPath\bin" | Out-File -FilePath $env:GITHUB_PATH -Encoding utf8 -Append - echo "CUDA_PATH=$cudaPath" | Out-File -FilePath $env:GITHUB_ENV -Encoding utf8 -Append - echo "CUDA_PATH_V${cudaVer}=$cudaPath" | Out-File -FilePath $env:GITHUB_ENV -Encoding utf8 -Append - echo "CUDA_PATH_VX_Y=CUDA_PATH_V${cudaVer}" | Out-File -FilePath $env:GITHUB_ENV -Encoding utf8 -Append + - if: matrix.preset == 'ROCm' + name: Install ROCm ${{ matrix.rocm-version }} + run: | + $ErrorActionPreference = "Stop" + if ("${{ steps.cache-install.outputs.cache-hit }}" -ne 'true') { + Invoke-WebRequest -Uri "${{ matrix.install }}" -OutFile "install.exe" + Start-Process -FilePath .\install.exe -ArgumentList '-install' -NoNewWindow -Wait + } - - name: Add msys paths - run: | - echo "c:\msys64\usr\bin" | Out-File -FilePath $env:GITHUB_PATH -Encoding utf8 -Append - echo "C:\msys64\clang64\bin" | Out-File -FilePath $env:GITHUB_PATH -Encoding utf8 -Append - - name: Install msys2 tools - run: | - Start-Process "c:\msys64\usr\bin\pacman.exe" -ArgumentList @("-S", "--noconfirm", "mingw-w64-clang-x86_64-gcc-compat", "mingw-w64-clang-x86_64-clang") -NoNewWindow -Wait - - name: make cuda runner - run: | - import-module 'C:\Program Files (x86)\Microsoft Visual Studio\2019\Enterprise\Common7\Tools\Microsoft.VisualStudio.DevShell.dll' - Enter-VsDevShell -vsinstallpath 'C:\Program Files (x86)\Microsoft Visual Studio\2019\Enterprise' -skipautomaticlocation -DevCmdArguments '-arch=x64 -no_logo' - if (!(gcc --version | select-string -quiet clang)) { throw "wrong gcc compiler detected - must be clang" } - make cuda_v$(($env:CUDA_PATH | split-path -leaf) -replace 'v(\d+).*', '$1') - - runners-cpu: - needs: [changes] - if: ${{ needs.changes.outputs.RUNNERS == 'True' }} - strategy: - matrix: - os: [ubuntu-latest, macos-latest, windows-2019] - arch: [amd64, arm64] - exclude: - - os: ubuntu-latest - arch: arm64 - - os: windows-2019 - arch: arm64 - runs-on: ${{ matrix.os }} - env: - GOARCH: ${{ matrix.arch }} - ARCH: ${{ matrix.arch }} - CGO_ENABLED: '1' - steps: + $hipPath = (Resolve-Path "C:\Program Files\AMD\ROCm\*").path + echo "$hipPath\bin" | Out-File -FilePath $env:GITHUB_PATH -Encoding utf8 -Append + echo 
"CC=$hipPath\bin\clang.exe" | Out-File -FilePath $env:GITHUB_ENV -Append + echo "CXX=$hipPath\bin\clang++.exe" | Out-File -FilePath $env:GITHUB_ENV -Append + - if: ${{ !cancelled() && steps.cache-install.outputs.cache-hit != 'true' }} + uses: actions/cache/save@v4 + with: + path: | + C:\Program Files\NVIDIA GPU Computing Toolkit\CUDA + C:\Program Files\AMD\ROCm + key: ${{ matrix.install }} - uses: actions/checkout@v4 - - uses: actions/setup-go@v5 + - uses: actions/cache@v4 with: - go-version-file: go.mod - cache: true - - name: Add msys paths - if: ${{ startsWith(matrix.os, 'windows-') }} - run: | - echo "c:\msys64\usr\bin" | Out-File -FilePath $env:GITHUB_PATH -Encoding utf8 -Append - echo "C:\msys64\clang64\bin" | Out-File -FilePath $env:GITHUB_PATH -Encoding utf8 -Append - - name: Install msys2 tools - if: ${{ startsWith(matrix.os, 'windows-') }} - run: | - Start-Process "c:\msys64\usr\bin\pacman.exe" -ArgumentList @("-S", "--noconfirm", "mingw-w64-clang-x86_64-gcc-compat", "mingw-w64-clang-x86_64-clang") -NoNewWindow -Wait - - name: 'Build Windows Go Runners' - if: ${{ startsWith(matrix.os, 'windows-') }} - run: | - $gopath=(get-command go).source | split-path -parent - $gccpath=(get-command gcc).source | split-path -parent - import-module 'C:\Program Files (x86)\Microsoft Visual Studio\2019\Enterprise\Common7\Tools\Microsoft.VisualStudio.DevShell.dll' - Enter-VsDevShell -vsinstallpath 'C:\Program Files (x86)\Microsoft Visual Studio\2019\Enterprise' -skipautomaticlocation -DevCmdArguments '-arch=x64 -no_logo' - $env:CMAKE_SYSTEM_VERSION="10.0.22621.0" - $env:PATH="$gopath;$gccpath;$env:PATH" - echo $env:PATH - if (!(gcc --version | select-string -quiet clang)) { throw "wrong gcc compiler detected - must be clang" } - make -j 4 - - name: 'Build Unix Go Runners' - if: ${{ ! startsWith(matrix.os, 'windows-') }} - run: make -j 4 - - run: go build . 
- - lint: - strategy: - matrix: - os: [ubuntu-latest, macos-latest, windows-2019] - arch: [amd64, arm64] - exclude: - - os: ubuntu-latest - arch: arm64 - - os: windows-2019 - arch: arm64 - - os: macos-latest - arch: amd64 - runs-on: ${{ matrix.os }} - env: - GOARCH: ${{ matrix.arch }} - CGO_ENABLED: '1' - steps: - - uses: actions/checkout@v4 - with: - submodules: recursive - - name: Add msys paths - if: ${{ startsWith(matrix.os, 'windows-') }} - run: | - echo "c:\msys64\usr\bin" | Out-File -FilePath $env:GITHUB_PATH -Encoding utf8 -Append - echo "C:\msys64\clang64\bin" | Out-File -FilePath $env:GITHUB_PATH -Encoding utf8 -Append - - name: Install msys2 tools - if: ${{ startsWith(matrix.os, 'windows-') }} - run: | - Start-Process "c:\msys64\usr\bin\pacman.exe" -ArgumentList @("-S", "--noconfirm", "mingw-w64-clang-x86_64-gcc-compat", "mingw-w64-clang-x86_64-clang") -NoNewWindow -Wait - - uses: actions/setup-go@v5 - with: - go-version-file: go.mod - cache: false + path: ${{ github.workspace }}\.ccache + key: ccache-${{ runner.os }}-${{ runner.arch }}-${{ matrix.preset }} - run: | - case ${{ matrix.arch }} in - amd64) echo ARCH=x86_64 ;; - arm64) echo ARCH=arm64 ;; - esac >>$GITHUB_ENV - shell: bash - - uses: golangci/golangci-lint-action@v6 - with: - args: --timeout 10m0s -v + Import-Module 'C:\Program Files (x86)\Microsoft Visual Studio\2019\Enterprise\Common7\Tools\Microsoft.VisualStudio.DevShell.dll' + Enter-VsDevShell -VsInstallPath 'C:\Program Files (x86)\Microsoft Visual Studio\2019\Enterprise' -SkipAutomaticLocation -DevCmdArguments '-arch=x64 -no_logo' + cmake --preset "${{ matrix.preset }}" ${{ matrix.flags }} + cmake --build --parallel --preset "${{ matrix.preset }}" + env: + CMAKE_GENERATOR: Ninja + test: strategy: matrix: - os: [ubuntu-latest, macos-latest, windows-2019] - arch: [amd64] - exclude: - - os: ubuntu-latest - arch: arm64 - - os: windows-2019 - arch: arm64 + os: [ubuntu-latest, macos-latest, windows-latest] runs-on: ${{ matrix.os }} env: - GOARCH: ${{ matrix.arch }} CGO_ENABLED: '1' steps: - uses: actions/checkout@v4 - with: - submodules: recursive - - name: Add msys paths - if: ${{ startsWith(matrix.os, 'windows-') }} - run: | - echo "c:\msys64\usr\bin" | Out-File -FilePath $env:GITHUB_PATH -Encoding utf8 -Append - echo "C:\msys64\clang64\bin" | Out-File -FilePath $env:GITHUB_PATH -Encoding utf8 -Append - - name: Install msys2 tools - if: ${{ startsWith(matrix.os, 'windows-') }} - run: | - Start-Process "c:\msys64\usr\bin\pacman.exe" -ArgumentList @("-S", "--noconfirm", "mingw-w64-clang-x86_64-gcc-compat", "mingw-w64-clang-x86_64-clang") -NoNewWindow -Wait - uses: actions/setup-go@v5 with: go-version-file: go.mod - cache: true - - run: | - case ${{ matrix.arch }} in - amd64) echo ARCH=amd64 ;; - arm64) echo ARCH=arm64 ;; - esac >>$GITHUB_ENV - shell: bash + - uses: golangci/golangci-lint-action@v6 + with: + args: --timeout 10m0s -v - run: go test ./... 
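On both Linux and Windows the build itself now reduces to configuring and building a named CMake preset. A small Go harness along these lines (ours; it assumes cmake is on PATH and uses the preset names from the CMakePresets.json added later in this patch) reproduces a CI build locally:

package main

import (
	"log"
	"os"
	"os/exec"
)

// run executes a command with output streamed, failing fast like a CI step.
func run(name string, args ...string) {
	cmd := exec.Command(name, args...)
	cmd.Stdout, cmd.Stderr = os.Stdout, os.Stderr
	if err := cmd.Run(); err != nil {
		log.Fatalf("%s: %v", name, err)
	}
}

func main() {
	preset := "CPU" // or "CUDA", "ROCm", matching matrix.preset
	// matrix.flags becomes extra cache variables, e.g. -DCMAKE_CUDA_ARCHITECTURES=87
	run("cmake", "--preset", preset)
	run("cmake", "--build", "--parallel", "--preset", preset)
}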
patches: - needs: [changes] - if: ${{ needs.changes.outputs.RUNNERS == 'True' }} runs-on: ubuntu-latest steps: - uses: actions/checkout@v4 - with: - submodules: recursive - - name: Verify patches carry all the changes + - name: Verify patches apply cleanly and do not change files run: | - make apply-patches sync && git diff --compact-summary --exit-code llama + make -f Makefile.sync clean checkout sync + git diff --compact-summary --exit-code diff --git a/.gitignore b/.gitignore index caa62a52..551abec8 100644 --- a/.gitignore +++ b/.gitignore @@ -4,12 +4,13 @@ .venv .swp dist +build ollama .cache *.exe .idea test_data *.crt -llama/build __debug_bin* -llama/vendor \ No newline at end of file +llama/build +llama/vendor diff --git a/CMakeLists.txt b/CMakeLists.txt new file mode 100644 index 00000000..19d9bd8f --- /dev/null +++ b/CMakeLists.txt @@ -0,0 +1,112 @@ +cmake_minimum_required(VERSION 3.21) + +project(Ollama C CXX) + +include(CheckLanguage) + +find_package(Threads REQUIRED) + +set(CMAKE_BUILD_TYPE Release) +set(BUILD_SHARED_LIBS ON) + +set(CMAKE_CXX_STANDARD 17) +set(CMAKE_CXX_STANDARD_REQUIRED ON) +set(CMAKE_CXX_EXTENSIONS OFF) + +set(GGML_BUILD ON) +set(GGML_SHARED ON) +set(GGML_CCACHE ON) +set(GGML_BACKEND_DL ON) +set(GGML_BACKEND_SHARED ON) +set(GGML_SCHED_MAX_COPIES 4) + +set(GGML_LLAMAFILE ON) +set(GGML_CUDA_PEER_MAX_BATCH_SIZE 128) +set(GGML_CUDA_GRAPHS ON) + +if((NOT CMAKE_OSX_ARCHITECTURES MATCHES "arm64") + OR (NOT CMAKE_OSX_ARCHITECTURES AND NOT CMAKE_SYSTEM_PROCESSOR MATCHES "arm|aarch64|ARM64|ARMv[0-9]+")) + set(GGML_CPU_ALL_VARIANTS ON) +endif() + +set(OLLAMA_BUILD_DIR ${CMAKE_BINARY_DIR}/lib/ollama) +set(OLLAMA_INSTALL_DIR ${CMAKE_INSTALL_PREFIX}/lib/ollama) + +set(CMAKE_RUNTIME_OUTPUT_DIRECTORY ${OLLAMA_BUILD_DIR}) +set(CMAKE_RUNTIME_OUTPUT_DIRECTORY_DEBUG ${OLLAMA_BUILD_DIR}) +set(CMAKE_RUNTIME_OUTPUT_DIRECTORY_RELEASE ${OLLAMA_BUILD_DIR}) +set(CMAKE_LIBRARY_OUTPUT_DIRECTORY ${OLLAMA_BUILD_DIR}) +set(CMAKE_LIBRARY_OUTPUT_DIRECTORY_DEBUG ${OLLAMA_BUILD_DIR}) +set(CMAKE_LIBRARY_OUTPUT_DIRECTORY_RELEASE ${OLLAMA_BUILD_DIR}) + +include_directories(${CMAKE_CURRENT_SOURCE_DIR}/ml/backend/ggml/ggml/src) +include_directories(${CMAKE_CURRENT_SOURCE_DIR}/ml/backend/ggml/ggml/src/include) +include_directories(${CMAKE_CURRENT_SOURCE_DIR}/ml/backend/ggml/ggml/src/ggml-cpu) +include_directories(${CMAKE_CURRENT_SOURCE_DIR}/ml/backend/ggml/ggml/src/ggml-cpu/amx) + +set(GGML_CPU ON) +add_subdirectory(${CMAKE_CURRENT_SOURCE_DIR}/ml/backend/ggml/ggml/src) +set_property(TARGET ggml PROPERTY EXCLUDE_FROM_ALL TRUE) + +get_target_property(CPU_VARIANTS ggml-cpu MANUALLY_ADDED_DEPENDENCIES) +if(NOT CPU_VARIANTS) + set(CPU_VARIANTS "ggml-cpu") +endif() + +install(TARGETS ggml-base ${CPU_VARIANTS} + RUNTIME_DEPENDENCIES + PRE_EXCLUDE_REGEXES ".*" + RUNTIME DESTINATION ${OLLAMA_INSTALL_DIR} COMPONENT CPU + LIBRARY DESTINATION ${OLLAMA_INSTALL_DIR} COMPONENT CPU + FRAMEWORK DESTINATION ${OLLAMA_INSTALL_DIR} COMPONENT CPU +) + +check_language(CUDA) +if(CMAKE_CUDA_COMPILER) + if(CMAKE_VERSION VERSION_GREATER_EQUAL "3.24" AND NOT CMAKE_CUDA_ARCHITECTURES) + set(CMAKE_CUDA_ARCHITECTURES "native") + endif() + + find_package(CUDAToolkit) + add_subdirectory(${CMAKE_CURRENT_SOURCE_DIR}/ml/backend/ggml/ggml/src/ggml-cuda) + set(OLLAMA_CUDA_INSTALL_DIR ${OLLAMA_INSTALL_DIR}/cuda_v${CUDAToolkit_VERSION_MAJOR}) + install(TARGETS ggml-cuda + RUNTIME_DEPENDENCIES + DIRECTORIES ${CUDAToolkit_BIN_DIR} ${CUDAToolkit_LIBRARY_DIR} + PRE_INCLUDE_REGEXES cublas cublasLt cudart + PRE_EXCLUDE_REGEXES ".*" + RUNTIME 
DESTINATION ${OLLAMA_CUDA_INSTALL_DIR} COMPONENT CUDA + LIBRARY DESTINATION ${OLLAMA_CUDA_INSTALL_DIR} COMPONENT CUDA + ) +endif() + +check_language(HIP) +if(CMAKE_HIP_COMPILER) + set(HIP_PLATFORM "amd") + + find_package(hip REQUIRED) + if(NOT AMDGPU_TARGETS) + list(FILTER AMDGPU_TARGETS INCLUDE REGEX "^gfx(900|94[012]|101[02]|1030|110[012])$") + endif() + + if(AMDGPU_TARGETS) + add_subdirectory(${CMAKE_CURRENT_SOURCE_DIR}/ml/backend/ggml/ggml/src/ggml-hip) + set(OLLAMA_HIP_INSTALL_DIR ${OLLAMA_INSTALL_DIR}/rocm) + install(TARGETS ggml-hip + RUNTIME_DEPENDENCIES + DIRECTORIES ${HIP_BIN_INSTALL_DIR} ${HIP_LIB_INSTALL_DIR} + PRE_INCLUDE_REGEXES amdhip64 hipblas rocblas amd_comgr hsa_runtime64 rocprofiler-register drm_amdgpu drm numa + PRE_EXCLUDE_REGEXES ".*" + POST_EXCLUDE_REGEXES "system32" + RUNTIME DESTINATION ${OLLAMA_HIP_INSTALL_DIR} COMPONENT HIP + LIBRARY DESTINATION ${OLLAMA_HIP_INSTALL_DIR} COMPONENT HIP + ) + + foreach(HIP_LIB_BIN_INSTALL_DIR IN ITEMS ${HIP_BIN_INSTALL_DIR} ${HIP_LIB_INSTALL_DIR}) + if(EXISTS ${HIP_LIB_BIN_INSTALL_DIR}/rocblas) + install(DIRECTORY ${HIP_LIB_BIN_INSTALL_DIR}/rocblas DESTINATION ${OLLAMA_HIP_INSTALL_DIR} COMPONENT HIP) + break() + endif() + endforeach() + endif() +endif() diff --git a/CMakePresets.json b/CMakePresets.json new file mode 100644 index 00000000..e6d3f6e7 --- /dev/null +++ b/CMakePresets.json @@ -0,0 +1,110 @@ +{ + "version": 3, + "configurePresets": [ + { + "name": "Default", + "binaryDir": "${sourceDir}/build", + "installDir": "${sourceDir}/dist", + "cacheVariables": { + "CMAKE_BUILD_TYPE": "Release" + } + }, + { + "name": "CPU", + "inherits": [ "Default" ] + }, + { + "name": "CUDA", + "inherits": [ "Default" ] + }, + { + "name": "CUDA 11", + "inherits": [ "CUDA" ], + "cacheVariables": { + "CMAKE_CUDA_ARCHITECTURES": "50;52;53;60;61;62;70;72;75;80;86" + } + }, + { + "name": "CUDA 12", + "inherits": [ "CUDA" ], + "cacheVariables": { + "CMAKE_CUDA_ARCHITECTURES": "60;61;62;70;72;75;80;86;87;89;90;90a" + } + }, + { + "name": "JetPack 5", + "inherits": [ "CUDA" ], + "cacheVariables": { + "CMAKE_CUDA_ARCHITECTURES": "72;87" + } + }, + { + "name": "JetPack 6", + "inherits": [ "CUDA" ], + "cacheVariables": { + "CMAKE_CUDA_ARCHITECTURES": "87" + } + }, + { + "name": "ROCm", + "inherits": [ "Default" ], + "cacheVariables": { + "CMAKE_HIP_PLATFORM": "amd" + } + }, + { + "name": "ROCm 6", + "inherits": [ "ROCm" ], + "cacheVariables": { + "AMDGPU_TARGETS": "gfx900;gfx940;gfx941;gfx942;gfx1010;gfx1012;gfx1030;gfx1100;gfx1101;gfx1102" + } + } + ], + "buildPresets": [ + { + "name": "Default", + "configurePreset": "Default", + "configuration": "Release" + }, + { + "name": "CPU", + "configurePreset": "Default", + "targets": [ "ggml-cpu" ] + }, + { + "name": "CUDA", + "configurePreset": "CUDA", + "targets": [ "ggml-cuda" ] + }, + { + "name": "CUDA 11", + "inherits": [ "CUDA" ], + "configurePreset": "CUDA 11" + }, + { + "name": "CUDA 12", + "inherits": [ "CUDA" ], + "configurePreset": "CUDA 12" + }, + { + "name": "JetPack 5", + "inherits": [ "CUDA" ], + "configurePreset": "JetPack 5" + }, + { + "name": "JetPack 6", + "inherits": [ "CUDA" ], + "configurePreset": "JetPack 6" + }, + { + "name": "ROCm", + "configurePreset": "ROCm", + "targets": [ "ggml-hip" ] + }, + { + "name": "ROCm 6", + "inherits": [ "ROCm" ], + "configurePreset": "ROCm 6" + } + ] +} diff --git a/Dockerfile b/Dockerfile index 47228df6..0a8cb99f 100644 --- a/Dockerfile +++ b/Dockerfile @@ -1,201 +1,128 @@ -ARG GOLANG_VERSION=1.22.8 -ARG CUDA_VERSION_11=11.3.1 -ARG CUDA_VERSION_12=12.4.0 
-ARG ROCM_VERSION=6.1.2 -ARG JETPACK_6=r36.2.0 -ARG JETPACK_5=r35.4.1 +# vim: filetype=dockerfile -### To create a local image for building linux binaries on mac or windows with efficient incremental builds -# -# docker build --platform linux/amd64 -t builder-amd64 -f Dockerfile --target unified-builder-amd64 . -# docker run --platform linux/amd64 --rm -it -v $(pwd):/go/src/github.com/ollama/ollama/ builder-amd64 -# -### Then incremental builds will be much faster in this container -# -# make -j 10 dist -# -FROM --platform=linux/amd64 rocm/dev-centos-7:${ROCM_VERSION}-complete AS unified-builder-amd64 -ARG GOLANG_VERSION -ARG CUDA_VERSION_11 -ARG CUDA_VERSION_12 -COPY ./scripts/rh_linux_deps.sh / -ENV PATH /opt/rh/devtoolset-10/root/usr/bin:/usr/local/cuda/bin:$PATH -ENV LD_LIBRARY_PATH=${LD_LIBRARY_PATH}:/usr/local/cuda/lib64 -RUN GOLANG_VERSION=${GOLANG_VERSION} sh /rh_linux_deps.sh -RUN yum-config-manager --add-repo https://developer.download.nvidia.com/compute/cuda/repos/rhel7/x86_64/cuda-rhel7.repo && \ - dnf clean all && \ - dnf install -y \ - zsh \ - cuda-toolkit-$(echo ${CUDA_VERSION_11} | cut -f1-2 -d. | sed -e "s/\./-/g") \ - cuda-toolkit-$(echo ${CUDA_VERSION_12} | cut -f1-2 -d. | sed -e "s/\./-/g") -# TODO intel oneapi goes here... -ENV GOARCH amd64 -ENV CGO_ENABLED 1 -WORKDIR /go/src/github.com/ollama/ollama/ -ENTRYPOINT [ "zsh" ] +ARG FLAVOR=${TARGETARCH} -### To create a local image for building linux binaries on mac or linux/arm64 with efficient incremental builds -# Note: this does not contain jetson variants -# -# docker build --platform linux/arm64 -t builder-arm64 -f Dockerfile --target unified-builder-arm64 . -# docker run --platform linux/arm64 --rm -it -v $(pwd):/go/src/github.com/ollama/ollama/ builder-arm64 -# -FROM --platform=linux/arm64 rockylinux:8 AS unified-builder-arm64 -ARG GOLANG_VERSION -ARG CUDA_VERSION_11 -ARG CUDA_VERSION_12 -COPY ./scripts/rh_linux_deps.sh / -RUN GOLANG_VERSION=${GOLANG_VERSION} sh /rh_linux_deps.sh -RUN yum-config-manager --add-repo https://developer.download.nvidia.com/compute/cuda/repos/rhel8/sbsa/cuda-rhel8.repo && \ - dnf config-manager --set-enabled appstream && \ - dnf clean all && \ - dnf install -y \ - zsh \ - cuda-toolkit-$(echo ${CUDA_VERSION_11} | cut -f1-2 -d. | sed -e "s/\./-/g") \ - cuda-toolkit-$(echo ${CUDA_VERSION_12} | cut -f1-2 -d. | sed -e "s/\./-/g") -ENV PATH /opt/rh/gcc-toolset-10/root/usr/bin:$PATH:/usr/local/cuda/bin -ENV LD_LIBRARY_PATH=${LD_LIBRARY_PATH}:/usr/local/cuda/lib64 -ENV LIBRARY_PATH=/usr/local/cuda/lib64/stubs:/opt/amdgpu/lib64 -ENV GOARCH arm64 -ENV CGO_ENABLED 1 -WORKDIR /go/src/github.com/ollama/ollama/ -ENTRYPOINT [ "zsh" ] +ARG ROCMVERSION=6.1.2 +ARG JETPACK5VERSION=r35.4.1 +ARG JETPACK6VERSION=r36.2.0 +ARG CMAKEVERSION=3.31.2 -FROM --platform=linux/amd64 unified-builder-amd64 AS build-amd64 -COPY . . 
-ARG OLLAMA_SKIP_CUDA_GENERATE -ARG OLLAMA_SKIP_ROCM_GENERATE -ARG OLLAMA_FAST_BUILD -ARG VERSION -ARG CUSTOM_CPU_FLAGS +FROM --platform=linux/amd64 rocm/dev-centos-7:${ROCMVERSION}-complete AS base-amd64 +RUN sed -i -e 's/mirror.centos.org/vault.centos.org/g' -e 's/^#.*baseurl=http/baseurl=http/g' -e 's/^mirrorlist=http/#mirrorlist=http/g' /etc/yum.repos.d/*.repo \ + && yum install -y yum-utils devtoolset-10-gcc devtoolset-10-gcc-c++ \ + && yum-config-manager --add-repo https://developer.download.nvidia.com/compute/cuda/repos/rhel7/x86_64/cuda-rhel7.repo \ + && curl -s -L https://github.com/ccache/ccache/releases/download/v4.10.2/ccache-4.10.2-linux-x86_64.tar.xz | tar -Jx -C /usr/local/bin --strip-components 1 +ENV PATH=/opt/rh/devtoolset-10/root/usr/bin:/opt/rh/devtoolset-11/root/usr/bin:$PATH + +FROM --platform=linux/arm64 rockylinux:8 AS base-arm64 +# install epel-release for ccache +RUN yum install -y yum-utils epel-release \ + && yum install -y clang ccache \ + && yum-config-manager --add-repo https://developer.download.nvidia.com/compute/cuda/repos/rhel8/sbsa/cuda-rhel8.repo +ENV CC=clang CXX=clang++ + +FROM base-${TARGETARCH} AS base +ARG CMAKEVERSION +RUN curl -fsSL https://github.com/Kitware/CMake/releases/download/v${CMAKEVERSION}/cmake-${CMAKEVERSION}-linux-$(uname -m).tar.gz | tar xz -C /usr/local --strip-components 1 +COPY CMakeLists.txt CMakePresets.json . +COPY ml/backend/ggml/ggml ml/backend/ggml/ggml +ENV LDFLAGS=-s + +FROM base AS cpu +# amd64 uses gcc which requires devtoolset-11 for AVX extensions while arm64 uses clang +RUN if [ "$(uname -m)" = "x86_64" ]; then yum install -y devtoolset-11-gcc devtoolset-11-gcc-c++; fi +ENV PATH=/opt/rh/devtoolset-11/root/usr/bin:$PATH RUN --mount=type=cache,target=/root/.ccache \ - if grep "^flags" /proc/cpuinfo|grep avx>/dev/null; then \ - make -j $(nproc) dist ; \ - else \ - make -j 5 dist ; \ - fi -RUN cd dist/linux-$GOARCH && \ - tar -cf - . | pigz --best > ../ollama-linux-$GOARCH.tgz -RUN if [ -z ${OLLAMA_SKIP_ROCM_GENERATE} ] ; then \ - cd dist/linux-$GOARCH-rocm && \ - tar -cf - . | pigz --best > ../ollama-linux-$GOARCH-rocm.tgz ;\ - fi + cmake --preset 'CPU' \ + && cmake --build --parallel --preset 'CPU' \ + && cmake --install build --component CPU --strip --parallel 8 -# Jetsons need to be built in discrete stages -FROM --platform=linux/arm64 nvcr.io/nvidia/l4t-jetpack:${JETPACK_5} AS runners-jetpack5-arm64 -ARG GOLANG_VERSION -RUN apt-get update && apt-get install -y git curl ccache && \ - curl -s -L https://dl.google.com/go/go${GOLANG_VERSION}.linux-arm64.tar.gz | tar xz -C /usr/local && \ - ln -s /usr/local/go/bin/go /usr/local/bin/go && \ - ln -s /usr/local/go/bin/gofmt /usr/local/bin/gofmt && \ - apt-get clean && rm -rf /var/lib/apt/lists/* -WORKDIR /go/src/github.com/ollama/ollama/ -COPY . . 
-ARG CGO_CFLAGS -ENV GOARCH arm64 -ARG VERSION +FROM base AS cuda-11 +ARG CUDA11VERSION=11.3 +RUN yum install -y cuda-toolkit-${CUDA11VERSION//./-} +ENV PATH=/usr/local/cuda-11/bin:$PATH RUN --mount=type=cache,target=/root/.ccache \ - make -j 5 dist_cuda_v11 \ - CUDA_ARCHITECTURES="72;87" \ - GPU_RUNNER_VARIANT=_jetpack5 \ - DIST_LIB_DIR=/go/src/github.com/ollama/ollama/dist/linux-arm64-jetpack5/lib/ollama \ - DIST_GPU_RUNNER_DEPS_DIR=/go/src/github.com/ollama/ollama/dist/linux-arm64-jetpack5/lib/ollama/cuda_jetpack5 + cmake --preset 'CUDA 11' \ + && cmake --build --parallel --preset 'CUDA 11' \ + && cmake --install build --component CUDA --strip --parallel 8 -FROM --platform=linux/arm64 nvcr.io/nvidia/l4t-jetpack:${JETPACK_6} AS runners-jetpack6-arm64 -ARG GOLANG_VERSION -RUN apt-get update && apt-get install -y git curl ccache && \ - curl -s -L https://dl.google.com/go/go${GOLANG_VERSION}.linux-arm64.tar.gz | tar xz -C /usr/local && \ - ln -s /usr/local/go/bin/go /usr/local/bin/go && \ - ln -s /usr/local/go/bin/gofmt /usr/local/bin/gofmt && \ - apt-get clean && rm -rf /var/lib/apt/lists/* -WORKDIR /go/src/github.com/ollama/ollama/ -COPY . . -ARG CGO_CFLAGS -ENV GOARCH arm64 -ARG VERSION +FROM base AS cuda-12 +ARG CUDA12VERSION=12.4 +RUN yum install -y cuda-toolkit-${CUDA12VERSION//./-} +ENV PATH=/usr/local/cuda-12/bin:$PATH RUN --mount=type=cache,target=/root/.ccache \ - make -j 5 dist_cuda_v12 \ - CUDA_ARCHITECTURES="87" \ - GPU_RUNNER_VARIANT=_jetpack6 \ - DIST_LIB_DIR=/go/src/github.com/ollama/ollama/dist/linux-arm64-jetpack6/lib/ollama \ - DIST_GPU_RUNNER_DEPS_DIR=/go/src/github.com/ollama/ollama/dist/linux-arm64-jetpack6/lib/ollama/cuda_jetpack6 + cmake --preset 'CUDA 12' \ + && cmake --build --parallel --preset 'CUDA 12' \ + && cmake --install build --component CUDA --strip --parallel 8 -FROM --platform=linux/arm64 unified-builder-arm64 AS build-arm64 -COPY . . -ARG OLLAMA_SKIP_CUDA_GENERATE -ARG OLLAMA_FAST_BUILD -ARG VERSION +FROM base AS rocm-6 RUN --mount=type=cache,target=/root/.ccache \ - make -j 5 dist -COPY --from=runners-jetpack5-arm64 /go/src/github.com/ollama/ollama/dist/ dist/ -COPY --from=runners-jetpack6-arm64 /go/src/github.com/ollama/ollama/dist/ dist/ -RUN cd dist/linux-$GOARCH && \ - tar -cf - . | pigz --best > ../ollama-linux-$GOARCH.tgz -RUN cd dist/linux-$GOARCH-jetpack5 && \ - tar -cf - . | pigz --best > ../ollama-linux-$GOARCH-jetpack5.tgz -RUN cd dist/linux-$GOARCH-jetpack6 && \ - tar -cf - . | pigz --best > ../ollama-linux-$GOARCH-jetpack6.tgz + cmake --preset 'ROCm 6' \ + && cmake --build --parallel --preset 'ROCm 6' \ + && cmake --install build --component HIP --strip --parallel 8 -FROM --platform=linux/amd64 scratch AS dist-amd64 -COPY --from=build-amd64 /go/src/github.com/ollama/ollama/dist/ollama-linux-*.tgz / -FROM --platform=linux/arm64 scratch AS dist-arm64 -COPY --from=build-arm64 /go/src/github.com/ollama/ollama/dist/ollama-linux-*.tgz / -FROM dist-$TARGETARCH AS dist +FROM --platform=linux/arm64 nvcr.io/nvidia/l4t-jetpack:${JETPACK5VERSION} AS jetpack-5 +ARG CMAKEVERSION +RUN apt-get update && apt-get install -y curl ccache \ + && curl -fsSL https://github.com/Kitware/CMake/releases/download/v${CMAKEVERSION}/cmake-${CMAKEVERSION}-linux-$(uname -m).tar.gz | tar xz -C /usr/local --strip-components 1 +COPY CMakeLists.txt CMakePresets.json . 
+COPY ml/backend/ggml/ggml ml/backend/ggml/ggml +RUN --mount=type=cache,target=/root/.ccache \ + cmake --preset 'JetPack 5' \ + && cmake --build --parallel --preset 'JetPack 5' \ + && cmake --install build --component CUDA --strip --parallel 8 +FROM --platform=linux/arm64 nvcr.io/nvidia/l4t-jetpack:${JETPACK6VERSION} AS jetpack-6 +ARG CMAKEVERSION +RUN apt-get update && apt-get install -y curl ccache \ + && curl -fsSL https://github.com/Kitware/CMake/releases/download/v${CMAKEVERSION}/cmake-${CMAKEVERSION}-linux-$(uname -m).tar.gz | tar xz -C /usr/local --strip-components 1 +COPY CMakeLists.txt CMakePresets.json . +COPY ml/backend/ggml/ggml ml/backend/ggml/ggml +RUN --mount=type=cache,target=/root/.ccache \ + cmake --preset 'JetPack 6' \ + && cmake --build --parallel --preset 'JetPack 6' \ + && cmake --install build --component CUDA --strip --parallel 8 -# For amd64 container images, filter out cuda/rocm to minimize size -FROM build-amd64 AS runners-cuda-amd64 -RUN rm -rf \ - ./dist/linux-amd64/lib/ollama/libggml_hipblas.so \ - ./dist/linux-amd64/lib/ollama/runners/rocm* +FROM base AS build +ARG GOVERSION=1.23.4 +RUN curl -fsSL https://golang.org/dl/go${GOVERSION}.linux-$(case $(uname -m) in x86_64) echo amd64 ;; aarch64) echo arm64 ;; esac).tar.gz | tar xz -C /usr/local +ENV PATH=/usr/local/go/bin:$PATH +WORKDIR /go/src/github.com/ollama/ollama +COPY . . +ARG GOFLAGS="'-ldflags=-w -s'" +ENV CGO_ENABLED=1 +RUN --mount=type=cache,target=/root/.cache/go-build \ + go build -trimpath -buildmode=pie -o /bin/ollama . -FROM build-amd64 AS runners-rocm-amd64 -RUN rm -rf \ - ./dist/linux-amd64/lib/ollama/libggml_cuda*.so \ - ./dist/linux-amd64/lib/ollama/libcu*.so* \ - ./dist/linux-amd64/lib/ollama/runners/cuda* +FROM --platform=linux/amd64 scratch AS amd64 +COPY --from=cuda-11 dist/lib/ollama/cuda_v11 /lib/ollama/cuda_v11 +COPY --from=cuda-12 dist/lib/ollama/cuda_v12 /lib/ollama/cuda_v12 -FROM --platform=linux/amd64 ubuntu:22.04 AS runtime-amd64 -RUN apt-get update && \ - apt-get install -y ca-certificates && \ - apt-get clean && rm -rf /var/lib/apt/lists/* -COPY --from=build-amd64 /go/src/github.com/ollama/ollama/dist/linux-amd64/bin/ /bin/ -COPY --from=runners-cuda-amd64 /go/src/github.com/ollama/ollama/dist/linux-amd64/lib/ /lib/ +FROM --platform=linux/arm64 scratch AS arm64 +COPY --from=cuda-11 dist/lib/ollama/cuda_v11 /lib/ollama/cuda_v11 +COPY --from=cuda-12 dist/lib/ollama/cuda_v12 /lib/ollama/cuda_v12 +COPY --from=jetpack-5 dist/lib/ollama/cuda_v11 lib/ollama/cuda_jetpack5 +COPY --from=jetpack-6 dist/lib/ollama/cuda_v12 lib/ollama/cuda_jetpack6 -FROM --platform=linux/arm64 ubuntu:22.04 AS runtime-arm64 -RUN apt-get update && \ - apt-get install -y ca-certificates && \ - apt-get clean && rm -rf /var/lib/apt/lists/* -COPY --from=build-arm64 /go/src/github.com/ollama/ollama/dist/linux-arm64/bin/ /bin/ -COPY --from=build-arm64 /go/src/github.com/ollama/ollama/dist/linux-arm64/lib/ /lib/ -COPY --from=runners-jetpack5-arm64 /go/src/github.com/ollama/ollama/dist/linux-arm64-jetpack5/lib/ /lib/ -COPY --from=runners-jetpack6-arm64 /go/src/github.com/ollama/ollama/dist/linux-arm64-jetpack6/lib/ /lib/ +FROM --platform=linux/arm64 scratch AS rocm +COPY --from=rocm-6 dist/lib/ollama/rocm /lib/ollama/rocm +FROM ${FLAVOR} AS archive +COPY --from=cpu dist/lib/ollama /lib/ollama +COPY --from=build /bin/ollama /bin/ollama -# ROCm libraries larger so we keep it distinct from the CPU/CUDA image -FROM --platform=linux/amd64 ubuntu:22.04 AS runtime-rocm -# Frontload the rocm libraries which are large, and 
rarely change to increase chance of a common layer -# across releases -COPY --from=build-amd64 /go/src/github.com/ollama/ollama/dist/linux-amd64-rocm/lib/ /lib/ -RUN apt-get update && \ - apt-get install -y ca-certificates && \ - apt-get clean && rm -rf /var/lib/apt/lists/* -COPY --from=build-amd64 /go/src/github.com/ollama/ollama/dist/linux-amd64/bin/ /bin/ -COPY --from=runners-rocm-amd64 /go/src/github.com/ollama/ollama/dist/linux-amd64/lib/ /lib/ - -EXPOSE 11434 -ENV OLLAMA_HOST 0.0.0.0 - -ENTRYPOINT ["/bin/ollama"] -CMD ["serve"] - -FROM runtime-$TARGETARCH -EXPOSE 11434 -ENV OLLAMA_HOST 0.0.0.0 +FROM ubuntu:20.04 +RUN apt-get update \ + && apt-get install -y ca-certificates \ + && apt-get clean \ + && rm -rf /var/lib/apt/lists/* +COPY --from=archive /bin /usr/bin ENV PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin +COPY --from=archive /lib/ollama /usr/lib/ollama ENV LD_LIBRARY_PATH=/usr/local/nvidia/lib:/usr/local/nvidia/lib64 ENV NVIDIA_DRIVER_CAPABILITIES=compute,utility ENV NVIDIA_VISIBLE_DEVICES=all - +ENV OLLAMA_HOST=0.0.0.0:11434 +EXPOSE 11434 ENTRYPOINT ["/bin/ollama"] CMD ["serve"] diff --git a/Makefile b/Makefile deleted file mode 100644 index 383354ee..00000000 --- a/Makefile +++ /dev/null @@ -1,103 +0,0 @@ -# top level makefile for Ollama -include make/common-defs.make - - -# Determine which if any GPU runners we should build -include make/cuda-v11-defs.make -include make/cuda-v12-defs.make -include make/rocm-defs.make - -ifeq ($(CUSTOM_CPU_FLAGS),) -ifeq ($(ARCH),amd64) - RUNNER_TARGETS=cpu -endif -# Without CUSTOM_CPU_FLAGS we default to build both v11 and v12 if present -ifeq ($(OLLAMA_SKIP_CUDA_GENERATE),) -ifneq ($(CUDA_11_COMPILER),) - RUNNER_TARGETS += cuda_v11 -endif -ifneq ($(CUDA_12_COMPILER),) - RUNNER_TARGETS += cuda_v12 -endif -endif -else # CUSTOM_CPU_FLAGS is set, we'll build only the latest cuda version detected -ifneq ($(CUDA_12_COMPILER),) - RUNNER_TARGETS += cuda_v12 -else ifneq ($(CUDA_11_COMPILER),) - RUNNER_TARGETS += cuda_v11 -endif -endif - -ifeq ($(OLLAMA_SKIP_ROCM_GENERATE),) -ifneq ($(HIP_COMPILER),) - RUNNER_TARGETS += rocm -endif -endif - - -all: runners exe - -dist: $(addprefix dist_, $(RUNNER_TARGETS)) dist_exe - -dist_%: - @$(MAKE) --no-print-directory -f make/Makefile.$* dist - -runners: $(RUNNER_TARGETS) - -$(RUNNER_TARGETS): - @$(MAKE) --no-print-directory -f make/Makefile.$@ - -exe dist_exe: - @$(MAKE) --no-print-directory -f make/Makefile.ollama $@ - -help-sync apply-patches create-patches sync sync-clean: - @$(MAKE) --no-print-directory -f make/Makefile.sync $@ - -test integration lint: - @$(MAKE) --no-print-directory -f make/Makefile.test $@ - -clean: - rm -rf $(BUILD_DIR) $(DIST_LIB_DIR) $(OLLAMA_EXE) $(DIST_OLLAMA_EXE) - go clean -cache - -help: - @echo "The following make targets will help you build Ollama" - @echo "" - @echo " make all # (default target) Build Ollama llm subprocess runners, and the primary ollama executable" - @echo " make runners # Build Ollama llm subprocess runners; after you may use 'go build .' to build the primary ollama exectuable" - @echo " make # Build specific runners. 
Enabled: '$(RUNNER_TARGETS)'" - @echo " make dist # Build the runners and primary ollama executable for distribution" - @echo " make help-sync # Help information on vendor update targets" - @echo " make help-runners # Help information on runner targets" - @echo "" - @echo "The following make targets will help you test Ollama" - @echo "" - @echo " make test # Run unit tests" - @echo " make integration # Run integration tests. You must 'make all' first" - @echo " make lint # Run lint and style tests" - @echo "" - @echo "For more information see 'docs/development.md'" - @echo "" - - -help-runners: - @echo "The following runners will be built based on discovered GPU libraries: '$(RUNNER_TARGETS)'" - @echo "" - @echo "GPU Runner CPU Flags: '$(GPU_RUNNER_CPU_FLAGS)' (Override with CUSTOM_CPU_FLAGS)" - @echo "" - @echo "# CUDA_PATH sets the location where CUDA toolkits are present" - @echo "CUDA_PATH=$(CUDA_PATH)" - @echo " CUDA_11_PATH=$(CUDA_11_PATH)" - @echo " CUDA_11_COMPILER=$(CUDA_11_COMPILER)" - @echo " CUDA_12_PATH=$(CUDA_12_PATH)" - @echo " CUDA_12_COMPILER=$(CUDA_12_COMPILER)" - @echo "" - @echo "# HIP_PATH sets the location where the ROCm toolkit is present" - @echo "HIP_PATH=$(HIP_PATH)" - @echo " HIP_COMPILER=$(HIP_COMPILER)" - -.PHONY: all exe dist help help-sync help-runners test integration lint runners clean $(RUNNER_TARGETS) - -# Handy debugging for make variables -print-%: - @echo '$*=$($*)' diff --git a/Makefile.sync b/Makefile.sync new file mode 100644 index 00000000..3001487d --- /dev/null +++ b/Makefile.sync @@ -0,0 +1,56 @@ +UPSTREAM=https://github.com/ggerganov/llama.cpp.git +WORKDIR=llama/vendor +FETCH_HEAD=46e3556e01b824e52395fb050b29804b6cff2a7c + +.PHONY: help +help: + @echo "Available targets:" + @echo " sync Sync with upstream repositories" + @echo " checkout Checkout upstream repository" + @echo " apply-patches Apply patches to local repository" + @echo " format-patches Format patches from local repository" + @echo " clean Clean local repository" + @echo + @echo "Example:" + @echo " make -f $(lastword $(MAKEFILE_LIST)) clean sync" + +.PHONY: sync +sync: llama/llama.cpp ml/backend/ggml/ggml apply-patches + +.PHONY: llama/llama.cpp +llama/llama.cpp: llama/vendor/ apply-patches + rsync -arvzc -f "merge $@/.rsync-filter" $< $@ + +.PHONY: ml/backend/ggml/ggml apply-patches +ml/backend/ggml/ggml: llama/vendor/ggml/ apply-patches + rsync -arvzc -f "merge $@/.rsync-filter" $< $@ + +PATCHES=$(wildcard llama/patches/*.patch) + +.PHONY: apply-patches +.NOTPARALLEL: +apply-patches: $(addsuffix ed, $(PATCHES)) + +%.patched: %.patch + @if git -c user.name=nobody -c 'user.email=<>' -C $(WORKDIR) am -3 $(realpath $<); then touch $@; else git -C $(WORKDIR) am --abort; exit 1; fi + +.PHONY: checkout +checkout: $(WORKDIR) + git -C $(WORKDIR) fetch + git -C $(WORKDIR) checkout -f $(FETCH_HEAD) + +$(WORKDIR): + git clone $(UPSTREAM) $(WORKDIR) + +.PHONY: format-patches +format-patches: llama/patches + git -C $(WORKDIR) format-patch \ + --no-signature \ + --no-numbered \ + --zero-commit \ + -o $(realpath $<) \ + $(FETCH_HEAD) + +.PHONY: clean +clean: checkout + $(RM) $(addsuffix ed, $(PATCHES)) diff --git a/discover/amd_common.go b/discover/amd_common.go index 3c630861..08834b22 100644 --- a/discover/amd_common.go +++ b/discover/amd_common.go @@ -9,8 +9,6 @@ import ( "path/filepath" "runtime" "strings" - - "github.com/ollama/ollama/envconfig" ) // Determine if the given ROCm lib directory is usable by checking for existence of some glob patterns @@ -41,13 +39,10 @@ func
commonAMDValidateLibDir() (string, error) { // Favor our bundled version // Installer payload location if we're running the installed binary - exe, err := os.Executable() - if err == nil { - rocmTargetDir := filepath.Join(filepath.Dir(exe), envconfig.LibRelativeToExe(), "lib", "ollama") - if rocmLibUsable(rocmTargetDir) { - slog.Debug("detected ROCM next to ollama executable " + rocmTargetDir) - return rocmTargetDir, nil - } + rocmTargetDir := filepath.Join(LibOllamaPath, "rocm") + if rocmLibUsable(rocmTargetDir) { + slog.Debug("detected ROCM next to ollama executable " + rocmTargetDir) + return rocmTargetDir, nil } // Prefer explicit HIP env var diff --git a/discover/amd_linux.go b/discover/amd_linux.go index ecf91056..830fa1df 100644 --- a/discover/amd_linux.go +++ b/discover/amd_linux.go @@ -77,8 +77,7 @@ func AMDGetGPUInfo() ([]RocmGPUInfo, error) { gfxOverride := envconfig.HsaOverrideGfxVersion() var supported []string - depPaths := LibraryDirs() - libDir := "" + var libDir string // The amdgpu driver always exposes the host CPU(s) first, but we have to skip them and subtract // from the other IDs to get alignment with the HIP libraries expectations (zero is the first GPU, not the CPU) @@ -353,9 +352,8 @@ func AMDGetGPUInfo() ([]RocmGPUInfo, error) { }) return nil, err } - depPaths = append(depPaths, libDir) } - gpuInfo.DependencyPath = depPaths + gpuInfo.DependencyPath = []string{libDir} if gfxOverride == "" { // Only load supported list once diff --git a/discover/amd_windows.go b/discover/amd_windows.go index 9477bedc..0659d12f 100644 --- a/discover/amd_windows.go +++ b/discover/amd_windows.go @@ -5,7 +5,6 @@ import ( "errors" "fmt" "log/slog" - "os" "path/filepath" "slices" "strconv" @@ -50,14 +49,13 @@ func AMDGetGPUInfo() ([]RocmGPUInfo, error) { slog.Info(err.Error()) return nil, err } - depPaths := LibraryDirs() + libDir, err := AMDValidateLibDir() if err != nil { err = fmt.Errorf("unable to verify rocm library: %w", err) slog.Warn(err.Error()) return nil, err } - depPaths = append(depPaths, libDir) var supported []string gfxOverride := envconfig.HsaOverrideGfxVersion() @@ -113,7 +111,7 @@ func AMDGetGPUInfo() ([]RocmGPUInfo, error) { UnreliableFreeMemory: true, ID: strconv.Itoa(i), // TODO this is probably wrong if we specify visible devices - DependencyPath: depPaths, + DependencyPath: []string{libDir}, MinimumMemory: rocmMinimumMemory, Name: name, Compute: gfx, @@ -164,9 +162,7 @@ func AMDValidateLibDir() (string, error) { } // Installer payload (if we're running from some other location) - localAppData := os.Getenv("LOCALAPPDATA") - appDir := filepath.Join(localAppData, "Programs", "Ollama") - rocmTargetDir := filepath.Join(appDir, envconfig.LibRelativeToExe(), "lib", "ollama") + rocmTargetDir := filepath.Join(LibOllamaPath, "rocm") if rocmLibUsable(rocmTargetDir) { slog.Debug("detected ollama installed ROCm at " + rocmTargetDir) return rocmTargetDir, nil diff --git a/discover/gpu.go b/discover/gpu.go index 2e83b74f..ba906a18 100644 --- a/discover/gpu.go +++ b/discover/gpu.go @@ -23,7 +23,6 @@ import ( "github.com/ollama/ollama/envconfig" "github.com/ollama/ollama/format" - "github.com/ollama/ollama/runners" ) type cudaHandles struct { @@ -101,15 +100,7 @@ func initCudaHandles() *cudaHandles { // Aligned with driver, we can't carry as payloads nvcudaMgmtPatterns := NvcudaGlobs - - if runtime.GOOS == "windows" { - localAppData := os.Getenv("LOCALAPPDATA") - cudartMgmtPatterns = []string{filepath.Join(localAppData, "Programs", "Ollama", CudartMgmtName)} - } - libDirs := 
LibraryDirs() - for _, d := range libDirs { - cudartMgmtPatterns = append(cudartMgmtPatterns, filepath.Join(d, CudartMgmtName)) - } + cudartMgmtPatterns = append(cudartMgmtPatterns, filepath.Join(LibOllamaPath, "cuda_v*", CudartMgmtName)) cudartMgmtPatterns = append(cudartMgmtPatterns, CudartGlobs...) if len(NvmlGlobs) > 0 { @@ -240,7 +231,7 @@ func GetGPUInfo() GpuInfoList { if err != nil { slog.Warn("error looking up system memory", "error", err) } - depPaths := LibraryDirs() + details, err := GetCPUDetails() if err != nil { slog.Warn("failed to lookup CPU details", "error", err) @@ -248,11 +239,9 @@ func GetGPUInfo() GpuInfoList { cpus = []CPUInfo{ { GpuInfo: GpuInfo{ - memInfo: mem, - Library: "cpu", - Variant: runners.GetCPUCapability().String(), - ID: "0", - DependencyPath: depPaths, + memInfo: mem, + Library: "cpu", + ID: "0", }, CPUs: details, }, @@ -294,17 +283,13 @@ func GetGPUInfo() GpuInfoList { gpuInfo.DriverMajor = driverMajor gpuInfo.DriverMinor = driverMinor variant := cudaVariant(gpuInfo) - if depPaths != nil { - gpuInfo.DependencyPath = depPaths - // Check for variant specific directory - if variant != "" { - for _, d := range depPaths { - if _, err := os.Stat(filepath.Join(d, "cuda_"+variant)); err == nil { - // Put the variant directory first in the search path to avoid runtime linking to the wrong library - gpuInfo.DependencyPath = append([]string{filepath.Join(d, "cuda_"+variant)}, gpuInfo.DependencyPath...) - break - } - } + + // Start with our bundled libraries + if variant != "" { + variantPath := filepath.Join(LibOllamaPath, "cuda_"+variant) + if _, err := os.Stat(variantPath); err == nil { + // Put the variant directory first in the search path to avoid runtime linking to the wrong library + gpuInfo.DependencyPath = append([]string{variantPath}, gpuInfo.DependencyPath...) 
} } gpuInfo.Name = C.GoString(&memInfo.gpu_name[0]) @@ -376,7 +361,7 @@ func GetGPUInfo() GpuInfoList { gpuInfo.FreeMemory = uint64(memInfo.free) gpuInfo.ID = C.GoString(&memInfo.gpu_id[0]) gpuInfo.Name = C.GoString(&memInfo.gpu_name[0]) - gpuInfo.DependencyPath = depPaths + gpuInfo.DependencyPath = []string{LibOllamaPath} oneapiGPUs = append(oneapiGPUs, gpuInfo) } } @@ -512,33 +497,30 @@ func GetGPUInfo() GpuInfoList { func FindGPULibs(baseLibName string, defaultPatterns []string) []string { // Multiple GPU libraries may exist, and some may not work, so keep trying until we exhaust them - var ldPaths []string gpuLibPaths := []string{} slog.Debug("Searching for GPU library", "name", baseLibName) - // Start with our bundled libraries - patterns := []string{} - for _, d := range LibraryDirs() { - patterns = append(patterns, filepath.Join(d, baseLibName)) - } + // search our bundled libraries first + patterns := []string{filepath.Join(LibOllamaPath, baseLibName)} + var ldPaths []string switch runtime.GOOS { case "windows": - ldPaths = strings.Split(os.Getenv("PATH"), ";") + ldPaths = strings.Split(os.Getenv("PATH"), string(os.PathListSeparator)) case "linux": - ldPaths = strings.Split(os.Getenv("LD_LIBRARY_PATH"), ":") - default: - return gpuLibPaths + ldPaths = strings.Split(os.Getenv("LD_LIBRARY_PATH"), string(os.PathListSeparator)) } - // Then with whatever we find in the PATH/LD_LIBRARY_PATH - for _, ldPath := range ldPaths { - d, err := filepath.Abs(ldPath) + // then search the system's LD_LIBRARY_PATH + for _, p := range ldPaths { + p, err := filepath.Abs(p) if err != nil { continue } - patterns = append(patterns, filepath.Join(d, baseLibName)) + patterns = append(patterns, filepath.Join(p, baseLibName)) } + + // finally, search the default patterns provided by the caller patterns = append(patterns, defaultPatterns...) slog.Debug("gpu library search", "globs", patterns) for _, pattern := range patterns { @@ -715,28 +697,6 @@ func (l GpuInfoList) GetVisibleDevicesEnv() (string, string) { } } -func LibraryDirs() []string { - // dependencies can exist wherever we found the runners (e.g. 
build tree for developers) and relative to the executable
-	// This can be simplified once we no longer carry runners as payloads
-	paths := []string{}
-	appExe, err := os.Executable()
-	if err != nil {
-		slog.Warn("failed to lookup executable path", "error", err)
-	} else {
-		appRelative := filepath.Join(filepath.Dir(appExe), envconfig.LibRelativeToExe(), "lib", "ollama")
-		if _, err := os.Stat(appRelative); err == nil {
-			paths = append(paths, appRelative)
-		}
-	}
-	rDir := runners.Locate()
-	if err != nil {
-		slog.Warn("unable to locate gpu dependency libraries", "error", err)
-	} else {
-		paths = append(paths, filepath.Dir(rDir))
-	}
-	return paths
-}
-
 func GetSystemInfo() SystemInfo {
 	gpus := GetGPUInfo()
 	gpuMutex.Lock()
diff --git a/discover/gpu_darwin.go b/discover/gpu_darwin.go
index 15f8f799..dd5bf6e2 100644
--- a/discover/gpu_darwin.go
+++ b/discover/gpu_darwin.go
@@ -15,7 +15,6 @@ import (
 	"syscall"
 
 	"github.com/ollama/ollama/format"
-	"github.com/ollama/ollama/runners"
 )
 
 const (
@@ -28,7 +27,6 @@ func GetGPUInfo() GpuInfoList {
 	return []GpuInfo{
 		{
 			Library: "cpu",
-			Variant: runners.GetCPUCapability().String(),
 			memInfo: mem,
 		},
 	}
@@ -51,7 +49,6 @@ func GetCPUInfo() GpuInfoList {
 	return []GpuInfo{
 		{
 			Library: "cpu",
-			Variant: runners.GetCPUCapability().String(),
 			memInfo: mem,
 		},
 	}
diff --git a/discover/path.go b/discover/path.go
new file mode 100644
index 00000000..a9a6518d
--- /dev/null
+++ b/discover/path.go
@@ -0,0 +1,53 @@
+package discover
+
+import (
+	"os"
+	"path/filepath"
+	"runtime"
+)
+
+// LibOllamaPath is the path used to look up dynamic libraries.
+// In development it's usually 'build/lib/ollama'.
+// In distribution builds it's 'lib/ollama' on Windows,
+// '../lib/ollama' on Linux, and the executable's directory on macOS.
+// Note: in distribution builds, additional GPU-specific libraries are
+// found in subdirectories of the returned path, such as
+// 'cuda_v11', 'cuda_v12', 'rocm', etc.
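+//
+// An illustrative sketch of intended use (not part of this change;
+// rocmDir is a hypothetical local, mirroring how the AMD discovery
+// code resolves its bundled ROCm payload):
+//
+//	rocmDir := filepath.Join(LibOllamaPath, "rocm")
+//	if _, err := os.Stat(rocmDir); err == nil {
+//		// bundled ROCm libraries are available under rocmDir
+//	}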
+var LibOllamaPath string = func() string { + exe, err := os.Executable() + if err != nil { + return "" + } + + exe, err = filepath.EvalSymlinks(exe) + if err != nil { + return "" + } + + libPath := filepath.Dir(exe) + switch runtime.GOOS { + case "windows": + libPath = filepath.Join(filepath.Dir(exe), "lib", "ollama") + case "linux": + libPath = filepath.Join(filepath.Dir(exe), "..", "lib", "ollama") + } + + cwd, err := os.Getwd() + if err != nil { + return "" + } + + // build paths for development + buildPaths := []string{ + filepath.Join(filepath.Dir(exe), "build", "lib", "ollama"), + filepath.Join(cwd, "build", "lib", "ollama"), + } + + for _, p := range buildPaths { + if _, err := os.Stat(p); err == nil { + return p + } + } + + return libPath +}() diff --git a/discover/types.go b/discover/types.go index 4568e3b8..c5212d94 100644 --- a/discover/types.go +++ b/discover/types.go @@ -5,7 +5,6 @@ import ( "log/slog" "github.com/ollama/ollama/format" - "github.com/ollama/ollama/runners" ) type memInfo struct { @@ -107,7 +106,7 @@ func (l GpuInfoList) ByLibrary() []GpuInfoList { for _, info := range l { found := false requested := info.Library - if info.Variant != runners.CPUCapabilityNone.String() { + if info.Variant != "" { requested += "_" + info.Variant } for i, lib := range libs { diff --git a/docs/development.md b/docs/development.md index e194dca0..3e2ed49b 100644 --- a/docs/development.md +++ b/docs/development.md @@ -1,165 +1,120 @@ # Development -Install required tools: +Install prerequisites: -- go version 1.22 or higher -- OS specific C/C++ compiler (see below) -- GNU Make +- [Go](https://go.dev/doc/install) +- C/C++ Compiler e.g. Clang on macOS, [TDM-GCC](https://jmeubank.github.io/tdm-gcc/download/) (Windows amd64) or [llvm-mingw](https://github.com/mstorsjo/llvm-mingw) (Windows arm64), GCC/Clang on Linux. - -## Overview - -Ollama uses a mix of Go and C/C++ code to interface with GPUs. The C/C++ code is compiled with both CGO and GPU library specific compilers. A set of GNU Makefiles are used to compile the project. GPU Libraries are auto-detected based on the typical environment variables used by the respective libraries, but can be overridden if necessary. The default make target will build the runners and primary Go Ollama application that will run within the repo directory. Throughout the examples below `-j 5` is suggested for 5 parallel jobs to speed up the build. You can adjust the job count based on your CPU Core count to reduce build times. If you want to relocate the built binaries, use the `dist` target and recursively copy the files in `./dist/$OS-$ARCH/` to your desired location. To learn more about the other make targets use `make help` - -Once you have built the GPU/CPU runners, you can compile the main application with `go build .` - -### MacOS - -[Download Go](https://go.dev/dl/) - -```bash -make -j 5 -``` - -Now you can run `ollama`: - -```bash -./ollama -``` - -#### Xcode 15 warnings - -If you are using Xcode newer than version 14, you may see a warning during `go build` about `ld: warning: ignoring duplicate libraries: '-lobjc'` due to Golang issue https://github.com/golang/go/issues/67799 which can be safely ignored. You can suppress the warning with `export CGO_LDFLAGS="-Wl,-no_warn_duplicate_libraries"` - -### Linux - -#### Linux CUDA (NVIDIA) - -_Your operating system distribution may already have packages for NVIDIA CUDA. Distro packages are often preferable, but instructions are distro-specific. 
Please consult distro-specific docs for dependencies if available!_
-
-Install `make`, `gcc` and `golang` as well as [NVIDIA CUDA](https://developer.nvidia.com/cuda-downloads)
-development and runtime packages.
-
-Typically the makefile will auto-detect CUDA, however, if your Linux distro
-or installation approach uses alternative paths, you can specify the location by
-overriding `CUDA_PATH` to the location of the CUDA toolkit. You can customize
-a set of target CUDA architectures by setting `CUDA_ARCHITECTURES` (e.g. `CUDA_ARCHITECTURES=50;60;70`)
+Then build and run Ollama from the root directory of the repository:

 ```
-make -j 5
+go run . serve
 ```

-If both v11 and v12 toolkits are detected, runners for both major versions will be built by default. You can build just v12 with `make cuda_v12`
+## macOS (Apple Silicon)

-#### Older Linux CUDA (NVIDIA)
+macOS on Apple Silicon supports Metal, which is built into the Ollama binary. No additional steps are required.

-To support older GPUs with Compute Capability 3.5 or 3.7, you will need to use an older version of the Driver from [Unix Driver Archive](https://www.nvidia.com/en-us/drivers/unix/) (tested with 470) and [CUDA Toolkit Archive](https://developer.nvidia.com/cuda-toolkit-archive) (tested with cuda V11). When you build Ollama, you will need to set two make variables to adjust the minimum compute capability Ollama supports via `make -j 5 CUDA_ARCHITECTURES="35;37;50;52" EXTRA_GOLDFLAGS="\"-X=github.com/ollama/ollama/discover.CudaComputeMajorMin=3\" \"-X=github.com/ollama/ollama/discover.CudaComputeMinorMin=5\""`. To find the Compute Capability of your older GPU, refer to [GPU Compute Capability](https://developer.nvidia.com/cuda-gpus).
+## macOS (Intel)

-#### Linux ROCm (AMD)
+Install prerequisites:

-_Your operating system distribution may already have packages for AMD ROCm. Distro packages are often preferable, but instructions are distro-specific. Please consult distro-specific docs for dependencies if available!_
+- [CMake](https://cmake.org/download/) or `brew install cmake`

-Install [ROCm](https://rocm.docs.amd.com/en/latest/) development packages first, as well as `make`, `gcc`, and `golang`.
-
-Typically the build scripts will auto-detect ROCm, however, if your Linux distro
-or installation approach uses unusual paths, you can specify the location by
-specifying an environment variable `HIP_PATH` to the location of the ROCm
-install (typically `/opt/rocm`). You can also customize
-the AMD GPU targets by setting HIP_ARCHS (e.g. `HIP_ARCHS=gfx1101;gfx1102`)
+Then, configure and build the project:

 ```
-make -j 5
+cmake -B build
+cmake --build build
 ```

-ROCm requires elevated privileges to access the GPU at runtime. On most distros you can add your user account to the `render` group, or run as root.
-
-#### Containerized Linux Build
-
-If you have Docker and buildx available, you can build linux binaries with `./scripts/build_linux.sh` which has the CUDA and ROCm dependencies included. The resulting artifacts are placed in `./dist` and by default the script builds both arm64 and amd64 binaries. If you want to build only amd64, you can build with `PLATFORM=linux/amd64 ./scripts/build_linux.sh`
-
-### Windows
-
-The following tools are required as a minimal development environment to build CPU inference support.
-
-- Go version 1.22 or higher
-  - https://go.dev/dl/
-- Git
-  - https://git-scm.com/download/win
-- clang with gcc compat and Make. There are multiple options on how to go about installing these tools on Windows.
We have verified the following, but others may work as well: - - [MSYS2](https://www.msys2.org/) - - After installing, from an MSYS2 terminal, run `pacman -S mingw-w64-clang-x86_64-gcc-compat mingw-w64-clang-x86_64-clang make` to install the required tools - - Assuming you used the default install prefix for msys2 above, add `C:\msys64\clang64\bin` and `c:\msys64\usr\bin` to your environment variable `PATH` where you will perform the build steps below (e.g. system-wide, account-level, powershell, cmd, etc.) - -> [!NOTE] -> Due to bugs in the GCC C++ library for unicode support, Ollama should be built with clang on windows. +Lastly, run Ollama: ``` -make -j 5 +go run . serve ``` -#### GPU Support +## Windows -The GPU tools require the Microsoft native build tools. To build either CUDA or ROCm, you must first install MSVC via Visual Studio: +Install prerequisites: -- Make sure to select `Desktop development with C++` as a Workload during the Visual Studio install -- You must complete the Visual Studio install and run it once **BEFORE** installing CUDA or ROCm for the tools to properly register -- Add the location of the **64 bit (x64)** compiler (`cl.exe`) to your `PATH` -- Note: the default Developer Shell may configure the 32 bit (x86) compiler which will lead to build failures. Ollama requires a 64 bit toolchain. +- [CMake](https://cmake.org/download/) +- [Visual Studio 2022](https://visualstudio.microsoft.com/downloads/) including the Native Desktop Workload +- (Optional) AMD GPU support + - [ROCm](https://rocm.github.io/install.html) + - [Ninja](https://github.com/ninja-build/ninja/releases) +- (Optional) NVIDIA GPU support + - [CUDA SDK](https://developer.nvidia.com/cuda-downloads?target_os=Windows&target_arch=x86_64&target_version=11&target_type=exe_network) -#### Windows CUDA (NVIDIA) +> [!IMPORTANT] +> Ensure prerequisites are in `PATH` before running CMake. -In addition to the common Windows development tools and MSVC described above: +> [!IMPORTANT] +> ROCm is not compatible with Visual Studio CMake generators. Use `-GNinja` when configuring the project. -- [NVIDIA CUDA](https://docs.nvidia.com/cuda/cuda-installation-guide-microsoft-windows/index.html) +> [!IMPORTANT] +> CUDA is only compatible with Visual Studio CMake generators. -#### Windows ROCm (AMD Radeon) - -In addition to the common Windows development tools and MSVC described above: - -- [AMD HIP](https://www.amd.com/en/developer/resources/rocm-hub/hip-sdk.html) - -#### Windows arm64 - -The default `Developer PowerShell for VS 2022` may default to x86 which is not what you want. To ensure you get an arm64 development environment, start a plain PowerShell terminal and run: - -```powershell -import-module 'C:\\Program Files\\Microsoft Visual Studio\\2022\\Community\\Common7\\Tools\\Microsoft.VisualStudio.DevShell.dll' -Enter-VsDevShell -Arch arm64 -vsinstallpath 'C:\\Program Files\\Microsoft Visual Studio\\2022\\Community' -skipautomaticlocation -``` - -You can confirm with `write-host $env:VSCMD_ARG_TGT_ARCH` - -Follow the instructions at https://www.msys2.org/wiki/arm64/ to set up an arm64 msys2 environment. Ollama requires gcc and mingw32-make to compile, which is not currently available on Windows arm64, but a gcc compatibility adapter is available via `mingw-w64-clang-aarch64-gcc-compat`. 
At a minimum you will need to install the following: +Then, configure and build the project: ``` -pacman -S mingw-w64-clang-aarch64-clang mingw-w64-clang-aarch64-gcc-compat mingw-w64-clang-aarch64-make make +cmake -B build +cmake --build build --config Release ``` -You will need to ensure your PATH includes go, cmake, gcc and clang mingw32-make to build ollama from source. (typically `C:\msys64\clangarm64\bin\`) - - -## Advanced CPU Vector Settings - -On x86, running `make` will compile several CPU runners which can run on different CPU families. At runtime, Ollama will auto-detect the best variation to load. If GPU libraries are present at build time, Ollama also compiles GPU runners with the `AVX` CPU vector feature enabled. This provides a good performance balance when loading large models that split across GPU and CPU with broad compatibility. Some users may prefer no vector extensions (e.g. older Xeon/Celeron processors, or hypervisors that mask the vector features) while other users may prefer turning on many more vector extensions to further improve performance for split model loads. - -To customize the set of CPU vector features enabled for a CPU runner and all GPU runners, use CUSTOM_CPU_FLAGS during the build. - -To build without any vector flags: +Lastly, run Ollama: ``` -make CUSTOM_CPU_FLAGS="" +go run . serve ``` -To build with both AVX and AVX2: -``` -make CUSTOM_CPU_FLAGS=avx,avx2 -``` +## Windows (ARM) -To build with AVX512 features turned on: +Windows ARM does not support additional acceleration libraries at this time. + +## Linux + +Install prerequisites: + +- [CMake](https://cmake.org/download/) or `sudo apt install cmake` or `sudo dnf install cmake` +- (Optional) AMD GPU support + - [ROCm](https://rocm.docs.amd.com/projects/install-on-linux/en/latest/install/quick-start.html) +- (Optional) NVIDIA GPU support + - [CUDA SDK](https://developer.nvidia.com/cuda-downloads) + +> [!IMPORTANT] +> Ensure prerequisites are in `PATH` before running CMake. + + +Then, configure and build the project: ``` -make CUSTOM_CPU_FLAGS=avx,avx2,avx512,avx512vbmi,avx512vnni,avx512bf16 +cmake -B build +cmake --build build ``` -> [!NOTE] -> If you are experimenting with different flags, make sure to do a `make clean` between each change to ensure everything is rebuilt with the new compiler flags +Lastly, run Ollama: + +``` +go run . serve +``` + +## Docker + +``` +docker build . +``` + +### ROCm + +``` +docker build --build-arg FLAVOR=rocm . +``` + +## Running tests + +To run tests, use `go test`: + +``` +go test ./... +``` diff --git a/envconfig/config.go b/envconfig/config.go index c10095a6..0ca3b64c 100644 --- a/envconfig/config.go +++ b/envconfig/config.go @@ -288,12 +288,3 @@ func Values() map[string]string { func Var(key string) string { return strings.Trim(strings.TrimSpace(os.Getenv(key)), "\"'") } - -// On windows, we keep the binary at the top directory, but -// other platforms use a "bin" directory, so this returns ".." -func LibRelativeToExe() string { - if runtime.GOOS == "windows" { - return "." - } - return ".." 
-} diff --git a/go.mod b/go.mod index 1a1fdb40..1c99c094 100644 --- a/go.mod +++ b/go.mod @@ -17,12 +17,14 @@ require ( require ( github.com/agnivade/levenshtein v1.1.1 github.com/d4l3k/go-bfloat16 v0.0.0-20211005043715-690c3bdd05f1 + github.com/dlclark/regexp2 v1.11.4 github.com/emirpasic/gods/v2 v2.0.0-alpha github.com/google/go-cmp v0.6.0 github.com/mattn/go-runewidth v0.0.14 github.com/nlpodyssey/gopickle v0.3.0 github.com/pdevine/tensor v0.0.0-20240510204454-f88f4562727c golang.org/x/image v0.22.0 + gonum.org/v1/gonum v0.15.0 ) require ( @@ -42,7 +44,6 @@ require ( github.com/xtgo/set v1.0.0 // indirect go4.org/unsafe/assume-no-moving-gc v0.0.0-20231121144256-b99613f794b6 // indirect golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 // indirect - gonum.org/v1/gonum v0.15.0 // indirect gorgonia.org/vecf32 v0.9.0 // indirect gorgonia.org/vecf64 v0.9.0 // indirect ) diff --git a/go.sum b/go.sum index 6a2c9189..8eb8d84a 100644 --- a/go.sum +++ b/go.sum @@ -42,6 +42,8 @@ github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/dgryski/trifles v0.0.0-20200323201526-dd97f9abfb48 h1:fRzb/w+pyskVMQ+UbP35JkH8yB7MYb4q/qhBarqZE6g= github.com/dgryski/trifles v0.0.0-20200323201526-dd97f9abfb48/go.mod h1:if7Fbed8SFyPtHLHbg49SI7NAdJiC5WIA09pe59rfAA= +github.com/dlclark/regexp2 v1.11.4 h1:rPYF9/LECdNymJufQKmri9gV604RvvABwgOA8un7yAo= +github.com/dlclark/regexp2 v1.11.4/go.mod h1:DHkYz0B9wPfa6wondMfaivmHpzrQ3v9q8cnmRbL6yW8= github.com/emirpasic/gods/v2 v2.0.0-alpha h1:dwFlh8pBg1VMOXWGipNMRt8v96dKAIvBehtCt6OtunU= github.com/emirpasic/gods/v2 v2.0.0-alpha/go.mod h1:W0y4M2dtBB9U5z3YlghmpuUhiaZT2h6yoeE+C1sCp6A= github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= diff --git a/llama/README.md b/llama/README.md index 3b6b2067..f6a39725 100644 --- a/llama/README.md +++ b/llama/README.md @@ -1,158 +1,53 @@ # `llama` -This package integrates the [llama.cpp](https://github.com/ggerganov/llama.cpp) library as a Go package and makes it easy to build it with tags for different CPU and GPU processors. - -Supported: - -- [x] CPU -- [x] avx, avx2 -- [x] macOS Metal -- [x] Windows CUDA -- [x] Windows ROCm -- [x] Linux CUDA -- [x] Linux ROCm -- [x] Llava - -Extra build steps are required for CUDA and ROCm on Windows since `nvcc` and `hipcc` both require using msvc as the host compiler. For these shared libraries are created: - -- `ggml_cuda.dll` on Windows or `ggml_cuda.so` on Linux -- `ggml_hipblas.dll` on Windows or `ggml_hipblas.so` on Linux - -> Note: it's important that memory is allocated and freed by the same compiler (e.g. entirely by code compiled with msvc or mingw). Issues from this should be rare, but there are some places where pointers are returned by the CUDA or HIP runtimes and freed elsewhere, causing a a crash. In a future change the same runtime should be used in both cases to avoid crashes. - -## Building - -``` -go build . -``` - -### AVX - -```shell -go build -tags avx . -``` - -### AVX2 - -```shell -# go doesn't recognize `-mfma` as a valid compiler flag -# see https://github.com/golang/go/issues/17895 -go env -w "CGO_CFLAGS_ALLOW=-mfma|-mf16c" -go env -w "CGO_CXXFLAGS_ALLOW=-mfma|-mf16c" -go build -tags=avx,avx2 . -``` - -## Linux - -### CUDA - -Install the [CUDA toolkit v11.3.1](https://developer.nvidia.com/cuda-11-3-1-download-archive): - -```shell -make ggml_cuda.so -go build -tags avx,cuda . 
-```
-
-### ROCm
-
-Install [ROCm](https://rocm.docs.amd.com/en/latest/).
-
-```shell
-make ggml_hipblas.so
-go build -tags avx,rocm .
-```
-
-## Windows
-
-Download [w64devkit](https://github.com/skeeto/w64devkit/releases/latest) for a simple MinGW development environment.
-
-### CUDA
-
-Install the [CUDA toolkit v11.3.1](https://developer.nvidia.com/cuda-11-3-1-download-archive) then build the cuda code:
-
-```shell
-make ggml_cuda.dll
-go build -tags avx,cuda .
-```
-
-### ROCm
-
-Install [ROCm](https://rocm.docs.amd.com/en/latest/).
-
-```shell
-make ggml_hipblas.dll
-go build -tags avx,rocm .
-```
-
-## Building runners
-
-```shell
-# build all runners for this platform
-make -j
-```
+This package provides Go bindings to [llama.cpp](https://github.com/ggerganov/llama.cpp).
 
 ## Vendoring
 
-Ollama currently vendors [llama.cpp](https://github.com/ggerganov/llama.cpp/) and [ggml](https://github.com/ggerganov/ggml) through a vendoring model. While we generally strive to contribute changes back upstream to avoid drift, we cary a small set of patches which are applied to the tracking commit. A set of make targets are available to aid developers in updating to a newer tracking commit, or to work on changes.
+Ollama vendors [llama.cpp](https://github.com/ggerganov/llama.cpp/) and [ggml](https://github.com/ggerganov/llama.cpp/tree/master/ggml/src). While we generally strive to contribute changes back upstream to avoid drift, we carry a small set of patches which are applied to the tracking commit.
 
 If you update the vendoring code, start by running the following command to establish the tracking llama.cpp repo in the `./vendor/` directory.
 
 ```
-make apply-patches
+make -f Makefile.sync apply-patches
 ```
 
 ### Updating Base Commit
 
 **Pin to new base commit**
 
-To update to a newer base commit, select the upstream git tag or commit and update `llama/vendoring`
-
-#### Applying patches
+To change the base commit, update `FETCH_HEAD` in Makefile.sync.
 
 When updating to a newer base commit, the existing patches may not apply cleanly and require manual merge resolution.
 
 Start by applying the patches. If any of the patches have conflicts, `git am` will stop at the first failure.
 
 ```
-make apply-patches
+make -f Makefile.sync apply-patches
 ```
 
-If you see an error message about a conflict, go into the `./vendor/` directory, and perform merge resolution using your preferred tool to the patch commit which failed. Save the file(s) and continue the patch series with `git am --continue` . If any additional patches fail, follow the same pattern until the full patch series is applied. Once finished, run a final `create-patches` and `sync` target to ensure everything is updated.
+If there are conflicts, you will see an error message. Resolve the conflicts in `./vendor/`, continue the patch series with `git am --continue`, then rerun `make -f Makefile.sync apply-patches`. Repeat until all patches are successfully applied.
+
+Once all patches are applied, commit the changes to the tracking repository.
 
 ```
-make create-patches sync
+make -f Makefile.sync format-patches sync
 ```
 
-Build and test Ollama, and make any necessary changes to the Go code based on the new base commit. Submit your PR to the Ollama repo.
-
 ### Generating Patches
 
 When working on new fixes or features that impact vendored code, use the following model.
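+
+As a rough sketch, the end-to-end loop strings together the `Makefile.sync` targets described in this section (the comments are illustrative, not tool output):
+
+```
+make -f Makefile.sync clean apply-patches   # reset ./vendor/ to the tracking commit plus patches
+# ...edit files under ./vendor/...
+make -f Makefile.sync sync                  # refresh the vendored code in the ollama tree
+go build .                                  # rebuild and test ollama
+make -f Makefile.sync format-patches        # regenerate the patch series once your change is committed
+```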
First get a clean tracking repo with all current patches applied: ``` -make apply-patches +make -f Makefile.sync clean apply-patches ``` -Now edit the upstream native code in the `./vendor/` directory. You do not need to commit every change in order to build, a dirty working tree in the tracking repo is OK while developing. Simply save in your editor, and run the following to refresh the vendored code with your changes, build the backend(s) and build ollama: - -``` -make sync -make -j 8 -go build . -``` - -> [!IMPORTANT] -> Do **NOT** run `apply-patches` while you're iterating as that will reset the tracking repo. It will detect a dirty tree and abort, but if your tree is clean and you accidentally ran this target, use `git reflog` to recover your commit(s). - Iterate until you're ready to submit PRs. Once your code is ready, commit a change in the `./vendor/` directory, then generate the patches for ollama with ``` -make create-patches +make -f Makefile.sync format-patches ``` -> [!IMPORTANT] -> Once you have completed this step, it is safe to run `apply-patches` since your change is preserved in the patches. - In your `./vendor/` directory, create a branch, and cherry-pick the new commit to that branch, then submit a PR upstream to llama.cpp. Commit the changes in the ollama repo and submit a PR to Ollama, which will include the vendored code update with your change, along with the patches. diff --git a/llama/amx.h b/llama/amx.h deleted file mode 100644 index 5b64b8bd..00000000 --- a/llama/amx.h +++ /dev/null @@ -1,34 +0,0 @@ -/** - * llama.cpp - commit 46e3556e01b824e52395fb050b29804b6cff2a7c - do not edit this file - * - * MIT License - * - * Copyright (c) 2023-2024 The ggml authors - * - * Permission is hereby granted, free of charge, to any person obtaining a copy - * of this software and associated documentation files (the "Software"), to deal - * in the Software without restriction, including without limitation the rights - * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell - * copies of the Software, and to permit persons to whom the Software is - * furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in all - * copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. 
- */ - -#include "ggml-backend.h" -#include "ggml-cpu-impl.h" - -// GGML internal header - -#if defined(__AMX_INT8__) && defined(__AVX512VNNI__) -ggml_backend_buffer_type_t ggml_backend_amx_buffer_type(void); -#endif diff --git a/llama/ggml-blas.h b/llama/ggml-blas.h deleted file mode 100644 index f5fb9de2..00000000 --- a/llama/ggml-blas.h +++ /dev/null @@ -1,51 +0,0 @@ -/** - * llama.cpp - commit 46e3556e01b824e52395fb050b29804b6cff2a7c - do not edit this file - * - * MIT License - * - * Copyright (c) 2023-2024 The ggml authors - * - * Permission is hereby granted, free of charge, to any person obtaining a copy - * of this software and associated documentation files (the "Software"), to deal - * in the Software without restriction, including without limitation the rights - * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell - * copies of the Software, and to permit persons to whom the Software is - * furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in all - * copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. - */ - -#pragma once - -#include "ggml.h" -#include "ggml-backend.h" - - -#ifdef __cplusplus -extern "C" { -#endif - -// backend API -GGML_BACKEND_API ggml_backend_t ggml_backend_blas_init(void); - -GGML_BACKEND_API bool ggml_backend_is_blas(ggml_backend_t backend); - -// number of threads used for conversion to float -// for openblas and blis, this will also set the number of threads used for blas operations -GGML_BACKEND_API void ggml_backend_blas_set_n_threads(ggml_backend_t backend_blas, int n_threads); - -GGML_BACKEND_API ggml_backend_reg_t ggml_backend_blas_reg(void); - - -#ifdef __cplusplus -} -#endif diff --git a/llama/ggml-cpu-aarch64.h b/llama/ggml-cpu-aarch64.h deleted file mode 100644 index 14320735..00000000 --- a/llama/ggml-cpu-aarch64.h +++ /dev/null @@ -1,34 +0,0 @@ -/** - * llama.cpp - commit 46e3556e01b824e52395fb050b29804b6cff2a7c - do not edit this file - * - * MIT License - * - * Copyright (c) 2023-2024 The ggml authors - * - * Permission is hereby granted, free of charge, to any person obtaining a copy - * of this software and associated documentation files (the "Software"), to deal - * in the Software without restriction, including without limitation the rights - * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell - * copies of the Software, and to permit persons to whom the Software is - * furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in all - * copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-
-#pragma once
-
-#include "ggml-cpu-traits.h"
-#include "ggml.h"
-
-// GGML internal header
-
-ggml_backend_buffer_type_t ggml_backend_cpu_aarch64_buffer_type(void);
diff --git a/llama/ggml-cpu-traits.h b/llama/ggml-cpu-traits.h
deleted file mode 100644
index dcd7855f..00000000
--- a/llama/ggml-cpu-traits.h
+++ /dev/null
@@ -1,64 +0,0 @@
-/**
- * llama.cpp - commit 46e3556e01b824e52395fb050b29804b6cff2a7c - do not edit this file
- *
- * MIT License
- *
- * Copyright (c) 2023-2024 The ggml authors
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to deal
- * in the Software without restriction, including without limitation the rights
- * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
- * copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-
-#pragma once
-#include "ggml-backend-impl.h"
-#include "ggml-cpu-impl.h"
-#include "ggml.h"
-
-#ifdef __cplusplus
-#    include <vector>
-extern "C" {
-#endif
-
-// return true if op part of extra "accelerator"
-bool ggml_cpu_extra_compute_forward(struct ggml_compute_params * params, struct ggml_tensor * op);
-bool ggml_cpu_extra_work_size(int n_threads, const struct ggml_tensor * op, size_t * size);
-
-#ifdef __cplusplus
-}
-
-namespace ggml::cpu {
-// register in tensor->extra
-class tensor_traits {
-  public:
-    virtual ~tensor_traits();
-    virtual bool work_size(int n_threads, const struct ggml_tensor * op, size_t & size) = 0;
-    virtual bool compute_forward(struct ggml_compute_params * params, struct ggml_tensor * op) = 0;
-};
-
-class extra_buffer_type {
-  public:
-    virtual ~extra_buffer_type();
-    virtual bool supports_op(ggml_backend_dev_t dev, const struct ggml_tensor * op) = 0;
-    virtual tensor_traits * get_tensor_traits(const struct ggml_tensor * op) = 0;
-};
-}  // namespace ggml::cpu
-
-// implemented in ggml-cpu.cpp.
-std::vector<ggml_backend_buffer_type_t> & ggml_backend_cpu_get_extra_buffers_type();
-
-#endif
diff --git a/llama/ggml-cuda/acc.cuh b/llama/ggml-cuda/acc.cuh
deleted file mode 100644
index 5c12d906..00000000
--- a/llama/ggml-cuda/acc.cuh
+++ /dev/null
@@ -1,31 +0,0 @@
-/**
- * llama.cpp - commit 46e3556e01b824e52395fb050b29804b6cff2a7c - do not edit this file
- *
- * MIT License
- *
- * Copyright (c) 2023-2024 The ggml authors
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to deal
- * in the Software without restriction, including without limitation the rights
- * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
- * copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-
-#include "common.cuh"
-
-#define CUDA_ACC_BLOCK_SIZE 256
-
-void ggml_cuda_op_acc(ggml_backend_cuda_context & ctx, ggml_tensor * dst);
diff --git a/llama/ggml-cuda/arange.cu b/llama/ggml-cuda/arange.cu
deleted file mode 100644
index 3b67b3b5..00000000
--- a/llama/ggml-cuda/arange.cu
+++ /dev/null
@@ -1,60 +0,0 @@
-/**
- * llama.cpp - commit 46e3556e01b824e52395fb050b29804b6cff2a7c - do not edit this file
- *
- * MIT License
- *
- * Copyright (c) 2023-2024 The ggml authors
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to deal
- * in the Software without restriction, including without limitation the rights
- * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
- * copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-
-#include "arange.cuh"
-
-static __global__ void arange_f32(float * dst, const int ne0, const float start, const float step) {
-    // blockIDx.x: idx of ne0 / BLOCK_SIZE
-    int nidx = threadIdx.x + blockIdx.x * blockDim.x;
-    if (nidx >= ne0) {
-        return;
-    }
-    dst[nidx] = start + step * nidx;
-}
-
-static void arange_f32_cuda(float * dst, const int ne0, const float start, const float step, cudaStream_t stream) {
-    int num_blocks = (ne0 + CUDA_ARANGE_BLOCK_SIZE - 1) / CUDA_ARANGE_BLOCK_SIZE;
-    arange_f32<<<num_blocks, CUDA_ARANGE_BLOCK_SIZE, 0, stream>>>(dst, ne0, start, step);
-}
-
-void ggml_cuda_op_arange(ggml_backend_cuda_context & ctx, ggml_tensor * dst) {
-    float * dst_d = (float *)dst->data;
-    cudaStream_t stream = ctx.stream();
-
-    GGML_ASSERT(dst->type == GGML_TYPE_F32);
-
-    float start;
-    float stop;
-    float step;
-    memcpy(&start, (float *)dst->op_params + 0, sizeof(float));
-    memcpy(&stop, (float *)dst->op_params + 1, sizeof(float));
-    memcpy(&step, (float *)dst->op_params + 2, sizeof(float));
-
-    int64_t steps = (int64_t)ceil((stop - start) / step);
-    GGML_ASSERT(ggml_nelements(dst) == steps);
-
-    arange_f32_cuda(dst_d, dst->ne[0], start, step, stream);
-}
diff --git a/llama/ggml-cuda/arange.cuh b/llama/ggml-cuda/arange.cuh
deleted file mode 100644
index 16201546..00000000
--- a/llama/ggml-cuda/arange.cuh
+++ /dev/null
@@ -1,31 +0,0 @@
-/**
- * llama.cpp - commit 46e3556e01b824e52395fb050b29804b6cff2a7c - do not edit this file
- *
- * MIT License
- *
- * Copyright (c) 2023-2024 The ggml authors
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to deal
- * in the Software without restriction, including without limitation the rights
- * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
- * copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */ - -#include "common.cuh" - -#define CUDA_ARANGE_BLOCK_SIZE 256 - -void ggml_cuda_op_arange(ggml_backend_cuda_context & ctx, ggml_tensor * dst); diff --git a/llama/ggml-cuda/argmax.cuh b/llama/ggml-cuda/argmax.cuh deleted file mode 100644 index 805a90d8..00000000 --- a/llama/ggml-cuda/argmax.cuh +++ /dev/null @@ -1,29 +0,0 @@ -/** - * llama.cpp - commit 46e3556e01b824e52395fb050b29804b6cff2a7c - do not edit this file - * - * MIT License - * - * Copyright (c) 2023-2024 The ggml authors - * - * Permission is hereby granted, free of charge, to any person obtaining a copy - * of this software and associated documentation files (the "Software"), to deal - * in the Software without restriction, including without limitation the rights - * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell - * copies of the Software, and to permit persons to whom the Software is - * furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in all - * copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. - */ - -#include "common.cuh" - -void ggml_cuda_argmax(ggml_backend_cuda_context & ctx, ggml_tensor * dst); diff --git a/llama/ggml-cuda/argsort.cuh b/llama/ggml-cuda/argsort.cuh deleted file mode 100644 index 0d8427bb..00000000 --- a/llama/ggml-cuda/argsort.cuh +++ /dev/null @@ -1,29 +0,0 @@ -/** - * llama.cpp - commit 46e3556e01b824e52395fb050b29804b6cff2a7c - do not edit this file - * - * MIT License - * - * Copyright (c) 2023-2024 The ggml authors - * - * Permission is hereby granted, free of charge, to any person obtaining a copy - * of this software and associated documentation files (the "Software"), to deal - * in the Software without restriction, including without limitation the rights - * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell - * copies of the Software, and to permit persons to whom the Software is - * furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in all - * copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. 
- */ - -#include "common.cuh" - -void ggml_cuda_op_argsort(ggml_backend_cuda_context & ctx, ggml_tensor * dst); diff --git a/llama/ggml-cuda/binbcast.cuh b/llama/ggml-cuda/binbcast.cuh deleted file mode 100644 index 3acee0d0..00000000 --- a/llama/ggml-cuda/binbcast.cuh +++ /dev/null @@ -1,35 +0,0 @@ -/** - * llama.cpp - commit 46e3556e01b824e52395fb050b29804b6cff2a7c - do not edit this file - * - * MIT License - * - * Copyright (c) 2023-2024 The ggml authors - * - * Permission is hereby granted, free of charge, to any person obtaining a copy - * of this software and associated documentation files (the "Software"), to deal - * in the Software without restriction, including without limitation the rights - * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell - * copies of the Software, and to permit persons to whom the Software is - * furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in all - * copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. - */ - -#include "common.cuh" - -void ggml_cuda_op_repeat(ggml_backend_cuda_context & ctx, ggml_tensor * dst); -void ggml_cuda_op_add(ggml_backend_cuda_context & ctx, ggml_tensor * dst); -void ggml_cuda_op_sub(ggml_backend_cuda_context & ctx, ggml_tensor * dst); -void ggml_cuda_op_mul(ggml_backend_cuda_context & ctx, ggml_tensor * dst); -void ggml_cuda_op_div(ggml_backend_cuda_context & ctx, ggml_tensor * dst); - -void ggml_cuda_op_repeat_back(ggml_backend_cuda_context & ctx, ggml_tensor * dst); diff --git a/llama/ggml-cuda/clamp.cu b/llama/ggml-cuda/clamp.cu deleted file mode 100644 index 2df1076c..00000000 --- a/llama/ggml-cuda/clamp.cu +++ /dev/null @@ -1,60 +0,0 @@ -/** - * llama.cpp - commit 46e3556e01b824e52395fb050b29804b6cff2a7c - do not edit this file - * - * MIT License - * - * Copyright (c) 2023-2024 The ggml authors - * - * Permission is hereby granted, free of charge, to any person obtaining a copy - * of this software and associated documentation files (the "Software"), to deal - * in the Software without restriction, including without limitation the rights - * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell - * copies of the Software, and to permit persons to whom the Software is - * furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in all - * copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. 
- */
-
-#include "clamp.cuh"
-
-static __global__ void clamp_f32(const float * x, float * dst, const float min, const float max, const int k) {
-    const int i = blockDim.x*blockIdx.x + threadIdx.x;
-
-    if (i >= k) {
-        return;
-    }
-
-    dst[i] = x[i] < min ? min : (x[i] > max ? max : x[i]);
-}
-
-static void clamp_f32_cuda(const float * x, float * dst, const float min, const float max, const int k, cudaStream_t stream) {
-    const int num_blocks = (k + CUDA_CLAMP_BLOCK_SIZE - 1) / CUDA_CLAMP_BLOCK_SIZE;
-    clamp_f32<<<num_blocks, CUDA_CLAMP_BLOCK_SIZE, 0, stream>>>(x, dst, min, max, k);
-}
-
-
-void ggml_cuda_op_clamp(ggml_backend_cuda_context & ctx, ggml_tensor * dst) {
-    const ggml_tensor * src0 = dst->src[0];
-    const float * src0_d = (const float *)src0->data;
-    float * dst_d = (float *)dst->data;
-    cudaStream_t stream = ctx.stream();
-
-    GGML_ASSERT(src0->type == GGML_TYPE_F32);
-    GGML_ASSERT( dst->type == GGML_TYPE_F32);
-
-    float min;
-    float max;
-    memcpy(&min, dst->op_params, sizeof(float));
-    memcpy(&max, (float *) dst->op_params + 1, sizeof(float));
-
-    clamp_f32_cuda(src0_d, dst_d, min, max, ggml_nelements(src0), stream);
-}
diff --git a/llama/ggml-cuda/clamp.cuh b/llama/ggml-cuda/clamp.cuh
deleted file mode 100644
index 3f74a880..00000000
--- a/llama/ggml-cuda/clamp.cuh
+++ /dev/null
@@ -1,31 +0,0 @@
-/**
- * llama.cpp - commit 46e3556e01b824e52395fb050b29804b6cff2a7c - do not edit this file
- *
- * MIT License
- *
- * Copyright (c) 2023-2024 The ggml authors
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to deal
- * in the Software without restriction, including without limitation the rights
- * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
- * copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */ - -#include "common.cuh" - -#define CUDA_CLAMP_BLOCK_SIZE 256 - -void ggml_cuda_op_clamp(ggml_backend_cuda_context & ctx, ggml_tensor * dst); diff --git a/llama/ggml-cuda/concat.cuh b/llama/ggml-cuda/concat.cuh deleted file mode 100644 index ba2b67ec..00000000 --- a/llama/ggml-cuda/concat.cuh +++ /dev/null @@ -1,31 +0,0 @@ -/** - * llama.cpp - commit 46e3556e01b824e52395fb050b29804b6cff2a7c - do not edit this file - * - * MIT License - * - * Copyright (c) 2023-2024 The ggml authors - * - * Permission is hereby granted, free of charge, to any person obtaining a copy - * of this software and associated documentation files (the "Software"), to deal - * in the Software without restriction, including without limitation the rights - * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell - * copies of the Software, and to permit persons to whom the Software is - * furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in all - * copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. - */ - -#include "common.cuh" - -#define CUDA_CONCAT_BLOCK_SIZE 256 - -void ggml_cuda_op_concat(ggml_backend_cuda_context & ctx, ggml_tensor * dst); diff --git a/llama/ggml-cuda/conv-transpose-1d.cuh b/llama/ggml-cuda/conv-transpose-1d.cuh deleted file mode 100644 index 53c3beef..00000000 --- a/llama/ggml-cuda/conv-transpose-1d.cuh +++ /dev/null @@ -1,31 +0,0 @@ -/** - * llama.cpp - commit 46e3556e01b824e52395fb050b29804b6cff2a7c - do not edit this file - * - * MIT License - * - * Copyright (c) 2023-2024 The ggml authors - * - * Permission is hereby granted, free of charge, to any person obtaining a copy - * of this software and associated documentation files (the "Software"), to deal - * in the Software without restriction, including without limitation the rights - * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell - * copies of the Software, and to permit persons to whom the Software is - * furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in all - * copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. 
- */
-
-#include "common.cuh"
-
-#define CUDA_CONV_TRANPOSE_1D_BLOCK_SIZE 256
-
-void ggml_cuda_op_conv_transpose_1d(ggml_backend_cuda_context & ctx, ggml_tensor * dst);
diff --git a/llama/ggml-cuda/convert.cuh b/llama/ggml-cuda/convert.cuh
deleted file mode 100644
index 27f949e2..00000000
--- a/llama/ggml-cuda/convert.cuh
+++ /dev/null
@@ -1,39 +0,0 @@
-/**
- * llama.cpp - commit 46e3556e01b824e52395fb050b29804b6cff2a7c - do not edit this file
- *
- * MIT License
- *
- * Copyright (c) 2023-2024 The ggml authors
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to deal
- * in the Software without restriction, including without limitation the rights
- * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
- * copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-
-#include "common.cuh"
-
-#define CUDA_DEQUANTIZE_BLOCK_SIZE 256
-
-template<typename T>
-using to_t_cuda_t = void (*)(const void * __restrict__ x, T * __restrict__ y, int64_t k, cudaStream_t stream);
-
-typedef to_t_cuda_t<float> to_fp32_cuda_t;
-typedef to_t_cuda_t<half> to_fp16_cuda_t;
-
-to_fp16_cuda_t ggml_get_to_fp16_cuda(ggml_type type);
-
-to_fp32_cuda_t ggml_get_to_fp32_cuda(ggml_type type);
diff --git a/llama/ggml-cuda/count-equal.cuh b/llama/ggml-cuda/count-equal.cuh
deleted file mode 100644
index 922c6288..00000000
--- a/llama/ggml-cuda/count-equal.cuh
+++ /dev/null
@@ -1,31 +0,0 @@
-/**
- * llama.cpp - commit 46e3556e01b824e52395fb050b29804b6cff2a7c - do not edit this file
- *
- * MIT License
- *
- * Copyright (c) 2023-2024 The ggml authors
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to deal
- * in the Software without restriction, including without limitation the rights
- * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
- * copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */ - -#include "common.cuh" - -#define CUDA_COUNT_EQUAL_CHUNK_SIZE 128 - -void ggml_cuda_count_equal(ggml_backend_cuda_context & ctx, ggml_tensor * dst); diff --git a/llama/ggml-cuda/cpy.cuh b/llama/ggml-cuda/cpy.cuh deleted file mode 100644 index 79496c4c..00000000 --- a/llama/ggml-cuda/cpy.cuh +++ /dev/null @@ -1,35 +0,0 @@ -/** - * llama.cpp - commit 46e3556e01b824e52395fb050b29804b6cff2a7c - do not edit this file - * - * MIT License - * - * Copyright (c) 2023-2024 The ggml authors - * - * Permission is hereby granted, free of charge, to any person obtaining a copy - * of this software and associated documentation files (the "Software"), to deal - * in the Software without restriction, including without limitation the rights - * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell - * copies of the Software, and to permit persons to whom the Software is - * furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in all - * copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. - */ - -#include "common.cuh" - -#define CUDA_CPY_BLOCK_SIZE 64 - -void ggml_cuda_cpy(ggml_backend_cuda_context & ctx, const ggml_tensor * src0, ggml_tensor * src1); - -void ggml_cuda_dup(ggml_backend_cuda_context & ctx, ggml_tensor * dst); - -void* ggml_cuda_cpy_fn(const ggml_tensor * src0, ggml_tensor * src1); diff --git a/llama/ggml-cuda/cross-entropy-loss.cuh b/llama/ggml-cuda/cross-entropy-loss.cuh deleted file mode 100644 index e816b8df..00000000 --- a/llama/ggml-cuda/cross-entropy-loss.cuh +++ /dev/null @@ -1,33 +0,0 @@ -/** - * llama.cpp - commit 46e3556e01b824e52395fb050b29804b6cff2a7c - do not edit this file - * - * MIT License - * - * Copyright (c) 2023-2024 The ggml authors - * - * Permission is hereby granted, free of charge, to any person obtaining a copy - * of this software and associated documentation files (the "Software"), to deal - * in the Software without restriction, including without limitation the rights - * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell - * copies of the Software, and to permit persons to whom the Software is - * furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in all - * copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. 
- */ - -#include "common.cuh" - -#define CUDA_CROSS_ENTROPY_LOSS_BLOCK_SIZE 256 - -void ggml_cuda_cross_entropy_loss(ggml_backend_cuda_context & ctx, ggml_tensor * dst); - -void ggml_cuda_cross_entropy_loss_back(ggml_backend_cuda_context & ctx, ggml_tensor * dst); diff --git a/llama/ggml-cuda/diagmask.cuh b/llama/ggml-cuda/diagmask.cuh deleted file mode 100644 index 76162837..00000000 --- a/llama/ggml-cuda/diagmask.cuh +++ /dev/null @@ -1,31 +0,0 @@ -/** - * llama.cpp - commit 46e3556e01b824e52395fb050b29804b6cff2a7c - do not edit this file - * - * MIT License - * - * Copyright (c) 2023-2024 The ggml authors - * - * Permission is hereby granted, free of charge, to any person obtaining a copy - * of this software and associated documentation files (the "Software"), to deal - * in the Software without restriction, including without limitation the rights - * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell - * copies of the Software, and to permit persons to whom the Software is - * furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in all - * copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. - */ - -#include "common.cuh" - -#define CUDA_DIAG_MASK_INF_BLOCK_SIZE 32 - -void ggml_cuda_op_diag_mask_inf(ggml_backend_cuda_context & ctx, ggml_tensor * dst); diff --git a/llama/ggml-cuda/fattn-tile-f16.cuh b/llama/ggml-cuda/fattn-tile-f16.cuh deleted file mode 100644 index 4a3965ed..00000000 --- a/llama/ggml-cuda/fattn-tile-f16.cuh +++ /dev/null @@ -1,29 +0,0 @@ -/** - * llama.cpp - commit 46e3556e01b824e52395fb050b29804b6cff2a7c - do not edit this file - * - * MIT License - * - * Copyright (c) 2023-2024 The ggml authors - * - * Permission is hereby granted, free of charge, to any person obtaining a copy - * of this software and associated documentation files (the "Software"), to deal - * in the Software without restriction, including without limitation the rights - * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell - * copies of the Software, and to permit persons to whom the Software is - * furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in all - * copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. 
- */ - -#include "common.cuh" - -void ggml_cuda_flash_attn_ext_tile_f16(ggml_backend_cuda_context & ctx, ggml_tensor * dst); diff --git a/llama/ggml-cuda/fattn-tile-f32.cuh b/llama/ggml-cuda/fattn-tile-f32.cuh deleted file mode 100644 index 8a5eef47..00000000 --- a/llama/ggml-cuda/fattn-tile-f32.cuh +++ /dev/null @@ -1,29 +0,0 @@ -/** - * llama.cpp - commit 46e3556e01b824e52395fb050b29804b6cff2a7c - do not edit this file - * - * MIT License - * - * Copyright (c) 2023-2024 The ggml authors - * - * Permission is hereby granted, free of charge, to any person obtaining a copy - * of this software and associated documentation files (the "Software"), to deal - * in the Software without restriction, including without limitation the rights - * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell - * copies of the Software, and to permit persons to whom the Software is - * furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in all - * copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. - */ - -#include "common.cuh" - -void ggml_cuda_flash_attn_ext_tile_f32(ggml_backend_cuda_context & ctx, ggml_tensor * dst); diff --git a/llama/ggml-cuda/fattn.cuh b/llama/ggml-cuda/fattn.cuh deleted file mode 100644 index 6947118e..00000000 --- a/llama/ggml-cuda/fattn.cuh +++ /dev/null @@ -1,29 +0,0 @@ -/** - * llama.cpp - commit 46e3556e01b824e52395fb050b29804b6cff2a7c - do not edit this file - * - * MIT License - * - * Copyright (c) 2023-2024 The ggml authors - * - * Permission is hereby granted, free of charge, to any person obtaining a copy - * of this software and associated documentation files (the "Software"), to deal - * in the Software without restriction, including without limitation the rights - * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell - * copies of the Software, and to permit persons to whom the Software is - * furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in all - * copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. 
- */ - -#include "common.cuh" - -void ggml_cuda_flash_attn_ext(ggml_backend_cuda_context & ctx, ggml_tensor * dst); diff --git a/llama/ggml-cuda/getrows.cuh b/llama/ggml-cuda/getrows.cuh deleted file mode 100644 index bbbf482d..00000000 --- a/llama/ggml-cuda/getrows.cuh +++ /dev/null @@ -1,31 +0,0 @@ -/** - * llama.cpp - commit 46e3556e01b824e52395fb050b29804b6cff2a7c - do not edit this file - * - * MIT License - * - * Copyright (c) 2023-2024 The ggml authors - * - * Permission is hereby granted, free of charge, to any person obtaining a copy - * of this software and associated documentation files (the "Software"), to deal - * in the Software without restriction, including without limitation the rights - * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell - * copies of the Software, and to permit persons to whom the Software is - * furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in all - * copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. - */ - -#include "common.cuh" - -#define CUDA_GET_ROWS_BLOCK_SIZE 256 - -void ggml_cuda_op_get_rows(ggml_backend_cuda_context & ctx, ggml_tensor * dst); diff --git a/llama/ggml-cuda/im2col.cuh b/llama/ggml-cuda/im2col.cuh deleted file mode 100644 index 2c64c16b..00000000 --- a/llama/ggml-cuda/im2col.cuh +++ /dev/null @@ -1,31 +0,0 @@ -/** - * llama.cpp - commit 46e3556e01b824e52395fb050b29804b6cff2a7c - do not edit this file - * - * MIT License - * - * Copyright (c) 2023-2024 The ggml authors - * - * Permission is hereby granted, free of charge, to any person obtaining a copy - * of this software and associated documentation files (the "Software"), to deal - * in the Software without restriction, including without limitation the rights - * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell - * copies of the Software, and to permit persons to whom the Software is - * furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in all - * copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. 
- */ - -#include "common.cuh" - -#define CUDA_IM2COL_BLOCK_SIZE 256 - -void ggml_cuda_op_im2col(ggml_backend_cuda_context & ctx, ggml_tensor * dst); diff --git a/llama/ggml-cuda/mmv.cuh b/llama/ggml-cuda/mmv.cuh deleted file mode 100644 index fcfc8ea4..00000000 --- a/llama/ggml-cuda/mmv.cuh +++ /dev/null @@ -1,38 +0,0 @@ -/** - * llama.cpp - commit 46e3556e01b824e52395fb050b29804b6cff2a7c - do not edit this file - * - * MIT License - * - * Copyright (c) 2023-2024 The ggml authors - * - * Permission is hereby granted, free of charge, to any person obtaining a copy - * of this software and associated documentation files (the "Software"), to deal - * in the Software without restriction, including without limitation the rights - * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell - * copies of the Software, and to permit persons to whom the Software is - * furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in all - * copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. - */ - -#include "common.cuh" - -// maximum number of src0 rows with which to use mul_mat_vec over cuBLAS if FP16 tensor cores are available -#define MMV_MAX_ROWS 512 - -void ggml_cuda_mul_mat_vec(ggml_backend_cuda_context & ctx, const ggml_tensor * src0, const ggml_tensor * src1, ggml_tensor * dst); - -void ggml_cuda_op_mul_mat_vec( - ggml_backend_cuda_context & ctx, - const ggml_tensor * src0, const ggml_tensor * src1, ggml_tensor * dst, const char * src0_dd_i, const float * src1_ddf_i, - const char * src1_ddq_i, float * dst_dd_i, const int64_t row_low, const int64_t row_high, const int64_t src1_ncols, - const int64_t src1_padded_row_size, cudaStream_t stream); diff --git a/llama/ggml-cuda/mmvq.cuh b/llama/ggml-cuda/mmvq.cuh deleted file mode 100644 index ae18ae31..00000000 --- a/llama/ggml-cuda/mmvq.cuh +++ /dev/null @@ -1,35 +0,0 @@ -/** - * llama.cpp - commit 46e3556e01b824e52395fb050b29804b6cff2a7c - do not edit this file - * - * MIT License - * - * Copyright (c) 2023-2024 The ggml authors - * - * Permission is hereby granted, free of charge, to any person obtaining a copy - * of this software and associated documentation files (the "Software"), to deal - * in the Software without restriction, including without limitation the rights - * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell - * copies of the Software, and to permit persons to whom the Software is - * furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in all - * copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. - */ - -#include "common.cuh" - -#define MMVQ_MAX_BATCH_SIZE 8 // Max. batch size for which to use MMVQ kernels. - -void ggml_cuda_op_mul_mat_vec_q( - ggml_backend_cuda_context & ctx, - const ggml_tensor * src0, const ggml_tensor * src1, ggml_tensor * dst, const char * src0_dd_i, const float * src1_ddf_i, - const char * src1_ddq_i, float * dst_dd_i, const int64_t row_low, const int64_t row_high, const int64_t src1_ncols, - const int64_t src1_padded_row_size, cudaStream_t stream); diff --git a/llama/ggml-cuda/norm.cuh b/llama/ggml-cuda/norm.cuh deleted file mode 100644 index 0902f23a..00000000 --- a/llama/ggml-cuda/norm.cuh +++ /dev/null @@ -1,33 +0,0 @@ -/** - * llama.cpp - commit 46e3556e01b824e52395fb050b29804b6cff2a7c - do not edit this file - * - * MIT License - * - * Copyright (c) 2023-2024 The ggml authors - * - * Permission is hereby granted, free of charge, to any person obtaining a copy - * of this software and associated documentation files (the "Software"), to deal - * in the Software without restriction, including without limitation the rights - * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell - * copies of the Software, and to permit persons to whom the Software is - * furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in all - * copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. - */ - -#include "common.cuh" - -void ggml_cuda_op_norm(ggml_backend_cuda_context & ctx, ggml_tensor * dst); - -void ggml_cuda_op_group_norm(ggml_backend_cuda_context & ctx, ggml_tensor * dst); - -void ggml_cuda_op_rms_norm(ggml_backend_cuda_context & ctx, ggml_tensor * dst); diff --git a/llama/ggml-cuda/opt-step-adamw.cuh b/llama/ggml-cuda/opt-step-adamw.cuh deleted file mode 100644 index b956bf93..00000000 --- a/llama/ggml-cuda/opt-step-adamw.cuh +++ /dev/null @@ -1,31 +0,0 @@ -/** - * llama.cpp - commit 46e3556e01b824e52395fb050b29804b6cff2a7c - do not edit this file - * - * MIT License - * - * Copyright (c) 2023-2024 The ggml authors - * - * Permission is hereby granted, free of charge, to any person obtaining a copy - * of this software and associated documentation files (the "Software"), to deal - * in the Software without restriction, including without limitation the rights - * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell - * copies of the Software, and to permit persons to whom the Software is - * furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in all - * copies or substantial portions of the Software. 
- * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. - */ - -#include "common.cuh" - -#define CUDA_OPT_STEP_ADAMW_BLOCK_SIZE 256 - -void ggml_cuda_opt_step_adamw(ggml_backend_cuda_context & ctx, ggml_tensor * dst); diff --git a/llama/ggml-cuda/out-prod.cuh b/llama/ggml-cuda/out-prod.cuh deleted file mode 100644 index 4631cd65..00000000 --- a/llama/ggml-cuda/out-prod.cuh +++ /dev/null @@ -1,29 +0,0 @@ -/** - * llama.cpp - commit 46e3556e01b824e52395fb050b29804b6cff2a7c - do not edit this file - * - * MIT License - * - * Copyright (c) 2023-2024 The ggml authors - * - * Permission is hereby granted, free of charge, to any person obtaining a copy - * of this software and associated documentation files (the "Software"), to deal - * in the Software without restriction, including without limitation the rights - * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell - * copies of the Software, and to permit persons to whom the Software is - * furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in all - * copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. - */ - -#include "common.cuh" - -void ggml_cuda_out_prod(ggml_backend_cuda_context & ctx, ggml_tensor * dst); diff --git a/llama/ggml-cuda/pad.cuh b/llama/ggml-cuda/pad.cuh deleted file mode 100644 index 9c23680d..00000000 --- a/llama/ggml-cuda/pad.cuh +++ /dev/null @@ -1,32 +0,0 @@ -/** - * llama.cpp - commit 46e3556e01b824e52395fb050b29804b6cff2a7c - do not edit this file - * - * MIT License - * - * Copyright (c) 2023-2024 The ggml authors - * - * Permission is hereby granted, free of charge, to any person obtaining a copy - * of this software and associated documentation files (the "Software"), to deal - * in the Software without restriction, including without limitation the rights - * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell - * copies of the Software, and to permit persons to whom the Software is - * furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in all - * copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. - */ - -#include "common.cuh" - -#define CUDA_PAD_BLOCK_SIZE 256 - -void ggml_cuda_op_pad(ggml_backend_cuda_context & ctx, ggml_tensor * dst); -void ggml_cuda_op_unpad(ggml_backend_cuda_context & ctx, ggml_tensor * dst); diff --git a/llama/ggml-cuda/pool2d.cuh b/llama/ggml-cuda/pool2d.cuh deleted file mode 100644 index 9c0045f8..00000000 --- a/llama/ggml-cuda/pool2d.cuh +++ /dev/null @@ -1,31 +0,0 @@ -/** - * llama.cpp - commit 46e3556e01b824e52395fb050b29804b6cff2a7c - do not edit this file - * - * MIT License - * - * Copyright (c) 2023-2024 The ggml authors - * - * Permission is hereby granted, free of charge, to any person obtaining a copy - * of this software and associated documentation files (the "Software"), to deal - * in the Software without restriction, including without limitation the rights - * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell - * copies of the Software, and to permit persons to whom the Software is - * furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in all - * copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. - */ - -#include "common.cuh" - -#define CUDA_POOL2D_BLOCK_SIZE 256 - -void ggml_cuda_op_pool2d(ggml_backend_cuda_context & ctx, ggml_tensor * dst); diff --git a/llama/ggml-cuda/quantize.cuh b/llama/ggml-cuda/quantize.cuh deleted file mode 100644 index ee8e2a52..00000000 --- a/llama/ggml-cuda/quantize.cuh +++ /dev/null @@ -1,50 +0,0 @@ -/** - * llama.cpp - commit 46e3556e01b824e52395fb050b29804b6cff2a7c - do not edit this file - * - * MIT License - * - * Copyright (c) 2023-2024 The ggml authors - * - * Permission is hereby granted, free of charge, to any person obtaining a copy - * of this software and associated documentation files (the "Software"), to deal - * in the Software without restriction, including without limitation the rights - * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell - * copies of the Software, and to permit persons to whom the Software is - * furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in all - * copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. 
- */ - -#pragma once - -#include "common.cuh" -#include "mmq.cuh" - -#include - -#define CUDA_QUANTIZE_BLOCK_SIZE 256 -#define CUDA_QUANTIZE_BLOCK_SIZE_MMQ 128 - -static_assert(MATRIX_ROW_PADDING % CUDA_QUANTIZE_BLOCK_SIZE == 0, "Risk of out-of-bounds access."); -static_assert(MATRIX_ROW_PADDING % (4*CUDA_QUANTIZE_BLOCK_SIZE_MMQ) == 0, "Risk of out-of-bounds access."); - -typedef void (*quantize_cuda_t)( - const float * x, void * vy, const int64_t kx0, const int64_t kx1, const int64_t channels, const int64_t kx0_padded, - const ggml_type type_x, cudaStream_t stream); - -void quantize_row_q8_1_cuda( - const float * x, void * vy, const int64_t kx0, const int64_t kx1, const int64_t channels, const int64_t kx0_padded, - const ggml_type type_x, cudaStream_t stream); - -void quantize_mmq_q8_1_cuda( - const float * x, void * vy, const int64_t kx0, const int64_t kx1, const int64_t channels, const int64_t kx0_padded, - const ggml_type type_x, cudaStream_t stream); diff --git a/llama/ggml-cuda/rope.cuh b/llama/ggml-cuda/rope.cuh deleted file mode 100644 index cd5140ce..00000000 --- a/llama/ggml-cuda/rope.cuh +++ /dev/null @@ -1,31 +0,0 @@ -/** - * llama.cpp - commit 46e3556e01b824e52395fb050b29804b6cff2a7c - do not edit this file - * - * MIT License - * - * Copyright (c) 2023-2024 The ggml authors - * - * Permission is hereby granted, free of charge, to any person obtaining a copy - * of this software and associated documentation files (the "Software"), to deal - * in the Software without restriction, including without limitation the rights - * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell - * copies of the Software, and to permit persons to whom the Software is - * furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in all - * copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. - */ - -#include "common.cuh" - -#define CUDA_ROPE_BLOCK_SIZE 256 - -void ggml_cuda_op_rope(ggml_backend_cuda_context & ctx, ggml_tensor * dst); diff --git a/llama/ggml-cuda/scale.cu b/llama/ggml-cuda/scale.cu deleted file mode 100644 index b3b38cdf..00000000 --- a/llama/ggml-cuda/scale.cu +++ /dev/null @@ -1,57 +0,0 @@ -/** - * llama.cpp - commit 46e3556e01b824e52395fb050b29804b6cff2a7c - do not edit this file - * - * MIT License - * - * Copyright (c) 2023-2024 The ggml authors - * - * Permission is hereby granted, free of charge, to any person obtaining a copy - * of this software and associated documentation files (the "Software"), to deal - * in the Software without restriction, including without limitation the rights - * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell - * copies of the Software, and to permit persons to whom the Software is - * furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in all - * copies or substantial portions of the Software. 
- * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. - */ - -#include "scale.cuh" - -static __global__ void scale_f32(const float * x, float * dst, const float scale, const int k) { - const int i = blockDim.x*blockIdx.x + threadIdx.x; - - if (i >= k) { - return; - } - - dst[i] = scale * x[i]; -} - -static void scale_f32_cuda(const float * x, float * dst, const float scale, const int k, cudaStream_t stream) { - const int num_blocks = (k + CUDA_SCALE_BLOCK_SIZE - 1) / CUDA_SCALE_BLOCK_SIZE; - scale_f32<<>>(x, dst, scale, k); -} - -void ggml_cuda_op_scale(ggml_backend_cuda_context & ctx, ggml_tensor * dst) { - const ggml_tensor * src0 = dst->src[0]; - const float * src0_d = (const float *)src0->data; - float * dst_d = (float *)dst->data; - cudaStream_t stream = ctx.stream(); - - GGML_ASSERT(src0->type == GGML_TYPE_F32); - GGML_ASSERT( dst->type == GGML_TYPE_F32); - - float scale; - memcpy(&scale, dst->op_params, sizeof(float)); - - scale_f32_cuda(src0_d, dst_d, scale, ggml_nelements(src0), stream); -} diff --git a/llama/ggml-cuda/scale.cuh b/llama/ggml-cuda/scale.cuh deleted file mode 100644 index ae2ec5af..00000000 --- a/llama/ggml-cuda/scale.cuh +++ /dev/null @@ -1,31 +0,0 @@ -/** - * llama.cpp - commit 46e3556e01b824e52395fb050b29804b6cff2a7c - do not edit this file - * - * MIT License - * - * Copyright (c) 2023-2024 The ggml authors - * - * Permission is hereby granted, free of charge, to any person obtaining a copy - * of this software and associated documentation files (the "Software"), to deal - * in the Software without restriction, including without limitation the rights - * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell - * copies of the Software, and to permit persons to whom the Software is - * furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in all - * copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. 
- */ - -#include "common.cuh" - -#define CUDA_SCALE_BLOCK_SIZE 256 - -void ggml_cuda_op_scale(ggml_backend_cuda_context & ctx, ggml_tensor * dst); diff --git a/llama/ggml-cuda/softmax.cuh b/llama/ggml-cuda/softmax.cuh deleted file mode 100644 index 85459e24..00000000 --- a/llama/ggml-cuda/softmax.cuh +++ /dev/null @@ -1,31 +0,0 @@ -/** - * llama.cpp - commit 46e3556e01b824e52395fb050b29804b6cff2a7c - do not edit this file - * - * MIT License - * - * Copyright (c) 2023-2024 The ggml authors - * - * Permission is hereby granted, free of charge, to any person obtaining a copy - * of this software and associated documentation files (the "Software"), to deal - * in the Software without restriction, including without limitation the rights - * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell - * copies of the Software, and to permit persons to whom the Software is - * furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in all - * copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. - */ - -#include "common.cuh" - -#define CUDA_SOFT_MAX_BLOCK_SIZE 1024 - -void ggml_cuda_op_soft_max(ggml_backend_cuda_context & ctx, ggml_tensor * dst); diff --git a/llama/ggml-cuda/sum.cuh b/llama/ggml-cuda/sum.cuh deleted file mode 100644 index 6883be87..00000000 --- a/llama/ggml-cuda/sum.cuh +++ /dev/null @@ -1,31 +0,0 @@ -/** - * llama.cpp - commit 46e3556e01b824e52395fb050b29804b6cff2a7c - do not edit this file - * - * MIT License - * - * Copyright (c) 2023-2024 The ggml authors - * - * Permission is hereby granted, free of charge, to any person obtaining a copy - * of this software and associated documentation files (the "Software"), to deal - * in the Software without restriction, including without limitation the rights - * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell - * copies of the Software, and to permit persons to whom the Software is - * furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in all - * copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. 
- */ - -#include "common.cuh" - -void sum_f32_cuda(ggml_cuda_pool & pool, const float * x, float * dst, const int64_t ne, cudaStream_t stream); - -void ggml_cuda_op_sum(ggml_backend_cuda_context & ctx, ggml_tensor * dst); diff --git a/llama/ggml-cuda/sumrows.cu b/llama/ggml-cuda/sumrows.cu deleted file mode 100644 index fbd3cd87..00000000 --- a/llama/ggml-cuda/sumrows.cu +++ /dev/null @@ -1,65 +0,0 @@ -/** - * llama.cpp - commit 46e3556e01b824e52395fb050b29804b6cff2a7c - do not edit this file - * - * MIT License - * - * Copyright (c) 2023-2024 The ggml authors - * - * Permission is hereby granted, free of charge, to any person obtaining a copy - * of this software and associated documentation files (the "Software"), to deal - * in the Software without restriction, including without limitation the rights - * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell - * copies of the Software, and to permit persons to whom the Software is - * furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in all - * copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. - */ - -#include "sumrows.cuh" - -static __global__ void k_sum_rows_f32(const float * x, float * dst, const int ncols) { - const int row = blockIdx.x; - const int col = threadIdx.x; - - float sum = 0.0f; - for (int i = col; i < ncols; i += blockDim.x) { - sum += x[row * ncols + i]; - } - - sum = warp_reduce_sum(sum); - - if (col == 0) { - dst[row] = sum; - } -} - -void sum_rows_f32_cuda(const float * x, float * dst, const int ncols, const int nrows, cudaStream_t stream) { - const dim3 block_dims(WARP_SIZE, 1, 1); - const dim3 block_nums(nrows, 1, 1); - k_sum_rows_f32<<>>(x, dst, ncols); -} - -void ggml_cuda_op_sum_rows(ggml_backend_cuda_context & ctx, ggml_tensor * dst) { - const ggml_tensor * src0 = dst->src[0]; - const float * src0_d = (const float *)src0->data; - float * dst_d = (float *)dst->data; - cudaStream_t stream = ctx.stream(); - - GGML_ASSERT(src0->type == GGML_TYPE_F32); - GGML_ASSERT( dst->type == GGML_TYPE_F32); - GGML_ASSERT(ggml_is_contiguous(src0)); - - const int64_t ncols = src0->ne[0]; - const int64_t nrows = ggml_nrows(src0); - - sum_rows_f32_cuda(src0_d, dst_d, ncols, nrows, stream); -} diff --git a/llama/ggml-cuda/sumrows.cuh b/llama/ggml-cuda/sumrows.cuh deleted file mode 100644 index 204384f5..00000000 --- a/llama/ggml-cuda/sumrows.cuh +++ /dev/null @@ -1,31 +0,0 @@ -/** - * llama.cpp - commit 46e3556e01b824e52395fb050b29804b6cff2a7c - do not edit this file - * - * MIT License - * - * Copyright (c) 2023-2024 The ggml authors - * - * Permission is hereby granted, free of charge, to any person obtaining a copy - * of this software and associated documentation files (the "Software"), to deal - * in the Software without restriction, including without limitation the rights - * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell - * copies of the Software, and to permit persons to whom the Software is - * furnished to do 
so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in all - * copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. - */ - -#include "common.cuh" - -void sum_rows_f32_cuda(const float * x, float * dst, const int ncols, const int nrows, cudaStream_t stream); - -void ggml_cuda_op_sum_rows(ggml_backend_cuda_context & ctx, ggml_tensor * dst); diff --git a/llama/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-f16-f16.cu b/llama/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-f16-f16.cu deleted file mode 100644 index 48cdc8f4..00000000 --- a/llama/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-f16-f16.cu +++ /dev/null @@ -1,31 +0,0 @@ -/** - * llama.cpp - commit 46e3556e01b824e52395fb050b29804b6cff2a7c - do not edit this file - * - * MIT License - * - * Copyright (c) 2023-2024 The ggml authors - * - * Permission is hereby granted, free of charge, to any person obtaining a copy - * of this software and associated documentation files (the "Software"), to deal - * in the Software without restriction, including without limitation the rights - * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell - * copies of the Software, and to permit persons to whom the Software is - * furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in all - * copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. - */ - -// This file has been autogenerated by generate_cu_files.py, do not edit manually. 
- -#include "../fattn-vec-f16.cuh" - -DECL_FATTN_VEC_F16_CASE(128, GGML_TYPE_F16, GGML_TYPE_F16); diff --git a/llama/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-f16-q4_0.cu b/llama/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-f16-q4_0.cu deleted file mode 100644 index 6aeab0ba..00000000 --- a/llama/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-f16-q4_0.cu +++ /dev/null @@ -1,31 +0,0 @@ -/** - * llama.cpp - commit 46e3556e01b824e52395fb050b29804b6cff2a7c - do not edit this file - * - * MIT License - * - * Copyright (c) 2023-2024 The ggml authors - * - * Permission is hereby granted, free of charge, to any person obtaining a copy - * of this software and associated documentation files (the "Software"), to deal - * in the Software without restriction, including without limitation the rights - * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell - * copies of the Software, and to permit persons to whom the Software is - * furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in all - * copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. - */ - -// This file has been autogenerated by generate_cu_files.py, do not edit manually. - -#include "../fattn-vec-f16.cuh" - -DECL_FATTN_VEC_F16_CASE(128, GGML_TYPE_F16, GGML_TYPE_Q4_0); diff --git a/llama/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-f16-q4_1.cu b/llama/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-f16-q4_1.cu deleted file mode 100644 index 2d98ef1a..00000000 --- a/llama/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-f16-q4_1.cu +++ /dev/null @@ -1,31 +0,0 @@ -/** - * llama.cpp - commit 46e3556e01b824e52395fb050b29804b6cff2a7c - do not edit this file - * - * MIT License - * - * Copyright (c) 2023-2024 The ggml authors - * - * Permission is hereby granted, free of charge, to any person obtaining a copy - * of this software and associated documentation files (the "Software"), to deal - * in the Software without restriction, including without limitation the rights - * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell - * copies of the Software, and to permit persons to whom the Software is - * furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in all - * copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. - */ - -// This file has been autogenerated by generate_cu_files.py, do not edit manually. 
- -#include "../fattn-vec-f16.cuh" - -DECL_FATTN_VEC_F16_CASE(128, GGML_TYPE_F16, GGML_TYPE_Q4_1); diff --git a/llama/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-f16-q5_0.cu b/llama/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-f16-q5_0.cu deleted file mode 100644 index 7fe280e0..00000000 --- a/llama/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-f16-q5_0.cu +++ /dev/null @@ -1,31 +0,0 @@ -/** - * llama.cpp - commit 46e3556e01b824e52395fb050b29804b6cff2a7c - do not edit this file - * - * MIT License - * - * Copyright (c) 2023-2024 The ggml authors - * - * Permission is hereby granted, free of charge, to any person obtaining a copy - * of this software and associated documentation files (the "Software"), to deal - * in the Software without restriction, including without limitation the rights - * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell - * copies of the Software, and to permit persons to whom the Software is - * furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in all - * copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. - */ - -// This file has been autogenerated by generate_cu_files.py, do not edit manually. - -#include "../fattn-vec-f16.cuh" - -DECL_FATTN_VEC_F16_CASE(128, GGML_TYPE_F16, GGML_TYPE_Q5_0); diff --git a/llama/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-f16-q5_1.cu b/llama/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-f16-q5_1.cu deleted file mode 100644 index 9835cbfa..00000000 --- a/llama/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-f16-q5_1.cu +++ /dev/null @@ -1,31 +0,0 @@ -/** - * llama.cpp - commit 46e3556e01b824e52395fb050b29804b6cff2a7c - do not edit this file - * - * MIT License - * - * Copyright (c) 2023-2024 The ggml authors - * - * Permission is hereby granted, free of charge, to any person obtaining a copy - * of this software and associated documentation files (the "Software"), to deal - * in the Software without restriction, including without limitation the rights - * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell - * copies of the Software, and to permit persons to whom the Software is - * furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in all - * copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. - */ - -// This file has been autogenerated by generate_cu_files.py, do not edit manually. 
- -#include "../fattn-vec-f16.cuh" - -DECL_FATTN_VEC_F16_CASE(128, GGML_TYPE_F16, GGML_TYPE_Q5_1); diff --git a/llama/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-f16-q8_0.cu b/llama/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-f16-q8_0.cu deleted file mode 100644 index 45ffa2a8..00000000 --- a/llama/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-f16-q8_0.cu +++ /dev/null @@ -1,31 +0,0 @@ -/** - * llama.cpp - commit 46e3556e01b824e52395fb050b29804b6cff2a7c - do not edit this file - * - * MIT License - * - * Copyright (c) 2023-2024 The ggml authors - * - * Permission is hereby granted, free of charge, to any person obtaining a copy - * of this software and associated documentation files (the "Software"), to deal - * in the Software without restriction, including without limitation the rights - * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell - * copies of the Software, and to permit persons to whom the Software is - * furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in all - * copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. - */ - -// This file has been autogenerated by generate_cu_files.py, do not edit manually. - -#include "../fattn-vec-f16.cuh" - -DECL_FATTN_VEC_F16_CASE(128, GGML_TYPE_F16, GGML_TYPE_Q8_0); diff --git a/llama/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-q4_0-f16.cu b/llama/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-q4_0-f16.cu deleted file mode 100644 index 592287a8..00000000 --- a/llama/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-q4_0-f16.cu +++ /dev/null @@ -1,31 +0,0 @@ -/** - * llama.cpp - commit 46e3556e01b824e52395fb050b29804b6cff2a7c - do not edit this file - * - * MIT License - * - * Copyright (c) 2023-2024 The ggml authors - * - * Permission is hereby granted, free of charge, to any person obtaining a copy - * of this software and associated documentation files (the "Software"), to deal - * in the Software without restriction, including without limitation the rights - * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell - * copies of the Software, and to permit persons to whom the Software is - * furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in all - * copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. - */ - -// This file has been autogenerated by generate_cu_files.py, do not edit manually. 
- -#include "../fattn-vec-f16.cuh" - -DECL_FATTN_VEC_F16_CASE(128, GGML_TYPE_Q4_0, GGML_TYPE_F16); diff --git a/llama/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-q4_0-q4_0.cu b/llama/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-q4_0-q4_0.cu deleted file mode 100644 index fe080a73..00000000 --- a/llama/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-q4_0-q4_0.cu +++ /dev/null @@ -1,31 +0,0 @@ -/** - * llama.cpp - commit 46e3556e01b824e52395fb050b29804b6cff2a7c - do not edit this file - * - * MIT License - * - * Copyright (c) 2023-2024 The ggml authors - * - * Permission is hereby granted, free of charge, to any person obtaining a copy - * of this software and associated documentation files (the "Software"), to deal - * in the Software without restriction, including without limitation the rights - * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell - * copies of the Software, and to permit persons to whom the Software is - * furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in all - * copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. - */ - -// This file has been autogenerated by generate_cu_files.py, do not edit manually. - -#include "../fattn-vec-f16.cuh" - -DECL_FATTN_VEC_F16_CASE(128, GGML_TYPE_Q4_0, GGML_TYPE_Q4_0); diff --git a/llama/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-q4_0-q4_1.cu b/llama/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-q4_0-q4_1.cu deleted file mode 100644 index 0580444e..00000000 --- a/llama/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-q4_0-q4_1.cu +++ /dev/null @@ -1,31 +0,0 @@ -/** - * llama.cpp - commit 46e3556e01b824e52395fb050b29804b6cff2a7c - do not edit this file - * - * MIT License - * - * Copyright (c) 2023-2024 The ggml authors - * - * Permission is hereby granted, free of charge, to any person obtaining a copy - * of this software and associated documentation files (the "Software"), to deal - * in the Software without restriction, including without limitation the rights - * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell - * copies of the Software, and to permit persons to whom the Software is - * furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in all - * copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. 
- */ - -// This file has been autogenerated by generate_cu_files.py, do not edit manually. - -#include "../fattn-vec-f16.cuh" - -DECL_FATTN_VEC_F16_CASE(128, GGML_TYPE_Q4_0, GGML_TYPE_Q4_1); diff --git a/llama/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-q4_0-q5_0.cu b/llama/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-q4_0-q5_0.cu deleted file mode 100644 index 5b2650d8..00000000 --- a/llama/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-q4_0-q5_0.cu +++ /dev/null @@ -1,31 +0,0 @@ -/** - * llama.cpp - commit 46e3556e01b824e52395fb050b29804b6cff2a7c - do not edit this file - * - * MIT License - * - * Copyright (c) 2023-2024 The ggml authors - * - * Permission is hereby granted, free of charge, to any person obtaining a copy - * of this software and associated documentation files (the "Software"), to deal - * in the Software without restriction, including without limitation the rights - * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell - * copies of the Software, and to permit persons to whom the Software is - * furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in all - * copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. - */ - -// This file has been autogenerated by generate_cu_files.py, do not edit manually. - -#include "../fattn-vec-f16.cuh" - -DECL_FATTN_VEC_F16_CASE(128, GGML_TYPE_Q4_0, GGML_TYPE_Q5_0); diff --git a/llama/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-q4_0-q5_1.cu b/llama/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-q4_0-q5_1.cu deleted file mode 100644 index 886ba395..00000000 --- a/llama/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-q4_0-q5_1.cu +++ /dev/null @@ -1,31 +0,0 @@ -/** - * llama.cpp - commit 46e3556e01b824e52395fb050b29804b6cff2a7c - do not edit this file - * - * MIT License - * - * Copyright (c) 2023-2024 The ggml authors - * - * Permission is hereby granted, free of charge, to any person obtaining a copy - * of this software and associated documentation files (the "Software"), to deal - * in the Software without restriction, including without limitation the rights - * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell - * copies of the Software, and to permit persons to whom the Software is - * furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in all - * copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-
-// This file has been autogenerated by generate_cu_files.py, do not edit manually.
-
-#include "../fattn-vec-f16.cuh"
-
-DECL_FATTN_VEC_F16_CASE(128, GGML_TYPE_Q4_0, GGML_TYPE_Q5_1);
diff --git a/llama/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-q4_0-q8_0.cu b/llama/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-q4_0-q8_0.cu
deleted file mode 100644
index 789757a8..00000000
--- a/llama/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-q4_0-q8_0.cu
+++ /dev/null
@@ -1,31 +0,0 @@
-/**
- * llama.cpp - commit 46e3556e01b824e52395fb050b29804b6cff2a7c - do not edit this file
- *
- * MIT License
- *
- * Copyright (c) 2023-2024 The ggml authors
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to deal
- * in the Software without restriction, including without limitation the rights
- * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
- * copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-
-// This file has been autogenerated by generate_cu_files.py, do not edit manually.
-
-#include "../fattn-vec-f16.cuh"
-
-DECL_FATTN_VEC_F16_CASE(128, GGML_TYPE_Q4_0, GGML_TYPE_Q8_0);
diff --git a/llama/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-q4_1-f16.cu b/llama/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-q4_1-f16.cu
deleted file mode 100644
index a4bfe23f..00000000
--- a/llama/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-q4_1-f16.cu
+++ /dev/null
@@ -1,31 +0,0 @@
-/**
- * llama.cpp - commit 46e3556e01b824e52395fb050b29804b6cff2a7c - do not edit this file
- *
- * MIT License
- *
- * Copyright (c) 2023-2024 The ggml authors
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to deal
- * in the Software without restriction, including without limitation the rights
- * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
- * copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-
-// This file has been autogenerated by generate_cu_files.py, do not edit manually.
-
-#include "../fattn-vec-f16.cuh"
-
-DECL_FATTN_VEC_F16_CASE(128, GGML_TYPE_Q4_1, GGML_TYPE_F16);
diff --git a/llama/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-q4_1-q4_0.cu b/llama/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-q4_1-q4_0.cu
deleted file mode 100644
index eab22f0d..00000000
--- a/llama/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-q4_1-q4_0.cu
+++ /dev/null
@@ -1,31 +0,0 @@
-/**
- * llama.cpp - commit 46e3556e01b824e52395fb050b29804b6cff2a7c - do not edit this file
- *
- * MIT License
- *
- * Copyright (c) 2023-2024 The ggml authors
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to deal
- * in the Software without restriction, including without limitation the rights
- * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
- * copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-
-// This file has been autogenerated by generate_cu_files.py, do not edit manually.
-
-#include "../fattn-vec-f16.cuh"
-
-DECL_FATTN_VEC_F16_CASE(128, GGML_TYPE_Q4_1, GGML_TYPE_Q4_0);
diff --git a/llama/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-q4_1-q4_1.cu b/llama/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-q4_1-q4_1.cu
deleted file mode 100644
index 3301160f..00000000
--- a/llama/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-q4_1-q4_1.cu
+++ /dev/null
@@ -1,31 +0,0 @@
-/**
- * llama.cpp - commit 46e3556e01b824e52395fb050b29804b6cff2a7c - do not edit this file
- *
- * MIT License
- *
- * Copyright (c) 2023-2024 The ggml authors
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to deal
- * in the Software without restriction, including without limitation the rights
- * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
- * copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-
-// This file has been autogenerated by generate_cu_files.py, do not edit manually.
-
-#include "../fattn-vec-f16.cuh"
-
-DECL_FATTN_VEC_F16_CASE(128, GGML_TYPE_Q4_1, GGML_TYPE_Q4_1);
diff --git a/llama/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-q4_1-q5_0.cu b/llama/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-q4_1-q5_0.cu
deleted file mode 100644
index aa37c412..00000000
--- a/llama/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-q4_1-q5_0.cu
+++ /dev/null
@@ -1,31 +0,0 @@
-/**
- * llama.cpp - commit 46e3556e01b824e52395fb050b29804b6cff2a7c - do not edit this file
- *
- * MIT License
- *
- * Copyright (c) 2023-2024 The ggml authors
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to deal
- * in the Software without restriction, including without limitation the rights
- * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
- * copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-
-// This file has been autogenerated by generate_cu_files.py, do not edit manually.
-
-#include "../fattn-vec-f16.cuh"
-
-DECL_FATTN_VEC_F16_CASE(128, GGML_TYPE_Q4_1, GGML_TYPE_Q5_0);
diff --git a/llama/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-q4_1-q5_1.cu b/llama/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-q4_1-q5_1.cu
deleted file mode 100644
index a2dd8d86..00000000
--- a/llama/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-q4_1-q5_1.cu
+++ /dev/null
@@ -1,31 +0,0 @@
-/**
- * llama.cpp - commit 46e3556e01b824e52395fb050b29804b6cff2a7c - do not edit this file
- *
- * MIT License
- *
- * Copyright (c) 2023-2024 The ggml authors
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to deal
- * in the Software without restriction, including without limitation the rights
- * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
- * copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-
-// This file has been autogenerated by generate_cu_files.py, do not edit manually.
-
-#include "../fattn-vec-f16.cuh"
-
-DECL_FATTN_VEC_F16_CASE(128, GGML_TYPE_Q4_1, GGML_TYPE_Q5_1);
diff --git a/llama/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-q4_1-q8_0.cu b/llama/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-q4_1-q8_0.cu
deleted file mode 100644
index 709c2de0..00000000
--- a/llama/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-q4_1-q8_0.cu
+++ /dev/null
@@ -1,31 +0,0 @@
-/**
- * llama.cpp - commit 46e3556e01b824e52395fb050b29804b6cff2a7c - do not edit this file
- *
- * MIT License
- *
- * Copyright (c) 2023-2024 The ggml authors
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to deal
- * in the Software without restriction, including without limitation the rights
- * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
- * copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-
-// This file has been autogenerated by generate_cu_files.py, do not edit manually.
-
-#include "../fattn-vec-f16.cuh"
-
-DECL_FATTN_VEC_F16_CASE(128, GGML_TYPE_Q4_1, GGML_TYPE_Q8_0);
diff --git a/llama/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-q5_0-f16.cu b/llama/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-q5_0-f16.cu
deleted file mode 100644
index 3279dad9..00000000
--- a/llama/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-q5_0-f16.cu
+++ /dev/null
@@ -1,31 +0,0 @@
-/**
- * llama.cpp - commit 46e3556e01b824e52395fb050b29804b6cff2a7c - do not edit this file
- *
- * MIT License
- *
- * Copyright (c) 2023-2024 The ggml authors
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to deal
- * in the Software without restriction, including without limitation the rights
- * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
- * copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-
-// This file has been autogenerated by generate_cu_files.py, do not edit manually.
-
-#include "../fattn-vec-f16.cuh"
-
-DECL_FATTN_VEC_F16_CASE(128, GGML_TYPE_Q5_0, GGML_TYPE_F16);
diff --git a/llama/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-q5_0-q4_0.cu b/llama/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-q5_0-q4_0.cu
deleted file mode 100644
index 4e112e13..00000000
--- a/llama/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-q5_0-q4_0.cu
+++ /dev/null
@@ -1,31 +0,0 @@
-/**
- * llama.cpp - commit 46e3556e01b824e52395fb050b29804b6cff2a7c - do not edit this file
- *
- * MIT License
- *
- * Copyright (c) 2023-2024 The ggml authors
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to deal
- * in the Software without restriction, including without limitation the rights
- * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
- * copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-
-// This file has been autogenerated by generate_cu_files.py, do not edit manually.
-
-#include "../fattn-vec-f16.cuh"
-
-DECL_FATTN_VEC_F16_CASE(128, GGML_TYPE_Q5_0, GGML_TYPE_Q4_0);
diff --git a/llama/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-q5_0-q4_1.cu b/llama/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-q5_0-q4_1.cu
deleted file mode 100644
index 8662359b..00000000
--- a/llama/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-q5_0-q4_1.cu
+++ /dev/null
@@ -1,31 +0,0 @@
-/**
- * llama.cpp - commit 46e3556e01b824e52395fb050b29804b6cff2a7c - do not edit this file
- *
- * MIT License
- *
- * Copyright (c) 2023-2024 The ggml authors
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to deal
- * in the Software without restriction, including without limitation the rights
- * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
- * copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-
-// This file has been autogenerated by generate_cu_files.py, do not edit manually.
-
-#include "../fattn-vec-f16.cuh"
-
-DECL_FATTN_VEC_F16_CASE(128, GGML_TYPE_Q5_0, GGML_TYPE_Q4_1);
diff --git a/llama/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-q5_0-q5_0.cu b/llama/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-q5_0-q5_0.cu
deleted file mode 100644
index bc3c7061..00000000
--- a/llama/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-q5_0-q5_0.cu
+++ /dev/null
@@ -1,31 +0,0 @@
-/**
- * llama.cpp - commit 46e3556e01b824e52395fb050b29804b6cff2a7c - do not edit this file
- *
- * MIT License
- *
- * Copyright (c) 2023-2024 The ggml authors
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to deal
- * in the Software without restriction, including without limitation the rights
- * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
- * copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-
-// This file has been autogenerated by generate_cu_files.py, do not edit manually.
-
-#include "../fattn-vec-f16.cuh"
-
-DECL_FATTN_VEC_F16_CASE(128, GGML_TYPE_Q5_0, GGML_TYPE_Q5_0);
diff --git a/llama/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-q5_0-q5_1.cu b/llama/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-q5_0-q5_1.cu
deleted file mode 100644
index 027c6d94..00000000
--- a/llama/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-q5_0-q5_1.cu
+++ /dev/null
@@ -1,31 +0,0 @@
-/**
- * llama.cpp - commit 46e3556e01b824e52395fb050b29804b6cff2a7c - do not edit this file
- *
- * MIT License
- *
- * Copyright (c) 2023-2024 The ggml authors
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to deal
- * in the Software without restriction, including without limitation the rights
- * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
- * copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-
-// This file has been autogenerated by generate_cu_files.py, do not edit manually.
-
-#include "../fattn-vec-f16.cuh"
-
-DECL_FATTN_VEC_F16_CASE(128, GGML_TYPE_Q5_0, GGML_TYPE_Q5_1);
diff --git a/llama/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-q5_0-q8_0.cu b/llama/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-q5_0-q8_0.cu
deleted file mode 100644
index 54334629..00000000
--- a/llama/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-q5_0-q8_0.cu
+++ /dev/null
@@ -1,31 +0,0 @@
-/**
- * llama.cpp - commit 46e3556e01b824e52395fb050b29804b6cff2a7c - do not edit this file
- *
- * MIT License
- *
- * Copyright (c) 2023-2024 The ggml authors
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to deal
- * in the Software without restriction, including without limitation the rights
- * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
- * copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-
-// This file has been autogenerated by generate_cu_files.py, do not edit manually.
-
-#include "../fattn-vec-f16.cuh"
-
-DECL_FATTN_VEC_F16_CASE(128, GGML_TYPE_Q5_0, GGML_TYPE_Q8_0);
diff --git a/llama/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-q5_1-f16.cu b/llama/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-q5_1-f16.cu
deleted file mode 100644
index 9cdcd1b3..00000000
--- a/llama/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-q5_1-f16.cu
+++ /dev/null
@@ -1,31 +0,0 @@
-/**
- * llama.cpp - commit 46e3556e01b824e52395fb050b29804b6cff2a7c - do not edit this file
- *
- * MIT License
- *
- * Copyright (c) 2023-2024 The ggml authors
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to deal
- * in the Software without restriction, including without limitation the rights
- * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
- * copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-
-// This file has been autogenerated by generate_cu_files.py, do not edit manually.
-
-#include "../fattn-vec-f16.cuh"
-
-DECL_FATTN_VEC_F16_CASE(128, GGML_TYPE_Q5_1, GGML_TYPE_F16);
diff --git a/llama/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-q5_1-q4_0.cu b/llama/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-q5_1-q4_0.cu
deleted file mode 100644
index 258e08b2..00000000
--- a/llama/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-q5_1-q4_0.cu
+++ /dev/null
@@ -1,31 +0,0 @@
-/**
- * llama.cpp - commit 46e3556e01b824e52395fb050b29804b6cff2a7c - do not edit this file
- *
- * MIT License
- *
- * Copyright (c) 2023-2024 The ggml authors
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to deal
- * in the Software without restriction, including without limitation the rights
- * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
- * copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-
-// This file has been autogenerated by generate_cu_files.py, do not edit manually.
-
-#include "../fattn-vec-f16.cuh"
-
-DECL_FATTN_VEC_F16_CASE(128, GGML_TYPE_Q5_1, GGML_TYPE_Q4_0);
diff --git a/llama/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-q5_1-q4_1.cu b/llama/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-q5_1-q4_1.cu
deleted file mode 100644
index 7c41007a..00000000
--- a/llama/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-q5_1-q4_1.cu
+++ /dev/null
@@ -1,31 +0,0 @@
-/**
- * llama.cpp - commit 46e3556e01b824e52395fb050b29804b6cff2a7c - do not edit this file
- *
- * MIT License
- *
- * Copyright (c) 2023-2024 The ggml authors
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to deal
- * in the Software without restriction, including without limitation the rights
- * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
- * copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-
-// This file has been autogenerated by generate_cu_files.py, do not edit manually.
-
-#include "../fattn-vec-f16.cuh"
-
-DECL_FATTN_VEC_F16_CASE(128, GGML_TYPE_Q5_1, GGML_TYPE_Q4_1);
diff --git a/llama/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-q5_1-q5_0.cu b/llama/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-q5_1-q5_0.cu
deleted file mode 100644
index 0296737f..00000000
--- a/llama/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-q5_1-q5_0.cu
+++ /dev/null
@@ -1,31 +0,0 @@
-/**
- * llama.cpp - commit 46e3556e01b824e52395fb050b29804b6cff2a7c - do not edit this file
- *
- * MIT License
- *
- * Copyright (c) 2023-2024 The ggml authors
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to deal
- * in the Software without restriction, including without limitation the rights
- * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
- * copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-
-// This file has been autogenerated by generate_cu_files.py, do not edit manually.
-
-#include "../fattn-vec-f16.cuh"
-
-DECL_FATTN_VEC_F16_CASE(128, GGML_TYPE_Q5_1, GGML_TYPE_Q5_0);
diff --git a/llama/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-q5_1-q5_1.cu b/llama/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-q5_1-q5_1.cu
deleted file mode 100644
index f9fdc197..00000000
--- a/llama/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-q5_1-q5_1.cu
+++ /dev/null
@@ -1,31 +0,0 @@
-/**
- * llama.cpp - commit 46e3556e01b824e52395fb050b29804b6cff2a7c - do not edit this file
- *
- * MIT License
- *
- * Copyright (c) 2023-2024 The ggml authors
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to deal
- * in the Software without restriction, including without limitation the rights
- * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
- * copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-
-// This file has been autogenerated by generate_cu_files.py, do not edit manually.
-
-#include "../fattn-vec-f16.cuh"
-
-DECL_FATTN_VEC_F16_CASE(128, GGML_TYPE_Q5_1, GGML_TYPE_Q5_1);
diff --git a/llama/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-q5_1-q8_0.cu b/llama/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-q5_1-q8_0.cu
deleted file mode 100644
index 518c6725..00000000
--- a/llama/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-q5_1-q8_0.cu
+++ /dev/null
@@ -1,31 +0,0 @@
-/**
- * llama.cpp - commit 46e3556e01b824e52395fb050b29804b6cff2a7c - do not edit this file
- *
- * MIT License
- *
- * Copyright (c) 2023-2024 The ggml authors
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to deal
- * in the Software without restriction, including without limitation the rights
- * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
- * copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-
-// This file has been autogenerated by generate_cu_files.py, do not edit manually.
-
-#include "../fattn-vec-f16.cuh"
-
-DECL_FATTN_VEC_F16_CASE(128, GGML_TYPE_Q5_1, GGML_TYPE_Q8_0);
diff --git a/llama/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-q8_0-f16.cu b/llama/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-q8_0-f16.cu
deleted file mode 100644
index dfb36938..00000000
--- a/llama/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-q8_0-f16.cu
+++ /dev/null
@@ -1,31 +0,0 @@
-/**
- * llama.cpp - commit 46e3556e01b824e52395fb050b29804b6cff2a7c - do not edit this file
- *
- * MIT License
- *
- * Copyright (c) 2023-2024 The ggml authors
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to deal
- * in the Software without restriction, including without limitation the rights
- * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
- * copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-
-// This file has been autogenerated by generate_cu_files.py, do not edit manually.
-
-#include "../fattn-vec-f16.cuh"
-
-DECL_FATTN_VEC_F16_CASE(128, GGML_TYPE_Q8_0, GGML_TYPE_F16);
diff --git a/llama/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-q8_0-q4_0.cu b/llama/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-q8_0-q4_0.cu
deleted file mode 100644
index 4ae01511..00000000
--- a/llama/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-q8_0-q4_0.cu
+++ /dev/null
@@ -1,31 +0,0 @@
-/**
- * llama.cpp - commit 46e3556e01b824e52395fb050b29804b6cff2a7c - do not edit this file
- *
- * MIT License
- *
- * Copyright (c) 2023-2024 The ggml authors
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to deal
- * in the Software without restriction, including without limitation the rights
- * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
- * copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-
-// This file has been autogenerated by generate_cu_files.py, do not edit manually.
-
-#include "../fattn-vec-f16.cuh"
-
-DECL_FATTN_VEC_F16_CASE(128, GGML_TYPE_Q8_0, GGML_TYPE_Q4_0);
diff --git a/llama/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-q8_0-q4_1.cu b/llama/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-q8_0-q4_1.cu
deleted file mode 100644
index a69a7acb..00000000
--- a/llama/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-q8_0-q4_1.cu
+++ /dev/null
@@ -1,31 +0,0 @@
-/**
- * llama.cpp - commit 46e3556e01b824e52395fb050b29804b6cff2a7c - do not edit this file
- *
- * MIT License
- *
- * Copyright (c) 2023-2024 The ggml authors
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to deal
- * in the Software without restriction, including without limitation the rights
- * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
- * copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-
-// This file has been autogenerated by generate_cu_files.py, do not edit manually.
-
-#include "../fattn-vec-f16.cuh"
-
-DECL_FATTN_VEC_F16_CASE(128, GGML_TYPE_Q8_0, GGML_TYPE_Q4_1);
diff --git a/llama/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-q8_0-q5_0.cu b/llama/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-q8_0-q5_0.cu
deleted file mode 100644
index a46aab8a..00000000
--- a/llama/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-q8_0-q5_0.cu
+++ /dev/null
@@ -1,31 +0,0 @@
-/**
- * llama.cpp - commit 46e3556e01b824e52395fb050b29804b6cff2a7c - do not edit this file
- *
- * MIT License
- *
- * Copyright (c) 2023-2024 The ggml authors
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to deal
- * in the Software without restriction, including without limitation the rights
- * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
- * copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-
-// This file has been autogenerated by generate_cu_files.py, do not edit manually.
-
-#include "../fattn-vec-f16.cuh"
-
-DECL_FATTN_VEC_F16_CASE(128, GGML_TYPE_Q8_0, GGML_TYPE_Q5_0);
diff --git a/llama/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-q8_0-q5_1.cu b/llama/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-q8_0-q5_1.cu
deleted file mode 100644
index 3fe4f970..00000000
--- a/llama/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-q8_0-q5_1.cu
+++ /dev/null
@@ -1,31 +0,0 @@
-/**
- * llama.cpp - commit 46e3556e01b824e52395fb050b29804b6cff2a7c - do not edit this file
- *
- * MIT License
- *
- * Copyright (c) 2023-2024 The ggml authors
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to deal
- * in the Software without restriction, including without limitation the rights
- * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
- * copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-
-// This file has been autogenerated by generate_cu_files.py, do not edit manually.
-
-#include "../fattn-vec-f16.cuh"
-
-DECL_FATTN_VEC_F16_CASE(128, GGML_TYPE_Q8_0, GGML_TYPE_Q5_1);
diff --git a/llama/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-q8_0-q8_0.cu b/llama/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-q8_0-q8_0.cu
deleted file mode 100644
index 933a5dd7..00000000
--- a/llama/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-q8_0-q8_0.cu
+++ /dev/null
@@ -1,31 +0,0 @@
-/**
- * llama.cpp - commit 46e3556e01b824e52395fb050b29804b6cff2a7c - do not edit this file
- *
- * MIT License
- *
- * Copyright (c) 2023-2024 The ggml authors
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to deal
- * in the Software without restriction, including without limitation the rights
- * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
- * copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-
-// This file has been autogenerated by generate_cu_files.py, do not edit manually.
-
-#include "../fattn-vec-f16.cuh"
-
-DECL_FATTN_VEC_F16_CASE(128, GGML_TYPE_Q8_0, GGML_TYPE_Q8_0);
diff --git a/llama/ggml-cuda/template-instances/fattn-vec-f16-instance-hs256-f16-f16.cu b/llama/ggml-cuda/template-instances/fattn-vec-f16-instance-hs256-f16-f16.cu
deleted file mode 100644
index b051c7d1..00000000
--- a/llama/ggml-cuda/template-instances/fattn-vec-f16-instance-hs256-f16-f16.cu
+++ /dev/null
@@ -1,31 +0,0 @@
-/**
- * llama.cpp - commit 46e3556e01b824e52395fb050b29804b6cff2a7c - do not edit this file
- *
- * MIT License
- *
- * Copyright (c) 2023-2024 The ggml authors
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to deal
- * in the Software without restriction, including without limitation the rights
- * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
- * copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-
-// This file has been autogenerated by generate_cu_files.py, do not edit manually.
-
-#include "../fattn-vec-f16.cuh"
-
-DECL_FATTN_VEC_F16_CASE(256, GGML_TYPE_F16, GGML_TYPE_F16);
diff --git a/llama/ggml-cuda/template-instances/fattn-vec-f16-instance-hs64-f16-f16.cu b/llama/ggml-cuda/template-instances/fattn-vec-f16-instance-hs64-f16-f16.cu
deleted file mode 100644
index 3a90aba7..00000000
--- a/llama/ggml-cuda/template-instances/fattn-vec-f16-instance-hs64-f16-f16.cu
+++ /dev/null
@@ -1,31 +0,0 @@
-/**
- * llama.cpp - commit 46e3556e01b824e52395fb050b29804b6cff2a7c - do not edit this file
- *
- * MIT License
- *
- * Copyright (c) 2023-2024 The ggml authors
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to deal
- * in the Software without restriction, including without limitation the rights
- * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
- * copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-
-// This file has been autogenerated by generate_cu_files.py, do not edit manually.
-
-#include "../fattn-vec-f16.cuh"
-
-DECL_FATTN_VEC_F16_CASE(64, GGML_TYPE_F16, GGML_TYPE_F16);
diff --git a/llama/ggml-cuda/template-instances/fattn-vec-f16-instance-hs64-f16-q4_0.cu b/llama/ggml-cuda/template-instances/fattn-vec-f16-instance-hs64-f16-q4_0.cu
deleted file mode 100644
index 3ddad858..00000000
--- a/llama/ggml-cuda/template-instances/fattn-vec-f16-instance-hs64-f16-q4_0.cu
+++ /dev/null
@@ -1,31 +0,0 @@
-/**
- * llama.cpp - commit 46e3556e01b824e52395fb050b29804b6cff2a7c - do not edit this file
- *
- * MIT License
- *
- * Copyright (c) 2023-2024 The ggml authors
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to deal
- * in the Software without restriction, including without limitation the rights
- * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
- * copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-
-// This file has been autogenerated by generate_cu_files.py, do not edit manually.
-
-#include "../fattn-vec-f16.cuh"
-
-DECL_FATTN_VEC_F16_CASE(64, GGML_TYPE_F16, GGML_TYPE_Q4_0);
diff --git a/llama/ggml-cuda/template-instances/fattn-vec-f16-instance-hs64-f16-q4_1.cu b/llama/ggml-cuda/template-instances/fattn-vec-f16-instance-hs64-f16-q4_1.cu
deleted file mode 100644
index df3ce0a3..00000000
--- a/llama/ggml-cuda/template-instances/fattn-vec-f16-instance-hs64-f16-q4_1.cu
+++ /dev/null
@@ -1,31 +0,0 @@
-/**
- * llama.cpp - commit 46e3556e01b824e52395fb050b29804b6cff2a7c - do not edit this file
- *
- * MIT License
- *
- * Copyright (c) 2023-2024 The ggml authors
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to deal
- * in the Software without restriction, including without limitation the rights
- * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
- * copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-
-// This file has been autogenerated by generate_cu_files.py, do not edit manually.
-
-#include "../fattn-vec-f16.cuh"
-
-DECL_FATTN_VEC_F16_CASE(64, GGML_TYPE_F16, GGML_TYPE_Q4_1);
diff --git a/llama/ggml-cuda/template-instances/fattn-vec-f16-instance-hs64-f16-q5_0.cu b/llama/ggml-cuda/template-instances/fattn-vec-f16-instance-hs64-f16-q5_0.cu
deleted file mode 100644
index 49d2666a..00000000
--- a/llama/ggml-cuda/template-instances/fattn-vec-f16-instance-hs64-f16-q5_0.cu
+++ /dev/null
@@ -1,31 +0,0 @@
-/**
- * llama.cpp - commit 46e3556e01b824e52395fb050b29804b6cff2a7c - do not edit this file
- *
- * MIT License
- *
- * Copyright (c) 2023-2024 The ggml authors
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to deal
- * in the Software without restriction, including without limitation the rights
- * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
- * copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-
-// This file has been autogenerated by generate_cu_files.py, do not edit manually.
-
-#include "../fattn-vec-f16.cuh"
-
-DECL_FATTN_VEC_F16_CASE(64, GGML_TYPE_F16, GGML_TYPE_Q5_0);
diff --git a/llama/ggml-cuda/template-instances/fattn-vec-f16-instance-hs64-f16-q5_1.cu b/llama/ggml-cuda/template-instances/fattn-vec-f16-instance-hs64-f16-q5_1.cu
deleted file mode 100644
index 531c87c2..00000000
--- a/llama/ggml-cuda/template-instances/fattn-vec-f16-instance-hs64-f16-q5_1.cu
+++ /dev/null
@@ -1,31 +0,0 @@
-/**
- * llama.cpp - commit 46e3556e01b824e52395fb050b29804b6cff2a7c - do not edit this file
- *
- * MIT License
- *
- * Copyright (c) 2023-2024 The ggml authors
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to deal
- * in the Software without restriction, including without limitation the rights
- * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
- * copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-
-// This file has been autogenerated by generate_cu_files.py, do not edit manually.
-
-#include "../fattn-vec-f16.cuh"
-
-DECL_FATTN_VEC_F16_CASE(64, GGML_TYPE_F16, GGML_TYPE_Q5_1);
diff --git a/llama/ggml-cuda/template-instances/fattn-vec-f16-instance-hs64-f16-q8_0.cu b/llama/ggml-cuda/template-instances/fattn-vec-f16-instance-hs64-f16-q8_0.cu
deleted file mode 100644
index e747f6e7..00000000
--- a/llama/ggml-cuda/template-instances/fattn-vec-f16-instance-hs64-f16-q8_0.cu
+++ /dev/null
@@ -1,31 +0,0 @@
-/**
- * llama.cpp - commit 46e3556e01b824e52395fb050b29804b6cff2a7c - do not edit this file
- *
- * MIT License
- *
- * Copyright (c) 2023-2024 The ggml authors
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to deal
- * in the Software without restriction, including without limitation the rights
- * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
- * copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-
-// This file has been autogenerated by generate_cu_files.py, do not edit manually.
-
-#include "../fattn-vec-f16.cuh"
-
-DECL_FATTN_VEC_F16_CASE(64, GGML_TYPE_F16, GGML_TYPE_Q8_0);
diff --git a/llama/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-f16-f16.cu b/llama/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-f16-f16.cu
deleted file mode 100644
index d6097d1c..00000000
--- a/llama/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-f16-f16.cu
+++ /dev/null
@@ -1,31 +0,0 @@
-/**
- * llama.cpp - commit 46e3556e01b824e52395fb050b29804b6cff2a7c - do not edit this file
- *
- * MIT License
- *
- * Copyright (c) 2023-2024 The ggml authors
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to deal
- * in the Software without restriction, including without limitation the rights
- * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
- * copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-
-// This file has been autogenerated by generate_cu_files.py, do not edit manually.
-
-#include "../fattn-vec-f32.cuh"
-
-DECL_FATTN_VEC_F32_CASE(128, GGML_TYPE_F16, GGML_TYPE_F16);
diff --git a/llama/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-f16-q4_0.cu b/llama/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-f16-q4_0.cu
deleted file mode 100644
index a6bda11f..00000000
--- a/llama/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-f16-q4_0.cu
+++ /dev/null
@@ -1,31 +0,0 @@
-/**
- * llama.cpp - commit 46e3556e01b824e52395fb050b29804b6cff2a7c - do not edit this file
- *
- * MIT License
- *
- * Copyright (c) 2023-2024 The ggml authors
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to deal
- * in the Software without restriction, including without limitation the rights
- * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
- * copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-
-// This file has been autogenerated by generate_cu_files.py, do not edit manually.
-
-#include "../fattn-vec-f32.cuh"
-
-DECL_FATTN_VEC_F32_CASE(128, GGML_TYPE_F16, GGML_TYPE_Q4_0);
diff --git a/llama/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-f16-q4_1.cu b/llama/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-f16-q4_1.cu
deleted file mode 100644
index 800ea14f..00000000
--- a/llama/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-f16-q4_1.cu
+++ /dev/null
@@ -1,31 +0,0 @@
-/**
- * llama.cpp - commit 46e3556e01b824e52395fb050b29804b6cff2a7c - do not edit this file
- *
- * MIT License
- *
- * Copyright (c) 2023-2024 The ggml authors
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to deal
- * in the Software without restriction, including without limitation the rights
- * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
- * copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-
-// This file has been autogenerated by generate_cu_files.py, do not edit manually.
-
-#include "../fattn-vec-f32.cuh"
-
-DECL_FATTN_VEC_F32_CASE(128, GGML_TYPE_F16, GGML_TYPE_Q4_1);
diff --git a/llama/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-f16-q5_0.cu b/llama/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-f16-q5_0.cu
deleted file mode 100644
index b3bad6b0..00000000
--- a/llama/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-f16-q5_0.cu
+++ /dev/null
@@ -1,31 +0,0 @@
-/**
- * llama.cpp - commit 46e3556e01b824e52395fb050b29804b6cff2a7c - do not edit this file
- *
- * MIT License
- *
- * Copyright (c) 2023-2024 The ggml authors
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to deal
- * in the Software without restriction, including without limitation the rights
- * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
- * copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-
-// This file has been autogenerated by generate_cu_files.py, do not edit manually.
-
-#include "../fattn-vec-f32.cuh"
-
-DECL_FATTN_VEC_F32_CASE(128, GGML_TYPE_F16, GGML_TYPE_Q5_0);
diff --git a/llama/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-f16-q5_1.cu b/llama/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-f16-q5_1.cu
deleted file mode 100644
index 6a7127dd..00000000
--- a/llama/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-f16-q5_1.cu
+++ /dev/null
@@ -1,31 +0,0 @@
-/**
- * llama.cpp - commit 46e3556e01b824e52395fb050b29804b6cff2a7c - do not edit this file
- *
- * MIT License
- *
- * Copyright (c) 2023-2024 The ggml authors
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to deal
- * in the Software without restriction, including without limitation the rights
- * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
- * copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-
-// This file has been autogenerated by generate_cu_files.py, do not edit manually.
-
-#include "../fattn-vec-f32.cuh"
-
-DECL_FATTN_VEC_F32_CASE(128, GGML_TYPE_F16, GGML_TYPE_Q5_1);
diff --git a/llama/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-f16-q8_0.cu b/llama/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-f16-q8_0.cu
deleted file mode 100644
index 62351c23..00000000
--- a/llama/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-f16-q8_0.cu
+++ /dev/null
@@ -1,31 +0,0 @@
-/**
- * llama.cpp - commit 46e3556e01b824e52395fb050b29804b6cff2a7c - do not edit this file
- *
- * MIT License
- *
- * Copyright (c) 2023-2024 The ggml authors
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to deal
- * in the Software without restriction, including without limitation the rights
- * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
- * copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-
-// This file has been autogenerated by generate_cu_files.py, do not edit manually.
-
-#include "../fattn-vec-f32.cuh"
-
-DECL_FATTN_VEC_F32_CASE(128, GGML_TYPE_F16, GGML_TYPE_Q8_0);
diff --git a/llama/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-q4_0-f16.cu b/llama/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-q4_0-f16.cu
deleted file mode 100644
index 1b35f168..00000000
--- a/llama/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-q4_0-f16.cu
+++ /dev/null
@@ -1,31 +0,0 @@
-/**
- * llama.cpp - commit 46e3556e01b824e52395fb050b29804b6cff2a7c - do not edit this file
- *
- * MIT License
- *
- * Copyright (c) 2023-2024 The ggml authors
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to deal
- * in the Software without restriction, including without limitation the rights
- * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
- * copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-
-// This file has been autogenerated by generate_cu_files.py, do not edit manually.
-
-#include "../fattn-vec-f32.cuh"
-
-DECL_FATTN_VEC_F32_CASE(128, GGML_TYPE_Q4_0, GGML_TYPE_F16);
diff --git a/llama/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-q4_0-q4_0.cu b/llama/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-q4_0-q4_0.cu
deleted file mode 100644
index 5c625681..00000000
--- a/llama/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-q4_0-q4_0.cu
+++ /dev/null
@@ -1,31 +0,0 @@
-/**
- * llama.cpp - commit 46e3556e01b824e52395fb050b29804b6cff2a7c - do not edit this file
- *
- * MIT License
- *
- * Copyright (c) 2023-2024 The ggml authors
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to deal
- * in the Software without restriction, including without limitation the rights
- * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
- * copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. - */ - -// This file has been autogenerated by generate_cu_files.py, do not edit manually. - -#include "../fattn-vec-f32.cuh" - -DECL_FATTN_VEC_F32_CASE(128, GGML_TYPE_Q4_0, GGML_TYPE_Q4_0); diff --git a/llama/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-q4_0-q4_1.cu b/llama/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-q4_0-q4_1.cu deleted file mode 100644 index 6f70b740..00000000 --- a/llama/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-q4_0-q4_1.cu +++ /dev/null @@ -1,31 +0,0 @@ -/** - * llama.cpp - commit 46e3556e01b824e52395fb050b29804b6cff2a7c - do not edit this file - * - * MIT License - * - * Copyright (c) 2023-2024 The ggml authors - * - * Permission is hereby granted, free of charge, to any person obtaining a copy - * of this software and associated documentation files (the "Software"), to deal - * in the Software without restriction, including without limitation the rights - * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell - * copies of the Software, and to permit persons to whom the Software is - * furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in all - * copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. - */ - -// This file has been autogenerated by generate_cu_files.py, do not edit manually. 
- -#include "../fattn-vec-f32.cuh" - -DECL_FATTN_VEC_F32_CASE(128, GGML_TYPE_Q4_0, GGML_TYPE_Q4_1); diff --git a/llama/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-q4_0-q5_0.cu b/llama/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-q4_0-q5_0.cu deleted file mode 100644 index d91c6f92..00000000 --- a/llama/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-q4_0-q5_0.cu +++ /dev/null @@ -1,31 +0,0 @@ -/** - * llama.cpp - commit 46e3556e01b824e52395fb050b29804b6cff2a7c - do not edit this file - * - * MIT License - * - * Copyright (c) 2023-2024 The ggml authors - * - * Permission is hereby granted, free of charge, to any person obtaining a copy - * of this software and associated documentation files (the "Software"), to deal - * in the Software without restriction, including without limitation the rights - * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell - * copies of the Software, and to permit persons to whom the Software is - * furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in all - * copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. - */ - -// This file has been autogenerated by generate_cu_files.py, do not edit manually. - -#include "../fattn-vec-f32.cuh" - -DECL_FATTN_VEC_F32_CASE(128, GGML_TYPE_Q4_0, GGML_TYPE_Q5_0); diff --git a/llama/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-q4_0-q5_1.cu b/llama/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-q4_0-q5_1.cu deleted file mode 100644 index d206889d..00000000 --- a/llama/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-q4_0-q5_1.cu +++ /dev/null @@ -1,31 +0,0 @@ -/** - * llama.cpp - commit 46e3556e01b824e52395fb050b29804b6cff2a7c - do not edit this file - * - * MIT License - * - * Copyright (c) 2023-2024 The ggml authors - * - * Permission is hereby granted, free of charge, to any person obtaining a copy - * of this software and associated documentation files (the "Software"), to deal - * in the Software without restriction, including without limitation the rights - * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell - * copies of the Software, and to permit persons to whom the Software is - * furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in all - * copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. 
- */ - -// This file has been autogenerated by generate_cu_files.py, do not edit manually. - -#include "../fattn-vec-f32.cuh" - -DECL_FATTN_VEC_F32_CASE(128, GGML_TYPE_Q4_0, GGML_TYPE_Q5_1); diff --git a/llama/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-q4_0-q8_0.cu b/llama/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-q4_0-q8_0.cu deleted file mode 100644 index ae104a61..00000000 --- a/llama/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-q4_0-q8_0.cu +++ /dev/null @@ -1,31 +0,0 @@ -/** - * llama.cpp - commit 46e3556e01b824e52395fb050b29804b6cff2a7c - do not edit this file - * - * MIT License - * - * Copyright (c) 2023-2024 The ggml authors - * - * Permission is hereby granted, free of charge, to any person obtaining a copy - * of this software and associated documentation files (the "Software"), to deal - * in the Software without restriction, including without limitation the rights - * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell - * copies of the Software, and to permit persons to whom the Software is - * furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in all - * copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. - */ - -// This file has been autogenerated by generate_cu_files.py, do not edit manually. - -#include "../fattn-vec-f32.cuh" - -DECL_FATTN_VEC_F32_CASE(128, GGML_TYPE_Q4_0, GGML_TYPE_Q8_0); diff --git a/llama/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-q4_1-f16.cu b/llama/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-q4_1-f16.cu deleted file mode 100644 index ab2c66be..00000000 --- a/llama/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-q4_1-f16.cu +++ /dev/null @@ -1,31 +0,0 @@ -/** - * llama.cpp - commit 46e3556e01b824e52395fb050b29804b6cff2a7c - do not edit this file - * - * MIT License - * - * Copyright (c) 2023-2024 The ggml authors - * - * Permission is hereby granted, free of charge, to any person obtaining a copy - * of this software and associated documentation files (the "Software"), to deal - * in the Software without restriction, including without limitation the rights - * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell - * copies of the Software, and to permit persons to whom the Software is - * furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in all - * copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. 
- */ - -// This file has been autogenerated by generate_cu_files.py, do not edit manually. - -#include "../fattn-vec-f32.cuh" - -DECL_FATTN_VEC_F32_CASE(128, GGML_TYPE_Q4_1, GGML_TYPE_F16); diff --git a/llama/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-q4_1-q4_0.cu b/llama/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-q4_1-q4_0.cu deleted file mode 100644 index 4b55d39f..00000000 --- a/llama/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-q4_1-q4_0.cu +++ /dev/null @@ -1,31 +0,0 @@ -/** - * llama.cpp - commit 46e3556e01b824e52395fb050b29804b6cff2a7c - do not edit this file - * - * MIT License - * - * Copyright (c) 2023-2024 The ggml authors - * - * Permission is hereby granted, free of charge, to any person obtaining a copy - * of this software and associated documentation files (the "Software"), to deal - * in the Software without restriction, including without limitation the rights - * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell - * copies of the Software, and to permit persons to whom the Software is - * furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in all - * copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. - */ - -// This file has been autogenerated by generate_cu_files.py, do not edit manually. - -#include "../fattn-vec-f32.cuh" - -DECL_FATTN_VEC_F32_CASE(128, GGML_TYPE_Q4_1, GGML_TYPE_Q4_0); diff --git a/llama/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-q4_1-q4_1.cu b/llama/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-q4_1-q4_1.cu deleted file mode 100644 index 1c1065ff..00000000 --- a/llama/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-q4_1-q4_1.cu +++ /dev/null @@ -1,31 +0,0 @@ -/** - * llama.cpp - commit 46e3556e01b824e52395fb050b29804b6cff2a7c - do not edit this file - * - * MIT License - * - * Copyright (c) 2023-2024 The ggml authors - * - * Permission is hereby granted, free of charge, to any person obtaining a copy - * of this software and associated documentation files (the "Software"), to deal - * in the Software without restriction, including without limitation the rights - * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell - * copies of the Software, and to permit persons to whom the Software is - * furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in all - * copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. - */ - -// This file has been autogenerated by generate_cu_files.py, do not edit manually. - -#include "../fattn-vec-f32.cuh" - -DECL_FATTN_VEC_F32_CASE(128, GGML_TYPE_Q4_1, GGML_TYPE_Q4_1); diff --git a/llama/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-q4_1-q5_0.cu b/llama/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-q4_1-q5_0.cu deleted file mode 100644 index b973d161..00000000 --- a/llama/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-q4_1-q5_0.cu +++ /dev/null @@ -1,31 +0,0 @@ -/** - * llama.cpp - commit 46e3556e01b824e52395fb050b29804b6cff2a7c - do not edit this file - * - * MIT License - * - * Copyright (c) 2023-2024 The ggml authors - * - * Permission is hereby granted, free of charge, to any person obtaining a copy - * of this software and associated documentation files (the "Software"), to deal - * in the Software without restriction, including without limitation the rights - * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell - * copies of the Software, and to permit persons to whom the Software is - * furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in all - * copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. - */ - -// This file has been autogenerated by generate_cu_files.py, do not edit manually. - -#include "../fattn-vec-f32.cuh" - -DECL_FATTN_VEC_F32_CASE(128, GGML_TYPE_Q4_1, GGML_TYPE_Q5_0); diff --git a/llama/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-q4_1-q5_1.cu b/llama/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-q4_1-q5_1.cu deleted file mode 100644 index 9b3999e8..00000000 --- a/llama/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-q4_1-q5_1.cu +++ /dev/null @@ -1,31 +0,0 @@ -/** - * llama.cpp - commit 46e3556e01b824e52395fb050b29804b6cff2a7c - do not edit this file - * - * MIT License - * - * Copyright (c) 2023-2024 The ggml authors - * - * Permission is hereby granted, free of charge, to any person obtaining a copy - * of this software and associated documentation files (the "Software"), to deal - * in the Software without restriction, including without limitation the rights - * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell - * copies of the Software, and to permit persons to whom the Software is - * furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in all - * copies or substantial portions of the Software. 
- * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. - */ - -// This file has been autogenerated by generate_cu_files.py, do not edit manually. - -#include "../fattn-vec-f32.cuh" - -DECL_FATTN_VEC_F32_CASE(128, GGML_TYPE_Q4_1, GGML_TYPE_Q5_1); diff --git a/llama/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-q4_1-q8_0.cu b/llama/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-q4_1-q8_0.cu deleted file mode 100644 index fc7fde30..00000000 --- a/llama/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-q4_1-q8_0.cu +++ /dev/null @@ -1,31 +0,0 @@ -/** - * llama.cpp - commit 46e3556e01b824e52395fb050b29804b6cff2a7c - do not edit this file - * - * MIT License - * - * Copyright (c) 2023-2024 The ggml authors - * - * Permission is hereby granted, free of charge, to any person obtaining a copy - * of this software and associated documentation files (the "Software"), to deal - * in the Software without restriction, including without limitation the rights - * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell - * copies of the Software, and to permit persons to whom the Software is - * furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in all - * copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. - */ - -// This file has been autogenerated by generate_cu_files.py, do not edit manually. - -#include "../fattn-vec-f32.cuh" - -DECL_FATTN_VEC_F32_CASE(128, GGML_TYPE_Q4_1, GGML_TYPE_Q8_0); diff --git a/llama/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-q5_0-f16.cu b/llama/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-q5_0-f16.cu deleted file mode 100644 index b1f48272..00000000 --- a/llama/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-q5_0-f16.cu +++ /dev/null @@ -1,31 +0,0 @@ -/** - * llama.cpp - commit 46e3556e01b824e52395fb050b29804b6cff2a7c - do not edit this file - * - * MIT License - * - * Copyright (c) 2023-2024 The ggml authors - * - * Permission is hereby granted, free of charge, to any person obtaining a copy - * of this software and associated documentation files (the "Software"), to deal - * in the Software without restriction, including without limitation the rights - * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell - * copies of the Software, and to permit persons to whom the Software is - * furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in all - * copies or substantial portions of the Software. 
- * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. - */ - -// This file has been autogenerated by generate_cu_files.py, do not edit manually. - -#include "../fattn-vec-f32.cuh" - -DECL_FATTN_VEC_F32_CASE(128, GGML_TYPE_Q5_0, GGML_TYPE_F16); diff --git a/llama/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-q5_0-q4_0.cu b/llama/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-q5_0-q4_0.cu deleted file mode 100644 index b854659a..00000000 --- a/llama/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-q5_0-q4_0.cu +++ /dev/null @@ -1,31 +0,0 @@ -/** - * llama.cpp - commit 46e3556e01b824e52395fb050b29804b6cff2a7c - do not edit this file - * - * MIT License - * - * Copyright (c) 2023-2024 The ggml authors - * - * Permission is hereby granted, free of charge, to any person obtaining a copy - * of this software and associated documentation files (the "Software"), to deal - * in the Software without restriction, including without limitation the rights - * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell - * copies of the Software, and to permit persons to whom the Software is - * furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in all - * copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. - */ - -// This file has been autogenerated by generate_cu_files.py, do not edit manually. 
- -#include "../fattn-vec-f32.cuh" - -DECL_FATTN_VEC_F32_CASE(128, GGML_TYPE_Q5_0, GGML_TYPE_Q4_0); diff --git a/llama/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-q5_0-q4_1.cu b/llama/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-q5_0-q4_1.cu deleted file mode 100644 index 35db0d6d..00000000 --- a/llama/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-q5_0-q4_1.cu +++ /dev/null @@ -1,31 +0,0 @@ -/** - * llama.cpp - commit 46e3556e01b824e52395fb050b29804b6cff2a7c - do not edit this file - * - * MIT License - * - * Copyright (c) 2023-2024 The ggml authors - * - * Permission is hereby granted, free of charge, to any person obtaining a copy - * of this software and associated documentation files (the "Software"), to deal - * in the Software without restriction, including without limitation the rights - * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell - * copies of the Software, and to permit persons to whom the Software is - * furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in all - * copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. - */ - -// This file has been autogenerated by generate_cu_files.py, do not edit manually. - -#include "../fattn-vec-f32.cuh" - -DECL_FATTN_VEC_F32_CASE(128, GGML_TYPE_Q5_0, GGML_TYPE_Q4_1); diff --git a/llama/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-q5_0-q5_0.cu b/llama/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-q5_0-q5_0.cu deleted file mode 100644 index cc76b0fb..00000000 --- a/llama/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-q5_0-q5_0.cu +++ /dev/null @@ -1,31 +0,0 @@ -/** - * llama.cpp - commit 46e3556e01b824e52395fb050b29804b6cff2a7c - do not edit this file - * - * MIT License - * - * Copyright (c) 2023-2024 The ggml authors - * - * Permission is hereby granted, free of charge, to any person obtaining a copy - * of this software and associated documentation files (the "Software"), to deal - * in the Software without restriction, including without limitation the rights - * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell - * copies of the Software, and to permit persons to whom the Software is - * furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in all - * copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. 
- */ - -// This file has been autogenerated by generate_cu_files.py, do not edit manually. - -#include "../fattn-vec-f32.cuh" - -DECL_FATTN_VEC_F32_CASE(128, GGML_TYPE_Q5_0, GGML_TYPE_Q5_0); diff --git a/llama/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-q5_0-q5_1.cu b/llama/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-q5_0-q5_1.cu deleted file mode 100644 index ff9e76dd..00000000 --- a/llama/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-q5_0-q5_1.cu +++ /dev/null @@ -1,31 +0,0 @@ -/** - * llama.cpp - commit 46e3556e01b824e52395fb050b29804b6cff2a7c - do not edit this file - * - * MIT License - * - * Copyright (c) 2023-2024 The ggml authors - * - * Permission is hereby granted, free of charge, to any person obtaining a copy - * of this software and associated documentation files (the "Software"), to deal - * in the Software without restriction, including without limitation the rights - * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell - * copies of the Software, and to permit persons to whom the Software is - * furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in all - * copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. - */ - -// This file has been autogenerated by generate_cu_files.py, do not edit manually. - -#include "../fattn-vec-f32.cuh" - -DECL_FATTN_VEC_F32_CASE(128, GGML_TYPE_Q5_0, GGML_TYPE_Q5_1); diff --git a/llama/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-q5_0-q8_0.cu b/llama/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-q5_0-q8_0.cu deleted file mode 100644 index 4b031d98..00000000 --- a/llama/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-q5_0-q8_0.cu +++ /dev/null @@ -1,31 +0,0 @@ -/** - * llama.cpp - commit 46e3556e01b824e52395fb050b29804b6cff2a7c - do not edit this file - * - * MIT License - * - * Copyright (c) 2023-2024 The ggml authors - * - * Permission is hereby granted, free of charge, to any person obtaining a copy - * of this software and associated documentation files (the "Software"), to deal - * in the Software without restriction, including without limitation the rights - * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell - * copies of the Software, and to permit persons to whom the Software is - * furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in all - * copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. - */ - -// This file has been autogenerated by generate_cu_files.py, do not edit manually. - -#include "../fattn-vec-f32.cuh" - -DECL_FATTN_VEC_F32_CASE(128, GGML_TYPE_Q5_0, GGML_TYPE_Q8_0); diff --git a/llama/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-q5_1-f16.cu b/llama/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-q5_1-f16.cu deleted file mode 100644 index b99bab1e..00000000 --- a/llama/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-q5_1-f16.cu +++ /dev/null @@ -1,31 +0,0 @@ -/** - * llama.cpp - commit 46e3556e01b824e52395fb050b29804b6cff2a7c - do not edit this file - * - * MIT License - * - * Copyright (c) 2023-2024 The ggml authors - * - * Permission is hereby granted, free of charge, to any person obtaining a copy - * of this software and associated documentation files (the "Software"), to deal - * in the Software without restriction, including without limitation the rights - * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell - * copies of the Software, and to permit persons to whom the Software is - * furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in all - * copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. - */ - -// This file has been autogenerated by generate_cu_files.py, do not edit manually. - -#include "../fattn-vec-f32.cuh" - -DECL_FATTN_VEC_F32_CASE(128, GGML_TYPE_Q5_1, GGML_TYPE_F16); diff --git a/llama/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-q5_1-q4_0.cu b/llama/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-q5_1-q4_0.cu deleted file mode 100644 index 22e2e6db..00000000 --- a/llama/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-q5_1-q4_0.cu +++ /dev/null @@ -1,31 +0,0 @@ -/** - * llama.cpp - commit 46e3556e01b824e52395fb050b29804b6cff2a7c - do not edit this file - * - * MIT License - * - * Copyright (c) 2023-2024 The ggml authors - * - * Permission is hereby granted, free of charge, to any person obtaining a copy - * of this software and associated documentation files (the "Software"), to deal - * in the Software without restriction, including without limitation the rights - * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell - * copies of the Software, and to permit persons to whom the Software is - * furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in all - * copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. - */ - -// This file has been autogenerated by generate_cu_files.py, do not edit manually. - -#include "../fattn-vec-f32.cuh" - -DECL_FATTN_VEC_F32_CASE(128, GGML_TYPE_Q5_1, GGML_TYPE_Q4_0); diff --git a/llama/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-q5_1-q4_1.cu b/llama/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-q5_1-q4_1.cu deleted file mode 100644 index 95c1984e..00000000 --- a/llama/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-q5_1-q4_1.cu +++ /dev/null @@ -1,31 +0,0 @@ -/** - * llama.cpp - commit 46e3556e01b824e52395fb050b29804b6cff2a7c - do not edit this file - * - * MIT License - * - * Copyright (c) 2023-2024 The ggml authors - * - * Permission is hereby granted, free of charge, to any person obtaining a copy - * of this software and associated documentation files (the "Software"), to deal - * in the Software without restriction, including without limitation the rights - * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell - * copies of the Software, and to permit persons to whom the Software is - * furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in all - * copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. - */ - -// This file has been autogenerated by generate_cu_files.py, do not edit manually. - -#include "../fattn-vec-f32.cuh" - -DECL_FATTN_VEC_F32_CASE(128, GGML_TYPE_Q5_1, GGML_TYPE_Q4_1); diff --git a/llama/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-q5_1-q5_0.cu b/llama/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-q5_1-q5_0.cu deleted file mode 100644 index 65307d39..00000000 --- a/llama/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-q5_1-q5_0.cu +++ /dev/null @@ -1,31 +0,0 @@ -/** - * llama.cpp - commit 46e3556e01b824e52395fb050b29804b6cff2a7c - do not edit this file - * - * MIT License - * - * Copyright (c) 2023-2024 The ggml authors - * - * Permission is hereby granted, free of charge, to any person obtaining a copy - * of this software and associated documentation files (the "Software"), to deal - * in the Software without restriction, including without limitation the rights - * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell - * copies of the Software, and to permit persons to whom the Software is - * furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in all - * copies or substantial portions of the Software. 
- * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. - */ - -// This file has been autogenerated by generate_cu_files.py, do not edit manually. - -#include "../fattn-vec-f32.cuh" - -DECL_FATTN_VEC_F32_CASE(128, GGML_TYPE_Q5_1, GGML_TYPE_Q5_0); diff --git a/llama/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-q5_1-q5_1.cu b/llama/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-q5_1-q5_1.cu deleted file mode 100644 index ae0ec146..00000000 --- a/llama/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-q5_1-q5_1.cu +++ /dev/null @@ -1,31 +0,0 @@ -/** - * llama.cpp - commit 46e3556e01b824e52395fb050b29804b6cff2a7c - do not edit this file - * - * MIT License - * - * Copyright (c) 2023-2024 The ggml authors - * - * Permission is hereby granted, free of charge, to any person obtaining a copy - * of this software and associated documentation files (the "Software"), to deal - * in the Software without restriction, including without limitation the rights - * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell - * copies of the Software, and to permit persons to whom the Software is - * furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in all - * copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. - */ - -// This file has been autogenerated by generate_cu_files.py, do not edit manually. 
- -#include "../fattn-vec-f32.cuh" - -DECL_FATTN_VEC_F32_CASE(128, GGML_TYPE_Q5_1, GGML_TYPE_Q5_1); diff --git a/llama/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-q5_1-q8_0.cu b/llama/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-q5_1-q8_0.cu deleted file mode 100644 index 1f420c1d..00000000 --- a/llama/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-q5_1-q8_0.cu +++ /dev/null @@ -1,31 +0,0 @@ -/** - * llama.cpp - commit 46e3556e01b824e52395fb050b29804b6cff2a7c - do not edit this file - * - * MIT License - * - * Copyright (c) 2023-2024 The ggml authors - * - * Permission is hereby granted, free of charge, to any person obtaining a copy - * of this software and associated documentation files (the "Software"), to deal - * in the Software without restriction, including without limitation the rights - * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell - * copies of the Software, and to permit persons to whom the Software is - * furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in all - * copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. - */ - -// This file has been autogenerated by generate_cu_files.py, do not edit manually. - -#include "../fattn-vec-f32.cuh" - -DECL_FATTN_VEC_F32_CASE(128, GGML_TYPE_Q5_1, GGML_TYPE_Q8_0); diff --git a/llama/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-q8_0-f16.cu b/llama/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-q8_0-f16.cu deleted file mode 100644 index 1d445af3..00000000 --- a/llama/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-q8_0-f16.cu +++ /dev/null @@ -1,31 +0,0 @@ -/** - * llama.cpp - commit 46e3556e01b824e52395fb050b29804b6cff2a7c - do not edit this file - * - * MIT License - * - * Copyright (c) 2023-2024 The ggml authors - * - * Permission is hereby granted, free of charge, to any person obtaining a copy - * of this software and associated documentation files (the "Software"), to deal - * in the Software without restriction, including without limitation the rights - * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell - * copies of the Software, and to permit persons to whom the Software is - * furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in all - * copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. - */ - -// This file has been autogenerated by generate_cu_files.py, do not edit manually. 
- -#include "../fattn-vec-f32.cuh" - -DECL_FATTN_VEC_F32_CASE(128, GGML_TYPE_Q8_0, GGML_TYPE_F16); diff --git a/llama/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-q8_0-q4_0.cu b/llama/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-q8_0-q4_0.cu deleted file mode 100644 index b3a951dc..00000000 --- a/llama/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-q8_0-q4_0.cu +++ /dev/null @@ -1,31 +0,0 @@ -/** - * llama.cpp - commit 46e3556e01b824e52395fb050b29804b6cff2a7c - do not edit this file - * - * MIT License - * - * Copyright (c) 2023-2024 The ggml authors - * - * Permission is hereby granted, free of charge, to any person obtaining a copy - * of this software and associated documentation files (the "Software"), to deal - * in the Software without restriction, including without limitation the rights - * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell - * copies of the Software, and to permit persons to whom the Software is - * furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in all - * copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. - */ - -// This file has been autogenerated by generate_cu_files.py, do not edit manually. - -#include "../fattn-vec-f32.cuh" - -DECL_FATTN_VEC_F32_CASE(128, GGML_TYPE_Q8_0, GGML_TYPE_Q4_0); diff --git a/llama/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-q8_0-q4_1.cu b/llama/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-q8_0-q4_1.cu deleted file mode 100644 index 804c30b2..00000000 --- a/llama/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-q8_0-q4_1.cu +++ /dev/null @@ -1,31 +0,0 @@ -/** - * llama.cpp - commit 46e3556e01b824e52395fb050b29804b6cff2a7c - do not edit this file - * - * MIT License - * - * Copyright (c) 2023-2024 The ggml authors - * - * Permission is hereby granted, free of charge, to any person obtaining a copy - * of this software and associated documentation files (the "Software"), to deal - * in the Software without restriction, including without limitation the rights - * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell - * copies of the Software, and to permit persons to whom the Software is - * furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in all - * copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. 
- */
-
-// This file has been autogenerated by generate_cu_files.py, do not edit manually.
-
-#include "../fattn-vec-f32.cuh"
-
-DECL_FATTN_VEC_F32_CASE(128, GGML_TYPE_Q8_0, GGML_TYPE_Q4_1);
diff --git a/llama/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-q8_0-q5_0.cu b/llama/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-q8_0-q5_0.cu
deleted file mode 100644
index 432928a2..00000000
--- a/llama/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-q8_0-q5_0.cu
+++ /dev/null
@@ -1,31 +0,0 @@
-/**
- * llama.cpp - commit 46e3556e01b824e52395fb050b29804b6cff2a7c - do not edit this file
- *
- * MIT License
- *
- * Copyright (c) 2023-2024 The ggml authors
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to deal
- * in the Software without restriction, including without limitation the rights
- * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
- * copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-
-// This file has been autogenerated by generate_cu_files.py, do not edit manually.
-
-#include "../fattn-vec-f32.cuh"
-
-DECL_FATTN_VEC_F32_CASE(128, GGML_TYPE_Q8_0, GGML_TYPE_Q5_0);
diff --git a/llama/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-q8_0-q5_1.cu b/llama/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-q8_0-q5_1.cu
deleted file mode 100644
index 409f81b0..00000000
--- a/llama/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-q8_0-q5_1.cu
+++ /dev/null
@@ -1,31 +0,0 @@
-/**
- * llama.cpp - commit 46e3556e01b824e52395fb050b29804b6cff2a7c - do not edit this file
- *
- * MIT License
- *
- * Copyright (c) 2023-2024 The ggml authors
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to deal
- * in the Software without restriction, including without limitation the rights
- * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
- * copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-
-// This file has been autogenerated by generate_cu_files.py, do not edit manually.
-
-#include "../fattn-vec-f32.cuh"
-
-DECL_FATTN_VEC_F32_CASE(128, GGML_TYPE_Q8_0, GGML_TYPE_Q5_1);
diff --git a/llama/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-q8_0-q8_0.cu b/llama/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-q8_0-q8_0.cu
deleted file mode 100644
index 032dab7f..00000000
--- a/llama/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-q8_0-q8_0.cu
+++ /dev/null
@@ -1,31 +0,0 @@
-/**
- * llama.cpp - commit 46e3556e01b824e52395fb050b29804b6cff2a7c - do not edit this file
- *
- * MIT License
- *
- * Copyright (c) 2023-2024 The ggml authors
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to deal
- * in the Software without restriction, including without limitation the rights
- * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
- * copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-
-// This file has been autogenerated by generate_cu_files.py, do not edit manually.
-
-#include "../fattn-vec-f32.cuh"
-
-DECL_FATTN_VEC_F32_CASE(128, GGML_TYPE_Q8_0, GGML_TYPE_Q8_0);
diff --git a/llama/ggml-cuda/template-instances/fattn-vec-f32-instance-hs256-f16-f16.cu b/llama/ggml-cuda/template-instances/fattn-vec-f32-instance-hs256-f16-f16.cu
deleted file mode 100644
index 00014a4f..00000000
--- a/llama/ggml-cuda/template-instances/fattn-vec-f32-instance-hs256-f16-f16.cu
+++ /dev/null
@@ -1,31 +0,0 @@
-/**
- * llama.cpp - commit 46e3556e01b824e52395fb050b29804b6cff2a7c - do not edit this file
- *
- * MIT License
- *
- * Copyright (c) 2023-2024 The ggml authors
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to deal
- * in the Software without restriction, including without limitation the rights
- * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
- * copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-
-// This file has been autogenerated by generate_cu_files.py, do not edit manually.
-
-#include "../fattn-vec-f32.cuh"
-
-DECL_FATTN_VEC_F32_CASE(256, GGML_TYPE_F16, GGML_TYPE_F16);
diff --git a/llama/ggml-cuda/template-instances/fattn-vec-f32-instance-hs64-f16-f16.cu b/llama/ggml-cuda/template-instances/fattn-vec-f32-instance-hs64-f16-f16.cu
deleted file mode 100644
index 32457263..00000000
--- a/llama/ggml-cuda/template-instances/fattn-vec-f32-instance-hs64-f16-f16.cu
+++ /dev/null
@@ -1,31 +0,0 @@
-/**
- * llama.cpp - commit 46e3556e01b824e52395fb050b29804b6cff2a7c - do not edit this file
- *
- * MIT License
- *
- * Copyright (c) 2023-2024 The ggml authors
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to deal
- * in the Software without restriction, including without limitation the rights
- * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
- * copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-
-// This file has been autogenerated by generate_cu_files.py, do not edit manually.
-
-#include "../fattn-vec-f32.cuh"
-
-DECL_FATTN_VEC_F32_CASE(64, GGML_TYPE_F16, GGML_TYPE_F16);
diff --git a/llama/ggml-cuda/template-instances/fattn-vec-f32-instance-hs64-f16-q4_0.cu b/llama/ggml-cuda/template-instances/fattn-vec-f32-instance-hs64-f16-q4_0.cu
deleted file mode 100644
index e7d49c27..00000000
--- a/llama/ggml-cuda/template-instances/fattn-vec-f32-instance-hs64-f16-q4_0.cu
+++ /dev/null
@@ -1,31 +0,0 @@
-/**
- * llama.cpp - commit 46e3556e01b824e52395fb050b29804b6cff2a7c - do not edit this file
- *
- * MIT License
- *
- * Copyright (c) 2023-2024 The ggml authors
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to deal
- * in the Software without restriction, including without limitation the rights
- * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
- * copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-
-// This file has been autogenerated by generate_cu_files.py, do not edit manually.
-
-#include "../fattn-vec-f32.cuh"
-
-DECL_FATTN_VEC_F32_CASE(64, GGML_TYPE_F16, GGML_TYPE_Q4_0);
diff --git a/llama/ggml-cuda/template-instances/fattn-vec-f32-instance-hs64-f16-q4_1.cu b/llama/ggml-cuda/template-instances/fattn-vec-f32-instance-hs64-f16-q4_1.cu
deleted file mode 100644
index 8d732548..00000000
--- a/llama/ggml-cuda/template-instances/fattn-vec-f32-instance-hs64-f16-q4_1.cu
+++ /dev/null
@@ -1,31 +0,0 @@
-/**
- * llama.cpp - commit 46e3556e01b824e52395fb050b29804b6cff2a7c - do not edit this file
- *
- * MIT License
- *
- * Copyright (c) 2023-2024 The ggml authors
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to deal
- * in the Software without restriction, including without limitation the rights
- * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
- * copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-
-// This file has been autogenerated by generate_cu_files.py, do not edit manually.
-
-#include "../fattn-vec-f32.cuh"
-
-DECL_FATTN_VEC_F32_CASE(64, GGML_TYPE_F16, GGML_TYPE_Q4_1);
diff --git a/llama/ggml-cuda/template-instances/fattn-vec-f32-instance-hs64-f16-q5_0.cu b/llama/ggml-cuda/template-instances/fattn-vec-f32-instance-hs64-f16-q5_0.cu
deleted file mode 100644
index a8e25764..00000000
--- a/llama/ggml-cuda/template-instances/fattn-vec-f32-instance-hs64-f16-q5_0.cu
+++ /dev/null
@@ -1,31 +0,0 @@
-/**
- * llama.cpp - commit 46e3556e01b824e52395fb050b29804b6cff2a7c - do not edit this file
- *
- * MIT License
- *
- * Copyright (c) 2023-2024 The ggml authors
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to deal
- * in the Software without restriction, including without limitation the rights
- * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
- * copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-
-// This file has been autogenerated by generate_cu_files.py, do not edit manually.
-
-#include "../fattn-vec-f32.cuh"
-
-DECL_FATTN_VEC_F32_CASE(64, GGML_TYPE_F16, GGML_TYPE_Q5_0);
diff --git a/llama/ggml-cuda/template-instances/fattn-vec-f32-instance-hs64-f16-q5_1.cu b/llama/ggml-cuda/template-instances/fattn-vec-f32-instance-hs64-f16-q5_1.cu
deleted file mode 100644
index dabbcd23..00000000
--- a/llama/ggml-cuda/template-instances/fattn-vec-f32-instance-hs64-f16-q5_1.cu
+++ /dev/null
@@ -1,31 +0,0 @@
-/**
- * llama.cpp - commit 46e3556e01b824e52395fb050b29804b6cff2a7c - do not edit this file
- *
- * MIT License
- *
- * Copyright (c) 2023-2024 The ggml authors
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to deal
- * in the Software without restriction, including without limitation the rights
- * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
- * copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-
-// This file has been autogenerated by generate_cu_files.py, do not edit manually.
-
-#include "../fattn-vec-f32.cuh"
-
-DECL_FATTN_VEC_F32_CASE(64, GGML_TYPE_F16, GGML_TYPE_Q5_1);
diff --git a/llama/ggml-cuda/template-instances/fattn-vec-f32-instance-hs64-f16-q8_0.cu b/llama/ggml-cuda/template-instances/fattn-vec-f32-instance-hs64-f16-q8_0.cu
deleted file mode 100644
index cfbae911..00000000
--- a/llama/ggml-cuda/template-instances/fattn-vec-f32-instance-hs64-f16-q8_0.cu
+++ /dev/null
@@ -1,31 +0,0 @@
-/**
- * llama.cpp - commit 46e3556e01b824e52395fb050b29804b6cff2a7c - do not edit this file
- *
- * MIT License
- *
- * Copyright (c) 2023-2024 The ggml authors
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to deal
- * in the Software without restriction, including without limitation the rights
- * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
- * copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-
-// This file has been autogenerated by generate_cu_files.py, do not edit manually.
-
-#include "../fattn-vec-f32.cuh"
-
-DECL_FATTN_VEC_F32_CASE(64, GGML_TYPE_F16, GGML_TYPE_Q8_0);
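The fattn-vec-f32 instance files deleted above all have the same shape: a vendored-license header plus one DECL_FATTN_VEC_F32_CASE(head_size, type_K, type_V) line, emitted by generate_cu_files.py so that each (head size, K/V quantization) combination of the templated flash-attention kernel lives in its own translation unit. A minimal sketch of the pattern, with simplified names (the real template and macro are in the vendored fattn-vec-f32.cuh, so treat these identifiers as illustrative):

    // Illustrative sketch, not the vendored code: a kernel wrapper templated
    // over head size and the K/V cache quantization types...
    template <int head_size, ggml_type type_K, ggml_type type_V>
    void flash_attn_vec_f32_case(ggml_backend_cuda_context & ctx, ggml_tensor * dst);

    // ...and a macro that forces one explicit instantiation per .cu file:
    #define DECL_FATTN_VEC_F32_CASE(D, type_K, type_V) \
        template void flash_attn_vec_f32_case<D, type_K, type_V>( \
            ggml_backend_cuda_context & ctx, ggml_tensor * dst)

Splitting one instantiation per file keeps each translation unit small and lets nvcc compile the cases in parallel, which is why this patch removes many near-identical files rather than one large one.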
diff --git a/llama/ggml-cuda/template-instances/fattn-wmma-f16-instance-kqfloat-cpb16.cu b/llama/ggml-cuda/template-instances/fattn-wmma-f16-instance-kqfloat-cpb16.cu
deleted file mode 100644
index b1bdc1e9..00000000
--- a/llama/ggml-cuda/template-instances/fattn-wmma-f16-instance-kqfloat-cpb16.cu
+++ /dev/null
@@ -1,36 +0,0 @@
-/**
- * llama.cpp - commit 46e3556e01b824e52395fb050b29804b6cff2a7c - do not edit this file
- *
- * MIT License
- *
- * Copyright (c) 2023-2024 The ggml authors
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to deal
- * in the Software without restriction, including without limitation the rights
- * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
- * copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-
-// This file has been autogenerated by generate_cu_files.py, do not edit manually.
-
-#include "../fattn-wmma-f16.cuh"
-
-DECL_FATTN_WMMA_F16_CASE(64, 16, float);
-DECL_FATTN_WMMA_F16_CASE(80, 16, float);
-DECL_FATTN_WMMA_F16_CASE(96, 16, float);
-DECL_FATTN_WMMA_F16_CASE(112, 16, float);
-DECL_FATTN_WMMA_F16_CASE(128, 16, float);
-DECL_FATTN_WMMA_F16_CASE(256, 16, float);
diff --git a/llama/ggml-cuda/template-instances/fattn-wmma-f16-instance-kqfloat-cpb32.cu b/llama/ggml-cuda/template-instances/fattn-wmma-f16-instance-kqfloat-cpb32.cu
deleted file mode 100644
index 3151d9d6..00000000
--- a/llama/ggml-cuda/template-instances/fattn-wmma-f16-instance-kqfloat-cpb32.cu
+++ /dev/null
@@ -1,35 +0,0 @@
-/**
- * llama.cpp - commit 46e3556e01b824e52395fb050b29804b6cff2a7c - do not edit this file
- *
- * MIT License
- *
- * Copyright (c) 2023-2024 The ggml authors
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to deal
- * in the Software without restriction, including without limitation the rights
- * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
- * copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-
-// This file has been autogenerated by generate_cu_files.py, do not edit manually.
-
-#include "../fattn-wmma-f16.cuh"
-
-DECL_FATTN_WMMA_F16_CASE(64, 32, float);
-DECL_FATTN_WMMA_F16_CASE(80, 32, float);
-DECL_FATTN_WMMA_F16_CASE(96, 32, float);
-DECL_FATTN_WMMA_F16_CASE(112, 32, float);
-DECL_FATTN_WMMA_F16_CASE(128, 32, float);
diff --git a/llama/ggml-cuda/template-instances/fattn-wmma-f16-instance-kqhalf-cpb16.cu b/llama/ggml-cuda/template-instances/fattn-wmma-f16-instance-kqhalf-cpb16.cu
deleted file mode 100644
index eea23df9..00000000
--- a/llama/ggml-cuda/template-instances/fattn-wmma-f16-instance-kqhalf-cpb16.cu
+++ /dev/null
@@ -1,36 +0,0 @@
-/**
- * llama.cpp - commit 46e3556e01b824e52395fb050b29804b6cff2a7c - do not edit this file
- *
- * MIT License
- *
- * Copyright (c) 2023-2024 The ggml authors
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to deal
- * in the Software without restriction, including without limitation the rights
- * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
- * copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-
-// This file has been autogenerated by generate_cu_files.py, do not edit manually.
-
-#include "../fattn-wmma-f16.cuh"
-
-DECL_FATTN_WMMA_F16_CASE(64, 16, half);
-DECL_FATTN_WMMA_F16_CASE(80, 16, half);
-DECL_FATTN_WMMA_F16_CASE(96, 16, half);
-DECL_FATTN_WMMA_F16_CASE(112, 16, half);
-DECL_FATTN_WMMA_F16_CASE(128, 16, half);
-DECL_FATTN_WMMA_F16_CASE(256, 16, half);
diff --git a/llama/ggml-cuda/template-instances/fattn-wmma-f16-instance-kqhalf-cpb32.cu b/llama/ggml-cuda/template-instances/fattn-wmma-f16-instance-kqhalf-cpb32.cu
deleted file mode 100644
index 70ba3a53..00000000
--- a/llama/ggml-cuda/template-instances/fattn-wmma-f16-instance-kqhalf-cpb32.cu
+++ /dev/null
@@ -1,36 +0,0 @@
-/**
- * llama.cpp - commit 46e3556e01b824e52395fb050b29804b6cff2a7c - do not edit this file
- *
- * MIT License
- *
- * Copyright (c) 2023-2024 The ggml authors
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to deal
- * in the Software without restriction, including without limitation the rights
- * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
- * copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-
-// This file has been autogenerated by generate_cu_files.py, do not edit manually.
-
-#include "../fattn-wmma-f16.cuh"
-
-DECL_FATTN_WMMA_F16_CASE(64, 32, half);
-DECL_FATTN_WMMA_F16_CASE(80, 32, half);
-DECL_FATTN_WMMA_F16_CASE(96, 32, half);
-DECL_FATTN_WMMA_F16_CASE(112, 32, half);
-DECL_FATTN_WMMA_F16_CASE(128, 32, half);
-DECL_FATTN_WMMA_F16_CASE(256, 32, half);
diff --git a/llama/ggml-cuda/template-instances/fattn-wmma-f16-instance-kqhalf-cpb8.cu b/llama/ggml-cuda/template-instances/fattn-wmma-f16-instance-kqhalf-cpb8.cu
deleted file mode 100644
index 3a8261ab..00000000
--- a/llama/ggml-cuda/template-instances/fattn-wmma-f16-instance-kqhalf-cpb8.cu
+++ /dev/null
@@ -1,34 +0,0 @@
-/**
- * llama.cpp - commit 46e3556e01b824e52395fb050b29804b6cff2a7c - do not edit this file
- *
- * MIT License
- *
- * Copyright (c) 2023-2024 The ggml authors
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to deal
- * in the Software without restriction, including without limitation the rights
- * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
- * copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-
-// This file has been autogenerated by generate_cu_files.py, do not edit manually.
-
-#include "../fattn-wmma-f16.cuh"
-
-DECL_FATTN_WMMA_F16_CASE(64, 8, half);
-DECL_FATTN_WMMA_F16_CASE(96, 8, half);
-DECL_FATTN_WMMA_F16_CASE(128, 8, half);
-DECL_FATTN_WMMA_F16_CASE(256, 8, half);
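The fattn-wmma-f16 instances deleted above enumerate different axes than the vec-f32 ones: head size, columns per block (8/16/32), and the type used to accumulate the KQ products (half or float; per the diff, the cpb8 variant exists only for half, and the float cpb32 variant stops at head size 128). The accumulator type is the usual tensor-core precision/speed trade-off, as in this background illustration of the standard mma.h fragment API (not taken from the vendored kernel):

    #include <mma.h>
    using namespace nvcuda;

    // 16x16x16 tile with fp16 inputs; the last template parameter picks the
    // accumulator precision that the "kqfloat"/"kqhalf" instances enumerate.
    wmma::fragment<wmma::accumulator, 16, 16, 16, float> acc_f32; // "kqfloat" cases
    wmma::fragment<wmma::accumulator, 16, 16, 16, half>  acc_f16; // "kqhalf" cases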
diff --git a/llama/ggml-cuda/template-instances/mmq-instance-iq1_s.cu b/llama/ggml-cuda/template-instances/mmq-instance-iq1_s.cu
deleted file mode 100644
index f3943668..00000000
--- a/llama/ggml-cuda/template-instances/mmq-instance-iq1_s.cu
+++ /dev/null
@@ -1,31 +0,0 @@
-/**
- * llama.cpp - commit 46e3556e01b824e52395fb050b29804b6cff2a7c - do not edit this file
- *
- * MIT License
- *
- * Copyright (c) 2023-2024 The ggml authors
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to deal
- * in the Software without restriction, including without limitation the rights
- * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
- * copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-
-// This file has been autogenerated by generate_cu_files.py, do not edit manually.
-
-#include "../mmq.cuh"
-
-DECL_MMQ_CASE(GGML_TYPE_IQ1_S);
diff --git a/llama/ggml-cuda/template-instances/mmq-instance-iq2_s.cu b/llama/ggml-cuda/template-instances/mmq-instance-iq2_s.cu
deleted file mode 100644
index 086ab539..00000000
--- a/llama/ggml-cuda/template-instances/mmq-instance-iq2_s.cu
+++ /dev/null
@@ -1,31 +0,0 @@
-/**
- * llama.cpp - commit 46e3556e01b824e52395fb050b29804b6cff2a7c - do not edit this file
- *
- * MIT License
- *
- * Copyright (c) 2023-2024 The ggml authors
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to deal
- * in the Software without restriction, including without limitation the rights
- * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
- * copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-
-// This file has been autogenerated by generate_cu_files.py, do not edit manually.
-
-#include "../mmq.cuh"
-
-DECL_MMQ_CASE(GGML_TYPE_IQ2_S);
diff --git a/llama/ggml-cuda/template-instances/mmq-instance-iq2_xs.cu b/llama/ggml-cuda/template-instances/mmq-instance-iq2_xs.cu
deleted file mode 100644
index 6af7aa32..00000000
--- a/llama/ggml-cuda/template-instances/mmq-instance-iq2_xs.cu
+++ /dev/null
@@ -1,31 +0,0 @@
-/**
- * llama.cpp - commit 46e3556e01b824e52395fb050b29804b6cff2a7c - do not edit this file
- *
- * MIT License
- *
- * Copyright (c) 2023-2024 The ggml authors
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to deal
- * in the Software without restriction, including without limitation the rights
- * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
- * copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-
-// This file has been autogenerated by generate_cu_files.py, do not edit manually.
-
-#include "../mmq.cuh"
-
-DECL_MMQ_CASE(GGML_TYPE_IQ2_XS);
diff --git a/llama/ggml-cuda/template-instances/mmq-instance-iq2_xxs.cu b/llama/ggml-cuda/template-instances/mmq-instance-iq2_xxs.cu
deleted file mode 100644
index fc771442..00000000
--- a/llama/ggml-cuda/template-instances/mmq-instance-iq2_xxs.cu
+++ /dev/null
@@ -1,31 +0,0 @@
-/**
- * llama.cpp - commit 46e3556e01b824e52395fb050b29804b6cff2a7c - do not edit this file
- *
- * MIT License
- *
- * Copyright (c) 2023-2024 The ggml authors
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to deal
- * in the Software without restriction, including without limitation the rights
- * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
- * copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-
-// This file has been autogenerated by generate_cu_files.py, do not edit manually.
-
-#include "../mmq.cuh"
-
-DECL_MMQ_CASE(GGML_TYPE_IQ2_XXS);
diff --git a/llama/ggml-cuda/template-instances/mmq-instance-iq3_s.cu b/llama/ggml-cuda/template-instances/mmq-instance-iq3_s.cu
deleted file mode 100644
index 5ba22c06..00000000
--- a/llama/ggml-cuda/template-instances/mmq-instance-iq3_s.cu
+++ /dev/null
@@ -1,31 +0,0 @@
-/**
- * llama.cpp - commit 46e3556e01b824e52395fb050b29804b6cff2a7c - do not edit this file
- *
- * MIT License
- *
- * Copyright (c) 2023-2024 The ggml authors
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to deal
- * in the Software without restriction, including without limitation the rights
- * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
- * copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-
-// This file has been autogenerated by generate_cu_files.py, do not edit manually.
-
-#include "../mmq.cuh"
-
-DECL_MMQ_CASE(GGML_TYPE_IQ3_S);
diff --git a/llama/ggml-cuda/template-instances/mmq-instance-iq3_xxs.cu b/llama/ggml-cuda/template-instances/mmq-instance-iq3_xxs.cu
deleted file mode 100644
index 647be438..00000000
--- a/llama/ggml-cuda/template-instances/mmq-instance-iq3_xxs.cu
+++ /dev/null
@@ -1,31 +0,0 @@
-/**
- * llama.cpp - commit 46e3556e01b824e52395fb050b29804b6cff2a7c - do not edit this file
- *
- * MIT License
- *
- * Copyright (c) 2023-2024 The ggml authors
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to deal
- * in the Software without restriction, including without limitation the rights
- * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
- * copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-
-// This file has been autogenerated by generate_cu_files.py, do not edit manually.
-
-#include "../mmq.cuh"
-
-DECL_MMQ_CASE(GGML_TYPE_IQ3_XXS);
diff --git a/llama/ggml-cuda/template-instances/mmq-instance-iq4_nl.cu b/llama/ggml-cuda/template-instances/mmq-instance-iq4_nl.cu
deleted file mode 100644
index b8263fa3..00000000
--- a/llama/ggml-cuda/template-instances/mmq-instance-iq4_nl.cu
+++ /dev/null
@@ -1,31 +0,0 @@
-/**
- * llama.cpp - commit 46e3556e01b824e52395fb050b29804b6cff2a7c - do not edit this file
- *
- * MIT License
- *
- * Copyright (c) 2023-2024 The ggml authors
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to deal
- * in the Software without restriction, including without limitation the rights
- * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
- * copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-
-// This file has been autogenerated by generate_cu_files.py, do not edit manually.
-
-#include "../mmq.cuh"
-
-DECL_MMQ_CASE(GGML_TYPE_IQ4_NL);
diff --git a/llama/ggml-cuda/template-instances/mmq-instance-iq4_xs.cu b/llama/ggml-cuda/template-instances/mmq-instance-iq4_xs.cu
deleted file mode 100644
index 41986b9d..00000000
--- a/llama/ggml-cuda/template-instances/mmq-instance-iq4_xs.cu
+++ /dev/null
@@ -1,31 +0,0 @@
-/**
- * llama.cpp - commit 46e3556e01b824e52395fb050b29804b6cff2a7c - do not edit this file
- *
- * MIT License
- *
- * Copyright (c) 2023-2024 The ggml authors
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to deal
- * in the Software without restriction, including without limitation the rights
- * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
- * copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-
-// This file has been autogenerated by generate_cu_files.py, do not edit manually.
-
-#include "../mmq.cuh"
-
-DECL_MMQ_CASE(GGML_TYPE_IQ4_XS);
diff --git a/llama/ggml-cuda/template-instances/mmq-instance-q2_k.cu b/llama/ggml-cuda/template-instances/mmq-instance-q2_k.cu
deleted file mode 100644
index 023aec76..00000000
--- a/llama/ggml-cuda/template-instances/mmq-instance-q2_k.cu
+++ /dev/null
@@ -1,31 +0,0 @@
-/**
- * llama.cpp - commit 46e3556e01b824e52395fb050b29804b6cff2a7c - do not edit this file
- *
- * MIT License
- *
- * Copyright (c) 2023-2024 The ggml authors
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to deal
- * in the Software without restriction, including without limitation the rights
- * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
- * copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-
-// This file has been autogenerated by generate_cu_files.py, do not edit manually.
-
-#include "../mmq.cuh"
-
-DECL_MMQ_CASE(GGML_TYPE_Q2_K);
diff --git a/llama/ggml-cuda/template-instances/mmq-instance-q3_k.cu b/llama/ggml-cuda/template-instances/mmq-instance-q3_k.cu
deleted file mode 100644
index f8bba904..00000000
--- a/llama/ggml-cuda/template-instances/mmq-instance-q3_k.cu
+++ /dev/null
@@ -1,31 +0,0 @@
-/**
- * llama.cpp - commit 46e3556e01b824e52395fb050b29804b6cff2a7c - do not edit this file
- *
- * MIT License
- *
- * Copyright (c) 2023-2024 The ggml authors
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to deal
- * in the Software without restriction, including without limitation the rights
- * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
- * copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-
-// This file has been autogenerated by generate_cu_files.py, do not edit manually.
-
-#include "../mmq.cuh"
-
-DECL_MMQ_CASE(GGML_TYPE_Q3_K);
diff --git a/llama/ggml-cuda/template-instances/mmq-instance-q4_0.cu b/llama/ggml-cuda/template-instances/mmq-instance-q4_0.cu
deleted file mode 100644
index 425d7a61..00000000
--- a/llama/ggml-cuda/template-instances/mmq-instance-q4_0.cu
+++ /dev/null
@@ -1,31 +0,0 @@
-/**
- * llama.cpp - commit 46e3556e01b824e52395fb050b29804b6cff2a7c - do not edit this file
- *
- * MIT License
- *
- * Copyright (c) 2023-2024 The ggml authors
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to deal
- * in the Software without restriction, including without limitation the rights
- * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
- * copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-
-// This file has been autogenerated by generate_cu_files.py, do not edit manually.
-
-#include "../mmq.cuh"
-
-DECL_MMQ_CASE(GGML_TYPE_Q4_0);
diff --git a/llama/ggml-cuda/template-instances/mmq-instance-q4_1.cu b/llama/ggml-cuda/template-instances/mmq-instance-q4_1.cu
deleted file mode 100644
index 91bafb73..00000000
--- a/llama/ggml-cuda/template-instances/mmq-instance-q4_1.cu
+++ /dev/null
@@ -1,31 +0,0 @@
-/**
- * llama.cpp - commit 46e3556e01b824e52395fb050b29804b6cff2a7c - do not edit this file
- *
- * MIT License
- *
- * Copyright (c) 2023-2024 The ggml authors
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to deal
- * in the Software without restriction, including without limitation the rights
- * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
- * copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-
-// This file has been autogenerated by generate_cu_files.py, do not edit manually.
-
-#include "../mmq.cuh"
-
-DECL_MMQ_CASE(GGML_TYPE_Q4_1);
diff --git a/llama/ggml-cuda/template-instances/mmq-instance-q4_k.cu b/llama/ggml-cuda/template-instances/mmq-instance-q4_k.cu
deleted file mode 100644
index a0ad396c..00000000
--- a/llama/ggml-cuda/template-instances/mmq-instance-q4_k.cu
+++ /dev/null
@@ -1,31 +0,0 @@
-/**
- * llama.cpp - commit 46e3556e01b824e52395fb050b29804b6cff2a7c - do not edit this file
- *
- * MIT License
- *
- * Copyright (c) 2023-2024 The ggml authors
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to deal
- * in the Software without restriction, including without limitation the rights
- * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
- * copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-
-// This file has been autogenerated by generate_cu_files.py, do not edit manually.
-
-#include "../mmq.cuh"
-
-DECL_MMQ_CASE(GGML_TYPE_Q4_K);
diff --git a/llama/ggml-cuda/template-instances/mmq-instance-q5_0.cu b/llama/ggml-cuda/template-instances/mmq-instance-q5_0.cu
deleted file mode 100644
index dc1cbd43..00000000
--- a/llama/ggml-cuda/template-instances/mmq-instance-q5_0.cu
+++ /dev/null
@@ -1,31 +0,0 @@
-/**
- * llama.cpp - commit 46e3556e01b824e52395fb050b29804b6cff2a7c - do not edit this file
- *
- * MIT License
- *
- * Copyright (c) 2023-2024 The ggml authors
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to deal
- * in the Software without restriction, including without limitation the rights
- * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
- * copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-
-// This file has been autogenerated by generate_cu_files.py, do not edit manually.
-
-#include "../mmq.cuh"
-
-DECL_MMQ_CASE(GGML_TYPE_Q5_0);
diff --git a/llama/ggml-cuda/template-instances/mmq-instance-q5_1.cu b/llama/ggml-cuda/template-instances/mmq-instance-q5_1.cu
deleted file mode 100644
index cc70a445..00000000
--- a/llama/ggml-cuda/template-instances/mmq-instance-q5_1.cu
+++ /dev/null
@@ -1,31 +0,0 @@
-/**
- * llama.cpp - commit 46e3556e01b824e52395fb050b29804b6cff2a7c - do not edit this file
- *
- * MIT License
- *
- * Copyright (c) 2023-2024 The ggml authors
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to deal
- * in the Software without restriction, including without limitation the rights
- * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
- * copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-
-// This file has been autogenerated by generate_cu_files.py, do not edit manually.
-
-#include "../mmq.cuh"
-
-DECL_MMQ_CASE(GGML_TYPE_Q5_1);
diff --git a/llama/ggml-cuda/template-instances/mmq-instance-q5_k.cu b/llama/ggml-cuda/template-instances/mmq-instance-q5_k.cu
deleted file mode 100644
index 3ff67b9f..00000000
--- a/llama/ggml-cuda/template-instances/mmq-instance-q5_k.cu
+++ /dev/null
@@ -1,31 +0,0 @@
-/**
- * llama.cpp - commit 46e3556e01b824e52395fb050b29804b6cff2a7c - do not edit this file
- *
- * MIT License
- *
- * Copyright (c) 2023-2024 The ggml authors
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to deal
- * in the Software without restriction, including without limitation the rights
- * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
- * copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-
-// This file has been autogenerated by generate_cu_files.py, do not edit manually.
-
-#include "../mmq.cuh"
-
-DECL_MMQ_CASE(GGML_TYPE_Q5_K);
diff --git a/llama/ggml-cuda/template-instances/mmq-instance-q6_k.cu b/llama/ggml-cuda/template-instances/mmq-instance-q6_k.cu
deleted file mode 100644
index 1d1ffee9..00000000
--- a/llama/ggml-cuda/template-instances/mmq-instance-q6_k.cu
+++ /dev/null
@@ -1,31 +0,0 @@
-/**
- * llama.cpp - commit 46e3556e01b824e52395fb050b29804b6cff2a7c - do not edit this file
- *
- * MIT License
- *
- * Copyright (c) 2023-2024 The ggml authors
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to deal
- * in the Software without restriction, including without limitation the rights
- * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
- * copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-
-// This file has been autogenerated by generate_cu_files.py, do not edit manually.
-
-#include "../mmq.cuh"
-
-DECL_MMQ_CASE(GGML_TYPE_Q6_K);
diff --git a/llama/ggml-cuda/template-instances/mmq-instance-q8_0.cu b/llama/ggml-cuda/template-instances/mmq-instance-q8_0.cu
deleted file mode 100644
index 1a7e0865..00000000
--- a/llama/ggml-cuda/template-instances/mmq-instance-q8_0.cu
+++ /dev/null
@@ -1,31 +0,0 @@
-/**
- * llama.cpp - commit 46e3556e01b824e52395fb050b29804b6cff2a7c - do not edit this file
- *
- * MIT License
- *
- * Copyright (c) 2023-2024 The ggml authors
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to deal
- * in the Software without restriction, including without limitation the rights
- * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
- * copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-
-// This file has been autogenerated by generate_cu_files.py, do not edit manually.
-
-#include "../mmq.cuh"
-
-DECL_MMQ_CASE(GGML_TYPE_Q8_0);
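The mmq-instance-* files deleted above enumerate one quantization format each for the quantized matrix-multiplication (mul_mat_q) kernel, using the same explicit-instantiation trick as the attention instances. A hedged sketch of what each file boils down to, assuming the vendored mmq.cuh keeps llama.cpp's mul_mat_q_case/mmq_args names (treat them as assumptions here):

    // One translation unit per ggml quant type; e.g. mmq-instance-q8_0.cu
    // expands to roughly:
    #define DECL_MMQ_CASE(type) \
        template void mul_mat_q_case<type>( \
            ggml_backend_cuda_context & ctx, const mmq_args & args, cudaStream_t stream)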
diff --git a/llama/ggml-cuda/tsembd.cuh b/llama/ggml-cuda/tsembd.cuh
deleted file mode 100644
index 62958650..00000000
--- a/llama/ggml-cuda/tsembd.cuh
+++ /dev/null
@@ -1,31 +0,0 @@
-/**
- * llama.cpp - commit 46e3556e01b824e52395fb050b29804b6cff2a7c - do not edit this file
- *
- * MIT License
- *
- * Copyright (c) 2023-2024 The ggml authors
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to deal
- * in the Software without restriction, including without limitation the rights
- * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
- * copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-
-#include "common.cuh"
-
-#define CUDA_TIMESTEP_EMBEDDING_BLOCK_SIZE 256
-
-void ggml_cuda_op_timestep_embedding(ggml_backend_cuda_context & ctx, ggml_tensor * dst);
diff --git a/llama/ggml-cuda/upscale.cuh b/llama/ggml-cuda/upscale.cuh
deleted file mode 100644
index d8bb2ec8..00000000
--- a/llama/ggml-cuda/upscale.cuh
+++ /dev/null
@@ -1,31 +0,0 @@
-/**
- * llama.cpp - commit 46e3556e01b824e52395fb050b29804b6cff2a7c - do not edit this file
- *
- * MIT License
- *
- * Copyright (c) 2023-2024 The ggml authors
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to deal
- * in the Software without restriction, including without limitation the rights
- * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
- * copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-
-#include "common.cuh"
-
-#define CUDA_UPSCALE_BLOCK_SIZE 256
-
-void ggml_cuda_op_upscale(ggml_backend_cuda_context & ctx, ggml_tensor * dst);
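tsembd.cuh and upscale.cuh are typical single-op headers in this backend: a block-size constant plus one ggml_cuda_op_* entry point that the dispatcher calls with the destination tensor. The constant feeds the usual ceil-divide grid arithmetic, roughly as below (a hypothetical caller; the kernel name is invented for illustration, not taken from the vendored upscale.cu):

    // Ceil-divide the element count by the block size to size the grid:
    const int64_t n = ggml_nelements(dst);
    const dim3 grid((n + CUDA_UPSCALE_BLOCK_SIZE - 1) / CUDA_UPSCALE_BLOCK_SIZE);
    upscale_kernel<<<grid, CUDA_UPSCALE_BLOCK_SIZE, 0, stream>>>(/* ... */);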
diff --git a/llama/ggml-cuda/vendors/cuda.h b/llama/ggml-cuda/vendors/cuda.h
deleted file mode 100644
index e309dd3f..00000000
--- a/llama/ggml-cuda/vendors/cuda.h
+++ /dev/null
@@ -1,41 +0,0 @@
-/**
- * llama.cpp - commit 46e3556e01b824e52395fb050b29804b6cff2a7c - do not edit this file
- *
- * MIT License
- *
- * Copyright (c) 2023-2024 The ggml authors
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to deal
- * in the Software without restriction, including without limitation the rights
- * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
- * copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-
-#pragma once
-
-#include <cuda_runtime.h>
-#include <cuda.h>
-#include <cublas_v2.h>
-#include <cuda_bf16.h>
-#include <cuda_fp16.h>
-
-#if CUDART_VERSION < 11020
-#define CU_DEVICE_ATTRIBUTE_VIRTUAL_MEMORY_MANAGEMENT_SUPPORTED CU_DEVICE_ATTRIBUTE_VIRTUAL_ADDRESS_MANAGEMENT_SUPPORTED
-#define CUBLAS_TF32_TENSOR_OP_MATH CUBLAS_TENSOR_OP_MATH
-#define CUBLAS_COMPUTE_16F CUDA_R_16F
-#define CUBLAS_COMPUTE_32F CUDA_R_32F
-#define cublasComputeType_t cudaDataType_t
-#endif // CUDART_VERSION < 11020
diff --git a/llama/ggml-cuda/wkv6.cuh b/llama/ggml-cuda/wkv6.cuh
deleted file mode 100644
index 27027287..00000000
--- a/llama/ggml-cuda/wkv6.cuh
+++ /dev/null
@@ -1,31 +0,0 @@
-/**
- * llama.cpp - commit 46e3556e01b824e52395fb050b29804b6cff2a7c - do not edit this file
- *
- * MIT License
- *
- * Copyright (c) 2023-2024 The ggml authors
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to deal
- * in the Software without restriction, including without limitation the rights
- * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
- * copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-
-#include "common.cuh"
-
-#define CUDA_WKV_BLOCK_SIZE 64
-
-void ggml_cuda_op_rwkv_wkv6(ggml_backend_cuda_context & ctx, ggml_tensor * dst);
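vendors/cuda.h pins the NVIDIA toolchain headers in one place and back-fills names that only exist from CUDA 11.2 (CUDART_VERSION 11020) onwards, so the same source compiles on older toolkits. With those shims, code written against the newer cuBLAS compute-type API still builds pre-11.2, e.g. (an illustrative snippet, not from the vendored sources):

    // On CUDART < 11.2 the shim degrades this to cudaDataType_t / CUDA_R_32F:
    cublasComputeType_t compute_type = CUBLAS_COMPUTE_32F;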
- */ - -#include "common.cuh" - -#define CUDA_WKV_BLOCK_SIZE 64 - -void ggml_cuda_op_rwkv_wkv6(ggml_backend_cuda_context & ctx, ggml_tensor * dst); diff --git a/llama/ggml-threading.cpp b/llama/ggml-threading.cpp deleted file mode 100644 index 7559b336..00000000 --- a/llama/ggml-threading.cpp +++ /dev/null @@ -1,38 +0,0 @@ -/** - * llama.cpp - commit 46e3556e01b824e52395fb050b29804b6cff2a7c - do not edit this file - * - * MIT License - * - * Copyright (c) 2023-2024 The ggml authors - * - * Permission is hereby granted, free of charge, to any person obtaining a copy - * of this software and associated documentation files (the "Software"), to deal - * in the Software without restriction, including without limitation the rights - * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell - * copies of the Software, and to permit persons to whom the Software is - * furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in all - * copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. - */ - -#include "ggml-threading.h" -#include <mutex> - -std::mutex ggml_critical_section_mutex; - -void ggml_critical_section_start() { - ggml_critical_section_mutex.lock(); -} - -void ggml_critical_section_end(void) { - ggml_critical_section_mutex.unlock(); -} diff --git a/llama/ggml-threading.h b/llama/ggml-threading.h deleted file mode 100644 index fe2ce367..00000000 --- a/llama/ggml-threading.h +++ /dev/null @@ -1,40 +0,0 @@ -/** - * llama.cpp - commit 46e3556e01b824e52395fb050b29804b6cff2a7c - do not edit this file - * - * MIT License - * - * Copyright (c) 2023-2024 The ggml authors - * - * Permission is hereby granted, free of charge, to any person obtaining a copy - * of this software and associated documentation files (the "Software"), to deal - * in the Software without restriction, including without limitation the rights - * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell - * copies of the Software, and to permit persons to whom the Software is - * furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in all - * copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE.
- */ - -#pragma once - -#include "ggml.h" - -#ifdef __cplusplus -extern "C" { -#endif - -GGML_API void ggml_critical_section_start(void); -GGML_API void ggml_critical_section_end(void); - -#ifdef __cplusplus -} -#endif diff --git a/llama/json-schema-to-grammar.h b/llama/json-schema-to-grammar.h deleted file mode 100644 index 39b451ca..00000000 --- a/llama/json-schema-to-grammar.h +++ /dev/null @@ -1,34 +0,0 @@ -/** - * llama.cpp - commit 46e3556e01b824e52395fb050b29804b6cff2a7c - do not edit this file - * - * MIT License - * - * Copyright (c) 2023-2024 The ggml authors - * - * Permission is hereby granted, free of charge, to any person obtaining a copy - * of this software and associated documentation files (the "Software"), to deal - * in the Software without restriction, including without limitation the rights - * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell - * copies of the Software, and to permit persons to whom the Software is - * furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in all - * copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. - */ - -#pragma once - -#include "ggml.h" -// Change JSON_ASSERT from assert() to GGML_ASSERT: -#define JSON_ASSERT GGML_ASSERT -#include "json.hpp" - -std::string json_schema_to_grammar(const nlohmann::ordered_json& schema); diff --git a/llama/llama-cparams.cpp b/llama/llama-cparams.cpp deleted file mode 100644 index 5a5d14cb..00000000 --- a/llama/llama-cparams.cpp +++ /dev/null @@ -1,27 +0,0 @@ -/** - * llama.cpp - commit 46e3556e01b824e52395fb050b29804b6cff2a7c - do not edit this file - * - * MIT License - * - * Copyright (c) 2023-2024 The ggml authors - * - * Permission is hereby granted, free of charge, to any person obtaining a copy - * of this software and associated documentation files (the "Software"), to deal - * in the Software without restriction, including without limitation the rights - * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell - * copies of the Software, and to permit persons to whom the Software is - * furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in all - * copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. 
- */ - -#include "llama-cparams.h" diff --git a/llama/llama-cparams.h b/llama/llama-cparams.h deleted file mode 100644 index 74fdb5c5..00000000 --- a/llama/llama-cparams.h +++ /dev/null @@ -1,64 +0,0 @@ -/** - * llama.cpp - commit 46e3556e01b824e52395fb050b29804b6cff2a7c - do not edit this file - * - * MIT License - * - * Copyright (c) 2023-2024 The ggml authors - * - * Permission is hereby granted, free of charge, to any person obtaining a copy - * of this software and associated documentation files (the "Software"), to deal - * in the Software without restriction, including without limitation the rights - * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell - * copies of the Software, and to permit persons to whom the Software is - * furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in all - * copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. - */ - -#pragma once - -#include "llama.h" - -#include <cstdint> - -struct llama_cparams { - uint32_t n_ctx; // context size used during inference - uint32_t n_batch; - uint32_t n_ubatch; - uint32_t n_seq_max; - int n_threads; // number of threads to use for generation - int n_threads_batch; // number of threads to use for batch processing - - float rope_freq_base; - float rope_freq_scale; - - uint32_t n_ctx_orig_yarn; - // These hyperparameters are not exposed in GGUF, because all - // existing YaRN models use the same values for them. - float yarn_ext_factor; - float yarn_attn_factor; - float yarn_beta_fast; - float yarn_beta_slow; - float defrag_thold; - - bool embeddings; - bool causal_attn; - bool offload_kqv; - bool flash_attn; - bool no_perf; - bool cross_attn; - - enum llama_pooling_type pooling_type; - - ggml_backend_sched_eval_callback cb_eval; - void * cb_eval_user_data; -}; diff --git a/llama/llama-cpp.h b/llama/llama-cpp.h deleted file mode 100644 index a0b7beb4..00000000 --- a/llama/llama-cpp.h +++ /dev/null @@ -1,56 +0,0 @@ -/** - * llama.cpp - commit 46e3556e01b824e52395fb050b29804b6cff2a7c - do not edit this file - * - * MIT License - * - * Copyright (c) 2023-2024 The ggml authors - * - * Permission is hereby granted, free of charge, to any person obtaining a copy - * of this software and associated documentation files (the "Software"), to deal - * in the Software without restriction, including without limitation the rights - * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell - * copies of the Software, and to permit persons to whom the Software is - * furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in all - * copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. - */ - -#pragma once - -#ifndef __cplusplus -#error "This header is for C++ only" -#endif - -#include <memory> - -#include "llama.h" - -struct llama_model_deleter { - void operator()(llama_model * model) { llama_free_model(model); } -}; - -struct llama_context_deleter { - void operator()(llama_context * context) { llama_free(context); } -}; - -struct llama_sampler_deleter { - void operator()(llama_sampler * sampler) { llama_sampler_free(sampler); } -}; - -struct llama_lora_adapter_deleter { - void operator()(llama_lora_adapter * lora_adapter) { llama_lora_adapter_free(lora_adapter); } -}; - -typedef std::unique_ptr<llama_model, llama_model_deleter> llama_model_ptr; -typedef std::unique_ptr<llama_context, llama_context_deleter> llama_context_ptr; -typedef std::unique_ptr<llama_sampler, llama_sampler_deleter> llama_sampler_ptr; -typedef std::unique_ptr<llama_lora_adapter, llama_lora_adapter_deleter> llama_lora_adapter_ptr; diff --git a/llama/llama-quant.h b/llama/llama-quant.h deleted file mode 100644 index e60fc627..00000000 --- a/llama/llama-quant.h +++ /dev/null @@ -1,27 +0,0 @@ -/** - * llama.cpp - commit 46e3556e01b824e52395fb050b29804b6cff2a7c - do not edit this file - * - * MIT License - * - * Copyright (c) 2023-2024 The ggml authors - * - * Permission is hereby granted, free of charge, to any person obtaining a copy - * of this software and associated documentation files (the "Software"), to deal - * in the Software without restriction, including without limitation the rights - * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell - * copies of the Software, and to permit persons to whom the Software is - * furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in all - * copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE.
- */ - -#pragma once diff --git a/llama/llama.cpp/.rsync-filter b/llama/llama.cpp/.rsync-filter new file mode 100644 index 00000000..186e1c12 --- /dev/null +++ b/llama/llama.cpp/.rsync-filter @@ -0,0 +1,22 @@ +protect **/*.go +include common/ +include common/base64.* +include common/common.* +include common/json-schema-to-grammar.* +include common/json.* +include common/log.* +include common/sampling.* +include common/stb_image.* +include include/ +include include/llama.* +include include/llama-*.* +include examples/ +include examples/llava/ +include examples/llava/clip.* +include examples/llava/llava.* +include src/ +include src/llama.* +include src/llama-*.* +include src/unicode-data.* +include src/unicode.* +exclude * diff --git a/llama/llama.cpp/LICENSE b/llama/llama.cpp/LICENSE new file mode 100644 index 00000000..acb96ce7 --- /dev/null +++ b/llama/llama.cpp/LICENSE @@ -0,0 +1,21 @@ +MIT License + +Copyright (c) 2023-2024 The ggml authors + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. diff --git a/llama/base64.hpp b/llama/llama.cpp/common/base64.hpp similarity index 100% rename from llama/base64.hpp rename to llama/llama.cpp/common/base64.hpp diff --git a/llama/common.cpp b/llama/llama.cpp/common/common.cpp similarity index 98% rename from llama/common.cpp rename to llama/llama.cpp/common/common.cpp index 132de88a..4bb140ee 100644 --- a/llama/common.cpp +++ b/llama/llama.cpp/common/common.cpp @@ -1,29 +1,3 @@ -/** - * llama.cpp - commit 46e3556e01b824e52395fb050b29804b6cff2a7c - do not edit this file - * - * MIT License - * - * Copyright (c) 2023-2024 The ggml authors - * - * Permission is hereby granted, free of charge, to any person obtaining a copy - * of this software and associated documentation files (the "Software"), to deal - * in the Software without restriction, including without limitation the rights - * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell - * copies of the Software, and to permit persons to whom the Software is - * furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in all - * copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. - */ - #if defined(_MSC_VER) #define _SILENCE_CXX17_CODECVT_HEADER_DEPRECATION_WARNING #endif diff --git a/llama/llama.cpp/common/common.go b/llama/llama.cpp/common/common.go new file mode 100644 index 00000000..ebbb738f --- /dev/null +++ b/llama/llama.cpp/common/common.go @@ -0,0 +1,6 @@ +package common + +// #cgo CXXFLAGS: -std=c++11 +// #cgo CPPFLAGS: -I${SRCDIR}/../include +// #cgo CPPFLAGS: -I${SRCDIR}/../../../ml/backend/ggml/ggml/include +import "C" diff --git a/llama/common.h b/llama/llama.cpp/common/common.h similarity index 95% rename from llama/common.h rename to llama/llama.cpp/common/common.h index db931490..0d452cf0 100644 --- a/llama/common.h +++ b/llama/llama.cpp/common/common.h @@ -1,29 +1,3 @@ -/** - * llama.cpp - commit 46e3556e01b824e52395fb050b29804b6cff2a7c - do not edit this file - * - * MIT License - * - * Copyright (c) 2023-2024 The ggml authors - * - * Permission is hereby granted, free of charge, to any person obtaining a copy - * of this software and associated documentation files (the "Software"), to deal - * in the Software without restriction, including without limitation the rights - * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell - * copies of the Software, and to permit persons to whom the Software is - * furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in all - * copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. - */ - // Various helper functions and utilities #pragma once diff --git a/llama/json-schema-to-grammar.cpp b/llama/llama.cpp/common/json-schema-to-grammar.cpp similarity index 97% rename from llama/json-schema-to-grammar.cpp rename to llama/llama.cpp/common/json-schema-to-grammar.cpp index cc870f9f..2a8dbd22 100644 --- a/llama/json-schema-to-grammar.cpp +++ b/llama/llama.cpp/common/json-schema-to-grammar.cpp @@ -1,29 +1,3 @@ -/** - * llama.cpp - commit 46e3556e01b824e52395fb050b29804b6cff2a7c - do not edit this file - * - * MIT License - * - * Copyright (c) 2023-2024 The ggml authors - * - * Permission is hereby granted, free of charge, to any person obtaining a copy - * of this software and associated documentation files (the "Software"), to deal - * in the Software without restriction, including without limitation the rights - * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell - * copies of the Software, and to permit persons to whom the Software is - * furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in all - * copies or substantial portions of the Software. 
- * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. - */ - #include "json-schema-to-grammar.h" #include #include diff --git a/llama/llama.cpp/common/json-schema-to-grammar.h b/llama/llama.cpp/common/json-schema-to-grammar.h new file mode 100644 index 00000000..41623b34 --- /dev/null +++ b/llama/llama.cpp/common/json-schema-to-grammar.h @@ -0,0 +1,8 @@ +#pragma once + +#include "ggml.h" +// Change JSON_ASSERT from assert() to GGML_ASSERT: +#define JSON_ASSERT GGML_ASSERT +#include "json.hpp" + +std::string json_schema_to_grammar(const nlohmann::ordered_json& schema); diff --git a/llama/json.hpp b/llama/llama.cpp/common/json.hpp similarity index 100% rename from llama/json.hpp rename to llama/llama.cpp/common/json.hpp diff --git a/llama/log.cpp b/llama/llama.cpp/common/log.cpp similarity index 89% rename from llama/log.cpp rename to llama/llama.cpp/common/log.cpp index 959f353a..04c7c0ed 100644 --- a/llama/log.cpp +++ b/llama/llama.cpp/common/log.cpp @@ -1,29 +1,3 @@ -/** - * llama.cpp - commit 46e3556e01b824e52395fb050b29804b6cff2a7c - do not edit this file - * - * MIT License - * - * Copyright (c) 2023-2024 The ggml authors - * - * Permission is hereby granted, free of charge, to any person obtaining a copy - * of this software and associated documentation files (the "Software"), to deal - * in the Software without restriction, including without limitation the rights - * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell - * copies of the Software, and to permit persons to whom the Software is - * furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in all - * copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. 
- */ - #include "log.h" #include diff --git a/llama/log.h b/llama/llama.cpp/common/log.h similarity index 77% rename from llama/log.h rename to llama/llama.cpp/common/log.h index 14deeb15..66605cc6 100644 --- a/llama/log.h +++ b/llama/llama.cpp/common/log.h @@ -1,29 +1,3 @@ -/** - * llama.cpp - commit 46e3556e01b824e52395fb050b29804b6cff2a7c - do not edit this file - * - * MIT License - * - * Copyright (c) 2023-2024 The ggml authors - * - * Permission is hereby granted, free of charge, to any person obtaining a copy - * of this software and associated documentation files (the "Software"), to deal - * in the Software without restriction, including without limitation the rights - * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell - * copies of the Software, and to permit persons to whom the Software is - * furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in all - * copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. - */ - #pragma once #include "ggml.h" // for ggml_log_level diff --git a/llama/sampling.cpp b/llama/llama.cpp/common/sampling.cpp similarity index 93% rename from llama/sampling.cpp rename to llama/llama.cpp/common/sampling.cpp index b4b72e28..e83a971c 100644 --- a/llama/sampling.cpp +++ b/llama/llama.cpp/common/sampling.cpp @@ -1,29 +1,3 @@ -/** - * llama.cpp - commit 46e3556e01b824e52395fb050b29804b6cff2a7c - do not edit this file - * - * MIT License - * - * Copyright (c) 2023-2024 The ggml authors - * - * Permission is hereby granted, free of charge, to any person obtaining a copy - * of this software and associated documentation files (the "Software"), to deal - * in the Software without restriction, including without limitation the rights - * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell - * copies of the Software, and to permit persons to whom the Software is - * furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in all - * copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. 
- */ - #include "sampling.h" #include "common.h" diff --git a/llama/sampling.h b/llama/llama.cpp/common/sampling.h similarity index 78% rename from llama/sampling.h rename to llama/llama.cpp/common/sampling.h index 58f40903..348911b1 100644 --- a/llama/sampling.h +++ b/llama/llama.cpp/common/sampling.h @@ -1,29 +1,3 @@ -/** - * llama.cpp - commit 46e3556e01b824e52395fb050b29804b6cff2a7c - do not edit this file - * - * MIT License - * - * Copyright (c) 2023-2024 The ggml authors - * - * Permission is hereby granted, free of charge, to any person obtaining a copy - * of this software and associated documentation files (the "Software"), to deal - * in the Software without restriction, including without limitation the rights - * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell - * copies of the Software, and to permit persons to whom the Software is - * furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in all - * copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. - */ - #pragma once #include "llama.h" diff --git a/llama/stb_image.h b/llama/llama.cpp/common/stb_image.h similarity index 100% rename from llama/stb_image.h rename to llama/llama.cpp/common/stb_image.h diff --git a/llama/clip.cpp b/llama/llama.cpp/examples/llava/clip.cpp similarity index 98% rename from llama/clip.cpp rename to llama/llama.cpp/examples/llava/clip.cpp index d8cb5093..718052e1 100644 --- a/llama/clip.cpp +++ b/llama/llama.cpp/examples/llava/clip.cpp @@ -1,29 +1,3 @@ -/** - * llama.cpp - commit 46e3556e01b824e52395fb050b29804b6cff2a7c - do not edit this file - * - * MIT License - * - * Copyright (c) 2023-2024 The ggml authors - * - * Permission is hereby granted, free of charge, to any person obtaining a copy - * of this software and associated documentation files (the "Software"), to deal - * in the Software without restriction, including without limitation the rights - * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell - * copies of the Software, and to permit persons to whom the Software is - * furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in all - * copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. 
- */ - // NOTE: This is modified from clip.cpp only for LLaVA, // so there might be still unnecessary artifacts hanging around // I'll gradually clean and extend it diff --git a/llama/clip.h b/llama/llama.cpp/examples/llava/clip.h similarity index 74% rename from llama/clip.h rename to llama/llama.cpp/examples/llava/clip.h index 42f24bd6..1603edd2 100644 --- a/llama/clip.h +++ b/llama/llama.cpp/examples/llava/clip.h @@ -1,29 +1,3 @@ -/** - * llama.cpp - commit 46e3556e01b824e52395fb050b29804b6cff2a7c - do not edit this file - * - * MIT License - * - * Copyright (c) 2023-2024 The ggml authors - * - * Permission is hereby granted, free of charge, to any person obtaining a copy - * of this software and associated documentation files (the "Software"), to deal - * in the Software without restriction, including without limitation the rights - * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell - * copies of the Software, and to permit persons to whom the Software is - * furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in all - * copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. - */ - #ifndef CLIP_H #define CLIP_H diff --git a/llama/llava.cpp b/llama/llama.cpp/examples/llava/llava.cpp similarity index 95% rename from llama/llava.cpp rename to llama/llama.cpp/examples/llava/llava.cpp index 15393e2d..0f0f3f62 100644 --- a/llama/llava.cpp +++ b/llama/llama.cpp/examples/llava/llava.cpp @@ -1,29 +1,3 @@ -/** - * llama.cpp - commit 46e3556e01b824e52395fb050b29804b6cff2a7c - do not edit this file - * - * MIT License - * - * Copyright (c) 2023-2024 The ggml authors - * - * Permission is hereby granted, free of charge, to any person obtaining a copy - * of this software and associated documentation files (the "Software"), to deal - * in the Software without restriction, including without limitation the rights - * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell - * copies of the Software, and to permit persons to whom the Software is - * furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in all - * copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. 
- */ - #include "clip.h" #include "llava.h" diff --git a/llama/llama.cpp/examples/llava/llava.go b/llama/llama.cpp/examples/llava/llava.go new file mode 100644 index 00000000..37b031cb --- /dev/null +++ b/llama/llama.cpp/examples/llava/llava.go @@ -0,0 +1,6 @@ +package llava + +// #cgo CXXFLAGS: -std=c++11 +// #cgo CPPFLAGS: -I${SRCDIR}/../../include -I${SRCDIR}/../../common +// #cgo CPPFLAGS: -I${SRCDIR}/../../../../ml/backend/ggml/ggml/include +import "C" diff --git a/llama/llava.h b/llama/llama.cpp/examples/llava/llava.h similarity index 59% rename from llama/llava.h rename to llama/llama.cpp/examples/llava/llava.h index 7e8e501f..b6feb302 100644 --- a/llama/llava.h +++ b/llama/llama.cpp/examples/llava/llava.h @@ -1,29 +1,3 @@ -/** - * llama.cpp - commit 46e3556e01b824e52395fb050b29804b6cff2a7c - do not edit this file - * - * MIT License - * - * Copyright (c) 2023-2024 The ggml authors - * - * Permission is hereby granted, free of charge, to any person obtaining a copy - * of this software and associated documentation files (the "Software"), to deal - * in the Software without restriction, including without limitation the rights - * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell - * copies of the Software, and to permit persons to whom the Software is - * furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in all - * copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. 
- */ - -#ifndef LLAVA_H -#define LLAVA_H diff --git a/llama/llama.cpp/include/llama-cpp.h b/llama/llama.cpp/include/llama-cpp.h new file mode 100644 index 00000000..1500cb2f --- /dev/null +++ b/llama/llama.cpp/include/llama-cpp.h @@ -0,0 +1,30 @@ +#pragma once + +#ifndef __cplusplus +#error "This header is for C++ only" +#endif + +#include <memory> + +#include "llama.h" + +struct llama_model_deleter { + void operator()(llama_model * model) { llama_free_model(model); } +}; + +struct llama_context_deleter { + void operator()(llama_context * context) { llama_free(context); } +}; + +struct llama_sampler_deleter { + void operator()(llama_sampler * sampler) { llama_sampler_free(sampler); } +}; + +struct llama_lora_adapter_deleter { + void operator()(llama_lora_adapter * lora_adapter) { llama_lora_adapter_free(lora_adapter); } +}; + +typedef std::unique_ptr<llama_model, llama_model_deleter> llama_model_ptr; +typedef std::unique_ptr<llama_context, llama_context_deleter> llama_context_ptr; +typedef std::unique_ptr<llama_sampler, llama_sampler_deleter> llama_sampler_ptr; +typedef std::unique_ptr<llama_lora_adapter, llama_lora_adapter_deleter> llama_lora_adapter_ptr; diff --git a/llama/llama.h b/llama/llama.cpp/include/llama.h similarity index 98% rename from llama/llama.h rename to llama/llama.cpp/include/llama.h index 164d3b6f..9f411960 100644 --- a/llama/llama.h +++ b/llama/llama.cpp/include/llama.h @@ -1,29 +1,3 @@ -/** - * llama.cpp - commit 46e3556e01b824e52395fb050b29804b6cff2a7c - do not edit this file - * - * MIT License - * - * Copyright (c) 2023-2024 The ggml authors - * - * Permission is hereby granted, free of charge, to any person obtaining a copy - * of this software and associated documentation files (the "Software"), to deal - * in the Software without restriction, including without limitation the rights - * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell - * copies of the Software, and to permit persons to whom the Software is - * furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in all - * copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE.
- */ - #ifndef LLAMA_H #define LLAMA_H diff --git a/llama/llama-adapter.cpp b/llama/llama.cpp/src/llama-adapter.cpp similarity index 90% rename from llama/llama-adapter.cpp rename to llama/llama.cpp/src/llama-adapter.cpp index 02a48f3f..9fd7edea 100644 --- a/llama/llama-adapter.cpp +++ b/llama/llama.cpp/src/llama-adapter.cpp @@ -1,29 +1,3 @@ -/** - * llama.cpp - commit 46e3556e01b824e52395fb050b29804b6cff2a7c - do not edit this file - * - * MIT License - * - * Copyright (c) 2023-2024 The ggml authors - * - * Permission is hereby granted, free of charge, to any person obtaining a copy - * of this software and associated documentation files (the "Software"), to deal - * in the Software without restriction, including without limitation the rights - * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell - * copies of the Software, and to permit persons to whom the Software is - * furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in all - * copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. - */ - #include "llama-adapter.h" #include "llama-model.h" diff --git a/llama/llama-adapter.h b/llama/llama.cpp/src/llama-adapter.h similarity index 55% rename from llama/llama-adapter.h rename to llama/llama.cpp/src/llama-adapter.h index 1bf860d7..5f1870cc 100644 --- a/llama/llama-adapter.h +++ b/llama/llama.cpp/src/llama-adapter.h @@ -1,29 +1,3 @@ -/** - * llama.cpp - commit 46e3556e01b824e52395fb050b29804b6cff2a7c - do not edit this file - * - * MIT License - * - * Copyright (c) 2023-2024 The ggml authors - * - * Permission is hereby granted, free of charge, to any person obtaining a copy - * of this software and associated documentation files (the "Software"), to deal - * in the Software without restriction, including without limitation the rights - * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell - * copies of the Software, and to permit persons to whom the Software is - * furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in all - * copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. 
- */ - #pragma once #include "llama-impl.h" diff --git a/llama/llama-arch.cpp b/llama/llama.cpp/src/llama-arch.cpp similarity index 98% rename from llama/llama-arch.cpp rename to llama/llama.cpp/src/llama-arch.cpp index a6cc790e..b35aeb31 100644 --- a/llama/llama-arch.cpp +++ b/llama/llama.cpp/src/llama-arch.cpp @@ -1,29 +1,3 @@ -/** - * llama.cpp - commit 46e3556e01b824e52395fb050b29804b6cff2a7c - do not edit this file - * - * MIT License - * - * Copyright (c) 2023-2024 The ggml authors - * - * Permission is hereby granted, free of charge, to any person obtaining a copy - * of this software and associated documentation files (the "Software"), to deal - * in the Software without restriction, including without limitation the rights - * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell - * copies of the Software, and to permit persons to whom the Software is - * furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in all - * copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. - */ - #include "llama-arch.h" #include "llama-impl.h" diff --git a/llama/llama-arch.h b/llama/llama.cpp/src/llama-arch.h similarity index 89% rename from llama/llama-arch.h rename to llama/llama.cpp/src/llama-arch.h index fa8422a8..e8235ae0 100644 --- a/llama/llama-arch.h +++ b/llama/llama.cpp/src/llama-arch.h @@ -1,29 +1,3 @@ -/** - * llama.cpp - commit 46e3556e01b824e52395fb050b29804b6cff2a7c - do not edit this file - * - * MIT License - * - * Copyright (c) 2023-2024 The ggml authors - * - * Permission is hereby granted, free of charge, to any person obtaining a copy - * of this software and associated documentation files (the "Software"), to deal - * in the Software without restriction, including without limitation the rights - * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell - * copies of the Software, and to permit persons to whom the Software is - * furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in all - * copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. 
- */ - #pragma once #include "ggml.h" // ggml_op diff --git a/llama/llama-batch.cpp b/llama/llama.cpp/src/llama-batch.cpp similarity index 91% rename from llama/llama-batch.cpp rename to llama/llama.cpp/src/llama-batch.cpp index 0e0488c3..8682b0e6 100644 --- a/llama/llama-batch.cpp +++ b/llama/llama.cpp/src/llama-batch.cpp @@ -1,29 +1,3 @@ -/** - * llama.cpp - commit 46e3556e01b824e52395fb050b29804b6cff2a7c - do not edit this file - * - * MIT License - * - * Copyright (c) 2023-2024 The ggml authors - * - * Permission is hereby granted, free of charge, to any person obtaining a copy - * of this software and associated documentation files (the "Software"), to deal - * in the Software without restriction, including without limitation the rights - * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell - * copies of the Software, and to permit persons to whom the Software is - * furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in all - * copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. - */ - #include "llama-batch.h" #include diff --git a/llama/llama-batch.h b/llama/llama.cpp/src/llama-batch.h similarity index 67% rename from llama/llama-batch.h rename to llama/llama.cpp/src/llama-batch.h index eb439c3d..773c3808 100644 --- a/llama/llama-batch.h +++ b/llama/llama.cpp/src/llama-batch.h @@ -1,29 +1,3 @@ -/** - * llama.cpp - commit 46e3556e01b824e52395fb050b29804b6cff2a7c - do not edit this file - * - * MIT License - * - * Copyright (c) 2023-2024 The ggml authors - * - * Permission is hereby granted, free of charge, to any person obtaining a copy - * of this software and associated documentation files (the "Software"), to deal - * in the Software without restriction, including without limitation the rights - * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell - * copies of the Software, and to permit persons to whom the Software is - * furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in all - * copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. 
- */ - #pragma once #include "llama.h" diff --git a/llama/llama-chat.cpp b/llama/llama.cpp/src/llama-chat.cpp similarity index 95% rename from llama/llama-chat.cpp rename to llama/llama.cpp/src/llama-chat.cpp index 099b3342..44670d3d 100644 --- a/llama/llama-chat.cpp +++ b/llama/llama.cpp/src/llama-chat.cpp @@ -1,29 +1,3 @@ -/** - * llama.cpp - commit 46e3556e01b824e52395fb050b29804b6cff2a7c - do not edit this file - * - * MIT License - * - * Copyright (c) 2023-2024 The ggml authors - * - * Permission is hereby granted, free of charge, to any person obtaining a copy - * of this software and associated documentation files (the "Software"), to deal - * in the Software without restriction, including without limitation the rights - * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell - * copies of the Software, and to permit persons to whom the Software is - * furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in all - * copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. - */ - #include "llama-chat.h" #include "llama.h" diff --git a/llama/llama-chat.h b/llama/llama.cpp/src/llama-chat.h similarity index 54% rename from llama/llama-chat.h rename to llama/llama.cpp/src/llama-chat.h index deabed71..b8e94d9e 100644 --- a/llama/llama-chat.h +++ b/llama/llama.cpp/src/llama-chat.h @@ -1,29 +1,3 @@ -/** - * llama.cpp - commit 46e3556e01b824e52395fb050b29804b6cff2a7c - do not edit this file - * - * MIT License - * - * Copyright (c) 2023-2024 The ggml authors - * - * Permission is hereby granted, free of charge, to any person obtaining a copy - * of this software and associated documentation files (the "Software"), to deal - * in the Software without restriction, including without limitation the rights - * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell - * copies of the Software, and to permit persons to whom the Software is - * furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in all - * copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. 
- */ - #pragma once #include diff --git a/llama/llama-context.cpp b/llama/llama.cpp/src/llama-context.cpp similarity index 98% rename from llama/llama-context.cpp rename to llama/llama.cpp/src/llama-context.cpp index 91bfd13f..9d0e7ca3 100644 --- a/llama/llama-context.cpp +++ b/llama/llama.cpp/src/llama-context.cpp @@ -1,29 +1,3 @@ -/** - * llama.cpp - commit 46e3556e01b824e52395fb050b29804b6cff2a7c - do not edit this file - * - * MIT License - * - * Copyright (c) 2023-2024 The ggml authors - * - * Permission is hereby granted, free of charge, to any person obtaining a copy - * of this software and associated documentation files (the "Software"), to deal - * in the Software without restriction, including without limitation the rights - * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell - * copies of the Software, and to permit persons to whom the Software is - * furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in all - * copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. - */ - #include "llama-context.h" #include diff --git a/llama/llama-context.h b/llama/llama.cpp/src/llama-context.h similarity index 80% rename from llama/llama-context.h rename to llama/llama.cpp/src/llama-context.h index 64303394..4980a60e 100644 --- a/llama/llama-context.h +++ b/llama/llama.cpp/src/llama-context.h @@ -1,29 +1,3 @@ -/** - * llama.cpp - commit 46e3556e01b824e52395fb050b29804b6cff2a7c - do not edit this file - * - * MIT License - * - * Copyright (c) 2023-2024 The ggml authors - * - * Permission is hereby granted, free of charge, to any person obtaining a copy - * of this software and associated documentation files (the "Software"), to deal - * in the Software without restriction, including without limitation the rights - * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell - * copies of the Software, and to permit persons to whom the Software is - * furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in all - * copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. 
- */ - -#pragma once - -#include "llama.h" diff --git a/llama/llama.cpp/src/llama-cparams.cpp b/llama/llama.cpp/src/llama-cparams.cpp new file mode 100644 index 00000000..28369be3 --- /dev/null +++ b/llama/llama.cpp/src/llama-cparams.cpp @@ -0,0 +1 @@ +#include "llama-cparams.h" diff --git a/llama/llama.cpp/src/llama-cparams.h b/llama/llama.cpp/src/llama-cparams.h new file mode 100644 index 00000000..9681e5a0 --- /dev/null +++ b/llama/llama.cpp/src/llama-cparams.h @@ -0,0 +1,38 @@ +#pragma once + +#include "llama.h" + +#include <cstdint> + +struct llama_cparams { + uint32_t n_ctx; // context size used during inference + uint32_t n_batch; + uint32_t n_ubatch; + uint32_t n_seq_max; + int n_threads; // number of threads to use for generation + int n_threads_batch; // number of threads to use for batch processing + + float rope_freq_base; + float rope_freq_scale; + + uint32_t n_ctx_orig_yarn; + // These hyperparameters are not exposed in GGUF, because all + // existing YaRN models use the same values for them. + float yarn_ext_factor; + float yarn_attn_factor; + float yarn_beta_fast; + float yarn_beta_slow; + float defrag_thold; + + bool embeddings; + bool causal_attn; + bool offload_kqv; + bool flash_attn; + bool no_perf; + bool cross_attn; + + enum llama_pooling_type pooling_type; + + ggml_backend_sched_eval_callback cb_eval; + void * cb_eval_user_data; +}; diff --git a/llama/llama-grammar.cpp b/llama/llama.cpp/src/llama-grammar.cpp similarity index 97% rename from llama/llama-grammar.cpp rename to llama/llama.cpp/src/llama-grammar.cpp index 243cb452..186dc9a2 100644 --- a/llama/llama-grammar.cpp +++ b/llama/llama.cpp/src/llama-grammar.cpp @@ -1,29 +1,3 @@ -/** - * llama.cpp - commit 46e3556e01b824e52395fb050b29804b6cff2a7c - do not edit this file - * - * MIT License - * - * Copyright (c) 2023-2024 The ggml authors - * - * Permission is hereby granted, free of charge, to any person obtaining a copy - * of this software and associated documentation files (the "Software"), to deal - * in the Software without restriction, including without limitation the rights - * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell - * copies of the Software, and to permit persons to whom the Software is - * furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in all - * copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE.
- */ - #include "llama-grammar.h" #include "llama-impl.h" diff --git a/llama/llama-grammar.h b/llama/llama.cpp/src/llama-grammar.h similarity index 78% rename from llama/llama-grammar.h rename to llama/llama.cpp/src/llama-grammar.h index 41811c74..f8b40c65 100644 --- a/llama/llama-grammar.h +++ b/llama/llama.cpp/src/llama-grammar.h @@ -1,29 +1,3 @@ -/** - * llama.cpp - commit 46e3556e01b824e52395fb050b29804b6cff2a7c - do not edit this file - * - * MIT License - * - * Copyright (c) 2023-2024 The ggml authors - * - * Permission is hereby granted, free of charge, to any person obtaining a copy - * of this software and associated documentation files (the "Software"), to deal - * in the Software without restriction, including without limitation the rights - * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell - * copies of the Software, and to permit persons to whom the Software is - * furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in all - * copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. - */ - #pragma once #include "llama.h" diff --git a/llama/llama-hparams.cpp b/llama/llama.cpp/src/llama-hparams.cpp similarity index 61% rename from llama/llama-hparams.cpp rename to llama/llama.cpp/src/llama-hparams.cpp index d47225e7..42f8a58f 100644 --- a/llama/llama-hparams.cpp +++ b/llama/llama.cpp/src/llama-hparams.cpp @@ -1,29 +1,3 @@ -/** - * llama.cpp - commit 46e3556e01b824e52395fb050b29804b6cff2a7c - do not edit this file - * - * MIT License - * - * Copyright (c) 2023-2024 The ggml authors - * - * Permission is hereby granted, free of charge, to any person obtaining a copy - * of this software and associated documentation files (the "Software"), to deal - * in the Software without restriction, including without limitation the rights - * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell - * copies of the Software, and to permit persons to whom the Software is - * furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in all - * copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. 
- */ - #include "llama-hparams.h" #include "ggml.h" diff --git a/llama/llama-hparams.h b/llama/llama.cpp/src/llama-hparams.h similarity index 78% rename from llama/llama-hparams.h rename to llama/llama.cpp/src/llama-hparams.h index b2d4bd61..f826cd9a 100644 --- a/llama/llama-hparams.h +++ b/llama/llama.cpp/src/llama-hparams.h @@ -1,29 +1,3 @@ -/** - * llama.cpp - commit 46e3556e01b824e52395fb050b29804b6cff2a7c - do not edit this file - * - * MIT License - * - * Copyright (c) 2023-2024 The ggml authors - * - * Permission is hereby granted, free of charge, to any person obtaining a copy - * of this software and associated documentation files (the "Software"), to deal - * in the Software without restriction, including without limitation the rights - * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell - * copies of the Software, and to permit persons to whom the Software is - * furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in all - * copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. - */ - #pragma once #include "llama.h" diff --git a/llama/llama-impl.cpp b/llama/llama.cpp/src/llama-impl.cpp similarity index 82% rename from llama/llama-impl.cpp rename to llama/llama.cpp/src/llama-impl.cpp index de726cb2..a05ba4f6 100644 --- a/llama/llama-impl.cpp +++ b/llama/llama.cpp/src/llama-impl.cpp @@ -1,29 +1,3 @@ -/** - * llama.cpp - commit 46e3556e01b824e52395fb050b29804b6cff2a7c - do not edit this file - * - * MIT License - * - * Copyright (c) 2023-2024 The ggml authors - * - * Permission is hereby granted, free of charge, to any person obtaining a copy - * of this software and associated documentation files (the "Software"), to deal - * in the Software without restriction, including without limitation the rights - * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell - * copies of the Software, and to permit persons to whom the Software is - * furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in all - * copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. 
- */ - #include "llama-impl.h" #include "llama.h" diff --git a/llama/llama-impl.h b/llama/llama.cpp/src/llama-impl.h similarity index 58% rename from llama/llama-impl.h rename to llama/llama.cpp/src/llama-impl.h index c9ae33f4..12d1fb08 100644 --- a/llama/llama-impl.h +++ b/llama/llama.cpp/src/llama-impl.h @@ -1,29 +1,3 @@ -/** - * llama.cpp - commit 46e3556e01b824e52395fb050b29804b6cff2a7c - do not edit this file - * - * MIT License - * - * Copyright (c) 2023-2024 The ggml authors - * - * Permission is hereby granted, free of charge, to any person obtaining a copy - * of this software and associated documentation files (the "Software"), to deal - * in the Software without restriction, including without limitation the rights - * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell - * copies of the Software, and to permit persons to whom the Software is - * furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in all - * copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. - */ - #pragma once #include "ggml.h" // for ggml_log_level diff --git a/llama/llama-kv-cache.cpp b/llama/llama.cpp/src/llama-kv-cache.cpp similarity index 95% rename from llama/llama-kv-cache.cpp rename to llama/llama.cpp/src/llama-kv-cache.cpp index aa555e65..cf814dbe 100644 --- a/llama/llama-kv-cache.cpp +++ b/llama/llama.cpp/src/llama-kv-cache.cpp @@ -1,29 +1,3 @@ -/** - * llama.cpp - commit 46e3556e01b824e52395fb050b29804b6cff2a7c - do not edit this file - * - * MIT License - * - * Copyright (c) 2023-2024 The ggml authors - * - * Permission is hereby granted, free of charge, to any person obtaining a copy - * of this software and associated documentation files (the "Software"), to deal - * in the Software without restriction, including without limitation the rights - * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell - * copies of the Software, and to permit persons to whom the Software is - * furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in all - * copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. 
- */ - #include "llama-kv-cache.h" #include "llama-impl.h" diff --git a/llama/llama-kv-cache.h b/llama/llama.cpp/src/llama-kv-cache.h similarity index 84% rename from llama/llama-kv-cache.h rename to llama/llama.cpp/src/llama-kv-cache.h index a4d65611..dca6f399 100644 --- a/llama/llama-kv-cache.h +++ b/llama/llama.cpp/src/llama-kv-cache.h @@ -1,29 +1,3 @@ -/** - * llama.cpp - commit 46e3556e01b824e52395fb050b29804b6cff2a7c - do not edit this file - * - * MIT License - * - * Copyright (c) 2023-2024 The ggml authors - * - * Permission is hereby granted, free of charge, to any person obtaining a copy - * of this software and associated documentation files (the "Software"), to deal - * in the Software without restriction, including without limitation the rights - * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell - * copies of the Software, and to permit persons to whom the Software is - * furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in all - * copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. - */ - #pragma once #include "llama.h" diff --git a/llama/llama-mmap.cpp b/llama/llama.cpp/src/llama-mmap.cpp similarity index 93% rename from llama/llama-mmap.cpp rename to llama/llama.cpp/src/llama-mmap.cpp index 3868e9dd..a9932633 100644 --- a/llama/llama-mmap.cpp +++ b/llama/llama.cpp/src/llama-mmap.cpp @@ -1,29 +1,3 @@ -/** - * llama.cpp - commit 46e3556e01b824e52395fb050b29804b6cff2a7c - do not edit this file - * - * MIT License - * - * Copyright (c) 2023-2024 The ggml authors - * - * Permission is hereby granted, free of charge, to any person obtaining a copy - * of this software and associated documentation files (the "Software"), to deal - * in the Software without restriction, including without limitation the rights - * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell - * copies of the Software, and to permit persons to whom the Software is - * furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in all - * copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. 
- */ - #include "llama-mmap.h" #include "llama-impl.h" diff --git a/llama/llama-mmap.h b/llama/llama.cpp/src/llama-mmap.h similarity index 52% rename from llama/llama-mmap.h rename to llama/llama.cpp/src/llama-mmap.h index ebd7dc16..6bcddee8 100644 --- a/llama/llama-mmap.h +++ b/llama/llama.cpp/src/llama-mmap.h @@ -1,29 +1,3 @@ -/** - * llama.cpp - commit 46e3556e01b824e52395fb050b29804b6cff2a7c - do not edit this file - * - * MIT License - * - * Copyright (c) 2023-2024 The ggml authors - * - * Permission is hereby granted, free of charge, to any person obtaining a copy - * of this software and associated documentation files (the "Software"), to deal - * in the Software without restriction, including without limitation the rights - * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell - * copies of the Software, and to permit persons to whom the Software is - * furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in all - * copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. - */ - #pragma once #include diff --git a/llama/llama-model-loader.cpp b/llama/llama.cpp/src/llama-model-loader.cpp similarity index 97% rename from llama/llama-model-loader.cpp rename to llama/llama.cpp/src/llama-model-loader.cpp index ebb369e4..b12d6566 100644 --- a/llama/llama-model-loader.cpp +++ b/llama/llama.cpp/src/llama-model-loader.cpp @@ -1,29 +1,3 @@ -/** - * llama.cpp - commit 46e3556e01b824e52395fb050b29804b6cff2a7c - do not edit this file - * - * MIT License - * - * Copyright (c) 2023-2024 The ggml authors - * - * Permission is hereby granted, free of charge, to any person obtaining a copy - * of this software and associated documentation files (the "Software"), to deal - * in the Software without restriction, including without limitation the rights - * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell - * copies of the Software, and to permit persons to whom the Software is - * furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in all - * copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. 
- */ - #include "llama-model-loader.h" #include "ggml.h" diff --git a/llama/llama-model-loader.h b/llama/llama.cpp/src/llama-model-loader.h similarity index 81% rename from llama/llama-model-loader.h rename to llama/llama.cpp/src/llama-model-loader.h index 873d4c0c..1ec47819 100644 --- a/llama/llama-model-loader.h +++ b/llama/llama.cpp/src/llama-model-loader.h @@ -1,29 +1,3 @@ -/** - * llama.cpp - commit 46e3556e01b824e52395fb050b29804b6cff2a7c - do not edit this file - * - * MIT License - * - * Copyright (c) 2023-2024 The ggml authors - * - * Permission is hereby granted, free of charge, to any person obtaining a copy - * of this software and associated documentation files (the "Software"), to deal - * in the Software without restriction, including without limitation the rights - * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell - * copies of the Software, and to permit persons to whom the Software is - * furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in all - * copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. - */ - #pragma once #include "llama.h" diff --git a/llama/llama-model.cpp b/llama/llama.cpp/src/llama-model.cpp similarity index 98% rename from llama/llama-model.cpp rename to llama/llama.cpp/src/llama-model.cpp index 2482f98a..4f9bbf90 100644 --- a/llama/llama-model.cpp +++ b/llama/llama.cpp/src/llama-model.cpp @@ -1,29 +1,3 @@ -/** - * llama.cpp - commit 46e3556e01b824e52395fb050b29804b6cff2a7c - do not edit this file - * - * MIT License - * - * Copyright (c) 2023-2024 The ggml authors - * - * Permission is hereby granted, free of charge, to any person obtaining a copy - * of this software and associated documentation files (the "Software"), to deal - * in the Software without restriction, including without limitation the rights - * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell - * copies of the Software, and to permit persons to whom the Software is - * furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in all - * copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. 
- */ - #include "llama-model.h" #include "llama-impl.h" diff --git a/llama/llama-model.h b/llama/llama.cpp/src/llama-model.h similarity index 91% rename from llama/llama-model.h rename to llama/llama.cpp/src/llama-model.h index 756b09f4..5b23e2ba 100644 --- a/llama/llama-model.h +++ b/llama/llama.cpp/src/llama-model.h @@ -1,29 +1,3 @@ -/** - * llama.cpp - commit 46e3556e01b824e52395fb050b29804b6cff2a7c - do not edit this file - * - * MIT License - * - * Copyright (c) 2023-2024 The ggml authors - * - * Permission is hereby granted, free of charge, to any person obtaining a copy - * of this software and associated documentation files (the "Software"), to deal - * in the Software without restriction, including without limitation the rights - * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell - * copies of the Software, and to permit persons to whom the Software is - * furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in all - * copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. - */ - #pragma once #include "llama.h" diff --git a/llama/llama-quant.cpp b/llama/llama.cpp/src/llama-quant.cpp similarity index 97% rename from llama/llama-quant.cpp rename to llama/llama.cpp/src/llama-quant.cpp index 6b4d288b..27def6fd 100644 --- a/llama/llama-quant.cpp +++ b/llama/llama.cpp/src/llama-quant.cpp @@ -1,29 +1,3 @@ -/** - * llama.cpp - commit 46e3556e01b824e52395fb050b29804b6cff2a7c - do not edit this file - * - * MIT License - * - * Copyright (c) 2023-2024 The ggml authors - * - * Permission is hereby granted, free of charge, to any person obtaining a copy - * of this software and associated documentation files (the "Software"), to deal - * in the Software without restriction, including without limitation the rights - * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell - * copies of the Software, and to permit persons to whom the Software is - * furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in all - * copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. 
- */ - #include "llama-quant.h" #include "llama-impl.h" diff --git a/llama/llama.cpp/src/llama-quant.h b/llama/llama.cpp/src/llama-quant.h new file mode 100644 index 00000000..6f70f09b --- /dev/null +++ b/llama/llama.cpp/src/llama-quant.h @@ -0,0 +1 @@ +#pragma once diff --git a/llama/llama-sampling.cpp b/llama/llama.cpp/src/llama-sampling.cpp similarity index 98% rename from llama/llama-sampling.cpp rename to llama/llama.cpp/src/llama-sampling.cpp index 1071efdc..69cea2f1 100644 --- a/llama/llama-sampling.cpp +++ b/llama/llama.cpp/src/llama-sampling.cpp @@ -1,29 +1,3 @@ -/** - * llama.cpp - commit 46e3556e01b824e52395fb050b29804b6cff2a7c - do not edit this file - * - * MIT License - * - * Copyright (c) 2023-2024 The ggml authors - * - * Permission is hereby granted, free of charge, to any person obtaining a copy - * of this software and associated documentation files (the "Software"), to deal - * in the Software without restriction, including without limitation the rights - * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell - * copies of the Software, and to permit persons to whom the Software is - * furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in all - * copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. - */ - #include "llama-sampling.h" #include "llama-impl.h" diff --git a/llama/llama-sampling.h b/llama/llama.cpp/src/llama-sampling.h similarity index 54% rename from llama/llama-sampling.h rename to llama/llama.cpp/src/llama-sampling.h index 10a7878f..919f6fdf 100644 --- a/llama/llama-sampling.h +++ b/llama/llama.cpp/src/llama-sampling.h @@ -1,29 +1,3 @@ -/** - * llama.cpp - commit 46e3556e01b824e52395fb050b29804b6cff2a7c - do not edit this file - * - * MIT License - * - * Copyright (c) 2023-2024 The ggml authors - * - * Permission is hereby granted, free of charge, to any person obtaining a copy - * of this software and associated documentation files (the "Software"), to deal - * in the Software without restriction, including without limitation the rights - * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell - * copies of the Software, and to permit persons to whom the Software is - * furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in all - * copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. - */ - #pragma once // TODO: rename llama-sampling.h/.cpp to llama-sampler.h/.cpp ? 
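The block of renames above is the heart of this patch: the vendored llama.cpp sources move from a flat llama/ directory into llama/llama.cpp/src/, and the injected "do not edit this file" MIT banner is stripped from each copy now that the upstream commit is tracked by the vendoring layout itself. The new llama.cpp/src/llama.go stub later in this diff shows how these C++ files still reach the Go build: cgo compiles every C/C++ source sitting in a package directory, so a stub that only declares build flags is enough. A minimal sketch of that pattern, with a hypothetical package name and include path rather than the repository's real ones:

package vendored

// This stub only supplies build flags; cgo compiles each .c/.cpp file that
// sits next to it and links the resulting objects into the Go binary.
// The include path below is illustrative, not Ollama's actual layout.

// #cgo CXXFLAGS: -std=c++17
// #cgo CPPFLAGS: -I${SRCDIR}/../include
import "C"

A consumer then forces those objects into its link with a blank import, e.g. import _ "example.com/project/vendored", which is the same trick the llama.go changes below use for llama.cpp/common, llama.cpp/examples/llava, llama.cpp/src, and the ggml backend.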
diff --git a/llama/llama-vocab.cpp b/llama/llama.cpp/src/llama-vocab.cpp similarity index 98% rename from llama/llama-vocab.cpp rename to llama/llama.cpp/src/llama-vocab.cpp index 7f9f699a..8f44705a 100644 --- a/llama/llama-vocab.cpp +++ b/llama/llama.cpp/src/llama-vocab.cpp @@ -1,29 +1,3 @@ -/** - * llama.cpp - commit 46e3556e01b824e52395fb050b29804b6cff2a7c - do not edit this file - * - * MIT License - * - * Copyright (c) 2023-2024 The ggml authors - * - * Permission is hereby granted, free of charge, to any person obtaining a copy - * of this software and associated documentation files (the "Software"), to deal - * in the Software without restriction, including without limitation the rights - * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell - * copies of the Software, and to permit persons to whom the Software is - * furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in all - * copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. - */ - #include "llama-vocab.h" #include "llama-impl.h" diff --git a/llama/llama-vocab.h b/llama/llama.cpp/src/llama-vocab.h similarity index 84% rename from llama/llama-vocab.h rename to llama/llama.cpp/src/llama-vocab.h index 81b14fff..0d00086d 100644 --- a/llama/llama-vocab.h +++ b/llama/llama.cpp/src/llama-vocab.h @@ -1,29 +1,3 @@ -/** - * llama.cpp - commit 46e3556e01b824e52395fb050b29804b6cff2a7c - do not edit this file - * - * MIT License - * - * Copyright (c) 2023-2024 The ggml authors - * - * Permission is hereby granted, free of charge, to any person obtaining a copy - * of this software and associated documentation files (the "Software"), to deal - * in the Software without restriction, including without limitation the rights - * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell - * copies of the Software, and to permit persons to whom the Software is - * furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in all - * copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. 
- */ - #pragma once #include "llama.h" diff --git a/llama/llama.cpp b/llama/llama.cpp/src/llama.cpp similarity index 99% rename from llama/llama.cpp rename to llama/llama.cpp/src/llama.cpp index 9b123fce..c95da45d 100644 --- a/llama/llama.cpp +++ b/llama/llama.cpp/src/llama.cpp @@ -1,29 +1,3 @@ -/** - * llama.cpp - commit 46e3556e01b824e52395fb050b29804b6cff2a7c - do not edit this file - * - * MIT License - * - * Copyright (c) 2023-2024 The ggml authors - * - * Permission is hereby granted, free of charge, to any person obtaining a copy - * of this software and associated documentation files (the "Software"), to deal - * in the Software without restriction, including without limitation the rights - * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell - * copies of the Software, and to permit persons to whom the Software is - * furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in all - * copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. - */ - #include "llama-impl.h" #include "llama-chat.h" diff --git a/llama/llama.cpp/src/llama.go b/llama/llama.cpp/src/llama.go new file mode 100644 index 00000000..ddbd5378 --- /dev/null +++ b/llama/llama.cpp/src/llama.go @@ -0,0 +1,8 @@ +package llama + +// #cgo CXXFLAGS: -std=c++17 +// #cgo CPPFLAGS: -I${SRCDIR}/../include +// #cgo CPPFLAGS: -I${SRCDIR}/../../../ml/backend/ggml/ggml/include +// #cgo windows CPPFLAGS: -D_WIN32_WINNT=0x0602 +import "C" +import _ "github.com/ollama/ollama/ml/backend/ggml/ggml/src" diff --git a/llama/unicode-data.cpp b/llama/llama.cpp/src/unicode-data.cpp similarity index 99% rename from llama/unicode-data.cpp rename to llama/llama.cpp/src/unicode-data.cpp index 393cd273..04dcd7fc 100644 --- a/llama/unicode-data.cpp +++ b/llama/llama.cpp/src/unicode-data.cpp @@ -1,29 +1,3 @@ -/** - * llama.cpp - commit 46e3556e01b824e52395fb050b29804b6cff2a7c - do not edit this file - * - * MIT License - * - * Copyright (c) 2023-2024 The ggml authors - * - * Permission is hereby granted, free of charge, to any person obtaining a copy - * of this software and associated documentation files (the "Software"), to deal - * in the Software without restriction, including without limitation the rights - * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell - * copies of the Software, and to permit persons to whom the Software is - * furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in all - * copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. - */ - // generated with scripts/gen-unicode-data.py #include "unicode-data.h" diff --git a/llama/llama.cpp/src/unicode-data.h b/llama/llama.cpp/src/unicode-data.h new file mode 100644 index 00000000..f6973ebd --- /dev/null +++ b/llama/llama.cpp/src/unicode-data.h @@ -0,0 +1,20 @@ +#pragma once + +#include <cstdint> +#include <vector> +#include <unordered_map> +#include <unordered_set> + +struct range_nfd { + uint32_t first; + uint32_t last; + uint32_t nfd; +}; + +static const uint32_t MAX_CODEPOINTS = 0x110000; + +extern const std::initializer_list<std::pair<uint32_t, uint16_t>> unicode_ranges_flags; +extern const std::unordered_set<uint32_t> unicode_set_whitespace; +extern const std::initializer_list<std::pair<uint32_t, uint32_t>> unicode_map_lowercase; +extern const std::initializer_list<std::pair<uint32_t, uint32_t>> unicode_map_uppercase; +extern const std::initializer_list<range_nfd> unicode_ranges_nfd; diff --git a/llama/unicode.cpp b/llama/llama.cpp/src/unicode.cpp similarity index 96% rename from llama/unicode.cpp rename to llama/llama.cpp/src/unicode.cpp index 5dcb2e98..6155da80 100644 --- a/llama/unicode.cpp +++ b/llama/llama.cpp/src/unicode.cpp @@ -1,29 +1,3 @@ -/** - * llama.cpp - commit 46e3556e01b824e52395fb050b29804b6cff2a7c - do not edit this file - * - * MIT License - * - * Copyright (c) 2023-2024 The ggml authors - * - * Permission is hereby granted, free of charge, to any person obtaining a copy - * of this software and associated documentation files (the "Software"), to deal - * in the Software without restriction, including without limitation the rights - * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell - * copies of the Software, and to permit persons to whom the Software is - * furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in all - * copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE.
- */ - #if defined(_MSC_VER) #define _SILENCE_CXX17_CODECVT_HEADER_DEPRECATION_WARNING #endif diff --git a/llama/unicode.h b/llama/llama.cpp/src/unicode.h similarity index 63% rename from llama/unicode.h rename to llama/llama.cpp/src/unicode.h index b6a99568..c27098df 100644 --- a/llama/unicode.h +++ b/llama/llama.cpp/src/unicode.h @@ -1,29 +1,3 @@ -/** - * llama.cpp - commit 46e3556e01b824e52395fb050b29804b6cff2a7c - do not edit this file - * - * MIT License - * - * Copyright (c) 2023-2024 The ggml authors - * - * Permission is hereby granted, free of charge, to any person obtaining a copy - * of this software and associated documentation files (the "Software"), to deal - * in the Software without restriction, including without limitation the rights - * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell - * copies of the Software, and to permit persons to whom the Software is - * furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in all - * copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. - */ - #pragma once #include diff --git a/llama/llama.go b/llama/llama.go index 18790a95..1d4513e3 100644 --- a/llama/llama.go +++ b/llama/llama.go @@ -1,70 +1,20 @@ package llama -//go:generate make -j 8 - /* -#cgo CFLAGS: -O3 -std=c17 -DGGML_BUILD=1 -DNDEBUG -DLOG_DISABLE_LOGS -DGGML_USE_LLAMAFILE -DGGML_USE_CPU -DGGML_USE_CPU_AARCH64 -#cgo CXXFLAGS: -O3 -std=c++17 -DGGML_BUILD=1 -DNDEBUG -DLOG_DISABLE_LOGS -DGGML_USE_LLAMAFILE -DGGML_USE_CPU -DGGML_USE_CPU_AARCH64 -#cgo amd64,avx CFLAGS: -mavx -#cgo amd64,avx CXXFLAGS: -mavx -#cgo amd64,avx2 CFLAGS: -mavx2 -mfma -mf16c -#cgo amd64,avx2 CXXFLAGS: -mavx2 -mfma -mf16c -#cgo amd64,avx512 CFLAGS: -mavx512f -mavx512dq -mavx512bw -#cgo amd64,avx512 CXXFLAGS: -mavx512f -mavx512dq -mavx512bw -#cgo amd64,avx512bf16 CFLAGS: -mavx512bf16 -D__AVX512BF16__ -#cgo amd64,avx512bf16 CXXFLAGS: -mavx512bf16 -D__AVX512BF16__ -#cgo amd64,avx512vbmi CFLAGS: -mavx512vbmi -D__AVX512VBMI__ -#cgo amd64,avx512vbmi CXXFLAGS: -mavx512vbmi -D__AVX512VBMI__ -#cgo amd64,avx512vnni CFLAGS: -mavx512vnni -D__AVX512VNNI__ -#cgo amd64,avx512vnni CXXFLAGS: -mavx512vnni -D__AVX512VNNI__ -#cgo amd64,f16c CFLAGS: -mf16c -#cgo amd64,f16c CXXFLAGS: -mf16c -#cgo amd64,fma CFLAGS: -mfma -#cgo amd64,fma CXXFLAGS: -mfma -#cgo cuda CFLAGS: -fPIE -DGGML_USE_CUDA -DGGML_CUDA_DMMV_X=32 -DGGML_CUDA_PEER_MAX_BATCH_SIZE=128 -DGGML_CUDA_MMV_Y=1 -DGGML_BUILD=1 -#cgo cuda CXXFLAGS: -DGGML_USE_CUDA -DGGML_CUDA_DMMV_X=32 -DGGML_CUDA_PEER_MAX_BATCH_SIZE=128 -DGGML_CUDA_MMV_Y=1 -DGGML_BUILD=1 -#cgo cuda_jetpack5 LDFLAGS: -lggml_cuda_jetpack5 -#cgo cuda_jetpack6 LDFLAGS: -lggml_cuda_jetpack6 -#cgo cuda_v11 LDFLAGS: -lggml_cuda_v11 -#cgo cuda_v12 LDFLAGS: -lggml_cuda_v12 -#cgo darwin,amd64 CFLAGS: -Wno-incompatible-pointer-types-discards-qualifiers -#cgo darwin,amd64 CXXFLAGS: -Wno-incompatible-pointer-types-discards-qualifiers -#cgo darwin,amd64 LDFLAGS: -framework Foundation -#cgo darwin,amd64,avx2 CFLAGS: 
-DGGML_USE_ACCELERATE -DACCELERATE_NEW_LAPACK -DACCELERATE_LAPACK_ILP64 -#cgo darwin,amd64,avx2 CXXFLAGS: -DGGML_USE_ACCELERATE -DACCELERATE_NEW_LAPACK -DACCELERATE_LAPACK_ILP64 -#cgo darwin,amd64,avx2 LDFLAGS: -framework Accelerate -#cgo darwin,arm64 CFLAGS: -DGGML_USE_METAL -DGGML_USE_ACCELERATE -DGGML_METAL_EMBED_LIBRARY -DACCELERATE_NEW_LAPACK -DACCELERATE_LAPACK_ILP64 -DGGML_USE_BLAS -DGGML_BLAS_USE_ACCELERATE -#cgo darwin,arm64 CXXFLAGS: -DGGML_USE_METAL -DGGML_USE_ACCELERATE -DGGML_METAL_EMBED_LIBRARY -DACCELERATE_NEW_LAPACK -DACCELERATE_LAPACK_ILP64 -DGGML_USE_BLAS -DGGML_BLAS_USE_ACCELERATE -#cgo darwin,arm64 LDFLAGS: -framework Foundation -framework Metal -framework MetalKit -framework Accelerate -#cgo linux CFLAGS: -D_GNU_SOURCE -#cgo linux CXXFLAGS: -D_GNU_SOURCE -#cgo linux LDFLAGS: -ldl -#cgo linux,amd64 LDFLAGS: -L${SRCDIR}/build/linux-amd64 -#cgo linux,arm64 CFLAGS: -D__aarch64__ -D__ARM_NEON -D__ARM_FEATURE_FMA -#cgo linux,arm64 CXXFLAGS: -D__aarch64__ -D__ARM_NEON -D__ARM_FEATURE_FMA -#cgo linux,arm64 LDFLAGS: -L${SRCDIR}/build/linux-arm64 -#cgo linux,arm64,sve CFLAGS: -march=armv8.6-a+sve -#cgo linux,arm64,sve CXXFLAGS: -march=armv8.6-a+sve -#cgo linux,cuda LDFLAGS: -lcuda -lcudart -lcublas -lcublasLt -lpthread -lrt -lresolv -#cgo linux,rocm LDFLAGS: -lpthread -lrt -lresolv -#cgo rocm CFLAGS: -DGGML_USE_CUDA -DGGML_USE_HIP -DGGML_CUDA_DMMV_X=32 -DGGML_CUDA_PEER_MAX_BATCH_SIZE=128 -DGGML_CUDA_MMV_Y=1 -DGGML_BUILD=1 -#cgo rocm CXXFLAGS: -DGGML_USE_CUDA -DGGML_USE_HIP -DGGML_CUDA_DMMV_X=32 -DGGML_CUDA_PEER_MAX_BATCH_SIZE=128 -DGGML_CUDA_MMV_Y=1 -DGGML_BUILD=1 -#cgo rocm LDFLAGS: -L${SRCDIR} -lggml_rocm -lhipblas -lamdhip64 -lrocblas -#cgo windows CFLAGS: -Wno-discarded-qualifiers -D_WIN32_WINNT=0x602 -#cgo windows CXXFLAGS: -D_WIN32_WINNT=0x602 -#cgo windows LDFLAGS: -lmsvcrt -static-libstdc++ -static-libgcc -static -#cgo windows,amd64 LDFLAGS: -L${SRCDIR}/build/windows-amd64 -#cgo windows,arm64 CFLAGS: -D__aarch64__ -D__ARM_NEON -D__ARM_FEATURE_FMA -#cgo windows,arm64 CXXFLAGS: -D__aarch64__ -D__ARM_NEON -D__ARM_FEATURE_FMA -#cgo windows,arm64 LDFLAGS: -L${SRCDIR}/build/windows-arm64 -#cgo windows,cuda LDFLAGS: -lcuda -lcudart -lcublas -lcublasLt -#cgo windows,rocm LDFLAGS: -lggml_rocm -lhipblas -lamdhip64 -lrocblas +#cgo CFLAGS: -std=c11 +#cgo CXXFLAGS: -std=c++17 +#cgo CPPFLAGS: -I${SRCDIR}/llama.cpp/include +#cgo CPPFLAGS: -I${SRCDIR}/llama.cpp/common +#cgo CPPFLAGS: -I${SRCDIR}/llama.cpp/examples/llava +#cgo CPPFLAGS: -I${SRCDIR}/llama.cpp/src +#cgo CPPFLAGS: -I${SRCDIR}/../ml/backend/ggml/ggml/include #include +#include "ggml.h" #include "llama.h" #include "clip.h" -#include "ggml.h" #include "llava.h" + #include "mllama.h" #include "sampling_ext.h" @@ -96,9 +46,15 @@ import ( "strings" "sync/atomic" "unsafe" + + _ "github.com/ollama/ollama/llama/llama.cpp/common" + _ "github.com/ollama/ollama/llama/llama.cpp/examples/llava" + _ "github.com/ollama/ollama/llama/llama.cpp/src" + "github.com/ollama/ollama/ml/backend/ggml/ggml/src" ) func BackendInit() { + ggml.OnceLoad() C.llama_backend_init() } diff --git a/llama/mmq.h b/llama/mmq.h deleted file mode 100644 index c78d3a1c..00000000 --- a/llama/mmq.h +++ /dev/null @@ -1,36 +0,0 @@ -/** - * llama.cpp - commit 46e3556e01b824e52395fb050b29804b6cff2a7c - do not edit this file - * - * MIT License - * - * Copyright (c) 2023-2024 The ggml authors - * - * Permission is hereby granted, free of charge, to any person obtaining a copy - * of this software and associated documentation files (the "Software"), to deal - * in the 
Software without restriction, including without limitation the rights - * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell - * copies of the Software, and to permit persons to whom the Software is - * furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in all - * copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. - */ - -#pragma once -#include "common.h" - -size_t ggml_backend_amx_desired_wsize(const struct ggml_tensor * dst); - -size_t ggml_backend_amx_get_alloc_size(const struct ggml_tensor * tensor); - -void ggml_backend_amx_convert_weight(struct ggml_tensor * tensor, const void * data, size_t offset, size_t size); - -void ggml_backend_amx_mul_mat(const struct ggml_compute_params * params, struct ggml_tensor * dst); diff --git a/llama/patches/0001-cuda.patch b/llama/patches/0001-cuda.patch index 574654b5..0bf338f2 100644 --- a/llama/patches/0001-cuda.patch +++ b/llama/patches/0001-cuda.patch @@ -4,39 +4,44 @@ Date: Thu, 6 Jun 2024 23:55:47 -0700 Subject: [PATCH] cuda --- - ggml/src/ggml-backend.cpp | 5 +++++ - ggml/src/ggml-cuda/ggml-cuda.cu | 4 ++++ - 2 files changed, 9 insertions(+) + ggml/src/ggml-backend.cpp | 1 - + ggml/src/ggml-cuda/ggml-cuda.cu | 1 + + ggml/src/ggml-metal/ggml-metal.m | 1 + + 3 files changed, 2 insertions(+), 1 deletion(-) diff --git a/ggml/src/ggml-backend.cpp b/ggml/src/ggml-backend.cpp -index e2d6c405..1b62c056 100644 +index e2d6c405..a12172dc 100644 --- a/ggml/src/ggml-backend.cpp +++ b/ggml/src/ggml-backend.cpp -@@ -106,7 +106,12 @@ void ggml_backend_buffer_free(ggml_backend_buffer_t buffer) { +@@ -106,7 +106,6 @@ void ggml_backend_buffer_free(ggml_backend_buffer_t buffer) { if (buffer->iface.free_buffer != NULL) { buffer->iface.free_buffer(buffer); } -+ -+// TODO: this needs to be freed in cuda and hip backends because -+// the cuda backend implementation compiled with msvc -+#if !defined(GGML_USE_CUDA) && !defined(GGML_USE_HIP) - delete buffer; -+#endif +- delete buffer; } size_t ggml_backend_buffer_get_size(ggml_backend_buffer_t buffer) { diff --git a/ggml/src/ggml-cuda/ggml-cuda.cu b/ggml/src/ggml-cuda/ggml-cuda.cu -index 0b06be72..0a6ae325 100644 +index 0b06be72..be29e979 100644 --- a/ggml/src/ggml-cuda/ggml-cuda.cu +++ b/ggml/src/ggml-cuda/ggml-cuda.cu -@@ -424,6 +424,10 @@ struct ggml_backend_cuda_buffer_context { +@@ -424,6 +424,7 @@ struct ggml_backend_cuda_buffer_context { static void ggml_backend_cuda_buffer_free_buffer(ggml_backend_buffer_t buffer) { ggml_backend_cuda_buffer_context * ctx = (ggml_backend_cuda_buffer_context *)buffer->context; delete ctx; -+ -+ // TODO: this needs to be freed in cuda and hipblas backends because -+ // the cuda backend implementation compiled with msvc -+ free(buffer); ++ delete buffer; } static bool ggml_backend_buffer_is_cuda(ggml_backend_buffer_t buffer) { +diff --git a/ggml/src/ggml-metal/ggml-metal.m b/ggml/src/ggml-metal/ggml-metal.m +index a85502ee..cd8ef741 100644 +--- a/ggml/src/ggml-metal/ggml-metal.m ++++ 
b/ggml/src/ggml-metal/ggml-metal.m +@@ -4187,6 +4187,7 @@ static void ggml_backend_metal_buffer_free_buffer(ggml_backend_buffer_t buffer) + } + + free(ctx); ++ free(buffer); + } + + static void * ggml_backend_metal_buffer_get_base(ggml_backend_buffer_t buffer) { diff --git a/llama/patches/0006-conditional-fattn.patch b/llama/patches/0006-conditional-fattn.patch index 62c24807..73990578 100644 --- a/llama/patches/0006-conditional-fattn.patch +++ b/llama/patches/0006-conditional-fattn.patch @@ -8,10 +8,10 @@ Subject: [PATCH] conditional-fattn 1 file changed, 2 insertions(+) diff --git a/ggml/src/ggml-cuda/ggml-cuda.cu b/ggml/src/ggml-cuda/ggml-cuda.cu -index 0a6ae325..bb425ee8 100644 +index be29e979..aaa79ea4 100644 --- a/ggml/src/ggml-cuda/ggml-cuda.cu +++ b/ggml/src/ggml-cuda/ggml-cuda.cu -@@ -2162,9 +2162,11 @@ static bool ggml_cuda_compute_forward(ggml_backend_cuda_context & ctx, struct gg +@@ -2159,9 +2159,11 @@ static bool ggml_cuda_compute_forward(ggml_backend_cuda_context & ctx, struct gg case GGML_OP_ARGSORT: ggml_cuda_op_argsort(ctx, dst); break; diff --git a/llama/patches/0008-add-mllama-support.patch b/llama/patches/0007-add-mllama-support.patch similarity index 100% rename from llama/patches/0008-add-mllama-support.patch rename to llama/patches/0007-add-mllama-support.patch diff --git a/llama/patches/0007-blas.patch b/llama/patches/0007-blas.patch deleted file mode 100644 index 121a1cd9..00000000 --- a/llama/patches/0007-blas.patch +++ /dev/null @@ -1,26 +0,0 @@ -From 0000000000000000000000000000000000000000 Mon Sep 17 00:00:00 2001 -From: Jesse Gross -Date: Mon, 30 Sep 2024 16:31:04 -0700 -Subject: [PATCH] blas - ---- - ggml/src/ggml-blas/ggml-blas.cpp | 4 ++++ - 1 file changed, 4 insertions(+) - -diff --git a/ggml/src/ggml-blas/ggml-blas.cpp b/ggml/src/ggml-blas/ggml-blas.cpp -index ec158dfa..b3ac1fa4 100644 ---- a/ggml/src/ggml-blas/ggml-blas.cpp -+++ b/ggml/src/ggml-blas/ggml-blas.cpp -@@ -1,3 +1,5 @@ -+#ifdef GGML_USE_BLAS -+ - #include "ggml-impl.h" - #include "ggml-blas.h" - #include "ggml-backend-impl.h" -@@ -515,3 +517,5 @@ ggml_backend_reg_t ggml_backend_blas_reg(void) { - } - - GGML_BACKEND_DL_IMPL(ggml_backend_blas_reg) -+ -+#endif // GGML_USE_BLAS -\ No newline at end of file diff --git a/llama/patches/0009-add-unpad-operator.patch b/llama/patches/0008-add-unpad-operator.patch similarity index 97% rename from llama/patches/0009-add-unpad-operator.patch rename to llama/patches/0008-add-unpad-operator.patch index ba857ef0..fd070df9 100644 --- a/llama/patches/0009-add-unpad-operator.patch +++ b/llama/patches/0008-add-unpad-operator.patch @@ -126,10 +126,10 @@ index b7fefb9d..b307d554 100644 case GGML_OP_TIMESTEP_EMBEDDING: case GGML_OP_ARGSORT: diff --git a/ggml/src/ggml-cuda/ggml-cuda.cu b/ggml/src/ggml-cuda/ggml-cuda.cu -index bb425ee8..1e7c2a22 100644 +index aaa79ea4..9286f866 100644 --- a/ggml/src/ggml-cuda/ggml-cuda.cu +++ b/ggml/src/ggml-cuda/ggml-cuda.cu -@@ -2085,6 +2085,9 @@ static bool ggml_cuda_compute_forward(ggml_backend_cuda_context & ctx, struct gg +@@ -2082,6 +2082,9 @@ static bool ggml_cuda_compute_forward(ggml_backend_cuda_context & ctx, struct gg case GGML_OP_PAD: ggml_cuda_op_pad(ctx, dst); break; @@ -139,7 +139,7 @@ index bb425ee8..1e7c2a22 100644 case GGML_OP_ARANGE: ggml_cuda_op_arange(ctx, dst); break; -@@ -3013,6 +3016,7 @@ static bool ggml_backend_cuda_device_supports_op(ggml_backend_dev_t dev, const g +@@ -3010,6 +3013,7 @@ static bool ggml_backend_cuda_device_supports_op(ggml_backend_dev_t dev, const g case GGML_OP_GROUP_NORM: case 
GGML_OP_UPSCALE: case GGML_OP_PAD: @@ -211,10 +211,10 @@ index 8fd386b0..e2ededc3 100644 void ggml_cuda_op_pad(ggml_backend_cuda_context & ctx, ggml_tensor * dst); +void ggml_cuda_op_unpad(ggml_backend_cuda_context & ctx, ggml_tensor * dst); diff --git a/ggml/src/ggml-metal/ggml-metal.m b/ggml/src/ggml-metal/ggml-metal.m -index a85502ee..84e027eb 100644 +index cd8ef741..318addec 100644 --- a/ggml/src/ggml-metal/ggml-metal.m +++ b/ggml/src/ggml-metal/ggml-metal.m -@@ -311,6 +311,7 @@ static void ggml_backend_metal_device_rel(struct ggml_backend_metal_device_conte +@@ -311,6 +311,7 @@ enum ggml_metal_kernel_type { GGML_METAL_KERNEL_TYPE_UPSCALE_F32, GGML_METAL_KERNEL_TYPE_PAD_F32, GGML_METAL_KERNEL_TYPE_PAD_REFLECT_1D_F32, @@ -222,7 +222,7 @@ index a85502ee..84e027eb 100644 GGML_METAL_KERNEL_TYPE_ARANGE_F32, GGML_METAL_KERNEL_TYPE_TIMESTEP_EMBEDDING_F32, GGML_METAL_KERNEL_TYPE_ARGSORT_F32_I32_ASC, -@@ -910,6 +911,7 @@ @implementation GGMLMetalClass +@@ -910,6 +911,7 @@ static struct ggml_backend_metal_context * ggml_metal_init(ggml_backend_dev_t de GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_UPSCALE_F32, upscale_f32, true); GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_PAD_F32, pad_f32, true); GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_PAD_REFLECT_1D_F32, pad_reflect_1d_f32, true); diff --git a/llama/patches/0010-fix-deepseek-deseret-regex.patch b/llama/patches/0009-fix-deepseek-deseret-regex.patch similarity index 100% rename from llama/patches/0010-fix-deepseek-deseret-regex.patch rename to llama/patches/0009-fix-deepseek-deseret-regex.patch diff --git a/llama/patches/0012-Maintain-ordering-for-rules-for-grammar.patch b/llama/patches/0010-Maintain-ordering-for-rules-for-grammar.patch similarity index 100% rename from llama/patches/0012-Maintain-ordering-for-rules-for-grammar.patch rename to llama/patches/0010-Maintain-ordering-for-rules-for-grammar.patch diff --git a/llama/patches/0013-fix-missing-arg-in-static-assert-on-windows.patch b/llama/patches/0011-fix-missing-arg-in-static-assert-on-windows.patch similarity index 100% rename from llama/patches/0013-fix-missing-arg-in-static-assert-on-windows.patch rename to llama/patches/0011-fix-missing-arg-in-static-assert-on-windows.patch diff --git a/llama/patches/0011-relative-include-paths.patch b/llama/patches/0011-relative-include-paths.patch deleted file mode 100644 index c1e56b9c..00000000 --- a/llama/patches/0011-relative-include-paths.patch +++ /dev/null @@ -1,51 +0,0 @@ -From 0000000000000000000000000000000000000000 Mon Sep 17 00:00:00 2001 -From: jmorganca -Date: Tue, 3 Dec 2024 21:30:51 -0800 -Subject: [PATCH] relative include paths - ---- - ggml/src/ggml-cpu/ggml-cpu.c | 2 +- - ggml/src/ggml-cpu/ggml-cpu.cpp | 3 +-- - ggml/src/ggml-quants.c | 2 +- - 3 files changed, 3 insertions(+), 4 deletions(-) - -diff --git a/ggml/src/ggml-cpu/ggml-cpu.c b/ggml/src/ggml-cpu/ggml-cpu.c -index b307d554..4eb39c52 100644 ---- a/ggml/src/ggml-cpu/ggml-cpu.c -+++ b/ggml/src/ggml-cpu/ggml-cpu.c -@@ -10,7 +10,7 @@ - #include "ggml-quants.h" - #include "ggml-cpu-quants.h" - #include "ggml-threading.h" --#include "amx/amx.h" -+#include "amx.h" - #include "ggml.h" - - #if defined(_MSC_VER) || defined(__MINGW32__) -diff --git a/ggml/src/ggml-cpu/ggml-cpu.cpp b/ggml/src/ggml-cpu/ggml-cpu.cpp -index f11399cc..2a8b40ce 100644 ---- a/ggml/src/ggml-cpu/ggml-cpu.cpp -+++ b/ggml/src/ggml-cpu/ggml-cpu.cpp -@@ -4,8 +4,7 @@ - #include "ggml-cpu-aarch64.h" - #include "ggml-cpu-traits.h" - #include "ggml-impl.h" --#include "amx/amx.h" -- -+#include "amx.h" - #include 
- #include - #include -diff --git a/ggml/src/ggml-quants.c b/ggml/src/ggml-quants.c -index 7918388a..e2ed84e4 100644 ---- a/ggml/src/ggml-quants.c -+++ b/ggml/src/ggml-quants.c -@@ -3,7 +3,7 @@ - - #include "ggml-quants.h" - #include "ggml-impl.h" ---#include "ggml-cpu/ggml-cpu-impl.h" -+#include "ggml-cpu-impl.h" - #include "ggml-cpu.h" - - #include diff --git a/llama/patches/0014-llama-Ensure-KV-cache-is-fully-defragmented.patch b/llama/patches/0012-llama-Ensure-KV-cache-is-fully-defragmented.patch similarity index 100% rename from llama/patches/0014-llama-Ensure-KV-cache-is-fully-defragmented.patch rename to llama/patches/0012-llama-Ensure-KV-cache-is-fully-defragmented.patch diff --git a/llama/patches/0015-re-enable-gpu-for-clip.patch b/llama/patches/0013-re-enable-gpu-for-clip.patch similarity index 100% rename from llama/patches/0015-re-enable-gpu-for-clip.patch rename to llama/patches/0013-re-enable-gpu-for-clip.patch diff --git a/llama/patches/0014-sort-devices-by-score.patch b/llama/patches/0014-sort-devices-by-score.patch new file mode 100644 index 00000000..67c2127a --- /dev/null +++ b/llama/patches/0014-sort-devices-by-score.patch @@ -0,0 +1,82 @@ +From 0000000000000000000000000000000000000000 Mon Sep 17 00:00:00 2001 +From: Michael Yang +Date: Tue, 14 Jan 2025 12:01:24 -0800 +Subject: [PATCH] sort devices by score + +--- + ggml/src/ggml-backend-reg.cpp | 21 +++++++++++++-------- + 1 file changed, 13 insertions(+), 8 deletions(-) + +diff --git a/ggml/src/ggml-backend-reg.cpp b/ggml/src/ggml-backend-reg.cpp +index 899d16f2..ac5cda07 100644 +--- a/ggml/src/ggml-backend-reg.cpp ++++ b/ggml/src/ggml-backend-reg.cpp +@@ -150,7 +150,7 @@ struct ggml_backend_reg_entry { + + struct ggml_backend_registry { + std::vector<ggml_backend_reg_entry> backends; +- std::vector<ggml_backend_dev_t> devices; ++ std::vector<std::pair<ggml_backend_dev_t, int>> devices; + + ggml_backend_registry() { + #ifdef GGML_USE_CUDA +@@ -195,7 +195,7 @@ struct ggml_backend_registry { + } + } + +- void register_backend(ggml_backend_reg_t reg, dl_handle_ptr handle = nullptr) { ++ void register_backend(ggml_backend_reg_t reg, int score = -1, dl_handle_ptr handle = nullptr) { + if (!reg) { + return; + } +@@ -206,15 +206,15 @@ struct ggml_backend_registry { + #endif + backends.push_back({ reg, std::move(handle) }); + for (size_t i = 0; i < ggml_backend_reg_dev_count(reg); i++) { +- register_device(ggml_backend_reg_dev_get(reg, i)); ++ register_device(ggml_backend_reg_dev_get(reg, i), score); + } + } + +- void register_device(ggml_backend_dev_t device) { ++ void register_device(ggml_backend_dev_t device, int score = -1) { + #ifndef NDEBUG + GGML_LOG_DEBUG("%s: registered device %s (%s)\n", __func__, ggml_backend_dev_name(device), ggml_backend_dev_description(device)); + #endif +- devices.push_back(device); ++ devices.push_back({device, score}); + } + + ggml_backend_reg_t load_backend(const std::wstring & path, bool silent) { +@@ -257,7 +257,7 @@ struct ggml_backend_registry { + + GGML_LOG_INFO("%s: loaded %s backend from %s\n", __func__, ggml_backend_reg_name(reg), utf16_to_utf8(path).c_str()); + +- register_backend(reg, std::move(handle)); ++ register_backend(reg, score_fn ?
score_fn() : -1, std::move(handle)); + + return reg; + } +@@ -280,7 +280,7 @@ struct ggml_backend_registry { + // remove devices + devices.erase( + std::remove_if(devices.begin(), devices.end(), +- [reg](ggml_backend_dev_t dev) { return ggml_backend_dev_backend_reg(dev) == reg; }), ++ [reg](std::pair<ggml_backend_dev_t, int> dev) { return ggml_backend_dev_backend_reg(dev.first) == reg; }), + devices.end()); + + // remove backend +@@ -338,7 +338,12 @@ size_t ggml_backend_dev_count() { + + ggml_backend_dev_t ggml_backend_dev_get(size_t index) { + GGML_ASSERT(index < ggml_backend_dev_count()); +- return get_reg().devices[index]; ++ auto devices = get_reg().devices; ++ if (!std::is_heap(devices.begin(), devices.end())) { ++ std::make_heap(devices.begin(), devices.end(), [](const auto & a, const auto & b) { return a.second < b.second; }); ++ } ++ ++ return devices[index].first; + } + + ggml_backend_dev_t ggml_backend_dev_by_name(const char * name) { diff --git a/llama/patches/0015-add-phony-target-ggml-cpu-for-all-cpu-variants.patch b/llama/patches/0015-add-phony-target-ggml-cpu-for-all-cpu-variants.patch new file mode 100644 index 00000000..e68950a5 --- /dev/null +++ b/llama/patches/0015-add-phony-target-ggml-cpu-for-all-cpu-variants.patch @@ -0,0 +1,29 @@ +From 0000000000000000000000000000000000000000 Mon Sep 17 00:00:00 2001 +From: Michael Yang +Date: Tue, 14 Jan 2025 15:59:04 -0800 +Subject: [PATCH] add phony target ggml-cpu for all cpu variants + +--- + ggml/src/CMakeLists.txt | 2 ++ + 1 file changed, 2 insertions(+) + +diff --git a/ggml/src/CMakeLists.txt b/ggml/src/CMakeLists.txt +index 84101c32..72b488dd 100644 +--- a/ggml/src/CMakeLists.txt ++++ b/ggml/src/CMakeLists.txt +@@ -278,6 +278,7 @@ function(ggml_add_cpu_backend_variant tag_name) + endforeach() + + ggml_add_cpu_backend_variant_impl(${tag_name}) ++ add_dependencies(ggml-cpu ggml-cpu-${tag_name}) + endfunction() + + ggml_add_backend(CPU) +@@ -286,6 +287,7 @@ if (GGML_CPU_ALL_VARIANTS) + if (NOT GGML_BACKEND_DL) + message(FATAL_ERROR "GGML_CPU_ALL_VARIANTS requires GGML_BACKEND_DL") + endif() ++ add_custom_target(ggml-cpu) + ggml_add_cpu_backend_variant(sandybridge AVX) + ggml_add_cpu_backend_variant(haswell AVX F16C AVX2 FMA) + ggml_add_cpu_backend_variant(skylakex AVX F16C AVX2 FMA AVX512) diff --git a/llama/sgemm.h b/llama/sgemm.h deleted file mode 100644 index 3d290951..00000000 --- a/llama/sgemm.h +++ /dev/null @@ -1,14 +0,0 @@ -#pragma once -#include <stdint.h> -#include <stdbool.h> -#ifdef __cplusplus -extern "C" { -#endif - -bool llamafile_sgemm(const struct ggml_compute_params * params, int64_t, int64_t, int64_t, - const void *, int64_t, const void *, int64_t, void *, int64_t, - int, int, int); - -#ifdef __cplusplus -} -#endif diff --git a/llama/unicode-data.h b/llama/unicode-data.h deleted file mode 100644 index 4bd020f9..00000000 --- a/llama/unicode-data.h +++ /dev/null @@ -1,46 +0,0 @@ -/** - * llama.cpp - commit 46e3556e01b824e52395fb050b29804b6cff2a7c - do not edit this file - * - * MIT License - * - * Copyright (c) 2023-2024 The ggml authors - * - * Permission is hereby granted, free of charge, to any person obtaining a copy - * of this software and associated documentation files (the "Software"), to deal - * in the Software without restriction, including without limitation the rights - * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell - * copies of the Software, and to permit persons to whom the Software is - * furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall
be included in all - * copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. - */ - -#pragma once - -#include <cstdint> -#include <vector> -#include <unordered_map> -#include <unordered_set> - -struct range_nfd { - uint32_t first; - uint32_t last; - uint32_t nfd; -}; - -static const uint32_t MAX_CODEPOINTS = 0x110000; - -extern const std::initializer_list<std::pair<uint32_t, uint16_t>> unicode_ranges_flags; -extern const std::unordered_set<uint32_t> unicode_set_whitespace; -extern const std::initializer_list<std::pair<uint32_t, uint32_t>> unicode_map_lowercase; -extern const std::initializer_list<std::pair<uint32_t, uint32_t>> unicode_map_uppercase; -extern const std::initializer_list<range_nfd> unicode_ranges_nfd; diff --git a/llm/server.go b/llm/server.go index 89e5f54a..640c6816 100644 --- a/llm/server.go +++ b/llm/server.go @@ -29,7 +29,6 @@ import ( "github.com/ollama/ollama/envconfig" "github.com/ollama/ollama/format" "github.com/ollama/ollama/llama" - "github.com/ollama/ollama/runners" ) type LlamaServer interface { @@ -91,8 +90,6 @@ func LoadModel(model string, maxArraySize int) (*GGML, error) { // The gpu list must be a single family. func NewLlamaServer(gpus discover.GpuInfoList, model string, ggml *GGML, adapters, projectors []string, opts api.Options, numParallel int) (LlamaServer, error) { var err error - var cpuRunner string - var estimate MemoryEstimate var systemTotalMemory uint64 var systemFreeMemory uint64 var systemSwapFreeMemory uint64 @@ -107,12 +104,9 @@ func NewLlamaServer(gpus discover.GpuInfoList, model string, ggml *GGML, adapter if opts.NumGPU == 0 { gpus = discover.GetCPUInfo() } - if len(gpus) == 1 && gpus[0].Library == "cpu" { - cpuRunner = runners.ServerForCpu() - estimate = EstimateGPULayers(gpus, ggml, projectors, opts) - } else { - estimate = EstimateGPULayers(gpus, ggml, projectors, opts) + estimate := EstimateGPULayers(gpus, ggml, projectors, opts) + if len(gpus) > 1 || gpus[0].Library != "cpu" { switch { case gpus[0].Library == "metal" && estimate.VRAMSize > systemTotalMemory: // disable partial offloading when model is greater than total system memory as this @@ -120,7 +114,6 @@ func NewLlamaServer(gpus discover.GpuInfoList, model string, ggml *GGML, adapter opts.NumGPU = 0 case gpus[0].Library != "metal" && estimate.Layers == 0: // Don't bother loading into the GPU if no layers can fit - cpuRunner = runners.ServerForCpu() gpus = discover.GetCPUInfo() case opts.NumGPU < 0 && estimate.Layers > 0 && gpus[0].Library != "cpu": opts.NumGPU = estimate.Layers @@ -140,36 +133,6 @@ func NewLlamaServer(gpus discover.GpuInfoList, model string, ggml *GGML, adapter estimate.log() - // Loop through potential servers - finalErr := errors.New("no suitable llama servers found") - - availableServers := runners.GetAvailableServers() - - var servers []string - if cpuRunner != "" { - servers = []string{cpuRunner} - } else { - servers = runners.ServersForGpu(gpus[0].RunnerName()) // All GPUs in the list are matching Library and Variant - } - demandLib := envconfig.LLMLibrary() - if demandLib != "" { - serverPath := availableServers[demandLib] - if serverPath == "" { - slog.Info(fmt.Sprintf("Invalid OLLAMA_LLM_LIBRARY %s - not found", demandLib)) - } else
{ - slog.Info("user override", "OLLAMA_LLM_LIBRARY", demandLib, "path", serverPath) - servers = []string{demandLib} - if strings.HasPrefix(demandLib, "cpu") || (!(runtime.GOOS == "darwin" && runtime.GOARCH == "arm64") && demandLib == runners.BuiltinName()) { - // Omit the GPU flag to silence the warning - opts.NumGPU = -1 - } - } - } - - if len(servers) == 0 { - return nil, fmt.Errorf("no servers found for %v", gpus) - } - params := []string{ "--model", model, "--ctx-size", strconv.Itoa(opts.NumCtx), @@ -270,21 +233,49 @@ func NewLlamaServer(gpus discover.GpuInfoList, model string, ggml *GGML, adapter params = append(params, "--multiuser-cache") } - for i := range servers { - builtin := servers[i] == runners.BuiltinName() - server := availableServers[servers[i]] - if server == "" { - // Shouldn't happen - finalErr = fmt.Errorf("[%d] server %s not listed in available servers %v", i, servers[i], availableServers) - slog.Error("server list inconsistent", "error", finalErr) + // get available libraries + if err != nil { + return nil, fmt.Errorf("could not get libollama dir: %w", err) + } + + entries, err := os.ReadDir(discover.LibOllamaPath) + if err != nil { + return nil, fmt.Errorf("could not read libollama dir: %w", err) + } + + libs := make(map[string]string) + for _, entry := range entries { + if entry.IsDir() { + libs[entry.Name()] = filepath.Join(discover.LibOllamaPath, entry.Name()) + } + } + + lib := gpus[0].RunnerName() + requested := envconfig.LLMLibrary() + if libs[requested] != "" { + slog.Info("using requested gpu library", "requested", requested) + lib = requested + } + + var compatible []string + for k := range libs { + // exact match first + if k == lib { + compatible = append([]string{k}, compatible...) continue } - if strings.HasPrefix(servers[i], "cpu") || (builtin && !(runtime.GOOS == "darwin" && runtime.GOARCH == "arm64")) { - gpus = discover.GetCPUInfo() + // then match the family (e.g. 'cuda') + if strings.Split(k, "_")[0] == strings.Split(lib, "_")[0] { + compatible = append(compatible, k) } + } + slog.Debug("compatible gpu libraries", "compatible", compatible) - // Find an availableServers port, retry on each iteration in case the failure was a port conflict race + // iterate through compatible GPU libraries such as 'cuda_v12', 'cuda_v11', 'rocm', etc. + // adding each library's respective path to the LD_LIBRARY_PATH, until finally running + // without any LD_LIBRARY_PATH flags + for { port := 0 if a, err := net.ResolveTCPAddr("tcp", "localhost:0"); err == nil { var l *net.TCPListener @@ -305,25 +296,45 @@ func NewLlamaServer(gpus discover.GpuInfoList, model string, ggml *GGML, adapter if runtime.GOOS == "windows" { pathEnv = "PATH" } - // Start with the server directory for the LD_LIBRARY_PATH/PATH - libraryPaths := []string{filepath.Dir(server)} + var libraryPaths []string if libraryPath, ok := os.LookupEnv(pathEnv); ok { - // favor our bundled library dependencies over system libraries libraryPaths = append(libraryPaths, filepath.SplitList(libraryPath)...) 
} + if len(compatible) > 0 { + c := compatible[0] + if libpath, ok := libs[c]; ok { + slog.Debug("adding gpu library", "path", libpath) + libraryPaths = append(libraryPaths, libpath) + } + } + // Note: we always put the dependency path first // since this was the exact version we compiled/linked against if gpus[0].DependencyPath != nil { + slog.Debug("adding gpu dependency paths", "paths", gpus[0].DependencyPath) // assume gpus from the same library have the same dependency path libraryPaths = append(gpus[0].DependencyPath, libraryPaths...) } + // finally, add the root library path + libraryPaths = append(libraryPaths, discover.LibOllamaPath) + + exe, err := os.Executable() + if err != nil { + return nil, fmt.Errorf("unable to lookup executable path: %w", err) + } + + exe, err = filepath.EvalSymlinks(exe) + if err != nil { + return nil, fmt.Errorf("unable to evaluate symlinks for executable path: %w", err) + } + // TODO - once fully switched to the Go runner, load the model here for tokenize/detokenize cgo access s := &llmServer{ port: port, - cmd: exec.Command(server, finalParams...), + cmd: exec.Command(exe, finalParams...), status: NewStatusWriter(os.Stderr), options: opts, modelPath: model, @@ -394,17 +405,17 @@ func NewLlamaServer(gpus discover.GpuInfoList, model string, ggml *GGML, adapter } if err = s.cmd.Start(); err != nil { - // Detect permission denied and augment the message about noexec - if errors.Is(err, os.ErrPermission) { - finalErr = fmt.Errorf("unable to start server %w. %s may have noexec set. Set OLLAMA_TMPDIR for server to a writable executable directory", err, server) - continue - } - msg := "" + var msg string if s.status != nil && s.status.LastErrMsg != "" { msg = s.status.LastErrMsg } - err = fmt.Errorf("error starting the external llama server: %v %s", err, msg) - finalErr = err + err := fmt.Errorf("error starting runner: %v %s", err, msg) + if len(compatible) == 0 { + return nil, err + } + + slog.Warn("unable to start runner with compatible gpu", "error", err, "compatible", compatible) + compatible = compatible[1:] continue } @@ -413,7 +424,7 @@ func NewLlamaServer(gpus discover.GpuInfoList, model string, ggml *GGML, adapter err := s.cmd.Wait() // Favor a more detailed message over the process exit status if err != nil && s.status != nil && s.status.LastErrMsg != "" { - slog.Debug("llama runner terminated", "error", err) + slog.Error("llama runner terminated", "error", err) if strings.Contains(s.status.LastErrMsg, "unknown model") { s.status.LastErrMsg = "this model is not supported by your version of Ollama. 
You may need to upgrade" } @@ -425,9 +436,6 @@ func NewLlamaServer(gpus discover.GpuInfoList, model string, ggml *GGML, adapter return s, nil } - - slog.Error("unable to load any llama server", "error", finalErr) - return nil, finalErr } type ServerStatus int diff --git a/macapp/forge.config.ts b/macapp/forge.config.ts index 73ad23e8..d347eed4 100644 --- a/macapp/forge.config.ts +++ b/macapp/forge.config.ts @@ -18,8 +18,8 @@ const config: ForgeConfig = { asar: true, icon: './assets/icon.icns', extraResource: [ - '../dist/ollama', - '../dist/darwin-amd64/lib', + path.join(__dirname, '../dist/darwin/ollama'), + ...fs.readdirSync(path.join(__dirname, '../dist/darwin/amd64')).map(f => path.join(__dirname, '../dist/darwin/amd64', f)), path.join(__dirname, './assets/iconTemplate.png'), path.join(__dirname, './assets/iconTemplate@2x.png'), path.join(__dirname, './assets/iconUpdateTemplate.png'), @@ -43,7 +43,7 @@ const config: ForgeConfig = { } : {}), osxUniversal: { - x64ArchFiles: '**/ollama*', + x64ArchFiles: '*', }, }, rebuildConfig: {}, diff --git a/make/Makefile.cpu b/make/Makefile.cpu deleted file mode 100644 index 968ae934..00000000 --- a/make/Makefile.cpu +++ /dev/null @@ -1,40 +0,0 @@ -# Build the discrete cpu runner(s) for the platform which do not rely on 3rd party GPU libraries - -include make/common-defs.make - -CPU_GOFLAGS="-ldflags=-w -s \"-X=github.com/ollama/ollama/version.Version=$(VERSION)\" $(TARGET_LDFLAGS)" -ifeq ($(ARCH),amd64) -ifeq ($(origin CUSTOM_CPU_FLAGS),undefined) - RUNNERS = cpu_avx cpu_avx2 -endif -endif - -DIST_RUNNERS = $(addprefix $(RUNNERS_DIST_DIR)/,$(addsuffix /ollama_llama_server$(EXE_EXT),$(RUNNERS))) -BUILD_RUNNERS = $(addprefix $(RUNNERS_BUILD_DIR)/,$(addsuffix /ollama_llama_server$(EXE_EXT),$(RUNNERS))) - -cpu: $(BUILD_RUNNERS) - -dist: $(DIST_RUNNERS) - -$(RUNNERS_BUILD_DIR)/cpu_avx/ollama_llama_server$(EXE_EXT): TARGET_CPU_FLAGS="avx" -$(RUNNERS_BUILD_DIR)/cpu_avx/ollama_llama_server$(EXE_EXT): ./llama/*.go ./llama/runner/*.go $(COMMON_SRCS) $(COMMON_HDRS) - @-mkdir -p $(dir $@) - GOARCH=$(ARCH) go build -buildmode=pie $(CPU_GOFLAGS) -trimpath -tags $(subst $(space),$(comma),$(TARGET_CPU_FLAGS)) -o $@ ./cmd/runner - -$(RUNNERS_BUILD_DIR)/cpu_avx2/ollama_llama_server$(EXE_EXT): TARGET_CPU_FLAGS="avx avx2" -$(RUNNERS_BUILD_DIR)/cpu_avx2/ollama_llama_server$(EXE_EXT): ./llama/*.go ./llama/runner/*.go $(COMMON_SRCS) $(COMMON_HDRS) - @-mkdir -p $(dir $@) - GOARCH=$(ARCH) go build -buildmode=pie $(CPU_GOFLAGS) -trimpath -tags $(subst $(space),$(comma),$(TARGET_CPU_FLAGS)) -o $@ ./cmd/runner - -$(RUNNERS_DIST_DIR)/%: $(RUNNERS_BUILD_DIR)/% - @-mkdir -p $(dir $@) - cp $< $@ - -clean: - rm -f $(BUILD_RUNNERS) $(DIST_RUNNERS) - -.PHONY: clean cpu dist - -# Handy debugging for make variables -print-%: - @echo '$*=$($*)' diff --git a/make/Makefile.cuda_v11 b/make/Makefile.cuda_v11 deleted file mode 100644 index a6a81823..00000000 --- a/make/Makefile.cuda_v11 +++ /dev/null @@ -1,13 +0,0 @@ -# Build rules for CUDA v11 runner - -include make/common-defs.make -include make/cuda-v11-defs.make - -GPU_RUNNER_VARIANT := _v11 -GPU_COMPILER=$(CUDA_11_COMPILER) -CUDA_ARCHITECTURES?=50;52;53;60;61;62;70;72;75;80;86 -GPU_LIB_DIR = $(CUDA_11_LIB_DIR) -CGO_EXTRA_LDFLAGS = $(CUDA_11_CGO_EXTRA_LDFLAGS) - -include make/cuda.make -include make/gpu.make \ No newline at end of file diff --git a/make/Makefile.cuda_v12 b/make/Makefile.cuda_v12 deleted file mode 100644 index 7c50b27b..00000000 --- a/make/Makefile.cuda_v12 +++ /dev/null @@ -1,13 +0,0 @@ -# Build rules for CUDA v12 
runner - -include make/common-defs.make -include make/cuda-v12-defs.make - -GPU_RUNNER_VARIANT := _v12 -GPU_COMPILER=$(CUDA_12_COMPILER) -CUDA_ARCHITECTURES?=60;61;62;70;72;75;80;86;87;89;90;90a -GPU_LIB_DIR = $(CUDA_12_LIB_DIR) -CGO_EXTRA_LDFLAGS = $(CUDA_12_CGO_EXTRA_LDFLAGS) - -include make/cuda.make -include make/gpu.make \ No newline at end of file diff --git a/make/Makefile.ollama b/make/Makefile.ollama deleted file mode 100644 index a7349a25..00000000 --- a/make/Makefile.ollama +++ /dev/null @@ -1,19 +0,0 @@ -# Makefile for building top-level ollama binary - -include make/common-defs.make - -exe: $(OLLAMA_EXE) -dist_exe dist_ollama: $(DIST_OLLAMA_EXE) - -GO_DEPS=$(foreach dir,$(shell go list -deps -f '{{.Dir}}' . ),$(wildcard $(dir)/*.go)) -CPU_GOFLAGS="-ldflags=-w -s \"-X=github.com/ollama/ollama/version.Version=$(VERSION)\" $(EXTRA_GOLDFLAGS) $(TARGET_LDFLAGS)" - -$(OLLAMA_EXE) $(DIST_OLLAMA_EXE): TARGET_CPU_FLAGS=$(CUSTOM_CPU_FLAGS) -$(OLLAMA_EXE) $(DIST_OLLAMA_EXE): $(COMMON_SRCS) $(COMMON_HDRS) $(GO_DEPS) - GOARCH=$(ARCH) go build -buildmode=pie $(CPU_GOFLAGS) -trimpath $(if $(CUSTOM_CPU_FLAGS),-tags $(subst $(space),$(comma),$(CUSTOM_CPU_FLAGS))) -o $@ . - -.PHONY: ollama dist_ollama exe dist_exe - -# Handy debugging for make variables -print-%: - @echo '$*=$($*)' diff --git a/make/Makefile.rocm b/make/Makefile.rocm deleted file mode 100644 index 26ac6cf3..00000000 --- a/make/Makefile.rocm +++ /dev/null @@ -1,119 +0,0 @@ -# Build rules for ROCm runner -# -# Note: at present we only support a single ROCm version (whichever is default on the build system) -# unlike CUDA where we'll build both a v11 and v12 variant. - -include make/common-defs.make -include make/rocm-defs.make - -HIP_ARCHS_COMMON := gfx900 gfx940 gfx941 gfx942 gfx1010 gfx1012 gfx1030 gfx1100 gfx1101 gfx1102 -HIP_ARCHS_LINUX := gfx906:xnack- gfx908:xnack- gfx90a:xnack+ gfx90a:xnack- - -ifeq ($(OS),windows) - GPU_LIB_DIR := $(shell cygpath -m -s "$(HIP_PATH)/bin") - CGO_EXTRA_LDFLAGS := -L$(shell cygpath -m -s "$(HIP_PATH)/lib") - HIP_ARCHS?=$(HIP_ARCHS_COMMON) - GPU_COMPILER_CFLAGS = $(CFLAGS) -D_WIN32_WINNT=0x602 - GPU_COMPILER_CXXFLAGS = $(CXXFLAGS) -D_WIN32_WINNT=0x602 -else ifeq ($(OS),linux) - GPU_LIB_DIR := $(strip $(shell ls -d $(HIP_PATH)/lib64 2>/dev/null || ls -d $(HIP_PATH)/lib 2>/dev/null)) - CGO_EXTRA_LDFLAGS := -L$(GPU_LIB_DIR) - HIP_ARCHS?=$(HIP_ARCHS_COMMON) $(HIP_ARCHS_LINUX) - GPU_COMPILER_CFLAGS = $(CFLAGS) -fPIC -D_GNU_SOURCE - GPU_COMPILER_CXXFLAGS = $(CXXFLAGS) -fPIC -D_GNU_SOURCE -endif -GPU_COMPILER=$(HIP_COMPILER) - -# TODO future multi-variant support for ROCm -# ROCM_VERSION = $(subst $(space),.,$(wordlist 1,2,$(subst .,$(space),$(word 3,$(subst -,$(space),$(filter HIP version: %,$(shell $(GPU_COMPILER) --version))))))) -# ifneq (,$(ROCM_VERSION)) -# GPU_RUNNER_VARIANT = _v$(ROCM_VERSION) -# endif - -GPU_RUNNER_GO_TAGS := rocm -GPU_RUNNER_NAME := rocm$(GPU_RUNNER_VARIANT) -GPU_RUNNER_DRIVER_LIB_LINK := -lamdhip64 -GPU_RUNNER_LIBS_SHORT := hipblas rocblas - -# Note: ROCm requires an extra step of discovering and copying the transitive dependencies on linux -ifeq ($(OS),windows) - ROCM_DIST_DEPS_DIR = ./dist/$(OS)-$(ARCH)/lib/ollama - GPU_LIBS = $(sort $(wildcard $(addsuffix *.$(SHARED_EXT),$(addprefix $(GPU_LIB_DIR)/$(SHARED_PREFIX),$(GPU_RUNNER_LIBS_SHORT))))) -else ifeq ($(OS),linux) - ROCM_DIST_DEPS_DIR = ./dist/$(OS)-$(ARCH)-rocm/lib/ollama - GPU_LIBS = $(sort $(wildcard $(addsuffix *.$(SHARED_EXT).*,$(addprefix $(GPU_LIB_DIR)/$(SHARED_PREFIX),$(GPU_RUNNER_LIBS_SHORT))))) - 
ROCM_TRANSITIVE_LIBS_INITIAL = $(sort $(shell ldd $(GPU_LIBS) | grep "=>" | cut -f2 -d= | cut -f2 -d' ' | grep -e rocm -e amdgpu -e libtinfo -e libnuma -e libelf)) - GPU_TRANSITIVE_LIBS = $(sort $(shell readlink -f $(ROCM_TRANSITIVE_LIBS_INITIAL)) $(ROCM_TRANSITIVE_LIBS_INITIAL)) - FILTERED_GPU_TRANSITIVE_LIBS=$(sort $(filter-out $(addprefix %,$(notdir $(GPU_LIBS))), $(GPU_TRANSITIVE_LIBS))) - GPU_DIST_TRANSITIVE_LIB_DEPS = $(sort $(addprefix $(ROCM_DIST_DEPS_DIR)/,$(notdir $(FILTERED_GPU_TRANSITIVE_LIBS)))) -endif -GPU_DIST_LIB_DEPS= $(sort $(addprefix $(ROCM_DIST_DEPS_DIR)/,$(notdir $(GPU_LIBS)))) -ROCBLAS_DIST_DEP_MANIFEST = $(ROCM_DIST_DEPS_DIR)/rocblas/library/TensileManifest.txt - -ifeq ($(OS),linux) - GPU_COMPILER_FPIC := -fPIC -Wno-unused-function -std=gnu++17 -else ifeq ($(OS),windows) - GPU_COMPILER_FPIC := -Xclang --dependent-lib=msvcrt -endif -GPU_RUNNER_ARCH_FLAGS := $(foreach arch,$(subst ;,$(space),$(HIP_ARCHS)),--offload-arch=$(arch)) - -# HIPCC uses clang which requires avx512 -> -mavx512f -mavx512dq -mavx512bw -GPU_VECTOR_FLAGS=$(if $(filter avx512,$(GPU_RUNNER_CPU_FLAGS)),avx512f avx512dq avx512bw) $(filter-out avx512,$(GPU_RUNNER_CPU_FLAGS)) - -GPU_COMPILER_CUFLAGS = \ - $(GPU_COMPILER_FPIC) \ - $(addprefix -m,$(GPU_VECTOR_FLAGS)) \ - -mf16c \ - -mfma \ - -c \ - -O3 \ - -DGGML_USE_CUDA \ - -DGGML_BUILD=1 \ - -DGGML_BACKEND_BUILD=1 \ - -DGGML_SHARED=1 \ - -DGGML_BACKEND_SHARED=1 \ - -DGGML_CUDA_DMMV_X=32 \ - -DGGML_CUDA_MMV_Y=1 \ - -DGGML_SCHED_MAX_COPIES=4 \ - -DGGML_USE_HIP \ - -DGGML_USE_LLAMAFILE \ - -DHIP_FAST_MATH \ - -D__HIP_PLATFORM_AMD__=1 \ - -D__HIP_ROCclr__=1 \ - -DNDEBUG \ - -DK_QUANTS_PER_ITERATION=2 \ - -D_CRT_SECURE_NO_WARNINGS \ - -D_GNU_SOURCE \ - -D_XOPEN_SOURCE=600 \ - -DUSE_PROF_API=1 \ - -std=gnu++17 \ - -x hip \ - -mllvm=-amdgpu-early-inline-all=true \ - -mllvm=-amdgpu-function-calls=false \ - -Wno-expansion-to-defined \ - -Wno-invalid-noreturn \ - -Wno-ignored-attributes \ - -Wno-pass-failed \ - -Wno-deprecated-declarations \ - -Wno-unused-result \ - -I./llama/ - -# Workaround buggy P2P copy on some windows multi-GPU setups -# This workaround breaks linux systems with small system RAM, so only enable on windows -ifeq ($(OS),windows) - GPU_COMPILER_CUFLAGS += -DGGML_CUDA_NO_PEER_COPY=1 -endif - -include make/gpu.make - -# Adjust the rules from gpu.make to handle the ROCm dependencies properly -$(RUNNERS_DIST_DIR)/$(GPU_RUNNER_NAME)$(GPU_RUNNER_EXTRA_VARIANT)/ollama_llama_server$(EXE_EXT): $(ROCBLAS_DIST_DEP_MANIFEST) $(GPU_DIST_TRANSITIVE_LIB_DEPS) -$(ROCBLAS_DIST_DEP_MANIFEST): - @-mkdir -p $(dir $@) - @echo "Copying rocblas library..." - (cd $(GPU_LIB_DIR)/rocblas/library/ && tar cf - . 
) | (cd $(dir $@) && tar xf - ) - @echo "rocblas library copy complete" - -$(GPU_DIST_TRANSITIVE_LIB_DEPS): - @-mkdir -p $(dir $@) - $(CP) $(dir $(filter %$(notdir $@),$(GPU_TRANSITIVE_LIBS)))/$(notdir $@) $(dir $@) diff --git a/make/Makefile.sync b/make/Makefile.sync deleted file mode 100644 index 628d30e0..00000000 --- a/make/Makefile.sync +++ /dev/null @@ -1,250 +0,0 @@ -# Helpers for managing our vendored llama.cpp repo and patch set - -REPO_ROOT:=./ -DEST_DIR:=./llama/ - -include $(DEST_DIR)vendoring - -LLAMACPP_REPO := ./llama/vendor/ - -# Relative to the vendor dir -VENDOR_RELATIVE_PATCH_DIR := ../patches/ - - -help-sync: - @echo "The following make targets will help you update llama.cpp to a new base commit, or work on new features/fixes" - @echo "" - @echo " make apply-patches # Establish the tracking repo if not already present, reset to the base commit, and apply our patch set" - @echo " make sync # Vendor llama.cpp and ggml from the tracking repo working tree" - @echo " make sync-clean # Remove all vendored files" - @echo " make create-patches # Generate the patch set based on the current commits in the tracking repo since the base commit" - @echo "" - @echo "For more details on the workflow, see the Vendoring section in 'docs/development.md'" - -apply-patches: $(LLAMACPP_REPO) - @if ! git -C $(LLAMACPP_REPO) --no-pager diff --exit-code ; then \ - echo "ERROR: Your llama.cpp repo is dirty. The apply-patches target requires a clean working tree"; \ - echo "To clobber: git -C $(LLAMACPP_REPO) reset --hard HEAD" ; \ - exit 1; \ - fi - @echo "Checking out $(LLAMACPP_BASE_COMMIT)" - @git -C $(LLAMACPP_REPO) checkout -q $(LLAMACPP_BASE_COMMIT) || \ - git -C $(LLAMACPP_REPO) fetch --all && git -C $(LLAMACPP_REPO) checkout -q $(LLAMACPP_BASE_COMMIT) - @echo "Applying ollama patches..." - @cd $(LLAMACPP_REPO) && git -c 'user.name=nobody' -c 'user.email=<>' am -3 $(VENDOR_RELATIVE_PATCH_DIR)*.patch || \ - echo "Please resolve the conflicts in $(LLAMACPP_REPO), and run 'git am --continue' to continue applying subsequent patches" - @echo "" - @echo "The tracking repo $(LLAMACPP_REPO) is now in a detached state with all patches applied." - @echo "Don't forget to commit any changes you make and run 'make create-patches' " - -$(LLAMACPP_REPO): - @echo "Cloning llama.cpp to $(LLAMACPP_REPO)" - git clone https://github.com/ggerganov/llama.cpp.git $@ - -create-patches: $(LLAMACPP_REPO) - @if ! git -C $(LLAMACPP_REPO) --no-pager diff --exit-code ; then \ - echo "ERROR: Your llama.cpp repo is dirty. 
You must commit any pending changes for format-patch to generate patches"; \ - exit 1; \ - fi - @cd $(LLAMACPP_REPO) && git format-patch --no-signature --no-numbered --zero-commit -o $(VENDOR_RELATIVE_PATCH_DIR) $(LLAMACPP_BASE_COMMIT) - -# Vendoring template logic -EXCLUDED_FILES=sgemm.cpp sgemm.h sampling_ext.cpp sampling_ext.h stb_image.h json.hpp llama_darwin.c base64.hpp -OLLAMA_NATIVE_FILES=mllama.cpp mllama.h llama_darwin.c sampling_ext.cpp sampling_ext.h -define vendor_file -$(strip $(addprefix $(2),$(notdir $1))) : $(addprefix $(LLAMACPP_REPO),$(1)) -ifneq ($$(filter-out $(EXCLUDED_FILES),$(notdir $1)),) - @echo "vendoring $1"; \ - mkdir -p $$(dir $$@) && \ - echo "/**" > $$@ && \ - echo " * llama.cpp - commit $$(LLAMACPP_BASE_COMMIT) - do not edit this file" >> $$@ && \ - echo " *" >> $$@ && \ - sed 's/^/ * /' <$(LLAMACPP_REPO)/LICENSE | sed 's/ *$$$$//' >> $$@ && \ - echo " */" >> $$@ && \ - echo "" >> $$@ && \ - cat $$< >> $$@ -else - @echo "vendoring $1"; \ - mkdir -p $$(dir $$@) && \ - cat $$< > $$@ -endif -VENDORED_FILES += $(strip $(addprefix $(2),$(notdir $1))) -endef - -# llama.cpp files -> llama/ -LLAMACPP_FILES=\ - src/unicode.cpp \ - src/unicode.h \ - src/unicode-data.cpp \ - src/unicode-data.h \ - src/llama.cpp \ - src/llama-adapter.cpp \ - src/llama-adapter.h \ - src/llama-arch.cpp \ - src/llama-arch.h \ - src/llama-batch.cpp \ - src/llama-batch.h \ - src/llama-chat.cpp \ - src/llama-chat.h \ - src/llama-context.cpp \ - src/llama-context.h \ - src/llama-cparams.cpp \ - src/llama-cparams.h \ - src/llama-grammar.cpp \ - src/llama-grammar.h \ - src/llama-hparams.cpp \ - src/llama-hparams.h \ - src/llama-impl.cpp \ - src/llama-impl.h \ - src/llama-kv-cache.cpp \ - src/llama-kv-cache.h \ - src/llama-mmap.cpp \ - src/llama-mmap.h \ - src/llama-model-loader.cpp \ - src/llama-model-loader.h \ - src/llama-model.cpp \ - src/llama-model.h \ - src/llama-quant.cpp \ - src/llama-quant.h \ - src/llama-sampling.cpp \ - src/llama-sampling.h \ - src/llama-vocab.cpp \ - src/llama-vocab.h \ - include/llama.h \ - include/llama-cpp.h \ - ggml/include/ggml-cpu.h \ - ggml/src/ggml-cpu/llamafile/sgemm.cpp \ - ggml/src/ggml-cpu/llamafile/sgemm.h -$(foreach name,$(LLAMACPP_FILES),$(eval $(call vendor_file,$(name),$(DEST_DIR)))) - -# llama.cpp files -> llama/llamafile -LLAMAFILE_FILES= \ - ggml/src/ggml-cpu/llamafile/sgemm.h -$(foreach name,$(LLAMAFILE_FILES),$(eval $(call vendor_file,$(name),$(DEST_DIR)llamafile/))) - -# ggml files -> llama/ -GGML_FILES= \ - ggml/src/ggml.c \ - ggml/include/ggml.h \ - ggml/src/ggml-quants.c \ - ggml/src/ggml-quants.h \ - ggml/src/ggml-metal/ggml-metal.metal \ - ggml/include/ggml-metal.h \ - ggml/src/ggml-impl.h \ - ggml/src/ggml-threading.h \ - ggml/include/ggml-cuda.h \ - ggml/src/ggml-backend-reg.cpp \ - ggml/src/ggml-metal/ggml-metal-impl.h \ - ggml/src/ggml-common.h \ - ggml/include/ggml-backend.h \ - ggml/src/ggml-backend.cpp \ - ggml/src/ggml-backend-impl.h \ - ggml/include/ggml-alloc.h \ - ggml/src/ggml-alloc.c \ - ggml/include/ggml-blas.h \ - ggml/include/ggml-cpp.h \ - ggml/src/ggml-threading.cpp \ - ggml/src/ggml-blas/ggml-blas.cpp \ - ggml/src/ggml-cpu/ggml-cpu.c \ - ggml/src/ggml-cpu/ggml-cpu.cpp \ - ggml/src/ggml-cpu/ggml-cpu-aarch64.h \ - ggml/src/ggml-cpu/ggml-cpu-aarch64.cpp \ - ggml/src/ggml-cpu/ggml-cpu-quants.h \ - ggml/src/ggml-cpu/ggml-cpu-quants.c \ - ggml/src/ggml-cpu/ggml-cpu-impl.h \ - ggml/src/ggml-cpu/ggml-cpu-traits.h \ - ggml/src/ggml-cpu/ggml-cpu-traits.cpp \ - ggml/src/ggml-cpu/amx/amx.h \ - ggml/src/ggml-cpu/amx/amx.cpp \ - 
ggml/src/ggml-cpu/amx/mmq.cpp \ - ggml/src/ggml-cpu/amx/mmq.h -$(foreach name,$(GGML_FILES),$(eval $(call vendor_file,$(name),$(DEST_DIR)))) - -$(DEST_DIR)ggml-metal-embed.metal: $(DEST_DIR)ggml-common.h $(DEST_DIR)ggml-metal-impl.h - @sed -e '/__embed_ggml-common.h__/r $(DEST_DIR)/ggml-common.h' \ - -e '/__embed_ggml-common.h__/d' \ - < $(DEST_DIR)/ggml-metal.metal \ - > $(DEST_DIR)/ggml-metal-embed.metal.tmp - @sed -e '/#include "ggml-metal-impl.h"/r $(DEST_DIR)/ggml-metal-impl.h' \ - -e '/#include "ggml-metal-impl.h"/d' \ - < $(DEST_DIR)/ggml-metal-embed.metal.tmp \ - > $(DEST_DIR)/ggml-metal-embed.metal - @rm $(DEST_DIR)/ggml-metal-embed.metal.tmp - -VENDORED_FILES += $(DEST_DIR)ggml-metal-embed.metal - -# TODO generalize renaming pattern if we have more of these -$(DEST_DIR)ggml-metal_darwin_arm64.m : $(LLAMACPP_REPO)ggml/src/ggml-metal/ggml-metal.m - @echo "vendoring $(subst $(LLAMACPP_REPO),,$<)"; \ - mkdir -p $(dir $@) && \ - echo "/**" > $@ && \ - echo " * llama.cpp - commit $(LLAMACPP_BASE_COMMIT) - do not edit this file" >> $@ && \ - echo " *" >> $@ && \ - sed 's/^/ * /' <$(LLAMACPP_REPO)/LICENSE | sed 's/ *$$//' >> $@ && \ - echo " */" >> $@ && \ - echo "" >> $@ && \ - cat $< >> $@ -VENDORED_FILES += $(DEST_DIR)ggml-metal_darwin_arm64.m - -# ggml-cuda -> llama/ggml-cuda/ -GGML_CUDA_FILES= ggml/src/ggml-cuda/*.cu ggml/src/ggml-cuda/*.cuh -GGML_CUDA_FILES_EXPANDED = $(addprefix ggml/src/ggml-cuda/,$(notdir $(wildcard $(addprefix $(LLAMACPP_REPO),$(GGML_CUDA_FILES))))) -$(foreach name,$(GGML_CUDA_FILES_EXPANDED),$(eval $(call vendor_file,$(name),$(DEST_DIR)ggml-cuda/))) - -GGML_TEMPLATE_FILES= ggml/src/ggml-cuda/template-instances/*.cu -GGML_TEMPLATE_FILES_EXPANDED = $(addprefix ggml/src/ggml-cuda/template-instances/,$(notdir $(wildcard $(addprefix $(LLAMACPP_REPO),$(GGML_TEMPLATE_FILES))))) -$(foreach name,$(GGML_TEMPLATE_FILES_EXPANDED),$(eval $(call vendor_file,$(name),$(DEST_DIR)ggml-cuda/template-instances/))) - -GGML_VENDOR_FILES= ggml/src/ggml-cuda/vendors/*.h -GGML_VENDOR_FILES_EXPANDED=$(addprefix ggml/src/ggml-cuda/vendors/,$(notdir $(wildcard $(addprefix $(LLAMACPP_REPO),$(GGML_VENDOR_FILES))))) -$(foreach name,$(GGML_VENDOR_FILES_EXPANDED),$(eval $(call vendor_file,$(name),$(DEST_DIR)ggml-cuda/vendors/))) - -# llava -> llama/ -LAVA_FILES= \ - examples/llava/clip.cpp \ - examples/llava/clip.h \ - examples/llava/llava.cpp \ - examples/llava/llava.h \ - common/log.h \ - common/log.cpp \ - common/stb_image.h -# These files are mostly used by the llava code -# and shouldn't be necessary once we use clip.cpp directly -LAVA_FILES+= \ - common/common.cpp \ - common/common.h \ - common/sampling.cpp \ - common/sampling.h \ - common/json.hpp \ - common/json-schema-to-grammar.cpp \ - common/json-schema-to-grammar.h \ - common/base64.hpp -$(foreach name,$(LAVA_FILES),$(eval $(call vendor_file,$(name),$(DEST_DIR)))) - -$(DEST_DIR)build-info.cpp: - @echo "Generating $@" - @echo "int LLAMA_BUILD_NUMBER = 0;" > $@ - @echo "char const *LLAMA_COMMIT = \"$(LLAMACPP_BASE_COMMIT)\";" >> $@ - @echo "char const *LLAMA_COMPILER = \"\";" >> $@ - @echo "char const *LLAMA_BUILD_TARGET = \"\";" >> $@ -VENDORED_FILES += $(DEST_DIR)build-info.cpp - - -sync: $(LLAMACPP_REPO) .WAIT $(VENDORED_FILES) .WAIT remove-stale-files - -sync-clean: - rm -f $(VENDORED_FILES) $(EXTRA_NATIVE_FILES) - -PATS=*.c *.h *.cpp *.m *.metal *.cu *.cuh -NATIVE_DIRS=$(DEST_DIR) $(DEST_DIR)llamafile/ $(DEST_DIR)ggml-cuda/ $(DEST_DIR)ggml-cuda/template-instances/ $(DEST_DIR)ggml-cuda/vendors/ -ALL_NATIVE_FILES=$(foreach 
dir,$(NATIVE_DIRS),$(wildcard $(addprefix $(dir),$(PATS)))) -EXTRA_NATIVE_FILES=$(filter-out $(VENDORED_FILES) $(addprefix $(DEST_DIR),$(OLLAMA_NATIVE_FILES)), $(ALL_NATIVE_FILES)) -remove-stale-files: - @rm -f $(EXTRA_NATIVE_FILES) - -.PHONY: help-sync apply-patches sync create-patches remove-stale-fails .WAIT - - -# Handy debugging for make variables -print-%: - @echo '$*=$($*)' diff --git a/make/Makefile.test b/make/Makefile.test deleted file mode 100644 index 3b27d0db..00000000 --- a/make/Makefile.test +++ /dev/null @@ -1,19 +0,0 @@ -# Targets to assist in running tests - -include make/common-defs.make - -test: - cd .. && go test ./... - -integration: $(OLLAMA_EXE) - cd .. && go test --tags=integration ./integration -v - -lint: - cd .. && golangci-lint run -v - -# Note: in this makefile we error instead of building to allow more fine-grain control of testing flows -$(OLLAMA_EXE): - @echo "" - @echo "ERROR: You must build ollama first - use 'make all' to build the ollama binaries" - @echo "" - @exit 1 \ No newline at end of file diff --git a/make/common-defs.make b/make/common-defs.make deleted file mode 100644 index 03504a69..00000000 --- a/make/common-defs.make +++ /dev/null @@ -1,91 +0,0 @@ -# Common definitions for the various Makefiles -# No rules are defined here so this is safe to include at the beginning of other makefiles - -OS := $(shell uname -s) -ARCH ?= $(subst aarch64,arm64,$(subst x86_64,amd64,$(shell uname -m))) -ifneq (,$(findstring MINGW,$(OS))$(findstring MSYS,$(OS))) - OS := windows - ARCH := $(shell systeminfo 2>/dev/null | grep "System Type" | grep ARM64 > /dev/null && echo "arm64" || echo "amd64" ) -else ifeq ($(OS),Linux) - OS := linux -else ifeq ($(OS),Darwin) - OS := darwin -endif -comma:= , -empty:= -space:= $(empty) $(empty) -uc = $(subst a,A,$(subst b,B,$(subst c,C,$(subst d,D,$(subst e,E,$(subst f,F,$(subst g,G,$(subst h,H,$(subst i,I,$(subst j,J,$(subst k,K,$(subst l,L,$(subst m,M,$(subst n,N,$(subst o,O,$(subst p,P,$(subst q,Q,$(subst r,R,$(subst s,S,$(subst t,T,$(subst u,U,$(subst v,V,$(subst w,W,$(subst x,X,$(subst y,Y,$(subst z,Z,$1)))))))))))))))))))))))))) - -export CGO_CFLAGS_ALLOW = -mfma|-mf16c -export CGO_CXXFLAGS_ALLOW = -mfma|-mf16c -export HIP_PLATFORM = amd -export CGO_ENABLED=1 - -BUILD_DIR = ./llama/build/$(OS)-$(ARCH) -DIST_BASE = ./dist/$(OS)-$(ARCH) - -ifeq ($(OS),windows) - # Absolute paths with cygpath to convert to 8.3 without spaces - PWD="$(shell pwd)" - DIST_OLLAMA_EXE=$(DIST_BASE)/ollama$(EXE_EXT) -else - CCACHE:=$(shell command -v ccache 2>/dev/null || echo "") - DIST_OLLAMA_EXE=$(DIST_BASE)/bin/ollama$(EXE_EXT) -endif -DIST_LIB_DIR = $(DIST_BASE)/lib/ollama -RUNNERS_DIST_DIR = $(DIST_LIB_DIR)/runners -RUNNERS_BUILD_DIR = $(BUILD_DIR)/runners -VERSION?=$(shell git describe --tags --first-parent --abbrev=7 --long --dirty --always | sed -e "s/^v//g") - -# Conditionally enable ccache for cgo builds too -ifneq ($(CCACHE),) - CC?=$(CCACHE) gcc - CXX?=$(CCACHE) g++ - export CC - export CXX -endif - - -# Override in environment to tune CPU vector flags -ifeq ($(ARCH),amd64) -ifeq ($(origin CUSTOM_CPU_FLAGS),undefined) - GPU_RUNNER_CPU_FLAGS=avx - GPU_RUNNER_EXTRA_VARIANT=_avx -else - GPU_RUNNER_CPU_FLAGS=$(subst $(comma),$(space),$(CUSTOM_CPU_FLAGS)) -endif -endif - -ifeq ($(OS),windows) - CP := cp - OBJ_EXT := obj - SHARED_EXT := dll - EXE_EXT := .exe - SHARED_PREFIX := - CPU_FLAG_PREFIX := /arch: -ifneq ($(HIP_PATH),) - # If HIP_PATH has spaces, hipcc trips over them when subprocessing - HIP_PATH := $(shell cygpath -m -s "$(patsubst 
%\,%,$(HIP_PATH))") - export HIP_PATH -endif -else ifeq ($(OS),linux) - CP := cp -df - OBJ_EXT := o - SHARED_EXT := so - SHARED_PREFIX := lib - CPU_FLAG_PREFIX := -m -else - OBJ_EXT := o - SHARED_EXT := so - CPU_FLAG_PREFIX := -m - CP := cp -df -endif - -COMMON_SRCS := \ - $(wildcard ./llama/*.c) \ - $(wildcard ./llama/*.cpp) -COMMON_HDRS := \ - $(wildcard ./llama/*.h) \ - $(wildcard ./llama/*.hpp) - -OLLAMA_EXE=./ollama$(EXE_EXT) \ No newline at end of file diff --git a/make/cuda-v11-defs.make b/make/cuda-v11-defs.make deleted file mode 100644 index 264407dd..00000000 --- a/make/cuda-v11-defs.make +++ /dev/null @@ -1,17 +0,0 @@ -# Common definitions for the various Makefiles which set cuda settings -# No rules are defined here so this is safe to include at the beginning of other makefiles - -ifeq ($(OS),windows) - CUDA_PATH?=$(shell cygpath -m -s "C:\\Program Files\\NVIDIA GPU Computing Toolkit\\CUDA\\" 2>/dev/null)unknown - CUDA_BASE_DIR := $(dir $(shell cygpath -m -s "$(CUDA_PATH)\\.." 2>/dev/null)) - CUDA_11_PATH:=$(shell ls -d $(CUDA_BASE_DIR)/v11.? 2>/dev/null) - CUDA_11_COMPILER:=$(wildcard $(CUDA_11_PATH)/bin/nvcc.exe) - CUDA_11_LIB_DIR = $(strip $(shell ls -d $(CUDA_11_PATH)/bin 2>/dev/null)) - CUDA_11_CGO_EXTRA_LDFLAGS = -L"$(CUDA_11_PATH)/lib/x64" -else ifeq ($(OS),linux) - CUDA_PATH?=/usr/local/cuda - CUDA_11_PATH:=$(shell ls -d $(CUDA_PATH)-11 2>/dev/null) - CUDA_11_COMPILER:=$(wildcard $(CUDA_11_PATH)/bin/nvcc) - CUDA_11_LIB_DIR=$(strip $(shell ls -d $(CUDA_11_PATH)/lib64 2>/dev/null || ls -d $(CUDA_11_PATH)/lib 2>/dev/null)) - CUDA_11_CGO_EXTRA_LDFLAGS = -L"$(CUDA_11_LIB_DIR)" -L"$(CUDA_11_LIB_DIR)/stubs" -endif diff --git a/make/cuda-v12-defs.make b/make/cuda-v12-defs.make deleted file mode 100644 index f7c182b6..00000000 --- a/make/cuda-v12-defs.make +++ /dev/null @@ -1,17 +0,0 @@ -# Common definitions for the various Makefiles which set cuda settings -# No rules are defined here so this is safe to include at the beginning of other makefiles - -ifeq ($(OS),windows) - CUDA_PATH?=$(shell cygpath -m -s "C:\\Program Files\\NVIDIA GPU Computing Toolkit\\CUDA\\" 2>/dev/null)unknown - CUDA_BASE_DIR := $(dir $(shell cygpath -m -s "$(CUDA_PATH)\\.." 2>/dev/null)) - CUDA_12_PATH:=$(shell ls -d $(CUDA_BASE_DIR)/v12.? 
2>/dev/null) - CUDA_12_COMPILER:=$(wildcard $(CUDA_12_PATH)/bin/nvcc.exe) - CUDA_12_LIB_DIR = $(strip $(shell ls -d $(CUDA_12_PATH)/bin 2>/dev/null)) - CUDA_12_CGO_EXTRA_LDFLAGS = -L"$(CUDA_12_PATH)/lib/x64" -else ifeq ($(OS),linux) - CUDA_PATH?=/usr/local/cuda - CUDA_12_PATH:=$(shell ls -d $(CUDA_PATH)-12 2>/dev/null) - CUDA_12_COMPILER:=$(wildcard $(CUDA_12_PATH)/bin/nvcc) - CUDA_12_LIB_DIR=$(strip $(shell ls -d $(CUDA_12_PATH)/lib64 2>/dev/null || ls -d $(CUDA_12_PATH)/lib 2>/dev/null)) - CUDA_12_CGO_EXTRA_LDFLAGS = -L"$(CUDA_12_LIB_DIR)" -L"$(CUDA_12_LIB_DIR)/stubs" -endif diff --git a/make/cuda.make b/make/cuda.make deleted file mode 100644 index 095663f5..00000000 --- a/make/cuda.make +++ /dev/null @@ -1,56 +0,0 @@ -# Common definitions for all cuda versions - -ifndef GPU_RUNNER_VARIANT -dummy: - $(error This makefile is not meant to build directly, but instead included in other Makefiles that set required variables) -endif - - -GPU_RUNNER_NAME := cuda$(GPU_RUNNER_VARIANT) -GPU_RUNNER_GO_TAGS := cuda cuda$(GPU_RUNNER_VARIANT) -GPU_RUNNER_DRIVER_LIB_LINK := -lcuda -GPU_RUNNER_LIBS_SHORT := cublas cudart cublasLt - -ifeq ($(OS),windows) - # On windows, nvcc uses msvc which does not support avx512vbmi avx512vnni avx512bf16, but macros can turn them on - GPU_VECTOR_FLAGS=$(call uc,$(filter-out avx512bf16,$(filter-out avx512vnni,$(filter-out avx512vbmi,$(GPU_RUNNER_CPU_FLAGS))))) - GPU_COMPILER_EXTRA_FLAGS=$(if $(filter avx512vbmi,$(GPU_RUNNER_CPU_FLAGS)),-D__AVX512VBMI__) - GPU_COMPILER_EXTRA_FLAGS+=$(if $(filter avx512vnni,$(GPU_RUNNER_CPU_FLAGS)),-D__AVX512VNNI__) - GPU_COMPILER_EXTRA_FLAGS+=$(if $(filter avx512bf16,$(GPU_RUNNER_CPU_FLAGS)),-D__AVX512BF16__) - GPU_LIBS = $(sort $(wildcard $(addsuffix *.$(SHARED_EXT),$(addprefix $(GPU_LIB_DIR)/$(SHARED_PREFIX),$(GPU_RUNNER_LIBS_SHORT))))) - GPU_COMPILER_CFLAGS = $(CFLAGS) -D_WIN32_WINNT=0x602 - GPU_COMPILER_CXXFLAGS = $(CXXFLAGS) -D_WIN32_WINNT=0x602 -else ifeq ($(OS),linux) - # On linux, nvcc requires avx512 -> -mavx512f -mavx512dq -mavx512bw - GPU_VECTOR_FLAGS=$(if $(filter avx512,$(GPU_RUNNER_CPU_FLAGS)),avx512f avx512dq avx512bw) $(filter-out avx512,$(GPU_RUNNER_CPU_FLAGS)) - GPU_COMPILER_EXTRA_FLAGS = -fPIC -Wno-unused-function -std=c++17 - GPU_LIBS = $(sort $(wildcard $(addsuffix *.$(SHARED_EXT).*,$(addprefix $(GPU_LIB_DIR)/$(SHARED_PREFIX),$(GPU_RUNNER_LIBS_SHORT))))) - GPU_COMPILER_CFLAGS = $(CFLAGS) -Xcompiler -fPIC -D_GNU_SOURCE - GPU_COMPILER_CXXFLAGS = $(CXXFLAGS) -Xcompiler -fPIC -D_GNU_SOURCE -endif -GPU_DIST_LIB_DEPS= $(sort $(addprefix $(DIST_GPU_RUNNER_DEPS_DIR)/,$(notdir $(GPU_LIBS)))) - -GPU_RUNNER_ARCH_FLAGS := $(foreach arch,$(subst ;,$(space),$(CUDA_ARCHITECTURES)),--generate-code=arch=compute_$(arch)$(comma)code=[compute_$(arch)$(comma)sm_$(arch)]) \ - -DGGML_CUDA_USE_GRAPHS=1 -GPU_COMPILER_CUFLAGS = \ - $(GPU_COMPILER_EXTRA_FLAGS) \ - -Xcompiler "$(addprefix $(CPU_FLAG_PREFIX),$(GPU_VECTOR_FLAGS))" \ - -t2 \ - -DGGML_CUDA_DMMV_X=32 \ - -DGGML_CUDA_MMV_Y=1 \ - -DGGML_CUDA_PEER_MAX_BATCH_SIZE=128 \ - -DGGML_USE_CUDA=1 \ - -DGGML_SHARED=1 \ - -DGGML_BACKEND_SHARED=1 \ - -DGGML_BUILD=1 \ - -DGGML_BACKEND_BUILD=1 \ - -DGGML_USE_LLAMAFILE \ - -DK_QUANTS_PER_ITERATION=2 \ - -DNDEBUG \ - -D_GNU_SOURCE \ - -D_XOPEN_SOURCE=600 \ - -Wno-deprecated-gpu-targets \ - --forward-unknown-to-host-compiler \ - -use_fast_math \ - -I./llama/ \ - -O3 diff --git a/make/gpu.make b/make/gpu.make deleted file mode 100644 index 96e1ad22..00000000 --- a/make/gpu.make +++ /dev/null @@ -1,89 +0,0 @@ -# Generalized GPU runner build - -ifndef 
GPU_RUNNER_NAME -dummy: - $(error This makefile is not meant to build directly, but instead included in other Makefiles that set required variables) -endif - -GPU_GOFLAGS="-ldflags=-w -s \"-X=github.com/ollama/ollama/version.Version=$(VERSION)\" $(EXTRA_GOLDFLAGS) $(TARGET_LDFLAGS)" - -# TODO Unify how we handle dependencies in the dist/packaging and install flow -# today, cuda is bundled, but rocm is split out. Should split them each out by runner -DIST_GPU_RUNNER_DEPS_DIR = $(DIST_LIB_DIR) - - -GPU_RUNNER_LIBS = $(wildcard $(addsuffix .$(SHARED_EXT).*,$(addprefix $(GPU_LIB_DIR)/$(SHARED_PREFIX),$(GPU_RUNNER_LIBS_SHORT)))) - -GPU_RUNNER_SRCS := \ - $(filter-out $(wildcard llama/ggml-cuda/fattn*.cu),$(wildcard llama/ggml-cuda/*.cu)) \ - $(wildcard llama/ggml-cuda/template-instances/mmq*.cu) \ - llama/ggml.c llama/ggml-backend.cpp llama/ggml-alloc.c llama/ggml-quants.c llama/sgemm.cpp llama/ggml-threading.cpp -GPU_RUNNER_HDRS := \ - $(wildcard llama/ggml-cuda/*.cuh) - - -# Conditional flags and components to speed up developer builds -ifneq ($(OLLAMA_FAST_BUILD),) - GPU_COMPILER_CUFLAGS += \ - -DGGML_DISABLE_FLASH_ATTN -else - GPU_RUNNER_SRCS += \ - $(wildcard llama/ggml-cuda/fattn*.cu) \ - $(wildcard llama/ggml-cuda/template-instances/fattn-wmma*.cu) \ - $(wildcard llama/ggml-cuda/template-instances/fattn-vec*q4_0-q4_0.cu) \ - $(wildcard llama/ggml-cuda/template-instances/fattn-vec*q8_0-q8_0.cu) \ - $(wildcard llama/ggml-cuda/template-instances/fattn-vec*f16-f16.cu) -endif - -GPU_RUNNER_OBJS := $(GPU_RUNNER_SRCS:.cu=.$(GPU_RUNNER_NAME).$(OBJ_EXT)) -GPU_RUNNER_OBJS := $(GPU_RUNNER_OBJS:.c=.$(GPU_RUNNER_NAME).$(OBJ_EXT)) -GPU_RUNNER_OBJS := $(addprefix $(BUILD_DIR)/,$(GPU_RUNNER_OBJS:.cpp=.$(GPU_RUNNER_NAME).$(OBJ_EXT))) - -DIST_RUNNERS = $(addprefix $(RUNNERS_DIST_DIR)/,$(addsuffix /ollama_llama_server$(EXE_EXT),$(GPU_RUNNER_NAME)$(GPU_RUNNER_EXTRA_VARIANT))) -BUILD_RUNNERS = $(addprefix $(RUNNERS_BUILD_DIR)/,$(addsuffix /ollama_llama_server$(EXE_EXT),$(GPU_RUNNER_NAME)$(GPU_RUNNER_EXTRA_VARIANT))) - - -$(GPU_RUNNER_NAME): $(BUILD_RUNNERS) - -dist: $(DIST_RUNNERS) - -# Build targets -$(BUILD_DIR)/%.$(GPU_RUNNER_NAME).$(OBJ_EXT): %.cu - @-mkdir -p $(dir $@) - $(CCACHE) $(GPU_COMPILER) -c $(GPU_COMPILER_CFLAGS) $(GPU_COMPILER_CUFLAGS) $(GPU_RUNNER_ARCH_FLAGS) -o $@ $< -$(BUILD_DIR)/%.$(GPU_RUNNER_NAME).$(OBJ_EXT): %.c - @-mkdir -p $(dir $@) - $(CCACHE) $(GPU_COMPILER) -c $(GPU_COMPILER_CFLAGS) -o $@ $< -$(BUILD_DIR)/%.$(GPU_RUNNER_NAME).$(OBJ_EXT): %.cpp - @-mkdir -p $(dir $@) - $(CCACHE) $(GPU_COMPILER) -c $(GPU_COMPILER_CXXFLAGS) -o $@ $< -$(RUNNERS_BUILD_DIR)/$(GPU_RUNNER_NAME)$(GPU_RUNNER_EXTRA_VARIANT)/ollama_llama_server$(EXE_EXT): TARGET_CGO_LDFLAGS = $(CGO_EXTRA_LDFLAGS) -L"$(RUNNERS_BUILD_DIR)/$(GPU_RUNNER_NAME)$(GPU_RUNNER_EXTRA_VARIANT)/" -$(RUNNERS_BUILD_DIR)/$(GPU_RUNNER_NAME)$(GPU_RUNNER_EXTRA_VARIANT)/ollama_llama_server$(EXE_EXT): $(RUNNERS_BUILD_DIR)/$(GPU_RUNNER_NAME)$(GPU_RUNNER_EXTRA_VARIANT)/$(SHARED_PREFIX)ggml_$(GPU_RUNNER_NAME).$(SHARED_EXT) ./llama/*.go ./llama/runner/*.go $(COMMON_SRCS) $(COMMON_HDRS) - @-mkdir -p $(dir $@) - GOARCH=$(ARCH) CGO_LDFLAGS="$(TARGET_CGO_LDFLAGS)" go build -buildmode=pie $(GPU_GOFLAGS) -trimpath -tags $(subst $(space),$(comma),$(GPU_RUNNER_CPU_FLAGS) $(GPU_RUNNER_GO_TAGS)) -o $@ ./cmd/runner -$(RUNNERS_BUILD_DIR)/$(GPU_RUNNER_NAME)$(GPU_RUNNER_EXTRA_VARIANT)/$(SHARED_PREFIX)ggml_$(GPU_RUNNER_NAME).$(SHARED_EXT): $(GPU_RUNNER_OBJS) $(COMMON_HDRS) $(GPU_RUNNER_HDRS) - @-mkdir -p $(dir $@) - $(CCACHE) $(GPU_COMPILER) --shared 
-L$(GPU_LIB_DIR) $(GPU_RUNNER_DRIVER_LIB_LINK) -L${DIST_GPU_RUNNER_DEPS_DIR} $(foreach lib, $(GPU_RUNNER_LIBS_SHORT), -l$(lib)) $(GPU_RUNNER_OBJS) -o $@ - -# Distribution targets -$(RUNNERS_DIST_DIR)/%: $(RUNNERS_BUILD_DIR)/% - @-mkdir -p $(dir $@) - $(CP) $< $@ -$(RUNNERS_DIST_DIR)/$(GPU_RUNNER_NAME)$(GPU_RUNNER_EXTRA_VARIANT)/ollama_llama_server$(EXE_EXT): $(RUNNERS_DIST_DIR)/$(GPU_RUNNER_NAME)$(GPU_RUNNER_EXTRA_VARIANT)/$(SHARED_PREFIX)ggml_$(GPU_RUNNER_NAME).$(SHARED_EXT) $(GPU_DIST_LIB_DEPS) -$(RUNNERS_DIST_DIR)/$(GPU_RUNNER_NAME)$(GPU_RUNNER_EXTRA_VARIANT)/$(SHARED_PREFIX)ggml_$(GPU_RUNNER_NAME).$(SHARED_EXT): $(RUNNERS_BUILD_DIR)/$(GPU_RUNNER_NAME)$(GPU_RUNNER_EXTRA_VARIANT)/$(SHARED_PREFIX)ggml_$(GPU_RUNNER_NAME).$(SHARED_EXT) - @-mkdir -p $(dir $@) - $(CP) $< $@ -$(GPU_DIST_LIB_DEPS): - @-mkdir -p $(dir $@) - $(CP) $(GPU_LIB_DIR)/$(notdir $@) $(dir $@) - -clean: - rm -f $(GPU_RUNNER_OBJS) $(BUILD_RUNNERS) $(DIST_RUNNERS) - -.PHONY: clean $(GPU_RUNNER_NAME) - - -# Handy debugging for make variables -print-%: - @echo '$*=$($*)' - diff --git a/make/rocm-defs.make b/make/rocm-defs.make deleted file mode 100644 index 76a11f29..00000000 --- a/make/rocm-defs.make +++ /dev/null @@ -1,9 +0,0 @@ -# Common definitions for the various Makefiles which set cuda settings -# No rules are defined here so this is safe to include at the beginning of other makefiles - -ifeq ($(OS),windows) - HIP_COMPILER:=$(wildcard $(HIP_PATH)/bin/hipcc.bin.exe) -else ifeq ($(OS),linux) - HIP_PATH?=$(shell ls -d /opt/rocm 2>/dev/null) - HIP_COMPILER:=$(wildcard $(HIP_PATH)/bin/hipcc) -endif diff --git a/ml/backend/ggml/ggml/.rsync-filter b/ml/backend/ggml/ggml/.rsync-filter new file mode 100644 index 00000000..c5acbe49 --- /dev/null +++ b/ml/backend/ggml/ggml/.rsync-filter @@ -0,0 +1,22 @@ +protect *.go +protect *-embed.* +include include/ +include src/ +include src/CMakeLists.txt +include src/**/CMakeLists.txt +include src/ggml-blas/ +include src/ggml-cpu/ +include src/ggml-cpu/amx/ +include src/ggml-cpu/llamafile/ +include src/ggml-cuda/ +include src/ggml-cuda/template-instances/ +include src/ggml-hip/ +include src/ggml-metal/ +include *.c +include *.h +include *.cpp +include *.cu +include *.cuh +include *.m +include *.metal +exclude * diff --git a/ml/backend/ggml/ggml/LICENSE b/ml/backend/ggml/ggml/LICENSE new file mode 100644 index 00000000..acb96ce7 --- /dev/null +++ b/ml/backend/ggml/ggml/LICENSE @@ -0,0 +1,21 @@ +MIT License + +Copyright (c) 2023-2024 The ggml authors + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. 
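The llm/server.go changes earlier in this patch replace the precompiled runner catalog with directory discovery: subdirectories of discover.LibOllamaPath become candidate GPU libraries, an exact match for the detected runner name is tried first, other members of the same family (the prefix before the first underscore) are tried next, and each failed start drops the head of the candidate list. What follows is a minimal, self-contained Go sketch of that ordering; the function name compatibleLibraries, the standalone main, and the example paths are illustrative assumptions, not part of the patch.

package main

import (
	"fmt"
	"strings"
)

// compatibleLibraries orders candidate GPU libraries the way the patched
// NewLlamaServer does: an exact match for the runner name goes first,
// then any other library whose family (text before the first '_') matches.
func compatibleLibraries(libs map[string]string, runnerName string) []string {
	var compatible []string
	for k := range libs {
		if k == runnerName {
			// exact match is tried first
			compatible = append([]string{k}, compatible...)
			continue
		}
		// then match the family (e.g. "cuda" for "cuda_v12")
		if strings.Split(k, "_")[0] == strings.Split(runnerName, "_")[0] {
			compatible = append(compatible, k)
		}
	}
	return compatible
}

func main() {
	// hypothetical directories found under discover.LibOllamaPath
	libs := map[string]string{
		"cuda_v11": "/usr/lib/ollama/cuda_v11",
		"cuda_v12": "/usr/lib/ollama/cuda_v12",
		"rocm":     "/usr/lib/ollama/rocm",
	}
	fmt.Println(compatibleLibraries(libs, "cuda_v12")) // [cuda_v12 cuda_v11]
}

With these inputs "rocm" is filtered out because its family differs, and "cuda_v12" is guaranteed to precede "cuda_v11" regardless of map iteration order, matching the retry loop's preference for the exact variant before falling back within the family.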
diff --git a/llama/ggml-alloc.h b/ml/backend/ggml/ggml/include/ggml-alloc.h similarity index 70% rename from llama/ggml-alloc.h rename to ml/backend/ggml/ggml/include/ggml-alloc.h index 960ebf30..23600eea 100644 --- a/llama/ggml-alloc.h +++ b/ml/backend/ggml/ggml/include/ggml-alloc.h @@ -1,29 +1,3 @@ -/** - * llama.cpp - commit 46e3556e01b824e52395fb050b29804b6cff2a7c - do not edit this file - * - * MIT License - * - * Copyright (c) 2023-2024 The ggml authors - * - * Permission is hereby granted, free of charge, to any person obtaining a copy - * of this software and associated documentation files (the "Software"), to deal - * in the Software without restriction, including without limitation the rights - * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell - * copies of the Software, and to permit persons to whom the Software is - * furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in all - * copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. - */ - #pragma once #include "ggml.h" diff --git a/llama/ggml-backend.h b/ml/backend/ggml/ggml/include/ggml-backend.h similarity index 94% rename from llama/ggml-backend.h rename to ml/backend/ggml/ggml/include/ggml-backend.h index b67a183f..7221a083 100644 --- a/llama/ggml-backend.h +++ b/ml/backend/ggml/ggml/include/ggml-backend.h @@ -1,29 +1,3 @@ -/** - * llama.cpp - commit 46e3556e01b824e52395fb050b29804b6cff2a7c - do not edit this file - * - * MIT License - * - * Copyright (c) 2023-2024 The ggml authors - * - * Permission is hereby granted, free of charge, to any person obtaining a copy - * of this software and associated documentation files (the "Software"), to deal - * in the Software without restriction, including without limitation the rights - * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell - * copies of the Software, and to permit persons to whom the Software is - * furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in all - * copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. 
- */ - #pragma once #include "ggml.h" diff --git a/ml/backend/ggml/ggml/include/ggml-blas.h b/ml/backend/ggml/ggml/include/ggml-blas.h new file mode 100644 index 00000000..87a81b36 --- /dev/null +++ b/ml/backend/ggml/ggml/include/ggml-blas.h @@ -0,0 +1,25 @@ +#pragma once + +#include "ggml.h" +#include "ggml-backend.h" + + +#ifdef __cplusplus +extern "C" { +#endif + +// backend API +GGML_BACKEND_API ggml_backend_t ggml_backend_blas_init(void); + +GGML_BACKEND_API bool ggml_backend_is_blas(ggml_backend_t backend); + +// number of threads used for conversion to float +// for openblas and blis, this will also set the number of threads used for blas operations +GGML_BACKEND_API void ggml_backend_blas_set_n_threads(ggml_backend_t backend_blas, int n_threads); + +GGML_BACKEND_API ggml_backend_reg_t ggml_backend_blas_reg(void); + + +#ifdef __cplusplus +} +#endif diff --git a/ml/backend/ggml/ggml/include/ggml-cann.h b/ml/backend/ggml/ggml/include/ggml-cann.h new file mode 100644 index 00000000..b469e228 --- /dev/null +++ b/ml/backend/ggml/ggml/include/ggml-cann.h @@ -0,0 +1,123 @@ +/* + * Copyright (c) 2023-2024 The ggml authors + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to + * deal in the Software without restriction, including without limitation the + * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or + * sell copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS + * IN THE SOFTWARE. + */ + +#pragma once + +#include "ggml-backend.h" +#include "ggml.h" + +#ifdef __cplusplus +extern "C" { +#endif + +/** + * @brief Maximum number of CANN devices supported. + */ +#define GGML_CANN_MAX_DEVICES 16 + +GGML_BACKEND_API ggml_backend_reg_t ggml_backend_cann_reg(void); + +/** + * @brief Initializes the CANN backend for a specified device. + * + * This function initializes the CANN backend for the given device. + * It verifies the device index, allocates a context, and creates a backend + * instance. + * + * @param device The index of the device to initialize. + * @return A pointer to the initialized backend instance, or nullptr on failure. + */ +GGML_BACKEND_API ggml_backend_t ggml_backend_cann_init(int32_t device); + +/** + * @brief Checks if a given backend is a CANN backend. + * + * This function verifies if the provided backend is a CANN backend by comparing + * its GUID with the CANN backend's GUID. + * + * @param backend The backend instance to check. + * @return True if the backend is a CANN backend, false otherwise. + */ +GGML_BACKEND_API bool ggml_backend_is_cann(ggml_backend_t backend); + +/** + * @brief Retrieves the CANN buffer type for a specified device. + * + * This function initializes and returns the buffer type interface associated + * with the given device. 
It ensures thread-safe access using a mutex. + * + * @param device The device index for which to retrieve the buffer type. + * @return A pointer to the buffer type interface for the specified device, or + * nullptr if the device index is out of range. + */ +GGML_BACKEND_API ggml_backend_buffer_type_t +ggml_backend_cann_buffer_type(int32_t device); + +/** + * @brief Retrieves the number of CANN devices available. + * + * This function returns the number of CANN devices available based on + * information obtained from `ggml_cann_info()`. + * + * @return The number of CANN devices available. + */ +GGML_BACKEND_API int32_t ggml_backend_cann_get_device_count(void); + +/** + * @brief pinned host buffer for use with the CPU backend for faster copies between CPU and NPU. + * + * @return A pointer to the host buffer type interface. + */ +GGML_BACKEND_API ggml_backend_buffer_type_t ggml_backend_cann_host_buffer_type(void); + +/** + * @brief Retrieves the description of a specific CANN device. + * + * This function sets the specified device, retrieves the SoC name, + * and writes it into the provided description buffer. + * + * @param device The device index to retrieve the description for. + * @param description Pointer to a buffer where the description will be written. + * @param description_size Size of the description buffer. + */ +GGML_BACKEND_API void ggml_backend_cann_get_device_description( + int32_t device, char* description, size_t description_size); + +/** + * @brief Retrieves the memory information of a specific CANN device. + * + * This function sets the specified device, retrieves the free and total + * memory information of the specified type (ACL_HBM_MEM), and stores them + * in the provided pointers. + * + * @param device The device index to retrieve memory information for. + * @param free Pointer to a variable where the free memory size will be stored. + * @param total Pointer to a variable where the total memory size will be + * stored. + */ +GGML_BACKEND_API void ggml_backend_cann_get_device_memory(int32_t device, + size_t* free, + size_t* total); + +#ifdef __cplusplus +} +#endif diff --git a/llama/ggml-cpp.h b/ml/backend/ggml/ggml/include/ggml-cpp.h similarity index 56% rename from llama/ggml-cpp.h rename to ml/backend/ggml/ggml/include/ggml-cpp.h index ceb54875..219361af 100644 --- a/llama/ggml-cpp.h +++ b/ml/backend/ggml/ggml/include/ggml-cpp.h @@ -1,29 +1,3 @@ -/** - * llama.cpp - commit 46e3556e01b824e52395fb050b29804b6cff2a7c - do not edit this file - * - * MIT License - * - * Copyright (c) 2023-2024 The ggml authors - * - * Permission is hereby granted, free of charge, to any person obtaining a copy - * of this software and associated documentation files (the "Software"), to deal - * in the Software without restriction, including without limitation the rights - * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell - * copies of the Software, and to permit persons to whom the Software is - * furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in all - * copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. - */ - #pragma once #ifndef __cplusplus diff --git a/llama/ggml-cpu.h b/ml/backend/ggml/ggml/include/ggml-cpu.h similarity index 84% rename from llama/ggml-cpu.h rename to ml/backend/ggml/ggml/include/ggml-cpu.h index c2b64e66..3aa71bad 100644 --- a/llama/ggml-cpu.h +++ b/ml/backend/ggml/ggml/include/ggml-cpu.h @@ -1,29 +1,3 @@ -/** - * llama.cpp - commit 46e3556e01b824e52395fb050b29804b6cff2a7c - do not edit this file - * - * MIT License - * - * Copyright (c) 2023-2024 The ggml authors - * - * Permission is hereby granted, free of charge, to any person obtaining a copy - * of this software and associated documentation files (the "Software"), to deal - * in the Software without restriction, including without limitation the rights - * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell - * copies of the Software, and to permit persons to whom the Software is - * furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in all - * copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. - */ - #pragma once #include "ggml.h" diff --git a/llama/ggml-cuda.h b/ml/backend/ggml/ggml/include/ggml-cuda.h similarity index 56% rename from llama/ggml-cuda.h rename to ml/backend/ggml/ggml/include/ggml-cuda.h index c0fb681e..22ad2c00 100644 --- a/llama/ggml-cuda.h +++ b/ml/backend/ggml/ggml/include/ggml-cuda.h @@ -1,29 +1,3 @@ -/** - * llama.cpp - commit 46e3556e01b824e52395fb050b29804b6cff2a7c - do not edit this file - * - * MIT License - * - * Copyright (c) 2023-2024 The ggml authors - * - * Permission is hereby granted, free of charge, to any person obtaining a copy - * of this software and associated documentation files (the "Software"), to deal - * in the Software without restriction, including without limitation the rights - * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell - * copies of the Software, and to permit persons to whom the Software is - * furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in all - * copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. 
- */ - #pragma once #include "ggml.h"
diff --git a/ml/backend/ggml/ggml/include/ggml-kompute.h b/ml/backend/ggml/ggml/include/ggml-kompute.h new file mode 100644 index 00000000..154aa56a --- /dev/null +++ b/ml/backend/ggml/ggml/include/ggml-kompute.h @@ -0,0 +1,50 @@ +#pragma once + +#include "ggml.h" +#include "ggml-backend.h" + +#include <stdbool.h> +#include <stddef.h> +#include <stdint.h> + +#ifdef __cplusplus +extern "C" { +#endif + +#define GGML_KOMPUTE_MAX_DEVICES 16 + +struct ggml_vk_device { + int index; + int type; // same as VkPhysicalDeviceType + size_t heapSize; + const char * name; + const char * vendor; + int subgroupSize; + uint64_t bufferAlignment; + uint64_t maxAlloc; +}; + +struct ggml_vk_device * ggml_vk_available_devices(size_t memoryRequired, size_t * count); +bool ggml_vk_get_device(struct ggml_vk_device * device, size_t memoryRequired, const char * name); +bool ggml_vk_has_vulkan(void); +bool ggml_vk_has_device(void); +struct ggml_vk_device ggml_vk_current_device(void); + +// +// backend API +// + +// forward declaration +typedef struct ggml_backend * ggml_backend_t; + +GGML_BACKEND_API ggml_backend_t ggml_backend_kompute_init(int device); + +GGML_BACKEND_API bool ggml_backend_is_kompute(ggml_backend_t backend); + +GGML_BACKEND_API ggml_backend_buffer_type_t ggml_backend_kompute_buffer_type(int device); + +GGML_BACKEND_API ggml_backend_reg_t ggml_backend_kompute_reg(void); + +#ifdef __cplusplus +} +#endif
diff --git a/llama/ggml-metal.h b/ml/backend/ggml/ggml/include/ggml-metal.h similarity index 66% rename from llama/ggml-metal.h rename to ml/backend/ggml/ggml/include/ggml-metal.h index c3e7023e..669c1f84 100644 --- a/llama/ggml-metal.h +++ b/ml/backend/ggml/ggml/include/ggml-metal.h @@ -1,29 +1,3 @@ -/** - * llama.cpp - commit 46e3556e01b824e52395fb050b29804b6cff2a7c - do not edit this file - * - * MIT License - * - * Copyright (c) 2023-2024 The ggml authors - * - * Permission is hereby granted, free of charge, to any person obtaining a copy - * of this software and associated documentation files (the "Software"), to deal - * in the Software without restriction, including without limitation the rights - * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell - * copies of the Software, and to permit persons to whom the Software is - * furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in all - * copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE.
diff --git a/llama/ggml-metal.h b/ml/backend/ggml/ggml/include/ggml-metal.h
similarity index 66%
rename from llama/ggml-metal.h
rename to ml/backend/ggml/ggml/include/ggml-metal.h
index c3e7023e..669c1f84 100644
--- a/llama/ggml-metal.h
+++ b/ml/backend/ggml/ggml/include/ggml-metal.h
@@ -1,29 +1,3 @@
-/**
- * llama.cpp - commit 46e3556e01b824e52395fb050b29804b6cff2a7c - do not edit this file
- *
- * MIT License
- *
- * Copyright (c) 2023-2024 The ggml authors
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to deal
- * in the Software without restriction, including without limitation the rights
- * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
- * copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-
 // Note: this description is outdated
 //
 // An interface allowing to compute ggml_cgraph with Metal
diff --git a/ml/backend/ggml/ggml/include/ggml-opencl.h b/ml/backend/ggml/ggml/include/ggml-opencl.h
new file mode 100644
index 00000000..6b617713
--- /dev/null
+++ b/ml/backend/ggml/ggml/include/ggml-opencl.h
@@ -0,0 +1,26 @@
+#ifndef GGML_OPENCL_H
+#define GGML_OPENCL_H
+
+#include "ggml.h"
+#include "ggml-backend.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+//
+// backend API
+//
+GGML_BACKEND_API ggml_backend_t ggml_backend_opencl_init(void);
+GGML_BACKEND_API bool ggml_backend_is_opencl(ggml_backend_t backend);
+
+GGML_BACKEND_API ggml_backend_buffer_type_t ggml_backend_opencl_buffer_type(void);
+GGML_BACKEND_API ggml_backend_buffer_type_t ggml_backend_opencl_host_buffer_type(void);
+
+GGML_BACKEND_API ggml_backend_reg_t ggml_backend_opencl_reg(void);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif // GGML_OPENCL_H
diff --git a/ml/backend/ggml/ggml/include/ggml-opt.h b/ml/backend/ggml/ggml/include/ggml-opt.h
new file mode 100644
index 00000000..eb5eab9d
--- /dev/null
+++ b/ml/backend/ggml/ggml/include/ggml-opt.h
@@ -0,0 +1,216 @@
+// This file contains functionality for training models using GGML.
+// It is not strictly needed vs. just vanilla GGML, but it provides a higher-level interface for common needs such as datasets.
+// At the bottom of this file especially there are relatively high-level functions that are suitable for use or adaptation in user code.
+//
+// Module maintainer: Johannes Gäßler (@JohannesGaessler, johannesg@5d6.de)
+
+#pragma once
+
+#include "ggml.h"
+#include "ggml-backend.h"
+
+#include <stdint.h>
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+    struct ggml_opt_dataset;
+    struct ggml_opt_context;
+    struct ggml_opt_result;
+
+    typedef struct ggml_opt_dataset * ggml_opt_dataset_t;
+    typedef struct ggml_opt_context * ggml_opt_context_t;
+    typedef struct ggml_opt_result  * ggml_opt_result_t;
+
+    // ====== Loss ======
+
+    // built-in loss types, i.e. the built-in quantities minimized by the optimizer
+    // custom loss types can be defined via mean or sum, which simply reduce the outputs for all datapoints to a single value
+    enum ggml_opt_loss_type {
+        GGML_OPT_LOSS_TYPE_MEAN,
+        GGML_OPT_LOSS_TYPE_SUM,
+        GGML_OPT_LOSS_TYPE_CROSS_ENTROPY,
+        GGML_OPT_LOSS_TYPE_MEAN_SQUARED_ERROR,
+    };
+
+    // ====== Dataset ======
+
+    GGML_API ggml_opt_dataset_t ggml_opt_dataset_init(
+            int64_t ne_datapoint, // number of elements per datapoint
+            int64_t ne_label,     // number of elements per label
+            int64_t ndata,        // total number of datapoints/labels
+            int64_t ndata_shard); // number of datapoints/labels per shard (unit at which the dataset is shuffled/copied)
+    GGML_API void ggml_opt_dataset_free(ggml_opt_dataset_t dataset);
+
+    // get underlying tensors that store the data
+    GGML_API struct ggml_tensor * ggml_opt_dataset_data  (ggml_opt_dataset_t dataset); // shape = [ne_datapoint, ndata]
+    GGML_API struct ggml_tensor * ggml_opt_dataset_labels(ggml_opt_dataset_t dataset); // shape = [ne_label, ndata]
+
+    // shuffle idata first datapoints from dataset with RNG from opt_ctx, shuffle all datapoints if idata is negative
+    GGML_API void ggml_opt_dataset_shuffle(ggml_opt_context_t opt_ctx, ggml_opt_dataset_t dataset, int64_t idata);
+
+    // get batch at position ibatch from dataset and copy the data to data_batch and labels_batch
+    GGML_API void ggml_opt_dataset_get_batch(
+            ggml_opt_dataset_t   dataset,
+            struct ggml_tensor * data_batch,   // shape = [ne_datapoint, ndata_batch]
+            struct ggml_tensor * labels_batch, // shape = [ne_label, ndata_batch]
+            int64_t              ibatch);
+
+    // ====== Model / Context ======
+
+    enum ggml_opt_build_type {
+        GGML_OPT_BUILD_TYPE_FORWARD,
+        GGML_OPT_BUILD_TYPE_GRAD,
+        GGML_OPT_BUILD_TYPE_OPT,
+    };
+
+    // parameters that control which optimizer is used and how said optimizer tries to find the minimal loss
+    struct ggml_opt_optimizer_params {
+        // AdamW optimizer parameters
+        struct {
+            float alpha; // learning rate
+            float beta1;
+            float beta2;
+            float eps;   // epsilon for numerical stability
+            float wd;    // weight decay for AdamW, use 0.0f to disable
+        } adamw;
+    };
+
+    // callback to calculate optimizer parameters prior to a backward pass
+    // userdata can be used to pass arbitrary data
+    typedef struct ggml_opt_optimizer_params (*ggml_opt_get_optimizer_params)(void * userdata);
+
+    // returns the default optimizer params (constant)
+    // userdata is not used
+    GGML_API struct ggml_opt_optimizer_params ggml_opt_get_default_optimizer_params(void * userdata);
+
+    // parameters for initializing a new optimization context
+    struct ggml_opt_params {
+        ggml_backend_sched_t backend_sched; // defines which backends are used to construct the compute graphs
+
+        struct ggml_context * ctx_compute; // created in user code, holds non-static tensors
+
+        // the forward graph is defined by inputs and outputs
+        // those tensors and all tensors in between are not intended to be reusable between multiple optimization contexts
+        struct ggml_tensor * inputs;
+        struct ggml_tensor * outputs;
+
+        enum ggml_opt_loss_type  loss_type;
+        enum ggml_opt_build_type build_type;
+
+        int32_t opt_period; // after how many gradient accumulation steps an optimizer step should be done
+
+        ggml_opt_get_optimizer_params get_opt_pars; // callback for calculating optimizer parameters
+        void * get_opt_pars_ud;                     // userdata for calculating optimizer parameters
+    };
+
+    // get parameters for an optimization context with defaults set where possible
+    // parameters for which no sensible defaults exist are supplied as arguments to this function
+    GGML_API struct ggml_opt_params ggml_opt_default_params(
+            ggml_backend_sched_t    backend_sched,
+            struct ggml_context   * ctx_compute,
+            struct ggml_tensor    * inputs,
+            struct ggml_tensor    * outputs,
+            enum ggml_opt_loss_type loss_type);
+
+    GGML_API ggml_opt_context_t ggml_opt_init(struct ggml_opt_params params);
+    GGML_API void ggml_opt_free(ggml_opt_context_t opt_ctx);
+
+    // set gradients to zero, initialize loss, and optionally reset the optimizer
+    GGML_API void ggml_opt_reset(ggml_opt_context_t opt_ctx, bool optimizer);
+
+    // get underlying tensors that store data
+    GGML_API struct ggml_tensor * ggml_opt_inputs(  ggml_opt_context_t opt_ctx); // forward graph input tensor
+    GGML_API struct ggml_tensor * ggml_opt_outputs( ggml_opt_context_t opt_ctx); // forward graph output tensor
+    GGML_API struct ggml_tensor * ggml_opt_labels(  ggml_opt_context_t opt_ctx); // labels to compare outputs against
+    GGML_API struct ggml_tensor * ggml_opt_loss(    ggml_opt_context_t opt_ctx); // scalar tensor that contains the loss
+    GGML_API struct ggml_tensor * ggml_opt_pred(    ggml_opt_context_t opt_ctx); // predictions made by outputs
+    GGML_API struct ggml_tensor * ggml_opt_ncorrect(ggml_opt_context_t opt_ctx); // number of matching predictions between outputs and labels
+
+    GGML_API struct ggml_tensor * ggml_opt_grad_acc(ggml_opt_context_t opt_ctx, struct ggml_tensor * node);
+
+    // ====== Optimization Result ======
+
+    GGML_API ggml_opt_result_t ggml_opt_result_init();
+    GGML_API void ggml_opt_result_free(ggml_opt_result_t result);
+    GGML_API void ggml_opt_result_reset(ggml_opt_result_t result);
+
+    // get data from result, uncertainties are optional and can be ignored by passing NULL
+    GGML_API void ggml_opt_result_ndata(   ggml_opt_result_t result, int64_t * ndata);                 // writes 1 value, number of datapoints
+    GGML_API void ggml_opt_result_loss(    ggml_opt_result_t result, double * loss, double * unc);     // writes 1 value
+    GGML_API void ggml_opt_result_pred(    ggml_opt_result_t result, int32_t * pred);                  // writes ndata values
+    GGML_API void ggml_opt_result_accuracy(ggml_opt_result_t result, double * accuracy, double * unc); // writes 1 value
+
+    // ====== Computation ======
+
+    // do forward pass, increment result if not NULL
+    GGML_API void ggml_opt_forward(ggml_opt_context_t opt_ctx, ggml_opt_result_t result);
+
+    // do forward pass, increment result if not NULL, do backward pass
+    GGML_API void ggml_opt_forward_backward(ggml_opt_context_t opt_ctx, ggml_opt_result_t result);
+
+    // ############################################################################
+    // ## The high-level functions start here. They do not depend on any private ##
+    // ## functions or structs and can be copied to and adapted for user code.   ##
+    // ############################################################################
+
+    // ====== Intended Usage ======
+    //
+    // 1. Select the appropriate loss for your problem.
+    // 2. Create a dataset and set the data for the "data" tensor. Also set the "labels" tensor if your loss needs them.
+    //    Setting the shard size to 1 will be fine; it's the granularity with which data is shuffled/loaded (bigger values are faster).
+    // 3. Create a GGML graph for your model with no_alloc == true. Use two separate contexts for the tensors.
+    //    The first context should contain the model parameters and inputs and be allocated statically in user code.
+    //    The second context should contain all other tensors and will be (re)allocated automatically.
+    //    Due to this automated allocation the data of the second context is not defined when accessed in user code.
+    //    Note that the second dimension of the inputs/outputs is interpreted as the number of datapoints in those tensors.
+    // 4. Call ggml_opt_fit. If you need more control you can use ggml_opt_epoch instead.
+
+    // signature for a callback while evaluating opt_ctx on dataset, called after an evaluation
+    typedef void (*ggml_opt_epoch_callback)(
+            bool               train,       // true after training evaluation, false after validation evaluation
+            ggml_opt_context_t opt_ctx,
+            ggml_opt_dataset_t dataset,
+            ggml_opt_result_t  result,      // result associated with the dataset subsection
+            int64_t            ibatch,      // number of batches that have been evaluated so far
+            int64_t            ibatch_max,  // total number of batches in this dataset subsection
+            int64_t            t_start_us); // time at which the evaluation on the dataset subsection was started
+
+    // do training on the front of the dataset, do evaluation only on the back of the dataset
+    GGML_API void ggml_opt_epoch(
+            ggml_opt_context_t      opt_ctx,
+            ggml_opt_dataset_t      dataset,
+            ggml_opt_result_t       result_train, // result to increment during training, ignored if NULL
+            ggml_opt_result_t       result_eval,  // result to increment during evaluation, ignored if NULL
+            int64_t                 idata_split,  // data index at which to split training and evaluation
+            ggml_opt_epoch_callback callback_train,
+            ggml_opt_epoch_callback callback_eval);
+
+    // callback that prints a progress bar on stderr
+    GGML_API void ggml_opt_epoch_callback_progress_bar(
+            bool               train,
+            ggml_opt_context_t opt_ctx,
+            ggml_opt_dataset_t dataset,
+            ggml_opt_result_t  result,
+            int64_t            ibatch,
+            int64_t            ibatch_max,
+            int64_t            t_start_us);
+
+    // fit model defined by inputs and outputs to dataset
+    GGML_API void ggml_opt_fit(
+            ggml_backend_sched_t          backend_sched,  // backend scheduler for constructing the compute graphs
+            ggml_context                * ctx_compute,    // context with temporarily allocated tensors to calculate the outputs
+            ggml_tensor                 * inputs,         // input tensor with shape [ne_datapoint, ndata_batch]
+            ggml_tensor                 * outputs,        // output tensor, must have shape [ne_label, ndata_batch] if labels are used
+            ggml_opt_dataset_t            dataset,        // dataset with data and optionally also labels
+            enum ggml_opt_loss_type       loss_type,      // loss to minimize
+            ggml_opt_get_optimizer_params get_opt_pars,   // callback to get optimizer params, userdata is pointer to epoch (of type int64_t)
+            int64_t                       nepoch,         // how many times the dataset should be iterated over
+            int64_t                       nbatch_logical, // datapoints per optimizer step, must be a multiple of ndata_batch in inputs/outputs
+            float                         val_split,      // fraction of the dataset to use for validation, must be in [0.0f, 1.0f)
+            bool                          silent);        // whether or not info prints to stderr should be suppressed
+
+#ifdef __cplusplus
+}
+#endif
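To make step 4 of the usage notes concrete, here is a minimal sketch of the high-level entry point, written as a hypothetical C++ helper: the scheduler, compute context, graph tensors, and filled dataset are assumed to have been prepared exactly as steps 1-3 describe.

    #include "ggml-opt.h"

    // Hypothetical helper; every argument is assumed to be set up by the
    // caller as described in the "Intended Usage" notes above.
    static void train_with_mse(ggml_backend_sched_t sched,
                               ggml_context * ctx_compute,
                               ggml_tensor * inputs,
                               ggml_tensor * outputs,
                               ggml_opt_dataset_t dataset) {
        ggml_opt_fit(sched, ctx_compute, inputs, outputs, dataset,
                     GGML_OPT_LOSS_TYPE_MEAN_SQUARED_ERROR,
                     ggml_opt_get_default_optimizer_params, // default AdamW settings
                     /*nepoch=*/4,
                     /*nbatch_logical=*/64,
                     /*val_split=*/0.05f,
                     /*silent=*/false);
    }

The epoch count, logical batch size, and validation split are illustrative values only; the comments on ggml_opt_fit above explain the constraints each must satisfy.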
diff --git a/ml/backend/ggml/ggml/include/ggml-rpc.h b/ml/backend/ggml/ggml/include/ggml-rpc.h
new file mode 100644
index 00000000..ade6c3b0
--- /dev/null
+++ b/ml/backend/ggml/ggml/include/ggml-rpc.h
@@ -0,0 +1,28 @@
+#pragma once
+
+#include "ggml.h"
+#include "ggml-backend.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#define GGML_RPC_MAX_SERVERS 16
+
+// backend API
+GGML_BACKEND_API ggml_backend_t ggml_backend_rpc_init(const char * endpoint);
+GGML_BACKEND_API bool ggml_backend_is_rpc(ggml_backend_t backend);
+
+GGML_BACKEND_API ggml_backend_buffer_type_t ggml_backend_rpc_buffer_type(const char * endpoint);
+
+GGML_BACKEND_API void ggml_backend_rpc_get_device_memory(const char * endpoint, size_t * free, size_t * total);
+
+GGML_BACKEND_API void ggml_backend_rpc_start_server(ggml_backend_t backend, const char * endpoint, size_t free_mem, size_t total_mem);
+
+GGML_BACKEND_API ggml_backend_reg_t ggml_backend_rpc_reg(void);
+
+GGML_BACKEND_API ggml_backend_dev_t ggml_backend_rpc_add_device(const char * endpoint);
+
+#ifdef __cplusplus
+}
+#endif
diff --git a/ml/backend/ggml/ggml/include/ggml-sycl.h b/ml/backend/ggml/ggml/include/ggml-sycl.h
new file mode 100644
index 00000000..5ce349a8
--- /dev/null
+++ b/ml/backend/ggml/ggml/include/ggml-sycl.h
@@ -0,0 +1,49 @@
+//
+// MIT license
+// Copyright (C) 2024 Intel Corporation
+// SPDX-License-Identifier: MIT
+//
+
+#pragma once
+
+#include "ggml.h"
+#include "ggml-backend.h"
+
+#define GGML_SYCL_NAME "SYCL"
+#define GGML_SYCL_MAX_DEVICES 48
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+// backend API
+GGML_BACKEND_API ggml_backend_t ggml_backend_sycl_init(int device);
+
+GGML_BACKEND_API bool ggml_backend_is_sycl(ggml_backend_t backend);
+
+// device buffer
+GGML_BACKEND_API ggml_backend_buffer_type_t ggml_backend_sycl_buffer_type(int device);
+
+// split tensor buffer that splits matrices by rows across multiple devices
+GGML_BACKEND_API ggml_backend_buffer_type_t ggml_backend_sycl_split_buffer_type(const float * tensor_split);
+
+// pinned host buffer for use with the CPU backend for faster copies between CPU and GPU
+GGML_BACKEND_API ggml_backend_buffer_type_t ggml_backend_sycl_host_buffer_type(void);
+
+GGML_BACKEND_API void ggml_backend_sycl_print_sycl_devices(void);
+GGML_BACKEND_API void ggml_backend_sycl_get_gpu_list(int *id_list, int max_len);
+GGML_BACKEND_API void ggml_backend_sycl_get_device_description(int device,
+                                                               char *description,
+                                                               size_t description_size);
+GGML_BACKEND_API int  ggml_backend_sycl_get_device_count();
+GGML_BACKEND_API void ggml_backend_sycl_get_device_memory(int device, size_t *free, size_t *total);
+
+// SYCL doesn't support registering host memory, keep here for reference
+// GGML_BACKEND_API bool ggml_backend_sycl_register_host_buffer(void * buffer, size_t size);
+// GGML_BACKEND_API void ggml_backend_sycl_unregister_host_buffer(void * buffer);
+
+GGML_BACKEND_API ggml_backend_reg_t ggml_backend_sycl_reg(void);
+
+#ifdef __cplusplus
+}
+#endif
diff --git a/ml/backend/ggml/ggml/include/ggml-vulkan.h b/ml/backend/ggml/ggml/include/ggml-vulkan.h
new file mode 100644
index 00000000..53cdba07
--- /dev/null
+++ b/ml/backend/ggml/ggml/include/ggml-vulkan.h
@@ -0,0 +1,31 @@
+#pragma once
+
+#include "ggml.h"
+#include "ggml-backend.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#define GGML_VK_NAME "Vulkan"
+#define GGML_VK_MAX_DEVICES 16
+
+GGML_BACKEND_API void ggml_vk_instance_init(void);
+
+// backend API
+GGML_BACKEND_API ggml_backend_t ggml_backend_vk_init(size_t dev_num);
+
+GGML_BACKEND_API bool ggml_backend_is_vk(ggml_backend_t backend);
+GGML_BACKEND_API int  ggml_backend_vk_get_device_count(void);
+GGML_BACKEND_API void ggml_backend_vk_get_device_description(int device, char * description, size_t description_size);
+GGML_BACKEND_API void ggml_backend_vk_get_device_memory(int device, size_t * free, size_t * total);
+
+GGML_BACKEND_API ggml_backend_buffer_type_t ggml_backend_vk_buffer_type(size_t dev_num);
+// pinned host buffer for use with the CPU backend for faster copies between CPU and GPU
+GGML_BACKEND_API ggml_backend_buffer_type_t ggml_backend_vk_host_buffer_type(void);
+
+GGML_BACKEND_API ggml_backend_reg_t ggml_backend_vk_reg(void);
+
+#ifdef __cplusplus
+}
+#endif
diff --git a/llama/ggml.h
b/ml/backend/ggml/ggml/include/ggml.h similarity index 98% rename from llama/ggml.h rename to ml/backend/ggml/ggml/include/ggml.h index 621362c8..1bc50fca 100644 --- a/llama/ggml.h +++ b/ml/backend/ggml/ggml/include/ggml.h @@ -1,29 +1,3 @@ -/** - * llama.cpp - commit 46e3556e01b824e52395fb050b29804b6cff2a7c - do not edit this file - * - * MIT License - * - * Copyright (c) 2023-2024 The ggml authors - * - * Permission is hereby granted, free of charge, to any person obtaining a copy - * of this software and associated documentation files (the "Software"), to deal - * in the Software without restriction, including without limitation the rights - * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell - * copies of the Software, and to permit persons to whom the Software is - * furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in all - * copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. - */ - #pragma once // diff --git a/ml/backend/ggml/ggml/src/CMakeLists.txt b/ml/backend/ggml/ggml/src/CMakeLists.txt new file mode 100644 index 00000000..72b488dd --- /dev/null +++ b/ml/backend/ggml/ggml/src/CMakeLists.txt @@ -0,0 +1,340 @@ +include(CheckCXXCompilerFlag) + +add_compile_definitions(GGML_SCHED_MAX_COPIES=${GGML_SCHED_MAX_COPIES}) + +# enable libstdc++ assertions for debug builds +if (CMAKE_SYSTEM_NAME MATCHES "Linux") + add_compile_definitions($<$:_GLIBCXX_ASSERTIONS>) +endif() + +if (NOT MSVC) + if (GGML_SANITIZE_THREAD) + add_compile_options(-fsanitize=thread) + link_libraries (-fsanitize=thread) + endif() + + if (GGML_SANITIZE_ADDRESS) + add_compile_options(-fsanitize=address -fno-omit-frame-pointer) + link_libraries (-fsanitize=address) + endif() + + if (GGML_SANITIZE_UNDEFINED) + add_compile_options(-fsanitize=undefined) + link_libraries (-fsanitize=undefined) + endif() +endif() + +function(ggml_get_flags CCID CCVER) + set(C_FLAGS "") + set(CXX_FLAGS "") + + if (CCID MATCHES "Clang") + set(C_FLAGS -Wunreachable-code-break -Wunreachable-code-return) + set(CXX_FLAGS -Wunreachable-code-break -Wunreachable-code-return -Wmissing-prototypes -Wextra-semi) + + if ( + (CCID STREQUAL "Clang" AND CCVER VERSION_GREATER_EQUAL 3.8.0) OR + (CCID STREQUAL "AppleClang" AND CCVER VERSION_GREATER_EQUAL 7.3.0) + ) + list(APPEND C_FLAGS -Wdouble-promotion) + endif() + elseif (CCID STREQUAL "GNU") + set(C_FLAGS -Wdouble-promotion) + set(CXX_FLAGS -Wno-array-bounds) + + if (CCVER VERSION_GREATER_EQUAL 8.1.0) + list(APPEND CXX_FLAGS -Wextra-semi) + endif() + endif() + + set(GF_C_FLAGS ${C_FLAGS} PARENT_SCOPE) + set(GF_CXX_FLAGS ${CXX_FLAGS} PARENT_SCOPE) +endfunction() + +if (GGML_FATAL_WARNINGS) + if (CMAKE_CXX_COMPILER_ID MATCHES "GNU" OR CMAKE_CXX_COMPILER_ID MATCHES "Clang") + list(APPEND C_FLAGS -Werror) + list(APPEND CXX_FLAGS -Werror) + elseif (CMAKE_CXX_COMPILER_ID STREQUAL "MSVC") + add_compile_options(/WX) + endif() +endif() + +if (GGML_ALL_WARNINGS) + if (NOT MSVC) + list(APPEND WARNING_FLAGS -Wall -Wextra 
-Wpedantic -Wcast-qual -Wno-unused-function) + list(APPEND C_FLAGS -Wshadow -Wstrict-prototypes -Wpointer-arith -Wmissing-prototypes + -Werror=implicit-int -Werror=implicit-function-declaration) + list(APPEND CXX_FLAGS -Wmissing-declarations -Wmissing-noreturn) + + list(APPEND C_FLAGS ${WARNING_FLAGS}) + list(APPEND CXX_FLAGS ${WARNING_FLAGS}) + + ggml_get_flags(${CMAKE_CXX_COMPILER_ID} ${CMAKE_CXX_COMPILER_VERSION}) + + add_compile_options("$<$:${C_FLAGS};${GF_C_FLAGS}>" + "$<$:${CXX_FLAGS};${GF_CXX_FLAGS}>") + else() + # todo : msvc + set(C_FLAGS "") + set(CXX_FLAGS "") + endif() +endif() + +if (GGML_LTO) + include(CheckIPOSupported) + check_ipo_supported(RESULT result OUTPUT output) + if (result) + set(CMAKE_INTERPROCEDURAL_OPTIMIZATION TRUE) + else() + message(WARNING "IPO is not supported: ${output}") + endif() +endif() + +if (GGML_CCACHE) + find_program(GGML_CCACHE_FOUND ccache) + + if (GGML_CCACHE_FOUND) + # TODO: should not be set globally + set_property(GLOBAL PROPERTY RULE_LAUNCH_COMPILE ccache) + set(ENV{CCACHE_SLOPPINESS} time_macros) + message(STATUS "ccache found, compilation results will be cached. Disable with GGML_CCACHE=OFF.") + else() + message(STATUS "Warning: ccache not found - consider installing it for faster compilation or disable this warning with GGML_CCACHE=OFF") + endif () +endif() + +# this version of Apple ld64 is buggy +execute_process( + COMMAND ${CMAKE_C_COMPILER} ${CMAKE_EXE_LINKER_FLAGS} -Wl,-v + ERROR_VARIABLE output + OUTPUT_QUIET +) + +if (output MATCHES "dyld-1015\.7") + add_compile_definitions(HAVE_BUGGY_APPLE_LINKER) +endif() + +# architecture specific +# TODO: probably these flags need to be tweaked on some architectures +# feel free to update the Makefile for your architecture and send a pull request or issue +message(STATUS "CMAKE_SYSTEM_PROCESSOR: ${CMAKE_SYSTEM_PROCESSOR}") +if (MSVC) + string(TOLOWER "${CMAKE_GENERATOR_PLATFORM}" CMAKE_GENERATOR_PLATFORM_LWR) + message(STATUS "CMAKE_GENERATOR_PLATFORM: ${CMAKE_GENERATOR_PLATFORM}") +else () + set(CMAKE_GENERATOR_PLATFORM_LWR "") +endif () + +if (NOT MSVC) + if (GGML_STATIC) + add_link_options(-static) + if (MINGW) + add_link_options(-static-libgcc -static-libstdc++) + endif() + endif() + if (GGML_GPROF) + add_compile_options(-pg) + endif() +endif() + +if (MINGW) + # Target Windows 8 for PrefetchVirtualMemory + add_compile_definitions(_WIN32_WINNT=${GGML_WIN_VER}) +endif() + +# +# POSIX conformance +# + +# clock_gettime came in POSIX.1b (1993) +# CLOCK_MONOTONIC came in POSIX.1-2001 / SUSv3 as optional +# posix_memalign came in POSIX.1-2001 / SUSv3 +# M_PI is an XSI extension since POSIX.1-2001 / SUSv3, came in XPG1 (1985) + +# Somehow in OpenBSD whenever POSIX conformance is specified +# some string functions rely on locale_t availability, +# which was introduced in POSIX.1-2008, forcing us to go higher +if (CMAKE_SYSTEM_NAME MATCHES "OpenBSD") + add_compile_definitions(_XOPEN_SOURCE=700) +else() + add_compile_definitions(_XOPEN_SOURCE=600) +endif() + +# Data types, macros and functions related to controlling CPU affinity and +# some memory allocation are available on Linux through GNU extensions in libc +if (CMAKE_SYSTEM_NAME MATCHES "Linux" OR CMAKE_SYSTEM_NAME MATCHES "Android") + add_compile_definitions(_GNU_SOURCE) +endif() + +# RLIMIT_MEMLOCK came in BSD, is not specified in POSIX.1, +# and on macOS its availability depends on enabling Darwin extensions +# similarly on DragonFly, enabling BSD extensions is necessary +if ( + CMAKE_SYSTEM_NAME MATCHES "Darwin" OR + CMAKE_SYSTEM_NAME 
MATCHES "iOS" OR + CMAKE_SYSTEM_NAME MATCHES "tvOS" OR + CMAKE_SYSTEM_NAME MATCHES "DragonFly" +) + add_compile_definitions(_DARWIN_C_SOURCE) +endif() + +# alloca is a non-standard interface that is not visible on BSDs when +# POSIX conformance is specified, but not all of them provide a clean way +# to enable it in such cases +if (CMAKE_SYSTEM_NAME MATCHES "FreeBSD") + add_compile_definitions(__BSD_VISIBLE) +endif() +if (CMAKE_SYSTEM_NAME MATCHES "NetBSD") + add_compile_definitions(_NETBSD_SOURCE) +endif() +if (CMAKE_SYSTEM_NAME MATCHES "OpenBSD") + add_compile_definitions(_BSD_SOURCE) +endif() + +if (WIN32) + add_compile_definitions(_CRT_SECURE_NO_WARNINGS) +endif() + +# ggml + +if (GGML_BACKEND_DL AND NOT BUILD_SHARED_LIBS) + message(FATAL_ERROR "GGML_BACKEND_DL requires BUILD_SHARED_LIBS") +endif() + +add_library(ggml-base + ../include/ggml.h + ../include/ggml-alloc.h + ../include/ggml-backend.h + ../include/ggml-cpp.h + ../include/ggml-opt.h + ggml.c + ggml-alloc.c + ggml-backend.cpp + ggml-opt.cpp + ggml-threading.cpp + ggml-threading.h + ggml-quants.c + ggml-quants.h) + +target_include_directories(ggml-base PRIVATE .) + +add_library(ggml + ggml-backend-reg.cpp) + +target_link_libraries(ggml PUBLIC ggml-base) + +if (CMAKE_SYSTEM_NAME MATCHES "Linux") + target_link_libraries(ggml PRIVATE dl) +endif() + +function(ggml_add_backend_library backend) + if (GGML_BACKEND_DL) + add_library(${backend} MODULE ${ARGN}) + # write the shared library to the output directory + set_target_properties(${backend} PROPERTIES LIBRARY_OUTPUT_DIRECTORY ${CMAKE_RUNTIME_OUTPUT_DIRECTORY}) + target_compile_definitions(${backend} PRIVATE GGML_BACKEND_DL) + add_dependencies(ggml ${backend}) + else() + add_library(${backend} ${ARGN}) + target_link_libraries(ggml PUBLIC ${backend}) + install(TARGETS ${backend} LIBRARY) + endif() + + target_link_libraries(${backend} PRIVATE ggml-base) + target_include_directories(${backend} PRIVATE ..) 
+ + if (${BUILD_SHARED_LIBS}) + target_compile_definitions(${backend} PRIVATE GGML_BACKEND_BUILD) + target_compile_definitions(${backend} PUBLIC GGML_BACKEND_SHARED) + endif() +endfunction() + +function(ggml_add_backend backend) + string(TOUPPER "GGML_${backend}" backend_id) + if (${backend_id}) + string(TOLOWER "ggml-${backend}" backend_target) + add_subdirectory(${backend_target}) + message(STATUS "Including ${backend} backend") + if (NOT GGML_BACKEND_DL) + string(TOUPPER "GGML_USE_${backend}" backend_use) + target_compile_definitions(ggml PUBLIC ${backend_use}) + endif() + endif() +endfunction() + +function(ggml_add_cpu_backend_variant tag_name) + set(GGML_CPU_TAG_NAME ${tag_name}) + # other: OPENMP LLAMAFILE CPU_HBM + foreach (feat NATIVE + AVX AVX2 AVX_VNNI FMA F16C + AVX512 AVX512_VBMI AVX512_VNNI AVX512_BF16 + AMX_TILE AMX_INT8 AMX_BF16) + set(GGML_${feat} OFF) + endforeach() + + foreach (feat ${ARGN}) + set(GGML_${feat} ON) + endforeach() + + ggml_add_cpu_backend_variant_impl(${tag_name}) + add_dependencies(ggml-cpu ggml-cpu-${tag_name}) +endfunction() + +ggml_add_backend(CPU) + +if (GGML_CPU_ALL_VARIANTS) + if (NOT GGML_BACKEND_DL) + message(FATAL_ERROR "GGML_CPU_ALL_VARIANTS requires GGML_BACKEND_DL") + endif() + add_custom_target(ggml-cpu) + ggml_add_cpu_backend_variant(sandybridge AVX) + ggml_add_cpu_backend_variant(haswell AVX F16C AVX2 FMA) + ggml_add_cpu_backend_variant(skylakex AVX F16C AVX2 FMA AVX512) + ggml_add_cpu_backend_variant(icelake AVX F16C AVX2 FMA AVX512 AVX512_VBMI AVX512_VNNI) + ggml_add_cpu_backend_variant(alderlake AVX F16C AVX2 FMA AVX_VNNI) + if (NOT MSVC) + # MSVC doesn't support AMX + ggml_add_cpu_backend_variant(sapphirerapids AVX F16C AVX2 FMA AVX512 AVX512_VBMI AVX512_VNNI AVX512_BF16 AMX_TILE AMX_INT8) + endif() +else () + ggml_add_cpu_backend_variant_impl("") +endif() + +ggml_add_backend(BLAS) +ggml_add_backend(CANN) +ggml_add_backend(CUDA) +ggml_add_backend(HIP) +ggml_add_backend(Kompute) +ggml_add_backend(METAL) +ggml_add_backend(MUSA) +ggml_add_backend(RPC) +ggml_add_backend(SYCL) +ggml_add_backend(Vulkan) +ggml_add_backend(OpenCL) + +foreach (target ggml-base ggml) + target_include_directories(${target} PUBLIC $ $) + target_compile_features (${target} PRIVATE c_std_11 cxx_std_17) # don't bump +endforeach() + +target_link_libraries(ggml-base PRIVATE Threads::Threads) + +find_library(MATH_LIBRARY m) +if (MATH_LIBRARY) + if (NOT WIN32 OR NOT DEFINED ENV{ONEAPI_ROOT}) + target_link_libraries(ggml-base PRIVATE m) + endif() +endif() + +if (CMAKE_SYSTEM_NAME MATCHES "Android") + target_link_libraries(ggml-base PRIVATE dl) +endif() + +if (BUILD_SHARED_LIBS) + foreach (target ggml-base ggml) + set_target_properties(${target} PROPERTIES POSITION_INDEPENDENT_CODE ON) + target_compile_definitions(${target} PRIVATE GGML_BUILD) + target_compile_definitions(${target} PUBLIC GGML_SHARED) + endforeach() +endif() diff --git a/llama/ggml-alloc.c b/ml/backend/ggml/ggml/src/ggml-alloc.c similarity index 96% rename from llama/ggml-alloc.c rename to ml/backend/ggml/ggml/src/ggml-alloc.c index 6ea83a90..8dc8226a 100644 --- a/llama/ggml-alloc.c +++ b/ml/backend/ggml/ggml/src/ggml-alloc.c @@ -1,29 +1,3 @@ -/** - * llama.cpp - commit 46e3556e01b824e52395fb050b29804b6cff2a7c - do not edit this file - * - * MIT License - * - * Copyright (c) 2023-2024 The ggml authors - * - * Permission is hereby granted, free of charge, to any person obtaining a copy - * of this software and associated documentation files (the "Software"), to deal - * in the Software without restriction, 
including without limitation the rights - * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell - * copies of the Software, and to permit persons to whom the Software is - * furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in all - * copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. - */ - #include "ggml-alloc.h" #include "ggml-backend-impl.h" #include "ggml.h" diff --git a/llama/ggml-backend-impl.h b/ml/backend/ggml/ggml/src/ggml-backend-impl.h similarity index 90% rename from llama/ggml-backend-impl.h rename to ml/backend/ggml/ggml/src/ggml-backend-impl.h index 37b59207..36d72e95 100644 --- a/llama/ggml-backend-impl.h +++ b/ml/backend/ggml/ggml/src/ggml-backend-impl.h @@ -1,29 +1,3 @@ -/** - * llama.cpp - commit 46e3556e01b824e52395fb050b29804b6cff2a7c - do not edit this file - * - * MIT License - * - * Copyright (c) 2023-2024 The ggml authors - * - * Permission is hereby granted, free of charge, to any person obtaining a copy - * of this software and associated documentation files (the "Software"), to deal - * in the Software without restriction, including without limitation the rights - * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell - * copies of the Software, and to permit persons to whom the Software is - * furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in all - * copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. 
- */
-
 #pragma once

 // ggml-backend internal header
diff --git a/llama/ggml-backend-reg.cpp b/ml/backend/ggml/ggml/src/ggml-backend-reg.cpp
similarity index 90%
rename from llama/ggml-backend-reg.cpp
rename to ml/backend/ggml/ggml/src/ggml-backend-reg.cpp
index 2ebc3439..ac5cda07 100644
--- a/llama/ggml-backend-reg.cpp
+++ b/ml/backend/ggml/ggml/src/ggml-backend-reg.cpp
@@ -1,29 +1,3 @@
-/**
- * llama.cpp - commit 46e3556e01b824e52395fb050b29804b6cff2a7c - do not edit this file
- *
- * MIT License
- *
- * Copyright (c) 2023-2024 The ggml authors
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to deal
- * in the Software without restriction, including without limitation the rights
- * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
- * copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-
 #include "ggml-backend-impl.h"
 #include "ggml-backend.h"
 #include "ggml-impl.h"
@@ -176,7 +150,7 @@ struct ggml_backend_reg_entry {

 struct ggml_backend_registry {
     std::vector<ggml_backend_reg_entry> backends;
-    std::vector<ggml_backend_dev_t> devices;
+    std::vector<std::pair<ggml_backend_dev_t, int>> devices;

     ggml_backend_registry() {
 #ifdef GGML_USE_CUDA
@@ -221,7 +195,7 @@
         }
     }

-    void register_backend(ggml_backend_reg_t reg, dl_handle_ptr handle = nullptr) {
+    void register_backend(ggml_backend_reg_t reg, int score = -1, dl_handle_ptr handle = nullptr) {
         if (!reg) {
             return;
         }
@@ -232,15 +206,15 @@
 #endif
         backends.push_back({ reg, std::move(handle) });
         for (size_t i = 0; i < ggml_backend_reg_dev_count(reg); i++) {
-            register_device(ggml_backend_reg_dev_get(reg, i));
+            register_device(ggml_backend_reg_dev_get(reg, i), score);
         }
     }

-    void register_device(ggml_backend_dev_t device) {
+    void register_device(ggml_backend_dev_t device, int score = -1) {
 #ifndef NDEBUG
         GGML_LOG_DEBUG("%s: registered device %s (%s)\n", __func__, ggml_backend_dev_name(device), ggml_backend_dev_description(device));
 #endif
-        devices.push_back(device);
+        devices.push_back({device, score});
     }

     ggml_backend_reg_t load_backend(const std::wstring & path, bool silent) {
@@ -283,7 +257,7 @@
         GGML_LOG_INFO("%s: loaded %s backend from %s\n", __func__, ggml_backend_reg_name(reg), utf16_to_utf8(path).c_str());

-        register_backend(reg, std::move(handle));
+        register_backend(reg, score_fn ? score_fn() : -1, std::move(handle));

         return reg;
     }
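The effect of the new score parameter is visible in ggml_backend_dev_get below: each registered device now carries an integer priority, and a max-heap over that score moves the best-scoring device to the front. A self-contained illustration of the same ordering (plain STL; not part of the patch, and the device names are made up):

    #include <algorithm>
    #include <cstdio>
    #include <string>
    #include <utility>
    #include <vector>

    int main() {
        // hypothetical (device, score) pairs; -1 means no score was assigned
        std::vector<std::pair<std::string, int>> devices = {
            {"CPU", -1}, {"CUDA0", 100}, {"Metal", 50},
        };
        // same comparator as the patch: heap ordered by ascending score,
        // which places the highest-scoring device at the front
        std::make_heap(devices.begin(), devices.end(),
                       [](const auto & a, const auto & b) { return a.second < b.second; });
        std::printf("%s\n", devices.front().first.c_str()); // prints CUDA0
        return 0;
    }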
@@ -306,7 +280,7 @@
         // remove devices
         devices.erase(
             std::remove_if(devices.begin(), devices.end(),
-                [reg](ggml_backend_dev_t dev) { return ggml_backend_dev_backend_reg(dev) == reg; }),
+                [reg](std::pair<ggml_backend_dev_t, int> dev) { return ggml_backend_dev_backend_reg(dev.first) == reg; }),
             devices.end());

         // remove backend
@@ -364,7 +338,12 @@ size_t ggml_backend_dev_count() {

 ggml_backend_dev_t ggml_backend_dev_get(size_t index) {
     GGML_ASSERT(index < ggml_backend_dev_count());
-    return get_reg().devices[index];
+    auto devices = get_reg().devices;
+    if (!std::is_heap(devices.begin(), devices.end())) {
+        std::make_heap(devices.begin(), devices.end(), [](const auto & a, const auto & b) { return a.second < b.second; });
+    }
+
+    return devices[index].first;
 }

 ggml_backend_dev_t ggml_backend_dev_by_name(const char * name) {
diff --git a/llama/ggml-backend.cpp b/ml/backend/ggml/ggml/src/ggml-backend.cpp
similarity index 98%
rename from llama/ggml-backend.cpp
rename to ml/backend/ggml/ggml/src/ggml-backend.cpp
index 3e11d73f..a12172dc 100644
--- a/llama/ggml-backend.cpp
+++ b/ml/backend/ggml/ggml/src/ggml-backend.cpp
@@ -1,29 +1,3 @@
-/**
- * llama.cpp - commit 46e3556e01b824e52395fb050b29804b6cff2a7c - do not edit this file
- *
- * MIT License
- *
- * Copyright (c) 2023-2024 The ggml authors
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to deal
- * in the Software without restriction, including without limitation the rights
- * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
- * copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */ - // Note: porting this file to C++ is a work in progress #ifdef _WIN32 @@ -132,12 +106,6 @@ void ggml_backend_buffer_free(ggml_backend_buffer_t buffer) { if (buffer->iface.free_buffer != NULL) { buffer->iface.free_buffer(buffer); } - -// TODO: this needs to be freed in cuda and hip backends because -// the cuda backend implementation compiled with msvc -#if !defined(GGML_USE_CUDA) && !defined(GGML_USE_HIP) - delete buffer; -#endif } size_t ggml_backend_buffer_get_size(ggml_backend_buffer_t buffer) { diff --git a/ml/backend/ggml/ggml/src/ggml-blas/CMakeLists.txt b/ml/backend/ggml/ggml/src/ggml-blas/CMakeLists.txt new file mode 100644 index 00000000..0bf3c05d --- /dev/null +++ b/ml/backend/ggml/ggml/src/ggml-blas/CMakeLists.txt @@ -0,0 +1,87 @@ +if (GGML_STATIC) + set(BLA_STATIC ON) +endif() +#if (CMAKE_VERSION VERSION_GREATER_EQUAL 3.22) +# set(BLA_SIZEOF_INTEGER 8) +#endif() + +set(BLA_VENDOR ${GGML_BLAS_VENDOR}) +find_package(BLAS) + +if (BLAS_FOUND) + message(STATUS "BLAS found, Libraries: ${BLAS_LIBRARIES}") + + ggml_add_backend_library(ggml-blas + ggml-blas.cpp + ) + + if (${GGML_BLAS_VENDOR} MATCHES "Apple") + add_compile_definitions(ACCELERATE_NEW_LAPACK) + add_compile_definitions(ACCELERATE_LAPACK_ILP64) + add_compile_definitions(GGML_BLAS_USE_ACCELERATE) + elseif ("${BLAS_INCLUDE_DIRS}" STREQUAL "") + # BLAS_INCLUDE_DIRS is missing in FindBLAS.cmake. + # see https://gitlab.kitware.com/cmake/cmake/-/issues/20268 + find_package(PkgConfig REQUIRED) + if (${GGML_BLAS_VENDOR} MATCHES "Generic") + pkg_check_modules(DepBLAS blas) + elseif (${GGML_BLAS_VENDOR} MATCHES "OpenBLAS") + # As of openblas v0.3.22, the 64-bit is named openblas64.pc + pkg_check_modules(DepBLAS openblas64) + if (NOT DepBLAS_FOUND) + pkg_check_modules(DepBLAS openblas) + endif() + elseif (${GGML_BLAS_VENDOR} MATCHES "FLAME") + add_compile_definitions(GGML_BLAS_USE_BLIS) + pkg_check_modules(DepBLAS blis) + elseif (${GGML_BLAS_VENDOR} MATCHES "ATLAS") + pkg_check_modules(DepBLAS blas-atlas) + elseif (${GGML_BLAS_VENDOR} MATCHES "FlexiBLAS") + pkg_check_modules(DepBLAS flexiblas_api) + elseif (${GGML_BLAS_VENDOR} MATCHES "Intel") + add_compile_definitions(GGML_BLAS_USE_MKL) + # all Intel* libraries share the same include path + pkg_check_modules(DepBLAS mkl-sdl) + elseif (${GGML_BLAS_VENDOR} MATCHES "NVHPC") + # this doesn't provide pkg-config + # suggest to assign BLAS_INCLUDE_DIRS on your own + if ("${NVHPC_VERSION}" STREQUAL "") + message(WARNING "Better to set NVHPC_VERSION") + else() + set(DepBLAS_FOUND ON) + set(DepBLAS_INCLUDE_DIRS "/opt/nvidia/hpc_sdk/${CMAKE_SYSTEM_NAME}_${CMAKE_SYSTEM_PROCESSOR}/${NVHPC_VERSION}/math_libs/include") + endif() + endif() + if (DepBLAS_FOUND) + set(BLAS_INCLUDE_DIRS ${DepBLAS_INCLUDE_DIRS}) + else() + message(WARNING "BLAS_INCLUDE_DIRS neither been provided nor been automatically" + " detected by pkgconfig, trying to find cblas.h from possible paths...") + find_path(BLAS_INCLUDE_DIRS + NAMES cblas.h + HINTS + /usr/include + /usr/local/include + /usr/include/openblas + /opt/homebrew/opt/openblas/include + /usr/local/opt/openblas/include + /usr/include/x86_64-linux-gnu/openblas/include + ) + endif() + endif() + + message(STATUS "BLAS found, Includes: ${BLAS_INCLUDE_DIRS}") + + target_compile_options(ggml-blas PRIVATE ${BLAS_LINKER_FLAGS}) + + if (${BLAS_INCLUDE_DIRS} MATCHES "mkl" AND (${GGML_BLAS_VENDOR} MATCHES "Generic" OR ${GGML_BLAS_VENDOR} MATCHES "Intel")) + add_compile_definitions(GGML_BLAS_USE_MKL) + endif() + + target_link_libraries (ggml-blas PRIVATE 
${BLAS_LIBRARIES}) + target_include_directories(ggml-blas PRIVATE ${BLAS_INCLUDE_DIRS}) +else() + message(ERROR "BLAS not found, please refer to " + "https://cmake.org/cmake/help/latest/module/FindBLAS.html#blas-lapack-vendors" + " to set correct GGML_BLAS_VENDOR") +endif() diff --git a/ml/backend/ggml/ggml/src/ggml-blas/blas.go b/ml/backend/ggml/ggml/src/ggml-blas/blas.go new file mode 100644 index 00000000..b29c9f14 --- /dev/null +++ b/ml/backend/ggml/ggml/src/ggml-blas/blas.go @@ -0,0 +1,10 @@ +//go:build darwin && arm64 + +package blas + +// #cgo CXXFLAGS: -std=c++11 +// #cgo CPPFLAGS: -DGGML_USE_BLAS +// #cgo CPPFLAGS: -I${SRCDIR}/.. -I${SRCDIR}/../../include +// #cgo darwin,arm64 CPPFLAGS: -DGGML_BLAS_USE_ACCELERATE -DACCELERATE_NEW_LAPACK -DACCELERATE_LAPACK_ILP64 +// #cgo darwin,arm64 LDFLAGS: -framework Accelerate +import "C" diff --git a/llama/ggml-blas.cpp b/ml/backend/ggml/ggml/src/ggml-blas/ggml-blas.cpp similarity index 92% rename from llama/ggml-blas.cpp rename to ml/backend/ggml/ggml/src/ggml-blas/ggml-blas.cpp index 44acf0bd..ec158dfa 100644 --- a/llama/ggml-blas.cpp +++ b/ml/backend/ggml/ggml/src/ggml-blas/ggml-blas.cpp @@ -1,31 +1,3 @@ -/** - * llama.cpp - commit 46e3556e01b824e52395fb050b29804b6cff2a7c - do not edit this file - * - * MIT License - * - * Copyright (c) 2023-2024 The ggml authors - * - * Permission is hereby granted, free of charge, to any person obtaining a copy - * of this software and associated documentation files (the "Software"), to deal - * in the Software without restriction, including without limitation the rights - * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell - * copies of the Software, and to permit persons to whom the Software is - * furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in all - * copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. 
- */ - -#ifdef GGML_USE_BLAS - #include "ggml-impl.h" #include "ggml-blas.h" #include "ggml-backend-impl.h" @@ -543,5 +515,3 @@ ggml_backend_reg_t ggml_backend_blas_reg(void) { } GGML_BACKEND_DL_IMPL(ggml_backend_blas_reg) - -#endif // GGML_USE_BLAS \ No newline at end of file diff --git a/llama/ggml-common.h b/ml/backend/ggml/ggml/src/ggml-common.h similarity index 99% rename from llama/ggml-common.h rename to ml/backend/ggml/ggml/src/ggml-common.h index e227c13f..f13fd4de 100644 --- a/llama/ggml-common.h +++ b/ml/backend/ggml/ggml/src/ggml-common.h @@ -1,29 +1,3 @@ -/** - * llama.cpp - commit 46e3556e01b824e52395fb050b29804b6cff2a7c - do not edit this file - * - * MIT License - * - * Copyright (c) 2023-2024 The ggml authors - * - * Permission is hereby granted, free of charge, to any person obtaining a copy - * of this software and associated documentation files (the "Software"), to deal - * in the Software without restriction, including without limitation the rights - * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell - * copies of the Software, and to permit persons to whom the Software is - * furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in all - * copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. - */ - #ifndef GGML_COMMON_DECL #if defined(GGML_COMMON_DECL_C) diff --git a/ml/backend/ggml/ggml/src/ggml-cpu/CMakeLists.txt b/ml/backend/ggml/ggml/src/ggml-cpu/CMakeLists.txt new file mode 100644 index 00000000..6b3641c4 --- /dev/null +++ b/ml/backend/ggml/ggml/src/ggml-cpu/CMakeLists.txt @@ -0,0 +1,346 @@ +function(ggml_add_cpu_backend_variant_impl tag_name) + if (tag_name) + set(GGML_CPU_NAME ggml-cpu-${tag_name}) + else() + set(GGML_CPU_NAME ggml-cpu) + endif() + + ggml_add_backend_library(${GGML_CPU_NAME}) + + list (APPEND GGML_CPU_SOURCES + ggml-cpu/ggml-cpu.c + ggml-cpu/ggml-cpu.cpp + ggml-cpu/ggml-cpu-aarch64.cpp + ggml-cpu/ggml-cpu-aarch64.h + ggml-cpu/ggml-cpu-hbm.cpp + ggml-cpu/ggml-cpu-hbm.h + ggml-cpu/ggml-cpu-quants.c + ggml-cpu/ggml-cpu-quants.h + ggml-cpu/ggml-cpu-traits.cpp + ggml-cpu/ggml-cpu-traits.h + ggml-cpu/amx/amx.cpp + ggml-cpu/amx/amx.h + ggml-cpu/amx/mmq.cpp + ggml-cpu/amx/mmq.h + ggml-cpu/ggml-cpu-impl.h + ) + + target_compile_features(${GGML_CPU_NAME} PRIVATE c_std_11 cxx_std_17) + target_include_directories(${GGML_CPU_NAME} PRIVATE . 
ggml-cpu) + + if (APPLE AND GGML_ACCELERATE) + find_library(ACCELERATE_FRAMEWORK Accelerate) + if (ACCELERATE_FRAMEWORK) + message(STATUS "Accelerate framework found") + + target_compile_definitions(${GGML_CPU_NAME} PRIVATE GGML_USE_ACCELERATE) + target_compile_definitions(${GGML_CPU_NAME} PRIVATE ACCELERATE_NEW_LAPACK) + target_compile_definitions(${GGML_CPU_NAME} PRIVATE ACCELERATE_LAPACK_ILP64) + + target_link_libraries(${GGML_CPU_NAME} PRIVATE ${ACCELERATE_FRAMEWORK}) + else() + message(WARNING "Accelerate framework not found") + endif() + endif() + + if (GGML_OPENMP) + find_package(OpenMP) + if (OpenMP_FOUND) + target_compile_definitions(${GGML_CPU_NAME} PRIVATE GGML_USE_OPENMP) + + target_link_libraries(${GGML_CPU_NAME} PRIVATE OpenMP::OpenMP_C OpenMP::OpenMP_CXX) + else() + message(WARNING "OpenMP not found") + endif() + endif() + + if (GGML_LLAMAFILE) + target_compile_definitions(${GGML_CPU_NAME} PRIVATE GGML_USE_LLAMAFILE) + + list(APPEND GGML_CPU_SOURCES + ggml-cpu/llamafile/sgemm.cpp + ggml-cpu/llamafile/sgemm.h) + endif() + + if (GGML_CPU_HBM) + find_library(memkind memkind REQUIRED) + + message(STATUS "Using memkind for CPU HBM") + + target_compile_definitions(${GGML_CPU_NAME} PRIVATE GGML_USE_CPU_HBM) + + target_link_libraries(${GGML_CPU_NAME} PUBLIC memkind) + endif() + + if (CMAKE_OSX_ARCHITECTURES STREQUAL "arm64" OR + CMAKE_GENERATOR_PLATFORM_LWR STREQUAL "arm64" OR + (NOT CMAKE_OSX_ARCHITECTURES AND NOT CMAKE_GENERATOR_PLATFORM_LWR AND + CMAKE_SYSTEM_PROCESSOR MATCHES "^(aarch64|arm.*|ARM64)$")) + + message(STATUS "ARM detected") + + if (MSVC AND NOT CMAKE_C_COMPILER_ID STREQUAL "Clang") + message(FATAL_ERROR "MSVC is not supported for ARM, use clang") + else() + check_cxx_compiler_flag(-mfp16-format=ieee GGML_COMPILER_SUPPORTS_FP16_FORMAT_I3E) + if (NOT "${GGML_COMPILER_SUPPORTS_FP16_FORMAT_I3E}" STREQUAL "") + list(APPEND ARCH_FLAGS -mfp16-format=ieee) + endif() + + if (GGML_NATIVE) + # -mcpu=native does not always enable all the features in some compilers, + # so we check for them manually and enable them if available + + execute_process( + COMMAND ${CMAKE_C_COMPILER} -mcpu=native -E -v - + INPUT_FILE "/dev/null" + OUTPUT_QUIET + ERROR_VARIABLE ARM_MCPU + RESULT_VARIABLE ARM_MCPU_RESULT + ) + if (NOT ARM_MCPU_RESULT) + string(REGEX MATCH "-mcpu=[^ ']+" ARM_MCPU_FLAG "${ARM_MCPU}") + endif() + if ("${ARM_MCPU_FLAG}" STREQUAL "") + set(ARM_MCPU_FLAG -mcpu=native) + message(STATUS "ARM -mcpu not found, -mcpu=native will be used") + endif() + + include(CheckCXXSourceRuns) + + function(check_arm_feature tag code) + set(CMAKE_REQUIRED_FLAGS_SAVE ${CMAKE_REQUIRED_FLAGS}) + set(CMAKE_REQUIRED_FLAGS "${ARM_MCPU_FLAG}+${tag}") + check_cxx_source_runs( + "${code}" + GGML_MACHINE_SUPPORTS_${tag} + ) + if (GGML_MACHINE_SUPPORTS_${tag}) + set(ARM_MCPU_FLAG_FIX "${ARM_MCPU_FLAG_FIX}+${tag}" PARENT_SCOPE) + else() + set(ARM_MCPU_FLAG_FIX "${ARM_MCPU_FLAG_FIX}+no${tag}" PARENT_SCOPE) + endif() + set(CMAKE_REQUIRED_FLAGS ${CMAKE_REQUIRED_FLAGS_SAVE}) + endfunction() + + check_arm_feature(dotprod "#include \nint main() { int8x16_t _a, _b; volatile int32x4_t _s = vdotq_s32(_s, _a, _b); return 0; }") + check_arm_feature(i8mm "#include \nint main() { int8x16_t _a, _b; volatile int32x4_t _s = vmmlaq_s32(_s, _a, _b); return 0; }") + check_arm_feature(sve "#include \nint main() { svfloat32_t _a, _b; volatile svfloat32_t _c = svadd_f32_z(svptrue_b8(), _a, _b); return 0; }") + + list(APPEND ARCH_FLAGS "${ARM_MCPU_FLAG}${ARM_MCPU_FLAG_FIX}") + else() + if (GGML_CPU_ARM_ARCH) + list(APPEND 
ARCH_FLAGS -march=${GGML_CPU_ARM_ARCH}) + endif() + endif() + + # show enabled features + if (CMAKE_HOST_SYSTEM_NAME STREQUAL "Windows") + set(FEAT_INPUT_FILE "NUL") + else() + set(FEAT_INPUT_FILE "/dev/null") + endif() + + execute_process( + COMMAND ${CMAKE_C_COMPILER} ${ARCH_FLAGS} -dM -E - + INPUT_FILE ${FEAT_INPUT_FILE} + OUTPUT_VARIABLE ARM_FEATURE + RESULT_VARIABLE ARM_FEATURE_RESULT + ) + if (ARM_FEATURE_RESULT) + message(WARNING "Failed to get ARM features") + else() + foreach(feature DOTPROD SVE MATMUL_INT8 FMA FP16_VECTOR_ARITHMETIC) + string(FIND "${ARM_FEATURE}" "__ARM_FEATURE_${feature} 1" feature_pos) + if (NOT ${feature_pos} EQUAL -1) + message(STATUS "ARM feature ${feature} enabled") + endif() + endforeach() + endif() + endif() + elseif (CMAKE_OSX_ARCHITECTURES STREQUAL "x86_64" OR CMAKE_GENERATOR_PLATFORM_LWR MATCHES "^(x86_64|i686|amd64|x64|win32)$" OR + (NOT CMAKE_OSX_ARCHITECTURES AND NOT CMAKE_GENERATOR_PLATFORM_LWR AND + CMAKE_SYSTEM_PROCESSOR MATCHES "^(x86_64|i686|AMD64|amd64)$")) + + message(STATUS "x86 detected") + + if (MSVC) + # instruction set detection for MSVC only + if (GGML_NATIVE) + include(ggml-cpu/cmake/FindSIMD.cmake) + endif () + if (GGML_AVX512) + list(APPEND ARCH_FLAGS /arch:AVX512) + # /arch:AVX512 includes: __AVX512F__, __AVX512CD__, __AVX512BW__, __AVX512DQ__, and __AVX512VL__ + # MSVC has no compile-time flags enabling specific + # AVX512 extensions, neither it defines the + # macros corresponding to the extensions. + # Do it manually. + list(APPEND ARCH_DEFINITIONS GGML_AVX512) + if (GGML_AVX512_VBMI) + list(APPEND ARCH_DEFINITIONS __AVX512VBMI__) + if (CMAKE_C_COMPILER_ID STREQUAL "Clang") + list(APPEND ARCH_FLAGS -mavx512vbmi) + endif() + endif() + if (GGML_AVX512_VNNI) + list(APPEND ARCH_DEFINITIONS __AVX512VNNI__ GGML_AVX512_VNNI) + if (CMAKE_C_COMPILER_ID STREQUAL "Clang") + list(APPEND ARCH_FLAGS -mavx512vnni) + endif() + endif() + if (GGML_AVX512_BF16) + list(APPEND ARCH_DEFINITIONS __AVX512BF16__ GGML_AVX512_BF16) + if (CMAKE_C_COMPILER_ID STREQUAL "Clang") + list(APPEND ARCH_FLAGS -mavx512bf16) + endif() + endif() + if (GGML_AMX_TILE) + list(APPEND ARCH_DEFINITIONS __AMX_TILE__ GGML_AMX_TILE) + endif() + if (GGML_AMX_INT8) + list(APPEND ARCH_DEFINITIONS __AMX_INT8__ GGML_AMX_INT8) + endif() + if (GGML_AMX_BF16) + list(APPEND ARCH_DEFINITIONS __AMX_BF16__ GGML_AMX_BF16) + endif() + elseif (GGML_AVX2) + list(APPEND ARCH_FLAGS /arch:AVX2) + list(APPEND ARCH_DEFINITIONS GGML_AVX2 GGML_FMA GGML_F16C) + elseif (GGML_AVX) + list(APPEND ARCH_FLAGS /arch:AVX) + list(APPEND ARCH_DEFINITIONS GGML_AVX) + else () + list(APPEND ARCH_FLAGS /arch:SSE4.2) + list(APPEND ARCH_DEFINITIONS GGML_SSE42) + endif() + if (GGML_AVX_VNNI) + list(APPEND ARCH_DEFINITIONS __AVXVNNI__ GGML_AVX_VNNI) + endif() + else () + if (GGML_NATIVE) + list(APPEND ARCH_FLAGS -march=native) + else () + list(APPEND ARCH_FLAGS -msse4.2) + list(APPEND ARCH_DEFINITIONS GGML_SSE42) + if (GGML_F16C) + list(APPEND ARCH_FLAGS -mf16c) + list(APPEND ARCH_DEFINITIONS GGML_F16C) + endif() + if (GGML_FMA) + list(APPEND ARCH_FLAGS -mfma) + list(APPEND ARCH_DEFINITIONS GGML_FMA) + endif() + if (GGML_AVX) + list(APPEND ARCH_FLAGS -mavx) + list(APPEND ARCH_DEFINITIONS GGML_AVX) + endif() + if (GGML_AVX2) + list(APPEND ARCH_FLAGS -mavx2) + list(APPEND ARCH_DEFINITIONS GGML_AVX2) + endif() + if (GGML_AVX_VNNI) + list(APPEND ARCH_FLAGS -mavxvnni) + list(APPEND ARCH_DEFINITIONS GGML_AVX_VNNI) + endif() + if (GGML_AVX512) + list(APPEND ARCH_FLAGS -mavx512f) + list(APPEND ARCH_FLAGS -mavx512cd) + 
list(APPEND ARCH_FLAGS -mavx512vl) + list(APPEND ARCH_FLAGS -mavx512dq) + list(APPEND ARCH_FLAGS -mavx512bw) + list(APPEND ARCH_DEFINITIONS GGML_AVX512) + endif() + if (GGML_AVX512_VBMI) + list(APPEND ARCH_FLAGS -mavx512vbmi) + list(APPEND ARCH_DEFINITIONS GGML_AVX512_VBMI) + endif() + if (GGML_AVX512_VNNI) + list(APPEND ARCH_FLAGS -mavx512vnni) + list(APPEND ARCH_DEFINITIONS GGML_AVX512_VNNI) + endif() + if (GGML_AVX512_BF16) + list(APPEND ARCH_FLAGS -mavx512bf16) + list(APPEND ARCH_DEFINITIONS GGML_AVX512_BF16) + endif() + if (GGML_AMX_TILE) + list(APPEND ARCH_FLAGS -mamx-tile) + list(APPEND ARCH_DEFINITIONS GGML_AMX_TILE) + endif() + if (GGML_AMX_INT8) + list(APPEND ARCH_FLAGS -mamx-int8) + list(APPEND ARCH_DEFINITIONS GGML_AMX_INT8) + endif() + if (GGML_AMX_BF16) + list(APPEND ARCH_FLAGS -mamx-bf16) + list(APPEND ARCH_DEFINITIONS GGML_AMX_BF16) + endif() + endif() + endif() + elseif (${CMAKE_SYSTEM_PROCESSOR} MATCHES "ppc64") + message(STATUS "PowerPC detected") + execute_process(COMMAND bash -c "grep POWER10 /proc/cpuinfo | head -n 1" OUTPUT_VARIABLE POWER10_M) + string(FIND "${POWER10_M}" "POWER10" substring_index) + if (NOT DEFINED substring_index OR "${substring_index}" STREQUAL "") + set(substring_index -1) + endif() + + if (${substring_index} GREATER_EQUAL 0) + list(APPEND ARCH_FLAGS -mcpu=power10) + elseif (${CMAKE_SYSTEM_PROCESSOR} MATCHES "ppc64le") + list(APPEND ARCH_FLAGS -mcpu=powerpc64le) + else() + list(APPEND ARCH_FLAGS -mcpu=native -mtune=native) + # TODO: Add targets for Power8/Power9 (Altivec/VSX) and Power10(MMA) and query for big endian systems (ppc64/le/be) + endif() + elseif (${CMAKE_SYSTEM_PROCESSOR} MATCHES "loongarch64") + message(STATUS "loongarch64 detected") + + list(APPEND ARCH_FLAGS -march=loongarch64) + if (GGML_LASX) + list(APPEND ARCH_FLAGS -mlasx) + endif() + if (GGML_LSX) + list(APPEND ARCH_FLAGS -mlsx) + endif() + elseif (${CMAKE_SYSTEM_PROCESSOR} MATCHES "riscv64") + message(STATUS "RISC-V detected") + if (GGML_RVV) + list(APPEND ARCH_FLAGS -march=rv64gcv -mabi=lp64d) + endif() + else() + message(STATUS "Unknown architecture") + endif() + + if (GGML_CPU_AARCH64) + target_compile_definitions(${GGML_CPU_NAME} PRIVATE GGML_USE_CPU_AARCH64) + endif() + + message(STATUS "Adding CPU backend variant ${GGML_CPU_NAME}: ${ARCH_FLAGS} ${ARCH_DEFINITIONS}") + target_sources(${GGML_CPU_NAME} PRIVATE ${GGML_CPU_SOURCES}) + target_compile_options(${GGML_CPU_NAME} PRIVATE ${ARCH_FLAGS}) + target_compile_definitions(${GGML_CPU_NAME} PRIVATE ${ARCH_DEFINITIONS}) + + if (GGML_BACKEND_DL) + if (GGML_NATIVE) + # the feature check relies on ARCH_DEFINITIONS, but it is not set with GGML_NATIVE + message(FATAL_ERROR "GGML_NATIVE is not compatible with GGML_BACKEND_DL, consider using GGML_CPU_ALL_VARIANTS") + endif() + + # The feature detection code is compiled as a separate target so that + # it can be built without the architecture flags + # Since multiple variants of the CPU backend may be included in the same + # build, using set_source_files_properties() to set the arch flags is not possible + set(GGML_CPU_FEATS_NAME ${GGML_CPU_NAME}-feats) + add_library(${GGML_CPU_FEATS_NAME} OBJECT ggml-cpu/cpu-feats-x86.cpp) + target_include_directories(${GGML_CPU_FEATS_NAME} PRIVATE . .. 
../include) + target_compile_definitions(${GGML_CPU_FEATS_NAME} PRIVATE ${ARCH_DEFINITIONS}) + target_compile_definitions(${GGML_CPU_FEATS_NAME} PRIVATE GGML_BACKEND_DL GGML_BACKEND_BUILD GGML_BACKEND_SHARED) + set_target_properties(${GGML_CPU_FEATS_NAME} PROPERTIES POSITION_INDEPENDENT_CODE ON) + target_link_libraries(${GGML_CPU_NAME} PRIVATE ${GGML_CPU_FEATS_NAME}) + endif() + + if (EMSCRIPTEN) + set_target_properties(${GGML_CPU_NAME} PROPERTIES COMPILE_FLAGS "-msimd128") + endif() +endfunction() diff --git a/llama/amx.cpp b/ml/backend/ggml/ggml/src/ggml-cpu/amx/amx.cpp similarity index 86% rename from llama/amx.cpp rename to ml/backend/ggml/ggml/src/ggml-cpu/amx/amx.cpp index a2c7e8e5..5ec5263c 100644 --- a/llama/amx.cpp +++ b/ml/backend/ggml/ggml/src/ggml-cpu/amx/amx.cpp @@ -1,29 +1,3 @@ -/** - * llama.cpp - commit 46e3556e01b824e52395fb050b29804b6cff2a7c - do not edit this file - * - * MIT License - * - * Copyright (c) 2023-2024 The ggml authors - * - * Permission is hereby granted, free of charge, to any person obtaining a copy - * of this software and associated documentation files (the "Software"), to deal - * in the Software without restriction, including without limitation the rights - * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell - * copies of the Software, and to permit persons to whom the Software is - * furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in all - * copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. 
- */ - #include "amx.h" #include "common.h" #include "mmq.h" diff --git a/ml/backend/ggml/ggml/src/ggml-cpu/amx/amx.h b/ml/backend/ggml/ggml/src/ggml-cpu/amx/amx.h new file mode 100644 index 00000000..5b65d76b --- /dev/null +++ b/ml/backend/ggml/ggml/src/ggml-cpu/amx/amx.h @@ -0,0 +1,8 @@ +#include "ggml-backend.h" +#include "ggml-cpu-impl.h" + +// GGML internal header + +#if defined(__AMX_INT8__) && defined(__AVX512VNNI__) +ggml_backend_buffer_type_t ggml_backend_amx_buffer_type(void); +#endif diff --git a/ml/backend/ggml/ggml/src/ggml-cpu/amx/common.h b/ml/backend/ggml/ggml/src/ggml-cpu/amx/common.h new file mode 100644 index 00000000..f392e898 --- /dev/null +++ b/ml/backend/ggml/ggml/src/ggml-cpu/amx/common.h @@ -0,0 +1,91 @@ +#pragma once + +#include "ggml.h" +#include "ggml-cpu-impl.h" + +#include +#include +#include + +#if defined(GGML_USE_OPENMP) +#include +#endif + +#define TILE_M 16 +#define TILE_N 16 +#define TILE_K 32 +#define VNNI_BLK 4 + +#define AMX_BLK_SIZE 32 + +#define TMM0 0 +#define TMM1 1 +#define TMM2 2 +#define TMM3 3 +#define TMM4 4 +#define TMM5 5 +#define TMM6 6 +#define TMM7 7 + +// parallel routines +template ::value, int>::type = 0> +inline T div_up(T x, T y) { return (x + y - 1) / y; } + +template +inline void balance211(T n, T nth, T ith, T& n_start, T& n_end) { +#if 0 + // onednn partition pattern + T& n_my = n_end; + if (nth <= 1 || n == 0) { + n_start = 0; + n_my = n; + } else { + T n1 = div_up(n, nth); + T n2 = n1 - 1; + T T1 = n - n2 * nth; + n_my = ith < T1 ? n1 : n2; + n_start = ith <= T1 ? ith*n1 : T1 * n1 + (ith - T1) * n2; + } + n_end += n_start; +#else + // pytorch aten partition pattern + T n_my = div_up(n, nth); + n_start = ith * n_my; + n_end = std::min(n_start + n_my, n); +#endif +} + +template +inline void parallel_for(int n, const func_t& f) { +#if defined(GGML_USE_OPENMP) +#pragma omp parallel +{ + int nth = omp_get_num_threads(); + int ith = omp_get_thread_num(); + int tbegin, tend; + balance211(n, nth, ith, tbegin, tend); + f(tbegin, tend); +} +#else + f(0, n); +#endif +} + +template +inline void parallel_for_ggml(const ggml_compute_params * params, int n, const func_t & f) { + int tbegin, tend; + balance211(n, params->nth, params->ith, tbegin, tend); + f(tbegin, tend); +} + +// quantized types that have AMX support +inline bool qtype_has_amx_kernels(const enum ggml_type type) { + // TODO: fix padding for vnni format + return (type == GGML_TYPE_Q4_0) || + (type == GGML_TYPE_Q4_1) || + (type == GGML_TYPE_Q8_0) || + (type == GGML_TYPE_Q4_K) || + (type == GGML_TYPE_Q5_K) || + (type == GGML_TYPE_Q6_K) || + (type == GGML_TYPE_IQ4_XS); +} diff --git a/llama/mmq.cpp b/ml/backend/ggml/ggml/src/ggml-cpu/amx/mmq.cpp similarity index 98% rename from llama/mmq.cpp rename to ml/backend/ggml/ggml/src/ggml-cpu/amx/mmq.cpp index bb20e999..0ea91596 100644 --- a/llama/mmq.cpp +++ b/ml/backend/ggml/ggml/src/ggml-cpu/amx/mmq.cpp @@ -1,29 +1,3 @@ -/** - * llama.cpp - commit 46e3556e01b824e52395fb050b29804b6cff2a7c - do not edit this file - * - * MIT License - * - * Copyright (c) 2023-2024 The ggml authors - * - * Permission is hereby granted, free of charge, to any person obtaining a copy - * of this software and associated documentation files (the "Software"), to deal - * in the Software without restriction, including without limitation the rights - * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell - * copies of the Software, and to permit persons to whom the Software is - * furnished to do so, subject to the following conditions: - * 
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-
 #if defined(__GNUC__)
 #pragma GCC diagnostic ignored "-Wpedantic"
diff --git a/ml/backend/ggml/ggml/src/ggml-cpu/amx/mmq.h b/ml/backend/ggml/ggml/src/ggml-cpu/amx/mmq.h
new file mode 100644
index 00000000..baf76847
--- /dev/null
+++ b/ml/backend/ggml/ggml/src/ggml-cpu/amx/mmq.h
@@ -0,0 +1,10 @@
+#pragma once
+#include "common.h"
+
+size_t ggml_backend_amx_desired_wsize(const struct ggml_tensor * dst);
+
+size_t ggml_backend_amx_get_alloc_size(const struct ggml_tensor * tensor);
+
+void ggml_backend_amx_convert_weight(struct ggml_tensor * tensor, const void * data, size_t offset, size_t size);
+
+void ggml_backend_amx_mul_mat(const struct ggml_compute_params * params, struct ggml_tensor * dst);
diff --git a/ml/backend/ggml/ggml/src/ggml-cpu/cpu-feats-x86.cpp b/ml/backend/ggml/ggml/src/ggml-cpu/cpu-feats-x86.cpp
new file mode 100644
index 00000000..e8133d41
--- /dev/null
+++ b/ml/backend/ggml/ggml/src/ggml-cpu/cpu-feats-x86.cpp
@@ -0,0 +1,323 @@
+#include "ggml-backend-impl.h"
+
+#if defined(__x86_64__) || (defined(_MSC_VER) && defined(_M_AMD64))
+
+#ifdef _MSC_VER
+#include <intrin.h>
+#endif
+
+#include <cstring>
+#include <vector>
+#include <bitset>
+#include <array>
+#include <string>
+
+// ref: https://cdrdv2-public.intel.com/782156/325383-sdm-vol-2abcd.pdf
+struct cpuid_x86 {
+    bool SSE3(void) { return f_1_ecx[0]; }
+    bool PCLMULQDQ(void) { return f_1_ecx[1]; }
+    bool MONITOR(void) { return f_1_ecx[3]; }
+    bool SSSE3(void) { return f_1_ecx[9]; }
+    bool FMA(void) { return f_1_ecx[12]; }
+    bool CMPXCHG16B(void) { return f_1_ecx[13]; }
+    bool SSE41(void) { return f_1_ecx[19]; }
+    bool SSE42(void) { return f_1_ecx[20]; }
+    bool MOVBE(void) { return f_1_ecx[22]; }
+    bool POPCNT(void) { return f_1_ecx[23]; }
+    bool AES(void) { return f_1_ecx[25]; }
+    bool XSAVE(void) { return f_1_ecx[26]; }
+    bool OSXSAVE(void) { return f_1_ecx[27]; }
+    bool AVX(void) { return f_1_ecx[28]; }
+    bool F16C(void) { return f_1_ecx[29]; }
+    bool RDRAND(void) { return f_1_ecx[30]; }
+
+    bool MSR(void) { return f_1_edx[5]; }
+    bool CX8(void) { return f_1_edx[8]; }
+    bool SEP(void) { return f_1_edx[11]; }
+    bool CMOV(void) { return f_1_edx[15]; }
+    bool CLFSH(void) { return f_1_edx[19]; }
+    bool MMX(void) { return f_1_edx[23]; }
+    bool FXSR(void) { return f_1_edx[24]; }
+    bool SSE(void) { return f_1_edx[25]; }
+    bool SSE2(void) { return f_1_edx[26]; }
+
+    bool FSGSBASE(void) { return f_7_ebx[0]; }
+    bool BMI1(void) { return f_7_ebx[3]; }
+    bool HLE(void) { return is_intel && f_7_ebx[4]; }
+    bool AVX2(void) { return f_7_ebx[5]; }
+    bool BMI2(void) { return f_7_ebx[8]; }
+    bool ERMS(void) { return f_7_ebx[9]; }
+    bool INVPCID(void) { return f_7_ebx[10]; }
+    bool RTM(void) { return is_intel && f_7_ebx[11]; }
+    bool AVX512F(void) { return f_7_ebx[16]; }
+    bool AVX512DQ(void) { return f_7_ebx[17]; }
+    bool RDSEED(void) { return f_7_ebx[18]; }
+    bool ADX(void) { return f_7_ebx[19]; }
+    bool AVX512PF(void) { return f_7_ebx[26]; }
+    bool AVX512ER(void) { return f_7_ebx[27]; }
+    bool AVX512CD(void) { return f_7_ebx[28]; }
+    bool AVX512BW(void) { return f_7_ebx[30]; }
+    bool AVX512VL(void) { return f_7_ebx[31]; }
+
+    bool SHA(void) { return f_7_ebx[29]; }
+
+    bool PREFETCHWT1(void) { return f_7_ecx[0]; }
+
+    bool LAHF(void) { return f_81_ecx[0]; }
+    bool LZCNT(void) { return is_intel && f_81_ecx[5]; }
+    bool ABM(void) { return is_amd && f_81_ecx[5]; }
+    bool SSE4a(void) { return is_amd && f_81_ecx[6]; }
+    bool XOP(void) { return is_amd && f_81_ecx[11]; }
+    bool TBM(void) { return is_amd && f_81_ecx[21]; }
+
+    bool SYSCALL(void) { return is_intel && f_81_edx[11]; }
+    bool MMXEXT(void) { return is_amd && f_81_edx[22]; }
+    bool RDTSCP(void) { return is_intel && f_81_edx[27]; }
+    bool _3DNOWEXT(void) { return is_amd && f_81_edx[30]; }
+    bool _3DNOW(void) { return is_amd && f_81_edx[31]; }
+
+    bool AVX512_VBMI(void) { return f_7_ecx[1]; }
+    bool AVX512_VNNI(void) { return f_7_ecx[11]; }
+    bool AVX512_FP16(void) { return f_7_edx[23]; }
+    bool AVX512_BF16(void) { return f_7_1_eax[5]; }
+    bool AVX_VNNI(void) { return f_7_1_eax[4]; }
+
+    bool AMX_TILE(void) { return f_7_edx[24]; }
+    bool AMX_INT8(void) { return f_7_edx[25]; }
+    bool AMX_FP16(void) { return f_7_1_eax[21]; }
+    bool AMX_BF16(void) { return f_7_edx[22]; }
+
+#ifdef _MSC_VER
+    static void cpuid(int cpu_info[4], int eax) {
+        __cpuid(cpu_info, eax);
+    }
+    static void cpuidex(int cpu_info[4], int eax, int ecx) {
+        __cpuidex(cpu_info, eax, ecx);
+    }
+#else
+    static void cpuid(int cpu_info[4], int eax) {
+        __asm__ __volatile__(
+            "cpuid"
+            : "=a"(cpu_info[0]), "=b"(cpu_info[1]), "=c"(cpu_info[2]), "=d"(cpu_info[3])
+            : "a"(eax), "c"(0));
+    }
+    static void cpuidex(int cpu_info[4], int eax, int ecx) {
+        __asm__ __volatile__(
+            "cpuid"
+            : "=a"(cpu_info[0]), "=b"(cpu_info[1]), "=c"(cpu_info[2]), "=d"(cpu_info[3])
+            : "a"(eax), "c"(ecx));
+    }
+#endif
+
+    cpuid_x86() {
+        std::array<int, 4> cpui;
+        std::vector<std::array<int, 4>> data;
+
+        // calling __cpuid with 0x0 as the function_id argument
+        // gets the number of the highest valid function ID.
+        cpuid(cpui.data(), 0);
+        int n_ids = cpui[0];
+
+        for (int i = 0; i <= n_ids; ++i) {
+            cpuidex(cpui.data(), i, 0);
+            data.push_back(cpui);
+        }
+
+        // capture vendor string
+        char vendor[0x20] = {};
+        *reinterpret_cast<int *>(vendor) = data[0][1];
+        *reinterpret_cast<int *>(vendor + 4) = data[0][3];
+        *reinterpret_cast<int *>(vendor + 8) = data[0][2];
+        this->vendor = vendor;
+        if (this->vendor == "GenuineIntel") {
+            is_intel = true;
+        } else if (this->vendor == "AuthenticAMD") {
+            is_amd = true;
+        }
+
+        // load bitset with flags for function 0x00000001
+        if (n_ids >= 1) {
+            f_1_ecx = data[1][2];
+            f_1_edx = data[1][3];
+        }
+
+        // load bitset with flags for function 0x00000007
+        if (n_ids >= 7) {
+            f_7_ebx = data[7][1];
+            f_7_ecx = data[7][2];
+            f_7_edx = data[7][3];
+            cpuidex(cpui.data(), 7, 1);
+            f_7_1_eax = cpui[0];
+        }
+
+        // calling __cpuid with 0x80000000 as the function_id argument
+        // gets the number of the highest valid extended ID.
+        cpuid(cpui.data(), 0x80000000);
+        unsigned int n_ex_ids = cpui[0];
+
+        std::vector<std::array<int, 4>> ext_data;
+        for (unsigned int i = 0x80000000; i <= n_ex_ids; ++i) {
+            cpuidex(cpui.data(), i, 0);
+            ext_data.push_back(cpui);
+        }
+
+        // load bitset with flags for function 0x80000001
+        if (n_ex_ids >= 0x80000001) {
+            f_81_ecx = ext_data[1][2];
+            f_81_edx = ext_data[1][3];
+        }
+
+        // interpret CPU brand string if reported
+        char brand[0x40] = {};
+        if (n_ex_ids >= 0x80000004) {
+            std::memcpy(brand, ext_data[2].data(), sizeof(cpui));
+            std::memcpy(brand + 16, ext_data[3].data(), sizeof(cpui));
+            std::memcpy(brand + 32, ext_data[4].data(), sizeof(cpui));
+            this->brand = brand;
+        }
+    }
+
+    bool is_intel = false;
+    bool is_amd = false;
+    std::string vendor;
+    std::string brand;
+    std::bitset<32> f_1_ecx;
+    std::bitset<32> f_1_edx;
+    std::bitset<32> f_7_ebx;
+    std::bitset<32> f_7_ecx;
+    std::bitset<32> f_7_edx;
+    std::bitset<32> f_7_1_eax;
+    std::bitset<32> f_81_ecx;
+    std::bitset<32> f_81_edx;
+};
+
+#if 0
+void test_x86_is() {
+    cpuid_x86 is;
+    printf("CPU Vendor: %s\n", is.vendor.c_str());
+    printf("Brand: %s\n", is.brand.c_str());
+    printf("is_intel: %d\n", is.is_intel);
+    printf("is_amd: %d\n", is.is_amd);
+    printf("sse3: %d\n", is.SSE3());
+    printf("pclmulqdq: %d\n", is.PCLMULQDQ());
+    printf("ssse3: %d\n", is.SSSE3());
+    printf("fma: %d\n", is.FMA());
+    printf("cmpxchg16b: %d\n", is.CMPXCHG16B());
+    printf("sse41: %d\n", is.SSE41());
+    printf("sse42: %d\n", is.SSE42());
+    printf("movbe: %d\n", is.MOVBE());
+    printf("popcnt: %d\n", is.POPCNT());
+    printf("aes: %d\n", is.AES());
+    printf("xsave: %d\n", is.XSAVE());
+    printf("osxsave: %d\n", is.OSXSAVE());
+    printf("avx: %d\n", is.AVX());
+    printf("f16c: %d\n", is.F16C());
+    printf("rdrand: %d\n", is.RDRAND());
+    printf("msr: %d\n", is.MSR());
+    printf("cx8: %d\n", is.CX8());
+    printf("sep: %d\n", is.SEP());
+    printf("cmov: %d\n", is.CMOV());
+    printf("clflush: %d\n", is.CLFSH());
+    printf("mmx: %d\n", is.MMX());
+    printf("fxsr: %d\n", is.FXSR());
+    printf("sse: %d\n", is.SSE());
+    printf("sse2: %d\n", is.SSE2());
+    printf("fsgsbase: %d\n", is.FSGSBASE());
+    printf("bmi1: %d\n", is.BMI1());
+    printf("hle: %d\n", is.HLE());
+    printf("avx2: %d\n", is.AVX2());
+    printf("bmi2: %d\n", is.BMI2());
+    printf("erms: %d\n", is.ERMS());
+    printf("invpcid: %d\n", is.INVPCID());
+    printf("rtm: %d\n", is.RTM());
+    printf("avx512f: %d\n", is.AVX512F());
+    printf("rdseed: %d\n", is.RDSEED());
+    printf("adx: %d\n", is.ADX());
+    printf("avx512pf: %d\n", is.AVX512PF());
+    printf("avx512er: %d\n", is.AVX512ER());
+    printf("avx512cd: %d\n", is.AVX512CD());
+    printf("sha: %d\n", is.SHA());
+    printf("prefetchwt1: %d\n", is.PREFETCHWT1());
+    printf("lahf: %d\n", is.LAHF());
+    printf("lzcnt: %d\n", is.LZCNT());
+    printf("abm: %d\n", is.ABM());
+    printf("sse4a: %d\n", is.SSE4a());
+    printf("xop: %d\n", is.XOP());
+    printf("tbm: %d\n", is.TBM());
+    printf("syscall: %d\n", is.SYSCALL());
+    printf("mmxext: %d\n", is.MMXEXT());
+    printf("rdtscp: %d\n", is.RDTSCP());
+    printf("3dnowext: %d\n", is._3DNOWEXT());
+    printf("3dnow: %d\n", is._3DNOW());
+    printf("avx512_vbmi: %d\n", is.AVX512_VBMI());
+    printf("avx512_vnni: %d\n", is.AVX512_VNNI());
+    printf("avx512_fp16: %d\n", is.AVX512_FP16());
+    printf("avx512_bf16: %d\n", is.AVX512_BF16());
+    printf("amx_tile: %d\n", is.AMX_TILE());
+    printf("amx_int8: %d\n", is.AMX_INT8());
+    printf("amx_fp16: %d\n", is.AMX_FP16());
+    printf("amx_bf16: %d\n", is.AMX_BF16());
+}
+#endif
+
+static int ggml_backend_cpu_x86_score() {
+    // FIXME: this does not check for OS support
+
+    int score = 0;
+    cpuid_x86 is;
+
+#ifdef GGML_FMA
+    if (!is.FMA()) { return 0; }
+    score += 1;
+#endif
+#ifdef GGML_F16C
+    if (!is.F16C()) { return 0; }
+    score += 1<<1;
+#endif
+#ifdef GGML_SSE42
+    if (!is.SSE42()) { return 0; }
+    score += 1<<2;
+#endif
+#ifdef GGML_AVX
+    if (!is.AVX()) { return 0; }
+    score += 1<<4;
+#endif
+#ifdef GGML_AVX2
+    if (!is.AVX2()) { return 0; }
+    score += 1<<5;
+#endif
+#ifdef GGML_AVX_VNNI
+    if (!is.AVX_VNNI()) { return 0; }
+    score += 1<<6;
+#endif
+#ifdef GGML_AVX512
+    if (!is.AVX512F()) { return 0; }
+    if (!is.AVX512CD()) { return 0; }
+    if (!is.AVX512VL()) { return 0; }
+    if (!is.AVX512DQ()) { return 0; }
+    if (!is.AVX512BW()) { return 0; }
+    score += 1<<7;
+#endif
+#ifdef GGML_AVX512_VBMI
+    if (!is.AVX512_VBMI()) { return 0; }
+    score += 1<<8;
+#endif
+#ifdef GGML_AVX512_BF16
+    if (!is.AVX512_BF16()) { return 0; }
+    score += 1<<9;
+#endif
+#ifdef GGML_AVX512_VNNI
+    if (!is.AVX512_VNNI()) { return 0; }
+    score += 1<<10;
+#endif
+#ifdef GGML_AMX_INT8
+    if (!is.AMX_INT8()) { return 0; }
+    score += 1<<11;
+#endif
+
+    return score;
+}
+
+GGML_BACKEND_DL_SCORE_IMPL(ggml_backend_cpu_x86_score)
+
+#endif // defined(__x86_64__) || (defined(_MSC_VER) && defined(_M_AMD64))
diff --git a/ml/backend/ggml/ggml/src/ggml-cpu/cpu.go b/ml/backend/ggml/ggml/src/ggml-cpu/cpu.go
new file mode 100644
index 00000000..f0bb54c2
--- /dev/null
+++ b/ml/backend/ggml/ggml/src/ggml-cpu/cpu.go
@@ -0,0 +1,11 @@
+package cpu
+
+// #cgo CFLAGS: -Wno-implicit-function-declaration
+// #cgo CXXFLAGS: -std=c++17
+// #cgo CPPFLAGS: -I${SRCDIR}/amx -I${SRCDIR}/llamafile -I${SRCDIR}/.. -I${SRCDIR}/../../include
+// #cgo CPPFLAGS: -DGGML_USE_LLAMAFILE
+// #cgo linux CPPFLAGS: -D_GNU_SOURCE
+// #cgo darwin,arm64 CPPFLAGS: -DGGML_USE_ACCELERATE -DACCELERATE_NEW_LAPACK -DACCELERATE_LAPACK_ILP64
+// #cgo darwin,arm64 LDFLAGS: -framework Accelerate
+import "C"
+import _ "github.com/ollama/ollama/ml/backend/ggml/ggml/src/ggml-cpu/llamafile"
diff --git a/llama/ggml-cpu-aarch64.cpp b/ml/backend/ggml/ggml/src/ggml-cpu/ggml-cpu-aarch64.cpp
similarity index 99%
rename from llama/ggml-cpu-aarch64.cpp
rename to ml/backend/ggml/ggml/src/ggml-cpu/ggml-cpu-aarch64.cpp
index 0989fb20..622c63f1 100644
--- a/llama/ggml-cpu-aarch64.cpp
+++ b/ml/backend/ggml/ggml/src/ggml-cpu/ggml-cpu-aarch64.cpp
@@ -1,29 +1,3 @@
-/**
- * llama.cpp - commit 46e3556e01b824e52395fb050b29804b6cff2a7c - do not edit this file
- *
- * MIT License
- *
- * Copyright (c) 2023-2024 The ggml authors
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to deal
- * in the Software without restriction, including without limitation the rights
- * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
- * copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-
 #define GGML_COMMON_IMPL_CPP
 #define GGML_COMMON_DECL_CPP
 #include "ggml-common.h"
diff --git a/ml/backend/ggml/ggml/src/ggml-cpu/ggml-cpu-aarch64.h b/ml/backend/ggml/ggml/src/ggml-cpu/ggml-cpu-aarch64.h
new file mode 100644
index 00000000..6e84c826
--- /dev/null
+++ b/ml/backend/ggml/ggml/src/ggml-cpu/ggml-cpu-aarch64.h
@@ -0,0 +1,8 @@
+#pragma once
+
+#include "ggml-cpu-traits.h"
+#include "ggml.h"
+
+// GGML internal header
+
+ggml_backend_buffer_type_t ggml_backend_cpu_aarch64_buffer_type(void);
diff --git a/ml/backend/ggml/ggml/src/ggml-cpu/ggml-cpu-hbm.cpp b/ml/backend/ggml/ggml/src/ggml-cpu/ggml-cpu-hbm.cpp
new file mode 100644
index 00000000..fa8dea2a
--- /dev/null
+++ b/ml/backend/ggml/ggml/src/ggml-cpu/ggml-cpu-hbm.cpp
@@ -0,0 +1,55 @@
+#ifdef GGML_USE_CPU_HBM
+
+#include "ggml-backend.h"
+#include "ggml-backend-impl.h"
+#include "ggml-cpu.h"
+#include "ggml-impl.h"
+
+#include "ggml-cpu-hbm.h"
+
+// buffer type HBM
+
+#include <hbwmalloc.h>
+
+static const char * ggml_backend_cpu_hbm_buffer_type_get_name(ggml_backend_buffer_type_t buft) {
+    return "CPU_HBM";
+
+    GGML_UNUSED(buft);
+}
+
+static void ggml_backend_cpu_hbm_buffer_free_buffer(ggml_backend_buffer_t buffer) {
+    hbw_free(buffer->context);
+}
+
+static ggml_backend_buffer_t ggml_backend_cpu_hbm_buffer_type_alloc_buffer(ggml_backend_buffer_type_t buft,
+                                                                           size_t size) {
+    void * ptr;
+    int result = hbw_posix_memalign(&ptr, ggml_backend_cpu_buffer_type_get_alignment(buft), size);
+    if (result != 0) {
+        GGML_LOG_ERROR("failed to allocate HBM buffer of size %zu\n", size);
+        return NULL;
+    }
+
+    ggml_backend_buffer_t buffer = ggml_backend_cpu_buffer_from_ptr(ptr, size);
+    buffer->buft = buft;
+    buffer->iface.free_buffer = ggml_backend_cpu_hbm_buffer_free_buffer;
+
+    return buffer;
+}
+
+ggml_backend_buffer_type_t ggml_backend_cpu_hbm_buffer_type(void) {
+    static struct ggml_backend_buffer_type ggml_backend_cpu_buffer_type_hbm = {
+        /* .iface    = */ {
+            /* .get_name         = */ ggml_backend_cpu_hbm_buffer_type_get_name,
+            /* .alloc_buffer     = */ ggml_backend_cpu_hbm_buffer_type_alloc_buffer,
+            /* .get_alignment    = */ ggml_backend_cpu_buffer_type_get_alignment,
+            /* .get_max_size     = */ nullptr, // defaults to SIZE_MAX
+            /* .get_alloc_size   = */ nullptr, // defaults to ggml_nbytes
+            /* .is_host          = */ ggml_backend_cpu_buffer_type_is_host,
+        },
+        /* .context  = */ nullptr,
+    };
+
+    return &ggml_backend_cpu_buffer_type_hbm;
+}
+#endif
diff --git a/ml/backend/ggml/ggml/src/ggml-cpu/ggml-cpu-hbm.h b/ml/backend/ggml/ggml/src/ggml-cpu/ggml-cpu-hbm.h
new file mode 100644
index 00000000..09a1f09d
--- /dev/null
+++ b/ml/backend/ggml/ggml/src/ggml-cpu/ggml-cpu-hbm.h
@@ -0,0 +1,8 @@
+#pragma once
+
+#include "ggml-backend.h"
+#include "ggml.h"
+
+// GGML CPU internal header
+
+ggml_backend_buffer_type_t ggml_backend_cpu_hbm_buffer_type(void);
diff --git a/llama/ggml-cpu-impl.h b/ml/backend/ggml/ggml/src/ggml-cpu/ggml-cpu-impl.h
similarity index 87%
rename from llama/ggml-cpu-impl.h
rename to ml/backend/ggml/ggml/src/ggml-cpu/ggml-cpu-impl.h
index 54dc108c..d71076ad 100644
--- a/llama/ggml-cpu-impl.h
+++ b/ml/backend/ggml/ggml/src/ggml-cpu/ggml-cpu-impl.h
@@ -1,29 +1,3 @@
-/**
- * llama.cpp - commit 46e3556e01b824e52395fb050b29804b6cff2a7c - do not edit this file
- *
- * MIT License
- *
- * Copyright (c) 2023-2024 The ggml authors
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to deal
- * in the Software without restriction, including without limitation the rights
- * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
- * copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-
 #pragma once

 // GGML CPU internal header
diff --git a/llama/ggml-cpu-quants.c b/ml/backend/ggml/ggml/src/ggml-cpu/ggml-cpu-quants.c
similarity index 99%
rename from llama/ggml-cpu-quants.c
rename to ml/backend/ggml/ggml/src/ggml-cpu/ggml-cpu-quants.c
index a8288dec..8e147226 100644
--- a/llama/ggml-cpu-quants.c
+++ b/ml/backend/ggml/ggml/src/ggml-cpu/ggml-cpu-quants.c
@@ -1,29 +1,3 @@
-/**
- * llama.cpp - commit 46e3556e01b824e52395fb050b29804b6cff2a7c - do not edit this file
- *
- * MIT License
- *
- * Copyright (c) 2023-2024 The ggml authors
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to deal
- * in the Software without restriction, including without limitation the rights
- * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
- * copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
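A note on the scoring scheme in cpu-feats-x86.cpp above: every GGML_* feature a variant was compiled with must be present on the host, otherwise the variant reports 0 (unusable); when all are present, the function accumulates fixed power-of-two weights, so a variant built with more ISA extensions always outranks a subset build. A standalone worked example of that arithmetic (the weights mirror the function above; the main harness is mine):

// Worked example of the variant-scoring arithmetic: fixed powers of two,
// so broader builds always score higher than subset builds.
#include <cstdio>

int main() {
    int score = 0;
    score += 1 << 2;  // GGML_SSE42
    score += 1 << 4;  // GGML_AVX
    score += 1 << 5;  // GGML_AVX2
    std::printf("avx2 variant score: %d\n", score);  // prints 52
    // An AVX-512 variant adds at least 1<<7 = 128 on top of these weights,
    // so it wins whenever the CPU reports the required AVX-512 features.
    return 0;
}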
- */ - #define GGML_COMMON_IMPL_C #include "ggml-common.h" diff --git a/llama/ggml-cpu-quants.h b/ml/backend/ggml/ggml/src/ggml-cpu/ggml-cpu-quants.h similarity index 80% rename from llama/ggml-cpu-quants.h rename to ml/backend/ggml/ggml/src/ggml-cpu/ggml-cpu-quants.h index e2cdf03e..e33d9d47 100644 --- a/llama/ggml-cpu-quants.h +++ b/ml/backend/ggml/ggml/src/ggml-cpu/ggml-cpu-quants.h @@ -1,29 +1,3 @@ -/** - * llama.cpp - commit 46e3556e01b824e52395fb050b29804b6cff2a7c - do not edit this file - * - * MIT License - * - * Copyright (c) 2023-2024 The ggml authors - * - * Permission is hereby granted, free of charge, to any person obtaining a copy - * of this software and associated documentation files (the "Software"), to deal - * in the Software without restriction, including without limitation the rights - * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell - * copies of the Software, and to permit persons to whom the Software is - * furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in all - * copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. - */ - #pragma once #define GGML_COMMON_DECL_C diff --git a/llama/ggml-cpu-traits.cpp b/ml/backend/ggml/ggml/src/ggml-cpu/ggml-cpu-traits.cpp similarity index 50% rename from llama/ggml-cpu-traits.cpp rename to ml/backend/ggml/ggml/src/ggml-cpu/ggml-cpu-traits.cpp index 6d7ca024..62a0712d 100644 --- a/llama/ggml-cpu-traits.cpp +++ b/ml/backend/ggml/ggml/src/ggml-cpu/ggml-cpu-traits.cpp @@ -1,29 +1,3 @@ -/** - * llama.cpp - commit 46e3556e01b824e52395fb050b29804b6cff2a7c - do not edit this file - * - * MIT License - * - * Copyright (c) 2023-2024 The ggml authors - * - * Permission is hereby granted, free of charge, to any person obtaining a copy - * of this software and associated documentation files (the "Software"), to deal - * in the Software without restriction, including without limitation the rights - * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell - * copies of the Software, and to permit persons to whom the Software is - * furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in all - * copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. 
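For reference, the ATen-style split used by the balance211 helper in ggml-cpu/amx/common.h above hands each thread a contiguous chunk of ceil(n/nth) items, with the last thread absorbing the remainder. A minimal standalone check of that behavior (plain ints instead of the templated T, no ggml dependencies):

#include <algorithm>
#include <cstdio>

// Standalone copy of the ATen-style partition used by balance211 above.
static void balance211(int n, int nth, int ith, int & n_start, int & n_end) {
    int n_my = (n + nth - 1) / nth;       // div_up(n, nth)
    n_start = ith * n_my;
    n_end   = std::min(n_start + n_my, n);
}

int main() {
    // 10 rows over 4 threads -> [0,3) [3,6) [6,9) [9,10)
    for (int ith = 0; ith < 4; ++ith) {
        int b, e;
        balance211(10, 4, ith, b, e);
        std::printf("thread %d: [%d, %d)\n", ith, b, e);
    }
    return 0;
}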
- */ - #include "ggml-cpu-traits.h" #include "ggml-backend-impl.h" diff --git a/ml/backend/ggml/ggml/src/ggml-cpu/ggml-cpu-traits.h b/ml/backend/ggml/ggml/src/ggml-cpu/ggml-cpu-traits.h new file mode 100644 index 00000000..99a6186b --- /dev/null +++ b/ml/backend/ggml/ggml/src/ggml-cpu/ggml-cpu-traits.h @@ -0,0 +1,38 @@ +#pragma once +#include "ggml-backend-impl.h" +#include "ggml-cpu-impl.h" +#include "ggml.h" + +#ifdef __cplusplus +# include +extern "C" { +#endif + +// return true if op part of extra "accelerator" +bool ggml_cpu_extra_compute_forward(struct ggml_compute_params * params, struct ggml_tensor * op); +bool ggml_cpu_extra_work_size(int n_threads, const struct ggml_tensor * op, size_t * size); + +#ifdef __cplusplus +} + +namespace ggml::cpu { +// register in tensor->extra +class tensor_traits { + public: + virtual ~tensor_traits(); + virtual bool work_size(int n_threads, const struct ggml_tensor * op, size_t & size) = 0; + virtual bool compute_forward(struct ggml_compute_params * params, struct ggml_tensor * op) = 0; +}; + +class extra_buffer_type { + public: + virtual ~extra_buffer_type(); + virtual bool supports_op(ggml_backend_dev_t dev, const struct ggml_tensor * op) = 0; + virtual tensor_traits * get_tensor_traits(const struct ggml_tensor * op) = 0; +}; +} // namespace ggml::cpu + +// implemented in ggml-cpu.cpp. +std::vector & ggml_backend_cpu_get_extra_buffers_type(); + +#endif diff --git a/llama/ggml-cpu.c b/ml/backend/ggml/ggml/src/ggml-cpu/ggml-cpu.c similarity index 99% rename from llama/ggml-cpu.c rename to ml/backend/ggml/ggml/src/ggml-cpu/ggml-cpu.c index 272f03e3..b307d554 100644 --- a/llama/ggml-cpu.c +++ b/ml/backend/ggml/ggml/src/ggml-cpu/ggml-cpu.c @@ -1,29 +1,3 @@ -/** - * llama.cpp - commit 46e3556e01b824e52395fb050b29804b6cff2a7c - do not edit this file - * - * MIT License - * - * Copyright (c) 2023-2024 The ggml authors - * - * Permission is hereby granted, free of charge, to any person obtaining a copy - * of this software and associated documentation files (the "Software"), to deal - * in the Software without restriction, including without limitation the rights - * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell - * copies of the Software, and to permit persons to whom the Software is - * furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in all - * copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. 
- */
-
 #define _CRT_SECURE_NO_DEPRECATE // Disables "unsafe" warnings on Windows
 #define _USE_MATH_DEFINES // For M_PI on MSVC
@@ -36,7 +10,7 @@
 #include "ggml-quants.h"
 #include "ggml-cpu-quants.h"
 #include "ggml-threading.h"
-#include "amx.h"
+#include "amx/amx.h"
 #include "ggml.h"

 #if defined(_MSC_VER) || defined(__MINGW32__)
diff --git a/llama/ggml-cpu.cpp b/ml/backend/ggml/ggml/src/ggml-cpu/ggml-cpu.cpp
similarity index 94%
rename from llama/ggml-cpu.cpp
rename to ml/backend/ggml/ggml/src/ggml-cpu/ggml-cpu.cpp
index 38395101..f11399cc 100644
--- a/llama/ggml-cpu.cpp
+++ b/ml/backend/ggml/ggml/src/ggml-cpu/ggml-cpu.cpp
@@ -1,36 +1,11 @@
-/**
- * llama.cpp - commit 46e3556e01b824e52395fb050b29804b6cff2a7c - do not edit this file
- *
- * MIT License
- *
- * Copyright (c) 2023-2024 The ggml authors
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to deal
- * in the Software without restriction, including without limitation the rights
- * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
- * copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-
 #include "ggml-backend.h"
 #include "ggml-backend-impl.h"
 #include "ggml-cpu.h"
 #include "ggml-cpu-aarch64.h"
 #include "ggml-cpu-traits.h"
 #include "ggml-impl.h"
-#include "amx.h"
+#include "amx/amx.h"
+
 #include <cctype>
 #include <string>
 #include <vector>
diff --git a/ml/backend/ggml/ggml/src/ggml-cpu/llamafile/llamafile.go b/ml/backend/ggml/ggml/src/ggml-cpu/llamafile/llamafile.go
new file mode 100644
index 00000000..09b002ce
--- /dev/null
+++ b/ml/backend/ggml/ggml/src/ggml-cpu/llamafile/llamafile.go
@@ -0,0 +1,5 @@
+package llamafile
+
+// #cgo CXXFLAGS: -std=c++17
+// #cgo CPPFLAGS: -I${SRCDIR}/.. -I${SRCDIR}/../..
-I${SRCDIR}/../../../include +import "C" diff --git a/llama/sgemm.cpp b/ml/backend/ggml/ggml/src/ggml-cpu/llamafile/sgemm.cpp similarity index 100% rename from llama/sgemm.cpp rename to ml/backend/ggml/ggml/src/ggml-cpu/llamafile/sgemm.cpp diff --git a/llama/llamafile/sgemm.h b/ml/backend/ggml/ggml/src/ggml-cpu/llamafile/sgemm.h similarity index 100% rename from llama/llamafile/sgemm.h rename to ml/backend/ggml/ggml/src/ggml-cpu/llamafile/sgemm.h diff --git a/ml/backend/ggml/ggml/src/ggml-cuda/CMakeLists.txt b/ml/backend/ggml/ggml/src/ggml-cuda/CMakeLists.txt new file mode 100644 index 00000000..14761650 --- /dev/null +++ b/ml/backend/ggml/ggml/src/ggml-cuda/CMakeLists.txt @@ -0,0 +1,152 @@ +cmake_minimum_required(VERSION 3.18) # for CMAKE_CUDA_ARCHITECTURES + +find_package(CUDAToolkit) + +if (CUDAToolkit_FOUND) + message(STATUS "CUDA Toolkit found") + + if (NOT DEFINED CMAKE_CUDA_ARCHITECTURES) + # native == GPUs available at build time + # 52 == Maxwell, lowest CUDA 12 standard + # 60 == P100, FP16 CUDA intrinsics + # 61 == Pascal, __dp4a instruction (per-byte integer dot product) + # 70 == V100, FP16 tensor cores + # 75 == Turing, int8 tensor cores + if (GGML_NATIVE AND CUDAToolkit_VERSION VERSION_GREATER_EQUAL "11.6" AND CMAKE_VERSION VERSION_GREATER_EQUAL "3.24") + set(CMAKE_CUDA_ARCHITECTURES "native") + elseif(GGML_CUDA_F16 OR GGML_CUDA_DMMV_F16) + set(CMAKE_CUDA_ARCHITECTURES "60;61;70;75") + else() + set(CMAKE_CUDA_ARCHITECTURES "52;61;70;75") + endif() + endif() + message(STATUS "Using CUDA architectures: ${CMAKE_CUDA_ARCHITECTURES}") + + enable_language(CUDA) + + file(GLOB GGML_HEADERS_CUDA "*.cuh") + list(APPEND GGML_HEADERS_CUDA "../../include/ggml-cuda.h") + + file(GLOB GGML_SOURCES_CUDA "*.cu") + file(GLOB SRCS "template-instances/fattn-wmma*.cu") + list(APPEND GGML_SOURCES_CUDA ${SRCS}) + file(GLOB SRCS "template-instances/mmq*.cu") + list(APPEND GGML_SOURCES_CUDA ${SRCS}) + + if (GGML_CUDA_FA_ALL_QUANTS) + file(GLOB SRCS "template-instances/fattn-vec*.cu") + list(APPEND GGML_SOURCES_CUDA ${SRCS}) + add_compile_definitions(GGML_CUDA_FA_ALL_QUANTS) + else() + file(GLOB SRCS "template-instances/fattn-vec*q4_0-q4_0.cu") + list(APPEND GGML_SOURCES_CUDA ${SRCS}) + file(GLOB SRCS "template-instances/fattn-vec*q8_0-q8_0.cu") + list(APPEND GGML_SOURCES_CUDA ${SRCS}) + file(GLOB SRCS "template-instances/fattn-vec*f16-f16.cu") + list(APPEND GGML_SOURCES_CUDA ${SRCS}) + endif() + + ggml_add_backend_library(ggml-cuda + ${GGML_HEADERS_CUDA} + ${GGML_SOURCES_CUDA} + ) + + add_compile_definitions(GGML_CUDA_PEER_MAX_BATCH_SIZE=${GGML_CUDA_PEER_MAX_BATCH_SIZE}) + + if (GGML_CUDA_GRAPHS) + add_compile_definitions(GGML_CUDA_USE_GRAPHS) + endif() + + if (GGML_CUDA_FORCE_MMQ) + add_compile_definitions(GGML_CUDA_FORCE_MMQ) + endif() + + if (GGML_CUDA_FORCE_CUBLAS) + add_compile_definitions(GGML_CUDA_FORCE_CUBLAS) + endif() + + if (GGML_CUDA_NO_VMM) + add_compile_definitions(GGML_CUDA_NO_VMM) + endif() + + if (GGML_CUDA_F16 OR GGML_CUDA_DMMV_F16) + add_compile_definitions(GGML_CUDA_F16) + endif() + + if (GGML_CUDA_NO_PEER_COPY) + add_compile_definitions(GGML_CUDA_NO_PEER_COPY) + endif() + + if (GGML_STATIC) + if (WIN32) + # As of 12.3.1 CUDA Toolkit for Windows does not offer a static cublas library + target_link_libraries(ggml-cuda PRIVATE CUDA::cudart_static CUDA::cublas CUDA::cublasLt) + else () + target_link_libraries(ggml-cuda PRIVATE CUDA::cudart_static CUDA::cublas_static CUDA::cublasLt_static) + endif() + else() + target_link_libraries(ggml-cuda PRIVATE CUDA::cudart CUDA::cublas 
CUDA::cublasLt)
+    endif()
+
+    if (GGML_CUDA_NO_VMM)
+        # No VMM requested, no need to link directly with the cuda driver lib (libcuda.so)
+    else()
+        target_link_libraries(ggml-cuda PRIVATE CUDA::cuda_driver)
+    endif()
+
+    set(CUDA_CXX_FLAGS "")
+
+    set(CUDA_FLAGS -use_fast_math)
+
+    if (GGML_FATAL_WARNINGS)
+        list(APPEND CUDA_FLAGS -Werror all-warnings)
+    endif()
+
+    if (GGML_ALL_WARNINGS AND NOT MSVC)
+        set(NVCC_CMD ${CMAKE_CUDA_COMPILER} .c)
+        if (NOT CMAKE_CUDA_HOST_COMPILER STREQUAL "")
+            list(APPEND NVCC_CMD -ccbin ${CMAKE_CUDA_HOST_COMPILER})
+        endif()
+
+        execute_process(
+            COMMAND ${NVCC_CMD} -Xcompiler --version
+            OUTPUT_VARIABLE CUDA_CCFULLVER
+            ERROR_QUIET
+        )
+
+        if (NOT CUDA_CCFULLVER MATCHES clang)
+            set(CUDA_CCID "GNU")
+            execute_process(
+                COMMAND ${NVCC_CMD} -Xcompiler "-dumpfullversion -dumpversion"
+                OUTPUT_VARIABLE CUDA_CCVER
+                ERROR_QUIET
+            )
+        else()
+            if (CUDA_CCFULLVER MATCHES Apple)
+                set(CUDA_CCID "AppleClang")
+            else()
+                set(CUDA_CCID "Clang")
+            endif()
+            string(REGEX REPLACE "^.* version ([0-9.]*).*$" "\\1" CUDA_CCVER ${CUDA_CCFULLVER})
+        endif()
+
+        message("-- CUDA host compiler is ${CUDA_CCID} ${CUDA_CCVER}")
+
+        ggml_get_flags(${CUDA_CCID} ${CUDA_CCVER})
+        list(APPEND CUDA_CXX_FLAGS ${CXX_FLAGS} ${GF_CXX_FLAGS})  # This is passed to -Xcompiler later
+    endif()
+
+    if (NOT MSVC)
+        list(APPEND CUDA_CXX_FLAGS -Wno-pedantic)
+    endif()
+
+    list(JOIN CUDA_CXX_FLAGS " " CUDA_CXX_FLAGS_JOINED)  # pass host compiler flags as a single argument
+
+    if (NOT CUDA_CXX_FLAGS_JOINED STREQUAL "")
+        list(APPEND CUDA_FLAGS -Xcompiler ${CUDA_CXX_FLAGS_JOINED})
+    endif()
+
+    target_compile_options(ggml-cuda PRIVATE "$<$<COMPILE_LANGUAGE:CUDA>:${CUDA_FLAGS}>")
+else()
+    message(FATAL_ERROR "CUDA Toolkit not found")
+endif()
diff --git a/llama/ggml-cuda/acc.cu b/ml/backend/ggml/ggml/src/ggml-cuda/acc.cu
similarity index 61%
rename from llama/ggml-cuda/acc.cu
rename to ml/backend/ggml/ggml/src/ggml-cuda/acc.cu
index 9ce47e60..96bfe1c9 100644
--- a/llama/ggml-cuda/acc.cu
+++ b/ml/backend/ggml/ggml/src/ggml-cuda/acc.cu
@@ -1,29 +1,3 @@
-/**
- * llama.cpp - commit 46e3556e01b824e52395fb050b29804b6cff2a7c - do not edit this file
- *
- * MIT License
- *
- * Copyright (c) 2023-2024 The ggml authors
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to deal
- * in the Software without restriction, including without limitation the rights
- * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
- * copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
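On the CMAKE_CUDA_ARCHITECTURES logic above: "native" targets only the GPUs present at build time, while the fixed lists (52/60/61/70/75) trade binary size for portability. To see which entry a given machine needs, the runtime can report the device's compute capability; a minimal host-side check (assumes a CUDA toolkit and visible device):

// Prints the compute capability the driver reports, for comparison with
// the CMAKE_CUDA_ARCHITECTURES list chosen above.
#include <cstdio>
#include <cuda_runtime.h>

int main() {
    cudaDeviceProp prop{};
    if (cudaGetDeviceProperties(&prop, 0) != cudaSuccess) {
        std::printf("no CUDA device visible\n");
        return 1;
    }
    // e.g. 8.6 -> needs "86" (or a lower PTX-compatible arch) in the list
    std::printf("compute capability: %d.%d\n", prop.major, prop.minor);
    return 0;
}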
- */ - #include "acc.cuh" static __global__ void acc_f32(const float * x, const float * y, float * dst, const int ne, diff --git a/ml/backend/ggml/ggml/src/ggml-cuda/acc.cuh b/ml/backend/ggml/ggml/src/ggml-cuda/acc.cuh new file mode 100644 index 00000000..1168ea1b --- /dev/null +++ b/ml/backend/ggml/ggml/src/ggml-cuda/acc.cuh @@ -0,0 +1,5 @@ +#include "common.cuh" + +#define CUDA_ACC_BLOCK_SIZE 256 + +void ggml_cuda_op_acc(ggml_backend_cuda_context & ctx, ggml_tensor * dst); diff --git a/ml/backend/ggml/ggml/src/ggml-cuda/arange.cu b/ml/backend/ggml/ggml/src/ggml-cuda/arange.cu new file mode 100644 index 00000000..b5e495a2 --- /dev/null +++ b/ml/backend/ggml/ggml/src/ggml-cuda/arange.cu @@ -0,0 +1,34 @@ +#include "arange.cuh" + +static __global__ void arange_f32(float * dst, const int ne0, const float start, const float step) { + // blockIDx.x: idx of ne0 / BLOCK_SIZE + int nidx = threadIdx.x + blockIdx.x * blockDim.x; + if (nidx >= ne0) { + return; + } + dst[nidx] = start + step * nidx; +} + +static void arange_f32_cuda(float * dst, const int ne0, const float start, const float step, cudaStream_t stream) { + int num_blocks = (ne0 + CUDA_ARANGE_BLOCK_SIZE - 1) / CUDA_ARANGE_BLOCK_SIZE; + arange_f32<<>>(dst, ne0, start, step); +} + +void ggml_cuda_op_arange(ggml_backend_cuda_context & ctx, ggml_tensor * dst) { + float * dst_d = (float *)dst->data; + cudaStream_t stream = ctx.stream(); + + GGML_ASSERT(dst->type == GGML_TYPE_F32); + + float start; + float stop; + float step; + memcpy(&start, (float *)dst->op_params + 0, sizeof(float)); + memcpy(&stop, (float *)dst->op_params + 1, sizeof(float)); + memcpy(&step, (float *)dst->op_params + 2, sizeof(float)); + + int64_t steps = (int64_t)ceil((stop - start) / step); + GGML_ASSERT(ggml_nelements(dst) == steps); + + arange_f32_cuda(dst_d, dst->ne[0], start, step, stream); +} diff --git a/ml/backend/ggml/ggml/src/ggml-cuda/arange.cuh b/ml/backend/ggml/ggml/src/ggml-cuda/arange.cuh new file mode 100644 index 00000000..41e74fdf --- /dev/null +++ b/ml/backend/ggml/ggml/src/ggml-cuda/arange.cuh @@ -0,0 +1,5 @@ +#include "common.cuh" + +#define CUDA_ARANGE_BLOCK_SIZE 256 + +void ggml_cuda_op_arange(ggml_backend_cuda_context & ctx, ggml_tensor * dst); diff --git a/llama/ggml-cuda/argmax.cu b/ml/backend/ggml/ggml/src/ggml-cuda/argmax.cu similarity index 69% rename from llama/ggml-cuda/argmax.cu rename to ml/backend/ggml/ggml/src/ggml-cuda/argmax.cu index 8bbfd7c0..5340eedc 100644 --- a/llama/ggml-cuda/argmax.cu +++ b/ml/backend/ggml/ggml/src/ggml-cuda/argmax.cu @@ -1,29 +1,3 @@ -/** - * llama.cpp - commit 46e3556e01b824e52395fb050b29804b6cff2a7c - do not edit this file - * - * MIT License - * - * Copyright (c) 2023-2024 The ggml authors - * - * Permission is hereby granted, free of charge, to any person obtaining a copy - * of this software and associated documentation files (the "Software"), to deal - * in the Software without restriction, including without limitation the rights - * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell - * copies of the Software, and to permit persons to whom the Software is - * furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in all - * copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. - */ - #include #include diff --git a/ml/backend/ggml/ggml/src/ggml-cuda/argmax.cuh b/ml/backend/ggml/ggml/src/ggml-cuda/argmax.cuh new file mode 100644 index 00000000..5b7223ad --- /dev/null +++ b/ml/backend/ggml/ggml/src/ggml-cuda/argmax.cuh @@ -0,0 +1,3 @@ +#include "common.cuh" + +void ggml_cuda_argmax(ggml_backend_cuda_context & ctx, ggml_tensor * dst); diff --git a/llama/ggml-cuda/argsort.cu b/ml/backend/ggml/ggml/src/ggml-cuda/argsort.cu similarity index 73% rename from llama/ggml-cuda/argsort.cu rename to ml/backend/ggml/ggml/src/ggml-cuda/argsort.cu index d9aaaa13..607ded85 100644 --- a/llama/ggml-cuda/argsort.cu +++ b/ml/backend/ggml/ggml/src/ggml-cuda/argsort.cu @@ -1,29 +1,3 @@ -/** - * llama.cpp - commit 46e3556e01b824e52395fb050b29804b6cff2a7c - do not edit this file - * - * MIT License - * - * Copyright (c) 2023-2024 The ggml authors - * - * Permission is hereby granted, free of charge, to any person obtaining a copy - * of this software and associated documentation files (the "Software"), to deal - * in the Software without restriction, including without limitation the rights - * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell - * copies of the Software, and to permit persons to whom the Software is - * furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in all - * copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. 
- */ - #include "argsort.cuh" template diff --git a/ml/backend/ggml/ggml/src/ggml-cuda/argsort.cuh b/ml/backend/ggml/ggml/src/ggml-cuda/argsort.cuh new file mode 100644 index 00000000..68a00154 --- /dev/null +++ b/ml/backend/ggml/ggml/src/ggml-cuda/argsort.cuh @@ -0,0 +1,3 @@ +#include "common.cuh" + +void ggml_cuda_op_argsort(ggml_backend_cuda_context & ctx, ggml_tensor * dst); diff --git a/llama/ggml-cuda/binbcast.cu b/ml/backend/ggml/ggml/src/ggml-cuda/binbcast.cu similarity index 91% rename from llama/ggml-cuda/binbcast.cu rename to ml/backend/ggml/ggml/src/ggml-cuda/binbcast.cu index 40b9fcbe..c7b6be4e 100644 --- a/llama/ggml-cuda/binbcast.cu +++ b/ml/backend/ggml/ggml/src/ggml-cuda/binbcast.cu @@ -1,29 +1,3 @@ -/** - * llama.cpp - commit 46e3556e01b824e52395fb050b29804b6cff2a7c - do not edit this file - * - * MIT License - * - * Copyright (c) 2023-2024 The ggml authors - * - * Permission is hereby granted, free of charge, to any person obtaining a copy - * of this software and associated documentation files (the "Software"), to deal - * in the Software without restriction, including without limitation the rights - * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell - * copies of the Software, and to permit persons to whom the Software is - * furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in all - * copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. - */ - #include "binbcast.cuh" #include diff --git a/ml/backend/ggml/ggml/src/ggml-cuda/binbcast.cuh b/ml/backend/ggml/ggml/src/ggml-cuda/binbcast.cuh new file mode 100644 index 00000000..3ac1c9b0 --- /dev/null +++ b/ml/backend/ggml/ggml/src/ggml-cuda/binbcast.cuh @@ -0,0 +1,9 @@ +#include "common.cuh" + +void ggml_cuda_op_repeat(ggml_backend_cuda_context & ctx, ggml_tensor * dst); +void ggml_cuda_op_add(ggml_backend_cuda_context & ctx, ggml_tensor * dst); +void ggml_cuda_op_sub(ggml_backend_cuda_context & ctx, ggml_tensor * dst); +void ggml_cuda_op_mul(ggml_backend_cuda_context & ctx, ggml_tensor * dst); +void ggml_cuda_op_div(ggml_backend_cuda_context & ctx, ggml_tensor * dst); + +void ggml_cuda_op_repeat_back(ggml_backend_cuda_context & ctx, ggml_tensor * dst); diff --git a/ml/backend/ggml/ggml/src/ggml-cuda/clamp.cu b/ml/backend/ggml/ggml/src/ggml-cuda/clamp.cu new file mode 100644 index 00000000..8009a3e3 --- /dev/null +++ b/ml/backend/ggml/ggml/src/ggml-cuda/clamp.cu @@ -0,0 +1,34 @@ +#include "clamp.cuh" + +static __global__ void clamp_f32(const float * x, float * dst, const float min, const float max, const int k) { + const int i = blockDim.x*blockIdx.x + threadIdx.x; + + if (i >= k) { + return; + } + + dst[i] = x[i] < min ? min : (x[i] > max ? 
max : x[i]); +} + +static void clamp_f32_cuda(const float * x, float * dst, const float min, const float max, const int k, cudaStream_t stream) { + const int num_blocks = (k + CUDA_CLAMP_BLOCK_SIZE - 1) / CUDA_CLAMP_BLOCK_SIZE; + clamp_f32<<>>(x, dst, min, max, k); +} + + +void ggml_cuda_op_clamp(ggml_backend_cuda_context & ctx, ggml_tensor * dst) { + const ggml_tensor * src0 = dst->src[0]; + const float * src0_d = (const float *)src0->data; + float * dst_d = (float *)dst->data; + cudaStream_t stream = ctx.stream(); + + GGML_ASSERT(src0->type == GGML_TYPE_F32); + GGML_ASSERT( dst->type == GGML_TYPE_F32); + + float min; + float max; + memcpy(&min, dst->op_params, sizeof(float)); + memcpy(&max, (float *) dst->op_params + 1, sizeof(float)); + + clamp_f32_cuda(src0_d, dst_d, min, max, ggml_nelements(src0), stream); +} diff --git a/ml/backend/ggml/ggml/src/ggml-cuda/clamp.cuh b/ml/backend/ggml/ggml/src/ggml-cuda/clamp.cuh new file mode 100644 index 00000000..7f9559dd --- /dev/null +++ b/ml/backend/ggml/ggml/src/ggml-cuda/clamp.cuh @@ -0,0 +1,5 @@ +#include "common.cuh" + +#define CUDA_CLAMP_BLOCK_SIZE 256 + +void ggml_cuda_op_clamp(ggml_backend_cuda_context & ctx, ggml_tensor * dst); diff --git a/llama/ggml-cuda/common.cuh b/ml/backend/ggml/ggml/src/ggml-cuda/common.cuh similarity index 94% rename from llama/ggml-cuda/common.cuh rename to ml/backend/ggml/ggml/src/ggml-cuda/common.cuh index 2a40b849..2c0a5622 100644 --- a/llama/ggml-cuda/common.cuh +++ b/ml/backend/ggml/ggml/src/ggml-cuda/common.cuh @@ -1,29 +1,3 @@ -/** - * llama.cpp - commit 46e3556e01b824e52395fb050b29804b6cff2a7c - do not edit this file - * - * MIT License - * - * Copyright (c) 2023-2024 The ggml authors - * - * Permission is hereby granted, free of charge, to any person obtaining a copy - * of this software and associated documentation files (the "Software"), to deal - * in the Software without restriction, including without limitation the rights - * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell - * copies of the Software, and to permit persons to whom the Software is - * furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in all - * copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. 
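The arange and clamp kernels above share the element-wise launch pattern: scalar operands travel in dst->op_params, and the grid is sized as ceil(k / block). The arithmetic, checked on the host (plain C++, no CUDA required):

// The launch math used by arange/clamp above: a 1000-element clamp with
// 256-wide blocks needs ceil(1000/256) = 4 blocks.
#include <cstdio>

int main() {
    const int k = 1000;
    const int block = 256;                          // CUDA_CLAMP_BLOCK_SIZE
    const int num_blocks = (k + block - 1) / block; // integer ceil-divide
    std::printf("%d blocks x %d threads = %d slots (>= %d elements)\n",
                num_blocks, block, num_blocks * block, k);  // 4 x 256 = 1024
    return 0;
}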
- */ - #pragma once #include "ggml.h" diff --git a/llama/ggml-cuda/concat.cu b/ml/backend/ggml/ggml/src/ggml-cuda/concat.cu similarity index 85% rename from llama/ggml-cuda/concat.cu rename to ml/backend/ggml/ggml/src/ggml-cuda/concat.cu index d8c47391..5eb9f08d 100644 --- a/llama/ggml-cuda/concat.cu +++ b/ml/backend/ggml/ggml/src/ggml-cuda/concat.cu @@ -1,29 +1,3 @@ -/** - * llama.cpp - commit 46e3556e01b824e52395fb050b29804b6cff2a7c - do not edit this file - * - * MIT License - * - * Copyright (c) 2023-2024 The ggml authors - * - * Permission is hereby granted, free of charge, to any person obtaining a copy - * of this software and associated documentation files (the "Software"), to deal - * in the Software without restriction, including without limitation the rights - * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell - * copies of the Software, and to permit persons to whom the Software is - * furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in all - * copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. - */ - #include "concat.cuh" // contiguous kernels diff --git a/ml/backend/ggml/ggml/src/ggml-cuda/concat.cuh b/ml/backend/ggml/ggml/src/ggml-cuda/concat.cuh new file mode 100644 index 00000000..aa506a05 --- /dev/null +++ b/ml/backend/ggml/ggml/src/ggml-cuda/concat.cuh @@ -0,0 +1,5 @@ +#include "common.cuh" + +#define CUDA_CONCAT_BLOCK_SIZE 256 + +void ggml_cuda_op_concat(ggml_backend_cuda_context & ctx, ggml_tensor * dst); diff --git a/llama/ggml-cuda/conv-transpose-1d.cu b/ml/backend/ggml/ggml/src/ggml-cuda/conv-transpose-1d.cu similarity index 72% rename from llama/ggml-cuda/conv-transpose-1d.cu rename to ml/backend/ggml/ggml/src/ggml-cuda/conv-transpose-1d.cu index da53e946..b1e94d6f 100644 --- a/llama/ggml-cuda/conv-transpose-1d.cu +++ b/ml/backend/ggml/ggml/src/ggml-cuda/conv-transpose-1d.cu @@ -1,29 +1,3 @@ -/** - * llama.cpp - commit 46e3556e01b824e52395fb050b29804b6cff2a7c - do not edit this file - * - * MIT License - * - * Copyright (c) 2023-2024 The ggml authors - * - * Permission is hereby granted, free of charge, to any person obtaining a copy - * of this software and associated documentation files (the "Software"), to deal - * in the Software without restriction, including without limitation the rights - * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell - * copies of the Software, and to permit persons to whom the Software is - * furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in all - * copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. - */ - #include "conv-transpose-1d.cuh" static __global__ void conv_transpose_1d_kernel( diff --git a/ml/backend/ggml/ggml/src/ggml-cuda/conv-transpose-1d.cuh b/ml/backend/ggml/ggml/src/ggml-cuda/conv-transpose-1d.cuh new file mode 100644 index 00000000..6c2cf666 --- /dev/null +++ b/ml/backend/ggml/ggml/src/ggml-cuda/conv-transpose-1d.cuh @@ -0,0 +1,5 @@ +#include "common.cuh" + +#define CUDA_CONV_TRANPOSE_1D_BLOCK_SIZE 256 + +void ggml_cuda_op_conv_transpose_1d(ggml_backend_cuda_context & ctx, ggml_tensor * dst); diff --git a/llama/ggml-cuda/convert.cu b/ml/backend/ggml/ggml/src/ggml-cuda/convert.cu similarity index 95% rename from llama/ggml-cuda/convert.cu rename to ml/backend/ggml/ggml/src/ggml-cuda/convert.cu index 6ddb87fc..5b0dface 100644 --- a/llama/ggml-cuda/convert.cu +++ b/ml/backend/ggml/ggml/src/ggml-cuda/convert.cu @@ -1,29 +1,3 @@ -/** - * llama.cpp - commit 46e3556e01b824e52395fb050b29804b6cff2a7c - do not edit this file - * - * MIT License - * - * Copyright (c) 2023-2024 The ggml authors - * - * Permission is hereby granted, free of charge, to any person obtaining a copy - * of this software and associated documentation files (the "Software"), to deal - * in the Software without restriction, including without limitation the rights - * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell - * copies of the Software, and to permit persons to whom the Software is - * furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in all - * copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. 
- */ - #include "convert.cuh" #include "dequantize.cuh" diff --git a/ml/backend/ggml/ggml/src/ggml-cuda/convert.cuh b/ml/backend/ggml/ggml/src/ggml-cuda/convert.cuh new file mode 100644 index 00000000..5394be9f --- /dev/null +++ b/ml/backend/ggml/ggml/src/ggml-cuda/convert.cuh @@ -0,0 +1,13 @@ +#include "common.cuh" + +#define CUDA_DEQUANTIZE_BLOCK_SIZE 256 + +template +using to_t_cuda_t = void (*)(const void * __restrict__ x, T * __restrict__ y, int64_t k, cudaStream_t stream); + +typedef to_t_cuda_t to_fp32_cuda_t; +typedef to_t_cuda_t to_fp16_cuda_t; + +to_fp16_cuda_t ggml_get_to_fp16_cuda(ggml_type type); + +to_fp32_cuda_t ggml_get_to_fp32_cuda(ggml_type type); diff --git a/llama/ggml-cuda/count-equal.cu b/ml/backend/ggml/ggml/src/ggml-cuda/count-equal.cu similarity index 62% rename from llama/ggml-cuda/count-equal.cu rename to ml/backend/ggml/ggml/src/ggml-cuda/count-equal.cu index e4496fc1..08898115 100644 --- a/llama/ggml-cuda/count-equal.cu +++ b/ml/backend/ggml/ggml/src/ggml-cuda/count-equal.cu @@ -1,29 +1,3 @@ -/** - * llama.cpp - commit 46e3556e01b824e52395fb050b29804b6cff2a7c - do not edit this file - * - * MIT License - * - * Copyright (c) 2023-2024 The ggml authors - * - * Permission is hereby granted, free of charge, to any person obtaining a copy - * of this software and associated documentation files (the "Software"), to deal - * in the Software without restriction, including without limitation the rights - * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell - * copies of the Software, and to permit persons to whom the Software is - * furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in all - * copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. 
- */ - #include "common.cuh" #include "count-equal.cuh" diff --git a/ml/backend/ggml/ggml/src/ggml-cuda/count-equal.cuh b/ml/backend/ggml/ggml/src/ggml-cuda/count-equal.cuh new file mode 100644 index 00000000..8467da79 --- /dev/null +++ b/ml/backend/ggml/ggml/src/ggml-cuda/count-equal.cuh @@ -0,0 +1,5 @@ +#include "common.cuh" + +#define CUDA_COUNT_EQUAL_CHUNK_SIZE 128 + +void ggml_cuda_count_equal(ggml_backend_cuda_context & ctx, ggml_tensor * dst); diff --git a/llama/ggml-cuda/cpy.cu b/ml/backend/ggml/ggml/src/ggml-cuda/cpy.cu similarity index 94% rename from llama/ggml-cuda/cpy.cu rename to ml/backend/ggml/ggml/src/ggml-cuda/cpy.cu index ffdef8c4..54c0f66d 100644 --- a/llama/ggml-cuda/cpy.cu +++ b/ml/backend/ggml/ggml/src/ggml-cuda/cpy.cu @@ -1,29 +1,3 @@ -/** - * llama.cpp - commit 46e3556e01b824e52395fb050b29804b6cff2a7c - do not edit this file - * - * MIT License - * - * Copyright (c) 2023-2024 The ggml authors - * - * Permission is hereby granted, free of charge, to any person obtaining a copy - * of this software and associated documentation files (the "Software"), to deal - * in the Software without restriction, including without limitation the rights - * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell - * copies of the Software, and to permit persons to whom the Software is - * furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in all - * copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. 
- */ - #include "cpy.cuh" typedef void (*cpy_kernel_t)(const char * cx, char * cdst); diff --git a/ml/backend/ggml/ggml/src/ggml-cuda/cpy.cuh b/ml/backend/ggml/ggml/src/ggml-cuda/cpy.cuh new file mode 100644 index 00000000..28b06cdd --- /dev/null +++ b/ml/backend/ggml/ggml/src/ggml-cuda/cpy.cuh @@ -0,0 +1,9 @@ +#include "common.cuh" + +#define CUDA_CPY_BLOCK_SIZE 64 + +void ggml_cuda_cpy(ggml_backend_cuda_context & ctx, const ggml_tensor * src0, ggml_tensor * src1); + +void ggml_cuda_dup(ggml_backend_cuda_context & ctx, ggml_tensor * dst); + +void* ggml_cuda_cpy_fn(const ggml_tensor * src0, ggml_tensor * src1); diff --git a/llama/ggml-cuda/cross-entropy-loss.cu b/ml/backend/ggml/ggml/src/ggml-cuda/cross-entropy-loss.cu similarity index 82% rename from llama/ggml-cuda/cross-entropy-loss.cu rename to ml/backend/ggml/ggml/src/ggml-cuda/cross-entropy-loss.cu index 5bfddc79..ed09406a 100644 --- a/llama/ggml-cuda/cross-entropy-loss.cu +++ b/ml/backend/ggml/ggml/src/ggml-cuda/cross-entropy-loss.cu @@ -1,29 +1,3 @@ -/** - * llama.cpp - commit 46e3556e01b824e52395fb050b29804b6cff2a7c - do not edit this file - * - * MIT License - * - * Copyright (c) 2023-2024 The ggml authors - * - * Permission is hereby granted, free of charge, to any person obtaining a copy - * of this software and associated documentation files (the "Software"), to deal - * in the Software without restriction, including without limitation the rights - * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell - * copies of the Software, and to permit persons to whom the Software is - * furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in all - * copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. 
- */ - #include "common.cuh" #include "cross-entropy-loss.cuh" #include "sum.cuh" diff --git a/ml/backend/ggml/ggml/src/ggml-cuda/cross-entropy-loss.cuh b/ml/backend/ggml/ggml/src/ggml-cuda/cross-entropy-loss.cuh new file mode 100644 index 00000000..9ec7152f --- /dev/null +++ b/ml/backend/ggml/ggml/src/ggml-cuda/cross-entropy-loss.cuh @@ -0,0 +1,7 @@ +#include "common.cuh" + +#define CUDA_CROSS_ENTROPY_LOSS_BLOCK_SIZE 256 + +void ggml_cuda_cross_entropy_loss(ggml_backend_cuda_context & ctx, ggml_tensor * dst); + +void ggml_cuda_cross_entropy_loss_back(ggml_backend_cuda_context & ctx, ggml_tensor * dst); diff --git a/llama/ggml-cuda/dequantize.cuh b/ml/backend/ggml/ggml/src/ggml-cuda/dequantize.cuh similarity index 68% rename from llama/ggml-cuda/dequantize.cuh rename to ml/backend/ggml/ggml/src/ggml-cuda/dequantize.cuh index 016de0db..bd3c2d9d 100644 --- a/llama/ggml-cuda/dequantize.cuh +++ b/ml/backend/ggml/ggml/src/ggml-cuda/dequantize.cuh @@ -1,29 +1,3 @@ -/** - * llama.cpp - commit 46e3556e01b824e52395fb050b29804b6cff2a7c - do not edit this file - * - * MIT License - * - * Copyright (c) 2023-2024 The ggml authors - * - * Permission is hereby granted, free of charge, to any person obtaining a copy - * of this software and associated documentation files (the "Software"), to deal - * in the Software without restriction, including without limitation the rights - * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell - * copies of the Software, and to permit persons to whom the Software is - * furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in all - * copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. - */ - #include "common.cuh" static __device__ __forceinline__ void dequantize_q4_0(const void * vx, const int64_t ib, const int iqs, dfloat2 & v){ diff --git a/llama/ggml-cuda/diagmask.cu b/ml/backend/ggml/ggml/src/ggml-cuda/diagmask.cu similarity index 58% rename from llama/ggml-cuda/diagmask.cu rename to ml/backend/ggml/ggml/src/ggml-cuda/diagmask.cu index e80a953a..4b713ba2 100644 --- a/llama/ggml-cuda/diagmask.cu +++ b/ml/backend/ggml/ggml/src/ggml-cuda/diagmask.cu @@ -1,29 +1,3 @@ -/** - * llama.cpp - commit 46e3556e01b824e52395fb050b29804b6cff2a7c - do not edit this file - * - * MIT License - * - * Copyright (c) 2023-2024 The ggml authors - * - * Permission is hereby granted, free of charge, to any person obtaining a copy - * of this software and associated documentation files (the "Software"), to deal - * in the Software without restriction, including without limitation the rights - * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell - * copies of the Software, and to permit persons to whom the Software is - * furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in all - * copies or substantial portions of the Software. 
- * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. - */ - #include "diagmask.cuh" static __global__ void diag_mask_inf_f32(const float * x, float * dst, const int ncols, const int rows_per_channel, const int n_past) { diff --git a/ml/backend/ggml/ggml/src/ggml-cuda/diagmask.cuh b/ml/backend/ggml/ggml/src/ggml-cuda/diagmask.cuh new file mode 100644 index 00000000..6cdbef17 --- /dev/null +++ b/ml/backend/ggml/ggml/src/ggml-cuda/diagmask.cuh @@ -0,0 +1,5 @@ +#include "common.cuh" + +#define CUDA_DIAG_MASK_INF_BLOCK_SIZE 32 + +void ggml_cuda_op_diag_mask_inf(ggml_backend_cuda_context & ctx, ggml_tensor * dst); diff --git a/llama/ggml-cuda/fattn-common.cuh b/ml/backend/ggml/ggml/src/ggml-cuda/fattn-common.cuh similarity index 95% rename from llama/ggml-cuda/fattn-common.cuh rename to ml/backend/ggml/ggml/src/ggml-cuda/fattn-common.cuh index 011654d3..ee9752da 100644 --- a/llama/ggml-cuda/fattn-common.cuh +++ b/ml/backend/ggml/ggml/src/ggml-cuda/fattn-common.cuh @@ -1,29 +1,3 @@ -/** - * llama.cpp - commit 46e3556e01b824e52395fb050b29804b6cff2a7c - do not edit this file - * - * MIT License - * - * Copyright (c) 2023-2024 The ggml authors - * - * Permission is hereby granted, free of charge, to any person obtaining a copy - * of this software and associated documentation files (the "Software"), to deal - * in the Software without restriction, including without limitation the rights - * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell - * copies of the Software, and to permit persons to whom the Software is - * furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in all - * copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. 
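The diag_mask_inf_f32 kernel above implements causal masking for attention scores: before the softmax, every position a token must not attend to (columns past n_past plus the token's own row) is pushed toward negative infinity so it contributes nothing. A simplified single-matrix sketch of that rule; the real kernel also folds in rows_per_channel to handle batched channels.

#include <math_constants.h> // CUDART_INF_F

// Keep x where col <= n_past + row, otherwise force -inf.
static __global__ void diag_mask_inf(const float * x, float * dst,
                                     const int ncols, const int n_past) {
    const int col = blockDim.x*blockIdx.x + threadIdx.x;
    const int row = blockIdx.y;
    if (col >= ncols) {
        return;
    }
    const int i = row*ncols + col;
    dst[i] = col > n_past + row ? -CUDART_INF_F : x[i];
}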
- */ - #pragma once #include "common.cuh" diff --git a/llama/ggml-cuda/fattn-tile-f16.cu b/ml/backend/ggml/ggml/src/ggml-cuda/fattn-tile-f16.cu similarity index 91% rename from llama/ggml-cuda/fattn-tile-f16.cu rename to ml/backend/ggml/ggml/src/ggml-cuda/fattn-tile-f16.cu index 72d265ef..4d314dac 100644 --- a/llama/ggml-cuda/fattn-tile-f16.cu +++ b/ml/backend/ggml/ggml/src/ggml-cuda/fattn-tile-f16.cu @@ -1,29 +1,3 @@ -/** - * llama.cpp - commit 46e3556e01b824e52395fb050b29804b6cff2a7c - do not edit this file - * - * MIT License - * - * Copyright (c) 2023-2024 The ggml authors - * - * Permission is hereby granted, free of charge, to any person obtaining a copy - * of this software and associated documentation files (the "Software"), to deal - * in the Software without restriction, including without limitation the rights - * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell - * copies of the Software, and to permit persons to whom the Software is - * furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in all - * copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. - */ - #include "common.cuh" #include "fattn-common.cuh" #include "fattn-tile-f16.cuh" diff --git a/ml/backend/ggml/ggml/src/ggml-cuda/fattn-tile-f16.cuh b/ml/backend/ggml/ggml/src/ggml-cuda/fattn-tile-f16.cuh new file mode 100644 index 00000000..ffc58784 --- /dev/null +++ b/ml/backend/ggml/ggml/src/ggml-cuda/fattn-tile-f16.cuh @@ -0,0 +1,3 @@ +#include "common.cuh" + +void ggml_cuda_flash_attn_ext_tile_f16(ggml_backend_cuda_context & ctx, ggml_tensor * dst); diff --git a/llama/ggml-cuda/fattn-tile-f32.cu b/ml/backend/ggml/ggml/src/ggml-cuda/fattn-tile-f32.cu similarity index 91% rename from llama/ggml-cuda/fattn-tile-f32.cu rename to ml/backend/ggml/ggml/src/ggml-cuda/fattn-tile-f32.cu index 3be1c7a6..bb336044 100644 --- a/llama/ggml-cuda/fattn-tile-f32.cu +++ b/ml/backend/ggml/ggml/src/ggml-cuda/fattn-tile-f32.cu @@ -1,29 +1,3 @@ -/** - * llama.cpp - commit 46e3556e01b824e52395fb050b29804b6cff2a7c - do not edit this file - * - * MIT License - * - * Copyright (c) 2023-2024 The ggml authors - * - * Permission is hereby granted, free of charge, to any person obtaining a copy - * of this software and associated documentation files (the "Software"), to deal - * in the Software without restriction, including without limitation the rights - * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell - * copies of the Software, and to permit persons to whom the Software is - * furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in all - * copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. - */ - #include "common.cuh" #include "fattn-common.cuh" #include "fattn-tile-f32.cuh" diff --git a/ml/backend/ggml/ggml/src/ggml-cuda/fattn-tile-f32.cuh b/ml/backend/ggml/ggml/src/ggml-cuda/fattn-tile-f32.cuh new file mode 100644 index 00000000..b1c546c8 --- /dev/null +++ b/ml/backend/ggml/ggml/src/ggml-cuda/fattn-tile-f32.cuh @@ -0,0 +1,3 @@ +#include "common.cuh" + +void ggml_cuda_flash_attn_ext_tile_f32(ggml_backend_cuda_context & ctx, ggml_tensor * dst); diff --git a/llama/ggml-cuda/fattn-vec-f16.cuh b/ml/backend/ggml/ggml/src/ggml-cuda/fattn-vec-f16.cuh similarity index 93% rename from llama/ggml-cuda/fattn-vec-f16.cuh rename to ml/backend/ggml/ggml/src/ggml-cuda/fattn-vec-f16.cuh index 334a05c3..34a2992c 100644 --- a/llama/ggml-cuda/fattn-vec-f16.cuh +++ b/ml/backend/ggml/ggml/src/ggml-cuda/fattn-vec-f16.cuh @@ -1,29 +1,3 @@ -/** - * llama.cpp - commit 46e3556e01b824e52395fb050b29804b6cff2a7c - do not edit this file - * - * MIT License - * - * Copyright (c) 2023-2024 The ggml authors - * - * Permission is hereby granted, free of charge, to any person obtaining a copy - * of this software and associated documentation files (the "Software"), to deal - * in the Software without restriction, including without limitation the rights - * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell - * copies of the Software, and to permit persons to whom the Software is - * furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in all - * copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. 
- */ - #include "common.cuh" #include "fattn-common.cuh" diff --git a/llama/ggml-cuda/fattn-vec-f32.cuh b/ml/backend/ggml/ggml/src/ggml-cuda/fattn-vec-f32.cuh similarity index 92% rename from llama/ggml-cuda/fattn-vec-f32.cuh rename to ml/backend/ggml/ggml/src/ggml-cuda/fattn-vec-f32.cuh index 0bb23000..a28fc8b7 100644 --- a/llama/ggml-cuda/fattn-vec-f32.cuh +++ b/ml/backend/ggml/ggml/src/ggml-cuda/fattn-vec-f32.cuh @@ -1,29 +1,3 @@ -/** - * llama.cpp - commit 46e3556e01b824e52395fb050b29804b6cff2a7c - do not edit this file - * - * MIT License - * - * Copyright (c) 2023-2024 The ggml authors - * - * Permission is hereby granted, free of charge, to any person obtaining a copy - * of this software and associated documentation files (the "Software"), to deal - * in the Software without restriction, including without limitation the rights - * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell - * copies of the Software, and to permit persons to whom the Software is - * furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in all - * copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. - */ - #include "common.cuh" #include "fattn-common.cuh" diff --git a/llama/ggml-cuda/fattn-wmma-f16.cuh b/ml/backend/ggml/ggml/src/ggml-cuda/fattn-wmma-f16.cuh similarity index 94% rename from llama/ggml-cuda/fattn-wmma-f16.cuh rename to ml/backend/ggml/ggml/src/ggml-cuda/fattn-wmma-f16.cuh index d82984f4..860d0e6d 100644 --- a/llama/ggml-cuda/fattn-wmma-f16.cuh +++ b/ml/backend/ggml/ggml/src/ggml-cuda/fattn-wmma-f16.cuh @@ -1,29 +1,3 @@ -/** - * llama.cpp - commit 46e3556e01b824e52395fb050b29804b6cff2a7c - do not edit this file - * - * MIT License - * - * Copyright (c) 2023-2024 The ggml authors - * - * Permission is hereby granted, free of charge, to any person obtaining a copy - * of this software and associated documentation files (the "Software"), to deal - * in the Software without restriction, including without limitation the rights - * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell - * copies of the Software, and to permit persons to whom the Software is - * furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in all - * copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. 
- */ - #include "common.cuh" #include "fattn-common.cuh" diff --git a/llama/ggml-cuda/fattn.cu b/ml/backend/ggml/ggml/src/ggml-cuda/fattn.cu similarity index 92% rename from llama/ggml-cuda/fattn.cu rename to ml/backend/ggml/ggml/src/ggml-cuda/fattn.cu index 4828e9d8..0b26b0f8 100644 --- a/llama/ggml-cuda/fattn.cu +++ b/ml/backend/ggml/ggml/src/ggml-cuda/fattn.cu @@ -1,29 +1,3 @@ -/** - * llama.cpp - commit 46e3556e01b824e52395fb050b29804b6cff2a7c - do not edit this file - * - * MIT License - * - * Copyright (c) 2023-2024 The ggml authors - * - * Permission is hereby granted, free of charge, to any person obtaining a copy - * of this software and associated documentation files (the "Software"), to deal - * in the Software without restriction, including without limitation the rights - * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell - * copies of the Software, and to permit persons to whom the Software is - * furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in all - * copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. - */ - #include "common.cuh" #include "fattn-common.cuh" #include "fattn-tile-f16.cuh" diff --git a/ml/backend/ggml/ggml/src/ggml-cuda/fattn.cuh b/ml/backend/ggml/ggml/src/ggml-cuda/fattn.cuh new file mode 100644 index 00000000..ad3ca7a8 --- /dev/null +++ b/ml/backend/ggml/ggml/src/ggml-cuda/fattn.cuh @@ -0,0 +1,3 @@ +#include "common.cuh" + +void ggml_cuda_flash_attn_ext(ggml_backend_cuda_context & ctx, ggml_tensor * dst); diff --git a/llama/ggml-cuda/getrows.cu b/ml/backend/ggml/ggml/src/ggml-cuda/getrows.cu similarity index 84% rename from llama/ggml-cuda/getrows.cu rename to ml/backend/ggml/ggml/src/ggml-cuda/getrows.cu index 6cf1e516..4c370323 100644 --- a/llama/ggml-cuda/getrows.cu +++ b/ml/backend/ggml/ggml/src/ggml-cuda/getrows.cu @@ -1,29 +1,3 @@ -/** - * llama.cpp - commit 46e3556e01b824e52395fb050b29804b6cff2a7c - do not edit this file - * - * MIT License - * - * Copyright (c) 2023-2024 The ggml authors - * - * Permission is hereby granted, free of charge, to any person obtaining a copy - * of this software and associated documentation files (the "Software"), to deal - * in the Software without restriction, including without limitation the rights - * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell - * copies of the Software, and to permit persons to whom the Software is - * furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in all - * copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. - */ - #include "getrows.cuh" #include "dequantize.cuh" diff --git a/ml/backend/ggml/ggml/src/ggml-cuda/getrows.cuh b/ml/backend/ggml/ggml/src/ggml-cuda/getrows.cuh new file mode 100644 index 00000000..bbf13023 --- /dev/null +++ b/ml/backend/ggml/ggml/src/ggml-cuda/getrows.cuh @@ -0,0 +1,5 @@ +#include "common.cuh" + +#define CUDA_GET_ROWS_BLOCK_SIZE 256 + +void ggml_cuda_op_get_rows(ggml_backend_cuda_context & ctx, ggml_tensor * dst); diff --git a/llama/ggml-cuda/ggml-cuda.cu b/ml/backend/ggml/ggml/src/ggml-cuda/ggml-cuda.cu similarity index 98% rename from llama/ggml-cuda/ggml-cuda.cu rename to ml/backend/ggml/ggml/src/ggml-cuda/ggml-cuda.cu index 0894fdad..9286f866 100644 --- a/llama/ggml-cuda/ggml-cuda.cu +++ b/ml/backend/ggml/ggml/src/ggml-cuda/ggml-cuda.cu @@ -1,29 +1,3 @@ -/** - * llama.cpp - commit 46e3556e01b824e52395fb050b29804b6cff2a7c - do not edit this file - * - * MIT License - * - * Copyright (c) 2023-2024 The ggml authors - * - * Permission is hereby granted, free of charge, to any person obtaining a copy - * of this software and associated documentation files (the "Software"), to deal - * in the Software without restriction, including without limitation the rights - * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell - * copies of the Software, and to permit persons to whom the Software is - * furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in all - * copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. 
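getrows.cu above implements GGML_OP_GET_ROWS, the gather that powers token-embedding lookup: an index tensor selects whole rows of a source matrix, and quantized sources are dequantized on the fly via dequantize.cuh. A float-only sketch of the core gather, with the quantized path omitted:

#include <cstdint>

// dst[r, :] = src[rows[r], :] -- one block per output row, threads stride the columns.
static __global__ void get_rows_f32(const float * src, const int32_t * rows,
                                    float * dst, const int ncols) {
    const int r = blockIdx.x;
    const int64_t src_row = rows[r];
    for (int c = threadIdx.x; c < ncols; c += blockDim.x) {
        dst[(int64_t)r*ncols + c] = src[src_row*ncols + c];
    }
}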
- */ - #include "ggml-cuda.h" #include "ggml-impl.h" #include "ggml-backend-impl.h" @@ -450,10 +424,7 @@ struct ggml_backend_cuda_buffer_context { static void ggml_backend_cuda_buffer_free_buffer(ggml_backend_buffer_t buffer) { ggml_backend_cuda_buffer_context * ctx = (ggml_backend_cuda_buffer_context *)buffer->context; delete ctx; - - // TODO: this needs to be freed in cuda and hipblas backends because - // the cuda backend implementation compiled with msvc - free(buffer); + delete buffer; } static bool ggml_backend_buffer_is_cuda(ggml_backend_buffer_t buffer) { diff --git a/llama/ggml-cuda/im2col.cu b/ml/backend/ggml/ggml/src/ggml-cuda/im2col.cu similarity index 78% rename from llama/ggml-cuda/im2col.cu rename to ml/backend/ggml/ggml/src/ggml-cuda/im2col.cu index 0ceaa02c..86a54e42 100644 --- a/llama/ggml-cuda/im2col.cu +++ b/ml/backend/ggml/ggml/src/ggml-cuda/im2col.cu @@ -1,29 +1,3 @@ -/** - * llama.cpp - commit 46e3556e01b824e52395fb050b29804b6cff2a7c - do not edit this file - * - * MIT License - * - * Copyright (c) 2023-2024 The ggml authors - * - * Permission is hereby granted, free of charge, to any person obtaining a copy - * of this software and associated documentation files (the "Software"), to deal - * in the Software without restriction, including without limitation the rights - * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell - * copies of the Software, and to permit persons to whom the Software is - * furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in all - * copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. 
- */ - #include "im2col.cuh" template diff --git a/ml/backend/ggml/ggml/src/ggml-cuda/im2col.cuh b/ml/backend/ggml/ggml/src/ggml-cuda/im2col.cuh new file mode 100644 index 00000000..1ce8fae4 --- /dev/null +++ b/ml/backend/ggml/ggml/src/ggml-cuda/im2col.cuh @@ -0,0 +1,5 @@ +#include "common.cuh" + +#define CUDA_IM2COL_BLOCK_SIZE 256 + +void ggml_cuda_op_im2col(ggml_backend_cuda_context & ctx, ggml_tensor * dst); diff --git a/llama/ggml-cuda/mma.cuh b/ml/backend/ggml/ggml/src/ggml-cuda/mma.cuh similarity index 86% rename from llama/ggml-cuda/mma.cuh rename to ml/backend/ggml/ggml/src/ggml-cuda/mma.cuh index 557cdcd1..7d11540a 100644 --- a/llama/ggml-cuda/mma.cuh +++ b/ml/backend/ggml/ggml/src/ggml-cuda/mma.cuh @@ -1,29 +1,3 @@ -/** - * llama.cpp - commit 46e3556e01b824e52395fb050b29804b6cff2a7c - do not edit this file - * - * MIT License - * - * Copyright (c) 2023-2024 The ggml authors - * - * Permission is hereby granted, free of charge, to any person obtaining a copy - * of this software and associated documentation files (the "Software"), to deal - * in the Software without restriction, including without limitation the rights - * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell - * copies of the Software, and to permit persons to whom the Software is - * furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in all - * copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. - */ - #include "common.cuh" struct mma_int_A_I16K4 { diff --git a/llama/ggml-cuda/mmq.cu b/ml/backend/ggml/ggml/src/ggml-cuda/mmq.cu similarity index 80% rename from llama/ggml-cuda/mmq.cu rename to ml/backend/ggml/ggml/src/ggml-cuda/mmq.cu index 0dc63b31..270251df 100644 --- a/llama/ggml-cuda/mmq.cu +++ b/ml/backend/ggml/ggml/src/ggml-cuda/mmq.cu @@ -1,29 +1,3 @@ -/** - * llama.cpp - commit 46e3556e01b824e52395fb050b29804b6cff2a7c - do not edit this file - * - * MIT License - * - * Copyright (c) 2023-2024 The ggml authors - * - * Permission is hereby granted, free of charge, to any person obtaining a copy - * of this software and associated documentation files (the "Software"), to deal - * in the Software without restriction, including without limitation the rights - * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell - * copies of the Software, and to permit persons to whom the Software is - * furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in all - * copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. - */ - #include "mmq.cuh" void ggml_cuda_op_mul_mat_q( diff --git a/llama/ggml-cuda/mmq.cuh b/ml/backend/ggml/ggml/src/ggml-cuda/mmq.cuh similarity index 98% rename from llama/ggml-cuda/mmq.cuh rename to ml/backend/ggml/ggml/src/ggml-cuda/mmq.cuh index 1da4680a..3cd508a1 100644 --- a/llama/ggml-cuda/mmq.cuh +++ b/ml/backend/ggml/ggml/src/ggml-cuda/mmq.cuh @@ -1,29 +1,3 @@ -/** - * llama.cpp - commit 46e3556e01b824e52395fb050b29804b6cff2a7c - do not edit this file - * - * MIT License - * - * Copyright (c) 2023-2024 The ggml authors - * - * Permission is hereby granted, free of charge, to any person obtaining a copy - * of this software and associated documentation files (the "Software"), to deal - * in the Software without restriction, including without limitation the rights - * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell - * copies of the Software, and to permit persons to whom the Software is - * furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in all - * copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. - */ - #pragma once #include "common.cuh" diff --git a/llama/ggml-cuda/mmv.cu b/ml/backend/ggml/ggml/src/ggml-cuda/mmv.cu similarity index 89% rename from llama/ggml-cuda/mmv.cu rename to ml/backend/ggml/ggml/src/ggml-cuda/mmv.cu index 37559c74..ac45f2d1 100644 --- a/llama/ggml-cuda/mmv.cu +++ b/ml/backend/ggml/ggml/src/ggml-cuda/mmv.cu @@ -1,29 +1,3 @@ -/** - * llama.cpp - commit 46e3556e01b824e52395fb050b29804b6cff2a7c - do not edit this file - * - * MIT License - * - * Copyright (c) 2023-2024 The ggml authors - * - * Permission is hereby granted, free of charge, to any person obtaining a copy - * of this software and associated documentation files (the "Software"), to deal - * in the Software without restriction, including without limitation the rights - * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell - * copies of the Software, and to permit persons to whom the Software is - * furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in all - * copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. 
- */ - #include "common.cuh" #include "mmv.cuh" diff --git a/ml/backend/ggml/ggml/src/ggml-cuda/mmv.cuh b/ml/backend/ggml/ggml/src/ggml-cuda/mmv.cuh new file mode 100644 index 00000000..78a1cd4a --- /dev/null +++ b/ml/backend/ggml/ggml/src/ggml-cuda/mmv.cuh @@ -0,0 +1,12 @@ +#include "common.cuh" + +// maximum number of src0 rows with which to use mul_mat_vec over cuBLAS if FP16 tensor cores are available +#define MMV_MAX_ROWS 512 + +void ggml_cuda_mul_mat_vec(ggml_backend_cuda_context & ctx, const ggml_tensor * src0, const ggml_tensor * src1, ggml_tensor * dst); + +void ggml_cuda_op_mul_mat_vec( + ggml_backend_cuda_context & ctx, + const ggml_tensor * src0, const ggml_tensor * src1, ggml_tensor * dst, const char * src0_dd_i, const float * src1_ddf_i, + const char * src1_ddq_i, float * dst_dd_i, const int64_t row_low, const int64_t row_high, const int64_t src1_ncols, + const int64_t src1_padded_row_size, cudaStream_t stream); diff --git a/llama/ggml-cuda/mmvq.cu b/ml/backend/ggml/ggml/src/ggml-cuda/mmvq.cu similarity index 93% rename from llama/ggml-cuda/mmvq.cu rename to ml/backend/ggml/ggml/src/ggml-cuda/mmvq.cu index 19ea9aa9..e3b912d8 100644 --- a/llama/ggml-cuda/mmvq.cu +++ b/ml/backend/ggml/ggml/src/ggml-cuda/mmvq.cu @@ -1,29 +1,3 @@ -/** - * llama.cpp - commit 46e3556e01b824e52395fb050b29804b6cff2a7c - do not edit this file - * - * MIT License - * - * Copyright (c) 2023-2024 The ggml authors - * - * Permission is hereby granted, free of charge, to any person obtaining a copy - * of this software and associated documentation files (the "Software"), to deal - * in the Software without restriction, including without limitation the rights - * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell - * copies of the Software, and to permit persons to whom the Software is - * furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in all - * copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. - */ - #include "mmvq.cuh" #include "vecdotq.cuh" diff --git a/ml/backend/ggml/ggml/src/ggml-cuda/mmvq.cuh b/ml/backend/ggml/ggml/src/ggml-cuda/mmvq.cuh new file mode 100644 index 00000000..d9e42fdd --- /dev/null +++ b/ml/backend/ggml/ggml/src/ggml-cuda/mmvq.cuh @@ -0,0 +1,9 @@ +#include "common.cuh" + +#define MMVQ_MAX_BATCH_SIZE 8 // Max. batch size for which to use MMVQ kernels. 
+ +void ggml_cuda_op_mul_mat_vec_q( + ggml_backend_cuda_context & ctx, + const ggml_tensor * src0, const ggml_tensor * src1, ggml_tensor * dst, const char * src0_dd_i, const float * src1_ddf_i, + const char * src1_ddq_i, float * dst_dd_i, const int64_t row_low, const int64_t row_high, const int64_t src1_ncols, + const int64_t src1_padded_row_size, cudaStream_t stream); diff --git a/llama/ggml-cuda/norm.cu b/ml/backend/ggml/ggml/src/ggml-cuda/norm.cu similarity index 85% rename from llama/ggml-cuda/norm.cu rename to ml/backend/ggml/ggml/src/ggml-cuda/norm.cu index 6bc05ff7..133e219f 100644 --- a/llama/ggml-cuda/norm.cu +++ b/ml/backend/ggml/ggml/src/ggml-cuda/norm.cu @@ -1,29 +1,3 @@ -/** - * llama.cpp - commit 46e3556e01b824e52395fb050b29804b6cff2a7c - do not edit this file - * - * MIT License - * - * Copyright (c) 2023-2024 The ggml authors - * - * Permission is hereby granted, free of charge, to any person obtaining a copy - * of this software and associated documentation files (the "Software"), to deal - * in the Software without restriction, including without limitation the rights - * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell - * copies of the Software, and to permit persons to whom the Software is - * furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in all - * copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. 
- */ - #include "norm.cuh" template diff --git a/ml/backend/ggml/ggml/src/ggml-cuda/norm.cuh b/ml/backend/ggml/ggml/src/ggml-cuda/norm.cuh new file mode 100644 index 00000000..431a8f74 --- /dev/null +++ b/ml/backend/ggml/ggml/src/ggml-cuda/norm.cuh @@ -0,0 +1,7 @@ +#include "common.cuh" + +void ggml_cuda_op_norm(ggml_backend_cuda_context & ctx, ggml_tensor * dst); + +void ggml_cuda_op_group_norm(ggml_backend_cuda_context & ctx, ggml_tensor * dst); + +void ggml_cuda_op_rms_norm(ggml_backend_cuda_context & ctx, ggml_tensor * dst); diff --git a/llama/ggml-cuda/opt-step-adamw.cu b/ml/backend/ggml/ggml/src/ggml-cuda/opt-step-adamw.cu similarity index 70% rename from llama/ggml-cuda/opt-step-adamw.cu rename to ml/backend/ggml/ggml/src/ggml-cuda/opt-step-adamw.cu index 4bde5c59..35154f29 100644 --- a/llama/ggml-cuda/opt-step-adamw.cu +++ b/ml/backend/ggml/ggml/src/ggml-cuda/opt-step-adamw.cu @@ -1,29 +1,3 @@ -/** - * llama.cpp - commit 46e3556e01b824e52395fb050b29804b6cff2a7c - do not edit this file - * - * MIT License - * - * Copyright (c) 2023-2024 The ggml authors - * - * Permission is hereby granted, free of charge, to any person obtaining a copy - * of this software and associated documentation files (the "Software"), to deal - * in the Software without restriction, including without limitation the rights - * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell - * copies of the Software, and to permit persons to whom the Software is - * furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in all - * copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. 
- */ - #include "ggml-impl.h" #include "opt-step-adamw.cuh" diff --git a/ml/backend/ggml/ggml/src/ggml-cuda/opt-step-adamw.cuh b/ml/backend/ggml/ggml/src/ggml-cuda/opt-step-adamw.cuh new file mode 100644 index 00000000..58d6f6e5 --- /dev/null +++ b/ml/backend/ggml/ggml/src/ggml-cuda/opt-step-adamw.cuh @@ -0,0 +1,5 @@ +#include "common.cuh" + +#define CUDA_OPT_STEP_ADAMW_BLOCK_SIZE 256 + +void ggml_cuda_opt_step_adamw(ggml_backend_cuda_context & ctx, ggml_tensor * dst); diff --git a/llama/ggml-cuda/out-prod.cu b/ml/backend/ggml/ggml/src/ggml-cuda/out-prod.cu similarity index 57% rename from llama/ggml-cuda/out-prod.cu rename to ml/backend/ggml/ggml/src/ggml-cuda/out-prod.cu index fb2cc383..619cfdcb 100644 --- a/llama/ggml-cuda/out-prod.cu +++ b/ml/backend/ggml/ggml/src/ggml-cuda/out-prod.cu @@ -1,29 +1,3 @@ -/** - * llama.cpp - commit 46e3556e01b824e52395fb050b29804b6cff2a7c - do not edit this file - * - * MIT License - * - * Copyright (c) 2023-2024 The ggml authors - * - * Permission is hereby granted, free of charge, to any person obtaining a copy - * of this software and associated documentation files (the "Software"), to deal - * in the Software without restriction, including without limitation the rights - * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell - * copies of the Software, and to permit persons to whom the Software is - * furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in all - * copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. - */ - #include "out-prod.cuh" #include diff --git a/ml/backend/ggml/ggml/src/ggml-cuda/out-prod.cuh b/ml/backend/ggml/ggml/src/ggml-cuda/out-prod.cuh new file mode 100644 index 00000000..a0046f5f --- /dev/null +++ b/ml/backend/ggml/ggml/src/ggml-cuda/out-prod.cuh @@ -0,0 +1,3 @@ +#include "common.cuh" + +void ggml_cuda_out_prod(ggml_backend_cuda_context & ctx, ggml_tensor * dst); diff --git a/llama/ggml-cuda/pad.cu b/ml/backend/ggml/ggml/src/ggml-cuda/pad.cu similarity index 74% rename from llama/ggml-cuda/pad.cu rename to ml/backend/ggml/ggml/src/ggml-cuda/pad.cu index aa61c0ad..39fd4b16 100644 --- a/llama/ggml-cuda/pad.cu +++ b/ml/backend/ggml/ggml/src/ggml-cuda/pad.cu @@ -1,29 +1,3 @@ -/** - * llama.cpp - commit 46e3556e01b824e52395fb050b29804b6cff2a7c - do not edit this file - * - * MIT License - * - * Copyright (c) 2023-2024 The ggml authors - * - * Permission is hereby granted, free of charge, to any person obtaining a copy - * of this software and associated documentation files (the "Software"), to deal - * in the Software without restriction, including without limitation the rights - * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell - * copies of the Software, and to permit persons to whom the Software is - * furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in all - * copies or substantial portions of the Software. 
- * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. - */ - #include "pad.cuh" static __global__ void pad_f32(const float * x, float * dst, const int ne0, const int ne00, const int ne01, const int ne02, const int ne03) { diff --git a/ml/backend/ggml/ggml/src/ggml-cuda/pad.cuh b/ml/backend/ggml/ggml/src/ggml-cuda/pad.cuh new file mode 100644 index 00000000..e2ededc3 --- /dev/null +++ b/ml/backend/ggml/ggml/src/ggml-cuda/pad.cuh @@ -0,0 +1,6 @@ +#include "common.cuh" + +#define CUDA_PAD_BLOCK_SIZE 256 + +void ggml_cuda_op_pad(ggml_backend_cuda_context & ctx, ggml_tensor * dst); +void ggml_cuda_op_unpad(ggml_backend_cuda_context & ctx, ggml_tensor * dst); diff --git a/llama/ggml-cuda/pool2d.cu b/ml/backend/ggml/ggml/src/ggml-cuda/pool2d.cu similarity index 72% rename from llama/ggml-cuda/pool2d.cu rename to ml/backend/ggml/ggml/src/ggml-cuda/pool2d.cu index adbf1b55..c6d51e4d 100644 --- a/llama/ggml-cuda/pool2d.cu +++ b/ml/backend/ggml/ggml/src/ggml-cuda/pool2d.cu @@ -1,29 +1,3 @@ -/** - * llama.cpp - commit 46e3556e01b824e52395fb050b29804b6cff2a7c - do not edit this file - * - * MIT License - * - * Copyright (c) 2023-2024 The ggml authors - * - * Permission is hereby granted, free of charge, to any person obtaining a copy - * of this software and associated documentation files (the "Software"), to deal - * in the Software without restriction, including without limitation the rights - * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell - * copies of the Software, and to permit persons to whom the Software is - * furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in all - * copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. 
- */ - #include "pool2d.cuh" template diff --git a/ml/backend/ggml/ggml/src/ggml-cuda/pool2d.cuh b/ml/backend/ggml/ggml/src/ggml-cuda/pool2d.cuh new file mode 100644 index 00000000..7841292b --- /dev/null +++ b/ml/backend/ggml/ggml/src/ggml-cuda/pool2d.cuh @@ -0,0 +1,5 @@ +#include "common.cuh" + +#define CUDA_POOL2D_BLOCK_SIZE 256 + +void ggml_cuda_op_pool2d(ggml_backend_cuda_context & ctx, ggml_tensor * dst); diff --git a/llama/ggml-cuda/quantize.cu b/ml/backend/ggml/ggml/src/ggml-cuda/quantize.cu similarity index 81% rename from llama/ggml-cuda/quantize.cu rename to ml/backend/ggml/ggml/src/ggml-cuda/quantize.cu index 60341bee..1702e4ce 100644 --- a/llama/ggml-cuda/quantize.cu +++ b/ml/backend/ggml/ggml/src/ggml-cuda/quantize.cu @@ -1,29 +1,3 @@ -/** - * llama.cpp - commit 46e3556e01b824e52395fb050b29804b6cff2a7c - do not edit this file - * - * MIT License - * - * Copyright (c) 2023-2024 The ggml authors - * - * Permission is hereby granted, free of charge, to any person obtaining a copy - * of this software and associated documentation files (the "Software"), to deal - * in the Software without restriction, including without limitation the rights - * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell - * copies of the Software, and to permit persons to whom the Software is - * furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in all - * copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. 
- */ - #include "quantize.cuh" #include diff --git a/ml/backend/ggml/ggml/src/ggml-cuda/quantize.cuh b/ml/backend/ggml/ggml/src/ggml-cuda/quantize.cuh new file mode 100644 index 00000000..03bf322b --- /dev/null +++ b/ml/backend/ggml/ggml/src/ggml-cuda/quantize.cuh @@ -0,0 +1,24 @@ +#pragma once + +#include "common.cuh" +#include "mmq.cuh" + +#include + +#define CUDA_QUANTIZE_BLOCK_SIZE 256 +#define CUDA_QUANTIZE_BLOCK_SIZE_MMQ 128 + +static_assert(MATRIX_ROW_PADDING % CUDA_QUANTIZE_BLOCK_SIZE == 0, "Risk of out-of-bounds access."); +static_assert(MATRIX_ROW_PADDING % (4*CUDA_QUANTIZE_BLOCK_SIZE_MMQ) == 0, "Risk of out-of-bounds access."); + +typedef void (*quantize_cuda_t)( + const float * x, void * vy, const int64_t kx0, const int64_t kx1, const int64_t channels, const int64_t kx0_padded, + const ggml_type type_x, cudaStream_t stream); + +void quantize_row_q8_1_cuda( + const float * x, void * vy, const int64_t kx0, const int64_t kx1, const int64_t channels, const int64_t kx0_padded, + const ggml_type type_x, cudaStream_t stream); + +void quantize_mmq_q8_1_cuda( + const float * x, void * vy, const int64_t kx0, const int64_t kx1, const int64_t channels, const int64_t kx0_padded, + const ggml_type type_x, cudaStream_t stream); diff --git a/llama/ggml-cuda/rope.cu b/ml/backend/ggml/ggml/src/ggml-cuda/rope.cu similarity index 94% rename from llama/ggml-cuda/rope.cu rename to ml/backend/ggml/ggml/src/ggml-cuda/rope.cu index fc9f6f2f..2c84778d 100644 --- a/llama/ggml-cuda/rope.cu +++ b/ml/backend/ggml/ggml/src/ggml-cuda/rope.cu @@ -1,29 +1,3 @@ -/** - * llama.cpp - commit 46e3556e01b824e52395fb050b29804b6cff2a7c - do not edit this file - * - * MIT License - * - * Copyright (c) 2023-2024 The ggml authors - * - * Permission is hereby granted, free of charge, to any person obtaining a copy - * of this software and associated documentation files (the "Software"), to deal - * in the Software without restriction, including without limitation the rights - * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell - * copies of the Software, and to permit persons to whom the Software is - * furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in all - * copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. 
- */ - #include "rope.cuh" struct rope_corr_dims { diff --git a/ml/backend/ggml/ggml/src/ggml-cuda/rope.cuh b/ml/backend/ggml/ggml/src/ggml-cuda/rope.cuh new file mode 100644 index 00000000..0f787a0b --- /dev/null +++ b/ml/backend/ggml/ggml/src/ggml-cuda/rope.cuh @@ -0,0 +1,5 @@ +#include "common.cuh" + +#define CUDA_ROPE_BLOCK_SIZE 256 + +void ggml_cuda_op_rope(ggml_backend_cuda_context & ctx, ggml_tensor * dst); diff --git a/ml/backend/ggml/ggml/src/ggml-cuda/scale.cu b/ml/backend/ggml/ggml/src/ggml-cuda/scale.cu new file mode 100644 index 00000000..1405e066 --- /dev/null +++ b/ml/backend/ggml/ggml/src/ggml-cuda/scale.cu @@ -0,0 +1,31 @@ +#include "scale.cuh" + +static __global__ void scale_f32(const float * x, float * dst, const float scale, const int k) { + const int i = blockDim.x*blockIdx.x + threadIdx.x; + + if (i >= k) { + return; + } + + dst[i] = scale * x[i]; +} + +static void scale_f32_cuda(const float * x, float * dst, const float scale, const int k, cudaStream_t stream) { + const int num_blocks = (k + CUDA_SCALE_BLOCK_SIZE - 1) / CUDA_SCALE_BLOCK_SIZE; + scale_f32<<>>(x, dst, scale, k); +} + +void ggml_cuda_op_scale(ggml_backend_cuda_context & ctx, ggml_tensor * dst) { + const ggml_tensor * src0 = dst->src[0]; + const float * src0_d = (const float *)src0->data; + float * dst_d = (float *)dst->data; + cudaStream_t stream = ctx.stream(); + + GGML_ASSERT(src0->type == GGML_TYPE_F32); + GGML_ASSERT( dst->type == GGML_TYPE_F32); + + float scale; + memcpy(&scale, dst->op_params, sizeof(float)); + + scale_f32_cuda(src0_d, dst_d, scale, ggml_nelements(src0), stream); +} diff --git a/ml/backend/ggml/ggml/src/ggml-cuda/scale.cuh b/ml/backend/ggml/ggml/src/ggml-cuda/scale.cuh new file mode 100644 index 00000000..8ff75c82 --- /dev/null +++ b/ml/backend/ggml/ggml/src/ggml-cuda/scale.cuh @@ -0,0 +1,5 @@ +#include "common.cuh" + +#define CUDA_SCALE_BLOCK_SIZE 256 + +void ggml_cuda_op_scale(ggml_backend_cuda_context & ctx, ggml_tensor * dst); diff --git a/llama/ggml-cuda/softmax.cu b/ml/backend/ggml/ggml/src/ggml-cuda/softmax.cu similarity index 86% rename from llama/ggml-cuda/softmax.cu rename to ml/backend/ggml/ggml/src/ggml-cuda/softmax.cu index 52aad62f..c24abae1 100644 --- a/llama/ggml-cuda/softmax.cu +++ b/ml/backend/ggml/ggml/src/ggml-cuda/softmax.cu @@ -1,29 +1,3 @@ -/** - * llama.cpp - commit 46e3556e01b824e52395fb050b29804b6cff2a7c - do not edit this file - * - * MIT License - * - * Copyright (c) 2023-2024 The ggml authors - * - * Permission is hereby granted, free of charge, to any person obtaining a copy - * of this software and associated documentation files (the "Software"), to deal - * in the Software without restriction, including without limitation the rights - * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell - * copies of the Software, and to permit persons to whom the Software is - * furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in all - * copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. - */ - #include "common.cuh" #include "softmax.cuh" diff --git a/ml/backend/ggml/ggml/src/ggml-cuda/softmax.cuh b/ml/backend/ggml/ggml/src/ggml-cuda/softmax.cuh new file mode 100644 index 00000000..4ef4ff86 --- /dev/null +++ b/ml/backend/ggml/ggml/src/ggml-cuda/softmax.cuh @@ -0,0 +1,5 @@ +#include "common.cuh" + +#define CUDA_SOFT_MAX_BLOCK_SIZE 1024 + +void ggml_cuda_op_soft_max(ggml_backend_cuda_context & ctx, ggml_tensor * dst); diff --git a/llama/ggml-cuda/sum.cu b/ml/backend/ggml/ggml/src/ggml-cuda/sum.cu similarity index 54% rename from llama/ggml-cuda/sum.cu rename to ml/backend/ggml/ggml/src/ggml-cuda/sum.cu index e1f0b86e..e0dafc1d 100644 --- a/llama/ggml-cuda/sum.cu +++ b/ml/backend/ggml/ggml/src/ggml-cuda/sum.cu @@ -1,29 +1,3 @@ -/** - * llama.cpp - commit 46e3556e01b824e52395fb050b29804b6cff2a7c - do not edit this file - * - * MIT License - * - * Copyright (c) 2023-2024 The ggml authors - * - * Permission is hereby granted, free of charge, to any person obtaining a copy - * of this software and associated documentation files (the "Software"), to deal - * in the Software without restriction, including without limitation the rights - * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell - * copies of the Software, and to permit persons to whom the Software is - * furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in all - * copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. 
- */ - #if !defined(GGML_USE_HIP) && !defined(GGML_USE_MUSA) && CUDART_VERSION >= 11700 #define USE_CUB #endif // !defined(GGML_USE_HIP) && !defined(GGML_USE_MUSA) && CUDART_VERSION >= 11700 diff --git a/ml/backend/ggml/ggml/src/ggml-cuda/sum.cuh b/ml/backend/ggml/ggml/src/ggml-cuda/sum.cuh new file mode 100644 index 00000000..8cadc373 --- /dev/null +++ b/ml/backend/ggml/ggml/src/ggml-cuda/sum.cuh @@ -0,0 +1,5 @@ +#include "common.cuh" + +void sum_f32_cuda(ggml_cuda_pool & pool, const float * x, float * dst, const int64_t ne, cudaStream_t stream); + +void ggml_cuda_op_sum(ggml_backend_cuda_context & ctx, ggml_tensor * dst); diff --git a/ml/backend/ggml/ggml/src/ggml-cuda/sumrows.cu b/ml/backend/ggml/ggml/src/ggml-cuda/sumrows.cu new file mode 100644 index 00000000..38dbf1b5 --- /dev/null +++ b/ml/backend/ggml/ggml/src/ggml-cuda/sumrows.cu @@ -0,0 +1,39 @@ +#include "sumrows.cuh" + +static __global__ void k_sum_rows_f32(const float * x, float * dst, const int ncols) { + const int row = blockIdx.x; + const int col = threadIdx.x; + + float sum = 0.0f; + for (int i = col; i < ncols; i += blockDim.x) { + sum += x[row * ncols + i]; + } + + sum = warp_reduce_sum(sum); + + if (col == 0) { + dst[row] = sum; + } +} + +void sum_rows_f32_cuda(const float * x, float * dst, const int ncols, const int nrows, cudaStream_t stream) { + const dim3 block_dims(WARP_SIZE, 1, 1); + const dim3 block_nums(nrows, 1, 1); + k_sum_rows_f32<<<block_nums, block_dims, 0, stream>>>(x, dst, ncols); +} + +void ggml_cuda_op_sum_rows(ggml_backend_cuda_context & ctx, ggml_tensor * dst) { + const ggml_tensor * src0 = dst->src[0]; + const float * src0_d = (const float *)src0->data; + float * dst_d = (float *)dst->data; + cudaStream_t stream = ctx.stream(); + + GGML_ASSERT(src0->type == GGML_TYPE_F32); + GGML_ASSERT( dst->type == GGML_TYPE_F32); + GGML_ASSERT(ggml_is_contiguous(src0)); + + const int64_t ncols = src0->ne[0]; + const int64_t nrows = ggml_nrows(src0); + + sum_rows_f32_cuda(src0_d, dst_d, ncols, nrows, stream); +} diff --git a/ml/backend/ggml/ggml/src/ggml-cuda/sumrows.cuh b/ml/backend/ggml/ggml/src/ggml-cuda/sumrows.cuh new file mode 100644 index 00000000..191db1c1 --- /dev/null +++ b/ml/backend/ggml/ggml/src/ggml-cuda/sumrows.cuh @@ -0,0 +1,5 @@ +#include "common.cuh" + +void sum_rows_f32_cuda(const float * x, float * dst, const int ncols, const int nrows, cudaStream_t stream); + +void ggml_cuda_op_sum_rows(ggml_backend_cuda_context & ctx, ggml_tensor * dst); diff --git a/ml/backend/ggml/ggml/src/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-f16-f16.cu b/ml/backend/ggml/ggml/src/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-f16-f16.cu new file mode 100644 index 00000000..6696a238 --- /dev/null +++ b/ml/backend/ggml/ggml/src/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-f16-f16.cu @@ -0,0 +1,5 @@ +// This file has been autogenerated by generate_cu_files.py, do not edit manually. + +#include "../fattn-vec-f16.cuh" + +DECL_FATTN_VEC_F16_CASE(128, GGML_TYPE_F16, GGML_TYPE_F16); diff --git a/ml/backend/ggml/ggml/src/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-f16-q4_0.cu b/ml/backend/ggml/ggml/src/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-f16-q4_0.cu new file mode 100644 index 00000000..dd070db2 --- /dev/null +++ b/ml/backend/ggml/ggml/src/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-f16-q4_0.cu @@ -0,0 +1,5 @@ +// This file has been autogenerated by generate_cu_files.py, do not edit manually.
+ +#include "../fattn-vec-f16.cuh" + +DECL_FATTN_VEC_F16_CASE(128, GGML_TYPE_F16, GGML_TYPE_Q4_0); diff --git a/ml/backend/ggml/ggml/src/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-f16-q4_1.cu b/ml/backend/ggml/ggml/src/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-f16-q4_1.cu new file mode 100644 index 00000000..54dcde6f --- /dev/null +++ b/ml/backend/ggml/ggml/src/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-f16-q4_1.cu @@ -0,0 +1,5 @@ +// This file has been autogenerated by generate_cu_files.py, do not edit manually. + +#include "../fattn-vec-f16.cuh" + +DECL_FATTN_VEC_F16_CASE(128, GGML_TYPE_F16, GGML_TYPE_Q4_1); diff --git a/ml/backend/ggml/ggml/src/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-f16-q5_0.cu b/ml/backend/ggml/ggml/src/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-f16-q5_0.cu new file mode 100644 index 00000000..4ec22f79 --- /dev/null +++ b/ml/backend/ggml/ggml/src/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-f16-q5_0.cu @@ -0,0 +1,5 @@ +// This file has been autogenerated by generate_cu_files.py, do not edit manually. + +#include "../fattn-vec-f16.cuh" + +DECL_FATTN_VEC_F16_CASE(128, GGML_TYPE_F16, GGML_TYPE_Q5_0); diff --git a/ml/backend/ggml/ggml/src/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-f16-q5_1.cu b/ml/backend/ggml/ggml/src/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-f16-q5_1.cu new file mode 100644 index 00000000..3c15bf7f --- /dev/null +++ b/ml/backend/ggml/ggml/src/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-f16-q5_1.cu @@ -0,0 +1,5 @@ +// This file has been autogenerated by generate_cu_files.py, do not edit manually. + +#include "../fattn-vec-f16.cuh" + +DECL_FATTN_VEC_F16_CASE(128, GGML_TYPE_F16, GGML_TYPE_Q5_1); diff --git a/ml/backend/ggml/ggml/src/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-f16-q8_0.cu b/ml/backend/ggml/ggml/src/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-f16-q8_0.cu new file mode 100644 index 00000000..7e61b5fd --- /dev/null +++ b/ml/backend/ggml/ggml/src/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-f16-q8_0.cu @@ -0,0 +1,5 @@ +// This file has been autogenerated by generate_cu_files.py, do not edit manually. + +#include "../fattn-vec-f16.cuh" + +DECL_FATTN_VEC_F16_CASE(128, GGML_TYPE_F16, GGML_TYPE_Q8_0); diff --git a/ml/backend/ggml/ggml/src/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-q4_0-f16.cu b/ml/backend/ggml/ggml/src/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-q4_0-f16.cu new file mode 100644 index 00000000..fdb15b58 --- /dev/null +++ b/ml/backend/ggml/ggml/src/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-q4_0-f16.cu @@ -0,0 +1,5 @@ +// This file has been autogenerated by generate_cu_files.py, do not edit manually. + +#include "../fattn-vec-f16.cuh" + +DECL_FATTN_VEC_F16_CASE(128, GGML_TYPE_Q4_0, GGML_TYPE_F16); diff --git a/ml/backend/ggml/ggml/src/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-q4_0-q4_0.cu b/ml/backend/ggml/ggml/src/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-q4_0-q4_0.cu new file mode 100644 index 00000000..0f7c417d --- /dev/null +++ b/ml/backend/ggml/ggml/src/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-q4_0-q4_0.cu @@ -0,0 +1,5 @@ +// This file has been autogenerated by generate_cu_files.py, do not edit manually. 
+ +#include "../fattn-vec-f16.cuh" + +DECL_FATTN_VEC_F16_CASE(128, GGML_TYPE_Q4_0, GGML_TYPE_Q4_0); diff --git a/ml/backend/ggml/ggml/src/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-q4_0-q4_1.cu b/ml/backend/ggml/ggml/src/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-q4_0-q4_1.cu new file mode 100644 index 00000000..851f33c4 --- /dev/null +++ b/ml/backend/ggml/ggml/src/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-q4_0-q4_1.cu @@ -0,0 +1,5 @@ +// This file has been autogenerated by generate_cu_files.py, do not edit manually. + +#include "../fattn-vec-f16.cuh" + +DECL_FATTN_VEC_F16_CASE(128, GGML_TYPE_Q4_0, GGML_TYPE_Q4_1); diff --git a/ml/backend/ggml/ggml/src/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-q4_0-q5_0.cu b/ml/backend/ggml/ggml/src/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-q4_0-q5_0.cu new file mode 100644 index 00000000..763809cb --- /dev/null +++ b/ml/backend/ggml/ggml/src/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-q4_0-q5_0.cu @@ -0,0 +1,5 @@ +// This file has been autogenerated by generate_cu_files.py, do not edit manually. + +#include "../fattn-vec-f16.cuh" + +DECL_FATTN_VEC_F16_CASE(128, GGML_TYPE_Q4_0, GGML_TYPE_Q5_0); diff --git a/ml/backend/ggml/ggml/src/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-q4_0-q5_1.cu b/ml/backend/ggml/ggml/src/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-q4_0-q5_1.cu new file mode 100644 index 00000000..f2a276e5 --- /dev/null +++ b/ml/backend/ggml/ggml/src/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-q4_0-q5_1.cu @@ -0,0 +1,5 @@ +// This file has been autogenerated by generate_cu_files.py, do not edit manually. + +#include "../fattn-vec-f16.cuh" + +DECL_FATTN_VEC_F16_CASE(128, GGML_TYPE_Q4_0, GGML_TYPE_Q5_1); diff --git a/ml/backend/ggml/ggml/src/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-q4_0-q8_0.cu b/ml/backend/ggml/ggml/src/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-q4_0-q8_0.cu new file mode 100644 index 00000000..cb227f6f --- /dev/null +++ b/ml/backend/ggml/ggml/src/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-q4_0-q8_0.cu @@ -0,0 +1,5 @@ +// This file has been autogenerated by generate_cu_files.py, do not edit manually. + +#include "../fattn-vec-f16.cuh" + +DECL_FATTN_VEC_F16_CASE(128, GGML_TYPE_Q4_0, GGML_TYPE_Q8_0); diff --git a/ml/backend/ggml/ggml/src/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-q4_1-f16.cu b/ml/backend/ggml/ggml/src/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-q4_1-f16.cu new file mode 100644 index 00000000..97ac0520 --- /dev/null +++ b/ml/backend/ggml/ggml/src/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-q4_1-f16.cu @@ -0,0 +1,5 @@ +// This file has been autogenerated by generate_cu_files.py, do not edit manually. + +#include "../fattn-vec-f16.cuh" + +DECL_FATTN_VEC_F16_CASE(128, GGML_TYPE_Q4_1, GGML_TYPE_F16); diff --git a/ml/backend/ggml/ggml/src/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-q4_1-q4_0.cu b/ml/backend/ggml/ggml/src/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-q4_1-q4_0.cu new file mode 100644 index 00000000..c772b426 --- /dev/null +++ b/ml/backend/ggml/ggml/src/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-q4_1-q4_0.cu @@ -0,0 +1,5 @@ +// This file has been autogenerated by generate_cu_files.py, do not edit manually. 
+ +#include "../fattn-vec-f16.cuh" + +DECL_FATTN_VEC_F16_CASE(128, GGML_TYPE_Q4_1, GGML_TYPE_Q4_0); diff --git a/ml/backend/ggml/ggml/src/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-q4_1-q4_1.cu b/ml/backend/ggml/ggml/src/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-q4_1-q4_1.cu new file mode 100644 index 00000000..5cb74308 --- /dev/null +++ b/ml/backend/ggml/ggml/src/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-q4_1-q4_1.cu @@ -0,0 +1,5 @@ +// This file has been autogenerated by generate_cu_files.py, do not edit manually. + +#include "../fattn-vec-f16.cuh" + +DECL_FATTN_VEC_F16_CASE(128, GGML_TYPE_Q4_1, GGML_TYPE_Q4_1); diff --git a/ml/backend/ggml/ggml/src/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-q4_1-q5_0.cu b/ml/backend/ggml/ggml/src/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-q4_1-q5_0.cu new file mode 100644 index 00000000..98a709d1 --- /dev/null +++ b/ml/backend/ggml/ggml/src/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-q4_1-q5_0.cu @@ -0,0 +1,5 @@ +// This file has been autogenerated by generate_cu_files.py, do not edit manually. + +#include "../fattn-vec-f16.cuh" + +DECL_FATTN_VEC_F16_CASE(128, GGML_TYPE_Q4_1, GGML_TYPE_Q5_0); diff --git a/ml/backend/ggml/ggml/src/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-q4_1-q5_1.cu b/ml/backend/ggml/ggml/src/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-q4_1-q5_1.cu new file mode 100644 index 00000000..4f2f947a --- /dev/null +++ b/ml/backend/ggml/ggml/src/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-q4_1-q5_1.cu @@ -0,0 +1,5 @@ +// This file has been autogenerated by generate_cu_files.py, do not edit manually. + +#include "../fattn-vec-f16.cuh" + +DECL_FATTN_VEC_F16_CASE(128, GGML_TYPE_Q4_1, GGML_TYPE_Q5_1); diff --git a/ml/backend/ggml/ggml/src/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-q4_1-q8_0.cu b/ml/backend/ggml/ggml/src/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-q4_1-q8_0.cu new file mode 100644 index 00000000..11f96b6f --- /dev/null +++ b/ml/backend/ggml/ggml/src/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-q4_1-q8_0.cu @@ -0,0 +1,5 @@ +// This file has been autogenerated by generate_cu_files.py, do not edit manually. + +#include "../fattn-vec-f16.cuh" + +DECL_FATTN_VEC_F16_CASE(128, GGML_TYPE_Q4_1, GGML_TYPE_Q8_0); diff --git a/ml/backend/ggml/ggml/src/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-q5_0-f16.cu b/ml/backend/ggml/ggml/src/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-q5_0-f16.cu new file mode 100644 index 00000000..b39bdc06 --- /dev/null +++ b/ml/backend/ggml/ggml/src/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-q5_0-f16.cu @@ -0,0 +1,5 @@ +// This file has been autogenerated by generate_cu_files.py, do not edit manually. + +#include "../fattn-vec-f16.cuh" + +DECL_FATTN_VEC_F16_CASE(128, GGML_TYPE_Q5_0, GGML_TYPE_F16); diff --git a/ml/backend/ggml/ggml/src/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-q5_0-q4_0.cu b/ml/backend/ggml/ggml/src/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-q5_0-q4_0.cu new file mode 100644 index 00000000..bbd6a2c7 --- /dev/null +++ b/ml/backend/ggml/ggml/src/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-q5_0-q4_0.cu @@ -0,0 +1,5 @@ +// This file has been autogenerated by generate_cu_files.py, do not edit manually. 
+ +#include "../fattn-vec-f16.cuh" + +DECL_FATTN_VEC_F16_CASE(128, GGML_TYPE_Q5_0, GGML_TYPE_Q4_0); diff --git a/ml/backend/ggml/ggml/src/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-q5_0-q4_1.cu b/ml/backend/ggml/ggml/src/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-q5_0-q4_1.cu new file mode 100644 index 00000000..9d84ff2b --- /dev/null +++ b/ml/backend/ggml/ggml/src/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-q5_0-q4_1.cu @@ -0,0 +1,5 @@ +// This file has been autogenerated by generate_cu_files.py, do not edit manually. + +#include "../fattn-vec-f16.cuh" + +DECL_FATTN_VEC_F16_CASE(128, GGML_TYPE_Q5_0, GGML_TYPE_Q4_1); diff --git a/ml/backend/ggml/ggml/src/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-q5_0-q5_0.cu b/ml/backend/ggml/ggml/src/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-q5_0-q5_0.cu new file mode 100644 index 00000000..bc8a5bff --- /dev/null +++ b/ml/backend/ggml/ggml/src/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-q5_0-q5_0.cu @@ -0,0 +1,5 @@ +// This file has been autogenerated by generate_cu_files.py, do not edit manually. + +#include "../fattn-vec-f16.cuh" + +DECL_FATTN_VEC_F16_CASE(128, GGML_TYPE_Q5_0, GGML_TYPE_Q5_0); diff --git a/ml/backend/ggml/ggml/src/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-q5_0-q5_1.cu b/ml/backend/ggml/ggml/src/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-q5_0-q5_1.cu new file mode 100644 index 00000000..a679100c --- /dev/null +++ b/ml/backend/ggml/ggml/src/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-q5_0-q5_1.cu @@ -0,0 +1,5 @@ +// This file has been autogenerated by generate_cu_files.py, do not edit manually. + +#include "../fattn-vec-f16.cuh" + +DECL_FATTN_VEC_F16_CASE(128, GGML_TYPE_Q5_0, GGML_TYPE_Q5_1); diff --git a/ml/backend/ggml/ggml/src/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-q5_0-q8_0.cu b/ml/backend/ggml/ggml/src/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-q5_0-q8_0.cu new file mode 100644 index 00000000..8f21bccf --- /dev/null +++ b/ml/backend/ggml/ggml/src/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-q5_0-q8_0.cu @@ -0,0 +1,5 @@ +// This file has been autogenerated by generate_cu_files.py, do not edit manually. + +#include "../fattn-vec-f16.cuh" + +DECL_FATTN_VEC_F16_CASE(128, GGML_TYPE_Q5_0, GGML_TYPE_Q8_0); diff --git a/ml/backend/ggml/ggml/src/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-q5_1-f16.cu b/ml/backend/ggml/ggml/src/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-q5_1-f16.cu new file mode 100644 index 00000000..858b00fd --- /dev/null +++ b/ml/backend/ggml/ggml/src/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-q5_1-f16.cu @@ -0,0 +1,5 @@ +// This file has been autogenerated by generate_cu_files.py, do not edit manually. + +#include "../fattn-vec-f16.cuh" + +DECL_FATTN_VEC_F16_CASE(128, GGML_TYPE_Q5_1, GGML_TYPE_F16); diff --git a/ml/backend/ggml/ggml/src/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-q5_1-q4_0.cu b/ml/backend/ggml/ggml/src/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-q5_1-q4_0.cu new file mode 100644 index 00000000..0fc8011f --- /dev/null +++ b/ml/backend/ggml/ggml/src/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-q5_1-q4_0.cu @@ -0,0 +1,5 @@ +// This file has been autogenerated by generate_cu_files.py, do not edit manually. 
+ +#include "../fattn-vec-f16.cuh" + +DECL_FATTN_VEC_F16_CASE(128, GGML_TYPE_Q5_1, GGML_TYPE_Q4_0); diff --git a/ml/backend/ggml/ggml/src/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-q5_1-q4_1.cu b/ml/backend/ggml/ggml/src/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-q5_1-q4_1.cu new file mode 100644 index 00000000..261fdf62 --- /dev/null +++ b/ml/backend/ggml/ggml/src/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-q5_1-q4_1.cu @@ -0,0 +1,5 @@ +// This file has been autogenerated by generate_cu_files.py, do not edit manually. + +#include "../fattn-vec-f16.cuh" + +DECL_FATTN_VEC_F16_CASE(128, GGML_TYPE_Q5_1, GGML_TYPE_Q4_1); diff --git a/ml/backend/ggml/ggml/src/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-q5_1-q5_0.cu b/ml/backend/ggml/ggml/src/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-q5_1-q5_0.cu new file mode 100644 index 00000000..0fb82473 --- /dev/null +++ b/ml/backend/ggml/ggml/src/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-q5_1-q5_0.cu @@ -0,0 +1,5 @@ +// This file has been autogenerated by generate_cu_files.py, do not edit manually. + +#include "../fattn-vec-f16.cuh" + +DECL_FATTN_VEC_F16_CASE(128, GGML_TYPE_Q5_1, GGML_TYPE_Q5_0); diff --git a/ml/backend/ggml/ggml/src/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-q5_1-q5_1.cu b/ml/backend/ggml/ggml/src/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-q5_1-q5_1.cu new file mode 100644 index 00000000..a9d9d089 --- /dev/null +++ b/ml/backend/ggml/ggml/src/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-q5_1-q5_1.cu @@ -0,0 +1,5 @@ +// This file has been autogenerated by generate_cu_files.py, do not edit manually. + +#include "../fattn-vec-f16.cuh" + +DECL_FATTN_VEC_F16_CASE(128, GGML_TYPE_Q5_1, GGML_TYPE_Q5_1); diff --git a/ml/backend/ggml/ggml/src/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-q5_1-q8_0.cu b/ml/backend/ggml/ggml/src/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-q5_1-q8_0.cu new file mode 100644 index 00000000..7d7b2792 --- /dev/null +++ b/ml/backend/ggml/ggml/src/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-q5_1-q8_0.cu @@ -0,0 +1,5 @@ +// This file has been autogenerated by generate_cu_files.py, do not edit manually. + +#include "../fattn-vec-f16.cuh" + +DECL_FATTN_VEC_F16_CASE(128, GGML_TYPE_Q5_1, GGML_TYPE_Q8_0); diff --git a/ml/backend/ggml/ggml/src/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-q8_0-f16.cu b/ml/backend/ggml/ggml/src/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-q8_0-f16.cu new file mode 100644 index 00000000..a092ee2d --- /dev/null +++ b/ml/backend/ggml/ggml/src/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-q8_0-f16.cu @@ -0,0 +1,5 @@ +// This file has been autogenerated by generate_cu_files.py, do not edit manually. + +#include "../fattn-vec-f16.cuh" + +DECL_FATTN_VEC_F16_CASE(128, GGML_TYPE_Q8_0, GGML_TYPE_F16); diff --git a/ml/backend/ggml/ggml/src/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-q8_0-q4_0.cu b/ml/backend/ggml/ggml/src/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-q8_0-q4_0.cu new file mode 100644 index 00000000..db55927a --- /dev/null +++ b/ml/backend/ggml/ggml/src/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-q8_0-q4_0.cu @@ -0,0 +1,5 @@ +// This file has been autogenerated by generate_cu_files.py, do not edit manually. 
+ +#include "../fattn-vec-f16.cuh" + +DECL_FATTN_VEC_F16_CASE(128, GGML_TYPE_Q8_0, GGML_TYPE_Q4_0); diff --git a/ml/backend/ggml/ggml/src/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-q8_0-q4_1.cu b/ml/backend/ggml/ggml/src/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-q8_0-q4_1.cu new file mode 100644 index 00000000..c3c21cef --- /dev/null +++ b/ml/backend/ggml/ggml/src/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-q8_0-q4_1.cu @@ -0,0 +1,5 @@ +// This file has been autogenerated by generate_cu_files.py, do not edit manually. + +#include "../fattn-vec-f16.cuh" + +DECL_FATTN_VEC_F16_CASE(128, GGML_TYPE_Q8_0, GGML_TYPE_Q4_1); diff --git a/ml/backend/ggml/ggml/src/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-q8_0-q5_0.cu b/ml/backend/ggml/ggml/src/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-q8_0-q5_0.cu new file mode 100644 index 00000000..35dd9f52 --- /dev/null +++ b/ml/backend/ggml/ggml/src/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-q8_0-q5_0.cu @@ -0,0 +1,5 @@ +// This file has been autogenerated by generate_cu_files.py, do not edit manually. + +#include "../fattn-vec-f16.cuh" + +DECL_FATTN_VEC_F16_CASE(128, GGML_TYPE_Q8_0, GGML_TYPE_Q5_0); diff --git a/ml/backend/ggml/ggml/src/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-q8_0-q5_1.cu b/ml/backend/ggml/ggml/src/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-q8_0-q5_1.cu new file mode 100644 index 00000000..050c22ac --- /dev/null +++ b/ml/backend/ggml/ggml/src/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-q8_0-q5_1.cu @@ -0,0 +1,5 @@ +// This file has been autogenerated by generate_cu_files.py, do not edit manually. + +#include "../fattn-vec-f16.cuh" + +DECL_FATTN_VEC_F16_CASE(128, GGML_TYPE_Q8_0, GGML_TYPE_Q5_1); diff --git a/ml/backend/ggml/ggml/src/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-q8_0-q8_0.cu b/ml/backend/ggml/ggml/src/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-q8_0-q8_0.cu new file mode 100644 index 00000000..de4866c5 --- /dev/null +++ b/ml/backend/ggml/ggml/src/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-q8_0-q8_0.cu @@ -0,0 +1,5 @@ +// This file has been autogenerated by generate_cu_files.py, do not edit manually. + +#include "../fattn-vec-f16.cuh" + +DECL_FATTN_VEC_F16_CASE(128, GGML_TYPE_Q8_0, GGML_TYPE_Q8_0); diff --git a/ml/backend/ggml/ggml/src/ggml-cuda/template-instances/fattn-vec-f16-instance-hs256-f16-f16.cu b/ml/backend/ggml/ggml/src/ggml-cuda/template-instances/fattn-vec-f16-instance-hs256-f16-f16.cu new file mode 100644 index 00000000..57a10bc4 --- /dev/null +++ b/ml/backend/ggml/ggml/src/ggml-cuda/template-instances/fattn-vec-f16-instance-hs256-f16-f16.cu @@ -0,0 +1,5 @@ +// This file has been autogenerated by generate_cu_files.py, do not edit manually. + +#include "../fattn-vec-f16.cuh" + +DECL_FATTN_VEC_F16_CASE(256, GGML_TYPE_F16, GGML_TYPE_F16); diff --git a/ml/backend/ggml/ggml/src/ggml-cuda/template-instances/fattn-vec-f16-instance-hs64-f16-f16.cu b/ml/backend/ggml/ggml/src/ggml-cuda/template-instances/fattn-vec-f16-instance-hs64-f16-f16.cu new file mode 100644 index 00000000..e0f08b46 --- /dev/null +++ b/ml/backend/ggml/ggml/src/ggml-cuda/template-instances/fattn-vec-f16-instance-hs64-f16-f16.cu @@ -0,0 +1,5 @@ +// This file has been autogenerated by generate_cu_files.py, do not edit manually. 
+ +#include "../fattn-vec-f16.cuh" + +DECL_FATTN_VEC_F16_CASE(64, GGML_TYPE_F16, GGML_TYPE_F16); diff --git a/ml/backend/ggml/ggml/src/ggml-cuda/template-instances/fattn-vec-f16-instance-hs64-f16-q4_0.cu b/ml/backend/ggml/ggml/src/ggml-cuda/template-instances/fattn-vec-f16-instance-hs64-f16-q4_0.cu new file mode 100644 index 00000000..1c8e8a46 --- /dev/null +++ b/ml/backend/ggml/ggml/src/ggml-cuda/template-instances/fattn-vec-f16-instance-hs64-f16-q4_0.cu @@ -0,0 +1,5 @@ +// This file has been autogenerated by generate_cu_files.py, do not edit manually. + +#include "../fattn-vec-f16.cuh" + +DECL_FATTN_VEC_F16_CASE(64, GGML_TYPE_F16, GGML_TYPE_Q4_0); diff --git a/ml/backend/ggml/ggml/src/ggml-cuda/template-instances/fattn-vec-f16-instance-hs64-f16-q4_1.cu b/ml/backend/ggml/ggml/src/ggml-cuda/template-instances/fattn-vec-f16-instance-hs64-f16-q4_1.cu new file mode 100644 index 00000000..cefed83f --- /dev/null +++ b/ml/backend/ggml/ggml/src/ggml-cuda/template-instances/fattn-vec-f16-instance-hs64-f16-q4_1.cu @@ -0,0 +1,5 @@ +// This file has been autogenerated by generate_cu_files.py, do not edit manually. + +#include "../fattn-vec-f16.cuh" + +DECL_FATTN_VEC_F16_CASE(64, GGML_TYPE_F16, GGML_TYPE_Q4_1); diff --git a/ml/backend/ggml/ggml/src/ggml-cuda/template-instances/fattn-vec-f16-instance-hs64-f16-q5_0.cu b/ml/backend/ggml/ggml/src/ggml-cuda/template-instances/fattn-vec-f16-instance-hs64-f16-q5_0.cu new file mode 100644 index 00000000..aede6e35 --- /dev/null +++ b/ml/backend/ggml/ggml/src/ggml-cuda/template-instances/fattn-vec-f16-instance-hs64-f16-q5_0.cu @@ -0,0 +1,5 @@ +// This file has been autogenerated by generate_cu_files.py, do not edit manually. + +#include "../fattn-vec-f16.cuh" + +DECL_FATTN_VEC_F16_CASE(64, GGML_TYPE_F16, GGML_TYPE_Q5_0); diff --git a/ml/backend/ggml/ggml/src/ggml-cuda/template-instances/fattn-vec-f16-instance-hs64-f16-q5_1.cu b/ml/backend/ggml/ggml/src/ggml-cuda/template-instances/fattn-vec-f16-instance-hs64-f16-q5_1.cu new file mode 100644 index 00000000..1a1a92c7 --- /dev/null +++ b/ml/backend/ggml/ggml/src/ggml-cuda/template-instances/fattn-vec-f16-instance-hs64-f16-q5_1.cu @@ -0,0 +1,5 @@ +// This file has been autogenerated by generate_cu_files.py, do not edit manually. + +#include "../fattn-vec-f16.cuh" + +DECL_FATTN_VEC_F16_CASE(64, GGML_TYPE_F16, GGML_TYPE_Q5_1); diff --git a/ml/backend/ggml/ggml/src/ggml-cuda/template-instances/fattn-vec-f16-instance-hs64-f16-q8_0.cu b/ml/backend/ggml/ggml/src/ggml-cuda/template-instances/fattn-vec-f16-instance-hs64-f16-q8_0.cu new file mode 100644 index 00000000..ad667473 --- /dev/null +++ b/ml/backend/ggml/ggml/src/ggml-cuda/template-instances/fattn-vec-f16-instance-hs64-f16-q8_0.cu @@ -0,0 +1,5 @@ +// This file has been autogenerated by generate_cu_files.py, do not edit manually. + +#include "../fattn-vec-f16.cuh" + +DECL_FATTN_VEC_F16_CASE(64, GGML_TYPE_F16, GGML_TYPE_Q8_0); diff --git a/ml/backend/ggml/ggml/src/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-f16-f16.cu b/ml/backend/ggml/ggml/src/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-f16-f16.cu new file mode 100644 index 00000000..c499f455 --- /dev/null +++ b/ml/backend/ggml/ggml/src/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-f16-f16.cu @@ -0,0 +1,5 @@ +// This file has been autogenerated by generate_cu_files.py, do not edit manually. 
+ +#include "../fattn-vec-f32.cuh" + +DECL_FATTN_VEC_F32_CASE(128, GGML_TYPE_F16, GGML_TYPE_F16); diff --git a/ml/backend/ggml/ggml/src/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-f16-q4_0.cu b/ml/backend/ggml/ggml/src/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-f16-q4_0.cu new file mode 100644 index 00000000..8286ebf3 --- /dev/null +++ b/ml/backend/ggml/ggml/src/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-f16-q4_0.cu @@ -0,0 +1,5 @@ +// This file has been autogenerated by generate_cu_files.py, do not edit manually. + +#include "../fattn-vec-f32.cuh" + +DECL_FATTN_VEC_F32_CASE(128, GGML_TYPE_F16, GGML_TYPE_Q4_0); diff --git a/ml/backend/ggml/ggml/src/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-f16-q4_1.cu b/ml/backend/ggml/ggml/src/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-f16-q4_1.cu new file mode 100644 index 00000000..45878688 --- /dev/null +++ b/ml/backend/ggml/ggml/src/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-f16-q4_1.cu @@ -0,0 +1,5 @@ +// This file has been autogenerated by generate_cu_files.py, do not edit manually. + +#include "../fattn-vec-f32.cuh" + +DECL_FATTN_VEC_F32_CASE(128, GGML_TYPE_F16, GGML_TYPE_Q4_1); diff --git a/ml/backend/ggml/ggml/src/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-f16-q5_0.cu b/ml/backend/ggml/ggml/src/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-f16-q5_0.cu new file mode 100644 index 00000000..d89103ce --- /dev/null +++ b/ml/backend/ggml/ggml/src/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-f16-q5_0.cu @@ -0,0 +1,5 @@ +// This file has been autogenerated by generate_cu_files.py, do not edit manually. + +#include "../fattn-vec-f32.cuh" + +DECL_FATTN_VEC_F32_CASE(128, GGML_TYPE_F16, GGML_TYPE_Q5_0); diff --git a/ml/backend/ggml/ggml/src/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-f16-q5_1.cu b/ml/backend/ggml/ggml/src/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-f16-q5_1.cu new file mode 100644 index 00000000..bb75fd42 --- /dev/null +++ b/ml/backend/ggml/ggml/src/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-f16-q5_1.cu @@ -0,0 +1,5 @@ +// This file has been autogenerated by generate_cu_files.py, do not edit manually. + +#include "../fattn-vec-f32.cuh" + +DECL_FATTN_VEC_F32_CASE(128, GGML_TYPE_F16, GGML_TYPE_Q5_1); diff --git a/ml/backend/ggml/ggml/src/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-f16-q8_0.cu b/ml/backend/ggml/ggml/src/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-f16-q8_0.cu new file mode 100644 index 00000000..b1629817 --- /dev/null +++ b/ml/backend/ggml/ggml/src/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-f16-q8_0.cu @@ -0,0 +1,5 @@ +// This file has been autogenerated by generate_cu_files.py, do not edit manually. + +#include "../fattn-vec-f32.cuh" + +DECL_FATTN_VEC_F32_CASE(128, GGML_TYPE_F16, GGML_TYPE_Q8_0); diff --git a/ml/backend/ggml/ggml/src/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-q4_0-f16.cu b/ml/backend/ggml/ggml/src/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-q4_0-f16.cu new file mode 100644 index 00000000..d8657604 --- /dev/null +++ b/ml/backend/ggml/ggml/src/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-q4_0-f16.cu @@ -0,0 +1,5 @@ +// This file has been autogenerated by generate_cu_files.py, do not edit manually. 
+ +#include "../fattn-vec-f32.cuh" + +DECL_FATTN_VEC_F32_CASE(128, GGML_TYPE_Q4_0, GGML_TYPE_F16); diff --git a/ml/backend/ggml/ggml/src/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-q4_0-q4_0.cu b/ml/backend/ggml/ggml/src/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-q4_0-q4_0.cu new file mode 100644 index 00000000..2e5bd2f1 --- /dev/null +++ b/ml/backend/ggml/ggml/src/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-q4_0-q4_0.cu @@ -0,0 +1,5 @@ +// This file has been autogenerated by generate_cu_files.py, do not edit manually. + +#include "../fattn-vec-f32.cuh" + +DECL_FATTN_VEC_F32_CASE(128, GGML_TYPE_Q4_0, GGML_TYPE_Q4_0); diff --git a/ml/backend/ggml/ggml/src/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-q4_0-q4_1.cu b/ml/backend/ggml/ggml/src/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-q4_0-q4_1.cu new file mode 100644 index 00000000..be5f302d --- /dev/null +++ b/ml/backend/ggml/ggml/src/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-q4_0-q4_1.cu @@ -0,0 +1,5 @@ +// This file has been autogenerated by generate_cu_files.py, do not edit manually. + +#include "../fattn-vec-f32.cuh" + +DECL_FATTN_VEC_F32_CASE(128, GGML_TYPE_Q4_0, GGML_TYPE_Q4_1); diff --git a/ml/backend/ggml/ggml/src/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-q4_0-q5_0.cu b/ml/backend/ggml/ggml/src/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-q4_0-q5_0.cu new file mode 100644 index 00000000..8dd91cd7 --- /dev/null +++ b/ml/backend/ggml/ggml/src/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-q4_0-q5_0.cu @@ -0,0 +1,5 @@ +// This file has been autogenerated by generate_cu_files.py, do not edit manually. + +#include "../fattn-vec-f32.cuh" + +DECL_FATTN_VEC_F32_CASE(128, GGML_TYPE_Q4_0, GGML_TYPE_Q5_0); diff --git a/ml/backend/ggml/ggml/src/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-q4_0-q5_1.cu b/ml/backend/ggml/ggml/src/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-q4_0-q5_1.cu new file mode 100644 index 00000000..4cb79150 --- /dev/null +++ b/ml/backend/ggml/ggml/src/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-q4_0-q5_1.cu @@ -0,0 +1,5 @@ +// This file has been autogenerated by generate_cu_files.py, do not edit manually. + +#include "../fattn-vec-f32.cuh" + +DECL_FATTN_VEC_F32_CASE(128, GGML_TYPE_Q4_0, GGML_TYPE_Q5_1); diff --git a/ml/backend/ggml/ggml/src/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-q4_0-q8_0.cu b/ml/backend/ggml/ggml/src/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-q4_0-q8_0.cu new file mode 100644 index 00000000..09dea426 --- /dev/null +++ b/ml/backend/ggml/ggml/src/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-q4_0-q8_0.cu @@ -0,0 +1,5 @@ +// This file has been autogenerated by generate_cu_files.py, do not edit manually. + +#include "../fattn-vec-f32.cuh" + +DECL_FATTN_VEC_F32_CASE(128, GGML_TYPE_Q4_0, GGML_TYPE_Q8_0); diff --git a/ml/backend/ggml/ggml/src/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-q4_1-f16.cu b/ml/backend/ggml/ggml/src/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-q4_1-f16.cu new file mode 100644 index 00000000..0fbb6076 --- /dev/null +++ b/ml/backend/ggml/ggml/src/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-q4_1-f16.cu @@ -0,0 +1,5 @@ +// This file has been autogenerated by generate_cu_files.py, do not edit manually. 
+ +#include "../fattn-vec-f32.cuh" + +DECL_FATTN_VEC_F32_CASE(128, GGML_TYPE_Q4_1, GGML_TYPE_F16); diff --git a/ml/backend/ggml/ggml/src/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-q4_1-q4_0.cu b/ml/backend/ggml/ggml/src/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-q4_1-q4_0.cu new file mode 100644 index 00000000..2aeab83b --- /dev/null +++ b/ml/backend/ggml/ggml/src/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-q4_1-q4_0.cu @@ -0,0 +1,5 @@ +// This file has been autogenerated by generate_cu_files.py, do not edit manually. + +#include "../fattn-vec-f32.cuh" + +DECL_FATTN_VEC_F32_CASE(128, GGML_TYPE_Q4_1, GGML_TYPE_Q4_0); diff --git a/ml/backend/ggml/ggml/src/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-q4_1-q4_1.cu b/ml/backend/ggml/ggml/src/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-q4_1-q4_1.cu new file mode 100644 index 00000000..599415b4 --- /dev/null +++ b/ml/backend/ggml/ggml/src/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-q4_1-q4_1.cu @@ -0,0 +1,5 @@ +// This file has been autogenerated by generate_cu_files.py, do not edit manually. + +#include "../fattn-vec-f32.cuh" + +DECL_FATTN_VEC_F32_CASE(128, GGML_TYPE_Q4_1, GGML_TYPE_Q4_1); diff --git a/ml/backend/ggml/ggml/src/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-q4_1-q5_0.cu b/ml/backend/ggml/ggml/src/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-q4_1-q5_0.cu new file mode 100644 index 00000000..e4f8e308 --- /dev/null +++ b/ml/backend/ggml/ggml/src/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-q4_1-q5_0.cu @@ -0,0 +1,5 @@ +// This file has been autogenerated by generate_cu_files.py, do not edit manually. + +#include "../fattn-vec-f32.cuh" + +DECL_FATTN_VEC_F32_CASE(128, GGML_TYPE_Q4_1, GGML_TYPE_Q5_0); diff --git a/ml/backend/ggml/ggml/src/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-q4_1-q5_1.cu b/ml/backend/ggml/ggml/src/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-q4_1-q5_1.cu new file mode 100644 index 00000000..34d16652 --- /dev/null +++ b/ml/backend/ggml/ggml/src/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-q4_1-q5_1.cu @@ -0,0 +1,5 @@ +// This file has been autogenerated by generate_cu_files.py, do not edit manually. + +#include "../fattn-vec-f32.cuh" + +DECL_FATTN_VEC_F32_CASE(128, GGML_TYPE_Q4_1, GGML_TYPE_Q5_1); diff --git a/ml/backend/ggml/ggml/src/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-q4_1-q8_0.cu b/ml/backend/ggml/ggml/src/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-q4_1-q8_0.cu new file mode 100644 index 00000000..4bebef45 --- /dev/null +++ b/ml/backend/ggml/ggml/src/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-q4_1-q8_0.cu @@ -0,0 +1,5 @@ +// This file has been autogenerated by generate_cu_files.py, do not edit manually. + +#include "../fattn-vec-f32.cuh" + +DECL_FATTN_VEC_F32_CASE(128, GGML_TYPE_Q4_1, GGML_TYPE_Q8_0); diff --git a/ml/backend/ggml/ggml/src/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-q5_0-f16.cu b/ml/backend/ggml/ggml/src/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-q5_0-f16.cu new file mode 100644 index 00000000..326468da --- /dev/null +++ b/ml/backend/ggml/ggml/src/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-q5_0-f16.cu @@ -0,0 +1,5 @@ +// This file has been autogenerated by generate_cu_files.py, do not edit manually. 
+ +#include "../fattn-vec-f32.cuh" + +DECL_FATTN_VEC_F32_CASE(128, GGML_TYPE_Q5_0, GGML_TYPE_F16); diff --git a/ml/backend/ggml/ggml/src/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-q5_0-q4_0.cu b/ml/backend/ggml/ggml/src/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-q5_0-q4_0.cu new file mode 100644 index 00000000..511b58f4 --- /dev/null +++ b/ml/backend/ggml/ggml/src/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-q5_0-q4_0.cu @@ -0,0 +1,5 @@ +// This file has been autogenerated by generate_cu_files.py, do not edit manually. + +#include "../fattn-vec-f32.cuh" + +DECL_FATTN_VEC_F32_CASE(128, GGML_TYPE_Q5_0, GGML_TYPE_Q4_0); diff --git a/ml/backend/ggml/ggml/src/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-q5_0-q4_1.cu b/ml/backend/ggml/ggml/src/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-q5_0-q4_1.cu new file mode 100644 index 00000000..d9906d14 --- /dev/null +++ b/ml/backend/ggml/ggml/src/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-q5_0-q4_1.cu @@ -0,0 +1,5 @@ +// This file has been autogenerated by generate_cu_files.py, do not edit manually. + +#include "../fattn-vec-f32.cuh" + +DECL_FATTN_VEC_F32_CASE(128, GGML_TYPE_Q5_0, GGML_TYPE_Q4_1); diff --git a/ml/backend/ggml/ggml/src/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-q5_0-q5_0.cu b/ml/backend/ggml/ggml/src/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-q5_0-q5_0.cu new file mode 100644 index 00000000..f61c183a --- /dev/null +++ b/ml/backend/ggml/ggml/src/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-q5_0-q5_0.cu @@ -0,0 +1,5 @@ +// This file has been autogenerated by generate_cu_files.py, do not edit manually. + +#include "../fattn-vec-f32.cuh" + +DECL_FATTN_VEC_F32_CASE(128, GGML_TYPE_Q5_0, GGML_TYPE_Q5_0); diff --git a/ml/backend/ggml/ggml/src/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-q5_0-q5_1.cu b/ml/backend/ggml/ggml/src/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-q5_0-q5_1.cu new file mode 100644 index 00000000..c10450fd --- /dev/null +++ b/ml/backend/ggml/ggml/src/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-q5_0-q5_1.cu @@ -0,0 +1,5 @@ +// This file has been autogenerated by generate_cu_files.py, do not edit manually. + +#include "../fattn-vec-f32.cuh" + +DECL_FATTN_VEC_F32_CASE(128, GGML_TYPE_Q5_0, GGML_TYPE_Q5_1); diff --git a/ml/backend/ggml/ggml/src/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-q5_0-q8_0.cu b/ml/backend/ggml/ggml/src/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-q5_0-q8_0.cu new file mode 100644 index 00000000..2d5cb195 --- /dev/null +++ b/ml/backend/ggml/ggml/src/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-q5_0-q8_0.cu @@ -0,0 +1,5 @@ +// This file has been autogenerated by generate_cu_files.py, do not edit manually. + +#include "../fattn-vec-f32.cuh" + +DECL_FATTN_VEC_F32_CASE(128, GGML_TYPE_Q5_0, GGML_TYPE_Q8_0); diff --git a/ml/backend/ggml/ggml/src/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-q5_1-f16.cu b/ml/backend/ggml/ggml/src/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-q5_1-f16.cu new file mode 100644 index 00000000..b384f34d --- /dev/null +++ b/ml/backend/ggml/ggml/src/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-q5_1-f16.cu @@ -0,0 +1,5 @@ +// This file has been autogenerated by generate_cu_files.py, do not edit manually. 
+ +#include "../fattn-vec-f32.cuh" + +DECL_FATTN_VEC_F32_CASE(128, GGML_TYPE_Q5_1, GGML_TYPE_F16); diff --git a/ml/backend/ggml/ggml/src/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-q5_1-q4_0.cu b/ml/backend/ggml/ggml/src/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-q5_1-q4_0.cu new file mode 100644 index 00000000..446e293b --- /dev/null +++ b/ml/backend/ggml/ggml/src/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-q5_1-q4_0.cu @@ -0,0 +1,5 @@ +// This file has been autogenerated by generate_cu_files.py, do not edit manually. + +#include "../fattn-vec-f32.cuh" + +DECL_FATTN_VEC_F32_CASE(128, GGML_TYPE_Q5_1, GGML_TYPE_Q4_0); diff --git a/ml/backend/ggml/ggml/src/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-q5_1-q4_1.cu b/ml/backend/ggml/ggml/src/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-q5_1-q4_1.cu new file mode 100644 index 00000000..6f430298 --- /dev/null +++ b/ml/backend/ggml/ggml/src/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-q5_1-q4_1.cu @@ -0,0 +1,5 @@ +// This file has been autogenerated by generate_cu_files.py, do not edit manually. + +#include "../fattn-vec-f32.cuh" + +DECL_FATTN_VEC_F32_CASE(128, GGML_TYPE_Q5_1, GGML_TYPE_Q4_1); diff --git a/ml/backend/ggml/ggml/src/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-q5_1-q5_0.cu b/ml/backend/ggml/ggml/src/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-q5_1-q5_0.cu new file mode 100644 index 00000000..1cd8ba88 --- /dev/null +++ b/ml/backend/ggml/ggml/src/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-q5_1-q5_0.cu @@ -0,0 +1,5 @@ +// This file has been autogenerated by generate_cu_files.py, do not edit manually. + +#include "../fattn-vec-f32.cuh" + +DECL_FATTN_VEC_F32_CASE(128, GGML_TYPE_Q5_1, GGML_TYPE_Q5_0); diff --git a/ml/backend/ggml/ggml/src/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-q5_1-q5_1.cu b/ml/backend/ggml/ggml/src/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-q5_1-q5_1.cu new file mode 100644 index 00000000..1ee2eab6 --- /dev/null +++ b/ml/backend/ggml/ggml/src/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-q5_1-q5_1.cu @@ -0,0 +1,5 @@ +// This file has been autogenerated by generate_cu_files.py, do not edit manually. + +#include "../fattn-vec-f32.cuh" + +DECL_FATTN_VEC_F32_CASE(128, GGML_TYPE_Q5_1, GGML_TYPE_Q5_1); diff --git a/ml/backend/ggml/ggml/src/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-q5_1-q8_0.cu b/ml/backend/ggml/ggml/src/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-q5_1-q8_0.cu new file mode 100644 index 00000000..2bc77816 --- /dev/null +++ b/ml/backend/ggml/ggml/src/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-q5_1-q8_0.cu @@ -0,0 +1,5 @@ +// This file has been autogenerated by generate_cu_files.py, do not edit manually. + +#include "../fattn-vec-f32.cuh" + +DECL_FATTN_VEC_F32_CASE(128, GGML_TYPE_Q5_1, GGML_TYPE_Q8_0); diff --git a/ml/backend/ggml/ggml/src/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-q8_0-f16.cu b/ml/backend/ggml/ggml/src/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-q8_0-f16.cu new file mode 100644 index 00000000..d55ced08 --- /dev/null +++ b/ml/backend/ggml/ggml/src/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-q8_0-f16.cu @@ -0,0 +1,5 @@ +// This file has been autogenerated by generate_cu_files.py, do not edit manually. 
+ +#include "../fattn-vec-f32.cuh" + +DECL_FATTN_VEC_F32_CASE(128, GGML_TYPE_Q8_0, GGML_TYPE_F16); diff --git a/ml/backend/ggml/ggml/src/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-q8_0-q4_0.cu b/ml/backend/ggml/ggml/src/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-q8_0-q4_0.cu new file mode 100644 index 00000000..8361e99c --- /dev/null +++ b/ml/backend/ggml/ggml/src/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-q8_0-q4_0.cu @@ -0,0 +1,5 @@ +// This file has been autogenerated by generate_cu_files.py, do not edit manually. + +#include "../fattn-vec-f32.cuh" + +DECL_FATTN_VEC_F32_CASE(128, GGML_TYPE_Q8_0, GGML_TYPE_Q4_0); diff --git a/ml/backend/ggml/ggml/src/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-q8_0-q4_1.cu b/ml/backend/ggml/ggml/src/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-q8_0-q4_1.cu new file mode 100644 index 00000000..7507a67c --- /dev/null +++ b/ml/backend/ggml/ggml/src/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-q8_0-q4_1.cu @@ -0,0 +1,5 @@ +// This file has been autogenerated by generate_cu_files.py, do not edit manually. + +#include "../fattn-vec-f32.cuh" + +DECL_FATTN_VEC_F32_CASE(128, GGML_TYPE_Q8_0, GGML_TYPE_Q4_1); diff --git a/ml/backend/ggml/ggml/src/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-q8_0-q5_0.cu b/ml/backend/ggml/ggml/src/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-q8_0-q5_0.cu new file mode 100644 index 00000000..61f050b2 --- /dev/null +++ b/ml/backend/ggml/ggml/src/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-q8_0-q5_0.cu @@ -0,0 +1,5 @@ +// This file has been autogenerated by generate_cu_files.py, do not edit manually. + +#include "../fattn-vec-f32.cuh" + +DECL_FATTN_VEC_F32_CASE(128, GGML_TYPE_Q8_0, GGML_TYPE_Q5_0); diff --git a/ml/backend/ggml/ggml/src/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-q8_0-q5_1.cu b/ml/backend/ggml/ggml/src/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-q8_0-q5_1.cu new file mode 100644 index 00000000..d4a49d9c --- /dev/null +++ b/ml/backend/ggml/ggml/src/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-q8_0-q5_1.cu @@ -0,0 +1,5 @@ +// This file has been autogenerated by generate_cu_files.py, do not edit manually. + +#include "../fattn-vec-f32.cuh" + +DECL_FATTN_VEC_F32_CASE(128, GGML_TYPE_Q8_0, GGML_TYPE_Q5_1); diff --git a/ml/backend/ggml/ggml/src/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-q8_0-q8_0.cu b/ml/backend/ggml/ggml/src/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-q8_0-q8_0.cu new file mode 100644 index 00000000..d1462789 --- /dev/null +++ b/ml/backend/ggml/ggml/src/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-q8_0-q8_0.cu @@ -0,0 +1,5 @@ +// This file has been autogenerated by generate_cu_files.py, do not edit manually. + +#include "../fattn-vec-f32.cuh" + +DECL_FATTN_VEC_F32_CASE(128, GGML_TYPE_Q8_0, GGML_TYPE_Q8_0); diff --git a/ml/backend/ggml/ggml/src/ggml-cuda/template-instances/fattn-vec-f32-instance-hs256-f16-f16.cu b/ml/backend/ggml/ggml/src/ggml-cuda/template-instances/fattn-vec-f32-instance-hs256-f16-f16.cu new file mode 100644 index 00000000..e73f917a --- /dev/null +++ b/ml/backend/ggml/ggml/src/ggml-cuda/template-instances/fattn-vec-f32-instance-hs256-f16-f16.cu @@ -0,0 +1,5 @@ +// This file has been autogenerated by generate_cu_files.py, do not edit manually. 
+ +#include "../fattn-vec-f32.cuh" + +DECL_FATTN_VEC_F32_CASE(256, GGML_TYPE_F16, GGML_TYPE_F16); diff --git a/ml/backend/ggml/ggml/src/ggml-cuda/template-instances/fattn-vec-f32-instance-hs64-f16-f16.cu b/ml/backend/ggml/ggml/src/ggml-cuda/template-instances/fattn-vec-f32-instance-hs64-f16-f16.cu new file mode 100644 index 00000000..d40825df --- /dev/null +++ b/ml/backend/ggml/ggml/src/ggml-cuda/template-instances/fattn-vec-f32-instance-hs64-f16-f16.cu @@ -0,0 +1,5 @@ +// This file has been autogenerated by generate_cu_files.py, do not edit manually. + +#include "../fattn-vec-f32.cuh" + +DECL_FATTN_VEC_F32_CASE(64, GGML_TYPE_F16, GGML_TYPE_F16); diff --git a/ml/backend/ggml/ggml/src/ggml-cuda/template-instances/fattn-vec-f32-instance-hs64-f16-q4_0.cu b/ml/backend/ggml/ggml/src/ggml-cuda/template-instances/fattn-vec-f32-instance-hs64-f16-q4_0.cu new file mode 100644 index 00000000..b5c6869f --- /dev/null +++ b/ml/backend/ggml/ggml/src/ggml-cuda/template-instances/fattn-vec-f32-instance-hs64-f16-q4_0.cu @@ -0,0 +1,5 @@ +// This file has been autogenerated by generate_cu_files.py, do not edit manually. + +#include "../fattn-vec-f32.cuh" + +DECL_FATTN_VEC_F32_CASE(64, GGML_TYPE_F16, GGML_TYPE_Q4_0); diff --git a/ml/backend/ggml/ggml/src/ggml-cuda/template-instances/fattn-vec-f32-instance-hs64-f16-q4_1.cu b/ml/backend/ggml/ggml/src/ggml-cuda/template-instances/fattn-vec-f32-instance-hs64-f16-q4_1.cu new file mode 100644 index 00000000..4e21b0cc --- /dev/null +++ b/ml/backend/ggml/ggml/src/ggml-cuda/template-instances/fattn-vec-f32-instance-hs64-f16-q4_1.cu @@ -0,0 +1,5 @@ +// This file has been autogenerated by generate_cu_files.py, do not edit manually. + +#include "../fattn-vec-f32.cuh" + +DECL_FATTN_VEC_F32_CASE(64, GGML_TYPE_F16, GGML_TYPE_Q4_1); diff --git a/ml/backend/ggml/ggml/src/ggml-cuda/template-instances/fattn-vec-f32-instance-hs64-f16-q5_0.cu b/ml/backend/ggml/ggml/src/ggml-cuda/template-instances/fattn-vec-f32-instance-hs64-f16-q5_0.cu new file mode 100644 index 00000000..2eac321b --- /dev/null +++ b/ml/backend/ggml/ggml/src/ggml-cuda/template-instances/fattn-vec-f32-instance-hs64-f16-q5_0.cu @@ -0,0 +1,5 @@ +// This file has been autogenerated by generate_cu_files.py, do not edit manually. + +#include "../fattn-vec-f32.cuh" + +DECL_FATTN_VEC_F32_CASE(64, GGML_TYPE_F16, GGML_TYPE_Q5_0); diff --git a/ml/backend/ggml/ggml/src/ggml-cuda/template-instances/fattn-vec-f32-instance-hs64-f16-q5_1.cu b/ml/backend/ggml/ggml/src/ggml-cuda/template-instances/fattn-vec-f32-instance-hs64-f16-q5_1.cu new file mode 100644 index 00000000..f7d2c3b4 --- /dev/null +++ b/ml/backend/ggml/ggml/src/ggml-cuda/template-instances/fattn-vec-f32-instance-hs64-f16-q5_1.cu @@ -0,0 +1,5 @@ +// This file has been autogenerated by generate_cu_files.py, do not edit manually. + +#include "../fattn-vec-f32.cuh" + +DECL_FATTN_VEC_F32_CASE(64, GGML_TYPE_F16, GGML_TYPE_Q5_1); diff --git a/ml/backend/ggml/ggml/src/ggml-cuda/template-instances/fattn-vec-f32-instance-hs64-f16-q8_0.cu b/ml/backend/ggml/ggml/src/ggml-cuda/template-instances/fattn-vec-f32-instance-hs64-f16-q8_0.cu new file mode 100644 index 00000000..a013f400 --- /dev/null +++ b/ml/backend/ggml/ggml/src/ggml-cuda/template-instances/fattn-vec-f32-instance-hs64-f16-q8_0.cu @@ -0,0 +1,5 @@ +// This file has been autogenerated by generate_cu_files.py, do not edit manually. 
+ +#include "../fattn-vec-f32.cuh" + +DECL_FATTN_VEC_F32_CASE(64, GGML_TYPE_F16, GGML_TYPE_Q8_0); diff --git a/ml/backend/ggml/ggml/src/ggml-cuda/template-instances/fattn-wmma-f16-instance-kqfloat-cpb16.cu b/ml/backend/ggml/ggml/src/ggml-cuda/template-instances/fattn-wmma-f16-instance-kqfloat-cpb16.cu new file mode 100644 index 00000000..2d94e65c --- /dev/null +++ b/ml/backend/ggml/ggml/src/ggml-cuda/template-instances/fattn-wmma-f16-instance-kqfloat-cpb16.cu @@ -0,0 +1,10 @@ +// This file has been autogenerated by generate_cu_files.py, do not edit manually. + +#include "../fattn-wmma-f16.cuh" + +DECL_FATTN_WMMA_F16_CASE(64, 16, float); +DECL_FATTN_WMMA_F16_CASE(80, 16, float); +DECL_FATTN_WMMA_F16_CASE(96, 16, float); +DECL_FATTN_WMMA_F16_CASE(112, 16, float); +DECL_FATTN_WMMA_F16_CASE(128, 16, float); +DECL_FATTN_WMMA_F16_CASE(256, 16, float); diff --git a/ml/backend/ggml/ggml/src/ggml-cuda/template-instances/fattn-wmma-f16-instance-kqfloat-cpb32.cu b/ml/backend/ggml/ggml/src/ggml-cuda/template-instances/fattn-wmma-f16-instance-kqfloat-cpb32.cu new file mode 100644 index 00000000..c3d9df3c --- /dev/null +++ b/ml/backend/ggml/ggml/src/ggml-cuda/template-instances/fattn-wmma-f16-instance-kqfloat-cpb32.cu @@ -0,0 +1,9 @@ +// This file has been autogenerated by generate_cu_files.py, do not edit manually. + +#include "../fattn-wmma-f16.cuh" + +DECL_FATTN_WMMA_F16_CASE(64, 32, float); +DECL_FATTN_WMMA_F16_CASE(80, 32, float); +DECL_FATTN_WMMA_F16_CASE(96, 32, float); +DECL_FATTN_WMMA_F16_CASE(112, 32, float); +DECL_FATTN_WMMA_F16_CASE(128, 32, float); diff --git a/ml/backend/ggml/ggml/src/ggml-cuda/template-instances/fattn-wmma-f16-instance-kqhalf-cpb16.cu b/ml/backend/ggml/ggml/src/ggml-cuda/template-instances/fattn-wmma-f16-instance-kqhalf-cpb16.cu new file mode 100644 index 00000000..bb680e40 --- /dev/null +++ b/ml/backend/ggml/ggml/src/ggml-cuda/template-instances/fattn-wmma-f16-instance-kqhalf-cpb16.cu @@ -0,0 +1,10 @@ +// This file has been autogenerated by generate_cu_files.py, do not edit manually. + +#include "../fattn-wmma-f16.cuh" + +DECL_FATTN_WMMA_F16_CASE(64, 16, half); +DECL_FATTN_WMMA_F16_CASE(80, 16, half); +DECL_FATTN_WMMA_F16_CASE(96, 16, half); +DECL_FATTN_WMMA_F16_CASE(112, 16, half); +DECL_FATTN_WMMA_F16_CASE(128, 16, half); +DECL_FATTN_WMMA_F16_CASE(256, 16, half); diff --git a/ml/backend/ggml/ggml/src/ggml-cuda/template-instances/fattn-wmma-f16-instance-kqhalf-cpb32.cu b/ml/backend/ggml/ggml/src/ggml-cuda/template-instances/fattn-wmma-f16-instance-kqhalf-cpb32.cu new file mode 100644 index 00000000..073f71b1 --- /dev/null +++ b/ml/backend/ggml/ggml/src/ggml-cuda/template-instances/fattn-wmma-f16-instance-kqhalf-cpb32.cu @@ -0,0 +1,10 @@ +// This file has been autogenerated by generate_cu_files.py, do not edit manually. + +#include "../fattn-wmma-f16.cuh" + +DECL_FATTN_WMMA_F16_CASE(64, 32, half); +DECL_FATTN_WMMA_F16_CASE(80, 32, half); +DECL_FATTN_WMMA_F16_CASE(96, 32, half); +DECL_FATTN_WMMA_F16_CASE(112, 32, half); +DECL_FATTN_WMMA_F16_CASE(128, 32, half); +DECL_FATTN_WMMA_F16_CASE(256, 32, half); diff --git a/ml/backend/ggml/ggml/src/ggml-cuda/template-instances/fattn-wmma-f16-instance-kqhalf-cpb8.cu b/ml/backend/ggml/ggml/src/ggml-cuda/template-instances/fattn-wmma-f16-instance-kqhalf-cpb8.cu new file mode 100644 index 00000000..d30710c5 --- /dev/null +++ b/ml/backend/ggml/ggml/src/ggml-cuda/template-instances/fattn-wmma-f16-instance-kqhalf-cpb8.cu @@ -0,0 +1,8 @@ +// This file has been autogenerated by generate_cu_files.py, do not edit manually. 
+ +#include "../fattn-wmma-f16.cuh" + +DECL_FATTN_WMMA_F16_CASE(64, 8, half); +DECL_FATTN_WMMA_F16_CASE(96, 8, half); +DECL_FATTN_WMMA_F16_CASE(128, 8, half); +DECL_FATTN_WMMA_F16_CASE(256, 8, half); diff --git a/ml/backend/ggml/ggml/src/ggml-cuda/template-instances/generate_cu_files.py b/ml/backend/ggml/ggml/src/ggml-cuda/template-instances/generate_cu_files.py new file mode 100755 index 00000000..d7874e6e --- /dev/null +++ b/ml/backend/ggml/ggml/src/ggml-cuda/template-instances/generate_cu_files.py @@ -0,0 +1,77 @@ +#!/usr/bin/env python3 + +from glob import glob +import os + +TYPES_KV = ["GGML_TYPE_Q4_0", "GGML_TYPE_Q4_1", "GGML_TYPE_Q5_0", "GGML_TYPE_Q5_1", "GGML_TYPE_Q8_0", "GGML_TYPE_F16"] + +SOURCE_FATTN_VEC = """// This file has been autogenerated by generate_cu_files.py, do not edit manually. + +#include "../fattn-vec-f{vkq_size}.cuh" + +DECL_FATTN_VEC_F{vkq_size}_CASE({head_size}, {type_k}, {type_v}); +""" + +SOURCE_FATTN_WMMA_START = """// This file has been autogenerated by generate_cu_files.py, do not edit manually. + +#include "../fattn-wmma-f16.cuh" + +""" + +SOURCE_FATTN_WMMA_CASE = "DECL_FATTN_WMMA_F16_CASE({head_size}, {cols_per_block}, {kq_acc_t});\n" + +TYPES_MMQ = [ + "GGML_TYPE_Q4_0", "GGML_TYPE_Q4_1", "GGML_TYPE_Q5_0", "GGML_TYPE_Q5_1", "GGML_TYPE_Q8_0", + "GGML_TYPE_Q2_K", "GGML_TYPE_Q3_K", "GGML_TYPE_Q4_K", "GGML_TYPE_Q5_K", "GGML_TYPE_Q6_K", + "GGML_TYPE_IQ2_XXS", "GGML_TYPE_IQ2_XS", "GGML_TYPE_IQ2_S", "GGML_TYPE_IQ3_XXS", "GGML_TYPE_IQ3_S", + "GGML_TYPE_IQ1_S", "GGML_TYPE_IQ4_NL", "GGML_TYPE_IQ4_XS" +] + +SOURCE_MMQ = """// This file has been autogenerated by generate_cu_files.py, do not edit manually. + +#include "../mmq.cuh" + +DECL_MMQ_CASE({type}); +""" + + +def get_short_name(long_quant_name): + return long_quant_name.replace("GGML_TYPE_", "").lower() + + +def get_head_sizes(type_k, type_v): + if type_k == "GGML_TYPE_F16" and type_v == "GGML_TYPE_F16": + return [64, 128, 256] + if type_k == "GGML_TYPE_F16": + return [64, 128] + return [128] + + +for filename in glob("*.cu"): + os.remove(filename) + +for vkq_size in [16, 32]: + for type_k in TYPES_KV: + for type_v in TYPES_KV: + for head_size in get_head_sizes(type_k, type_v): + with open(f"fattn-vec-f{vkq_size}-instance-hs{head_size}-{get_short_name(type_k)}-{get_short_name(type_v)}.cu", "w") as f: + f.write(SOURCE_FATTN_VEC.format(vkq_size=vkq_size, head_size=head_size, type_k=type_k, type_v=type_v)) + +for kq_acc_t in ["half", "float"]: + for cols_per_block in [8, 16, 32]: + if kq_acc_t == "float" and cols_per_block == 8: + continue + + with open(f"fattn-wmma-f16-instance-kq{kq_acc_t}-cpb{cols_per_block}.cu", "w") as f: + f.write(SOURCE_FATTN_WMMA_START) + + for head_size in [64, 80, 96, 112, 128, 256]: + if cols_per_block == 8 and head_size % 32 != 0: # wmma fragment is 8x32 + continue + if kq_acc_t == "float" and cols_per_block == 32 and head_size == 256: # register spilling, bad performance + continue + f.write(SOURCE_FATTN_WMMA_CASE.format(kq_acc_t=kq_acc_t, cols_per_block=cols_per_block, head_size=head_size)) + +for type in TYPES_MMQ: + with open(f"mmq-instance-{get_short_name(type)}.cu", "w") as f: + f.write(SOURCE_MMQ.format(type=type)) diff --git a/ml/backend/ggml/ggml/src/ggml-cuda/template-instances/mmq-instance-iq1_s.cu b/ml/backend/ggml/ggml/src/ggml-cuda/template-instances/mmq-instance-iq1_s.cu new file mode 100644 index 00000000..84ec8502 --- /dev/null +++ b/ml/backend/ggml/ggml/src/ggml-cuda/template-instances/mmq-instance-iq1_s.cu @@ -0,0 +1,5 @@ +// This file has been autogenerated by 
generate_cu_files.py, do not edit manually. + +#include "../mmq.cuh" + +DECL_MMQ_CASE(GGML_TYPE_IQ1_S); diff --git a/ml/backend/ggml/ggml/src/ggml-cuda/template-instances/mmq-instance-iq2_s.cu b/ml/backend/ggml/ggml/src/ggml-cuda/template-instances/mmq-instance-iq2_s.cu new file mode 100644 index 00000000..583c4e5a --- /dev/null +++ b/ml/backend/ggml/ggml/src/ggml-cuda/template-instances/mmq-instance-iq2_s.cu @@ -0,0 +1,5 @@ +// This file has been autogenerated by generate_cu_files.py, do not edit manually. + +#include "../mmq.cuh" + +DECL_MMQ_CASE(GGML_TYPE_IQ2_S); diff --git a/ml/backend/ggml/ggml/src/ggml-cuda/template-instances/mmq-instance-iq2_xs.cu b/ml/backend/ggml/ggml/src/ggml-cuda/template-instances/mmq-instance-iq2_xs.cu new file mode 100644 index 00000000..edaf1560 --- /dev/null +++ b/ml/backend/ggml/ggml/src/ggml-cuda/template-instances/mmq-instance-iq2_xs.cu @@ -0,0 +1,5 @@ +// This file has been autogenerated by generate_cu_files.py, do not edit manually. + +#include "../mmq.cuh" + +DECL_MMQ_CASE(GGML_TYPE_IQ2_XS); diff --git a/ml/backend/ggml/ggml/src/ggml-cuda/template-instances/mmq-instance-iq2_xxs.cu b/ml/backend/ggml/ggml/src/ggml-cuda/template-instances/mmq-instance-iq2_xxs.cu new file mode 100644 index 00000000..233d9342 --- /dev/null +++ b/ml/backend/ggml/ggml/src/ggml-cuda/template-instances/mmq-instance-iq2_xxs.cu @@ -0,0 +1,5 @@ +// This file has been autogenerated by generate_cu_files.py, do not edit manually. + +#include "../mmq.cuh" + +DECL_MMQ_CASE(GGML_TYPE_IQ2_XXS); diff --git a/ml/backend/ggml/ggml/src/ggml-cuda/template-instances/mmq-instance-iq3_s.cu b/ml/backend/ggml/ggml/src/ggml-cuda/template-instances/mmq-instance-iq3_s.cu new file mode 100644 index 00000000..6092dc71 --- /dev/null +++ b/ml/backend/ggml/ggml/src/ggml-cuda/template-instances/mmq-instance-iq3_s.cu @@ -0,0 +1,5 @@ +// This file has been autogenerated by generate_cu_files.py, do not edit manually. + +#include "../mmq.cuh" + +DECL_MMQ_CASE(GGML_TYPE_IQ3_S); diff --git a/ml/backend/ggml/ggml/src/ggml-cuda/template-instances/mmq-instance-iq3_xxs.cu b/ml/backend/ggml/ggml/src/ggml-cuda/template-instances/mmq-instance-iq3_xxs.cu new file mode 100644 index 00000000..1d5bd201 --- /dev/null +++ b/ml/backend/ggml/ggml/src/ggml-cuda/template-instances/mmq-instance-iq3_xxs.cu @@ -0,0 +1,5 @@ +// This file has been autogenerated by generate_cu_files.py, do not edit manually. + +#include "../mmq.cuh" + +DECL_MMQ_CASE(GGML_TYPE_IQ3_XXS); diff --git a/ml/backend/ggml/ggml/src/ggml-cuda/template-instances/mmq-instance-iq4_nl.cu b/ml/backend/ggml/ggml/src/ggml-cuda/template-instances/mmq-instance-iq4_nl.cu new file mode 100644 index 00000000..eb02fab0 --- /dev/null +++ b/ml/backend/ggml/ggml/src/ggml-cuda/template-instances/mmq-instance-iq4_nl.cu @@ -0,0 +1,5 @@ +// This file has been autogenerated by generate_cu_files.py, do not edit manually. + +#include "../mmq.cuh" + +DECL_MMQ_CASE(GGML_TYPE_IQ4_NL); diff --git a/ml/backend/ggml/ggml/src/ggml-cuda/template-instances/mmq-instance-iq4_xs.cu b/ml/backend/ggml/ggml/src/ggml-cuda/template-instances/mmq-instance-iq4_xs.cu new file mode 100644 index 00000000..1eb3b743 --- /dev/null +++ b/ml/backend/ggml/ggml/src/ggml-cuda/template-instances/mmq-instance-iq4_xs.cu @@ -0,0 +1,5 @@ +// This file has been autogenerated by generate_cu_files.py, do not edit manually. 
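The pruning rules in generate_cu_files.py above explain the gaps in these instance files. get_head_sizes() yields head size 128 only for fully quantized K/V, adds 64 when K is F16, and adds 256 only for F16/F16, which works out to 2 x (3 + 5x2 + 5x6) = 86 fattn-vec files across the two VKQ sizes. On the WMMA side, a float KQ accumulator is never paired with cols_per_block 8; head sizes not divisible by 32 are dropped when cols_per_block is 8 (the WMMA fragment is 8x32, hence only 64, 96, 128 and 256 in the cpb8 file above); and the float/cpb32/head-size-256 case is dropped because of register spilling. Each of the 18 MMQ quantization formats, by contrast, gets exactly one mmq-instance file, like the ones in the surrounding hunks.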
+ +#include "../mmq.cuh" + +DECL_MMQ_CASE(GGML_TYPE_IQ4_XS); diff --git a/ml/backend/ggml/ggml/src/ggml-cuda/template-instances/mmq-instance-q2_k.cu b/ml/backend/ggml/ggml/src/ggml-cuda/template-instances/mmq-instance-q2_k.cu new file mode 100644 index 00000000..6415369d --- /dev/null +++ b/ml/backend/ggml/ggml/src/ggml-cuda/template-instances/mmq-instance-q2_k.cu @@ -0,0 +1,5 @@ +// This file has been autogenerated by generate_cu_files.py, do not edit manually. + +#include "../mmq.cuh" + +DECL_MMQ_CASE(GGML_TYPE_Q2_K); diff --git a/ml/backend/ggml/ggml/src/ggml-cuda/template-instances/mmq-instance-q3_k.cu b/ml/backend/ggml/ggml/src/ggml-cuda/template-instances/mmq-instance-q3_k.cu new file mode 100644 index 00000000..ffb6213a --- /dev/null +++ b/ml/backend/ggml/ggml/src/ggml-cuda/template-instances/mmq-instance-q3_k.cu @@ -0,0 +1,5 @@ +// This file has been autogenerated by generate_cu_files.py, do not edit manually. + +#include "../mmq.cuh" + +DECL_MMQ_CASE(GGML_TYPE_Q3_K); diff --git a/ml/backend/ggml/ggml/src/ggml-cuda/template-instances/mmq-instance-q4_0.cu b/ml/backend/ggml/ggml/src/ggml-cuda/template-instances/mmq-instance-q4_0.cu new file mode 100644 index 00000000..0c0b0c8a --- /dev/null +++ b/ml/backend/ggml/ggml/src/ggml-cuda/template-instances/mmq-instance-q4_0.cu @@ -0,0 +1,5 @@ +// This file has been autogenerated by generate_cu_files.py, do not edit manually. + +#include "../mmq.cuh" + +DECL_MMQ_CASE(GGML_TYPE_Q4_0); diff --git a/ml/backend/ggml/ggml/src/ggml-cuda/template-instances/mmq-instance-q4_1.cu b/ml/backend/ggml/ggml/src/ggml-cuda/template-instances/mmq-instance-q4_1.cu new file mode 100644 index 00000000..ee67f694 --- /dev/null +++ b/ml/backend/ggml/ggml/src/ggml-cuda/template-instances/mmq-instance-q4_1.cu @@ -0,0 +1,5 @@ +// This file has been autogenerated by generate_cu_files.py, do not edit manually. + +#include "../mmq.cuh" + +DECL_MMQ_CASE(GGML_TYPE_Q4_1); diff --git a/ml/backend/ggml/ggml/src/ggml-cuda/template-instances/mmq-instance-q4_k.cu b/ml/backend/ggml/ggml/src/ggml-cuda/template-instances/mmq-instance-q4_k.cu new file mode 100644 index 00000000..9eeb3cd7 --- /dev/null +++ b/ml/backend/ggml/ggml/src/ggml-cuda/template-instances/mmq-instance-q4_k.cu @@ -0,0 +1,5 @@ +// This file has been autogenerated by generate_cu_files.py, do not edit manually. + +#include "../mmq.cuh" + +DECL_MMQ_CASE(GGML_TYPE_Q4_K); diff --git a/ml/backend/ggml/ggml/src/ggml-cuda/template-instances/mmq-instance-q5_0.cu b/ml/backend/ggml/ggml/src/ggml-cuda/template-instances/mmq-instance-q5_0.cu new file mode 100644 index 00000000..cc57fb97 --- /dev/null +++ b/ml/backend/ggml/ggml/src/ggml-cuda/template-instances/mmq-instance-q5_0.cu @@ -0,0 +1,5 @@ +// This file has been autogenerated by generate_cu_files.py, do not edit manually. + +#include "../mmq.cuh" + +DECL_MMQ_CASE(GGML_TYPE_Q5_0); diff --git a/ml/backend/ggml/ggml/src/ggml-cuda/template-instances/mmq-instance-q5_1.cu b/ml/backend/ggml/ggml/src/ggml-cuda/template-instances/mmq-instance-q5_1.cu new file mode 100644 index 00000000..721ac790 --- /dev/null +++ b/ml/backend/ggml/ggml/src/ggml-cuda/template-instances/mmq-instance-q5_1.cu @@ -0,0 +1,5 @@ +// This file has been autogenerated by generate_cu_files.py, do not edit manually. 
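At runtime, the matching MMQ instantiation is selected by switching on the quantized type of the weight tensor. A rough sketch of that dispatch, under the assumption that mmq.cuh exposes a mul_mat_q_case<type> launcher as the DECL_MMQ_CASE lines suggest (the actual switch and the launcher's real signature live in code outside this hunk, so this is a simplified illustration):

// Hypothetical dispatcher: each case resolves to an explicit instantiation
// that one of the mmq-instance-*.cu files in this directory compiled.
static void dispatch_mul_mat_q(ggml_backend_cuda_context & ctx, ggml_type type, ggml_tensor * dst) {
    switch (type) {
        case GGML_TYPE_Q4_0: mul_mat_q_case<GGML_TYPE_Q4_0>(ctx, dst); break;
        case GGML_TYPE_Q8_0: mul_mat_q_case<GGML_TYPE_Q8_0>(ctx, dst); break;
        // ... one case per entry in TYPES_MMQ ...
        default: GGML_ABORT("unsupported type");
    }
}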
+ +#include "../mmq.cuh" + +DECL_MMQ_CASE(GGML_TYPE_Q5_1); diff --git a/ml/backend/ggml/ggml/src/ggml-cuda/template-instances/mmq-instance-q5_k.cu b/ml/backend/ggml/ggml/src/ggml-cuda/template-instances/mmq-instance-q5_k.cu new file mode 100644 index 00000000..a2e90ffd --- /dev/null +++ b/ml/backend/ggml/ggml/src/ggml-cuda/template-instances/mmq-instance-q5_k.cu @@ -0,0 +1,5 @@ +// This file has been autogenerated by generate_cu_files.py, do not edit manually. + +#include "../mmq.cuh" + +DECL_MMQ_CASE(GGML_TYPE_Q5_K); diff --git a/ml/backend/ggml/ggml/src/ggml-cuda/template-instances/mmq-instance-q6_k.cu b/ml/backend/ggml/ggml/src/ggml-cuda/template-instances/mmq-instance-q6_k.cu new file mode 100644 index 00000000..470938fe --- /dev/null +++ b/ml/backend/ggml/ggml/src/ggml-cuda/template-instances/mmq-instance-q6_k.cu @@ -0,0 +1,5 @@ +// This file has been autogenerated by generate_cu_files.py, do not edit manually. + +#include "../mmq.cuh" + +DECL_MMQ_CASE(GGML_TYPE_Q6_K); diff --git a/ml/backend/ggml/ggml/src/ggml-cuda/template-instances/mmq-instance-q8_0.cu b/ml/backend/ggml/ggml/src/ggml-cuda/template-instances/mmq-instance-q8_0.cu new file mode 100644 index 00000000..974477bb --- /dev/null +++ b/ml/backend/ggml/ggml/src/ggml-cuda/template-instances/mmq-instance-q8_0.cu @@ -0,0 +1,5 @@ +// This file has been autogenerated by generate_cu_files.py, do not edit manually. + +#include "../mmq.cuh" + +DECL_MMQ_CASE(GGML_TYPE_Q8_0); diff --git a/llama/ggml-cuda/tsembd.cu b/ml/backend/ggml/ggml/src/ggml-cuda/tsembd.cu similarity index 59% rename from llama/ggml-cuda/tsembd.cu rename to ml/backend/ggml/ggml/src/ggml-cuda/tsembd.cu index c6036783..153ddbcd 100644 --- a/llama/ggml-cuda/tsembd.cu +++ b/ml/backend/ggml/ggml/src/ggml-cuda/tsembd.cu @@ -1,29 +1,3 @@ -/** - * llama.cpp - commit 46e3556e01b824e52395fb050b29804b6cff2a7c - do not edit this file - * - * MIT License - * - * Copyright (c) 2023-2024 The ggml authors - * - * Permission is hereby granted, free of charge, to any person obtaining a copy - * of this software and associated documentation files (the "Software"), to deal - * in the Software without restriction, including without limitation the rights - * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell - * copies of the Software, and to permit persons to whom the Software is - * furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in all - * copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. 
- */ - #include "tsembd.cuh" static __global__ void timestep_embedding_f32(const float * timesteps, float * dst, const int nb1, const int dim, const int max_period) { diff --git a/ml/backend/ggml/ggml/src/ggml-cuda/tsembd.cuh b/ml/backend/ggml/ggml/src/ggml-cuda/tsembd.cuh new file mode 100644 index 00000000..84340e3d --- /dev/null +++ b/ml/backend/ggml/ggml/src/ggml-cuda/tsembd.cuh @@ -0,0 +1,5 @@ +#include "common.cuh" + +#define CUDA_TIMESTEP_EMBEDDING_BLOCK_SIZE 256 + +void ggml_cuda_op_timestep_embedding(ggml_backend_cuda_context & ctx, ggml_tensor * dst); diff --git a/llama/ggml-cuda/unary.cu b/ml/backend/ggml/ggml/src/ggml-cuda/unary.cu similarity index 92% rename from llama/ggml-cuda/unary.cu rename to ml/backend/ggml/ggml/src/ggml-cuda/unary.cu index e20cba02..81fc9220 100644 --- a/llama/ggml-cuda/unary.cu +++ b/ml/backend/ggml/ggml/src/ggml-cuda/unary.cu @@ -1,29 +1,3 @@ -/** - * llama.cpp - commit 46e3556e01b824e52395fb050b29804b6cff2a7c - do not edit this file - * - * MIT License - * - * Copyright (c) 2023-2024 The ggml authors - * - * Permission is hereby granted, free of charge, to any person obtaining a copy - * of this software and associated documentation files (the "Software"), to deal - * in the Software without restriction, including without limitation the rights - * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell - * copies of the Software, and to permit persons to whom the Software is - * furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in all - * copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. - */ - #include "unary.cuh" static __global__ void neg_f32(const float * x, float * dst, const int k) { diff --git a/llama/ggml-cuda/unary.cuh b/ml/backend/ggml/ggml/src/ggml-cuda/unary.cuh similarity index 58% rename from llama/ggml-cuda/unary.cuh rename to ml/backend/ggml/ggml/src/ggml-cuda/unary.cuh index 3a9161bf..c9193672 100644 --- a/llama/ggml-cuda/unary.cuh +++ b/ml/backend/ggml/ggml/src/ggml-cuda/unary.cuh @@ -1,29 +1,3 @@ -/** - * llama.cpp - commit 46e3556e01b824e52395fb050b29804b6cff2a7c - do not edit this file - * - * MIT License - * - * Copyright (c) 2023-2024 The ggml authors - * - * Permission is hereby granted, free of charge, to any person obtaining a copy - * of this software and associated documentation files (the "Software"), to deal - * in the Software without restriction, including without limitation the rights - * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell - * copies of the Software, and to permit persons to whom the Software is - * furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in all - * copies or substantial portions of the Software. 
- * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. - */ - #include "common.cuh" #define CUDA_NEG_BLOCK_SIZE 256 diff --git a/llama/ggml-cuda/upscale.cu b/ml/backend/ggml/ggml/src/ggml-cuda/upscale.cu similarity index 63% rename from llama/ggml-cuda/upscale.cu rename to ml/backend/ggml/ggml/src/ggml-cuda/upscale.cu index 19c8f2a1..cf513c3a 100644 --- a/llama/ggml-cuda/upscale.cu +++ b/ml/backend/ggml/ggml/src/ggml-cuda/upscale.cu @@ -1,29 +1,3 @@ -/** - * llama.cpp - commit 46e3556e01b824e52395fb050b29804b6cff2a7c - do not edit this file - * - * MIT License - * - * Copyright (c) 2023-2024 The ggml authors - * - * Permission is hereby granted, free of charge, to any person obtaining a copy - * of this software and associated documentation files (the "Software"), to deal - * in the Software without restriction, including without limitation the rights - * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell - * copies of the Software, and to permit persons to whom the Software is - * furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in all - * copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. 
- */ - #include "upscale.cuh" static __global__ void upscale_f32(const float * x, float * dst, diff --git a/ml/backend/ggml/ggml/src/ggml-cuda/upscale.cuh b/ml/backend/ggml/ggml/src/ggml-cuda/upscale.cuh new file mode 100644 index 00000000..d4d76523 --- /dev/null +++ b/ml/backend/ggml/ggml/src/ggml-cuda/upscale.cuh @@ -0,0 +1,5 @@ +#include "common.cuh" + +#define CUDA_UPSCALE_BLOCK_SIZE 256 + +void ggml_cuda_op_upscale(ggml_backend_cuda_context & ctx, ggml_tensor * dst); diff --git a/llama/ggml-cuda/vecdotq.cuh b/ml/backend/ggml/ggml/src/ggml-cuda/vecdotq.cuh similarity index 96% rename from llama/ggml-cuda/vecdotq.cuh rename to ml/backend/ggml/ggml/src/ggml-cuda/vecdotq.cuh index 43719cbd..40091a0e 100644 --- a/llama/ggml-cuda/vecdotq.cuh +++ b/ml/backend/ggml/ggml/src/ggml-cuda/vecdotq.cuh @@ -1,29 +1,3 @@ -/** - * llama.cpp - commit 46e3556e01b824e52395fb050b29804b6cff2a7c - do not edit this file - * - * MIT License - * - * Copyright (c) 2023-2024 The ggml authors - * - * Permission is hereby granted, free of charge, to any person obtaining a copy - * of this software and associated documentation files (the "Software"), to deal - * in the Software without restriction, including without limitation the rights - * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell - * copies of the Software, and to permit persons to whom the Software is - * furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in all - * copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. 
- */ - #include "common.cuh" #include <cstdint> diff --git a/ml/backend/ggml/ggml/src/ggml-cuda/vendors/cuda.h b/ml/backend/ggml/ggml/src/ggml-cuda/vendors/cuda.h new file mode 100644 index 00000000..db9f6a16 --- /dev/null +++ b/ml/backend/ggml/ggml/src/ggml-cuda/vendors/cuda.h @@ -0,0 +1,14 @@ +#pragma once + +#include <cuda_runtime.h> +#include <cuda.h> +#include <cublas_v2.h> +#include <cuda_fp16.h> + +#if CUDART_VERSION < 11020 +#define CU_DEVICE_ATTRIBUTE_VIRTUAL_MEMORY_MANAGEMENT_SUPPORTED CU_DEVICE_ATTRIBUTE_VIRTUAL_ADDRESS_MANAGEMENT_SUPPORTED +#define CUBLAS_TF32_TENSOR_OP_MATH CUBLAS_TENSOR_OP_MATH +#define CUBLAS_COMPUTE_16F CUDA_R_16F +#define CUBLAS_COMPUTE_32F CUDA_R_32F +#define cublasComputeType_t cudaDataType_t +#endif // CUDART_VERSION < 11020 diff --git a/llama/ggml-cuda/vendors/hip.h b/ml/backend/ggml/ggml/src/ggml-cuda/vendors/hip.h similarity index 85% rename from llama/ggml-cuda/vendors/hip.h rename to ml/backend/ggml/ggml/src/ggml-cuda/vendors/hip.h index 7b3102f3..c905b15d 100644 --- a/llama/ggml-cuda/vendors/hip.h +++ b/ml/backend/ggml/ggml/src/ggml-cuda/vendors/hip.h @@ -1,29 +1,3 @@ -/** - * llama.cpp - commit 46e3556e01b824e52395fb050b29804b6cff2a7c - do not edit this file - * - * MIT License - * - * Copyright (c) 2023-2024 The ggml authors - * - * Permission is hereby granted, free of charge, to any person obtaining a copy - * of this software and associated documentation files (the "Software"), to deal - * in the Software without restriction, including without limitation the rights - * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell - * copies of the Software, and to permit persons to whom the Software is - * furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in all - * copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. - */ - #pragma once #include <hip/hip_runtime.h> diff --git a/llama/ggml-cuda/vendors/musa.h b/ml/backend/ggml/ggml/src/ggml-cuda/vendors/musa.h similarity index 83% rename from llama/ggml-cuda/vendors/musa.h rename to ml/backend/ggml/ggml/src/ggml-cuda/vendors/musa.h index 7b1a4ac4..6cc1b69e 100644 --- a/llama/ggml-cuda/vendors/musa.h +++ b/ml/backend/ggml/ggml/src/ggml-cuda/vendors/musa.h @@ -1,29 +1,3 @@ -/** - * llama.cpp - commit 46e3556e01b824e52395fb050b29804b6cff2a7c - do not edit this file - * - * MIT License - * - * Copyright (c) 2023-2024 The ggml authors - * - * Permission is hereby granted, free of charge, to any person obtaining a copy - * of this software and associated documentation files (the "Software"), to deal - * in the Software without restriction, including without limitation the rights - * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell - * copies of the Software, and to permit persons to whom the Software is - * furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in all - * copies or substantial portions of the Software.
- * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. - */ - #pragma once #include <musa_runtime.h> diff --git a/llama/ggml-cuda/wkv6.cu b/ml/backend/ggml/ggml/src/ggml-cuda/wkv6.cu similarity index 71% rename from llama/ggml-cuda/wkv6.cu rename to ml/backend/ggml/ggml/src/ggml-cuda/wkv6.cu index fe4e5b9d..42578341 100644 --- a/llama/ggml-cuda/wkv6.cu +++ b/ml/backend/ggml/ggml/src/ggml-cuda/wkv6.cu @@ -1,29 +1,3 @@ -/** - * llama.cpp - commit 46e3556e01b824e52395fb050b29804b6cff2a7c - do not edit this file - * - * MIT License - * - * Copyright (c) 2023-2024 The ggml authors - * - * Permission is hereby granted, free of charge, to any person obtaining a copy - * of this software and associated documentation files (the "Software"), to deal - * in the Software without restriction, including without limitation the rights - * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell - * copies of the Software, and to permit persons to whom the Software is - * furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in all - * copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. - */ - #include "common.cuh" #include "wkv6.cuh" diff --git a/ml/backend/ggml/ggml/src/ggml-cuda/wkv6.cuh b/ml/backend/ggml/ggml/src/ggml-cuda/wkv6.cuh new file mode 100644 index 00000000..a7124ee5 --- /dev/null +++ b/ml/backend/ggml/ggml/src/ggml-cuda/wkv6.cuh @@ -0,0 +1,5 @@ +#include "common.cuh" + +#define CUDA_WKV_BLOCK_SIZE 64 + +void ggml_cuda_op_rwkv_wkv6(ggml_backend_cuda_context & ctx, ggml_tensor * dst); diff --git a/ml/backend/ggml/ggml/src/ggml-hip/CMakeLists.txt b/ml/backend/ggml/ggml/src/ggml-hip/CMakeLists.txt new file mode 100644 index 00000000..b15fbd24 --- /dev/null +++ b/ml/backend/ggml/ggml/src/ggml-hip/CMakeLists.txt @@ -0,0 +1,104 @@ +if (NOT EXISTS $ENV{ROCM_PATH}) + if (NOT EXISTS /opt/rocm) + set(ROCM_PATH /usr) + else() + set(ROCM_PATH /opt/rocm) + endif() +else() + set(ROCM_PATH $ENV{ROCM_PATH}) +endif() + +list(APPEND CMAKE_PREFIX_PATH ${ROCM_PATH}) +list(APPEND CMAKE_PREFIX_PATH "${ROCM_PATH}/lib64/cmake") + +# CMake on Windows doesn't support the HIP language yet +if (WIN32) + set(CXX_IS_HIPCC TRUE) +else() + string(REGEX MATCH "hipcc(\.bat)?$" CXX_IS_HIPCC "${CMAKE_CXX_COMPILER}") +endif() + +if (CXX_IS_HIPCC) + if (LINUX) + if (NOT ${CMAKE_CXX_COMPILER_ID} MATCHES "Clang") + message(WARNING "Only LLVM is supported for HIP, hint: CXX=/opt/rocm/llvm/bin/clang++") + endif() + + message(WARNING "Setting hipcc as the C++ compiler is legacy behavior." " Prefer setting the HIP compiler directly.
See README for details.") + endif() +else() + # Forward AMDGPU_TARGETS to CMAKE_HIP_ARCHITECTURES. + if (AMDGPU_TARGETS AND NOT CMAKE_HIP_ARCHITECTURES) + set(CMAKE_HIP_ARCHITECTURES ${AMDGPU_TARGETS}) + endif() + cmake_minimum_required(VERSION 3.21) + enable_language(HIP) +endif() + +find_package(hip REQUIRED) +find_package(hipblas REQUIRED) +find_package(rocblas REQUIRED) + +message(STATUS "HIP and hipBLAS found") + +file(GLOB GGML_HEADERS_ROCM "../ggml-cuda/*.cuh") +list(APPEND GGML_HEADERS_ROCM "../../include/ggml-cuda.h") + +file(GLOB GGML_SOURCES_ROCM "../ggml-cuda/*.cu") +file(GLOB SRCS "../ggml-cuda/template-instances/fattn-wmma*.cu") +list(APPEND GGML_SOURCES_ROCM ${SRCS}) +file(GLOB SRCS "../ggml-cuda/template-instances/mmq*.cu") +list(APPEND GGML_SOURCES_ROCM ${SRCS}) + +if (GGML_CUDA_FA_ALL_QUANTS) + file(GLOB SRCS "../ggml-cuda/template-instances/fattn-vec*.cu") + list(APPEND GGML_SOURCES_ROCM ${SRCS}) + add_compile_definitions(GGML_CUDA_FA_ALL_QUANTS) +else() + file(GLOB SRCS "../ggml-cuda/template-instances/fattn-vec*q4_0-q4_0.cu") + list(APPEND GGML_SOURCES_ROCM ${SRCS}) + file(GLOB SRCS "../ggml-cuda/template-instances/fattn-vec*q8_0-q8_0.cu") + list(APPEND GGML_SOURCES_ROCM ${SRCS}) + file(GLOB SRCS "../ggml-cuda/template-instances/fattn-vec*f16-f16.cu") + list(APPEND GGML_SOURCES_ROCM ${SRCS}) +endif() + +ggml_add_backend_library(ggml-hip + ${GGML_HEADERS_ROCM} + ${GGML_SOURCES_ROCM} + ) + +# TODO: do not use CUDA definitions for HIP +target_compile_definitions(ggml PUBLIC GGML_USE_CUDA) + +add_compile_definitions(GGML_USE_HIP) + +if (GGML_HIP_UMA) + add_compile_definitions(GGML_HIP_UMA) +endif() + +if (GGML_CUDA_FORCE_MMQ) + add_compile_definitions(GGML_CUDA_FORCE_MMQ) +endif() + +if (GGML_CUDA_FORCE_CUBLAS) + add_compile_definitions(GGML_CUDA_FORCE_CUBLAS) +endif() + +if (GGML_CUDA_NO_PEER_COPY) + add_compile_definitions(GGML_CUDA_NO_PEER_COPY) +endif() + +if (CXX_IS_HIPCC) + set_source_files_properties(${GGML_SOURCES_ROCM} PROPERTIES LANGUAGE CXX) + target_link_libraries(ggml-hip PRIVATE hip::device) +else() + set_source_files_properties(${GGML_SOURCES_ROCM} PROPERTIES LANGUAGE HIP) +endif() + +if (GGML_STATIC) + message(FATAL_ERROR "Static linking not supported for HIP/ROCm") +endif() + +target_link_libraries(ggml-hip PRIVATE ggml-base hip::host roc::rocblas roc::hipblas) diff --git a/llama/ggml-impl.h b/ml/backend/ggml/ggml/src/ggml-impl.h similarity index 93% rename from llama/ggml-impl.h rename to ml/backend/ggml/ggml/src/ggml-impl.h index 46760fb3..549772c5 100644 --- a/llama/ggml-impl.h +++ b/ml/backend/ggml/ggml/src/ggml-impl.h @@ -1,29 +1,3 @@ -/** - * llama.cpp - commit 46e3556e01b824e52395fb050b29804b6cff2a7c - do not edit this file - * - * MIT License - * - * Copyright (c) 2023-2024 The ggml authors - * - * Permission is hereby granted, free of charge, to any person obtaining a copy - * of this software and associated documentation files (the "Software"), to deal - * in the Software without restriction, including without limitation the rights - * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell - * copies of the Software, and to permit persons to whom the Software is - * furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in all - * copies or substantial portions of the Software. 
- * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. - */ - #pragma once // GGML internal header diff --git a/ml/backend/ggml/ggml/src/ggml-metal/CMakeLists.txt b/ml/backend/ggml/ggml/src/ggml-metal/CMakeLists.txt new file mode 100644 index 00000000..89fcde2f --- /dev/null +++ b/ml/backend/ggml/ggml/src/ggml-metal/CMakeLists.txt @@ -0,0 +1,121 @@ +find_library(FOUNDATION_LIBRARY Foundation REQUIRED) +find_library(METAL_FRAMEWORK Metal REQUIRED) +find_library(METALKIT_FRAMEWORK MetalKit REQUIRED) + +message(STATUS "Metal framework found") + +ggml_add_backend_library(ggml-metal + ggml-metal.m + ) + +target_link_libraries(ggml-metal PRIVATE + ${FOUNDATION_LIBRARY} + ${METAL_FRAMEWORK} + ${METALKIT_FRAMEWORK} + ) + +if (GGML_METAL_NDEBUG) + add_compile_definitions(GGML_METAL_NDEBUG) +endif() + +if (GGML_METAL_USE_BF16) + add_compile_definitions(GGML_METAL_USE_BF16) +endif() + +# copy metal files to bin directory +configure_file(../ggml-common.h ${CMAKE_RUNTIME_OUTPUT_DIRECTORY}/ggml-common.h COPYONLY) +configure_file(ggml-metal.metal ${CMAKE_RUNTIME_OUTPUT_DIRECTORY}/ggml-metal.metal COPYONLY) +configure_file(ggml-metal-impl.h ${CMAKE_RUNTIME_OUTPUT_DIRECTORY}/ggml-metal-impl.h COPYONLY) + +if (GGML_METAL_EMBED_LIBRARY) + enable_language(ASM) + + add_compile_definitions(GGML_METAL_EMBED_LIBRARY) + + set(METALLIB_COMMON "${CMAKE_CURRENT_SOURCE_DIR}/../ggml-common.h") + set(METALLIB_SOURCE "${CMAKE_CURRENT_SOURCE_DIR}/ggml-metal.metal") + set(METALLIB_IMPL "${CMAKE_CURRENT_SOURCE_DIR}/ggml-metal-impl.h") + + file(MAKE_DIRECTORY "${CMAKE_BINARY_DIR}/autogenerated") + + # merge ggml-common.h and ggml-metal.metal into a single file + set(METALLIB_EMBED_ASM "${CMAKE_BINARY_DIR}/autogenerated/ggml-metal-embed.s") + set(METALLIB_SOURCE_EMBED "${CMAKE_BINARY_DIR}/autogenerated/ggml-metal-embed.metal") + set(METALLIB_SOURCE_EMBED_TMP "${CMAKE_BINARY_DIR}/autogenerated/ggml-metal-embed.metal.tmp") + + add_custom_command( + OUTPUT ${METALLIB_EMBED_ASM} + COMMAND echo "Embedding Metal library" + COMMAND sed -e '/__embed_ggml-common.h__/r ${METALLIB_COMMON}' -e '/__embed_ggml-common.h__/d' < ${METALLIB_SOURCE} > ${METALLIB_SOURCE_EMBED_TMP} + COMMAND sed -e '/\#include \"ggml-metal-impl.h\"/r ${METALLIB_IMPL}' -e '/\#include \"ggml-metal-impl.h\"/d' < ${METALLIB_SOURCE_EMBED_TMP} > ${METALLIB_SOURCE_EMBED} + COMMAND echo ".section __DATA,__ggml_metallib" > ${METALLIB_EMBED_ASM} + COMMAND echo ".globl _ggml_metallib_start" >> ${METALLIB_EMBED_ASM} + COMMAND echo "_ggml_metallib_start:" >> ${METALLIB_EMBED_ASM} + COMMAND echo ".incbin \\\"${METALLIB_SOURCE_EMBED}\\\"" >> ${METALLIB_EMBED_ASM} + COMMAND echo ".globl _ggml_metallib_end" >> ${METALLIB_EMBED_ASM} + COMMAND echo "_ggml_metallib_end:" >> ${METALLIB_EMBED_ASM} + DEPENDS ../ggml-common.h ggml-metal.metal ggml-metal-impl.h + COMMENT "Generate assembly for embedded Metal library" + ) + + target_sources(ggml-metal PRIVATE ${METALLIB_EMBED_ASM}) +else() + if (GGML_METAL_SHADER_DEBUG) + # custom command to do the following: + # xcrun -sdk macosx metal -fno-fast-math -c ggml-metal.metal -o ggml-metal.air + # xcrun -sdk 
macosx metallib ggml-metal.air -o default.metallib + # + # note: this is the only way I found to disable fast-math in Metal. it's ugly, but at least it works + # disabling fast math is needed in order to pass tests/test-backend-ops + # note: adding -fno-inline fixes the tests when using MTL_SHADER_VALIDATION=1 + # note: unfortunately, we have to call it default.metallib instead of ggml.metallib + # ref: https://github.com/ggerganov/whisper.cpp/issues/1720 + set(XC_FLAGS -fno-fast-math -fno-inline -g) + else() + set(XC_FLAGS -O3) + endif() + + # Append macOS metal versioning flags + if (GGML_METAL_MACOSX_VERSION_MIN) + message(STATUS "Adding -mmacosx-version-min=${GGML_METAL_MACOSX_VERSION_MIN} flag to metal compilation") + list (APPEND XC_FLAGS -mmacosx-version-min=${GGML_METAL_MACOSX_VERSION_MIN}) + endif() + + if (GGML_METAL_STD) + message(STATUS "Adding -std=${GGML_METAL_STD} flag to metal compilation") + list (APPEND XC_FLAGS -std=${GGML_METAL_STD}) + endif() + + add_custom_command( + OUTPUT ${CMAKE_RUNTIME_OUTPUT_DIRECTORY}/default.metallib + COMMAND xcrun -sdk macosx metal ${XC_FLAGS} -c ${CMAKE_RUNTIME_OUTPUT_DIRECTORY}/ggml-metal.metal -o ${CMAKE_RUNTIME_OUTPUT_DIRECTORY}/ggml-metal.air + COMMAND xcrun -sdk macosx metallib ${CMAKE_RUNTIME_OUTPUT_DIRECTORY}/ggml-metal.air -o ${CMAKE_RUNTIME_OUTPUT_DIRECTORY}/default.metallib + COMMAND rm -f ${CMAKE_RUNTIME_OUTPUT_DIRECTORY}/ggml-metal.air + COMMAND rm -f ${CMAKE_RUNTIME_OUTPUT_DIRECTORY}/ggml-common.h + COMMAND rm -f ${CMAKE_RUNTIME_OUTPUT_DIRECTORY}/ggml-metal.metal + DEPENDS ggml-metal.metal ggml-common.h + COMMENT "Compiling Metal kernels" + ) + + # FIXME: only add to the ggml-metal target? + add_custom_target( + ggml-metal-lib ALL + DEPENDS ${CMAKE_RUNTIME_OUTPUT_DIRECTORY}/default.metallib + ) +endif() # GGML_METAL_EMBED_LIBRARY + +if (NOT GGML_METAL_EMBED_LIBRARY) + install( + FILES src/ggml-metal/ggml-metal.metal + PERMISSIONS + OWNER_READ + OWNER_WRITE + GROUP_READ + WORLD_READ + DESTINATION ${CMAKE_INSTALL_BINDIR}) + + install( + FILES ${CMAKE_RUNTIME_OUTPUT_DIRECTORY}/default.metallib + DESTINATION ${CMAKE_INSTALL_BINDIR} + ) +endif() diff --git a/llama/ggml-metal-embed.metal b/ml/backend/ggml/ggml/src/ggml-metal/ggml-metal-embed.metal similarity index 99% rename from llama/ggml-metal-embed.metal rename to ml/backend/ggml/ggml/src/ggml-metal/ggml-metal-embed.metal index 7f4666c9..2e51b87a 100644 --- a/llama/ggml-metal-embed.metal +++ b/ml/backend/ggml/ggml/src/ggml-metal/ggml-metal-embed.metal @@ -1,58 +1,7 @@ -/** - * llama.cpp - commit 46e3556e01b824e52395fb050b29804b6cff2a7c - do not edit this file - * - * MIT License - * - * Copyright (c) 2023-2024 The ggml authors - * - * Permission is hereby granted, free of charge, to any person obtaining a copy - * of this software and associated documentation files (the "Software"), to deal - * in the Software without restriction, including without limitation the rights - * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell - * copies of the Software, and to permit persons to whom the Software is - * furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in all - * copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. - */ - +// Code generated Fri Jan 10 13:05:45 PST 2025. DO NOT EDIT. #define GGML_COMMON_DECL_METAL #define GGML_COMMON_IMPL_METAL #if defined(GGML_METAL_EMBED_LIBRARY) -/** - * llama.cpp - commit 46e3556e01b824e52395fb050b29804b6cff2a7c - do not edit this file - * - * MIT License - * - * Copyright (c) 2023-2024 The ggml authors - * - * Permission is hereby granted, free of charge, to any person obtaining a copy - * of this software and associated documentation files (the "Software"), to deal - * in the Software without restriction, including without limitation the rights - * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell - * copies of the Software, and to permit persons to whom the Software is - * furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in all - * copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. - */ - #ifndef GGML_COMMON_DECL #if defined(GGML_COMMON_DECL_C) @@ -1910,32 +1859,6 @@ GGML_TABLE_END() // TODO: this should not be a relative path, but can't figure out how to set Metal include paths in Package.swift #include "../ggml-common.h" #endif -/** - * llama.cpp - commit 46e3556e01b824e52395fb050b29804b6cff2a7c - do not edit this file - * - * MIT License - * - * Copyright (c) 2023-2024 The ggml authors - * - * Permission is hereby granted, free of charge, to any person obtaining a copy - * of this software and associated documentation files (the "Software"), to deal - * in the Software without restriction, including without limitation the rights - * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell - * copies of the Software, and to permit persons to whom the Software is - * furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in all - * copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. 
- */ - #ifndef GGML_METAL_IMPL #define GGML_METAL_IMPL diff --git a/llama/ggml-metal-embed_darwin_arm64.s b/ml/backend/ggml/ggml/src/ggml-metal/ggml-metal-embed.s similarity index 87% rename from llama/ggml-metal-embed_darwin_arm64.s rename to ml/backend/ggml/ggml/src/ggml-metal/ggml-metal-embed.s index a108c825..47c729a6 100644 --- a/llama/ggml-metal-embed_darwin_arm64.s +++ b/ml/backend/ggml/ggml/src/ggml-metal/ggml-metal-embed.s @@ -3,4 +3,4 @@ _ggml_metallib_start: .incbin "ggml-metal-embed.metal" .globl _ggml_metallib_end -_ggml_metallib_end: \ No newline at end of file +_ggml_metallib_end: diff --git a/llama/ggml-metal-impl.h b/ml/backend/ggml/ggml/src/ggml-metal/ggml-metal-impl.h similarity index 81% rename from llama/ggml-metal-impl.h rename to ml/backend/ggml/ggml/src/ggml-metal/ggml-metal-impl.h index 19103fb5..e3dc25f1 100644 --- a/llama/ggml-metal-impl.h +++ b/ml/backend/ggml/ggml/src/ggml-metal/ggml-metal-impl.h @@ -1,29 +1,3 @@ -/** - * llama.cpp - commit 46e3556e01b824e52395fb050b29804b6cff2a7c - do not edit this file - * - * MIT License - * - * Copyright (c) 2023-2024 The ggml authors - * - * Permission is hereby granted, free of charge, to any person obtaining a copy - * of this software and associated documentation files (the "Software"), to deal - * in the Software without restriction, including without limitation the rights - * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell - * copies of the Software, and to permit persons to whom the Software is - * furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in all - * copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. - */ - #ifndef GGML_METAL_IMPL #define GGML_METAL_IMPL diff --git a/llama/ggml-metal_darwin_arm64.m b/ml/backend/ggml/ggml/src/ggml-metal/ggml-metal.m similarity index 99% rename from llama/ggml-metal_darwin_arm64.m rename to ml/backend/ggml/ggml/src/ggml-metal/ggml-metal.m index d72129c3..318addec 100644 --- a/llama/ggml-metal_darwin_arm64.m +++ b/ml/backend/ggml/ggml/src/ggml-metal/ggml-metal.m @@ -1,29 +1,3 @@ -/** - * llama.cpp - commit 46e3556e01b824e52395fb050b29804b6cff2a7c - do not edit this file - * - * MIT License - * - * Copyright (c) 2023-2024 The ggml authors - * - * Permission is hereby granted, free of charge, to any person obtaining a copy - * of this software and associated documentation files (the "Software"), to deal - * in the Software without restriction, including without limitation the rights - * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell - * copies of the Software, and to permit persons to whom the Software is - * furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in all - * copies or substantial portions of the Software. 
- * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. - */ - #import "ggml-metal.h" #import "ggml-impl.h" @@ -4246,6 +4220,7 @@ static void ggml_backend_metal_buffer_free_buffer(ggml_backend_buffer_t buffer) } free(ctx); + free(buffer); } static void * ggml_backend_metal_buffer_get_base(ggml_backend_buffer_t buffer) { diff --git a/llama/ggml-metal.metal b/ml/backend/ggml/ggml/src/ggml-metal/ggml-metal.metal similarity index 99% rename from llama/ggml-metal.metal rename to ml/backend/ggml/ggml/src/ggml-metal/ggml-metal.metal index 1bca0972..204c93e6 100644 --- a/llama/ggml-metal.metal +++ b/ml/backend/ggml/ggml/src/ggml-metal/ggml-metal.metal @@ -1,29 +1,3 @@ -/** - * llama.cpp - commit 46e3556e01b824e52395fb050b29804b6cff2a7c - do not edit this file - * - * MIT License - * - * Copyright (c) 2023-2024 The ggml authors - * - * Permission is hereby granted, free of charge, to any person obtaining a copy - * of this software and associated documentation files (the "Software"), to deal - * in the Software without restriction, including without limitation the rights - * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell - * copies of the Software, and to permit persons to whom the Software is - * furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in all - * copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. - */ - #define GGML_COMMON_DECL_METAL #define GGML_COMMON_IMPL_METAL #if defined(GGML_METAL_EMBED_LIBRARY) diff --git a/ml/backend/ggml/ggml/src/ggml-metal/metal.go b/ml/backend/ggml/ggml/src/ggml-metal/metal.go new file mode 100644 index 00000000..1025e205 --- /dev/null +++ b/ml/backend/ggml/ggml/src/ggml-metal/metal.go @@ -0,0 +1,9 @@ +//go:build darwin && arm64 + +package metal + +//go:generate sh -c "{ echo // Code generated $(date). DO NOT EDIT.; sed -e '/__embed_ggml-common.h__/r ../ggml-common.h' -e '/__embed_ggml-common.h__/d' -e '/#include \"ggml-metal-impl.h\"/r ggml-metal-impl.h' -e '/#include \"ggml-metal-impl.h\"/d' ggml-metal.metal; } >ggml-metal-embed.metal" + +// #cgo CPPFLAGS: -DGGML_METAL_EMBED_LIBRARY -I.. 
-I../../include +// #cgo LDFLAGS: -framework Metal -framework MetalKit +import "C" diff --git a/ml/backend/ggml/ggml/src/ggml-opt.cpp b/ml/backend/ggml/ggml/src/ggml-opt.cpp new file mode 100644 index 00000000..7c3e2410 --- /dev/null +++ b/ml/backend/ggml/ggml/src/ggml-opt.cpp @@ -0,0 +1,854 @@ +#include "ggml-opt.h" + +#include "ggml.h" +#include "ggml-alloc.h" +#include "ggml-backend.h" +#include "ggml-impl.h" + +#include <algorithm> +#include <cmath> +#include <cstdint> +#include <cstring> +#include <map> +#include <random> +#include <vector> + +struct ggml_opt_dataset { + struct ggml_context * ctx = nullptr; + ggml_backend_buffer_t buf = nullptr; + struct ggml_tensor * data = nullptr; + struct ggml_tensor * labels = nullptr; + + int64_t ndata = -1; + int64_t ndata_shard = -1; + size_t nbs_data = -1; + size_t nbs_labels = -1; + + std::vector<int64_t> permutation; +}; + +struct ggml_opt_context { + ggml_backend_sched_t backend_sched = nullptr; + ggml_cgraph * allocated_graph = nullptr; + ggml_cgraph * allocated_graph_copy = nullptr; + struct ggml_context * ctx_static = nullptr; + struct ggml_context * ctx_static_cpu = nullptr; + struct ggml_context * ctx_compute = nullptr; + struct ggml_context * ctx_copy = nullptr; + ggml_backend_buffer_t buf_static = nullptr; + ggml_backend_buffer_t buf_static_cpu = nullptr; + std::mt19937 rng; + + struct ggml_tensor * inputs = nullptr; + struct ggml_tensor * outputs = nullptr; + struct ggml_tensor * labels = nullptr; + + struct ggml_tensor * loss = nullptr; + struct ggml_tensor * pred = nullptr; + struct ggml_tensor * ncorrect = nullptr; + + struct ggml_cgraph * gf = nullptr; + struct ggml_cgraph * gb_grad = nullptr; + struct ggml_cgraph * gb_opt = nullptr; + + int64_t iter = 1; + int32_t opt_period = 1; + int32_t opt_i = 0; + bool loss_per_datapoint = false; + + ggml_opt_get_optimizer_params get_opt_pars = nullptr; + void * get_opt_pars_ud = nullptr; + struct ggml_tensor * adamw_params = nullptr; +}; + +struct ggml_opt_result { + int64_t ndata = 0; + std::vector<float> loss; + std::vector<int32_t> pred; + int64_t ncorrect = 0; + + int64_t opt_period = -1; + bool loss_per_datapoint = false; +}; + +// ====== Dataset ====== + +ggml_opt_dataset_t ggml_opt_dataset_init(int64_t ne_datapoint, int64_t ne_label, int64_t ndata, int64_t ndata_shard) { + GGML_ASSERT(ne_datapoint > 0); + GGML_ASSERT(ne_label >= 0); + GGML_ASSERT(ndata > 0); + GGML_ASSERT(ndata_shard > 0); + + ggml_opt_dataset_t result = new ggml_opt_dataset; + result->ndata = ndata; + result->ndata_shard = ndata_shard; + + { + struct ggml_init_params params = { + /*.mem_size =*/ 2*ggml_tensor_overhead(), + /*.mem_buffer =*/ nullptr, + /*.no_alloc =*/ true, + }; + result->ctx = ggml_init(params); + } + + result->data = ggml_new_tensor_2d(result->ctx, GGML_TYPE_F32, ne_datapoint, ndata); + result->nbs_data = ggml_nbytes(result->data) * ndata_shard/ndata; + + if (ne_label > 0) { + result->labels = ggml_new_tensor_2d(result->ctx, GGML_TYPE_F32, ne_label, ndata); + result->nbs_labels = ggml_nbytes(result->labels) * ndata_shard/ndata; + } else { + result->labels = nullptr; + result->nbs_labels = 0; + } + + result->buf = ggml_backend_alloc_ctx_tensors_from_buft(result->ctx, ggml_backend_cpu_buffer_type()); + + const int64_t nshards = ndata/ndata_shard; + result->permutation.resize(nshards); + for (int64_t i = 0; i < nshards; ++i) { + result->permutation[i] = i; + } + return result; +} + +void ggml_opt_dataset_free(ggml_opt_dataset_t dataset) { + ggml_backend_buffer_free(dataset->buf); + ggml_free(dataset->ctx); + delete dataset; +} + +struct ggml_tensor * 
ggml_opt_dataset_data(ggml_opt_dataset_t dataset) { + return dataset->data; +} + +struct ggml_tensor * ggml_opt_dataset_labels(ggml_opt_dataset_t dataset) { + return dataset->labels; +} + +void ggml_opt_dataset_shuffle(ggml_opt_context_t opt_ctx, ggml_opt_dataset_t dataset, int64_t idata) { + GGML_ASSERT(idata <= dataset->ndata); + + if (idata < 0) { + std::shuffle(dataset->permutation.begin(), dataset->permutation.end(), opt_ctx->rng); + return; + } + + GGML_ASSERT(idata % dataset->ndata_shard == 0); + const int64_t ishard_max = idata / dataset->ndata_shard; + std::shuffle(dataset->permutation.begin(), dataset->permutation.begin() + ishard_max, opt_ctx->rng); +} + +void ggml_opt_dataset_get_batch(ggml_opt_dataset_t dataset, struct ggml_tensor * data_batch, struct ggml_tensor * labels_batch, int64_t ibatch) { + GGML_ASSERT( data_batch && ggml_is_contiguous(data_batch)); + GGML_ASSERT(!labels_batch || ggml_is_contiguous(labels_batch)); + GGML_ASSERT((labels_batch == nullptr) == (dataset->labels == nullptr)); + + const size_t nb_data_batch = ggml_nbytes(data_batch); + GGML_ASSERT(nb_data_batch % dataset->nbs_data == 0); + const int64_t shards_per_batch = nb_data_batch / dataset->nbs_data; + + if (labels_batch) { + const size_t nb_labels_batch = ggml_nbytes(labels_batch); + GGML_ASSERT(nb_labels_batch == shards_per_batch*dataset->nbs_labels); + } + + GGML_ASSERT((ibatch + 1)*shards_per_batch <= int64_t(dataset->permutation.size())); + + for (int64_t ishard_batch = 0; ishard_batch < shards_per_batch; ++ishard_batch) { + const int64_t ishard = dataset->permutation[ibatch*shards_per_batch + ishard_batch]; + + const char * ptr_data = (const char *) dataset->data->data + ishard*dataset->nbs_data; + ggml_backend_tensor_set(data_batch, ptr_data, ishard_batch*dataset->nbs_data, dataset->nbs_data); + + if (!labels_batch) { + continue; + } + + const char * ptr_labels = (const char *) dataset->labels->data + ishard*dataset->nbs_labels; + ggml_backend_tensor_set(labels_batch, ptr_labels, ishard_batch*dataset->nbs_labels, dataset->nbs_labels); + } +} + +// ====== Model / Context ====== + +struct ggml_opt_optimizer_params ggml_opt_get_default_optimizer_params(void * userdata) { + GGML_UNUSED(userdata); + + ggml_opt_optimizer_params result; + + result.adamw.alpha = 0.001f; + result.adamw.beta1 = 0.9f; + result.adamw.beta2 = 0.999f; + result.adamw.eps = 1e-8f; + result.adamw.wd = 0.0f; + + return result; +} + +struct ggml_opt_params ggml_opt_default_params( + ggml_backend_sched_t backend_sched, + struct ggml_context * ctx_compute, + struct ggml_tensor * inputs, + struct ggml_tensor * outputs, + enum ggml_opt_loss_type loss_type) { + return { + /*backend_sched =*/ backend_sched, + /*ctx_compute =*/ ctx_compute, + /*inputs =*/ inputs, + /*logits =*/ outputs, + /*loss_type =*/ loss_type, + /*build_type =*/ GGML_OPT_BUILD_TYPE_OPT, + /*opt_period =*/ 1, + /*get_opt_pars =*/ ggml_opt_get_default_optimizer_params, + /*get_opt_pars_ud =*/ nullptr, + }; +} + +static ggml_tensor * map_tensor(std::map<ggml_tensor *, ggml_tensor *> & tensor_map, ggml_context * ctx, ggml_tensor * tensor) { + if (!tensor) { + return nullptr; + } + + if (tensor_map.find(tensor) != tensor_map.end()) { + return tensor_map[tensor]; + } + + ggml_tensor * new_tensor = ggml_dup_tensor(ctx, tensor); + tensor_map[tensor] = new_tensor; + + new_tensor->op = tensor->op; + for (int i = 0; i < GGML_MAX_DIMS; i++) { + new_tensor->nb[i] = tensor->nb[i]; + } + new_tensor->flags = tensor->flags; + memcpy(new_tensor->op_params, tensor->op_params, sizeof(tensor->op_params)); + strcpy(new_tensor->name, tensor->name);
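+ // Note: the duplicated node shares the original's storage. data, buffer and
+ // extra are aliased rather than deep-copied, and the view_src/src edges are
+ // remapped through tensor_map so that a tensor reachable along several paths
+ // in the graph is duplicated exactly once.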
+ new_tensor->data = tensor->data; + new_tensor->buffer = tensor->buffer; + new_tensor->extra = tensor->extra; + new_tensor->view_offs = tensor->view_offs; + new_tensor->view_src = map_tensor(tensor_map, ctx, tensor->view_src); + for (int i = 0; i < GGML_MAX_SRC; i++) { + new_tensor->src[i] = map_tensor(tensor_map, ctx, tensor->src[i]); + } + + return new_tensor; +} + +static ggml_cgraph * dup_graph(ggml_context * ctx, ggml_cgraph * src) { + std::map<ggml_tensor *, ggml_tensor *> tensor_map; + + ggml_cgraph * dst = ggml_new_graph_custom(ctx, src->size, /*grads =*/ true); + + for (int i = 0; i < src->n_leafs; i++) { + ggml_build_forward_expand(dst, map_tensor(tensor_map, ctx, src->leafs[i])); + } + GGML_ASSERT(dst->n_leafs == src->n_leafs); + for (int i = 0; i < src->n_nodes; i++) { + ggml_build_forward_expand(dst, map_tensor(tensor_map, ctx, src->nodes[i])); + } + GGML_ASSERT(dst->n_nodes == src->n_nodes); + for (int i = 0; i < src->n_nodes; ++i) { + const size_t igrad_src = ggml_hash_find(&src->visited_hash_set, src->nodes[i]); + const size_t igrad_dst = ggml_hash_find(&dst->visited_hash_set, dst->nodes[i]); + + GGML_ASSERT(igrad_src != GGML_HASHSET_FULL); + GGML_ASSERT(ggml_bitset_get(src->visited_hash_set.used, igrad_src)); + GGML_ASSERT(igrad_dst != GGML_HASHSET_FULL); + GGML_ASSERT(ggml_bitset_get(dst->visited_hash_set.used, igrad_dst)); + + dst->grads[igrad_dst] = src->grads[igrad_src]; + dst->grad_accs[igrad_dst] = src->grad_accs[igrad_src]; + } + + return dst; +} + +static void ggml_opt_alloc_graph(ggml_opt_context_t opt_ctx, ggml_cgraph * graph) { + GGML_ASSERT(graph); + if (opt_ctx->allocated_graph == graph) { + return; + } + + ggml_backend_sched_reset(opt_ctx->backend_sched); // clear allocation of previous graph + + { + ggml_init_params params = { + /*.mem_size =*/ ggml_tensor_overhead() * GGML_DEFAULT_GRAPH_SIZE, + /*.mem_buffer =*/ nullptr, + /*.no_alloc =*/ true, + }; + ggml_free(opt_ctx->ctx_copy); + opt_ctx->ctx_copy = ggml_init(params); + } + + opt_ctx->allocated_graph_copy = dup_graph(opt_ctx->ctx_copy, graph); + + ggml_backend_sched_alloc_graph(opt_ctx->backend_sched, opt_ctx->allocated_graph_copy); + opt_ctx->allocated_graph = graph; +} + +ggml_opt_context_t ggml_opt_init(struct ggml_opt_params params) { + ggml_opt_context_t result = new struct ggml_opt_context; + result->backend_sched = params.backend_sched; + result->ctx_compute = params.ctx_compute; + result->inputs = params.inputs; + result->outputs = params.outputs; + result->opt_period = params.opt_period; + result->get_opt_pars = params.get_opt_pars; + result->get_opt_pars_ud = params.get_opt_pars_ud; + + GGML_ASSERT(result->inputs->data && "the inputs must be allocated statically"); + GGML_ASSERT(result->opt_period >= 1); + + const bool accumulate = params.build_type == GGML_OPT_BUILD_TYPE_GRAD || + (params.build_type == GGML_OPT_BUILD_TYPE_OPT && result->opt_period > 1); + + ggml_set_input(result->inputs); + ggml_set_output(result->outputs); + + result->gf = ggml_new_graph_custom(result->ctx_compute, GGML_DEFAULT_GRAPH_SIZE, /*grads =*/ true); // Forward pass. 
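+ // gf covers only the forward pass; depending on params.build_type it is
+ // duplicated further below into gb_grad (forward + backward pass) and
+ // gb_opt (forward + backward pass + AdamW optimizer step).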
+ ggml_build_forward_expand(result->gf, result->outputs); + + int n_param = 0; + for (int i = 0; i < result->gf->n_nodes; ++i) { + if (result->gf->nodes[i]->flags & GGML_TENSOR_FLAG_PARAM) { + n_param++; + } + } + + { + // The static context is used for: + // - gradients (1 tensor per param if using gradient accumulation) + // - optimizer momenta (2 tensors per param) + // - labels + // - loss + its gradient (up to 5 tensors) + // - pred + // - ncorrect (2 tensors). + const size_t tensors_per_param = (accumulate ? 1 : 0) + (params.build_type == GGML_OPT_BUILD_TYPE_OPT ? 2 : 0); + const size_t size_meta = (tensors_per_param*n_param + 9) * ggml_tensor_overhead(); + struct ggml_init_params params = { + /*.mem_size =*/ size_meta, + /*.mem_buffer =*/ nullptr, + /*.no_alloc =*/ true, + }; + result->ctx_static = ggml_init(params); + } + { + // The static cpu context is used for: + // - optimizer parameters (1 for the entire context) + const size_t size_meta = 1 * ggml_tensor_overhead(); + struct ggml_init_params params = { + /*.mem_size =*/ size_meta, + /*.mem_buffer =*/ nullptr, + /*.no_alloc =*/ true, + }; + result->ctx_static_cpu = ggml_init(params); + } + + + switch (params.loss_type) { + case GGML_OPT_LOSS_TYPE_MEAN: { + result->loss = ggml_sum(result->ctx_static, result->outputs); + ggml_set_name(result->loss, "loss_sum"); + const float scale = 1.0f / (result->opt_period * ggml_nelements(result->outputs)); + result->loss = ggml_scale(result->ctx_static, result->loss, scale); + ggml_set_name(result->loss, "loss_mean"); + result->loss_per_datapoint = true; + break; + } + case GGML_OPT_LOSS_TYPE_SUM: { + result->loss = ggml_sum(result->ctx_static, result->outputs); + ggml_set_name(result->loss, "loss_sum"); + result->loss_per_datapoint = false; + break; + } + case GGML_OPT_LOSS_TYPE_CROSS_ENTROPY: { + result->labels = ggml_dup_tensor(result->ctx_static, result->outputs); + ggml_set_input(result->labels); + ggml_set_name(result->labels, "labels"); + result->loss = ggml_cross_entropy_loss(result->ctx_static, result->outputs, result->labels); + ggml_set_name(result->loss, "loss_cross_entropy"); + if (result->opt_period > 1) { + result->loss = ggml_scale(result->ctx_static, result->loss, 1.0f / result->opt_period); + ggml_set_name(result->loss, "loss_cross_entropy_scaled"); + } + result->loss_per_datapoint = true; + break; + } + case GGML_OPT_LOSS_TYPE_MEAN_SQUARED_ERROR: { + result->labels = ggml_dup_tensor(result->ctx_static, result->outputs); + ggml_set_input(result->labels); + ggml_set_name(result->labels, "labels"); + result->loss = ggml_sub(result->ctx_static, result->outputs, result->labels); + ggml_set_name(result->loss, "loss_error"); + result->loss = ggml_sqr(result->ctx_static, result->loss); + ggml_set_name(result->loss, "loss_squared_error"); + result->loss = ggml_sum(result->ctx_static, result->loss); + ggml_set_name(result->loss, "loss_sum_squared_error"); + const float scale = 1.0f / (result->opt_period * ggml_nelements(result->outputs)); + result->loss = ggml_scale(result->ctx_static, result->loss, scale); + ggml_set_name(result->loss, "loss_mean_squared_error"); + result->loss_per_datapoint = true; + break; + } + } + ggml_set_output(result->loss); + ggml_set_loss(result->loss); + ggml_build_forward_expand(result->gf, result->loss); + + result->pred = ggml_argmax(result->ctx_static, result->outputs); + ggml_set_name(result->pred, "pred"); + ggml_set_output(result->pred); + ggml_build_forward_expand(result->gf, result->pred); + + if (result->labels) { + result->ncorrect = 
ggml_count_equal(result->ctx_static, result->pred, ggml_argmax(result->ctx_static, result->labels)); + ggml_set_name(result->ncorrect, "ncorrect"); + ggml_set_output(result->ncorrect); + ggml_build_forward_expand(result->gf, result->ncorrect); + } else { + result->ncorrect = nullptr; + } + + if (params.build_type == GGML_OPT_BUILD_TYPE_FORWARD) { + result->buf_static = ggml_backend_alloc_ctx_tensors(result->ctx_static, ggml_backend_sched_get_backend(result->backend_sched, 0)); + return result; + } + + // gb_grad == graph backward gradients, forward pass, then backward pass to calculate gradients. + result->gb_grad = ggml_graph_dup(result->ctx_compute, result->gf); + ggml_build_backward_expand(result->ctx_static, result->ctx_compute, result->gb_grad, accumulate); + + if (params.build_type == GGML_OPT_BUILD_TYPE_GRAD) { + result->buf_static = ggml_backend_alloc_ctx_tensors(result->ctx_static, ggml_backend_sched_get_backend(result->backend_sched, 0)); + ggml_graph_reset(result->gb_grad); + return result; + } + + GGML_ASSERT(params.build_type == GGML_OPT_BUILD_TYPE_OPT); + + // gb_opt == graph backward optimize, forward pass, then backward pass to calculate gradients, then optimizer step. + result->gb_opt = ggml_graph_dup(result->ctx_compute, result->gb_grad); + + result->adamw_params = ggml_new_tensor_1d(result->ctx_static_cpu, GGML_TYPE_F32, 7); + ggml_set_input(result->adamw_params); + ggml_set_name(result->adamw_params, "adamw_params"); + + for (int i = result->gf->n_nodes-1; i >= 0; --i) { + struct ggml_tensor * node = result->gb_opt->nodes[i]; + struct ggml_tensor * grad = ggml_graph_get_grad(result->gb_opt, node); + + if (node->flags & GGML_TENSOR_FLAG_PARAM) { + struct ggml_tensor * m = ggml_dup_tensor(result->ctx_static, node); + struct ggml_tensor * v = ggml_dup_tensor(result->ctx_static, node); + struct ggml_tensor * opt_step = ggml_opt_step_adamw(result->ctx_compute, node, grad, m, v, result->adamw_params); + ggml_build_forward_expand(result->gb_opt, opt_step); + } + } + + result->buf_static = ggml_backend_alloc_ctx_tensors( + result->ctx_static, ggml_backend_sched_get_backend(result->backend_sched, 0)); + + result->buf_static_cpu = ggml_backend_alloc_ctx_tensors_from_buft(result->ctx_static_cpu, ggml_backend_cpu_buffer_type()); + + ggml_graph_reset(result->gb_opt); + + return result; +} + +void ggml_opt_free(ggml_opt_context_t opt_ctx) { + if (opt_ctx == nullptr) { + return; + } + ggml_backend_buffer_free(opt_ctx->buf_static); + ggml_backend_buffer_free(opt_ctx->buf_static_cpu); + ggml_free(opt_ctx->ctx_static); + ggml_free(opt_ctx->ctx_static_cpu); + delete opt_ctx; +} + +void ggml_opt_reset(ggml_opt_context_t opt_ctx, bool optimizer) { + if (optimizer) { + ggml_graph_reset(opt_ctx->gb_opt); + opt_ctx->iter = 1; + } else { + ggml_graph_reset(opt_ctx->gb_grad); + } +} + +struct ggml_tensor * ggml_opt_inputs(ggml_opt_context_t opt_ctx) { + return opt_ctx->inputs; +} + +struct ggml_tensor * ggml_opt_outputs(ggml_opt_context_t opt_ctx) { + return opt_ctx->outputs; +} + +struct ggml_tensor * ggml_opt_labels(ggml_opt_context_t opt_ctx) { + return opt_ctx->labels; +} + +struct ggml_tensor * ggml_opt_loss(ggml_opt_context_t opt_ctx) { + return opt_ctx->loss; +} + +struct ggml_tensor * ggml_opt_pred(ggml_opt_context_t opt_ctx) { + return opt_ctx->pred; +} + +struct ggml_tensor * ggml_opt_ncorrect(ggml_opt_context_t opt_ctx) { + return opt_ctx->ncorrect; +} + +struct ggml_tensor * ggml_opt_grad_acc(ggml_opt_context_t opt_ctx, struct ggml_tensor * node) { + return 
ggml_graph_get_grad_acc(opt_ctx->gb_opt, node); +} + +// ====== Optimization Result ====== + +ggml_opt_result_t ggml_opt_result_init() { + return new ggml_opt_result; +} + +void ggml_opt_result_free(ggml_opt_result_t result) { + delete result; +} + +void ggml_opt_result_reset(ggml_opt_result_t result) { + result->ndata = 0; + result->loss.clear(); + result->pred.clear(); + result->ncorrect = 0; +} + +void ggml_opt_result_ndata(ggml_opt_result_t result, int64_t * ndata) { + *ndata = result->ndata; +} + +void ggml_opt_result_loss(ggml_opt_result_t result, double * loss, double * unc) { + const int64_t nbatches = result->loss.size(); // Number of physical batches. + + if (nbatches == 0) { + *loss = 0.0; + *unc = NAN; + return; + } + + double sum = 0.0; + double sum_squared = 0.0; + + for (const float & loss : result->loss) { + // If the loss is per datapoint it was scaled by 1.0f/opt_period for each physical batch. + const float loss_scaled = result->loss_per_datapoint ? loss*result->opt_period : loss; + sum += loss_scaled; + sum_squared += loss_scaled*loss_scaled; + } + + const double mean = sum/nbatches; + *loss = result->loss_per_datapoint ? mean : sum; + + if (!unc) { + return; + } + + if (nbatches < 2) { + *unc = NAN; + return; + } + + const double var_sum = sum_squared/nbatches - mean*mean; // variance without Bessel's correction, i.e. nbatches/(nbatches-1) + *unc = result->loss_per_datapoint ? sqrt(var_sum / (nbatches - 1)) : sqrt(var_sum * nbatches/(nbatches - 1)); +} + +void ggml_opt_result_pred(ggml_opt_result_t result, int32_t * pred) { + for (size_t i = 0; i < result->pred.size(); ++i) { + pred[i] = result->pred[i]; + } +} + +void ggml_opt_result_accuracy(ggml_opt_result_t result, double * accuracy, double * unc) { + *accuracy = result->ncorrect >= 0 ? double(result->ncorrect) / double(result->ndata) : NAN; + + if (!unc) { + return; + } + + *unc = result->ncorrect >= 0 && result->ndata >= 2 ? 
+ sqrt((*accuracy) * (1.0 - (*accuracy)) / double(result->ndata - 1)) : NAN; +} + +// ====== Computation ====== + +static void ggml_opt_eval_graph(ggml_opt_context_t opt_ctx, ggml_cgraph * graph, ggml_opt_result * result) { + if (graph != opt_ctx->gf) { + struct ggml_opt_optimizer_params opt_pars = opt_ctx->get_opt_pars(opt_ctx->get_opt_pars_ud); + + GGML_ASSERT(opt_pars.adamw.alpha > 0.0f); + GGML_ASSERT(opt_pars.adamw.beta1 >= 0.0f); + GGML_ASSERT(opt_pars.adamw.beta1 <= 1.0f); + GGML_ASSERT(opt_pars.adamw.beta2 >= 0.0f); + GGML_ASSERT(opt_pars.adamw.beta2 <= 1.0f); + GGML_ASSERT(opt_pars.adamw.eps >= 0.0f); + GGML_ASSERT(opt_pars.adamw.wd >= 0.0f); + GGML_ASSERT(opt_pars.adamw.wd <= 1.0f); + + // beta1, beta2 after applying warmup + const float beta1h = 1.0f/(1.0f - powf(opt_pars.adamw.beta1, opt_ctx->iter)); + const float beta2h = 1.0f/(1.0f - powf(opt_pars.adamw.beta2, opt_ctx->iter)); + + float * adamw_par_data = ggml_get_data_f32(opt_ctx->adamw_params); + adamw_par_data[0] = opt_pars.adamw.alpha; + adamw_par_data[1] = opt_pars.adamw.beta1; + adamw_par_data[2] = opt_pars.adamw.beta2; + adamw_par_data[3] = opt_pars.adamw.eps; + adamw_par_data[4] = opt_pars.adamw.wd; + adamw_par_data[5] = beta1h; + adamw_par_data[6] = beta2h; + } + + ggml_opt_alloc_graph(opt_ctx, graph); + ggml_backend_sched_graph_compute(opt_ctx->backend_sched, opt_ctx->allocated_graph_copy); + opt_ctx->iter += opt_ctx->allocated_graph == opt_ctx->gb_opt; + + if (!result) { + return; + } + + if (result->ndata == 0) { + result->loss_per_datapoint = opt_ctx->loss_per_datapoint; + result->opt_period = opt_ctx->opt_period; + } else { + GGML_ASSERT(result->loss_per_datapoint == opt_ctx->loss_per_datapoint); + GGML_ASSERT(result->opt_period == opt_ctx->opt_period); + } + + const int64_t ndata = opt_ctx->outputs->ne[1]; + GGML_ASSERT(result->ndata == ndata*int64_t(result->loss.size()) && "varying batch size not supported"); + result->ndata += ndata; + + GGML_ASSERT(ggml_is_scalar(opt_ctx->loss)); + GGML_ASSERT(opt_ctx->loss->type == GGML_TYPE_F32); + float loss; + ggml_backend_tensor_get(opt_ctx->loss, &loss, 0, ggml_nbytes(opt_ctx->loss)); + result->loss.push_back(loss); + + GGML_ASSERT(opt_ctx->pred->type == GGML_TYPE_I32); + std::vector pred(ndata); + ggml_backend_tensor_get(opt_ctx->pred, pred.data(), 0, ggml_nbytes(opt_ctx->pred)); + result->pred.insert(result->pred.end(), pred.begin(), pred.end()); + + if (!opt_ctx->labels || result->ncorrect < 0) { + result->ncorrect = -1; + return; + } + + GGML_ASSERT(ggml_is_scalar(opt_ctx->ncorrect)); + GGML_ASSERT(opt_ctx->ncorrect->type == GGML_TYPE_I64); + int64_t ncorrect; + ggml_backend_tensor_get(opt_ctx->ncorrect, &ncorrect, 0, ggml_nbytes(opt_ctx->ncorrect)); + result->ncorrect += ncorrect; +} + +void ggml_opt_forward(ggml_opt_context_t opt_ctx, ggml_opt_result * result) { + ggml_opt_eval_graph(opt_ctx, opt_ctx->gf, result); +} + +void ggml_opt_forward_backward(ggml_opt_context_t opt_ctx, ggml_opt_result * result) { + if (opt_ctx->opt_period == 1) { + ggml_opt_eval_graph(opt_ctx, opt_ctx->gb_opt, result); + return; + } + + const int32_t opt_i_next = (opt_ctx->opt_i + 1) % opt_ctx->opt_period; + if (opt_i_next == 0) { + ggml_opt_eval_graph(opt_ctx, opt_ctx->gb_opt, result); + ggml_opt_reset(opt_ctx, /*optimizer =*/ false); + } else { + ggml_opt_eval_graph(opt_ctx, opt_ctx->gb_grad, result); + } + opt_ctx->opt_i = opt_i_next; +} + +// ====== High-Level Functions ====== + +void ggml_opt_epoch( + ggml_opt_context_t opt_ctx, + ggml_opt_dataset_t dataset, + ggml_opt_result_t 
result_train, + ggml_opt_result_t result_eval, + int64_t idata_split, + ggml_opt_epoch_callback callback_train, + ggml_opt_epoch_callback callback_eval) { + struct ggml_tensor * inputs = ggml_opt_inputs(opt_ctx); + struct ggml_tensor * labels = ggml_opt_labels(opt_ctx); + struct ggml_tensor * data = ggml_opt_dataset_data(dataset); + GGML_ASSERT(data->ne[0] == inputs->ne[0]); + + const int64_t ndata = data->ne[1]; + const int64_t ndata_batch = inputs->ne[1]; + + GGML_ASSERT(data->ne[1] % inputs->ne[1] == 0); + const int64_t nbatches = ndata/ndata_batch; + + idata_split = idata_split < 0 ? ndata : idata_split; + GGML_ASSERT(idata_split % ndata_batch == 0); + const int64_t ibatch_split = idata_split / ndata_batch; + + int64_t ibatch = 0; + int64_t t_loop_start = ggml_time_us(); + for (; ibatch < ibatch_split; ++ibatch) { + ggml_opt_dataset_get_batch(dataset, inputs, labels, ibatch); + ggml_opt_forward_backward(opt_ctx, result_train); + if (callback_train) { + callback_train(true, opt_ctx, dataset, result_train, ibatch+1, ibatch_split, t_loop_start); + } + } + t_loop_start = ggml_time_us(); + for (; ibatch < nbatches; ++ibatch) { + ggml_opt_dataset_get_batch(dataset, inputs, labels, ibatch); + ggml_opt_forward(opt_ctx, result_eval); + if (callback_eval) { + callback_eval(false, opt_ctx, dataset, result_eval, ibatch+1-ibatch_split, nbatches-ibatch_split, t_loop_start); + } + } +} + +void ggml_opt_epoch_callback_progress_bar( + bool train, + ggml_opt_context_t opt_ctx, + ggml_opt_dataset_t dataset, + ggml_opt_result_t result, + int64_t ibatch, + int64_t ibatch_max, + int64_t t_start_us) { + fprintf(stderr, "%s[", train ? "train: " : "val: "); + + constexpr int64_t bar_length = 25; + for (int64_t j = 0; j < bar_length; ++j) { + const int64_t ibatch_j = ibatch_max * j/bar_length; + if (ibatch_j < ibatch) { + fprintf(stderr, "="); + } else if (ibatch_max * (j - 1)/bar_length < ibatch) { + fprintf(stderr, ">"); + } else { + fprintf(stderr, " "); + } + } + + const int64_t batch_size = ggml_opt_inputs(opt_ctx)->ne[1]; + const int64_t idata = ibatch*batch_size; + const int64_t idata_max = ibatch_max*batch_size; + + double loss; + double loss_unc; + ggml_opt_result_loss(result, &loss, &loss_unc); + + double accuracy; + double accuracy_unc; + ggml_opt_result_accuracy(result, &accuracy, &accuracy_unc); + + const int64_t t_ibatch_us = ggml_time_us() - t_start_us; + int64_t t_ibatch_s = t_ibatch_us / 1000000; + const int64_t t_ibatch_h = t_ibatch_s / 3600; + t_ibatch_s -= t_ibatch_h * 3600; + const int64_t t_ibatch_m = t_ibatch_s / 60; + t_ibatch_s -= t_ibatch_m * 60; + + const int64_t t_eta_us = t_ibatch_us * (ibatch_max - ibatch)/ibatch; + int64_t t_eta_s = t_eta_us / 1000000; + const int64_t t_eta_h = t_eta_s / 3600; + t_eta_s -= t_eta_h * 3600; + const int64_t t_eta_m = t_eta_s / 60; + t_eta_s -= t_eta_m * 60; + + fprintf(stderr, "| data=%06" PRId64 "/%06" PRId64 ", loss=%.6lf+-%.6lf, accuracy=%.2lf+-%.2lf%%, " + "t=%02" PRId64 ":%02" PRId64 ":%02" PRId64 ", ETA=%02" PRId64 ":%02" PRId64 ":%02" PRId64 "]\r", + idata, idata_max, loss, loss_unc, 100.0*accuracy, 100.0*accuracy_unc, + t_ibatch_h, t_ibatch_m, t_ibatch_s, t_eta_h, t_eta_m, t_eta_s); + if (ibatch == ibatch_max) { + fprintf(stderr, "\n"); + } + fflush(stderr); + + GGML_UNUSED(dataset); +} + +void ggml_opt_fit( + ggml_backend_sched_t backend_sched, + ggml_context * ctx_compute, + ggml_tensor * inputs, + ggml_tensor * outputs, + ggml_opt_dataset_t dataset, + enum ggml_opt_loss_type loss_type, + ggml_opt_get_optimizer_params get_opt_pars, + 
int64_t nepoch, + int64_t nbatch_logical, + float val_split, + bool silent) { + ggml_time_init(); + const int64_t t_start_us = ggml_time_us(); + + const int64_t ndata = ggml_opt_dataset_data(dataset)->ne[1]; + const int64_t nbatch_physical = inputs->ne[1]; + GGML_ASSERT(ndata % nbatch_logical == 0); + GGML_ASSERT(nbatch_logical % nbatch_physical == 0); + + const int64_t opt_period = nbatch_logical / nbatch_physical; + const int64_t nbatches_logical = ndata / nbatch_logical; + + GGML_ASSERT(val_split >= 0.0f); + GGML_ASSERT(val_split < 1.0f); + const int64_t ibatch_split = int64_t(((1.0f - val_split) * nbatches_logical)) * opt_period; // train <-> val split index (physical) + const int64_t idata_split = ibatch_split * nbatch_physical; + + int64_t epoch = 1; + + ggml_opt_params params = ggml_opt_default_params(backend_sched, ctx_compute, inputs, outputs, loss_type); + params.opt_period = opt_period; + params.get_opt_pars = get_opt_pars; + params.get_opt_pars_ud = &epoch; + ggml_opt_context_t opt_ctx = ggml_opt_init(params); + + // Shuffling the data is generally useful but there is only a point if not all data is used in a single batch. + if (nbatch_logical < ndata) { + ggml_opt_dataset_shuffle(opt_ctx, dataset, -1); // Shuffle all data (train + validation). + } + + ggml_opt_result_t result_train = ggml_opt_result_init(); + ggml_opt_result_t result_val = ggml_opt_result_init(); + + ggml_opt_epoch_callback epoch_callback = silent ? nullptr : ggml_opt_epoch_callback_progress_bar; + + for (; epoch <= nepoch; ++epoch) { + if (nbatch_logical < idata_split) { + ggml_opt_dataset_shuffle(opt_ctx, dataset, idata_split); + } + + ggml_opt_result_reset(result_train); + ggml_opt_result_reset(result_val); + + if (!silent) { + fprintf(stderr, "%s: epoch %04" PRId64 "/%04" PRId64 ":\n", __func__, epoch, nepoch); + } + ggml_opt_epoch(opt_ctx, dataset, result_train, result_val, idata_split, epoch_callback, epoch_callback); + if (!silent) { + fprintf(stderr, "\n"); + } + } + + if (!silent) { + int64_t t_total_s = (ggml_time_us() - t_start_us) / 1000000; + const int64_t t_total_h = t_total_s / 3600; + t_total_s -= t_total_h * 3600; + const int64_t t_total_m = t_total_s / 60; + t_total_s -= t_total_m * 60; + fprintf(stderr, "%s: training took %02" PRId64 ":%02" PRId64 ":%02" PRId64 "\n", __func__, t_total_h, t_total_m, t_total_s); + } + + ggml_opt_free(opt_ctx); + ggml_opt_result_free(result_train); + ggml_opt_result_free(result_val); +} diff --git a/llama/ggml-quants.c b/ml/backend/ggml/ggml/src/ggml-quants.c similarity index 99% rename from llama/ggml-quants.c rename to ml/backend/ggml/ggml/src/ggml-quants.c index 6f824d42..7918388a 100644 --- a/llama/ggml-quants.c +++ b/ml/backend/ggml/ggml/src/ggml-quants.c @@ -1,35 +1,9 @@ -/** - * llama.cpp - commit 46e3556e01b824e52395fb050b29804b6cff2a7c - do not edit this file - * - * MIT License - * - * Copyright (c) 2023-2024 The ggml authors - * - * Permission is hereby granted, free of charge, to any person obtaining a copy - * of this software and associated documentation files (the "Software"), to deal - * in the Software without restriction, including without limitation the rights - * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell - * copies of the Software, and to permit persons to whom the Software is - * furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in all - * copies or substantial portions of the Software. 
- * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. - */ - #define GGML_COMMON_IMPL_C #include "ggml-common.h" #include "ggml-quants.h" #include "ggml-impl.h" -#include "ggml-cpu-impl.h" +#include "ggml-cpu/ggml-cpu-impl.h" #include "ggml-cpu.h" #include diff --git a/llama/ggml-quants.h b/ml/backend/ggml/ggml/src/ggml-quants.h similarity index 87% rename from llama/ggml-quants.h rename to ml/backend/ggml/ggml/src/ggml-quants.h index cf518ba0..d09173e1 100644 --- a/llama/ggml-quants.h +++ b/ml/backend/ggml/ggml/src/ggml-quants.h @@ -1,29 +1,3 @@ -/** - * llama.cpp - commit 46e3556e01b824e52395fb050b29804b6cff2a7c - do not edit this file - * - * MIT License - * - * Copyright (c) 2023-2024 The ggml authors - * - * Permission is hereby granted, free of charge, to any person obtaining a copy - * of this software and associated documentation files (the "Software"), to deal - * in the Software without restriction, including without limitation the rights - * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell - * copies of the Software, and to permit persons to whom the Software is - * furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in all - * copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. 
- */ - #pragma once #define GGML_COMMON_DECL_C diff --git a/ml/backend/ggml/ggml/src/ggml-threading.cpp b/ml/backend/ggml/ggml/src/ggml-threading.cpp new file mode 100644 index 00000000..25a19eed --- /dev/null +++ b/ml/backend/ggml/ggml/src/ggml-threading.cpp @@ -0,0 +1,12 @@ +#include "ggml-threading.h" +#include + +std::mutex ggml_critical_section_mutex; + +void ggml_critical_section_start() { + ggml_critical_section_mutex.lock(); +} + +void ggml_critical_section_end(void) { + ggml_critical_section_mutex.unlock(); +} diff --git a/ml/backend/ggml/ggml/src/ggml-threading.h b/ml/backend/ggml/ggml/src/ggml-threading.h new file mode 100644 index 00000000..dec2c884 --- /dev/null +++ b/ml/backend/ggml/ggml/src/ggml-threading.h @@ -0,0 +1,14 @@ +#pragma once + +#include "ggml.h" + +#ifdef __cplusplus +extern "C" { +#endif + +GGML_API void ggml_critical_section_start(void); +GGML_API void ggml_critical_section_end(void); + +#ifdef __cplusplus +} +#endif diff --git a/llama/ggml.c b/ml/backend/ggml/ggml/src/ggml.c similarity index 99% rename from llama/ggml.c rename to ml/backend/ggml/ggml/src/ggml.c index 8d442e08..7ffcd907 100644 --- a/llama/ggml.c +++ b/ml/backend/ggml/ggml/src/ggml.c @@ -1,29 +1,3 @@ -/** - * llama.cpp - commit 46e3556e01b824e52395fb050b29804b6cff2a7c - do not edit this file - * - * MIT License - * - * Copyright (c) 2023-2024 The ggml authors - * - * Permission is hereby granted, free of charge, to any person obtaining a copy - * of this software and associated documentation files (the "Software"), to deal - * in the Software without restriction, including without limitation the rights - * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell - * copies of the Software, and to permit persons to whom the Software is - * furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in all - * copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. 
- */ - #define _CRT_SECURE_NO_DEPRECATE // Disables "unsafe" warnings on Windows #define _USE_MATH_DEFINES // For M_PI on MSVC diff --git a/ml/backend/ggml/ggml/src/ggml.go b/ml/backend/ggml/ggml/src/ggml.go new file mode 100644 index 00000000..7cf40e70 --- /dev/null +++ b/ml/backend/ggml/ggml/src/ggml.go @@ -0,0 +1,81 @@ +package ggml + +// #cgo CXXFLAGS: -std=c++17 +// #cgo CPPFLAGS: -DNDEBUG -DGGML_USE_CPU +// #cgo CPPFLAGS: -I${SRCDIR}/../include -I${SRCDIR}/ggml-cpu +// #cgo windows LDFLAGS: -lmsvcrt -static -static-libgcc -static-libstdc++ +// #include +// #include "ggml-backend.h" +// extern void sink(int level, char *text, void *user_data); +import "C" + +import ( + "log/slog" + "os" + "path/filepath" + "runtime" + "strings" + "sync" + "unsafe" + + _ "github.com/ollama/ollama/ml/backend/ggml/ggml/src/ggml-cpu" +) + +func init() { + C.ggml_log_set((C.ggml_log_callback)(C.sink), nil) +} + +//export sink +func sink(level C.int, text *C.char, _ unsafe.Pointer) { + msg := strings.TrimSpace(C.GoString(text)) + switch level { + case C.GGML_LOG_LEVEL_DEBUG: + slog.Debug(msg) + case C.GGML_LOG_LEVEL_INFO: + slog.Info(msg) + case C.GGML_LOG_LEVEL_WARN: + slog.Warn(msg) + case C.GGML_LOG_LEVEL_ERROR: + slog.Error(msg) + } +} + +var OnceLoad = sync.OnceFunc(func() { + var lib struct{ name, defaultValue string } + switch runtime.GOOS { + case "darwin", "linux": + lib.name = "LD_LIBRARY_PATH" + lib.defaultValue = "/usr/local/lib:/usr/lib" + case "windows": + lib.name = "PATH" + lib.defaultValue = "." + default: + return + } + + paths, ok := os.LookupEnv(lib.name) + if !ok { + paths = lib.defaultValue + } + + if runtime.GOOS == "darwin" { + if _, ok := os.LookupEnv("DYLD_LIBRARY_PATH"); !ok { + os.Setenv("DYLD_LIBRARY_PATH", paths) + } + } + + split := filepath.SplitList(paths) + visited := make(map[string]struct{}, len(split)) + for _, path := range split { + abspath, _ := filepath.Abs(path) + if _, ok := visited[abspath]; !ok { + func() { + cpath := C.CString(path) + defer C.free(unsafe.Pointer(cpath)) + C.ggml_backend_load_all_from_path(cpath) + }() + + visited[abspath] = struct{}{} + } + } +}) diff --git a/ml/backend/ggml/ggml/src/ggml_darwin_arm64.go b/ml/backend/ggml/ggml/src/ggml_darwin_arm64.go new file mode 100644 index 00000000..beffa64e --- /dev/null +++ b/ml/backend/ggml/ggml/src/ggml_darwin_arm64.go @@ -0,0 +1,10 @@ +package ggml + +// #cgo CPPFLAGS: -DGGML_USE_METAL -DGGML_USE_BLAS +// #cgo LDFLAGS: -framework Foundation +import "C" + +import ( + _ "github.com/ollama/ollama/ml/backend/ggml/ggml/src/ggml-blas" + _ "github.com/ollama/ollama/ml/backend/ggml/ggml/src/ggml-metal" +) diff --git a/ml/backend/ggml/ggml_debug.go b/ml/backend/ggml/ggml_debug.go new file mode 100644 index 00000000..9ddb2718 --- /dev/null +++ b/ml/backend/ggml/ggml_debug.go @@ -0,0 +1,6 @@ +//go:build debug + +package ggml + +// #cgo CPPFLAGS: -DOLLAMA_DEBUG +import "C" diff --git a/runners/common.go b/runners/common.go deleted file mode 100644 index 11279ed4..00000000 --- a/runners/common.go +++ /dev/null @@ -1,207 +0,0 @@ -package runners - -import ( - "log/slog" - "os" - "path/filepath" - "runtime" - "slices" - "strings" - "sync" - - "golang.org/x/sys/cpu" - - "github.com/ollama/ollama/envconfig" -) - -var ( - runnersDir = "" - once = sync.Once{} -) - -type CPUCapability uint32 - -// Override at build time when building base GPU runners -// var GPURunnerCPUCapability = CPUCapabilityAVX - -const ( - CPUCapabilityNone CPUCapability = iota - CPUCapabilityAVX - CPUCapabilityAVX2 - // TODO AVX512 -) - -func (c 
CPUCapability) String() string { - switch c { - case CPUCapabilityAVX: - return "avx" - case CPUCapabilityAVX2: - return "avx2" - default: - return "no vector extensions" - } -} - -func GetCPUCapability() CPUCapability { - if cpu.X86.HasAVX2 { - return CPUCapabilityAVX2 - } - if cpu.X86.HasAVX { - return CPUCapabilityAVX - } - // else LCD - return CPUCapabilityNone -} - -// Return the location where runners were located -// empty string indicates only builtin is present -func Locate() string { - once.Do(locateRunnersOnce) - return runnersDir -} - -// searches for runners in a prioritized set of locations -// 1. local build, with executable at the top of the tree -// 2. lib directory relative to executable -func locateRunnersOnce() { - exe, err := os.Executable() - if err != nil { - slog.Debug("runner locate", "error", err) - } - - paths := []string{ - filepath.Join(filepath.Dir(exe), "llama", "build", runtime.GOOS+"-"+runtime.GOARCH, "runners"), - filepath.Join(filepath.Dir(exe), envconfig.LibRelativeToExe(), "lib", "ollama", "runners"), - filepath.Join(filepath.Dir(exe), "lib", "ollama", "runners"), - } - for _, path := range paths { - if _, err := os.Stat(path); err == nil { - runnersDir = path - slog.Debug("runners located", "dir", runnersDir) - return - } - } - // Fall back to built-in - slog.Debug("no dynamic runners detected, using only built-in") - runnersDir = "" -} - -// Return the well-known name of the builtin runner for the given platform -func BuiltinName() string { - if runtime.GOOS == "darwin" && runtime.GOARCH == "arm64" { - return "metal" - } - return "cpu" -} - -// directory names are the name of the runner and may contain an optional -// variant prefixed with '_' as the separator. For example, "cuda_v11" and -// "cuda_v12" or "cpu" and "cpu_avx2". 
Any library without a variant is the -// lowest common denominator -func GetAvailableServers() map[string]string { - once.Do(locateRunnersOnce) - - servers := make(map[string]string) - exe, err := os.Executable() - if err == nil { - servers[BuiltinName()] = exe - } - - if runnersDir == "" { - return servers - } - - // glob runnersDir for files that start with ollama_ - pattern := filepath.Join(runnersDir, "*", "ollama_*") - - files, err := filepath.Glob(pattern) - if err != nil { - slog.Debug("could not glob", "pattern", pattern, "error", err) - return nil - } - - for _, file := range files { - slog.Debug("availableServers : found", "file", file) - runnerName := filepath.Base(filepath.Dir(file)) - // Special case for our GPU runners - if compiled with standard AVX flag - // detect incompatible system - // Custom builds will omit this and its up to the user to ensure compatibility - parsed := strings.Split(runnerName, "_") - if len(parsed) == 3 && parsed[2] == "avx" && !cpu.X86.HasAVX { - slog.Info("GPU runner incompatible with host system, CPU does not have AVX", "runner", runnerName) - continue - } - servers[runnerName] = file - } - - return servers -} - -// serversForGpu returns a list of compatible servers give the provided GPU library/variant -func ServersForGpu(requested string) []string { - // glob workDir for files that start with ollama_ - availableServers := GetAvailableServers() - - // Short circuit if the only option is built-in - if _, ok := availableServers[BuiltinName()]; ok && len(availableServers) == 1 { - return []string{BuiltinName()} - } - - bestCPUVariant := GetCPUCapability() - requestedLib := strings.Split(requested, "_")[0] - servers := []string{} - - // exact match first - for a := range availableServers { - short := a - parsed := strings.Split(a, "_") - if len(parsed) == 3 { - // Strip off optional _avx for comparison - short = parsed[0] + "_" + parsed[1] - } - if a == requested || short == requested { - servers = []string{a} - } - } - - // If no exact match, then try without variant - if len(servers) == 0 { - alt := []string{} - for a := range availableServers { - if requestedLib == strings.Split(a, "_")[0] && a != requested { - alt = append(alt, a) - } - } - slices.Sort(alt) - servers = append(servers, alt...) 
- } - - // Finally append the best CPU option if found, then builtin - if bestCPUVariant != CPUCapabilityNone { - for cmp := range availableServers { - if cmp == "cpu_"+bestCPUVariant.String() { - servers = append(servers, cmp) - break - } - } - } - servers = append(servers, BuiltinName()) - return servers -} - -// Return the optimal server for this CPU architecture -func ServerForCpu() string { - if runtime.GOOS == "darwin" && runtime.GOARCH == "arm64" { - return BuiltinName() - } - variant := GetCPUCapability() - availableServers := GetAvailableServers() - if variant != CPUCapabilityNone { - for cmp := range availableServers { - if cmp == "cpu_"+variant.String() { - return cmp - } - } - } - return BuiltinName() -} diff --git a/scripts/build.sh b/scripts/build.sh deleted file mode 100644 index a50dc7db..00000000 --- a/scripts/build.sh +++ /dev/null @@ -1,21 +0,0 @@ -#!/bin/sh - -set -eu - -usage() { - echo "usage: $(basename $0) VERSION" - exit 1 -} - -[ "$#" -eq 1 ] || usage - -export VERSION="$1" - -# build universal MacOS binary -sh $(dirname $0)/build_darwin.sh - -# # build arm64 and amd64 Linux binaries -sh $(dirname $0)/build_linux.sh - -# # build arm64 and amd64 Docker images -sh $(dirname $0)/build_docker.sh diff --git a/scripts/build_darwin.sh b/scripts/build_darwin.sh index cbf6f61d..7e586f5f 100755 --- a/scripts/build_darwin.sh +++ b/scripts/build_darwin.sh @@ -2,55 +2,92 @@ set -e -. $(dirname $0)/env.sh +status() { echo >&2 ">>> $@"; } +usage() { + echo "usage: $(basename $0) [build [sign]]" + exit 1 +} -mkdir -p dist +export VERSION=${VERSION:-$(git describe --tags --dirty)} +export GOFLAGS="'-ldflags=-w -s \"-X=github.com/ollama/ollama/version.Version=${VERSION#v}\" \"-X=github.com/ollama/ollama/server.mode=release\"'" +export CGO_CPPFLAGS='-mmacosx-version-min=11.3' -# These require Xcode v13 or older to target MacOS v11 -# If installed to an alternate location use the following to enable -# export SDKROOT=/Applications/Xcode_12.5.1.app/Contents/Developer/Platforms/MacOSX.platform/Developer/SDKs/MacOSX.sdk -# export DEVELOPER_DIR=/Applications/Xcode_12.5.1.app/Contents/Developer -export CGO_CFLAGS=-mmacosx-version-min=11.3 -export CGO_CXXFLAGS=-mmacosx-version-min=11.3 -export CGO_LDFLAGS=-mmacosx-version-min=11.3 +ARCHS="arm64 amd64" +while getopts "a:h" OPTION; do + case $OPTION in + a) ARCHS=$OPTARG ;; + h) usage ;; + esac +done -rm -rf llama/build dist/darwin-* +shift $(( $OPTIND - 1 )) -# Generate the universal ollama binary for stand-alone usage: metal + avx -echo "Building binary" -echo "Building darwin arm64" -GOOS=darwin ARCH=arm64 GOARCH=arm64 make -j 8 dist -echo "Building darwin amd64 with AVX enabled" -GOOS=darwin ARCH=amd64 GOARCH=amd64 CUSTOM_CPU_FLAGS="avx" make -j 8 dist_exe -lipo -create -output dist/ollama-darwin dist/darwin-arm64/bin/ollama dist/darwin-amd64/bin/ollama +_build_darwin() { + for ARCH in $ARCHS; do + status "Building darwin $ARCH" + INSTALL_PREFIX=dist/darwin-$ARCH/ + GOOS=darwin GOARCH=$ARCH CGO_ENABLED=1 go build -o $INSTALL_PREFIX . 
-# sign the binary and rename it
-if [ -n "$APPLE_IDENTITY" ]; then
-    codesign -f --timestamp -s "$APPLE_IDENTITY" --identifier ai.ollama.ollama --options=runtime dist/ollama-darwin
-else
-    echo "WARNING: Skipping code signing - set APPLE_IDENTITY"
+        if [ "$ARCH" = "amd64" ]; then
+            status "Building darwin $ARCH dynamic backends"
+            cmake -B build/darwin-$ARCH \
+                -DCMAKE_OSX_ARCHITECTURES=x86_64 \
+                -DCMAKE_OSX_DEPLOYMENT_TARGET=11.3
+            cmake --build build/darwin-$ARCH --target ggml-cpu -j
+            install build/darwin-$ARCH/lib/ollama/*.{dylib,so} $INSTALL_PREFIX
+        fi
+    done
+}
+
+_sign_darwin() {
+    status "Creating universal binary..."
+    mkdir -p dist/darwin
+    lipo -create -output dist/darwin/ollama dist/darwin-*/ollama
+
+    if [ -z "$APPLE_IDENTITY" ]; then
+        status "No APPLE_IDENTITY set, skipping code signing"
+        return
+    fi
+
+    for F in dist/darwin/ollama dist/darwin-amd64/lib*; do
+        codesign -f --timestamp -s "$APPLE_IDENTITY" --identifier ai.ollama.ollama --options=runtime $F
+    done
+
+    # create a temporary zip for notarization
+    TEMP=$(mktemp -u).zip
+    ditto -c -k --keepParent dist/darwin/ollama "$TEMP"
+    xcrun notarytool submit "$TEMP" --wait --timeout 10m --apple-id $APPLE_ID --password $APPLE_PASSWORD --team-id $APPLE_TEAM_ID
+    rm -f "$TEMP"
+
+    # create a universal tarball
+    tar -cf dist/ollama-darwin.tar --strip-components 2 dist/darwin/ollama
+    tar -rf dist/ollama-darwin.tar --strip-components 2 dist/darwin-amd64/lib*
+    gzip -9vc dist/ollama-darwin.tar >dist/ollama-darwin.tgz
+}
+
+_build_macapp() {
+    # build and optionally sign the mac app
+    npm install --prefix macapp
+    if [ -n "$APPLE_IDENTITY" ]; then
+        npm run --prefix macapp make:sign
+    else
+        npm run --prefix macapp make
+    fi
+
+    mv ./macapp/out/make/zip/darwin/universal/Ollama-darwin-universal-$VERSION.zip dist/Ollama-darwin.zip
+}
+
+if [ "$#" -eq 0 ]; then
+    _build_darwin
+    _sign_darwin
+    _build_macapp
+    exit 0
 fi
-ditto -c -k --keepParent dist/ollama-darwin dist/temp.zip
-if [ -n "$APPLE_IDENTITY" ]; then
-    xcrun notarytool submit dist/temp.zip --wait --timeout 10m --apple-id $APPLE_ID --password $APPLE_PASSWORD --team-id $APPLE_TEAM_ID
-fi
-rm -f dist/temp.zip
-
-# Build the app bundle
-echo "Building app"
-echo "Building darwin amd64 with runners"
-rm dist/darwin-amd64/bin/ollama
-GOOS=darwin ARCH=amd64 GOARCH=amd64 make -j 8 dist
-
-# Generate the universal ollama binary for the app bundle: metal + no-avx
-lipo -create -output dist/ollama dist/darwin-arm64/bin/ollama dist/darwin-amd64/bin/ollama
-
-# build and optionally sign the mac app
-npm install --prefix macapp
-if [ -n "$APPLE_IDENTITY" ]; then
-    npm run --prefix macapp make:sign
-else
-    npm run --prefix macapp make
-fi
-cp macapp/out/make/zip/darwin/universal/Ollama-darwin-universal-$VERSION.zip dist/Ollama-darwin.zip
+for CMD in "$@"; do
+    case $CMD in
+        build) _build_darwin ;;
+        sign) _sign_darwin ;;
+        macapp) _build_macapp ;;
+        *) usage ;;
+    esac
+done
diff --git a/scripts/build_linux.sh b/scripts/build_linux.sh
index 894d9dd2..a0c3d2f0 100755
--- a/scripts/build_linux.sh
+++ b/scripts/build_linux.sh
@@ -18,7 +18,7 @@ docker buildx build \
     --output type=local,dest=./dist/ \
     --platform=${PLATFORM} \
     ${OLLAMA_COMMON_BUILD_ARGS} \
-    --target dist \
+    --target archive \
    -f Dockerfile \
    .
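As an aside on usage: the reworked build_darwin.sh above dispatches on positional commands, so the build, signing, and mac app stages can be run independently (invoking it with no arguments still runs all of them in order). A hypothetical invocation, with placeholder credential values that are not part of this patch:

```shell
# Build both architectures first, then sign and notarize separately.
VERSION=v0.0.0 ./scripts/build_darwin.sh build
APPLE_IDENTITY="Developer ID Application: Example" \
APPLE_ID=dev@example.com APPLE_PASSWORD=app-specific-password APPLE_TEAM_ID=TEAM000000 \
./scripts/build_darwin.sh sign

# Restrict the build to a single architecture with -a:
./scripts/build_darwin.sh -a arm64 build
```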
@@ -26,4 +26,4 @@ docker buildx build \
 if echo $PLATFORM | grep "," > /dev/null ; then
     mv -f ./dist/linux_*64/ollama* ./dist/
     rmdir ./dist/linux_*64
-fi
\ No newline at end of file
+fi
diff --git a/scripts/build_windows.ps1 b/scripts/build_windows.ps1
index 0a69c60c..30cf9827 100644
--- a/scripts/build_windows.ps1
+++ b/scripts/build_windows.ps1
@@ -80,18 +80,61 @@ function checkEnv() {
 
 function buildOllama() {
     if ($null -eq ${env:OLLAMA_SKIP_GENERATE}) {
-        write-host "Building ollama runners"
         Remove-Item -ea 0 -recurse -force -path "${script:SRC_DIR}\dist\windows-${script:ARCH}"
-        & make -j 12 dist
+        New-Item "${script:SRC_DIR}\dist\windows-${script:ARCH}\lib\ollama\" -ItemType Directory -ea 0
+
+
+        # Default first, then conditionally ROCm and cuda v11
+        write-host "Building Default native backend libraries"
+        $env:CMAKE_GENERATOR="ninja"
+        & cmake --preset Default
+        if ($LASTEXITCODE -ne 0) { exit($LASTEXITCODE)}
+        & cmake --build --preset Default -j 12
+        if ($LASTEXITCODE -ne 0) { exit($LASTEXITCODE)}
+        & cmake --install build -j 12
+
+        # TODO - add steps for v11 and ROCm
+        #
+        # if ("$script:CUDA_DIRS".Contains("v11") -and "$script:CUDA_DIRS".Contains("v12")) {
+        #    # We assume the default is v12, so override for v11
+        #    $origCUDA_PATH=$env:CUDA_PATH
+        #    $hashEnv = @{}
+        #    Get-ChildItem env: | foreach { $hashEnv[$_.Name] = $_.Value }
+        #    $hashEnv.Keys | foreach { if ($_.Contains("CUDA_PATH_V11")) { $v11="$_" }}
+        #    write-host "$v11"
+        #    # $env:CUDA_PATH=$hashEnv[$v11]
+        #    # $env:CUDACXX=$hashEnv[$v11]+"\bin\nvcc.exe"
+        #    $env:CUDAToolkit_ROOT=$hashEnv[$v11]
+        #    # ls env:
+        #    write-host "Building CUDA v11 backend libraries"
+        #    & cmake --preset "CUDA 11"
+        #    $env:CUDA_PATH=$origCUDA_PATH
+        #    exit(1)
+        #    if ($LASTEXITCODE -ne 0) { exit($LASTEXITCODE)}
+        #    # & cmake --build --preset "CUDA 11" -j 12
+        #    # if ($LASTEXITCODE -ne 0) { exit($LASTEXITCODE)}
+        # }
+
+        # if ($env:HIP_PATH) {
+        #    write-host "Building ROCm backend libraries"
+        #    $env:HIPCXX="${env:HIP_PATH}\bin\clang++.exe"
+        #    $env:HIP_PLATFORM="amd"
+        #    $env:CMAKE_PREFIX_PATH="${env:HIP_PATH}"
+        #    & cmake --preset "ROCm"
+        #    $env:HIPCXX=""
+        #    $env:HIP_PLATFORM=""
+        #    $env:CMAKE_PREFIX_PATH=""
+        #    if ($LASTEXITCODE -ne 0) { exit($LASTEXITCODE)}
+        #    & cmake --build --preset "ROCm" -j 12
+        #    if ($LASTEXITCODE -ne 0) { exit($LASTEXITCODE)}
+        # }
     } else {
         write-host "Skipping generate step with OLLAMA_SKIP_GENERATE set"
     }
     write-host "Building ollama CLI"
     & go build -trimpath -ldflags "-s -w -X=github.com/ollama/ollama/version.Version=$script:VERSION -X=github.com/ollama/ollama/server.mode=release" .
if ($LASTEXITCODE -ne 0) { exit($LASTEXITCODE)} - New-Item -ItemType Directory -Path .\dist\windows-${script:TARGET_ARCH}\ -Force - cp .\ollama.exe .\dist\windows-${script:TARGET_ARCH}\ + cp .\ollama.exe "${script:DIST_DIR}\" } function buildApp() { diff --git a/scripts/fast.sh b/scripts/fast.sh deleted file mode 100755 index 8fd1e908..00000000 --- a/scripts/fast.sh +++ /dev/null @@ -1,20 +0,0 @@ -#/bin/sh - -# Wrapper script to speed up builds by disabling some permutations and reduce compatibility matrix -# Don't use for release builds, but suitable for local developer iteration - -# Only build cuda v12 -export OLLAMA_SKIP_CUDA_11_GENERATE=1 -# Major versions only -export CUDA_V12_ARCHITECTURES="60;70;80;90" -# Skip ROCm -export OLLAMA_SKIP_ROCM_GENERATE=1 -# Disable various less common quants and fattn -export OLLAMA_FAST_BUILD=1 - -if [ $# -ne 1 ] ; then - echo "Usage: ./scripts/fast.sh " - exit 1 -fi - -exec $1 \ No newline at end of file diff --git a/scripts/publish.sh b/scripts/publish.sh deleted file mode 100755 index 5bf15dcb..00000000 --- a/scripts/publish.sh +++ /dev/null @@ -1,25 +0,0 @@ -# Set your variables here. -REPO="jmorganca/ollama" - -# Check if VERSION is set -if [[ -z "${VERSION}" ]]; then - echo "VERSION is not set. Please set the VERSION environment variable." - exit 1 -fi - -OS=$(go env GOOS) - -./script/build_${OS}.sh - -# Create a new tag if it doesn't exist. -if ! git rev-parse v$VERSION >/dev/null 2>&1; then - git tag v$VERSION -fi - -git push origin v$VERSION - -# Create a new release. -gh release create -p v$VERSION -t v$VERSION - -# Upload the zip file. -gh release upload v$VERSION ./dist/* --clobber diff --git a/scripts/rh_linux_deps.sh b/scripts/rh_linux_deps.sh deleted file mode 100644 index d0cadd45..00000000 --- a/scripts/rh_linux_deps.sh +++ /dev/null @@ -1,78 +0,0 @@ -#!/bin/sh - -# Script for common Dockerfile dependency installation in redhat linux based images - -set -ex -set -o pipefail -MACHINE=$(uname -m) - -if grep -i "centos" /etc/system-release >/dev/null; then - # As of 7/1/2024 mirrorlist.centos.org has been taken offline, so adjust accordingly - sed -i s/mirror.centos.org/vault.centos.org/g /etc/yum.repos.d/*.repo - sed -i s/^#.*baseurl=http/baseurl=http/g /etc/yum.repos.d/*.repo - sed -i s/^mirrorlist=http/#mirrorlist=http/g /etc/yum.repos.d/*.repo - - # Centos 7 derivatives have too old of a git version to run our generate script - # uninstall and ignore failures - yum remove -y git - yum -y install epel-release centos-release-scl - - # The release packages reinstate the mirrors, undo that again - sed -i s/mirror.centos.org/vault.centos.org/g /etc/yum.repos.d/*.repo - sed -i s/^#.*baseurl=http/baseurl=http/g /etc/yum.repos.d/*.repo - sed -i s/^mirrorlist=http/#mirrorlist=http/g /etc/yum.repos.d/*.repo - - yum -y install dnf - if [ "${MACHINE}" = "x86_64" ]; then - yum -y install https://repo.ius.io/ius-release-el7.rpm - dnf install -y git236 - else - dnf install -y rh-git227-git - ln -s /opt/rh/rh-git227/root/usr/bin/git /usr/local/bin/git - fi - dnf install -y devtoolset-10-gcc devtoolset-10-gcc-c++ pigz findutils -elif grep -i "rocky" /etc/system-release >/dev/null; then - # Temporary workaround until rocky 8 AppStream ships GCC 10.4 (10.3 is incompatible with NVCC) - cat << EOF > /etc/yum.repos.d/Rocky-Vault.repo -[vault] -name=Rocky Vault -baseurl=https://dl.rockylinux.org/vault/rocky/8.5/AppStream/\$basearch/os/ -gpgcheck=1 -enabled=1 -countme=1 -gpgkey=file:///etc/pki/rpm-gpg/RPM-GPG-KEY-rockyofficial -EOF - dnf install -y git \ - 
gcc-toolset-10-gcc-10.2.1-8.2.el8 \ - gcc-toolset-10-gcc-c++-10.2.1-8.2.el8 \ - findutils \ - yum-utils \ - pigz -else - echo "ERROR Unexpected distro" - exit 1 -fi - -if [ "${MACHINE}" = "x86_64" ] ; then - curl -s -L https://github.com/ccache/ccache/releases/download/v4.10.2/ccache-4.10.2-linux-x86_64.tar.xz | tar -Jx -C /tmp --strip-components 1 && \ - mv /tmp/ccache /usr/local/bin/ -else - yum -y install epel-release - yum install -y ccache -fi - -if [ -n "${CMAKE_VERSION}" ]; then - curl -s -L https://github.com/Kitware/CMake/releases/download/v${CMAKE_VERSION}/cmake-${CMAKE_VERSION}-linux-$(uname -m).tar.gz | tar -zx -C /usr --strip-components 1 -fi - -if [ -n "${GOLANG_VERSION}" ]; then - if [ "${MACHINE}" = "x86_64" ]; then - GO_ARCH="amd64" - else - GO_ARCH="arm64" - fi - mkdir -p /usr/local - curl -s -L https://dl.google.com/go/go${GOLANG_VERSION}.linux-${GO_ARCH}.tar.gz | tar xz -C /usr/local - ln -s /usr/local/go/bin/go /usr/local/bin/go - ln -s /usr/local/go/bin/gofmt /usr/local/bin/gofmt -fi diff --git a/server/routes.go b/server/routes.go index c2ec360a..5a4bb485 100644 --- a/server/routes.go +++ b/server/routes.go @@ -33,7 +33,6 @@ import ( "github.com/ollama/ollama/llm" "github.com/ollama/ollama/model/mllama" "github.com/ollama/ollama/openai" - "github.com/ollama/ollama/runners" "github.com/ollama/ollama/template" "github.com/ollama/ollama/types/errtypes" "github.com/ollama/ollama/types/model" @@ -1259,14 +1258,6 @@ func Serve(ln net.Listener) error { done() }() - // Locate and log what runners are present at startup - var runnerNames []string - for v := range runners.GetAvailableServers() { - runnerNames = append(runnerNames, v) - } - slog.Info("Dynamic LLM libraries", "runners", runnerNames) - slog.Debug("Override detection logic by setting OLLAMA_LLM_LIBRARY") - s.sched.Run(schedCtx) // At startup we retrieve GPU information so we can get log messages before loading a model From 711648c9bbc186697c19775bf140b174b6a5687a Mon Sep 17 00:00:00 2001 From: Parth Sareen Date: Wed, 29 Jan 2025 15:14:30 -0800 Subject: [PATCH 21/68] docs: update api.md with streaming with tools is enabled (#8676) --- docs/api.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/docs/api.md b/docs/api.md index ede6446f..e9719ad4 100644 --- a/docs/api.md +++ b/docs/api.md @@ -495,14 +495,14 @@ Generate the next message in a chat with a provided model. This is a streaming e - `model`: (required) the [model name](#model-names) - `messages`: the messages of the chat, this can be used to keep a chat memory -- `tools`: tools for the model to use if supported. 
Requires `stream` to be set to `false` +- `tools`: list of tools in JSON for the model to use if supported The `message` object has the following fields: - `role`: the role of the message, either `system`, `user`, `assistant`, or `tool` - `content`: the content of the message - `images` (optional): a list of images to include in the message (for multimodal models such as `llava`) -- `tool_calls` (optional): a list of tools the model wants to use +- `tool_calls` (optional): a list of tools in JSON that the model wants to use Advanced parameters (optional): From 5d75d837efc9315c19f538f2b2130baf5fbc242a Mon Sep 17 00:00:00 2001 From: Jeffrey Morgan Date: Thu, 30 Jan 2025 12:21:38 -0800 Subject: [PATCH 22/68] discover: fix default LibOllamaPath value (#8702) --- discover/path.go | 14 +++++++++----- 1 file changed, 9 insertions(+), 5 deletions(-) diff --git a/discover/path.go b/discover/path.go index a9a6518d..b6b648db 100644 --- a/discover/path.go +++ b/discover/path.go @@ -24,12 +24,14 @@ var LibOllamaPath string = func() string { return "" } - libPath := filepath.Dir(exe) + var libPath string switch runtime.GOOS { case "windows": libPath = filepath.Join(filepath.Dir(exe), "lib", "ollama") case "linux": libPath = filepath.Join(filepath.Dir(exe), "..", "lib", "ollama") + case "darwin": + libPath = filepath.Dir(exe) } cwd, err := os.Getwd() @@ -37,17 +39,19 @@ var LibOllamaPath string = func() string { return "" } - // build paths for development - buildPaths := []string{ + paths := []string{ + libPath, + + // build paths for development filepath.Join(filepath.Dir(exe), "build", "lib", "ollama"), filepath.Join(cwd, "build", "lib", "ollama"), } - for _, p := range buildPaths { + for _, p := range paths { if _, err := os.Stat(p); err == nil { return p } } - return libPath + return filepath.Dir(exe) }() From bea1f1fac6b6b51bb3b8a666789c518b7aaa8b94 Mon Sep 17 00:00:00 2001 From: Michael Yang Date: Thu, 30 Jan 2025 12:05:50 -0800 Subject: [PATCH 23/68] cgo: use O3 --- discover/gpu.go | 1 + discover/gpu_darwin.go | 1 + llama/llama.cpp/examples/llava/llava.go | 1 + llama/llama.cpp/src/llama.go | 1 + llama/llama.go | 1 + ml/backend/ggml/ggml/src/ggml-blas/blas.go | 1 + ml/backend/ggml/ggml/src/ggml-cpu/cpu.go | 4 +++- ml/backend/ggml/ggml/src/ggml-cpu/llamafile/llamafile.go | 1 + ml/backend/ggml/ggml/src/ggml-metal/metal.go | 1 + ml/backend/ggml/ggml/src/ggml.go | 1 + ml/backend/ggml/ggml/src/ggml_darwin_arm64.go | 1 + 11 files changed, 13 insertions(+), 1 deletion(-) diff --git a/discover/gpu.go b/discover/gpu.go index ba906a18..22195d22 100644 --- a/discover/gpu.go +++ b/discover/gpu.go @@ -3,6 +3,7 @@ package discover /* +#cgo CPPFLAGS: -O3 #cgo linux LDFLAGS: -lrt -lpthread -ldl -lstdc++ -lm #cgo windows LDFLAGS: -lpthread diff --git a/discover/gpu_darwin.go b/discover/gpu_darwin.go index dd5bf6e2..f102a6fe 100644 --- a/discover/gpu_darwin.go +++ b/discover/gpu_darwin.go @@ -4,6 +4,7 @@ package discover /* #cgo CFLAGS: -x objective-c +#cgo CPPFLAGS: -O3 #cgo LDFLAGS: -framework Foundation -framework CoreGraphics -framework Metal #include "gpu_info_darwin.h" */ diff --git a/llama/llama.cpp/examples/llava/llava.go b/llama/llama.cpp/examples/llava/llava.go index 37b031cb..c8b83d81 100644 --- a/llama/llama.cpp/examples/llava/llava.go +++ b/llama/llama.cpp/examples/llava/llava.go @@ -1,6 +1,7 @@ package llava // #cgo CXXFLAGS: -std=c++11 +// #cgo CPPFLAGS: -O3 // #cgo CPPFLAGS: -I${SRCDIR}/../../include -I${SRCDIR}/../../common // #cgo CPPFLAGS: -I${SRCDIR}/../../../../ml/backend/ggml/ggml/include 
import "C" diff --git a/llama/llama.cpp/src/llama.go b/llama/llama.cpp/src/llama.go index ddbd5378..d3a2bb97 100644 --- a/llama/llama.cpp/src/llama.go +++ b/llama/llama.cpp/src/llama.go @@ -1,6 +1,7 @@ package llama // #cgo CXXFLAGS: -std=c++17 +// #cgo CPPFLAGS: -O3 // #cgo CPPFLAGS: -I${SRCDIR}/../include // #cgo CPPFLAGS: -I${SRCDIR}/../../../ml/backend/ggml/ggml/include // #cgo windows CPPFLAGS: -D_WIN32_WINNT=0x0602 diff --git a/llama/llama.go b/llama/llama.go index 1d4513e3..36450f28 100644 --- a/llama/llama.go +++ b/llama/llama.go @@ -3,6 +3,7 @@ package llama /* #cgo CFLAGS: -std=c11 #cgo CXXFLAGS: -std=c++17 +#cgo CPPFLAGS: -O3 #cgo CPPFLAGS: -I${SRCDIR}/llama.cpp/include #cgo CPPFLAGS: -I${SRCDIR}/llama.cpp/common #cgo CPPFLAGS: -I${SRCDIR}/llama.cpp/examples/llava diff --git a/ml/backend/ggml/ggml/src/ggml-blas/blas.go b/ml/backend/ggml/ggml/src/ggml-blas/blas.go index b29c9f14..54678178 100644 --- a/ml/backend/ggml/ggml/src/ggml-blas/blas.go +++ b/ml/backend/ggml/ggml/src/ggml-blas/blas.go @@ -3,6 +3,7 @@ package blas // #cgo CXXFLAGS: -std=c++11 +// #cgo CPPFLAGS: -O3 // #cgo CPPFLAGS: -DGGML_USE_BLAS // #cgo CPPFLAGS: -I${SRCDIR}/.. -I${SRCDIR}/../../include // #cgo darwin,arm64 CPPFLAGS: -DGGML_BLAS_USE_ACCELERATE -DACCELERATE_NEW_LAPACK -DACCELERATE_LAPACK_ILP64 diff --git a/ml/backend/ggml/ggml/src/ggml-cpu/cpu.go b/ml/backend/ggml/ggml/src/ggml-cpu/cpu.go index f0bb54c2..1fd6c430 100644 --- a/ml/backend/ggml/ggml/src/ggml-cpu/cpu.go +++ b/ml/backend/ggml/ggml/src/ggml-cpu/cpu.go @@ -2,9 +2,11 @@ package cpu // #cgo CFLAGS: -Wno-implicit-function-declaration // #cgo CXXFLAGS: -std=c++17 -// #cgo CPPFLAGS: -I${SRCDIR}/amx -I${SRCDIR}/llamafile -I${SRCDIR}/.. -I${SRCDIR}/../../include +// #cgo CPPFLAGS: -O3 // #cgo CPPFLAGS: -DGGML_USE_LLAMAFILE +// #cgo CPPFLAGS: -I${SRCDIR}/amx -I${SRCDIR}/llamafile -I${SRCDIR}/.. -I${SRCDIR}/../../include // #cgo linux CPPFLAGS: -D_GNU_SOURCE +// #cgo arm64 CPPFLAGS: -DGGML_USE_AARCH64 // #cgo darwin,arm64 CPPFLAGS: -DGGML_USE_ACCELERATE -DACCELERATE_NEW_LAPACK -DACCELERATE_LAPACK_ILP64 // #cgo darwin,arm64 LDFLAGS: -framework Accelerate import "C" diff --git a/ml/backend/ggml/ggml/src/ggml-cpu/llamafile/llamafile.go b/ml/backend/ggml/ggml/src/ggml-cpu/llamafile/llamafile.go index 09b002ce..44b9ea42 100644 --- a/ml/backend/ggml/ggml/src/ggml-cpu/llamafile/llamafile.go +++ b/ml/backend/ggml/ggml/src/ggml-cpu/llamafile/llamafile.go @@ -1,5 +1,6 @@ package llamafile // #cgo CXXFLAGS: -std=c++17 +// #cgo CPPFLAGS: -O3 // #cgo CPPFLAGS: -I${SRCDIR}/.. -I${SRCDIR}/../.. -I${SRCDIR}/../../../include import "C" diff --git a/ml/backend/ggml/ggml/src/ggml-metal/metal.go b/ml/backend/ggml/ggml/src/ggml-metal/metal.go index 1025e205..cbebff06 100644 --- a/ml/backend/ggml/ggml/src/ggml-metal/metal.go +++ b/ml/backend/ggml/ggml/src/ggml-metal/metal.go @@ -4,6 +4,7 @@ package metal //go:generate sh -c "{ echo // Code generated $(date). DO NOT EDIT.; sed -e '/__embed_ggml-common.h__/r ../ggml-common.h' -e '/__embed_ggml-common.h__/d' -e '/#include \"ggml-metal-impl.h\"/r ggml-metal-impl.h' -e '/#include \"ggml-metal-impl.h\"/d' ggml-metal.metal; } >ggml-metal-embed.metal" +// #cgo CPPFLAGS: -O3 // #cgo CPPFLAGS: -DGGML_METAL_EMBED_LIBRARY -I.. 
-I../../include // #cgo LDFLAGS: -framework Metal -framework MetalKit import "C" diff --git a/ml/backend/ggml/ggml/src/ggml.go b/ml/backend/ggml/ggml/src/ggml.go index 7cf40e70..12c2a01a 100644 --- a/ml/backend/ggml/ggml/src/ggml.go +++ b/ml/backend/ggml/ggml/src/ggml.go @@ -1,6 +1,7 @@ package ggml // #cgo CXXFLAGS: -std=c++17 +// #cgo CPPFLAGS: -O3 // #cgo CPPFLAGS: -DNDEBUG -DGGML_USE_CPU // #cgo CPPFLAGS: -I${SRCDIR}/../include -I${SRCDIR}/ggml-cpu // #cgo windows LDFLAGS: -lmsvcrt -static -static-libgcc -static-libstdc++ diff --git a/ml/backend/ggml/ggml/src/ggml_darwin_arm64.go b/ml/backend/ggml/ggml/src/ggml_darwin_arm64.go index beffa64e..7ac46867 100644 --- a/ml/backend/ggml/ggml/src/ggml_darwin_arm64.go +++ b/ml/backend/ggml/ggml/src/ggml_darwin_arm64.go @@ -1,5 +1,6 @@ package ggml +// #cgo CPPFLAGS: -O3 // #cgo CPPFLAGS: -DGGML_USE_METAL -DGGML_USE_BLAS // #cgo LDFLAGS: -framework Foundation import "C" From 3f0cb36bdbe4dd6ac35a39598c7253cb0cf1349a Mon Sep 17 00:00:00 2001 From: Michael Yang Date: Thu, 30 Jan 2025 12:20:11 -0800 Subject: [PATCH 24/68] build: set goflags in linux release --- .github/workflows/release.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/release.yaml b/.github/workflows/release.yaml index f9ab533a..c92d59c9 100644 --- a/.github/workflows/release.yaml +++ b/.github/workflows/release.yaml @@ -291,7 +291,7 @@ jobs: - uses: docker/setup-buildx-action@v3 - run: | apt-get update && apt-get install pigz - for TARGET in ${{ matrix.targets }}; do docker buildx build --platform $PLATFORM --target $TARGET --output type=local,dest=dist/$PLATFORM .; done + for TARGET in ${{ matrix.targets }}; do docker buildx build --platform $PLATFORM --target $TARGET --build-arg GOFLAGS --output type=local,dest=dist/$PLATFORM .; done tar c -C dist/$PLATFORM . | pigz -9cv >dist/ollama-${PLATFORM//\//-}.tgz env: PLATFORM: ${{ matrix.os }}/${{ matrix.arch }} From 548a9f56a6315316481a9a901f77922cb77e0f68 Mon Sep 17 00:00:00 2001 From: Michael Yang Date: Thu, 30 Jan 2025 13:06:48 -0800 Subject: [PATCH 25/68] Revert "cgo: use O3" This reverts commit bea1f1fac6b6b51bb3b8a666789c518b7aaa8b94. 
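Taken together with the two commits that follow, this revert moves the -O3 optimization flags out of per-file `#cgo` directives and into the build environment. A rough sketch of the equivalent commands (illustrative only; the real wiring is in `.github/workflows/release.yaml` below):

```shell
# Export the flags once instead of repeating `#cgo CPPFLAGS: -O3` in every file:
export CGO_CFLAGS='-O3'
export CGO_CXXFLAGS='-O3'
go build .

# docker buildx forwards them with bare --build-arg flags, which read their
# values from the calling environment when no `=value` is given:
docker buildx build --build-arg CGO_CFLAGS --build-arg CGO_CXXFLAGS .
```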
--- discover/gpu.go | 1 - discover/gpu_darwin.go | 1 - llama/llama.cpp/examples/llava/llava.go | 1 - llama/llama.cpp/src/llama.go | 1 - llama/llama.go | 1 - ml/backend/ggml/ggml/src/ggml-blas/blas.go | 1 - ml/backend/ggml/ggml/src/ggml-cpu/cpu.go | 4 +--- ml/backend/ggml/ggml/src/ggml-cpu/llamafile/llamafile.go | 1 - ml/backend/ggml/ggml/src/ggml-metal/metal.go | 1 - ml/backend/ggml/ggml/src/ggml.go | 1 - ml/backend/ggml/ggml/src/ggml_darwin_arm64.go | 1 - 11 files changed, 1 insertion(+), 13 deletions(-) diff --git a/discover/gpu.go b/discover/gpu.go index 22195d22..ba906a18 100644 --- a/discover/gpu.go +++ b/discover/gpu.go @@ -3,7 +3,6 @@ package discover /* -#cgo CPPFLAGS: -O3 #cgo linux LDFLAGS: -lrt -lpthread -ldl -lstdc++ -lm #cgo windows LDFLAGS: -lpthread diff --git a/discover/gpu_darwin.go b/discover/gpu_darwin.go index f102a6fe..dd5bf6e2 100644 --- a/discover/gpu_darwin.go +++ b/discover/gpu_darwin.go @@ -4,7 +4,6 @@ package discover /* #cgo CFLAGS: -x objective-c -#cgo CPPFLAGS: -O3 #cgo LDFLAGS: -framework Foundation -framework CoreGraphics -framework Metal #include "gpu_info_darwin.h" */ diff --git a/llama/llama.cpp/examples/llava/llava.go b/llama/llama.cpp/examples/llava/llava.go index c8b83d81..37b031cb 100644 --- a/llama/llama.cpp/examples/llava/llava.go +++ b/llama/llama.cpp/examples/llava/llava.go @@ -1,7 +1,6 @@ package llava // #cgo CXXFLAGS: -std=c++11 -// #cgo CPPFLAGS: -O3 // #cgo CPPFLAGS: -I${SRCDIR}/../../include -I${SRCDIR}/../../common // #cgo CPPFLAGS: -I${SRCDIR}/../../../../ml/backend/ggml/ggml/include import "C" diff --git a/llama/llama.cpp/src/llama.go b/llama/llama.cpp/src/llama.go index d3a2bb97..ddbd5378 100644 --- a/llama/llama.cpp/src/llama.go +++ b/llama/llama.cpp/src/llama.go @@ -1,7 +1,6 @@ package llama // #cgo CXXFLAGS: -std=c++17 -// #cgo CPPFLAGS: -O3 // #cgo CPPFLAGS: -I${SRCDIR}/../include // #cgo CPPFLAGS: -I${SRCDIR}/../../../ml/backend/ggml/ggml/include // #cgo windows CPPFLAGS: -D_WIN32_WINNT=0x0602 diff --git a/llama/llama.go b/llama/llama.go index 36450f28..1d4513e3 100644 --- a/llama/llama.go +++ b/llama/llama.go @@ -3,7 +3,6 @@ package llama /* #cgo CFLAGS: -std=c11 #cgo CXXFLAGS: -std=c++17 -#cgo CPPFLAGS: -O3 #cgo CPPFLAGS: -I${SRCDIR}/llama.cpp/include #cgo CPPFLAGS: -I${SRCDIR}/llama.cpp/common #cgo CPPFLAGS: -I${SRCDIR}/llama.cpp/examples/llava diff --git a/ml/backend/ggml/ggml/src/ggml-blas/blas.go b/ml/backend/ggml/ggml/src/ggml-blas/blas.go index 54678178..b29c9f14 100644 --- a/ml/backend/ggml/ggml/src/ggml-blas/blas.go +++ b/ml/backend/ggml/ggml/src/ggml-blas/blas.go @@ -3,7 +3,6 @@ package blas // #cgo CXXFLAGS: -std=c++11 -// #cgo CPPFLAGS: -O3 // #cgo CPPFLAGS: -DGGML_USE_BLAS // #cgo CPPFLAGS: -I${SRCDIR}/.. -I${SRCDIR}/../../include // #cgo darwin,arm64 CPPFLAGS: -DGGML_BLAS_USE_ACCELERATE -DACCELERATE_NEW_LAPACK -DACCELERATE_LAPACK_ILP64 diff --git a/ml/backend/ggml/ggml/src/ggml-cpu/cpu.go b/ml/backend/ggml/ggml/src/ggml-cpu/cpu.go index 1fd6c430..f0bb54c2 100644 --- a/ml/backend/ggml/ggml/src/ggml-cpu/cpu.go +++ b/ml/backend/ggml/ggml/src/ggml-cpu/cpu.go @@ -2,11 +2,9 @@ package cpu // #cgo CFLAGS: -Wno-implicit-function-declaration // #cgo CXXFLAGS: -std=c++17 -// #cgo CPPFLAGS: -O3 -// #cgo CPPFLAGS: -DGGML_USE_LLAMAFILE // #cgo CPPFLAGS: -I${SRCDIR}/amx -I${SRCDIR}/llamafile -I${SRCDIR}/.. 
-I${SRCDIR}/../../include
+// #cgo CPPFLAGS: -DGGML_USE_LLAMAFILE
 // #cgo linux CPPFLAGS: -D_GNU_SOURCE
-// #cgo arm64 CPPFLAGS: -DGGML_USE_AARCH64
 // #cgo darwin,arm64 CPPFLAGS: -DGGML_USE_ACCELERATE -DACCELERATE_NEW_LAPACK -DACCELERATE_LAPACK_ILP64
 // #cgo darwin,arm64 LDFLAGS: -framework Accelerate
 import "C"
diff --git a/ml/backend/ggml/ggml/src/ggml-cpu/llamafile/llamafile.go b/ml/backend/ggml/ggml/src/ggml-cpu/llamafile/llamafile.go
index 44b9ea42..09b002ce 100644
--- a/ml/backend/ggml/ggml/src/ggml-cpu/llamafile/llamafile.go
+++ b/ml/backend/ggml/ggml/src/ggml-cpu/llamafile/llamafile.go
@@ -1,6 +1,5 @@
 package llamafile
 
 // #cgo CXXFLAGS: -std=c++17
-// #cgo CPPFLAGS: -O3
 // #cgo CPPFLAGS: -I${SRCDIR}/.. -I${SRCDIR}/../.. -I${SRCDIR}/../../../include
 import "C"
diff --git a/ml/backend/ggml/ggml/src/ggml-metal/metal.go b/ml/backend/ggml/ggml/src/ggml-metal/metal.go
index cbebff06..1025e205 100644
--- a/ml/backend/ggml/ggml/src/ggml-metal/metal.go
+++ b/ml/backend/ggml/ggml/src/ggml-metal/metal.go
@@ -4,7 +4,6 @@ package metal
 
 //go:generate sh -c "{ echo // Code generated $(date). DO NOT EDIT.; sed -e '/__embed_ggml-common.h__/r ../ggml-common.h' -e '/__embed_ggml-common.h__/d' -e '/#include \"ggml-metal-impl.h\"/r ggml-metal-impl.h' -e '/#include \"ggml-metal-impl.h\"/d' ggml-metal.metal; } >ggml-metal-embed.metal"
 
-// #cgo CPPFLAGS: -O3
 // #cgo CPPFLAGS: -DGGML_METAL_EMBED_LIBRARY -I.. -I../../include
 // #cgo LDFLAGS: -framework Metal -framework MetalKit
 import "C"
diff --git a/ml/backend/ggml/ggml/src/ggml.go b/ml/backend/ggml/ggml/src/ggml.go
index 12c2a01a..7cf40e70 100644
--- a/ml/backend/ggml/ggml/src/ggml.go
+++ b/ml/backend/ggml/ggml/src/ggml.go
@@ -1,7 +1,6 @@
 package ggml
 
 // #cgo CXXFLAGS: -std=c++17
-// #cgo CPPFLAGS: -O3
 // #cgo CPPFLAGS: -DNDEBUG -DGGML_USE_CPU
 // #cgo CPPFLAGS: -I${SRCDIR}/../include -I${SRCDIR}/ggml-cpu
 // #cgo windows LDFLAGS: -lmsvcrt -static -static-libgcc -static-libstdc++
diff --git a/ml/backend/ggml/ggml/src/ggml_darwin_arm64.go b/ml/backend/ggml/ggml/src/ggml_darwin_arm64.go
index 7ac46867..beffa64e 100644
--- a/ml/backend/ggml/ggml/src/ggml_darwin_arm64.go
+++ b/ml/backend/ggml/ggml/src/ggml_darwin_arm64.go
@@ -1,6 +1,5 @@
 package ggml
 
-// #cgo CPPFLAGS: -O3
 // #cgo CPPFLAGS: -DGGML_USE_METAL -DGGML_USE_BLAS
 // #cgo LDFLAGS: -framework Foundation
 import "C"

From 39fd89308c0bbe26311db583cf9729f81ffa9a94 Mon Sep 17 00:00:00 2001
From: Michael Yang
Date: Thu, 30 Jan 2025 13:17:17 -0800
Subject: [PATCH 26/68] build: set CFLAGS=-O3 specifically for cpu.go

---
 .github/workflows/release.yaml           | 12 ++++++++++--
 ml/backend/ggml/ggml/src/ggml-cpu/cpu.go |  2 +-
 2 files changed, 11 insertions(+), 3 deletions(-)

diff --git a/.github/workflows/release.yaml b/.github/workflows/release.yaml
index c92d59c9..6f0706e4 100644
--- a/.github/workflows/release.yaml
+++ b/.github/workflows/release.yaml
@@ -5,6 +5,10 @@ on:
   push:
     tags:
       - 'v*'
+env:
+  CGO_CFLAGS: '-O3'
+  CGO_CXXFLAGS: '-O3'
+
 jobs:
   setup-environment:
     runs-on: ubuntu-latest
@@ -291,7 +295,7 @@ jobs:
       - uses: docker/setup-buildx-action@v3
       - run: |
          apt-get update && apt-get install pigz
-          for TARGET in ${{ matrix.targets }}; do docker buildx build --platform $PLATFORM --target $TARGET --build-arg GOFLAGS --output type=local,dest=dist/$PLATFORM .; done
+          for TARGET in ${{ matrix.targets }}; do docker buildx build --platform $PLATFORM --target $TARGET --build-arg GOFLAGS --build-arg CGO_CFLAGS --build-arg CGO_CXXFLAGS --output type=local,dest=dist/$PLATFORM .; done
          tar c -C dist/$PLATFORM .
| pigz -9cv >dist/ollama-${PLATFORM//\//-}.tgz env: PLATFORM: ${{ matrix.os }}/${{ matrix.arch }} @@ -308,12 +312,16 @@ jobs: - flavor: 'latest=false' platforms: linux/amd64,linux/arm64 build-args: | + CGO_CFLAGS=${{ env.CGO_CFLAGS }} + CGO_CXXFLAGS=${{ env.CGO_CXXFLAGS }} GOFLAGS=${{ needs.setup-environment.outputs.GOFLAGS }} - flavor: 'latest=false,suffix=rocm' platforms: linux/amd64 build-args: | - GOFLAGS=${{ needs.setup-environment.outputs.GOFLAGS }} + CGO_CFLAGS=${{ env.CGO_CFLAGS }} + CGO_CXXFLAGS=${{ env.CGO_CXXFLAGS }} FLAVOR=rocm + GOFLAGS=${{ needs.setup-environment.outputs.GOFLAGS }} runs-on: linux environment: release needs: setup-environment diff --git a/ml/backend/ggml/ggml/src/ggml-cpu/cpu.go b/ml/backend/ggml/ggml/src/ggml-cpu/cpu.go index f0bb54c2..895d093c 100644 --- a/ml/backend/ggml/ggml/src/ggml-cpu/cpu.go +++ b/ml/backend/ggml/ggml/src/ggml-cpu/cpu.go @@ -1,6 +1,6 @@ package cpu -// #cgo CFLAGS: -Wno-implicit-function-declaration +// #cgo CFLAGS: -O3 -Wno-implicit-function-declaration // #cgo CXXFLAGS: -std=c++17 // #cgo CPPFLAGS: -I${SRCDIR}/amx -I${SRCDIR}/llamafile -I${SRCDIR}/.. -I${SRCDIR}/../../include // #cgo CPPFLAGS: -DGGML_USE_LLAMAFILE From 475333d533e088538e5d77c0ba65ea60833b2c77 Mon Sep 17 00:00:00 2001 From: Michael Yang Date: Fri, 31 Jan 2025 14:25:49 -0800 Subject: [PATCH 27/68] fix docker build-args env context is not accessible from job.*.strategy. since it's in the environment, just tell docker to use the environment variable[1] [1]: https://docs.docker.com/reference/cli/docker/buildx/build/#build-arg --- .github/workflows/release.yaml | 14 ++++++++------ 1 file changed, 8 insertions(+), 6 deletions(-) diff --git a/.github/workflows/release.yaml b/.github/workflows/release.yaml index 6f0706e4..e9d8f721 100644 --- a/.github/workflows/release.yaml +++ b/.github/workflows/release.yaml @@ -312,16 +312,18 @@ jobs: - flavor: 'latest=false' platforms: linux/amd64,linux/arm64 build-args: | - CGO_CFLAGS=${{ env.CGO_CFLAGS }} - CGO_CXXFLAGS=${{ env.CGO_CXXFLAGS }} - GOFLAGS=${{ needs.setup-environment.outputs.GOFLAGS }} + CGO_CFLAGS + CGO_CXXFLAGS + GOFLAGS - flavor: 'latest=false,suffix=rocm' platforms: linux/amd64 build-args: | - CGO_CFLAGS=${{ env.CGO_CFLAGS }} - CGO_CXXFLAGS=${{ env.CGO_CXXFLAGS }} + CGO_CFLAGS + CGO_CXXFLAGS + GOFLAGS FLAVOR=rocm - GOFLAGS=${{ needs.setup-environment.outputs.GOFLAGS }} + env: + GOFLAGS: ${{ needs.setup-environment.outputs.GOFLAGS }} runs-on: linux environment: release needs: setup-environment From f4321a421caf4df3ebe4bfdacc3b2e27e911761b Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?An=C4=B1l=20Kaynar?= Date: Sun, 2 Feb 2025 23:56:10 +0300 Subject: [PATCH 28/68] readme: add MinimalNextOllamaChat to community integrations (#8767) --- README.md | 1 + 1 file changed, 1 insertion(+) diff --git a/README.md b/README.md index f864f99f..d9e8a099 100644 --- a/README.md +++ b/README.md @@ -370,6 +370,7 @@ See the [API documentation](./docs/api.md) for all endpoints. - [aidful-ollama-model-delete](https://github.com/AidfulAI/aidful-ollama-model-delete) (User interface for simplified model cleanup) - [Perplexica](https://github.com/ItzCrazyKns/Perplexica) (An AI-powered search engine & an open-source alternative to Perplexity AI) - [AI Toolkit for Visual Studio Code](https://aka.ms/ai-tooklit/ollama-docs) (Microsoft-official VSCode extension to chat, test, evaluate models with Ollama support, and use them in your AI applications.) 
+- [MinimalNextOllamaChat](https://github.com/anilkay/MinimalNextOllamaChat) (Minimal Web UI for Chat and Model Control) ### Cloud From ad22ace439eb3fab7230134e56bb6276a78347e4 Mon Sep 17 00:00:00 2001 From: Davide Bertoni <121551954+Bert0ns@users.noreply.github.com> Date: Sun, 2 Feb 2025 22:12:55 +0100 Subject: [PATCH 29/68] docs: add missing json and shell code blocks in api.md (#8766) --- docs/api.md | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/docs/api.md b/docs/api.md index e9719ad4..5d1b7d64 100644 --- a/docs/api.md +++ b/docs/api.md @@ -306,7 +306,7 @@ curl http://localhost:11434/api/generate -d '{ #### Response -``` +```json { "model": "llava", "created_at": "2023-11-03T15:36:02.583064Z", @@ -795,7 +795,7 @@ curl http://localhost:11434/api/chat -d '{ ##### Request -``` +```shell curl http://localhost:11434/api/chat -d '{ "model": "llama3.2", "messages": [ @@ -870,7 +870,7 @@ If the messages array is empty, the model will be loaded into memory. ##### Request -``` +```shell curl http://localhost:11434/api/chat -d '{ "model": "llama3.2", "messages": [] @@ -897,7 +897,7 @@ If the messages array is empty and the `keep_alive` parameter is set to `0`, a m ##### Request -``` +```shell curl http://localhost:11434/api/chat -d '{ "model": "llama3.2", "messages": [], From 50566113ace20bad141859710e2f2689248324b4 Mon Sep 17 00:00:00 2001 From: Jeffrey Morgan Date: Mon, 3 Feb 2025 12:27:48 -0800 Subject: [PATCH 30/68] llm: do not error if LibOllamaPath does not exist (#8801) --- llm/server.go | 17 +++-------------- 1 file changed, 3 insertions(+), 14 deletions(-) diff --git a/llm/server.go b/llm/server.go index 640c6816..9daec344 100644 --- a/llm/server.go +++ b/llm/server.go @@ -89,7 +89,6 @@ func LoadModel(model string, maxArraySize int) (*GGML, error) { // NewLlamaServer will run a server for the given GPUs // The gpu list must be a single family. 
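Note on the change below: the hunk drops the hard failure on a missing runner-library directory — `os.ReadDir` moves inside an `if`, so an absent `discover.LibOllamaPath` simply yields an empty library map instead of an error. A minimal, self-contained sketch of that best-effort pattern (names are illustrative, not the patch's API):

```go
package main

import (
	"fmt"
	"os"
	"path/filepath"
)

// collectLibs builds a name->path map for optional runtime libraries.
// A missing or unreadable directory is not an error: the caller can
// still run, just without any discovered libraries.
func collectLibs(dir string) map[string]string {
	libs := make(map[string]string)
	if entries, err := os.ReadDir(dir); err == nil {
		for _, entry := range entries {
			libs[entry.Name()] = filepath.Join(dir, entry.Name())
		}
	}
	return libs
}

func main() {
	fmt.Println(collectLibs("/nonexistent")) // map[] — no error, no libraries
}
```

The design choice is that acceleration libraries are optional: their absence degrades capability rather than aborting startup.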
func NewLlamaServer(gpus discover.GpuInfoList, model string, ggml *GGML, adapters, projectors []string, opts api.Options, numParallel int) (LlamaServer, error) { - var err error var systemTotalMemory uint64 var systemFreeMemory uint64 var systemSwapFreeMemory uint64 @@ -233,19 +232,9 @@ func NewLlamaServer(gpus discover.GpuInfoList, model string, ggml *GGML, adapter params = append(params, "--multiuser-cache") } - // get available libraries - if err != nil { - return nil, fmt.Errorf("could not get libollama dir: %w", err) - } - - entries, err := os.ReadDir(discover.LibOllamaPath) - if err != nil { - return nil, fmt.Errorf("could not read libollama dir: %w", err) - } - libs := make(map[string]string) - for _, entry := range entries { - if entry.IsDir() { + if entries, err := os.ReadDir(discover.LibOllamaPath); err == nil { + for _, entry := range entries { libs[entry.Name()] = filepath.Join(discover.LibOllamaPath, entry.Name()) } } @@ -285,7 +274,7 @@ func NewLlamaServer(gpus discover.GpuInfoList, model string, ggml *GGML, adapter } } if port == 0 { - slog.Debug("ResolveTCPAddr failed ", "error", err) + slog.Debug("ResolveTCPAddr failed, using random port") port = rand.Intn(65535-49152) + 49152 // get a random port in the ephemeral range } finalParams := []string{"runner"} From e8061840235bb953d621796b7d65e86541dbc0a1 Mon Sep 17 00:00:00 2001 From: Michael Yang Date: Fri, 31 Jan 2025 16:19:41 -0800 Subject: [PATCH 31/68] fix release workflow --- .github/workflows/release.yaml | 57 +++++++++++++++++----------------- macapp/forge.config.ts | 2 +- scripts/build_darwin.sh | 30 +++++++++--------- scripts/build_windows.ps1 | 11 +++++-- 4 files changed, 53 insertions(+), 47 deletions(-) diff --git a/.github/workflows/release.yaml b/.github/workflows/release.yaml index e9d8f721..e9221bdf 100644 --- a/.github/workflows/release.yaml +++ b/.github/workflows/release.yaml @@ -81,7 +81,7 @@ jobs: path: dist/darwin-arm64 - run: | export VERSION=${GITHUB_REF_NAME#v} - ./scripts/build_darwin.sh macapp sign + ./scripts/build_darwin.sh sign macapp env: APPLE_IDENTITY: ${{ secrets.APPLE_IDENTITY }} APPLE_PASSWORD: ${{ secrets.APPLE_PASSWORD }} @@ -197,33 +197,38 @@ jobs: env: GOFLAGS: ${{ needs.setup-environment.outputs.GOFLAGS }} steps: - - name: Install system dependencies + - name: Install AMD64 system dependencies + if: matrix.arch == 'amd64' run: | $ErrorActionPreference = "Stop" - if ("${{ matrix.arch }}" -eq 'amd64') { - Start-Process "C:\msys64\usr\bin\pacman.exe" -ArgumentList @("-S", "--noconfirm", "mingw-w64-clang-x86_64-gcc-compat", "mingw-w64-clang-x86_64-clang") -NoNewWindow -Wait - echo "C:\msys64\usr\bin" | Out-File -FilePath $env:GITHUB_PATH -Encoding utf8 -Append - echo "C:\msys64\clang64\bin" | Out-File -FilePath $env:GITHUB_PATH -Encoding utf8 -Append - } elseif ("${{ matrix.arch }}" -eq 'arm64') { - Set-ExecutionPolicy Bypass -Scope Process -Force - [System.Net.ServicePointManager]::SecurityProtocol = [System.Net.ServicePointManager]::SecurityProtocol -bor 3072 - iex ((New-Object System.Net.WebClient).DownloadString('https://community.chocolatey.org/install.ps1')) - echo "C:\ProgramData\chocolatey\bin" | Out-File -FilePath $env:GITHUB_PATH -Encoding utf8 -Append + Start-Process "C:\msys64\usr\bin\pacman.exe" -ArgumentList @("-S", "--noconfirm", "mingw-w64-clang-x86_64-gcc-compat", "mingw-w64-clang-x86_64-clang") -NoNewWindow -Wait + echo "C:\msys64\usr\bin" | Out-File -FilePath $env:GITHUB_PATH -Encoding utf8 -Append + echo "C:\msys64\clang64\bin" | Out-File -FilePath $env:GITHUB_PATH 
-Encoding utf8 -Append + - name: Install ARM64 system dependencies + if: matrix.arch == 'arm64' + run: | + $ErrorActionPreference = "Stop" + Set-ExecutionPolicy Bypass -Scope Process -Force + [System.Net.ServicePointManager]::SecurityProtocol = [System.Net.ServicePointManager]::SecurityProtocol -bor 3072 + iex ((New-Object System.Net.WebClient).DownloadString('https://community.chocolatey.org/install.ps1')) + echo "C:\ProgramData\chocolatey\bin" | Out-File -FilePath $env:GITHUB_PATH -Encoding utf8 -Append - choco install -y --no-progress git gzip - echo "C:\Program Files\Git\cmd" | Out-File -FilePath $env:GITHUB_PATH -Encoding utf8 -Append + choco install -y --no-progress git gzip + echo "C:\Program Files\Git\cmd" | Out-File -FilePath $env:GITHUB_PATH -Encoding utf8 -Append - Invoke-WebRequest -Uri "https://github.com/mstorsjo/llvm-mingw/releases/download/20240619/llvm-mingw-20240619-ucrt-aarch64.zip" -OutFile "${{ runner.temp }}\llvm-mingw-ucrt-aarch64.zip" - Expand-Archive -Path ${{ runner.temp }}\llvm-mingw-ucrt-aarch64.zip -DestinationPath "C:\Program Files\" - $installPath=(Resolve-Path -Path "C:\Program Files\llvm-mingw-*-ucrt-aarch64").path - echo $installPath\bin | Out-File -FilePath $env:GITHUB_PATH -Encoding utf8 -Append - } + Invoke-WebRequest -Uri "https://github.com/mstorsjo/llvm-mingw/releases/download/20240619/llvm-mingw-20240619-ucrt-aarch64.zip" -OutFile "${{ runner.temp }}\llvm-mingw-ucrt-aarch64.zip" + Expand-Archive -Path ${{ runner.temp }}\llvm-mingw-ucrt-aarch64.zip -DestinationPath "C:\Program Files\" + $installPath=(Resolve-Path -Path "C:\Program Files\llvm-mingw-*-ucrt-aarch64").path + echo $installPath\bin | Out-File -FilePath $env:GITHUB_PATH -Encoding utf8 -Append - uses: actions/checkout@v4 - uses: actions/setup-go@v5 with: go-version-file: go.mod - run: | go build -o dist/${{ matrix.os }}-${{ matrix.arch }}/ . + - if: matrix.arch == 'arm64' + run: | + Invoke-WebRequest -Uri "https://aka.ms/vs/17/release/vc_redist.arm64.exe" -OutFile "dist\windows-arm64\vc_redist.arm64.exe" - run: | $env:VERSION='${{ github.ref_name }}' -Replace "v(.*)", '$1' & .\scripts\build_windows.ps1 buildApp @@ -258,12 +263,12 @@ jobs: echo "${{ vars.OLLAMA_CERT }}" >ollama_inc.crt - uses: actions/download-artifact@v4 with: - name: build-windows-* + pattern: build-windows-* path: dist\ merge-multiple: true - uses: actions/download-artifact@v4 with: - name: depends-windows-amd64-* + pattern: depends-windows-amd64-* path: dist\windows-amd64\ merge-multiple: true - run: | @@ -294,8 +299,8 @@ jobs: - uses: actions/checkout@v4 - uses: docker/setup-buildx-action@v3 - run: | - apt-get update && apt-get install pigz - for TARGET in ${{ matrix.targets }}; do docker buildx build --platform $PLATFORM --target $TARGET --build-arg GOFLAGS --build-arg CGO_CFLAGS --build-args CGO_CXXFLAGS --output type=local,dest=dist/$PLATFORM .; done + sudo apt-get update && sudo apt-get install pigz + for TARGET in ${{ matrix.targets }}; do docker buildx build --platform $PLATFORM --target $TARGET --build-arg GOFLAGS --build-arg CGO_CFLAGS --build-arg CGO_CXXFLAGS --output type=local,dest=dist/$PLATFORM .; done tar c -C dist/$PLATFORM . 
| pigz -9cv >dist/ollama-${PLATFORM//\//-}.tgz env: PLATFORM: ${{ matrix.os }}/${{ matrix.arch }} @@ -371,20 +376,16 @@ jobs: run: | - uses: actions/download-artifact@v4 with: + name: dist-darwin path: dist - pattern: dist-darwin - uses: actions/download-artifact@v4 with: + name: dist-windows path: dist - pattern: dist-windows - uses: actions/download-artifact@v4 with: - path: dist pattern: dist-linux-* - - uses: actions/download-artifact@v4 - with: path: dist - pattern: dist-windows - run: | ls -lh dist/ (cd dist; find . -type f | xargs sha256sum > ../sha256sum.txt) diff --git a/macapp/forge.config.ts b/macapp/forge.config.ts index d347eed4..540fc0a0 100644 --- a/macapp/forge.config.ts +++ b/macapp/forge.config.ts @@ -19,7 +19,7 @@ const config: ForgeConfig = { icon: './assets/icon.icns', extraResource: [ path.join(__dirname, '../dist/darwin/ollama'), - ...fs.readdirSync(path.join(__dirname, '../dist/darwin/amd64')).map(f => path.join(__dirname, '../dist/darwin/amd64', f)), + ...fs.readdirSync(path.join(__dirname, '../dist/darwin-amd64/lib/ollama')).map(f => path.join(__dirname, '../dist/darwin-amd64/lib/ollama', f)), path.join(__dirname, './assets/iconTemplate.png'), path.join(__dirname, './assets/iconTemplate@2x.png'), path.join(__dirname, './assets/iconUpdateTemplate.png'), diff --git a/scripts/build_darwin.sh b/scripts/build_darwin.sh index 7e586f5f..1f1442f3 100755 --- a/scripts/build_darwin.sh +++ b/scripts/build_darwin.sh @@ -41,26 +41,24 @@ _build_darwin() { _sign_darwin() { status "Creating universal binary..." - lipo -create -output dist/darwin/ollama dist/darwin/*/ollama + mkdir -p dist/darwin + lipo -create -output dist/darwin/ollama dist/darwin-*/ollama - if [ -z "$APPLE_IDENTITY" ]; then - status "No APPLE_IDENTITY set, skipping code signing" - return + if [ -n "$APPLE_IDENTITY" ]; then + for F in dist/darwin/ollama dist/darwin-amd64/lib/ollama/*; do + codesign -f --timestamp -s "$APPLE_IDENTITY" --identifier ai.ollama.ollama --options=runtime $F + done + + # create a temporary zip for notarization + TEMP=$(mktemp -u).zip + ditto -c -k --keepParent dist/darwin/ollama "$TEMP" + xcrun notarytool submit dist/darwin/temp.zip --wait --timeout 10m --apple-id $APPLE_ID --password $APPLE_PASSWORD --team-id $APPLE_TEAM_ID + rm -f "$TEMP" fi - for F in dist/darwin/ollama dist/darwin/amd64/lib*; do - codesign -f --timestamp -s "$APPLE_IDENTITY" --identifier ai.ollama.ollama --options=runtime $F - done - - # create a temporary zip for notarization - TEMP=$(mktemp -u).zip - ditto -c -k --keepParent dist/darwin/ollama "$TEMP" - xcrun notarytool submit dist/darwin/temp.zip --wait --timeout 10m --apple-id $APPLE_ID --password $APPLE_PASSWORD --team-id $APPLE_TEAM_ID - rm -f "$TEMP" - - # create a universal tarball + status "Creating universal tarball..." 
tar -cf dist/ollama-darwin.tar --strip-components 2 dist/darwin/ollama - tar -rf dist/ollama-darwin.tar --strip-components 3 dist/darwin/amd64/lib* + tar -rf dist/ollama-darwin.tar --strip-components 4 dist/darwin-amd64/lib/ gzip -9vc dist/ollama-darwin.tgz } diff --git a/scripts/build_windows.ps1 b/scripts/build_windows.ps1 index 30cf9827..b0c7e32f 100644 --- a/scripts/build_windows.ps1 +++ b/scripts/build_windows.ps1 @@ -208,8 +208,15 @@ function buildInstaller() { } function distZip() { - write-host "Generating stand-alone distribution zip file ${script:SRC_DIR}\dist\ollama-windows-${script:TARGET_ARCH}.zip" - Compress-Archive -Path "${script:SRC_DIR}\dist\windows-${script:TARGET_ARCH}\*" -DestinationPath "${script:SRC_DIR}\dist\ollama-windows-${script:TARGET_ARCH}.zip" -Force + if (Test-Path -Path "${script:SRC_DIR}\dist\windows-amd64") { + write-host "Generating stand-alone distribution zip file ${script:SRC_DIR}\dist\ollama-windows-amd64.zip" + Compress-Archive -Path "${script:SRC_DIR}\dist\windows-amd64\*" -DestinationPath "${script:SRC_DIR}\dist\ollama-windows-amd64.zip" -Force + } + + if (Test-Path -Path "${script:SRC_DIR}\dist\windows-arm64") { + write-host "Generating stand-alone distribution zip file ${script:SRC_DIR}\dist\ollama-windows-arm64.zip" + Compress-Archive -Path "${script:SRC_DIR}\dist\windows-arm64\*" -DestinationPath "${script:SRC_DIR}\dist\ollama-windows-arm64.zip" -Force + } } checkEnv From bfdeffc375f27b04a4ae7eeb22af24643582fcea Mon Sep 17 00:00:00 2001 From: Melroy van den Berg Date: Mon, 3 Feb 2025 22:54:08 +0100 Subject: [PATCH 32/68] docs: use OLLAMA_VERSION=0.5.7 for install version override (#8802) --- docs/linux.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/linux.md b/docs/linux.md index 13655f42..12c38de1 100644 --- a/docs/linux.md +++ b/docs/linux.md @@ -152,7 +152,7 @@ Use `OLLAMA_VERSION` environment variable with the install script to install a s For example: ```shell -curl -fsSL https://ollama.com/install.sh | OLLAMA_VERSION=0.3.9 sh +curl -fsSL https://ollama.com/install.sh | OLLAMA_VERSION=0.5.7 sh ``` ## Viewing logs From d4d338c2248ad015334e7b94c4ddb8cc744083a2 Mon Sep 17 00:00:00 2001 From: Tilman Griesel Date: Mon, 3 Feb 2025 23:18:19 +0100 Subject: [PATCH 33/68] readme: add Chipper to community integrations (#8803) --- README.md | 1 + 1 file changed, 1 insertion(+) diff --git a/README.md b/README.md index d9e8a099..5962b18a 100644 --- a/README.md +++ b/README.md @@ -371,6 +371,7 @@ See the [API documentation](./docs/api.md) for all endpoints. - [Perplexica](https://github.com/ItzCrazyKns/Perplexica) (An AI-powered search engine & an open-source alternative to Perplexity AI) - [AI Toolkit for Visual Studio Code](https://aka.ms/ai-tooklit/ollama-docs) (Microsoft-official VSCode extension to chat, test, evaluate models with Ollama support, and use them in your AI applications.) 
- [MinimalNextOllamaChat](https://github.com/anilkay/MinimalNextOllamaChat) (Minimal Web UI for Chat and Model Control) +- [Chipper](https://github.com/TilmanGriesel/chipper) AI interface for tinkerers (Ollama, Haystack RAG, Python) ### Cloud From 669dc31cf33a1d167dbbe5398e3e2ec71620bcef Mon Sep 17 00:00:00 2001 From: Michael Yang Date: Mon, 3 Feb 2025 13:31:22 -0800 Subject: [PATCH 34/68] fix build --- .github/workflows/release.yaml | 32 ++++++++++++++++++++++++++------ scripts/build_darwin.sh | 2 +- 2 files changed, 27 insertions(+), 7 deletions(-) diff --git a/.github/workflows/release.yaml b/.github/workflows/release.yaml index e9221bdf..dd354463 100644 --- a/.github/workflows/release.yaml +++ b/.github/workflows/release.yaml @@ -273,6 +273,8 @@ jobs: merge-multiple: true - run: | & .\scripts\build_windows.ps1 gatherDependencies sign buildInstaller distZip + env: + KEY_CONTAINER: ${{ vars.KEY_CONTAINER }} - uses: actions/upload-artifact@v4 with: name: dist-windows @@ -286,10 +288,13 @@ jobs: include: - os: linux arch: amd64 - targets: 'archive rocm' + target: archive + - os: linux + arch: amd64 + target: rocm - os: linux arch: arm64 - targets: archive + target: archive runs-on: ${{ matrix.arch == 'arm64' && format('{0}-{1}', matrix.os, matrix.arch) || matrix.os }} environment: release needs: setup-environment @@ -300,15 +305,30 @@ jobs: - uses: docker/setup-buildx-action@v3 - run: | sudo apt-get update && sudo apt-get install pigz - for TARGET in ${{ matrix.targets }}; do docker buildx build --platform $PLATFORM --target $TARGET --build-arg GOFLAGS --build-arg CGO_CFLAGS --build-arg CGO_CXXFLAGS --output type=local,dest=dist/$PLATFORM .; done - tar c -C dist/$PLATFORM . | pigz -9cv >dist/ollama-${PLATFORM//\//-}.tgz + docker buildx build --platform $PLATFORM --target ${{ matrix.target }} --build-arg GOFLAGS --build-arg CGO_CFLAGS --build-arg CGO_CXXFLAGS --output type=local,dest=dist/$PLATFORM . 
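The loop added next writes one `.tar.in` manifest per distributable archive, routing each built component (CPU bits, CUDA variants, Jetpack, ROCm) by its path. A rough Go rendering of that routing rule, with hypothetical helper and file names, just to make the mapping explicit:

```go
package main

import (
	"fmt"
	"path"
)

// archiveFor maps a built component directory to its archive manifest,
// mirroring the shell `case` below. Base names and suffixes here are
// assumptions for illustration only.
func archiveFor(component, platform string) string {
	base := "ollama-" + platform
	switch path.Base(component) {
	case "cuda_jetpack5":
		return base + "-jetpack5.tar.in"
	case "cuda_jetpack6":
		return base + "-jetpack6.tar.in"
	case "rocm":
		return base + "-rocm.tar.in"
	default: // bin, lib/ollama, cuda_v11, cuda_v12 travel in the main archive
		return base + ".tar.in"
	}
}

func main() {
	fmt.Println(archiveFor("dist/linux-amd64/lib/ollama/rocm", "linux-amd64"))
}
```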
+ + for COMPONENTS in dist/$PLATFORM/* dist/$PLATFORM/lib/ollama/*; do + if [ -d "$COMPONENTS" ]; then + case "$COMPONENTS" in + */bin) echo $COMPONENTS >>dist/ollama-${PLATFORM//\//-}.tar.in ;; + */lib/ollama) echo $COMPONENTS >>dist/ollama-${PLATFORM//\//-}.tar.in;; + */lib/ollama/cuda_v11) echo $COMPONENTS >>dist/ollama-${PLATFORM//\//-}.tar.in;; + */lib/ollama/cuda_v12) echo $COMPONENTS >>dist/ollama-${PLATFORM//\//-}.tar.in;; + */lib/ollama/cuda_jetpack5) echo $COMPONENTS >>dist/ollama-${PLATFORM//\//-}-jetpack5.tar.in ;; + */lib/ollama/cuda_jetpack6) echo $COMPONENTS >>dist/ollama-${PLATFORM//\//-}-jetpack6.tar.in ;; + */lib/ollama/rocm) echo $COMPONENTS >>dist/ollama-${PLATFORM//\//-}-rocm.tar.in ;; + esac + fi + done + + for ARCHIVE in dist/*.tar.in; do tar c -T $ARCHIVE --strip-components 3 | pigz -9cv >${ARCHIVE//.*/}.tgz; done env: PLATFORM: ${{ matrix.os }}/${{ matrix.arch }} - uses: actions/upload-artifact@v4 with: name: dist-${{ matrix.os }}-${{ matrix.arch }} path: | - dist/ollama-${{ matrix.os }}-${{ matrix.arch }}.tgz + dist/*.tgz docker-build: strategy: @@ -320,7 +340,7 @@ jobs: CGO_CFLAGS CGO_CXXFLAGS GOFLAGS - - flavor: 'latest=false,suffix=rocm' + - flavor: 'latest=false,suffix=-rocm' platforms: linux/amd64 build-args: | CGO_CFLAGS diff --git a/scripts/build_darwin.sh b/scripts/build_darwin.sh index 1f1442f3..0beb765e 100755 --- a/scripts/build_darwin.sh +++ b/scripts/build_darwin.sh @@ -52,7 +52,7 @@ _sign_darwin() { # create a temporary zip for notarization TEMP=$(mktemp -u).zip ditto -c -k --keepParent dist/darwin/ollama "$TEMP" - xcrun notarytool submit dist/darwin/temp.zip --wait --timeout 10m --apple-id $APPLE_ID --password $APPLE_PASSWORD --team-id $APPLE_TEAM_ID + xcrun notarytool submit "$TEMP" --wait --timeout 10m --apple-id $APPLE_ID --password $APPLE_PASSWORD --team-id $APPLE_TEAM_ID rm -f "$TEMP" fi From f9d2d8913554d78b1cae47c5eaa9cbbd0ea79273 Mon Sep 17 00:00:00 2001 From: Michael Yang Date: Mon, 3 Feb 2025 15:12:54 -0800 Subject: [PATCH 35/68] fix linux archive --- .github/workflows/release.yaml | 13 ++++--------- scripts/build_darwin.sh | 4 +++- 2 files changed, 7 insertions(+), 10 deletions(-) diff --git a/.github/workflows/release.yaml b/.github/workflows/release.yaml index dd354463..22bf6660 100644 --- a/.github/workflows/release.yaml +++ b/.github/workflows/release.yaml @@ -326,7 +326,7 @@ jobs: PLATFORM: ${{ matrix.os }}/${{ matrix.arch }} - uses: actions/upload-artifact@v4 with: - name: dist-${{ matrix.os }}-${{ matrix.arch }} + name: dist-${{ matrix.os }}-${{ matrix.arch }}-${{ matrix.target }} path: | dist/*.tgz @@ -391,9 +391,6 @@ jobs: GH_TOKEN: ${{ github.token }} steps: - uses: actions/checkout@v4 - - name: Set Version - shell: bash - run: | - uses: actions/download-artifact@v4 with: name: dist-darwin @@ -406,11 +403,9 @@ jobs: with: pattern: dist-linux-* path: dist - - run: | - ls -lh dist/ - (cd dist; find . -type f | xargs sha256sum > ../sha256sum.txt) - mv sha256sum.txt dist/ - cat dist/sha256sum.txt + merge-multiple: true + - run: find . 
-type f -not -name 'sha256sum.txt' | xargs sha256sum | tee sha256sum.txt + working-directory: dist - name: Create or update Release run: | RELEASE_VERSION=$(echo ${GITHUB_REF_NAME} | cut -f1 -d-)" diff --git a/scripts/build_darwin.sh b/scripts/build_darwin.sh index 0beb765e..baba3c1f 100755 --- a/scripts/build_darwin.sh +++ b/scripts/build_darwin.sh @@ -34,7 +34,8 @@ _build_darwin() { -DCMAKE_OSX_ARCHITECTURES=x86_64 \ -DCMAKE_OSX_DEPLOYMENT_TARGET=11.3 cmake --build build/darwin-$ARCH --target ggml-cpu -j - install build/darwin-$ARCH/lib/ollama/*.{dylib,so} $INSTALL_PREFIX + install -d $INSTALL_PREFIX/lib/ollama + install build/darwin-$ARCH/lib/ollama/*.{dylib,so} $INSTALL_PREFIX/lib/ollama fi done } @@ -43,6 +44,7 @@ _sign_darwin() { status "Creating universal binary..." mkdir -p dist/darwin lipo -create -output dist/darwin/ollama dist/darwin-*/ollama + chmod +x dist/darwin/ollama if [ -n "$APPLE_IDENTITY" ]; then for F in dist/darwin/ollama dist/darwin-amd64/lib/ollama/*; do From 65b7ecac7bd4346fae8f49764b0d6d2eb8de39ae Mon Sep 17 00:00:00 2001 From: Michael Yang Date: Mon, 3 Feb 2025 18:19:17 -0800 Subject: [PATCH 36/68] fix extra quote --- .github/workflows/release.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/release.yaml b/.github/workflows/release.yaml index 22bf6660..5ff3fafd 100644 --- a/.github/workflows/release.yaml +++ b/.github/workflows/release.yaml @@ -408,7 +408,7 @@ jobs: working-directory: dist - name: Create or update Release run: | - RELEASE_VERSION=$(echo ${GITHUB_REF_NAME} | cut -f1 -d-)" + RELEASE_VERSION="$(echo ${GITHUB_REF_NAME} | cut -f1 -d-)" echo "Looking for existing release for ${RELEASE_VERSION}" OLD_TAG=$(gh release ls --json name,tagName | jq -r ".[] | select(.name == \"${RELEASE_VERSION}\") | .tagName") From 4759ecae19fb8246826a8b87da9afbe1601895f9 Mon Sep 17 00:00:00 2001 From: Jeffrey Morgan Date: Tue, 4 Feb 2025 15:05:39 -0800 Subject: [PATCH 37/68] ml/backend/ggml: fix library loading on macOS amd64 (#8827) --- CMakeLists.txt | 5 ++++ llm/server.go | 12 ++++++--- ml/backend/ggml/ggml/src/ggml.go | 46 ++++++++++++++++++++------------ scripts/build_darwin.sh | 6 ++--- 4 files changed, 46 insertions(+), 23 deletions(-) diff --git a/CMakeLists.txt b/CMakeLists.txt index 19d9bd8f..58cb8b44 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -29,6 +29,11 @@ if((NOT CMAKE_OSX_ARCHITECTURES MATCHES "arm64") set(GGML_CPU_ALL_VARIANTS ON) endif() +if (CMAKE_OSX_ARCHITECTURES MATCHES "x86_64") + set(CMAKE_BUILD_RPATH "@loader_path") + set(CMAKE_INSTALL_RPATH "@loader_path") +endif() + set(OLLAMA_BUILD_DIR ${CMAKE_BINARY_DIR}/lib/ollama) set(OLLAMA_INSTALL_DIR ${CMAKE_INSTALL_PREFIX}/lib/ollama) diff --git a/llm/server.go b/llm/server.go index 9daec344..881209b3 100644 --- a/llm/server.go +++ b/llm/server.go @@ -281,9 +281,14 @@ func NewLlamaServer(gpus discover.GpuInfoList, model string, ggml *GGML, adapter finalParams = append(finalParams, params...) 
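The hunk that follows replaces a two-way Windows/other check with a three-way switch so the runner's dynamic loader search path uses the right variable per OS: `PATH` on Windows, `DYLD_LIBRARY_PATH` on macOS, `LD_LIBRARY_PATH` elsewhere. A small stand-alone sketch of that selection (illustrative only):

```go
package main

import (
	"fmt"
	"runtime"
)

// loaderPathVar returns the environment variable the platform's dynamic
// loader consults when resolving shared libraries at process start.
func loaderPathVar() string {
	switch runtime.GOOS {
	case "windows":
		return "PATH" // Windows resolves DLLs via PATH
	case "darwin":
		return "DYLD_LIBRARY_PATH" // dyld on macOS
	default:
		return "LD_LIBRARY_PATH" // ld.so on Linux and most Unixes
	}
}

func main() {
	fmt.Printf("%s=%s\n", loaderPathVar(), "/opt/ollama/lib")
}
```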
finalParams = append(finalParams, "--port", strconv.Itoa(port)) - pathEnv := "LD_LIBRARY_PATH" - if runtime.GOOS == "windows" { + var pathEnv string + switch runtime.GOOS { + case "windows": pathEnv = "PATH" + case "darwin": + pathEnv = "DYLD_LIBRARY_PATH" + default: + pathEnv = "LD_LIBRARY_PATH" } var libraryPaths []string @@ -385,7 +390,8 @@ func NewLlamaServer(gpus discover.GpuInfoList, model string, ggml *GGML, adapter strings.HasPrefix(ev, "HSA_") || strings.HasPrefix(ev, "GGML_") || strings.HasPrefix(ev, "PATH=") || - strings.HasPrefix(ev, "LD_LIBRARY_PATH=") { + strings.HasPrefix(ev, "LD_LIBRARY_PATH=") || + strings.HasPrefix(ev, "DYLD_LIBRARY_PATH=") { filteredEnv = append(filteredEnv, ev) } } diff --git a/ml/backend/ggml/ggml/src/ggml.go b/ml/backend/ggml/ggml/src/ggml.go index 7cf40e70..94b0d185 100644 --- a/ml/backend/ggml/ggml/src/ggml.go +++ b/ml/backend/ggml/ggml/src/ggml.go @@ -41,36 +41,48 @@ func sink(level C.int, text *C.char, _ unsafe.Pointer) { } var OnceLoad = sync.OnceFunc(func() { - var lib struct{ name, defaultValue string } + exe, err := os.Executable() + if err != nil { + slog.Warn("failed to get executable path", "error", err) + exe = "." + } + + // PATH, LD_LIBRARY_PATH, and DYLD_LIBRARY_PATH are often + // set by the parent process, however, use a default value + // if the environment variable is not set. + var name, value string switch runtime.GOOS { - case "darwin", "linux": - lib.name = "LD_LIBRARY_PATH" - lib.defaultValue = "/usr/local/lib:/usr/lib" + case "darwin": + // On macOS, DYLD_LIBRARY_PATH is often not set, so + // we use the directory of the executable as the default. + name = "DYLD_LIBRARY_PATH" + value = filepath.Dir(exe) case "windows": - lib.name = "PATH" - lib.defaultValue = "." + name = "PATH" + value = filepath.Join(filepath.Dir(exe), "lib", "ollama") default: - return + name = "LD_LIBRARY_PATH" + value = filepath.Join(filepath.Dir(exe), "..", "lib", "ollama") } - paths, ok := os.LookupEnv(lib.name) + paths, ok := os.LookupEnv(name) if !ok { - paths = lib.defaultValue - } - - if runtime.GOOS == "darwin" { - if _, ok := os.LookupEnv("DYLD_LIBRARY_PATH"); !ok { - os.Setenv("DYLD_LIBRARY_PATH", paths) - } + paths = value } split := filepath.SplitList(paths) visited := make(map[string]struct{}, len(split)) for _, path := range split { - abspath, _ := filepath.Abs(path) + abspath, err := filepath.Abs(path) + if err != nil { + slog.Error("failed to get absolute path", "error", err) + continue + } + if _, ok := visited[abspath]; !ok { func() { - cpath := C.CString(path) + slog.Debug("ggml backend load all from path", "path", abspath) + cpath := C.CString(abspath) defer C.free(unsafe.Pointer(cpath)) C.ggml_backend_load_all_from_path(cpath) }() diff --git a/scripts/build_darwin.sh b/scripts/build_darwin.sh index baba3c1f..76d0a6c2 100755 --- a/scripts/build_darwin.sh +++ b/scripts/build_darwin.sh @@ -32,10 +32,10 @@ _build_darwin() { status "Building darwin $ARCH dynamic backends" cmake -B build/darwin-$ARCH \ -DCMAKE_OSX_ARCHITECTURES=x86_64 \ - -DCMAKE_OSX_DEPLOYMENT_TARGET=11.3 + -DCMAKE_OSX_DEPLOYMENT_TARGET=11.3 \ + -DCMAKE_INSTALL_PREFIX=$INSTALL_PREFIX cmake --build build/darwin-$ARCH --target ggml-cpu -j - install -d $INSTALL_PREFIX/lib/ollama - install build/darwin-$ARCH/lib/ollama/*.{dylib,so} $INSTALL_PREFIX/lib/ollama + cmake --install build/darwin-$ARCH --component CPU fi done } From 63f0269f7f9f6256ff92ff6a11b032df01ffba5f Mon Sep 17 00:00:00 2001 From: Michael Yang Date: Tue, 4 Feb 2025 11:17:59 -0800 Subject: [PATCH 38/68] ci: split 
docker build by platform this improves build reliability and concurrency --- .github/workflows/release.yaml | 80 +++++++++++++++++++++++++--------- 1 file changed, 60 insertions(+), 20 deletions(-) diff --git a/.github/workflows/release.yaml b/.github/workflows/release.yaml index 5ff3fafd..699c444c 100644 --- a/.github/workflows/release.yaml +++ b/.github/workflows/release.yaml @@ -330,32 +330,71 @@ jobs: path: | dist/*.tgz - docker-build: + # Build each Docker variant (OS, arch, and flavor) separately. Using QEMU is unreliable and slower. + docker-build-push: strategy: matrix: include: - - flavor: 'latest=false' - platforms: linux/amd64,linux/arm64 + - os: linux + arch: arm64 build-args: | CGO_CFLAGS CGO_CXXFLAGS GOFLAGS - - flavor: 'latest=false,suffix=-rocm' - platforms: linux/amd64 + - os: linux + arch: amd64 + build-args: | + CGO_CFLAGS + CGO_CXXFLAGS + GOFLAGS + - os: linux + arch: amd64 + suffix: '-rocm' build-args: | CGO_CFLAGS CGO_CXXFLAGS GOFLAGS FLAVOR=rocm - env: - GOFLAGS: ${{ needs.setup-environment.outputs.GOFLAGS }} - runs-on: linux + runs-on: ${{ matrix.arch == 'arm64' && format('{0}-{1}', matrix.os, matrix.arch) || matrix.os }} environment: release needs: setup-environment + env: + GOFLAGS: ${{ needs.setup-environment.outputs.GOFLAGS }} steps: - uses: actions/checkout@v4 - - uses: docker/setup-qemu-action@v2 - uses: docker/setup-buildx-action@v2 + - uses: docker/login-action@v3 + with: + username: ${{ vars.DOCKER_USER }} + password: ${{ secrets.DOCKER_ACCESS_TOKEN }} + - id: build-push + uses: docker/build-push-action@v6 + with: + context: . + platforms: ${{ matrix.os }}/${{ matrix.arch }} + build-args: ${{ matrix.build-args }} + outputs: type=image,name=ollama/ollama,push-by-digest=true,name-canonical=true,push=true + cache-from: type=registry,ref=ollama/ollama:latest + cache-to: type=inline + - run: | + mkdir -p ${{ matrix.os }}-${{ matrix.arch }} + echo "${{ steps.build-push.outputs.digest }}" >${{ matrix.os }}-${{ matrix.arch }}-${{ matrix.suffix }}.txt + working-directory: ${{ runner.temp }} + - uses: actions/upload-artifact@v4 + with: + name: digest-${{ matrix.os }}-${{ matrix.arch }}-${{ matrix.suffix }} + path: | + ${{ runner.temp }}/${{ matrix.os }}-${{ matrix.arch }}-${{ matrix.suffix }}.txt + + # Merge Docker images for the same flavor into a single multi-arch manifest + docker-merge-push: + strategy: + matrix: + suffix: ['', '-rocm'] + runs-on: linux + environment: release + needs: [docker-build-push] + steps: - uses: docker/login-action@v3 with: username: ${{ vars.DOCKER_USER }} @@ -363,22 +402,23 @@ jobs: - id: metadata uses: docker/metadata-action@v4 with: - flavor: ${{ matrix.flavor }} + flavor: | + latest=false + suffix=${{ matrix.suffix }} images: | ollama/ollama tags: | + type=ref,enable=true,priority=600,prefix=pr-,event=pr type=semver,pattern={{version}} - - uses: docker/build-push-action@v6 + - uses: actions/download-artifact@v4 with: - context: . - push: true - platforms: ${{ matrix.platforms }} - build-args: ${{ matrix.build-args }} - tags: ${{ steps.metadata.outputs.tags }} - labels: ${{ steps.metadata.outputs.labels }} - cache-from: type=registry,ref=ollama/ollama:latest - cache-to: type=inline - provenance: false + pattern: digest-* + path: ${{ runner.temp }} + merge-multiple: true + - run: | + docker buildx imagetools create $(echo '${{ steps.metadata.outputs.json }}' | jq -cr '.tags | map("-t", .) 
| join(" ")') $(cat *-${{ matrix.suffix }}.txt | xargs printf 'ollama/ollama@%s ') + docker buildx imagetools inspect ollama/ollama:${{ steps.metadata.outputs.version }} + working-directory: ${{ runner.temp }} # Aggregate all the assets and ship a release release: From d8932c55e72f919cd92d0d019fb7fd0e01d857cd Mon Sep 17 00:00:00 2001 From: William Date: Tue, 4 Feb 2025 21:52:47 -0500 Subject: [PATCH 39/68] server: fix out of bounds exception on model download (#8746) --- server/download.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/server/download.go b/server/download.go index a3b53189..1674bffd 100644 --- a/server/download.go +++ b/server/download.go @@ -170,9 +170,9 @@ func (b *blobDownload) Prepare(ctx context.Context, requestURL *url.URL, opts *r offset += size } + } else { + slog.Info(fmt.Sprintf("downloading %s in %d %s part(s)", b.Digest[7:19], len(b.Parts), format.HumanBytes(b.Parts[0].Size))) } - - slog.Info(fmt.Sprintf("downloading %s in %d %s part(s)", b.Digest[7:19], len(b.Parts), format.HumanBytes(b.Parts[0].Size))) return nil } From c852b8e0214a727d44300270db3672837b93dbb8 Mon Sep 17 00:00:00 2001 From: Jeffrey Morgan Date: Tue, 4 Feb 2025 19:30:49 -0800 Subject: [PATCH 40/68] server: always print upload/download part info (#8832) --- server/download.go | 5 ++++- server/upload.go | 4 +++- 2 files changed, 7 insertions(+), 2 deletions(-) diff --git a/server/download.go b/server/download.go index 1674bffd..857b1692 100644 --- a/server/download.go +++ b/server/download.go @@ -170,9 +170,12 @@ func (b *blobDownload) Prepare(ctx context.Context, requestURL *url.URL, opts *r offset += size } - } else { + } + + if len(b.Parts) > 0 { slog.Info(fmt.Sprintf("downloading %s in %d %s part(s)", b.Digest[7:19], len(b.Parts), format.HumanBytes(b.Parts[0].Size))) } + return nil } diff --git a/server/upload.go b/server/upload.go index 020e8955..312545ee 100644 --- a/server/upload.go +++ b/server/upload.go @@ -108,7 +108,9 @@ func (b *blobUpload) Prepare(ctx context.Context, requestURL *url.URL, opts *reg offset += size } - slog.Info(fmt.Sprintf("uploading %s in %d %s part(s)", b.Digest[7:19], len(b.Parts), format.HumanBytes(b.Parts[0].Size))) + if len(b.Parts) > 0 { + slog.Info(fmt.Sprintf("uploading %s in %d %s part(s)", b.Digest[7:19], len(b.Parts), format.HumanBytes(b.Parts[0].Size))) + } requestURL, err = url.Parse(location) if err != nil { From cd3fbf1c49aa6e68f982934c3d11b8a4b8e71f58 Mon Sep 17 00:00:00 2001 From: Jeffrey Morgan Date: Wed, 5 Feb 2025 09:46:56 -0800 Subject: [PATCH 41/68] llama: use dynamic backend loading for mllama and clip (#8835) --- llama/llama.cpp/examples/llava/clip.cpp | 36 +++---------- llama/mllama.cpp | 31 +++-------- ...se-dynamic-backend-loading-for-clip.patch} | 51 ++++++++----------- 3 files changed, 36 insertions(+), 82 deletions(-) rename llama/patches/{0013-re-enable-gpu-for-clip.patch => 0013-use-dynamic-backend-loading-for-clip.patch} (64%) diff --git a/llama/llama.cpp/examples/llava/clip.cpp b/llama/llama.cpp/examples/llava/clip.cpp index 718052e1..86b91d5c 100644 --- a/llama/llama.cpp/examples/llava/clip.cpp +++ b/llama/llama.cpp/examples/llava/clip.cpp @@ -1235,35 +1235,15 @@ struct clip_ctx * clip_model_load(const char * fname, const int verbosity = 1) { } } -#ifdef GGML_USE_CUDA - new_clip->backend = ggml_backend_cuda_init(0); - LOG_INF("%s: CLIP using CUDA backend\n", __func__); -#endif - -#ifdef GGML_USE_METAL - new_clip->backend = ggml_backend_metal_init(); - LOG_INF("%s: CLIP using Metal backend\n", __func__); 
-#endif - -#ifdef GGML_USE_CANN - new_clip->backend = ggml_backend_cann_init(0); - LOG_INF("%s: CLIP using CANN backend\n", __func__); -#endif - -#ifdef GGML_USE_VULKAN - new_clip->backend = ggml_backend_vk_init(0); - LOG_INF("%s: CLIP using Vulkan backend\n", __func__); -#endif - -#ifdef GGML_USE_SYCL - new_clip->backend = ggml_backend_sycl_init(0); - LOG_INF("%s: CLIP using SYCL backend\n", __func__); -#endif - - if (!new_clip->backend) { - new_clip->backend = ggml_backend_cpu_init(); - LOG_INF("%s: CLIP using CPU backend\n", __func__); + ggml_backend_t backend = ggml_backend_init_best(); + if (backend == nullptr) { + LOG_ERR("%s: failed to initialize backend\n", __func__); + clip_free(new_clip); + gguf_free(ctx); + return nullptr; } + LOG_INF("%s: using %s backend\n", __func__, ggml_backend_name(backend)); + new_clip->backend = backend; // model size and capabilities { diff --git a/llama/mllama.cpp b/llama/mllama.cpp index df5bd6a9..4e84c60a 100644 --- a/llama/mllama.cpp +++ b/llama/mllama.cpp @@ -558,30 +558,15 @@ struct mllama_ctx *mllama_model_load(const char *fname, const int verbosity = 1) mllama_ctx *new_mllama = new mllama_ctx{}; -#ifdef GGML_USE_CUDA - new_mllama->backend = ggml_backend_cuda_init(0); - LOG("vision using CUDA backend"); -#endif - -#ifdef GGML_USE_METAL - new_mllama->backend = ggml_backend_metal_init(); - LOG("vision using Metal backend"); -#endif - -#ifdef GGML_USE_CANN - new_mllama->backend = ggml_backend_cann_init(0); - LOG("vision using CANN backend"); -#endif - -#ifdef GGML_USE_VULKAN - new_mllama->backend = ggml_backend_vk_init(0); - LOG("vision using Vulkan backend"); -#endif - - if (!new_mllama->backend) { - new_mllama->backend = ggml_backend_cpu_init(); - LOG("vision using CPU backend"); + ggml_backend_t backend = ggml_backend_init_best(); + if (backend == nullptr) { + LOG("%s: failed to initialize backend\n", __func__); + mllama_free(new_mllama); + gguf_free(ctx); + return nullptr; } + LOG("%s: using %s backend\n", __func__, ggml_backend_name(backend)); + new_mllama->backend = backend; // load tensors { diff --git a/llama/patches/0013-re-enable-gpu-for-clip.patch b/llama/patches/0013-use-dynamic-backend-loading-for-clip.patch similarity index 64% rename from llama/patches/0013-re-enable-gpu-for-clip.patch rename to llama/patches/0013-use-dynamic-backend-loading-for-clip.patch index a38d0884..e283a857 100644 --- a/llama/patches/0013-re-enable-gpu-for-clip.patch +++ b/llama/patches/0013-use-dynamic-backend-loading-for-clip.patch @@ -1,14 +1,14 @@ From 0000000000000000000000000000000000000000 Mon Sep 17 00:00:00 2001 From: jmorganca Date: Sat, 4 Jan 2025 22:52:48 -0800 -Subject: [PATCH] re-enable gpu for clip +Subject: [PATCH] use dynamic backend loading for clip --- - examples/llava/clip.cpp | 86 ++++++++++++++++++++--------------------- - 1 file changed, 43 insertions(+), 43 deletions(-) + examples/llava/clip.cpp | 74 +++++++++++++++-------------------------- + 1 file changed, 27 insertions(+), 47 deletions(-) diff --git a/examples/llava/clip.cpp b/examples/llava/clip.cpp -index b3c1829f..718052e1 100644 +index b3c1829f..86b91d5c 100644 --- a/examples/llava/clip.cpp +++ b/examples/llava/clip.cpp @@ -8,25 +8,25 @@ @@ -56,7 +56,7 @@ index b3c1829f..718052e1 100644 #define STB_IMAGE_IMPLEMENTATION #include "stb_image.h" -@@ -1235,30 +1235,30 @@ struct clip_ctx * clip_model_load(const char * fname, const int verbosity = 1) { +@@ -1235,35 +1235,15 @@ struct clip_ctx * clip_model_load(const char * fname, const int verbosity = 1) { } } @@ -84,30 +84,19 @@ index 
b3c1829f..718052e1 100644 -// new_clip->backend = ggml_backend_sycl_init(0); -// LOG_INF("%s: CLIP using SYCL backend\n", __func__); -//#endif -+#ifdef GGML_USE_CUDA -+ new_clip->backend = ggml_backend_cuda_init(0); -+ LOG_INF("%s: CLIP using CUDA backend\n", __func__); -+#endif -+ -+#ifdef GGML_USE_METAL -+ new_clip->backend = ggml_backend_metal_init(); -+ LOG_INF("%s: CLIP using Metal backend\n", __func__); -+#endif -+ -+#ifdef GGML_USE_CANN -+ new_clip->backend = ggml_backend_cann_init(0); -+ LOG_INF("%s: CLIP using CANN backend\n", __func__); -+#endif -+ -+#ifdef GGML_USE_VULKAN -+ new_clip->backend = ggml_backend_vk_init(0); -+ LOG_INF("%s: CLIP using Vulkan backend\n", __func__); -+#endif -+ -+#ifdef GGML_USE_SYCL -+ new_clip->backend = ggml_backend_sycl_init(0); -+ LOG_INF("%s: CLIP using SYCL backend\n", __func__); -+#endif +- +- if (!new_clip->backend) { +- new_clip->backend = ggml_backend_cpu_init(); +- LOG_INF("%s: CLIP using CPU backend\n", __func__); ++ ggml_backend_t backend = ggml_backend_init_best(); ++ if (backend == nullptr) { ++ LOG_ERR("%s: failed to initialize backend\n", __func__); ++ clip_free(new_clip); ++ gguf_free(ctx); ++ return nullptr; + } ++ LOG_INF("%s: using %s backend\n", __func__, ggml_backend_name(backend)); ++ new_clip->backend = backend; - if (!new_clip->backend) { - new_clip->backend = ggml_backend_cpu_init(); + // model size and capabilities + { From 291def6adbdb191e1a67f2e3f231cb54fe18d417 Mon Sep 17 00:00:00 2001 From: Yashwanth A <123303508+yashwanth2706@users.noreply.github.com> Date: Wed, 5 Feb 2025 23:30:26 +0530 Subject: [PATCH 42/68] server: increase timeout in stall detection from 5s to 30s (#8831) In some cases, downloads slow due to disk i/o or other factors, causing the download to restart a part. This causes the download to "reverse" in percent completion. By increasing the timeout to 30s, this should happen less frequently. --- server/download.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/server/download.go b/server/download.go index 857b1692..8f49351e 100644 --- a/server/download.go +++ b/server/download.go @@ -368,7 +368,7 @@ func (b *blobDownload) downloadChunk(ctx context.Context, requestURL *url.URL, w lastUpdated := part.lastUpdated part.lastUpdatedMu.Unlock() - if !lastUpdated.IsZero() && time.Since(lastUpdated) > 5*time.Second { + if !lastUpdated.IsZero() && time.Since(lastUpdated) > 30*time.Second { const msg = "%s part %d stalled; retrying. If this persists, press ctrl-c to exit, then 'ollama pull' to find a faster connection." slog.Info(fmt.Sprintf(msg, b.Digest[7:19], part.N)) // reset last updated From f00d359a6729ab52a300b8ef8a738f3faa1006af Mon Sep 17 00:00:00 2001 From: Jeffrey Morgan Date: Wed, 5 Feb 2025 11:16:27 -0800 Subject: [PATCH 43/68] docs: add section in development.md on library detection (#8855) --- docs/development.md | 11 +++++++++++ 1 file changed, 11 insertions(+) diff --git a/docs/development.md b/docs/development.md index 3e2ed49b..618e98e1 100644 --- a/docs/development.md +++ b/docs/development.md @@ -118,3 +118,14 @@ To run tests, use `go test`: ``` go test ./... ``` + +## Library detection + +Ollama looks for acceleration libraries in the following paths relative to the `ollama` executable: + +* `./lib/ollama` (Windows) +* `../lib/ollama` (Linux) +* `.` (macOS) +* `build/lib/ollama` (for development) + +If the libraries are not found, Ollama will not run with any acceleration libraries. 
\ No newline at end of file From 8d8b9f83ae998fef76ceb1350175af74cb1a697a Mon Sep 17 00:00:00 2001 From: Azis Alvriyanto Date: Thu, 6 Feb 2025 03:23:07 +0700 Subject: [PATCH 44/68] format: byte formatting test coverage (#8692) Removed redundant checks and streamlined the switch-case structure. Added test cases for both HumanBytes and HumanBytes2 to cover a wide range of scenarios. --- format/byte_test.go | 91 +++++++++++++++++++++++++++++++++++++++++++++ format/bytes.go | 2 - 2 files changed, 91 insertions(+), 2 deletions(-) create mode 100644 format/byte_test.go diff --git a/format/byte_test.go b/format/byte_test.go new file mode 100644 index 00000000..5881af45 --- /dev/null +++ b/format/byte_test.go @@ -0,0 +1,91 @@ +package format + +import ( + "testing" +) + +func TestHumanBytes(t *testing.T) { + type testCase struct { + input int64 + expected string + } + + tests := []testCase{ + // Test bytes (B) + {0, "0 B"}, + {1, "1 B"}, + {999, "999 B"}, + + // Test kilobytes (KB) + {1000, "1 KB"}, + {1500, "1.5 KB"}, + {999999, "999 KB"}, + + // Test megabytes (MB) + {1000000, "1 MB"}, + {1500000, "1.5 MB"}, + {999999999, "999 MB"}, + + // Test gigabytes (GB) + {1000000000, "1 GB"}, + {1500000000, "1.5 GB"}, + {999999999999, "999 GB"}, + + // Test terabytes (TB) + {1000000000000, "1 TB"}, + {1500000000000, "1.5 TB"}, + {1999999999999, "2.0 TB"}, + + // Test fractional values + {1234, "1.2 KB"}, + {1234567, "1.2 MB"}, + {1234567890, "1.2 GB"}, + } + + for _, tc := range tests { + t.Run(tc.expected, func(t *testing.T) { + result := HumanBytes(tc.input) + if result != tc.expected { + t.Errorf("Expected %s, got %s", tc.expected, result) + } + }) + } +} + +func TestHumanBytes2(t *testing.T) { + type testCase struct { + input uint64 + expected string + } + + tests := []testCase{ + // Test bytes (B) + {0, "0 B"}, + {1, "1 B"}, + {1023, "1023 B"}, + + // Test kibibytes (KiB) + {1024, "1.0 KiB"}, + {1536, "1.5 KiB"}, + {1048575, "1024.0 KiB"}, + + // Test mebibytes (MiB) + {1048576, "1.0 MiB"}, + {1572864, "1.5 MiB"}, + {1073741823, "1024.0 MiB"}, + + // Test gibibytes (GiB) + {1073741824, "1.0 GiB"}, + {1610612736, "1.5 GiB"}, + {2147483648, "2.0 GiB"}, + } + + for _, tc := range tests { + t.Run(tc.expected, func(t *testing.T) { + result := HumanBytes2(tc.input) + if result != tc.expected { + t.Errorf("Expected %s, got %s", tc.expected, result) + } + }) + } +} diff --git a/format/bytes.go b/format/bytes.go index 13d8575e..a24231df 100644 --- a/format/bytes.go +++ b/format/bytes.go @@ -40,8 +40,6 @@ func HumanBytes(b int64) string { } switch { - case value >= 100: - return fmt.Sprintf("%d %s", int(value), unit) case value >= 10: return fmt.Sprintf("%d %s", int(value), unit) case value != math.Trunc(value): From 070ad913acf2b62df55ba6c638f5d287ffd6fa8f Mon Sep 17 00:00:00 2001 From: Michael Yang Date: Wed, 5 Feb 2025 14:02:48 -0800 Subject: [PATCH 45/68] ci: fix linux archive --- .github/workflows/release.yaml | 35 ++++++++++++++++------------------ 1 file changed, 16 insertions(+), 19 deletions(-) diff --git a/.github/workflows/release.yaml b/.github/workflows/release.yaml index 699c444c..5267cbe0 100644 --- a/.github/workflows/release.yaml +++ b/.github/workflows/release.yaml @@ -305,30 +305,27 @@ jobs: - uses: docker/setup-buildx-action@v3 - run: | sudo apt-get update && sudo apt-get install pigz - docker buildx build --platform $PLATFORM --target ${{ matrix.target }} --build-arg GOFLAGS --build-arg CGO_CFLAGS --build-arg CGO_CXXFLAGS --output type=local,dest=dist/$PLATFORM . 
- - for COMPONENTS in dist/$PLATFORM/* dist/$PLATFORM/lib/ollama/*; do - if [ -d "$COMPONENTS" ]; then - case "$COMPONENTS" in - */bin) echo $COMPONENTS >>dist/ollama-${PLATFORM//\//-}.tar.in ;; - */lib/ollama) echo $COMPONENTS >>dist/ollama-${PLATFORM//\//-}.tar.in;; - */lib/ollama/cuda_v11) echo $COMPONENTS >>dist/ollama-${PLATFORM//\//-}.tar.in;; - */lib/ollama/cuda_v12) echo $COMPONENTS >>dist/ollama-${PLATFORM//\//-}.tar.in;; - */lib/ollama/cuda_jetpack5) echo $COMPONENTS >>dist/ollama-${PLATFORM//\//-}-jetpack5.tar.in ;; - */lib/ollama/cuda_jetpack6) echo $COMPONENTS >>dist/ollama-${PLATFORM//\//-}-jetpack6.tar.in ;; - */lib/ollama/rocm) echo $COMPONENTS >>dist/ollama-${PLATFORM//\//-}-rocm.tar.in ;; - esac - fi + docker buildx build --platform ${{ matrix.os }}/${{ matrix.arch }} --target ${{ matrix.target }} --build-arg GOFLAGS --build-arg CGO_CFLAGS --build-arg CGO_CXXFLAGS --output type=local,dest=dist/${{ matrix.os }}-${{ matrix.arch }} . + - run: | + find . -mindepth 1 -maxdepth 3 -type d | while read COMPONENT; do + case "$COMPONENT" in + ./bin) echo $COMPONENT >>ollama-${{ matrix.os }}-${{ matrix.arch }}.tar.in ;; + ./lib/ollama) echo $COMPONENT >>ollama-${{ matrix.os }}-${{ matrix.arch }}.tar.in ;; + ./lib/ollama/cuda_v11) echo $COMPONENT >>ollama-${{ matrix.os }}-${{ matrix.arch }}.tar.in ;; + ./lib/ollama/cuda_v12) echo $COMPONENT >>ollama-${{ matrix.os }}-${{ matrix.arch }}.tar.in ;; + ./lib/ollama/cuda_jetpack5) echo $COMPONENT >>ollama-${{ matrix.os }}-${{ matrix.arch }}-jetpack5.tar.in ;; + ./lib/ollama/cuda_jetpack6) echo $COMPONENT >>ollama-${{ matrix.os }}-${{ matrix.arch }}-jetpack6.tar.in ;; + ./lib/ollama/rocm) echo $COMPONENT >>ollama-${{ matrix.os }}-${{ matrix.arch }}-rocm.tar.in ;; + esac done - - for ARCHIVE in dist/*.tar.in; do tar c -T $ARCHIVE --strip-components 3 | pigz -9cv >${ARCHIVE//.*/}.tgz; done - env: - PLATFORM: ${{ matrix.os }}/${{ matrix.arch }} + working-directory: dist/${{ matrix.os }}-${matrix.arch }} + - run: | + for ARCHIVE in dist/${{ matrix.os }}-${{ matrix.arch }}/*.tar.in; do tar c -C dist/${{ matrix.os }}-${{ matrix.arch }} -T $ARCHIVE | pigz -9vc >$(basenme ${ARCHIVE//.*/}.tgz); done - uses: actions/upload-artifact@v4 with: name: dist-${{ matrix.os }}-${{ matrix.arch }}-${{ matrix.target }} path: | - dist/*.tgz + *.tgz # Build each Docker variant (OS, arch, and flavor) separately. Using QEMU is unreliable and slower. docker-build-push: From 932bded12f1a1589f61664732428bbeb7569693b Mon Sep 17 00:00:00 2001 From: Michael Yang Date: Wed, 5 Feb 2025 11:48:50 -0800 Subject: [PATCH 46/68] chore: add optional field for server logs --- .github/ISSUE_TEMPLATE/10_bug_report.yml | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/.github/ISSUE_TEMPLATE/10_bug_report.yml b/.github/ISSUE_TEMPLATE/10_bug_report.yml index d0c79bca..4ac6fee6 100644 --- a/.github/ISSUE_TEMPLATE/10_bug_report.yml +++ b/.github/ISSUE_TEMPLATE/10_bug_report.yml @@ -9,6 +9,14 @@ body: description: What happened? What did you expect to happen? validations: required: true + - type: textarea + id: logs + attributes: + label: Relevant log output + description: Please copy and paste any relevant log output. See [Troubleshooting Guide](https://github.com/ollama/ollama/blob/main/docs/troubleshooting.md#how-to-troubleshoot-issues) for details. 
+ render: shell + validations: + required: false - type: dropdown id: os attributes: From 451c1596af991acb427b33cabea4abb3d4ab5117 Mon Sep 17 00:00:00 2001 From: Daniel Lok Date: Thu, 6 Feb 2025 08:04:24 +0800 Subject: [PATCH 47/68] readme: add MLflow Tracing as an observability integration (#8811) --- README.md | 1 + 1 file changed, 1 insertion(+) diff --git a/README.md b/README.md index 5962b18a..b4d75b63 100644 --- a/README.md +++ b/README.md @@ -545,3 +545,4 @@ See the [API documentation](./docs/api.md) for all endpoints. - [OpenLIT](https://github.com/openlit/openlit) is an OpenTelemetry-native tool for monitoring Ollama Applications & GPUs using traces and metrics. - [HoneyHive](https://docs.honeyhive.ai/integrations/ollama) is an AI observability and evaluation platform for AI agents. Use HoneyHive to evaluate agent performance, interrogate failures, and monitor quality in production. - [Langfuse](https://langfuse.com/docs/integrations/ollama) is an open source LLM observability platform that enables teams to collaboratively monitor, evaluate and debug AI applications. +- [MLflow Tracing](https://mlflow.org/docs/latest/llms/tracing/index.html#automatic-tracing) is an open source LLM observability tool with a convenient API to log and visualize traces, making it easy to debug and evaluate GenAI applications. From 5b446cc8150db0986c4c6afa9dbcee3cefcdc27f Mon Sep 17 00:00:00 2001 From: Michael Yang Date: Wed, 5 Feb 2025 16:37:18 -0800 Subject: [PATCH 48/68] chore: update gitattributes (#8860) * chore: update gitattributes * chore: add build info source --- .gitattributes | 4 + .github/workflows/test.yaml | 2 +- Makefile.sync | 6 +- llama/build-info.cpp | 2 +- llama/build-info.cpp.in | 4 + .../template-instances/generate_cu_files.py | 77 ------------------- 6 files changed, 15 insertions(+), 80 deletions(-) create mode 100644 llama/build-info.cpp.in delete mode 100755 ml/backend/ggml/ggml/src/ggml-cuda/template-instances/generate_cu_files.py diff --git a/.gitattributes b/.gitattributes index 4bcd95b0..b1279845 100644 --- a/.gitattributes +++ b/.gitattributes @@ -15,6 +15,10 @@ ml/backend/**/*.cu linguist-vendored ml/backend/**/*.cuh linguist-vendored ml/backend/**/*.m linguist-vendored ml/backend/**/*.metal linguist-vendored +ml/backend/**/CMakeLists.txt linguist-vendored + +llama/build-info.cpp linguist-generated +ml/backend/ggml/ggml/src/ggml-metal/ggml-metal-embed.s linguist-generated * text=auto *.go text eol=lf diff --git a/.github/workflows/test.yaml b/.github/workflows/test.yaml index f8e1cadf..8af8812f 100644 --- a/.github/workflows/test.yaml +++ b/.github/workflows/test.yaml @@ -163,5 +163,5 @@ jobs: - uses: actions/checkout@v4 - name: Verify patches apply cleanly and do not change files run: | - make -f Makefile.sync clean checkout sync + make -f Makefile.sync clean sync git diff --compact-summary --exit-code diff --git a/Makefile.sync b/Makefile.sync index 3001487d..00728274 100644 --- a/Makefile.sync +++ b/Makefile.sync @@ -15,7 +15,11 @@ help: @echo " make -f $(lastword $(MAKEFILE_LIST)) clean sync" .PHONY: sync -sync: llama/llama.cpp ml/backend/ggml/ggml apply-patches +sync: llama/build-info.cpp llama/llama.cpp ml/backend/ggml/ggml apply-patches + +.PHONY: llama/build-info.cpp +llama/build-info.cpp: llama/build-info.cpp.in + sed -e 's|@FETCH_HEAD@|$(FETCH_HEAD)|' $< > $@ .PHONY: llama/llama.cpp llama/llama.cpp: llama/vendor/ apply-patches diff --git a/llama/build-info.cpp b/llama/build-info.cpp index b2c1dba7..e169b926 100644 --- a/llama/build-info.cpp +++ 
b/llama/build-info.cpp @@ -1,4 +1,4 @@ int LLAMA_BUILD_NUMBER = 0; -char const *LLAMA_COMMIT = "ba1cb19cdd0d92e012e0f6e009e0620f854b6afd"; +char const *LLAMA_COMMIT = "46e3556e01b824e52395fb050b29804b6cff2a7c"; char const *LLAMA_COMPILER = ""; char const *LLAMA_BUILD_TARGET = ""; diff --git a/llama/build-info.cpp.in b/llama/build-info.cpp.in new file mode 100644 index 00000000..07cbd0e4 --- /dev/null +++ b/llama/build-info.cpp.in @@ -0,0 +1,4 @@ +int LLAMA_BUILD_NUMBER = 0; +char const *LLAMA_COMMIT = "@FETCH_HEAD@"; +char const *LLAMA_COMPILER = ""; +char const *LLAMA_BUILD_TARGET = ""; diff --git a/ml/backend/ggml/ggml/src/ggml-cuda/template-instances/generate_cu_files.py b/ml/backend/ggml/ggml/src/ggml-cuda/template-instances/generate_cu_files.py deleted file mode 100755 index d7874e6e..00000000 --- a/ml/backend/ggml/ggml/src/ggml-cuda/template-instances/generate_cu_files.py +++ /dev/null @@ -1,77 +0,0 @@ -#!/usr/bin/env python3 - -from glob import glob -import os - -TYPES_KV = ["GGML_TYPE_Q4_0", "GGML_TYPE_Q4_1", "GGML_TYPE_Q5_0", "GGML_TYPE_Q5_1", "GGML_TYPE_Q8_0", "GGML_TYPE_F16"] - -SOURCE_FATTN_VEC = """// This file has been autogenerated by generate_cu_files.py, do not edit manually. - -#include "../fattn-vec-f{vkq_size}.cuh" - -DECL_FATTN_VEC_F{vkq_size}_CASE({head_size}, {type_k}, {type_v}); -""" - -SOURCE_FATTN_WMMA_START = """// This file has been autogenerated by generate_cu_files.py, do not edit manually. - -#include "../fattn-wmma-f16.cuh" - -""" - -SOURCE_FATTN_WMMA_CASE = "DECL_FATTN_WMMA_F16_CASE({head_size}, {cols_per_block}, {kq_acc_t});\n" - -TYPES_MMQ = [ - "GGML_TYPE_Q4_0", "GGML_TYPE_Q4_1", "GGML_TYPE_Q5_0", "GGML_TYPE_Q5_1", "GGML_TYPE_Q8_0", - "GGML_TYPE_Q2_K", "GGML_TYPE_Q3_K", "GGML_TYPE_Q4_K", "GGML_TYPE_Q5_K", "GGML_TYPE_Q6_K", - "GGML_TYPE_IQ2_XXS", "GGML_TYPE_IQ2_XS", "GGML_TYPE_IQ2_S", "GGML_TYPE_IQ3_XXS", "GGML_TYPE_IQ3_S", - "GGML_TYPE_IQ1_S", "GGML_TYPE_IQ4_NL", "GGML_TYPE_IQ4_XS" -] - -SOURCE_MMQ = """// This file has been autogenerated by generate_cu_files.py, do not edit manually. 
- -#include "../mmq.cuh" - -DECL_MMQ_CASE({type}); -""" - - -def get_short_name(long_quant_name): - return long_quant_name.replace("GGML_TYPE_", "").lower() - - -def get_head_sizes(type_k, type_v): - if type_k == "GGML_TYPE_F16" and type_v == "GGML_TYPE_F16": - return [64, 128, 256] - if type_k == "GGML_TYPE_F16": - return [64, 128] - return [128] - - -for filename in glob("*.cu"): - os.remove(filename) - -for vkq_size in [16, 32]: - for type_k in TYPES_KV: - for type_v in TYPES_KV: - for head_size in get_head_sizes(type_k, type_v): - with open(f"fattn-vec-f{vkq_size}-instance-hs{head_size}-{get_short_name(type_k)}-{get_short_name(type_v)}.cu", "w") as f: - f.write(SOURCE_FATTN_VEC.format(vkq_size=vkq_size, head_size=head_size, type_k=type_k, type_v=type_v)) - -for kq_acc_t in ["half", "float"]: - for cols_per_block in [8, 16, 32]: - if kq_acc_t == "float" and cols_per_block == 8: - continue - - with open(f"fattn-wmma-f16-instance-kq{kq_acc_t}-cpb{cols_per_block}.cu", "w") as f: - f.write(SOURCE_FATTN_WMMA_START) - - for head_size in [64, 80, 96, 112, 128, 256]: - if cols_per_block == 8 and head_size % 32 != 0: # wmma fragment is 8x32 - continue - if kq_acc_t == "float" and cols_per_block == 32 and head_size == 256: # register spilling, bad performance - continue - f.write(SOURCE_FATTN_WMMA_CASE.format(kq_acc_t=kq_acc_t, cols_per_block=cols_per_block, head_size=head_size)) - -for type in TYPES_MMQ: - with open(f"mmq-instance-{get_short_name(type)}.cu", "w") as f: - f.write(SOURCE_MMQ.format(type=type)) From 928911bc683a9234343e2542e1a13564dd0f2684 Mon Sep 17 00:00:00 2001 From: Diego Pereira <309799+dpereira@users.noreply.github.com> Date: Wed, 5 Feb 2025 21:53:33 -0300 Subject: [PATCH 49/68] runner: avoid buffer overwrite when generating multiple embeddings (#8714) Shield the code processing the embedding result from subsequent calls that may overwrite the same buffer to process a second input when retrieving model embeddings. 
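The bug: the previous code returned a Go slice aliasing llama.cpp's internal embeddings buffer, so decoding a second input overwrote the first result before the caller read it. The fix copies the floats out before returning. A reduced sketch of the copy-out pattern over a reused buffer (a pure-Go stand-in for the C array, not the actual bindings):

```go
package main

import "fmt"

var shared = make([]float32, 4) // stands in for llama.cpp's reused C buffer

// fill simulates a decode that writes a new result into the shared buffer,
// then copies it out. Returning `shared` directly would alias memory that
// the next call overwrites.
func fill(v float32) []float32 {
	for i := range shared {
		shared[i] = v
	}
	out := make([]float32, len(shared))
	copy(out, shared)
	return out
}

func main() {
	a := fill(1)
	b := fill(2)
	fmt.Println(a, b) // [1 1 1 1] [2 2 2 2] — a survives the second call
}
```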
---
 llama/llama.go | 16 ++++++++++------
 1 file changed, 10 insertions(+), 6 deletions(-)

diff --git a/llama/llama.go b/llama/llama.go
index 1d4513e3..a20f2357 100644
--- a/llama/llama.go
+++ b/llama/llama.go
@@ -199,21 +199,25 @@ func (c *Context) KvCacheDefrag() {
 // Get the embeddings for a sequence id
 func (c *Context) GetEmbeddingsSeq(seqId int) []float32 {
-	embeddings := unsafe.Pointer(C.llama_get_embeddings_seq(c.c, C.int(seqId)))
-	if embeddings == nil {
+	e := unsafe.Pointer(C.llama_get_embeddings_seq(c.c, C.int(seqId)))
+	if e == nil {
 		return nil
 	}
-	return unsafe.Slice((*float32)(embeddings), c.Model().NEmbd())
+	embeddings := make([]float32, c.Model().NEmbd())
+	_ = copy(embeddings, unsafe.Slice((*float32)(e), c.Model().NEmbd()))
+	return embeddings
 }
 
 func (c *Context) GetEmbeddingsIth(i int) []float32 {
-	embeddings := unsafe.Pointer(C.llama_get_embeddings_ith(c.c, C.int32_t(i)))
-	if embeddings == nil {
+	e := unsafe.Pointer(C.llama_get_embeddings_ith(c.c, C.int32_t(i)))
+	if e == nil {
 		return nil
 	}
-	return unsafe.Slice((*float32)(embeddings), c.Model().NEmbd())
+	embeddings := make([]float32, c.Model().NEmbd())
+	_ = copy(embeddings, unsafe.Slice((*float32)(e), c.Model().NEmbd()))
+	return embeddings
 }
 
 type ModelParams struct {

From 330b6c50b040d46b83cd66913a8b00df147a7e9b Mon Sep 17 00:00:00 2001
From: zyphixor <78001268+zyphixor@users.noreply.github.com>
Date: Wed, 5 Feb 2025 21:35:04 -0500
Subject: [PATCH 50/68] readme: add simple-discord-ai to community
 integrations (#8659)

---
 README.md | 1 +
 1 file changed, 1 insertion(+)

diff --git a/README.md b/README.md
index b4d75b63..9134e2c3 100644
--- a/README.md
+++ b/README.md
@@ -535,6 +535,7 @@ See the [API documentation](./docs/api.md) for all endpoints.
 - [TextCraft](https://github.com/suncloudsmoon/TextCraft) (Copilot in Word alternative using Ollama)
 - [Alfred Ollama](https://github.com/zeitlings/alfred-ollama) (Alfred Workflow)
 - [TextLLaMA](https://github.com/adarshM84/TextLLaMA) A Chrome Extension that helps you write emails, correct grammar, and translate into any language
+- [Simple-Discord-AI](https://github.com/zyphixor/simple-discord-ai)
 
 ### Supported backends

From 1c198977ecdd471aee827a378080ace73c02fa8d Mon Sep 17 00:00:00 2001
From: Michael Yang
Date: Wed, 5 Feb 2025 19:45:58 -0800
Subject: [PATCH 51/68] ci: fix linux archive (#8862)

the find returns intermediate directories which pulls the parent
directories. it also omits files under lib/ollama. switch back to
globbing

---
 .github/workflows/release.yaml | 41 +++++++++++++++++++++-------------
 1 file changed, 25 insertions(+), 16 deletions(-)

diff --git a/.github/workflows/release.yaml b/.github/workflows/release.yaml
index 5267cbe0..ca83a429 100644
--- a/.github/workflows/release.yaml
+++ b/.github/workflows/release.yaml
@@ -303,24 +303,33 @@ jobs:
     steps:
       - uses: actions/checkout@v4
       - uses: docker/setup-buildx-action@v3
+      - uses: docker/build-push-action@v6
+        with:
+          context: .
+          platforms: ${{ matrix.os }}/${{ matrix.arch }}
+          target: ${{ matrix.target }}
+          build-args: |
+            GOFLAGS=${{ env.GOFLAGS }}
+            CGO_CFLAGS=${{ env.CGO_CFLAGS }}
+            CGO_CXXFLAGS=${{ env.CGO_CXXFLAGS }}
+          outputs: type=local,dest=dist/${{ matrix.os }}-${{ matrix.arch }}
+          cache-from: type=registry,ref=ollama/ollama:latest
+          cache-to: type=inline
       - run: |
-          sudo apt-get update && sudo apt-get install pigz
-          docker buildx build --platform ${{ matrix.os }}/${{ matrix.arch }} --target ${{ matrix.target }} --build-arg GOFLAGS --build-arg CGO_CFLAGS --build-arg CGO_CXXFLAGS --output type=local,dest=dist/${{ matrix.os }}-${{ matrix.arch }} .
-      - run: |
-          find . -mindepth 1 -maxdepth 3 -type d | while read COMPONENT; do
-            case "$COMPONENT" in
-              ./bin) echo $COMPONENT >>ollama-${{ matrix.os }}-${{ matrix.arch }}.tar.in ;;
-              ./lib/ollama) echo $COMPONENT >>ollama-${{ matrix.os }}-${{ matrix.arch }}.tar.in ;;
-              ./lib/ollama/cuda_v11) echo $COMPONENT >>ollama-${{ matrix.os }}-${{ matrix.arch }}.tar.in ;;
-              ./lib/ollama/cuda_v12) echo $COMPONENT >>ollama-${{ matrix.os }}-${{ matrix.arch }}.tar.in ;;
-              ./lib/ollama/cuda_jetpack5) echo $COMPONENT >>ollama-${{ matrix.os }}-${{ matrix.arch }}-jetpack5.tar.in ;;
-              ./lib/ollama/cuda_jetpack6) echo $COMPONENT >>ollama-${{ matrix.os }}-${{ matrix.arch }}-jetpack6.tar.in ;;
-              ./lib/ollama/rocm) echo $COMPONENT >>ollama-${{ matrix.os }}-${{ matrix.arch }}-rocm.tar.in ;;
-            esac
+          for COMPONENT in bin/* lib/ollama/*; do
+            case "$COMPONENT" in
+              bin/ollama) echo $COMPONENT >>ollama-${{ matrix.os }}-${{ matrix.arch }}.tar.in ;;
+              lib/ollama/*.so) echo $COMPONENT >>ollama-${{ matrix.os }}-${{ matrix.arch }}.tar.in ;;
+              lib/ollama/cuda_v11) echo $COMPONENT >>ollama-${{ matrix.os }}-${{ matrix.arch }}.tar.in ;;
+              lib/ollama/cuda_v12) echo $COMPONENT >>ollama-${{ matrix.os }}-${{ matrix.arch }}.tar.in ;;
+              lib/ollama/cuda_jetpack5) echo $COMPONENT >>ollama-${{ matrix.os }}-${{ matrix.arch }}-jetpack5.tar.in ;;
+              lib/ollama/cuda_jetpack6) echo $COMPONENT >>ollama-${{ matrix.os }}-${{ matrix.arch }}-jetpack6.tar.in ;;
+              lib/ollama/rocm) echo $COMPONENT >>ollama-${{ matrix.os }}-${{ matrix.arch }}-rocm.tar.in ;;
+            esac
           done
-        working-directory: dist/${{ matrix.os }}-${matrix.arch }}
+        working-directory: dist/${{ matrix.os }}-${{ matrix.arch }}
      - run: |
-          for ARCHIVE in dist/${{ matrix.os }}-${{ matrix.arch }}/*.tar.in; do tar c -C dist/${{ matrix.os }}-${{ matrix.arch }} -T $ARCHIVE | pigz -9vc >$(basenme ${ARCHIVE//.*/}.tgz); done
+          for ARCHIVE in dist/${{ matrix.os }}-${{ matrix.arch }}/*.tar.in; do tar c -C dist/${{ matrix.os }}-${{ matrix.arch }} -T $ARCHIVE | pigz -9vc >$(basename ${ARCHIVE//.*/}.tgz); done
       - uses: actions/upload-artifact@v4
         with:
           name: dist-${{ matrix.os }}-${{ matrix.arch }}-${{ matrix.target }}
@@ -359,7 +368,7 @@ jobs:
       GOFLAGS: ${{ needs.setup-environment.outputs.GOFLAGS }}
     steps:
       - uses: actions/checkout@v4
-      - uses: docker/setup-buildx-action@v2
+      - uses: docker/setup-buildx-action@v3
       - uses: docker/login-action@v3
         with:
           username: ${{ vars.DOCKER_USER }}

From 32285a6d198b3d5c3f086e0a3ef1f64b98510618 Mon Sep 17 00:00:00 2001
From: Azis Alvriyanto
Date: Fri, 7 Feb 2025 04:06:15 +0700
Subject: [PATCH 52/68] format: rename test file from byte_test.go to
 bytes_test.go (#8865)

---
 format/{byte_test.go => bytes_test.go} | 0
 1 file changed, 0 insertions(+), 0 deletions(-)
 rename format/{byte_test.go => bytes_test.go} (100%)

diff --git a/format/byte_test.go b/format/bytes_test.go
similarity index 100%
rename from format/byte_test.go
rename to format/bytes_test.go

From b698f9a0d89c0dffbe8912cbd8b85f43f768ec1e Mon Sep 17 00:00:00 2001
From: Michael Yang
Date: Thu, 6 Feb 2025 13:12:16 -0800
Subject: [PATCH 53/68] build: add missing dependencies (#8896)

---
 CMakeLists.txt | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)

diff --git a/CMakeLists.txt b/CMakeLists.txt
index 58cb8b44..1b4caedf 100644
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -96,11 +96,12 @@ if(CMAKE_HIP_COMPILER)
 
     if(AMDGPU_TARGETS)
         add_subdirectory(${CMAKE_CURRENT_SOURCE_DIR}/ml/backend/ggml/ggml/src/ggml-hip)
+        set(OLLAMA_HIP_INSTALL_DIR ${OLLAMA_INSTALL_DIR}/rocm)
         install(TARGETS ggml-hip
             RUNTIME_DEPENDENCIES
                 DIRECTORIES ${HIP_BIN_INSTALL_DIR} ${HIP_LIB_INSTALL_DIR}
-                PRE_INCLUDE_REGEXES amdhip64 hipblas rocblas amd_comgr hsa_runtime64 rocprofiler-register drm_amdgpu drm numa
+                PRE_INCLUDE_REGEXES hipblas rocblas amdhip64 rocsolver amd_comgr hsa-runtime64 rocsparse rocprofiler-register drm drm_amdgpu
                 PRE_EXCLUDE_REGEXES ".*"
                 POST_EXCLUDE_REGEXES "system32"
             RUNTIME DESTINATION ${OLLAMA_HIP_INSTALL_DIR} COMPONENT HIP

From 78140197088cdec181c354a5cc00be4a5f080468 Mon Sep 17 00:00:00 2001
From: Abhinav Pant <67090539+abhitrueprogrammer@users.noreply.github.com>
Date: Fri, 7 Feb 2025 04:24:58 +0530
Subject: [PATCH 54/68] docs: add step for removing libraries in linux.md
 (#8897)

---
 docs/linux.md | 6 ++++++
 1 file changed, 6 insertions(+)

diff --git a/docs/linux.md b/docs/linux.md
index 12c38de1..a5b6dd91 100644
--- a/docs/linux.md
+++ b/docs/linux.md
@@ -186,3 +186,9 @@ sudo rm -r /usr/share/ollama
 sudo userdel ollama
 sudo groupdel ollama
 ```
+
+Remove installed libraries:
+
+```shell
+sudo rm -rf /usr/local/lib/ollama
+```

From 9a4757ae6690605298fda69c2cc1909a60508d1f Mon Sep 17 00:00:00 2001
From: Michael Yang
Date: Thu, 6 Feb 2025 15:08:12 -0800
Subject: [PATCH 55/68] build(rocm): add tinfo (#8899)

---
 CMakeLists.txt | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/CMakeLists.txt b/CMakeLists.txt
index 1b4caedf..ba3d3286 100644
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -101,7 +101,7 @@ if(CMAKE_HIP_COMPILER)
         install(TARGETS ggml-hip
             RUNTIME_DEPENDENCIES
                 DIRECTORIES ${HIP_BIN_INSTALL_DIR} ${HIP_LIB_INSTALL_DIR}
-                PRE_INCLUDE_REGEXES hipblas rocblas amdhip64 rocsolver amd_comgr hsa-runtime64 rocsparse rocprofiler-register drm drm_amdgpu
+                PRE_INCLUDE_REGEXES hipblas rocblas amdhip64 rocsolver amd_comgr hsa-runtime64 rocsparse tinfo rocprofiler-register drm drm_amdgpu
                 PRE_EXCLUDE_REGEXES ".*"
                 POST_EXCLUDE_REGEXES "system32"
             RUNTIME DESTINATION ${OLLAMA_HIP_INSTALL_DIR} COMPONENT HIP

From 31acd1ebf97528932714619f7123eeff25e0e149 Mon Sep 17 00:00:00 2001
From: oslook
Date: Fri, 7 Feb 2025 07:41:02 +0800
Subject: [PATCH 56/68] readme: add Ollama Chat WebUI for Docker to community
 integrations (#8084)

---
 README.md | 1 +
 1 file changed, 1 insertion(+)

diff --git a/README.md b/README.md
index 9134e2c3..4fde30fa 100644
--- a/README.md
+++ b/README.md
@@ -369,6 +369,7 @@ See the [API documentation](./docs/api.md) for all endpoints.
 - [Minima](https://github.com/dmayboroda/minima) (RAG with on-premises or fully local workflow)
 - [aidful-ollama-model-delete](https://github.com/AidfulAI/aidful-ollama-model-delete) (User interface for simplified model cleanup)
 - [Perplexica](https://github.com/ItzCrazyKns/Perplexica) (An AI-powered search engine & an open-source alternative to Perplexity AI)
+- [Ollama Chat WebUI for Docker ](https://github.com/oslook/ollama-webui) (Support for local docker deployment, lightweight ollama webui)
 - [AI Toolkit for Visual Studio Code](https://aka.ms/ai-tooklit/ollama-docs) (Microsoft-official VSCode extension to chat, test, evaluate models with Ollama support, and use them in your AI applications.)
 - [MinimalNextOllamaChat](https://github.com/anilkay/MinimalNextOllamaChat) (Minimal Web UI for Chat and Model Control)
 - [Chipper](https://github.com/TilmanGriesel/chipper) AI interface for tinkerers (Ollama, Haystack RAG, Python)

From ae7e368f75488a98ea7dae0131dfaf5963ad1a9c Mon Sep 17 00:00:00 2001
From: Michael Yang
Date: Thu, 6 Feb 2025 15:46:30 -0800
Subject: [PATCH 57/68] build(rocm): add numa, elf (#8900)

---
 CMakeLists.txt | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/CMakeLists.txt b/CMakeLists.txt
index ba3d3286..71d9bf0e 100644
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -101,7 +101,7 @@ if(CMAKE_HIP_COMPILER)
         install(TARGETS ggml-hip
             RUNTIME_DEPENDENCIES
                 DIRECTORIES ${HIP_BIN_INSTALL_DIR} ${HIP_LIB_INSTALL_DIR}
-                PRE_INCLUDE_REGEXES hipblas rocblas amdhip64 rocsolver amd_comgr hsa-runtime64 rocsparse tinfo rocprofiler-register drm drm_amdgpu
+                PRE_INCLUDE_REGEXES hipblas rocblas amdhip64 rocsolver amd_comgr hsa-runtime64 rocsparse tinfo rocprofiler-register drm drm_amdgpu numa elf
                 PRE_EXCLUDE_REGEXES ".*"
                 POST_EXCLUDE_REGEXES "system32"
             RUNTIME DESTINATION ${OLLAMA_HIP_INSTALL_DIR} COMPONENT HIP

From e8d4eb3e68b222f930f7818744ac6f6084bce0a7 Mon Sep 17 00:00:00 2001
From: CosmicEventHorizon <130989451+CosmicEventHorizon@users.noreply.github.com>
Date: Thu, 6 Feb 2025 19:08:46 -0500
Subject: [PATCH 58/68] readme: add ChibiChat to community integrations
 (#8883)

---
 README.md | 1 +
 1 file changed, 1 insertion(+)

diff --git a/README.md b/README.md
index 4fde30fa..2b156ea0 100644
--- a/README.md
+++ b/README.md
@@ -373,6 +373,7 @@ See the [API documentation](./docs/api.md) for all endpoints.
 - [AI Toolkit for Visual Studio Code](https://aka.ms/ai-tooklit/ollama-docs) (Microsoft-official VSCode extension to chat, test, evaluate models with Ollama support, and use them in your AI applications.)
 - [MinimalNextOllamaChat](https://github.com/anilkay/MinimalNextOllamaChat) (Minimal Web UI for Chat and Model Control)
 - [Chipper](https://github.com/TilmanGriesel/chipper) AI interface for tinkerers (Ollama, Haystack RAG, Python)
+- [ChibiChat](https://github.com/CosmicEventHorizon/ChibiChat) (Kotlin-based Android app to chat with Ollama and Koboldcpp API endpoints)
 
 ### Cloud

From 6ab4ba4c26afb61e14cb5fa16bd3401f9bfdb2e7 Mon Sep 17 00:00:00 2001
From: annilq <410493517@qq.com>
Date: Fri, 7 Feb 2025 09:15:48 +0800
Subject: [PATCH 59/68] readme: add React Native client to community
 integrations (#8877)

---
 README.md | 1 +
 1 file changed, 1 insertion(+)

diff --git a/README.md b/README.md
index 2b156ea0..187d6362 100644
--- a/README.md
+++ b/README.md
@@ -353,6 +353,7 @@ See the [API documentation](./docs/api.md) for all endpoints.
 - [Web management](https://github.com/lemonit-eric-mao/ollama-web-management) (Web management page)
 - [Promptery](https://github.com/promptery/promptery) (desktop client for Ollama.)
 - [Ollama App](https://github.com/JHubi1/ollama-app) (Modern and easy-to-use multi-platform client for Ollama)
+- [chat-ollama](https://github.com/annilq/chat-ollama) (a React Native client for Ollama)
 - [SpaceLlama](https://github.com/tcsenpai/spacellama) (Firefox and Chrome extension to quickly summarize web pages with ollama in a sidebar)
 - [YouLama](https://github.com/tcsenpai/youlama) (Webapp to quickly summarize any YouTube video, supporting Invidious as well)
 - [DualMind](https://github.com/tcsenpai/dualmind) (Experimental app allowing two models to talk to each other in the terminal or in a web interface)

From a400df48c06f6526ed74bfa3fd1af783ed0b4899 Mon Sep 17 00:00:00 2001
From: Leisure Linux
Date: Fri, 7 Feb 2025 10:45:09 +0800
Subject: [PATCH 60/68] docs: include port in faq.md OLLAMA_HOST examples
 (#8905)

---
 docs/faq.md | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/docs/faq.md b/docs/faq.md
index 387d752b..b58798e2 100644
--- a/docs/faq.md
+++ b/docs/faq.md
@@ -66,7 +66,7 @@ If Ollama is run as a macOS application, environment variables should be set usi
 1. For each environment variable, call `launchctl setenv`.
 
     ```bash
-    launchctl setenv OLLAMA_HOST "0.0.0.0"
+    launchctl setenv OLLAMA_HOST "0.0.0.0:11434"
    ```
 
 2. Restart Ollama application.
@@ -81,7 +81,7 @@ If Ollama is run as a systemd service, environment variables should be set using
 
     ```ini
    [Service]
-    Environment="OLLAMA_HOST=0.0.0.0"
+    Environment="OLLAMA_HOST=0.0.0.0:11434"
    ```
 
 3. Save and exit.

From abb8dd57f8a86a71b5f8fe1f059aee3636a658b1 Mon Sep 17 00:00:00 2001
From: Michael Yang
Date: Fri, 7 Feb 2025 09:51:22 -0800
Subject: [PATCH 61/68] add gfx instinct gpus (#8933)

---
 CMakeLists.txt    | 7 +++++++
 CMakePresets.json | 2 +-
 2 files changed, 8 insertions(+), 1 deletion(-)

diff --git a/CMakeLists.txt b/CMakeLists.txt
index 71d9bf0e..940c5916 100644
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -85,6 +85,11 @@ if(CMAKE_CUDA_COMPILER)
     )
 endif()
 
+set(WINDOWS_AMDGPU_TARGETS_EXCLUDE_REGEX "^gfx(906|908|90a):xnack[+-]$"
+    CACHE STRING
+    "Regular expression describing AMDGPU_TARGETS not supported on Windows. Override to force building these targets. Default \"^gfx(906|908|90a):xnack[+-]$\"."
+)
+
 check_language(HIP)
 if(CMAKE_HIP_COMPILER)
     set(HIP_PLATFORM "amd")
@@ -92,6 +97,8 @@ if(CMAKE_HIP_COMPILER)
     find_package(hip REQUIRED)
     if(NOT AMDGPU_TARGETS)
         list(FILTER AMDGPU_TARGETS INCLUDE REGEX "^gfx(900|94[012]|101[02]|1030|110[012])$")
+    elseif(WIN32 AND WINDOWS_AMDGPU_TARGETS_EXCLUDE_REGEX)
+        list(FILTER AMDGPU_TARGETS EXCLUDE REGEX ${WINDOWS_AMDGPU_TARGETS_EXCLUDE_REGEX})
     endif()
 
     if(AMDGPU_TARGETS)
diff --git a/CMakePresets.json b/CMakePresets.json
index e6d3f6e7..c789ad7f 100644
--- a/CMakePresets.json
+++ b/CMakePresets.json
@@ -56,7 +56,7 @@
       "name": "ROCm 6",
       "inherits": [ "ROCm" ],
       "cacheVariables": {
-        "AMDGPU_TARGETS": "gfx900;gfx940;gfx941;gfx942;gfx1010;gfx1012;gfx1030;gfx1100;gfx1101;gfx1102"
+        "AMDGPU_TARGETS": "gfx900;gfx940;gfx941;gfx942;gfx1010;gfx1012;gfx1030;gfx1100;gfx1101;gfx1102;gfx906:xnack-;gfx908:xnack-;gfx90a:xnack+;gfx90a:xnack-"
       }
     }
   ],

From b901a712c6b0afe88aef7e5318f193d5b889cf34 Mon Sep 17 00:00:00 2001
From: Azis Alvriyanto
Date: Sat, 8 Feb 2025 00:55:07 +0700
Subject: [PATCH 62/68] docs: improve syntax highlighting in code blocks
 (#8854)

---
 README.md               | 44 ++++++++++++++--------------
 api/examples/README.md  |  3 +-
 app/README.md           |  2 +-
 docs/api.md             | 33 ++++++++++-----------
 docs/development.md     | 20 ++++++-------
 docs/docker.md          | 50 +++++++++++++++++---------------
 docs/faq.md             | 18 +++++++++---
 docs/import.md          |  4 +--
 docs/linux.md           |  2 +-
 docs/modelfile.md       | 64 ++++++++++++++++++++------------------
 docs/openai.md          | 13 +++++----
 docs/troubleshooting.md | 11 ++++---
 docs/windows.md         |  1 +
 llama/README.md         | 10 +++----
 llama/runner/README.md  |  6 ++--
 macapp/README.md        |  4 +--
 16 files changed, 158 insertions(+), 127 deletions(-)

diff --git a/README.md b/README.md
index 187d6362..959d4c61 100644
--- a/README.md
+++ b/README.md
@@ -18,7 +18,7 @@ Get up and running with large language models.
 
 ### Linux
 
-```
+```shell
 curl -fsSL https://ollama.com/install.sh | sh
 ```
 
@@ -42,7 +42,7 @@ The official [Ollama Docker image](https://hub.docker.com/r/ollama/ollama) `olla
 
 To run and chat with [Llama 3.2](https://ollama.com/library/llama3.2):
 
-```
+```shell
 ollama run llama3.2
 ```
 
@@ -92,13 +92,13 @@ Ollama supports importing GGUF models in the Modelfile:
 
 2. Create the model in Ollama
 
-   ```
+   ```shell
    ollama create example -f Modelfile
    ```
 
 3. Run the model
 
-   ```
+   ```shell
    ollama run example
   ```
 
@@ -110,7 +110,7 @@ See the [guide](docs/import.md) on importing models for more information.
 
 Models from the Ollama library can be customized with a prompt. For example, to customize the `llama3.2` model:
 
-```
+```shell
 ollama pull llama3.2
 ```
 
@@ -145,13 +145,13 @@ For more information on working with a Modelfile, see the [Modelfile](docs/model
 
 `ollama create` is used to create a model from a Modelfile.
 
-```
+```shell
 ollama create mymodel -f ./Modelfile
 ```
 
 ### Pull a model
 
-```
+```shell
 ollama pull llama3.2
 ```
 
@@ -159,13 +159,13 @@ ollama pull llama3.2
 
 ### Remove a model
 
-```
+```shell
 ollama rm llama3.2
 ```
 
 ### Copy a model
 
-```
+```shell
 ollama cp llama3.2 my-model
 ```
 
@@ -184,37 +184,39 @@ I'm a basic program that prints the famous "Hello, world!" message to the consol
 
 ```
 ollama run llava "What's in this image? /Users/jmorgan/Desktop/smile.png"
-The image features a yellow smiley face, which is likely the central focus of the picture.
 ```
+> **Output**: The image features a yellow smiley face, which is likely the central focus of the picture.
+
 ### Pass the prompt as an argument
 
+```shell
+ollama run llama3.2 "Summarize this file: $(cat README.md)"
 ```
-$ ollama run llama3.2 "Summarize this file: $(cat README.md)"
- Ollama is a lightweight, extensible framework for building and running language models on the local machine. It provides a simple API for creating, running, and managing models, as well as a library of pre-built models that can be easily used in a variety of applications.
-```
+
+> **Output**: Ollama is a lightweight, extensible framework for building and running language models on the local machine. It provides a simple API for creating, running, and managing models, as well as a library of pre-built models that can be easily used in a variety of applications.
 
 ### Show model information
 
-```
+```shell
 ollama show llama3.2
 ```
 
 ### List models on your computer
 
-```
+```shell
 ollama list
 ```
 
 ### List which models are currently loaded
 
-```
+```shell
 ollama ps
 ```
 
 ### Stop a model which is currently running
 
-```
+```shell
 ollama stop llama3.2
 ```
 
@@ -230,13 +232,13 @@ See the [developer guide](https://github.com/ollama/ollama/blob/main/docs/develo
 
 Next, start the server:
 
-```
+```shell
 ./ollama serve
 ```
 
 Finally, in a separate shell, run a model:
 
-```
+```shell
 ./ollama run llama3.2
 ```
 
@@ -246,7 +248,7 @@ Ollama has a REST API for running and managing models.
 
 ### Generate a response
 
-```
+```shell
 curl http://localhost:11434/api/generate -d '{
   "model": "llama3.2",
   "prompt":"Why is the sky blue?"
 }'
 ```
 
 ### Chat with a model
 
-```
+```shell
 curl http://localhost:11434/api/chat -d '{
   "model": "llama3.2",
   "messages": [
diff --git a/api/examples/README.md b/api/examples/README.md
index b5a8917f..e83b5360 100644
--- a/api/examples/README.md
+++ b/api/examples/README.md
@@ -2,9 +2,10 @@
 
 Run the examples in this directory with:
 
-```
+```shell
 go run example_name/main.go
 ```
+
 ## Chat - Chat with a model
 
 - [chat/main.go](chat/main.go)
diff --git a/app/README.md b/app/README.md
index 883d7ab7..433ee44e 100644
--- a/app/README.md
+++ b/app/README.md
@@ -17,6 +17,6 @@ If you want to build the installer, you'll need to install
 
 In the top directory of this repo, run the following powershell script to build the ollama CLI, ollama app, and ollama installer.
 
-```
+```powershell
 powershell -ExecutionPolicy Bypass -File .\scripts\build_windows.ps1
 ```
diff --git a/docs/api.md b/docs/api.md
index 5d1b7d64..7de81049 100644
--- a/docs/api.md
+++ b/docs/api.md
@@ -31,7 +31,7 @@ Certain endpoints stream responses as JSON objects. Streaming can be disabled by
Streaming can be disabled by ## Generate a completion -```shell +``` POST /api/generate ``` @@ -485,7 +485,7 @@ A single JSON object is returned: ## Generate a chat completion -```shell +``` POST /api/chat ``` @@ -878,6 +878,7 @@ curl http://localhost:11434/api/chat -d '{ ``` ##### Response + ```json { "model": "llama3.2", @@ -924,7 +925,7 @@ A single JSON object is returned: ## Create a Model -```shell +``` POST /api/create ``` @@ -1020,7 +1021,7 @@ curl http://localhost:11434/api/create -d '{ A stream of JSON objects is returned: -``` +```json {"status":"quantizing F16 model to Q4_K_M"} {"status":"creating new layer sha256:667b0c1932bc6ffc593ed1d03f895bf2dc8dc6df21db3042284a6f4416b06a29"} {"status":"using existing layer sha256:11ce4ee3e170f6adebac9a991c22e22ab3f8530e154ee669954c4bc73061c258"} @@ -1051,7 +1052,7 @@ curl http://localhost:11434/api/create -d '{ A stream of JSON objects is returned: -``` +```json {"status":"parsing GGUF"} {"status":"using existing layer sha256:432f310a77f4650a88d0fd59ecdd7cebed8d684bafea53cbff0473542964f0c3"} {"status":"writing manifest"} @@ -1118,7 +1119,7 @@ Return 200 OK if the blob exists, 404 Not Found if it does not. ## Push a Blob -```shell +``` POST /api/blobs/:digest ``` @@ -1142,7 +1143,7 @@ Return 201 Created if the blob was successfully created, 400 Bad Request if the ## List Local Models -```shell +``` GET /api/tags ``` @@ -1195,7 +1196,7 @@ A single JSON object will be returned. ## Show Model Information -```shell +``` POST /api/show ``` @@ -1261,7 +1262,7 @@ curl http://localhost:11434/api/show -d '{ ## Copy a Model -```shell +``` POST /api/copy ``` @@ -1284,7 +1285,7 @@ Returns a 200 OK if successful, or a 404 Not Found if the source model doesn't e ## Delete a Model -```shell +``` DELETE /api/delete ``` @@ -1310,7 +1311,7 @@ Returns a 200 OK if successful, 404 Not Found if the model to be deleted doesn't ## Pull a Model -```shell +``` POST /api/pull ``` @@ -1382,7 +1383,7 @@ if `stream` is set to false, then the response is a single JSON object: ## Push a Model -```shell +``` POST /api/push ``` @@ -1447,7 +1448,7 @@ If `stream` is set to `false`, then the response is a single JSON object: ## Generate Embeddings -```shell +``` POST /api/embed ``` @@ -1515,7 +1516,7 @@ curl http://localhost:11434/api/embed -d '{ ``` ## List Running Models -```shell +``` GET /api/ps ``` @@ -1562,7 +1563,7 @@ A single JSON object will be returned. > Note: this endpoint has been superseded by `/api/embed` -```shell +``` POST /api/embeddings ``` @@ -1602,7 +1603,7 @@ curl http://localhost:11434/api/embeddings -d '{ ## Version -```shell +``` GET /api/version ``` diff --git a/docs/development.md b/docs/development.md index 618e98e1..5a6463fc 100644 --- a/docs/development.md +++ b/docs/development.md @@ -7,7 +7,7 @@ Install prerequisites: Then build and run Ollama from the root directory of the repository: -``` +```shell go run . serve ``` @@ -23,14 +23,14 @@ Install prerequisites: Then, configure and build the project: -``` +```shell cmake -B build cmake --build build ``` Lastly, run Ollama: -``` +```shell go run . serve ``` @@ -57,14 +57,14 @@ Install prerequisites: Then, configure and build the project: -``` +```shell cmake -B build cmake --build build --config Release ``` Lastly, run Ollama: -``` +```shell go run . serve ``` @@ -88,26 +88,26 @@ Install prerequisites: Then, configure and build the project: -``` +```shell cmake -B build cmake --build build ``` Lastly, run Ollama: -``` +```shell go run . serve ``` ## Docker -``` +```shell docker build . 
 ```
 
 ### ROCm
 
-```
+```shell
 docker build --build-arg FLAVOR=rocm .
 ```
 
@@ -115,7 +115,7 @@ docker build --build-arg FLAVOR=rocm .
 
 To run tests, use `go test`:
 
-```
+```shell
 go test ./...
 ```
 
diff --git a/docs/docker.md b/docs/docker.md
index 9dd387e3..dce090a2 100644
--- a/docs/docker.md
+++ b/docs/docker.md
@@ -2,7 +2,7 @@
 
 ### CPU only
 
-```bash
+```shell
 docker run -d -v ollama:/root/.ollama -p 11434:11434 --name ollama ollama/ollama
 ```
 
@@ -11,42 +11,46 @@ Install the [NVIDIA Container Toolkit](https://docs.nvidia.com/datacenter/cloud-
 #### Install with Apt
 
 1. Configure the repository
-```bash
-curl -fsSL https://nvidia.github.io/libnvidia-container/gpgkey \
-  | sudo gpg --dearmor -o /usr/share/keyrings/nvidia-container-toolkit-keyring.gpg
-curl -s -L https://nvidia.github.io/libnvidia-container/stable/deb/nvidia-container-toolkit.list \
-  | sed 's#deb https://#deb [signed-by=/usr/share/keyrings/nvidia-container-toolkit-keyring.gpg] https://#g' \
-  | sudo tee /etc/apt/sources.list.d/nvidia-container-toolkit.list
-sudo apt-get update
-```
+
+   ```shell
+   curl -fsSL https://nvidia.github.io/libnvidia-container/gpgkey \
+     | sudo gpg --dearmor -o /usr/share/keyrings/nvidia-container-toolkit-keyring.gpg
+   curl -s -L https://nvidia.github.io/libnvidia-container/stable/deb/nvidia-container-toolkit.list \
+     | sed 's#deb https://#deb [signed-by=/usr/share/keyrings/nvidia-container-toolkit-keyring.gpg] https://#g' \
+     | sudo tee /etc/apt/sources.list.d/nvidia-container-toolkit.list
+   sudo apt-get update
+   ```
+
 2. Install the NVIDIA Container Toolkit packages
-```bash
-sudo apt-get install -y nvidia-container-toolkit
-```
+
+   ```shell
+   sudo apt-get install -y nvidia-container-toolkit
+   ```
 
 #### Install with Yum or Dnf
 
 1. Configure the repository
-```bash
-curl -s -L https://nvidia.github.io/libnvidia-container/stable/rpm/nvidia-container-toolkit.repo \
-  | sudo tee /etc/yum.repos.d/nvidia-container-toolkit.repo
-```
+
+   ```shell
+   curl -s -L https://nvidia.github.io/libnvidia-container/stable/rpm/nvidia-container-toolkit.repo \
+     | sudo tee /etc/yum.repos.d/nvidia-container-toolkit.repo
+   ```
 
 2. Install the NVIDIA Container Toolkit packages
-```bash
-sudo yum install -y nvidia-container-toolkit
-```
+
+   ```shell
+   sudo yum install -y nvidia-container-toolkit
+   ```
 
 #### Configure Docker to use Nvidia driver
-```
+
+```shell
 sudo nvidia-ctk runtime configure --runtime=docker
 sudo systemctl restart docker
 ```
 
 #### Start the container
 
-```bash
+```shell
 docker run -d --gpus=all -v ollama:/root/.ollama -p 11434:11434 --name ollama ollama/ollama
 ```
 
@@ -57,7 +61,7 @@ docker run -d --gpus=all -v ollama:/root/.ollama -p 11434:11434 --name ollama ol
 
 To run Ollama using Docker with AMD GPUs, use the `rocm` tag and the following command:
 
-```
+```shell
 docker run -d --device /dev/kfd --device /dev/dri -v ollama:/root/.ollama -p 11434:11434 --name ollama ollama/ollama:rocm
 ```
 
@@ -65,7 +69,7 @@ docker run -d --device /dev/kfd --device /dev/dri -v ollama:/root/.ollama -p 114
 
 Now you can run a model:
 
-```
+```shell
 docker exec -it ollama ollama run llama3.2
 ```
 
diff --git a/docs/faq.md b/docs/faq.md
index b58798e2..04e8433d 100644
--- a/docs/faq.md
+++ b/docs/faq.md
@@ -24,7 +24,7 @@ By default, Ollama uses a context window size of 2048 tokens.
 
 To change this when using `ollama run`, use `/set parameter`:
 
-```
+```shell
 /set parameter num_ctx 4096
 ```
 
@@ -46,10 +46,15 @@ Use the `ollama ps` command to see what models are currently loaded into memory.
 
 ```shell
 ollama ps
-NAME        ID              SIZE    PROCESSOR   UNTIL
-llama3:70b  bcfb190ca3a7    42 GB   100% GPU    4 minutes from now
 ```
 
+> **Output**:
+>
+> ```
+> NAME        ID              SIZE    PROCESSOR   UNTIL
+> llama3:70b  bcfb190ca3a7    42 GB   100% GPU    4 minutes from now
+> ```
+
 The `Processor` column will show which memory the model was loaded in to:
 * `100% GPU` means the model was loaded entirely into the GPU
 * `100% CPU` means the model was loaded entirely in system memory
@@ -88,7 +93,7 @@ If Ollama is run as a systemd service, environment variables should be set using
 
 4. Reload `systemd` and restart Ollama:
 
-   ```bash
+   ```shell
    systemctl daemon-reload
    systemctl restart ollama
   ```
@@ -221,16 +226,19 @@ properties.
 
 If you are using the API you can preload a model by sending the Ollama server an empty request. This works with both the `/api/generate` and `/api/chat` API endpoints.
 
 To preload the mistral model using the generate endpoint, use:
+
 ```shell
 curl http://localhost:11434/api/generate -d '{"model": "mistral"}'
 ```
 
 To use the chat completions endpoint, use:
+
 ```shell
 curl http://localhost:11434/api/chat -d '{"model": "mistral"}'
 ```
 
 To preload a model using the CLI, use the command:
+
 ```shell
 ollama run llama3.2 ""
 ```
@@ -250,11 +258,13 @@ If you're using the API, use the `keep_alive` parameter with the `/api/generate`
 * '0' which will unload the model immediately after generating a response
 
 For example, to preload a model and leave it in memory use:
+
 ```shell
 curl http://localhost:11434/api/generate -d '{"model": "llama3.2", "keep_alive": -1}'
 ```
 
 To unload the model and free up memory use:
+
 ```shell
 curl http://localhost:11434/api/generate -d '{"model": "llama3.2", "keep_alive": 0}'
 ```
diff --git a/docs/import.md b/docs/import.md
index 040fa299..01fea542 100644
--- a/docs/import.md
+++ b/docs/import.md
@@ -20,13 +20,13 @@ Make sure that you use the same base model in the `FROM` command as you used to
 
 Now run `ollama create` from the directory where the `Modelfile` was created:
 
-```bash
+```shell
 ollama create my-model
 ```
 
 Lastly, test the model:
 
-```bash
+```shell
 ollama run my-model
 ```
 
diff --git a/docs/linux.md b/docs/linux.md
index a5b6dd91..12581bdd 100644
--- a/docs/linux.md
+++ b/docs/linux.md
@@ -119,7 +119,7 @@ sudo systemctl status ollama
 
 To customize the installation of Ollama, you can edit the systemd service file or the environment variables by running:
 
-```
+```shell
 sudo systemctl edit ollama
 ```
 
diff --git a/docs/modelfile.md b/docs/modelfile.md
index cc2115b3..a71183f4 100644
--- a/docs/modelfile.md
+++ b/docs/modelfile.md
@@ -28,7 +28,7 @@ A model file is the blueprint to create and share models with Ollama.
 
 The format of the `Modelfile`:
 
-```modelfile
+```
 # comment
 INSTRUCTION arguments
 ```
 
@@ -49,7 +49,7 @@ INSTRUCTION arguments
 
 An example of a `Modelfile` creating a mario blueprint:
 
-```modelfile
+```
 FROM llama3.2
 # sets the temperature to 1 [higher is more creative, lower is more coherent]
 PARAMETER temperature 1
@@ -69,24 +69,30 @@ To use this:
 
 To view the Modelfile of a given model, use the `ollama show --modelfile` command.
-  ```bash
-  > ollama show --modelfile llama3.2
-  # Modelfile generated by "ollama show"
-  # To build a new Modelfile based on this one, replace the FROM line with:
-  # FROM llama3.2:latest
-  FROM /Users/pdevine/.ollama/models/blobs/sha256-00e1317cbf74d901080d7100f57580ba8dd8de57203072dc6f668324ba545f29
-  TEMPLATE """{{ if .System }}<|start_header_id|>system<|end_header_id|>
+```shell
+ollama show --modelfile llama3.2
+```
 
-  {{ .System }}<|eot_id|>{{ end }}{{ if .Prompt }}<|start_header_id|>user<|end_header_id|>
+> **Output**:
+>
+> ```
+> # Modelfile generated by "ollama show"
+> # To build a new Modelfile based on this one, replace the FROM line with:
+> # FROM llama3.2:latest
+> FROM /Users/pdevine/.ollama/models/blobs/sha256-00e1317cbf74d901080d7100f57580ba8dd8de57203072dc6f668324ba545f29
+> TEMPLATE """{{ if .System }}<|start_header_id|>system<|end_header_id|>
+>
+> {{ .System }}<|eot_id|>{{ end }}{{ if .Prompt }}<|start_header_id|>user<|end_header_id|>
+>
+> {{ .Prompt }}<|eot_id|>{{ end }}<|start_header_id|>assistant<|end_header_id|>
+>
+> {{ .Response }}<|eot_id|>"""
+> PARAMETER stop "<|start_header_id|>"
+> PARAMETER stop "<|end_header_id|>"
+> PARAMETER stop "<|eot_id|>"
+> PARAMETER stop "<|reserved_special_token"
+> ```
 
-  {{ .Prompt }}<|eot_id|>{{ end }}<|start_header_id|>assistant<|end_header_id|>
-
-  {{ .Response }}<|eot_id|>"""
-  PARAMETER stop "<|start_header_id|>"
-  PARAMETER stop "<|end_header_id|>"
-  PARAMETER stop "<|eot_id|>"
-  PARAMETER stop "<|reserved_special_token"
-  ```
 
 ## Instructions
 
@@ -94,13 +100,13 @@ To view the Modelfile of a given model, use the `ollama show --modelfile` comman
 
 The `FROM` instruction defines the base model to use when creating a model.
 
-```modelfile
+```
 FROM <model name>:<tag>
 ```
 
 #### Build from existing model
 
-```modelfile
+```
 FROM llama3.2
 ```
 
@@ -111,7 +117,7 @@ Additional models can be found at:
 
 #### Build from a Safetensors model
 
-```modelfile
+```
 FROM <model directory>
 ```
 
@@ -125,7 +131,7 @@ Currently supported model architectures:
 
 #### Build from a GGUF file
 
-```modelfile
+```
 FROM ./ollama-model.gguf
 ```
 
@@ -136,7 +142,7 @@ The GGUF file location should be specified as an absolute path or relative to th
 
 The `PARAMETER` instruction defines a parameter that can be set when the model is run.
 
-```modelfile
+```
 PARAMETER <parameter> <parametervalue>
 ```
 
@@ -183,7 +189,7 @@ TEMPLATE """{{ if .System }}<|im_start|>system
 
 The `SYSTEM` instruction specifies the system message to be used in the template, if applicable.
 
-```modelfile
+```
 SYSTEM """<system message>"""
 ```
 
@@ -193,7 +199,7 @@ The `ADAPTER` instruction specifies a fine tuned LoRA adapter that should apply
 
 #### Safetensor adapter
 
-```modelfile
+```
 ADAPTER <path to safetensor adapter>
 ```
 
@@ -204,7 +210,7 @@ Currently supported Safetensor adapters:
 
 #### GGUF adapter
 
-```modelfile
+```
 ADAPTER ./ollama-lora.gguf
 ```
 
@@ -212,7 +218,7 @@ ADAPTER ./ollama-lora.gguf
 
 The `LICENSE` instruction allows you to specify the legal license under which the model used with this Modelfile is shared or distributed.
 
-```modelfile
+```
 LICENSE """
 <license text>
 """
 ```
 
@@ -222,7 +228,7 @@ LICENSE """
 
 The `MESSAGE` instruction allows you to specify a message history for the model to use when responding. Use multiple iterations of the MESSAGE command to build up a conversation which will guide the model to answer in a similar way.
 
-```modelfile
+```
 MESSAGE <role> <message>
 ```
 
@@ -237,7 +243,7 @@ MESSAGE <role> <message>
 
 #### Example conversation
 
-```modelfile
+```
 MESSAGE user Is Toronto in Canada?
 MESSAGE assistant yes
 MESSAGE user Is Sacramento in Canada?
diff --git a/docs/openai.md b/docs/openai.md
index b0f9b353..d0bac4cd 100644
--- a/docs/openai.md
+++ b/docs/openai.md
@@ -1,6 +1,7 @@
 # OpenAI compatibility
 
-> **Note:** OpenAI compatibility is experimental and is subject to major adjustments including breaking changes. For fully-featured access to the Ollama API, see the Ollama [Python library](https://github.com/ollama/ollama-python), [JavaScript library](https://github.com/ollama/ollama-js) and [REST API](https://github.com/ollama/ollama/blob/main/docs/api.md).
+> [!NOTE]
+> OpenAI compatibility is experimental and is subject to major adjustments including breaking changes. For fully-featured access to the Ollama API, see the Ollama [Python library](https://github.com/ollama/ollama-python), [JavaScript library](https://github.com/ollama/ollama-js) and [REST API](https://github.com/ollama/ollama/blob/main/docs/api.md).
 
 Ollama provides experimental compatibility with parts of the [OpenAI API](https://platform.openai.com/docs/api-reference) to help connect existing applications to Ollama.
 
@@ -59,8 +60,10 @@ embeddings = client.embeddings.create(
     input=["why is the sky blue?", "why is the grass green?"],
 )
 ```
+
 #### Structured outputs
-```py
+
+```python
 from pydantic import BaseModel
 from openai import OpenAI
 
@@ -144,7 +147,7 @@ const embedding = await openai.embeddings.create({
 
 ### `curl`
 
-``` shell
+```shell
 curl http://localhost:11434/v1/chat/completions \
     -H "Content-Type: application/json" \
     -d '{
@@ -319,7 +322,7 @@ ollama pull llama3.2
 
 For tooling that relies on default OpenAI model names such as `gpt-3.5-turbo`, use `ollama cp` to copy an existing model name to a temporary name:
 
-```
+```shell
 ollama cp llama3.2 gpt-3.5-turbo
 ```
 
@@ -343,7 +346,7 @@ curl http://localhost:11434/v1/chat/completions \
 
 The OpenAI API does not have a way of setting the context size for a model. If you need to change the context size, create a `Modelfile` which looks like:
 
-```modelfile
+```
 FROM <some model>
 PARAMETER num_ctx <context size>
 ```
 
diff --git a/docs/troubleshooting.md b/docs/troubleshooting.md
index 28f4350a..7ef1618e 100644
--- a/docs/troubleshooting.md
+++ b/docs/troubleshooting.md
@@ -17,6 +17,7 @@ When you run Ollama in a **container**, the logs go to stdout/stderr in the cont
 
 ```shell
 docker logs <container-name>
 ```
+
 (Use `docker ps` to find the container name)
 
 If manually running `ollama serve` in a terminal, the logs will be on that terminal.
@@ -28,6 +29,7 @@ When you run Ollama on **Windows**, there are a few different locations. You can
 - `explorer %TEMP%` where temporary executable files are stored in one or more `ollama*` directories
 
 To enable additional debug logging to help troubleshoot problems, first **Quit the running app from the tray menu** then in a powershell terminal
+
 ```powershell
 $env:OLLAMA_DEBUG="1"
 & "ollama app.exe"
@@ -49,12 +51,13 @@ Dynamic LLM libraries [rocm_v6 cpu cpu_avx cpu_avx2 cuda_v11 rocm_v5]
 
 You can set OLLAMA_LLM_LIBRARY to any of the available LLM libraries to bypass autodetection, so for example, if you have a CUDA card, but want to force the CPU LLM library with AVX2 vector support, use:
 
-```
+```shell
 OLLAMA_LLM_LIBRARY="cpu_avx2" ollama serve
 ```
 
 You can see what features your CPU has with the following.
-```
+
+```shell
 cat /proc/cpuinfo| grep flags | head -1
 ```
 
@@ -62,8 +65,8 @@ cat /proc/cpuinfo| grep flags | head -1
 
 If you run into problems on Linux and want to install an older version, or you'd like to try out a pre-release before it's officially released, you can tell the install script which version to install.
-```sh
-curl -fsSL https://ollama.com/install.sh | OLLAMA_VERSION="0.1.29" sh
+```shell
+curl -fsSL https://ollama.com/install.sh | OLLAMA_VERSION=0.5.7 sh
 ```
 
 ## Linux tmp noexec
 
diff --git a/docs/windows.md b/docs/windows.md
index 80bebed4..2a0d08d9 100644
--- a/docs/windows.md
+++ b/docs/windows.md
@@ -47,6 +47,7 @@ If Ollama is already running, Quit the tray application and relaunch it from the
 ## API Access
 
 Here's a quick example showing API access from `powershell`
+
 ```powershell
 (Invoke-WebRequest -method POST -Body '{"model":"llama3.2", "prompt":"Why is the sky blue?", "stream": false}' -uri http://localhost:11434/api/generate ).Content | ConvertFrom-json
 ```
 
diff --git a/llama/README.md b/llama/README.md
index f6a39725..bfe66a8b 100644
--- a/llama/README.md
+++ b/llama/README.md
@@ -8,7 +8,7 @@ Ollama vendors [llama.cpp](https://github.com/ggerganov/llama.cpp/) and [ggml](h
 
 If you update the vendoring code, start by running the following command to establish the tracking llama.cpp repo in the `./vendor/` directory.
 
-```
+```shell
 make -f Makefile.sync apply-patches
 ```
 
@@ -22,7 +22,7 @@ When updating to a newer base commit, the existing patches may not apply cleanly
 
 Start by applying the patches. If any of the patches have conflicts, the `git am` will stop at the first failure.
 
-```
+```shell
 make -f Makefile.sync apply-patches
 ```
 
@@ -30,7 +30,7 @@ If there are conflicts, you will see an error message. Resolve the conflicts in
 
 Once all patches are applied, commit the changes to the tracking repository.
 
-```
+```shell
 make -f Makefile.sync format-patches sync
 ```
 
@@ -38,13 +38,13 @@ make -f Makefile.sync format-patches sync
 
 When working on new fixes or features that impact vendored code, use the following model. First get a clean tracking repo with all current patches applied:
 
-```
+```shell
 make -f Makefile.sync clean apply-patches
 ```
 
 Iterate until you're ready to submit PRs. Once your code is ready, commit a change in the `./vendor/` directory, then generate the patches for ollama with
 
-```
+```shell
 make -f Makefile.sync format-patches
 ```
 
diff --git a/llama/runner/README.md b/llama/runner/README.md
index 75f61682..80ffda81 100644
--- a/llama/runner/README.md
+++ b/llama/runner/README.md
@@ -4,18 +4,18 @@
 
 A minimial runner for loading a model and running inference via a http web server.
 
-```
+```shell
 ./runner -model <model binary>
 ```
 
 ### Completion
 
-```
+```shell
 curl -X POST -H "Content-Type: application/json" -d '{"prompt": "hi"}' http://localhost:8080/completion
 ```
 
 ### Embeddings
 
-```
+```shell
 curl -X POST -H "Content-Type: application/json" -d '{"prompt": "turn me into an embedding"}' http://localhost:8080/embedding
 ```
 
diff --git a/macapp/README.md b/macapp/README.md
index 8bde06e2..bdaf05e7 100644
--- a/macapp/README.md
+++ b/macapp/README.md
@@ -6,14 +6,14 @@ This app builds upon Ollama to provide a desktop experience for running models.
 
 First, build the `ollama` binary:
 
-```
+```shell
 cd ..
 go build .
 ```
 
 Then run the desktop app with `npm start`:
 
-```
+```shell
 cd macapp
 npm install
 npm start

From 7e402ebb8cc95e1fd2b59fe6d9ef9baf8972977e Mon Sep 17 00:00:00 2001
From: Guddu Kumar <49631628+kontactguddu@users.noreply.github.com>
Date: Sat, 8 Feb 2025 00:58:28 +0530
Subject: [PATCH 63/68] readme: add deepseek to supported models

---
 README.md | 2 ++
 1 file changed, 2 insertions(+)

diff --git a/README.md b/README.md
index 959d4c61..205f3127 100644
--- a/README.md
+++ b/README.md
@@ -54,6 +54,8 @@ Here are some example models that can be downloaded:
 
 | Model              | Parameters | Size  | Download                         |
 | ------------------ | ---------- | ----- | -------------------------------- |
+| DeepSeek-R1        | 7B         | 4.7GB | `ollama run deepseek-r1`         |
+| DeepSeek-R1        | 671B       | 404GB | `ollama run deepseek-r1:671b`    |
 | Llama 3.3          | 70B        | 43GB  | `ollama run llama3.3`            |
 | Llama 3.2          | 3B         | 2.0GB | `ollama run llama3.2`            |
 | Llama 3.2          | 1B         | 1.3GB | `ollama run llama3.2:1b`         |

From b86c0a1500279a79484135a41c9ef4bd3798ae9e Mon Sep 17 00:00:00 2001
From: Jeffrey Morgan
Date: Sat, 8 Feb 2025 00:21:10 -0800
Subject: [PATCH 64/68] docs: link directly to latest release page for tdm-gcc
 (#8939)

---
 docs/development.md | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/docs/development.md b/docs/development.md
index 5a6463fc..88fec3db 100644
--- a/docs/development.md
+++ b/docs/development.md
@@ -3,7 +3,7 @@ Install prerequisites:
 
 - [Go](https://go.dev/doc/install)
-- C/C++ Compiler e.g. Clang on macOS, [TDM-GCC](https://jmeubank.github.io/tdm-gcc/download/) (Windows amd64) or [llvm-mingw](https://github.com/mstorsjo/llvm-mingw) (Windows arm64), GCC/Clang on Linux.
+- C/C++ Compiler e.g. Clang on macOS, [TDM-GCC](https://github.com/jmeubank/tdm-gcc/releases/latest) (Windows amd64) or [llvm-mingw](https://github.com/mstorsjo/llvm-mingw) (Windows arm64), GCC/Clang on Linux.
 
 Then build and run Ollama from the root directory of the repository:

From ec6121c331e66b7e6abe42866002b03768c0db2c Mon Sep 17 00:00:00 2001
From: DravenK
Date: Sun, 9 Feb 2025 03:10:47 +0800
Subject: [PATCH 65/68] docs: ollama zig community lib (#8688)

---
 README.md | 1 +
 1 file changed, 1 insertion(+)

diff --git a/README.md b/README.md
index 205f3127..2d1c13b7 100644
--- a/README.md
+++ b/README.md
@@ -492,6 +492,7 @@ See the [API documentation](./docs/api.md) for all endpoints.
 - [Ollama for Haskell](https://github.com/tusharad/ollama-haskell)
 - [multi-llm-ts](https://github.com/nbonamy/multi-llm-ts) (A Typescript/JavaScript library allowing access to different LLM in unified API)
 - [LlmTornado](https://github.com/lofcz/llmtornado) (C# library providing a unified interface for major FOSS & Commercial inference APIs)
+- [Ollama for Zig](https://github.com/dravenk/ollama-zig)
 
 ### Mobile

From 484a99e428bff772886f4585751f897db4ff7fbd Mon Sep 17 00:00:00 2001
From: Qusai Ismael
Date: Sat, 8 Feb 2025 23:28:01 +0300
Subject: [PATCH 66/68] docs: add LocalLLM app to community integrations
 (#8953)

---
 README.md | 1 +
 1 file changed, 1 insertion(+)

diff --git a/README.md b/README.md
index 2d1c13b7..13587b33 100644
--- a/README.md
+++ b/README.md
@@ -379,6 +379,7 @@ See the [API documentation](./docs/api.md) for all endpoints.
 - [MinimalNextOllamaChat](https://github.com/anilkay/MinimalNextOllamaChat) (Minimal Web UI for Chat and Model Control)
 - [Chipper](https://github.com/TilmanGriesel/chipper) AI interface for tinkerers (Ollama, Haystack RAG, Python)
 - [ChibiChat](https://github.com/CosmicEventHorizon/ChibiChat) (Kotlin-based Android app to chat with Ollama and Koboldcpp API endpoints)
+- [LocalLLM](https://github.com/qusaismael/localllm) (Minimal Web-App to run ollama models on it with a GUI)
 
 ### Cloud

From 1f766c36fb61f7b1969664645bf38dae93f568a2 Mon Sep 17 00:00:00 2001
From: Michael Yang
Date: Sat, 8 Feb 2025 21:07:00 +0000
Subject: [PATCH 67/68] ci: use windows-2022 to sign and bundle (#8941)

ollama requires vcruntime140_1.dll which isn't found on 2019. previously
the job used the windows runner (2019) but it explicitly installs 2022
to build the app. since the sign job doesn't actually build anything, it
can use the windows-2022 runner instead.

---
 .github/workflows/release.yaml | 2 +-
 scripts/build_windows.ps1      | 3 +++
 2 files changed, 4 insertions(+), 1 deletion(-)

diff --git a/.github/workflows/release.yaml b/.github/workflows/release.yaml
index ca83a429..acae0050 100644
--- a/.github/workflows/release.yaml
+++ b/.github/workflows/release.yaml
@@ -242,7 +242,7 @@ jobs:
           dist\${{ matrix.os }}-${{ matrix.arch }}-app.exe
 
   windows-sign:
-    runs-on: windows
+    runs-on: windows-2022
     environment: release
     needs: [windows-depends, windows-build]
     steps:
diff --git a/scripts/build_windows.ps1 b/scripts/build_windows.ps1
index b0c7e32f..68f3b11d 100644
--- a/scripts/build_windows.ps1
+++ b/scripts/build_windows.ps1
@@ -162,8 +162,11 @@ function gatherDependencies() {
         $depArch=$script:TARGET_ARCH
     }
     if ($depArch -eq "x64") {
+        write-host "cp ${env:VCToolsRedistDir}\${depArch}\Microsoft.VC*.CRT\msvcp140*.dll ${script:DIST_DIR}\lib\ollama\"
         cp "${env:VCToolsRedistDir}\${depArch}\Microsoft.VC*.CRT\msvcp140*.dll" "${script:DIST_DIR}\lib\ollama\"
+        write-host "cp ${env:VCToolsRedistDir}\${depArch}\Microsoft.VC*.CRT\vcruntime140.dll ${script:DIST_DIR}\lib\ollama\"
        cp "${env:VCToolsRedistDir}\${depArch}\Microsoft.VC*.CRT\vcruntime140.dll" "${script:DIST_DIR}\lib\ollama\"
+        write-host "cp ${env:VCToolsRedistDir}\${depArch}\Microsoft.VC*.CRT\vcruntime140_1.dll ${script:DIST_DIR}\lib\ollama\"
         cp "${env:VCToolsRedistDir}\${depArch}\Microsoft.VC*.CRT\vcruntime140_1.dll" "${script:DIST_DIR}\lib\ollama\"
         $llvmCrtDir="$env:VCToolsRedistDir\..\..\..\Tools\Llvm\${depArch}\bin"
         foreach ($part in $("runtime", "stdio", "filesystem", "math", "convert", "heap", "string", "time", "locale", "environment")) {

From 38117fba83c8291e28abe564d56600c200ff4cab Mon Sep 17 00:00:00 2001
From: Hugues Chocart
Date: Mon, 10 Feb 2025 06:08:46 +0000
Subject: [PATCH 68/68] readme: add Lunary to observability community
 integrations (#8975)

---
 README.md | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/README.md b/README.md
index 13587b33..e342ab83 100644
--- a/README.md
+++ b/README.md
@@ -551,7 +551,7 @@ See the [API documentation](./docs/api.md) for all endpoints.
 - [llama.cpp](https://github.com/ggerganov/llama.cpp) project founded by Georgi Gerganov.
 
 ### Observability
-
+- [Lunary](https://lunary.ai/docs/integrations/ollama) is the leading open-source LLM observability platform. It provides a variety of enterprise-grade features such as real-time analytics, prompt templates management, PII masking, and comprehensive agent tracing.
 - [OpenLIT](https://github.com/openlit/openlit) is an OpenTelemetry-native tool for monitoring Ollama Applications & GPUs using traces and metrics.
 - [HoneyHive](https://docs.honeyhive.ai/integrations/ollama) is an AI observability and evaluation platform for AI agents. Use HoneyHive to evaluate agent performance, interrogate failures, and monitor quality in production.
 - [Langfuse](https://langfuse.com/docs/integrations/ollama) is an open source LLM observability platform that enables teams to collaboratively monitor, evaluate and debug AI applications.