Mirror of https://github.com/likelovewant/ollama-for-amd.git
Synced 2025-12-21 22:33:56 +00:00

Commit: fix conflicts
.github/workflows/release.yaml (vendored): 166 lines changed
@@ -54,48 +54,6 @@ jobs:
           name: build-${{ matrix.os }}-${{ matrix.arch }}
           path: dist/*
 
-  darwin-sign:
-    runs-on: macos-13
-    environment: release
-    needs: darwin-build
-    steps:
-      - uses: actions/checkout@v4
-      - run: |
-          echo $MACOS_SIGNING_KEY | base64 --decode > certificate.p12
-          security create-keychain -p password build.keychain
-          security default-keychain -s build.keychain
-          security unlock-keychain -p password build.keychain
-          security import certificate.p12 -k build.keychain -P $MACOS_SIGNING_KEY_PASSWORD -T /usr/bin/codesign
-          security set-key-partition-list -S apple-tool:,apple:,codesign: -s -k password build.keychain
-          security set-keychain-settings -lut 3600 build.keychain
-        env:
-          MACOS_SIGNING_KEY: ${{ secrets.MACOS_SIGNING_KEY }}
-          MACOS_SIGNING_KEY_PASSWORD: ${{ secrets.MACOS_SIGNING_KEY_PASSWORD }}
-      - uses: actions/download-artifact@v4
-        with:
-          name: build-darwin-amd64
-          path: dist/darwin-amd64
-      - uses: actions/download-artifact@v4
-        with:
-          name: build-darwin-arm64
-          path: dist/darwin-arm64
-      - run: |
-          export VERSION=${GITHUB_REF_NAME#v}
-          ./scripts/build_darwin.sh sign macapp
-        env:
-          APPLE_IDENTITY: ${{ secrets.APPLE_IDENTITY }}
-          APPLE_PASSWORD: ${{ secrets.APPLE_PASSWORD }}
-          APPLE_TEAM_ID: ${{ vars.APPLE_TEAM_ID }}
-          APPLE_ID: ${{ vars.APPLE_ID }}
-          SDKROOT: /Applications/Xcode_14.1.0.app/Contents/Developer/Platforms/MacOSX.platform/Developer/SDKs/MacOSX.sdk
-          DEVELOPER_DIR: /Applications/Xcode_14.1.0.app/Contents/Developer
-      - uses: actions/upload-artifact@v4
-        with:
-          name: dist-darwin
-          path: |
-            dist/Ollama-darwin.zip
-            dist/ollama-darwin.tgz
-
   windows-depends:
     strategy:
       matrix:
@@ -103,21 +61,18 @@ jobs:
         arch: [amd64]
         preset: ['CPU']
         include:
-          - os: windows
-            arch: amd64
-            preset: 'CUDA 11'
-            install: https://developer.download.nvidia.com/compute/cuda/11.3.1/local_installers/cuda_11.3.1_465.89_win10.exe
-            cuda-version: '11.3'
           - os: windows
             arch: amd64
             preset: 'CUDA 12'
             install: https://developer.download.nvidia.com/compute/cuda/12.8.0/local_installers/cuda_12.8.0_571.96_windows.exe
             cuda-version: '12.8'
+            flags: ''
           - os: windows
             arch: amd64
             preset: 'ROCm 6'
             install: https://download.amd.com/developer/eula/rocm-hub/AMD-Software-PRO-Edition-24.Q4-WinSvr2022-For-HIP.exe
             rocm-version: '6.2'
+            flags: '-DCMAKE_C_COMPILER=clang -DCMAKE_CXX_COMPILER=clang++ -DCMAKE_C_FLAGS="-parallel-jobs=4 -Wno-ignored-attributes -Wno-deprecated-pragma" -DCMAKE_CXX_FLAGS="-parallel-jobs=4 -Wno-ignored-attributes -Wno-deprecated-pragma"'
     runs-on: ${{ matrix.arch == 'arm64' && format('{0}-{1}', matrix.os, matrix.arch) || matrix.os }}
     environment: release
     env:
@@ -160,6 +115,9 @@ jobs:
           echo "$hipPath\bin" | Out-File -FilePath $env:GITHUB_PATH -Encoding utf8 -Append
           echo "CC=$hipPath\bin\clang.exe" | Out-File -FilePath $env:GITHUB_ENV -Append
           echo "CXX=$hipPath\bin\clang++.exe" | Out-File -FilePath $env:GITHUB_ENV -Append
+          echo "HIPCXX=$hipPath\bin\clang++.exe" | Out-File -FilePath $env:GITHUB_ENV -Append
+          echo "HIP_PLATFORM=amd" | Out-File -FilePath $env:GITHUB_ENV -Append
+          echo "CMAKE_PREFIX_PATH=$hipPath" | Out-File -FilePath $env:GITHUB_ENV -Append
       - if: matrix.preset == 'CPU'
         run: |
           echo "CC=clang.exe" | Out-File -FilePath $env:GITHUB_ENV -Append
@@ -178,9 +136,9 @@ jobs:
           key: ccache-${{ matrix.os }}-${{ matrix.arch }}-${{ matrix.preset }}
       - name: Build target "${{ matrix.preset }}"
         run: |
-          Import-Module 'C:\Program Files (x86)\Microsoft Visual Studio\2019\Enterprise\Common7\Tools\Microsoft.VisualStudio.DevShell.dll'
-          Enter-VsDevShell -VsInstallPath 'C:\Program Files (x86)\Microsoft Visual Studio\2019\Enterprise' -SkipAutomaticLocation -DevCmdArguments '-arch=x64 -no_logo'
-          cmake --preset "${{ matrix.preset }}"
+          Import-Module 'C:\Program Files\Microsoft Visual Studio\2022\Enterprise\Common7\Tools\Microsoft.VisualStudio.DevShell.dll'
+          Enter-VsDevShell -VsInstallPath 'C:\Program Files\Microsoft Visual Studio\2022\Enterprise' -SkipAutomaticLocation -DevCmdArguments '-arch=x64 -no_logo'
+          cmake --preset "${{ matrix.preset }}" ${{ matrix.flags }}
           cmake --build --parallel --preset "${{ matrix.preset }}"
           cmake --install build --component "${{ startsWith(matrix.preset, 'CUDA ') && 'CUDA' || startsWith(matrix.preset, 'ROCm ') && 'HIP' || 'CPU' }}" --strip --parallel 8
         env:
@@ -230,61 +188,11 @@ jobs:
           go-version-file: go.mod
       - run: |
           go build -o dist/${{ matrix.os }}-${{ matrix.arch }}/ .
-      - if: matrix.arch == 'arm64'
-        run: |
-          Invoke-WebRequest -Uri "https://aka.ms/vs/17/release/vc_redist.arm64.exe" -OutFile "dist\windows-arm64\vc_redist.arm64.exe"
-      - run: |
-          $env:VERSION='${{ github.ref_name }}' -Replace "v(.*)", '$1'
-          & .\scripts\build_windows.ps1 buildApp
-        env:
-          VCToolsRedistDir: stub
       - uses: actions/upload-artifact@v4
         with:
           name: build-${{ matrix.os }}-${{ matrix.arch }}
           path: |
             dist\${{ matrix.os }}-${{ matrix.arch }}\*.exe
-            dist\${{ matrix.os }}-${{ matrix.arch }}-app.exe
-
-  windows-sign:
-    runs-on: windows-2022
-    environment: release
-    needs: [windows-depends, windows-build]
-    steps:
-      - uses: actions/checkout@v4
-      - uses: google-github-actions/auth@v2
-        with:
-          project_id: ollama
-          credentials_json: ${{ secrets.GOOGLE_SIGNING_CREDENTIALS }}
-      - run: |
-          $ErrorActionPreference = "Stop"
-          Invoke-WebRequest -Uri "https://go.microsoft.com/fwlink/p/?LinkId=323507" -OutFile "${{ runner.temp }}\sdksetup.exe"
-          Start-Process "${{ runner.temp }}\sdksetup.exe" -ArgumentList @("/q") -NoNewWindow -Wait
-
-          Invoke-WebRequest -Uri "https://github.com/GoogleCloudPlatform/kms-integrations/releases/download/cng-v1.0/kmscng-1.0-windows-amd64.zip" -OutFile "${{ runner.temp }}\plugin.zip"
-          Expand-Archive -Path "${{ runner.temp }}\plugin.zip" -DestinationPath "${{ runner.temp }}\plugin\"
-          & "${{ runner.temp }}\plugin\*\kmscng.msi" /quiet
-
-          echo "${{ vars.OLLAMA_CERT }}" >ollama_inc.crt
-      - uses: actions/download-artifact@v4
-        with:
-          pattern: build-windows-*
-          path: dist\
-          merge-multiple: true
-      - uses: actions/download-artifact@v4
-        with:
-          pattern: depends-windows-amd64-*
-          path: dist\windows-amd64\
-          merge-multiple: true
-      - run: |
-          & .\scripts\build_windows.ps1 gatherDependencies sign buildInstaller distZip
-        env:
-          KEY_CONTAINER: ${{ vars.KEY_CONTAINER }}
-      - uses: actions/upload-artifact@v4
-        with:
-          name: dist-windows
-          path: |
-            dist\OllamaSetup.exe
-            dist\ollama-windows-*.zip
 
   linux-build:
     strategy:
@@ -323,15 +231,20 @@ jobs:
          for COMPONENT in bin/* lib/ollama/*; do
            case "$COMPONENT" in
              bin/ollama) echo $COMPONENT >>ollama-${{ matrix.os }}-${{ matrix.arch }}.tar.in ;;
-             lib/ollama/*.so) echo $COMPONENT >>ollama-${{ matrix.os }}-${{ matrix.arch }}.tar.in ;;
-             lib/ollama/cuda_v11) echo $COMPONENT >>ollama-${{ matrix.os }}-${{ matrix.arch }}.tar.in ;;
-             lib/ollama/cuda_v12) echo $COMPONENT >>ollama-${{ matrix.os }}-${{ matrix.arch }}.tar.in ;;
+             lib/ollama/*.so*) echo $COMPONENT >>ollama-${{ matrix.os }}-${{ matrix.arch }}.tar.in ;;
+             lib/ollama/cuda_sbsa) echo $COMPONENT >>ollama-${{ matrix.os }}-${{ matrix.arch }}.tar.in ;;
              lib/ollama/cuda_jetpack5) echo $COMPONENT >>ollama-${{ matrix.os }}-${{ matrix.arch }}-jetpack5.tar.in ;;
              lib/ollama/cuda_jetpack6) echo $COMPONENT >>ollama-${{ matrix.os }}-${{ matrix.arch }}-jetpack6.tar.in ;;
              lib/ollama/rocm) echo $COMPONENT >>ollama-${{ matrix.os }}-${{ matrix.arch }}-rocm.tar.in ;;
            esac
          done
        working-directory: dist/${{ matrix.os }}-${{ matrix.arch }}
+     - run: |
+         echo "Manifests"
+         for ARCHIVE in dist/${{ matrix.os }}-${{ matrix.arch }}/*.tar.in ; do
+           echo $ARCHIVE
+           cat $ARCHIVE
+         done
      - run: |
          for ARCHIVE in dist/${{ matrix.os }}-${{ matrix.arch }}/*.tar.in; do
            tar c -C dist/${{ matrix.os }}-${{ matrix.arch }} -T $ARCHIVE --owner 0 --group 0 | pigz -9vc >$(basename ${ARCHIVE//.*/}.tgz);
@@ -436,48 +349,16 @@ jobs:
   trigger:
     runs-on: ubuntu-latest
     environment: release
-    needs: [darwin-build, windows-build, windows-depends]
+    needs: [darwin-build, windows-build, windows-depends, linux-build]
-    steps:
-      - name: Trigger downstream release process
-        run: |
-          curl -L \
-            -X POST \
-            -H "Accept: application/vnd.github+json" \
-            -H "Authorization: Bearer ${{ secrets.RELEASE_TOKEN }}" \
-            -H "X-GitHub-Api-Version: 2022-11-28" \
-            https://api.github.com/repos/ollama/${{ vars.RELEASE_REPO }}/dispatches \
-            -d "{\"event_type\": \"trigger-workflow\", \"client_payload\": {\"run_id\": \"${GITHUB_RUN_ID}\", \"version\": \"${GITHUB_REF_NAME#v}\"}}"
-
-  # Aggregate all the assets and ship a release
-  release:
-    needs: [darwin-sign, windows-sign, linux-build]
-    runs-on: linux
-    environment: release
     permissions:
       contents: write
     env:
       GH_TOKEN: ${{ github.token }}
     steps:
      - uses: actions/checkout@v4
-      - uses: actions/download-artifact@v4
-        with:
-          name: dist-darwin
-          path: dist
-      - uses: actions/download-artifact@v4
-        with:
-          name: dist-windows
-          path: dist
-      - uses: actions/download-artifact@v4
-        with:
-          pattern: dist-linux-*
-          path: dist
-          merge-multiple: true
-      - run: find . -type f -not -name 'sha256sum.txt' | xargs sha256sum | tee sha256sum.txt
-        working-directory: dist
-      - name: Create or update Release
+      - name: Create or update Release for tag
        run: |
          RELEASE_VERSION="$(echo ${GITHUB_REF_NAME} | cut -f1 -d-)"
-
          echo "Looking for existing release for ${RELEASE_VERSION}"
          OLD_TAG=$(gh release ls --json name,tagName | jq -r ".[] | select(.name == \"${RELEASE_VERSION}\") | .tagName")
          if [ -n "$OLD_TAG" ]; then
@@ -491,5 +372,12 @@ jobs:
            --generate-notes \
            --prerelease
          fi
-         echo "Uploading artifacts for tag ${GITHUB_REF_NAME}"
-         gh release upload ${GITHUB_REF_NAME} dist/* --clobber
+      - name: Trigger downstream release process
+        run: |
+          curl -L \
+            -X POST \
+            -H "Accept: application/vnd.github+json" \
+            -H "Authorization: Bearer ${{ secrets.RELEASE_TOKEN }}" \
+            -H "X-GitHub-Api-Version: 2022-11-28" \
+            https://api.github.com/repos/ollama/${{ vars.RELEASE_REPO }}/dispatches \
+            -d "{\"event_type\": \"trigger-workflow\", \"client_payload\": {\"run_id\": \"${GITHUB_RUN_ID}\", \"version\": \"${GITHUB_REF_NAME#v}\", \"publish\": \"1\"}}"
.github/workflows/test.yaml (vendored): 17 lines changed
@@ -36,7 +36,7 @@ jobs:
            | xargs python3 -c "import sys; from pathlib import Path; print(any(Path(x).match(glob) for x in sys.argv[1:] for glob in '$*'.split(' ')))"
          }
 
-         echo changed=$(changed 'llama/llama.cpp/**' 'ml/backend/ggml/ggml/**') | tee -a $GITHUB_OUTPUT
+         echo changed=$(changed 'llama/llama.cpp/**/*' 'ml/backend/ggml/ggml/**/*') | tee -a $GITHUB_OUTPUT
 
   linux:
     needs: [changes]
@@ -46,7 +46,7 @@ jobs:
         include:
           - preset: CPU
           - preset: CUDA
-            container: nvidia/cuda:11.8.0-devel-ubuntu22.04
+            container: nvidia/cuda:12.8.1-devel-ubuntu22.04
             flags: '-DCMAKE_CUDA_ARCHITECTURES=87'
           - preset: ROCm
             container: rocm/dev-ubuntu-22.04:6.1.2
@@ -78,11 +78,11 @@ jobs:
         include:
           - preset: CPU
           - preset: CUDA
-            install: https://developer.download.nvidia.com/compute/cuda/11.3.1/local_installers/cuda_11.3.1_465.89_win10.exe
+            install: https://developer.download.nvidia.com/compute/cuda/12.8.0/local_installers/cuda_12.8.0_571.96_windows.exe
             flags: '-DCMAKE_CUDA_ARCHITECTURES=80'
           - preset: ROCm
             install: https://download.amd.com/developer/eula/rocm-hub/AMD-Software-PRO-Edition-24.Q4-WinSvr2022-For-HIP.exe
-            flags: '-DAMDGPU_TARGETS=gfx1010'
+            flags: '-DAMDGPU_TARGETS=gfx1010 -DCMAKE_C_COMPILER=clang -DCMAKE_CXX_COMPILER=clang++ -DCMAKE_C_FLAGS="-parallel-jobs=4 -Wno-ignored-attributes -Wno-deprecated-pragma" -DCMAKE_CXX_FLAGS="-parallel-jobs=4 -Wno-ignored-attributes -Wno-deprecated-pragma"'
     runs-on: windows
     steps:
       - run: |
@@ -102,7 +102,7 @@ jobs:
          $ErrorActionPreference = "Stop"
          if ("${{ steps.cache-install.outputs.cache-hit }}" -ne 'true') {
            Invoke-WebRequest -Uri "${{ matrix.install }}" -OutFile "install.exe"
-           Start-Process -FilePath .\install.exe -ArgumentList (@("-s", "cudart_11.3", "nvcc_11.3", "cublas_11.3", "cublas_dev_11.3")) -NoNewWindow -Wait
+           Start-Process -FilePath .\install.exe -ArgumentList (@("-s", "cudart_12.8", "nvcc_12.8", "cublas_12.8", "cublas_dev_12.8")) -NoNewWindow -Wait
          }
 
          $cudaPath = (Resolve-Path "C:\Program Files\NVIDIA GPU Computing Toolkit\CUDA\*").path
@@ -120,6 +120,9 @@ jobs:
          echo "$hipPath\bin" | Out-File -FilePath $env:GITHUB_PATH -Encoding utf8 -Append
          echo "CC=$hipPath\bin\clang.exe" | Out-File -FilePath $env:GITHUB_ENV -Append
          echo "CXX=$hipPath\bin\clang++.exe" | Out-File -FilePath $env:GITHUB_ENV -Append
+         echo "HIPCXX=$hipPath\bin\clang++.exe" | Out-File -FilePath $env:GITHUB_ENV -Append
+         echo "HIP_PLATFORM=amd" | Out-File -FilePath $env:GITHUB_ENV -Append
+         echo "CMAKE_PREFIX_PATH=$hipPath" | Out-File -FilePath $env:GITHUB_ENV -Append
      - if: ${{ !cancelled() && steps.cache-install.outputs.cache-hit != 'true' }}
        uses: actions/cache/save@v4
        with:
@@ -133,8 +136,8 @@ jobs:
          path: ${{ github.workspace }}\.ccache
          key: ccache-${{ runner.os }}-${{ runner.arch }}-${{ matrix.preset }}
      - run: |
-         Import-Module 'C:\Program Files (x86)\Microsoft Visual Studio\2019\Enterprise\Common7\Tools\Microsoft.VisualStudio.DevShell.dll'
-         Enter-VsDevShell -VsInstallPath 'C:\Program Files (x86)\Microsoft Visual Studio\2019\Enterprise' -SkipAutomaticLocation -DevCmdArguments '-arch=x64 -no_logo'
+         Import-Module 'C:\Program Files\Microsoft Visual Studio\2022\Enterprise\Common7\Tools\Microsoft.VisualStudio.DevShell.dll'
+         Enter-VsDevShell -VsInstallPath 'C:\Program Files\Microsoft Visual Studio\2022\Enterprise' -SkipAutomaticLocation -DevCmdArguments '-arch=x64 -no_logo'
          cmake --preset "${{ matrix.preset }}" ${{ matrix.flags }}
          cmake --build --parallel --preset "${{ matrix.preset }}"
        env:
@@ -78,14 +78,13 @@ if(CMAKE_CUDA_COMPILER)
 
     find_package(CUDAToolkit)
     add_subdirectory(${CMAKE_CURRENT_SOURCE_DIR}/ml/backend/ggml/ggml/src/ggml-cuda)
-    set(OLLAMA_CUDA_INSTALL_DIR ${OLLAMA_INSTALL_DIR}/cuda_v${CUDAToolkit_VERSION_MAJOR})
     install(TARGETS ggml-cuda
         RUNTIME_DEPENDENCIES
             DIRECTORIES ${CUDAToolkit_BIN_DIR} ${CUDAToolkit_LIBRARY_DIR}
             PRE_INCLUDE_REGEXES cublas cublasLt cudart
             PRE_EXCLUDE_REGEXES ".*"
-        RUNTIME DESTINATION ${OLLAMA_CUDA_INSTALL_DIR} COMPONENT CUDA
-        LIBRARY DESTINATION ${OLLAMA_CUDA_INSTALL_DIR} COMPONENT CUDA
+        RUNTIME DESTINATION ${OLLAMA_INSTALL_DIR} COMPONENT CUDA
+        LIBRARY DESTINATION ${OLLAMA_INSTALL_DIR} COMPONENT CUDA
     )
 endif()
 
@@ -117,7 +116,11 @@ if(CMAKE_HIP_COMPILER)
 
     set(OLLAMA_HIP_INSTALL_DIR ${OLLAMA_INSTALL_DIR}/rocm)
     install(TARGETS ggml-hip
-        RUNTIME_DEPENDENCIES
+        RUNTIME_DEPENDENCY_SET rocm
+        RUNTIME DESTINATION ${OLLAMA_INSTALL_DIR} COMPONENT HIP
+        LIBRARY DESTINATION ${OLLAMA_INSTALL_DIR} COMPONENT HIP
+    )
+    install(RUNTIME_DEPENDENCY_SET rocm
         DIRECTORIES ${HIP_BIN_INSTALL_DIR} ${HIP_LIB_INSTALL_DIR}
         PRE_INCLUDE_REGEXES hipblas rocblas amdhip64 rocsolver amd_comgr hsa-runtime64 rocsparse tinfo rocprofiler-register drm drm_amdgpu numa elf
         PRE_EXCLUDE_REGEXES ".*"
@@ -17,20 +17,12 @@
       "name": "CUDA",
       "inherits": [ "Default" ]
     },
-    {
-      "name": "CUDA 11",
-      "inherits": [ "CUDA" ],
-      "cacheVariables": {
-        "CMAKE_CUDA_ARCHITECTURES": "50;52;53;60;61;70;75;80;86",
-        "CMAKE_CUDA_FLAGS": "-Wno-deprecated-gpu-targets"
-      }
-    },
     {
       "name": "CUDA 12",
       "inherits": [ "CUDA" ],
       "cacheVariables": {
         "CMAKE_CUDA_ARCHITECTURES": "50;60;61;70;75;80;86;87;89;90;90a;120",
-        "CMAKE_CUDA_FLAGS": "-Wno-deprecated-gpu-targets"
+        "CMAKE_CUDA_FLAGS": "-Wno-deprecated-gpu-targets -t 2"
       }
     },
     {
@@ -58,6 +50,7 @@
       "name": "ROCm 6",
       "inherits": [ "ROCm" ],
       "cacheVariables": {
+        "CMAKE_HIP_FLAGS": "-parallel-jobs=4",
         "AMDGPU_TARGETS": "gfx803;gfx902;gfx1030;gfx1031;gfx1032;gfx1034;gfx1035;gfx1036;gfx1100;gfx1101;gfx1102;gfx1103;gfx1150;gfx1200;gfx1201;gfx900:xnack-;gfx906:xnack-;gfx90c:xnack-;gfx1010:xnack-;gfx1011:xnack-;gfx1012:xnack-;"
       }
     }
@@ -78,11 +71,6 @@
       "configurePreset": "CUDA",
       "targets": [ "ggml-cuda" ]
     },
-    {
-      "name": "CUDA 11",
-      "inherits": [ "CUDA" ],
-      "configurePreset": "CUDA 11"
-    },
     {
       "name": "CUDA 12",
       "inherits": [ "CUDA" ],
Dockerfile: 24 lines changed
@@ -7,12 +7,13 @@ ARG JETPACK5VERSION=r35.4.1
 ARG JETPACK6VERSION=r36.4.0
 ARG CMAKEVERSION=3.31.2
 
-# CUDA v11 requires gcc v10. v10.3 has regressions, so the rockylinux 8.5 AppStream has the latest compatible version
+# We require gcc v10 minimum. v10.3 has regressions, so the rockylinux 8.5 AppStream has the latest compatible version
 FROM --platform=linux/amd64 rocm/dev-almalinux-8:${ROCMVERSION}-complete AS base-amd64
 RUN yum install -y yum-utils \
     && yum-config-manager --add-repo https://dl.rockylinux.org/vault/rocky/8.5/AppStream/\$basearch/os/ \
     && rpm --import https://dl.rockylinux.org/pub/rocky/RPM-GPG-KEY-Rocky-8 \
     && dnf install -y yum-utils ccache gcc-toolset-10-gcc-10.2.1-8.2.el8 gcc-toolset-10-gcc-c++-10.2.1-8.2.el8 gcc-toolset-10-binutils-2.35-11.el8 \
+    && dnf install -y ccache \
     && yum-config-manager --add-repo https://developer.download.nvidia.com/compute/cuda/repos/rhel8/x86_64/cuda-rhel8.repo
 ENV PATH=/opt/rh/gcc-toolset-10/root/usr/bin:$PATH
 
@@ -38,15 +39,6 @@ RUN --mount=type=cache,target=/root/.ccache \
     && cmake --build --parallel --preset 'CPU' \
     && cmake --install build --component CPU --strip --parallel 8
 
-FROM base AS cuda-11
-ARG CUDA11VERSION=11.3
-RUN dnf install -y cuda-toolkit-${CUDA11VERSION//./-}
-ENV PATH=/usr/local/cuda-11/bin:$PATH
-RUN --mount=type=cache,target=/root/.ccache \
-    cmake --preset 'CUDA 11' \
-    && cmake --build --parallel --preset 'CUDA 11' \
-    && cmake --install build --component CUDA --strip --parallel 8
-
 FROM base AS cuda-12
 ARG CUDA12VERSION=12.8
 RUN dnf install -y cuda-toolkit-${CUDA12VERSION//./-}
@@ -98,17 +90,15 @@ RUN --mount=type=cache,target=/root/.cache/go-build \
     go build -trimpath -buildmode=pie -o /bin/ollama .
 
 FROM --platform=linux/amd64 scratch AS amd64
-COPY --from=cuda-11 dist/lib/ollama/cuda_v11 /lib/ollama/cuda_v11
-COPY --from=cuda-12 dist/lib/ollama/cuda_v12 /lib/ollama/cuda_v12
+COPY --from=cuda-12 dist/lib/ollama /lib/ollama
 
 FROM --platform=linux/arm64 scratch AS arm64
-COPY --from=cuda-11 dist/lib/ollama/cuda_v11 /lib/ollama/cuda_v11
-COPY --from=cuda-12 dist/lib/ollama/cuda_v12 /lib/ollama/cuda_v12
-COPY --from=jetpack-5 dist/lib/ollama/cuda_v11 /lib/ollama/cuda_jetpack5
-COPY --from=jetpack-6 dist/lib/ollama/cuda_v12 /lib/ollama/cuda_jetpack6
+COPY --from=cuda-12 dist/lib/ollama /lib/ollama/cuda_sbsa
+COPY --from=jetpack-5 dist/lib/ollama /lib/ollama/cuda_jetpack5
+COPY --from=jetpack-6 dist/lib/ollama /lib/ollama/cuda_jetpack6
 
 FROM scratch AS rocm
-COPY --from=rocm-6 dist/lib/ollama/rocm /lib/ollama/rocm
+COPY --from=rocm-6 dist/lib/ollama /lib/ollama
 
 FROM ${FLAVOR} AS archive
 COPY --from=cpu dist/lib/ollama /lib/ollama
@@ -431,6 +431,7 @@ See the [API documentation](./docs/api.md) for all endpoints.
 - [macLlama (macOS native)](https://github.com/hellotunamayo/macLlama) (A native macOS GUI application for interacting with Ollama models, featuring a chat interface.)
 - [GPTranslate](https://github.com/philberndt/GPTranslate) (A fast and lightweight, AI powered desktop translation application written with Rust and Tauri. Features real-time translation with OpenAI/Azure/Ollama.)
 - [ollama launcher](https://github.com/NGC13009/ollama-launcher) (A launcher for Ollama, aiming to provide users with convenient functions such as ollama server launching, management, or configuration.)
+- [ai-hub](https://github.com/Aj-Seven/ai-hub) (AI Hub supports multiple models via API keys and Chat support via Ollama API.)
 
 ### Cloud
 
@@ -476,6 +477,7 @@ See the [API documentation](./docs/api.md) for all endpoints.
 - [GGUF-to-Ollama](https://github.com/jonathanhecl/gguf-to-ollama) - Importing GGUF to Ollama made easy (multiplatform)
 - [AWS-Strands-With-Ollama](https://github.com/rapidarchitect/ollama_strands) - AWS Strands Agents with Ollama Examples
 - [ollama-multirun](https://github.com/attogram/ollama-multirun) - A bash shell script to run a single prompt against any or all of your locally installed ollama models, saving the output and performance statistics as easily navigable web pages. ([Demo](https://attogram.github.io/ai_test_zone/))
+- [ollama-bash-toolshed](https://github.com/attogram/ollama-bash-toolshed) - Bash scripts to chat with tool using models. Add new tools to your shed with ease. Runs on Ollama.
 
 ### Apple Vision Pro
 
@@ -190,6 +190,8 @@ func ConvertModel(fsys fs.FS, f *os.File) error {
 		conv = &gemma2Model{}
 	case "Gemma3ForCausalLM", "Gemma3ForConditionalGeneration":
 		conv = &gemma3Model{Architecture: p.Architectures[0]}
+	case "Gemma3nForConditionalGeneration":
+		conv = &gemma3nModel{}
 	case "Phi3ForCausalLM":
 		conv = &phi3Model{}
 	case "Qwen2ForCausalLM":
convert/convert_gemma3n.go: 165 lines (new file)
@@ -0,0 +1,165 @@
package convert

import (
	"slices"
	"strings"

	"github.com/ollama/ollama/fs/ggml"
	"github.com/pdevine/tensor"
	"github.com/pdevine/tensor/native"
	"gonum.org/v1/gonum/stat/distuv"
)

type gemma3nModel struct {
	ModelParameters

	TextModel struct {
		ActivationSparsityPattern []float32 `json:"activation_sparsity_pattern"`
		AltupActiveIdx            uint32    `json:"altup_active_idx"`
		AltupCoefClip             float32   `json:"altup_coef_clip"`
		AltupCorrectScale         bool      `json:"altup_correct_scale"`
		AltupLRMultiplier         float32   `json:"altup_lr_multiplier"`
		AltupNumInputs            uint32    `json:"altup_num_inputs"`
		HeadDim                   uint32    `json:"head_dim"`
		HiddenSize                uint32    `json:"hidden_size"`
		HiddenSizePerLayerInput   uint32    `json:"hidden_size_per_layer_input"`
		IntermediateSize          uint32    `json:"intermediate_size"`
		MaxPositionEmbeddings     uint32    `json:"max_position_embeddings"`
		NumAttentionHeads         uint32    `json:"num_attention_heads"`
		NumHiddenLayers           uint32    `json:"num_hidden_layers"`
		NumKeyValueHeads          uint32    `json:"num_key_value_heads"`
		NumKVSharedLayers         uint32    `json:"num_kv_shared_layers"`
		RMSNormEPS                float32   `json:"rms_norm_eps"`
		RopeLocalBaseFreq         float32   `json:"rope_local_base_freq"`
		RopeTheta                 float32   `json:"rope_theta"`
		SlidingWindow             uint32    `json:"sliding_window"`
		LayerTypes                []string  `json:"layer_types"`
	} `json:"text_config"`
	VisionModel struct{} `json:"vision_config"`
}

func (m *gemma3nModel) KV(t *Tokenizer) ggml.KV {
	kv := m.ModelParameters.KV(t)
	kv["general.architecture"] = "gemma3n"
	kv["gemma3n.activation_sparsity_scale"] = slices.Collect(func(yield func(float32) bool) {
		norm := distuv.Normal{Mu: 0, Sigma: 1}
		for _, v := range m.TextModel.ActivationSparsityPattern {
			if !yield(float32(norm.Quantile(float64(v)))) {
				break
			}
		}
	})
	kv["gemma3n.altup.active_idx"] = m.TextModel.AltupActiveIdx
	kv["gemma3n.altup.correct_scale"] = m.TextModel.AltupCorrectScale
	kv["gemma3n.altup.lr_multiplier"] = m.TextModel.AltupLRMultiplier
	kv["gemma3n.altup.num_inputs"] = m.TextModel.AltupNumInputs
	kv["gemma3n.attention.head_count_kv"] = m.TextModel.NumKeyValueHeads
	kv["gemma3n.attention.head_count"] = m.TextModel.NumAttentionHeads
	kv["gemma3n.attention.layer_norm_rms_epsilon"] = m.TextModel.RMSNormEPS
	kv["gemma3n.attention.sliding_window"] = m.TextModel.SlidingWindow
	kv["gemma3n.attention.sliding_window_pattern"] = slices.Collect(func(yield func(bool) bool) {
		for _, t := range m.TextModel.LayerTypes {
			if !yield(t == "sliding_attention") {
				break
			}
		}
	})
	kv["gemma3n.attention.shared_kv_layers"] = m.TextModel.NumKVSharedLayers
	kv["gemma3n.block_count"] = m.TextModel.NumHiddenLayers
	kv["gemma3n.context_length"] = m.TextModel.MaxPositionEmbeddings
	kv["gemma3n.embedding_length_per_layer_input"] = m.TextModel.HiddenSizePerLayerInput
	kv["gemma3n.embedding_length"] = m.TextModel.HiddenSize
	kv["gemma3n.feed_forward_length"] = m.TextModel.IntermediateSize
	kv["gemma3n.head_dim"] = m.TextModel.HeadDim
	kv["gemma3n.rope.freq_base_local"] = m.TextModel.RopeLocalBaseFreq
	kv["gemma3n.rope.freq_base"] = m.TextModel.RopeTheta
	return kv
}

func (m *gemma3nModel) Tensors(ts []Tensor) []*ggml.Tensor {
	out, ts := mergeTensors(ts,
		merge{"altup_proj.*.weight", "altup_proj.weight"},
		merge{"altup_unembd_proj.*.weight", "altup_unembd_proj.weight"},
	)

	for _, t := range ts {
		switch {
		case strings.Contains(t.Name(), "audio_tower"),
			strings.Contains(t.Name(), "embed_audio"),
			strings.Contains(t.Name(), "vision_tower"),
			strings.Contains(t.Name(), "embed_vision"):
			// TODO: handle audio and vision towers
			continue
		case strings.Contains(t.Name(), "altup_predict_coef"),
			strings.Contains(t.Name(), "altup_correct_coef"):
			if m.TextModel.AltupCoefClip > 0 {
				t.SetRepacker(func(name string, data []float32, shape []uint64) (_ []float32, err error) {
					dims := make([]int, len(shape))
					for i := range shape {
						dims[i] = int(shape[i])
					}

					var t tensor.Tensor = tensor.New(tensor.WithShape(dims...), tensor.WithBacking(data))

					t, err = tensor.Clamp(t, -m.TextModel.AltupCoefClip, m.TextModel.AltupCoefClip)
					if err != nil {
						return nil, err
					}

					if err := t.Reshape(t.Shape().TotalSize()); err != nil {
						return nil, err
					}

					return native.VectorF32(t.(*tensor.Dense))
				})
			}
		}

		out = append(out, &ggml.Tensor{
			Name:     t.Name(),
			Kind:     t.Kind(),
			Shape:    t.Shape(),
			WriterTo: t,
		})
	}

	return out
}

func (m *gemma3nModel) Replacements() []string {
	return []string{
		"model.language_model.embed_tokens_per_layer", "per_layer_token_embd",
		"model.language_model.embed_tokens", "token_embd",
		"model.language_model.per_layer_model_projection", "per_layer_model_proj",
		"model.language_model.per_layer_projection_norm", "per_layer_proj_norm",
		"model.language_model.altup_projections", "altup_proj",
		"model.language_model.altup_unembed_projections", "altup_unembd_proj",
		"model.language_model.norm", "output_norm",
		"model.language_model.layers", "blk",

		"input_layernorm", "attn_norm",
		"self_attn.q_proj", "attn_q",
		"self_attn.q_norm", "attn_q_norm",
		"self_attn.k_proj", "attn_k",
		"self_attn.k_norm", "attn_k_norm",
		"self_attn.v_proj", "attn_v",
		"self_attn.o_proj", "attn_output",
		"post_attention_layernorm", "post_attention_norm",
		"pre_feedforward_layernorm", "ffn_norm",
		"mlp.gate_proj", "ffn_gate",
		"mlp.up_proj", "ffn_up",
		"mlp.down_proj", "ffn_down",
		"post_feedforward_layernorm", "post_ffw_norm",
		"per_layer_input_gate", "inp_gate",
		"per_layer_projection", "proj",
		"post_per_layer_input_norm", "post_norm",
		"altup.", "altup_",
		"modality_router", "router",
		"prediction_coefs", "predict_coef",
		"correction_coefs", "correct_coef",
		"correct_output_scale", "correct_scale.weight",
		"laurel.", "laurel_",
		"linear_left", "l",
		"linear_right", "r",
		"post_laurel_norm", "post_norm",
	}
}
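
For context on the `activation_sparsity_scale` computation above: the converter stores the standard-normal quantile of each configured sparsity value before writing it to the GGUF metadata. A minimal, self-contained sketch of that mapping, assuming only gonum's `distuv` package (which the file itself imports):

```go
package main

import (
	"fmt"

	"gonum.org/v1/gonum/stat/distuv"
)

func main() {
	// Mirrors the slices.Collect loop in gemma3nModel.KV: each activation
	// sparsity pattern value p is mapped to the standard-normal quantile
	// Phi^-1(p) before being stored.
	norm := distuv.Normal{Mu: 0, Sigma: 1}
	for _, p := range []float64{0.25, 0.5, 0.95} {
		fmt.Printf("pattern %.2f -> scale %+.4f\n", p, norm.Quantile(p))
	}
	// pattern 0.25 -> scale -0.6745
	// pattern 0.50 -> scale +0.0000
	// pattern 0.95 -> scale +1.6449
}
```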
@@ -2,9 +2,6 @@ package convert
 
 import (
 	"fmt"
-	"io"
-	"slices"
-	"strings"
 
 	"github.com/ollama/ollama/fs/ggml"
 )
@@ -30,65 +27,38 @@ func (p *mixtralModel) KV(t *Tokenizer) ggml.KV {
 }
 
 func (p *mixtralModel) Tensors(ts []Tensor) []*ggml.Tensor {
-	oldnew := []string{
-		"model.layers", "blk",
-		"w1", "ffn_gate_exps",
-		"w2", "ffn_down_exps",
-		"w3", "ffn_up_exps",
-	}
-
-	for i := range p.NumLocalExperts {
-		oldnew = append(oldnew, fmt.Sprintf(".block_sparse_moe.experts.%d.", i), ".")
-	}
-
-	// group experts of the same layer (model.layers.%d) and type (w[123]) into a single tensor
-	namer := strings.NewReplacer(oldnew...)
-	experts := make(map[string]experts)
-
-	// merge experts into a single tensor while removing them from ts
-	ts = slices.DeleteFunc(ts, func(t Tensor) bool {
-		if !strings.Contains(t.Name(), ".block_sparse_moe.experts.") {
-			return false
-		}
-
-		name := namer.Replace(t.Name())
-		experts[name] = append(experts[name], t)
-		return true
-	})
-
-	var out []*ggml.Tensor
-	for n, e := range experts {
-		// TODO(mxyng): sanity check experts
-		out = append(out, &ggml.Tensor{
-			Name:     n,
-			Kind:     e[0].Kind(),
-			Shape:    append([]uint64{uint64(len(e))}, e[0].Shape()...),
-			WriterTo: e,
-		})
-	}
+	merges := make([]merge, 0, p.NumHiddenLayers*6)
+	for i := range p.NumHiddenLayers {
+		merges = append(merges, merge{
+			fmt.Sprintf("blk.%d.*.w1.weight", i),
+			fmt.Sprintf("blk.%d.ffn_gate_exps.weight", i),
+		}, merge{
+			fmt.Sprintf("blk.%d.*.w1.bias", i),
+			fmt.Sprintf("blk.%d.ffn_gate_exps.bias", i),
+		}, merge{
+			fmt.Sprintf("blk.%d.*.w2.weight", i),
+			fmt.Sprintf("blk.%d.ffn_up_exps.weight", i),
+		}, merge{
+			fmt.Sprintf("blk.%d.*.w2.bias", i),
+			fmt.Sprintf("blk.%d.ffn_up_exps.bias", i),
+		}, merge{
+			fmt.Sprintf("blk.%d.*.w3.weight", i),
+			fmt.Sprintf("blk.%d.ffn_down_exps.weight", i),
+		}, merge{
+			fmt.Sprintf("blk.%d.*.w3.bias", i),
+			fmt.Sprintf("blk.%d.ffn_down_exps.bias", i),
+		})
+	}
 
+	out, ts := mergeTensors(ts, merges...)
 	return append(out, p.llamaModel.Tensors(ts)...)
 }
 
 func (p *mixtralModel) Replacements() []string {
 	return append(
 		p.llamaModel.Replacements(),
+		"model.layers", "blk",
 		"block_sparse_moe.gate", "ffn_gate_inp",
+		"block_sparse_moe.experts.", ".",
 	)
 }
-
-type experts []Tensor
-
-func (e experts) WriteTo(w io.Writer) (int64, error) {
-	// TODO(mxyng): experts _should_ be numerically sorted by expert but this should check
-	for _, t := range e {
-		// the canonical merged experts tensor stacks all experts along a new, 0 axis,
-		// e.g. `tensor.Stack(0, e[0], e[1:]...)`, which requires allocating temporary buffers
-		// this accomplishes the same thing by writing each expert tensor in sequence
-		if _, err := t.WriteTo(w); err != nil {
-			return 0, err
-		}
-	}
-
-	return 0, nil
-}
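
A note on the new merge patterns: by the time Tensors runs, Replacements() has already rewritten the HF expert names, so a single path.Match pattern per layer and projection collects every expert of that layer. A small sketch of the matching (the tensor names here are illustrative, the exact post-replacement form depends on the replacement rules above):

```go
package main

import (
	"fmt"
	"path"
)

func main() {
	// Hypothetical post-replacement names of the form blk.<layer>.<expert>.w1.weight.
	// One pattern per layer matches all of that layer's experts so mergeTensors
	// can stack them into a single ffn_gate_exps tensor.
	names := []string{
		"blk.0.0.w1.weight",
		"blk.0.1.w1.weight",
		"blk.0.0.w2.weight", // different projection: not matched by the w1 pattern
		"blk.1.0.w1.weight", // different layer: not matched either
	}
	for _, n := range names {
		ok, _ := path.Match("blk.0.*.w1.weight", n)
		fmt.Printf("%-22s %v\n", n, ok)
	}
}
```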
@@ -2,7 +2,9 @@ package convert
 
 import (
 	"cmp"
+	"io"
 	"iter"
+	"path"
 	"slices"
 	"strings"
 
@@ -74,3 +76,54 @@ func splitDim(t Tensor, dim int, splits ...split) iter.Seq[*ggml.Tensor] {
 		}
 	}
 }
+
+type merge struct {
+	pattern, name string
+}
+
+// mergeTensors merges tensors that match a given pattern into a single tensor.
+func mergeTensors(unmatched []Tensor, merges ...merge) (out []*ggml.Tensor, _ []Tensor) {
+	var matched []Tensor
+	for i := range merges {
+		matched, unmatched = slicesSplitFunc(unmatched, func(t Tensor) bool {
+			matched, _ := path.Match(merges[i].pattern, t.Name())
+			return matched
+		})
+
+		if len(matched) > 0 {
+			out = append(out, &ggml.Tensor{
+				Name:     merges[i].name,
+				Kind:     matched[0].Kind(),
+				Shape:    append([]uint64{uint64(len(matched))}, matched[0].Shape()...),
+				WriterTo: mergeGroup(matched),
+			})
+		}
+	}
+
+	return out, unmatched
+}
+
+// slicesSplitFunc splits a slice into two slices based on a predicate function.
+func slicesSplitFunc[S ~[]E, E comparable](s S, fn func(e E) bool) (matched, unmatched S) {
+	for _, e := range s {
+		if fn(e) {
+			matched = append(matched, e)
+		} else {
+			unmatched = append(unmatched, e)
+		}
+	}
+
+	return matched, unmatched
+}
+
+type mergeGroup []Tensor
+
+func (g mergeGroup) WriteTo(w io.Writer) (int64, error) {
+	for _, t := range g {
+		if _, err := t.WriteTo(w); err != nil {
+			return 0, err
+		}
+	}
+
+	return 0, nil
+}
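
Why prepending uint64(len(matched)) to the shape is enough: as the deleted experts.WriteTo comment noted, stacking along a new leading axis is byte-identical to writing each member's flat buffer back to back, which is exactly what mergeGroup.WriteTo does. A standalone sketch of that equivalence, with plain float32 slices standing in for tensors:

```go
package main

import (
	"bytes"
	"encoding/binary"
	"fmt"
)

func main() {
	// Two "experts", each of flat shape [2, 2], written back to back the
	// way mergeGroup.WriteTo streams its members.
	expert0 := []float32{1, 2, 3, 4}
	expert1 := []float32{5, 6, 7, 8}

	var b bytes.Buffer
	for _, e := range [][]float32{expert0, expert1} {
		if err := binary.Write(&b, binary.LittleEndian, e); err != nil {
			panic(err)
		}
	}

	// 2 tensors * 4 floats * 4 bytes = 32 bytes: the same layout as a
	// single [2, 2, 2] tensor, i.e. the experts stacked along a new axis 0.
	fmt.Println(b.Len(), "bytes; merged shape would be [2 2 2]")
}
```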
@@ -9,6 +9,8 @@ import (
 	"strings"
 	"testing"
 
+	"github.com/google/go-cmp/cmp"
+	"github.com/ollama/ollama/fs/ggml"
 	"github.com/pdevine/tensor"
 )
 
@@ -302,3 +304,99 @@ func TestSplitDim(t *testing.T) {
 		}
 	})
 }
+
+func TestMerge(t *testing.T) {
+	unmatched := []Tensor{
+		&fakeTensor{
+			name:  "a.0.b",
+			shape: []uint64{5, 2},
+			data:  []float32{10, 11, 12, 13, 14, 15, 16, 17, 18, 19},
+		},
+		&fakeTensor{
+			name:  "a.1.b",
+			shape: []uint64{5, 2},
+			data:  []float32{20, 21, 22, 23, 24, 25, 26, 27, 28, 29},
+		},
+		&fakeTensor{
+			name:  "c.0.d",
+			shape: []uint64{5, 2},
+			data:  []float32{30, 31, 32, 33, 34, 35, 36, 37, 38, 39},
+		},
+		&fakeTensor{
+			name:  "c.1.d",
+			shape: []uint64{5, 2},
+			data:  []float32{40, 41, 42, 43, 44, 45, 46, 47, 48, 49},
+		},
+		&fakeTensor{
+			name:  "e.0.f",
+			shape: []uint64{5, 2},
+			data:  []float32{50, 51, 52, 53, 54, 55, 56, 57, 58, 59},
+		},
+	}
+
+	checkMatched := func(t *testing.T, n int, matched []*ggml.Tensor) {
+		for i := range n {
+			got := matched[i]
+			if diff := cmp.Diff([]uint64{2, 5, 2}, got.Shape); diff != "" {
+				t.Errorf("unexpected (-want +got):\n%s", diff)
+			}
+
+			var b bytes.Buffer
+			if _, err := got.WriteTo(&b); err != nil {
+				t.Fatal(err)
+			}
+
+			f32s := make([]float32, 20)
+			if err := binary.Read(&b, binary.LittleEndian, &f32s); err != nil {
+				t.Fatal(err)
+			}
+
+			offset := 10 + (i * 20)
+			want := make([]float32, 20)
+			for j := range 20 {
+				want[j] = float32(offset + j)
+			}
+
+			if diff := cmp.Diff(want, f32s); diff != "" {
+				t.Errorf("unexpected data (-want +got):\n%s", diff)
+			}
+		}
+	}
+
+	t.Run("single merge", func(t *testing.T) {
+		matched, unmatched := mergeTensors(unmatched, merge{"a.*.b", "a.b"})
+		if len(unmatched) != 3 {
+			t.Error("expected 3 remaining tensors, got", len(unmatched))
+		}
+
+		if len(matched) != 1 {
+			t.Error("expected 1 merged tensor, got", len(matched))
+		}
+
+		checkMatched(t, 1, matched)
+	})
+
+	t.Run("multiple merges", func(t *testing.T) {
+		matched, unmatched := mergeTensors(unmatched, merge{"a.*.b", "a.b"}, merge{"c.*.d", "c.d"})
+		if len(unmatched) != 1 {
+			t.Error("expected 1 remaining tensors, got", len(unmatched))
+		}
+
+		if len(matched) != 2 {
+			t.Error("expected 2 merged tensor, got", len(matched))
+		}
+
+		checkMatched(t, 2, matched)
+	})
+
+	t.Run("no match", func(t *testing.T) {
+		matched, unmatched := mergeTensors(unmatched, merge{"x.*.y", "x.y"})
+		if len(unmatched) != 5 {
+			t.Error("expected 5 remaining tensors, got", len(unmatched))
+		}
+
+		if len(matched) != 0 {
+			t.Error("expected no merged tensors, got", len(matched))
+		}
+	})
+}
@@ -3,6 +3,7 @@
 package discover
 
 import (
+	"fmt"
 	"log/slog"
 	"os"
 	"regexp"
@@ -55,10 +56,13 @@ func cudaVariant(gpuInfo CudaGPUInfo) string {
 			}
 		}
+		return "sbsa"
 	}
 
 	// driver 12.0 has problems with the cuda v12 library, so run v11 on those older drivers
 	if gpuInfo.DriverMajor < 12 || (gpuInfo.DriverMajor == 12 && gpuInfo.DriverMinor == 0) {
+		// The detected driver is older than Feb 2023
+		slog.Warn("old CUDA driver detected - please upgrade to a newer driver", "version", fmt.Sprintf("%d.%d", gpuInfo.DriverMajor, gpuInfo.DriverMinor))
 		return "v11"
 	}
 	return "v12"
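
The driver gate above reduces to a simple predicate; a minimal sketch of just that branch (the helper name here is ours, not the repo's):

```go
package main

import "fmt"

// pickCudaVariant mirrors the branch in cudaVariant: drivers at or below
// 12.0 (i.e. pre-Feb-2023) fall back to the CUDA v11 runtime libraries.
func pickCudaVariant(driverMajor, driverMinor int) string {
	if driverMajor < 12 || (driverMajor == 12 && driverMinor == 0) {
		return "v11"
	}
	return "v12"
}

func main() {
	fmt.Println(pickCudaVariant(11, 8)) // v11
	fmt.Println(pickCudaVariant(12, 0)) // v11 (12.0 has problems with the v12 library)
	fmt.Println(pickCudaVariant(12, 4)) // v12
}
```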
@@ -12,7 +12,7 @@ import (
 // '../lib/ollama' on Linux and the executable's directory on macOS
 // note: distribution builds, additional GPU-specific libraries are
 // found in subdirectories of the returned path, such as
-// 'cuda_v11', 'cuda_v12', 'rocm', etc.
+// 'cuda_v12', 'rocm', etc.
 var LibOllamaPath string = func() string {
 	exe, err := os.Executable()
 	if err != nil {
@@ -1,6 +1,6 @@
 # GPU
 ## Nvidia
-Ollama supports Nvidia GPUs with compute capability 5.0+.
+Ollama supports Nvidia GPUs with compute capability 5.0+ and driver version 531 and newer.
 
 Check your compute compatibility to see if your card is supported:
 [https://developer.nvidia.com/cuda-gpus](https://developer.nvidia.com/cuda-gpus)
@@ -43,7 +43,7 @@ Ollama includes multiple LLM libraries compiled for different GPUs and CPU vector
 In the server log, you will see a message that looks something like this (varies from release to release):
 
 ```
-Dynamic LLM libraries [rocm_v6 cpu cpu_avx cpu_avx2 cuda_v11 rocm_v5]
+Dynamic LLM libraries [rocm_v6 cpu cpu_avx cpu_avx2 cuda_v12 rocm_v5]
 ```
 
 **Experimental LLM Library Override**
@@ -10,4 +10,5 @@ type Config interface {
 	Strings(string, ...[]string) []string
 	Ints(string, ...[]int32) []int32
 	Floats(string, ...[]float32) []float32
+	Bools(string, ...[]bool) []bool
 }
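
The new Bools method follows the same variadic-default idiom as the other Config accessors, letting callers pass an optional fallback without a second method. A hypothetical in-memory implementation to illustrate the calling convention (cfg is not a type in the repo; the append trick is the one used in fs/ggml below):

```go
package main

import "fmt"

type cfg map[string]any

// Bools returns the []bool stored under key, or the optional default.
// append(defaultValue, []bool(nil))[0] yields the caller's default when
// one was passed and nil otherwise.
func (c cfg) Bools(key string, defaultValue ...[]bool) []bool {
	if v, ok := c[key].([]bool); ok {
		return v
	}
	return append(defaultValue, []bool(nil))[0]
}

func main() {
	c := cfg{"attention.sliding_window_pattern": []bool{true, true, false}}
	fmt.Println(c.Bools("attention.sliding_window_pattern")) // [true true false]
	fmt.Println(c.Bools("missing", []bool{true}))            // [true]
	fmt.Println(c.Bools("missing"))                          // []
}
```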
fs/ggml/ggml.go: 116 lines changed
@@ -34,7 +34,8 @@ func (kv KV) Kind() string {
|
|||||||
}
|
}
|
||||||
|
|
||||||
func (kv KV) ParameterCount() uint64 {
|
func (kv KV) ParameterCount() uint64 {
|
||||||
return keyValue(kv, "general.parameter_count", uint64(0))
|
val, _ := keyValue(kv, "general.parameter_count", uint64(0))
|
||||||
|
return val
|
||||||
}
|
}
|
||||||
|
|
||||||
func (kv KV) FileType() FileType {
|
func (kv KV) FileType() FileType {
|
||||||
@@ -53,16 +54,27 @@ func (kv KV) EmbeddingLength() uint64 {
|
|||||||
return uint64(kv.Uint("embedding_length"))
|
return uint64(kv.Uint("embedding_length"))
|
||||||
}
|
}
|
||||||
|
|
||||||
func (kv KV) HeadCount() uint64 {
|
func (kv KV) HeadCountMax() uint64 {
|
||||||
return uint64(kv.Uint("attention.head_count"))
|
// TODO(drifkin): using the max value can cause an overestimation. In the
|
||||||
|
// future if array values become more popular, we can adapt the more invasive
|
||||||
|
// <https://github.com/ollama/ollama/pull/10225>
|
||||||
|
return uint64(kv.UintOrMaxArrayValue("attention.head_count", 1))
|
||||||
}
|
}
|
||||||
|
|
||||||
func (kv KV) HeadCountKV() uint64 {
|
func (kv KV) HeadCountMin() uint64 {
|
||||||
return uint64(kv.Uint("attention.head_count_kv", 1))
|
return uint64(kv.UintOrMinArrayValue("attention.head_count", 1))
|
||||||
}
|
}
|
||||||
|
|
||||||
func (kv KV) EmbeddingHeadCount() uint64 {
|
func (kv KV) HeadCountKVMax() uint64 {
|
||||||
if heads := kv.HeadCount(); heads > 0 {
|
return uint64(kv.UintOrMaxArrayValue("attention.head_count_kv", 1))
|
||||||
|
}
|
||||||
|
|
||||||
|
func (kv KV) HeadCountKVMin() uint64 {
|
||||||
|
return uint64(kv.UintOrMinArrayValue("attention.head_count_kv", 1))
|
||||||
|
}
|
||||||
|
|
||||||
|
func (kv KV) EmbeddingHeadCountMax() uint64 {
|
||||||
|
if heads := kv.HeadCountMin(); heads > 0 {
|
||||||
return kv.EmbeddingLength() / heads
|
return kv.EmbeddingLength() / heads
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -70,15 +82,11 @@ func (kv KV) EmbeddingHeadCount() uint64 {
|
|||||||
}
|
}
|
||||||
|
|
||||||
func (kv KV) EmbeddingHeadCountK() uint64 {
|
func (kv KV) EmbeddingHeadCountK() uint64 {
|
||||||
return uint64(kv.Uint("attention.key_length", uint32(kv.EmbeddingHeadCount())))
|
return uint64(kv.Uint("attention.key_length", uint32(kv.EmbeddingHeadCountMax())))
|
||||||
}
|
}
|
||||||
|
|
||||||
func (kv KV) EmbeddingHeadCountV() uint64 {
|
func (kv KV) EmbeddingHeadCountV() uint64 {
|
||||||
return uint64(kv.Uint("attention.value_length", uint32(kv.EmbeddingHeadCount())))
|
return uint64(kv.Uint("attention.value_length", uint32(kv.EmbeddingHeadCountMax())))
|
||||||
}
|
|
||||||
|
|
||||||
func (kv KV) GQA() uint64 {
|
|
||||||
return kv.HeadCount() / kv.HeadCountKV()
|
|
||||||
}
|
}
|
||||||
|
|
||||||
func (kv KV) ContextLength() uint64 {
|
func (kv KV) ContextLength() uint64 {
|
||||||
@@ -90,40 +98,83 @@ func (kv KV) ChatTemplate() string {
 }
 
 func (kv KV) String(key string, defaultValue ...string) string {
-    return keyValue(kv, key, append(defaultValue, "")...)
+    val, _ := keyValue(kv, key, append(defaultValue, "")...)
+    return val
 }
 
 func (kv KV) Uint(key string, defaultValue ...uint32) uint32 {
-    return keyValue(kv, key, append(defaultValue, 0)...)
+    val, _ := keyValue(kv, key, append(defaultValue, 0)...)
+    return val
 }
 
 func (kv KV) Float(key string, defaultValue ...float32) float32 {
-    return keyValue(kv, key, append(defaultValue, 0)...)
+    val, _ := keyValue(kv, key, append(defaultValue, 0)...)
+    return val
 }
 
 func (kv KV) Bool(key string, defaultValue ...bool) bool {
-    return keyValue(kv, key, append(defaultValue, false)...)
+    val, _ := keyValue(kv, key, append(defaultValue, false)...)
+    return val
+}
+
+func (kv KV) UintOrMaxArrayValue(key string, defaultValue uint32) uint32 {
+    _, max := kv.UintOrArrayValue(key, defaultValue)
+    return max
+}
+
+func (kv KV) UintOrMinArrayValue(key string, defaultValue uint32) uint32 {
+    min, _ := kv.UintOrArrayValue(key, defaultValue)
+    return min
+}
+
+func (kv KV) UintOrArrayValue(key string, defaultValue uint32) (uint32, uint32) {
+    if u32, ok := keyValue(kv, key, uint32(0)); ok {
+        return u32, u32
+    } else if u32s, ok := keyValue(kv, key, &array[uint32]{}); ok {
+        min := slices.Min(u32s.values)
+        max := slices.Max(u32s.values)
+        return min, max
+    } else if i32s, ok := keyValue(kv, key, &array[int32]{}); ok {
+        min := slices.Min(i32s.values)
+        max := slices.Max(i32s.values)
+        if min < 0 || max < 0 {
+            slog.Warn("array values are unexpectedly negative", "key", key, "min", min, "max", max)
+        }
+        return uint32(min), uint32(max)
+    }
+
+    return defaultValue, defaultValue
 }
 
 func (kv KV) Strings(key string, defaultValue ...[]string) []string {
-    return keyValue(kv, key, &array[string]{values: append(defaultValue, []string(nil))[0]}).values
+    val, _ := keyValue(kv, key, &array[string]{values: append(defaultValue, []string(nil))[0]})
+    return val.values
 }
 
 func (kv KV) Ints(key string, defaultValue ...[]int32) []int32 {
-    return keyValue(kv, key, &array[int32]{values: append(defaultValue, []int32(nil))[0]}).values
+    val, _ := keyValue(kv, key, &array[int32]{values: append(defaultValue, []int32(nil))[0]})
+    return val.values
 }
 
 func (kv KV) Uints(key string, defaultValue ...[]uint32) []uint32 {
-    return keyValue(kv, key, &array[uint32]{values: append(defaultValue, []uint32(nil))[0]}).values
+    val, _ := keyValue(kv, key, &array[uint32]{values: append(defaultValue, []uint32(nil))[0]})
+    return val.values
 }
 
 func (kv KV) Floats(key string, defaultValue ...[]float32) []float32 {
-    return keyValue(kv, key, &array[float32]{values: append(defaultValue, []float32(nil))[0]}).values
+    val, _ := keyValue(kv, key, &array[float32]{values: append(defaultValue, []float32(nil))[0]})
+    return val.values
+}
+
+func (kv KV) Bools(key string, defaultValue ...[]bool) []bool {
+    val, _ := keyValue(kv, key, &array[bool]{values: append(defaultValue, []bool(nil))[0]})
+    return val.values
 }
 
 func (kv KV) OllamaEngineRequired() bool {
     return slices.Contains([]string{
         "gemma3",
+        "gemma3n",
         "mistral3",
         "llama4",
         "mllama",
@@ -143,17 +194,17 @@ type arrayValueTypes interface {
     *array[string] | *array[float32] | *array[float64] | *array[bool]
 }
 
-func keyValue[T valueTypes | arrayValueTypes](kv KV, key string, defaultValue ...T) T {
+func keyValue[T valueTypes | arrayValueTypes](kv KV, key string, defaultValue ...T) (T, bool) {
     if !strings.HasPrefix(key, "tokenizer.") && !strings.HasPrefix(key, "general.") {
         key = kv.Architecture() + "." + key
     }
 
-    if val, ok := kv[key]; ok {
-        return val.(T)
+    if val, ok := kv[key].(T); ok {
+        return val, true
     }
 
-    slog.Debug("key not found", "key", key, "default", defaultValue[0])
-    return defaultValue[0]
+    slog.Debug("key with type not found", "key", key, "default", defaultValue[0])
+    return defaultValue[0], false
 }
 
 type Tensors struct {
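
For readers skimming the diff, a minimal in-package sketch (not part of the commit) of how the new two-value lookup behaves: keyValue now reports whether the typed lookup hit, which is what lets UintOrArrayValue try a scalar first and fall back to an array. This assumes the fs/ggml internals shown above, written in the style of TestHeadCount:

func TestUintOrArrayValueSketch(t *testing.T) {
    // A scalar value yields the same min and max.
    kv := KV{
        "general.architecture":     "abc",
        "abc.attention.head_count": uint32(3),
    }
    if min, max := kv.UintOrArrayValue("attention.head_count", 1); min != 3 || max != 3 {
        t.Errorf("scalar: got min=%d max=%d, want 3 3", min, max)
    }

    // An array value yields its slices.Min and slices.Max instead.
    kv["abc.attention.head_count"] = &array[uint32]{values: []uint32{1, 5, 3, 4}, size: 4}
    if min, max := kv.UintOrArrayValue("attention.head_count", 1); min != 1 || max != 5 {
        t.Errorf("array: got min=%d max=%d, want 1 5", min, max)
    }
}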
@@ -425,11 +476,11 @@ func Decode(rs io.ReadSeeker, maxArraySize int) (*GGML, error) {
 
 func (f GGML) GraphSize(context, batch uint64, numParallel int, kvCacheType string) (kv []uint64, partialOffload, fullOffload uint64) {
     embedding := f.KV().EmbeddingLength()
-    heads := f.KV().HeadCount()
-    headsKV := f.KV().HeadCountKV()
+    heads := f.KV().HeadCountMax()
+    headsKV := f.KV().HeadCountKVMax()
     vocab := uint64(f.KV()["tokenizer.ggml.tokens"].(*array[string]).size)
 
-    embeddingHeads := f.KV().EmbeddingHeadCount()
+    embeddingHeads := f.KV().EmbeddingHeadCountMax()
     embeddingHeadsK := f.KV().EmbeddingHeadCountK()
     embeddingHeadsV := f.KV().EmbeddingHeadCountV()
 
@@ -504,7 +555,7 @@ func (f GGML) GraphSize(context, batch uint64, numParallel int, kvCacheType stri
             // vocab graph
             4*batch*(embedding+vocab)+embedding*vocab*105/128,
         )
-    case "gemma", "gemma2", "gemma3":
+    case "gemma", "gemma2", "gemma3", "gemma3n":
         fullOffload = max(
             4*batch*(embedding+vocab),
             4*batch*(2+context+context*heads+2*embedding+2*embeddingHeadsK*heads),
@@ -517,6 +568,11 @@ func (f GGML) GraphSize(context, batch uint64, numParallel int, kvCacheType stri
             embedding*embeddingHeadsK*heads*9/16,
         )
 
+        if f.KV().Architecture() == "gemma3n" {
+            fullOffload *= 4
+            partialOffload *= 4
+        }
+
         // Gemma2 also has sliding window attention but we only have an optimized implementation in the Ollama
         // engine. Gemma3 always uses the Ollama engine.
         if f.KV().Architecture() == "gemma3" {

@@ -269,3 +269,33 @@ func TestKeyValue(t *testing.T) {
             t.Errorf("unexpected uint8s (-got +want):\n%s", diff)
         }
     }
+
+func TestHeadCount(t *testing.T) {
+    valuesArray := []int32{1, 5, 3, 4}
+    cases := []struct {
+        kv   KV
+        want uint64
+    }{
+        {
+            kv: KV{
+                "general.architecture":     "abc",
+                "abc.attention.head_count": &array[int32]{values: valuesArray, size: len(valuesArray)},
+            },
+            want: uint64(5),
+        },
+        {
+            kv: KV{
+                "general.architecture":     "abc",
+                "abc.attention.head_count": uint32(3),
+            },
+            want: uint64(3),
+        },
+    }
+
+    for _, tt := range cases {
+        got := tt.kv.HeadCountMax()
+        if got != tt.want {
+            t.Errorf("unexpected max value: got=%d want=%d", got, tt.want)
+        }
+    }
+}
@@ -609,6 +609,10 @@ func ggufWriteKV(ws io.WriteSeeker, k string, v any) error {
         err = writeGGUFArray(ws, ggufTypeString, v)
     case *array[string]:
         err = writeGGUFArray(ws, ggufTypeString, v.values)
+    case []bool:
+        err = writeGGUFArray(ws, ggufTypeBool, v)
+    case *array[bool]:
+        err = writeGGUFArray(ws, ggufTypeBool, v.values)
     default:
         return fmt.Errorf("improper type for '%s'", k)
     }
347  fs/gguf/gguf.go  Normal file
@@ -0,0 +1,347 @@
package gguf

import (
    "bytes"
    "cmp"
    "encoding/binary"
    "errors"
    "fmt"
    "io"
    "iter"
    "os"
    "slices"
    "strings"
)

const (
    typeUint8 uint32 = iota
    typeInt8
    typeUint16
    typeInt16
    typeUint32
    typeInt32
    typeFloat32
    typeBool
    typeString
    typeArray
    typeUint64
    typeInt64
    typeFloat64
)

var ErrUnsupported = errors.New("unsupported")

type File struct {
    Magic   [4]byte
    Version uint32

    keyValues *lazy[KeyValue]
    tensors   *lazy[TensorInfo]
    offset    int64

    file   *os.File
    reader *bufferedReader
    bts    []byte
}

func Open(path string) (f *File, err error) {
    f = &File{bts: make([]byte, 4096)}
    f.file, err = os.Open(path)
    if err != nil {
        return nil, err
    }

    f.reader = newBufferedReader(f.file, 32<<10)

    if err := binary.Read(f.reader, binary.LittleEndian, &f.Magic); err != nil {
        return nil, err
    }

    if bytes.Equal(f.Magic[:], []byte("gguf")) {
        return nil, fmt.Errorf("%w file type %v", ErrUnsupported, f.Magic)
    }

    if err := binary.Read(f.reader, binary.LittleEndian, &f.Version); err != nil {
        return nil, err
    }

    if f.Version < 2 {
        return nil, fmt.Errorf("%w version %v", ErrUnsupported, f.Version)
    }

    f.tensors, err = newLazy(f, f.readTensor)
    if err != nil {
        return nil, err
    }

    f.tensors.successFunc = func() error {
        offset := f.reader.offset

        alignment := cmp.Or(f.KeyValue("general.alignment").Int(), 32)
        f.offset = offset + (alignment-offset%alignment)%alignment
        return nil
    }

    f.keyValues, err = newLazy(f, f.readKeyValue)
    if err != nil {
        return nil, err
    }

    return f, nil
}

func (f *File) readTensor() (TensorInfo, error) {
    name, err := readString(f)
    if err != nil {
        return TensorInfo{}, err
    }

    dims, err := read[uint32](f)
    if err != nil {
        return TensorInfo{}, err
    }

    shape := make([]uint64, dims)
    for i := range dims {
        shape[i], err = read[uint64](f)
        if err != nil {
            return TensorInfo{}, err
        }
    }

    type_, err := read[uint32](f)
    if err != nil {
        return TensorInfo{}, err
    }

    offset, err := read[uint64](f)
    if err != nil {
        return TensorInfo{}, err
    }

    return TensorInfo{
        Name:   name,
        Offset: offset,
        Shape:  shape,
        Type:   TensorType(type_),
    }, nil
}

func (f *File) readKeyValue() (KeyValue, error) {
    key, err := readString(f)
    if err != nil {
        return KeyValue{}, err
    }

    t, err := read[uint32](f)
    if err != nil {
        return KeyValue{}, err
    }

    value, err := func() (any, error) {
        switch t {
        case typeUint8:
            return read[uint8](f)
        case typeInt8:
            return read[int8](f)
        case typeUint16:
            return read[uint16](f)
        case typeInt16:
            return read[int16](f)
        case typeUint32:
            return read[uint32](f)
        case typeInt32:
            return read[int32](f)
        case typeUint64:
            return read[uint64](f)
        case typeInt64:
            return read[int64](f)
        case typeFloat32:
            return read[float32](f)
        case typeFloat64:
            return read[float64](f)
        case typeBool:
            return read[bool](f)
        case typeString:
            return readString(f)
        case typeArray:
            return readArray(f)
        default:
            return nil, fmt.Errorf("%w type %d", ErrUnsupported, t)
        }
    }()
    if err != nil {
        return KeyValue{}, err
    }

    return KeyValue{
        Key:   key,
        Value: Value{value},
    }, nil
}

func read[T any](f *File) (t T, err error) {
    err = binary.Read(f.reader, binary.LittleEndian, &t)
    return t, err
}

func readString(f *File) (string, error) {
    n, err := read[uint64](f)
    if err != nil {
        return "", err
    }

    if int(n) > len(f.bts) {
        f.bts = make([]byte, n)
    }

    bts := f.bts[:n]
    if _, err := io.ReadFull(f.reader, bts); err != nil {
        return "", err
    }
    defer clear(bts)

    return string(bts), nil
}

func readArray(f *File) (any, error) {
    t, err := read[uint32](f)
    if err != nil {
        return nil, err
    }

    n, err := read[uint64](f)
    if err != nil {
        return nil, err
    }

    switch t {
    case typeUint8:
        return readArrayData[uint8](f, n)
    case typeInt8:
        return readArrayData[int8](f, n)
    case typeUint16:
        return readArrayData[uint16](f, n)
    case typeInt16:
        return readArrayData[int16](f, n)
    case typeUint32:
        return readArrayData[uint32](f, n)
    case typeInt32:
        return readArrayData[int32](f, n)
    case typeUint64:
        return readArrayData[uint64](f, n)
    case typeInt64:
        return readArrayData[int64](f, n)
    case typeFloat32:
        return readArrayData[float32](f, n)
    case typeFloat64:
        return readArrayData[float64](f, n)
    case typeBool:
        return readArrayData[bool](f, n)
    case typeString:
        return readArrayString(f, n)
    default:
        return nil, fmt.Errorf("%w type %d", ErrUnsupported, t)
    }
}

func readArrayData[T any](f *File, n uint64) (s []T, err error) {
    s = make([]T, n)
    for i := range n {
        e, err := read[T](f)
        if err != nil {
            return nil, err
        }

        s[i] = e
    }

    return s, nil
}

func readArrayString(f *File, n uint64) (s []string, err error) {
    s = make([]string, n)
    for i := range n {
        e, err := readString(f)
        if err != nil {
            return nil, err
        }

        s[i] = e
    }

    return s, nil
}

func (f *File) Close() error {
    f.keyValues.stop()
    f.tensors.stop()
    return f.file.Close()
}

func (f *File) KeyValue(key string) KeyValue {
    if !strings.HasPrefix(key, "general.") && !strings.HasPrefix(key, "tokenizer.") {
        key = f.KeyValue("general.architecture").String() + "." + key
    }

    if index := slices.IndexFunc(f.keyValues.values, func(kv KeyValue) bool {
        return kv.Key == key
    }); index >= 0 {
        return f.keyValues.values[index]
    }

    for keyValue, ok := f.keyValues.next(); ok; keyValue, ok = f.keyValues.next() {
        if keyValue.Key == key {
            return keyValue
        }
    }

    return KeyValue{}
}

func (f *File) NumKeyValues() int {
    return int(f.keyValues.count)
}

func (f *File) KeyValues() iter.Seq2[int, KeyValue] {
    return f.keyValues.All()
}

func (f *File) TensorInfo(name string) TensorInfo {
    if index := slices.IndexFunc(f.tensors.values, func(t TensorInfo) bool {
        return t.Name == name
    }); index >= 0 {
        return f.tensors.values[index]
    }

    // fast-forward through key values if we haven't already
    _ = f.keyValues.rest()
    for tensor, ok := f.tensors.next(); ok; tensor, ok = f.tensors.next() {
        if tensor.Name == name {
            return tensor
        }
    }

    return TensorInfo{}
}

func (f *File) NumTensors() int {
    return int(f.tensors.count)
}

func (f *File) TensorInfos() iter.Seq2[int, TensorInfo] {
    // fast forward through key values if we haven't already
    f.keyValues.rest()
    return f.tensors.All()
}

func (f *File) TensorReader(name string) (TensorInfo, io.Reader, error) {
    t := f.TensorInfo(name)
    if t.NumBytes() == 0 {
        return TensorInfo{}, nil, fmt.Errorf("tensor %s not found", name)
    }

    // fast forward through tensor info if we haven't already
    _ = f.tensors.rest()
    return t, io.NewSectionReader(f.file, f.offset+int64(t.Offset), t.NumBytes()), nil
}
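
For orientation, a standalone usage sketch of the lazy reader (not part of the commit); "model.gguf" and the tensor name are placeholders:

package main

import (
    "fmt"
    "io"
    "log"

    "github.com/ollama/ollama/fs/gguf"
)

func main() {
    f, err := gguf.Open("model.gguf") // placeholder path
    if err != nil {
        log.Fatal(err)
    }
    defer f.Close()

    // Key-values are decoded on demand and memoized.
    fmt.Println("architecture:", f.KeyValue("general.architecture").String())

    // TensorReader fast-forwards past the metadata and returns a
    // section reader over just this tensor's bytes.
    ti, r, err := f.TensorReader("token_embd.weight")
    if err != nil {
        log.Fatal(err)
    }
    fmt.Printf("%s: %d bytes\n", ti.Name, ti.NumBytes())

    if _, err := io.Copy(io.Discard, r); err != nil { // stream without loading the whole file
        log.Fatal(err)
    }
}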
249  fs/gguf/gguf_test.go  Normal file
@@ -0,0 +1,249 @@
package gguf_test

import (
    "bytes"
    "os"
    "strconv"
    "strings"
    "testing"

    "github.com/google/go-cmp/cmp"
    "github.com/google/go-cmp/cmp/cmpopts"
    "github.com/ollama/ollama/fs/ggml"
    "github.com/ollama/ollama/fs/gguf"
)

func createBinFile(tb testing.TB) string {
    tb.Helper()
    f, err := os.CreateTemp(tb.TempDir(), "")
    if err != nil {
        tb.Fatal(err)
    }
    defer f.Close()

    kv := ggml.KV{
        "general.architecture":                   "llama",
        "llama.block_count":                      uint32(8),
        "llama.embedding_length":                 uint32(3),
        "llama.attention.head_count":             uint32(2),
        "llama.attention.head_count_kv":          uint32(2),
        "llama.attention.key_length":             uint32(3),
        "llama.rope.dimension_count":             uint32(4),
        "llama.rope.freq_base":                   float32(10000.0),
        "llama.rope.freq_scale":                  float32(1.0),
        "llama.attention.layer_norm_rms_epsilon": float32(1e-6),
        "tokenizer.ggml.eos_token_id":            uint32(0),
        "tokenizer.ggml.eos_token_ids":           []int32{1, 2, 3},
        "tokenizer.ggml.tokens":                  []string{"hello", "world"},
        "tokenizer.ggml.scores":                  []float32{0, 1},
    }

    tensors := []*ggml.Tensor{
        {
            Name:     "token_embd.weight",
            Kind:     0,
            Shape:    []uint64{2, 3},
            WriterTo: bytes.NewBuffer(make([]byte, 4*2*3)),
        },
        {
            Name:     "output.weight",
            Kind:     0,
            Shape:    []uint64{3, 2},
            WriterTo: bytes.NewBuffer(make([]byte, 4*3*2)),
        },
    }

    for i := range 8 {
        tensors = append(tensors, &ggml.Tensor{
            Name:     "blk." + strconv.Itoa(i) + ".attn_q.weight",
            Kind:     0,
            Shape:    []uint64{3, 3},
            WriterTo: bytes.NewBuffer(make([]byte, 4*3*3)),
        }, &ggml.Tensor{
            Name:     "blk." + strconv.Itoa(i) + ".attn_k.weight",
            Kind:     0,
            Shape:    []uint64{3, 3},
            WriterTo: bytes.NewBuffer(make([]byte, 4*3*3)),
        }, &ggml.Tensor{
            Name:     "blk." + strconv.Itoa(i) + ".attn_v.weight",
            Kind:     0,
            Shape:    []uint64{3, 3},
            WriterTo: bytes.NewBuffer(make([]byte, 4*3*3)),
        }, &ggml.Tensor{
            Name:     "blk." + strconv.Itoa(i) + ".attn_output.weight",
            Kind:     0,
            Shape:    []uint64{3, 3},
            WriterTo: bytes.NewBuffer(make([]byte, 4*3*3)),
        })
    }

    if err := ggml.WriteGGUF(f, kv, tensors); err != nil {
        tb.Fatal(err)
    }

    return f.Name()
}

func TestRead(t *testing.T) {
    f, err := gguf.Open(createBinFile(t))
    if err != nil {
        t.Fatal(err)
    }
    defer f.Close()

    if got := f.KeyValue("does.not.exist").Valid(); got {
        t.Errorf(`KeyValue("does.not.exist").Valid() = %v, want false`, got)
    }

    if got := f.KeyValue("general.architecture").String(); got != "llama" {
        t.Errorf(`KeyValue("general.architecture").String() = %q, want %q`, got, "llama")
    }

    if got := f.TensorInfo("token_embd.weight"); got.Name != "token_embd.weight" {
        t.Errorf(`TensorInfo("token_embd.weight").Name = %q, want %q`, got.Name, "token_embd.weight")
    } else if diff := cmp.Diff(got.Shape, []uint64{2, 3}); diff != "" {
        t.Errorf(`TensorInfo("token_embd.weight").Shape mismatch (-got +want):\n%s`, diff)
    } else if got.Type != gguf.TensorTypeF32 {
        t.Errorf(`TensorInfo("token_embd.weight").Type = %d, want %d`, got.Type, gguf.TensorTypeF32)
    }

    if got := f.KeyValue("block_count").Uint(); got != 8 {
        t.Errorf(`KeyValue("block_count").Uint() = %d, want %d`, got, 8)
    }

    if diff := cmp.Diff(f.KeyValue("tokenizer.ggml.tokens").Strings(), []string{"hello", "world"}); diff != "" {
        t.Errorf("KeyValue(\"tokenizer.ggml.tokens\").Strings() mismatch (-got +want):\n%s", diff)
    }

    if diff := cmp.Diff(f.KeyValue("tokenizer.ggml.scores").Floats(), []float64{0, 1}); diff != "" {
        t.Errorf("KeyValue(\"tokenizer.ggml.scores\").Floats() mismatch (-got +want):\n%s", diff)
    }

    var kvs []string
    for _, kv := range f.KeyValues() {
        if !kv.Valid() {
            t.Error("found invalid key-value pair:", kv)
        }

        kvs = append(kvs, kv.Key)
    }

    if len(kvs) != f.NumKeyValues() {
        t.Errorf("iterated key count = %d, want %d", len(kvs), f.NumKeyValues())
    }

    if diff := cmp.Diff(kvs, []string{
        "general.architecture",
        "llama.block_count",
        "llama.embedding_length",
        "llama.attention.head_count",
        "llama.attention.head_count_kv",
        "llama.attention.key_length",
        "llama.rope.dimension_count",
        "llama.rope.freq_base",
        "llama.rope.freq_scale",
        "llama.attention.layer_norm_rms_epsilon",
        "tokenizer.ggml.eos_token_id",
        "tokenizer.ggml.eos_token_ids",
        "tokenizer.ggml.tokens",
        "tokenizer.ggml.scores",
    }, cmpopts.SortSlices(strings.Compare)); diff != "" {
        t.Errorf("KeyValues() mismatch (-got +want):\n%s", diff)
    }

    var tis []string
    for _, ti := range f.TensorInfos() {
        if !ti.Valid() {
            t.Error("found invalid tensor info:", ti)
        }

        tis = append(tis, ti.Name)
    }

    if len(tis) != f.NumTensors() {
        t.Errorf("iterated tensor count = %d, want %d", len(tis), f.NumTensors())
    }

    if diff := cmp.Diff(tis, []string{
        "token_embd.weight",
        "output.weight",
        "blk.0.attn_q.weight",
        "blk.0.attn_k.weight",
        "blk.0.attn_v.weight",
        "blk.0.attn_output.weight",
        "blk.1.attn_q.weight",
        "blk.1.attn_k.weight",
        "blk.1.attn_v.weight",
        "blk.1.attn_output.weight",
        "blk.2.attn_q.weight",
        "blk.2.attn_k.weight",
        "blk.2.attn_v.weight",
        "blk.2.attn_output.weight",
        "blk.3.attn_q.weight",
        "blk.3.attn_k.weight",
        "blk.3.attn_v.weight",
        "blk.3.attn_output.weight",
        "blk.4.attn_q.weight",
        "blk.4.attn_k.weight",
        "blk.4.attn_v.weight",
        "blk.4.attn_output.weight",
        "blk.5.attn_q.weight",
        "blk.5.attn_k.weight",
        "blk.5.attn_v.weight",
        "blk.5.attn_output.weight",
        "blk.6.attn_q.weight",
        "blk.6.attn_k.weight",
        "blk.6.attn_v.weight",
        "blk.6.attn_output.weight",
        "blk.7.attn_q.weight",
        "blk.7.attn_k.weight",
        "blk.7.attn_v.weight",
        "blk.7.attn_output.weight",
    }, cmpopts.SortSlices(strings.Compare)); diff != "" {
        t.Errorf("TensorInfos() mismatch (-got +want):\n%s", diff)
    }

    ti, r, err := f.TensorReader("output.weight")
    if err != nil {
        t.Fatalf(`TensorReader("output.weight") error: %v`, err)
    }

    if ti.Name != "output.weight" {
        t.Errorf(`TensorReader("output.weight").Name = %q, want %q`, ti.Name, "output.weight")
    } else if diff := cmp.Diff(ti.Shape, []uint64{3, 2}); diff != "" {
        t.Errorf(`TensorReader("output.weight").Shape mismatch (-got +want):\n%s`, diff)
    } else if ti.Type != gguf.TensorTypeF32 {
        t.Errorf(`TensorReader("output.weight").Type = %d, want %d`, ti.Type, gguf.TensorTypeF32)
    }

    var b bytes.Buffer
    if _, err := b.ReadFrom(r); err != nil {
        t.Fatalf(`ReadFrom TensorReader("output.weight") error: %v`, err)
    }

    if b.Len() != int(ti.NumBytes()) {
        t.Errorf(`ReadFrom TensorReader("output.weight") length = %d, want %d`, b.Len(), ti.NumBytes())
    }
}

func BenchmarkRead(b *testing.B) {
    b.ReportAllocs()

    p := createBinFile(b)
    for b.Loop() {
        f, err := gguf.Open(p)
        if err != nil {
            b.Fatal(err)
        }

        if got := f.KeyValue("general.architecture").String(); got != "llama" {
            b.Errorf("got = %q, want %q", got, "llama")
        }

        // Iterate through some tensors
        for range f.TensorInfos() {
        }

        f.Close()
    }
}
90  fs/gguf/keyvalue.go  Normal file
@@ -0,0 +1,90 @@
package gguf

import (
    "reflect"
    "slices"
)

type KeyValue struct {
    Key string
    Value
}

func (kv KeyValue) Valid() bool {
    return kv.Key != "" && kv.Value.value != nil
}

type Value struct {
    value any
}

func value[T any](v Value, kinds ...reflect.Kind) (t T) {
    vv := reflect.ValueOf(v.value)
    if slices.Contains(kinds, vv.Kind()) {
        t = vv.Convert(reflect.TypeOf(t)).Interface().(T)
    }
    return
}

func values[T any](v Value, kinds ...reflect.Kind) (ts []T) {
    switch vv := reflect.ValueOf(v.value); vv.Kind() {
    case reflect.Slice:
        if slices.Contains(kinds, vv.Type().Elem().Kind()) {
            ts = make([]T, vv.Len())
            for i := range vv.Len() {
                ts[i] = vv.Index(i).Convert(reflect.TypeOf(ts[i])).Interface().(T)
            }
        }
    }
    return
}

// Int returns Value as a signed integer. If it is not a signed integer, it returns 0.
func (v Value) Int() int64 {
    return value[int64](v, reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64)
}

// Ints returns Value as a signed integer slice. If it is not a signed integer slice, it returns nil.
func (v Value) Ints() (i64s []int64) {
    return values[int64](v, reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64)
}

// Uint converts an unsigned integer value to uint64. If the value is not an unsigned integer, it returns 0.
func (v Value) Uint() uint64 {
    return value[uint64](v, reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64)
}

// Uints returns Value as an unsigned integer slice. If it is not an unsigned integer slice, it returns nil.
func (v Value) Uints() (u64s []uint64) {
    return values[uint64](v, reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64)
}

// Float returns Value as a float. If it is not a float, it returns 0.
func (v Value) Float() float64 {
    return value[float64](v, reflect.Float32, reflect.Float64)
}

// Floats returns Value as a float slice. If it is not a float slice, it returns nil.
func (v Value) Floats() (f64s []float64) {
    return values[float64](v, reflect.Float32, reflect.Float64)
}

// Bool returns Value as a boolean. If it is not a boolean, it returns false.
func (v Value) Bool() bool {
    return value[bool](v, reflect.Bool)
}

// Bools returns Value as a boolean slice. If it is not a boolean slice, it returns nil.
func (v Value) Bools() (bools []bool) {
    return values[bool](v, reflect.Bool)
}

// String returns Value as a string. If it is not a string, it returns an empty string.
func (v Value) String() string {
    return value[string](v, reflect.String)
}

// Strings returns Value as a string slice. If it is not a string slice, it returns nil.
func (v Value) Strings() (strings []string) {
    return values[string](v, reflect.String)
}
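
A small in-package illustration of the coercion rules above (not part of the commit): any matching reflect.Kind widens to the 64-bit type, anything else falls back to the zero value. This assumes the package's existing imports plus fmt:

// In-package sketch of Value coercion (illustration only).
v := Value{uint8(7)}
fmt.Println(v.Uint())   // 7: every unsigned width widens to uint64
fmt.Println(v.Int())    // 0: uint8 is not a signed kind, so the zero value
fmt.Println(v.String()) // "": not a string

vs := Value{[]int8{1, 2, 3}}
fmt.Println(vs.Ints())  // [1 2 3]: slice elements convert one by one
fmt.Println(vs.Uints()) // []: a signed slice is not an unsigned slice, so nil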
208  fs/gguf/keyvalue_test.go  Normal file
@@ -0,0 +1,208 @@
package gguf

import (
    "testing"

    "github.com/google/go-cmp/cmp"
)

func split(name string, values map[string][]any) (matched []any, unmatched []any) {
    for key, value := range values {
        if key == name {
            matched = value
        } else {
            unmatched = append(unmatched, value...)
        }
    }
    return
}

func TestValue(t *testing.T) {
    values := map[string][]any{
        "int64":   {int(42), int8(42), int16(42), int32(42), int64(42)},
        "uint64":  {uint(42), uint8(42), uint16(42), uint32(42), uint64(42)},
        "float64": {float32(42), float64(42)},
        "string":  {"42", "hello"},
        "bool":    {true, false},
    }

    t.Run("int64", func(t *testing.T) {
        matched, unmatched := split("int64", values)
        for _, v := range matched {
            kv := KeyValue{"key", Value{v}}
            if i64 := kv.Int(); i64 != 42 {
                t.Errorf("expected 42, got %d", i64)
            }
        }

        for _, v := range unmatched {
            kv := KeyValue{"key", Value{v}}
            if i64 := kv.Int(); i64 != 0 {
                t.Errorf("expected 0, got %d", i64)
            }
        }
    })

    t.Run("uint64", func(t *testing.T) {
        matched, unmatched := split("uint64", values)
        for _, v := range matched {
            kv := KeyValue{"key", Value{v}}
            if u64 := kv.Uint(); u64 != 42 {
                t.Errorf("expected 42, got %d", u64)
            }
        }

        for _, v := range unmatched {
            kv := KeyValue{"key", Value{v}}
            if u64 := kv.Uint(); u64 != 0 {
                t.Errorf("expected 0, got %d", u64)
            }
        }
    })

    t.Run("float64", func(t *testing.T) {
        matched, unmatched := split("float64", values)
        for _, v := range matched {
            kv := KeyValue{"key", Value{v}}
            if f64 := kv.Float(); f64 != 42 {
                t.Errorf("expected 42, got %f", f64)
            }
        }

        for _, v := range unmatched {
            kv := KeyValue{"key", Value{v}}
            if f64 := kv.Float(); f64 != 0 {
                t.Errorf("expected 0, got %f", f64)
            }
        }
    })

    t.Run("string", func(t *testing.T) {
        matched, unmatched := split("string", values)
        for _, v := range matched {
            kv := KeyValue{"key", Value{v}}
            if s := kv.String(); s != v {
                t.Errorf("expected %v, got %s", v, s)
            }
        }

        for _, v := range unmatched {
            kv := KeyValue{"key", Value{v}}
            if s := kv.String(); s != "" {
                t.Errorf("expected empty string, got %s", s)
            }
        }
    })

    t.Run("bool", func(t *testing.T) {
        matched, unmatched := split("bool", values)
        for _, v := range matched {
            kv := KeyValue{"key", Value{v}}
            if b := kv.Bool(); b != v {
                t.Errorf("expected %v, got %v", v, b)
            }
        }

        for _, v := range unmatched {
            kv := KeyValue{"key", Value{v}}
            if b := kv.Bool(); b != false {
                t.Errorf("expected false, got %v", b)
            }
        }
    })
}

func TestValues(t *testing.T) {
    values := map[string][]any{
        "int64s":   {[]int{42}, []int8{42}, []int16{42}, []int32{42}, []int64{42}},
        "uint64s":  {[]uint{42}, []uint8{42}, []uint16{42}, []uint32{42}, []uint64{42}},
        "float64s": {[]float32{42}, []float64{42}},
        "strings":  {[]string{"42"}, []string{"hello"}},
        "bools":    {[]bool{true}, []bool{false}},
    }

    t.Run("int64s", func(t *testing.T) {
        matched, unmatched := split("int64s", values)
        for _, v := range matched {
            kv := KeyValue{"key", Value{v}}
            if diff := cmp.Diff(kv.Ints(), []int64{42}); diff != "" {
                t.Errorf("diff: %s", diff)
            }
        }

        for _, v := range unmatched {
            kv := KeyValue{"key", Value{v}}
            if i64s := kv.Ints(); i64s != nil {
                t.Errorf("expected nil, got %v", i64s)
            }
        }
    })

    t.Run("uint64s", func(t *testing.T) {
        matched, unmatched := split("uint64s", values)
        for _, v := range matched {
            kv := KeyValue{"key", Value{v}}
            if diff := cmp.Diff(kv.Uints(), []uint64{42}); diff != "" {
                t.Errorf("diff: %s", diff)
            }
        }

        for _, v := range unmatched {
            kv := KeyValue{"key", Value{v}}
            if u64s := kv.Uints(); u64s != nil {
                t.Errorf("expected nil, got %v", u64s)
            }
        }
    })

    t.Run("float64s", func(t *testing.T) {
        matched, unmatched := split("float64s", values)
        for _, v := range matched {
            kv := KeyValue{"key", Value{v}}
            if diff := cmp.Diff(kv.Floats(), []float64{42}); diff != "" {
                t.Errorf("diff: %s", diff)
            }
        }

        for _, v := range unmatched {
            kv := KeyValue{"key", Value{v}}
            if f64s := kv.Floats(); f64s != nil {
                t.Errorf("expected nil, got %v", f64s)
            }
        }
    })

    t.Run("strings", func(t *testing.T) {
        matched, unmatched := split("strings", values)
        for _, v := range matched {
            kv := KeyValue{"key", Value{v}}
            if diff := cmp.Diff(kv.Strings(), v); diff != "" {
                t.Errorf("diff: %s", diff)
            }
        }

        for _, v := range unmatched {
            kv := KeyValue{"key", Value{v}}
            if s := kv.Strings(); s != nil {
                t.Errorf("expected nil, got %v", s)
            }
        }
    })

    t.Run("bools", func(t *testing.T) {
        matched, unmatched := split("bools", values)
        for _, v := range matched {
            kv := KeyValue{"key", Value{v}}
            if diff := cmp.Diff(kv.Bools(), v); diff != "" {
                t.Errorf("diff: %s", diff)
            }
        }

        for _, v := range unmatched {
            kv := KeyValue{"key", Value{v}}
            if b := kv.Bools(); b != nil {
                t.Errorf("expected nil, got %v", b)
            }
        }
    })
}
89
fs/gguf/lazy.go
Normal file
89
fs/gguf/lazy.go
Normal file
@@ -0,0 +1,89 @@
|
|||||||
|
package gguf
|
||||||
|
|
||||||
|
import (
|
||||||
|
"encoding/binary"
|
||||||
|
"iter"
|
||||||
|
"log/slog"
|
||||||
|
)
|
||||||
|
|
||||||
|
type lazy[T any] struct {
|
||||||
|
count uint64
|
||||||
|
next func() (T, bool)
|
||||||
|
stop func()
|
||||||
|
values []T
|
||||||
|
|
||||||
|
// successFunc is called when all values have been successfully read.
|
||||||
|
successFunc func() error
|
||||||
|
}
|
||||||
|
|
||||||
|
func newLazy[T any](f *File, fn func() (T, error)) (*lazy[T], error) {
|
||||||
|
it := lazy[T]{}
|
||||||
|
if err := binary.Read(f.reader, binary.LittleEndian, &it.count); err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
it.values = make([]T, 0)
|
||||||
|
it.next, it.stop = iter.Pull(func(yield func(T) bool) {
|
||||||
|
for i := range it.count {
|
||||||
|
t, err := fn()
|
||||||
|
if err != nil {
|
||||||
|
slog.Error("error reading tensor", "index", i, "error", err)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
it.values = append(it.values, t)
|
||||||
|
if !yield(t) {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if it.successFunc != nil {
|
||||||
|
it.successFunc()
|
||||||
|
}
|
||||||
|
})
|
||||||
|
|
||||||
|
return &it, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (g *lazy[T]) Values() iter.Seq[T] {
|
||||||
|
return func(yield func(T) bool) {
|
||||||
|
for _, v := range g.All() {
|
||||||
|
if !yield(v) {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (g *lazy[T]) All() iter.Seq2[int, T] {
|
||||||
|
return func(yield func(int, T) bool) {
|
||||||
|
for i := range int(g.count) {
|
||||||
|
if i < len(g.values) {
|
||||||
|
if !yield(i, g.values[i]) {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
t, ok := g.next()
|
||||||
|
if !ok {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
|
||||||
|
if !yield(i, t) {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (g *lazy[T]) rest() (collected bool) {
|
||||||
|
for {
|
||||||
|
_, ok := g.next()
|
||||||
|
collected = collected || ok
|
||||||
|
if !ok {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return collected
|
||||||
|
}
|
||||||
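
The construction above leans on the standard library's iter.Pull (Go 1.23+); a standalone sketch of the pattern, not part of the commit:

package main

import (
    "fmt"
    "iter"
)

func main() {
    // A push-style sequence of three values.
    seq := iter.Seq[int](func(yield func(int) bool) {
        for _, v := range []int{10, 20, 30} {
            if !yield(v) {
                return
            }
        }
    })

    // Pull inverts control: values are produced only when next is called,
    // which is what lets lazy[T] decode entries on demand and stop early.
    next, stop := iter.Pull(seq)
    defer stop()

    v, ok := next()
    fmt.Println(v, ok) // 10 true
}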
23  fs/gguf/reader.go  Normal file
@@ -0,0 +1,23 @@
package gguf

import (
    "bufio"
    "io"
)

type bufferedReader struct {
    offset int64
    *bufio.Reader
}

func newBufferedReader(rs io.ReadSeeker, size int) *bufferedReader {
    return &bufferedReader{
        Reader: bufio.NewReaderSize(rs, size),
    }
}

func (rs *bufferedReader) Read(p []byte) (n int, err error) {
    n, err = rs.Reader.Read(p)
    rs.offset += int64(n)
    return n, err
}
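
The only job of this wrapper is byte accounting: the offset it accumulates is what Open's successFunc uses to compute the aligned start of tensor data. An in-package sketch (illustration only, assuming the fmt, io, and strings imports):

// In-package sketch (not part of the commit).
br := newBufferedReader(strings.NewReader("hello gguf"), 4)
buf := make([]byte, 5)
if _, err := io.ReadFull(br, buf); err != nil {
    panic(err)
}
fmt.Println(br.offset) // 5: offset advances with every Read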
288  fs/gguf/tensor.go  Normal file
@@ -0,0 +1,288 @@
package gguf

import (
    "log/slog"
    "strings"
)

type TensorInfo struct {
    Name   string
    Offset uint64
    Shape  []uint64
    Type   TensorType
}

func (ti TensorInfo) Valid() bool {
    return ti.Name != "" && ti.NumBytes() > 0
}

func (ti TensorInfo) NumValues() int64 {
    var numItems int64 = 1
    for _, dim := range ti.Shape {
        numItems *= int64(dim)
    }
    return numItems
}

// NumBytes returns the number of bytes in the tensor.
func (ti TensorInfo) NumBytes() int64 {
    return int64(float64(ti.NumValues()) * ti.Type.NumBytes())
}

func (ti TensorInfo) LogValue() slog.Value {
    return slog.GroupValue(
        slog.String("name", ti.Name),
        slog.Int64("offset", int64(ti.Offset)),
        slog.Any("shape", ti.Shape),
        slog.Int64("num_values", ti.NumValues()),
        slog.Int64("num_bytes", ti.NumBytes()),
        slog.Any("type", ti.Type),
    )
}

type TensorType uint32

const (
    TensorTypeF32 TensorType = iota
    TensorTypeF16
    TensorTypeQ4_0
    TensorTypeQ4_1

    // unexported // unused in gguf
    tensorTypeQ4_2
    tensorTypeQ4_3

    TensorTypeQ5_0
    TensorTypeQ5_1
    TensorTypeQ8_0
    TensorTypeQ8_1
    TensorTypeQ2_K
    TensorTypeQ3_K
    TensorTypeQ4_K
    TensorTypeQ5_K
    TensorTypeQ6_K
    TensorTypeQ8_K

    // unexported // unquantizable by ollama
    tensorTypeIQ2_XXS
    tensorTypeIQ2_XS
    tensorTypeIQ3_XXS
    tensorTypeIQ1_S
    tensorTypeIQ4_NL
    tensorTypeIQ3_S
    tensorTypeIQ2_S
    tensorTypeIQ4_XS

    TensorTypeI8
    TensorTypeI16
    TensorTypeI32
    TensorTypeI64
    TensorTypeF64

    // unexported // unquantizable by ollama
    tensorTypeIQ1_M

    TensorTypeBF16

    // unexported // unused in gguf
    tensorTypeQ4_0_4_4
    tensorTypeQ4_0_4_8
    tensorTypeQ4_0_8_8

    // unexported // unquantizable by ollama
    tensorTypeTQ1_0
    tensorTypeTQ2_0

    // unexported // unused in gguf
    tensorTypeIQ4_NL_4_4
    tensorTypeIQ4_NL_4_8
    tensorTypeIQ4_NL_8_8
)

func (tt TensorType) NumBytes() float64 {
    return float64(tt.typeSize()) / float64(tt.blockSize())
}

func (tt TensorType) typeSize() int64 {
    switch tt {
    case TensorTypeF32:
        return 4
    case TensorTypeF16:
        return 2
    case TensorTypeQ4_0:
        return 2 + tt.blockSize()/2
    case TensorTypeQ4_1:
        return 2 + 2 + tt.blockSize()/2
    case TensorTypeQ5_0:
        return 2 + 4 + tt.blockSize()/2
    case TensorTypeQ5_1:
        return 2 + 2 + 4 + tt.blockSize()/2
    case TensorTypeQ8_0:
        return 2 + tt.blockSize()
    case TensorTypeQ8_1:
        return 2 + 2 + tt.blockSize()
    case TensorTypeQ2_K:
        return tt.blockSize()/16 + tt.blockSize()/4 + 2 + 2
    case TensorTypeQ3_K:
        return tt.blockSize()/8 + tt.blockSize()/4 + 12 + 2
    case TensorTypeQ4_K:
        return 2 + 2 + 12 + tt.blockSize()/2
    case TensorTypeQ5_K:
        return 2 + 2 + 12 + tt.blockSize()/8 + tt.blockSize()/2
    case TensorTypeQ6_K:
        return tt.blockSize()/2 + tt.blockSize()/4 + tt.blockSize()/16 + 2
    case TensorTypeQ8_K:
        return 4 + tt.blockSize() + 2*tt.blockSize()/16
    case tensorTypeIQ2_XXS:
        return 2 + 2*tt.blockSize()/8
    case tensorTypeIQ2_XS:
        return 2 + 2*tt.blockSize()/8 + tt.blockSize()/32
    case tensorTypeIQ3_XXS:
        return 2 + tt.blockSize()/4 + tt.blockSize()/8
    case tensorTypeIQ1_S:
        return 2 + tt.blockSize()/8 + tt.blockSize()/16
    case tensorTypeIQ4_NL:
        return 2 + tt.blockSize()/2
    case tensorTypeIQ3_S:
        return 2 + tt.blockSize()/4 + tt.blockSize()/8 + tt.blockSize()/32 + 4
    case tensorTypeIQ2_S:
        return 2 + tt.blockSize()/4 + tt.blockSize()/16
    case tensorTypeIQ4_XS:
        return 2 + 2 + tt.blockSize()/2 + tt.blockSize()/64
    case TensorTypeI8:
        return 1
    case TensorTypeI16:
        return 2
    case TensorTypeI32:
        return 4
    case TensorTypeI64:
        return 8
    case TensorTypeF64:
        return 8
    case tensorTypeIQ1_M:
        return tt.blockSize()/8 + tt.blockSize()/16 + tt.blockSize()/32
    case TensorTypeBF16:
        return 2
    default:
        return 0
    }
}

func (tt TensorType) blockSize() int64 {
    switch tt {
    case TensorTypeF32,
        TensorTypeF16,
        TensorTypeI8,
        TensorTypeI16,
        TensorTypeI32,
        TensorTypeI64,
        TensorTypeF64,
        TensorTypeBF16:
        return 1
    case TensorTypeQ4_0,
        TensorTypeQ4_1,
        TensorTypeQ5_0,
        TensorTypeQ5_1,
        TensorTypeQ8_0,
        TensorTypeQ8_1,
        tensorTypeIQ4_NL:
        return 32
    default:
        return 256
    }
}

func (tt TensorType) String() string {
    switch tt {
    case TensorTypeF32:
        return "f32"
    case TensorTypeF16:
        return "f16"
    case TensorTypeQ4_0:
        return "q4_0"
    case TensorTypeQ4_1:
        return "q4_1"
    case tensorTypeQ4_2:
        return "q4_2"
    case tensorTypeQ4_3:
        return "q4_3"
    case TensorTypeQ5_0:
        return "q5_0"
    case TensorTypeQ5_1:
        return "q5_1"
    case TensorTypeQ8_0:
        return "q8_0"
    case TensorTypeQ8_1:
        return "q8_1"
    case TensorTypeQ2_K:
        return "q2_k"
    case TensorTypeQ3_K:
        return "q3_k"
    case TensorTypeQ4_K:
        return "q4_k"
    case TensorTypeQ5_K:
        return "q5_k"
    case TensorTypeQ6_K:
        return "q6_k"
    case TensorTypeQ8_K:
        return "q8_k"
    case tensorTypeIQ2_XXS:
        return "iq2_xxs"
    case tensorTypeIQ2_XS:
        return "iq2_xs"
    case tensorTypeIQ3_XXS:
        return "iq3_xxs"
    case tensorTypeIQ1_S:
        return "iq1_s"
    case tensorTypeIQ4_NL:
        return "iq4_nl"
    case tensorTypeIQ3_S:
        return "iq3_s"
    case tensorTypeIQ2_S:
        return "iq2_s"
    case tensorTypeIQ4_XS:
        return "iq4_xs"
    case TensorTypeI8:
        return "i8"
    case TensorTypeI16:
        return "i16"
    case TensorTypeI32:
        return "i32"
    case TensorTypeI64:
        return "i64"
    case TensorTypeF64:
        return "f64"
    case tensorTypeIQ1_M:
        return "iq1_m"
    case TensorTypeBF16:
        return "bf16"
    case tensorTypeQ4_0_4_4:
        return "q4_0_4_4"
    case tensorTypeQ4_0_4_8:
        return "q4_0_4_8"
    case tensorTypeQ4_0_8_8:
        return "q4_0_8_8"
    case tensorTypeTQ1_0:
        return "tq1_0"
    case tensorTypeTQ2_0:
        return "tq2_0"
    case tensorTypeIQ4_NL_4_4:
        return "iq4_nl_4_4"
    case tensorTypeIQ4_NL_4_8:
        return "iq4_nl_4_8"
    case tensorTypeIQ4_NL_8_8:
        return "iq4_nl_8_8"
    default:
        return "unknown"
    }
}

func (tt TensorType) LogValue() slog.Value {
    return slog.GroupValue(
        slog.Uint64("value", uint64(tt)),
        slog.String("name", strings.ToUpper(tt.String())),
        slog.Int64("size", tt.typeSize()),
        slog.Int64("block_size", tt.blockSize()),
        slog.Float64("num_bytes", tt.NumBytes()),
    )
}
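
As a sanity check on the size math above (not part of the commit): Q4_K has a block size of 256 and a typeSize of 2+2+12+256/2 = 144 bytes per block, so NumBytes() works out to 144/256 = 0.5625 bytes per value; a 1024-element Q4_K tensor therefore occupies 576 bytes. A standalone check, with a hypothetical tensor name:

package main

import (
    "fmt"

    "github.com/ollama/ollama/fs/gguf"
)

func main() {
    ti := gguf.TensorInfo{
        Name:  "example", // hypothetical tensor
        Shape: []uint64{32, 32},
        Type:  gguf.TensorTypeQ4_K,
    }
    fmt.Println(ti.NumValues()) // 1024
    fmt.Println(ti.NumBytes())  // 576 = 1024 * 0.5625
}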
4  go.mod
@@ -19,12 +19,13 @@ require (
 	github.com/d4l3k/go-bfloat16 v0.0.0-20211005043715-690c3bdd05f1
 	github.com/dlclark/regexp2 v1.11.4
 	github.com/emirpasic/gods/v2 v2.0.0-alpha
-	github.com/google/go-cmp v0.6.0
+	github.com/google/go-cmp v0.7.0
 	github.com/mattn/go-runewidth v0.0.14
 	github.com/nlpodyssey/gopickle v0.3.0
 	github.com/pdevine/tensor v0.0.0-20240510204454-f88f4562727c
 	golang.org/x/image v0.22.0
 	golang.org/x/tools v0.30.0
+	gonum.org/v1/gonum v0.15.0
 )
 
 require (
@@ -44,7 +45,6 @@ require (
 	github.com/xtgo/set v1.0.0 // indirect
 	go4.org/unsafe/assume-no-moving-gc v0.0.0-20231121144256-b99613f794b6 // indirect
 	golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 // indirect
-	gonum.org/v1/gonum v0.15.0 // indirect
 	gorgonia.org/vecf32 v0.9.0 // indirect
 	gorgonia.org/vecf64 v0.9.0 // indirect
 )
4  go.sum
@@ -112,8 +112,8 @@ github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/
 github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
 github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
 github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
-github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI=
-github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
+github.com/google/go-cmp v0.7.0 h1:wk8382ETsv4JYUZwIsn6YpYiWiBsYLSJiTsyBybVuN8=
+github.com/google/go-cmp v0.7.0/go.mod h1:pXiqmnSA92OHEEa9HXL2W4E7lf9JzCmGVUdgjX3N/iU=
 github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
 github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
 github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0=
@@ -45,6 +45,8 @@ var (
 		"qwen2.5-coder:latest",
 		"qwen:latest",
 		"solar-pro:latest",
+		"codellama:latest",
+		"nous-hermes:latest",
 	}
 )
 
@@ -150,7 +150,7 @@ index 4cce5166..7f6617fa 100644
  llama_model_loader::llama_model_loader(
          const std::string & fname,
 diff --git a/src/llama-model.cpp b/src/llama-model.cpp
-index 3a4e72a3..831b68c0 100644
+index 3a4e72a3..db62973f 100644
 --- a/src/llama-model.cpp
 +++ b/src/llama-model.cpp
 @@ -1402,6 +1402,21 @@ void llama_model::load_hparams(llama_model_loader & ml) {
@@ -22,10 +22,10 @@ multiple batches of processing until everything is complete.
  4 files changed, 59 insertions(+), 79 deletions(-)
 
 diff --git a/src/llama-context.cpp b/src/llama-context.cpp
-index c22687e4..c5948e8f 100644
+index dca22d8b..1f3a3956 100644
 --- a/src/llama-context.cpp
 +++ b/src/llama-context.cpp
-@@ -950,9 +950,12 @@ int llama_context::decode(llama_batch & inp_batch) {
+@@ -947,9 +947,12 @@ int llama_context::decode(llama_batch & inp_batch) {
 
      // find KV slot
      if (!kv_self->find_slot(ubatch)) {
@@ -41,7 +41,7 @@ index c22687e4..c5948e8f 100644
      }
 
      ggml_backend_sched_reset(sched.get());
-@@ -1967,9 +1970,12 @@ void llama_context::opt_epoch_iter(
+@@ -1965,9 +1968,12 @@ void llama_context::opt_epoch_iter(
 
      // TODO: not sure if this is needed
      if (!kv_self->find_slot(ubatch)) {
@@ -10,10 +10,10 @@ Subject: [PATCH] add argsort and cuda copy for i32
 3 files changed, 192 insertions(+), 2 deletions(-)
 
 diff --git a/ggml/src/ggml-cpu/ops.cpp b/ggml/src/ggml-cpu/ops.cpp
-index becdae07..7a44b6cf 100644
+index 955fec59..654e2f28 100644
 --- a/ggml/src/ggml-cpu/ops.cpp
 +++ b/ggml/src/ggml-cpu/ops.cpp
-@@ -6890,6 +6890,45 @@ static void ggml_compute_forward_argsort_f32(
+@@ -6822,6 +6822,45 @@ static void ggml_compute_forward_argsort_f32(
 }
 }
 
@@ -59,7 +59,7 @@ index becdae07..7a44b6cf 100644
 void ggml_compute_forward_argsort(
         const ggml_compute_params * params,
         ggml_tensor * dst) {
-@@ -6901,6 +6940,10 @@ void ggml_compute_forward_argsort(
+@@ -6833,6 +6872,10 @@ void ggml_compute_forward_argsort(
 {
     ggml_compute_forward_argsort_f32(params, dst);
 } break;
@@ -195,7 +195,7 @@ index 607ded85..53b02634 100644
 + }
 }
 diff --git a/ggml/src/ggml-cuda/cpy.cu b/ggml/src/ggml-cuda/cpy.cu
-index 2d46176e..47383486 100644
+index d027271f..4abd01d7 100644
 --- a/ggml/src/ggml-cuda/cpy.cu
 +++ b/ggml/src/ggml-cuda/cpy.cu
 @@ -38,6 +38,13 @@ static __device__ void cpy_1_f16_f32(const char * cxi, char * cdsti) {
@@ -257,7 +257,7 @@ index 2d46176e..47383486 100644
 static __device__ void cpy_blck_f32_q8_0(const char * cxi, char * cdsti) {
     const float * xi = (const float *) cxi;
     block_q8_0 * dsti = (block_q8_0 *) cdsti;
-@@ -631,6 +676,8 @@ void ggml_cuda_cpy(ggml_backend_cuda_context & ctx, const ggml_tensor * src0, gg
+@@ -633,6 +678,8 @@ void ggml_cuda_cpy(ggml_backend_cuda_context & ctx, const ggml_tensor * src0, gg
 ggml_cpy_f16_f16_cuda (src0_ddc, src1_ddc, ne, ne00, ne01, ne02, nb00, nb01, nb02, nb03, ne10, ne11, ne12, nb10, nb11, nb12, nb13, main_stream, dest_ptrs_d, graph_cpynode_index);
 } else if (src0->type == GGML_TYPE_F16 && src1->type == GGML_TYPE_F32) {
 ggml_cpy_f16_f32_cuda (src0_ddc, src1_ddc, ne, ne00, ne01, ne02, nb00, nb01, nb02, nb03, ne10, ne11, ne12, nb10, nb11, nb12, nb13, main_stream, dest_ptrs_d, graph_cpynode_index);
@@ -266,7 +266,7 @@ index 2d46176e..47383486 100644
 } else {
     GGML_ABORT("%s: unsupported type combination (%s to %s)\n", __func__,
         ggml_type_name(src0->type), ggml_type_name(src1->type));
-@@ -686,6 +733,8 @@ void* ggml_cuda_cpy_fn(const ggml_tensor * src0, ggml_tensor * src1) {
+@@ -688,6 +735,8 @@ void* ggml_cuda_cpy_fn(const ggml_tensor * src0, ggml_tensor * src1) {
 return (void*) cpy_f32_f16<cpy_1_f32_f16>;
 } else if (src0->type == GGML_TYPE_F16 && src1->type == GGML_TYPE_F32) {
 return (void*) cpy_f32_f16<cpy_1_f16_f32>;
@@ -0,0 +1,32 @@
From 0000000000000000000000000000000000000000 Mon Sep 17 00:00:00 2001
From: Daniel Hiltgen <daniel@ollama.com>
Date: Sun, 22 Jun 2025 09:22:05 -0700
Subject: [PATCH] temporary prevent rocm+cuda mixed loading

---
 ggml/src/ggml-backend-reg.cpp | 12 ++++++++++--
 1 file changed, 10 insertions(+), 2 deletions(-)

diff --git a/ggml/src/ggml-backend-reg.cpp b/ggml/src/ggml-backend-reg.cpp
index 4e67d243..8f49f084 100644
--- a/ggml/src/ggml-backend-reg.cpp
+++ b/ggml/src/ggml-backend-reg.cpp
@@ -573,8 +573,16 @@ void ggml_backend_load_all_from_path(const char * dir_path) {
 
     ggml_backend_load_best("blas", silent, dir_path);
     ggml_backend_load_best("cann", silent, dir_path);
-    ggml_backend_load_best("cuda", silent, dir_path);
-    ggml_backend_load_best("hip", silent, dir_path);
+
+    // Avoid mixed hip+cuda configurations
+    const char * hip_devices  = std::getenv("HIP_VISIBLE_DEVICES");
+    const char * rocr_devices = std::getenv("ROCR_VISIBLE_DEVICES");
+    if (!hip_devices && !rocr_devices) {
+        ggml_backend_load_best("cuda", silent, dir_path);
+    } else {
+        ggml_backend_load_best("hip", silent, dir_path);
+    }
+
     ggml_backend_load_best("kompute", silent, dir_path);
     ggml_backend_load_best("metal", silent, dir_path);
     ggml_backend_load_best("rpc", silent, dir_path);
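The patch above prefers a single GPU backend at load time: if either HIP_VISIBLE_DEVICES or ROCR_VISIBLE_DEVICES is set it loads only the HIP backend, otherwise only CUDA. A minimal Go sketch of the same selection rule, for illustration only (the chooseGPUBackend name is hypothetical; the real logic is the C++ above):

package main

import (
	"fmt"
	"os"
)

// chooseGPUBackend mirrors the selection rule in the patch above:
// prefer HIP when either ROCm visibility variable is set, else CUDA.
func chooseGPUBackend() string {
	_, hip := os.LookupEnv("HIP_VISIBLE_DEVICES")
	_, rocr := os.LookupEnv("ROCR_VISIBLE_DEVICES")
	if hip || rocr {
		return "hip"
	}
	return "cuda"
}

func main() {
	fmt.Println("loading backend:", chooseGPUBackend())
}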
169 llama/patches/0019-metal-add-mean-kernel-14267.patch Normal file
@@ -0,0 +1,169 @@
From 0000000000000000000000000000000000000000 Mon Sep 17 00:00:00 2001
From: Georgi Gerganov <ggerganov@gmail.com>
Date: Thu, 19 Jun 2025 08:05:21 +0300
Subject: [PATCH] metal : add mean kernel (#14267)

* metal : add mean kernel

ggml-ci

* cont : dedup implementation

ggml-ci
---
 ggml/src/ggml-metal/ggml-metal.m     | 33 ++++++++++++++++---
 ggml/src/ggml-metal/ggml-metal.metal | 48 ++++++++++++++++++++++------
 2 files changed, 67 insertions(+), 14 deletions(-)

diff --git a/ggml/src/ggml-metal/ggml-metal.m b/ggml/src/ggml-metal/ggml-metal.m
index ee4f2dcb..f20f5615 100644
--- a/ggml/src/ggml-metal/ggml-metal.m
+++ b/ggml/src/ggml-metal/ggml-metal.m
@@ -489,6 +489,7 @@ enum ggml_metal_kernel_type {
     GGML_METAL_KERNEL_TYPE_COS,
     GGML_METAL_KERNEL_TYPE_NEG,
     GGML_METAL_KERNEL_TYPE_SUM_ROWS,
+    GGML_METAL_KERNEL_TYPE_MEAN,
     GGML_METAL_KERNEL_TYPE_POOL_2D_AVG_F32,
     GGML_METAL_KERNEL_TYPE_POOL_2D_MAX_F32,
     GGML_METAL_KERNEL_TYPE_ARGMAX,
@@ -1436,6 +1437,7 @@ static struct ggml_backend_metal_context * ggml_metal_init(ggml_backend_dev_t de
     GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_COS, cos, true);
     GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_NEG, neg, true);
     GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_SUM_ROWS, sum_rows, true);
+    GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MEAN, mean, true);
     GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_ARGMAX, argmax, true);
     GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_POOL_2D_AVG_F32, pool_2d_avg_f32, true);
     GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_POOL_2D_MAX_F32, pool_2d_max_f32, true);
@@ -1634,6 +1636,7 @@ static bool ggml_metal_supports_op(const struct ggml_backend_metal_device_contex
     case GGML_OP_LOG:
         return false; // TODO: implement
     case GGML_OP_SUM_ROWS:
+    case GGML_OP_MEAN:
     case GGML_OP_SOFT_MAX:
     case GGML_OP_GROUP_NORM:
         return has_simdgroup_reduction && ggml_is_contiguous(op->src[0]);
@@ -2362,11 +2365,30 @@ static bool ggml_metal_encode_node(
                 [encoder dispatchThreadgroups:MTLSizeMake(n, 1, 1) threadsPerThreadgroup:MTLSizeMake(1, 1, 1)];
             } break;
         case GGML_OP_SUM_ROWS:
+        case GGML_OP_MEAN:
             {
                 GGML_ASSERT(src0->nb[0] == ggml_type_size(src0->type));
 
-                id<MTLComputePipelineState> pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_SUM_ROWS].pipeline;
+                id<MTLComputePipelineState> pipeline = nil;
+
+                switch (dst->op) {
+                    case GGML_OP_SUM_ROWS:
+                        pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_SUM_ROWS].pipeline;
+                        break;
+                    case GGML_OP_MEAN:
+                        pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_MEAN].pipeline;
+                        break;
+                    default:
+                        GGML_ABORT("fatal error");
+                }
+
+                int nth = 32; // SIMD width
+
+                while (nth < ne00 && nth < (int) pipeline.maxTotalThreadsPerThreadgroup) {
+                    nth *= 2;
+                }
+
+                nth = MIN(nth, ne00);
 
                 ggml_metal_kargs_sum_rows args = {
                     /*.ne00 =*/ ne00,
@@ -2396,11 +2418,12 @@ static bool ggml_metal_encode_node(
                 };
 
                 [encoder setComputePipelineState:pipeline];
-                [encoder setBuffer:id_src0 offset:offs_src0 atIndex:0];
-                [encoder setBuffer:id_dst offset:offs_dst atIndex:1];
-                [encoder setBytes:&args length:sizeof(args) atIndex:2];
+                [encoder setBytes:&args length:sizeof(args) atIndex:0];
+                [encoder setBuffer:id_src0 offset:offs_src0 atIndex:1];
+                [encoder setBuffer:id_dst offset:offs_dst atIndex:2];
+                [encoder setThreadgroupMemoryLength:32*sizeof(float) atIndex:0];
 
-                [encoder dispatchThreadgroups:MTLSizeMake(ne01, ne02, ne03) threadsPerThreadgroup:MTLSizeMake(1, 1, 1)];
+                [encoder dispatchThreadgroups:MTLSizeMake(ne01, ne02, ne03) threadsPerThreadgroup:MTLSizeMake(nth, 1, 1)];
             } break;
         case GGML_OP_SOFT_MAX:
             {
diff --git a/ggml/src/ggml-metal/ggml-metal.metal b/ggml/src/ggml-metal/ggml-metal.metal
index 9cfddf45..08e8d807 100644
--- a/ggml/src/ggml-metal/ggml-metal.metal
+++ b/ggml/src/ggml-metal/ggml-metal.metal
@@ -956,31 +956,61 @@ kernel void kernel_neg(
     dst[tpig] = -src0[tpig];
 }
 
+template <bool norm>
 kernel void kernel_sum_rows(
+        constant ggml_metal_kargs_sum_rows & args,
         device const float * src0,
         device       float * dst,
-        constant ggml_metal_kargs_sum_rows & args,
-        uint3 tpig[[thread_position_in_grid]]) {
-    int64_t i3 = tpig.z;
-    int64_t i2 = tpig.y;
-    int64_t i1 = tpig.x;
+        threadgroup  float * shmem_f32 [[threadgroup(0)]],
+        uint3   tgpig[[threadgroup_position_in_grid]],
+        ushort3 tpitg[[thread_position_in_threadgroup]],
+        ushort  sgitg[[simdgroup_index_in_threadgroup]],
+        ushort  tiisg[[thread_index_in_simdgroup]],
+        ushort3   ntg[[threads_per_threadgroup]]) {
+    int64_t i3 = tgpig.z;
+    int64_t i2 = tgpig.y;
+    int64_t i1 = tgpig.x;
 
     if (i3 >= args.ne03 || i2 >= args.ne02 || i1 >= args.ne01) {
         return;
     }
 
+    if (sgitg == 0) {
+        shmem_f32[tiisg] = 0.0f;
+    }
+
     device const float * src_row = (device const float *) ((device const char *) src0 + i1*args.nb01 + i2*args.nb02 + i3*args.nb03);
     device       float * dst_row = (device       float *) ((device       char *) dst  + i1*args.nb1  + i2*args.nb2  + i3*args.nb3);
 
-    float row_sum = 0;
+    float sumf = 0;
 
-    for (int64_t i0 = 0; i0 < args.ne00; i0++) {
-        row_sum += src_row[i0];
+    for (int64_t i0 = tpitg.x; i0 < args.ne00; i0 += ntg.x) {
+        sumf += src_row[i0];
     }
 
-    dst_row[0] = row_sum;
+    sumf = simd_sum(sumf);
+
+    threadgroup_barrier(mem_flags::mem_threadgroup);
+
+    if (tiisg == 0) {
+        shmem_f32[sgitg] = sumf;
+    }
+
+    threadgroup_barrier(mem_flags::mem_threadgroup);
+
+    sumf = shmem_f32[tiisg];
+    sumf = simd_sum(sumf);
+
+    if (tpitg.x == 0) {
+        dst_row[0] = norm ? sumf / args.ne00 : sumf;
+    }
 }
 
+typedef decltype(kernel_sum_rows<false>) kernel_sum_rows_t;
+
+template [[host_name("kernel_sum_rows")]] kernel kernel_sum_rows_t kernel_sum_rows<false>;
+template [[host_name("kernel_mean")]]     kernel kernel_sum_rows_t kernel_sum_rows<true>;
+
 template<typename T>
 kernel void kernel_soft_max(
         device const char * src0,
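For reference, the templated kernel_sum_rows above reduces each row in two stages: every simdgroup folds its strided partial sums with simd_sum, the per-simdgroup results are staged in threadgroup memory, and one final simd_sum combines them; with norm=true the total is divided by ne00 to yield the mean (that is what kernel_mean instantiates). A CPU-side Go sketch of that two-stage scheme, using an illustrative group size rather than the Metal SIMD width:

package main

import "fmt"

// reduceRow mirrors the two-stage reduction in kernel_sum_rows:
// stage 1 produces one partial sum per "simdgroup" (here: a fixed-size
// chunk), stage 2 reduces the partials. norm=true yields the mean.
func reduceRow(row []float32, groupSize int, norm bool) float32 {
	var partials []float32
	for i := 0; i < len(row); i += groupSize { // stage 1: per-group sums
		end := i + groupSize
		if end > len(row) {
			end = len(row)
		}
		var s float32
		for _, v := range row[i:end] {
			s += v
		}
		partials = append(partials, s)
	}
	var total float32
	for _, p := range partials { // stage 2: reduce the partials
		total += p
	}
	if norm {
		return total / float32(len(row))
	}
	return total
}

func main() {
	row := []float32{1, 2, 3, 4, 5, 6, 7, 8}
	fmt.Println(reduceRow(row, 4, false)) // sum  -> 36
	fmt.Println(reduceRow(row, 4, true))  // mean -> 4.5
}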
5089 llama/patches/0020-CUDA-add-mean-operation-14313.patch Normal file
File diff suppressed because it is too large
@@ -151,7 +151,12 @@ func EstimateGPULayers(gpus []discover.GpuInfo, f *ggml.GGML, projectors []strin
 	}
 
 	if graphPartialOffload == 0 {
-		graphPartialOffload = f.KV().GQA() * kvTotal / 6
+		headsKV := f.KV().HeadCountKVMin()
+		if headsKV == 0 {
+			headsKV = 1
+		}
+		gqa := f.KV().HeadCountMax() / headsKV
+		graphPartialOffload = gqa * kvTotal / 6
 	}
 	if graphFullOffload == 0 {
 		graphFullOffload = graphPartialOffload
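The hunk above replaces the f.KV().GQA() helper with an explicit ratio of the maximum head count to the minimum KV head count, guarded against a zero divisor. A small Go sketch of the guarded arithmetic, with plain integers standing in for the ggml KV accessors (the function name is hypothetical):

package main

import "fmt"

// partialOffloadGraph reproduces the guarded GQA estimate from the hunk
// above: gqa = headCountMax / max(headCountKVMin, 1), then gqa*kvTotal/6.
func partialOffloadGraph(headCountMax, headCountKVMin, kvTotal uint64) uint64 {
	headsKV := headCountKVMin
	if headsKV == 0 {
		headsKV = 1 // avoid dividing by zero when KV head metadata is absent
	}
	gqa := headCountMax / headsKV
	return gqa * kvTotal / 6
}

func main() {
	fmt.Println(partialOffloadGraph(32, 8, 1<<30)) // GQA ratio 4
	fmt.Println(partialOffloadGraph(32, 0, 1<<30)) // falls back to headsKV=1
}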
@@ -139,6 +139,13 @@ func NewLlamaServer(gpus discover.GpuInfoList, modelPath string, f *ggml.GGML, a
 		gpus = discover.GetCPUInfo()
 	}
 
+	// Verify the requested context size is <= the model training size
+	trainCtx := f.KV().ContextLength()
+	if opts.NumCtx/numParallel > int(trainCtx) && trainCtx > 0 {
+		slog.Warn("requested context size too large for model", "num_ctx", opts.NumCtx, "num_parallel", numParallel, "n_ctx_train", trainCtx)
+		opts.NumCtx = int(trainCtx) * numParallel
+	}
+
 	estimate := EstimateGPULayers(gpus, f, projectors, opts, numParallel)
 	if len(gpus) > 1 || gpus[0].Library != "cpu" {
 		switch {
@@ -311,7 +318,7 @@ func NewLlamaServer(gpus discover.GpuInfoList, modelPath string, f *ggml.GGML, a
 		params = append(params, "--mmproj", projectors[0])
 	}
 
-	// iterate through compatible GPU libraries such as 'cuda_v12', 'cuda_v11', 'rocm', etc.
+	// iterate through compatible GPU libraries such as 'cuda_v12', 'rocm', etc.
 	// adding each library's respective path to the LD_LIBRARY_PATH, until finally running
 	// without any LD_LIBRARY_PATH flags
 	for {
@@ -253,6 +253,7 @@ type Tensor interface {
 
 	Neg(ctx Context) Tensor
 	Add(ctx Context, t2 Tensor) Tensor
+	Sub(ctx Context, t2 Tensor) Tensor
 	Mul(ctx Context, t2 Tensor) Tensor
 	Div(ctx Context, t2 Tensor) Tensor
 
@@ -276,6 +277,7 @@ type Tensor interface {
 	Tanh(ctx Context) Tensor
 	GELU(ctx Context) Tensor
 	SILU(ctx Context) Tensor
+	RELU(ctx Context) Tensor
 	Sigmoid(ctx Context) Tensor
 
 	Reshape(ctx Context, shape ...int) Tensor
@@ -297,6 +299,12 @@ type Tensor interface {
 
 	TopK(ctx Context, k int) Tensor
 	Argsort(ctx Context) Tensor
+	Mean(ctx Context) Tensor
+	Variance(ctx Context) Tensor
+	Stddev(ctx Context) Tensor
+	Sqr(ctx Context) Tensor
+	Sqrt(ctx Context) Tensor
+	Clamp(ctx Context, min, max float32) Tensor
 }
 
 // ScaledDotProductAttention implements a fused attention
@@ -138,7 +138,10 @@ func New(modelPath string, params ml.BackendParams) (ml.Backend, error) {
 	requiredMemory.CPU.Name = C.GoString(C.ggml_backend_dev_name(cpuDeviceBufferType.d))
 	var props C.struct_ggml_backend_dev_props
 	C.ggml_backend_dev_get_props(cpuDeviceBufferType.d, &props)
-	requiredMemory.CPU.UUID = C.GoString(props.uuid)
+	// Bug #11211: Reporting of UUIDs is temporarily disabled due to causing segfaults
+	// This only affects debug information until the new memory management code is in place
+	// requiredMemory.CPU.UUID = C.GoString(props.uuid)
 	requiredMemory.CPU.Weights = make([]ml.Memory, blocks+1)
 	requiredMemory.CPU.Cache = make([]ml.Memory, blocks+1)
 
@@ -155,7 +158,7 @@ func New(modelPath string, params ml.BackendParams) (ml.Backend, error) {
 	requiredMemory.GPUs[i].Name = C.GoString(C.ggml_backend_dev_name(d))
 	var props C.struct_ggml_backend_dev_props
 	C.ggml_backend_dev_get_props(d, &props)
-	requiredMemory.GPUs[i].UUID = C.GoString(props.uuid)
+	// requiredMemory.GPUs[i].UUID = C.GoString(props.uuid)
 	requiredMemory.GPUs[i].Weights = make([]ml.Memory, blocks+1)
 	requiredMemory.GPUs[i].Cache = make([]ml.Memory, blocks+1)
 	}
@@ -297,7 +300,9 @@ func New(modelPath string, params ml.BackendParams) (ml.Backend, error) {
 	if _, ok := meta.Tensors().GroupLayers()["output"]; !ok && t.Name == "token_embd.weight" {
 		createTensor(tensor{source: t, target: "output.weight"}, output.bts, blocks)
 	}
-	case contains(t.Name, "cls", "output", "output_norm"):
+	case contains(t.Name, "cls", "output", "output_norm",
+		"altup_proj", "altup_unembd_proj",
+		"per_layer_token_embd", "per_layer_model_proj", "per_layer_proj_norm"):
 		createTensor(tensor{source: t}, output.bts, blocks)
 	case strings.HasPrefix(t.Name, "v.") || strings.HasPrefix(t.Name, "mm."):
 		// TODO: assign vision tensors to the gpu if possible
@@ -602,7 +607,9 @@ func (c *Context) Forward(tensors ...ml.Tensor) ml.Context {
 }
 
 func (c *Context) Compute(tensors ...ml.Tensor) {
-	C.ggml_backend_sched_graph_compute_async(c.b.sched, c.graph)
+	if status := C.ggml_backend_sched_graph_compute_async(c.b.sched, c.graph); status != C.GGML_STATUS_SUCCESS {
+		panic(fmt.Errorf("error computing ggml graph: %v", status))
+	}
 	C.ggml_backend_sched_reset(c.b.sched)
 
 	needSync := true
@@ -891,6 +898,13 @@ func (t *Tensor) Add(ctx ml.Context, t2 ml.Tensor) ml.Tensor {
 	}
 }
 
+func (t *Tensor) Sub(ctx ml.Context, t2 ml.Tensor) ml.Tensor {
+	return &Tensor{
+		b: t.b,
+		t: C.ggml_sub(ctx.(*Context).ctx, t.t, t2.(*Tensor).t),
+	}
+}
+
 func (t *Tensor) Repeat(ctx ml.Context, dim, n int) ml.Tensor {
 	if dim < 0 || dim >= C.GGML_MAX_DIMS {
 		panic("invalid dimension")
@@ -1198,6 +1212,13 @@ func (t *Tensor) SILU(ctx ml.Context) ml.Tensor {
 	}
 }
 
+func (t *Tensor) RELU(ctx ml.Context) ml.Tensor {
+	return &Tensor{
+		b: t.b,
+		t: C.ggml_relu_inplace(ctx.(*Context).ctx, t.t),
+	}
+}
+
 func (t *Tensor) Conv2D(ctx ml.Context, t2 ml.Tensor, s0, s1, p0, p1, d0, d1 int) ml.Tensor {
 	return &Tensor{
 		b: t.b,
@@ -1273,3 +1294,42 @@ func (t *Tensor) Argsort(ctx ml.Context) ml.Tensor {
 		t: C.ggml_argsort(ctx.(*Context).ctx, t.t, C.GGML_SORT_ORDER_ASC),
 	}
 }
+
+func (t *Tensor) Mean(ctx ml.Context) ml.Tensor {
+	return &Tensor{
+		b: t.b,
+		t: C.ggml_mean(ctx.(*Context).ctx, t.t),
+	}
+}
+
+func (t *Tensor) Variance(ctx ml.Context) ml.Tensor {
+	return t.Add(ctx, t.Mean(ctx).Scale(ctx, -1)).
+		Sqr(ctx).
+		SumRows(ctx).
+		Scale(ctx, 1/float64(t.Dim(0)))
+}
+
+func (t *Tensor) Stddev(ctx ml.Context) ml.Tensor {
+	return t.Variance(ctx).Sqrt(ctx)
+}
+
+func (t *Tensor) Sqr(ctx ml.Context) ml.Tensor {
+	return &Tensor{
+		b: t.b,
+		t: C.ggml_sqr(ctx.(*Context).ctx, t.t),
+	}
+}
+
+func (t *Tensor) Sqrt(ctx ml.Context) ml.Tensor {
+	return &Tensor{
+		b: t.b,
+		t: C.ggml_sqrt(ctx.(*Context).ctx, t.t),
+	}
+}
+
+func (t *Tensor) Clamp(ctx ml.Context, min, max float32) ml.Tensor {
+	return &Tensor{
+		b: t.b,
+		t: C.ggml_clamp(ctx.(*Context).ctx, t.t, C.float(min), C.float(max)),
+	}
+}
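The Variance method above is built from the other new ops: subtract the mean, square, sum the row, and scale by 1/Dim(0); Stddev is the square root of that. A plain-Go sketch of the same arithmetic over a float slice, handy for sanity-checking the op chain (no ggml involved):

package main

import (
	"fmt"
	"math"
)

// mean and variance mirror the op chain added above:
// Variance = SumRows(Sqr(x - Mean(x))) / Dim(0); Stddev = Sqrt(Variance).
func mean(xs []float64) float64 {
	var s float64
	for _, x := range xs {
		s += x
	}
	return s / float64(len(xs))
}

func variance(xs []float64) float64 {
	m := mean(xs)
	var s float64
	for _, x := range xs {
		d := x - m
		s += d * d
	}
	return s / float64(len(xs))
}

func main() {
	xs := []float64{2, 4, 4, 4, 5, 5, 7, 9}
	fmt.Println(mean(xs), variance(xs), math.Sqrt(variance(xs))) // 5 4 2
}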
@@ -573,8 +573,16 @@ void ggml_backend_load_all_from_path(const char * dir_path) {
 
     ggml_backend_load_best("blas", silent, dir_path);
     ggml_backend_load_best("cann", silent, dir_path);
+
+    // Avoid mixed hip+cuda configurations
+    const char * hip_devices  = std::getenv("HIP_VISIBLE_DEVICES");
+    const char * rocr_devices = std::getenv("ROCR_VISIBLE_DEVICES");
+    if (!hip_devices && !rocr_devices) {
     ggml_backend_load_best("cuda", silent, dir_path);
+    } else {
     ggml_backend_load_best("hip", silent, dir_path);
+    }
+
     ggml_backend_load_best("kompute", silent, dir_path);
     ggml_backend_load_best("metal", silent, dir_path);
     ggml_backend_load_best("rpc", silent, dir_path);
20 ml/backend/ggml/ggml/src/ggml-cuda/common.cuh vendored
@@ -362,6 +362,26 @@ static __device__ __forceinline__ half2 warp_reduce_sum(half2 a) {
 #endif // FP16_AVAILABLE
 }
 
+// Row reduction kernel template - compute sum (norm=false) or mean (norm=true)
+template<bool norm>
+static __global__ void reduce_rows_f32(const float * x, float * dst, const int ncols) {
+    const int row = blockIdx.x;
+    const int col = threadIdx.x;
+
+    float sum = 0.0f;
+    for (int i = col; i < ncols; i += blockDim.x) {
+        sum += x[row * ncols + i];
+    }
+
+    sum = warp_reduce_sum(sum);
+
+    if (col != 0) {
+        return;
+    }
+
+    dst[row] = norm ? sum / ncols : sum;
+}
+
 template<int width = WARP_SIZE>
 static __device__ __forceinline__ float warp_reduce_max(float x) {
 #pragma unroll
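reduce_rows_f32 above launches one block per row; each of the WARP_SIZE threads strides across the columns accumulating a partial sum, and warp_reduce_sum folds the partials before lane 0 writes the result (divided by ncols when norm is true). A Go sketch of that strided-accumulation pattern, with lanes emulated sequentially:

package main

import "fmt"

// rowReduce emulates reduce_rows_f32's access pattern: nlanes "threads"
// each accumulate a strided slice of the row, then the partials are
// combined (the kernel does this step with warp_reduce_sum).
func rowReduce(row []float32, nlanes int, norm bool) float32 {
	partial := make([]float32, nlanes)
	for lane := 0; lane < nlanes; lane++ {
		for i := lane; i < len(row); i += nlanes { // stride over columns
			partial[lane] += row[i]
		}
	}
	var sum float32
	for _, p := range partial {
		sum += p
	}
	if norm {
		return sum / float32(len(row))
	}
	return sum
}

func main() {
	row := []float32{1, 2, 3, 4, 5}
	fmt.Println(rowReduce(row, 32, false), rowReduce(row, 32, true)) // 15 3
}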
@@ -35,6 +35,7 @@
 #include "ggml-cuda/ssm-scan.cuh"
 #include "ggml-cuda/sum.cuh"
 #include "ggml-cuda/sumrows.cuh"
+#include "ggml-cuda/mean.cuh"
 #include "ggml-cuda/tsembd.cuh"
 #include "ggml-cuda/unary.cuh"
 #include "ggml-cuda/upscale.cuh"
@@ -2322,6 +2323,9 @@ static bool ggml_cuda_compute_forward(ggml_backend_cuda_context & ctx, struct gg
 case GGML_OP_SUM_ROWS:
     ggml_cuda_op_sum_rows(ctx, dst);
     break;
+case GGML_OP_MEAN:
+    ggml_cuda_op_mean(ctx, dst);
+    break;
 case GGML_OP_SSM_CONV:
     ggml_cuda_op_ssm_conv(ctx, dst);
     break;
@@ -3211,6 +3215,7 @@ static bool ggml_backend_cuda_device_supports_op(ggml_backend_dev_t dev, const g
 case GGML_OP_POOL_2D:
 case GGML_OP_SUM:
 case GGML_OP_SUM_ROWS:
+case GGML_OP_MEAN:
 case GGML_OP_ARGSORT:
 case GGML_OP_ACC:
     return true;
19 ml/backend/ggml/ggml/src/ggml-cuda/mean.cu vendored Normal file
@@ -0,0 +1,19 @@
#include "mean.cuh"

void ggml_cuda_op_mean(ggml_backend_cuda_context & ctx, ggml_tensor * dst) {
    const ggml_tensor * src0 = dst->src[0];
    const float * src0_d = (const float *) src0->data;
    float * dst_d = (float *) dst->data;
    cudaStream_t stream = ctx.stream();

    GGML_ASSERT(src0->type == GGML_TYPE_F32);
    GGML_ASSERT(dst->type == GGML_TYPE_F32);
    GGML_ASSERT(ggml_is_contiguous(src0));

    const int64_t ncols = src0->ne[0];
    const int64_t nrows = ggml_nrows(src0);

    const dim3 block_dims(WARP_SIZE, 1, 1);
    const dim3 block_nums(nrows, 1, 1);
    reduce_rows_f32</*norm*/ true><<<block_nums, block_dims, 0, stream>>>(src0_d, dst_d, ncols);
}
3 ml/backend/ggml/ggml/src/ggml-cuda/mean.cuh vendored Normal file
@@ -0,0 +1,3 @@
#include "common.cuh"

void ggml_cuda_op_mean(ggml_backend_cuda_context & ctx, ggml_tensor * dst);
23 ml/backend/ggml/ggml/src/ggml-cuda/sumrows.cu vendored
@@ -1,25 +1,9 @@
 #include "sumrows.cuh"
 
-static __global__ void k_sum_rows_f32(const float * x, float * dst, const int ncols) {
-    const int row = blockIdx.x;
-    const int col = threadIdx.x;
-
-    float sum = 0.0f;
-    for (int i = col; i < ncols; i += blockDim.x) {
-        sum += x[row * ncols + i];
-    }
-
-    sum = warp_reduce_sum(sum);
-
-    if (col == 0) {
-        dst[row] = sum;
-    }
-}
-
 void sum_rows_f32_cuda(const float * x, float * dst, const int ncols, const int nrows, cudaStream_t stream) {
     const dim3 block_dims(WARP_SIZE, 1, 1);
     const dim3 block_nums(nrows, 1, 1);
-    k_sum_rows_f32<<<block_nums, block_dims, 0, stream>>>(x, dst, ncols);
+    reduce_rows_f32</*norm*/false><<<block_nums, block_dims, 0, stream>>>(x, dst, ncols);
 }
 
 void ggml_cuda_op_sum_rows(ggml_backend_cuda_context & ctx, ggml_tensor * dst) {
@@ -35,5 +19,8 @@ void ggml_cuda_op_sum_rows(ggml_backend_cuda_context & ctx, ggml_tensor * dst) {
     const int64_t ncols = src0->ne[0];
     const int64_t nrows = ggml_nrows(src0);
 
-    sum_rows_f32_cuda(src0_d, dst_d, ncols, nrows, stream);
+    const dim3 block_dims(WARP_SIZE, 1, 1);
+    const dim3 block_nums(nrows, 1, 1);
+
+    reduce_rows_f32</*norm=*/false><<<block_nums, block_dims, 0, stream>>>(src0_d, dst_d, ncols);
 }
@@ -1,5 +1,4 @@
 #include "common.cuh"
 
 void sum_rows_f32_cuda(const float * x, float * dst, const int ncols, const int nrows, cudaStream_t stream);
 
 void ggml_cuda_op_sum_rows(ggml_backend_cuda_context & ctx, ggml_tensor * dst);
@@ -3434,31 +3434,61 @@ kernel void kernel_neg(
     dst[tpig] = -src0[tpig];
 }
 
+template <bool norm>
 kernel void kernel_sum_rows(
+        constant ggml_metal_kargs_sum_rows & args,
         device const float * src0,
         device       float * dst,
-        constant ggml_metal_kargs_sum_rows & args,
-        uint3 tpig[[thread_position_in_grid]]) {
-    int64_t i3 = tpig.z;
-    int64_t i2 = tpig.y;
-    int64_t i1 = tpig.x;
+        threadgroup  float * shmem_f32 [[threadgroup(0)]],
+        uint3   tgpig[[threadgroup_position_in_grid]],
+        ushort3 tpitg[[thread_position_in_threadgroup]],
+        ushort  sgitg[[simdgroup_index_in_threadgroup]],
+        ushort  tiisg[[thread_index_in_simdgroup]],
+        ushort3   ntg[[threads_per_threadgroup]]) {
+    int64_t i3 = tgpig.z;
+    int64_t i2 = tgpig.y;
+    int64_t i1 = tgpig.x;
 
     if (i3 >= args.ne03 || i2 >= args.ne02 || i1 >= args.ne01) {
         return;
     }
 
+    if (sgitg == 0) {
+        shmem_f32[tiisg] = 0.0f;
+    }
+
     device const float * src_row = (device const float *) ((device const char *) src0 + i1*args.nb01 + i2*args.nb02 + i3*args.nb03);
     device       float * dst_row = (device       float *) ((device       char *) dst  + i1*args.nb1  + i2*args.nb2  + i3*args.nb3);
 
-    float row_sum = 0;
+    float sumf = 0;
 
-    for (int64_t i0 = 0; i0 < args.ne00; i0++) {
-        row_sum += src_row[i0];
+    for (int64_t i0 = tpitg.x; i0 < args.ne00; i0 += ntg.x) {
+        sumf += src_row[i0];
     }
 
-    dst_row[0] = row_sum;
+    sumf = simd_sum(sumf);
+
+    threadgroup_barrier(mem_flags::mem_threadgroup);
+
+    if (tiisg == 0) {
+        shmem_f32[sgitg] = sumf;
+    }
+
+    threadgroup_barrier(mem_flags::mem_threadgroup);
+
+    sumf = shmem_f32[tiisg];
+    sumf = simd_sum(sumf);
+
+    if (tpitg.x == 0) {
+        dst_row[0] = norm ? sumf / args.ne00 : sumf;
+    }
 }
 
+typedef decltype(kernel_sum_rows<false>) kernel_sum_rows_t;
+
+template [[host_name("kernel_sum_rows")]] kernel kernel_sum_rows_t kernel_sum_rows<false>;
+template [[host_name("kernel_mean")]]     kernel kernel_sum_rows_t kernel_sum_rows<true>;
+
 template<typename T>
 kernel void kernel_soft_max(
         device const char * src0,
33 ml/backend/ggml/ggml/src/ggml-metal/ggml-metal.m vendored
@@ -489,6 +489,7 @@ enum ggml_metal_kernel_type {
     GGML_METAL_KERNEL_TYPE_COS,
     GGML_METAL_KERNEL_TYPE_NEG,
     GGML_METAL_KERNEL_TYPE_SUM_ROWS,
+    GGML_METAL_KERNEL_TYPE_MEAN,
     GGML_METAL_KERNEL_TYPE_POOL_2D_AVG_F32,
     GGML_METAL_KERNEL_TYPE_POOL_2D_MAX_F32,
     GGML_METAL_KERNEL_TYPE_ARGMAX,
@@ -1436,6 +1437,7 @@ static struct ggml_backend_metal_context * ggml_metal_init(ggml_backend_dev_t de
     GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_COS, cos, true);
     GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_NEG, neg, true);
     GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_SUM_ROWS, sum_rows, true);
+    GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MEAN, mean, true);
     GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_ARGMAX, argmax, true);
     GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_POOL_2D_AVG_F32, pool_2d_avg_f32, true);
     GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_POOL_2D_MAX_F32, pool_2d_max_f32, true);
@@ -1634,6 +1636,7 @@ static bool ggml_metal_supports_op(const struct ggml_backend_metal_device_contex
     case GGML_OP_LOG:
         return false; // TODO: implement
     case GGML_OP_SUM_ROWS:
+    case GGML_OP_MEAN:
    case GGML_OP_SOFT_MAX:
    case GGML_OP_GROUP_NORM:
        return has_simdgroup_reduction && ggml_is_contiguous(op->src[0]);
@@ -2362,11 +2365,30 @@ static bool ggml_metal_encode_node(
                 [encoder dispatchThreadgroups:MTLSizeMake(n, 1, 1) threadsPerThreadgroup:MTLSizeMake(1, 1, 1)];
             } break;
         case GGML_OP_SUM_ROWS:
+        case GGML_OP_MEAN:
             {
                 GGML_ASSERT(src0->nb[0] == ggml_type_size(src0->type));
 
-                id<MTLComputePipelineState> pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_SUM_ROWS].pipeline;
+                id<MTLComputePipelineState> pipeline = nil;
+
+                switch (dst->op) {
+                    case GGML_OP_SUM_ROWS:
+                        pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_SUM_ROWS].pipeline;
+                        break;
+                    case GGML_OP_MEAN:
+                        pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_MEAN].pipeline;
+                        break;
+                    default:
+                        GGML_ABORT("fatal error");
+                }
+
+                int nth = 32; // SIMD width
+
+                while (nth < ne00 && nth < (int) pipeline.maxTotalThreadsPerThreadgroup) {
+                    nth *= 2;
+                }
+
+                nth = MIN(nth, ne00);
 
                 ggml_metal_kargs_sum_rows args = {
                     /*.ne00 =*/ ne00,
@@ -2396,11 +2418,12 @@ static bool ggml_metal_encode_node(
                 };
 
                 [encoder setComputePipelineState:pipeline];
-                [encoder setBuffer:id_src0 offset:offs_src0 atIndex:0];
-                [encoder setBuffer:id_dst offset:offs_dst atIndex:1];
-                [encoder setBytes:&args length:sizeof(args) atIndex:2];
+                [encoder setBytes:&args length:sizeof(args) atIndex:0];
+                [encoder setBuffer:id_src0 offset:offs_src0 atIndex:1];
+                [encoder setBuffer:id_dst offset:offs_dst atIndex:2];
+                [encoder setThreadgroupMemoryLength:32*sizeof(float) atIndex:0];
 
-                [encoder dispatchThreadgroups:MTLSizeMake(ne01, ne02, ne03) threadsPerThreadgroup:MTLSizeMake(1, 1, 1)];
+                [encoder dispatchThreadgroups:MTLSizeMake(ne01, ne02, ne03) threadsPerThreadgroup:MTLSizeMake(nth, 1, 1)];
             } break;
         case GGML_OP_SOFT_MAX:
             {
@@ -956,31 +956,61 @@ kernel void kernel_neg(
     dst[tpig] = -src0[tpig];
 }
 
+template <bool norm>
 kernel void kernel_sum_rows(
+        constant ggml_metal_kargs_sum_rows & args,
         device const float * src0,
         device       float * dst,
-        constant ggml_metal_kargs_sum_rows & args,
-        uint3 tpig[[thread_position_in_grid]]) {
-    int64_t i3 = tpig.z;
-    int64_t i2 = tpig.y;
-    int64_t i1 = tpig.x;
+        threadgroup  float * shmem_f32 [[threadgroup(0)]],
+        uint3   tgpig[[threadgroup_position_in_grid]],
+        ushort3 tpitg[[thread_position_in_threadgroup]],
+        ushort  sgitg[[simdgroup_index_in_threadgroup]],
+        ushort  tiisg[[thread_index_in_simdgroup]],
+        ushort3   ntg[[threads_per_threadgroup]]) {
+    int64_t i3 = tgpig.z;
+    int64_t i2 = tgpig.y;
+    int64_t i1 = tgpig.x;
 
     if (i3 >= args.ne03 || i2 >= args.ne02 || i1 >= args.ne01) {
         return;
     }
 
+    if (sgitg == 0) {
+        shmem_f32[tiisg] = 0.0f;
+    }
+
     device const float * src_row = (device const float *) ((device const char *) src0 + i1*args.nb01 + i2*args.nb02 + i3*args.nb03);
     device       float * dst_row = (device       float *) ((device       char *) dst  + i1*args.nb1  + i2*args.nb2  + i3*args.nb3);
 
-    float row_sum = 0;
+    float sumf = 0;
 
-    for (int64_t i0 = 0; i0 < args.ne00; i0++) {
-        row_sum += src_row[i0];
+    for (int64_t i0 = tpitg.x; i0 < args.ne00; i0 += ntg.x) {
+        sumf += src_row[i0];
     }
 
-    dst_row[0] = row_sum;
+    sumf = simd_sum(sumf);
+
+    threadgroup_barrier(mem_flags::mem_threadgroup);
+
+    if (tiisg == 0) {
+        shmem_f32[sgitg] = sumf;
+    }
+
+    threadgroup_barrier(mem_flags::mem_threadgroup);
+
+    sumf = shmem_f32[tiisg];
+    sumf = simd_sum(sumf);
+
+    if (tpitg.x == 0) {
+        dst_row[0] = norm ? sumf / args.ne00 : sumf;
+    }
 }
 
+typedef decltype(kernel_sum_rows<false>) kernel_sum_rows_t;
+
+template [[host_name("kernel_sum_rows")]] kernel kernel_sum_rows_t kernel_sum_rows<false>;
+template [[host_name("kernel_mean")]]     kernel kernel_sum_rows_t kernel_sum_rows<true>;
+
 template<typename T>
 kernel void kernel_soft_max(
         device const char * src0,
51 model/models/gemma3n/model.go Normal file
@@ -0,0 +1,51 @@
package gemma3n

import (
	"github.com/ollama/ollama/fs"
	"github.com/ollama/ollama/kvcache"
	"github.com/ollama/ollama/ml"
	"github.com/ollama/ollama/model"
	"github.com/ollama/ollama/model/input"
)

type Model struct {
	model.Base
	model.SentencePieceModel

	*TextModel
}

// Forward implements model.Model.
func (m *Model) Forward(ctx ml.Context, batch input.Batch) (ml.Tensor, error) {
	return m.TextModel.Forward(ctx, batch, m.Cache)
}

func New(c fs.Config) (model.Model, error) {
	m := Model{
		TextModel: newTextModel(c),
		SentencePieceModel: model.NewSentencePieceModel(
			&model.Vocabulary{
				Values: c.Strings("tokenizer.ggml.tokens"),
				Scores: c.Floats("tokenizer.ggml.scores"),
				Types:  c.Ints("tokenizer.ggml.token_type"),
				AddBOS: c.Bool("tokenizer.ggml.add_bos_token", true),
				BOS:    []int32{int32(c.Uint("tokenizer.ggml.bos_token_id"))},
				AddEOS: c.Bool("tokenizer.ggml.add_eos_token", false),
				EOS: append(
					[]int32{int32(c.Uint("tokenizer.ggml.eos_token_id"))},
					c.Ints("tokenizer.ggml.eos_token_ids")...,
				),
			},
		),
	}

	m.Cache = kvcache.NewWrapperCache(
		kvcache.NewCausalCache(m.Shift),
		kvcache.NewSWACache(int32(c.Uint("attention.sliding_window")), m.Shift),
	)
	return &m, nil
}

func init() {
	model.Register("gemma3n", New)
}
360
model/models/gemma3n/model_text.go
Normal file
360
model/models/gemma3n/model_text.go
Normal file
@@ -0,0 +1,360 @@
|
|||||||
|
package gemma3n
|
||||||
|
|
||||||
|
import (
|
||||||
|
"cmp"
|
||||||
|
"math"
|
||||||
|
|
||||||
|
"github.com/ollama/ollama/fs"
|
||||||
|
"github.com/ollama/ollama/kvcache"
|
||||||
|
"github.com/ollama/ollama/ml"
|
||||||
|
"github.com/ollama/ollama/ml/nn"
|
||||||
|
"github.com/ollama/ollama/ml/nn/fast"
|
||||||
|
"github.com/ollama/ollama/ml/nn/rope"
|
||||||
|
"github.com/ollama/ollama/model/input"
|
||||||
|
)
|
||||||
|
|
||||||
|
type TextModel struct {
|
||||||
|
TokenEmbedding *TextScaledWordEmbedding `gguf:"token_embd"`
|
||||||
|
|
||||||
|
*PerLayerProjector
|
||||||
|
|
||||||
|
AltupEmbd *nn.Linear `gguf:"altup_proj"`
|
||||||
|
AltupUnembd *nn.Linear `gguf:"altup_unembd_proj"`
|
||||||
|
|
||||||
|
TextLayers []TextLayer `gguf:"blk"`
|
||||||
|
OutputNorm *nn.RMSNorm `gguf:"output_norm"`
|
||||||
|
Output *nn.Linear `gguf:"output,alt:token_embd"`
|
||||||
|
|
||||||
|
TextOptions
|
||||||
|
}
|
||||||
|
|
||||||
|
func (m *TextModel) Forward(ctx ml.Context, batch input.Batch, cache kvcache.Cache) (ml.Tensor, error) {
|
||||||
|
positions := ctx.Input().FromIntSlice(batch.Positions, len(batch.Positions))
|
||||||
|
// Create a tensor of a single float32 value of 1.0 to use for altup correction
|
||||||
|
one := ctx.Input().FromFloatSlice([]float32{1.0}, 1)
|
||||||
|
|
||||||
|
inputs := m.TokenEmbedding.Forward(ctx, batch.Inputs, math.Sqrt(float64(m.hiddenSize)))
|
||||||
|
inputsPerLayer := m.PerLayerProjector.Forward(ctx, batch, inputs, &m.TextOptions)
|
||||||
|
|
||||||
|
targetMagnitude := inputs.Sqr(ctx).Mean(ctx).Sqrt(ctx)
|
||||||
|
targetMagnitude = targetMagnitude.Repeat(ctx, 2, m.altupInputs-1)
|
||||||
|
|
||||||
|
hiddenState := inputs.Repeat(ctx, 2, m.altupInputs-1)
|
||||||
|
altupProj := m.AltupEmbd.Forward(ctx, hiddenState)
|
||||||
|
altupProj = altupProj.Mul(ctx, targetMagnitude.Div(ctx, altupProj.Sqr(ctx).Mean(ctx).Sqrt(ctx)))
|
||||||
|
|
||||||
|
hiddenStates := inputs.Concat(ctx, altupProj, 2)
|
||||||
|
|
||||||
|
firstSharedKeyValue := m.hiddenLayers - m.sharedKeyValueLayers
|
||||||
|
for i, layer := range m.TextLayers {
|
||||||
|
if i < firstSharedKeyValue {
|
||||||
|
cache.SetLayer(i)
|
||||||
|
} else if m.isLocal(i) {
|
||||||
|
cache.SetLayer(firstSharedKeyValue - 2)
|
||||||
|
} else {
|
||||||
|
cache.SetLayer(firstSharedKeyValue - 1)
|
||||||
|
}
|
||||||
|
|
||||||
|
var layerType int
|
||||||
|
ropeBase := m.ropeBase
|
||||||
|
if m.isLocal(i) {
|
||||||
|
layerType = 1
|
||||||
|
ropeBase = m.ropeBaseLocal
|
||||||
|
}
|
||||||
|
|
||||||
|
cache.(*kvcache.WrapperCache).SetLayerType(layerType)
|
||||||
|
|
||||||
|
// inputPerLayer = inputsPerLayer[:, i, :]
|
||||||
|
inputPerLayer := inputsPerLayer.View(ctx, i*inputsPerLayer.Stride(1), inputsPerLayer.Dim(0), inputsPerLayer.Stride(2), inputsPerLayer.Dim(2))
|
||||||
|
hiddenStates = layer.Forward(ctx, hiddenStates, inputPerLayer, positions, one, cache, i >= firstSharedKeyValue, ropeBase, float64(m.activationSparsityScale[i]), &m.TextOptions)
|
||||||
|
}
|
||||||
|
|
||||||
|
// hiddenStates = hiddenStates[:, :, 0]
|
||||||
|
hiddenStates0 := hiddenStates.View(ctx, 0, hiddenStates.Dim(0), hiddenStates.Stride(1), hiddenStates.Dim(1))
|
||||||
|
targetMagnitude = hiddenStates0.Sqr(ctx).Mean(ctx).Sqrt(ctx)
|
||||||
|
targetMagnitude = targetMagnitude.Repeat(ctx, 2, m.altupInputs-1)
|
||||||
|
|
||||||
|
// hiddenState = hiddenStates[:, :, 1:]
|
||||||
|
hiddenState = hiddenStates.View(ctx, hiddenStates.Stride(2), hiddenStates.Dim(0), hiddenStates.Stride(1), hiddenStates.Dim(1), hiddenStates.Stride(2), m.altupInputs-1)
|
||||||
|
altupUnembdProj := m.AltupUnembd.Forward(ctx, hiddenState)
|
||||||
|
altupUnembdProj = altupUnembdProj.Mul(ctx, targetMagnitude.Div(ctx, altupUnembdProj.Sqr(ctx).Mean(ctx).Sqrt(ctx)))
|
||||||
|
|
||||||
|
hiddenStates = hiddenStates0.Concat(ctx, altupUnembdProj, 2)
|
||||||
|
|
||||||
|
hiddenStates = hiddenStates.Permute(ctx, 1, 2, 0, 3).Contiguous(ctx).Mean(ctx)
|
||||||
|
hiddenStates = hiddenStates.Permute(ctx, 2, 0, 1, 3).Contiguous(ctx)
|
||||||
|
hiddenStates = hiddenStates.Rows(ctx, ctx.Input().FromIntSlice(batch.Outputs, len(batch.Outputs)))
|
||||||
|
|
||||||
|
hiddenStates = m.OutputNorm.Forward(ctx, hiddenStates, m.eps)
|
||||||
|
return m.Output.Forward(ctx, hiddenStates), nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (m *TextModel) Shift(ctx ml.Context, layer int, key, shift ml.Tensor) (ml.Tensor, error) {
|
||||||
|
ropeBase := m.ropeBase
|
||||||
|
if m.isLocal(layer) {
|
||||||
|
ropeBase = m.ropeBaseLocal
|
||||||
|
}
|
||||||
|
|
||||||
|
return fast.RoPE(ctx, key, shift, m.headDim(), ropeBase, m.ropeScale, rope.WithTypeNeoX()), nil
|
||||||
|
}
|
||||||
|
|
||||||
|
type TextScaledWordEmbedding struct {
|
||||||
|
*nn.Embedding
|
||||||
|
}
|
||||||
|
|
||||||
|
func (e TextScaledWordEmbedding) Forward(ctx ml.Context, inputIDs ml.Tensor, scale float64) ml.Tensor {
|
||||||
|
return e.Embedding.Forward(ctx, inputIDs).Scale(ctx, scale)
|
||||||
|
}
|
||||||
|
|
||||||
|
type PerLayerProjector struct {
|
||||||
|
TokenEmbedding *TextScaledWordEmbedding `gguf:"per_layer_token_embd"`
|
||||||
|
Projector *nn.Linear `gguf:"per_layer_model_proj"`
|
||||||
|
Norm *nn.RMSNorm `gguf:"per_layer_proj_norm"`
|
||||||
|
}
|
||||||
|
|
||||||
|
func (p PerLayerProjector) Forward(ctx ml.Context, batch input.Batch, inputs ml.Tensor, opts *TextOptions) ml.Tensor {
|
||||||
|
inputsPerLayer := p.TokenEmbedding.Forward(ctx, batch.Inputs, math.Sqrt(float64(opts.hiddenSizePerLayerInput)))
|
||||||
|
inputsPerLayer = inputsPerLayer.Reshape(ctx, opts.hiddenSizePerLayerInput, opts.hiddenLayers, batch.Inputs.Dim(0), batch.Inputs.Dim(1))
|
||||||
|
|
||||||
|
perLayerProjection := p.Projector.Forward(ctx, inputs)
|
||||||
|
perLayerProjection = perLayerProjection.Scale(ctx, math.Sqrt(float64(opts.hiddenSize)))
|
||||||
|
perLayerProjection = perLayerProjection.Reshape(ctx, opts.hiddenSizePerLayerInput, opts.hiddenLayers, inputs.Dim(1))
|
||||||
|
perLayerProjection = p.Norm.Forward(ctx, perLayerProjection, opts.eps)
|
||||||
|
|
||||||
|
if inputsPerLayer != nil {
|
||||||
|
perLayerProjection = perLayerProjection.Add(ctx, inputsPerLayer)
|
||||||
|
perLayerProjection = perLayerProjection.Scale(ctx, 1/math.Sqrt(2))
|
||||||
|
}
|
||||||
|
|
||||||
|
return perLayerProjection
|
||||||
|
}
|
||||||
|
|
||||||
|
type TextLayer struct {
|
||||||
|
*AltUp
|
||||||
|
*Laurel
|
||||||
|
|
||||||
|
AttentionNorm *nn.RMSNorm `gguf:"attn_norm"`
|
||||||
|
Attention *TextAttention
|
||||||
|
PostAttentionNorm *nn.RMSNorm `gguf:"post_attention_norm"`
|
||||||
|
|
||||||
|
MLPNorm *nn.RMSNorm `gguf:"ffn_norm"`
|
||||||
|
MLP *TextMLP
|
||||||
|
PostMLPNorm *nn.RMSNorm `gguf:"post_ffw_norm"`
|
||||||
|
|
||||||
|
PerLayerInputGate *nn.Linear `gguf:"inp_gate"`
|
||||||
|
PerLayerProjection *nn.Linear `gguf:"proj"`
|
||||||
|
PostPerLayerNorm *nn.RMSNorm `gguf:"post_norm"`
|
||||||
|
}
|
||||||
|
|
||||||
|
func (d TextLayer) Forward(ctx ml.Context, hiddenStates, perLayerInput, positions, one ml.Tensor, cache kvcache.Cache, sharedKV bool, ropeBase float32, activationSparsityScale float64, opts *TextOptions) ml.Tensor {
|
||||||
|
predictions := d.Predict(ctx, hiddenStates, opts)
|
||||||
|
active := opts.altupActive(ctx, predictions)
|
||||||
|
|
||||||
|
attn := d.AttentionNorm.Forward(ctx, active, opts.eps)
|
||||||
|
laurel := d.Laurel.Forward(ctx, attn, opts)
|
||||||
|
|
||||||
|
attn = d.Attention.Forward(ctx, attn, positions, cache, sharedKV, ropeBase, opts)
|
||||||
|
attn = d.PostAttentionNorm.Forward(ctx, attn, opts.eps)
|
||||||
|
attn = active.Add(ctx, attn)
|
||||||
|
attn = attn.Add(ctx, laurel).Scale(ctx, 1/math.Sqrt(2))
|
||||||
|
|
||||||
|
mlp := d.MLPNorm.Forward(ctx, attn, opts.eps)
|
||||||
|
mlp = d.MLP.Forward(ctx, mlp, activationSparsityScale)
|
||||||
|
mlp = d.PostMLPNorm.Forward(ctx, mlp, opts.eps)
|
||||||
|
mlp = attn.Add(ctx, mlp)
|
||||||
|
|
||||||
|
predictions = d.Correct(ctx, predictions, mlp, one, opts)
|
||||||
|
active = opts.altupActive(ctx, predictions)
|
||||||
|
if opts.altupCorrectScale {
|
||||||
|
active = d.ScaleCorrectedOutput(ctx, active)
|
||||||
|
}
|
||||||
|
|
||||||
|
active = d.PerLayerInputGate.Forward(ctx, active)
|
||||||
|
active = active.GELU(ctx)
|
||||||
|
active = active.Mul(ctx, perLayerInput)
|
||||||
|
|
||||||
|
active = d.PerLayerProjection.Forward(ctx, active)
|
||||||
|
active = d.PostPerLayerNorm.Forward(ctx, active, opts.eps)
|
||||||
|
|
||||||
|
// inactive := predictions[:, :, 1:]
|
||||||
|
inactive := predictions.View(ctx, predictions.Stride(2), predictions.Dim(0), predictions.Stride(1), predictions.Dim(1), predictions.Stride(2), predictions.Dim(2)-1)
|
||||||
|
active = inactive.Add(ctx, active)
|
||||||
|
|
||||||
|
predictions0 := predictions.View(ctx, 0, predictions.Dim(0), predictions.Stride(1), predictions.Dim(1))
|
||||||
|
return predictions0.Concat(ctx, active, 2)
|
||||||
|
}
|
||||||
|
|
||||||
|
type AltUp struct {
|
||||||
|
CorrectionScale ml.Tensor `gguf:"altup_correct_scale.weight"`
|
||||||
|
PredictionCoefficient *nn.Linear `gguf:"altup_predict_coef"`
|
||||||
|
CorrectionCoefficient *nn.Linear `gguf:"altup_correct_coef"`
|
||||||
|
Router *nn.Linear `gguf:"altup_router"`
|
||||||
|
RouterNorm *nn.RMSNorm `gguf:"altup_router_norm"`
|
||||||
|
}
|
||||||
|
|
||||||
|
func (a AltUp) computeRouterModalities(ctx ml.Context, hiddenStates ml.Tensor, opts *TextOptions) ml.Tensor {
|
||||||
|
routerInputs := a.RouterNorm.Forward(ctx, hiddenStates, opts.eps).Scale(ctx, 1.0/float64(opts.hiddenSize))
|
||||||
|
return a.Router.Forward(ctx, routerInputs).Tanh(ctx)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (a AltUp) Predict(ctx ml.Context, hiddenStates ml.Tensor, opts *TextOptions) ml.Tensor {
|
||||||
|
modalities := a.computeRouterModalities(ctx, opts.altupActive(ctx, hiddenStates), opts)
|
||||||
|
|
||||||
|
coefficients := a.PredictionCoefficient.Forward(ctx, modalities)
|
||||||
|
coefficients = coefficients.Reshape(ctx, opts.altupInputs, opts.altupInputs, coefficients.Dim(1), coefficients.Dim(2))
|
||||||
|
|
||||||
|
hiddenStates = hiddenStates.Permute(ctx, 1, 2, 0, 3).Contiguous(ctx)
|
||||||
|
predictions := coefficients.Mulmat(ctx, hiddenStates)
|
||||||
|
predictions = predictions.Add(ctx, hiddenStates)
|
||||||
|
return predictions.Permute(ctx, 2, 0, 1, 3).Contiguous(ctx)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (a AltUp) Correct(ctx ml.Context, predictions, activated, one ml.Tensor, opts *TextOptions) ml.Tensor {
|
||||||
|
innovation := activated.Sub(ctx, opts.altupActive(ctx, predictions))
|
||||||
|
innovation = innovation.Repeat(ctx, 2, opts.altupInputs)
|
||||||
|
|
||||||
|
modalities := a.computeRouterModalities(ctx, activated, opts)
|
||||||
|
coefficients := a.CorrectionCoefficient.Forward(ctx, modalities)
|
||||||
|
coefficients = coefficients.Add(ctx, one)
|
||||||
|
|
||||||
|
coefficients = coefficients.Reshape(ctx, 1, coefficients.Dim(0), coefficients.Dim(1))
|
||||||
|
coefficients = coefficients.Permute(ctx, 0, 2, 1, 3).Contiguous(ctx)
|
||||||
|
|
||||||
|
corrected := innovation.Mul(ctx, coefficients)
|
||||||
|
corrected = corrected.Add(ctx, predictions)
|
||||||
|
return corrected
|
||||||
|
}
|
||||||
|
|
||||||
|
func (a AltUp) ScaleCorrectedOutput(ctx ml.Context, predictions ml.Tensor) ml.Tensor {
|
||||||
|
return predictions.Mul(ctx, a.CorrectionScale)
|
||||||
|
}
|
type Laurel struct {
	LinearLeft     *nn.Linear  `gguf:"laurel_l"`
	LinearRight    *nn.Linear  `gguf:"laurel_r"`
	PostLaurelNorm *nn.RMSNorm `gguf:"laurel_post_norm"`
}

func (l Laurel) Forward(ctx ml.Context, hiddenStates ml.Tensor, opts *TextOptions) ml.Tensor {
	residual := hiddenStates
	hiddenStates = l.LinearLeft.Forward(ctx, hiddenStates)
	hiddenStates = l.LinearRight.Forward(ctx, hiddenStates)
	hiddenStates = l.PostLaurelNorm.Forward(ctx, hiddenStates, opts.eps)
	return hiddenStates.Add(ctx, residual)
}

type TextAttention struct {
	Query     *nn.Linear  `gguf:"attn_q"`
	QueryNorm *nn.RMSNorm `gguf:"attn_q_norm"`
	Key       *nn.Linear  `gguf:"attn_k"`
	KeyNorm   *nn.RMSNorm `gguf:"attn_k_norm"`
	Value     *nn.Linear  `gguf:"attn_v"`
	Output    *nn.Linear  `gguf:"attn_output"`
}

func (attn TextAttention) Forward(ctx ml.Context, hiddenStates, positions ml.Tensor, cache kvcache.Cache, sharedKV bool, ropeBase float32, opts *TextOptions) ml.Tensor {
	batchSize := hiddenStates.Dim(1)

	query := attn.Query.Forward(ctx, hiddenStates)
	query = query.Reshape(ctx, opts.headDim(), opts.numHeads, batchSize)
	query = attn.QueryNorm.Forward(ctx, query, opts.eps)
	query = fast.RoPE(ctx, query, positions, opts.headDim(), ropeBase, opts.ropeScale, rope.WithTypeNeoX())

	var key, value ml.Tensor
	if !sharedKV {
		key = attn.Key.Forward(ctx, hiddenStates)
		key = key.Reshape(ctx, opts.headDim(), opts.numKVHeads, batchSize)
		key = attn.KeyNorm.Forward(ctx, key, opts.eps)
		key = fast.RoPE(ctx, key, positions, opts.headDim(), ropeBase, opts.ropeScale, rope.WithTypeNeoX())

		value = attn.Value.Forward(ctx, hiddenStates)
		value = value.Reshape(ctx, opts.headDim(), opts.numKVHeads, batchSize)
		value = value.RMSNorm(ctx, nil, opts.eps)
	}

	attention := nn.Attention(ctx, query, key, value, 1., cache)
	attention = attention.Reshape(ctx, attention.Dim(0)*attention.Dim(1), batchSize)
	return attn.Output.Forward(ctx, attention)
}

type TextMLP struct {
	Gate *nn.Linear `gguf:"ffn_gate"`
	Up   *nn.Linear `gguf:"ffn_up"`
	Down *nn.Linear `gguf:"ffn_down"`
}

func (mlp TextMLP) Forward(ctx ml.Context, hiddenStates ml.Tensor, activationSparsityScale float64) ml.Tensor {
	upStates := mlp.Up.Forward(ctx, hiddenStates)
	hiddenStates = mlp.Gate.Forward(ctx, hiddenStates)
	if activationSparsityScale > 0 {
		mean := hiddenStates.Mean(ctx)
		std := hiddenStates.Stddev(ctx).Scale(ctx, activationSparsityScale)
		cutoff := mean.Add(ctx, std)
		hiddenStates = hiddenStates.Sub(ctx, cutoff).RELU(ctx)
	}

	hiddenStates = hiddenStates.GELU(ctx).Mul(ctx, upStates)
	hiddenStates = mlp.Down.Forward(ctx, hiddenStates)
	return hiddenStates
}
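The one non-obvious step in TextMLP.Forward is the activation-sparsity cutoff: when a per-layer scale is configured, the gate branch is shifted down by mean + scale·stddev and clamped at zero before GELU gating, which zeroes the weakest activations. A self-contained sketch of that cutoff on a plain slice (illustrative only; the real code runs on ml.Tensor and its Mean/Stddev ops):

package main

import (
	"fmt"
	"math"
)

// sparsityCutoff zeroes activations below mean + scale*stddev,
// mirroring the Sub-then-RELU step in TextMLP.Forward.
func sparsityCutoff(xs []float64, scale float64) []float64 {
	var mean float64
	for _, x := range xs {
		mean += x
	}
	mean /= float64(len(xs))

	var variance float64
	for _, x := range xs {
		variance += (x - mean) * (x - mean)
	}
	std := math.Sqrt(variance / float64(len(xs)))

	cutoff := mean + scale*std
	out := make([]float64, len(xs))
	for i, x := range xs {
		out[i] = math.Max(x-cutoff, 0) // subtract the cutoff, then clamp (RELU)
	}
	return out
}

func main() {
	fmt.Println(sparsityCutoff([]float64{-1, 0, 1, 2, 3}, 0.5))
	// only the strongest activations survive the shifted threshold
}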
type TextOptions struct {
	hiddenLayers            int
	hiddenSize              int
	hiddenSizePerLayerInput int
	numHeads, numKVHeads    int
	keyLength, valueLength  int
	sharedKeyValueLayers    int

	altupActiveIndex  int
	altupInputs       int
	altupCorrectScale bool

	eps           float32
	ropeBase      float32
	ropeBaseLocal float32
	ropeScale     float32

	slidingWindowPattern    []bool
	activationSparsityScale []float32
}

func (o *TextOptions) altupActive(ctx ml.Context, t ml.Tensor) ml.Tensor {
	// t[:, :, o.altupActiveIndex]
	return t.View(ctx, o.altupActiveIndex*t.Stride(2), t.Dim(0), t.Stride(1), t.Dim(1))
}

func (o *TextOptions) headDim() int {
	return cmp.Or(o.keyLength, o.valueLength, o.hiddenSize/o.numHeads)
}

func (o *TextOptions) isLocal(i int) bool {
	return o.slidingWindowPattern[i]
}

func newTextModel(c fs.Config) *TextModel {
	return &TextModel{
		TextLayers: make([]TextLayer, c.Uint("block_count")),
		TextOptions: TextOptions{
			hiddenLayers:            int(c.Uint("block_count")),
			hiddenSize:              int(c.Uint("embedding_length")),
			hiddenSizePerLayerInput: int(c.Uint("embedding_length_per_layer_input")),
			numHeads:                int(c.Uint("attention.head_count")),
			numKVHeads:              int(c.Uint("attention.head_count_kv")),
			keyLength:               int(c.Uint("attention.key_length")),
			valueLength:             int(c.Uint("attention.value_length")),
			sharedKeyValueLayers:    int(c.Uint("attention.shared_kv_layers")),

			altupActiveIndex: int(c.Uint("altup.active_idx")),
			altupInputs:      int(c.Uint("altup.num_inputs")),

			eps:           c.Float("attention.layer_norm_rms_epsilon", 1e-06),
			ropeBase:      c.Float("rope.freq_base", 1_000_000),
			ropeBaseLocal: c.Float("rope.freq_base_local", 10_000),
			ropeScale:     c.Float("rope.freq_scale", 1.0),

			slidingWindowPattern:    c.Bools("attention.sliding_window_pattern"),
			activationSparsityScale: c.Floats("activation_sparsity_scale"),
		},
	}
}
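headDim relies on cmp.Or (Go 1.22+), which returns the first of its arguments that is not the zero value, so an explicit attention.key_length or attention.value_length from the GGUF metadata wins over the hiddenSize/numHeads fallback. A quick standalone illustration:

package main

import (
	"cmp"
	"fmt"
)

func main() {
	hiddenSize, numHeads := 2048, 8

	// No explicit key/value lengths (zero values): fall back to hiddenSize/numHeads.
	fmt.Println(cmp.Or(0, 0, hiddenSize/numHeads)) // 256

	// An explicit key length takes precedence over everything after it.
	fmt.Println(cmp.Or(128, 0, hiddenSize/numHeads)) // 128
}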
@@ -3,6 +3,7 @@ package models
 import (
 	_ "github.com/ollama/ollama/model/models/gemma2"
 	_ "github.com/ollama/ollama/model/models/gemma3"
+	_ "github.com/ollama/ollama/model/models/gemma3n"
 	_ "github.com/ollama/ollama/model/models/llama"
 	_ "github.com/ollama/ollama/model/models/llama4"
 	_ "github.com/ollama/ollama/model/models/mistral3"
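The blank import is purely for its side effect: each model package registers its architecture in an init function, so adding this one line is what makes gemma3n loadable. A generic, self-contained illustration of the pattern (the registry here is made up; ollama's actual registration helper lives in the model package):

package main

import "fmt"

// A toy registry standing in for the real one.
var architectures = map[string]string{}

func register(name, desc string) { architectures[name] = desc }

// In the real code this init lives in the imported package, so the blank
// import alone is enough to run it.
func init() { register("gemma3n", "Gemma 3n text model") }

func main() {
	fmt.Println(architectures["gemma3n"])
}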
@@ -27,7 +27,6 @@ function checkEnv() {
         $env:VCToolsRedistDir=(get-item "${MSVC_INSTALL}\VC\Redist\MSVC\*")[0]
     }
     # Locate CUDA versions
-    # Note: this assumes every version found will be built
     $cudaList=(get-item "C:\Program Files\NVIDIA GPU Computing Toolkit\CUDA\v*\bin\" -ea 'silentlycontinue')
     if ($cudaList.length -eq 0) {
         $d=(get-command -ea 'silentlycontinue' nvcc).path
@@ -94,19 +93,6 @@ function buildOllama() {
 
         $hashEnv = @{}
         Get-ChildItem env: | foreach { $hashEnv[$_.Name] = $_.Value }
-        if ("$script:CUDA_DIRS".Contains("v11")) {
-            $hashEnv.Keys | foreach { if ($_.Contains("CUDA_PATH_V11")) { $v11="$_" }}
-            $env:CUDAToolkit_ROOT=$hashEnv[$v11]
-            write-host "Building CUDA v11 backend libraries"
-            # Note: cuda v11 requires msvc 2019 so force the older generator
-            # to avoid 2022 (or newer) from being used as the default
-            & cmake --fresh --preset "CUDA 11" -G "Visual Studio 16 2019" --install-prefix $script:DIST_DIR
-            if ($LASTEXITCODE -ne 0) { exit($LASTEXITCODE)}
-            & cmake --build --preset "CUDA 11" --config Release --parallel $script:JOBS
-            if ($LASTEXITCODE -ne 0) { exit($LASTEXITCODE)}
-            & cmake --install build --component "CUDA" --strip
-            if ($LASTEXITCODE -ne 0) { exit($LASTEXITCODE)}
-        }
         if ("$script:CUDA_DIRS".Contains("v12")) {
             $hashEnv.Keys | foreach { if ($_.Contains("CUDA_PATH_V12")) { $v12="$_" }}
             $env:CUDAToolkit_ROOT=$hashEnv[$v12]
@@ -127,12 +113,17 @@ function buildOllama() {
         $env:HIPCXX="${env:HIP_PATH}\bin\clang++.exe"
         $env:HIP_PLATFORM="amd"
         $env:CMAKE_PREFIX_PATH="${env:HIP_PATH}"
-        & cmake --fresh --preset "ROCm 6" -G Ninja -DCMAKE_C_COMPILER=clang -DCMAKE_CXX_COMPILER=clang++ --install-prefix $script:DIST_DIR
+        & cmake --fresh --preset "ROCm 6" -G Ninja `
+            -DCMAKE_C_COMPILER=clang `
+            -DCMAKE_CXX_COMPILER=clang++ `
+            -DCMAKE_C_FLAGS="-parallel-jobs=4 -Wno-ignored-attributes -Wno-deprecated-pragma" `
+            -DCMAKE_CXX_FLAGS="-parallel-jobs=4 -Wno-ignored-attributes -Wno-deprecated-pragma" `
+            --install-prefix $script:DIST_DIR
         if ($LASTEXITCODE -ne 0) { exit($LASTEXITCODE)}
         $env:HIPCXX=""
         $env:HIP_PLATFORM=""
         $env:CMAKE_PREFIX_PATH=""
-        & cmake --build --preset "ROCm" --config Release --parallel $script:JOBS
+        & cmake --build --preset "ROCm 6" --config Release --parallel $script:JOBS
         if ($LASTEXITCODE -ne 0) { exit($LASTEXITCODE)}
         & cmake --install build --component "HIP" --strip
         if ($LASTEXITCODE -ne 0) { exit($LASTEXITCODE)}
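Two fixes ride along here: the build step's preset name is corrected from "ROCm" to "ROCm 6" so it matches the configure preset, and the configure step gains explicit C/C++ flags. As I read it, the -parallel-jobs and -Wno-* options are passed to ROCm's clang to parallelize HIP device compilation and quiet warnings that are noisy with this toolchain, though the exact effect depends on the ROCm clang version.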
@@ -10,9 +10,7 @@ OLLAMA_COMMON_BUILD_ARGS="--build-arg=VERSION \
     --build-arg=GOFLAGS \
     --build-arg=OLLAMA_CUSTOM_CPU_DEFS \
     --build-arg=OLLAMA_SKIP_CUDA_GENERATE \
-    --build-arg=OLLAMA_SKIP_CUDA_11_GENERATE \
     --build-arg=OLLAMA_SKIP_CUDA_12_GENERATE \
-    --build-arg=CUDA_V11_ARCHITECTURES \
     --build-arg=CUDA_V12_ARCHITECTURES \
     --build-arg=OLLAMA_SKIP_ROCM_GENERATE \
     --build-arg=OLLAMA_FAST_BUILD \
@@ -23,7 +23,7 @@ import (
 
 	"github.com/ollama/ollama/api"
 	"github.com/ollama/ollama/envconfig"
-	"github.com/ollama/ollama/fs/ggml"
+	"github.com/ollama/ollama/fs/gguf"
 	"github.com/ollama/ollama/parser"
 	"github.com/ollama/ollama/template"
 	"github.com/ollama/ollama/thinking"
@@ -73,23 +73,19 @@ func (m *Model) Capabilities() []model.Capability {
 	capabilities := []model.Capability{}
 
 	// Check for completion capability
-	r, err := os.Open(m.ModelPath)
+	f, err := gguf.Open(m.ModelPath)
 	if err == nil {
-		defer r.Close()
+		defer f.Close()
 
-		f, err := ggml.Decode(r, 1024)
-		if err == nil {
-			if _, ok := f.KV()[fmt.Sprintf("%s.pooling_type", f.KV().Architecture())]; ok {
-				capabilities = append(capabilities, model.CapabilityEmbedding)
-			} else {
-				capabilities = append(capabilities, model.CapabilityCompletion)
-			}
-			if _, ok := f.KV()[fmt.Sprintf("%s.vision.block_count", f.KV().Architecture())]; ok {
-				capabilities = append(capabilities, model.CapabilityVision)
-			}
-		} else {
-			slog.Error("couldn't decode ggml", "error", err)
-		}
+		if f.KeyValue("pooling_type").Valid() {
+			capabilities = append(capabilities, model.CapabilityEmbedding)
+		} else {
+			// If no embedding is specified, we assume the model supports completion
+			capabilities = append(capabilities, model.CapabilityCompletion)
+		}
+		if f.KeyValue("vision.block_count").Valid() {
+			capabilities = append(capabilities, model.CapabilityVision)
+		}
 	} else {
 		slog.Error("couldn't open model file", "error", err)
 	}
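The rewritten Capabilities no longer decodes the full GGUF; gguf.Open lazily probes metadata, and the decision reduces to two key lookups: a pooling_type marks an embedding model (otherwise completion is assumed), and a vision.block_count adds vision. A standalone sketch of that decision table, with the file's metadata stood in by a simple map:

package main

import "fmt"

// capabilities mirrors the key-presence logic of the rewritten method.
func capabilities(kv map[string]bool) []string {
	var caps []string
	if kv["pooling_type"] {
		caps = append(caps, "embedding")
	} else {
		// No pooling type: assume the model supports completion.
		caps = append(caps, "completion")
	}
	if kv["vision.block_count"] {
		caps = append(caps, "vision")
	}
	return caps
}

func main() {
	fmt.Println(capabilities(map[string]bool{"pooling_type": true}))       // [embedding]
	fmt.Println(capabilities(map[string]bool{"vision.block_count": true})) // [completion vision]
}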
@@ -1,123 +1,42 @@
 package server
 
 import (
-	"bytes"
-	"encoding/binary"
-	"errors"
-	"os"
-	"path/filepath"
 	"strings"
 	"testing"
 
+	"github.com/ollama/ollama/fs/ggml"
 	"github.com/ollama/ollama/template"
 	"github.com/ollama/ollama/types/model"
 )
 
-// Constants for GGUF magic bytes and version
-var (
-	ggufMagic = []byte{0x47, 0x47, 0x55, 0x46} // "GGUF"
-	ggufVer   = uint32(3)                      // Version 3
-)
-
-// Helper function to create mock GGUF data
-func createMockGGUFData(architecture string, vision bool) []byte {
-	var buf bytes.Buffer
-
-	// Write GGUF header
-	buf.Write(ggufMagic)
-	binary.Write(&buf, binary.LittleEndian, ggufVer)
-
-	// Write tensor count (0 for our test)
-	var numTensors uint64 = 0
-	binary.Write(&buf, binary.LittleEndian, numTensors)
-
-	// Calculate number of metadata entries
-	numMetaEntries := uint64(1) // architecture entry
-	if vision {
-		numMetaEntries++
-	}
-	// Add embedding entry if architecture is "bert"
-	if architecture == "bert" {
-		numMetaEntries++
-	}
-	binary.Write(&buf, binary.LittleEndian, numMetaEntries)
-
-	// Write architecture metadata
-	archKey := "general.architecture"
-	keyLen := uint64(len(archKey))
-	binary.Write(&buf, binary.LittleEndian, keyLen)
-	buf.WriteString(archKey)
-
-	// String type (8)
-	var strType uint32 = 8
-	binary.Write(&buf, binary.LittleEndian, strType)
-
-	// String length
-	strLen := uint64(len(architecture))
-	binary.Write(&buf, binary.LittleEndian, strLen)
-	buf.WriteString(architecture)
-
-	if vision {
-		visionKey := architecture + ".vision.block_count"
-		keyLen = uint64(len(visionKey))
-		binary.Write(&buf, binary.LittleEndian, keyLen)
-		buf.WriteString(visionKey)
-
-		// uint32 type (4)
-		var uint32Type uint32 = 4
-		binary.Write(&buf, binary.LittleEndian, uint32Type)
-
-		// uint32 value (1)
-		var countVal uint32 = 1
-		binary.Write(&buf, binary.LittleEndian, countVal)
-	}
-	// Write embedding metadata if architecture is "bert"
-	if architecture == "bert" {
-		poolKey := architecture + ".pooling_type"
-		keyLen = uint64(len(poolKey))
-		binary.Write(&buf, binary.LittleEndian, keyLen)
-		buf.WriteString(poolKey)
-
-		// uint32 type (4)
-		var uint32Type uint32 = 4
-		binary.Write(&buf, binary.LittleEndian, uint32Type)
-
-		// uint32 value (1)
-		var poolingVal uint32 = 1
-		binary.Write(&buf, binary.LittleEndian, poolingVal)
-	}
-
-	return buf.Bytes()
-}
-
 func TestModelCapabilities(t *testing.T) {
-	// Create a temporary directory for test files
-	tempDir := t.TempDir()
-
-	// Create different types of mock model files
-	completionModelPath := filepath.Join(tempDir, "model.bin")
-	visionModelPath := filepath.Join(tempDir, "vision_model.bin")
-	embeddingModelPath := filepath.Join(tempDir, "embedding_model.bin")
-	// Create a simple model file for tests that don't depend on GGUF content
-	simpleModelPath := filepath.Join(tempDir, "simple_model.bin")
-
-	if err := errors.Join(
-		os.WriteFile(completionModelPath, createMockGGUFData("llama", false), 0o644),
-		os.WriteFile(visionModelPath, createMockGGUFData("llama", true), 0o644),
-		os.WriteFile(embeddingModelPath, createMockGGUFData("bert", false), 0o644),
-		os.WriteFile(simpleModelPath, []byte("dummy model data"), 0o644),
-	); err != nil {
-		t.Fatalf("Failed to create model files: %v", err)
-	}
+	// Create completion model (llama architecture without vision)
+	completionModelPath, _ := createBinFile(t, ggml.KV{
+		"general.architecture": "llama",
+	}, []*ggml.Tensor{})
+
+	// Create vision model (llama architecture with vision block count)
+	visionModelPath, _ := createBinFile(t, ggml.KV{
+		"general.architecture":     "llama",
+		"llama.vision.block_count": uint32(1),
+	}, []*ggml.Tensor{})
+
+	// Create embedding model (bert architecture with pooling type)
+	embeddingModelPath, _ := createBinFile(t, ggml.KV{
+		"general.architecture": "bert",
+		"bert.pooling_type":    uint32(1),
+	}, []*ggml.Tensor{})
 
 	toolsInsertTemplate, err := template.Parse("{{ .prompt }}{{ if .tools }}{{ .tools }}{{ end }}{{ if .suffix }}{{ .suffix }}{{ end }}")
 	if err != nil {
 		t.Fatalf("Failed to parse template: %v", err)
 	}
 
 	chatTemplate, err := template.Parse("{{ .prompt }}")
 	if err != nil {
 		t.Fatalf("Failed to parse template: %v", err)
 	}
 
 	toolsTemplate, err := template.Parse("{{ .prompt }}{{ if .tools }}{{ .tools }}{{ end }}")
 	if err != nil {
 		t.Fatalf("Failed to parse template: %v", err)
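These tests now lean on a createBinFile helper (defined elsewhere in the server test package) instead of ~80 lines of hand-rolled GGUF bytes. Its implementation isn't in this diff; a plausible shape, assuming it simply wraps ggml.WriteGGUF the way the removed inline code did, and assuming the usual os, testing, and fs/ggml imports. The real helper also returns a second value, which every call site here discards, so this sketch returns only the path:

// Hedged sketch of a createBinFile-style helper, not the repo's actual code.
func createTestGGUF(t *testing.T, kv ggml.KV, tensors []*ggml.Tensor) string {
	t.Helper()

	f, err := os.CreateTemp(t.TempDir(), "*.bin")
	if err != nil {
		t.Fatal(err)
	}
	defer f.Close()

	// Write a minimal-but-valid GGUF with the requested metadata and tensors.
	if err := ggml.WriteGGUF(f, kv, tensors); err != nil {
		t.Fatal(err)
	}
	return f.Name()
}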
@@ -145,21 +64,13 @@ func TestModelCapabilities(t *testing.T) {
 			},
 			expectedCaps: []model.Capability{model.CapabilityCompletion, model.CapabilityTools, model.CapabilityInsert},
 		},
-		{
-			name: "model with tools and insert capability",
-			model: Model{
-				ModelPath: simpleModelPath,
-				Template:  toolsInsertTemplate,
-			},
-			expectedCaps: []model.Capability{model.CapabilityTools, model.CapabilityInsert},
-		},
 		{
 			name: "model with tools capability",
 			model: Model{
-				ModelPath: simpleModelPath,
+				ModelPath: completionModelPath,
 				Template:  toolsTemplate,
 			},
-			expectedCaps: []model.Capability{model.CapabilityTools},
+			expectedCaps: []model.Capability{model.CapabilityCompletion, model.CapabilityTools},
 		},
 		{
 			name: "model with vision capability",
@@ -224,29 +135,33 @@
 }
 
 func TestModelCheckCapabilities(t *testing.T) {
-	// Create a temporary directory for test files
-	tempDir := t.TempDir()
+	// Create simple model file for tests that don't depend on GGUF content
+	completionModelPath, _ := createBinFile(t, ggml.KV{
+		"general.architecture": "llama",
+	}, []*ggml.Tensor{})
 
-	visionModelPath := filepath.Join(tempDir, "vision_model.bin")
-	simpleModelPath := filepath.Join(tempDir, "model.bin")
-	embeddingModelPath := filepath.Join(tempDir, "embedding_model.bin")
+	// Create vision model (llama architecture with vision block count)
+	visionModelPath, _ := createBinFile(t, ggml.KV{
+		"general.architecture":     "llama",
+		"llama.vision.block_count": uint32(1),
+	}, []*ggml.Tensor{})
 
-	if err := errors.Join(
-		os.WriteFile(simpleModelPath, []byte("dummy model data"), 0o644),
-		os.WriteFile(visionModelPath, createMockGGUFData("llama", true), 0o644),
-		os.WriteFile(embeddingModelPath, createMockGGUFData("bert", false), 0o644),
-	); err != nil {
-		t.Fatalf("Failed to create model files: %v", err)
-	}
+	// Create embedding model (bert architecture with pooling type)
+	embeddingModelPath, _ := createBinFile(t, ggml.KV{
+		"general.architecture": "bert",
+		"bert.pooling_type":    uint32(1),
+	}, []*ggml.Tensor{})
 
 	toolsInsertTemplate, err := template.Parse("{{ .prompt }}{{ if .tools }}{{ .tools }}{{ end }}{{ if .suffix }}{{ .suffix }}{{ end }}")
 	if err != nil {
 		t.Fatalf("Failed to parse template: %v", err)
 	}
 
 	chatTemplate, err := template.Parse("{{ .prompt }}")
 	if err != nil {
 		t.Fatalf("Failed to parse template: %v", err)
 	}
 
 	toolsTemplate, err := template.Parse("{{ .prompt }}{{ if .tools }}{{ .tools }}{{ end }}")
 	if err != nil {
 		t.Fatalf("Failed to parse template: %v", err)
@@ -261,7 +176,7 @@ func TestModelCheckCapabilities(t *testing.T) {
 		{
 			name: "completion model without tools capability",
 			model: Model{
-				ModelPath: simpleModelPath,
+				ModelPath: completionModelPath,
 				Template:  chatTemplate,
 			},
 			checkCaps: []model.Capability{model.CapabilityTools},
@@ -270,7 +185,7 @@ func TestModelCheckCapabilities(t *testing.T) {
 		{
 			name: "model with all needed capabilities",
 			model: Model{
-				ModelPath: simpleModelPath,
+				ModelPath: completionModelPath,
 				Template:  toolsInsertTemplate,
 			},
 			checkCaps: []model.Capability{model.CapabilityTools, model.CapabilityInsert},
@@ -278,7 +193,7 @@ func TestModelCheckCapabilities(t *testing.T) {
 		{
 			name: "model missing insert capability",
 			model: Model{
-				ModelPath: simpleModelPath,
+				ModelPath: completionModelPath,
 				Template:  toolsTemplate,
 			},
 			checkCaps: []model.Capability{model.CapabilityInsert},
@@ -287,7 +202,7 @@ func TestModelCheckCapabilities(t *testing.T) {
 		{
 			name: "model missing vision capability",
 			model: Model{
-				ModelPath: simpleModelPath,
+				ModelPath: completionModelPath,
 				Template:  toolsTemplate,
 			},
 			checkCaps: []model.Capability{model.CapabilityVision},
@@ -312,7 +227,7 @@ func TestModelCheckCapabilities(t *testing.T) {
 		{
 			name: "unknown capability",
 			model: Model{
-				ModelPath: simpleModelPath,
+				ModelPath: completionModelPath,
 				Template:  chatTemplate,
 			},
 			checkCaps: []model.Capability{"unknown"},
@@ -231,6 +231,8 @@ func newType(t *fsggml.Tensor, kv fsggml.KV, qs *quantizeState, ftype fsggml.Fil
 	// do not quantize relative position bias (T5)
 	quantize = quantize && !strings.Contains(name, "attn_rel_b.weight")
 
+	quantize = quantize && !strings.Contains(name, "per_layer_token_embd.weight")
+
 	newType := fsggml.TensorType(t.Kind)
 	if quantize {
 		// get more optimal quantization type based on the tensor shape, layer, etc.
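With this, gemma3n's per-layer token embedding (per_layer_token_embd.weight) joins T5's relative position bias on the do-not-quantize list, so it stays at its original precision regardless of the requested file type.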
@@ -257,16 +257,8 @@ func TestQuantizeModel(t *testing.T) {
 
 	for _, tt := range cases {
 		t.Run(tt.name, func(t *testing.T) {
-			f, err := os.CreateTemp(t.TempDir(), tt.name)
-			if err != nil {
-				t.Fatal(err.Error())
-			}
-			defer f.Close()
-			err = fsggml.WriteGGUF(f, tt.kv, tt.tensors)
-			if err != nil {
-				t.Fatalf("failed to create initial model: %s", err)
-			}
-			fp, err := os.Open(f.Name())
+			p, _ := createBinFile(t, tt.kv, tt.tensors)
+			fp, err := os.Open(p)
 			if err != nil {
 				t.Fatal(err.Error())
 			}
@@ -191,7 +191,7 @@ func (s *Scheduler) processPending(ctx context.Context) {
 		}
 
 		// Load model for fitting
-		ggml, err := llm.LoadModel(pending.model.ModelPath, 0)
+		ggml, err := llm.LoadModel(pending.model.ModelPath, 1024)
 		if err != nil {
 			pending.errCh <- err
 			break
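The second argument to llm.LoadModel appears to bound how many elements of each array-valued metadata entry get decoded, mirroring the 1024 the old Capabilities code passed to ggml.Decode; moving from 0 to 1024 gives the scheduler's fit-check a bounded prefix of those arrays without reading entire tokenizer vocabularies.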
@@ -112,11 +112,7 @@ func newScenarioRequest(t *testing.T, ctx context.Context, modelName string, est
 	b.ctx, b.ctxDone = context.WithCancel(ctx)
 	t.Helper()
 
-	f, err := os.CreateTemp(t.TempDir(), modelName)
-	require.NoError(t, err)
-	defer f.Close()
-
-	require.NoError(t, ggml.WriteGGUF(f, ggml.KV{
+	p, _ := createBinFile(t, ggml.KV{
 		"general.architecture": "llama",
 		"llama.context_length": uint32(32),
 		"llama.embedding_length": uint32(4096),
@@ -129,14 +125,14 @@ func newScenarioRequest(t *testing.T, ctx context.Context, modelName string, est
 	}, []*ggml.Tensor{
 		{Name: "blk.0.attn.weight", Kind: uint32(0), Offset: uint64(0), Shape: []uint64{1, 1, 1, 1}, WriterTo: bytes.NewReader(make([]byte, 32))},
 		{Name: "output.weight", Kind: uint32(0), Offset: uint64(0), Shape: []uint64{1, 1, 1, 1}, WriterTo: bytes.NewReader(make([]byte, 32))},
-	}))
-	require.NoError(t, err)
-
-	fname := f.Name()
-	model := &Model{Name: modelName, ModelPath: fname}
-	b.f, err = llm.LoadModel(model.ModelPath, 0)
-	require.NoError(t, err)
-
+	})
+
+	model := &Model{Name: modelName, ModelPath: p}
+	f, err := llm.LoadModel(model.ModelPath, 0)
+	if err != nil {
+		t.Fatal(err)
+	}
+	b.f = f
 	if duration == nil {
 		duration = &api.Duration{Duration: 5 * time.Millisecond}
 	}