From 784bf88b0d0005b771e1bab5adfd6094a3693494 Mon Sep 17 00:00:00 2001
From: Daniel Hiltgen
Date: Tue, 18 Jun 2024 16:22:47 -0700
Subject: [PATCH 01/17] Wire up windows AMD driver reporting

This seems to be the ROCm version, not the actual driver version, but
it may be useful for toggling logic for VRAM reporting in the future
---
 gpu/amd_hip_windows.go |  5 ++---
 gpu/amd_windows.go     | 17 +++++++----------
 2 files changed, 9 insertions(+), 13 deletions(-)

diff --git a/gpu/amd_hip_windows.go b/gpu/amd_hip_windows.go
index 8572a24c..2586278c 100644
--- a/gpu/amd_hip_windows.go
+++ b/gpu/amd_hip_windows.go
@@ -84,9 +84,8 @@ func (hl *HipLib) AMDDriverVersion() (driverMajor, driverMinor int, err error) {
 	}
 	slog.Debug("hipDriverGetVersion", "version", version)
 
-	// TODO - this isn't actually right, but the docs claim hipDriverGetVersion isn't accurate anyway...
-	driverMajor = version / 1000
-	driverMinor = (version - (driverMajor * 1000)) / 10
+	driverMajor = version / 10000000
+	driverMinor = (version - (driverMajor * 10000000)) / 100000
 	return driverMajor, driverMinor, nil
 }
 
diff --git a/gpu/amd_windows.go b/gpu/amd_windows.go
index 21585277..0c76f6b9 100644
--- a/gpu/amd_windows.go
+++ b/gpu/amd_windows.go
@@ -35,12 +35,11 @@ func AMDGetGPUInfo() []RocmGPUInfo {
 	}
 	defer hl.Release()
 
-	// TODO - this reports incorrect version information, so omitting for now
-	// driverMajor, driverMinor, err := hl.AMDDriverVersion()
-	// if err != nil {
-	// 	// For now this is benign, but we may eventually need to fail compatibility checks
-	// 	slog.Debug("error looking up amd driver version", "error", err)
-	// }
+	driverMajor, driverMinor, err := hl.AMDDriverVersion()
+	if err != nil {
+		// For now this is benign, but we may eventually need to fail compatibility checks
+		slog.Debug("error looking up amd driver version", "error", err)
+	}
 
 	// Note: the HIP library automatically handles subsetting to any HIP_VISIBLE_DEVICES the user specified
 	count := hl.HipGetDeviceCount()
@@ -131,10 +130,8 @@ func AMDGetGPUInfo() []RocmGPUInfo {
 			MinimumMemory: rocmMinimumMemory,
 			Name:          name,
 			Compute:       gfx,
-
-			// TODO - this information isn't accurate on windows, so don't report it until we find the right way to retrieve
-			// DriverMajor: driverMajor,
-			// DriverMinor: driverMinor,
+			DriverMajor:   driverMajor,
+			DriverMinor:   driverMinor,
 		},
 		index: i,
 	}

From b44320db1302baea88e2f318d984218c68faa5f1 Mon Sep 17 00:00:00 2001
From: Daniel Hiltgen
Date: Mon, 8 Jul 2024 18:24:21 -0700
Subject: [PATCH 02/17] Bundle missing CRT libraries

Some users are experiencing runner startup errors due to not having
these MSVC redist libraries on their host
---
 scripts/build_windows.ps1 | 5 ++++-
 1 file changed, 4 insertions(+), 1 deletion(-)

diff --git a/scripts/build_windows.ps1 b/scripts/build_windows.ps1
index b3991ce1..edc73759 100644
--- a/scripts/build_windows.ps1
+++ b/scripts/build_windows.ps1
@@ -107,9 +107,12 @@ function gatherDependencies() {
 
     # TODO - this varies based on host build system and MSVC version - drive from dumpbin output
     # currently works for Win11 + MSVC 2019 + Cuda V11
-    cp "${env:VCToolsRedistDir}\x64\Microsoft.VC*.CRT\msvcp140.dll" "${script:DEPS_DIR}\ollama_runners\"
+    cp "${env:VCToolsRedistDir}\x64\Microsoft.VC*.CRT\msvcp140*.dll" "${script:DEPS_DIR}\ollama_runners\"
     cp "${env:VCToolsRedistDir}\x64\Microsoft.VC*.CRT\vcruntime140.dll" "${script:DEPS_DIR}\ollama_runners\"
     cp "${env:VCToolsRedistDir}\x64\Microsoft.VC*.CRT\vcruntime140_1.dll" "${script:DEPS_DIR}\ollama_runners\"
+    foreach ($part in $("runtime", "stdio", "filesystem", "math", "convert", "heap", "string", "time", "locale", "environment")) {
+        cp "$env:VCToolsRedistDir\..\..\..\Tools\Llvm\x64\bin\api-ms-win-crt-${part}*.dll" "${script:DEPS_DIR}\ollama_runners\"
+    }
 
     cp "${script:SRC_DIR}\app\ollama_welcome.ps1" "${script:SRC_DIR}\dist\"

From f6f759fc5fb4868125b8a25c28ce96d2c0980ef7 Mon Sep 17 00:00:00 2001
From: Daniel Hiltgen
Date: Tue, 9 Jul 2024 10:27:53 -0700
Subject: [PATCH 03/17] Detect CUDA OS Overhead

This adds logic to detect skew between the driver and management
library, which can be attributed to OS overhead, and records that so we
can adjust subsequent management library free VRAM updates and avoid
OOM scenarios.
---
 gpu/gpu.go   | 27 +++++++++++++++++++++++++++
 gpu/types.go |  3 ++-
 2 files changed, 29 insertions(+), 1 deletion(-)

diff --git a/gpu/gpu.go b/gpu/gpu.go
index 29a3c103..58144991 100644
--- a/gpu/gpu.go
+++ b/gpu/gpu.go
@@ -274,6 +274,28 @@ func GetGPUInfo() GpuInfoList {
 			gpuInfo.DriverMajor = driverMajor
 			gpuInfo.DriverMinor = driverMinor
 
+			// query the management library as well so we can record any skew between the two
+			// which represents overhead on the GPU we must set aside on subsequent updates
+			if cHandles.nvml != nil {
+				C.nvml_get_free(*cHandles.nvml, C.int(gpuInfo.index), &memInfo.free, &memInfo.total, &memInfo.used)
+				if memInfo.err != nil {
+					slog.Warn("error looking up nvidia GPU memory", "error", C.GoString(memInfo.err))
+					C.free(unsafe.Pointer(memInfo.err))
+				} else {
+					if memInfo.free != 0 && uint64(memInfo.free) > gpuInfo.FreeMemory {
+						gpuInfo.OSOverhead = uint64(memInfo.free) - gpuInfo.FreeMemory
+						slog.Info("detected OS VRAM overhead",
+							"id", gpuInfo.ID,
+							"library", gpuInfo.Library,
+							"compute", gpuInfo.Compute,
+							"driver", fmt.Sprintf("%d.%d", gpuInfo.DriverMajor, gpuInfo.DriverMinor),
+							"name", gpuInfo.Name,
+							"overhead", format.HumanBytes2(gpuInfo.OSOverhead),
+						)
+					}
+				}
+			}
+			// TODO potentially sort on our own algorithm instead of what the underlying GPU library does...
cudaGPUs = append(cudaGPUs, gpuInfo) } @@ -374,9 +396,14 @@ func GetGPUInfo() GpuInfoList { slog.Warn("error looking up nvidia GPU memory") continue } + if cHandles.nvml != nil && gpu.OSOverhead > 0 { + // When using the management library update based on recorded overhead + memInfo.free -= C.uint64_t(gpu.OSOverhead) + } slog.Debug("updating cuda memory data", "gpu", gpu.ID, "name", gpu.Name, + "overhead", format.HumanBytes2(gpu.OSOverhead), slog.Group( "before", "total", format.HumanBytes2(gpu.TotalMemory), diff --git a/gpu/types.go b/gpu/types.go index 2eaa9bae..7a7749b8 100644 --- a/gpu/types.go +++ b/gpu/types.go @@ -52,7 +52,8 @@ type CPUInfo struct { type CudaGPUInfo struct { GpuInfo - index int //nolint:unused,nolintlint + OSOverhead uint64 // Memory overhead between the driver library and management library + index int //nolint:unused,nolintlint } type CudaGPUInfoList []CudaGPUInfo From 22c81f62ec845bd8f77215ae5599be14117ec8db Mon Sep 17 00:00:00 2001 From: Daniel Hiltgen Date: Wed, 10 Jul 2024 09:01:33 -0700 Subject: [PATCH 04/17] Remove duplicate merge glitch --- llm/server.go | 4 ---- 1 file changed, 4 deletions(-) diff --git a/llm/server.go b/llm/server.go index 08dc04d5..aa504d19 100644 --- a/llm/server.go +++ b/llm/server.go @@ -254,10 +254,6 @@ func NewLlamaServer(gpus gpu.GpuInfoList, model string, ggml *GGML, adapters, pr params = append(params, "--tensor-split", estimate.TensorSplit) } - if estimate.TensorSplit != "" { - params = append(params, "--tensor-split", estimate.TensorSplit) - } - for i := range len(servers) { dir := availableServers[servers[i]] if dir == "" { From 1f50356e8e3c3a2956c5ffacc3b9fa33b8285541 Mon Sep 17 00:00:00 2001 From: Daniel Hiltgen Date: Wed, 10 Jul 2024 11:01:22 -0700 Subject: [PATCH 05/17] Bump ROCm on windows to 6.1.2 This also adjusts our algorithm to favor our bundled ROCm. I've confirmed VRAM reporting still doesn't work properly so we can't yet enable concurrency by default. 
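In sketch form, the adjusted lookup now probes the bundled payload next
to the executable before any system install. The following is a minimal,
hypothetical condensation of the commonAMDValidateLibDir change below;
the stat-based probe and the findROCmLibDir helper name are illustrative
stand-ins, not the real implementation:

    package main

    import (
    	"errors"
    	"fmt"
    	"os"
    	"path/filepath"
    )

    // rocmLibUsable is an assumed stand-in; the real check validates the ROCm libraries.
    func rocmLibUsable(dir string) bool {
    	_, err := os.Stat(filepath.Join(dir, "hipblas.dll"))
    	return err == nil
    }

    func findROCmLibDir() (string, error) {
    	// 1. Favor the ROCm payload bundled next to the ollama executable.
    	if exe, err := os.Executable(); err == nil {
    		if dir := filepath.Join(filepath.Dir(exe), "rocm"); rocmLibUsable(dir) {
    			return dir, nil
    		}
    	}
    	// 2. Fall back to an explicit HIP_PATH, then the standard install location.
    	candidates := []string{"C:\\Program Files\\AMD\\ROCm\\6.1\\bin"}
    	if hip := os.Getenv("HIP_PATH"); hip != "" {
    		candidates = append([]string{filepath.Join(hip, "bin")}, candidates...)
    	}
    	for _, dir := range candidates {
    		if rocmLibUsable(dir) {
    			return dir, nil
    		}
    	}
    	return "", errors.New("no suitable rocm found, falling back to CPU")
    }

    func main() { fmt.Println(findROCmLibDir()) }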
--- .github/workflows/release.yaml | 2 +- .github/workflows/test.yaml | 2 +- docs/faq.md | 2 +- gpu/amd_common.go | 23 +++++++++++------------ gpu/amd_windows.go | 4 ++-- llm/generate/gen_windows.ps1 | 12 +----------- 6 files changed, 17 insertions(+), 28 deletions(-) diff --git a/.github/workflows/release.yaml b/.github/workflows/release.yaml index 61ca3c43..5ae630c3 100644 --- a/.github/workflows/release.yaml +++ b/.github/workflows/release.yaml @@ -147,7 +147,7 @@ jobs: run: | $ErrorActionPreference = "Stop" write-host "downloading AMD HIP Installer" - Invoke-WebRequest -Uri "https://download.amd.com/developer/eula/rocm-hub/AMD-Software-PRO-Edition-23.Q4-WinSvr2022-For-HIP.exe" -OutFile "${env:RUNNER_TEMP}\rocm-install.exe" + Invoke-WebRequest -Uri "https://download.amd.com/developer/eula/rocm-hub/AMD-Software-PRO-Edition-24.Q3-WinSvr2022-For-HIP.exe" -OutFile "${env:RUNNER_TEMP}\rocm-install.exe" write-host "Installing AMD HIP" Start-Process "${env:RUNNER_TEMP}\rocm-install.exe" -ArgumentList '-install' -NoNewWindow -Wait write-host "Completed AMD HIP" diff --git a/.github/workflows/test.yaml b/.github/workflows/test.yaml index 13d1c957..977d8da1 100644 --- a/.github/workflows/test.yaml +++ b/.github/workflows/test.yaml @@ -169,7 +169,7 @@ jobs: run: | $ErrorActionPreference = "Stop" write-host "downloading AMD HIP Installer" - Invoke-WebRequest -Uri "https://download.amd.com/developer/eula/rocm-hub/AMD-Software-PRO-Edition-23.Q4-WinSvr2022-For-HIP.exe" -OutFile "${env:RUNNER_TEMP}\rocm-install.exe" + Invoke-WebRequest -Uri "https://download.amd.com/developer/eula/rocm-hub/AMD-Software-PRO-Edition-24.Q3-WinSvr2022-For-HIP.exe" -OutFile "${env:RUNNER_TEMP}\rocm-install.exe" write-host "Installing AMD HIP" Start-Process "${env:RUNNER_TEMP}\rocm-install.exe" -ArgumentList '-install' -NoNewWindow -Wait write-host "Completed AMD HIP" diff --git a/docs/faq.md b/docs/faq.md index 57411246..da1848f7 100644 --- a/docs/faq.md +++ b/docs/faq.md @@ -272,4 +272,4 @@ The following server settings may be used to adjust how Ollama handles concurren - `OLLAMA_NUM_PARALLEL` - The maximum number of parallel requests each model will process at the same time. The default will auto-select either 4 or 1 based on available memory. - `OLLAMA_MAX_QUEUE` - The maximum number of requests Ollama will queue when busy before rejecting additional requests. The default is 512 -Note: Windows with Radeon GPUs currently default to 1 model maximum due to limitations in ROCm v5.7 for available VRAM reporting. Once ROCm v6 is available, Windows Radeon will follow the defaults above. You may enable concurrent model loads on Radeon on Windows, but ensure you don't load more models than will fit into your GPUs VRAM. \ No newline at end of file +Note: Windows with Radeon GPUs currently default to 1 model maximum due to limitations in ROCm v5.7 for available VRAM reporting. Once ROCm v6.2 is available, Windows Radeon will follow the defaults above. You may enable concurrent model loads on Radeon on Windows, but ensure you don't load more models than will fit into your GPUs VRAM. \ No newline at end of file diff --git a/gpu/amd_common.go b/gpu/amd_common.go index 27a81e3f..7d1cab7c 100644 --- a/gpu/amd_common.go +++ b/gpu/amd_common.go @@ -49,9 +49,17 @@ func rocmGetVisibleDevicesEnv(gpuInfo []GpuInfo) (string, string) { } func commonAMDValidateLibDir() (string, error) { - // We try to favor system paths first, so that we can wire up the subprocess to use - // the system version. 
Only use our bundled version if the system version doesn't work - // This gives users a more recovery options if versions have subtle problems at runtime + // Favor our bundled version + + // Installer payload location if we're running the installed binary + exe, err := os.Executable() + if err == nil { + rocmTargetDir := filepath.Join(filepath.Dir(exe), "rocm") + if rocmLibUsable(rocmTargetDir) { + slog.Debug("detected ROCM next to ollama executable " + rocmTargetDir) + return rocmTargetDir, nil + } + } // Prefer explicit HIP env var hipPath := os.Getenv("HIP_PATH") @@ -87,14 +95,5 @@ func commonAMDValidateLibDir() (string, error) { } } - // Installer payload location if we're running the installed binary - exe, err := os.Executable() - if err == nil { - rocmTargetDir := filepath.Join(filepath.Dir(exe), "rocm") - if rocmLibUsable(rocmTargetDir) { - slog.Debug("detected ROCM next to ollama executable " + rocmTargetDir) - return rocmTargetDir, nil - } - } return "", fmt.Errorf("no suitable rocm found, falling back to CPU") } diff --git a/gpu/amd_windows.go b/gpu/amd_windows.go index 8b6fabeb..5d09be8b 100644 --- a/gpu/amd_windows.go +++ b/gpu/amd_windows.go @@ -22,8 +22,8 @@ const ( var ( // Used to validate if the given ROCm lib is usable - ROCmLibGlobs = []string{"hipblas.dll", "rocblas"} // TODO - probably include more coverage of files here... - RocmStandardLocations = []string{"C:\\Program Files\\AMD\\ROCm\\5.7\\bin"} // TODO glob? + ROCmLibGlobs = []string{"hipblas.dll", "rocblas"} // This is not sufficient to discern v5 vs v6 + RocmStandardLocations = []string{"C:\\Program Files\\AMD\\ROCm\\6.1\\bin"} // TODO glob? ) func AMDGetGPUInfo() []RocmGPUInfo { diff --git a/llm/generate/gen_windows.ps1 b/llm/generate/gen_windows.ps1 index 26bc4fa3..beb964f9 100644 --- a/llm/generate/gen_windows.ps1 +++ b/llm/generate/gen_windows.ps1 @@ -6,18 +6,9 @@ function amdGPUs { if ($env:AMDGPU_TARGETS) { return $env:AMDGPU_TARGETS } - # TODO - load from some common data file for linux + windows build consistency + # Current supported rocblas list from ROCm v6.1.2 on windows $GPU_LIST = @( - "gfx900" "gfx906:xnack-" - "gfx908:xnack-" - "gfx90a:xnack+" - "gfx90a:xnack-" - "gfx940" - "gfx941" - "gfx942" - "gfx1010" - "gfx1012" "gfx1030" "gfx1100" "gfx1101" @@ -395,7 +386,6 @@ function build_rocm() { sign install - # Assumes v5.7, may need adjustments for v6 rm -ea 0 -recurse -force -path "${script:SRC_DIR}\dist\windows-${script:ARCH}\rocm\" md "${script:SRC_DIR}\dist\windows-${script:ARCH}\rocm\rocblas\library\" -ea 0 > $null cp "${env:HIP_PATH}\bin\hipblas.dll" "${script:SRC_DIR}\dist\windows-${script:ARCH}\rocm\" From 4e262eb2a8aaee31e228febc216c2a83a9a7e4d8 Mon Sep 17 00:00:00 2001 From: Jeffrey Morgan Date: Wed, 10 Jul 2024 13:17:13 -0700 Subject: [PATCH 06/17] remove `GGML_CUDA_FORCE_MMQ=on` from build (#5588) --- llm/generate/gen_linux.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/llm/generate/gen_linux.sh b/llm/generate/gen_linux.sh index 304eadbd..5589f1ea 100755 --- a/llm/generate/gen_linux.sh +++ b/llm/generate/gen_linux.sh @@ -178,7 +178,7 @@ if [ -z "${OLLAMA_SKIP_CUDA_GENERATE}" -a -d "${CUDA_LIB_DIR}" ]; then CMAKE_CUDA_DEFS="-DGGML_CUDA=on -DCMAKE_CUDA_ARCHITECTURES=${CMAKE_CUDA_ARCHITECTURES} ${OLLAMA_CUSTOM_CUDA_DEFS}" echo "Building custom CUDA GPU" else - CMAKE_CUDA_DEFS="-DGGML_CUDA=on -DCMAKE_CUDA_FLAGS=-t8 -DGGML_CUDA_FORCE_MMQ=on -DCMAKE_CUDA_ARCHITECTURES=${CMAKE_CUDA_ARCHITECTURES} -DCMAKE_LIBRARY_PATH=/usr/local/cuda/compat" + 
CMAKE_CUDA_DEFS="-DGGML_CUDA=on -DCMAKE_CUDA_FLAGS=-t8 -DCMAKE_CUDA_ARCHITECTURES=${CMAKE_CUDA_ARCHITECTURES} -DCMAKE_LIBRARY_PATH=/usr/local/cuda/compat" fi CMAKE_DEFS="${COMMON_CMAKE_DEFS} ${CMAKE_DEFS} ${ARM64_DEFS} ${CMAKE_CUDA_DEFS}" BUILD_DIR="../build/linux/${ARCH}/cuda${CUDA_VARIANT}" From 5a739ff4cb27f7804903adfb674f8a1e197ea86f Mon Sep 17 00:00:00 2001 From: Michael Yang Date: Wed, 10 Jul 2024 13:18:04 -0700 Subject: [PATCH 07/17] chatglm graph --- llm/ggml.go | 26 ++++++++++++++++++++++++++ 1 file changed, 26 insertions(+) diff --git a/llm/ggml.go b/llm/ggml.go index cfead450..fddb5039 100644 --- a/llm/ggml.go +++ b/llm/ggml.go @@ -424,6 +424,32 @@ func (llm GGML) GraphSize(context, batch uint64) (partialOffload, fullOffload ui 4*batch*(3*embedding+vocab)+embedding*vocab*105/128, 4*batch*(2*embedding+1+2*embeddingHeadsK*headsKV+context+context*headsKV)+4*embeddingHeadsK*context*headsKV+embedding*embeddingHeadsK*headsKV*9/16, ) + case "chatglm": + fullOffload = 4 * batch * (embedding + vocab) + partialOffload = 4*batch*(embedding+vocab) + embedding*vocab*105/128 + if qkvBias, ok := layers["blk.0"]["attn_qkv.bias"]; ok { + fullOffload = max( + fullOffload, + 4*batch*(2+ + 2*embedding+ + context+ + context*heads+ + embeddingHeadsK*heads+ + qkvBias.Shape[0]), + ) + + partialOffload = max( + partialOffload, + 4*batch*(1+ + 2*embedding+ + embeddingHeadsK*heads+ + context+ + context*heads)+ + 4*embeddingHeadsK*context+ + 4*context*embeddingHeadsK+ + 4*qkvBias.Shape[0], + ) + } } return From 41be28096aa597ded1ef91774ba3e6dfc0a8ccbb Mon Sep 17 00:00:00 2001 From: Michael Yang Date: Wed, 10 Jul 2024 11:00:07 -0700 Subject: [PATCH 08/17] add system prompt to first legacy template --- server/prompt_test.go | 2 +- server/routes_create_test.go | 4 +- template/template.go | 101 +++++++++++++++++++++++++++++++---- template/template_test.go | 61 ++++++++++++++++----- 4 files changed, 140 insertions(+), 28 deletions(-) diff --git a/server/prompt_test.go b/server/prompt_test.go index d4cee98c..1435b143 100644 --- a/server/prompt_test.go +++ b/server/prompt_test.go @@ -161,7 +161,7 @@ func TestChatPrompt(t *testing.T) { {Role: "user", Content: "A test. And a thumping good one at that, I'd wager."}, }, expect: expect{ - prompt: "You're a test, Harry! I-I'm a what? You are the Test Who Lived. A test. And a thumping good one at that, I'd wager. ", + prompt: "You are the Test Who Lived. You're a test, Harry! I-I'm a what? A test. And a thumping good one at that, I'd wager. 
", }, }, } diff --git a/server/routes_create_test.go b/server/routes_create_test.go index 269a0ba1..40477937 100644 --- a/server/routes_create_test.go +++ b/server/routes_create_test.go @@ -546,8 +546,8 @@ func TestCreateDetectTemplate(t *testing.T) { checkFileExists(t, filepath.Join(p, "blobs", "*"), []string{ filepath.Join(p, "blobs", "sha256-553c4a3f747b3d22a4946875f1cc8ed011c2930d83f864a0c7265f9ec0a20413"), - filepath.Join(p, "blobs", "sha256-9512c372dfc7d84d6065b8dd2b601aeed8cc1a78e7a7aa784a42fff37f5524b7"), - filepath.Join(p, "blobs", "sha256-b8b78cb8c6eefd14c06f1af042e6161255bf87bbf2dd14fce57cdac893db8139"), + filepath.Join(p, "blobs", "sha256-68b0323b2f21572bc09ba07554b16b379a5713ee48ef8c25a7661a1f71cfce77"), + filepath.Join(p, "blobs", "sha256-eb72fb7c550ee1f1dec4039bd65382acecf5f7536a30fb7ccace39a8d0cb590b"), }) }) diff --git a/template/template.go b/template/template.go index b133b97e..0b8f2434 100644 --- a/template/template.go +++ b/template/template.go @@ -143,11 +143,14 @@ func (t *Template) Vars() []string { type Values struct { Messages []api.Message + + // forceLegacy is a flag used to test compatibility with legacy templates + forceLegacy bool } func (t *Template) Execute(w io.Writer, v Values) error { system, collated := collate(v.Messages) - if slices.Contains(t.Vars(), "messages") { + if !v.forceLegacy && slices.Contains(t.Vars(), "messages") { return t.Template.Execute(w, map[string]any{ "System": system, "Messages": collated, @@ -157,15 +160,19 @@ func (t *Template) Execute(w io.Writer, v Values) error { var b bytes.Buffer var prompt, response string for i, m := range collated { - if m.Role == "user" { + switch m.Role { + case "user": prompt = m.Content - } else { + if i != 0 { + system = "" + } + case "assistant": response = m.Content } if i != len(collated)-1 && prompt != "" && response != "" { if err := t.Template.Execute(&b, map[string]any{ - "System": "", + "System": system, "Prompt": prompt, "Response": response, }); err != nil { @@ -178,18 +185,21 @@ func (t *Template) Execute(w io.Writer, v Values) error { } var cut bool - tree := t.Template.Copy() - // for the last message, cut everything after "{{ .Response }}" - tree.Root.Nodes = slices.DeleteFunc(tree.Root.Nodes, func(n parse.Node) bool { - if slices.Contains(parseNode(n), "Response") { - cut = true + nodes := deleteNode(t.Template.Root.Copy(), func(n parse.Node) bool { + switch t := n.(type) { + case *parse.ActionNode: + case *parse.FieldNode: + if slices.Contains(t.Ident, "Response") { + cut = true + } } return cut }) - if err := template.Must(template.New("").AddParseTree("", tree)).Execute(&b, map[string]any{ - "System": system, + tree := parse.Tree{Root: nodes.(*parse.ListNode)} + if err := template.Must(template.New("").AddParseTree("", &tree)).Execute(&b, map[string]any{ + "System": "", "Prompt": prompt, }); err != nil { return err @@ -286,3 +296,72 @@ func parseNode(n parse.Node) []string { return nil } + +// deleteNode walks the node list and deletes nodes that match the predicate +// this is currently to remove the {{ .Response }} node from templates +func deleteNode(n parse.Node, fn func(parse.Node) bool) parse.Node { + var walk func(n parse.Node) parse.Node + walk = func(n parse.Node) parse.Node { + if fn(n) { + return nil + } + + switch t := n.(type) { + case *parse.ListNode: + var nodes []parse.Node + for _, c := range t.Nodes { + if n := walk(c); n != nil { + nodes = append(nodes, n) + } + } + + t.Nodes = nodes + return t + case *parse.IfNode: + t.BranchNode = 
*(walk(&t.BranchNode).(*parse.BranchNode)) + case *parse.WithNode: + t.BranchNode = *(walk(&t.BranchNode).(*parse.BranchNode)) + case *parse.RangeNode: + t.BranchNode = *(walk(&t.BranchNode).(*parse.BranchNode)) + case *parse.BranchNode: + t.List = walk(t.List).(*parse.ListNode) + if t.ElseList != nil { + t.ElseList = walk(t.ElseList).(*parse.ListNode) + } + case *parse.ActionNode: + n := walk(t.Pipe) + if n == nil { + return nil + } + + t.Pipe = n.(*parse.PipeNode) + case *parse.PipeNode: + var commands []*parse.CommandNode + for _, c := range t.Cmds { + var args []parse.Node + for _, a := range c.Args { + if n := walk(a); n != nil { + args = append(args, n) + } + } + + if len(args) == 0 { + return nil + } + + c.Args = args + commands = append(commands, c) + } + + if len(commands) == 0 { + return nil + } + + t.Cmds = commands + } + + return n + } + + return walk(n) +} diff --git a/template/template_test.go b/template/template_test.go index 428cdc77..e702a186 100644 --- a/template/template_test.go +++ b/template/template_test.go @@ -105,8 +105,8 @@ func TestTemplate(t *testing.T) { } for n, tt := range cases { + var actual bytes.Buffer t.Run(n, func(t *testing.T) { - var actual bytes.Buffer if err := tmpl.Execute(&actual, Values{Messages: tt}); err != nil { t.Fatal(err) } @@ -120,6 +120,25 @@ func TestTemplate(t *testing.T) { t.Errorf("mismatch (-got +want):\n%s", diff) } }) + + t.Run("legacy", func(t *testing.T) { + var legacy bytes.Buffer + if err := tmpl.Execute(&legacy, Values{Messages: tt, forceLegacy: true}); err != nil { + t.Fatal(err) + } + + legacyBytes := legacy.Bytes() + if slices.Contains([]string{"chatqa.gotmpl", "openchat.gotmpl", "vicuna.gotmpl"}, match) && legacyBytes[len(legacyBytes)-1] == ' ' { + t.Log("removing trailing space from legacy output") + legacyBytes = legacyBytes[:len(legacyBytes)-1] + } else if slices.Contains([]string{"codellama-70b-instruct.gotmpl", "llama2-chat.gotmpl", "mistral-instruct.gotmpl"}, match) { + t.Skip("legacy outputs cannot be compared to messages outputs") + } + + if diff := cmp.Diff(legacyBytes, actual.Bytes()); diff != "" { + t.Errorf("mismatch (-got +want):\n%s", diff) + } + }) } }) } @@ -136,6 +155,21 @@ func TestParse(t *testing.T) { {"{{ with .Tools }}{{ . 
}}{{ end }} {{ .System }} {{ .Prompt }}", []string{"prompt", "response", "system", "tools"}}, {"{{ range .Messages }}{{ .Role }} {{ .Content }}{{ end }}", []string{"content", "messages", "role"}}, {"{{ range .Messages }}{{ if eq .Role \"system\" }}SYSTEM: {{ .Content }}{{ else if eq .Role \"user\" }}USER: {{ .Content }}{{ else if eq .Role \"assistant\" }}ASSISTANT: {{ .Content }}{{ end }}{{ end }}", []string{"content", "messages", "role"}}, + {`{{- if .Messages }} +{{- if .System }}<|im_start|>system +{{ .System }}<|im_end|> +{{ end }} +{{- range .Messages }}<|im_start|>{{ .Role }} +{{ .Content }}<|im_end|> +{{ end }}<|im_start|>assistant +{{ else -}} +{{ if .System }}<|im_start|>system +{{ .System }}<|im_end|> +{{ end }}{{ if .Prompt }}<|im_start|>user +{{ .Prompt }}<|im_end|> +{{ end }}<|im_start|>assistant +{{ .Response }}<|im_end|> +{{- end -}}`, []string{"content", "messages", "prompt", "response", "role", "system"}}, } for _, tt := range cases { @@ -145,9 +179,8 @@ func TestParse(t *testing.T) { t.Fatal(err) } - vars := tmpl.Vars() - if !slices.Equal(tt.vars, vars) { - t.Errorf("expected %v, got %v", tt.vars, vars) + if diff := cmp.Diff(tmpl.Vars(), tt.vars); diff != "" { + t.Errorf("mismatch (-got +want):\n%s", diff) } }) } @@ -170,7 +203,7 @@ func TestExecuteWithMessages(t *testing.T) { {"no response", `[INST] {{ if .System }}{{ .System }}{{ "\n\n" }}{{ end }}{{ .Prompt }}[/INST] `}, {"response", `[INST] {{ if .System }}{{ .System }}{{ "\n\n" }}{{ end }}{{ .Prompt }}[/INST] {{ .Response }}`}, {"messages", `{{- range $index, $_ := .Messages }} -{{- if eq .Role "user" }}[INST] {{ if and (eq (len (slice $.Messages $index)) 1) $.System }}{{ $.System }}{{ "\n\n" }} +{{- if eq .Role "user" }}[INST] {{ if and (eq $index 0) $.System }}{{ $.System }}{{ "\n\n" }} {{- end }}{{ .Content }}[/INST] {{ else if eq .Role "assistant" }}{{ .Content }} {{- end }} {{- end }}`}, @@ -191,7 +224,7 @@ func TestExecuteWithMessages(t *testing.T) { {"response", `[INST] {{ if .System }}{{ .System }}{{ "\n\n" }}{{ end }}{{ .Prompt }}[/INST] {{ .Response }}`}, {"messages", ` {{- range $index, $_ := .Messages }} -{{- if eq .Role "user" }}[INST] {{ if and (eq (len (slice $.Messages $index)) 1) $.System }}{{ $.System }}{{ "\n\n" }} +{{- if eq .Role "user" }}[INST] {{ if and (eq $index 0) $.System }}{{ $.System }}{{ "\n\n" }} {{- end }}{{ .Content }}[/INST] {{ else if eq .Role "assistant" }}{{ .Content }} {{- end }} {{- end }}`}, @@ -204,9 +237,9 @@ func TestExecuteWithMessages(t *testing.T) { {Role: "user", Content: "What is your name?"}, }, }, - `[INST] Hello friend![/INST] Hello human![INST] You are a helpful assistant! + `[INST] You are a helpful assistant! 
-What is your name?[/INST] `, +Hello friend![/INST] Hello human![INST] What is your name?[/INST] `, }, { "chatml", @@ -221,7 +254,7 @@ What is your name?[/INST] `, `}, {"messages", ` {{- range $index, $_ := .Messages }} -{{- if and (eq .Role "user") (eq (len (slice $.Messages $index)) 1) $.System }}<|im_start|>system +{{- if and (eq .Role "user") (eq $index 0) $.System }}<|im_start|>system {{ $.System }}<|im_end|>{{ "\n" }} {{- end }}<|im_start|>{{ .Role }} {{ .Content }}<|im_end|>{{ "\n" }} @@ -236,12 +269,12 @@ What is your name?[/INST] `, {Role: "user", Content: "What is your name?"}, }, }, - `<|im_start|>user + `<|im_start|>system +You are a helpful assistant!<|im_end|> +<|im_start|>user Hello friend!<|im_end|> <|im_start|>assistant Hello human!<|im_end|> -<|im_start|>system -You are a helpful assistant!<|im_end|> <|im_start|>user What is your name?<|im_end|> <|im_start|>assistant @@ -300,8 +333,8 @@ Answer: `, t.Fatal(err) } - if b.String() != tt.expected { - t.Errorf("expected\n%s,\ngot\n%s", tt.expected, b.String()) + if diff := cmp.Diff(b.String(), tt.expected); diff != "" { + t.Errorf("mismatch (-got +want):\n%s", diff) } }) } From 19753c18c01183b4c974e36e89b0c7cbdcc3c38a Mon Sep 17 00:00:00 2001 From: Michael Yang Date: Wed, 10 Jul 2024 11:00:29 -0700 Subject: [PATCH 09/17] update embedded templates --- template/alfred.gotmpl | 4 ++-- template/alpaca.gotmpl | 8 +++++--- template/chatml.gotmpl | 4 ++-- template/chatqa.gotmpl | 7 ++++--- template/codellama-70b-instruct.gotmpl | 10 +++++----- template/falcon-instruct.gotmpl | 12 +++++++----- template/gemma-instruct.gotmpl | 7 ++++--- template/granite-instruct.gotmpl | 8 ++++---- template/llama2-chat.gotmpl | 8 ++++---- template/llama3-instruct.gotmpl | 4 ++-- template/magicoder.gotmpl | 5 +++-- template/mistral-instruct.gotmpl | 5 +++-- template/openchat.gotmpl | 12 ++++++------ template/phi-3.gotmpl | 4 ++-- template/solar-instruct.gotmpl | 7 ++++--- template/starcoder2-instruct.gotmpl | 5 ++--- .../alpaca.gotmpl/system-user-assistant-user | 4 +++- .../system-user-assistant-user | 1 + template/testdata/codellama-70b-instruct.gotmpl/user | 1 + .../user-assistant-user | 1 + .../openchat.gotmpl/system-user-assistant-user | 2 +- template/testdata/openchat.gotmpl/user | 2 +- .../testdata/openchat.gotmpl/user-assistant-user | 2 +- template/vicuna.gotmpl | 7 ++++--- template/zephyr.gotmpl | 4 ++-- 25 files changed, 74 insertions(+), 60 deletions(-) diff --git a/template/alfred.gotmpl b/template/alfred.gotmpl index 44284f04..71bc6706 100644 --- a/template/alfred.gotmpl +++ b/template/alfred.gotmpl @@ -3,6 +3,6 @@ {{- end }} {{- range .Messages }}{{ .Content }} {{- end }} -{{- else }} +{{- else -}} {{ if .System }}{{ .System }}{{ end }}{{ if .Prompt }}{{ .Prompt }}{{ end }}{{ .Response }} -{{- end }} \ No newline at end of file +{{- end -}} \ No newline at end of file diff --git a/template/alpaca.gotmpl b/template/alpaca.gotmpl index c1f69dc9..e9becb3d 100644 --- a/template/alpaca.gotmpl +++ b/template/alpaca.gotmpl @@ -1,6 +1,7 @@ {{- if .Messages }} {{- if .System }}{{ .System }} -{{- end }} + +{{ end }} {{- range .Messages }} {{- if eq .Role "user" }}### Instruction: {{- else if eq .Role "assistant" }}### Response: @@ -8,7 +9,7 @@ {{ .Content }} {{ end }}### Response: -{{ else }} +{{ else -}} {{ if .System }}{{ .System }} {{ end }}{{ if .Prompt }}### Instruction: @@ -16,4 +17,5 @@ {{ end }}### Response: {{ .Response }} -{{- end }} \ No newline at end of file + +{{ end -}} \ No newline at end of file diff --git a/template/chatml.gotmpl 
b/template/chatml.gotmpl
index d945547c..eb8ab0dc 100644
--- a/template/chatml.gotmpl
+++ b/template/chatml.gotmpl
@@ -5,11 +5,11 @@
 {{- range .Messages }}<|im_start|>{{ .Role }}
 {{ .Content }}<|im_end|>
 {{ end }}<|im_start|>assistant
-{{ else }}
+{{ else -}}
 {{ if .System }}<|im_start|>system
 {{ .System }}<|im_end|>
 {{ end }}{{ if .Prompt }}<|im_start|>user
 {{ .Prompt }}<|im_end|>
 {{ end }}<|im_start|>assistant
 {{ .Response }}<|im_end|>
-{{- end }}
\ No newline at end of file
+{{ end -}}
\ No newline at end of file
diff --git a/template/chatqa.gotmpl b/template/chatqa.gotmpl
index 7022c479..41c6ced5 100644
--- a/template/chatqa.gotmpl
+++ b/template/chatqa.gotmpl
@@ -8,10 +8,11 @@
 {{- end }} {{ .Content }}
 
 {{ end }}Assistant:
-{{- else }}
+{{- else -}}
 {{ if .System }}System: {{ .System }}
 
 {{ end }}{{ if .Prompt }}User: {{ .Prompt }}
 
-{{ end }}Assistant: <|begin_of_text|>{{ .Response }}
-{{- end }}
\ No newline at end of file
+{{ end }}Assistant: {{ .Response }}
+
+{{ end -}}
\ No newline at end of file
diff --git a/template/codellama-70b-instruct.gotmpl b/template/codellama-70b-instruct.gotmpl
index 392d839e..0a313d38 100644
--- a/template/codellama-70b-instruct.gotmpl
+++ b/template/codellama-70b-instruct.gotmpl
@@ -7,13 +7,13 @@
 {{ .Content }} <step> 
 {{ end }}Source: assistant
 Destination: user
 
-{{ else }}
-{{ if .System }} Source: system
+ {{ else -}}
+{{ if .System }}Source: system
 
- {{ .System }} <step> {{ end }} Source: user
+ {{ .System }} <step> {{ end }}Source: user
 
 {{ .Prompt }} <step> Source: assistant
 Destination: user
 
- {{ .Response }}
-{{- end }}
+ {{ .Response }} 
+{{- end -}}
\ No newline at end of file
diff --git a/template/falcon-instruct.gotmpl b/template/falcon-instruct.gotmpl
index 99d67f93..3a403007 100644
--- a/template/falcon-instruct.gotmpl
+++ b/template/falcon-instruct.gotmpl
@@ -6,8 +6,10 @@
 {{ else if eq .Role "assistant" }}Falcon:
 {{ end }}{{ .Content }}
 {{ end }}Falcon:
-{{ else }}
-{{ if .System }}{{ .System }}
-{{ end }}{{ if .Prompt }}User: {{ .Prompt }}
-{{ end }}Assistant: {{ .Response }}
-{{- end }}
\ No newline at end of file
+{{ else -}}
+{{ if .System }}System: {{ .System }}
+{{ end }}{{ if .Prompt }}User:
+{{ .Prompt }}
+{{ end }}Falcon:
+{{ .Response }}
+{{ end -}}
\ No newline at end of file
diff --git a/template/gemma-instruct.gotmpl b/template/gemma-instruct.gotmpl
index 870a8f2e..6d778a70 100644
--- a/template/gemma-instruct.gotmpl
+++ b/template/gemma-instruct.gotmpl
@@ -8,9 +8,10 @@
 {{- end }}
 {{ .Content }}<end_of_turn>
 {{ end }}<start_of_turn>model
-{{ else }}
+{{ else -}}
 <start_of_turn>user
-{{ if .System }}{{ .System }} {{ end }}{{ .Prompt }}<end_of_turn>
+{{ if .System }}{{ .System }}
+{{ end }}{{ .Prompt }}<end_of_turn>
 <start_of_turn>model
 {{ .Response }}<end_of_turn>
-{{- end }}
\ No newline at end of file
+{{ end -}}
\ No newline at end of file
diff --git a/template/granite-instruct.gotmpl b/template/granite-instruct.gotmpl
index 327ff3ee..4a85a97b 100644
--- a/template/granite-instruct.gotmpl
+++ b/template/granite-instruct.gotmpl
@@ -10,9 +10,8 @@
 {{ .Content }}
 
 {{ end }}Answer:
-{{ else }}
-{{ if .System }}
-System:
+{{ else -}}
+{{ if .System }}System:
 {{ .System }}
 
 {{ end }}{{ if .Prompt }}Question:
@@ -20,4 +19,5 @@ System:
 
 {{ end }}Answer:
 {{ .Response }}
-{{- end }}
\ No newline at end of file
+
+{{ end -}}
\ No newline at end of file
diff --git a/template/llama2-chat.gotmpl b/template/llama2-chat.gotmpl
index 6327d581..1816fefd 100644
--- a/template/llama2-chat.gotmpl
+++ b/template/llama2-chat.gotmpl
@@ -9,8 +9,8 @@
 {{- else }} [/INST] {{ .Content }}
 {{- end }}
 {{- end }} [/INST]
-{{- else }}
-[INST] <<SYS>>{{ .System }}<</SYS>>
-
-{{ .Prompt }} [/INST] {{ .Response }}
-{{- end }}
\ No newline at end of file
+{{- else -}}
+[INST] <<SYS>>{{ if .System }}{{ .System }}{{ end }}<</SYS>>
+
+{{ .Prompt }} [/INST] {{ .Response }}
+{{- end -}}
\ No newline at end of file
diff --git a/template/llama3-instruct.gotmpl b/template/llama3-instruct.gotmpl
index 9c81a953..7947b8da 100644
--- a/template/llama3-instruct.gotmpl
+++ b/template/llama3-instruct.gotmpl
@@ -8,7 +8,7 @@
 {{ .Content }}<|eot_id|>
 {{- end }}<|start_header_id|>assistant<|end_header_id|>
 
-{{ else }}
+{{ else -}}
 {{ if .System }}<|start_header_id|>system<|end_header_id|>
 
 {{ .System }}<|eot_id|>{{ end }}{{ if .Prompt }}<|start_header_id|>user<|end_header_id|>
@@ -16,4 +16,4 @@
 {{ .Prompt }}<|eot_id|>{{ end }}<|start_header_id|>assistant<|end_header_id|>
 
 {{ .Response }}<|eot_id|>
-{{- end }}
\ No newline at end of file
+{{- end -}}
\ No newline at end of file
diff --git a/template/magicoder.gotmpl b/template/magicoder.gotmpl
index 73a58127..9227b666 100644
--- a/template/magicoder.gotmpl
+++ b/template/magicoder.gotmpl
@@ -9,7 +9,7 @@
 {{ .Content }}
 
 {{ end }}@@ Response
-{{ else }}
+{{ else -}}
 {{ if .System }}{{ .System }}
 
 {{ end }}{{ if .Prompt }}@@ Instruction
@@ -17,4 +17,5 @@
 
 {{ end }}@@ Response
 {{ .Response }}
-{{- end }}
\ No newline at end of file
+
+{{ end -}}
\ No newline at end of file
diff --git a/template/mistral-instruct.gotmpl b/template/mistral-instruct.gotmpl
index eb3d5ced..1d746dfd 100644
--- a/template/mistral-instruct.gotmpl
+++ b/template/mistral-instruct.gotmpl
@@ -5,5 +5,6 @@
 {{- else if eq .Role "assistant" }}[/INST] {{ .Content }}
 {{- end }}
 {{- end }}[/INST]
-{{- else }}[INST] {{ if .System }}{{ .System }} {{ end }}{{ .Prompt }} [/INST] {{ .Response }}
-{{- end }}
\ No newline at end of file
+{{- else -}}
+[INST] {{ if .System }}{{ .System }} {{ end }}{{ .Prompt }}[/INST] {{ .Response }}
+{{- end -}}
\ No newline at end of file
diff --git a/template/openchat.gotmpl b/template/openchat.gotmpl
index d5e1cbb0..649f0509 100644
--- a/template/openchat.gotmpl
+++ b/template/openchat.gotmpl
@@ -1,11 +1,11 @@
 {{- if .Messages }}
-{{- if .System }}GPT Correct System: {{ .System }}<|end_of_turn|>
+{{- if .System }}GPT4 Correct System: {{ .System }}<|end_of_turn|>
 {{- end }}
-{{- range .Messages }}GPT Correct
+{{- range .Messages }}GPT4 Correct
 {{- if eq .Role "user" }} User:
 {{- else if eq .Role "assistant" }} Assistant:
 {{- end }} {{ .Content }}<|end_of_turn|>
-{{- end }}GPT Correct Assistant:
-{{- else }}
-{{ .System }}<|end_of_turn|>GPT4 Correct User: {{ .Prompt }}<|end_of_turn|>GPT4 Correct Assistant: {{ .Response }}<|end_of_turn|>
-{{- end }}
\ No newline at end of file
+{{- end }}GPT4 Correct Assistant:
+{{- else -}}
+{{ if .System }}GPT4 Correct System: {{ .System }}<|end_of_turn|>{{ end }}GPT4 Correct User: {{ .Prompt }}<|end_of_turn|>GPT4 Correct Assistant: {{ .Response }}<|end_of_turn|>
+{{- end -}}
\ No newline at end of file
diff --git a/template/phi-3.gotmpl b/template/phi-3.gotmpl
index a3558d2b..4ca56e95 100644
--- a/template/phi-3.gotmpl
+++ b/template/phi-3.gotmpl
@@ -5,11 +5,11 @@
 {{- range .Messages }}<|{{ .Role }}|>
 {{ .Content }}<|end|>
 {{ end }}<|assistant|>
-{{ else }}
+{{ else -}}
 {{ if .System }}<|system|>
 {{ .System }}<|end|>
 {{ end }}{{ if .Prompt }}<|user|>
 {{ .Prompt }}<|end|>
 {{ end }}<|assistant|>
 {{ .Response }}<|end|>
-{{- end }}
\ No newline at end of file
+{{ end -}}
\ No newline at end of file
diff --git a/template/solar-instruct.gotmpl b/template/solar-instruct.gotmpl
index caa6e8e7..8a8331ca 100644
--- 
a/template/solar-instruct.gotmpl +++ b/template/solar-instruct.gotmpl @@ -10,7 +10,7 @@ {{ .Content }} {{ end }} {{ end }}### Assistant: -{{ else }} +{{ else -}} {{ if .System }}### System: {{ .System }} @@ -18,5 +18,6 @@ {{ .Prompt }} {{ end }}### Assistant: -{{ .Response }} -{{- end }} \ No newline at end of file +{{ .Response }} + +{{ end -}} \ No newline at end of file diff --git a/template/starcoder2-instruct.gotmpl b/template/starcoder2-instruct.gotmpl index 7d7ff932..17c6ad75 100644 --- a/template/starcoder2-instruct.gotmpl +++ b/template/starcoder2-instruct.gotmpl @@ -11,14 +11,13 @@ {{ end }} {{- end }}### Response -{{ else }} +{{ else -}} {{ if .System }}{{ .System }} {{ end }}{{ if .Prompt }}### Instruction {{ .Prompt }} - {{ end }}### Response {{ .Response }}<|endoftext|> -{{- end }} \ No newline at end of file +{{ end -}} \ No newline at end of file diff --git a/template/testdata/alpaca.gotmpl/system-user-assistant-user b/template/testdata/alpaca.gotmpl/system-user-assistant-user index 20182d82..4caa8178 100644 --- a/template/testdata/alpaca.gotmpl/system-user-assistant-user +++ b/template/testdata/alpaca.gotmpl/system-user-assistant-user @@ -1,4 +1,6 @@ -You are a helpful assistant.### Instruction: +You are a helpful assistant. + +### Instruction: Hello, how are you? ### Response: diff --git a/template/testdata/codellama-70b-instruct.gotmpl/system-user-assistant-user b/template/testdata/codellama-70b-instruct.gotmpl/system-user-assistant-user index fdd0fc8b..d7528f80 100644 --- a/template/testdata/codellama-70b-instruct.gotmpl/system-user-assistant-user +++ b/template/testdata/codellama-70b-instruct.gotmpl/system-user-assistant-user @@ -9,3 +9,4 @@ Source: system I'd like to show off how chat templating works! Source: assistant Destination: user + \ No newline at end of file diff --git a/template/testdata/codellama-70b-instruct.gotmpl/user b/template/testdata/codellama-70b-instruct.gotmpl/user index 9e7174a8..8e07853c 100644 --- a/template/testdata/codellama-70b-instruct.gotmpl/user +++ b/template/testdata/codellama-70b-instruct.gotmpl/user @@ -3,3 +3,4 @@ Source: user Hello, how are you? Source: assistant Destination: user + \ No newline at end of file diff --git a/template/testdata/codellama-70b-instruct.gotmpl/user-assistant-user b/template/testdata/codellama-70b-instruct.gotmpl/user-assistant-user index b4ba1736..f732cc74 100644 --- a/template/testdata/codellama-70b-instruct.gotmpl/user-assistant-user +++ b/template/testdata/codellama-70b-instruct.gotmpl/user-assistant-user @@ -7,3 +7,4 @@ Source: user I'd like to show off how chat templating works! Source: assistant Destination: user + \ No newline at end of file diff --git a/template/testdata/openchat.gotmpl/system-user-assistant-user b/template/testdata/openchat.gotmpl/system-user-assistant-user index 1214c126..404b071a 100644 --- a/template/testdata/openchat.gotmpl/system-user-assistant-user +++ b/template/testdata/openchat.gotmpl/system-user-assistant-user @@ -1 +1 @@ -GPT Correct System: You are a helpful assistant.<|end_of_turn|>GPT Correct User: Hello, how are you?<|end_of_turn|>GPT Correct Assistant: I'm doing great. How can I help you today?<|end_of_turn|>GPT Correct User: I'd like to show off how chat templating works!<|end_of_turn|>GPT Correct Assistant: \ No newline at end of file +GPT4 Correct System: You are a helpful assistant.<|end_of_turn|>GPT4 Correct User: Hello, how are you?<|end_of_turn|>GPT4 Correct Assistant: I'm doing great. 
How can I help you today?<|end_of_turn|>GPT4 Correct User: I'd like to show off how chat templating works!<|end_of_turn|>GPT4 Correct Assistant: \ No newline at end of file diff --git a/template/testdata/openchat.gotmpl/user b/template/testdata/openchat.gotmpl/user index 611daa83..48229cb0 100644 --- a/template/testdata/openchat.gotmpl/user +++ b/template/testdata/openchat.gotmpl/user @@ -1 +1 @@ -GPT Correct User: Hello, how are you?<|end_of_turn|>GPT Correct Assistant: \ No newline at end of file +GPT4 Correct User: Hello, how are you?<|end_of_turn|>GPT4 Correct Assistant: \ No newline at end of file diff --git a/template/testdata/openchat.gotmpl/user-assistant-user b/template/testdata/openchat.gotmpl/user-assistant-user index f97b02b9..4719abb2 100644 --- a/template/testdata/openchat.gotmpl/user-assistant-user +++ b/template/testdata/openchat.gotmpl/user-assistant-user @@ -1 +1 @@ -GPT Correct User: Hello, how are you?<|end_of_turn|>GPT Correct Assistant: I'm doing great. How can I help you today?<|end_of_turn|>GPT Correct User: I'd like to show off how chat templating works!<|end_of_turn|>GPT Correct Assistant: \ No newline at end of file +GPT4 Correct User: Hello, how are you?<|end_of_turn|>GPT4 Correct Assistant: I'm doing great. How can I help you today?<|end_of_turn|>GPT4 Correct User: I'd like to show off how chat templating works!<|end_of_turn|>GPT4 Correct Assistant: \ No newline at end of file diff --git a/template/vicuna.gotmpl b/template/vicuna.gotmpl index 2e13e990..01465b99 100644 --- a/template/vicuna.gotmpl +++ b/template/vicuna.gotmpl @@ -7,8 +7,9 @@ {{ else if eq .Role "assistant" }}ASSISTANT: {{ .Content }} {{ end }} {{- end }}ASSISTANT: -{{- else }} +{{- else -}} {{ if .System }}{{ .System }} + {{ end }}{{ if .Prompt }}USER: {{ .Prompt }} -{{ end }}ASSISTANT: {{ .Response }} -{{- end }} \ No newline at end of file +{{ end }}ASSISTANT: {{ .Response }} +{{ end -}} \ No newline at end of file diff --git a/template/zephyr.gotmpl b/template/zephyr.gotmpl index e6668848..3ca1d1a1 100644 --- a/template/zephyr.gotmpl +++ b/template/zephyr.gotmpl @@ -5,11 +5,11 @@ {{- range .Messages }}<|{{ .Role }}|> {{ .Content }} {{ end }}<|assistant|> -{{ else }} +{{ else -}} {{ if .System }}<|system|> {{ .System }} {{ end }}{{ if .Prompt }}<|user|> {{ .Prompt }} {{ end }}<|assistant|> {{ .Response }} -{{- end }} \ No newline at end of file +{{ end -}} \ No newline at end of file From efbf41ed8151098b942c142e2522b9ab8364f97a Mon Sep 17 00:00:00 2001 From: Jeffrey Morgan Date: Wed, 10 Jul 2024 20:01:52 -0700 Subject: [PATCH 10/17] llm: dont link cuda with compat libs (#5621) --- llm/generate/gen_linux.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/llm/generate/gen_linux.sh b/llm/generate/gen_linux.sh index 5589f1ea..db2c6c30 100755 --- a/llm/generate/gen_linux.sh +++ b/llm/generate/gen_linux.sh @@ -178,7 +178,7 @@ if [ -z "${OLLAMA_SKIP_CUDA_GENERATE}" -a -d "${CUDA_LIB_DIR}" ]; then CMAKE_CUDA_DEFS="-DGGML_CUDA=on -DCMAKE_CUDA_ARCHITECTURES=${CMAKE_CUDA_ARCHITECTURES} ${OLLAMA_CUSTOM_CUDA_DEFS}" echo "Building custom CUDA GPU" else - CMAKE_CUDA_DEFS="-DGGML_CUDA=on -DCMAKE_CUDA_FLAGS=-t8 -DCMAKE_CUDA_ARCHITECTURES=${CMAKE_CUDA_ARCHITECTURES} -DCMAKE_LIBRARY_PATH=/usr/local/cuda/compat" + CMAKE_CUDA_DEFS="-DGGML_CUDA=on -DCMAKE_CUDA_FLAGS=-t8 -DCMAKE_CUDA_ARCHITECTURES=${CMAKE_CUDA_ARCHITECTURES}" fi CMAKE_DEFS="${COMMON_CMAKE_DEFS} ${CMAKE_DEFS} ${ARM64_DEFS} ${CMAKE_CUDA_DEFS}" BUILD_DIR="../build/linux/${ARCH}/cuda${CUDA_VARIANT}" From 
791650ddef9eb11e011506dbd5d22ed6bfcb6a10 Mon Sep 17 00:00:00 2001 From: Jeffrey Morgan Date: Thu, 11 Jul 2024 00:53:12 -0700 Subject: [PATCH 11/17] sched: only error when over-allocating system memory (#5626) --- llm/server.go | 9 +++++++++ server/sched.go | 37 ------------------------------------- 2 files changed, 9 insertions(+), 37 deletions(-) diff --git a/llm/server.go b/llm/server.go index aa504d19..07c58cff 100644 --- a/llm/server.go +++ b/llm/server.go @@ -122,6 +122,15 @@ func NewLlamaServer(gpus gpu.GpuInfoList, model string, ggml *GGML, adapters, pr } } + // On linux, over-allocating CPU memory will almost always result in an error + if runtime.GOOS == "linux" { + systemMemoryRequired := estimate.TotalSize - estimate.VRAMSize + if systemMemoryRequired > systemTotalMemory { + slog.Warn("model request too large for system", "requested", format.HumanBytes2(systemMemoryRequired), "system", format.HumanBytes2(systemTotalMemory)) + return nil, fmt.Errorf("model requires more system memory (%s) than is available (%s)", format.HumanBytes2(systemMemoryRequired), format.HumanBytes2(systemTotalMemory)) + } + } + estimate.log() // Loop through potential servers diff --git a/server/sched.go b/server/sched.go index 48047bfe..2daed3ab 100644 --- a/server/sched.go +++ b/server/sched.go @@ -135,11 +135,6 @@ func (s *Scheduler) processPending(ctx context.Context) { } for { - cpus := s.getCpuFn() - var systemMem gpu.GpuInfo - if len(cpus) > 0 { - systemMem = cpus[0] - } var runnerToExpire *runnerRef s.loadedMu.Lock() runner := s.loaded[pending.model.ModelPath] @@ -193,38 +188,6 @@ func (s *Scheduler) processPending(ctx context.Context) { break } - estimate := llm.EstimateGPULayers(gpus, ggml, pending.model.ProjectorPaths, pending.opts) - maxSize := systemMem.FreeMemory - - // Add available GPU memory to the total pool - // macOS hardware has unified memory so don't double count - if runtime.GOOS != "darwin" { - for _, gpu := range gpus { - if gpu.Library == "cpu" { - continue - } - if loadedCount == 0 { - // If no other models are loaded, set the limit based on what's available - maxSize += gpu.FreeMemory - } else { - // Other models could be unloaded, favor total memory for limit - maxSize += gpu.TotalMemory - } - } - } - - // Block attempting to load a model larger than system memory + GPU memory - if estimate.TotalSize > maxSize { - slog.Warn("model request too large for system", "requested", format.HumanBytes2(estimate.TotalSize), "system", format.HumanBytes2(maxSize)) - - // Linux will crash if over-allocating memory - return an error to the user. 
- // TODO (jmorganca): add reasonable upper limits for darwin and windows as well - if runtime.GOOS == "linux" { - pending.errCh <- fmt.Errorf("requested model (%s) is too large for this system (%s)", format.HumanBytes2(estimate.TotalSize), format.HumanBytes2(maxSize)) - break - } - } - // Evaluate if the model will fit in the available system memory, or if we should unload a model first if len(gpus) == 1 && gpus[0].Library == "cpu" { // simplifying assumption of defaultParallel when in CPU mode From e64f9ebb44b584d94094274f62acd90a5195dd89 Mon Sep 17 00:00:00 2001 From: Michael Yang Date: Thu, 11 Jul 2024 13:10:13 -0700 Subject: [PATCH 12/17] do no automatically aggregate system messages --- template/template.go | 39 ++++++++++++++++++++------------------- template/template_test.go | 11 +++++++---- 2 files changed, 27 insertions(+), 23 deletions(-) diff --git a/template/template.go b/template/template.go index 0b8f2434..8d5ac51b 100644 --- a/template/template.go +++ b/template/template.go @@ -102,8 +102,21 @@ var response = parse.ActionNode{ }, } +var funcs = template.FuncMap{ + "aggregate": func(v []*api.Message, role string) string { + var aggregated []string + for _, m := range v { + if m.Role == role { + aggregated = append(aggregated, m.Content) + } + } + + return strings.Join(aggregated, "\n\n") + }, +} + func Parse(s string) (*Template, error) { - tmpl := template.New("").Option("missingkey=zero") + tmpl := template.New("").Option("missingkey=zero").Funcs(funcs) tmpl, err := tmpl.Parse(s) if err != nil { @@ -149,23 +162,21 @@ type Values struct { } func (t *Template) Execute(w io.Writer, v Values) error { - system, collated := collate(v.Messages) + collated := collate(v.Messages) if !v.forceLegacy && slices.Contains(t.Vars(), "messages") { return t.Template.Execute(w, map[string]any{ - "System": system, "Messages": collated, }) } var b bytes.Buffer - var prompt, response string + var system, prompt, response string for i, m := range collated { switch m.Role { + case "system": + system = m.Content case "user": prompt = m.Content - if i != 0 { - system = "" - } case "assistant": response = m.Content } @@ -179,6 +190,7 @@ func (t *Template) Execute(w io.Writer, v Values) error { return err } + system = "" prompt = "" response = "" } @@ -209,25 +221,14 @@ func (t *Template) Execute(w io.Writer, v Values) error { return err } -type messages []*api.Message - // collate messages based on role. consecutive messages of the same role are merged // into a single message. collate also pulls out and merges messages with Role == "system" // which are templated separately. 
As a side effect, it mangles message content adding image // tags ([img-%d]) as needed -func collate(msgs []api.Message) (system string, collated messages) { +func collate(msgs []api.Message) (collated []*api.Message) { var n int for i := range msgs { msg := msgs[i] - if msg.Role == "system" { - if system != "" { - system += "\n\n" - } - - system += msg.Content - continue - } - for range msg.Images { imageTag := fmt.Sprintf("[img-%d]", n) if !strings.Contains(msg.Content, "[img]") { diff --git a/template/template_test.go b/template/template_test.go index e702a186..b020eb67 100644 --- a/template/template_test.go +++ b/template/template_test.go @@ -122,6 +122,7 @@ func TestTemplate(t *testing.T) { }) t.Run("legacy", func(t *testing.T) { + t.Skip("legacy outputs are currently default outputs") var legacy bytes.Buffer if err := tmpl.Execute(&legacy, Values{Messages: tt, forceLegacy: true}); err != nil { t.Fatal(err) @@ -154,11 +155,13 @@ func TestParse(t *testing.T) { {"{{ .System }} {{ .Prompt }} {{ .Response }}", []string{"prompt", "response", "system"}}, {"{{ with .Tools }}{{ . }}{{ end }} {{ .System }} {{ .Prompt }}", []string{"prompt", "response", "system", "tools"}}, {"{{ range .Messages }}{{ .Role }} {{ .Content }}{{ end }}", []string{"content", "messages", "role"}}, - {"{{ range .Messages }}{{ if eq .Role \"system\" }}SYSTEM: {{ .Content }}{{ else if eq .Role \"user\" }}USER: {{ .Content }}{{ else if eq .Role \"assistant\" }}ASSISTANT: {{ .Content }}{{ end }}{{ end }}", []string{"content", "messages", "role"}}, + {`{{- range .Messages }} +{{- if eq .Role "system" }}SYSTEM: +{{- else if eq .Role "user" }}USER: +{{- else if eq .Role "assistant" }}ASSISTANT: +{{- end }} {{ .Content }} +{{- end }}`, []string{"content", "messages", "role"}}, {`{{- if .Messages }} -{{- if .System }}<|im_start|>system -{{ .System }}<|im_end|> -{{ end }} {{- range .Messages }}<|im_start|>{{ .Role }} {{ .Content }}<|im_end|> {{ end }}<|im_start|>assistant From 57ec6901eb59cca9d0c29adca3f0fd4b95c1c989 Mon Sep 17 00:00:00 2001 From: Michael Yang Date: Thu, 11 Jul 2024 13:11:40 -0700 Subject: [PATCH 13/17] revert embedded templates to use prompt/response This reverts commit 19753c18c01183b4c974e36e89b0c7cbdcc3c38a. for compat. 
messages will be added at a later date --- server/routes_create_test.go | 4 +- template/alfred.gotmpl | 9 +-- template/alpaca.gotmpl | 13 ---- template/chatml.gotmpl | 9 --- template/chatqa.gotmpl | 12 ---- template/codellama-70b-instruct.gotmpl | 15 +---- template/falcon-instruct.gotmpl | 10 ---- template/gemma-instruct.gotmpl | 12 ---- template/granite-instruct.gotmpl | 14 ----- template/llama2-chat.gotmpl | 18 ++---- template/llama3-instruct.gotmpl | 14 +---- template/magicoder.gotmpl | 13 ---- template/mistral-instruct.gotmpl | 13 +--- template/openchat.gotmpl | 12 +--- template/phi-3.gotmpl | 9 --- template/solar-instruct.gotmpl | 14 ----- template/starcoder2-instruct.gotmpl | 15 ----- template/template_test.go | 59 ++++++++++++------- .../system-user-assistant-user | 4 +- .../llama2-chat.gotmpl/user-assistant-user | 4 +- .../system-user-assistant-user | 5 +- template/vicuna.gotmpl | 11 ---- template/zephyr.gotmpl | 9 --- 23 files changed, 63 insertions(+), 235 deletions(-) diff --git a/server/routes_create_test.go b/server/routes_create_test.go index 40477937..04174b92 100644 --- a/server/routes_create_test.go +++ b/server/routes_create_test.go @@ -546,8 +546,8 @@ func TestCreateDetectTemplate(t *testing.T) { checkFileExists(t, filepath.Join(p, "blobs", "*"), []string{ filepath.Join(p, "blobs", "sha256-553c4a3f747b3d22a4946875f1cc8ed011c2930d83f864a0c7265f9ec0a20413"), - filepath.Join(p, "blobs", "sha256-68b0323b2f21572bc09ba07554b16b379a5713ee48ef8c25a7661a1f71cfce77"), - filepath.Join(p, "blobs", "sha256-eb72fb7c550ee1f1dec4039bd65382acecf5f7536a30fb7ccace39a8d0cb590b"), + filepath.Join(p, "blobs", "sha256-c608dc615584cd20d9d830363dabf8a4783ae5d34245c3d8c115edb3bc7b28e4"), + filepath.Join(p, "blobs", "sha256-f836ee110db21567f826332e4cedd746c06d10664fd5a9ea3659e3683a944510"), }) }) diff --git a/template/alfred.gotmpl b/template/alfred.gotmpl index 71bc6706..cecb9d2c 100644 --- a/template/alfred.gotmpl +++ b/template/alfred.gotmpl @@ -1,8 +1 @@ -{{- if .Messages }} -{{- if .System }}{{ .System }} -{{- end }} -{{- range .Messages }}{{ .Content }} -{{- end }} -{{- else -}} -{{ if .System }}{{ .System }}{{ end }}{{ if .Prompt }}{{ .Prompt }}{{ end }}{{ .Response }} -{{- end -}} \ No newline at end of file +{{ if .System }}{{ .System }}{{ end }}{{ if .Prompt }}{{ .Prompt }}{{ end }}{{ .Response }} \ No newline at end of file diff --git a/template/alpaca.gotmpl b/template/alpaca.gotmpl index e9becb3d..ec7a8edc 100644 --- a/template/alpaca.gotmpl +++ b/template/alpaca.gotmpl @@ -1,15 +1,3 @@ -{{- if .Messages }} -{{- if .System }}{{ .System }} - -{{ end }} -{{- range .Messages }} -{{- if eq .Role "user" }}### Instruction: -{{- else if eq .Role "assistant" }}### Response: -{{- end }} -{{ .Content }} - -{{ end }}### Response: -{{ else -}} {{ if .System }}{{ .System }} {{ end }}{{ if .Prompt }}### Instruction: @@ -18,4 +6,3 @@ {{ end }}### Response: {{ .Response }} -{{ end -}} \ No newline at end of file diff --git a/template/chatml.gotmpl b/template/chatml.gotmpl index eb8ab0dc..fb672601 100644 --- a/template/chatml.gotmpl +++ b/template/chatml.gotmpl @@ -1,15 +1,6 @@ -{{- if .Messages }} -{{- if .System }}<|im_start|>system -{{ .System }}<|im_end|> -{{ end }} -{{- range .Messages }}<|im_start|>{{ .Role }} -{{ .Content }}<|im_end|> -{{ end }}<|im_start|>assistant -{{ else -}} {{ if .System }}<|im_start|>system {{ .System }}<|im_end|> {{ end }}{{ if .Prompt }}<|im_start|>user {{ .Prompt }}<|im_end|> {{ end }}<|im_start|>assistant {{ .Response }}<|im_end|> -{{ end -}} \ No newline at end of 
file diff --git a/template/chatqa.gotmpl b/template/chatqa.gotmpl index 41c6ced5..91679a72 100644 --- a/template/chatqa.gotmpl +++ b/template/chatqa.gotmpl @@ -1,18 +1,6 @@ -{{- if .Messages }} -{{- if .System }}System: {{ .System }} - -{{ end }} -{{- range .Messages }} -{{- if eq .Role "user" }}User: -{{- else if eq .Role "assistant" }}Assistant: -{{- end }} {{ .Content }} - -{{ end }}Assistant: -{{- else -}} {{ if .System }}System: {{ .System }} {{ end }}{{ if .Prompt }}User: {{ .Prompt }} {{ end }}Assistant: {{ .Response }} -{{ end -}} \ No newline at end of file diff --git a/template/codellama-70b-instruct.gotmpl b/template/codellama-70b-instruct.gotmpl index 0a313d38..e5856042 100644 --- a/template/codellama-70b-instruct.gotmpl +++ b/template/codellama-70b-instruct.gotmpl @@ -1,19 +1,10 @@ -{{- if .Messages }} -{{- if .System }}Source: system - - {{ .System }} {{ end }} -{{- range .Messages }}Source: {{ .Role }} - - {{ .Content }} {{ end }}Source: assistant -Destination: user - - {{ else -}} {{ if .System }}Source: system {{ .System }} {{ end }}Source: user {{ .Prompt }} Source: assistant +{{- if not .Response }} Destination: user +{{- end }} - {{ .Response }} -{{- end -}} \ No newline at end of file + {{ .Response }} \ No newline at end of file diff --git a/template/falcon-instruct.gotmpl b/template/falcon-instruct.gotmpl index 3a403007..0a5fe48e 100644 --- a/template/falcon-instruct.gotmpl +++ b/template/falcon-instruct.gotmpl @@ -1,15 +1,5 @@ -{{- if .Messages }} -{{- if .System }}System: {{ .System }} -{{ end }} -{{- range .Messages }} -{{- if eq .Role "user" }}User: -{{ else if eq .Role "assistant" }}Falcon: -{{ end }}{{ .Content }} -{{ end }}Falcon: -{{ else -}} {{ if .System }}System: {{ .System }} {{ end }}{{ if .Prompt }}User: {{ .Prompt }} {{ end }}Falcon: {{ .Response }} -{{ end -}} \ No newline at end of file diff --git a/template/gemma-instruct.gotmpl b/template/gemma-instruct.gotmpl index 6d778a70..3c3a8425 100644 --- a/template/gemma-instruct.gotmpl +++ b/template/gemma-instruct.gotmpl @@ -1,17 +1,5 @@ -{{- if .Messages }} -{{- range $index, $_ := .Messages }} -{{- if eq .Role "user" }}user -{{- if and $.System (eq $index 0) }} -{{ $.System }} -{{- end }} -{{- else if eq .Role "assistant" }}model -{{- end }} -{{ .Content }} -{{ end }}model -{{ else -}} user {{ if .System }}{{ .System }} {{ end }}{{ .Prompt }} model {{ .Response }} -{{ end -}} \ No newline at end of file diff --git a/template/granite-instruct.gotmpl b/template/granite-instruct.gotmpl index 4a85a97b..56690fce 100644 --- a/template/granite-instruct.gotmpl +++ b/template/granite-instruct.gotmpl @@ -1,16 +1,3 @@ -{{- if .Messages }} -{{- if .System }}System: -{{ .System }} - -{{ end }} -{{- range .Messages }} -{{- if eq .Role "user" }}Question: -{{- else if eq .Role "assistant" }}Answer: -{{- end }} -{{ .Content }} - -{{ end }}Answer: -{{ else -}} {{ if .System }}System: {{ .System }} @@ -20,4 +7,3 @@ {{ end }}Answer: {{ .Response }} -{{ end -}} \ No newline at end of file diff --git a/template/llama2-chat.gotmpl b/template/llama2-chat.gotmpl index 1816fefd..013b414e 100644 --- a/template/llama2-chat.gotmpl +++ b/template/llama2-chat.gotmpl @@ -1,16 +1,6 @@ -{{- if .Messages }} -{{- range $index, $_ := .Messages }} -{{- if eq .Role "user" }}[INST] {{ if eq $index 0 }}<> -{{- if $.System }} -{{ $.System }} +[INST] <> +{{- if .System }} +{{ .System }} {{ end }}<> -{{ end }}{{ .Content }} -{{- else }} [/INST] {{ .Content }} -{{- end }} -{{- end }} [/INST] -{{- else -}} -[INST] <>{{ if .System }}{{ .System }}{{ 
diff --git a/template/llama2-chat.gotmpl b/template/llama2-chat.gotmpl
index 1816fefd..013b414e 100644
--- a/template/llama2-chat.gotmpl
+++ b/template/llama2-chat.gotmpl
@@ -1,16 +1,6 @@
-{{- if .Messages }}
-{{- range $index, $_ := .Messages }}
-{{- if eq .Role "user" }}[INST] {{ if eq $index 0 }}<<SYS>>
-{{- if $.System }}
-{{ $.System }}
+[INST] <<SYS>>
+{{- if .System }}
+{{ .System }}
 {{ end }}<</SYS>>
 
-{{ end }}{{ .Content }}
-{{- else }} [/INST] {{ .Content }}
-{{- end }}
-{{- end }} [/INST]
-{{- else -}}
-[INST] <<SYS>>{{ if .System }}{{ .System }}{{ end }}<</SYS>>
-
-{{ .Prompt }} [/INST] {{ .Response }}
-{{- end -}}
\ No newline at end of file
+{{ .Prompt }} [/INST] {{ .Response }}
\ No newline at end of file
diff --git a/template/llama3-instruct.gotmpl b/template/llama3-instruct.gotmpl
index 7947b8da..36d0218b 100644
--- a/template/llama3-instruct.gotmpl
+++ b/template/llama3-instruct.gotmpl
@@ -1,19 +1,7 @@
-{{- if .Messages }}
-{{- if .System }}<|start_header_id|>system<|end_header_id|>
-
-{{ .System }}<|eot_id|>
-{{- end }}
-{{- range .Messages }}<|start_header_id|>{{ .Role }}<|end_header_id|>
-
-{{ .Content }}<|eot_id|>
-{{- end }}<|start_header_id|>assistant<|end_header_id|>
-
-{{ else -}}
 {{ if .System }}<|start_header_id|>system<|end_header_id|>
 
 {{ .System }}<|eot_id|>{{ end }}{{ if .Prompt }}<|start_header_id|>user<|end_header_id|>
 
 {{ .Prompt }}<|eot_id|>{{ end }}<|start_header_id|>assistant<|end_header_id|>
 
-{{ .Response }}<|eot_id|>
-{{- end -}}
\ No newline at end of file
+{{ .Response }}<|eot_id|>
\ No newline at end of file
diff --git a/template/magicoder.gotmpl b/template/magicoder.gotmpl
index 9227b666..52abc01a 100644
--- a/template/magicoder.gotmpl
+++ b/template/magicoder.gotmpl
@@ -1,15 +1,3 @@
-{{- if .Messages }}
-{{- if .System }}{{ .System }}
-
-{{ end }}
-{{- range .Messages }}
-{{- if eq .Role "user" }}@@ Instruction
-{{- else if eq .Role "assistant" }}@@ Response
-{{- end }}
-{{ .Content }}
-
-{{ end }}@@ Response
-{{ else -}}
 {{ if .System }}{{ .System }}
 
 {{ end }}{{ if .Prompt }}@@ Instruction
@@ -18,4 +6,3 @@
 
 {{ end }}@@ Response
 {{ .Response }}
-{{ end -}}
\ No newline at end of file
diff --git a/template/mistral-instruct.gotmpl b/template/mistral-instruct.gotmpl
index 1d746dfd..e489bd4c 100644
--- a/template/mistral-instruct.gotmpl
+++ b/template/mistral-instruct.gotmpl
@@ -1,10 +1,3 @@
-{{- if .Messages }}
-{{- range $index, $_ := .Messages }}
-{{- if eq .Role "user" }}[INST] {{ if and $.System (eq (len (slice $.Messages $index)) 1) }}{{ $.System }}
-{{ end }}{{ .Content }}
-{{- else if eq .Role "assistant" }}[/INST] {{ .Content }}
-{{- end }}
-{{- end }}[/INST]
-{{- else -}}
-[INST] {{ if .System }}{{ .System }} {{ end }}{{ .Prompt }}[/INST] {{ .Response }}
-{{- end -}}
\ No newline at end of file
+[INST] {{ if .System }}{{ .System }}
+
+{{ end }}{{ .Prompt }}[/INST] {{ .Response }}
\ No newline at end of file
diff --git a/template/openchat.gotmpl b/template/openchat.gotmpl
index 649f0509..9c183834 100644
--- a/template/openchat.gotmpl
+++ b/template/openchat.gotmpl
@@ -1,11 +1 @@
-{{- if .Messages }}
-{{- if .System }}GPT4 Correct System: {{ .System }}<|end_of_turn|>
-{{- end }}
-{{- range .Messages }}GPT4 Correct
-{{- if eq .Role "user" }} User:
-{{- else if eq .Role "assistant" }} Assistant:
-{{- end }} {{ .Content }}<|end_of_turn|>
-{{- end }}GPT4 Correct Assistant:
-{{- else -}}
-{{ if .System }}GPT4 Correct System: {{ .System }}<|end_of_turn|>{{ end }}GPT4 Correct User: {{ .Prompt }}<|end_of_turn|>GPT4 Correct Assistant: {{ .Response }}<|end_of_turn|>
-{{- end -}}
\ No newline at end of file
+{{ if .System }}GPT4 Correct System: {{ .System }}<|end_of_turn|>{{ end }}GPT4 Correct User: {{ .Prompt }}<|end_of_turn|>GPT4 Correct Assistant: {{ .Response }}<|end_of_turn|>
\ No newline at end of file
diff --git a/template/phi-3.gotmpl b/template/phi-3.gotmpl
index 4ca56e95..6c3610dd 100644
--- a/template/phi-3.gotmpl
+++ b/template/phi-3.gotmpl
@@ -1,15 +1,6 @@
-{{- if .Messages }}
-{{- if .System }}<|system|>
-{{ .System }}<|end|>
-{{ end }}
-{{- range .Messages }}<|{{ .Role }}|>
-{{ .Content }}<|end|>
-{{ end }}<|assistant|>
-{{ else -}}
 {{ if .System }}<|system|>
 {{ .System }}<|end|>
 {{ end }}{{ if .Prompt }}<|user|>
 {{ .Prompt }}<|end|>
 {{ end }}<|assistant|>
 {{ .Response }}<|end|>
-{{ end -}}
\ No newline at end of file
diff --git a/template/solar-instruct.gotmpl b/template/solar-instruct.gotmpl
index 8a8331ca..1c14960d 100644
--- a/template/solar-instruct.gotmpl
+++ b/template/solar-instruct.gotmpl
@@ -1,16 +1,3 @@
-{{- if .Messages }}
-{{- if .System }}### System:
-{{ .System }}
-
-{{ end }}
-{{- range .Messages }}
-{{- if eq .Role "user" }}### User:
-{{ .Content }}
-{{ else if eq .Role "assistant" }}### Assistant:
-{{ .Content }}
-{{ end }}
-{{ end }}### Assistant:
-{{ else -}}
 {{ if .System }}### System:
 {{ .System }}
 
@@ -20,4 +7,3 @@
 
 {{ end }}### Assistant:
 {{ .Response }}
-{{ end -}}
\ No newline at end of file
diff --git a/template/starcoder2-instruct.gotmpl b/template/starcoder2-instruct.gotmpl
index 17c6ad75..6c93a7ab 100644
--- a/template/starcoder2-instruct.gotmpl
+++ b/template/starcoder2-instruct.gotmpl
@@ -1,17 +1,3 @@
-{{- if .Messages }}
-{{- if .System }}{{ .System }}
-
-{{ end }}
-{{- range .Messages }}
-{{- if eq .Role "user" }}### Instruction
-{{ .Content }}
-
-{{ else if eq .Role "assistant" }}### Response
-{{ .Content }}<|endoftext|>
-
-{{ end }}
-{{- end }}### Response
-{{ else -}}
 {{ if .System }}{{ .System }}
 
 {{ end }}{{ if .Prompt }}### Instruction
@@ -18,4 +6,3 @@
 
 {{ end }}### Response
 {{ .Response }}<|endoftext|>
-{{ end -}}
\ No newline at end of file
diff --git a/template/template_test.go b/template/template_test.go
index b020eb67..9cfa0bea 100644
--- a/template/template_test.go
+++ b/template/template_test.go
@@ -116,7 +116,14 @@ func TestTemplate(t *testing.T) {
 			t.Fatal(err)
 		}
 
-		if diff := cmp.Diff(actual.Bytes(), expect); diff != "" {
+		bts := actual.Bytes()
+
+		if slices.Contains([]string{"chatqa.gotmpl", "llama2-chat.gotmpl", "mistral-instruct.gotmpl", "openchat.gotmpl", "vicuna.gotmpl"}, match) && bts[len(bts)-1] == ' ' {
+			t.Log("removing trailing space from output")
+			bts = bts[:len(bts)-1]
+		}
+
+		if diff := cmp.Diff(bts, expect); diff != "" {
 			t.Errorf("mismatch (-got +want):\n%s", diff)
 		}
 	})
@@ -203,11 +210,18 @@ func TestExecuteWithMessages(t *testing.T) {
 		{
 			"mistral",
 			[]template{
-				{"no response", `[INST] {{ if .System }}{{ .System }}{{ "\n\n" }}{{ end }}{{ .Prompt }}[/INST] `},
-				{"response", `[INST] {{ if .System }}{{ .System }}{{ "\n\n" }}{{ end }}{{ .Prompt }}[/INST] {{ .Response }}`},
-				{"messages", `{{- range $index, $_ := .Messages }}
-{{- if eq .Role "user" }}[INST] {{ if and (eq $index 0) $.System }}{{ $.System }}{{ "\n\n" }}
-{{- end }}{{ .Content }}[/INST] {{ else if eq .Role "assistant" }}{{ .Content }}
+				{"no response", `[INST] {{ if .System }}{{ .System }}
+
+{{ end }}{{ .Prompt }}[/INST] `},
+				{"response", `[INST] {{ if .System }}{{ .System }}
+
+{{ end }}{{ .Prompt }}[/INST] {{ .Response }}`},
+				{"messages", `{{- $system := aggregate $.Messages "system" -}}
+{{- range $index, $_ := .Messages }}
+{{- if eq .Role "user" }}[INST] {{ if $system }}{{ $system }}
+{{- $system = "" }}
+
+{{ end }}{{ .Content }}[/INST] {{ else if eq .Role "assistant" }}{{ .Content }}
 {{- end }}
 {{- end }}`},
 			},
@@ -223,12 +237,18 @@ func TestExecuteWithMessages(t *testing.T) {
 		{
 			"mistral system",
 			[]template{
-				{"no response", `[INST] {{ if .System }}{{ .System }}{{ "\n\n" }}{{ end }}{{ .Prompt }}[/INST] `},
-				{"response", `[INST] {{ if .System }}{{ .System }}{{ "\n\n" }}{{ end }}{{ .Prompt }}[/INST] {{ .Response }}`},
-				{"messages", `
+				{"no response", `[INST] {{ if .System }}{{ .System }}
+
+{{ end }}{{ .Prompt }}[/INST] `},
+				{"response", `[INST] {{ if .System }}{{ .System }}
+
+{{ end }}{{ .Prompt }}[/INST] {{ .Response }}`},
+				{"messages", `{{- $system := aggregate $.Messages "system" -}}
 {{- range $index, $_ := .Messages }}
-{{- if eq .Role "user" }}[INST] {{ if and (eq $index 0) $.System }}{{ $.System }}{{ "\n\n" }}
-{{- end }}{{ .Content }}[/INST] {{ else if eq .Role "assistant" }}{{ .Content }}
+{{- if eq .Role "user" }}[INST] {{ if $system }}{{ $system }}
+{{- $system = "" }}
+
+{{ end }}{{ .Content }}[/INST] {{ else if eq .Role "assistant" }}{{ .Content }}
 {{- end }}
 {{- end }}`},
 			},
@@ -256,12 +276,9 @@ Hello friend![/INST] Hello human![INST] What is your name?[/INST] `,
 {{ .Response }}<|im_end|>
 `},
 			{"messages", `
-{{- range $index, $_ := .Messages }}
-{{- if and (eq .Role "user") (eq $index 0) $.System }}<|im_start|>system
-{{ $.System }}<|im_end|>{{ "\n" }}
-{{- end }}<|im_start|>{{ .Role }}
-{{ .Content }}<|im_end|>{{ "\n" }}
-{{- end }}<|im_start|>assistant
+{{- range $index, $_ := .Messages }}<|im_start|>{{ .Role }}
+{{ .Content }}<|im_end|>
+{{ end }}<|im_start|>assistant
 `},
 		},
 		Values{
@@ -294,9 +311,11 @@ What is your name?<|im_end|>
 `},
 			{"messages", `
 {{- range .Messages }}
-{{- if eq .Role "user" }}Question: {{ .Content }}{{ "\n\n" }}
-{{- else if eq .Role "assistant" }}Answer: {{ .Content }}{{ "\n\n" }}
-{{- end }}
+{{- if eq .Role "user" }}Question: {{ .Content }}
+
+{{ else if eq .Role "assistant" }}Answer: {{ .Content }}
+
+{{ end }}
 {{- end }}Answer: `},
 		},
 		Values{
diff --git a/template/testdata/llama2-chat.gotmpl/system-user-assistant-user b/template/testdata/llama2-chat.gotmpl/system-user-assistant-user
index fc2679bf..9db81cb4 100644
--- a/template/testdata/llama2-chat.gotmpl/system-user-assistant-user
+++ b/template/testdata/llama2-chat.gotmpl/system-user-assistant-user
@@ -2,4 +2,6 @@
 You are a helpful assistant.
 <</SYS>>
 
-Hello, how are you? [/INST] I'm doing great. How can I help you today?[INST] I'd like to show off how chat templating works! [/INST]
\ No newline at end of file
+Hello, how are you? [/INST] I'm doing great. How can I help you today?[INST] <<SYS>><</SYS>>
+
+I'd like to show off how chat templating works! [/INST]
\ No newline at end of file
diff --git a/template/testdata/llama2-chat.gotmpl/user-assistant-user b/template/testdata/llama2-chat.gotmpl/user-assistant-user
index 42b4c529..ca58954f 100644
--- a/template/testdata/llama2-chat.gotmpl/user-assistant-user
+++ b/template/testdata/llama2-chat.gotmpl/user-assistant-user
@@ -1,3 +1,5 @@
 [INST] <<SYS>><</SYS>>
 
-Hello, how are you? [/INST] I'm doing great. How can I help you today?[INST] I'd like to show off how chat templating works! [/INST]
\ No newline at end of file
+Hello, how are you? [/INST] I'm doing great. How can I help you today?[INST] <<SYS>><</SYS>>
+
+I'd like to show off how chat templating works! [/INST]
\ No newline at end of file
diff --git a/template/testdata/mistral-instruct.gotmpl/system-user-assistant-user b/template/testdata/mistral-instruct.gotmpl/system-user-assistant-user
index b6b4bf93..2f1edaec 100644
--- a/template/testdata/mistral-instruct.gotmpl/system-user-assistant-user
+++ b/template/testdata/mistral-instruct.gotmpl/system-user-assistant-user
@@ -1,2 +1,3 @@
-[INST] Hello, how are you?[/INST] I'm doing great. How can I help you today?[INST] You are a helpful assistant.
-I'd like to show off how chat templating works![/INST]
\ No newline at end of file
+[INST] You are a helpful assistant.
+
+Hello, how are you?[/INST] I'm doing great. How can I help you today?[INST] I'd like to show off how chat templating works![/INST]
\ No newline at end of file
diff --git a/template/vicuna.gotmpl b/template/vicuna.gotmpl
index 01465b99..515b2fe9 100644
--- a/template/vicuna.gotmpl
+++ b/template/vicuna.gotmpl
@@ -1,15 +1,4 @@
-{{- if .Messages }}
-{{- if .System }}{{ .System }}
-
-{{ end }}
-{{- range .Messages }}
-{{- if eq .Role "user" }}USER: {{ .Content }}
-{{ else if eq .Role "assistant" }}ASSISTANT: {{ .Content }}
-{{ end }}
-{{- end }}ASSISTANT:
-{{- else -}}
 {{ if .System }}{{ .System }}
 
 {{ end }}{{ if .Prompt }}USER: {{ .Prompt }}
 {{ end }}ASSISTANT: {{ .Response }}
-{{ end -}}
\ No newline at end of file
diff --git a/template/zephyr.gotmpl b/template/zephyr.gotmpl
index 3ca1d1a1..1f889f26 100644
--- a/template/zephyr.gotmpl
+++ b/template/zephyr.gotmpl
@@ -1,15 +1,6 @@
-{{- if .Messages }}
-{{- if .System }}<|system|>
-{{ .System }}</s>
-{{ end }}
-{{- range .Messages }}<|{{ .Role }}|>
-{{ .Content }}</s>
-{{ end }}<|assistant|>
-{{ else -}}
 {{ if .System }}<|system|>
 {{ .System }}</s>
 {{ end }}{{ if .Prompt }}<|user|>
 {{ .Prompt }}</s>
 {{ end }}<|assistant|>
 {{ .Response }}</s>
-{{ end -}}
\ No newline at end of file
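
Taken together, the template changes in this patch drop the `{{ if .Messages }}` branch and leave each file as a single straight-line prompt; message handling moves into the template package itself. A minimal standalone Go sketch of how such a template renders, assuming a stand-in Values struct rather than ollama's real execution path:

package main

import (
	"os"
	"text/template"
)

// Values is a stand-in for the value struct these templates are executed
// against; only the fields used here are included.
type Values struct {
	System   string
	Prompt   string
	Response string
}

func main() {
	// Same shape as the reworked vicuna.gotmpl above.
	tmpl := template.Must(template.New("vicuna").Parse(`{{ if .System }}{{ .System }}

{{ end }}{{ if .Prompt }}USER: {{ .Prompt }}
{{ end }}ASSISTANT: {{ .Response }}`))

	if err := tmpl.Execute(os.Stdout, Values{
		System: "You are a helpful assistant.",
		Prompt: "Hello, how are you?",
	}); err != nil {
		panic(err)
	}
}

Because .Response is empty at generation time, the output ends with "ASSISTANT: ", which is exactly the trailing space the TestTemplate change above trims before comparing against the golden files.
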
From c4cf8ad55966cc61c73f119ab9cbfaf57264fc81 Mon Sep 17 00:00:00 2001
From: Jeffrey Morgan
Date: Thu, 11 Jul 2024 16:42:57 -0700
Subject: [PATCH 14/17] llm: avoid loading model if system memory is too small
 (#5637)

* llm: avoid loading model if system memory is too small

* update log

* Instrument swap free space

On linux and windows, expose how much swap space is available
so we can take that into consideration when scheduling models

* use `systemSwapFreeMemory` in check

---------

Co-authored-by: Daniel Hiltgen
---
 gpu/gpu.go         |  3 +++
 gpu/gpu_darwin.go  |  1 +
 gpu/gpu_linux.go   | 17 +++++++++--------
 gpu/gpu_windows.go |  2 +-
 gpu/types.go       |  1 +
 llm/server.go      | 11 +++++++----
 6 files changed, 22 insertions(+), 13 deletions(-)

diff --git a/gpu/gpu.go b/gpu/gpu.go
index 58144991..6e25cb46 100644
--- a/gpu/gpu.go
+++ b/gpu/gpu.go
@@ -360,14 +360,17 @@ func GetGPUInfo() GpuInfoList {
 				"before",
 				"total", format.HumanBytes2(cpus[0].TotalMemory),
 				"free", format.HumanBytes2(cpus[0].FreeMemory),
+				"free_swap", format.HumanBytes2(cpus[0].FreeSwap),
 			),
 			slog.Group(
 				"now",
 				"total", format.HumanBytes2(mem.TotalMemory),
 				"free", format.HumanBytes2(mem.FreeMemory),
+				"free_swap", format.HumanBytes2(mem.FreeSwap),
 			),
 		)
 		cpus[0].FreeMemory = mem.FreeMemory
+		cpus[0].FreeSwap = mem.FreeSwap
 	}
 
 	var memInfo C.mem_info_t
diff --git a/gpu/gpu_darwin.go b/gpu/gpu_darwin.go
index 39d8fcf8..cb066e58 100644
--- a/gpu/gpu_darwin.go
+++ b/gpu/gpu_darwin.go
@@ -57,6 +57,7 @@ func GetCPUMem() (memInfo, error) {
 	return memInfo{
 		TotalMemory: uint64(C.getPhysicalMemory()),
 		FreeMemory:  uint64(C.getFreeMemory()),
+		// FreeSwap omitted as Darwin uses dynamic paging
 	}, nil
 }
 
diff --git a/gpu/gpu_linux.go b/gpu/gpu_linux.go
index a099bf82..0d08ce8d 100644
--- a/gpu/gpu_linux.go
+++ b/gpu/gpu_linux.go
@@ -50,7 +50,7 @@ var OneapiMgmtName = "libze_intel_gpu.so"
 
 func GetCPUMem() (memInfo, error) {
 	var mem memInfo
-	var total, available, free, buffers, cached uint64
+	var total, available, free, buffers, cached, freeSwap uint64
 	f, err := os.Open("/proc/meminfo")
 	if err != nil {
 		return mem, err
@@ -70,20 +70,21 @@ func GetCPUMem() (memInfo, error) {
 			_, err = fmt.Sscanf(line, "Buffers:%d", &buffers)
 		case strings.HasPrefix(line, "Cached:"):
 			_, err = fmt.Sscanf(line, "Cached:%d", &cached)
+		case strings.HasPrefix(line, "SwapFree:"):
+			_, err = fmt.Sscanf(line, "SwapFree:%d", &freeSwap)
 		default:
 			continue
 		}
 		if err != nil {
 			return mem, err
 		}
-
-		if total > 0 && available > 0 {
-			mem.TotalMemory = total * format.KibiByte
-			mem.FreeMemory = available * format.KibiByte
-			return mem, nil
-		}
 	}
 	mem.TotalMemory = total * format.KibiByte
-	mem.FreeMemory = (free + buffers + cached) * format.KibiByte
+	mem.FreeSwap = freeSwap * format.KibiByte
+	if available > 0 {
+		mem.FreeMemory = available * format.KibiByte
+	} else {
+		mem.FreeMemory = (free + buffers + cached) * format.KibiByte
+	}
 	return mem, nil
 }
diff --git a/gpu/gpu_windows.go b/gpu/gpu_windows.go
index f8c2e76f..cd0629da 100644
--- a/gpu/gpu_windows.go
+++ b/gpu/gpu_windows.go
@@ -51,5 +51,5 @@ func GetCPUMem() (memInfo, error) {
 	if r1 == 0 {
 		return memInfo{}, fmt.Errorf("GlobalMemoryStatusEx failed: %w", err)
 	}
-	return memInfo{TotalMemory: memStatus.TotalPhys, FreeMemory: memStatus.AvailPhys}, nil
+	return memInfo{TotalMemory: memStatus.TotalPhys, FreeMemory: memStatus.AvailPhys, FreeSwap: memStatus.AvailPageFile}, nil
 }
diff --git a/gpu/types.go b/gpu/types.go
index 7a7749b8..8d22b06b 100644
--- a/gpu/types.go
+++ b/gpu/types.go
@@ -10,6 +10,7 @@ import (
 type memInfo struct {
 	TotalMemory uint64 `json:"total_memory,omitempty"`
 	FreeMemory  uint64 `json:"free_memory,omitempty"`
+	FreeSwap    uint64 `json:"free_swap,omitempty"`
 }
 
 // Beginning of an `ollama info` command
diff --git a/llm/server.go b/llm/server.go
index 07c58cff..8f37aa23 100644
--- a/llm/server.go
+++ b/llm/server.go
@@ -88,6 +88,7 @@ func NewLlamaServer(gpus gpu.GpuInfoList, model string, ggml *GGML, adapters, pr
 	var estimate MemoryEstimate
 	var systemTotalMemory uint64
 	var systemFreeMemory uint64
+	var systemSwapFreeMemory uint64
 
 	systemMemInfo, err := gpu.GetCPUMem()
 	if err != nil {
@@ -95,7 +96,8 @@ func NewLlamaServer(gpus gpu.GpuInfoList, model string, ggml *GGML, adapters, pr
 	} else {
 		systemTotalMemory = systemMemInfo.TotalMemory
 		systemFreeMemory = systemMemInfo.FreeMemory
-		slog.Debug("system memory", "total", format.HumanBytes2(systemTotalMemory), "free", systemFreeMemory)
+		systemSwapFreeMemory = systemMemInfo.FreeSwap
+		slog.Debug("system memory", "total", format.HumanBytes2(systemTotalMemory), "free", format.HumanBytes2(systemFreeMemory), "free_swap", format.HumanBytes2(systemSwapFreeMemory))
 	}
 
 	// If the user wants zero GPU layers, reset the gpu list to be CPU/system ram info
@@ -125,9 +127,10 @@ func NewLlamaServer(gpus gpu.GpuInfoList, model string, ggml *GGML, adapters, pr
 	// On linux, over-allocating CPU memory will almost always result in an error
 	if runtime.GOOS == "linux" {
 		systemMemoryRequired := estimate.TotalSize - estimate.VRAMSize
-		if systemMemoryRequired > systemTotalMemory {
-			slog.Warn("model request too large for system", "requested", format.HumanBytes2(systemMemoryRequired), "system", format.HumanBytes2(systemTotalMemory))
-			return nil, fmt.Errorf("model requires more system memory (%s) than is available (%s)", format.HumanBytes2(systemMemoryRequired), format.HumanBytes2(systemTotalMemory))
+		available := min(systemTotalMemory, systemFreeMemory+systemSwapFreeMemory)
+		if systemMemoryRequired > available {
+			slog.Warn("model request too large for system", "requested", format.HumanBytes2(systemMemoryRequired), "available", available, "total", format.HumanBytes2(systemTotalMemory), "free", format.HumanBytes2(systemFreeMemory), "swap", format.HumanBytes2(systemSwapFreeMemory))
+			return nil, fmt.Errorf("model requires more system memory (%s) than is available (%s)", format.HumanBytes2(systemMemoryRequired), format.HumanBytes2(available))
 		}
 	}
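
The scheduling change above reduces to a small predicate: a model's CPU-resident footprint must fit within free RAM plus free swap, capped at total RAM. A standalone Go sketch of that logic (illustrative names and sizes, not code from the patch):

package main

import "fmt"

// canLoad mirrors the admission check: swap may stretch the budget beyond
// free RAM, but never past total RAM, because over-allocating physical
// memory on Linux almost always fails.
func canLoad(required, totalRAM, freeRAM, freeSwap uint64) bool {
	available := min(totalRAM, freeRAM+freeSwap)
	return required <= available
}

func main() {
	const GiB = 1 << 30
	// 12 GiB free RAM + 16 GiB free swap, capped at 32 GiB total: 28 GiB.
	fmt.Println(canLoad(20*GiB, 32*GiB, 12*GiB, 16*GiB)) // true
	// Swap cannot raise the cap above total RAM, so 40 GiB never fits.
	fmt.Println(canLoad(40*GiB, 32*GiB, 30*GiB, 64*GiB)) // false
}
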
From 5056bb9c010f06316b0ff280b879b9c36a7c995c Mon Sep 17 00:00:00 2001
From: Michael Yang
Date: Thu, 11 Jul 2024 16:06:57 -0700
Subject: [PATCH 15/17] rename aggregate to contents

---
 template/template.go      | 11 ++++++-----
 template/template_test.go | 37 +++++++++++++++++++++++++++++++++++--
 2 files changed, 41 insertions(+), 7 deletions(-)

diff --git a/template/template.go b/template/template.go
index 8d5ac51b..21e1614d 100644
--- a/template/template.go
+++ b/template/template.go
@@ -103,15 +103,16 @@ var response = parse.ActionNode{
 }
 
 var funcs = template.FuncMap{
-	"aggregate": func(v []*api.Message, role string) string {
-		var aggregated []string
+	// contents returns the contents of messages with an optional role filter
+	"contents": func(v []*api.Message, role ...string) string {
+		var parts []string
 		for _, m := range v {
-			if m.Role == role {
-				aggregated = append(aggregated, m.Content)
+			if len(role) == 0 || role[0] == "" || m.Role == role[0] {
+				parts = append(parts, m.Content)
 			}
 		}
 
-		return strings.Join(aggregated, "\n\n")
+		return strings.Join(parts, "\n\n")
 	},
 }
diff --git a/template/template_test.go b/template/template_test.go
index 9cfa0bea..5e5f4257 100644
--- a/template/template_test.go
+++ b/template/template_test.go
@@ -216,7 +216,7 @@ func TestExecuteWithMessages(t *testing.T) {
 				{"response", `[INST] {{ if .System }}{{ .System }}
 
 {{ end }}{{ .Prompt }}[/INST] {{ .Response }}`},
-				{"messages", `{{- $system := aggregate $.Messages "system" -}}
+				{"messages", `{{- $system := contents .Messages "system" -}}
 {{- range $index, $_ := .Messages }}
 {{- if eq .Role "user" }}[INST] {{ if $system }}{{ $system }}
 {{- $system = "" }}
@@ -243,7 +243,7 @@ func TestExecuteWithMessages(t *testing.T) {
 				{"response", `[INST] {{ if .System }}{{ .System }}
 
 {{ end }}{{ .Prompt }}[/INST] {{ .Response }}`},
-				{"messages", `{{- $system := aggregate $.Messages "system" -}}
+				{"messages", `{{- $system := contents .Messages "system" -}}
 {{- range $index, $_ := .Messages }}
 {{- if eq .Role "user" }}[INST] {{ if $system }}{{ $system }}
 {{- $system = "" }}
@@ -363,3 +363,36 @@ Answer: `,
 		})
 	}
 }
+
+func TestFuncs(t *testing.T) {
+	t.Run("contents", func(t *testing.T) {
+		cases := map[string]string{
+			"":          "A\n\nB\n\nC\n\nD\n\nE\n\nF",
+			"system":    "A\n\nF",
+			"user":      "B\n\nE",
+			"assistant": "C\n\nD",
+		}
+
+		s := []*api.Message{
+			{Role: "system", Content: "A"},
+			{Role: "user", Content: "B"},
+			{Role: "assistant", Content: "C"},
+			{Role: "assistant", Content: "D"},
+			{Role: "user", Content: "E"},
+			{Role: "system", Content: "F"},
+		}
+
+		fn, ok := funcs["contents"].(func([]*api.Message, ...string) string)
+		if !ok {
+			t.Fatal("contents is not a function")
+		}
+
+		for k, v := range cases {
+			t.Run(k, func(t *testing.T) {
+				if diff := cmp.Diff(fn(s, k), v); diff != "" {
+					t.Errorf("mismatch (-got +want):\n%s", diff)
+				}
+			})
+		}
+	})
+}
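
Because `contents` is variadic, templates can aggregate every message or just one role. A self-contained Go sketch of the same behavior, using a stand-in Message type instead of the api package:

package main

import (
	"fmt"
	"strings"
)

type Message struct {
	Role    string
	Content string
}

// contents matches the behavior of the helper in template/template.go:
// join message contents with blank lines, optionally filtered to one role.
func contents(v []*Message, role ...string) string {
	var parts []string
	for _, m := range v {
		if len(role) == 0 || role[0] == "" || m.Role == role[0] {
			parts = append(parts, m.Content)
		}
	}
	return strings.Join(parts, "\n\n")
}

func main() {
	msgs := []*Message{
		{Role: "system", Content: "A"},
		{Role: "user", Content: "B"},
		{Role: "system", Content: "C"},
	}
	fmt.Printf("%q\n", contents(msgs, "system")) // "A\n\nC"
	fmt.Printf("%q\n", contents(msgs))           // "A\n\nB\n\nC"
}
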
From 10e768826c7d5a8f7d7fab13832299a466a01f87 Mon Sep 17 00:00:00 2001
From: Josh <76125168+joshyan1@users.noreply.github.com>
Date: Thu, 11 Jul 2024 17:24:29 -0700
Subject: [PATCH 16/17] fix: quant err message (#5616)

---
 llm/llm.go | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/llm/llm.go b/llm/llm.go
index f2a5e557..d24507cc 100644
--- a/llm/llm.go
+++ b/llm/llm.go
@@ -33,7 +33,7 @@ func Quantize(infile, outfile string, ftype fileType) error {
 	params.ftype = ftype.Value()
 
 	if rc := C.llama_model_quantize(cinfile, coutfile, &params); rc != 0 {
-		return fmt.Errorf("llama_model_quantize: %d", rc)
+		return fmt.Errorf("failed to quantize model. This model architecture may not be supported, or you may need to upgrade Ollama to the latest version")
 	}
 
 	return nil

From 179737feb7311fc57c507a93378a3ac15da3a346 Mon Sep 17 00:00:00 2001
From: Jeffrey Morgan
Date: Thu, 11 Jul 2024 22:53:46 -0700
Subject: [PATCH 17/17] Clean up old files when installing on Windows (#5645)

* app: always clean up install dir; force close applications

* remove wildcard

* revert `CloseApplications`

* whitespace

* update `LOCALAPPDATA` var
---
 app/ollama.iss | 3 +++
 1 file changed, 3 insertions(+)

diff --git a/app/ollama.iss b/app/ollama.iss
index e6502abd..fef4a7b2 100644
--- a/app/ollama.iss
+++ b/app/ollama.iss
@@ -127,6 +127,9 @@ Type: filesandordirs; Name: "{%USERPROFILE}\.ollama\models"
 Type: filesandordirs; Name: "{%USERPROFILE}\.ollama\history"
 ; NOTE: if the user has a custom OLLAMA_MODELS it will be preserved
 
+[InstallDelete]
+Type: filesandordirs; Name: "{%LOCALAPPDATA}\Programs\Ollama"
+
 [Messages]
 WizardReady=Ollama Windows Preview
 ReadyLabel1=%nLet's get you up and running with your own large language models.
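
The quantize fix in PATCH 16 follows a common pattern: keep the opaque return code available for maintainers, but return an actionable message to the user instead of a bare integer. A hypothetical Go sketch of that pattern (not ollama's actual llm package):

package main

import (
	"errors"
	"fmt"
	"log/slog"
)

var errUnsupported = errors.New("failed to quantize model. This model architecture may not be supported, or you may need to upgrade Ollama to the latest version")

// quantize is a hypothetical wrapper: it records the raw return code in
// debug logs while surfacing the actionable message to the caller.
func quantize(rc int) error {
	if rc != 0 {
		slog.Debug("llama_model_quantize failed", "rc", rc)
		return errUnsupported
	}
	return nil
}

func main() {
	if err := quantize(1); err != nil {
		fmt.Println(err)
	}
}
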