diff --git a/.github/workflows/release.yaml b/.github/workflows/release.yaml
index 61ca3c43..5ae630c3 100644
--- a/.github/workflows/release.yaml
+++ b/.github/workflows/release.yaml
@@ -147,7 +147,7 @@ jobs:
       run: |
         $ErrorActionPreference = "Stop"
         write-host "downloading AMD HIP Installer"
-        Invoke-WebRequest -Uri "https://download.amd.com/developer/eula/rocm-hub/AMD-Software-PRO-Edition-23.Q4-WinSvr2022-For-HIP.exe" -OutFile "${env:RUNNER_TEMP}\rocm-install.exe"
+        Invoke-WebRequest -Uri "https://download.amd.com/developer/eula/rocm-hub/AMD-Software-PRO-Edition-24.Q3-WinSvr2022-For-HIP.exe" -OutFile "${env:RUNNER_TEMP}\rocm-install.exe"
         write-host "Installing AMD HIP"
         Start-Process "${env:RUNNER_TEMP}\rocm-install.exe" -ArgumentList '-install' -NoNewWindow -Wait
         write-host "Completed AMD HIP"
diff --git a/.github/workflows/test.yaml b/.github/workflows/test.yaml
index 13d1c957..977d8da1 100644
--- a/.github/workflows/test.yaml
+++ b/.github/workflows/test.yaml
@@ -169,7 +169,7 @@ jobs:
       run: |
         $ErrorActionPreference = "Stop"
         write-host "downloading AMD HIP Installer"
-        Invoke-WebRequest -Uri "https://download.amd.com/developer/eula/rocm-hub/AMD-Software-PRO-Edition-23.Q4-WinSvr2022-For-HIP.exe" -OutFile "${env:RUNNER_TEMP}\rocm-install.exe"
+        Invoke-WebRequest -Uri "https://download.amd.com/developer/eula/rocm-hub/AMD-Software-PRO-Edition-24.Q3-WinSvr2022-For-HIP.exe" -OutFile "${env:RUNNER_TEMP}\rocm-install.exe"
         write-host "Installing AMD HIP"
         Start-Process "${env:RUNNER_TEMP}\rocm-install.exe" -ArgumentList '-install' -NoNewWindow -Wait
         write-host "Completed AMD HIP"
diff --git a/app/ollama.iss b/app/ollama.iss
index e6502abd..fef4a7b2 100644
--- a/app/ollama.iss
+++ b/app/ollama.iss
@@ -127,6 +127,9 @@ Type: filesandordirs; Name: "{%USERPROFILE}\.ollama\models"
 Type: filesandordirs; Name: "{%USERPROFILE}\.ollama\history"
 ; NOTE: if the user has a custom OLLAMA_MODELS it will be preserved
+[InstallDelete]
+Type: filesandordirs; Name: "{%LOCALAPPDATA}\Programs\Ollama"
+
 [Messages]
 WizardReady=Ollama Windows Preview
 ReadyLabel1=%nLet's get you up and running with your own large language models.
diff --git a/docs/faq.md b/docs/faq.md
index 57411246..da1848f7 100644
--- a/docs/faq.md
+++ b/docs/faq.md
@@ -272,4 +272,4 @@ The following server settings may be used to adjust how Ollama handles concurren
 - `OLLAMA_NUM_PARALLEL` - The maximum number of parallel requests each model will process at the same time. The default will auto-select either 4 or 1 based on available memory.
 - `OLLAMA_MAX_QUEUE` - The maximum number of requests Ollama will queue when busy before rejecting additional requests. The default is 512
-Note: Windows with Radeon GPUs currently default to 1 model maximum due to limitations in ROCm v5.7 for available VRAM reporting. Once ROCm v6 is available, Windows Radeon will follow the defaults above. You may enable concurrent model loads on Radeon on Windows, but ensure you don't load more models than will fit into your GPUs VRAM.
\ No newline at end of file
+Note: Windows with Radeon GPUs currently default to 1 model maximum due to limitations in ROCm v5.7 for available VRAM reporting. Once ROCm v6.2 is available, Windows Radeon will follow the defaults above. You may enable concurrent model loads on Radeon on Windows, but ensure you don't load more models than will fit into your GPUs VRAM.
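The faq.md hunk above documents `OLLAMA_NUM_PARALLEL` (auto-selects 4 or 1 based on available memory) and `OLLAMA_MAX_QUEUE` (default 512). A minimal Go sketch of how a server might resolve these settings, assuming only the names and defaults quoted in the FAQ; `lookupInt` and the `lowMemory` stand-in are illustrative, not code from this repo:

```go
package main

import (
	"fmt"
	"os"
	"strconv"
)

// lookupInt reads an integer from the environment, falling back to def
// when the variable is unset or unparsable.
func lookupInt(key string, def int) int {
	if v := os.Getenv(key); v != "" {
		if n, err := strconv.Atoi(v); err == nil {
			return n
		}
	}
	return def
}

func main() {
	// Per the FAQ, OLLAMA_NUM_PARALLEL auto-selects 4 or 1 based on
	// available memory; lowMemory stands in for the real memory probe.
	lowMemory := false
	defaultParallel := 4
	if lowMemory {
		defaultParallel = 1
	}

	numParallel := lookupInt("OLLAMA_NUM_PARALLEL", defaultParallel)
	maxQueue := lookupInt("OLLAMA_MAX_QUEUE", 512) // default from the FAQ
	fmt.Println("parallel:", numParallel, "queue:", maxQueue)
}
```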
\ No newline at end of file diff --git a/gpu/amd_common.go b/gpu/amd_common.go index 27a81e3f..7d1cab7c 100644 --- a/gpu/amd_common.go +++ b/gpu/amd_common.go @@ -49,9 +49,17 @@ func rocmGetVisibleDevicesEnv(gpuInfo []GpuInfo) (string, string) { } func commonAMDValidateLibDir() (string, error) { - // We try to favor system paths first, so that we can wire up the subprocess to use - // the system version. Only use our bundled version if the system version doesn't work - // This gives users a more recovery options if versions have subtle problems at runtime + // Favor our bundled version + + // Installer payload location if we're running the installed binary + exe, err := os.Executable() + if err == nil { + rocmTargetDir := filepath.Join(filepath.Dir(exe), "rocm") + if rocmLibUsable(rocmTargetDir) { + slog.Debug("detected ROCM next to ollama executable " + rocmTargetDir) + return rocmTargetDir, nil + } + } // Prefer explicit HIP env var hipPath := os.Getenv("HIP_PATH") @@ -87,14 +95,5 @@ func commonAMDValidateLibDir() (string, error) { } } - // Installer payload location if we're running the installed binary - exe, err := os.Executable() - if err == nil { - rocmTargetDir := filepath.Join(filepath.Dir(exe), "rocm") - if rocmLibUsable(rocmTargetDir) { - slog.Debug("detected ROCM next to ollama executable " + rocmTargetDir) - return rocmTargetDir, nil - } - } return "", fmt.Errorf("no suitable rocm found, falling back to CPU") } diff --git a/gpu/amd_hip_windows.go b/gpu/amd_hip_windows.go index 8572a24c..2586278c 100644 --- a/gpu/amd_hip_windows.go +++ b/gpu/amd_hip_windows.go @@ -84,9 +84,8 @@ func (hl *HipLib) AMDDriverVersion() (driverMajor, driverMinor int, err error) { } slog.Debug("hipDriverGetVersion", "version", version) - // TODO - this isn't actually right, but the docs claim hipDriverGetVersion isn't accurate anyway... - driverMajor = version / 1000 - driverMinor = (version - (driverMajor * 1000)) / 10 + driverMajor = version / 10000000 + driverMinor = (version - (driverMajor * 10000000)) / 100000 return driverMajor, driverMinor, nil } diff --git a/gpu/amd_windows.go b/gpu/amd_windows.go index 1e41e0fb..a6e136f9 100644 --- a/gpu/amd_windows.go +++ b/gpu/amd_windows.go @@ -22,8 +22,8 @@ const ( var ( // Used to validate if the given ROCm lib is usable - ROCmLibGlobs = []string{"hipblas.dll", "rocblas"} // TODO - probably include more coverage of files here... - RocmStandardLocations = []string{"C:\\Program Files\\AMD\\ROCm\\5.7\\bin"} // TODO glob? + ROCmLibGlobs = []string{"hipblas.dll", "rocblas"} // This is not sufficient to discern v5 vs v6 + RocmStandardLocations = []string{"C:\\Program Files\\AMD\\ROCm\\6.1\\bin"} // TODO glob? 
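The amd_hip_windows.go hunk above replaces the old `/1000` and `/10` decoding of `hipDriverGetVersion` with a scheme that carries the major version above 1e7 and the minor version in the next two decimal digits. A self-contained sketch of the new arithmetic; the sample raw value is made up purely to show the digit layout:

```go
package main

import "fmt"

// decodeHIPDriverVersion splits the raw value from hipDriverGetVersion
// using the scheme the patch adopts: major above 1e7, minor in the two
// digits that follow.
func decodeHIPDriverVersion(version int) (major, minor int) {
	major = version / 10000000
	minor = (version - (major * 10000000)) / 100000
	return major, minor
}

func main() {
	// 70140252 is a hypothetical sample value, not a real driver reading.
	major, minor := decodeHIPDriverVersion(70140252)
	fmt.Printf("driver %d.%d\n", major, minor) // prints: driver 7.1
}
```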
) func AMDGetGPUInfo() []RocmGPUInfo { @@ -35,12 +35,11 @@ func AMDGetGPUInfo() []RocmGPUInfo { } defer hl.Release() - // TODO - this reports incorrect version information, so omitting for now - // driverMajor, driverMinor, err := hl.AMDDriverVersion() - // if err != nil { - // // For now this is benign, but we may eventually need to fail compatibility checks - // slog.Debug("error looking up amd driver version", "error", err) - // } + driverMajor, driverMinor, err := hl.AMDDriverVersion() + if err != nil { + // For now this is benign, but we may eventually need to fail compatibility checks + slog.Debug("error looking up amd driver version", "error", err) + } // Note: the HIP library automatically handles subsetting to any HIP_VISIBLE_DEVICES the user specified count := hl.HipGetDeviceCount() @@ -132,10 +131,8 @@ func AMDGetGPUInfo() []RocmGPUInfo { MinimumMemory: rocmMinimumMemory, Name: name, Compute: gfx, - - // TODO - this information isn't accurate on windows, so don't report it until we find the right way to retrieve - // DriverMajor: driverMajor, - // DriverMinor: driverMinor, + DriverMajor: driverMajor, + DriverMinor: driverMinor, }, index: i, } diff --git a/gpu/gpu.go b/gpu/gpu.go index 29a3c103..6e25cb46 100644 --- a/gpu/gpu.go +++ b/gpu/gpu.go @@ -274,6 +274,28 @@ func GetGPUInfo() GpuInfoList { gpuInfo.DriverMajor = driverMajor gpuInfo.DriverMinor = driverMinor + // query the management library as well so we can record any skew between the two + // which represents overhead on the GPU we must set aside on subsequent updates + if cHandles.nvml != nil { + C.nvml_get_free(*cHandles.nvml, C.int(gpuInfo.index), &memInfo.free, &memInfo.total, &memInfo.used) + if memInfo.err != nil { + slog.Warn("error looking up nvidia GPU memory", "error", C.GoString(memInfo.err)) + C.free(unsafe.Pointer(memInfo.err)) + } else { + if memInfo.free != 0 && uint64(memInfo.free) > gpuInfo.FreeMemory { + gpuInfo.OSOverhead = uint64(memInfo.free) - gpuInfo.FreeMemory + slog.Info("detected OS VRAM overhead", + "id", gpuInfo.ID, + "library", gpuInfo.Library, + "compute", gpuInfo.Compute, + "driver", fmt.Sprintf("%d.%d", gpuInfo.DriverMajor, gpuInfo.DriverMinor), + "name", gpuInfo.Name, + "overhead", format.HumanBytes2(gpuInfo.OSOverhead), + ) + } + } + } + // TODO potentially sort on our own algorithm instead of what the underlying GPU library does... 
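The gpu.go hunk above records the skew between the NVML (management library) and CUDA-runtime free-memory readings as `OSOverhead`, then discounts later NVML readings by that amount. A stand-alone sketch of the bookkeeping with illustrative numbers; the struct and method names here are not from this repo:

```go
package main

import "fmt"

// gpuMem models the two free-memory views the patch compares for one device.
type gpuMem struct {
	cudaFree   uint64 // free VRAM as the CUDA runtime reports it
	nvmlFree   uint64 // free VRAM as the management library reports it
	osOverhead uint64 // skew recorded at startup, set aside on later updates
}

// recordOverhead mirrors the initial pass: when NVML reports more free VRAM
// than the runtime does, the difference is treated as OS overhead.
func (g *gpuMem) recordOverhead() {
	if g.nvmlFree > g.cudaFree {
		g.osOverhead = g.nvmlFree - g.cudaFree
	}
}

// refresh mirrors the update path: a fresh NVML reading is reduced by the
// recorded overhead before being used for scheduling.
func (g *gpuMem) refresh(newNVMLFree uint64) uint64 {
	return newNVMLFree - g.osOverhead
}

func main() {
	// Byte counts are illustrative only.
	g := gpuMem{cudaFree: 7 << 30, nvmlFree: 8 << 30}
	g.recordOverhead()
	fmt.Println("overhead:", g.osOverhead)      // 1 GiB set aside
	fmt.Println("usable:", g.refresh(7500<<20)) // fresh reading minus overhead
}
```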
cudaGPUs = append(cudaGPUs, gpuInfo) } @@ -338,14 +360,17 @@ func GetGPUInfo() GpuInfoList { "before", "total", format.HumanBytes2(cpus[0].TotalMemory), "free", format.HumanBytes2(cpus[0].FreeMemory), + "free_swap", format.HumanBytes2(cpus[0].FreeSwap), ), slog.Group( "now", "total", format.HumanBytes2(mem.TotalMemory), "free", format.HumanBytes2(mem.FreeMemory), + "free_swap", format.HumanBytes2(mem.FreeSwap), ), ) cpus[0].FreeMemory = mem.FreeMemory + cpus[0].FreeSwap = mem.FreeSwap } var memInfo C.mem_info_t @@ -374,9 +399,14 @@ func GetGPUInfo() GpuInfoList { slog.Warn("error looking up nvidia GPU memory") continue } + if cHandles.nvml != nil && gpu.OSOverhead > 0 { + // When using the management library update based on recorded overhead + memInfo.free -= C.uint64_t(gpu.OSOverhead) + } slog.Debug("updating cuda memory data", "gpu", gpu.ID, "name", gpu.Name, + "overhead", format.HumanBytes2(gpu.OSOverhead), slog.Group( "before", "total", format.HumanBytes2(gpu.TotalMemory), diff --git a/gpu/gpu_darwin.go b/gpu/gpu_darwin.go index 39d8fcf8..cb066e58 100644 --- a/gpu/gpu_darwin.go +++ b/gpu/gpu_darwin.go @@ -57,6 +57,7 @@ func GetCPUMem() (memInfo, error) { return memInfo{ TotalMemory: uint64(C.getPhysicalMemory()), FreeMemory: uint64(C.getFreeMemory()), + // FreeSwap omitted as Darwin uses dynamic paging }, nil } diff --git a/gpu/gpu_linux.go b/gpu/gpu_linux.go index a099bf82..0d08ce8d 100644 --- a/gpu/gpu_linux.go +++ b/gpu/gpu_linux.go @@ -50,7 +50,7 @@ var OneapiMgmtName = "libze_intel_gpu.so" func GetCPUMem() (memInfo, error) { var mem memInfo - var total, available, free, buffers, cached uint64 + var total, available, free, buffers, cached, freeSwap uint64 f, err := os.Open("/proc/meminfo") if err != nil { return mem, err @@ -70,20 +70,21 @@ func GetCPUMem() (memInfo, error) { _, err = fmt.Sscanf(line, "Buffers:%d", &buffers) case strings.HasPrefix(line, "Cached:"): _, err = fmt.Sscanf(line, "Cached:%d", &cached) + case strings.HasPrefix(line, "SwapFree:"): + _, err = fmt.Sscanf(line, "SwapFree:%d", &freeSwap) default: continue } if err != nil { return mem, err } - - if total > 0 && available > 0 { - mem.TotalMemory = total * format.KibiByte - mem.FreeMemory = available * format.KibiByte - return mem, nil - } } mem.TotalMemory = total * format.KibiByte - mem.FreeMemory = (free + buffers + cached) * format.KibiByte + mem.FreeSwap = freeSwap * format.KibiByte + if available > 0 { + mem.FreeMemory = available * format.KibiByte + } else { + mem.FreeMemory = (free + buffers + cached) * format.KibiByte + } return mem, nil } diff --git a/gpu/gpu_windows.go b/gpu/gpu_windows.go index f8c2e76f..cd0629da 100644 --- a/gpu/gpu_windows.go +++ b/gpu/gpu_windows.go @@ -51,5 +51,5 @@ func GetCPUMem() (memInfo, error) { if r1 == 0 { return memInfo{}, fmt.Errorf("GlobalMemoryStatusEx failed: %w", err) } - return memInfo{TotalMemory: memStatus.TotalPhys, FreeMemory: memStatus.AvailPhys}, nil + return memInfo{TotalMemory: memStatus.TotalPhys, FreeMemory: memStatus.AvailPhys, FreeSwap: memStatus.AvailPageFile}, nil } diff --git a/gpu/types.go b/gpu/types.go index 2eaa9bae..8d22b06b 100644 --- a/gpu/types.go +++ b/gpu/types.go @@ -10,6 +10,7 @@ import ( type memInfo struct { TotalMemory uint64 `json:"total_memory,omitempty"` FreeMemory uint64 `json:"free_memory,omitempty"` + FreeSwap uint64 `json:"free_swap,omitempty"` } // Beginning of an `ollama info` command @@ -52,7 +53,8 @@ type CPUInfo struct { type CudaGPUInfo struct { GpuInfo - index int //nolint:unused,nolintlint + OSOverhead uint64 // Memory 
overhead between the driver library and management library + index int //nolint:unused,nolintlint } type CudaGPUInfoList []CudaGPUInfo diff --git a/llm/generate/gen_linux.sh b/llm/generate/gen_linux.sh index ab38b932..73b0bb49 100755 --- a/llm/generate/gen_linux.sh +++ b/llm/generate/gen_linux.sh @@ -187,7 +187,7 @@ if [ -z "${OLLAMA_SKIP_CUDA_GENERATE}" -a -d "${CUDA_LIB_DIR}" ]; then CMAKE_CUDA_DEFS="-DGGML_CUDA=on -DCMAKE_CUDA_ARCHITECTURES=${CMAKE_CUDA_ARCHITECTURES} ${OLLAMA_CUSTOM_CUDA_DEFS}" echo "Building custom CUDA GPU" else - CMAKE_CUDA_DEFS="-DGGML_CUDA=on -DCMAKE_CUDA_FLAGS=-t8 -DGGML_CUDA_FORCE_MMQ=on -DCMAKE_CUDA_ARCHITECTURES=${CMAKE_CUDA_ARCHITECTURES} -DCMAKE_LIBRARY_PATH=/usr/local/cuda/compat" + CMAKE_CUDA_DEFS="-DGGML_CUDA=on -DCMAKE_CUDA_FLAGS=-t8 -DCMAKE_CUDA_ARCHITECTURES=${CMAKE_CUDA_ARCHITECTURES}" fi CMAKE_DEFS="${COMMON_CMAKE_DEFS} ${CMAKE_DEFS} ${ARM64_DEFS} ${CMAKE_CUDA_DEFS}" BUILD_DIR="../build/linux/${ARCH}/cuda${CUDA_VARIANT}" diff --git a/llm/generate/gen_windows.ps1 b/llm/generate/gen_windows.ps1 index 85c43f54..d146f2f7 100644 --- a/llm/generate/gen_windows.ps1 +++ b/llm/generate/gen_windows.ps1 @@ -6,12 +6,11 @@ function amdGPUs { if ($env:AMDGPU_TARGETS) { return $env:AMDGPU_TARGETS } - # TODO - load from some common data file for linux + windows build consistency + # Current supported rocblas list from ROCm v6.1.2 on windows $GPU_LIST = @( "gfx803" "gfx900" "gfx902" - "gfx904" "gfx90c:xnack-" "gfx906:xnack-" "gfx908:xnack-" @@ -407,7 +406,6 @@ function build_rocm() { sign install - # Assumes v5.7, may need adjustments for v6 rm -ea 0 -recurse -force -path "${script:SRC_DIR}\dist\windows-${script:ARCH}\rocm\" md "${script:SRC_DIR}\dist\windows-${script:ARCH}\rocm\rocblas\library\" -ea 0 > $null cp "${env:HIP_PATH}\bin\hipblas.dll" "${script:SRC_DIR}\dist\windows-${script:ARCH}\rocm\" diff --git a/llm/ggml.go b/llm/ggml.go index cfead450..fddb5039 100644 --- a/llm/ggml.go +++ b/llm/ggml.go @@ -424,6 +424,32 @@ func (llm GGML) GraphSize(context, batch uint64) (partialOffload, fullOffload ui 4*batch*(3*embedding+vocab)+embedding*vocab*105/128, 4*batch*(2*embedding+1+2*embeddingHeadsK*headsKV+context+context*headsKV)+4*embeddingHeadsK*context*headsKV+embedding*embeddingHeadsK*headsKV*9/16, ) + case "chatglm": + fullOffload = 4 * batch * (embedding + vocab) + partialOffload = 4*batch*(embedding+vocab) + embedding*vocab*105/128 + if qkvBias, ok := layers["blk.0"]["attn_qkv.bias"]; ok { + fullOffload = max( + fullOffload, + 4*batch*(2+ + 2*embedding+ + context+ + context*heads+ + embeddingHeadsK*heads+ + qkvBias.Shape[0]), + ) + + partialOffload = max( + partialOffload, + 4*batch*(1+ + 2*embedding+ + embeddingHeadsK*heads+ + context+ + context*heads)+ + 4*embeddingHeadsK*context+ + 4*context*embeddingHeadsK+ + 4*qkvBias.Shape[0], + ) + } } return diff --git a/llm/llm.go b/llm/llm.go index f2a5e557..d24507cc 100644 --- a/llm/llm.go +++ b/llm/llm.go @@ -33,7 +33,7 @@ func Quantize(infile, outfile string, ftype fileType) error { params.ftype = ftype.Value() if rc := C.llama_model_quantize(cinfile, coutfile, ¶ms); rc != 0 { - return fmt.Errorf("llama_model_quantize: %d", rc) + return fmt.Errorf("failed to quantize model. 
This model architecture may not be supported, or you may need to upgrade Ollama to the latest version") } return nil diff --git a/llm/server.go b/llm/server.go index 08dc04d5..8f37aa23 100644 --- a/llm/server.go +++ b/llm/server.go @@ -88,6 +88,7 @@ func NewLlamaServer(gpus gpu.GpuInfoList, model string, ggml *GGML, adapters, pr var estimate MemoryEstimate var systemTotalMemory uint64 var systemFreeMemory uint64 + var systemSwapFreeMemory uint64 systemMemInfo, err := gpu.GetCPUMem() if err != nil { @@ -95,7 +96,8 @@ func NewLlamaServer(gpus gpu.GpuInfoList, model string, ggml *GGML, adapters, pr } else { systemTotalMemory = systemMemInfo.TotalMemory systemFreeMemory = systemMemInfo.FreeMemory - slog.Debug("system memory", "total", format.HumanBytes2(systemTotalMemory), "free", systemFreeMemory) + systemSwapFreeMemory = systemMemInfo.FreeSwap + slog.Debug("system memory", "total", format.HumanBytes2(systemTotalMemory), "free", format.HumanBytes2(systemFreeMemory), "free_swap", format.HumanBytes2(systemSwapFreeMemory)) } // If the user wants zero GPU layers, reset the gpu list to be CPU/system ram info @@ -122,6 +124,16 @@ func NewLlamaServer(gpus gpu.GpuInfoList, model string, ggml *GGML, adapters, pr } } + // On linux, over-allocating CPU memory will almost always result in an error + if runtime.GOOS == "linux" { + systemMemoryRequired := estimate.TotalSize - estimate.VRAMSize + available := min(systemTotalMemory, systemFreeMemory+systemSwapFreeMemory) + if systemMemoryRequired > available { + slog.Warn("model request too large for system", "requested", format.HumanBytes2(systemMemoryRequired), "available", available, "total", format.HumanBytes2(systemTotalMemory), "free", format.HumanBytes2(systemFreeMemory), "swap", format.HumanBytes2(systemSwapFreeMemory)) + return nil, fmt.Errorf("model requires more system memory (%s) than is available (%s)", format.HumanBytes2(systemMemoryRequired), format.HumanBytes2(available)) + } + } + estimate.log() // Loop through potential servers @@ -254,10 +266,6 @@ func NewLlamaServer(gpus gpu.GpuInfoList, model string, ggml *GGML, adapters, pr params = append(params, "--tensor-split", estimate.TensorSplit) } - if estimate.TensorSplit != "" { - params = append(params, "--tensor-split", estimate.TensorSplit) - } - for i := range len(servers) { dir := availableServers[servers[i]] if dir == "" { diff --git a/scripts/build_windows.ps1 b/scripts/build_windows.ps1 index b3991ce1..edc73759 100644 --- a/scripts/build_windows.ps1 +++ b/scripts/build_windows.ps1 @@ -107,9 +107,12 @@ function gatherDependencies() { # TODO - this varies based on host build system and MSVC version - drive from dumpbin output # currently works for Win11 + MSVC 2019 + Cuda V11 - cp "${env:VCToolsRedistDir}\x64\Microsoft.VC*.CRT\msvcp140.dll" "${script:DEPS_DIR}\ollama_runners\" + cp "${env:VCToolsRedistDir}\x64\Microsoft.VC*.CRT\msvcp140*.dll" "${script:DEPS_DIR}\ollama_runners\" cp "${env:VCToolsRedistDir}\x64\Microsoft.VC*.CRT\vcruntime140.dll" "${script:DEPS_DIR}\ollama_runners\" cp "${env:VCToolsRedistDir}\x64\Microsoft.VC*.CRT\vcruntime140_1.dll" "${script:DEPS_DIR}\ollama_runners\" + foreach ($part in $("runtime", "stdio", "filesystem", "math", "convert", "heap", "string", "time", "locale", "environment")) { + cp "$env:VCToolsRedistDir\..\..\..\Tools\Llvm\x64\bin\api-ms-win-crt-${part}*.dll" "${script:DEPS_DIR}\ollama_runners\" + } cp "${script:SRC_DIR}\app\ollama_welcome.ps1" "${script:SRC_DIR}\dist\" diff --git a/server/prompt_test.go b/server/prompt_test.go index 
d4cee98c..1435b143 100644 --- a/server/prompt_test.go +++ b/server/prompt_test.go @@ -161,7 +161,7 @@ func TestChatPrompt(t *testing.T) { {Role: "user", Content: "A test. And a thumping good one at that, I'd wager."}, }, expect: expect{ - prompt: "You're a test, Harry! I-I'm a what? You are the Test Who Lived. A test. And a thumping good one at that, I'd wager. ", + prompt: "You are the Test Who Lived. You're a test, Harry! I-I'm a what? A test. And a thumping good one at that, I'd wager. ", }, }, } diff --git a/server/routes_create_test.go b/server/routes_create_test.go index 269a0ba1..04174b92 100644 --- a/server/routes_create_test.go +++ b/server/routes_create_test.go @@ -546,8 +546,8 @@ func TestCreateDetectTemplate(t *testing.T) { checkFileExists(t, filepath.Join(p, "blobs", "*"), []string{ filepath.Join(p, "blobs", "sha256-553c4a3f747b3d22a4946875f1cc8ed011c2930d83f864a0c7265f9ec0a20413"), - filepath.Join(p, "blobs", "sha256-9512c372dfc7d84d6065b8dd2b601aeed8cc1a78e7a7aa784a42fff37f5524b7"), - filepath.Join(p, "blobs", "sha256-b8b78cb8c6eefd14c06f1af042e6161255bf87bbf2dd14fce57cdac893db8139"), + filepath.Join(p, "blobs", "sha256-c608dc615584cd20d9d830363dabf8a4783ae5d34245c3d8c115edb3bc7b28e4"), + filepath.Join(p, "blobs", "sha256-f836ee110db21567f826332e4cedd746c06d10664fd5a9ea3659e3683a944510"), }) }) diff --git a/server/sched.go b/server/sched.go index 48047bfe..2daed3ab 100644 --- a/server/sched.go +++ b/server/sched.go @@ -135,11 +135,6 @@ func (s *Scheduler) processPending(ctx context.Context) { } for { - cpus := s.getCpuFn() - var systemMem gpu.GpuInfo - if len(cpus) > 0 { - systemMem = cpus[0] - } var runnerToExpire *runnerRef s.loadedMu.Lock() runner := s.loaded[pending.model.ModelPath] @@ -193,38 +188,6 @@ func (s *Scheduler) processPending(ctx context.Context) { break } - estimate := llm.EstimateGPULayers(gpus, ggml, pending.model.ProjectorPaths, pending.opts) - maxSize := systemMem.FreeMemory - - // Add available GPU memory to the total pool - // macOS hardware has unified memory so don't double count - if runtime.GOOS != "darwin" { - for _, gpu := range gpus { - if gpu.Library == "cpu" { - continue - } - if loadedCount == 0 { - // If no other models are loaded, set the limit based on what's available - maxSize += gpu.FreeMemory - } else { - // Other models could be unloaded, favor total memory for limit - maxSize += gpu.TotalMemory - } - } - } - - // Block attempting to load a model larger than system memory + GPU memory - if estimate.TotalSize > maxSize { - slog.Warn("model request too large for system", "requested", format.HumanBytes2(estimate.TotalSize), "system", format.HumanBytes2(maxSize)) - - // Linux will crash if over-allocating memory - return an error to the user. 
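This sched.go deletion moves the over-allocation guard into `NewLlamaServer` (see the llm/server.go hunk above): on Linux, the CPU-resident share of the load must fit within free RAM plus free swap, capped at total RAM. A sketch of that check under illustrative sizes; it relies on the Go 1.21+ built-in `min`, as the patch itself does, and the real code formats sizes with `format.HumanBytes2` rather than raw byte counts:

```go
package main

import "fmt"

// fitsInSystemMemory reproduces the Linux guard: the portion of the total
// estimate that does not land in VRAM must fit in free RAM plus free swap,
// capped at total RAM.
func fitsInSystemMemory(totalSize, vramSize, sysTotal, sysFree, sysSwapFree uint64) error {
	required := totalSize - vramSize
	available := min(sysTotal, sysFree+sysSwapFree)
	if required > available {
		return fmt.Errorf("model requires more system memory (%d) than is available (%d)", required, available)
	}
	return nil
}

func main() {
	// A 12 GiB estimate with 8 GiB offloaded to VRAM needs 4 GiB of system
	// memory; only 3 GiB of RAM+swap is available here, so this errors.
	err := fitsInSystemMemory(12<<30, 8<<30, 16<<30, 2<<30, 1<<30)
	fmt.Println(err)
}
```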
- // TODO (jmorganca): add reasonable upper limits for darwin and windows as well - if runtime.GOOS == "linux" { - pending.errCh <- fmt.Errorf("requested model (%s) is too large for this system (%s)", format.HumanBytes2(estimate.TotalSize), format.HumanBytes2(maxSize)) - break - } - } - // Evaluate if the model will fit in the available system memory, or if we should unload a model first if len(gpus) == 1 && gpus[0].Library == "cpu" { // simplifying assumption of defaultParallel when in CPU mode diff --git a/template/alfred.gotmpl b/template/alfred.gotmpl index 44284f04..cecb9d2c 100644 --- a/template/alfred.gotmpl +++ b/template/alfred.gotmpl @@ -1,8 +1 @@ -{{- if .Messages }} -{{- if .System }}{{ .System }} -{{- end }} -{{- range .Messages }}{{ .Content }} -{{- end }} -{{- else }} -{{ if .System }}{{ .System }}{{ end }}{{ if .Prompt }}{{ .Prompt }}{{ end }}{{ .Response }} -{{- end }} \ No newline at end of file +{{ if .System }}{{ .System }}{{ end }}{{ if .Prompt }}{{ .Prompt }}{{ end }}{{ .Response }} \ No newline at end of file diff --git a/template/alpaca.gotmpl b/template/alpaca.gotmpl index c1f69dc9..ec7a8edc 100644 --- a/template/alpaca.gotmpl +++ b/template/alpaca.gotmpl @@ -1,14 +1,3 @@ -{{- if .Messages }} -{{- if .System }}{{ .System }} -{{- end }} -{{- range .Messages }} -{{- if eq .Role "user" }}### Instruction: -{{- else if eq .Role "assistant" }}### Response: -{{- end }} -{{ .Content }} - -{{ end }}### Response: -{{ else }} {{ if .System }}{{ .System }} {{ end }}{{ if .Prompt }}### Instruction: @@ -16,4 +5,4 @@ {{ end }}### Response: {{ .Response }} -{{- end }} \ No newline at end of file + diff --git a/template/chatml.gotmpl b/template/chatml.gotmpl index d945547c..fb672601 100644 --- a/template/chatml.gotmpl +++ b/template/chatml.gotmpl @@ -1,15 +1,6 @@ -{{- if .Messages }} -{{- if .System }}<|im_start|>system -{{ .System }}<|im_end|> -{{ end }} -{{- range .Messages }}<|im_start|>{{ .Role }} -{{ .Content }}<|im_end|> -{{ end }}<|im_start|>assistant -{{ else }} {{ if .System }}<|im_start|>system {{ .System }}<|im_end|> {{ end }}{{ if .Prompt }}<|im_start|>user {{ .Prompt }}<|im_end|> {{ end }}<|im_start|>assistant {{ .Response }}<|im_end|> -{{- end }} \ No newline at end of file diff --git a/template/chatqa.gotmpl b/template/chatqa.gotmpl index 7022c479..91679a72 100644 --- a/template/chatqa.gotmpl +++ b/template/chatqa.gotmpl @@ -1,17 +1,6 @@ -{{- if .Messages }} -{{- if .System }}System: {{ .System }} - -{{ end }} -{{- range .Messages }} -{{- if eq .Role "user" }}User: -{{- else if eq .Role "assistant" }}Assistant: -{{- end }} {{ .Content }} - -{{ end }}Assistant: -{{- else }} {{ if .System }}System: {{ .System }} {{ end }}{{ if .Prompt }}User: {{ .Prompt }} -{{ end }}Assistant: <|begin_of_text|>{{ .Response }} -{{- end }} \ No newline at end of file +{{ end }}Assistant: {{ .Response }} + diff --git a/template/codellama-70b-instruct.gotmpl b/template/codellama-70b-instruct.gotmpl index 392d839e..e5856042 100644 --- a/template/codellama-70b-instruct.gotmpl +++ b/template/codellama-70b-instruct.gotmpl @@ -1,19 +1,10 @@ -{{- if .Messages }} -{{- if .System }}Source: system +{{ if .System }}Source: system - {{ .System }} {{ end }} -{{- range .Messages }}Source: {{ .Role }} - - {{ .Content }} {{ end }}Source: assistant -Destination: user - -{{ else }} -{{ if .System }} Source: system - - {{ .System }} {{ end }} Source: user + {{ .System }} {{ end }}Source: user {{ .Prompt }} Source: assistant +{{- if not .Response }} Destination: user +{{- end }} - {{ .Response }} -{{- end }} 
\ No newline at end of file + {{ .Response }} \ No newline at end of file diff --git a/template/falcon-instruct.gotmpl b/template/falcon-instruct.gotmpl index 99d67f93..0a5fe48e 100644 --- a/template/falcon-instruct.gotmpl +++ b/template/falcon-instruct.gotmpl @@ -1,13 +1,5 @@ -{{- if .Messages }} -{{- if .System }}System: {{ .System }} -{{ end }} -{{- range .Messages }} -{{- if eq .Role "user" }}User: -{{ else if eq .Role "assistant" }}Falcon: -{{ end }}{{ .Content }} +{{ if .System }}System: {{ .System }} +{{ end }}{{ if .Prompt }}User: +{{ .Prompt }} {{ end }}Falcon: -{{ else }} -{{ if .System }}{{ .System }} -{{ end }}{{ if .Prompt }}User: {{ .Prompt }} -{{ end }}Assistant: {{ .Response }} -{{- end }} \ No newline at end of file +{{ .Response }} diff --git a/template/gemma-instruct.gotmpl b/template/gemma-instruct.gotmpl index 870a8f2e..3c3a8425 100644 --- a/template/gemma-instruct.gotmpl +++ b/template/gemma-instruct.gotmpl @@ -1,16 +1,5 @@ -{{- if .Messages }} -{{- range $index, $_ := .Messages }} -{{- if eq .Role "user" }}user -{{- if and $.System (eq $index 0) }} -{{ $.System }} -{{- end }} -{{- else if eq .Role "assistant" }}model -{{- end }} -{{ .Content }} -{{ end }}model -{{ else }} user -{{ if .System }}{{ .System }} {{ end }}{{ .Prompt }} +{{ if .System }}{{ .System }} +{{ end }}{{ .Prompt }} model {{ .Response }} -{{- end }} \ No newline at end of file diff --git a/template/granite-instruct.gotmpl b/template/granite-instruct.gotmpl index 327ff3ee..56690fce 100644 --- a/template/granite-instruct.gotmpl +++ b/template/granite-instruct.gotmpl @@ -1,18 +1,4 @@ -{{- if .Messages }} -{{- if .System }}System: -{{ .System }} - -{{ end }} -{{- range .Messages }} -{{- if eq .Role "user" }}Question: -{{- else if eq .Role "assistant" }}Answer: -{{- end }} -{{ .Content }} - -{{ end }}Answer: -{{ else }} -{{ if .System }} -System: +{{ if .System }}System: {{ .System }} {{ end }}{{ if .Prompt }}Question: @@ -20,4 +6,4 @@ System: {{ end }}Answer: {{ .Response }} -{{- end }} \ No newline at end of file + diff --git a/template/llama2-chat.gotmpl b/template/llama2-chat.gotmpl index 6327d581..013b414e 100644 --- a/template/llama2-chat.gotmpl +++ b/template/llama2-chat.gotmpl @@ -1,16 +1,6 @@ -{{- if .Messages }} -{{- range $index, $_ := .Messages }} -{{- if eq .Role "user" }}[INST] {{ if eq $index 0 }}<> -{{- if $.System }} -{{ $.System }} +[INST] <> +{{- if .System }} +{{ .System }} {{ end }}<> -{{ end }}{{ .Content }} -{{- else }} [/INST] {{ .Content }} -{{- end }} -{{- end }} [/INST] -{{- else }} -[INST] <>{{ .System }}<> - -{{ .Prompt }} [/INST] {{ .Response }} -{{- end }} \ No newline at end of file +{{ .Prompt }} [/INST] {{ .Response }} \ No newline at end of file diff --git a/template/llama3-instruct.gotmpl b/template/llama3-instruct.gotmpl index 9c81a953..36d0218b 100644 --- a/template/llama3-instruct.gotmpl +++ b/template/llama3-instruct.gotmpl @@ -1,19 +1,7 @@ -{{- if .Messages }} -{{- if .System }}<|start_header_id|>system<|end_header_id|> - -{{ .System }}<|eot_id|> -{{- end }} -{{- range .Messages }}<|start_header_id|>{{ .Role }}<|end_header_id|> - -{{ .Content }}<|eot_id|> -{{- end }}<|start_header_id|>assistant<|end_header_id|> - -{{ else }} {{ if .System }}<|start_header_id|>system<|end_header_id|> {{ .System }}<|eot_id|>{{ end }}{{ if .Prompt }}<|start_header_id|>user<|end_header_id|> {{ .Prompt }}<|eot_id|>{{ end }}<|start_header_id|>assistant<|end_header_id|> -{{ .Response }}<|eot_id|> -{{- end }} \ No newline at end of file +{{ .Response }}<|eot_id|> \ No newline at end of 
file diff --git a/template/magicoder.gotmpl b/template/magicoder.gotmpl index 73a58127..52abc01a 100644 --- a/template/magicoder.gotmpl +++ b/template/magicoder.gotmpl @@ -1,15 +1,3 @@ -{{- if .Messages }} -{{- if .System }}{{ .System }} - -{{ end }} -{{- range .Messages }} -{{- if eq .Role "user" }}@@ Instruction -{{- else if eq .Role "assistant" }}@@ Response -{{- end }} -{{ .Content }} - -{{ end }}@@ Response -{{ else }} {{ if .System }}{{ .System }} {{ end }}{{ if .Prompt }}@@ Instruction @@ -17,4 +5,4 @@ {{ end }}@@ Response {{ .Response }} -{{- end }} \ No newline at end of file + diff --git a/template/mistral-instruct.gotmpl b/template/mistral-instruct.gotmpl index eb3d5ced..e489bd4c 100644 --- a/template/mistral-instruct.gotmpl +++ b/template/mistral-instruct.gotmpl @@ -1,9 +1,3 @@ -{{- if .Messages }} -{{- range $index, $_ := .Messages }} -{{- if eq .Role "user" }}[INST] {{ if and $.System (eq (len (slice $.Messages $index)) 1) }}{{ $.System }} -{{ end }}{{ .Content }} -{{- else if eq .Role "assistant" }}[/INST] {{ .Content }} -{{- end }} -{{- end }}[/INST] -{{- else }}[INST] {{ if .System }}{{ .System }} {{ end }}{{ .Prompt }} [/INST] {{ .Response }} -{{- end }} \ No newline at end of file +[INST] {{ if .System }}{{ .System }} + +{{ end }}{{ .Prompt }}[/INST] {{ .Response }} \ No newline at end of file diff --git a/template/openchat.gotmpl b/template/openchat.gotmpl index d5e1cbb0..9c183834 100644 --- a/template/openchat.gotmpl +++ b/template/openchat.gotmpl @@ -1,11 +1 @@ -{{- if .Messages }} -{{- if .System }}GPT Correct System: {{ .System }}<|end_of_turn|> -{{- end }} -{{- range .Messages }}GPT Correct -{{- if eq .Role "user" }} User: -{{- else if eq .Role "assistant" }} Assistant: -{{- end }} {{ .Content }}<|end_of_turn|> -{{- end }}GPT Correct Assistant: -{{- else }} -{{ .System }}<|end_of_turn|>GPT4 Correct User: {{ .Prompt }}<|end_of_turn|>GPT4 Correct Assistant: {{ .Response }}<|end_of_turn|> -{{- end }} \ No newline at end of file +{{ if .System }}GPT4 Correct System: {{ .System }}<|end_of_turn|>{{ end }}GPT4 Correct User: {{ .Prompt }}<|end_of_turn|>GPT4 Correct Assistant: {{ .Response }}<|end_of_turn|> \ No newline at end of file diff --git a/template/phi-3.gotmpl b/template/phi-3.gotmpl index a3558d2b..6c3610dd 100644 --- a/template/phi-3.gotmpl +++ b/template/phi-3.gotmpl @@ -1,15 +1,6 @@ -{{- if .Messages }} -{{- if .System }}<|system|> -{{ .System }}<|end|> -{{ end }} -{{- range .Messages }}<|{{ .Role }}|> -{{ .Content }}<|end|> -{{ end }}<|assistant|> -{{ else }} {{ if .System }}<|system|> {{ .System }}<|end|> {{ end }}{{ if .Prompt }}<|user|> {{ .Prompt }}<|end|> {{ end }}<|assistant|> {{ .Response }}<|end|> -{{- end }} \ No newline at end of file diff --git a/template/solar-instruct.gotmpl b/template/solar-instruct.gotmpl index caa6e8e7..1c14960d 100644 --- a/template/solar-instruct.gotmpl +++ b/template/solar-instruct.gotmpl @@ -1,16 +1,3 @@ -{{- if .Messages }} -{{- if .System }}### System: -{{ .System }} - -{{ end }} -{{- range .Messages }} -{{- if eq .Role "user" }}### User: -{{ .Content }} -{{ else if eq .Role "assistant" }}### Assistant: -{{ .Content }} -{{ end }} -{{ end }}### Assistant: -{{ else }} {{ if .System }}### System: {{ .System }} @@ -18,5 +5,5 @@ {{ .Prompt }} {{ end }}### Assistant: -{{ .Response }} -{{- end }} \ No newline at end of file +{{ .Response }} + diff --git a/template/starcoder2-instruct.gotmpl b/template/starcoder2-instruct.gotmpl index 7d7ff932..6c93a7ab 100644 --- a/template/starcoder2-instruct.gotmpl +++ 
b/template/starcoder2-instruct.gotmpl @@ -1,24 +1,8 @@ -{{- if .Messages }} -{{- if .System }}{{ .System }} - -{{ end }} -{{- range .Messages }} -{{- if eq .Role "user" }}### Instruction -{{ .Content }} - -{{ else if eq .Role "assistant" }}### Response -{{ .Content }}<|endoftext|> - -{{ end }} -{{- end }}### Response -{{ else }} {{ if .System }}{{ .System }} {{ end }}{{ if .Prompt }}### Instruction {{ .Prompt }} - {{ end }}### Response {{ .Response }}<|endoftext|> -{{- end }} \ No newline at end of file diff --git a/template/template.go b/template/template.go index b133b97e..21e1614d 100644 --- a/template/template.go +++ b/template/template.go @@ -102,8 +102,22 @@ var response = parse.ActionNode{ }, } +var funcs = template.FuncMap{ + // contents returns the contents of messages with an optional role filter + "contents": func(v []*api.Message, role ...string) string { + var parts []string + for _, m := range v { + if len(role) == 0 || role[0] == "" || m.Role == role[0] { + parts = append(parts, m.Content) + } + } + + return strings.Join(parts, "\n\n") + }, +} + func Parse(s string) (*Template, error) { - tmpl := template.New("").Option("missingkey=zero") + tmpl := template.New("").Option("missingkey=zero").Funcs(funcs) tmpl, err := tmpl.Parse(s) if err != nil { @@ -143,53 +157,62 @@ func (t *Template) Vars() []string { type Values struct { Messages []api.Message + + // forceLegacy is a flag used to test compatibility with legacy templates + forceLegacy bool } func (t *Template) Execute(w io.Writer, v Values) error { - system, collated := collate(v.Messages) - if slices.Contains(t.Vars(), "messages") { + collated := collate(v.Messages) + if !v.forceLegacy && slices.Contains(t.Vars(), "messages") { return t.Template.Execute(w, map[string]any{ - "System": system, "Messages": collated, }) } var b bytes.Buffer - var prompt, response string + var system, prompt, response string for i, m := range collated { - if m.Role == "user" { + switch m.Role { + case "system": + system = m.Content + case "user": prompt = m.Content - } else { + case "assistant": response = m.Content } if i != len(collated)-1 && prompt != "" && response != "" { if err := t.Template.Execute(&b, map[string]any{ - "System": "", + "System": system, "Prompt": prompt, "Response": response, }); err != nil { return err } + system = "" prompt = "" response = "" } } var cut bool - tree := t.Template.Copy() - // for the last message, cut everything after "{{ .Response }}" - tree.Root.Nodes = slices.DeleteFunc(tree.Root.Nodes, func(n parse.Node) bool { - if slices.Contains(parseNode(n), "Response") { - cut = true + nodes := deleteNode(t.Template.Root.Copy(), func(n parse.Node) bool { + switch t := n.(type) { + case *parse.ActionNode: + case *parse.FieldNode: + if slices.Contains(t.Ident, "Response") { + cut = true + } } return cut }) - if err := template.Must(template.New("").AddParseTree("", tree)).Execute(&b, map[string]any{ - "System": system, + tree := parse.Tree{Root: nodes.(*parse.ListNode)} + if err := template.Must(template.New("").AddParseTree("", &tree)).Execute(&b, map[string]any{ + "System": "", "Prompt": prompt, }); err != nil { return err @@ -199,25 +222,14 @@ func (t *Template) Execute(w io.Writer, v Values) error { return err } -type messages []*api.Message - // collate messages based on role. consecutive messages of the same role are merged // into a single message. collate also pulls out and merges messages with Role == "system" // which are templated separately. 
As a side effect, it mangles message content adding image // tags ([img-%d]) as needed -func collate(msgs []api.Message) (system string, collated messages) { +func collate(msgs []api.Message) (collated []*api.Message) { var n int for i := range msgs { msg := msgs[i] - if msg.Role == "system" { - if system != "" { - system += "\n\n" - } - - system += msg.Content - continue - } - for range msg.Images { imageTag := fmt.Sprintf("[img-%d]", n) if !strings.Contains(msg.Content, "[img]") { @@ -286,3 +298,72 @@ func parseNode(n parse.Node) []string { return nil } + +// deleteNode walks the node list and deletes nodes that match the predicate +// this is currently to remove the {{ .Response }} node from templates +func deleteNode(n parse.Node, fn func(parse.Node) bool) parse.Node { + var walk func(n parse.Node) parse.Node + walk = func(n parse.Node) parse.Node { + if fn(n) { + return nil + } + + switch t := n.(type) { + case *parse.ListNode: + var nodes []parse.Node + for _, c := range t.Nodes { + if n := walk(c); n != nil { + nodes = append(nodes, n) + } + } + + t.Nodes = nodes + return t + case *parse.IfNode: + t.BranchNode = *(walk(&t.BranchNode).(*parse.BranchNode)) + case *parse.WithNode: + t.BranchNode = *(walk(&t.BranchNode).(*parse.BranchNode)) + case *parse.RangeNode: + t.BranchNode = *(walk(&t.BranchNode).(*parse.BranchNode)) + case *parse.BranchNode: + t.List = walk(t.List).(*parse.ListNode) + if t.ElseList != nil { + t.ElseList = walk(t.ElseList).(*parse.ListNode) + } + case *parse.ActionNode: + n := walk(t.Pipe) + if n == nil { + return nil + } + + t.Pipe = n.(*parse.PipeNode) + case *parse.PipeNode: + var commands []*parse.CommandNode + for _, c := range t.Cmds { + var args []parse.Node + for _, a := range c.Args { + if n := walk(a); n != nil { + args = append(args, n) + } + } + + if len(args) == 0 { + return nil + } + + c.Args = args + commands = append(commands, c) + } + + if len(commands) == 0 { + return nil + } + + t.Cmds = commands + } + + return n + } + + return walk(n) +} diff --git a/template/template_test.go b/template/template_test.go index 428cdc77..5e5f4257 100644 --- a/template/template_test.go +++ b/template/template_test.go @@ -105,8 +105,8 @@ func TestTemplate(t *testing.T) { } for n, tt := range cases { + var actual bytes.Buffer t.Run(n, func(t *testing.T) { - var actual bytes.Buffer if err := tmpl.Execute(&actual, Values{Messages: tt}); err != nil { t.Fatal(err) } @@ -116,7 +116,34 @@ func TestTemplate(t *testing.T) { t.Fatal(err) } - if diff := cmp.Diff(actual.Bytes(), expect); diff != "" { + bts := actual.Bytes() + + if slices.Contains([]string{"chatqa.gotmpl", "llama2-chat.gotmpl", "mistral-instruct.gotmpl", "openchat.gotmpl", "vicuna.gotmpl"}, match) && bts[len(bts)-1] == ' ' { + t.Log("removing trailing space from output") + bts = bts[:len(bts)-1] + } + + if diff := cmp.Diff(bts, expect); diff != "" { + t.Errorf("mismatch (-got +want):\n%s", diff) + } + }) + + t.Run("legacy", func(t *testing.T) { + t.Skip("legacy outputs are currently default outputs") + var legacy bytes.Buffer + if err := tmpl.Execute(&legacy, Values{Messages: tt, forceLegacy: true}); err != nil { + t.Fatal(err) + } + + legacyBytes := legacy.Bytes() + if slices.Contains([]string{"chatqa.gotmpl", "openchat.gotmpl", "vicuna.gotmpl"}, match) && legacyBytes[len(legacyBytes)-1] == ' ' { + t.Log("removing trailing space from legacy output") + legacyBytes = legacyBytes[:len(legacyBytes)-1] + } else if slices.Contains([]string{"codellama-70b-instruct.gotmpl", "llama2-chat.gotmpl", 
"mistral-instruct.gotmpl"}, match) { + t.Skip("legacy outputs cannot be compared to messages outputs") + } + + if diff := cmp.Diff(legacyBytes, actual.Bytes()); diff != "" { t.Errorf("mismatch (-got +want):\n%s", diff) } }) @@ -135,7 +162,24 @@ func TestParse(t *testing.T) { {"{{ .System }} {{ .Prompt }} {{ .Response }}", []string{"prompt", "response", "system"}}, {"{{ with .Tools }}{{ . }}{{ end }} {{ .System }} {{ .Prompt }}", []string{"prompt", "response", "system", "tools"}}, {"{{ range .Messages }}{{ .Role }} {{ .Content }}{{ end }}", []string{"content", "messages", "role"}}, - {"{{ range .Messages }}{{ if eq .Role \"system\" }}SYSTEM: {{ .Content }}{{ else if eq .Role \"user\" }}USER: {{ .Content }}{{ else if eq .Role \"assistant\" }}ASSISTANT: {{ .Content }}{{ end }}{{ end }}", []string{"content", "messages", "role"}}, + {`{{- range .Messages }} +{{- if eq .Role "system" }}SYSTEM: +{{- else if eq .Role "user" }}USER: +{{- else if eq .Role "assistant" }}ASSISTANT: +{{- end }} {{ .Content }} +{{- end }}`, []string{"content", "messages", "role"}}, + {`{{- if .Messages }} +{{- range .Messages }}<|im_start|>{{ .Role }} +{{ .Content }}<|im_end|> +{{ end }}<|im_start|>assistant +{{ else -}} +{{ if .System }}<|im_start|>system +{{ .System }}<|im_end|> +{{ end }}{{ if .Prompt }}<|im_start|>user +{{ .Prompt }}<|im_end|> +{{ end }}<|im_start|>assistant +{{ .Response }}<|im_end|> +{{- end -}}`, []string{"content", "messages", "prompt", "response", "role", "system"}}, } for _, tt := range cases { @@ -145,9 +189,8 @@ func TestParse(t *testing.T) { t.Fatal(err) } - vars := tmpl.Vars() - if !slices.Equal(tt.vars, vars) { - t.Errorf("expected %v, got %v", tt.vars, vars) + if diff := cmp.Diff(tmpl.Vars(), tt.vars); diff != "" { + t.Errorf("mismatch (-got +want):\n%s", diff) } }) } @@ -167,11 +210,18 @@ func TestExecuteWithMessages(t *testing.T) { { "mistral", []template{ - {"no response", `[INST] {{ if .System }}{{ .System }}{{ "\n\n" }}{{ end }}{{ .Prompt }}[/INST] `}, - {"response", `[INST] {{ if .System }}{{ .System }}{{ "\n\n" }}{{ end }}{{ .Prompt }}[/INST] {{ .Response }}`}, - {"messages", `{{- range $index, $_ := .Messages }} -{{- if eq .Role "user" }}[INST] {{ if and (eq (len (slice $.Messages $index)) 1) $.System }}{{ $.System }}{{ "\n\n" }} -{{- end }}{{ .Content }}[/INST] {{ else if eq .Role "assistant" }}{{ .Content }} + {"no response", `[INST] {{ if .System }}{{ .System }} + +{{ end }}{{ .Prompt }}[/INST] `}, + {"response", `[INST] {{ if .System }}{{ .System }} + +{{ end }}{{ .Prompt }}[/INST] {{ .Response }}`}, + {"messages", `{{- $system := contents .Messages "system" -}} +{{- range $index, $_ := .Messages }} +{{- if eq .Role "user" }}[INST] {{ if $system }}{{ $system }} +{{- $system = "" }} + +{{ end }}{{ .Content }}[/INST] {{ else if eq .Role "assistant" }}{{ .Content }} {{- end }} {{- end }}`}, }, @@ -187,12 +237,18 @@ func TestExecuteWithMessages(t *testing.T) { { "mistral system", []template{ - {"no response", `[INST] {{ if .System }}{{ .System }}{{ "\n\n" }}{{ end }}{{ .Prompt }}[/INST] `}, - {"response", `[INST] {{ if .System }}{{ .System }}{{ "\n\n" }}{{ end }}{{ .Prompt }}[/INST] {{ .Response }}`}, - {"messages", ` + {"no response", `[INST] {{ if .System }}{{ .System }} + +{{ end }}{{ .Prompt }}[/INST] `}, + {"response", `[INST] {{ if .System }}{{ .System }} + +{{ end }}{{ .Prompt }}[/INST] {{ .Response }}`}, + {"messages", `{{- $system := contents .Messages "system" -}} {{- range $index, $_ := .Messages }} -{{- if eq .Role "user" }}[INST] {{ if and (eq (len (slice $.Messages 
$index)) 1) $.System }}{{ $.System }}{{ "\n\n" }} -{{- end }}{{ .Content }}[/INST] {{ else if eq .Role "assistant" }}{{ .Content }} +{{- if eq .Role "user" }}[INST] {{ if $system }}{{ $system }} +{{- $system = "" }} + +{{ end }}{{ .Content }}[/INST] {{ else if eq .Role "assistant" }}{{ .Content }} {{- end }} {{- end }}`}, }, @@ -204,9 +260,9 @@ func TestExecuteWithMessages(t *testing.T) { {Role: "user", Content: "What is your name?"}, }, }, - `[INST] Hello friend![/INST] Hello human![INST] You are a helpful assistant! + `[INST] You are a helpful assistant! -What is your name?[/INST] `, +Hello friend![/INST] Hello human![INST] What is your name?[/INST] `, }, { "chatml", @@ -220,12 +276,9 @@ What is your name?[/INST] `, {{ .Response }}<|im_end|> `}, {"messages", ` -{{- range $index, $_ := .Messages }} -{{- if and (eq .Role "user") (eq (len (slice $.Messages $index)) 1) $.System }}<|im_start|>system -{{ $.System }}<|im_end|>{{ "\n" }} -{{- end }}<|im_start|>{{ .Role }} -{{ .Content }}<|im_end|>{{ "\n" }} -{{- end }}<|im_start|>assistant +{{- range $index, $_ := .Messages }}<|im_start|>{{ .Role }} +{{ .Content }}<|im_end|> +{{ end }}<|im_start|>assistant `}, }, Values{ @@ -236,12 +289,12 @@ What is your name?[/INST] `, {Role: "user", Content: "What is your name?"}, }, }, - `<|im_start|>user + `<|im_start|>system +You are a helpful assistant!<|im_end|> +<|im_start|>user Hello friend!<|im_end|> <|im_start|>assistant Hello human!<|im_end|> -<|im_start|>system -You are a helpful assistant!<|im_end|> <|im_start|>user What is your name?<|im_end|> <|im_start|>assistant @@ -258,9 +311,11 @@ What is your name?<|im_end|> `}, {"messages", ` {{- range .Messages }} -{{- if eq .Role "user" }}Question: {{ .Content }}{{ "\n\n" }} -{{- else if eq .Role "assistant" }}Answer: {{ .Content }}{{ "\n\n" }} -{{- end }} +{{- if eq .Role "user" }}Question: {{ .Content }} + +{{ else if eq .Role "assistant" }}Answer: {{ .Content }} + +{{ end }} {{- end }}Answer: `}, }, Values{ @@ -300,11 +355,44 @@ Answer: `, t.Fatal(err) } - if b.String() != tt.expected { - t.Errorf("expected\n%s,\ngot\n%s", tt.expected, b.String()) + if diff := cmp.Diff(b.String(), tt.expected); diff != "" { + t.Errorf("mismatch (-got +want):\n%s", diff) } }) } }) } } + +func TestFuncs(t *testing.T) { + t.Run("contents", func(t *testing.T) { + cases := map[string]string{ + "": "A\n\nB\n\nC\n\nD\n\nE\n\nF", + "system": "A\n\nF", + "user": "B\n\nE", + "assistant": "C\n\nD", + } + + s := []*api.Message{ + {Role: "system", Content: "A"}, + {Role: "user", Content: "B"}, + {Role: "assistant", Content: "C"}, + {Role: "assistant", Content: "D"}, + {Role: "user", Content: "E"}, + {Role: "system", Content: "F"}, + } + + fn, ok := funcs["contents"].(func([]*api.Message, ...string) string) + if !ok { + t.Fatal("contents is not a function") + } + + for k, v := range cases { + t.Run(k, func(t *testing.T) { + if diff := cmp.Diff(fn(s, k), v); diff != "" { + t.Errorf("mismatch (-got +want):\n%s", diff) + } + }) + } + }) +} diff --git a/template/testdata/alpaca.gotmpl/system-user-assistant-user b/template/testdata/alpaca.gotmpl/system-user-assistant-user index 20182d82..4caa8178 100644 --- a/template/testdata/alpaca.gotmpl/system-user-assistant-user +++ b/template/testdata/alpaca.gotmpl/system-user-assistant-user @@ -1,4 +1,6 @@ -You are a helpful assistant.### Instruction: +You are a helpful assistant. + +### Instruction: Hello, how are you? 
### Response: diff --git a/template/testdata/codellama-70b-instruct.gotmpl/system-user-assistant-user b/template/testdata/codellama-70b-instruct.gotmpl/system-user-assistant-user index fdd0fc8b..d7528f80 100644 --- a/template/testdata/codellama-70b-instruct.gotmpl/system-user-assistant-user +++ b/template/testdata/codellama-70b-instruct.gotmpl/system-user-assistant-user @@ -9,3 +9,4 @@ Source: system I'd like to show off how chat templating works! Source: assistant Destination: user + \ No newline at end of file diff --git a/template/testdata/codellama-70b-instruct.gotmpl/user b/template/testdata/codellama-70b-instruct.gotmpl/user index 9e7174a8..8e07853c 100644 --- a/template/testdata/codellama-70b-instruct.gotmpl/user +++ b/template/testdata/codellama-70b-instruct.gotmpl/user @@ -3,3 +3,4 @@ Source: user Hello, how are you? Source: assistant Destination: user + \ No newline at end of file diff --git a/template/testdata/codellama-70b-instruct.gotmpl/user-assistant-user b/template/testdata/codellama-70b-instruct.gotmpl/user-assistant-user index b4ba1736..f732cc74 100644 --- a/template/testdata/codellama-70b-instruct.gotmpl/user-assistant-user +++ b/template/testdata/codellama-70b-instruct.gotmpl/user-assistant-user @@ -7,3 +7,4 @@ Source: user I'd like to show off how chat templating works! Source: assistant Destination: user + \ No newline at end of file diff --git a/template/testdata/llama2-chat.gotmpl/system-user-assistant-user b/template/testdata/llama2-chat.gotmpl/system-user-assistant-user index fc2679bf..9db81cb4 100644 --- a/template/testdata/llama2-chat.gotmpl/system-user-assistant-user +++ b/template/testdata/llama2-chat.gotmpl/system-user-assistant-user @@ -2,4 +2,6 @@ You are a helpful assistant. <> -Hello, how are you? [/INST] I'm doing great. How can I help you today?[INST] I'd like to show off how chat templating works! [/INST] \ No newline at end of file +Hello, how are you? [/INST] I'm doing great. How can I help you today?[INST] <><> + +I'd like to show off how chat templating works! [/INST] \ No newline at end of file diff --git a/template/testdata/llama2-chat.gotmpl/user-assistant-user b/template/testdata/llama2-chat.gotmpl/user-assistant-user index 42b4c529..ca58954f 100644 --- a/template/testdata/llama2-chat.gotmpl/user-assistant-user +++ b/template/testdata/llama2-chat.gotmpl/user-assistant-user @@ -1,3 +1,5 @@ [INST] <><> -Hello, how are you? [/INST] I'm doing great. How can I help you today?[INST] I'd like to show off how chat templating works! [/INST] \ No newline at end of file +Hello, how are you? [/INST] I'm doing great. How can I help you today?[INST] <><> + +I'd like to show off how chat templating works! [/INST] \ No newline at end of file diff --git a/template/testdata/mistral-instruct.gotmpl/system-user-assistant-user b/template/testdata/mistral-instruct.gotmpl/system-user-assistant-user index b6b4bf93..2f1edaec 100644 --- a/template/testdata/mistral-instruct.gotmpl/system-user-assistant-user +++ b/template/testdata/mistral-instruct.gotmpl/system-user-assistant-user @@ -1,2 +1,3 @@ -[INST] Hello, how are you?[/INST] I'm doing great. How can I help you today?[INST] You are a helpful assistant. -I'd like to show off how chat templating works![/INST] \ No newline at end of file +[INST] You are a helpful assistant. + +Hello, how are you?[/INST] I'm doing great. 
How can I help you today?[INST] I'd like to show off how chat templating works![/INST] \ No newline at end of file diff --git a/template/testdata/openchat.gotmpl/system-user-assistant-user b/template/testdata/openchat.gotmpl/system-user-assistant-user index 1214c126..404b071a 100644 --- a/template/testdata/openchat.gotmpl/system-user-assistant-user +++ b/template/testdata/openchat.gotmpl/system-user-assistant-user @@ -1 +1 @@ -GPT Correct System: You are a helpful assistant.<|end_of_turn|>GPT Correct User: Hello, how are you?<|end_of_turn|>GPT Correct Assistant: I'm doing great. How can I help you today?<|end_of_turn|>GPT Correct User: I'd like to show off how chat templating works!<|end_of_turn|>GPT Correct Assistant: \ No newline at end of file +GPT4 Correct System: You are a helpful assistant.<|end_of_turn|>GPT4 Correct User: Hello, how are you?<|end_of_turn|>GPT4 Correct Assistant: I'm doing great. How can I help you today?<|end_of_turn|>GPT4 Correct User: I'd like to show off how chat templating works!<|end_of_turn|>GPT4 Correct Assistant: \ No newline at end of file diff --git a/template/testdata/openchat.gotmpl/user b/template/testdata/openchat.gotmpl/user index 611daa83..48229cb0 100644 --- a/template/testdata/openchat.gotmpl/user +++ b/template/testdata/openchat.gotmpl/user @@ -1 +1 @@ -GPT Correct User: Hello, how are you?<|end_of_turn|>GPT Correct Assistant: \ No newline at end of file +GPT4 Correct User: Hello, how are you?<|end_of_turn|>GPT4 Correct Assistant: \ No newline at end of file diff --git a/template/testdata/openchat.gotmpl/user-assistant-user b/template/testdata/openchat.gotmpl/user-assistant-user index f97b02b9..4719abb2 100644 --- a/template/testdata/openchat.gotmpl/user-assistant-user +++ b/template/testdata/openchat.gotmpl/user-assistant-user @@ -1 +1 @@ -GPT Correct User: Hello, how are you?<|end_of_turn|>GPT Correct Assistant: I'm doing great. How can I help you today?<|end_of_turn|>GPT Correct User: I'd like to show off how chat templating works!<|end_of_turn|>GPT Correct Assistant: \ No newline at end of file +GPT4 Correct User: Hello, how are you?<|end_of_turn|>GPT4 Correct Assistant: I'm doing great. How can I help you today?<|end_of_turn|>GPT4 Correct User: I'd like to show off how chat templating works!<|end_of_turn|>GPT4 Correct Assistant: \ No newline at end of file diff --git a/template/vicuna.gotmpl b/template/vicuna.gotmpl index 2e13e990..515b2fe9 100644 --- a/template/vicuna.gotmpl +++ b/template/vicuna.gotmpl @@ -1,14 +1,4 @@ -{{- if .Messages }} -{{- if .System }}{{ .System }} - -{{ end }} -{{- range .Messages }} -{{- if eq .Role "user" }}USER: {{ .Content }} -{{ else if eq .Role "assistant" }}ASSISTANT: {{ .Content }} -{{ end }} -{{- end }}ASSISTANT: -{{- else }} {{ if .System }}{{ .System }} + {{ end }}{{ if .Prompt }}USER: {{ .Prompt }} -{{ end }}ASSISTANT: {{ .Response }} -{{- end }} \ No newline at end of file +{{ end }}ASSISTANT: {{ .Response }} diff --git a/template/zephyr.gotmpl b/template/zephyr.gotmpl index e6668848..1f889f26 100644 --- a/template/zephyr.gotmpl +++ b/template/zephyr.gotmpl @@ -1,15 +1,6 @@ -{{- if .Messages }} -{{- if .System }}<|system|> -{{ .System }} -{{ end }} -{{- range .Messages }}<|{{ .Role }}|> -{{ .Content }} -{{ end }}<|assistant|> -{{ else }} {{ if .System }}<|system|> {{ .System }} {{ end }}{{ if .Prompt }}<|user|> {{ .Prompt }} {{ end }}<|assistant|> {{ .Response }} -{{- end }} \ No newline at end of file
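The `contents` function introduced in template/template.go joins message contents with an optional role filter, which is what lets the reworked mistral-style templates hoist the merged system prompt into the first user turn. A runnable sketch of that mechanism; `Message` stands in for `api.Message`, and the template is a trimmed-down variant of the test fixture, so treat it as an approximation rather than the shipped template:

```go
package main

import (
	"os"
	"strings"
	"text/template"
)

// Message stands in for api.Message from the real codebase.
type Message struct {
	Role    string
	Content string
}

func main() {
	funcs := template.FuncMap{
		// contents joins message contents, optionally filtered by role,
		// mirroring the helper added in template/template.go.
		"contents": func(v []*Message, role ...string) string {
			var parts []string
			for _, m := range v {
				if len(role) == 0 || role[0] == "" || m.Role == role[0] {
					parts = append(parts, m.Content)
				}
			}
			return strings.Join(parts, "\n\n")
		},
	}

	// Mistral-style: the merged system prompt is emitted once, inside the
	// first user turn, then cleared so later turns omit it.
	tmpl := template.Must(template.New("").Funcs(funcs).Parse(
		`{{- $system := contents .Messages "system" -}}
{{- range .Messages }}
{{- if eq .Role "user" }}[INST] {{ if $system }}{{ $system }}{{ $system = "" }}

{{ end }}{{ .Content }}[/INST] {{ else if eq .Role "assistant" }}{{ .Content }}{{ end }}
{{- end }}`))

	msgs := []*Message{
		{Role: "system", Content: "You are a helpful assistant!"},
		{Role: "user", Content: "Hello friend!"},
		{Role: "assistant", Content: "Hello human!"},
		{Role: "user", Content: "What is your name?"},
	}
	if err := tmpl.Execute(os.Stdout, map[string]any{"Messages": msgs}); err != nil {
		panic(err)
	}
}
```

Run against this four-message fixture, the sketch should reproduce the hoisted-system transcript the updated `mistral system` expectation describes: the system text appears once, ahead of "Hello friend!", and the second user turn carries no system prefix.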