From 089daaeabcc6f05ee5ad171dd123b66d3572efaa Mon Sep 17 00:00:00 2001 From: Daniel Hiltgen Date: Tue, 30 Apr 2024 16:42:48 -0700 Subject: [PATCH 01/18] Add CUDA Driver API for GPU discovery We're seeing some corner cases with cudart which might be resolved by switching to the driver API which comes bundled with the driver package --- gpu/gpu.go | 66 +++++++++++++- gpu/gpu_info.h | 1 + gpu/gpu_info_cudart.h | 6 +- gpu/gpu_info_nvcuda.c | 203 ++++++++++++++++++++++++++++++++++++++++++ gpu/gpu_info_nvcuda.h | 71 +++++++++++++++ 5 files changed, 342 insertions(+), 5 deletions(-) create mode 100644 gpu/gpu_info_nvcuda.c create mode 100644 gpu/gpu_info_nvcuda.h diff --git a/gpu/gpu.go b/gpu/gpu.go index 9b915015..35c8d5ad 100644 --- a/gpu/gpu.go +++ b/gpu/gpu.go @@ -26,6 +26,7 @@ import ( type handles struct { deviceCount int cudart *C.cudart_handle_t + nvcuda *C.nvcuda_handle_t } const ( @@ -62,6 +63,22 @@ var CudartWindowsGlobs = []string{ "c:\\Program Files\\NVIDIA GPU Computing Toolkit\\CUDA\\v*\\bin\\cudart64_*.dll", } +var NvcudaLinuxGlobs = []string{ + "/usr/local/cuda*/targets/*/lib/libcuda.so*", + "/usr/lib/*-linux-gnu/nvidia/current/libcuda.so*", + "/usr/lib/*-linux-gnu/libcuda.so*", + "/usr/lib/wsl/lib/libcuda.so*", + "/usr/lib/wsl/drivers/*/libcuda.so*", + "/opt/cuda/lib*/libcuda.so*", + "/usr/local/cuda/lib*/libcuda.so*", + "/usr/lib*/libcuda.so*", + "/usr/local/lib*/libcuda.so*", +} + +var NvcudaWindowsGlobs = []string{ + "c:\\windows\\system*\\nvcuda.dll", +} + // Jetson devices have JETSON_JETPACK="x.y.z" factory set to the Jetpack version installed. // Included to drive logic for reducing Ollama-allocated overhead on L4T/Jetson devices. var CudaTegra string = os.Getenv("JETSON_JETPACK") @@ -74,6 +91,8 @@ func initGPUHandles() *handles { gpuHandles := &handles{} var cudartMgmtName string var cudartMgmtPatterns []string + var nvcudaMgmtName string + var nvcudaMgmtPatterns []string tmpDir, _ := PayloadsDir() switch runtime.GOOS { @@ -82,6 +101,9 @@ func initGPUHandles() *handles { localAppData := os.Getenv("LOCALAPPDATA") cudartMgmtPatterns = []string{filepath.Join(localAppData, "Programs", "Ollama", cudartMgmtName)} cudartMgmtPatterns = append(cudartMgmtPatterns, CudartWindowsGlobs...) + // Aligned with driver, we can't carry as payloads + nvcudaMgmtName = "nvcuda.dll" + nvcudaMgmtPatterns = NvcudaWindowsGlobs case "linux": cudartMgmtName = "libcudart.so*" if tmpDir != "" { @@ -89,11 +111,25 @@ func initGPUHandles() *handles { cudartMgmtPatterns = []string{filepath.Join(tmpDir, "cuda*", cudartMgmtName)} } cudartMgmtPatterns = append(cudartMgmtPatterns, CudartLinuxGlobs...) 
+ // Aligned with driver, we can't carry as payloads + nvcudaMgmtName = "libcuda.so*" + nvcudaMgmtPatterns = NvcudaLinuxGlobs default: return gpuHandles } slog.Info("Detecting GPUs") + nvcudaLibPaths := FindGPULibs(nvcudaMgmtName, nvcudaMgmtPatterns) + if len(nvcudaLibPaths) > 0 { + deviceCount, nvcuda, libPath := LoadNVCUDAMgmt(nvcudaLibPaths) + if nvcuda != nil { + slog.Info("detected GPUs", "count", deviceCount, "library", libPath) + gpuHandles.nvcuda = nvcuda + gpuHandles.deviceCount = deviceCount + return gpuHandles + } + } + cudartLibPaths := FindGPULibs(cudartMgmtName, cudartMgmtPatterns) if len(cudartLibPaths) > 0 { deviceCount, cudart, libPath := LoadCUDARTMgmt(cudartLibPaths) @@ -118,6 +154,9 @@ func GetGPUInfo() GpuInfoList { if gpuHandles.cudart != nil { C.cudart_release(*gpuHandles.cudart) } + if gpuHandles.nvcuda != nil { + C.nvcuda_release(*gpuHandles.nvcuda) + } }() // All our GPU builds on x86 have AVX enabled, so fallback to CPU if we don't detect at least AVX @@ -138,7 +177,11 @@ func GetGPUInfo() GpuInfoList { gpuInfo := GpuInfo{ Library: "cuda", } - C.cudart_check_vram(*gpuHandles.cudart, C.int(i), &memInfo) + if gpuHandles.cudart != nil { + C.cudart_check_vram(*gpuHandles.cudart, C.int(i), &memInfo) + } else { + C.nvcuda_check_vram(*gpuHandles.nvcuda, C.int(i), &memInfo) + } if memInfo.err != nil { slog.Info("error looking up nvidia GPU memory", "error", C.GoString(memInfo.err)) C.free(unsafe.Pointer(memInfo.err)) @@ -196,9 +239,10 @@ func GetCPUMem() (memInfo, error) { return ret, nil } -func FindGPULibs(baseLibName string, patterns []string) []string { +func FindGPULibs(baseLibName string, defaultPatterns []string) []string { // Multiple GPU libraries may exist, and some may not work, so keep trying until we exhaust them var ldPaths []string + var patterns []string gpuLibPaths := []string{} slog.Debug("Searching for GPU library", "name", baseLibName) @@ -218,6 +262,7 @@ func FindGPULibs(baseLibName string, patterns []string) []string { } patterns = append(patterns, filepath.Join(d, baseLibName+"*")) } + patterns = append(patterns, defaultPatterns...) 
slog.Debug("gpu library search", "globs", patterns) for _, pattern := range patterns { // Ignore glob discovery errors @@ -267,6 +312,23 @@ func LoadCUDARTMgmt(cudartLibPaths []string) (int, *C.cudart_handle_t, string) { return 0, nil, "" } +func LoadNVCUDAMgmt(nvcudaLibPaths []string) (int, *C.nvcuda_handle_t, string) { + var resp C.nvcuda_init_resp_t + resp.ch.verbose = getVerboseState() + for _, libPath := range nvcudaLibPaths { + lib := C.CString(libPath) + defer C.free(unsafe.Pointer(lib)) + C.nvcuda_init(lib, &resp) + if resp.err != nil { + slog.Debug("Unable to load nvcuda", "library", libPath, "error", C.GoString(resp.err)) + C.free(unsafe.Pointer(resp.err)) + } else { + return int(resp.num_devices), &resp.ch, libPath + } + } + return 0, nil, "" +} + func getVerboseState() C.uint16_t { if debug := os.Getenv("OLLAMA_DEBUG"); debug != "" { return C.uint16_t(1) diff --git a/gpu/gpu_info.h b/gpu/gpu_info.h index 0f67442f..577bd3f0 100644 --- a/gpu/gpu_info.h +++ b/gpu/gpu_info.h @@ -58,6 +58,7 @@ void cpu_check_ram(mem_info_t *resp); #endif #include "gpu_info_cudart.h" +#include "gpu_info_nvcuda.h" #endif // __GPU_INFO_H__ #endif // __APPLE__ \ No newline at end of file diff --git a/gpu/gpu_info_cudart.h b/gpu/gpu_info_cudart.h index ae2579a2..e8a89856 100644 --- a/gpu/gpu_info_cudart.h +++ b/gpu/gpu_info_cudart.h @@ -6,9 +6,9 @@ // Just enough typedef's to dlopen/dlsym for memory information typedef enum cudartReturn_enum { CUDART_SUCCESS = 0, - CUDA_ERROR_INVALID_VALUE = 1, - CUDA_ERROR_MEMORY_ALLOCATION = 2, - CUDA_ERROR_INSUFFICIENT_DRIVER = 35, + CUDART_ERROR_INVALID_VALUE = 1, + CUDART_ERROR_MEMORY_ALLOCATION = 2, + CUDART_ERROR_INSUFFICIENT_DRIVER = 35, // Other values omitted for now... } cudartReturn_t; diff --git a/gpu/gpu_info_nvcuda.c b/gpu/gpu_info_nvcuda.c new file mode 100644 index 00000000..e192d2e6 --- /dev/null +++ b/gpu/gpu_info_nvcuda.c @@ -0,0 +1,203 @@ +#ifndef __APPLE__ // TODO - maybe consider nvidia support on intel macs? 
+
+#include <string.h>
+#include "gpu_info_nvcuda.h"
+
+void nvcuda_init(char *nvcuda_lib_path, nvcuda_init_resp_t *resp) {
+  CUresult ret;
+  resp->err = NULL;
+  resp->num_devices = 0;
+  const int buflen = 256;
+  char buf[buflen + 1];
+  int i;
+
+  struct lookup {
+    char *s;
+    void **p;
+  } l[] = {
+
+      {"cuInit", (void *)&resp->ch.cuInit},
+      {"cuDriverGetVersion", (void *)&resp->ch.cuDriverGetVersion},
+      {"cuDeviceGetCount", (void *)&resp->ch.cuDeviceGetCount},
+      {"cuDeviceGet", (void *)&resp->ch.cuDeviceGet},
+      {"cuDeviceGetAttribute", (void *)&resp->ch.cuDeviceGetAttribute},
+      {"cuDeviceGetUuid", (void *)&resp->ch.cuDeviceGetUuid},
+      {"cuCtxCreate_v3", (void *)&resp->ch.cuCtxCreate_v3},
+      {"cuMemGetInfo_v2", (void *)&resp->ch.cuMemGetInfo_v2},
+      {"cuCtxDestroy", (void *)&resp->ch.cuCtxDestroy},
+      {NULL, NULL},
+  };
+
+  resp->ch.handle = LOAD_LIBRARY(nvcuda_lib_path, RTLD_LAZY);
+  if (!resp->ch.handle) {
+    char *msg = LOAD_ERR();
+    LOG(resp->ch.verbose, "library %s load err: %s\n", nvcuda_lib_path, msg);
+    snprintf(buf, buflen,
+             "Unable to load %s library to query for Nvidia GPUs: %s",
+             nvcuda_lib_path, msg);
+    free(msg);
+    resp->err = strdup(buf);
+    return;
+  }
+
+  for (i = 0; l[i].s != NULL; i++) {
+    *l[i].p = LOAD_SYMBOL(resp->ch.handle, l[i].s);
+    if (!*l[i].p) {
+      char *msg = LOAD_ERR();
+      LOG(resp->ch.verbose, "dlerr: %s\n", msg);
+      UNLOAD_LIBRARY(resp->ch.handle);
+      resp->ch.handle = NULL;
+      snprintf(buf, buflen, "symbol lookup for %s failed: %s", l[i].s,
+               msg);
+      free(msg);
+      resp->err = strdup(buf);
+      return;
+    }
+  }
+
+  ret = (*resp->ch.cuInit)(0);
+  if (ret != CUDA_SUCCESS) {
+    LOG(resp->ch.verbose, "cuInit err: %d\n", ret);
+    UNLOAD_LIBRARY(resp->ch.handle);
+    resp->ch.handle = NULL;
+    if (ret == CUDA_ERROR_INSUFFICIENT_DRIVER) {
+      resp->err = strdup("your nvidia driver is too old or missing.
If you have a CUDA GPU please upgrade to run ollama"); + return; + } + snprintf(buf, buflen, "nvcuda init failure: %d", ret); + resp->err = strdup(buf); + return; + } + + int version = 0; + nvcudaDriverVersion_t driverVersion; + driverVersion.major = 0; + driverVersion.minor = 0; + + // Report driver version if we're in verbose mode, ignore errors + ret = (*resp->ch.cuDriverGetVersion)(&version); + if (ret != CUDA_SUCCESS) { + LOG(resp->ch.verbose, "cuDriverGetVersion failed: %d\n", ret); + } else { + driverVersion.major = version / 1000; + driverVersion.minor = (version - (driverVersion.major * 1000)) / 10; + LOG(resp->ch.verbose, "CUDA driver version: %d-%d\n", driverVersion.major, driverVersion.minor); + } + + ret = (*resp->ch.cuDeviceGetCount)(&resp->num_devices); + if (ret != CUDA_SUCCESS) { + LOG(resp->ch.verbose, "cuDeviceGetCount err: %d\n", ret); + UNLOAD_LIBRARY(resp->ch.handle); + resp->ch.handle = NULL; + snprintf(buf, buflen, "unable to get device count: %d", ret); + resp->err = strdup(buf); + return; + } +} + +const int buflen = 256; +void nvcuda_check_vram(nvcuda_handle_t h, int i, mem_info_t *resp) { + resp->err = NULL; + nvcudaMemory_t memInfo = {0,0}; + CUresult ret; + CUdevice device = -1; + CUcontext ctx = NULL; + char buf[buflen + 1]; + CUuuid uuid = {0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0}; + + if (h.handle == NULL) { + resp->err = strdup("nvcuda handle isn't initialized"); + return; + } + + ret = (*h.cuDeviceGet)(&device, i); + if (ret != CUDA_SUCCESS) { + snprintf(buf, buflen, "nvcuda device failed to initialize"); + resp->err = strdup(buf); + return; + } + + resp->major = 0; + resp->minor = 0; + int major = 0; + int minor = 0; + ret = (*h.cuDeviceGetAttribute)(&major, CU_DEVICE_ATTRIBUTE_COMPUTE_CAPABILITY_MAJOR, device); + if (ret != CUDA_SUCCESS) { + LOG(h.verbose, "[%d] device major lookup failure: %d\n", i, ret); + } else { + ret = (*h.cuDeviceGetAttribute)(&minor, CU_DEVICE_ATTRIBUTE_COMPUTE_CAPABILITY_MINOR, device); + if (ret != CUDA_SUCCESS) { + LOG(h.verbose, "[%d] device minor lookup failure: %d\n", i, ret); + } else { + resp->minor = minor; + resp->major = major; + } + } + + ret = (*h.cuDeviceGetUuid)(&uuid, device); + if (ret != CUDA_SUCCESS) { + LOG(h.verbose, "[%d] device uuid lookup failure: %d\n", i, ret); + snprintf(&resp->gpu_id[0], GPU_ID_LEN, "%d", i); + } else { + // GPU-d110a105-ac29-1d54-7b49-9c90440f215b + snprintf(&resp->gpu_id[0], GPU_ID_LEN, + "GPU-%02x%02x%02x%02x-%02x%02x-%02x%02x-%02x%02x-%02x%02x%02x%02x%02x%02x", + uuid.bytes[0], + uuid.bytes[1], + uuid.bytes[2], + uuid.bytes[3], + uuid.bytes[4], + uuid.bytes[5], + uuid.bytes[6], + uuid.bytes[7], + uuid.bytes[8], + uuid.bytes[9], + uuid.bytes[10], + uuid.bytes[11], + uuid.bytes[12], + uuid.bytes[13], + uuid.bytes[14], + uuid.bytes[15] + ); + } + + // To get memory we have to set (and release) a context + ret = (*h.cuCtxCreate_v3)(&ctx, NULL, 0, 0, device); + if (ret != CUDA_SUCCESS) { + snprintf(buf, buflen, "nvcuda failed to get primary device context %d", ret); + resp->err = strdup(buf); + return; + } + + ret = (*h.cuMemGetInfo_v2)(&memInfo.free, &memInfo.total); + if (ret != CUDA_SUCCESS) { + snprintf(buf, buflen, "nvcuda device memory info lookup failure %d", ret); + resp->err = strdup(buf); + // Best effort on failure... 
+ (*h.cuCtxDestroy)(ctx); + return; + } + + resp->total = memInfo.total; + resp->free = memInfo.free; + + LOG(h.verbose, "[%s] CUDA totalMem %lu mb\n", resp->gpu_id, resp->total / 1024 / 1024); + LOG(h.verbose, "[%s] CUDA freeMem %lu mb\n", resp->gpu_id, resp->free / 1024 / 1024); + LOG(h.verbose, "[%s] Compute Capability %d.%d\n", resp->gpu_id, resp->major, resp->minor); + + + + ret = (*h.cuCtxDestroy)(ctx); + if (ret != CUDA_SUCCESS) { + LOG(1, "nvcuda failed to release primary device context %d", ret); + } +} + +void nvcuda_release(nvcuda_handle_t h) { + LOG(h.verbose, "releasing nvcuda library\n"); + UNLOAD_LIBRARY(h.handle); + // TODO and other context release logic? + h.handle = NULL; +} + +#endif // __APPLE__ \ No newline at end of file diff --git a/gpu/gpu_info_nvcuda.h b/gpu/gpu_info_nvcuda.h new file mode 100644 index 00000000..c4d94edd --- /dev/null +++ b/gpu/gpu_info_nvcuda.h @@ -0,0 +1,71 @@ +#ifndef __APPLE__ +#ifndef __GPU_INFO_NVCUDA_H__ +#define __GPU_INFO_NVCUDA_H__ +#include "gpu_info.h" + +// Just enough typedef's to dlopen/dlsym for memory information +typedef enum cudaError_enum { + CUDA_SUCCESS = 0, + CUDA_ERROR_INVALID_VALUE = 1, + CUDA_ERROR_MEMORY_ALLOCATION = 2, + CUDA_ERROR_NOT_INITIALIZED = 3, + CUDA_ERROR_INSUFFICIENT_DRIVER = 35, + // Other values omitted for now... +} CUresult; + +typedef enum CUdevice_attribute_enum { + CU_DEVICE_ATTRIBUTE_COMPUTE_CAPABILITY_MAJOR = 75, + CU_DEVICE_ATTRIBUTE_COMPUTE_CAPABILITY_MINOR = 76, + + // TODO - not yet wired up but may be useful for Jetson or other + // integrated GPU scenarios with shared memory + CU_DEVICE_ATTRIBUTE_INTEGRATED = 18 + +} CUdevice_attribute; + +typedef void *nvcudaDevice_t; // Opaque is sufficient +typedef struct nvcudaMemory_st { + uint64_t total; + uint64_t free; +} nvcudaMemory_t; + +typedef struct nvcudaDriverVersion { + int major; + int minor; +} nvcudaDriverVersion_t; + +typedef struct CUuuid_st { + unsigned char bytes[16]; +} CUuuid; + +typedef int CUdevice; +typedef void* CUcontext; + +typedef struct nvcuda_handle { + void *handle; + uint16_t verbose; + CUresult (*cuInit)(unsigned int Flags); + CUresult (*cuDriverGetVersion)(int *driverVersion); + CUresult (*cuDeviceGetCount)(int *); + CUresult (*cuDeviceGet)(CUdevice* device, int ordinal); + CUresult (*cuDeviceGetAttribute)(int* pi, CUdevice_attribute attrib, CUdevice dev); + CUresult (*cuDeviceGetUuid)(CUuuid* uuid, CUdevice dev); // signature compatible with cuDeviceGetUuid_v2 + + // Context specific aspects + CUresult (*cuCtxCreate_v3)(CUcontext* pctx, void *params, int len, unsigned int flags, CUdevice dev); + CUresult (*cuMemGetInfo_v2)(uint64_t* free, uint64_t* total); + CUresult (*cuCtxDestroy)(CUcontext ctx); +} nvcuda_handle_t; + +typedef struct nvcuda_init_resp { + char *err; // If err is non-null handle is invalid + nvcuda_handle_t ch; + int num_devices; +} nvcuda_init_resp_t; + +void nvcuda_init(char *nvcuda_lib_path, nvcuda_init_resp_t *resp); +void nvcuda_check_vram(nvcuda_handle_t ch, int device_id, mem_info_t *resp); +void nvcuda_release(nvcuda_handle_t ch); + +#endif // __GPU_INFO_NVCUDA_H__ +#endif // __APPLE__ From e592e8fccb390073a51c61a529d4a52529c44aa2 Mon Sep 17 00:00:00 2001 From: Daniel Hiltgen Date: Wed, 1 May 2024 15:47:12 -0700 Subject: [PATCH 02/18] Support Fedoras standard ROCm location --- gpu/amd_common.go | 6 ++++-- gpu/amd_linux.go | 4 ++-- gpu/amd_windows.go | 4 ++-- scripts/install.sh | 4 ++-- 4 files changed, 10 insertions(+), 8 deletions(-) diff --git a/gpu/amd_common.go b/gpu/amd_common.go index 
6fa4fce4..27a81e3f 100644 --- a/gpu/amd_common.go +++ b/gpu/amd_common.go @@ -81,8 +81,10 @@ func commonAMDValidateLibDir() (string, error) { } // Well known location(s) - if rocmLibUsable(RocmStandardLocation) { - return RocmStandardLocation, nil + for _, path := range RocmStandardLocations { + if rocmLibUsable(path) { + return path, nil + } } // Installer payload location if we're running the installed binary diff --git a/gpu/amd_linux.go b/gpu/amd_linux.go index cbbdf030..9f9f8e74 100644 --- a/gpu/amd_linux.go +++ b/gpu/amd_linux.go @@ -25,12 +25,12 @@ const ( // Prefix with the node dir GPUTotalMemoryFileGlob = "mem_banks/*/properties" // size_in_bytes line GPUUsedMemoryFileGlob = "mem_banks/*/used_memory" - RocmStandardLocation = "/opt/rocm/lib" ) var ( // Used to validate if the given ROCm lib is usable - ROCmLibGlobs = []string{"libhipblas.so.2*", "rocblas"} // TODO - probably include more coverage of files here... + ROCmLibGlobs = []string{"libhipblas.so.2*", "rocblas"} // TODO - probably include more coverage of files here... + RocmStandardLocations = []string{"/opt/rocm/lib", "/usr/lib64"} ) // Gather GPU information from the amdgpu driver if any supported GPUs are detected diff --git a/gpu/amd_windows.go b/gpu/amd_windows.go index dfa7e974..22c9f427 100644 --- a/gpu/amd_windows.go +++ b/gpu/amd_windows.go @@ -14,7 +14,6 @@ import ( ) const ( - RocmStandardLocation = "C:\\Program Files\\AMD\\ROCm\\5.7\\bin" // TODO glob? // TODO We're lookinng for this exact name to detect iGPUs since hipGetDeviceProperties never reports integrated==true iGPUName = "AMD Radeon(TM) Graphics" @@ -22,7 +21,8 @@ const ( var ( // Used to validate if the given ROCm lib is usable - ROCmLibGlobs = []string{"hipblas.dll", "rocblas"} // TODO - probably include more coverage of files here... + ROCmLibGlobs = []string{"hipblas.dll", "rocblas"} // TODO - probably include more coverage of files here... + RocmStandardLocations = []string{"C:\\Program Files\\AMD\\ROCm\\5.7\\bin"} // TODO glob? 
) func AMDGetGPUInfo() []GpuInfo { diff --git a/scripts/install.sh b/scripts/install.sh index eb3ff504..20b0db60 100644 --- a/scripts/install.sh +++ b/scripts/install.sh @@ -166,8 +166,8 @@ fi if check_gpu lspci amdgpu || check_gpu lshw amdgpu; then # Look for pre-existing ROCm v6 before downloading the dependencies - for search in "${HIP_PATH:-''}" "${ROCM_PATH:-''}" "/opt/rocm"; do - if [ -n "${search}" ] && [ -e "${search}/lib/libhipblas.so.2" ]; then + for search in "${HIP_PATH:-''}" "${ROCM_PATH:-''}" "/opt/rocm" "/usr/lib64"; do + if [ -n "${search}" ] && [ -e "${search}/libhipblas.so.2" -o -e "${search}/lib/libhipblas.so.2" ]; then status "Compatible AMD GPU ROCm library detected at ${search}" install_success exit 0 From b1ad3a43cb58c3c01dae8b68a5e8fac34a5620f8 Mon Sep 17 00:00:00 2001 From: Daniel Hiltgen Date: Fri, 3 May 2024 11:55:32 -0700 Subject: [PATCH 03/18] Skip PhysX cudart library For some reason this library gives incorrect GPU information, so skip it --- gpu/gpu.go | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/gpu/gpu.go b/gpu/gpu.go index 9b915015..73dbe275 100644 --- a/gpu/gpu.go +++ b/gpu/gpu.go @@ -220,6 +220,11 @@ func FindGPULibs(baseLibName string, patterns []string) []string { } slog.Debug("gpu library search", "globs", patterns) for _, pattern := range patterns { + + // Nvidia PhysX known to return bogus results + if strings.Contains(pattern, "PhysX") { + slog.Debug("skipping PhysX cuda library path", "path", pattern) + } // Ignore glob discovery errors matches, _ := filepath.Glob(pattern) for _, match := range matches { From 242efe661130841caa7d683d8327d61cb4efbd8e Mon Sep 17 00:00:00 2001 From: Saif Date: Mon, 6 May 2024 22:55:23 +0530 Subject: [PATCH 04/18] =?UTF-8?q?=F0=9F=91=8C=20IMPROVE:=20add=20portkey?= =?UTF-8?q?=20library=20for=20production=20tools=20(#4119)?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- README.md | 1 + 1 file changed, 1 insertion(+) diff --git a/README.md b/README.md index 8d7659c5..c487680f 100644 --- a/README.md +++ b/README.md @@ -351,6 +351,7 @@ See the [API documentation](./docs/api.md) for all endpoints. - [Ollama-ex for Elixir](https://github.com/lebrunel/ollama-ex) - [Ollama Connector for SAP ABAP](https://github.com/b-tocs/abap_btocs_ollama) - [Testcontainers](https://testcontainers.com/modules/ollama/) +- [Portkey](https://portkey.ai/docs/welcome/integration-guides/ollama) ### Mobile From fb8ddc564ebd43539caf90579c798bfe525468e1 Mon Sep 17 00:00:00 2001 From: Hyden Liu Date: Tue, 7 May 2024 01:32:30 +0800 Subject: [PATCH 05/18] chore: delete `HEAD` (#4194) --- docs/windows.md | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/docs/windows.md b/docs/windows.md index 72c5d32c..242b810a 100644 --- a/docs/windows.md +++ b/docs/windows.md @@ -1,4 +1,3 @@ -<<<<<<< HEAD # Ollama Windows Preview Welcome to the Ollama Windows preview. @@ -59,4 +58,4 @@ If you'd like to install or integrate Ollama as a service, a standalone `ollama-windows-amd64.zip` zip file is available containing only the Ollama CLI and GPU library dependencies for Nvidia and AMD. This allows for embedding Ollama in existing applications, or running it as a system service via `ollama -serve` with tools such as [NSSM](https://nssm.cc/). \ No newline at end of file +serve` with tools such as [NSSM](https://nssm.cc/). 
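For readers wiring this up, a minimal sketch of the NSSM-based service registration mentioned above might look like the following; the service name `Ollama` and the extraction path are illustrative assumptions, not values prescribed by the installer or these docs.

```powershell
# Assumes the standalone zip was extracted to C:\ollama (hypothetical path)
nssm install Ollama "C:\ollama\ollama.exe" serve
# Optional: set the service's startup directory to the extraction folder
nssm set Ollama AppDirectory "C:\ollama"
nssm start Ollama
```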
From 86b7fcac324b5edebac4d450612b5e49e70c7cc5 Mon Sep 17 00:00:00 2001 From: Tony Loehr Date: Mon, 6 May 2024 19:14:41 +0100 Subject: [PATCH 06/18] Update README.md with StreamDeploy (#3621) Co-authored-by: Bruce MacDonald --- README.md | 1 + 1 file changed, 1 insertion(+) diff --git a/README.md b/README.md index c487680f..d5c2458e 100644 --- a/README.md +++ b/README.md @@ -292,6 +292,7 @@ See the [API documentation](./docs/api.md) for all endpoints. - [ChatOllama: Open Source Chatbot based on Ollama with Knowledge Bases](https://github.com/sugarforever/chat-ollama) - [CRAG Ollama Chat: Simple Web Search with Corrective RAG](https://github.com/Nagi-ovo/CRAG-Ollama-Chat) - [RAGFlow: Open-source Retrieval-Augmented Generation engine based on deep document understanding](https://github.com/infiniflow/ragflow) +- [StreamDeploy: LLM Application Scaffold](https://github.com/StreamDeploy-DevRel/streamdeploy-llm-app-scaffold) - [chat: chat web app for teams](https://github.com/swuecho/chat) - [Lobe Chat](https://github.com/lobehub/lobe-chat) with [Integrating Doc](https://lobehub.com/docs/self-hosting/examples/ollama) - [Ollama RAG Chatbot: Local Chat with multiple PDFs using Ollama and RAG.](https://github.com/datvodinh/rag-chatbot.git) From 01c9386267514dfcfcb7ae29803302300564561b Mon Sep 17 00:00:00 2001 From: Nurgo Date: Mon, 6 May 2024 22:42:16 +0200 Subject: [PATCH 07/18] Add BrainSoup to compatible clients list (#3473) --- README.md | 17 +++++++++-------- 1 file changed, 9 insertions(+), 8 deletions(-) diff --git a/README.md b/README.md index d5c2458e..54315277 100644 --- a/README.md +++ b/README.md @@ -284,18 +284,19 @@ See the [API documentation](./docs/api.md) for all endpoints. - [OllamaGUI](https://github.com/enoch1118/ollamaGUI) - [OpenAOE](https://github.com/InternLM/OpenAOE) - [Odin Runes](https://github.com/leonid20000/OdinRunes) -- [LLM-X: Progressive Web App](https://github.com/mrdjohnson/llm-x) +- [LLM-X](https://github.com/mrdjohnson/llm-x) (Progressive Web App) - [AnythingLLM (Docker + MacOs/Windows/Linux native app)](https://github.com/Mintplex-Labs/anything-llm) - [Ollama Basic Chat: Uses HyperDiv Reactive UI](https://github.com/rapidarchitect/ollama_basic_chat) - [Ollama-chats RPG](https://github.com/drazdra/ollama-chats) -- [QA-Pilot: Chat with Code Repository](https://github.com/reid41/QA-Pilot) -- [ChatOllama: Open Source Chatbot based on Ollama with Knowledge Bases](https://github.com/sugarforever/chat-ollama) -- [CRAG Ollama Chat: Simple Web Search with Corrective RAG](https://github.com/Nagi-ovo/CRAG-Ollama-Chat) -- [RAGFlow: Open-source Retrieval-Augmented Generation engine based on deep document understanding](https://github.com/infiniflow/ragflow) -- [StreamDeploy: LLM Application Scaffold](https://github.com/StreamDeploy-DevRel/streamdeploy-llm-app-scaffold) -- [chat: chat web app for teams](https://github.com/swuecho/chat) +- [QA-Pilot](https://github.com/reid41/QA-Pilot) (Chat with Code Repository) +- [ChatOllama](https://github.com/sugarforever/chat-ollama) (Open Source Chatbot based on Ollama with Knowledge Bases) +- [CRAG Ollama Chat](https://github.com/Nagi-ovo/CRAG-Ollama-Chat) (Simple Web Search with Corrective RAG) +- [RAGFlow](https://github.com/infiniflow/ragflow) (Open-source Retrieval-Augmented Generation engine based on deep document understanding) +- [StreamDeploy](https://github.com/StreamDeploy-DevRel/streamdeploy-llm-app-scaffold) (LLM Application Scaffold) +- [chat](https://github.com/swuecho/chat) (chat web app for teams) - [Lobe 
Chat](https://github.com/lobehub/lobe-chat) with [Integrating Doc](https://lobehub.com/docs/self-hosting/examples/ollama) -- [Ollama RAG Chatbot: Local Chat with multiple PDFs using Ollama and RAG.](https://github.com/datvodinh/rag-chatbot.git) +- [Ollama RAG Chatbot](https://github.com/datvodinh/rag-chatbot.git) (Local Chat with multiple PDFs using Ollama and RAG) +- [BrainSoup](https://www.nurgo-software.com/products/brainsoup) (Flexible native client with RAG & multi-agent automation) ### Terminal From aa93423fbf003ad75c56a0a3577244638d24f9c5 Mon Sep 17 00:00:00 2001 From: Adrien Brault Date: Mon, 6 May 2024 22:47:00 +0200 Subject: [PATCH 08/18] docs: pbcopy on mac (#3129) --- docs/import.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/import.md b/docs/import.md index 672916b5..a492778f 100644 --- a/docs/import.md +++ b/docs/import.md @@ -125,7 +125,7 @@ Publishing models is in early alpha. If you'd like to publish your model to shar 1. Create [an account](https://ollama.com/signup) 2. Copy your Ollama public key: - - macOS: `cat ~/.ollama/id_ed25519.pub` + - macOS: `cat ~/.ollama/id_ed25519.pub | pbcopy` - Windows: `type %USERPROFILE%\.ollama\id_ed25519.pub` - Linux: `cat /usr/share/ollama/.ollama/id_ed25519.pub` 3. Add your public key to your [Ollama account](https://ollama.com/settings/keys) From 0a954e50661f0fa6fc74ab81809ef7d76c6e1d66 Mon Sep 17 00:00:00 2001 From: Daniel Hiltgen Date: Mon, 6 May 2024 14:15:37 -0700 Subject: [PATCH 09/18] Fix stale test logic The model processing was recently changed to be deferred but this test scenario hadn't been adjusted for that change in behavior. --- server/sched_test.go | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) diff --git a/server/sched_test.go b/server/sched_test.go index 3e47ed02..7e4faa61 100644 --- a/server/sched_test.go +++ b/server/sched_test.go @@ -352,11 +352,9 @@ func TestGetRunner(t *testing.T) { scenario1c.req.model.ModelPath = "bad path" slog.Info("scenario1c") successCh1c, errCh1c := s.GetRunner(scenario1c.ctx, scenario1c.req.model, scenario1c.req.opts, scenario1c.req.sessionDuration) - require.Len(t, s.pendingReqCh, 0) - require.Len(t, successCh1c, 0) - require.Len(t, errCh1c, 0) - + // Starts in pending channel, then should be quickly processsed to return an error time.Sleep(5 * time.Millisecond) + require.Len(t, successCh1c, 0) s.loadedMu.Lock() require.Len(t, s.loaded, 0) s.loadedMu.Unlock() From c9f98622b1daba55477dcb330de1739fb2f02ce6 Mon Sep 17 00:00:00 2001 From: Jeffrey Morgan Date: Mon, 6 May 2024 14:22:24 -0700 Subject: [PATCH 10/18] Skip scheduling cancelled requests, always reload unloaded runners (#4189) --- server/sched.go | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/server/sched.go b/server/sched.go index 164814a3..c4a071c1 100644 --- a/server/sched.go +++ b/server/sched.go @@ -100,6 +100,12 @@ func (s *Scheduler) processPending(ctx context.Context) { return case pending := <-s.pendingReqCh: // Block other requests until we get this pending request running + + if pending.ctx.Err() != nil { + slog.Debug("pending request cancelled or timed out, skipping scheduling") + continue + } + for { var runnerToExpire *runnerRef s.loadedMu.Lock() @@ -435,6 +441,10 @@ func (runner *runnerRef) needsReload(ctx context.Context, req *LlmRequest) bool timeout = 2 * time.Minute // Initial load can take a long time for big models on slow systems... 
} + if runner.Options == nil { + return true + } + // Don't reload runner if num_gpu=-1 was provided optsExisting := runner.Options.Runner optsNew := req.opts.Runner From ed740a2504f18663f4cb0b326c1c48fbc54c2cbd Mon Sep 17 00:00:00 2001 From: Jeffrey Morgan Date: Mon, 6 May 2024 14:22:53 -0700 Subject: [PATCH 11/18] Fix `no slots available` error with concurrent requests (#4160) --- llm/server.go | 235 +++++++++++++++++++++++++------------------------- 1 file changed, 119 insertions(+), 116 deletions(-) diff --git a/llm/server.go b/llm/server.go index 44bada08..db1b0e23 100644 --- a/llm/server.go +++ b/llm/server.go @@ -338,7 +338,7 @@ type ServerStatus int const ( // iota is reset to 0 ServerStatusReady ServerStatus = iota - ServerStatusNoSlotsAvaialble + ServerStatusNoSlotsAvailable ServerStatusLoadingModel ServerStatusNotResponding ServerStatusError @@ -348,7 +348,7 @@ func (s ServerStatus) ToString() string { switch s { case ServerStatusReady: return "llm server ready" - case ServerStatusNoSlotsAvaialble: + case ServerStatusNoSlotsAvailable: return "llm busy - no slots available" case ServerStatusLoadingModel: return "llm server loading model" @@ -405,7 +405,7 @@ func (s *llmServer) getServerStatus(ctx context.Context) (ServerStatus, error) { case "ok": return ServerStatusReady, nil case "no slot available": - return ServerStatusNoSlotsAvaialble, nil + return ServerStatusNoSlotsAvailable, nil case "loading model": return ServerStatusLoadingModel, nil default: @@ -413,6 +413,29 @@ func (s *llmServer) getServerStatus(ctx context.Context) (ServerStatus, error) { } } +// getServerStatusRetry will retry if ServerStatusNoSlotsAvailable is received +func (s *llmServer) getServerStatusRetry(ctx context.Context) (ServerStatus, error) { + var retries int + for { + status, err := s.getServerStatus(ctx) + if err != nil { + return status, err + } + + if status == ServerStatusNoSlotsAvailable { + if retries >= 10 { + return status, fmt.Errorf("no slots available after %d retries", retries) + } + + time.Sleep(5 * time.Millisecond) + retries++ + continue + } + + return status, nil + } +} + func (s *llmServer) Ping(ctx context.Context) error { _, err := s.getServerStatus(ctx) if err != nil { @@ -510,7 +533,6 @@ ws ::= ([ \t\n] ws)? ` const maxBufferSize = 512 * format.KiloByte -const maxRetries = 3 type ImageData struct { Data []byte `json:"data"` @@ -586,7 +608,7 @@ func (s *llmServer) Completion(ctx context.Context, req CompletionRequest, fn fu } // Make sure the server is ready - status, err := s.getServerStatus(ctx) + status, err := s.getServerStatusRetry(ctx) if err != nil { return err } else if status != ServerStatusReady { @@ -600,133 +622,113 @@ func (s *llmServer) Completion(ctx context.Context, req CompletionRequest, fn fu } } - retryDelay := 100 * time.Microsecond - for retries := 0; retries < maxRetries; retries++ { - if retries > 0 { - time.Sleep(retryDelay) // wait before retrying - retryDelay *= 2 // exponential backoff - } + // Handling JSON marshaling with special characters unescaped. + buffer := &bytes.Buffer{} + enc := json.NewEncoder(buffer) + enc.SetEscapeHTML(false) - // Handling JSON marshaling with special characters unescaped. 
- buffer := &bytes.Buffer{} - enc := json.NewEncoder(buffer) - enc.SetEscapeHTML(false) + if err := enc.Encode(request); err != nil { + return fmt.Errorf("failed to marshal data: %v", err) + } - if err := enc.Encode(request); err != nil { - return fmt.Errorf("failed to marshal data: %v", err) - } + endpoint := fmt.Sprintf("http://127.0.0.1:%d/completion", s.port) + serverReq, err := http.NewRequestWithContext(ctx, http.MethodPost, endpoint, buffer) + if err != nil { + return fmt.Errorf("error creating POST request: %v", err) + } + serverReq.Header.Set("Content-Type", "application/json") - endpoint := fmt.Sprintf("http://127.0.0.1:%d/completion", s.port) - req, err := http.NewRequestWithContext(ctx, http.MethodPost, endpoint, buffer) + res, err := http.DefaultClient.Do(serverReq) + if err != nil { + return fmt.Errorf("POST predict: %v", err) + } + defer res.Body.Close() + + if res.StatusCode >= 400 { + bodyBytes, err := io.ReadAll(res.Body) if err != nil { - return fmt.Errorf("error creating POST request: %v", err) + return fmt.Errorf("failed reading llm error response: %w", err) } - req.Header.Set("Content-Type", "application/json") + log.Printf("llm predict error: %s", bodyBytes) + return fmt.Errorf("%s", bodyBytes) + } - resp, err := http.DefaultClient.Do(req) - if err != nil { - return fmt.Errorf("POST predict: %v", err) - } - defer resp.Body.Close() + scanner := bufio.NewScanner(res.Body) + buf := make([]byte, 0, maxBufferSize) + scanner.Buffer(buf, maxBufferSize) - if resp.StatusCode >= 400 { - bodyBytes, err := io.ReadAll(resp.Body) - if err != nil { - return fmt.Errorf("failed reading llm error response: %w", err) + // keep track of the last token generated, this is used to abort if the model starts looping + var lastToken string + var tokenRepeat int + + for scanner.Scan() { + select { + case <-ctx.Done(): + // This handles the request cancellation + return ctx.Err() + default: + line := scanner.Bytes() + if len(line) == 0 { + continue } - log.Printf("llm predict error: %s", bodyBytes) - return fmt.Errorf("%s", bodyBytes) - } - scanner := bufio.NewScanner(resp.Body) - buf := make([]byte, 0, maxBufferSize) - scanner.Buffer(buf, maxBufferSize) + evt, ok := bytes.CutPrefix(line, []byte("data: ")) + if !ok { + return fmt.Errorf("error parsing llm response stream: %s", line) + } - retryNeeded := false - // keep track of the last token generated, this is used to abort if the model starts looping - var lastToken string - var tokenRepeat int + var c completion + if err := json.Unmarshal(evt, &c); err != nil { + return fmt.Errorf("error unmarshaling llm prediction response: %v", err) + } - for scanner.Scan() { - select { - case <-ctx.Done(): - // This handles the request cancellation - return ctx.Err() + switch { + case strings.TrimSpace(c.Content) == lastToken: + tokenRepeat++ default: - line := scanner.Bytes() - if len(line) == 0 { - continue - } - - // try again on slot unavailable - if bytes.Contains(line, []byte("slot unavailable")) { - retryNeeded = true - break - } - - evt, ok := bytes.CutPrefix(line, []byte("data: ")) - if !ok { - return fmt.Errorf("error parsing llm response stream: %s", line) - } - - var c completion - if err := json.Unmarshal(evt, &c); err != nil { - return fmt.Errorf("error unmarshaling llm prediction response: %v", err) - } - - switch { - case strings.TrimSpace(c.Content) == lastToken: - tokenRepeat++ - default: - lastToken = strings.TrimSpace(c.Content) - tokenRepeat = 0 - } - - // 30 picked as an arbitrary max token repeat limit, modify as needed - if 
tokenRepeat > 30 { - slog.Debug("prediction aborted, token repeat limit reached") - return ctx.Err() - } - - if c.Content != "" { - fn(CompletionResponse{ - Content: c.Content, - }) - } - - if c.Stop { - fn(CompletionResponse{ - Done: true, - PromptEvalCount: c.Timings.PromptN, - PromptEvalDuration: parseDurationMs(c.Timings.PromptMS), - EvalCount: c.Timings.PredictedN, - EvalDuration: parseDurationMs(c.Timings.PredictedMS), - }) - return nil - } + lastToken = strings.TrimSpace(c.Content) + tokenRepeat = 0 } - } - if err := scanner.Err(); err != nil { - if strings.Contains(err.Error(), "unexpected EOF") { - s.Close() - msg := "" - if s.status != nil && s.status.LastErrMsg != "" { - msg = s.status.LastErrMsg - } - - return fmt.Errorf("an unknown error was encountered while running the model %s", msg) + // 30 picked as an arbitrary max token repeat limit, modify as needed + if tokenRepeat > 30 { + slog.Debug("prediction aborted, token repeat limit reached") + return ctx.Err() } - return fmt.Errorf("error reading llm response: %v", err) - } - if !retryNeeded { - return nil // success + if c.Content != "" { + fn(CompletionResponse{ + Content: c.Content, + }) + } + + if c.Stop { + fn(CompletionResponse{ + Done: true, + PromptEvalCount: c.Timings.PromptN, + PromptEvalDuration: parseDurationMs(c.Timings.PromptMS), + EvalCount: c.Timings.PredictedN, + EvalDuration: parseDurationMs(c.Timings.PredictedMS), + }) + return nil + } } } - // should never reach here ideally - return fmt.Errorf("max retries exceeded") + if err := scanner.Err(); err != nil { + if strings.Contains(err.Error(), "unexpected EOF") { + s.Close() + msg := "" + if s.status != nil && s.status.LastErrMsg != "" { + msg = s.status.LastErrMsg + } + return fmt.Errorf("an unknown error was encountered while running the model %s", msg) + } + + return fmt.Errorf("error reading llm response: %v", err) + } + + return nil } type EmbeddingRequest struct { @@ -743,8 +745,9 @@ func (s *llmServer) Embedding(ctx context.Context, prompt string) ([]float64, er return nil, err } defer s.sem.Release(1) + // Make sure the server is ready - status, err := s.getServerStatus(ctx) + status, err := s.getServerStatusRetry(ctx) if err != nil { return nil, err } else if status != ServerStatusReady { @@ -799,7 +802,7 @@ func (s *llmServer) Tokenize(ctx context.Context, content string) ([]int, error) status, err := s.getServerStatus(ctx) if err != nil { return nil, err - } else if status != ServerStatusReady && status != ServerStatusNoSlotsAvaialble { + } else if status != ServerStatusReady && status != ServerStatusNoSlotsAvailable { return nil, fmt.Errorf("unexpected server status: %s", status.ToString()) } @@ -851,7 +854,7 @@ func (s *llmServer) Detokenize(ctx context.Context, tokens []int) (string, error status, err := s.getServerStatus(ctx) if err != nil { return "", err - } else if status != ServerStatusReady && status != ServerStatusNoSlotsAvaialble { + } else if status != ServerStatusReady && status != ServerStatusNoSlotsAvailable { return "", fmt.Errorf("unexpected server status: %s", status.ToString()) } From 380378cc80f3cffd703d5946e87a5ac990df273c Mon Sep 17 00:00:00 2001 From: Daniel Hiltgen Date: Sun, 5 May 2024 17:45:43 -0700 Subject: [PATCH 12/18] Use our libraries first Trying to live off the land for cuda libraries was not the right strategy. 
We need to use the version we compiled against to ensure things work properly --- gpu/gpu.go | 7 +++++++ llm/server.go | 39 ++++++++++++++++++++++++--------------- 2 files changed, 31 insertions(+), 15 deletions(-) diff --git a/gpu/gpu.go b/gpu/gpu.go index 30c25bfc..21666c8d 100644 --- a/gpu/gpu.go +++ b/gpu/gpu.go @@ -166,6 +166,12 @@ func GetGPUInfo() GpuInfoList { slog.Warn("CPU does not have AVX or AVX2, disabling GPU support.") } + // On windows we bundle the nvidia library one level above the runner dir + depPath := "" + if runtime.GOOS == "windows" && envconfig.RunnersDir != "" { + depPath = filepath.Dir(envconfig.RunnersDir) + } + var memInfo C.mem_info_t resp := []GpuInfo{} @@ -198,6 +204,7 @@ func GetGPUInfo() GpuInfoList { gpuInfo.Major = int(memInfo.major) gpuInfo.Minor = int(memInfo.minor) gpuInfo.MinimumMemory = cudaMinimumMemory + gpuInfo.DependencyPath = depPath // TODO potentially sort on our own algorithm instead of what the underlying GPU library does... resp = append(resp, gpuInfo) diff --git a/llm/server.go b/llm/server.go index db1b0e23..e2402256 100644 --- a/llm/server.go +++ b/llm/server.go @@ -233,13 +233,13 @@ func NewLlamaServer(gpus gpu.GpuInfoList, model string, ggml *GGML, adapters, pr if runtime.GOOS == "windows" { pathEnv = "PATH" } - // append the server directory to LD_LIBRARY_PATH/PATH + // prepend the server directory to LD_LIBRARY_PATH/PATH libraryPaths := []string{dir} if libraryPath, ok := os.LookupEnv(pathEnv); ok { // Append our runner directory to the path // This will favor system libraries over our bundled library dependencies - libraryPaths = append(filepath.SplitList(libraryPath), libraryPaths...) + libraryPaths = append(libraryPaths, filepath.SplitList(libraryPath)...) } // Note: we always put the dependency path first @@ -275,15 +275,31 @@ func NewLlamaServer(gpus gpu.GpuInfoList, model string, ggml *GGML, adapters, pr sem: semaphore.NewWeighted(int64(numParallel)), } - libEnv := fmt.Sprintf("%s=%s", pathEnv, strings.Join(libraryPaths, string(filepath.ListSeparator))) - s.cmd.Env = append(os.Environ(), libEnv) + s.cmd.Env = os.Environ() s.cmd.Stdout = os.Stdout s.cmd.Stderr = s.status - // TODO - multiple GPU selection logic... 
- key, val := gpu.GpuInfoList(gpus).GetVisibleDevicesEnv() - if key != "" { - s.cmd.Env = append(s.cmd.Env, key+"="+val) + visibleDevicesEnv, visibleDevicesEnvVal := gpu.GpuInfoList(gpus).GetVisibleDevicesEnv() + pathEnvVal := strings.Join(libraryPaths, string(filepath.ListSeparator)) + + // Update or add the path and visible devices variable with our adjusted version + pathNeeded := true + devicesNeeded := visibleDevicesEnv != "" + for i := range s.cmd.Env { + cmp := strings.SplitN(s.cmd.Env[i], "=", 2) + if strings.EqualFold(cmp[0], pathEnv) { + s.cmd.Env[i] = pathEnv + "=" + pathEnvVal + pathNeeded = false + } else if devicesNeeded && strings.EqualFold(cmp[0], visibleDevicesEnv) { + s.cmd.Env[i] = visibleDevicesEnv + "=" + visibleDevicesEnvVal + devicesNeeded = false + } + } + if pathNeeded { + s.cmd.Env = append(s.cmd.Env, pathEnv+"="+pathEnvVal) + } + if devicesNeeded { + s.cmd.Env = append(s.cmd.Env, visibleDevicesEnv+"="+visibleDevicesEnvVal) } slog.Info("starting llama server", "cmd", s.cmd.String()) @@ -300,13 +316,6 @@ func NewLlamaServer(gpus gpu.GpuInfoList, model string, ggml *GGML, adapters, pr continue } - // TODO - make sure this is all wired up correctly - // if err = s.WaitUntilRunning(); err != nil { - // slog.Error("error starting llama server", "server", servers[i], "error", err) - // s.Close() - // finalErr = err - // continue - // } return s, nil } From 3ecae420ac3569f7feee6ab2577811ea01959d66 Mon Sep 17 00:00:00 2001 From: Darinka <39233990+Darinochka@users.noreply.github.com> Date: Tue, 7 May 2024 00:39:58 +0300 Subject: [PATCH 13/18] Update api.md (#3945) * Update api.md Changed the calculation of tps (token/s) in the documentation * Update docs/api.md --------- Co-authored-by: Jeffrey Morgan --- docs/api.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/api.md b/docs/api.md index e79b6f5a..2f52c55a 100644 --- a/docs/api.md +++ b/docs/api.md @@ -95,7 +95,7 @@ The final response in the stream also includes additional data about the generat - `context`: an encoding of the conversation used in this response, this can be sent in the next request to keep a conversational memory - `response`: empty if the response was streamed, if not streamed, this will contain the full response -To calculate how fast the response is generated in tokens per second (token/s), divide `eval_count` / `eval_duration`. +To calculate how fast the response is generated in tokens per second (token/s), divide `eval_count` / `eval_duration` * `10^9`. ```json { From ee02f548c82f8a81dd023fa0d15ccdf840389ef9 Mon Sep 17 00:00:00 2001 From: "Mohamed A. 
Fouad" <111041768+moresearch@users.noreply.github.com> Date: Mon, 6 May 2024 19:02:25 -0300 Subject: [PATCH 14/18] Update linux.md (#3847) Add -e to viewing logs in order to show end of ollama logs --- docs/linux.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/linux.md b/docs/linux.md index 0ef4a30f..9e7e06fa 100644 --- a/docs/linux.md +++ b/docs/linux.md @@ -105,7 +105,7 @@ sudo chmod +x /usr/bin/ollama To view logs of Ollama running as a startup service, run: ```bash -journalctl -u ollama +journalctl -e -u ollama ``` ## Uninstall From d091fe3c21850a54182db4f7a9fc46257b4f3368 Mon Sep 17 00:00:00 2001 From: Jeffrey Chen <78434827+TCOTC@users.noreply.github.com> Date: Tue, 7 May 2024 06:03:14 +0800 Subject: [PATCH 15/18] Windows automatically recognizes username (#3214) --- docs/faq.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/faq.md b/docs/faq.md index 109a1144..3fe3da89 100644 --- a/docs/faq.md +++ b/docs/faq.md @@ -140,7 +140,7 @@ Refer to the section [above](#how-do-i-configure-ollama-server) for how to set e - macOS: `~/.ollama/models` - Linux: `/usr/share/ollama/.ollama/models` -- Windows: `C:\Users\\.ollama\models` +- Windows: `C:\Users\%username%\.ollama\models` ### How do I set them to a different location? From af47413dbab0bad3e9985cd9978cd9235851bfed Mon Sep 17 00:00:00 2001 From: Jackie Li Date: Mon, 6 May 2024 23:59:18 +0100 Subject: [PATCH 16/18] Add MarshalJSON to Duration (#3284) --------- Co-authored-by: Patrick Devine --- api/types.go | 11 ++++++++- api/types_test.go | 57 +++++++++++++++++++++++++++++++++++++++++++++++ 2 files changed, 67 insertions(+), 1 deletion(-) diff --git a/api/types.go b/api/types.go index 7cfd5ff7..70caee87 100644 --- a/api/types.go +++ b/api/types.go @@ -436,6 +436,13 @@ type Duration struct { time.Duration } +func (d Duration) MarshalJSON() ([]byte, error) { + if d.Duration < 0 { + return []byte("-1"), nil + } + return []byte("\"" + d.Duration.String() + "\""), nil +} + func (d *Duration) UnmarshalJSON(b []byte) (err error) { var v any if err := json.Unmarshal(b, &v); err != nil { @@ -449,7 +456,7 @@ func (d *Duration) UnmarshalJSON(b []byte) (err error) { if t < 0 { d.Duration = time.Duration(math.MaxInt64) } else { - d.Duration = time.Duration(t * float64(time.Second)) + d.Duration = time.Duration(int(t) * int(time.Second)) } case string: d.Duration, err = time.ParseDuration(t) @@ -459,6 +466,8 @@ func (d *Duration) UnmarshalJSON(b []byte) (err error) { if d.Duration < 0 { d.Duration = time.Duration(math.MaxInt64) } + default: + return fmt.Errorf("Unsupported type: '%s'", reflect.TypeOf(v)) } return nil diff --git a/api/types_test.go b/api/types_test.go index 5a093be2..cfe1331f 100644 --- a/api/types_test.go +++ b/api/types_test.go @@ -21,6 +21,11 @@ func TestKeepAliveParsingFromJSON(t *testing.T) { req: `{ "keep_alive": 42 }`, exp: &Duration{42 * time.Second}, }, + { + name: "Positive Float", + req: `{ "keep_alive": 42.5 }`, + exp: &Duration{42 * time.Second}, + }, { name: "Positive Integer String", req: `{ "keep_alive": "42m" }`, @@ -31,6 +36,11 @@ func TestKeepAliveParsingFromJSON(t *testing.T) { req: `{ "keep_alive": -1 }`, exp: &Duration{math.MaxInt64}, }, + { + name: "Negative Float", + req: `{ "keep_alive": -3.14 }`, + exp: &Duration{math.MaxInt64}, + }, { name: "Negative Integer String", req: `{ "keep_alive": "-1m" }`, @@ -48,3 +58,50 @@ func TestKeepAliveParsingFromJSON(t *testing.T) { }) } } + +func TestDurationMarshalUnmarshal(t *testing.T) { + tests := []struct { + name string + 
input time.Duration + expected time.Duration + }{ + { + "negative duration", + time.Duration(-1), + time.Duration(math.MaxInt64), + }, + { + "positive duration", + time.Duration(42 * time.Second), + time.Duration(42 * time.Second), + }, + { + "another positive duration", + time.Duration(42 * time.Minute), + time.Duration(42 * time.Minute), + }, + { + "zero duration", + time.Duration(0), + time.Duration(0), + }, + { + "max duration", + time.Duration(math.MaxInt64), + time.Duration(math.MaxInt64), + }, + } + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + b, err := json.Marshal(Duration{test.input}) + require.NoError(t, err) + + var d Duration + err = json.Unmarshal(b, &d) + require.NoError(t, err) + + assert.Equal(t, test.expected, d.Duration, "input %v, marshalled %v, got %v", test.input, string(b), d.Duration) + }) + } +} From 39d9d22ca3d20d9b76d7f767ac846eb995e1669b Mon Sep 17 00:00:00 2001 From: Jeffrey Morgan Date: Mon, 6 May 2024 16:01:37 -0700 Subject: [PATCH 17/18] close server on receiving signal (#4213) --- cmd/cmd.go | 7 ++++++- server/routes.go | 1 + 2 files changed, 7 insertions(+), 1 deletion(-) diff --git a/cmd/cmd.go b/cmd/cmd.go index faac424c..bf305d81 100644 --- a/cmd/cmd.go +++ b/cmd/cmd.go @@ -898,7 +898,12 @@ func RunServer(cmd *cobra.Command, _ []string) error { return err } - return server.Serve(ln) + err = server.Serve(ln) + if errors.Is(err, http.ErrServerClosed) { + return nil + } + + return err } func initializeKeypair() error { diff --git a/server/routes.go b/server/routes.go index e878598a..da51fbbe 100644 --- a/server/routes.go +++ b/server/routes.go @@ -1041,6 +1041,7 @@ func Serve(ln net.Listener) error { signal.Notify(signals, syscall.SIGINT, syscall.SIGTERM) go func() { <-signals + srvr.Close() done() sched.unloadAllRunners() gpu.Cleanup() From 7c5330413b268ca0023bcc2f62736730cea58d7e Mon Sep 17 00:00:00 2001 From: CrispStrobe <154636388+CrispStrobe@users.noreply.github.com> Date: Tue, 7 May 2024 01:03:21 +0200 Subject: [PATCH 18/18] note on naming restrictions (#2625) * note on naming restrictions else push would fail with cryptic retrieving manifest Error: file does not exist ==> maybe change that in code too * Update docs/import.md --------- Co-authored-by: C-4-5-3 <154636388+C-4-5-3@users.noreply.github.com> Co-authored-by: Jeffrey Morgan --- docs/import.md | 2 ++ 1 file changed, 2 insertions(+) diff --git a/docs/import.md b/docs/import.md index a492778f..7041b74d 100644 --- a/docs/import.md +++ b/docs/import.md @@ -136,6 +136,8 @@ Next, copy your model to your username's namespace: ollama cp example /example ``` +> Note: model names may only contain lowercase letters, digits, and the characters `.`, `-`, and `_`. + Then push the model: ```