From 34088dbcfb47546fc0f375276173467bc8bbed29 Mon Sep 17 00:00:00 2001 From: Daniel Hiltgen Date: Tue, 8 Jul 2025 11:59:06 -0700 Subject: [PATCH 01/16] API/CLI context enhancements (#11331) * API: expose context size of loaded models * CLI: add context UX This adds a column in the ps output to show the models context size. --- api/types.go | 15 ++++++++------- cmd/cmd.go | 5 +++-- server/routes.go | 3 +++ 3 files changed, 14 insertions(+), 9 deletions(-) diff --git a/api/types.go b/api/types.go index f1e47c59..699dba42 100644 --- a/api/types.go +++ b/api/types.go @@ -468,13 +468,14 @@ type ListModelResponse struct { // ProcessModelResponse is a single model description in [ProcessResponse]. type ProcessModelResponse struct { - Name string `json:"name"` - Model string `json:"model"` - Size int64 `json:"size"` - Digest string `json:"digest"` - Details ModelDetails `json:"details,omitempty"` - ExpiresAt time.Time `json:"expires_at"` - SizeVRAM int64 `json:"size_vram"` + Name string `json:"name"` + Model string `json:"model"` + Size int64 `json:"size"` + Digest string `json:"digest"` + Details ModelDetails `json:"details,omitempty"` + ExpiresAt time.Time `json:"expires_at"` + SizeVRAM int64 `json:"size_vram"` + ContextLength int `json:"context_length"` } type TokenResponse struct { diff --git a/cmd/cmd.go b/cmd/cmd.go index 2d165379..b569dddd 100644 --- a/cmd/cmd.go +++ b/cmd/cmd.go @@ -583,12 +583,13 @@ func ListRunningHandler(cmd *cobra.Command, args []string) error { } else { until = format.HumanTime(m.ExpiresAt, "Never") } - data = append(data, []string{m.Name, m.Digest[:12], format.HumanBytes(m.Size), procStr, until}) + ctxStr := strconv.Itoa(m.ContextLength) + data = append(data, []string{m.Name, m.Digest[:12], format.HumanBytes(m.Size), procStr, ctxStr, until}) } } table := tablewriter.NewWriter(os.Stdout) - table.SetHeader([]string{"NAME", "ID", "SIZE", "PROCESSOR", "UNTIL"}) + table.SetHeader([]string{"NAME", "ID", "SIZE", "PROCESSOR", "CONTEXT", "UNTIL"}) table.SetHeaderAlignment(tablewriter.ALIGN_LEFT) table.SetAlignment(tablewriter.ALIGN_LEFT) table.SetHeaderLine(false) diff --git a/server/routes.go b/server/routes.go index cb46cef1..603cd42a 100644 --- a/server/routes.go +++ b/server/routes.go @@ -1404,6 +1404,9 @@ func (s *Server) PsHandler(c *gin.Context) { Details: modelDetails, ExpiresAt: v.expiresAt, } + if v.Options != nil { + mr.ContextLength = v.Options.NumCtx / v.numParallel + } // The scheduler waits to set expiresAt, so if a model is loading it's // possible that it will be set to the unix epoch. For those cases, just // calculate the time w/ the sessionDuration instead. From 20c3266e943f62ef7947f00b563de5f6c790ecb7 Mon Sep 17 00:00:00 2001 From: Daniel Hiltgen Date: Tue, 8 Jul 2025 12:08:37 -0700 Subject: [PATCH 02/16] Reduce default parallelism to 1 (#11330) The current scheduler algorithm of picking the paralellism based on available VRAM complicates the upcoming dynamic layer memory allocation algorithm. This changes the default to 1, with the intent going forward that parallelism is explicit and will no longer be dynamically determined. Removal of the dynamic logic will come in a follow up. --- docs/faq.md | 4 ++-- envconfig/config.go | 2 +- server/sched.go | 4 +--- 3 files changed, 4 insertions(+), 6 deletions(-) diff --git a/docs/faq.md b/docs/faq.md index 6fe63341..8931b6aa 100644 --- a/docs/faq.md +++ b/docs/faq.md @@ -292,7 +292,7 @@ If too many requests are sent to the server, it will respond with a 503 error in ## How does Ollama handle concurrent requests? 
-Ollama supports two levels of concurrent processing. If your system has sufficient available memory (system memory when using CPU inference, or VRAM for GPU inference) then multiple models can be loaded at the same time. For a given model, if there is sufficient available memory when the model is loaded, it is configured to allow parallel request processing. +Ollama supports two levels of concurrent processing. If your system has sufficient available memory (system memory when using CPU inference, or VRAM for GPU inference) then multiple models can be loaded at the same time. For a given model, if there is sufficient available memory when the model is loaded, it can be configured to allow parallel request processing. If there is insufficient available memory to load a new model request while one or more models are already loaded, all new requests will be queued until the new model can be loaded. As prior models become idle, one or more will be unloaded to make room for the new model. Queued requests will be processed in order. When using GPU inference new models must be able to completely fit in VRAM to allow concurrent model loads. @@ -301,7 +301,7 @@ Parallel request processing for a given model results in increasing the context The following server settings may be used to adjust how Ollama handles concurrent requests on most platforms: - `OLLAMA_MAX_LOADED_MODELS` - The maximum number of models that can be loaded concurrently provided they fit in available memory. The default is 3 * the number of GPUs or 3 for CPU inference. -- `OLLAMA_NUM_PARALLEL` - The maximum number of parallel requests each model will process at the same time. The default will auto-select either 4 or 1 based on available memory. +- `OLLAMA_NUM_PARALLEL` - The maximum number of parallel requests each model will process at the same time. The default is 1, and will handle 1 request per model at a time. - `OLLAMA_MAX_QUEUE` - The maximum number of requests Ollama will queue when busy before rejecting additional requests. The default is 512 Note: Windows with Radeon GPUs currently default to 1 model maximum due to limitations in ROCm v5.7 for available VRAM reporting. Once ROCm v6.2 is available, Windows Radeon will follow the defaults above. You may enable concurrent model loads on Radeon on Windows, but ensure you don't load more models than will fit into your GPUs VRAM. diff --git a/envconfig/config.go b/envconfig/config.go index 763f0464..7fc01887 100644 --- a/envconfig/config.go +++ b/envconfig/config.go @@ -219,7 +219,7 @@ func Uint(key string, defaultValue uint) func() uint { var ( // NumParallel sets the number of parallel model requests. NumParallel can be configured via the OLLAMA_NUM_PARALLEL environment variable. - NumParallel = Uint("OLLAMA_NUM_PARALLEL", 0) + NumParallel = Uint("OLLAMA_NUM_PARALLEL", 1) // MaxRunners sets the maximum number of loaded models. MaxRunners can be configured via the OLLAMA_MAX_LOADED_MODELS environment variable. MaxRunners = Uint("OLLAMA_MAX_LOADED_MODELS", 0) // MaxQueue sets the maximum number of queued requests. MaxQueue can be configured via the OLLAMA_MAX_QUEUE environment variable. diff --git a/server/sched.go b/server/sched.go index e71cdd1b..2842bb3a 100644 --- a/server/sched.go +++ b/server/sched.go @@ -57,9 +57,7 @@ type Scheduler struct { var defaultModelsPerGPU = 3 // Default automatic value for parallel setting -// Model will still need to fit in VRAM. 
If this setting won't fit -// we'll back off down to 1 to try to get it to fit -var defaultParallel = 2 +var defaultParallel = 1 var ErrMaxQueue = errors.New("server busy, please try again. maximum pending requests exceeded") From 66fb8575ced090a969c9529c88ee57a8df1259c2 Mon Sep 17 00:00:00 2001 From: Daniel Hiltgen Date: Tue, 8 Jul 2025 15:38:04 -0700 Subject: [PATCH 03/16] doc: add MacOS docs (#11334) also removes stale model dir instructions for windows --- docs/README.md | 1 + docs/faq.md | 13 +++++++++++++ docs/macos.md | 42 ++++++++++++++++++++++++++++++++++++++++++ docs/windows.md | 14 -------------- 4 files changed, 56 insertions(+), 14 deletions(-) create mode 100644 docs/macos.md diff --git a/docs/README.md b/docs/README.md index 4d3b7140..310a4399 100644 --- a/docs/README.md +++ b/docs/README.md @@ -4,6 +4,7 @@ * [Quickstart](../README.md#quickstart) * [Examples](./examples.md) * [Importing models](./import.md) +* [MacOS Documentation](./macos.md) * [Linux Documentation](./linux.md) * [Windows Documentation](./windows.md) * [Docker Documentation](./docker.md) diff --git a/docs/faq.md b/docs/faq.md index 8931b6aa..a6ad6f6e 100644 --- a/docs/faq.md +++ b/docs/faq.md @@ -333,3 +333,16 @@ The currently available K/V cache quantization types are: How much the cache quantization impacts the model's response quality will depend on the model and the task. Models that have a high GQA count (e.g. Qwen2) may see a larger impact on precision from quantization than models with a low GQA count. You may need to experiment with different quantization types to find the best balance between memory usage and quality. + +## How can I stop Ollama from starting when I login to my computer + +Ollama for Windows and macOS register as a login item during installation. You can disable this if you prefer not to have Ollama automatically start. Ollama will respect this setting across upgrades, unless you uninstall the application. + +**Windows** +- Remove `%APPDATA%\Microsoft\Windows\Start Menu\Programs\Startup\Ollama.lnk` + +**MacOS Monterey (v12)** +- Open `Settings` -> `Users & Groups` -> `Login Items` and find the `Ollama` entry, then click the `-` (minus) to remove + +**MacOS Ventura (v13) and later** +- Open `Settings` and search for "Login Items", find the `Ollama` entry under "Allow in the Background`, then click the slider to disable. \ No newline at end of file diff --git a/docs/macos.md b/docs/macos.md new file mode 100644 index 00000000..63bf14b1 --- /dev/null +++ b/docs/macos.md @@ -0,0 +1,42 @@ +# Ollama for macOS + +## System Requirements + +* MacOS Monterey (v12) or newer +* Apple M series (CPU and GPU support) or x86 (CPU only) + + +## Filesystem Requirements + +The preferred method of installation is to mount the `ollama.dmg` and drag-and-drop the Ollama application to the system-wide `Applications` folder. Upon startup, the Ollama app will verify the `ollama` CLI is present in your PATH, and if not detected, will prompt for permission to create a link in `/usr/local/bin` + +Once you've installed Ollama, you'll need additional space for storing the Large Language models, which can be tens to hundreds of GB in size. If your home directory doesn't have enough space, you can change where the binaries are installed, and where the models are stored. 
+ +### Changing Install Location + +To install the Ollama application somewhere other than `Applications`, place the Ollama application in the desired location, and ensure the CLI `Ollama.app/Contents/Resources/ollama` or a sym-link to the CLI can be found in your path. Upon first start decline the "Move to Applications?" request. + + +## Troubleshooting + +Ollama on MacOS stores files in a few different locations. +- `~/.ollama` contains models and configuration +- `~/.ollama/logs` contains logs + - *app.log* contains most resent logs from the GUI application + - *server.log* contains the most recent server logs +- `/Ollama.app/Contents/Resources/ollama` the CLI binary + +## Uninstall + +To fully remove Ollama from your system, remove the following files and folders: + +``` +sudo rm -rf /Applications/Ollama.app +sudo rm /usr/local/bin/ollama +rm -rf "~/Library/Application Support/Ollama" +rm -rf "~/Library/Saved Application State/com.electron.ollama.savedState" +rm -rf ~/Library/Caches/com.electron.ollama/ +rm -rf ~/Library/Caches/ollama +rm -rf ~/Library/WebKit/com.electron.ollama +rm -rf ~/.ollama +``` \ No newline at end of file diff --git a/docs/windows.md b/docs/windows.md index 0bffa4b4..2e495e49 100644 --- a/docs/windows.md +++ b/docs/windows.md @@ -30,20 +30,6 @@ To install the Ollama application in a location different than your home directo OllamaSetup.exe /DIR="d:\some\location" ``` -### Changing Model Location - -To change where Ollama stores the downloaded models instead of using your home directory, set the environment variable `OLLAMA_MODELS` in your user account. - -1. Start the Settings (Windows 11) or Control Panel (Windows 10) application and search for _environment variables_. - -2. Click on _Edit environment variables for your account_. - -3. Edit or create a new variable for your user account for `OLLAMA_MODELS` where you want the models stored - -4. Click OK/Apply to save. - -If Ollama is already running, Quit the tray application and relaunch it from the Start menu, or a new terminal started after you saved the environment variables. - ## API Access Here's a quick example showing API access from `powershell` From 35fda7b4af556e7eeef2b5dcb3638435382b2576 Mon Sep 17 00:00:00 2001 From: Jesse Gross Date: Wed, 25 Jun 2025 17:13:32 -0700 Subject: [PATCH 04/16] ggml: Report ordinal IDs for AMD GPUs on Windows We don't get valid UUIDs for AMD GPUs on Windows, so the best option is to use the ordinal IDs. This brings us in line with what we currently do on the Ollama server - the only exception is AMD GPUs on Linux, which falls back to using ordinal IDs. The GGML implementation has no fallback but it doesn't appear to occur for any of the GPUs that we support. It's also possible that there are collisions between ordinal IDs for different libraries - however the only places where we use them are AMD on Windows and Metal on Mac, which can never occur on the same system. 
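
To make the matching concrete, here is a minimal consumer sketch (illustrative only, not part of this change): it assumes just the exported Name and ID fields on ml.DeviceMemory as renamed below, and the indexDevices helper itself is hypothetical.

```go
package example

import "github.com/ollama/ollama/ml"

// indexDevices maps each reported device ID to its name so the entries can be
// cross-referenced with external tooling (nvidia-smi UUIDs on NVIDIA, ordinal
// adapter indexes on AMD/Windows and Metal).
func indexDevices(gpus []ml.DeviceMemory) map[string]string {
	byID := make(map[string]string)
	for _, gpu := range gpus {
		// NVIDIA devices report a "GPU-xxxxxxxx-..." UUID string, while AMD
		// on Windows and Metal report an ordinal such as "0" or "1".
		byID[gpu.ID] = gpu.Name
	}
	return byID
}
```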
--- .../patches/0017-ggml-Export-GPU-UUIDs.patch | 38 +++++++++++-------- ml/backend.go | 10 ++--- ml/backend/ggml/ggml.go | 4 +- ml/backend/ggml/ggml/include/ggml-backend.h | 2 +- .../ggml/ggml/src/ggml-cuda/ggml-cuda.cu | 22 +++++++---- .../ggml/ggml/src/ggml-metal/ggml-metal.m | 2 +- 6 files changed, 45 insertions(+), 33 deletions(-) diff --git a/llama/patches/0017-ggml-Export-GPU-UUIDs.patch b/llama/patches/0017-ggml-Export-GPU-UUIDs.patch index a2539034..b7d56b0d 100644 --- a/llama/patches/0017-ggml-Export-GPU-UUIDs.patch +++ b/llama/patches/0017-ggml-Export-GPU-UUIDs.patch @@ -7,31 +7,31 @@ This enables matching up devices and information reported by the backend with tools (e.g. nvidia-smi) and system management libraries (e.g. nvml). --- ggml/include/ggml-backend.h | 1 + - ggml/src/ggml-cuda/ggml-cuda.cu | 33 ++++++++++++++++++++++++++++++++ + ggml/src/ggml-cuda/ggml-cuda.cu | 39 ++++++++++++++++++++++++++++++++ ggml/src/ggml-metal/ggml-metal.m | 1 + - 3 files changed, 35 insertions(+) + 3 files changed, 41 insertions(+) diff --git a/ggml/include/ggml-backend.h b/ggml/include/ggml-backend.h -index 74e46716..a880df33 100644 +index 74e46716..48839339 100644 --- a/ggml/include/ggml-backend.h +++ b/ggml/include/ggml-backend.h @@ -152,6 +152,7 @@ extern "C" { struct ggml_backend_dev_props { const char * name; const char * description; -+ const char * uuid; ++ const char * id; size_t memory_free; size_t memory_total; enum ggml_backend_dev_type type; diff --git a/ggml/src/ggml-cuda/ggml-cuda.cu b/ggml/src/ggml-cuda/ggml-cuda.cu -index cb0d8528..4c829153 100644 +index cb0d8528..d6960174 100644 --- a/ggml/src/ggml-cuda/ggml-cuda.cu +++ b/ggml/src/ggml-cuda/ggml-cuda.cu @@ -2884,6 +2884,7 @@ struct ggml_backend_cuda_device_context { int device; std::string name; std::string description; -+ std::string uuid; ++ std::string id; }; static const char * ggml_backend_cuda_device_get_name(ggml_backend_dev_t dev) { @@ -39,9 +39,9 @@ index cb0d8528..4c829153 100644 return ctx->description.c_str(); } -+static const char * ggml_backend_cuda_device_get_uuid(ggml_backend_dev_t dev) { ++static const char * ggml_backend_cuda_device_get_id(ggml_backend_dev_t dev) { + ggml_backend_cuda_device_context * ctx = (ggml_backend_cuda_device_context *)dev->context; -+ return ctx->uuid.c_str(); ++ return ctx->id.c_str(); +} + static void ggml_backend_cuda_device_get_memory(ggml_backend_dev_t dev, size_t * free, size_t * total) { @@ -51,17 +51,17 @@ index cb0d8528..4c829153 100644 static void ggml_backend_cuda_device_get_props(ggml_backend_dev_t dev, ggml_backend_dev_props * props) { props->name = ggml_backend_cuda_device_get_name(dev); props->description = ggml_backend_cuda_device_get_description(dev); -+ props->uuid = ggml_backend_cuda_device_get_uuid(dev); ++ props->id = ggml_backend_cuda_device_get_id(dev); props->type = ggml_backend_cuda_device_get_type(dev); ggml_backend_cuda_device_get_memory(dev, &props->memory_free, &props->memory_total); -@@ -3458,6 +3465,32 @@ ggml_backend_reg_t ggml_backend_cuda_reg() { +@@ -3458,6 +3465,38 @@ ggml_backend_reg_t ggml_backend_cuda_reg() { CUDA_CHECK(cudaGetDeviceProperties(&prop, i)); dev_ctx->description = prop.name; + #if !defined(GGML_USE_HIP) -+ char uuid[64]; -+ snprintf(uuid, sizeof(uuid), ++ char id[64]; ++ snprintf(id, sizeof(id), + "GPU-%02x%02x%02x%02x-%02x%02x-%02x%02x-%02x%02x-%02x%02x%02x%02x%02x%02x", + (unsigned char)prop.uuid.bytes[0], + (unsigned char)prop.uuid.bytes[1], @@ -80,23 +80,29 @@ index cb0d8528..4c829153 100644 + (unsigned 
char)prop.uuid.bytes[14], + (unsigned char)prop.uuid.bytes[15] + ); -+ dev_ctx->uuid = uuid; ++ dev_ctx->id = id; + #else -+ dev_ctx->uuid = "GPU-" + std::string(prop.uuid.bytes, 16); ++ #ifdef _WIN32 ++ char id[16]; ++ snprintf(id, sizeof(id), "%d", i); ++ dev_ctx->id = id; ++ #else ++ dev_ctx->id = "GPU-" + std::string(prop.uuid.bytes, 16); ++ #endif + #endif + ggml_backend_dev_t dev = new ggml_backend_device { /* .iface = */ ggml_backend_cuda_device_interface, /* .reg = */ ®, diff --git a/ggml/src/ggml-metal/ggml-metal.m b/ggml/src/ggml-metal/ggml-metal.m -index 1b56f858..ee4f2dcb 100644 +index 1b56f858..a9eeebc6 100644 --- a/ggml/src/ggml-metal/ggml-metal.m +++ b/ggml/src/ggml-metal/ggml-metal.m @@ -5703,6 +5703,7 @@ static enum ggml_backend_dev_type ggml_backend_metal_device_get_type(ggml_backen static void ggml_backend_metal_device_get_props(ggml_backend_dev_t dev, struct ggml_backend_dev_props * props) { props->name = ggml_backend_metal_device_get_name(dev); props->description = ggml_backend_metal_device_get_description(dev); -+ props->uuid = "0"; ++ props->id = "0"; props->type = ggml_backend_metal_device_get_type(dev); ggml_backend_metal_device_get_memory(dev, &props->memory_free, &props->memory_total); props->caps = (struct ggml_backend_dev_caps) { diff --git a/ml/backend.go b/ml/backend.go index 61066c1a..06f9de9a 100644 --- a/ml/backend.go +++ b/ml/backend.go @@ -124,9 +124,9 @@ type DeviceMemory struct { // may not be persistent across instances of the runner. Name string - // UUID is a unique persistent identifier for the device for matching - // with system management libraries - UUID string + // ID is an identifier for the device for matching with system + // management libraries. + ID string // Weights is the per-layer memory needed for the model weights. Weights []Memory @@ -156,8 +156,8 @@ func (m DeviceMemory) LogValue() slog.Value { attrs = append(attrs, slog.Any("Graph", m.Graph)) } - if len(attrs) > 0 && m.UUID != "" { - attrs = append([]slog.Attr{slog.String("UUID", m.UUID)}, attrs...) + if len(attrs) > 0 && m.ID != "" { + attrs = append([]slog.Attr{slog.String("ID", m.ID)}, attrs...) } return slog.GroupValue(attrs...) 
diff --git a/ml/backend/ggml/ggml.go b/ml/backend/ggml/ggml.go index 4f1212de..680910f8 100644 --- a/ml/backend/ggml/ggml.go +++ b/ml/backend/ggml/ggml.go @@ -138,7 +138,7 @@ func New(modelPath string, params ml.BackendParams) (ml.Backend, error) { requiredMemory.CPU.Name = C.GoString(C.ggml_backend_dev_name(cpuDeviceBufferType.d)) var props C.struct_ggml_backend_dev_props C.ggml_backend_dev_get_props(cpuDeviceBufferType.d, &props) - requiredMemory.CPU.UUID = C.GoString(props.uuid) + requiredMemory.CPU.ID = C.GoString(props.id) requiredMemory.CPU.Weights = make([]ml.Memory, blocks+1) requiredMemory.CPU.Cache = make([]ml.Memory, blocks+1) @@ -155,7 +155,7 @@ func New(modelPath string, params ml.BackendParams) (ml.Backend, error) { requiredMemory.GPUs[i].Name = C.GoString(C.ggml_backend_dev_name(d)) var props C.struct_ggml_backend_dev_props C.ggml_backend_dev_get_props(d, &props) - requiredMemory.GPUs[i].UUID = C.GoString(props.uuid) + requiredMemory.GPUs[i].ID = C.GoString(props.id) requiredMemory.GPUs[i].Weights = make([]ml.Memory, blocks+1) requiredMemory.GPUs[i].Cache = make([]ml.Memory, blocks+1) } diff --git a/ml/backend/ggml/ggml/include/ggml-backend.h b/ml/backend/ggml/ggml/include/ggml-backend.h index a880df33..48839339 100644 --- a/ml/backend/ggml/ggml/include/ggml-backend.h +++ b/ml/backend/ggml/ggml/include/ggml-backend.h @@ -152,7 +152,7 @@ extern "C" { struct ggml_backend_dev_props { const char * name; const char * description; - const char * uuid; + const char * id; size_t memory_free; size_t memory_total; enum ggml_backend_dev_type type; diff --git a/ml/backend/ggml/ggml/src/ggml-cuda/ggml-cuda.cu b/ml/backend/ggml/ggml/src/ggml-cuda/ggml-cuda.cu index 9e64e5ae..2b9fabf4 100644 --- a/ml/backend/ggml/ggml/src/ggml-cuda/ggml-cuda.cu +++ b/ml/backend/ggml/ggml/src/ggml-cuda/ggml-cuda.cu @@ -2888,7 +2888,7 @@ struct ggml_backend_cuda_device_context { int device; std::string name; std::string description; - std::string uuid; + std::string id; }; static const char * ggml_backend_cuda_device_get_name(ggml_backend_dev_t dev) { @@ -2901,9 +2901,9 @@ static const char * ggml_backend_cuda_device_get_description(ggml_backend_dev_t return ctx->description.c_str(); } -static const char * ggml_backend_cuda_device_get_uuid(ggml_backend_dev_t dev) { +static const char * ggml_backend_cuda_device_get_id(ggml_backend_dev_t dev) { ggml_backend_cuda_device_context * ctx = (ggml_backend_cuda_device_context *)dev->context; - return ctx->uuid.c_str(); + return ctx->id.c_str(); } static void ggml_backend_cuda_device_get_memory(ggml_backend_dev_t dev, size_t * free, size_t * total) { @@ -2920,7 +2920,7 @@ static enum ggml_backend_dev_type ggml_backend_cuda_device_get_type(ggml_backend static void ggml_backend_cuda_device_get_props(ggml_backend_dev_t dev, ggml_backend_dev_props * props) { props->name = ggml_backend_cuda_device_get_name(dev); props->description = ggml_backend_cuda_device_get_description(dev); - props->uuid = ggml_backend_cuda_device_get_uuid(dev); + props->id = ggml_backend_cuda_device_get_id(dev); props->type = ggml_backend_cuda_device_get_type(dev); ggml_backend_cuda_device_get_memory(dev, &props->memory_free, &props->memory_total); @@ -3471,8 +3471,8 @@ ggml_backend_reg_t ggml_backend_cuda_reg() { dev_ctx->description = prop.name; #if !defined(GGML_USE_HIP) - char uuid[64]; - snprintf(uuid, sizeof(uuid), + char id[64]; + snprintf(id, sizeof(id), "GPU-%02x%02x%02x%02x-%02x%02x-%02x%02x-%02x%02x-%02x%02x%02x%02x%02x%02x", (unsigned char)prop.uuid.bytes[0], (unsigned 
char)prop.uuid.bytes[1], @@ -3491,9 +3491,15 @@ ggml_backend_reg_t ggml_backend_cuda_reg() { (unsigned char)prop.uuid.bytes[14], (unsigned char)prop.uuid.bytes[15] ); - dev_ctx->uuid = uuid; + dev_ctx->id = id; #else - dev_ctx->uuid = "GPU-" + std::string(prop.uuid.bytes, 16); + #ifdef _WIN32 + char id[16]; + snprintf(id, sizeof(id), "%d", i); + dev_ctx->id = id; + #else + dev_ctx->id = "GPU-" + std::string(prop.uuid.bytes, 16); + #endif #endif ggml_backend_dev_t dev = new ggml_backend_device { diff --git a/ml/backend/ggml/ggml/src/ggml-metal/ggml-metal.m b/ml/backend/ggml/ggml/src/ggml-metal/ggml-metal.m index f20f5615..110c9ece 100644 --- a/ml/backend/ggml/ggml/src/ggml-metal/ggml-metal.m +++ b/ml/backend/ggml/ggml/src/ggml-metal/ggml-metal.m @@ -5726,7 +5726,7 @@ static enum ggml_backend_dev_type ggml_backend_metal_device_get_type(ggml_backen static void ggml_backend_metal_device_get_props(ggml_backend_dev_t dev, struct ggml_backend_dev_props * props) { props->name = ggml_backend_metal_device_get_name(dev); props->description = ggml_backend_metal_device_get_description(dev); - props->uuid = "0"; + props->id = "0"; props->type = ggml_backend_metal_device_get_type(dev); ggml_backend_metal_device_get_memory(dev, &props->memory_free, &props->memory_total); props->caps = (struct ggml_backend_dev_caps) { From f8a6e8881975b2964aa2179e74c4426b4a455d0f Mon Sep 17 00:00:00 2001 From: Daniel Hiltgen Date: Fri, 11 Jul 2025 12:21:54 -0700 Subject: [PATCH 05/16] Only load supported models on new engine (#11362) * Only load supported models on new engine Verify the model is supported before trying to load * int: testcase for all library models --- integration/library_models_test.go | 57 +++++++++ integration/utils_test.go | 185 +++++++++++++++++++++++++++++ model/models/llama/model.go | 9 ++ model/models/qwen2/model.go | 10 ++ 4 files changed, 261 insertions(+) create mode 100644 integration/library_models_test.go diff --git a/integration/library_models_test.go b/integration/library_models_test.go new file mode 100644 index 00000000..cdf65efc --- /dev/null +++ b/integration/library_models_test.go @@ -0,0 +1,57 @@ +//go:build integration && library + +package integration + +import ( + "context" + "log/slog" + "testing" + "time" + + "github.com/ollama/ollama/api" +) + +// First run of this scenario on a target system will take a long time to download +// ~1.5TB of models. 
Set a sufficiently large -timeout for your network speed +func TestLibraryModelsGenerate(t *testing.T) { + softTimeout, hardTimeout := getTimeouts(t) + slog.Info("Setting timeouts", "soft", softTimeout, "hard", hardTimeout) + ctx, cancel := context.WithTimeout(context.Background(), hardTimeout) + defer cancel() + client, _, cleanup := InitServerConnection(ctx, t) + defer cleanup() + + chatModels := libraryChatModels + for _, model := range chatModels { + t.Run(model, func(t *testing.T) { + if time.Now().Sub(started) > softTimeout { + t.Skip("skipping remaining tests to avoid excessive runtime") + } + if err := PullIfMissing(ctx, client, model); err != nil { + t.Fatalf("pull failed %s", err) + } + req := api.GenerateRequest{ + Model: model, + Prompt: "why is the sky blue?", + KeepAlive: &api.Duration{Duration: 10 * time.Second}, + Options: map[string]interface{}{ + "temperature": 0.1, + "seed": 123, + }, + } + anyResp := []string{"rayleigh", "scatter", "atmosphere", "nitrogen", "oxygen", "wavelength"} + // Special cases + if model == "duckdb-nsql" { + anyResp = []string{"select", "from"} + } else if model == "granite3-guardian" || model == "shieldgemma" || model == "llama-guard3" || model == "bespoke-minicheck" { + anyResp = []string{"yes", "no", "safe", "unsafe"} + } else if model == "openthinker" || model == "nexusraven" { + anyResp = []string{"plugin", "im_sep", "components", "function call"} + } else if model == "starcoder" || model == "starcoder2" || model == "magicoder" || model == "deepseek-coder" { + req.Prompt = "def fibonacci():" + anyResp = []string{"f(n)", "sequence", "n-1", "main()", "__main__", "while"} + } + DoGenerate(ctx, t, client, req, anyResp, 120*time.Second, 30*time.Second) + }) + } +} diff --git a/integration/utils_test.go b/integration/utils_test.go index c76af59c..3d726123 100644 --- a/integration/utils_test.go +++ b/integration/utils_test.go @@ -72,6 +72,187 @@ var ( "stablelm2:latest", // Predictions are off, crashes on small VRAM GPUs "falcon:latest", } + + // Some library models are quite large - ensure large VRAM and sufficient disk space + // before running scenarios based on this set + libraryChatModels = []string{ + "alfred", + "athene-v2", + "aya-expanse", + "aya", + "bakllava", + "bespoke-minicheck", + "codebooga", + "codegeex4", + "codegemma", + "codellama", + "codeqwen", + "codestral", + "codeup", + "cogito", + "command-a", + "command-r-plus", + "command-r", + "command-r7b-arabic", + "command-r7b", + "dbrx", + "deepcoder", + "deepscaler", + "deepseek-coder-v2", + "deepseek-coder", + "deepseek-llm", + "deepseek-r1", + // "deepseek-v2.5", // requires 155 GB VRAM + "deepseek-v2", + // "deepseek-v3", // requires 482 GB VRAM + "devstral", + "dolphin-llama3", + "dolphin-mistral", + "dolphin-mixtral", + "dolphin-phi", + "dolphin3", + "dolphincoder", + "duckdb-nsql", + "everythinglm", + "exaone-deep", + "exaone3.5", + "falcon", + "falcon2", + "falcon3", + "firefunction-v2", + "gemma", + "gemma2", + "gemma3", + "gemma3n", + "glm4", + "goliath", + "granite-code", + "granite3-dense", + "granite3-guardian", + "granite3-moe", + "granite3.1-dense", + "granite3.1-moe", + "granite3.2-vision", + "granite3.2", + "granite3.3", + "hermes3", + "internlm2", + "llama-guard3", + "llama-pro", + "llama2-chinese", + "llama2-uncensored", + "llama2", + "llama3-chatqa", + "llama3-gradient", + "llama3-groq-tool-use", + "llama3.1", + "llama3.2-vision", + "llama3.2", + "llama3.3", + "llama3", + "llama4", + "llava-llama3", + "llava-phi3", + "llava", + "magicoder", + "magistral", + 
"marco-o1", + "mathstral", + "meditron", + "medllama2", + "megadolphin", + "minicpm-v", + "mistral-large", + "mistral-nemo", + "mistral-openorca", + "mistral-small", + "mistral-small3.1", + "mistral-small3.2", + "mistral", + "mistrallite", + "mixtral", + "moondream", + "nemotron-mini", + "nemotron", + "neural-chat", + "nexusraven", + "notus", + "nous-hermes", + "nous-hermes2-mixtral", + "nous-hermes2", + "nuextract", + "olmo2", + "open-orca-platypus2", + "openchat", + "opencoder", + "openhermes", + "openthinker", + "orca-mini", + "orca2", + // "phi", // unreliable + "phi3.5", + "phi3", + "phi4-mini-reasoning", + "phi4-mini", + "phi4-reasoning", + "phi4", + "phind-codellama", + "qwen", + "qwen2-math", + "qwen2.5-coder", + "qwen2.5", + "qwen2.5vl", + "qwen2", + "qwen3:0.6b", // dense + "qwen3:30b", // MOE + "qwq", + "r1-1776", + "reader-lm", + "reflection", + "sailor2", + "samantha-mistral", + "shieldgemma", + "smallthinker", + "smollm", + "smollm2", + "solar-pro", + "solar", + "sqlcoder", + "stable-beluga", + "stable-code", + "stablelm-zephyr", + "stablelm2", + "starcoder", + "starcoder2", + "starling-lm", + "tinydolphin", + "tinyllama", + "tulu3", + "vicuna", + "wizard-math", + "wizard-vicuna-uncensored", + "wizard-vicuna", + "wizardcoder", + "wizardlm-uncensored", + "wizardlm2", + "xwinlm", + "yarn-llama2", + "yarn-mistral", + "yi-coder", + "yi", + "zephyr", + } + libraryEmbedModels = []string{ + "all-minilm", + "bge-large", + "bge-m3", + "granite-embedding", + "mxbai-embed-large", + "nomic-embed-text", + "paraphrase-multilingual", + "snowflake-arctic-embed", + "snowflake-arctic-embed2", + } ) func Init() { @@ -313,6 +494,10 @@ func DoGenerate(ctx context.Context, t *testing.T, client *api.Client, genReq ap t.Errorf("generate stalled. Response so far:%s", buf.String()) } case <-done: + if genErr != nil && strings.Contains(genErr.Error(), "model requires more system memory") { + slog.Warn("model is too large for the target test system", "model", genReq.Model, "error", genErr) + return + } require.NoError(t, genErr, "failed with %s request prompt %s ", genReq.Model, genReq.Prompt) // Verify the response contains the expected data response := buf.String() diff --git a/model/models/llama/model.go b/model/models/llama/model.go index 3cf782d0..77d8f36d 100644 --- a/model/models/llama/model.go +++ b/model/models/llama/model.go @@ -2,6 +2,7 @@ package llama import ( "cmp" + "fmt" "math" "github.com/ollama/ollama/fs" @@ -33,6 +34,14 @@ type Model struct { } func New(c fs.Config) (model.Model, error) { + // This model currently only supports the gpt2 tokenizer + if c.String("tokenizer.ggml.model") == "llama" { + return nil, fmt.Errorf("unsupported tokenizer: llama") + } + // Best effort detection of library/deepseek-coder model(s) which are incompatible + if c.String("general.name") == "deepseek-ai" { + return nil, fmt.Errorf("unsupported model: %s", c.String("general.name")) + } m := Model{ BytePairEncoding: model.NewBytePairEncoding( c.String("tokenizer.ggml.pretokenizer", `(?i:'s|'t|'re|'ve|'m|'ll|'d)|[^\r\n\p{L}\p{N}]?\p{L}+|\p{N}{1,3}| ?[^\s\p{L}\p{N}]+[\r\n]*|\s*[\r\n]+|\s+(?!\S)|\s+`), diff --git a/model/models/qwen2/model.go b/model/models/qwen2/model.go index 42338d0d..3c662f06 100644 --- a/model/models/qwen2/model.go +++ b/model/models/qwen2/model.go @@ -2,7 +2,9 @@ package qwen2 import ( "cmp" + "fmt" "math" + "strings" "github.com/ollama/ollama/fs" "github.com/ollama/ollama/kvcache" @@ -126,6 +128,14 @@ func (m Model) Shift(ctx ml.Context, layer int, key, shift ml.Tensor) (ml.Tensor } 
func New(c fs.Config) (model.Model, error) { + // This model currently only supports the gpt2 tokenizer + if c.String("tokenizer.ggml.model") == "llama" { + return nil, fmt.Errorf("unsupported tokenizer: llama") + } + // detect library/qwen model(s) which are incompatible + if strings.HasPrefix(c.String("general.name"), "Qwen2-beta") { + return nil, fmt.Errorf("unsupported model: %s", c.String("general.name")) + } m := Model{ Layers: make([]DecoderLayer, c.Uint("block_count")), BytePairEncoding: model.NewBytePairEncoding( From 9a43994c45f8da1b21fd302d5ef000cee36c4e16 Mon Sep 17 00:00:00 2001 From: Jesse Gross Date: Thu, 10 Jul 2025 16:55:34 -0700 Subject: [PATCH 06/16] ggml: Disable unused pipeline parallelism We're not currently using it, even in cases where we could. Disabling it improves generation performance by 10-30% with multiple GPUs. --- ml/backend/ggml/ggml.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/ml/backend/ggml/ggml.go b/ml/backend/ggml/ggml.go index 680910f8..7d6831ee 100644 --- a/ml/backend/ggml/ggml.go +++ b/ml/backend/ggml/ggml.go @@ -418,7 +418,7 @@ func New(modelPath string, params ml.BackendParams) (ml.Backend, error) { (*C.ggml_backend_buffer_type_t)(unsafe.Pointer(&schedBufts[0])), C.int(len(schedBackends)), C.size_t(maxGraphNodes), - C._Bool(len(gpus) > 1 && slices.Contains(gpus, output.d)), + C._Bool(false), C._Bool(false), ), schedBackends: schedBackends, From acef9b4c1b4bc97dba88ed02cc707635b96074de Mon Sep 17 00:00:00 2001 From: Jesse Gross Date: Mon, 7 Jul 2025 13:10:14 -0700 Subject: [PATCH 07/16] ggml: Use assigned layers when reporting loading stats Reporting params.NumGPULayers can be misleading because it is the requested number of layers, not the actual number that is loaded. While they are often the same, there are cases where they might mismatch, such as if the GPU backend is missing. 
--- ml/backend/ggml/ggml.go | 24 +++++++++++++----------- 1 file changed, 13 insertions(+), 11 deletions(-) diff --git a/ml/backend/ggml/ggml.go b/ml/backend/ggml/ggml.go index 7d6831ee..24347689 100644 --- a/ml/backend/ggml/ggml.go +++ b/ml/backend/ggml/ggml.go @@ -356,23 +356,25 @@ func New(modelPath string, params ml.BackendParams) (ml.Backend, error) { } // Mimic llama runner logs summarizing layers and memory - slog.Info(fmt.Sprintf("offloading %d repeating layers to GPU", max(0, params.NumGPULayers-1))) gpuLayers := 0 - switch C.ggml_backend_dev_type(output.d) { - case 0: // CPU - slog.Info("offloading output layer to CPU") - case 1: // GPU - slog.Info("offloading output layer to GPU") - gpuLayers++ - case 2: // ACCEL - slog.Info("offloading output layer to ACCEL") - } for _, layer := range layers { - if C.ggml_backend_dev_type(layer.d) == 1 { + if C.ggml_backend_dev_type(layer.d) == C.GGML_BACKEND_DEVICE_TYPE_GPU { gpuLayers++ } } + slog.Info(fmt.Sprintf("offloading %d repeating layers to GPU", gpuLayers)) + + switch C.ggml_backend_dev_type(output.d) { + case C.GGML_BACKEND_DEVICE_TYPE_CPU: + slog.Info("offloading output layer to CPU") + case C.GGML_BACKEND_DEVICE_TYPE_GPU: + slog.Info("offloading output layer to GPU") + gpuLayers++ + case C.GGML_BACKEND_DEVICE_TYPE_ACCEL: + slog.Info("offloading output layer to ACCEL") + } slog.Info(fmt.Sprintf("offloaded %d/%d layers to GPU", gpuLayers, len(layers)+1)) + for bs := range maps.Values(bbs) { slog.Info("model weights", "buffer", C.GoString(C.ggml_backend_buffer_name(bs)), "size", format.HumanBytes2(uint64(C.ggml_backend_buffer_get_size(bs)))) } From 4261a3b0b264430489921a1b4a16a6267711d595 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=E5=85=88=E7=9F=A5?= <85628682+sncix@users.noreply.github.com> Date: Fri, 11 Jul 2025 22:15:00 +0000 Subject: [PATCH 08/16] docs: update modelfile.md to reflect current default num_ctx (#11189) As in the commit 44b466eeb2e42e9ce2852c69d7cddb7ebac5daf8, the default context length has been increased to 4096. --- docs/modelfile.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/modelfile.md b/docs/modelfile.md index 6513873c..53a21714 100644 --- a/docs/modelfile.md +++ b/docs/modelfile.md @@ -150,7 +150,7 @@ PARAMETER | Parameter | Description | Value Type | Example Usage | | -------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ---------- | -------------------- | -| num_ctx | Sets the size of the context window used to generate the next token. (Default: 2048) | int | num_ctx 4096 | +| num_ctx | Sets the size of the context window used to generate the next token. (Default: 4096) | int | num_ctx 4096 | | repeat_last_n | Sets how far back for the model to look back to prevent repetition. (Default: 64, 0 = disabled, -1 = num_ctx) | int | repeat_last_n 64 | | repeat_penalty | Sets how strongly to penalize repetitions. A higher value (e.g., 1.5) will penalize repetitions more strongly, while a lower value (e.g., 0.9) will be more lenient. (Default: 1.1) | float | repeat_penalty 1.1 | | temperature | The temperature of the model. Increasing the temperature will make the model answer more creatively. 
(Default: 0.8) | float | temperature 0.7 | From 2e3fd86d482cb4e77e54179836ddd6a518e2300b Mon Sep 17 00:00:00 2001 From: Marcelo Fornet Date: Wed, 16 Jul 2025 19:50:46 +0200 Subject: [PATCH 09/16] docs: fix typo in macos.md (#11425) --- docs/macos.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/docs/macos.md b/docs/macos.md index 63bf14b1..9617bdc7 100644 --- a/docs/macos.md +++ b/docs/macos.md @@ -22,7 +22,7 @@ To install the Ollama application somewhere other than `Applications`, place the Ollama on MacOS stores files in a few different locations. - `~/.ollama` contains models and configuration - `~/.ollama/logs` contains logs - - *app.log* contains most resent logs from the GUI application + - *app.log* contains most recent logs from the GUI application - *server.log* contains the most recent server logs - `/Ollama.app/Contents/Resources/ollama` the CLI binary @@ -39,4 +39,4 @@ rm -rf ~/Library/Caches/com.electron.ollama/ rm -rf ~/Library/Caches/ollama rm -rf ~/Library/WebKit/com.electron.ollama rm -rf ~/.ollama -``` \ No newline at end of file +``` From 92c2e8a56c7eb9a5a99439133220d707710da0f8 Mon Sep 17 00:00:00 2001 From: Bruce MacDonald Date: Wed, 16 Jul 2025 11:03:28 -0700 Subject: [PATCH 10/16] api: fix unreachable status err (#11423) StatusError was unreachable, the client always checked for error messages in the response body first, and the server always includes error messages with HTTP error status codes. --- api/client.go | 8 ++++---- api/client_test.go | 10 ++++++++++ 2 files changed, 14 insertions(+), 4 deletions(-) diff --git a/api/client.go b/api/client.go index 9f0dba8d..7cc2acb3 100644 --- a/api/client.go +++ b/api/client.go @@ -222,10 +222,6 @@ func (c *Client) stream(ctx context.Context, method, path string, data any, fn f return fmt.Errorf("unmarshal: %w", err) } - if errorResponse.Error != "" { - return errors.New(errorResponse.Error) - } - if response.StatusCode >= http.StatusBadRequest { return StatusError{ StatusCode: response.StatusCode, @@ -234,6 +230,10 @@ func (c *Client) stream(ctx context.Context, method, path string, data any, fn f } } + if errorResponse.Error != "" { + return errors.New(errorResponse.Error) + } + if err := fn(bts); err != nil { return err } diff --git a/api/client_test.go b/api/client_test.go index 2ceeec9c..f0034e02 100644 --- a/api/client_test.go +++ b/api/client_test.go @@ -89,6 +89,16 @@ func TestClientStream(t *testing.T) { }, wantErr: "mid-stream error", }, + { + name: "http status error takes precedence over general error", + responses: []any{ + testError{ + message: "custom error message", + statusCode: http.StatusInternalServerError, + }, + }, + wantErr: "500", + }, { name: "successful stream completion", responses: []any{ From d73f8aa8c3979b33f5ea19b80406c20e88ee3b1b Mon Sep 17 00:00:00 2001 From: Parth Sareen Date: Wed, 16 Jul 2025 11:18:16 -0700 Subject: [PATCH 11/16] cmd: add default assistant role to message construction (#11431) --- cmd/cmd.go | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/cmd/cmd.go b/cmd/cmd.go index b569dddd..c661df4e 100644 --- a/cmd/cmd.go +++ b/cmd/cmd.go @@ -1080,10 +1080,11 @@ func chat(cmd *cobra.Command, opts runOptions) (*api.Message, error) { var state *displayResponseState = &displayResponseState{} var latest api.ChatResponse var fullResponse strings.Builder - var role string var thinkTagOpened bool = false var thinkTagClosed bool = false + role := "assistant" + fn := func(response api.ChatResponse) error { if response.Message.Content != "" || 
!opts.HideThinking { p.StopAndClear() From b4fe3adc0a97c160a6af71e7a2c49ceb31a8177c Mon Sep 17 00:00:00 2001 From: Michael Yang Date: Wed, 16 Jul 2025 17:32:57 -0700 Subject: [PATCH 12/16] compile bf16 support into ggml-metal (#11430) --- ml/backend/ggml/ggml/src/ggml-metal/metal.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/ml/backend/ggml/ggml/src/ggml-metal/metal.go b/ml/backend/ggml/ggml/src/ggml-metal/metal.go index 0ee017dd..bf20ab7f 100644 --- a/ml/backend/ggml/ggml/src/ggml-metal/metal.go +++ b/ml/backend/ggml/ggml/src/ggml-metal/metal.go @@ -4,6 +4,6 @@ package metal //go:generate sh -c "{ echo // Code generated by 'go generate'. DO NOT EDIT.; sed -e '/__embed_ggml-common.h__/r ../ggml-common.h' -e '/__embed_ggml-common.h__/d' -e '/#include \"ggml-metal-impl.h\"/r ggml-metal-impl.h' -e '/#include \"ggml-metal-impl.h\"/d' ggml-metal.metal; } >ggml-metal-embed.metal" -// #cgo CPPFLAGS: -DGGML_METAL_NDEBUG -DGGML_METAL_EMBED_LIBRARY -I.. -I../../include +// #cgo CPPFLAGS: -DGGML_METAL_NDEBUG -DGGML_METAL_EMBED_LIBRARY -DGGML_METAL_USE_BF16 -I.. -I../../include // #cgo LDFLAGS: -framework Metal -framework MetalKit import "C" From e840ccb5239c92f5f118fbdcb3288f844c4a9f8d Mon Sep 17 00:00:00 2001 From: Haiyue Wang Date: Thu, 17 Jul 2025 12:20:28 +0800 Subject: [PATCH 13/16] readme: update the llama.cpp github link (#11427) --- README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README.md b/README.md index 7ecbf4a7..7f5d4fb1 100644 --- a/README.md +++ b/README.md @@ -598,7 +598,7 @@ See the [API documentation](./docs/api.md) for all endpoints. ### Supported backends -- [llama.cpp](https://github.com/ggerganov/llama.cpp) project founded by Georgi Gerganov. +- [llama.cpp](https://github.com/ggml-org/llama.cpp) project founded by Georgi Gerganov. ### Observability - [Opik](https://www.comet.com/docs/opik/cookbook/ollama) is an open-source platform to debug, evaluate, and monitor your LLM applications, RAG systems, and agentic workflows with comprehensive tracing, automated evaluations, and production-ready dashboards. Opik supports native intergration to Ollama. 
From 5e67f4f90e13ce19eb103216bd151ce9f5fb9008 Mon Sep 17 00:00:00 2001 From: frob Date: Thu, 17 Jul 2025 12:31:49 +0800 Subject: [PATCH 14/16] openai: allow openai endpoint to accept webp images (#11412) Co-authored-by: Richard Lyons --- openai/openai.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/openai/openai.go b/openai/openai.go index 012189d2..35b8b9a0 100644 --- a/openai/openai.go +++ b/openai/openai.go @@ -423,7 +423,7 @@ func fromChatRequest(r ChatCompletionRequest) (*api.ChatRequest, error) { } } - types := []string{"jpeg", "jpg", "png"} + types := []string{"jpeg", "jpg", "png", "webp"} valid := false for _, t := range types { prefix := "data:image/" + t + ";base64," From 802ad16ce44312826526d9c6fa4374488a9f4e6c Mon Sep 17 00:00:00 2001 From: frob Date: Thu, 17 Jul 2025 15:16:10 +1000 Subject: [PATCH 15/16] docs: add the no-Modelfile function of `ollama create` (#9077) --- cmd/cmd.go | 4 ++-- docs/import.md | 2 ++ 2 files changed, 4 insertions(+), 2 deletions(-) diff --git a/cmd/cmd.go b/cmd/cmd.go index c661df4e..7955012c 100644 --- a/cmd/cmd.go +++ b/cmd/cmd.go @@ -1418,13 +1418,13 @@ func NewCLI() *cobra.Command { createCmd := &cobra.Command{ Use: "create MODEL", - Short: "Create a model from a Modelfile", + Short: "Create a model", Args: cobra.ExactArgs(1), PreRunE: checkServerHeartbeat, RunE: CreateHandler, } - createCmd.Flags().StringP("file", "f", "", "Name of the Modelfile (default \"Modelfile\"") + createCmd.Flags().StringP("file", "f", "", "Name of the Modelfile (default \"Modelfile\")") createCmd.Flags().StringP("quantize", "q", "", "Quantize model to this level (e.g. q4_K_M)") showCmd := &cobra.Command{ diff --git a/docs/import.md b/docs/import.md index df06ce4b..104b4162 100644 --- a/docs/import.md +++ b/docs/import.md @@ -53,6 +53,8 @@ FROM /path/to/safetensors/directory If you create the Modelfile in the same directory as the weights, you can use the command `FROM .`. +If you do not create the Modelfile, ollama will act as if there was a Modelfile with the command `FROM .`. + Now run the `ollama create` command from the directory where you created the `Modelfile`: ```shell From 191d94289d016b59c0553b14d299d1bac07a7fcd Mon Sep 17 00:00:00 2001 From: Daniel Hiltgen Date: Thu, 17 Jul 2025 07:33:44 -0700 Subject: [PATCH 16/16] ci: switch mac builder to arm64 (#11379) The macos-13 is x86, while macos-13-xlarge is arm64 --- .github/workflows/release.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/release.yaml b/.github/workflows/release.yaml index 4acb283b..40871e64 100644 --- a/.github/workflows/release.yaml +++ b/.github/workflows/release.yaml @@ -23,7 +23,7 @@ jobs: echo GOFLAGS="'-ldflags=-w -s \"-X=github.com/ollama/ollama/version.Version=${GITHUB_REF_NAME#v}\" \"-X=github.com/ollama/ollama/server.mode=release\"'" >>$GITHUB_OUTPUT darwin-build: - runs-on: macos-13 + runs-on: macos-13-xlarge environment: release needs: setup-environment strategy: