Mirror of https://github.com/likelovewant/ollama-for-amd.git, synced 2025-12-21 22:33:56 +00:00.
The workaround has been moved into the underlying C++ code.
This reverts commit e4340667e3.
This commit is contained in:
@@ -349,9 +349,6 @@ func GPUDevices(ctx context.Context, runners []FilteredRunnerDiscovery) []ml.Dev
|
||||
}
|
||||
}
|
||||
|
||||
// Apply any iGPU workarounds
|
||||
iGPUWorkarounds(devices)
|
||||
|
||||
return devices
|
||||
}
|
||||
|
||||
@@ -601,32 +598,3 @@ func GetDevicesFromRunner(ctx context.Context, runner BaseRunner) ([]ml.DeviceIn
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func iGPUWorkarounds(devices []ml.DeviceInfo) {
|
||||
// short circuit if we have no iGPUs
|
||||
anyiGPU := false
|
||||
for i := range devices {
|
||||
if devices[i].Integrated {
|
||||
anyiGPU = true
|
||||
break
|
||||
}
|
||||
}
|
||||
if !anyiGPU {
|
||||
return
|
||||
}
|
||||
|
||||
memInfo, err := GetCPUMem()
|
||||
if err != nil {
|
||||
slog.Debug("failed to fetch system memory information for iGPU", "error", err)
|
||||
return
|
||||
}
|
||||
for i := range devices {
|
||||
if !devices[i].Integrated {
|
||||
continue
|
||||
}
|
||||
// NVIDIA iGPUs return useless free VRAM data which ignores system buff/cache
|
||||
if devices[i].Library == "CUDA" {
|
||||
devices[i].FreeMemory = memInfo.FreeMemory
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
Reference in New Issue
Block a user