Mirror of https://github.com/likelovewant/ollama-for-amd.git, synced 2025-12-21 22:33:56 +00:00
ggml: Avoid cudaMemsetAsync during memory fitting
We pass invalid pointers when we check the size of the required compute graph before fitting. Some CUDA APIs validate these pointers, but we can simply skip those calls during this phase. cudaMemsetAsync is one such API that we weren't skipping; previously we never took the code path that used it, but now that op_offload is enabled we can hit it in memory-pressured situations.
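The pattern the wrappers follow is small enough to show in isolation. Below is a minimal, self-contained sketch of it, not the ggml sources themselves: the standalone reserving_graph flag and the main() driver are illustrative, while cudaMemsetAsync, cudaSuccess, and cudaStream_t are the real CUDA runtime names used by the actual wrapper in common.cuh.

// Minimal sketch of the "reserve wrapper" pattern used in ggml-cuda/common.cuh.
// The global flag and the main() driver here are illustrative only.
#include <cuda_runtime.h>
#include <cstdio>

static bool reserving_graph = false;   // set while estimating graph size with placeholder pointers

// While reserving, the device pointer may be invalid, so skip the real call
// and report success; otherwise forward to the CUDA runtime.
static cudaError_t cudaMemsetAsyncReserve(void * devPtr, int value, size_t count, cudaStream_t stream = 0) {
    if (!reserving_graph) {
        return cudaMemsetAsync(devPtr, value, count, stream);
    }
    return cudaSuccess;
}

// Redirect every later use of cudaMemsetAsync in this translation unit to the wrapper.
#undef cudaMemsetAsync
#define cudaMemsetAsync cudaMemsetAsyncReserve

int main() {
    reserving_graph = true;                    // fitting phase: pointers are placeholders
    void * placeholder = (void *) 0x1;
    cudaError_t err = cudaMemsetAsync(placeholder, 0, 1024);
    printf("reserve phase: %s\n", cudaGetErrorString(err));   // prints "no error"

    reserving_graph = false;                   // real execution would use valid pointers here
    return 0;
}

The same redirection is already in place for cudaMemcpyAsync and cudaMemcpy2DAsync; the change below extends it to cudaMemsetAsync.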
ml/backend/ggml/ggml/src/ggml-cuda/common.cuh (vendored) | 10 ++++++++++
@@ -55,10 +55,20 @@ static cudaError_t cudaMemcpy2DAsyncReserve ( void* dst, size_t dpitch, const vo
     }
 }
 
+static cudaError_t cudaMemsetAsyncReserve ( void* devPtr, int value, size_t count, cudaStream_t stream = 0 ) {
+    if (!reserving_graph) {
+        return cudaMemsetAsync(devPtr, value, count, stream);
+    } else {
+        return cudaSuccess;
+    }
+}
+
 #undef cudaMemcpyAsync
 #define cudaMemcpyAsync cudaMemcpyAsyncReserve
 #undef cudaMemcpy2DAsync
 #define cudaMemcpy2DAsync cudaMemcpy2DAsyncReserve
+#undef cudaMemsetAsync
+#define cudaMemsetAsync cudaMemsetAsyncReserve
 
 #define STRINGIZE_IMPL(...) #__VA_ARGS__
 #define STRINGIZE(...) STRINGIZE_IMPL(__VA_ARGS__)