We read every piece of feedback and take your input very seriously.
To see all available qualifiers, see our documentation.
There was an error while loading. Please reload this page.
1 parent 853f7f0 · commit b39bc70 — Copy full SHA for b39bc70
aten/src/ATen/cuda/CUDABlas.cpp
@@ -191,7 +191,7 @@ static size_t _parseChosenWorkspaceSize() {
191
cudaDeviceProp* p = at::cuda::getDeviceProperties(c10::cuda::current_device());
192
// Keep workspace_size = 1024 for small Ampere GPUs
193
// See https://github.com/pytorch/pytorch/pull/120925#issuecomment-1977556485
194
- if (p->major == 8 && p->total_memory / 1073741824 >= 24) {
+ if (p->major == 8 && p->totalGlobalMem / 1073741824 >= 24) {
195
workspace_size = 4096;
196
} else if (p->major >= 9) {
197
workspace_size = 32768;
0 commit comments