diff --git a/GPU/GLES/DepalettizeShaderGLES.cpp b/GPU/GLES/DepalettizeShaderGLES.cpp
index 52b1458b1373..b18ddd587675 100644
--- a/GPU/GLES/DepalettizeShaderGLES.cpp
+++ b/GPU/GLES/DepalettizeShaderGLES.cpp
@@ -53,6 +53,10 @@ DepalShaderCacheGLES::DepalShaderCacheGLES(Draw::DrawContext *draw) {
 	render_ = (GLRenderManager *)draw->GetNativeObject(Draw::NativeObject::RENDER_MANAGER);
 	// Pre-build the vertex program
 	useGL3_ = gl_extensions.GLES3 || gl_extensions.VersionGEThan(3, 3);
+	if (!gstate_c.Supports(GPU_SUPPORTS_32BIT_INT_FSHADER)) {
+		// Use the floating point path, it just can't handle the math.
+		useGL3_ = false;
+	}
 	vertexShaderFailed_ = false;
 	vertexShader_ = 0;
 
diff --git a/GPU/GLES/GPU_GLES.cpp b/GPU/GLES/GPU_GLES.cpp
index 2a75ead250e2..f4e152e9344c 100644
--- a/GPU/GLES/GPU_GLES.cpp
+++ b/GPU/GLES/GPU_GLES.cpp
@@ -190,11 +190,18 @@ void GPU_GLES::CheckGPUFeatures() {
 	}
 
 	if (gl_extensions.IsGLES) {
-		if (gl_extensions.GLES3)
+		if (gl_extensions.GLES3) {
 			features |= GPU_SUPPORTS_GLSL_ES_300;
+			// Mali reports 30 but works fine...
+			if (gl_extensions.range[1][5][1] >= 30) {
+				features |= GPU_SUPPORTS_32BIT_INT_FSHADER;
+			}
+		}
 	} else {
-		if (gl_extensions.VersionGEThan(3, 3, 0))
+		if (gl_extensions.VersionGEThan(3, 3, 0)) {
 			features |= GPU_SUPPORTS_GLSL_330;
+			features |= GPU_SUPPORTS_32BIT_INT_FSHADER;
+		}
 	}
 
 	if (gl_extensions.EXT_shader_framebuffer_fetch || gl_extensions.NV_shader_framebuffer_fetch || gl_extensions.ARM_shader_framebuffer_fetch) {
diff --git a/GPU/GLES/TextureCacheGLES.cpp b/GPU/GLES/TextureCacheGLES.cpp
index 04b629e1b13b..6536e8d91610 100644
--- a/GPU/GLES/TextureCacheGLES.cpp
+++ b/GPU/GLES/TextureCacheGLES.cpp
@@ -444,7 +444,9 @@ void TextureCacheGLES::ApplyTextureFramebuffer(TexCacheEntry *entry, VirtualFram
 	uint32_t clutMode = gstate.clutformat & 0xFFFFFF;
 
 	bool useShaderDepal = framebufferManager_->GetCurrentRenderVFB() != framebuffer && gstate_c.Supports(GPU_SUPPORTS_GLSL_ES_300);
-
+	if (!gstate_c.Supports(GPU_SUPPORTS_32BIT_INT_FSHADER)) {
+		useShaderDepal = false;
+	}
 	if ((entry->status & TexCacheEntry::STATUS_DEPALETTIZE) && !g_Config.bDisableSlowFramebufEffects) {
 		if (useShaderDepal) {
 			const GEPaletteFormat clutFormat = gstate.getClutPaletteFormat();
diff --git a/GPU/GPUState.h b/GPU/GPUState.h
index a73023927fc2..c2f61dcc5ac5 100644
--- a/GPU/GPUState.h
+++ b/GPU/GPUState.h
@@ -466,7 +466,6 @@ enum {
 	GPU_SUPPORTS_DUALSOURCE_BLEND = FLAG_BIT(0),
 	GPU_SUPPORTS_GLSL_ES_300 = FLAG_BIT(1),
 	GPU_SUPPORTS_GLSL_330 = FLAG_BIT(2),
-	GPU_SUPPORTS_UNPACK_SUBIMAGE = FLAG_BIT(3),
 	GPU_SUPPORTS_BLEND_MINMAX = FLAG_BIT(4),
 	GPU_SUPPORTS_LOGIC_OP = FLAG_BIT(5),
 	GPU_USE_DEPTH_RANGE_HACK = FLAG_BIT(6),
@@ -478,6 +477,7 @@
 	GPU_SUPPORTS_TEXTURE_FLOAT = FLAG_BIT(12),
 	GPU_SUPPORTS_16BIT_FORMATS = FLAG_BIT(13),
 	GPU_SUPPORTS_DEPTH_CLAMP = FLAG_BIT(14),
+	GPU_SUPPORTS_32BIT_INT_FSHADER = FLAG_BIT(15),
 	GPU_SUPPORTS_LARGE_VIEWPORTS = FLAG_BIT(16),
 	GPU_SUPPORTS_ACCURATE_DEPTH = FLAG_BIT(17),
 	GPU_SUPPORTS_VAO = FLAG_BIT(18),
diff --git a/UI/DevScreens.cpp b/UI/DevScreens.cpp
index 12d1ee1e147c..e6e3babc2043 100644
--- a/UI/DevScreens.cpp
+++ b/UI/DevScreens.cpp
@@ -405,6 +405,13 @@ void SystemInfoScreen::CreateViews() {
 #endif
 	if (GetGPUBackend() == GPUBackend::OPENGL) {
 		deviceSpecs->Add(new InfoItem(si->T("Core Context"), gl_extensions.IsCoreContext ? di->T("Active") : di->T("Inactive")));
+		int highp_int_min = gl_extensions.range[1][5][0];
+		int highp_int_max = gl_extensions.range[1][5][1];
+		if (highp_int_max != 0) {
+			char highp_int_range[512];
+			snprintf(highp_int_range, sizeof(highp_int_range), "Highp int range: %d-%d", highp_int_min, highp_int_max);
+			deviceSpecs->Add(new InfoItem(si->T("High precision int range"), highp_int_range));
+		}
 	}
 	deviceSpecs->Add(new ItemHeader(si->T("OS Information")));
 	deviceSpecs->Add(new InfoItem(si->T("Memory Page Size"), StringFromFormat(si->T("%d bytes"), GetMemoryProtectPageSize())));
diff --git a/ext/native/gfx_es2/gpu_features.cpp b/ext/native/gfx_es2/gpu_features.cpp
index 8bd8dc14aa58..fab660781db2 100644
--- a/ext/native/gfx_es2/gpu_features.cpp
+++ b/ext/native/gfx_es2/gpu_features.cpp
@@ -463,6 +463,13 @@ void CheckGLExtensions() {
 			glGetShaderPrecisionFormat(shaderTypes[st], precisions[p], gl_extensions.range[st][p], &gl_extensions.precision[st][p]);
 		}
 	}
+
+	// Now, Adreno lies. So let's override it.
+	if (gl_extensions.gpuVendor == GPU_VENDOR_QUALCOMM) {
+		WLOG("Detected Adreno - lowering int precision");
+		gl_extensions.range[1][5][0] = 15;
+		gl_extensions.range[1][5][1] = 15;
+	}
 }
 #endif
 