Linux: Experimentally apply Alchemy's AMD/Intel GPU memory detection fix - Thanks to Rye for this code
parent aa440ccd28
commit b4bc1b3286
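For context: the fix queries the vendor memory-info GL extensions directly rather than the Windows-only DirectX probe (lldxhardware). Below is a minimal standalone sketch of the two GL_NVX_gpu_memory_info queries the patch leans on; it is not viewer code, it assumes a current OpenGL context, and the enum values are the ones published in the extension spec.

    // Sketch: the two GL_NVX_gpu_memory_info queries used by this commit.
    // Both report kilobytes; a current OpenGL context is assumed.
    #include <GL/gl.h>

    #ifndef GL_GPU_MEMORY_INFO_DEDICATED_VIDMEM_NVX
    #define GL_GPU_MEMORY_INFO_DEDICATED_VIDMEM_NVX 0x9047          // total dedicated VRAM
    #endif
    #ifndef GL_GPU_MEMORY_INFO_CURRENT_AVAILABLE_VIDMEM_NVX
    #define GL_GPU_MEMORY_INFO_CURRENT_AVAILABLE_VIDMEM_NVX 0x9049  // currently free VRAM
    #endif

    // Total board memory in MB - queried once at init (see the initGL() hunk below).
    static int nvx_dedicated_vram_mb()
    {
        GLint kb = 0;
        glGetIntegerv(GL_GPU_MEMORY_INFO_DEDICATED_VIDMEM_NVX, &kb);
        return kb / 1024;
    }

    // Memory still free in MB - queried when the window layer is asked how much
    // VRAM remains (see the getAvailableVRAMMegabytes() hunk below).
    static int nvx_available_vram_mb()
    {
        GLint kb = 0;
        glGetIntegerv(GL_GPU_MEMORY_INFO_CURRENT_AVAILABLE_VIDMEM_NVX, &kb);
        return kb / 1024;
    }

Mesa can expose these vendor-branded extensions on other hardware as well, which is presumably why the commit title also mentions Intel.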
@@ -54,6 +54,13 @@
 #include "lldxhardware.h"
 #endif

+#if LL_SDL
+#include "SDL2/SDL_video.h"
+
+#define GLH_EXT_GET_PROC_ADDRESS SDL_GL_GetProcAddress
+#define ExtensionExists(exten, unused) SDL_GL_ExtensionSupported(exten);
+#endif
+
 #ifdef _DEBUG
 //#define GL_STATE_VERIFY
 #endif
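The two macros above route the viewer's extension and proc-address helpers through SDL2 when building with LL_SDL. A rough sketch of what they resolve to at a call site, assuming SDL2 has already created the GL context (the function and variable names here are illustrative only):

    // Sketch of what the SDL-backed macros expand to at a call site.
    // SDL_GL_ExtensionSupported and SDL_GL_GetProcAddress are real SDL2 calls;
    // both require a current GL context created through SDL.
    #include <SDL2/SDL_video.h>

    typedef void (*GLGenericFunc)(void);

    static bool probe_example()
    {
        // ExtensionExists("GL_NVX_gpu_memory_info", unused) becomes:
        bool has_nvx = SDL_GL_ExtensionSupported("GL_NVX_gpu_memory_info") == SDL_TRUE;

        // GLH_EXT_GET_PROC_ADDRESS("glDebugMessageCallback") becomes:
        GLGenericFunc fn = (GLGenericFunc)SDL_GL_GetProcAddress("glDebugMessageCallback");

        return has_nvx && fn != nullptr;
    }

Note that ExtensionExists as committed carries a trailing semicolon, so it only expands cleanly when used as a complete statement, which is how the initExtensions() hunk further down uses it.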
@@ -1003,9 +1010,9 @@ LLGLManager::LLGLManager() :
 //---------------------------------------------------------------------
 // Global initialization for GL
 //---------------------------------------------------------------------
+#if LL_WINDOWS && !LL_MESA_HEADLESS
 void LLGLManager::initWGL()
 {
-#if LL_WINDOWS && !LL_MESA_HEADLESS
     if (!glh_init_extensions("WGL_ARB_pixel_format"))
     {
         LL_WARNS("RenderInit") << "No ARB pixel format extensions" << LL_ENDL;
@@ -1043,8 +1050,8 @@ void LLGLManager::initWGL()
     {
         LL_WARNS("RenderInit") << "No ARB WGL render texture extensions" << LL_ENDL;
     }
-#endif
 }
+#endif

 // return false if unable (or unwilling due to old drivers) to init GL
 bool LLGLManager::initGL()
@@ -1133,7 +1140,7 @@ bool LLGLManager::initGL()
     // Trailing space necessary to keep "nVidia Corpor_ati_on" cards
     // from being recognized as ATI.
     // NOTE: AMD has been pretty good about not breaking this check, do not rename without good reason
-    if (mGLVendor.substr(0,4) == "ATI ")
+    if (mGLVendor.substr(0,4) == "ATI " || mGLVendor.find("AMD") != std::string::npos)
     {
         mGLVendorShort = "AMD";
         // *TODO: Fix this?
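The widened vendor test matters on Linux because Mesa's AMD drivers typically report a GL_VENDOR string containing "AMD" rather than the legacy "ATI " prefix. A small standalone sketch of the classification logic, using a hypothetical helper rather than the viewer's member variables:

    // Standalone sketch of the vendor classification in the hunk above.
    // classify_vendor is a hypothetical helper, not viewer code.
    #include <string>

    static std::string classify_vendor(const std::string& gl_vendor)
    {
        // Legacy "ATI " prefix (trailing space as in the viewer's check), plus a
        // substring test for newer drivers whose GL_VENDOR contains "AMD".
        if (gl_vendor.substr(0, 4) == "ATI " || gl_vendor.find("AMD") != std::string::npos)
            return "AMD";
        if (gl_vendor.find("NVIDIA") != std::string::npos)
            return "NVIDIA";
        if (gl_vendor.find("Intel") != std::string::npos)
            return "Intel";
        return "MISC";
    }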
@@ -1214,6 +1221,24 @@ bool LLGLManager::initGL()
     }
 #endif

+    // Ultimate fallbacks for linux and mesa
+    if (mHasNVXMemInfo && mVRAM == 0)
+    {
+        S32 dedicated_memory;
+        glGetIntegerv(GL_GPU_MEMORY_INFO_DEDICATED_VIDMEM_NVX, &dedicated_memory);
+        mVRAM = dedicated_memory/1024;
+        LL_INFOS("RenderInit") << "VRAM Detected (NVXMemInfo):" << mVRAM << LL_ENDL;
+    }
+
+    if (mHasATIMemInfo && mVRAM == 0)
+    { //ask the gl how much vram is free at startup and attempt to use no more than half of that
+        S32 meminfo[4];
+        glGetIntegerv(GL_TEXTURE_FREE_MEMORY_ATI, meminfo);
+
+        mVRAM = meminfo[0] / 1024;
+        LL_INFOS("RenderInit") << "VRAM Detected (ATIMemInfo):" << mVRAM << LL_ENDL;
+    }
+
     if (mVRAM < 256 && old_vram > 0)
     {
         // fall back to old method
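One caveat about the GL_ATI_meminfo fallback above: GL_TEXTURE_FREE_MEMORY_ATI reports memory that is currently free, not the board's total, which is why the in-line comment treats it only as a startup estimate. A sketch of the four values the query returns, per the extension spec (struct and function names are illustrative, not viewer code):

    // Sketch: what GL_ATI_meminfo hands back (all values in kilobytes).
    // Field meanings follow the GL_ATI_meminfo extension spec.
    #include <GL/gl.h>

    #ifndef GL_TEXTURE_FREE_MEMORY_ATI
    #define GL_TEXTURE_FREE_MEMORY_ATI 0x87FC
    #endif

    struct AtiMemInfo
    {
        GLint total_free_kb;        // [0] total free memory in the pool
        GLint largest_free_kb;      // [1] largest available free block in the pool
        GLint total_aux_free_kb;    // [2] total free auxiliary memory
        GLint largest_aux_free_kb;  // [3] largest auxiliary free block
    };

    static AtiMemInfo query_ati_texture_meminfo()
    {
        GLint raw[4] = {0, 0, 0, 0};
        glGetIntegerv(GL_TEXTURE_FREE_MEMORY_ATI, raw);  // requires a current context
        return AtiMemInfo{ raw[0], raw[1], raw[2], raw[3] };
    }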
@@ -1412,9 +1437,13 @@ void LLGLManager::initExtensions()

     // <FS:Zi> Linux support
     //#if (LL_WINDOWS || LL_LINUX) && !LL_MESA_HEADLESS
+    mHasATIMemInfo = ExtensionExists("GL_ATI_meminfo", gGLHExts.mSysExts); //Basic AMD method, also see mHasAMDAssociations
+    mHasNVXMemInfo = ExtensionExists("GL_NVX_gpu_memory_info", gGLHExts.mSysExts);
+
+    LL_DEBUGS("RenderInit") << "GL Probe: Getting symbols" << LL_ENDL;
+
 #if LL_WINDOWS
     // </FS:Zi>
-    LL_DEBUGS("RenderInit") << "GL Probe: Getting symbols" << LL_ENDL;

     // <FS:Zi> Linux support
     // #if LL_WINDOWS
@@ -79,7 +79,9 @@ public:
     bool initGL();
     void shutdownGL();

+#if LL_WINDOWS
     void initWGL(); // Initializes stupid WGL extensions
+#endif

     std::string getRawGLString(); // For sending to simulator

@@ -106,6 +108,8 @@ public:

     // Vendor-specific extensions
     bool mHasAMDAssociations = false;
+    bool mHasNVXMemInfo = false;
+    bool mHasATIMemInfo = false;

     BOOL mIsAMD;
     BOOL mIsNVIDIA;
@@ -2684,10 +2684,26 @@ std::vector<std::string> LLWindowSDL::getDynamicFallbackFontList()
 // <FS:Zi> Implement available VRAM estimation like MacOSX
 U32 LLWindowSDL::getAvailableVRAMMegabytes()
 {
+/*
     static const U32 mb = 1024*1024;
     // We're asked for total available gpu memory, but we only have allocation info on texture usage. So estimate by doubling that.
     static const U32 total_factor = 2; // estimated total/textures
     return gGLManager.mVRAM - (LLImageGL::getTextureBytesAllocated() * total_factor/mb);
+*/
+    LL_PROFILE_ZONE_SCOPED;
+    if (gGLManager.mHasNVXMemInfo)
+    {
+        S32 available_memory;
+        glGetIntegerv(GL_GPU_MEMORY_INFO_CURRENT_AVAILABLE_VIDMEM_NVX, &available_memory);
+        return available_memory / 1024;
+    }
+    else
+    {
+        static const U32 mb = 1024*1024;
+        // We're asked for total available gpu memory, but we only have allocation info on texture usage. So estimate by doubling that.
+        static const U32 total_factor = 2; // estimated total/textures
+        return (U32)llmax((U32)1, (U32)(gGLManager.mVRAM - (LLImageGL::getTextureBytesAllocated() * total_factor/mb)));
+    }
 }
 // </FS:Zi>

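For a feel of the fallback arithmetic in getAvailableVRAMMegabytes(): with, say, gGLManager.mVRAM detected as 4096 MB and 512 MB of textures currently allocated, doubling the texture figure estimates 1024 MB of total GPU use, so the non-NVX branch reports roughly 4096 - 1024 = 3072 MB, with the llmax keeping the reported value at a minimum of 1 MB. The NVX branch skips the heuristic entirely and returns whatever GL_GPU_MEMORY_INFO_CURRENT_AVAILABLE_VIDMEM_NVX reports, converted from KB to MB.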