#1186 Fix mesh queues getting overfilled

Account for queue size instead of just active requests
Reduce mutex locking
Prioritize the skin info queue over the LOD queue to lessen issues with T-poses
master
Andrey Kleshchev 2024-11-19 19:47:23 +02:00
parent 8e17f0c094
commit e20f4c0e4f
2 changed files with 95 additions and 47 deletions
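The "account for queue size" part of the message is the notifyLoadedMeshes hunk near the end of the first file: requests already sitting in the worker-thread queues now count against the high-water mark, so the main thread stops feeding the queues once active plus queued work reaches the cap, instead of looking only at in-flight HTTP requests. A minimal standalone sketch of that accounting, with hypothetical names and plain integers standing in for the viewer's counters:

// Hypothetical, simplified stand-ins for LLMeshRepoThread's counters
// (sActiveHeaderRequests, sActiveLODRequests, sActiveSkinRequests),
// its request queues, and sRequestHighWater.
struct MeshQueueStats
{
    int active_header = 0; // in-flight header fetches
    int active_lod = 0;    // in-flight LOD fetches
    int active_skin = 0;   // in-flight skin info fetches
    int queued = 0;        // requests still waiting in the worker queues
};

// How many new requests the main thread may hand over this frame.
// Queued work now counts toward the cap; counting only the active
// requests lets the queues themselves grow without bound.
int requests_to_push(const MeshQueueStats& stats, int high_water)
{
    int total = stats.active_header + stats.active_lod + stats.active_skin + stats.queued;
    return (total < high_water) ? (high_water - total) : 0;
}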

@@ -933,6 +933,50 @@ void LLMeshRepoThread::run()
// in relatively similar manners, remake code to simplify/unify the process,
// like processRequests(&requestQ, fetchFunction); which does same thing for each element
if (mHttpRequestSet.size() < sRequestHighWater
    && !mSkinRequests.empty())
{
    if (!mSkinRequests.empty())
    {
        std::list<UUIDBasedRequest> incomplete;
        while (!mSkinRequests.empty() && mHttpRequestSet.size() < sRequestHighWater)
        {
            mMutex->lock();
            auto req = mSkinRequests.front();
            mSkinRequests.pop_front();
            mMutex->unlock();
            if (req.isDelayed())
            {
                incomplete.emplace_back(req);
            }
            else if (!fetchMeshSkinInfo(req.mId, req.canRetry()))
            {
                if (req.canRetry())
                {
                    req.updateTime();
                    incomplete.emplace_back(req);
                }
                else
                {
                    LLMutexLock locker(mMutex);
                    mSkinUnavailableQ.push_back(req);
                    LL_DEBUGS() << "mSkinReqQ failed: " << req.mId << LL_ENDL;
                }
            }
        }

        if (!incomplete.empty())
        {
            LLMutexLock locker(mMutex);
            for (const auto& req : incomplete)
            {
                mSkinRequests.push_back(req);
            }
        }
    }
}

if (!mLODReqQ.empty() && mHttpRequestSet.size() < sRequestHighWater)
{
    std::list<LODRequest> incomplete;
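The Todo comment at the top of this hunk ("processRequests(&requestQ, fetchFunction)") is not implemented by this commit; the skin, LOD and header loops remain separate copies. A rough sketch of what such a shared helper might look like, using a simplified Request type and std::mutex/std::deque in place of the viewer's LLMutex and request classes:

#include <cstddef>
#include <deque>
#include <list>
#include <mutex>

// Simplified stand-in for UUIDBasedRequest / LODRequest / HeaderRequest.
struct Request
{
    bool isDelayed() const { return delayed; }
    bool canRetry() const { return retries_left > 0; }
    void updateTime() { /* record the retry timestamp */ }

    bool delayed = false;
    int retries_left = 3;
};

// One generic loop instead of several near-identical ones: drain the queue
// while there is HTTP capacity, retry or drop each request, and push the
// unfinished ones back under the lock in a single batch.
template <typename FetchFn, typename FailFn>
void processRequests(std::mutex& mutex, std::deque<Request>& queue,
                     std::size_t active, std::size_t high_water,
                     FetchFn fetch, FailFn on_permanent_failure)
{
    std::list<Request> incomplete;
    while (active < high_water)
    {
        Request req;
        {
            std::lock_guard<std::mutex> lock(mutex);
            if (queue.empty())
                break;
            req = queue.front();
            queue.pop_front();
        }

        if (req.isDelayed())
        {
            incomplete.push_back(req);
        }
        else if (!fetch(req))
        {
            if (req.canRetry())
            {
                req.updateTime();
                incomplete.push_back(req);
            }
            else
            {
                on_permanent_failure(req);
            }
        }
        else
        {
            ++active; // assume each successful fetch adds one HTTP request
        }
    }

    if (!incomplete.empty())
    {
        std::lock_guard<std::mutex> lock(mutex);
        for (const Request& req : incomplete)
            queue.push_back(req);
    }
}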
@@ -982,50 +1026,6 @@ void LLMeshRepoThread::run()
    }
}

if (mHttpRequestSet.size() < sRequestHighWater
    && !mSkinRequests.empty())
{
    if (!mSkinRequests.empty())
    {
        std::list<UUIDBasedRequest> incomplete;
        while (!mSkinRequests.empty() && mHttpRequestSet.size() < sRequestHighWater)
        {
            mMutex->lock();
            auto req = mSkinRequests.front();
            mSkinRequests.pop_front();
            mMutex->unlock();
            if (req.isDelayed())
            {
                incomplete.emplace_back(req);
            }
            else if (!fetchMeshSkinInfo(req.mId, req.canRetry()))
            {
                if (req.canRetry())
                {
                    req.updateTime();
                    incomplete.emplace_back(req);
                }
                else
                {
                    LLMutexLock locker(mMutex);
                    mSkinUnavailableQ.push_back(req);
                    LL_DEBUGS() << "mSkinReqQ failed: " << req.mId << LL_ENDL;
                }
            }
        }

        if (!incomplete.empty())
        {
            LLMutexLock locker(mMutex);
            for (const auto& req : incomplete)
            {
                mSkinRequests.push_back(req);
            }
        }
    }
}

if (!mHeaderReqQ.empty() && mHttpRequestSet.size() < sRequestHighWater)
{
    std::list<HeaderRequest> incomplete;
@@ -1234,6 +1234,42 @@ void LLMeshRepoThread::loadMeshLOD(const LLVolumeParams& mesh_params, S32 lod)
    }
}

void LLMeshRepoThread::loadMeshLODs(const lod_list_t& list)
{ //could be called from any thread
    LLMutexLock lock(mMutex);
    LLMutexLock header_lock(mHeaderMutex);
    for (auto lod_pair : list)
    {
        const LLVolumeParams& mesh_params = lod_pair.first;
        const LLUUID& mesh_id = mesh_params.getSculptID();
        S32 lod = lod_pair.second;
        mesh_header_map::iterator iter = mMeshHeader.find(mesh_id);
        if (iter != mMeshHeader.end())
        { // if we have the header, request LOD byte range
            LODRequest req(mesh_params, lod);
            {
                mLODReqQ.push(req);
                LLMeshRepository::sLODProcessing++;
            }
        }
        else
        {
            HeaderRequest req(mesh_params);
            pending_lod_map::iterator pending = mPendingLOD.find(mesh_id);
            if (pending != mPendingLOD.end())
            { // append this lod request to existing header request
                pending->second.push_back(lod);
                llassert(pending->second.size() <= LLModel::NUM_LODS);
            }
            else
            { // if no header request is pending, fetch header
                mHeaderReqQ.push(req);
                mPendingLOD[mesh_id].push_back(lod);
            }
        }
    }
}

// Mutex: must be holding mMutex when called
void LLMeshRepoThread::setGetMeshCap(const std::string & mesh_cap)
{
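loadMeshLODs above takes mMutex and mHeaderMutex once for the whole list, whereas queueing LODs one at a time through loadMeshLOD acquires the lock for every request (the "reduce mutex locking" part of the commit message). A reduced illustration of that batching pattern, with a made-up WorkItem type and std::mutex instead of LLMutex:

#include <mutex>
#include <queue>
#include <vector>

struct WorkItem { int id = 0; }; // hypothetical request payload

class BatchQueue
{
public:
    // One lock acquisition per item: fine for occasional requests,
    // expensive when a caller has many of them in a single frame.
    void push_one(const WorkItem& item)
    {
        std::lock_guard<std::mutex> lock(mMutex);
        mQueue.push(item);
    }

    // One lock acquisition per batch: the pattern loadMeshLODs uses.
    void push_many(const std::vector<WorkItem>& items)
    {
        std::lock_guard<std::mutex> lock(mMutex);
        for (const WorkItem& item : items)
            mQueue.push(item);
    }

private:
    std::mutex mMutex;
    std::queue<WorkItem> mQueue;
};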
@@ -4032,8 +4068,12 @@ void LLMeshRepository::notifyLoadedMeshes()
    mUploadErrorQ.pop();
}

// mPendingRequests go into queues, queues go into active http requests.
// Checking sRequestHighWater to keep queues at least somewhat populated
// for faster transition into http
S32 active_count = LLMeshRepoThread::sActiveHeaderRequests + LLMeshRepoThread::sActiveLODRequests + LLMeshRepoThread::sActiveSkinRequests;
if (active_count < LLMeshRepoThread::sRequestLowWater)
active_count += (S32)(mThread->mLODReqQ.size() + mThread->mHeaderReqQ.size() + mThread->mSkinInfoQ.size());
if (active_count < LLMeshRepoThread::sRequestHighWater)
{
    S32 push_count = LLMeshRepoThread::sRequestHighWater - active_count;
@@ -4089,7 +4129,8 @@ void LLMeshRepository::notifyLoadedMeshes()
    std::partial_sort(mPendingRequests.begin(), mPendingRequests.begin() + push_count,
        mPendingRequests.end(), PendingRequestBase::CompareScoreGreater());
}
LLMeshRepoThread::lod_list_t pending_lods; // to avoid locking on each operation, make a list beforehand
pending_lods.reserve(push_count);
while (!mPendingRequests.empty() && push_count > 0)
{
    std::unique_ptr<PendingRequestBase>& req_p = mPendingRequests.front();
@@ -4098,7 +4139,7 @@ void LLMeshRepository::notifyLoadedMeshes()
case MESH_REQUEST_LOD:
    {
        PendingRequestLOD* lod = (PendingRequestLOD*)req_p.get();
        mThread->loadMeshLOD(lod->mMeshParams, lod->mLOD);
        pending_lods.emplace_back(lod->mMeshParams, lod->mLOD);
        LLMeshRepository::sLODPending--;
        break;
    }
@@ -4116,6 +4157,10 @@ void LLMeshRepository::notifyLoadedMeshes()
        mPendingRequests.erase(mPendingRequests.begin());
        push_count--;
    }
    if (!pending_lods.empty())
    {
        mThread->loadMeshLODs(pending_lods);
    }
}

//send decomposition requests
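On the caller's side (the notifyLoadedMeshes hunks above), LOD requests are now collected into a local pending_lods vector while the score-sorted pending list is walked, and handed to the worker thread in one loadMeshLODs call. A schematic of that select-then-batch flow, with a hypothetical PendingLOD type standing in for PendingRequestLOD:

#include <algorithm>
#include <cstddef>
#include <vector>

// Hypothetical stand-in for PendingRequestLOD.
struct PendingLOD
{
    float score = 0.f;
    int mesh_id = 0; // stands in for LLVolumeParams / LLUUID
    int lod = 0;
};

// Score-sort the pending requests, take as many as the budget allows,
// and dispatch the chosen LOD requests as one batch (one cross-thread
// handoff and one lock) rather than one call per request.
template <typename BatchDispatchFn>
void dispatch_pending(std::vector<PendingLOD>& pending, std::size_t budget,
                      BatchDispatchFn dispatch_batch)
{
    const std::size_t count = std::min(budget, pending.size());

    // Only the 'count' highest-scoring elements need to be in order.
    std::partial_sort(pending.begin(), pending.begin() + count, pending.end(),
                      [](const PendingLOD& a, const PendingLOD& b) { return a.score > b.score; });

    std::vector<PendingLOD> batch(pending.begin(), pending.begin() + count);
    pending.erase(pending.begin(), pending.begin() + count);

    if (!batch.empty())
        dispatch_batch(batch);
}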

@@ -466,6 +466,9 @@ public:
    void lockAndLoadMeshLOD(const LLVolumeParams& mesh_params, S32 lod);
    void loadMeshLOD(const LLVolumeParams& mesh_params, S32 lod);

    typedef std::vector<std::pair<const LLVolumeParams&, S32> > lod_list_t;
    void loadMeshLODs(const lod_list_t& mesh_vect);

    bool fetchMeshHeader(const LLVolumeParams& mesh_params, bool can_retry = true);
    bool fetchMeshLOD(const LLVolumeParams& mesh_params, S32 lod, bool can_retry = true);
    EMeshProcessingResult headerReceived(const LLVolumeParams& mesh_params, U8* data, S32 data_size);