Merge branch 'main' of https://github.com/secondlife/viewer into DRTVWR-600-maint-A

# Conflicts:
#   indra/llcommon/llapp.cpp
#   indra/llcommon/llapp.h
#   indra/llimage/llimageworker.cpp
#   indra/llui/llcontainerview.cpp
#   indra/llui/llcontainerview.h
#   indra/llui/llkeywords.cpp
#   indra/llui/lltabcontainer.cpp
#   indra/llui/lltextbase.cpp
#   indra/newview/llappviewer.cpp
#   indra/newview/llfavoritesbar.cpp
#   indra/newview/llfavoritesbar.h
#   indra/newview/llfloaterimnearbychathandler.cpp
#   indra/newview/llfloaterpreference.cpp
#   indra/newview/llhudnametag.h
#   indra/newview/llinventorypanel.cpp
#   indra/newview/llinventorypanel.h
#   indra/newview/llmeshrepository.cpp
#   indra/newview/lloutfitgallery.cpp
#   indra/newview/lloutfitslist.cpp
#   indra/newview/llpaneleditwearable.cpp
#   indra/newview/llpanelprofilepicks.cpp
#   indra/newview/llpanelvoicedevicesettings.h
#   indra/newview/llpreviewscript.cpp
#   indra/newview/llpreviewscript.h
#   indra/newview/llselectmgr.cpp
#   indra/newview/lltranslate.cpp
#   indra/newview/llviewerassetupload.cpp
#   indra/newview/llviewermessage.cpp

commit d0102af56d
@@ -73,7 +73,7 @@ jobs:
      uses: actions/checkout@v4
      with:
        repository: secondlife/build-variables
-       ref: viewer
+       ref: master
        path: .build-variables

    - name: Checkout master-message-template
@@ -1380,11 +1380,11 @@
      <key>archive</key>
      <map>
        <key>hash</key>
-       <string>e50ea94bbaa4ff41bf53b84b7192df1a694c5337</string>
+       <string>3a3e14563cd5fc019c3f139b82aa46ec79847709</string>
        <key>hash_algorithm</key>
        <string>sha1</string>
        <key>url</key>
-       <string>https://github.com/secondlife/llca/releases/download/v202310121525.0-d22bd98/llca-202310121530.0-common-d22bd98.tar.zst</string>
+       <string>https://github.com/secondlife/llca/releases/download/v202312051403.17-0f5d9c3/llca-202312051404.0-common-0f5d9c3.tar.zst</string>
      </map>
      <key>name</key>
      <string>common</string>
@@ -1398,7 +1398,7 @@
      <string>Copyright (c) 2016, Linden Research, Inc.; data provided by the Mozilla NSS Project.
</string>
      <key>version</key>
-     <string>202310121530.0</string>
+     <string>202312051404.0</string>
      <key>name</key>
      <string>llca</string>
    </map>
build.sh
@@ -112,7 +112,8 @@ installer_CYGWIN()
    fi
}

-[[ -n "$GITHUB_OUTPUT" ]] || fatal "Need to export GITHUB_OUTPUT"
+# if someone wants to run build.sh outside the GitHub environment
+[[ -n "$GITHUB_OUTPUT" ]] || export GITHUB_OUTPUT='/dev/null'
# The following is based on the Warning for GitHub multiline output strings:
# https://docs.github.com/en/actions/using-workflows/workflow-commands-for-github-actions#multiline-strings
EOF=$(dd if=/dev/urandom bs=15 count=1 status=none | base64)
@@ -294,6 +294,7 @@ Beq Janus
    SL-18637
    SL-19317
    SL-19660
+   SL-20610
Beth Walcher
Bezilon Kasei
Biancaluce Robbiani
@@ -119,11 +119,11 @@ public:
     * ClassicCallback must not itself be copied or moved! Once you've passed
     * get_userdata() to some API, this object MUST remain at that address.
     */
-    // However, we can't yet count on C++17 Class Template Argument Deduction,
-    // which means makeClassicCallback() is still useful, which means we MUST
-    // be able to return one to construct into caller's instance (move ctor).
-    // Possible defense: bool 'referenced' data member set by get_userdata(),
-    // with an llassert_always(! referenced) check in the move constructor.
+    // However, makeClassicCallback() is useful for deducing the CALLABLE
+    // type, which means we MUST be able to return one to construct into
+    // caller's instance (move ctor). Possible defense: bool 'referenced' data
+    // member set by get_userdata(), with an llassert_always(! referenced)
+    // check in the move constructor.
    ClassicCallback(ClassicCallback const&) = delete;
    ClassicCallback(ClassicCallback&&) = default; // delete;
    ClassicCallback& operator=(ClassicCallback const&) = delete;
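The rewritten comment keeps the same proposed safeguard: a "referenced" flag set when get_userdata() hands out the object's address, then checked before allowing a move. A minimal sketch of that idea, with illustrative names rather than the viewer's actual ClassicCallback code:

// Hypothetical sketch of the "referenced" guard described in the comment above;
// class and member names are illustrative, not the viewer's actual code.
#include <cassert>

class GuardedCallback
{
public:
    GuardedCallback() = default;

    // Moving is only legal while no external API holds our address.
    GuardedCallback(GuardedCallback&& other)
    {
        assert(!other.mReferenced); // stands in for llassert_always(! referenced)
    }

    // Once a caller takes the userdata pointer, the object must stay put.
    void* get_userdata()
    {
        mReferenced = true;
        return this;
    }

private:
    bool mReferenced{ false }; // set once our address has been handed out
};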
@@ -96,7 +96,6 @@ bool LLApp::sLogInSignal = false;
// Keeps track of application status
LLScalarCond<LLApp::EAppStatus> LLApp::sStatus{LLApp::APP_STATUS_STOPPED};
LLAppErrorHandler LLApp::sErrorHandler = NULL;
bool LLApp::sErrorThreadRunning = false;

LLApp::LLApp()
@@ -682,13 +681,8 @@ void default_unix_signal_handler(int signum, siginfo_t *info, void *)
        return;
    }

-   // Flag status to ERROR, so thread_error does its work.
+   // Flag status to ERROR
    LLApp::setError();
-   // Block in the signal handler until somebody says that we're done.
-   while (LLApp::sErrorThreadRunning && !LLApp::isStopped())
-   {
-       ms_sleep(10);
-   }

    if (LLApp::sLogInSignal)
    {
@@ -291,7 +291,6 @@ protected:

    static void setStatus(EAppStatus status);   // Use this to change the application status.
    static LLScalarCond<EAppStatus> sStatus;    // Reflects current application status
    static bool sErrorThreadRunning;            // Set while the error thread is running
    static bool sDisableCrashlogger;            // Let the OS handle crashes for us.
    std::wstring mCrashReportPipeStr;           // Name of pipe to use for crash reporting.

@@ -42,7 +42,7 @@ std::string LLBase64::encode(const U8* input, size_t input_size)
        && input_size > 0)
    {
        // Yes, it returns int.
-       int b64_buffer_length = apr_base64_encode_len(narrow(input_size));
+       int b64_buffer_length = apr_base64_encode_len(narrow<size_t>(input_size));
        char* b64_buffer = new char[b64_buffer_length];

        // This is faster than apr_base64_encode() if you know
@@ -52,7 +52,7 @@ std::string LLBase64::encode(const U8* input, size_t input_size)
        b64_buffer_length = apr_base64_encode_binary(
            b64_buffer,
            input,
-           narrow(input_size));
+           narrow<size_t>(input_size));
        output.assign(b64_buffer);
        delete[] b64_buffer;
    }
@@ -123,7 +123,7 @@ LLCoros::LLCoros():
    // Previously we used
    // boost::context::guarded_stack_allocator::default_stacksize();
    // empirically this is insufficient.
-   mStackSize(768*1024),
+   mStackSize(900*1024),
    // mCurrent does NOT own the current CoroData instance -- it simply
    // points to it. So initialize it with a no-op deleter.
    mCurrent{ [](CoroData*){} }
@@ -59,7 +59,7 @@ public:

#ifdef _DEBUG
    // This function runs tests to make sure the crc is
-   // working. Returns TRUE if it is.
+   // working. Returns true if it is.
    static bool testHarness();
#endif
};
@@ -87,11 +87,10 @@ protected:
    }
    void addEntry(Index index, Entry *entry)
    {
-       if (lookup(index))
+       if (!this->emplace(index, entry).second)
        {
            LL_ERRS() << "Dictionary entry already added (attempted to add duplicate entry)" << LL_ENDL;
        }
-       (*this)[index] = entry;
    }
};

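The new addEntry() leans on the fact that map emplace() reports, via the bool in its returned pair, whether the key was already present, so the duplicate check and the insertion happen in one call. A small standalone illustration of that idiom (plain std::map, not the viewer's dictionary type):

#include <iostream>
#include <map>
#include <string>

int main()
{
    std::map<int, std::string> dict;

    // emplace() returns {iterator, bool}; the bool is false if the key already existed.
    auto first  = dict.emplace(42, "first");
    auto second = dict.emplace(42, "second"); // duplicate key: no insertion happens

    std::cout << std::boolalpha
              << "first insert: "      << first.second      // true
              << ", duplicate insert: " << second.second    // false
              << ", stored value: "     << dict[42] << '\n'; // "first"
}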
@@ -91,7 +91,7 @@ template< class T >
class LLKeyThrottle
{
public:
-   // @param realtime = FALSE for frame-based throttle, TRUE for usec
+   // @param realtime = false for frame-based throttle, true for usec
    // real-time throttle
    LLKeyThrottle(U32 limit, F32 interval, bool realtime = true)
        : m(* new LLKeyThrottleImpl<T>)
@@ -325,7 +325,7 @@ public:

protected:
    LLKeyThrottleImpl<T>& m;
-   bool mIsRealtime;   // TRUE to be time based (default), FALSE for frame based
+   bool mIsRealtime;   // true to be time based (default), false for frame based
};

#endif
@@ -88,7 +88,7 @@ LLMetricPerformanceTesterBasic* LLMetricPerformanceTesterBasic::getTester(std::s
}

/*static*/
-// Return TRUE if this metric is requested or if the general default "catch all" metric is requested
+// Return true if this metric is requested or if the general default "catch all" metric is requested
bool LLMetricPerformanceTesterBasic::isMetricLogRequested(std::string name)
{
    return (LLTrace::BlockTimer::sMetricLog && ((LLTrace::BlockTimer::sLogName == name) || (LLTrace::BlockTimer::sLogName == DEFAULT_METRIC_NAME)));
@@ -122,7 +122,7 @@ private:

    std::string mName ;                         // Name of this tester instance
    S32 mCount ;                                // Current record count
-   bool mValidInstance;                        // TRUE if the instance is managed by the map
+   bool mValidInstance;                        // true if the instance is managed by the map
    std::vector< std::string > mMetricStrings ; // Metrics strings

    // Static members managing the collection of testers
@@ -144,13 +144,13 @@ public:
    static void deleteTester(std::string name);

    /**
-    * @return Returns TRUE if that metric *or* the default catch all metric has been requested to be logged
+    * @return Returns true if that metric *or* the default catch all metric has been requested to be logged
     * @param[in] name - Name of the tester queried.
     */
    static bool isMetricLogRequested(std::string name);

    /**
-    * @return Returns TRUE if there's a tester defined, FALSE otherwise.
+    * @return Returns true if there's a tester defined, false otherwise.
     */
    static bool hasMetricPerformanceTesters() { return !sTesterMap.empty() ;}
    /**
@@ -46,33 +46,32 @@
template <class Type> class LLPointer
{
public:

    LLPointer() :
        mPointer(NULL)
    {
    }

    LLPointer(Type* ptr) :
        mPointer(ptr)
    {
        ref();
    }

    LLPointer(const LLPointer<Type>& ptr) :
        mPointer(ptr.mPointer)
    {
        ref();
    }

-   // support conversion up the type hierarchy. See Item 45 in Effective C++, 3rd Ed.
+   // Support conversion up the type hierarchy. See Item 45 in Effective C++, 3rd Ed.
    template<typename Subclass>
    LLPointer(const LLPointer<Subclass>& ptr) :
        mPointer(ptr.get())
    {
        ref();
    }

    ~LLPointer()
    {
        unref();
    }
@@ -83,39 +82,39 @@ public:
    const Type& operator*() const { return *mPointer; }
    Type& operator*() { return *mPointer; }

    operator BOOL() const { return (mPointer != NULL); }
    operator bool() const { return (mPointer != NULL); }
    bool operator!() const { return (mPointer == NULL); }
    bool isNull() const { return (mPointer == NULL); }
    bool notNull() const { return (mPointer != NULL); }

    operator Type*() const { return mPointer; }
    bool operator !=(Type* ptr) const { return (mPointer != ptr); }
    bool operator ==(Type* ptr) const { return (mPointer == ptr); }
    bool operator ==(const LLPointer<Type>& ptr) const { return (mPointer == ptr.mPointer); }
    bool operator < (const LLPointer<Type>& ptr) const { return (mPointer < ptr.mPointer); }
    bool operator > (const LLPointer<Type>& ptr) const { return (mPointer > ptr.mPointer); }

    LLPointer<Type>& operator =(Type* ptr)
    {
        assign(ptr);
        return *this;
    }

    LLPointer<Type>& operator =(const LLPointer<Type>& ptr)
    {
        assign(ptr);
        return *this;
    }

    // support assignment up the type hierarchy. See Item 45 in Effective C++, 3rd Ed.
    template<typename Subclass>
    LLPointer<Type>& operator =(const LLPointer<Subclass>& ptr)
    {
        assign(ptr.get());
        return *this;
    }


    // Just exchange the pointers, which will not change the reference counts.
    static void swap(LLPointer<Type>& a, LLPointer<Type>& b)
    {
@@ -129,16 +128,6 @@ protected:
    void ref();
    void unref();
#else
-
-   void assign(const LLPointer<Type>& ptr)
-   {
-       if( mPointer != ptr.mPointer )
-       {
-           unref();
-           mPointer = ptr.mPointer;
-           ref();
-       }
-   }
    void ref()
    {
        if (mPointer)
@@ -161,7 +150,18 @@ protected:
            }
        }
    }
-#endif
+#endif // LL_LIBRARY_INCLUDE
+
+   void assign(const LLPointer<Type>& ptr)
+   {
+       if (mPointer != ptr.mPointer)
+       {
+           unref();
+           mPointer = ptr.mPointer;
+           ref();
+       }
+   }

protected:
    Type* mPointer;
};
@@ -169,18 +169,18 @@ protected:
template <class Type> class LLConstPointer
{
public:
    LLConstPointer() :
        mPointer(NULL)
    {
    }

    LLConstPointer(const Type* ptr) :
        mPointer(ptr)
    {
        ref();
    }

    LLConstPointer(const LLConstPointer<Type>& ptr) :
        mPointer(ptr.mPointer)
    {
        ref();
@@ -188,7 +188,7 @@ public:

    // support conversion up the type hierarchy. See Item 45 in Effective C++, 3rd Ed.
    template<typename Subclass>
    LLConstPointer(const LLConstPointer<Subclass>& ptr) :
        mPointer(ptr.get())
    {
        ref();
@@ -203,55 +203,55 @@ public:
    const Type* operator->() const { return mPointer; }
    const Type& operator*() const { return *mPointer; }

    operator BOOL() const { return (mPointer != NULL); }
    operator bool() const { return (mPointer != NULL); }
    bool operator!() const { return (mPointer == NULL); }
    bool isNull() const { return (mPointer == NULL); }
    bool notNull() const { return (mPointer != NULL); }

    operator const Type*() const { return mPointer; }
    bool operator !=(const Type* ptr) const { return (mPointer != ptr); }
    bool operator ==(const Type* ptr) const { return (mPointer == ptr); }
    bool operator ==(const LLConstPointer<Type>& ptr) const { return (mPointer == ptr.mPointer); }
    bool operator < (const LLConstPointer<Type>& ptr) const { return (mPointer < ptr.mPointer); }
    bool operator > (const LLConstPointer<Type>& ptr) const { return (mPointer > ptr.mPointer); }

    LLConstPointer<Type>& operator =(const Type* ptr)
    {
        if( mPointer != ptr )
        {
            unref();
            mPointer = ptr;
            ref();
        }

        return *this;
    }

    LLConstPointer<Type>& operator =(const LLConstPointer<Type>& ptr)
    {
        if( mPointer != ptr.mPointer )
        {
            unref();
            mPointer = ptr.mPointer;
            ref();
        }
        return *this;
    }

    // support assignment up the type hierarchy. See Item 45 in Effective C++, 3rd Ed.
    template<typename Subclass>
    LLConstPointer<Type>& operator =(const LLConstPointer<Subclass>& ptr)
    {
        if( mPointer != ptr.get() )
        {
            unref();
            mPointer = ptr.get();
            ref();
        }
        return *this;
    }


    // Just exchange the pointers, which will not change the reference counts.
    static void swap(LLConstPointer<Type>& a, LLConstPointer<Type>& b)
    {
@@ -262,11 +262,11 @@ public:

protected:
#ifdef LL_LIBRARY_INCLUDE
    void ref();
    void unref();
-#else
+#else // LL_LIBRARY_INCLUDE
    void ref()
    {
        if (mPointer)
        {
            mPointer->ref();
@@ -277,9 +277,9 @@ protected:
    {
        if (mPointer)
        {
-           const Type *tempp = mPointer;
+           const Type *temp = mPointer;
            mPointer = NULL;
-           tempp->unref();
+           temp->unref();
            if (mPointer != NULL)
            {
                LL_WARNS() << "Unreference did assignment to non-NULL because of destructor" << LL_ENDL;
@@ -287,7 +287,8 @@ protected:
            }
        }
    }
-#endif
+#endif // LL_LIBRARY_INCLUDE

protected:
    const Type* mPointer;
};
@@ -297,13 +298,13 @@ class LLCopyOnWritePointer : public LLPointer<Type>
{
public:
    typedef LLCopyOnWritePointer<Type> self_t;
    typedef LLPointer<Type> pointer_t;

    LLCopyOnWritePointer()
        : mStayUnique(false)
    {}

    LLCopyOnWritePointer(Type* ptr)
        : LLPointer<Type>(ptr),
          mStayUnique(false)
    {}
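For reference, LLPointer is an intrusive smart pointer: every constructor and assignment calls ref() on the new pointee and unref() on the old one, so it only works with types that expose that pair (LLRefCount subclasses in the viewer). A hedged usage sketch with a stand-in ref-counted class (illustrative names, not viewer code):

// Stand-in pointee; in the viewer this role is played by LLRefCount-derived types.
class Node
{
public:
    void ref()   { ++mRef; }
    void unref() { if (--mRef == 0) delete this; }
private:
    int mRef{ 0 };
};

void example()
{
    LLPointer<Node> a = new Node; // constructor calls ref(): count 1
    LLPointer<Node> b = a;        // copy constructor calls ref(): count 2
    a = NULL;                     // assign() unref()s the old pointee: count 1
}   // b leaves scope, unref() drops the count to 0 and deletes the Node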
@@ -562,9 +562,9 @@ LLProcess::LLProcess(const LLSDOrParams& params):
    // IQA-490, CHOP-900: On Windows, ask APR to jump through hoops to
    // constrain the set of handles passed to the child process. Before we
    // changed to APR, the Windows implementation of LLProcessLauncher called
-   // CreateProcess(bInheritHandles=FALSE), meaning to pass NO open handles
+   // CreateProcess(bInheritHandles=false), meaning to pass NO open handles
    // to the child process. Now that we support pipes, though, we must allow
-   // apr_proc_create() to pass bInheritHandles=TRUE. But without taking
+   // apr_proc_create() to pass bInheritHandles=true. But without taking
    // special pains, that causes trouble in a number of ways, due to the fact
    // that the viewer is constantly opening and closing files -- most of
    // which CreateProcess() passes to every child process!
@@ -58,7 +58,9 @@
 * to restore uniform distribution.
 */

-static LLRandLagFib2281 gRandomGenerator(LLUUID::getRandomSeed());
+// gRandomGenerator is a stateful static object, which is therefore not
+// inherently thread-safe.
+static thread_local LLRandLagFib2281 gRandomGenerator(LLUUID::getRandomSeed());

// no default implementation, only specific F64 and F32 specializations
template <typename REAL>
@@ -71,7 +73,7 @@ inline F64 ll_internal_random<F64>()
    // CPUs (or at least multi-threaded processes) seem to
    // occasionally give an obviously incorrect random number -- like
    // 5^15 or something. Sooooo, clamp it as described above.
-   F64 rv = gRandomGenerator();
+   F64 rv{ gRandomGenerator() };
    if(!((rv >= 0.0) && (rv < 1.0))) return fmod(rv, 1.0);
    return rv;
}
@@ -79,7 +81,13 @@ inline F64 ll_internal_random<F64>()
template <>
inline F32 ll_internal_random<F32>()
{
-   return F32(ll_internal_random<F64>());
+   // *HACK: clamp the result as described above.
+   // Per Monty, it's important to clamp using the correct fmodf() rather
+   // than expanding to F64 for fmod() and then truncating back to F32. Prior
+   // to this change, we were getting sporadic ll_frand() == 1.0 results.
+   F32 rv{ narrow<F32>(gRandomGenerator()) };
+   if(!((rv >= 0.0f) && (rv < 1.0f))) return fmodf(rv, 1.0f);
+   return rv;
}

/*------------------------------ F64 aliases -------------------------------*/
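The point of clamping in single precision is that a double just below 1.0 can round up to exactly 1.0f when narrowed to float, which is how the old code could return ll_frand() == 1.0. A small self-contained demonstration of that effect and of the fmodf clamp (standard C++, not viewer code):

#include <cmath>
#include <cstdio>

int main()
{
    double almost_one = 0.99999999999;     // a legitimate [0,1) generator output
    float  narrowed   = (float)almost_one; // rounds to exactly 1.0f

    std::printf("narrowed == 1.0f: %s\n", narrowed == 1.0f ? "yes" : "no");

    // The fix: clamp in single precision so the result stays in [0, 1).
    float rv = narrowed;
    if (!((rv >= 0.0f) && (rv < 1.0f)))
        rv = fmodf(rv, 1.0f);              // 1.0f -> 0.0f

    std::printf("clamped rv = %f\n", rv);
    return 0;
}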
@@ -30,7 +30,7 @@
#include "llerror.h"

// maximum reference count before sounding memory leak alarm
-const S32 gMaxRefCount = S32_MAX;
+const S32 gMaxRefCount = LL_REFCOUNT_FREE;

LLRefCount::LLRefCount(const LLRefCount& other)
:   mRef(0)
@@ -49,7 +49,7 @@ LLRefCount::LLRefCount() :
}

LLRefCount::~LLRefCount()
{
    if (mRef != LL_REFCOUNT_FREE && mRef != 0)
    {
        LL_ERRS() << "deleting non-zero reference" << LL_ENDL;
@@ -51,24 +51,20 @@ protected:
public:
    LLRefCount();

-   inline void validateRefCount() const
-   {
-       llassert(mRef > 0);            // ref count below 0, likely corrupted
-       llassert(mRef < gMaxRefCount); // ref count excessive, likely memory leak
-   }
-
    inline void ref() const
    {
-       mRef++;
-       validateRefCount();
-   }
+       llassert(mRef != LL_REFCOUNT_FREE); // object is deleted
+       mRef++;
+       llassert(mRef < gMaxRefCount);      // ref count excessive, likely memory leak
+   }

    inline S32 unref() const
    {
-       validateRefCount();
+       llassert(mRef != LL_REFCOUNT_FREE); // object is deleted
+       llassert(mRef > 0);                 // ref count below 1, likely corrupted
        if (0 == --mRef)
        {
            mRef = LL_REFCOUNT_FREE; // set to nonsense yet recognizable value to aid in debugging
            delete this;
            return 0;
        }
@@ -82,8 +78,8 @@ public:
        return mRef;
    }

private:
    mutable S32 mRef;
};


@@ -106,7 +102,7 @@ protected:
public:
    LLThreadSafeRefCount();
    LLThreadSafeRefCount(const LLThreadSafeRefCount&);
    LLThreadSafeRefCount& operator=(const LLThreadSafeRefCount& ref)
    {
        mRef = 0;
        return *this;
@@ -114,8 +110,8 @@ public:

    void ref()
    {
        mRef++;
    }

    void unref()
    {
@@ -136,36 +132,36 @@ public:
        return currentVal;
    }

private:
    LLAtomicS32 mRef;
};

/**
 * intrusive pointer support for LLThreadSafeRefCount
 * this allows you to use boost::intrusive_ptr with any LLThreadSafeRefCount-derived type
 */
inline void intrusive_ptr_add_ref(LLThreadSafeRefCount* p)
{
    p->ref();
}

inline void intrusive_ptr_release(LLThreadSafeRefCount* p)
{
    p->unref();
}

/**
 * intrusive pointer support
 * this allows you to use boost::intrusive_ptr with any LLRefCount-derived type
 */
inline void intrusive_ptr_add_ref(LLRefCount* p)
{
    p->ref();
}

inline void intrusive_ptr_release(LLRefCount* p)
{
    p->unref();
}

#endif
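The intrusive_ptr_add_ref / intrusive_ptr_release overloads above are what boost::intrusive_ptr finds via argument-dependent lookup, which is how any LLRefCount-derived type can be held in one. A hedged, self-contained sketch of the same pattern, using a stand-in ref-counted class instead of the viewer's LLRefCount (requires Boost headers):

#include <boost/intrusive_ptr.hpp>
#include <iostream>

class RefCounted
{
public:
    void ref()   { ++mRef; }
    void unref() { if (--mRef == 0) delete this; }
private:
    int mRef{ 0 };
};

// Found via argument-dependent lookup by boost::intrusive_ptr.
inline void intrusive_ptr_add_ref(RefCounted* p) { p->ref(); }
inline void intrusive_ptr_release(RefCounted* p) { p->unref(); }

int main()
{
    boost::intrusive_ptr<RefCounted> a(new RefCounted); // ref count 1
    boost::intrusive_ptr<RefCounted> b = a;             // ref count 2
    std::cout << "two intrusive_ptrs share one object\n";
    // both pointers go out of scope; unref() deletes the object
}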
@@ -197,12 +197,12 @@ public:
              typename std::enable_if<std::is_integral<VALUE>::value &&
                                      ! std::is_same<VALUE, Boolean>::value,
                                      bool>::type = true>
-   LLSD(VALUE v): LLSD(Integer(narrow(v))) {}
+   LLSD(VALUE v): LLSD(Integer(narrow<VALUE>(v))) {}
    // support construction from F32 et al.
    template <typename VALUE,
              typename std::enable_if<std::is_floating_point<VALUE>::value,
                                      bool>::type = true>
-   LLSD(VALUE v): LLSD(Real(narrow(v))) {}
+   LLSD(VALUE v): LLSD(Real(narrow<VALUE>(v))) {}
    //@}

    /** @name Scalar Assignment */
@@ -2174,7 +2174,7 @@ std::string zip_llsd(LLSD& data)

    U8 out[CHUNK];

-   strm.avail_in = narrow(source.size());
+   strm.avail_in = narrow<size_t>(source.size());
    strm.next_in = (U8*) source.data();
    U8* output = NULL;

@@ -196,12 +196,12 @@ S32 LLSDXMLFormatter::format_impl(const LLSD& data, std::ostream& ostr,
    // *FIX: memory inefficient.
    // *TODO: convert to use LLBase64
    ostr << pre << "<binary encoding=\"base64\">";
-   int b64_buffer_length = apr_base64_encode_len(narrow(buffer.size()));
+   int b64_buffer_length = apr_base64_encode_len(narrow<size_t>(buffer.size()));
    char* b64_buffer = new char[b64_buffer_length];
    b64_buffer_length = apr_base64_encode_binary(
        b64_buffer,
        &buffer[0],
-       narrow(buffer.size()));
+       narrow<size_t>(buffer.size()));
    ostr.write(b64_buffer, b64_buffer_length - 1);
    delete[] b64_buffer;
    ostr << "</binary>" << post;
@@ -404,11 +404,18 @@ S32 LLSDXMLParser::Impl::parse(std::istream& input, LLSD& data)
        if (buffer)
        {
            ((char*) buffer)[count ? count - 1 : 0] = '\0';
+           if (mEmitErrors)
+           {
+               LL_INFOS() << "LLSDXMLParser::Impl::parse: XML_STATUS_ERROR parsing:" << (char*)buffer << LL_ENDL;
+           }
        }
-       if (mEmitErrors)
-       {
-           LL_INFOS() << "LLSDXMLParser::Impl::parse: XML_STATUS_ERROR parsing:" << (char*) buffer << LL_ENDL;
-       }
+       else
+       {
+           if (mEmitErrors)
+           {
+               LL_INFOS() << "LLSDXMLParser::Impl::parse: XML_STATUS_ERROR, null buffer" << LL_ENDL;
+           }
+       }
        data = LLSD();
        return LLSDParser::PARSE_FAILURE;
    }
@@ -1406,9 +1406,17 @@ bool LLStringUtil::formatDatetime(std::string& replacement, std::string token,
    }
    else
    {
+#if 0
        // EXT-1565 : Zai Lynch, James Linden : 15/Oct/09
        // [BSI] Feedback: Viewer clock mentions SLT, but would prefer it to show PST/PDT
        // "slt" = Second Life Time, which is deprecated.
        // If not utc or user local time, fallback to Pacific time
        replacement = LLStringOps::getPacificDaylightTime() ? "PDT" : "PST";
+#else
+       // SL-20370 : Steeltoe Linden : 29/Sep/23
+       // Change "PDT" to "SLT" on menu bar
+       replacement = "SLT";
+#endif
    }
    return true;
}
@@ -917,7 +917,7 @@ void LLMemoryInfo::stream(std::ostream& s) const
    // Now stream stats
    BOOST_FOREACH(const MapEntry& pair, inMap(mStatsMap))
    {
-       s << pfx << std::setw(narrow(key_width+1)) << (pair.first + ':') << ' ';
+       s << pfx << std::setw(narrow<size_t>(key_width+1)) << (pair.first + ':') << ' ';
        LLSD value(pair.second);
        if (value.isInteger())
            s << std::setw(12) << value.asInteger();
@@ -164,9 +164,9 @@ LL_COMMON_API std::ostream& operator<<(std::ostream& s, const LLOSInfo& info);
LL_COMMON_API std::ostream& operator<<(std::ostream& s, const LLCPUInfo& info);
LL_COMMON_API std::ostream& operator<<(std::ostream& s, const LLMemoryInfo& info);

-// gunzip srcfile into dstfile. Returns FALSE on error.
+// gunzip srcfile into dstfile. Returns false on error.
bool LL_COMMON_API gunzip_file(const std::string& srcfile, const std::string& dstfile);
-// gzip srcfile into dstfile. Returns FALSE on error.
+// gzip srcfile into dstfile. Returns false on error.
bool LL_COMMON_API gzip_file(const std::string& srcfile, const std::string& dstfile);

extern LL_COMMON_API LLCPUInfo gSysCPU;
@@ -113,15 +113,16 @@ LL_COMMON_API bool on_main_thread()
    return (LLThread::currentID() == main_thread());
}

-LL_COMMON_API void assert_main_thread()
+LL_COMMON_API bool assert_main_thread()
{
    auto curr = LLThread::currentID();
    auto main = main_thread();
-   if (curr != main)
-   {
-       LL_WARNS() << "Illegal execution from thread id " << curr
-                  << " outside main thread " << main << LL_ENDL;
-   }
+   if (curr == main)
+       return true;
+
+   LL_WARNS() << "Illegal execution from thread id " << curr
+              << " outside main thread " << main << LL_ENDL;
+   return false;
}

// this function has become moot
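With assert_main_thread() now returning bool, a caller can both log the violation and bail out instead of continuing. A hypothetical caller (illustrative only, not viewer code; assumes llthread.h is included) might use it like this:

// Hypothetical caller: the bool return lets code refuse to do main-thread-only
// work, rather than merely logging as the old void version did.
void updateUiFromCallback()
{
    if (!assert_main_thread())  // logs "Illegal execution from thread id ..." and returns false
    {
        return;                 // skip main-thread-only work when called off-thread
    }
    // ... main-thread-only work ...
}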
@@ -152,7 +152,7 @@ public:

//============================================================================

-extern LL_COMMON_API void assert_main_thread();
+extern LL_COMMON_API bool assert_main_thread();
extern LL_COMMON_API bool on_main_thread();

#endif // LL_LLTHREAD_H
@@ -33,8 +33,6 @@
namespace LLTrace
{

-MemStatHandle gTraceMemStat("LLTrace");
-
StatBase::StatBase( const char* name, const char* description )
:   mName(name),
    mDescription(description ? description : "")
@@ -65,7 +63,7 @@ void TimeBlockTreeNode::setParent( BlockTimerStatHandle* parent )
    llassert_always(parent != mBlock);
    llassert_always(parent != NULL);

-   TimeBlockTreeNode* parent_tree_node = get_thread_recorder()->getTimeBlockTreeNode(narrow(parent->getIndex()));
+   TimeBlockTreeNode* parent_tree_node = get_thread_recorder()->getTimeBlockTreeNode(narrow<size_t>(parent->getIndex()));
    if (!parent_tree_node) return;

    if (mParent)
@@ -193,61 +193,6 @@ void add(CountStatHandle<T>& count, VALUE_T value)
#endif
}

-template<>
-class StatType<MemAccumulator::AllocationFacet>
-:   public StatType<MemAccumulator>
-{
-public:
-
-   StatType(const char* name, const char* description = "")
-   :   StatType<MemAccumulator>(name, description)
-   {}
-};
-
-template<>
-class StatType<MemAccumulator::DeallocationFacet>
-:   public StatType<MemAccumulator>
-{
-public:
-
-   StatType(const char* name, const char* description = "")
-   :   StatType<MemAccumulator>(name, description)
-   {}
-};
-
-class MemStatHandle : public StatType<MemAccumulator>
-{
-public:
-   typedef StatType<MemAccumulator> stat_t;
-   MemStatHandle(const char* name, const char* description = "")
-   :   stat_t(name, description)
-   {
-       mName = name;
-   }
-
-   void setName(const char* name)
-   {
-       LL_PROFILE_ZONE_SCOPED_CATEGORY_STATS;
-       mName = name;
-       setKey(name);
-   }
-
-   /*virtual*/ const char* getUnitLabel() const { return "KB"; }
-
-   StatType<MemAccumulator::AllocationFacet>& allocations()
-   {
-       LL_PROFILE_ZONE_SCOPED_CATEGORY_STATS;
-       return static_cast<StatType<MemAccumulator::AllocationFacet>&>(*(StatType<MemAccumulator>*)this);
-   }
-
-   StatType<MemAccumulator::DeallocationFacet>& deallocations()
-   {
-       LL_PROFILE_ZONE_SCOPED_CATEGORY_STATS;
-       return static_cast<StatType<MemAccumulator::DeallocationFacet>&>(*(StatType<MemAccumulator>*)this);
-   }
-};
-
-
// measures effective memory footprint of specified type
// specialize to cover different types
template<typename T, typename IS_MEM_TRACKABLE = void, typename IS_UNITS = void>
@@ -334,33 +279,6 @@ struct MeasureMem<std::basic_string<T>, IS_MEM_TRACKABLE, IS_BYTES>
    }
};

-
-template<typename T>
-inline void claim_alloc(MemStatHandle& measurement, const T& value)
-{
-   LL_PROFILE_ZONE_SCOPED_CATEGORY_STATS;
-#if LL_TRACE_ENABLED
-   auto size = MeasureMem<T>::measureFootprint(value);
-   if(size == 0) return;
-   MemAccumulator& accumulator = measurement.getCurrentAccumulator();
-   accumulator.mSize.sample(accumulator.mSize.hasValue() ? accumulator.mSize.getLastValue() + (F64)size : (F64)size);
-   accumulator.mAllocations.record(size);
-#endif
-}
-
-template<typename T>
-inline void disclaim_alloc(MemStatHandle& measurement, const T& value)
-{
-   LL_PROFILE_ZONE_SCOPED_CATEGORY_STATS;
-#if LL_TRACE_ENABLED
-   auto size = MeasureMem<T>::measureFootprint(value);
-   if(size == 0) return;
-   MemAccumulator& accumulator = measurement.getCurrentAccumulator();
-   accumulator.mSize.sample(accumulator.mSize.hasValue() ? accumulator.mSize.getLastValue() - (F64)size : -(F64)size);
-   accumulator.mDeallocations.add(size);
-#endif
-}
-
}

#endif // LL_LLTRACE_H
@@ -1,24 +1,24 @@
/**
 * @file lltracesampler.cpp
 *
 * $LicenseInfo:firstyear=2001&license=viewerlgpl$
 * Second Life Viewer Source Code
 * Copyright (C) 2012, Linden Research, Inc.
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation;
 * version 2.1 of the License only.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 *
 * Linden Research, Inc., 945 Battery Street, San Francisco, CA 94111 USA
 * $/LicenseInfo$
 */
@@ -32,73 +32,52 @@
namespace LLTrace
{

-extern MemStatHandle gTraceMemStat;

///////////////////////////////////////////////////////////////////////
// AccumulatorBufferGroup
///////////////////////////////////////////////////////////////////////

AccumulatorBufferGroup::AccumulatorBufferGroup()
{
    LL_PROFILE_ZONE_SCOPED_CATEGORY_STATS;
-   claim_alloc(gTraceMemStat, mCounts.capacity() * sizeof(CountAccumulator));
-   claim_alloc(gTraceMemStat, mSamples.capacity() * sizeof(SampleAccumulator));
-   claim_alloc(gTraceMemStat, mEvents.capacity() * sizeof(EventAccumulator));
-   claim_alloc(gTraceMemStat, mStackTimers.capacity() * sizeof(TimeBlockAccumulator));
-   claim_alloc(gTraceMemStat, mMemStats.capacity() * sizeof(MemAccumulator));
}

AccumulatorBufferGroup::AccumulatorBufferGroup(const AccumulatorBufferGroup& other)
:   mCounts(other.mCounts),
    mSamples(other.mSamples),
    mEvents(other.mEvents),
-   mStackTimers(other.mStackTimers),
-   mMemStats(other.mMemStats)
+   mStackTimers(other.mStackTimers)
{
    LL_PROFILE_ZONE_SCOPED_CATEGORY_STATS;
-   claim_alloc(gTraceMemStat, mCounts.capacity() * sizeof(CountAccumulator));
-   claim_alloc(gTraceMemStat, mSamples.capacity() * sizeof(SampleAccumulator));
-   claim_alloc(gTraceMemStat, mEvents.capacity() * sizeof(EventAccumulator));
-   claim_alloc(gTraceMemStat, mStackTimers.capacity() * sizeof(TimeBlockAccumulator));
-   claim_alloc(gTraceMemStat, mMemStats.capacity() * sizeof(MemAccumulator));
}

AccumulatorBufferGroup::~AccumulatorBufferGroup()
{
    LL_PROFILE_ZONE_SCOPED_CATEGORY_STATS;
-   disclaim_alloc(gTraceMemStat, mCounts.capacity() * sizeof(CountAccumulator));
-   disclaim_alloc(gTraceMemStat, mSamples.capacity() * sizeof(SampleAccumulator));
-   disclaim_alloc(gTraceMemStat, mEvents.capacity() * sizeof(EventAccumulator));
-   disclaim_alloc(gTraceMemStat, mStackTimers.capacity() * sizeof(TimeBlockAccumulator));
-   disclaim_alloc(gTraceMemStat, mMemStats.capacity() * sizeof(MemAccumulator));
}

void AccumulatorBufferGroup::handOffTo(AccumulatorBufferGroup& other)
{
    LL_PROFILE_ZONE_SCOPED_CATEGORY_STATS;
    other.mCounts.reset(&mCounts);
    other.mSamples.reset(&mSamples);
    other.mEvents.reset(&mEvents);
    other.mStackTimers.reset(&mStackTimers);
-   other.mMemStats.reset(&mMemStats);
}

void AccumulatorBufferGroup::makeCurrent()
{
    LL_PROFILE_ZONE_SCOPED_CATEGORY_STATS;
    mCounts.makeCurrent();
    mSamples.makeCurrent();
    mEvents.makeCurrent();
    mStackTimers.makeCurrent();
-   mMemStats.makeCurrent();

    ThreadRecorder* thread_recorder = get_thread_recorder();
    AccumulatorBuffer<TimeBlockAccumulator>& timer_accumulator_buffer = mStackTimers;
    // update stacktimer parent pointers
    for (size_t i = 0, end_i = mStackTimers.size(); i < end_i; i++)
    {
-       TimeBlockTreeNode* tree_node = thread_recorder->getTimeBlockTreeNode(narrow(i));
+       TimeBlockTreeNode* tree_node = thread_recorder->getTimeBlockTreeNode(narrow<size_t>(i));
        if (tree_node)
        {
            timer_accumulator_buffer[i].mParent = tree_node->mParent;
@@ -109,12 +88,11 @@ void AccumulatorBufferGroup::makeCurrent()
//static
void AccumulatorBufferGroup::clearCurrent()
{
    LL_PROFILE_ZONE_SCOPED_CATEGORY_STATS;
    AccumulatorBuffer<CountAccumulator>::clearCurrent();
    AccumulatorBuffer<SampleAccumulator>::clearCurrent();
    AccumulatorBuffer<EventAccumulator>::clearCurrent();
    AccumulatorBuffer<TimeBlockAccumulator>::clearCurrent();
-   AccumulatorBuffer<MemAccumulator>::clearCurrent();
}

bool AccumulatorBufferGroup::isCurrent() const
@@ -124,44 +102,39 @@ bool AccumulatorBufferGroup::isCurrent() const

void AccumulatorBufferGroup::append( const AccumulatorBufferGroup& other )
{
    LL_PROFILE_ZONE_SCOPED_CATEGORY_STATS;
    mCounts.addSamples(other.mCounts, SEQUENTIAL);
    mSamples.addSamples(other.mSamples, SEQUENTIAL);
    mEvents.addSamples(other.mEvents, SEQUENTIAL);
-   mMemStats.addSamples(other.mMemStats, SEQUENTIAL);
    mStackTimers.addSamples(other.mStackTimers, SEQUENTIAL);
}

void AccumulatorBufferGroup::merge( const AccumulatorBufferGroup& other)
{
    LL_PROFILE_ZONE_SCOPED_CATEGORY_STATS;
    mCounts.addSamples(other.mCounts, NON_SEQUENTIAL);
    mSamples.addSamples(other.mSamples, NON_SEQUENTIAL);
    mEvents.addSamples(other.mEvents, NON_SEQUENTIAL);
-   mMemStats.addSamples(other.mMemStats, NON_SEQUENTIAL);
    // for now, hold out timers from merge, need to be displayed per thread
    //mStackTimers.addSamples(other.mStackTimers, NON_SEQUENTIAL);
}

void AccumulatorBufferGroup::reset(AccumulatorBufferGroup* other)
{
    LL_PROFILE_ZONE_SCOPED_CATEGORY_STATS;
    mCounts.reset(other ? &other->mCounts : NULL);
    mSamples.reset(other ? &other->mSamples : NULL);
    mEvents.reset(other ? &other->mEvents : NULL);
    mStackTimers.reset(other ? &other->mStackTimers : NULL);
-   mMemStats.reset(other ? &other->mMemStats : NULL);
}

void AccumulatorBufferGroup::sync()
{
    LL_PROFILE_ZONE_SCOPED_CATEGORY_STATS;
    if (isCurrent())
    {
        F64SecondsImplicit time_stamp = LLTimer::getTotalSeconds();

        mSamples.sync(time_stamp);
-       mMemStats.sync(time_stamp);
    }
}

@@ -197,10 +170,9 @@ F64 SampleAccumulator::mergeSumsOfSquares(const SampleAccumulator& a, const Samp
    return a.getSumOfSquares();
}

-
void SampleAccumulator::addSamples( const SampleAccumulator& other, EBufferAppendType append_type )
{
    if (append_type == NON_SEQUENTIAL)
    {
        return;
    }
@@ -299,7 +271,7 @@ void EventAccumulator::addSamples( const EventAccumulator& other, EBufferAppendT

void EventAccumulator::reset( const EventAccumulator* other )
{
    mNumSamples = 0;
    mSum = 0;
    mMin = F32(NaN);
    mMax = F32(NaN);
@@ -308,5 +280,4 @@ void EventAccumulator::reset( const EventAccumulator* other )
    mLastValue = other ? other->mLastValue : NaN;
}

-
}
@ -1,26 +1,26 @@
|
|||
|
||||
/**
|
||||
/**
|
||||
* @file lltraceaccumulators.h
|
||||
* @brief Storage for accumulating statistics
|
||||
*
|
||||
* $LicenseInfo:firstyear=2001&license=viewerlgpl$
|
||||
* Second Life Viewer Source Code
|
||||
* Copyright (C) 2012, Linden Research, Inc.
|
||||
*
|
||||
*
|
||||
* This library is free software; you can redistribute it and/or
|
||||
* modify it under the terms of the GNU Lesser General Public
|
||||
* License as published by the Free Software Foundation;
|
||||
* version 2.1 of the License only.
|
||||
*
|
||||
*
|
||||
* This library is distributed in the hope that it will be useful,
|
||||
* but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
|
||||
* Lesser General Public License for more details.
|
||||
*
|
||||
*
|
||||
* You should have received a copy of the GNU Lesser General Public
|
||||
* License along with this library; if not, write to the Free Software
|
||||
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
|
||||
*
|
||||
*
|
||||
* Linden Research, Inc., 945 Battery Street, San Francisco, CA 94111 USA
|
||||
* $/LicenseInfo$
|
||||
*/
|
||||
|
|
@ -28,7 +28,6 @@
|
|||
#ifndef LL_LLTRACEACCUMULATORS_H
|
||||
#define LL_LLTRACEACCUMULATORS_H
|
||||
|
||||
|
||||
#include "stdtypes.h"
|
||||
#include "llpreprocessor.h"
|
||||
#include "llunits.h"
|
||||
|
|
@ -66,7 +65,7 @@ namespace LLTrace
|
|||
: mStorageSize(0),
|
||||
mStorage(NULL)
|
||||
{
|
||||
LL_PROFILE_ZONE_SCOPED_CATEGORY_STATS;
|
||||
LL_PROFILE_ZONE_SCOPED_CATEGORY_STATS;
|
||||
const AccumulatorBuffer& other = *getDefaultBuffer();
|
||||
resize(sNextStorageSlot);
|
||||
for (S32 i = 0; i < sNextStorageSlot; i++)
|
||||
|
|
@ -77,7 +76,7 @@ namespace LLTrace
|
|||
|
||||
~AccumulatorBuffer()
|
||||
{
|
||||
LL_PROFILE_ZONE_SCOPED_CATEGORY_STATS;
|
||||
LL_PROFILE_ZONE_SCOPED_CATEGORY_STATS;
|
||||
if (isCurrent())
|
||||
{
|
||||
LLThreadLocalSingletonPointer<ACCUMULATOR>::setInstance(NULL);
|
||||
|
|
@ -85,14 +84,14 @@ namespace LLTrace
|
|||
delete[] mStorage;
|
||||
}
|
||||
|
||||
LL_FORCE_INLINE ACCUMULATOR& operator[](size_t index)
|
||||
{
|
||||
return mStorage[index];
|
||||
LL_FORCE_INLINE ACCUMULATOR& operator[](size_t index)
|
||||
{
|
||||
return mStorage[index];
|
||||
}
|
||||
|
||||
LL_FORCE_INLINE const ACCUMULATOR& operator[](size_t index) const
|
||||
{
|
||||
return mStorage[index];
|
||||
{
|
||||
return mStorage[index];
|
||||
}
|
||||
|
||||
|
||||
|
|
@ -100,7 +99,7 @@ namespace LLTrace
|
|||
: mStorageSize(0),
|
||||
mStorage(NULL)
|
||||
{
|
||||
LL_PROFILE_ZONE_SCOPED_CATEGORY_STATS;
|
||||
LL_PROFILE_ZONE_SCOPED_CATEGORY_STATS;
|
||||
resize(sNextStorageSlot);
|
||||
for (S32 i = 0; i < sNextStorageSlot; i++)
|
||||
{
|
||||
|
|
@ -110,7 +109,7 @@ namespace LLTrace
|
|||
|
||||
void addSamples(const AccumulatorBuffer<ACCUMULATOR>& other, EBufferAppendType append_type)
|
||||
{
|
||||
LL_PROFILE_ZONE_SCOPED_CATEGORY_STATS;
|
||||
LL_PROFILE_ZONE_SCOPED_CATEGORY_STATS;
|
||||
llassert(mStorageSize >= sNextStorageSlot && other.mStorageSize >= sNextStorageSlot);
|
||||
for (size_t i = 0; i < sNextStorageSlot; i++)
|
||||
{
|
||||
|
|
@ -120,7 +119,7 @@ namespace LLTrace
|
|||
|
||||
void copyFrom(const AccumulatorBuffer<ACCUMULATOR>& other)
|
||||
{
|
||||
LL_PROFILE_ZONE_SCOPED_CATEGORY_STATS;
|
||||
LL_PROFILE_ZONE_SCOPED_CATEGORY_STATS;
|
||||
llassert(mStorageSize >= sNextStorageSlot && other.mStorageSize >= sNextStorageSlot);
|
||||
for (size_t i = 0; i < sNextStorageSlot; i++)
|
||||
{
|
||||
|
|
@ -130,7 +129,7 @@ namespace LLTrace
|
|||
|
||||
void reset(const AccumulatorBuffer<ACCUMULATOR>* other = NULL)
|
||||
{
|
||||
LL_PROFILE_ZONE_SCOPED_CATEGORY_STATS;
|
||||
LL_PROFILE_ZONE_SCOPED_CATEGORY_STATS;
|
||||
llassert(mStorageSize >= sNextStorageSlot);
|
||||
for (size_t i = 0; i < sNextStorageSlot; i++)
|
||||
{
|
||||
|
|
@ -140,7 +139,7 @@ namespace LLTrace
|
|||
|
||||
void sync(F64SecondsImplicit time_stamp)
|
||||
{
|
||||
LL_PROFILE_ZONE_SCOPED_CATEGORY_STATS;
|
||||
LL_PROFILE_ZONE_SCOPED_CATEGORY_STATS;
|
||||
llassert(mStorageSize >= sNextStorageSlot);
|
||||
for (size_t i = 0; i < sNextStorageSlot; i++)
|
||||
{
|
||||
|
|
@ -160,13 +159,13 @@ namespace LLTrace
|
|||
|
||||
static void clearCurrent()
|
||||
{
|
||||
LLThreadLocalSingletonPointer<ACCUMULATOR>::setInstance(NULL);
|
||||
LLThreadLocalSingletonPointer<ACCUMULATOR>::setInstance(NULL);
|
||||
}
|
||||
|
||||
// NOTE: this is not thread-safe. We assume that slots are reserved in the main thread before any child threads are spawned
|
||||
size_t reserveSlot()
|
||||
{
|
||||
LL_PROFILE_ZONE_SCOPED_CATEGORY_STATS;
|
||||
LL_PROFILE_ZONE_SCOPED_CATEGORY_STATS;
|
||||
size_t next_slot = sNextStorageSlot++;
|
||||
if (next_slot >= mStorageSize)
|
||||
{
|
||||
|
|
@ -180,7 +179,7 @@ namespace LLTrace
|
|||
|
||||
void resize(size_t new_size)
|
||||
{
|
||||
LL_PROFILE_ZONE_SCOPED_CATEGORY_STATS;
|
||||
LL_PROFILE_ZONE_SCOPED_CATEGORY_STATS;
|
||||
if (new_size <= mStorageSize) return;
|
||||
|
||||
ACCUMULATOR* old_storage = mStorage;
|
||||
|
|
@ -214,14 +213,14 @@ namespace LLTrace
|
|||
return mStorageSize;
|
||||
}
|
||||
|
||||
static size_t getNumIndices()
|
||||
static size_t getNumIndices()
|
||||
{
|
||||
return sNextStorageSlot;
|
||||
}
|
||||
|
||||
static self_t* getDefaultBuffer()
|
||||
{
|
||||
LL_PROFILE_ZONE_SCOPED_CATEGORY_STATS;
|
||||
LL_PROFILE_ZONE_SCOPED_CATEGORY_STATS;
|
||||
static bool sInitialized = false;
|
||||
if (!sInitialized)
|
||||
{
|
||||
|
|
@ -336,7 +335,7 @@ namespace LLTrace
|
|||
|
||||
void sample(F64 value)
|
||||
{
|
||||
LL_PROFILE_ZONE_SCOPED_CATEGORY_STATS;
|
||||
LL_PROFILE_ZONE_SCOPED_CATEGORY_STATS;
|
||||
F64SecondsImplicit time_stamp = LLTimer::getTotalSeconds();
|
||||
|
||||
// store effect of last value
|
||||
|
|
@ -399,7 +398,7 @@ namespace LLTrace
|
|||
F64 mMean,
|
||||
mSumOfSquares;
|
||||
|
||||
F64SecondsImplicit
|
||||
F64SecondsImplicit
|
||||
mLastSampleTimeStamp,
|
||||
mTotalSamplingTime;
|
||||
|
||||
|
|
@ -409,7 +408,7 @@ namespace LLTrace
|
|||
S32 mNumSamples;
|
||||
// distinct from mNumSamples, since we might have inherited a last value from
|
||||
// a previous sampling period
|
||||
bool mHasValue;
|
||||
bool mHasValue;
|
||||
};
|
||||
|
||||
class CountAccumulator
|
||||
|
|
@ -457,14 +456,14 @@ namespace LLTrace
|
|||
|
||||
class alignas(32) TimeBlockAccumulator
|
||||
{
|
||||
public:
|
||||
public:
|
||||
typedef F64Seconds value_t;
|
||||
static F64Seconds getDefaultValue() { return F64Seconds(0); }
|
||||
|
||||
typedef TimeBlockAccumulator self_t;
|
||||
|
||||
// fake classes that allows us to view different facets of underlying statistic
|
||||
struct CallCountFacet
|
||||
struct CallCountFacet
|
||||
{
|
||||
typedef S32 value_t;
|
||||
};
|
||||
|
|
@ -515,12 +514,12 @@ namespace LLTrace
|
|||
BlockTimerStatHandle* getParent() { return mParent; }
|
||||
|
||||
BlockTimerStatHandle* mBlock;
|
||||
BlockTimerStatHandle* mParent;
|
||||
BlockTimerStatHandle* mParent;
|
||||
std::vector<BlockTimerStatHandle*> mChildren;
|
||||
bool mCollapsed;
|
||||
bool mNeedsSorting;
|
||||
};
|
||||
|
||||
|
||||
struct BlockTimerStackRecord
|
||||
{
|
||||
class BlockTimer* mActiveTimer;
|
||||
|
|
@ -528,65 +527,6 @@ namespace LLTrace
|
|||
U64 mChildTime;
|
||||
};
|
||||
|
||||
struct MemAccumulator
|
||||
{
|
||||
typedef F64Bytes value_t;
|
||||
static F64Bytes getDefaultValue() { return F64Bytes(0); }
|
||||
|
||||
typedef MemAccumulator self_t;
|
||||
|
||||
// fake classes that allows us to view different facets of underlying statistic
|
||||
struct AllocationFacet
|
||||
{
|
||||
typedef F64Bytes value_t;
|
||||
static F64Bytes getDefaultValue() { return F64Bytes(0); }
|
||||
};
|
||||
|
||||
struct DeallocationFacet
|
||||
{
|
||||
typedef F64Bytes value_t;
|
||||
static F64Bytes getDefaultValue() { return F64Bytes(0); }
|
||||
};
|
||||
|
||||
void addSamples(const MemAccumulator& other, EBufferAppendType append_type)
|
||||
{
|
||||
LL_PROFILE_ZONE_SCOPED_CATEGORY_STATS;
|
||||
mAllocations.addSamples(other.mAllocations, append_type);
|
||||
mDeallocations.addSamples(other.mDeallocations, append_type);
|
||||
|
||||
if (append_type == SEQUENTIAL)
|
||||
{
|
||||
mSize.addSamples(other.mSize, SEQUENTIAL);
|
||||
}
|
||||
else
|
||||
{
|
||||
F64 allocation_delta(other.mAllocations.getSum() - other.mDeallocations.getSum());
|
||||
mSize.sample(mSize.hasValue()
|
||||
? mSize.getLastValue() + allocation_delta
|
||||
: allocation_delta);
|
||||
}
|
||||
}
|
||||
|
||||
void reset(const MemAccumulator* other)
|
||||
{
|
||||
LL_PROFILE_ZONE_SCOPED_CATEGORY_STATS;
|
||||
mSize.reset(other ? &other->mSize : NULL);
|
||||
mAllocations.reset(other ? &other->mAllocations : NULL);
|
||||
mDeallocations.reset(other ? &other->mDeallocations : NULL);
|
||||
}
|
||||
|
||||
void sync(F64SecondsImplicit time_stamp)
|
||||
{
|
||||
mSize.sync(time_stamp);
|
||||
}
|
||||
|
||||
bool hasValue() const { return mSize.hasValue(); }
|
||||
|
||||
SampleAccumulator mSize;
|
||||
EventAccumulator mAllocations;
|
||||
CountAccumulator mDeallocations;
|
||||
};
|
||||
|
||||
struct AccumulatorBufferGroup : public LLRefCount
|
||||
{
|
||||
AccumulatorBufferGroup();
|
||||
|
|
@ -607,9 +547,7 @@ namespace LLTrace
|
|||
AccumulatorBuffer<SampleAccumulator> mSamples;
|
||||
AccumulatorBuffer<EventAccumulator> mEvents;
|
||||
AccumulatorBuffer<TimeBlockAccumulator> mStackTimers;
|
||||
AccumulatorBuffer<MemAccumulator> mMemStats;
|
||||
};
|
||||
}
|
||||
|
||||
#endif // LL_LLTRACEACCUMULATORS_H
|
||||
|
||||
|
|
|
|||
|
|
@@ -1,24 +1,24 @@
/**
 * @file lltracesampler.cpp
 *
 * $LicenseInfo:firstyear=2001&license=viewerlgpl$
 * Second Life Viewer Source Code
 * Copyright (C) 2012, Linden Research, Inc.
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation;
 * version 2.1 of the License only.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 *
 * Linden Research, Inc., 945 Battery Street, San Francisco, CA 94111 USA
 * $/LicenseInfo$
 */
@@ -32,7 +32,7 @@
#include "lltracethreadrecorder.h"
#include "llthread.h"

inline F64 lerp(F64 a, F64 b, F64 u)
{
    return a + ((b - a) * u);
}
@@ -40,34 +40,29 @@ inline F64 lerp(F64 a, F64 b, F64 u)
namespace LLTrace
{

-extern MemStatHandle gTraceMemStat;

///////////////////////////////////////////////////////////////////////
// Recording
///////////////////////////////////////////////////////////////////////

Recording::Recording(EPlayState state)
:   mElapsedSeconds(0),
    mActiveBuffers(NULL)
{
    LL_PROFILE_ZONE_SCOPED_CATEGORY_STATS;
-   claim_alloc(gTraceMemStat, this);
    mBuffers = new AccumulatorBufferGroup();
-   claim_alloc(gTraceMemStat, mBuffers);
    setPlayState(state);
}

Recording::Recording( const Recording& other )
:   mActiveBuffers(NULL)
{
    LL_PROFILE_ZONE_SCOPED_CATEGORY_STATS;
-   claim_alloc(gTraceMemStat, this);
    *this = other;
}

Recording& Recording::operator = (const Recording& other)
{
    LL_PROFILE_ZONE_SCOPED_CATEGORY_STATS;
    // this will allow us to seamlessly start without affecting any data we've acquired from other
    setPlayState(PAUSED);

@@ -85,14 +80,11 @@ Recording& Recording::operator = (const Recording& other)
    return *this;
}

-
Recording::~Recording()
{
    LL_PROFILE_ZONE_SCOPED_CATEGORY_STATS;
-   disclaim_alloc(gTraceMemStat, this);
-   disclaim_alloc(gTraceMemStat, mBuffers);

    // allow recording destruction without thread recorder running,
    // otherwise thread shutdown could crash if a recording outlives the thread recorder
    // besides, recording construction and destruction is fine without a recorder...just don't attempt to start one
    if (isStarted() && LLTrace::get_thread_recorder() != NULL)
@@ -107,14 +99,14 @@ void Recording::update()
#if LL_TRACE_ENABLED
    if (isStarted())
    {
        LL_PROFILE_ZONE_SCOPED_CATEGORY_STATS;
        mElapsedSeconds += mSamplingTimer.getElapsedTimeF64();

        // must have
        llassert(mActiveBuffers != NULL
            && LLTrace::get_thread_recorder() != NULL);

-       if(!mActiveBuffers->isCurrent() && LLTrace::get_thread_recorder() != NULL)
+       if (!mActiveBuffers->isCurrent() && LLTrace::get_thread_recorder() != NULL)
        {
            AccumulatorBufferGroup* buffers = mBuffers.write();
            LLTrace::get_thread_recorder()->deactivate(buffers);
@@ -128,7 +120,7 @@ void Recording::update()

void Recording::handleReset()
{
    LL_PROFILE_ZONE_SCOPED_CATEGORY_STATS;
#if LL_TRACE_ENABLED
    mBuffers.write()->reset();

@@ -139,7 +131,7 @@ void Recording::handleReset()

void Recording::handleStart()
{
    LL_PROFILE_ZONE_SCOPED_CATEGORY_STATS;
#if LL_TRACE_ENABLED
    mSamplingTimer.reset();
    mBuffers.setStayUnique(true);
@@ -151,7 +143,7 @@ void Recording::handleStart()

void Recording::handleStop()
{
    LL_PROFILE_ZONE_SCOPED_CATEGORY_STATS;
#if LL_TRACE_ENABLED
    mElapsedSeconds += mSamplingTimer.getElapsedTimeF64();
    // must have thread recorder running on this thread
@@ -204,7 +196,6 @@ F64Seconds Recording::getSum(const StatType<TimeBlockAccumulator::SelfTimeFacet>
    return F64Seconds(((F64)(accumulator.mSelfTimeCounter) + (F64)(active_accumulator ? active_accumulator->mSelfTimeCounter : 0)) / (F64)LLTrace::BlockTimer::countsPerSecond());
}

-
S32 Recording::getSum(const StatType<TimeBlockAccumulator::CallCountFacet>& stat)
{
    update();
@@ -219,7 +210,7 @@ F64Seconds Recording::getPerSec(const StatType<TimeBlockAccumulator>& stat)
    const TimeBlockAccumulator& accumulator = mBuffers->mStackTimers[stat.getIndex()];
    const TimeBlockAccumulator* active_accumulator = mActiveBuffers ? &mActiveBuffers->mStackTimers[stat.getIndex()] : NULL;

    return F64Seconds((F64)(accumulator.mTotalTimeCounter + (active_accumulator ? active_accumulator->mTotalTimeCounter : 0))
        / ((F64)LLTrace::BlockTimer::countsPerSecond() * mElapsedSeconds.value()));
}

@@ -241,144 +232,9 @@ F32 Recording::getPerSec(const StatType<TimeBlockAccumulator::CallCountFacet>& s
    return (F32)(accumulator.mCalls + (active_accumulator ? active_accumulator->mCalls : 0)) / mElapsedSeconds.value();
}

-bool Recording::hasValue(const StatType<MemAccumulator>& stat)
-{
-   update();
-   const MemAccumulator& accumulator = mBuffers->mMemStats[stat.getIndex()];
-   const MemAccumulator* active_accumulator = mActiveBuffers ? &mActiveBuffers->mMemStats[stat.getIndex()] : NULL;
-   return accumulator.mSize.hasValue() || (active_accumulator && active_accumulator->mSize.hasValue() ? active_accumulator->mSize.hasValue() : false);
-}
-
-F64Kilobytes Recording::getMin(const StatType<MemAccumulator>& stat)
-{
-   update();
-   const MemAccumulator& accumulator = mBuffers->mMemStats[stat.getIndex()];
-   const MemAccumulator* active_accumulator = mActiveBuffers ? &mActiveBuffers->mMemStats[stat.getIndex()] : NULL;
-   return F64Bytes(llmin(accumulator.mSize.getMin(), (active_accumulator && active_accumulator->mSize.hasValue() ? active_accumulator->mSize.getMin() : F32_MAX)));
-}
-
-F64Kilobytes Recording::getMean(const StatType<MemAccumulator>& stat)
-{
-   update();
-   const MemAccumulator& accumulator = mBuffers->mMemStats[stat.getIndex()];
-   const MemAccumulator* active_accumulator = mActiveBuffers ? &mActiveBuffers->mMemStats[stat.getIndex()] : NULL;
||||
const MemAccumulator* active_accumulator = mActiveBuffers ? &mActiveBuffers->mMemStats[stat.getIndex()] : NULL;
|
||||
|
||||
if (active_accumulator && active_accumulator->mSize.hasValue())
|
||||
{
|
||||
F32 t = 0.0f;
|
||||
S32 div = accumulator.mSize.getSampleCount() + active_accumulator->mSize.getSampleCount();
|
||||
if (div > 0)
|
||||
{
|
||||
t = active_accumulator->mSize.getSampleCount() / div;
|
||||
}
|
||||
return F64Bytes(lerp(accumulator.mSize.getMean(), active_accumulator->mSize.getMean(), t));
|
||||
}
|
||||
else
|
||||
{
|
||||
return F64Bytes(accumulator.mSize.getMean());
|
||||
}
|
||||
}
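
Note for reviewers: the getMean() overloads above (and the SampleAccumulator/EventAccumulator versions later in this file) blend the frozen buffer's mean with the still-active buffer's mean via lerp(), weighted by sample count. A self-contained sketch of that arithmetic, with illustrative names that are not part of the viewer API:

// Sketch only: reproduces the weighted blend performed in Recording::getMean().
#include <cstdio>

static double lerp(double a, double b, double u) { return a + (b - a) * u; }

// frozen_* describe the recording's own buffer, active_* the thread's live buffer.
static double blended_mean(double frozen_mean, long frozen_count,
                           double active_mean, long active_count)
{
    long total = frozen_count + active_count;
    if (total <= 0 || active_count <= 0)
        return frozen_mean;                          // nothing to blend in
    double t = (double)active_count / (double)total; // fraction contributed by the active buffer
    return lerp(frozen_mean, active_mean, t);
}

int main()
{
    // 30 samples averaging 4.0 plus 10 active samples averaging 8.0 -> 5.00
    std::printf("%.2f\n", blended_mean(4.0, 30, 8.0, 10));
    return 0;
}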

F64Kilobytes Recording::getMax(const StatType<MemAccumulator>& stat)
{
    update();
    const MemAccumulator& accumulator = mBuffers->mMemStats[stat.getIndex()];
    const MemAccumulator* active_accumulator = mActiveBuffers ? &mActiveBuffers->mMemStats[stat.getIndex()] : NULL;
    return F64Bytes(llmax(accumulator.mSize.getMax(), active_accumulator && active_accumulator->mSize.hasValue() ? active_accumulator->mSize.getMax() : F32_MIN));
}

F64Kilobytes Recording::getStandardDeviation(const StatType<MemAccumulator>& stat)
{
    update();
    const MemAccumulator& accumulator = mBuffers->mMemStats[stat.getIndex()];
    const MemAccumulator* active_accumulator = mActiveBuffers ? &mActiveBuffers->mMemStats[stat.getIndex()] : NULL;
    if (active_accumulator && active_accumulator->hasValue())
    {
        F64 sum_of_squares = SampleAccumulator::mergeSumsOfSquares(accumulator.mSize, active_accumulator->mSize);
        return F64Bytes(sqrtf(sum_of_squares / (accumulator.mSize.getSamplingTime().value() + active_accumulator->mSize.getSamplingTime().value())));
    }
    else
    {
        return F64Bytes(accumulator.mSize.getStandardDeviation());
    }
}

F64Kilobytes Recording::getLastValue(const StatType<MemAccumulator>& stat)
{
    update();
    const MemAccumulator& accumulator = mBuffers->mMemStats[stat.getIndex()];
    const MemAccumulator* active_accumulator = mActiveBuffers ? &mActiveBuffers->mMemStats[stat.getIndex()] : NULL;
    return F64Bytes(active_accumulator ? active_accumulator->mSize.getLastValue() : accumulator.mSize.getLastValue());
}

bool Recording::hasValue(const StatType<MemAccumulator::AllocationFacet>& stat)
{
    update();
    const MemAccumulator& accumulator = mBuffers->mMemStats[stat.getIndex()];
    const MemAccumulator* active_accumulator = mActiveBuffers ? &mActiveBuffers->mMemStats[stat.getIndex()] : NULL;
    return accumulator.mAllocations.hasValue() || (active_accumulator ? active_accumulator->mAllocations.hasValue() : false);
}

F64Kilobytes Recording::getSum(const StatType<MemAccumulator::AllocationFacet>& stat)
{
    update();
    const MemAccumulator& accumulator = mBuffers->mMemStats[stat.getIndex()];
    const MemAccumulator* active_accumulator = mActiveBuffers ? &mActiveBuffers->mMemStats[stat.getIndex()] : NULL;
    return F64Bytes(accumulator.mAllocations.getSum() + (active_accumulator ? active_accumulator->mAllocations.getSum() : 0));
}

F64Kilobytes Recording::getPerSec(const StatType<MemAccumulator::AllocationFacet>& stat)
{
    update();
    const MemAccumulator& accumulator = mBuffers->mMemStats[stat.getIndex()];
    const MemAccumulator* active_accumulator = mActiveBuffers ? &mActiveBuffers->mMemStats[stat.getIndex()] : NULL;
    return F64Bytes((accumulator.mAllocations.getSum() + (active_accumulator ? active_accumulator->mAllocations.getSum() : 0)) / mElapsedSeconds.value());
}

S32 Recording::getSampleCount(const StatType<MemAccumulator::AllocationFacet>& stat)
{
    update();
    const MemAccumulator& accumulator = mBuffers->mMemStats[stat.getIndex()];
    const MemAccumulator* active_accumulator = mActiveBuffers ? &mActiveBuffers->mMemStats[stat.getIndex()] : NULL;
    return accumulator.mAllocations.getSampleCount() + (active_accumulator ? active_accumulator->mAllocations.getSampleCount() : 0);
}

bool Recording::hasValue(const StatType<MemAccumulator::DeallocationFacet>& stat)
{
    update();
    const MemAccumulator& accumulator = mBuffers->mMemStats[stat.getIndex()];
    const MemAccumulator* active_accumulator = mActiveBuffers ? &mActiveBuffers->mMemStats[stat.getIndex()] : NULL;
    return accumulator.mDeallocations.hasValue() || (active_accumulator ? active_accumulator->mDeallocations.hasValue() : false);
}


F64Kilobytes Recording::getSum(const StatType<MemAccumulator::DeallocationFacet>& stat)
{
    update();
    const MemAccumulator& accumulator = mBuffers->mMemStats[stat.getIndex()];
    const MemAccumulator* active_accumulator = mActiveBuffers ? &mActiveBuffers->mMemStats[stat.getIndex()] : NULL;
    return F64Bytes(accumulator.mDeallocations.getSum() + (active_accumulator ? active_accumulator->mDeallocations.getSum() : 0));
}

F64Kilobytes Recording::getPerSec(const StatType<MemAccumulator::DeallocationFacet>& stat)
{
    update();
    const MemAccumulator& accumulator = mBuffers->mMemStats[stat.getIndex()];
    const MemAccumulator* active_accumulator = mActiveBuffers ? &mActiveBuffers->mMemStats[stat.getIndex()] : NULL;
    return F64Bytes((accumulator.mDeallocations.getSum() + (active_accumulator ? active_accumulator->mDeallocations.getSum() : 0)) / mElapsedSeconds.value());
}

S32 Recording::getSampleCount(const StatType<MemAccumulator::DeallocationFacet>& stat)
{
    update();
    const MemAccumulator& accumulator = mBuffers->mMemStats[stat.getIndex()];
    const MemAccumulator* active_accumulator = mActiveBuffers ? &mActiveBuffers->mMemStats[stat.getIndex()] : NULL;
    return accumulator.mDeallocations.getSampleCount() + (active_accumulator ? active_accumulator->mDeallocations.getSampleCount() : 0);
}

bool Recording::hasValue(const StatType<CountAccumulator>& stat)
{
    update();
    const CountAccumulator& accumulator = mBuffers->mCounts[stat.getIndex()];
    const CountAccumulator* active_accumulator = mActiveBuffers ? &mActiveBuffers->mCounts[stat.getIndex()] : NULL;
    return accumulator.hasValue() || (active_accumulator ? active_accumulator->hasValue() : false);

@ -386,7 +242,7 @@ bool Recording::hasValue(const StatType<CountAccumulator>& stat)

F64 Recording::getSum(const StatType<CountAccumulator>& stat)
{
    update();
    const CountAccumulator& accumulator = mBuffers->mCounts[stat.getIndex()];
    const CountAccumulator* active_accumulator = mActiveBuffers ? &mActiveBuffers->mCounts[stat.getIndex()] : NULL;
    return accumulator.getSum() + (active_accumulator ? active_accumulator->getSum() : 0);

@ -394,7 +250,7 @@ F64 Recording::getSum(const StatType<CountAccumulator>& stat)

F64 Recording::getPerSec( const StatType<CountAccumulator>& stat )
{
    update();
    const CountAccumulator& accumulator = mBuffers->mCounts[stat.getIndex()];
    const CountAccumulator* active_accumulator = mActiveBuffers ? &mActiveBuffers->mCounts[stat.getIndex()] : NULL;
    F64 sum = accumulator.getSum() + (active_accumulator ? active_accumulator->getSum() : 0);

@ -403,7 +259,7 @@ F64 Recording::getPerSec( const StatType<CountAccumulator>& stat )

S32 Recording::getSampleCount( const StatType<CountAccumulator>& stat )
{
    update();
    const CountAccumulator& accumulator = mBuffers->mCounts[stat.getIndex()];
    const CountAccumulator* active_accumulator = mActiveBuffers ? &mActiveBuffers->mCounts[stat.getIndex()] : NULL;
    return accumulator.getSampleCount() + (active_accumulator ? active_accumulator->getSampleCount() : 0);

@ -411,7 +267,7 @@ S32 Recording::getSampleCount( const StatType<CountAccumulator>& stat )

bool Recording::hasValue(const StatType<SampleAccumulator>& stat)
{
    update();
    const SampleAccumulator& accumulator = mBuffers->mSamples[stat.getIndex()];
    const SampleAccumulator* active_accumulator = mActiveBuffers ? &mActiveBuffers->mSamples[stat.getIndex()] : NULL;
    return accumulator.hasValue() || (active_accumulator && active_accumulator->hasValue());

@ -419,7 +275,7 @@ bool Recording::hasValue(const StatType<SampleAccumulator>& stat)

F64 Recording::getMin( const StatType<SampleAccumulator>& stat )
{
    update();
    const SampleAccumulator& accumulator = mBuffers->mSamples[stat.getIndex()];
    const SampleAccumulator* active_accumulator = mActiveBuffers ? &mActiveBuffers->mSamples[stat.getIndex()] : NULL;
    return llmin(accumulator.getMin(), active_accumulator && active_accumulator->hasValue() ? active_accumulator->getMin() : F32_MAX);

@ -427,7 +283,7 @@ F64 Recording::getMin( const StatType<SampleAccumulator>& stat )

F64 Recording::getMax( const StatType<SampleAccumulator>& stat )
{
    update();
    const SampleAccumulator& accumulator = mBuffers->mSamples[stat.getIndex()];
    const SampleAccumulator* active_accumulator = mActiveBuffers ? &mActiveBuffers->mSamples[stat.getIndex()] : NULL;
    return llmax(accumulator.getMax(), active_accumulator && active_accumulator->hasValue() ? active_accumulator->getMax() : F32_MIN);

@ -435,17 +291,17 @@ F64 Recording::getMax( const StatType<SampleAccumulator>& stat )

F64 Recording::getMean( const StatType<SampleAccumulator>& stat )
{
    update();
    const SampleAccumulator& accumulator = mBuffers->mSamples[stat.getIndex()];
    const SampleAccumulator* active_accumulator = mActiveBuffers ? &mActiveBuffers->mSamples[stat.getIndex()] : NULL;
    if (active_accumulator && active_accumulator->hasValue())
    {
        F32 t = 0.0f;
        S32 div = accumulator.getSampleCount() + active_accumulator->getSampleCount();
        if (div > 0)
        {
            t = active_accumulator->getSampleCount() / div;
        }
        return lerp(accumulator.getMean(), active_accumulator->getMean(), t);
    }
    else

@ -456,7 +312,7 @@ F64 Recording::getMean( const StatType<SampleAccumulator>& stat )

F64 Recording::getStandardDeviation( const StatType<SampleAccumulator>& stat )
{
    update();
    const SampleAccumulator& accumulator = mBuffers->mSamples[stat.getIndex()];
    const SampleAccumulator* active_accumulator = mActiveBuffers ? &mActiveBuffers->mSamples[stat.getIndex()] : NULL;

@ -473,7 +329,7 @@ F64 Recording::getStandardDeviation( const StatType<SampleAccumulator>& stat )

F64 Recording::getLastValue( const StatType<SampleAccumulator>& stat )
{
    update();
    const SampleAccumulator& accumulator = mBuffers->mSamples[stat.getIndex()];
    const SampleAccumulator* active_accumulator = mActiveBuffers ? &mActiveBuffers->mSamples[stat.getIndex()] : NULL;
    return (active_accumulator && active_accumulator->hasValue() ? active_accumulator->getLastValue() : accumulator.getLastValue());

@ -481,7 +337,7 @@ F64 Recording::getLastValue( const StatType<SampleAccumulator>& stat )

S32 Recording::getSampleCount( const StatType<SampleAccumulator>& stat )
{
    update();
    const SampleAccumulator& accumulator = mBuffers->mSamples[stat.getIndex()];
    const SampleAccumulator* active_accumulator = mActiveBuffers ? &mActiveBuffers->mSamples[stat.getIndex()] : NULL;
    return accumulator.getSampleCount() + (active_accumulator && active_accumulator->hasValue() ? active_accumulator->getSampleCount() : 0);

@ -489,7 +345,7 @@ S32 Recording::getSampleCount( const StatType<SampleAccumulator>& stat )

bool Recording::hasValue(const StatType<EventAccumulator>& stat)
{
    update();
    const EventAccumulator& accumulator = mBuffers->mEvents[stat.getIndex()];
    const EventAccumulator* active_accumulator = mActiveBuffers ? &mActiveBuffers->mEvents[stat.getIndex()] : NULL;
    return accumulator.hasValue() || (active_accumulator && active_accumulator->hasValue());

@ -497,7 +353,7 @@ bool Recording::hasValue(const StatType<EventAccumulator>& stat)

F64 Recording::getSum( const StatType<EventAccumulator>& stat)
{
    update();
    const EventAccumulator& accumulator = mBuffers->mEvents[stat.getIndex()];
    const EventAccumulator* active_accumulator = mActiveBuffers ? &mActiveBuffers->mEvents[stat.getIndex()] : NULL;
    return (F64)(accumulator.getSum() + (active_accumulator && active_accumulator->hasValue() ? active_accumulator->getSum() : 0));

@ -505,7 +361,7 @@ F64 Recording::getSum( const StatType<EventAccumulator>& stat)

F64 Recording::getMin( const StatType<EventAccumulator>& stat )
{
    update();
    const EventAccumulator& accumulator = mBuffers->mEvents[stat.getIndex()];
    const EventAccumulator* active_accumulator = mActiveBuffers ? &mActiveBuffers->mEvents[stat.getIndex()] : NULL;
    return llmin(accumulator.getMin(), active_accumulator && active_accumulator->hasValue() ? active_accumulator->getMin() : F32_MAX);

@ -513,7 +369,7 @@ F64 Recording::getMin( const StatType<EventAccumulator>& stat )

F64 Recording::getMax( const StatType<EventAccumulator>& stat )
{
    update();
    const EventAccumulator& accumulator = mBuffers->mEvents[stat.getIndex()];
    const EventAccumulator* active_accumulator = mActiveBuffers ? &mActiveBuffers->mEvents[stat.getIndex()] : NULL;
    return llmax(accumulator.getMax(), active_accumulator && active_accumulator->hasValue() ? active_accumulator->getMax() : F32_MIN);

@ -521,17 +377,17 @@ F64 Recording::getMax( const StatType<EventAccumulator>& stat )

F64 Recording::getMean( const StatType<EventAccumulator>& stat )
{
    update();
    const EventAccumulator& accumulator = mBuffers->mEvents[stat.getIndex()];
    const EventAccumulator* active_accumulator = mActiveBuffers ? &mActiveBuffers->mEvents[stat.getIndex()] : NULL;
    if (active_accumulator && active_accumulator->hasValue())
    {
        F32 t = 0.0f;
        S32 div = accumulator.getSampleCount() + active_accumulator->getSampleCount();
        if (div > 0)
        {
            t = active_accumulator->getSampleCount() / div;
        }
        return lerp(accumulator.getMean(), active_accumulator->getMean(), t);
    }
    else

@ -542,7 +398,7 @@ F64 Recording::getMean( const StatType<EventAccumulator>& stat )

F64 Recording::getStandardDeviation( const StatType<EventAccumulator>& stat )
{
    update();
    const EventAccumulator& accumulator = mBuffers->mEvents[stat.getIndex()];
    const EventAccumulator* active_accumulator = mActiveBuffers ? &mActiveBuffers->mEvents[stat.getIndex()] : NULL;

@ -559,7 +415,7 @@ F64 Recording::getStandardDeviation( const StatType<EventAccumulator>& stat )

F64 Recording::getLastValue( const StatType<EventAccumulator>& stat )
{
    update();
    const EventAccumulator& accumulator = mBuffers->mEvents[stat.getIndex()];
    const EventAccumulator* active_accumulator = mActiveBuffers ? &mActiveBuffers->mEvents[stat.getIndex()] : NULL;
    return active_accumulator ? active_accumulator->getLastValue() : accumulator.getLastValue();

@ -567,7 +423,7 @@ F64 Recording::getLastValue( const StatType<EventAccumulator>& stat )

S32 Recording::getSampleCount( const StatType<EventAccumulator>& stat )
{
    update();
    const EventAccumulator& accumulator = mBuffers->mEvents[stat.getIndex()];
    const EventAccumulator* active_accumulator = mActiveBuffers ? &mActiveBuffers->mEvents[stat.getIndex()] : NULL;
    return accumulator.getSampleCount() + (active_accumulator ? active_accumulator->getSampleCount() : 0);

@ -577,7 +433,7 @@ S32 Recording::getSampleCount( const StatType<EventAccumulator>& stat )
// PeriodicRecording
///////////////////////////////////////////////////////////////////////

PeriodicRecording::PeriodicRecording( size_t num_periods, EPlayState state)
:   mAutoResize(num_periods == 0),
    mCurPeriod(0),
    mNumRecordedPeriods(0),

@ -585,15 +441,13 @@ PeriodicRecording::PeriodicRecording( size_t num_periods, EPlayState state)
    // code in several methods.
    mRecordingPeriods(num_periods ? num_periods : 1)
{
    LL_PROFILE_ZONE_SCOPED_CATEGORY_STATS;
    setPlayState(state);
    claim_alloc(gTraceMemStat, this);
}

PeriodicRecording::~PeriodicRecording()
{
    LL_PROFILE_ZONE_SCOPED_CATEGORY_STATS;
    disclaim_alloc(gTraceMemStat, this);
    LL_PROFILE_ZONE_SCOPED_CATEGORY_STATS;
}

void PeriodicRecording::nextPeriod()

@ -615,12 +469,11 @@ void PeriodicRecording::nextPeriod()

void PeriodicRecording::appendRecording(Recording& recording)
{
    LL_PROFILE_ZONE_SCOPED_CATEGORY_STATS;
    getCurRecording().appendRecording(recording);
    nextPeriod();
}


void PeriodicRecording::appendPeriodicRecording( PeriodicRecording& other )
{
    LL_PROFILE_ZONE_SCOPED_CATEGORY_STATS;

@ -693,16 +546,14 @@ F64Seconds PeriodicRecording::getDuration() const
    return duration;
}


LLTrace::Recording PeriodicRecording::snapshotCurRecording() const
{
    LL_PROFILE_ZONE_SCOPED_CATEGORY_STATS;
    Recording recording_copy(getCurRecording());
    recording_copy.stop();
    return recording_copy;
}


Recording& PeriodicRecording::getLastRecording()
{
    return getPrevRecording(1);

@ -737,19 +588,19 @@ const Recording& PeriodicRecording::getPrevRecording( size_t offset ) const

void PeriodicRecording::handleStart()
{
    LL_PROFILE_ZONE_SCOPED_CATEGORY_STATS;
    getCurRecording().start();
}

void PeriodicRecording::handleStop()
{
    LL_PROFILE_ZONE_SCOPED_CATEGORY_STATS;
    getCurRecording().pause();
}

void PeriodicRecording::handleReset()
{
    LL_PROFILE_ZONE_SCOPED_CATEGORY_STATS;
    getCurRecording().stop();

    if (mAutoResize)

@ -771,13 +622,13 @@ void PeriodicRecording::handleReset()

void PeriodicRecording::handleSplitTo(PeriodicRecording& other)
{
    LL_PROFILE_ZONE_SCOPED_CATEGORY_STATS;
    getCurRecording().splitTo(other.getCurRecording());
}

F64 PeriodicRecording::getPeriodMin( const StatType<EventAccumulator>& stat, size_t num_periods /*= std::numeric_limits<size_t>::max()*/ )
{
    LL_PROFILE_ZONE_SCOPED_CATEGORY_STATS;
    num_periods = llmin(num_periods, getNumRecordedPeriods());

    bool has_value = false;

@ -792,14 +643,14 @@ F64 PeriodicRecording::getPeriodMin( const StatType<EventAccumulator>& stat, siz
        }
    }

    return has_value
        ? min_val
        : NaN;
}

F64 PeriodicRecording::getPeriodMax( const StatType<EventAccumulator>& stat, size_t num_periods /*= std::numeric_limits<size_t>::max()*/ )
{
    LL_PROFILE_ZONE_SCOPED_CATEGORY_STATS;
    num_periods = llmin(num_periods, getNumRecordedPeriods());

    bool has_value = false;

@ -814,15 +665,15 @@ F64 PeriodicRecording::getPeriodMax( const StatType<EventAccumulator>& stat, siz
        }
    }

    return has_value
        ? max_val
        : NaN;
}

// calculates means using aggregates per period
F64 PeriodicRecording::getPeriodMean( const StatType<EventAccumulator>& stat, size_t num_periods /*= std::numeric_limits<size_t>::max()*/ )
{
    LL_PROFILE_ZONE_SCOPED_CATEGORY_STATS;
    num_periods = llmin(num_periods, getNumRecordedPeriods());

    F64 mean = 0;

@ -838,14 +689,14 @@ F64 PeriodicRecording::getPeriodMean( const StatType<EventAccumulator>& stat, si
        }
    }

    return valid_period_count
        ? mean / (F64)valid_period_count
        : NaN;
}

F64 PeriodicRecording::getPeriodStandardDeviation( const StatType<EventAccumulator>& stat, size_t num_periods /*= std::numeric_limits<size_t>::max()*/ )
{
    LL_PROFILE_ZONE_SCOPED_CATEGORY_STATS;
    num_periods = llmin(num_periods, getNumRecordedPeriods());

    F64 period_mean = getPeriodMean(stat, num_periods);

@ -870,7 +721,7 @@ F64 PeriodicRecording::getPeriodStandardDeviation( const StatType<EventAccumulat

F64 PeriodicRecording::getPeriodMin( const StatType<SampleAccumulator>& stat, size_t num_periods /*= std::numeric_limits<size_t>::max()*/ )
{
    LL_PROFILE_ZONE_SCOPED_CATEGORY_STATS;
    num_periods = llmin(num_periods, getNumRecordedPeriods());

    bool has_value = false;

@ -885,14 +736,14 @@ F64 PeriodicRecording::getPeriodMin( const StatType<SampleAccumulator>& stat, si
        }
    }

    return has_value
        ? min_val
        : NaN;
}

F64 PeriodicRecording::getPeriodMax(const StatType<SampleAccumulator>& stat, size_t num_periods /*= std::numeric_limits<size_t>::max()*/)
{
    LL_PROFILE_ZONE_SCOPED_CATEGORY_STATS;
    num_periods = llmin(num_periods, getNumRecordedPeriods());

    bool has_value = false;

@ -907,15 +758,15 @@ F64 PeriodicRecording::getPeriodMax(const StatType<SampleAccumulator>& stat, siz
        }
    }

    return has_value
        ? max_val
        : NaN;
}


F64 PeriodicRecording::getPeriodMean( const StatType<SampleAccumulator>& stat, size_t num_periods /*= std::numeric_limits<size_t>::max()*/ )
{
    LL_PROFILE_ZONE_SCOPED_CATEGORY_STATS;
    num_periods = llmin(num_periods, getNumRecordedPeriods());

    S32 valid_period_count = 0;

@ -938,7 +789,7 @@ F64 PeriodicRecording::getPeriodMean( const StatType<SampleAccumulator>& stat, s

F64 PeriodicRecording::getPeriodMedian( const StatType<SampleAccumulator>& stat, size_t num_periods /*= std::numeric_limits<size_t>::max()*/ )
{
    LL_PROFILE_ZONE_SCOPED_CATEGORY_STATS;
    num_periods = llmin(num_periods, getNumRecordedPeriods());

    std::vector<F64> buf;

@ -964,7 +815,7 @@ F64 PeriodicRecording::getPeriodMedian( const StatType<SampleAccumulator>& stat,

F64 PeriodicRecording::getPeriodStandardDeviation( const StatType<SampleAccumulator>& stat, size_t num_periods /*= std::numeric_limits<size_t>::max()*/ )
{
    LL_PROFILE_ZONE_SCOPED_CATEGORY_STATS;
    num_periods = llmin(num_periods, getNumRecordedPeriods());

    F64 period_mean = getPeriodMean(stat, num_periods);

@ -987,105 +838,13 @@ F64 PeriodicRecording::getPeriodStandardDeviation( const StatType<SampleAccumula
        : NaN;
}


F64Kilobytes PeriodicRecording::getPeriodMin( const StatType<MemAccumulator>& stat, size_t num_periods /*= std::numeric_limits<size_t>::max()*/ )
{
    LL_PROFILE_ZONE_SCOPED_CATEGORY_STATS;
    num_periods = llmin(num_periods, getNumRecordedPeriods());

    F64Kilobytes min_val(std::numeric_limits<F64>::max());
    for (size_t i = 1; i <= num_periods; i++)
    {
        Recording& recording = getPrevRecording(i);
        min_val = llmin(min_val, recording.getMin(stat));
    }

    return min_val;
}

F64Kilobytes PeriodicRecording::getPeriodMin(const MemStatHandle& stat, size_t num_periods)
{
    return getPeriodMin(static_cast<const StatType<MemAccumulator>&>(stat), num_periods);
}

F64Kilobytes PeriodicRecording::getPeriodMax(const StatType<MemAccumulator>& stat, size_t num_periods /*= std::numeric_limits<size_t>::max()*/)
{
    LL_PROFILE_ZONE_SCOPED_CATEGORY_STATS;
    num_periods = llmin(num_periods, getNumRecordedPeriods());

    F64Kilobytes max_val(0.0);
    for (size_t i = 1; i <= num_periods; i++)
    {
        Recording& recording = getPrevRecording(i);
        max_val = llmax(max_val, recording.getMax(stat));
    }

    return max_val;
}

F64Kilobytes PeriodicRecording::getPeriodMax(const MemStatHandle& stat, size_t num_periods)
{
    return getPeriodMax(static_cast<const StatType<MemAccumulator>&>(stat), num_periods);
}

F64Kilobytes PeriodicRecording::getPeriodMean( const StatType<MemAccumulator>& stat, size_t num_periods /*= std::numeric_limits<size_t>::max()*/ )
{
    LL_PROFILE_ZONE_SCOPED_CATEGORY_STATS;
    num_periods = llmin(num_periods, getNumRecordedPeriods());

    F64Kilobytes mean(0);

    for (size_t i = 1; i <= num_periods; i++)
    {
        Recording& recording = getPrevRecording(i);
        mean += recording.getMean(stat);
    }

    return mean / F64(num_periods);
}

F64Kilobytes PeriodicRecording::getPeriodMean(const MemStatHandle& stat, size_t num_periods)
{
    return getPeriodMean(static_cast<const StatType<MemAccumulator>&>(stat), num_periods);
}

F64Kilobytes PeriodicRecording::getPeriodStandardDeviation( const StatType<MemAccumulator>& stat, size_t num_periods /*= std::numeric_limits<size_t>::max()*/ )
{
    LL_PROFILE_ZONE_SCOPED_CATEGORY_STATS;
    num_periods = llmin(num_periods, getNumRecordedPeriods());

    F64Kilobytes period_mean = getPeriodMean(stat, num_periods);
    S32 valid_period_count = 0;
    F64 sum_of_squares = 0;

    for (size_t i = 1; i <= num_periods; i++)
    {
        Recording& recording = getPrevRecording(i);
        if (recording.hasValue(stat))
        {
            F64Kilobytes delta = recording.getMean(stat) - period_mean;
            sum_of_squares += delta.value() * delta.value();
            valid_period_count++;
        }
    }

    return F64Kilobytes(valid_period_count
        ? sqrt(sum_of_squares / (F64)valid_period_count)
        : NaN);
}

F64Kilobytes PeriodicRecording::getPeriodStandardDeviation(const MemStatHandle& stat, size_t num_periods)
{
    return getPeriodStandardDeviation(static_cast<const StatType<MemAccumulator>&>(stat), num_periods);
}

///////////////////////////////////////////////////////////////////////
// ExtendableRecording
///////////////////////////////////////////////////////////////////////

void ExtendableRecording::extend()
{
    LL_PROFILE_ZONE_SCOPED_CATEGORY_STATS;
    // push the data back to accepted recording
    mAcceptedRecording.appendRecording(mPotentialRecording);
    // flush data, so we can start from scratch

@ -1094,76 +853,72 @@ void ExtendableRecording::extend()

void ExtendableRecording::handleStart()
{
    LL_PROFILE_ZONE_SCOPED_CATEGORY_STATS;
    mPotentialRecording.start();
}

void ExtendableRecording::handleStop()
{
    LL_PROFILE_ZONE_SCOPED_CATEGORY_STATS;
    mPotentialRecording.pause();
}

void ExtendableRecording::handleReset()
{
    LL_PROFILE_ZONE_SCOPED_CATEGORY_STATS;
    mAcceptedRecording.reset();
    mPotentialRecording.reset();
}

void ExtendableRecording::handleSplitTo(ExtendableRecording& other)
{
    LL_PROFILE_ZONE_SCOPED_CATEGORY_STATS;
    mPotentialRecording.splitTo(other.mPotentialRecording);
}


///////////////////////////////////////////////////////////////////////
// ExtendablePeriodicRecording
///////////////////////////////////////////////////////////////////////


ExtendablePeriodicRecording::ExtendablePeriodicRecording()
:   mAcceptedRecording(0),
    mPotentialRecording(0)
{}

void ExtendablePeriodicRecording::extend()
{
    LL_PROFILE_ZONE_SCOPED_CATEGORY_STATS;
    // push the data back to accepted recording
    mAcceptedRecording.appendPeriodicRecording(mPotentialRecording);
    // flush data, so we can start from scratch
    mPotentialRecording.reset();
}


void ExtendablePeriodicRecording::handleStart()
{
    LL_PROFILE_ZONE_SCOPED_CATEGORY_STATS;
    mPotentialRecording.start();
}

void ExtendablePeriodicRecording::handleStop()
{
    LL_PROFILE_ZONE_SCOPED_CATEGORY_STATS;
    mPotentialRecording.pause();
}

void ExtendablePeriodicRecording::handleReset()
{
    LL_PROFILE_ZONE_SCOPED_CATEGORY_STATS;
    mAcceptedRecording.reset();
    mPotentialRecording.reset();
}

void ExtendablePeriodicRecording::handleSplitTo(ExtendablePeriodicRecording& other)
{
    LL_PROFILE_ZONE_SCOPED_CATEGORY_STATS;
    mPotentialRecording.splitTo(other.mPotentialRecording);
}


PeriodicRecording& get_frame_recording()
{
    static thread_local PeriodicRecording sRecording(200, PeriodicRecording::STARTED);

@ -1174,7 +929,7 @@ PeriodicRecording& get_frame_recording()

void LLStopWatchControlsMixinCommon::start()
{
    LL_PROFILE_ZONE_SCOPED_CATEGORY_STATS;
    switch (mPlayState)
    {
    case STOPPED:

@ -1196,7 +951,7 @@ void LLStopWatchControlsMixinCommon::start()

void LLStopWatchControlsMixinCommon::stop()
{
    LL_PROFILE_ZONE_SCOPED_CATEGORY_STATS;
    switch (mPlayState)
    {
    case STOPPED:

@ -1216,7 +971,7 @@ void LLStopWatchControlsMixinCommon::stop()

void LLStopWatchControlsMixinCommon::pause()
{
    LL_PROFILE_ZONE_SCOPED_CATEGORY_STATS;
    switch (mPlayState)
    {
    case STOPPED:

@ -1236,7 +991,7 @@ void LLStopWatchControlsMixinCommon::pause()

void LLStopWatchControlsMixinCommon::unpause()
{
    LL_PROFILE_ZONE_SCOPED_CATEGORY_STATS;
    switch (mPlayState)
    {
    case STOPPED:

@ -1256,7 +1011,7 @@ void LLStopWatchControlsMixinCommon::unpause()

void LLStopWatchControlsMixinCommon::resume()
{
    LL_PROFILE_ZONE_SCOPED_CATEGORY_STATS;
    switch (mPlayState)
    {
    case STOPPED:

@ -1277,7 +1032,7 @@ void LLStopWatchControlsMixinCommon::resume()

void LLStopWatchControlsMixinCommon::restart()
{
    LL_PROFILE_ZONE_SCOPED_CATEGORY_STATS;
    switch (mPlayState)
    {
    case STOPPED:

@ -1301,13 +1056,13 @@ void LLStopWatchControlsMixinCommon::restart()

void LLStopWatchControlsMixinCommon::reset()
{
    LL_PROFILE_ZONE_SCOPED_CATEGORY_STATS;
    handleReset();
}

void LLStopWatchControlsMixinCommon::setPlayState( EPlayState state )
{
    LL_PROFILE_ZONE_SCOPED_CATEGORY_STATS;
    switch(state)
    {
    case STOPPED:

@ -1,25 +1,25 @@
/**
 * @file lltracerecording.h
 * @brief Sampling object for collecting runtime statistics originating from lltrace.
 *
 * $LicenseInfo:firstyear=2001&license=viewerlgpl$
 * Second Life Viewer Source Code
 * Copyright (C) 2012, Linden Research, Inc.
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation;
 * version 2.1 of the License only.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 *
 * Linden Research, Inc., 945 Battery Street, San Francisco, CA 94111 USA
 * $/LicenseInfo$
 */

@ -112,7 +112,6 @@ private:
    // atomically stop this object while starting the other
    // no data can be missed in between stop and start
    virtual void handleSplitTo(DERIVED& other) {};

};

namespace LLTrace

@ -129,8 +128,6 @@ namespace LLTrace
    template<typename T>
    class EventStatHandle;

    class MemStatHandle;

    template<typename T>
    struct RelatedTypes
    {

@ -152,7 +149,7 @@ namespace LLTrace
        typedef S32 sum_t;
    };

    class Recording
    :   public LLStopWatchControlsMixin<Recording>
    {
    public:

@ -182,24 +179,6 @@ namespace LLTrace
        F64Seconds getPerSec(const StatType<TimeBlockAccumulator::SelfTimeFacet>& stat);
        F32 getPerSec(const StatType<TimeBlockAccumulator::CallCountFacet>& stat);

        // Memory accessors
        bool hasValue(const StatType<MemAccumulator>& stat);
        F64Kilobytes getMin(const StatType<MemAccumulator>& stat);
        F64Kilobytes getMean(const StatType<MemAccumulator>& stat);
        F64Kilobytes getMax(const StatType<MemAccumulator>& stat);
        F64Kilobytes getStandardDeviation(const StatType<MemAccumulator>& stat);
        F64Kilobytes getLastValue(const StatType<MemAccumulator>& stat);

        bool hasValue(const StatType<MemAccumulator::AllocationFacet>& stat);
        F64Kilobytes getSum(const StatType<MemAccumulator::AllocationFacet>& stat);
        F64Kilobytes getPerSec(const StatType<MemAccumulator::AllocationFacet>& stat);
        S32 getSampleCount(const StatType<MemAccumulator::AllocationFacet>& stat);

        bool hasValue(const StatType<MemAccumulator::DeallocationFacet>& stat);
        F64Kilobytes getSum(const StatType<MemAccumulator::DeallocationFacet>& stat);
        F64Kilobytes getPerSec(const StatType<MemAccumulator::DeallocationFacet>& stat);
        S32 getSampleCount(const StatType<MemAccumulator::DeallocationFacet>& stat);

        // CountStatHandle accessors
        bool hasValue(const StatType<CountAccumulator>& stat);
        F64 getSum(const StatType<CountAccumulator>& stat);

@ -318,7 +297,7 @@ namespace LLTrace
        /*virtual*/ void handleSplitTo(Recording& other);

        // returns data for current thread
        class ThreadRecorder* getThreadRecorder();

        LLTimer mSamplingTimer;
        F64Seconds mElapsedSeconds;

@ -335,10 +314,10 @@ namespace LLTrace
        ~PeriodicRecording();

        void nextPeriod();
        auto getNumRecordedPeriods()
        {
            // current period counts if not active
            return mNumRecordedPeriods + (isStarted() ? 0 : 1);
        }

        F64Seconds getDuration() const;

@ -367,7 +346,7 @@ namespace LLTrace
            }
            return num_samples;
        }


        //
        // PERIODIC MIN
        //

@ -376,7 +355,7 @@ namespace LLTrace
        template <typename T>
        typename T::value_t getPeriodMin(const StatType<T>& stat, size_t num_periods = std::numeric_limits<size_t>::max())
        {
            LL_PROFILE_ZONE_SCOPED_CATEGORY_STATS;
            num_periods = llmin(num_periods, getNumRecordedPeriods());

            bool has_value = false;

@ -391,15 +370,15 @@ namespace LLTrace
            }
        }

        return has_value
            ? min_val
            : T::getDefaultValue();
        }

        template<typename T>
        T getPeriodMin(const CountStatHandle<T>& stat, size_t num_periods = std::numeric_limits<size_t>::max())
        {
            LL_PROFILE_ZONE_SCOPED_CATEGORY_STATS;
            return T(getPeriodMin(static_cast<const StatType<CountAccumulator>&>(stat), num_periods));
        }

@ -407,7 +386,7 @@ namespace LLTrace
        template<typename T>
        T getPeriodMin(const SampleStatHandle<T>& stat, size_t num_periods = std::numeric_limits<size_t>::max())
        {
            LL_PROFILE_ZONE_SCOPED_CATEGORY_STATS;
            return T(getPeriodMin(static_cast<const StatType<SampleAccumulator>&>(stat), num_periods));
        }

@ -415,17 +394,14 @@ namespace LLTrace
        template<typename T>
        T getPeriodMin(const EventStatHandle<T>& stat, size_t num_periods = std::numeric_limits<size_t>::max())
        {
            LL_PROFILE_ZONE_SCOPED_CATEGORY_STATS;
            return T(getPeriodMin(static_cast<const StatType<EventAccumulator>&>(stat), num_periods));
        }

        F64Kilobytes getPeriodMin(const StatType<MemAccumulator>& stat, size_t num_periods = std::numeric_limits<size_t>::max());
        F64Kilobytes getPeriodMin(const MemStatHandle& stat, size_t num_periods = std::numeric_limits<size_t>::max());

        template <typename T>
        typename RelatedTypes<typename T::value_t>::fractional_t getPeriodMinPerSec(const StatType<T>& stat, size_t num_periods = std::numeric_limits<size_t>::max())
        {
            LL_PROFILE_ZONE_SCOPED_CATEGORY_STATS;
            num_periods = llmin(num_periods, getNumRecordedPeriods());

            typename RelatedTypes<typename T::value_t>::fractional_t min_val(std::numeric_limits<F64>::max());

@ -440,7 +416,7 @@ namespace LLTrace
        template<typename T>
        typename RelatedTypes<T>::fractional_t getPeriodMinPerSec(const CountStatHandle<T>& stat, size_t num_periods = std::numeric_limits<size_t>::max())
        {
            LL_PROFILE_ZONE_SCOPED_CATEGORY_STATS;
            return typename RelatedTypes<T>::fractional_t(getPeriodMinPerSec(static_cast<const StatType<CountAccumulator>&>(stat), num_periods));
        }

@ -452,7 +428,7 @@ namespace LLTrace
        template <typename T>
        typename T::value_t getPeriodMax(const StatType<T>& stat, size_t num_periods = std::numeric_limits<size_t>::max())
        {
            LL_PROFILE_ZONE_SCOPED_CATEGORY_STATS;
            num_periods = llmin(num_periods, getNumRecordedPeriods());

            bool has_value = false;

@ -467,15 +443,15 @@ namespace LLTrace
            }
        }

        return has_value
            ? max_val
            : T::getDefaultValue();
        }

        template<typename T>
        T getPeriodMax(const CountStatHandle<T>& stat, size_t num_periods = std::numeric_limits<size_t>::max())
        {
            LL_PROFILE_ZONE_SCOPED_CATEGORY_STATS;
            return T(getPeriodMax(static_cast<const StatType<CountAccumulator>&>(stat), num_periods));
        }

@ -483,7 +459,7 @@ namespace LLTrace
        template<typename T>
        T getPeriodMax(const SampleStatHandle<T>& stat, size_t num_periods = std::numeric_limits<size_t>::max())
        {
            LL_PROFILE_ZONE_SCOPED_CATEGORY_STATS;
            return T(getPeriodMax(static_cast<const StatType<SampleAccumulator>&>(stat), num_periods));
        }

@ -491,17 +467,14 @@ namespace LLTrace
        template<typename T>
        T getPeriodMax(const EventStatHandle<T>& stat, size_t num_periods = std::numeric_limits<size_t>::max())
        {
            LL_PROFILE_ZONE_SCOPED_CATEGORY_STATS;
            return T(getPeriodMax(static_cast<const StatType<EventAccumulator>&>(stat), num_periods));
        }

        F64Kilobytes getPeriodMax(const StatType<MemAccumulator>& stat, size_t num_periods = std::numeric_limits<size_t>::max());
        F64Kilobytes getPeriodMax(const MemStatHandle& stat, size_t num_periods = std::numeric_limits<size_t>::max());

        template <typename T>
        typename RelatedTypes<typename T::value_t>::fractional_t getPeriodMaxPerSec(const StatType<T>& stat, size_t num_periods = std::numeric_limits<size_t>::max())
        {
            LL_PROFILE_ZONE_SCOPED_CATEGORY_STATS;
            num_periods = llmin(num_periods, getNumRecordedPeriods());

            F64 max_val = std::numeric_limits<F64>::min();

@ -516,7 +489,7 @@ namespace LLTrace
        template<typename T>
        typename RelatedTypes<T>::fractional_t getPeriodMaxPerSec(const CountStatHandle<T>& stat, size_t num_periods = std::numeric_limits<size_t>::max())
        {
            LL_PROFILE_ZONE_SCOPED_CATEGORY_STATS;
            return typename RelatedTypes<T>::fractional_t(getPeriodMaxPerSec(static_cast<const StatType<CountAccumulator>&>(stat), num_periods));
        }

@ -528,7 +501,7 @@ namespace LLTrace
        template <typename T>
        typename RelatedTypes<typename T::value_t>::fractional_t getPeriodMean(const StatType<T >& stat, size_t num_periods = std::numeric_limits<size_t>::max())
        {
            LL_PROFILE_ZONE_SCOPED_CATEGORY_STATS;
            num_periods = llmin(num_periods, getNumRecordedPeriods());

            typename RelatedTypes<typename T::value_t>::fractional_t mean(0);

@ -549,14 +522,14 @@ namespace LLTrace
        template<typename T>
        typename RelatedTypes<T>::fractional_t getPeriodMean(const CountStatHandle<T>& stat, size_t num_periods = std::numeric_limits<size_t>::max())
        {
            LL_PROFILE_ZONE_SCOPED_CATEGORY_STATS;
            return typename RelatedTypes<T>::fractional_t(getPeriodMean(static_cast<const StatType<CountAccumulator>&>(stat), num_periods));
        }
        F64 getPeriodMean(const StatType<SampleAccumulator>& stat, size_t num_periods = std::numeric_limits<size_t>::max());
        template<typename T>
        typename RelatedTypes<T>::fractional_t getPeriodMean(const SampleStatHandle<T>& stat, size_t num_periods = std::numeric_limits<size_t>::max())
        {
            LL_PROFILE_ZONE_SCOPED_CATEGORY_STATS;
            return typename RelatedTypes<T>::fractional_t(getPeriodMean(static_cast<const StatType<SampleAccumulator>&>(stat), num_periods));
        }

@ -564,17 +537,14 @@ namespace LLTrace
        template<typename T>
        typename RelatedTypes<T>::fractional_t getPeriodMean(const EventStatHandle<T>& stat, size_t num_periods = std::numeric_limits<size_t>::max())
        {
            LL_PROFILE_ZONE_SCOPED_CATEGORY_STATS;
            return typename RelatedTypes<T>::fractional_t(getPeriodMean(static_cast<const StatType<EventAccumulator>&>(stat), num_periods));
        }

        F64Kilobytes getPeriodMean(const StatType<MemAccumulator>& stat, size_t num_periods = std::numeric_limits<size_t>::max());
        F64Kilobytes getPeriodMean(const MemStatHandle& stat, size_t num_periods = std::numeric_limits<size_t>::max());

        template <typename T>
        typename RelatedTypes<typename T::value_t>::fractional_t getPeriodMeanPerSec(const StatType<T>& stat, size_t num_periods = std::numeric_limits<size_t>::max())
        {
            LL_PROFILE_ZONE_SCOPED_CATEGORY_STATS;
            num_periods = llmin(num_periods, getNumRecordedPeriods());

            typename RelatedTypes<typename T::value_t>::fractional_t mean = 0;

@ -596,7 +566,7 @@ namespace LLTrace
        template<typename T>
        typename RelatedTypes<T>::fractional_t getPeriodMeanPerSec(const CountStatHandle<T>& stat, size_t num_periods = std::numeric_limits<size_t>::max())
        {
            LL_PROFILE_ZONE_SCOPED_CATEGORY_STATS;
            return typename RelatedTypes<T>::fractional_t(getPeriodMeanPerSec(static_cast<const StatType<CountAccumulator>&>(stat), num_periods));
        }

@ -635,10 +605,10 @@ namespace LLTrace

        F64 getPeriodStandardDeviation(const StatType<SampleAccumulator>& stat, size_t num_periods = std::numeric_limits<size_t>::max());

        template<typename T>
        typename RelatedTypes<T>::fractional_t getPeriodStandardDeviation(const SampleStatHandle<T>& stat, size_t num_periods = std::numeric_limits<size_t>::max())
        {
            LL_PROFILE_ZONE_SCOPED_CATEGORY_STATS;
            return typename RelatedTypes<T>::fractional_t(getPeriodStandardDeviation(static_cast<const StatType<SampleAccumulator>&>(stat), num_periods));
        }

@ -646,13 +616,10 @@ namespace LLTrace
        template<typename T>
        typename RelatedTypes<T>::fractional_t getPeriodStandardDeviation(const EventStatHandle<T>& stat, size_t num_periods = std::numeric_limits<size_t>::max())
        {
            LL_PROFILE_ZONE_SCOPED_CATEGORY_STATS;
            return typename RelatedTypes<T>::fractional_t(getPeriodStandardDeviation(static_cast<const StatType<EventAccumulator>&>(stat), num_periods));
        }

        F64Kilobytes getPeriodStandardDeviation(const StatType<MemAccumulator>& stat, size_t num_periods = std::numeric_limits<size_t>::max());
        F64Kilobytes getPeriodStandardDeviation(const MemStatHandle& stat, size_t num_periods = std::numeric_limits<size_t>::max());

    private:
        // implementation for LLStopWatchControlsMixin
        /*virtual*/ void handleStart();

@ -731,7 +698,7 @@ namespace LLTrace

        PeriodicRecording& getResults() { return mAcceptedRecording; }
        const PeriodicRecording& getResults() const {return mAcceptedRecording;}


        void nextPeriod() { mPotentialRecording.nextPeriod(); }

    private:

@ -32,7 +32,7 @@

namespace LLTrace
{
extern MemStatHandle gTraceMemStat;
//extern MemStatHandle gTraceMemStat;

static ThreadRecorder* sMasterThreadRecorder = NULL;

@ -81,9 +81,9 @@ void ThreadRecorder::init()

    BlockTimer::getRootTimeBlock().getCurrentAccumulator().mActiveCount = 1;

    claim_alloc(gTraceMemStat, this);
    claim_alloc(gTraceMemStat, mRootTimer);
    claim_alloc(gTraceMemStat, sizeof(TimeBlockTreeNode) * mNumTimeBlockTreeNodes);
    //claim_alloc(gTraceMemStat, this);
    //claim_alloc(gTraceMemStat, mRootTimer);
    //claim_alloc(gTraceMemStat, sizeof(TimeBlockTreeNode) * mNumTimeBlockTreeNodes);
#endif
}

@ -101,9 +101,9 @@ ThreadRecorder::~ThreadRecorder()
#if LL_TRACE_ENABLED
    LLThreadLocalSingletonPointer<BlockTimerStackRecord>::setInstance(NULL);

    disclaim_alloc(gTraceMemStat, this);
    disclaim_alloc(gTraceMemStat, sizeof(BlockTimer));
    disclaim_alloc(gTraceMemStat, sizeof(TimeBlockTreeNode) * mNumTimeBlockTreeNodes);
    //disclaim_alloc(gTraceMemStat, this);
    //disclaim_alloc(gTraceMemStat, sizeof(BlockTimer));
    //disclaim_alloc(gTraceMemStat, sizeof(TimeBlockTreeNode) * mNumTimeBlockTreeNodes);

    deactivate(&mThreadRecordingBuffers);

@ -164,8 +164,10 @@ void LLUriParser::extractParts()
#if LL_DARWIN
typedef void(*sighandler_t)(int);
jmp_buf return_to_normalize;
static int sLastSignal = 0;
void uri_signal_handler(int signal)
{
    sLastSignal = signal;
    // Apparently signal handler throwing an exception doesn't work.
    // This is ugly and unsafe due to not unwinding content of uriparser library,
    // but unless we have a way to catch this as NSexception, jump appears to be the only option.

@ -179,8 +181,10 @@ S32 LLUriParser::normalize()
    if (!mRes)
    {
#if LL_DARWIN
        sighandler_t last_handler;
        last_handler = signal(SIGILL, &uri_signal_handler); // illegal instruction
        sighandler_t last_sigill_handler, last_sigbus_handler;
        last_sigill_handler = signal(SIGILL, &uri_signal_handler); // illegal instruction
        last_sigbus_handler = signal(SIGBUS, &uri_signal_handler);

        if (setjmp(return_to_normalize))
        {
            // Issue: external library crashed via signal

@ -194,8 +198,9 @@ S32 LLUriParser::normalize()
            // if this can be handled by NSexception, it needs to be remade
            llassert(0);

            LL_WARNS() << "Uriparser crashed with SIGILL, while processing: " << mNormalizedUri << LL_ENDL;
            signal(SIGILL, last_handler);
            LL_WARNS() << "Uriparser crashed with " << sLastSignal << " , while processing: " << mNormalizedUri << LL_ENDL;
            signal(SIGILL, last_sigill_handler);
            signal(SIGBUS, last_sigbus_handler);
            return 1;
        }
#endif

@ -203,7 +208,8 @@ S32 LLUriParser::normalize()
        mRes = uriNormalizeSyntaxExA(&mUri, URI_NORMALIZE_SCHEME | URI_NORMALIZE_HOST);

#if LL_DARWIN
        signal(SIGILL, last_handler);
        signal(SIGILL, last_sigill_handler);
        signal(SIGBUS, last_sigbus_handler);
#endif

        if (!mRes)

@ -226,7 +232,7 @@ S32 LLUriParser::normalize()
        }
    }

    if(mTmpScheme)
    if(mTmpScheme && mNormalizedUri.size() > 7)
    {
        mNormalizedUri = mNormalizedUri.substr(7);
        mTmpScheme = false;
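
Note for reviewers: the LL_DARWIN changes above extend the existing SIGILL guard around uriNormalizeSyntaxExA() to also catch SIGBUS, record which signal fired, and restore both previous handlers. A self-contained sketch of the signal + setjmp/longjmp pattern being relied on (handler name and guarded call are stand-ins, not viewer code):

// Sketch of the guard pattern: trap a fatal signal from a library call and
// jump back to a known point instead of crashing. SIGBUS is POSIX-only.
#include <csetjmp>
#include <csignal>
#include <cstdio>

static std::jmp_buf s_recover;
static volatile std::sig_atomic_t s_last_signal = 0;

extern "C" void guard_handler(int sig)
{
    s_last_signal = sig;
    std::longjmp(s_recover, 1);     // unwind back to the setjmp() site
}

static void risky_library_call() { /* stand-in for the external library call */ }

int main()
{
    void (*old_ill)(int) = std::signal(SIGILL, guard_handler);
    void (*old_bus)(int) = std::signal(SIGBUS, guard_handler);

    if (setjmp(s_recover))
        std::fprintf(stderr, "guarded call died with signal %d\n", (int)s_last_signal);
    else
        risky_library_call();

    // Always restore the previous handlers, as the patch does.
    std::signal(SIGILL, old_ill);
    std::signal(SIGBUS, old_bus);
    return 0;
}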

@ -1000,7 +1000,7 @@ bool LLUUID::notNull() const
bool LLUUID::isNull() const
{
    U32* word = (U32*)mData;
    // If all bits are zero, return !0 == TRUE
    // If all bits are zero, return !0 == true
    return !(word[0] | word[1] | word[2] | word[3]);
}


@ -65,8 +65,8 @@ public:

    static LLUUID generateNewID(std::string stream = ""); //static version of above for use in initializer expressions such as constructor params, etc.

    bool set(const char *in_string, bool emit = true); // Convert from string, if emit is FALSE, do not emit warnings
    bool set(const std::string& in_string, bool emit = true); // Convert from string, if emit is FALSE, do not emit warnings
    bool set(const char *in_string, bool emit = true); // Convert from string, if emit is false, do not emit warnings
    bool set(const std::string& in_string, bool emit = true); // Convert from string, if emit is false, do not emit warnings
    void setNull(); // Faster than setting to LLUUID::null.

    S32 cmpTime(uuid_time_t *t1, uuid_time_t *t2);

@ -137,7 +137,7 @@ public:
|
|||
LLWorkerClass(LLWorkerThread* workerthread, const std::string& name);
|
||||
virtual ~LLWorkerClass();
|
||||
|
||||
// pure virtual, called from WORKER THREAD, returns TRUE if done
|
||||
// pure virtual, called from WORKER THREAD, returns true if done
|
||||
virtual bool doWork(S32 param)=0; // Called from WorkRequest::processRequest()
|
||||
// virtual, called from finishRequest() after completed or aborted
|
||||
virtual void finishWork(S32 param, bool completed); // called from finishRequest() (WORK THREAD)
|
||||
|
|
|
|||
|
|
@ -156,18 +156,15 @@ typedef int intptr_t;
|
|||
* type.
|
||||
*/
|
||||
// narrow_holder is a struct that accepts the passed value as its original
|
||||
// type and provides templated conversion functions to other types. Once we're
|
||||
// building with compilers that support Class Template Argument Deduction, we
|
||||
// can rename this class template 'narrow' and eliminate the narrow() factory
|
||||
// function below.
|
||||
// type and provides templated conversion functions to other types.
|
||||
template <typename FROM>
|
||||
class narrow_holder
|
||||
class narrow
|
||||
{
|
||||
private:
|
||||
FROM mValue;
|
||||
|
||||
public:
|
||||
narrow_holder(FROM value): mValue(value) {}
|
||||
narrow(FROM value): mValue(value) {}
|
||||
|
||||
/*---------------------- Narrowing unsigned to signed ----------------------*/
|
||||
template <typename TO,
|
||||
|
|
@ -207,13 +204,4 @@ public:
|
|||
}
|
||||
};
|
||||
|
||||
/// narrow() factory function returns a narrow_holder<FROM>(), which can be
|
||||
/// implicitly converted to the target type.
|
||||
template <typename FROM>
|
||||
inline
|
||||
narrow_holder<FROM> narrow(FROM value)
|
||||
{
|
||||
return { value };
|
||||
}
|
||||
|
||||
#endif
|
||||
|
|
|
|||
|
|
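The stdtypes.h hunks drop the narrow() factory because, with C++17 class template argument deduction, the class template itself can be spelled narrow(value). A toy standalone sketch of the same pattern; the overflow check here is illustrative and simpler than the real class.

// Toy version of the CTAD pattern: with the deduction implicit from the
// constructor, narrow(value) constructs narrow<FROM> directly, so the old
// make-style factory function is no longer needed.
#include <cassert>
#include <cstddef>

template <typename FROM>
class narrow
{
private:
    FROM mValue;

public:
    narrow(FROM value): mValue(value) {}

    // Illustrative conversion; the real class has separate, more careful overloads.
    template <typename TO>
    operator TO() const
    {
        TO result = static_cast<TO>(mValue);
        assert(static_cast<FROM>(result) == mValue); // lossless round-trip
        return result;
    }
};

int main()
{
    std::size_t big = 42;
    int small = narrow(big);   // C++17: FROM deduced as std::size_t, no factory needed
    assert(small == 42);
}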
@@ -46,11 +46,12 @@ namespace tut
// the real time required for each push() call. Explicitly increment
// the timestamp for each one -- but since we're passing explicit
// timestamps, make the queue reorder them.
-queue.push(Queue::TimeTuple(Queue::Clock::now() + 200ms, "ghi"));
+auto now{ Queue::Clock::now() };
+queue.push(Queue::TimeTuple(now + 200ms, "ghi"));
// Given the various push() overloads, you have to match the type
// exactly: conversions are ambiguous.
queue.push("abc"s);
-queue.push(Queue::Clock::now() + 100ms, "def");
+queue.push(now + 100ms, "def");
queue.close();
auto entry = queue.pop();
ensure_equals("failed to pop first", std::get<0>(entry), "abc"s);

@@ -60,12 +60,15 @@ struct sleepy_robin: public boost::fibers::algo::round_robin
/*****************************************************************************
* ThreadPoolBase
*****************************************************************************/
-LL::ThreadPoolBase::ThreadPoolBase(const std::string& name, size_t threads,
-WorkQueueBase* queue):
+LL::ThreadPoolBase::ThreadPoolBase(const std::string& name,
+size_t threads,
+WorkQueueBase* queue,
+bool auto_shutdown):
super(name),
mName("ThreadPool:" + name),
mThreadCount(getConfiguredWidth(name, threads)),
-mQueue(queue)
+mQueue(queue),
+mAutomaticShutdown(auto_shutdown)
{}

void LL::ThreadPoolBase::start()

@@ -79,6 +82,14 @@ void LL::ThreadPoolBase::start()
run(tname);
});
}

+if (!mAutomaticShutdown)
+{
+// Some threads, like main window's might need to run a bit longer
+// to wait for a proper shutdown message
+return;
+}

// Listen on "LLApp", and when the app is shutting down, close the queue
// and join the workers.
LLEventPumps::instance().obtain("LLApp").listen(

@@ -113,8 +124,11 @@ void LL::ThreadPoolBase::close()
mQueue->close();
for (auto& pair: mThreads)
{
-LL_DEBUGS("ThreadPool") << mName << " waiting on thread " << pair.first << LL_ENDL;
-pair.second.join();
+if (pair.second.joinable())
+{
+LL_DEBUGS("ThreadPool") << mName << " waiting on thread " << pair.first << LL_ENDL;
+pair.second.join();
+}
}
LL_DEBUGS("ThreadPool") << mName << " shutdown complete" << LL_ENDL;
}

@@ -40,7 +40,7 @@ namespace LL
* overrides this parameter.
*/
ThreadPoolBase(const std::string& name, size_t threads,
-WorkQueueBase* queue);
+WorkQueueBase* queue, bool auto_shutdown = true);
virtual ~ThreadPoolBase();

/**

@@ -55,7 +55,7 @@ namespace LL
* ThreadPool listens for application shutdown messages on the "LLApp"
* LLEventPump. Call close() to shut down this ThreadPool early.
*/
-void close();
+virtual void close();

std::string getName() const { return mName; }
size_t getWidth() const { return mThreads.size(); }

@@ -87,13 +87,14 @@ namespace LL
protected:
std::unique_ptr<WorkQueueBase> mQueue;
+std::vector<std::pair<std::string, std::thread>> mThreads;
+bool mAutomaticShutdown;

private:
void run(const std::string& name);

std::string mName;
size_t mThreadCount;
-std::vector<std::pair<std::string, std::thread>> mThreads;
};

/**

@@ -117,8 +118,11 @@ namespace LL
* Constraining the queue can cause a submitter to block. Do not
* constrain any ThreadPool accepting work from the main thread.
*/
-ThreadPoolUsing(const std::string& name, size_t threads=1, size_t capacity=1024*1024):
-ThreadPoolBase(name, threads, new queue_t(name, capacity))
+ThreadPoolUsing(const std::string& name,
+size_t threads=1,
+size_t capacity=1024*1024,
+bool auto_shutdown = true):
+ThreadPoolBase(name, threads, new queue_t(name, capacity), auto_shutdown)
{}
~ThreadPoolUsing() override {}
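A hypothetical caller-side sketch of the new auto_shutdown flag added to ThreadPoolBase/ThreadPoolUsing above. The pool alias, header name and getQueue().post() call mirror what appears elsewhere in this diff; everything else (pool name, lambda, include path) is illustrative.

// Hypothetical caller: a pool that opts out of the automatic "LLApp"
// shutdown listener and is closed manually by its owner.
#include "threadpool.h"   // llcommon; include path assumed

void example_pool_usage()
{
    // auto_shutdown = false: start() skips the "LLApp" listener, so the pool
    // keeps running until close() is called explicitly.
    LL::ThreadPool pool("LateShutdownPool", /*threads=*/2, /*capacity=*/1024,
                        /*auto_shutdown=*/false);
    pool.start();

    pool.getQueue().post([]{ /* work that may outlive normal app teardown */ });

    // ... later, once the owner has finished its own teardown:
    pool.close();   // close() now skips threads that are no longer joinable
}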
@@ -288,7 +288,7 @@ int BufferArray::findBlock(size_t pos, size_t * ret_offset)
if (pos >= mLen)
return -1; // Doesn't exist

-const int block_limit(narrow(mBlocks.size()));
+const int block_limit(narrow<size_t>(mBlocks.size()));
for (int i(0); i < block_limit; ++i)
{
if (pos < mBlocks[i]->mUsed)

@@ -50,6 +50,7 @@ bool LLImageDimensionsInfo::load(const std::string& src_filename,U32 codec)
if (file_size == 0)
{
+mWarning = "texture_load_empty_file";
setLastError("File is empty",src_filename);
return false;
}

@@ -90,6 +91,7 @@ bool LLImageDimensionsInfo::getImageDimensionsBmp()
if (signature[0] != 'B' || signature[1] != 'M')
{
LL_WARNS() << "Not a BMP" << LL_ENDL;
+mWarning = "texture_load_format_error";
return false;
}

@@ -140,6 +142,7 @@ bool LLImageDimensionsInfo::getImageDimensionsPng()
if (memcmp(signature, png_magic, PNG_MAGIC_SIZE) != 0)
{
LL_WARNS() << "Not a PNG" << LL_ENDL;
+mWarning = "texture_load_format_error";
return false;
}

@@ -183,6 +186,7 @@ bool LLImageDimensionsInfo::getImageDimensionsJpeg()
if (memcmp(signature, jpeg_magic, JPEG_MAGIC_SIZE) != 0)
{
LL_WARNS() << "Not a JPEG" << LL_ENDL;
+mWarning = "texture_load_format_error";
return false;
}
fseek(fp, 0, SEEK_SET); // go back to start of the file

@@ -55,6 +55,12 @@ public:
{
return mLastError;
}

+const std::string& getWarningName()
+{
+return mWarning;
+}

protected:

void clean()

@@ -129,6 +135,7 @@ protected:
std::string mSrcFilename;

std::string mLastError;
+std::string mWarning;

U8* mData;

@@ -197,7 +197,7 @@ bool LLImageJ2C::decodeChannels(LLImageRaw *raw_imagep, F32 decode_time, S32 fir
{
if (mDecoding)
{
-LL_WARNS() << "decodeImpl failed but mDecoding is TRUE" << LL_ENDL;
+LL_WARNS() << "decodeImpl failed but mDecoding is true" << LL_ENDL;
mDecoding = false;
}
}

@@ -35,8 +35,10 @@ class ImageRequest
{
public:
ImageRequest(const LLPointer<LLImageFormatted>& image,
-S32 discard, bool needs_aux,
-const LLPointer<LLImageDecodeThread::Responder>& responder);
+S32 discard,
+bool needs_aux,
+const LLPointer<LLImageDecodeThread::Responder>& responder,
+U32 request_id);
virtual ~ImageRequest();

/*virtual*/ bool processRequest();

@@ -48,6 +50,7 @@ private:
// input
LLPointer<LLImageFormatted> mFormattedImage;
S32 mDiscardLevel;
+U32 mRequestId;
bool mNeedsAux;
// output
LLPointer<LLImageRaw> mDecodedImageRaw;

@@ -62,6 +65,7 @@ private:
// MAIN THREAD
LLImageDecodeThread::LLImageDecodeThread(bool /*threaded*/)
+: mDecodeCount(0)
{
mThreadPool.reset(new LL::ThreadPool("ImageDecode", 8));
mThreadPool->start();

@@ -92,9 +96,10 @@ LLImageDecodeThread::handle_t LLImageDecodeThread::decodeImage(
{
LL_PROFILE_ZONE_SCOPED_CATEGORY_TEXTURE;

+U32 decode_id = ++mDecodeCount;
// Instantiate the ImageRequest right in the lambda, why not?
bool posted = mThreadPool->getQueue().post(
-[req = ImageRequest(image, discard, needs_aux, responder)]
+[req = ImageRequest(image, discard, needs_aux, responder, decode_id)]
() mutable
{
auto done = req.processRequest();

@@ -103,13 +108,10 @@ LLImageDecodeThread::handle_t LLImageDecodeThread::decodeImage(
if (! posted)
{
LL_DEBUGS() << "Tried to start decoding on shutdown" << LL_ENDL;
// should this return 0?
return 0;
}

-// It's important to our consumer (LLTextureFetchWorker) that we return a
-// nonzero handle. It is NOT important that the nonzero handle be unique:
-// nothing is ever done with it except to compare it to zero, or zero it.
-return 17;
+return decode_id;
}

void LLImageDecodeThread::shutdown()

@@ -123,15 +125,18 @@ LLImageDecodeThread::Responder::~Responder()

//----------------------------------------------------------------------------

-ImageRequest::ImageRequest(const LLPointer<LLImageFormatted>& image,
-S32 discard, bool needs_aux,
-const LLPointer<LLImageDecodeThread::Responder>& responder)
+ImageRequest::ImageRequest(const LLPointer<LLImageFormatted>& image,
+S32 discard,
+bool needs_aux,
+const LLPointer<LLImageDecodeThread::Responder>& responder,
+U32 request_id)
: mFormattedImage(image),
mDiscardLevel(discard),
mNeedsAux(needs_aux),
mDecodedRaw(false),
mDecodedAux(false),
-mResponder(responder)
+mResponder(responder),
+mRequestId(request_id)
{
}

@@ -208,7 +213,7 @@ void ImageRequest::finishRequest(bool completed)
if (mResponder.notNull())
{
bool success = completed && mDecodedRaw && (!mNeedsAux || mDecodedAux);
-mResponder->completed(success, mDecodedImageRaw, mDecodedImageAux);
+mResponder->completed(success, mDecodedImageRaw, mDecodedImageAux, mRequestId);
}
// Will automatically be deleted
}

@@ -39,7 +39,7 @@ public:
protected:
virtual ~Responder();
public:
-virtual void completed(bool success, LLImageRaw* raw, LLImageRaw* aux) = 0;
+virtual void completed(bool success, LLImageRaw* raw, LLImageRaw* aux, U32 request_id) = 0;
};

public:

@@ -53,6 +53,7 @@ public:
const LLPointer<Responder>& responder);
size_t getPending();
size_t update(F32 max_time_ms);
+S32 getTotalDecodeCount() { return mDecodeCount; }
void shutdown();

private:

@@ -60,6 +61,7 @@ private:
// LLQueuedThread - instead this is the API by which we submit work to the
// "ImageDecode" ThreadPool.
std::unique_ptr<LL::ThreadPool> mThreadPool;
+LLAtomicU32 mDecodeCount;
};

#endif

@@ -98,7 +98,7 @@ namespace tut
done = res;
*done = false;
}
-virtual void completed(bool success, LLImageRaw* raw, LLImageRaw* aux)
+virtual void completed(bool success, LLImageRaw* raw, LLImageRaw* aux, U32 request_id)
{
*done = true;
}
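The llimageworker changes thread a real request id from decodeImage() through ImageRequest into Responder::completed(), instead of the old constant handle 17. A hypothetical responder sketch of what that id enables; TrackingResponder and expect() are not viewer code, only the Responder interface above is.

// Hypothetical responder: match completions against the id handed back by
// decodeImage() at submission time, so stale decodes can be ignored.
class TrackingResponder : public LLImageDecodeThread::Responder
{
public:
    void expect(U32 id) { mExpectedId = id; }

    void completed(bool success, LLImageRaw* raw, LLImageRaw* aux, U32 request_id) override
    {
        if (request_id != mExpectedId)
        {
            // An older decode finished after a newer one was submitted; drop it.
            return;
        }
        // ... hand raw/aux back to whoever queued the request ...
    }

private:
    U32 mExpectedId = 0;
};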
@@ -190,7 +190,7 @@ public:
// Assumes you have already called nextBlock().
virtual void packMessage(LLMessageSystem* msg) const;

-// Returns TRUE if the inventory item came through the network correctly.
+// Returns true if the inventory item came through the network correctly.
// Uses a simple crc check which is defeatable, but we want to detect
// network mangling somehow.
virtual bool unpackMessage(LLMessageSystem* msg, const char* block, S32 block_num = 0);

@@ -343,7 +343,7 @@ public:
// time earlier than "now".
void expirePasses(S32 now);

-// Add to list, suppressing duplicates. Returns TRUE if added.
+// Add to list, suppressing duplicates. Returns true if added.
bool addToAccessList(const LLUUID& agent_id, S32 time);
bool addToBanList(const LLUUID& agent_id, S32 time);
bool removeFromAccessList(const LLUUID& agent_id);

@@ -419,7 +419,7 @@ public:
// dealing with sales and parcel conversion.
//
-// the isSaleTimerExpired will trivially return FALSE if there is
+// the isSaleTimerExpired will trivially return false if there is
// no sale going on. Pass in the current time in usec which will
// be used for comparison.
bool isSaleTimerExpired(const U64& time);

@@ -157,7 +157,7 @@ public:
// return TRUE if group_id is owner.
bool isGroupOwned() const { return mIsGroupOwned; }

-// This API returns TRUE if the object is owned at all, and FALSE
+// This API returns true if the object is owned at all, and false
// otherwise. If it is owned at all, owner id is filled with
// either the owner id or the group id, and the is_group_owned
// parameter is appropriately filled. The values of owner_id and

@@ -237,13 +237,13 @@ public:
// checked manipulators (since that is how it is used.) If the
// agent is the system or (group == mGroup and group modify and
// owner transfer) then this function will deed the permissions,
-// set the next owner mask, and return TRUE. Otherwise, no change
-// is effected, and the function returns FALSE.
+// set the next owner mask, and return true. Otherwise, no change
+// is effected, and the function returns false.
bool deedToGroup(const LLUUID& agent, const LLUUID& group);
-// Attempt to set or clear the given bitmask. Returns TRUE if you
+// Attempt to set or clear the given bitmask. Returns true if you
// are allowed to modify the permissions. If you attempt to turn
// on bits not allowed by the base bits, the function will return
-// TRUE, but those bits will not be set.
+// true, but those bits will not be set.
bool setBaseBits( const LLUUID& agent, bool set, PermissionMask bits);
bool setOwnerBits( const LLUUID& agent, bool set, PermissionMask bits);
bool setGroupBits( const LLUUID& agent, const LLUUID& group, bool set, PermissionMask bits);

@@ -293,10 +293,10 @@ public:
// one provided, and sets the base mask as indicated.
//bool setOwner(const LLUUID& agent, const LLUUID& owner, U32 new_base_mask);

-// Attempt to set or clear the given bitmask. Returns TRUE if you
+// Attempt to set or clear the given bitmask. Returns true if you
// are allowed to modify the permissions. If you attempt to turn
// on bits not allowed by the base bits, the function will return
-// TRUE, but those bits will not be set.
+// true, but those bits will not be set.
//bool setGroupBits( const LLUUID& agent, bool set, PermissionMask bits);
//bool setEveryoneBits(const LLUUID& agent, bool set, PermissionMask bits);

@@ -137,7 +137,7 @@ public:
}

// Note: Does NOT follow GL_QUAD conventions: the top and right edges ARE considered part of the rect
-// returns TRUE if any part of rect is is inside this LLRect
+// returns true if any part of rect is is inside this LLRect
bool overlaps(const LLRectBase& rect) const
{
return !(mLeft > rect.mRight

@@ -219,7 +219,7 @@ void calc_tangent_from_triangle(

// intersect test between triangle vert0, vert1, vert2 and a ray from orig in direction dir.
-// returns TRUE if intersecting and returns barycentric coordinates in intersection_a, intersection_b,
+// returns true if intersecting and returns barycentric coordinates in intersection_a, intersection_b,
// and returns the intersection point along dir in intersection_t.

// Moller-Trumbore algorithm

@@ -4492,7 +4492,7 @@ void LLVolumeParams::reduceT(F32 begin, F32 end)
const F32 MIN_CONCAVE_PROFILE_WEDGE = 0.125f; // 1/8 unity
const F32 MIN_CONCAVE_PATH_WEDGE = 0.111111f; // 1/9 unity

-// returns TRUE if the shape can be approximated with a convex shape
+// returns true if the shape can be approximated with a convex shape
// for collison purposes
bool LLVolumeParams::isConvex() const
{

@@ -4656,7 +4656,7 @@ bool LLVolume::isFaceMaskValid(LLFaceID face_mask)

bool LLVolume::isConvex() const
{
-// mParams.isConvex() may return FALSE even though the final
+// mParams.isConvex() may return false even though the final
// geometry is actually convex due to LOD approximations.
// TODO -- provide LLPath and LLProfile with isConvex() methods
// that correctly determine convexity. -- Leviathan

@@ -5565,9 +5565,9 @@ bool LLVolumeFace::cacheOptimize(bool gen_tangents)
U32 stream_count = data.w.empty() ? 4 : 5;

-U32 vert_count = meshopt_generateVertexRemapMulti(&remap[0], nullptr, data.p.size(), data.p.size(), mos, stream_count);
+size_t vert_count = meshopt_generateVertexRemapMulti(&remap[0], nullptr, data.p.size(), data.p.size(), mos, stream_count);

-if (vert_count < 65535)
+if (vert_count < 65535 && vert_count != 0)
{
std::vector<U32> indices;
indices.resize(mNumIndices);

@@ -5586,6 +5586,13 @@ bool LLVolumeFace::cacheOptimize(bool gen_tangents)
{
U32 src_idx = i;
U32 dst_idx = remap[i];
+if (dst_idx >= mNumVertices)
+{
+dst_idx = mNumVertices - 1;
+// Shouldn't happen, figure out what gets returned in remap and why.
+llassert(false);
+LL_DEBUGS_ONCE("LLVOLUME") << "Invalid destination index, substituting" << LL_ENDL;
+}
mIndices[i] = dst_idx;

mPositions[dst_idx].load3(data.p[src_idx].mV);

@@ -5619,6 +5626,10 @@ bool LLVolumeFace::cacheOptimize(bool gen_tangents)
}
else
{
+if (vert_count == 0)
+{
+LL_WARNS_ONCE("LLVOLUME") << "meshopt_generateVertexRemapMulti failed to process a model or model was invalid" << LL_ENDL;
+}
// blew past the max vertex size limit, use legacy tangent generation which never adds verts
createTangents();
}
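The cacheOptimize() hunks stop trusting the meshoptimizer remap blindly: a zero vertex count or an out-of-range remap entry now falls back instead of corrupting buffers. A standalone sketch of that call pattern against the meshoptimizer API, assuming only positions are remapped; the buffer layout and the remap_positions name are illustrative, not viewer code.

// Standalone sketch: generate a vertex remap for an unindexed position
// stream and bail out (as the hunks above now do) if the result is unusable.
#include <meshoptimizer.h>
#include <cstdio>
#include <vector>

bool remap_positions(std::vector<float>& xyz /* 3 floats per vertex */)
{
    const size_t vertex_count = xyz.size() / 3;
    meshopt_Stream streams[1] = {
        { xyz.data(), sizeof(float) * 3, sizeof(float) * 3 }
    };

    std::vector<unsigned int> remap(vertex_count);
    // nullptr indices: treat the buffer as unindexed triangles
    size_t unique = meshopt_generateVertexRemapMulti(
        remap.data(), nullptr, vertex_count, vertex_count, streams, 1);

    if (unique == 0 || unique > vertex_count)
    {
        std::fprintf(stderr, "remap failed, keeping original buffer\n");
        return false;                 // mirrors the new vert_count checks above
    }

    std::vector<float> packed(unique * 3);
    meshopt_remapVertexBuffer(packed.data(), xyz.data(), vertex_count,
                              sizeof(float) * 3, remap.data());
    xyz.swap(packed);
    return true;
}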
@@ -298,7 +298,7 @@ bool ray_cylinder(const LLVector3 &ray_point, const LLVector3 &ray_direction,
out = dist_to_closest_point + half_chord_length; // dist to exiting point
if (out < 0.0f)
{
-// cylinder is behind the ray, so we return FALSE
+// cylinder is behind the ray, so we return false
return false;
}

@@ -38,8 +38,8 @@ class LLQuaternion;
// Vector arguments of the form "shape_scale" represent the scale of the
// object along the three axes.
//
-// All functions return the expected TRUE or FALSE, unless otherwise noted.
-// When FALSE is returned, any resulting values that might have been stored
+// All functions return the expected true or false, unless otherwise noted.
+// When false is returned, any resulting values that might have been stored
// are undefined.
//
// Rays are defined by a "ray_point" and a "ray_direction" (unit).

@@ -454,7 +454,7 @@ namespace tut
template<> template<>
void line_object::test<1>()
{
-// this is a test for LLLine::intersects(point) which returns TRUE
+// this is a test for LLLine::intersects(point) which returns true
// if the line passes within some tolerance of point

// these tests will have some floating point error,

@@ -158,7 +158,7 @@ namespace tut
F32 x =-2.0f, y = -3.0f, z = 1.23f ;
LLVector3 vec3(x,y,z);
ensure("1:abs():Fail ", (true == vec3.abs()));
-ensure("2:isNull():Fail", (false == vec3.isNull())); //Returns TRUE if vector has a _very_small_ length
+ensure("2:isNull():Fail", (false == vec3.isNull())); //Returns true if vector has a _very_small_ length
x =.00000001f, y = .000001001f, z = .000001001f;
vec3.setVec(x,y,z);
ensure("3:isNull(): Fail ", (true == vec3.isNull()));

@@ -42,7 +42,7 @@ LLVector2 LLVector2::zero(0,0);
// Non-member functions

// Sets all values to absolute value of their original values
-// Returns TRUE if data changed
+// Returns true if data changed
bool LLVector2::abs()
{
bool ret{ false };

@@ -78,11 +78,11 @@ class LLVector2
F32 magVecSquared() const; // deprecated
F32 normVec(); // deprecated

-bool abs(); // sets all values to absolute value of original value (first octant), returns TRUE if changed
+bool abs(); // sets all values to absolute value of original value (first octant), returns true if changed

const LLVector2& scaleVec(const LLVector2& vec); // scales per component by vec

-bool isNull(); // Returns TRUE if vector has a _very_small_ length
+bool isNull(); // Returns true if vector has a _very_small_ length
bool isExactlyZero() const { return !mV[VX] && !mV[VY]; }

F32 operator[](int idx) const { return mV[idx]; }

@@ -114,7 +114,7 @@ class LLVector2
// Non-member functions

F32 angle_between(const LLVector2 &a, const LLVector2 &b); // Returns angle (radians) between a and b
-bool are_parallel(const LLVector2 &a, const LLVector2 &b, F32 epsilon=F_APPROXIMATELY_ZERO); // Returns TRUE if a and b are very close to parallel
+bool are_parallel(const LLVector2 &a, const LLVector2 &b, F32 epsilon=F_APPROXIMATELY_ZERO); // Returns true if a and b are very close to parallel
F32 dist_vec(const LLVector2 &a, const LLVector2 &b); // Returns distance between a and b
F32 dist_vec_squared(const LLVector2 &a, const LLVector2 &b);// Returns distance squared between a and b
F32 dist_vec_squared2D(const LLVector2 &a, const LLVector2 &b);// Returns distance squared between a and b ignoring Z component

@@ -52,7 +52,7 @@ const LLVector3d LLVector3d::z_axis_neg(0, 0, -1);

// Clamps each values to range (min,max).
-// Returns TRUE if data changed.
+// Returns true if data changed.
bool LLVector3d::clamp(F64 min, F64 max)
{
bool ret{ false };

@@ -69,7 +69,7 @@ bool LLVector3d::clamp(F64 min, F64 max)
}

// Sets all values to absolute value of their original values
-// Returns TRUE if data changed
+// Returns true if data changed
bool LLVector3d::abs()
{
bool ret{ false };

@@ -69,8 +69,8 @@ class LLVector3d
}

inline bool isFinite() const; // checks to see if all values of LLVector3d are finite
-bool clamp(const F64 min, const F64 max); // Clamps all values to (min,max), returns TRUE if data changed
-bool abs(); // sets all values to absolute value of original value (first octant), returns TRUE if changed
+bool clamp(const F64 min, const F64 max); // Clamps all values to (min,max), returns true if data changed
+bool abs(); // sets all values to absolute value of original value (first octant), returns true if changed

inline const LLVector3d& clear(); // Clears LLVector3d to (0, 0, 0, 1)
inline const LLVector3d& clearVec(); // deprecated

@@ -98,7 +98,7 @@ class LLVector3d
const LLVector3d& rotVec(const LLMatrix3 &mat); // Rotates by LLMatrix4 mat
const LLVector3d& rotVec(const LLQuaternion &q); // Rotates by LLQuaternion q

-bool isNull() const; // Returns TRUE if vector has a _very_small_ length
+bool isNull() const; // Returns true if vector has a _very_small_ length
bool isExactlyZero() const { return !mdV[VX] && !mdV[VY] && !mdV[VZ]; }

const LLVector3d& operator=(const LLVector4 &a);

@@ -53,7 +53,7 @@ const LLVector3 LLVector3::all_one(1.f,1.f,1.f);

// Clamps each values to range (min,max).
-// Returns TRUE if data changed.
+// Returns true if data changed.
bool LLVector3::clamp(F32 min, F32 max)
{
bool ret{ false };

@@ -70,7 +70,7 @@ bool LLVector3::clamp(F32 min, F32 max)
}

// Clamps length to an upper limit.
-// Returns TRUE if the data changed
+// Returns true if the data changed
bool LLVector3::clampLength( F32 length_limit )
{
bool changed{ false };

@@ -151,7 +151,7 @@ bool LLVector3::clamp(const LLVector3 &min_vec, const LLVector3 &max_vec)

// Sets all values to absolute value of their original values
-// Returns TRUE if data changed
+// Returns true if data changed
bool LLVector3::abs()
{
bool ret{ false };

@@ -72,7 +72,7 @@ class LLVector3
void setValue(const LLSD& sd);

inline bool isFinite() const; // checks to see if all values of LLVector3 are finite
-bool clamp(F32 min, F32 max); // Clamps all values to (min,max), returns TRUE if data changed
+bool clamp(F32 min, F32 max); // Clamps all values to (min,max), returns true if data changed
bool clamp(const LLVector3 &min_vec, const LLVector3 &max_vec); // Scales vector by another vector
bool clampLength( F32 length_limit ); // Scales vector to limit length to a value

@@ -80,7 +80,7 @@ class LLVector3
void quantize8(F32 lowerxy, F32 upperxy, F32 lowerz, F32 upperz); // changes the vector to reflect quatization
void snap(S32 sig_digits); // snaps x,y,z to sig_digits decimal places

-bool abs(); // sets all values to absolute value of original value (first octant), returns TRUE if changed
+bool abs(); // sets all values to absolute value of original value (first octant), returns true if changed

inline void clear(); // Clears LLVector3 to (0, 0, 0)
inline void setZero(); // Clears LLVector3 to (0, 0, 0)

@@ -119,7 +119,7 @@ class LLVector3
const LLVector3& scaleVec(const LLVector3& vec); // scales per component by vec
LLVector3 scaledVec(const LLVector3& vec) const; // get a copy of this vector scaled by vec

-bool isNull() const; // Returns TRUE if vector has a _very_small_ length
+bool isNull() const; // Returns true if vector has a _very_small_ length
bool isExactlyZero() const { return !mV[VX] && !mV[VY] && !mV[VZ]; }

F32 operator[](int idx) const { return mV[idx]; }

@@ -157,7 +157,7 @@ typedef LLVector3 LLSimLocalVec;
// Non-member functions

F32 angle_between(const LLVector3 &a, const LLVector3 &b); // Returns angle (radians) between a and b
-bool are_parallel(const LLVector3 &a, const LLVector3 &b, F32 epsilon=F_APPROXIMATELY_ZERO); // Returns TRUE if a and b are very close to parallel
+bool are_parallel(const LLVector3 &a, const LLVector3 &b, F32 epsilon=F_APPROXIMATELY_ZERO); // Returns true if a and b are very close to parallel
F32 dist_vec(const LLVector3 &a, const LLVector3 &b); // Returns distance between a and b
F32 dist_vec_squared(const LLVector3 &a, const LLVector3 &b);// Returns distance squared between a and b
F32 dist_vec_squared2D(const LLVector3 &a, const LLVector3 &b);// Returns distance squared between a and b ignoring Z component

@@ -59,7 +59,7 @@ const LLVector4& LLVector4::scaleVec(const LLVector4& vec)
}

// Sets all values to absolute value of their original values
-// Returns TRUE if data changed
+// Returns true if data changed
bool LLVector4::abs()
{
bool ret{ false };

@@ -101,7 +101,7 @@ class LLVector4
F32 normVec(); // deprecated

// Sets all values to absolute value of their original values
-// Returns TRUE if data changed
+// Returns true if data changed
bool abs();

bool isExactlyClear() const { return (mV[VW] == 1.0f) && !mV[VX] && !mV[VY] && !mV[VZ]; }

@@ -137,7 +137,7 @@ class LLVector4

// Non-member functions
F32 angle_between(const LLVector4 &a, const LLVector4 &b); // Returns angle (radians) between a and b
-bool are_parallel(const LLVector4 &a, const LLVector4 &b, F32 epsilon = F_APPROXIMATELY_ZERO); // Returns TRUE if a and b are very close to parallel
+bool are_parallel(const LLVector4 &a, const LLVector4 &b, F32 epsilon = F_APPROXIMATELY_ZERO); // Returns true if a and b are very close to parallel
F32 dist_vec(const LLVector4 &a, const LLVector4 &b); // Returns distance between a and b
F32 dist_vec_squared(const LLVector4 &a, const LLVector4 &b); // Returns distance squared between a and b
LLVector3 vec4to3(const LLVector4 &vec);

@@ -70,7 +70,7 @@ public:

// If available, copies name ("bobsmith123" or "James Linden") into string
// If not available, copies the string "waiting".
-// Returns TRUE iff available.
+// Returns true if available.
bool getFullName(const LLUUID& id, std::string& full_name);

// Reverse lookup of UUID from name

@@ -99,7 +99,7 @@ public:
// If available, this method copies the group name into the string
// provided. The caller must allocate at least
// DB_GROUP_NAME_BUF_SIZE characters. If not available, this
-// method copies the string "waiting". Returns TRUE iff available.
+// method copies the string "waiting". Returns true if available.
bool getGroupName(const LLUUID& id, std::string& group);

// Call the callback with the group or avatar name.
@@ -38,7 +38,7 @@ class LLCipher
public:
virtual ~LLCipher() {}

-// encrypt src and place result into dst. returns TRUE if
+// encrypt src and place result into dst. returns true if
// Returns number of bytes written into dst, or 0 on error.
virtual U32 encrypt(const U8* src, U32 src_len, U8* dst, U32 dst_len) = 0;

@@ -99,7 +99,7 @@ void disconnect_smtp()
}
}

-// Returns TRUE on success.
+// Returns true on success.
// message should NOT be SMTP escaped.
// static
bool LLMail::send(

@@ -262,7 +262,7 @@ bool LLTemplateMessageBuilder::removeLastBlock()

if (num_blocks <= 1)
{
-// we just blew away the last one, so return FALSE
+// we just blew away the last one, so return false
LL_WARNS() << "not blowing away the only block of message "
<< mCurrentSMessageName
<< ". Block: " << block_name

@@ -41,8 +41,8 @@ public:
~LLThrottle() { }

void setRate(const F32 rate);
-bool checkOverflow(const F32 amount); // I'm about to add an amount, TRUE if would overflow throttle
-bool throttleOverflow(const F32 amount); // I just sent amount, TRUE if that overflowed the throttle
+bool checkOverflow(const F32 amount); // I'm about to add an amount, true if would overflow throttle
+bool throttleOverflow(const F32 amount); // I just sent amount, true if that overflowed the throttle

F32 getAvailable(); // Return the available bits
F32 getRate() const { return mRate; }

@@ -73,10 +73,10 @@ public:
~LLThrottleGroup() { }

void resetDynamicAdjust();
-bool checkOverflow(S32 throttle_cat, F32 bits); // I'm about to send bits, TRUE if would overflow channel
-bool throttleOverflow(S32 throttle_cat, F32 bits); // I just sent bits, TRUE if that overflowed the channel
-bool dynamicAdjust(); // Shift bandwidth from idle channels to busy channels, TRUE if adjustment occurred
-bool setNominalBPS(F32* throttle_vec); // TRUE if any value was different, resets adjustment system if was different
+bool checkOverflow(S32 throttle_cat, F32 bits); // I'm about to send bits, true if would overflow channel
+bool throttleOverflow(S32 throttle_cat, F32 bits); // I just sent bits, true if that overflowed the channel
+bool dynamicAdjust(); // Shift bandwidth from idle channels to busy channels, true if adjustment occurred
+bool setNominalBPS(F32* throttle_vec); // true if any value was different, resets adjustment system if was different

S32 getAvailable(S32 throttle_cat); // Return bits available in the channel

@@ -386,12 +386,3 @@ std::ostream& operator<< (std::ostream& os, LLXfer &hh)
os << hh.getFileName() ;
return os;
}

@@ -65,7 +65,7 @@ public:
// The control port is the listen port of the parent process that
// launched this machine. 0 means none or not known.
const S32 &getControlPort() const { return mControlPort; }
-bool isValid() const { return (mHost.getPort() != 0); } // TRUE if corresponds to functioning machine
+bool isValid() const { return (mHost.getPort() != 0); } // true if corresponds to functioning machine

// set functions
void setMachineType(EMachineType machine_type) { mMachineType = machine_type; }

@@ -481,7 +481,7 @@ LLCircuitData* LLMessageSystem::findCircuit(const LLHost& host,
return cdp;
}

-// Returns TRUE if a valid, on-circuit message has been received.
+// Returns true if a valid, on-circuit message has been received.
// Requiring a non-const LockMessageChecker reference ensures that
// mMessageReader has been set to mTemplateMessageReader.
bool LLMessageSystem::checkMessages(LockMessageChecker&, S64 frame_count )

@@ -959,7 +959,7 @@ bool LLMessageSystem::isSendFullFast(const char* blockname)
}

-// blow away the last block of a message, return FALSE if that leaves no blocks or there wasn't a block to remove
+// blow away the last block of a message, return false if that leaves no blocks or there wasn't a block to remove
// TODO: Babbage: Remove this horror.
bool LLMessageSystem::removeLastBlock()
{

@@ -1513,8 +1513,8 @@ bool LLMessageSystem::getCircuitTrust(const LLHost &host)
return false;
}

-// Activate a circuit, and set its trust level (TRUE if trusted,
-// FALSE if not).
+// Activate a circuit, and set its trust level (true if trusted,
+// false if not).
void LLMessageSystem::enableCircuit(const LLHost &host, bool trusted)
{
LLCircuitData *cdp = mCircuitInfo.findCircuit(host);

@@ -295,7 +295,7 @@ class LLMessageSystem : public LLMessageSenderInterface
LLPacketRing mPacketRing;
LLReliablePacketParams mReliablePacketParams;

-// Set this flag to TRUE when you want *very* verbose logs.
+// Set this flag to true when you want *very* verbose logs.
bool mVerboseLog;

F32 mMessageFileVersionNumber;

@@ -388,8 +388,8 @@ public:

// Set a callback function for a message system exception.
void setExceptionFunc(EMessageException exception, msg_exception_callback func, void* data = NULL);
-// Call the specified exception func, and return TRUE if a
-// function was found and called. Otherwise return FALSE.
+// Call the specified exception func, and return true if a
+// function was found and called. Otherwise return false.
bool callExceptionFunc(EMessageException exception);

// Set a function that will be called once per packet processed with the

@@ -885,7 +885,7 @@ private:
LLTimer mMessageSystemTimer;

static F32 mTimeDecodesSpamThreshold; // If mTimeDecodes is on, all this many seconds for each msg decode before spamming
-static bool mTimeDecodes; // Measure time for all message decodes if TRUE;
+static bool mTimeDecodes; // Measure time for all message decodes if true;

msg_timing_callback mTimingCallback;
void* mTimingCallbackData;

@@ -338,7 +338,7 @@ S32 receive_packet(int hSocket, char * receiveBuffer)
return nRet;
}

-// Returns TRUE on success.
+// Returns true on success.
bool send_packet(int hSocket, const char *sendBuffer, int size, U32 recipient, int nPort)
{
// Sends a packet to the address set in initNet

@@ -43,7 +43,7 @@ void end_net(S32& socket_out);
// returns size of packet or -1 in case of error
S32 receive_packet(int hSocket, char * receiveBuffer);

-bool send_packet(int hSocket, const char *sendBuffer, int size, U32 recipient, int nPort); // Returns TRUE on success.
+bool send_packet(int hSocket, const char *sendBuffer, int size, U32 recipient, int nPort); // Returns true on success.

//void get_sender(char * tmp);
LLHost get_sender();

@@ -102,7 +102,7 @@ struct LLPartInitData {
//How much of an effect does gravity have
F32 globalLifetime;
//If particles re-spawn, a system can exist forever.
-//If (ActionFlags & PART_SYS_GLOBAL_DIE) is TRUE this variable is used to determine how long the system lasts.
+//If (ActionFlags & PART_SYS_GLOBAL_DIE) is true this variable is used to determine how long the system lasts.
F32 individualLifetime;
//How long does each particle last if nothing else happens to it
F32 individualLifetimeRange;

@@ -132,7 +132,7 @@ const int PART_SYS_SLOW_ANIM_BYTE = 0; // slow animation down by a factor of 10
const int PART_SYS_SLOW_ANIM_BIT = 1; // useful for tweaking anims during debugging

const int PART_SYS_FOLLOW_VEL_BYTE = 0; // indicates whether to orient sprites towards
-const int PART_SYS_FOLLOW_VEL_BIT = 4; // their velocity vector -- default is FALSE
+const int PART_SYS_FOLLOW_VEL_BIT = 4; // their velocity vector -- default is false

const int PART_SYS_IS_LIGHT_BYTE = 0; // indicates whether a particular particle system
const int PART_SYS_IS_LIGHT_BIT = 8; // is also a light object -- for andrew

@@ -1041,7 +1041,12 @@ LLModel::weight_list& LLModel::getJointInfluences(const LLVector3& pos)
weight_map::iterator iterPos = mSkinWeights.begin();
weight_map::iterator iterEnd = mSkinWeights.end();

-llassert(!mSkinWeights.empty());
+if (mSkinWeights.empty())
+{
+// function calls iter->second on all return paths
+// everything that calls this function should precheck that there is data.
+LL_ERRS() << "called getJointInfluences with empty weights list" << LL_ENDL;
+}

for ( ; iterPos!=iterEnd; ++iterPos )
{

@@ -1068,11 +1073,16 @@ LLModel::weight_list& LLModel::getJointInfluences(const LLVector3& pos)
const F32 epsilon = 1e-5f;
weight_map::iterator iter_up = mSkinWeights.lower_bound(pos);
weight_map::iterator iter_down = iter_up;
-if (iter_up != mSkinWeights.end())
-{
-iter_down = ++iter_up;
-}
-weight_map::iterator best = iter_up;
+weight_map::iterator best = iter_up;
+if (iter_up != mSkinWeights.end())
+{
+iter_down = ++iter_up;
+}
+else
+{
+// Assumes that there is at least one element
+--best;
+}

F32 min_dist = (iter->first - pos).magVec();
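The getJointInfluences() fix above is essentially a guard on a lower_bound()-based closest-key search: the returned iterator may be end(), and an empty map is now a hard error instead of an assert. A standalone illustration with std::map; closest_value is a toy stand-in, not viewer code.

// Standalone illustration of the iterator guard added to getJointInfluences():
// lower_bound() may return end(), so the "closest key" search has to step back
// instead of dereferencing it.
#include <cmath>
#include <iterator>
#include <map>
#include <stdexcept>

float closest_value(const std::map<float, float>& weights, float pos)
{
    if (weights.empty())
    {
        // matches the new hard error: callers must pre-check that there is data
        throw std::runtime_error("closest_value called with empty map");
    }

    auto up = weights.lower_bound(pos);
    auto best = up;
    if (up == weights.end())
    {
        --best;                       // only the element below can be closest
    }
    else if (up != weights.begin())
    {
        auto down = std::prev(up);    // compare the neighbours on either side
        if (std::fabs(down->first - pos) < std::fabs(up->first - pos))
        {
            best = down;
        }
    }
    return best->second;
}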
@@ -46,7 +46,7 @@ public:
// - right before the svg module calls the render callback hook. (with cache == true)
static FT_Error OnPresetGlypthSlot(FT_GlyphSlot glyph_slot, FT_Bool cache, FT_Pointer* state);

-// Called to render an OT-SVG glyph (right after the preset hook OnPresetGlypthSlot was called with cache set to TRUE)
+// Called to render an OT-SVG glyph (right after the preset hook OnPresetGlypthSlot was called with cache set to true)
static FT_Error OnRender(FT_GlyphSlot glyph_slot, FT_Pointer* state);

// Called to deallocate our per glyph slot data

@@ -94,6 +94,17 @@ void APIENTRY gl_debug_callback(GLenum source,
return;
}*/

+if (gGLManager.mIsDisabled &&
+severity == GL_DEBUG_SEVERITY_HIGH_ARB &&
+source == GL_DEBUG_SOURCE_API_ARB &&
+type == GL_DEBUG_TYPE_ERROR_ARB &&
+id == GL_INVALID_VALUE)
+{
+// Suppress messages about deleting already deleted objects called from LLViewerWindow::stopGL()
+// "GL_INVALID_VALUE error generated. Handle does not refer to an object generated by OpenGL."
+return;
+}

// list of messages to suppress
const char* suppress[] =
{

@@ -148,8 +159,9 @@ void APIENTRY gl_debug_callback(GLenum source,
glGetBufferParameteriv(GL_UNIFORM_BUFFER, GL_BUFFER_SIZE, &ubo_size);
glGetBufferParameteriv(GL_UNIFORM_BUFFER, GL_BUFFER_IMMUTABLE_STORAGE, &ubo_immutable);
}

-if (severity == GL_DEBUG_SEVERITY_HIGH)
+// No needs to halt when is called from LLViewerWindow::stopGL()
+if (severity == GL_DEBUG_SEVERITY_HIGH && !gGLManager.mIsDisabled)
{
LL_ERRS() << "Halting on GL Error" << LL_ENDL;
}

@@ -126,12 +126,12 @@ public:

void setSelected(bool is_selected);

-bool getCollapsible() {return mCollapsible;};
+bool getCollapsible() { return mCollapsible; };

-void setCollapsible(bool collapsible) {mCollapsible = collapsible;};
+void setCollapsible(bool collapsible) { mCollapsible = collapsible; };
void changeOpenClose(bool is_open);

-void canOpenClose(bool can_open_close) { mCanOpenClose = can_open_close;};
+void canOpenClose(bool can_open_close) { mCanOpenClose = can_open_close; };
bool canOpenClose() const { return mCanOpenClose; };

virtual bool postBuild();

@@ -142,8 +142,8 @@ public:

void draw();

-void storeOpenCloseState ();
-void restoreOpenCloseState ();
+void storeOpenCloseState();
+void restoreOpenCloseState();

protected:
LLAccordionCtrlTab(const LLAccordionCtrlTab::Params&);

@@ -288,7 +288,7 @@ void LLContainerView::setLabel(const std::string& label)
mLabel = label;
}

-void LLContainerView::setDisplayChildren(const bool displayChildren)
+void LLContainerView::setDisplayChildren(bool displayChildren)
{
mDisplayChildren = displayChildren;
for (child_list_const_iter_t child_iter = getChildList()->begin();

@@ -78,7 +78,7 @@ public:

void setLabel(const std::string& label);
void showLabel(bool show) { mShowLabel = show; }
-void setDisplayChildren(const bool displayChildren);
+void setDisplayChildren(bool displayChildren);
bool getDisplayChildren() { return mDisplayChildren; }
void setScrollContainer(LLScrollContainer* scroll) {mScrollContainer = scroll;}

@@ -1361,26 +1361,28 @@ void LLFlatListViewEx::setForceShowingUnmatchedItems(bool show)
mForceShowingUnmatchedItems = show;
}

-void LLFlatListViewEx::setFilterSubString(const std::string& filter_str)
+void LLFlatListViewEx::setFilterSubString(const std::string& filter_str, bool notify_parent)
{
if (0 != LLStringUtil::compareInsensitive(filter_str, mFilterSubString))
{
mFilterSubString = filter_str;
updateNoItemsMessage(mFilterSubString);
-filterItems();
+filterItems(false, notify_parent);
}
}

-void LLFlatListViewEx::updateItemVisibility(LLPanel* item, const LLSD &action)
+bool LLFlatListViewEx::updateItemVisibility(LLPanel* item, const LLSD &action)
{
-if (!item) return;
+if (!item)
+return false;

+bool visible = true;

// 0 signifies that filter is matched,
// i.e. we don't hide items that don't support 'match_filter' action, separators etc.
if (0 == item->notify(action))
{
mHasMatchedItems = true;
item->setVisible(true);
}
else
{

@@ -1388,34 +1390,45 @@ void LLFlatListViewEx::updateItemVisibility(LLPanel* item, const LLSD &action)
if (!mForceShowingUnmatchedItems)
{
selectItem(item, false);
+visible = false;
}
-item->setVisible(mForceShowingUnmatchedItems);
}

+if (item->getVisible() != visible)
+{
+item->setVisible(visible);
+return true;
+}

+return false;
}

-void LLFlatListViewEx::filterItems()
+void LLFlatListViewEx::filterItems(bool re_sort, bool notify_parent)
{
typedef std::vector <LLPanel*> item_panel_list_t;

std::string cur_filter = mFilterSubString;
LLStringUtil::toUpper(cur_filter);

LLSD action;
action.with("match_filter", cur_filter);

-item_panel_list_t items;
-getItems(items);

mHasMatchedItems = false;
-item_panel_list_t::iterator iter = items.begin(), iter_end = items.end();
-while (iter < iter_end)
+bool visibility_changed = false;
+pairs_const_iterator_t iter = getItemPairs().begin(), iter_end = getItemPairs().end();
+while (iter != iter_end)
{
-LLPanel* pItem = *(iter++);
-updateItemVisibility(pItem, action);
+LLPanel* pItem = (*(iter++))->first;
+visibility_changed |= updateItemVisibility(pItem, action);
}

-sort();
-notifyParentItemsRectChanged();
+if (re_sort)
+{
+sort();
+}

+if (visibility_changed && notify_parent)
+{
+notifyParentItemsRectChanged();
+}
}

bool LLFlatListViewEx::hasMatchedItems()

@@ -300,6 +300,7 @@ public:
virtual S32 notify(const LLSD& info) ;

virtual ~LLFlatListView();

protected:

/** Pairs LLpanel representing a single item LLPanel and LLSD associated with it */

@@ -375,7 +376,9 @@ protected:

LLRect getLastSelectedItemRect();

-void ensureSelectedVisible();
+void ensureSelectedVisible();

+const pairs_list_t& getItemPairs() { return mItemPairs; }

private:

@@ -482,14 +485,14 @@ public:
/**
* Sets up new filter string and filters the list.
*/
-void setFilterSubString(const std::string& filter_str);
+void setFilterSubString(const std::string& filter_str, bool notify_parent);
std::string getFilterSubString() { return mFilterSubString; }

/**
* Filters the list, rearranges and notifies parent about shape changes.
* Derived classes may want to overload rearrangeItems() to exclude repeated separators after filtration.
*/
-void filterItems();
+void filterItems(bool re_sort, bool notify_parent);

/**
* Returns true if last call of filterItems() found at least one matching item

@@ -513,7 +516,7 @@ protected:
* @param item - item we are changing
* @param item - action - parameters to determin visibility from
*/
-void updateItemVisibility(LLPanel* item, const LLSD &action);
+bool updateItemVisibility(LLPanel* item, const LLSD &action);

private:
std::string mNoFilteredItemsMsg;
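A hypothetical caller-side sketch of the new filterItems()/setFilterSubString() parameters introduced above: apply a filter without notifying the parent layout, then perform one explicit re-sort and notification once all visibility changes are in. The refresh_list function is illustrative, not viewer code; only the LLFlatListViewEx methods are taken from this diff.

// Hypothetical caller: batch filter + visibility updates, then notify once.
void refresh_list(LLFlatListViewEx* list, const std::string& filter)
{
    // Apply the new filter but keep the parent layout untouched for now.
    list->setFilterSubString(filter, /*notify_parent=*/false);

    // ... other updates that also affect item visibility ...

    // One pass that re-sorts and notifies the parent about the new rect.
    list->filterItems(/*re_sort=*/true, /*notify_parent=*/true);
}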
@@ -506,7 +506,6 @@ void LLFloater::enableResizeCtrls(bool enable, bool width, bool height)

void LLFloater::destroy()
{
-gFloaterView->onDestroyFloater(this);
// LLFloaterReg should be synchronized with "dead" floater to avoid returning dead instance before
// it was deleted via LLMortician::updateClass(). See EXT-8458.
LLFloaterReg::removeInstance(mInstanceName, mKey);

@@ -2407,8 +2406,7 @@ LLFloaterView::LLFloaterView (const Params& p)
mFocusCycleMode(false),
mMinimizePositionVOffset(0),
mSnapOffsetBottom(0),
-mSnapOffsetRight(0),
-mFrontChild(NULL)
+mSnapOffsetRight(0)
{
mSnapView = getHandle();
}

@@ -2564,7 +2562,8 @@ void LLFloaterView::bringToFront(LLFloater* child, bool give_focus, bool restore
if (!child)
return;

-if (mFrontChild == child)
+LLFloater* front_child = mFrontChildHandle.get();
+if (front_child == child)
{
if (give_focus && child->canFocusStealFrontmost() && !gFocusMgr.childHasKeyboardFocus(child))
{

@@ -2573,12 +2572,12 @@ void LLFloaterView::bringToFront(LLFloater* child, bool give_focus, bool restore
return;
}

-if (mFrontChild)
+if (front_child && front_child->getVisible())
{
-mFrontChild->goneFromFront();
+front_child->goneFromFront();
}

-mFrontChild = child;
+mFrontChildHandle = child->getHandle();

// *TODO: make this respect floater's mAutoFocus value, instead of
// using parameter

@@ -3077,7 +3076,8 @@ LLFloater *LLFloaterView::getBackmost() const

void LLFloaterView::syncFloaterTabOrder()
{
-if (mFrontChild && !mFrontChild->isDead() && mFrontChild->getIsChrome())
+LLFloater* front_child = mFrontChildHandle.get();
+if (front_child && front_child->getIsChrome())
return;

// look for a visible modal dialog, starting from first

@@ -3115,11 +3115,12 @@ void LLFloaterView::syncFloaterTabOrder()
LLFloater* floaterp = dynamic_cast<LLFloater*>(*child_it);
if (gFocusMgr.childHasKeyboardFocus(floaterp))
{
-if (mFrontChild != floaterp)
+LLFloater* front_child = mFrontChildHandle.get();
+if (front_child != floaterp)
{
// Grab a list of the top floaters that want to stay on top of the focused floater
std::list<LLFloater*> listTop;
-if (mFrontChild && !mFrontChild->canFocusStealFrontmost())
+if (front_child && !front_child->canFocusStealFrontmost())
{
for (LLView* childp : *getChildList())
{

@@ -3139,7 +3140,7 @@ void LLFloaterView::syncFloaterTabOrder()
{
sendChildToFront(childp);
}
-mFrontChild = listTop.back();
+mFrontChildHandle = listTop.back()->getHandle();
}
}

@@ -3235,14 +3236,6 @@ void LLFloaterView::setToolbarRect(LLToolBarEnums::EToolBarLocation tb, const LL
}
}

-void LLFloaterView::onDestroyFloater(LLFloater* floater)
-{
-if (mFrontChild == floater)
-{
-mFrontChild = nullptr;
-}
-}

void LLFloater::setInstanceName(const std::string& name)
{
if (name != mInstanceName)

@@ -607,7 +607,6 @@ public:
LLFloater* getFrontmostClosableFloater();

void setToolbarRect(LLToolBarEnums::EToolBarLocation tb, const LLRect& toolbar_rect);
-void onDestroyFloater(LLFloater* floater);

private:
void hiddenFloaterClosed(LLFloater* floater);

@@ -623,7 +622,7 @@ private:
S32 mMinimizePositionVOffset;
typedef std::vector<std::pair<LLHandle<LLFloater>, boost::signals2::connection> > hidden_floaters_t;
hidden_floaters_t mHiddenFloaters;
-LLFloater * mFrontChild;
+LLHandle<LLFloater> mFrontChildHandle;
};

//
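The mFrontChild to mFrontChildHandle switch replaces a raw pointer that could dangle after the floater died (hence the removed onDestroyFloater() bookkeeping) with a handle that simply reads back null. A standalone analogy using std::weak_ptr rather than the viewer's LLHandle; the Floater/FloaterView types here are toys.

// Standalone analogy: a weak reference yields null once the target is
// destroyed, instead of leaving a dangling raw pointer to clean up manually.
#include <iostream>
#include <memory>
#include <string>

struct Floater
{
    std::string name;
    void goneFromFront() { std::cout << name << " left the front\n"; }
};

struct FloaterView
{
    std::weak_ptr<Floater> front_child;        // plays the role of LLHandle<LLFloater>

    void bringToFront(const std::shared_ptr<Floater>& child)
    {
        if (auto front = front_child.lock())   // like mFrontChildHandle.get()
        {
            if (front == child) return;
            front->goneFromFront();
        }
        front_child = child;
    }
};

int main()
{
    FloaterView view;
    auto a = std::make_shared<Floater>(Floater{"A"});
    view.bringToFront(a);
    a.reset();                        // destroying the floater...
    auto b = std::make_shared<Floater>(Floater{"B"});
    view.bringToFront(b);             // ...no dangling access, lock() just returns null
}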
@ -187,14 +187,18 @@ LLFolderViewItem::~LLFolderViewItem()
|
|||
|
||||
bool LLFolderViewItem::postBuild()
|
||||
{
|
||||
LLFolderViewModelItem& vmi = *getViewModelItem();
|
||||
// getDisplayName() is expensive (due to internal getLabelSuffix() and name building)
|
||||
// it also sets search strings so it requires a filter reset
|
||||
mLabel = vmi.getDisplayName();
|
||||
setToolTip(vmi.getName());
|
||||
LLFolderViewModelItem* vmi = getViewModelItem();
|
||||
llassert(vmi); // not supposed to happen, if happens, find out why and fix
|
||||
if (vmi)
|
||||
{
|
||||
// getDisplayName() is expensive (due to internal getLabelSuffix() and name building)
|
||||
// it also sets search strings so it requires a filter reset
|
||||
mLabel = vmi->getDisplayName();
|
||||
setToolTip(vmi->getName());
|
||||
|
||||
// Dirty the filter flag of the model from the view (CHUI-849)
|
||||
vmi.dirtyFilter();
|
||||
// Dirty the filter flag of the model from the view (CHUI-849)
|
||||
vmi->dirtyFilter();
|
||||
}
|
||||
|
||||
// Don't do full refresh on constructor if it is possible to avoid
|
||||
// it significantly slows down bulk view creation.
|
||||
|
|
|
|||
|
|
@@ -479,7 +479,7 @@ LLTrace::BlockTimerStatHandle FTM_SYNTAX_COLORING("Syntax Coloring");

// Walk through a string, applying the rules specified by the keyword token list and
// create a list of color segments.
void LLKeywords::findSegments(std::vector<LLTextSegmentPtr>* seg_list, const LLWString& wtext, const LLColor4 &defaultColor, LLTextEditor& editor)
void LLKeywords::findSegments(std::vector<LLTextSegmentPtr>* seg_list, const LLWString& wtext, LLTextEditor& editor, LLStyleConstSP style)
{
LL_RECORD_BLOCK_TIME(FTM_SYNTAX_COLORING);
seg_list->clear();

@@ -491,7 +491,7 @@ void LLKeywords::findSegments(std::vector<LLTextSegmentPtr>* seg_list, const LLW
S32 text_len = wtext.size() + 1;

seg_list->push_back( new LLNormalTextSegment( defaultColor, 0, text_len, editor ) );
seg_list->push_back( new LLNormalTextSegment( style, 0, text_len, editor ) );

const llwchar* base = wtext.c_str();
const llwchar* cur = base;

@@ -501,9 +501,9 @@ void LLKeywords::findSegments(std::vector<LLTextSegmentPtr>* seg_list, const LLW
{
if( *cur == '\n' )
{
LLTextSegmentPtr text_segment = new LLLineBreakTextSegment(cur-base);
LLTextSegmentPtr text_segment = new LLLineBreakTextSegment(style, cur-base);
text_segment->setToken( 0 );
insertSegment( *seg_list, text_segment, text_len, defaultColor, editor);
insertSegment( *seg_list, text_segment, text_len, style, editor);
cur++;
if( !*cur || *cur == '\n' )
{

@@ -541,7 +541,7 @@ void LLKeywords::findSegments(std::vector<LLTextSegmentPtr>* seg_list, const LLW
S32 seg_end = cur - base;

//create segments from seg_start to seg_end
insertSegments(wtext, *seg_list,cur_token, text_len, seg_start, seg_end, defaultColor, editor);
insertSegments(wtext, *seg_list,cur_token, text_len, seg_start, seg_end, style, editor);
line_done = true; // to break out of second loop.
break;
}

@@ -648,7 +648,7 @@ void LLKeywords::findSegments(std::vector<LLTextSegmentPtr>* seg_list, const LLW
seg_end = seg_start + between_delimiters + cur_delimiter->getLengthHead();
}

insertSegments(wtext, *seg_list,cur_delimiter, text_len, seg_start, seg_end, defaultColor, editor);
insertSegments(wtext, *seg_list,cur_delimiter, text_len, seg_start, seg_end, style, editor);
/*
LLTextSegmentPtr text_segment = new LLNormalTextSegment( cur_delimiter->getColor(), seg_start, seg_end, editor );
text_segment->setToken( cur_delimiter );

@@ -682,7 +682,7 @@ void LLKeywords::findSegments(std::vector<LLTextSegmentPtr>* seg_list, const LLW

// LL_INFOS("SyntaxLSL") << "Seg: [" << word.c_str() << "]" << LL_ENDL;

insertSegments(wtext, *seg_list,cur_token, text_len, seg_start, seg_end, defaultColor, editor);
insertSegments(wtext, *seg_list,cur_token, text_len, seg_start, seg_end, style, editor);
}
cur += seg_len;
continue;
@@ -697,30 +697,32 @@ void LLKeywords::findSegments(std::vector<LLTextSegmentPtr>* seg_list, const LLW
}
}

void LLKeywords::insertSegments(const LLWString& wtext, std::vector<LLTextSegmentPtr>& seg_list, LLKeywordToken* cur_token, S32 text_len, S32 seg_start, S32 seg_end, const LLColor4 &defaultColor, LLTextEditor& editor )
void LLKeywords::insertSegments(const LLWString& wtext, std::vector<LLTextSegmentPtr>& seg_list, LLKeywordToken* cur_token, S32 text_len, S32 seg_start, S32 seg_end, LLStyleConstSP style, LLTextEditor& editor )
{
std::string::size_type pos = wtext.find('\n',seg_start);

LLStyleConstSP cur_token_style = new LLStyle(LLStyle::Params().font(style->getFont()).color(cur_token->getColor()));

while (pos!=-1 && pos < (std::string::size_type)seg_end)
{
if (pos!=seg_start)
{
LLTextSegmentPtr text_segment = new LLNormalTextSegment( cur_token->getColor(), seg_start, pos, editor );
LLTextSegmentPtr text_segment = new LLNormalTextSegment(cur_token_style, seg_start, pos, editor);
text_segment->setToken( cur_token );
insertSegment( seg_list, text_segment, text_len, defaultColor, editor);
insertSegment( seg_list, text_segment, text_len, style, editor);
}

LLTextSegmentPtr text_segment = new LLLineBreakTextSegment(pos);
LLTextSegmentPtr text_segment = new LLLineBreakTextSegment(style, pos);
text_segment->setToken( cur_token );
insertSegment( seg_list, text_segment, text_len, defaultColor, editor);
insertSegment( seg_list, text_segment, text_len, style, editor);

seg_start = pos+1;
pos = wtext.find('\n',seg_start);
}

LLTextSegmentPtr text_segment = new LLNormalTextSegment( cur_token->getColor(), seg_start, seg_end, editor );
LLTextSegmentPtr text_segment = new LLNormalTextSegment(cur_token_style, seg_start, seg_end, editor);
text_segment->setToken( cur_token );
insertSegment( seg_list, text_segment, text_len, defaultColor, editor);
insertSegment( seg_list, text_segment, text_len, style, editor);
}

void LLKeywords::insertSegment(std::vector<LLTextSegmentPtr>& seg_list, LLTextSegmentPtr new_segment, S32 text_len, const LLColor4 &defaultColor, LLTextEditor& editor )
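The notable new line in `insertSegments` derives a per-token style from the editor's base style, keeping the base font but overriding the colour (`LLStyle::Params().font(style->getFont()).color(cur_token->getColor())`). A reduced, self-contained sketch of that "copy the base style, swap one attribute" idea, using plain structs rather than the real LLStyle/LLColor4 types:

```cpp
// Self-contained sketch of deriving a token style from a base style:
// keep the base font, substitute the token's colour.
#include <iostream>
#include <memory>
#include <string>

struct Color { float r, g, b; };

struct Style
{
    std::string font;
    Color color;
};

using StyleConstSP = std::shared_ptr<const Style>;

StyleConstSP makeTokenStyle(const StyleConstSP& base, const Color& token_color)
{
    // copy everything from the base style, then override the colour only
    return std::make_shared<const Style>(Style{ base->font, token_color });
}

int main()
{
    StyleConstSP base    = std::make_shared<const Style>(Style{ "Monospace", { 1.f, 1.f, 1.f } });
    StyleConstSP keyword = makeTokenStyle(base, { 0.f, 0.4f, 1.f });   // blue keywords
    std::cout << keyword->font << '\n';                                // still "Monospace"
}
```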
@@ -744,6 +746,27 @@ void LLKeywords::insertSegment(std::vector<LLTextSegmentPtr>& seg_list, LLTextSe
}
}

void LLKeywords::insertSegment(std::vector<LLTextSegmentPtr>& seg_list, LLTextSegmentPtr new_segment, S32 text_len, LLStyleConstSP style, LLTextEditor& editor )
{
LLTextSegmentPtr last = seg_list.back();
S32 new_seg_end = new_segment->getEnd();

if( new_segment->getStart() == last->getStart() )
{
seg_list.pop_back();
}
else
{
last->setEnd( new_segment->getStart() );
}
seg_list.push_back( new_segment );

if( new_seg_end < text_len )
{
seg_list.push_back( new LLNormalTextSegment( style, new_seg_end, text_len, editor ) );
}
}

#ifdef _DEBUG
void LLKeywords::dump()
{
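The added `insertSegment` overload keeps the segment list tiling the whole text: the incoming segment either replaces or shortens the trailing segment, and a filler segment is re-appended for whatever tail remains. The same invariant in a standalone toy, with integer ranges instead of `LLTextSegmentPtr`:

```cpp
// Standalone sketch of the list invariant maintained by insertSegment() above:
// segments must cover [0, text_len) with no gaps.
#include <iostream>
#include <vector>

struct Seg { int start, end; };   // half-open range [start, end)

void insertSegment(std::vector<Seg>& segs, Seg new_seg, int text_len)
{
    Seg& last = segs.back();
    if (new_seg.start == last.start)
        segs.pop_back();           // new segment supersedes the trailing one
    else
        last.end = new_seg.start;  // otherwise just shorten it
    segs.push_back(new_seg);

    if (new_seg.end < text_len)
        segs.push_back({ new_seg.end, text_len });   // keep covering the full text
}

int main()
{
    const int text_len = 20;
    std::vector<Seg> segs{ { 0, text_len } };        // one default segment to start
    insertSegment(segs, { 5, 9 }, text_len);         // e.g. a keyword at [5,9)
    for (const Seg& s : segs)
        std::cout << '[' << s.start << ',' << s.end << ") ";
    std::cout << '\n';                               // prints: [0,5) [5,9) [9,20)
}
```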
@@ -29,6 +29,7 @@

#include "lldir.h"
#include "llstyle.h"
#include "llstring.h"
#include "v3color.h"
#include "v4color.h"

@@ -115,8 +116,8 @@ public:

void findSegments(std::vector<LLTextSegmentPtr> *seg_list,
const LLWString& text,
const LLColor4 &defaultColor,
class LLTextEditor& editor);
class LLTextEditor& editor,
LLStyleConstSP style);
void initialize(LLSD SyntaxXML);
void processTokens();

@@ -181,9 +182,11 @@ protected:
S32 text_len,
S32 seg_start,
S32 seg_end,
const LLColor4 &defaultColor,
LLStyleConstSP style,
LLTextEditor& editor);

void insertSegment(std::vector<LLTextSegmentPtr>& seg_list, LLTextSegmentPtr new_segment, S32 text_len, LLStyleConstSP style, LLTextEditor& editor );

bool mLoaded;
LLSD mSyntax;
word_token_map_t mWordTokenMap;
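One small detail kept in the declaration above is the in-place `class LLTextEditor& editor` parameter: an elaborated type specifier forward-declares the type on the spot, so the header does not have to include the text editor's header. A standalone illustration with an invented `Widget` type:

```cpp
// Standalone sketch of an in-place forward declaration via an elaborated
// type specifier: the parameter mentions the type without any #include.
#include <iostream>

void describe(class Widget& w);        // declares Widget right here

class Widget                           // full definition needed only where members are used
{
public:
    const char* name() const { return "widget"; }
};

void describe(Widget& w) { std::cout << w.name() << '\n'; }

int main()
{
    Widget w;
    describe(w);
}
```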
@@ -1788,7 +1788,8 @@ LLMenuGL::LLMenuGL(const LLMenuGL::Params& p)
mNeedsArrange(false),
mAlwaysShowMenu(false),
mResetScrollPositionOnShow(true),
mShortcutPad(p.shortcut_pad)
mShortcutPad(p.shortcut_pad),
mFont(p.font)
{
typedef boost::tokenizer<boost::char_separator<char> > tokenizer;
boost::char_separator<char> sep("_");

@@ -3647,6 +3648,7 @@ bool LLMenuBarGL::appendMenu( LLMenuGL* menu )
p.disabled_color=LLUIColorTable::instance().getColor("MenuItemDisabledColor");
p.highlight_bg_color=LLUIColorTable::instance().getColor("MenuItemHighlightBgColor");
p.highlight_fg_color=LLUIColorTable::instance().getColor("MenuItemHighlightFgColor");
p.font = menu->getFont();

LLMenuItemBranchDownGL* branch = LLUICtrlFactory::create<LLMenuItemBranchDownGL>(p);
success &= branch->addToAcceleratorList(&mAccelerators);

@@ -562,7 +562,9 @@ public:
// add a context menu branch
bool appendContextSubMenu(LLMenuGL *menu);

protected:
const LLFontGL *getFont() const { return mFont; }

protected:
void createSpilloverBranch();
void cleanupSpilloverBranch();
// Add the menu item to this menu.

@@ -594,6 +596,9 @@ protected:
bool mKeepFixedSize;
bool mNeedsArrange;

// Font for top menu items only
const LLFontGL* mFont;

private:
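The menu change threads a font from the params into each top-level branch item: the constructor stores `p.font`, `getFont()` exposes it, and `appendMenu()` copies it into the branch item's params (`p.font = menu->getFont();`). A standalone sketch of that hand-me-down, with invented types and the accessor made public purely to keep the example short:

```cpp
// Standalone sketch of the font plumbing above: the menu stores the font it
// was configured with, and the menu bar copies it into the params used to
// build the corresponding branch item.
#include <iostream>
#include <string>

struct Font { std::string name; };

struct MenuParams   { const Font* font = nullptr; };
struct BranchParams { const Font* font = nullptr; };

class Menu
{
public:
    explicit Menu(const MenuParams& p) : mFont(p.font) {}
    const Font* getFont() const { return mFont; }   // public here only for brevity
private:
    const Font* mFont;                              // font for top menu items only
};

class MenuBar
{
public:
    BranchParams appendMenu(const Menu& menu) const
    {
        BranchParams p;
        p.font = menu.getFont();                    // mirrors: p.font = menu->getFont();
        return p;
    }
};

int main()
{
    Font sans{ "SansSerifSmall" };
    Menu file_menu(MenuParams{ &sans });
    MenuBar bar;
    std::cout << bar.appendMenu(file_menu).font->name << '\n';   // "SansSerifSmall"
}
```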
@@ -181,7 +181,7 @@ LLStatBar::LLStatBar(const Params& p)
mTargetMinBar(llmin(p.bar_min, p.bar_max)),
mTargetMaxBar(llmax(p.bar_max, p.bar_min)),
mCurMaxBar(p.bar_max),
mCurMinBar(0),
mCurMinBar(0),
mDecimalDigits(p.decimal_digits),
mNumHistoryFrames(p.num_frames),
mNumShortHistoryFrames(p.num_frames_short),

@@ -222,9 +222,6 @@ bool LLStatBar::handleHover(S32 x, S32 y, MASK mask)
case STAT_SAMPLE:
LLToolTipMgr::instance().show(LLToolTip::Params().message(mStat.sampleStatp->getDescription()).sticky_rect(calcScreenRect()));
break;
case STAT_MEM:
LLToolTipMgr::instance().show(LLToolTip::Params().message(mStat.memStatp->getDescription()).sticky_rect(calcScreenRect()));
break;
default:
break;
}

@@ -373,18 +370,6 @@ void LLStatBar::draw()
}
}
break;
case STAT_MEM:
{
const LLTrace::StatType<LLTrace::MemAccumulator>& mem_stat = *mStat.memStatp;

unit_label = mUnitLabel.empty() ? mem_stat.getUnitLabel() : mUnitLabel;
current = last_frame_recording.getLastValue(mem_stat).value();
min = frame_recording.getPeriodMin(mem_stat, num_frames).value();
max = frame_recording.getPeriodMax(mem_stat, num_frames).value();
mean = frame_recording.getPeriodMean(mem_stat, num_frames).value();
display_value = current;
}
break;
default:
break;
}

@@ -500,11 +485,6 @@ void LLStatBar::draw()
max_value = recording.getMax(*mStat.sampleStatp);
num_samples = recording.getSampleCount(*mStat.sampleStatp);
break;
case STAT_MEM:
min_value = recording.getMin(*mStat.memStatp).value();
max_value = recording.getMax(*mStat.memStatp).value();
num_samples = 1;
break;
default:
break;
}

@@ -583,14 +563,8 @@ void LLStatBar::setStat(const std::string& stat_name)
mStat.sampleStatp = sample_stat.get();
mStatType = STAT_SAMPLE;
}
else if (auto mem_stat = StatType<MemAccumulator>::getInstance(stat_name))
{
mStat.memStatp = mem_stat.get();
mStatType = STAT_MEM;
}
}

void LLStatBar::setRange(F32 bar_min, F32 bar_max)
{
mTargetMinBar = llmin(bar_min, bar_max);
@@ -95,17 +95,15 @@ private:
STAT_NONE,
STAT_COUNT,
STAT_EVENT,
STAT_SAMPLE,
STAT_MEM
STAT_SAMPLE
} mStatType;

union
{
void* valid;
void* valid;
const LLTrace::StatType<LLTrace::CountAccumulator>* countStatp;
const LLTrace::StatType<LLTrace::EventAccumulator>* eventStatp;
const LLTrace::StatType<LLTrace::SampleAccumulator>* sampleStatp;
const LLTrace::StatType<LLTrace::MemAccumulator>* memStatp;
const LLTrace::StatType<LLTrace::SampleAccumulator>* sampleStatp;
} mStat;

LLUIString mLabel;
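What remains after removing `STAT_MEM` is the classic tagged-union pattern: an enum discriminant plus a union of typed pointers, where every consuming `switch` needs one case per live tag, which is why the dead tag also had to be purged from `handleHover()`, `draw()` and `setStat()` above. A standalone sketch of the pattern with invented stat types:

```cpp
// Standalone sketch of the enum + union "tagged pointer" pattern used by
// LLStatBar::mStatType / mStat. Dropping a tag means deleting its case from
// every switch that consumes it.
#include <iostream>

struct CountStat  { int    value = 42;   };
struct SampleStat { double value = 3.14; };

class StatBar
{
public:
    void setCount(const CountStat* s)   { mStat.countStatp  = s; mStatType = STAT_COUNT;  }
    void setSample(const SampleStat* s) { mStat.sampleStatp = s; mStatType = STAT_SAMPLE; }

    void print() const
    {
        switch (mStatType)             // one case per live tag, nothing for removed ones
        {
        case STAT_COUNT:  std::cout << mStat.countStatp->value  << '\n'; break;
        case STAT_SAMPLE: std::cout << mStat.sampleStatp->value << '\n'; break;
        default:          break;
        }
    }

private:
    enum EStatType { STAT_NONE, STAT_COUNT, STAT_SAMPLE };

    EStatType mStatType = STAT_NONE;
    union
    {
        const void*       valid;       // mirrors the original union's placeholder member
        const CountStat*  countStatp;
        const SampleStat* sampleStatp;
    } mStat;
};

int main()
{
    CountStat c;
    StatBar bar;
    bar.setCount(&c);
    bar.print();                       // prints 42
}
```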
@@ -58,10 +58,7 @@ LLStatView::~LLStatView()
}
}

static StatViewRegistry::Register<LLStatBar> r1("stat_bar");
static StatViewRegistry::Register<LLStatView> r2("stat_view");
// stat_view can be a child of panels/etc.
static LLDefaultChildRegistry::Register<LLStatView> r3("stat_view");

@@ -63,7 +63,7 @@ protected:
friend class LLUICtrlFactory;

protected:
std::string mSetting;
const std::string mSetting;
};

#endif // LL_STATVIEW_
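Making `mSetting` const is a small hardening step: the value can then only be set in the constructor's initializer list and cannot drift afterwards. A minimal, self-contained illustration (class and setting name are invented for the sketch):

```cpp
// Minimal sketch: a const std::string member must be initialized in the
// constructor initializer list and is read-only for the rest of the object's life.
#include <iostream>
#include <string>

class StatViewSketch
{
public:
    explicit StatViewSketch(const std::string& setting)
    :   mSetting(setting)                 // the only place mSetting can be assigned
    {}

    const std::string& setting() const { return mSetting; }

private:
    const std::string mSetting;           // mirrors the change above
};

int main()
{
    StatViewSketch view("OpenDebugStatAdvanced");
    std::cout << view.setting() << '\n';
    // view.mSetting = "other";           // would not compile: member is const (and private)
}
```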
@@ -1516,25 +1516,23 @@ bool LLTabContainer::selectTab(S32 which)

LLTabTuple* selected_tuple = getTab(which);
if (!selected_tuple)
{
return false;
}

LLSD cbdata;
if (selected_tuple->mTabPanel)
cbdata = selected_tuple->mTabPanel->getName();

bool res = false;
if( !mValidateSignal || (*mValidateSignal)( this, cbdata ) )
bool result = false;
if (!mValidateSignal || (*mValidateSignal)(this, cbdata))
{
res = setTab(which);
if (res && mCommitSignal)
result = setTab(which);
if (result && mCommitSignal)
{
(*mCommitSignal)(this, cbdata);
}
}

return res;
return result;
}

// private
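`selectTab()` follows the usual validate-then-commit flow: an optional validate signal may veto the switch, and the commit signal fires only if the tab actually changed. A standalone sketch of that control flow, with `std::function` standing in for the viewer's boost::signals2 signals and invented names throughout:

```cpp
// Standalone sketch of the validate-then-commit flow in selectTab above.
#include <functional>
#include <iostream>
#include <string>

class TabsSketch
{
public:
    std::function<bool(const std::string&)> mValidate;   // optional veto
    std::function<void(const std::string&)> mCommit;     // fired only on success

    bool selectTab(int which, const std::string& name)
    {
        bool result = false;
        if (!mValidate || mValidate(name))       // no validator means "always allowed"
        {
            result = setTab(which);
            if (result && mCommit)
            {
                mCommit(name);                   // notify only after a real switch
            }
        }
        return result;
    }

private:
    bool setTab(int which) { return which >= 0; }   // stand-in for the real work
};

int main()
{
    TabsSketch tabs;
    tabs.mValidate = [](const std::string& name) { return name != "locked"; };
    tabs.mCommit   = [](const std::string& name) { std::cout << "switched to " << name << '\n'; };

    tabs.selectTab(1, "inventory");   // validated, committed
    tabs.selectTab(2, "locked");      // vetoed: no commit
}
```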
@@ -3477,7 +3477,7 @@ bool LLNormalTextSegment::handleToolTip(S32 x, S32 y, MASK mask)
if (mToken && !mToken->getToolTip().empty())
{
const LLWString& wmsg = mToken->getToolTip();
LLToolTipMgr::instance().show(wstring_to_utf8str(wmsg));
LLToolTipMgr::instance().show(wstring_to_utf8str(wmsg), (mToken->getType() == LLKeywordToken::TT_FUNCTION));
return true;
}
// or do we have an explicitly set tooltip (e.g., for Urls)
@@ -1862,7 +1862,8 @@ bool LLTextEditor::handleKeyHere(KEY key, MASK mask )
}

if (mEnableTooltipPaste &&
LLToolTipMgr::instance().toolTipVisible() &&
LLToolTipMgr::instance().toolTipVisible() &&
LLToolTipMgr::instance().isTooltipPastable() &&
KEY_TAB == key)
{ // Paste the first line of a tooltip into the editor
std::string message;
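Taken together, the last two hunks make tooltip pasting opt-in per tooltip: the text segment flags its tooltip as pastable only for `TT_FUNCTION` keywords, and the editor pastes on TAB only when the tooltip manager reports it both visible and pastable. A standalone sketch of that handshake; the manager here is a bare struct, not `LLToolTipMgr`, and the LSL signature is just sample text:

```cpp
// Standalone sketch of the "pastable tooltip" handshake: the code that shows
// a tooltip declares whether it may be pasted, and the key handler checks
// that flag before pasting on TAB.
#include <iostream>
#include <string>

struct ToolTipMgrSketch
{
    std::string mMessage;
    bool mVisible  = false;
    bool mPastable = false;

    void show(const std::string& msg, bool pastable)   // mirrors the new two-argument show()
    {
        mMessage  = msg;
        mVisible  = true;
        mPastable = pastable;
    }
    bool toolTipVisible()    const { return mVisible; }
    bool isTooltipPastable() const { return mPastable; }
};

bool handleTab(ToolTipMgrSketch& mgr, std::string& editor_text)
{
    if (mgr.toolTipVisible() && mgr.isTooltipPastable())
    {
        editor_text += mgr.mMessage.substr(0, mgr.mMessage.find('\n'));  // first line only
        return true;
    }
    return false;
}

int main()
{
    ToolTipMgrSketch mgr;
    std::string text;

    mgr.show("llSay(integer channel, string msg)\nSays msg on channel.", /*pastable=*/true);
    handleTab(mgr, text);                       // pastes the signature line
    std::cout << text << '\n';

    mgr.show("Just an informational tooltip.", /*pastable=*/false);
    std::cout << handleTab(mgr, text) << '\n';  // prints 0: not pastable, nothing pasted
}
```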
Some files were not shown because too many files have changed in this diff.