SH-4374 FIX Interesting: Statistics Object cache hit rate is always 100%

Moved the object cache sampling code so that it actually gets executed; default values for stats are now NaN instead of 0 in many cases.

branch: master
parent: f7d90f8bb9
commit: 8d3daa141e
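Note, for illustration only (not part of this commit): the NaN defaults exist so that a stat which has never been sampled can be told apart from one that legitimately recorded 0. A minimal standalone sketch of that idea, with made-up names:

    #include <limits>

    // Hypothetical miniature accumulator: NaN marks "no sample yet", so readers
    // can skip the stat instead of averaging in a misleading 0.
    struct MiniSampleStat
    {
        double last_value = std::numeric_limits<double>::quiet_NaN();
        bool   has_value  = false;

        void   sample(double v)  { last_value = v; has_value = true; }
        bool   hasValue() const  { return has_value; }
        double lastValue() const { return last_value; } // NaN until first sample
    };

Aggregation code then checks hasValue() before folding a stat into a mean, which is what the PeriodicRecording changes further down do.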
@@ -431,11 +431,11 @@ TimeBlockAccumulator::TimeBlockAccumulator()
     mParent(NULL)
 {}
 
-void TimeBlockAccumulator::addSamples( const TimeBlockAccumulator& other, bool append )
+void TimeBlockAccumulator::addSamples( const TimeBlockAccumulator& other, EBufferAppendType append_type )
 {
     // we can't merge two unrelated time block samples, as that will screw with the nested timings
     // due to the call hierarchy of each thread
-    llassert(append);
+    llassert(append_type == SEQUENTIAL);
     mTotalTimeCounter += other.mTotalTimeCounter - other.mStartTotalTimeCounter;
     mSelfTimeCounter += other.mSelfTimeCounter;
     mCalls += other.mCalls;
@@ -86,21 +86,21 @@ bool AccumulatorBufferGroup::isPrimary() const
 
 void AccumulatorBufferGroup::append( const AccumulatorBufferGroup& other )
 {
-    mCounts.addSamples(other.mCounts);
-    mSamples.addSamples(other.mSamples);
-    mEvents.addSamples(other.mEvents);
-    mMemStats.addSamples(other.mMemStats);
-    mStackTimers.addSamples(other.mStackTimers);
+    mCounts.addSamples(other.mCounts, SEQUENTIAL);
+    mSamples.addSamples(other.mSamples, SEQUENTIAL);
+    mEvents.addSamples(other.mEvents, SEQUENTIAL);
+    mMemStats.addSamples(other.mMemStats, SEQUENTIAL);
+    mStackTimers.addSamples(other.mStackTimers, SEQUENTIAL);
 }
 
 void AccumulatorBufferGroup::merge( const AccumulatorBufferGroup& other)
 {
-    mCounts.addSamples(other.mCounts, false);
-    mSamples.addSamples(other.mSamples, false);
-    mEvents.addSamples(other.mEvents, false);
-    mMemStats.addSamples(other.mMemStats, false);
+    mCounts.addSamples(other.mCounts, NON_SEQUENTIAL);
+    mSamples.addSamples(other.mSamples, NON_SEQUENTIAL);
+    mEvents.addSamples(other.mEvents, NON_SEQUENTIAL);
+    mMemStats.addSamples(other.mMemStats, NON_SEQUENTIAL);
     // for now, hold out timers from merge, need to be displayed per thread
-    //mStackTimers.addSamples(other.mStackTimers, false);
+    //mStackTimers.addSamples(other.mStackTimers, NON_SEQUENTIAL);
 }
 
 void AccumulatorBufferGroup::reset(AccumulatorBufferGroup* other)
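Note, for illustration only (not part of the diff): the new EBufferAppendType argument distinguishes the two call sites above. append() stitches together recordings that follow each other in time, so SEQUENTIAL lets the receiver inherit last-value state; merge() combines buffers from unrelated streams (e.g. other threads, per the timer comment), so NON_SEQUENTIAL keeps that state out. A comment-only sketch of the assumed usage, with made-up variable names:

    // frame_b continues frame_a in time on the same thread:
    //     frame_a.addSamples(frame_b, SEQUENTIAL);
    // worker-thread data merged into the main buffer; unrelated in time,
    // so last-value / timestamp state is not inherited:
    //     main_buffer.addSamples(worker_buffer, NON_SEQUENTIAL);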
@@ -120,4 +120,130 @@ void AccumulatorBufferGroup::sync()
     mMemStats.sync(time_stamp);
 }
 
+void SampleAccumulator::addSamples( const SampleAccumulator& other, EBufferAppendType append_type )
+{
+    if (!mHasValue)
+    {
+        *this = other;
+
+        if (append_type == NON_SEQUENTIAL)
+        {
+            // restore own last value state
+            mLastValue = NaN;
+            mHasValue = false;
+        }
+    }
+    else if (other.mHasValue)
+    {
+        mSum += other.mSum;
+
+        if (other.mMin < mMin) { mMin = other.mMin; }
+        if (other.mMax > mMax) { mMax = other.mMax; }
+
+        F64 epsilon = 0.0000001;
+
+        if (other.mTotalSamplingTime > epsilon)
+        {
+            // combine variance (and hence standard deviation) of 2 different sized sample groups using
+            // the following formula: http://www.mrc-bsu.cam.ac.uk/cochrane/handbook/chapter_7/7_7_3_8_combining_groups.htm
+            F64 n_1 = mTotalSamplingTime,
+                n_2 = other.mTotalSamplingTime;
+            F64 m_1 = mMean,
+                m_2 = other.mMean;
+            F64 v_1 = mSumOfSquares / mTotalSamplingTime,
+                v_2 = other.mSumOfSquares / other.mTotalSamplingTime;
+            if (n_1 < epsilon)
+            {
+                mSumOfSquares = other.mSumOfSquares;
+            }
+            else
+            {
+                mSumOfSquares = mTotalSamplingTime
+                                * ((((n_1 - epsilon) * v_1)
+                                    + ((n_2 - epsilon) * v_2)
+                                    + (((n_1 * n_2) / (n_1 + n_2))
+                                        * ((m_1 * m_1) + (m_2 * m_2) - (2.f * m_1 * m_2))))
+                                    / (n_1 + n_2 - epsilon));
+            }
+
+            llassert(other.mTotalSamplingTime > 0);
+            F64 weight = mTotalSamplingTime / (mTotalSamplingTime + other.mTotalSamplingTime);
+            mNumSamples += other.mNumSamples;
+            mTotalSamplingTime += other.mTotalSamplingTime;
+            mMean = (mMean * weight) + (other.mMean * (1.0 - weight));
+        }
+        if (append_type == SEQUENTIAL)
+        {
+            mLastValue = other.mLastValue;
+            mLastSampleTimeStamp = other.mLastSampleTimeStamp;
+            mHasValue = true;
+        }
+    }
+}
+
+void SampleAccumulator::reset( const SampleAccumulator* other )
+{
+    mLastValue = other ? other->mLastValue : NaN;
+    mHasValue = other ? other->mHasValue : false;
+    mNumSamples = 0;
+    mSum = 0;
+    mMin = mLastValue;
+    mMax = mLastValue;
+    mMean = mLastValue;
+    mSumOfSquares = 0;
+    mLastSampleTimeStamp = LLTimer::getTotalSeconds();
+    mTotalSamplingTime = 0;
+}
+
+void EventAccumulator::addSamples( const EventAccumulator& other, EBufferAppendType append_type )
+{
+    if (other.mNumSamples)
+    {
+        if (!mNumSamples)
+        {
+            *this = other;
+        }
+        else
+        {
+            mSum += other.mSum;
+
+            // NOTE: both conditions will hold first time through
+            if (other.mMin < mMin) { mMin = other.mMin; }
+            if (other.mMax > mMax) { mMax = other.mMax; }
+
+            // combine variance (and hence standard deviation) of 2 different sized sample groups using
+            // the following formula: http://www.mrc-bsu.cam.ac.uk/cochrane/handbook/chapter_7/7_7_3_8_combining_groups.htm
+            F64 n_1 = (F64)mNumSamples,
+                n_2 = (F64)other.mNumSamples;
+            F64 m_1 = mMean,
+                m_2 = other.mMean;
+            F64 v_1 = mSumOfSquares / mNumSamples,
+                v_2 = other.mSumOfSquares / other.mNumSamples;
+            mSumOfSquares = (F64)mNumSamples
+                            * ((((n_1 - 1.f) * v_1)
+                                + ((n_2 - 1.f) * v_2)
+                                + (((n_1 * n_2) / (n_1 + n_2))
+                                    * ((m_1 * m_1) + (m_2 * m_2) - (2.f * m_1 * m_2))))
+                                / (n_1 + n_2 - 1.f));
+
+            F64 weight = (F64)mNumSamples / (F64)(mNumSamples + other.mNumSamples);
+            mNumSamples += other.mNumSamples;
+            mMean = mMean * weight + other.mMean * (1.f - weight);
+            if (append_type == SEQUENTIAL) mLastValue = other.mLastValue;
+        }
+    }
+}
+
+void EventAccumulator::reset( const EventAccumulator* other )
+{
+    mNumSamples = 0;
+    mSum = NaN;
+    mMin = NaN;
+    mMax = NaN;
+    mMean = NaN;
+    mSumOfSquares = 0;
+    mLastValue = other ? other->mLastValue : NaN;
+}
+
+
 }
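Note, for reference (not part of the diff): the variance-combining code above implements the pooled-variance formula cited in the comments (Cochrane Handbook 7.7.3.8). For two groups with sizes n_1, n_2, means m_1, m_2 and variances v_1, v_2, the combined variance is

    s^2 = \frac{(n_1 - 1)\,v_1 + (n_2 - 1)\,v_2 + \frac{n_1 n_2}{n_1 + n_2}\,(m_1^2 + m_2^2 - 2 m_1 m_2)}{n_1 + n_2 - 1}

where m_1^2 + m_2^2 - 2 m_1 m_2 = (m_1 - m_2)^2. The accumulators store n * v (the sum of squared deviations) in mSumOfSquares rather than v itself; the SampleAccumulator variant substitutes total sampling time for sample count and a small epsilon for the 1s, presumably to guard against division by zero.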
@@ -38,6 +38,14 @@
 namespace LLTrace
 {
+const F64 NaN = std::numeric_limits<double>::quiet_NaN();
+
+enum EBufferAppendType
+{
+    SEQUENTIAL,
+    NON_SEQUENTIAL
+};
+
 template<typename ACCUMULATOR>
 class AccumulatorBuffer : public LLRefCount
 {
@@ -83,12 +91,12 @@ namespace LLTrace
         return mStorage[index];
     }
 
-    void addSamples(const AccumulatorBuffer<ACCUMULATOR>& other, bool append = true)
+    void addSamples(const AccumulatorBuffer<ACCUMULATOR>& other, EBufferAppendType append_type)
     {
         llassert(mStorageSize >= sNextStorageSlot && other.mStorageSize > sNextStorageSlot);
         for (size_t i = 0; i < sNextStorageSlot; i++)
         {
-            mStorage[i].addSamples(other.mStorage[i], append);
+            mStorage[i].addSamples(other.mStorage[i], append_type);
         }
     }
 
@@ -211,7 +219,6 @@ namespace LLTrace
 template<typename ACCUMULATOR> size_t AccumulatorBuffer<ACCUMULATOR>::sNextStorageSlot = 0;
 template<typename ACCUMULATOR> AccumulatorBuffer<ACCUMULATOR>* AccumulatorBuffer<ACCUMULATOR>::sDefaultBuffer = NULL;
 
-
 class EventAccumulator
 {
 public:
@@ -219,98 +226,51 @@ namespace LLTrace
     typedef F64 mean_t;
 
     EventAccumulator()
-    :   mSum(0),
-        mMin((std::numeric_limits<F64>::max)()),
-        mMax((std::numeric_limits<F64>::min)()),
-        mMean(0),
+    :   mSum(NaN),
+        mMin(NaN),
+        mMax(NaN),
+        mMean(NaN),
         mSumOfSquares(0),
         mNumSamples(0),
-        mLastValue(0)
+        mLastValue(NaN)
     {}
 
     void record(F64 value)
     {
-        mNumSamples++;
-        mSum += value;
-        // NOTE: both conditions will hold on first pass through
-        if (value < mMin)
+        if (mNumSamples == 0)
         {
+            mSum = value;
+            mMean = value;
             mMin = value;
-        }
-        if (value > mMax)
-        {
             mMax = value;
         }
-        F64 old_mean = mMean;
-        mMean += (value - old_mean) / (F64)mNumSamples;
-        mSumOfSquares += (value - old_mean) * (value - mMean);
+        else
+        {
+            mSum += value;
+            F64 old_mean = mMean;
+            mMean += (value - old_mean) / (F64)mNumSamples;
+            mSumOfSquares += (value - old_mean) * (value - mMean);
+
+            if (value < mMin) { mMin = value; }
+            else if (value > mMax) { mMax = value; }
+        }
+
+        mNumSamples++;
         mLastValue = value;
     }
 
-    void addSamples(const EventAccumulator& other, bool append)
-    {
-        if (other.mNumSamples)
-        {
-            mSum += other.mSum;
-
-            // NOTE: both conditions will hold first time through
-            if (other.mMin < mMin) { mMin = other.mMin; }
-            if (other.mMax > mMax) { mMax = other.mMax; }
-
-            // combine variance (and hence standard deviation) of 2 different sized sample groups using
-            // the following formula: http://www.mrc-bsu.cam.ac.uk/cochrane/handbook/chapter_7/7_7_3_8_combining_groups.htm
-            F64 n_1 = (F64)mNumSamples,
-                n_2 = (F64)other.mNumSamples;
-            F64 m_1 = mMean,
-                m_2 = other.mMean;
-            F64 v_1 = mSumOfSquares / mNumSamples,
-                v_2 = other.mSumOfSquares / other.mNumSamples;
-            if (n_1 == 0)
-            {
-                mSumOfSquares = other.mSumOfSquares;
-            }
-            else if (n_2 == 0)
-            {
-                // don't touch variance
-                // mSumOfSquares = mSumOfSquares;
-            }
-            else
-            {
-                mSumOfSquares = (F64)mNumSamples
-                                * ((((n_1 - 1.f) * v_1)
-                                    + ((n_2 - 1.f) * v_2)
-                                    + (((n_1 * n_2) / (n_1 + n_2))
-                                        * ((m_1 * m_1) + (m_2 * m_2) - (2.f * m_1 * m_2))))
-                                    / (n_1 + n_2 - 1.f));
-            }
-
-            F64 weight = (F64)mNumSamples / (F64)(mNumSamples + other.mNumSamples);
-            mNumSamples += other.mNumSamples;
-            mMean = mMean * weight + other.mMean * (1.f - weight);
-            if (append) mLastValue = other.mLastValue;
-        }
-    }
-
-    void reset(const EventAccumulator* other)
-    {
-        mNumSamples = 0;
-        mSum = 0;
-        mMin = std::numeric_limits<F64>::max();
-        mMax = std::numeric_limits<F64>::min();
-        mMean = 0;
-        mSumOfSquares = 0;
-        mLastValue = other ? other->mLastValue : 0;
-    }
-
+    void addSamples(const EventAccumulator& other, EBufferAppendType append_type);
+    void reset(const EventAccumulator* other);
     void sync(LLUnitImplicit<F64, LLUnits::Seconds>) {}
 
-    F64 getSum() const { return mSum; }
-    F64 getMin() const { return mMin; }
-    F64 getMax() const { return mMax; }
-    F64 getLastValue() const { return mLastValue; }
-    F64 getMean() const { return mMean; }
+    F64 getSum() const { return mSum; }
+    F64 getMin() const { return mMin; }
+    F64 getMax() const { return mMax; }
+    F64 getLastValue() const { return mLastValue; }
+    F64 getMean() const { return mMean; }
     F64 getStandardDeviation() const { return sqrtf(mSumOfSquares / mNumSamples); }
-    U32 getSampleCount() const { return mNumSamples; }
+    U32 getSampleCount() const { return mNumSamples; }
+    bool hasValue() const { return mNumSamples > 0; }
 
 private:
     F64 mSum,
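Note, for reference (not part of the diff): the rewritten record() above is the standard incremental (Welford-style) update of a running mean and sum of squared deviations. With x_k the k-th recorded value:

    m_k = m_{k-1} + (x_k - m_{k-1}) / k
    S_k = S_{k-1} + (x_k - m_{k-1})(x_k - m_k)

so the variance is approximately S_k / k, which is what getStandardDeviation() takes the square root of via sqrtf(mSumOfSquares / mNumSamples).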
@@ -333,143 +293,85 @@ namespace LLTrace
     SampleAccumulator()
     :   mSum(0),
-        mMin((std::numeric_limits<F64>::max)()),
-        mMax((std::numeric_limits<F64>::min)()),
-        mMean(0),
+        mMin(NaN),
+        mMax(NaN),
+        mMean(NaN),
         mSumOfSquares(0),
-        mLastSampleTimeStamp(LLTimer::getTotalSeconds()),
+        mLastSampleTimeStamp(0),
         mTotalSamplingTime(0),
         mNumSamples(0),
-        mLastValue(0),
+        mLastValue(NaN),
         mHasValue(false)
     {}
 
     void sample(F64 value)
     {
         LLUnitImplicit<F64, LLUnits::Seconds> time_stamp = LLTimer::getTotalSeconds();
-        LLUnitImplicit<F64, LLUnits::Seconds> delta_time = time_stamp - mLastSampleTimeStamp;
-        mLastSampleTimeStamp = time_stamp;
 
-        if (mHasValue)
+        // store effect of last value
+        sync(time_stamp);
+
+        if (!mHasValue)
         {
-            mTotalSamplingTime += delta_time;
-            mSum += mLastValue * delta_time;
+            mHasValue = true;
+
             // NOTE: both conditions will hold first time through
+            mMin = value;
+            mMax = value;
+            mMean = value;
+            mLastSampleTimeStamp = time_stamp;
+        }
+        else
+        {
             if (value < mMin) { mMin = value; }
-            if (value > mMax) { mMax = value; }
-
-            F64 old_mean = mMean;
-            mMean += (delta_time / mTotalSamplingTime) * (mLastValue - old_mean);
-            mSumOfSquares += delta_time * (mLastValue - old_mean) * (mLastValue - mMean);
+            else if (value > mMax) { mMax = value; }
         }
 
         mLastValue = value;
         mNumSamples++;
-        mHasValue = true;
     }
 
-    void addSamples(const SampleAccumulator& other, bool append)
-    {
-        if (other.mTotalSamplingTime)
-        {
-            mSum += other.mSum;
-
-            // NOTE: both conditions will hold first time through
-            if (other.mMin < mMin) { mMin = other.mMin; }
-            if (other.mMax > mMax) { mMax = other.mMax; }
-
-            // combine variance (and hence standard deviation) of 2 different sized sample groups using
-            // the following formula: http://www.mrc-bsu.cam.ac.uk/cochrane/handbook/chapter_7/7_7_3_8_combining_groups.htm
-            F64 n_1 = mTotalSamplingTime,
-                n_2 = other.mTotalSamplingTime;
-            F64 m_1 = mMean,
-                m_2 = other.mMean;
-            F64 v_1 = mSumOfSquares / mTotalSamplingTime,
-                v_2 = other.mSumOfSquares / other.mTotalSamplingTime;
-            if (n_1 == 0)
-            {
-                mSumOfSquares = other.mSumOfSquares;
-            }
-            else if (n_2 == 0)
-            {
-                // variance is unchanged
-                // mSumOfSquares = mSumOfSquares;
-            }
-            else
-            {
-                mSumOfSquares = mTotalSamplingTime
-                                * ((((n_1 - 1.f) * v_1)
-                                    + ((n_2 - 1.f) * v_2)
-                                    + (((n_1 * n_2) / (n_1 + n_2))
-                                        * ((m_1 * m_1) + (m_2 * m_2) - (2.f * m_1 * m_2))))
-                                    / (n_1 + n_2 - 1.f));
-            }
-
-            llassert(other.mTotalSamplingTime > 0);
-            F64 weight = mTotalSamplingTime / (mTotalSamplingTime + other.mTotalSamplingTime);
-            mNumSamples += other.mNumSamples;
-            mTotalSamplingTime += other.mTotalSamplingTime;
-            mMean = (mMean * weight) + (other.mMean * (1.0 - weight));
-            if (append)
-            {
-                mLastValue = other.mLastValue;
-                mLastSampleTimeStamp = other.mLastSampleTimeStamp;
-                mHasValue |= other.mHasValue;
-            }
-        }
-    }
-
-    void reset(const SampleAccumulator* other)
-    {
-        mNumSamples = 0;
-        mSum = 0;
-        mMin = std::numeric_limits<F64>::max();
-        mMax = std::numeric_limits<F64>::min();
-        mMean = other ? other->mLastValue : 0;
-        mSumOfSquares = 0;
-        mLastSampleTimeStamp = LLTimer::getTotalSeconds();
-        mTotalSamplingTime = 0;
-        mLastValue = other ? other->mLastValue : 0;
-        mHasValue = other ? other->mHasValue : false;
-    }
+    void addSamples(const SampleAccumulator& other, EBufferAppendType append_type);
+    void reset(const SampleAccumulator* other);
 
     void sync(LLUnitImplicit<F64, LLUnits::Seconds> time_stamp)
     {
-        LLUnitImplicit<F64, LLUnits::Seconds> delta_time = time_stamp - mLastSampleTimeStamp;
-
+        if (mHasValue)
+        {
+            LLUnitImplicit<F64, LLUnits::Seconds> delta_time = time_stamp - mLastSampleTimeStamp;
             mSum += mLastValue * delta_time;
             mTotalSamplingTime += delta_time;
+            F64 old_mean = mMean;
+            mMean += (delta_time / mTotalSamplingTime) * (mLastValue - old_mean);
+            mSumOfSquares += delta_time * (mLastValue - old_mean) * (mLastValue - mMean);
+        }
         mLastSampleTimeStamp = time_stamp;
     }
 
-    F64 getSum() const { return mSum; }
-    F64 getMin() const { return mMin; }
-    F64 getMax() const { return mMax; }
-    F64 getLastValue() const { return mLastValue; }
-    F64 getMean() const { return mMean; }
+    F64 getSum() const { return mSum; }
+    F64 getMin() const { return mMin; }
+    F64 getMax() const { return mMax; }
+    F64 getLastValue() const { return mLastValue; }
+    F64 getMean() const { return mMean; }
     F64 getStandardDeviation() const { return sqrtf(mSumOfSquares / mTotalSamplingTime); }
-    U32 getSampleCount() const { return mNumSamples; }
-    bool hasValue() const { return mHasValue; }
+    U32 getSampleCount() const { return mNumSamples; }
+    bool hasValue() const { return mHasValue; }
 
 private:
-    F64 mSum,
-        mMin,
-        mMax,
-        mLastValue;
+    F64 mSum,
+        mMin,
+        mMax,
+        mLastValue;
 
-    bool mHasValue;
+    bool mHasValue;     // distinct from mNumSamples, since we might have inherited an old sample
 
-    F64 mMean,
-        mSumOfSquares;
+    F64 mMean,
+        mSumOfSquares;
 
-    LLUnitImplicit<F64, LLUnits::Seconds> mLastSampleTimeStamp,
-        mTotalSamplingTime;
+    LLUnitImplicit<F64, LLUnits::Seconds>
+        mLastSampleTimeStamp,
+        mTotalSamplingTime;
 
-    U32 mNumSamples;
+    U32 mNumSamples;
 };
 
 class CountAccumulator
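Note, for reference (not part of the diff): SampleAccumulator is time-weighted rather than count-weighted. sync() folds the previous value in for however long it was current, so if value v_i was held for duration dt_i the reported mean is

    mean = \frac{\sum_i v_i \, dt_i}{\sum_i dt_i}

and getStandardDeviation() likewise divides mSumOfSquares by mTotalSamplingTime instead of by the sample count.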
@@ -489,7 +391,7 @@ namespace LLTrace
         mSum += value;
     }
 
-    void addSamples(const CountAccumulator& other, bool /*append*/)
+    void addSamples(const CountAccumulator& other, bool /*follows_in_sequence*/)
     {
         mSum += other.mSum;
         mNumSamples += other.mNumSamples;
@@ -534,25 +436,26 @@ namespace LLTrace
     };
 
     TimeBlockAccumulator();
-    void addSamples(const self_t& other, bool /*append*/);
+    void addSamples(const self_t& other, EBufferAppendType append_type);
     void reset(const self_t* other);
     void sync(LLUnitImplicit<F64, LLUnits::Seconds>) {}
 
     //
     // members
     //
-    U64 mStartTotalTimeCounter,
-        mTotalTimeCounter,
-        mSelfTimeCounter;
-    U32 mCalls;
-    class TimeBlock* mParent;  // last acknowledged parent of this time block
-    class TimeBlock* mLastCaller;  // used to bootstrap tree construction
-    U16 mActiveCount;  // number of timers with this ID active on stack
-    bool mMoveUpTree;  // needs to be moved up the tree of timers at the end of frame
+    U64 mStartTotalTimeCounter,
+        mTotalTimeCounter,
+        mSelfTimeCounter;
+    U32 mCalls;
+    class TimeBlock* mParent;  // last acknowledged parent of this time block
+    class TimeBlock* mLastCaller;  // used to bootstrap tree construction
+    U16 mActiveCount;  // number of timers with this ID active on stack
+    bool mMoveUpTree;  // needs to be moved up the tree of timers at the end of frame
+
 };
 
 class TimeBlock;
 
 class TimeBlockTreeNode
 {
 public:
@@ -603,10 +506,10 @@ namespace LLTrace
         mDeallocatedCount(0)
     {}
 
-    void addSamples(const MemStatAccumulator& other, bool append)
+    void addSamples(const MemStatAccumulator& other, EBufferAppendType append_type)
     {
-        mSize.addSamples(other.mSize, append);
-        mChildSize.addSamples(other.mChildSize, append);
+        mSize.addSamples(other.mSize, append_type);
+        mChildSize.addSamples(other.mChildSize, append_type);
         mAllocatedCount += other.mAllocatedCount;
         mDeallocatedCount += other.mDeallocatedCount;
     }
@@ -645,11 +548,11 @@ namespace LLTrace
     void reset(AccumulatorBufferGroup* other = NULL);
     void sync();
 
-    AccumulatorBuffer<CountAccumulator> mCounts;
-    AccumulatorBuffer<SampleAccumulator> mSamples;
-    AccumulatorBuffer<EventAccumulator> mEvents;
-    AccumulatorBuffer<TimeBlockAccumulator> mStackTimers;
-    AccumulatorBuffer<MemStatAccumulator> mMemStats;
+    AccumulatorBuffer<CountAccumulator> mCounts;
+    AccumulatorBuffer<SampleAccumulator> mSamples;
+    AccumulatorBuffer<EventAccumulator> mEvents;
+    AccumulatorBuffer<TimeBlockAccumulator> mStackTimers;
+    AccumulatorBuffer<MemStatAccumulator> mMemStats;
 };
 }
 
@@ -287,6 +287,11 @@ U32 Recording::getSampleCount( const TraceType<SampleAccumulator>& stat )
     return mBuffers->mSamples[stat.getIndex()].getSampleCount();
 }
 
+bool Recording::hasValue(const TraceType<EventAccumulator>& stat)
+{
+    return mBuffers->mEvents[stat.getIndex()].hasValue();
+}
+
 F64 Recording::getMin( const TraceType<EventAccumulator>& stat )
 {
     return mBuffers->mEvents[stat.getIndex()].getMin();
@@ -531,10 +536,12 @@ F64 PeriodicRecording::getPeriodMean( const TraceType<EventAccumulator>& stat, s
     for (S32 i = 1; i <= num_periods; i++)
     {
         S32 index = (mCurPeriod + total_periods - i) % total_periods;
-        if (mRecordingPeriods[index].getDuration() > 0.f)
+        Recording& recording = mRecordingPeriods[index];
+
+        if (recording.hasValue(stat))
         {
-            S32 period_sample_count = mRecordingPeriods[index].getSampleCount(stat);
-            mean += mRecordingPeriods[index].getMean(stat) * period_sample_count;
+            S32 period_sample_count = recording.getSampleCount(stat);
+            mean += recording.getMean(stat) * period_sample_count;
             total_sample_count += period_sample_count;
         }
     }
@@ -555,7 +562,11 @@ F64 PeriodicRecording::getPeriodMin( const TraceType<EventAccumulator>& stat, si
     for (S32 i = 1; i <= num_periods; i++)
     {
         S32 index = (mCurPeriod + total_periods - i) % total_periods;
-        min_val = llmin(min_val, mRecordingPeriods[index].getMin(stat));
+        Recording& recording = mRecordingPeriods[index];
+        if (recording.hasValue(stat))
+        {
+            min_val = llmin(min_val, mRecordingPeriods[index].getMin(stat));
+        }
     }
     return min_val;
 }
@@ -569,7 +580,11 @@ F64 PeriodicRecording::getPeriodMax( const TraceType<EventAccumulator>& stat, si
     for (S32 i = 1; i <= num_periods; i++)
     {
         S32 index = (mCurPeriod + total_periods - i) % total_periods;
-        max_val = llmax(max_val, mRecordingPeriods[index].getMax(stat));
+        Recording& recording = mRecordingPeriods[index];
+        if (recording.hasValue(stat))
+        {
+            max_val = llmax(max_val, recording.getMax(stat));
+        }
     }
     return max_val;
 }
@@ -583,9 +598,10 @@ F64 PeriodicRecording::getPeriodMin( const TraceType<SampleAccumulator>& stat, s
     for (S32 i = 1; i <= num_periods; i++)
     {
         S32 index = (mCurPeriod + total_periods - i) % total_periods;
-        if (mRecordingPeriods[index].hasValue(stat))
+        Recording& recording = mRecordingPeriods[index];
+        if (recording.hasValue(stat))
         {
-            min_val = llmin(min_val, mRecordingPeriods[index].getMin(stat));
+            min_val = llmin(min_val, recording.getMin(stat));
         }
     }
     return min_val;
@@ -600,9 +616,10 @@ F64 PeriodicRecording::getPeriodMax(const TraceType<SampleAccumulator>& stat, si
     for (S32 i = 1; i <= num_periods; i++)
     {
         S32 index = (mCurPeriod + total_periods - i) % total_periods;
-        if (mRecordingPeriods[index].hasValue(stat))
+        Recording& recording = mRecordingPeriods[index];
+        if (recording.hasValue(stat))
         {
-            max_val = llmax(max_val, mRecordingPeriods[index].getMax(stat));
+            max_val = llmax(max_val, recording.getMax(stat));
         }
     }
     return max_val;
@@ -622,10 +639,11 @@ F64 PeriodicRecording::getPeriodMean( const TraceType<SampleAccumulator>& stat,
     for (S32 i = 1; i <= num_periods; i++)
    {
         S32 index = (mCurPeriod + total_periods - i) % total_periods;
-        if (mRecordingPeriods[index].getDuration() > 0.f && mRecordingPeriods[index].hasValue(stat))
+        Recording& recording = mRecordingPeriods[index];
+        if (recording.hasValue(stat))
         {
-            LLUnit<F64, LLUnits::Seconds> recording_duration = mRecordingPeriods[index].getDuration();
-            mean += mRecordingPeriods[index].getMean(stat) * recording_duration.value();
+            LLUnit<F64, LLUnits::Seconds> recording_duration = recording.getDuration();
+            mean += recording.getMean(stat) * recording_duration.value();
             total_duration += recording_duration;
         }
     }
@@ -248,6 +248,8 @@ namespace LLTrace
     U32 getSampleCount(const TraceType<SampleAccumulator>& stat);
 
     // EventStatHandle accessors
+    bool hasValue(const TraceType<EventAccumulator>& stat);
+
     F64 getSum(const TraceType<EventAccumulator>& stat);
     template <typename T>
     typename RelatedTypes<T>::sum_t getSum(const EventStatHandle<T>& stat)
@@ -83,6 +83,10 @@ const F32 OO_LN2 = 1.4426950408889634073599246810019f;
 const F32 F_ALMOST_ZERO = 0.0001f;
 const F32 F_ALMOST_ONE = 1.0f - F_ALMOST_ZERO;
 
+const F64 NaN = std::numeric_limits<double>::quiet_NaN();
+const F64 INFINITY = std::numeric_limits<double>::infinity();
+const F64 MINUS_INFINITY = std::numeric_limits<double>::infinity() * -1.0;
+
 // BUG: Eliminate in favor of F_APPROXIMATELY_ZERO above?
 const F32 FP_MAG_THRESHOLD = 0.0000001f;
 
@@ -62,7 +62,7 @@ public:
     /*virtual*/ void draw();
     /*virtual*/ void removeChild(LLView*);
     /*virtual*/ BOOL postBuild();
-    /*virtual*/ bool addChild(LLView* child, S32 tab_groupdatefractuiona = 0);
+    /*virtual*/ bool addChild(LLView* child, S32 tab_group = 0);
     /*virtual*/ void reshape(S32 width, S32 height, BOOL called_from_parent = TRUE);
 
@@ -94,7 +94,6 @@ extern LLPipeline gPipeline;
 U32 LLViewerObjectList::sSimulatorMachineIndex = 1; // Not zero deliberately, to speed up index check.
 std::map<U64, U32> LLViewerObjectList::sIPAndPortToIndex;
 std::map<U64, LLUUID> LLViewerObjectList::sIndexAndLocalIDToUUID;
-LLTrace::SampleStatHandle<LLUnit<F32, LLUnits::Percent> > LLViewerObjectList::sCacheHitRate("object_cache_hits");
 
 LLViewerObjectList::LLViewerObjectList()
 {
@@ -308,6 +307,8 @@ LLViewerObject* LLViewerObjectList::processObjectUpdateFromCache(LLVOCacheEntry*
     LLViewerStatsRecorder& recorder = LLViewerStatsRecorder::instance();
 
+    // Cache Hit.
+    record(LLStatViewer::OBJECT_CACHE_HIT_RATE, LLUnits::Ratio::fromValue(1));
+
     cached_dpp->reset();
     cached_dpp->unpackUUID(fullid, "ID");
     cached_dpp->unpackU32(local_id, "LocalID");
@@ -355,7 +356,6 @@ LLViewerObject* LLViewerObjectList::processObjectUpdateFromCache(LLVOCacheEntry*
         }
         justCreated = true;
         mNumNewObjects++;
-        sample(sCacheHitRate, LLUnits::Ratio::fromValue(1));
     }
 
     if (objectp->isDead())
@@ -697,7 +697,6 @@ void LLViewerObjectList::processCachedObjectUpdate(LLMessageSystem *mesgsys,
 
             continue; // no data packer, skip this object
         }
-        //sample(sCacheHitRate, LLUnits::Ratio::fromValue(0));
     }
 
     return;
@@ -196,8 +196,6 @@ protected:
     std::vector<OrphanInfo> mOrphanChildren;  // UUID's of orphaned objects
     S32 mNumOrphans;
 
-    static LLTrace::SampleStatHandle<LLUnit<F32, LLUnits::Percent> > sCacheHitRate;
-
     typedef std::vector<LLPointer<LLViewerObject> > vobj_list_t;
 
     vobj_list_t mObjects;
@@ -1985,6 +1985,7 @@ LLViewerRegion::eCacheUpdateResult LLViewerRegion::cacheFullUpdate(LLDataPackerB
         // Create new entry and add to map
         result = CACHE_UPDATE_ADDED;
         entry = new LLVOCacheEntry(local_id, crc, dp);
+        record(LLStatViewer::OBJECT_CACHE_HIT_RATE, LLUnits::Ratio::fromValue(0));
 
         mImpl->mCacheMap[local_id] = entry;
 
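Note, for illustration only (not part of this commit): the viewer-side change records a 1 into OBJECT_CACHE_HIT_RATE when an object update is served from the cache (processObjectUpdateFromCache) and a 0 when cacheFullUpdate has to create a new cache entry (i.e. the object was not already cached), so the mean of the recorded ratio values becomes the hit rate. Before this change the miss-side sample was commented out, so only 1s were ever recorded and the statistic always read 100%. A minimal standalone sketch of the same pattern, with hypothetical names:

    #include <cstddef>
    #include <limits>

    // Hypothetical hit-rate tracker: record 1 for a hit, 0 for a miss;
    // the mean of the recorded values (times 100) is the hit rate in percent.
    struct HitRateStat
    {
        double sum = 0.0;
        std::size_t count = 0;

        void recordHit()  { sum += 1.0; ++count; }
        void recordMiss() { ++count; }              // adds a 0 to the average

        double percent() const
        {
            // NaN until something has been recorded, echoing the NaN defaults above.
            return count ? (sum / count) * 100.0
                         : std::numeric_limits<double>::quiet_NaN();
        }
    };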
@@ -200,6 +200,9 @@ LLTrace::EventStatHandle<LLUnit<F64, LLUnits::Seconds> > AVATAR_EDIT_TIME("avata
     FPS_10_TIME("fps10time", "Seconds below 10 FPS"),
     FPS_8_TIME("fps8time", "Seconds below 8 FPS"),
     FPS_2_TIME("fps2time", "Seconds below 2 FPS");
+
+LLTrace::EventStatHandle<LLUnit<F32, LLUnits::Percent> > OBJECT_CACHE_HIT_RATE("object_cache_hits");
+
 }
 
 LLViewerStats::LLViewerStats()
@@ -240,6 +240,8 @@ extern LLTrace::EventStatHandle<LLUnit<F64, LLUnits::Seconds> > AVATAR_EDIT_TIME
     FPS_8_TIME,
     FPS_2_TIME;
 
+extern LLTrace::EventStatHandle<LLUnit<F32, LLUnits::Percent> > OBJECT_CACHE_HIT_RATE;
+
 }
 
 class LLViewerStats : public LLSingleton<LLViewerStats>
|
|||
Loading…
Reference in New Issue