diff --git a/.gitignore b/.gitignore
index 355c39de70..839d14bc0d 100755
--- a/.gitignore
+++ b/.gitignore
@@ -24,6 +24,7 @@ build-vc120-32/
build-vc120-64/
build-vc150-32/
build-vc150-64/
+build-vc160-64/
indra/CMakeFiles
indra/build-vc[0-9]*
indra/lib/mono/1.0/*.dll
diff --git a/autobuild.xml b/autobuild.xml
index 0ab120f50e..9185b8af22 100644
--- a/autobuild.xml
+++ b/autobuild.xml
@@ -1088,9 +1088,9 @@
archive
name
common
@@ -1099,100 +1099,6 @@
version
0.0.0
- glod
-
googlemock
copyright
@@ -2408,6 +2314,62 @@
version
7.11.1.297294
+ meshoptimizer
+
+ canonical_repo
+ https://bitbucket.org/lindenlab/3p-meshoptimizer
+ copyright
+ Copyright (c) 2016-2021 Arseny Kapoulkine
+ description
+ Meshoptimizer. Mesh optimization library.
+ license
+ meshoptimizer
+ license_file
+ LICENSES/meshoptimizer.txt
+ name
+ meshoptimizer
+ platforms
+
+ darwin64
+
+ archive
+
+ hash
+ 30bc37db57bbd87c4b5f62634964242a
+ url
+ https://automated-builds-secondlife-com.s3.amazonaws.com/ct2/84218/784918/meshoptimizer-0.16.561408-darwin64-561408.tar.bz2
+
+ name
+ darwin64
+
+ windows
+
+ archive
+
+ hash
+ ca3684bcf0447746cd2844e94f6d1fc7
+ url
+ https://automated-builds-secondlife-com.s3.amazonaws.com/ct2/84219/784924/meshoptimizer-0.16.561408-windows-561408.tar.bz2
+
+ name
+ windows
+
+ windows64
+
+ archive
+
+ hash
+ aef28c089d20f69d13c9c3e113fb3895
+ url
+ https://automated-builds-secondlife-com.s3.amazonaws.com/ct2/84220/784931/meshoptimizer-0.16.561408-windows64-561408.tar.bz2
+
+ name
+ windows64
+
+
+ version
+ 0.16.561408
+
nghttp2
copyright
@@ -3133,6 +3095,70 @@ Copyright (c) 2012, 2014, 2015, 2016 nghttp2 contributors
version
0.132.2
+ tracy
+
+ canonical_repo
+ https://bitbucket.org/lindenlab/3p-tracy
+ copyright
+ Copyright (c) 2017-2021, Bartosz Taudul (wolf@nereid.pl)
+ description
+ Tracy Profiler Library
+ license
+ bsd
+ license_file
+ LICENSES/tracy_license.txt
+ name
+ tracy
+ platforms
+
+ darwin64
+
+ archive
+
+ hash
+ da7317e4a81609f624f84780f28b07de
+ url
+ https://automated-builds-secondlife-com.s3.amazonaws.com/ct2/86972/801630/tracy-v0.7.8.563351-darwin64-563351.tar.bz2
+
+ name
+ darwin64
+
+ windows
+
+ archive
+
+ hash
+ 47c696cd2966c5cc3c8ba6115dd1f886
+ hash_algorithm
+ md5
+ url
+ https://automated-builds-secondlife-com.s3.amazonaws.com/ct2/86973/801641/tracy-v0.7.8.563351-windows-563351.tar.bz2
+
+ name
+ windows
+
+ windows64
+
+ archive
+
+ hash
+ b649ee6591e67d2341e886b3fc3484a7
+ hash_algorithm
+ md5
+ url
+ https://automated-builds-secondlife-com.s3.amazonaws.com/ct2/86974/801642/tracy-v0.7.8.563351-windows64-563351.tar.bz2
+
+ name
+ windows64
+
+
+ source
+ https://bitbucket.org/lindenlab/3p-tracy
+ source_type
+ git
+ version
+ v0.7.8.563351
+
tut
copyright
diff --git a/build.sh b/build.sh
index 1b6dd17a4a..89609a9ffd 100755
--- a/build.sh
+++ b/build.sh
@@ -155,7 +155,11 @@ pre_build()
fi
set -x
- "$autobuild" configure --quiet -c $variant -- \
+ # honor autobuild_configure_parameters same as sling-buildscripts
+ eval_autobuild_configure_parameters=$(eval $(echo echo $autobuild_configure_parameters))
+
+ "$autobuild" configure --quiet -c $variant \
+ ${eval_autobuild_configure_parameters:---} \
-DPACKAGE:BOOL=ON \
-DHAVOK:BOOL="$HAVOK" \
-DRELEASE_CRASH_REPORTING:BOOL="$RELEASE_CRASH_REPORTING" \
@@ -205,7 +209,11 @@ build()
if $build_viewer
then
begin_section "autobuild $variant"
- "$autobuild" build --no-configure -c $variant || fatal "failed building $variant"
+ # honor autobuild_build_parameters same as sling-buildscripts
+ eval_autobuild_build_parameters=$(eval $(echo echo $autobuild_build_parameters))
+ "$autobuild" build --no-configure -c $variant \
+ $eval_autobuild_build_parameters \
+ || fatal "failed building $variant"
echo true >"$build_dir"/build_ok
end_section "autobuild $variant"
diff --git a/doc/contributions.txt b/doc/contributions.txt
index 669a0cd671..0acf92b189 100755
--- a/doc/contributions.txt
+++ b/doc/contributions.txt
@@ -278,6 +278,7 @@ Beq Janus
SL-14766
SL-14927
SL-11300
+ SL-15709
SL-16021
Beth Walcher
Bezilon Kasei
@@ -1370,7 +1371,7 @@ Sovereign Engineer
MAINT-7343
SL-11079
OPEN-343
- SL-11625
+ SL-11625
BUG-229030
SL-14705
SL-14706
@@ -1378,6 +1379,7 @@ Sovereign Engineer
SL-14731
SL-14732
SL-15096
+ SL-16127
SpacedOut Frye
VWR-34
VWR-45
diff --git a/indra/CMakeLists.txt b/indra/CMakeLists.txt
index dc1c28ce5b..f93a09d232 100644
--- a/indra/CMakeLists.txt
+++ b/indra/CMakeLists.txt
@@ -38,6 +38,7 @@ add_subdirectory(${LIBS_OPEN_PREFIX}llkdu)
add_subdirectory(${LIBS_OPEN_PREFIX}llimagej2coj)
add_subdirectory(${LIBS_OPEN_PREFIX}llinventory)
add_subdirectory(${LIBS_OPEN_PREFIX}llmath)
+add_subdirectory(${LIBS_OPEN_PREFIX}llmeshoptimizer)
add_subdirectory(${LIBS_OPEN_PREFIX}llmessage)
add_subdirectory(${LIBS_OPEN_PREFIX}llprimitive)
add_subdirectory(${LIBS_OPEN_PREFIX}llrender)
diff --git a/indra/cmake/CMakeLists.txt b/indra/cmake/CMakeLists.txt
index d7725670a2..50cd42ff57 100644
--- a/indra/cmake/CMakeLists.txt
+++ b/indra/cmake/CMakeLists.txt
@@ -40,7 +40,6 @@ set(cmake_SOURCE_FILES
FreeType.cmake
GLEXT.cmake
GLH.cmake
- GLOD.cmake
## GStreamer010Plugin.cmake
GoogleMock.cmake
Havok.cmake
@@ -59,6 +58,7 @@ set(cmake_SOURCE_FILES
LLKDU.cmake
LLLogin.cmake
LLMath.cmake
+ LLMeshOptimizer.cmake
LLMessage.cmake
LLPhysicsExtensions.cmake
LLPlugin.cmake
@@ -72,6 +72,7 @@ set(cmake_SOURCE_FILES
LLXML.cmake
Linking.cmake
MediaPluginBase.cmake
+ MESHOPTIMIZER.cmake
NDOF.cmake
OPENAL.cmake
OpenGL.cmake
diff --git a/indra/cmake/Copy3rdPartyLibs.cmake b/indra/cmake/Copy3rdPartyLibs.cmake
index b20d23cead..ff705101de 100644
--- a/indra/cmake/Copy3rdPartyLibs.cmake
+++ b/indra/cmake/Copy3rdPartyLibs.cmake
@@ -57,7 +57,6 @@ if(WINDOWS)
libaprutil-1.dll
libapriconv-1.dll
nghttp2.dll
- glod.dll
libhunspell.dll
uriparser.dll
)
@@ -104,6 +103,8 @@ if(WINDOWS)
set(MSVC_VER 120)
elseif (MSVC_VERSION GREATER_EQUAL 1910 AND MSVC_VERSION LESS 1920) # Visual Studio 2017
set(MSVC_VER 140)
+ elseif (MSVC_VERSION GREATER_EQUAL 1920 AND MSVC_VERSION LESS 1930) # Visual Studio 2019
+ set(MSVC_VER 140)
else (MSVC80)
MESSAGE(WARNING "New MSVC_VERSION ${MSVC_VERSION} of MSVC: adapt Copy3rdPartyLibs.cmake")
endif (MSVC80)
@@ -128,6 +129,7 @@ if(WINDOWS)
msvcp${MSVC_VER}.dll
msvcr${MSVC_VER}.dll
vcruntime${MSVC_VER}.dll
+ vcruntime${MSVC_VER}_1.dll
)
if(EXISTS "${registry_path}/${release_msvc_file}")
to_staging_dirs(
@@ -166,7 +168,6 @@ elseif(DARWIN)
libaprutil-1.0.dylib
libaprutil-1.dylib
${EXPAT_COPY}
- libGLOD.dylib
libhunspell-1.3.0.dylib
libndofdev.dylib
libnghttp2.dylib
@@ -215,7 +216,6 @@ elseif(LINUX)
${EXPAT_COPY}
libfreetype.so.6.6.2
libfreetype.so.6
- libGLOD.so
libgmodule-2.0.so
libgobject-2.0.so
libhunspell-1.3.so.0.0.0
diff --git a/indra/cmake/GLOD.cmake b/indra/cmake/GLOD.cmake
deleted file mode 100644
index 6f42b44ab8..0000000000
--- a/indra/cmake/GLOD.cmake
+++ /dev/null
@@ -1,11 +0,0 @@
-# -*- cmake -*-
-include(Prebuilt)
-
-if (NOT USESYSTEMLIBS)
- use_prebuilt_binary(glod)
-endif (NOT USESYSTEMLIBS)
-
-set(GLODLIB ON CACHE BOOL "Using GLOD library")
-
-set(GLOD_INCLUDE_DIR ${LIBS_PREBUILT_DIR}/include)
-set(GLOD_LIBRARIES GLOD)
diff --git a/indra/cmake/LLCommon.cmake b/indra/cmake/LLCommon.cmake
index 8900419f9b..34499aaa36 100644
--- a/indra/cmake/LLCommon.cmake
+++ b/indra/cmake/LLCommon.cmake
@@ -3,12 +3,14 @@
include(APR)
include(Boost)
include(EXPAT)
+include(Tracy)
include(ZLIB)
set(LLCOMMON_INCLUDE_DIRS
${LIBS_OPEN_DIR}/llcommon
${APRUTIL_INCLUDE_DIR}
${APR_INCLUDE_DIR}
+ ${TRACY_INCLUDE_DIR}
)
set(LLCOMMON_SYSTEM_INCLUDE_DIRS
${Boost_INCLUDE_DIRS}
@@ -30,7 +32,8 @@ else (LINUX)
${BOOST_FIBER_LIBRARY}
${BOOST_CONTEXT_LIBRARY}
${BOOST_THREAD_LIBRARY}
- ${BOOST_SYSTEM_LIBRARY} )
+ ${BOOST_SYSTEM_LIBRARY}
+ )
endif (LINUX)
set(LLCOMMON_LINK_SHARED OFF CACHE BOOL "Build the llcommon target as a static library.")
diff --git a/indra/cmake/LLMeshOptimizer.cmake b/indra/cmake/LLMeshOptimizer.cmake
new file mode 100644
index 0000000000..b79944f618
--- /dev/null
+++ b/indra/cmake/LLMeshOptimizer.cmake
@@ -0,0 +1,7 @@
+# -*- cmake -*-
+
+set(LLMESHOPTIMIZER_INCLUDE_DIRS
+ ${LIBS_OPEN_DIR}/llmeshoptimizer
+ )
+
+set(LLMESHOPTIMIZER_LIBRARIES llmeshoptimizer)
diff --git a/indra/cmake/MESHOPTIMIZER.cmake b/indra/cmake/MESHOPTIMIZER.cmake
new file mode 100644
index 0000000000..1c5b47b9bd
--- /dev/null
+++ b/indra/cmake/MESHOPTIMIZER.cmake
@@ -0,0 +1,16 @@
+# -*- cmake -*-
+
+include(Linking)
+include(Prebuilt)
+
+use_prebuilt_binary(meshoptimizer)
+
+if (WINDOWS)
+ set(MESHOPTIMIZER_LIBRARIES meshoptimizer.lib)
+elseif (LINUX)
+ set(MESHOPTIMIZER_LIBRARIES meshoptimizer.o)
+elseif (DARWIN)
+ set(MESHOPTIMIZER_LIBRARIES libmeshoptimizer.a)
+endif (WINDOWS)
+
+set(MESHOPTIMIZER_INCLUDE_DIRS ${LIBS_PREBUILT_DIR}/include/meshoptimizer)
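
The meshoptimizer package wired in above (and declared in autobuild.xml) replaces GLOD for mesh optimization/LOD work. Below is a rough, hedged sketch of exercising the prebuilt library once MESHOPTIMIZER_INCLUDE_DIRS and MESHOPTIMIZER_LIBRARIES are linked in -- not the viewer's actual llmeshoptimizer wrapper. It uses only the long-stable meshopt_optimizeVertexCache entry point, and the index data is purely illustrative.

```cpp
// Minimal sketch (not the viewer's llmeshoptimizer code): reorder an index
// buffer for better vertex-cache locality using the prebuilt meshoptimizer lib.
#include "meshoptimizer.h"
#include <cstdio>
#include <vector>

int main()
{
    // One quad as two triangles -- made-up data for illustration only.
    std::vector<unsigned int> indices = { 0, 1, 2, 2, 1, 3 };
    const size_t vertex_count = 4;

    std::vector<unsigned int> optimized(indices.size());
    meshopt_optimizeVertexCache(optimized.data(), indices.data(),
                                indices.size(), vertex_count);

    std::printf("optimized %zu indices\n", optimized.size());
    return 0;
}
```
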
diff --git a/indra/cmake/Tracy.cmake b/indra/cmake/Tracy.cmake
new file mode 100644
index 0000000000..cfff956bcf
--- /dev/null
+++ b/indra/cmake/Tracy.cmake
@@ -0,0 +1,29 @@
+# -*- cmake -*-
+include(Prebuilt)
+
+set(USE_TRACY OFF CACHE BOOL "Use Tracy profiler.")
+
+if (USE_TRACY)
+ set(TRACY_INCLUDE_DIR ${LIBS_PREBUILT_DIR}/include/tracy)
+
+# See: indra/llcommon/llprofiler.h
+ add_definitions(-DLL_PROFILER_CONFIGURATION=3)
+ use_prebuilt_binary(tracy)
+
+ if (WINDOWS)
+ MESSAGE(STATUS "Including Tracy for Windows: '${TRACY_INCLUDE_DIR}'")
+ endif (WINDOWS)
+
+ if (DARWIN)
+ MESSAGE(STATUS "Including Tracy for Darwin: '${TRACY_INCLUDE_DIR}'")
+ endif (DARWIN)
+
+ if (LINUX)
+ MESSAGE(STATUS "Including Tracy for Linux: '${TRACY_INCLUDE_DIR}'")
+ endif (LINUX)
+else (USE_TRACY)
+ # Tracy.cmake should not set LLCOMMON_INCLUDE_DIRS, let LLCommon.cmake do that
+ set(TRACY_INCLUDE_DIR "")
+ set(TRACY_LIBRARY "")
+endif (USE_TRACY)
+
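
The LL_PROFILE_ZONE_SCOPED / LL_PROFILE_ZONE_SCOPED_CATEGORY_AVATAR macros that replace the LL_RECORD_BLOCK_TIME handles throughout the hunks below come from the new indra/llcommon/llprofiler.h, selected here via -DLL_PROFILER_CONFIGURATION=3. A hedged sketch of the idea only, not the actual header contents:

```cpp
// Hedged sketch of the mechanism behind llprofiler.h, not its real contents:
// when built with -DLL_PROFILER_CONFIGURATION=3 (Tracy), the scoped-zone
// macros forward to Tracy's ZoneScoped; otherwise they compile away, so
// instrumented code costs nothing in ordinary builds.
#if LL_PROFILER_CONFIGURATION == 3
    #include "Tracy.hpp"                             // from the new 3p-tracy package
    #define LL_PROFILE_ZONE_SCOPED                   ZoneScoped
    #define LL_PROFILE_ZONE_SCOPED_CATEGORY_AVATAR   ZoneScoped
#else
    #define LL_PROFILE_ZONE_SCOPED
    #define LL_PROFILE_ZONE_SCOPED_CATEGORY_AVATAR
#endif

void updateSomething()
{
    LL_PROFILE_ZONE_SCOPED;   // stands in for the old per-site BlockTimerStatHandle
    // ... work to be profiled ...
}
```
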
diff --git a/indra/edit-me-to-trigger-new-build.txt b/indra/edit-me-to-trigger-new-build.txt
index ade83202cf..eab7c17b71 100644
--- a/indra/edit-me-to-trigger-new-build.txt
+++ b/indra/edit-me-to-trigger-new-build.txt
@@ -1,3 +1,4 @@
euclid 5/29/2020
euclid 7/23/2020
euclid 4/29/2021
+euclid 10/5/2021 DRTVWR-546
diff --git a/indra/lib/python/indra/util/llmanifest.py b/indra/lib/python/indra/util/llmanifest.py
index 30b7228289..aedd3b7ee4 100755
--- a/indra/lib/python/indra/util/llmanifest.py
+++ b/indra/lib/python/indra/util/llmanifest.py
@@ -881,6 +881,49 @@ class LLManifest(object, metaclass=LLManifestRegistry):
# particular, let caller notice 0.
return count
+ def path_optional(self, src, dst=None):
+ sys.stdout.flush()
+ if src == None:
+ raise ManifestError("No source file, dst is " + dst)
+ if dst == None:
+ dst = src
+ dst = os.path.join(self.get_dst_prefix(), dst)
+ sys.stdout.write("Processing %s => %s ... " % (src, self._relative_dst_path(dst)))
+
+ def try_path(src):
+ # expand globs
+ count = 0
+ if self.wildcard_pattern.search(src):
+ for s,d in self.expand_globs(src, dst):
+ assert(s != d)
+ count += self.process_file(s, d)
+ else:
+ # if we're specifying a single path (not a glob),
+ # we should error out if it doesn't exist
+ self.check_file_exists(src)
+ count += self.process_either(src, dst)
+ return count
+
+ try_prefixes = [self.get_src_prefix(), self.get_artwork_prefix(), self.get_build_prefix()]
+ for pfx in try_prefixes:
+ try:
+ count = try_path(os.path.join(pfx, src))
+ except MissingError:
+ # if we produce MissingError, just try the next prefix
+ continue
+ # If we actually found nonzero files, stop looking
+ if count:
+ break
+ else:
+ sys.stdout.write("Skipping %s\n" % (src))
+ return 0
+
+ print("%d files" % count)
+
+ # Let caller check whether we processed as many files as expected. In
+ # particular, let caller notice 0.
+ return count
+
def do(self, *actions):
self.actions = actions
self.construct()
diff --git a/indra/llappearance/llavatarappearance.cpp b/indra/llappearance/llavatarappearance.cpp
index 90dfa04f28..2d6d2a10d2 100644
--- a/indra/llappearance/llavatarappearance.cpp
+++ b/indra/llappearance/llavatarappearance.cpp
@@ -1590,7 +1590,7 @@ BOOL LLAvatarAppearance::allocateCollisionVolumes( U32 num )
delete_and_clear_array(mCollisionVolumes);
mNumCollisionVolumes = 0;
- mCollisionVolumes = new(std::nothrow) LLAvatarJointCollisionVolume[num];
+ mCollisionVolumes = new LLAvatarJointCollisionVolume[num];
if (!mCollisionVolumes)
{
LL_WARNS() << "Failed to allocate collision volumes" << LL_ENDL;
diff --git a/indra/llappearance/lldriverparam.h b/indra/llappearance/lldriverparam.h
index f278dcc2e2..a6261b507b 100644
--- a/indra/llappearance/lldriverparam.h
+++ b/indra/llappearance/lldriverparam.h
@@ -77,73 +77,63 @@ protected:
//-----------------------------------------------------------------------------
-LL_ALIGN_PREFIX(16)
-class LLDriverParam : public LLViewerVisualParam
+class alignas(16) LLDriverParam : public LLViewerVisualParam
{
+ LL_ALIGN_NEW
private:
- // Hide the default constructor. Force construction with LLAvatarAppearance.
- LLDriverParam() {}
+ // Hide the default constructor. Force construction with LLAvatarAppearance.
+ LLDriverParam() {}
public:
- LLDriverParam(LLAvatarAppearance *appearance, LLWearable* wearable = NULL);
- ~LLDriverParam();
+ LLDriverParam(LLAvatarAppearance* appearance, LLWearable* wearable = NULL);
+ ~LLDriverParam();
- void* operator new(size_t size)
- {
- return ll_aligned_malloc_16(size);
- }
+ // Special: These functions are overridden by child classes
+ LLDriverParamInfo* getInfo() const { return (LLDriverParamInfo*)mInfo; }
+ // This sets mInfo and calls initialization functions
+ BOOL setInfo(LLDriverParamInfo* info);
- void operator delete(void* ptr)
- {
- ll_aligned_free_16(ptr);
- }
+ LLAvatarAppearance* getAvatarAppearance() { return mAvatarAppearance; }
+ const LLAvatarAppearance* getAvatarAppearance() const { return mAvatarAppearance; }
- // Special: These functions are overridden by child classes
- LLDriverParamInfo* getInfo() const { return (LLDriverParamInfo*)mInfo; }
- // This sets mInfo and calls initialization functions
- BOOL setInfo(LLDriverParamInfo *info);
+ void updateCrossDrivenParams(LLWearableType::EType driven_type);
- LLAvatarAppearance* getAvatarAppearance() { return mAvatarAppearance; }
- const LLAvatarAppearance* getAvatarAppearance() const { return mAvatarAppearance; }
+ /*virtual*/ LLViewerVisualParam* cloneParam(LLWearable* wearable) const;
- void updateCrossDrivenParams(LLWearableType::EType driven_type);
+ // LLVisualParam Virtual functions
+ /*virtual*/ void apply(ESex sex) {} // apply is called separately for each driven param.
+ /*virtual*/ void setWeight(F32 weight);
+ /*virtual*/ void setAnimationTarget(F32 target_value);
+ /*virtual*/ void stopAnimating();
+ /*virtual*/ BOOL linkDrivenParams(visual_param_mapper mapper, BOOL only_cross_params);
+ /*virtual*/ void resetDrivenParams();
- /*virtual*/ LLViewerVisualParam* cloneParam(LLWearable* wearable) const;
+ // LLViewerVisualParam Virtual functions
+ /*virtual*/ F32 getTotalDistortion();
+ /*virtual*/ const LLVector4a& getAvgDistortion();
+ /*virtual*/ F32 getMaxDistortion();
+ /*virtual*/ LLVector4a getVertexDistortion(S32 index, LLPolyMesh* poly_mesh);
+ /*virtual*/ const LLVector4a* getFirstDistortion(U32* index, LLPolyMesh** poly_mesh);
+ /*virtual*/ const LLVector4a* getNextDistortion(U32* index, LLPolyMesh** poly_mesh);
- // LLVisualParam Virtual functions
- /*virtual*/ void apply( ESex sex ) {} // apply is called separately for each driven param.
- /*virtual*/ void setWeight(F32 weight);
- /*virtual*/ void setAnimationTarget( F32 target_value);
- /*virtual*/ void stopAnimating();
- /*virtual*/ BOOL linkDrivenParams(visual_param_mapper mapper, BOOL only_cross_params);
- /*virtual*/ void resetDrivenParams();
-
- // LLViewerVisualParam Virtual functions
- /*virtual*/ F32 getTotalDistortion();
- /*virtual*/ const LLVector4a& getAvgDistortion();
- /*virtual*/ F32 getMaxDistortion();
- /*virtual*/ LLVector4a getVertexDistortion(S32 index, LLPolyMesh *poly_mesh);
- /*virtual*/ const LLVector4a* getFirstDistortion(U32 *index, LLPolyMesh **poly_mesh);
- /*virtual*/ const LLVector4a* getNextDistortion(U32 *index, LLPolyMesh **poly_mesh);
+ S32 getDrivenParamsCount() const;
+ const LLViewerVisualParam* getDrivenParam(S32 index) const;
- S32 getDrivenParamsCount() const;
- const LLViewerVisualParam* getDrivenParam(S32 index) const;
-
- typedef std::vector<LLDrivenEntry> entry_list_t;
- entry_list_t& getDrivenList() { return mDriven; }
+ typedef std::vector<LLDrivenEntry> entry_list_t;
+ entry_list_t& getDrivenList() { return mDriven; }
void setDrivenList(entry_list_t& driven_list) { mDriven = driven_list; }
protected:
- LLDriverParam(const LLDriverParam& pOther);
- F32 getDrivenWeight(const LLDrivenEntry* driven, F32 input_weight);
- void setDrivenWeight(LLDrivenEntry *driven, F32 driven_weight);
+ LLDriverParam(const LLDriverParam& pOther);
+ F32 getDrivenWeight(const LLDrivenEntry* driven, F32 input_weight);
+ void setDrivenWeight(LLDrivenEntry* driven, F32 driven_weight);
- LL_ALIGN_16(LLVector4a mDefaultVec); // temp holder
- entry_list_t mDriven;
- LLViewerVisualParam* mCurrentDistortionParam;
- // Backlink only; don't make this an LLPointer.
- LLAvatarAppearance* mAvatarAppearance;
- LLWearable* mWearablep;
-} LL_ALIGN_POSTFIX(16);
+ LL_ALIGN_16(LLVector4a mDefaultVec); // temp holder
+ entry_list_t mDriven;
+ LLViewerVisualParam* mCurrentDistortionParam;
+ // Backlink only; don't make this an LLPointer.
+ LLAvatarAppearance* mAvatarAppearance;
+ LLWearable* mWearablep;
+};
#endif // LL_LLDRIVERPARAM_H
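
Several classes in this change (LLDriverParam above, LLPolyMorphData, LLPolySkeletalDistortion, the texture-layer params) drop their hand-written aligned operator new/delete in favor of alignas(16) plus the LL_ALIGN_NEW helper. A hedged sketch of the pattern follows; LL_ALIGN_NEW itself is defined in llmemory.h and may differ in detail, and ExampleAlignedParam is a made-up class name.

```cpp
// Hedged sketch of the alignas(16) + LL_ALIGN_NEW pattern. LL_ALIGN_NEW is
// presumed to generate aligned allocation operators much like the
// hand-written ones this diff removes from each class.
#include "llmemory.h"      // ll_aligned_malloc_16 / ll_aligned_free_16
#include "llvector4a.h"    // 16-byte aligned SSE vector type

class alignas(16) ExampleAlignedParam
{
public:
    // What LL_ALIGN_NEW is expected to provide for the class (sketch only):
    void* operator new(size_t size)  { return ll_aligned_malloc_16(size); }
    void  operator delete(void* ptr) { ll_aligned_free_16(ptr); }

    LLVector4a mDefaultVec;  // the member that motivates 16-byte alignment
};
```
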
diff --git a/indra/llappearance/llpolymorph.cpp b/indra/llappearance/llpolymorph.cpp
index ce7010984a..16b5f1e204 100644
--- a/indra/llappearance/llpolymorph.cpp
+++ b/indra/llappearance/llpolymorph.cpp
@@ -539,8 +539,6 @@ F32 LLPolyMorphTarget::getMaxDistortion()
//-----------------------------------------------------------------------------
// apply()
//-----------------------------------------------------------------------------
-static LLTrace::BlockTimerStatHandle FTM_APPLY_MORPH_TARGET("Apply Morph");
-
void LLPolyMorphTarget::apply( ESex avatar_sex )
{
if (!mMorphData || mNumMorphMasksPending > 0)
@@ -548,7 +546,7 @@ void LLPolyMorphTarget::apply( ESex avatar_sex )
return;
}
- LL_RECORD_BLOCK_TIME(FTM_APPLY_MORPH_TARGET);
+ LL_PROFILE_ZONE_SCOPED;
mLastSex = avatar_sex;
diff --git a/indra/llappearance/llpolymorph.h b/indra/llappearance/llpolymorph.h
index c6133cd831..29cd373636 100644
--- a/indra/llappearance/llpolymorph.h
+++ b/indra/llappearance/llpolymorph.h
@@ -41,24 +41,14 @@ class LLWearable;
//-----------------------------------------------------------------------------
// LLPolyMorphData()
//-----------------------------------------------------------------------------
-LL_ALIGN_PREFIX(16)
-class LLPolyMorphData
+class alignas(16) LLPolyMorphData
{
+ LL_ALIGN_NEW
public:
LLPolyMorphData(const std::string& morph_name);
~LLPolyMorphData();
LLPolyMorphData(const LLPolyMorphData &rhs);
- void* operator new(size_t size)
- {
- return ll_aligned_malloc_16(size);
- }
-
- void operator delete(void* ptr)
- {
- ll_aligned_free_16(ptr);
- }
-
BOOL loadBinary(LLFILE* fp, LLPolyMeshSharedData *mesh);
const std::string& getName() { return mName; }
@@ -76,7 +66,7 @@ public:
F32 mTotalDistortion; // vertex distortion summed over entire morph
F32 mMaxDistortion; // maximum single vertex distortion in a given morph
- LL_ALIGN_16(LLVector4a mAvgDistortion); // average vertex distortion, to infer directionality of the morph
+ LLVector4a mAvgDistortion; // average vertex distortion, to infer directionality of the morph
LLPolyMeshSharedData* mMesh;
private:
@@ -154,8 +144,9 @@ protected:
// These morph targets must be topologically consistent with a given Polymesh
// (share face sets)
//-----------------------------------------------------------------------------
-class LLPolyMorphTarget : public LLViewerVisualParam
+class alignas(16) LLPolyMorphTarget : public LLViewerVisualParam
{
+ LL_ALIGN_NEW
public:
LLPolyMorphTarget(LLPolyMesh *poly_mesh);
~LLPolyMorphTarget();
@@ -184,16 +175,6 @@ public:
void applyVolumeChanges(F32 delta_weight); // SL-315 - for resetSkeleton()
- void* operator new(size_t size)
- {
- return ll_aligned_malloc_16(size);
- }
-
- void operator delete(void* ptr)
- {
- ll_aligned_free_16(ptr);
- }
-
protected:
LLPolyMorphTarget(const LLPolyMorphTarget& pOther);
diff --git a/indra/llappearance/llpolyskeletaldistortion.cpp b/indra/llappearance/llpolyskeletaldistortion.cpp
index ae38c25dbf..360f17508f 100644
--- a/indra/llappearance/llpolyskeletaldistortion.cpp
+++ b/indra/llappearance/llpolyskeletaldistortion.cpp
@@ -188,11 +188,9 @@ BOOL LLPolySkeletalDistortion::setInfo(LLPolySkeletalDistortionInfo *info)
//-----------------------------------------------------------------------------
// apply()
//-----------------------------------------------------------------------------
-static LLTrace::BlockTimerStatHandle FTM_POLYSKELETAL_DISTORTION_APPLY("Skeletal Distortion");
-
void LLPolySkeletalDistortion::apply( ESex avatar_sex )
{
- LL_RECORD_BLOCK_TIME(FTM_POLYSKELETAL_DISTORTION_APPLY);
+ LL_PROFILE_ZONE_SCOPED;
F32 effective_weight = ( getSex() & avatar_sex ) ? mCurWeight : getDefaultWeight();
diff --git a/indra/llappearance/llpolyskeletaldistortion.h b/indra/llappearance/llpolyskeletaldistortion.h
index ab1a132d19..585d85f055 100644
--- a/indra/llappearance/llpolyskeletaldistortion.h
+++ b/indra/llappearance/llpolyskeletaldistortion.h
@@ -62,9 +62,9 @@ struct LLPolySkeletalBoneInfo
BOOL mHasPositionDeformation;
};
-LL_ALIGN_PREFIX(16)
-class LLPolySkeletalDistortionInfo : public LLViewerVisualParamInfo
+class alignas(16) LLPolySkeletalDistortionInfo : public LLViewerVisualParamInfo
{
+ LL_ALIGN_NEW
friend class LLPolySkeletalDistortion;
public:
@@ -73,19 +73,6 @@ public:
/*virtual*/ BOOL parseXml(LLXmlTreeNode* node);
-
-
- void* operator new(size_t size)
- {
- return ll_aligned_malloc_16(size);
- }
-
- void operator delete(void* ptr)
- {
- ll_aligned_free_16(ptr);
- }
-
-
protected:
typedef std::vector<LLPolySkeletalBoneInfo> bone_info_list_t;
bone_info_list_t mBoneInfoList;
@@ -95,19 +82,10 @@ protected:
// LLPolySkeletalDeformation
// A set of joint scale data for deforming the avatar mesh
//-----------------------------------------------------------------------------
-class LLPolySkeletalDistortion : public LLViewerVisualParam
+class alignas(16) LLPolySkeletalDistortion : public LLViewerVisualParam
{
+ LL_ALIGN_NEW
public:
- void* operator new(size_t size)
- {
- return ll_aligned_malloc_16(size);
- }
-
- void operator delete(void* ptr)
- {
- ll_aligned_free_16(ptr);
- }
-
LLPolySkeletalDistortion(LLAvatarAppearance *avatarp);
~LLPolySkeletalDistortion();
diff --git a/indra/llappearance/lltexlayer.cpp b/indra/llappearance/lltexlayer.cpp
index a4600069ce..234f5c3007 100644
--- a/indra/llappearance/lltexlayer.cpp
+++ b/indra/llappearance/lltexlayer.cpp
@@ -142,17 +142,8 @@ BOOL LLTexLayerSetBuffer::renderTexLayerSet(LLRenderTarget* bound_target)
BOOL success = TRUE;
- bool use_shaders = LLGLSLShader::sNoFixedFunction;
-
- if (use_shaders)
- {
- gAlphaMaskProgram.bind();
- gAlphaMaskProgram.setMinimumAlpha(0.004f);
- }
- else
- {
- gGL.setAlphaRejectSettings(LLRender::CF_GREATER, 0.00f);
- }
+ gAlphaMaskProgram.bind();
+ gAlphaMaskProgram.setMinimumAlpha(0.004f);
LLVertexBuffer::unbind();
@@ -164,10 +155,7 @@ BOOL LLTexLayerSetBuffer::renderTexLayerSet(LLRenderTarget* bound_target)
midRenderTexLayerSet(success);
- if (use_shaders)
- {
- gAlphaMaskProgram.unbind();
- }
+ gAlphaMaskProgram.unbind();
LLVertexBuffer::unbind();
@@ -390,8 +378,6 @@ BOOL LLTexLayerSet::render( S32 x, S32 y, S32 width, S32 height, LLRenderTarget*
}
}
- bool use_shaders = LLGLSLShader::sNoFixedFunction;
-
LLGLSUIDefault gls_ui;
LLGLDepthTest gls_depth(GL_FALSE, GL_FALSE);
gGL.setColorMask(true, true);
@@ -400,20 +386,14 @@ BOOL LLTexLayerSet::render( S32 x, S32 y, S32 width, S32 height, LLRenderTarget*
{
gGL.flush();
LLGLDisable no_alpha(GL_ALPHA_TEST);
- if (use_shaders)
- {
- gAlphaMaskProgram.setMinimumAlpha(0.0f);
- }
+ gAlphaMaskProgram.setMinimumAlpha(0.0f);
gGL.getTexUnit(0)->unbind(LLTexUnit::TT_TEXTURE);
gGL.color4f( 0.f, 0.f, 0.f, 1.f );
gl_rect_2d_simple( width, height );
gGL.flush();
- if (use_shaders)
- {
- gAlphaMaskProgram.setMinimumAlpha(0.004f);
- }
+ gAlphaMaskProgram.setMinimumAlpha(0.004f);
}
if (mIsVisible)
@@ -440,10 +420,7 @@ BOOL LLTexLayerSet::render( S32 x, S32 y, S32 width, S32 height, LLRenderTarget*
gGL.setSceneBlendType(LLRender::BT_REPLACE);
LLGLDisable no_alpha(GL_ALPHA_TEST);
- if (use_shaders)
- {
- gAlphaMaskProgram.setMinimumAlpha(0.f);
- }
+ gAlphaMaskProgram.setMinimumAlpha(0.f);
gGL.getTexUnit(0)->unbind(LLTexUnit::TT_TEXTURE);
gGL.color4f( 0.f, 0.f, 0.f, 0.f );
@@ -452,10 +429,7 @@ BOOL LLTexLayerSet::render( S32 x, S32 y, S32 width, S32 height, LLRenderTarget*
gGL.setSceneBlendType(LLRender::BT_ALPHA);
gGL.flush();
- if (use_shaders)
- {
- gAlphaMaskProgram.setMinimumAlpha(0.004f);
- }
+ gAlphaMaskProgram.setMinimumAlpha(0.004f);
}
return success;
@@ -520,10 +494,9 @@ const LLTexLayerSetBuffer* LLTexLayerSet::getComposite() const
return mComposite;
}
-static LLTrace::BlockTimerStatHandle FTM_GATHER_MORPH_MASK_ALPHA("gatherMorphMaskAlpha");
void LLTexLayerSet::gatherMorphMaskAlpha(U8 *data, S32 origin_x, S32 origin_y, S32 width, S32 height, LLRenderTarget* bound_target)
{
- LL_RECORD_BLOCK_TIME(FTM_GATHER_MORPH_MASK_ALPHA);
+ LL_PROFILE_ZONE_SCOPED;
memset(data, 255, width * height);
for( layer_list_t::iterator iter = mLayerList.begin(); iter != mLayerList.end(); iter++ )
@@ -536,14 +509,11 @@ void LLTexLayerSet::gatherMorphMaskAlpha(U8 *data, S32 origin_x, S32 origin_y, S
renderAlphaMaskTextures(origin_x, origin_y, width, height, bound_target, true);
}
-static LLTrace::BlockTimerStatHandle FTM_RENDER_ALPHA_MASK_TEXTURES("renderAlphaMaskTextures");
void LLTexLayerSet::renderAlphaMaskTextures(S32 x, S32 y, S32 width, S32 height, LLRenderTarget* bound_target, bool forceClear)
{
- LL_RECORD_BLOCK_TIME(FTM_RENDER_ALPHA_MASK_TEXTURES);
+ LL_PROFILE_ZONE_SCOPED;
const LLTexLayerSetInfo *info = getInfo();
- bool use_shaders = LLGLSLShader::sNoFixedFunction;
-
gGL.setColorMask(false, true);
gGL.setSceneBlendType(LLRender::BT_REPLACE);
@@ -557,7 +527,6 @@ void LLTexLayerSet::renderAlphaMaskTextures(S32 x, S32 y, S32 width, S32 height,
{
LLGLSUIDefault gls_ui;
gGL.getTexUnit(0)->bind(tex);
- gGL.getTexUnit(0)->setTextureBlendType( LLTexUnit::TB_REPLACE );
gl_rect_2d_simple_tex( width, height );
}
}
@@ -568,20 +537,14 @@ void LLTexLayerSet::renderAlphaMaskTextures(S32 x, S32 y, S32 width, S32 height,
// Set the alpha channel to one (clean up after previous blending)
gGL.flush();
LLGLDisable no_alpha(GL_ALPHA_TEST);
- if (use_shaders)
- {
- gAlphaMaskProgram.setMinimumAlpha(0.f);
- }
+ gAlphaMaskProgram.setMinimumAlpha(0.f);
gGL.getTexUnit(0)->unbind(LLTexUnit::TT_TEXTURE);
gGL.color4f( 0.f, 0.f, 0.f, 1.f );
gl_rect_2d_simple( width, height );
gGL.flush();
- if (use_shaders)
- {
- gAlphaMaskProgram.setMinimumAlpha(0.004f);
- }
+ gAlphaMaskProgram.setMinimumAlpha(0.004f);
}
// (Optional) Mask out part of the baked texture with alpha masks
@@ -589,7 +552,6 @@ void LLTexLayerSet::renderAlphaMaskTextures(S32 x, S32 y, S32 width, S32 height,
if (mMaskLayerList.size() > 0)
{
gGL.setSceneBlendType(LLRender::BT_MULT_ALPHA);
- gGL.getTexUnit(0)->setTextureBlendType( LLTexUnit::TB_REPLACE );
for (layer_list_t::iterator iter = mMaskLayerList.begin(); iter != mMaskLayerList.end(); iter++)
{
LLTexLayerInterface* layer = *iter;
@@ -602,7 +564,6 @@ void LLTexLayerSet::renderAlphaMaskTextures(S32 x, S32 y, S32 width, S32 height,
gGL.getTexUnit(0)->unbind(LLTexUnit::TT_TEXTURE);
- gGL.getTexUnit(0)->setTextureBlendType(LLTexUnit::TB_MULT);
gGL.setColorMask(true, true);
gGL.setSceneBlendType(LLRender::BT_ALPHA);
}
@@ -1128,13 +1089,6 @@ BOOL LLTexLayer::render(S32 x, S32 y, S32 width, S32 height, LLRenderTarget* bou
// *TODO: Is this correct?
//gPipeline.disableLights();
stop_glerror();
- if (!LLGLSLShader::sNoFixedFunction)
- {
- glDisable(GL_LIGHTING);
- }
- stop_glerror();
-
- bool use_shaders = LLGLSLShader::sNoFixedFunction;
LLColor4 net_color;
BOOL color_specified = findNetColor(&net_color);
@@ -1221,10 +1175,7 @@ BOOL LLTexLayer::render(S32 x, S32 y, S32 width, S32 height, LLRenderTarget* bou
LLGLDisable alpha_test(no_alpha_test ? GL_ALPHA_TEST : 0);
if (no_alpha_test)
{
- if (use_shaders)
- {
- gAlphaMaskProgram.setMinimumAlpha(0.f);
- }
+ gAlphaMaskProgram.setMinimumAlpha(0.f);
}
LLTexUnit::eTextureAddressMode old_mode = tex->getAddressMode();
@@ -1238,10 +1189,7 @@ BOOL LLTexLayer::render(S32 x, S32 y, S32 width, S32 height, LLRenderTarget* bou
gGL.getTexUnit(0)->unbind(LLTexUnit::TT_TEXTURE);
if (no_alpha_test)
{
- if (use_shaders)
- {
- gAlphaMaskProgram.setMinimumAlpha(0.004f);
- }
+ gAlphaMaskProgram.setMinimumAlpha(0.004f);
}
}
}
@@ -1275,18 +1223,12 @@ BOOL LLTexLayer::render(S32 x, S32 y, S32 width, S32 height, LLRenderTarget* bou
color_specified )
{
LLGLDisable no_alpha(GL_ALPHA_TEST);
- if (use_shaders)
- {
- gAlphaMaskProgram.setMinimumAlpha(0.000f);
- }
+ gAlphaMaskProgram.setMinimumAlpha(0.000f);
gGL.getTexUnit(0)->unbind(LLTexUnit::TT_TEXTURE);
gGL.color4fv( net_color.mV );
gl_rect_2d_simple( width, height );
- if (use_shaders)
- {
- gAlphaMaskProgram.setMinimumAlpha(0.004f);
- }
+ gAlphaMaskProgram.setMinimumAlpha(0.004f);
}
if( alpha_mask_specified || getInfo()->mWriteAllChannels )
@@ -1374,25 +1316,17 @@ BOOL LLTexLayer::blendAlphaTexture(S32 x, S32 y, S32 width, S32 height)
gGL.flush();
- bool use_shaders = LLGLSLShader::sNoFixedFunction;
-
if( !getInfo()->mStaticImageFileName.empty() )
{
LLGLTexture* tex = LLTexLayerStaticImageList::getInstance()->getTexture( getInfo()->mStaticImageFileName, getInfo()->mStaticImageIsMask );
if( tex )
{
LLGLSNoAlphaTest gls_no_alpha_test;
- if (use_shaders)
- {
- gAlphaMaskProgram.setMinimumAlpha(0.f);
- }
+ gAlphaMaskProgram.setMinimumAlpha(0.f);
gGL.getTexUnit(0)->bind(tex, TRUE);
gl_rect_2d_simple_tex( width, height );
gGL.getTexUnit(0)->unbind(LLTexUnit::TT_TEXTURE);
- if (use_shaders)
- {
- gAlphaMaskProgram.setMinimumAlpha(0.004f);
- }
+ gAlphaMaskProgram.setMinimumAlpha(0.004f);
}
else
{
@@ -1407,18 +1341,11 @@ BOOL LLTexLayer::blendAlphaTexture(S32 x, S32 y, S32 width, S32 height)
if (tex)
{
LLGLSNoAlphaTest gls_no_alpha_test;
- if (use_shaders)
- {
- gAlphaMaskProgram.setMinimumAlpha(0.f);
- }
+ gAlphaMaskProgram.setMinimumAlpha(0.f);
gGL.getTexUnit(0)->bind(tex);
gl_rect_2d_simple_tex( width, height );
gGL.getTexUnit(0)->unbind(LLTexUnit::TT_TEXTURE);
- success = TRUE;
- if (use_shaders)
- {
- gAlphaMaskProgram.setMinimumAlpha(0.004f);
- }
+ gAlphaMaskProgram.setMinimumAlpha(0.004f);
}
}
}
@@ -1431,7 +1358,6 @@ BOOL LLTexLayer::blendAlphaTexture(S32 x, S32 y, S32 width, S32 height)
addAlphaMask(data, originX, originY, width, height, bound_target);
}
-static LLTrace::BlockTimerStatHandle FTM_RENDER_MORPH_MASKS("renderMorphMasks");
void LLTexLayer::renderMorphMasks(S32 x, S32 y, S32 width, S32 height, const LLColor4 &layer_color, LLRenderTarget* bound_target, bool force_render)
{
if (!force_render && !hasMorph())
@@ -1439,18 +1365,12 @@ void LLTexLayer::renderMorphMasks(S32 x, S32 y, S32 width, S32 height, const LLC
LL_DEBUGS() << "skipping renderMorphMasks for " << getUUID() << LL_ENDL;
return;
}
- LL_RECORD_BLOCK_TIME(FTM_RENDER_MORPH_MASKS);
+ LL_PROFILE_ZONE_SCOPED;
BOOL success = TRUE;
llassert( !mParamAlphaList.empty() );
- bool use_shaders = LLGLSLShader::sNoFixedFunction;
-
- if (use_shaders)
- {
- gAlphaMaskProgram.setMinimumAlpha(0.f);
- }
-
+ gAlphaMaskProgram.setMinimumAlpha(0.f);
gGL.setColorMask(false, true);
LLTexLayerParamAlpha* first_param = *mParamAlphaList.begin();
@@ -1535,10 +1455,7 @@ void LLTexLayer::renderMorphMasks(S32 x, S32 y, S32 width, S32 height, const LLC
gl_rect_2d_simple( width, height );
}
- if (use_shaders)
- {
- gAlphaMaskProgram.setMinimumAlpha(0.004f);
- }
+ gAlphaMaskProgram.setMinimumAlpha(0.004f);
LLGLSUIDefault gls_ui;
@@ -1637,10 +1554,9 @@ void LLTexLayer::renderMorphMasks(S32 x, S32 y, S32 width, S32 height, const LLC
}
}
-static LLTrace::BlockTimerStatHandle FTM_ADD_ALPHA_MASK("addAlphaMask");
void LLTexLayer::addAlphaMask(U8 *data, S32 originX, S32 originY, S32 width, S32 height, LLRenderTarget* bound_target)
{
- LL_RECORD_BLOCK_TIME(FTM_ADD_ALPHA_MASK);
+ LL_PROFILE_ZONE_SCOPED;
S32 size = width * height;
const U8* alphaData = getAlphaData();
if (!alphaData && hasAlphaParams())
@@ -1981,10 +1897,9 @@ void LLTexLayerStaticImageList::deleteCachedImages()
// Returns an LLImageTGA that contains the encoded data from a tga file named file_name.
// Caches the result to speed identical subsequent requests.
-static LLTrace::BlockTimerStatHandle FTM_LOAD_STATIC_TGA("getImageTGA");
LLImageTGA* LLTexLayerStaticImageList::getImageTGA(const std::string& file_name)
{
- LL_RECORD_BLOCK_TIME(FTM_LOAD_STATIC_TGA);
+ LL_PROFILE_ZONE_SCOPED;
const char *namekey = mImageNames.addString(file_name);
image_tga_map_t::const_iterator iter = mStaticImageListTGA.find(namekey);
if( iter != mStaticImageListTGA.end() )
@@ -2011,10 +1926,9 @@ LLImageTGA* LLTexLayerStaticImageList::getImageTGA(const std::string& file_name)
// Returns a GL Image (without a backing ImageRaw) that contains the decoded data from a tga file named file_name.
// Caches the result to speed identical subsequent requests.
-static LLTrace::BlockTimerStatHandle FTM_LOAD_STATIC_TEXTURE("getTexture");
LLGLTexture* LLTexLayerStaticImageList::getTexture(const std::string& file_name, BOOL is_mask)
{
- LL_RECORD_BLOCK_TIME(FTM_LOAD_STATIC_TEXTURE);
+ LL_PROFILE_ZONE_SCOPED;
LLPointer<LLGLTexture> tex;
const char *namekey = mImageNames.addString(file_name);
@@ -2061,10 +1975,9 @@ LLGLTexture* LLTexLayerStaticImageList::getTexture(const std::string& file_name,
// Reads a .tga file, decodes it, and puts the decoded data in image_raw.
// Returns TRUE if successful.
-static LLTrace::BlockTimerStatHandle FTM_LOAD_IMAGE_RAW("loadImageRaw");
BOOL LLTexLayerStaticImageList::loadImageRaw(const std::string& file_name, LLImageRaw* image_raw)
{
- LL_RECORD_BLOCK_TIME(FTM_LOAD_IMAGE_RAW);
+ LL_PROFILE_ZONE_SCOPED;
BOOL success = FALSE;
std::string path;
path = gDirUtilp->getExpandedFilename(LL_PATH_CHARACTER,file_name);
diff --git a/indra/llappearance/lltexlayerparams.cpp b/indra/llappearance/lltexlayerparams.cpp
index ff682d6906..ce5c7142d5 100644
--- a/indra/llappearance/lltexlayerparams.cpp
+++ b/indra/llappearance/lltexlayerparams.cpp
@@ -261,10 +261,9 @@ BOOL LLTexLayerParamAlpha::getSkip() const
}
-static LLTrace::BlockTimerStatHandle FTM_TEX_LAYER_PARAM_ALPHA("alpha render");
BOOL LLTexLayerParamAlpha::render(S32 x, S32 y, S32 width, S32 height)
{
- LL_RECORD_BLOCK_TIME(FTM_TEX_LAYER_PARAM_ALPHA);
+ LL_PROFILE_ZONE_SCOPED;
BOOL success = TRUE;
if (!mTexLayer)
diff --git a/indra/llappearance/lltexlayerparams.h b/indra/llappearance/lltexlayerparams.h
index 0cb2dedbff..e2440998b3 100644
--- a/indra/llappearance/lltexlayerparams.h
+++ b/indra/llappearance/lltexlayerparams.h
@@ -63,23 +63,14 @@ protected:
//
//~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
LL_ALIGN_PREFIX(16)
-class LLTexLayerParamAlpha : public LLTexLayerParam
+class alignas(16) LLTexLayerParamAlpha : public LLTexLayerParam
{
+ LL_ALIGN_NEW
public:
LLTexLayerParamAlpha( LLTexLayerInterface* layer );
LLTexLayerParamAlpha( LLAvatarAppearance* appearance );
/*virtual*/ ~LLTexLayerParamAlpha();
- void* operator new(size_t size)
- {
- return ll_aligned_malloc_16(size);
- }
-
- void operator delete(void* ptr)
- {
- ll_aligned_free_16(ptr);
- }
-
/*virtual*/ LLViewerVisualParam* cloneParam(LLWearable* wearable = NULL) const;
// LLVisualParam Virtual functions
@@ -146,9 +137,9 @@ private:
//
//~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-LL_ALIGN_PREFIX(16)
-class LLTexLayerParamColor : public LLTexLayerParam
+class alignas(16) LLTexLayerParamColor : public LLTexLayerParam
{
+ LL_ALIGN_NEW
public:
enum EColorOperation
{
@@ -161,16 +152,6 @@ public:
LLTexLayerParamColor( LLTexLayerInterface* layer );
LLTexLayerParamColor( LLAvatarAppearance* appearance );
- void* operator new(size_t size)
- {
- return ll_aligned_malloc_16(size);
- }
-
- void operator delete(void* ptr)
- {
- ll_aligned_free_16(ptr);
- }
-
/* virtual */ ~LLTexLayerParamColor();
/*virtual*/ LLViewerVisualParam* cloneParam(LLWearable* wearable = NULL) const;
@@ -198,8 +179,8 @@ protected:
virtual void onGlobalColorChanged() {}
private:
- LL_ALIGN_16(LLVector4a mAvgDistortionVec);
-} LL_ALIGN_POSTFIX(16);
+ LLVector4a mAvgDistortionVec;
+};
class LLTexLayerParamColorInfo : public LLViewerVisualParamInfo
{
diff --git a/indra/llcharacter/llcharacter.cpp b/indra/llcharacter/llcharacter.cpp
index b764ef0c7e..376f096642 100644
--- a/indra/llcharacter/llcharacter.cpp
+++ b/indra/llcharacter/llcharacter.cpp
@@ -188,20 +188,15 @@ void LLCharacter::requestStopMotion( LLMotion* motion)
//-----------------------------------------------------------------------------
// updateMotions()
//-----------------------------------------------------------------------------
-static LLTrace::BlockTimerStatHandle FTM_UPDATE_ANIMATION("Update Animation");
-static LLTrace::BlockTimerStatHandle FTM_UPDATE_HIDDEN_ANIMATION("Update Hidden Anim");
-static LLTrace::BlockTimerStatHandle FTM_UPDATE_MOTIONS("Update Motions");
-
void LLCharacter::updateMotions(e_update_t update_type)
{
+ LL_PROFILE_ZONE_SCOPED_CATEGORY_AVATAR;
if (update_type == HIDDEN_UPDATE)
{
- LL_RECORD_BLOCK_TIME(FTM_UPDATE_HIDDEN_ANIMATION);
mMotionController.updateMotionsMinimal();
}
else
{
- LL_RECORD_BLOCK_TIME(FTM_UPDATE_ANIMATION);
// unpause if the number of outstanding pause requests has dropped to the initial one
if (mMotionController.isPaused() && mPauseRequest->getNumRefs() == 1)
{
@@ -209,7 +204,6 @@ void LLCharacter::updateMotions(e_update_t update_type)
}
bool force_update = (update_type == FORCE_UPDATE);
{
- LL_RECORD_BLOCK_TIME(FTM_UPDATE_MOTIONS);
mMotionController.updateMotions(force_update);
}
}
diff --git a/indra/llcharacter/lleditingmotion.cpp b/indra/llcharacter/lleditingmotion.cpp
index ddf89f30f2..c5757163d9 100644
--- a/indra/llcharacter/lleditingmotion.cpp
+++ b/indra/llcharacter/lleditingmotion.cpp
@@ -163,6 +163,7 @@ BOOL LLEditingMotion::onActivate()
//-----------------------------------------------------------------------------
BOOL LLEditingMotion::onUpdate(F32 time, U8* joint_mask)
{
+ LL_PROFILE_ZONE_SCOPED;
LLVector3 focus_pt;
LLVector3* pointAtPt = (LLVector3*)mCharacter->getAnimationData("PointAtPoint");
diff --git a/indra/llcharacter/lleditingmotion.h b/indra/llcharacter/lleditingmotion.h
index 7b1c8bb059..80c1717a70 100644
--- a/indra/llcharacter/lleditingmotion.h
+++ b/indra/llcharacter/lleditingmotion.h
@@ -42,9 +42,11 @@
//-----------------------------------------------------------------------------
// class LLEditingMotion
//-----------------------------------------------------------------------------
+LL_ALIGN_PREFIX(16)
class LLEditingMotion :
public LLMotion
{
+ LL_ALIGN_NEW
public:
// Constructor
LLEditingMotion(const LLUUID &id);
@@ -108,6 +110,13 @@ public:
//-------------------------------------------------------------------------
// joint states to be animated
//-------------------------------------------------------------------------
+ LL_ALIGN_16(LLJoint mParentJoint);
+ LL_ALIGN_16(LLJoint mShoulderJoint);
+ LL_ALIGN_16(LLJoint mElbowJoint);
+ LL_ALIGN_16(LLJoint mWristJoint);
+ LL_ALIGN_16(LLJoint mTarget);
+ LLJointSolverRP3 mIKSolver;
+
LLCharacter *mCharacter;
LLVector3 mWristOffset;
@@ -117,17 +126,10 @@ public:
LLPointer<LLJointState> mWristState;
LLPointer<LLJointState> mTorsoState;
- LLJoint mParentJoint;
- LLJoint mShoulderJoint;
- LLJoint mElbowJoint;
- LLJoint mWristJoint;
- LLJoint mTarget;
- LLJointSolverRP3 mIKSolver;
-
static S32 sHandPose;
static S32 sHandPosePriority;
LLVector3 mLastSelectPt;
-};
+} LL_ALIGN_POSTFIX(16);
#endif // LL_LLKEYFRAMEMOTION_H
diff --git a/indra/llcharacter/llhandmotion.cpp b/indra/llcharacter/llhandmotion.cpp
index b3bf5a9a91..ceba956214 100644
--- a/indra/llcharacter/llhandmotion.cpp
+++ b/indra/llcharacter/llhandmotion.cpp
@@ -121,6 +121,7 @@ BOOL LLHandMotion::onActivate()
//-----------------------------------------------------------------------------
BOOL LLHandMotion::onUpdate(F32 time, U8* joint_mask)
{
+ LL_PROFILE_ZONE_SCOPED;
eHandPose *requestedHandPose;
F32 timeDelta = time - mLastTime;
diff --git a/indra/llcharacter/llheadrotmotion.cpp b/indra/llcharacter/llheadrotmotion.cpp
index e91de7a11d..07a3aaebb6 100644
--- a/indra/llcharacter/llheadrotmotion.cpp
+++ b/indra/llcharacter/llheadrotmotion.cpp
@@ -175,6 +175,7 @@ BOOL LLHeadRotMotion::onActivate()
//-----------------------------------------------------------------------------
BOOL LLHeadRotMotion::onUpdate(F32 time, U8* joint_mask)
{
+ LL_PROFILE_ZONE_SCOPED_CATEGORY_AVATAR;
LLQuaternion targetHeadRotWorld;
LLQuaternion currentRootRotWorld = mRootJoint->getWorldRotation();
LLQuaternion currentInvRootRotWorld = ~currentRootRotWorld;
@@ -458,6 +459,7 @@ void LLEyeMotion::adjustEyeTarget(LLVector3* targetPos, LLJointState& left_eye_s
//-----------------------------------------------------------------------------
BOOL LLEyeMotion::onUpdate(F32 time, U8* joint_mask)
{
+ LL_PROFILE_ZONE_SCOPED_CATEGORY_AVATAR;
//calculate jitter
if (mEyeJitterTimer.getElapsedTimeF32() > mEyeJitterTime)
{
diff --git a/indra/llcharacter/lljoint.cpp b/indra/llcharacter/lljoint.cpp
index dee642310e..d72282ab42 100644
--- a/indra/llcharacter/lljoint.cpp
+++ b/indra/llcharacter/lljoint.cpp
@@ -922,6 +922,13 @@ const LLMatrix4 &LLJoint::getWorldMatrix()
return mXform.getWorldMatrix();
}
+const LLMatrix4a& LLJoint::getWorldMatrix4a()
+{
+ updateWorldMatrixParent();
+
+ return mWorldMatrix;
+}
+
//--------------------------------------------------------------------
// setWorldMatrix()
@@ -1003,6 +1010,7 @@ void LLJoint::updateWorldMatrix()
{
sNumUpdates++;
mXform.updateMatrix(FALSE);
+ mWorldMatrix.loadu(mXform.getWorldMatrix());
mDirtyFlags = 0x0;
}
}
diff --git a/indra/llcharacter/lljoint.h b/indra/llcharacter/lljoint.h
index 1b646b641f..63d99b9209 100644
--- a/indra/llcharacter/lljoint.h
+++ b/indra/llcharacter/lljoint.h
@@ -38,6 +38,7 @@
#include "m4math.h"
#include "llquaternion.h"
#include "xform.h"
+#include "llmatrix4a.h"
const S32 LL_CHARACTER_MAX_JOINTS_PER_MESH = 15;
// Need to set this to count of animate-able joints,
@@ -85,8 +86,10 @@ inline bool operator!=(const LLVector3OverrideMap& a, const LLVector3OverrideMap
//-----------------------------------------------------------------------------
// class LLJoint
//-----------------------------------------------------------------------------
+LL_ALIGN_PREFIX(16)
class LLJoint
{
+ LL_ALIGN_NEW
public:
// priority levels, from highest to lowest
enum JointPriority
@@ -114,16 +117,17 @@ public:
SUPPORT_EXTENDED
};
protected:
- std::string mName;
+ // explicit transformation members
+ LL_ALIGN_16(LLMatrix4a mWorldMatrix);
+ LLXformMatrix mXform;
+
+ std::string mName;
SupportCategory mSupport;
// parent joint
LLJoint *mParent;
- // explicit transformation members
- LLXformMatrix mXform;
-
LLVector3 mDefaultPosition;
LLVector3 mDefaultScale;
@@ -259,6 +263,8 @@ public:
const LLMatrix4 &getWorldMatrix();
void setWorldMatrix( const LLMatrix4& mat );
+ const LLMatrix4a& getWorldMatrix4a();
+
void updateWorldMatrixChildren();
void updateWorldMatrixParent();
@@ -296,6 +302,6 @@ public:
// These are used in checks of whether a pos/scale override is considered significant.
bool aboveJointPosThreshold(const LLVector3& pos) const;
bool aboveJointScaleThreshold(const LLVector3& scale) const;
-};
+} LL_ALIGN_POSTFIX(16);
#endif // LL_LLJOINT_H
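
LLJoint now keeps a cached, 16-byte-aligned LLMatrix4a copy of its world transform (mWorldMatrix, refreshed via loadu() in updateWorldMatrix()) and exposes it through the new getWorldMatrix4a(). A minimal, hedged usage sketch, assuming LLMatrix4a::affineTransform() from the viewer math library; the function name below is made up for illustration.

```cpp
// Hedged sketch: consume the new SIMD-friendly accessor instead of converting
// the LLMatrix4 returned by getWorldMatrix() on every call.
#include "lljoint.h"
#include "llmatrix4a.h"
#include "llvector4a.h"

void transformOffsetToWorld(LLJoint& joint, const LLVector4a& local_offset,
                            LLVector4a& world_offset)
{
    // getWorldMatrix4a() updates parent transforms as needed and returns the
    // cached mWorldMatrix kept in sync by updateWorldMatrix().
    const LLMatrix4a& world = joint.getWorldMatrix4a();
    world.affineTransform(local_offset, world_offset);
}
```
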
diff --git a/indra/llcharacter/lljointsolverrp3.cpp b/indra/llcharacter/lljointsolverrp3.cpp
index 69a7e3dc6e..f3d5e2e324 100644
--- a/indra/llcharacter/lljointsolverrp3.cpp
+++ b/indra/llcharacter/lljointsolverrp3.cpp
@@ -1,6 +1,6 @@
/**
* @file lljointsolverrp3.cpp
- * @brief Implementation of LLJointSolverRP3 class.
+ * @brief Implementation of Joint Solver in 3D Real Projective space (RP3). See: https://en.wikipedia.org/wiki/Real_projective_space
*
* $LicenseInfo:firstyear=2001&license=viewerlgpl$
* Second Life Viewer Source Code
@@ -35,6 +35,11 @@
#define F_EPSILON 0.00001f
+#if LL_RELEASE
+ #define DEBUG_JOINT_SOLVER 0
+#else
+ #define DEBUG_JOINT_SOLVER 1
+#endif
//-----------------------------------------------------------------------------
// Constructor
@@ -150,6 +155,7 @@ void LLJointSolverRP3::solve()
LLVector3 cPos = mJointC->getWorldPosition();
LLVector3 gPos = mJointGoal->getWorldPosition();
+#if DEBUG_JOINT_SOLVER
LL_DEBUGS("JointSolver") << "LLJointSolverRP3::solve()" << LL_NEWLINE
<< "bPosLocal = " << mJointB->getPosition() << LL_NEWLINE
<< "cPosLocal = " << mJointC->getPosition() << LL_NEWLINE
@@ -159,6 +165,7 @@ void LLJointSolverRP3::solve()
<< "bPos : " << bPos << LL_NEWLINE
<< "cPos : " << cPos << LL_NEWLINE
<< "gPos : " << gPos << LL_ENDL;
+#endif
//-------------------------------------------------------------------------
// get the poleVector in world space
@@ -194,6 +201,7 @@ void LLJointSolverRP3::solve()
//-------------------------------------------------------------------------
LLVector3 abacCompOrthoVec = abVec - acVec * ((abVec * acVec)/(acVec * acVec));
+#if DEBUG_JOINT_SOLVER
LL_DEBUGS("JointSolver") << "abVec : " << abVec << LL_NEWLINE
<< "bcVec : " << bcVec << LL_NEWLINE
<< "acVec : " << acVec << LL_NEWLINE
@@ -202,6 +210,7 @@ void LLJointSolverRP3::solve()
<< "bcLen : " << bcLen << LL_NEWLINE
<< "agLen : " << agLen << LL_NEWLINE
<< "abacCompOrthoVec : " << abacCompOrthoVec << LL_ENDL;
+#endif
//-------------------------------------------------------------------------
// compute the normal of the original ABC plane (and store for later)
@@ -269,6 +278,7 @@ void LLJointSolverRP3::solve()
LLQuaternion bRot(theta - abbcAng, abbcOrthoVec);
+#if DEBUG_JOINT_SOLVER
LL_DEBUGS("JointSolver") << "abbcAng : " << abbcAng << LL_NEWLINE
<< "abbcOrthoVec : " << abbcOrthoVec << LL_NEWLINE
<< "agLenSq : " << agLenSq << LL_NEWLINE
@@ -280,6 +290,7 @@ void LLJointSolverRP3::solve()
<< abbcAng*180.0f/F_PI << " "
<< (theta - abbcAng)*180.0f/F_PI
<< LL_ENDL;
+#endif
//-------------------------------------------------------------------------
// compute rotation that rotates new A->C to A->G
@@ -293,9 +304,11 @@ void LLJointSolverRP3::solve()
LLQuaternion cgRot;
cgRot.shortestArc( acVec, agVec );
+#if DEBUG_JOINT_SOLVER
LL_DEBUGS("JointSolver") << "bcVec : " << bcVec << LL_NEWLINE
<< "acVec : " << acVec << LL_NEWLINE
<< "cgRot : " << cgRot << LL_ENDL;
+#endif
// update A->B and B->C with rotation from C to G
abVec = abVec * cgRot;
@@ -358,11 +371,13 @@ void LLJointSolverRP3::solve()
//-------------------------------------------------------------------------
LLQuaternion twistRot( mTwist, agVec );
+#if DEBUG_JOINT_SOLVER
LL_DEBUGS("JointSolver") << "abcNorm = " << abcNorm << LL_NEWLINE
<< "apgNorm = " << apgNorm << LL_NEWLINE
<< "pRot = " << pRot << LL_NEWLINE
<< "twist : " << mTwist*180.0/F_PI << LL_NEWLINE
<< "twistRot : " << twistRot << LL_ENDL;
+#endif
//-------------------------------------------------------------------------
// compute rotation of A
diff --git a/indra/llcharacter/llkeyframefallmotion.cpp b/indra/llcharacter/llkeyframefallmotion.cpp
index 7842f0e5fb..e8bb2bf95d 100644
--- a/indra/llcharacter/llkeyframefallmotion.cpp
+++ b/indra/llcharacter/llkeyframefallmotion.cpp
@@ -121,6 +121,7 @@ BOOL LLKeyframeFallMotion::onActivate()
//-----------------------------------------------------------------------------
BOOL LLKeyframeFallMotion::onUpdate(F32 activeTime, U8* joint_mask)
{
+ LL_PROFILE_ZONE_SCOPED_CATEGORY_AVATAR;
BOOL result = LLKeyframeMotion::onUpdate(activeTime, joint_mask);
F32 slerp_amt = clamp_rescale(activeTime / getDuration(), 0.5f, 0.75f, 0.f, 1.f);
diff --git a/indra/llcharacter/llkeyframemotion.cpp b/indra/llcharacter/llkeyframemotion.cpp
index fe9de30f0a..a25ff16786 100644
--- a/indra/llcharacter/llkeyframemotion.cpp
+++ b/indra/llcharacter/llkeyframemotion.cpp
@@ -677,6 +677,7 @@ BOOL LLKeyframeMotion::onActivate()
//-----------------------------------------------------------------------------
BOOL LLKeyframeMotion::onUpdate(F32 time, U8* joint_mask)
{
+ LL_PROFILE_ZONE_SCOPED_CATEGORY_AVATAR;
// llassert(time >= 0.f); // This will fire
time = llmax(0.f, time);
diff --git a/indra/llcharacter/llkeyframemotionparam.cpp b/indra/llcharacter/llkeyframemotionparam.cpp
index 6ed18bc445..aba1c5db39 100644
--- a/indra/llcharacter/llkeyframemotionparam.cpp
+++ b/indra/llcharacter/llkeyframemotionparam.cpp
@@ -158,6 +158,7 @@ BOOL LLKeyframeMotionParam::onActivate()
//-----------------------------------------------------------------------------
BOOL LLKeyframeMotionParam::onUpdate(F32 time, U8* joint_mask)
{
+ LL_PROFILE_ZONE_SCOPED;
F32 weightFactor = 1.f / (F32)mParameterizedMotions.size();
// zero out all pose weights
diff --git a/indra/llcharacter/llkeyframestandmotion.h b/indra/llcharacter/llkeyframestandmotion.h
index c2634ecd6d..1aa5b187ba 100644
--- a/indra/llcharacter/llkeyframestandmotion.h
+++ b/indra/llcharacter/llkeyframestandmotion.h
@@ -37,9 +37,11 @@
//-----------------------------------------------------------------------------
// class LLKeyframeStandMotion
//-----------------------------------------------------------------------------
+LL_ALIGN_PREFIX(16)
class LLKeyframeStandMotion :
public LLKeyframeMotion
{
+ LL_ALIGN_NEW
public:
// Constructor
LLKeyframeStandMotion(const LLUUID &id);
@@ -69,6 +71,18 @@ public:
//-------------------------------------------------------------------------
// Member Data
//-------------------------------------------------------------------------
+ LLJoint mPelvisJoint;
+
+ LLJoint mHipLeftJoint;
+ LLJoint mKneeLeftJoint;
+ LLJoint mAnkleLeftJoint;
+ LLJoint mTargetLeft;
+
+ LLJoint mHipRightJoint;
+ LLJoint mKneeRightJoint;
+ LLJoint mAnkleRightJoint;
+ LLJoint mTargetRight;
+
LLCharacter *mCharacter;
BOOL mFlipFeet;
@@ -83,18 +97,6 @@ public:
LLPointer<LLJointState> mKneeRightState;
LLPointer<LLJointState> mAnkleRightState;
- LLJoint mPelvisJoint;
-
- LLJoint mHipLeftJoint;
- LLJoint mKneeLeftJoint;
- LLJoint mAnkleLeftJoint;
- LLJoint mTargetLeft;
-
- LLJoint mHipRightJoint;
- LLJoint mKneeRightJoint;
- LLJoint mAnkleRightJoint;
- LLJoint mTargetRight;
-
LLJointSolverRP3 mIKLeft;
LLJointSolverRP3 mIKRight;
@@ -110,7 +112,7 @@ public:
BOOL mTrackAnkles;
S32 mFrameNum;
-};
+} LL_ALIGN_POSTFIX(16);
#endif // LL_LLKEYFRAMESTANDMOTION_H
diff --git a/indra/llcharacter/llkeyframewalkmotion.cpp b/indra/llcharacter/llkeyframewalkmotion.cpp
index f180702385..298b37e60c 100644
--- a/indra/llcharacter/llkeyframewalkmotion.cpp
+++ b/indra/llcharacter/llkeyframewalkmotion.cpp
@@ -105,6 +105,7 @@ void LLKeyframeWalkMotion::onDeactivate()
//-----------------------------------------------------------------------------
BOOL LLKeyframeWalkMotion::onUpdate(F32 time, U8* joint_mask)
{
+ LL_PROFILE_ZONE_SCOPED;
// compute time since last update
F32 deltaTime = time - mRealTimeLast;
@@ -198,6 +199,7 @@ BOOL LLWalkAdjustMotion::onActivate()
//-----------------------------------------------------------------------------
BOOL LLWalkAdjustMotion::onUpdate(F32 time, U8* joint_mask)
{
+ LL_PROFILE_ZONE_SCOPED;
// delta_time is guaranteed to be non zero
F32 delta_time = llclamp(time - mLastTime, TIME_EPSILON, MAX_TIME_DELTA);
mLastTime = time;
@@ -373,6 +375,7 @@ BOOL LLFlyAdjustMotion::onActivate()
//-----------------------------------------------------------------------------
BOOL LLFlyAdjustMotion::onUpdate(F32 time, U8* joint_mask)
{
+ LL_PROFILE_ZONE_SCOPED;
LLVector3 ang_vel = mCharacter->getCharacterAngularVelocity() * mCharacter->getTimeDilation();
F32 speed = mCharacter->getCharacterVelocity().magVec();
diff --git a/indra/llcharacter/llmotioncontroller.cpp b/indra/llcharacter/llmotioncontroller.cpp
index c48d02b652..e66714388a 100644
--- a/indra/llcharacter/llmotioncontroller.cpp
+++ b/indra/llcharacter/llmotioncontroller.cpp
@@ -503,6 +503,7 @@ void LLMotionController::resetJointSignatures()
//-----------------------------------------------------------------------------
void LLMotionController::updateIdleMotion(LLMotion* motionp)
{
+ LL_PROFILE_ZONE_SCOPED_CATEGORY_AVATAR;
if (motionp->isStopped() && mAnimTime > motionp->getStopTime() + motionp->getEaseOutDuration())
{
deactivateMotionInstance(motionp);
@@ -541,6 +542,7 @@ void LLMotionController::updateIdleMotion(LLMotion* motionp)
//-----------------------------------------------------------------------------
void LLMotionController::updateIdleActiveMotions()
{
+ LL_PROFILE_ZONE_SCOPED_CATEGORY_AVATAR;
for (motion_list_t::iterator iter = mActiveMotions.begin();
iter != mActiveMotions.end(); )
{
@@ -553,10 +555,9 @@ void LLMotionController::updateIdleActiveMotions()
//-----------------------------------------------------------------------------
// updateMotionsByType()
//-----------------------------------------------------------------------------
-static LLTrace::BlockTimerStatHandle FTM_MOTION_ON_UPDATE("Motion onUpdate");
-
void LLMotionController::updateMotionsByType(LLMotion::LLMotionBlendType anim_type)
{
+ LL_PROFILE_ZONE_SCOPED_CATEGORY_AVATAR;
BOOL update_result = TRUE;
U8 last_joint_signature[LL_CHARACTER_MAX_ANIMATED_JOINTS];
@@ -712,7 +713,6 @@ void LLMotionController::updateMotionsByType(LLMotion::LLMotionBlendType anim_ty
// perform motion update
{
- LL_RECORD_BLOCK_TIME(FTM_MOTION_ON_UPDATE);
update_result = motionp->onUpdate(mAnimTime - motionp->mActivationTimestamp, last_joint_signature);
}
}
@@ -768,6 +768,7 @@ void LLMotionController::updateMotionsByType(LLMotion::LLMotionBlendType anim_ty
//-----------------------------------------------------------------------------
void LLMotionController::updateLoadingMotions()
{
+ LL_PROFILE_ZONE_SCOPED_CATEGORY_AVATAR;
// query pending motions for completion
for (motion_set_t::iterator iter = mLoadingMotions.begin();
iter != mLoadingMotions.end(); )
@@ -815,6 +816,7 @@ void LLMotionController::updateLoadingMotions()
//-----------------------------------------------------------------------------
void LLMotionController::updateMotions(bool force_update)
{
+ LL_PROFILE_ZONE_SCOPED_CATEGORY_AVATAR;
// SL-763: "Distant animated objects run at super fast speed"
// The use_quantum optimization or possibly the associated code in setTimeStamp()
// does not work as implemented.
@@ -907,6 +909,7 @@ void LLMotionController::updateMotions(bool force_update)
//-----------------------------------------------------------------------------
void LLMotionController::updateMotionsMinimal()
{
+ LL_PROFILE_ZONE_SCOPED_CATEGORY_AVATAR;
// Always update mPrevTimerElapsed
mPrevTimerElapsed = mTimer.getElapsedTimeF32();
@@ -924,6 +927,7 @@ void LLMotionController::updateMotionsMinimal()
//-----------------------------------------------------------------------------
BOOL LLMotionController::activateMotionInstance(LLMotion *motion, F32 time)
{
+ LL_PROFILE_ZONE_SCOPED_CATEGORY_AVATAR;
// It's not clear why the getWeight() line seems to be crashing this, but
// hopefully this fixes it.
if (motion == NULL || motion->getPose() == NULL)
diff --git a/indra/llcharacter/llpose.h b/indra/llcharacter/llpose.h
index c004a0f3b7..1405f1e053 100644
--- a/indra/llcharacter/llpose.h
+++ b/indra/llcharacter/llpose.h
@@ -80,8 +80,10 @@ public:
const S32 JSB_NUM_JOINT_STATES = 6;
+LL_ALIGN_PREFIX(16)
class LLJointStateBlender
{
+ LL_ALIGN_NEW
protected:
LLPointer<LLJointState> mJointStates[JSB_NUM_JOINT_STATES];
S32 mPriorities[JSB_NUM_JOINT_STATES];
@@ -96,8 +98,8 @@ public:
void resetCachedJoint();
public:
- LLJoint mJointCache;
-};
+ LL_ALIGN_16(LLJoint mJointCache);
+} LL_ALIGN_POSTFIX(16);
class LLMotion;
diff --git a/indra/llcharacter/lltargetingmotion.cpp b/indra/llcharacter/lltargetingmotion.cpp
index 69681e4197..ec75212a40 100644
--- a/indra/llcharacter/lltargetingmotion.cpp
+++ b/indra/llcharacter/lltargetingmotion.cpp
@@ -103,6 +103,7 @@ BOOL LLTargetingMotion::onActivate()
//-----------------------------------------------------------------------------
BOOL LLTargetingMotion::onUpdate(F32 time, U8* joint_mask)
{
+ LL_PROFILE_ZONE_SCOPED;
F32 slerp_amt = LLSmoothInterpolation::getInterpolant(TORSO_TARGET_HALF_LIFE);
LLVector3 target;
diff --git a/indra/llcommon/CMakeLists.txt b/indra/llcommon/CMakeLists.txt
index 766a1849f9..ca8b5e946f 100644
--- a/indra/llcommon/CMakeLists.txt
+++ b/indra/llcommon/CMakeLists.txt
@@ -12,6 +12,7 @@ include(JsonCpp)
include(Copy3rdPartyLibs)
include(ZLIB)
include(URIPARSER)
+include(Tracy)
include_directories(
${EXPAT_INCLUDE_DIRS}
@@ -19,6 +20,7 @@ include_directories(
${JSONCPP_INCLUDE_DIR}
${ZLIB_INCLUDE_DIRS}
${URIPARSER_INCLUDE_DIRS}
+ ${TRACY_INCLUDE_DIR}
)
# add_executable(lltreeiterators lltreeiterators.cpp)
@@ -117,14 +119,16 @@ set(llcommon_SOURCE_FILES
lluriparser.cpp
lluuid.cpp
llworkerthread.cpp
- timing.cpp
u64.cpp
+ threadpool.cpp
+ workqueue.cpp
StackWalker.cpp
)
set(llcommon_HEADER_FILES
CMakeLists.txt
+ chrono.h
ctype_workaround.h
fix_macros.h
indra_constants.h
@@ -197,6 +201,8 @@ set(llcommon_HEADER_FILES
llmortician.h
llnametable.h
llpointer.h
+ llprofiler.h
+ llprofilercategories.h
llpounceable.h
llpredicate.h
llpreprocessor.h
@@ -251,8 +257,12 @@ set(llcommon_HEADER_FILES
lockstatic.h
stdtypes.h
stringize.h
+ threadpool.h
+ threadsafeschedule.h
timer.h
+ tuple.h
u64.h
+ workqueue.h
StackWalker.h
)
@@ -299,6 +309,7 @@ target_link_libraries(
${BOOST_SYSTEM_LIBRARY}
${GOOGLE_PERFTOOLS_LIBRARIES}
${URIPARSER_LIBRARIES}
+ ${TRACY_LIBRARY}
)
if (DARWIN)
@@ -355,6 +366,9 @@ if (LL_TESTS)
LL_ADD_INTEGRATION_TEST(lluri "" "${test_libs}")
LL_ADD_INTEGRATION_TEST(llunits "" "${test_libs}")
LL_ADD_INTEGRATION_TEST(stringize "" "${test_libs}")
+ LL_ADD_INTEGRATION_TEST(threadsafeschedule "" "${test_libs}")
+ LL_ADD_INTEGRATION_TEST(tuple "" "${test_libs}")
+ LL_ADD_INTEGRATION_TEST(workqueue "" "${test_libs}")
## llexception_test.cpp isn't a regression test, and doesn't need to be run
## every build. It's to help a developer make implementation choices about
diff --git a/indra/llcommon/chrono.h b/indra/llcommon/chrono.h
new file mode 100644
index 0000000000..806e871892
--- /dev/null
+++ b/indra/llcommon/chrono.h
@@ -0,0 +1,65 @@
+/**
+ * @file chrono.h
+ * @author Nat Goodspeed
+ * @date 2021-10-05
+ * @brief supplement <chrono> with utility functions
+ *
+ * $LicenseInfo:firstyear=2021&license=viewerlgpl$
+ * Copyright (c) 2021, Linden Research, Inc.
+ * $/LicenseInfo$
+ */
+
+#if ! defined(LL_CHRONO_H)
+#define LL_CHRONO_H
+
+#include <chrono>
+#include <type_traits>              // std::enable_if
+
+namespace LL
+{
+
+// time_point_cast() is derived from https://stackoverflow.com/a/35293183
+// without the iteration: we think errors in the ~1 microsecond range are
+// probably acceptable.
+
+// This variant is for the optimal case when the source and dest use the same
+// clock: that case is handled by std::chrono.
+template <typename DestTimePoint, typename SrcTimePoint,
+          typename std::enable_if<std::is_same<typename DestTimePoint::clock,
+                                                typename SrcTimePoint::clock>::value,
+                                  bool>::type = true>
+DestTimePoint time_point_cast(const SrcTimePoint& time)
+{
+    return std::chrono::time_point_cast<typename DestTimePoint::duration>(time);
+}
+
+// This variant is for when the source and dest use different clocks -- see
+// the linked StackOverflow answer, also Howard Hinnant's, for more context.
+template <typename DestTimePoint, typename SrcTimePoint,
+          typename std::enable_if<! std::is_same<typename DestTimePoint::clock,
+                                                  typename SrcTimePoint::clock>::value,
+                                  bool>::type = true>
+DestTimePoint time_point_cast(const SrcTimePoint& time)
+{
+ // The basic idea is that we must adjust the passed time_point by the
+ // difference between the clocks' epochs. But since time_point doesn't
+ // expose its epoch, we fall back on what each of them thinks is now().
+ // However, since we necessarily make sequential calls to those now()
+ // functions, the answers differ not only by the cycles spent executing
+ // those calls, but by potential OS interruptions between them. Try to
+ // reduce that error by capturing the source clock time both before and
+ // after the dest clock, and splitting the difference. Of course an
+ // interruption between two of these now() calls without a comparable
+ // interruption between the other two will skew the result, but better is
+ // more expensive.
+ const auto src_before = typename SrcTimePoint::clock::now();
+ const auto dest_now = typename DestTimePoint::clock::now();
+ const auto src_after = typename SrcTimePoint::clock::now();
+ const auto src_diff = src_after - src_before;
+ const auto src_now = src_before + src_diff / 2;
+ return dest_now + (time - src_now);
+}
+
+} // namespace LL
+
+#endif /* ! defined(LL_CHRONO_H) */
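
A brief usage sketch for the helper above (illustrative only; the function and type aliases below are hypothetical and not part of this changeset). Converting a wall-clock timestamp onto the local steady clock selects the second, cross-clock overload:

    #include "chrono.h"
    #include <chrono>

    using system_tp = std::chrono::system_clock::time_point;
    using steady_tp = std::chrono::steady_clock::time_point;

    // Map a wall-clock timestamp (e.g. one received from a server) onto the
    // local steady clock so it can be used as a waitable deadline. Accuracy is
    // limited to roughly the cost of the three now() calls, as noted above.
    steady_tp to_local_deadline(const system_tp& wall_time)
    {
        return LL::time_point_cast<steady_tp>(wall_time);
    }
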
diff --git a/indra/llcommon/linden_common.h b/indra/llcommon/linden_common.h
index e5a913a6a9..a228fd22be 100644
--- a/indra/llcommon/linden_common.h
+++ b/indra/llcommon/linden_common.h
@@ -27,6 +27,14 @@
#ifndef LL_LINDEN_COMMON_H
#define LL_LINDEN_COMMON_H
+#include "llprofiler.h"
+#if TRACY_ENABLE && !defined(LL_PROFILER_ENABLE_TRACY_OPENGL) // hooks for memory profiling
+void *tracy_aligned_malloc(size_t size, size_t alignment);
+void tracy_aligned_free(void *memblock);
+#define _aligned_malloc(X, Y) tracy_aligned_malloc((X), (Y))
+#define _aligned_free(X) tracy_aligned_free((X))
+#endif
+
// *NOTE: Please keep includes here to a minimum!
//
// Files included here are included in every library .cpp file and
diff --git a/indra/llcommon/llcommon.cpp b/indra/llcommon/llcommon.cpp
index 96be913d17..d2c4e66160 100644
--- a/indra/llcommon/llcommon.cpp
+++ b/indra/llcommon/llcommon.cpp
@@ -33,6 +33,66 @@
#include "lltracethreadrecorder.h"
#include "llcleanup.h"
+thread_local bool gProfilerEnabled = false;
+
+#if (TRACY_ENABLE)
+// Override new/delete for tracy memory profiling
+void *operator new(size_t size)
+{
+ void* ptr;
+ if (gProfilerEnabled)
+ {
+ LL_PROFILE_ZONE_SCOPED_CATEGORY_MEMORY;
+ ptr = (malloc)(size);
+ }
+ else
+ {
+ ptr = (malloc)(size);
+ }
+ if (!ptr)
+ {
+ throw std::bad_alloc();
+ }
+ TracyAlloc(ptr, size);
+ return ptr;
+}
+
+void operator delete(void *ptr) noexcept
+{
+ TracyFree(ptr);
+ if (gProfilerEnabled)
+ {
+ LL_PROFILE_ZONE_SCOPED_CATEGORY_MEMORY;
+ (free)(ptr);
+ }
+ else
+ {
+ (free)(ptr);
+ }
+}
+
+// C-style malloc/free can't be so easily overridden, so we define tracy versions and use
+// a pre-processor #define in linden_common.h to redirect to them. The parens around the native
+// functions below prevents recursive substitution by the preprocessor.
+//
+// Unaligned mallocs are rare in LL code but hooking them causes problems in 3p lib code (looking at
+// you, Havok), so we'll only capture the aligned version.
+
+void *tracy_aligned_malloc(size_t size, size_t alignment)
+{
+ auto ptr = ll_aligned_malloc_fallback(size, alignment);
+ if (ptr) TracyAlloc(ptr, size);
+ return ptr;
+}
+
+void tracy_aligned_free(void *memblock)
+{
+ TracyFree(memblock);
+ ll_aligned_free_fallback(memblock);
+}
+
+#endif
+
//static
BOOL LLCommon::sAprInitialized = FALSE;
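
The parenthesized calls above rely on a standard preprocessor rule: a function-like macro is only expanded when its name is immediately followed by '('. A minimal stand-alone illustration (the wrapper name here is hypothetical, not part of the changeset):

    #include <cstdio>
    #include <cstdlib>

    static void* my_counting_malloc(std::size_t n);

    // Redirect plain calls to the wrapper, mirroring what linden_common.h does
    // for _aligned_malloc/_aligned_free when the Tracy memory hooks are enabled.
    #define malloc(n) my_counting_malloc(n)

    static void* my_counting_malloc(std::size_t n)
    {
        std::printf("alloc %zu bytes\n", n);
        // The parentheses keep the macro from expanding here, so this calls
        // the real C library malloc instead of recursing into the wrapper.
        return (malloc)(n);
    }

    int main()
    {
        void* p = malloc(64);   // expands to my_counting_malloc(64)
        std::free(p);
        return 0;
    }
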
diff --git a/indra/llcommon/llcond.h b/indra/llcommon/llcond.h
index e31b67d893..da6e6affe1 100644
--- a/indra/llcommon/llcond.h
+++ b/indra/llcommon/llcond.h
@@ -53,6 +53,8 @@ private:
LLCoros::Mutex mMutex;
// Use LLCoros::ConditionVariable for the same reason.
LLCoros::ConditionVariable mCond;
+ using LockType = LLCoros::LockType;
+ using cv_status = LLCoros::cv_status;
public:
/// LLCond can be explicitly initialized with a specific value for mData if
@@ -65,10 +67,29 @@ public:
LLCond(const LLCond&) = delete;
LLCond& operator=(const LLCond&) = delete;
- /// get() returns a const reference to the stored DATA. The only way to
- /// get a non-const reference -- to modify the stored DATA -- is via
- /// update_one() or update_all().
- const value_type& get() const { return mData; }
+ /**
+ * get() returns the stored DATA by value -- so to use get(), DATA must
+ * be copyable. The only way to get a non-const reference -- to modify
+ * the stored DATA -- is via update_one() or update_all().
+ */
+ value_type get()
+ {
+ LockType lk(mMutex);
+ return mData;
+ }
+
+ /**
+ * get(functor) returns whatever the functor returns. It allows us to peek
+ * at the stored DATA without copying the whole thing. The functor must
+ * accept a const reference to DATA. If you want to modify DATA, call
+ * update_one() or update_all() instead.
+ */
+    template <typename FUNC>
+ auto get(FUNC&& func)
+ {
+ LockType lk(mMutex);
+        return std::forward<FUNC>(func)(const_data());
+ }
/**
* Pass update_one() an invocable accepting non-const (DATA&). The
@@ -80,11 +101,11 @@ public:
* update_one() when DATA is a struct or class.
*/
    template <typename MODIFY>
- void update_one(MODIFY modify)
+ void update_one(MODIFY&& modify)
{
{ // scope of lock can/should end before notify_one()
- LLCoros::LockType lk(mMutex);
- modify(mData);
+ LockType lk(mMutex);
+            std::forward<MODIFY>(modify)(mData);
}
mCond.notify_one();
}
@@ -99,11 +120,11 @@ public:
* update_all() when DATA is a struct or class.
*/
    template <typename MODIFY>
- void update_all(MODIFY modify)
+ void update_all(MODIFY&& modify)
{
{ // scope of lock can/should end before notify_all()
- LLCoros::LockType lk(mMutex);
- modify(mData);
+ LockType lk(mMutex);
+            std::forward<MODIFY>(modify)(mData);
}
mCond.notify_all();
}
@@ -116,9 +137,9 @@ public:
* wait() on the condition_variable.
*/
    template <typename Pred>
- void wait(Pred pred)
+ void wait(Pred&& pred)
{
- LLCoros::LockType lk(mMutex);
+ LockType lk(mMutex);
// We must iterate explicitly since the predicate accepted by
// condition_variable::wait() requires a different signature:
// condition_variable::wait() calls its predicate with no arguments.
@@ -127,7 +148,7 @@ public:
// But what if they instead pass a predicate accepting non-const
// (DATA&)? Such a predicate could modify mData, which would be Bad.
// Forbid that.
-        while (! pred(const_cast<const value_type&>(mData)))
+        while (! std::forward<Pred>(pred)(const_data()))
{
mCond.wait(lk);
}
@@ -144,7 +165,7 @@ public:
* returning true.
*/
    template <typename Rep, typename Period, typename Pred>
-    bool wait_for(const std::chrono::duration<Rep, Period>& timeout_duration, Pred pred)
+    bool wait_for(const std::chrono::duration<Rep, Period>& timeout_duration, Pred&& pred)
{
// Instead of replicating wait_until() logic, convert duration to
// time_point and just call wait_until().
@@ -153,7 +174,8 @@ public:
// wrong! We'd keep pushing the timeout time farther and farther into
// the future. This way, we establish a definite timeout time and
// stick to it.
- return wait_until(std::chrono::steady_clock::now() + timeout_duration, pred);
+ return wait_until(std::chrono::steady_clock::now() + timeout_duration,
+                          std::forward<Pred>(pred));
}
/**
@@ -163,9 +185,9 @@ public:
* generic wait_for() method.
*/
    template <typename Pred>
- bool wait_for(F32Milliseconds timeout_duration, Pred pred)
+ bool wait_for(F32Milliseconds timeout_duration, Pred&& pred)
{
- return wait_for(convert(timeout_duration), pred);
+        return wait_for(convert(timeout_duration), std::forward<Pred>(pred));
}
protected:
@@ -183,6 +205,10 @@ protected:
}
private:
+ // It's important to pass a const ref to certain user-specified functors
+ // that aren't supposed to be able to modify mData.
+ const value_type& const_data() const { return mData; }
+
/**
* Pass wait_until() a chrono::time_point, indicating the time at which we
* should stop waiting, and a predicate accepting (const DATA&), returning
@@ -203,21 +229,21 @@ private:
* honoring a fixed timeout.
*/
    template <typename Clock, typename Duration, typename Pred>
-    bool wait_until(const std::chrono::time_point<Clock, Duration>& timeout_time, Pred pred)
+    bool wait_until(const std::chrono::time_point<Clock, Duration>& timeout_time, Pred&& pred)
{
- LLCoros::LockType lk(mMutex);
+ LockType lk(mMutex);
// We advise the caller to pass a predicate accepting (const DATA&).
// But what if they instead pass a predicate accepting non-const
// (DATA&)? Such a predicate could modify mData, which would be Bad.
// Forbid that.
-        while (! pred(const_cast<const value_type&>(mData)))
+        while (! std::forward<Pred>(pred)(const_data()))
{
- if (LLCoros::cv_status::timeout == mCond.wait_until(lk, timeout_time))
+ if (cv_status::timeout == mCond.wait_until(lk, timeout_time))
{
// It's possible that wait_until() timed out AND the predicate
// became true more or less simultaneously. Even though
// wait_until() timed out, check the predicate one more time.
-                return pred(const_cast<const value_type&>(mData));
+                return std::forward<Pred>(pred)(const_data());
}
}
return true;
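
A short sketch of the revised LLCond API documented above (the variable and function names are hypothetical): producers modify the guarded value through update_all(), consumers block on a predicate that receives a const reference, and get() now copies the value under the lock.

    #include "llcond.h"
    #include <chrono>

    static LLCond<int> sReadyCount(0);   // hypothetical shared state

    void producer()
    {
        // update_all() runs the functor on non-const DATA& under the lock,
        // then notifies every waiter.
        sReadyCount.update_all([](int& n) { ++n; });
    }

    bool wait_for_three()
    {
        // The predicate receives const DATA&; returns false on timeout.
        return sReadyCount.wait_for(std::chrono::seconds(5),
                                    [](const int& n) { return n >= 3; });
    }

    int snapshot()
    {
        return sReadyCount.get();   // copies the stored value under the lock
    }
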
diff --git a/indra/llcommon/lldate.cpp b/indra/llcommon/lldate.cpp
index 7a2a0869f4..2ddcf40895 100644
--- a/indra/llcommon/lldate.cpp
+++ b/indra/llcommon/lldate.cpp
@@ -86,11 +86,9 @@ std::string LLDate::asRFC1123() const
return toHTTPDateString (std::string ("%A, %d %b %Y %H:%M:%S GMT"));
}
-LLTrace::BlockTimerStatHandle FT_DATE_FORMAT("Date Format");
-
std::string LLDate::toHTTPDateString (std::string fmt) const
{
- LL_RECORD_BLOCK_TIME(FT_DATE_FORMAT);
+ LL_PROFILE_ZONE_SCOPED;
time_t locSeconds = (time_t) mSecondsSinceEpoch;
struct tm * gmt = gmtime (&locSeconds);
@@ -99,7 +97,7 @@ std::string LLDate::toHTTPDateString (std::string fmt) const
std::string LLDate::toHTTPDateString (tm * gmt, std::string fmt)
{
- LL_RECORD_BLOCK_TIME(FT_DATE_FORMAT);
+ LL_PROFILE_ZONE_SCOPED;
// avoid calling setlocale() unnecessarily - it's expensive.
static std::string prev_locale = "";
diff --git a/indra/llcommon/llerror.cpp b/indra/llcommon/llerror.cpp
index 55a06f8326..919d2dabc4 100644
--- a/indra/llcommon/llerror.cpp
+++ b/indra/llcommon/llerror.cpp
@@ -109,6 +109,7 @@ namespace {
virtual void recordMessage(LLError::ELevel level,
const std::string& message) override
{
+ LL_PROFILE_ZONE_SCOPED_CATEGORY_LOGGING
int syslogPriority = LOG_CRIT;
switch (level) {
case LLError::LEVEL_DEBUG: syslogPriority = LOG_DEBUG; break;
@@ -166,6 +167,7 @@ namespace {
virtual void recordMessage(LLError::ELevel level,
const std::string& message) override
{
+ LL_PROFILE_ZONE_SCOPED_CATEGORY_LOGGING
if (LLError::getAlwaysFlush())
{
mFile << message << std::endl;
@@ -194,7 +196,7 @@ namespace {
{
return LLError::getEnabledLogTypesMask() & 0x04;
}
-
+
LL_FORCE_INLINE std::string createBoldANSI()
{
std::string ansi_code;
@@ -220,10 +222,10 @@ namespace {
LL_FORCE_INLINE std::string createANSI(const std::string& color)
{
std::string ansi_code;
- ansi_code += '\033';
- ansi_code += "[";
+ ansi_code += '\033';
+ ansi_code += "[";
ansi_code += "38;5;";
- ansi_code += color;
+ ansi_code += color;
ansi_code += "m";
return ansi_code;
@@ -232,6 +234,7 @@ namespace {
virtual void recordMessage(LLError::ELevel level,
const std::string& message) override
{
+ LL_PROFILE_ZONE_SCOPED_CATEGORY_LOGGING
// The default colors for error, warn and debug are now a bit more pastel
// and easier to read on the default (black) terminal background but you
// now have the option to set the color of each via an environment variables:
@@ -261,6 +264,7 @@ namespace {
}
else
{
+ LL_PROFILE_ZONE_NAMED("fprintf");
fprintf(stderr, "%s\n", message.c_str());
}
}
@@ -270,6 +274,7 @@ namespace {
LL_FORCE_INLINE void writeANSI(const std::string& ansi_code, const std::string& message)
{
+ LL_PROFILE_ZONE_SCOPED_CATEGORY_LOGGING
static std::string s_ansi_bold = createBoldANSI(); // bold text
static std::string s_ansi_reset = createResetANSI(); // reset
// ANSI color code escape sequence, message, and reset in one fprintf call
@@ -306,6 +311,7 @@ namespace {
virtual void recordMessage(LLError::ELevel level,
const std::string& message) override
{
+ LL_PROFILE_ZONE_SCOPED_CATEGORY_LOGGING
mBuffer->addLine(message);
}
@@ -332,6 +338,7 @@ namespace {
virtual void recordMessage(LLError::ELevel level,
const std::string& message) override
{
+ LL_PROFILE_ZONE_SCOPED_CATEGORY_LOGGING
debugger_print(message);
}
};
@@ -1210,6 +1217,7 @@ namespace
void writeToRecorders(const LLError::CallSite& site, const std::string& message)
{
+ LL_PROFILE_ZONE_SCOPED_CATEGORY_LOGGING
LLError::ELevel level = site.mLevel;
SettingsConfigPtr s = Globals::getInstance()->getSettingsConfig();
@@ -1344,6 +1352,7 @@ namespace LLError
bool Log::shouldLog(CallSite& site)
{
+ LL_PROFILE_ZONE_SCOPED_CATEGORY_LOGGING
LLMutexTrylock lock(getMutex(), 5);
if (!lock.isLocked())
{
@@ -1388,6 +1397,7 @@ namespace LLError
void Log::flush(const std::ostringstream& out, const CallSite& site)
{
+ LL_PROFILE_ZONE_SCOPED_CATEGORY_LOGGING
LLMutexTrylock lock(getMutex(),5);
if (!lock.isLocked())
{
diff --git a/indra/llcommon/llerror.h b/indra/llcommon/llerror.h
index d439136ca8..d06c0e2132 100644
--- a/indra/llcommon/llerror.h
+++ b/indra/llcommon/llerror.h
@@ -35,7 +35,9 @@
#include "stdtypes.h"
+#include "llprofiler.h"
#include "llpreprocessor.h"
+
#include
const int LL_ERR_NOERR = 0;
@@ -348,7 +350,8 @@ typedef LLError::NoClassInfo _LL_CLASS_TO_LOG;
// if (condition) LL_INFOS() << "True" << LL_ENDL; else LL_INFOS()() << "False" << LL_ENDL;
#define lllog(level, once, ...) \
- do { \
+ do { \
+ LL_PROFILE_ZONE_NAMED("lllog"); \
const char* tags[] = {"", ##__VA_ARGS__}; \
static LLError::CallSite _site(lllog_site_args_(level, once, tags)); \
lllog_test_()
diff --git a/indra/llcommon/llerrorcontrol.h b/indra/llcommon/llerrorcontrol.h
index e87bb7bf35..57f10b7895 100644
--- a/indra/llcommon/llerrorcontrol.h
+++ b/indra/llcommon/llerrorcontrol.h
@@ -190,6 +190,7 @@ namespace LLError
{}
void recordMessage(LLError::ELevel level, const std::string& message) override
{
+ LL_PROFILE_ZONE_SCOPED
mCallable(level, message);
}
private:
diff --git a/indra/llcommon/lleventfilter.h b/indra/llcommon/lleventfilter.h
index 48c2570732..7613850fb2 100644
--- a/indra/llcommon/lleventfilter.h
+++ b/indra/llcommon/lleventfilter.h
@@ -429,6 +429,8 @@ public:
// path, then stores it to mTarget.
virtual bool post(const LLSD& event)
{
+ LL_PROFILE_ZONE_SCOPED
+
// Extract the element specified by 'mPath' from 'event'. To perform a
// generic type-appropriate store through mTarget, construct an
// LLSDParam and store that, thus engaging LLSDParam's custom
diff --git a/indra/llcommon/llexception.cpp b/indra/llcommon/llexception.cpp
index b584b0ff8b..46560b5e4c 100644
--- a/indra/llcommon/llexception.cpp
+++ b/indra/llcommon/llexception.cpp
@@ -97,6 +97,11 @@ static const U32 STATUS_MSC_EXCEPTION = 0xE06D7363; // compiler specific
U32 msc_exception_filter(U32 code, struct _EXCEPTION_POINTERS *exception_infop)
{
+ const auto stack = to_string(boost::stacktrace::stacktrace());
+ LL_WARNS() << "SEH Exception handled (that probably shouldn't be): Code " << code
+ << "\n Stack trace: \n"
+ << stack << LL_ENDL;
+
if (code == STATUS_MSC_EXCEPTION)
{
// C++ exception, go on
diff --git a/indra/llcommon/llfasttimer.cpp b/indra/llcommon/llfasttimer.cpp
index 5b6a7b82f8..2612d0f07c 100644
--- a/indra/llcommon/llfasttimer.cpp
+++ b/indra/llcommon/llfasttimer.cpp
@@ -191,29 +191,30 @@ TimeBlockTreeNode& BlockTimerStatHandle::getTreeNode() const
}
+
void BlockTimer::bootstrapTimerTree()
{
- for (auto& base : BlockTimerStatHandle::instance_snapshot())
- {
- // because of indirect derivation from LLInstanceTracker, have to downcast
-	BlockTimerStatHandle& timer = static_cast<BlockTimerStatHandle&>(base);
- if (&timer == &BlockTimer::getRootTimeBlock()) continue;
+ for (auto& base : BlockTimerStatHandle::instance_snapshot())
+ {
+ // because of indirect derivation from LLInstanceTracker, have to downcast
+    BlockTimerStatHandle& timer = static_cast<BlockTimerStatHandle&>(base);
+ if (&timer == &BlockTimer::getRootTimeBlock()) continue;
- // bootstrap tree construction by attaching to last timer to be on stack
- // when this timer was called
- if (timer.getParent() == &BlockTimer::getRootTimeBlock())
- {
- TimeBlockAccumulator& accumulator = timer.getCurrentAccumulator();
+ // bootstrap tree construction by attaching to last timer to be on stack
+ // when this timer was called
+ if (timer.getParent() == &BlockTimer::getRootTimeBlock())
+ {
+ TimeBlockAccumulator& accumulator = timer.getCurrentAccumulator();
- if (accumulator.mLastCaller)
- {
- timer.setParent(accumulator.mLastCaller);
- accumulator.mParent = accumulator.mLastCaller;
- }
- // no need to push up tree on first use, flag can be set spuriously
- accumulator.mMoveUpTree = false;
- }
- }
+ if (accumulator.mLastCaller)
+ {
+ timer.setParent(accumulator.mLastCaller);
+ accumulator.mParent = accumulator.mLastCaller;
+ }
+ // no need to push up tree on first use, flag can be set spuriously
+ accumulator.mMoveUpTree = false;
+ }
+ }
}
// bump timers up tree if they have been flagged as being in the wrong place
@@ -221,6 +222,7 @@ void BlockTimer::bootstrapTimerTree()
// this preserves partial order derived from current frame's observations
void BlockTimer::incrementalUpdateTimerTree()
{
+ LL_PROFILE_ZONE_SCOPED_CATEGORY_STATS;
for(block_timer_tree_df_post_iterator_t it = begin_block_timer_tree_df_post(BlockTimer::getRootTimeBlock());
it != end_block_timer_tree_df_post();
++it)
@@ -260,7 +262,8 @@ void BlockTimer::incrementalUpdateTimerTree()
void BlockTimer::updateTimes()
- {
+{
+ LL_PROFILE_ZONE_SCOPED_CATEGORY_STATS;
// walk up stack of active timers and accumulate current time while leaving timing structures active
    BlockTimerStackRecord* stack_record = LLThreadLocalSingletonPointer<BlockTimerStackRecord>::getInstance();
if (!stack_record) return;
@@ -271,7 +274,7 @@ void BlockTimer::updateTimes()
while(cur_timer
&& cur_timer->mParentTimerData.mActiveTimer != cur_timer) // root defined by parent pointing to self
- {
+ {
U64 cumulative_time_delta = cur_time - cur_timer->mStartTime;
cur_timer->mStartTime = cur_time;
diff --git a/indra/llcommon/llfasttimer.h b/indra/llcommon/llfasttimer.h
index dfc63d08a2..9bd93d7240 100644
--- a/indra/llcommon/llfasttimer.h
+++ b/indra/llcommon/llfasttimer.h
@@ -38,7 +38,10 @@
#define LL_FAST_TIMER_ON 1
#define LL_FASTTIMER_USE_RDTSC 1
+// NOTE: Also see llprofiler.h
+#if !defined(LL_PROFILER_CONFIGURATION)
#define LL_RECORD_BLOCK_TIME(timer_stat) const LLTrace::BlockTimer& LL_GLUE_TOKENS(block_time_recorder, __LINE__)(LLTrace::timeThisBlock(timer_stat)); (void)LL_GLUE_TOKENS(block_time_recorder, __LINE__);
+#endif // LL_PROFILER_CONFIGURATION
namespace LLTrace
{
diff --git a/indra/llcommon/llframetimer.cpp b/indra/llcommon/llframetimer.cpp
index 1e9920746b..c54029e8b4 100644
--- a/indra/llcommon/llframetimer.cpp
+++ b/indra/llcommon/llframetimer.cpp
@@ -29,6 +29,11 @@
#include "llframetimer.h"
+// We don't bother building a stand alone lib; we just need to include the one source file for Tracy support
+#if LL_PROFILER_CONFIGURATION == LL_PROFILER_CONFIG_TRACY || LL_PROFILER_CONFIGURATION == LL_PROFILER_CONFIG_TRACY_FAST_TIMER
+ #include "TracyClient.cpp"
+#endif // LL_PROFILER_CONFIGURATION
+
// Static members
//LLTimer LLFrameTimer::sInternalTimer;
U64 LLFrameTimer::sStartTotalTime = totalTime();
diff --git a/indra/llcommon/llinstancetracker.h b/indra/llcommon/llinstancetracker.h
index 402333cca7..02535a59e7 100644
--- a/indra/llcommon/llinstancetracker.h
+++ b/indra/llcommon/llinstancetracker.h
@@ -83,13 +83,34 @@ class LLInstanceTracker
    typedef llthread::LockStatic<StaticData> LockStatic;
public:
+    using ptr_t  = std::shared_ptr<T>;
+    using weak_t = std::weak_ptr<T>;
+
+ /**
+ * Storing a dumb T* somewhere external is a bad idea, since
+ * LLInstanceTracker subclasses are explicitly destroyed rather than
+ * managed by smart pointers. It's legal to declare stack instances of an
+ * LLInstanceTracker subclass. But it's reasonable to store a
+ * std::weak_ptr, which will become invalid when the T instance is
+ * destroyed.
+ */
+ weak_t getWeak()
+ {
+ return mSelf;
+ }
+
+ static S32 instanceCount()
+ {
+ return LockStatic()->mMap.size();
+ }
+
    // snapshot of std::pair<const KEY, std::shared_ptr<T>> pairs
class snapshot
{
// It's very important that what we store in this snapshot are
// weak_ptrs, NOT shared_ptrs. That's how we discover whether any
// instance has been deleted during the lifespan of a snapshot.
-        typedef std::vector<std::pair<const KEY, std::weak_ptr<T>>> VectorType;
+        typedef std::vector<std::pair<const KEY, weak_t>> VectorType;
// Dereferencing our iterator produces a std::shared_ptr for each
// instance that still exists. Since we store weak_ptrs, that involves
// two chained transformations:
@@ -98,7 +119,7 @@ public:
// It is very important that we filter lazily, that is, during
// traversal. Any one of our stored weak_ptrs might expire during
// traversal.
-        typedef std::pair<const KEY, std::shared_ptr<T>> strong_pair;
+        typedef std::pair<const KEY, ptr_t> strong_pair;
// Note for future reference: nat has not yet had any luck (up to
// Boost 1.67) trying to use boost::transform_iterator with a hand-
// coded functor, only with actual functions. In my experience, an
@@ -202,17 +223,12 @@ public:
iterator end() { return iterator(snapshot::end(), key_getter); }
};
- static T* getInstance(const KEY& k)
+ static ptr_t getInstance(const KEY& k)
{
LockStatic lock;
const InstanceMap& map(lock->mMap);
typename InstanceMap::const_iterator found = map.find(k);
- return (found == map.end()) ? NULL : found->second.get();
- }
-
- static S32 instanceCount()
- {
- return LockStatic()->mMap.size();
+ return (found == map.end()) ? NULL : found->second;
}
protected:
@@ -222,7 +238,9 @@ protected:
// shared_ptr, so give it a no-op deleter. We store shared_ptrs in our
// InstanceMap specifically so snapshot can store weak_ptrs so we can
// detect deletions during traversals.
-        std::shared_ptr<T> ptr(static_cast<T*>(this), [](T*){});
+        ptr_t ptr(static_cast<T*>(this), [](T*){});
+ // save corresponding weak_ptr for future reference
+ mSelf = ptr;
LockStatic lock;
add_(lock, key, ptr);
}
@@ -257,7 +275,7 @@ private:
static std::string report(const char* key) { return report(std::string(key)); }
// caller must instantiate LockStatic
-    void add_(LockStatic& lock, const KEY& key, const std::shared_ptr<T>& ptr)
+    void add_(LockStatic& lock, const KEY& key, const ptr_t& ptr)
{
mInstanceKey = key;
InstanceMap& map = lock->mMap;
@@ -281,7 +299,7 @@ private:
break;
}
}
-    std::shared_ptr<T> remove_(LockStatic& lock)
+    ptr_t remove_(LockStatic& lock)
{
InstanceMap& map = lock->mMap;
typename InstanceMap::iterator iter = map.find(mInstanceKey);
@@ -295,6 +313,9 @@ private:
}
private:
+ // Storing a weak_ptr to self is a bit like deriving from
+ // std::enable_shared_from_this(), except more explicit.
+ weak_t mSelf;
KEY mInstanceKey;
};
@@ -326,6 +347,9 @@ class LLInstanceTracker
    typedef llthread::LockStatic<StaticData> LockStatic;
public:
+    using ptr_t  = std::shared_ptr<T>;
+    using weak_t = std::weak_ptr<T>;
+
/**
* Storing a dumb T* somewhere external is a bad idea, since
* LLInstanceTracker subclasses are explicitly destroyed rather than
@@ -334,12 +358,15 @@ public:
* std::weak_ptr, which will become invalid when the T instance is
* destroyed.
*/
-    std::weak_ptr<T> getWeak()
+    weak_t getWeak()
{
return mSelf;
}
- static S32 instanceCount() { return LockStatic()->mSet.size(); }
+ static S32 instanceCount()
+ {
+ return LockStatic()->mSet.size();
+ }
    // snapshot of std::shared_ptr<T> pointers
class snapshot
@@ -347,7 +374,7 @@ public:
// It's very important that what we store in this snapshot are
// weak_ptrs, NOT shared_ptrs. That's how we discover whether any
// instance has been deleted during the lifespan of a snapshot.
-        typedef std::vector<std::weak_ptr<T>> VectorType;
+        typedef std::vector<weak_t> VectorType;
// Dereferencing our iterator produces a std::shared_ptr for each
// instance that still exists. Since we store weak_ptrs, that involves
// two chained transformations:
@@ -453,7 +480,7 @@ protected:
private:
// Storing a weak_ptr to self is a bit like deriving from
// std::enable_shared_from_this(), except more explicit.
-    std::weak_ptr<T> mSelf;
+    weak_t mSelf;
};
#endif
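
A sketch of the revised lookup API (the Widget class is hypothetical): getInstance() now hands back a shared_ptr (ptr_t) rather than a raw T*, and getWeak() provides a weak_ptr that expires when the tracked instance is destroyed, matching the llleaplistener.cpp call-site change below.

    #include "llinstancetracker.h"
    #include <string>

    class Widget : public LLInstanceTracker<Widget, std::string>
    {
    public:
        Widget(const std::string& name)
            : LLInstanceTracker<Widget, std::string>(name) {}
    };

    void lookup_example()
    {
        Widget status("status_bar");                       // stack instance, tracked by key

        auto found = Widget::getInstance("status_bar");    // ptr_t (shared_ptr), empty if absent
        if (found)
        {
            // Cache a weak_t rather than a raw pointer; it becomes invalid
            // once the instance is destroyed.
            Widget::weak_t cached = found->getWeak();
            (void)cached;
        }
    }
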
diff --git a/indra/llcommon/llleaplistener.cpp b/indra/llcommon/llleaplistener.cpp
index 3e6ce9092c..11bfec1b31 100644
--- a/indra/llcommon/llleaplistener.cpp
+++ b/indra/llcommon/llleaplistener.cpp
@@ -220,7 +220,7 @@ void LLLeapListener::getAPI(const LLSD& request) const
{
Response reply(LLSD(), request);
- LLEventAPI* found = LLEventAPI::getInstance(request["api"]);
+ auto found = LLEventAPI::getInstance(request["api"]);
if (found)
{
reply["name"] = found->getName();
diff --git a/indra/llcommon/llmemory.cpp b/indra/llcommon/llmemory.cpp
index ea84e4c1ea..849867586a 100644
--- a/indra/llcommon/llmemory.cpp
+++ b/indra/llcommon/llmemory.cpp
@@ -82,6 +82,7 @@ void LLMemory::initMaxHeapSizeGB(F32Gigabytes max_heap_size)
//static
void LLMemory::updateMemoryInfo()
{
+ LL_PROFILE_ZONE_SCOPED
#if LL_WINDOWS
PROCESS_MEMORY_COUNTERS counters;
@@ -145,6 +146,7 @@ void* LLMemory::tryToAlloc(void* address, U32 size)
//static
void LLMemory::logMemoryInfo(BOOL update)
{
+ LL_PROFILE_ZONE_SCOPED
if(update)
{
updateMemoryInfo() ;
diff --git a/indra/llcommon/llmemory.h b/indra/llcommon/llmemory.h
index 24f86cc11e..ac6c969d70 100644
--- a/indra/llcommon/llmemory.h
+++ b/indra/llcommon/llmemory.h
@@ -101,6 +101,29 @@ template <typename T> T* LL_NEXT_ALIGNED_ADDRESS_64(T* address)
#define LL_ALIGN_16(var) LL_ALIGN_PREFIX(16) var LL_ALIGN_POSTFIX(16)
+#define LL_ALIGN_NEW \
+public: \
+ void* operator new(size_t size) \
+ { \
+ return ll_aligned_malloc_16(size); \
+ } \
+ \
+ void operator delete(void* ptr) \
+ { \
+ ll_aligned_free_16(ptr); \
+ } \
+ \
+ void* operator new[](size_t size) \
+ { \
+ return ll_aligned_malloc_16(size); \
+ } \
+ \
+ void operator delete[](void* ptr) \
+ { \
+ ll_aligned_free_16(ptr); \
+ }
+
+
//------------------------------------------------------------------------------------------------
//------------------------------------------------------------------------------------------------
// for enable buffer overrun detection predefine LL_DEBUG_BUFFER_OVERRUN in current library
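
The LLJointStateBlender change in llpose.h above shows the intended pattern; here is a compact sketch (hypothetical class) of how the alignment macros combine, assuming they all come from llmemory.h:

    #include "llmemory.h"

    LL_ALIGN_PREFIX(16)                  // align the type itself to 16 bytes
    class ExampleBlender
    {
        LL_ALIGN_NEW                     // heap instances use ll_aligned_malloc_16/free_16
    public:
        LL_ALIGN_16(float mCached[4]);   // keep this member on a 16-byte boundary
    } LL_ALIGN_POSTFIX(16);
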
@@ -113,8 +136,9 @@ template <typename T> T* LL_NEXT_ALIGNED_ADDRESS_64(T* address)
#else
inline void* ll_aligned_malloc_fallback( size_t size, int align )
{
+ LL_PROFILE_ZONE_SCOPED_CATEGORY_MEMORY;
#if defined(LL_WINDOWS)
- return _aligned_malloc(size, align);
+ void* ret = _aligned_malloc(size, align);
#else
char* aligned = NULL;
void* mem = malloc( size + (align - 1) + sizeof(void*) );
@@ -125,12 +149,16 @@ template <typename T> T* LL_NEXT_ALIGNED_ADDRESS_64(T* address)
((void**)aligned)[-1] = mem;
}
- return aligned;
+ void* ret = aligned;
#endif
+ LL_PROFILE_ALLOC(ret, size);
+ return ret;
}
inline void ll_aligned_free_fallback( void* ptr )
{
+ LL_PROFILE_ZONE_SCOPED_CATEGORY_MEMORY;
+ LL_PROFILE_FREE(ptr);
#if defined(LL_WINDOWS)
_aligned_free(ptr);
#else
@@ -146,21 +174,24 @@ template <typename T> T* LL_NEXT_ALIGNED_ADDRESS_64(T* address)
inline void* ll_aligned_malloc_16(size_t size) // returned hunk MUST be freed with ll_aligned_free_16().
{
+ LL_PROFILE_ZONE_SCOPED_CATEGORY_MEMORY;
#if defined(LL_WINDOWS)
- return _aligned_malloc(size, 16);
+ void* ret = _aligned_malloc(size, 16);
#elif defined(LL_DARWIN)
- return malloc(size); // default osx malloc is 16 byte aligned.
+ void* ret = malloc(size); // default osx malloc is 16 byte aligned.
#else
- void *rtn;
- if (LL_LIKELY(0 == posix_memalign(&rtn, 16, size)))
- return rtn;
- else // bad alignment requested, or out of memory
- return NULL;
+ void *ret;
+ if (0 != posix_memalign(&ret, 16, size))
+ return nullptr;
#endif
+ LL_PROFILE_ALLOC(ret, size);
+ return ret;
}
inline void ll_aligned_free_16(void *p)
{
+ LL_PROFILE_ZONE_SCOPED_CATEGORY_MEMORY;
+ LL_PROFILE_FREE(p);
#if defined(LL_WINDOWS)
_aligned_free(p);
#elif defined(LL_DARWIN)
@@ -172,10 +203,12 @@ inline void ll_aligned_free_16(void *p)
inline void* ll_aligned_realloc_16(void* ptr, size_t size, size_t old_size) // returned hunk MUST be freed with ll_aligned_free_16().
{
+ LL_PROFILE_ZONE_SCOPED_CATEGORY_MEMORY;
+ LL_PROFILE_FREE(ptr);
#if defined(LL_WINDOWS)
- return _aligned_realloc(ptr, size, 16);
+ void* ret = _aligned_realloc(ptr, size, 16);
#elif defined(LL_DARWIN)
- return realloc(ptr,size); // default osx malloc is 16 byte aligned.
+ void* ret = realloc(ptr,size); // default osx malloc is 16 byte aligned.
#else
//FIXME: memcpy is SLOW
void* ret = ll_aligned_malloc_16(size);
@@ -188,27 +221,31 @@ inline void* ll_aligned_realloc_16(void* ptr, size_t size, size_t old_size) // r
}
ll_aligned_free_16(ptr);
}
- return ret;
#endif
+    LL_PROFILE_ALLOC(ret, size);   // register the new block; ptr was already freed above
+ return ret;
}
inline void* ll_aligned_malloc_32(size_t size) // returned hunk MUST be freed with ll_aligned_free_32().
{
+ LL_PROFILE_ZONE_SCOPED_CATEGORY_MEMORY;
#if defined(LL_WINDOWS)
- return _aligned_malloc(size, 32);
+ void* ret = _aligned_malloc(size, 32);
#elif defined(LL_DARWIN)
- return ll_aligned_malloc_fallback( size, 32 );
+ void* ret = ll_aligned_malloc_fallback( size, 32 );
#else
- void *rtn;
- if (LL_LIKELY(0 == posix_memalign(&rtn, 32, size)))
- return rtn;
- else // bad alignment requested, or out of memory
- return NULL;
+ void *ret;
+ if (0 != posix_memalign(&ret, 32, size))
+ return nullptr;
#endif
+ LL_PROFILE_ALLOC(ret, size);
+ return ret;
}
inline void ll_aligned_free_32(void *p)
{
+ LL_PROFILE_ZONE_SCOPED_CATEGORY_MEMORY;
+ LL_PROFILE_FREE(p);
#if defined(LL_WINDOWS)
_aligned_free(p);
#elif defined(LL_DARWIN)
@@ -222,29 +259,35 @@ inline void ll_aligned_free_32(void *p)
template<size_t ALIGNMENT>
LL_FORCE_INLINE void* ll_aligned_malloc(size_t size)
{
+ LL_PROFILE_ZONE_SCOPED_CATEGORY_MEMORY;
+ void* ret;
if (LL_DEFAULT_HEAP_ALIGN % ALIGNMENT == 0)
{
- return malloc(size);
+ ret = malloc(size);
+ LL_PROFILE_ALLOC(ret, size);
}
else if (ALIGNMENT == 16)
{
- return ll_aligned_malloc_16(size);
+ ret = ll_aligned_malloc_16(size);
}
else if (ALIGNMENT == 32)
{
- return ll_aligned_malloc_32(size);
+ ret = ll_aligned_malloc_32(size);
}
else
{
- return ll_aligned_malloc_fallback(size, ALIGNMENT);
+ ret = ll_aligned_malloc_fallback(size, ALIGNMENT);
}
+ return ret;
}
template<size_t ALIGNMENT>
LL_FORCE_INLINE void ll_aligned_free(void* ptr)
{
+ LL_PROFILE_ZONE_SCOPED_CATEGORY_MEMORY;
if (ALIGNMENT == LL_DEFAULT_HEAP_ALIGN)
{
+ LL_PROFILE_FREE(ptr);
free(ptr);
}
else if (ALIGNMENT == 16)
@@ -266,6 +309,7 @@ LL_FORCE_INLINE void ll_aligned_free(void* ptr)
//
inline void ll_memcpy_nonaliased_aligned_16(char* __restrict dst, const char* __restrict src, size_t bytes)
{
+ LL_PROFILE_ZONE_SCOPED_CATEGORY_MEMORY;
assert(src != NULL);
assert(dst != NULL);
assert(bytes > 0);
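
Usage sketch for the templated allocator above (hypothetical call sites): ALIGNMENT is a compile-time constant, so the if/else chain collapses to a single branch, and each allocation must be released with the matching ll_aligned_free<ALIGNMENT>().

    #include "llmemory.h"

    void* make_simd_buffer(size_t bytes)
    {
        // 64 is neither the default heap alignment nor 16/32, so this takes
        // the ll_aligned_malloc_fallback() path.
        return ll_aligned_malloc<64>(bytes);
    }

    void release_simd_buffer(void* p)
    {
        ll_aligned_free<64>(p);      // same ALIGNMENT, same fallback path
    }
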
diff --git a/indra/llcommon/llmutex.cpp b/indra/llcommon/llmutex.cpp
index 4d73c04d07..0273dd5970 100644
--- a/indra/llcommon/llmutex.cpp
+++ b/indra/llcommon/llmutex.cpp
@@ -44,6 +44,7 @@ LLMutex::~LLMutex()
void LLMutex::lock()
{
+ LL_PROFILE_ZONE_SCOPED_CATEGORY_THREAD
if(isSelfLocked())
{ //redundant lock
mCount++;
@@ -65,6 +66,7 @@ void LLMutex::lock()
void LLMutex::unlock()
{
+ LL_PROFILE_ZONE_SCOPED_CATEGORY_THREAD
if (mCount > 0)
{ //not the root unlock
mCount--;
@@ -85,6 +87,7 @@ void LLMutex::unlock()
bool LLMutex::isLocked()
{
+ LL_PROFILE_ZONE_SCOPED_CATEGORY_THREAD
if (!mMutex.try_lock())
{
return true;
@@ -108,6 +111,7 @@ LLThread::id_t LLMutex::lockingThread() const
bool LLMutex::trylock()
{
+ LL_PROFILE_ZONE_SCOPED_CATEGORY_THREAD
if(isSelfLocked())
{ //redundant lock
mCount++;
@@ -146,17 +150,20 @@ LLCondition::~LLCondition()
void LLCondition::wait()
{
+ LL_PROFILE_ZONE_SCOPED_CATEGORY_THREAD
std::unique_lock< std::mutex > lock(mMutex);
mCond.wait(lock);
}
void LLCondition::signal()
{
+ LL_PROFILE_ZONE_SCOPED_CATEGORY_THREAD
mCond.notify_one();
}
void LLCondition::broadcast()
{
+ LL_PROFILE_ZONE_SCOPED_CATEGORY_THREAD
mCond.notify_all();
}
@@ -166,6 +173,7 @@ LLMutexTrylock::LLMutexTrylock(LLMutex* mutex)
: mMutex(mutex),
mLocked(false)
{
+ LL_PROFILE_ZONE_SCOPED_CATEGORY_THREAD
if (mMutex)
mLocked = mMutex->trylock();
}
@@ -174,6 +182,7 @@ LLMutexTrylock::LLMutexTrylock(LLMutex* mutex, U32 aTries, U32 delay_ms)
: mMutex(mutex),
mLocked(false)
{
+ LL_PROFILE_ZONE_SCOPED_CATEGORY_THREAD
if (!mMutex)
return;
@@ -188,6 +197,7 @@ LLMutexTrylock::LLMutexTrylock(LLMutex* mutex, U32 aTries, U32 delay_ms)
LLMutexTrylock::~LLMutexTrylock()
{
+ LL_PROFILE_ZONE_SCOPED_CATEGORY_THREAD
if (mMutex && mLocked)
mMutex->unlock();
}
@@ -199,6 +209,7 @@ LLMutexTrylock::~LLMutexTrylock()
//
LLScopedLock::LLScopedLock(std::mutex* mutex) : mMutex(mutex)
{
+ LL_PROFILE_ZONE_SCOPED_CATEGORY_THREAD
if(mutex)
{
mutex->lock();
@@ -217,6 +228,7 @@ LLScopedLock::~LLScopedLock()
void LLScopedLock::unlock()
{
+ LL_PROFILE_ZONE_SCOPED_CATEGORY_THREAD
if(mLocked)
{
mMutex->unlock();
diff --git a/indra/llcommon/llpreprocessor.h b/indra/llcommon/llpreprocessor.h
index b17a8e761a..dc586b0008 100644
--- a/indra/llcommon/llpreprocessor.h
+++ b/indra/llcommon/llpreprocessor.h
@@ -171,7 +171,9 @@
#define LL_DLLIMPORT
#endif // LL_WINDOWS
-#if ! defined(LL_WINDOWS)
+#if __clang__ || ! defined(LL_WINDOWS)
+// Only on Windows, and only with the Microsoft compiler (vs. clang) is
+// wchar_t potentially not a distinct type.
#define LL_WCHAR_T_NATIVE 1
#else // LL_WINDOWS
// https://docs.microsoft.com/en-us/cpp/preprocessor/predefined-macros
diff --git a/indra/llcommon/llprocessor.cpp b/indra/llcommon/llprocessor.cpp
index 5d16a4b74d..818df07bb2 100644
--- a/indra/llcommon/llprocessor.cpp
+++ b/indra/llcommon/llprocessor.cpp
@@ -44,20 +44,6 @@
#include "llsd.h"
-#if LL_MSVC && _M_X64
-# define LL_X86_64 1
-# define LL_X86 1
-#elif LL_MSVC && _M_IX86
-# define LL_X86 1
-#elif LL_GNUC && ( defined(__amd64__) || defined(__x86_64__) )
-# define LL_X86_64 1
-# define LL_X86 1
-#elif LL_GNUC && ( defined(__i386__) )
-# define LL_X86 1
-#elif LL_GNUC && ( defined(__powerpc__) || defined(__ppc__) )
-# define LL_PPC 1
-#endif
-
class LLProcessorInfoImpl; // foward declaration for the mImpl;
namespace
diff --git a/indra/llcommon/llprocessor.h b/indra/llcommon/llprocessor.h
index 90e5bc59ee..b77eb22c3a 100644
--- a/indra/llcommon/llprocessor.h
+++ b/indra/llcommon/llprocessor.h
@@ -29,6 +29,20 @@
#define LLPROCESSOR_H
#include "llunits.h"
+#if LL_MSVC && _M_X64
+# define LL_X86_64 1
+# define LL_X86 1
+#elif LL_MSVC && _M_IX86
+# define LL_X86 1
+#elif LL_GNUC && ( defined(__amd64__) || defined(__x86_64__) )
+# define LL_X86_64 1
+# define LL_X86 1
+#elif LL_GNUC && ( defined(__i386__) )
+# define LL_X86 1
+#elif LL_GNUC && ( defined(__powerpc__) || defined(__ppc__) )
+# define LL_PPC 1
+#endif
+
class LLProcessorInfoImpl;
class LL_COMMON_API LLProcessorInfo
diff --git a/indra/llcommon/llprofiler.h b/indra/llcommon/llprofiler.h
new file mode 100644
index 0000000000..f9d7ae7ce4
--- /dev/null
+++ b/indra/llcommon/llprofiler.h
@@ -0,0 +1,151 @@
+/**
+ * @file llprofiler.h
+ * @brief Wrapper for Tracy and/or other profilers
+ *
+ * $LicenseInfo:firstyear=2021&license=viewerlgpl$
+ * Second Life Viewer Source Code
+ * Copyright (C) 2021, Linden Research, Inc.
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation;
+ * version 2.1 of the License only.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ *
+ * Linden Research, Inc., 945 Battery Street, San Francisco, CA 94111 USA
+ * $/LicenseInfo$
+ */
+
+#ifndef LL_PROFILER_H
+#define LL_PROFILER_H
+
+// If you use the default macros LL_PROFILE_ZONE_SCOPED and LL_PROFILE_ZONE_NAMED to profile code ...
+//
+// void foo()
+// {
+// LL_PROFILE_ZONE_SCOPED;
+// :
+//
+// {
+// LL_PROFILE_ZONE_NAMED("widget bar");
+// :
+// }
+// {
+// LL_PROFILE_ZONE_NAMED("widget qux");
+// :
+// }
+// }
+//
+// ... please be aware that ALL these will show up in a Tracy capture which can quickly exhaust memory.
+// Instead, use LL_PROFILE_ZONE_SCOPED_CATEGORY_* and LL_PROFILE_ZONE_NAMED_CATEGORY_* to profile code ...
+//
+// void foo()
+// {
+// LL_PROFILE_ZONE_SCOPED_CATEGORY_UI;
+// :
+//
+// {
+// LL_PROFILE_ZONE_NAMED_CATEGORY_UI("widget bar");
+// :
+// }
+// {
+// LL_PROFILE_ZONE_NAMED_CATEGORY_UI("widget qux");
+// :
+// }
+// }
+//
+// ... as these can be selectively turned on/off. This will minimize memory usage and visual clutter in a Tracy capture.
+// See llprofilercategories.h for more details on profiling categories.
+
+#define LL_PROFILER_CONFIG_NONE 0 // No profiling
+#define LL_PROFILER_CONFIG_FAST_TIMER 1 // Profiling on: Only Fast Timers
+#define LL_PROFILER_CONFIG_TRACY 2 // Profiling on: Only Tracy
+#define LL_PROFILER_CONFIG_TRACY_FAST_TIMER 3 // Profiling on: Fast Timers + Tracy
+
+#ifndef LL_PROFILER_CONFIGURATION
+#define LL_PROFILER_CONFIGURATION LL_PROFILER_CONFIG_FAST_TIMER
+#endif
+
+extern thread_local bool gProfilerEnabled;
+
+#if defined(LL_PROFILER_CONFIGURATION) && (LL_PROFILER_CONFIGURATION > LL_PROFILER_CONFIG_NONE)
+ #if LL_PROFILER_CONFIGURATION == LL_PROFILER_CONFIG_TRACY || LL_PROFILER_CONFIGURATION == LL_PROFILER_CONFIG_TRACY_FAST_TIMER
+ #define TRACY_ENABLE 1
+// Normally these would be enabled but we want to be able to build any viewer with Tracy enabled and run the Tracy server on another machine
+// They must be undefined in order to work across multiple machines
+// #define TRACY_NO_BROADCAST 1
+// #define TRACY_ONLY_LOCALHOST 1
+ #define TRACY_ONLY_IPV4 1
+ #include "Tracy.hpp"
+
+ // Mutually exclusive with detailed memory tracing
+ #define LL_PROFILER_ENABLE_TRACY_OPENGL 0
+ #endif
+
+ #if LL_PROFILER_CONFIGURATION == LL_PROFILER_CONFIG_TRACY
+ #define LL_PROFILER_FRAME_END FrameMark
+ #define LL_PROFILER_SET_THREAD_NAME( name ) tracy::SetThreadName( name ); gProfilerEnabled = true;
+ #define LL_RECORD_BLOCK_TIME(name) ZoneScoped // Want descriptive names; was: ZoneNamedN( ___tracy_scoped_zone, #name, true );
+ #define LL_PROFILE_ZONE_NAMED(name) ZoneNamedN( ___tracy_scoped_zone, name, true );
+ #define LL_PROFILE_ZONE_NAMED_COLOR(name,color) ZoneNamedNC( ___tracy_scopped_zone, name, color, true ) // RGB
+ #define LL_PROFILE_ZONE_SCOPED ZoneScoped
+
+ #define LL_PROFILE_ZONE_NUM( val ) ZoneValue( val )
+ #define LL_PROFILE_ZONE_TEXT( text, size ) ZoneText( text, size )
+
+        #define LL_PROFILE_ZONE_ERR(name)           LL_PROFILE_ZONE_NAMED_COLOR( name, 0xFF0000 )  // RGB red
+        #define LL_PROFILE_ZONE_INFO(name)          LL_PROFILE_ZONE_NAMED_COLOR( name, 0x00FFFF )  // RGB cyan
+        #define LL_PROFILE_ZONE_WARN(name)          LL_PROFILE_ZONE_NAMED_COLOR( name, 0xFFFF00 )  // RGB yellow
+ #define LL_PROFILE_ALLOC(ptr, size) TracyAlloc(ptr, size)
+ #define LL_PROFILE_FREE(ptr) TracyFree(ptr)
+ #endif
+ #if LL_PROFILER_CONFIGURATION == LL_PROFILER_CONFIG_FAST_TIMER
+ #define LL_PROFILER_FRAME_END
+ #define LL_PROFILER_SET_THREAD_NAME( name ) (void)(name)
+ #define LL_RECORD_BLOCK_TIME(name) const LLTrace::BlockTimer& LL_GLUE_TOKENS(block_time_recorder, __LINE__)(LLTrace::timeThisBlock(name)); (void)LL_GLUE_TOKENS(block_time_recorder, __LINE__);
+ #define LL_PROFILE_ZONE_NAMED(name) // LL_PROFILE_ZONE_NAMED is a no-op when Tracy is disabled
+ #define LL_PROFILE_ZONE_SCOPED // LL_PROFILE_ZONE_SCOPED is a no-op when Tracy is disabled
+ #define LL_PROFILE_ZONE_COLOR(name,color) // LL_RECORD_BLOCK_TIME(name)
+
+ #define LL_PROFILE_ZONE_NUM( val ) (void)( val ); // Not supported
+ #define LL_PROFILE_ZONE_TEXT( text, size ) (void)( text ); void( size ); // Not supported
+
+ #define LL_PROFILE_ZONE_ERR(name) (void)(name); // Not supported
+ #define LL_PROFILE_ZONE_INFO(name) (void)(name); // Not supported
+ #define LL_PROFILE_ZONE_WARN(name) (void)(name); // Not supported
+ #define LL_PROFILE_ALLOC(ptr, size) (void)(ptr); (void)(size);
+ #define LL_PROFILE_FREE(ptr) (void)(ptr);
+ #endif
+ #if LL_PROFILER_CONFIGURATION == LL_PROFILER_CONFIG_TRACY_FAST_TIMER
+ #define LL_PROFILER_FRAME_END FrameMark
+ #define LL_PROFILER_SET_THREAD_NAME( name ) tracy::SetThreadName( name ); gProfilerEnabled = true;
+ #define LL_RECORD_BLOCK_TIME(name) ZoneNamedN(___tracy_scoped_zone, #name, true); const LLTrace::BlockTimer& LL_GLUE_TOKENS(block_time_recorder, __LINE__)(LLTrace::timeThisBlock(name)); (void)LL_GLUE_TOKENS(block_time_recorder, __LINE__);
+ #define LL_PROFILE_ZONE_NAMED(name) ZoneNamedN( ___tracy_scoped_zone, #name, true );
+ #define LL_PROFILE_ZONE_NAMED_COLOR(name,color) ZoneNamedNC( ___tracy_scopped_zone, name, color, true ) // RGB
+ #define LL_PROFILE_ZONE_SCOPED ZoneScoped
+
+ #define LL_PROFILE_ZONE_NUM( val ) ZoneValue( val )
+ #define LL_PROFILE_ZONE_TEXT( text, size ) ZoneText( text, size )
+
+        #define LL_PROFILE_ZONE_ERR(name)           LL_PROFILE_ZONE_NAMED_COLOR( name, 0xFF0000 )  // RGB red
+        #define LL_PROFILE_ZONE_INFO(name)          LL_PROFILE_ZONE_NAMED_COLOR( name, 0x00FFFF )  // RGB cyan
+        #define LL_PROFILE_ZONE_WARN(name)          LL_PROFILE_ZONE_NAMED_COLOR( name, 0xFFFF00 )  // RGB yellow
+ #define LL_PROFILE_ALLOC(ptr, size) TracyAlloc(ptr, size)
+ #define LL_PROFILE_FREE(ptr) TracyFree(ptr)
+ #endif
+#else
+ #define LL_PROFILER_FRAME_END
+ #define LL_PROFILER_SET_THREAD_NAME( name ) (void)(name)
+#endif // LL_PROFILER
+
+#include "llprofilercategories.h"
+
+#endif // LL_PROFILER_H
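
A call-site sketch (hypothetical function) of how the configurations above change behavior, assuming LL_PROFILER_CONFIGURATION is supplied as a compile definition by the build:

    #include "llprofiler.h"

    void render_one_frame()
    {
        // Tracy zone under the Tracy configurations; a no-op under
        // LL_PROFILER_CONFIG_FAST_TIMER.
        LL_PROFILE_ZONE_SCOPED;
        {
            LL_PROFILE_ZONE_NAMED("physics");   // named sub-zone when Tracy is on
            // ... physics step ...
        }
        LL_PROFILER_FRAME_END;                  // FrameMark under Tracy, empty otherwise
    }
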
diff --git a/indra/llcommon/llprofilercategories.h b/indra/llcommon/llprofilercategories.h
new file mode 100644
index 0000000000..8db29468cc
--- /dev/null
+++ b/indra/llcommon/llprofilercategories.h
@@ -0,0 +1,280 @@
+/**
+ * @file llprofilercategories.h
+ * @brief Profiling categories to minimize Tracy memory usage when viewing captures.
+ *
+ * $LicenseInfo:firstyear=2022&license=viewerlgpl$
+ * Second Life Viewer Source Code
+ * Copyright (C) 2022, Linden Research, Inc.
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation;
+ * version 2.1 of the License only.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ *
+ * Linden Research, Inc., 945 Battery Street, San Francisco, CA 94111 USA
+ * $/LicenseInfo$
+ */
+
+#ifndef LL_PROFILER_CATEGORIES_H
+#define LL_PROFILER_CATEGORIES_H
+
+// A Tracy capture can quickly consume memory. Use these defines to selectively turn on/off Tracy profiling for these categories.
+// The biggest memory usage ones are:
+//
+// LL_PROFILER_CATEGORY_ENABLE_DRAWPOOL
+// LL_PROFILER_CATEGORY_ENABLE_LLSD
+// LL_PROFILER_CATEGORY_ENABLE_MEMORY
+//      LL_PROFILER_CATEGORY_ENABLE_SHADER
+//
+// NOTE: You can still manually use:
+// LL_PROFILE_ZONE_SCOPED();
+// LL_PROFILE_ZONE_NAMED("name");
+// but just be aware that those will ALWAYS show up in a Tracy capture
+// a) using more memory, and
+// b) adding visual clutter.
+#define LL_PROFILER_CATEGORY_ENABLE_APP 1
+#define LL_PROFILER_CATEGORY_ENABLE_AVATAR 1
+#define LL_PROFILER_CATEGORY_ENABLE_DISPLAY 1
+#define LL_PROFILER_CATEGORY_ENABLE_DRAWABLE 1
+#define LL_PROFILER_CATEGORY_ENABLE_DRAWPOOL 1
+#define LL_PROFILER_CATEGORY_ENABLE_ENVIRONMENT 1
+#define LL_PROFILER_CATEGORY_ENABLE_FACE 1
+#define LL_PROFILER_CATEGORY_ENABLE_LLSD 1
+#define LL_PROFILER_CATEGORY_ENABLE_LOGGING 1
+#define LL_PROFILER_CATEGORY_ENABLE_MATERIAL 1
+#define LL_PROFILER_CATEGORY_ENABLE_MEDIA 1
+#define LL_PROFILER_CATEGORY_ENABLE_MEMORY 1
+#define LL_PROFILER_CATEGORY_ENABLE_NETWORK 1
+#define LL_PROFILER_CATEGORY_ENABLE_OCTREE 1
+#define LL_PROFILER_CATEGORY_ENABLE_PIPELINE 1
+#define LL_PROFILER_CATEGORY_ENABLE_SHADER 1
+#define LL_PROFILER_CATEGORY_ENABLE_SPATIAL 1
+#define LL_PROFILER_CATEGORY_ENABLE_STATS 1
+#define LL_PROFILER_CATEGORY_ENABLE_STRING 1
+#define LL_PROFILER_CATEGORY_ENABLE_TEXTURE 1
+#define LL_PROFILER_CATEGORY_ENABLE_THREAD 1
+#define LL_PROFILER_CATEGORY_ENABLE_UI 1
+#define LL_PROFILER_CATEGORY_ENABLE_VIEWER 1
+#define LL_PROFILER_CATEGORY_ENABLE_VERTEX 1
+#define LL_PROFILER_CATEGORY_ENABLE_VOLUME 1
+#define LL_PROFILER_CATEGORY_ENABLE_WIN32 1
+
+#if LL_PROFILER_CATEGORY_ENABLE_APP
+ #define LL_PROFILE_ZONE_NAMED_CATEGORY_APP LL_PROFILE_ZONE_NAMED
+ #define LL_PROFILE_ZONE_SCOPED_CATEGORY_APP LL_PROFILE_ZONE_SCOPED
+#else
+ #define LL_PROFILE_ZONE_NAMED_CATEGORY_APP(name)
+ #define LL_PROFILE_ZONE_SCOPED_CATEGORY_APP
+#endif
+
+#if LL_PROFILER_CATEGORY_ENABLE_AVATAR
+ #define LL_PROFILE_ZONE_NAMED_CATEGORY_AVATAR LL_PROFILE_ZONE_NAMED
+ #define LL_PROFILE_ZONE_SCOPED_CATEGORY_AVATAR LL_PROFILE_ZONE_SCOPED
+#else
+ #define LL_PROFILE_ZONE_NAMED_CATEGORY_AVATAR(name)
+ #define LL_PROFILE_ZONE_SCOPED_CATEGORY_AVATAR
+#endif
+
+#if LL_PROFILER_CATEGORY_ENABLE_DISPLAY
+ #define LL_PROFILE_ZONE_NAMED_CATEGORY_DISPLAY LL_PROFILE_ZONE_NAMED
+ #define LL_PROFILE_ZONE_SCOPED_CATEGORY_DISPLAY LL_PROFILE_ZONE_SCOPED
+#else
+ #define LL_PROFILE_ZONE_NAMED_CATEGORY_DISPLAY(name)
+ #define LL_PROFILE_ZONE_SCOPED_CATEGORY_DISPLAY
+#endif
+
+#if LL_PROFILER_CATEGORY_ENABLE_DRAWABLE
+ #define LL_PROFILE_ZONE_NAMED_CATEGORY_DRAWABLE LL_PROFILE_ZONE_NAMED
+ #define LL_PROFILE_ZONE_SCOPED_CATEGORY_DRAWABLE LL_PROFILE_ZONE_SCOPED
+#else
+ #define LL_PROFILE_ZONE_NAMED_CATEGORY_DRAWABLE(name)
+ #define LL_PROFILE_ZONE_SCOPED_CATEGORY_DRAWABLE
+#endif
+
+#if LL_PROFILER_CATEGORY_ENABLE_DRAWPOOL
+ #define LL_PROFILE_ZONE_NAMED_CATEGORY_DRAWPOOL LL_PROFILE_ZONE_NAMED
+ #define LL_PROFILE_ZONE_SCOPED_CATEGORY_DRAWPOOL LL_PROFILE_ZONE_SCOPED
+#else
+ #define LL_PROFILE_ZONE_NAMED_CATEGORY_DRAWPOOL(name)
+ #define LL_PROFILE_ZONE_SCOPED_CATEGORY_DRAWPOOL
+#endif
+
+#if LL_PROFILER_CATEGORY_ENABLE_ENVIRONMENT
+ #define LL_PROFILE_ZONE_NAMED_CATEGORY_ENVIRONMENT LL_PROFILE_ZONE_NAMED
+ #define LL_PROFILE_ZONE_SCOPED_CATEGORY_ENVIRONMENT LL_PROFILE_ZONE_SCOPED
+#else
+ #define LL_PROFILE_ZONE_NAMED_CATEGORY_ENVIRONMENT(name)
+ #define LL_PROFILE_ZONE_SCOPED_CATEGORY_ENVIRONMENT
+#endif
+
+#if LL_PROFILER_CATEGORY_ENABLE_FACE
+ #define LL_PROFILE_ZONE_NAMED_CATEGORY_FACE LL_PROFILE_ZONE_NAMED
+ #define LL_PROFILE_ZONE_SCOPED_CATEGORY_FACE LL_PROFILE_ZONE_SCOPED
+#else
+ #define LL_PROFILE_ZONE_NAMED_CATEGORY_FACE(name)
+ #define LL_PROFILE_ZONE_SCOPED_CATEGORY_FACE
+#endif
+
+#if LL_PROFILER_CATEGORY_ENABLE_LLSD
+ #define LL_PROFILE_ZONE_NAMED_CATEGORY_LLSD LL_PROFILE_ZONE_NAMED
+ #define LL_PROFILE_ZONE_SCOPED_CATEGORY_LLSD LL_PROFILE_ZONE_SCOPED
+#else
+ #define LL_PROFILE_ZONE_NAMED_CATEGORY_LLSD(name)
+ #define LL_PROFILE_ZONE_SCOPED_CATEGORY_LLSD
+#endif
+
+#if LL_PROFILER_CATEGORY_ENABLE_LOGGING
+ #define LL_PROFILE_ZONE_NAMED_CATEGORY_LOGGING LL_PROFILE_ZONE_NAMED
+ #define LL_PROFILE_ZONE_SCOPED_CATEGORY_LOGGING LL_PROFILE_ZONE_SCOPED
+#else
+ #define LL_PROFILE_ZONE_NAMED_CATEGORY_LOGGING(name)
+ #define LL_PROFILE_ZONE_SCOPED_CATEGORY_LOGGING
+#endif
+
+#if LL_PROFILER_CATEGORY_ENABLE_MATERIAL
+ #define LL_PROFILE_ZONE_NAMED_CATEGORY_MATERIAL LL_PROFILE_ZONE_NAMED
+ #define LL_PROFILE_ZONE_SCOPED_CATEGORY_MATERIAL LL_PROFILE_ZONE_SCOPED
+#else
+ #define LL_PROFILE_ZONE_NAMED_CATEGORY_MATERIAL(name)
+ #define LL_PROFILE_ZONE_SCOPED_CATEGORY_MATERIAL
+#endif
+
+#if LL_PROFILER_CATEGORY_ENABLE_MEDIA
+ #define LL_PROFILE_ZONE_NAMED_CATEGORY_MEDIA LL_PROFILE_ZONE_NAMED
+ #define LL_PROFILE_ZONE_SCOPED_CATEGORY_MEDIA LL_PROFILE_ZONE_SCOPED
+#else
+ #define LL_PROFILE_ZONE_NAMED_CATEGORY_MEDIA(name)
+ #define LL_PROFILE_ZONE_SCOPED_CATEGORY_MEDIA
+#endif
+
+#if LL_PROFILER_CATEGORY_ENABLE_MEMORY
+ #define LL_PROFILE_ZONE_NAMED_CATEGORY_MEMORY LL_PROFILE_ZONE_NAMED
+ #define LL_PROFILE_ZONE_SCOPED_CATEGORY_MEMORY LL_PROFILE_ZONE_SCOPED
+#else
+ #define LL_PROFILE_ZONE_NAMED_CATEGORY_MEMORY(name)
+ #define LL_PROFILE_ZONE_SCOPED_CATEGORY_MEMORY
+#endif
+
+#if LL_PROFILER_CATEGORY_ENABLE_NETWORK
+ #define LL_PROFILE_ZONE_NAMED_CATEGORY_NETWORK LL_PROFILE_ZONE_NAMED
+ #define LL_PROFILE_ZONE_SCOPED_CATEGORY_NETWORK LL_PROFILE_ZONE_SCOPED
+#else
+ #define LL_PROFILE_ZONE_NAMED_CATEGORY_NETWORK(name)
+ #define LL_PROFILE_ZONE_SCOPED_CATEGORY_NETWORK
+#endif
+
+#if LL_PROFILER_CATEGORY_ENABLE_OCTREE
+ #define LL_PROFILE_ZONE_NAMED_CATEGORY_OCTREE LL_PROFILE_ZONE_NAMED
+ #define LL_PROFILE_ZONE_SCOPED_CATEGORY_OCTREE LL_PROFILE_ZONE_SCOPED
+#else
+ #define LL_PROFILE_ZONE_NAMED_CATEGORY_OCTREE(name)
+ #define LL_PROFILE_ZONE_SCOPED_CATEGORY_OCTREE
+#endif
+
+#if LL_PROFILER_CATEGORY_ENABLE_PIPELINE
+ #define LL_PROFILE_ZONE_NAMED_CATEGORY_PIPELINE LL_PROFILE_ZONE_NAMED
+ #define LL_PROFILE_ZONE_SCOPED_CATEGORY_PIPELINE LL_PROFILE_ZONE_SCOPED
+#else
+ #define LL_PROFILE_ZONE_NAMED_CATEGORY_PIPELINE(name)
+ #define LL_PROFILE_ZONE_SCOPED_CATEGORY_PIPELINE
+#endif
+
+#if LL_PROFILER_CATEGORY_ENABLE_SHADER
+ #define LL_PROFILE_ZONE_NAMED_CATEGORY_SHADER LL_PROFILE_ZONE_NAMED
+ #define LL_PROFILE_ZONE_SCOPED_CATEGORY_SHADER LL_PROFILE_ZONE_SCOPED
+#else
+ #define LL_PROFILE_ZONE_NAMED_CATEGORY_SHADER(name)
+ #define LL_PROFILE_ZONE_SCOPED_CATEGORY_SHADER
+#endif
+
+#if LL_PROFILER_CATEGORY_ENABLE_SPATIAL
+ #define LL_PROFILE_ZONE_NAMED_CATEGORY_SPATIAL LL_PROFILE_ZONE_NAMED
+ #define LL_PROFILE_ZONE_SCOPED_CATEGORY_SPATIAL LL_PROFILE_ZONE_SCOPED
+#else
+ #define LL_PROFILE_ZONE_NAMED_CATEGORY_SPATIAL(name)
+ #define LL_PROFILE_ZONE_SCOPED_CATEGORY_SPATIAL
+#endif
+
+#if LL_PROFILER_CATEGORY_ENABLE_STATS
+ #define LL_PROFILE_ZONE_NAMED_CATEGORY_STATS LL_PROFILE_ZONE_NAMED
+ #define LL_PROFILE_ZONE_SCOPED_CATEGORY_STATS LL_PROFILE_ZONE_SCOPED
+#else
+ #define LL_PROFILE_ZONE_NAMED_CATEGORY_STATS(name)
+ #define LL_PROFILE_ZONE_SCOPED_CATEGORY_STATS
+#endif
+
+#if LL_PROFILER_CATEGORY_ENABLE_STRING
+ #define LL_PROFILE_ZONE_NAMED_CATEGORY_STRING LL_PROFILE_ZONE_NAMED
+ #define LL_PROFILE_ZONE_SCOPED_CATEGORY_STRING LL_PROFILE_ZONE_SCOPED
+#else
+ #define LL_PROFILE_ZONE_NAMED_CATEGORY_STRING(name)
+ #define LL_PROFILE_ZONE_SCOPED_CATEGORY_STRING
+#endif
+
+#if LL_PROFILER_CATEGORY_ENABLE_TEXTURE
+ #define LL_PROFILE_ZONE_NAMED_CATEGORY_TEXTURE LL_PROFILE_ZONE_NAMED
+ #define LL_PROFILE_ZONE_SCOPED_CATEGORY_TEXTURE LL_PROFILE_ZONE_SCOPED
+#else
+ #define LL_PROFILE_ZONE_NAMED_CATEGORY_TEXTURE(name)
+ #define LL_PROFILE_ZONE_SCOPED_CATEGORY_TEXTURE
+#endif
+
+#if LL_PROFILER_CATEGORY_ENABLE_THREAD
+ #define LL_PROFILE_ZONE_NAMED_CATEGORY_THREAD LL_PROFILE_ZONE_NAMED
+ #define LL_PROFILE_ZONE_SCOPED_CATEGORY_THREAD LL_PROFILE_ZONE_SCOPED
+#else
+ #define LL_PROFILE_ZONE_NAMED_CATEGORY_THREAD(name)
+ #define LL_PROFILE_ZONE_SCOPED_CATEGORY_THREAD
+#endif
+
+#if LL_PROFILER_CATEGORY_ENABLE_UI
+ #define LL_PROFILE_ZONE_NAMED_CATEGORY_UI LL_PROFILE_ZONE_NAMED
+ #define LL_PROFILE_ZONE_SCOPED_CATEGORY_UI LL_PROFILE_ZONE_SCOPED
+#else
+ #define LL_PROFILE_ZONE_NAMED_CATEGORY_UI(name)
+ #define LL_PROFILE_ZONE_SCOPED_CATEGORY_UI
+#endif
+
+#if LL_PROFILER_CATEGORY_ENABLE_VERTEX
+ #define LL_PROFILE_ZONE_NAMED_CATEGORY_VERTEX LL_PROFILE_ZONE_NAMED
+ #define LL_PROFILE_ZONE_SCOPED_CATEGORY_VERTEX LL_PROFILE_ZONE_SCOPED
+#else
+ #define LL_PROFILE_ZONE_NAMED_CATEGORY_VERTEX(name)
+ #define LL_PROFILE_ZONE_SCOPED_CATEGORY_VERTEX
+#endif
+
+#if LL_PROFILER_CATEGORY_ENABLE_VIEWER
+ #define LL_PROFILE_ZONE_NAMED_CATEGORY_VIEWER LL_PROFILE_ZONE_NAMED
+ #define LL_PROFILE_ZONE_SCOPED_CATEGORY_VIEWER LL_PROFILE_ZONE_SCOPED
+#else
+ #define LL_PROFILE_ZONE_NAMED_CATEGORY_VIEWER(name)
+ #define LL_PROFILE_ZONE_SCOPED_CATEGORY_VIEWER
+#endif
+
+#if LL_PROFILER_CATEGORY_ENABLE_VOLUME
+ #define LL_PROFILE_ZONE_NAMED_CATEGORY_VOLUME LL_PROFILE_ZONE_NAMED
+ #define LL_PROFILE_ZONE_SCOPED_CATEGORY_VOLUME LL_PROFILE_ZONE_SCOPED
+#else
+ #define LL_PROFILE_ZONE_NAMED_CATEGORY_VOLUME(name)
+ #define LL_PROFILE_ZONE_SCOPED_CATEGORY_VOLUME
+#endif
+
+#if LL_PROFILER_CATEGORY_ENABLE_WIN32
+ #define LL_PROFILE_ZONE_NAMED_CATEGORY_WIN32 LL_PROFILE_ZONE_NAMED
+ #define LL_PROFILE_ZONE_SCOPED_CATEGORY_WIN32 LL_PROFILE_ZONE_SCOPED
+#else
+ #define LL_PROFILE_ZONE_NAMED_CATEGORY_WIN32(name)
+ #define LL_PROFILE_ZONE_SCOPED_CATEGORY_WIN32
+#endif
+
+#endif // LL_PROFILER_CATEGORIES_H
+
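
A category-tagged call site looks like the ones this changeset adds throughout llcharacter and llcommon; flipping LL_PROFILER_CATEGORY_ENABLE_AVATAR to 0 turns both macros below into no-ops without touching the call site (sketch, hypothetical function):

    #include "llprofiler.h"

    void update_avatars()
    {
        LL_PROFILE_ZONE_SCOPED_CATEGORY_AVATAR;
        {
            LL_PROFILE_ZONE_NAMED_CATEGORY_AVATAR("attachment pass");
            // ... per-attachment work ...
        }
    }
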
diff --git a/indra/llcommon/llrefcount.cpp b/indra/llcommon/llrefcount.cpp
index 29a5ca6f24..5cbd346411 100644
--- a/indra/llcommon/llrefcount.cpp
+++ b/indra/llcommon/llrefcount.cpp
@@ -29,6 +29,9 @@
#include "llerror.h"
+// maximum reference count before sounding memory leak alarm
+const S32 gMaxRefCount = 65536;
+
LLRefCount::LLRefCount(const LLRefCount& other)
: mRef(0)
{
@@ -47,7 +50,7 @@ LLRefCount::LLRefCount() :
LLRefCount::~LLRefCount()
{
- if (mRef != 0)
+ if (mRef != LL_REFCOUNT_FREE && mRef != 0)
{
LL_ERRS() << "deleting non-zero reference" << LL_ENDL;
}
diff --git a/indra/llcommon/llrefcount.h b/indra/llcommon/llrefcount.h
index 7e4af6ea66..2080da1565 100644
--- a/indra/llcommon/llrefcount.h
+++ b/indra/llcommon/llrefcount.h
@@ -37,6 +37,10 @@ class LLMutex;
// see llthread.h for LLThreadSafeRefCount
//----------------------------------------------------------------------------
+//nonsense but recognizable value for freed LLRefCount (aids in debugging)
+#define LL_REFCOUNT_FREE 1234567890
+extern const S32 gMaxRefCount;
+
class LL_COMMON_API LLRefCount
{
protected:
@@ -47,17 +51,25 @@ protected:
public:
LLRefCount();
+ inline void validateRefCount() const
+ {
+ llassert(mRef > 0); // ref count below 0, likely corrupted
+ llassert(mRef < gMaxRefCount); // ref count excessive, likely memory leak
+ }
+
inline void ref() const
{
mRef++;
+ validateRefCount();
}
inline S32 unref() const
{
- llassert(mRef >= 1);
+ validateRefCount();
if (0 == --mRef)
{
- delete this;
+ mRef = LL_REFCOUNT_FREE; // set to nonsense yet recognizable value to aid in debugging
+ delete this;
return 0;
}
return mRef;
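
A sketch of the reference-counting behavior the validation above guards (hypothetical subclass; production code would normally hold these through LLPointer): each ref() asserts the count stays positive and below gMaxRefCount, and the final unref() stamps LL_REFCOUNT_FREE into mRef just before deletion so a use-after-free shows a recognizable value in the debugger.

    #include "llrefcount.h"

    class ExampleRefCounted : public LLRefCount
    {
    public:
        int mPayload = 0;
    };

    void refcount_example()
    {
        ExampleRefCounted* obj = new ExampleRefCounted();
        obj->ref();      // count 1, validateRefCount() passes
        obj->ref();      // count 2
        obj->unref();    // count 1
        obj->unref();    // count reaches 0: mRef set to LL_REFCOUNT_FREE, object deleted
    }
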
diff --git a/indra/llcommon/llsd.cpp b/indra/llcommon/llsd.cpp
index 57b746889d..807b3d13f8 100644
--- a/indra/llcommon/llsd.cpp
+++ b/indra/llcommon/llsd.cpp
@@ -400,6 +400,7 @@ namespace
ImplMap& ImplMap::makeMap(LLSD::Impl*& var)
{
+ LL_PROFILE_ZONE_SCOPED_CATEGORY_LLSD;
if (shared())
{
ImplMap* i = new ImplMap(mData);
@@ -414,18 +415,21 @@ namespace
bool ImplMap::has(const LLSD::String& k) const
{
+ LL_PROFILE_ZONE_SCOPED_CATEGORY_LLSD;
DataMap::const_iterator i = mData.find(k);
return i != mData.end();
}
LLSD ImplMap::get(const LLSD::String& k) const
{
+ LL_PROFILE_ZONE_SCOPED_CATEGORY_LLSD;
DataMap::const_iterator i = mData.find(k);
return (i != mData.end()) ? i->second : LLSD();
}
LLSD ImplMap::getKeys() const
{
+ LL_PROFILE_ZONE_SCOPED_CATEGORY_LLSD;
LLSD keys = LLSD::emptyArray();
DataMap::const_iterator iter = mData.begin();
while (iter != mData.end())
@@ -438,11 +442,13 @@ namespace
void ImplMap::insert(const LLSD::String& k, const LLSD& v)
{
+ LL_PROFILE_ZONE_SCOPED_CATEGORY_LLSD;
mData.insert(DataMap::value_type(k, v));
}
void ImplMap::erase(const LLSD::String& k)
{
+ LL_PROFILE_ZONE_SCOPED_CATEGORY_LLSD;
mData.erase(k);
}
@@ -684,6 +690,7 @@ const LLSD::Impl& LLSD::Impl::safe(const Impl* impl)
ImplMap& LLSD::Impl::makeMap(Impl*& var)
{
+ LL_PROFILE_ZONE_SCOPED_CATEGORY_LLSD;
ImplMap* im = new ImplMap;
reset(var, im);
return *im;
@@ -887,11 +894,16 @@ LLSD& LLSD::with(const String& k, const LLSD& v)
}
void LLSD::erase(const String& k) { makeMap(impl).erase(k); }
-LLSD& LLSD::operator[](const String& k)
- { return makeMap(impl).ref(k); }
+LLSD& LLSD::operator[](const String& k)
+{
+ LL_PROFILE_ZONE_SCOPED_CATEGORY_LLSD;
+ return makeMap(impl).ref(k);
+}
const LLSD& LLSD::operator[](const String& k) const
- { return safe(impl).ref(k); }
-
+{
+ LL_PROFILE_ZONE_SCOPED_CATEGORY_LLSD;
+ return safe(impl).ref(k);
+}
LLSD LLSD::emptyArray()
{
@@ -914,10 +926,16 @@ LLSD& LLSD::with(Integer i, const LLSD& v)
LLSD& LLSD::append(const LLSD& v) { return makeArray(impl).append(v); }
void LLSD::erase(Integer i) { makeArray(impl).erase(i); }
-LLSD& LLSD::operator[](Integer i)
- { return makeArray(impl).ref(i); }
+LLSD& LLSD::operator[](Integer i)
+{
+ LL_PROFILE_ZONE_SCOPED_CATEGORY_LLSD;
+ return makeArray(impl).ref(i);
+}
const LLSD& LLSD::operator[](Integer i) const
- { return safe(impl).ref(i); }
+{
+ LL_PROFILE_ZONE_SCOPED_CATEGORY_LLSD;
+ return safe(impl).ref(i);
+}
static const char *llsd_dump(const LLSD &llsd, bool useXMLFormat)
{
diff --git a/indra/llcommon/llsd.h b/indra/llcommon/llsd.h
index 5b6d5545af..24cb9bbce1 100644
--- a/indra/llcommon/llsd.h
+++ b/indra/llcommon/llsd.h
@@ -290,9 +290,17 @@ public:
LLSD& with(const String&, const LLSD&);
LLSD& operator[](const String&);
- LLSD& operator[](const char* c) { return (*this)[String(c)]; }
+ LLSD& operator[](const char* c)
+ {
+ LL_PROFILE_ZONE_SCOPED_CATEGORY_LLSD;
+ return (*this)[String(c)];
+ }
const LLSD& operator[](const String&) const;
- const LLSD& operator[](const char* c) const { return (*this)[String(c)]; }
+ const LLSD& operator[](const char* c) const
+ {
+ LL_PROFILE_ZONE_SCOPED_CATEGORY_LLSD;
+ return (*this)[String(c)];
+ }
//@}
/** @name Array Values */
diff --git a/indra/llcommon/llsdparam.cpp b/indra/llcommon/llsdparam.cpp
index 2e7b46f885..af4ccf25fd 100644
--- a/indra/llcommon/llsdparam.cpp
+++ b/indra/llcommon/llsdparam.cpp
@@ -37,8 +37,6 @@ static LLInitParam::Parser::parser_write_func_map_t sWriteFuncs;
static LLInitParam::Parser::parser_inspect_func_map_t sInspectFuncs;
static const LLSD NO_VALUE_MARKER;
-LLTrace::BlockTimerStatHandle FTM_SD_PARAM_ADAPTOR("LLSD to LLInitParam conversion");
-
//
// LLParamSDParser
//
diff --git a/indra/llcommon/llsdparam.h b/indra/llcommon/llsdparam.h
index 93910b70ae..82a623a8a0 100644
--- a/indra/llcommon/llsdparam.h
+++ b/indra/llcommon/llsdparam.h
@@ -110,7 +110,6 @@ private:
};
-extern LL_COMMON_API LLTrace::BlockTimerStatHandle FTM_SD_PARAM_ADAPTOR;
 template<typename T>
class LLSDParamAdapter : public T
{
@@ -118,7 +117,7 @@ public:
LLSDParamAdapter() {}
LLSDParamAdapter(const LLSD& sd)
{
- LL_RECORD_BLOCK_TIME(FTM_SD_PARAM_ADAPTOR);
+ LL_PROFILE_ZONE_SCOPED;
LLParamSDParser parser;
// don't spam for implicit parsing of LLSD, as we want to allow arbitrary freeform data and ignore most of it
bool parse_silently = true;
diff --git a/indra/llcommon/llsdutil.cpp b/indra/llcommon/llsdutil.cpp
index fc10fcece3..8e90d1e8b8 100644
--- a/indra/llcommon/llsdutil.cpp
+++ b/indra/llcommon/llsdutil.cpp
@@ -215,6 +215,8 @@ BOOL compare_llsd_with_template(
const LLSD& template_llsd,
LLSD& resultant_llsd)
{
+ LL_PROFILE_ZONE_SCOPED
+
if (
llsd_to_test.isUndefined() &&
template_llsd.isDefined() )
@@ -336,6 +338,8 @@ bool filter_llsd_with_template(
const LLSD & template_llsd,
LLSD & resultant_llsd)
{
+ LL_PROFILE_ZONE_SCOPED
+
if (llsd_to_test.isUndefined() && template_llsd.isDefined())
{
resultant_llsd = template_llsd;
@@ -530,6 +534,8 @@ class TypeLookup
public:
TypeLookup()
{
+ LL_PROFILE_ZONE_SCOPED
+
for (const Data *di(boost::begin(typedata)), *dend(boost::end(typedata)); di != dend; ++di)
{
mMap[di->type] = di->name;
@@ -538,6 +544,8 @@ public:
std::string lookup(LLSD::Type type) const
{
+ LL_PROFILE_ZONE_SCOPED
+
MapType::const_iterator found = mMap.find(type);
if (found != mMap.end())
{
@@ -588,6 +596,8 @@ static std::string match_types(LLSD::Type expect, // prototype.type()
LLSD::Type actual, // type we're checking
const std::string& pfx) // as for llsd_matches
{
+ LL_PROFILE_ZONE_SCOPED
+
// Trivial case: if the actual type is exactly what we expect, we're good.
if (actual == expect)
return "";
@@ -625,6 +635,8 @@ static std::string match_types(LLSD::Type expect, // prototype.type()
// see docstring in .h file
std::string llsd_matches(const LLSD& prototype, const LLSD& data, const std::string& pfx)
{
+ LL_PROFILE_ZONE_SCOPED
+
// An undefined prototype means that any data is valid.
// An undefined slot in an array or map prototype means that any data
// may fill that slot.
@@ -757,6 +769,8 @@ std::string llsd_matches(const LLSD& prototype, const LLSD& data, const std::str
bool llsd_equals(const LLSD& lhs, const LLSD& rhs, int bits)
{
+ LL_PROFILE_ZONE_SCOPED
+
// We're comparing strict equality of LLSD representation rather than
// performing any conversions. So if the types aren't equal, the LLSD
// values aren't equal.
@@ -865,6 +879,8 @@ namespace llsd
LLSD& drill_ref(LLSD& blob, const LLSD& rawPath)
{
+ LL_PROFILE_ZONE_SCOPED
+
// Treat rawPath uniformly as an array. If it's not already an array,
// store it as the only entry in one. (But let's say Undefined means an
// empty array.)
@@ -890,6 +906,8 @@ LLSD& drill_ref(LLSD& blob, const LLSD& rawPath)
// path entry that's bad.
for (LLSD::Integer i = 0; i < path.size(); ++i)
{
+ LL_PROFILE_ZONE_NUM( i )
+
const LLSD& key{path[i]};
if (key.isString())
{
@@ -918,6 +936,8 @@ LLSD& drill_ref(LLSD& blob, const LLSD& rawPath)
LLSD drill(const LLSD& blob, const LLSD& path)
{
+ LL_PROFILE_ZONE_SCOPED
+
// drill_ref() does exactly what we want. Temporarily cast away
// const-ness and use that.
 return drill_ref(const_cast<LLSD&>(blob), path);
@@ -930,6 +950,8 @@ LLSD drill(const LLSD& blob, const LLSD& path)
// filter may be include to exclude/include keys in a map.
LLSD llsd_clone(LLSD value, LLSD filter)
{
+ LL_PROFILE_ZONE_SCOPED
+
LLSD clone;
bool has_filter(filter.isMap());
diff --git a/indra/llcommon/llsingleton.h b/indra/llcommon/llsingleton.h
index 7c81d65a8b..51ef514cf7 100644
--- a/indra/llcommon/llsingleton.h
+++ b/indra/llcommon/llsingleton.h
@@ -455,6 +455,7 @@ public:
static DERIVED_TYPE* getInstance()
{
+ LL_PROFILE_ZONE_SCOPED_CATEGORY_THREAD;
// We know the viewer has LLSingleton dependency circularities. If you
// feel strongly motivated to eliminate them, cheers and good luck.
// (At that point we could consider a much simpler locking mechanism.)
@@ -838,4 +839,36 @@ private: \
/* LLSINGLETON() is carefully implemented to permit exactly this */ \
LLSINGLETON_C11(DERIVED_CLASS) {}
+// Relatively unsafe singleton implementation that is much faster
+// and simpler than LLSingleton, but has no dependency tracking
+// or inherent thread safety and requires manual invocation of
+// createInstance before first use.
+template <typename T>
+class LLSimpleton
+{
+public:
+ template <typename... ARGS>
+ static void createInstance(ARGS&&... args)
+ {
+ llassert(sInstance == nullptr);
+ sInstance = new T(std::forward<ARGS>(args)...);
+ }
+
+ static inline T* getInstance() { return sInstance; }
+ static inline T& instance() { return *getInstance(); }
+ static inline bool instanceExists() { return sInstance != nullptr; }
+
+ static void deleteSingleton()
+ {
+ delete sInstance;
+ sInstance = nullptr;
+ }
+
+private:
+ static T* sInstance;
+};
+
+template <typename T>
+T* LLSimpleton<T>::sInstance{ nullptr };
+
#endif
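Because LLSimpleton does no lazy construction, locking, or dependency tracking, the instance must be created explicitly during (single-threaded) startup before the first getInstance() call. A minimal usage sketch; the class name and constructor argument are illustrative:

    class LLFrameStats : public LLSimpleton<LLFrameStats>   // hypothetical user
    {
    public:
        LLFrameStats(int history) : mHistory(history) {}
        int mHistory;
    };

    // during startup, before any consumer runs:
    LLFrameStats::createInstance(64);            // arguments are forwarded to the constructor
    // later, on the hot path:
    int h = LLFrameStats::instance().mHistory;   // plain pointer dereference, no locking
    // during shutdown:
    LLFrameStats::deleteSingleton();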
diff --git a/indra/llcommon/llstring.cpp b/indra/llcommon/llstring.cpp
index 0290eea143..7f501f2e77 100644
--- a/indra/llcommon/llstring.cpp
+++ b/indra/llcommon/llstring.cpp
@@ -37,9 +37,6 @@
#include // for WideCharToMultiByte
#endif
-LLTrace::BlockTimerStatHandle FT_STRING_FORMAT("String Format");
-
-
std::string ll_safe_string(const char* in)
{
if(in) return std::string(in);
@@ -215,7 +212,7 @@ S32 utf16chars_to_wchar(const U16* inchars, llwchar* outchar)
return inchars - base;
}
-llutf16string wstring_to_utf16str(const LLWString &utf32str, S32 len)
+llutf16string wstring_to_utf16str(const llwchar* utf32str, size_t len)
{
llutf16string out;
@@ -237,27 +234,19 @@ llutf16string wstring_to_utf16str(const LLWString &utf32str, S32 len)
return out;
}
-llutf16string wstring_to_utf16str(const LLWString &utf32str)
+llutf16string utf8str_to_utf16str( const char* utf8str, size_t len )
{
- const S32 len = (S32)utf32str.length();
- return wstring_to_utf16str(utf32str, len);
-}
-
-llutf16string utf8str_to_utf16str ( const std::string& utf8str )
-{
- LLWString wstr = utf8str_to_wstring ( utf8str );
+ LLWString wstr = utf8str_to_wstring ( utf8str, len );
return wstring_to_utf16str ( wstr );
}
-
-LLWString utf16str_to_wstring(const llutf16string &utf16str, S32 len)
+LLWString utf16str_to_wstring(const U16* utf16str, size_t len)
{
LLWString wout;
- if((len <= 0) || utf16str.empty()) return wout;
+ if (len == 0) return wout;
S32 i = 0;
- // craziness to make gcc happy (llutf16string.c_str() is tweaked on linux):
- const U16* chars16 = &(*(utf16str.begin()));
+ const U16* chars16 = utf16str;
while (i < len)
{
llwchar cur_char;
@@ -267,12 +256,6 @@ LLWString utf16str_to_wstring(const llutf16string &utf16str, S32 len)
return wout;
}
-LLWString utf16str_to_wstring(const llutf16string &utf16str)
-{
- const S32 len = (S32)utf16str.length();
- return utf16str_to_wstring(utf16str, len);
-}
-
// Length in llwchar (UTF-32) of the first len units (16 bits) of the given UTF-16 string.
S32 utf16str_wstring_length(const llutf16string &utf16str, const S32 utf16_len)
{
@@ -392,8 +375,7 @@ S32 wstring_utf8_length(const LLWString& wstr)
return len;
}
-
-LLWString utf8str_to_wstring(const std::string& utf8str, S32 len)
+LLWString utf8str_to_wstring(const char* utf8str, size_t len)
{
LLWString wout;
@@ -481,13 +463,7 @@ LLWString utf8str_to_wstring(const std::string& utf8str, S32 len)
return wout;
}
-LLWString utf8str_to_wstring(const std::string& utf8str)
-{
- const S32 len = (S32)utf8str.length();
- return utf8str_to_wstring(utf8str, len);
-}
-
-std::string wstring_to_utf8str(const LLWString& utf32str, S32 len)
+std::string wstring_to_utf8str(const llwchar* utf32str, size_t len)
{
std::string out;
@@ -503,20 +479,9 @@ std::string wstring_to_utf8str(const LLWString& utf32str, S32 len)
return out;
}
-std::string wstring_to_utf8str(const LLWString& utf32str)
+std::string utf16str_to_utf8str(const U16* utf16str, size_t len)
{
- const S32 len = (S32)utf32str.length();
- return wstring_to_utf8str(utf32str, len);
-}
-
-std::string utf16str_to_utf8str(const llutf16string& utf16str)
-{
- return wstring_to_utf8str(utf16str_to_wstring(utf16str));
-}
-
-std::string utf16str_to_utf8str(const llutf16string& utf16str, S32 len)
-{
- return wstring_to_utf8str(utf16str_to_wstring(utf16str, len), len);
+ return wstring_to_utf8str(utf16str_to_wstring(utf16str, len));
}
std::string utf8str_trim(const std::string& utf8str)
@@ -657,17 +622,16 @@ std::string utf8str_removeCRLF(const std::string& utf8str)
}
#if LL_WINDOWS
-std::string ll_convert_wide_to_string(const wchar_t* in)
+unsigned int ll_wstring_default_code_page()
{
- return ll_convert_wide_to_string(in, CP_UTF8);
+ return CP_UTF8;
}
-std::string ll_convert_wide_to_string(const wchar_t* in, unsigned int code_page)
+std::string ll_convert_wide_to_string(const wchar_t* in, size_t len_in, unsigned int code_page)
{
std::string out;
if(in)
{
- int len_in = wcslen(in);
int len_out = WideCharToMultiByte(
code_page,
0,
@@ -699,12 +663,7 @@ std::string ll_convert_wide_to_string(const wchar_t* in, unsigned int code_page)
return out;
}
-std::wstring ll_convert_string_to_wide(const std::string& in)
-{
- return ll_convert_string_to_wide(in, CP_UTF8);
-}
-
-std::wstring ll_convert_string_to_wide(const std::string& in, unsigned int code_page)
+std::wstring ll_convert_string_to_wide(const char* in, size_t len, unsigned int code_page)
{
// From review:
// We can preallocate a wide char buffer that is the same length (in wchar_t elements) as the utf8 input,
@@ -716,10 +675,10 @@ std::wstring ll_convert_string_to_wide(const std::string& in, unsigned int code_
// reserve an output buffer that will be destroyed on exit, with a place
// to put NULL terminator
- std::vector<wchar_t> w_out(in.length() + 1);
+ std::vector<wchar_t> w_out(len + 1);
memset(&w_out[0], 0, w_out.size());
- int real_output_str_len = MultiByteToWideChar(code_page, 0, in.c_str(), in.length(),
+ int real_output_str_len = MultiByteToWideChar(code_page, 0, in, len,
&w_out[0], w_out.size() - 1);
//looks like MultiByteToWideChar didn't add null terminator to converted string, see EXT-4858.
@@ -729,30 +688,32 @@ std::wstring ll_convert_string_to_wide(const std::string& in, unsigned int code_
return {&w_out[0]};
}
-LLWString ll_convert_wide_to_wstring(const std::wstring& in)
+LLWString ll_convert_wide_to_wstring(const wchar_t* in, size_t len)
{
- // This function, like its converse, is a placeholder, encapsulating a
- // guilty little hack: the only "official" way nat has found to convert
- // between std::wstring (16 bits on Windows) and LLWString (UTF-32) is
- // by using iconv, which we've avoided so far. It kinda sorta works to
- // just copy individual characters...
- // The point is that if/when we DO introduce some more official way to
- // perform such conversions, we should only have to call it here.
- return { in.begin(), in.end() };
+ // Whether or not std::wstring and llutf16string are distinct types, they
+ // both hold UTF-16LE characters. (See header file comments.) Pretend this
+ // wchar_t* sequence is really a U16* sequence and use the conversion we
+ // define above.
+ return utf16str_to_wstring(reinterpret_cast<const U16*>(in), len);
}
-std::wstring ll_convert_wstring_to_wide(const LLWString& in)
+std::wstring ll_convert_wstring_to_wide(const llwchar* in, size_t len)
{
- // See comments in ll_convert_wide_to_wstring()
- return { in.begin(), in.end() };
+ // first, convert to llutf16string, for which we have a real implementation
+ auto utf16str{ wstring_to_utf16str(in, len) };
+ // then, because each U16 char must be UTF-16LE encoded, pretend the U16*
+ // string pointer is a wchar_t* and instantiate a std::wstring of the same
+ // length.
+ return { reinterpret_cast<const wchar_t*>(utf16str.c_str()), utf16str.length() };
}
std::string ll_convert_string_to_utf8_string(const std::string& in)
{
- auto w_mesg = ll_convert_string_to_wide(in, CP_ACP);
- std::string out_utf8(ll_convert_wide_to_string(w_mesg.c_str(), CP_UTF8));
-
- return out_utf8;
+ // If you pass code_page, you must also pass length, otherwise the code
+ // page parameter will be mistaken for length.
+ auto w_mesg = ll_convert_string_to_wide(in, in.length(), CP_ACP);
+ // CP_UTF8 is default -- see ll_wstring_default_code_page() above.
+ return ll_convert_wide_to_string(w_mesg);
}
namespace
@@ -1356,7 +1317,7 @@ bool LLStringUtil::formatDatetime(std::string& replacement, std::string token,
template<>
S32 LLStringUtil::format(std::string& s, const format_map_t& substitutions)
{
- LL_RECORD_BLOCK_TIME(FT_STRING_FORMAT);
+ LL_PROFILE_ZONE_SCOPED_CATEGORY_STRING;
S32 res = 0;
std::string output;
@@ -1429,7 +1390,7 @@ S32 LLStringUtil::format(std::string& s, const format_map_t& substitutions)
template<>
S32 LLStringUtil::format(std::string& s, const LLSD& substitutions)
{
- LL_RECORD_BLOCK_TIME(FT_STRING_FORMAT);
+ LL_PROFILE_ZONE_SCOPED_CATEGORY_STRING;
S32 res = 0;
if (!substitutions.isMap())
diff --git a/indra/llcommon/llstring.h b/indra/llcommon/llstring.h
index 4263122f36..d94f549480 100644
--- a/indra/llcommon/llstring.h
+++ b/indra/llcommon/llstring.h
@@ -27,9 +27,11 @@
#ifndef LL_LLSTRING_H
#define LL_LLSTRING_H
+#include <boost/call_traits.hpp>
#include
#include
#include
+#include <cwchar> // std::wcslen()
//#include
#include
#include
@@ -527,14 +529,71 @@ struct ll_convert_impl
T operator()(const T& in) const { return in; }
};
+// simple construction from char*
+template<typename T>
+struct ll_convert_impl<T, const typename T::value_type*>
+{
+ T operator()(const typename T::value_type* in) const { return { in }; }
+};
+
// specialize ll_convert_impl to return EXPR
#define ll_convert_alias(TO, FROM, EXPR) \
template<> \
 struct ll_convert_impl<TO, FROM> \
{ \
- TO operator()(const FROM& in) const { return EXPR; } \
+ /* param_type optimally passes both char* and string */ \
+ TO operator()(typename boost::call_traits<FROM>::param_type in) const { return EXPR; } \
}
+// If all we're doing is copying characters, pass this to ll_convert_alias as
+// EXPR. Since it expands into the 'return EXPR' slot in the ll_convert_impl
+// specialization above, it implies TO{ in.begin(), in.end() }.
+#define LL_CONVERT_COPY_CHARS { in.begin(), in.end() }
+
+// Generic name for strlen() / wcslen() - the default implementation should
+// (!) work with U16 and llwchar, but we don't intend to engage it.
+template <typename CHARTYPE>
+size_t ll_convert_length(const CHARTYPE* zstr)
+{
+ const CHARTYPE* zp;
+ // classic C string scan
+ for (zp = zstr; *zp; ++zp)
+ ;
+ return (zp - zstr);
+}
+
+// specialize where we have a library function; may use intrinsic operations
+template <>
+inline size_t ll_convert_length<wchar_t>(const wchar_t* zstr) { return std::wcslen(zstr); }
+template <>
+inline size_t ll_convert_length<char>   (const char* zstr)    { return std::strlen(zstr); }
+
+// ll_convert_forms() is short for a bunch of boilerplate. It defines
+// longname(const char*, len), longname(const char*), longname(const string&)
+// and longname(const string&, len) so calls written pre-ll_convert() will
+// work. Most of these overloads will be unified once we turn on C++17 and can
+// use std::string_view.
+// It also uses aliasmacro to ensure that both ll_convert(const char*)
+// and ll_convert(const string&) will work.
+#define ll_convert_forms(aliasmacro, OUTSTR, INSTR, longname) \
+LL_COMMON_API OUTSTR longname(const INSTR::value_type* in, size_t len); \
+inline auto longname(const INSTR& in, size_t len) \
+{ \
+ return longname(in.c_str(), len); \
+} \
+inline auto longname(const INSTR::value_type* in) \
+{ \
+ return longname(in, ll_convert_length(in)); \
+} \
+inline auto longname(const INSTR& in) \
+{ \
+ return longname(in.c_str(), in.length()); \
+} \
+/* string param */ \
+aliasmacro(OUTSTR, INSTR, longname(in)); \
+/* char* param */ \
+aliasmacro(OUTSTR, const INSTR::value_type*, longname(in))
+
// Make the incoming string a utf8 string. Replaces any unknown glyph
// with the UNKNOWN_CHARACTER. Once any unknown glyph is found, the rest
// of the data may not be recovered.
@@ -571,63 +630,47 @@ LL_COMMON_API std::string rawstr_to_utf8(const std::string& raw);
// LL_WCHAR_T_NATIVE.
 typedef std::basic_string<U16> llutf16string;
-#if ! defined(LL_WCHAR_T_NATIVE)
-// wchar_t is identical to U16, and std::wstring is identical to llutf16string.
-// Defining an ll_convert alias involving llutf16string would collide with the
-// comparable preferred alias involving std::wstring. (In this scenario, if
-// you pass llutf16string, it will engage the std::wstring specialization.)
-#define ll_convert_u16_alias(TO, FROM, EXPR) // nothing
-#else // defined(LL_WCHAR_T_NATIVE)
-// wchar_t is a distinct native type, so llutf16string is also a distinct
-// type, and there IS a point to converting separately to/from llutf16string.
-// (But why? Windows APIs are still defined in terms of wchar_t, and
-// in this scenario llutf16string won't work for them!)
-#define ll_convert_u16_alias(TO, FROM, EXPR) ll_convert_alias(TO, FROM, EXPR)
+// Considering wchar_t, llwchar and U16, there are three relevant cases:
+#if LLWCHAR_IS_WCHAR_T // every which way but Windows
+// llwchar is identical to wchar_t, LLWString is identical to std::wstring.
+// U16 is distinct, llutf16string is distinct (though pretty useless).
+// Given conversions to/from LLWString and to/from llutf16string, conversions
+// involving std::wstring would collide.
+#define ll_convert_wstr_alias(TO, FROM, EXPR) // nothing
+// but we can define conversions involving llutf16string without collisions
+#define ll_convert_u16_alias(TO, FROM, EXPR) ll_convert_alias(TO, FROM, EXPR)
-#if LL_WINDOWS
-// LL_WCHAR_T_NATIVE is defined on non-Windows systems because, in fact,
-// wchar_t is native. Everywhere but Windows, we use it for llwchar (see
-// stdtypes.h). That makes LLWString identical to std::wstring, so these
-// aliases for std::wstring would collide with those for LLWString. Only
-// define on Windows, where converting between std::wstring and llutf16string
-// means copying chars.
-ll_convert_alias(llutf16string, std::wstring, llutf16string(in.begin(), in.end()));
-ll_convert_alias(std::wstring, llutf16string, std::wstring(in.begin(), in.end()));
-#endif // LL_WINDOWS
-#endif // defined(LL_WCHAR_T_NATIVE)
+#elif defined(LL_WCHAR_T_NATIVE) // Windows, either clang or MS /Zc:wchar_t
+// llwchar (32-bit), wchar_t (16-bit) and U16 are all different types.
+// Conversions to/from LLWString, to/from std::wstring and to/from llutf16string
+// can all be defined.
+#define ll_convert_wstr_alias(TO, FROM, EXPR) ll_convert_alias(TO, FROM, EXPR)
+#define ll_convert_u16_alias(TO, FROM, EXPR) ll_convert_alias(TO, FROM, EXPR)
-LL_COMMON_API LLWString utf16str_to_wstring(const llutf16string &utf16str, S32 len);
-LL_COMMON_API LLWString utf16str_to_wstring(const llutf16string &utf16str);
-ll_convert_u16_alias(LLWString, llutf16string, utf16str_to_wstring(in));
+#else // ! LL_WCHAR_T_NATIVE: Windows with MS /Zc:wchar_t-
+// wchar_t is identical to U16, std::wstring is identical to llutf16string.
+// Given conversions to/from LLWString and to/from std::wstring, conversions
+// involving llutf16string would collide.
+#define ll_convert_u16_alias(TO, FROM, EXPR) // nothing
+// but we can define conversions involving std::wstring without collisions
+#define ll_convert_wstr_alias(TO, FROM, EXPR) ll_convert_alias(TO, FROM, EXPR)
+#endif
-LL_COMMON_API llutf16string wstring_to_utf16str(const LLWString &utf32str, S32 len);
-LL_COMMON_API llutf16string wstring_to_utf16str(const LLWString &utf32str);
-ll_convert_u16_alias(llutf16string, LLWString, wstring_to_utf16str(in));
+ll_convert_forms(ll_convert_u16_alias, LLWString, llutf16string, utf16str_to_wstring);
+ll_convert_forms(ll_convert_u16_alias, llutf16string, LLWString, wstring_to_utf16str);
+ll_convert_forms(ll_convert_u16_alias, llutf16string, std::string, utf8str_to_utf16str);
+ll_convert_forms(ll_convert_alias, LLWString, std::string, utf8str_to_wstring);
-LL_COMMON_API llutf16string utf8str_to_utf16str ( const std::string& utf8str, S32 len);
-LL_COMMON_API llutf16string utf8str_to_utf16str ( const std::string& utf8str );
-ll_convert_u16_alias(llutf16string, std::string, utf8str_to_utf16str(in));
-
-LL_COMMON_API LLWString utf8str_to_wstring(const std::string &utf8str, S32 len);
-LL_COMMON_API LLWString utf8str_to_wstring(const std::string &utf8str);
// Same function, better name. JC
inline LLWString utf8string_to_wstring(const std::string& utf8_string) { return utf8str_to_wstring(utf8_string); }
-// best name of all
-ll_convert_alias(LLWString, std::string, utf8string_to_wstring(in));
-//
LL_COMMON_API S32 wchar_to_utf8chars(llwchar inchar, char* outchars);
-LL_COMMON_API std::string wstring_to_utf8str(const LLWString &utf32str, S32 len);
-LL_COMMON_API std::string wstring_to_utf8str(const LLWString &utf32str);
-ll_convert_alias(std::string, LLWString, wstring_to_utf8str(in));
-LL_COMMON_API std::string utf16str_to_utf8str(const llutf16string &utf16str, S32 len);
-LL_COMMON_API std::string utf16str_to_utf8str(const llutf16string &utf16str);
-ll_convert_u16_alias(std::string, llutf16string, utf16str_to_utf8str(in));
+ll_convert_forms(ll_convert_alias, std::string, LLWString, wstring_to_utf8str);
+ll_convert_forms(ll_convert_u16_alias, std::string, llutf16string, utf16str_to_utf8str);
-#if LL_WINDOWS
+// an older alias for utf16str_to_utf8str(llutf16string)
inline std::string wstring_to_utf8str(const llutf16string &utf16str) { return utf16str_to_utf8str(utf16str);}
-#endif
// Length of this UTF32 string in bytes when transformed to UTF8
LL_COMMON_API S32 wstring_utf8_length(const LLWString& wstr);
@@ -701,42 +744,48 @@ LL_COMMON_API std::string utf8str_removeCRLF(const std::string& utf8str);
//@{
/**
- * @brief Convert a wide string to std::string
+ * @brief Convert a wide string to/from std::string
+ * Convert a Windows wide string to/from our LLWString
*
* This replaces the unsafe W2A macro from ATL.
*/
-LL_COMMON_API std::string ll_convert_wide_to_string(const wchar_t* in, unsigned int code_page);
-LL_COMMON_API std::string ll_convert_wide_to_string(const wchar_t* in); // default CP_UTF8
-inline std::string ll_convert_wide_to_string(const std::wstring& in, unsigned int code_page)
-{
- return ll_convert_wide_to_string(in.c_str(), code_page);
-}
-inline std::string ll_convert_wide_to_string(const std::wstring& in)
-{
- return ll_convert_wide_to_string(in.c_str());
-}
-ll_convert_alias(std::string, std::wstring, ll_convert_wide_to_string(in));
+// Avoid requiring this header to #include the Windows header file declaring
+// our actual default code_page by delegating this function to our .cpp file.
+LL_COMMON_API unsigned int ll_wstring_default_code_page();
-/**
- * Converts a string to wide string.
- */
-LL_COMMON_API std::wstring ll_convert_string_to_wide(const std::string& in,
- unsigned int code_page);
-LL_COMMON_API std::wstring ll_convert_string_to_wide(const std::string& in);
- // default CP_UTF8
-ll_convert_alias(std::wstring, std::string, ll_convert_string_to_wide(in));
+// This is like ll_convert_forms(), with the added complexity of a code page
+// parameter that may or may not be passed.
+#define ll_convert_cp_forms(aliasmacro, OUTSTR, INSTR, longname) \
+/* declare the only nontrivial implementation (in .cpp file) */ \
+LL_COMMON_API OUTSTR longname( \
+ const INSTR::value_type* in, \
+ size_t len, \
+ unsigned int code_page=ll_wstring_default_code_page()); \
+/* if passed only a char pointer, scan for nul terminator */ \
+inline auto longname(const INSTR::value_type* in) \
+{ \
+ return longname(in, ll_convert_length(in)); \
+} \
+/* if passed string and length, extract its char pointer */ \
+inline auto longname( \
+ const INSTR& in, \
+ size_t len, \
+ unsigned int code_page=ll_wstring_default_code_page()) \
+{ \
+ return longname(in.c_str(), len, code_page); \
+} \
+/* if passed only a string object, no scan, pass known length */ \
+inline auto longname(const INSTR& in) \
+{ \
+ return longname(in.c_str(), in.length()); \
+} \
+aliasmacro(OUTSTR, INSTR, longname(in)); \
+aliasmacro(OUTSTR, const INSTR::value_type*, longname(in))
-/**
- * Convert a Windows wide string to our LLWString
- */
-LL_COMMON_API LLWString ll_convert_wide_to_wstring(const std::wstring& in);
-ll_convert_alias(LLWString, std::wstring, ll_convert_wide_to_wstring(in));
-
-/**
- * Convert LLWString to Windows wide string
- */
-LL_COMMON_API std::wstring ll_convert_wstring_to_wide(const LLWString& in);
-ll_convert_alias(std::wstring, LLWString, ll_convert_wstring_to_wide(in));
+ll_convert_cp_forms(ll_convert_wstr_alias, std::string, std::wstring, ll_convert_wide_to_string);
+ll_convert_cp_forms(ll_convert_wstr_alias, std::wstring, std::string, ll_convert_string_to_wide);
+ ll_convert_forms(ll_convert_wstr_alias, LLWString, std::wstring, ll_convert_wide_to_wstring);
+ ll_convert_forms(ll_convert_wstr_alias, std::wstring, LLWString, ll_convert_wstring_to_wide);
/**
* Converts incoming string into utf8 string
@@ -1937,4 +1986,14 @@ void LLStringUtilBase::truncate(string_type& string, size_type count)
string.resize(count < cur_size ? count : cur_size);
}
+// The good thing about *declaration* macros, vs. usage macros, is that now
+// we're done with them: we don't need them to bleed into the consuming source
+// file.
+#undef ll_convert_alias
+#undef ll_convert_u16_alias
+#undef ll_convert_wstr_alias
+#undef LL_CONVERT_COPY_CHARS
+#undef ll_convert_forms
+#undef ll_convert_cp_forms
+
#endif // LL_STRING_H
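The net effect of ll_convert_forms() / ll_convert_cp_forms() is that each named conversion gains pointer+length, pointer-only, string+length and string-only overloads, and the generic ll_convert dispatcher (defined earlier in this header) accepts either a string object or a raw pointer. A hedged usage sketch under those assumptions:

    std::string utf8  = "hello";
    LLWString   wide  = utf8str_to_wstring(utf8);             // string-object overload
    LLWString   wide2 = utf8str_to_wstring("hello");          // char* overload, length scanned
    std::string head  = wstring_to_utf8str(wide.c_str(), 3);  // pointer + explicit length -> "hel"

    // generic form: ll_convert_impl specializations route to the right longname
    auto wide3 = ll_convert<LLWString>(utf8);
    auto utf8b = ll_convert<std::string>(wide);

    #if LL_WINDOWS
    // code-page form: when a code_page is supplied, the length must be passed too,
    // otherwise the code page would be mistaken for the length.
    std::string acp = ll_convert_wide_to_string(L"hello", 5, CP_ACP);
    #endif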
diff --git a/indra/llcommon/llsys.cpp b/indra/llcommon/llsys.cpp
index cdc1d83b59..f717b2cf34 100644
--- a/indra/llcommon/llsys.cpp
+++ b/indra/llcommon/llsys.cpp
@@ -843,6 +843,7 @@ LLSD LLMemoryInfo::getStatsMap() const
LLMemoryInfo& LLMemoryInfo::refresh()
{
+ LL_PROFILE_ZONE_SCOPED
mStatsMap = loadStatsMap();
LL_DEBUGS("LLMemoryInfo") << "Populated mStatsMap:\n";
@@ -852,11 +853,9 @@ LLMemoryInfo& LLMemoryInfo::refresh()
return *this;
}
-static LLTrace::BlockTimerStatHandle FTM_MEMINFO_LOAD_STATS("MemInfo Load Stats");
-
LLSD LLMemoryInfo::loadStatsMap()
{
- LL_RECORD_BLOCK_TIME(FTM_MEMINFO_LOAD_STATS);
+ LL_PROFILE_ZONE_SCOPED;
// This implementation is derived from stream() code (as of 2011-06-29).
Stats stats;
diff --git a/indra/llcommon/llthread.cpp b/indra/llcommon/llthread.cpp
index 6d531d842d..a807acc56e 100644
--- a/indra/llcommon/llthread.cpp
+++ b/indra/llcommon/llthread.cpp
@@ -135,6 +135,8 @@ void LLThread::threadRun()
set_thread_name(-1, mName.c_str());
#endif
+ LL_PROFILER_SET_THREAD_NAME( mName.c_str() );
+
// this is the first point at which we're actually running in the new thread
mID = currentID();
@@ -331,6 +333,7 @@ bool LLThread::runCondition(void)
// Stop thread execution if requested until unpaused.
void LLThread::checkPause()
{
+ LL_PROFILE_ZONE_SCOPED_CATEGORY_THREAD
mDataLock->lock();
// This is in a while loop because the pthread API allows for spurious wakeups.
@@ -362,17 +365,20 @@ void LLThread::setQuitting()
// static
LLThread::id_t LLThread::currentID()
{
+ LL_PROFILE_ZONE_SCOPED_CATEGORY_THREAD
return std::this_thread::get_id();
}
// static
void LLThread::yield()
{
+ LL_PROFILE_ZONE_SCOPED_CATEGORY_THREAD
std::this_thread::yield();
}
void LLThread::wake()
{
+ LL_PROFILE_ZONE_SCOPED_CATEGORY_THREAD
mDataLock->lock();
if(!shouldSleep())
{
@@ -383,6 +389,7 @@ void LLThread::wake()
void LLThread::wakeLocked()
{
+ LL_PROFILE_ZONE_SCOPED_CATEGORY_THREAD
if(!shouldSleep())
{
mRunCondition->signal();
@@ -391,11 +398,13 @@ void LLThread::wakeLocked()
void LLThread::lockData()
{
+ LL_PROFILE_ZONE_SCOPED_CATEGORY_THREAD
mDataLock->lock();
}
void LLThread::unlockData()
{
+ LL_PROFILE_ZONE_SCOPED_CATEGORY_THREAD
mDataLock->unlock();
}
diff --git a/indra/llcommon/llthreadsafequeue.h b/indra/llcommon/llthreadsafequeue.h
index 26e0d71d31..68d79cdd12 100644
--- a/indra/llcommon/llthreadsafequeue.h
+++ b/indra/llcommon/llthreadsafequeue.h
@@ -1,6 +1,6 @@
/**
* @file llthreadsafequeue.h
- * @brief Base classes for thread, mutex and condition handling.
+ * @brief Queue protected with mutexes for cross-thread use
*
* $LicenseInfo:firstyear=2004&license=viewerlgpl$
* Second Life Viewer Source Code
@@ -27,16 +27,19 @@
#ifndef LL_LLTHREADSAFEQUEUE_H
#define LL_LLTHREADSAFEQUEUE_H
-#include "llexception.h"
-#include
-#include
-#include
-#include "mutex.h"
#include "llcoros.h"
#include LLCOROS_MUTEX_HEADER
#include
#include LLCOROS_CONDVAR_HEADER
+#include "llexception.h"
+#include "mutex.h"
+#include <chrono>
+#include <queue>
+#include <string>
+/*****************************************************************************
+* LLThreadSafeQueue
+*****************************************************************************/
//
// A general queue exception.
//
@@ -66,70 +69,116 @@ public:
}
};
-//
-// Implements a thread safe FIFO.
-//
-template<typename ElementT>
+/**
+ * Implements a thread safe FIFO.
+ */
+// QueueT defaults to std::queue, which itself defaults to an underlying
+// std::deque; override if desired.
+template<typename ElementT, typename QueueT=std::queue<ElementT>>
class LLThreadSafeQueue
{
public:
typedef ElementT value_type;
-
- // If the pool is set to NULL one will be allocated and managed by this
- // queue.
+
+ // Limiting the number of pending items prevents unbounded growth of the
+ // underlying queue.
LLThreadSafeQueue(U32 capacity = 1024);
-
- // Add an element to the front of queue (will block if the queue has
- // reached capacity).
+ virtual ~LLThreadSafeQueue() {}
+
+ // Add an element to the queue (will block if the queue has reached
+ // capacity).
//
// This call will raise an interrupt error if the queue is closed while
// the caller is blocked.
- void pushFront(ElementT const & element);
-
- // Try to add an element to the front of queue without blocking. Returns
- // true only if the element was actually added.
- bool tryPushFront(ElementT const & element);
+ template <typename T>
+ void push(T&& element);
+ // legacy name
+ void pushFront(ElementT const & element) { return push(element); }
- // Try to add an element to the front of queue, blocking if full but with
- // timeout. Returns true if the element was added.
+ // Add an element to the queue (will block if the queue has reached
+ // capacity). Return false if the queue is closed before push is possible.
+ template <typename T>
+ bool pushIfOpen(T&& element);
+
+ // Try to add an element to the queue without blocking. Returns
+ // true only if the element was actually added.
+ template <typename T>
+ bool tryPush(T&& element);
+ // legacy name
+ bool tryPushFront(ElementT const & element) { return tryPush(element); }
+
+ // Try to add an element to the queue, blocking if full but with timeout
+ // after specified duration. Returns true if the element was added.
// There are potentially two different timeouts involved: how long to try
// to lock the mutex, versus how long to wait for the queue to stop being
// full. Careful settings for each timeout might be orders of magnitude
// apart. However, this method conflates them.
+ template <typename Rep, typename Period, typename T>
+ bool tryPushFor(const std::chrono::duration<Rep, Period>& timeout,
+ T&& element);
+ // legacy name
 template <typename Rep, typename Period>
 bool tryPushFrontFor(const std::chrono::duration<Rep, Period>& timeout,
- ElementT const & element);
+ ElementT const & element) { return tryPushFor(timeout, element); }
- // Pop the element at the end of the queue (will block if the queue is
+ // Try to add an element to the queue, blocking if full but with
+ // timeout at specified time_point. Returns true if the element was added.
+ template <typename Clock, typename Duration, typename T>
+ bool tryPushUntil(const std::chrono::time_point<Clock, Duration>& until,
+ T&& element);
+ // no legacy name because this is a newer method
+
+ // Pop the element at the head of the queue (will block if the queue is
// empty).
//
// This call will raise an interrupt error if the queue is closed while
// the caller is blocked.
- ElementT popBack(void);
-
- // Pop an element from the end of the queue if there is one available.
+ ElementT pop(void);
+ // legacy name
+ ElementT popBack(void) { return pop(); }
+
+ // Pop an element from the head of the queue if there is one available.
// Returns true only if an element was popped.
- bool tryPopBack(ElementT & element);
-
+ bool tryPop(ElementT & element);
+ // legacy name
+ bool tryPopBack(ElementT & element) { return tryPop(element); }
+
+ // Pop the element at the head of the queue, blocking if empty, with
+ // timeout after specified duration. Returns true if an element was popped.
+ template <typename Rep, typename Period>
+ bool tryPopFor(const std::chrono::duration<Rep, Period>& timeout, ElementT& element);
+ // no legacy name because this is a newer method
+
+ // Pop the element at the head of the queue, blocking if empty, with
+ // timeout at specified time_point. Returns true if an element was popped.
+ template <typename Clock, typename Duration>
+ bool tryPopUntil(const std::chrono::time_point<Clock, Duration>& until,
+ ElementT& element);
+ // no legacy name because this is a newer method
+
// Returns the size of the queue.
size_t size();
+ //Returns the capacity of the queue.
+ U32 capacity() { return mCapacity; }
+
// closes the queue:
- // - every subsequent pushFront() call will throw LLThreadSafeQueueInterrupt
- // - every subsequent tryPushFront() call will return false
- // - popBack() calls will return normally until the queue is drained, then
- // every subsequent popBack() will throw LLThreadSafeQueueInterrupt
- // - tryPopBack() calls will return normally until the queue is drained,
- // then every subsequent tryPopBack() call will return false
+ // - every subsequent push() call will throw LLThreadSafeQueueInterrupt
+ // - every subsequent tryPush() call will return false
+ // - pop() calls will return normally until the queue is drained, then
+ // every subsequent pop() will throw LLThreadSafeQueueInterrupt
+ // - tryPop() calls will return normally until the queue is drained,
+ // then every subsequent tryPop() call will return false
void close();
- // detect closed state
+ // producer end: are we prevented from pushing any additional items?
bool isClosed();
- // inverse of isClosed()
- explicit operator bool();
+ // consumer end: are we done, is the queue entirely drained?
+ bool done();
-private:
- std::deque< ElementT > mStorage;
+protected:
+ typedef QueueT queue_type;
+ QueueT mStorage;
U32 mCapacity;
bool mClosed;
@@ -137,37 +186,154 @@ private:
 typedef std::unique_lock<decltype(mLock)> lock_t;
boost::fibers::condition_variable_any mCapacityCond;
boost::fibers::condition_variable_any mEmptyCond;
+
+ enum pop_result { EMPTY, DONE, WAITING, POPPED };
+ // implementation logic, suitable for passing to tryLockUntil()
+ template <typename Clock, typename Duration>
+ pop_result tryPopUntil_(lock_t& lock,
+ const std::chrono::time_point<Clock, Duration>& until,
+ ElementT& element);
+ // if we're able to lock immediately, do so and run the passed callable,
+ // which must accept lock_t& and return bool
+ template <typename CALLABLE>
+ bool tryLock(CALLABLE&& callable);
+ // if we're able to lock before the passed time_point, do so and run the
+ // passed callable, which must accept lock_t& and return bool
+ template <typename Clock, typename Duration, typename CALLABLE>
+ bool tryLockUntil(const std::chrono::time_point<Clock, Duration>& until,
+ CALLABLE&& callable);
+ // while lock is locked, really push the passed element, if we can
+ template <typename T>
+ bool push_(lock_t& lock, T&& element);
+ // while lock is locked, really pop the head element, if we can
+ pop_result pop_(lock_t& lock, ElementT& element);
+ // Is the current head element ready to pop? We say yes; subclass can
+ // override as needed.
+ virtual bool canPop(const ElementT& head) const { return true; }
};
-// LLThreadSafeQueue
-//-----------------------------------------------------------------------------
+/*****************************************************************************
+* PriorityQueueAdapter
+*****************************************************************************/
+namespace LL
+{
+ /**
+ * std::priority_queue's API is almost like std::queue, intentionally of
+ * course, but you must access the element about to pop() as top() rather
+ * than as front(). Make an adapter for use with LLThreadSafeQueue.
+ */
+ template <typename T, typename Container=std::vector<T>,
+ typename Compare=std::less<typename Container::value_type>>
+ class PriorityQueueAdapter
+ {
+ public:
+ // publish all the same types
+ typedef std::priority_queue<T, Container, Compare> queue_type;
+ typedef typename queue_type::container_type container_type;
+ typedef typename queue_type::value_compare value_compare;
+ typedef typename queue_type::value_type value_type;
+ typedef typename queue_type::size_type size_type;
+ typedef typename queue_type::reference reference;
+ typedef typename queue_type::const_reference const_reference;
-template<typename ElementT>
-LLThreadSafeQueue<ElementT>::LLThreadSafeQueue(U32 capacity) :
+ // Although std::queue defines both const and non-const front()
+ // methods, std::priority_queue defines only const top().
+ const_reference front() const { return mQ.top(); }
+ // std::priority_queue has no equivalent to back(), so it's good that
+ // LLThreadSafeQueue doesn't use it.
+
+ // All the rest of these merely forward to the corresponding
+ // queue_type methods.
+ bool empty() const { return mQ.empty(); }
+ size_type size() const { return mQ.size(); }
+ void push(const value_type& value) { mQ.push(value); }
+ void push(value_type&& value) { mQ.push(std::move(value)); }
+ template <typename... Args>
+ void emplace(Args&&... args) { mQ.emplace(std::forward<Args>(args)...); }
+ void pop() { mQ.pop(); }
+
+ private:
+ queue_type mQ;
+ };
+} // namespace LL
+
+
+/*****************************************************************************
+* LLThreadSafeQueue implementation
+*****************************************************************************/
+template <typename ElementT, typename QueueT>
+LLThreadSafeQueue<ElementT, QueueT>::LLThreadSafeQueue(U32 capacity) :
mCapacity(capacity),
mClosed(false)
{
}
-template<typename ElementT>
-void LLThreadSafeQueue<ElementT>::pushFront(ElementT const & element)
+// if we're able to lock immediately, do so and run the passed callable, which
+// must accept lock_t& and return bool
+template <typename ElementT, typename QueueT>
+template <typename CALLABLE>
+bool LLThreadSafeQueue<ElementT, QueueT>::tryLock(CALLABLE&& callable)
{
+ LL_PROFILE_ZONE_SCOPED_CATEGORY_THREAD;
+ lock_t lock1(mLock, std::defer_lock);
+ if (!lock1.try_lock())
+ return false;
+
+ return std::forward<CALLABLE>(callable)(lock1);
+}
+
+
+// if we're able to lock before the passed time_point, do so and run the
+// passed callable, which must accept lock_t& and return bool
+template <typename ElementT, typename QueueT>
+template <typename Clock, typename Duration, typename CALLABLE>
+bool LLThreadSafeQueue<ElementT, QueueT>::tryLockUntil(
+ const std::chrono::time_point<Clock, Duration>& until,
+ CALLABLE&& callable)
+{
+ LL_PROFILE_ZONE_SCOPED_CATEGORY_THREAD;
+ lock_t lock1(mLock, std::defer_lock);
+ if (!lock1.try_lock_until(until))
+ return false;
+
+ return std::forward<CALLABLE>(callable)(lock1);
+}
+
+
+// while lock is locked, really push the passed element, if we can
+template <typename ElementT, typename QueueT>
+template <typename T>
+bool LLThreadSafeQueue<ElementT, QueueT>::push_(lock_t& lock, T&& element)
+{
+ LL_PROFILE_ZONE_SCOPED_CATEGORY_THREAD;
+ if (mStorage.size() >= mCapacity)
+ return false;
+
+ mStorage.push(std::forward<T>(element));
+ lock.unlock();
+ // now that we've pushed, if somebody's been waiting to pop, signal them
+ mEmptyCond.notify_one();
+ return true;
+}
+
+
+template <typename ElementT, typename QueueT>
+template <typename T>
+bool LLThreadSafeQueue<ElementT, QueueT>::pushIfOpen(T&& element)
+{
+ LL_PROFILE_ZONE_SCOPED_CATEGORY_THREAD;
lock_t lock1(mLock);
while (true)
{
+ // On the producer side, it doesn't matter whether the queue has been
+ // drained or not: the moment either end calls close(), further push()
+ // operations will fail.
if (mClosed)
- {
- LLTHROW(LLThreadSafeQueueInterrupt());
- }
+ return false;
- if (mStorage.size() < mCapacity)
- {
- mStorage.push_front(element);
- lock1.unlock();
- mEmptyCond.notify_one();
- return;
- }
+ if (push_(lock1, std::forward(element)))
+ return true;
// Storage Full. Wait for signal.
mCapacityCond.wait(lock1);
@@ -175,39 +341,205 @@ void LLThreadSafeQueue::pushFront(ElementT const & element)
}
-template<typename ElementT>
-template <typename Rep, typename Period>
-bool LLThreadSafeQueue<ElementT>::tryPushFrontFor(const std::chrono::duration<Rep, Period>& timeout,
- ElementT const & element)
+template <typename ElementT, typename QueueT>
+template <typename T>
+void LLThreadSafeQueue<ElementT, QueueT>::push(T&& element)
{
+ LL_PROFILE_ZONE_SCOPED_CATEGORY_THREAD;
+ if (! pushIfOpen(std::forward<T>(element)))
+ {
+ LLTHROW(LLThreadSafeQueueInterrupt());
+ }
+}
+
+
+template <typename ElementT, typename QueueT>
+template <typename T>
+bool LLThreadSafeQueue<ElementT, QueueT>::tryPush(T&& element)
+{
+ LL_PROFILE_ZONE_SCOPED_CATEGORY_THREAD;
+ return tryLock(
+ [this, element=std::move(element)](lock_t& lock)
+ {
+ if (mClosed)
+ return false;
+ return push_(lock, std::move(element));
+ });
+}
+
+
+template <typename ElementT, typename QueueT>
+template <typename Rep, typename Period, typename T>
+bool LLThreadSafeQueue<ElementT, QueueT>::tryPushFor(
+ const std::chrono::duration<Rep, Period>& timeout,
+ T&& element)
+{
+ LL_PROFILE_ZONE_SCOPED_CATEGORY_THREAD;
// Convert duration to time_point: passing the same timeout duration to
// each of multiple calls is wrong.
- auto endpoint = std::chrono::steady_clock::now() + timeout;
+ return tryPushUntil(std::chrono::steady_clock::now() + timeout,
+ std::forward<T>(element));
+}
- lock_t lock1(mLock, std::defer_lock);
- if (!lock1.try_lock_until(endpoint))
- return false;
+template <typename ElementT, typename QueueT>
+template <typename Clock, typename Duration, typename T>
+bool LLThreadSafeQueue<ElementT, QueueT>::tryPushUntil(
+ const std::chrono::time_point<Clock, Duration>& until,
+ T&& element)
+{
+ LL_PROFILE_ZONE_SCOPED_CATEGORY_THREAD;
+ return tryLockUntil(
+ until,
+ [this, until, element=std::move(element)](lock_t& lock)
+ {
+ while (true)
+ {
+ if (mClosed)
+ {
+ return false;
+ }
+
+ if (push_(lock, std::move(element)))
+ return true;
+
+ // Storage Full. Wait for signal.
+ if (LLCoros::cv_status::timeout == mCapacityCond.wait_until(lock, until))
+ {
+ // timed out -- formally we might recheck both conditions above
+ return false;
+ }
+ // If we didn't time out, we were notified for some reason. Loop back
+ // to check.
+ }
+ });
+}
+
+
+// while lock is locked, really pop the head element, if we can
+template <typename ElementT, typename QueueT>
+typename LLThreadSafeQueue<ElementT, QueueT>::pop_result
+LLThreadSafeQueue<ElementT, QueueT>::pop_(lock_t& lock, ElementT& element)
+{
+ LL_PROFILE_ZONE_SCOPED_CATEGORY_THREAD;
+ // If mStorage is empty, there's no head element.
+ if (mStorage.empty())
+ return mClosed? DONE : EMPTY;
+
+ // If there's a head element, pass it to canPop() to see if it's ready to pop.
+ if (! canPop(mStorage.front()))
+ return WAITING;
+
+ // std::queue::front() is the element about to pop()
+ element = mStorage.front();
+ mStorage.pop();
+ lock.unlock();
+ // now that we've popped, if somebody's been waiting to push, signal them
+ mCapacityCond.notify_one();
+ return POPPED;
+}
+
+
+template <typename ElementT, typename QueueT>
+ElementT LLThreadSafeQueue<ElementT, QueueT>::pop(void)
+{
+ LL_PROFILE_ZONE_SCOPED_CATEGORY_THREAD;
+ lock_t lock1(mLock);
+ ElementT value;
while (true)
{
- if (mClosed)
+ // On the consumer side, we always try to pop before checking mClosed
+ // so we can finish draining the queue.
+ pop_result popped = pop_(lock1, value);
+ if (popped == POPPED)
+ return std::move(value);
+
+ // Once the queue is DONE, there will never be any more coming.
+ if (popped == DONE)
{
- return false;
+ LLTHROW(LLThreadSafeQueueInterrupt());
}
- if (mStorage.size() < mCapacity)
+ // If we didn't pop because WAITING, i.e. canPop() returned false,
+ // then even if the producer end has been closed, there's still at
+ // least one item to drain: wait for it. Or we might be EMPTY, with
+ // the queue still open. Either way, wait for signal.
+ mEmptyCond.wait(lock1);
+ }
+}
+
+
+template <typename ElementT, typename QueueT>
+bool LLThreadSafeQueue<ElementT, QueueT>::tryPop(ElementT & element)
+{
+ LL_PROFILE_ZONE_SCOPED_CATEGORY_THREAD;
+ return tryLock(
+ [this, &element](lock_t& lock)
{
- mStorage.push_front(element);
- lock1.unlock();
- mEmptyCond.notify_one();
- return true;
+ // conflate EMPTY, DONE, WAITING: tryPop() behavior when the queue
+ // is closed is implemented by simple inability to push any new
+ // elements
+ return pop_(lock, element) == POPPED;
+ });
+}
+
+
+template <typename ElementT, typename QueueT>
+template <typename Rep, typename Period>
+bool LLThreadSafeQueue<ElementT, QueueT>::tryPopFor(
+ const std::chrono::duration<Rep, Period>& timeout,
+ ElementT& element)
+{
+ LL_PROFILE_ZONE_SCOPED_CATEGORY_THREAD;
+ // Convert duration to time_point: passing the same timeout duration to
+ // each of multiple calls is wrong.
+ return tryPopUntil(std::chrono::steady_clock::now() + timeout, element);
+}
+
+
+template <typename ElementT, typename QueueT>
+template <typename Clock, typename Duration>
+bool LLThreadSafeQueue<ElementT, QueueT>::tryPopUntil(
+ const std::chrono::time_point<Clock, Duration>& until,
+ ElementT& element)
+{
+ LL_PROFILE_ZONE_SCOPED_CATEGORY_THREAD;
+ return tryLockUntil(
+ until,
+ [this, until, &element](lock_t& lock)
+ {
+ // conflate EMPTY, DONE, WAITING
+ return tryPopUntil_(lock, until, element) == POPPED;
+ });
+}
+
+
+// body of tryPopUntil(), called once we have the lock
+template <typename ElementT, typename QueueT>
+template <typename Clock, typename Duration>
+typename LLThreadSafeQueue<ElementT, QueueT>::pop_result
+LLThreadSafeQueue<ElementT, QueueT>::tryPopUntil_(
+ lock_t& lock,
+ const std::chrono::time_point<Clock, Duration>& until,
+ ElementT& element)
+{
+ LL_PROFILE_ZONE_SCOPED_CATEGORY_THREAD;
+ while (true)
+ {
+ pop_result popped = pop_(lock, element);
+ if (popped == POPPED || popped == DONE)
+ {
+ // If we succeeded, great! If we've drained the last item, so be
+ // it. Either way, break the loop and tell caller.
+ return popped;
}
- // Storage Full. Wait for signal.
- if (LLCoros::cv_status::timeout == mCapacityCond.wait_until(lock1, endpoint))
+ // EMPTY or WAITING: wait for signal.
+ if (LLCoros::cv_status::timeout == mEmptyCond.wait_until(lock, until))
{
- // timed out -- formally we might recheck both conditions above
- return false;
+ // timed out -- formally we might recheck
+ // as it is, break loop
+ return popped;
}
// If we didn't time out, we were notified for some reason. Loop back
// to check.
@@ -215,102 +547,44 @@ bool LLThreadSafeQueue::tryPushFrontFor(const std::chrono::duration
-bool LLThreadSafeQueue<ElementT>::tryPushFront(ElementT const & element)
-{
- lock_t lock1(mLock, std::defer_lock);
- if (!lock1.try_lock())
- return false;
-
- if (mClosed)
- return false;
-
- if (mStorage.size() >= mCapacity)
- return false;
-
- mStorage.push_front(element);
- lock1.unlock();
- mEmptyCond.notify_one();
- return true;
-}
-
-
-template<typename ElementT>
-ElementT LLThreadSafeQueue<ElementT>::popBack(void)
-{
- lock_t lock1(mLock);
- while (true)
- {
- if (!mStorage.empty())
- {
- ElementT value = mStorage.back();
- mStorage.pop_back();
- lock1.unlock();
- mCapacityCond.notify_one();
- return value;
- }
-
- if (mClosed)
- {
- LLTHROW(LLThreadSafeQueueInterrupt());
- }
-
- // Storage empty. Wait for signal.
- mEmptyCond.wait(lock1);
- }
-}
-
-
-template<typename ElementT>
-bool LLThreadSafeQueue<ElementT>::tryPopBack(ElementT & element)
-{
- lock_t lock1(mLock, std::defer_lock);
- if (!lock1.try_lock())
- return false;
-
- // no need to check mClosed: tryPopBack() behavior when the queue is
- // closed is implemented by simple inability to push any new elements
- if (mStorage.empty())
- return false;
-
- element = mStorage.back();
- mStorage.pop_back();
- lock1.unlock();
- mCapacityCond.notify_one();
- return true;
-}
-
-
-template<typename ElementT>
-size_t LLThreadSafeQueue<ElementT>::size(void)
+template <typename ElementT, typename QueueT>
+size_t LLThreadSafeQueue<ElementT, QueueT>::size(void)
{
+ LL_PROFILE_ZONE_SCOPED_CATEGORY_THREAD;
lock_t lock(mLock);
return mStorage.size();
}
-template<typename ElementT>
-void LLThreadSafeQueue<ElementT>::close()
+
+template <typename ElementT, typename QueueT>
+void LLThreadSafeQueue<ElementT, QueueT>::close()
{
+ LL_PROFILE_ZONE_SCOPED_CATEGORY_THREAD;
lock_t lock(mLock);
mClosed = true;
lock.unlock();
- // wake up any blocked popBack() calls
+ // wake up any blocked pop() calls
mEmptyCond.notify_all();
- // wake up any blocked pushFront() calls
+ // wake up any blocked push() calls
mCapacityCond.notify_all();
}
-template<typename ElementT>
-bool LLThreadSafeQueue<ElementT>::isClosed()
+
+template <typename ElementT, typename QueueT>
+bool LLThreadSafeQueue<ElementT, QueueT>::isClosed()
{
+ LL_PROFILE_ZONE_SCOPED_CATEGORY_THREAD;
lock_t lock(mLock);
- return mClosed && mStorage.size() == 0;
+ return mClosed;
}
-template<typename ElementT>
-LLThreadSafeQueue<ElementT>::operator bool()
+
+template <typename ElementT, typename QueueT>
+bool LLThreadSafeQueue<ElementT, QueueT>::done()
{
- return ! isClosed();
+ LL_PROFILE_ZONE_SCOPED_CATEGORY_THREAD;
+ lock_t lock(mLock);
+ return mClosed && mStorage.empty();
}
#endif
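A usage sketch of the reworked queue API: the producer push()es until it close()s the queue, and the consumer drains with pop()/tryPopFor() until done() reports the queue closed and empty (a blocking pop() on a closed, drained queue throws LLThreadSafeQueueInterrupt). The element type and handle() call below are illustrative:

    LLThreadSafeQueue<std::string> queue(128);     // at most 128 pending items

    void producer()
    {
        for (int i = 0; i < 1000; ++i)
            queue.push("work " + std::to_string(i));   // blocks while the queue is full
        queue.close();                                  // no further pushes accepted
    }

    void consumer()
    {
        std::string item;
        while (! queue.done())                          // closed *and* drained?
        {
            if (queue.tryPopFor(std::chrono::milliseconds(100), item))
                handle(item);                           // hypothetical handler
        }
    }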
diff --git a/indra/llcommon/lltrace.cpp b/indra/llcommon/lltrace.cpp
index 54079a4689..f59b207ded 100644
--- a/indra/llcommon/lltrace.cpp
+++ b/indra/llcommon/lltrace.cpp
@@ -61,6 +61,7 @@ TimeBlockTreeNode::TimeBlockTreeNode()
void TimeBlockTreeNode::setParent( BlockTimerStatHandle* parent )
{
+ LL_PROFILE_ZONE_SCOPED;
llassert_always(parent != mBlock);
llassert_always(parent != NULL);
diff --git a/indra/llcommon/lltrace.h b/indra/llcommon/lltrace.h
index 0d0cd6f581..fcd8753f75 100644
--- a/indra/llcommon/lltrace.h
+++ b/indra/llcommon/lltrace.h
@@ -227,6 +227,7 @@ public:
void setName(const char* name)
{
+ LL_PROFILE_ZONE_SCOPED_CATEGORY_STATS;
mName = name;
setKey(name);
}
@@ -234,12 +235,14 @@ public:
/*virtual*/ const char* getUnitLabel() const { return "KB"; }
 StatType<MemAccumulator::AllocationFacet>& allocations()
- {
+ {
+ LL_PROFILE_ZONE_SCOPED_CATEGORY_STATS;
 return static_cast<StatType<MemAccumulator::AllocationFacet>&>(*(StatType<MemAccumulator>*)this);
}
 StatType<MemAccumulator::DeallocationFacet>& deallocations()
- {
+ {
+ LL_PROFILE_ZONE_SCOPED_CATEGORY_STATS;
 return static_cast<StatType<MemAccumulator::DeallocationFacet>&>(*(StatType<MemAccumulator>*)this);
}
};
@@ -261,6 +264,7 @@ struct MeasureMem
{
static size_t measureFootprint(const T& value)
{
+ LL_PROFILE_ZONE_SCOPED_CATEGORY_STATS;
return sizeof(T) + value.getMemFootprint();
}
};
@@ -270,6 +274,7 @@ struct MeasureMem
{
static size_t measureFootprint(const T& value)
{
+ LL_PROFILE_ZONE_SCOPED_CATEGORY_STATS;
return U32Bytes(value).value();
}
};
@@ -279,6 +284,7 @@ struct MeasureMem
{
static size_t measureFootprint(const T* value)
{
+ LL_PROFILE_ZONE_SCOPED_CATEGORY_STATS;
if (!value)
{
return 0;
@@ -323,6 +329,7 @@ struct MeasureMem, IS_MEM_TRACKABLE, IS_BYTES>
{
 static size_t measureFootprint(const std::basic_string<T>& value)
{
+ LL_PROFILE_ZONE_SCOPED_CATEGORY_STATS;
return value.capacity() * sizeof(T);
}
};
@@ -331,6 +338,7 @@ struct MeasureMem, IS_MEM_TRACKABLE, IS_BYTES>
 template<typename T>
inline void claim_alloc(MemStatHandle& measurement, const T& value)
{
+ LL_PROFILE_ZONE_SCOPED_CATEGORY_STATS;
#if LL_TRACE_ENABLED
 S32 size = MeasureMem<T>::measureFootprint(value);
if(size == 0) return;
@@ -343,6 +351,7 @@ inline void claim_alloc(MemStatHandle& measurement, const T& value)
 template<typename T>
inline void disclaim_alloc(MemStatHandle& measurement, const T& value)
{
+ LL_PROFILE_ZONE_SCOPED_CATEGORY_STATS;
#if LL_TRACE_ENABLED
 S32 size = MeasureMem<T>::measureFootprint(value);
if(size == 0) return;
@@ -352,141 +361,6 @@ inline void disclaim_alloc(MemStatHandle& measurement, const T& value)
#endif
}
-template
-class MemTrackableNonVirtual
-{
-public:
- typedef void mem_trackable_tag_t;
-
- MemTrackableNonVirtual(const char* name)
-#if LL_TRACE_ENABLED
- : mMemFootprint(0)
-#endif
- {
-#if LL_TRACE_ENABLED
- static bool name_initialized = false;
- if (!name_initialized)
- {
- name_initialized = true;
- sMemStat.setName(name);
- }
-#endif
- }
-
-#if LL_TRACE_ENABLED
- ~MemTrackableNonVirtual()
- {
- disclaimMem(mMemFootprint);
- }
-
- static MemStatHandle& getMemStatHandle()
- {
- return sMemStat;
- }
-
- S32 getMemFootprint() const { return mMemFootprint; }
-#endif
-
- void* operator new(size_t size)
- {
-#if LL_TRACE_ENABLED
- claim_alloc(sMemStat, size);
-#endif
- return ll_aligned_malloc(size);
- }
-
- template
- static void* aligned_new(size_t size)
- {
-#if LL_TRACE_ENABLED
- claim_alloc(sMemStat, size);
-#endif
- return ll_aligned_malloc(size);
- }
-
- void operator delete(void* ptr, size_t size)
- {
-#if LL_TRACE_ENABLED
- disclaim_alloc(sMemStat, size);
-#endif
- ll_aligned_free(ptr);
- }
-
- template
- static void aligned_delete(void* ptr, size_t size)
- {
-#if LL_TRACE_ENABLED
- disclaim_alloc(sMemStat, size);
-#endif
- ll_aligned_free(ptr);
- }
-
- void* operator new [](size_t size)
- {
-#if LL_TRACE_ENABLED
- claim_alloc(sMemStat, size);
-#endif
- return ll_aligned_malloc(size);
- }
-
- void operator delete[](void* ptr, size_t size)
- {
-#if LL_TRACE_ENABLED
- disclaim_alloc(sMemStat, size);
-#endif
- ll_aligned_free(ptr);
- }
-
- // claim memory associated with other objects/data as our own, adding to our calculated footprint
- template