Ansariel 2020-07-29 13:23:00 +02:00
commit 67255f5d64
549 changed files with 8924 additions and 15566 deletions

.gitignore (vendored): 40 changes, Normal file → Executable file
View File

@ -14,14 +14,17 @@
LICENSES
build-darwin-*
build-linux-*
build-stamp
build-vc120*
build-vc150*
configure-stamp
debian/files
debian/secondlife-appearance-utility*
debian/secondlife-viewer*
indra/.distcc
build-vc80/
build-vc100/
build-vc120/
build-vc120-32/
build-vc120-64/
build-vc150-32/
build-vc150-64/
indra/CMakeFiles
indra/build-vc[0-9]*
indra/lib/mono/1.0/*.dll
@ -68,16 +71,33 @@ indra/web/doc/asset-upload/plugins/verify-notecard
indra/web/doc/asset-upload/plugins/verify-texture
installed.xml
libraries
logs
tarfile_tmp
trivial_change_force_build
web/config.*
web/locale.*
web/secondlife.com.*
debian/secondlife-viewer*
debian/secondlife-appearance-utility*
debian/files
build-stamp
configure-stamp
indra/newview/dbghelp.dll
*Thumbs.db
*.cpp.orig
*.cpp.bak
*.h.bak
*.h.orig
indra/newview/typed_locations.txt
indra/newview/teleport_history.txt
indra/newview/search_history.txt
indra/newview/filters.xml
indra/newview/avatar_icons_cache.txt
indra/newview/avatar_lad.log
*.diff
indra/newview/pilot.txt
indra/newview/pilot.xml
#*.rej
.*\.wixobj
# Firestorm additions
indra/newview/exoflickrkeys.h
indra/newview/fsdiscordkey.h
my_autobuild.xml
.vscode
*Thumbs.db
logs

View File

@ -1,4 +1,4 @@
First, make sure gcc-4.9 and g++-4.9 are installed.
First, make sure gcc-5.4 and g++-5.4 are installed.
Ensure you can build a stock viewer-development tree as described in the SL wiki. Before asking for any help
compiling Firestorm, make sure you can build viewer-development first. If you try to skip this step, you may

View File

@ -2,10 +2,10 @@ Before you start configuring your Windows build system, be aware of our tested c
Memory: You will need at least 2GB RAM, 4GB strongly recommended.
CPU: Multiple CPUs are strongly recommended.
A build can take over an hour.
Visual Studio 2013 Community Edition.
Visual Studio 2017 Community Edition.
Ensure you can build a stock viewer-development tree as described in the SL wiki. Before asking for any help
compiling Firestorm, make sure you can build viewer-development first. If you try to skip this step, you may
compiling Firestorm, make sure you can build the Second Life viewer first. If you try to skip this step, you may
receive much less help. http://wiki.secondlife.com/wiki/Visual_Studio_2013_Viewer_Builds
If you want to use licensed FMOD or KDU build libraries (they are optional) you have to provision these yourself.
@ -48,7 +48,7 @@ If you want to build the 64bit version, add the parameter -A 64 to the autobuild
Logs:
Look for logs in build-vc120-32/logs for 32bit builds and build-vc120-64/logs for 64bit
Look for logs in build-vc150-32/logs for 32bit builds and build-vc150-64/logs for 64bit
Output:
Look for output in build-vc120-32/newview/Release for 32bit builds and build-vc120-64/newview/Release for 64bit
Look for output in build-vc150-32/newview/Release for 32bit builds and build-vc150-64/newview/Release for 64bit

File diff suppressed because it is too large.

View File

@ -28,7 +28,7 @@ build_dir_Linux()
build_dir_CYGWIN()
{
echo build-vc120-${AUTOBUILD_ADDRSIZE}
echo build-vc${AUTOBUILD_VSVER:-120}-${AUTOBUILD_ADDRSIZE}
}
viewer_channel_suffix()

View File

@ -1098,6 +1098,7 @@ Nicky Dasmijn
SL-11061
SL-11072
SL-13141
SL-13642
Nicky Perian
OPEN-1
STORM-1087

View File

@ -15,6 +15,11 @@ set(CMAKE_MODULE_PATH "${CMAKE_SOURCE_DIR}/cmake")
include(Variables)
include(BuildVersion)
set(LEGACY_STDIO_LIBS)
if (WINDOWS)
set(LEGACY_STDIO_LIBS legacy_stdio_definitions)
endif (WINDOWS)
if (NOT CMAKE_BUILD_TYPE)
set(CMAKE_BUILD_TYPE RelWithDebInfo CACHE STRING
"Build type. One of: Debug Release RelWithDebInfo" FORCE)
@ -93,6 +98,7 @@ if (WINDOWS AND EXISTS ${LIBS_CLOSED_DIR}copy_win_scripts)
endif (WINDOWS AND EXISTS ${LIBS_CLOSED_DIR}copy_win_scripts)
add_custom_target(viewer)
add_subdirectory(${LIBS_OPEN_PREFIX}llcrashlogger)
add_subdirectory(${LIBS_OPEN_PREFIX}llplugin)
add_subdirectory(${LIBS_OPEN_PREFIX}llui)

View File

@ -67,6 +67,11 @@ if (WINDOWS)
# http://www.cmake.org/pipermail/cmake/2009-September/032143.html
string(REPLACE "/Zm1000" " " CMAKE_CXX_FLAGS ${CMAKE_CXX_FLAGS})
# <FS:ND> Remove this; it is not an option cl.exe understands and causes a massive amount of warnings.
# Without PreferredToolArchitecture=x64, as of 2020-06-26 the 32-bit
# compiler on our TeamCity build hosts has started running out of virtual
# memory for the precompiled header file.
#set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} /MP /p:PreferredToolArchitecture=x64")
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} /MP")
set(CMAKE_CXX_FLAGS_RELWITHDEBINFO

View File

@ -7,8 +7,8 @@ set(Boost_FIND_REQUIRED ON)
if (USESYSTEMLIBS)
include(FindBoost)
# <FS:TS> boost::context isn't used, so don't try to include it
# set(BOOST_CONTEXT_LIBRARY boost_context-mt)
set(BOOST_CONTEXT_LIBRARY boost_context-mt)
set(BOOST_FIBER_LIBRARY boost_fiber-mt)
set(BOOST_FILESYSTEM_LIBRARY boost_filesystem-mt)
set(BOOST_PROGRAM_OPTIONS_LIBRARY boost_program_options-mt)
set(BOOST_REGEX_LIBRARY boost_regex-mt)
@ -19,11 +19,14 @@ if (USESYSTEMLIBS)
else (USESYSTEMLIBS)
use_prebuilt_binary(boost)
set(Boost_INCLUDE_DIRS ${LIBS_PREBUILT_DIR}/include)
set(BOOST_VERSION "1.55")
add_definitions(-DBOOST_ALL_NO_LIB)
# with the address size.
set(addrsfx "-x${ADDRESS_SIZE}")
if (WINDOWS)
if(MSVC80)
# This should be obsolete at this point
set(BOOST_VERSION "1.55")
set(BOOST_CONTEXT_LIBRARY
optimized libboost_context-vc80-mt-${BOOST_VERSION}
debug libboost_context-vc80-mt-gd-${BOOST_VERSION})
@ -54,89 +57,89 @@ else (USESYSTEMLIBS)
else(MSVC80)
# MSVC 10.0 config
set(BOOST_CONTEXT_LIBRARY
optimized libboost_context-mt
debug libboost_context-mt-gd)
set(BOOST_COROUTINE_LIBRARY
optimized libboost_coroutine-mt
debug libboost_coroutine-mt-gd)
optimized libboost_context-mt${addrsfx}
debug libboost_context-mt${addrsfx}-gd)
set(BOOST_FIBER_LIBRARY
optimized libboost_fiber-mt${addrsfx}
debug libboost_fiber-mt${addrsfx}-gd)
set(BOOST_FILESYSTEM_LIBRARY
optimized libboost_filesystem-mt
debug libboost_filesystem-mt-gd)
optimized libboost_filesystem-mt${addrsfx}
debug libboost_filesystem-mt${addrsfx}-gd)
set(BOOST_PROGRAM_OPTIONS_LIBRARY
optimized libboost_program_options-mt
debug libboost_program_options-mt-gd)
optimized libboost_program_options-mt${addrsfx}
debug libboost_program_options-mt${addrsfx}-gd)
set(BOOST_REGEX_LIBRARY
optimized libboost_regex-mt
debug libboost_regex-mt-gd)
optimized libboost_regex-mt${addrsfx}
debug libboost_regex-mt${addrsfx}-gd)
set(BOOST_SIGNALS_LIBRARY
optimized libboost_signals-mt
debug libboost_signals-mt-gd)
optimized libboost_signals-mt${addrsfx}
debug libboost_signals-mt${addrsfx}-gd)
set(BOOST_SYSTEM_LIBRARY
optimized libboost_system-mt
debug libboost_system-mt-gd)
optimized libboost_system-mt${addrsfx}
debug libboost_system-mt${addrsfx}-gd)
set(BOOST_THREAD_LIBRARY
optimized libboost_thread-mt
debug libboost_thread-mt-gd)
optimized libboost_thread-mt${addrsfx}
debug libboost_thread-mt${addrsfx}-gd)
set(BOOST_WAVE_LIBRARY
optimized libboost_wave-mt
debug libboost_wave-mt-gd)
optimized libboost_wave-mt${addrsfx}
debug libboost_wave-mt${addrsfx}-gd)
endif (MSVC80)
elseif (LINUX)
set(BOOST_CONTEXT_LIBRARY
optimized boost_context-mt
debug boost_context-mt-d)
set(BOOST_COROUTINE_LIBRARY
optimized boost_coroutine-mt
debug boost_coroutine-mt-d)
optimized boost_context-mt${addrsfx}
debug boost_context-mt${addrsfx}-d)
set(BOOST_FIBER_LIBRARY
optimized boost_fiber-mt${addrsfx}
debug boost_fiber-mt${addrsfx}-d)
set(BOOST_FILESYSTEM_LIBRARY
optimized boost_filesystem-mt
debug boost_filesystem-mt-d)
optimized boost_filesystem-mt${addrsfx}
debug boost_filesystem-mt${addrsfx}-d)
set(BOOST_PROGRAM_OPTIONS_LIBRARY
optimized boost_program_options-mt
debug boost_program_options-mt-d)
optimized boost_program_options-mt${addrsfx}
debug boost_program_options-mt${addrsfx}-d)
set(BOOST_REGEX_LIBRARY
optimized boost_regex-mt
debug boost_regex-mt-d)
optimized boost_regex-mt${addrsfx}
debug boost_regex-mt${addrsfx}-d)
set(BOOST_SIGNALS_LIBRARY
optimized boost_signals-mt
debug boost_signals-mt-d)
optimized boost_signals-mt${addrsfx}
debug boost_signals-mt${addrsfx}-d)
set(BOOST_SYSTEM_LIBRARY
optimized boost_system-mt
debug boost_system-mt-d)
optimized boost_system-mt${addrsfx}
debug boost_system-mt${addrsfx}-d)
set(BOOST_THREAD_LIBRARY
optimized boost_thread-mt
debug boost_thread-mt-d)
set(BOOST_WAVE_LIBRARY
optimized boost_wave-mt
debug boost_wave-mt-d)
optimized boost_thread-mt${addrsfx}
debug boost_thread-mt${addrsfx}-d)
set(BOOST_WAVE_LIBRARY
optimized boost_wave-mt${addrsfx}
debug boost_wave-mt${addrsfx}-gd)
elseif (DARWIN)
set(BOOST_CONTEXT_LIBRARY
optimized boost_context-mt
debug boost_context-mt-d)
set(BOOST_COROUTINE_LIBRARY
optimized boost_coroutine-mt
debug boost_coroutine-mt-d)
optimized boost_context-mt${addrsfx}
debug boost_context-mt${addrsfx}-d)
set(BOOST_FIBER_LIBRARY
optimized boost_fiber-mt${addrsfx}
debug boost_fiber-mt${addrsfx}-d)
set(BOOST_FILESYSTEM_LIBRARY
optimized boost_filesystem-mt
debug boost_filesystem-mt-d)
optimized boost_filesystem-mt${addrsfx}
debug boost_filesystem-mt${addrsfx}-d)
set(BOOST_PROGRAM_OPTIONS_LIBRARY
optimized boost_program_options-mt
debug boost_program_options-mt-d)
optimized boost_program_options-mt${addrsfx}
debug boost_program_options-mt${addrsfx}-d)
set(BOOST_REGEX_LIBRARY
optimized boost_regex-mt
debug boost_regex-mt-d)
optimized boost_regex-mt${addrsfx}
debug boost_regex-mt${addrsfx}-d)
set(BOOST_SIGNALS_LIBRARY
optimized boost_signals-mt
debug boost_signals-mt-d)
optimized boost_signals-mt${addrsfx}
debug boost_signals-mt${addrsfx}-d)
set(BOOST_SYSTEM_LIBRARY
optimized boost_system-mt
debug boost_system-mt-d)
optimized boost_system-mt${addrsfx}
debug boost_system-mt${addrsfx}-d)
set(BOOST_THREAD_LIBRARY
optimized boost_thread-mt
debug boost_thread-mt-d)
set(BOOST_WAVE_LIBRARY
optimized boost_wave-mt
debug boost_wave-mt-d)
optimized boost_thread-mt${addrsfx}
debug boost_thread-mt${addrsfx}-d)
set(BOOST_WAVE_LIBRARY
optimized libboost_wave-mt${addrsfx}
debug libboost_wave-mt${addrsfx}-gd)
endif (WINDOWS)
endif (USESYSTEMLIBS)

View File

@ -1,6 +1,7 @@
# -*- cmake -*-
# Construct the version and copyright information based on package data.
include(Python)
include(FindAutobuild)
# packages-formatter.py runs autobuild install --versions, which needs to know
# the build_directory, which (on Windows) depends on AUTOBUILD_ADDRSIZE.
@ -13,7 +14,7 @@ add_custom_command(OUTPUT packages-info.txt
DEPENDS ${CMAKE_SOURCE_DIR}/../scripts/packages-formatter.py
${CMAKE_SOURCE_DIR}/../autobuild.xml
COMMAND ${PYTHON_EXECUTABLE}
${CMAKE_SOURCE_DIR}/cmake/run_build_test.py -DAUTOBUILD_ADDRSIZE=${ADDRESS_SIZE}
${CMAKE_SOURCE_DIR}/cmake/run_build_test.py -DAUTOBUILD_ADDRSIZE=${ADDRESS_SIZE} -DAUTOBUILD=${AUTOBUILD_EXECUTABLE}
${PYTHON_EXECUTABLE}
${CMAKE_SOURCE_DIR}/../scripts/packages-formatter.py "${VIEWER_CHANNEL}" "${VIEWER_SHORT_VERSION}.${VIEWER_VERSION_REVISION}" > packages-info.txt
)

View File

@ -6,15 +6,10 @@ if (USESYSTEMLIBS)
set(CEFPLUGIN OFF CACHE BOOL
"CEFPLUGIN support for the llplugin/llmedia test apps.")
else (USESYSTEMLIBS)
if (LINUX AND ( CMAKE_CXX_COMPILER_VERSION VERSION_GREATER 4.9.4 ) )
message( "Using dullahan for GCC >= 5 " )
use_prebuilt_binary(dullahan-gcc5)
else()
use_prebuilt_binary(dullahan)
endif()
set(CEFPLUGIN ON CACHE BOOL
"CEFPLUGIN support for the llplugin/llmedia test apps.")
set(CEF_INCLUDE_DIR ${LIBS_PREBUILT_DIR}/include/cef)
set(CEF_INCLUDE_DIR ${LIBS_PREBUILT_DIR}/include/cef)
endif (USESYSTEMLIBS)
if (WINDOWS)

View File

@ -23,7 +23,6 @@ set(cmake_SOURCE_FILES
DBusGlib.cmake
DeploySharedLibs.cmake
Discord.cmake # <FS:LO> Discord rich presence
DirectX.cmake
DragDrop.cmake
EXPAT.cmake
FindAPR.cmake

View File

@ -7,6 +7,21 @@
include(CMakeCopyIfDifferent)
include(Linking)
# When we copy our dependent libraries, we almost always want to copy them to
# both the Release and the RelWithDebInfo staging directories. This has
# resulted in duplicate (or worse, erroneous attempted duplicate)
# copy_if_different commands. Encapsulate that usage.
# Pass FROM_DIR, TARGETS and the files to copy. TO_DIR is implicit.
# to_staging_dirs diverges from copy_if_different in that it appends to TARGETS.
MACRO(to_staging_dirs from_dir targets)
foreach(staging_dir
"${SHARED_LIB_STAGING_DIR_RELEASE}"
"${SHARED_LIB_STAGING_DIR_RELWITHDEBINFO}")
copy_if_different("${from_dir}" "${staging_dir}" out_targets ${ARGN})
list(APPEND "${targets}" "${out_targets}")
endforeach()
ENDMACRO(to_staging_dirs from_dir to_dir targets)
###################################################################
# set up platform specific lists of files that need to be copied
###################################################################
@ -74,95 +89,54 @@ if(WINDOWS)
#*******************************
# Copy MS C runtime dlls, required for packaging.
# *TODO - Adapt this to support VC9
if (MSVC80)
list(APPEND LMSVC_VER 80)
list(APPEND LMSVC_VERDOT 8.0)
set(MSVC_VER 80)
elseif (MSVC_VERSION EQUAL 1600) # VisualStudio 2010
MESSAGE(STATUS "MSVC_VERSION ${MSVC_VERSION}")
elseif (MSVC_VERSION EQUAL 1800) # VisualStudio 2013, which is (sigh) VS 12
list(APPEND LMSVC_VER 120)
list(APPEND LMSVC_VERDOT 12.0)
set(MSVC_VER 120)
elseif (MSVC_VERSION GREATER_EQUAL 1910 AND MSVC_VERSION LESS 1920) # Visual Studio 2017
set(MSVC_VER 140)
else (MSVC80)
MESSAGE(WARNING "New MSVC_VERSION ${MSVC_VERSION} of MSVC: adapt Copy3rdPartyLibs.cmake")
endif (MSVC80)
# try to copy VS2010 redist independently of system version
# maint-7360 CP
# list(APPEND LMSVC_VER 100)
# list(APPEND LMSVC_VERDOT 10.0)
if(ADDRESS_SIZE EQUAL 32)
# this folder contains the 32bit DLLs.. (yes really!)
set(registry_find_path "[HKEY_LOCAL_MACHINE\\SYSTEM\\CurrentControlSet\\Control\\Windows;Directory]/SysWOW64")
else(ADDRESS_SIZE EQUAL 32)
# this folder contains the 64bit DLLs.. (yes really!)
set(registry_find_path "[HKEY_LOCAL_MACHINE\\SYSTEM\\CurrentControlSet\\Control\\Windows;Directory]/System32")
endif(ADDRESS_SIZE EQUAL 32)
list(LENGTH LMSVC_VER count)
math(EXPR count "${count}-1")
foreach(i RANGE ${count})
list(GET LMSVC_VER ${i} MSVC_VER)
list(GET LMSVC_VERDOT ${i} MSVC_VERDOT)
MESSAGE(STATUS "Copying redist libs for VC ${MSVC_VERDOT}")
FIND_PATH(debug_msvc_redist_path NAME msvcr${MSVC_VER}d.dll
PATHS
[HKEY_LOCAL_MACHINE\\SOFTWARE\\Microsoft\\VisualStudio\\${MSVC_VERDOT}\\Setup\\VC;ProductDir]/redist/Debug_NonRedist/x86/Microsoft.VC${MSVC_VER}.DebugCRT
[HKEY_LOCAL_MACHINE\\SYSTEM\\CurrentControlSet\\Control\\Windows;Directory]/SysWOW64
[HKEY_LOCAL_MACHINE\\SYSTEM\\CurrentControlSet\\Control\\Windows;Directory]/System32
${MSVC_DEBUG_REDIST_PATH}
NO_DEFAULT_PATH
# Having a string containing the system registry path is a start, but to
# get CMake to actually read the registry, we must engage some other
# operation.
get_filename_component(registry_path "${registry_find_path}" ABSOLUTE)
# These are candidate DLL names. Empirically, VS versions before 2015 have
# msvcp*.dll and msvcr*.dll. VS 2017 has msvcp*.dll and vcruntime*.dll.
# Check each of them.
foreach(release_msvc_file
msvcp${MSVC_VER}.dll
msvcr${MSVC_VER}.dll
vcruntime${MSVC_VER}.dll
)
if(EXISTS ${debug_msvc_redist_path})
set(debug_msvc_files
msvcr${MSVC_VER}d.dll
msvcp${MSVC_VER}d.dll
)
copy_if_different(
${debug_msvc_redist_path}
"${SHARED_LIB_STAGING_DIR_DEBUG}"
out_targets
${debug_msvc_files}
)
set(third_party_targets ${third_party_targets} ${out_targets})
unset(debug_msvc_redist_path CACHE)
endif()
if(ADDRESS_SIZE EQUAL 32)
# this folder contains the 32bit DLLs.. (yes really!)
set(registry_find_path "[HKEY_LOCAL_MACHINE\\SYSTEM\\CurrentControlSet\\Control\\Windows;Directory]/SysWOW64")
else(ADDRESS_SIZE EQUAL 32)
# this folder contains the 64bit DLLs.. (yes really!)
set(registry_find_path "[HKEY_LOCAL_MACHINE\\SYSTEM\\CurrentControlSet\\Control\\Windows;Directory]/System32")
endif(ADDRESS_SIZE EQUAL 32)
FIND_PATH(release_msvc_redist_path NAME msvcr${MSVC_VER}.dll
PATHS
${registry_find_path}
NO_DEFAULT_PATH
)
if(EXISTS ${release_msvc_redist_path})
set(release_msvc_files
msvcr${MSVC_VER}.dll
msvcp${MSVC_VER}.dll
)
copy_if_different(
${release_msvc_redist_path}
"${SHARED_LIB_STAGING_DIR_RELEASE}"
out_targets
${release_msvc_files}
)
set(third_party_targets ${third_party_targets} ${out_targets})
copy_if_different(
${release_msvc_redist_path}
"${SHARED_LIB_STAGING_DIR_RELWITHDEBINFO}"
out_targets
${release_msvc_files}
)
set(third_party_targets ${third_party_targets} ${out_targets})
unset(release_msvc_redist_path CACHE)
if(EXISTS "${registry_path}/${release_msvc_file}")
to_staging_dirs(
${registry_path}
third_party_targets
${release_msvc_file})
else()
# This isn't a WARNING because, as noted above, every VS version
# we've observed has only a subset of the specified DLL names.
MESSAGE(STATUS "Redist lib ${release_msvc_file} not found")
endif()
endforeach()
MESSAGE(STATUS "Will copy redist files for MSVC ${MSVC_VER}:")
foreach(target ${third_party_targets})
MESSAGE(STATUS "${target}")
endforeach()
elseif(DARWIN)
set(SHARED_LIB_STAGING_DIR_DEBUG "${SHARED_LIB_STAGING_DIR}/Debug/Resources")
@ -187,10 +161,11 @@ elseif(DARWIN)
libexception_handler.dylib
${EXPAT_COPY}
libGLOD.dylib
libhunspell-1.3.0.dylib
libndofdev.dylib
libnghttp2.dylib
libnghttp2.14.dylib
libnghttp2.14.14.0.dylib
libnghttp2.14.19.0.dylib
libgrowl.dylib
libgrowl++.dylib
)
@ -283,52 +258,28 @@ endif(WINDOWS)
# Done building the file lists, now set up the copy commands.
################################################################
copy_if_different(
${vivox_lib_dir}
"${SHARED_LIB_STAGING_DIR_DEBUG}"
out_targets
${vivox_libs}
)
set(third_party_targets ${third_party_targets} ${out_targets})
# Curiously, slvoice_files are only copied to SHARED_LIB_STAGING_DIR_RELEASE.
# It's unclear whether this is oversight or intentional, but anyway leave the
# single copy_if_different command rather than using to_staging_dirs.
copy_if_different(
${slvoice_src_dir}
"${SHARED_LIB_STAGING_DIR_RELEASE}"
out_targets
${slvoice_files}
)
copy_if_different(
list(APPEND third_party_targets ${out_targets})
to_staging_dirs(
${vivox_lib_dir}
"${SHARED_LIB_STAGING_DIR_RELEASE}"
out_targets
third_party_targets
${vivox_libs}
)
set(third_party_targets ${third_party_targets} ${out_targets})
copy_if_different(
${vivox_lib_dir}
"${SHARED_LIB_STAGING_DIR_RELWITHDEBINFO}"
out_targets
${vivox_libs}
)
set(third_party_targets ${third_party_targets} ${out_targets})
copy_if_different(
to_staging_dirs(
${release_src_dir}
"${SHARED_LIB_STAGING_DIR_RELEASE}"
out_targets
third_party_targets
${release_files}
)
set(third_party_targets ${third_party_targets} ${out_targets})
copy_if_different(
${release_src_dir}
"${SHARED_LIB_STAGING_DIR_RELWITHDEBINFO}"
out_targets
${release_files}
)
set(third_party_targets ${third_party_targets} ${out_targets})
#<FS:TS> We need to do this regardless
#if(NOT USESYSTEMLIBS)

View File

@ -1,61 +0,0 @@
# -*- cmake -*-
if (WINDOWS)
if (DEFINED ENV{PROGRAMFILES\(X86\)})
set (PROGRAMFILES $ENV{PROGRAMFILES\(X86\)})
else (DEFINED ENV{PROGRAMFILES\(X86\)})
set (PROGRAMFILES $ENV{PROGRAMFILES})
endif (DEFINED ENV{PROGRAMFILES\(X86\)})
find_path(DIRECTX_INCLUDE_DIR dxdiag.h
"$ENV{DXSDK_DIR}/Include"
"${PROGRAMFILES}/Windows Kits/8.1/Include/um"
"${PROGRAMFILES}/Microsoft DirectX SDK (June 2010)/Include"
"${PROGRAMFILES}/Microsoft DirectX SDK (August 2009)/Include"
"${PROGRAMFILES}/Microsoft DirectX SDK (March 2009)/Include"
"${PROGRAMFILES}/Microsoft DirectX SDK (August 2008)/Include"
"${PROGRAMFILES}/Microsoft DirectX SDK (June 2008)/Include"
"${PROGRAMFILES}/Microsoft DirectX SDK (March 2008)/Include"
"${PROGRAMFILES}/Microsoft DirectX SDK (November 2007)/Include"
"${PROGRAMFILES}/Microsoft DirectX SDK (August 2007)/Include"
"C:/DX90SDK/Include"
"${PROGRAMFILES}/DX90SDK/Include"
)
if (DIRECTX_INCLUDE_DIR)
include_directories(${DIRECTX_INCLUDE_DIR})
if (DIRECTX_FIND_QUIETLY)
message(STATUS "Found DirectX include: ${DIRECTX_INCLUDE_DIR}")
endif (DIRECTX_FIND_QUIETLY)
else (DIRECTX_INCLUDE_DIR)
message(FATAL_ERROR "Could not find DirectX SDK Include")
endif (DIRECTX_INCLUDE_DIR)
if(ADDRESS_SIZE EQUAL 32)
set(DIRECTX_ARCHITECTURE "x86")
else(ADDRESS_SIZE EQUAL 32)
set(DIRECTX_ARCHITECTURE "x64")
endif(ADDRESS_SIZE EQUAL 32)
find_path(DIRECTX_LIBRARY_DIR dxguid.lib
"$ENV{DXSDK_DIR}/Lib/${DIRECTX_ARCHITECTURE}"
"${PROGRAMFILES}/Windows Kits/8.1/Lib/winv6.3/um/${DIRECTX_ARCHITECTURE}"
"${PROGRAMFILES}/Microsoft DirectX SDK (June 2010)/Lib/${DIRECTX_ARCHITECTURE}"
"${PROGRAMFILES}/Microsoft DirectX SDK (August 2009)/Lib/${DIRECTX_ARCHITECTURE}"
"${PROGRAMFILES}/Microsoft DirectX SDK (March 2009)/Lib/${DIRECTX_ARCHITECTURE}"
"${PROGRAMFILES}/Microsoft DirectX SDK (August 2008)/Lib/${DIRECTX_ARCHITECTURE}"
"${PROGRAMFILES}/Microsoft DirectX SDK (June 2008)/Lib/${DIRECTX_ARCHITECTURE}"
"${PROGRAMFILES}/Microsoft DirectX SDK (March 2008)/Lib/${DIRECTX_ARCHITECTURE}"
"${PROGRAMFILES}/Microsoft DirectX SDK (November 2007)/Lib/${DIRECTX_ARCHITECTURE}"
"${PROGRAMFILES}/Microsoft DirectX SDK (August 2007)/Lib/${DIRECTX_ARCHITECTURE}"
"C:/DX90SDK/Lib"
"${PROGRAMFILES}/DX90SDK/Lib"
)
if (DIRECTX_LIBRARY_DIR)
if (DIRECTX_FIND_QUIETLY)
message(STATUS "Found DirectX library: ${DIRECTX_LIBRARY_DIR}")
endif (DIRECTX_FIND_QUIETLY)
else (DIRECTX_LIBRARY_DIR)
message(FATAL_ERROR "Could not find DirectX SDK Libraries")
endif (DIRECTX_LIBRARY_DIR)
endif (WINDOWS)

View File

@ -5,11 +5,7 @@ use_prebuilt_binary(discord-rpc)
set(DISCORD_INCLUDE_DIRS ${LIBS_PREBUILT_DIR}/include/discord-rpc)
if (WINDOWS)
if (ADDRESS_SIZE EQUAL 32)
set(DISCORD_LIBRARY discord-rpc)
else ()
set(DISCORD_LIBRARY discord-rpc_x64)
endif(ADDRESS_SIZE EQUAL 32)
elseif (LINUX)
set(DISCORD_LIBRARY discord-rpc)
elseif (DARWIN)

View File

@ -16,7 +16,7 @@ else (USESYSTEMLIBS)
elseif (DARWIN)
set(JSONCPP_LIBRARIES libjson_darwin_libmt.a)
elseif (LINUX)
set(JSONCPP_LIBRARIES libjson_linux-gcc-4.8_libmt.a)
set(JSONCPP_LIBRARIES libjson_linux-gcc-5.4.0_libmt.a)
endif (WINDOWS)
set(JSONCPP_INCLUDE_DIR "${LIBS_PREBUILT_DIR}/include/")
endif (USESYSTEMLIBS)

View File

@ -58,7 +58,7 @@ MACRO(LL_ADD_PROJECT_UNIT_TESTS project sources)
${GOOGLEMOCK_INCLUDE_DIRS}
)
SET(alltest_LIBRARIES
${BOOST_COROUTINE_LIBRARY}
${BOOST_FIBER_LIBRARY}
${BOOST_CONTEXT_LIBRARY}
${BOOST_SYSTEM_LIBRARY}
${GOOGLEMOCK_LIBRARIES}
@ -205,8 +205,9 @@ FUNCTION(LL_ADD_INTEGRATION_TEST
)
SET(libraries
${LEGACY_STDIO_LIBS}
${library_dependencies}
${BOOST_COROUTINE_LIBRARY}
${BOOST_FIBER_LIBRARY}
${BOOST_CONTEXT_LIBRARY}
${BOOST_SYSTEM_LIBRARY}
${GOOGLEMOCK_LIBRARIES}

View File

@ -18,7 +18,7 @@ endif (BUILD_HEADLESS)
set(LLAPPEARANCE_LIBRARIES llappearance
llmessage
llcorehttp
${BOOST_COROUTINE_LIBRARY}
${BOOST_FIBER_LIBRARY}
${BOOST_CONTEXT_LIBRARY}
${BOOST_SYSTEM_LIBRARY}
)

View File

@ -19,7 +19,7 @@ if (LINUX)
# specify all libraries that llcommon uses.
# llcommon uses `clock_gettime' which is provided by librt on linux.
set(LLCOMMON_LIBRARIES llcommon
${BOOST_COROUTINE_LIBRARY}
${BOOST_FIBER_LIBRARY}
${BOOST_CONTEXT_LIBRARY}
${BOOST_THREAD_LIBRARY}
${BOOST_SYSTEM_LIBRARY}
@ -27,7 +27,7 @@ if (LINUX)
)
else (LINUX)
set(LLCOMMON_LIBRARIES llcommon
${BOOST_COROUTINE_LIBRARY}
${BOOST_FIBER_LIBRARY}
${BOOST_CONTEXT_LIBRARY}
${BOOST_THREAD_LIBRARY}
${BOOST_SYSTEM_LIBRARY} )

View File

@ -12,6 +12,6 @@ set(LLCOREHTTP_INCLUDE_DIRS
)
set(LLCOREHTTP_LIBRARIES llcorehttp
${BOOST_COROUTINE_LIBRARY}
${BOOST_FIBER_LIBRARY}
${BOOST_CONTEXT_LIBRARY}
${BOOST_SYSTEM_LIBRARY})

View File

@ -64,6 +64,7 @@ endif (DARWIN)
# Libraries on which this application depends on
# Sort by high-level to low-level
target_link_libraries(llimage_libtest
${LEGACY_STDIO_LIBS}
${LLCOMMON_LIBRARIES}
${LLVFS_LIBRARIES}
${LLMATH_LIBRARIES}

View File

@ -75,6 +75,7 @@ endif (DARWIN)
# Libraries on which this library depends, needed for Linux builds
# Sort by high-level to low-level
target_link_libraries(llui_libtest
${LEGACY_STDIO_LIBS}
llui
llinventory
llmessage

View File

@ -69,7 +69,7 @@ target_link_libraries(linux-crash-logger
${LLMATH_LIBRARIES}
${LLCOREHTTP_LIBRARIES}
${LLCOMMON_LIBRARIES}
${BOOST_COROUTINE_LIBRARY}
${BOOST_FIBER_LIBRARY}
${BOOST_CONTEXT_LIBRARY}
${UI_LIBRARIES}
${DB_LIBRARIES}

View File

@ -32,7 +32,8 @@
struct WearableEntry : public LLDictionaryEntry
{
WearableEntry(const std::string &name,
WearableEntry(LLWearableType& wtype,
const std::string &name,
const std::string& default_new_name,
LLAssetType::EType assetType,
LLInventoryType::EIconName iconName,
@ -41,7 +42,7 @@ struct WearableEntry : public LLDictionaryEntry
LLDictionaryEntry(name),
mAssetType(assetType),
mDefaultNewName(default_new_name),
mLabel(LLWearableType::getInstance()->mTrans->getString(name)),
mLabel(wtype.mTrans->getString(name)),
mIconName(iconName),
mDisableCameraSwitch(disable_camera_switch),
mAllowMultiwear(allow_multiwear)
@ -56,10 +57,10 @@ struct WearableEntry : public LLDictionaryEntry
BOOL mAllowMultiwear;
};
class LLWearableDictionary : public LLSingleton<LLWearableDictionary>,
class LLWearableDictionary : public LLParamSingleton<LLWearableDictionary>,
public LLDictionary<LLWearableType::EType, WearableEntry>
{
LLSINGLETON(LLWearableDictionary);
LLSINGLETON(LLWearableDictionary, LLWearableType&);
// [RLVa:KB] - Checked: 2010-03-03 (RLVa-1.2.0a) | Added: RLVa-1.2.0a
protected:
@ -68,38 +69,32 @@ protected:
// [/RLVa:KB]
};
LLWearableDictionary::LLWearableDictionary()
LLWearableDictionary::LLWearableDictionary(LLWearableType& wtype)
{
if (!LLWearableType::instanceExists())
{
// LLWearableType is effectively a wrapper around LLWearableDictionary and is used as storage for LLTranslationBridge
// Todo: consider merging LLWearableType and LLWearableDictionary
LL_WARNS() << "Initing LLWearableDictionary without LLWearableType" << LL_ENDL;
}
addEntry(LLWearableType::WT_SHAPE, new WearableEntry("shape", "New Shape", LLAssetType::AT_BODYPART, LLInventoryType::ICONNAME_BODYPART_SHAPE, FALSE, FALSE));
addEntry(LLWearableType::WT_SKIN, new WearableEntry("skin", "New Skin", LLAssetType::AT_BODYPART, LLInventoryType::ICONNAME_BODYPART_SKIN, FALSE, FALSE));
addEntry(LLWearableType::WT_HAIR, new WearableEntry("hair", "New Hair", LLAssetType::AT_BODYPART, LLInventoryType::ICONNAME_BODYPART_HAIR, FALSE, FALSE));
addEntry(LLWearableType::WT_EYES, new WearableEntry("eyes", "New Eyes", LLAssetType::AT_BODYPART, LLInventoryType::ICONNAME_BODYPART_EYES, FALSE, FALSE));
addEntry(LLWearableType::WT_SHIRT, new WearableEntry("shirt", "New Shirt", LLAssetType::AT_CLOTHING, LLInventoryType::ICONNAME_CLOTHING_SHIRT, FALSE, TRUE));
addEntry(LLWearableType::WT_PANTS, new WearableEntry("pants", "New Pants", LLAssetType::AT_CLOTHING, LLInventoryType::ICONNAME_CLOTHING_PANTS, FALSE, TRUE));
addEntry(LLWearableType::WT_SHOES, new WearableEntry("shoes", "New Shoes", LLAssetType::AT_CLOTHING, LLInventoryType::ICONNAME_CLOTHING_SHOES, FALSE, TRUE));
addEntry(LLWearableType::WT_SOCKS, new WearableEntry("socks", "New Socks", LLAssetType::AT_CLOTHING, LLInventoryType::ICONNAME_CLOTHING_SOCKS, FALSE, TRUE));
addEntry(LLWearableType::WT_JACKET, new WearableEntry("jacket", "New Jacket", LLAssetType::AT_CLOTHING, LLInventoryType::ICONNAME_CLOTHING_JACKET, FALSE, TRUE));
addEntry(LLWearableType::WT_GLOVES, new WearableEntry("gloves", "New Gloves", LLAssetType::AT_CLOTHING, LLInventoryType::ICONNAME_CLOTHING_GLOVES, FALSE, TRUE));
addEntry(LLWearableType::WT_UNDERSHIRT, new WearableEntry("undershirt", "New Undershirt", LLAssetType::AT_CLOTHING, LLInventoryType::ICONNAME_CLOTHING_UNDERSHIRT, FALSE, TRUE));
addEntry(LLWearableType::WT_UNDERPANTS, new WearableEntry("underpants", "New Underpants", LLAssetType::AT_CLOTHING, LLInventoryType::ICONNAME_CLOTHING_UNDERPANTS, FALSE, TRUE));
addEntry(LLWearableType::WT_SKIRT, new WearableEntry("skirt", "New Skirt", LLAssetType::AT_CLOTHING, LLInventoryType::ICONNAME_CLOTHING_SKIRT, FALSE, TRUE));
addEntry(LLWearableType::WT_ALPHA, new WearableEntry("alpha", "New Alpha", LLAssetType::AT_CLOTHING, LLInventoryType::ICONNAME_CLOTHING_ALPHA, FALSE, TRUE));
addEntry(LLWearableType::WT_TATTOO, new WearableEntry("tattoo", "New Tattoo", LLAssetType::AT_CLOTHING, LLInventoryType::ICONNAME_CLOTHING_TATTOO, FALSE, TRUE));
addEntry(LLWearableType::WT_UNIVERSAL, new WearableEntry("universal", "New Universal", LLAssetType::AT_CLOTHING, LLInventoryType::ICONNAME_CLOTHING_UNIVERSAL, FALSE, TRUE));
addEntry(LLWearableType::WT_SHAPE, new WearableEntry(wtype, "shape", "New Shape", LLAssetType::AT_BODYPART, LLInventoryType::ICONNAME_BODYPART_SHAPE, FALSE, FALSE));
addEntry(LLWearableType::WT_SKIN, new WearableEntry(wtype, "skin", "New Skin", LLAssetType::AT_BODYPART, LLInventoryType::ICONNAME_BODYPART_SKIN, FALSE, FALSE));
addEntry(LLWearableType::WT_HAIR, new WearableEntry(wtype, "hair", "New Hair", LLAssetType::AT_BODYPART, LLInventoryType::ICONNAME_BODYPART_HAIR, FALSE, FALSE));
addEntry(LLWearableType::WT_EYES, new WearableEntry(wtype, "eyes", "New Eyes", LLAssetType::AT_BODYPART, LLInventoryType::ICONNAME_BODYPART_EYES, FALSE, FALSE));
addEntry(LLWearableType::WT_SHIRT, new WearableEntry(wtype, "shirt", "New Shirt", LLAssetType::AT_CLOTHING, LLInventoryType::ICONNAME_CLOTHING_SHIRT, FALSE, TRUE));
addEntry(LLWearableType::WT_PANTS, new WearableEntry(wtype, "pants", "New Pants", LLAssetType::AT_CLOTHING, LLInventoryType::ICONNAME_CLOTHING_PANTS, FALSE, TRUE));
addEntry(LLWearableType::WT_SHOES, new WearableEntry(wtype, "shoes", "New Shoes", LLAssetType::AT_CLOTHING, LLInventoryType::ICONNAME_CLOTHING_SHOES, FALSE, TRUE));
addEntry(LLWearableType::WT_SOCKS, new WearableEntry(wtype, "socks", "New Socks", LLAssetType::AT_CLOTHING, LLInventoryType::ICONNAME_CLOTHING_SOCKS, FALSE, TRUE));
addEntry(LLWearableType::WT_JACKET, new WearableEntry(wtype, "jacket", "New Jacket", LLAssetType::AT_CLOTHING, LLInventoryType::ICONNAME_CLOTHING_JACKET, FALSE, TRUE));
addEntry(LLWearableType::WT_GLOVES, new WearableEntry(wtype, "gloves", "New Gloves", LLAssetType::AT_CLOTHING, LLInventoryType::ICONNAME_CLOTHING_GLOVES, FALSE, TRUE));
addEntry(LLWearableType::WT_UNDERSHIRT, new WearableEntry(wtype, "undershirt", "New Undershirt", LLAssetType::AT_CLOTHING, LLInventoryType::ICONNAME_CLOTHING_UNDERSHIRT, FALSE, TRUE));
addEntry(LLWearableType::WT_UNDERPANTS, new WearableEntry(wtype, "underpants", "New Underpants", LLAssetType::AT_CLOTHING, LLInventoryType::ICONNAME_CLOTHING_UNDERPANTS, FALSE, TRUE));
addEntry(LLWearableType::WT_SKIRT, new WearableEntry(wtype, "skirt", "New Skirt", LLAssetType::AT_CLOTHING, LLInventoryType::ICONNAME_CLOTHING_SKIRT, FALSE, TRUE));
addEntry(LLWearableType::WT_ALPHA, new WearableEntry(wtype, "alpha", "New Alpha", LLAssetType::AT_CLOTHING, LLInventoryType::ICONNAME_CLOTHING_ALPHA, FALSE, TRUE));
addEntry(LLWearableType::WT_TATTOO, new WearableEntry(wtype, "tattoo", "New Tattoo", LLAssetType::AT_CLOTHING, LLInventoryType::ICONNAME_CLOTHING_TATTOO, FALSE, TRUE));
addEntry(LLWearableType::WT_UNIVERSAL, new WearableEntry(wtype, "universal", "New Universal", LLAssetType::AT_CLOTHING, LLInventoryType::ICONNAME_CLOTHING_UNIVERSAL, FALSE, TRUE));
// [SL:KB] - Patch: Appearance-Misc | Checked: 2011-05-29 (Catznip-2.6)
addEntry(LLWearableType::WT_PHYSICS, new WearableEntry("physics", "New Physics", LLAssetType::AT_CLOTHING, LLInventoryType::ICONNAME_CLOTHING_PHYSICS, TRUE, FALSE));
addEntry(LLWearableType::WT_PHYSICS, new WearableEntry(wtype, "physics", "New Physics", LLAssetType::AT_CLOTHING, LLInventoryType::ICONNAME_CLOTHING_PHYSICS, TRUE, FALSE));
// [/SL:KB]
// addEntry(LLWearableType::WT_PHYSICS, new WearableEntry("physics", "New Physics", LLAssetType::AT_CLOTHING, LLInventoryType::ICONNAME_CLOTHING_PHYSICS, TRUE, TRUE));
// addEntry(LLWearableType::WT_PHYSICS, new WearableEntry(wtype, "physics", "New Physics", LLAssetType::AT_CLOTHING, LLInventoryType::ICONNAME_CLOTHING_PHYSICS, TRUE, TRUE));
addEntry(LLWearableType::WT_INVALID, new WearableEntry("invalid", "Invalid Wearable", LLAssetType::AT_NONE, LLInventoryType::ICONNAME_UNKNOWN, FALSE, FALSE));
addEntry(LLWearableType::WT_NONE, new WearableEntry("none", "Invalid Wearable", LLAssetType::AT_NONE, LLInventoryType::ICONNAME_NONE, FALSE, FALSE));
addEntry(LLWearableType::WT_INVALID, new WearableEntry(wtype, "invalid", "Invalid Wearable", LLAssetType::AT_NONE, LLInventoryType::ICONNAME_UNKNOWN, FALSE, FALSE));
addEntry(LLWearableType::WT_NONE, new WearableEntry(wtype, "none", "Invalid Wearable", LLAssetType::AT_NONE, LLInventoryType::ICONNAME_NONE, FALSE, FALSE));
}
@ -116,6 +111,14 @@ LLWearableType::~LLWearableType()
delete mTrans;
}
void LLWearableType::initSingleton()
{
// To make sure all wrapping functions will crash without initing LLWearableType;
LLWearableDictionary::initParamSingleton(*this);
// Todo: consider merging LLWearableType and LLWearableDictionary
}
// static
LLWearableType::EType LLWearableType::typeNameToType(const std::string& type_name)
{

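The change above replaces LLWearableDictionary's plain LLSingleton with LLParamSingleton and threads an explicit LLWearableType& through every WearableEntry, so the dictionary no longer calls LLWearableType::getInstance() while LLWearableType itself is still constructing. A minimal, self-contained sketch of that parameterized-singleton idea follows; it uses only the standard library, and the names (Translator, Dictionary, init) are illustrative stand-ins, not LL's actual LLParamSingleton machinery.

#include <cassert>
#include <mutex>
#include <string>

// Hypothetical stand-in for the LLTranslationBridge wrapper that LLWearableType owns.
struct Translator
{
    std::string getString(const std::string& key) const { return "New " + key; }
};

// Parameterized-singleton sketch: constructed exactly once, with an explicit
// argument, instead of each entry reaching back into another singleton via
// getInstance() while that singleton is still being built.
class Dictionary
{
public:
    static void init(Translator& trans)        // analogous role to initParamSingleton(*this)
    {
        std::call_once(sOnce, [&trans] { sInstance = new Dictionary(trans); });
    }
    static Dictionary& instance()
    {
        assert(sInstance && "init() must be called before instance()");
        return *sInstance;
    }
    std::string label(const std::string& name) const { return mTrans.getString(name); }

private:
    explicit Dictionary(Translator& trans) : mTrans(trans) {}  // 'trans' injected, like wtype
    Translator& mTrans;
    static std::once_flag sOnce;
    static Dictionary*    sInstance;
};

std::once_flag Dictionary::sOnce;
Dictionary*    Dictionary::sInstance = nullptr;

int main()
{
    Translator trans;
    Dictionary::init(trans);                   // the owner injects itself once, up front
    return Dictionary::instance().label("shape") == "New shape" ? 0 : 1;
}

The real code differs in detail (LLParamSingleton handles lifetime and thread safety itself), but the dependency direction is the same: LLWearableType::initSingleton() hands *this to the dictionary once, and each WearableEntry uses the injected reference instead of getInstance().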
View File

@ -37,6 +37,7 @@ class LLWearableType : public LLParamSingleton<LLWearableType>
{
LLSINGLETON(LLWearableType, LLTranslationBridge* trans);
~LLWearableType();
void initSingleton();
friend struct WearableEntry;
public:
enum EType

View File

@ -1,4 +1,3 @@
# -*- cmake -*-
project(llcommon)
@ -44,7 +43,6 @@ set(llcommon_SOURCE_FILES
llcleanup.cpp
llcommon.cpp
llcommonutils.cpp
llcoro_get_id.cpp
llcoros.cpp
llcrc.cpp
llcriticaldamp.cpp
@ -106,6 +104,7 @@ set(llcommon_SOURCE_FILES
llstring.cpp
llstringtable.cpp
llsys.cpp
lltempredirect.cpp
llthread.cpp
llthreadlocalstorage.cpp
llthreadsafequeue.cpp
@ -146,7 +145,7 @@ set(llcommon_HEADER_FILES
llcleanup.h
llcommon.h
llcommonutils.h
llcoro_get_id.h
llcond.h
llcoros.h
llcrc.h
llcriticaldamp.h
@ -186,9 +185,9 @@ set(llcommon_HEADER_FILES
llkeythrottle.h
llleap.h
llleaplistener.h
lllistenerwrapper.h
llliveappconfig.h
lllivefile.h
llmainthreadtask.h
llmd5.h
llmemory.h
llmemorystream.h
@ -230,6 +229,7 @@ set(llcommon_HEADER_FILES
llstaticstringtable.h
llstatsaccumulator.h
llsys.h
lltempredirect.h
llthread.h
llthreadlocalstorage.h
llthreadsafequeue.h
@ -247,6 +247,7 @@ set(llcommon_HEADER_FILES
llwin32headers.h
llwin32headerslean.h
llworkerthread.h
lockstatic.h
stdtypes.h
stringize.h
timer.h
@ -327,7 +328,7 @@ target_link_libraries(
${JSONCPP_LIBRARIES}
${ZLIB_LIBRARIES}
${WINDOWS_LIBRARIES}
${BOOST_COROUTINE_LIBRARY}
${BOOST_FIBER_LIBRARY}
${BOOST_CONTEXT_LIBRARY}
${BOOST_PROGRAM_OPTIONS_LIBRARY}
${BOOST_REGEX_LIBRARY}
@ -356,13 +357,14 @@ if (LL_TESTS)
${LLCOMMON_LIBRARIES}
${WINDOWS_LIBRARIES}
${GOOGLEMOCK_LIBRARIES}
${BOOST_COROUTINE_LIBRARY}
${BOOST_FIBER_LIBRARY}
${BOOST_CONTEXT_LIBRARY}
${BOOST_THREAD_LIBRARY}
${BOOST_SYSTEM_LIBRARY})
LL_ADD_INTEGRATION_TEST(commonmisc "" "${test_libs}")
LL_ADD_INTEGRATION_TEST(bitpack "" "${test_libs}")
LL_ADD_INTEGRATION_TEST(llbase64 "" "${test_libs}")
LL_ADD_INTEGRATION_TEST(llcond "" "${test_libs}")
LL_ADD_INTEGRATION_TEST(lldate "" "${test_libs}")
LL_ADD_INTEGRATION_TEST(lldeadmantimer "" "${test_libs}")
LL_ADD_INTEGRATION_TEST(lldependencies "" "${test_libs}")
@ -374,6 +376,7 @@ if (LL_TESTS)
LL_ADD_INTEGRATION_TEST(llheteromap "" "${test_libs}")
LL_ADD_INTEGRATION_TEST(llinstancetracker "" "${test_libs}")
LL_ADD_INTEGRATION_TEST(llleap "" "${test_libs}")
LL_ADD_INTEGRATION_TEST(llmainthreadtask "" "${test_libs}")
LL_ADD_INTEGRATION_TEST(llpounceable "" "${test_libs}")
LL_ADD_INTEGRATION_TEST(llprocess "" "${test_libs}")
LL_ADD_INTEGRATION_TEST(llprocessor "" "${test_libs}")

View File

@ -98,7 +98,10 @@
// If VC7 and later, then use the shipped 'dbghelp.h'-file
#pragma pack(push,8)
#if _MSC_VER >= 1300
#pragma warning (push)
#pragma warning (disable:4091) // a microsoft header has warnings. Very nice.
#include <dbghelp.h>
#pragma warning (pop)
#else
// inline the important dbghelp.h-declarations...
typedef enum {
@ -422,7 +425,7 @@ public:
LPSTR m_szSymPath;
#pragma pack(push,8)
typedef struct IMAGEHLP_MODULE64_V3 {
struct IMAGEHLP_MODULE64_V3 {
DWORD SizeOfStruct; // set to sizeof(IMAGEHLP_MODULE64)
DWORD64 BaseOfImage; // base load address of module
DWORD ImageSize; // virtual size of the loaded module
@ -450,7 +453,7 @@ typedef struct IMAGEHLP_MODULE64_V3 {
BOOL Publics; // contains public symbols
};
typedef struct IMAGEHLP_MODULE64_V2 {
struct IMAGEHLP_MODULE64_V2 {
DWORD SizeOfStruct; // set to sizeof(IMAGEHLP_MODULE64)
DWORD64 BaseOfImage; // base load address of module
DWORD ImageSize; // virtual size of the loaded module
@ -657,7 +660,7 @@ private:
pGMI = (tGMI) GetProcAddress( hPsapi, "GetModuleInformation" );
if ( (pEPM == NULL) || (pGMFNE == NULL) || (pGMBN == NULL) || (pGMI == NULL) )
{
// we couldn´t find all functions
// we couldn't find all functions
FreeLibrary(hPsapi);
return FALSE;
}

View File

@ -148,7 +148,7 @@ protected:
CHAR loadedImageName[STACKWALK_MAX_NAMELEN];
} CallstackEntry;
typedef enum CallstackEntryType {firstEntry, nextEntry, lastEntry};
enum CallstackEntryType {firstEntry, nextEntry, lastEntry};
virtual void OnSymInit(LPCSTR szSearchPath, DWORD symOptions, LPCSTR szUserName);
virtual void OnLoadModule(LPCSTR img, LPCSTR mod, DWORD64 baseAddr, DWORD size, DWORD result, LPCSTR symType, LPCSTR pdbName, ULONGLONG fileVersion);

View File

@ -47,13 +47,12 @@
#include "llstring.h"
#include "lleventtimer.h"
#if defined(LL_LINUX) && __GLIBC__ >= 2 && __GLIBC_MINOR__ >= 26
#define ucontext ucontext_t
#endif
#include "google_breakpad/exception_handler.h"
#include "stringize.h"
#include "llcleanup.h"
#include "llevents.h"
#include "llsdutil.h"
//
// Signal handling
@ -567,10 +566,42 @@ void LLApp::runErrorHandler()
LLApp::setStopped();
}
namespace
{
static std::map<LLApp::EAppStatus, const char*> statusDesc
{
{ LLApp::APP_STATUS_RUNNING, "running" },
{ LLApp::APP_STATUS_QUITTING, "quitting" },
{ LLApp::APP_STATUS_STOPPED, "stopped" },
{ LLApp::APP_STATUS_ERROR, "error" }
};
} // anonymous namespace
// static
void LLApp::setStatus(EAppStatus status)
{
sStatus = status;
sStatus = status;
// This can also happen very late in the application lifecycle -- don't
// resurrect a deleted LLSingleton
if (! LLEventPumps::wasDeleted())
{
// notify interested parties of status change
LLSD statsd;
auto found = statusDesc.find(status);
if (found != statusDesc.end())
{
statsd = found->second;
}
else
{
// unknown status? at least report value
statsd = LLSD::Integer(status);
}
LLEventPumps::instance().obtain("LLApp").post(llsd::map("status", statsd));
}
}
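The added code publishes every status change on the "LLApp" event pump as an LLSD map whose "status" key holds either a descriptive string or, for unknown values, the raw integer. A hedged sketch of a listener for those events follows; it assumes the usual llevents.h API (LLEventPumps::obtain() plus LLEventPump::listen() taking a name and a callable returning bool), and the listener name "AppStatusWatcher" is made up for illustration.

#include "llevents.h"
#include "llerror.h"
#include "llsd.h"

// Sketch only: subscribe to the status events posted by LLApp::setStatus() above.
LLBoundListener watchAppStatus()
{
    return LLEventPumps::instance().obtain("LLApp")
        .listen("AppStatusWatcher",             // hypothetical listener name
                [](const LLSD& event)
                {
                    // "status" is "running", "quitting", "stopped", "error",
                    // or an integer for an EAppStatus value not in statusDesc.
                    LL_INFOS("AppStatus") << "LLApp status changed to "
                                          << event["status"] << LL_ENDL;
                    return false;               // false = don't consume the event
                });
}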

View File

@ -41,17 +41,7 @@
#include "llstring.h"
#if LL_WINDOWS
#pragma warning (push)
#pragma warning (disable:4265)
#endif
// warning C4265: 'std::_Pad' : class has virtual functions, but destructor is not virtual
#include <mutex>
#if LL_WINDOWS
#pragma warning (pop)
#endif
#include "mutex.h"
struct apr_dso_handle_t;
/**

View File

@ -23,7 +23,6 @@
* $/LicenseInfo$
*/
// <FS:Ansariel> Fix LNK4221 compiler warning
//#include "llatomic.h"
#include "llatomic.h"
//============================================================================

View File

@ -25,7 +25,7 @@
*/
// <FS:Ansariel> Get rid of LNK4221 linker warning since we don't run the unit tests anyway
//#include "linden_common.h"
#include "linden_common.h"
// implementation is all in the header, this include dep ensures the unit test is rerun if the implementation changes.
//#include "llbitpack.h"
#include "llbitpack.h"

indra/llcommon/llcond.h: new file, 405 lines
View File

@ -0,0 +1,405 @@
/**
* @file llcond.h
* @author Nat Goodspeed
* @date 2019-07-10
* @brief LLCond is a wrapper around condition_variable to encapsulate the
* obligatory condition_variable usage pattern. We also provide
* simplified versions LLScalarCond, LLBoolCond and LLOneShotCond.
*
* $LicenseInfo:firstyear=2019&license=viewerlgpl$
* Copyright (c) 2019, Linden Research, Inc.
* $/LicenseInfo$
*/
#if ! defined(LL_LLCOND_H)
#define LL_LLCOND_H
#include "llunits.h"
#include "llcoros.h"
#include LLCOROS_MUTEX_HEADER
#include "mutex.h"
#include <chrono>
/**
* LLCond encapsulates the pattern required to use a condition_variable. It
* bundles subject data, a mutex and a condition_variable: the three required
* data objects. It provides wait() methods analogous to condition_variable,
* but using the contained condition_variable and the contained mutex. It
* provides modify() methods accepting an invocable to safely modify the
* contained data and notify waiters. These methods implicitly perform the
* required locking.
*
* The generic LLCond template assumes that DATA might be a struct or class.
* For a scalar DATA type, consider LLScalarCond instead. For specifically
* bool, consider LLBoolCond.
*
* Use of LLCoros::ConditionVariable makes LLCond work between
* coroutines as well as between threads.
*/
template <typename DATA>
class LLCond
{
public:
typedef DATA value_type;
private:
// This is the DATA controlled by the condition_variable.
value_type mData;
// condition_variable must be used in conjunction with a mutex. Use
// LLCoros::Mutex instead of std::mutex because the latter blocks
// the entire calling thread, whereas the former blocks only the current
// coroutine within the calling thread. Yet LLCoros::Mutex is safe to
// use across threads as well: it subsumes std::mutex functionality.
LLCoros::Mutex mMutex;
// Use LLCoros::ConditionVariable for the same reason.
LLCoros::ConditionVariable mCond;
public:
/// LLCond can be explicitly initialized with a specific value for mData if
/// desired.
LLCond(const value_type& init=value_type()):
mData(init)
{}
/// LLCond is move-only
LLCond(const LLCond&) = delete;
LLCond& operator=(const LLCond&) = delete;
/// get() returns a const reference to the stored DATA. The only way to
/// get a non-const reference -- to modify the stored DATA -- is via
/// update_one() or update_all().
const value_type& get() const { return mData; }
/**
* Pass update_one() an invocable accepting non-const (DATA&). The
* invocable will presumably modify the referenced DATA. update_one()
* will lock the mutex, call the invocable and then call notify_one() on
* the condition_variable.
*
* For scalar DATA, it's simpler to use LLScalarCond::set_one(). Use
* update_one() when DATA is a struct or class.
*/
template <typename MODIFY>
void update_one(MODIFY modify)
{
{ // scope of lock can/should end before notify_one()
LLCoros::LockType lk(mMutex);
modify(mData);
}
mCond.notify_one();
}
/**
* Pass update_all() an invocable accepting non-const (DATA&). The
* invocable will presumably modify the referenced DATA. update_all()
* will lock the mutex, call the invocable and then call notify_all() on
* the condition_variable.
*
* For scalar DATA, it's simpler to use LLScalarCond::set_all(). Use
* update_all() when DATA is a struct or class.
*/
template <typename MODIFY>
void update_all(MODIFY modify)
{
{ // scope of lock can/should end before notify_all()
LLCoros::LockType lk(mMutex);
modify(mData);
}
mCond.notify_all();
}
/**
* Pass wait() a predicate accepting (const DATA&), returning bool. The
* predicate returns true when the condition for which it is waiting has
* been satisfied, presumably determined by examining the referenced DATA.
* wait() locks the mutex and, until the predicate returns true, calls
* wait() on the condition_variable.
*/
template <typename Pred>
void wait(Pred pred)
{
LLCoros::LockType lk(mMutex);
// We must iterate explicitly since the predicate accepted by
// condition_variable::wait() requires a different signature:
// condition_variable::wait() calls its predicate with no arguments.
// Fortunately, the loop is straightforward.
// We advise the caller to pass a predicate accepting (const DATA&).
// But what if they instead pass a predicate accepting non-const
// (DATA&)? Such a predicate could modify mData, which would be Bad.
// Forbid that.
while (! pred(const_cast<const value_type&>(mData)))
{
mCond.wait(lk);
}
}
/**
* Pass wait_for() a chrono::duration, indicating how long we're willing
* to wait, and a predicate accepting (const DATA&), returning bool. The
* predicate returns true when the condition for which it is waiting has
* been satisfied, presumably determined by examining the referenced DATA.
* wait_for() locks the mutex and, until the predicate returns true, calls
* wait_for() on the condition_variable. wait_for() returns false if
* condition_variable::wait_for() timed out without the predicate
* returning true.
*/
template <typename Rep, typename Period, typename Pred>
bool wait_for(const std::chrono::duration<Rep, Period>& timeout_duration, Pred pred)
{
// Instead of replicating wait_until() logic, convert duration to
// time_point and just call wait_until().
// An implementation in which we repeatedly called
// condition_variable::wait_for() with our passed duration would be
// wrong! We'd keep pushing the timeout time farther and farther into
// the future. This way, we establish a definite timeout time and
// stick to it.
return wait_until(std::chrono::steady_clock::now() + timeout_duration, pred);
}
/**
* This wait_for() overload accepts F32Milliseconds as the duration. Any
* duration unit defined in llunits.h is implicitly convertible to
* F32Milliseconds. The semantics of this method are the same as the
* generic wait_for() method.
*/
template <typename Pred>
bool wait_for(F32Milliseconds timeout_duration, Pred pred)
{
return wait_for(convert(timeout_duration), pred);
}
protected:
// convert F32Milliseconds to a chrono::duration
auto convert(F32Milliseconds duration)
{
// std::chrono::milliseconds doesn't like to be constructed from a
// float (F32), rubbing our nose in the thought that
// std::chrono::duration::rep is probably integral. Therefore
// converting F32Milliseconds to std::chrono::milliseconds would lose
// precision. Use std::chrono::microseconds instead. Extract the F32
// milliseconds from F32Milliseconds, scale to microseconds, construct
// std::chrono::microseconds from that value.
return std::chrono::microseconds{ std::chrono::microseconds::rep(duration.value() * 1000) };
}
private:
/**
* Pass wait_until() a chrono::time_point, indicating the time at which we
* should stop waiting, and a predicate accepting (const DATA&), returning
* bool. The predicate returns true when the condition for which it is
* waiting has been satisfied, presumably determined by examining the
* referenced DATA. wait_until() locks the mutex and, until the predicate
* returns true, calls wait_until() on the condition_variable.
* wait_until() returns false if condition_variable::wait_until() timed
* out without the predicate returning true.
*
* Originally this class and its subclasses published wait_until() methods
* corresponding to each wait_for() method. But that raised all sorts of
* fascinating questions about the time zone of the passed time_point:
* local time? server time? UTC? The bottom line is that for LLCond
* timeout purposes, we really shouldn't have to care -- timeout duration
* is all we need. This private method remains because it's the simplest
* way to support iteratively waiting across spurious wakeups while
* honoring a fixed timeout.
*/
template <typename Clock, typename Duration, typename Pred>
bool wait_until(const std::chrono::time_point<Clock, Duration>& timeout_time, Pred pred)
{
LLCoros::LockType lk(mMutex);
// We advise the caller to pass a predicate accepting (const DATA&).
// But what if they instead pass a predicate accepting non-const
// (DATA&)? Such a predicate could modify mData, which would be Bad.
// Forbid that.
while (! pred(const_cast<const value_type&>(mData)))
{
if (LLCoros::cv_status::timeout == mCond.wait_until(lk, timeout_time))
{
// It's possible that wait_until() timed out AND the predicate
// became true more or less simultaneously. Even though
// wait_until() timed out, check the predicate one more time.
return pred(const_cast<const value_type&>(mData));
}
}
return true;
}
};
template <typename DATA>
class LLScalarCond: public LLCond<DATA>
{
using super = LLCond<DATA>;
public:
using typename super::value_type;
using super::get;
using super::wait;
using super::wait_for;
/// LLScalarCond can be explicitly initialized with a specific value for
/// mData if desired.
LLScalarCond(const value_type& init=value_type()):
super(init)
{}
/// Pass set_one() a new value to which to update mData. set_one() will
/// lock the mutex, update mData and then call notify_one() on the
/// condition_variable.
void set_one(const value_type& value)
{
super::update_one([&value](value_type& data){ data = value; });
}
/// Pass set_all() a new value to which to update mData. set_all() will
/// lock the mutex, update mData and then call notify_all() on the
/// condition_variable.
void set_all(const value_type& value)
{
super::update_all([&value](value_type& data){ data = value; });
}
/**
* Pass wait_equal() a value for which to wait. wait_equal() locks the
* mutex and, until the stored DATA equals that value, calls wait() on the
* condition_variable.
*/
void wait_equal(const value_type& value)
{
super::wait([&value](const value_type& data){ return (data == value); });
}
/**
* Pass wait_for_equal() a chrono::duration, indicating how long we're
* willing to wait, and a value for which to wait. wait_for_equal() locks
* the mutex and, until the stored DATA equals that value, calls
* wait_for() on the condition_variable. wait_for_equal() returns false if
* condition_variable::wait_for() timed out without the stored DATA being
* equal to the passed value.
*/
template <typename Rep, typename Period>
bool wait_for_equal(const std::chrono::duration<Rep, Period>& timeout_duration,
const value_type& value)
{
return super::wait_for(timeout_duration,
[&value](const value_type& data){ return (data == value); });
}
/**
* This wait_for_equal() overload accepts F32Milliseconds as the duration.
* Any duration unit defined in llunits.h is implicitly convertible to
* F32Milliseconds. The semantics of this method are the same as the
* generic wait_for_equal() method.
*/
bool wait_for_equal(F32Milliseconds timeout_duration, const value_type& value)
{
return wait_for_equal(super::convert(timeout_duration), value);
}
/**
* Pass wait_unequal() a value from which to move away. wait_unequal()
* locks the mutex and, until the stored DATA no longer equals that value,
* calls wait() on the condition_variable.
*/
void wait_unequal(const value_type& value)
{
super::wait([&value](const value_type& data){ return (data != value); });
}
/**
* Pass wait_for_unequal() a chrono::duration, indicating how long we're
* willing to wait, and a value from which to move away.
* wait_for_unequal() locks the mutex and, until the stored DATA no longer
* equals that value, calls wait_for() on the condition_variable.
* wait_for_unequal() returns false if condition_variable::wait_for()
* timed out with the stored DATA still being equal to the passed value.
*/
template <typename Rep, typename Period>
bool wait_for_unequal(const std::chrono::duration<Rep, Period>& timeout_duration,
const value_type& value)
{
return super::wait_for(timeout_duration,
[&value](const value_type& data){ return (data != value); });
}
/**
* This wait_for_unequal() overload accepts F32Milliseconds as the duration.
* Any duration unit defined in llunits.h is implicitly convertible to
* F32Milliseconds. The semantics of this method are the same as the
* generic wait_for_unequal() method.
*/
bool wait_for_unequal(F32Milliseconds timeout_duration, const value_type& value)
{
return wait_for_unequal(super::convert(timeout_duration), value);
}
protected:
using super::convert;
};
/// Using bool as LLScalarCond's DATA seems like a particularly useful case
using LLBoolCond = LLScalarCond<bool>;
/// LLOneShotCond -- init false, set (and wait for) true
class LLOneShotCond: public LLBoolCond
{
using super = LLBoolCond;
public:
using typename super::value_type;
using super::get;
using super::wait;
using super::wait_for;
using super::wait_equal;
using super::wait_for_equal;
using super::wait_unequal;
using super::wait_for_unequal;
/// The bool stored in LLOneShotCond is initially false
LLOneShotCond(): super(false) {}
/// LLOneShotCond assumes that nullary set_one() means to set its bool true
void set_one(bool value=true)
{
super::set_one(value);
}
/// LLOneShotCond assumes that nullary set_all() means to set its bool true
void set_all(bool value=true)
{
super::set_all(value);
}
/**
* wait() locks the mutex and, until the stored bool is true, calls wait()
* on the condition_variable.
*/
void wait()
{
super::wait_unequal(false);
}
/**
* Pass wait_for() a chrono::duration, indicating how long we're willing
* to wait. wait_for() locks the mutex and, until the stored bool is true,
* calls wait_for() on the condition_variable. wait_for() returns false if
* condition_variable::wait_for() timed out without the stored bool being
* true.
*/
template <typename Rep, typename Period>
bool wait_for(const std::chrono::duration<Rep, Period>& timeout_duration)
{
return super::wait_for_unequal(timeout_duration, false);
}
/**
* This wait_for() overload accepts F32Milliseconds as the duration.
* Any duration unit defined in llunits.h is implicitly convertible to
* F32Milliseconds. The semantics of this method are the same as the
* generic wait_for() method.
*/
bool wait_for(F32Milliseconds timeout_duration)
{
return wait_for(super::convert(timeout_duration));
}
};
#endif /* ! defined(LL_LLCOND_H) */
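
A minimal usage sketch for the new header (not part of the commit): a worker thread publishes a value through LLScalarCond and signals completion through LLOneShotCond while the caller blocks with a bounded timeout. Plain std::thread is used purely for illustration; as the header notes, LLCoros::Mutex subsumes std::mutex, so the same waits work between fibers.

#include "llcond.h"
#include <chrono>
#include <iostream>
#include <thread>

int main()
{
    LLScalarCond<int> answer(0);
    LLOneShotCond done;

    std::thread worker([&]
    {
        answer.set_all(42);     // lock, assign, notify_all()
        done.set_all();         // nullary set_all() means "set true"
    });

    // wait_equal() checks the predicate under the lock, so the update
    // cannot be missed even if the worker has already run.
    answer.wait_equal(42);

    // Bounded wait: returns false if the flag is still false after one second.
    bool finished = done.wait_for(std::chrono::seconds(1));

    std::cout << "finished=" << finished << " answer=" << answer.get() << '\n';
    worker.join();
    return finished ? 0 : 1;
}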

View File

@ -1,32 +0,0 @@
/**
* @file llcoro_get_id.cpp
* @author Nat Goodspeed
* @date 2016-09-03
* @brief Implementation for llcoro_get_id.
*
* $LicenseInfo:firstyear=2016&license=viewerlgpl$
* Copyright (c) 2016, Linden Research, Inc.
* $/LicenseInfo$
*/
// Precompiled header
#include "linden_common.h"
// associated header
#include "llcoro_get_id.h"
// STL headers
// std headers
// external library headers
// other Linden headers
#include "llcoros.h"
namespace llcoro
{
id get_id()
{
// An instance of Current can convert to LLCoros::CoroData*, which can
// implicitly convert to void*, which is an llcoro::id.
return LLCoros::Current();
}
} // llcoro

View File

@ -1,30 +0,0 @@
/**
* @file llcoro_get_id.h
* @author Nat Goodspeed
* @date 2016-09-03
* @brief Supplement the functionality in llcoro.h.
*
* This is broken out as a separate header file to resolve
* circularity: LLCoros isa LLSingleton, yet LLSingleton machinery
* requires llcoro::get_id().
*
* Be very suspicious of anyone else #including this header.
*
* $LicenseInfo:firstyear=2016&license=viewerlgpl$
* Copyright (c) 2016, Linden Research, Inc.
* $/LicenseInfo$
*/
#if ! defined(LL_LLCORO_GET_ID_H)
#define LL_LLCORO_GET_ID_H
namespace llcoro
{
/// Get an opaque, distinct token for the running coroutine (or main).
typedef void* id;
id get_id();
} // llcoro
#endif /* ! defined(LL_LLCORO_GET_ID_H) */

View File

@ -26,15 +26,30 @@
* $/LicenseInfo$
*/
#include "llwin32headers.h"
// Precompiled header
#include "linden_common.h"
// associated header
#include "llcoros.h"
// STL headers
// std headers
#include <atomic>
// external library headers
#include <boost/bind.hpp>
#include <boost/fiber/fiber.hpp>
#ifndef BOOST_DISABLE_ASSERTS
#define UNDO_BOOST_DISABLE_ASSERTS
// with Boost 1.65.1, needed for Mac with this specific header
#define BOOST_DISABLE_ASSERTS
#endif
#include <boost/fiber/protected_fixedsize_stack.hpp>
#ifdef UNDO_BOOST_DISABLE_ASSERTS
#undef UNDO_BOOST_DISABLE_ASSERTS
#undef BOOST_DISABLE_ASSERTS
#endif
// other Linden headers
#include "llapp.h"
#include "lltimer.h"
#include "llevents.h"
#include "llerror.h"
@ -45,85 +60,43 @@
#include <excpt.h>
#endif
namespace {
void no_op() {}
} // anonymous namespace
// Do nothing, when we need nothing done. This is a static member of LLCoros
// because CoroData is a private nested class.
void LLCoros::no_cleanup(CoroData*) {}
// CoroData for the currently-running coroutine. Use a thread_specific_ptr
// because each thread potentially has its own distinct pool of coroutines.
LLCoros::Current::Current()
{
// Use a function-static instance so this thread_specific_ptr is
// instantiated on demand. Since we happen to know it's consumed by
// LLSingleton, this is likely to happen before the runtime has finished
// initializing module-static data. For the same reason, we can't package
// this pointer in an LLSingleton.
// This thread_specific_ptr does NOT own the CoroData object! That's owned
// by LLCoros::mCoros. It merely identifies it. For this reason we
// instantiate it with a no-op cleanup function.
static boost::thread_specific_ptr<LLCoros::CoroData> sCurrent(LLCoros::no_cleanup);
// If this is the first time we're accessing sCurrent for the running
// thread, its get() will be NULL. This could be a problem, in that
// llcoro::get_id() would return the same (NULL) token value for the "main
// coroutine" in every thread, whereas what we really want is a distinct
// value for every distinct stack in the process. So if get() is NULL,
// give it a heap CoroData: this ensures that llcoro::get_id() will return
// distinct values.
// This tactic is "leaky": sCurrent explicitly does not destroy any
// CoroData to which it points, and we do NOT enter these "main coroutine"
// CoroData instances in the LLCoros::mCoros map. They are dummy entries,
// and they will leak at process shutdown: one CoroData per thread.
if (! sCurrent.get())
{
// It's tempting to provide a distinct name for each thread's "main
// coroutine." But as getName() has always returned the empty string
// to mean "not in a coroutine," empty string should suffice here --
// and truthfully the additional (thread-safe!) machinery to ensure
// uniqueness just doesn't feel worth the trouble.
// We use a no-op callable and a minimal stack size because, although
// CoroData's constructor in fact initializes its mCoro with a
// coroutine with that stack size, no one ever actually enters it by
// calling mCoro().
sCurrent.reset(new CoroData(0, // no prev
"", // not a named coroutine
no_op, // no-op callable
1024)); // stacksize moot
}
mCurrent = &sCurrent;
}
//static
// static
LLCoros::CoroData& LLCoros::get_CoroData(const std::string& caller)
{
CoroData* current = Current();
// With the dummy CoroData set in LLCoros::Current::Current(), this
// pointer should never be NULL.
llassert_always(current);
CoroData* current{ nullptr };
// be careful about attempted accesses in the final throes of app shutdown
if (! wasDeleted())
{
current = instance().mCurrent.get();
}
// For the main() coroutine, the one NOT explicitly launched by launch(),
// we never explicitly set mCurrent. Use a static CoroData instance with
// canonical values.
if (! current)
{
static std::atomic<int> which_thread(0);
// Use alternate CoroData constructor.
static thread_local CoroData sMain(which_thread++);
// We need not reset() the local_ptr to this instance; we'll simply
// find it again every time we discover that current is null.
current = &sMain;
}
return *current;
}
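// Illustrative sketch (hypothetical names, standard C++ only, not viewer
// code) of the fallback pattern used above: when no coroutine-specific data
// has been set, hand back a thread_local default instance whose key comes
// from an atomic counter, so each thread's "main" gets a distinct token.
#include <atomic>
#include <string>

struct Data
{
    explicit Data(const std::string& key): mKey(key) {}
    std::string mKey;
};

thread_local Data* tCurrent = nullptr;      // set on entry to a real coroutine

Data& getCurrent()
{
    if (tCurrent)
        return *tCurrent;                   // explicitly-launched coroutine
    // default "main" of this thread: one instance per thread, distinct keys
    static std::atomic<int> which_thread(0);
    static thread_local Data sMain("main" + std::to_string(which_thread++));
    return sMain;
}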
//static
LLCoros::coro::self& LLCoros::get_self()
LLCoros::coro::id LLCoros::get_self()
{
CoroData& current = get_CoroData("get_self()");
if (! current.mSelf)
{
LL_ERRS("LLCoros") << "Calling get_self() from non-coroutine context!" << LL_ENDL;
}
return *current.mSelf;
return boost::this_fiber::get_id();
}
//static
void LLCoros::set_consuming(bool consuming)
{
get_CoroData("set_consuming()").mConsuming = consuming;
CoroData& data(get_CoroData("set_consuming()"));
// DO NOT call this on the main() coroutine.
llassert_always(! data.mName.empty());
data.mConsuming = consuming;
}
//static
@ -132,89 +105,58 @@ bool LLCoros::get_consuming()
return get_CoroData("get_consuming()").mConsuming;
}
llcoro::Suspending::Suspending()
// static
void LLCoros::setStatus(const std::string& status)
{
LLCoros::Current current;
// Remember currently-running coroutine: we're about to suspend it.
mSuspended = current;
// Revert Current to the value it had at the moment we last switched
// into this coroutine.
current.reset(mSuspended->mPrev);
get_CoroData("setStatus()").mStatus = status;
}
llcoro::Suspending::~Suspending()
// static
std::string LLCoros::getStatus()
{
LLCoros::Current current;
// Okay, we're back, update our mPrev
mSuspended->mPrev = current;
// and reinstate our Current.
current.reset(mSuspended);
return get_CoroData("getStatus()").mStatus;
}
LLCoros::LLCoros():
// MAINT-2724: default coroutine stack size too small on Windows.
// Previously we used
// boost::context::guarded_stack_allocator::default_stacksize();
// empirically this is 64KB on Windows and Linux. Try quadrupling.
// empirically this is insufficient.
#if ADDRESS_SIZE == 64
mStackSize(512*1024)
mStackSize(512*1024),
#else
mStackSize(256*1024)
mStackSize(256*1024),
#endif
// mCurrent does NOT own the current CoroData instance -- it simply
// points to it. So initialize it with a no-op deleter.
mCurrent{ [](CoroData*){} }
{
// Register our cleanup() method for "mainloop" ticks
LLEventPumps::instance().obtain("mainloop").listen(
"LLCoros", boost::bind(&LLCoros::cleanup, this, _1));
}
bool LLCoros::cleanup(const LLSD&)
LLCoros::~LLCoros()
{
static std::string previousName;
static int previousCount = 0;
// Walk the mCoros map, checking and removing completed coroutines.
for (CoroMap::iterator mi(mCoros.begin()), mend(mCoros.end()); mi != mend; )
printActiveCoroutines("at entry to ~LLCoros()");
// Other LLApp status-change listeners do things like close
// work queues and inject the Stop exception into pending
// promises, to force coroutines waiting on those things to
// notice and terminate. The only problem is that by the time
// LLApp sets "quitting" status, the main loop has stopped
// pumping the fiber scheduler with yield() calls. A waiting
// coroutine still might not wake up until after resources on
// which it depends have been freed. Pump it a few times
// ourselves. Of course, stop pumping as soon as the last of
// the coroutines has terminated.
for (size_t count = 0; count < 10 && CoroData::instanceCount() > 0; ++count)
{
// Has this coroutine exited (normal return, exception, exit() call)
// since last tick?
if (mi->second->mCoro.exited())
{
if (previousName != mi->first)
{
previousName = mi->first;
previousCount = 1;
}
else
{
++previousCount;
}
if ((previousCount < 5) || !(previousCount % 50))
{
if (previousCount < 5)
LL_DEBUGS("LLCoros") << "LLCoros: cleaning up coroutine " << mi->first << LL_ENDL;
else
LL_DEBUGS("LLCoros") << "LLCoros: cleaning up coroutine " << mi->first << "("<< previousCount << ")" << LL_ENDL;
}
// The erase() call will invalidate its passed iterator value --
// so increment mi FIRST -- but pass its original value to
// erase(). This is what postincrement is all about.
mCoros.erase(mi++);
}
else
{
// Still live, just skip this entry as if incrementing at the top
// of the loop as usual.
++mi;
}
// don't use llcoro::suspend() because that module depends
// on this one
boost::this_fiber::yield();
}
return false;
printActiveCoroutines("after pumping");
}
std::string LLCoros::generateDistinctName(const std::string& prefix) const
{
static std::string previousName;
static int previousCount = 0;
static int unique = 0;
// Allowing empty name would make getName()'s not-found return ambiguous.
if (prefix.empty())
@ -225,37 +167,15 @@ std::string LLCoros::generateDistinctName(const std::string& prefix) const
// If the specified name isn't already in the map, just use that.
std::string name(prefix);
// Find the lowest numeric suffix that doesn't collide with an existing
// entry. Start with 2 just to make it more intuitive for any interested
// parties: e.g. "joe", "joe2", "joe3"...
for (int i = 2; ; name = STRINGIZE(prefix << i++))
// Until we find an unused name, append a numeric suffix for uniqueness.
while (CoroData::getInstance(name))
{
if (mCoros.find(name) == mCoros.end())
{
if (previousName != name)
{
previousName = name;
previousCount = 1;
}
else
{
++previousCount;
}
if ((previousCount < 5) || !(previousCount % 50))
{
if (previousCount < 5)
LL_DEBUGS("LLCoros") << "LLCoros: launching coroutine " << name << LL_ENDL;
else
LL_DEBUGS("LLCoros") << "LLCoros: launching coroutine " << name << "(" << previousCount << ")" << LL_ENDL;
}
return name;
}
name = STRINGIZE(prefix << unique++);
}
return name;
}
/*==========================================================================*|
bool LLCoros::kill(const std::string& name)
{
CoroMap::iterator found = mCoros.find(name);
@ -269,10 +189,19 @@ bool LLCoros::kill(const std::string& name)
mCoros.erase(found);
return true;
}
|*==========================================================================*/
std::string LLCoros::getName() const
//static
std::string LLCoros::getName()
{
return Current()->mName;
return get_CoroData("getName()").mName;
}
//static
std::string LLCoros::logname()
{
LLCoros::CoroData& data(get_CoroData("logname()"));
return data.mName.empty()? data.getKey() : data.mName;
}
void LLCoros::setStackSize(S32 stacksize)
@ -281,25 +210,46 @@ void LLCoros::setStackSize(S32 stacksize)
mStackSize = stacksize;
}
void LLCoros::printActiveCoroutines()
void LLCoros::printActiveCoroutines(const std::string& when)
{
LL_INFOS("LLCoros") << "Number of active coroutines: " << (S32)mCoros.size() << LL_ENDL;
if (mCoros.size() > 0)
LL_INFOS("LLCoros") << "Number of active coroutines " << when
<< ": " << CoroData::instanceCount() << LL_ENDL;
if (CoroData::instanceCount() > 0)
{
LL_INFOS("LLCoros") << "-------------- List of active coroutines ------------";
CoroMap::iterator iter;
CoroMap::iterator end = mCoros.end();
F64 time = LLTimer::getTotalSeconds();
for (iter = mCoros.begin(); iter != end; iter++)
for (auto& cd : CoroData::instance_snapshot())
{
F64 life_time = time - iter->second->mCreationTime;
LL_CONT << LL_NEWLINE << "Name: " << iter->first << " life: " << life_time;
F64 life_time = time - cd.mCreationTime;
LL_CONT << LL_NEWLINE
<< cd.getKey() << ' ' << cd.mStatus << " life: " << life_time;
}
LL_CONT << LL_ENDL;
LL_INFOS("LLCoros") << "-----------------------------------------------------" << LL_ENDL;
}
}
std::string LLCoros::launch(const std::string& prefix, const callable_t& callable)
{
std::string name(generateDistinctName(prefix));
// 'dispatch' means: enter the new fiber immediately, returning here only
// when the fiber yields for whatever reason.
// std::allocator_arg is a flag to indicate that the following argument is
// a StackAllocator.
// protected_fixedsize_stack sets a guard page past the end of the new
// stack so that stack underflow will result in an access violation
// instead of weird, subtle, possibly undiagnosed memory stomps.
boost::fibers::fiber newCoro(boost::fibers::launch::dispatch,
std::allocator_arg,
boost::fibers::protected_fixedsize_stack(mStackSize),
[this, &name, &callable](){ toplevel(name, callable); });
// You have two choices with a fiber instance: you can join() it or you
// can detach() it. If you try to destroy the instance before doing
// either, the program silently terminates. We don't need this handle.
newCoro.detach();
return name;
}
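// Illustrative, standalone sketch (plain Boost.Fiber, no viewer types) of the
// same launch recipe used above: dispatch policy, a guarded fixed-size stack,
// and detach() because the fiber handle is never joined.
#include <boost/fiber/fiber.hpp>
#include <boost/fiber/operations.hpp>
#include <boost/fiber/protected_fixedsize_stack.hpp>
#include <iostream>
#include <memory>       // std::allocator_arg

int main()
{
    boost::fibers::fiber worker(
        boost::fibers::launch::dispatch,                        // enter now
        std::allocator_arg,
        boost::fibers::protected_fixedsize_stack(256 * 1024),   // guard page
        [](){
            std::cout << "in fiber" << std::endl;
            boost::this_fiber::yield();                         // give control back
            std::cout << "fiber resumed" << std::endl;
        });
    worker.detach();                // we never join() this handle
    boost::this_fiber::yield();     // let the detached fiber run to completion
    return 0;
}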
#if LL_WINDOWS
static const U32 STATUS_MSC_EXCEPTION = 0xE06D7363; // compiler specific
@ -337,13 +287,16 @@ void LLCoros::winlevel(const callable_t& callable)
#endif
// Top-level wrapper around caller's coroutine callable. This function accepts
// the coroutine library's implicit coro::self& parameter and saves it, but
// does not pass it down to the caller's callable.
void LLCoros::toplevel(coro::self& self, CoroData* data, const callable_t& callable)
// Top-level wrapper around caller's coroutine callable.
// Normally we like to pass strings and such by const reference -- but in this
// case, we WANT to copy both the name and the callable to our local stack!
void LLCoros::toplevel(std::string name, callable_t callable)
{
// capture the 'self' param in CoroData
data->mSelf = &self;
// keep the CoroData on this top-level function's stack frame
CoroData corodata(name);
// set it as current
mCurrent.reset(&corodata);
// run the code the caller actually wants in the coroutine
try
{
@ -351,81 +304,73 @@ void LLCoros::toplevel(coro::self& self, CoroData* data, const callable_t& calla
//#if LL_WINDOWS && LL_RELEASE_FOR_DOWNLOAD
// winlevel(callable);
//#else
// </FS:Ansariel>
callable();
//#endif // <FS:Ansariel> Disable for more meaningful callstacks
//#endif
// <FS:Ansariel> Disable for more meaningful callstacks
}
catch (const Stop& exc)
{
LL_INFOS("LLCoros") << "coroutine " << name << " terminating because "
<< exc.what() << LL_ENDL;
}
catch (const LLContinueError&)
{
// Any uncaught exception derived from LLContinueError will be caught
// here and logged. This coroutine will terminate but the rest of the
// viewer will carry on.
LOG_UNHANDLED_EXCEPTION(STRINGIZE("coroutine " << data->mName));
LOG_UNHANDLED_EXCEPTION(STRINGIZE("coroutine " << name));
}
catch (...)
{
// Any OTHER kind of uncaught exception will cause the viewer to
// crash, hopefully informatively.
CRASH_ON_UNHANDLED_EXCEPTION(STRINGIZE("coroutine " << name));
}
// <FS:Ansariel> Disable for more meaningful callstacks
//catch (...)
//{
// // Any OTHER kind of uncaught exception will cause the viewer to
// // crash, hopefully informatively.
// CRASH_ON_UNHANDLED_EXCEPTION(STRINGIZE("coroutine " << data->mName));
//}
// </FS:Ansariel>
// This cleanup isn't perfectly symmetrical with the way we initially set
// data->mPrev, but this is our last chance to reset Current.
Current().reset(data->mPrev);
}
/*****************************************************************************
* MUST BE LAST
*****************************************************************************/
// Turn off MSVC optimizations for just LLCoros::launch() -- see
// DEV-32777. But MSVC doesn't support push/pop for optimization flags as it
// does for warning suppression, and we really don't want to force
// optimization ON for other code even in Debug or RelWithDebInfo builds.
//static
void LLCoros::checkStop()
{
if (wasDeleted())
{
LLTHROW(Shutdown("LLCoros was deleted"));
}
// do this AFTER the check above, because getName() depends on
// get_CoroData(), which depends on the local_ptr in our instance().
if (getName().empty())
{
// Our Stop exception and its subclasses are intended to stop loitering
// coroutines. Don't throw it from the main coroutine.
return;
}
if (LLApp::isStopped())
{
LLTHROW(Stopped("viewer is stopped"));
}
if (! LLApp::isRunning())
{
LLTHROW(Stopping("viewer is stopping"));
}
}
#if LL_MSVC
// work around broken optimizations
#pragma warning(disable: 4748)
#pragma warning(disable: 4355) // 'this' used in initializer list: yes, intentionally
#pragma optimize("", off)
#endif // LL_MSVC
LLCoros::CoroData::CoroData(CoroData* prev, const std::string& name,
const callable_t& callable, S32 stacksize):
mPrev(prev),
LLCoros::CoroData::CoroData(const std::string& name):
LLInstanceTracker<CoroData, std::string>(name),
mName(name),
// Wrap the caller's callable in our toplevel() function so we can manage
// Current appropriately at startup and shutdown of each coroutine.
mCoro(boost::bind(toplevel, _1, this, callable), stacksize),
// don't consume events unless specifically directed
mConsuming(false),
mSelf(0),
mCreationTime(LLTimer::getTotalSeconds())
{
}
std::string LLCoros::launch(const std::string& prefix, const callable_t& callable)
LLCoros::CoroData::CoroData(int n):
// This constructor is used for the thread_local instance belonging to the
// default coroutine on each thread. We must give each one a different
// LLInstanceTracker key because LLInstanceTracker's map spans all
// threads, but we want the default coroutine on each thread to have the
// empty string as its visible name because some consumers test for that.
LLInstanceTracker<CoroData, std::string>("main" + stringize(n)),
mName(),
mConsuming(false),
mCreationTime(LLTimer::getTotalSeconds())
{
std::string name(generateDistinctName(prefix));
Current current;
// pass the current value of Current as previous context
CoroData* newCoro = new(std::nothrow) CoroData(current, name, callable, mStackSize);
if (newCoro == NULL)
{
// Out of memory?
printActiveCoroutines();
LL_ERRS("LLCoros") << "Failed to start coroutine: " << name << " Stacksize: " << mStackSize << " Total coroutines: " << mCoros.size() << LL_ENDL;
}
// Store it in our pointer map
mCoros.insert(name, newCoro);
// also set it as current
current.reset(newCoro);
/* Run the coroutine until its first wait, then return here */
(newCoro->mCoro)(std::nothrow);
return name;
}
#if LL_MSVC
// reenable optimizations
#pragma optimize("", on)
#endif // LL_MSVC

View File

@ -29,21 +29,26 @@
#if ! defined(LL_LLCOROS_H)
#define LL_LLCOROS_H
#include <boost/dcoroutine/coroutine.hpp>
#include <boost/dcoroutine/future.hpp>
#include "llexception.h"
#include <boost/fiber/fss.hpp>
#include <boost/fiber/future/promise.hpp>
#include <boost/fiber/future/future.hpp>
#include "mutex.h"
#include "llsingleton.h"
#include <boost/ptr_container/ptr_map.hpp>
#include "llinstancetracker.h"
#include <boost/function.hpp>
#include <boost/thread/tss.hpp>
#include <boost/noncopyable.hpp>
#include <string>
#include <stdexcept>
#include "llcoro_get_id.h" // for friend declaration
// forward-declare helper class
namespace llcoro
{
class Suspending;
// e.g. #include LLCOROS_MUTEX_HEADER
#define LLCOROS_MUTEX_HEADER <boost/fiber/mutex.hpp>
#define LLCOROS_CONDVAR_HEADER <boost/fiber/condition_variable.hpp>
namespace boost {
namespace fibers {
class mutex;
enum class cv_status;
class condition_variable;
}
}
/**
@ -76,19 +81,21 @@ class Suspending;
* name prefix; from your prefix it generates a distinct name, registers the
* new coroutine and returns the actual name.
*
* The name can be used to kill off the coroutine prematurely, if needed. It
* can also provide diagnostic info: we can look up the name of the
* The name
* can provide diagnostic info: we can look up the name of the
* currently-running coroutine.
*
* Finally, the next frame ("mainloop" event) after the coroutine terminates,
* LLCoros will notice its demise and destroy it.
*/
class LL_COMMON_API LLCoros: public LLSingleton<LLCoros>
{
LLSINGLETON(LLCoros);
~LLCoros();
public:
/// Canonical boost::dcoroutines::coroutine signature we use
typedef boost::dcoroutines::coroutine<void()> coro;
/// The viewer's use of the term "coroutine" became deeply embedded before
/// the industry term "fiber" emerged to distinguish userland threads from
/// simpler, more transient kinds of coroutines. Semantically they've
/// always been fibers. But at this point in history, we're pretty much
/// stuck with the term "coroutine."
typedef boost::fibers::fiber coro;
/// Canonical callable type
typedef boost::function<void()> callable_t;
@ -119,10 +126,10 @@ public:
* DEV-32777 comments for an explanation.
*
* Pass a nullary callable. It works to directly pass a nullary free
* function (or static method); for all other cases use boost::bind(). Of
* course, for a non-static class method, the first parameter must be the
* class instance. Any other parameters should be passed via the bind()
* expression.
* function (or static method); for other cases use a lambda expression,
* std::bind() or boost::bind(). Of course, for a non-static class method,
* the first parameter must be the class instance. Any other parameters
* should be passed via the enclosing expression.
*
* launch() tweaks the suggested name so it won't collide with any
* existing coroutine instance, creates the coroutine instance, registers
@ -138,7 +145,7 @@ public:
* one prematurely. Returns @c true if the specified name was found and
* still running at the time.
*/
bool kill(const std::string& name);
// bool kill(const std::string& name);
/**
* From within a coroutine, look up the (tweaked) name string by which
@ -146,16 +153,27 @@ public:
* (e.g. if the coroutine was launched by hand rather than using
* LLCoros::launch()).
*/
std::string getName() const;
static std::string getName();
/// for delayed initialization
/**
* This variation returns a name suitable for log messages: the explicit
* name for an explicitly-launched coroutine, or "mainN" for the default
* coroutine on a thread.
*/
static std::string logname();
/**
* For delayed initialization. To be clear, this will only affect
* coroutines launched @em after this point. The underlying facility
* provides no way to alter the stack size of any running coroutine.
*/
void setStackSize(S32 stacksize);
/// for delayed initialization
void printActiveCoroutines();
/// diagnostic
void printActiveCoroutines(const std::string& when=std::string());
/// get the current coro::self& for those who really really care
static coro::self& get_self();
/// get the current coro::id for those who really really care
static coro::id get_self();
/**
* Most coroutines, most of the time, don't "consume" the events for which
@ -180,6 +198,7 @@ public:
{
set_consuming(consuming);
}
OverrideConsuming(const OverrideConsuming&) = delete;
~OverrideConsuming()
{
set_consuming(mPrevConsuming);
@ -189,142 +208,124 @@ public:
bool mPrevConsuming;
};
/// set string coroutine status for diagnostic purposes
static void setStatus(const std::string& status);
static std::string getStatus();
/// RAII control of status
class TempStatus
{
public:
TempStatus(const std::string& status):
mOldStatus(getStatus())
{
setStatus(status);
}
TempStatus(const TempStatus&) = delete;
~TempStatus()
{
setStatus(mOldStatus);
}
private:
std::string mOldStatus;
};
/// thrown by checkStop()
// It may sound ironic that Stop is derived from LLContinueError, but the
// point is that LLContinueError is the category of exception that should
// not immediately crash the viewer. Stop and its subclasses are to notify
// coroutines that the viewer intends to shut down. The expected response
// is to terminate the coroutine, rather than abort the viewer.
struct Stop: public LLContinueError
{
Stop(const std::string& what): LLContinueError(what) {}
};
/// early stages
struct Stopping: public Stop
{
Stopping(const std::string& what): Stop(what) {}
};
/// cleaning up
struct Stopped: public Stop
{
Stopped(const std::string& what): Stop(what) {}
};
/// cleaned up -- not much survives!
struct Shutdown: public Stop
{
Shutdown(const std::string& what): Stop(what) {}
};
/// Call this intermittently if there's a chance your coroutine might
/// continue running into application shutdown. Throws Stop if LLCoros has
/// been cleaned up.
static void checkStop();
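// Illustrative call pattern (hypothetical coroutine body; assumes
// llcoro::suspend() from lleventcoro.h): poll checkStop() on every pass so a
// long-lived coroutine notices shutdown and unwinds instead of loitering.
//
//     void pollingCoro()
//     {
//         while (true)
//         {
//             LLCoros::checkStop();  // throws LLCoros::Stop once shutdown begins
//             // ... one unit of work ...
//             llcoro::suspend();     // give up control for a tick
//         }
//         // an uncaught Stop is caught and logged by LLCoros::toplevel()
//     }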
/**
* Please do NOT directly use boost::dcoroutines::future! It is essential
* to maintain the "current" coroutine at every context switch. This
* Future wraps the essential boost::dcoroutines::future functionality
* with that maintenance.
* Aliases for promise and future. An older underlying future implementation
* required us to wrap future; that's no longer needed. However -- if it's
* important to restore kill() functionality, we might need to provide a
* proxy, so continue using the aliases.
*/
template <typename T>
class Future;
using Promise = boost::fibers::promise<T>;
template <typename T>
using Future = boost::fibers::future<T>;
template <typename T>
static Future<T> getFuture(Promise<T>& promise) { return promise.get_future(); }
// use mutex, lock, condition_variable suitable for coroutines
using Mutex = boost::fibers::mutex;
using LockType = std::unique_lock<Mutex>;
using cv_status = boost::fibers::cv_status;
using ConditionVariable = boost::fibers::condition_variable;
/// for data local to each running coroutine
template <typename T>
using local_ptr = boost::fibers::fiber_specific_ptr<T>;
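// Illustrative handshake using the aliases above (hypothetical names):
//
//     LLCoros::Promise<LLSD> promise;
//     LLCoros::Future<LLSD> future = LLCoros::getFuture(promise);
//     // hand 'promise' to whoever produces the value, e.g. an event listener
//     // calling promise.set_value(result), then:
//     LLSD value = future.get();  // suspends only this fiber, not the thread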
private:
friend class llcoro::Suspending;
friend llcoro::id llcoro::get_id();
std::string generateDistinctName(const std::string& prefix) const;
bool cleanup(const LLSD&);
void toplevel(std::string name, callable_t callable);
struct CoroData;
static void no_cleanup(CoroData*);
#if LL_WINDOWS
static void winlevel(const callable_t& callable);
#endif
static void toplevel(coro::self& self, CoroData* data, const callable_t& callable);
static CoroData& get_CoroData(const std::string& caller);
S32 mStackSize;
// coroutine-local storage, as it were: one per coro we track
struct CoroData
struct CoroData: public LLInstanceTracker<CoroData, std::string>
{
CoroData(CoroData* prev, const std::string& name,
const callable_t& callable, S32 stacksize);
CoroData(const std::string& name);
CoroData(int n);
// The boost::dcoroutines library supports asymmetric coroutines. Every
// time we context switch out of a coroutine, we pass control to the
// previously-active one (or to the non-coroutine stack owned by the
// thread). So our management of the "current" coroutine must be able to
// restore the previous value when we're about to switch away.
CoroData* mPrev;
// tweaked name of the current coroutine
const std::string mName;
// the actual coroutine instance
LLCoros::coro mCoro;
// set_consuming() state
bool mConsuming;
// When the dcoroutine library calls a top-level callable, it implicitly
// passes coro::self& as the first parameter. All our consumer code used
// to explicitly pass coro::self& down through all levels of call stack,
// because at the leaf level we need it for context-switching. But since
// coroutines are based on cooperative switching, we can cause the
// top-level entry point to stash a pointer to the currently-running
// coroutine, and manage it appropriately as we switch out and back in.
// That eliminates the need to pass it as an explicit parameter down
// through every level, which is unfortunately viral in nature. Finding it
// implicitly rather than explicitly allows minor maintenance in which a
// leaf-level function adds a new async I/O call that suspends the calling
// coroutine, WITHOUT having to propagate coro::self& through every
// function signature down to that point -- and of course through every
// other caller of every such function.
LLCoros::coro::self* mSelf;
// setStatus() state
std::string mStatus;
F64 mCreationTime; // since epoch
};
typedef boost::ptr_map<std::string, CoroData> CoroMap;
CoroMap mCoros;
// Identify the current coroutine's CoroData. Use a little helper class so
// a caller can either use a temporary instance, or instantiate a named
// variable and access it multiple times.
class Current
{
public:
Current();
operator LLCoros::CoroData*() { return get(); }
LLCoros::CoroData* operator->() { return get(); }
LLCoros::CoroData* get() { return mCurrent->get(); }
void reset(LLCoros::CoroData* ptr) { mCurrent->reset(ptr); }
private:
boost::thread_specific_ptr<LLCoros::CoroData>* mCurrent;
};
// Identify the current coroutine's CoroData. This local_ptr isn't static
// because it's a member of an LLSingleton, and we rely on it being
// cleaned up in proper dependency order.
local_ptr<CoroData> mCurrent;
};
namespace llcoro
{
/// Instantiate one of these in a block surrounding any leaf point when
/// control literally switches away from this coroutine.
class Suspending: boost::noncopyable
{
public:
Suspending();
~Suspending();
inline
std::string logname() { return LLCoros::logname(); }
private:
LLCoros::CoroData* mSuspended;
};
} // namespace llcoro
template <typename T>
class LLCoros::Future
{
typedef boost::dcoroutines::future<T> dfuture;
public:
Future():
mFuture(get_self())
{}
typedef typename boost::dcoroutines::make_callback_result<dfuture>::type callback_t;
callback_t make_callback()
{
return boost::dcoroutines::make_callback(mFuture);
}
#ifndef LL_LINUX
explicit
#endif
operator bool() const
{
return bool(mFuture);
}
bool operator!() const
{
return ! mFuture;
}
T get()
{
// instantiate Suspending to manage the "current" coroutine
llcoro::Suspending suspended;
return *mFuture;
}
private:
dfuture mFuture;
};
} // llcoro
#endif /* ! defined(LL_LLCOROS_H) */

View File

@ -36,7 +36,6 @@
#include "llunits.h"
// <FS:Ansariel> Fix dependency on llviewerprecompiledheaders.h in dependent projects
#include "llpreprocessor.h"
/**
* @class LLDate

View File

@ -39,6 +39,9 @@
#if !LL_WINDOWS
# include <syslog.h>
# include <unistd.h>
# include <sys/stat.h>
#else
# include <io.h>
#endif // !LL_WINDOWS
#include <vector>
#include "string.h"
@ -53,7 +56,12 @@
#include "llstl.h"
#include "lltimer.h"
#include "nd/ndlogthrottle.h"
// On Mac, got:
// #error "Boost.Stacktrace requires `_Unwind_Backtrace` function. Define
// `_GNU_SOURCE` macro or `BOOST_STACKTRACE_GNU_SOURCE_NOT_REQUIRED` if
// _Unwind_Backtrace is available without `_GNU_SOURCE`."
#define BOOST_STACKTRACE_GNU_SOURCE_NOT_REQUIRED
#include <boost/stacktrace.hpp>
namespace {
#if LL_WINDOWS
@ -120,23 +128,23 @@ namespace {
class RecordToFile : public LLError::Recorder
{
public:
RecordToFile(const std::string& filename)
RecordToFile(const std::string& filename):
mName(filename)
{
// <FS:Ansariel> Don't screw up log file output
this->showMultiline(true);
mFile.open(filename.c_str(), std::ios_base::out | std::ios_base::app);
if (!mFile)
{
LL_INFOS() << "Error setting log file to " << filename << LL_ENDL;
}
else
{
if (!LLError::getAlwaysFlush())
{
mFile.sync_with_stdio(false);
}
}
else
{
if (!LLError::getAlwaysFlush())
{
mFile.sync_with_stdio(false);
}
}
}
~RecordToFile()
@ -153,11 +161,13 @@ namespace {
#endif
}
bool okay() { return mFile.good(); }
bool okay() const { return mFile.good(); }
virtual void recordMessage(LLError::ELevel level,
const std::string& message) override
{
std::string getFilename() const { return mName; }
virtual void recordMessage(LLError::ELevel level,
const std::string& message) override
{
if (LLError::getAlwaysFlush())
{
mFile << message << std::endl;
@ -166,9 +176,10 @@ namespace {
{
mFile << message << "\n";
}
}
}
private:
const std::string mName;
llofstream mFile;
};
@ -176,7 +187,7 @@ namespace {
class RecordToStderr : public LLError::Recorder
{
public:
RecordToStderr(bool timestamp) : mUseANSI(ANSI_PROBE)
RecordToStderr(bool timestamp) : mUseANSI(checkANSI())
{
this->showMultiline(true);
}
@ -203,9 +214,7 @@ namespace {
static std::string s_ansi_warn = createANSI("34"); // blue
static std::string s_ansi_debug = createANSI("35"); // magenta
mUseANSI = (ANSI_PROBE == mUseANSI) ? (checkANSI() ? ANSI_YES : ANSI_NO) : mUseANSI;
if (ANSI_YES == mUseANSI)
if (mUseANSI)
{
writeANSI((level == LLError::LEVEL_ERROR) ? s_ansi_error :
(level == LLError::LEVEL_WARN) ? s_ansi_warn :
@ -221,12 +230,7 @@ namespace {
}
private:
enum ANSIState
{
ANSI_PROBE,
ANSI_YES,
ANSI_NO
} mUseANSI;
bool mUseANSI;
LL_FORCE_INLINE void writeANSI(const std::string& ansi_code, const std::string& message)
{
@ -237,16 +241,13 @@ namespace {
fprintf(stderr, "%s%s%s\n%s", s_ansi_bold.c_str(), ansi_code.c_str(), message.c_str(), s_ansi_reset.c_str() );
}
bool checkANSI(void)
static bool checkANSI(void)
{
#if LL_LINUX || LL_DARWIN
// Check whether it's okay to use ANSI; if stderr is
// a tty then we assume yes. Can be turned off with
// the LL_NO_ANSI_COLOR env var.
return (0 != isatty(2)) &&
(NULL == getenv("LL_NO_ANSI_COLOR"));
#endif // LL_LINUX
return FALSE; // works in a cygwin shell... ;)
}
};
@ -316,28 +317,35 @@ namespace LLError
{
#ifdef __GNUC__
// GCC: type_info::name() returns a mangled class name, must demangle
// passing nullptr, 0 forces allocation of a unique buffer we can free
// fixing MAINT-8724 on OSX 10.14
// passing nullptr, 0 forces allocation of a unique buffer we can free
// fixing MAINT-8724 on OSX 10.14
int status = -1;
char* name = abi::__cxa_demangle(mangled, nullptr, 0, &status);
std::string result(name ? name : mangled);
free(name);
return result;
std::string result(name ? name : mangled);
free(name);
return result;
#elif LL_WINDOWS
// DevStudio: type_info::name() includes the text "class " at the start
static const std::string class_prefix = "class ";
// Visual Studio: type_info::name() includes the text "class " at the start
std::string name = mangled;
if (0 != name.compare(0, class_prefix.length(), class_prefix))
for (const auto& prefix : std::vector<std::string>{ "class ", "struct " })
{
LL_DEBUGS() << "Did not see '" << class_prefix << "' prefix on '"
<< name << "'" << LL_ENDL;
return name;
if (0 == name.compare(0, prefix.length(), prefix))
{
return name.substr(prefix.length());
}
}
// huh, that's odd, we should see one or the other prefix -- but don't
// try to log unless logging is already initialized
if (is_available())
{
// in Python, " or ".join(vector) -- but in C++, a PITB
LL_DEBUGS() << "Did not see 'class' or 'struct' prefix on '"
<< name << "'" << LL_ENDL;
}
return name;
return name.substr(class_prefix.length());
#else
#else // neither GCC nor Visual Studio
return mangled;
#endif
}
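// Illustrative, standalone sketch of the GCC/clang branch above:
// abi::__cxa_demangle with a null output buffer mallocs a result we must free.
#include <cxxabi.h>
#include <cstdlib>
#include <iostream>
#include <string>
#include <typeinfo>

static std::string demangle_name(const char* mangled)
{
    int status = -1;
    char* name = abi::__cxa_demangle(mangled, nullptr, nullptr, &status);
    std::string result(name ? name : mangled);   // fall back to the mangled form
    std::free(name);                             // free(nullptr) is harmless
    return result;
}

int main()
{
    std::cout << demangle_name(typeid(std::string).name()) << std::endl;
}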
@ -416,7 +424,7 @@ namespace
return false;
}
if (configuration.isUndefined() || !configuration.isMap() || configuration.emptyMap())
if (! configuration || !configuration.isMap())
{
LL_WARNS() << filename() << " missing, ill-formed, or simply undefined"
" content; not changing configuration"
@ -500,11 +508,8 @@ namespace LLError
LLError::TimeFunction mTimeFunction;
Recorders mRecorders;
RecorderPtr mFileRecorder;
RecorderPtr mFixedBufferRecorder;
std::string mFileRecorderFileName;
int mShouldLogCallCounter;
int mShouldLogCallCounter;
private:
SettingsConfig();
@ -539,9 +544,6 @@ namespace LLError
mCrashFunction(NULL),
mTimeFunction(NULL),
mRecorders(),
mFileRecorder(),
mFixedBufferRecorder(),
mFileRecorderFileName(),
mShouldLogCallCounter(0)
{
}
@ -657,31 +659,6 @@ namespace LLError
}
}
#ifdef LL_LINUX
// <FS:ND> Temp hack to get the old linux havok stub to link
CallSite::CallSite(LLError::ELevel level,
char const* file,
int line,
std::type_info const& class_info,
char const* function,
char const*,
char const*,
bool)
: mLevel(level),
mFile(file),
mLine(line),
mClassInfo(class_info),
mFunction(function),
mCached(false),
mShouldLog(false),
mPrintOnce(false),
mTags(0),
mTagCount(0)
{
}
// </FS:ND>
#endif
CallSite::~CallSite()
{
delete []mTags;
@ -695,21 +672,37 @@ namespace LLError
namespace
{
bool shouldLogToStderr()
{
bool shouldLogToStderr()
{
#if LL_DARWIN
// On Mac OS X, stderr from apps launched from the Finder goes to the
// console log. It's generally considered bad form to spam too much
// there.
// On Mac OS X, stderr from apps launched from the Finder goes to the
// console log. It's generally considered bad form to spam too much
// there. That scenario can be detected by noticing that stderr is a
// character device (S_IFCHR).
// If stdin is a tty, assume the user launched from the command line and
// therefore wants to see stderr. Otherwise, assume we've been launched
// from the finder and shouldn't spam stderr.
return isatty(0);
// If stderr is a tty or a pipe, assume the user launched from the
// command line or debugger and therefore wants to see stderr.
if (isatty(STDERR_FILENO))
return true;
// not a tty, but might still be a pipe -- check
struct stat st;
if (fstat(STDERR_FILENO, &st) < 0)
{
// capture errno right away, before engaging any other operations
auto errno_save = errno;
// this gets called during log-system setup -- can't log yet!
std::cerr << "shouldLogToStderr: fstat(" << STDERR_FILENO << ") failed, errno "
<< errno_save << std::endl;
// if we can't tell, err on the safe side and don't write stderr
return false;
}
// fstat() worked: return true only if stderr is a pipe
return ((st.st_mode & S_IFMT) == S_IFIFO);
#else
return true;
return true;
#endif
}
}
bool stderrLogWantsTime()
{
@ -726,16 +719,15 @@ namespace
LLError::Settings::getInstance()->reset();
LLError::setDefaultLevel(LLError::LEVEL_INFO);
LLError::setAlwaysFlush(true);
LLError::setEnabledLogTypesMask(0xFFFFFFFF);
LLError::setAlwaysFlush(true);
LLError::setEnabledLogTypesMask(0xFFFFFFFF);
LLError::setFatalFunction(LLError::crashAndLoop);
LLError::setTimeFunction(LLError::utcTime);
// log_to_stderr is only false in the unit and integration tests to keep builds quieter
if (log_to_stderr && shouldLogToStderr())
{
LLError::RecorderPtr recordToStdErr(new RecordToStderr(stderrLogWantsTime()));
LLError::addRecorder(recordToStdErr);
LLError::logToStderr();
}
#if LL_WINDOWS
@ -1035,49 +1027,110 @@ namespace LLError
s->mRecorders.erase(std::remove(s->mRecorders.begin(), s->mRecorders.end(), recorder),
s->mRecorders.end());
}
// Find an entry in SettingsConfig::mRecorders whose RecorderPtr points to
// a Recorder subclass of type RECORDER. Return, not a RecorderPtr (which
// points to the Recorder base class), but a shared_ptr<RECORDER> which
// specifically points to the concrete RECORDER subclass instance, along
// with a Recorders::iterator indicating the position of that entry in
// mRecorders. The shared_ptr might be empty (operator!() returns true) if
// there was no such RECORDER subclass instance in mRecorders.
template <typename RECORDER>
std::pair<boost::shared_ptr<RECORDER>, Recorders::iterator>
findRecorderPos()
{
SettingsConfigPtr s = Settings::instance().getSettingsConfig();
// Since we promise to return an iterator, use a classic iterator
// loop.
auto end{s->mRecorders.end()};
for (Recorders::iterator it{s->mRecorders.begin()}; it != end; ++it)
{
// *it is a RecorderPtr, a shared_ptr<Recorder>. Use a
// dynamic_pointer_cast to try to downcast to test if it's also a
// shared_ptr<RECORDER>.
auto ptr = boost::dynamic_pointer_cast<RECORDER>(*it);
if (ptr)
{
// found the entry we want
return { ptr, it };
}
}
// dropped out of the loop without finding any such entry -- instead
// of default-constructing Recorders::iterator (which might or might
// not be valid), return a value that is valid but not dereferenceable.
return { {}, end };
}
// Find an entry in SettingsConfig::mRecorders whose RecorderPtr points to
// a Recorder subclass of type RECORDER. Return, not a RecorderPtr (which
// points to the Recorder base class), but a shared_ptr<RECORDER> which
// specifically points to the concrete RECORDER subclass instance. The
// shared_ptr might be empty (operator!() returns true) if there was no
// such RECORDER subclass instance in mRecorders.
template <typename RECORDER>
boost::shared_ptr<RECORDER> findRecorder()
{
return findRecorderPos<RECORDER>().first;
}
// Remove an entry from SettingsConfig::mRecorders whose RecorderPtr
// points to a Recorder subclass of type RECORDER. Return true if there
// was one and we removed it, false if there wasn't one to start with.
template <typename RECORDER>
bool removeRecorder()
{
auto found = findRecorderPos<RECORDER>();
if (found.first)
{
SettingsConfigPtr s = Settings::instance().getSettingsConfig();
s->mRecorders.erase(found.second);
}
return bool(found.first);
}
}
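// Illustrative, standalone sketch of the "find by dynamic type" pattern the
// helpers above implement, using std::shared_ptr in place of boost::shared_ptr
// (the Recorder classes here are hypothetical stand-ins).
#include <iostream>
#include <memory>
#include <string>
#include <vector>

struct Recorder { virtual ~Recorder() = default; };
struct FileRecorder: Recorder { std::string mName{"example.log"}; };
struct StderrRecorder: Recorder {};

template <typename RECORDER>
std::shared_ptr<RECORDER> findRecorder(
    const std::vector<std::shared_ptr<Recorder>>& recorders)
{
    for (const auto& ptr : recorders)
    {
        // non-null only if *ptr really is a RECORDER
        if (auto cast = std::dynamic_pointer_cast<RECORDER>(ptr))
            return cast;
    }
    return {};
}

int main()
{
    std::vector<std::shared_ptr<Recorder>> recorders{
        std::make_shared<StderrRecorder>(),
        std::make_shared<FileRecorder>() };
    if (auto file = findRecorder<FileRecorder>(recorders))
        std::cout << "found " << file->mName << std::endl;
}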
namespace LLError
{
void logToFile(const std::string& file_name)
{
SettingsConfigPtr s = Settings::getInstance()->getSettingsConfig();
removeRecorder(s->mFileRecorder);
s->mFileRecorder.reset();
s->mFileRecorderFileName.clear();
// remove any previous Recorder filling this role
removeRecorder<RecordToFile>();
if (!file_name.empty())
{
RecorderPtr recordToFile(new RecordToFile(file_name));
if (boost::dynamic_pointer_cast<RecordToFile>(recordToFile)->okay())
{
s->mFileRecorderFileName = file_name;
s->mFileRecorder = recordToFile;
addRecorder(recordToFile);
}
boost::shared_ptr<RecordToFile> recordToFile(new RecordToFile(file_name));
if (recordToFile->okay())
{
addRecorder(recordToFile);
}
}
}
void logToFixedBuffer(LLLineBuffer* fixedBuffer)
{
SettingsConfigPtr s = Settings::getInstance()->getSettingsConfig();
removeRecorder(s->mFixedBufferRecorder);
s->mFixedBufferRecorder.reset();
if (fixedBuffer)
{
RecorderPtr recordToFixedBuffer(new RecordToFixedBuffer(fixedBuffer));
s->mFixedBufferRecorder = recordToFixedBuffer;
addRecorder(recordToFixedBuffer);
}
}
std::string logFileName()
{
SettingsConfigPtr s = Settings::getInstance()->getSettingsConfig();
return s->mFileRecorderFileName;
auto found = findRecorder<RecordToFile>();
return found? found->getFilename() : std::string();
}
void logToStderr()
{
if (! findRecorder<RecordToStderr>())
{
RecorderPtr recordToStdErr(new RecordToStderr(stderrLogWantsTime()));
addRecorder(recordToStdErr);
}
}
void logToFixedBuffer(LLLineBuffer* fixedBuffer)
{
// remove any previous Recorder filling this role
removeRecorder<RecordToFixedBuffer>();
if (fixedBuffer)
{
RecorderPtr recordToFixedBuffer(new RecordToFixedBuffer(fixedBuffer));
addRecorder(recordToFixedBuffer);
}
}
}
@ -1193,8 +1246,25 @@ namespace
}
namespace {
LLMutex gLogMutex;
LLMutex gCallStacksLogMutex;
// We need a couple different mutexes, but we want to use the same mechanism
// for both. Make getMutex() a template function with different instances
// for different MutexDiscriminator values.
enum MutexDiscriminator
{
LOG_MUTEX,
STACKS_MUTEX
};
// Some logging calls happen very early in processing -- so early that our
// module-static variables aren't yet initialized. getMutex() wraps a
// function-static LLMutex so that early calls can still have a valid
// LLMutex instance.
template <MutexDiscriminator MTX>
LLMutex* getMutex()
{
// guaranteed to be initialized the first time control reaches here
static LLMutex sMutex;
return &sMutex;
}
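// Illustrative, standalone sketch of the same "construct on first use" idiom
// with std::mutex: one distinct function-static mutex per enumerator, usable
// even before module-static objects have been initialized.
#include <mutex>

enum SketchMutexDiscriminator { SKETCH_LOG_MUTEX, SKETCH_STACKS_MUTEX };

template <SketchMutexDiscriminator MTX>
std::mutex& getSketchMutex()
{
    static std::mutex sMutex;   // thread-safe one-time init since C++11
    return sMutex;
}

int main()
{
    std::lock_guard<std::mutex> lock(getSketchMutex<SKETCH_LOG_MUTEX>());
    // getSketchMutex<SKETCH_STACKS_MUTEX>() is a different mutex instance
    return 0;
}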
bool checkLevelMap(const LevelMap& map, const std::string& key,
LLError::ELevel& level)
@ -1247,7 +1317,7 @@ namespace LLError
bool Log::shouldLog(CallSite& site)
{
LLMutexTrylock lock(&gLogMutex, 5);
LLMutexTrylock lock(getMutex<LOG_MUTEX>(), 5);
if (!lock.isLocked())
{
return false;
@ -1298,7 +1368,7 @@ namespace LLError
std::ostringstream* Log::out()
{
LLMutexTrylock lock(&gLogMutex,5);
LLMutexTrylock lock(getMutex<LOG_MUTEX>(),5);
// If we hit a logging request very late during shutdown processing,
// when either of the relevant LLSingletons has already been deleted,
// DO NOT resurrect them.
@ -1318,7 +1388,7 @@ namespace LLError
void Log::flush(std::ostringstream* out, char* message)
{
LLMutexTrylock lock(&gLogMutex,5);
LLMutexTrylock lock(getMutex<LOG_MUTEX>(),5);
if (!lock.isLocked())
{
return;
@ -1358,7 +1428,7 @@ namespace LLError
void Log::flush(std::ostringstream* out, const CallSite& site)
{
LLMutexTrylock lock(&gLogMutex,5);
LLMutexTrylock lock(getMutex<LOG_MUTEX>(),5);
if (!lock.isLocked())
{
return;
@ -1387,11 +1457,6 @@ namespace LLError
delete out;
}
std::ostringstream prefix;
if( nd::logging::throttle( site.mFile, site.mLine, &prefix ) )
return;
if (site.mPrintOnce)
{
std::ostringstream message_stream;
@ -1534,129 +1599,133 @@ namespace LLError
S32 LLCallStacks::sIndex = 0 ;
//static
void LLCallStacks::allocateStackBuffer()
{
if(sBuffer == NULL)
{
sBuffer = new char*[512] ;
sBuffer[0] = new char[512 * 128] ;
for(S32 i = 1 ; i < 512 ; i++)
{
sBuffer[i] = sBuffer[i-1] + 128 ;
}
sIndex = 0 ;
}
}
void LLCallStacks::allocateStackBuffer()
{
if(sBuffer == NULL)
{
sBuffer = new char*[512] ;
sBuffer[0] = new char[512 * 128] ;
for(S32 i = 1 ; i < 512 ; i++)
{
sBuffer[i] = sBuffer[i-1] + 128 ;
}
sIndex = 0 ;
}
}
void LLCallStacks::freeStackBuffer()
{
if(sBuffer != NULL)
{
delete [] sBuffer[0] ;
delete [] sBuffer ;
sBuffer = NULL ;
}
}
void LLCallStacks::freeStackBuffer()
{
if(sBuffer != NULL)
{
delete [] sBuffer[0] ;
delete [] sBuffer ;
sBuffer = NULL ;
}
}
//static
void LLCallStacks::push(const char* function, const int line)
{
LLMutexTrylock lock(&gCallStacksLogMutex, 5);
if (!lock.isLocked())
{
return;
}
//static
void LLCallStacks::push(const char* function, const int line)
{
LLMutexTrylock lock(getMutex<STACKS_MUTEX>(), 5);
if (!lock.isLocked())
{
return;
}
if(sBuffer == NULL)
{
allocateStackBuffer();
}
if(sBuffer == NULL)
{
allocateStackBuffer();
}
if(sIndex > 511)
{
clear() ;
}
if(sIndex > 511)
{
clear() ;
}
strcpy(sBuffer[sIndex], function) ;
sprintf(sBuffer[sIndex] + strlen(function), " line: %d ", line) ;
sIndex++ ;
strcpy(sBuffer[sIndex], function) ;
sprintf(sBuffer[sIndex] + strlen(function), " line: %d ", line) ;
sIndex++ ;
return ;
}
return ;
}
//static
std::ostringstream* LLCallStacks::insert(const char* function, const int line)
{
std::ostringstream* _out = LLError::Log::out();
*_out << function << " line " << line << " " ;
//static
std::ostringstream* LLCallStacks::insert(const char* function, const int line)
{
std::ostringstream* _out = LLError::Log::out();
*_out << function << " line " << line << " " ;
return _out ;
}
return _out ;
}
//static
void LLCallStacks::end(std::ostringstream* _out)
{
LLMutexTrylock lock(getMutex<STACKS_MUTEX>(), 5);
if (!lock.isLocked())
{
return;
}
//static
void LLCallStacks::end(std::ostringstream* _out)
{
LLMutexTrylock lock(&gCallStacksLogMutex, 5);
if (!lock.isLocked())
{
return;
}
if(sBuffer == NULL)
{
allocateStackBuffer();
}
if(sBuffer == NULL)
{
allocateStackBuffer();
}
if(sIndex > 511)
{
clear() ;
}
if(sIndex > 511)
{
clear() ;
}
LLError::Log::flush(_out, sBuffer[sIndex++]) ;
}
LLError::Log::flush(_out, sBuffer[sIndex++]) ;
}
//static
void LLCallStacks::print()
{
LLMutexTrylock lock(getMutex<STACKS_MUTEX>(), 5);
if (!lock.isLocked())
{
return;
}
//static
void LLCallStacks::print()
{
LLMutexTrylock lock(&gCallStacksLogMutex, 5);
if (!lock.isLocked())
{
return;
}
if(sIndex > 0)
{
LL_INFOS() << " ************* PRINT OUT LL CALL STACKS ************* " << LL_ENDL;
while(sIndex > 0)
{
sIndex-- ;
LL_INFOS() << sBuffer[sIndex] << LL_ENDL;
}
LL_INFOS() << " *************** END OF LL CALL STACKS *************** " << LL_ENDL;
}
if(sIndex > 0)
{
LL_INFOS() << " ************* PRINT OUT LL CALL STACKS ************* " << LL_ENDL;
while(sIndex > 0)
{
sIndex-- ;
LL_INFOS() << sBuffer[sIndex] << LL_ENDL;
}
LL_INFOS() << " *************** END OF LL CALL STACKS *************** " << LL_ENDL;
}
if(sBuffer != NULL)
{
freeStackBuffer();
}
}
if(sBuffer != NULL)
{
freeStackBuffer();
}
}
//static
void LLCallStacks::clear()
{
sIndex = 0 ;
}
//static
void LLCallStacks::clear()
{
sIndex = 0 ;
}
//static
void LLCallStacks::cleanup()
{
freeStackBuffer();
}
//static
void LLCallStacks::cleanup()
{
freeStackBuffer();
}
std::ostream& operator<<(std::ostream& out, const LLStacktrace&)
{
return out << boost::stacktrace::stacktrace();
}
}
bool debugLoggingEnabled(const std::string& tag)
{
LLMutexTrylock lock(&gLogMutex, 5);
LLMutexTrylock lock(getMutex<LOG_MUTEX>(), 5);
if (!lock.isLocked())
{
return false;

View File

@ -208,6 +208,15 @@ namespace LLError
static void flush(std::ostringstream* out, char* message);
static void flush(std::ostringstream*, const CallSite&);
static std::string demangle(const char* mangled);
/// classname<TYPE>()
template <typename T>
static std::string classname() { return demangle(typeid(T).name()); }
/// classname(some_pointer)
template <typename T>
static std::string classname(T* const ptr) { return ptr? demangle(typeid(*ptr).name()) : "nullptr"; }
/// classname(some_reference)
template <typename T>
static std::string classname(const T& obj) { return demangle(typeid(obj).name()); }
};
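// Illustrative usage of the three classname() overloads above (the variables
// are hypothetical); all of them funnel through demangle(typeid(...).name()):
//
//     LLError::Log::classname<LLTimer>();      // from a type
//     LLError::Log::classname(some_pointer);   // from a pointer; "nullptr" if null
//     LLError::Log::classname(*some_object);   // from a reference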
struct LL_COMMON_API CallSite
@ -224,19 +233,6 @@ namespace LLError
const char** tags,
size_t tag_count);
#ifdef LL_LINUX
// <FS:ND> Temp hack to get the old linux havok stub to link
CallSite(LLError::ELevel,
char const*,
int,
std::type_info const&,
char const*,
char const*,
char const*,
bool);
// </FS:ND>
#endif
~CallSite();
#ifdef LL_LIBRARY_INCLUDE
@ -281,30 +277,36 @@ namespace LLError
class LL_COMMON_API NoClassInfo { };
// used to indicate no class info known for logging
//LLCallStacks keeps track of call stacks and outputs them to the log file
//when LLAppViewer::handleViewerCrash() is triggered.
//
//Note: to stay simple and efficient while still capturing correct call stacks,
//LLCallStacks is deliberately not thread-safe,
//so avoid using it from multiple threads at the same time.
//Using it from a single thread at a time is fine.
class LL_COMMON_API LLCallStacks
{
private:
static char** sBuffer ;
static S32 sIndex ;
//LLCallStacks keeps track of call stacks and outputs them to the log file
//when LLAppViewer::handleViewerCrash() is triggered.
//
//Note: to stay simple and efficient while still capturing correct call stacks,
//LLCallStacks is deliberately not thread-safe,
//so avoid using it from multiple threads at the same time.
//Using it from a single thread at a time is fine.
class LL_COMMON_API LLCallStacks
{
private:
static char** sBuffer ;
static S32 sIndex ;
static void allocateStackBuffer();
static void freeStackBuffer();
static void allocateStackBuffer();
static void freeStackBuffer();
public:
static void push(const char* function, const int line) ;
static std::ostringstream* insert(const char* function, const int line) ;
static void print() ;
static void clear() ;
static void end(std::ostringstream* _out) ;
static void cleanup();
};
public:
static void push(const char* function, const int line) ;
static std::ostringstream* insert(const char* function, const int line) ;
static void print() ;
static void clear() ;
static void end(std::ostringstream* _out) ;
static void cleanup();
};
// class which, when streamed, inserts the current stack trace
struct LLStacktrace
{
friend std::ostream& operator<<(std::ostream& out, const LLStacktrace&);
};
}
//this is cheaper than llcallstacks if no need to output other variables to call stacks.
@ -400,8 +402,13 @@ typedef LLError::NoClassInfo _LL_CLASS_TO_LOG;
#define LL_WARNS(...) lllog(LLError::LEVEL_WARN, false, ##__VA_ARGS__)
#define LL_ERRS(...) lllog(LLError::LEVEL_ERROR, false, ##__VA_ARGS__)
// alternative to llassert_always that prints explanatory message
#define LL_WARNS_IF(exp, ...) if (exp) LL_WARNS(##__VA_ARGS__) << "(" #exp ")"
#define LL_ERRS_IF(exp, ...) if (exp) LL_ERRS(##__VA_ARGS__) << "(" #exp ")"
// note ## token paste operator hack used above will only work in gcc following
// a comma and is completely unnecessary in VS since the comma is automatically
// suppressed
// https://gcc.gnu.org/onlinedocs/cpp/Variadic-Macros.html
// https://docs.microsoft.com/en-us/cpp/preprocessor/variadic-macros?view=vs-2015
#define LL_WARNS_IF(exp, ...) if (exp) LL_WARNS(__VA_ARGS__) << "(" #exp ")"
#define LL_ERRS_IF(exp, ...) if (exp) LL_ERRS(__VA_ARGS__) << "(" #exp ")"
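// Illustrative usage (hypothetical condition and tag): when the condition
// holds, the stringized expression is emitted ahead of whatever the caller
// streams afterwards.
//
//     LL_WARNS_IF(size > max_size, "Example") << " clamping to max" << LL_ENDL;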
// Only print the log message once (good for warnings or infos that would otherwise
// spam the log file over and over, such as tighter loops).

View File

@ -183,6 +183,7 @@ namespace LLError
// each error message is passed to each recorder via recordMessage()
LL_COMMON_API void logToFile(const std::string& filename);
LL_COMMON_API void logToStderr();
LL_COMMON_API void logToFixedBuffer(LLLineBuffer*);
// Utilities to add recorders for logging to a file or a fixed buffer
// A second call to the same function will remove the logger added

View File

@ -31,17 +31,17 @@
// associated header
#include "lleventcoro.h"
// STL headers
#include <map>
#include <chrono>
#include <exception>
// std headers
// external library headers
#include <boost/fiber/operations.hpp>
// other Linden headers
#include "llsdserialize.h"
#include "llsdutil.h"
#include "llerror.h"
#include "llcoros.h"
#include "llmake.h"
#include "llexception.h"
#include "lleventfilter.h"
#include "stringize.h"
namespace
{
@ -62,7 +62,7 @@ namespace
std::string listenerNameForCoro()
{
// If this coroutine was launched by LLCoros::launch(), find that name.
std::string name(LLCoros::instance().getName());
std::string name(LLCoros::getName());
if (! name.empty())
{
return name;
@ -92,137 +92,173 @@ std::string listenerNameForCoro()
* In the degenerate case in which @a path is an empty array, @a dest will
* @em become @a value rather than @em containing it.
*/
void storeToLLSDPath(LLSD& dest, const LLSD& rawPath, const LLSD& value)
void storeToLLSDPath(LLSD& dest, const LLSD& path, const LLSD& value)
{
if (rawPath.isUndefined())
if (path.isUndefined())
{
// no-op case
return;
}
// Arrange to treat rawPath uniformly as an array. If it's not already an
// array, store it as the only entry in one.
LLSD path;
if (rawPath.isArray())
{
path = rawPath;
}
else
{
path.append(rawPath);
}
// Need to indicate a current destination -- but that current destination
// needs to change as we step through the path array. Where normally we'd
// use an LLSD& to capture a subscripted LLSD lvalue, this time we must
// instead use a pointer -- since it must be reassigned.
LLSD* pdest = &dest;
// Now loop through that array
for (LLSD::Integer i = 0; i < path.size(); ++i)
{
if (path[i].isString())
{
// *pdest is an LLSD map
pdest = &((*pdest)[path[i].asString()]);
}
else if (path[i].isInteger())
{
// *pdest is an LLSD array
pdest = &((*pdest)[path[i].asInteger()]);
}
else
{
// What do we do with Real or Array or Map or ...?
// As it's a coder error -- not a user error -- rub the coder's
// face in it so it gets fixed.
LL_ERRS("lleventcoro") << "storeToLLSDPath(" << dest << ", " << rawPath << ", " << value
<< "): path[" << i << "] bad type " << path[i].type() << LL_ENDL;
}
}
// Here *pdest is where we should store value.
*pdest = value;
// Drill down to where we should store 'value'.
llsd::drill(dest, path) = value;
}
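// Illustrative worked example (hypothetical values):
//
//     LLSD path;              // build the path ["reply", 0]
//     path.append("reply");
//     path.append(0);
//     LLSD dest;
//     storeToLLSDPath(dest, path, "replypump");
//
// dest ends up as { 'reply' : [ 'replypump' ] }, i.e. the value lands at
// dest["reply"][0].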
/// For LLCoros::Future<LLSD>::make_callback(), the callback has a signature
/// like void callback(LLSD), which isn't a valid LLEventPump listener: such
/// listeners must return bool.
template <typename LISTENER>
class FutureListener
{
public:
// FutureListener is instantiated on the coroutine stack: the stack, in
// other words, that wants to suspend.
FutureListener(const LISTENER& listener):
mListener(listener),
// Capture the suspending coroutine's flag as a consuming or
// non-consuming listener.
mConsume(LLCoros::get_consuming())
{}
// operator()() is called on the main stack: the stack on which the
// expected event is fired.
bool operator()(const LLSD& event)
{
mListener(event);
// tell upstream LLEventPump whether listener consumed
return mConsume;
}
protected:
LISTENER mListener;
bool mConsume;
};
} // anonymous
void llcoro::suspend()
{
// By viewer convention, we post an event on the "mainloop" LLEventPump
// each iteration of the main event-handling loop. So waiting for a single
// event on "mainloop" gives us a one-frame suspend.
suspendUntilEventOn("mainloop");
LLCoros::checkStop();
LLCoros::TempStatus st("waiting one tick");
boost::this_fiber::yield();
}
void llcoro::suspendUntilTimeout(float seconds)
{
LLEventTimeout timeout;
timeout.eventAfter(seconds, LLSD());
llcoro::suspendUntilEventOn(timeout);
LLCoros::checkStop();
// We used to call boost::this_fiber::sleep_for(). But some coroutines
// (e.g. LLExperienceCache::idleCoro()) sit in a suspendUntilTimeout()
// loop, in which case a sleep_for() call risks sleeping through shutdown.
// So instead, listen for "LLApp" state-changing events -- which
// fortunately is handled for us by suspendUntilEventOnWithTimeout().
// Wait for an event on a bogus LLEventPump on which nobody ever posts
// events. Don't make it static because that would force instantiation of
// the LLEventPumps LLSingleton registry at static initialization time.
// DO allow tweaking the name for uniqueness, this definitely gets
// re-entered on multiple coroutines!
// We could use an LLUUID if it were important to actively prohibit anyone
// from ever posting on this LLEventPump.
LLEventStream bogus("xyzzy", true);
// Timeout is the NORMAL case for this call!
static LLSD timedout;
// Deliver, but ignore, timedout when (as usual) we did not receive any
// "LLApp" event. The point is that suspendUntilEventOnWithTimeout() will
// itself throw Stopping when "LLApp" starts broadcasting shutdown events.
suspendUntilEventOnWithTimeout(bogus, seconds, timedout);
}
LLSD llcoro::postAndSuspend(const LLSD& event, const LLEventPumpOrPumpName& requestPump,
const LLEventPumpOrPumpName& replyPump, const LLSD& replyPumpNamePath)
namespace
{
// declare the future
LLCoros::Future<LLSD> future;
// make a callback that will assign a value to the future, and listen on
// the specified LLEventPump with that callback
std::string listenerName(listenerNameForCoro());
LLTempBoundListener connection(
replyPump.getPump().listen(listenerName,
llmake<FutureListener>(future.make_callback())));
// returns a listener on replyPumpP, also on "mainloop" -- both should be
// stored in LLTempBoundListeners on the caller's stack frame
std::pair<LLBoundListener, LLBoundListener>
postAndSuspendSetup(const std::string& callerName,
const std::string& listenerName,
LLCoros::Promise<LLSD>& promise,
const LLSD& event,
const LLEventPumpOrPumpName& requestPumpP,
const LLEventPumpOrPumpName& replyPumpP,
const LLSD& replyPumpNamePath)
{
// Before we get any farther -- should we be stopping instead of
// suspending?
LLCoros::checkStop();
// Get the consuming attribute for THIS coroutine, the one that's about to
// suspend. Don't call get_consuming() in the lambda body: that would
// return the consuming attribute for some other coroutine, most likely
// the main routine.
bool consuming(LLCoros::get_consuming());
// listen on the specified LLEventPump with a lambda that will assign a
// value to the promise, thus fulfilling its future
llassert_always_msg(replyPumpP, ("replyPump required for " + callerName));
LLEventPump& replyPump(replyPumpP.getPump());
// The relative order of the two listen() calls below would only matter if
// "LLApp" were an LLEventMailDrop. But if we ever go there, we'd want to
// notice the pending LLApp status first.
LLBoundListener stopper(
LLEventPumps::instance().obtain("LLApp").listen(
listenerName,
[&promise, listenerName](const LLSD& status)
{
// anything except "running" should wake up the waiting
// coroutine
auto& statsd = status["status"];
if (statsd.asString() != "running")
{
LL_DEBUGS("lleventcoro") << listenerName
<< " spotted status " << statsd
<< ", throwing Stopping" << LL_ENDL;
try
{
promise.set_exception(
std::make_exception_ptr(
LLCoros::Stopping("status " + statsd.asString())));
}
catch (const boost::fibers::promise_already_satisfied&)
{
LL_WARNS("lleventcoro") << listenerName
<< " couldn't throw Stopping "
"because promise already set" << LL_ENDL;
}
}
// do not consume -- every listener must see status
return false;
}));
LLBoundListener connection(
replyPump.listen(
listenerName,
[&promise, consuming, listenerName](const LLSD& result)
{
try
{
promise.set_value(result);
// We did manage to propagate the result value to the
// (real) listener. If we're supposed to indicate that
// we've consumed it, do so.
return consuming;
}
catch(boost::fibers::promise_already_satisfied & ex)
{
LL_DEBUGS("lleventcoro") << "promise already satisfied in '"
<< listenerName << "': " << ex.what() << LL_ENDL;
// We could not propagate the result value to the
// listener.
return false;
}
}));
// skip the "post" part if requestPump is default-constructed
    if (requestPumpP)
{
LLEventPump& requestPump(requestPumpP.getPump());
// If replyPumpNamePath is non-empty, store the replyPump name in the
// request event.
LLSD modevent(event);
        storeToLLSDPath(modevent, replyPumpNamePath, replyPump.getName());
        LL_DEBUGS("lleventcoro") << callerName << ": coroutine " << listenerName
                                 << " posting to " << requestPump.getName()
<< LL_ENDL;
// *NOTE:Mani - Removed because modevent could contain user's hashed passwd.
// << ": " << modevent << LL_ENDL;
        requestPump.post(modevent);
}
LL_DEBUGS("lleventcoro") << "postAndSuspend(): coroutine " << listenerName
<< " about to wait on LLEventPump " << replyPump.getPump().getName()
LL_DEBUGS("lleventcoro") << callerName << ": coroutine " << listenerName
<< " about to wait on LLEventPump " << replyPump.getName()
<< LL_ENDL;
return { connection, stopper };
}
} // anonymous
LLSD llcoro::postAndSuspend(const LLSD& event, const LLEventPumpOrPumpName& requestPump,
const LLEventPumpOrPumpName& replyPump, const LLSD& replyPumpNamePath)
{
LLCoros::Promise<LLSD> promise;
std::string listenerName(listenerNameForCoro());
// Store both connections into LLTempBoundListeners so we implicitly
// disconnect on return from this function.
auto connections =
postAndSuspendSetup("postAndSuspend()", listenerName, promise,
event, requestPump, replyPump, replyPumpNamePath);
LLTempBoundListener connection(connections.first), stopper(connections.second);
// declare the future
LLCoros::Future<LLSD> future = LLCoros::getFuture(promise);
// calling get() on the future makes us wait for it
LLCoros::TempStatus st(STRINGIZE("waiting for " << replyPump.getPump().getName()));
LLSD value(future.get());
LL_DEBUGS("lleventcoro") << "postAndSuspend(): coroutine " << listenerName
<< " resuming with " << value << LL_ENDL;
@ -230,147 +266,52 @@ LLSD llcoro::postAndSuspend(const LLSD& event, const LLEventPumpOrPumpName& requ
return value;
}
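// Usage sketch (illustrative only -- the pump names, the "reply" key and the
// payload below are assumed for the example, not taken from this module): a
// coroutine posts a request and suspends until the responder posts to the
// reply pump named in the outgoing event.
static void exampleRequestCoro()
{
    LLSD request;
    request["op"] = "ping";                            // hypothetical payload
    LLSD reply = llcoro::postAndSuspend(request,
                                        "requestPump", // assumed request pump name
                                        "replyPump",   // assumed reply pump name
                                        "reply");      // replyPumpNamePath key
    LL_DEBUGS("lleventcoro") << "example coroutine got " << reply << LL_ENDL;
}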
LLSD llcoro::suspendUntilEventOnWithTimeout(const LLEventPumpOrPumpName& suspendPumpOrName,
F32 timeoutin, const LLSD &timeoutResult)
LLSD llcoro::postAndSuspendWithTimeout(const LLSD& event,
const LLEventPumpOrPumpName& requestPump,
const LLEventPumpOrPumpName& replyPump,
const LLSD& replyPumpNamePath,
F32 timeout, const LLSD& timeoutResult)
{
/**
* The timeout pump is attached upstream of the waiting pump and will
* pass the timeout event through it. We CAN NOT attach downstream since
* doing so will cause the suspendPump to fire any waiting events immediately
* and they will be lost. This becomes especially problematic with the
* LLEventTimeout(pump) constructor which will also attempt to fire those
* events using the virtual listen_impl method in the not yet fully constructed
* timeoutPump.
*/
LLEventTimeout timeoutPump;
LLEventPump &suspendPump = suspendPumpOrName.getPump();
LLCoros::Promise<LLSD> promise;
std::string listenerName(listenerNameForCoro());
LLTempBoundListener timeoutListener(timeoutPump.listen(suspendPump.getName(),
boost::bind(&LLEventPump::post, &suspendPump, _1)));
// Store both connections into LLTempBoundListeners so we implicitly
// disconnect on return from this function.
auto connections =
postAndSuspendSetup("postAndSuspendWithTimeout()", listenerName, promise,
event, requestPump, replyPump, replyPumpNamePath);
LLTempBoundListener connection(connections.first), stopper(connections.second);
timeoutPump.eventAfter(timeoutin, timeoutResult);
return llcoro::suspendUntilEventOn(suspendPump);
}
namespace
{
/**
* This helper is specifically for postAndSuspend2(). We use a single future
* object, but we want to listen on two pumps with it. Since we must still
* adapt from the callable constructed by boost::dcoroutines::make_callback()
* (void return) to provide an event listener (bool return), we've adapted
* FutureListener for the purpose. The basic idea is that we construct a
* distinct instance of FutureListener2 -- binding different instance data --
* for each of the pumps. Then, when a pump delivers an LLSD value to either
* FutureListener2, it can combine that LLSD with its discriminator to feed
* the future object.
*
* DISCRIM is a template argument so we can use llmake() rather than
* having to write our own argument-deducing helper function.
*/
template <typename LISTENER, typename DISCRIM>
class FutureListener2: public FutureListener<LISTENER>
{
typedef FutureListener<LISTENER> super;
public:
// instantiated on coroutine stack: the stack about to suspend
FutureListener2(const LISTENER& listener, DISCRIM discriminator):
super(listener),
mDiscrim(discriminator)
{}
// called on main stack: the stack on which event is fired
bool operator()(const LLSD& event)
{
// our future object is defined to accept LLEventWithID
super::mListener(LLEventWithID(event, mDiscrim));
// tell LLEventPump whether or not event was consumed
return super::mConsume;
}
private:
const DISCRIM mDiscrim;
};
} // anonymous
namespace llcoro
{
LLEventWithID postAndSuspend2(const LLSD& event,
const LLEventPumpOrPumpName& requestPump,
const LLEventPumpOrPumpName& replyPump0,
const LLEventPumpOrPumpName& replyPump1,
const LLSD& replyPump0NamePath,
const LLSD& replyPump1NamePath)
{
// declare the future
LLCoros::Future<LLEventWithID> future;
// either callback will assign a value to this future; listen on
// each specified LLEventPump with a callback
std::string name(listenerNameForCoro());
LLTempBoundListener connection0(
replyPump0.getPump().listen(
name + "a",
llmake<FutureListener2>(future.make_callback(), 0)));
LLTempBoundListener connection1(
replyPump1.getPump().listen(
name + "b",
llmake<FutureListener2>(future.make_callback(), 1)));
// skip the "post" part if requestPump is default-constructed
if (requestPump)
LLCoros::Future<LLSD> future = LLCoros::getFuture(promise);
// wait for specified timeout
boost::fibers::future_status status;
{
// If either replyPumpNamePath is non-empty, store the corresponding
// replyPump name in the request event.
LLSD modevent(event);
storeToLLSDPath(modevent, replyPump0NamePath,
replyPump0.getPump().getName());
storeToLLSDPath(modevent, replyPump1NamePath,
replyPump1.getPump().getName());
LL_DEBUGS("lleventcoro") << "postAndSuspend2(): coroutine " << name
<< " posting to " << requestPump.getPump().getName()
<< ": " << modevent << LL_ENDL;
requestPump.getPump().post(modevent);
LLCoros::TempStatus st(STRINGIZE("waiting for " << replyPump.getPump().getName()
<< " for " << timeout << "s"));
// The fact that we accept non-integer seconds means we should probably
// use granularity finer than one second. However, given the overhead of
// the rest of our processing, it seems silly to use granularity finer
// than a millisecond.
status = future.wait_for(std::chrono::milliseconds(long(timeout * 1000)));
}
LL_DEBUGS("lleventcoro") << "postAndSuspend2(): coroutine " << name
<< " about to wait on LLEventPumps " << replyPump0.getPump().getName()
<< ", " << replyPump1.getPump().getName() << LL_ENDL;
// calling get() on the future makes us wait for it
LLEventWithID value(future.get());
LL_DEBUGS("lleventcoro") << "postAndSuspend(): coroutine " << name
<< " resuming with (" << value.first << ", " << value.second << ")"
<< LL_ENDL;
// returning should disconnect both connections
return value;
}
LLSD errorException(const LLEventWithID& result, const std::string& desc)
{
// If the result arrived on the error pump (pump 1), instead of
// returning it, deliver it via exception.
if (result.second)
// if the future is NOT yet ready, return timeoutResult instead
if (status == boost::fibers::future_status::timeout)
{
LLTHROW(LLErrorEvent(desc, result.first));
LL_DEBUGS("lleventcoro") << "postAndSuspendWithTimeout(): coroutine " << listenerName
<< " timed out after " << timeout << " seconds,"
<< " resuming with " << timeoutResult << LL_ENDL;
return timeoutResult;
}
// That way, our caller knows a simple return must be from the reply
// pump (pump 0).
return result.first;
}
LLSD errorLog(const LLEventWithID& result, const std::string& desc)
{
// If the result arrived on the error pump (pump 1), log it as a fatal
// error.
if (result.second)
else
{
LL_ERRS("errorLog") << desc << ":" << std::endl;
LLSDSerialize::toPrettyXML(result.first, LL_CONT);
LL_CONT << LL_ENDL;
}
// A simple return must therefore be from the reply pump (pump 0).
return result.first;
}
llassert_always(status == boost::fibers::future_status::ready);
} // namespace llcoro
// future is now ready, no more waiting
LLSD value(future.get());
LL_DEBUGS("lleventcoro") << "postAndSuspendWithTimeout(): coroutine " << listenerName
<< " resuming with " << value << LL_ENDL;
// returning should disconnect the connection
return value;
}
}
@ -29,12 +29,8 @@
#if ! defined(LL_LLEVENTCORO_H)
#define LL_LLEVENTCORO_H
#include <boost/optional.hpp>
#include <string>
#include <utility> // std::pair
#include "llevents.h"
#include "llerror.h"
#include "llexception.h"
/**
* Like LLListenerOrPumpName, this is a class intended for parameter lists:
@ -147,117 +143,29 @@ LLSD suspendUntilEventOn(const LLEventPumpOrPumpName& pump)
return postAndSuspend(LLSD(), LLEventPumpOrPumpName(), pump);
}
/// Like postAndSuspend(), but if we wait longer than @a timeout seconds,
/// stop waiting and return @a timeoutResult instead.
LLSD postAndSuspendWithTimeout(const LLSD& event,
const LLEventPumpOrPumpName& requestPump,
const LLEventPumpOrPumpName& replyPump,
const LLSD& replyPumpNamePath,
F32 timeout, const LLSD& timeoutResult);
/// Suspend the coroutine until an event is fired on the identified pump
/// or the timeout duration has elapsed. If the timeout duration
/// elapses the specified LLSD is returned.
LLSD suspendUntilEventOnWithTimeout(const LLEventPumpOrPumpName& suspendPumpOrName, F32 timeoutin, const LLSD &timeoutResult);
} // namespace llcoro
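/// Usage sketch for the timeout variant above (illustrative only; the pump
/// name "replyPump" and the 10-second figure are assumed for the example):
/// if nothing arrives in time, the coroutine resumes with the caller-supplied
/// marker instead of suspending indefinitely.
/// @code
/// LLSD timeoutMarker;
/// timeoutMarker["timed_out"] = true;
/// LLSD reply = llcoro::suspendUntilEventOnWithTimeout("replyPump", 10.f, timeoutMarker);
/// if (reply.has("timed_out"))
/// {
///     // nothing arrived before the timeout
/// }
/// @endcode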
/// return type for two-pump variant of suspendUntilEventOn()
typedef std::pair<LLSD, int> LLEventWithID;
namespace llcoro
{
/**
* This function waits for a reply on either of two specified LLEventPumps.
* Otherwise, it closely resembles postAndSuspend(); please see the documentation
* for that function for detailed parameter info.
*
* While we could have implemented the single-pump variant in terms of this
* one, there's enough added complexity here to make it worthwhile to give the
* single-pump variant its own straightforward implementation. Conversely,
* though we could use preprocessor logic to generate n-pump overloads up to
* BOOST_COROUTINE_WAIT_MAX, we don't foresee a use case. This two-pump
* overload exists because certain event APIs are defined in terms of a reply
* LLEventPump and an error LLEventPump.
*
* The LLEventWithID return value provides not only the received event, but
* the index of the pump on which it arrived (0 or 1).
*
* @note
* I'd have preferred to overload the name postAndSuspend() for both signatures.
* But consider the following ambiguous call:
* @code
* postAndSuspend(LLSD(), requestPump, replyPump, "someString");
* @endcode
* "someString" could be converted to either LLSD (@a replyPumpNamePath for
* the single-pump function) or LLEventOrPumpName (@a replyPump1 for two-pump
* function).
*
* It seems less burdensome to write postAndSuspend2() than to write either
* LLSD("someString") or LLEventOrPumpName("someString").
*/
LLEventWithID postAndSuspend2(const LLSD& event,
const LLEventPumpOrPumpName& requestPump,
const LLEventPumpOrPumpName& replyPump0,
const LLEventPumpOrPumpName& replyPump1,
const LLSD& replyPump0NamePath=LLSD(),
const LLSD& replyPump1NamePath=LLSD());
/**
* Wait for the next event on either of two specified LLEventPumps.
*/
inline
LLEventWithID
suspendUntilEventOn(const LLEventPumpOrPumpName& pump0, const LLEventPumpOrPumpName& pump1)
LLSD suspendUntilEventOnWithTimeout(const LLEventPumpOrPumpName& suspendPumpOrName,
F32 timeoutin, const LLSD &timeoutResult)
{
// This is now a convenience wrapper for postAndSuspend2().
return postAndSuspend2(LLSD(), LLEventPumpOrPumpName(), pump0, pump1);
return postAndSuspendWithTimeout(LLSD(), // event
LLEventPumpOrPumpName(), // requestPump
suspendPumpOrName, // replyPump
LLSD(), // replyPumpNamePath
timeoutin,
timeoutResult);
}
/**
* Helper for the two-pump variant of suspendUntilEventOn(), e.g.:
*
* @code
* LLSD reply = errorException(suspendUntilEventOn(replyPump, errorPump),
* "error response from login.cgi");
* @endcode
*
* Examines an LLEventWithID, assuming that the second pump (pump 1) is
* listening for an error indication. If the incoming data arrived on pump 1,
* throw an LLErrorEvent exception. If the incoming data arrived on pump 0,
* just return it. Since a normal return can only be from pump 0, we no longer
* need the LLEventWithID's discriminator int; we can just return the LLSD.
*
* @note I'm not worried about introducing the (fairly generic) name
* errorException() into global namespace, because how many other overloads of
* the same name are going to accept an LLEventWithID parameter?
*/
LLSD errorException(const LLEventWithID& result, const std::string& desc);
} // namespace llcoro
/**
* Exception thrown by errorException(). We don't call this LLEventError
* because it's not an error in event processing: rather, this exception
* announces an event that bears error information (for some other API).
*/
class LL_COMMON_API LLErrorEvent: public LLException
{
public:
LLErrorEvent(const std::string& what, const LLSD& data):
LLException(what),
mData(data)
{}
virtual ~LLErrorEvent() throw() {}
LLSD getData() const { return mData; }
private:
LLSD mData;
};
namespace llcoro
{
/**
* Like errorException(), save that this trips a fatal error using LL_ERRS
* rather than throwing an exception.
*/
LL_COMMON_API LLSD errorLog(const LLEventWithID& result, const std::string& desc);
} // namespace llcoro
/**
@ -304,84 +212,4 @@ private:
LLEventStream mPump;
};
/**
* Other event APIs require the names of two different LLEventPumps: one for
* success response, the other for error response. Extend LLCoroEventPump
* for the two-pump use case.
*/
class LL_COMMON_API LLCoroEventPumps
{
public:
LLCoroEventPumps(const std::string& name="coro",
const std::string& suff0="Reply",
const std::string& suff1="Error"):
mPump0(name + suff0, true), // allow tweaking the pump instance name
mPump1(name + suff1, true)
{}
/// request pump 0's name
std::string getName0() const { return mPump0.getName(); }
/// request pump 1's name
std::string getName1() const { return mPump1.getName(); }
/// request both names
std::pair<std::string, std::string> getNames() const
{
return std::pair<std::string, std::string>(mPump0.getName(), mPump1.getName());
}
/// request pump 0
LLEventPump& getPump0() { return mPump0; }
/// request pump 1
LLEventPump& getPump1() { return mPump1; }
/// suspendUntilEventOn(either of our two LLEventPumps)
LLEventWithID suspend()
{
return llcoro::suspendUntilEventOn(mPump0, mPump1);
}
/// errorException(suspend())
LLSD suspendWithException()
{
return llcoro::errorException(suspend(), std::string("Error event on ") + getName1());
}
/// errorLog(suspend())
LLSD suspendWithLog()
{
return llcoro::errorLog(suspend(), std::string("Error event on ") + getName1());
}
LLEventWithID postAndSuspend(const LLSD& event,
const LLEventPumpOrPumpName& requestPump,
const LLSD& replyPump0NamePath=LLSD(),
const LLSD& replyPump1NamePath=LLSD())
{
return llcoro::postAndSuspend2(event, requestPump, mPump0, mPump1,
replyPump0NamePath, replyPump1NamePath);
}
LLSD postAndSuspendWithException(const LLSD& event,
const LLEventPumpOrPumpName& requestPump,
const LLSD& replyPump0NamePath=LLSD(),
const LLSD& replyPump1NamePath=LLSD())
{
return llcoro::errorException(postAndSuspend(event, requestPump,
replyPump0NamePath, replyPump1NamePath),
std::string("Error event on ") + getName1());
}
LLSD postAndSuspendWithLog(const LLSD& event,
const LLEventPumpOrPumpName& requestPump,
const LLSD& replyPump0NamePath=LLSD(),
const LLSD& replyPump1NamePath=LLSD())
{
return llcoro::errorLog(postAndSuspend(event, requestPump,
replyPump0NamePath, replyPump1NamePath),
std::string("Error event on ") + getName1());
}
private:
LLEventStream mPump0, mPump1;
};
#endif /* ! defined(LL_LLEVENTCORO_H) */
@ -37,6 +37,9 @@
// other Linden headers
#include "llerror.h" // LL_ERRS
#include "llsdutil.h" // llsd_matches()
#include "stringize.h"
#include "lleventtimer.h"
#include "lldate.h"
/*****************************************************************************
* LLEventFilter
@ -182,6 +185,27 @@ bool LLEventTimeout::countdownElapsed() const
return mTimer.hasExpired();
}
LLEventTimer* LLEventTimeout::post_every(F32 period, const std::string& pump, const LLSD& data)
{
return LLEventTimer::run_every(
period,
[pump, data](){ LLEventPumps::instance().obtain(pump).post(data); });
}
LLEventTimer* LLEventTimeout::post_at(const LLDate& time, const std::string& pump, const LLSD& data)
{
return LLEventTimer::run_at(
time,
[pump, data](){ LLEventPumps::instance().obtain(pump).post(data); });
}
LLEventTimer* LLEventTimeout::post_after(F32 interval, const std::string& pump, const LLSD& data)
{
return LLEventTimer::run_after(
interval,
[pump, data](){ LLEventPumps::instance().obtain(pump).post(data); });
}
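// Usage sketch (illustrative only -- the "login" pump name and the 30-second
// interval are assumed for the example): schedule a one-shot event and cancel
// it if the operation finishes early.
static void examplePostAfter()
{
    // post LLSD("timeout") on the "login" pump in 30 seconds
    LLEventTimer* oneshot = LLEventTimeout::post_after(30.f, "login", LLSD("timeout"));
    // ... later, if the operation completed early, cancel the pending post --
    // but only if the timer still exists (it deletes itself once it fires)
    if (LLEventTimer::getInstance(oneshot))
    {
        delete oneshot;
    }
}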
/*****************************************************************************
* LLEventBatch
*****************************************************************************/
@ -409,3 +433,61 @@ void LLEventBatchThrottle::setSize(std::size_t size)
flush();
}
}
/*****************************************************************************
* LLEventLogProxy
*****************************************************************************/
LLEventLogProxy::LLEventLogProxy(LLEventPump& source, const std::string& name, bool tweak):
// note: we are NOT using the constructor that implicitly connects!
LLEventFilter(name, tweak),
// instead we simply capture a reference to the subject LLEventPump
mPump(source)
{
}
bool LLEventLogProxy::post(const LLSD& event) /* override */
{
auto counter = mCounter++;
auto eventplus = event;
if (eventplus.type() == LLSD::TypeMap)
{
eventplus["_cnt"] = counter;
}
std::string hdr{STRINGIZE(getName() << ": post " << counter)};
LL_INFOS("LogProxy") << hdr << ": " << event << LL_ENDL;
bool result = mPump.post(eventplus);
LL_INFOS("LogProxy") << hdr << " => " << result << LL_ENDL;
return result;
}
LLBoundListener LLEventLogProxy::listen_impl(const std::string& name,
const LLEventListener& target,
const NameList& after,
const NameList& before)
{
LL_DEBUGS("LogProxy") << "LLEventLogProxy('" << getName() << "').listen('"
<< name << "')" << LL_ENDL;
return mPump.listen(name,
[this, name, target](const LLSD& event)->bool
{ return listener(name, target, event); },
after,
before);
}
bool LLEventLogProxy::listener(const std::string& name,
const LLEventListener& target,
const LLSD& event) const
{
auto eventminus = event;
std::string counter{"**"};
if (eventminus.has("_cnt"))
{
counter = stringize(eventminus["_cnt"].asInteger());
eventminus.erase("_cnt");
}
std::string hdr{STRINGIZE(getName() << " to " << name << " " << counter)};
LL_INFOS("LogProxy") << hdr << ": " << eventminus << LL_ENDL;
bool result = target(eventminus);
LL_INFOS("LogProxy") << hdr << " => " << result << LL_ENDL;
return result;
}
@ -32,8 +32,12 @@
#include "llevents.h"
#include "stdtypes.h"
#include "lltimer.h"
#include "llsdutil.h"
#include <boost/function.hpp>
class LLEventTimer;
class LLDate;
/**
* Generic base class
*/
@ -210,6 +214,19 @@ public:
LLEventTimeout();
LLEventTimeout(LLEventPump& source);
/// using LLEventTimeout as namespace for free functions
/// Post event to specified LLEventPump every period seconds. Delete
/// returned LLEventTimer* to cancel.
static LLEventTimer* post_every(F32 period, const std::string& pump, const LLSD& data);
/// Post event to specified LLEventPump at specified future time. Call
/// LLEventTimer::getInstance(returned pointer) to check whether it's still
/// pending; if so, delete the pointer to cancel.
static LLEventTimer* post_at(const LLDate& time, const std::string& pump, const LLSD& data);
/// Post event to specified LLEventPump after specified interval. Call
/// LLEventTimer::getInstance(returned pointer) to check whether it's still
/// pending; if so, delete the pointer to cancel.
static LLEventTimer* post_after(F32 interval, const std::string& pump, const LLSD& data);
protected:
virtual void setCountdown(F32 seconds);
virtual bool countdownElapsed() const;
@ -376,4 +393,149 @@ private:
std::size_t mBatchSize;
};
/**
* LLStoreListener self-registers on the LLEventPump of interest, and
* unregisters on destruction. As long as it exists, a particular element is
* extracted from every event that comes through the upstream LLEventPump and
* stored into the target variable.
*
* This is implemented as a subclass of LLEventFilter, though strictly
* speaking it isn't really a "filter" at all: it never passes incoming events
* to its own listeners, if any.
*
* TBD: A variant based on output iterators that stores and then increments
* the iterator. Useful with boost::coroutine2!
*/
template <typename T>
class LLStoreListener: public LLEventFilter
{
public:
// pass target and optional path to element
LLStoreListener(T& target, const LLSD& path=LLSD(), bool consume=false):
LLEventFilter("store"),
mTarget(target),
mPath(path),
mConsume(consume)
{}
// construct and connect
LLStoreListener(LLEventPump& source, T& target, const LLSD& path=LLSD(), bool consume=false):
LLEventFilter(source, "store"),
mTarget(target),
mPath(path),
mConsume(consume)
{}
// Calling post() with an LLSD event extracts the element indicated by
// path, then stores it to mTarget.
virtual bool post(const LLSD& event)
{
// Extract the element specified by 'mPath' from 'event'. To perform a
// generic type-appropriate store through mTarget, construct an
// LLSDParam<T> and store that, thus engaging LLSDParam's custom
// conversions.
mTarget = LLSDParam<T>(llsd::drill(event, mPath));
return mConsume;
}
private:
T& mTarget;
const LLSD mPath;
const bool mConsume;
};
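/// Usage sketch (illustrative only; the pump name "StatusPump" and the
/// "status" key are assumed for the example): capture one field of every
/// event into a local variable for later inspection.
/// @code
/// std::string lastStatus;
/// LLStoreListener<std::string> capture(
///     LLEventPumps::instance().obtain("StatusPump"), // source pump
///     lastStatus,                                    // target variable
///     "status");                                     // path into each event
/// LLSD event;
/// event["status"] = "ready";
/// LLEventPumps::instance().obtain("StatusPump").post(event);
/// // lastStatus now holds "ready"
/// @endcode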
/*****************************************************************************
* LLEventLogProxy
*****************************************************************************/
/**
* LLEventLogProxy is a little different than the other LLEventFilter
* subclasses declared in this header file, in that it completely wraps the
* passed LLEventPump (both input and output) instead of simply processing its
* output. Of course, if someone directly posts to the wrapped LLEventPump by
* looking up its string name in LLEventPumps, LLEventLogProxy can't intercept
* that post() call. But as long as consuming code is willing to access the
* LLEventLogProxy instance instead of the wrapped LLEventPump, all event data
* both post()ed and received is logged.
*
* The proxy role means that LLEventLogProxy intercepts more of LLEventPump's
* API than a typical LLEventFilter subclass.
*/
class LLEventLogProxy: public LLEventFilter
{
typedef LLEventFilter super;
public:
/**
* Construct LLEventLogProxy, wrapping the specified LLEventPump.
* Unlike a typical LLEventFilter subclass, the name parameter is @emph
* not optional because typically you want LLEventLogProxy to completely
* replace the wrapped LLEventPump. So you give the subject LLEventPump
* some other name and give the LLEventLogProxy the name that would have
* been used for the subject LLEventPump.
*/
LLEventLogProxy(LLEventPump& source, const std::string& name, bool tweak=false);
/// register a new listener
LLBoundListener listen_impl(const std::string& name, const LLEventListener& target,
const NameList& after, const NameList& before);
/// Post an event to all listeners
virtual bool post(const LLSD& event) /* override */;
private:
/// This method intercepts each call to any target listener. We pass it
/// the listener name and the caller's intended target listener plus the
/// posted LLSD event.
bool listener(const std::string& name,
const LLEventListener& target,
const LLSD& event) const;
LLEventPump& mPump;
LLSD::Integer mCounter{0};
};
/**
* LLEventPumpHolder<T> is a helper for LLEventLogProxyFor<T>. It simply
* stores an instance of T, presumably a subclass of LLEventPump. We derive
* LLEventLogProxyFor<T> from LLEventPumpHolder<T>, ensuring that
* LLEventPumpHolder's contained mWrappedPump is fully constructed before
* passing it to LLEventLogProxyFor's LLEventLogProxy base class constructor.
* But since LLEventPumpHolder<T> presents none of the LLEventPump API,
* LLEventLogProxyFor<T> inherits its methods unambiguously from
* LLEventLogProxy.
*/
template <class T>
class LLEventPumpHolder
{
protected:
LLEventPumpHolder(const std::string& name, bool tweak=false):
mWrappedPump(name, tweak)
{}
T mWrappedPump;
};
/**
* LLEventLogProxyFor<T> is a wrapper around any of the LLEventPump subclasses.
* Instantiating an LLEventLogProxy<T> instantiates an internal T. Otherwise
* it behaves like LLEventLogProxy.
*/
template <class T>
class LLEventLogProxyFor: private LLEventPumpHolder<T>, public LLEventLogProxy
{
// We derive privately from LLEventPumpHolder because it's an
// implementation detail of LLEventLogProxyFor. The only reason it's a
// base class at all is to guarantee that it's constructed first so we can
// pass it to our LLEventLogProxy base class constructor.
typedef LLEventPumpHolder<T> holder;
typedef LLEventLogProxy super;
public:
LLEventLogProxyFor(const std::string& name, bool tweak=false):
// our wrapped LLEventPump subclass instance gets a name suffix
// because that's not the LLEventPump we want consumers to obtain when
// they ask LLEventPumps for this name
holder(name + "-", tweak),
// it's our LLEventLogProxy that gets the passed name
super(holder::mWrappedPump, name, tweak)
{}
};
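/// Usage sketch (illustrative only; the pump name "login-events" and the
/// listener name "observer" are assumed for the example): wrap a stream so
/// every post() and every listener call is logged, while consumers keep the
/// familiar listen()/post() interface.
/// @code
/// LLEventLogProxyFor<LLEventStream> debugPump("login-events");
/// LLTempBoundListener observer(
///     debugPump.listen("observer",
///                      [](const LLSD& event){ return false; }));
/// LLSD note;
/// note["op"] = "connect";
/// debugPump.post(note);   // logged on entry and for each listener called
/// @endcode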
#endif /* ! defined(LL_LLEVENTFILTER_H) */
@ -45,7 +45,7 @@
#include <cctype>
// external library headers
#include <boost/range/iterator_range.hpp>
#include <boost/dcoroutine/exception.hpp> // for abnormal_exit
#include <boost/make_shared.hpp>
#if LL_WINDOWS
#pragma warning (push)
#pragma warning (disable : 4701) // compiler thinks might use uninitialized var, but no
@ -63,52 +63,24 @@
#pragma warning (disable : 4702)
#endif
/*****************************************************************************
* queue_names: specify LLEventPump names that should be instantiated as
* LLEventQueue
*****************************************************************************/
/**
* At present, we recognize particular requested LLEventPump names as needing
* LLEventQueues. Later on we'll migrate this information to an external
* configuration file.
*/
const char* queue_names[] =
{
"placeholder - replace with first real name string"
};
/*****************************************************************************
* If there's a "mainloop" pump, listen on that to flush all LLEventQueues
*****************************************************************************/
struct RegisterFlush : public LLEventTrackable
{
RegisterFlush():
pumps(LLEventPumps::instance())
{
pumps.obtain("mainloop").listen("flushLLEventQueues", boost::bind(&RegisterFlush::flush, this, _1));
}
bool flush(const LLSD&)
{
pumps.flush();
return false;
}
~RegisterFlush()
{
// LLEventTrackable handles stopListening for us.
}
LLEventPumps& pumps;
};
static RegisterFlush registerFlush;
/*****************************************************************************
* LLEventPumps
*****************************************************************************/
LLEventPumps::LLEventPumps():
mFactories
{
{ "LLEventStream", [](const std::string& name, bool tweak)
{ return new LLEventStream(name, tweak); } },
{ "LLEventMailDrop", [](const std::string& name, bool tweak)
{ return new LLEventMailDrop(name, tweak); } }
},
mTypes
{
// LLEventStream is the default for obtain(), so even if somebody DOES
// call obtain("placeholder"), this sample entry won't break anything.
{ "placeholder", "LLEventStream" }
}
{}
LLEventPump& LLEventPumps::obtain(const std::string& name)
{
@ -119,14 +91,31 @@ LLEventPump& LLEventPumps::obtain(const std::string& name)
// name.
return *found->second;
}
    // Here we must instantiate an LLEventPump subclass. Is there a
    // preregistered class name override for this specific instance name?
auto nfound = mTypes.find(name);
std::string type;
if (nfound != mTypes.end())
{
type = nfound->second;
}
// pass tweak=false: we already know there's no existing instance with
// this name
return make(name, false, type);
}
LLEventPump& LLEventPumps::make(const std::string& name, bool tweak,
const std::string& type)
{
// find the relevant factory for this (or default) type
auto found = mFactories.find(type.empty()? "LLEventStream" : type);
if (found == mFactories.end())
{
// Passing an unrecognized type name is a no-no
LLTHROW(BadType(type));
}
auto newInstance = (found->second)(name, tweak);
// LLEventPump's constructor implicitly registers each new instance in
// mPumpMap. But remember that we instantiated it (in mOurPumps) so we'll
// delete it later.
@ -144,14 +133,23 @@ bool LLEventPumps::post(const std::string&name, const LLSD&message)
return (*found).second->post(message);
}
void LLEventPumps::flush()
{
// Flush every known LLEventPump instance. Leave it up to each instance to
// decide what to do with the flush() call.
    for (PumpMap::value_type& pair : mPumpMap)
    {
        pair.second->flush();
}
}
void LLEventPumps::clear()
{
// Clear every known LLEventPump instance. Leave it up to each instance to
// decide what to do with the clear() call.
for (PumpMap::value_type& pair : mPumpMap)
{
pair.second->clear();
}
}
@ -159,9 +157,9 @@ void LLEventPumps::reset()
{
// Reset every known LLEventPump instance. Leave it up to each instance to
// decide what to do with the reset() call.
    for (PumpMap::value_type& pair : mPumpMap)
    {
        pair.second->reset();
}
}
@ -268,6 +266,9 @@ LLEventPumps::~LLEventPumps()
{
delete *mOurPumps.begin();
}
// Reset every remaining registered LLEventPump subclass instance: those
// we DIDN'T instantiate using either make() or obtain().
reset();
}
/*****************************************************************************
@ -284,7 +285,7 @@ LLEventPump::LLEventPump(const std::string& name, bool tweak):
// Register every new instance with LLEventPumps
mRegistry(LLEventPumps::instance().getHandle()),
mName(mRegistry.get()->registerNew(*this, name, tweak)),
    mSignal(boost::make_shared<LLStandardSignal>()),
mEnabled(true)
{}
@ -312,6 +313,14 @@ std::string LLEventPump::inventName(const std::string& pfx)
return STRINGIZE(pfx << suffix++);
}
void LLEventPump::clear()
{
// Destroy the original LLStandardSignal instance, replacing it with a
// whole new one.
mSignal = boost::make_shared<LLStandardSignal>();
mConnections.clear();
}
void LLEventPump::reset()
{
mSignal.reset();
@ -350,8 +359,8 @@ LLBoundListener LLEventPump::listen_impl(const std::string& name, const LLEventL
// is only when the existing connection object is still connected.
if (found != mConnections.end() && found->second.connected())
{
LLTHROW(DupListenerName("Attempt to register duplicate listener name '" + name +
"' on " + typeid(*this).name() + " '" + getName() + "'"));
LLTHROW(DupListenerName("Attempt to register duplicate listener name '" + name +
"' on " + typeid(*this).name() + " '" + getName() + "'"));
}
// Okay, name is unique, try to reconcile its dependencies. Specify a new
// "node" value that we never use for an mSignal placement; we'll fix it
@ -377,8 +386,8 @@ LLBoundListener LLEventPump::listen_impl(const std::string& name, const LLEventL
// unsortable. If we leave the new node in mDeps, it will continue
// to screw up all future attempts to sort()! Pull it out.
mDeps.remove(name);
LLTHROW(Cycle("New listener '" + name + "' on " + typeid(*this).name() +
" '" + getName() + "' would cause cycle: " + e.what()));
LLTHROW(Cycle("New listener '" + name + "' on " + typeid(*this).name() +
" '" + getName() + "' would cause cycle: " + e.what()));
}
// Walk the list to verify that we haven't changed the order.
float previous = 0.0, myprev = 0.0;
@ -442,7 +451,7 @@ LLBoundListener LLEventPump::listen_impl(const std::string& name, const LLEventL
// NOW remove the offending listener node.
mDeps.remove(name);
// Having constructed a description of the order change, inform caller.
            LLTHROW(OrderChange(out.str()));
}
// This node becomes the previous one.
previous = dmi->second;
@ -538,15 +547,7 @@ bool LLEventStream::post(const LLSD& event)
// Let caller know if any one listener handled the event. This is mostly
// useful when using LLEventStream as a listener for an upstream
// LLEventPump.
    return (*signal)(event);
}
/*****************************************************************************
@ -592,46 +593,9 @@ LLBoundListener LLEventMailDrop::listen_impl(const std::string& name,
return LLEventStream::listen_impl(name, listener, after, before);
}
/*****************************************************************************
* LLEventQueue
*****************************************************************************/
bool LLEventQueue::post(const LLSD& event)
void LLEventMailDrop::discard()
{
if (mEnabled)
{
// Defer sending this event by queueing it until flush()
mEventQueue.push_back(event);
}
// Unconditionally return false. We won't know until flush() whether a
// listener claims to have handled the event -- meanwhile, don't block
// other listeners.
return false;
}
void LLEventQueue::flush()
{
if(!mSignal) return;
// Consider the case when a given listener on this LLEventQueue posts yet
// another event on the same queue. If we loop over mEventQueue directly,
// we'll end up processing all those events during the same flush() call
// -- rather like an EventStream. Instead, copy mEventQueue and clear it,
// so that any new events posted to this LLEventQueue during flush() will
// be processed in the *next* flush() call.
EventQueue queue(mEventQueue);
mEventQueue.clear();
// NOTE NOTE NOTE: Any new access to member data beyond this point should
// cause us to move our LLStandardSignal object to a pimpl class along
// with said member data. Then the local shared_ptr will preserve both.
// DEV-43463: capture a local copy of mSignal. See LLEventStream::post()
// for detailed comments.
boost::shared_ptr<LLStandardSignal> signal(mSignal);
for ( ; ! queue.empty(); queue.pop_front())
{
(*signal)(queue.front());
}
mEventHistory.clear();
}
/*****************************************************************************
@ -37,6 +37,7 @@
#include <set>
#include <vector>
#include <deque>
#include <functional>
#if LL_WINDOWS
#pragma warning (push)
#pragma warning (disable : 4263) // boost::signals2::expired_slot::what() has const mismatch
@ -55,7 +56,6 @@
#include <boost/visit_each.hpp>
#include <boost/ref.hpp> // reference_wrapper
#include <boost/type_traits/is_pointer.hpp>
#include <boost/function.hpp>
#include <boost/static_assert.hpp>
#include "llsd.h"
#include "llsingleton.h"
@ -211,8 +211,7 @@ public:
/// exception if you try to call when empty
struct Empty: public LLException
{
        Empty(const std::string& what): LLException("LLListenerOrPumpName::Empty: " + what) {}
};
private:
@ -247,6 +246,30 @@ public:
*/
LLEventPump& obtain(const std::string& name);
/// exception potentially thrown by make()
struct BadType: public LLException
{
BadType(const std::string& what): LLException("BadType: " + what) {}
};
/**
* Create an LLEventPump with suggested name (optionally of specified
* LLEventPump subclass type). As with obtain(), LLEventPumps owns the new
* instance.
*
* As with LLEventPump's constructor, make() could throw
* LLEventPump::DupPumpName unless you pass tweak=true.
*
* As with a hand-constructed LLEventPump subclass, if you pass
* tweak=true, the tweaked name can be obtained by LLEventPump::getName().
*
* Pass empty type to get the default LLEventStream.
*
* If you pass an unrecognized type string, make() throws BadType.
*/
LLEventPump& make(const std::string& name, bool tweak=false,
const std::string& type=std::string());
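    /// Usage sketch for make() (illustrative only; the instance name
    /// "loginReply" is assumed for the example): create a named
    /// LLEventMailDrop through the registry rather than constructing one by
    /// hand; LLEventPumps owns and eventually deletes it.
    /// @code
    /// LLEventPump& drop =
    ///     LLEventPumps::instance().make("loginReply", false, "LLEventMailDrop");
    /// drop.post(LLSD("hello"));   // held for listeners that attach later
    /// @endcode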
/**
* Find the named LLEventPump instance. If it exists post the message to it.
* If the pump does not exist, do nothing.
@ -263,6 +286,11 @@ public:
*/
void flush();
/**
* Disconnect listeners from all known LLEventPump instances
*/
void clear();
/**
* Reset all known LLEventPump instances
* workaround for DEV-35406 crash on shutdown
@ -298,43 +326,21 @@ testable:
// destroyed.
typedef std::set<LLEventPump*> PumpSet;
PumpSet mOurPumps;
// LLEventPump names that should be instantiated as LLEventQueue rather
// than as LLEventStream
typedef std::set<std::string> PumpNames;
PumpNames mQueueNames;
// for make(), map string type name to LLEventPump subclass factory function
typedef std::map<std::string, std::function<LLEventPump*(const std::string&, bool)>> PumpFactories;
// Data used by make().
// One might think mFactories and mTypes could reasonably be static. So
// they could -- if not for the fact that make() or obtain() might be
// called before this module's static variables have been initialized.
// This is why we use singletons in the first place.
PumpFactories mFactories;
// for obtain(), map desired string instance name to string type when
// obtain() must create the instance
typedef std::map<std::string, std::string> InstanceTypes;
InstanceTypes mTypes;
};
/*****************************************************************************
* details
*****************************************************************************/
namespace LLEventDetail
{
/// Any callable capable of connecting an LLEventListener to an
/// LLStandardSignal to produce an LLBoundListener can be mapped to this
/// signature.
typedef boost::function<LLBoundListener(const LLEventListener&)> ConnectFunc;
/// overload of visit_and_connect() when we have a string identifier available
template <typename LISTENER>
LLBoundListener visit_and_connect(const std::string& name,
const LISTENER& listener,
const ConnectFunc& connect_func);
/**
* Utility template function to use Visitor appropriately
*
* @param listener Callable to connect, typically a boost::bind()
* expression. This will be visited by Visitor using boost::visit_each().
* @param connect_func Callable that will connect() @a listener to an
* LLStandardSignal, returning LLBoundListener.
*/
template <typename LISTENER>
LLBoundListener visit_and_connect(const LISTENER& listener,
const ConnectFunc& connect_func)
{
return visit_and_connect("", listener, connect_func);
}
} // namespace LLEventDetail
/*****************************************************************************
* LLEventTrackable
*****************************************************************************/
@ -369,11 +375,6 @@ namespace LLEventDetail
* instance, it attempts to dereference the <tt>Foo*</tt> pointer that was
* <tt>delete</tt>d but not zeroed.)
* - Undefined behavior results.
* If you suspect you may encounter any such scenario, you're better off
* managing the lifespan of your object with <tt>boost::shared_ptr</tt>.
* Passing <tt>LLEventPump::listen()</tt> a <tt>boost::bind()</tt> expression
* involving a <tt>boost::weak_ptr<Foo></tt> is recognized specially, engaging
* thread-safe Boost.Signals2 machinery.
*/
typedef boost::signals2::trackable LLEventTrackable;
@ -382,7 +383,7 @@ typedef boost::signals2::trackable LLEventTrackable;
*****************************************************************************/
/**
* LLEventPump is the base class interface through which we access the
* concrete subclasses LLEventStream and LLEventQueue.
* concrete subclasses such as LLEventStream.
*
* @NOTE
* LLEventPump derives from LLEventTrackable so that when you "chain"
@ -403,8 +404,7 @@ public:
*/
struct DupPumpName: public LLException
{
        DupPumpName(const std::string& what): LLException("DupPumpName: " + what) {}
};
/**
@ -440,9 +440,7 @@ public:
*/
struct DupListenerName: public ListenError
{
        DupListenerName(const std::string& what): ListenError("DupListenerName: " + what) {}
};
/**
* exception thrown by listen(). The order dependencies specified for your
@ -454,7 +452,7 @@ public:
*/
struct Cycle: public ListenError
{
        Cycle(const std::string& what): ListenError("Cycle: " + what) {}
};
/**
* exception thrown by listen(). This one means that your new listener
@ -475,7 +473,7 @@ public:
*/
struct OrderChange: public ListenError
{
        OrderChange(const std::string& what): ListenError("OrderChange: " + what) {}
};
/// used by listen()
@ -512,44 +510,13 @@ public:
* the result be assigned to a LLTempBoundListener or the listener is
* manually disconnected when no longer needed since there will be no
* way to later find and disconnect this listener manually.
*
* If (as is typical) you pass a <tt>boost::bind()</tt> expression as @a
* listener, listen() will inspect the components of that expression. If a
* bound object matches any of several cases, the connection will
* automatically be disconnected when that object is destroyed.
*
* * You bind a <tt>boost::weak_ptr</tt>.
* * Binding a <tt>boost::shared_ptr</tt> that way would ensure that the
* referenced object would @em never be destroyed, since the @c
* shared_ptr stored in the LLEventPump would remain an outstanding
* reference. Use the weaken() function to convert your @c shared_ptr to
* @c weak_ptr. Because this is easy to forget, binding a @c shared_ptr
* will produce a compile error (@c BOOST_STATIC_ASSERT failure).
* * You bind a simple pointer or reference to an object derived from
* <tt>boost::enable_shared_from_this</tt>. (UNDER CONSTRUCTION)
* * You bind a simple pointer or reference to an object derived from
* LLEventTrackable. Unlike the cases described above, though, this is
* vulnerable to a couple of cross-thread race conditions, as described
* in the LLEventTrackable documentation.
*/
    LLBoundListener listen(const std::string& name,
                           const LLEventListener& listener,
const NameList& after=NameList(),
const NameList& before=NameList())
{
        return listen_impl(name, listener, after, before);
}
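    /// Usage sketch (illustrative only; the listener name "frameWatcher" is
    /// assumed for the example): keep the returned connection in an
    /// LLTempBoundListener so it is disconnected automatically when the
    /// variable goes out of scope.
    /// @code
    /// LLTempBoundListener watcher(
    ///     LLEventPumps::instance().obtain("mainloop").listen(
    ///         "frameWatcher",
    ///         [](const LLSD&){ return false; }));  // false: event not consumed
    /// @endcode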
/// Get the LLBoundListener associated with the passed name (dummy
@ -587,19 +554,12 @@ public:
private:
friend class LLEventPumps;
virtual void clear();
virtual void reset();
private:
LLBoundListener listen_invoke(const std::string& name, const LLEventListener& listener,
const NameList& after,
const NameList& before)
{
return this->listen_impl(name, listener, after, before);
}
// must precede mName; see LLEventPump::LLEventPump()
LLHandle<LLEventPumps> mRegistry;
@ -663,11 +623,10 @@ public:
* event *must* eventually reach a listener that will consume it, else the
* queue will grow to arbitrary length.
*
* @NOTE: When using an LLEventMailDrop with an LLEventTimeout or
* LLEventFilter attaching the filter downstream, using Timeout's constructor will
* cause the MailDrop to discharge any of its stored events. The timeout should
* instead be connected upstream using its listen() method.
* See llcoro::suspendUntilEventOnWithTimeout() for an example.
*/
class LL_COMMON_API LLEventMailDrop : public LLEventStream
{
@ -679,7 +638,8 @@ public:
virtual bool post(const LLSD& event) override;
/// Remove any history stored in the mail drop.
virtual void flush() override { mEventHistory.clear(); LLEventStream::flush(); };
void discard();
protected:
virtual LLBoundListener listen_impl(const std::string& name, const LLEventListener&,
const NameList& after,
@ -690,30 +650,6 @@ private:
EventList mEventHistory;
};
/*****************************************************************************
* LLEventQueue
*****************************************************************************/
/**
* LLEventQueue is a LLEventPump whose post() method defers calling registered
* listeners until flush() is called.
*/
class LL_COMMON_API LLEventQueue: public LLEventPump
{
public:
LLEventQueue(const std::string& name, bool tweak=false): LLEventPump(name, tweak) {}
virtual ~LLEventQueue() {}
/// Post an event to all listeners
virtual bool post(const LLSD& event);
/// flush queued events
virtual void flush();
private:
typedef std::deque<LLSD> EventQueue;
EventQueue mEventQueue;
};
/*****************************************************************************
* LLReqID
*****************************************************************************/
@ -809,329 +745,6 @@ private:
LL_COMMON_API bool sendReply(const LLSD& reply, const LLSD& request,
const std::string& replyKey="reply");
/**
* Base class for LLListenerWrapper. See visit_and_connect() and llwrap(). We
* provide virtual @c accept_xxx() methods, customization points allowing a
* subclass access to certain data visible at LLEventPump::listen() time.
* Example subclass usage:
*
* @code
* myEventPump.listen("somename",
* llwrap<MyListenerWrapper>(boost::bind(&MyClass::method, instance, _1)));
* @endcode
*
* Because of the anticipated usage (note the anonymous temporary
* MyListenerWrapper instance in the example above), the @c accept_xxx()
* methods must be @c const.
*/
class LL_COMMON_API LLListenerWrapperBase
{
public:
/// New instance. The accept_xxx() machinery makes it important to use
/// shared_ptrs for our data. Many copies of this object are made before
/// the instance that actually ends up in the signal, yet accept_xxx()
/// will later be called on the @em original instance. All copies of the
/// same original instance must share the same data.
LLListenerWrapperBase():
mName(new std::string),
mConnection(new LLBoundListener)
{
}
/// Copy constructor. Copy shared_ptrs to original instance data.
LLListenerWrapperBase(const LLListenerWrapperBase& that):
mName(that.mName),
mConnection(that.mConnection)
{
}
virtual ~LLListenerWrapperBase() {}
/// Ask LLEventPump::listen() for the listener name
virtual void accept_name(const std::string& name) const
{
*mName = name;
}
/// Ask LLEventPump::listen() for the new connection
virtual void accept_connection(const LLBoundListener& connection) const
{
*mConnection = connection;
}
protected:
/// Listener name.
boost::shared_ptr<std::string> mName;
/// Connection.
boost::shared_ptr<LLBoundListener> mConnection;
};
/*****************************************************************************
* Underpinnings
*****************************************************************************/
/**
* We originally provided a suite of overloaded
* LLEventTrackable::listenTo(LLEventPump&, ...) methods that would call
* LLEventPump::listen(...) and then pass the returned LLBoundListener to
* LLEventTrackable::track(). This was workable but error-prone: the coder
* must remember to call listenTo() rather than the more straightforward
* listen() method.
*
* Now we publish only the single canonical listen() method, so there's a
* uniform mechanism. Having a single way to do this is good, in that there's
* no question in the coder's mind which of several alternatives to choose.
*
* To support automatic connection management, we use boost::visit_each
* (http://www.boost.org/doc/libs/1_37_0/doc/html/boost/visit_each.html) to
* inspect each argument of a boost::bind expression. (Although the visit_each
* mechanism was first introduced with the original Boost.Signals library, it
* was only later documented.)
*
* Cases:
* * At least one of the function's arguments is a boost::weak_ptr<T>. Pass
* the corresponding shared_ptr to slot_type::track(). Ideally that would be
* the object whose method we want to call, but in fact we do the same for
* any weak_ptr we might find among the bound arguments. If we're passing
* our bound method a weak_ptr to some object, wouldn't the destruction of
* that object invalidate the call? So we disconnect automatically when any
* such object is destroyed. This is the mechanism preferred by boost::
* signals2.
* One of the function's arguments is a boost::shared_ptr<T>. This produces
* a compile error: the bound copy of the shared_ptr stored in the
* boost::bind object stored in the signal object would make the referenced
* T object immortal. We provide a weaken() function. Pass
* weaken(your_shared_ptr) instead. (We can inspect, but not modify, the
* boost::bind object. Otherwise we'd replace the shared_ptr with weak_ptr
* implicitly and just proceed.)
* * One of the function's arguments is a plain pointer/reference to an object
* derived from boost::enable_shared_from_this. We assume that this object
* is managed using boost::shared_ptr, so we implicitly extract a shared_ptr
* and track that. (UNDER CONSTRUCTION)
* * One of the function's arguments is derived from LLEventTrackable. Pass
* the LLBoundListener to its LLEventTrackable::track(). This is vulnerable
* to a couple different race conditions, as described in LLEventTrackable
* documentation. (NOTE: Now that LLEventTrackable is a typedef for
* boost::signals2::trackable, the Signals2 library handles this itself, so
* our visitor needs no special logic for this case.)
* * Any other argument type is irrelevant to automatic connection management.
*/
namespace LLEventDetail
{
template <typename F>
const F& unwrap(const F& f) { return f; }
template <typename F>
const F& unwrap(const boost::reference_wrapper<F>& f) { return f.get(); }
// Most of the following is lifted from the Boost.Signals use of
// visit_each.
template<bool Cond> struct truth {};
/**
* boost::visit_each() Visitor, used on a template argument <tt>const F&
* f</tt> as follows (see visit_and_connect()):
* @code
* LLEventListener listener(f);
* Visitor visitor(listener); // bind listener so it can track() shared_ptrs
* using boost::visit_each; // allow unqualified visit_each() call for ADL
* visit_each(visitor, unwrap(f));
* @endcode
*/
class Visitor
{
public:
/**
* Visitor binds a reference to LLEventListener so we can track() any
* shared_ptrs we find in the argument list.
*/
Visitor(LLEventListener& listener):
mListener(listener)
{
}
/**
* boost::visit_each() calls this method for each component of a
* boost::bind() expression.
*/
template <typename T>
void operator()(const T& t) const
{
decode(t, 0);
}
private:
// decode() decides between a reference wrapper and anything else
// boost::ref() variant
template<typename T>
void decode(const boost::reference_wrapper<T>& t, int) const
{
// add_if_trackable(t.get_pointer());
}
// decode() anything else
template<typename T>
void decode(const T& t, long) const
{
typedef truth<(boost::is_pointer<T>::value)> is_a_pointer;
maybe_get_pointer(t, is_a_pointer());
}
// maybe_get_pointer() decides between a pointer and a non-pointer
// plain pointer variant
template<typename T>
void maybe_get_pointer(const T& t, truth<true>) const
{
// add_if_trackable(t);
}
// shared_ptr variant
template<typename T>
void maybe_get_pointer(const boost::shared_ptr<T>& t, truth<false>) const
{
// If we have a shared_ptr to this object, it doesn't matter
// whether the object is derived from LLEventTrackable, so no
// further analysis of T is needed.
// mListener.track(t);
// Make this case illegal. Passing a bound shared_ptr to
// slot_type::track() is useless, since the bound shared_ptr will
// keep the object alive anyway! Force the coder to cast to weak_ptr.
// Trivial as it is, make the BOOST_STATIC_ASSERT() condition
// dependent on template param so the macro is only evaluated if
// this method is in fact instantiated, as described here:
// http://www.boost.org/doc/libs/1_34_1/doc/html/boost_staticassert.html
// ATTENTION: Don't bind a shared_ptr<anything> using
// LLEventPump::listen(boost::bind()). Doing so captures a copy of
// the shared_ptr, making the referenced object effectively
// immortal. Use the weaken() function, e.g.:
// somepump.listen(boost::bind(...weaken(my_shared_ptr)...));
// This lets us automatically disconnect when the referenced
// object is destroyed.
BOOST_STATIC_ASSERT(sizeof(T) == 0);
}
// weak_ptr variant
template<typename T>
void maybe_get_pointer(const boost::weak_ptr<T>& t, truth<false>) const
{
// If we have a weak_ptr to this object, it doesn't matter
// whether the object is derived from LLEventTrackable, so no
// further analysis of T is needed.
mListener.track(t);
// std::cout << "Found weak_ptr<" << typeid(T).name() << ">!\n";
}
#if 0
// reference to anything derived from boost::enable_shared_from_this
template <typename T>
inline void maybe_get_pointer(const boost::enable_shared_from_this<T>& ct,
truth<false>) const
{
// Use the slot_type::track(shared_ptr) mechanism. Cast away
// const-ness because (in our code base anyway) it's unusual
// to find shared_ptr<const T>.
boost::enable_shared_from_this<T>&
t(const_cast<boost::enable_shared_from_this<T>&>(ct));
std::cout << "Capturing shared_from_this()" << std::endl;
boost::shared_ptr<T> sp(t.shared_from_this());
/*==========================================================================*|
std::cout << "Capturing weak_ptr" << std::endl;
boost::weak_ptr<T> wp(sp);
|*==========================================================================*/
std::cout << "Tracking shared__ptr" << std::endl;
mListener.track(sp);
}
#endif
// non-pointer variant
template<typename T>
void maybe_get_pointer(const T& t, truth<false>) const
{
// Take the address of this object, because the object itself may be
// trackable
// add_if_trackable(boost::addressof(t));
}
/*==========================================================================*|
// add_if_trackable() adds LLEventTrackable objects to mTrackables
inline void add_if_trackable(const LLEventTrackable* t) const
{
if (t)
{
}
}
// pointer to anything not an LLEventTrackable subclass
inline void add_if_trackable(const void*) const
{
}
// pointer to free function
// The following construct uses the preprocessor to generate
// add_if_trackable() overloads accepting pointer-to-function taking
// 0, 1, ..., LLEVENTS_LISTENER_ARITY parameters of arbitrary type.
#define BOOST_PP_LOCAL_MACRO(n) \
template <typename R \
BOOST_PP_COMMA_IF(n) \
BOOST_PP_ENUM_PARAMS(n, typename T)> \
inline void \
add_if_trackable(R (*)(BOOST_PP_ENUM_PARAMS(n, T))) const \
{ \
}
#define BOOST_PP_LOCAL_LIMITS (0, LLEVENTS_LISTENER_ARITY)
#include BOOST_PP_LOCAL_ITERATE()
#undef BOOST_PP_LOCAL_MACRO
#undef BOOST_PP_LOCAL_LIMITS
|*==========================================================================*/
/// Bind a reference to the LLEventListener to call its track() method.
LLEventListener& mListener;
};
/**
* Utility template function to use Visitor appropriately
*
* @param raw_listener Callable to connect, typically a boost::bind()
* expression. This will be visited by Visitor using boost::visit_each().
* @param connect_func Callable that will connect() @a raw_listener to an
* LLStandardSignal, returning LLBoundListener.
*/
template <typename LISTENER>
LLBoundListener visit_and_connect(const std::string& name,
const LISTENER& raw_listener,
const ConnectFunc& connect_func)
{
// Capture the listener
LLEventListener listener(raw_listener);
// Define our Visitor, binding the listener so we can call
// listener.track() if we discover any shared_ptr<Foo>.
LLEventDetail::Visitor visitor(listener);
// Allow unqualified visit_each() call for ADL
using boost::visit_each;
// Visit each component of a boost::bind() expression. Pass
// 'raw_listener', our template argument, rather than 'listener' from
// which type details have been erased. unwrap() comes from
// Boost.Signals, in case we were passed a boost::ref().
visit_each(visitor, LLEventDetail::unwrap(raw_listener));
// Make the connection using passed function.
LLBoundListener connection(connect_func(listener));
// If the LISTENER is an LLListenerWrapperBase subclass, pass it the
// desired information. It's important that we pass the raw_listener
// so the compiler can make decisions based on its original type.
const LLListenerWrapperBase* lwb =
ll_template_cast<const LLListenerWrapperBase*>(&raw_listener);
if (lwb)
{
lwb->accept_name(name);
lwb->accept_connection(connection);
}
// In any case, show new connection to caller.
return connection;
}
} // namespace LLEventDetail
// Somewhat to my surprise, passing boost::bind(...boost::weak_ptr<T>...) to
// listen() fails in Boost code trying to instantiate LLEventListener (i.e.
// LLStandardSignal::slot_type) because the boost::get_pointer() utility function isn't
@ -1142,12 +755,4 @@ namespace boost
T* get_pointer(const weak_ptr<T>& ptr) { return shared_ptr<T>(ptr).get(); }
}
/// Since we forbid use of listen(boost::bind(...shared_ptr<T>...)), provide an
/// easy way to cast to the corresponding weak_ptr.
template <typename T>
boost::weak_ptr<T> weaken(const boost::shared_ptr<T>& ptr)
{
return boost::weak_ptr<T>(ptr);
}
#endif /* ! defined(LL_LLEVENTS_H) */
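A minimal usage sketch of the pattern described above (the handler class and pump name are made up; it assumes the LLEventPumps::obtain() and listen() APIs from this header): binding weaken(handler) rather than the shared_ptr itself means the pump does not keep the handler alive, and the connection drops when the handler is destroyed.
// Hypothetical illustration -- RequestHandler and the pump name are invented.
#include "llevents.h"
#include <boost/bind.hpp>
#include <boost/shared_ptr.hpp>
class RequestHandler
{
public:
    bool handle(const LLSD& event) { return false; }
};
void example()
{
    boost::shared_ptr<RequestHandler> handler(new RequestHandler);
    LLEventPump& pump(LLEventPumps::instance().obtain("request"));
    // weaken(handler) stores a weak_ptr in the bound listener, so destroying
    // the last real shared_ptr disconnects the listener instead of leaking it
    pump.listen("RequestHandler",
                boost::bind(&RequestHandler::handle, weaken(handler), _1));
}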


@ -57,35 +57,17 @@ LLEventTimer::~LLEventTimer()
//static
void LLEventTimer::updateClass()
{
std::list<LLEventTimer*> completed_timers;
// <FS:ND> Minimize calls to getInstances per frame
// for (instance_iter iter = beginInstances(); iter != endInstances(); )
instance_iter end = endInstances();
for (instance_iter iter = beginInstances(); iter != end; )
// </FS:ND>
for (auto& timer : instance_snapshot())
{
LLEventTimer& timer = *iter++;
F32 et = timer.mEventTimer.getElapsedTimeF32();
if (timer.mEventTimer.getStarted() && et > timer.mPeriod) {
timer.mEventTimer.reset();
if ( timer.tick() )
{
completed_timers.push_back( &timer );
delete &timer;
}
}
}
if ( completed_timers.size() > 0 )
{
for (std::list<LLEventTimer*>::iterator completed_iter = completed_timers.begin();
completed_iter != completed_timers.end();
completed_iter++ )
{
delete *completed_iter;
}
}
}


@ -47,9 +47,76 @@ public:
static void updateClass();
/// Schedule recurring calls to generic callable every period seconds.
/// Returns a pointer; deleting it cancels the recurring calls.
template <typename CALLABLE>
static LLEventTimer* run_every(F32 period, const CALLABLE& callable);
/// Schedule a future call to generic callable. Returns a pointer.
/// CAUTION: The object referenced by that pointer WILL BE DELETED once
/// the callback has been called! LLEventTimer::getInstance(pointer) (NOT
/// pointer->getInstance(pointer)!) can be used to test whether the
/// pointer is still valid. If it is, deleting it will cancel the
/// callback.
template <typename CALLABLE>
static LLEventTimer* run_at(const LLDate& time, const CALLABLE& callable);
/// Like run_at(), but after a time delta rather than at a timestamp.
/// Same CAUTION.
template <typename CALLABLE>
static LLEventTimer* run_after(F32 interval, const CALLABLE& callable);
protected:
LLTimer mEventTimer;
F32 mPeriod;
private:
template <typename CALLABLE>
class Generic;
};
template <typename CALLABLE>
class LLEventTimer::Generic: public LLEventTimer
{
public:
// making TIME generic allows engaging either LLEventTimer constructor
template <typename TIME>
Generic(const TIME& time, bool once, const CALLABLE& callable):
LLEventTimer(time),
mOnce(once),
mCallable(callable)
{}
BOOL tick() override
{
mCallable();
// true tells updateClass() to delete this instance
return mOnce;
}
private:
bool mOnce;
CALLABLE mCallable;
};
template <typename CALLABLE>
LLEventTimer* LLEventTimer::run_every(F32 period, const CALLABLE& callable)
{
// return false to schedule recurring calls
return new Generic<CALLABLE>(period, false, callable);
}
template <typename CALLABLE>
LLEventTimer* LLEventTimer::run_at(const LLDate& time, const CALLABLE& callable)
{
// return true for one-shot callback
return new Generic<CALLABLE>(time, true, callable);
}
template <typename CALLABLE>
LLEventTimer* LLEventTimer::run_after(F32 interval, const CALLABLE& callable)
{
// one-shot callback after specified interval
return new Generic<CALLABLE>(interval, true, callable);
}
#endif //LL_EVENTTIMER_H
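A brief usage sketch for the scheduling helpers above (the periods and log text are illustrative): run_every() repeats until the returned instance is deleted, while run_after() fires once and the instance deletes itself.
#include "lleventtimer.h"
#include "llerror.h"
void example()
{
    // fires every 5 seconds until 'recurring' is deleted
    LLEventTimer* recurring = LLEventTimer::run_every(
        5.f, [] { LL_INFOS() << "heartbeat" << LL_ENDL; });
    // fires once, 2 seconds from now; the instance deletes itself afterwards
    LLEventTimer::run_after(2.f, [] { LL_INFOS() << "one shot" << LL_ENDL; });
    // ... later, to cancel the recurring timer:
    delete recurring;
}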


@ -18,10 +18,28 @@
#include <typeinfo>
// external library headers
#include <boost/exception/diagnostic_information.hpp>
#include <boost/exception/error_info.hpp>
// On Mac, got:
// #error "Boost.Stacktrace requires `_Unwind_Backtrace` function. Define
// `_GNU_SOURCE` macro or `BOOST_STACKTRACE_GNU_SOURCE_NOT_REQUIRED` if
// _Unwind_Backtrace is available without `_GNU_SOURCE`."
#define BOOST_STACKTRACE_GNU_SOURCE_NOT_REQUIRED
#if LL_WINDOWS
// On Windows, header-only implementation causes macro collisions -- use
// prebuilt library
#define BOOST_STACKTRACE_LINK
#endif // LL_WINDOWS
#include <boost/stacktrace.hpp>
// other Linden headers
#include "llerror.h"
#include "llerrorcontrol.h"
// used to attach and extract stacktrace information to/from boost::exception,
// see https://www.boost.org/doc/libs/release/doc/html/stacktrace/getting_started.html#stacktrace.getting_started.exceptions_with_stacktrace
// apparently the struct passed as the first template param needs no definition?
typedef boost::error_info<struct errinfo_stacktrace_, boost::stacktrace::stacktrace>
errinfo_stacktrace;
namespace {
// used by crash_on_unhandled_exception_() and log_unhandled_exception_()
void log_unhandled_exception_(LLError::ELevel level,
@ -53,3 +71,17 @@ void log_unhandled_exception_(const char* file, int line, const char* pretty_fun
// routinely, but we DO expect to return from this function.
log_unhandled_exception_(LLError::LEVEL_WARN, file, line, pretty_function, context);
}
void annotate_exception_(boost::exception& exc)
{
// https://www.boost.org/doc/libs/release/libs/exception/doc/tutorial_transporting_data.html
// "Adding of Arbitrary Data to Active Exception Objects"
// Given a boost::exception&, we can add boost::error_info items to it
// without knowing its leaf type.
// The stacktrace constructor that lets us skip a level -- and why would
// we always include annotate_exception_()? -- also requires a max depth.
// For the nullary constructor, the stacktrace class declaration itself
// passes static_cast<std::size_t>(-1), but that's kind of dubious.
// Anyway, which of us is really going to examine more than 100 frames?
exc << errinfo_stacktrace(boost::stacktrace::stacktrace(1, 100));
}
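A sketch of pulling the attached trace back out at a catch site (illustrative only: it assumes the errinfo_stacktrace typedef above is visible there, which as written is only the case within llexception.cpp itself):
#include <iostream>
#include <boost/exception/get_error_info.hpp>
void report(const boost::exception& caught)
{
    // returns a pointer to the attached stacktrace, or NULL if none was attached
    if (const boost::stacktrace::stacktrace* trace =
            boost::get_error_info<errinfo_stacktrace>(caught))
    {
        std::cout << *trace << std::endl;
    }
}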


@ -67,9 +67,29 @@ struct LLContinueError: public LLException
* enriches the exception's diagnostic_information() with the source file,
* line and containing function of the LLTHROW() macro.
*/
// Currently we implement that using BOOST_THROW_EXCEPTION(). Wrap it in
// LLTHROW() in case we ever want to revisit that implementation decision.
#define LLTHROW(x) BOOST_THROW_EXCEPTION(x)
#define LLTHROW(x) \
do { \
/* Capture the exception object 'x' by value. (Exceptions must */ \
/* be copyable.) It might seem simpler to use */ \
/* BOOST_THROW_EXCEPTION(annotate_exception_(x)) instead of */ \
/* three separate statements, but: */ \
/* - We want to throw 'x' with its original type, not just a */ \
/* reference to boost::exception. */ \
/* - To return x's original type, annotate_exception_() would */ \
/* have to be a template function. */ \
/* - We want annotate_exception_() to be opaque. */ \
/* We also might consider embedding BOOST_THROW_EXCEPTION() in */ \
/* our helper function, but we want the filename and line info */ \
/* embedded by BOOST_THROW_EXCEPTION() to be the throw point */ \
/* rather than always indicating the same line in */ \
/* llexception.cpp. */ \
auto exc{x}; \
annotate_exception_(exc); \
BOOST_THROW_EXCEPTION(exc); \
/* Use the classic 'do { ... } while (0)' macro trick to wrap */ \
/* our multiple statements. */ \
} while (0)
void annotate_exception_(boost::exception& exc);
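A usage sketch for LLTHROW() (the failing function and message are invented; it assumes LLException can be constructed from a message string, as declared earlier in this header): the throw site recorded by BOOST_THROW_EXCEPTION() and the stacktrace attached by annotate_exception_() both surface through boost::diagnostic_information().
#include "llexception.h"
#include <boost/exception/diagnostic_information.hpp>
#include <iostream>
void parse_config()
{
    // hypothetical failure
    LLTHROW(LLException("bad config entry"));
}
void example()
{
    try
    {
        parse_config();
    }
    catch (const LLException& e)
    {
        // includes the file/line/function recorded by BOOST_THROW_EXCEPTION()
        // and, via the errinfo_stacktrace annotation, the captured stacktrace
        std::cerr << boost::diagnostic_information(e) << std::endl;
    }
}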
/// Call this macro from a catch (...) clause
#define CRASH_ON_UNHANDLED_EXCEPTION(CONTEXT) \


@ -199,27 +199,26 @@ TimeBlockTreeNode& BlockTimerStatHandle::getTreeNode() const
void BlockTimer::bootstrapTimerTree()
{
for (BlockTimerStatHandle::instance_tracker_t::instance_iter it = BlockTimerStatHandle::instance_tracker_t::beginInstances(), end_it = BlockTimerStatHandle::instance_tracker_t::endInstances();
it != end_it;
++it)
for (auto& base : BlockTimerStatHandle::instance_snapshot())
{
BlockTimerStatHandle& timer = static_cast<BlockTimerStatHandle&>(*it);
// because of indirect derivation from LLInstanceTracker, have to downcast
BlockTimerStatHandle& timer = static_cast<BlockTimerStatHandle&>(base);
if (&timer == &BlockTimer::getRootTimeBlock()) continue;
// bootstrap tree construction by attaching to last timer to be on stack
// when this timer was called
if (timer.getParent() == &BlockTimer::getRootTimeBlock())
{
{
TimeBlockAccumulator& accumulator = timer.getCurrentAccumulator();
if (accumulator.mLastCaller)
{
{
timer.setParent(accumulator.mLastCaller);
accumulator.mParent = accumulator.mLastCaller;
}
}
// no need to push up tree on first use, flag can be set spuriously
accumulator.mMoveUpTree = false;
}
}
}
}
@ -312,12 +311,10 @@ void BlockTimer::processTimes()
updateTimes();
// reset for next frame
for (BlockTimerStatHandle::instance_tracker_t::instance_iter it = BlockTimerStatHandle::instance_tracker_t::beginInstances(),
end_it = BlockTimerStatHandle::instance_tracker_t::endInstances();
it != end_it;
++it)
for (auto& base : BlockTimerStatHandle::instance_snapshot())
{
BlockTimerStatHandle& timer = static_cast<BlockTimerStatHandle&>(*it);
// because of indirect derivation from LLInstanceTracker, have to downcast
BlockTimerStatHandle& timer = static_cast<BlockTimerStatHandle&>(base);
TimeBlockAccumulator& accumulator = timer.getCurrentAccumulator();
accumulator.mLastCaller = NULL;
@ -368,12 +365,10 @@ void BlockTimer::logStats()
LLSD sd;
{
for (BlockTimerStatHandle::instance_tracker_t::instance_iter it = BlockTimerStatHandle::instance_tracker_t::beginInstances(),
end_it = BlockTimerStatHandle::instance_tracker_t::endInstances();
it != end_it;
++it)
for (auto& base : BlockTimerStatHandle::instance_snapshot())
{
BlockTimerStatHandle& timer = static_cast<BlockTimerStatHandle&>(*it);
// because of indirect derivation from LLInstanceTracker, have to downcast
BlockTimerStatHandle& timer = static_cast<BlockTimerStatHandle&>(base);
LLTrace::PeriodicRecording& frame_recording = LLTrace::get_frame_recording();
sd[timer.getName()]["Time"] = (LLSD::Real) (frame_recording.getLastRecording().getSum(timer).value());
sd[timer.getName()]["Calls"] = (LLSD::Integer) (frame_recording.getLastRecording().getSum(timer.callCount()));


@ -31,6 +31,10 @@
#include "lltrace.h"
#include "lltreeiterators.h"
#if LL_WINDOWS
#include <intrin.h>
#endif
#define LL_FAST_TIMER_ON 1
#define LL_FASTTIMER_USE_RDTSC 1
@ -85,6 +89,8 @@ public:
// return __rdtsc();
//}
// shift off lower 8 bits for lower resolution but longer term timing
// on a 1 GHz machine, a 32-bit word will hold ~1000 seconds of timing
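// (Sanity check on that figure, assuming a ~1 GHz clock: dropping the low
// 8 bits means each stored count represents 2^8 cycles, so a 32-bit word
// spans roughly 2^32 * 2^8 / 1e9 = 2^40 / 1e9 ~= 1100 seconds.)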
#if LL_FASTTIMER_USE_RDTSC


@ -86,6 +86,69 @@ public:
static const char * tmpdir();
};
/// RAII class
class LLUniqueFile
{
public:
// empty
LLUniqueFile(): mFileHandle(nullptr) {}
// wrap (e.g.) result of LLFile::fopen()
LLUniqueFile(LLFILE* f): mFileHandle(f) {}
// no copy
LLUniqueFile(const LLUniqueFile&) = delete;
// move construction
LLUniqueFile(LLUniqueFile&& other)
{
mFileHandle = other.mFileHandle;
other.mFileHandle = nullptr;
}
// The point of LLUniqueFile is to close on destruction.
~LLUniqueFile()
{
close();
}
// simple assignment
LLUniqueFile& operator=(LLFILE* f)
{
close();
mFileHandle = f;
return *this;
}
// copy assignment deleted
LLUniqueFile& operator=(const LLUniqueFile&) = delete;
// move assignment
LLUniqueFile& operator=(LLUniqueFile&& other)
{
close();
std::swap(mFileHandle, other.mFileHandle);
return *this;
}
// explicit close operation
void close()
{
if (mFileHandle)
{
// in case close() throws, set mFileHandle null FIRST
LLFILE* h{nullptr};
std::swap(h, mFileHandle);
LLFile::close(h);
}
}
// detect whether the wrapped LLFILE is open or not
explicit operator bool() const { return bool(mFileHandle); }
bool operator!() { return ! mFileHandle; }
// LLUniqueFile should be usable for any operation that accepts LLFILE*
// (or FILE* for that matter)
operator LLFILE*() const { return mFileHandle; }
private:
LLFILE* mFileHandle;
};
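A small usage sketch for the RAII wrapper above (the file name and contents are illustrative; LLFile::fopen() is the project helper referenced in the comments):
#include "llfile.h"
void write_marker()
{
    // LLUniqueFile adopts the raw LLFILE* and closes it in its destructor
    LLUniqueFile outfile(LLFile::fopen("marker.txt", "w"));
    if (!outfile)
    {
        return; // open failed; nothing to close
    }
    // the implicit conversion to LLFILE* allows ordinary stdio calls
    fprintf(outfile, "started\n");
} // outfile goes out of scope here and the underlying file is closed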
#if LL_WINDOWS
/**
* @brief Controlling input for files.


@ -27,25 +27,15 @@
#include "linden_common.h"
// associated header
#include "llinstancetracker.h"
#include "llapr.h"
#include "llerror.h"
// STL headers
// std headers
// external library headers
// other Linden headers
void LLInstanceTrackerBase::StaticBase::incrementDepth()
void LLInstanceTrackerPrivate::logerrs(const char* cls, const std::string& arg1,
const std::string& arg2, const std::string& arg3)
{
++sIterationNestDepth;
}
void LLInstanceTrackerBase::StaticBase::decrementDepth()
{
llassert(sIterationNestDepth);
--sIterationNestDepth;
}
U32 LLInstanceTrackerBase::StaticBase::getDepth()
{
return sIterationNestDepth;
LL_ERRS("LLInstanceTracker") << LLError::Log::demangle(cls)
<< arg1 << arg2 << arg3 << LL_ENDL;
}


@ -28,438 +28,432 @@
#ifndef LL_LLINSTANCETRACKER_H
#define LL_LLINSTANCETRACKER_H
#include <atomic>
#include <map>
#include <set>
#include <vector>
#include <typeinfo>
#include <memory>
#include <type_traits>
#include "mutex.h"
#include <atomic>
#include "llstringtable.h"
#include <boost/iterator/transform_iterator.hpp>
#include <boost/iterator/indirect_iterator.hpp>
// <FS:CR>
#ifdef LL_DEBUG
#include "llerror.h"
#endif
// </FS:CR>
#include <boost/iterator/filter_iterator.hpp>
// As of 2017-05-06, as far as nat knows, only clang supports __has_feature().
// Unfortunately VS2013's preprocessor shortcut logic doesn't prevent it from
// producing (fatal) warnings for defined(__clang__) && __has_feature(...).
// Have to work around that.
#if ! defined(__clang__)
#define __has_feature(x) 0
#endif // __clang__
#include "lockstatic.h"
#include "stringize.h"
#if defined(LL_TEST_llinstancetracker) && __has_feature(cxx_noexcept)
// ~LLInstanceTracker() performs llassert_always() validation. That's fine in
// production code, since the llassert_always() is implemented as an LL_ERRS
// message, which will crash-with-message. In our integration test executable,
// though, this llassert_always() throws an exception instead so we can test
// error conditions and continue running the test. However -- as of C++11,
// destructors are implicitly noexcept(true). Unless we mark
// ~LLInstanceTracker() noexcept(false), the test executable crashes even on
// the ATTEMPT to throw.
#define LLINSTANCETRACKER_DTOR_NOEXCEPT noexcept(false)
#else
// If we're building for production, or in fact building *any other* test, or
// we're using a compiler that doesn't support __has_feature(), or we're not
// compiling with a C++ version that supports noexcept -- don't specify it.
#define LLINSTANCETRACKER_DTOR_NOEXCEPT
#endif
// As of 2017-05-06, as far as nat knows, only clang supports __has_feature().
// Unfortunately VS2013's preprocessor shortcut logic doesn't prevent it from
// producing (fatal) warnings for defined(__clang__) && __has_feature(...).
// Have to work around that.
#if ! defined(__clang__)
#define __has_feature(x) 0
#endif // __clang__
#if defined(LL_TEST_llinstancetracker) && __has_feature(cxx_noexcept)
// ~LLInstanceTracker() performs llassert_always() validation. That's fine in
// production code, since the llassert_always() is implemented as an LL_ERRS
// message, which will crash-with-message. In our integration test executable,
// though, this llassert_always() throws an exception instead so we can test
// error conditions and continue running the test. However -- as of C++11,
// destructors are implicitly noexcept(true). Unless we mark
// ~LLInstanceTracker() noexcept(false), the test executable crashes even on
// the ATTEMPT to throw.
#define LLINSTANCETRACKER_DTOR_NOEXCEPT noexcept(false)
#else
// If we're building for production, or in fact building *any other* test, or
// we're using a compiler that doesn't support __has_feature(), or we're not
// compiling with a C++ version that supports noexcept -- don't specify it.
#define LLINSTANCETRACKER_DTOR_NOEXCEPT
#endif
/**
* Base class manages "class-static" data that must actually have singleton
* semantics: one instance per process, rather than one instance per module as
* sometimes happens with data simply declared static.
*/
class LL_COMMON_API LLInstanceTrackerBase
/*****************************************************************************
* StaticBase
*****************************************************************************/
namespace LLInstanceTrackerPrivate
{
protected:
/// It's not essential to derive your STATICDATA (for use with
/// getStatic()) from StaticBase; it's just that both known
/// implementations do.
struct StaticBase
{
// <FS:ND> Only needed in debug builds
#ifdef LL_DEBUG
StaticBase()
: sIterationNestDepth(0)
{}
// We need to be able to lock static data while manipulating it.
std::mutex mMutex;
};
#else
StaticBase()
{}
#endif
// </FS:ND>
void incrementDepth();
void decrementDepth();
U32 getDepth();
private:
#ifdef LL_WINDOWS
std::atomic_uint32_t sIterationNestDepth;
#else
std::atomic_uint sIterationNestDepth;
#endif
};
};
LL_COMMON_API void assert_main_thread();
void logerrs(const char* cls, const std::string&, const std::string&, const std::string&);
} // namespace LLInstanceTrackerPrivate
/*****************************************************************************
* LLInstanceTracker with key
*****************************************************************************/
enum EInstanceTrackerAllowKeyCollisions
{
LLInstanceTrackerErrorOnCollision,
LLInstanceTrackerReplaceOnCollision
LLInstanceTrackerErrorOnCollision,
LLInstanceTrackerReplaceOnCollision
};
/// This mix-in class adds support for tracking all instances of the specified class parameter T
/// The (optional) key associates a value of type KEY with a given instance of T, for quick lookup
/// If KEY is not provided, then instances are stored in a simple set
/// @NOTE: see explicit specialization below for default KEY==void case
/// @NOTE: this class is not thread-safe unless used as read-only
template<typename T, typename KEY = void, EInstanceTrackerAllowKeyCollisions KEY_COLLISION_BEHAVIOR = LLInstanceTrackerErrorOnCollision>
class LLInstanceTracker : public LLInstanceTrackerBase
template<typename T, typename KEY = void,
EInstanceTrackerAllowKeyCollisions KEY_COLLISION_BEHAVIOR = LLInstanceTrackerErrorOnCollision>
class LLInstanceTracker
{
protected:
typedef LLInstanceTracker<T, KEY> self_t;
typedef typename std::multimap<KEY, T*> InstanceMap;
struct StaticData: public StaticBase
{
InstanceMap sMap;
};
static StaticData& getStatic() { static StaticData sData; return sData;}
static InstanceMap& getMap_() { return getStatic().sMap; }
typedef std::map<KEY, std::shared_ptr<T>> InstanceMap;
struct StaticData: public LLInstanceTrackerPrivate::StaticBase
{
InstanceMap mMap;
};
typedef llthread::LockStatic<StaticData> LockStatic;
public:
class instance_iter : public boost::iterator_facade<instance_iter, T, boost::forward_traversal_tag>
{
public:
typedef boost::iterator_facade<instance_iter, T, boost::forward_traversal_tag> super_t;
// snapshot of std::pair<const KEY, std::shared_ptr<T>> pairs
class snapshot
{
// It's very important that what we store in this snapshot are
// weak_ptrs, NOT shared_ptrs. That's how we discover whether any
// instance has been deleted during the lifespan of a snapshot.
typedef std::vector<std::pair<const KEY, std::weak_ptr<T>>> VectorType;
// Dereferencing our iterator produces a std::shared_ptr for each
// instance that still exists. Since we store weak_ptrs, that involves
// two chained transformations:
// - a transform_iterator to lock the weak_ptr and return a shared_ptr
// - a filter_iterator to skip any shared_ptr that has become invalid.
// It is very important that we filter lazily, that is, during
// traversal. Any one of our stored weak_ptrs might expire during
// traversal.
typedef std::pair<const KEY, std::shared_ptr<T>> strong_pair;
// Note for future reference: nat has not yet had any luck (up to
// Boost 1.67) trying to use boost::transform_iterator with a hand-
// coded functor, only with actual functions. In my experience, an
// internal boost::result_of() operation fails, even with an explicit
// result_type typedef. But this works.
static strong_pair strengthen(typename VectorType::value_type& pair)
{
return { pair.first, pair.second.lock() };
}
static bool dead_skipper(const strong_pair& pair)
{
return bool(pair.second);
}
instance_iter(const typename InstanceMap::iterator& it)
: mIterator(it)
{
// <FS:ND> Minimize calls to getStatic
#ifdef LL_DEBUG
getStatic().incrementDepth();
#endif
// </FS:ND>
}
public:
snapshot():
// populate our vector with a snapshot of (locked!) InstanceMap
// note, this assigns pair<KEY, shared_ptr> to pair<KEY, weak_ptr>
mData(mLock->mMap.begin(), mLock->mMap.end())
{
// release the lock once we've populated mData
mLock.unlock();
}
~instance_iter()
{
// <FS:ND> Minimize calls to getStatic
#ifdef LL_DEBUG
getStatic().decrementDepth();
#endif
// </FS:ND>
}
// You can't make a transform_iterator (or anything else) that
// literally stores a C++ function (decltype(strengthen)) -- but you
// can make a transform_iterator based on a _function pointer._
typedef boost::transform_iterator<decltype(strengthen)*,
typename VectorType::iterator> strong_iterator;
typedef boost::filter_iterator<decltype(dead_skipper)*, strong_iterator> iterator;
iterator begin() { return make_iterator(mData.begin()); }
iterator end() { return make_iterator(mData.end()); }
private:
friend class boost::iterator_core_access;
private:
iterator make_iterator(typename VectorType::iterator iter)
{
// transform_iterator only needs the base iterator and the transform.
// filter_iterator wants the predicate and both ends of the range.
return iterator(dead_skipper,
strong_iterator(iter, strengthen),
strong_iterator(mData.end(), strengthen));
}
void increment() { mIterator++; }
bool equal(instance_iter const& other) const
{
return mIterator == other.mIterator;
}
// lock static data during construction
#if ! LL_WINDOWS
LockStatic mLock;
#else // LL_WINDOWS
// We want to be able to use (e.g.) our instance_snapshot subclass as:
// for (auto& inst : T::instance_snapshot()) ...
// But when this snapshot base class directly contains LockStatic, as
// above, Visual Studio 2017 requires us to code instead:
// for (auto& inst : std::move(T::instance_snapshot())) ...
// nat thinks this should be unnecessary, as an anonymous class
// instance is already a temporary. It shouldn't need to be cast to
// rvalue reference (the role of std::move()). clang evidently agrees,
// as the short form works fine with Xcode on Mac.
// To support the succinct usage, instead of directly storing
// LockStatic, store std::shared_ptr<LockStatic>, which is copyable.
std::shared_ptr<LockStatic> mLockp{std::make_shared<LockStatic>()};
LockStatic& mLock{*mLockp};
#endif // LL_WINDOWS
VectorType mData;
};
T& dereference() const
{
return *(mIterator->second);
}
// iterate over this for references to each instance
class instance_snapshot: public snapshot
{
private:
static T& instance_getter(typename snapshot::iterator::reference pair)
{
return *pair.second;
}
public:
typedef boost::transform_iterator<decltype(instance_getter)*,
typename snapshot::iterator> iterator;
iterator begin() { return iterator(snapshot::begin(), instance_getter); }
iterator end() { return iterator(snapshot::end(), instance_getter); }
typename InstanceMap::iterator mIterator;
};
void deleteAll()
{
for (auto it(snapshot::begin()), end(snapshot::end()); it != end; ++it)
{
delete it->second.get();
}
}
};
class key_iter : public boost::iterator_facade<key_iter, KEY, boost::forward_traversal_tag>
{
public:
typedef boost::iterator_facade<key_iter, KEY, boost::forward_traversal_tag> super_t;
// iterate over this for each key
class key_snapshot: public snapshot
{
private:
static KEY key_getter(typename snapshot::iterator::reference pair)
{
return pair.first;
}
public:
typedef boost::transform_iterator<decltype(key_getter)*,
typename snapshot::iterator> iterator;
iterator begin() { return iterator(snapshot::begin(), key_getter); }
iterator end() { return iterator(snapshot::end(), key_getter); }
};
key_iter(typename InstanceMap::iterator it)
: mIterator(it)
{
// <FS:ND> Minimize calls to getStatic
#ifdef LL_DEBUG
getStatic().incrementDepth();
#endif
// </FS:ND>
}
static T* getInstance(const KEY& k)
{
LockStatic lock;
const InstanceMap& map(lock->mMap);
typename InstanceMap::const_iterator found = map.find(k);
return (found == map.end()) ? NULL : found->second.get();
}
key_iter(const key_iter& other)
: mIterator(other.mIterator)
{
// <FS:ND> Minimize calls to getStatic
#ifdef LL_DEBUG
getStatic().incrementDepth();
#endif
// </FS:ND>
}
~key_iter()
{
// <FS:ND> Minimize calls to getStatic
#ifdef LL_DEBUG
getStatic().decrementDepth();
#endif
// </FS:ND>
}
private:
friend class boost::iterator_core_access;
void increment() { mIterator++; }
bool equal(key_iter const& other) const
{
return mIterator == other.mIterator;
}
KEY& dereference() const
{
return const_cast<KEY&>(mIterator->first);
}
typename InstanceMap::iterator mIterator;
};
static T* getInstance(const KEY& k)
{
const InstanceMap& map(getMap_());
typename InstanceMap::const_iterator found = map.find(k);
return (found == map.end()) ? NULL : found->second;
}
static instance_iter beginInstances()
{
return instance_iter(getMap_().begin());
}
static instance_iter endInstances()
{
return instance_iter(getMap_().end());
}
static S32 instanceCount()
{
return getMap_().size();
}
static key_iter beginKeys()
{
return key_iter(getMap_().begin());
}
static key_iter endKeys()
{
return key_iter(getMap_().end());
}
static S32 instanceCount()
{
return LockStatic()->mMap.size();
}
protected:
LLInstanceTracker(const KEY& key)
{
// make sure static data outlives all instances
getStatic();
add_(key);
}
virtual ~LLInstanceTracker() LLINSTANCETRACKER_DTOR_NOEXCEPT
{
// it's unsafe to delete instances of this type while all instances are being iterated over.
// <FS:ND> Minimize calls to getStatic
#ifdef LL_DEBUG
llassert_always(getStatic().getDepth() == 0);
#endif
// </FS:ND>
remove_();
}
virtual void setKey(KEY key) { remove_(); add_(key); }
virtual const KEY& getKey() const { return mInstanceKey; }
LLInstanceTracker(const KEY& key)
{
// We do not intend to manage the lifespan of this object with
// shared_ptr, so give it a no-op deleter. We store shared_ptrs in our
// InstanceMap specifically so snapshot can store weak_ptrs so we can
// detect deletions during traversals.
std::shared_ptr<T> ptr(static_cast<T*>(this), [](T*){});
LockStatic lock;
add_(lock, key, ptr);
}
public:
virtual ~LLInstanceTracker()
{
LockStatic lock;
remove_(lock);
}
protected:
virtual void setKey(KEY key)
{
LockStatic lock;
// Even though the shared_ptr we store in our map has a no-op deleter
// for T itself, letting the use count decrement to 0 will still
// delete the use-count object. Capture the shared_ptr we just removed
// and re-add it to the map with the new key.
auto ptr = remove_(lock);
add_(lock, key, ptr);
}
public:
virtual const KEY& getKey() const { return mInstanceKey; }
private:
LLInstanceTracker( const LLInstanceTracker& );
const LLInstanceTracker& operator=( const LLInstanceTracker& );
LLInstanceTracker( const LLInstanceTracker& ) = delete;
LLInstanceTracker& operator=( const LLInstanceTracker& ) = delete;
void add_(const KEY& key)
{
mInstanceKey = key;
InstanceMap& map = getMap_();
typename InstanceMap::iterator insertion_point_it = map.lower_bound(key);
if (insertion_point_it != map.end()
&& insertion_point_it->first == key)
{ // found existing entry with that key
switch(KEY_COLLISION_BEHAVIOR)
{
case LLInstanceTrackerErrorOnCollision:
{
// use assert here instead of LL_ERRS(), otherwise the error will be ignored since this call is made during global object initialization
llassert_always_msg(false, "Instance with this same key already exists!");
break;
}
case LLInstanceTrackerReplaceOnCollision:
{
// replace pointer, but leave key (should have compared equal anyway)
insertion_point_it->second = static_cast<T*>(this);
break;
}
default:
break;
}
}
else
{ // new key
map.insert(insertion_point_it, std::make_pair(key, static_cast<T*>(this)));
}
}
void remove_()
{
InstanceMap& map = getMap_();
typename InstanceMap::iterator iter = map.find(mInstanceKey);
if (iter != map.end())
{
map.erase(iter);
}
}
// for logging
template <typename K>
static std::string report(K key) { return stringize(key); }
static std::string report(const std::string& key) { return "'" + key + "'"; }
static std::string report(const char* key) { return report(std::string(key)); }
// caller must instantiate LockStatic
void add_(LockStatic& lock, const KEY& key, const std::shared_ptr<T>& ptr)
{
mInstanceKey = key;
InstanceMap& map = lock->mMap;
switch(KEY_COLLISION_BEHAVIOR)
{
case LLInstanceTrackerErrorOnCollision:
{
// map stores shared_ptr to self
auto pair = map.emplace(key, ptr);
if (! pair.second)
{
LLInstanceTrackerPrivate::logerrs(typeid(*this).name(), " instance with key ",
report(key), " already exists!");
}
break;
}
case LLInstanceTrackerReplaceOnCollision:
map[key] = ptr;
break;
default:
break;
}
}
std::shared_ptr<T> remove_(LockStatic& lock)
{
InstanceMap& map = lock->mMap;
typename InstanceMap::iterator iter = map.find(mInstanceKey);
if (iter != map.end())
{
auto ret = iter->second;
map.erase(iter);
return ret;
}
return {};
}
private:
KEY mInstanceKey;
KEY mInstanceKey;
};
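A sketch of typical use of the keyed tracker (the class and keys are made up): derive from LLInstanceTracker<T, KEY>, hand the key to the base constructor, then look instances up with getInstance() or walk them via instance_snapshot() / key_snapshot().
#include "llinstancetracker.h"
#include <iostream>
#include <string>
// hypothetical example class
class NamedWidget: public LLInstanceTracker<NamedWidget, std::string>
{
public:
    NamedWidget(const std::string& name):
        LLInstanceTracker<NamedWidget, std::string>(name)
    {}
};
void example()
{
    NamedWidget a("alpha"), b("beta");
    // key lookup: returns NULL if no instance has that key
    if (NamedWidget* found = NamedWidget::getInstance("alpha"))
    {
        std::cout << "found " << found->getKey() << std::endl;
    }
    // iterate a snapshot of current instances; instances deleted while the
    // snapshot is alive are simply skipped during traversal
    for (auto& widget : NamedWidget::instance_snapshot())
    {
        std::cout << widget.getKey() << std::endl;
    }
    // or visit just the keys
    for (const auto& key : NamedWidget::key_snapshot())
    {
        std::cout << key << std::endl;
    }
}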
/*****************************************************************************
* LLInstanceTracker without key
*****************************************************************************/
// TODO:
// - For the case of omitted KEY template parameter, consider storing
// std::map<T*, std::shared_ptr<T>> instead of std::set<std::shared_ptr<T>>.
// That might let us share more of the implementation between KEY and
// non-KEY LLInstanceTracker subclasses.
// - Even if not that, consider trying to unify the snapshot implementations.
// The trouble is that the 'iterator' published by each (and by their
// subclasses) must reflect the specific type of the callables that
// distinguish them. (Maybe make instance_snapshot() and key_snapshot()
// factory functions that pass lambdas to a factory function for the generic
// template class?)
/// explicit specialization for default case where KEY is void
/// use a simple std::set<T*>
template<typename T, EInstanceTrackerAllowKeyCollisions KEY_COLLISION_BEHAVIOR>
class LLInstanceTracker<T, void, KEY_COLLISION_BEHAVIOR> : public LLInstanceTrackerBase
class LLInstanceTracker<T, void, KEY_COLLISION_BEHAVIOR>
{
protected:
typedef LLInstanceTracker<T, void> self_t;
typedef typename std::set<T*> InstanceSet;
struct StaticData: public StaticBase
{
InstanceSet sSet;
};
static StaticData& getStatic() { static StaticData sData; return sData; }
static InstanceSet& getSet_() { return getStatic().sSet; }
typedef std::set<std::shared_ptr<T>> InstanceSet;
struct StaticData: public LLInstanceTrackerPrivate::StaticBase
{
InstanceSet mSet;
};
typedef llthread::LockStatic<StaticData> LockStatic;
public:
/**
* Storing a dumb T* somewhere external is a bad idea, since
* LLInstanceTracker subclasses are explicitly destroyed rather than
* managed by smart pointers. It's legal to declare stack instances of an
* LLInstanceTracker subclass. But it's reasonable to store a
* std::weak_ptr<T>, which will become invalid when the T instance is
* destroyed.
*/
std::weak_ptr<T> getWeak()
{
return mSelf;
}
/**
* Does a particular instance still exist? Of course, if you already have
* a T* in hand, you need not call getInstance() to @em locate the
* instance -- unlike the case where getInstance() accepts some kind of
* key. Nonetheless this method is still useful to @em validate a
* particular T*, since each instance's destructor removes itself from the
* underlying set.
*/
static T* getInstance(T* k)
{
const InstanceSet& set(getSet_());
typename InstanceSet::const_iterator found = set.find(k);
return (found == set.end())? NULL : *found;
}
static S32 instanceCount() { return getSet_().size(); }
static S32 instanceCount() { return LockStatic()->mSet.size(); }
class instance_iter : public boost::iterator_facade<instance_iter, T, boost::forward_traversal_tag>
{
public:
instance_iter(const typename InstanceSet::iterator& it)
: mIterator(it)
{
// <FS:ND> Minimize calls to getStatic
#ifdef LL_DEBUG
getStatic().incrementDepth();
#endif
// </FS:ND>
}
// snapshot of std::shared_ptr<T> pointers
class snapshot
{
// It's very important that what we store in this snapshot are
// weak_ptrs, NOT shared_ptrs. That's how we discover whether any
// instance has been deleted during the lifespan of a snapshot.
typedef std::vector<std::weak_ptr<T>> VectorType;
// Dereferencing our iterator produces a std::shared_ptr for each
// instance that still exists. Since we store weak_ptrs, that involves
// two chained transformations:
// - a transform_iterator to lock the weak_ptr and return a shared_ptr
// - a filter_iterator to skip any shared_ptr that has become invalid.
typedef std::shared_ptr<T> strong_ptr;
static strong_ptr strengthen(typename VectorType::value_type& ptr)
{
return ptr.lock();
}
static bool dead_skipper(const strong_ptr& ptr)
{
return bool(ptr);
}
instance_iter(const instance_iter& other)
: mIterator(other.mIterator)
{
// <FS:ND> Minimize calls to getStatic
#ifdef LL_DEBUG
getStatic().incrementDepth();
#endif
}
public:
snapshot():
// populate our vector with a snapshot of (locked!) InstanceSet
// note, this assigns stored shared_ptrs to weak_ptrs for snapshot
mData(mLock->mSet.begin(), mLock->mSet.end())
{
// release the lock once we've populated mData
mLock.unlock();
}
~instance_iter()
{
// <FS:ND> Minimize calls to getStatic
#ifdef LL_DEBUG
getStatic().decrementDepth();
#endif
// </FS:ND>
}
typedef boost::transform_iterator<decltype(strengthen)*,
typename VectorType::iterator> strong_iterator;
typedef boost::filter_iterator<decltype(dead_skipper)*, strong_iterator> iterator;
private:
friend class boost::iterator_core_access;
iterator begin() { return make_iterator(mData.begin()); }
iterator end() { return make_iterator(mData.end()); }
void increment() { mIterator++; }
bool equal(instance_iter const& other) const
{
return mIterator == other.mIterator;
}
private:
iterator make_iterator(typename VectorType::iterator iter)
{
// transform_iterator only needs the base iterator and the transform.
// filter_iterator wants the predicate and both ends of the range.
return iterator(dead_skipper,
strong_iterator(iter, strengthen),
strong_iterator(mData.end(), strengthen));
}
T& dereference() const
{
return **mIterator;
}
// lock static data during construction
#if ! LL_WINDOWS
LockStatic mLock;
#else // LL_WINDOWS
// We want to be able to use our instance_snapshot subclass as:
// for (auto& inst : T::instance_snapshot()) ...
// But when this snapshot base class directly contains LockStatic, as
// above, Visual Studio 2017 requires us to code instead:
// for (auto& inst : std::move(T::instance_snapshot())) ...
// nat thinks this should be unnecessary, as an anonymous class
// instance is already a temporary. It shouldn't need to be cast to
// rvalue reference (the role of std::move()). clang evidently agrees,
// as the short form works fine with Xcode on Mac.
// To support the succinct usage, instead of directly storing
// LockStatic, store std::shared_ptr<LockStatic>, which is copyable.
std::shared_ptr<LockStatic> mLockp{std::make_shared<LockStatic>()};
LockStatic& mLock{*mLockp};
#endif // LL_WINDOWS
VectorType mData;
};
typename InstanceSet::iterator mIterator;
};
// iterate over this for references to each instance
struct instance_snapshot: public snapshot
{
typedef boost::indirect_iterator<typename snapshot::iterator> iterator;
iterator begin() { return iterator(snapshot::begin()); }
iterator end() { return iterator(snapshot::end()); }
static instance_iter beginInstances() { return instance_iter(getSet_().begin()); }
static instance_iter endInstances() { return instance_iter(getSet_().end()); }
void deleteAll()
{
for (auto it(snapshot::begin()), end(snapshot::end()); it != end; ++it)
{
delete it->get();
}
}
};
protected:
LLInstanceTracker()
{
// make sure static data outlives all instances
getStatic();
getSet_().insert(static_cast<T*>(this));
}
virtual ~LLInstanceTracker() LLINSTANCETRACKER_DTOR_NOEXCEPT
{
// it's unsafe to delete instances of this type while all instances are being iterated over.
LLInstanceTracker()
{
// Since we do not intend for this shared_ptr to manage lifespan, give
// it a no-op deleter.
std::shared_ptr<T> ptr(static_cast<T*>(this), [](T*){});
// save corresponding weak_ptr for future reference
mSelf = ptr;
// Also store it in our class-static set to track this instance.
LockStatic()->mSet.emplace(ptr);
}
public:
virtual ~LLInstanceTracker()
{
// convert weak_ptr to shared_ptr because that's what we store in our
// InstanceSet
LockStatic()->mSet.erase(mSelf.lock());
}
protected:
LLInstanceTracker(const LLInstanceTracker& other):
LLInstanceTracker()
{}
// <FS:ND> Minimize calls to getStatic
#ifdef LL_DEBUG
llassert_always(getStatic().getDepth() == 0);
#endif
// </FS:ND>
getSet_().erase(static_cast<T*>(this));
}
LLInstanceTracker(const LLInstanceTracker& other)
{
getSet_().insert(static_cast<T*>(this));
}
private:
// Storing a weak_ptr to self is a bit like deriving from
// std::enable_shared_from_this(), except more explicit.
std::weak_ptr<T> mSelf;
};
#endif
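The keyless form works the same way minus the key (again, the class is invented): instance_snapshot() yields a reference to every live instance, and getWeak() hands out a std::weak_ptr for safe external references.
#include "llinstancetracker.h"
#include <iostream>
#include <memory>
// hypothetical example class
class Worker: public LLInstanceTracker<Worker>
{
public:
    void poke() { std::cout << "poked" << std::endl; }
};
void example()
{
    Worker w1, w2;
    std::cout << Worker::instanceCount() << " workers" << std::endl; // 2
    // visit every live instance; deletions during traversal are tolerated
    for (auto& worker : Worker::instance_snapshot())
    {
        worker.poke();
    }
    // a weak_ptr that expires when w1 is destroyed -- safer than caching Worker*
    std::weak_ptr<Worker> ref = w1.getWeak();
    if (auto locked = ref.lock())
    {
        locked->poke();
    }
}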


@ -14,6 +14,8 @@
// associated header
#include "llleaplistener.h"
// STL headers
#include <map>
#include <functional>
// std headers
// external library headers
#include <boost/foreach.hpp>
@ -60,16 +62,11 @@ LLLeapListener::LLLeapListener(const ConnectFunc& connect):
LLSD need_name(LLSDMap("name", LLSD()));
add("newpump",
"Instantiate a new LLEventPump named like [\"name\"] and listen to it.\n"
"If [\"type\"] == \"LLEventQueue\", make LLEventQueue, else LLEventStream.\n"
"[\"type\"] == \"LLEventStream\", \"LLEventMailDrop\" et al.\n"
"Events sent through new LLEventPump will be decorated with [\"pump\"]=name.\n"
"Returns actual name in [\"name\"] (may be different if collision).",
&LLLeapListener::newpump,
need_name);
add("killpump",
"Delete LLEventPump [\"name\"] created by \"newpump\".\n"
"Returns [\"status\"] boolean indicating whether such a pump existed.",
&LLLeapListener::killpump,
need_name);
LLSD need_source_listener(LLSDMap("source", LLSD())("listener", LLSD()));
add("listen",
"Listen to an existing LLEventPump named [\"source\"], with listener name\n"
@ -124,40 +121,23 @@ void LLLeapListener::newpump(const LLSD& request)
Response reply(LLSD(), request);
std::string name = request["name"];
LLSD const & type = request["type"];
std::string type = request["type"];
LLEventPump * new_pump = NULL;
if (type.asString() == "LLEventQueue")
try
{
new_pump = new LLEventQueue(name, true); // tweak name for uniqueness
// tweak name for uniqueness
LLEventPump& new_pump(LLEventPumps::instance().make(name, true, type));
name = new_pump.getName();
reply["name"] = name;
// Now listen on this new pump with our plugin listener
std::string myname("llleap");
saveListener(name, myname, mConnect(new_pump, myname));
}
else
catch (const LLEventPumps::BadType& error)
{
if (! (type.isUndefined() || type.asString() == "LLEventStream"))
{
reply.warn(STRINGIZE("unknown 'type' " << type << ", using LLEventStream"));
}
new_pump = new LLEventStream(name, true); // tweak name for uniqueness
reply.error(error.what());
}
name = new_pump->getName();
mEventPumps.insert(name, new_pump);
// Now listen on this new pump with our plugin listener
std::string myname("llleap");
saveListener(name, myname, mConnect(*new_pump, myname));
reply["name"] = name;
}
void LLLeapListener::killpump(const LLSD& request)
{
Response reply(LLSD(), request);
std::string name = request["name"];
// success == (nonzero number of entries were erased)
reply["status"] = bool(mEventPumps.erase(name));
}
void LLLeapListener::listen(const LLSD& request)
@ -228,13 +208,11 @@ void LLLeapListener::getAPIs(const LLSD& request) const
{
Response reply(LLSD(), request);
for (LLEventAPI::instance_iter eai(LLEventAPI::beginInstances()),
eaend(LLEventAPI::endInstances());
eai != eaend; ++eai)
for (auto& ea : LLEventAPI::instance_snapshot())
{
LLSD info;
info["desc"] = eai->getDesc();
reply[eai->getName()] = info;
info["desc"] = ea.getDesc();
reply[ea.getName()] = info;
}
}


@ -40,7 +40,6 @@ public:
private:
void newpump(const LLSD&);
void killpump(const LLSD&);
void listen(const LLSD&);
void stoplistening(const LLSD&);
void ping(const LLSD&) const;
@ -64,10 +63,6 @@ private:
// and listener name.
typedef std::map<std::pair<std::string, std::string>, LLBoundListener> ListenersMap;
ListenersMap mListeners;
// Similar lifespan reasoning applies to LLEventPumps instantiated by
// newpump() operations.
typedef boost::ptr_map<std::string, LLEventPump> EventPumpsMap;
EventPumpsMap mEventPumps;
};
#endif /* ! defined(LL_LLLEAPLISTENER_H) */


@ -1,198 +0,0 @@
/**
* @file lllistenerwrapper.h
* @author Nat Goodspeed
* @date 2009-11-30
* @brief Introduce LLListenerWrapper template
*
* $LicenseInfo:firstyear=2009&license=viewerlgpl$
* Second Life Viewer Source Code
* Copyright (C) 2010, Linden Research, Inc.
*
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation;
* version 2.1 of the License only.
*
* This library is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with this library; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*
* Linden Research, Inc., 945 Battery Street, San Francisco, CA 94111 USA
* $/LicenseInfo$
*/
#if ! defined(LL_LLLISTENERWRAPPER_H)
#define LL_LLLISTENERWRAPPER_H
#include "llevents.h" // LLListenerWrapperBase
#include <boost/visit_each.hpp>
/**
* Template base class for coding wrappers for LLEventPump listeners.
*
* Derive your listener wrapper from LLListenerWrapper. You must use
* LLLISTENER_WRAPPER_SUBCLASS() so your subclass will play nicely with
* boost::visit_each (q.v.). That way boost::signals2 can still detect
* derivation from LLEventTrackable, and so forth.
*/
template <typename LISTENER>
class LLListenerWrapper: public LLListenerWrapperBase
{
public:
/// Wrap an arbitrary listener object
LLListenerWrapper(const LISTENER& listener):
mListener(listener)
{}
/// call
virtual bool operator()(const LLSD& event)
{
return mListener(event);
}
/// Allow boost::visit_each() to peek at our mListener.
template <class V>
void accept_visitor(V& visitor) const
{
using boost::visit_each;
visit_each(visitor, mListener, 0);
}
private:
LISTENER mListener;
};
/**
* Specialize boost::visit_each() (leveraging ADL) to peek inside an
* LLListenerWrapper<T> to traverse its LISTENER. We borrow the
* accept_visitor() pattern from boost::bind(), avoiding the need to make
* mListener public.
*/
template <class V, typename T>
void visit_each(V& visitor, const LLListenerWrapper<T>& wrapper, int)
{
wrapper.accept_visitor(visitor);
}
/// use this (sigh!) for each subclass of LLListenerWrapper<T> you write
#define LLLISTENER_WRAPPER_SUBCLASS(CLASS) \
template <class V, typename T> \
void visit_each(V& visitor, const CLASS<T>& wrapper, int) \
{ \
visit_each(visitor, static_cast<const LLListenerWrapper<T>&>(wrapper), 0); \
} \
\
/* Have to state this explicitly, rather than using LL_TEMPLATE_CONVERTIBLE, */ \
/* because the source type is itself a template. */ \
template <typename T> \
struct ll_template_cast_impl<const LLListenerWrapperBase*, const CLASS<T>*> \
{ \
const LLListenerWrapperBase* operator()(const CLASS<T>* wrapper) \
{ \
return wrapper; \
} \
}
/**
* Make an instance of a listener wrapper. Every wrapper class must be a
* template accepting a listener object of arbitrary type. In particular, the
* type of a boost::bind() expression is deliberately undocumented. So we
* can't just write Wrapper<CorrectType>(boost::bind(...)). Instead we must
* write llwrap<Wrapper>(boost::bind(...)).
*/
template <template<typename> class WRAPPER, typename T>
WRAPPER<T> llwrap(const T& listener)
{
return WRAPPER<T>(listener);
}
/**
* This LLListenerWrapper template subclass is used to report entry/exit to an
* event listener, by changing this:
* @code
* someEventPump.listen("MyClass",
* boost::bind(&MyClass::method, ptr, _1));
* @endcode
* to this:
* @code
* someEventPump.listen("MyClass",
* llwrap<LLCoutListener>(
* boost::bind(&MyClass::method, ptr, _1)));
* @endcode
*/
template <class LISTENER>
class LLCoutListener: public LLListenerWrapper<LISTENER>
{
typedef LLListenerWrapper<LISTENER> super;
public:
/// Wrap an arbitrary listener object
LLCoutListener(const LISTENER& listener):
super(listener)
{}
/// call
virtual bool operator()(const LLSD& event)
{
std::cout << "Entering listener " << *super::mName << " with " << event << std::endl;
bool handled = super::operator()(event);
std::cout << "Leaving listener " << *super::mName;
if (handled)
{
std::cout << " (handled)";
}
std::cout << std::endl;
return handled;
}
};
LLLISTENER_WRAPPER_SUBCLASS(LLCoutListener);
/**
* This LLListenerWrapper template subclass is used to log entry/exit to an
* event listener, by changing this:
* @code
* someEventPump.listen("MyClass",
* boost::bind(&MyClass::method, ptr, _1));
* @endcode
* to this:
* @code
* someEventPump.listen("MyClass",
* llwrap<LLLogListener>(
* boost::bind(&MyClass::method, ptr, _1)));
* @endcode
*/
template <class LISTENER>
class LLLogListener: public LLListenerWrapper<LISTENER>
{
typedef LLListenerWrapper<LISTENER> super;
public:
/// Wrap an arbitrary listener object
LLLogListener(const LISTENER& listener):
super(listener)
{}
/// call
virtual bool operator()(const LLSD& event)
{
LL_DEBUGS("LLLogListener") << "Entering listener " << *super::mName << " with " << event << LL_ENDL;
bool handled = super::operator()(event);
LL_DEBUGS("LLLogListener") << "Leaving listener " << *super::mName;
if (handled)
{
LL_CONT << " (handled)";
}
LL_CONT << LL_ENDL;
return handled;
}
};
LLLISTENER_WRAPPER_SUBCLASS(LLLogListener);
#endif /* ! defined(LL_LLLISTENERWRAPPER_H) */


@ -0,0 +1,22 @@
/**
* @file llmainthreadtask.cpp
* @author Nat Goodspeed
* @date 2019-12-05
* @brief Implementation for llmainthreadtask.
*
* $LicenseInfo:firstyear=2019&license=viewerlgpl$
* Copyright (c) 2019, Linden Research, Inc.
* $/LicenseInfo$
*/
// Precompiled header
#include "linden_common.h"
// associated header
#include "llmainthreadtask.h"
// STL headers
// std headers
// external library headers
// other Linden headers
// This file is required by our CMake integration-test machinery. It
// contributes no code to the viewer executable.


@ -0,0 +1,99 @@
/**
* @file llmainthreadtask.h
* @author Nat Goodspeed
* @date 2019-12-04
* @brief LLMainThreadTask dispatches work to the main thread. When invoked on
* the main thread, it performs the work inline.
*
* $LicenseInfo:firstyear=2019&license=viewerlgpl$
* Copyright (c) 2019, Linden Research, Inc.
* $/LicenseInfo$
*/
#if ! defined(LL_LLMAINTHREADTASK_H)
#define LL_LLMAINTHREADTASK_H
#include "lleventtimer.h"
#include "llthread.h"
#include "llmake.h"
#include <future>
#include <type_traits> // std::result_of
/**
* LLMainThreadTask provides a way to perform some task specifically on the
* main thread, waiting for it to complete. A task consists of a C++ nullary
* invocable (i.e. any callable that requires no arguments) with arbitrary
* return type.
*
* Instead of instantiating LLMainThreadTask, pass your invocable to its
* static dispatch() method. dispatch() returns the result of calling your
* task. (Or, if your task throws an exception, dispatch() throws that
* exception. See std::packaged_task.)
*
* When you call dispatch() on the main thread (as determined by
* on_main_thread() in llthread.h), it simply calls your task and returns the
* result.
*
* When you call dispatch() on a secondary thread, it instantiates an
* LLEventTimer subclass scheduled immediately. Next time the main loop calls
* LLEventTimer::updateClass(), your task will be run, and LLMainThreadTask
* will fulfill a future with its result. Meanwhile the requesting thread
* blocks on that future. As soon as it is set, the requesting thread wakes up
* with the task result.
*/
class LLMainThreadTask
{
private:
// Don't instantiate this class -- use dispatch() instead.
LLMainThreadTask() {}
public:
/// dispatch() is the only way to invoke this functionality.
template <typename CALLABLE>
static auto dispatch(CALLABLE&& callable) -> decltype(callable())
{
if (on_main_thread())
{
// we're already running on the main thread, perfect
return callable();
}
else
{
// It's essential to construct LLEventTimer subclass instances on
// the heap because, on completion, LLEventTimer deletes them.
// Once we enable C++17, we can use Class Template Argument
// Deduction. Until then, use llmake_heap().
auto* task = llmake_heap<Task>(std::forward<CALLABLE>(callable));
auto future = task->mTask.get_future();
// Now simply block on the future.
return future.get();
}
}
private:
template <typename CALLABLE>
struct Task: public LLEventTimer
{
Task(CALLABLE&& callable):
// no wait time: call tick() next chance we get
LLEventTimer(0),
mTask(std::forward<CALLABLE>(callable))
{}
BOOL tick() override
{
// run the task on the main thread, will populate the future
// obtained by get_future()
mTask();
// tell LLEventTimer we're done (one shot)
return TRUE;
}
// Given arbitrary CALLABLE, which might be a lambda, how are we
// supposed to obtain its signature for std::packaged_task? It seems
// redundant to have to add an argument list to engage result_of, then
// add the argument list again to complete the signature. At least we
// only support a nullary CALLABLE.
std::packaged_task<typename std::result_of<CALLABLE()>::type()> mTask;
};
};
#endif /* ! defined(LL_LLMAINTHREADTASK_H) */
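A sketch of calling it from a worker thread (the work and the count are invented; it assumes the main loop is pumping LLEventTimer::updateClass() as described above): on the main thread the lambda runs inline, on any other thread the caller blocks until the main thread has produced the result.
#include "llmainthreadtask.h"
#include "llerror.h"
// called from some worker thread
void refresh_on_main_thread()
{
    // dispatch() returns the lambda's result once it has run on the main thread
    S32 widget_count = LLMainThreadTask::dispatch(
        []() -> S32
        {
            // hypothetical main-thread-only work
            return 42;
        });
    LL_INFOS() << "main thread reports " << widget_count << " widgets" << LL_ENDL;
}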


@ -12,10 +12,8 @@
*
* also relevant:
*
* Template argument deduction for class templates
* http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2016/p0091r3.html
* was apparently adopted in June 2016? Unclear when compilers will
* portably support this, but there is hope.
* Template argument deduction for class templates (C++17)
* https://en.cppreference.com/w/cpp/language/class_template_argument_deduction
*
* $LicenseInfo:firstyear=2015&license=viewerlgpl$
* Copyright (c) 2015, Linden Research, Inc.
@ -25,37 +23,61 @@
#if ! defined(LL_LLMAKE_H)
#define LL_LLMAKE_H
/*==========================================================================*|
// When we allow ourselves to compile with C++11 features enabled, this form
// should generically handle an arbitrary number of arguments.
/**
* Usage: llmake<SomeTemplate>(args...)
*
* Deduces the types T... of 'args' and returns an instance of
* SomeTemplate<T...>(args...).
*/
template <template<typename...> class CLASS_TEMPLATE, typename... ARGS>
CLASS_TEMPLATE<ARGS...> llmake(ARGS && ... args)
{
return CLASS_TEMPLATE<ARGS...>(std::forward<ARGS>(args)...);
}
|*==========================================================================*/
// As of 2015-12-18, this is what we'll use instead. Add explicit overloads
// for different numbers of template parameters as use cases arise.
/// dumb pointer template just in case that's what's wanted
template <typename T>
using dumb_pointer = T*;
/**
* Usage: llmake<SomeTemplate>(arg)
* Same as llmake(), but returns a pointer to a new heap instance of
* SomeTemplate<T...>(args...) using the pointer of your choice.
*
* Deduces the type T of 'arg' and returns an instance of SomeTemplate<T>
* initialized with 'arg'. Assumes a constructor accepting T (by value,
* reference or whatever).
* @code
* auto* dumb = llmake_heap<SomeTemplate>(args...);
* auto shared = llmake_heap<SomeTemplate, std::shared_ptr>(args...);
* auto unique = llmake_heap<SomeTemplate, std::unique_ptr>(args...);
* @endcode
*/
template <template<typename> class CLASS_TEMPLATE, typename ARG1>
CLASS_TEMPLATE<ARG1> llmake(const ARG1& arg1)
// POINTER_TEMPLATE is characterized as template<typename...> rather than as
// template<typename T> because (e.g.) std::unique_ptr has multiple template
// arguments. Even though we only engage one, std::unique_ptr doesn't match a
// template template parameter that itself takes only one template parameter.
template <template<typename...> class CLASS_TEMPLATE,
template<typename...> class POINTER_TEMPLATE=dumb_pointer,
typename... ARGS>
POINTER_TEMPLATE<CLASS_TEMPLATE<ARGS...>> llmake_heap(ARGS&&... args)
{
return CLASS_TEMPLATE<ARG1>(arg1);
return POINTER_TEMPLATE<CLASS_TEMPLATE<ARGS...>>(
new CLASS_TEMPLATE<ARGS...>(std::forward<ARGS>(args)...));
}
template <template<typename, typename> class CLASS_TEMPLATE, typename ARG1, typename ARG2>
CLASS_TEMPLATE<ARG1, ARG2> llmake(const ARG1& arg1, const ARG2& arg2)
{
return CLASS_TEMPLATE<ARG1, ARG2>(arg1, arg2);
}
#endif // VS 2013 workaround
/// dumb pointer template just in case that's what's wanted
/**
* Same as llmake(), but returns a pointer to a new heap instance of
* SomeTemplate<T...>(args...) using the pointer of your choice.
*
* @code
* auto* dumb = llmake_heap<SomeTemplate>(args...);
* auto shared = llmake_heap<SomeTemplate, std::shared_ptr>(args...);
* auto unique = llmake_heap<SomeTemplate, std::unique_ptr>(args...);
* @endcode
*/
// POINTER_TEMPLATE is characterized as template<typename...> rather than as
// template<typename T> because (e.g.) std::unique_ptr has multiple template
// arguments. Even though we only engage one, std::unique_ptr doesn't match a
// template template parameter that itself takes only one template parameter.
#endif /* ! defined(LL_LLMAKE_H) */
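A small sketch of llmake_heap() with each pointer flavor (the Pair template and its arguments exist only for illustration):
#include "llmake.h"
#include <memory>
#include <string>
// hypothetical class template to instantiate
template <typename A, typename B>
struct Pair
{
    Pair(const A& a, const B& b): first(a), second(b) {}
    A first;
    B second;
};
void example()
{
    // dumb pointer (the default POINTER_TEMPLATE); caller owns and must delete
    auto* dumb = llmake_heap<Pair>(1, std::string("one"));
    delete dumb;
    // or let a smart pointer manage the heap instance
    auto shared = llmake_heap<Pair, std::shared_ptr>(2, std::string("two"));
    auto unique = llmake_heap<Pair, std::unique_ptr>(3, std::string("three"));
}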


@ -32,8 +32,7 @@
//============================================================================
LLMutex::LLMutex() :
mCount(0),
mLockingThread(NO_THREAD)
mCount(0)
{
}
@ -55,7 +54,7 @@ void LLMutex::lock()
#if MUTEX_DEBUG
// Have to have the lock before we can access the debug info
U32 id = LLThread::currentID();
auto id = LLThread::currentID();
if (mIsLocked[id] != FALSE)
LL_ERRS() << "Already locked in Thread: " << id << LL_ENDL;
mIsLocked[id] = TRUE;
@ -74,13 +73,13 @@ void LLMutex::unlock()
#if MUTEX_DEBUG
// Access the debug info while we have the lock
U32 id = LLThread::currentID();
auto id = LLThread::currentID();
if (mIsLocked[id] != TRUE)
LL_ERRS() << "Not locked in Thread: " << id << LL_ENDL;
mIsLocked[id] = FALSE;
#endif
mLockingThread = NO_THREAD;
mLockingThread = LLThread::id_t();
mMutex.unlock();
}
@ -102,7 +101,7 @@ bool LLMutex::isSelfLocked()
return mLockingThread == LLThread::currentID();
}
U32 LLMutex::lockingThread() const
LLThread::id_t LLMutex::lockingThread() const
{
return mLockingThread;
}
@ -122,7 +121,7 @@ bool LLMutex::trylock()
#if MUTEX_DEBUG
// Have to have the lock before we can access the debug info
U32 id = LLThread::currentID();
auto id = LLThread::currentID();
if (mIsLocked[id] != FALSE)
LL_ERRS() << "Already locked in Thread: " << id << LL_ENDL;
mIsLocked[id] = TRUE;


@ -28,20 +28,12 @@
#define LL_LLMUTEX_H
#include "stdtypes.h"
#include "llthread.h"
#include <boost/noncopyable.hpp>
#if LL_WINDOWS
#pragma warning (push)
#pragma warning (disable:4265)
#endif
// 'std::_Pad' : class has virtual functions, but destructor is not virtual
#include <mutex>
#include "mutex.h"
#include <condition_variable>
#if LL_WINDOWS
#pragma warning (pop)
#endif
//============================================================================
#define MUTEX_DEBUG (LL_DEBUG || LL_RELEASE_WITH_DEBUG_INFO)
@ -53,11 +45,6 @@
class LL_COMMON_API LLMutex
{
public:
typedef enum
{
NO_THREAD = 0xFFFFFFFF
} e_locking_thread;
LLMutex();
virtual ~LLMutex();
@ -66,15 +53,15 @@ public:
void unlock(); // undefined behavior when called on mutex not being held
bool isLocked(); // non-blocking, but does do a lock/unlock so not free
bool isSelfLocked(); //return true if locked in a same thread
U32 lockingThread() const; //get ID of locking thread
LLThread::id_t lockingThread() const; //get ID of locking thread
protected:
std::mutex mMutex;
mutable U32 mCount;
mutable U32 mLockingThread;
mutable LLThread::id_t mLockingThread;
#if MUTEX_DEBUG
std::map<U32, BOOL> mIsLocked;
std::map<LLThread::id_t, BOOL> mIsLocked;
#endif
};


@ -242,4 +242,11 @@
#define LL_COMPILE_TIME_MESSAGE(msg)
#endif
// __FUNCTION__ works on all the platforms we care about, but...
#if LL_WINDOWS
#define LL_PRETTY_FUNCTION __FUNCSIG__
#else
#define LL_PRETTY_FUNCTION __PRETTY_FUNCTION__
#endif
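// Hypothetical usage sketch (the log tag below is illustrative; LL_DEBUGS/LL_ENDL
// are the logging macros used elsewhere in this tree):
//   LL_DEBUGS("Example") << "entering " << LL_PRETTY_FUNCTION << LL_ENDL;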
#endif // not LL_LINDEN_PREPROCESSOR_H


@ -994,9 +994,9 @@ void LLProcess::handle_status(int reason, int status)
// wi->rv = apr_proc_wait(wi->child, &wi->rc, &wi->why, APR_NOWAIT);
// It's just wrong to call apr_proc_wait() here. The only way APR knows to
// call us with APR_OC_REASON_DEATH is that it's already reaped this child
// process, so calling suspend() will only produce "huh?" from the OS. We
// process, so calling wait() will only produce "huh?" from the OS. We
// must rely on the status param passed in, which unfortunately comes
// straight from the OS suspend() call, which means we have to decode it by
// straight from the OS wait() call, which means we have to decode it by
// hand.
mStatus = interpret_status(status);
LL_INFOS("LLProcess") << getStatusString() << LL_ENDL;


@ -753,8 +753,6 @@ private:
}
fclose(cpuinfo_fp);
}
# if LL_X86
// *NOTE:Mani - eww, macros! srry.
#define LLPI_SET_INFO_STRING(llpi_id, cpuinfo_id) \
if (!cpuinfo[cpuinfo_id].empty()) \
@ -782,7 +780,7 @@ private:
LLPI_SET_INFO_INT(eModel, "model");
S32 family;
S32 family{0};
if (!cpuinfo["cpu family"].empty()
&& LLStringUtil::convertToS32(cpuinfo["cpu family"], family))
{
@ -814,8 +812,6 @@ private:
{
setExtension(cpu_feature_names[eSSE2_Ext]);
}
# endif // LL_X86
}
std::string getCPUFeatureDescription() const


@ -28,9 +28,10 @@
#include <boost/noncopyable.hpp>
#include <boost/intrusive_ptr.hpp>
#include "llmutex.h"
#include "llatomic.h"
class LLMutex;
//----------------------------------------------------------------------------
// RefCount objects should generally only be accessed by way of LLPointer<>'s
// see llthread.h for LLThreadSafeRefCount


@ -66,7 +66,8 @@ const std::string LLSD_NOTATION_HEADER("llsd/notation");
*/
// static
void LLSDSerialize::serialize(const LLSD& sd, std::ostream& str, ELLSD_Serialize type, U32 options)
void LLSDSerialize::serialize(const LLSD& sd, std::ostream& str, ELLSD_Serialize type,
LLSDFormatter::EFormatterOptions options)
{
LLPointer<LLSDFormatter> f = NULL;
@ -174,10 +175,10 @@ bool LLSDSerialize::deserialize(LLSD& sd, std::istream& str, S32 max_bytes)
{
p = new LLSDXMLParser;
}
else if (header == LLSD_NOTATION_HEADER)
{
p = new LLSDNotationParser;
}
else if (header == LLSD_NOTATION_HEADER)
{
p = new LLSDNotationParser;
}
else
{
LL_WARNS() << "deserialize request for unknown ELLSD_Serialize" << LL_ENDL;
@ -1234,9 +1235,11 @@ bool LLSDBinaryParser::parseString(
/**
* LLSDFormatter
*/
LLSDFormatter::LLSDFormatter() :
mBoolAlpha(false)
LLSDFormatter::LLSDFormatter(bool boolAlpha, const std::string& realFmt, EFormatterOptions options):
mOptions(options)
{
boolalpha(boolAlpha);
realFormat(realFmt);
}
// virtual
@ -1253,6 +1256,17 @@ void LLSDFormatter::realFormat(const std::string& format)
mRealFormat = format;
}
S32 LLSDFormatter::format(const LLSD& data, std::ostream& ostr) const
{
// pass options captured by constructor
return format(data, ostr, mOptions);
}
S32 LLSDFormatter::format(const LLSD& data, std::ostream& ostr, EFormatterOptions options) const
{
return format_impl(data, ostr, options, 0);
}
void LLSDFormatter::formatReal(LLSD::Real real, std::ostream& ostr) const
{
std::string buffer = llformat(mRealFormat.c_str(), real);
@ -1262,7 +1276,9 @@ void LLSDFormatter::formatReal(LLSD::Real real, std::ostream& ostr) const
/**
* LLSDNotationFormatter
*/
LLSDNotationFormatter::LLSDNotationFormatter()
LLSDNotationFormatter::LLSDNotationFormatter(bool boolAlpha, const std::string& realFormat,
EFormatterOptions options):
LLSDFormatter(boolAlpha, realFormat, options)
{
}
@ -1278,14 +1294,8 @@ std::string LLSDNotationFormatter::escapeString(const std::string& in)
return ostr.str();
}
// virtual
S32 LLSDNotationFormatter::format(const LLSD& data, std::ostream& ostr, U32 options) const
{
S32 rv = format_impl(data, ostr, options, 0);
return rv;
}
S32 LLSDNotationFormatter::format_impl(const LLSD& data, std::ostream& ostr, U32 options, U32 level) const
S32 LLSDNotationFormatter::format_impl(const LLSD& data, std::ostream& ostr,
EFormatterOptions options, U32 level) const
{
S32 format_count = 1;
std::string pre;
@ -1406,21 +1416,33 @@ S32 LLSDNotationFormatter::format_impl(const LLSD& data, std::ostream& ostr, U32
{
// *FIX: memory inefficient.
const std::vector<U8>& buffer = data.asBinary();
ostr << "b(" << buffer.size() << ")\"";
if(buffer.size())
if (options & LLSDFormatter::OPTIONS_PRETTY_BINARY)
{
if (options & LLSDFormatter::OPTIONS_PRETTY_BINARY)
ostr << "b16\"";
if (! buffer.empty())
{
std::ios_base::fmtflags old_flags = ostr.flags();
ostr.setf( std::ios::hex, std::ios::basefield );
ostr << "0x";
// It shouldn't strictly matter whether the emitted hex digits
// are uppercase; LLSDNotationParser handles either; but as of
// 2020-05-13, Python's llbase.llsd requires uppercase hex.
ostr << std::uppercase;
auto oldfill(ostr.fill('0'));
auto oldwidth(ostr.width());
for (int i = 0; i < buffer.size(); i++)
{
ostr << (int) buffer[i];
// have to restate setw() before every conversion
ostr << std::setw(2) << (int) buffer[i];
}
ostr.width(oldwidth);
ostr.fill(oldfill);
ostr.flags(old_flags);
}
else
}
else // ! OPTIONS_PRETTY_BINARY
{
ostr << "b(" << buffer.size() << ")\"";
if (! buffer.empty())
{
ostr.write((const char*)&buffer[0], buffer.size());
}
@ -1437,11 +1459,12 @@ S32 LLSDNotationFormatter::format_impl(const LLSD& data, std::ostream& ostr, U32
return format_count;
}
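// A minimal standalone sketch of the hex-emission technique used above.
// write_hex_bytes() is a hypothetical helper, not part of this file, and
// assumes <iomanip> and <ostream> are available: each byte becomes two
// uppercase, zero-padded hex digits, and the stream's formatting state is
// saved and restored around the loop.
inline void write_hex_bytes(std::ostream& ostr, const std::vector<U8>& buffer)
{
    std::ios_base::fmtflags old_flags = ostr.flags();
    char old_fill = ostr.fill('0');
    ostr << std::hex << std::uppercase;
    for (size_t i = 0; i < buffer.size(); ++i)
    {
        // setw() has to be restated before every insertion
        ostr << std::setw(2) << int(buffer[i]);
    }
    ostr.fill(old_fill);
    ostr.flags(old_flags);
}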
/**
* LLSDBinaryFormatter
*/
LLSDBinaryFormatter::LLSDBinaryFormatter()
LLSDBinaryFormatter::LLSDBinaryFormatter(bool boolAlpha, const std::string& realFormat,
EFormatterOptions options):
LLSDFormatter(boolAlpha, realFormat, options)
{
}
@ -1450,7 +1473,8 @@ LLSDBinaryFormatter::~LLSDBinaryFormatter()
{ }
// virtual
S32 LLSDBinaryFormatter::format(const LLSD& data, std::ostream& ostr, U32 options) const
S32 LLSDBinaryFormatter::format_impl(const LLSD& data, std::ostream& ostr,
EFormatterOptions options, U32 level) const
{
S32 format_count = 1;
switch(data.type())
@ -1466,7 +1490,7 @@ S32 LLSDBinaryFormatter::format(const LLSD& data, std::ostream& ostr, U32 option
{
ostr.put('k');
formatString((*iter).first, ostr);
format_count += format((*iter).second, ostr);
format_count += format_impl((*iter).second, ostr, options, level+1);
}
ostr.put('}');
break;
@ -1481,7 +1505,7 @@ S32 LLSDBinaryFormatter::format(const LLSD& data, std::ostream& ostr, U32 option
LLSD::array_const_iterator end = data.endArray();
for(; iter != end; ++iter)
{
format_count += format(*iter, ostr);
format_count += format_impl(*iter, ostr, options, level+1);
}
ostr.put(']');
break;

View File

@ -435,7 +435,8 @@ public:
/**
* @brief Constructor
*/
LLSDFormatter();
LLSDFormatter(bool boolAlpha=false, const std::string& realFormat="",
EFormatterOptions options=OPTIONS_PRETTY_BINARY);
/**
* @brief Set the boolean serialization format.
@ -459,15 +460,37 @@ public:
void realFormat(const std::string& format);
/**
* @brief Call this method to format an LLSD to a stream.
* @brief Call this method to format an LLSD to a stream with options as
* set by the constructor.
*
* @param data The data to write.
* @param ostr The destination stream for the data.
* @return Returns The number of LLSD objects fomatted out
* @return Returns The number of LLSD objects formatted out
*/
virtual S32 format(const LLSD& data, std::ostream& ostr, U32 options = LLSDFormatter::OPTIONS_NONE) const = 0;
S32 format(const LLSD& data, std::ostream& ostr) const;
/**
* @brief Call this method to format an LLSD to a stream, passing options
* explicitly.
*
* @param data The data to write.
* @param ostr The destination stream for the data.
* @param options OPTIONS_NONE to emit LLSD::Binary as raw bytes
* @return Returns The number of LLSD objects formatted out
*/
virtual S32 format(const LLSD& data, std::ostream& ostr, EFormatterOptions options) const;
protected:
/**
* @brief Implementation to format the data. This is called recursively.
*
* @param data The data to write.
* @param ostr The destination stream for the data.
* @return Returns The number of LLSD objects formatted out
*/
virtual S32 format_impl(const LLSD& data, std::ostream& ostr, EFormatterOptions options,
U32 level) const = 0;
/**
* @brief Helper method which appropriately obeys the real format.
*
@ -476,9 +499,9 @@ protected:
*/
void formatReal(LLSD::Real real, std::ostream& ostr) const;
protected:
bool mBoolAlpha;
std::string mRealFormat;
EFormatterOptions mOptions;
};
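// Usage sketch (hypothetical caller, not declared in this header): a
// formatter can be driven either by the options captured at construction
// time or by options passed explicitly; combining flags requires the
// EFormatterOptions cast.
//
//   LLPointer<LLSDFormatter> f = new LLSDNotationFormatter;
//   f->format(data, ostr);   // uses constructor options (OPTIONS_PRETTY_BINARY)
//   f->format(data, ostr,
//             LLSDFormatter::EFormatterOptions(LLSDFormatter::OPTIONS_PRETTY |
//                                              LLSDFormatter::OPTIONS_PRETTY_BINARY));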
@ -498,7 +521,8 @@ public:
/**
* @brief Constructor
*/
LLSDNotationFormatter();
LLSDNotationFormatter(bool boolAlpha=false, const std::string& realFormat="",
EFormatterOptions options=OPTIONS_PRETTY_BINARY);
/**
* @brief Helper static method to return a notation escaped string
@ -512,25 +536,16 @@ public:
*/
static std::string escapeString(const std::string& in);
/**
* @brief Call this method to format an LLSD to a stream.
*
* @param data The data to write.
* @param ostr The destination stream for the data.
* @return Returns The number of LLSD objects fomatted out
*/
virtual S32 format(const LLSD& data, std::ostream& ostr, U32 options = LLSDFormatter::OPTIONS_NONE) const;
protected:
/**
* @brief Implementation to format the data. This is called recursively.
*
* @param data The data to write.
* @param ostr The destination stream for the data.
* @return Returns The number of LLSD objects fomatted out
* @return Returns The number of LLSD objects formatted out
*/
S32 format_impl(const LLSD& data, std::ostream& ostr, U32 options, U32 level) const;
S32 format_impl(const LLSD& data, std::ostream& ostr, EFormatterOptions options,
U32 level) const override;
};
@ -550,7 +565,8 @@ public:
/**
* @brief Constructor
*/
LLSDXMLFormatter();
LLSDXMLFormatter(bool boolAlpha=false, const std::string& realFormat="",
EFormatterOptions options=OPTIONS_PRETTY_BINARY);
/**
* @brief Helper static method to return an xml escaped string
@ -565,20 +581,23 @@ public:
*
* @param data The data to write.
* @param ostr The destination stream for the data.
* @return Returns The number of LLSD objects fomatted out
* @return Returns The number of LLSD objects formatted out
*/
virtual S32 format(const LLSD& data, std::ostream& ostr, U32 options = LLSDFormatter::OPTIONS_NONE) const;
S32 format(const LLSD& data, std::ostream& ostr, EFormatterOptions options) const override;
// also pull down base-class format() method that isn't overridden
using LLSDFormatter::format;
protected:
/**
* @brief Implementation to format the data. This is called recursively.
*
* @param data The data to write.
* @param ostr The destination stream for the data.
* @return Returns The number of LLSD objects fomatted out
* @return Returns The number of LLSD objects formatted out
*/
S32 format_impl(const LLSD& data, std::ostream& ostr, U32 options, U32 level) const;
S32 format_impl(const LLSD& data, std::ostream& ostr, EFormatterOptions options,
U32 level) const override;
};
@ -618,18 +637,20 @@ public:
/**
* @brief Constructor
*/
LLSDBinaryFormatter();
LLSDBinaryFormatter(bool boolAlpha=false, const std::string& realFormat="",
EFormatterOptions options=OPTIONS_PRETTY_BINARY);
protected:
/**
* @brief Call this method to format an LLSD to a stream.
* @brief Implementation to format the data. This is called recursively.
*
* @param data The data to write.
* @param ostr The destination stream for the data.
* @return Returns The number of LLSD objects fomatted out
* @return Returns The number of LLSD objects formatted out
*/
virtual S32 format(const LLSD& data, std::ostream& ostr, U32 options = LLSDFormatter::OPTIONS_NONE) const;
S32 format_impl(const LLSD& data, std::ostream& ostr, EFormatterOptions options,
U32 level) const override;
protected:
/**
* @brief Helper method to serialize strings
*
@ -669,7 +690,8 @@ public:
/**
* @brief Constructor
*/
LLSDOStreamer(const LLSD& data, U32 options = LLSDFormatter::OPTIONS_NONE) :
LLSDOStreamer(const LLSD& data,
LLSDFormatter::EFormatterOptions options=LLSDFormatter::OPTIONS_PRETTY_BINARY) :
mSD(data), mOptions(options) {}
/**
@ -681,17 +703,17 @@ public:
* @return Returns the stream passed in after streaming mSD.
*/
friend std::ostream& operator<<(
std::ostream& str,
const LLSDOStreamer<Formatter>& formatter)
std::ostream& out,
const LLSDOStreamer<Formatter>& streamer)
{
LLPointer<Formatter> f = new Formatter;
f->format(formatter.mSD, str, formatter.mOptions);
return str;
f->format(streamer.mSD, out, streamer.mOptions);
return out;
}
protected:
LLSD mSD;
U32 mOptions;
LLSDFormatter::EFormatterOptions mOptions;
};
typedef LLSDOStreamer<LLSDNotationFormatter> LLSDNotationStreamer;
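// Usage sketch (variable names illustrative): stream an LLSD blob in
// notation format directly into any std::ostream.
//   LLSD data;
//   data["answer"] = 42;
//   std::cout << LLSDNotationStreamer(data) << std::endl;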
@ -724,7 +746,7 @@ public:
* Generic in/outs
*/
static void serialize(const LLSD& sd, std::ostream& str, ELLSD_Serialize,
U32 options = LLSDFormatter::OPTIONS_NONE);
LLSDFormatter::EFormatterOptions options=LLSDFormatter::OPTIONS_PRETTY_BINARY);
/**
* @brief Examine a stream, and parse 1 sd object out based on contents.
@ -753,8 +775,8 @@ public:
{
LLPointer<LLSDNotationFormatter> f = new LLSDNotationFormatter;
return f->format(sd, str,
LLSDFormatter::OPTIONS_PRETTY |
LLSDFormatter::OPTIONS_PRETTY_BINARY);
LLSDFormatter::EFormatterOptions(LLSDFormatter::OPTIONS_PRETTY |
LLSDFormatter::OPTIONS_PRETTY_BINARY));
}
static S32 fromNotation(LLSD& sd, std::istream& str, S32 max_bytes)
{


@ -46,7 +46,9 @@ extern "C"
/**
* LLSDXMLFormatter
*/
LLSDXMLFormatter::LLSDXMLFormatter()
LLSDXMLFormatter::LLSDXMLFormatter(bool boolAlpha, const std::string& realFormat,
EFormatterOptions options):
LLSDFormatter(boolAlpha, realFormat, options)
{
}
@ -56,7 +58,8 @@ LLSDXMLFormatter::~LLSDXMLFormatter()
}
// virtual
S32 LLSDXMLFormatter::format(const LLSD& data, std::ostream& ostr, U32 options) const
S32 LLSDXMLFormatter::format(const LLSD& data, std::ostream& ostr,
EFormatterOptions options) const
{
std::streamsize old_precision = ostr.precision(25);
@ -73,7 +76,8 @@ S32 LLSDXMLFormatter::format(const LLSD& data, std::ostream& ostr, U32 options)
return rv;
}
S32 LLSDXMLFormatter::format_impl(const LLSD& data, std::ostream& ostr, U32 options, U32 level) const
S32 LLSDXMLFormatter::format_impl(const LLSD& data, std::ostream& ostr,
EFormatterOptions options, U32 level) const
{
S32 format_count = 1;
std::string pre;


@ -506,7 +506,7 @@ struct Data
const char* name;
} typedata[] =
{
#define def(type) { LLSD::type, #type + 4 }
#define def(type) { LLSD::type, &#type[4] }
def(TypeUndefined),
def(TypeBoolean),
def(TypeInteger),
@ -856,6 +856,74 @@ bool llsd_equals(const LLSD& lhs, const LLSD& rhs, int bits)
}
}
/*****************************************************************************
* llsd::drill()
*****************************************************************************/
namespace llsd
{
LLSD& drill(LLSD& blob, const LLSD& rawPath)
{
// Treat rawPath uniformly as an array. If it's not already an array,
// store it as the only entry in one. (But let's say Undefined means an
// empty array.)
LLSD path;
if (rawPath.isArray() || rawPath.isUndefined())
{
path = rawPath;
}
else
{
path.append(rawPath);
}
// Need to indicate a current destination -- but that current destination
// must change as we step through the path array. Where normally we'd use
// an LLSD& to capture a subscripted LLSD lvalue, this time we must
// instead use a pointer -- since it must be reassigned.
// Start by pointing to the input blob exactly as is.
LLSD* located{&blob};
// Extract the element of interest by walking path. Use an explicit index
// so that, in case of a bogus type in path, we can identify the specific
// path entry that's bad.
for (LLSD::Integer i = 0; i < path.size(); ++i)
{
const LLSD& key{path[i]};
if (key.isString())
{
// a string path element is a map key
located = &((*located)[key.asString()]);
}
else if (key.isInteger())
{
// an integer path element is an array index
located = &((*located)[key.asInteger()]);
}
else
{
// What do we do with Real or Array or Map or ...?
// As it's a coder error -- not a user error -- rub the coder's
// face in it so it gets fixed.
LL_ERRS("llsdutil") << "drill(" << blob << ", " << rawPath
<< "): path[" << i << "] bad type "
<< sTypes.lookup(key.type()) << LL_ENDL;
}
}
// dereference the pointer to return a reference to the element we found
return *located;
}
LLSD drill(const LLSD& blob, const LLSD& path)
{
// non-const drill() does exactly what we want. Temporarily cast away
// const-ness and use that.
return drill(const_cast<LLSD&>(blob), path);
}
} // namespace llsd
// Construct a deep partial clone of an LLSD object. Primitive types share
// references; however, maps, arrays and binary objects are duplicated. An optional
// filter may be included to exclude/include keys in a map.
@ -910,7 +978,6 @@ LLSD llsd_clone(LLSD value, LLSD filter)
return clone;
}
LLSD llsd_shallow(LLSD value, LLSD filter)
{
LLSD shallow;


@ -143,6 +143,16 @@ LL_COMMON_API std::string llsd_matches(const LLSD& prototype, const LLSD& data,
/// equality rather than bitwise equality, pass @a bits as for
/// is_approx_equal_fraction().
LL_COMMON_API bool llsd_equals(const LLSD& lhs, const LLSD& rhs, int bits=-1);
/// If you don't care about LLSD::Real equality
inline bool operator==(const LLSD& lhs, const LLSD& rhs)
{
return llsd_equals(lhs, rhs);
}
inline bool operator!=(const LLSD& lhs, const LLSD& rhs)
{
// operator!=() should always be the negation of operator==()
return ! (lhs == rhs);
}
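// Usage sketch: with these operators, LLSD values can be compared directly
// (the values below are illustrative; llsd::array() is declared later in
// this header).
//   LLSD a = llsd::array(1, 2, 3);
//   LLSD b = llsd::array(1, 2, 3);
//   bool same = (a == b);    // true, via llsd_equals()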
// Simple function to copy data out of input & output iterators if
// there is no need for casting.
@ -156,6 +166,31 @@ template<typename Input> LLSD llsd_copy_array(Input iter, Input end)
return dest;
}
namespace llsd
{
/**
* Drill down to locate an element in 'blob' according to 'path', where 'path'
* is one of the following:
*
* - LLSD::String: 'blob' is an LLSD::Map. Find the entry with key 'path'.
* - LLSD::Integer: 'blob' is an LLSD::Array. Find the entry with index 'path'.
* - Any other 'path' type will be interpreted as LLSD::Array, and 'blob' is a
* nested structure. For each element of 'path':
* - If it's an LLSD::Integer, select the entry with that index from an
* LLSD::Array at that level.
* - If it's an LLSD::String, select the entry with that key from an
* LLSD::Map at that level.
* - Anything else is an error.
*
* By implication, if path.isUndefined() or otherwise equivalent to an empty
* LLSD::Array, drill() returns 'blob' as is.
*/
LLSD drill(const LLSD& blob, const LLSD& path);
LLSD& drill( LLSD& blob, const LLSD& path);
}
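// Usage sketch (variable names illustrative): given
//   LLSD blob;
//   blob["settings"]["volume"] = 0.5;
//   blob["tags"].append("first");
// then
//   llsd::drill(blob, "settings");                         // the "settings" map
//   llsd::drill(blob, llsd::array("settings", "volume"));  // 0.5
//   llsd::drill(blob, llsd::array("tags", 0));             // "first"
// and the non-const overload returns an assignable reference:
//   llsd::drill(blob, llsd::array("settings", "volume")) = 0.75;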
/*****************************************************************************
* LLSDArray
*****************************************************************************/
@ -225,6 +260,36 @@ private:
LLSD _data;
};
namespace llsd
{
/**
* Construct an LLSD::Array inline, using modern C++ variadic arguments.
*/
// recursion tail
inline
void array_(LLSD&) {}
// recursive call
template <typename T0, typename... Ts>
void array_(LLSD& data, T0&& v0, Ts&&... vs)
{
data.append(std::forward<T0>(v0));
array_(data, std::forward<Ts>(vs)...);
}
// public interface
template <typename... Ts>
LLSD array(Ts&&... vs)
{
LLSD data;
array_(data, std::forward<Ts>(vs)...);
return data;
}
} // namespace llsd
/*****************************************************************************
* LLSDMap
*****************************************************************************/
@ -269,6 +334,36 @@ private:
LLSD _data;
};
namespace llsd
{
/**
* Construct an LLSD::Map inline, using modern C++ variadic arguments.
*/
// recursion tail
inline
void map_(LLSD&) {}
// recursive call
template <typename T0, typename... Ts>
void map_(LLSD& data, const LLSD::String& k0, T0&& v0, Ts&&... vs)
{
data[k0] = v0;
map_(data, std::forward<Ts>(vs)...);
}
// public interface
template <typename... Ts>
LLSD map(Ts&&... vs)
{
LLSD data;
map_(data, std::forward<Ts>(vs)...);
return data;
}
} // namespace llsd
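// Usage sketch of the two inline builders above (values are illustrative):
//   LLSD entry = llsd::map(
//       "name",    "Example",
//       "enabled", true,
//       "scores",  llsd::array(1, 2, 3));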
/*****************************************************************************
* LLSDParam
*****************************************************************************/
@ -452,13 +547,22 @@ LLSD llsd_clone(LLSD value, LLSD filter = LLSD());
// the filter parameter.
LLSD llsd_shallow(LLSD value, LLSD filter = LLSD());
namespace llsd
{
// llsd namespace aliases
inline
LLSD clone (LLSD value, LLSD filter=LLSD()) { return llsd_clone (value, filter); }
inline
LLSD shallow(LLSD value, LLSD filter=LLSD()) { return llsd_shallow(value, filter); }
} // namespace llsd
// Specialization for generating a hash value from an LLSD block.
// <FS:ND> GCC 4.9 does not like the specialization in form of boost::hash but rather wants a namespace
// template <>
// struct boost::hash<LLSD>
namespace boost { template <> struct hash<LLSD>
// </FS:ND>
namespace boost
{
template <>
struct hash<LLSD>
{
typedef LLSD argument_type;
typedef std::size_t result_type;
@ -519,6 +623,5 @@ namespace boost { template <> struct hash<LLSD>
return seed;
}
};
} // <FS:ND/> close namespace
}
#endif // LL_LLSDUTIL_H


@ -30,10 +30,9 @@
#include "llerror.h"
#include "llerrorcontrol.h" // LLError::is_available()
#include "lldependencies.h"
#include "llcoro_get_id.h"
#include "llexception.h"
#include "llcoros.h"
#include <boost/foreach.hpp>
#include <boost/unordered_map.hpp>
#include <algorithm>
#include <iostream> // std::cerr in dire emergency
#include <sstream>
@ -43,8 +42,6 @@ namespace {
void log(LLError::ELevel level,
const char* p1, const char* p2, const char* p3, const char* p4);
void logdebugs(const char* p1="", const char* p2="", const char* p3="", const char* p4="");
bool oktolog();
} // anonymous namespace
@ -115,19 +112,10 @@ private:
// initialized, either in the constructor or in initSingleton(). However,
// managing that as a stack depends on having a DISTINCT 'initializing'
// stack for every C++ stack in the process! And we have a distinct C++
// stack for every running coroutine. It would be interesting and cool to
// implement a generic coroutine-local-storage mechanism and use that
// here. The trouble is that LLCoros is itself an LLSingleton, so
// depending on LLCoros functionality could dig us into infinite
// recursion. (Moreover, when we reimplement LLCoros on top of
// Boost.Fiber, that library already provides fiber_specific_ptr -- so
// it's not worth a great deal of time and energy implementing a generic
// equivalent on top of boost::dcoroutine, which is on its way out.)
// Instead, use a map of llcoro::id to select the appropriate
// coro-specific 'initializing' stack. llcoro::get_id() is carefully
// implemented to avoid requiring LLCoros.
typedef boost::unordered_map<llcoro::id, list_t> InitializingMap;
InitializingMap mInitializing;
// stack for every running coroutine. Therefore this stack must be based
// on a coroutine-local pointer.
// This local_ptr isn't static because it's a member of an LLSingleton.
LLCoros::local_ptr<list_t> mInitializing;
public:
// Instantiate this to obtain a reference to the coroutine-specific
@ -145,8 +133,8 @@ public:
{
if (! mList)
{
LLTHROW(std::runtime_error("Trying to use LockedInitializing "
"after cleanup_initializing()"));
LLTHROW(LLException("Trying to use LockedInitializing "
"after cleanup_initializing()"));
}
return *mList;
}
@ -166,18 +154,23 @@ public:
private:
list_t& get_initializing_()
{
// map::operator[] has find-or-create semantics, exactly what we need
// here. It returns a reference to the selected mapped_type instance.
return mInitializing[llcoro::get_id()];
LLSingletonBase::list_t* current = mInitializing.get();
if (! current)
{
// If the running coroutine doesn't already have an initializing
// stack, allocate a new one and save it for future reference.
current = new LLSingletonBase::list_t();
mInitializing.reset(current);
}
return *current;
}
// By the time mInitializing is destroyed, its value for every coroutine
// except the running one must have been reset() to nullptr. So every time
// we pop the list to empty, reset() the running coroutine's local_ptr.
void cleanup_initializing_()
{
InitializingMap::iterator found = mInitializing.find(llcoro::get_id());
if (found != mInitializing.end())
{
mInitializing.erase(found);
}
mInitializing.reset(nullptr);
}
};
@ -302,7 +295,7 @@ void LLSingletonBase::MasterList::LockedInitializing::log(const char* verb, cons
}
}
void LLSingletonBase::capture_dependency(EInitState initState)
void LLSingletonBase::capture_dependency()
{
MasterList::LockedInitializing locked_list;
list_t& initializing(locked_list.get());
@ -334,21 +327,8 @@ void LLSingletonBase::capture_dependency(EInitState initState)
LLSingletonBase* foundp(*found);
out << classname(foundp) << " -> ";
}
// We promise to capture dependencies from both the constructor
// and the initSingleton() method, so an LLSingleton's instance
// pointer is on the initializing list during both. Now that we've
// detected circularity, though, we must distinguish the two. If
// the recursive call is from the constructor, we CAN'T honor it:
// otherwise we'd be returning a pointer to a partially-
// constructed object! But from initSingleton() is okay: that
// method exists specifically to support circularity.
// Decide which log helper to call.
if (initState == CONSTRUCTING)
{
logerrs("LLSingleton circularity in Constructor: ", out.str().c_str(),
classname(this).c_str(), "");
}
else if (it_next == initializing.end())
if (it_next == initializing.end())
{
// Points to self after construction, but during initialization.
// Singletons can initialize other classes that depend onto them,
@ -391,13 +371,12 @@ LLSingletonBase::vec_t LLSingletonBase::dep_sort()
// SingletonDeps through the life of the program, dynamically adding and
// removing LLSingletons as they are created and destroyed, in practice
// it's less messy to construct it on demand. The overhead of doing so
// should happen basically twice: once for cleanupAll(), once for
// deleteAll().
// should happen basically once: for deleteAll().
typedef LLDependencies<LLSingletonBase*> SingletonDeps;
SingletonDeps sdeps;
// Lock while traversing the master list
MasterList::LockedMaster master;
BOOST_FOREACH(LLSingletonBase* sp, master.get())
for (LLSingletonBase* sp : master.get())
{
// Build the SingletonDeps structure by adding, for each
// LLSingletonBase* sp in the master list, sp itself. It has no
@ -414,46 +393,27 @@ LLSingletonBase::vec_t LLSingletonBase::dep_sort()
// extracts just the first (key) element from each sorted_iterator, then
// uses vec_t's range constructor... but frankly this is more
// straightforward, as long as we remember the above reserve() call!
BOOST_FOREACH(SingletonDeps::sorted_iterator::value_type pair, sdeps.sort())
for (const SingletonDeps::sorted_iterator::value_type& pair : sdeps.sort())
{
ret.push_back(pair.first);
}
// The master list is not itself pushed onto the master list. Add it as
// the very last entry -- it is the LLSingleton on which ALL others
// depend! -- so our caller will process it.
ret.push_back(MasterList::getInstance());
ret.push_back(&master.Lock::get());
return ret;
}
//static
void LLSingletonBase::cleanupAll()
void LLSingletonBase::cleanup_()
{
// It's essential to traverse these in dependency order.
BOOST_FOREACH(LLSingletonBase* sp, dep_sort())
logdebugs("calling ", classname(this).c_str(), "::cleanupSingleton()");
try
{
// Call cleanupSingleton() only if we haven't already done so for this
// instance.
if (! sp->mCleaned)
{
sp->mCleaned = true;
logdebugs("calling ",
classname(sp).c_str(), "::cleanupSingleton()");
try
{
sp->cleanupSingleton();
}
catch (const std::exception& e)
{
logwarns("Exception in ", classname(sp).c_str(),
"::cleanupSingleton(): ", e.what());
}
catch (...)
{
logwarns("Unknown exception in ", classname(sp).c_str(),
"::cleanupSingleton()");
}
}
cleanupSingleton();
}
catch (...)
{
LOG_UNHANDLED_EXCEPTION(classname(this) + "::cleanupSingleton()");
}
}
@ -524,10 +484,6 @@ void log(LLError::ELevel level,
}
}
void logdebugs(const char* p1, const char* p2, const char* p3, const char* p4)
{
log(LLError::LEVEL_DEBUG, p1, p2, p3, p4);
}
} // anonymous namespace
//static
@ -536,6 +492,18 @@ void LLSingletonBase::logwarns(const char* p1, const char* p2, const char* p3, c
log(LLError::LEVEL_WARN, p1, p2, p3, p4);
}
//static
void LLSingletonBase::loginfos(const char* p1, const char* p2, const char* p3, const char* p4)
{
log(LLError::LEVEL_INFO, p1, p2, p3, p4);
}
//static
void LLSingletonBase::logdebugs(const char* p1, const char* p2, const char* p3, const char* p4)
{
log(LLError::LEVEL_DEBUG, p1, p2, p3, p4);
}
//static
void LLSingletonBase::logerrs(const char* p1, const char* p2, const char* p3, const char* p4)
{


@ -30,18 +30,10 @@
#include <list>
#include <vector>
#include <typeinfo>
#if LL_WINDOWS
#pragma warning (push)
#pragma warning (disable:4265)
#endif
// warning C4265: 'std::_Pad' : class has virtual functions, but destructor is not virtual
#include <mutex>
#if LL_WINDOWS
#pragma warning (pop)
#endif
#include "mutex.h"
#include "lockstatic.h"
#include "llthread.h" // on_main_thread()
#include "llmainthreadtask.h"
class LLSingletonBase: private boost::noncopyable
{
@ -58,7 +50,6 @@ private:
typedef std::vector<LLSingletonBase*> vec_t;
static vec_t dep_sort();
bool mCleaned; // cleanupSingleton() has been called
// we directly depend on these other LLSingletons
typedef boost::unordered_set<LLSingletonBase*> set_t;
set_t mDepends;
@ -67,8 +58,8 @@ protected:
typedef enum e_init_state
{
UNINITIALIZED = 0, // must be default-initialized state
QUEUED, // construction queued, not yet executing
CONSTRUCTING, // within DERIVED_TYPE constructor
CONSTRUCTED, // finished DERIVED_TYPE constructor
INITIALIZING, // within DERIVED_TYPE::initSingleton()
INITIALIZED, // normal case
DELETED // deleteSingleton() or deleteAll() called
@ -117,15 +108,20 @@ protected:
protected:
// If a given call to B::getInstance() happens during either A::A() or
// A::initSingleton(), record that A directly depends on B.
void capture_dependency(EInitState);
void capture_dependency();
// delegate LL_ERRS() logging to llsingleton.cpp
// delegate logging calls to llsingleton.cpp
static void logerrs(const char* p1, const char* p2="",
const char* p3="", const char* p4="");
// delegate LL_WARNS() logging to llsingleton.cpp
static void logwarns(const char* p1, const char* p2="",
const char* p3="", const char* p4="");
static void loginfos(const char* p1, const char* p2="",
const char* p3="", const char* p4="");
static void logdebugs(const char* p1, const char* p2="",
const char* p3="", const char* p4="");
static std::string demangle(const char* mangled);
// these classname() declarations restate template functions declared in
// llerror.h because we avoid #including that here
template <typename T>
static std::string classname() { return demangle(typeid(T).name()); }
template <typename T>
@ -135,6 +131,9 @@ protected:
virtual void initSingleton() {}
virtual void cleanupSingleton() {}
// internal wrapper around calls to cleanupSingleton()
void cleanup_();
// deleteSingleton() isn't -- and shouldn't be -- a virtual method. It's a
// class static. However, given only Foo*, deleteAll() does need to be
// able to reach Foo::deleteSingleton(). Make LLSingleton (which declares
@ -144,32 +143,15 @@ protected:
public:
/**
* Call this to call the cleanupSingleton() method for every LLSingleton
* constructed since the start of the last cleanupAll() call. (Any
* LLSingleton constructed DURING a cleanupAll() call won't be cleaned up
* until the next cleanupAll() call.) cleanupSingleton() neither deletes
* nor destroys its LLSingleton; therefore it's safe to include logic that
* might take significant realtime or even throw an exception.
*
* The most important property of cleanupAll() is that cleanupSingleton()
* methods are called in dependency order, leaf classes last. Thus, given
* two LLSingleton subclasses A and B, if A's dependency on B is properly
* expressed as a B::getInstance() or B::instance() call during either
* A::A() or A::initSingleton(), B will be cleaned up after A.
*
* If a cleanupSingleton() method throws an exception, the exception is
* logged, but cleanupAll() attempts to continue calling the rest of the
* cleanupSingleton() methods.
*/
static void cleanupAll();
/**
* Call this to call the deleteSingleton() method for every LLSingleton
* constructed since the start of the last deleteAll() call. (Any
* LLSingleton constructed DURING a deleteAll() call won't be cleaned up
* until the next deleteAll() call.) deleteSingleton() deletes and
* destroys its LLSingleton. Any cleanup logic that might take significant
* realtime -- or throw an exception -- must not be placed in your
* LLSingleton's destructor, but rather in its cleanupSingleton() method.
* deleteAll() calls the cleanupSingleton() and deleteSingleton() methods
* for every LLSingleton constructed since the start of the last
* deleteAll() call. (Any LLSingleton constructed DURING a deleteAll()
* call won't be cleaned up until the next deleteAll() call.)
* deleteSingleton() deletes and destroys its LLSingleton. Any cleanup
* logic that might take significant realtime -- or throw an exception --
* must not be placed in your LLSingleton's destructor, but rather in its
* cleanupSingleton() method, which is called implicitly by
* deleteSingleton().
*
* The most important property of deleteAll() is that deleteSingleton()
* methods are called in dependency order, leaf classes last. Thus, given
@ -177,9 +159,9 @@ public:
* expressed as a B::getInstance() or B::instance() call during either
* A::A() or A::initSingleton(), B will be cleaned up after A.
*
* If a deleteSingleton() method throws an exception, the exception is
* logged, but deleteAll() attempts to continue calling the rest of the
* deleteSingleton() methods.
* If a cleanupSingleton() or deleteSingleton() method throws an
* exception, the exception is logged, but deleteAll() attempts to
* continue calling the rest of the deleteSingleton() methods.
*/
static void deleteAll();
};
@ -205,9 +187,9 @@ struct LLSingleton_manage_master
{
return LLSingletonBase::get_initializing_size();
}
void capture_dependency(LLSingletonBase* sb, LLSingletonBase::EInitState state)
void capture_dependency(LLSingletonBase* sb)
{
sb->capture_dependency(state);
sb->capture_dependency();
}
};
@ -222,14 +204,13 @@ struct LLSingleton_manage_master<LLSingletonBase::MasterList>
// since we never pushed, no need to clean up
void reset_initializing(LLSingletonBase::list_t::size_type size) {}
LLSingletonBase::list_t::size_type get_initializing_size() { return 0; }
void capture_dependency(LLSingletonBase*, LLSingletonBase::EInitState) {}
void capture_dependency(LLSingletonBase*) {}
};
// Now we can implement LLSingletonBase's template constructor.
template <typename DERIVED_TYPE>
LLSingletonBase::LLSingletonBase(tag<DERIVED_TYPE>):
mCleaned(false),
mDeleteSingleton(NULL)
mDeleteSingleton(nullptr)
{
// This is the earliest possible point at which we can push this new
// instance onto the init stack. LLSingleton::constructSingleton() can't
@ -271,10 +252,19 @@ class LLParamSingleton;
* leading back to yours, move the instance reference from your constructor to
* your initSingleton() method.
*
* If you override LLSingleton<T>::cleanupSingleton(), your method will be
* called if someone calls LLSingletonBase::cleanupAll(). The significant part
* of this promise is that cleanupAll() will call individual
* cleanupSingleton() methods in reverse dependency order.
* If you override LLSingleton<T>::cleanupSingleton(), your method will
* implicitly be called by LLSingleton<T>::deleteSingleton() just before the
* instance is destroyed. We introduce a special cleanupSingleton() method
* because cleanupSingleton() operations can involve nontrivial realtime, or
* throw an exception. A destructor should do neither!
*
* If your cleanupSingleton() method throws an exception, we log that
* exception but carry on.
*
* If at some point you call LLSingletonBase::deleteAll(), all remaining
* LLSingleton<T> instances will be destroyed in reverse dependency order. (Or
* call MySubclass::deleteSingleton() to specifically destroy the canonical
* MySubclass instance.)
*
* That is, consider LLSingleton subclasses C, B and A. A depends on B, which
* in turn depends on C. These dependencies are expressed as calls to
@ -282,31 +272,34 @@ class LLParamSingleton;
* It shouldn't matter whether these calls appear in A::A() or
* A::initSingleton(), likewise B::B() or B::initSingleton().
*
* We promise that if you later call LLSingletonBase::cleanupAll():
* 1. A::cleanupSingleton() will be called before
* 2. B::cleanupSingleton(), which will be called before
* 3. C::cleanupSingleton().
* We promise that if you later call LLSingletonBase::deleteAll():
* 1. A::deleteSingleton() will be called before
* 2. B::deleteSingleton(), which will be called before
* 3. C::deleteSingleton().
* Put differently, if your LLSingleton subclass constructor or
* initSingleton() method explicitly depends on some other LLSingleton
* subclass, you may continue to rely on that other subclass in your
* cleanupSingleton() method.
*
* We introduce a special cleanupSingleton() method because cleanupSingleton()
* operations can involve nontrivial realtime, or might throw an exception. A
* destructor should do neither!
*
* If your cleanupSingleton() method throws an exception, we log that
* exception but proceed with the remaining cleanupSingleton() calls.
*
* Similarly, if at some point you call LLSingletonBase::deleteAll(), all
* remaining LLSingleton instances will be destroyed in dependency order. (Or
* call MySubclass::deleteSingleton() to specifically destroy the canonical
* MySubclass instance.)
*/
template <typename DERIVED_TYPE>
class LLSingleton : public LLSingletonBase
{
private:
// LLSingleton<DERIVED_TYPE> must have a distinct instance of
// SingletonData for every distinct DERIVED_TYPE. It's tempting to
// consider hoisting SingletonData up into LLSingletonBase. Don't do it.
struct SingletonData
{
// Use a recursive_mutex in case of constructor circularity. With a
// non-recursive mutex, that would result in deadlock.
typedef std::recursive_mutex mutex_t;
mutex_t mMutex; // LockStatic looks for mMutex
EInitState mInitState{UNINITIALIZED};
DERIVED_TYPE* mInstance{nullptr};
};
typedef llthread::LockStatic<SingletonData> LockStatic;
// Allow LLParamSingleton subclass -- but NOT DERIVED_TYPE itself -- to
// access our private members.
friend class LLParamSingleton<DERIVED_TYPE>;
@ -356,17 +349,17 @@ private:
// purpose for its subclass LLParamSingleton is to support Singletons
// requiring constructor arguments. constructSingleton() supports both use
// cases.
// Accepting LockStatic& requires that the caller has already locked our
// static data before calling.
template <typename... Args>
static void constructSingleton(Args&&... args)
static void constructSingleton(LockStatic& lk, Args&&... args)
{
auto prev_size = LLSingleton_manage_master<DERIVED_TYPE>().get_initializing_size();
// getInstance() calls are from within constructor
sData.mInitState = CONSTRUCTING;
// Any getInstance() calls after this point are from within constructor
lk->mInitState = CONSTRUCTING;
try
{
sData.mInstance = new DERIVED_TYPE(std::forward<Args>(args)...);
// we have called constructor, have not yet called initSingleton()
sData.mInitState = CONSTRUCTED;
lk->mInstance = new DERIVED_TYPE(std::forward<Args>(args)...);
}
catch (const std::exception& err)
{
@ -380,58 +373,56 @@ private:
// There isn't a separate EInitState value meaning "we attempted
// to construct this LLSingleton subclass but could not," so use
// DELETED. That seems slightly more appropriate than UNINITIALIZED.
sData.mInitState = DELETED;
lk->mInitState = DELETED;
// propagate the exception
throw;
}
}
static void finishInitializing()
{
// getInstance() calls are from within initSingleton()
sData.mInitState = INITIALIZING;
// Any getInstance() calls after this point are from within initSingleton()
lk->mInitState = INITIALIZING;
try
{
// initialize singleton after constructing it so that it can
// reference other singletons which in turn depend on it, thus
// breaking cyclic dependencies
sData.mInstance->initSingleton();
sData.mInitState = INITIALIZED;
lk->mInstance->initSingleton();
lk->mInitState = INITIALIZED;
// pop this off stack of initializing singletons
pop_initializing();
pop_initializing(lk->mInstance);
}
catch (const std::exception& err)
{
// pop this off stack of initializing singletons here, too --
// BEFORE logging, so log-machinery LLSingletons don't record a
// dependency on DERIVED_TYPE!
pop_initializing();
pop_initializing(lk->mInstance);
logwarns("Error in ", classname<DERIVED_TYPE>().c_str(),
"::initSingleton(): ", err.what());
// and get rid of the instance entirely
// Get rid of the instance entirely. This call depends on our
// recursive_mutex. We could have a deleteSingleton(LockStatic&)
// overload and pass lk, but we don't strictly need it.
deleteSingleton();
// propagate the exception
throw;
}
}
static void pop_initializing()
static void pop_initializing(LLSingletonBase* sb)
{
// route through LLSingleton_manage_master so we Do The Right Thing
// (namely, nothing) for MasterList
LLSingleton_manage_master<DERIVED_TYPE>().pop_initializing(sData.mInstance);
LLSingleton_manage_master<DERIVED_TYPE>().pop_initializing(sb);
}
static void capture_dependency()
static void capture_dependency(LLSingletonBase* sb)
{
// By this point, if DERIVED_TYPE was pushed onto the initializing
// stack, it has been popped off. So the top of that stack, if any, is
// an LLSingleton that directly depends on DERIVED_TYPE. If
// getInstance() was called by another LLSingleton, rather than from
// vanilla application code, record the dependency.
LLSingleton_manage_master<DERIVED_TYPE>().capture_dependency(
sData.mInstance, sData.mInitState);
LLSingleton_manage_master<DERIVED_TYPE>().capture_dependency(sb);
}
// We know of no way to instruct the compiler that every subclass
@ -461,94 +452,173 @@ protected:
protected:
virtual ~LLSingleton()
{
// In case racing threads call getInstance() at the same moment as
// this destructor, serialize the calls.
Locker lk;
// This phase of cleanup is performed in the destructor rather than in
// deleteSingleton() to defend against manual deletion. When we moved
// cleanup to deleteSingleton(), we hit crashes due to dangling
// pointers in the MasterList.
LockStatic lk;
lk->mInstance = nullptr;
lk->mInitState = DELETED;
// remove this instance from the master list
// Remove this instance from the master list.
LLSingleton_manage_master<DERIVED_TYPE>().remove(this);
sData.mInstance = NULL;
sData.mInitState = DELETED;
}
public:
/**
* @brief Immediately delete the singleton.
* @brief Cleanup and destroy the singleton instance.
*
* A subsequent call to LLProxy::getInstance() will construct a new
* deleteSingleton() calls this instance's cleanupSingleton() method and
* then destroys the instance.
*
* A subsequent call to LLSingleton<T>::getInstance() will construct a new
* instance of the class.
*
* Without an explicit call to LLSingletonBase::deleteAll(), LLSingletons
* are implicitly destroyed after main() has exited and the C++ runtime is
* cleaning up statically-constructed objects. Some classes derived from
* LLSingleton have objects that are part of a runtime system that is
* terminated before main() exits. Calling the destructor of those objects
* after the termination of their respective systems can cause crashes and
* other problems during termination of the project. Using this method to
* destroy the singleton early can prevent these crashes.
*
* An example where this is needed is for a LLSingleton that has an APR
* object as a member that makes APR calls on destruction. The APR system is
* shut down explicitly before main() exits. This causes a crash on exit.
* Using this method before the call to apr_terminate() and NOT calling
* getInstance() again will prevent the crash.
* Without an explicit call to LLSingletonBase::deleteAll(), or
* LLSingleton<T>::deleteSingleton(), LLSingleton instances are simply
* leaked. (Allowing implicit destruction at shutdown caused too many
* problems.)
*/
static void deleteSingleton()
{
delete sData.mInstance;
// SingletonData state handled by destructor, above
// Hold the lock while we call cleanupSingleton() and the destructor.
// Our destructor also instantiates LockStatic, requiring a recursive
// mutex.
LockStatic lk;
// of course, only cleanup and delete if there's something there
if (lk->mInstance)
{
lk->mInstance->cleanup_();
delete lk->mInstance;
// destructor clears mInstance (and mInitState)
}
}
static DERIVED_TYPE* getInstance()
{
// In case racing threads call getInstance() at the same moment,
// serialize the calls.
Locker lk;
// We know the viewer has LLSingleton dependency circularities. If you
// feel strongly motivated to eliminate them, cheers and good luck.
// (At that point we could consider a much simpler locking mechanism.)
switch (sData.mInitState)
{
case CONSTRUCTING:
// here if DERIVED_TYPE's constructor (directly or indirectly)
// calls DERIVED_TYPE::getInstance()
logerrs("Tried to access singleton ",
classname<DERIVED_TYPE>().c_str(),
" from singleton constructor!");
return NULL;
// If A and B depend on each other, and thread T1 requests A at the
// same moment thread T2 requests B, you could get a sequence like this:
// - T1 locks A
// - T2 locks B
// - T1, having constructed A, calls A::initSingleton(), which calls
// B::getInstance() and blocks on B's lock
// - T2, having constructed B, calls B::initSingleton(), which calls
// A::getInstance() and blocks on A's lock
// In other words, classic deadlock.
case UNINITIALIZED:
constructSingleton();
// fall through...
// Avoid that by constructing and initializing every LLSingleton on
// the main thread. In that scenario:
// - T1 locks A
// - T2 locks B
// - T1 discovers A is UNINITIALIZED, so it queues a task for the main
// thread, unlocks A and blocks on the std::future.
// - T2 discovers B is UNINITIALIZED, so it queues a task for the main
// thread, unlocks B and blocks on the std::future.
// - The main thread executes T1's request for A. It locks A and
// starts to construct it.
// - A::initSingleton() calls B::getInstance(). Fine: nobody's holding
// B's lock.
// - The main thread locks B, constructs B, calls B::initSingleton(),
// which calls A::getInstance(), which returns A.
// - B::getInstance() returns B to A::initSingleton(), unlocking B.
// - A::getInstance() returns A to the task wrapper, unlocking A.
// - The task wrapper passes A to T1 via the future. T1 resumes.
// - The main thread executes T2's request for B. Oh look, B already
// exists. The task wrapper passes B to T2 via the future. T2
// resumes.
// This still works even if one of T1 or T2 *is* the main thread.
// This still works even if thread T3 requests B at the same moment as
// T2. Finding B still UNINITIALIZED, T3 also queues a task for the
// main thread, unlocks B and blocks on a (distinct) std::future. By
// the time the main thread executes T3's request for B, B already
// exists, and is simply delivered via the future.
case CONSTRUCTED:
// still have to call initSingleton()
finishInitializing();
break;
{ // nested scope for 'lk'
// In case racing threads call getInstance() at the same moment,
// serialize the calls.
LockStatic lk;
case INITIALIZING:
// here if DERIVED_TYPE::initSingleton() (directly or indirectly)
// calls DERIVED_TYPE::getInstance(): go ahead and allow it
case INITIALIZED:
// normal subsequent calls
break;
switch (lk->mInitState)
{
case CONSTRUCTING:
// here if DERIVED_TYPE's constructor (directly or indirectly)
// calls DERIVED_TYPE::getInstance()
logerrs("Tried to access singleton ",
classname<DERIVED_TYPE>().c_str(),
" from singleton constructor!");
return nullptr;
case DELETED:
// called after deleteSingleton()
logwarns("Trying to access deleted singleton ",
classname<DERIVED_TYPE>().c_str(),
" -- creating new instance");
constructSingleton();
finishInitializing();
break;
}
case INITIALIZING:
// here if DERIVED_TYPE::initSingleton() (directly or indirectly)
// calls DERIVED_TYPE::getInstance(): go ahead and allow it
case INITIALIZED:
// normal subsequent calls
// record the dependency, if any: check if we got here from another
// LLSingleton's constructor or initSingleton() method
capture_dependency(lk->mInstance);
return lk->mInstance;
// record the dependency, if any: check if we got here from another
// LLSingleton's constructor or initSingleton() method
capture_dependency();
return sData.mInstance;
case DELETED:
// called after deleteSingleton()
logwarns("Trying to access deleted singleton ",
classname<DERIVED_TYPE>().c_str(),
" -- creating new instance");
// fall through
case UNINITIALIZED:
case QUEUED:
// QUEUED means some secondary thread has already requested an
// instance, but for present purposes that's semantically
// identical to UNINITIALIZED: either way, we must ourselves
// request an instance.
break;
}
// Here we need to construct a new instance.
if (on_main_thread())
{
// On the main thread, directly construct the instance while
// holding the lock.
constructSingleton(lk);
capture_dependency(lk->mInstance);
return lk->mInstance;
}
// Here we need to construct a new instance, but we're on a secondary
// thread.
lk->mInitState = QUEUED;
} // unlock 'lk'
// Per the comment block above, dispatch to the main thread.
loginfos(classname<DERIVED_TYPE>().c_str(),
"::getInstance() dispatching to main thread");
auto instance = LLMainThreadTask::dispatch(
[](){
// VERY IMPORTANT to call getInstance() on the main thread,
// rather than going straight to constructSingleton()!
// During the time window before mInitState is INITIALIZED,
// multiple requests might be queued. It's essential that, as
// the main thread processes them, only the FIRST such request
// actually constructs the instance -- every subsequent one
// simply returns the existing instance.
loginfos(classname<DERIVED_TYPE>().c_str(),
"::getInstance() on main thread");
return getInstance();
});
// record the dependency chain tracked on THIS thread, not the main
// thread (consider a getInstance() overload with a tag param that
// suppresses dep tracking when dispatched to the main thread)
capture_dependency(instance);
loginfos(classname<DERIVED_TYPE>().c_str(),
"::getInstance() returning on requesting thread");
return instance;
}
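// The comment block above describes dispatching construction to the main
// thread and blocking on a future. As a generic illustration only -- this is
// NOT LLMainThreadTask, just a self-contained sketch of that pattern built
// from the standard library:
#include <functional>
#include <future>
#include <memory>
#include <mutex>
#include <queue>
#include <utility>

class ExampleMainThreadQueue
{
public:
    // Called from a worker thread: enqueue 'work', block until the main
    // thread has executed it, then return its result.
    template <typename CALLABLE>
    auto dispatch(CALLABLE&& work) -> decltype(work())
    {
        using result_t = decltype(work());
        auto task = std::make_shared<std::packaged_task<result_t()>>(
            std::forward<CALLABLE>(work));
        std::future<result_t> result = task->get_future();
        {
            std::lock_guard<std::mutex> lock(mMutex);
            mQueue.push([task]{ (*task)(); });
        }
        // blocks here; the real implementation must also handle being
        // called on the main thread itself
        return result.get();
    }

    // Called periodically on the main thread to run everything queued so far.
    void drain()
    {
        std::queue<std::function<void()>> pending;
        {
            std::lock_guard<std::mutex> lock(mMutex);
            pending.swap(mQueue);
        }
        for (; !pending.empty(); pending.pop())
        {
            pending.front()();
        }
    }

private:
    std::mutex mMutex;
    std::queue<std::function<void()>> mQueue;
};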
// Reference version of getInstance()
// Preferred over getInstance() as it disallows checking for NULL
// Preferred over getInstance() as it disallows checking for nullptr
static DERIVED_TYPE& instance()
{
return *getInstance();
@ -559,8 +629,8 @@ public:
static bool instanceExists()
{
// defend any access to sData from racing threads
Locker lk;
return sData.mInitState == INITIALIZED;
LockStatic lk;
return lk->mInitState == INITIALIZED;
}
// Has this singleton been deleted? This can be useful during shutdown
@ -569,24 +639,11 @@ public:
static bool wasDeleted()
{
// defend any access to sData from racing threads
Locker lk;
return sData.mInitState == DELETED;
LockStatic lk;
return lk->mInitState == DELETED;
}
private:
struct SingletonData
{
// explicitly has a default constructor so that member variables are zero initialized in BSS
// and only changed by singleton logic, not constructor running during startup
EInitState mInitState;
DERIVED_TYPE* mInstance;
};
static SingletonData sData;
};
template<typename T>
typename LLSingleton<T>::SingletonData LLSingleton<T>::sData;
/**
* LLParamSingleton<T> is like LLSingleton<T>, except in the following ways:
@ -611,44 +668,86 @@ class LLParamSingleton : public LLSingleton<DERIVED_TYPE>
{
private:
typedef LLSingleton<DERIVED_TYPE> super;
using typename super::Locker;
using typename super::LockStatic;
// Passes arguments to DERIVED_TYPE's constructor and sets appropriate
// states, returning a pointer to the new instance.
template <typename... Args>
static DERIVED_TYPE* initParamSingleton_(Args&&... args)
{
// In case racing threads both call initParamSingleton() at the same
// time, serialize them. One should initialize; the other should see
// mInitState already set.
LockStatic lk;
// For organizational purposes this function shouldn't be called twice
if (lk->mInitState != super::UNINITIALIZED)
{
super::logerrs("Tried to initialize singleton ",
super::template classname<DERIVED_TYPE>().c_str(),
" twice!");
return nullptr;
}
else if (on_main_thread())
{
// on the main thread, simply construct instance while holding lock
super::logdebugs(super::template classname<DERIVED_TYPE>().c_str(),
"::initParamSingleton()");
super::constructSingleton(lk, std::forward<Args>(args)...);
return lk->mInstance;
}
else
{
// on secondary thread, dispatch to main thread --
// set state so we catch any other calls before the main thread
// picks up the task
lk->mInitState = super::QUEUED;
// very important to unlock here so main thread can actually process
lk.unlock();
super::loginfos(super::template classname<DERIVED_TYPE>().c_str(),
"::initParamSingleton() dispatching to main thread");
// Normally it would be the height of folly to reference-bind
// 'args' into a lambda to be executed on some other thread! By
// the time that thread executed the lambda, the references would
// all be dangling, and Bad Things would result. But
// LLMainThreadTask::dispatch() promises to block until the passed
// task has completed. So in this case we know the references will
// remain valid until the lambda has run, so we dare to bind
// references.
auto instance = LLMainThreadTask::dispatch(
[&](){
super::loginfos(super::template classname<DERIVED_TYPE>().c_str(),
"::initParamSingleton() on main thread");
return initParamSingleton_(std::forward<Args>(args)...);
});
super::loginfos(super::template classname<DERIVED_TYPE>().c_str(),
"::initParamSingleton() returning on requesting thread");
return instance;
}
}
public:
using super::deleteSingleton;
using super::instanceExists;
using super::wasDeleted;
// Passes arguments to DERIVED_TYPE's constructor and sets appropriate states
/// initParamSingleton() constructs the instance, returning a reference.
/// Pass whatever arguments are required to construct DERIVED_TYPE.
template <typename... Args>
static void initParamSingleton(Args&&... args)
static DERIVED_TYPE& initParamSingleton(Args&&... args)
{
// In case racing threads both call initParamSingleton() at the same
// time, serialize them. One should initialize; the other should see
// mInitState already set.
Locker lk;
// For organizational purposes this function shouldn't be called twice
if (super::sData.mInitState != super::UNINITIALIZED)
{
super::logerrs("Tried to initialize singleton ",
super::template classname<DERIVED_TYPE>().c_str(),
" twice!");
}
else
{
super::constructSingleton(std::forward<Args>(args)...);
super::finishInitializing();
}
return *initParamSingleton_(std::forward<Args>(args)...);
}
static DERIVED_TYPE* getInstance()
{
// In case racing threads call getInstance() at the same moment as
// initParamSingleton(), serialize the calls.
Locker lk;
LockStatic lk;
switch (super::sData.mInitState)
switch (lk->mInitState)
{
case super::UNINITIALIZED:
case super::QUEUED:
super::logerrs("Uninitialized param singleton ",
super::template classname<DERIVED_TYPE>().c_str());
break;
@ -659,25 +758,13 @@ public:
" from singleton constructor!");
break;
case super::CONSTRUCTED:
// Should never happen!? The CONSTRUCTED state is specifically to
// navigate through LLSingleton::SingletonInitializer getting
// constructed (once) before LLSingleton::getInstance()'s switch
// on mInitState. But our initParamSingleton() method calls
// constructSingleton() and then calls finishInitializing(), which
// immediately sets INITIALIZING. Why are we here?
super::logerrs("Param singleton ",
super::template classname<DERIVED_TYPE>().c_str(),
"::initSingleton() not yet called");
break;
case super::INITIALIZING:
// As with LLSingleton, explicitly permit circular calls from
// within initSingleton()
case super::INITIALIZED:
// for any valid call, capture dependencies
super::capture_dependency();
return super::sData.mInstance;
super::capture_dependency(lk->mInstance);
return lk->mInstance;
case super::DELETED:
super::logerrs("Trying to access deleted param singleton ",
@ -721,9 +808,9 @@ public:
using super::instanceExists;
using super::wasDeleted;
static void construct()
static DT* construct()
{
super::initParamSingleton();
return super::initParamSingleton();
}
};
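For context, a minimal sketch of how a consumer might use the reworked param-singleton API. The LLProcessChannel class, its members, and the argument values are made up for the example; real code would hide the constructor behind the project's usual LLSINGLETON-style boilerplate, which is omitted here to keep the sketch self-contained.

#include "llsingleton.h"
#include <string>

// Hypothetical param singleton -- illustrative only, not part of this commit.
class LLProcessChannel : public LLParamSingleton<LLProcessChannel>
{
public:
    // real code would keep this private behind the LLSINGLETON boilerplate
    LLProcessChannel(const std::string& name, int capacity):
        mName(name),
        mCapacity(capacity)
    {}

    const std::string& getName() const { return mName; }

private:
    std::string mName;
    int mCapacity;
};

void startup_example()
{
    // constructs the instance exactly once, forwarding the arguments to the
    // constructor, and returns a reference; when called off the main thread,
    // construction is dispatched to the main thread as described above
    LLProcessChannel& chan = LLProcessChannel::initParamSingleton("mesh", 32);

    // later callers use the usual accessor
    const std::string& name = LLProcessChannel::getInstance()->getName();
    (void)chan; (void)name;
}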

View File

@ -33,7 +33,10 @@
#include <sstream>
#include "llwin32headerslean.h"
#include "Dbghelp.h"
#pragma warning (push)
#pragma warning (disable:4091) // a microsoft header has warnings. Very nice.
#include <dbghelp.h>
#pragma warning (pop)
typedef USHORT NTAPI RtlCaptureStackBackTrace_Function(
IN ULONG frames_to_skip,

View File

@ -36,6 +36,10 @@
#include <set>
#include <typeinfo>
#ifdef LL_LINUX
// <ND> For strcmp
#include <string.h>
#endif
// Use to compare the first element only of a pair
// e.g. typedef std::set<std::pair<int, Data*>, compare_pair<int, Data*> > some_pair_set_t;
template <typename T1, typename T2>

View File

@ -729,22 +729,6 @@ std::string utf8str_removeCRLF(const std::string& utf8str)
}
#if LL_WINDOWS
// documentation moved to header. Phoenix 2007-11-27
namespace snprintf_hack
{
int snprintf(char *str, size_t size, const char *format, ...)
{
va_list args;
va_start(args, format);
int num_written = _vsnprintf(str, size, format, args); /* Flawfinder: ignore */
va_end(args);
str[size-1] = '\0'; // always null terminate
return num_written;
}
}
std::string ll_convert_wide_to_string(const wchar_t* in)
{
return ll_convert_wide_to_string(in, CP_UTF8);

View File

@ -715,32 +715,6 @@ LL_COMMON_API std::string utf8str_removeCRLF(const std::string& utf8str);
*/
//@{
/**
* @brief Implementation the expected snprintf interface.
*
* If the size of the passed in buffer is not large enough to hold the string,
* two bad things happen:
* 1. resulting formatted string is NOT null terminated
* 2. Depending on the platform, the return value could be a) the required
* size of the buffer to copy the entire formatted string or b) -1.
* On Windows with VS.Net 2003, for example, it returns -1.
*
* safe_snprintf always adds a NULL terminator so that the caller does not
* need to check for return value or need to add the NULL terminator.
* It does not, however, change the return value - to let the caller know
* that the passed in buffer size was not large enough to hold the
* formatted string.
*
*/
// Deal with the differences on Windows
namespace snprintf_hack
{
LL_COMMON_API int snprintf(char *str, size_t size, const char *format, ...);
}
using snprintf_hack::snprintf;
/**
* @brief Convert a wide string to std::string
*

View File

@ -0,0 +1,138 @@
/**
* @file lltempredirect.cpp
* @author Nat Goodspeed
* @date 2019-10-31
* @brief Implementation for lltempredirect.
*
* $LicenseInfo:firstyear=2019&license=viewerlgpl$
* Copyright (c) 2019, Linden Research, Inc.
* $/LicenseInfo$
*/
// Precompiled header
#include "linden_common.h"
// associated header
#include "lltempredirect.h"
// STL headers
// std headers
#if !LL_WINDOWS
# include <unistd.h>
#else
# include <io.h>
#endif // !LL_WINDOWS
// external library headers
// other Linden headers
/*****************************************************************************
* llfd
*****************************************************************************/
// We could restate the implementation of each of llfd::close(), etc., but
// this is way more succinct.
#if LL_WINDOWS
#define fhclose _close
#define fhdup _dup
#define fhdup2 _dup2
#define fhfdopen _fdopen
#define fhfileno _fileno
#else
#define fhclose ::close
#define fhdup ::dup
#define fhdup2 ::dup2
#define fhfdopen ::fdopen
#define fhfileno ::fileno
#endif
int llfd::close(int fd)
{
return fhclose(fd);
}
int llfd::dup(int target)
{
return fhdup(target);
}
int llfd::dup2(int target, int reference)
{
return fhdup2(target, reference);
}
FILE* llfd::open(int fd, const char* mode)
{
return fhfdopen(fd, mode);
}
int llfd::fileno(FILE* stream)
{
return fhfileno(stream);
}
/*****************************************************************************
* LLTempRedirect
*****************************************************************************/
LLTempRedirect::LLTempRedirect():
mOrigTarget(-1), // -1 is an invalid file descriptor
mReference(-1)
{}
LLTempRedirect::LLTempRedirect(FILE* target, FILE* reference):
LLTempRedirect((target? fhfileno(target) : -1),
(reference? fhfileno(reference) : -1))
{}
LLTempRedirect::LLTempRedirect(int target, int reference):
// capture a duplicate file descriptor for the file originally targeted by
// 'reference'
mOrigTarget((reference >= 0)? fhdup(reference) : -1),
mReference(reference)
{
if (target >= 0 && reference >= 0)
{
// As promised, force 'reference' to refer to 'target'. This first
// implicitly closes 'reference', which is why we first capture a
// duplicate so the original target file stays open.
fhdup2(target, reference);
}
}
LLTempRedirect::LLTempRedirect(LLTempRedirect&& other)
{
mOrigTarget = other.mOrigTarget;
mReference = other.mReference;
// other LLTempRedirect must be in moved-from state so its destructor
// won't repeat the same operations as ours!
other.mOrigTarget = -1;
other.mReference = -1;
}
LLTempRedirect::~LLTempRedirect()
{
reset();
}
void LLTempRedirect::reset()
{
// If this instance was default-constructed (or constructed with an
// invalid file descriptor), skip the following.
if (mOrigTarget >= 0)
{
// Restore mReference to point to mOrigTarget. This implicitly closes
// the duplicate created by our constructor of its 'target' file
// descriptor.
fhdup2(mOrigTarget, mReference);
// mOrigTarget has served its purpose
fhclose(mOrigTarget);
}
// assign these because reset() is also responsible for a "moved from"
// instance
mOrigTarget = -1;
mReference = -1;
}
LLTempRedirect& LLTempRedirect::operator=(LLTempRedirect&& other)
{
reset();
std::swap(mOrigTarget, other.mOrigTarget);
std::swap(mReference, other.mReference);
return *this;
}

View File

@ -0,0 +1,91 @@
/**
* @file lltempredirect.h
* @author Nat Goodspeed
* @date 2019-10-31
* @brief RAII low-level file-descriptor redirection
*
* $LicenseInfo:firstyear=2019&license=viewerlgpl$
* Copyright (c) 2019, Linden Research, Inc.
* $/LicenseInfo$
*/
#if ! defined(LL_LLTEMPREDIRECT_H)
#define LL_LLTEMPREDIRECT_H
// Functions in this namespace are intended to insulate the caller from the
// aggravating distinction between ::close() and Microsoft _close().
namespace llfd
{
int close(int fd);
int dup(int target);
int dup2(int target, int reference);
FILE* open(int fd, const char* mode);
int fileno(FILE* stream);
} // namespace llfd
/**
* LLTempRedirect is an RAII class that performs file redirection on low-level
* file descriptors, expressed as ints. (Use llfd::fileno() to obtain the file
* descriptor from a classic-C FILE*. There is no portable way to obtain the
* file descriptor from a std::fstream.)
*
* Instantiate LLTempRedirect with a target file descriptor (e.g. for some
* open file) and a reference file descriptor (e.g. for stderr). From that
* point until the LLTempRedirect instance is destroyed, all OS-level writes
* to the reference file descriptor will be redirected to the target file.
*
* Because dup2() is used for redirection, the original passed target file
* descriptor remains open. If you want LLTempRedirect's destructor to close
* the target file, close() the target file descriptor after passing it to
* LLTempRedirect's constructor.
*
* LLTempRedirect's constructor saves the original target of the reference
* file descriptor. Its destructor restores the reference file descriptor to
* point once again to its original target.
*/
class LLTempRedirect
{
public:
LLTempRedirect();
/**
* For the lifespan of this LLTempRedirect instance, all writes to
* 'reference' will be redirected to 'target'. When this LLTempRedirect is
* destroyed, the original target for 'reference' will be restored.
*
* Pass 'target' as NULL if you simply want to save and restore
* 'reference' against possible redirection in the meantime.
*/
LLTempRedirect(FILE* target, FILE* reference);
/**
* For the lifespan of this LLTempRedirect instance, all writes to
* 'reference' will be redirected to 'target'. When this LLTempRedirect is
* destroyed, the original target for 'reference' will be restored.
*
* Pass 'target' as -1 if you simply want to save and restore
* 'reference' against possible redirection in the meantime.
*/
LLTempRedirect(int target, int reference);
LLTempRedirect(const LLTempRedirect&) = delete;
LLTempRedirect(LLTempRedirect&& other);
~LLTempRedirect();
LLTempRedirect& operator=(const LLTempRedirect&) = delete;
LLTempRedirect& operator=(LLTempRedirect&& other);
/// returns (duplicate file descriptor for) the original target of the
/// 'reference' file descriptor passed to our constructor
int getOriginalTarget() const { return mOrigTarget; }
/// returns the original 'reference' file descriptor passed to our
/// constructor
int getReference() const { return mReference; }
private:
void reset();
int mOrigTarget, mReference;
};
#endif /* ! defined(LL_LLTEMPREDIRECT_H) */
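To make the RAII behavior described above concrete, here is a minimal sketch (not part of this change; the log-file path is illustrative) that temporarily routes stderr into a file for the duration of a scope.

#include "lltempredirect.h"
#include <cstdio>

void capture_stderr_example()
{
    // open the file that should temporarily receive stderr output
    // (the path is illustrative)
    FILE* logfile = fopen("stderr-capture.log", "w");
    if (!logfile)
        return;

    {
        // while 'redirect' is alive, OS-level writes to stderr land in logfile
        LLTempRedirect redirect(logfile, stderr);
        fprintf(stderr, "this line goes to stderr-capture.log\n");
        fflush(stderr);
    }   // destructor restores stderr to its original target

    fprintf(stderr, "this line goes to the real stderr again\n");

    // the constructor dup()ed the descriptor, so logfile is still ours to close
    fclose(logfile);
}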

View File

@ -92,26 +92,39 @@ void set_thread_name( DWORD dwThreadID, const char* threadName)
// }
//
//----------------------------------------------------------------------------
namespace
{
U32 LL_THREAD_LOCAL sThreadID = 0;
LLThread::id_t main_thread()
{
// Using a function-static variable to identify the main thread
// requires that control reach here from the main thread before it
// reaches here from any other thread. We simply trust that whichever
// thread gets here first is the main thread.
static LLThread::id_t s_thread_id = LLThread::currentID();
return s_thread_id;
}
U32 LLThread::sIDIter = 0;
} // anonymous namespace
LL_COMMON_API bool on_main_thread()
{
return (LLThread::currentID() == main_thread());
}
LL_COMMON_API void assert_main_thread()
{
static U32 s_thread_id = LLThread::currentID();
if (LLThread::currentID() != s_thread_id)
auto curr = LLThread::currentID();
auto main = main_thread();
if (curr != main)
{
LL_WARNS() << "Illegal execution from thread id " << (S32) LLThread::currentID()
<< " outside main thread " << (S32) s_thread_id << LL_ENDL;
LL_WARNS() << "Illegal execution from thread id " << curr
<< " outside main thread " << main << LL_ENDL;
}
}
void LLThread::registerThreadID()
{
sThreadID = ++sIDIter;
}
// this function has become moot
void LLThread::registerThreadID() {}
//
// Handed to the APR thread creation function
@ -122,11 +135,12 @@ void LLThread::threadRun()
set_thread_name(-1, mName.c_str());
#endif
// this is the first point at which we're actually running in the new thread
mID = currentID();
// for now, hard code all LLThreads to report to single master thread recorder, which is known to be running on main thread
mRecorder = new LLTrace::ThreadRecorder(*LLTrace::get_master_thread_recorder());
sThreadID = mID;
// Run the user supplied function
do
{
@ -168,8 +182,6 @@ LLThread::LLThread(const std::string& name, apr_pool_t *poolp) :
mStatus(STOPPED),
mRecorder(NULL)
{
mID = ++sIDIter;
mRunCondition = new LLCondition();
mDataLock = new LLMutex();
mLocalAPRFilePoolp = NULL ;
@ -347,9 +359,9 @@ void LLThread::setQuitting()
}
// static
U32 LLThread::currentID()
LLThread::id_t LLThread::currentID()
{
return sThreadID;
return std::this_thread::get_id();
}
// static
@ -376,6 +388,16 @@ void LLThread::wakeLocked()
}
}
void LLThread::lockData()
{
mDataLock->lock();
}
void LLThread::unlockData()
{
mDataLock->unlock();
}
//============================================================================
//----------------------------------------------------------------------------

View File

@ -30,12 +30,9 @@
#include "llapp.h"
#include "llapr.h"
#include "boost/intrusive_ptr.hpp"
#include "llmutex.h"
#include "llrefcount.h"
#include <thread>
LL_COMMON_API void assert_main_thread();
namespace LLTrace
{
class ThreadRecorder;
@ -45,7 +42,6 @@ class LL_COMMON_API LLThread
{
private:
friend class LLMutex;
static U32 sIDIter;
public:
typedef enum e_thread_status
@ -55,6 +51,7 @@ public:
QUITTING= 2, // Someone wants this thread to quit
CRASHED = -1 // An uncaught exception was thrown by the thread
} EThreadStatus;
typedef std::thread::id id_t;
LLThread(const std::string& name, apr_pool_t *poolp = NULL);
virtual ~LLThread(); // Warning! You almost NEVER want to destroy a thread unless it's in the STOPPED state.
@ -64,7 +61,7 @@ public:
bool isStopped() const { return (STOPPED == mStatus) || (CRASHED == mStatus); }
bool isCrashed() const { return (CRASHED == mStatus); }
static U32 currentID(); // Return ID of current thread
static id_t currentID(); // Return ID of current thread
static void yield(); // Static because it can be called by the main thread, which doesn't have an LLThread data structure.
public:
@ -88,7 +85,7 @@ public:
LLVolatileAPRPool* getLocalAPRFilePool() { return mLocalAPRFilePoolp ; }
U32 getID() const { return mID; }
id_t getID() const { return mID; }
// Called by threads *not* created via LLThread to register some
// internal state used by LLMutex. You must call this once early
@ -109,7 +106,7 @@ protected:
std::thread *mThreadp;
EThreadStatus mStatus;
U32 mID;
id_t mID;
LLTrace::ThreadRecorder* mRecorder;
//a local apr_pool for APRFile operations in this thread. If it exists, LLAPRFile::sAPRFilePoolp should not be used.
@ -126,8 +123,8 @@ protected:
virtual bool runCondition(void);
// Lock/Unlock Run Condition -- use around modification of any variable used in runCondition()
inline void lockData();
inline void unlockData();
void lockData();
void unlockData();
// This is the predicate that decides whether the thread should sleep.
// It should only be called with mDataLock locked, since the virtual runCondition() function may need to access
@ -142,17 +139,6 @@ protected:
};
void LLThread::lockData()
{
mDataLock->lock();
}
void LLThread::unlockData()
{
mDataLock->unlock();
}
//============================================================================
// Simple responder for self destructing callbacks
@ -168,5 +154,6 @@ public:
//============================================================================
extern LL_COMMON_API void assert_main_thread();
extern LL_COMMON_API bool on_main_thread();
#endif // LL_LLTHREAD_H
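A small sketch of how calling code might use the revised thread-identity helpers; the update_cache() function is hypothetical, shown only to illustrate the API.

#include "llthread.h"

void update_cache()     // hypothetical caller
{
    // cheap boolean check, handy for branching
    if (!on_main_thread())
    {
        // e.g. requeue the work for the main thread and bail out
        return;
    }

    // logs a warning if this ever runs off the main thread
    assert_main_thread();

    // thread ids are now std::thread::id values rather than sequential U32s
    LLThread::id_t me = LLThread::currentID();
    (void)me;
}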

View File

@ -93,11 +93,9 @@ void LLThreadLocalPointerBase::initAllThreadLocalStorage()
{
if (!sInitialized)
{
for (LLInstanceTracker<LLThreadLocalPointerBase>::instance_iter it = beginInstances(), end_it = endInstances();
it != end_it;
++it)
for (auto& base : instance_snapshot())
{
(*it).initStorage();
base.initStorage();
}
sInitialized = true;
}
@ -108,11 +106,9 @@ void LLThreadLocalPointerBase::destroyAllThreadLocalStorage()
{
if (sInitialized)
{
//for (LLInstanceTracker<LLThreadLocalPointerBase>::instance_iter it = beginInstances(), end_it = endInstances();
// it != end_it;
// ++it)
//for (auto& base : instance_snapshot())
//{
// (*it).destroyStorage();
// base.destroyStorage();
//}
sInitialized = false;
}

View File

@ -23,7 +23,7 @@
* $/LicenseInfo$
*/
//#include "linden_common.h"
//#include "llthreadsafequeue.h"
#include "linden_common.h"
#include "llthreadsafequeue.h"

View File

@ -30,18 +30,12 @@
#include "llexception.h"
#include <deque>
#include <string>
#if LL_WINDOWS
#pragma warning (push)
#pragma warning (disable:4265)
#endif
// 'std::_Pad' : class has virtual functions, but destructor is not virtual
#include <mutex>
#include <condition_variable>
#if LL_WINDOWS
#pragma warning (pop)
#endif
#include <chrono>
#include "mutex.h"
#include "llcoros.h"
#include LLCOROS_MUTEX_HEADER
#include <boost/fiber/timed_mutex.hpp>
#include LLCOROS_CONDVAR_HEADER
//
// A general queue exception.
@ -88,18 +82,28 @@ public:
// Add an element to the front of queue (will block if the queue has
// reached capacity).
//
// This call will raise an interrupt error if the queue is deleted while
// This call will raise an interrupt error if the queue is closed while
// the caller is blocked.
void pushFront(ElementT const & element);
// Try to add an element to the front ofqueue without blocking. Returns
// Try to add an element to the front of queue without blocking. Returns
// true only if the element was actually added.
bool tryPushFront(ElementT const & element);
// Try to add an element to the front of queue, blocking if full but with
// timeout. Returns true if the element was added.
// There are potentially two different timeouts involved: how long to try
// to lock the mutex, versus how long to wait for the queue to stop being
// full. Careful settings for each timeout might be orders of magnitude
// apart. However, this method conflates them.
template <typename Rep, typename Period>
bool tryPushFrontFor(const std::chrono::duration<Rep, Period>& timeout,
ElementT const & element);
// Pop the element at the end of the queue (will block if the queue is
// empty).
//
// This call will raise an interrupt error if the queue is deleted while
// This call will raise an interrupt error if the queue is closed while
// the caller is blocked.
ElementT popBack(void);
@ -110,13 +114,29 @@ public:
// Returns the size of the queue.
size_t size();
// closes the queue:
// - every subsequent pushFront() call will throw LLThreadSafeQueueInterrupt
// - every subsequent tryPushFront() call will return false
// - popBack() calls will return normally until the queue is drained, then
// every subsequent popBack() will throw LLThreadSafeQueueInterrupt
// - tryPopBack() calls will return normally until the queue is drained,
// then every subsequent tryPopBack() call will return false
void close();
// detect closed state
bool isClosed();
// inverse of isClosed()
explicit operator bool();
private:
std::deque< ElementT > mStorage;
U32 mCapacity;
bool mClosed;
std::mutex mLock;
std::condition_variable mCapacityCond;
std::condition_variable mEmptyCond;
boost::fibers::timed_mutex mLock;
typedef std::unique_lock<decltype(mLock)> lock_t;
boost::fibers::condition_variable_any mCapacityCond;
boost::fibers::condition_variable_any mEmptyCond;
};
// LLThreadSafeQueue
@ -124,7 +144,8 @@ private:
template<typename ElementT>
LLThreadSafeQueue<ElementT>::LLThreadSafeQueue(U32 capacity) :
mCapacity(capacity)
mCapacity(capacity),
mClosed(false)
{
}
@ -132,13 +153,18 @@ mCapacity(capacity)
template<typename ElementT>
void LLThreadSafeQueue<ElementT>::pushFront(ElementT const & element)
{
lock_t lock1(mLock);
while (true)
{
std::unique_lock<std::mutex> lock1(mLock);
if (mClosed)
{
LLTHROW(LLThreadSafeQueueInterrupt());
}
if (mStorage.size() < mCapacity)
{
mStorage.push_front(element);
lock1.unlock();
mEmptyCond.notify_one();
return;
}
@ -149,17 +175,61 @@ void LLThreadSafeQueue<ElementT>::pushFront(ElementT const & element)
}
template <typename ElementT>
template <typename Rep, typename Period>
bool LLThreadSafeQueue<ElementT>::tryPushFrontFor(const std::chrono::duration<Rep, Period>& timeout,
ElementT const & element)
{
// Convert duration to time_point: passing the same timeout duration to
// each of multiple calls is wrong.
auto endpoint = std::chrono::steady_clock::now() + timeout;
lock_t lock1(mLock, std::defer_lock);
if (!lock1.try_lock_until(endpoint))
return false;
while (true)
{
if (mClosed)
{
return false;
}
if (mStorage.size() < mCapacity)
{
mStorage.push_front(element);
lock1.unlock();
mEmptyCond.notify_one();
return true;
}
// Storage Full. Wait for signal.
if (LLCoros::cv_status::timeout == mCapacityCond.wait_until(lock1, endpoint))
{
// timed out -- formally we might recheck both conditions above
return false;
}
// If we didn't time out, we were notified for some reason. Loop back
// to check.
}
}
template<typename ElementT>
bool LLThreadSafeQueue<ElementT>::tryPushFront(ElementT const & element)
{
std::unique_lock<std::mutex> lock1(mLock, std::defer_lock);
lock_t lock1(mLock, std::defer_lock);
if (!lock1.try_lock())
return false;
if (mClosed)
return false;
if (mStorage.size() >= mCapacity)
return false;
mStorage.push_front(element);
lock1.unlock();
mEmptyCond.notify_one();
return true;
}
@ -168,18 +238,23 @@ bool LLThreadSafeQueue<ElementT>::tryPushFront(ElementT const & element)
template<typename ElementT>
ElementT LLThreadSafeQueue<ElementT>::popBack(void)
{
lock_t lock1(mLock);
while (true)
{
std::unique_lock<std::mutex> lock1(mLock);
if (!mStorage.empty())
{
ElementT value = mStorage.back();
mStorage.pop_back();
lock1.unlock();
mCapacityCond.notify_one();
return value;
}
if (mClosed)
{
LLTHROW(LLThreadSafeQueueInterrupt());
}
// Storage empty. Wait for signal.
mEmptyCond.wait(lock1);
}
@ -189,15 +264,18 @@ ElementT LLThreadSafeQueue<ElementT>::popBack(void)
template<typename ElementT>
bool LLThreadSafeQueue<ElementT>::tryPopBack(ElementT & element)
{
std::unique_lock<std::mutex> lock1(mLock, std::defer_lock);
lock_t lock1(mLock, std::defer_lock);
if (!lock1.try_lock())
return false;
// no need to check mClosed: tryPopBack() behavior when the queue is
// closed is implemented by simple inability to push any new elements
if (mStorage.empty())
return false;
element = mStorage.back();
mStorage.pop_back();
lock1.unlock();
mCapacityCond.notify_one();
return true;
}
@ -206,8 +284,34 @@ bool LLThreadSafeQueue<ElementT>::tryPopBack(ElementT & element)
template<typename ElementT>
size_t LLThreadSafeQueue<ElementT>::size(void)
{
std::lock_guard<std::mutex> lock(mLock);
lock_t lock(mLock);
return mStorage.size();
}
template<typename ElementT>
void LLThreadSafeQueue<ElementT>::close()
{
lock_t lock(mLock);
mClosed = true;
lock.unlock();
// wake up any blocked popBack() calls
mEmptyCond.notify_all();
// wake up any blocked pushFront() calls
mCapacityCond.notify_all();
}
template<typename ElementT>
bool LLThreadSafeQueue<ElementT>::isClosed()
{
lock_t lock(mLock);
return mClosed;
}
template<typename ElementT>
LLThreadSafeQueue<ElementT>::operator bool()
{
lock_t lock(mLock);
return ! mClosed;
}
#endif
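A minimal producer/consumer sketch of the new close() semantics and the timed push, assuming an int element type and a hypothetical process() handler (neither is part of this commit).

#include "llthreadsafequeue.h"
#include <chrono>

void process(int value);    // hypothetical handler, defined elsewhere

void producer(LLThreadSafeQueue<int>& queue)
{
    for (int i = 0; i < 100; ++i)
    {
        // give up after 100ms instead of blocking forever on a full queue
        if (!queue.tryPushFrontFor(std::chrono::milliseconds(100), i))
            break;
    }
    // no more data: once the queue drains, blocked consumers are released
    queue.close();
}

void consumer(LLThreadSafeQueue<int>& queue)
{
    try
    {
        while (true)
        {
            // keeps returning queued elements after close(), then throws
            process(queue.popBack());
        }
    }
    catch (const LLThreadSafeQueueInterrupt&)
    {
        // queue was closed and fully drained
    }
}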

View File

@ -57,7 +57,7 @@ class StatBase
{
public:
StatBase(const char* name, const char* description);
virtual ~StatBase() LLINSTANCETRACKER_DTOR_NOEXCEPT {}
virtual ~StatBase() {}
virtual const char* getUnitLabel() const;
const std::string& getName() const { return mName; }

View File

@ -291,8 +291,8 @@ void EventAccumulator::reset( const EventAccumulator* other )
{
mNumSamples = 0;
mSum = 0;
mMin = NaN;
mMax = NaN;
mMin = F32(NaN);
mMax = F32(NaN);
mMean = NaN;
mSumOfSquares = 0;
mLastValue = other ? other->mLastValue : NaN;

View File

@ -242,8 +242,8 @@ namespace LLTrace
EventAccumulator()
: mSum(0),
mMin(NaN),
mMax(NaN),
mMin(F32(NaN)),
mMax(F32(NaN)),
mMean(NaN),
mSumOfSquares(0),
mNumSamples(0),
@ -313,8 +313,8 @@ namespace LLTrace
SampleAccumulator()
: mSum(0),
mMin(NaN),
mMax(NaN),
mMin(F32(NaN)),
mMax(F32(NaN)),
mMean(NaN),
mSumOfSquares(0),
mLastSampleTimeStamp(0),

View File

@ -28,6 +28,7 @@
#include "lltracethreadrecorder.h"
#include "llfasttimer.h"
#include "lltrace.h"
#include "llstl.h"
namespace LLTrace
{
@ -64,16 +65,15 @@ void ThreadRecorder::init()
activate(&mThreadRecordingBuffers);
// initialize time block parent pointers
for (BlockTimerStatHandle::instance_tracker_t::instance_iter it = BlockTimerStatHandle::instance_tracker_t::beginInstances(), end_it = BlockTimerStatHandle::instance_tracker_t::endInstances();
it != end_it;
++it)
for (auto& base : BlockTimerStatHandle::instance_snapshot())
{
BlockTimerStatHandle& time_block = static_cast<BlockTimerStatHandle&>(*it);
TimeBlockTreeNode& tree_node = mTimeBlockTreeNodes[it->getIndex()];
// because of indirect derivation from LLInstanceTracker, have to downcast
BlockTimerStatHandle& time_block = static_cast<BlockTimerStatHandle&>(base);
TimeBlockTreeNode& tree_node = mTimeBlockTreeNodes[time_block.getIndex()];
tree_node.mBlock = &time_block;
tree_node.mParent = &root_time_block;
it->getCurrentAccumulator().mParent = &root_time_block;
time_block.getCurrentAccumulator().mParent = &root_time_block;
}
mRootTimer = new BlockTimer(root_time_block);

View File

@ -43,6 +43,7 @@
#include "llstring.h"
#include "lltimer.h"
#include "llthread.h"
#include "llmutex.h"
const LLUUID LLUUID::null;
const LLTransactionID LLTransactionID::tnull;
@ -739,7 +740,7 @@ void LLUUID::getCurrentTime(uuid_time_t *timestamp)
getSystemTime(&time_last);
uuids_this_tick = uuids_per_tick;
init = TRUE;
mMutex = new LLMutex();
mMutex = new LLMutex();
}
uuid_time_t time_now = {0,0};

View File

@ -34,6 +34,7 @@
#include "llqueuedthread.h"
#include "llatomic.h"
#include "llmutex.h"
#define USE_FRAME_CALLBACK_MANAGER 0

View File

@ -0,0 +1,73 @@
/**
* @file lockstatic.h
* @author Nat Goodspeed
* @date 2019-12-03
* @brief LockStatic class provides mutex-guarded access to the specified
* static data.
*
* $LicenseInfo:firstyear=2019&license=viewerlgpl$
* Copyright (c) 2019, Linden Research, Inc.
* $/LicenseInfo$
*/
#if ! defined(LL_LOCKSTATIC_H)
#define LL_LOCKSTATIC_H
#include "mutex.h" // std::unique_lock
namespace llthread
{
// Instantiate this template to obtain a pointer to the canonical static
// instance of Static while holding a lock on that instance. Use of
// Static::mMutex presumes that Static declares some suitable mMutex.
template <typename Static>
class LockStatic
{
typedef std::unique_lock<decltype(Static::mMutex)> lock_t;
public:
LockStatic():
mData(getStatic()),
mLock(mData->mMutex)
{}
Static* get() const { return mData; }
operator Static*() const { return get(); }
Static* operator->() const { return get(); }
// sometimes we must explicitly unlock...
void unlock()
{
// but once we do, access is no longer permitted
mData = nullptr;
mLock.unlock();
}
protected:
Static* mData;
lock_t mLock;
private:
Static* getStatic()
{
// Static::mMutex must be function-local static rather than class-
// static. Some of our consumers must function properly (therefore
// lock properly) even when the containing module's static variables
// have not yet been runtime-initialized. A mutex requires
// construction. A static class member might not yet have been
// constructed.
//
// We could store a dumb mutex_t*, notice when it's NULL and allocate a
// heap mutex -- but that's vulnerable to race conditions. And we can't
// defend the dumb pointer with another mutex.
//
// We could store a std::atomic<mutex_t*> -- but a default-constructed
// std::atomic<T> does not contain a valid T, even a default-constructed
// T! Which means std::atomic, too, requires runtime initialization.
//
// But a function-local static is guaranteed to be initialized exactly
// once: the first time control reaches that declaration.
static Static sData;
return &sData;
}
};
} // llthread namespace
#endif /* ! defined(LL_LOCKSTATIC_H) */
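For reference, a minimal sketch of the intended usage pattern; the Foo class and its StaticData struct are made up for the example.

#include "lockstatic.h"
#include <list>
#include <mutex>

class Foo
{
    // all mutable class-static state lives in one struct, alongside its mutex
    struct StaticData
    {
        std::mutex mMutex;              // name required by LockStatic
        std::list<Foo*> mInstances;
    };
    typedef llthread::LockStatic<StaticData> LockStatic;

public:
    Foo()
    {
        // constructing LockStatic acquires the lock; it is released when
        // 'lk' goes out of scope
        LockStatic lk;
        lk->mInstances.push_back(this);
    }

    ~Foo()
    {
        LockStatic lk;
        lk->mInstances.remove(this);
    }
};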

22
indra/llcommon/mutex.h Normal file
View File

@ -0,0 +1,22 @@
/**
* @file mutex.h
* @author Nat Goodspeed
* @date 2019-12-03
* @brief Wrap <mutex> in odious boilerplate
*
* $LicenseInfo:firstyear=2019&license=viewerlgpl$
* Copyright (c) 2019, Linden Research, Inc.
* $/LicenseInfo$
*/
#if LL_WINDOWS
#pragma warning (push)
#pragma warning (disable:4265)
#endif
// warning C4265: 'std::_Pad' : class has virtual functions, but destructor is not virtual
#include <mutex>
#if LL_WINDOWS
#pragma warning (pop)
#endif
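Consumers include this wrapper instead of <mutex> directly, as the other headers in this commit do; a hypothetical snippet:

#include "mutex.h"      // instead of #include <mutex>

static std::mutex sExampleMutex;    // hypothetical guard

void locked_work()
{
    std::lock_guard<std::mutex> hold(sExampleMutex);
    // ...protected work...
}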

View File

@ -0,0 +1,67 @@
/**
* @file llcond_test.cpp
* @author Nat Goodspeed
* @date 2019-07-18
* @brief Test for llcond.
*
* $LicenseInfo:firstyear=2019&license=viewerlgpl$
* Copyright (c) 2019, Linden Research, Inc.
* $/LicenseInfo$
*/
// Precompiled header
#include "linden_common.h"
// associated header
#include "llcond.h"
// STL headers
// std headers
// external library headers
// other Linden headers
#include "../test/lltut.h"
#include "llcoros.h"
/*****************************************************************************
* TUT
*****************************************************************************/
namespace tut
{
struct llcond_data
{
LLScalarCond<int> cond{0};
};
typedef test_group<llcond_data> llcond_group;
typedef llcond_group::object object;
llcond_group llcondgrp("llcond");
template<> template<>
void object::test<1>()
{
set_test_name("Immediate gratification");
cond.set_one(1);
ensure("wait_for_equal() failed",
cond.wait_for_equal(F32Milliseconds(1), 1));
ensure("wait_for_unequal() should have failed",
! cond.wait_for_unequal(F32Milliseconds(1), 1));
}
template<> template<>
void object::test<2>()
{
set_test_name("Simple two-coroutine test");
LLCoros::instance().launch(
"test<2>",
[this]()
{
// Lambda immediately entered -- control comes here first.
ensure_equals(cond.get(), 0);
cond.set_all(1);
cond.wait_equal(2);
ensure_equals(cond.get(), 2);
cond.set_all(3);
});
// Main coroutine is resumed only when the lambda waits.
ensure_equals(cond.get(), 1);
cond.set_all(2);
cond.wait_equal(3);
}
} // namespace tut

View File

@ -26,101 +26,32 @@
* $/LicenseInfo$
*/
/*****************************************************************************/
// test<1>() is cloned from a Boost.Coroutine example program whose copyright
// info is reproduced here:
/*---------------------------------------------------------------------------*/
// Copyright (c) 2006, Giovanni P. Deretta
//
// This code may be used under either of the following two licences:
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
// THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
// THE SOFTWARE. OF SUCH DAMAGE.
//
// Or:
//
// Distributed under the Boost Software License, Version 1.0.
// (See accompanying file LICENSE_1_0.txt or copy at
// http://www.boost.org/LICENSE_1_0.txt)
/*****************************************************************************/
#define BOOST_RESULT_OF_USE_TR1 1
// On some platforms, Boost.Coroutine must #define magic symbols before
// #including platform-API headers. Naturally, that's ineffective unless the
// Boost.Coroutine #include is the *first* #include of the platform header.
// That means that client code must generally #include Boost.Coroutine headers
// before anything else.
#include <boost/dcoroutine/coroutine.hpp>
#include <boost/bind.hpp>
#include <boost/range.hpp>
#include <boost/utility.hpp>
#include <boost/shared_ptr.hpp>
#include <boost/make_shared.hpp>
#include "linden_common.h"
#include <iostream>
#include <string>
#include <typeinfo>
#include "../test/lltut.h"
#include "../test/lltestapp.h"
#include "llsd.h"
#include "llsdutil.h"
#include "llevents.h"
#include "tests/wrapllerrs.h"
#include "stringize.h"
#include "llcoros.h"
#include "lleventfilter.h"
#include "lleventcoro.h"
#include "../test/debug.h"
#include "../test/sync.h"
using namespace llcoro;
/*****************************************************************************
* from the banana.cpp example program borrowed for test<1>()
*****************************************************************************/
namespace coroutines = boost::dcoroutines;
using coroutines::coroutine;
template<typename Iter>
bool match(Iter first, Iter last, std::string match) {
std::string::iterator i = match.begin();
for(; (first != last) && (i != match.end()); ++i) {
if (*first != *i)
return false;
++first;
}
return i == match.end();
}
template<typename BidirectionalIterator>
BidirectionalIterator
match_substring(BidirectionalIterator begin,
BidirectionalIterator end,
std::string xmatch,
BOOST_DEDUCED_TYPENAME coroutine<BidirectionalIterator(void)>::self& self) {
//BidirectionalIterator begin_ = begin;
for(; begin != end; ++begin)
if(match(begin, end, xmatch)) {
self.yield(begin);
}
return end;
}
typedef coroutine<std::string::iterator(void)> match_coroutine_type;
/*****************************************************************************
* Test helpers
*****************************************************************************/
@ -131,8 +62,9 @@ typedef coroutine<std::string::iterator(void)> match_coroutine_type;
class ImmediateAPI
{
public:
ImmediateAPI():
mPump("immediate", true)
ImmediateAPI(Sync& sync):
mPump("immediate", true),
mSync(sync)
{
mPump.listen("API", boost::bind(&ImmediateAPI::operator(), this, _1));
}
@ -141,20 +73,18 @@ public:
// Invoke this with an LLSD map containing:
// ["value"]: Integer value. We will reply with ["value"] + 1.
// ["reply"]: Name of LLEventPump on which to send success response.
// ["error"]: Name of LLEventPump on which to send error response.
// ["fail"]: Presence of this key selects ["error"], else ["success"] as
// the name of the pump on which to send the response.
// ["reply"]: Name of LLEventPump on which to send response.
bool operator()(const LLSD& event) const
{
mSync.bump();
LLSD::Integer value(event["value"]);
LLSD::String replyPumpName(event.has("fail")? "error" : "reply");
LLEventPumps::instance().obtain(event[replyPumpName]).post(value + 1);
LLEventPumps::instance().obtain(event["reply"]).post(value + 1);
return false;
}
private:
LLEventStream mPump;
Sync& mSync;
};
/*****************************************************************************
@ -162,103 +92,82 @@ private:
*****************************************************************************/
namespace tut
{
struct coroutine_data {};
typedef test_group<coroutine_data> coroutine_group;
struct test_data
{
Sync mSync;
ImmediateAPI immediateAPI{mSync};
std::string replyName, errorName, threw, stringdata;
LLSD result, errordata;
int which;
LLTestApp testApp;
void explicit_wait(boost::shared_ptr<LLCoros::Promise<std::string>>& cbp);
void waitForEventOn1();
void coroPump();
void postAndWait1();
void coroPumpPost();
};
typedef test_group<test_data> coroutine_group;
typedef coroutine_group::object object;
coroutine_group coroutinegrp("coroutine");
template<> template<>
void object::test<1>()
{
set_test_name("From banana.cpp example program in Boost.Coroutine distro");
std::string buffer = "banananana";
std::string match = "nana";
std::string::iterator begin = buffer.begin();
std::string::iterator end = buffer.end();
#if defined(BOOST_CORO_POSIX_IMPL)
// std::cout << "Using Boost.Coroutine " << BOOST_CORO_POSIX_IMPL << '\n';
#else
// std::cout << "Using non-Posix Boost.Coroutine implementation" << std::endl;
#endif
typedef std::string::iterator signature(std::string::iterator,
std::string::iterator,
std::string,
match_coroutine_type::self&);
coroutine<std::string::iterator(void)> matcher
(boost::bind(static_cast<signature*>(match_substring),
begin,
end,
match,
_1));
std::string::iterator i = matcher();
/*==========================================================================*|
while(matcher && i != buffer.end()) {
std::cout <<"Match at: "<< std::distance(buffer.begin(), i)<<'\n';
i = matcher();
}
|*==========================================================================*/
size_t matches[] = { 2, 4, 6 };
for (size_t *mi(boost::begin(matches)), *mend(boost::end(matches));
mi != mend; ++mi, i = matcher())
{
ensure("more", matcher);
ensure("found", i != buffer.end());
ensure_equals("value", std::distance(buffer.begin(), i), *mi);
}
ensure("done", ! matcher);
}
// use static data so we can intersperse coroutine functions with the
// tests that engage them
ImmediateAPI immediateAPI;
std::string replyName, errorName, threw, stringdata;
LLSD result, errordata;
int which;
// reinit vars at the start of each test
void clear()
{
replyName.clear();
errorName.clear();
threw.clear();
stringdata.clear();
result = LLSD();
errordata = LLSD();
which = 0;
}
void explicit_wait(boost::shared_ptr<LLCoros::Future<std::string>::callback_t>& cbp)
void test_data::explicit_wait(boost::shared_ptr<LLCoros::Promise<std::string>>& cbp)
{
BEGIN
{
mSync.bump();
// The point of this test is to verify / illustrate suspending a
// coroutine for something other than an LLEventPump. In other
// words, this shows how to adapt to any async operation that
// provides a callback-style notification (and prove that it
// works).
LLCoros::Future<std::string> future;
// get the callback from that future
LLCoros::Future<std::string>::callback_t callback(future.make_callback());
// Perhaps we would send a request to a remote server and arrange
// for 'callback' to be called on response. Of course that might
// involve an adapter object from the actual callback signature to
// the signature of 'callback' -- in this case, void(std::string).
// For test purposes, instead of handing 'callback' (or the
// for cbp->set_value() to be called on response.
// For test purposes, instead of handing 'callback' (or an
// adapter) off to some I/O subsystem, we'll just pass it back to
// our caller.
cbp.reset(new LLCoros::Future<std::string>::callback_t(callback));
cbp = boost::make_shared<LLCoros::Promise<std::string>>();
LLCoros::Future<std::string> future = LLCoros::getFuture(*cbp);
ensure("Not yet", ! future);
// calling get() on the future causes us to suspend
debug("about to suspend");
stringdata = future.get();
ensure("Got it", bool(future));
mSync.bump();
ensure_equals("Got it", stringdata, "received");
}
END
}
template<> template<>
void object::test<1>()
{
set_test_name("explicit_wait");
DEBUG;
// Construct the coroutine instance that will run explicit_wait.
boost::shared_ptr<LLCoros::Promise<std::string>> respond;
LLCoros::instance().launch("test<1>",
[this, &respond](){ explicit_wait(respond); });
mSync.bump();
// When the coroutine waits for the future, it returns here.
debug("about to respond");
// Now we're the I/O subsystem delivering a result. This should make
// the coroutine ready.
respond->set_value("received");
// but give it a chance to wake up
mSync.yield();
// ensure the coroutine ran and woke up again with the intended result
ensure_equals(stringdata, "received");
}
void test_data::waitForEventOn1()
{
BEGIN
{
mSync.bump();
result = suspendUntilEventOn("source");
mSync.bump();
}
END
}
@ -266,28 +175,27 @@ namespace tut
template<> template<>
void object::test<2>()
{
clear();
set_test_name("explicit_wait");
set_test_name("waitForEventOn1");
DEBUG;
// Construct the coroutine instance that will run explicit_wait.
boost::shared_ptr<LLCoros::Future<std::string>::callback_t> respond;
LLCoros::instance().launch("test<2>",
boost::bind(explicit_wait, boost::ref(respond)));
// When the coroutine waits for the future, it returns here.
debug("about to respond");
// Now we're the I/O subsystem delivering a result. This immediately
// transfers control back to the coroutine.
(*respond)("received");
// ensure the coroutine ran and woke up again with the intended result
ensure_equals(stringdata, "received");
LLCoros::instance().launch("test<2>", [this](){ waitForEventOn1(); });
mSync.bump();
debug("about to send");
LLEventPumps::instance().obtain("source").post("received");
// give waitForEventOn1() a chance to run
mSync.yield();
debug("back from send");
ensure_equals(result.asString(), "received");
}
void waitForEventOn1()
void test_data::coroPump()
{
BEGIN
{
result = suspendUntilEventOn("source");
mSync.bump();
LLCoroEventPump waiter;
replyName = waiter.getName();
result = waiter.suspend();
mSync.bump();
}
END
}
@ -295,24 +203,27 @@ namespace tut
template<> template<>
void object::test<3>()
{
clear();
set_test_name("waitForEventOn1");
set_test_name("coroPump");
DEBUG;
LLCoros::instance().launch("test<3>", waitForEventOn1);
LLCoros::instance().launch("test<3>", [this](){ coroPump(); });
mSync.bump();
debug("about to send");
LLEventPumps::instance().obtain("source").post("received");
LLEventPumps::instance().obtain(replyName).post("received");
// give coroPump() a chance to run
mSync.yield();
debug("back from send");
ensure_equals(result.asString(), "received");
}
void waitForEventOn2()
{
void test_data::postAndWait1()
BEGIN
{
LLEventWithID pair = suspendUntilEventOn("reply", "error");
result = pair.first;
which = pair.second;
debug(STRINGIZE("result = " << result << ", which = " << which));
mSync.bump();
result = postAndSuspend(LLSDMap("value", 17), // request event
immediateAPI.getPump(), // requestPump
"reply1", // replyPump
"reply"); // request["reply"] = name
mSync.bump();
}
END
}
@ -320,475 +231,107 @@ namespace tut
template<> template<>
void object::test<4>()
{
clear();
set_test_name("waitForEventOn2 reply");
{
set_test_name("postAndWait1");
DEBUG;
LLCoros::instance().launch("test<4>", waitForEventOn2);
debug("about to send");
LLEventPumps::instance().obtain("reply").post("received");
debug("back from send");
LLCoros::instance().launch("test<4>", [this](){ postAndWait1(); });
ensure_equals(result.asInteger(), 18);
}
void test_data::coroPumpPost()
{
BEGIN
{
mSync.bump();
LLCoroEventPump waiter;
result = waiter.postAndSuspend(LLSDMap("value", 17),
immediateAPI.getPump(), "reply");
mSync.bump();
}
ensure_equals(result.asString(), "received");
ensure_equals("which pump", which, 0);
END
}
template<> template<>
void object::test<5>()
{
clear();
set_test_name("waitForEventOn2 error");
set_test_name("coroPumpPost");
DEBUG;
LLCoros::instance().launch("test<5>", waitForEventOn2);
debug("about to send");
LLEventPumps::instance().obtain("error").post("badness");
debug("back from send");
ensure_equals(result.asString(), "badness");
ensure_equals("which pump", which, 1);
LLCoros::instance().launch("test<5>", [this](){ coroPumpPost(); });
ensure_equals(result.asInteger(), 18);
}
void coroPump()
template <class PUMP>
void test()
{
BEGIN
PUMP pump(typeid(PUMP).name());
bool running{false};
LLSD data{LLSD::emptyArray()};
// start things off by posting once before even starting the listener
// coro
LL_DEBUGS() << "test() posting first" << LL_ENDL;
LLSD first{LLSDMap("desc", "first")("value", 0)};
bool consumed = pump.post(first);
ensure("should not have consumed first", ! consumed);
// now launch the coro
LL_DEBUGS() << "test() launching listener coro" << LL_ENDL;
running = true;
LLCoros::instance().launch(
"listener",
[&pump, &running, &data](){
// important for this test that we consume posted values
LLCoros::instance().set_consuming(true);
// should immediately retrieve 'first' without waiting
LL_DEBUGS() << "listener coro waiting for first" << LL_ENDL;
data.append(llcoro::suspendUntilEventOnWithTimeout(pump, 0.1, LLSD()));
// Don't use ensure() from within the coro -- ensure() failure
// throws tut::fail, which won't propagate out to the main
// test driver, which will result in an odd failure.
// Wait for 'second' because it's not already pending.
LL_DEBUGS() << "listener coro waiting for second" << LL_ENDL;
data.append(llcoro::suspendUntilEventOnWithTimeout(pump, 0.1, LLSD()));
// and wait for 'third', which should involve no further waiting
LL_DEBUGS() << "listener coro waiting for third" << LL_ENDL;
data.append(llcoro::suspendUntilEventOnWithTimeout(pump, 0.1, LLSD()));
LL_DEBUGS() << "listener coro done" << LL_ENDL;
running = false;
});
// back from coro at the point where it's waiting for 'second'
LL_DEBUGS() << "test() posting second" << LL_ENDL;
LLSD second{llsd::map("desc", "second", "value", 1)};
consumed = pump.post(second);
ensure("should have consumed second", consumed);
// This is a key point: even though we've post()ed the value for which
// the coroutine is waiting, it's actually still suspended until we
// pause for some other reason. The coroutine will only pick up one
// value at a time from our 'pump'. It's important to exercise the
// case when we post() two values before it picks up either.
LL_DEBUGS() << "test() posting third" << LL_ENDL;
LLSD third{llsd::map("desc", "third", "value", 2)};
consumed = pump.post(third);
ensure("should NOT yet have consumed third", ! consumed);
// now just wait for coro to finish -- which it eventually will, given
// that all its suspend calls have short timeouts.
while (running)
{
LLCoroEventPump waiter;
replyName = waiter.getName();
result = waiter.suspend();
LL_DEBUGS() << "test() waiting for coro done" << LL_ENDL;
llcoro::suspendUntilTimeout(0.1);
}
END
// okay, verify expected results
ensure_equals("should have received three values", data,
llsd::array(first, second, third));
LL_DEBUGS() << "test() done" << LL_ENDL;
}
template<> template<>
void object::test<6>()
{
clear();
set_test_name("coroPump");
DEBUG;
LLCoros::instance().launch("test<6>", coroPump);
debug("about to send");
LLEventPumps::instance().obtain(replyName).post("received");
debug("back from send");
ensure_equals(result.asString(), "received");
}
void coroPumps()
{
BEGIN
{
LLCoroEventPumps waiter;
replyName = waiter.getName0();
errorName = waiter.getName1();
LLEventWithID pair(waiter.suspend());
result = pair.first;
which = pair.second;
}
END
set_test_name("LLEventMailDrop");
tut::test<LLEventMailDrop>();
}
template<> template<>
void object::test<7>()
{
clear();
set_test_name("coroPumps reply");
DEBUG;
LLCoros::instance().launch("test<7>", coroPumps);
debug("about to send");
LLEventPumps::instance().obtain(replyName).post("received");
debug("back from send");
ensure_equals(result.asString(), "received");
ensure_equals("which pump", which, 0);
}
template<> template<>
void object::test<8>()
{
clear();
set_test_name("coroPumps error");
DEBUG;
LLCoros::instance().launch("test<8>", coroPumps);
debug("about to send");
LLEventPumps::instance().obtain(errorName).post("badness");
debug("back from send");
ensure_equals(result.asString(), "badness");
ensure_equals("which pump", which, 1);
}
void coroPumpsNoEx()
{
BEGIN
{
LLCoroEventPumps waiter;
replyName = waiter.getName0();
errorName = waiter.getName1();
result = waiter.suspendWithException();
}
END
}
template<> template<>
void object::test<9>()
{
clear();
set_test_name("coroPumpsNoEx");
DEBUG;
LLCoros::instance().launch("test<9>", coroPumpsNoEx);
debug("about to send");
LLEventPumps::instance().obtain(replyName).post("received");
debug("back from send");
ensure_equals(result.asString(), "received");
}
void coroPumpsEx()
{
BEGIN
{
LLCoroEventPumps waiter;
replyName = waiter.getName0();
errorName = waiter.getName1();
try
{
result = waiter.suspendWithException();
debug("no exception");
}
catch (const LLErrorEvent& e)
{
debug(STRINGIZE("exception " << e.what()));
errordata = e.getData();
}
}
END
}
template<> template<>
void object::test<10>()
{
clear();
set_test_name("coroPumpsEx");
DEBUG;
LLCoros::instance().launch("test<10>", coroPumpsEx);
debug("about to send");
LLEventPumps::instance().obtain(errorName).post("badness");
debug("back from send");
ensure("no result", result.isUndefined());
ensure_equals("got error", errordata.asString(), "badness");
}
void coroPumpsNoLog()
{
BEGIN
{
LLCoroEventPumps waiter;
replyName = waiter.getName0();
errorName = waiter.getName1();
result = waiter.suspendWithLog();
}
END
}
template<> template<>
void object::test<11>()
{
clear();
set_test_name("coroPumpsNoLog");
DEBUG;
LLCoros::instance().launch("test<11>", coroPumpsNoLog);
debug("about to send");
LLEventPumps::instance().obtain(replyName).post("received");
debug("back from send");
ensure_equals(result.asString(), "received");
}
void coroPumpsLog()
{
BEGIN
{
LLCoroEventPumps waiter;
replyName = waiter.getName0();
errorName = waiter.getName1();
WrapLLErrs capture;
threw = capture.catch_llerrs([&waiter, &debug](){
result = waiter.suspendWithLog();
debug("no exception");
});
}
END
}
template<> template<>
void object::test<12>()
{
clear();
set_test_name("coroPumpsLog");
DEBUG;
LLCoros::instance().launch("test<12>", coroPumpsLog);
debug("about to send");
LLEventPumps::instance().obtain(errorName).post("badness");
debug("back from send");
ensure("no result", result.isUndefined());
ensure_contains("got error", threw, "badness");
}
void postAndWait1()
{
BEGIN
{
result = postAndSuspend(LLSDMap("value", 17), // request event
immediateAPI.getPump(), // requestPump
"reply1", // replyPump
"reply"); // request["reply"] = name
}
END
}
template<> template<>
void object::test<13>()
{
clear();
set_test_name("postAndWait1");
DEBUG;
LLCoros::instance().launch("test<13>", postAndWait1);
ensure_equals(result.asInteger(), 18);
}
void postAndWait2()
{
BEGIN
{
LLEventWithID pair = ::postAndSuspend2(LLSDMap("value", 18),
immediateAPI.getPump(),
"reply2",
"error2",
"reply",
"error");
result = pair.first;
which = pair.second;
debug(STRINGIZE("result = " << result << ", which = " << which));
}
END
}
template<> template<>
void object::test<14>()
{
clear();
set_test_name("postAndWait2");
DEBUG;
LLCoros::instance().launch("test<14>", postAndWait2);
ensure_equals(result.asInteger(), 19);
ensure_equals(which, 0);
}
void postAndWait2_1()
{
BEGIN
{
LLEventWithID pair = ::postAndSuspend2(LLSDMap("value", 18)("fail", LLSD()),
immediateAPI.getPump(),
"reply2",
"error2",
"reply",
"error");
result = pair.first;
which = pair.second;
debug(STRINGIZE("result = " << result << ", which = " << which));
}
END
}
template<> template<>
void object::test<15>()
{
clear();
set_test_name("postAndWait2_1");
DEBUG;
LLCoros::instance().launch("test<15>", postAndWait2_1);
ensure_equals(result.asInteger(), 19);
ensure_equals(which, 1);
}
void coroPumpPost()
{
BEGIN
{
LLCoroEventPump waiter;
result = waiter.postAndSuspend(LLSDMap("value", 17),
immediateAPI.getPump(), "reply");
}
END
}
template<> template<>
void object::test<16>()
{
clear();
set_test_name("coroPumpPost");
DEBUG;
LLCoros::instance().launch("test<16>", coroPumpPost);
ensure_equals(result.asInteger(), 18);
}
void coroPumpsPost()
{
BEGIN
{
LLCoroEventPumps waiter;
LLEventWithID pair(waiter.postAndSuspend(LLSDMap("value", 23),
immediateAPI.getPump(), "reply", "error"));
result = pair.first;
which = pair.second;
}
END
}
template<> template<>
void object::test<17>()
{
clear();
set_test_name("coroPumpsPost reply");
DEBUG;
LLCoros::instance().launch("test<17>", coroPumpsPost);
ensure_equals(result.asInteger(), 24);
ensure_equals("which pump", which, 0);
}
void coroPumpsPost_1()
{
BEGIN
{
LLCoroEventPumps waiter;
LLEventWithID pair(
waiter.postAndSuspend(LLSDMap("value", 23)("fail", LLSD()),
immediateAPI.getPump(), "reply", "error"));
result = pair.first;
which = pair.second;
}
END
}
template<> template<>
void object::test<18>()
{
clear();
set_test_name("coroPumpsPost error");
DEBUG;
LLCoros::instance().launch("test<18>", coroPumpsPost_1);
ensure_equals(result.asInteger(), 24);
ensure_equals("which pump", which, 1);
}
void coroPumpsPostNoEx()
{
BEGIN
{
LLCoroEventPumps waiter;
result = waiter.postAndSuspendWithException(LLSDMap("value", 8),
immediateAPI.getPump(), "reply", "error");
}
END
}
template<> template<>
void object::test<19>()
{
clear();
set_test_name("coroPumpsPostNoEx");
DEBUG;
LLCoros::instance().launch("test<19>", coroPumpsPostNoEx);
ensure_equals(result.asInteger(), 9);
}
void coroPumpsPostEx()
{
BEGIN
{
LLCoroEventPumps waiter;
try
{
result = waiter.postAndSuspendWithException(
LLSDMap("value", 9)("fail", LLSD()),
immediateAPI.getPump(), "reply", "error");
debug("no exception");
}
catch (const LLErrorEvent& e)
{
debug(STRINGIZE("exception " << e.what()));
errordata = e.getData();
}
}
END
}
template<> template<>
void object::test<20>()
{
clear();
set_test_name("coroPumpsPostEx");
DEBUG;
LLCoros::instance().launch("test<20>", coroPumpsPostEx);
ensure("no result", result.isUndefined());
ensure_equals("got error", errordata.asInteger(), 10);
}
void coroPumpsPostNoLog()
{
BEGIN
{
LLCoroEventPumps waiter;
result = waiter.postAndSuspendWithLog(LLSDMap("value", 30),
immediateAPI.getPump(), "reply", "error");
}
END
}
template<> template<>
void object::test<21>()
{
clear();
set_test_name("coroPumpsPostNoLog");
DEBUG;
LLCoros::instance().launch("test<21>", coroPumpsPostNoLog);
ensure_equals(result.asInteger(), 31);
}
void coroPumpsPostLog()
{
BEGIN
{
LLCoroEventPumps waiter;
WrapLLErrs capture;
threw = capture.catch_llerrs(
[&waiter, &debug](){
result = waiter.postAndSuspendWithLog(
LLSDMap("value", 31)("fail", LLSD()),
immediateAPI.getPump(), "reply", "error");
debug("no exception");
});
}
END
}
template<> template<>
void object::test<22>()
{
clear();
set_test_name("coroPumpsPostLog");
DEBUG;
LLCoros::instance().launch("test<22>", coroPumpsPostLog);
ensure("no result", result.isUndefined());
ensure_contains("got error", threw, "32");
set_test_name("LLEventLogProxyFor<LLEventMailDrop>");
tut::test< LLEventLogProxyFor<LLEventMailDrop> >();
}
}
/*==========================================================================*|
#include <boost/context/guarded_stack_allocator.hpp>
namespace tut
{
template<> template<>
void object::test<23>()
{
set_test_name("stacksize");
std::cout << "default_stacksize: " << boost::context::guarded_stack_allocator::default_stacksize() << '\n';
}
} // namespace tut
|*==========================================================================*/

View File

@ -23,6 +23,7 @@
#include "stringize.h"
#include "tests/wrapllerrs.h"
#include "../test/catch_and_store_what_in.h"
#include "../test/debug.h"
#include <map>
#include <string>
@ -45,15 +46,6 @@ using boost::lambda::var;
using namespace llsd;
/*****************************************************************************
* Output control
*****************************************************************************/
#ifdef DEBUG_ON
using std::cout;
#else
static std::ostringstream cout;
#endif
/*****************************************************************************
* Example data, functions, classes
*****************************************************************************/
@ -155,13 +147,13 @@ struct Vars
/*------------- no-args (non-const, const, static) methods -------------*/
void method0()
{
cout << "method0()\n";
debug()("method0()");
i = 17;
}
void cmethod0() const
{
cout << 'c';
debug()('c', NONL);
const_cast<Vars*>(this)->method0();
}
@ -170,13 +162,13 @@ struct Vars
/*------------ Callable (non-const, const, static) methods -------------*/
void method1(const LLSD& obj)
{
cout << "method1(" << obj << ")\n";
debug()("method1(", obj, ")");
llsd = obj;
}
void cmethod1(const LLSD& obj) const
{
cout << 'c';
debug()('c', NONL);
const_cast<Vars*>(this)->method1(obj);
}
@ -196,12 +188,12 @@ struct Vars
else
vcp = std::string("'") + cp + "'";
cout << "methodna(" << b
<< ", " << i
<< ", " << f
<< ", " << d
<< ", " << vcp
<< ")\n";
debug()("methodna(", b,
", ", i,
", ", f,
", ", d,
", ", vcp,
")");
this->b = b;
this->i = i;
@ -218,12 +210,12 @@ struct Vars
vbin << std::hex << std::setfill('0') << std::setw(2) << unsigned(byte);
}
cout << "methodnb(" << "'" << s << "'"
<< ", " << uuid
<< ", " << date
<< ", '" << uri << "'"
<< ", " << vbin.str()
<< ")\n";
debug()("methodnb(", "'", s, "'",
", ", uuid,
", ", date,
", '", uri, "'",
", ", vbin.str(),
")");
this->s = s;
this->uuid = uuid;
@ -234,18 +226,30 @@ struct Vars
void cmethodna(NPARAMSa) const
{
cout << 'c';
debug()('c', NONL);
const_cast<Vars*>(this)->methodna(NARGSa);
}
void cmethodnb(NPARAMSb) const
{
cout << 'c';
debug()('c', NONL);
const_cast<Vars*>(this)->methodnb(NARGSb);
}
static void smethodna(NPARAMSa);
static void smethodnb(NPARAMSb);
static Debug& debug()
{
// Lazily initialize this Debug instance so it can notice if main()
// has forcibly set LOGTEST. If it were simply a static member, it
// would already have examined the environment variable by the time
// main() gets around to checking command-line switches. Since we have
// a global static Vars instance, the same would be true of a plain
// non-static member.
static Debug sDebug("Vars");
return sDebug;
}
};
/*------- Global Vars instance for free functions and static methods -------*/
static Vars g;
@ -253,25 +257,25 @@ static Vars g;
/*------------ Static Vars method implementations reference 'g' ------------*/
void Vars::smethod0()
{
cout << "smethod0() -> ";
debug()("smethod0() -> ", NONL);
g.method0();
}
void Vars::smethod1(const LLSD& obj)
{
cout << "smethod1(" << obj << ") -> ";
debug()("smethod1(", obj, ") -> ", NONL);
g.method1(obj);
}
void Vars::smethodna(NPARAMSa)
{
cout << "smethodna(...) -> ";
debug()("smethodna(...) -> ", NONL);
g.methodna(NARGSa);
}
void Vars::smethodnb(NPARAMSb)
{
cout << "smethodnb(...) -> ";
debug()("smethodnb(...) -> ", NONL);
g.methodnb(NARGSb);
}
@ -284,25 +288,25 @@ void clear()
/*------------------- Free functions also reference 'g' --------------------*/
void free0()
{
cout << "free0() -> ";
g.debug()("free0() -> ", NONL);
g.method0();
}
void free1(const LLSD& obj)
{
cout << "free1(" << obj << ") -> ";
g.debug()("free1(", obj, ") -> ", NONL);
g.method1(obj);
}
void freena(NPARAMSa)
{
cout << "freena(...) -> ";
g.debug()("freena(...) -> ", NONL);
g.methodna(NARGSa);
}
void freenb(NPARAMSb)
{
cout << "freenb(...) -> ";
g.debug()("freenb(...) -> ", NONL);
g.methodnb(NARGSb);
}
@ -313,6 +317,7 @@ namespace tut
{
struct lleventdispatcher_data
{
Debug debug{"test"};
WrapLLErrs redirect;
Dispatcher work;
Vars v;
@ -431,12 +436,17 @@ namespace tut
// Same for freenb() et al.
params = LLSDMap("a", LLSDArray("b")("i")("f")("d")("cp"))
("b", LLSDArray("s")("uuid")("date")("uri")("bin"));
cout << "params:\n" << params << "\nparams[\"a\"]:\n" << params["a"] << "\nparams[\"b\"]:\n" << params["b"] << std::endl;
debug("params:\n",
params, "\n"
"params[\"a\"]:\n",
params["a"], "\n"
"params[\"b\"]:\n",
params["b"]);
// default LLSD::Binary value
std::vector<U8> binary;
for (size_t ix = 0, h = 0xaa; ix < 6; ++ix, h += 0x11)
{
binary.push_back(h);
binary.push_back((U8)h);
}
// Full defaults arrays. We actually don't care what the LLUUID or
// LLDate values are, as long as they're different from the
@ -448,7 +458,8 @@ namespace tut
(LLDate::now())
(LLURI("http://www.ietf.org/rfc/rfc3986.txt"))
(binary));
cout << "dft_array_full:\n" << dft_array_full << std::endl;
debug("dft_array_full:\n",
dft_array_full);
// Partial defaults arrays.
foreach(LLSD::String a, ab)
{
@ -457,7 +468,8 @@ namespace tut
llsd_copy_array(dft_array_full[a].beginArray() + partition,
dft_array_full[a].endArray());
}
cout << "dft_array_partial:\n" << dft_array_partial << std::endl;
debug("dft_array_partial:\n",
dft_array_partial);
foreach(LLSD::String a, ab)
{
@ -473,7 +485,10 @@ namespace tut
dft_map_partial[a][params[a][ix].asString()] = dft_array_full[a][ix];
}
}
cout << "dft_map_full:\n" << dft_map_full << "\ndft_map_partial:\n" << dft_map_partial << '\n';
debug("dft_map_full:\n",
dft_map_full, "\n"
"dft_map_partial:\n",
dft_map_partial);
// (Free function | static method) with (no | arbitrary) params,
// map style, no (empty array) defaults
@ -918,7 +933,12 @@ namespace tut
params[a].endArray()),
dft_array_partial[a]);
}
cout << "allreq:\n" << allreq << "\nleftreq:\n" << leftreq << "\nrightdft:\n" << rightdft << std::endl;
debug("allreq:\n",
allreq, "\n"
"leftreq:\n",
leftreq, "\n"
"rightdft:\n",
rightdft);
// Generate maps containing parameter names not provided by the
// dft_map_partial maps.
@ -930,7 +950,8 @@ namespace tut
skipreq[a].erase(me.first);
}
}
cout << "skipreq:\n" << skipreq << std::endl;
debug("skipreq:\n",
skipreq);
LLSD groups(LLSDArray // array of groups
@ -975,7 +996,11 @@ namespace tut
LLSD names(grp[0]);
LLSD required(grp[1][0]);
LLSD optional(grp[1][1]);
cout << "For " << names << ",\n" << "required:\n" << required << "\noptional:\n" << optional << std::endl;
debug("For ", names, ",\n",
"required:\n",
required, "\n"
"optional:\n",
optional);
// Loop through 'names'
foreach(LLSD nm, inArray(names))
@ -1145,7 +1170,7 @@ namespace tut
std::vector<U8> binary;
for (size_t h(0x01), i(0); i < 5; h+= 0x22, ++i)
{
binary.push_back(h);
binary.push_back((U8)h);
}
LLSD args(LLSDMap("a", LLSDArray(true)(17)(3.14)(123.456)("char*"))
("b", LLSDArray("string")
@ -1163,7 +1188,7 @@ namespace tut
}
// Adjust expect["a"]["cp"] for special Vars::cp treatment.
expect["a"]["cp"] = std::string("'") + expect["a"]["cp"].asString() + "'";
cout << "expect: " << expect << '\n';
debug("expect: ", expect);
// Use substantially the same logic for args and argsplus
LLSD argsarrays(LLSDArray(args)(argsplus));
@ -1218,7 +1243,8 @@ namespace tut
{
array_overfull[a].append("bogus");
}
cout << "array_full: " << array_full << "\narray_overfull: " << array_overfull << std::endl;
debug("array_full: ", array_full, "\n"
"array_overfull: ", array_overfull);
// We rather hope that LLDate::now() will generate a timestamp
// distinct from the one it generated in the constructor, moments ago.
ensure_not_equals("Timestamps too close",
@ -1233,7 +1259,8 @@ namespace tut
map_overfull[a] = map_full[a];
map_overfull[a]["extra"] = "ignore";
}
cout << "map_full: " << map_full << "\nmap_overfull: " << map_overfull << std::endl;
debug("map_full: ", map_full, "\n"
"map_overfull: ", map_overfull);
LLSD expect(map_full);
// Twiddle the const char* param.
expect["a"]["cp"] = std::string("'") + expect["a"]["cp"].asString() + "'";
@ -1248,7 +1275,7 @@ namespace tut
// so won't bother returning it. Predict that behavior to match the
// LLSD values.
expect["a"].erase("b");
cout << "expect: " << expect << std::endl;
debug("expect: ", expect);
// For this test, calling functions registered with different sets of
// parameter defaults should make NO DIFFERENCE WHATSOEVER. Every call
// should pass all params.

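The hunk above replaces the old DEBUG_ON / std::ostringstream switch with a Debug helper from ../test/debug.h, constructed lazily inside Vars::debug() so that the LOGTEST environment variable is examined on first use rather than during static initialization. The helper's real interface is not shown in this diff; the self-contained sketch below only approximates the pattern it appears to follow (a tag string, a variadic call operator, a trailing NONL tag that suppresses the newline). The NonlTag type, the LOGTEST check and sketchDebug() are illustrative assumptions, not the actual debug.h implementation.

// Sketch only: a stand-in for the test Debug helper, not the real debug.h.
#include <cstdlib>
#include <iostream>
#include <sstream>
#include <string>
#include <utility>

struct NonlTag {};                 // pass as the last argument to skip the newline
static const NonlTag NONL{};

class Debug
{
public:
    explicit Debug(std::string tag):
        mTag(std::move(tag)),
        // The environment is consulted only when the instance is constructed,
        // which is why Vars::debug() above builds it inside a function-local
        // static instead of a plain static member.
        mEnabled(std::getenv("LOGTEST") != nullptr)
    {}

    template <typename... ARGS>
    void operator()(ARGS&&... args) const
    {
        if (! mEnabled)
            return;
        std::ostringstream out;
        bool newline = true;
        // Stream every argument; a NonlTag argument just clears the newline flag.
        (append(out, newline, std::forward<ARGS>(args)), ...);
        std::cout << mTag << ": " << out.str() << (newline ? "\n" : "") << std::flush;
    }

private:
    template <typename T>
    static void append(std::ostream& out, bool&, T&& arg)
    {
        out << std::forward<T>(arg);
    }
    static void append(std::ostream&, bool& newline, NonlTag)
    {
        newline = false;
    }

    std::string mTag;
    bool mEnabled;
};

// Same shape as Vars::debug() above: constructed on the first call.
static Debug& sketchDebug()
{
    static Debug sDebug("sketch");
    return sDebug;
}

int main()
{
    sketchDebug()("method0()");             // one complete line
    sketchDebug()("prefix -> ", NONL);      // line left open, like debug()('c', NONL)
    sketchDebug()("method1(", 42, ")");
    return 0;
}

Because construction happens only on the first sketchDebug() call, anything that adjusts the logging environment beforehand is still honored, which is the point the comment in Vars::debug() is making.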

@ -36,9 +36,12 @@
// other Linden headers
#include "../test/lltut.h"
#include "stringize.h"
#include "llsdutil.h"
#include "listener.h"
#include "tests/wrapllerrs.h"
#include <typeinfo>
/*****************************************************************************
* Test classes
*****************************************************************************/
@ -401,6 +404,78 @@ namespace tut
throttle.post(";17");
ensure_equals("17", cat.result, "136;12;17"); // "17" delivered
}
template<class PUMP>
void test()
{
PUMP pump(typeid(PUMP).name());
LLSD data{LLSD::emptyArray()};
bool consumed{true};
// listener that appends to 'data'
// but that also returns the current value of 'consumed'
// Instantiate this separately because we're going to listen()
// multiple times with the same lambda: LLEventMailDrop only replays
// queued events on a new listen() call.
auto lambda =
[&data, &consumed](const LLSD& event)->bool
{
data.append(event);
return consumed;
};
{
LLTempBoundListener conn = pump.listen("lambda", lambda);
pump.post("first");
}
// first post() should certainly be received by listener
ensure_equals("first", data, llsd::array("first"));
// the question is, since consumed was true, did it queue the value?
data = LLSD::emptyArray();
{
// if it queued the value, it would be delivered on subsequent
// listen() call
LLTempBoundListener conn = pump.listen("lambda", lambda);
}
ensure_equals("empty1", data, LLSD::emptyArray());
data = LLSD::emptyArray();
// now let's NOT consume the posted data
consumed = false;
{
LLTempBoundListener conn = pump.listen("lambda", lambda);
pump.post("second");
pump.post("third");
}
// the two events still arrive
ensure_equals("second,third1", data, llsd::array("second", "third"));
data = LLSD::emptyArray();
{
// when we reconnect, these should be delivered again
// but this time they should be consumed
consumed = true;
LLTempBoundListener conn = pump.listen("lambda", lambda);
}
// unconsumed events were delivered again
ensure_equals("second,third2", data, llsd::array("second", "third"));
data = LLSD::emptyArray();
{
// when we reconnect this time, no more unconsumed events
LLTempBoundListener conn = pump.listen("lambda", lambda);
}
ensure_equals("empty2", data, LLSD::emptyArray());
}
template<> template<>
void filter_object::test<6>()
{
set_test_name("LLEventMailDrop");
tut::test<LLEventMailDrop>();
}
template<> template<>
void filter_object::test<7>()
{
set_test_name("LLEventLogProxyFor<LLEventMailDrop>");
tut::test< LLEventLogProxyFor<LLEventMailDrop> >();
}
} // namespace tut
/*****************************************************************************

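The templated test above leans on one specific LLEventMailDrop behaviour: a posted event that no listener consumes is held, replayed to the next listener that connects, and kept in the queue until some listener finally returns true. The toy class below is not the viewer's LLEventMailDrop; it is a minimal hand-rolled illustration of that queue-and-replay contract, with a plain std::string payload and a single listener slot standing in for the real LLSD events and pump connections.

// Illustration only: hand-rolled queue-and-replay pump, not LLEventMailDrop.
#include <functional>
#include <iostream>
#include <string>
#include <utility>
#include <vector>

class MailDropSketch
{
public:
    using Listener = std::function<bool(const std::string&)>;

    // Connecting a listener first replays anything still queued; events the
    // new listener does not consume go straight back into the queue.
    void listen(Listener listener)
    {
        mListener = std::move(listener);
        std::vector<std::string> pending;
        pending.swap(mQueue);
        for (const auto& event : pending)
        {
            if (! mListener(event))
                mQueue.push_back(event);
        }
    }

    // Stand-in for LLTempBoundListener going out of scope.
    void stopListening() { mListener = nullptr; }

    // Deliver to the current listener if any; queue whatever goes
    // undelivered or unconsumed.
    void post(const std::string& event)
    {
        if (! mListener || ! mListener(event))
            mQueue.push_back(event);
    }

private:
    Listener mListener;
    std::vector<std::string> mQueue;
};

int main()
{
    MailDropSketch pump;
    pump.post("first");                         // nobody listening yet: queued
    pump.listen([](const std::string& event)    // replayed on connect, consumed
                { std::cout << event << '\n'; return true; });
    pump.post("second");                        // delivered immediately
    return 0;
}

Walking this toy through the same sequence as the test (consume while connected, reconnect, post twice without consuming, reconnect twice more) reproduces the empty1, "second,third2" and empty2 expectations asserted above.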

@ -305,4 +305,19 @@ namespace tut
std::cout << center("int", '=', margin) << std::endl;
catch_several(throw_int, "throw_int");
}
template<> template<>
void object::test<2>()
{
set_test_name("reporting exceptions");
try
{
LLTHROW(LLException("badness"));
}
catch (...)
{
LOG_UNHANDLED_EXCEPTION("llexception test<2>()");
}
}
} // namespace tut

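The new test<2> throws with LLTHROW and reports from a catch(...) handler via LOG_UNHANDLED_EXCEPTION; both belong to the llexception machinery under test and are not reproduced in this diff. The standalone snippet below only illustrates the underlying C++ technique such a reporter relies on, rethrowing inside the handler to discover what exception is in flight. report_unhandled() is an invented name for illustration, not the viewer macro.

// Generic catch-all reporting sketch; not the viewer's LOG_UNHANDLED_EXCEPTION.
#include <exception>
#include <iostream>
#include <stdexcept>
#include <string>

void report_unhandled(const std::string& context)
{
    try
    {
        throw;                                   // rethrow the in-flight exception
    }
    catch (const std::exception& e)
    {
        std::cerr << context << ": " << e.what() << std::endl;
    }
    catch (...)
    {
        std::cerr << context << ": unknown exception" << std::endl;
    }
}

int main()
{
    try
    {
        throw std::runtime_error("badness");
    }
    catch (...)
    {
        report_unhandled("sketch main()");       // analogous to the macro call above
    }
    return 0;
}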

@ -41,7 +41,6 @@
#include <boost/scoped_ptr.hpp>
// other Linden headers
#include "../test/lltut.h"
#include "wrapllerrs.h"
struct Badness: public std::runtime_error
{
@ -112,24 +111,22 @@ namespace tut
void object::test<2>()
{
ensure_equals(Unkeyed::instanceCount(), 0);
Unkeyed* dangling = NULL;
std::weak_ptr<Unkeyed> dangling;
{
Unkeyed one;
ensure_equals(Unkeyed::instanceCount(), 1);
Unkeyed* found = Unkeyed::getInstance(&one);
ensure_equals(found, &one);
std::weak_ptr<Unkeyed> found = one.getWeak();
ensure(! found.expired());
{
boost::scoped_ptr<Unkeyed> two(new Unkeyed);
ensure_equals(Unkeyed::instanceCount(), 2);
Unkeyed* found = Unkeyed::getInstance(two.get());
ensure_equals(found, two.get());
}
ensure_equals(Unkeyed::instanceCount(), 1);
// store an unwise pointer to a temp Unkeyed instance
dangling = &one;
// store a weak pointer to a temp Unkeyed instance
dangling = found;
} // make that instance vanish
// check the now-invalid pointer to the destroyed instance
ensure("getInstance(T*) failed to track destruction", ! Unkeyed::getInstance(dangling));
ensure("weak_ptr<Unkeyed> failed to track destruction", dangling.expired());
ensure_equals(Unkeyed::instanceCount(), 0);
}
@ -142,7 +139,8 @@ namespace tut
// reimplement LLInstanceTracker using, say, a hash map instead of a
// std::map. We DO insist that every key appear exactly once.
typedef std::vector<std::string> StringVector;
StringVector keys(Keyed::beginKeys(), Keyed::endKeys());
auto snap = Keyed::key_snapshot();
StringVector keys(snap.begin(), snap.end());
std::sort(keys.begin(), keys.end());
StringVector::const_iterator ki(keys.begin());
ensure_equals(*ki++, "one");
@ -153,17 +151,15 @@ namespace tut
ensure("didn't reach end", ki == keys.end());
// Use a somewhat different approach to order independence with
// beginInstances(): explicitly capture the instances we know in a
// instance_snapshot(): explicitly capture the instances we know in a
// set, and delete them as we iterate through.
typedef std::set<Keyed*> InstanceSet;
InstanceSet instances;
instances.insert(&one);
instances.insert(&two);
instances.insert(&three);
for (Keyed::instance_iter ii(Keyed::beginInstances()), iend(Keyed::endInstances());
ii != iend; ++ii)
for (auto& ref : Keyed::instance_snapshot())
{
Keyed& ref = *ii;
ensure_equals("spurious instance", instances.erase(&ref), 1);
}
ensure_equals("unreported instance", instances.size(), 0);
@ -180,11 +176,10 @@ namespace tut
instances.insert(&two);
instances.insert(&three);
for (Unkeyed::instance_iter ii(Unkeyed::beginInstances()), iend(Unkeyed::endInstances()); ii != iend; ++ii)
{
Unkeyed& ref = *ii;
ensure_equals("spurious instance", instances.erase(&ref), 1);
}
for (auto& ref : Unkeyed::instance_snapshot())
{
ensure_equals("spurious instance", instances.erase(&ref), 1);
}
ensure_equals("unreported instance", instances.size(), 0);
}
@ -192,49 +187,49 @@ namespace tut
template<> template<>
void object::test<5>()
{
set_test_name("delete Keyed with outstanding instance_iter");
std::string what;
Keyed* keyed = new Keyed("delete Keyed with outstanding instance_iter");
{
WrapLLErrs wrapper;
Keyed::instance_iter i(Keyed::beginInstances());
what = wrapper.catch_llerrs([&keyed](){
delete keyed;
});
}
ensure(! what.empty());
std::string desc("delete Keyed with outstanding instance_snapshot");
set_test_name(desc);
Keyed* keyed = new Keyed(desc);
// capture a snapshot but do not yet traverse it
auto snapshot = Keyed::instance_snapshot();
// delete the one instance
delete keyed;
// traversing the snapshot should reflect the deletion
// avoid ensure_equals() because it requires the ability to stream the
// two values to std::ostream
ensure(snapshot.begin() == snapshot.end());
}
template<> template<>
void object::test<6>()
{
set_test_name("delete Keyed with outstanding key_iter");
std::string what;
Keyed* keyed = new Keyed("delete Keyed with outstanding key_it");
{
WrapLLErrs wrapper;
Keyed::key_iter i(Keyed::beginKeys());
what = wrapper.catch_llerrs([&keyed](){
delete keyed;
});
}
ensure(! what.empty());
std::string desc("delete Keyed with outstanding key_snapshot");
set_test_name(desc);
Keyed* keyed = new Keyed(desc);
// capture a snapshot but do not yet traverse it
auto snapshot = Keyed::key_snapshot();
// delete the one instance
delete keyed;
// traversing the snapshot should reflect the deletion
// avoid ensure_equals() because it requires the ability to stream the
// two values to std::ostream
ensure(snapshot.begin() == snapshot.end());
}
template<> template<>
void object::test<7>()
{
set_test_name("delete Unkeyed with outstanding instance_iter");
set_test_name("delete Unkeyed with outstanding instance_snapshot");
std::string what;
Unkeyed* unkeyed = new Unkeyed;
{
WrapLLErrs wrapper;
Unkeyed::instance_iter i(Unkeyed::beginInstances());
what = wrapper.catch_llerrs([&unkeyed](){
delete unkeyed;
});
}
ensure(! what.empty());
// capture a snapshot but do not yet traverse it
auto snapshot = Unkeyed::instance_snapshot();
// delete the one instance
delete unkeyed;
// traversing the snapshot should reflect the deletion
// avoid ensure_equals() because it requires the ability to stream the
// two values to std::ostream
ensure(snapshot.begin() == snapshot.end());
}
template<> template<>
@ -246,11 +241,9 @@ namespace tut
// We can't use the iterator-range InstanceSet constructor because
// beginInstances() returns an iterator that dereferences to an
// Unkeyed&, not an Unkeyed*.
for (Unkeyed::instance_iter uki(Unkeyed::beginInstances()),
ukend(Unkeyed::endInstances());
uki != ukend; ++uki)
for (auto& ref : Unkeyed::instance_snapshot())
{
existing.insert(&*uki);
existing.insert(&ref);
}
try
{
@ -273,11 +266,9 @@ namespace tut
// instances was also present in the original set. If that's not true,
// it's because our new Unkeyed ended up in the updated set despite
// its constructor exception.
for (Unkeyed::instance_iter uki(Unkeyed::beginInstances()),
ukend(Unkeyed::endInstances());
uki != ukend; ++uki)
for (auto& ref : Unkeyed::instance_snapshot())
{
ensure("failed to remove instance", existing.find(&*uki) != existing.end());
ensure("failed to remove instance", existing.find(&ref) != existing.end());
}
}
} // namespace tut

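Two idioms run through this file's rewrite: std::weak_ptr instead of a raw pointer for noticing destruction (test<2> now obtains one from getWeak()), and instance_snapshot() / key_snapshot() ranges instead of live iterators so instances may be deleted mid-traversal. The first idiom is plain standard C++ and is shown in isolation below; Widget is an invented stand-in for Unkeyed, and the snippet is not the LLInstanceTracker implementation.

// Generic weak_ptr destruction-tracking sketch; Widget stands in for Unkeyed.
#include <cassert>
#include <memory>

struct Widget {};

int main()
{
    std::weak_ptr<Widget> dangling;
    {
        auto one = std::make_shared<Widget>();
        dangling = one;                   // observe without owning
        assert(! dangling.expired());     // still alive inside the block
    }                                     // 'one' destroyed here
    assert(dangling.expired());           // the observer notices the destruction
    return 0;
}

The snapshot half of the rewrite is specific to LLInstanceTracker, which is why the tests above exercise it directly through instance_snapshot() and key_snapshot() rather than through a standalone sketch.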
Some files were not shown because too many files have changed in this diff.