# Conflicts:
#	autobuild.xml
#	indra/cmake/ViewerMiscLibs.cmake
#	indra/llmath/llvolume.cpp
#	indra/newview/fonts/DejaVu-license.txt
#	indra/newview/fonts/DejaVuSans-Bold.ttf
#	indra/newview/fonts/DejaVuSans-BoldOblique.ttf
#	indra/newview/fonts/DejaVuSans-Oblique.ttf
#	indra/newview/fonts/DejaVuSans.ttf
#	indra/newview/fonts/DejaVuSansMono.ttf
#	indra/newview/llviewermenu.cpp
master
Ansariel 2023-12-18 19:25:49 +01:00
commit b32cd27b4c
1098 changed files with 58473 additions and 50654 deletions

View File

@ -4,54 +4,93 @@ on:
workflow_dispatch:
pull_request:
push:
branches: [main, contribute]
branches: ["*"]
tags: ["*"]
jobs:
build:
strategy:
matrix:
runner: [windows-large]
configuration: [ReleaseOS]
addrsize: [64]
runner: [windows-large, macos-12-xl]
configuration: [Release, ReleaseOS]
python-version: ["3.11"]
include:
- runner: windows-large
- runner: macos-12-xl
developer_dir: "/Applications/Xcode_14.0.1.app/Contents/Developer"
exclude:
- runner: macos-12-xl
configuration: ReleaseOS
addrsize: 32
runs-on: ${{ matrix.runner }}
outputs:
viewer_channel: ${{ steps.build.outputs.viewer_channel }}
viewer_version: ${{ steps.build.outputs.viewer_version }}
imagename: ${{ steps.build.outputs.imagename }}
env:
AUTOBUILD_ADDRSIZE: 64
AUTOBUILD_BUILD_ID: ${{ github.run_id }}
AUTOBUILD_CONFIGURATION: ${{ matrix.configuration }}
AUTOBUILD_ADDRSIZE: ${{ matrix.addrsize }}
# authorizes fetching private constituent packages
AUTOBUILD_GITHUB_TOKEN: ${{ secrets.SHARED_AUTOBUILD_GITHUB_TOKEN }}
AUTOBUILD_INSTALLABLE_CACHE: ${{ github.workspace }}/.autobuild-installables
AUTOBUILD_VARIABLES_FILE: ${{ github.workspace }}/.build-variables/variables
AUTOBUILD_VSVER: "170" # vs2k22
LOGFAIL: debug # Show details when tests fail
AUTOBUILD_VSVER: "170"
DEVELOPER_DIR: ${{ matrix.developer_dir }}
# Ensure that Linden viewer builds engage Bugsplat.
BUGSPLAT_DB: ${{ matrix.configuration != 'ReleaseOS' && 'SecondLife_Viewer_2018' || '' }}
BUGSPLAT_PASS: ${{ secrets.BUGSPLAT_PASS }}
BUGSPLAT_USER: ${{ secrets.BUGSPLAT_USER }}
build_coverity: false
build_log_dir: ${{ github.workspace }}/.logs
build_viewer: true
BUILDSCRIPTS_SHARED: ${{ github.workspace }}/.shared
# extracted and committed to viewer repo
BUILDSCRIPTS_SUPPORT_FUNCTIONS: ${{ github.workspace }}/buildscripts_support_functions
GIT_REF: ${{ github.head_ref || github.ref }}
LL_SKIP_REQUIRE_SYSROOT: 1
# Setting this variable directs Linden's TUT test driver code to capture
# test-program log output at the specified level, but to display it only if
# the individual test fails.
LOGFAIL: DEBUG
master_message_template_checkout: ${{ github.workspace }}/.master-message-template
# Only set variants to the one configuration: don't let build.sh loop
# over variants, let GitHub distribute variants over multiple hosts.
variants: ${{ matrix.configuration }}
steps:
- name: Checkout code
uses: actions/checkout@v3
uses: actions/checkout@v4
with:
ref: ${{ github.event.pull_request.head.sha || github.sha }}
- name: Setup python
uses: actions/setup-python@v4
with:
python-version: ${{ matrix.python-version }}
- name: Checkout build variables
uses: actions/checkout@v3
uses: actions/checkout@v4
with:
repository: secondlife/build-variables
ref: viewer
path: .build-variables
- name: Checkout master-message-template
uses: actions/checkout@v4
with:
repository: secondlife/master-message-template
path: .master-message-template
- name: Install autobuild and python dependencies
run: pip3 install autobuild llbase
run: pip3 install autobuild llsd
- name: Cache autobuild packages
uses: actions/cache@v3
id: cache-installables
with:
path: .autobuild-installables
key: ${{ runner.os }}-${{ matrix.addrsize }}-${{ matrix.configuration }}-${{ hashFiles('autobuild.xml') }}
key: ${{ runner.os }}-64-${{ matrix.configuration }}-${{ hashFiles('autobuild.xml') }}
restore-keys: |
${{ runner.os }}-${{ matrix.addrsize }}-${{ matrix.configuration }}-
${{ runner.os }}-${{ matrix.addrsize }}-
${{ runner.os }}-64-${{ matrix.configuration }}-
${{ runner.os }}-64-
- name: Install windows dependencies
if: runner.os == 'Windows'
@ -63,31 +102,270 @@ jobs:
env:
RUNNER_OS: ${{ runner.os }}
run: |
# set up things the viewer's build.sh script expects
set -x
mkdir -p "$build_log_dir"
mkdir -p "$BUILDSCRIPTS_SHARED/packages/lib/python"
source "$BUILDSCRIPTS_SUPPORT_FUNCTIONS"
if [[ "$OSTYPE" =~ cygwin|msys ]]
then
native_path() { cygpath --windows "$1"; }
shell_path() { cygpath --unix "$1"; }
else
native_path() { echo "$1"; }
shell_path() { echo "$1"; }
fi
finalize()
{
case "$1" in
true|0)
record_success "Build Succeeded"
;;
*)
record_failure "Build Failed with $1"
;;
esac
}
initialize_build()
{
echo "initialize_build"
}
initialize_version()
{
export revision="$AUTOBUILD_BUILD_ID"
}
python_cmd()
{
if [[ "x${1:0:1}" == "x-" ]] # -m, -c, etc.
then # if $1 is a switch, don't try to twiddle paths
"$(shell_path "$PYTHON_COMMAND")" "$@"
elif [[ "$(basename "$1")" == "codeticket.py" ]]
then # ignore any attempt to contact codeticket
echo "## $@"
else # running a script at an explicit path: fix path for Python
local script="$1"
shift
"$(shell_path "$PYTHON_COMMAND")" "$(native_path "$script")" "$@"
fi
}
repo_branch()
{
git -C "$1" branch | grep '^* ' | cut -c 3-
}
record_dependencies_graph()
{
echo "TODO: generate and post dependency graph"
}
# Since we're not uploading to codeticket, DO NOT sleep for minutes.
sleep()
{
echo "Not sleeping for $1 seconds"
}
export -f native_path shell_path finalize initialize_build initialize_version
export -f python_cmd repo_branch record_dependencies_graph sleep
## Useful for diagnosing Windows LLProcess/LLLeap test failures
##export APR_LOG="${RUNNER_TEMP}/apr.log"
export arch=$(uname | cut -b-6)
# Surprise! GH Windows runner's MINGW6 is a $arch value we've never
# seen before, so numerous tests don't know about it.
[[ "$arch" == "MINGW6" ]] && arch=CYGWIN
export AUTOBUILD="$(which autobuild)"
# Build with a tag like "Second_Life_Project_Shiny#abcdef0" to get a
# viewer channel "Second Life Project Shiny" (ignoring "#hash",
# needed to disambiguate tags).
if [[ "$GITHUB_REF_TYPE" == "tag" && "${GITHUB_REF_NAME:0:12}" == "Second_Life_" ]]
then viewer_channel="${GITHUB_REF_NAME%#*}"
export viewer_channel="${viewer_channel//_/ }"
else export viewer_channel="Second Life Test"
fi
echo "viewer_channel=$viewer_channel" >> "$GITHUB_OUTPUT"
# On windows we need to point the build to the correct python
# as neither CMake's FindPython nor our custom Python.cmake module
# will resolve the correct interpreter location.
if [[ "$RUNNER_OS" == "Windows" ]]; then
export PYTHON="$(cygpath -m "$(which python)")"
export PYTHON="$(native_path "$(which python)")"
echo "Python location: $PYTHON"
export PYTHON_COMMAND="$PYTHON"
else
export PYTHON_COMMAND="python3"
fi
autobuild configure -- -DVIEWER_CHANNEL="Second Life Test ${GIT_REF##*/}"
autobuild build --no-configure
export PYTHON_COMMAND_NATIVE="$(native_path "$PYTHON_COMMAND")"
# Find artifacts
if [[ "$RUNNER_OS" == "Windows" ]]; then
installer_path=$(find ./build-*/newview/ | grep '_Setup\.exe')
installer_name="$(basename $installer_path)"
elif [[ "$RUNNER_OS" == "macOS" ]]; then
installer_path=$(find ./build-*/newview/ | grep '\.dmg')
installer_name="$(basename $installer_path)"
./build.sh
# Each artifact is downloaded as a distinct .zip file. Multiple jobs
# (per the matrix above) writing the same filepath to the same
# artifact name will *overwrite* that file. Moreover, they can
# interfere with each other, causing the upload to fail.
# https://github.com/actions/upload-artifact#uploading-to-the-same-artifact
# Given the size of our installers, and the fact that we typically
# only want to download just one instead of a single zip containing
# several, generate a distinct artifact name for each installer.
# If the matrix above can run multiple builds on the same
# platform, we must disambiguate on more than the platform name.
# e.g. if we were still running Windows 32-bit builds, we'd need to
# qualify the artifact with bit width.
if [[ "$AUTOBUILD_CONFIGURATION" == "ReleaseOS" ]]
then cfg_suffix='OS'
else cfg_suffix=''
fi
echo "artifact=$RUNNER_OS$cfg_suffix" >> $GITHUB_OUTPUT
echo "installer_path=$installer_path" >> $GITHUB_OUTPUT
echo "installer_name=$installer_name" >> $GITHUB_OUTPUT
- name: Upload installer
- name: Upload executable
if: matrix.configuration != 'ReleaseOS' && steps.build.outputs.viewer_app
uses: actions/upload-artifact@v3
with:
name: ${{ steps.build.outputs.installer_name }}
path: ${{ steps.build.outputs.installer_path }}
name: "${{ steps.build.outputs.artifact }}-app"
path: |
${{ steps.build.outputs.viewer_app }}
# The other upload of nontrivial size is the symbol file. Use a distinct
# artifact for that too.
- name: Upload symbol file
if: matrix.configuration != 'ReleaseOS'
uses: actions/upload-artifact@v3
with:
name: "${{ steps.build.outputs.artifact }}-symbols"
path: |
${{ steps.build.outputs.symbolfile }}
- name: Upload metadata
if: matrix.configuration != 'ReleaseOS'
uses: actions/upload-artifact@v3
with:
name: "${{ steps.build.outputs.artifact }}-metadata"
# emitted by build.sh, possibly multiple lines
path: |
${{ steps.build.outputs.metadata }}
- name: Upload physics package
uses: actions/upload-artifact@v3
# should only be set for viewer-private
if: matrix.configuration != 'ReleaseOS' && steps.build.outputs.physicstpv
with:
name: "${{ steps.build.outputs.artifact }}-physics"
# emitted by build.sh, zero or one lines
path: |
${{ steps.build.outputs.physicstpv }}
sign-and-package-windows:
needs: build
runs-on: windows
steps:
- name: Sign and package Windows viewer
uses: secondlife/viewer-build-util/sign-pkg-windows@v1
with:
vault_uri: "${{ secrets.AZURE_KEY_VAULT_URI }}"
cert_name: "${{ secrets.AZURE_CERT_NAME }}"
client_id: "${{ secrets.AZURE_CLIENT_ID }}"
client_secret: "${{ secrets.AZURE_CLIENT_SECRET }}"
tenant_id: "${{ secrets.AZURE_TENANT_ID }}"
sign-and-package-mac:
needs: build
runs-on: macos-latest
steps:
- name: Unpack Mac notarization credentials
id: note-creds
shell: bash
run: |
# In NOTARIZE_CREDS_MACOS we expect to find:
# USERNAME="..."
# PASSWORD="..."
# TEAM_ID="..."
eval "${{ secrets.NOTARIZE_CREDS_MACOS }}"
echo "::add-mask::$USERNAME"
echo "::add-mask::$PASSWORD"
echo "::add-mask::$TEAM_ID"
echo "note_user=$USERNAME" >> "$GITHUB_OUTPUT"
echo "note_pass=$PASSWORD" >> "$GITHUB_OUTPUT"
echo "note_team=$TEAM_ID" >> "$GITHUB_OUTPUT"
# If we didn't manage to retrieve all of these credentials, better
# find out sooner than later.
[[ -n "$USERNAME" && -n "$PASSWORD" && -n "$TEAM_ID" ]]
- name: Sign and package Mac viewer
uses: secondlife/viewer-build-util/sign-pkg-mac@v1
with:
channel: ${{ needs.build.outputs.viewer_channel }}
imagename: ${{ needs.build.outputs.imagename }}
cert_base64: ${{ secrets.SIGNING_CERT_MACOS }}
cert_name: ${{ secrets.SIGNING_CERT_MACOS_IDENTITY }}
cert_pass: ${{ secrets.SIGNING_CERT_MACOS_PASSWORD }}
note_user: ${{ steps.note-creds.outputs.note_user }}
note_pass: ${{ steps.note-creds.outputs.note_pass }}
note_team: ${{ steps.note-creds.outputs.note_team }}
post-windows-symbols:
needs: build
runs-on: ubuntu-latest
steps:
- name: Post Windows symbols
uses: secondlife/viewer-build-util/post-bugsplat-windows@v1
with:
username: ${{ secrets.BUGSPLAT_USER }}
password: ${{ secrets.BUGSPLAT_PASS }}
database: "SecondLife_Viewer_2018"
channel: ${{ needs.build.outputs.viewer_channel }}
version: ${{ needs.build.outputs.viewer_version }}
post-mac-symbols:
needs: build
runs-on: ubuntu-latest
steps:
- name: Post Mac symbols
uses: secondlife/viewer-build-util/post-bugsplat-mac@v1
with:
username: ${{ secrets.BUGSPLAT_USER }}
password: ${{ secrets.BUGSPLAT_PASS }}
database: "SecondLife_Viewer_2018"
channel: ${{ needs.build.outputs.viewer_channel }}
version: ${{ needs.build.outputs.viewer_version }}
release:
needs: [sign-and-package-windows, sign-and-package-mac]
runs-on: ubuntu-latest
if: github.ref_type == 'tag' && startsWith(github.ref_name, 'Second_Life_')
steps:
- uses: actions/download-artifact@v3
with:
name: Windows-installer
- uses: actions/download-artifact@v3
with:
name: macOS-installer
- uses: actions/download-artifact@v3
with:
name: Windows-metadata
- name: Rename windows metadata
run: |
mv autobuild-package.xml Windows-autobuild-package.xml
mv newview/viewer_version.txt Windows-viewer_version.txt
- uses: actions/download-artifact@v3
with:
name: macOS-metadata
- name: Rename macOS metadata
run: |
mv autobuild-package.xml macOS-autobuild-package.xml
mv newview/viewer_version.txt macOS-viewer_version.txt
# forked from softprops/action-gh-release
- uses: secondlife-3p/action-gh-release@v1
with:
# name the release page for the build number so we can find it
# easily (analogous to looking up a codeticket build page)
name: "v${{ github.run_id }}"
prerelease: true
generate_release_notes: true
# the only reason we generate a GH release is to post build products
fail_on_unmatched_files: true
files: |
*.dmg
*.exe
*-autobuild-package.xml
*-viewer_version.txt

View File

@ -1,5 +1,6 @@
name: Build viewer
on:
workflow_dispatch:
push:
branches:
- "Firestorm*.*.*"
@ -13,39 +14,37 @@ env:
EXTRA_ARGS: -DUSE_FMODSTUDIO=ON -DUSE_KDU=ON --crashreporting
build_secrets_checkout: ${{github.workspace}}/signing
XZ_DEFAULTS: -T0
jobs:
build_matrix:
strategy:
matrix:
os: [macos-11,ubuntu-20.04,windows-2022]
grid: [sl,os]
addrsize: [64,32]
exclude:
- os: ubuntu-20.04
addrsize: 32
- os: macos-11
addrsize: 32
# - grid: sl
# addrsize: 32
addrsize: [64]
runs-on: ${{ matrix.os }}
steps:
- name: Install Bash 4 and GNU sed on Mac
if: runner.os == 'macOS'
run: |
/bin/bash -c "$(curl -fsSL https://raw.githubusercontent.com/Homebrew/install/HEAD/install.sh)"
brew update
brew install bash
brew install gnu-sed
echo "/usr/local/bin" >> $GITHUB_PATH
echo "$(brew --prefix)/opt/gnu-sed/libexec/gnubin" >> $GITHUB_PATH
- uses: actions/checkout@v3
- uses: actions/setup-python@v4
id: py311
id: py312
with:
python-version: '3.11'
python-version: '3.11.6'
cache: 'pip'
- if: runner.os == 'Windows'
run: |
python3 -m pip install -r requirements.txt --user
echo "$HOME/.local/bin" >> $GITHUB_PATH
- if: runner.os != 'Windows'
- name: Install python requirements
run: |
python3 -m pip install -r requirements.txt
python -m pip install -r requirements.txt
- name: Check python version
run: python -V
@ -60,6 +59,14 @@ jobs:
EOF
shell: bash
- name: Test python3 llsd
run: |
python3 - <<EOF
import llsd
print("Hello from inline Python script!")
EOF
shell: bash
- name: Free Disk Space (Ubuntu)
if: runner.os == 'Linux'
uses: jlumbroso/free-disk-space@main
@ -72,17 +79,6 @@ jobs:
echo "CC=gcc-10" >> $GITHUB_ENV
echo "CXX=g++-10" >> $GITHUB_ENV
- name: Install Bash 4 and GNU sed on Mac
if: runner.os == 'macOS'
run: |
/bin/bash -c "$(curl -fsSL https://raw.githubusercontent.com/Homebrew/install/HEAD/install.sh)"
brew update
brew install bash
brew install gnu-sed
echo "/usr/local/bin" >> $GITHUB_PATH
echo "$(brew --prefix)/opt/gnu-sed/libexec/gnubin" >> $GITHUB_PATH
- name: Setup rclone and download the folder
uses: beqjanus/setup-rclone@main
@ -293,7 +289,7 @@ jobs:
FS_RELEASE_FOLDER=preview
FS_BUILD_WEBHOOK_URL=${{ secrets.BETA_WEBHOOK_URL }}
elif [[ "${{ github.ref_name }}" == *alpha* ]]; then
FS_RELEASE_FOLDER=preview
FS_RELEASE_FOLDER=test
FS_BUILD_WEBHOOK_URL=${{ secrets.BETA_WEBHOOK_URL }}
elif [[ "${{ github.ref_name }}" == *nightly* ]] || [[ "${{ github.event_name }}" == 'schedule' ]]; then
FS_RELEASE_FOLDER=nightly
@ -322,5 +318,5 @@ jobs:
rclone_config: ${{ secrets.RCLONE_CONFIG }}
- name: Copy files to remote host
run: rclone copy ${{steps.download.outputs.download-path}}/${{ env.FS_RELEASE_FOLDER }} fs_deploy:${{ env.FS_RELEASE_FOLDER }}
run: rclone copy ${{steps.download.outputs.download-path}}/${{ env.FS_RELEASE_FOLDER }} fs_r2_deploy:viewerdownloads/${{ env.FS_RELEASE_FOLDER }}

View File

@ -11,7 +11,7 @@ jobs:
pre-commit:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v3
- uses: actions/checkout@v4
- uses: actions/setup-python@v4
with:
python-version: 3.x

18
.gitignore vendored
View File

@ -7,11 +7,20 @@
*.pyc
*.rej
*.swp
*.vcxproj
*.filters
*.sln
*.depend
*.stamp
*.rc
*.tar.bz2
*.code-workspace
*~
# Specific paths and/or names
CMakeCache.txt
cmake_install.cmake
LICENSES
build-darwin-*
build-linux-*
@ -19,8 +28,11 @@ debian/files
debian/secondlife-appearance-utility*
debian/secondlife-viewer*
indra/.distcc
build-vc*-32*/
build-vc*-64*/
indra/cmake/*
indra/out/*
indra/packages/*
build-vc*/
indra/CMakeFiles
indra/build-vc[0-9]*
indra/lib/mono/1.0/*.dll
@ -113,3 +125,5 @@ firestorm.code-workspace
.cache/clangd/index/
*-compiled.glsl
.github/release.yaml

View File

@ -2,11 +2,11 @@ Before you start configuring your Windows build system, be aware of our tested c
Memory: You will need at least 2GB RAM, 4GB strongly recommended.
CPU: Multiple CPUs are strongly recommended.
A build can take over an hour.
Visual Studio 2017 Community Edition.
Visual Studio 2017/2022 Community Edition.
Ensure you can build a stock viewer-development try as described in the SL wiki. Before asking for any help
compiling Firestorm, make sure you can build the Second Life viewer first. If you try and skip this step, you may
receive much less help. http://wiki.secondlife.com/wiki/Visual_Studio_2013_Viewer_Builds
receive much less help. https://wiki.secondlife.com/wiki/Build_the_Viewer_on_Windows
If you want to use licensed FMOD or KDU build libraries (they are optional) you have to provision these yourself.
If you're licensing these with Phoenix/Firestorm, ask for the libraries for fmod and kdu. Put them into:
@ -20,11 +20,11 @@ mailing list. We've created a non-KDU build target to make this easier. Everywhe
"ReleaseFS_open" instead. This will perform the same build, using OpenJpeg instead of KDU.
To build Firestorm:
To build Firestorm - a more detailed instruction can be found at https://wiki.firestormviewer.org/fs_compiling_firestorm_windows:
Open a CMD shell and navigating to your firestorm code repo:
autobuild build -c ReleaseFS
autobuild build -A 64 -c ReleaseFS
Other build targets you may use are:
@ -33,22 +33,18 @@ Other build targets you may use are:
RelWithDebInfoFS_open (no KDU, no FMOD)
Other examples:
autobuild configure -c ReleaseFS # basic configuration step, don't build, just configure
autobuild configure -c ReleaseFS -- --clean # clean the output area first, then configure
autobuild configure -c ReleaseFS -- --chan Private-Yourname # configure with a custom channel
autobuild configure -A 64 -c ReleaseFS # basic configuration step, don't build, just configure
autobuild configure -A 64 -c ReleaseFS -- --clean # clean the output area first, then configure
autobuild configure -A 64 -c ReleaseFS -- --chan Private-Yourname # configure with a custom channel
autobuild build -c ReleaseFS --no-configure # default quick rebuild
autobuild build -A 64 -c ReleaseFS --no-configure # default quick rebuild
If you want to set custom configuration, do this in the configure step separately from build, then run "autobuild
build -c ReleaseFS --no-configure" as a secondary step.
If you want to build the 64bit version, add the parameter -A 64 to the autobuild commands, e.g.:
autobuild configure -A 64 -c ReleaseFS
autobuild build -A 64 -c ReleaseFS --no-configure
build -A 64 -c ReleaseFS --no-configure" as a secondary step.
Logs:
Look for logs in build-vc150-32/logs for 32bit builds and build-vc150-64/logs for 64bit
Look for logs in build-vc1x0-64/logs
Output:
Look for output in build-vc150-32/newview/Release for 32bit builds and build-vc150-64/newview/Release for 64bit
Look for output in build-vc1x0-32/newview/Release

File diff suppressed because it is too large Load Diff

173
build.sh
View File

@ -16,6 +16,8 @@
# * The special style in which python is invoked is intentional to permit
# use of a native python install on windows - which requires paths in DOS form
cleanup="true"
retry_cmd()
{
max_attempts="$1"; shift
@ -110,6 +112,34 @@ installer_CYGWIN()
fi
}
[[ -n "$GITHUB_OUTPUT" ]] || fatal "Need to export GITHUB_OUTPUT"
# The following is based on the Warning for GitHub multiline output strings:
# https://docs.github.com/en/actions/using-workflows/workflow-commands-for-github-actions#multiline-strings
EOF=$(dd if=/dev/urandom bs=15 count=1 status=none | base64)
# Build up these arrays as we go
metadata=()
symbolfile=()
physicstpv=()
# and dump them to GITHUB_OUTPUT when done
cleanup="$cleanup ; \
arrayoutput metadata ; \
arrayoutput symbolfile ; \
arrayoutput physicstpv"
trap "$cleanup" EXIT
arrayoutput()
{
local outputname="$1"
# append "[*]" to the array name so array indirection works
local array="$1[*]"
local IFS='
'
echo "$outputname<<$EOF
${!array}
$EOF" >> "$GITHUB_OUTPUT"
}
pre_build()
{
local variant="$1"
@ -121,7 +151,7 @@ pre_build()
RELEASE_CRASH_REPORTING=ON
HAVOK=ON
SIGNING=()
if [ "$arch" == "Darwin" -a "$variant" == "Release" ]
if [[ "$arch" == "Darwin" && "$variant" == "Release" ]]
then SIGNING=("-DENABLE_SIGNING:BOOL=YES" \
"-DSIGNING_IDENTITY:STRING=Developer ID Application: Linden Research, Inc.")
fi
@ -145,15 +175,27 @@ pre_build()
VIEWER_SYMBOL_FILE="$(native_path "$abs_build_dir/newview/$variant/secondlife-symbols-$symplat-${AUTOBUILD_ADDRSIZE}.tar.bz2")"
fi
# don't spew credentials into build log
bugsplat_sh="$build_secrets_checkout/bugsplat/bugsplat.sh"
set +x
if [ -r "$bugsplat_sh" ]
then # show that we're doing this, just not the contents
echo source "$bugsplat_sh"
source "$bugsplat_sh"
# expect these variables to be set in the environment from GitHub secrets
if [[ -n "$BUGSPLAT_DB" ]]
then
# don't spew credentials into build log
set +x
if [[ -z "$BUGSPLAT_USER" || -z "$BUGSPLAT_PASS" ]]
then
# older mechanism involving build-secrets repo -
# if build_secrets_checkout isn't set, report its name
bugsplat_sh="${build_secrets_checkout:-\$build_secrets_checkout}/bugsplat/bugsplat.sh"
if [ -r "$bugsplat_sh" ]
then # show that we're doing this, just not the contents
echo source "$bugsplat_sh"
source "$bugsplat_sh"
else
fatal "BUGSPLAT_USER or BUGSPLAT_PASS missing, and no $bugsplat_sh"
fi
fi
set -x
export BUGSPLAT_USER BUGSPLAT_PASS
fi
set -x
# honor autobuild_configure_parameters same as sling-buildscripts
eval_autobuild_configure_parameters=$(eval $(echo echo $autobuild_configure_parameters))
@ -181,13 +223,17 @@ package_llphysicsextensions_tpv()
# nat 2016-12-21: without HAVOK, can't build PhysicsExtensions_TPV.
if [ "$variant" = "Release" -a "${HAVOK:-}" != "OFF" ]
then
test -r "$build_dir/packages/llphysicsextensions/autobuild-tpv.xml" || fatal "No llphysicsextensions_tpv autobuild configuration found"
tpvconfig=$(native_path "$build_dir/packages/llphysicsextensions/autobuild-tpv.xml")
"$autobuild" build --quiet --config-file "$tpvconfig" -c Tpv || fatal "failed to build llphysicsextensions_tpv"
tpvconfig="$build_dir/packages/llphysicsextensions/autobuild-tpv.xml"
test -r "$tpvconfig" || fatal "No llphysicsextensions_tpv autobuild configuration found"
# SL-19942: autobuild ignores -c switch if AUTOBUILD_CONFIGURATION set
unset AUTOBUILD_CONFIGURATION
"$autobuild" build --quiet --config-file "$(native_path "$tpvconfig")" -c Tpv \
|| fatal "failed to build llphysicsextensions_tpv"
# capture the package file name for use in upload later...
PKGTMP=`mktemp -t pgktpv.XXXXXX`
trap "rm $PKGTMP* 2>/dev/null" 0
cleanup="$cleanup ; rm $PKGTMP* 2>/dev/null"
trap "$cleanup" EXIT
"$autobuild" package --quiet --config-file "$tpvconfig" --results-file "$(native_path $PKGTMP)" || fatal "failed to package llphysicsextensions_tpv"
tpv_status=$?
if [ -r "${PKGTMP}" ]
@ -313,12 +359,20 @@ begin_section "coding policy check"
# this far. Running coding policy checks on one platform *should* suffice...
if [[ "$arch" == "Darwin" ]]
then
# install the git-hooks dependencies
pip install -r "$(native_path "$git_hooks_checkout/requirements.txt")" || \
fatal "pip install git-hooks failed"
# validate the branch we're about to build
python_cmd "$git_hooks_checkout/coding_policy_git.py" --all_files || \
fatal "coding policy check failed"
git_hooks_reqs="$git_hooks_checkout/requirements.txt"
if [[ -r "$(shell_path "$git_hooks_reqs")" ]]
then
# install the git-hooks dependencies
pip install -r "$(native_path "$git_hooks_reqs")" || \
fatal "pip install git-hooks failed"
fi
git_hooks_script="$git_hooks_checkout/coding_policy_git.py"
if [[ -r "$(shell_path "$git_hooks_script")" ]]
then
# validate the branch we're about to build
python_cmd "$(native_path "$git_hooks_script")" --all_files || \
fatal "coding policy check failed"
fi
fi
end_section "coding policy check"
@ -353,6 +407,7 @@ do
begin_section "Autobuild metadata"
python_cmd "$helpers/codeticket.py" addoutput "Autobuild Metadata" "$build_dir/autobuild-package.xml" --mimetype text/xml \
|| fatal "Upload of autobuild metadata failed"
metadata+=("$build_dir/autobuild-package.xml")
if [ "$arch" != "Linux" ]
then
record_dependencies_graph "$build_dir/autobuild-package.xml" # defined in buildscripts/hg/bin/build.sh
@ -366,8 +421,11 @@ do
if [ -r "$build_dir/newview/viewer_version.txt" ]
then
begin_section "Viewer Version"
python_cmd "$helpers/codeticket.py" addoutput "Viewer Version" "$(<"$build_dir/newview/viewer_version.txt")" --mimetype inline-text \
viewer_version="$(<"$build_dir/newview/viewer_version.txt")"
python_cmd "$helpers/codeticket.py" addoutput "Viewer Version" "$viewer_version" --mimetype inline-text \
|| fatal "Upload of viewer version failed"
metadata+=("$build_dir/newview/viewer_version.txt")
echo "viewer_version=$viewer_version" >> "$GITHUB_OUTPUT"
end_section "Viewer Version"
fi
;;
@ -376,12 +434,14 @@ do
then
record_event "Doxygen warnings generated; see doxygen_warnings.log"
python_cmd "$helpers/codeticket.py" addoutput "Doxygen Log" "$build_dir/doxygen_warnings.log" --mimetype text/plain ## TBD
metadata+=("$build_dir/doxygen_warnings.log")
fi
if [ -d "$build_dir/doxygen/html" ]
then
tar -c -f "$build_dir/viewer-doxygen.tar.bz2" --strip-components 3 "$build_dir/doxygen/html"
python_cmd "$helpers/codeticket.py" addoutput "Doxygen Tarball" "$build_dir/viewer-doxygen.tar.bz2" \
|| fatal "Upload of doxygen tarball failed"
metadata+=("$build_dir/viewer-doxygen.tar.bz2")
fi
;;
*)
@ -483,64 +543,29 @@ then
if $build_viewer
then
begin_section "Uploads"
# Upload installer
package=$(installer_$arch)
if [ x"$package" = x ] || test -d "$package"
# nat 2016-12-22: without RELEASE_CRASH_REPORTING, we have no symbol file.
if [ "${RELEASE_CRASH_REPORTING:-}" != "OFF" ]
then
fatal "No installer found from `pwd`"
succeeded=$build_coverity
else
# Upload base package.
retry_cmd 4 30 python_cmd "$helpers/codeticket.py" addoutput Installer "$package" \
|| fatal "Upload of installer failed"
wait_for_codeticket
# Upload additional packages.
for package_id in $additional_packages
do
package=$(installer_$arch "$package_id")
if [ x"$package" != x ]
# BugSplat wants to see xcarchive.zip
# e.g. build-darwin-x86_64/newview/Release/Second Life Test.xcarchive.zip
symbol_file="${build_dir}/newview/${variant}/${viewer_channel}.xcarchive.zip"
if [[ ! -f "$symbol_file" ]]
then
retry_cmd 4 30 python_cmd "$helpers/codeticket.py" addoutput "Installer $package_id" "$package" \
|| fatal "Upload of installer $package_id failed"
wait_for_codeticket
else
record_failure "Failed to find additional package for '$package_id'."
# symbol tarball we prep for (e.g.) Breakpad
symbol_file="$VIEWER_SYMBOL_FILE"
fi
done
# Upload crash reporter file
symbolfile+=("$symbol_file")
fi
if [ "$last_built_variant" = "Release" ]
then
# nat 2016-12-22: without RELEASE_CRASH_REPORTING, we have no symbol file.
if [ "${RELEASE_CRASH_REPORTING:-}" != "OFF" ]
then
# Upload crash reporter file
retry_cmd 4 30 python_cmd "$helpers/codeticket.py" addoutput "Symbolfile" "$VIEWER_SYMBOL_FILE" \
|| fatal "Upload of symbolfile failed"
wait_for_codeticket
fi
# Upload the llphysicsextensions_tpv package, if one was produced
# *TODO: Make this an upload-extension
if [ -r "$build_dir/llphysicsextensions_package" ]
then
llphysicsextensions_package=$(cat $build_dir/llphysicsextensions_package)
retry_cmd 4 30 python_cmd "$helpers/codeticket.py" addoutput "Physics Extensions Package" "$llphysicsextensions_package" --private \
|| fatal "Upload of physics extensions package failed"
fi
fi
# Run upload extensions
# Ex: bugsplat
if [ -d ${build_dir}/packages/upload-extensions ]; then
for extension in ${build_dir}/packages/upload-extensions/*.sh; do
begin_section "Upload Extension $extension"
. $extension
[ $? -eq 0 ] || fatal "Upload of extension $extension failed"
wait_for_codeticket
end_section "Upload Extension $extension"
done
fi
# Upload the llphysicsextensions_tpv package, if one was produced
# Only upload this package when building the private repo so the
# artifact is private.
if [[ "x$GITHUB_REPOSITORY" == "xsecondlife/viewer-private" && \
-r "$build_dir/llphysicsextensions_package" ]]
then
llphysicsextensions_package=$(cat $build_dir/llphysicsextensions_package)
physicstpv+=("$llphysicsextensions_package")
fi
end_section "Uploads"
else

View File

@ -0,0 +1,60 @@
# standalone functions from sling-buildscripts
set_build_number_to_revision()
{
record_event "buildNumber $revision"
}
record_event()
{
echo "=== $@"
}
begin_section()
{
record_event "START $*"
sections+=("$*")
}
end_section()
{
# accommodate dumb Mac bash 3, which doesn't understand array[-1]
local last=$(( ${#sections[@]} - 1 ))
record_event "END ${*:-${sections[$last]}}"
unset "sections[$last]"
}
record_success()
{
record_event "SUCCESS $*"
}
record_failure()
{
record_event "FAILURE $*" >&2
}
fatal()
{
record_failure "$@"
finalize false
exit 1
}
# redefined fail for backward compatibility
alias fail=fatal
pass()
{
exit 0
}
export -f set_build_number_to_revision
export -f record_event
export -f begin_section
export -f end_section
export -f record_success
export -f record_failure
export -f fatal
export -f pass
export sections

View File

@ -19,6 +19,7 @@ Agathos Frascati
CT-317
CT-352
Ai Austin
SL-19399
Aiko Ying
Aimee Trescothick
SNOW-227
@ -243,6 +244,7 @@ Ansariel Hiller
SL-19575
SL-19623
SL-4126
SL-20224
Aralara Rajal
Arare Chantilly
CHUIBUG-191
@ -936,6 +938,8 @@ LSL Scientist
Lamorna Proctor
Lares Carter
Larry Pixel
Lars Næsbye Christensen
SL-20054
Laurent Bechir
Leal Choche
Lenae Munz
@ -1427,6 +1431,7 @@ Sovereign Engineer
SL-18497
SL-18525
SL-18534
SL-19690
SL-19336
SpacedOut Frye
VWR-34

View File

@ -226,10 +226,7 @@ DOWNLOADS - {build_type}
full_file = os.path.join(build_type_dir, dir, file)
md5 = get_md5(full_file)
base_name = os.path.basename(file)
if "x64" in base_name:
wordsize = "64"
else:
wordsize = "32"
wordsize = "64"
if "FirestormOS-" in base_name:
grid = "OS"
@ -258,15 +255,6 @@ DOWNLOADS - {build_type}
output += "\n"
output += f"MD5: {md5_dict[f'{grid}{dir}{wordsize}']}\n"
output += "\n"
if dir == "windows":
# Need to do 32 bit as well
wordsize = "32"
output += f"{platform} for {grid_printable} ({wordsize}-bit)\n"
output += f"{download_root}/{dir}/{os.path.basename(file_dict[f'{grid}{dir}{wordsize}'])}\n"
output += "\n"
output += f"MD5: {md5_dict[f'{grid}{dir}{wordsize}']}\n"
output += "\n"
wordsize = "64"
except KeyError:
output += f"{platform} for {grid_printable} ({wordsize}-bit) - NOT AVAILABLE\n"
output += "\n"

View File

@ -29,15 +29,6 @@ else()
set( USE_AUTOBUILD_3P ON )
endif()
# The viewer code base can now be successfully compiled with -std=c++14. But
# turning that on in the generic viewer-build-variables/variables file would
# potentially require tweaking each of our ~50 third-party library builds.
# Until we decide to set -std=c++14 in viewer-build-variables/variables, set
# it locally here: we want to at least prevent inadvertently reintroducing
# viewer code that would fail with C++14.
set(CMAKE_CXX_STANDARD 17)
set(CMAKE_CXX_STANDARD_REQUIRED ON)
include(Variables)
include(BuildVersion)

View File

@ -33,6 +33,11 @@ endif (WINDOWS)
# Portable compilation flags.
add_compile_definitions( ADDRESS_SIZE=${ADDRESS_SIZE})
# Because older versions of Boost.Bind dumped placeholders _1, _2 et al. into
# the global namespace, Boost now requires either BOOST_BIND_NO_PLACEHOLDERS
# to avoid that or BOOST_BIND_GLOBAL_PLACEHOLDERS to state that we require it
# -- which we do. Without one or the other, we get a ton of Boost warnings.
add_compile_definitions(BOOST_BIND_GLOBAL_PLACEHOLDERS)
# Configure crash reporting
set(RELEASE_CRASH_REPORTING OFF CACHE BOOL "Enable use of crash reporting in release builds")
@ -71,15 +76,6 @@ if (WINDOWS)
# http://www.cmake.org/pipermail/cmake/2009-September/032143.html
string(REPLACE "/Zm1000" " " CMAKE_CXX_FLAGS ${CMAKE_CXX_FLAGS})
# Without PreferredToolArchitecture=x64, as of 2020-06-26 the 32-bit
# compiler on our TeamCity build hosts has started running out of virtual
# memory for the precompiled header file.
# CP changed to only append the flag for 32bit builds - on 64bit builds,
# locally at least, the build output is spammed with 1000s of 'D9002'
# warnings about this switch being ignored.
if(ADDRESS_SIZE EQUAL 32 AND DEFINED ENV{"TEAMCITY_PROJECT_NAME"})
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} /p:PreferredToolArchitecture=x64")
endif()
# zlib has assembly-language object files incompatible with SAFESEH
add_link_options(/LARGEADDRESSAWARE
/SAFESEH:NO

View File

@ -16,7 +16,6 @@ if (WINDOWS)
endif (LLCOMMON_LINK_SHARED)
target_link_libraries( ll::apr INTERFACE
${ARCH_PREBUILT_DIRS_RELEASE}/${APR_selector}apr-1.lib
${ARCH_PREBUILT_DIRS_RELEASE}/${APR_selector}apriconv-1.lib
${ARCH_PREBUILT_DIRS_RELEASE}/${APR_selector}aprutil-1.lib
)
elseif (DARWIN)
@ -38,7 +37,6 @@ else (WINDOWS)
target_link_libraries( ll::apr INTERFACE
apr-1
aprutil-1
# iconv # <FS:Zi> Doesn't seem to be necessary for Linux
uuid
rt
)

View File

@ -57,7 +57,6 @@ if(WINDOWS)
#openjp2.dll # <FS:Ansariel> Only copy OpenJPEG dll if needed
libapr-1.dll
libaprutil-1.dll
libapriconv-1.dll
nghttp2.dll
glod.dll # <FS:Beq> restore GLOD
libhunspell.dll
@ -104,7 +103,8 @@ if(WINDOWS)
set(release_files ${release_files} growl++.dll growl.dll )
if (TARGET ll::fmodstudio)
set(debug_files ${debug_files} fmodL.dll)
# fmodL is included for logging, only one should be picked by manifest
#set(release_files ${release_files} fmodL.dll)
set(release_files ${release_files} fmod.dll)
endif ()
@ -214,7 +214,6 @@ elseif(DARWIN)
libndofdev.dylib
libnghttp2.dylib
libnghttp2.14.dylib
libnghttp2.14.19.0.dylib
liburiparser.dylib
liburiparser.1.dylib
liburiparser.1.0.27.dylib

View File

@ -2,7 +2,7 @@
include_guard()
# FMODSTUDIO can be set when launching the make using the argument -DFMODSTUDIO:BOOL=ON
# FMODSTUDIO can be set when launching the make using the argument -DUSE_FMODSTUDIO:BOOL=ON
# When building using proprietary binaries though (i.e. having access to LL private servers),
# we always build with FMODSTUDIO.
# Open source devs should use the -DFMODSTUDIO:BOOL=ON then if they want to build with FMOD, whether

View File

@ -3,9 +3,7 @@ include(Prebuilt)
include(GLH)
add_library( ll::glext INTERFACE IMPORTED )
if (WINDOWS OR LINUX)
use_system_binary(glext)
use_prebuilt_binary(glext)
endif (WINDOWS OR LINUX)
use_system_binary(glext)
use_prebuilt_binary(glext)

View File

@ -130,6 +130,13 @@ MACRO(LL_ADD_PROJECT_UNIT_TESTS project sources)
message("LL_ADD_PROJECT_UNIT_TESTS ${name}_test_additional_CFLAGS ${${name}_test_additional_CFLAGS}")
endif()
if (DARWIN)
# test binaries always need to be signed for local development
set_target_properties(PROJECT_${project}_TEST_${name}
PROPERTIES
XCODE_ATTRIBUTE_CODE_SIGN_IDENTITY "-")
endif ()
#
# Setup test targets
#
@ -225,6 +232,13 @@ FUNCTION(LL_ADD_INTEGRATION_TEST
)
endif ()
if (DARWIN)
# test binaries always need to be signed for local development
set_target_properties(INTEGRATION_TEST_${testname}
PROPERTIES
XCODE_ATTRIBUTE_CODE_SIGN_IDENTITY "-")
endif ()
# Add link deps to the executable
if(TEST_DEBUG)
message(STATUS "TARGET_LINK_LIBRARIES(INTEGRATION_TEST_${testname} ${libraries})")

View File

@ -1,2 +1,5 @@
# -*- cmake -*-
include(Variables)
include(Mikktspace)

View File

@ -62,6 +62,7 @@ elseif (WINDOWS)
user32
ole32
dbghelp
rpcrt4.lib
legacy_stdio_definitions
)
else()

View File

@ -0,0 +1,6 @@
# -*- cmake -*-
# Pull in the prebuilt MikkTSpace library (tangent-space generation) unless
# the build is configured to link against system-supplied libraries instead.
include(Prebuilt)
if (NOT USESYSTEMLIBS)
  use_prebuilt_binary(mikktspace)
endif (NOT USESYSTEMLIBS)

View File

@ -9,47 +9,37 @@ if (DEFINED ENV{PYTHON})
elseif (WINDOWS)
# On Windows, explicitly avoid Cygwin Python.
if (DEFINED ENV{VIRTUAL_ENV})
find_program(PYTHON_EXECUTABLE
NAMES python.exe
PATHS
"$ENV{VIRTUAL_ENV}\\scripts"
NO_DEFAULT_PATH
)
else()
# if the user has their own version of Python installed, prefer that
foreach(hive HKEY_CURRENT_USER HKEY_LOCAL_MACHINE)
# prefer more recent Python versions to older ones, if multiple versions
# are installed
foreach(pyver 3.11 3.10 3.9)
list(APPEND regpaths "[${hive}\\SOFTWARE\\Python\\PythonCore\\${pyver}\\InstallPath]")
endforeach()
# if the user has their own version of Python installed, prefer that
foreach(hive HKEY_CURRENT_USER HKEY_LOCAL_MACHINE)
# prefer more recent Python versions to older ones, if multiple versions
# are installed
foreach(pyver 3.12 3.11 3.10 3.9 3.8 3.7)
list(APPEND regpaths "[${hive}\\SOFTWARE\\Python\\PythonCore\\${pyver}\\InstallPath]")
endforeach()
endforeach()
# TODO: This logic has the disadvantage that if you have multiple versions
# of Python installed, the selected path won't necessarily be the newest -
# e.g. this GLOB will prefer Python310 to Python311. But since pymaybe is
# checked AFTER the registry entries, this will only surface as a problem if
# no installed Python appears in the registry.
file(GLOB pymaybe
"$ENV{PROGRAMFILES}/Python*"
## "$ENV{PROGRAMFILES(X86)}/Python*"
# The Windows environment variable is in fact as shown above, but CMake
# disallows querying an environment variable containing parentheses -
# thanks, Windows. Fudge by just appending " (x86)" to $PROGRAMFILES and
# hoping for the best.
"$ENV{PROGRAMFILES} (x86)/Python*"
"c:/Python*")
# TODO: This logic has the disadvantage that if you have multiple versions
# of Python installed, the selected path won't necessarily be the newest -
# e.g. this GLOB will prefer Python310 to Python311. But since pymaybe is
# checked AFTER the registry entries, this will only surface as a problem if
# no installed Python appears in the registry.
file(GLOB pymaybe
"$ENV{PROGRAMFILES}/Python*"
## "$ENV{PROGRAMFILES(X86)}/Python*"
# The Windows environment variable is in fact as shown above, but CMake
# disallows querying an environment variable containing parentheses -
# thanks, Windows. Fudge by just appending " (x86)" to $PROGRAMFILES and
# hoping for the best.
"$ENV{PROGRAMFILES} (x86)/Python*"
"c:/Python*")
set(Python3_FIND_REGISTRY "LAST")
find_program(python
NAMES python3.exe python.exe
NO_DEFAULT_PATH # added so that cmake does not find cygwin python
PATHS
${regpaths}
${pymaybe}
)
endif()
find_program(python
NAMES python3.exe python.exe
NO_DEFAULT_PATH # added so that cmake does not find cygwin python
PATHS
${regpaths}
${pymaybe}
)
find_package(Python3 COMPONENTS Interpreter)
else()
find_program(python python3)

View File

@ -0,0 +1,7 @@
# -*- cmake -*-
# Fetch the prebuilt tinygltf package and publish its header directory in
# TINYGLTF_INCLUDE_DIR for targets that consume glTF content.
include(Prebuilt)
use_prebuilt_binary(tinygltf)
set(TINYGLTF_INCLUDE_DIR ${LIBS_PREBUILT_DIR}/include/tinygltf)

View File

@ -10,8 +10,9 @@ if (USE_TRACY)
use_prebuilt_binary(tracy)
target_include_directories( ll::tracy SYSTEM INTERFACE ${LIBS_PREBUILT_DIR}/include/tracy)
target_link_libraries( ll::tracy INTERFACE TracyClient )
# See: indra/llcommon/llprofiler.h
target_compile_definitions(ll::tracy INTERFACE LL_PROFILER_CONFIGURATION=3 )
# See: indra/llcommon/llprofiler.h
add_compile_definitions(LL_PROFILER_CONFIGURATION=3)
endif (USE_TRACY)

View File

@ -173,13 +173,17 @@ if (${CMAKE_SYSTEM_NAME} MATCHES "Darwin")
set(CMAKE_XCODE_ATTRIBUTE_GCC_OPTIMIZATION_LEVEL "${CMAKE_MATCH_1}")
message(STATUS "CMAKE_XCODE_ATTRIBUTE_GCC_OPTIMIZATION_LEVEL = '${CMAKE_XCODE_ATTRIBUTE_GCC_OPTIMIZATION_LEVEL}'")
string(REGEX MATCHALL "[^ ]+" LL_BUILD_LIST "$ENV{LL_BUILD}")
list(FIND LL_BUILD_LIST "-iwithsysroot" sysroot_idx)
if ("${sysroot_idx}" LESS 0)
message(FATAL_ERROR "Environment variable LL_BUILD must contain '-iwithsysroot'")
endif ()
math(EXPR sysroot_idx "${sysroot_idx} + 1")
list(GET LL_BUILD_LIST "${sysroot_idx}" CMAKE_OSX_SYSROOT)
# allow disabling this check by setting LL_SKIP_REQUIRE_SYSROOT either ON as cmake cache var or non-empty as environment var
set(LL_SKIP_REQUIRE_SYSROOT OFF CACHE BOOL "Skip requirement to set toolchain sysroot ahead of time. Not skipped by default for consistency, but skipping can be useful for selecting alternative xcode versions side by side")
if("$ENV{LL_SKIP_REQUIRE_SYSROOT}" STREQUAL "" AND NOT ${LL_SKIP_REQUIRE_SYSROOT})
string(REGEX MATCHALL "[^ ]+" LL_BUILD_LIST "$ENV{LL_BUILD}")
list(FIND LL_BUILD_LIST "-iwithsysroot" sysroot_idx)
if ("${sysroot_idx}" LESS 0)
message(FATAL_ERROR "Environment variable LL_BUILD must contain '-iwithsysroot'")
endif ()
math(EXPR sysroot_idx "${sysroot_idx} + 1")
list(GET LL_BUILD_LIST "${sysroot_idx}" CMAKE_OSX_SYSROOT)
endif()
message(STATUS "CMAKE_OSX_SYSROOT = '${CMAKE_OSX_SYSROOT}'")
set(CMAKE_XCODE_ATTRIBUTE_GCC_VERSION "com.apple.compilers.llvm.clang.1_0")

View File

@ -1,2 +1,3 @@
include (Prebuilt)
use_prebuilt_binary(viewer-manager)

View File

@ -0,0 +1,5 @@
# -*- cmake -*-
# Fetch the prebuilt vulkan_gltf package (Vulkan glTF support files).
include(Prebuilt)
use_prebuilt_binary(vulkan_gltf)

View File

@ -38,6 +38,7 @@ import itertools
import operator
import os
import re
import shlex
import shutil
import subprocess
import sys
@ -535,15 +536,15 @@ class LLManifest(object, metaclass=LLManifestRegistry):
self.cmakedirs(path)
return path
def run_command(self, command):
def run_command(self, command, **kwds):
"""
Runs an external command.
Raises ManifestError exception if the command returns a nonzero status.
"""
print("Running command:", command)
print("Running command:", shlex.join(command))
sys.stdout.flush()
try:
subprocess.check_call(command)
subprocess.check_call(command, **kwds)
except subprocess.CalledProcessError as err:
raise ManifestError( "Command %s returned non-zero status (%s)"
% (command, err.returncode) )

View File

@ -1064,7 +1064,6 @@ BOOL LLAvatarAppearance::loadSkeletonNode ()
mRoot->addChild(mMeshLOD[MESH_ID_UPPER_BODY]);
mRoot->addChild(mMeshLOD[MESH_ID_LOWER_BODY]);
mRoot->addChild(mMeshLOD[MESH_ID_SKIRT]);
mRoot->addChild(mMeshLOD[MESH_ID_HEAD]);
LLAvatarJoint *skull = (LLAvatarJoint*)mRoot->findJoint("mSkull");
if (skull)

View File

@ -142,7 +142,7 @@ public:
// <FS:ND> This map gets queried a huge amount of time.
// typedef std::map<std::string, LLJoint*> joint_map_t;
typedef boost::unordered_map< U32, LLJoint*> joint_map_t;
typedef std::unordered_map<U32, LLJoint*> joint_map_t;
// </FS:ND>
joint_map_t mJointMap;

View File

@ -384,7 +384,6 @@ BOOL LLTexLayerSet::render( S32 x, S32 y, S32 width, S32 height, LLRenderTarget*
// clear buffer area to ensure we don't pick up UI elements
{
gGL.flush();
LLGLDisable no_alpha(GL_ALPHA_TEST);
gAlphaMaskProgram.setMinimumAlpha(0.0f);
gGL.getTexUnit(0)->unbind(LLTexUnit::TT_TEXTURE);
gGL.color4f( 0.f, 0.f, 0.f, 1.f );
@ -417,7 +416,6 @@ BOOL LLTexLayerSet::render( S32 x, S32 y, S32 width, S32 height, LLRenderTarget*
gGL.flush();
gGL.setSceneBlendType(LLRender::BT_REPLACE);
LLGLDisable no_alpha(GL_ALPHA_TEST);
gAlphaMaskProgram.setMinimumAlpha(0.f);
gGL.getTexUnit(0)->unbind(LLTexUnit::TT_TEXTURE);
@ -507,7 +505,6 @@ void LLTexLayerSet::renderAlphaMaskTextures(S32 x, S32 y, S32 width, S32 height,
{
// Set the alpha channel to one (clean up after previous blending)
gGL.flush();
LLGLDisable no_alpha(GL_ALPHA_TEST);
gAlphaMaskProgram.setMinimumAlpha(0.f);
gGL.getTexUnit(0)->unbind(LLTexUnit::TT_TEXTURE);
gGL.color4f( 0.f, 0.f, 0.f, 1.f );
@ -1032,7 +1029,6 @@ void LLTexLayer::calculateTexLayerColor(const param_color_list_t &param_list, LL
BOOL LLTexLayer::render(S32 x, S32 y, S32 width, S32 height, LLRenderTarget* bound_target)
{
LLGLEnable color_mat(GL_COLOR_MATERIAL);
// *TODO: Is this correct?
//gPipeline.disableLights();
stop_glerror();
@ -1119,7 +1115,6 @@ BOOL LLTexLayer::render(S32 x, S32 y, S32 width, S32 height, LLRenderTarget* bou
if( tex )
{
bool no_alpha_test = getInfo()->mWriteAllChannels;
LLGLDisable alpha_test(no_alpha_test ? GL_ALPHA_TEST : 0);
if (no_alpha_test)
{
gAlphaMaskProgram.setMinimumAlpha(0.f);
@ -1169,7 +1164,6 @@ BOOL LLTexLayer::render(S32 x, S32 y, S32 width, S32 height, LLRenderTarget* bou
getInfo()->mStaticImageFileName.empty() &&
color_specified )
{
LLGLDisable no_alpha(GL_ALPHA_TEST);
gAlphaMaskProgram.setMinimumAlpha(0.000f);
gGL.getTexUnit(0)->unbind(LLTexUnit::TT_TEXTURE);
@ -1267,7 +1261,6 @@ BOOL LLTexLayer::blendAlphaTexture(S32 x, S32 y, S32 width, S32 height)
LLGLTexture* tex = LLTexLayerStaticImageList::getInstance()->getTexture( getInfo()->mStaticImageFileName, getInfo()->mStaticImageIsMask );
if( tex )
{
LLGLSNoAlphaTest gls_no_alpha_test;
gAlphaMaskProgram.setMinimumAlpha(0.f);
gGL.getTexUnit(0)->bind(tex, TRUE);
gl_rect_2d_simple_tex( width, height );
@ -1286,7 +1279,6 @@ BOOL LLTexLayer::blendAlphaTexture(S32 x, S32 y, S32 width, S32 height)
LLGLTexture* tex = mLocalTextureObject->getImage();
if (tex)
{
LLGLSNoAlphaTest gls_no_alpha_test;
gAlphaMaskProgram.setMinimumAlpha(0.f);
gGL.getTexUnit(0)->bind(tex);
gl_rect_2d_simple_tex( width, height );
@ -1323,7 +1315,6 @@ void LLTexLayer::renderMorphMasks(S32 x, S32 y, S32 width, S32 height, const LLC
// Note: if the first param is a mulitply, multiply against the current buffer's alpha
if( !first_param || !first_param->getMultiplyBlend() )
{
LLGLDisable no_alpha(GL_ALPHA_TEST);
gGL.getTexUnit(0)->unbind(LLTexUnit::TT_TEXTURE);
// Clear the alpha
@ -1335,7 +1326,6 @@ void LLTexLayer::renderMorphMasks(S32 x, S32 y, S32 width, S32 height, const LLC
}
// Accumulate alphas
LLGLSNoAlphaTest gls_no_alpha_test;
gGL.color4f( 1.f, 1.f, 1.f, 1.f );
for (LLTexLayerParamAlpha* param : mParamAlphaList)
{
@ -1357,7 +1347,6 @@ void LLTexLayer::renderMorphMasks(S32 x, S32 y, S32 width, S32 height, const LLC
LLGLTexture* tex = mLocalTextureObject->getImage();
if( tex && (tex->getComponents() == 4) )
{
LLGLSNoAlphaTest gls_no_alpha_test;
LLTexUnit::eTextureAddressMode old_mode = tex->getAddressMode();
gGL.getTexUnit(0)->bind(tex, TRUE);
@ -1377,7 +1366,6 @@ void LLTexLayer::renderMorphMasks(S32 x, S32 y, S32 width, S32 height, const LLC
{
if( (tex->getComponents() == 4) || (tex->getComponents() == 1) )
{
LLGLSNoAlphaTest gls_no_alpha_test;
gGL.getTexUnit(0)->bind(tex, TRUE);
gl_rect_2d_simple_tex( width, height );
gGL.getTexUnit(0)->unbind(LLTexUnit::TT_TEXTURE);
@ -1394,7 +1382,6 @@ void LLTexLayer::renderMorphMasks(S32 x, S32 y, S32 width, S32 height, const LLC
// Note: we're still using gGL.blendFunc( GL_DST_ALPHA, GL_ZERO );
if ( !is_approx_equal(layer_color.mV[VW], 1.f) )
{
LLGLDisable no_alpha(GL_ALPHA_TEST);
gGL.getTexUnit(0)->unbind(LLTexUnit::TT_TEXTURE);
gGL.color4fv(layer_color.mV);
gl_rect_2d_simple( width, height );
@ -1539,7 +1526,14 @@ void LLTexLayer::renderMorphMasks(S32 x, S32 y, S32 width, S32 height, const LLC
}
else
{ // platforms with working drivers...
glReadPixels(x, y, width, height, GL_ALPHA, GL_UNSIGNED_BYTE, alpha_data);
// We just want GL_ALPHA, but that isn't supported in OGL core profile 4.
static const size_t TEMP_BYTES_PER_PIXEL = 4;
U8* temp_data = (U8*)ll_aligned_malloc_32(mem_size * TEMP_BYTES_PER_PIXEL);
glReadPixels(x, y, width, height, GL_RGBA, GL_UNSIGNED_BYTE, temp_data);
for (size_t pixel = 0; pixel < pixels; pixel++) {
alpha_data[pixel] = temp_data[(pixel * TEMP_BYTES_PER_PIXEL) + 3];
}
ll_aligned_free_32(temp_data);
}
}
else

View File

@ -149,7 +149,7 @@ LLTexLayerParamAlpha::LLTexLayerParamAlpha(const LLTexLayerParamAlpha& pOther)
mCachedProcessedTexture(pOther.mCachedProcessedTexture),
mStaticImageTGA(pOther.mStaticImageTGA),
mStaticImageRaw(pOther.mStaticImageRaw),
mNeedsCreateTexture(pOther.mNeedsCreateTexture),
mNeedsCreateTexture(pOther.mNeedsCreateTexture.load()),
mStaticImageInvalid(pOther.mStaticImageInvalid),
mAvgDistortionVec(pOther.mAvgDistortionVec),
mCachedEffectiveWeight(pOther.mCachedEffectiveWeight)
@ -362,7 +362,6 @@ BOOL LLTexLayerParamAlpha::render(S32 x, S32 y, S32 width, S32 height)
mCachedProcessedTexture->setAddressMode(LLTexUnit::TAM_CLAMP);
}
LLGLSNoAlphaTest gls_no_alpha_test;
gGL.getTexUnit(0)->bind(mCachedProcessedTexture);
gl_rect_2d_simple_tex(width, height);
gGL.getTexUnit(0)->unbind(LLTexUnit::TT_TEXTURE);
@ -379,7 +378,6 @@ BOOL LLTexLayerParamAlpha::render(S32 x, S32 y, S32 width, S32 height)
}
else
{
LLGLDisable no_alpha(GL_ALPHA_TEST);
gGL.getTexUnit(0)->unbind(LLTexUnit::TT_TEXTURE);
gGL.color4f(0.f, 0.f, 0.f, effective_weight);
gl_rect_2d_simple(width, height);

View File

@ -111,7 +111,7 @@ private:
LLPointer<LLGLTexture> mCachedProcessedTexture;
LLPointer<LLImageTGA> mStaticImageTGA;
LLPointer<LLImageRaw> mStaticImageRaw;
BOOL mNeedsCreateTexture;
std::atomic<BOOL> mNeedsCreateTexture;
BOOL mStaticImageInvalid;
LL_ALIGN_16(LLVector4a mAvgDistortionVec);
F32 mCachedEffectiveWeight;

View File

@ -612,43 +612,40 @@ void LLAudioDecodeMgr::Impl::startMoreDecodes()
// Kick off a decode
mDecodes[decode_id] = LLPointer<LLVorbisDecodeState>(NULL);
try
{
main_queue->postTo(
general_queue,
[decode_id]() // Work done on general queue
bool posted = main_queue->postTo(
general_queue,
[decode_id]() // Work done on general queue
{
LLPointer<LLVorbisDecodeState> decode_state = beginDecodingAndWritingAudio(decode_id);
if (!decode_state)
{
LLPointer<LLVorbisDecodeState> decode_state = beginDecodingAndWritingAudio(decode_id);
if (gAudiop)
gAudiop->markSoundCorrupt(decode_id);
if (!decode_state)
{
if (gAudiop)
gAudiop->markSoundCorrupt(decode_id);
// Audio decode has errored
return decode_state;
}
// Disk write of decoded audio is now in progress off-thread
// Audio decode has errored
return decode_state;
},
[decode_id, this](LLPointer<LLVorbisDecodeState> decode_state) // Callback to main thread
mutable {
if (!gAudiop)
{
// There is no LLAudioEngine anymore. This might happen if
// an audio decode is enqueued just before shutdown.
return;
}
}
// At this point, we can be certain that the pointer to "this"
// is valid because the lifetime of "this" is dependent upon
// the lifetime of gAudiop.
// Disk write of decoded audio is now in progress off-thread
return decode_state;
},
[decode_id, this](LLPointer<LLVorbisDecodeState> decode_state) // Callback to main thread
mutable {
if (!gAudiop)
{
// There is no LLAudioEngine anymore. This might happen if
// an audio decode is enqueued just before shutdown.
return;
}
enqueueFinishAudio(decode_id, decode_state);
});
}
catch (const LLThreadSafeQueueInterrupt&)
// At this point, we can be certain that the pointer to "this"
// is valid because the lifetime of "this" is dependent upon
// the lifetime of gAudiop.
enqueueFinishAudio(decode_id, decode_state);
});
if (! posted)
{
// Shutdown
// Consider making processQueue() do a cleanup instead

View File

@ -30,6 +30,7 @@
#include <list>
#include <map>
#include <array>
#include "v3math.h"
#include "v3dmath.h"

View File

@ -51,9 +51,9 @@ public:
/*virtual*/ void start(const std::string& url);
/*virtual*/ void stop();
/*virtual*/ void pause(S32 pause);
/*virtual*/ void pause(int pause);
/*virtual*/ void update();
/*virtual*/ S32 isPlaying();
/*virtual*/ int isPlaying();
/*virtual*/ void setGain(F32 vol);
/*virtual*/ F32 getGain();
/*virtual*/ std::string getURL();

View File

@ -36,24 +36,20 @@
#include <boost/algorithm/string.hpp>
//<FS:ND> Query by JointKey rather than just a string, the key can be a U32 index for faster lookup
#include <boost/unordered_map.hpp>
#include <unordered_map>
boost::unordered_map< std::string, U32 > mpStringToKeys;
std::unordered_map<std::string, U32> mpStringToKeys;
JointKey JointKey::construct( std::string aName )
JointKey JointKey::construct(const std::string& aName)
{
boost::unordered_map< std::string, U32 >::iterator itr = mpStringToKeys.find( aName );
if( mpStringToKeys.end() == itr )
if (const auto itr = mpStringToKeys.find(aName); itr != mpStringToKeys.end())
{
U32 size = mpStringToKeys.size() + 1;
JointKey key{ aName, size };
mpStringToKeys[ aName ] = size;
return key;
return { aName, itr->second };
}
return JointKey{ aName, itr->second };
U32 size = mpStringToKeys.size() + 1;
mpStringToKeys.try_emplace(aName, size);
return { aName, size };
}
// </FS:ND>
@ -272,10 +268,13 @@ LLJoint *LLJoint::findJoint( const std::string &name )
for (LLJoint* joint : mChildren)
{
LLJoint *found = joint->findJoint(name);
if (found)
{
return found;
if(joint)
{
LLJoint *found = joint->findJoint(name);
if (found)
{
return found;
}
}
}

View File

@ -46,7 +46,7 @@ struct JointKey
std::string mName;
U32 mKey;
static JointKey construct( std::string aName );
static JointKey construct(const std::string& aName);
};
inline bool operator==(JointKey const &aLHS, JointKey const &aRHS)
@ -63,7 +63,6 @@ inline std::ostream& operator<<(std::ostream &aLHS, JointKey const &aRHS)
{
return aLHS << aRHS.mName << " (" << aRHS.mKey << ")";
}
// </FS:ND>
const S32 LL_CHARACTER_MAX_JOINTS_PER_MESH = 15;

View File

@ -17,7 +17,10 @@ include(Tracy)
set(llcommon_SOURCE_FILES
apply.cpp
commoncontrol.cpp
indra_constants.cpp
lazyeventapi.cpp
llallocator.cpp
llallocator_heap_profile.cpp
llapp.cpp
@ -115,11 +118,16 @@ set(llcommon_SOURCE_FILES
set(llcommon_HEADER_FILES
CMakeLists.txt
always_return.h
apply.h
chrono.h
classic_callback.h
commoncontrol.h
ctype_workaround.h
fix_macros.h
function_types.h
indra_constants.h
lazyeventapi.h
linden_common.h
llalignedarray.h
llallocator.h
@ -173,6 +181,7 @@ set(llcommon_HEADER_FILES
llinitdestroyclass.h
llinitparam.h
llinstancetracker.h
llinstancetrackersubclass.h
llkeybind.h
llkeythrottle.h
llleap.h
@ -246,6 +255,7 @@ set(llcommon_HEADER_FILES
stdtypes.h
stringize.h
threadpool.h
threadpool_fwd.h
threadsafeschedule.h
timer.h
tuple.h
@ -328,9 +338,11 @@ if (LL_TESTS)
#set(TEST_DEBUG on)
set(test_libs llcommon)
LL_ADD_INTEGRATION_TEST(apply "" "${test_libs}")
LL_ADD_INTEGRATION_TEST(bitpack "" "${test_libs}")
LL_ADD_INTEGRATION_TEST(classic_callback "" "${test_libs}")
LL_ADD_INTEGRATION_TEST(commonmisc "" "${test_libs}")
LL_ADD_INTEGRATION_TEST(lazyeventapi "" "${test_libs}")
LL_ADD_INTEGRATION_TEST(llbase64 "" "${test_libs}")
LL_ADD_INTEGRATION_TEST(llcond "" "${test_libs}")
LL_ADD_INTEGRATION_TEST(lldate "" "${test_libs}")

View File

@ -0,0 +1,124 @@
/**
* @file always_return.h
* @author Nat Goodspeed
* @date 2023-01-20
* @brief Call specified callable with arbitrary arguments, but always return
* specified type.
*
* $LicenseInfo:firstyear=2023&license=viewerlgpl$
* Copyright (c) 2023, Linden Research, Inc.
* $/LicenseInfo$
*/
#if ! defined(LL_ALWAYS_RETURN_H)
#define LL_ALWAYS_RETURN_H
#include <type_traits> // std::enable_if, std::is_convertible
namespace LL
{
#if __cpp_lib_is_invocable >= 201703L // C++17
template <typename CALLABLE, typename... ARGS>
using invoke_result = std::invoke_result<CALLABLE, ARGS...>;
#else // C++14
template <typename CALLABLE, typename... ARGS>
using invoke_result = std::result_of<CALLABLE(ARGS...)>;
#endif // C++14
/**
* AlwaysReturn<T>()(some_function, some_args...) calls
* some_function(some_args...). It is guaranteed to return a value of type
* T, regardless of the return type of some_function(). If some_function()
* returns a type convertible to T, it will convert and return that value.
* Otherwise (notably if some_function() is void), AlwaysReturn returns
* T().
*
* When some_function() returns a type not convertible to T, if
* you want AlwaysReturn to return some T value other than
* default-constructed T(), pass that value to AlwaysReturn's constructor.
*/
template <typename DESIRED>
class AlwaysReturn
{
public:
/// pass explicit default value if other than default-constructed type
AlwaysReturn(const DESIRED& dft=DESIRED()): mDefault(dft) {}
// callable returns a type not convertible to DESIRED, return default
template <typename CALLABLE, typename... ARGS,
typename std::enable_if<
! std::is_convertible<
typename invoke_result<CALLABLE, ARGS...>::type,
DESIRED
>::value,
bool
>::type=true>
DESIRED operator()(CALLABLE&& callable, ARGS&&... args)
{
// discard whatever callable(args) returns
std::forward<CALLABLE>(callable)(std::forward<ARGS>(args)...);
return mDefault;
}
// callable returns a type convertible to DESIRED
template <typename CALLABLE, typename... ARGS,
typename std::enable_if<
std::is_convertible<
typename invoke_result<CALLABLE, ARGS...>::type,
DESIRED
>::value,
bool
>::type=true>
DESIRED operator()(CALLABLE&& callable, ARGS&&... args)
{
return { std::forward<CALLABLE>(callable)(std::forward<ARGS>(args)...) };
}
private:
DESIRED mDefault;
};
/**
* always_return<T>(some_function, some_args...) calls
* some_function(some_args...). It is guaranteed to return a value of type
* T, regardless of the return type of some_function(). If some_function()
* returns a type convertible to T, it will convert and return that value.
* Otherwise (notably if some_function() is void), always_return() returns
* T().
*/
template <typename DESIRED, typename CALLABLE, typename... ARGS>
DESIRED always_return(CALLABLE&& callable, ARGS&&... args)
{
return AlwaysReturn<DESIRED>()(std::forward<CALLABLE>(callable),
std::forward<ARGS>(args)...);
}
/**
* make_always_return<T>(some_function) returns a callable which, when
* called with appropriate some_function() arguments, always returns a
* value of type T, regardless of the return type of some_function(). If
* some_function() returns a type convertible to T, the returned callable
* will convert and return that value. Otherwise (notably if
* some_function() is void), the returned callable returns T().
*
* When some_function() returns a type not convertible to T, if
* you want the returned callable to return some T value other than
* default-constructed T(), pass that value to make_always_return() as its
* optional second argument.
*/
template <typename DESIRED, typename CALLABLE>
auto make_always_return(CALLABLE&& callable, const DESIRED& dft=DESIRED())
{
return
[dft, callable = std::forward<CALLABLE>(callable)]
(auto&&... args)
{
return AlwaysReturn<DESIRED>(dft)(callable,
std::forward<decltype(args)>(args)...);
};
}
} // namespace LL
#endif /* ! defined(LL_ALWAYS_RETURN_H) */

29
indra/llcommon/apply.cpp Normal file
View File

@ -0,0 +1,29 @@
/**
 * @file apply.cpp
 * @author Nat Goodspeed
 * @date 2022-12-21
 * @brief Implementation for apply.
 *
 * $LicenseInfo:firstyear=2022&license=viewerlgpl$
 * Copyright (c) 2022, Linden Research, Inc.
 * $/LicenseInfo$
 */

// Precompiled header
#include "linden_common.h"
// associated header
#include "apply.h"
// STL headers
// std headers
// external library headers
// other Linden headers
#include "stringize.h"

// Verify that an apply(func, vector) call received exactly as many vector
// elements as func has parameters; throw apply_error on a mismatch.
void LL::apply_validate_size(size_t size, size_t arity)
{
    // happy path: the vector's length matches the callable's arity
    if (size == arity)
    {
        return;
    }
    // mismatch: report both the expected arity and the actual element count
    LLTHROW(apply_error(stringize("LL::apply(func(", arity, " args), "
                                  "std::vector(", size, " elements))")));
}

View File

@ -12,8 +12,11 @@
#if ! defined(LL_APPLY_H)
#define LL_APPLY_H
#include "llexception.h"
#include <boost/type_traits/function_traits.hpp>
#include <functional> // std::mem_fn()
#include <tuple>
#include <type_traits> // std::is_member_pointer
namespace LL
{
@ -54,20 +57,67 @@ namespace LL
}, \
(ARGS))
#if __cplusplus >= 201703L
/*****************************************************************************
* invoke()
*****************************************************************************/
#if __cpp_lib_invoke >= 201411L
// C++17 implementation
using std::apply;
using std::invoke;
#else // no std::invoke
// Use invoke() to handle pointer-to-method:
// derived from https://stackoverflow.com/a/38288251
template<typename Fn, typename... Args,
typename std::enable_if<std::is_member_pointer<typename std::decay<Fn>::type>::value,
int>::type = 0 >
auto invoke(Fn&& f, Args&&... args)
{
return std::mem_fn(std::forward<Fn>(f))(std::forward<Args>(args)...);
}
template<typename Fn, typename... Args,
typename std::enable_if<!std::is_member_pointer<typename std::decay<Fn>::type>::value,
int>::type = 0 >
auto invoke(Fn&& f, Args&&... args)
{
return std::forward<Fn>(f)(std::forward<Args>(args)...);
}
#endif // no std::invoke
/*****************************************************************************
* apply(function, tuple); apply(function, array)
*****************************************************************************/
#if __cpp_lib_apply >= 201603L
// C++17 implementation
// We don't just say 'using std::apply;' because that template is too general:
// it also picks up the apply(function, vector) case, which we want to handle
// below.
template <typename CALLABLE, typename... ARGS>
auto apply(CALLABLE&& func, const std::tuple<ARGS...>& args)
{
return std::apply(std::forward<CALLABLE>(func), args);
}
#else // C++14
// Derived from https://stackoverflow.com/a/20441189
// and https://en.cppreference.com/w/cpp/utility/apply
template <typename CALLABLE, typename TUPLE, std::size_t... I>
auto apply_impl(CALLABLE&& func, TUPLE&& args, std::index_sequence<I...>)
template <typename CALLABLE, typename... ARGS, std::size_t... I>
auto apply_impl(CALLABLE&& func, const std::tuple<ARGS...>& args, std::index_sequence<I...>)
{
// We accept const std::tuple& so a caller can construct an tuple on the
// fly. But std::get<I>(const tuple) adds a const qualifier to everything
// it extracts. Get a non-const ref to this tuple so we can extract
// without the extraneous const.
auto& non_const_args{ const_cast<std::tuple<ARGS...>&>(args) };
// call func(unpacked args)
return std::forward<CALLABLE>(func)(std::move(std::get<I>(args))...);
return invoke(std::forward<CALLABLE>(func),
std::forward<ARGS>(std::get<I>(non_const_args))...);
}
template <typename CALLABLE, typename... ARGS>
@ -81,6 +131,8 @@ auto apply(CALLABLE&& func, const std::tuple<ARGS...>& args)
std::index_sequence_for<ARGS...>{});
}
#endif // C++14
// per https://stackoverflow.com/a/57510428/5533635
template <typename CALLABLE, typename T, size_t SIZE>
auto apply(CALLABLE&& func, const std::array<T, SIZE>& args)
@ -88,28 +140,92 @@ auto apply(CALLABLE&& func, const std::array<T, SIZE>& args)
return apply(std::forward<CALLABLE>(func), std::tuple_cat(args));
}
/*****************************************************************************
* bind_front()
*****************************************************************************/
// To invoke a non-static member function with a tuple, you need a callable
// that binds your member function with an instance pointer or reference.
// std::bind_front() is perfect: std::bind_front(&cls::method, instance).
// Unfortunately bind_front() only enters the standard library in C++20.
#if __cpp_lib_bind_front >= 201907L
// C++20 implementation
using std::bind_front;
#else // no std::bind_front()
template<typename Fn, typename... Args,
typename std::enable_if<!std::is_member_pointer<typename std::decay<Fn>::type>::value,
int>::type = 0 >
auto bind_front(Fn&& f, Args&&... args)
{
// Don't use perfect forwarding for f or args: we must bind them for later.
return [f, pfx_args=std::make_tuple(args...)]
(auto&&... sfx_args)
{
// Use perfect forwarding for sfx_args because we use them as soon as
// we receive them.
return apply(
f,
std::tuple_cat(pfx_args,
std::make_tuple(std::forward<decltype(sfx_args)>(sfx_args)...)));
};
}
template<typename Fn, typename... Args,
typename std::enable_if<std::is_member_pointer<typename std::decay<Fn>::type>::value,
int>::type = 0 >
auto bind_front(Fn&& f, Args&&... args)
{
return bind_front(std::mem_fn(std::forward<Fn>(f)), std::forward<Args>(args)...);
}
#endif // C++20 with std::bind_front()
/*****************************************************************************
* apply(function, std::vector)
*****************************************************************************/
// per https://stackoverflow.com/a/28411055/5533635
template <typename CALLABLE, typename T, std::size_t... I>
auto apply_impl(CALLABLE&& func, const std::vector<T>& args, std::index_sequence<I...>)
{
return apply_impl(std::forward<CALLABLE>(func),
std::make_tuple(std::forward<T>(args[I])...),
I...);
return apply(std::forward<CALLABLE>(func),
std::make_tuple(args[I]...));
}
// this goes beyond C++17 std::apply()
// Validate at runtime that a vector of 'size' arguments suits a function of
// the given 'arity'; defined out of line. On mismatch it reports a suitable
// error -- NOTE(review): exact throw-vs-log behavior lives in the .cpp,
// confirm there.
void apply_validate_size(size_t size, size_t arity);

/// possible exception from apply() validation
struct apply_error: public LLException
{
    apply_error(const std::string& what): LLException(what) {}
};

// Call func with exactly ARITY arguments taken from the front of 'args'
// (indices 0..ARITY-1 via make_index_sequence). Lets a caller pass a fixed
// number of args to a variadic callable, whose arity can't be inferred.
template <size_t ARITY, typename CALLABLE, typename T>
auto apply_n(CALLABLE&& func, const std::vector<T>& args)
{
    // reject a vector of the wrong size before expanding indices
    apply_validate_size(args.size(), ARITY);
    return apply_impl(std::forward<CALLABLE>(func),
                      args,
                      std::make_index_sequence<ARITY>());
}
/**
* apply(function, std::vector) goes beyond C++17 std::apply(). For this case
* @a function @emph cannot be variadic: the compiler must know at compile
* time how many arguments to pass. This isn't Python. (But see apply_n() to
* pass a specific number of args to a variadic function.)
*/
template <typename CALLABLE, typename T>
auto apply(CALLABLE&& func, const std::vector<T>& args)
{
// infer arity from the definition of func
constexpr auto arity = boost::function_traits<CALLABLE>::arity;
assert(args.size() == arity);
return apply_impl(std::forward<CALLABLE>(func),
args,
std::make_index_sequence<arity>());
// now that we have a compile-time arity, apply_n() works
return apply_n<arity>(std::forward<CALLABLE>(func), args);
}
#endif // C++14
} // namespace LL
#endif /* ! defined(LL_APPLY_H) */

View File

@ -0,0 +1,106 @@
/**
* @file commoncontrol.cpp
* @author Nat Goodspeed
* @date 2022-06-08
* @brief Implementation for commoncontrol.
*
* $LicenseInfo:firstyear=2022&license=viewerlgpl$
* Copyright (c) 2022, Linden Research, Inc.
* $/LicenseInfo$
*/
// Precompiled header
#include "linden_common.h"
// associated header
#include "commoncontrol.h"
// STL headers
// std headers
// external library headers
// other Linden headers
#include "llevents.h"
#include "llsdutil.h"
/**
 * Post one request to the "LLViewerControl" event pump and return the reply.
 *
 * @param params operation map for the LLViewerControl listener; the "reply"
 *        key is filled in here.
 * @return the listener's response, with the "error" and "reqid" plumbing
 *         keys stripped.
 * @throws NoListener if no response arrived (no listener instantiated).
 * @throws ParamError if the response carries a defined "error" value.
 */
LLSD LL::CommonControl::access(const LLSD& params)
{
    // We can't actually introduce a link-time dependency on llxml, or on any
    // global LLControlGroup (*koff* gSavedSettings *koff*) but we can issue a
    // runtime query. If we're running as part of a viewer with
    // LLViewerControlListener, we can use that to interact with any
    // instantiated LLControlGroup.
    LLSD response;
    {
        // Scoped so the temporary reply pump and its listener are torn down
        // as soon as the post() below returns.
        LLEventStream reply("reply");
        LLTempBoundListener connection = reply.listen("listener",
            [&response] (const LLSD& event)
            {
                response = event;
                return false;
            });
        LLSD rparams{ params };
        rparams["reply"] = reply.getName();
        LLEventPumps::instance().obtain("LLViewerControl").post(rparams);
    }
    // LLViewerControlListener responds immediately. If it's listening at all,
    // it will already have set response.
    if (! response.isDefined())
    {
        LLTHROW(NoListener("No LLViewerControl listener instantiated"));
    }
    LLSD error{ response["error"] };
    if (error.isDefined())
    {
        LLTHROW(ParamError(error));
    }
    // strip plumbing keys: callers only care about the payload
    response.erase("error");
    response.erase("reqid");
    return response;
}
/// Reset control group.key to its defined default value; returns new value.
LLSD LL::CommonControl::set_default(const std::string& group, const std::string& key)
{
    // a "set" op with no "value" key requests the default
    LLSD request{ llsd::map("op", "set", "group", group, "key", key) };
    return access(request)["value"];
}
/// Store a specific value into control group.key; returns the new value.
LLSD LL::CommonControl::set(const std::string& group, const std::string& key, const LLSD& value)
{
    LLSD request{ llsd::map("op", "set",
                            "group", group, "key", key, "value", value) };
    return access(request)["value"];
}
/// Flip boolean control group.key; returns the value after flipping.
LLSD LL::CommonControl::toggle(const std::string& group, const std::string& key)
{
    LLSD request{ llsd::map("op", "toggle", "group", group, "key", key) };
    return access(request)["value"];
}
/// Fetch the full definition for control group.key:
/// ["name"], ["type"], ["value"], ["comment"]; (! isDefined()) if bad.
LLSD LL::CommonControl::get_def(const std::string& group, const std::string& key)
{
    LLSD request{ llsd::map("op", "get", "group", group, "key", key) };
    // return the whole response map, not just ["value"]
    return access(request);
}
/// Fetch just the current value of control group.key.
LLSD LL::CommonControl::get(const std::string& group, const std::string& key)
{
    LLSD request{ llsd::map("op", "get", "group", group, "key", key) };
    return access(request)["value"];
}
/// get defined groups
std::vector<std::string> LL::CommonControl::get_groups()
{
auto groups{ access(llsd::map("op", "groups"))["groups"] };
return { groups.beginArray(), groups.endArray() };
}
/// Fetch definitions for every variable in the named group.
LLSD LL::CommonControl::get_vars(const std::string& group)
{
    LLSD request{ llsd::map("op", "vars", "group", group) };
    return access(request)["vars"];
}

View File

@ -0,0 +1,75 @@
/**
* @file commoncontrol.h
* @author Nat Goodspeed
* @date 2022-06-08
* @brief Access LLViewerControl LLEventAPI, if process has one.
*
* $LicenseInfo:firstyear=2022&license=viewerlgpl$
* Copyright (c) 2022, Linden Research, Inc.
* $/LicenseInfo$
*/
#if ! defined(LL_COMMONCONTROL_H)
#define LL_COMMONCONTROL_H
#include <vector>
#include "llexception.h"
#include "llsd.h"
namespace LL
{
    /**
     * CommonControl is a facade over the viewer's control/settings
     * machinery with no link-time dependency on llxml or any particular
     * LLControlGroup: every operation is a runtime query posted to the
     * "LLViewerControl" event API, so it only works in a process that
     * instantiates such a listener. Any method may throw NoListener or
     * ParamError via the underlying access() call.
     */
    class CommonControl
    {
    public:
        /// Base class for all exceptions thrown by CommonControl
        struct Error: public LLException
        {
            Error(const std::string& what): LLException(what) {}
        };

        /// Exception thrown if there's no LLViewerControl LLEventAPI
        struct NoListener: public Error
        {
            NoListener(const std::string& what): Error(what) {}
        };

        /// Exception thrown when the listener reports an error for a
        /// request, e.g. a bad group or key
        struct ParamError: public Error
        {
            ParamError(const std::string& what): Error(what) {}
        };

        /// set control group.key to defined default value
        static
        LLSD set_default(const std::string& group, const std::string& key);

        /// set control group.key to specified value
        static
        LLSD set(const std::string& group, const std::string& key, const LLSD& value);

        /// toggle boolean control group.key
        static
        LLSD toggle(const std::string& group, const std::string& key);

        /// get the definition for control group.key, (! isDefined()) if bad
        /// ["name"], ["type"], ["value"], ["comment"]
        static
        LLSD get_def(const std::string& group, const std::string& key);

        /// get the value of control group.key
        static
        LLSD get(const std::string& group, const std::string& key);

        /// get defined groups
        static
        std::vector<std::string> get_groups();

        /// get definitions for all variables in group
        static
        LLSD get_vars(const std::string& group);

    private:
        /// shared plumbing: post one request map to the LLViewerControl
        /// pump and await the synchronous reply
        static
        LLSD access(const LLSD& params);
    };
} // namespace LL
#endif /* ! defined(LL_COMMONCONTROL_H) */

View File

@ -0,0 +1,49 @@
/**
* @file function_types.h
* @author Nat Goodspeed
* @date 2023-01-20
* @brief Extend boost::function_types to examine boost::function and
* std::function
*
* $LicenseInfo:firstyear=2023&license=viewerlgpl$
* Copyright (c) 2023, Linden Research, Inc.
* $/LicenseInfo$
*/
#if ! defined(LL_FUNCTION_TYPES_H)
#define LL_FUNCTION_TYPES_H
#include <boost/function.hpp>
#include <boost/function_types/function_arity.hpp>
#include <functional>
namespace LL
{
    // Primary template: for a plain function signature, defer directly to
    // boost::function_types to count the parameters.
    template <typename SIG>
    struct function_arity_impl
    {
        static constexpr auto value = boost::function_types::function_arity<SIG>::value;
    };

    // Specialization: peel off a std::function wrapper and examine the
    // signature it carries.
    template <typename SIG>
    struct function_arity_impl<std::function<SIG>>
    {
        static constexpr auto value = function_arity_impl<SIG>::value;
    };

    // Specialization: likewise peel off a boost::function wrapper.
    template <typename SIG>
    struct function_arity_impl<boost::function<SIG>>
    {
        static constexpr auto value = function_arity_impl<SIG>::value;
    };

    // Public entry point: strip cv/reference qualifiers first so callers may
    // pass any flavor of the callable type.
    template <typename SIG>
    struct function_arity
    {
        static constexpr auto value =
            function_arity_impl<typename std::decay<SIG>::type>::value;
    };
} // namespace LL
#endif /* ! defined(LL_FUNCTION_TYPES_H) */

View File

@ -0,0 +1,72 @@
/**
* @file lazyeventapi.cpp
* @author Nat Goodspeed
* @date 2022-06-17
* @brief Implementation for lazyeventapi.
*
* $LicenseInfo:firstyear=2022&license=viewerlgpl$
* Copyright (c) 2022, Linden Research, Inc.
* $/LicenseInfo$
*/
// Precompiled header
#include "linden_common.h"
// associated header
#include "lazyeventapi.h"
// STL headers
// std headers
#include <algorithm> // std::find_if
// external library headers
// other Linden headers
#include "llevents.h"
#include "llsdutil.h"
/**
 * Capture the (name, desc, field) parameters destined for the eventual
 * LLEventAPI subclass, and register this instance as the on-demand factory
 * for the pump called 'name'.
 */
LL::LazyEventAPIBase::LazyEventAPIBase(
    const std::string& name, const std::string& desc, const std::string& field)
{
    // populate embedded LazyEventAPIParams instance
    mParams.name = name;
    mParams.desc = desc;
    mParams.field = field;
    // mParams.init and mOperations are populated by subsequent add() calls.

    // Our raison d'etre: register as an LLEventPumps::PumpFactory
    // so obtain() will notice any request for this name and call us.
    // Of course, our subclass constructor must finish running (making add()
    // calls) before mParams will be fully populated, but we expect that to
    // happen well before the first LLEventPumps::obtain(name) call.
    // Registration can fail (e.g. the name is already claimed); the
    // destructor consults mRegistered so we never unregister someone else's
    // factory.
    mRegistered = LLEventPumps::instance().registerPumpFactory(
        name,
        [this](const std::string& name){ return construct(name); });
}
LL::LazyEventAPIBase::~LazyEventAPIBase()
{
    // If our constructor's registerPumpFactory() call failed, somebody else
    // probably claimed the name first -- leave their registration alone.
    if (! mRegistered)
        return;
    // A static instance of us may be destroyed at process shutdown, after
    // LLEventPumps itself has already been cleaned up.
    if (LLEventPumps::wasDeleted())
        return;
    // unregister the factory callback bound to this doomed instance
    LLEventPumps::instance().unregisterPumpFactory(mParams.name);
}
/**
 * Return the ("name", "desc") metadata map for the named operation, or an
 * undefined LLSD if that operation name was never add()ed.
 */
LLSD LL::LazyEventAPIBase::getMetadata(const std::string& name) const
{
    // mOperations is a vector of (name, desc) pairs rather than a map:
    // a simple linear scan suffices.
    for (const auto& opdesc : mOperations)
    {
        if (opdesc.first == name)
        {
            // LLEventDispatcher() supplements the returned metadata in
            // different ways, depending on metadata provided to the
            // specific add() method. Don't try to emulate all that. At some
            // point we might consider more closely unifying
            // LLEventDispatcher machinery with LazyEventAPI, but for now
            // this will have to do.
            return llsd::map("name", opdesc.first, "desc", opdesc.second);
        }
    }
    // unknown operation name
    return {};
}

View File

@ -0,0 +1,205 @@
/**
* @file lazyeventapi.h
* @author Nat Goodspeed
* @date 2022-06-16
* @brief Declaring a static module-scope LazyEventAPI registers a specific
* LLEventAPI for future on-demand instantiation.
*
* $LicenseInfo:firstyear=2022&license=viewerlgpl$
* Copyright (c) 2022, Linden Research, Inc.
* $/LicenseInfo$
*/
#if ! defined(LL_LAZYEVENTAPI_H)
#define LL_LAZYEVENTAPI_H
#include "apply.h"
#include "lleventapi.h"
#include "llinstancetracker.h"
#include <boost/signals2/signal.hpp>
#include <string>
#include <tuple>
#include <utility> // std::pair
#include <vector>
namespace LL
{
/**
* Bundle params we want to pass to LLEventAPI's protected constructor. We
* package them this way so a subclass constructor can simply forward an
* opaque reference to the LLEventAPI constructor.
*/
    // This is a class instead of a plain struct mostly so when we forward-
    // declare it we don't have to remember the distinction.
    class LazyEventAPIParams
    {
    public:
        // package the parameters used by the normal LLEventAPI constructor
        std::string name, desc, field;
        // bundle LLEventAPI::add() calls collected by LazyEventAPI::add(), so
        // the special LLEventAPI constructor we engage can "play back" those
        // add() calls against the freshly-constructed instance
        boost::signals2::signal<void(LLEventAPI*)> init;
    };
/**
* LazyEventAPIBase implements most of the functionality of LazyEventAPI
* (q.v.), but we need the LazyEventAPI template subclass so we can accept
* the specific LLEventAPI subclass type.
*/
    // No LLInstanceTracker key: we don't need to find a specific instance,
    // LLLeapListener just needs to be able to enumerate all instances.
    class LazyEventAPIBase: public LLInstanceTracker<LazyEventAPIBase>
    {
    public:
        /// Store the LLEventAPI construction parameters and register as the
        /// on-demand pump factory for 'name'.
        LazyEventAPIBase(const std::string& name, const std::string& desc,
                         const std::string& field);
        virtual ~LazyEventAPIBase();

        // Do not copy or move: once constructed, LazyEventAPIBase must stay
        // put: we bind its instance pointer into a callback.
        LazyEventAPIBase(const LazyEventAPIBase&) = delete;
        LazyEventAPIBase(LazyEventAPIBase&&) = delete;
        LazyEventAPIBase& operator=(const LazyEventAPIBase&) = delete;
        LazyEventAPIBase& operator=(LazyEventAPIBase&&) = delete;

        // capture add() calls we want to play back on LLEventAPI construction
        template <typename... ARGS>
        void add(const std::string& name, const std::string& desc, ARGS&&... rest)
        {
            // capture the metadata separately
            mOperations.push_back(std::make_pair(name, desc));
            // Use connect_extended() so the lambda is passed its own
            // connection.

            // apply() can't accept a template per se; it needs a particular
            // specialization. Specialize out here to work around a clang bug:
            // https://github.com/llvm/llvm-project/issues/41999
            auto func{ &LazyEventAPIBase::add_trampoline
                       <const std::string&, const std::string&, ARGS...> };
            // We can't bind an unexpanded parameter pack into a lambda --
            // shame really. Instead, capture all our args as a std::tuple and
            // then, in the lambda, use apply() to pass to add_trampoline().
            auto args{ std::make_tuple(name, desc, std::forward<ARGS>(rest)...) };
            mParams.init.connect_extended(
                [func, args]
                (const boost::signals2::connection& conn, LLEventAPI* instance)
                {
                    // we only need this connection once
                    conn.disconnect();
                    // apply() expects a tuple specifying ALL the arguments,
                    // so prepend instance.
                    apply(func, std::tuple_cat(std::make_tuple(instance), args));
                });
        }

        // The following queries mimic the LLEventAPI / LLEventDispatcher
        // query API.

        // Get the string name of the subject LLEventAPI
        std::string getName() const { return mParams.name; }
        // Get the documentation string
        std::string getDesc() const { return mParams.desc; }
        // Retrieve the LLSD key we use for dispatching
        std::string getDispatchKey() const { return mParams.field; }

        // operations
        using NameDesc = std::pair<std::string, std::string>;

    private:
        // metadata that might be queried by LLLeapListener
        std::vector<NameDesc> mOperations;

    public:
        // const iteration over the captured (name, desc) operation pairs
        using const_iterator = decltype(mOperations)::const_iterator;
        const_iterator begin() const { return mOperations.begin(); }
        const_iterator end() const { return mOperations.end(); }
        // metadata map for one operation, or undefined LLSD if unknown
        LLSD getMetadata(const std::string& name) const;

    protected:
        // Params with which to instantiate the companion LLEventAPI subclass
        LazyEventAPIParams mParams;

    private:
        // true if we successfully registered our LLEventAPI on construction
        bool mRegistered;

        // actually instantiate the companion LLEventAPI subclass
        virtual LLEventPump* construct(const std::string& name) = 0;

        // Passing an overloaded function to any function that accepts an
        // arbitrary callable is a PITB because you have to specify the
        // correct overload. What we want is for the compiler to select the
        // correct overload, based on the carefully-wrought enable_ifs in
        // LLEventDispatcher. This (one and only) add_trampoline() method
        // exists solely to pass to LL::apply(). Once add_trampoline() is
        // called with the expanded arguments, we hope the compiler will Do
        // The Right Thing in selecting the correct LLEventAPI::add()
        // overload.
        template <typename... ARGS>
        static
        void add_trampoline(LLEventAPI* instance, ARGS&&... args)
        {
            instance->add(std::forward<ARGS>(args)...);
        }
    };
/**
* LazyEventAPI provides a way to register a particular LLEventAPI to be
* instantiated on demand, that is, when its name is passed to
* LLEventPumps::obtain().
*
* Derive your listener from LLEventAPI as usual, with its various
* operation methods, but code your constructor to accept
* <tt>(const LL::LazyEventAPIParams& params)</tt>
* and forward that reference to (the protected)
* <tt>LLEventAPI(const LL::LazyEventAPIParams&)</tt> constructor.
*
* Then derive your listener registrar from
* <tt>LazyEventAPI<your LLEventAPI subclass></tt>. The constructor should
* look very like a traditional LLEventAPI constructor:
*
* * pass (name, desc [, field]) to LazyEventAPI's constructor
* * in the body, make a series of add() calls referencing your LLEventAPI
* subclass methods.
*
* You may use any LLEventAPI::add() methods, that is, any
* LLEventDispatcher::add() methods. But the target methods you pass to
* add() must belong to your LLEventAPI subclass, not the LazyEventAPI
* subclass.
*
* Declare a static instance of your LazyEventAPI listener registrar
* class. When it's constructed at static initialization time, it will
* register your LLEventAPI subclass with LLEventPumps. It will also
* collect metadata for the LLEventAPI and its operations to provide to
* LLLeapListener's introspection queries.
*
* When someone later calls LLEventPumps::obtain() to post an event to
* your LLEventAPI subclass, obtain() will instantiate it using
* LazyEventAPI's name, desc, field and add() calls.
*/
    template <class EVENTAPI>
    class LazyEventAPI: public LazyEventAPIBase
    {
    public:
        // for subclass constructor to reference handler methods
        using listener = EVENTAPI;

        /// Same shape as a traditional LLEventAPI constructor:
        /// (name, desc [, dispatch field, default "op"])
        LazyEventAPI(const std::string& name, const std::string& desc,
                     const std::string& field="op"):
            // Forward ctor params to LazyEventAPIBase
            LazyEventAPIBase(name, desc, field)
        {}

    private:
        // Factory callback invoked by LLEventPumps::obtain() on first
        // request for our name.
        LLEventPump* construct(const std::string& /*name*/) override
        {
            // base class has carefully assembled LazyEventAPIParams embedded
            // in this instance, just pass to LLEventAPI subclass constructor
            return new EVENTAPI(mParams);
        }
    };
} // namespace LL
#endif /* ! defined(LL_LAZYEVENTAPI_H) */

View File

@ -38,6 +38,12 @@ const S32 FULL_VOLATILE_APR_POOL = 1024 ; //number of references to LLVolatileAP
bool gAPRInitialized = false;
// APR pool abort callback (installed via apr_pool_create_ex below): invoked
// by APR when a pool allocation fails. We log loudly and return 0 --
// NOTE(review): confirm against APR docs how a returning (non-aborting)
// apr_abortfunc_t result is handled by the allocator.
int abortfunc(int retcode)
{
    LL_WARNS("APR") << "Allocation failure in apr pool with code " << (S32)retcode << LL_ENDL;
    return 0;
}
void ll_init_apr()
{
// Initialize APR and create the global pool
@ -45,7 +51,7 @@ void ll_init_apr()
if (!gAPRPoolp)
{
apr_pool_create(&gAPRPoolp, NULL);
apr_pool_create_ex(&gAPRPoolp, NULL, abortfunc, NULL);
}
if(!LLAPRFile::sAPRFilePoolp)
@ -532,6 +538,7 @@ S32 LLAPRFile::seek(apr_file_t* file_handle, apr_seek_where_t where, S32 offset)
//static
S32 LLAPRFile::readEx(const std::string& filename, void *buf, S32 offset, S32 nbytes, LLVolatileAPRPool* pool)
{
LL_PROFILE_ZONE_SCOPED;
//*****************************************
LLAPRFilePoolScope scope(pool);
apr_file_t* file_handle = open(filename, scope.getVolatileAPRPool(), APR_READ|APR_BINARY);
@ -576,6 +583,7 @@ S32 LLAPRFile::readEx(const std::string& filename, void *buf, S32 offset, S32 nb
//static
S32 LLAPRFile::writeEx(const std::string& filename, void *buf, S32 offset, S32 nbytes, LLVolatileAPRPool* pool)
{
LL_PROFILE_ZONE_SCOPED;
apr_int32_t flags = APR_CREATE|APR_WRITE|APR_BINARY;
if (offset < 0)
{

View File

@ -96,6 +96,7 @@ LLAssetDictionary::LLAssetDictionary()
addEntry(LLAssetType::AT_WIDGET, new AssetEntry("WIDGET", "widget", "widget", false, false, false));
addEntry(LLAssetType::AT_PERSON, new AssetEntry("PERSON", "person", "person", false, false, false));
addEntry(LLAssetType::AT_SETTINGS, new AssetEntry("SETTINGS", "settings", "settings blob", true, true, true));
addEntry(LLAssetType::AT_MATERIAL, new AssetEntry("MATERIAL", "material", "render material", true, true, true));
addEntry(LLAssetType::AT_UNKNOWN, new AssetEntry("UNKNOWN", "invalid", NULL, false, false, false));
addEntry(LLAssetType::AT_NONE, new AssetEntry("NONE", "-1", NULL, FALSE, FALSE, FALSE));

View File

@ -127,8 +127,9 @@ public:
AT_RESERVED_6 = 55,
AT_SETTINGS = 56, // Collection of settings
AT_COUNT = 57,
AT_MATERIAL = 57, // Render Material
AT_COUNT = 58,
// +*********************************************************+
// | TO ADD AN ELEMENT TO THIS ENUM: |

View File

@ -79,23 +79,15 @@ struct LLContextStatus
LL_COMMON_API std::ostream& operator<<(std::ostream& s, const LLContextStatus& context_status);
/* <FS:TS> gcc gets unhappy at what it thinks are multiline comments
// <FS:Beq> Store the check to avoid the nasty mutex monster that lies within
//#define dumpStack(tag) \
// if (debugLoggingEnabled(tag)) \
// { \
// LLCallStack cs; \
// LL_DEBUGS(tag) << "STACK:\n" << "====================\n" << cs << "====================" << LL_ENDL; \
// }
*/
// <FS:Ansariel> Restore this: Don't need this in actual relase builds
#ifdef LL_RELEASE_FOR_DOWNLOAD
#define dumpStack(tag)
#else
#define dumpStack(tag) \
if (debugLoggingEnabled(tag)) \
{ \
LLCallStack cs; \
LL_DEBUGS(tag) << "STACK:\n" << "====================\n" << cs << "====================" << LL_ENDL; \
}
#define dumpStack(tag) \
LL_DEBUGS(tag) << "STACK:\n" \
<< "====================\n" \
<< LLCallStack() \
<< "====================" \
<< LL_ENDL;
#endif
// </FS:Beq>
// </FS:Ansariel>

View File

@ -38,12 +38,13 @@ thread_local bool gProfilerEnabled = false;
// <FS:Beq/> #if (TRACY_ENABLE)
#if (TRACY_ENABLE) && LL_PROFILER_ENABLE_TRACY_MEMORY
// Override new/delete for tracy memory profiling
void *operator new(size_t size)
void* ll_tracy_new(size_t size)
{
void* ptr;
if (gProfilerEnabled)
{
LL_PROFILE_ZONE_SCOPED_CATEGORY_MEMORY;
//LL_PROFILE_ZONE_SCOPED_CATEGORY_MEMORY;
ptr = (malloc)(size);
}
else
@ -58,12 +59,22 @@ void *operator new(size_t size)
return ptr;
}
void operator delete(void *ptr) noexcept
// Global scalar operator new: route through the shared ll_tracy_new()
// helper above (Tracy memory-profiling build only; see enclosing #if).
void* operator new(size_t size)
{
    return ll_tracy_new(size);
}

// Global array operator new: same profiled allocation path.
void* operator new[](std::size_t count)
{
    return ll_tracy_new(count);
}
void ll_tracy_delete(void* ptr)
{
TracyFree(ptr);
if (gProfilerEnabled)
{
LL_PROFILE_ZONE_SCOPED_CATEGORY_MEMORY;
//LL_PROFILE_ZONE_SCOPED_CATEGORY_MEMORY;
(free)(ptr);
}
else
@ -72,6 +83,16 @@ void operator delete(void *ptr) noexcept
}
}
void operator delete(void *ptr) noexcept
{
ll_tracy_delete(ptr);
}
void operator delete[](void* ptr) noexcept
{
ll_tracy_delete(ptr);
}
// C-style malloc/free can't be so easily overridden, so we define tracy versions and use
// a pre-processor #define in linden_common.h to redirect to them. The parens around the native
// functions below prevents recursive substitution by the preprocessor.

View File

@ -117,16 +117,13 @@ std::string LLCoros::getStatus()
{
return get_CoroData("getStatus()").mStatus;
}
LLCoros::LLCoros():
// MAINT-2724: default coroutine stack size too small on Windows.
// Previously we used
// boost::context::guarded_stack_allocator::default_stacksize();
// empirically this is insufficient.
#if ADDRESS_SIZE == 64
mStackSize(512*1024),
#else
mStackSize(256*1024),
#endif
mStackSize(768*1024),
// mCurrent does NOT own the current CoroData instance -- it simply
// points to it. So initialize it with a no-op deleter.
mCurrent{ [](CoroData*){} }
@ -281,6 +278,7 @@ std::string LLCoros::launch(const std::string& prefix, const callable_t& callabl
catch (std::bad_alloc&)
{
// Out of memory on stack allocation?
printActiveCoroutines();
LL_ERRS("LLCoros") << "Bad memory allocation in LLCoros::launch(" << prefix << ")!" << LL_ENDL;
}

View File

@ -1621,19 +1621,18 @@ namespace LLError
}
}
bool debugLoggingEnabled(const std::string& tag)
void crashdriver(void (*callback)(int*))
{
LLMutexTrylock lock(getMutex<LOG_MUTEX>(), 5);
if (!lock.isLocked())
{
return false;
}
// The LLERROR_CRASH macro used to have inline code of the form:
//int* make_me_crash = NULL;
//*make_me_crash = 0;
SettingsConfigPtr s = Globals::getInstance()->getSettingsConfig();
LLError::ELevel level = LLError::LEVEL_DEBUG;
bool res = checkLevelMap(s->mTagLevelMap, tag, level);
return res;
// But compilers are getting smart enough to recognize that, so we must
// assign to an address supplied by a separate source file. We could do
// the assignment here in crashdriver() -- but then BugSplat would group
// all LL_ERRS() crashes as the fault of this one function, instead of
// identifying the specific LL_ERRS() source line. So instead, do the
// assignment in a lambda in the caller's source. We just provide the
// nullptr target.
callback(nullptr);
}

View File

@ -94,9 +94,11 @@ const int LL_ERR_NOERR = 0;
#ifdef SHOW_ASSERT
#define llassert(func) llassert_always_msg(func, #func)
#define llassert_msg(func, msg) llassert_always_msg(func, msg)
#define llverify(func) llassert_always_msg(func, #func)
#else
#define llassert(func)
#define llassert_msg(func, msg)
#define llverify(func) do {if (func) {}} while(0)
#endif
@ -395,11 +397,9 @@ typedef LLError::NoClassInfo _LL_CLASS_TO_LOG;
#define LL_NEWLINE '\n'
// Use this only in LL_ERRS or in a place that LL_ERRS may not be used
#define LLERROR_CRASH \
{ \
int* make_me_crash = NULL;\
*make_me_crash = 0; \
exit(*make_me_crash); \
#define LLERROR_CRASH \
{ \
crashdriver([](int* ptr){ *ptr = 0; exit(*ptr); }); \
}
#define LL_ENDL \
@ -476,7 +476,32 @@ typedef LLError::NoClassInfo _LL_CLASS_TO_LOG;
LLError::CallSite& _site(_sites[which]); \
lllog_test_()
// Check at run-time whether logging is enabled, without generating output
/*
// Check at run-time whether logging is enabled, without generating output.
Resist the temptation to add a function like this because it incurs the
expense of locking and map-searching every time control reaches it.
bool debugLoggingEnabled(const std::string& tag);
Instead of:
if debugLoggingEnabled("SomeTag")
{
// ... presumably expensive operation ...
LL_DEBUGS("SomeTag") << ... << LL_ENDL;
}
Use this:
LL_DEBUGS("SomeTag");
// ... presumably expensive operation ...
LL_CONT << ...;
LL_ENDL;
LL_DEBUGS("SomeTag") performs the locking and map-searching ONCE, then caches
the result in a static variable.
*/
// used by LLERROR_CRASH
void crashdriver(void (*)(int*));
#endif // LL_LLERROR_H

View File

@ -35,6 +35,7 @@
// external library headers
// other Linden headers
#include "llerror.h"
#include "lazyeventapi.h"
LLEventAPI::LLEventAPI(const std::string& name, const std::string& desc, const std::string& field):
lbase(name, field),
@ -43,6 +44,13 @@ LLEventAPI::LLEventAPI(const std::string& name, const std::string& desc, const s
{
}
// Constructor engaged by LazyEventAPI-registered subclasses: delegate the
// deferred (name, desc, field) to the normal constructor, then replay the
// add() calls collected by the LazyEventAPI.
LLEventAPI::LLEventAPI(const LL::LazyEventAPIParams& params):
    LLEventAPI(params.name, params.desc, params.field)
{
    // call initialization functions with our brand-new instance pointer
    params.init(this);
}
LLEventAPI::~LLEventAPI()
{
}

View File

@ -35,6 +35,11 @@
#include "llinstancetracker.h"
#include <string>
namespace LL
{
class LazyEventAPIParams;
}
/**
* LLEventAPI not only provides operation dispatch functionality, inherited
* from LLDispatchListener -- it also gives us event API introspection.
@ -64,19 +69,6 @@ public:
/// Get the documentation string
std::string getDesc() const { return mDesc; }
/**
* Publish only selected add() methods from LLEventDispatcher.
* Every LLEventAPI add() @em must have a description string.
*/
template <typename CALLABLE>
void add(const std::string& name,
const std::string& desc,
CALLABLE callable,
const LLSD& required=LLSD())
{
LLEventDispatcher::add(name, desc, callable, required);
}
/**
* Instantiate a Response object in any LLEventAPI subclass method that
* wants to guarantee a reply (if requested) will be sent on exit from the
@ -150,16 +142,20 @@ public:
* @endcode
*/
LLSD& operator[](const LLSD::String& key) { return mResp[key]; }
/**
* set the response to the given data
*/
void setResponse(LLSD const & response){ mResp = response; }
/**
* set the response to the given data
*/
void setResponse(LLSD const & response){ mResp = response; }
LLSD mResp, mReq;
LLSD::String mKey;
};
protected:
// constructor used only by subclasses registered by LazyEventAPI
LLEventAPI(const LL::LazyEventAPIParams&);
private:
std::string mDesc;
};

View File

@ -40,70 +40,12 @@
// other Linden headers
#include "llevents.h"
#include "llerror.h"
#include "llexception.h"
#include "llsdutil.h"
#include "stringize.h"
#include <iomanip> // std::quoted()
#include <memory> // std::auto_ptr
/*****************************************************************************
* LLSDArgsSource
*****************************************************************************/
/**
 * Store an LLSD array, producing its elements one at a time. Die with LL_ERRS
 * if the consumer requests more elements than the array contains.
 */
class LL_COMMON_API LLSDArgsSource
{
public:
    /// @param function purely-descriptive name used in error messages
    /// @param args must be undefined or an LLSD array
    LLSDArgsSource(const std::string function, const LLSD& args);
    ~LLSDArgsSource();

    /// produce the next element; LL_ERRS if the array is exhausted
    LLSD next();

    /// warn if any elements remain unconsumed (also run by the destructor)
    void done() const;

private:
    std::string _function;
    LLSD _args;
    LLSD::Integer _index;
};

LLSDArgsSource::LLSDArgsSource(const std::string function, const LLSD& args):
    _function(function),
    _args(args),
    _index(0)
{
    // only an array (or nothing at all) makes sense as an argument source
    if (! (_args.isUndefined() || _args.isArray()))
    {
        LL_ERRS("LLSDArgsSource") << _function << " needs an args array instead of "
                                  << _args << LL_ENDL;
    }
}

LLSDArgsSource::~LLSDArgsSource()
{
    done();
}

LLSD LLSDArgsSource::next()
{
    // running off the end is a hard error: the consumer asked for more
    // arguments than the caller supplied
    if (_index >= _args.size())
    {
        LL_ERRS("LLSDArgsSource") << _function << " requires more arguments than the "
                                  << _args.size() << " provided: " << _args << LL_ENDL;
    }
    return _args[_index++];
}

void LLSDArgsSource::done() const
{
    // merely a warning: leftover arguments are suspicious but not fatal
    if (_index < _args.size())
    {
        LL_WARNS("LLSDArgsSource") << _function << " only consumed " << _index
                                   << " of the " << _args.size() << " arguments provided: "
                                   << _args << LL_ENDL;
    }
}
/*****************************************************************************
* LLSDArgsMapper
*****************************************************************************/
@ -156,19 +98,26 @@ void LLSDArgsSource::done() const
* - Holes are filled with the default values.
* - Any remaining holes constitute an error.
*/
class LL_COMMON_API LLSDArgsMapper
class LL_COMMON_API LLEventDispatcher::LLSDArgsMapper
{
public:
/// Accept description of function: function name, param names, param
/// default values
LLSDArgsMapper(const std::string& function, const LLSD& names, const LLSD& defaults);
LLSDArgsMapper(LLEventDispatcher* parent, const std::string& function,
const LLSD& names, const LLSD& defaults);
/// Given arguments map, return LLSD::Array of parameter values, or LL_ERRS.
/// Given arguments map, return LLSD::Array of parameter values, or
/// trigger error.
LLSD map(const LLSD& argsmap) const;
private:
static std::string formatlist(const LLSD&);
template <typename... ARGS>
[[noreturn]] void callFail(ARGS&&... args) const;
// store a plain dumb back-pointer because we don't have to manage the
// parent LLEventDispatcher's lifespan
LLEventDispatcher* _parent;
// The function-name string is purely descriptive. We want error messages
// to be able to indicate which function's LLSDArgsMapper has the problem.
std::string _function;
@ -187,15 +136,18 @@ private:
FilledVector _has_dft;
};
LLSDArgsMapper::LLSDArgsMapper(const std::string& function,
const LLSD& names, const LLSD& defaults):
LLEventDispatcher::LLSDArgsMapper::LLSDArgsMapper(LLEventDispatcher* parent,
const std::string& function,
const LLSD& names,
const LLSD& defaults):
_parent(parent),
_function(function),
_names(names),
_has_dft(names.size())
{
if (! (_names.isUndefined() || _names.isArray()))
{
LL_ERRS("LLSDArgsMapper") << function << " names must be an array, not " << names << LL_ENDL;
callFail(" names must be an array, not ", names);
}
auto nparams(_names.size());
// From _names generate _indexes.
@ -218,8 +170,7 @@ LLSDArgsMapper::LLSDArgsMapper(const std::string& function,
// defaults is a (possibly empty) array. Right-align it with names.
if (ndefaults > nparams)
{
LL_ERRS("LLSDArgsMapper") << function << " names array " << names
<< " shorter than defaults array " << defaults << LL_ENDL;
callFail(" names array ", names, " shorter than defaults array ", defaults);
}
// Offset by which we slide defaults array right to right-align with
@ -256,23 +207,20 @@ LLSDArgsMapper::LLSDArgsMapper(const std::string& function,
}
if (bogus.size())
{
LL_ERRS("LLSDArgsMapper") << function << " defaults specified for nonexistent params "
<< formatlist(bogus) << LL_ENDL;
callFail(" defaults specified for nonexistent params ", formatlist(bogus));
}
}
else
{
LL_ERRS("LLSDArgsMapper") << function << " defaults must be a map or an array, not "
<< defaults << LL_ENDL;
callFail(" defaults must be a map or an array, not ", defaults);
}
}
LLSD LLSDArgsMapper::map(const LLSD& argsmap) const
LLSD LLEventDispatcher::LLSDArgsMapper::map(const LLSD& argsmap) const
{
if (! (argsmap.isUndefined() || argsmap.isMap() || argsmap.isArray()))
{
LL_ERRS("LLSDArgsMapper") << _function << " map() needs a map or array, not "
<< argsmap << LL_ENDL;
callFail(" map() needs a map or array, not ", argsmap);
}
// Initialize the args array. Indexing a non-const LLSD array grows it
// to appropriate size, but we don't want to resize this one on each
@ -369,15 +317,14 @@ LLSD LLSDArgsMapper::map(const LLSD& argsmap) const
// by argsmap, that's a problem.
if (unfilled.size())
{
LL_ERRS("LLSDArgsMapper") << _function << " missing required arguments "
<< formatlist(unfilled) << " from " << argsmap << LL_ENDL;
callFail(" missing required arguments ", formatlist(unfilled), " from ", argsmap);
}
// done
return args;
}
std::string LLSDArgsMapper::formatlist(const LLSD& list)
std::string LLEventDispatcher::LLSDArgsMapper::formatlist(const LLSD& list)
{
std::ostringstream out;
const char* delim = "";
@ -390,23 +337,44 @@ std::string LLSDArgsMapper::formatlist(const LLSD& list)
return out.str();
}
LLEventDispatcher::LLEventDispatcher(const std::string& desc, const std::string& key):
mDesc(desc),
mKey(key)
template <typename... ARGS>
[[noreturn]] void LLEventDispatcher::LLSDArgsMapper::callFail(ARGS&&... args) const
{
_parent->callFail<LLEventDispatcher::DispatchError>
(_function, std::forward<ARGS>(args)...);
}
/*****************************************************************************
* LLEventDispatcher
*****************************************************************************/
LLEventDispatcher::LLEventDispatcher(const std::string& desc, const std::string& key):
LLEventDispatcher(desc, key, "args")
{}
LLEventDispatcher::LLEventDispatcher(const std::string& desc, const std::string& key,
const std::string& argskey):
mDesc(desc),
mKey(key),
mArgskey(argskey)
{}
LLEventDispatcher::~LLEventDispatcher()
{
}
LLEventDispatcher::DispatchEntry::DispatchEntry(LLEventDispatcher* parent, const std::string& desc):
mParent(parent),
mDesc(desc)
{}
/**
* DispatchEntry subclass used for callables accepting(const LLSD&)
*/
struct LLEventDispatcher::LLSDDispatchEntry: public LLEventDispatcher::DispatchEntry
{
LLSDDispatchEntry(const std::string& desc, const Callable& func, const LLSD& required):
DispatchEntry(desc),
LLSDDispatchEntry(LLEventDispatcher* parent, const std::string& desc,
const Callable& func, const LLSD& required):
DispatchEntry(parent, desc),
mFunc(func),
mRequired(required)
{}
@ -414,22 +382,21 @@ struct LLEventDispatcher::LLSDDispatchEntry: public LLEventDispatcher::DispatchE
Callable mFunc;
LLSD mRequired;
virtual void call(const std::string& desc, const LLSD& event) const
LLSD call(const std::string& desc, const LLSD& event, bool, const std::string&) const override
{
// Validate the syntax of the event itself.
std::string mismatch(llsd_matches(mRequired, event));
if (! mismatch.empty())
{
LL_ERRS("LLEventDispatcher") << desc << ": bad request: " << mismatch << LL_ENDL;
callFail(desc, ": bad request: ", mismatch);
}
// Event syntax looks good, go for it!
mFunc(event);
return mFunc(event);
}
virtual LLSD addMetadata(LLSD meta) const
LLSD getMetadata() const override
{
meta["required"] = mRequired;
return meta;
return llsd::map("required", mRequired);
}
};
@ -439,17 +406,27 @@ struct LLEventDispatcher::LLSDDispatchEntry: public LLEventDispatcher::DispatchE
*/
struct LLEventDispatcher::ParamsDispatchEntry: public LLEventDispatcher::DispatchEntry
{
ParamsDispatchEntry(const std::string& desc, const invoker_function& func):
DispatchEntry(desc),
ParamsDispatchEntry(LLEventDispatcher* parent, const std::string& name,
const std::string& desc, const invoker_function& func):
DispatchEntry(parent, desc),
mName(name),
mInvoker(func)
{}
std::string mName;
invoker_function mInvoker;
virtual void call(const std::string& desc, const LLSD& event) const
LLSD call(const std::string&, const LLSD& event, bool, const std::string&) const override
{
LLSDArgsSource src(desc, event);
mInvoker(boost::bind(&LLSDArgsSource::next, boost::ref(src)));
try
{
return mInvoker(event);
}
catch (const LL::apply_error& err)
{
// could hit runtime errors with LL::apply()
callFail(err.what());
}
}
};
@ -459,23 +436,62 @@ struct LLEventDispatcher::ParamsDispatchEntry: public LLEventDispatcher::Dispatc
*/
struct LLEventDispatcher::ArrayParamsDispatchEntry: public LLEventDispatcher::ParamsDispatchEntry
{
ArrayParamsDispatchEntry(const std::string& desc, const invoker_function& func,
ArrayParamsDispatchEntry(LLEventDispatcher* parent, const std::string& name,
const std::string& desc, const invoker_function& func,
LLSD::Integer arity):
ParamsDispatchEntry(desc, func),
ParamsDispatchEntry(parent, name, desc, func),
mArity(arity)
{}
LLSD::Integer mArity;
virtual LLSD addMetadata(LLSD meta) const
LLSD call(const std::string& desc, const LLSD& event, bool fromMap, const std::string& argskey) const override
{
// std::string context { stringize(desc, "(", event, ") with argskey ", std::quoted(argskey), ": ") };
// Whether we try to extract arguments from 'event' depends on whether
// the LLEventDispatcher consumer called one of the (name, event)
// methods (! fromMap) or one of the (event) methods (fromMap). If we
// were called with (name, event), the passed event must itself be
// suitable to pass to the registered callable, no args extraction
// required or even attempted. Only if called with plain (event) do we
// consider extracting args from that event. Initially assume 'event'
// itself contains the arguments.
LLSD args{ event };
if (fromMap)
{
if (! mArity)
{
// When the target function is nullary, and we're called from
// an (event) method, just ignore the rest of the map entries.
args.clear();
}
else
{
// We only require/retrieve argskey if the target function
// isn't nullary. For all others, since we require an LLSD
// array, we must have an argskey.
if (argskey.empty())
{
callFail("LLEventDispatcher has no args key");
}
if ((! event.has(argskey)))
{
callFail("missing required key ", std::quoted(argskey));
}
args = event[argskey];
}
}
return ParamsDispatchEntry::call(desc, args, fromMap, argskey);
}
LLSD getMetadata() const override
{
LLSD array(LLSD::emptyArray());
// Resize to number of arguments required
if (mArity)
array[mArity - 1] = LLSD();
llassert_always(array.size() == mArity);
meta["required"] = array;
return meta;
return llsd::map("required", array);
}
};
@ -485,11 +501,11 @@ struct LLEventDispatcher::ArrayParamsDispatchEntry: public LLEventDispatcher::Pa
*/
struct LLEventDispatcher::MapParamsDispatchEntry: public LLEventDispatcher::ParamsDispatchEntry
{
MapParamsDispatchEntry(const std::string& name, const std::string& desc,
const invoker_function& func,
MapParamsDispatchEntry(LLEventDispatcher* parent, const std::string& name,
const std::string& desc, const invoker_function& func,
const LLSD& params, const LLSD& defaults):
ParamsDispatchEntry(desc, func),
mMapper(name, params, defaults),
ParamsDispatchEntry(parent, name, desc, func),
mMapper(parent, name, params, defaults),
mRequired(LLSD::emptyMap())
{
// Build the set of all param keys, then delete the ones that are
@ -532,18 +548,27 @@ struct LLEventDispatcher::MapParamsDispatchEntry: public LLEventDispatcher::Para
LLSD mRequired;
LLSD mOptional;
virtual void call(const std::string& desc, const LLSD& event) const
LLSD call(const std::string& desc, const LLSD& event, bool fromMap, const std::string& argskey) const override
{
// Just convert from LLSD::Map to LLSD::Array using mMapper, then pass
// to base-class call() method.
ParamsDispatchEntry::call(desc, mMapper.map(event));
// by default, pass the whole event as the arguments map
LLSD args{ event };
// Were we called by one of the (event) methods (instead of the (name,
// event) methods), do we have an argskey, and does the incoming event
// have that key?
if (fromMap && (! argskey.empty()) && event.has(argskey))
{
// if so, extract the value of argskey from the incoming event,
// and use that as the arguments map
args = event[argskey];
}
// Now convert args from LLSD map to LLSD array using mMapper, then
// pass to base-class call() method.
return ParamsDispatchEntry::call(desc, mMapper.map(args), fromMap, argskey);
}
virtual LLSD addMetadata(LLSD meta) const
LLSD getMetadata() const override
{
meta["required"] = mRequired;
meta["optional"] = mOptional;
return meta;
return llsd::map("required", mRequired, "optional", mOptional);
}
};
@ -552,9 +577,9 @@ void LLEventDispatcher::addArrayParamsDispatchEntry(const std::string& name,
const invoker_function& invoker,
LLSD::Integer arity)
{
mDispatch.insert(
DispatchMap::value_type(name, DispatchMap::mapped_type(
new ArrayParamsDispatchEntry(desc, invoker, arity))));
mDispatch.emplace(
name,
new ArrayParamsDispatchEntry(this, "", desc, invoker, arity));
}
void LLEventDispatcher::addMapParamsDispatchEntry(const std::string& name,
@ -563,25 +588,25 @@ void LLEventDispatcher::addMapParamsDispatchEntry(const std::string& name,
const LLSD& params,
const LLSD& defaults)
{
mDispatch.insert(
DispatchMap::value_type(name, DispatchMap::mapped_type(
new MapParamsDispatchEntry(name, desc, invoker, params, defaults))));
// Pass instance info as well as this entry name for error messages.
mDispatch.emplace(
name,
new MapParamsDispatchEntry(this, "", desc, invoker, params, defaults));
}
/// Register a callable by name
void LLEventDispatcher::add(const std::string& name, const std::string& desc,
const Callable& callable, const LLSD& required)
void LLEventDispatcher::addLLSD(const std::string& name, const std::string& desc,
const Callable& callable, const LLSD& required)
{
mDispatch.insert(
DispatchMap::value_type(name, DispatchMap::mapped_type(
new LLSDDispatchEntry(desc, callable, required))));
mDispatch.emplace(name, new LLSDDispatchEntry(this, desc, callable, required));
}
void LLEventDispatcher::addFail(const std::string& name, const std::string& classname) const
void LLEventDispatcher::addFail(const std::string& name, const char* classname) const
{
LL_ERRS("LLEventDispatcher") << "LLEventDispatcher(" << mDesc << ")::add(" << name
<< "): " << classname << " is not a subclass "
<< "of LLEventDispatcher" << LL_ENDL;
<< "): " << LLError::Log::demangle(classname)
<< " is not a subclass of LLEventDispatcher"
<< LL_ENDL;
}
/// Unregister a callable
@ -596,48 +621,105 @@ bool LLEventDispatcher::remove(const std::string& name)
return true;
}
/// Call a registered callable with an explicitly-specified name. If no
/// such callable exists, die with LL_ERRS.
void LLEventDispatcher::operator()(const std::string& name, const LLSD& event) const
/// Call a registered callable with an explicitly-specified name. It is an
/// error if no such callable exists.
LLSD LLEventDispatcher::operator()(const std::string& name, const LLSD& event) const
{
if (! try_call(name, event))
{
LL_ERRS("LLEventDispatcher") << "LLEventDispatcher(" << mDesc << "): '" << name
<< "' not found" << LL_ENDL;
}
}
/// Extract the @a key value from the incoming @a event, and call the
/// callable whose name is specified by that map @a key. If no such
/// callable exists, die with LL_ERRS.
void LLEventDispatcher::operator()(const LLSD& event) const
{
// This could/should be implemented in terms of the two-arg overload.
// However -- we can produce a more informative error message.
std::string name(event[mKey]);
if (! try_call(name, event))
{
LL_ERRS("LLEventDispatcher") << "LLEventDispatcher(" << mDesc << "): bad " << mKey
<< " value '" << name << "'" << LL_ENDL;
}
}
bool LLEventDispatcher::try_call(const LLSD& event) const
{
return try_call(event[mKey], event);
return try_call(std::string(), name, event);
}
bool LLEventDispatcher::try_call(const std::string& name, const LLSD& event) const
{
DispatchMap::const_iterator found = mDispatch.find(name);
if (found == mDispatch.end())
try
{
try_call(std::string(), name, event);
return true;
}
// Note that we don't catch the generic DispatchError, only the specific
// DispatchMissing. try_call() only promises to return false if the
// specified callable name isn't found -- not for general errors.
catch (const DispatchMissing&)
{
return false;
}
}
/// Extract the @a key value from the incoming @a event, and call the callable
/// whose name is specified by that map @a key. It is an error if no such
/// callable exists.
LLSD LLEventDispatcher::operator()(const LLSD& event) const
{
return try_call(mKey, event[mKey], event);
}
bool LLEventDispatcher::try_call(const LLSD& event) const
{
try
{
try_call(mKey, event[mKey], event);
return true;
}
catch (const DispatchMissing&)
{
return false;
}
}
LLSD LLEventDispatcher::try_call(const std::string& key, const std::string& name,
const LLSD& event) const
{
if (name.empty())
{
if (key.empty())
{
callFail<DispatchError>("attempting to call with no name");
}
else
{
callFail<DispatchError>("no ", key);
}
}
DispatchMap::const_iterator found = mDispatch.find(name);
if (found == mDispatch.end())
{
// Here we were passed a non-empty name, but there's no registered
// callable with that name. This is the one case in which we throw
// DispatchMissing instead of the generic DispatchError.
// Distinguish the public method by which our caller reached here:
// key.empty() means the name was passed explicitly, non-empty means
// we extracted the name from the incoming event using that key.
if (key.empty())
{
callFail<DispatchMissing>(std::quoted(name), " not found");
}
else
{
callFail<DispatchMissing>("bad ", key, " value ", std::quoted(name));
}
}
// Found the name, so it's plausible to even attempt the call.
found->second->call(STRINGIZE("LLEventDispatcher(" << mDesc << ") calling '" << name << "'"),
event);
return true; // tell caller we were able to call
const char* delim = (key.empty()? "" : "=");
// append either "[key=name]" or just "[name]"
SetState transient(this, '[', key, delim, name, ']');
return found->second->call("", event, (! key.empty()), mArgskey);
}
template <typename EXCEPTION, typename... ARGS>
//static
[[noreturn]] void LLEventDispatcher::sCallFail(ARGS&&... args)
{
auto error = stringize(std::forward<ARGS>(args)...);
LL_WARNS("LLEventDispatcher") << error << LL_ENDL;
LLTHROW(EXCEPTION(error));
}
template <typename EXCEPTION, typename... ARGS>
[[noreturn]] void LLEventDispatcher::callFail(ARGS&&... args) const
{
// Describe this instance in addition to the error itself.
sCallFail<EXCEPTION>(*this, ": ", std::forward<ARGS>(args)...);
}
LLSD LLEventDispatcher::getMetadata(const std::string& name) const
@ -647,26 +729,243 @@ LLSD LLEventDispatcher::getMetadata(const std::string& name) const
{
return LLSD();
}
LLSD meta;
LLSD meta{ found->second->getMetadata() };
meta["name"] = name;
meta["desc"] = found->second->mDesc;
return found->second->addMetadata(meta);
return meta;
}
LLDispatchListener::LLDispatchListener(const std::string& pumpname, const std::string& key):
LLEventDispatcher(pumpname, key),
mPump(pumpname, true), // allow tweaking for uniqueness
mBoundListener(mPump.listen("self", boost::bind(&LLDispatchListener::process, this, _1)))
std::ostream& operator<<(std::ostream& out, const LLEventDispatcher& self)
{
// If we're a subclass of LLEventDispatcher, e.g. LLEventAPI, report that.
// Also report whatever transient state is active.
return out << LLError::Log::classname(self) << '(' << self.mDesc << ')'
<< self.getState();
}
bool LLDispatchListener::process(const LLSD& event)
std::string LLEventDispatcher::getState() const
{
(*this)(event);
// default value of fiber_specific_ptr is nullptr, and ~SetState() reverts
// to that; infer empty string
if (! mState.get())
return {};
else
return *mState;
}
bool LLEventDispatcher::setState(SetState&, const std::string& state) const
{
// If SetState is instantiated at multiple levels of function call, ignore
// the lower-level call because the outer call presumably provides more
// context.
if (mState.get())
return false;
// Pass us empty string (a la ~SetState()) to reset to nullptr, else take
// a heap copy of the passed state string so we can delete it on
// subsequent reset().
mState.reset(state.empty()? nullptr : new std::string(state));
return true;
}
/*****************************************************************************
* LLDispatchListener
*****************************************************************************/
std::string LLDispatchListener::mReplyKey{ "reply" };
bool LLDispatchListener::process(const LLSD& event) const
{
// Decide what to do based on the incoming value of the specified dispatch
// key.
LLSD name{ event[getDispatchKey()] };
if (name.isMap())
{
call_map(name, event);
}
else if (name.isArray())
{
call_array(name, event);
}
else
{
call_one(name, event);
}
return false;
}
LLEventDispatcher::DispatchEntry::DispatchEntry(const std::string& desc):
mDesc(desc)
{}
void LLDispatchListener::call_one(const LLSD& name, const LLSD& event) const
{
LLSD result;
try
{
result = (*this)(event);
}
catch (const DispatchError& err)
{
if (! event.has(mReplyKey))
{
// Without a reply key, let the exception propagate.
throw;
}
// Here there was an error and the incoming event has mReplyKey. Reply
// with a map containing an "error" key explaining the problem.
return reply(llsd::map("error", err.what()), event);
}
// We seem to have gotten a valid result. But we don't know whether the
// registered callable is void or non-void. If it's void,
// LLEventDispatcher returned isUndefined(). Otherwise, try to send it
// back to our invoker.
if (result.isDefined())
{
if (! result.isMap())
{
// wrap the result in a map as the "data" key
result = llsd::map("data", result);
}
reply(result, event);
}
}
void LLDispatchListener::call_map(const LLSD& reqmap, const LLSD& event) const
{
// LLSD map containing returned values
LLSD result;
// cache dispatch key
std::string key{ getDispatchKey() };
// collect any error messages here
std::ostringstream errors;
const char* delim = "";
for (const auto& pair : llsd::inMap(reqmap))
{
const LLSD::String& name{ pair.first };
const LLSD& args{ pair.second };
try
{
// in case of errors, tell user the dispatch key, the fact that
// we're processing a request map and the current key in that map
SetState(this, '[', key, '[', name, "]]");
// With this form, capture return value even if undefined:
// presence of the key in the response map can be used to detect
// which request keys succeeded.
result[name] = (*this)(name, args);
}
catch (const std::exception& err)
{
// Catch not only DispatchError, but any C++ exception thrown by
// the target callable. Collect exception name and message in
// 'errors'.
errors << delim << LLError::Log::classname(err) << ": " << err.what();
delim = "\n";
}
}
// so, were there any errors?
std::string error = errors.str();
if (! error.empty())
{
if (! event.has(mReplyKey))
{
// can't send reply, throw
sCallFail<DispatchError>(error);
}
else
{
// reply key present
result["error"] = error;
}
}
reply(result, event);
}
void LLDispatchListener::call_array(const LLSD& reqarray, const LLSD& event) const
{
// LLSD array containing returned values
LLSD results;
// cache the dispatch key
std::string key{ getDispatchKey() };
// arguments array, if present -- const because, if it's shorter than
// reqarray, we don't want to grow it
const LLSD argsarray{ event[getArgsKey()] };
// error message, if any
std::string error;
// classic index loop because we need the index
for (size_t i = 0, size = reqarray.size(); i < size; ++i)
{
const auto& reqentry{ reqarray[i] };
std::string name;
LLSD args;
if (reqentry.isString())
{
name = reqentry.asString();
args = argsarray[i];
}
else if (reqentry.isArray() && reqentry.size() == 2 && reqentry[0].isString())
{
name = reqentry[0].asString();
args = reqentry[1];
}
else
{
// reqentry isn't in either of the documented forms
error = stringize(*this, ": ", getDispatchKey(), '[', i, "] ",
reqentry, " unsupported");
break;
}
// reqentry is one of the valid forms, got name and args
try
{
// in case of errors, tell user the dispatch key, the fact that
// we're processing a request array, the current entry in that
// array and the corresponding callable name
SetState(this, '[', key, '[', i, "]=", name, ']');
// With this form, capture return value even if undefined
results.append((*this)(name, args));
}
catch (const std::exception& err)
{
// Catch not only DispatchError, but any C++ exception thrown by
// the target callable. Report the exception class as well as the
// error string.
error = stringize(LLError::Log::classname(err), ": ", err.what());
break;
}
}
LLSD result;
// was there an error?
if (! error.empty())
{
if (! event.has(mReplyKey))
{
// can't send reply, throw
sCallFail<DispatchError>(error);
}
else
{
// reply key present
result["error"] = error;
}
}
// wrap the results array as response map "data" key, as promised
if (results.isDefined())
{
result["data"] = results;
}
reply(result, event);
}
void LLDispatchListener::reply(const LLSD& reply, const LLSD& request) const
{
// Call sendReply() unconditionally: sendReply() itself tests whether the
// specified reply key is present in the incoming request, and does
// nothing if there's no such key.
sendReply(reply, request, mReplyKey);
}

File diff suppressed because it is too large Load Diff

View File

@ -435,16 +435,61 @@ public:
// generic type-appropriate store through mTarget, construct an
// LLSDParam<T> and store that, thus engaging LLSDParam's custom
// conversions.
mTarget = LLSDParam<T>(llsd::drill(event, mPath));
storeTarget(LLSDParam<T>(llsd::drill(event, mPath)));
return mConsume;
}
private:
// This method disambiguates LLStoreListener<LLSD>. Directly assigning
// some_LLSD_var = LLSDParam<LLSD>(some_LLSD_value);
// is problematic because the compiler has too many choices: LLSD has
// multiple assignment operator overloads, and LLSDParam<LLSD> has a
// templated conversion operator. But LLSDParam<LLSD> can convert to a
// (const LLSD&) parameter, and LLSD::operator=(const LLSD&) works.
void storeTarget(const T& value)
{
mTarget = value;
}
T& mTarget;
const LLSD mPath;
const bool mConsume;
};
/**
* LLVarHolder bundles a target variable of the specified type. We use it as a
* base class so the target variable will be fully constructed by the time a
* subclass constructor tries to pass a reference to some other base class.
*/
template <typename T>
struct LLVarHolder
{
T mVar;
};
/**
* LLCaptureListener isa LLStoreListener that bundles the target variable of
* interest.
*/
template <typename T>
class LLCaptureListener: public LLVarHolder<T>,
public LLStoreListener<T>
{
private:
using holder = LLVarHolder<T>;
using super = LLStoreListener<T>;
public:
LLCaptureListener(const LLSD& path=LLSD(), bool consume=false):
super(*this, holder::mVar, path, consume)
{}
void set(T&& newval=T()) { holder::mVar = std::forward<T>(newval); }
const T& get() const { return holder::mVar; }
operator const T&() { return holder::mVar; }
};
/*****************************************************************************
* LLEventLogProxy
*****************************************************************************/

View File

@ -68,19 +68,78 @@
LLEventPumps::LLEventPumps():
mFactories
{
{ "LLEventStream", [](const std::string& name, bool tweak)
{ "LLEventStream", [](const std::string& name, bool tweak, const std::string& /*type*/)
{ return new LLEventStream(name, tweak); } },
{ "LLEventMailDrop", [](const std::string& name, bool tweak)
{ "LLEventMailDrop", [](const std::string& name, bool tweak, const std::string& /*type*/)
{ return new LLEventMailDrop(name, tweak); } }
},
mTypes
{
// LLEventStream is the default for obtain(), so even if somebody DOES
// call obtain("placeholder"), this sample entry won't break anything.
{ "placeholder", "LLEventStream" }
// { "placeholder", "LLEventStream" }
}
{}
bool LLEventPumps::registerTypeFactory(const std::string& type, const TypeFactory& factory)
{
auto found = mFactories.find(type);
// can't re-register a TypeFactory for a type name that's already registered
if (found != mFactories.end())
return false;
// doesn't already exist, go ahead and register
mFactories[type] = factory;
return true;
}
void LLEventPumps::unregisterTypeFactory(const std::string& type)
{
auto found = mFactories.find(type);
if (found != mFactories.end())
mFactories.erase(found);
}
bool LLEventPumps::registerPumpFactory(const std::string& name, const PumpFactory& factory)
{
// Do we already have a pump by this name?
if (mPumpMap.find(name) != mPumpMap.end())
return false;
// Do we already have an override for this pump name?
if (mTypes.find(name) != mTypes.end())
return false;
// Leverage the two-level lookup implemented by mTypes (pump name -> type
// name) and mFactories (type name -> factory). We could instead create a
// whole separate (pump name -> factory) map, and look in both; or we
// could change mTypes to (pump name -> factory) and, for typical type-
// based lookups, use a "factory" that looks up the real factory in
// mFactories. But this works, and we don't expect many calls to make() -
// either explicit or implicit via obtain().
// Create a bogus type name extremely unlikely to collide with an actual type.
static std::string nul(1, '\0');
std::string type_name{ nul + name };
mTypes[name] = type_name;
// TypeFactory is called with (name, tweak, type), whereas PumpFactory
// accepts only name. We could adapt with std::bind(), but this lambda
// does the trick.
mFactories[type_name] =
[factory]
(const std::string& name, bool /*tweak*/, const std::string& /*type*/)
{ return factory(name); };
return true;
}
void LLEventPumps::unregisterPumpFactory(const std::string& name)
{
auto tfound = mTypes.find(name);
if (tfound != mTypes.end())
{
auto ffound = mFactories.find(tfound->second);
if (ffound != mFactories.end())
{
mFactories.erase(ffound);
}
mTypes.erase(tfound);
}
}
LLEventPump& LLEventPumps::obtain(const std::string& name)
{
PumpMap::iterator found = mPumpMap.find(name);
@ -114,7 +173,7 @@ LLEventPump& LLEventPumps::make(const std::string& name, bool tweak,
// Passing an unrecognized type name is a no-no
LLTHROW(BadType(type));
}
auto newInstance = (found->second)(name, tweak);
auto newInstance = (found->second)(name, tweak, type);
// LLEventPump's constructor implicitly registers each new instance in
// mPumpMap. But remember that we instantiated it (in mOurPumps) so we'll
// delete it later.

View File

@ -270,6 +270,45 @@ public:
LLEventPump& make(const std::string& name, bool tweak=false,
const std::string& type=std::string());
/// function passed to registerTypeFactory()
typedef std::function<LLEventPump*(const std::string& name, bool tweak, const std::string& type)> TypeFactory;
/**
* Register a TypeFactory for use with make(). When make() is called with
* the specified @a type string, call @a factory(name, tweak, type) to
* instantiate it.
*
* Returns true if successfully registered, false if there already exists
* a TypeFactory for the specified @a type name.
*/
bool registerTypeFactory(const std::string& type, const TypeFactory& factory);
void unregisterTypeFactory(const std::string& type);
/// function passed to registerPumpFactory()
typedef std::function<LLEventPump*(const std::string&)> PumpFactory;
/**
* Register a PumpFactory for use with obtain(). When obtain() is called
* with the specified @a name string, if an LLEventPump with the specified
* @a name doesn't already exist, call @a factory(name) to instantiate it.
*
* Returns true if successfully registered, false if there already exists
* a factory override for the specified @a name.
*
* PumpFactory does not support @a tweak because it's only called when
* <i>that particular</i> @a name is passed to obtain(). Bear in mind that
* <tt>obtain(name)</tt> might still bypass the caller's PumpFactory for a
* couple different reasons:
*
* * registerPumpFactory() returns false because there's already a factory
* override for the specified @name
* * between a successful <tt>registerPumpFactory(name)</tt> call (returns
* true) and a call to <tt>obtain(name)</tt>, someone explicitly
* instantiated an LLEventPump(name), so obtain(name) returned that.
*/
bool registerPumpFactory(const std::string& name, const PumpFactory& factory);
void unregisterPumpFactory(const std::string& name);
/**
* Find the named LLEventPump instance. If it exists post the message to it.
* If the pump does not exist, do nothing.
@ -327,13 +366,13 @@ testable:
typedef std::set<LLEventPump*> PumpSet;
PumpSet mOurPumps;
// for make(), map string type name to LLEventPump subclass factory function
typedef std::map<std::string, std::function<LLEventPump*(const std::string&, bool)>> PumpFactories;
typedef std::map<std::string, TypeFactory> TypeFactories;
// Data used by make().
// One might think mFactories and mTypes could reasonably be static. So
// they could -- if not for the fact that make() or obtain() might be
// called before this module's static variables have been initialized.
// This is why we use singletons in the first place.
PumpFactories mFactories;
TypeFactories mFactories;
// for obtain(), map desired string instance name to string type when
// obtain() must create the instance

View File

@ -29,11 +29,6 @@
#include "llframetimer.h"
// We don't bother building a stand alone lib; we just need to include the one source file for Tracy support
#if LL_PROFILER_CONFIGURATION == LL_PROFILER_CONFIG_TRACY || LL_PROFILER_CONFIGURATION == LL_PROFILER_CONFIG_TRACY_FAST_TIMER
#include "TracyClient.cpp"
#endif // LL_PROFILER_CONFIGURATION
// Static members
//LLTimer LLFrameTimer::sInternalTimer;
U64 LLFrameTimer::sStartTotalTime = totalTime();

View File

@ -104,22 +104,26 @@ public:
return LockStatic()->mMap.size();
}
// snapshot of std::pair<const KEY, std::shared_ptr<T>> pairs
class snapshot
// snapshot of std::pair<const KEY, std::shared_ptr<SUBCLASS>> pairs, for
// some SUBCLASS derived from T
template <typename SUBCLASS>
class snapshot_of
{
// It's very important that what we store in this snapshot are
// weak_ptrs, NOT shared_ptrs. That's how we discover whether any
// instance has been deleted during the lifespan of a snapshot.
typedef std::vector<std::pair<const KEY, weak_t>> VectorType;
// Dereferencing our iterator produces a std::shared_ptr for each
// instance that still exists. Since we store weak_ptrs, that involves
// two chained transformations:
// Dereferencing the iterator we publish produces a
// std::shared_ptr<SUBCLASS> for each instance that still exists.
// Since we store weak_ptr<T>, that involves two chained
// transformations:
// - a transform_iterator to lock the weak_ptr and return a shared_ptr
// - a filter_iterator to skip any shared_ptr that has become invalid.
// - a filter_iterator to skip any shared_ptr<T> that has become
// invalid or references any T instance that isn't SUBCLASS.
// It is very important that we filter lazily, that is, during
// traversal. Any one of our stored weak_ptrs might expire during
// traversal.
typedef std::pair<const KEY, ptr_t> strong_pair;
typedef std::pair<const KEY, std::shared_ptr<SUBCLASS>> strong_pair;
// Note for future reference: nat has not yet had any luck (up to
// Boost 1.67) trying to use boost::transform_iterator with a hand-
// coded functor, only with actual functions. In my experience, an
@ -127,7 +131,7 @@ public:
// result_type typedef. But this works.
static strong_pair strengthen(typename VectorType::value_type& pair)
{
return { pair.first, pair.second.lock() };
return { pair.first, std::dynamic_pointer_cast<SUBCLASS>(pair.second.lock()) };
}
static bool dead_skipper(const strong_pair& pair)
{
@ -135,7 +139,7 @@ public:
}
public:
snapshot():
snapshot_of():
// populate our vector with a snapshot of (locked!) InstanceMap
// note, this assigns pair<KEY, shared_ptr> to pair<KEY, weak_ptr>
mData(mLock->mMap.begin(), mLock->mMap.end())
@ -184,44 +188,51 @@ public:
#endif // LL_WINDOWS
VectorType mData;
};
using snapshot = snapshot_of<T>;
// iterate over this for references to each instance
class instance_snapshot: public snapshot
// iterate over this for references to each SUBCLASS instance
template <typename SUBCLASS>
class instance_snapshot_of: public snapshot_of<SUBCLASS>
{
private:
static T& instance_getter(typename snapshot::iterator::reference pair)
using super = snapshot_of<SUBCLASS>;
static T& instance_getter(typename super::iterator::reference pair)
{
return *pair.second;
}
public:
typedef boost::transform_iterator<decltype(instance_getter)*,
typename snapshot::iterator> iterator;
iterator begin() { return iterator(snapshot::begin(), instance_getter); }
iterator end() { return iterator(snapshot::end(), instance_getter); }
typename super::iterator> iterator;
iterator begin() { return iterator(super::begin(), instance_getter); }
iterator end() { return iterator(super::end(), instance_getter); }
void deleteAll()
{
for (auto it(snapshot::begin()), end(snapshot::end()); it != end; ++it)
for (auto it(super::begin()), end(super::end()); it != end; ++it)
{
delete it->second.get();
}
}
};
};
using instance_snapshot = instance_snapshot_of<T>;
// iterate over this for each key
class key_snapshot: public snapshot
template <typename SUBCLASS>
class key_snapshot_of: public snapshot_of<SUBCLASS>
{
private:
static KEY key_getter(typename snapshot::iterator::reference pair)
using super = snapshot_of<SUBCLASS>;
static KEY key_getter(typename super::iterator::reference pair)
{
return pair.first;
}
public:
typedef boost::transform_iterator<decltype(key_getter)*,
typename snapshot::iterator> iterator;
iterator begin() { return iterator(snapshot::begin(), key_getter); }
iterator end() { return iterator(snapshot::end(), key_getter); }
typename super::iterator> iterator;
iterator begin() { return iterator(super::begin(), key_getter); }
iterator end() { return iterator(super::end(), key_getter); }
};
using key_snapshot = key_snapshot_of<T>;
static ptr_t getInstance(const KEY& k)
{
@ -368,22 +379,25 @@ public:
return LockStatic()->mSet.size();
}
// snapshot of std::shared_ptr<T> pointers
class snapshot
// snapshot of std::shared_ptr<SUBCLASS> pointers
template <typename SUBCLASS>
class snapshot_of
{
// It's very important that what we store in this snapshot are
// weak_ptrs, NOT shared_ptrs. That's how we discover whether any
// instance has been deleted during the lifespan of a snapshot.
typedef std::vector<weak_t> VectorType;
// Dereferencing our iterator produces a std::shared_ptr for each
// instance that still exists. Since we store weak_ptrs, that involves
// two chained transformations:
// Dereferencing the iterator we publish produces a
// std::shared_ptr<SUBCLASS> for each instance that still exists.
// Since we store weak_ptrs, that involves two chained
// transformations:
// - a transform_iterator to lock the weak_ptr and return a shared_ptr
// - a filter_iterator to skip any shared_ptr that has become invalid.
typedef std::shared_ptr<T> strong_ptr;
// - a filter_iterator to skip any shared_ptr that has become invalid
// or references any T instance that isn't SUBCLASS.
typedef std::shared_ptr<SUBCLASS> strong_ptr;
static strong_ptr strengthen(typename VectorType::value_type& ptr)
{
return ptr.lock();
return std::dynamic_pointer_cast<SUBCLASS>(ptr.lock());
}
static bool dead_skipper(const strong_ptr& ptr)
{
@ -391,7 +405,7 @@ public:
}
public:
snapshot():
snapshot_of():
// populate our vector with a snapshot of (locked!) InstanceSet
// note, this assigns stored shared_ptrs to weak_ptrs for snapshot
mData(mLock->mSet.begin(), mLock->mSet.end())
@ -437,22 +451,33 @@ public:
#endif // LL_WINDOWS
VectorType mData;
};
using snapshot = snapshot_of<T>;
// iterate over this for references to each instance
struct instance_snapshot: public snapshot
template <typename SUBCLASS>
class instance_snapshot_of: public snapshot_of<SUBCLASS>
{
typedef boost::indirect_iterator<typename snapshot::iterator> iterator;
iterator begin() { return iterator(snapshot::begin()); }
iterator end() { return iterator(snapshot::end()); }
private:
using super = snapshot_of<SUBCLASS>;
public:
typedef boost::indirect_iterator<typename super::iterator> iterator;
iterator begin() { return iterator(super::begin()); }
iterator end() { return iterator(super::end()); }
void deleteAll()
{
for (auto it(snapshot::begin()), end(snapshot::end()); it != end; ++it)
for (auto it(super::begin()), end(super::end()); it != end; ++it)
{
delete it->get();
}
}
};
using instance_snapshot = instance_snapshot_of<T>;
// key_snapshot_of isn't really meaningful, but define it anyway to avoid
// requiring two different LLInstanceTrackerSubclass implementations.
template <typename SUBCLASS>
using key_snapshot_of = instance_snapshot_of<SUBCLASS>;
protected:
LLInstanceTracker()

View File

@ -0,0 +1,98 @@
/**
* @file llinstancetrackersubclass.h
* @author Nat Goodspeed
* @date 2022-12-09
* @brief Intermediate class to get subclass-specific types from
* LLInstanceTracker instance-retrieval methods.
*
* $LicenseInfo:firstyear=2022&license=viewerlgpl$
* Copyright (c) 2022, Linden Research, Inc.
* $/LicenseInfo$
*/
#if ! defined(LL_LLINSTANCETRACKERSUBCLASS_H)
#define LL_LLINSTANCETRACKERSUBCLASS_H
#include <memory> // std::shared_ptr, std::weak_ptr
/**
* Derive your subclass S of a subclass T of LLInstanceTracker<T> from
* LLInstanceTrackerSubclass<S, T> to perform appropriate downcasting and
* filtering for LLInstanceTracker access methods.
*
* LLInstanceTracker<T> uses CRTP, so that getWeak(), getInstance(), snapshot
* and instance_snapshot return pointers and references to T. The trouble is
* that subclasses T0 and T1 derived from T also get pointers and references
* to their base class T, requiring explicit downcasting. Moreover,
* T0::getInstance() shouldn't find an instance of any T subclass other than
* T0. Nor should T0::snapshot.
*
* @code
* class Tracked: public LLInstanceTracker<Tracked, std::string>
* {
* private:
* using super = LLInstanceTracker<Tracked, std::string>;
* public:
* Tracked(const std::string& name): super(name) {}
* // All references to Tracked::ptr_t, Tracked::getInstance() etc.
* // appropriately use Tracked.
* // ...
* };
*
* // But now we derive SubTracked from Tracked. We need SubTracked::ptr_t,
* // SubTracked::getInstance() etc. to use SubTracked, not Tracked.
* // This LLInstanceTrackerSubclass specialization is itself derived from
* // Tracked.
* class SubTracked: public LLInstanceTrackerSubclass<SubTracked, Tracked>
* {
* private:
* using super = LLInstanceTrackerSubclass<SubTracked, Tracked>;
* public:
* // LLInstanceTrackerSubclass's constructor forwards to Tracked's.
* SubTracked(const std::string& name): super(name) {}
* // SubTracked::getInstance() returns std::shared_ptr<SubTracked>, etc.
* // ...
* @endcode
*/
template <typename SUBCLASS, typename T>
class LLInstanceTrackerSubclass: public T
{
public:
using ptr_t = std::shared_ptr<SUBCLASS>;
using weak_t = std::weak_ptr<SUBCLASS>;
// forward any constructor call to the corresponding T ctor
template <typename... ARGS>
LLInstanceTrackerSubclass(ARGS&&... args):
T(std::forward<ARGS>(args)...)
{}
weak_t getWeak()
{
// call base-class getWeak(), try to lock, downcast to SUBCLASS
return std::dynamic_pointer_cast<SUBCLASS>(T::getWeak().lock());
}
template <typename KEY>
static ptr_t getInstance(const KEY& k)
{
return std::dynamic_pointer_cast<SUBCLASS>(T::getInstance(k));
}
using snapshot = typename T::template snapshot_of<SUBCLASS>;
using instance_snapshot = typename T::template instance_snapshot_of<SUBCLASS>;
using key_snapshot = typename T::template key_snapshot_of<SUBCLASS>;
static size_t instanceCount()
{
// T::instanceCount() lies because our snapshot, et al., won't
// necessarily return all the T instances -- only those that are also
// SUBCLASS instances. Count those.
size_t count = 0;
for (const auto& pair : snapshot())
++count;
return count;
}
};
#endif /* ! defined(LL_LLINSTANCETRACKERSUBCLASS_H) */

View File

@ -340,11 +340,28 @@ public:
}
else
{
// The LLSD object we got from our stream contains the keys we
// need.
LLEventPumps::instance().obtain(data["pump"]).post(data["data"]);
// Block calls to this method; resetting mBlocker unblocks calls
// to the other method.
try
{
// The LLSD object we got from our stream contains the
// keys we need.
LLEventPumps::instance().obtain(data["pump"]).post(data["data"]);
}
catch (const std::exception& err)
{
// No plugin should be allowed to crash the viewer by
// driving an exception -- intentionally or not.
LOG_UNHANDLED_EXCEPTION(stringize("handling request ", data));
// Whether or not the plugin added a "reply" key to the
// request, send a reply. We happen to know who originated
// this request, and the reply LLEventPump of interest.
// Not our problem if the plugin ignores the reply event.
data["reply"] = mReplyPump.getName();
sendReply(llsd::map("error",
stringize(LLError::Log::classname(err), ": ", err.what())),
data);
}
// Block calls to this method; resetting mBlocker unblocks
// calls to the other method.
mBlocker.reset(new LLEventPump::Blocker(mStdoutDataConnection));
// Go check for any more pending events in the buffer.
if (childout.size())
@ -389,6 +406,17 @@ public:
// Read all remaining bytes and log.
LL_INFOS("LLLeap") << mDesc << ": " << rest << LL_ENDL;
}
/*--------------------------- diagnostic ---------------------------*/
else if (data["eof"].asBoolean())
{
LL_DEBUGS("LLLeap") << mDesc << " ended, no partial line" << LL_ENDL;
}
else
{
LL_DEBUGS("LLLeap") << mDesc << " (still running, " << childerr.size()
<< " bytes pending)" << LL_ENDL;
}
/*------------------------- end diagnostic -------------------------*/
return false;
}

View File

@ -14,14 +14,16 @@
// associated header
#include "llleaplistener.h"
// STL headers
#include <map>
#include <algorithm> // std::find_if
#include <functional>
#include <map>
#include <set>
// std headers
// external library headers
#include <boost/foreach.hpp>
// other Linden headers
#include "lluuid.h"
#include "lazyeventapi.h"
#include "llsdutil.h"
#include "lluuid.h"
#include "stringize.h"
/*****************************************************************************
@ -110,7 +112,7 @@ LLLeapListener::~LLLeapListener()
// value_type, and Bad Things would happen if you copied an
// LLTempBoundListener. (Destruction of the original would disconnect the
// listener, invalidating every stored connection.)
BOOST_FOREACH(ListenersMap::value_type& pair, mListeners)
for (ListenersMap::value_type& pair : mListeners)
{
pair.second.disconnect();
}
@ -208,31 +210,65 @@ void LLLeapListener::getAPIs(const LLSD& request) const
{
Response reply(LLSD(), request);
// first, traverse existing LLEventAPI instances
std::set<std::string> instances;
for (auto& ea : LLEventAPI::instance_snapshot())
{
LLSD info;
info["desc"] = ea.getDesc();
reply[ea.getName()] = info;
// remember which APIs are actually instantiated
instances.insert(ea.getName());
reply[ea.getName()] = llsd::map("desc", ea.getDesc());
}
// supplement that with *potential* instances: that is, instances of
// LazyEventAPI that can each instantiate an LLEventAPI on demand
for (const auto& lea : LL::LazyEventAPIBase::instance_snapshot())
{
// skip any LazyEventAPI that's already instantiated its LLEventAPI
if (instances.find(lea.getName()) == instances.end())
{
reply[lea.getName()] = llsd::map("desc", lea.getDesc());
}
}
}
// Because LazyEventAPI deliberately mimics LLEventAPI's query API, this
// function can be passed either -- even though they're unrelated types.
template <typename API>
void reportAPI(LLEventAPI::Response& reply, const API& api)
{
reply["name"] = api.getName();
reply["desc"] = api.getDesc();
reply["key"] = api.getDispatchKey();
LLSD ops;
for (const auto& namedesc : api)
{
ops.append(api.getMetadata(namedesc.first));
}
reply["ops"] = ops;
}
void LLLeapListener::getAPI(const LLSD& request) const
{
Response reply(LLSD(), request);
auto found = LLEventAPI::getInstance(request["api"]);
if (found)
// check first among existing LLEventAPI instances
auto foundea = LLEventAPI::getInstance(request["api"]);
if (foundea)
{
reply["name"] = found->getName();
reply["desc"] = found->getDesc();
reply["key"] = found->getDispatchKey();
LLSD ops;
for (LLEventAPI::const_iterator oi(found->begin()), oend(found->end());
oi != oend; ++oi)
reportAPI(reply, *foundea);
}
else
{
// Here the requested LLEventAPI doesn't yet exist, but do we have a
// registered LazyEventAPI for it?
LL::LazyEventAPIBase::instance_snapshot snap;
auto foundlea = std::find_if(snap.begin(), snap.end(),
[api = request["api"].asString()]
(const auto& lea)
{ return (lea.getName() == api); });
if (foundlea != snap.end())
{
ops.append(found->getMetadata(oi->first));
reportAPI(reply, *foundlea);
}
reply["ops"] = ops;
}
}

View File

@ -35,6 +35,7 @@
# include <sys/types.h>
# include <mach/task.h>
# include <mach/mach_init.h>
#include <mach/mach_host.h>
#elif LL_LINUX
# include <unistd.h>
#endif
@ -109,6 +110,50 @@ void LLMemory::updateMemoryInfo()
{
sAvailPhysicalMemInKB = U32Kilobytes(0);
}
#elif defined(LL_DARWIN)
task_vm_info info;
mach_msg_type_number_t infoCount = TASK_VM_INFO_COUNT;
// MACH_TASK_BASIC_INFO reports the same resident_size, but does not tell us the reusable bytes or phys_footprint.
if (task_info(mach_task_self(), TASK_VM_INFO, reinterpret_cast<task_info_t>(&info), &infoCount) == KERN_SUCCESS)
{
// Our Windows definition of PagefileUsage is documented by Microsoft as "the total amount of
// memory that the memory manager has committed for a running process", which is rss.
sAllocatedPageSizeInKB = U32Bytes(info.resident_size);
// Activity Monitor => Inspect Process => Real Memory Size appears to report resident_size
// Activity monitor => main window memory column appears to report phys_footprint, which spot checks as at least 30% less.
// I think that is because of compression, which isn't going to give us a consistent measurement. We want uncompressed totals.
//
// In between is resident_size - reusable. This is what Chrome source code uses, with source comments saying it is 'the "Real Memory" value
// reported for the app by the Memory Monitor in Instruments.' It is still about 8% bigger than phys_footprint.
//
// (On Windows, we use WorkingSetSize.)
sAllocatedMemInKB = U32Bytes(info.resident_size - info.reusable);
}
else
{
LL_WARNS() << "task_info failed" << LL_ENDL;
}
// Total installed and available physical memory are properties of the host, not just our process.
vm_statistics64_data_t vmstat;
mach_msg_type_number_t count = HOST_VM_INFO64_COUNT;
mach_port_t host = mach_host_self();
vm_size_t page_size;
host_page_size(host, &page_size);
kern_return_t result = host_statistics64(host, HOST_VM_INFO64, reinterpret_cast<host_info_t>(&vmstat), &count);
if (result == KERN_SUCCESS) {
// This is what Chrome reports as 'the "Physical Memory Free" value reported by the Memory Monitor in Instruments.'
// Note though that inactive pages are not included here and not yet free, but could become so under memory pressure.
sAvailPhysicalMemInKB = U32Bytes(vmstat.free_count * page_size);
sMaxPhysicalMemInKB = LLMemoryInfo::getHardwareMemSize();
}
else
{
LL_WARNS() << "task_info failed" << LL_ENDL;
}
#else
//not valid for other systems for now.
sAllocatedMemInKB = U64Bytes(LLMemory::getCurrentRSS());

View File

@ -36,7 +36,8 @@
//============================================================================
#define MUTEX_DEBUG (LL_DEBUG || LL_RELEASE_WITH_DEBUG_INFO)
//#define MUTEX_DEBUG (LL_DEBUG || LL_RELEASE_WITH_DEBUG_INFO)
#define MUTEX_DEBUG 0 //disable mutex debugging as it's interfering with profiles
#if MUTEX_DEBUG
#include <map>
@ -61,7 +62,7 @@ protected:
mutable LLThread::id_t mLockingThread;
#if MUTEX_DEBUG
std::map<LLThread::id_t, BOOL> mIsLocked;
std::unordered_map<LLThread::id_t, BOOL> mIsLocked;
#endif
};

View File

@ -340,4 +340,28 @@ private:
bool mStayUnique;
};
// boost hash adapter
template <class Type>
struct boost::hash<LLPointer<Type>>
{
typedef LLPointer<Type> argument_type;
typedef std::size_t result_type;
result_type operator()(argument_type const& s) const
{
return (std::size_t) s.get();
}
};
// Adapt boost hash to std hash
namespace std
{
template<class Type> struct hash<LLPointer<Type>>
{
std::size_t operator()(LLPointer<Type> const& s) const noexcept
{
return boost::hash<LLPointer<Type>>()(s);
}
};
}
#endif

View File

@ -529,6 +529,7 @@ LLProcess::LLProcess(const LLSDOrParams& params):
// preserve existing semantics, we promise that mAttached defaults to the
// same setting as mAutokill.
mAttached(params.attached.isProvided()? params.attached : params.autokill),
mPool(NULL),
mPipes(NSLOTS)
{
// Hmm, when you construct a ptr_vector with a size, it merely reserves
@ -549,8 +550,14 @@ LLProcess::LLProcess(const LLSDOrParams& params):
mPostend = params.postend;
apr_pool_create(&mPool, gAPRPoolp);
if (!mPool)
{
LLTHROW(LLProcessError(STRINGIZE("failed to create apr pool")));
}
apr_procattr_t *procattr = NULL;
chkapr(apr_procattr_create(&procattr, gAPRPoolp));
chkapr(apr_procattr_create(&procattr, mPool));
// IQA-490, CHOP-900: On Windows, ask APR to jump through hoops to
// constrain the set of handles passed to the child process. Before we
@ -689,14 +696,14 @@ LLProcess::LLProcess(const LLSDOrParams& params):
// one. Hand-expand chkapr() macro so we can fill in the actual command
// string instead of the variable names.
if (ll_apr_warn_status(apr_proc_create(&mProcess, argv[0], &argv[0], NULL, procattr,
gAPRPoolp)))
mPool)))
{
LLTHROW(LLProcessError(STRINGIZE(params << " failed")));
}
// arrange to call status_callback()
apr_proc_other_child_register(&mProcess, &LLProcess::status_callback, this, mProcess.in,
gAPRPoolp);
mPool);
// and make sure we poll it once per "mainloop" tick
sProcessListener.addPoll(*this);
mStatus.mState = RUNNING;
@ -815,6 +822,12 @@ LLProcess::~LLProcess()
{
kill("destructor");
}
if (mPool)
{
apr_pool_destroy(mPool);
mPool = NULL;
}
}
bool LLProcess::kill(const std::string& who)

View File

@ -568,6 +568,7 @@ private:
// explicitly want this ptr_vector to be able to store NULLs
typedef boost::ptr_vector< boost::nullable<BasePipe> > PipeVector;
PipeVector mPipes;
apr_pool_t* mPool;
};
/// for logging

View File

@ -746,7 +746,7 @@ private:
__cpuid(0x1, eax, ebx, ecx, edx);
if(feature_infos[0] != (S32)edx)
{
LL_ERRS() << "machdep.cpu.feature_bits doesn't match expected cpuid result!" << LL_ENDL;
LL_WARNS() << "machdep.cpu.feature_bits doesn't match expected cpuid result!" << LL_ENDL;
}
#endif // LL_RELEASE_FOR_DOWNLOAD

View File

@ -95,8 +95,12 @@ namespace LLProfiler
// <FS:Beq> Fixed mutual exclusion issues with RAM and GPU. NOTE: This might still break on Apple in which case we'll need to restrict that platform
//// GPU Mutually exclusive with detailed memory tracing
// #define LL_PROFILER_ENABLE_TRACY_OPENGL 0
#define LL_PROFILER_ENABLE_TRACY_MEMORY 0
#define LL_PROFILER_ENABLE_TRACY_OPENGL 1
#define LL_PROFILER_ENABLE_TRACY_MEMORY 0
#define LL_PROFILER_ENABLE_TRACY_OPENGL 0
// Enable RenderDoc labeling
#define LL_PROFILER_ENABLE_RENDER_DOC 0
#endif
#if LL_PROFILER_CONFIGURATION == LL_PROFILER_CONFIG_TRACY
@ -126,8 +130,6 @@ namespace LLProfiler
#define LL_PROFILE_ZONE_ERR(name) LL_PROFILE_ZONE_NAMED_COLOR( name, 0XFF0000 ) // RGB yellow
#define LL_PROFILE_ZONE_INFO(name) LL_PROFILE_ZONE_NAMED_COLOR( name, 0X00FFFF ) // RGB cyan
#define LL_PROFILE_ZONE_WARN(name) LL_PROFILE_ZONE_NAMED_COLOR( name, 0x0FFFF00 ) // RGB red
#define LL_PROFILE_ALLOC(ptr, size) TracyAlloc(ptr, size)
#define LL_PROFILE_FREE(ptr) TracyFree(ptr)
// <FS:Beq> Additional FS Tracy macros
#define LL_PROFILE_ZONE_COLOR(color) ZoneNamedC( ___tracy_scoped_zone, color, LLProfiler::active ) // <FS:Beq/> Additional Tracy macro
@ -144,7 +146,7 @@ namespace LLProfiler
#define LL_RECORD_BLOCK_TIME(name) const LLTrace::BlockTimer& LL_GLUE_TOKENS(block_time_recorder, __LINE__)(LLTrace::timeThisBlock(name)); (void)LL_GLUE_TOKENS(block_time_recorder, __LINE__);
#define LL_PROFILE_ZONE_NAMED(name) // LL_PROFILE_ZONE_NAMED is a no-op when Tracy is disabled
#define LL_PROFILE_ZONE_NAMED_COLOR(name,color) // LL_RECORD_BLOCK_TIME(name)
#define LL_PROFILE_ZONE_NAMED_COLOR(name,color) // LL_PROFILE_ZONE_NAMED_COLOR is a no-op when Tracy is disabled
#define LL_PROFILE_ZONE_SCOPED // LL_PROFILE_ZONE_SCOPED is a no-op when Tracy is disabled
#define LL_PROFILE_ZONE_NUM( val ) (void)( val ); // Not supported
@ -153,8 +155,6 @@ namespace LLProfiler
#define LL_PROFILE_ZONE_ERR(name) (void)(name); // Not supported
#define LL_PROFILE_ZONE_INFO(name) (void)(name); // Not supported
#define LL_PROFILE_ZONE_WARN(name) (void)(name); // Not supported
#define LL_PROFILE_ALLOC(ptr, size) (void)(ptr); (void)(size);
#define LL_PROFILE_FREE(ptr) (void)(ptr);
// <FS:Beq> Additional FS Tracy macros
#define LL_PROFILE_ZONE_COLOR(color)
#define LL_PROFILE_PLOT( name, value )
@ -187,8 +187,6 @@ namespace LLProfiler
#define LL_PROFILE_ZONE_ERR(name) LL_PROFILE_ZONE_NAMED_COLOR( name, 0XFF0000 ) // RGB yellow
#define LL_PROFILE_ZONE_INFO(name) LL_PROFILE_ZONE_NAMED_COLOR( name, 0X00FFFF ) // RGB cyan
#define LL_PROFILE_ZONE_WARN(name) LL_PROFILE_ZONE_NAMED_COLOR( name, 0x0FFFF00 ) // RGB red
#define LL_PROFILE_ALLOC(ptr, size) TracyAlloc(ptr, size)
#define LL_PROFILE_FREE(ptr) TracyFree(ptr)
// <FS:Beq> Additional FS Tracy macros
#define LL_PROFILE_ZONE_COLOR(color) ZoneNamedC( ___tracy_scoped_zone, color, LLProfiler::active )
#define LL_PROFILE_PLOT( name, value ) TracyPlot( name, value)
@ -222,6 +220,39 @@ namespace LLProfiler
// </FS:Ansariel>
#endif // LL_PROFILER
#if LL_PROFILER_ENABLE_TRACY_OPENGL
#define LL_PROFILE_GPU_ZONE(name) TracyGpuZone(name)
#define LL_PROFILE_GPU_ZONEC(name,color) TracyGpuZoneC(name,color)
#define LL_PROFILER_GPU_COLLECT TracyGpuCollect
#define LL_PROFILER_GPU_CONTEXT TracyGpuContext
// disable memory tracking (incompatible with GPU tracing
#define LL_PROFILE_ALLOC(ptr, size) (void)(ptr); (void)(size);
#define LL_PROFILE_FREE(ptr) (void)(ptr);
#else
#define LL_PROFILE_GPU_ZONE(name) (void)name;
#define LL_PROFILE_GPU_ZONEC(name,color) (void)name;(void)color;
#define LL_PROFILER_GPU_COLLECT
#define LL_PROFILER_GPU_CONTEXT
#define LL_LABEL_OBJECT_GL(type, name, length, label)
#if LL_PROFILER_CONFIGURATION > 1
#define LL_PROFILE_ALLOC(ptr, size) TracyAlloc(ptr, size)
#define LL_PROFILE_FREE(ptr) TracyFree(ptr)
#else
#define LL_PROFILE_ALLOC(ptr, size) (void)(ptr); (void)(size);
#define LL_PROFILE_FREE(ptr) (void)(ptr);
#endif
#endif
#if LL_PROFILER_ENABLE_RENDER_DOC
#define LL_LABEL_OBJECT_GL(type, name, length, label) glObjectLabel(type, name, length, label)
#else
#define LL_LABEL_OBJECT_GL(type, name, length, label)
#endif
#include "llprofilercategories.h"
#endif // LL_PROFILER_H

View File

@ -53,7 +53,7 @@
#define LL_PROFILER_CATEGORY_ENABLE_LOGGING 1
#define LL_PROFILER_CATEGORY_ENABLE_MATERIAL 1
#define LL_PROFILER_CATEGORY_ENABLE_MEDIA 1
#define LL_PROFILER_CATEGORY_ENABLE_MEMORY 0 // <FS:Beq/> Rationalise this silliness
#define LL_PROFILER_CATEGORY_ENABLE_MEMORY 0
#define LL_PROFILER_CATEGORY_ENABLE_NETWORK 1
#define LL_PROFILER_CATEGORY_ENABLE_OCTREE 1
#define LL_PROFILER_CATEGORY_ENABLE_PIPELINE 1

View File

@ -33,9 +33,12 @@
#include "llpointer.h"
#include "llrefcount.h" // LLRefCount
#include <boost/intrusive_ptr.hpp>
#include <boost/shared_ptr.hpp>
#include <boost/type_traits/is_base_of.hpp>
#include <boost/type_traits/remove_pointer.hpp>
#include <boost/utility/enable_if.hpp>
#include <memory> // std::shared_ptr, std::unique_ptr
#include <type_traits>
/**
* LLPtrTo<TARGET>::type is either of two things:
@ -55,14 +58,14 @@ struct LLPtrTo
/// specialize for subclasses of LLRefCount
template <class T>
struct LLPtrTo<T, typename boost::enable_if< boost::is_base_of<LLRefCount, T> >::type>
struct LLPtrTo<T, typename std::enable_if< boost::is_base_of<LLRefCount, T>::value >::type>
{
typedef LLPointer<T> type;
};
/// specialize for subclasses of LLThreadSafeRefCount
template <class T>
struct LLPtrTo<T, typename boost::enable_if< boost::is_base_of<LLThreadSafeRefCount, T> >::type>
struct LLPtrTo<T, typename std::enable_if< boost::is_base_of<LLThreadSafeRefCount, T>::value >::type>
{
typedef LLPointer<T> type;
};
@ -83,4 +86,83 @@ struct LLRemovePointer< LLPointer<SOMECLASS> >
typedef SOMECLASS type;
};
namespace LL
{
/*****************************************************************************
* get_ref()
*****************************************************************************/
template <typename T>
struct GetRef
{
// return const ref or non-const ref, depending on whether we can bind
// a non-const lvalue ref to the argument
const auto& operator()(const T& obj) const { return obj; }
auto& operator()(T& obj) const { return obj; }
};
template <typename T>
struct GetRef<const T*>
{
const auto& operator()(const T* ptr) const { return *ptr; }
};
template <typename T>
struct GetRef<T*>
{
auto& operator()(T* ptr) const { return *ptr; }
};
template <typename T>
struct GetRef< LLPointer<T> >
{
auto& operator()(LLPointer<T> ptr) const { return *ptr; }
};
/// whether we're passed a pointer or a reference, return a reference
template <typename T>
auto& get_ref(T& ptr_or_ref)
{
return GetRef<typename std::decay<T>::type>()(ptr_or_ref);
}
template <typename T>
const auto& get_ref(const T& ptr_or_ref)
{
return GetRef<typename std::decay<T>::type>()(ptr_or_ref);
}
/*****************************************************************************
* get_ptr()
*****************************************************************************/
// if T is any pointer type we recognize, return it unchanged
template <typename T>
const T* get_ptr(const T* ptr) { return ptr; }
template <typename T>
T* get_ptr(T* ptr) { return ptr; }
template <typename T>
const std::shared_ptr<T>& get_ptr(const std::shared_ptr<T>& ptr) { return ptr; }
template <typename T>
const std::unique_ptr<T>& get_ptr(const std::unique_ptr<T>& ptr) { return ptr; }
template <typename T>
const boost::shared_ptr<T>& get_ptr(const boost::shared_ptr<T>& ptr) { return ptr; }
template <typename T>
const boost::intrusive_ptr<T>& get_ptr(const boost::intrusive_ptr<T>& ptr) { return ptr; }
template <typename T>
const LLPointer<T>& get_ptr(const LLPointer<T>& ptr) { return ptr; }
// T is not any pointer type we recognize, take a pointer to the parameter
template <typename T>
const T* get_ptr(const T& obj) { return &obj; }
template <typename T>
T* get_ptr(T& obj) { return &obj; }
} // namespace LL
#endif /* ! defined(LL_LLPTRTO_H) */

View File

@ -26,20 +26,26 @@
#include "linden_common.h"
#include "llqueuedthread.h"
#include <chrono>
#include "llstl.h"
#include "lltimer.h" // ms_sleep()
#include "lltracethreadrecorder.h"
#include "llmutex.h"
//============================================================================
// MAIN THREAD
LLQueuedThread::LLQueuedThread(const std::string& name, bool threaded, bool should_pause) :
LLThread(name),
mThreaded(threaded),
mIdleThread(true),
mNextHandle(0),
mStarted(FALSE)
LLThread(name),
mIdleThread(true),
mNextHandle(0),
mStarted(FALSE),
mThreaded(threaded),
mRequestQueue(name, 1024 * 1024)
{
llassert(threaded); // not threaded implementation is deprecated
mMainQueue = LL::WorkQueue::getInstance("mainloop");
if (mThreaded)
{
if(should_pause)
@ -69,6 +75,11 @@ void LLQueuedThread::shutdown()
unpause(); // MAIN THREAD
if (mThreaded)
{
if (mRequestQueue.size() == 0)
{
mRequestQueue.close();
}
S32 timeout = 100;
for ( ; timeout>0; timeout--)
{
@ -104,6 +115,8 @@ void LLQueuedThread::shutdown()
{
LL_WARNS() << "~LLQueuedThread() called with active requests: " << active_count << LL_ENDL;
}
mRequestQueue.close();
}
//----------------------------------------------------------------------------
@ -112,8 +125,7 @@ void LLQueuedThread::shutdown()
// virtual
size_t LLQueuedThread::update(F32 max_time_ms)
{
LL_PROFILE_ZONE_SCOPED
LL_PROFILE_ZONE_SCOPED;
if (!mStarted)
{
if (!mThreaded)
@ -127,38 +139,38 @@ size_t LLQueuedThread::update(F32 max_time_ms)
size_t LLQueuedThread::updateQueue(F32 max_time_ms)
{
LL_PROFILE_ZONE_SCOPED
F64 max_time = (F64)max_time_ms * .001;
LLTimer timer;
size_t pending = 1;
LL_PROFILE_ZONE_SCOPED;
// Frame Update
if (mThreaded)
{
pending = getPending();
if(pending > 0)
// schedule a call to threadedUpdate for every call to updateQueue
if (!isQuitting())
{
mRequestQueue.post([=]()
{
LL_PROFILE_ZONE_NAMED_CATEGORY_THREAD("qt - update");
mIdleThread = false;
threadedUpdate();
mIdleThread = true;
}
);
}
if(getPending() > 0)
{
unpause();
}
unpause();
}
}
else
{
while (pending > 0)
{
pending = processNextRequest();
if (max_time && timer.getElapsedTimeF64() > max_time)
break;
}
mRequestQueue.runFor(std::chrono::microseconds((int) (max_time_ms*1000.f)));
threadedUpdate();
}
return pending;
return getPending();
}
void LLQueuedThread::incQueue()
{
LL_PROFILE_ZONE_SCOPED
// Something has been added to the queue
if (!isPaused())
{
@ -173,20 +185,12 @@ void LLQueuedThread::incQueue()
// May be called from any thread
size_t LLQueuedThread::getPending()
{
LL_PROFILE_ZONE_SCOPED
size_t res;
lockData();
res = mRequestQueue.size();
unlockData();
return res;
return mRequestQueue.size();
}
// MAIN thread
void LLQueuedThread::waitOnPending()
{
LL_PROFILE_ZONE_SCOPED
while(1)
{
update(0);
@ -206,41 +210,28 @@ void LLQueuedThread::waitOnPending()
// MAIN thread
void LLQueuedThread::printQueueStats()
{
LL_PROFILE_ZONE_SCOPED
lockData();
if (!mRequestQueue.empty())
U32 size = mRequestQueue.size();
if (size > 0)
{
QueuedRequest *req = *mRequestQueue.begin();
LL_INFOS() << llformat("Pending Requests:%d Current status:%d", mRequestQueue.size(), req->getStatus()) << LL_ENDL;
LL_INFOS() << llformat("Pending Requests:%d ", mRequestQueue.size()) << LL_ENDL;
}
else
{
LL_INFOS() << "Queued Thread Idle" << LL_ENDL;
}
unlockData();
}
// MAIN thread
LLQueuedThread::handle_t LLQueuedThread::generateHandle()
{
LL_PROFILE_ZONE_SCOPED
lockData();
while ((mNextHandle == nullHandle()) || (mRequestHash.find(mNextHandle)))
{
mNextHandle++;
}
const LLQueuedThread::handle_t res = mNextHandle++;
unlockData();
U32 res = ++mNextHandle;
return res;
}
// MAIN thread
bool LLQueuedThread::addRequest(QueuedRequest* req)
{
LL_PROFILE_ZONE_SCOPED
LL_PROFILE_ZONE_SCOPED;
if (mStatus == QUITTING)
{
return false;
@ -248,14 +239,14 @@ bool LLQueuedThread::addRequest(QueuedRequest* req)
lockData();
req->setStatus(STATUS_QUEUED);
mRequestQueue.insert(req);
mRequestHash.insert(req);
mRequestHash.insert(req);
#if _DEBUG
// LL_INFOS() << llformat("LLQueuedThread::Added req [%08d]",handle) << LL_ENDL;
#endif
unlockData();
incQueue();
llassert(!mDataLock->isSelfLocked());
mRequestQueue.post([this, req]() { processRequest(req); });
return true;
}
@ -263,8 +254,7 @@ bool LLQueuedThread::addRequest(QueuedRequest* req)
// MAIN thread
bool LLQueuedThread::waitForResult(LLQueuedThread::handle_t handle, bool auto_complete)
{
LL_PROFILE_ZONE_SCOPED
LL_PROFILE_ZONE_SCOPED;
llassert (handle != nullHandle());
bool res = false;
bool waspaused = isPaused();
@ -300,15 +290,12 @@ bool LLQueuedThread::waitForResult(LLQueuedThread::handle_t handle, bool auto_co
{
pause();
}
return res;
}
// MAIN thread
LLQueuedThread::QueuedRequest* LLQueuedThread::getRequest(handle_t handle)
{
LL_PROFILE_ZONE_SCOPED
if (handle == nullHandle())
{
return 0;
@ -321,8 +308,6 @@ LLQueuedThread::QueuedRequest* LLQueuedThread::getRequest(handle_t handle)
LLQueuedThread::status_t LLQueuedThread::getRequestStatus(handle_t handle)
{
LL_PROFILE_ZONE_SCOPED
status_t res = STATUS_EXPIRED;
lockData();
QueuedRequest* req = (QueuedRequest*)mRequestHash.find(handle);
@ -336,8 +321,7 @@ LLQueuedThread::status_t LLQueuedThread::getRequestStatus(handle_t handle)
void LLQueuedThread::abortRequest(handle_t handle, bool autocomplete)
{
LL_PROFILE_ZONE_SCOPED
LL_PROFILE_ZONE_SCOPED_CATEGORY_THREAD;
lockData();
QueuedRequest* req = (QueuedRequest*)mRequestHash.find(handle);
if (req)
@ -350,8 +334,6 @@ void LLQueuedThread::abortRequest(handle_t handle, bool autocomplete)
// MAIN thread
void LLQueuedThread::setFlags(handle_t handle, U32 flags)
{
LL_PROFILE_ZONE_SCOPED
lockData();
QueuedRequest* req = (QueuedRequest*)mRequestHash.find(handle);
if (req)
@ -361,34 +343,9 @@ void LLQueuedThread::setFlags(handle_t handle, U32 flags)
unlockData();
}
void LLQueuedThread::setPriority(handle_t handle, U32 priority)
{
LL_PROFILE_ZONE_SCOPED
lockData();
QueuedRequest* req = (QueuedRequest*)mRequestHash.find(handle);
if (req)
{
if(req->getStatus() == STATUS_INPROGRESS)
{
// not in list
req->setPriority(priority);
}
else if(req->getStatus() == STATUS_QUEUED)
{
// remove from list then re-insert
llverify(mRequestQueue.erase(req) == 1);
req->setPriority(priority);
mRequestQueue.insert(req);
}
}
unlockData();
}
bool LLQueuedThread::completeRequest(handle_t handle)
{
LL_PROFILE_ZONE_SCOPED
LL_PROFILE_ZONE_SCOPED;
bool res = false;
lockData();
QueuedRequest* req = (QueuedRequest*)mRequestHash.find(handle);
@ -431,98 +388,120 @@ bool LLQueuedThread::check()
//============================================================================
// Runs on its OWN thread
size_t LLQueuedThread::processNextRequest()
void LLQueuedThread::processRequest(LLQueuedThread::QueuedRequest* req)
{
LL_PROFILE_ZONE_SCOPED
LL_PROFILE_ZONE_SCOPED_CATEGORY_THREAD;
mIdleThread = false;
//threadedUpdate();
QueuedRequest *req;
// Get next request from pool
lockData();
while(1)
if ((req->getFlags() & FLAG_ABORT) || (mStatus == QUITTING))
{
req = NULL;
if (mRequestQueue.empty())
LL_PROFILE_ZONE_NAMED_CATEGORY_THREAD("qtpr - abort");
req->setStatus(STATUS_ABORTED);
req->finishRequest(false);
if (req->getFlags() & FLAG_AUTO_COMPLETE)
{
break;
}
req = *mRequestQueue.begin();
mRequestQueue.erase(mRequestQueue.begin());
if ((req->getFlags() & FLAG_ABORT) || (mStatus == QUITTING))
{
req->setStatus(STATUS_ABORTED);
req->finishRequest(false);
if (req->getFlags() & FLAG_AUTO_COMPLETE)
{
mRequestHash.erase(req);
req->deleteRequest();
mRequestHash.erase(req);
req->deleteRequest();
// check();
}
continue;
}
llassert_always(req->getStatus() == STATUS_QUEUED);
break;
unlockData();
}
U32 start_priority = 0 ;
if (req)
{
req->setStatus(STATUS_INPROGRESS);
start_priority = req->getPriority();
}
unlockData();
else
{
llassert_always(req->getStatus() == STATUS_QUEUED);
// This is the only place we will call req->setStatus() after
// it has initially been set to STATUS_QUEUED, so it is
// safe to access req.
if (req)
{
// <FS:ND> Image thread pool from CoolVL
if (req->getFlags() & FLAG_ASYNC)
{
req->processRequest();
return getPending();
}
// </FS:ND>
if (req)
{
req->setStatus(STATUS_INPROGRESS);
}
unlockData();
// process request
bool complete = req->processRequest();
// This is the only place we will call req->setStatus() after
// it has initially been set to STATUS_QUEUED, so it is
// safe to access req.
if (req)
{
// process request
bool complete = req->processRequest();
if (complete)
{
lockData();
req->setStatus(STATUS_COMPLETE);
req->finishRequest(true);
if (req->getFlags() & FLAG_AUTO_COMPLETE)
{
mRequestHash.erase(req);
req->deleteRequest();
// check();
}
unlockData();
}
else
{
lockData();
req->setStatus(STATUS_QUEUED);
mRequestQueue.insert(req);
unlockData();
if (mThreaded && start_priority < PRIORITY_NORMAL)
{
ms_sleep(1); // sleep the thread a little
}
}
LLTrace::get_thread_recorder()->pushToParent();
}
if (complete)
{
LL_PROFILE_ZONE_NAMED_CATEGORY_THREAD("qtpr - complete");
lockData();
req->setStatus(STATUS_COMPLETE);
req->finishRequest(true);
if (req->getFlags() & FLAG_AUTO_COMPLETE)
{
mRequestHash.erase(req);
req->deleteRequest();
// check();
}
unlockData();
}
else
{
LL_PROFILE_ZONE_NAMED_CATEGORY_THREAD("qtpr - retry");
//put back on queue and try again in 0.1ms
lockData();
req->setStatus(STATUS_QUEUED);
unlockData();
return getPending();
llassert(!mDataLock->isSelfLocked());
#if 0
// try again on next frame
// NOTE: tried using "post" with a time in the future, but this
// would invariably cause this thread to wait for a long time (10+ ms)
// while work is pending
bool ret = LL::WorkQueue::postMaybe(
mMainQueue,
[=]()
{
LL_PROFILE_ZONE_NAMED("processRequest - retry");
mRequestQueue.post([=]()
{
LL_PROFILE_ZONE_NAMED("processRequest - retry"); // <-- not redundant, track retry on both queues
processRequest(req);
});
});
llassert(ret);
#else
using namespace std::chrono_literals;
auto retry_time = LL::WorkQueue::TimePoint::clock::now() + 16ms;
mRequestQueue.post([=]
{
LL_PROFILE_ZONE_NAMED("processRequest - retry");
if (LL::WorkQueue::TimePoint::clock::now() < retry_time)
{
auto sleep_time = std::chrono::duration_cast<std::chrono::milliseconds>(retry_time - LL::WorkQueue::TimePoint::clock::now());
if (sleep_time.count() > 0)
{
ms_sleep(sleep_time.count());
}
}
processRequest(req);
});
#endif
}
}
}
mIdleThread = true;
}
// virtual
bool LLQueuedThread::runCondition()
{
// mRunCondition must be locked here
if (mRequestQueue.empty() && mIdleThread)
if (mRequestQueue.size() == 0 && mIdleThread)
return false;
else
return true;
@ -536,34 +515,33 @@ void LLQueuedThread::run()
startThread();
mStarted = TRUE;
while (1)
/*while (1)
{
LL_PROFILE_ZONE_SCOPED;
// this will block on the condition until runCondition() returns true, the thread is unpaused, or the thread leaves the RUNNING state.
checkPause();
if (isQuitting())
{
LLTrace::get_thread_recorder()->pushToParent();
endThread();
break;
}
mIdleThread = false;
mIdleThread = FALSE;
LL_PROFILER_THREAD_BEGIN(mName.c_str())
threadedUpdate();
auto pending_work = processNextRequest();
LL_PROFILER_THREAD_END(mName.c_str())
if (pending_work == 0)
{
mIdleThread = TRUE;
ms_sleep(1);
//LL_PROFILE_ZONE_NAMED("LLQueuedThread - sleep");
mIdleThread = true;
//ms_sleep(1);
}
//LLThread::yield(); // thread should yield after each request
}
}*/
mRequestQueue.runUntilClose();
endThread();
LL_INFOS() << "LLQueuedThread " << mName << " EXITING." << LL_ENDL;
}
// virtual
@ -583,10 +561,9 @@ void LLQueuedThread::threadedUpdate()
//============================================================================
LLQueuedThread::QueuedRequest::QueuedRequest(LLQueuedThread::handle_t handle, U32 priority, U32 flags) :
LLQueuedThread::QueuedRequest::QueuedRequest(LLQueuedThread::handle_t handle, U32 flags) :
LLSimpleHashEntry<LLQueuedThread::handle_t>(handle),
mStatus(STATUS_UNKNOWN),
mPriority(priority),
mFlags(flags)
{
}

View File

@ -36,6 +36,7 @@
#include "llthread.h"
#include "llsimplehash.h"
#include "workqueue.h"
//============================================================================
// Note: ~LLQueuedThread is O(N) N=# of queued threads, assumed to be small
@ -45,15 +46,6 @@ class LL_COMMON_API LLQueuedThread : public LLThread
{
//------------------------------------------------------------------------
public:
enum priority_t {
PRIORITY_IMMEDIATE = 0x7FFFFFFF,
PRIORITY_URGENT = 0x40000000,
PRIORITY_HIGH = 0x30000000,
PRIORITY_NORMAL = 0x20000000,
PRIORITY_LOW = 0x10000000,
PRIORITY_LOWBITS = 0x0FFFFFFF,
PRIORITY_HIGHBITS = 0x70000000
};
enum status_t {
STATUS_EXPIRED = -1,
STATUS_UNKNOWN = 0,
@ -67,7 +59,6 @@ public:
FLAG_AUTO_COMPLETE = 1,
FLAG_AUTO_DELETE = 2, // child-class dependent
FLAG_ABORT = 4
,FLAG_ASYNC = 8 // <FS:ND/> Image thread pool from CoolVL
};
typedef U32 handle_t;
@ -83,28 +74,17 @@ public:
virtual ~QueuedRequest(); // use deleteRequest()
public:
QueuedRequest(handle_t handle, U32 priority, U32 flags = 0);
QueuedRequest(handle_t handle, U32 flags = 0);
status_t getStatus()
{
return mStatus;
}
U32 getPriority() const
{
return mPriority;
}
U32 getFlags() const
{
return mFlags;
}
bool higherPriority(const QueuedRequest& second) const
{
if ( mPriority == second.mPriority)
return mHashKey < second.mHashKey;
else
return mPriority > second.mPriority;
}
protected:
status_t setStatus(status_t newstatus)
{
@ -122,28 +102,11 @@ public:
virtual void finishRequest(bool completed); // Always called from thread after request has completed or aborted
virtual void deleteRequest(); // Only method to delete a request
void setPriority(U32 pri)
{
// Only do this on a request that is not in a queued list!
mPriority = pri;
};
protected:
LLAtomicBase<status_t> mStatus;
U32 mPriority;
U32 mFlags;
};
protected:
struct queued_request_less
{
bool operator()(const QueuedRequest* lhs, const QueuedRequest* rhs) const
{
return lhs->higherPriority(*rhs); // higher priority in front of queue (set)
}
};
//------------------------------------------------------------------------
public:
@ -168,7 +131,7 @@ private:
protected:
handle_t generateHandle();
bool addRequest(QueuedRequest* req);
size_t processNextRequest(void);
void processRequest(QueuedRequest* req);
void incQueue();
public:
@ -187,7 +150,6 @@ public:
status_t getRequestStatus(handle_t handle);
void abortRequest(handle_t handle, bool autocomplete);
void setFlags(handle_t handle, U32 flags);
void setPriority(handle_t handle, U32 priority);
bool completeRequest(handle_t handle);
// This is public for support classes like LLWorkerThread,
// but generally the methods above should be used.
@ -201,8 +163,10 @@ protected:
BOOL mStarted; // required when mThreaded is false to call startThread() from update()
LLAtomicBool mIdleThread; // request queue is empty (or we are quitting) and the thread is idle
typedef std::set<QueuedRequest*, queued_request_less> request_queue_t;
request_queue_t mRequestQueue;
//typedef std::set<QueuedRequest*, queued_request_less> request_queue_t;
//request_queue_t mRequestQueue;
LL::WorkQueue mRequestQueue;
LL::WorkQueue::weak_t mMainQueue;
enum { REQUEST_HASH_SIZE = 512 }; // must be power of 2
typedef LLSimpleHash<handle_t, REQUEST_HASH_SIZE> request_hash_t;

View File

@ -58,46 +58,14 @@
* to restore uniform distribution.
*/
// *NOTE: The system rand implementation is probably not correct.
#define LL_USE_SYSTEM_RAND 0
#if LL_USE_SYSTEM_RAND
#include <cstdlib>
#endif
#if LL_USE_SYSTEM_RAND
class LLSeedRand
{
public:
LLSeedRand()
{
#if LL_WINDOWS
srand(LLUUID::getRandomSeed());
#else
srand48(LLUUID::getRandomSeed());
#endif
}
};
static LLSeedRand sRandomSeeder;
inline F64 ll_internal_random_double()
{
#if LL_WINDOWS
return (F64)rand() / (F64)RAND_MAX;
#else
return drand48();
#endif
}
inline F32 ll_internal_random_float()
{
#if LL_WINDOWS
return (F32)rand() / (F32)RAND_MAX;
#else
return (F32)drand48();
#endif
}
#else
static LLRandLagFib2281 gRandomGenerator(LLUUID::getRandomSeed());
inline F64 ll_internal_random_double()
// no default implementation, only specific F64 and F32 specializations
template <typename REAL>
inline REAL ll_internal_random();
template <>
inline F64 ll_internal_random<F64>()
{
// *HACK: Through experimentation, we have found that dual core
// CPUs (or at least multi-threaded processes) seem to
@ -108,15 +76,35 @@ inline F64 ll_internal_random_double()
return rv;
}
template <>
inline F32 ll_internal_random<F32>()
{
return F32(ll_internal_random<F64>());
}
/*------------------------------ F64 aliases -------------------------------*/
inline F64 ll_internal_random_double()
{
return ll_internal_random<F64>();
}
F64 ll_drand()
{
return ll_internal_random_double();
}
/*------------------------------ F32 aliases -------------------------------*/
inline F32 ll_internal_random_float()
{
// The clamping rules are described above.
F32 rv = (F32)gRandomGenerator();
if(!((rv >= 0.0f) && (rv < 1.0f))) return fmod(rv, 1.f);
return rv;
return ll_internal_random<F32>();
}
#endif
F32 ll_frand()
{
return ll_internal_random_float();
}
/*-------------------------- clamped random range --------------------------*/
S32 ll_rand()
{
return ll_rand(RAND_MAX);
@ -130,42 +118,28 @@ S32 ll_rand(S32 val)
return rv;
}
F32 ll_frand()
template <typename REAL>
REAL ll_grand(REAL val)
{
return ll_internal_random_float();
// The clamping rules are described above.
REAL rv = ll_internal_random<REAL>() * val;
if(val > 0)
{
if(rv >= val) return REAL();
}
else
{
if(rv <= val) return REAL();
}
return rv;
}
F32 ll_frand(F32 val)
{
// The clamping rules are described above.
F32 rv = ll_internal_random_float() * val;
if(val > 0)
{
if(rv >= val) return 0.0f;
}
else
{
if(rv <= val) return 0.0f;
}
return rv;
}
F64 ll_drand()
{
return ll_internal_random_double();
return ll_grand<F32>(val);
}
F64 ll_drand(F64 val)
{
// The clamping rules are described above.
F64 rv = ll_internal_random_double() * val;
if(val > 0)
{
if(rv >= val) return 0.0;
}
else
{
if(rv <= val) return 0.0;
}
return rv;
return ll_grand<F64>(val);
}

View File

@ -475,6 +475,7 @@ LLSDNotationParser::~LLSDNotationParser()
// virtual
S32 LLSDNotationParser::doParse(std::istream& istr, LLSD& data, S32 max_depth) const
{
LL_PROFILE_ZONE_SCOPED_CATEGORY_LLSD
// map: { string:object, string:object }
// array: [ object, object, object ]
// undef: !
@ -734,6 +735,7 @@ S32 LLSDNotationParser::doParse(std::istream& istr, LLSD& data, S32 max_depth) c
S32 LLSDNotationParser::parseMap(std::istream& istr, LLSD& map, S32 max_depth) const
{
LL_PROFILE_ZONE_SCOPED_CATEGORY_LLSD
// map: { string:object, string:object }
map = LLSD::emptyMap();
S32 parse_count = 0;
@ -794,6 +796,7 @@ S32 LLSDNotationParser::parseMap(std::istream& istr, LLSD& map, S32 max_depth) c
S32 LLSDNotationParser::parseArray(std::istream& istr, LLSD& array, S32 max_depth) const
{
LL_PROFILE_ZONE_SCOPED_CATEGORY_LLSD
// array: [ object, object, object ]
array = LLSD::emptyArray();
S32 parse_count = 0;
@ -833,6 +836,7 @@ S32 LLSDNotationParser::parseArray(std::istream& istr, LLSD& array, S32 max_dept
bool LLSDNotationParser::parseString(std::istream& istr, LLSD& data) const
{
LL_PROFILE_ZONE_SCOPED_CATEGORY_LLSD
std::string value;
auto count = deserialize_string(istr, value, mMaxBytesLeft);
if(PARSE_FAILURE == count) return false;
@ -843,6 +847,7 @@ bool LLSDNotationParser::parseString(std::istream& istr, LLSD& data) const
bool LLSDNotationParser::parseBinary(std::istream& istr, LLSD& data) const
{
LL_PROFILE_ZONE_SCOPED_CATEGORY_LLSD
// binary: b##"ff3120ab1"
// or: b(len)"..."
@ -945,6 +950,7 @@ LLSDBinaryParser::~LLSDBinaryParser()
// virtual
S32 LLSDBinaryParser::doParse(std::istream& istr, LLSD& data, S32 max_depth) const
{
LL_PROFILE_ZONE_SCOPED_CATEGORY_LLSD
/**
* Undefined: '!'<br>
* Boolean: '1' for true '0' for false<br>

View File

@ -958,6 +958,8 @@ void LLSDXMLParser::parsePart(const char *buf, llssize len)
// virtual
S32 LLSDXMLParser::doParse(std::istream& input, LLSD& data, S32 max_depth) const
{
LL_PROFILE_ZONE_SCOPED_CATEGORY_LLSD
#ifdef XML_PARSER_PERFORMANCE_TESTS
XML_Timer timer( &parseTime );
#endif // XML_PARSER_PERFORMANCE_TESTS

View File

@ -1051,3 +1051,38 @@ LLSD llsd_shallow(LLSD value, LLSD filter)
return shallow;
}
LLSD LL::apply_llsd_fix(size_t arity, const LLSD& args)
{
    // Normalize an incoming LLSD blob into an LLSD array with exactly
    // 'arity' entries, throwing LL::apply_error when that can't be done.
    //
    // Of LLSD's two aggregate types, Map is rejected outright: accepting a
    // Map would imply matching its string keys against the target function's
    // parameter names, which C++ (absent reflection) cannot do.
    if (args.isMap())
    {
        LLTHROW(LL::apply_error("LL::apply(function, Map LLSD) unsupported"));
    }
    else if (args.isUndefined() || args.isArray())
    {
        // Accept an Array; also treat isUndefined() as a zero-length array
        // so a nullary function can be called with an empty blob. Since
        // LLSD().size() == 0, one length check covers both cases.
        if (args.size() == arity)
        {
            return args;
        }
        LLTHROW(LL::apply_error(stringize("LL::apply(function(", arity, " args), ",
                                          args.size(), "-entry LLSD array)")));
    }
    // What remains is a scalar. A scalar LLSD reports size() == 0, so it
    // can't have satisfied the array branch -- but it can serve as the sole
    // argument of a unary function.
    if (arity != 1)
    {
        LLTHROW(LL::apply_error(stringize("LL::apply(function(", arity, " args), "
                                          "LLSD ", LLSD::typeString(args.type()), ")")));
    }
    // wrap the scalar as a single-entry array
    return llsd::array(args);
}

View File

@ -29,8 +29,14 @@
#ifndef LL_LLSDUTIL_H
#define LL_LLSDUTIL_H
#include "apply.h" // LL::invoke()
#include "function_types.h" // LL::function_arity
#include "llsd.h"
#include <boost/functional/hash.hpp>
#include <cassert>
#include <memory> // std::shared_ptr
#include <type_traits>
#include <vector>
// U32
LL_COMMON_API LLSD ll_sd_from_U32(const U32);
@ -298,6 +304,11 @@ LLSD map(Ts&&... vs)
/*****************************************************************************
* LLSDParam
*****************************************************************************/
/**
 * Polymorphic common base for every LLSDParam specialization. It exists so
 * that code holding converters of assorted target types (e.g.
 * LLSDParam&lt;LLSD&gt;) can keep heterogeneous LLSDParam&lt;T&gt; instances alive in a
 * single container of base-class smart pointers.
 */
struct LLSDParamBase
{
    // virtual destructor: instances are destroyed through base pointers;
    // '= default' is the idiomatic spelling of the empty user-provided body
    virtual ~LLSDParamBase() = default;
};
/**
* LLSDParam is a customization point for passing LLSD values to function
* parameters of more or less arbitrary type. LLSD provides a small set of
@ -315,7 +326,7 @@ LLSD map(Ts&&... vs)
* @endcode
*/
template <typename T>
class LLSDParam
class LLSDParam: public LLSDParamBase
{
public:
/**
@ -323,13 +334,66 @@ public:
* value for later retrieval
*/
LLSDParam(const LLSD& value):
_value(value)
value_(value)
{}
operator T() const { return _value; }
operator T() const { return value_; }
private:
T _value;
T value_;
};
/**
* LLSDParam<LLSD> is for when you don't already have the target parameter
* type in hand. Instantiate LLSDParam<LLSD>(your LLSD object), and the
* templated conversion operator will try to select a more specific LLSDParam
* specialization.
*/
template <>
class LLSDParam<LLSD>: public LLSDParamBase
{
private:
    // the original LLSD value, held until a conversion is requested
    LLSD value_;

    // LLSDParam<LLSD>::operator T() works by instantiating an LLSDParam<T> on
    // demand. Returning that engages LLSDParam<T>::operator T(), producing
    // the desired result. But LLSDParam<const char*> owns a std::string whose
    // c_str() is returned by its operator const char*(). If we return a temp
    // LLSDParam<const char*>, the compiler can destroy it right away, as soon
    // as we've called operator const char*(). That's a problem! That
    // invalidates the const char* we've just passed to the subject function.
    // This LLSDParam<LLSD> is presumably guaranteed to survive until the
    // subject function has returned, so we must ensure that any constructed
    // LLSDParam<T> lives just as long as this LLSDParam<LLSD> does. Putting
    // each LLSDParam<T> on the heap and capturing a smart pointer in a vector
    // works. We would have liked to use std::unique_ptr, but vector entries
    // must be copyable.
    // (Alternatively we could assume that every instance of LLSDParam<LLSD>
    // will be asked for at most ONE conversion. We could store a scalar
    // std::unique_ptr and, when constructing a new LLSDParam<T>, assert that
    // the unique_ptr is empty. But some future change in usage patterns, and
    // consequent failure of that assertion, would be very mysterious. Instead
    // of explaining how to fix it, just fix it now.)
    // 'mutable' because conversions are requested on a const object.
    mutable std::vector<std::shared_ptr<LLSDParamBase>> converters_;

public:
    /// store the LLSD value for later conversion
    LLSDParam(const LLSD& value): value_(value) {}

    /// if we're literally being asked for an LLSD parameter, avoid infinite
    /// recursion
    operator LLSD() const { return value_; }

    /// otherwise, instantiate a more specific LLSDParam<T> to convert; that
    /// preserves the existing customization mechanism
    template <typename T>
    operator T() const
    {
        // instantiate with the decayed type so cv/ref-qualified requests
        // share the plain-T specialization; converters_ stores it via its
        // LLSDParamBase pointer
        auto ptr{ std::make_shared<LLSDParam<std::decay_t<T>>>(value_) };
        // keep the new converter alive until we ourselves are destroyed
        converters_.push_back(ptr);
        return *ptr;
    }
};
/**
@ -346,17 +410,17 @@ private:
*/
#define LLSDParam_for(T, AS) \
template <> \
class LLSDParam<T> \
class LLSDParam<T>: public LLSDParamBase \
{ \
public: \
LLSDParam(const LLSD& value): \
_value((T)value.AS()) \
value_((T)value.AS()) \
{} \
\
operator T() const { return _value; } \
operator T() const { return value_; } \
\
private: \
T _value; \
T value_; \
}
LLSDParam_for(float, asReal);
@ -372,31 +436,31 @@ LLSDParam_for(LLSD::Binary, asBinary);
* safely pass an LLSDParam<const char*>(yourLLSD).
*/
template <>
class LLSDParam<const char*>
class LLSDParam<const char*>: public LLSDParamBase
{
private:
// The difference here is that we store a std::string rather than a const
// char*. It's important that the LLSDParam object own the std::string.
std::string _value;
std::string value_;
// We don't bother storing the incoming LLSD object, but we do have to
// distinguish whether _value is an empty string because the LLSD object
// distinguish whether value_ is an empty string because the LLSD object
// contains an empty string or because it's isUndefined().
bool _undefined;
bool undefined_;
public:
LLSDParam(const LLSD& value):
_value(value),
_undefined(value.isUndefined())
value_(value),
undefined_(value.isUndefined())
{}
// The const char* we retrieve is for storage owned by our _value member.
// The const char* we retrieve is for storage owned by our value_ member.
// That's how we guarantee that the const char* is valid for the lifetime
// of this LLSDParam object. Constructing your LLSDParam in the argument
// list should ensure that the LLSDParam object will persist for the
// duration of the function call.
operator const char*() const
{
if (_undefined)
if (undefined_)
{
// By default, an isUndefined() LLSD object's asString() method
// will produce an empty string. But for a function accepting
@ -406,7 +470,7 @@ public:
// case, though, no LLSD value could pass NULL.
return NULL;
}
return _value.c_str();
return value_.c_str();
}
};
@ -555,4 +619,56 @@ struct hash<LLSD>
}
};
}
namespace LL
{

/*****************************************************************************
*   apply(function, LLSD array)
*****************************************************************************/
// Validate an incoming LLSD blob and return an LLSD array suitable to pass
// to the function of interest; throws LL::apply_error on mismatch.
LLSD apply_llsd_fix(size_t arity, const LLSD& args);

// Derived from https://stackoverflow.com/a/20441189
// and https://en.cppreference.com/w/cpp/utility/apply .
// We can't simply make a tuple from the LLSD array and then apply() that
// tuple to the function -- how would make_tuple() deduce the correct
// parameter type for each entry? We must go directly to the target function.
template <typename CALLABLE, std::size_t... I>
auto apply_impl(CALLABLE&& func, const LLSD& array, std::index_sequence<I...>)
{
    // call func(unpacked args), using generic LLSDParam<LLSD> to convert each
    // entry in 'array' to the target parameter type
    return std::forward<CALLABLE>(func)(LLSDParam<LLSD>(array[I])...);
}

// use apply_n<ARITY>(function, LLSD) to call a specific arity of a variadic
// function with (that many) items from the passed LLSD array
template <size_t ARITY, typename CALLABLE>
auto apply_n(CALLABLE&& func, const LLSD& args)
{
    // apply_llsd_fix() guarantees an array of exactly ARITY entries (or
    // throws), so the index_sequence expansion above stays in range
    return apply_impl(std::forward<CALLABLE>(func),
                      apply_llsd_fix(ARITY, args),
                      std::make_index_sequence<ARITY>());
}

/**
 * apply(function, LLSD) goes beyond C++17 std::apply(). For this case
 * @a function @em cannot be variadic: the compiler must know at compile
 * time how many arguments to pass. This isn't Python. (But see apply_n() to
 * pass a specific number of args to a variadic function.)
 */
template <typename CALLABLE>
auto apply(CALLABLE&& func, const LLSD& args)
{
    // infer arity from the definition of func
    constexpr auto arity = function_arity<
        typename std::remove_reference<CALLABLE>::type>::value;
    // now that we have a compile-time arity, apply_n() works
    return apply_n<arity>(std::forward<CALLABLE>(func), args);
}

} // namespace LL
#endif // LL_LLSDUTIL_H

View File

@ -782,20 +782,28 @@ static U32Kilobytes LLMemoryAdjustKBResult(U32Kilobytes inKB)
}
#endif
#if LL_DARWIN
// static
// Return the machine's total physical memory, queried via
// sysctl(CTL_HW, HW_MEMSIZE). Static (per the header note) because some Mac
// linkers won't let other libs reference the extern gSysMemory directly.
// NOTE(review): the sysctl() return value is unchecked; on failure 'phys'
// remains 0 and this reports zero memory -- confirm that's acceptable.
U32Kilobytes LLMemoryInfo::getHardwareMemSize()
{
    // This might work on Linux as well. Someone check...
    uint64_t phys = 0;
    int mib[2] = { CTL_HW, HW_MEMSIZE };
    size_t len = sizeof(phys);
    sysctl(mib, 2, &phys, &len, NULL, 0);
    // bytes -> kilobytes conversion happens in the U64Bytes/U32Kilobytes
    // unit types on return
    return U64Bytes(phys);
}
#endif
U32Kilobytes LLMemoryInfo::getPhysicalMemoryKB() const
{
#if LL_WINDOWS
return LLMemoryAdjustKBResult(U32Kilobytes(mStatsMap["Total Physical KB"].asInteger()));
#elif LL_DARWIN
// This might work on Linux as well. Someone check...
uint64_t phys = 0;
int mib[2] = { CTL_HW, HW_MEMSIZE };
size_t len = sizeof(phys);
sysctl(mib, 2, &phys, &len, NULL, 0);
return U64Bytes(phys);
return getHardwareMemSize();
#elif LL_LINUX
U64 phys = 0;

View File

@ -129,7 +129,10 @@ public:
LLMemoryInfo(); ///< Default constructor
void stream(std::ostream& s) const; ///< output text info to s
U32Kilobytes getPhysicalMemoryKB() const;
U32Kilobytes getPhysicalMemoryKB() const;
#if LL_DARWIN
static U32Kilobytes getHardwareMemSize(); // Because some Mac linkers won't let us reference extern gSysMemory from a different lib.
#endif
//get the available memory information in KiloBytes.
static void getAvailableMemoryKB(U32Kilobytes& avail_physical_mem_kb, U32Kilobytes& avail_virtual_mem_kb);

View File

@ -42,6 +42,7 @@
#ifdef LL_WINDOWS
const DWORD MS_VC_EXCEPTION=0x406D1388;
#pragma pack(push,8)
@ -133,6 +134,15 @@ void LLThread::threadRun()
{
#ifdef LL_WINDOWS
set_thread_name(-1, mName.c_str());
#if 0 // probably a bad idea, see usage of SetThreadIdealProcessor in LLWindowWin32)
HANDLE hThread = GetCurrentThread();
if (hThread)
{
SetThreadAffinityMask(hThread, (DWORD_PTR) 0xFFFFFFFFFFFFFFFE);
}
#endif
#endif
LL_PROFILER_SET_THREAD_NAME( mName.c_str() );

View File

@ -30,6 +30,9 @@
#include "u64.h"
#include <chrono>
#include <thread>
#if LL_WINDOWS
# include "llwin32headerslean.h"
#elif LL_LINUX || LL_DARWIN
@ -62,9 +65,18 @@ LLTimer* LLTimer::sTimer = NULL;
//---------------------------------------------------------------------------
#if LL_WINDOWS
#if 0
void ms_sleep(U32 ms)
{
Sleep(ms);
LL_PROFILE_ZONE_SCOPED;
using TimePoint = std::chrono::steady_clock::time_point;
auto resume_time = TimePoint::clock::now() + std::chrono::milliseconds(ms);
while (TimePoint::clock::now() < resume_time)
{
std::this_thread::yield(); //note: don't use LLThread::yield here to avoid yielding for too long
}
}
U32 micro_sleep(U64 us, U32 max_yields)
@ -74,6 +86,35 @@ U32 micro_sleep(U64 us, U32 max_yields)
ms_sleep((U32)(us / 1000));
return 0;
}
#else
// Suspend the calling thread for approximately 'us' microseconds.
// NOTE(review): despite the signature, 'max_yields' is ignored on this
// (Windows) path, and the function always returns 0 (yields performed).
U32 micro_sleep(U64 us, U32 max_yields)
{
    LL_PROFILE_ZONE_SCOPED
#if 0
    // Disabled alternative: a waitable timer offers sub-millisecond
    // resolution (negative QuadPart means relative time, in 100ns units).
    LARGE_INTEGER ft;
    ft.QuadPart = -static_cast<S64>(us * 10); // '-' using relative time
    HANDLE timer = CreateWaitableTimer(NULL, TRUE, NULL);
    SetWaitableTimer(timer, &ft, 0, NULL, NULL, 0);
    WaitForSingleObject(timer, INFINITE);
    CloseHandle(timer);
#else
    // Active path: plain Sleep(), truncating to whole milliseconds -- a
    // request under 1000us becomes Sleep(0), i.e. just a scheduler yield.
    Sleep(us / 1000);
#endif
    return 0;
}
// Suspend the calling thread for approximately 'ms' milliseconds.
// Thin wrapper over micro_sleep() so both granularities share one
// implementation.
void ms_sleep(U32 ms)
{
    LL_PROFILE_ZONE_SCOPED
    micro_sleep(ms * 1000, 0);
}
#endif
#elif LL_LINUX || LL_DARWIN
static void _sleep_loop(struct timespec& thiswait)
{

File diff suppressed because it is too large Load Diff

View File

@ -73,6 +73,7 @@ void LLWorkerThread::clearDeleteList()
{
worker->mRequestHandle = LLWorkerThread::nullHandle();
worker->clearFlags(LLWorkerClass::WCF_HAVE_WORK);
worker->clearFlags(LLWorkerClass::WCF_WORKING);
delete worker;
}
mDeleteList.clear() ;
@ -97,6 +98,7 @@ size_t LLWorkerThread::update(F32 max_time_ms)
{
if (worker->getFlags(LLWorkerClass::WCF_WORK_FINISHED))
{
worker->setFlags(LLWorkerClass::WCF_DELETE_REQUESTED);
delete_list.push_back(worker);
mDeleteList.erase(curiter);
}
@ -130,11 +132,11 @@ size_t LLWorkerThread::update(F32 max_time_ms)
//----------------------------------------------------------------------------
LLWorkerThread::handle_t LLWorkerThread::addWorkRequest(LLWorkerClass* workerclass, S32 param, U32 priority)
LLWorkerThread::handle_t LLWorkerThread::addWorkRequest(LLWorkerClass* workerclass, S32 param)
{
handle_t handle = generateHandle();
WorkRequest* req = new WorkRequest(handle, priority, workerclass, param);
WorkRequest* req = new WorkRequest(handle, workerclass, param);
bool res = addRequest(req);
if (!res)
@ -157,8 +159,8 @@ void LLWorkerThread::deleteWorker(LLWorkerClass* workerclass)
//============================================================================
// Runs on its OWN thread
LLWorkerThread::WorkRequest::WorkRequest(handle_t handle, U32 priority, LLWorkerClass* workerclass, S32 param) :
LLQueuedThread::QueuedRequest(handle, priority),
LLWorkerThread::WorkRequest::WorkRequest(handle_t handle, LLWorkerClass* workerclass, S32 param) :
LLQueuedThread::QueuedRequest(handle),
mWorkerClass(workerclass),
mParam(param)
{
@ -177,6 +179,7 @@ void LLWorkerThread::WorkRequest::deleteRequest()
// virtual
bool LLWorkerThread::WorkRequest::processRequest()
{
LL_PROFILE_ZONE_SCOPED;
LLWorkerClass* workerclass = getWorkerClass();
workerclass->setWorking(true);
bool complete = workerclass->doWork(getParam());
@ -187,6 +190,7 @@ bool LLWorkerThread::WorkRequest::processRequest()
// virtual
void LLWorkerThread::WorkRequest::finishRequest(bool completed)
{
LL_PROFILE_ZONE_SCOPED;
LLWorkerClass* workerclass = getWorkerClass();
workerclass->finishWork(getParam(), completed);
U32 flags = LLWorkerClass::WCF_WORK_FINISHED | (completed ? 0 : LLWorkerClass::WCF_WORK_ABORTED);
@ -200,7 +204,6 @@ LLWorkerClass::LLWorkerClass(LLWorkerThread* workerthread, const std::string& na
: mWorkerThread(workerthread),
mWorkerClassName(name),
mRequestHandle(LLWorkerThread::nullHandle()),
mRequestPriority(LLWorkerThread::PRIORITY_NORMAL),
mMutex(),
mWorkFlags(0)
{
@ -289,7 +292,7 @@ bool LLWorkerClass::yield()
//----------------------------------------------------------------------------
// calls startWork, adds doWork() to queue
void LLWorkerClass::addWork(S32 param, U32 priority)
void LLWorkerClass::addWork(S32 param)
{
mMutex.lock();
llassert_always(!(mWorkFlags & (WCF_WORKING|WCF_HAVE_WORK)));
@ -303,7 +306,7 @@ void LLWorkerClass::addWork(S32 param, U32 priority)
startWork(param);
clearFlags(WCF_WORK_FINISHED|WCF_WORK_ABORTED);
setFlags(WCF_HAVE_WORK);
mRequestHandle = mWorkerThread->addWorkRequest(this, param, priority);
mRequestHandle = mWorkerThread->addWorkRequest(this, param);
mMutex.unlock();
}
@ -318,7 +321,6 @@ void LLWorkerClass::abortWork(bool autocomplete)
if (mRequestHandle != LLWorkerThread::nullHandle())
{
mWorkerThread->abortRequest(mRequestHandle, autocomplete);
mWorkerThread->setPriority(mRequestHandle, LLQueuedThread::PRIORITY_IMMEDIATE);
setFlags(WCF_ABORT_REQUESTED);
}
mMutex.unlock();
@ -392,16 +394,5 @@ void LLWorkerClass::scheduleDelete()
}
}
void LLWorkerClass::setPriority(U32 priority)
{
mMutex.lock();
if (mRequestHandle != LLWorkerThread::nullHandle() && mRequestPriority != priority)
{
mRequestPriority = priority;
mWorkerThread->setPriority(mRequestHandle, priority);
}
mMutex.unlock();
}
//============================================================================

View File

@ -56,7 +56,7 @@ public:
virtual ~WorkRequest(); // use deleteRequest()
public:
WorkRequest(handle_t handle, U32 priority, LLWorkerClass* workerclass, S32 param);
WorkRequest(handle_t handle, LLWorkerClass* workerclass, S32 param);
S32 getParam()
{
@ -90,7 +90,7 @@ public:
/*virtual*/ size_t update(F32 max_time_ms);
handle_t addWorkRequest(LLWorkerClass* workerclass, S32 param, U32 priority = PRIORITY_NORMAL);
handle_t addWorkRequest(LLWorkerClass* workerclass, S32 param);
S32 getNumDeletes() { return (S32)mDeleteList.size(); } // debug
@ -151,10 +151,6 @@ public:
bool isWorking() { return getFlags(WCF_WORKING); }
bool wasAborted() { return getFlags(WCF_ABORT_REQUESTED); }
// setPriority(): changes the priority of a request
void setPriority(U32 priority);
U32 getPriority() { return mRequestPriority; }
const std::string& getName() const { return mWorkerClassName; }
protected:
@ -169,7 +165,7 @@ protected:
void setWorkerThread(LLWorkerThread* workerthread);
// addWork(): calls startWork, adds doWork() to queue
void addWork(S32 param, U32 priority = LLWorkerThread::PRIORITY_NORMAL);
void addWork(S32 param);
// abortWork(): requests that work be aborted
void abortWork(bool autocomplete);
@ -193,7 +189,6 @@ protected:
LLWorkerThread* mWorkerThread;
std::string mWorkerClassName;
handle_t mRequestHandle;
U32 mRequestPriority; // last priority set
private:
LLMutex mMutex;

View File

@ -0,0 +1,240 @@
/**
* @file apply_test.cpp
* @author Nat Goodspeed
* @date 2022-12-19
* @brief Test for apply.
*
* $LicenseInfo:firstyear=2022&license=viewerlgpl$
* Copyright (c) 2022, Linden Research, Inc.
* $/LicenseInfo$
*/
// Precompiled header
#include "linden_common.h"
// associated header
#include "apply.h"
// STL headers
// std headers
#include <iomanip>
// external library headers
// other Linden headers
#include "llsd.h"
#include "llsdutil.h"
#include <array>
#include <string>
#include <vector>
// for ensure_equals
/// Stream a vector of strings as, e.g., ["The", "quick", "brown"] so that
/// tut's ensure_equals() can print readable failure messages.
/// Fix: an empty vector previously printed as "]" because the opening
/// bracket was only emitted as the first element's delimiter; it now
/// prints "[]".
std::ostream& operator<<(std::ostream& out, const std::vector<std::string>& stringvec)
{
    out << '[';
    const char* delim = "";
    for (const auto& str : stringvec)
    {
        // std::quoted() wraps each entry in double quotes, escaping as needed
        out << delim << std::quoted(str);
        delim = ", ";
    }
    return out << ']';
}
// the above must be declared BEFORE ensure_equals(std::vector<std::string>)
#include "../test/lltut.h"
/*****************************************************************************
* TUT
*****************************************************************************/
namespace tut
{
namespace statics
{
/*------------------------------ data ------------------------------*/
// Although we're using types from the LLSD namespace, we're not
// constructing LLSD values, but rather instances of the C++ types
// supported by LLSD.
// One sample value for each C++ type convertible to/from LLSD; the test
// functions below verify these arrive unchanged through LL::apply().
static LLSD::Boolean b{true};
static LLSD::Integer i{17};
static LLSD::Real f{3.14};
static LLSD::String s{ "hello" };
static LLSD::UUID uu{ "baadf00d-dead-beef-baad-feedb0ef" };
static LLSD::Date dt{ "2022-12-19" };
static LLSD::URI uri{ "http://secondlife.com" };
static LLSD::Binary bin{ 0x01, 0x02, 0x03, 0x04, 0x05 };
// sample argument list for the apply(vector) test
static std::vector<LLSD::String> quick
{
    "The", "quick", "brown", "fox", "etc."
};
// sample argument list for the apply(array) test
static std::array<int, 5> fibs
{
    0, 1, 1, 2, 3
};
// ensure that apply() actually reaches the target method --
// lack of ensure_equals() failure could be due to no-op apply()
bool called{ false };
// capture calls from collect()
std::vector<std::string> collected;
/*------------------------- test functions -------------------------*/
/// Target with one parameter per LLSD-convertible C++ type: asserts that
/// LL::apply() forwarded every argument from statics unchanged.
void various(LLSD::Boolean vb, LLSD::Integer vi, LLSD::Real vf, const LLSD::String& vs,
             const LLSD::UUID& vuu, const LLSD::Date& vdt,
             const LLSD::URI& vuri, const LLSD::Binary& vbin)
{
    // prove apply() really reached this target
    called = true;
    ensure_equals( "b mismatch", vb, statics::b);
    ensure_equals( "i mismatch", vi, statics::i);
    ensure_equals( "f mismatch", vf, statics::f);
    ensure_equals( "s mismatch", vs, statics::s);
    ensure_equals( "uu mismatch", vuu, statics::uu);
    ensure_equals( "dt mismatch", vdt, statics::dt);
    ensure_equals("uri mismatch", vuri, statics::uri);
    ensure_equals("bin mismatch", vbin, statics::bin);
}
/// Target taking five strings: asserts that each one matches the
/// corresponding entry in statics::quick, positionally.
void strings(std::string s0, std::string s1, std::string s2, std::string s3, std::string s4)
{
    // prove apply() really reached this target
    called = true;
    // compare each parameter against its source element in a loop
    const std::string* actual[]{ &s0, &s1, &s2, &s3, &s4 };
    const char* msgs[]{ "s0 mismatch", "s1 mismatch", "s2 mismatch",
                        "s3 mismatch", "s4 mismatch" };
    for (size_t ix = 0; ix < 5; ++ix)
    {
        ensure_equals(msgs[ix], *actual[ix], statics::quick[ix]);
    }
}
/// Target taking five ints: asserts that each one matches the
/// corresponding entry in statics::fibs, positionally.
void ints(int i0, int i1, int i2, int i3, int i4)
{
    // prove apply() really reached this target
    called = true;
    // compare each parameter against its source element in a loop
    const int actual[]{ i0, i1, i2, i3, i4 };
    const char* msgs[]{ "i0 mismatch", "i1 mismatch", "i2 mismatch",
                        "i3 mismatch", "i4 mismatch" };
    for (size_t ix = 0; ix < 5; ++ix)
    {
        ensure_equals(msgs[ix], actual[ix], statics::fibs[ix]);
    }
}
// target for apply(fn(LLSD), scalar): receives the LLSD value itself
void sdfunc(const LLSD& sd)
{
    called = true;
    ensure_equals("sd mismatch", sd.asInteger(), statics::i);
}
// target for apply(fn(int), scalar): single plain-int parameter
void intfunc(int i)
{
    called = true;
    ensure_equals("i mismatch", i, statics::i);
}
// target for apply() with an empty argument list
void voidfunc()
{
    called = true;
}
// recursion tail
void collect()
{
    called = true;
}
// collect(arbitrary): peels off the first string, records it in
// statics::collected, recurs on the rest
template <typename... ARGS>
void collect(const std::string& first, ARGS&&... rest)
{
    statics::collected.push_back(first);
    collect(std::forward<ARGS>(rest)...);
}
} // namespace statics
// Per-test fixture: each test method starts with pristine observable
// state in namespace statics.
struct apply_data
{
    apply_data()
    {
        // wipe any strings recorded by a previous test's collect()
        statics::collected.clear();
        // no target function has been reached yet
        statics::called = false;
    }
};
// standard TUT boilerplate: fixture-bound group, test object, group instance
typedef test_group<apply_data> apply_group;
typedef apply_group::object object;
apply_group applygrp("apply");
template<> template<>
void object::test<1>()
{
    set_test_name("apply(tuple)");
    // forward a std::tuple of mixed LLSD-convertible types to an
    // 8-parameter target function
    LL::apply(statics::various,
              std::make_tuple(statics::b, statics::i, statics::f, statics::s,
                              statics::uu, statics::dt, statics::uri, statics::bin));
    ensure("apply(tuple) failed", statics::called);
}
template<> template<>
void object::test<2>()
{
    set_test_name("apply(array)");
    // forward a fixed-size std::array to a 5-int target function
    LL::apply(statics::ints, statics::fibs);
    ensure("apply(array) failed", statics::called);
}
template<> template<>
void object::test<3>()
{
    set_test_name("apply(vector)");
    // forward a runtime-sized std::vector to a 5-string target function
    LL::apply(statics::strings, statics::quick);
    ensure("apply(vector) failed", statics::called);
}
// The various apply(LLSD) tests exercise only the success cases because
// the failure cases trigger assert() fail, which is hard to catch.
template<> template<>
void object::test<4>()
{
    set_test_name("apply(LLSD())");
    // an undefined LLSD is treated as an empty argument list
    LL::apply(statics::voidfunc, LLSD());
    ensure("apply(LLSD()) failed", statics::called);
}
template<> template<>
void object::test<5>()
{
    set_test_name("apply(fn(int), LLSD scalar)");
    // an LLSD scalar is treated as a single-entry argument list
    LL::apply(statics::intfunc, LLSD(statics::i));
    ensure("apply(fn(int), LLSD scalar) failed", statics::called);
}
template<> template<>
void object::test<6>()
{
    set_test_name("apply(fn(LLSD), LLSD scalar)");
    // This test verifies that LLSDParam<LLSD> doesn't send the compiler
    // into infinite recursion when the target is itself LLSD.
    LL::apply(statics::sdfunc, LLSD(statics::i));
    ensure("apply(fn(LLSD), LLSD scalar) failed", statics::called);
}
template<> template<>
void object::test<7>()
{
    set_test_name("apply(LLSD array)");
    // an LLSD array maps element-by-element onto the target's parameters
    LL::apply(statics::various,
              llsd::array(statics::b, statics::i, statics::f, statics::s,
                          statics::uu, statics::dt, statics::uri, statics::bin));
    ensure("apply(LLSD array) failed", statics::called);
}
template<> template<>
void object::test<8>()
{
    set_test_name("VAPPLY()");
    // Make a std::array<std::string> from statics::quick. We can't call a
    // variadic function with a data structure of dynamic length.
    std::array<std::string, 5> strray;
    for (size_t i = 0; i < strray.size(); ++i)
        strray[i] = statics::quick[i];
    // This doesn't work: the compiler doesn't know which overload of
    // collect() to pass to LL::apply().
    // LL::apply(statics::collect, strray);
    // That's what VAPPLY() is for.
    VAPPLY(statics::collect, strray);
    ensure("VAPPLY() failed", statics::called);
    // verify collect() saw every string, in order
    ensure_equals("collected mismatch", statics::collected, statics::quick);
}
} // namespace tut

View File

@ -0,0 +1,136 @@
/**
* @file lazyeventapi_test.cpp
* @author Nat Goodspeed
* @date 2022-06-18
* @brief Test for lazyeventapi.
*
* $LicenseInfo:firstyear=2022&license=viewerlgpl$
* Copyright (c) 2022, Linden Research, Inc.
* $/LicenseInfo$
*/
// Precompiled header
#include "linden_common.h"
// associated header
#include "lazyeventapi.h"
// STL headers
// std headers
// external library headers
// other Linden headers
#include "../test/lltut.h"
#include "llevents.h"
#include "llsdutil.h"
// observable side effect, solely for testing
static LLSD data;
// LLEventAPI listener subclass
class MyListener: public LLEventAPI
{
public:
    // need this trivial forwarding constructor
    // (of course do any other initialization your subclass requires)
    MyListener(const LL::LazyEventAPIParams& params):
        LLEventAPI(params)
    {}
    // example operation, registered by LazyEventAPI subclass below:
    // copies the event's "data" entry into the file-static 'data'
    // variable so the tests can observe that this listener ran
    void set_data(const LLSD& event)
    {
        data = event["data"];
    }
};
// LazyEventAPI registrar subclass
class MyRegistrar: public LL::LazyEventAPI<MyListener>
{
    using super = LL::LazyEventAPI<MyListener>;
    using super::listener;
public:
    // LazyEventAPI subclass initializes like a classic LLEventAPI subclass
    // constructor, with API name and desc plus add() calls for the defined
    // operations
    // NOTE(review): test<2> below demonstrates that MyListener is only
    // reachable while a MyRegistrar instance exists -- presumably the
    // listener is instantiated lazily on demand; confirm in lazyeventapi.h
    MyRegistrar():
        super("Test", "This is a test LLEventAPI")
    {
        add("set", "This is a set operation", &listener::set_data);
    }
};
// Normally we'd declare a static instance of MyRegistrar -- but because we
// want to test both with and without, defer declaration to individual test
// methods.
/*****************************************************************************
* TUT
*****************************************************************************/
namespace tut
{
// Per-test fixture for the lazyeventapi group.
struct lazyeventapi_data
{
    lazyeventapi_data()
    {
        // before every test, reset 'data'
        data.clear();
    }
    ~lazyeventapi_data()
    {
        // after every test, reset LLEventPumps so pumps registered by one
        // test don't leak into the next
        LLEventPumps::deleteSingleton();
    }
};
// standard TUT boilerplate: fixture-bound group, test object, group instance
typedef test_group<lazyeventapi_data> lazyeventapi_group;
typedef lazyeventapi_group::object object;
lazyeventapi_group lazyeventapigrp("lazyeventapi");
template<> template<>
void object::test<1>()
{
    set_test_name("LazyEventAPI");
    // this is where the magic (should) happen
    // 'register' still a keyword until C++17
    MyRegistrar regster;
    // posting to "Test" should reach MyListener::set_data()
    LLEventPumps::instance().obtain("Test").post(llsd::map("op", "set", "data", "hey"));
    ensure_equals("failed to set data", data.asString(), "hey");
}
template<> template<>
void object::test<2>()
{
    set_test_name("No LazyEventAPI");
    // Because the MyRegistrar declaration in test<1>() is local, because
    // it has been destroyed, we fully expect NOT to reach a MyListener
    // instance with this post.
    LLEventPumps::instance().obtain("Test").post(llsd::map("op", "set", "data", "moot"));
    ensure("accidentally set data", ! data.isDefined());
}
template<> template<>
void object::test<3>()
{
    set_test_name("LazyEventAPI metadata");
    MyRegistrar regster;
    // Of course we have 'regster' in hand; we don't need to search for
    // it. But this next test verifies that we can find (all) LazyEventAPI
    // instances using LazyEventAPIBase::instance_snapshot. Normally we
    // wouldn't search; normally we'd just look at each instance in the
    // loop body.
    const MyRegistrar* found = nullptr;
    for (const auto& registrar : LL::LazyEventAPIBase::instance_snapshot())
        if ((found = dynamic_cast<const MyRegistrar*>(&registrar)))
            break;
    ensure("Failed to find MyRegistrar via LLInstanceTracker", found);
    // verify the API-level metadata set by MyRegistrar's constructor
    ensure_equals("wrong API name", found->getName(), "Test");
    ensure_contains("wrong API desc", found->getDesc(), "test LLEventAPI");
    ensure_equals("wrong API field", found->getDispatchKey(), "op");
    // Normally we'd just iterate over *found. But for test purposes,
    // actually capture the range of NameDesc pairs in a vector.
    std::vector<LL::LazyEventAPIBase::NameDesc> ops{ found->begin(), found->end() };
    ensure_equals("failed to find operations", ops.size(), 1);
    // verify the single "set" operation added by MyRegistrar
    ensure_equals("wrong operation name", ops[0].first, "set");
    ensure_contains("wrong operation desc", ops[0].second, "set operation");
    LLSD metadata{ found->getMetadata(ops[0].first) };
    ensure_equals("bad metadata name", metadata["name"].asString(), ops[0].first);
    ensure_equals("bad metadata desc", metadata["desc"].asString(), ops[0].second);
}
} // namespace tut

View File

@ -18,9 +18,12 @@
// external library headers
// other Linden headers
#include "../test/lltut.h"
#include "lleventfilter.h"
#include "llsd.h"
#include "llsdutil.h"
#include "llevents.h"
#include "stringize.h"
#include "StringVec.h"
#include "tests/wrapllerrs.h"
#include "../test/catch_and_store_what_in.h"
#include "../test/debug.h"
@ -32,8 +35,6 @@
#include <boost/bind.hpp>
#include <boost/function.hpp>
#include <boost/range.hpp>
#include <boost/foreach.hpp>
#define foreach BOOST_FOREACH
#include <boost/lambda/lambda.hpp>
@ -177,6 +178,7 @@ struct Vars
/*-------- Arbitrary-params (non-const, const, static) methods ---------*/
void methodna(NPARAMSa)
{
DEBUG;
// Because our const char* param cp might be NULL, and because we
// intend to capture the value in a std::string, have to distinguish
// between the NULL value and any non-NULL value. Use a convention
@ -188,7 +190,7 @@ struct Vars
else
vcp = std::string("'") + cp + "'";
debug()("methodna(", b,
this->debug()("methodna(", b,
", ", i,
", ", f,
", ", d,
@ -205,7 +207,7 @@ struct Vars
void methodnb(NPARAMSb)
{
std::ostringstream vbin;
foreach(U8 byte, bin)
for (U8 byte: bin)
{
vbin << std::hex << std::setfill('0') << std::setw(2) << unsigned(byte);
}
@ -226,7 +228,8 @@ struct Vars
void cmethodna(NPARAMSa) const
{
debug()('c', NONL);
DEBUG;
this->debug()('c', NONL);
const_cast<Vars*>(this)->methodna(NARGSa);
}
@ -315,6 +318,31 @@ void freenb(NPARAMSb)
*****************************************************************************/
namespace tut
{
// Fail the current test unless 'outer' contains the substring 'inner'.
void ensure_has(const std::string& outer, const std::string& inner)
{
    ensure(stringize("'", outer, "' does not contain '", inner, "'"),
           outer.find(inner) != std::string::npos);
}

// Invoke 'func', which is expected to throw LLEventDispatcher::DispatchError;
// fail unless the exception message contains 'exc_frag'. Returns the full
// message so callers can make additional assertions about it.
template <typename CALLABLE>
std::string call_exc(CALLABLE&& func, const std::string& exc_frag)
{
    std::string what =
        catch_what<LLEventDispatcher::DispatchError>(std::forward<CALLABLE>(func));
    ensure_has(what, exc_frag);
    return what;
}

// Invoke 'func', expecting the error to be logged rather than propagated;
// fail unless the captured log output contains 'frag'.
template <typename CALLABLE>
void call_logerr(CALLABLE&& func, const std::string& frag)
{
    CaptureLog capture;
    // the error should be logged; we just need to stop the exception
    // propagating
    catch_what<LLEventDispatcher::DispatchError>(std::forward<CALLABLE>(func));
    capture.messageWith(frag);
}
struct lleventdispatcher_data
{
Debug debug{"test"};
@ -397,9 +425,9 @@ namespace tut
work.add(name, desc, &Dispatcher::cmethod1, required);
// Non-subclass method with/out required params
addf("method1", "method1", &v);
work.add(name, desc, boost::bind(&Vars::method1, boost::ref(v), _1));
work.add(name, desc, [this](const LLSD& args){ return v.method1(args); });
addf("method1_req", "method1", &v);
work.add(name, desc, boost::bind(&Vars::method1, boost::ref(v), _1), required);
work.add(name, desc, [this](const LLSD& args){ return v.method1(args); }, required);
/*--------------- Arbitrary params, array style ----------------*/
@ -461,7 +489,7 @@ namespace tut
debug("dft_array_full:\n",
dft_array_full);
// Partial defaults arrays.
foreach(LLSD::String a, ab)
for (LLSD::String a: ab)
{
LLSD::Integer partition(std::min(partial_offset, dft_array_full[a].size()));
dft_array_partial[a] =
@ -471,7 +499,7 @@ namespace tut
debug("dft_array_partial:\n",
dft_array_partial);
foreach(LLSD::String a, ab)
for(LLSD::String a: ab)
{
// Generate full defaults maps by zipping (params, dft_array_full).
dft_map_full[a] = zipmap(params[a], dft_array_full[a]);
@ -583,6 +611,7 @@ namespace tut
void addf(const std::string& n, const std::string& d, Vars* v)
{
debug("addf('", n, "', '", d, "')");
// This method is to capture in our own DescMap the name and
// description of every registered function, for metadata query
// testing.
@ -598,19 +627,14 @@ namespace tut
{
// Copy descs to a temp map of same type.
DescMap forgotten(descs.begin(), descs.end());
// LLEventDispatcher intentionally provides only const_iterator:
// since dereferencing that iterator generates values on the fly,
// it's meaningless to have a modifiable iterator. But since our
// 'work' object isn't const, by default BOOST_FOREACH() wants to
// use non-const iterators. Persuade it to use the const_iterator.
foreach(LLEventDispatcher::NameDesc nd, const_cast<const Dispatcher&>(work))
for (LLEventDispatcher::NameDesc nd: work)
{
DescMap::iterator found = forgotten.find(nd.first);
ensure(STRINGIZE("LLEventDispatcher records function '" << nd.first
<< "' we didn't enter"),
ensure(stringize("LLEventDispatcher records function '", nd.first,
"' we didn't enter"),
found != forgotten.end());
ensure_equals(STRINGIZE("LLEventDispatcher desc '" << nd.second <<
"' doesn't match what we entered: '" << found->second << "'"),
ensure_equals(stringize("LLEventDispatcher desc '", nd.second,
"' doesn't match what we entered: '", found->second, "'"),
nd.second, found->second);
// found in our map the name from LLEventDispatcher, good, erase
// our map entry
@ -621,41 +645,49 @@ namespace tut
std::ostringstream out;
out << "LLEventDispatcher failed to report";
const char* delim = ": ";
foreach(const DescMap::value_type& fme, forgotten)
for (const DescMap::value_type& fme: forgotten)
{
out << delim << fme.first;
delim = ", ";
}
ensure(out.str(), false);
throw failure(out.str());
}
}
Vars* varsfor(const std::string& name)
{
VarsMap::const_iterator found = funcvars.find(name);
ensure(STRINGIZE("No Vars* for " << name), found != funcvars.end());
ensure(STRINGIZE("NULL Vars* for " << name), found->second);
ensure(stringize("No Vars* for ", name), found != funcvars.end());
ensure(stringize("NULL Vars* for ", name), found->second);
return found->second;
}
void ensure_has(const std::string& outer, const std::string& inner)
std::string call_exc(const std::string& func, const LLSD& args, const std::string& exc_frag)
{
ensure(STRINGIZE("'" << outer << "' does not contain '" << inner << "'").c_str(),
outer.find(inner) != std::string::npos);
return tut::call_exc(
[this, func, args]()
{
if (func.empty())
{
work(args);
}
else
{
work(func, args);
}
},
exc_frag);
}
void call_exc(const std::string& func, const LLSD& args, const std::string& exc_frag)
void call_logerr(const std::string& func, const LLSD& args, const std::string& frag)
{
std::string threw = catch_what<std::runtime_error>([this, &func, &args](){
work(func, args);
});
ensure_has(threw, exc_frag);
tut::call_logerr([this, func, args](){ work(func, args); }, frag);
}
LLSD getMetadata(const std::string& name)
{
LLSD meta(work.getMetadata(name));
ensure(STRINGIZE("No metadata for " << name), meta.isDefined());
ensure(stringize("No metadata for ", name), meta.isDefined());
return meta;
}
@ -724,7 +756,7 @@ namespace tut
set_test_name("map-style registration with non-array params");
// Pass "param names" as scalar or as map
LLSD attempts(llsd::array(17, LLSDMap("pi", 3.14)("two", 2)));
foreach(LLSD ae, inArray(attempts))
for (LLSD ae: inArray(attempts))
{
std::string threw = catch_what<std::exception>([this, &ae](){
work.add("freena_err", "freena", freena, ae);
@ -799,7 +831,7 @@ namespace tut
{
set_test_name("query Callables with/out required params");
LLSD names(llsd::array("free1", "Dmethod1", "Dcmethod1", "method1"));
foreach(LLSD nm, inArray(names))
for (LLSD nm: inArray(names))
{
LLSD metadata(getMetadata(nm));
ensure_equals("name mismatch", metadata["name"], nm);
@ -828,19 +860,19 @@ namespace tut
(5, llsd::array("freena_array", "smethodna_array", "methodna_array")),
llsd::array
(5, llsd::array("freenb_array", "smethodnb_array", "methodnb_array"))));
foreach(LLSD ae, inArray(expected))
for (LLSD ae: inArray(expected))
{
LLSD::Integer arity(ae[0].asInteger());
LLSD names(ae[1]);
LLSD req(LLSD::emptyArray());
if (arity)
req[arity - 1] = LLSD();
foreach(LLSD nm, inArray(names))
for (LLSD nm: inArray(names))
{
LLSD metadata(getMetadata(nm));
ensure_equals("name mismatch", metadata["name"], nm);
ensure_equals(metadata["desc"].asString(), descs[nm]);
ensure_equals(STRINGIZE("mismatched required for " << nm.asString()),
ensure_equals(stringize("mismatched required for ", nm.asString()),
metadata["required"], req);
ensure("should not have optional", metadata["optional"].isUndefined());
}
@ -854,7 +886,7 @@ namespace tut
// - (Free function | non-static method), map style, no params (ergo
// no defaults)
LLSD names(llsd::array("free0_map", "smethod0_map", "method0_map"));
foreach(LLSD nm, inArray(names))
for (LLSD nm: inArray(names))
{
LLSD metadata(getMetadata(nm));
ensure_equals("name mismatch", metadata["name"], nm);
@ -884,7 +916,7 @@ namespace tut
llsd::array("smethodnb_map_adft", "smethodnb_map_mdft"),
llsd::array("methodna_map_adft", "methodna_map_mdft"),
llsd::array("methodnb_map_adft", "methodnb_map_mdft")));
foreach(LLSD eq, inArray(equivalences))
for (LLSD eq: inArray(equivalences))
{
LLSD adft(eq[0]);
LLSD mdft(eq[1]);
@ -898,8 +930,8 @@ namespace tut
ensure_equals("mdft name", mdft, mmeta["name"]);
ameta.erase("name");
mmeta.erase("name");
ensure_equals(STRINGIZE("metadata for " << adft.asString()
<< " vs. " << mdft.asString()),
ensure_equals(stringize("metadata for ", adft.asString(),
" vs. ", mdft.asString()),
ameta, mmeta);
}
}
@ -915,7 +947,7 @@ namespace tut
// params are required. Also maps containing left requirements for
// partial defaults arrays. Also defaults maps from defaults arrays.
LLSD allreq, leftreq, rightdft;
foreach(LLSD::String a, ab)
for (LLSD::String a: ab)
{
// The map in which all params are required uses params[a] as
// keys, with all isUndefined() as values. We can accomplish that
@ -943,9 +975,9 @@ namespace tut
// Generate maps containing parameter names not provided by the
// dft_map_partial maps.
LLSD skipreq(allreq);
foreach(LLSD::String a, ab)
for (LLSD::String a: ab)
{
foreach(const MapEntry& me, inMap(dft_map_partial[a]))
for (const MapEntry& me: inMap(dft_map_partial[a]))
{
skipreq[a].erase(me.first);
}
@ -990,7 +1022,7 @@ namespace tut
(llsd::array("freenb_map_mdft", "smethodnb_map_mdft", "methodnb_map_mdft"),
llsd::array(LLSD::emptyMap(), dft_map_full["b"])))); // required, optional
foreach(LLSD grp, inArray(groups))
for (LLSD grp: inArray(groups))
{
// Internal structure of each group in 'groups':
LLSD names(grp[0]);
@ -1003,14 +1035,14 @@ namespace tut
optional);
// Loop through 'names'
foreach(LLSD nm, inArray(names))
for (LLSD nm: inArray(names))
{
LLSD metadata(getMetadata(nm));
ensure_equals("name mismatch", metadata["name"], nm);
ensure_equals(nm.asString(), metadata["desc"].asString(), descs[nm]);
ensure_equals(STRINGIZE(nm << " required mismatch"),
ensure_equals(stringize(nm, " required mismatch"),
metadata["required"], required);
ensure_equals(STRINGIZE(nm << " optional mismatch"),
ensure_equals(stringize(nm, " optional mismatch"),
metadata["optional"], optional);
}
}
@ -1031,13 +1063,7 @@ namespace tut
{
set_test_name("call with bad name");
call_exc("freek", LLSD(), "not found");
// We don't have a comparable helper function for the one-arg
// operator() method, and it's not worth building one just for this
// case. Write it out.
std::string threw = catch_what<std::runtime_error>([this](){
work(LLSDMap("op", "freek"));
});
ensure_has(threw, "bad");
std::string threw = call_exc("", LLSDMap("op", "freek"), "bad");
ensure_has(threw, "op");
ensure_has(threw, "freek");
}
@ -1079,7 +1105,7 @@ namespace tut
// LLSD value matching 'required' according to llsd_matches() rules.
LLSD matching(LLSDMap("d", 3.14)("array", llsd::array("answer", true, answer)));
// Okay, walk through 'tests'.
foreach(const CallablesTriple& tr, tests)
for (const CallablesTriple& tr: tests)
{
// Should be able to pass 'answer' to Callables registered
// without 'required'.
@ -1087,7 +1113,7 @@ namespace tut
ensure_equals("answer mismatch", tr.llsd, answer);
// Should NOT be able to pass 'answer' to Callables registered
// with 'required'.
call_exc(tr.name_req, answer, "bad request");
call_logerr(tr.name_req, answer, "bad request");
// But SHOULD be able to pass 'matching' to Callables registered
// with 'required'.
work(tr.name_req, matching);
@ -1101,17 +1127,20 @@ namespace tut
set_test_name("passing wrong args to (map | array)-style registrations");
// Pass scalar/map to array-style functions, scalar/array to map-style
// functions. As that validation happens well before we engage the
// argument magic, it seems pointless to repeat this with every
// variation: (free function | non-static method), (no | arbitrary)
// args. We should only need to engage it for one map-style
// registration and one array-style registration.
std::string array_exc("needs an args array");
call_exc("free0_array", 17, array_exc);
call_exc("free0_array", LLSDMap("pi", 3.14), array_exc);
// functions. It seems pointless to repeat this with every variation:
// (free function | non-static method), (no | arbitrary) args. We
// should only need to engage it for one map-style registration and
// one array-style registration.
// Now that LLEventDispatcher has been extended to treat an LLSD
// scalar as a single-entry array, the error we expect in this case is
// that apply() is trying to pass that non-empty array to a nullary
// function.
call_logerr("free0_array", 17, "LL::apply");
// similarly, apply() doesn't accept an LLSD Map
call_logerr("free0_array", LLSDMap("pi", 3.14), "unsupported");
std::string map_exc("needs a map");
call_exc("free0_map", 17, map_exc);
call_logerr("free0_map", 17, map_exc);
// Passing an array to a map-style function works now! No longer an
// error case!
// call_exc("free0_map", llsd::array("a", "b"), map_exc);
@ -1125,7 +1154,7 @@ namespace tut
("free0_array", "free0_map",
"smethod0_array", "smethod0_map",
"method0_array", "method0_map"));
foreach(LLSD name, inArray(names))
for (LLSD name: inArray(names))
{
// Look up the Vars instance for this function.
Vars* vars(varsfor(name));
@ -1150,15 +1179,21 @@ namespace tut
template<> template<>
void object::test<19>()
{
set_test_name("call array-style functions with too-short arrays");
// Could have two different too-short arrays, one for *na and one for
// *nb, but since they both take 5 params...
set_test_name("call array-style functions with wrong-length arrays");
// Could have different wrong-length arrays for *na and for *nb, but
// since they both take 5 params...
LLSD tooshort(llsd::array("this", "array", "too", "short"));
foreach(const LLSD& funcsab, inArray(array_funcs))
LLSD toolong (llsd::array("this", "array", "is", "one", "too", "long"));
LLSD badargs (llsd::array(tooshort, toolong));
for (const LLSD& toosomething: inArray(badargs))
{
foreach(const llsd::MapEntry& e, inMap(funcsab))
for (const LLSD& funcsab: inArray(array_funcs))
{
call_exc(e.second, tooshort, "requires more arguments");
for (const llsd::MapEntry& e: inMap(funcsab))
{
// apply() complains about wrong number of array entries
call_logerr(e.second, toosomething, "LL::apply");
}
}
}
}
@ -1166,7 +1201,7 @@ namespace tut
template<> template<>
void object::test<20>()
{
set_test_name("call array-style functions with (just right | too long) arrays");
set_test_name("call array-style functions with right-size arrays");
std::vector<U8> binary;
for (size_t h(0x01), i(0); i < 5; h+= 0x22, ++i)
{
@ -1178,40 +1213,25 @@ namespace tut
LLDate("2011-02-03T15:07:00Z"),
LLURI("http://secondlife.com"),
binary)));
LLSD argsplus(args);
argsplus["a"].append("bogus");
argsplus["b"].append("bogus");
LLSD expect;
foreach(LLSD::String a, ab)
for (LLSD::String a: ab)
{
expect[a] = zipmap(params[a], args[a]);
}
// Adjust expect["a"]["cp"] for special Vars::cp treatment.
expect["a"]["cp"] = std::string("'") + expect["a"]["cp"].asString() + "'";
expect["a"]["cp"] = stringize("'", expect["a"]["cp"].asString(), "'");
debug("expect: ", expect);
// Use substantially the same logic for args and argsplus
LLSD argsarrays(llsd::array(args, argsplus));
// So i==0 selects 'args', i==1 selects argsplus
for (LLSD::Integer i(0), iend(argsarrays.size()); i < iend; ++i)
for (const LLSD& funcsab: inArray(array_funcs))
{
foreach(const LLSD& funcsab, inArray(array_funcs))
for (LLSD::String a: ab)
{
foreach(LLSD::String a, ab)
{
// Reset the Vars instance before each call
Vars* vars(varsfor(funcsab[a]));
*vars = Vars();
work(funcsab[a], argsarrays[i][a]);
ensure_llsd(STRINGIZE(funcsab[a].asString() <<
": expect[\"" << a << "\"] mismatch"),
vars->inspect(), expect[a], 7); // 7 bits ~= 2 decimal digits
// TODO: in the i==1 or argsplus case, intercept LL_WARNS
// output? Even without that, using argsplus verifies that
// passing too many args isn't fatal; it works -- but
// would be nice to notice the warning too.
}
// Reset the Vars instance before each call
Vars* vars(varsfor(funcsab[a]));
*vars = Vars();
work(funcsab[a], args[a]);
ensure_llsd(stringize(funcsab[a].asString(), ": expect[\"", a, "\"] mismatch"),
vars->inspect(), expect[a], 7); // 7 bits ~= 2 decimal digits
}
}
}
@ -1239,7 +1259,7 @@ namespace tut
("a", llsd::array(false, 255, 98.6, 1024.5, "pointer"))
("b", llsd::array("object", LLUUID::generateNewID(), LLDate::now(), LLURI("http://wiki.lindenlab.com/wiki"), LLSD::Binary(boost::begin(binary), boost::end(binary)))));
LLSD array_overfull(array_full);
foreach(LLSD::String a, ab)
for (LLSD::String a: ab)
{
array_overfull[a].append("bogus");
}
@ -1253,7 +1273,7 @@ namespace tut
ensure_not_equals("UUID collision",
array_full["b"][1].asUUID(), dft_array_full["b"][1].asUUID());
LLSD map_full, map_overfull;
foreach(LLSD::String a, ab)
for (LLSD::String a: ab)
{
map_full[a] = zipmap(params[a], array_full[a]);
map_overfull[a] = map_full[a];
@ -1294,21 +1314,360 @@ namespace tut
"freenb_map_mdft", "smethodnb_map_mdft", "methodnb_map_mdft")));
// Treat (full | overfull) (array | map) the same.
LLSD argssets(llsd::array(array_full, array_overfull, map_full, map_overfull));
foreach(const LLSD& args, inArray(argssets))
for (const LLSD& args: inArray(argssets))
{
foreach(LLSD::String a, ab)
for (LLSD::String a: ab)
{
foreach(LLSD::String name, inArray(names[a]))
for (LLSD::String name: inArray(names[a]))
{
// Reset the Vars instance
Vars* vars(varsfor(name));
*vars = Vars();
work(name, args[a]);
ensure_llsd(STRINGIZE(name << ": expect[\"" << a << "\"] mismatch"),
ensure_llsd(stringize(name, ": expect[\"", a, "\"] mismatch"),
vars->inspect(), expect[a], 7); // 7 bits, 2 decimal digits
// intercept LL_WARNS for the two overfull cases?
}
}
}
}
// LLDispatchListener subclass whose registered operations return values of
// assorted types, used by tests 23-35 to verify that results propagate back
// to the caller (via operator() return, or a "reply" pump for post()).
struct DispatchResult: public LLDispatchListener
{
    using DR = DispatchResult;

    // listen on pump "results", dispatching on key "op"
    DispatchResult(): LLDispatchListener("results", "op")
    {
        add("strfunc", "return string", &DR::strfunc);
        add("voidfunc", "void function", &DR::voidfunc);
        add("emptyfunc", "return empty LLSD", &DR::emptyfunc);
        add("intfunc", "return Integer LLSD", &DR::intfunc);
        add("llsdfunc", "return passed LLSD", &DR::llsdfunc);
        add("mapfunc", "return map LLSD", &DR::mapfunc);
        add("arrayfunc", "return array LLSD", &DR::arrayfunc);
    }

    // prefix the argument with "got "
    std::string strfunc(const std::string& str) const { return "got " + str; }
    // returns nothing at all
    void voidfunc() const {}
    // returns an isUndefined() LLSD
    LLSD emptyfunc() const { return {}; }
    // negate the argument
    int intfunc(int i) const { return -i; }
    // echo the whole incoming event, adding a "with" key
    LLSD llsdfunc(const LLSD& event) const
    {
        LLSD result{ event };
        result["with"] = "string";
        return result;
    }
    // combine intfunc() and strfunc() results into an LLSD map
    LLSD mapfunc(int i, const std::string& str) const
    {
        return llsd::map("i", intfunc(i), "str", strfunc(str));
    }
    // combine intfunc() and strfunc() results into an LLSD array
    LLSD arrayfunc(int i, const std::string& str) const
    {
        return llsd::array(intfunc(i), strfunc(str));
    }
};
template<> template<>
void object::test<23>()
{
    set_test_name("string result");
    DispatchResult service;
    // operator() returns the target function's result as LLSD
    LLSD result{ service("strfunc", "a string") };
    ensure_equals("strfunc() mismatch", result.asString(), "got a string");
}
template<> template<>
void object::test<24>()
{
    set_test_name("void result");
    DispatchResult service;
    // a void target function yields an undefined LLSD result
    LLSD result{ service("voidfunc", LLSD()) };
    ensure("voidfunc() returned defined", result.isUndefined());
}
template<> template<>
void object::test<25>()
{
    set_test_name("Integer result");
    DispatchResult service;
    // intfunc() negates its argument
    LLSD result{ service("intfunc", -17) };
    ensure_equals("intfunc() mismatch", result.asInteger(), 17);
}
template<> template<>
void object::test<26>()
{
    set_test_name("LLSD echo");
    DispatchResult service;
    // llsdfunc() echoes the event map, adding "with": "string"
    LLSD result{ service("llsdfunc", llsd::map("op", "llsdfunc", "reqid", 17)) };
    ensure_equals("llsdfunc() mismatch", result,
                  llsd::map("op", "llsdfunc", "reqid", 17, "with", "string"));
}
template<> template<>
void object::test<27>()
{
    set_test_name("map LLSD result");
    DispatchResult service;
    LLSD result{ service("mapfunc", llsd::array(-12, "value")) };
    ensure_equals("mapfunc() mismatch", result, llsd::map("i", 12, "str", "got value"));
}
template<> template<>
void object::test<28>()
{
    set_test_name("array LLSD result");
    DispatchResult service;
    LLSD result{ service("arrayfunc", llsd::array(-8, "word")) };
    ensure_equals("arrayfunc() mismatch", result, llsd::array(8, "got word"));
}
template<> template<>
void object::test<29>()
{
set_test_name("listener error, no reply");
DispatchResult service;
tut::call_exc(
[&service]()
{ service.post(llsd::map("op", "nosuchfunc", "reqid", 17)); },
"nosuchfunc");
}
template<> template<>
void object::test<30>()
{
set_test_name("listener error with reply");
DispatchResult service;
LLCaptureListener<LLSD> result;
service.post(llsd::map("op", "nosuchfunc", "reqid", 17, "reply", result.getName()));
LLSD reply{ result.get() };
ensure("no reply", reply.isDefined());
ensure_equals("reqid not echoed", reply["reqid"].asInteger(), 17);
ensure_has(reply["error"].asString(), "nosuchfunc");
}
template<> template<>
void object::test<31>()
{
set_test_name("listener call to void function");
DispatchResult service;
LLCaptureListener<LLSD> result;
result.set("non-empty");
for (const auto& func: StringVec{ "voidfunc", "emptyfunc" })
{
service.post(llsd::map(
"op", func,
"reqid", 17,
"reply", result.getName()));
ensure_equals("reply from " + func, result.get().asString(), "non-empty");
}
}
template<> template<>
void object::test<32>()
{
set_test_name("listener call to string function");
DispatchResult service;
LLCaptureListener<LLSD> result;
service.post(llsd::map(
"op", "strfunc",
"args", llsd::array("a string"),
"reqid", 17,
"reply", result.getName()));
LLSD reply{ result.get() };
ensure_equals("reqid not echoed", reply["reqid"].asInteger(), 17);
ensure_equals("bad reply from strfunc", reply["data"].asString(), "got a string");
}
template<> template<>
void object::test<33>()
{
    // A map-returning function invoked via post() merges its result keys
    // into the reply, echoing "reqid".
    set_test_name("listener call to map function");
    DispatchResult dispatcher;
    LLCaptureListener<LLSD> captured;
    dispatcher.post(llsd::map(
        "op", "mapfunc",
        "args", llsd::array(-7, "value"),
        "reqid", 17,
        "reply", captured.getName()));
    LLSD response = captured.get();
    ensure_equals("reqid not echoed", response["reqid"].asInteger(), 17);
    ensure_equals("bad i from mapfunc", response["i"].asInteger(), 7);
    ensure_equals("bad str from mapfunc", response["str"].asString(), "got value");
}
template<> template<>
void object::test<34>()
{
    // A map-valued "op" runs a whole batch of operations keyed by name;
    // the reply maps each name to its result.
    set_test_name("batched map success");
    DispatchResult dispatcher;
    LLCaptureListener<LLSD> captured;
    dispatcher.post(llsd::map(
        "op", llsd::map(
            "strfunc", "some string",
            "intfunc", 2,
            "voidfunc", LLSD(),
            "arrayfunc", llsd::array(-5, "other string")),
        "reqid", 17,
        "reply", captured.getName()));
    LLSD response = captured.get();
    ensure_equals("reqid not echoed", response["reqid"].asInteger(), 17);
    response.erase("reqid");
    LLSD expected = llsd::map(
        "strfunc", "got some string",
        "intfunc", -2,
        "voidfunc", LLSD(),
        "arrayfunc", llsd::array(5, "got other string"));
    ensure_equals("bad map batch", response, expected);
}
template<> template<>
void object::test<35>()
{
    // A map batch containing bad entries still runs the good ones: the
    // reply carries the partial results plus an "error" string naming
    // every failed entry.
    set_test_name("batched map error");
    DispatchResult dispatcher;
    LLCaptureListener<LLSD> captured;
    dispatcher.post(llsd::map(
        "op", llsd::map(
            "badfunc", 34,        // !
            "strfunc", "some string",
            "intfunc", 2,
            "missing", LLSD(),    // !
            "voidfunc", LLSD(),
            "arrayfunc", llsd::array(-5, "other string")),
        "reqid", 17,
        "reply", captured.getName()));
    LLSD response = captured.get();
    ensure_equals("reqid not echoed", response["reqid"].asInteger(), 17);
    response.erase("reqid");
    auto errtext{ response["error"].asString() };
    response.erase("error");
    ensure_has(errtext, "badfunc");
    ensure_has(errtext, "missing");
    LLSD expected = llsd::map(
        "strfunc", "got some string",
        "intfunc", -2,
        "voidfunc", LLSD(),
        "arrayfunc", llsd::array(5, "got other string"));
    ensure_equals("bad partial batch", response, expected);
}
template<> template<>
void object::test<36>()
{
    // Without a "reply" pump, a bad map batch raises instead; the
    // exception message must name every failed entry.
    set_test_name("batched map exception");
    DispatchResult dispatcher;
    auto caught = tut::call_exc(
        [&dispatcher]
        {
            dispatcher.post(llsd::map(
                "op", llsd::map(
                    "badfunc", 34,        // !
                    "strfunc", "some string",
                    "intfunc", 2,
                    "missing", LLSD(),    // !
                    "voidfunc", LLSD(),
                    "arrayfunc", llsd::array(-5, "other string")),
                "reqid", 17));
            // no "reply"
        },
        "badfunc");
    ensure_has(caught, "missing");
}
template<> template<>
void object::test<37>()
{
    // An array-valued "op" runs operations positionally; results come back
    // as an array under "data", in call order.
    set_test_name("batched array success");
    DispatchResult dispatcher;
    LLCaptureListener<LLSD> captured;
    dispatcher.post(llsd::map(
        "op", llsd::array(
            llsd::array("strfunc", "some string"),
            llsd::array("intfunc", 2),
            "arrayfunc",
            "voidfunc"),
        "args", llsd::array(
            LLSD(),
            LLSD(),
            llsd::array(-5, "other string")),
        // args array deliberately short, since the default
        // [3] is undefined, which should work for voidfunc
        "reqid", 17,
        "reply", captured.getName()));
    LLSD response = captured.get();
    ensure_equals("reqid not echoed", response["reqid"].asInteger(), 17);
    response.erase("reqid");
    LLSD expected = llsd::map(
        "data", llsd::array(
            "got some string",
            -2,
            llsd::array(5, "got other string"),
            LLSD()));
    ensure_equals("bad array batch", response, expected);
}
template<> template<>
void object::test<38>()
{
    // An array batch with a malformed entry still runs the entries before
    // it: the reply carries the partial "data" plus an "error" string
    // citing the offending index.
    set_test_name("batched array error");
    DispatchResult service;
    LLCaptureListener<LLSD> result;
    service.post(llsd::map(
        "op", llsd::array(
            llsd::array("strfunc", "some string"),
            llsd::array("intfunc", 2, "whoops"), // bad form
            "arrayfunc",
            "voidfunc"),
        "args", llsd::array(
            LLSD(),
            LLSD(),
            llsd::array(-5, "other string")),
        // args array deliberately short, since the default
        // [3] is undefined, which should work for voidfunc
        "reqid", 17,
        "reply", result.getName()));
    LLSD reply{ result.get() };
    ensure_equals("reqid not echoed", reply["reqid"].asInteger(), 17);
    reply.erase("reqid");
    // Extract the error as a string (consistent with the "batched map
    // error" test above) so ensure_has() gets an unambiguous string
    // argument rather than a raw LLSD.
    auto error{ reply["error"].asString() };
    reply.erase("error");
    ensure_has(error, "[1]");
    ensure_has(error, "unsupported");
    ensure_equals("bad array batch", reply,
                  llsd::map("data", llsd::array("got some string")));
}
template<> template<>
void object::test<39>()
{
    // Without a "reply" pump, the malformed array entry raises instead;
    // the exception message names the bad index.
    set_test_name("batched array exception");
    DispatchResult dispatcher;
    auto caught = tut::call_exc(
        [&dispatcher]
        {
            dispatcher.post(llsd::map(
                "op", llsd::array(
                    llsd::array("strfunc", "some string"),
                    llsd::array("intfunc", 2, "whoops"), // bad form
                    "arrayfunc",
                    "voidfunc"),
                "args", llsd::array(
                    LLSD(),
                    LLSD(),
                    llsd::array(-5, "other string")),
                // args array deliberately short, since the default
                // [3] is undefined, which should work for voidfunc
                "reqid", 17));
            // no "reply"
        },
        "[1]");
    ensure_has(caught, "unsupported");
}
} // namespace tut

View File

@ -17,8 +17,6 @@
// std headers
#include <functional>
// external library headers
#include <boost/assign/list_of.hpp>
#include <boost/phoenix/core/argument.hpp>
// other Linden headers
#include "../test/lltut.h"
#include "../test/namedtempfile.h"
@ -30,10 +28,6 @@
#include "stringize.h"
#include "StringVec.h"
using boost::assign::list_of;
StringVec sv(const StringVec& listof) { return listof; }
#if defined(LL_WINDOWS)
#define sleep(secs) _sleep((secs) * 1000)
@ -104,17 +98,12 @@ namespace tut
llleap_data():
reader(".py",
// This logic is adapted from vita.viewerclient.receiveEvent()
boost::phoenix::placeholders::arg1 <<
[](std::ostream& out){ out <<
"import re\n"
"import os\n"
"import sys\n"
"\n"
"try:\n"
// new freestanding llsd package
" import llsd\n"
"except ImportError:\n"
// older llbase.llsd module
" from llbase import llsd\n"
"import llsd\n"
"\n"
"class ProtocolError(Exception):\n"
" def __init__(self, msg, data):\n"
@ -193,7 +182,7 @@ namespace tut
"def request(pump, data):\n"
" # we expect 'data' is a dict\n"
" data['reply'] = _reply\n"
" send(pump, data)\n"),
" send(pump, data)\n";}),
// Get the actual pathname of the NamedExtTempFile and trim off
// the ".py" extension. (We could cache reader.getName() in a
// separate member variable, but I happen to know getName() just
@ -218,14 +207,14 @@ namespace tut
void object::test<1>()
{
set_test_name("multiple LLLeap instances");
NamedTempFile script("py",
"import time\n"
"time.sleep(1)\n");
NamedExtTempFile script("py",
"import time\n"
"time.sleep(1)\n");
LLLeapVector instances;
instances.push_back(LLLeap::create(get_test_name(),
sv(list_of(PYTHON)(script.getName())))->getWeak());
StringVec{PYTHON, script.getName()})->getWeak());
instances.push_back(LLLeap::create(get_test_name(),
sv(list_of(PYTHON)(script.getName())))->getWeak());
StringVec{PYTHON, script.getName()})->getWeak());
// In this case we're simply establishing that two LLLeap instances
// can coexist without throwing exceptions or bombing in any other
// way. Wait for them to terminate.
@ -236,10 +225,10 @@ namespace tut
void object::test<2>()
{
set_test_name("stderr to log");
NamedTempFile script("py",
"import sys\n"
"sys.stderr.write('''Hello from Python!\n"
"note partial line''')\n");
NamedExtTempFile script("py",
"import sys\n"
"sys.stderr.write('''Hello from Python!\n"
"note partial line''')\n");
StringVec vcommand{ PYTHON, script.getName() };
CaptureLog log(LLError::LEVEL_INFO);
waitfor(LLLeap::create(get_test_name(), vcommand));
@ -251,11 +240,11 @@ namespace tut
void object::test<3>()
{
set_test_name("bad stdout protocol");
NamedTempFile script("py",
"print('Hello from Python!')\n");
NamedExtTempFile script("py",
"print('Hello from Python!')\n");
CaptureLog log(LLError::LEVEL_WARN);
waitfor(LLLeap::create(get_test_name(),
sv(list_of(PYTHON)(script.getName()))));
StringVec{PYTHON, script.getName()}));
ensure_contains("error log line",
log.messageWith("invalid protocol"), "Hello from Python!");
}
@ -264,13 +253,13 @@ namespace tut
void object::test<4>()
{
set_test_name("leftover stdout");
NamedTempFile script("py",
"import sys\n"
// note lack of newline
"sys.stdout.write('Hello from Python!')\n");
NamedExtTempFile script("py",
"import sys\n"
// note lack of newline
"sys.stdout.write('Hello from Python!')\n");
CaptureLog log(LLError::LEVEL_WARN);
waitfor(LLLeap::create(get_test_name(),
sv(list_of(PYTHON)(script.getName()))));
StringVec{PYTHON, script.getName()}));
ensure_contains("error log line",
log.messageWith("Discarding"), "Hello from Python!");
}
@ -279,12 +268,12 @@ namespace tut
void object::test<5>()
{
set_test_name("bad stdout len prefix");
NamedTempFile script("py",
"import sys\n"
"sys.stdout.write('5a2:something')\n");
NamedExtTempFile script("py",
"import sys\n"
"sys.stdout.write('5a2:something')\n");
CaptureLog log(LLError::LEVEL_WARN);
waitfor(LLLeap::create(get_test_name(),
sv(list_of(PYTHON)(script.getName()))));
StringVec{PYTHON, script.getName()}));
ensure_contains("error log line",
log.messageWith("invalid protocol"), "5a2:");
}
@ -386,17 +375,18 @@ namespace tut
set_test_name("round trip");
AckAPI api;
Result result;
NamedTempFile script("py",
boost::phoenix::placeholders::arg1 <<
"from " << reader_module << " import *\n"
// make a request on our little API
"request(pump='" << api.getName() << "', data={})\n"
// wait for its response
"resp = get()\n"
"result = '' if resp == dict(pump=replypump(), data='ack')\\\n"
" else 'bad: ' + str(resp)\n"
"send(pump='" << result.getName() << "', data=result)\n");
waitfor(LLLeap::create(get_test_name(), sv(list_of(PYTHON)(script.getName()))));
NamedExtTempFile script("py",
[&](std::ostream& out){ out <<
"from " << reader_module << " import *\n"
// make a request on our little API
"request(pump='" << api.getName() << "', data={})\n"
// wait for its response
"resp = get()\n"
"result = '' if resp == dict(pump=replypump(), data='ack')\\\n"
" else 'bad: ' + str(resp)\n"
"send(pump='" << result.getName() << "', data=result)\n";});
waitfor(LLLeap::create(get_test_name(),
StringVec{PYTHON, script.getName()}));
result.ensure();
}
@ -424,38 +414,38 @@ namespace tut
// iterations etc. in OS pipes and the LLLeap/LLProcess implementation.
ReqIDAPI api;
Result result;
NamedTempFile script("py",
boost::phoenix::placeholders::arg1 <<
"import sys\n"
"from " << reader_module << " import *\n"
// Note that since reader imports llsd, this
// 'import *' gets us llsd too.
"sample = llsd.format_notation(dict(pump='" <<
api.getName() << "', data=dict(reqid=999999, reply=replypump())))\n"
// The whole packet has length prefix too: "len:data"
"samplen = len(str(len(sample))) + 1 + len(sample)\n"
// guess how many messages it will take to
// accumulate BUFFERED_LENGTH
"count = int(" << BUFFERED_LENGTH << "/samplen)\n"
"print('Sending %s requests' % count, file=sys.stderr)\n"
"for i in range(count):\n"
" request('" << api.getName() << "', dict(reqid=i))\n"
// The assumption in this specific test that
// replies will arrive in the same order as
// requests is ONLY valid because the API we're
// invoking sends replies instantly. If the API
// had to wait for some external event before
// sending its reply, replies could arrive in
// arbitrary order, and we'd have to tick them
// off from a set.
"result = ''\n"
"for i in range(count):\n"
" resp = get()\n"
" if resp['data']['reqid'] != i:\n"
" result = 'expected reqid=%s in %s' % (i, resp)\n"
" break\n"
"send(pump='" << result.getName() << "', data=result)\n");
waitfor(LLLeap::create(get_test_name(), sv(list_of(PYTHON)(script.getName()))),
NamedExtTempFile script("py",
[&](std::ostream& out){ out <<
"import sys\n"
"from " << reader_module << " import *\n"
// Note that since reader imports llsd, this
// 'import *' gets us llsd too.
"sample = llsd.format_notation(dict(pump='" <<
api.getName() << "', data=dict(reqid=999999, reply=replypump())))\n"
// The whole packet has length prefix too: "len:data"
"samplen = len(str(len(sample))) + 1 + len(sample)\n"
// guess how many messages it will take to
// accumulate BUFFERED_LENGTH
"count = int(" << BUFFERED_LENGTH << "/samplen)\n"
"print('Sending %s requests' % count, file=sys.stderr)\n"
"for i in range(count):\n"
" request('" << api.getName() << "', dict(reqid=i))\n"
// The assumption in this specific test that
// replies will arrive in the same order as
// requests is ONLY valid because the API we're
// invoking sends replies instantly. If the API
// had to wait for some external event before
// sending its reply, replies could arrive in
// arbitrary order, and we'd have to tick them
// off from a set.
"result = ''\n"
"for i in range(count):\n"
" resp = get()\n"
" if resp['data']['reqid'] != i:\n"
" result = 'expected reqid=%s in %s' % (i, resp)\n"
" break\n"
"send(pump='" << result.getName() << "', data=result)\n";});
waitfor(LLLeap::create(get_test_name(), StringVec{PYTHON, script.getName()}),
300); // needs more realtime than most tests
result.ensure();
}
@ -467,65 +457,62 @@ namespace tut
{
ReqIDAPI api;
Result result;
NamedTempFile script("py",
boost::phoenix::placeholders::arg1 <<
"import sys\n"
"from " << reader_module << " import *\n"
// Generate a very large string value.
"desired = int(sys.argv[1])\n"
// 7 chars per item: 6 digits, 1 comma
"count = int((desired - 50)/7)\n"
"large = ''.join('%06d,' % i for i in range(count))\n"
// Pass 'large' as reqid because we know the API
// will echo reqid, and we want to receive it back.
"request('" << api.getName() << "', dict(reqid=large))\n"
"try:\n"
" resp = get()\n"
"except ParseError as e:\n"
" # try to find where e.data diverges from expectation\n"
// Normally we'd expect a 'pump' key in there,
// too, with value replypump(). But Python
// serializes keys in a different order than C++,
// so incoming data start with 'data'.
// Truthfully, though, if we get as far as 'pump'
// before we find a difference, something's very
// strange.
" expect = llsd.format_notation(dict(data=dict(reqid=large)))\n"
" chunk = 40\n"
" for offset in range(0, max(len(e.data), len(expect)), chunk):\n"
" if e.data[offset:offset+chunk] != \\\n"
" expect[offset:offset+chunk]:\n"
" print('Offset %06d: expect %r,\\n'\\\n"
" ' get %r' %\\\n"
" (offset,\n"
" expect[offset:offset+chunk],\n"
" e.data[offset:offset+chunk]),\n"
" file=sys.stderr)\n"
" break\n"
" else:\n"
" print('incoming data matches expect?!', file=sys.stderr)\n"
" send('" << result.getName() << "', '%s: %s' % (e.__class__.__name__, e))\n"
" sys.exit(1)\n"
"\n"
"echoed = resp['data']['reqid']\n"
"if echoed == large:\n"
" send('" << result.getName() << "', '')\n"
" sys.exit(0)\n"
// Here we know echoed did NOT match; try to find where
"for i in range(count):\n"
" start = 7*i\n"
" end = 7*(i+1)\n"
" if end > len(echoed)\\\n"
" or echoed[start:end] != large[start:end]:\n"
" send('" << result.getName() << "',\n"
" 'at offset %s, expected %r but got %r' %\n"
" (start, large[start:end], echoed[start:end]))\n"
"sys.exit(1)\n");
NamedExtTempFile script("py",
[&](std::ostream& out){ out <<
"import sys\n"
"from " << reader_module << " import *\n"
// Generate a very large string value.
"desired = int(sys.argv[1])\n"
// 7 chars per item: 6 digits, 1 comma
"count = int((desired - 50)/7)\n"
"large = ''.join('%06d,' % i for i in range(count))\n"
// Pass 'large' as reqid because we know the API
// will echo reqid, and we want to receive it back.
"request('" << api.getName() << "', dict(reqid=large))\n"
"try:\n"
" resp = get()\n"
"except ParseError as e:\n"
" # try to find where e.data diverges from expectation\n"
// Normally we'd expect a 'pump' key in there,
// too, with value replypump(). But Python
// serializes keys in a different order than C++,
// so incoming data start with 'data'.
// Truthfully, though, if we get as far as 'pump'
// before we find a difference, something's very
// strange.
" expect = llsd.format_notation(dict(data=dict(reqid=large)))\n"
" chunk = 40\n"
" for offset in range(0, max(len(e.data), len(expect)), chunk):\n"
" if e.data[offset:offset+chunk] != \\\n"
" expect[offset:offset+chunk]:\n"
" print('Offset %06d: expect %r,\\n'\\\n"
" ' get %r' %\\\n"
" (offset,\n"
" expect[offset:offset+chunk],\n"
" e.data[offset:offset+chunk]),\n"
" file=sys.stderr)\n"
" break\n"
" else:\n"
" print('incoming data matches expect?!', file=sys.stderr)\n"
" send('" << result.getName() << "', '%s: %s' % (e.__class__.__name__, e))\n"
" sys.exit(1)\n"
"\n"
"echoed = resp['data']['reqid']\n"
"if echoed == large:\n"
" send('" << result.getName() << "', '')\n"
" sys.exit(0)\n"
// Here we know echoed did NOT match; try to find where
"for i in range(count):\n"
" start = 7*i\n"
" end = 7*(i+1)\n"
" if end > len(echoed)\\\n"
" or echoed[start:end] != large[start:end]:\n"
" send('" << result.getName() << "',\n"
" 'at offset %s, expected %r but got %r' %\n"
" (start, large[start:end], echoed[start:end]))\n"
"sys.exit(1)\n";});
waitfor(LLLeap::create(test_name,
sv(list_of
(PYTHON)
(script.getName())
(stringize(size)))),
StringVec{PYTHON, script.getName(), stringize(size)}),
180); // try a longer timeout
result.ensure();
}

View File

@ -151,8 +151,38 @@ struct PythonProcessLauncher
/// Launch Python script; verify that it launched
void launch()
{
mPy = LLProcess::create(mParams);
tut::ensure(STRINGIZE("Couldn't launch " << mDesc << " script"), bool(mPy));
try
{
mPy = LLProcess::create(mParams);
tut::ensure(STRINGIZE("Couldn't launch " << mDesc << " script"), bool(mPy));
}
catch (const tut::failure&)
{
// On Windows, if APR_LOG is set, our version of APR's
// apr_create_proc() logs to the specified file. If this test
// failed, try to report that log.
const char* APR_LOG = getenv("APR_LOG");
if (APR_LOG && *APR_LOG)
{
std::ifstream inf(APR_LOG);
if (! inf.is_open())
{
LL_WARNS() << "Couldn't open '" << APR_LOG << "'" << LL_ENDL;
}
else
{
LL_WARNS() << "==============================" << LL_ENDL;
LL_WARNS() << "From '" << APR_LOG << "':" << LL_ENDL;
std::string line;
while (std::getline(inf, line))
{
LL_WARNS() << line << LL_ENDL;
}
LL_WARNS() << "==============================" << LL_ENDL;
}
}
throw;
}
}
/// Run Python script and wait for it to complete.
@ -191,7 +221,7 @@ struct PythonProcessLauncher
LLProcess::Params mParams;
LLProcessPtr mPy;
std::string mDesc;
NamedTempFile mScript;
NamedExtTempFile mScript;
};
/// convenience function for PythonProcessLauncher::run()
@ -214,30 +244,26 @@ static std::string python_out(const std::string& desc, const CONTENT& script)
class NamedTempDir: public boost::noncopyable
{
public:
// Use python() function to create a temp directory: I've found
// nothing in either Boost.Filesystem or APR quite like Python's
// tempfile.mkdtemp().
// Special extra bonus: on Mac, mkdtemp() reports a pathname
// starting with /var/folders/something, whereas that's really a
// symlink to /private/var/folders/something. Have to use
// realpath() to compare properly.
NamedTempDir():
mPath(python_out("mkdtemp()",
"from __future__ import with_statement\n"
"import os.path, sys, tempfile\n"
"with open(sys.argv[1], 'w') as f:\n"
" f.write(os.path.normcase(os.path.normpath(os.path.realpath(tempfile.mkdtemp()))))\n"))
{}
mPath(NamedTempFile::temp_path()),
mCreated(boost::filesystem::create_directories(mPath))
{
mPath = boost::filesystem::canonical(mPath);
}
~NamedTempDir()
{
aprchk(apr_dir_remove(mPath.c_str(), gAPRPoolp));
if (mCreated)
{
boost::filesystem::remove_all(mPath);
}
}
std::string getName() const { return mPath; }
std::string getName() const { return mPath.string(); }
private:
std::string mPath;
boost::filesystem::path mPath;
bool mCreated;
};
/*****************************************************************************
@ -355,7 +381,7 @@ namespace tut
set_test_name("raw APR nonblocking I/O");
// Create a script file in a temporary place.
NamedTempFile script("py",
NamedExtTempFile script("py",
"from __future__ import print_function" EOL
"import sys" EOL
"import time" EOL
@ -565,7 +591,13 @@ namespace tut
" f.write(os.path.normcase(os.path.normpath(os.getcwd())))\n");
// Before running, call setWorkingDirectory()
py.mParams.cwd = tempdir.getName();
ensure_equals("os.getcwd()", py.run_read(), tempdir.getName());
std::string expected{ tempdir.getName() };
#if LL_WINDOWS
// SIGH, don't get tripped up by "C:" != "c:" --
// but on the Mac, using tolower() fails because "/users" != "/Users"!
expected = utf8str_tolower(expected);
#endif
ensure_equals("os.getcwd()", py.run_read(), expected);
}
template<> template<>

View File

@ -29,7 +29,23 @@
#include "../test/lltut.h"
#include "../llrand.h"
#include "stringize.h"
// In llrand.h, every function is documented to return less than the high end
// -- specifically, because you can pass a negative extent, they're documented
// never to return a value equal to the extent.
// So that we don't need two different versions of ensure_in_range(), when
// testing extent < 0, negate the return value and the extent before passing
// into ensure_in_range().
// Assert low <= value < high, failing with a message that embeds 'name'
// and the offending value. Per the comment above, callers testing a
// negative extent negate both the value and the extent first, so only
// this one ascending-order form is needed.
template <typename NUMBER>
void ensure_in_range(const std::string_view& name,
NUMBER value, NUMBER low, NUMBER high)
{
// Low bound is inclusive...
auto failmsg{ stringize(name, " >= ", low, " (", value, ')') };
tut::ensure(failmsg, (value >= low));
// ...high bound is exclusive, matching llrand.h's documented contract
// that functions never return a value equal to the extent.
failmsg = stringize(name, " < ", high, " (", value, ')');
tut::ensure(failmsg, (value < high));
}
namespace tut
{
@ -44,84 +60,65 @@ namespace tut
template<> template<>
void random_object_t::test<1>()
{
F32 number = 0.0f;
for(S32 ii = 0; ii < 100000; ++ii)
{
number = ll_frand();
ensure("frand >= 0", (number >= 0.0f));
ensure("frand < 1", (number < 1.0f));
ensure_in_range("frand", ll_frand(), 0.0f, 1.0f);
}
}
template<> template<>
void random_object_t::test<2>()
{
F64 number = 0.0f;
for(S32 ii = 0; ii < 100000; ++ii)
{
number = ll_drand();
ensure("drand >= 0", (number >= 0.0));
ensure("drand < 1", (number < 1.0));
ensure_in_range("drand", ll_drand(), 0.0, 1.0);
}
}
template<> template<>
void random_object_t::test<3>()
{
F32 number = 0.0f;
for(S32 ii = 0; ii < 100000; ++ii)
{
number = ll_frand(2.0f) - 1.0f;
ensure("frand >= 0", (number >= -1.0f));
ensure("frand < 1", (number <= 1.0f));
ensure_in_range("frand(2.0f)", ll_frand(2.0f) - 1.0f, -1.0f, 1.0f);
}
}
template<> template<>
void random_object_t::test<4>()
{
F32 number = 0.0f;
for(S32 ii = 0; ii < 100000; ++ii)
{
number = ll_frand(-7.0);
ensure("drand <= 0", (number <= 0.0));
ensure("drand > -7", (number > -7.0));
// Negate the result so we don't have to allow a templated low-end
// comparison as well.
ensure_in_range("-frand(-7.0)", -ll_frand(-7.0), 0.0f, 7.0f);
}
}
template<> template<>
void random_object_t::test<5>()
{
F64 number = 0.0f;
for(S32 ii = 0; ii < 100000; ++ii)
{
number = ll_drand(-2.0);
ensure("drand <= 0", (number <= 0.0));
ensure("drand > -2", (number > -2.0));
ensure_in_range("-drand(-2.0)", -ll_drand(-2.0), 0.0, 2.0);
}
}
template<> template<>
void random_object_t::test<6>()
{
S32 number = 0;
for(S32 ii = 0; ii < 100000; ++ii)
{
number = ll_rand(100);
ensure("rand >= 0", (number >= 0));
ensure("rand < 100", (number < 100));
ensure_in_range("rand(100)", ll_rand(100), 0, 100);
}
}
template<> template<>
void random_object_t::test<7>()
{
S32 number = 0;
for(S32 ii = 0; ii < 100000; ++ii)
{
number = ll_rand(-127);
ensure("rand <= 0", (number <= 0));
ensure("rand > -127", (number > -127));
ensure_in_range("-rand(-127)", -ll_rand(-127), 0, 127);
}
}
}

View File

@ -45,11 +45,6 @@ typedef U32 uint32_t;
#endif
#include "boost/range.hpp"
#include "boost/foreach.hpp"
#include "boost/bind.hpp"
#include "boost/phoenix/bind/bind_function.hpp"
#include "boost/phoenix/core/argument.hpp"
using namespace boost::phoenix;
#include "llsd.h"
#include "llsdserialize.h"
@ -57,9 +52,11 @@ using namespace boost::phoenix;
#include "llformat.h"
#include "llmemorystream.h"
#include "../test/hexdump.h"
#include "../test/lltut.h"
#include "../test/namedtempfile.h"
#include "stringize.h"
#include "StringVec.h"
#include <functional>
typedef std::function<void(const LLSD& data, std::ostream& str)> FormatterFunction;
@ -1796,16 +1793,12 @@ namespace tut
// helper for TestPythonCompatible
static std::string import_llsd("import os.path\n"
"import sys\n"
"try:\n"
// new freestanding llsd package
" import llsd\n"
"except ImportError:\n"
// older llbase.llsd module
" from llbase import llsd\n");
"import llsd\n");
// helper for TestPythonCompatible
template <typename CONTENT>
void python(const std::string& desc, const CONTENT& script, int expect=0)
template <typename CONTENT, typename... ARGS>
void python_expect(const std::string& desc, const CONTENT& script, int expect=0,
ARGS&&... args)
{
auto PYTHON(LLStringUtil::getenv("PYTHON"));
ensure("Set $PYTHON to the Python interpreter", !PYTHON.empty());
@ -1816,7 +1809,8 @@ namespace tut
std::string q("\"");
std::string qPYTHON(q + PYTHON + q);
std::string qscript(q + scriptfile.getName() + q);
int rc = _spawnl(_P_WAIT, PYTHON.c_str(), qPYTHON.c_str(), qscript.c_str(), NULL);
int rc = _spawnl(_P_WAIT, PYTHON.c_str(), qPYTHON.c_str(), qscript.c_str(),
std::forward<ARGS>(args)..., NULL);
if (rc == -1)
{
char buffer[256];
@ -1832,6 +1826,10 @@ namespace tut
LLProcess::Params params;
params.executable = PYTHON;
params.args.add(scriptfile.getName());
for (const std::string& arg : StringVec{ std::forward<ARGS>(args)... })
{
params.args.add(arg);
}
LLProcessPtr py(LLProcess::create(params));
ensure(STRINGIZE("Couldn't launch " << desc << " script"), bool(py));
// Implementing timeout would mean messing with alarm() and
@ -1866,6 +1864,14 @@ namespace tut
#endif
}
// helper for TestPythonCompatible
template <typename CONTENT, typename... ARGS>
void python(const std::string& desc, const CONTENT& script, ARGS&&... args)
{
// plain python() expects rc 0
python_expect(desc, script, 0, std::forward<ARGS>(args)...);
}
struct TestPythonCompatible
{
TestPythonCompatible() {}
@ -1880,10 +1886,10 @@ namespace tut
void TestPythonCompatibleObject::test<1>()
{
set_test_name("verify python()");
python("hello",
"import sys\n"
"sys.exit(17)\n",
17); // expect nonzero rc
python_expect("hello",
"import sys\n"
"sys.exit(17)\n",
17); // expect nonzero rc
}
template<> template<>
@ -1899,7 +1905,7 @@ namespace tut
static void writeLLSDArray(const FormatterFunction& serialize,
std::ostream& out, const LLSD& array)
{
for (const LLSD& item : llsd::inArray(array))
for (const LLSD& item: llsd::inArray(array))
{
// It's important to delimit the entries in this file somehow
// because, although Python's llsd.parse() can accept a file
@ -1914,7 +1920,14 @@ namespace tut
auto buffstr{ buffer.str() };
int bufflen{ static_cast<int>(buffstr.length()) };
out.write(reinterpret_cast<const char*>(&bufflen), sizeof(bufflen));
LL_DEBUGS() << "Wrote length: "
<< hexdump(reinterpret_cast<const char*>(&bufflen),
sizeof(bufflen))
<< LL_ENDL;
out.write(buffstr.c_str(), buffstr.length());
LL_DEBUGS() << "Wrote data: "
<< hexmix(buffstr.c_str(), buffstr.length())
<< LL_ENDL;
}
}
@ -1943,10 +1956,10 @@ namespace tut
" else:\n"
" raise AssertionError('Too many data items')\n";
// Create an llsdXXXXXX file containing 'data' serialized to
// notation.
// Create an llsdXXXXXX file containing 'data' serialized per
// FormatterFunction.
NamedTempFile file("llsd",
// NamedTempFile's boost::function constructor
// NamedTempFile's function constructor
// takes a callable. To this callable it passes the
// std::ostream with which it's writing the
// NamedTempFile.
@ -1954,34 +1967,50 @@ namespace tut
(std::ostream& out)
{ writeLLSDArray(serialize, out, cdata); });
python("read C++ " + desc,
placeholders::arg1 <<
import_llsd <<
"from functools import partial\n"
"import io\n"
"import struct\n"
"lenformat = struct.Struct('i')\n"
"def parse_each(inf):\n"
" for rawlen in iter(partial(inf.read, lenformat.size), b''):\n"
" len = lenformat.unpack(rawlen)[0]\n"
// Since llsd.parse() has no max_bytes argument, instead of
// passing the input stream directly to parse(), read the item
// into a distinct bytes object and parse that.
" data = inf.read(len)\n"
" try:\n"
" frombytes = llsd.parse(data)\n"
" except llsd.LLSDParseError as err:\n"
" print(f'*** {err}')\n"
" print(f'Bad content:\\n{data!r}')\n"
" raise\n"
// Also try parsing from a distinct stream.
" stream = io.BytesIO(data)\n"
" fromstream = llsd.parse(stream)\n"
" assert frombytes == fromstream\n"
" yield frombytes\n"
<< pydata <<
// Don't forget raw-string syntax for Windows pathnames.
"verify(parse_each(open(r'" << file.getName() << "', 'rb')))\n");
// 'debug' starts empty because it's intended as an output file
NamedTempFile debug("debug", "");
try
{
python("read C++ " + desc,
[&](std::ostream& out){ out <<
import_llsd <<
"from functools import partial\n"
"import io\n"
"import struct\n"
"lenformat = struct.Struct('i')\n"
"def parse_each(inf):\n"
" for rawlen in iter(partial(inf.read, lenformat.size), b''):\n"
" print('Read length:', ''.join(('%02x' % b) for b in rawlen),\n"
" file=debug)\n"
" len = lenformat.unpack(rawlen)[0]\n"
// Since llsd.parse() has no max_bytes argument, instead of
// passing the input stream directly to parse(), read the item
// into a distinct bytes object and parse that.
" data = inf.read(len)\n"
" print('Read data: ', repr(data), file=debug)\n"
" try:\n"
" frombytes = llsd.parse(data)\n"
" except llsd.LLSDParseError as err:\n"
" print(f'*** {err}')\n"
" print(f'Bad content:\\n{data!r}')\n"
" raise\n"
// Also try parsing from a distinct stream.
" stream = io.BytesIO(data)\n"
" fromstream = llsd.parse(stream)\n"
" assert frombytes == fromstream\n"
" yield frombytes\n"
<< pydata <<
// Don't forget raw-string syntax for Windows pathnames.
"debug = open(r'" << debug.getName() << "', 'w')\n"
"verify(parse_each(open(r'" << file.getName() << "', 'rb')))\n";});
}
catch (const failure&)
{
LL_DEBUGS() << "Script debug output:" << LL_ENDL;
debug.peep_log();
throw;
}
}
template<> template<>
@ -2068,7 +2097,7 @@ namespace tut
NamedTempFile file("llsd", "");
python("Python " + pyformatter,
placeholders::arg1 <<
[&](std::ostream& out){ out <<
import_llsd <<
"import struct\n"
"lenformat = struct.Struct('i')\n"
@ -2086,7 +2115,7 @@ namespace tut
" for item in DATA:\n"
" serialized = llsd." << pyformatter << "(item)\n"
" f.write(lenformat.pack(len(serialized)))\n"
" f.write(serialized)\n");
" f.write(serialized)\n";});
std::ifstream inf(file.getName().c_str());
LLSD item;

View File

@ -38,7 +38,7 @@ namespace tut
{
struct workqueue_data
{
WorkQueue queue{"queue"};
WorkSchedule queue{"queue"};
};
typedef test_group<workqueue_data> workqueue_group;
typedef workqueue_group::object object;
@ -49,8 +49,8 @@ namespace tut
{
set_test_name("name");
ensure_equals("didn't capture name", queue.getKey(), "queue");
ensure("not findable", WorkQueue::getInstance("queue") == queue.getWeak().lock());
WorkQueue q2;
ensure("not findable", WorkSchedule::getInstance("queue") == queue.getWeak().lock());
WorkSchedule q2;
ensure("has no name", LLStringUtil::startsWith(q2.getKey(), "WorkQueue"));
}
@ -73,17 +73,21 @@ namespace tut
{
set_test_name("postEvery");
// record of runs
using Shared = std::deque<WorkQueue::TimePoint>;
using Shared = std::deque<WorkSchedule::TimePoint>;
// This is an example of how to share data between the originator of
// postEvery(work) and the work item itself, since usually a WorkQueue
// postEvery(work) and the work item itself, since usually a WorkSchedule
// is used to dispatch work to a different thread. Neither of them
// should call any of LLCond's wait methods: you don't want to stall
// either the worker thread or the originating thread (conventionally
// main). Use LLCond or a subclass even if all you want to do is
// signal the work item that it can quit; consider LLOneShotCond.
LLCond<Shared> data;
auto start = WorkQueue::TimePoint::clock::now();
auto interval = 100ms;
auto start = WorkSchedule::TimePoint::clock::now();
// 2s seems like a long time to wait, since it directly impacts the
// duration of this test program. Unfortunately GitHub's Mac runners
// are pretty wimpy, and we're getting spurious "too late" errors just
// because the thread doesn't wake up as soon as we want.
auto interval = 2s;
queue.postEvery(
interval,
[&data, count = 0]
@ -93,7 +97,7 @@ namespace tut
data.update_one(
[](Shared& data)
{
data.push_back(WorkQueue::TimePoint::clock::now());
data.push_back(WorkSchedule::TimePoint::clock::now());
});
// by the 3rd call, return false to stop
return (++count < 3);
@ -102,7 +106,7 @@ namespace tut
// postEvery() running, so run until we have exhausted the iterations
// or we time out waiting
for (auto finish = start + 10*interval;
WorkQueue::TimePoint::clock::now() < finish &&
WorkSchedule::TimePoint::clock::now() < finish &&
data.get([](const Shared& data){ return data.size(); }) < 3; )
{
queue.runPending();
@ -139,8 +143,8 @@ namespace tut
void object::test<4>()
{
set_test_name("postTo");
WorkQueue main("main");
auto qptr = WorkQueue::getInstance("queue");
WorkSchedule main("main");
auto qptr = WorkSchedule::getInstance("queue");
int result = 0;
main.postTo(
qptr,
@ -171,8 +175,8 @@ namespace tut
void object::test<5>()
{
set_test_name("postTo with void return");
WorkQueue main("main");
auto qptr = WorkQueue::getInstance("queue");
WorkSchedule main("main");
auto qptr = WorkSchedule::getInstance("queue");
std::string observe;
main.postTo(
qptr,
@ -194,7 +198,7 @@ namespace tut
std::string stored;
// Try to call waitForResult() on this thread's main coroutine. It
// should throw because the main coroutine must service the queue.
auto what{ catch_what<WorkQueue::Error>(
auto what{ catch_what<WorkSchedule::Error>(
[this, &stored](){ stored = queue.waitForResult(
[](){ return "should throw"; }); }) };
ensure("lambda should not have run", stored.empty());

View File

@ -226,6 +226,11 @@ public:
return boost::dynamic_pointer_cast<CaptureLogRecorder>(mRecorder)->streamto(out);
}
/// Stream insertion for CaptureLog: delegates to streamto(), which dumps
/// the captured log messages to the given stream.
friend inline std::ostream& operator<<(std::ostream& out, const CaptureLog& self)
{
    return self.streamto(out);
}
private:
LLError::FatalFunction mFatalFunction;
LLError::SettingsStoragePtr mOldSettings;

View File

@ -17,18 +17,58 @@
// std headers
// external library headers
// other Linden headers
#include "commoncontrol.h"
#include "llerror.h"
#include "llevents.h"
#include "llsd.h"
#include "stringize.h"
LL::ThreadPool::ThreadPool(const std::string& name, size_t threads, size_t capacity):
#include <boost/fiber/algo/round_robin.hpp>
/*****************************************************************************
* Custom fiber scheduler for worker threads
*****************************************************************************/
// As of 2022-12-06, each of our worker threads only runs a single (default)
// fiber: we don't launch explicit fibers within worker threads, nor do we
// anticipate doing so. So a worker thread that's simply waiting for incoming
// tasks should really sleep a little. Override the default fiber scheduler to
// implement that.
// Fiber scheduler for worker threads that sleeps briefly instead of blocking
// on a condition variable. Our worker threads run only their single default
// fiber, so a cheap fixed-duration nap while idle is sufficient.
struct sleepy_robin: public boost::fibers::algo::round_robin
{
    virtual void suspend_until( std::chrono::steady_clock::time_point const&) noexcept
    {
#if LL_WINDOWS
        // The stock round_robin::suspend_until() waits on a
        // std::condition_variable, which on Windows appears busier than it
        // should be. A plain 1ms Sleep() is cheaper.
        Sleep(1);
#else
        // Non-Windows platforms don't currently use this scheduler, but keep
        // an equivalent de-minimis sleep here anyway (usleep takes
        // microseconds rather than milliseconds — the exact duration is
        // unimportant).
        usleep(1);
#endif
    }

    virtual void notify() noexcept
    {
        // The Sleep()/usleep() above wakes up by itself, so there is nothing
        // to signal here.
    }
};
/*****************************************************************************
* ThreadPoolBase
*****************************************************************************/
LL::ThreadPoolBase::ThreadPoolBase(const std::string& name, size_t threads,
WorkQueueBase* queue):
super(name),
mQueue(name, capacity),
mName("ThreadPool:" + name),
mThreadCount(threads)
mThreadCount(getConfiguredWidth(name, threads)),
mQueue(queue)
{}
void LL::ThreadPool::start()
void LL::ThreadPoolBase::start()
{
for (size_t i = 0; i < mThreadCount; ++i)
{
@ -36,6 +76,7 @@ void LL::ThreadPool::start()
mThreads.emplace_back(tname, [this, tname]()
{
LL_PROFILER_SET_THREAD_NAME(tname.c_str());
LL_INFOS("THREAD") << "Started thread " << tname << LL_ENDL;
run(tname);
});
}
@ -56,17 +97,17 @@ void LL::ThreadPool::start()
});
}
LL::ThreadPool::~ThreadPool()
LL::ThreadPoolBase::~ThreadPoolBase()
{
    // Make sure the queue is closed and all worker threads are joined
    // before this object's members are destroyed.
    close();
}
void LL::ThreadPool::close()
void LL::ThreadPoolBase::close()
{
if (! mQueue.isClosed())
if (! mQueue->isClosed())
{
LL_DEBUGS("ThreadPool") << mName << " closing queue and joining threads" << LL_ENDL;
mQueue.close();
mQueue->close();
for (auto& pair: mThreads)
{
LL_DEBUGS("ThreadPool") << mName << " waiting on thread " << pair.first << LL_ENDL;
@ -76,14 +117,74 @@ void LL::ThreadPool::close()
}
}
void LL::ThreadPool::run(const std::string& name)
// Per-thread entry point. 'name' is this worker thread's label, used for
// logging; the actual work loop is the virtual run() overload below.
void LL::ThreadPoolBase::run(const std::string& name)
{
#if LL_WINDOWS
    // Try using sleepy_robin fiber scheduler.
    boost::fibers::use_scheduling_algorithm<sleepy_robin>();
#endif // LL_WINDOWS
    LL_DEBUGS("ThreadPool") << name << " starting" << LL_ENDL;
    run();
    LL_DEBUGS("ThreadPool") << name << " stopping" << LL_ENDL;
}
void LL::ThreadPool::run()
void LL::ThreadPoolBase::run()
{
mQueue.runUntilClose();
mQueue->runUntilClose();
}
//static
size_t LL::ThreadPoolBase::getConfiguredWidth(const std::string& name, size_t dft)
{
LLSD poolSizes;
try
{
poolSizes = LL::CommonControl::get("Global", "ThreadPoolSizes");
// "ThreadPoolSizes" is actually a map containing the sizes of
// interest -- or should be, if this process has an
// LLViewerControlListener instance and its settings include
// "ThreadPoolSizes". If we failed to retrieve it, perhaps we're in a
// program that doesn't define that, or perhaps there's no such
// setting, or perhaps we're asking too early, before the LLEventAPI
// itself has been instantiated. In any of those cases, it seems worth
// warning.
if (! poolSizes.isDefined())
{
// Note: we don't warn about absence of an override key for a
// particular ThreadPool name, that's fine. This warning is about
// complete absence of a ThreadPoolSizes setting, which we expect
// in a normal viewer session.
LL_WARNS("ThreadPool") << "No 'ThreadPoolSizes' setting for ThreadPool '"
<< name << "'" << LL_ENDL;
}
}
catch (const LL::CommonControl::Error& exc)
{
// We don't want ThreadPool to *require* LLViewerControlListener.
// Just log it and carry on.
LL_WARNS("ThreadPool") << "Can't check 'ThreadPoolSizes': " << exc.what() << LL_ENDL;
}
LL_DEBUGS("ThreadPool") << "ThreadPoolSizes = " << poolSizes << LL_ENDL;
// LLSD treats an undefined value as an empty map when asked to retrieve a
// key, so we don't need this to be conditional.
LLSD sizeSpec{ poolSizes[name] };
// We retrieve sizeSpec as LLSD, rather than immediately as LLSD::Integer,
// so we can distinguish the case when it's undefined.
return sizeSpec.isInteger() ? sizeSpec.asInteger() : dft;
}
//static
size_t LL::ThreadPoolBase::getWidth(const std::string& name, size_t dft)
{
    // Prefer the live width of an already-instantiated pool with this name;
    // otherwise fall back to the configured override (or dft).
    if (auto instance = getInstance(name))
    {
        return instance->getWidth();
    }
    return getConfiguredWidth(name, dft);
}

View File

@ -13,7 +13,9 @@
#if ! defined(LL_THREADPOOL_H)
#define LL_THREADPOOL_H
#include "threadpool_fwd.h"
#include "workqueue.h"
#include <memory> // std::unique_ptr
#include <string>
#include <thread>
#include <utility> // std::pair
@ -22,17 +24,24 @@
namespace LL
{
class ThreadPool: public LLInstanceTracker<ThreadPool, std::string>
class ThreadPoolBase: public LLInstanceTracker<ThreadPoolBase, std::string>
{
private:
using super = LLInstanceTracker<ThreadPool, std::string>;
using super = LLInstanceTracker<ThreadPoolBase, std::string>;
public:
/**
* Pass ThreadPool a string name. This can be used to look up the
* Pass ThreadPoolBase a string name. This can be used to look up the
* relevant WorkQueue.
*
* The number of threads you pass sets the compile-time default. But
* if the user has overridden the LLSD map in the "ThreadPoolSizes"
* setting with a key matching this ThreadPool name, that setting
* overrides this parameter.
*/
ThreadPool(const std::string& name, size_t threads=1, size_t capacity=1024);
virtual ~ThreadPool();
ThreadPoolBase(const std::string& name, size_t threads,
WorkQueueBase* queue);
virtual ~ThreadPoolBase();
/**
* Launch the ThreadPool. Until this call, a constructed ThreadPool
@ -53,8 +62,6 @@ namespace LL
std::string getName() const { return mName; }
size_t getWidth() const { return mThreads.size(); }
/// obtain a non-const reference to the WorkQueue to post work to it
WorkQueue& getQueue() { return mQueue; }
/**
* Override run() if you need special processing. The default run()
@ -62,16 +69,73 @@ namespace LL
*/
virtual void run();
/**
* getConfiguredWidth() returns the setting, if any, for the specified
* ThreadPool name. Returns dft if the "ThreadPoolSizes" map does not
* contain the specified name.
*/
static
size_t getConfiguredWidth(const std::string& name, size_t dft=0);
/**
* This getWidth() returns the width of the instantiated ThreadPool
* with the specified name, if any. If no instance exists, returns its
* getConfiguredWidth() if any. If there's no instance and no relevant
* override, return dft. Presumably dft should match the threads
* parameter passed to the ThreadPool constructor call that will
* eventually instantiate the ThreadPool with that name.
*/
static
size_t getWidth(const std::string& name, size_t dft);
protected:
std::unique_ptr<WorkQueueBase> mQueue;
private:
void run(const std::string& name);
protected: // <FS:Beq/> [FIRE-32453][BUG-232971] Improve shutdown behaviour.
WorkQueue mQueue;
std::string mName;
size_t mThreadCount;
std::vector<std::pair<std::string, std::thread>> mThreads;
};
/**
 * ThreadPoolUsing<QUEUE> binds ThreadPoolBase to a concrete queue class:
 * plain WorkQueue, or WorkSchedule for timestamped tasks.
 */
template <class QUEUE>
struct ThreadPoolUsing: public ThreadPoolBase
{
    using queue_t = QUEUE;

    /**
     * Construct with a string name, usable later to look up the relevant
     * WorkQueue.
     *
     * The threads parameter is only the compile-time default width: a key
     * matching this pool's name in the user-overridable LLSD map in the
     * "ThreadPoolSizes" setting takes precedence over it.
     *
     * An explicit capacity bounds the queue. A constrained queue can block
     * a submitter when full, so never constrain a ThreadPool that accepts
     * work from the main thread.
     */
    ThreadPoolUsing(const std::string& name, size_t threads=1, size_t capacity=1024*1024):
        ThreadPoolBase(name, threads, new queue_t(name, capacity))
    {}

    ~ThreadPoolUsing() override = default;

    /// obtain a non-const reference to the specific WorkQueue subclass to
    /// post work to it
    queue_t& getQueue() { return static_cast<queue_t&>(*mQueue); }
};
/// ThreadPool is shorthand for using the simpler WorkQueue
using ThreadPool = ThreadPoolUsing<WorkQueue>;
} // namespace LL
#endif /* ! defined(LL_THREADPOOL_H) */

Some files were not shown because too many files have changed in this diff Show More