Merge branch 'main' into DRTVWR-489

Alexander Gavriliuk 2023-11-30 17:47:58 +01:00
commit c9cd5631e4
825 changed files with 49846 additions and 44606 deletions

.github/release.yaml (new file, 18 lines)

@@ -0,0 +1,18 @@
changelog:
  exclude:
    labels:
      - ignore-for-release
    authors:
      - dependabot
  categories:
    - title: Breaking Changes 🛠
      labels:
        - semver-major
        - breaking-change
    - title: New Features 🎉
      labels:
        - semver-minor
        - enhancement
    - title: Other Changes
      labels:
        - '*'


@@ -4,54 +4,93 @@ on:
workflow_dispatch:
pull_request:
push:
- branches: [main, contribute]
+ branches: ["*"]
tags: ["*"]
jobs:
build:
strategy:
matrix:
- runner: [windows-large]
+ runner: [windows-large, macos-12-xl]
- configuration: [ReleaseOS]
+ configuration: [Release, ReleaseOS]
- addrsize: [64]
+ python-version: ["3.11"]
include:
- - runner: windows-large
+ - runner: macos-12-xl
+ developer_dir: "/Applications/Xcode_14.0.1.app/Contents/Developer"
+ exclude:
+ - runner: macos-12-xl
configuration: ReleaseOS
- addrsize: 32
runs-on: ${{ matrix.runner }}
outputs:
viewer_channel: ${{ steps.build.outputs.viewer_channel }}
viewer_version: ${{ steps.build.outputs.viewer_version }}
imagename: ${{ steps.build.outputs.imagename }}
env:
+ AUTOBUILD_ADDRSIZE: 64
+ AUTOBUILD_BUILD_ID: ${{ github.run_id }}
AUTOBUILD_CONFIGURATION: ${{ matrix.configuration }}
- AUTOBUILD_ADDRSIZE: ${{ matrix.addrsize }}
+ # authorizes fetching private constituent packages
+ AUTOBUILD_GITHUB_TOKEN: ${{ secrets.SHARED_AUTOBUILD_GITHUB_TOKEN }}
AUTOBUILD_INSTALLABLE_CACHE: ${{ github.workspace }}/.autobuild-installables
AUTOBUILD_VARIABLES_FILE: ${{ github.workspace }}/.build-variables/variables
- AUTOBUILD_VSVER: "170" # vs2k22
+ AUTOBUILD_VSVER: "170"
- LOGFAIL: debug # Show details when tests fail
+ DEVELOPER_DIR: ${{ matrix.developer_dir }}
# Ensure that Linden viewer builds engage Bugsplat.
BUGSPLAT_DB: ${{ matrix.configuration != 'ReleaseOS' && 'SecondLife_Viewer_2018' || '' }}
BUGSPLAT_PASS: ${{ secrets.BUGSPLAT_PASS }}
BUGSPLAT_USER: ${{ secrets.BUGSPLAT_USER }}
build_coverity: false
build_log_dir: ${{ github.workspace }}/.logs
build_viewer: true
BUILDSCRIPTS_SHARED: ${{ github.workspace }}/.shared
# extracted and committed to viewer repo
BUILDSCRIPTS_SUPPORT_FUNCTIONS: ${{ github.workspace }}/buildscripts_support_functions
GIT_REF: ${{ github.head_ref || github.ref }}
LL_SKIP_REQUIRE_SYSROOT: 1
# Setting this variable directs Linden's TUT test driver code to capture
# test-program log output at the specified level, but to display it only if
# the individual test fails.
LOGFAIL: DEBUG
master_message_template_checkout: ${{ github.workspace }}/.master-message-template
# Only set variants to the one configuration: don't let build.sh loop
# over variants, let GitHub distribute variants over multiple hosts.
variants: ${{ matrix.configuration }}
steps:
- name: Checkout code
- uses: actions/checkout@v3
+ uses: actions/checkout@v4
with:
ref: ${{ github.event.pull_request.head.sha || github.sha }}
- name: Setup python
uses: actions/setup-python@v4
with:
python-version: ${{ matrix.python-version }}
- name: Checkout build variables
- uses: actions/checkout@v3
+ uses: actions/checkout@v4
with:
repository: secondlife/build-variables
ref: viewer
path: .build-variables
- name: Checkout master-message-template
uses: actions/checkout@v4
with:
repository: secondlife/master-message-template
path: .master-message-template
- name: Install autobuild and python dependencies
- run: pip3 install autobuild llbase
+ run: pip3 install autobuild llsd
- name: Cache autobuild packages
uses: actions/cache@v3
id: cache-installables
with:
path: .autobuild-installables
- key: ${{ runner.os }}-${{ matrix.addrsize }}-${{ matrix.configuration }}-${{ hashFiles('autobuild.xml') }}
+ key: ${{ runner.os }}-64-${{ matrix.configuration }}-${{ hashFiles('autobuild.xml') }}
restore-keys: |
- ${{ runner.os }}-${{ matrix.addrsize }}-${{ matrix.configuration }}-
+ ${{ runner.os }}-64-${{ matrix.configuration }}-
- ${{ runner.os }}-${{ matrix.addrsize }}-
+ ${{ runner.os }}-64-
- name: Install windows dependencies
if: runner.os == 'Windows'
@@ -63,31 +102,266 @@ jobs:
env:
RUNNER_OS: ${{ runner.os }}
run: |
# set up things the viewer's build.sh script expects
set -x
mkdir -p "$build_log_dir"
mkdir -p "$BUILDSCRIPTS_SHARED/packages/lib/python"
source "$BUILDSCRIPTS_SUPPORT_FUNCTIONS"
if [[ "$OSTYPE" =~ cygwin|msys ]]
then
native_path() { cygpath --windows "$1"; }
shell_path() { cygpath --unix "$1"; }
else
native_path() { echo "$1"; }
shell_path() { echo "$1"; }
fi
finalize()
{
case "$1" in
true|0)
record_success "Build Succeeded"
;;
*)
record_failure "Build Failed with $1"
;;
esac
}
initialize_build()
{
echo "initialize_build"
}
initialize_version()
{
export revision="$AUTOBUILD_BUILD_ID"
}
python_cmd()
{
if [[ "x${1:0:1}" == "x-" ]] # -m, -c, etc.
then # if $1 is a switch, don't try to twiddle paths
"$(shell_path "$PYTHON_COMMAND")" "$@"
elif [[ "$(basename "$1")" == "codeticket.py" ]]
then # ignore any attempt to contact codeticket
echo "## $@"
else # running a script at an explicit path: fix path for Python
local script="$1"
shift
"$(shell_path "$PYTHON_COMMAND")" "$(native_path "$script")" "$@"
fi
}
repo_branch()
{
git -C "$1" branch | grep '^* ' | cut -c 3-
}
record_dependencies_graph()
{
echo "TODO: generate and post dependency graph"
}
# Since we're not uploading to codeticket, DO NOT sleep for minutes.
sleep()
{
echo "Not sleeping for $1 seconds"
}
export -f native_path shell_path finalize initialize_build initialize_version
export -f python_cmd repo_branch record_dependencies_graph sleep
## Useful for diagnosing Windows LLProcess/LLLeap test failures
##export APR_LOG="${RUNNER_TEMP}/apr.log"
export arch=$(uname | cut -b-6)
# Surprise! GH Windows runner's MINGW6 is a $arch value we've never
# seen before, so numerous tests don't know about it.
[[ "$arch" == "MINGW6" ]] && arch=CYGWIN
export AUTOBUILD="$(which autobuild)"
# Build with a tag like "Second_Life_Project_Shiny#abcdef0" to get a
# viewer channel "Second Life Project Shiny" (ignoring "#hash",
# needed to disambiguate tags).
if [[ "$GITHUB_REF_TYPE" == "tag" && "${GITHUB_REF_NAME:0:12}" == "Second_Life_" ]]
then viewer_channel="${GITHUB_REF_NAME%#*}"
export viewer_channel="${viewer_channel//_/ }"
else export viewer_channel="Second Life Test"
fi
echo "viewer_channel=$viewer_channel" >> "$GITHUB_OUTPUT"
# On windows we need to point the build to the correct python
# as neither CMake's FindPython nor our custom Python.cmake module
# will resolve the correct interpreter location.
if [[ "$RUNNER_OS" == "Windows" ]]; then
- export PYTHON="$(cygpath -m "$(which python)")"
+ export PYTHON="$(native_path "$(which python)")"
echo "Python location: $PYTHON"
+ export PYTHON_COMMAND="$PYTHON"
+ else
+ export PYTHON_COMMAND="python3"
fi
+ export PYTHON_COMMAND_NATIVE="$(native_path "$PYTHON_COMMAND")"
- autobuild configure -- -DVIEWER_CHANNEL="Second Life Test ${GIT_REF##*/}"
- autobuild build --no-configure
- # Find artifacts
- if [[ "$RUNNER_OS" == "Windows" ]]; then
- installer_path=$(find ./build-*/newview/ | grep '_Setup\.exe')
- installer_name="$(basename $installer_path)"
- elif [[ "$RUNNER_OS" == "macOS" ]]; then
- installer_path=$(find ./build-*/newview/ | grep '\.dmg')
- installer_name="$(basename $installer_path)"
- fi
- echo "installer_path=$installer_path" >> $GITHUB_OUTPUT
- echo "installer_name=$installer_name" >> $GITHUB_OUTPUT
+ ./build.sh
+ # Each artifact is downloaded as a distinct .zip file. Multiple jobs
+ # (per the matrix above) writing the same filepath to the same
+ # artifact name will *overwrite* that file. Moreover, they can
+ # interfere with each other, causing the upload to fail.
+ # https://github.com/actions/upload-artifact#uploading-to-the-same-artifact
+ # Given the size of our installers, and the fact that we typically
+ # only want to download just one instead of a single zip containing
+ # several, generate a distinct artifact name for each installer.
+ # If the matrix above can run multiple builds on the same
+ # platform, we must disambiguate on more than the platform name.
+ # e.g. if we were still running Windows 32-bit builds, we'd need to
+ # qualify the artifact with bit width.
+ if [[ "$AUTOBUILD_CONFIGURATION" == "ReleaseOS" ]]
+ then cfg_suffix='OS'
+ else cfg_suffix=''
+ fi
+ echo "artifact=$RUNNER_OS$cfg_suffix" >> $GITHUB_OUTPUT
- - name: Upload installer
+ - name: Upload executable
+ if: matrix.configuration != 'ReleaseOS' && steps.build.outputs.viewer_app
uses: actions/upload-artifact@v3
with:
- name: ${{ steps.build.outputs.installer_name }}
+ name: "${{ steps.build.outputs.artifact }}-app"
- path: ${{ steps.build.outputs.installer_path }}
+ path: |
+ ${{ steps.build.outputs.viewer_app }}
# The other upload of nontrivial size is the symbol file. Use a distinct
# artifact for that too.
- name: Upload symbol file
if: matrix.configuration != 'ReleaseOS'
uses: actions/upload-artifact@v3
with:
name: "${{ steps.build.outputs.artifact }}-symbols"
path: |
${{ steps.build.outputs.symbolfile }}
- name: Upload metadata
if: matrix.configuration != 'ReleaseOS'
uses: actions/upload-artifact@v3
with:
name: "${{ steps.build.outputs.artifact }}-metadata"
# emitted by build.sh, possibly multiple lines
path: |
${{ steps.build.outputs.metadata }}
- name: Upload physics package
uses: actions/upload-artifact@v3
# should only be set for viewer-private
if: matrix.configuration != 'ReleaseOS' && steps.build.outputs.physicstpv
with:
name: "${{ steps.build.outputs.artifact }}-physics"
# emitted by build.sh, zero or one lines
path: |
${{ steps.build.outputs.physicstpv }}
sign-and-package-windows:
needs: build
runs-on: windows
steps:
- name: Sign and package Windows viewer
uses: secondlife/viewer-build-util/sign-pkg-windows@main
with:
vault_uri: "${{ secrets.AZURE_KEY_VAULT_URI }}"
cert_name: "${{ secrets.AZURE_CERT_NAME }}"
client_id: "${{ secrets.AZURE_CLIENT_ID }}"
client_secret: "${{ secrets.AZURE_CLIENT_SECRET }}"
tenant_id: "${{ secrets.AZURE_TENANT_ID }}"
sign-and-package-mac:
needs: build
runs-on: macos-latest
steps:
- name: Unpack Mac notarization credentials
id: note-creds
shell: bash
run: |
# In NOTARIZE_CREDS_MACOS we expect to find:
# USERNAME="..."
# PASSWORD="..."
# TEAM_ID="..."
eval "${{ secrets.NOTARIZE_CREDS_MACOS }}"
echo "::add-mask::$USERNAME"
echo "::add-mask::$PASSWORD"
echo "::add-mask::$TEAM_ID"
echo "note_user=$USERNAME" >> "$GITHUB_OUTPUT"
echo "note_pass=$PASSWORD" >> "$GITHUB_OUTPUT"
echo "note_team=$TEAM_ID" >> "$GITHUB_OUTPUT"
# If we didn't manage to retrieve all of these credentials, better
# find out sooner than later.
[[ -n "$USERNAME" && -n "$PASSWORD" && -n "$TEAM_ID" ]]
- name: Sign and package Mac viewer
uses: secondlife/viewer-build-util/sign-pkg-mac@main
with:
channel: ${{ needs.build.outputs.viewer_channel }}
imagename: ${{ needs.build.outputs.imagename }}
cert_base64: ${{ secrets.SIGNING_CERT_MACOS }}
cert_name: ${{ secrets.SIGNING_CERT_MACOS_IDENTITY }}
cert_pass: ${{ secrets.SIGNING_CERT_MACOS_PASSWORD }}
note_user: ${{ steps.note-creds.outputs.note_user }}
note_pass: ${{ steps.note-creds.outputs.note_pass }}
note_team: ${{ steps.note-creds.outputs.note_team }}
post-windows-symbols:
needs: build
runs-on: ubuntu-latest
steps:
- name: Post Windows symbols
uses: secondlife/viewer-build-util/post-bugsplat-windows@main
with:
username: ${{ secrets.BUGSPLAT_USER }}
password: ${{ secrets.BUGSPLAT_PASS }}
database: "SecondLife_Viewer_2018"
channel: ${{ needs.build.outputs.viewer_channel }}
version: ${{ needs.build.outputs.viewer_version }}
post-mac-symbols:
needs: build
runs-on: ubuntu-latest
steps:
- name: Post Mac symbols
uses: secondlife/viewer-build-util/post-bugsplat-mac@main
with:
username: ${{ secrets.BUGSPLAT_USER }}
password: ${{ secrets.BUGSPLAT_PASS }}
database: "SecondLife_Viewer_2018"
channel: ${{ needs.build.outputs.viewer_channel }}
version: ${{ needs.build.outputs.viewer_version }}
release:
needs: [sign-and-package-windows, sign-and-package-mac]
runs-on: ubuntu-latest
if: github.ref_type == 'tag' && startsWith(github.ref_name, 'Second_Life_')
steps:
- uses: actions/download-artifact@v3
with:
path: artifacts
- name: Reshuffle artifact files
uses: secondlife/viewer-build-util/release-artifacts@main
with:
input-path: artifacts
output-path: assets
# The *-app artifacts are for use only by the signing and
# packaging steps. Once we've generated signed installers, we no
# longer need them, and we CERTAINLY don't want to publish
# thousands of individual files as separate URLs.
exclude: |-
Windows-app
macOS-app
# Use just "Windows" or "macOS" prefix because these are the only
# artifacts in which we expect files from both platforms with
# colliding names (e.g. autobuild-package.xml). release-artifacts
# normally resolves collisions by prepending the artifact name, so
# when we anticipate collisions, it's good to keep the prefix
# short and sweet.
prefix: |-
Windows-metadata=Windows
macOS-metadata=macOS
# forked from softprops/action-gh-release
- uses: secondlife-3p/action-gh-release@v1
with:
# name the release page for the build number so we can find it
# easily (analogous to looking up a codeticket build page)
name: "v${{ github.run_id }}"
prerelease: true
generate_release_notes: true
# the only reason we generate a GH release is to post build products
fail_on_unmatched_files: true
files: "assets/*"
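The sign-and-package-mac job above packs several notarization values into one repository secret and unpacks them with eval. A minimal sketch of that pattern, assuming the secret is exposed to the step as an environment variable CREDS containing KEY="value" lines (the variable name here is illustrative, not part of this workflow):

    # CREDS holds lines such as: USERNAME="u" PASSWORD="p" TEAM_ID="t"
    eval "$CREDS"                        # defines USERNAME, PASSWORD, TEAM_ID in this shell
    for var in USERNAME PASSWORD TEAM_ID; do
        echo "::add-mask::${!var}"       # ask the runner to redact each value in the job log
    done
    echo "note_user=$USERNAME" >> "$GITHUB_OUTPUT"   # pass values to later steps as outputs

Masking before writing any outputs matters, since every later occurrence of a masked value is redacted from the log.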

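The build step derives the viewer channel from a release tag by stripping the disambiguating "#hash" suffix and replacing underscores with spaces. A small bash sketch of that derivation, using a hypothetical tag name to show the effect of the parameter expansions in the run script above:

    GITHUB_REF_NAME='Second_Life_Project_Shiny#abcdef0'   # hypothetical tag
    viewer_channel="${GITHUB_REF_NAME%#*}"                 # drop '#' and everything after it
    viewer_channel="${viewer_channel//_/ }"                # underscores become spaces
    echo "$viewer_channel"                                 # prints: Second Life Project Shiny

Builds not tagged with a Second_Life_ prefix fall back to the "Second Life Test" channel, as the else branch in that step shows.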

@@ -11,7 +11,7 @@ jobs:
pre-commit:
runs-on: ubuntu-latest
steps:
- - uses: actions/checkout@v3
+ - uses: actions/checkout@v4
- uses: actions/setup-python@v4
with:
python-version: 3.x

.gitignore (17 changes)

@@ -7,9 +7,18 @@
*.pyc
*.rej
*.swp
+ *.vcxproj
+ *.filters
+ *.sln
+ *.depend
+ *.stamp
+ *.rc
*~
# Specific paths and/or names
+ CMakeCache.txt
+ cmake_install.cmake
LICENSES
build-darwin-*
build-linux-*
@@ -17,6 +26,10 @@ debian/files
debian/secondlife-appearance-utility*
debian/secondlife-viewer*
indra/.distcc
+ indra/cmake/*
+ indra/out/*
+ indra/packages/*
build-vc80/
build-vc100/
build-vc120/
@@ -75,4 +88,6 @@ tarfile_tmp
trivial_change_force_build
web/config.*
web/locale.*
web/secondlife.com.*
+ .env

File diff suppressed because it is too large.

build.sh (173 changes)

@@ -16,6 +16,8 @@
# * The special style in which python is invoked is intentional to permit
# use of a native python install on windows - which requires paths in DOS form
+ cleanup="true"
retry_cmd()
{
max_attempts="$1"; shift
@@ -110,6 +112,34 @@ installer_CYGWIN()
fi
}
[[ -n "$GITHUB_OUTPUT" ]] || fatal "Need to export GITHUB_OUTPUT"
# The following is based on the Warning for GitHub multiline output strings:
# https://docs.github.com/en/actions/using-workflows/workflow-commands-for-github-actions#multiline-strings
EOF=$(dd if=/dev/urandom bs=15 count=1 status=none | base64)
# Build up these arrays as we go
metadata=()
symbolfile=()
physicstpv=()
# and dump them to GITHUB_OUTPUT when done
cleanup="$cleanup ; \
arrayoutput metadata ; \
arrayoutput symbolfile ; \
arrayoutput physicstpv"
trap "$cleanup" EXIT
arrayoutput()
{
local outputname="$1"
# append "[*]" to the array name so array indirection works
local array="$1[*]"
local IFS='
'
echo "$outputname<<$EOF
${!array}
$EOF" >> "$GITHUB_OUTPUT"
}
pre_build()
{
local variant="$1"
@@ -121,7 +151,7 @@ pre_build()
RELEASE_CRASH_REPORTING=ON
HAVOK=ON
SIGNING=()
- if [ "$arch" == "Darwin" -a "$variant" == "Release" ]
+ if [[ "$arch" == "Darwin" && "$variant" == "Release" ]]
then SIGNING=("-DENABLE_SIGNING:BOOL=YES" \
"-DSIGNING_IDENTITY:STRING=Developer ID Application: Linden Research, Inc.")
fi
@@ -145,15 +175,27 @@ pre_build()
VIEWER_SYMBOL_FILE="$(native_path "$abs_build_dir/newview/$variant/secondlife-symbols-$symplat-${AUTOBUILD_ADDRSIZE}.tar.bz2")"
fi
- # don't spew credentials into build log
- bugsplat_sh="$build_secrets_checkout/bugsplat/bugsplat.sh"
- set +x
- if [ -r "$bugsplat_sh" ]
- then # show that we're doing this, just not the contents
- echo source "$bugsplat_sh"
- source "$bugsplat_sh"
- fi
- set -x
+ # expect these variables to be set in the environment from GitHub secrets
+ if [[ -n "$BUGSPLAT_DB" ]]
+ then
+ # don't spew credentials into build log
+ set +x
+ if [[ -z "$BUGSPLAT_USER" || -z "$BUGSPLAT_PASS" ]]
+ then
+ # older mechanism involving build-secrets repo -
+ # if build_secrets_checkout isn't set, report its name
+ bugsplat_sh="${build_secrets_checkout:-\$build_secrets_checkout}/bugsplat/bugsplat.sh"
+ if [ -r "$bugsplat_sh" ]
+ then # show that we're doing this, just not the contents
+ echo source "$bugsplat_sh"
+ source "$bugsplat_sh"
+ else
+ fatal "BUGSPLAT_USER or BUGSPLAT_PASS missing, and no $bugsplat_sh"
+ fi
+ fi
+ set -x
+ export BUGSPLAT_USER BUGSPLAT_PASS
+ fi
# honor autobuild_configure_parameters same as sling-buildscripts
eval_autobuild_configure_parameters=$(eval $(echo echo $autobuild_configure_parameters))
@@ -181,13 +223,17 @@ package_llphysicsextensions_tpv()
# nat 2016-12-21: without HAVOK, can't build PhysicsExtensions_TPV.
if [ "$variant" = "Release" -a "${HAVOK:-}" != "OFF" ]
then
- test -r "$build_dir/packages/llphysicsextensions/autobuild-tpv.xml" || fatal "No llphysicsextensions_tpv autobuild configuration found"
- tpvconfig=$(native_path "$build_dir/packages/llphysicsextensions/autobuild-tpv.xml")
- "$autobuild" build --quiet --config-file "$tpvconfig" -c Tpv || fatal "failed to build llphysicsextensions_tpv"
+ tpvconfig="$build_dir/packages/llphysicsextensions/autobuild-tpv.xml"
+ test -r "$tpvconfig" || fatal "No llphysicsextensions_tpv autobuild configuration found"
+ # SL-19942: autobuild ignores -c switch if AUTOBUILD_CONFIGURATION set
+ unset AUTOBUILD_CONFIGURATION
+ "$autobuild" build --quiet --config-file "$(native_path "$tpvconfig")" -c Tpv \
+ || fatal "failed to build llphysicsextensions_tpv"
# capture the package file name for use in upload later...
PKGTMP=`mktemp -t pgktpv.XXXXXX`
- trap "rm $PKGTMP* 2>/dev/null" 0
+ cleanup="$cleanup ; rm $PKGTMP* 2>/dev/null"
+ trap "$cleanup" EXIT
"$autobuild" package --quiet --config-file "$tpvconfig" --results-file "$(native_path $PKGTMP)" || fatal "failed to package llphysicsextensions_tpv"
tpv_status=$?
if [ -r "${PKGTMP}" ]
@@ -313,12 +359,20 @@ begin_section "coding policy check"
# this far. Running coding policy checks on one platform *should* suffice...
if [[ "$arch" == "Darwin" ]]
then
- # install the git-hooks dependencies
- pip install -r "$(native_path "$git_hooks_checkout/requirements.txt")" || \
- fatal "pip install git-hooks failed"
- # validate the branch we're about to build
- python_cmd "$git_hooks_checkout/coding_policy_git.py" --all_files || \
- fatal "coding policy check failed"
+ git_hooks_reqs="$git_hooks_checkout/requirements.txt"
+ if [[ -r "$(shell_path "$git_hooks_reqs")" ]]
+ then
+ # install the git-hooks dependencies
+ pip install -r "$(native_path "$git_hooks_reqs")" || \
+ fatal "pip install git-hooks failed"
+ fi
+ git_hooks_script="$git_hooks_checkout/coding_policy_git.py"
+ if [[ -r "$(shell_path "$git_hooks_script")" ]]
+ then
+ # validate the branch we're about to build
+ python_cmd "$(native_path "$git_hooks_script")" --all_files || \
+ fatal "coding policy check failed"
+ fi
fi
end_section "coding policy check"
@@ -353,6 +407,7 @@ do
begin_section "Autobuild metadata"
python_cmd "$helpers/codeticket.py" addoutput "Autobuild Metadata" "$build_dir/autobuild-package.xml" --mimetype text/xml \
|| fatal "Upload of autobuild metadata failed"
+ metadata+=("$build_dir/autobuild-package.xml")
if [ "$arch" != "Linux" ]
then
record_dependencies_graph "$build_dir/autobuild-package.xml" # defined in buildscripts/hg/bin/build.sh
@@ -366,8 +421,11 @@ do
if [ -r "$build_dir/newview/viewer_version.txt" ]
then
begin_section "Viewer Version"
- python_cmd "$helpers/codeticket.py" addoutput "Viewer Version" "$(<"$build_dir/newview/viewer_version.txt")" --mimetype inline-text \
+ viewer_version="$(<"$build_dir/newview/viewer_version.txt")"
+ python_cmd "$helpers/codeticket.py" addoutput "Viewer Version" "$viewer_version" --mimetype inline-text \
|| fatal "Upload of viewer version failed"
+ metadata+=("$build_dir/newview/viewer_version.txt")
+ echo "viewer_version=$viewer_version" >> "$GITHUB_OUTPUT"
end_section "Viewer Version"
fi
;;
@@ -376,12 +434,14 @@ do
then
record_event "Doxygen warnings generated; see doxygen_warnings.log"
python_cmd "$helpers/codeticket.py" addoutput "Doxygen Log" "$build_dir/doxygen_warnings.log" --mimetype text/plain ## TBD
+ metadata+=("$build_dir/doxygen_warnings.log")
fi
if [ -d "$build_dir/doxygen/html" ]
then
tar -c -f "$build_dir/viewer-doxygen.tar.bz2" --strip-components 3 "$build_dir/doxygen/html"
python_cmd "$helpers/codeticket.py" addoutput "Doxygen Tarball" "$build_dir/viewer-doxygen.tar.bz2" \
|| fatal "Upload of doxygen tarball failed"
+ metadata+=("$build_dir/viewer-doxygen.tar.bz2")
fi
;;
*)
@@ -486,64 +546,29 @@ then
if $build_viewer
then
begin_section "Uploads"
- # Upload installer
- package=$(installer_$arch)
- if [ x"$package" = x ] || test -d "$package"
- then
- fatal "No installer found from `pwd`"
- succeeded=$build_coverity
- else
- # Upload base package.
- retry_cmd 4 30 python_cmd "$helpers/codeticket.py" addoutput Installer "$package" \
- || fatal "Upload of installer failed"
- wait_for_codeticket
- # Upload additional packages.
- for package_id in $additional_packages
- do
- package=$(installer_$arch "$package_id")
- if [ x"$package" != x ]
- then
- retry_cmd 4 30 python_cmd "$helpers/codeticket.py" addoutput "Installer $package_id" "$package" \
- || fatal "Upload of installer $package_id failed"
- wait_for_codeticket
- else
- record_failure "Failed to find additional package for '$package_id'."
- fi
- done
- if [ "$last_built_variant" = "Release" ]
- then
- # nat 2016-12-22: without RELEASE_CRASH_REPORTING, we have no symbol file.
- if [ "${RELEASE_CRASH_REPORTING:-}" != "OFF" ]
- then
- # Upload crash reporter file
- retry_cmd 4 30 python_cmd "$helpers/codeticket.py" addoutput "Symbolfile" "$VIEWER_SYMBOL_FILE" \
- || fatal "Upload of symbolfile failed"
- wait_for_codeticket
- fi
- # Upload the llphysicsextensions_tpv package, if one was produced
- # *TODO: Make this an upload-extension
- if [ -r "$build_dir/llphysicsextensions_package" ]
- then
- llphysicsextensions_package=$(cat $build_dir/llphysicsextensions_package)
- retry_cmd 4 30 python_cmd "$helpers/codeticket.py" addoutput "Physics Extensions Package" "$llphysicsextensions_package" --private \
- || fatal "Upload of physics extensions package failed"
- fi
- fi
- # Run upload extensions
- # Ex: bugsplat
- if [ -d ${build_dir}/packages/upload-extensions ]; then
- for extension in ${build_dir}/packages/upload-extensions/*.sh; do
- begin_section "Upload Extension $extension"
- . $extension
- [ $? -eq 0 ] || fatal "Upload of extension $extension failed"
- wait_for_codeticket
- end_section "Upload Extension $extension"
- done
- fi
+ # nat 2016-12-22: without RELEASE_CRASH_REPORTING, we have no symbol file.
+ if [ "${RELEASE_CRASH_REPORTING:-}" != "OFF" ]
+ then
+ # BugSplat wants to see xcarchive.zip
+ # e.g. build-darwin-x86_64/newview/Release/Second Life Test.xcarchive.zip
+ symbol_file="${build_dir}/newview/${variant}/${viewer_channel}.xcarchive.zip"
+ if [[ ! -f "$symbol_file" ]]
+ then
+ # symbol tarball we prep for (e.g.) Breakpad
+ symbol_file="$VIEWER_SYMBOL_FILE"
+ fi
+ # Upload crash reporter file
+ symbolfile+=("$symbol_file")
+ fi
+ # Upload the llphysicsextensions_tpv package, if one was produced
+ # Only upload this package when building the private repo so the
+ # artifact is private.
+ if [[ "x$GITHUB_REPOSITORY" == "xsecondlife/viewer-private" && \
+ -r "$build_dir/llphysicsextensions_package" ]]
+ then
+ llphysicsextensions_package=$(cat $build_dir/llphysicsextensions_package)
+ physicstpv+=("$llphysicsextensions_package")
fi
end_section "Uploads"
else

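build.sh now collects artifact paths in bash arrays and flushes them to $GITHUB_OUTPUT via arrayoutput, using the random-delimiter heredoc form that GitHub documents for multiline output values. A standalone sketch of the same idea, with example paths, assuming GITHUB_OUTPUT points at a writable file as it does on a runner:

    #!/usr/bin/env bash
    # Random delimiter so the payload cannot terminate the block early.
    EOF=$(dd if=/dev/urandom bs=15 count=1 status=none | base64)
    metadata=("build/autobuild-package.xml" "build/newview/viewer_version.txt")
    {
        echo "metadata<<$EOF"
        printf '%s\n' "${metadata[@]}"   # one array element per line
        echo "$EOF"
    } >> "$GITHUB_OUTPUT"

A later workflow step can then read steps.<id>.outputs.metadata as a newline-separated list, for example as the path input of upload-artifact.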

@@ -0,0 +1,60 @@
# standalone functions from sling-buildscripts
set_build_number_to_revision()
{
record_event "buildNumber $revision"
}
record_event()
{
echo "=== $@"
}
begin_section()
{
record_event "START $*"
sections+=("$*")
}
end_section()
{
# accommodate dumb Mac bash 3, which doesn't understand array[-1]
local last=$(( ${#sections[@]} - 1 ))
record_event "END ${*:-${sections[$last]}}"
unset "sections[$last]"
}
record_success()
{
record_event "SUCCESS $*"
}
record_failure()
{
record_event "FAILURE $*" >&2
}
fatal()
{
record_failure "$@"
finalize false
exit 1
}
# redefined fail for backward compatibility
alias fail=fatal
pass()
{
exit 0
}
export -f set_build_number_to_revision
export -f record_event
export -f begin_section
export -f end_section
export -f record_success
export -f record_failure
export -f fatal
export -f pass
export sections
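These helpers replace the richer sling-buildscripts logging with plain echo markers. A short usage sketch, assuming the file is sourced as the buildscripts_support_functions script referenced by the build workflow:

    source ./buildscripts_support_functions
    begin_section "Configure"
    begin_section "CMake"
    end_section              # prints "=== END CMake" (pops the innermost section)
    end_section "Configure"  # prints "=== END Configure"
    record_success "Build Succeeded"   # prints "=== SUCCESS Build Succeeded"

Calling end_section with no argument reuses the most recent begin_section name, which is why the helpers keep a sections array.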


@@ -19,6 +19,7 @@ Agathos Frascati
CT-317
CT-352
Ai Austin
+ SL-19399
Aiko Ying
Aimee Trescothick
SNOW-227
@@ -1421,6 +1422,7 @@ Sovereign Engineer
SL-18497
SL-18525
SL-18534
+ SL-19690
SL-19336
SpacedOut Frye
VWR-34


@@ -26,6 +26,11 @@ set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} $ENV{LL_BUILD}")
# Portable compilation flags.
add_compile_definitions( ADDRESS_SIZE=${ADDRESS_SIZE})
+ # Because older versions of Boost.Bind dumped placeholders _1, _2 et al. into
+ # the global namespace, Boost now requires either BOOST_BIND_NO_PLACEHOLDERS
+ # to avoid that or BOOST_BIND_GLOBAL_PLACEHOLDERS to state that we require it
+ # -- which we do. Without one or the other, we get a ton of Boost warnings.
+ add_compile_definitions(BOOST_BIND_GLOBAL_PLACEHOLDERS)
# Configure crash reporting
set(RELEASE_CRASH_REPORTING OFF CACHE BOOL "Enable use of crash reporting in release builds")
@@ -55,15 +60,6 @@ if (WINDOWS)
# http://www.cmake.org/pipermail/cmake/2009-September/032143.html
string(REPLACE "/Zm1000" " " CMAKE_CXX_FLAGS ${CMAKE_CXX_FLAGS})
- # Without PreferredToolArchitecture=x64, as of 2020-06-26 the 32-bit
- # compiler on our TeamCity build hosts has started running out of virtual
- # memory for the precompiled header file.
- # CP changed to only append the flag for 32bit builds - on 64bit builds,
- # locally at least, the build output is spammed with 1000s of 'D9002'
- # warnings about this switch being ignored.
- if(ADDRESS_SIZE EQUAL 32 AND DEFINED ENV{"TEAMCITY_PROJECT_NAME"})
- set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} /p:PreferredToolArchitecture=x64")
- endif()
# zlib has assembly-language object files incompatible with SAFESEH
add_link_options(/LARGEADDRESSAWARE
/SAFESEH:NO
@@ -191,3 +187,4 @@ if (LINUX OR DARWIN)
endif (LINUX OR DARWIN)


@@ -16,7 +16,6 @@ if (WINDOWS)
endif (LLCOMMON_LINK_SHARED)
target_link_libraries( ll::apr INTERFACE
${ARCH_PREBUILT_DIRS_RELEASE}/${APR_selector}apr-1.lib
- ${ARCH_PREBUILT_DIRS_RELEASE}/${APR_selector}apriconv-1.lib
${ARCH_PREBUILT_DIRS_RELEASE}/${APR_selector}aprutil-1.lib
)
elseif (DARWIN)
@@ -37,7 +36,6 @@ else (WINDOWS)
target_link_libraries( ll::apr INTERFACE
apr-1
aprutil-1
- iconv
uuid
rt
)


@@ -65,6 +65,7 @@ set(cmake_SOURCE_FILES
VisualLeakDetector.cmake
LibVLCPlugin.cmake
XmlRpcEpi.cmake
+ xxHash.cmake
ZLIBNG.cmake
)


@@ -57,7 +57,6 @@ if(WINDOWS)
openjp2.dll
libapr-1.dll
libaprutil-1.dll
- libapriconv-1.dll
nghttp2.dll
libhunspell.dll
uriparser.dll
@@ -181,7 +180,6 @@ elseif(DARWIN)
libndofdev.dylib
libnghttp2.dylib
libnghttp2.14.dylib
- libnghttp2.14.19.0.dylib
liburiparser.dylib
liburiparser.1.dylib
liburiparser.1.0.27.dylib


@@ -2,7 +2,7 @@
include_guard()
- # FMODSTUDIO can be set when launching the make using the argument -DFMODSTUDIO:BOOL=ON
+ # FMODSTUDIO can be set when launching the make using the argument -DUSE_FMODSTUDIO:BOOL=ON
# When building using proprietary binaries though (i.e. having access to LL private servers),
# we always build with FMODSTUDIO.
if (INSTALL_PROPRIETARY)


@@ -3,9 +3,7 @@ include(Prebuilt)
include(GLH)
add_library( ll::glext INTERFACE IMPORTED )
- if (WINDOWS OR LINUX)
- use_system_binary(glext)
- use_prebuilt_binary(glext)
- endif (WINDOWS OR LINUX)
+ use_system_binary(glext)
+ use_prebuilt_binary(glext)


@@ -126,6 +126,13 @@ MACRO(LL_ADD_PROJECT_UNIT_TESTS project sources)
message("LL_ADD_PROJECT_UNIT_TESTS ${name}_test_additional_CFLAGS ${${name}_test_additional_CFLAGS}")
endif()
+ if (DARWIN)
+ # test binaries always need to be signed for local development
+ set_target_properties(PROJECT_${project}_TEST_${name}
+ PROPERTIES
+ XCODE_ATTRIBUTE_CODE_SIGN_IDENTITY "-")
+ endif ()
#
# Setup test targets
#
@@ -221,6 +228,13 @@ FUNCTION(LL_ADD_INTEGRATION_TEST
)
endif ()
+ if (DARWIN)
+ # test binaries always need to be signed for local development
+ set_target_properties(INTEGRATION_TEST_${testname}
+ PROPERTIES
+ XCODE_ATTRIBUTE_CODE_SIGN_IDENTITY "-")
+ endif ()
# Add link deps to the executable
if(TEST_DEBUG)
message(STATUS "TARGET_LINK_LIBRARIES(INTEGRATION_TEST_${testname} ${libraries})")


@@ -1,2 +1,5 @@
# -*- cmake -*-
+ include(Variables)
+ include(Mikktspace)


@@ -62,6 +62,7 @@ elseif (WINDOWS)
user32
ole32
dbghelp
+ rpcrt4.lib
legacy_stdio_definitions
)
else()


@@ -0,0 +1,6 @@
# -*- cmake -*-
include(Prebuilt)
if (NOT USESYSTEMLIBS)
use_prebuilt_binary(mikktspace)
endif (NOT USESYSTEMLIBS)


@@ -0,0 +1,7 @@
# -*- cmake -*-
include(Prebuilt)
use_prebuilt_binary(tinygltf)
set(TINYGLTF_INCLUDE_DIR ${LIBS_PREBUILT_DIR}/include/tinygltf)


@@ -11,8 +11,9 @@ if (USE_TRACY)
use_prebuilt_binary(tracy)
target_include_directories( ll::tracy SYSTEM INTERFACE ${LIBS_PREBUILT_DIR}/include/tracy)
+ target_link_libraries( ll::tracy INTERFACE TracyClient )
# See: indra/llcommon/llprofiler.h
- target_compile_definitions(ll::tracy INTERFACE LL_PROFILER_CONFIGURATION=3 )
+ add_compile_definitions(LL_PROFILER_CONFIGURATION=3)
endif (USE_TRACY)


@@ -33,7 +33,7 @@ set(LIBS_OPEN_PREFIX)
set(SCRIPTS_PREFIX ../scripts)
set(VIEWER_PREFIX)
set(INTEGRATION_TESTS_PREFIX)
- set(LL_TESTS ON CACHE BOOL "Build and run unit and integration tests (disable for build timing runs to reduce variation")
+ set(LL_TESTS OFF CACHE BOOL "Build and run unit and integration tests (disable for build timing runs to reduce variation")
set(INCREMENTAL_LINK OFF CACHE BOOL "Use incremental linking on win32 builds (enable for faster links on some machines)")
set(ENABLE_MEDIA_PLUGINS ON CACHE BOOL "Turn off building media plugins if they are imported by third-party library mechanism")
set(VIEWER_SYMBOL_FILE "" CACHE STRING "Name of tarball into which to place symbol files")
@@ -173,13 +173,17 @@ if (${CMAKE_SYSTEM_NAME} MATCHES "Darwin")
set(CMAKE_XCODE_ATTRIBUTE_GCC_OPTIMIZATION_LEVEL "${CMAKE_MATCH_1}")
message(STATUS "CMAKE_XCODE_ATTRIBUTE_GCC_OPTIMIZATION_LEVEL = '${CMAKE_XCODE_ATTRIBUTE_GCC_OPTIMIZATION_LEVEL}'")
- string(REGEX MATCHALL "[^ ]+" LL_BUILD_LIST "$ENV{LL_BUILD}")
- list(FIND LL_BUILD_LIST "-iwithsysroot" sysroot_idx)
- if ("${sysroot_idx}" LESS 0)
- message(FATAL_ERROR "Environment variable LL_BUILD must contain '-iwithsysroot'")
- endif ()
- math(EXPR sysroot_idx "${sysroot_idx} + 1")
- list(GET LL_BUILD_LIST "${sysroot_idx}" CMAKE_OSX_SYSROOT)
+ # allow disabling this check by setting LL_SKIP_REQUIRE_SYSROOT either ON as cmake cache var or non-empty as environment var
+ set(LL_SKIP_REQUIRE_SYSROOT OFF CACHE BOOL "Skip requirement to set toolchain sysroot ahead of time. Not skipped by default for consistency, but skipping can be useful for selecting alternative xcode versions side by side")
+ if("$ENV{LL_SKIP_REQUIRE_SYSROOT}" STREQUAL "" AND NOT ${LL_SKIP_REQUIRE_SYSROOT})
+ string(REGEX MATCHALL "[^ ]+" LL_BUILD_LIST "$ENV{LL_BUILD}")
+ list(FIND LL_BUILD_LIST "-iwithsysroot" sysroot_idx)
+ if ("${sysroot_idx}" LESS 0)
+ message(FATAL_ERROR "Environment variable LL_BUILD must contain '-iwithsysroot'")
+ endif ()
+ math(EXPR sysroot_idx "${sysroot_idx} + 1")
+ list(GET LL_BUILD_LIST "${sysroot_idx}" CMAKE_OSX_SYSROOT)
+ endif()
message(STATUS "CMAKE_OSX_SYSROOT = '${CMAKE_OSX_SYSROOT}'")
set(CMAKE_XCODE_ATTRIBUTE_GCC_VERSION "com.apple.compilers.llvm.clang.1_0")


@@ -0,0 +1,5 @@
# -*- cmake -*-
include(Prebuilt)
use_prebuilt_binary(vulkan_gltf)


@@ -38,6 +38,7 @@ import itertools
import operator
import os
import re
+ import shlex
import shutil
import subprocess
import sys
@@ -531,15 +532,15 @@ class LLManifest(object, metaclass=LLManifestRegistry):
self.cmakedirs(path)
return path
- def run_command(self, command):
+ def run_command(self, command, **kwds):
"""
Runs an external command.
Raises ManifestError exception if the command returns a nonzero status.
"""
- print("Running command:", command)
+ print("Running command:", shlex.join(command))
sys.stdout.flush()
try:
- subprocess.check_call(command)
+ subprocess.check_call(command, **kwds)
except subprocess.CalledProcessError as err:
raise ManifestError( "Command %s returned non-zero status (%s)"
% (command, err.returncode) )


@@ -1050,7 +1050,6 @@ BOOL LLAvatarAppearance::loadSkeletonNode ()
mRoot->addChild(mMeshLOD[MESH_ID_UPPER_BODY]);
mRoot->addChild(mMeshLOD[MESH_ID_LOWER_BODY]);
mRoot->addChild(mMeshLOD[MESH_ID_SKIRT]);
- mRoot->addChild(mMeshLOD[MESH_ID_HEAD]);
LLAvatarJoint *skull = (LLAvatarJoint*)mRoot->findJoint("mSkull");
if (skull)


@@ -377,7 +377,6 @@ BOOL LLTexLayerSet::render( S32 x, S32 y, S32 width, S32 height, LLRenderTarget*
// clear buffer area to ensure we don't pick up UI elements
{
gGL.flush();
- LLGLDisable no_alpha(GL_ALPHA_TEST);
gAlphaMaskProgram.setMinimumAlpha(0.0f);
gGL.getTexUnit(0)->unbind(LLTexUnit::TT_TEXTURE);
gGL.color4f( 0.f, 0.f, 0.f, 1.f );
@@ -410,7 +409,6 @@ BOOL LLTexLayerSet::render( S32 x, S32 y, S32 width, S32 height, LLRenderTarget*
gGL.flush();
gGL.setSceneBlendType(LLRender::BT_REPLACE);
- LLGLDisable no_alpha(GL_ALPHA_TEST);
gAlphaMaskProgram.setMinimumAlpha(0.f);
gGL.getTexUnit(0)->unbind(LLTexUnit::TT_TEXTURE);
@@ -500,7 +498,6 @@ void LLTexLayerSet::renderAlphaMaskTextures(S32 x, S32 y, S32 width, S32 height,
{
// Set the alpha channel to one (clean up after previous blending)
gGL.flush();
- LLGLDisable no_alpha(GL_ALPHA_TEST);
gAlphaMaskProgram.setMinimumAlpha(0.f);
gGL.getTexUnit(0)->unbind(LLTexUnit::TT_TEXTURE);
gGL.color4f( 0.f, 0.f, 0.f, 1.f );
@@ -1025,7 +1022,6 @@ void LLTexLayer::calculateTexLayerColor(const param_color_list_t &param_list, LL
BOOL LLTexLayer::render(S32 x, S32 y, S32 width, S32 height, LLRenderTarget* bound_target)
{
- LLGLEnable color_mat(GL_COLOR_MATERIAL);
// *TODO: Is this correct?
//gPipeline.disableLights();
stop_glerror();
@@ -1112,7 +1108,6 @@ BOOL LLTexLayer::render(S32 x, S32 y, S32 width, S32 height, LLRenderTarget* bou
if( tex )
{
bool no_alpha_test = getInfo()->mWriteAllChannels;
- LLGLDisable alpha_test(no_alpha_test ? GL_ALPHA_TEST : 0);
if (no_alpha_test)
{
gAlphaMaskProgram.setMinimumAlpha(0.f);
@@ -1162,7 +1157,6 @@ BOOL LLTexLayer::render(S32 x, S32 y, S32 width, S32 height, LLRenderTarget* bou
getInfo()->mStaticImageFileName.empty() &&
color_specified )
{
- LLGLDisable no_alpha(GL_ALPHA_TEST);
gAlphaMaskProgram.setMinimumAlpha(0.000f);
gGL.getTexUnit(0)->unbind(LLTexUnit::TT_TEXTURE);
@@ -1260,7 +1254,6 @@ BOOL LLTexLayer::blendAlphaTexture(S32 x, S32 y, S32 width, S32 height)
LLGLTexture* tex = LLTexLayerStaticImageList::getInstance()->getTexture( getInfo()->mStaticImageFileName, getInfo()->mStaticImageIsMask );
if( tex )
{
- LLGLSNoAlphaTest gls_no_alpha_test;
gAlphaMaskProgram.setMinimumAlpha(0.f);
gGL.getTexUnit(0)->bind(tex, TRUE);
gl_rect_2d_simple_tex( width, height );
@@ -1279,7 +1272,6 @@ BOOL LLTexLayer::blendAlphaTexture(S32 x, S32 y, S32 width, S32 height)
LLGLTexture* tex = mLocalTextureObject->getImage();
if (tex)
{
- LLGLSNoAlphaTest gls_no_alpha_test;
gAlphaMaskProgram.setMinimumAlpha(0.f);
gGL.getTexUnit(0)->bind(tex);
gl_rect_2d_simple_tex( width, height );
@@ -1316,7 +1308,6 @@ void LLTexLayer::renderMorphMasks(S32 x, S32 y, S32 width, S32 height, const LLC
// Note: if the first param is a mulitply, multiply against the current buffer's alpha
if( !first_param || !first_param->getMultiplyBlend() )
{
- LLGLDisable no_alpha(GL_ALPHA_TEST);
gGL.getTexUnit(0)->unbind(LLTexUnit::TT_TEXTURE);
// Clear the alpha
@@ -1328,7 +1319,6 @@ void LLTexLayer::renderMorphMasks(S32 x, S32 y, S32 width, S32 height, const LLC
}
// Accumulate alphas
- LLGLSNoAlphaTest gls_no_alpha_test;
gGL.color4f( 1.f, 1.f, 1.f, 1.f );
for (LLTexLayerParamAlpha* param : mParamAlphaList)
{
@@ -1350,7 +1340,6 @@ void LLTexLayer::renderMorphMasks(S32 x, S32 y, S32 width, S32 height, const LLC
LLGLTexture* tex = mLocalTextureObject->getImage();
if( tex && (tex->getComponents() == 4) )
{
- LLGLSNoAlphaTest gls_no_alpha_test;
LLTexUnit::eTextureAddressMode old_mode = tex->getAddressMode();
gGL.getTexUnit(0)->bind(tex, TRUE);
@@ -1370,7 +1359,6 @@ void LLTexLayer::renderMorphMasks(S32 x, S32 y, S32 width, S32 height, const LLC
{
if( (tex->getComponents() == 4) || (tex->getComponents() == 1) )
{
- LLGLSNoAlphaTest gls_no_alpha_test;
gGL.getTexUnit(0)->bind(tex, TRUE);
gl_rect_2d_simple_tex( width, height );
gGL.getTexUnit(0)->unbind(LLTexUnit::TT_TEXTURE);
@@ -1387,7 +1375,6 @@ void LLTexLayer::renderMorphMasks(S32 x, S32 y, S32 width, S32 height, const LLC
// Note: we're still using gGL.blendFunc( GL_DST_ALPHA, GL_ZERO );
if ( !is_approx_equal(layer_color.mV[VW], 1.f) )
{
- LLGLDisable no_alpha(GL_ALPHA_TEST);
gGL.getTexUnit(0)->unbind(LLTexUnit::TT_TEXTURE);
gGL.color4fv(layer_color.mV);
gl_rect_2d_simple( width, height );
@@ -1472,7 +1459,14 @@ void LLTexLayer::renderMorphMasks(S32 x, S32 y, S32 width, S32 height, const LLC
}
else
{ // platforms with working drivers...
- glReadPixels(x, y, width, height, GL_ALPHA, GL_UNSIGNED_BYTE, alpha_data);
+ // We just want GL_ALPHA, but that isn't supported in OGL core profile 4.
+ static const size_t TEMP_BYTES_PER_PIXEL = 4;
+ U8* temp_data = (U8*)ll_aligned_malloc_32(mem_size * TEMP_BYTES_PER_PIXEL);
+ glReadPixels(x, y, width, height, GL_RGBA, GL_UNSIGNED_BYTE, temp_data);
+ for (size_t pixel = 0; pixel < pixels; pixel++) {
+     alpha_data[pixel] = temp_data[(pixel * TEMP_BYTES_PER_PIXEL) + 3];
+ }
+ ll_aligned_free_32(temp_data);
}
}
else


@@ -149,7 +149,7 @@ LLTexLayerParamAlpha::LLTexLayerParamAlpha(const LLTexLayerParamAlpha& pOther)
mCachedProcessedTexture(pOther.mCachedProcessedTexture),
mStaticImageTGA(pOther.mStaticImageTGA),
mStaticImageRaw(pOther.mStaticImageRaw),
- mNeedsCreateTexture(pOther.mNeedsCreateTexture),
+ mNeedsCreateTexture(pOther.mNeedsCreateTexture.load()),
mStaticImageInvalid(pOther.mStaticImageInvalid),
mAvgDistortionVec(pOther.mAvgDistortionVec),
mCachedEffectiveWeight(pOther.mCachedEffectiveWeight)
@@ -344,7 +344,6 @@ BOOL LLTexLayerParamAlpha::render(S32 x, S32 y, S32 width, S32 height)
mCachedProcessedTexture->setAddressMode(LLTexUnit::TAM_CLAMP);
}
- LLGLSNoAlphaTest gls_no_alpha_test;
gGL.getTexUnit(0)->bind(mCachedProcessedTexture);
gl_rect_2d_simple_tex(width, height);
gGL.getTexUnit(0)->unbind(LLTexUnit::TT_TEXTURE);
@@ -361,7 +360,6 @@ BOOL LLTexLayerParamAlpha::render(S32 x, S32 y, S32 width, S32 height)
}
else
{
- LLGLDisable no_alpha(GL_ALPHA_TEST);
gGL.getTexUnit(0)->unbind(LLTexUnit::TT_TEXTURE);
gGL.color4f(0.f, 0.f, 0.f, effective_weight);
gl_rect_2d_simple(width, height);


@@ -100,7 +100,7 @@ private:
LLPointer<LLGLTexture> mCachedProcessedTexture;
LLPointer<LLImageTGA> mStaticImageTGA;
LLPointer<LLImageRaw> mStaticImageRaw;
- BOOL mNeedsCreateTexture;
+ std::atomic<BOOL> mNeedsCreateTexture;
BOOL mStaticImageInvalid;
LL_ALIGN_16(LLVector4a mAvgDistortionVec);
F32 mCachedEffectiveWeight;


@@ -607,40 +607,37 @@ void LLAudioDecodeMgr::Impl::startMoreDecodes()
// Kick off a decode
mDecodes[decode_id] = LLPointer<LLVorbisDecodeState>(NULL);
- try
- {
- main_queue->postTo(
+ bool posted = main_queue->postTo(
general_queue,
[decode_id]() // Work done on general queue
{
LLPointer<LLVorbisDecodeState> decode_state = beginDecodingAndWritingAudio(decode_id);
if (!decode_state)
{
// Audio decode has errored
return decode_state;
}
// Disk write of decoded audio is now in progress off-thread
return decode_state;
},
[decode_id, this](LLPointer<LLVorbisDecodeState> decode_state) // Callback to main thread
mutable {
if (!gAudiop)
{
// There is no LLAudioEngine anymore. This might happen if
// an audio decode is enqueued just before shutdown.
return;
}
// At this point, we can be certain that the pointer to "this"
// is valid because the lifetime of "this" is dependent upon
// the lifetime of gAudiop.
enqueueFinishAudio(decode_id, decode_state);
});
- }
- catch (const LLThreadSafeQueueInterrupt&)
+ if (! posted)
{
// Shutdown
// Consider making processQueue() do a cleanup instead


@@ -30,6 +30,7 @@
#include <list>
#include <map>
+ #include <array>
#include "v3math.h"
#include "v3dmath.h"


@@ -42,6 +42,7 @@ class LLAudioEngine_OpenAL : public LLAudioEngine
virtual bool init(void *user_data, const std::string &app_title);
virtual std::string getDriverName(bool verbose);
+ virtual LLStreamingAudioInterface* createDefaultStreamingAudioImpl() const { return nullptr; }
virtual void allocateListener();
virtual void shutdown();
@@ -56,7 +57,6 @@ class LLAudioEngine_OpenAL : public LLAudioEngine
/*virtual*/ void updateWind(LLVector3 direction, F32 camera_altitude);
private:
- void * windDSP(void *newbuffer, int length);
typedef S16 WIND_SAMPLE_T;
LLWindGen<WIND_SAMPLE_T> *mWindGen;
S16 *mWindBuf;


@@ -17,6 +17,7 @@ include(Tracy)
set(llcommon_SOURCE_FILES
+ commoncontrol.cpp
indra_constants.cpp
llallocator.cpp
llallocator_heap_profile.cpp
@@ -117,6 +118,7 @@ set(llcommon_HEADER_FILES
chrono.h
classic_callback.h
+ commoncontrol.h
ctype_workaround.h
fix_macros.h
indra_constants.h
@@ -173,6 +175,7 @@ set(llcommon_HEADER_FILES
llinitdestroyclass.h
llinitparam.h
llinstancetracker.h
+ llinstancetrackersubclass.h
llkeybind.h
llkeythrottle.h
llleap.h
@@ -246,6 +249,7 @@ set(llcommon_HEADER_FILES
stdtypes.h
stringize.h
threadpool.h
+ threadpool_fwd.h
threadsafeschedule.h
timer.h
tuple.h


@@ -0,0 +1,106 @@
/**
* @file commoncontrol.cpp
* @author Nat Goodspeed
* @date 2022-06-08
* @brief Implementation for commoncontrol.
*
* $LicenseInfo:firstyear=2022&license=viewerlgpl$
* Copyright (c) 2022, Linden Research, Inc.
* $/LicenseInfo$
*/
// Precompiled header
#include "linden_common.h"
// associated header
#include "commoncontrol.h"
// STL headers
// std headers
// external library headers
// other Linden headers
#include "llevents.h"
#include "llsdutil.h"
LLSD LL::CommonControl::access(const LLSD& params)
{
// We can't actually introduce a link-time dependency on llxml, or on any
// global LLControlGroup (*koff* gSavedSettings *koff*) but we can issue a
// runtime query. If we're running as part of a viewer with
// LLViewerControlListener, we can use that to interact with any
// instantiated LLControGroup.
LLSD response;
{
LLEventStream reply("reply");
LLTempBoundListener connection = reply.listen("listener",
[&response] (const LLSD& event)
{
response = event;
return false;
});
LLSD rparams{ params };
rparams["reply"] = reply.getName();
LLEventPumps::instance().obtain("LLViewerControl").post(rparams);
}
// LLViewerControlListener responds immediately. If it's listening at all,
// it will already have set response.
if (! response.isDefined())
{
LLTHROW(NoListener("No LLViewerControl listener instantiated"));
}
LLSD error{ response["error"] };
if (error.isDefined())
{
LLTHROW(ParamError(error));
}
response.erase("error");
response.erase("reqid");
return response;
}
/// set control group.key to defined default value
LLSD LL::CommonControl::set_default(const std::string& group, const std::string& key)
{
return access(llsd::map("op", "set",
"group", group, "key", key))["value"];
}
/// set control group.key to specified value
LLSD LL::CommonControl::set(const std::string& group, const std::string& key, const LLSD& value)
{
return access(llsd::map("op", "set",
"group", group, "key", key, "value", value))["value"];
}
/// toggle boolean control group.key
LLSD LL::CommonControl::toggle(const std::string& group, const std::string& key)
{
return access(llsd::map("op", "toggle",
"group", group, "key", key))["value"];
}
/// get the definition for control group.key, (! isDefined()) if bad
/// ["name"], ["type"], ["value"], ["comment"]
LLSD LL::CommonControl::get_def(const std::string& group, const std::string& key)
{
return access(llsd::map("op", "get",
"group", group, "key", key));
}
/// get the value of control group.key
LLSD LL::CommonControl::get(const std::string& group, const std::string& key)
{
return access(llsd::map("op", "get",
"group", group, "key", key))["value"];
}
/// get defined groups
std::vector<std::string> LL::CommonControl::get_groups()
{
auto groups{ access(llsd::map("op", "groups"))["groups"] };
return { groups.beginArray(), groups.endArray() };
}
/// get definitions for all variables in group
LLSD LL::CommonControl::get_vars(const std::string& group)
{
return access(llsd::map("op", "vars", "group", group))["vars"];
}


@@ -0,0 +1,75 @@
/**
* @file commoncontrol.h
* @author Nat Goodspeed
* @date 2022-06-08
* @brief Access LLViewerControl LLEventAPI, if process has one.
*
* $LicenseInfo:firstyear=2022&license=viewerlgpl$
* Copyright (c) 2022, Linden Research, Inc.
* $/LicenseInfo$
*/
#if ! defined(LL_COMMONCONTROL_H)
#define LL_COMMONCONTROL_H
#include <vector>
#include "llexception.h"
#include "llsd.h"
namespace LL
{
class CommonControl
{
public:
struct Error: public LLException
{
Error(const std::string& what): LLException(what) {}
};
/// Exception thrown if there's no LLViewerControl LLEventAPI
struct NoListener: public Error
{
NoListener(const std::string& what): Error(what) {}
};
struct ParamError: public Error
{
ParamError(const std::string& what): Error(what) {}
};
/// set control group.key to defined default value
static
LLSD set_default(const std::string& group, const std::string& key);
/// set control group.key to specified value
static
LLSD set(const std::string& group, const std::string& key, const LLSD& value);
/// toggle boolean control group.key
static
LLSD toggle(const std::string& group, const std::string& key);
/// get the definition for control group.key, (! isDefined()) if bad
/// ["name"], ["type"], ["value"], ["comment"]
static
LLSD get_def(const std::string& group, const std::string& key);
/// get the value of control group.key
static
LLSD get(const std::string& group, const std::string& key);
/// get defined groups
static
std::vector<std::string> get_groups();
/// get definitions for all variables in group
static
LLSD get_vars(const std::string& group);
private:
static
LLSD access(const LLSD& params);
};
} // namespace LL
#endif /* ! defined(LL_COMMONCONTROL_H) */
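A minimal usage sketch of the accessors declared above, assuming the process has an LLViewerControl listener registered; the group and key names below are illustrative placeholders only:

#include "commoncontrol.h"
#include "llerror.h"

void example_common_control()
{
    try
    {
        // read, toggle, then restore a (hypothetical) boolean debug setting
        LLSD value = LL::CommonControl::get("Global", "SomeDebugSetting");
        LL_INFOS() << "SomeDebugSetting = " << value << LL_ENDL;
        LL::CommonControl::toggle("Global", "SomeDebugSetting");
        LL::CommonControl::set_default("Global", "SomeDebugSetting");
    }
    catch (const LL::CommonControl::NoListener&)
    {
        // no LLViewerControl LLEventAPI in this process (e.g. a unit test)
    }
    catch (const LL::CommonControl::ParamError& err)
    {
        // bad group/key, or value rejected by the control
        LL_WARNS() << err.what() << LL_ENDL;
    }
}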

View File

@ -522,6 +522,7 @@ S32 LLAPRFile::seek(apr_file_t* file_handle, apr_seek_where_t where, S32 offset)
//static //static
S32 LLAPRFile::readEx(const std::string& filename, void *buf, S32 offset, S32 nbytes, LLVolatileAPRPool* pool) S32 LLAPRFile::readEx(const std::string& filename, void *buf, S32 offset, S32 nbytes, LLVolatileAPRPool* pool)
{ {
LL_PROFILE_ZONE_SCOPED;
//***************************************** //*****************************************
LLAPRFilePoolScope scope(pool); LLAPRFilePoolScope scope(pool);
apr_file_t* file_handle = open(filename, scope.getVolatileAPRPool(), APR_READ|APR_BINARY); apr_file_t* file_handle = open(filename, scope.getVolatileAPRPool(), APR_READ|APR_BINARY);
@ -566,6 +567,7 @@ S32 LLAPRFile::readEx(const std::string& filename, void *buf, S32 offset, S32 nb
//static //static
S32 LLAPRFile::writeEx(const std::string& filename, void *buf, S32 offset, S32 nbytes, LLVolatileAPRPool* pool) S32 LLAPRFile::writeEx(const std::string& filename, void *buf, S32 offset, S32 nbytes, LLVolatileAPRPool* pool)
{ {
LL_PROFILE_ZONE_SCOPED;
apr_int32_t flags = APR_CREATE|APR_WRITE|APR_BINARY; apr_int32_t flags = APR_CREATE|APR_WRITE|APR_BINARY;
if (offset < 0) if (offset < 0)
{ {

View File

@ -96,6 +96,7 @@ LLAssetDictionary::LLAssetDictionary()
addEntry(LLAssetType::AT_WIDGET, new AssetEntry("WIDGET", "widget", "widget", false, false, false)); addEntry(LLAssetType::AT_WIDGET, new AssetEntry("WIDGET", "widget", "widget", false, false, false));
addEntry(LLAssetType::AT_PERSON, new AssetEntry("PERSON", "person", "person", false, false, false)); addEntry(LLAssetType::AT_PERSON, new AssetEntry("PERSON", "person", "person", false, false, false));
addEntry(LLAssetType::AT_SETTINGS, new AssetEntry("SETTINGS", "settings", "settings blob", true, true, true)); addEntry(LLAssetType::AT_SETTINGS, new AssetEntry("SETTINGS", "settings", "settings blob", true, true, true));
addEntry(LLAssetType::AT_MATERIAL, new AssetEntry("MATERIAL", "material", "render material", true, true, true));
addEntry(LLAssetType::AT_UNKNOWN, new AssetEntry("UNKNOWN", "invalid", NULL, false, false, false)); addEntry(LLAssetType::AT_UNKNOWN, new AssetEntry("UNKNOWN", "invalid", NULL, false, false, false));
addEntry(LLAssetType::AT_NONE, new AssetEntry("NONE", "-1", NULL, FALSE, FALSE, FALSE)); addEntry(LLAssetType::AT_NONE, new AssetEntry("NONE", "-1", NULL, FALSE, FALSE, FALSE));

View File

@ -127,8 +127,9 @@ public:
AT_RESERVED_6 = 55, AT_RESERVED_6 = 55,
AT_SETTINGS = 56, // Collection of settings AT_SETTINGS = 56, // Collection of settings
AT_MATERIAL = 57, // Render Material
AT_COUNT = 57,
AT_COUNT = 58,
// +*********************************************************+ // +*********************************************************+
// | TO ADD AN ELEMENT TO THIS ENUM: | // | TO ADD AN ELEMENT TO THIS ENUM: |

View File

@ -79,9 +79,9 @@ struct LLContextStatus
LL_COMMON_API std::ostream& operator<<(std::ostream& s, const LLContextStatus& context_status); LL_COMMON_API std::ostream& operator<<(std::ostream& s, const LLContextStatus& context_status);
#define dumpStack(tag) \ #define dumpStack(tag) \
if (debugLoggingEnabled(tag)) \ LL_DEBUGS(tag) << "STACK:\n" \
{ \ << "====================\n" \
LLCallStack cs; \ << LLCallStack() \
LL_DEBUGS(tag) << "STACK:\n" << "====================\n" << cs << "====================" << LL_ENDL; \ << "====================" \
} << LL_ENDL;

View File

@ -37,12 +37,13 @@ thread_local bool gProfilerEnabled = false;
#if (TRACY_ENABLE) #if (TRACY_ENABLE)
// Override new/delete for tracy memory profiling // Override new/delete for tracy memory profiling
void *operator new(size_t size)
void* ll_tracy_new(size_t size)
{ {
void* ptr; void* ptr;
if (gProfilerEnabled) if (gProfilerEnabled)
{ {
LL_PROFILE_ZONE_SCOPED_CATEGORY_MEMORY; //LL_PROFILE_ZONE_SCOPED_CATEGORY_MEMORY;
ptr = (malloc)(size); ptr = (malloc)(size);
} }
else else
@ -57,12 +58,22 @@ void *operator new(size_t size)
return ptr; return ptr;
} }
void operator delete(void *ptr) noexcept void* operator new(size_t size)
{
return ll_tracy_new(size);
}
void* operator new[](std::size_t count)
{
return ll_tracy_new(count);
}
void ll_tracy_delete(void* ptr)
{ {
TracyFree(ptr); TracyFree(ptr);
if (gProfilerEnabled) if (gProfilerEnabled)
{ {
LL_PROFILE_ZONE_SCOPED_CATEGORY_MEMORY; //LL_PROFILE_ZONE_SCOPED_CATEGORY_MEMORY;
(free)(ptr); (free)(ptr);
} }
else else
@ -71,6 +82,16 @@ void operator delete(void *ptr) noexcept
} }
} }
void operator delete(void *ptr) noexcept
{
ll_tracy_delete(ptr);
}
void operator delete[](void* ptr) noexcept
{
ll_tracy_delete(ptr);
}
// C-style malloc/free can't be so easily overridden, so we define tracy versions and use // C-style malloc/free can't be so easily overridden, so we define tracy versions and use
// a pre-processor #define in linden_common.h to redirect to them. The parens around the native // a pre-processor #define in linden_common.h to redirect to them. The parens around the native
// functions below prevents recursive substitution by the preprocessor. // functions below prevents recursive substitution by the preprocessor.
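For readers unfamiliar with that trick, a hedged stand-alone sketch of the redirect described in the comment above (the names here are illustrative, not the actual linden_common.h definitions):

#include <cstdlib>
#include "Tracy.hpp"

void* ll_tracy_malloc(size_t size)
{
    void* ptr = (malloc)(size);   // parentheses suppress function-like macro
                                  // expansion, so this calls the real malloc
    TracyAlloc(ptr, size);
    return ptr;
}

void ll_tracy_free(void* ptr)
{
    TracyFree(ptr);
    (free)(ptr);                  // likewise calls the real free
}

// In a common header, after the functions are declared:
// #define malloc(size) ll_tracy_malloc(size)
// #define free(ptr)    ll_tracy_free(ptr)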

View File

@ -123,11 +123,7 @@ LLCoros::LLCoros():
// Previously we used // Previously we used
// boost::context::guarded_stack_allocator::default_stacksize(); // boost::context::guarded_stack_allocator::default_stacksize();
// empirically this is insufficient. // empirically this is insufficient.
#if ADDRESS_SIZE == 64 mStackSize(768*1024),
mStackSize(512*1024),
#else
mStackSize(256*1024),
#endif
// mCurrent does NOT own the current CoroData instance -- it simply // mCurrent does NOT own the current CoroData instance -- it simply
// points to it. So initialize it with a no-op deleter. // points to it. So initialize it with a no-op deleter.
mCurrent{ [](CoroData*){} } mCurrent{ [](CoroData*){} }

View File

@ -1603,19 +1603,18 @@ namespace LLError
} }
} }
bool debugLoggingEnabled(const std::string& tag) void crashdriver(void (*callback)(int*))
{ {
LLMutexTrylock lock(getMutex<LOG_MUTEX>(), 5); // The LLERROR_CRASH macro used to have inline code of the form:
if (!lock.isLocked()) //int* make_me_crash = NULL;
{ //*make_me_crash = 0;
return false;
}
SettingsConfigPtr s = Globals::getInstance()->getSettingsConfig(); // But compilers are getting smart enough to recognize that, so we must
LLError::ELevel level = LLError::LEVEL_DEBUG; // assign to an address supplied by a separate source file. We could do
bool res = checkLevelMap(s->mTagLevelMap, tag, level); // the assignment here in crashdriver() -- but then BugSplat would group
return res; // all LL_ERRS() crashes as the fault of this one function, instead of
// identifying the specific LL_ERRS() source line. So instead, do the
// assignment in a lambda in the caller's source. We just provide the
// nullptr target.
callback(nullptr);
} }

View File

@ -82,9 +82,11 @@ const int LL_ERR_NOERR = 0;
#ifdef SHOW_ASSERT #ifdef SHOW_ASSERT
#define llassert(func) llassert_always_msg(func, #func) #define llassert(func) llassert_always_msg(func, #func)
#define llassert_msg(func, msg) llassert_always_msg(func, msg)
#define llverify(func) llassert_always_msg(func, #func) #define llverify(func) llassert_always_msg(func, #func)
#else #else
#define llassert(func) #define llassert(func)
#define llassert_msg(func, msg)
#define llverify(func) do {if (func) {}} while(0) #define llverify(func) do {if (func) {}} while(0)
#endif #endif
@ -383,11 +385,9 @@ typedef LLError::NoClassInfo _LL_CLASS_TO_LOG;
#define LL_NEWLINE '\n' #define LL_NEWLINE '\n'
// Use this only in LL_ERRS or in a place where LL_ERRS may not be used // Use this only in LL_ERRS or in a place where LL_ERRS may not be used
#define LLERROR_CRASH \ #define LLERROR_CRASH \
{ \ { \
int* make_me_crash = NULL;\ crashdriver([](int* ptr){ *ptr = 0; exit(*ptr); }); \
*make_me_crash = 0; \
exit(*make_me_crash); \
} }
#define LL_ENDL \ #define LL_ENDL \
@ -464,7 +464,32 @@ typedef LLError::NoClassInfo _LL_CLASS_TO_LOG;
LLError::CallSite& _site(_sites[which]); \ LLError::CallSite& _site(_sites[which]); \
lllog_test_() lllog_test_()
// Check at run-time whether logging is enabled, without generating output /*
// Check at run-time whether logging is enabled, without generating output.
Resist the temptation to add a function like this because it incurs the
expense of locking and map-searching every time control reaches it.
bool debugLoggingEnabled(const std::string& tag); bool debugLoggingEnabled(const std::string& tag);
Instead of:
if debugLoggingEnabled("SomeTag")
{
// ... presumably expensive operation ...
LL_DEBUGS("SomeTag") << ... << LL_ENDL;
}
Use this:
LL_DEBUGS("SomeTag");
// ... presumably expensive operation ...
LL_CONT << ...;
LL_ENDL;
LL_DEBUGS("SomeTag") performs the locking and map-searching ONCE, then caches
the result in a static variable.
*/
// used by LLERROR_CRASH
void crashdriver(void (*)(int*));
#endif // LL_LLERROR_H #endif // LL_LLERROR_H
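To make the blame-attribution point in the crashdriver() comment concrete, a simplified sketch of one call site (crashdriver here is a stand-in declaration, not the real llerror.cpp wiring):

#include <cstdlib>

void crashdriver(void (*callback)(int*)) { callback(nullptr); }  // stand-in

void some_fatal_path()   // imagine this is an LL_ERRS() call site
{
    // The macro plants the write-through-null in the caller's own source,
    // so the crash reporter attributes the fault to this line rather than
    // to a shared crashdriver() function.
    crashdriver([](int* ptr){ *ptr = 0; exit(*ptr); });
}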

View File

@ -29,11 +29,6 @@
#include "llframetimer.h" #include "llframetimer.h"
// We don't bother building a stand alone lib; we just need to include the one source file for Tracy support
#if LL_PROFILER_CONFIGURATION == LL_PROFILER_CONFIG_TRACY || LL_PROFILER_CONFIGURATION == LL_PROFILER_CONFIG_TRACY_FAST_TIMER
#include "TracyClient.cpp"
#endif // LL_PROFILER_CONFIGURATION
// Static members // Static members
//LLTimer LLFrameTimer::sInternalTimer; //LLTimer LLFrameTimer::sInternalTimer;
U64 LLFrameTimer::sStartTotalTime = totalTime(); U64 LLFrameTimer::sStartTotalTime = totalTime();

View File

@ -104,22 +104,26 @@ public:
return LockStatic()->mMap.size(); return LockStatic()->mMap.size();
} }
// snapshot of std::pair<const KEY, std::shared_ptr<T>> pairs // snapshot of std::pair<const KEY, std::shared_ptr<SUBCLASS>> pairs, for
class snapshot // some SUBCLASS derived from T
template <typename SUBCLASS>
class snapshot_of
{ {
// It's very important that what we store in this snapshot are // It's very important that what we store in this snapshot are
// weak_ptrs, NOT shared_ptrs. That's how we discover whether any // weak_ptrs, NOT shared_ptrs. That's how we discover whether any
// instance has been deleted during the lifespan of a snapshot. // instance has been deleted during the lifespan of a snapshot.
typedef std::vector<std::pair<const KEY, weak_t>> VectorType; typedef std::vector<std::pair<const KEY, weak_t>> VectorType;
// Dereferencing our iterator produces a std::shared_ptr for each // Dereferencing the iterator we publish produces a
// instance that still exists. Since we store weak_ptrs, that involves // std::shared_ptr<SUBCLASS> for each instance that still exists.
// two chained transformations: // Since we store weak_ptr<T>, that involves two chained
// transformations:
// - a transform_iterator to lock the weak_ptr and return a shared_ptr // - a transform_iterator to lock the weak_ptr and return a shared_ptr
// - a filter_iterator to skip any shared_ptr that has become invalid. // - a filter_iterator to skip any shared_ptr<T> that has become
// invalid or references any T instance that isn't SUBCLASS.
// It is very important that we filter lazily, that is, during // It is very important that we filter lazily, that is, during
// traversal. Any one of our stored weak_ptrs might expire during // traversal. Any one of our stored weak_ptrs might expire during
// traversal. // traversal.
typedef std::pair<const KEY, ptr_t> strong_pair; typedef std::pair<const KEY, std::shared_ptr<SUBCLASS>> strong_pair;
// Note for future reference: nat has not yet had any luck (up to // Note for future reference: nat has not yet had any luck (up to
// Boost 1.67) trying to use boost::transform_iterator with a hand- // Boost 1.67) trying to use boost::transform_iterator with a hand-
// coded functor, only with actual functions. In my experience, an // coded functor, only with actual functions. In my experience, an
@ -127,7 +131,7 @@ public:
// result_type typedef. But this works. // result_type typedef. But this works.
static strong_pair strengthen(typename VectorType::value_type& pair) static strong_pair strengthen(typename VectorType::value_type& pair)
{ {
return { pair.first, pair.second.lock() }; return { pair.first, std::dynamic_pointer_cast<SUBCLASS>(pair.second.lock()) };
} }
static bool dead_skipper(const strong_pair& pair) static bool dead_skipper(const strong_pair& pair)
{ {
@ -135,7 +139,7 @@ public:
} }
public: public:
snapshot(): snapshot_of():
// populate our vector with a snapshot of (locked!) InstanceMap // populate our vector with a snapshot of (locked!) InstanceMap
// note, this assigns pair<KEY, shared_ptr> to pair<KEY, weak_ptr> // note, this assigns pair<KEY, shared_ptr> to pair<KEY, weak_ptr>
mData(mLock->mMap.begin(), mLock->mMap.end()) mData(mLock->mMap.begin(), mLock->mMap.end())
@ -184,44 +188,51 @@ public:
#endif // LL_WINDOWS #endif // LL_WINDOWS
VectorType mData; VectorType mData;
}; };
using snapshot = snapshot_of<T>;
// iterate over this for references to each instance // iterate over this for references to each SUBCLASS instance
class instance_snapshot: public snapshot template <typename SUBCLASS>
class instance_snapshot_of: public snapshot_of<SUBCLASS>
{ {
private: private:
static T& instance_getter(typename snapshot::iterator::reference pair) using super = snapshot_of<SUBCLASS>;
static T& instance_getter(typename super::iterator::reference pair)
{ {
return *pair.second; return *pair.second;
} }
public: public:
typedef boost::transform_iterator<decltype(instance_getter)*, typedef boost::transform_iterator<decltype(instance_getter)*,
typename snapshot::iterator> iterator; typename super::iterator> iterator;
iterator begin() { return iterator(snapshot::begin(), instance_getter); } iterator begin() { return iterator(super::begin(), instance_getter); }
iterator end() { return iterator(snapshot::end(), instance_getter); } iterator end() { return iterator(super::end(), instance_getter); }
void deleteAll() void deleteAll()
{ {
for (auto it(snapshot::begin()), end(snapshot::end()); it != end; ++it) for (auto it(super::begin()), end(super::end()); it != end; ++it)
{ {
delete it->second.get(); delete it->second.get();
} }
} }
}; };
using instance_snapshot = instance_snapshot_of<T>;
// iterate over this for each key // iterate over this for each key
class key_snapshot: public snapshot template <typename SUBCLASS>
class key_snapshot_of: public snapshot_of<SUBCLASS>
{ {
private: private:
static KEY key_getter(typename snapshot::iterator::reference pair) using super = snapshot_of<SUBCLASS>;
static KEY key_getter(typename super::iterator::reference pair)
{ {
return pair.first; return pair.first;
} }
public: public:
typedef boost::transform_iterator<decltype(key_getter)*, typedef boost::transform_iterator<decltype(key_getter)*,
typename snapshot::iterator> iterator; typename super::iterator> iterator;
iterator begin() { return iterator(snapshot::begin(), key_getter); } iterator begin() { return iterator(super::begin(), key_getter); }
iterator end() { return iterator(snapshot::end(), key_getter); } iterator end() { return iterator(super::end(), key_getter); }
}; };
using key_snapshot = key_snapshot_of<T>;
static ptr_t getInstance(const KEY& k) static ptr_t getInstance(const KEY& k)
{ {
@ -368,22 +379,25 @@ public:
return LockStatic()->mSet.size(); return LockStatic()->mSet.size();
} }
// snapshot of std::shared_ptr<T> pointers // snapshot of std::shared_ptr<SUBCLASS> pointers
class snapshot template <typename SUBCLASS>
class snapshot_of
{ {
// It's very important that what we store in this snapshot are // It's very important that what we store in this snapshot are
// weak_ptrs, NOT shared_ptrs. That's how we discover whether any // weak_ptrs, NOT shared_ptrs. That's how we discover whether any
// instance has been deleted during the lifespan of a snapshot. // instance has been deleted during the lifespan of a snapshot.
typedef std::vector<weak_t> VectorType; typedef std::vector<weak_t> VectorType;
// Dereferencing our iterator produces a std::shared_ptr for each // Dereferencing the iterator we publish produces a
// instance that still exists. Since we store weak_ptrs, that involves // std::shared_ptr<SUBCLASS> for each instance that still exists.
// two chained transformations: // Since we store weak_ptrs, that involves two chained
// transformations:
// - a transform_iterator to lock the weak_ptr and return a shared_ptr // - a transform_iterator to lock the weak_ptr and return a shared_ptr
// - a filter_iterator to skip any shared_ptr that has become invalid. // - a filter_iterator to skip any shared_ptr that has become invalid
typedef std::shared_ptr<T> strong_ptr; // or references any T instance that isn't SUBCLASS.
typedef std::shared_ptr<SUBCLASS> strong_ptr;
static strong_ptr strengthen(typename VectorType::value_type& ptr) static strong_ptr strengthen(typename VectorType::value_type& ptr)
{ {
return ptr.lock(); return std::dynamic_pointer_cast<SUBCLASS>(ptr.lock());
} }
static bool dead_skipper(const strong_ptr& ptr) static bool dead_skipper(const strong_ptr& ptr)
{ {
@ -391,7 +405,7 @@ public:
} }
public: public:
snapshot(): snapshot_of():
// populate our vector with a snapshot of (locked!) InstanceSet // populate our vector with a snapshot of (locked!) InstanceSet
// note, this assigns stored shared_ptrs to weak_ptrs for snapshot // note, this assigns stored shared_ptrs to weak_ptrs for snapshot
mData(mLock->mSet.begin(), mLock->mSet.end()) mData(mLock->mSet.begin(), mLock->mSet.end())
@ -437,22 +451,33 @@ public:
#endif // LL_WINDOWS #endif // LL_WINDOWS
VectorType mData; VectorType mData;
}; };
using snapshot = snapshot_of<T>;
// iterate over this for references to each instance // iterate over this for references to each instance
struct instance_snapshot: public snapshot template <typename SUBCLASS>
class instance_snapshot_of: public snapshot_of<SUBCLASS>
{ {
typedef boost::indirect_iterator<typename snapshot::iterator> iterator; private:
iterator begin() { return iterator(snapshot::begin()); } using super = snapshot_of<SUBCLASS>;
iterator end() { return iterator(snapshot::end()); }
public:
typedef boost::indirect_iterator<typename super::iterator> iterator;
iterator begin() { return iterator(super::begin()); }
iterator end() { return iterator(super::end()); }
void deleteAll() void deleteAll()
{ {
for (auto it(snapshot::begin()), end(snapshot::end()); it != end; ++it) for (auto it(super::begin()), end(super::end()); it != end; ++it)
{ {
delete it->get(); delete it->get();
} }
} }
}; };
using instance_snapshot = instance_snapshot_of<T>;
// key_snapshot_of isn't really meaningful, but define it anyway to avoid
// requiring two different LLInstanceTrackerSubclass implementations.
template <typename SUBCLASS>
using key_snapshot_of = instance_snapshot_of<SUBCLASS>;
protected: protected:
LLInstanceTracker() LLInstanceTracker()

View File

@ -0,0 +1,98 @@
/**
* @file llinstancetrackersubclass.h
* @author Nat Goodspeed
* @date 2022-12-09
* @brief Intermediate class to get subclass-specific types from
* LLInstanceTracker instance-retrieval methods.
*
* $LicenseInfo:firstyear=2022&license=viewerlgpl$
* Copyright (c) 2022, Linden Research, Inc.
* $/LicenseInfo$
*/
#if ! defined(LL_LLINSTANCETRACKERSUBCLASS_H)
#define LL_LLINSTANCETRACKERSUBCLASS_H
#include <memory> // std::shared_ptr, std::weak_ptr
/**
* Derive your subclass S of a subclass T of LLInstanceTracker<T> from
* LLInstanceTrackerSubclass<S, T> to perform appropriate downcasting and
* filtering for LLInstanceTracker access methods.
*
* LLInstanceTracker<T> uses CRTP, so that getWeak(), getInstance(), snapshot
* and instance_snapshot return pointers and references to T. The trouble is
* that subclasses T0 and T1 derived from T also get pointers and references
* to their base class T, requiring explicit downcasting. Moreover,
* T0::getInstance() shouldn't find an instance of any T subclass other than
* T0. Nor should T0::snapshot.
*
* @code
* class Tracked: public LLInstanceTracker<Tracked, std::string>
* {
* private:
* using super = LLInstanceTracker<Tracked, std::string>;
* public:
* Tracked(const std::string& name): super(name) {}
* // All references to Tracked::ptr_t, Tracked::getInstance() etc.
* // appropriately use Tracked.
* // ...
* };
*
* // But now we derive SubTracked from Tracked. We need SubTracked::ptr_t,
* // SubTracked::getInstance() etc. to use SubTracked, not Tracked.
* // This LLInstanceTrackerSubclass specialization is itself derived from
* // Tracked.
* class SubTracked: public LLInstanceTrackerSubclass<SubTracked, Tracked>
* {
* private:
* using super = LLInstanceTrackerSubclass<SubTracked, Tracked>;
* public:
* // LLInstanceTrackerSubclass's constructor forwards to Tracked's.
* SubTracked(const std::string& name): super(name) {}
* // SubTracked::getInstance() returns std::shared_ptr<SubTracked>, etc.
* // ...
* @endcode
*/
template <typename SUBCLASS, typename T>
class LLInstanceTrackerSubclass: public T
{
public:
using ptr_t = std::shared_ptr<SUBCLASS>;
using weak_t = std::weak_ptr<SUBCLASS>;
// forward any constructor call to the corresponding T ctor
template <typename... ARGS>
LLInstanceTrackerSubclass(ARGS&&... args):
T(std::forward<ARGS>(args)...)
{}
weak_t getWeak()
{
// call base-class getWeak(), try to lock, downcast to SUBCLASS
return std::dynamic_pointer_cast<SUBCLASS>(T::getWeak().lock());
}
template <typename KEY>
static ptr_t getInstance(const KEY& k)
{
return std::dynamic_pointer_cast<SUBCLASS>(T::getInstance(k));
}
using snapshot = typename T::template snapshot_of<SUBCLASS>;
using instance_snapshot = typename T::template instance_snapshot_of<SUBCLASS>;
using key_snapshot = typename T::template key_snapshot_of<SUBCLASS>;
static size_t instanceCount()
{
// T::instanceCount() lies because our snapshot, et al., won't
// necessarily return all the T instances -- only those that are also
// SUBCLASS instances. Count those.
size_t count = 0;
for (const auto& pair : snapshot())
++count;
return count;
}
};
#endif /* ! defined(LL_LLINSTANCETRACKERSUBCLASS_H) */
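Building on the Tracked/SubTracked example in the comment above, roughly how the subclass-aware aliases are meant to be used (a sketch; the class names come from that comment, not from shipping code):

#include <memory>

void example_subtracked_use()
{
    // visits only SubTracked instances; plain Tracked instances are skipped
    for (auto& obj : SubTracked::instance_snapshot())
    {
        (void)obj;   // ... use the instance ...
    }

    // keyed lookup comes back typed as the subclass
    if (std::shared_ptr<SubTracked> sp = SubTracked::getInstance("some name"))
    {
        // found a SubTracked registered under "some name"
    }

    // counts only SubTracked instances, not every Tracked
    size_t count = SubTracked::instanceCount();
    (void)count;
}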

View File

@ -389,6 +389,17 @@ public:
// Read all remaining bytes and log. // Read all remaining bytes and log.
LL_INFOS("LLLeap") << mDesc << ": " << rest << LL_ENDL; LL_INFOS("LLLeap") << mDesc << ": " << rest << LL_ENDL;
} }
/*--------------------------- diagnostic ---------------------------*/
else if (data["eof"].asBoolean())
{
LL_DEBUGS("LLLeap") << mDesc << " ended, no partial line" << LL_ENDL;
}
else
{
LL_DEBUGS("LLLeap") << mDesc << " (still running, " << childerr.size()
<< " bytes pending)" << LL_ENDL;
}
/*------------------------- end diagnostic -------------------------*/
return false; return false;
} }

View File

@ -35,6 +35,7 @@
# include <sys/types.h> # include <sys/types.h>
# include <mach/task.h> # include <mach/task.h>
# include <mach/mach_init.h> # include <mach/mach_init.h>
#include <mach/mach_host.h>
#elif LL_LINUX #elif LL_LINUX
# include <unistd.h> # include <unistd.h>
#endif #endif
@ -109,6 +110,50 @@ void LLMemory::updateMemoryInfo()
{ {
sAvailPhysicalMemInKB = U32Kilobytes(0); sAvailPhysicalMemInKB = U32Kilobytes(0);
} }
#elif defined(LL_DARWIN)
task_vm_info info;
mach_msg_type_number_t infoCount = TASK_VM_INFO_COUNT;
// MACH_TASK_BASIC_INFO reports the same resident_size, but does not tell us the reusable bytes or phys_footprint.
if (task_info(mach_task_self(), TASK_VM_INFO, reinterpret_cast<task_info_t>(&info), &infoCount) == KERN_SUCCESS)
{
// Our Windows definition of PagefileUsage is documented by Microsoft as "the total amount of
// memory that the memory manager has committed for a running process", which is rss.
sAllocatedPageSizeInKB = U32Bytes(info.resident_size);
// Activity Monitor => Inspect Process => Real Memory Size appears to report resident_size
// Activity Monitor => main window memory column appears to report phys_footprint, which spot-checks as at least 30% less.
// I think that is because of compression, which isn't going to give us a consistent measurement. We want uncompressed totals.
//
// In between is resident_size - reusable. This is what Chrome source code uses, with source comments saying it is 'the "Real Memory" value
// reported for the app by the Memory Monitor in Instruments.' It is still about 8% bigger than phys_footprint.
//
// (On Windows, we use WorkingSetSize.)
sAllocatedMemInKB = U32Bytes(info.resident_size - info.reusable);
}
else
{
LL_WARNS() << "task_info failed" << LL_ENDL;
}
// Total installed and available physical memory are properties of the host, not just our process.
vm_statistics64_data_t vmstat;
mach_msg_type_number_t count = HOST_VM_INFO64_COUNT;
mach_port_t host = mach_host_self();
vm_size_t page_size;
host_page_size(host, &page_size);
kern_return_t result = host_statistics64(host, HOST_VM_INFO64, reinterpret_cast<host_info_t>(&vmstat), &count);
if (result == KERN_SUCCESS) {
// This is what Chrome reports as 'the "Physical Memory Free" value reported by the Memory Monitor in Instruments.'
// Note though that inactive pages are not included here and not yet free, but could become so under memory pressure.
sAvailPhysicalMemInKB = U32Bytes(vmstat.free_count * page_size);
sMaxPhysicalMemInKB = LLMemoryInfo::getHardwareMemSize();
}
else
{
LL_WARNS() << "task_info failed" << LL_ENDL;
}
#else #else
//not valid for other systems for now. //not valid for other systems for now.
sAllocatedMemInKB = U64Bytes(LLMemory::getCurrentRSS()); sAllocatedMemInKB = U64Bytes(LLMemory::getCurrentRSS());

View File

@ -36,7 +36,8 @@
//============================================================================ //============================================================================
#define MUTEX_DEBUG (LL_DEBUG || LL_RELEASE_WITH_DEBUG_INFO) //#define MUTEX_DEBUG (LL_DEBUG || LL_RELEASE_WITH_DEBUG_INFO)
#define MUTEX_DEBUG 0 //disable mutex debugging as it's interfering with profiles
#if MUTEX_DEBUG #if MUTEX_DEBUG
#include <map> #include <map>
@ -61,7 +62,7 @@ protected:
mutable LLThread::id_t mLockingThread; mutable LLThread::id_t mLockingThread;
#if MUTEX_DEBUG #if MUTEX_DEBUG
std::map<LLThread::id_t, BOOL> mIsLocked; std::unordered_map<LLThread::id_t, BOOL> mIsLocked;
#endif #endif
}; };

View File

@ -340,4 +340,28 @@ private:
bool mStayUnique; bool mStayUnique;
}; };
// boost hash adapter
template <class Type>
struct boost::hash<LLPointer<Type>>
{
typedef LLPointer<Type> argument_type;
typedef std::size_t result_type;
result_type operator()(argument_type const& s) const
{
return (std::size_t) s.get();
}
};
// Adapt boost hash to std hash
namespace std
{
template<class Type> struct hash<LLPointer<Type>>
{
std::size_t operator()(LLPointer<Type> const& s) const noexcept
{
return boost::hash<LLPointer<Type>>()(s);
}
};
}
#endif #endif
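A short sketch of what the two hash adapters above make possible; Foo stands in for any LLRefCount-derived type normally held via LLPointer:

#include <unordered_set>
#include "llpointer.h"
#include "llrefcount.h"

class Foo : public LLRefCount {};   // hypothetical payload type

void example_pointer_set()
{
    // hashed by raw pointer identity, per the adapters above
    std::unordered_set<LLPointer<Foo>> seen;

    LLPointer<Foo> foo = new Foo();
    seen.insert(foo);               // the set keeps a reference alive

    bool already_there = (seen.find(foo) != seen.end());
    (void)already_there;
}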

View File

@ -746,7 +746,7 @@ private:
__cpuid(0x1, eax, ebx, ecx, edx); __cpuid(0x1, eax, ebx, ecx, edx);
if(feature_infos[0] != (S32)edx) if(feature_infos[0] != (S32)edx)
{ {
LL_ERRS() << "machdep.cpu.feature_bits doesn't match expected cpuid result!" << LL_ENDL; LL_WARNS() << "machdep.cpu.feature_bits doesn't match expected cpuid result!" << LL_ENDL;
} }
#endif // LL_RELEASE_FOR_DOWNLOAD #endif // LL_RELEASE_FOR_DOWNLOAD

View File

@ -86,8 +86,12 @@ extern thread_local bool gProfilerEnabled;
#define TRACY_ONLY_IPV4 1 #define TRACY_ONLY_IPV4 1
#include "Tracy.hpp" #include "Tracy.hpp"
// Mutually exclusive with detailed memory tracing // Enable OpenGL profiling
#define LL_PROFILER_ENABLE_TRACY_OPENGL 0 #define LL_PROFILER_ENABLE_TRACY_OPENGL 0
// Enable RenderDoc labeling
#define LL_PROFILER_ENABLE_RENDER_DOC 0
#endif #endif
#if LL_PROFILER_CONFIGURATION == LL_PROFILER_CONFIG_TRACY #if LL_PROFILER_CONFIGURATION == LL_PROFILER_CONFIG_TRACY
@ -104,14 +108,13 @@ extern thread_local bool gProfilerEnabled;
#define LL_PROFILE_ZONE_ERR(name) LL_PROFILE_ZONE_NAMED_COLOR( name, 0XFF0000 ) // RGB red #define LL_PROFILE_ZONE_ERR(name) LL_PROFILE_ZONE_NAMED_COLOR( name, 0XFF0000 ) // RGB red
#define LL_PROFILE_ZONE_INFO(name) LL_PROFILE_ZONE_NAMED_COLOR( name, 0X00FFFF ) // RGB cyan #define LL_PROFILE_ZONE_INFO(name) LL_PROFILE_ZONE_NAMED_COLOR( name, 0X00FFFF ) // RGB cyan
#define LL_PROFILE_ZONE_WARN(name) LL_PROFILE_ZONE_NAMED_COLOR( name, 0x0FFFF00 ) // RGB yellow #define LL_PROFILE_ZONE_WARN(name) LL_PROFILE_ZONE_NAMED_COLOR( name, 0x0FFFF00 ) // RGB yellow
#define LL_PROFILE_ALLOC(ptr, size) TracyAlloc(ptr, size)
#define LL_PROFILE_FREE(ptr) TracyFree(ptr)
#endif #endif
#if LL_PROFILER_CONFIGURATION == LL_PROFILER_CONFIG_FAST_TIMER #if LL_PROFILER_CONFIGURATION == LL_PROFILER_CONFIG_FAST_TIMER
#define LL_PROFILER_FRAME_END #define LL_PROFILER_FRAME_END
#define LL_PROFILER_SET_THREAD_NAME( name ) (void)(name) #define LL_PROFILER_SET_THREAD_NAME( name ) (void)(name)
#define LL_RECORD_BLOCK_TIME(name) const LLTrace::BlockTimer& LL_GLUE_TOKENS(block_time_recorder, __LINE__)(LLTrace::timeThisBlock(name)); (void)LL_GLUE_TOKENS(block_time_recorder, __LINE__); #define LL_RECORD_BLOCK_TIME(name) const LLTrace::BlockTimer& LL_GLUE_TOKENS(block_time_recorder, __LINE__)(LLTrace::timeThisBlock(name)); (void)LL_GLUE_TOKENS(block_time_recorder, __LINE__);
#define LL_PROFILE_ZONE_NAMED(name) // LL_PROFILE_ZONE_NAMED is a no-op when Tracy is disabled #define LL_PROFILE_ZONE_NAMED(name) // LL_PROFILE_ZONE_NAMED is a no-op when Tracy is disabled
#define LL_PROFILE_ZONE_NAMED_COLOR(name,color) // LL_PROFILE_ZONE_NAMED_COLOR is a no-op when Tracy is disabled
#define LL_PROFILE_ZONE_SCOPED // LL_PROFILE_ZONE_SCOPED is a no-op when Tracy is disabled #define LL_PROFILE_ZONE_SCOPED // LL_PROFILE_ZONE_SCOPED is a no-op when Tracy is disabled
#define LL_PROFILE_ZONE_COLOR(name,color) // LL_RECORD_BLOCK_TIME(name) #define LL_PROFILE_ZONE_COLOR(name,color) // LL_RECORD_BLOCK_TIME(name)
@ -121,8 +124,6 @@ extern thread_local bool gProfilerEnabled;
#define LL_PROFILE_ZONE_ERR(name) (void)(name); // Not supported #define LL_PROFILE_ZONE_ERR(name) (void)(name); // Not supported
#define LL_PROFILE_ZONE_INFO(name) (void)(name); // Not supported #define LL_PROFILE_ZONE_INFO(name) (void)(name); // Not supported
#define LL_PROFILE_ZONE_WARN(name) (void)(name); // Not supported #define LL_PROFILE_ZONE_WARN(name) (void)(name); // Not supported
#define LL_PROFILE_ALLOC(ptr, size) (void)(ptr); (void)(size);
#define LL_PROFILE_FREE(ptr) (void)(ptr);
#endif #endif
#if LL_PROFILER_CONFIGURATION == LL_PROFILER_CONFIG_TRACY_FAST_TIMER #if LL_PROFILER_CONFIGURATION == LL_PROFILER_CONFIG_TRACY_FAST_TIMER
#define LL_PROFILER_FRAME_END FrameMark #define LL_PROFILER_FRAME_END FrameMark
@ -138,14 +139,45 @@ extern thread_local bool gProfilerEnabled;
#define LL_PROFILE_ZONE_ERR(name) LL_PROFILE_ZONE_NAMED_COLOR( name, 0XFF0000 ) // RGB red #define LL_PROFILE_ZONE_ERR(name) LL_PROFILE_ZONE_NAMED_COLOR( name, 0XFF0000 ) // RGB red
#define LL_PROFILE_ZONE_INFO(name) LL_PROFILE_ZONE_NAMED_COLOR( name, 0X00FFFF ) // RGB cyan #define LL_PROFILE_ZONE_INFO(name) LL_PROFILE_ZONE_NAMED_COLOR( name, 0X00FFFF ) // RGB cyan
#define LL_PROFILE_ZONE_WARN(name) LL_PROFILE_ZONE_NAMED_COLOR( name, 0x0FFFF00 ) // RGB yellow #define LL_PROFILE_ZONE_WARN(name) LL_PROFILE_ZONE_NAMED_COLOR( name, 0x0FFFF00 ) // RGB yellow
#define LL_PROFILE_ALLOC(ptr, size) TracyAlloc(ptr, size)
#define LL_PROFILE_FREE(ptr) TracyFree(ptr)
#endif #endif
#else #else
#define LL_PROFILER_FRAME_END #define LL_PROFILER_FRAME_END
#define LL_PROFILER_SET_THREAD_NAME( name ) (void)(name) #define LL_PROFILER_SET_THREAD_NAME( name ) (void)(name)
#endif // LL_PROFILER #endif // LL_PROFILER
#if LL_PROFILER_ENABLE_TRACY_OPENGL
#define LL_PROFILE_GPU_ZONE(name) TracyGpuZone(name)
#define LL_PROFILE_GPU_ZONEC(name,color) TracyGpuZoneC(name,color)
#define LL_PROFILER_GPU_COLLECT TracyGpuCollect
#define LL_PROFILER_GPU_CONTEXT TracyGpuContext
// disable memory tracking (incompatible with GPU tracing)
#define LL_PROFILE_ALLOC(ptr, size) (void)(ptr); (void)(size);
#define LL_PROFILE_FREE(ptr) (void)(ptr);
#else
#define LL_PROFILE_GPU_ZONE(name) (void)name;
#define LL_PROFILE_GPU_ZONEC(name,color) (void)name;(void)color;
#define LL_PROFILER_GPU_COLLECT
#define LL_PROFILER_GPU_CONTEXT
#define LL_LABEL_OBJECT_GL(type, name, length, label)
#if LL_PROFILER_CONFIGURATION > 1
#define LL_PROFILE_ALLOC(ptr, size) TracyAlloc(ptr, size)
#define LL_PROFILE_FREE(ptr) TracyFree(ptr)
#else
#define LL_PROFILE_ALLOC(ptr, size) (void)(ptr); (void)(size);
#define LL_PROFILE_FREE(ptr) (void)(ptr);
#endif
#endif
#if LL_PROFILER_ENABLE_RENDER_DOC
#define LL_LABEL_OBJECT_GL(type, name, length, label) glObjectLabel(type, name, length, label)
#else
#define LL_LABEL_OBJECT_GL(type, name, length, label)
#endif
#include "llprofilercategories.h" #include "llprofilercategories.h"
#endif // LL_PROFILER_H #endif // LL_PROFILER_H
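A hedged sketch of how the GPU-zone and label macros above are meant to appear at a render call site (the function and the label text are hypothetical; real use also requires LL_PROFILER_GPU_CONTEXT once after GL context creation, and GL headers/types in scope):

#include "llprofiler.h"

void render_example_pass(GLuint vbo)
{
    LL_PROFILE_ZONE_SCOPED;                    // CPU-side zone
    LL_PROFILE_GPU_ZONE("example pass");       // no-op unless
                                               // LL_PROFILER_ENABLE_TRACY_OPENGL is 1

    // only emits glObjectLabel when LL_PROFILER_ENABLE_RENDER_DOC is 1
    LL_LABEL_OBJECT_GL(GL_BUFFER, vbo, 12, "example pass");

    // ... bind vbo and issue draw calls ...
}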

View File

@ -52,7 +52,7 @@
#define LL_PROFILER_CATEGORY_ENABLE_LOGGING 1 #define LL_PROFILER_CATEGORY_ENABLE_LOGGING 1
#define LL_PROFILER_CATEGORY_ENABLE_MATERIAL 1 #define LL_PROFILER_CATEGORY_ENABLE_MATERIAL 1
#define LL_PROFILER_CATEGORY_ENABLE_MEDIA 1 #define LL_PROFILER_CATEGORY_ENABLE_MEDIA 1
#define LL_PROFILER_CATEGORY_ENABLE_MEMORY 1 #define LL_PROFILER_CATEGORY_ENABLE_MEMORY 0
#define LL_PROFILER_CATEGORY_ENABLE_NETWORK 1 #define LL_PROFILER_CATEGORY_ENABLE_NETWORK 1
#define LL_PROFILER_CATEGORY_ENABLE_OCTREE 1 #define LL_PROFILER_CATEGORY_ENABLE_OCTREE 1
#define LL_PROFILER_CATEGORY_ENABLE_PIPELINE 1 #define LL_PROFILER_CATEGORY_ENABLE_PIPELINE 1

View File

@ -26,20 +26,26 @@
#include "linden_common.h" #include "linden_common.h"
#include "llqueuedthread.h" #include "llqueuedthread.h"
#include <chrono>
#include "llstl.h" #include "llstl.h"
#include "lltimer.h" // ms_sleep() #include "lltimer.h" // ms_sleep()
#include "lltracethreadrecorder.h" #include "llmutex.h"
//============================================================================ //============================================================================
// MAIN THREAD // MAIN THREAD
LLQueuedThread::LLQueuedThread(const std::string& name, bool threaded, bool should_pause) : LLQueuedThread::LLQueuedThread(const std::string& name, bool threaded, bool should_pause) :
LLThread(name), LLThread(name),
mThreaded(threaded), mIdleThread(TRUE),
mIdleThread(TRUE), mNextHandle(0),
mNextHandle(0), mStarted(FALSE),
mStarted(FALSE) mThreaded(threaded),
mRequestQueue(name, 1024 * 1024)
{ {
llassert(threaded); // the non-threaded implementation is deprecated
mMainQueue = LL::WorkQueue::getInstance("mainloop");
if (mThreaded) if (mThreaded)
{ {
if(should_pause) if(should_pause)
@ -69,6 +75,11 @@ void LLQueuedThread::shutdown()
unpause(); // MAIN THREAD unpause(); // MAIN THREAD
if (mThreaded) if (mThreaded)
{ {
if (mRequestQueue.size() == 0)
{
mRequestQueue.close();
}
S32 timeout = 100; S32 timeout = 100;
for ( ; timeout>0; timeout--) for ( ; timeout>0; timeout--)
{ {
@ -104,6 +115,8 @@ void LLQueuedThread::shutdown()
{ {
LL_WARNS() << "~LLQueuedThread() called with active requests: " << active_count << LL_ENDL; LL_WARNS() << "~LLQueuedThread() called with active requests: " << active_count << LL_ENDL;
} }
mRequestQueue.close();
} }
//---------------------------------------------------------------------------- //----------------------------------------------------------------------------
@ -112,6 +125,7 @@ void LLQueuedThread::shutdown()
// virtual // virtual
size_t LLQueuedThread::update(F32 max_time_ms) size_t LLQueuedThread::update(F32 max_time_ms)
{ {
LL_PROFILE_ZONE_SCOPED;
if (!mStarted) if (!mStarted)
{ {
if (!mThreaded) if (!mThreaded)
@ -125,29 +139,34 @@ size_t LLQueuedThread::update(F32 max_time_ms)
size_t LLQueuedThread::updateQueue(F32 max_time_ms) size_t LLQueuedThread::updateQueue(F32 max_time_ms)
{ {
F64 max_time = (F64)max_time_ms * .001; LL_PROFILE_ZONE_SCOPED;
LLTimer timer;
size_t pending = 1;
// Frame Update // Frame Update
if (mThreaded) if (mThreaded)
{ {
pending = getPending(); // schedule a call to threadedUpdate for every call to updateQueue
if(pending > 0) if (!isQuitting())
{
mRequestQueue.post([=]()
{
LL_PROFILE_ZONE_NAMED_CATEGORY_THREAD("qt - update");
mIdleThread = FALSE;
threadedUpdate();
mIdleThread = TRUE;
}
);
}
if(getPending() > 0)
{ {
unpause(); unpause();
} }
} }
else else
{ {
while (pending > 0) mRequestQueue.runFor(std::chrono::microseconds((int) (max_time_ms*1000.f)));
{ threadedUpdate();
pending = processNextRequest();
if (max_time && timer.getElapsedTimeF64() > max_time)
break;
}
} }
return pending; return getPending();
} }
void LLQueuedThread::incQueue() void LLQueuedThread::incQueue()
@ -166,11 +185,7 @@ void LLQueuedThread::incQueue()
// May be called from any thread // May be called from any thread
size_t LLQueuedThread::getPending() size_t LLQueuedThread::getPending()
{ {
size_t res; return mRequestQueue.size();
lockData();
res = mRequestQueue.size();
unlockData();
return res;
} }
// MAIN thread // MAIN thread
@ -195,35 +210,28 @@ void LLQueuedThread::waitOnPending()
// MAIN thread // MAIN thread
void LLQueuedThread::printQueueStats() void LLQueuedThread::printQueueStats()
{ {
lockData(); U32 size = mRequestQueue.size();
if (!mRequestQueue.empty()) if (size > 0)
{ {
QueuedRequest *req = *mRequestQueue.begin(); LL_INFOS() << llformat("Pending Requests:%d ", mRequestQueue.size()) << LL_ENDL;
LL_INFOS() << llformat("Pending Requests:%d Current status:%d", mRequestQueue.size(), req->getStatus()) << LL_ENDL;
} }
else else
{ {
LL_INFOS() << "Queued Thread Idle" << LL_ENDL; LL_INFOS() << "Queued Thread Idle" << LL_ENDL;
} }
unlockData();
} }
// MAIN thread // MAIN thread
LLQueuedThread::handle_t LLQueuedThread::generateHandle() LLQueuedThread::handle_t LLQueuedThread::generateHandle()
{ {
lockData(); U32 res = ++mNextHandle;
while ((mNextHandle == nullHandle()) || (mRequestHash.find(mNextHandle)))
{
mNextHandle++;
}
const LLQueuedThread::handle_t res = mNextHandle++;
unlockData();
return res; return res;
} }
// MAIN thread // MAIN thread
bool LLQueuedThread::addRequest(QueuedRequest* req) bool LLQueuedThread::addRequest(QueuedRequest* req)
{ {
LL_PROFILE_ZONE_SCOPED;
if (mStatus == QUITTING) if (mStatus == QUITTING)
{ {
return false; return false;
@ -231,14 +239,14 @@ bool LLQueuedThread::addRequest(QueuedRequest* req)
lockData(); lockData();
req->setStatus(STATUS_QUEUED); req->setStatus(STATUS_QUEUED);
mRequestQueue.insert(req); mRequestHash.insert(req);
mRequestHash.insert(req);
#if _DEBUG #if _DEBUG
// LL_INFOS() << llformat("LLQueuedThread::Added req [%08d]",handle) << LL_ENDL; // LL_INFOS() << llformat("LLQueuedThread::Added req [%08d]",handle) << LL_ENDL;
#endif #endif
unlockData(); unlockData();
incQueue(); llassert(!mDataLock->isSelfLocked());
mRequestQueue.post([this, req]() { processRequest(req); });
return true; return true;
} }
@ -246,6 +254,7 @@ bool LLQueuedThread::addRequest(QueuedRequest* req)
// MAIN thread // MAIN thread
bool LLQueuedThread::waitForResult(LLQueuedThread::handle_t handle, bool auto_complete) bool LLQueuedThread::waitForResult(LLQueuedThread::handle_t handle, bool auto_complete)
{ {
LL_PROFILE_ZONE_SCOPED;
llassert (handle != nullHandle()); llassert (handle != nullHandle());
bool res = false; bool res = false;
bool waspaused = isPaused(); bool waspaused = isPaused();
@ -312,6 +321,7 @@ LLQueuedThread::status_t LLQueuedThread::getRequestStatus(handle_t handle)
void LLQueuedThread::abortRequest(handle_t handle, bool autocomplete) void LLQueuedThread::abortRequest(handle_t handle, bool autocomplete)
{ {
LL_PROFILE_ZONE_SCOPED_CATEGORY_THREAD;
lockData(); lockData();
QueuedRequest* req = (QueuedRequest*)mRequestHash.find(handle); QueuedRequest* req = (QueuedRequest*)mRequestHash.find(handle);
if (req) if (req)
@ -333,30 +343,9 @@ void LLQueuedThread::setFlags(handle_t handle, U32 flags)
unlockData(); unlockData();
} }
void LLQueuedThread::setPriority(handle_t handle, U32 priority)
{
lockData();
QueuedRequest* req = (QueuedRequest*)mRequestHash.find(handle);
if (req)
{
if(req->getStatus() == STATUS_INPROGRESS)
{
// not in list
req->setPriority(priority);
}
else if(req->getStatus() == STATUS_QUEUED)
{
// remove from list then re-insert
llverify(mRequestQueue.erase(req) == 1);
req->setPriority(priority);
mRequestQueue.insert(req);
}
}
unlockData();
}
bool LLQueuedThread::completeRequest(handle_t handle) bool LLQueuedThread::completeRequest(handle_t handle)
{ {
LL_PROFILE_ZONE_SCOPED;
bool res = false; bool res = false;
lockData(); lockData();
QueuedRequest* req = (QueuedRequest*)mRequestHash.find(handle); QueuedRequest* req = (QueuedRequest*)mRequestHash.find(handle);
@ -399,88 +388,120 @@ bool LLQueuedThread::check()
//============================================================================ //============================================================================
// Runs on its OWN thread // Runs on its OWN thread
size_t LLQueuedThread::processNextRequest() void LLQueuedThread::processRequest(LLQueuedThread::QueuedRequest* req)
{ {
QueuedRequest *req; LL_PROFILE_ZONE_SCOPED_CATEGORY_THREAD;
mIdleThread = FALSE;
//threadedUpdate();
// Get next request from pool // Get next request from pool
lockData(); lockData();
while(1) if ((req->getFlags() & FLAG_ABORT) || (mStatus == QUITTING))
{ {
req = NULL; LL_PROFILE_ZONE_NAMED_CATEGORY_THREAD("qtpr - abort");
if (mRequestQueue.empty()) req->setStatus(STATUS_ABORTED);
req->finishRequest(false);
if (req->getFlags() & FLAG_AUTO_COMPLETE)
{ {
break; mRequestHash.erase(req);
} req->deleteRequest();
req = *mRequestQueue.begin();
mRequestQueue.erase(mRequestQueue.begin());
if ((req->getFlags() & FLAG_ABORT) || (mStatus == QUITTING))
{
req->setStatus(STATUS_ABORTED);
req->finishRequest(false);
if (req->getFlags() & FLAG_AUTO_COMPLETE)
{
mRequestHash.erase(req);
req->deleteRequest();
// check(); // check();
}
continue;
} }
llassert_always(req->getStatus() == STATUS_QUEUED); unlockData();
break;
} }
U32 start_priority = 0 ; else
if (req) {
{ llassert_always(req->getStatus() == STATUS_QUEUED);
req->setStatus(STATUS_INPROGRESS);
start_priority = req->getPriority();
}
unlockData();
// This is the only place we will call req->setStatus() after if (req)
// it has initially been set to STATUS_QUEUED, so it is {
// safe to access req. req->setStatus(STATUS_INPROGRESS);
if (req) }
{ unlockData();
// process request
bool complete = req->processRequest();
if (complete) // This is the only place we will call req->setStatus() after
{ // it has initially been set to STATUS_QUEUED, so it is
lockData(); // safe to access req.
req->setStatus(STATUS_COMPLETE); if (req)
req->finishRequest(true); {
if (req->getFlags() & FLAG_AUTO_COMPLETE) // process request
{ bool complete = req->processRequest();
mRequestHash.erase(req);
req->deleteRequest();
// check();
}
unlockData();
}
else
{
lockData();
req->setStatus(STATUS_QUEUED);
mRequestQueue.insert(req);
unlockData();
if (mThreaded && start_priority < PRIORITY_NORMAL)
{
ms_sleep(1); // sleep the thread a little
}
}
LLTrace::get_thread_recorder()->pushToParent();
}
return getPending(); if (complete)
{
LL_PROFILE_ZONE_NAMED_CATEGORY_THREAD("qtpr - complete");
lockData();
req->setStatus(STATUS_COMPLETE);
req->finishRequest(true);
if (req->getFlags() & FLAG_AUTO_COMPLETE)
{
mRequestHash.erase(req);
req->deleteRequest();
// check();
}
unlockData();
}
else
{
LL_PROFILE_ZONE_NAMED_CATEGORY_THREAD("qtpr - retry");
//put back on queue and retry after a short delay (~16ms below)
lockData();
req->setStatus(STATUS_QUEUED);
unlockData();
llassert(!mDataLock->isSelfLocked());
#if 0
// try again on next frame
// NOTE: tried using "post" with a time in the future, but this
// would invariably cause this thread to wait for a long time (10+ ms)
// while work is pending
bool ret = LL::WorkQueue::postMaybe(
mMainQueue,
[=]()
{
LL_PROFILE_ZONE_NAMED("processRequest - retry");
mRequestQueue.post([=]()
{
LL_PROFILE_ZONE_NAMED("processRequest - retry"); // <-- not redundant, track retry on both queues
processRequest(req);
});
});
llassert(ret);
#else
using namespace std::chrono_literals;
auto retry_time = LL::WorkQueue::TimePoint::clock::now() + 16ms;
mRequestQueue.post([=]
{
LL_PROFILE_ZONE_NAMED("processRequest - retry");
if (LL::WorkQueue::TimePoint::clock::now() < retry_time)
{
auto sleep_time = std::chrono::duration_cast<std::chrono::milliseconds>(retry_time - LL::WorkQueue::TimePoint::clock::now());
if (sleep_time.count() > 0)
{
ms_sleep(sleep_time.count());
}
}
processRequest(req);
});
#endif
}
}
}
mIdleThread = TRUE;
} }
// virtual // virtual
bool LLQueuedThread::runCondition() bool LLQueuedThread::runCondition()
{ {
// mRunCondition must be locked here // mRunCondition must be locked here
if (mRequestQueue.empty() && mIdleThread) if (mRequestQueue.size() == 0 && mIdleThread)
return false; return false;
else else
return true; return true;
@ -494,18 +515,13 @@ void LLQueuedThread::run()
startThread(); startThread();
mStarted = TRUE; mStarted = TRUE;
while (1)
/*while (1)
{ {
LL_PROFILE_ZONE_SCOPED;
// this will block on the condition until runCondition() returns true, the thread is unpaused, or the thread leaves the RUNNING state. // this will block on the condition until runCondition() returns true, the thread is unpaused, or the thread leaves the RUNNING state.
checkPause(); checkPause();
if (isQuitting())
{
LLTrace::get_thread_recorder()->pushToParent();
endThread();
break;
}
mIdleThread = FALSE; mIdleThread = FALSE;
threadedUpdate(); threadedUpdate();
@ -514,12 +530,18 @@ void LLQueuedThread::run()
if (pending_work == 0) if (pending_work == 0)
{ {
//LL_PROFILE_ZONE_NAMED("LLQueuedThread - sleep");
mIdleThread = TRUE; mIdleThread = TRUE;
ms_sleep(1); //ms_sleep(1);
} }
//LLThread::yield(); // thread should yield after each request //LLThread::yield(); // thread should yield after each request
} }*/
mRequestQueue.runUntilClose();
endThread();
LL_INFOS() << "LLQueuedThread " << mName << " EXITING." << LL_ENDL; LL_INFOS() << "LLQueuedThread " << mName << " EXITING." << LL_ENDL;
} }
// virtual // virtual
@ -539,10 +561,9 @@ void LLQueuedThread::threadedUpdate()
//============================================================================ //============================================================================
LLQueuedThread::QueuedRequest::QueuedRequest(LLQueuedThread::handle_t handle, U32 priority, U32 flags) : LLQueuedThread::QueuedRequest::QueuedRequest(LLQueuedThread::handle_t handle, U32 flags) :
LLSimpleHashEntry<LLQueuedThread::handle_t>(handle), LLSimpleHashEntry<LLQueuedThread::handle_t>(handle),
mStatus(STATUS_UNKNOWN), mStatus(STATUS_UNKNOWN),
mPriority(priority),
mFlags(flags) mFlags(flags)
{ {
} }
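The shape of the rewrite above, one LL::WorkQueue drained by a dedicated thread while other threads post callables into it, reduces to a sketch like this (pausing, request handles and abort handling omitted):

#include "workqueue.h"
#include <thread>

void workqueue_sketch()
{
    LL::WorkQueue queue("example", 1024 * 1024);

    std::thread worker([&queue]
    {
        // block here, running posted work, until close() is called
        queue.runUntilClose();
    });

    // producer side: hand one request's worth of work to the worker
    queue.post([]
    {
        // ... process the request ...
    });

    queue.close();     // stop accepting new work so the worker can finish and exit
    worker.join();
}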

View File

@ -36,6 +36,7 @@
#include "llthread.h" #include "llthread.h"
#include "llsimplehash.h" #include "llsimplehash.h"
#include "workqueue.h"
//============================================================================ //============================================================================
// Note: ~LLQueuedThread is O(N) N=# of queued threads, assumed to be small // Note: ~LLQueuedThread is O(N) N=# of queued threads, assumed to be small
@ -45,15 +46,6 @@ class LL_COMMON_API LLQueuedThread : public LLThread
{ {
//------------------------------------------------------------------------ //------------------------------------------------------------------------
public: public:
enum priority_t {
PRIORITY_IMMEDIATE = 0x7FFFFFFF,
PRIORITY_URGENT = 0x40000000,
PRIORITY_HIGH = 0x30000000,
PRIORITY_NORMAL = 0x20000000,
PRIORITY_LOW = 0x10000000,
PRIORITY_LOWBITS = 0x0FFFFFFF,
PRIORITY_HIGHBITS = 0x70000000
};
enum status_t { enum status_t {
STATUS_EXPIRED = -1, STATUS_EXPIRED = -1,
STATUS_UNKNOWN = 0, STATUS_UNKNOWN = 0,
@ -82,28 +74,17 @@ public:
virtual ~QueuedRequest(); // use deleteRequest() virtual ~QueuedRequest(); // use deleteRequest()
public: public:
QueuedRequest(handle_t handle, U32 priority, U32 flags = 0); QueuedRequest(handle_t handle, U32 flags = 0);
status_t getStatus() status_t getStatus()
{ {
return mStatus; return mStatus;
} }
U32 getPriority() const
{
return mPriority;
}
U32 getFlags() const U32 getFlags() const
{ {
return mFlags; return mFlags;
} }
bool higherPriority(const QueuedRequest& second) const
{
if ( mPriority == second.mPriority)
return mHashKey < second.mHashKey;
else
return mPriority > second.mPriority;
}
protected: protected:
status_t setStatus(status_t newstatus) status_t setStatus(status_t newstatus)
{ {
@ -121,28 +102,11 @@ public:
virtual void finishRequest(bool completed); // Always called from thread after request has completed or aborted virtual void finishRequest(bool completed); // Always called from thread after request has completed or aborted
virtual void deleteRequest(); // Only method to delete a request virtual void deleteRequest(); // Only method to delete a request
void setPriority(U32 pri)
{
// Only do this on a request that is not in a queued list!
mPriority = pri;
};
protected: protected:
LLAtomicBase<status_t> mStatus; LLAtomicBase<status_t> mStatus;
U32 mPriority;
U32 mFlags; U32 mFlags;
}; };
protected:
struct queued_request_less
{
bool operator()(const QueuedRequest* lhs, const QueuedRequest* rhs) const
{
return lhs->higherPriority(*rhs); // higher priority in front of queue (set)
}
};
//------------------------------------------------------------------------ //------------------------------------------------------------------------
public: public:
@ -167,7 +131,7 @@ private:
protected: protected:
handle_t generateHandle(); handle_t generateHandle();
bool addRequest(QueuedRequest* req); bool addRequest(QueuedRequest* req);
size_t processNextRequest(void); void processRequest(QueuedRequest* req);
void incQueue(); void incQueue();
public: public:
@ -186,7 +150,6 @@ public:
status_t getRequestStatus(handle_t handle); status_t getRequestStatus(handle_t handle);
void abortRequest(handle_t handle, bool autocomplete); void abortRequest(handle_t handle, bool autocomplete);
void setFlags(handle_t handle, U32 flags); void setFlags(handle_t handle, U32 flags);
void setPriority(handle_t handle, U32 priority);
bool completeRequest(handle_t handle); bool completeRequest(handle_t handle);
// This is public for support classes like LLWorkerThread, // This is public for support classes like LLWorkerThread,
// but generally the methods above should be used. // but generally the methods above should be used.
@ -200,8 +163,10 @@ protected:
BOOL mStarted; // required when mThreaded is false to call startThread() from update() BOOL mStarted; // required when mThreaded is false to call startThread() from update()
LLAtomicBool mIdleThread; // request queue is empty (or we are quitting) and the thread is idle LLAtomicBool mIdleThread; // request queue is empty (or we are quitting) and the thread is idle
typedef std::set<QueuedRequest*, queued_request_less> request_queue_t; //typedef std::set<QueuedRequest*, queued_request_less> request_queue_t;
request_queue_t mRequestQueue; //request_queue_t mRequestQueue;
LL::WorkQueue mRequestQueue;
LL::WorkQueue::weak_t mMainQueue;
enum { REQUEST_HASH_SIZE = 512 }; // must be power of 2 enum { REQUEST_HASH_SIZE = 512 }; // must be power of 2
typedef LLSimpleHash<handle_t, REQUEST_HASH_SIZE> request_hash_t; typedef LLSimpleHash<handle_t, REQUEST_HASH_SIZE> request_hash_t;

View File

@ -58,46 +58,14 @@
* to restore uniform distribution. * to restore uniform distribution.
*/ */
// *NOTE: The system rand implementation is probably not correct.
#define LL_USE_SYSTEM_RAND 0
#if LL_USE_SYSTEM_RAND
#include <cstdlib>
#endif
#if LL_USE_SYSTEM_RAND
class LLSeedRand
{
public:
LLSeedRand()
{
#if LL_WINDOWS
srand(LLUUID::getRandomSeed());
#else
srand48(LLUUID::getRandomSeed());
#endif
}
};
static LLSeedRand sRandomSeeder;
inline F64 ll_internal_random_double()
{
#if LL_WINDOWS
return (F64)rand() / (F64)RAND_MAX;
#else
return drand48();
#endif
}
inline F32 ll_internal_random_float()
{
#if LL_WINDOWS
return (F32)rand() / (F32)RAND_MAX;
#else
return (F32)drand48();
#endif
}
#else
static LLRandLagFib2281 gRandomGenerator(LLUUID::getRandomSeed()); static LLRandLagFib2281 gRandomGenerator(LLUUID::getRandomSeed());
inline F64 ll_internal_random_double()
// no default implementation, only specific F64 and F32 specializations
template <typename REAL>
inline REAL ll_internal_random();
template <>
inline F64 ll_internal_random<F64>()
{ {
// *HACK: Through experimentation, we have found that dual core // *HACK: Through experimentation, we have found that dual core
// CPUs (or at least multi-threaded processes) seem to // CPUs (or at least multi-threaded processes) seem to
@ -108,15 +76,35 @@ inline F64 ll_internal_random_double()
return rv; return rv;
} }
template <>
inline F32 ll_internal_random<F32>()
{
return F32(ll_internal_random<F64>());
}
/*------------------------------ F64 aliases -------------------------------*/
inline F64 ll_internal_random_double()
{
return ll_internal_random<F64>();
}
F64 ll_drand()
{
return ll_internal_random_double();
}
/*------------------------------ F32 aliases -------------------------------*/
inline F32 ll_internal_random_float() inline F32 ll_internal_random_float()
{ {
// The clamping rules are described above. return ll_internal_random<F32>();
F32 rv = (F32)gRandomGenerator();
if(!((rv >= 0.0f) && (rv < 1.0f))) return fmod(rv, 1.f);
return rv;
} }
#endif
F32 ll_frand()
{
return ll_internal_random_float();
}
/*-------------------------- clamped random range --------------------------*/
S32 ll_rand() S32 ll_rand()
{ {
return ll_rand(RAND_MAX); return ll_rand(RAND_MAX);
@ -130,42 +118,28 @@ S32 ll_rand(S32 val)
return rv; return rv;
} }
F32 ll_frand() template <typename REAL>
REAL ll_grand(REAL val)
{ {
return ll_internal_random_float(); // The clamping rules are described above.
REAL rv = ll_internal_random<REAL>() * val;
if(val > 0)
{
if(rv >= val) return REAL();
}
else
{
if(rv <= val) return REAL();
}
return rv;
} }
F32 ll_frand(F32 val) F32 ll_frand(F32 val)
{ {
// The clamping rules are described above. return ll_grand<F32>(val);
F32 rv = ll_internal_random_float() * val;
if(val > 0)
{
if(rv >= val) return 0.0f;
}
else
{
if(rv <= val) return 0.0f;
}
return rv;
}
F64 ll_drand()
{
return ll_internal_random_double();
} }
F64 ll_drand(F64 val) F64 ll_drand(F64 val)
{ {
// The clamping rules are described above. return ll_grand<F64>(val);
F64 rv = ll_internal_random_double() * val;
if(val > 0)
{
if(rv >= val) return 0.0;
}
else
{
if(rv <= val) return 0.0;
}
return rv;
} }
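A small usage sketch of the clamped helpers above; the guarantee preserved by ll_grand() is that the result never equals val, matching the clamping rules referenced in the comments:

#include "llrand.h"

void random_examples()
{
    F32 f = ll_frand();          // uniform in [0.0, 1.0)
    F32 s = ll_frand(-2.5f);     // uniform in (-2.5, 0.0]; never exactly -2.5
    F64 d = ll_drand(10.0);      // uniform in [0.0, 10.0); never exactly 10.0
    S32 i = ll_rand(5);          // uniform integer in [0, 5)
    (void)f; (void)s; (void)d; (void)i;
}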

View File

@ -475,6 +475,7 @@ LLSDNotationParser::~LLSDNotationParser()
// virtual // virtual
S32 LLSDNotationParser::doParse(std::istream& istr, LLSD& data, S32 max_depth) const S32 LLSDNotationParser::doParse(std::istream& istr, LLSD& data, S32 max_depth) const
{ {
LL_PROFILE_ZONE_SCOPED_CATEGORY_LLSD
// map: { string:object, string:object } // map: { string:object, string:object }
// array: [ object, object, object ] // array: [ object, object, object ]
// undef: ! // undef: !
@ -734,6 +735,7 @@ S32 LLSDNotationParser::doParse(std::istream& istr, LLSD& data, S32 max_depth) c
S32 LLSDNotationParser::parseMap(std::istream& istr, LLSD& map, S32 max_depth) const S32 LLSDNotationParser::parseMap(std::istream& istr, LLSD& map, S32 max_depth) const
{ {
LL_PROFILE_ZONE_SCOPED_CATEGORY_LLSD
// map: { string:object, string:object } // map: { string:object, string:object }
map = LLSD::emptyMap(); map = LLSD::emptyMap();
S32 parse_count = 0; S32 parse_count = 0;
@ -794,6 +796,7 @@ S32 LLSDNotationParser::parseMap(std::istream& istr, LLSD& map, S32 max_depth) c
S32 LLSDNotationParser::parseArray(std::istream& istr, LLSD& array, S32 max_depth) const S32 LLSDNotationParser::parseArray(std::istream& istr, LLSD& array, S32 max_depth) const
{ {
LL_PROFILE_ZONE_SCOPED_CATEGORY_LLSD
// array: [ object, object, object ] // array: [ object, object, object ]
array = LLSD::emptyArray(); array = LLSD::emptyArray();
S32 parse_count = 0; S32 parse_count = 0;
@ -833,6 +836,7 @@ S32 LLSDNotationParser::parseArray(std::istream& istr, LLSD& array, S32 max_dept
bool LLSDNotationParser::parseString(std::istream& istr, LLSD& data) const bool LLSDNotationParser::parseString(std::istream& istr, LLSD& data) const
{ {
LL_PROFILE_ZONE_SCOPED_CATEGORY_LLSD
std::string value; std::string value;
auto count = deserialize_string(istr, value, mMaxBytesLeft); auto count = deserialize_string(istr, value, mMaxBytesLeft);
if(PARSE_FAILURE == count) return false; if(PARSE_FAILURE == count) return false;
@ -843,6 +847,7 @@ bool LLSDNotationParser::parseString(std::istream& istr, LLSD& data) const
bool LLSDNotationParser::parseBinary(std::istream& istr, LLSD& data) const bool LLSDNotationParser::parseBinary(std::istream& istr, LLSD& data) const
{ {
LL_PROFILE_ZONE_SCOPED_CATEGORY_LLSD
// binary: b##"ff3120ab1" // binary: b##"ff3120ab1"
// or: b(len)"..." // or: b(len)"..."
@ -945,6 +950,7 @@ LLSDBinaryParser::~LLSDBinaryParser()
// virtual // virtual
S32 LLSDBinaryParser::doParse(std::istream& istr, LLSD& data, S32 max_depth) const S32 LLSDBinaryParser::doParse(std::istream& istr, LLSD& data, S32 max_depth) const
{ {
LL_PROFILE_ZONE_SCOPED_CATEGORY_LLSD
/** /**
* Undefined: '!'<br> * Undefined: '!'<br>
* Boolean: '1' for true '0' for false<br> * Boolean: '1' for true '0' for false<br>

View File

@ -923,6 +923,8 @@ void LLSDXMLParser::parsePart(const char *buf, llssize len)
// virtual // virtual
S32 LLSDXMLParser::doParse(std::istream& input, LLSD& data, S32 max_depth) const S32 LLSDXMLParser::doParse(std::istream& input, LLSD& data, S32 max_depth) const
{ {
LL_PROFILE_ZONE_SCOPED_CATEGORY_LLSD
#ifdef XML_PARSER_PERFORMANCE_TESTS #ifdef XML_PARSER_PERFORMANCE_TESTS
XML_Timer timer( &parseTime ); XML_Timer timer( &parseTime );
#endif // XML_PARSER_PERFORMANCE_TESTS #endif // XML_PARSER_PERFORMANCE_TESTS

View File

@ -771,20 +771,28 @@ static U32Kilobytes LLMemoryAdjustKBResult(U32Kilobytes inKB)
} }
#endif #endif
#if LL_DARWIN
// static
U32Kilobytes LLMemoryInfo::getHardwareMemSize()
{
// This might work on Linux as well. Someone check...
uint64_t phys = 0;
int mib[2] = { CTL_HW, HW_MEMSIZE };
size_t len = sizeof(phys);
sysctl(mib, 2, &phys, &len, NULL, 0);
return U64Bytes(phys);
}
#endif
U32Kilobytes LLMemoryInfo::getPhysicalMemoryKB() const U32Kilobytes LLMemoryInfo::getPhysicalMemoryKB() const
{ {
#if LL_WINDOWS #if LL_WINDOWS
return LLMemoryAdjustKBResult(U32Kilobytes(mStatsMap["Total Physical KB"].asInteger())); return LLMemoryAdjustKBResult(U32Kilobytes(mStatsMap["Total Physical KB"].asInteger()));
#elif LL_DARWIN #elif LL_DARWIN
// This might work on Linux as well. Someone check... return getHardwareMemSize();
uint64_t phys = 0;
int mib[2] = { CTL_HW, HW_MEMSIZE };
size_t len = sizeof(phys);
sysctl(mib, 2, &phys, &len, NULL, 0);
return U64Bytes(phys);
#elif LL_LINUX #elif LL_LINUX
U64 phys = 0; U64 phys = 0;

View File

@ -129,7 +129,10 @@ public:
LLMemoryInfo(); ///< Default constructor LLMemoryInfo(); ///< Default constructor
void stream(std::ostream& s) const; ///< output text info to s void stream(std::ostream& s) const; ///< output text info to s
U32Kilobytes getPhysicalMemoryKB() const; U32Kilobytes getPhysicalMemoryKB() const;
#if LL_DARWIN
static U32Kilobytes getHardwareMemSize(); // Because some Mac linkers won't let us reference extern gSysMemory from a different lib.
#endif
//get the available memory infomation in KiloBytes. //get the available memory infomation in KiloBytes.
static void getAvailableMemoryKB(U32Kilobytes& avail_physical_mem_kb, U32Kilobytes& avail_virtual_mem_kb); static void getAvailableMemoryKB(U32Kilobytes& avail_physical_mem_kb, U32Kilobytes& avail_virtual_mem_kb);

View File

@ -42,6 +42,7 @@
#ifdef LL_WINDOWS #ifdef LL_WINDOWS
const DWORD MS_VC_EXCEPTION=0x406D1388; const DWORD MS_VC_EXCEPTION=0x406D1388;
#pragma pack(push,8) #pragma pack(push,8)
@ -133,6 +134,15 @@ void LLThread::threadRun()
{ {
#ifdef LL_WINDOWS #ifdef LL_WINDOWS
set_thread_name(-1, mName.c_str()); set_thread_name(-1, mName.c_str());
#if 0 // probably a bad idea, see usage of SetThreadIdealProcessor in LLWindowWin32)
HANDLE hThread = GetCurrentThread();
if (hThread)
{
SetThreadAffinityMask(hThread, (DWORD_PTR) 0xFFFFFFFFFFFFFFFE);
}
#endif
#endif #endif
LL_PROFILER_SET_THREAD_NAME( mName.c_str() ); LL_PROFILER_SET_THREAD_NAME( mName.c_str() );

View File

@ -30,6 +30,9 @@
#include "u64.h" #include "u64.h"
#include <chrono>
#include <thread>
#if LL_WINDOWS #if LL_WINDOWS
# include "llwin32headerslean.h" # include "llwin32headerslean.h"
#elif LL_LINUX || LL_DARWIN #elif LL_LINUX || LL_DARWIN
@ -62,9 +65,18 @@ LLTimer* LLTimer::sTimer = NULL;
//--------------------------------------------------------------------------- //---------------------------------------------------------------------------
#if LL_WINDOWS #if LL_WINDOWS
#if 0
void ms_sleep(U32 ms) void ms_sleep(U32 ms)
{ {
Sleep(ms); LL_PROFILE_ZONE_SCOPED;
using TimePoint = std::chrono::steady_clock::time_point;
auto resume_time = TimePoint::clock::now() + std::chrono::milliseconds(ms);
while (TimePoint::clock::now() < resume_time)
{
std::this_thread::yield(); //note: don't use LLThread::yield here to avoid yielding for too long
}
} }
U32 micro_sleep(U64 us, U32 max_yields) U32 micro_sleep(U64 us, U32 max_yields)
@ -74,6 +86,35 @@ U32 micro_sleep(U64 us, U32 max_yields)
ms_sleep((U32)(us / 1000)); ms_sleep((U32)(us / 1000));
return 0; return 0;
} }
#else
U32 micro_sleep(U64 us, U32 max_yields)
{
LL_PROFILE_ZONE_SCOPED
#if 0
LARGE_INTEGER ft;
ft.QuadPart = -static_cast<S64>(us * 10); // '-' using relative time
HANDLE timer = CreateWaitableTimer(NULL, TRUE, NULL);
SetWaitableTimer(timer, &ft, 0, NULL, NULL, 0);
WaitForSingleObject(timer, INFINITE);
CloseHandle(timer);
#else
Sleep(us / 1000);
#endif
return 0;
}
void ms_sleep(U32 ms)
{
LL_PROFILE_ZONE_SCOPED
micro_sleep(ms * 1000, 0);
}
#endif
#elif LL_LINUX || LL_DARWIN #elif LL_LINUX || LL_DARWIN
static void _sleep_loop(struct timespec& thiswait) static void _sleep_loop(struct timespec& thiswait)
{ {

File diff suppressed because it is too large Load Diff

View File

@ -73,6 +73,7 @@ void LLWorkerThread::clearDeleteList()
{ {
worker->mRequestHandle = LLWorkerThread::nullHandle(); worker->mRequestHandle = LLWorkerThread::nullHandle();
worker->clearFlags(LLWorkerClass::WCF_HAVE_WORK); worker->clearFlags(LLWorkerClass::WCF_HAVE_WORK);
worker->clearFlags(LLWorkerClass::WCF_WORKING);
delete worker; delete worker;
} }
mDeleteList.clear() ; mDeleteList.clear() ;
@ -97,6 +98,7 @@ size_t LLWorkerThread::update(F32 max_time_ms)
{ {
if (worker->getFlags(LLWorkerClass::WCF_WORK_FINISHED)) if (worker->getFlags(LLWorkerClass::WCF_WORK_FINISHED))
{ {
worker->setFlags(LLWorkerClass::WCF_DELETE_REQUESTED);
delete_list.push_back(worker); delete_list.push_back(worker);
mDeleteList.erase(curiter); mDeleteList.erase(curiter);
} }
@ -130,11 +132,11 @@ size_t LLWorkerThread::update(F32 max_time_ms)
//---------------------------------------------------------------------------- //----------------------------------------------------------------------------
LLWorkerThread::handle_t LLWorkerThread::addWorkRequest(LLWorkerClass* workerclass, S32 param, U32 priority) LLWorkerThread::handle_t LLWorkerThread::addWorkRequest(LLWorkerClass* workerclass, S32 param)
{ {
handle_t handle = generateHandle(); handle_t handle = generateHandle();
WorkRequest* req = new WorkRequest(handle, priority, workerclass, param); WorkRequest* req = new WorkRequest(handle, workerclass, param);
bool res = addRequest(req); bool res = addRequest(req);
if (!res) if (!res)
@ -157,8 +159,8 @@ void LLWorkerThread::deleteWorker(LLWorkerClass* workerclass)
//============================================================================ //============================================================================
// Runs on its OWN thread // Runs on its OWN thread
LLWorkerThread::WorkRequest::WorkRequest(handle_t handle, U32 priority, LLWorkerClass* workerclass, S32 param) : LLWorkerThread::WorkRequest::WorkRequest(handle_t handle, LLWorkerClass* workerclass, S32 param) :
LLQueuedThread::QueuedRequest(handle, priority), LLQueuedThread::QueuedRequest(handle),
mWorkerClass(workerclass), mWorkerClass(workerclass),
mParam(param) mParam(param)
{ {
@ -177,6 +179,7 @@ void LLWorkerThread::WorkRequest::deleteRequest()
// virtual // virtual
bool LLWorkerThread::WorkRequest::processRequest() bool LLWorkerThread::WorkRequest::processRequest()
{ {
LL_PROFILE_ZONE_SCOPED;
LLWorkerClass* workerclass = getWorkerClass(); LLWorkerClass* workerclass = getWorkerClass();
workerclass->setWorking(true); workerclass->setWorking(true);
bool complete = workerclass->doWork(getParam()); bool complete = workerclass->doWork(getParam());
@ -187,6 +190,7 @@ bool LLWorkerThread::WorkRequest::processRequest()
// virtual // virtual
void LLWorkerThread::WorkRequest::finishRequest(bool completed) void LLWorkerThread::WorkRequest::finishRequest(bool completed)
{ {
LL_PROFILE_ZONE_SCOPED;
LLWorkerClass* workerclass = getWorkerClass(); LLWorkerClass* workerclass = getWorkerClass();
workerclass->finishWork(getParam(), completed); workerclass->finishWork(getParam(), completed);
U32 flags = LLWorkerClass::WCF_WORK_FINISHED | (completed ? 0 : LLWorkerClass::WCF_WORK_ABORTED); U32 flags = LLWorkerClass::WCF_WORK_FINISHED | (completed ? 0 : LLWorkerClass::WCF_WORK_ABORTED);
@ -200,7 +204,6 @@ LLWorkerClass::LLWorkerClass(LLWorkerThread* workerthread, const std::string& na
: mWorkerThread(workerthread), : mWorkerThread(workerthread),
mWorkerClassName(name), mWorkerClassName(name),
mRequestHandle(LLWorkerThread::nullHandle()), mRequestHandle(LLWorkerThread::nullHandle()),
mRequestPriority(LLWorkerThread::PRIORITY_NORMAL),
mMutex(), mMutex(),
mWorkFlags(0) mWorkFlags(0)
{ {
@ -289,7 +292,7 @@ bool LLWorkerClass::yield()
//---------------------------------------------------------------------------- //----------------------------------------------------------------------------
// calls startWork, adds doWork() to queue // calls startWork, adds doWork() to queue
void LLWorkerClass::addWork(S32 param, U32 priority) void LLWorkerClass::addWork(S32 param)
{ {
mMutex.lock(); mMutex.lock();
llassert_always(!(mWorkFlags & (WCF_WORKING|WCF_HAVE_WORK))); llassert_always(!(mWorkFlags & (WCF_WORKING|WCF_HAVE_WORK)));
@ -303,7 +306,7 @@ void LLWorkerClass::addWork(S32 param, U32 priority)
startWork(param); startWork(param);
clearFlags(WCF_WORK_FINISHED|WCF_WORK_ABORTED); clearFlags(WCF_WORK_FINISHED|WCF_WORK_ABORTED);
setFlags(WCF_HAVE_WORK); setFlags(WCF_HAVE_WORK);
mRequestHandle = mWorkerThread->addWorkRequest(this, param, priority); mRequestHandle = mWorkerThread->addWorkRequest(this, param);
mMutex.unlock(); mMutex.unlock();
} }
@ -318,7 +321,6 @@ void LLWorkerClass::abortWork(bool autocomplete)
if (mRequestHandle != LLWorkerThread::nullHandle()) if (mRequestHandle != LLWorkerThread::nullHandle())
{ {
mWorkerThread->abortRequest(mRequestHandle, autocomplete); mWorkerThread->abortRequest(mRequestHandle, autocomplete);
mWorkerThread->setPriority(mRequestHandle, LLQueuedThread::PRIORITY_IMMEDIATE);
setFlags(WCF_ABORT_REQUESTED); setFlags(WCF_ABORT_REQUESTED);
} }
mMutex.unlock(); mMutex.unlock();
@ -392,16 +394,5 @@ void LLWorkerClass::scheduleDelete()
} }
} }
void LLWorkerClass::setPriority(U32 priority)
{
mMutex.lock();
if (mRequestHandle != LLWorkerThread::nullHandle() && mRequestPriority != priority)
{
mRequestPriority = priority;
mWorkerThread->setPriority(mRequestHandle, priority);
}
mMutex.unlock();
}
//============================================================================ //============================================================================

View File

@ -56,7 +56,7 @@ public:
virtual ~WorkRequest(); // use deleteRequest() virtual ~WorkRequest(); // use deleteRequest()
public: public:
WorkRequest(handle_t handle, U32 priority, LLWorkerClass* workerclass, S32 param); WorkRequest(handle_t handle, LLWorkerClass* workerclass, S32 param);
S32 getParam() S32 getParam()
{ {
@ -90,7 +90,7 @@ public:
/*virtual*/ size_t update(F32 max_time_ms); /*virtual*/ size_t update(F32 max_time_ms);
handle_t addWorkRequest(LLWorkerClass* workerclass, S32 param, U32 priority = PRIORITY_NORMAL); handle_t addWorkRequest(LLWorkerClass* workerclass, S32 param);
S32 getNumDeletes() { return (S32)mDeleteList.size(); } // debug S32 getNumDeletes() { return (S32)mDeleteList.size(); } // debug
@ -151,10 +151,6 @@ public:
bool isWorking() { return getFlags(WCF_WORKING); } bool isWorking() { return getFlags(WCF_WORKING); }
bool wasAborted() { return getFlags(WCF_ABORT_REQUESTED); } bool wasAborted() { return getFlags(WCF_ABORT_REQUESTED); }
// setPriority(): changes the priority of a request
void setPriority(U32 priority);
U32 getPriority() { return mRequestPriority; }
const std::string& getName() const { return mWorkerClassName; } const std::string& getName() const { return mWorkerClassName; }
protected: protected:
@ -169,7 +165,7 @@ protected:
void setWorkerThread(LLWorkerThread* workerthread); void setWorkerThread(LLWorkerThread* workerthread);
// addWork(): calls startWork, adds doWork() to queue // addWork(): calls startWork, adds doWork() to queue
void addWork(S32 param, U32 priority = LLWorkerThread::PRIORITY_NORMAL); void addWork(S32 param);
// abortWork(): requests that work be aborted // abortWork(): requests that work be aborted
void abortWork(bool autocomplete); void abortWork(bool autocomplete);
@ -193,7 +189,6 @@ protected:
LLWorkerThread* mWorkerThread; LLWorkerThread* mWorkerThread;
std::string mWorkerClassName; std::string mWorkerClassName;
handle_t mRequestHandle; handle_t mRequestHandle;
U32 mRequestPriority; // last priority set
private: private:
LLMutex mMutex; LLMutex mMutex;

View File

@ -17,8 +17,6 @@
// std headers // std headers
#include <functional> #include <functional>
// external library headers // external library headers
#include <boost/assign/list_of.hpp>
#include <boost/phoenix/core/argument.hpp>
// other Linden headers // other Linden headers
#include "../test/lltut.h" #include "../test/lltut.h"
#include "../test/namedtempfile.h" #include "../test/namedtempfile.h"
@ -30,10 +28,6 @@
#include "stringize.h" #include "stringize.h"
#include "StringVec.h" #include "StringVec.h"
using boost::assign::list_of;
StringVec sv(const StringVec& listof) { return listof; }
#if defined(LL_WINDOWS) #if defined(LL_WINDOWS)
#define sleep(secs) _sleep((secs) * 1000) #define sleep(secs) _sleep((secs) * 1000)
@ -104,17 +98,12 @@ namespace tut
llleap_data(): llleap_data():
reader(".py", reader(".py",
// This logic is adapted from vita.viewerclient.receiveEvent() // This logic is adapted from vita.viewerclient.receiveEvent()
boost::phoenix::placeholders::arg1 << [](std::ostream& out){ out <<
"import re\n" "import re\n"
"import os\n" "import os\n"
"import sys\n" "import sys\n"
"\n" "\n"
"try:\n" "import llsd\n"
// new freestanding llsd package
" import llsd\n"
"except ImportError:\n"
// older llbase.llsd module
" from llbase import llsd\n"
"\n" "\n"
"class ProtocolError(Exception):\n" "class ProtocolError(Exception):\n"
" def __init__(self, msg, data):\n" " def __init__(self, msg, data):\n"
@ -193,7 +182,7 @@ namespace tut
"def request(pump, data):\n" "def request(pump, data):\n"
" # we expect 'data' is a dict\n" " # we expect 'data' is a dict\n"
" data['reply'] = _reply\n" " data['reply'] = _reply\n"
" send(pump, data)\n"), " send(pump, data)\n";}),
// Get the actual pathname of the NamedExtTempFile and trim off // Get the actual pathname of the NamedExtTempFile and trim off
// the ".py" extension. (We could cache reader.getName() in a // the ".py" extension. (We could cache reader.getName() in a
// separate member variable, but I happen to know getName() just // separate member variable, but I happen to know getName() just
@ -218,14 +207,14 @@ namespace tut
void object::test<1>() void object::test<1>()
{ {
set_test_name("multiple LLLeap instances"); set_test_name("multiple LLLeap instances");
NamedTempFile script("py", NamedExtTempFile script("py",
"import time\n" "import time\n"
"time.sleep(1)\n"); "time.sleep(1)\n");
LLLeapVector instances; LLLeapVector instances;
instances.push_back(LLLeap::create(get_test_name(), instances.push_back(LLLeap::create(get_test_name(),
sv(list_of(PYTHON)(script.getName())))->getWeak()); StringVec{PYTHON, script.getName()})->getWeak());
instances.push_back(LLLeap::create(get_test_name(), instances.push_back(LLLeap::create(get_test_name(),
sv(list_of(PYTHON)(script.getName())))->getWeak()); StringVec{PYTHON, script.getName()})->getWeak());
// In this case we're simply establishing that two LLLeap instances // In this case we're simply establishing that two LLLeap instances
// can coexist without throwing exceptions or bombing in any other // can coexist without throwing exceptions or bombing in any other
// way. Wait for them to terminate. // way. Wait for them to terminate.
@ -236,10 +225,10 @@ namespace tut
void object::test<2>() void object::test<2>()
{ {
set_test_name("stderr to log"); set_test_name("stderr to log");
NamedTempFile script("py", NamedExtTempFile script("py",
"import sys\n" "import sys\n"
"sys.stderr.write('''Hello from Python!\n" "sys.stderr.write('''Hello from Python!\n"
"note partial line''')\n"); "note partial line''')\n");
StringVec vcommand{ PYTHON, script.getName() }; StringVec vcommand{ PYTHON, script.getName() };
CaptureLog log(LLError::LEVEL_INFO); CaptureLog log(LLError::LEVEL_INFO);
waitfor(LLLeap::create(get_test_name(), vcommand)); waitfor(LLLeap::create(get_test_name(), vcommand));
@ -251,11 +240,11 @@ namespace tut
void object::test<3>() void object::test<3>()
{ {
set_test_name("bad stdout protocol"); set_test_name("bad stdout protocol");
NamedTempFile script("py", NamedExtTempFile script("py",
"print('Hello from Python!')\n"); "print('Hello from Python!')\n");
CaptureLog log(LLError::LEVEL_WARN); CaptureLog log(LLError::LEVEL_WARN);
waitfor(LLLeap::create(get_test_name(), waitfor(LLLeap::create(get_test_name(),
sv(list_of(PYTHON)(script.getName())))); StringVec{PYTHON, script.getName()}));
ensure_contains("error log line", ensure_contains("error log line",
log.messageWith("invalid protocol"), "Hello from Python!"); log.messageWith("invalid protocol"), "Hello from Python!");
} }
@ -264,13 +253,13 @@ namespace tut
void object::test<4>() void object::test<4>()
{ {
set_test_name("leftover stdout"); set_test_name("leftover stdout");
NamedTempFile script("py", NamedExtTempFile script("py",
"import sys\n" "import sys\n"
// note lack of newline // note lack of newline
"sys.stdout.write('Hello from Python!')\n"); "sys.stdout.write('Hello from Python!')\n");
CaptureLog log(LLError::LEVEL_WARN); CaptureLog log(LLError::LEVEL_WARN);
waitfor(LLLeap::create(get_test_name(), waitfor(LLLeap::create(get_test_name(),
sv(list_of(PYTHON)(script.getName())))); StringVec{PYTHON, script.getName()}));
ensure_contains("error log line", ensure_contains("error log line",
log.messageWith("Discarding"), "Hello from Python!"); log.messageWith("Discarding"), "Hello from Python!");
} }
@ -279,12 +268,12 @@ namespace tut
void object::test<5>() void object::test<5>()
{ {
set_test_name("bad stdout len prefix"); set_test_name("bad stdout len prefix");
NamedTempFile script("py", NamedExtTempFile script("py",
"import sys\n" "import sys\n"
"sys.stdout.write('5a2:something')\n"); "sys.stdout.write('5a2:something')\n");
CaptureLog log(LLError::LEVEL_WARN); CaptureLog log(LLError::LEVEL_WARN);
waitfor(LLLeap::create(get_test_name(), waitfor(LLLeap::create(get_test_name(),
sv(list_of(PYTHON)(script.getName())))); StringVec{PYTHON, script.getName()}));
ensure_contains("error log line", ensure_contains("error log line",
log.messageWith("invalid protocol"), "5a2:"); log.messageWith("invalid protocol"), "5a2:");
} }
@ -386,17 +375,18 @@ namespace tut
set_test_name("round trip"); set_test_name("round trip");
AckAPI api; AckAPI api;
Result result; Result result;
NamedTempFile script("py", NamedExtTempFile script("py",
boost::phoenix::placeholders::arg1 << [&](std::ostream& out){ out <<
"from " << reader_module << " import *\n" "from " << reader_module << " import *\n"
// make a request on our little API // make a request on our little API
"request(pump='" << api.getName() << "', data={})\n" "request(pump='" << api.getName() << "', data={})\n"
// wait for its response // wait for its response
"resp = get()\n" "resp = get()\n"
"result = '' if resp == dict(pump=replypump(), data='ack')\\\n" "result = '' if resp == dict(pump=replypump(), data='ack')\\\n"
" else 'bad: ' + str(resp)\n" " else 'bad: ' + str(resp)\n"
"send(pump='" << result.getName() << "', data=result)\n"); "send(pump='" << result.getName() << "', data=result)\n";});
waitfor(LLLeap::create(get_test_name(), sv(list_of(PYTHON)(script.getName())))); waitfor(LLLeap::create(get_test_name(),
StringVec{PYTHON, script.getName()}));
result.ensure(); result.ensure();
} }
@ -424,38 +414,38 @@ namespace tut
// iterations etc. in OS pipes and the LLLeap/LLProcess implementation. // iterations etc. in OS pipes and the LLLeap/LLProcess implementation.
ReqIDAPI api; ReqIDAPI api;
Result result; Result result;
NamedTempFile script("py", NamedExtTempFile script("py",
boost::phoenix::placeholders::arg1 << [&](std::ostream& out){ out <<
"import sys\n" "import sys\n"
"from " << reader_module << " import *\n" "from " << reader_module << " import *\n"
// Note that since reader imports llsd, this // Note that since reader imports llsd, this
// 'import *' gets us llsd too. // 'import *' gets us llsd too.
"sample = llsd.format_notation(dict(pump='" << "sample = llsd.format_notation(dict(pump='" <<
api.getName() << "', data=dict(reqid=999999, reply=replypump())))\n" api.getName() << "', data=dict(reqid=999999, reply=replypump())))\n"
// The whole packet has length prefix too: "len:data" // The whole packet has length prefix too: "len:data"
"samplen = len(str(len(sample))) + 1 + len(sample)\n" "samplen = len(str(len(sample))) + 1 + len(sample)\n"
// guess how many messages it will take to // guess how many messages it will take to
// accumulate BUFFERED_LENGTH // accumulate BUFFERED_LENGTH
"count = int(" << BUFFERED_LENGTH << "/samplen)\n" "count = int(" << BUFFERED_LENGTH << "/samplen)\n"
"print('Sending %s requests' % count, file=sys.stderr)\n" "print('Sending %s requests' % count, file=sys.stderr)\n"
"for i in range(count):\n" "for i in range(count):\n"
" request('" << api.getName() << "', dict(reqid=i))\n" " request('" << api.getName() << "', dict(reqid=i))\n"
// The assumption in this specific test that // The assumption in this specific test that
// replies will arrive in the same order as // replies will arrive in the same order as
// requests is ONLY valid because the API we're // requests is ONLY valid because the API we're
// invoking sends replies instantly. If the API // invoking sends replies instantly. If the API
// had to wait for some external event before // had to wait for some external event before
// sending its reply, replies could arrive in // sending its reply, replies could arrive in
// arbitrary order, and we'd have to tick them // arbitrary order, and we'd have to tick them
// off from a set. // off from a set.
"result = ''\n" "result = ''\n"
"for i in range(count):\n" "for i in range(count):\n"
" resp = get()\n" " resp = get()\n"
" if resp['data']['reqid'] != i:\n" " if resp['data']['reqid'] != i:\n"
" result = 'expected reqid=%s in %s' % (i, resp)\n" " result = 'expected reqid=%s in %s' % (i, resp)\n"
" break\n" " break\n"
"send(pump='" << result.getName() << "', data=result)\n"); "send(pump='" << result.getName() << "', data=result)\n";});
waitfor(LLLeap::create(get_test_name(), sv(list_of(PYTHON)(script.getName()))), waitfor(LLLeap::create(get_test_name(), StringVec{PYTHON, script.getName()}),
300); // needs more realtime than most tests 300); // needs more realtime than most tests
result.ensure(); result.ensure();
} }
@ -467,65 +457,62 @@ namespace tut
{ {
ReqIDAPI api; ReqIDAPI api;
Result result; Result result;
NamedTempFile script("py", NamedExtTempFile script("py",
boost::phoenix::placeholders::arg1 << [&](std::ostream& out){ out <<
"import sys\n" "import sys\n"
"from " << reader_module << " import *\n" "from " << reader_module << " import *\n"
// Generate a very large string value. // Generate a very large string value.
"desired = int(sys.argv[1])\n" "desired = int(sys.argv[1])\n"
// 7 chars per item: 6 digits, 1 comma // 7 chars per item: 6 digits, 1 comma
"count = int((desired - 50)/7)\n" "count = int((desired - 50)/7)\n"
"large = ''.join('%06d,' % i for i in range(count))\n" "large = ''.join('%06d,' % i for i in range(count))\n"
// Pass 'large' as reqid because we know the API // Pass 'large' as reqid because we know the API
// will echo reqid, and we want to receive it back. // will echo reqid, and we want to receive it back.
"request('" << api.getName() << "', dict(reqid=large))\n" "request('" << api.getName() << "', dict(reqid=large))\n"
"try:\n" "try:\n"
" resp = get()\n" " resp = get()\n"
"except ParseError as e:\n" "except ParseError as e:\n"
" # try to find where e.data diverges from expectation\n" " # try to find where e.data diverges from expectation\n"
// Normally we'd expect a 'pump' key in there, // Normally we'd expect a 'pump' key in there,
// too, with value replypump(). But Python // too, with value replypump(). But Python
// serializes keys in a different order than C++, // serializes keys in a different order than C++,
// so incoming data start with 'data'. // so incoming data start with 'data'.
// Truthfully, though, if we get as far as 'pump' // Truthfully, though, if we get as far as 'pump'
// before we find a difference, something's very // before we find a difference, something's very
// strange. // strange.
" expect = llsd.format_notation(dict(data=dict(reqid=large)))\n" " expect = llsd.format_notation(dict(data=dict(reqid=large)))\n"
" chunk = 40\n" " chunk = 40\n"
" for offset in range(0, max(len(e.data), len(expect)), chunk):\n" " for offset in range(0, max(len(e.data), len(expect)), chunk):\n"
" if e.data[offset:offset+chunk] != \\\n" " if e.data[offset:offset+chunk] != \\\n"
" expect[offset:offset+chunk]:\n" " expect[offset:offset+chunk]:\n"
" print('Offset %06d: expect %r,\\n'\\\n" " print('Offset %06d: expect %r,\\n'\\\n"
" ' get %r' %\\\n" " ' get %r' %\\\n"
" (offset,\n" " (offset,\n"
" expect[offset:offset+chunk],\n" " expect[offset:offset+chunk],\n"
" e.data[offset:offset+chunk]),\n" " e.data[offset:offset+chunk]),\n"
" file=sys.stderr)\n" " file=sys.stderr)\n"
" break\n" " break\n"
" else:\n" " else:\n"
" print('incoming data matches expect?!', file=sys.stderr)\n" " print('incoming data matches expect?!', file=sys.stderr)\n"
" send('" << result.getName() << "', '%s: %s' % (e.__class__.__name__, e))\n" " send('" << result.getName() << "', '%s: %s' % (e.__class__.__name__, e))\n"
" sys.exit(1)\n" " sys.exit(1)\n"
"\n" "\n"
"echoed = resp['data']['reqid']\n" "echoed = resp['data']['reqid']\n"
"if echoed == large:\n" "if echoed == large:\n"
" send('" << result.getName() << "', '')\n" " send('" << result.getName() << "', '')\n"
" sys.exit(0)\n" " sys.exit(0)\n"
// Here we know echoed did NOT match; try to find where // Here we know echoed did NOT match; try to find where
"for i in range(count):\n" "for i in range(count):\n"
" start = 7*i\n" " start = 7*i\n"
" end = 7*(i+1)\n" " end = 7*(i+1)\n"
" if end > len(echoed)\\\n" " if end > len(echoed)\\\n"
" or echoed[start:end] != large[start:end]:\n" " or echoed[start:end] != large[start:end]:\n"
" send('" << result.getName() << "',\n" " send('" << result.getName() << "',\n"
" 'at offset %s, expected %r but got %r' %\n" " 'at offset %s, expected %r but got %r' %\n"
" (start, large[start:end], echoed[start:end]))\n" " (start, large[start:end], echoed[start:end]))\n"
"sys.exit(1)\n"); "sys.exit(1)\n";});
waitfor(LLLeap::create(test_name, waitfor(LLLeap::create(test_name,
sv(list_of StringVec{PYTHON, script.getName(), stringize(size)}),
(PYTHON)
(script.getName())
(stringize(size)))),
180); // try a longer timeout 180); // try a longer timeout
result.ensure(); result.ensure();
} }

View File

@ -151,8 +151,38 @@ struct PythonProcessLauncher
/// Launch Python script; verify that it launched /// Launch Python script; verify that it launched
void launch() void launch()
{ {
mPy = LLProcess::create(mParams); try
tut::ensure(STRINGIZE("Couldn't launch " << mDesc << " script"), bool(mPy)); {
mPy = LLProcess::create(mParams);
tut::ensure(STRINGIZE("Couldn't launch " << mDesc << " script"), bool(mPy));
}
catch (const tut::failure&)
{
// On Windows, if APR_LOG is set, our version of APR's
// apr_create_proc() logs to the specified file. If this test
// failed, try to report that log.
const char* APR_LOG = getenv("APR_LOG");
if (APR_LOG && *APR_LOG)
{
std::ifstream inf(APR_LOG);
if (! inf.is_open())
{
LL_WARNS() << "Couldn't open '" << APR_LOG << "'" << LL_ENDL;
}
else
{
LL_WARNS() << "==============================" << LL_ENDL;
LL_WARNS() << "From '" << APR_LOG << "':" << LL_ENDL;
std::string line;
while (std::getline(inf, line))
{
LL_WARNS() << line << LL_ENDL;
}
LL_WARNS() << "==============================" << LL_ENDL;
}
}
throw;
}
} }
/// Run Python script and wait for it to complete. /// Run Python script and wait for it to complete.
@ -191,7 +221,7 @@ struct PythonProcessLauncher
LLProcess::Params mParams; LLProcess::Params mParams;
LLProcessPtr mPy; LLProcessPtr mPy;
std::string mDesc; std::string mDesc;
NamedTempFile mScript; NamedExtTempFile mScript;
}; };
/// convenience function for PythonProcessLauncher::run() /// convenience function for PythonProcessLauncher::run()
@ -214,30 +244,26 @@ static std::string python_out(const std::string& desc, const CONTENT& script)
class NamedTempDir: public boost::noncopyable class NamedTempDir: public boost::noncopyable
{ {
public: public:
// Use python() function to create a temp directory: I've found
// nothing in either Boost.Filesystem or APR quite like Python's
// tempfile.mkdtemp().
// Special extra bonus: on Mac, mkdtemp() reports a pathname
// starting with /var/folders/something, whereas that's really a
// symlink to /private/var/folders/something. Have to use
// realpath() to compare properly.
NamedTempDir(): NamedTempDir():
mPath(python_out("mkdtemp()", mPath(NamedTempFile::temp_path()),
"from __future__ import with_statement\n" mCreated(boost::filesystem::create_directories(mPath))
"import os.path, sys, tempfile\n" {
"with open(sys.argv[1], 'w') as f:\n" mPath = boost::filesystem::canonical(mPath);
" f.write(os.path.normcase(os.path.normpath(os.path.realpath(tempfile.mkdtemp()))))\n")) }
{}
~NamedTempDir() ~NamedTempDir()
{ {
aprchk(apr_dir_remove(mPath.c_str(), gAPRPoolp)); if (mCreated)
{
boost::filesystem::remove_all(mPath);
}
} }
std::string getName() const { return mPath; } std::string getName() const { return mPath.string(); }
private: private:
std::string mPath; boost::filesystem::path mPath;
bool mCreated;
}; };
/***************************************************************************** /*****************************************************************************
@ -355,7 +381,7 @@ namespace tut
set_test_name("raw APR nonblocking I/O"); set_test_name("raw APR nonblocking I/O");
// Create a script file in a temporary place. // Create a script file in a temporary place.
NamedTempFile script("py", NamedExtTempFile script("py",
"from __future__ import print_function" EOL "from __future__ import print_function" EOL
"import sys" EOL "import sys" EOL
"import time" EOL "import time" EOL
@ -565,7 +591,13 @@ namespace tut
" f.write(os.path.normcase(os.path.normpath(os.getcwd())))\n"); " f.write(os.path.normcase(os.path.normpath(os.getcwd())))\n");
// Before running, call setWorkingDirectory() // Before running, call setWorkingDirectory()
py.mParams.cwd = tempdir.getName(); py.mParams.cwd = tempdir.getName();
ensure_equals("os.getcwd()", py.run_read(), tempdir.getName()); std::string expected{ tempdir.getName() };
#if LL_WINDOWS
// SIGH, don't get tripped up by "C:" != "c:" --
// but on the Mac, using tolower() fails because "/users" != "/Users"!
expected = utf8str_tolower(expected);
#endif
ensure_equals("os.getcwd()", py.run_read(), expected);
} }
template<> template<> template<> template<>

View File

@ -29,7 +29,23 @@
#include "../test/lltut.h" #include "../test/lltut.h"
#include "../llrand.h" #include "../llrand.h"
#include "stringize.h"
// In llrand.h, every function is documented to return less than the high end
// -- specifically, because you can pass a negative extent, they're documented
// never to return a value equal to the extent.
// So that we don't need two different versions of ensure_in_range(), when
// testing extent < 0, negate the return value and the extent before passing
// into ensure_in_range().
template <typename NUMBER>
void ensure_in_range(const std::string_view& name,
NUMBER value, NUMBER low, NUMBER high)
{
auto failmsg{ stringize(name, " >= ", low, " (", value, ')') };
tut::ensure(failmsg, (value >= low));
failmsg = stringize(name, " < ", high, " (", value, ')');
tut::ensure(failmsg, (value < high));
}
namespace tut namespace tut
{ {
@ -44,84 +60,65 @@ namespace tut
template<> template<> template<> template<>
void random_object_t::test<1>() void random_object_t::test<1>()
{ {
F32 number = 0.0f;
for(S32 ii = 0; ii < 100000; ++ii) for(S32 ii = 0; ii < 100000; ++ii)
{ {
number = ll_frand(); ensure_in_range("frand", ll_frand(), 0.0f, 1.0f);
ensure("frand >= 0", (number >= 0.0f));
ensure("frand < 1", (number < 1.0f));
} }
} }
template<> template<> template<> template<>
void random_object_t::test<2>() void random_object_t::test<2>()
{ {
F64 number = 0.0f;
for(S32 ii = 0; ii < 100000; ++ii) for(S32 ii = 0; ii < 100000; ++ii)
{ {
number = ll_drand(); ensure_in_range("drand", ll_drand(), 0.0, 1.0);
ensure("drand >= 0", (number >= 0.0));
ensure("drand < 1", (number < 1.0));
} }
} }
template<> template<> template<> template<>
void random_object_t::test<3>() void random_object_t::test<3>()
{ {
F32 number = 0.0f;
for(S32 ii = 0; ii < 100000; ++ii) for(S32 ii = 0; ii < 100000; ++ii)
{ {
number = ll_frand(2.0f) - 1.0f; ensure_in_range("frand(2.0f)", ll_frand(2.0f) - 1.0f, -1.0f, 1.0f);
ensure("frand >= 0", (number >= -1.0f));
ensure("frand < 1", (number <= 1.0f));
} }
} }
template<> template<> template<> template<>
void random_object_t::test<4>() void random_object_t::test<4>()
{ {
F32 number = 0.0f;
for(S32 ii = 0; ii < 100000; ++ii) for(S32 ii = 0; ii < 100000; ++ii)
{ {
number = ll_frand(-7.0); // Negate the result so we don't have to allow a templated low-end
ensure("drand <= 0", (number <= 0.0)); // comparison as well.
ensure("drand > -7", (number > -7.0)); ensure_in_range("-frand(-7.0)", -ll_frand(-7.0), 0.0f, 7.0f);
} }
} }
template<> template<> template<> template<>
void random_object_t::test<5>() void random_object_t::test<5>()
{ {
F64 number = 0.0f;
for(S32 ii = 0; ii < 100000; ++ii) for(S32 ii = 0; ii < 100000; ++ii)
{ {
number = ll_drand(-2.0); ensure_in_range("-drand(-2.0)", -ll_drand(-2.0), 0.0, 2.0);
ensure("drand <= 0", (number <= 0.0));
ensure("drand > -2", (number > -2.0));
} }
} }
template<> template<> template<> template<>
void random_object_t::test<6>() void random_object_t::test<6>()
{ {
S32 number = 0;
for(S32 ii = 0; ii < 100000; ++ii) for(S32 ii = 0; ii < 100000; ++ii)
{ {
number = ll_rand(100); ensure_in_range("rand(100)", ll_rand(100), 0, 100);
ensure("rand >= 0", (number >= 0));
ensure("rand < 100", (number < 100));
} }
} }
template<> template<> template<> template<>
void random_object_t::test<7>() void random_object_t::test<7>()
{ {
S32 number = 0;
for(S32 ii = 0; ii < 100000; ++ii) for(S32 ii = 0; ii < 100000; ++ii)
{ {
number = ll_rand(-127); ensure_in_range("-rand(-127)", -ll_rand(-127), 0, 127);
ensure("rand <= 0", (number <= 0));
ensure("rand > -127", (number > -127));
} }
} }
} }

View File

@ -45,11 +45,6 @@ typedef U32 uint32_t;
#endif #endif
#include "boost/range.hpp" #include "boost/range.hpp"
#include "boost/foreach.hpp"
#include "boost/bind.hpp"
#include "boost/phoenix/bind/bind_function.hpp"
#include "boost/phoenix/core/argument.hpp"
using namespace boost::phoenix;
#include "llsd.h" #include "llsd.h"
#include "llsdserialize.h" #include "llsdserialize.h"
@ -57,9 +52,11 @@ using namespace boost::phoenix;
#include "llformat.h" #include "llformat.h"
#include "llmemorystream.h" #include "llmemorystream.h"
#include "../test/hexdump.h"
#include "../test/lltut.h" #include "../test/lltut.h"
#include "../test/namedtempfile.h" #include "../test/namedtempfile.h"
#include "stringize.h" #include "stringize.h"
#include "StringVec.h"
#include <functional> #include <functional>
typedef std::function<void(const LLSD& data, std::ostream& str)> FormatterFunction; typedef std::function<void(const LLSD& data, std::ostream& str)> FormatterFunction;
@ -1796,16 +1793,12 @@ namespace tut
// helper for TestPythonCompatible // helper for TestPythonCompatible
static std::string import_llsd("import os.path\n" static std::string import_llsd("import os.path\n"
"import sys\n" "import sys\n"
"try:\n" "import llsd\n");
// new freestanding llsd package
" import llsd\n"
"except ImportError:\n"
// older llbase.llsd module
" from llbase import llsd\n");
// helper for TestPythonCompatible // helper for TestPythonCompatible
template <typename CONTENT> template <typename CONTENT, typename... ARGS>
void python(const std::string& desc, const CONTENT& script, int expect=0) void python_expect(const std::string& desc, const CONTENT& script, int expect=0,
ARGS&&... args)
{ {
auto PYTHON(LLStringUtil::getenv("PYTHON")); auto PYTHON(LLStringUtil::getenv("PYTHON"));
ensure("Set $PYTHON to the Python interpreter", !PYTHON.empty()); ensure("Set $PYTHON to the Python interpreter", !PYTHON.empty());
@ -1816,7 +1809,8 @@ namespace tut
std::string q("\""); std::string q("\"");
std::string qPYTHON(q + PYTHON + q); std::string qPYTHON(q + PYTHON + q);
std::string qscript(q + scriptfile.getName() + q); std::string qscript(q + scriptfile.getName() + q);
int rc = _spawnl(_P_WAIT, PYTHON.c_str(), qPYTHON.c_str(), qscript.c_str(), NULL); int rc = _spawnl(_P_WAIT, PYTHON.c_str(), qPYTHON.c_str(), qscript.c_str(),
std::forward<ARGS>(args)..., NULL);
if (rc == -1) if (rc == -1)
{ {
char buffer[256]; char buffer[256];
@ -1832,6 +1826,10 @@ namespace tut
LLProcess::Params params; LLProcess::Params params;
params.executable = PYTHON; params.executable = PYTHON;
params.args.add(scriptfile.getName()); params.args.add(scriptfile.getName());
for (const std::string& arg : StringVec{ std::forward<ARGS>(args)... })
{
params.args.add(arg);
}
LLProcessPtr py(LLProcess::create(params)); LLProcessPtr py(LLProcess::create(params));
ensure(STRINGIZE("Couldn't launch " << desc << " script"), bool(py)); ensure(STRINGIZE("Couldn't launch " << desc << " script"), bool(py));
// Implementing timeout would mean messing with alarm() and // Implementing timeout would mean messing with alarm() and
@ -1866,6 +1864,14 @@ namespace tut
#endif #endif
} }
// helper for TestPythonCompatible
template <typename CONTENT, typename... ARGS>
void python(const std::string& desc, const CONTENT& script, ARGS&&... args)
{
// plain python() expects rc 0
python_expect(desc, script, 0, std::forward<ARGS>(args)...);
}
struct TestPythonCompatible struct TestPythonCompatible
{ {
TestPythonCompatible() {} TestPythonCompatible() {}
@ -1880,10 +1886,10 @@ namespace tut
void TestPythonCompatibleObject::test<1>() void TestPythonCompatibleObject::test<1>()
{ {
set_test_name("verify python()"); set_test_name("verify python()");
python("hello", python_expect("hello",
"import sys\n" "import sys\n"
"sys.exit(17)\n", "sys.exit(17)\n",
17); // expect nonzero rc 17); // expect nonzero rc
} }
template<> template<> template<> template<>
@ -1899,7 +1905,7 @@ namespace tut
static void writeLLSDArray(const FormatterFunction& serialize, static void writeLLSDArray(const FormatterFunction& serialize,
std::ostream& out, const LLSD& array) std::ostream& out, const LLSD& array)
{ {
for (const LLSD& item : llsd::inArray(array)) for (const LLSD& item: llsd::inArray(array))
{ {
// It's important to delimit the entries in this file somehow // It's important to delimit the entries in this file somehow
// because, although Python's llsd.parse() can accept a file // because, although Python's llsd.parse() can accept a file
@ -1914,7 +1920,14 @@ namespace tut
auto buffstr{ buffer.str() }; auto buffstr{ buffer.str() };
int bufflen{ static_cast<int>(buffstr.length()) }; int bufflen{ static_cast<int>(buffstr.length()) };
out.write(reinterpret_cast<const char*>(&bufflen), sizeof(bufflen)); out.write(reinterpret_cast<const char*>(&bufflen), sizeof(bufflen));
LL_DEBUGS() << "Wrote length: "
<< hexdump(reinterpret_cast<const char*>(&bufflen),
sizeof(bufflen))
<< LL_ENDL;
out.write(buffstr.c_str(), buffstr.length()); out.write(buffstr.c_str(), buffstr.length());
LL_DEBUGS() << "Wrote data: "
<< hexmix(buffstr.c_str(), buffstr.length())
<< LL_ENDL;
} }
} }
@ -1943,10 +1956,10 @@ namespace tut
" else:\n" " else:\n"
" raise AssertionError('Too many data items')\n"; " raise AssertionError('Too many data items')\n";
// Create an llsdXXXXXX file containing 'data' serialized to // Create an llsdXXXXXX file containing 'data' serialized per
// notation. // FormatterFunction.
NamedTempFile file("llsd", NamedTempFile file("llsd",
// NamedTempFile's boost::function constructor // NamedTempFile's function constructor
// takes a callable. To this callable it passes the // takes a callable. To this callable it passes the
// std::ostream with which it's writing the // std::ostream with which it's writing the
// NamedTempFile. // NamedTempFile.
@ -1954,34 +1967,50 @@ namespace tut
(std::ostream& out) (std::ostream& out)
{ writeLLSDArray(serialize, out, cdata); }); { writeLLSDArray(serialize, out, cdata); });
python("read C++ " + desc, // 'debug' starts empty because it's intended as an output file
placeholders::arg1 << NamedTempFile debug("debug", "");
import_llsd <<
"from functools import partial\n" try
"import io\n" {
"import struct\n" python("read C++ " + desc,
"lenformat = struct.Struct('i')\n" [&](std::ostream& out){ out <<
"def parse_each(inf):\n" import_llsd <<
" for rawlen in iter(partial(inf.read, lenformat.size), b''):\n" "from functools import partial\n"
" len = lenformat.unpack(rawlen)[0]\n" "import io\n"
// Since llsd.parse() has no max_bytes argument, instead of "import struct\n"
// passing the input stream directly to parse(), read the item "lenformat = struct.Struct('i')\n"
// into a distinct bytes object and parse that. "def parse_each(inf):\n"
" data = inf.read(len)\n" " for rawlen in iter(partial(inf.read, lenformat.size), b''):\n"
" try:\n" " print('Read length:', ''.join(('%02x' % b) for b in rawlen),\n"
" frombytes = llsd.parse(data)\n" " file=debug)\n"
" except llsd.LLSDParseError as err:\n" " len = lenformat.unpack(rawlen)[0]\n"
" print(f'*** {err}')\n" // Since llsd.parse() has no max_bytes argument, instead of
" print(f'Bad content:\\n{data!r}')\n" // passing the input stream directly to parse(), read the item
" raise\n" // into a distinct bytes object and parse that.
// Also try parsing from a distinct stream. " data = inf.read(len)\n"
" stream = io.BytesIO(data)\n" " print('Read data: ', repr(data), file=debug)\n"
" fromstream = llsd.parse(stream)\n" " try:\n"
" assert frombytes == fromstream\n" " frombytes = llsd.parse(data)\n"
" yield frombytes\n" " except llsd.LLSDParseError as err:\n"
<< pydata << " print(f'*** {err}')\n"
// Don't forget raw-string syntax for Windows pathnames. " print(f'Bad content:\\n{data!r}')\n"
"verify(parse_each(open(r'" << file.getName() << "', 'rb')))\n"); " raise\n"
// Also try parsing from a distinct stream.
" stream = io.BytesIO(data)\n"
" fromstream = llsd.parse(stream)\n"
" assert frombytes == fromstream\n"
" yield frombytes\n"
<< pydata <<
// Don't forget raw-string syntax for Windows pathnames.
"debug = open(r'" << debug.getName() << "', 'w')\n"
"verify(parse_each(open(r'" << file.getName() << "', 'rb')))\n";});
}
catch (const failure&)
{
LL_DEBUGS() << "Script debug output:" << LL_ENDL;
debug.peep_log();
throw;
}
} }
template<> template<> template<> template<>
@ -2068,7 +2097,7 @@ namespace tut
NamedTempFile file("llsd", ""); NamedTempFile file("llsd", "");
python("Python " + pyformatter, python("Python " + pyformatter,
placeholders::arg1 << [&](std::ostream& out){ out <<
import_llsd << import_llsd <<
"import struct\n" "import struct\n"
"lenformat = struct.Struct('i')\n" "lenformat = struct.Struct('i')\n"
@ -2086,7 +2115,7 @@ namespace tut
" for item in DATA:\n" " for item in DATA:\n"
" serialized = llsd." << pyformatter << "(item)\n" " serialized = llsd." << pyformatter << "(item)\n"
" f.write(lenformat.pack(len(serialized)))\n" " f.write(lenformat.pack(len(serialized)))\n"
" f.write(serialized)\n"); " f.write(serialized)\n";});
std::ifstream inf(file.getName().c_str()); std::ifstream inf(file.getName().c_str());
LLSD item; LLSD item;

View File

@ -38,7 +38,7 @@ namespace tut
{ {
struct workqueue_data struct workqueue_data
{ {
WorkQueue queue{"queue"}; WorkSchedule queue{"queue"};
}; };
typedef test_group<workqueue_data> workqueue_group; typedef test_group<workqueue_data> workqueue_group;
typedef workqueue_group::object object; typedef workqueue_group::object object;
@ -49,8 +49,8 @@ namespace tut
{ {
set_test_name("name"); set_test_name("name");
ensure_equals("didn't capture name", queue.getKey(), "queue"); ensure_equals("didn't capture name", queue.getKey(), "queue");
ensure("not findable", WorkQueue::getInstance("queue") == queue.getWeak().lock()); ensure("not findable", WorkSchedule::getInstance("queue") == queue.getWeak().lock());
WorkQueue q2; WorkSchedule q2;
ensure("has no name", LLStringUtil::startsWith(q2.getKey(), "WorkQueue")); ensure("has no name", LLStringUtil::startsWith(q2.getKey(), "WorkQueue"));
} }
@ -73,17 +73,21 @@ namespace tut
{ {
set_test_name("postEvery"); set_test_name("postEvery");
// record of runs // record of runs
using Shared = std::deque<WorkQueue::TimePoint>; using Shared = std::deque<WorkSchedule::TimePoint>;
// This is an example of how to share data between the originator of // This is an example of how to share data between the originator of
// postEvery(work) and the work item itself, since usually a WorkQueue // postEvery(work) and the work item itself, since usually a WorkSchedule
// is used to dispatch work to a different thread. Neither of them // is used to dispatch work to a different thread. Neither of them
// should call any of LLCond's wait methods: you don't want to stall // should call any of LLCond's wait methods: you don't want to stall
// either the worker thread or the originating thread (conventionally // either the worker thread or the originating thread (conventionally
// main). Use LLCond or a subclass even if all you want to do is // main). Use LLCond or a subclass even if all you want to do is
// signal the work item that it can quit; consider LLOneShotCond. // signal the work item that it can quit; consider LLOneShotCond.
LLCond<Shared> data; LLCond<Shared> data;
auto start = WorkQueue::TimePoint::clock::now(); auto start = WorkSchedule::TimePoint::clock::now();
auto interval = 100ms; // 2s seems like a long time to wait, since it directly impacts the
// duration of this test program. Unfortunately GitHub's Mac runners
// are pretty wimpy, and we're getting spurious "too late" errors just
// because the thread doesn't wake up as soon as we want.
auto interval = 2s;
queue.postEvery( queue.postEvery(
interval, interval,
[&data, count = 0] [&data, count = 0]
@ -93,7 +97,7 @@ namespace tut
data.update_one( data.update_one(
[](Shared& data) [](Shared& data)
{ {
data.push_back(WorkQueue::TimePoint::clock::now()); data.push_back(WorkSchedule::TimePoint::clock::now());
}); });
// by the 3rd call, return false to stop // by the 3rd call, return false to stop
return (++count < 3); return (++count < 3);
@ -102,7 +106,7 @@ namespace tut
// postEvery() running, so run until we have exhausted the iterations // postEvery() running, so run until we have exhausted the iterations
// or we time out waiting // or we time out waiting
for (auto finish = start + 10*interval; for (auto finish = start + 10*interval;
WorkQueue::TimePoint::clock::now() < finish && WorkSchedule::TimePoint::clock::now() < finish &&
data.get([](const Shared& data){ return data.size(); }) < 3; ) data.get([](const Shared& data){ return data.size(); }) < 3; )
{ {
queue.runPending(); queue.runPending();
@ -139,8 +143,8 @@ namespace tut
void object::test<4>() void object::test<4>()
{ {
set_test_name("postTo"); set_test_name("postTo");
WorkQueue main("main"); WorkSchedule main("main");
auto qptr = WorkQueue::getInstance("queue"); auto qptr = WorkSchedule::getInstance("queue");
int result = 0; int result = 0;
main.postTo( main.postTo(
qptr, qptr,
@ -171,8 +175,8 @@ namespace tut
void object::test<5>() void object::test<5>()
{ {
set_test_name("postTo with void return"); set_test_name("postTo with void return");
WorkQueue main("main"); WorkSchedule main("main");
auto qptr = WorkQueue::getInstance("queue"); auto qptr = WorkSchedule::getInstance("queue");
std::string observe; std::string observe;
main.postTo( main.postTo(
qptr, qptr,
@ -194,7 +198,7 @@ namespace tut
std::string stored; std::string stored;
// Try to call waitForResult() on this thread's main coroutine. It // Try to call waitForResult() on this thread's main coroutine. It
// should throw because the main coroutine must service the queue. // should throw because the main coroutine must service the queue.
auto what{ catch_what<WorkQueue::Error>( auto what{ catch_what<WorkSchedule::Error>(
[this, &stored](){ stored = queue.waitForResult( [this, &stored](){ stored = queue.waitForResult(
[](){ return "should throw"; }); }) }; [](){ return "should throw"; }); }) };
ensure("lambda should not have run", stored.empty()); ensure("lambda should not have run", stored.empty());

View File

@ -17,18 +17,58 @@
// std headers // std headers
// external library headers // external library headers
// other Linden headers // other Linden headers
#include "commoncontrol.h"
#include "llerror.h" #include "llerror.h"
#include "llevents.h" #include "llevents.h"
#include "llsd.h"
#include "stringize.h" #include "stringize.h"
LL::ThreadPool::ThreadPool(const std::string& name, size_t threads, size_t capacity): #include <boost/fiber/algo/round_robin.hpp>
/*****************************************************************************
* Custom fiber scheduler for worker threads
*****************************************************************************/
// As of 2022-12-06, each of our worker threads only runs a single (default)
// fiber: we don't launch explicit fibers within worker threads, nor do we
// anticipate doing so. So a worker thread that's simply waiting for incoming
// tasks should really sleep a little. Override the default fiber scheduler to
// implement that.
struct sleepy_robin: public boost::fibers::algo::round_robin
{
virtual void suspend_until( std::chrono::steady_clock::time_point const&) noexcept
{
#if LL_WINDOWS
// round_robin holds a std::condition_variable, and
// round_robin::suspend_until() calls
// std::condition_variable::wait_until(). On Windows, that call seems
// busier than it ought to be. Try just sleeping.
Sleep(1);
#else
// currently unused other than windows, but might as well have something here
// different units than Sleep(), but we actually just want to sleep for any de-minimis duration
usleep(1);
#endif
}
virtual void notify() noexcept
{
// Since our Sleep() call above will wake up on its own, we need not
// take any special action to wake it.
}
};
/*****************************************************************************
* ThreadPoolBase
*****************************************************************************/
LL::ThreadPoolBase::ThreadPoolBase(const std::string& name, size_t threads,
WorkQueueBase* queue):
super(name), super(name),
mQueue(name, capacity),
mName("ThreadPool:" + name), mName("ThreadPool:" + name),
mThreadCount(threads) mThreadCount(getConfiguredWidth(name, threads)),
mQueue(queue)
{} {}
void LL::ThreadPool::start() void LL::ThreadPoolBase::start()
{ {
for (size_t i = 0; i < mThreadCount; ++i) for (size_t i = 0; i < mThreadCount; ++i)
{ {
@ -56,17 +96,17 @@ void LL::ThreadPool::start()
}); });
} }
LL::ThreadPool::~ThreadPool() LL::ThreadPoolBase::~ThreadPoolBase()
{ {
close(); close();
} }
void LL::ThreadPool::close() void LL::ThreadPoolBase::close()
{ {
if (! mQueue.isClosed()) if (! mQueue->isClosed())
{ {
LL_DEBUGS("ThreadPool") << mName << " closing queue and joining threads" << LL_ENDL; LL_DEBUGS("ThreadPool") << mName << " closing queue and joining threads" << LL_ENDL;
mQueue.close(); mQueue->close();
for (auto& pair: mThreads) for (auto& pair: mThreads)
{ {
LL_DEBUGS("ThreadPool") << mName << " waiting on thread " << pair.first << LL_ENDL; LL_DEBUGS("ThreadPool") << mName << " waiting on thread " << pair.first << LL_ENDL;
@ -76,14 +116,74 @@ void LL::ThreadPool::close()
} }
} }
void LL::ThreadPool::run(const std::string& name) void LL::ThreadPoolBase::run(const std::string& name)
{ {
#if LL_WINDOWS
// Try using sleepy_robin fiber scheduler.
boost::fibers::use_scheduling_algorithm<sleepy_robin>();
#endif // LL_WINDOWS
LL_DEBUGS("ThreadPool") << name << " starting" << LL_ENDL; LL_DEBUGS("ThreadPool") << name << " starting" << LL_ENDL;
run(); run();
LL_DEBUGS("ThreadPool") << name << " stopping" << LL_ENDL; LL_DEBUGS("ThreadPool") << name << " stopping" << LL_ENDL;
} }
void LL::ThreadPool::run() void LL::ThreadPoolBase::run()
{ {
mQueue.runUntilClose(); mQueue->runUntilClose();
}
//static
size_t LL::ThreadPoolBase::getConfiguredWidth(const std::string& name, size_t dft)
{
LLSD poolSizes;
try
{
poolSizes = LL::CommonControl::get("Global", "ThreadPoolSizes");
// "ThreadPoolSizes" is actually a map containing the sizes of
// interest -- or should be, if this process has an
// LLViewerControlListener instance and its settings include
// "ThreadPoolSizes". If we failed to retrieve it, perhaps we're in a
// program that doesn't define that, or perhaps there's no such
// setting, or perhaps we're asking too early, before the LLEventAPI
// itself has been instantiated. In any of those cases, it seems worth
// warning.
if (! poolSizes.isDefined())
{
// Note: we don't warn about absence of an override key for a
// particular ThreadPool name, that's fine. This warning is about
// complete absence of a ThreadPoolSizes setting, which we expect
// in a normal viewer session.
LL_WARNS("ThreadPool") << "No 'ThreadPoolSizes' setting for ThreadPool '"
<< name << "'" << LL_ENDL;
}
}
catch (const LL::CommonControl::Error& exc)
{
// We don't want ThreadPool to *require* LLViewerControlListener.
// Just log it and carry on.
LL_WARNS("ThreadPool") << "Can't check 'ThreadPoolSizes': " << exc.what() << LL_ENDL;
}
LL_DEBUGS("ThreadPool") << "ThreadPoolSizes = " << poolSizes << LL_ENDL;
// LLSD treats an undefined value as an empty map when asked to retrieve a
// key, so we don't need this to be conditional.
LLSD sizeSpec{ poolSizes[name] };
// We retrieve sizeSpec as LLSD, rather than immediately as LLSD::Integer,
// so we can distinguish the case when it's undefined.
return sizeSpec.isInteger() ? sizeSpec.asInteger() : dft;
}
//static
size_t LL::ThreadPoolBase::getWidth(const std::string& name, size_t dft)
{
auto instance{ getInstance(name) };
if (instance)
{
return instance->getWidth();
}
else
{
return getConfiguredWidth(name, dft);
}
} }

View File

@ -13,7 +13,9 @@
#if ! defined(LL_THREADPOOL_H) #if ! defined(LL_THREADPOOL_H)
#define LL_THREADPOOL_H #define LL_THREADPOOL_H
#include "threadpool_fwd.h"
#include "workqueue.h" #include "workqueue.h"
#include <memory> // std::unique_ptr
#include <string> #include <string>
#include <thread> #include <thread>
#include <utility> // std::pair #include <utility> // std::pair
@ -22,17 +24,24 @@
namespace LL namespace LL
{ {
class ThreadPool: public LLInstanceTracker<ThreadPool, std::string> class ThreadPoolBase: public LLInstanceTracker<ThreadPoolBase, std::string>
{ {
private: private:
using super = LLInstanceTracker<ThreadPool, std::string>; using super = LLInstanceTracker<ThreadPoolBase, std::string>;
public: public:
/** /**
* Pass ThreadPool a string name. This can be used to look up the * Pass ThreadPoolBase a string name. This can be used to look up the
* relevant WorkQueue. * relevant WorkQueue.
*
* The number of threads you pass sets the compile-time default. But
* if the user has overridden the LLSD map in the "ThreadPoolSizes"
* setting with a key matching this ThreadPool name, that setting
* overrides this parameter.
*/ */
ThreadPool(const std::string& name, size_t threads=1, size_t capacity=1024); ThreadPoolBase(const std::string& name, size_t threads,
virtual ~ThreadPool(); WorkQueueBase* queue);
virtual ~ThreadPoolBase();
/** /**
* Launch the ThreadPool. Until this call, a constructed ThreadPool * Launch the ThreadPool. Until this call, a constructed ThreadPool
@ -50,8 +59,6 @@ namespace LL
std::string getName() const { return mName; } std::string getName() const { return mName; }
size_t getWidth() const { return mThreads.size(); } size_t getWidth() const { return mThreads.size(); }
/// obtain a non-const reference to the WorkQueue to post work to it
WorkQueue& getQueue() { return mQueue; }
/** /**
* Override run() if you need special processing. The default run() * Override run() if you need special processing. The default run()
@ -59,15 +66,72 @@ namespace LL
*/ */
virtual void run(); virtual void run();
/**
* getConfiguredWidth() returns the setting, if any, for the specified
* ThreadPool name. Returns dft if the "ThreadPoolSizes" map does not
* contain the specified name.
*/
static
size_t getConfiguredWidth(const std::string& name, size_t dft=0);
/**
* This getWidth() returns the width of the instantiated ThreadPool
* with the specified name, if any. If no instance exists, returns its
* getConfiguredWidth() if any. If there's no instance and no relevant
* override, return dft. Presumably dft should match the threads
* parameter passed to the ThreadPool constructor call that will
* eventually instantiate the ThreadPool with that name.
*/
static
size_t getWidth(const std::string& name, size_t dft);
protected:
std::unique_ptr<WorkQueueBase> mQueue;
private: private:
void run(const std::string& name); void run(const std::string& name);
WorkQueue mQueue;
std::string mName; std::string mName;
size_t mThreadCount; size_t mThreadCount;
std::vector<std::pair<std::string, std::thread>> mThreads; std::vector<std::pair<std::string, std::thread>> mThreads;
}; };
/**
* Specialize with WorkQueue or, for timestamped tasks, WorkSchedule
*/
template <class QUEUE>
struct ThreadPoolUsing: public ThreadPoolBase
{
using queue_t = QUEUE;
/**
* Pass ThreadPoolUsing a string name. This can be used to look up the
* relevant WorkQueue.
*
* The number of threads you pass sets the compile-time default. But
* if the user has overridden the LLSD map in the "ThreadPoolSizes"
* setting with a key matching this ThreadPool name, that setting
* overrides this parameter.
*
* Pass an explicit capacity to limit the size of the queue.
* Constraining the queue can cause a submitter to block. Do not
* constrain any ThreadPool accepting work from the main thread.
*/
ThreadPoolUsing(const std::string& name, size_t threads=1, size_t capacity=1024*1024):
ThreadPoolBase(name, threads, new queue_t(name, capacity))
{}
~ThreadPoolUsing() override {}
/**
* obtain a non-const reference to the specific WorkQueue subclass to
* post work to it
*/
queue_t& getQueue() { return static_cast<queue_t&>(*mQueue); }
};
/// ThreadPool is shorthand for using the simpler WorkQueue
using ThreadPool = ThreadPoolUsing<WorkQueue>;
} // namespace LL } // namespace LL
#endif /* ! defined(LL_THREADPOOL_H) */ #endif /* ! defined(LL_THREADPOOL_H) */
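Sketch of the new specialization point, assuming the headers above are on the include path; the pool name "Delayed", the two-thread width and the lambdas are illustrative. Because ThreadPoolUsing<WorkSchedule>::getQueue() returns the concrete WorkSchedule&, the timestamped post() overload is visible at the call site.

#include "threadpool.h"
#include <chrono>

void startDelayedPool()   // hypothetical one-shot setup
{
    static LL::ThreadPoolUsing<LL::WorkSchedule> pool("Delayed", 2);
    pool.start();

    // getQueue() returns WorkSchedule&, so both post() overloads resolve.
    auto& sched = pool.getQueue();
    sched.post([]{ /* runs as soon as a worker is free */ });
    sched.post([]{ /* runs roughly one second from now */ },
               LL::WorkSchedule::TimePoint::clock::now() + std::chrono::seconds(1));
}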


@ -0,0 +1,25 @@
/**
* @file threadpool_fwd.h
* @author Nat Goodspeed
* @date 2022-12-09
* @brief Forward declarations for ThreadPool et al.
*
* $LicenseInfo:firstyear=2022&license=viewerlgpl$
* Copyright (c) 2022, Linden Research, Inc.
* $/LicenseInfo$
*/
#if ! defined(LL_THREADPOOL_FWD_H)
#define LL_THREADPOOL_FWD_H
#include "workqueue.h"
namespace LL
{
template <class QUEUE>
struct ThreadPoolUsing;
using ThreadPool = ThreadPoolUsing<WorkQueue>;
} // namespace LL
#endif /* ! defined(LL_THREADPOOL_FWD_H) */


@ -26,14 +26,121 @@
using Mutex = LLCoros::Mutex; using Mutex = LLCoros::Mutex;
using Lock = LLCoros::LockType; using Lock = LLCoros::LockType;
LL::WorkQueue::WorkQueue(const std::string& name, size_t capacity): /*****************************************************************************
super(makeName(name)), * WorkQueueBase
mQueue(capacity) *****************************************************************************/
LL::WorkQueueBase::WorkQueueBase(const std::string& name):
super(makeName(name))
{ {
// TODO: register for "LLApp" events so we can implicitly close() on // TODO: register for "LLApp" events so we can implicitly close() on
// viewer shutdown. // viewer shutdown.
} }
void LL::WorkQueueBase::runUntilClose()
{
try
{
for (;;)
{
LL_PROFILE_ZONE_SCOPED_CATEGORY_THREAD;
callWork(pop_());
}
}
catch (const Closed&)
{
}
}
bool LL::WorkQueueBase::runPending()
{
LL_PROFILE_ZONE_SCOPED_CATEGORY_THREAD;
for (Work work; tryPop_(work); )
{
callWork(work);
}
return ! done();
}
bool LL::WorkQueueBase::runOne()
{
Work work;
if (tryPop_(work))
{
callWork(work);
}
return ! done();
}
bool LL::WorkQueueBase::runUntil(const TimePoint& until)
{
LL_PROFILE_ZONE_SCOPED_CATEGORY_THREAD;
// Should we subtract some slop to allow for typical Work execution time?
// How much slop?
// runUntil() is simply a time-bounded runPending().
for (Work work; TimePoint::clock::now() < until && tryPop_(work); )
{
callWork(work);
}
return ! done();
}
std::string LL::WorkQueueBase::makeName(const std::string& name)
{
if (! name.empty())
return name;
static U32 discriminator = 0;
static Mutex mutex;
U32 num;
{
// Protect discriminator from concurrent access by different threads.
// It can't be thread_local, else two racing threads will come up with
// the same name.
Lock lk(mutex);
num = discriminator++;
}
return STRINGIZE("WorkQueue" << num);
}
void LL::WorkQueueBase::callWork(const Work& work)
{
LL_PROFILE_ZONE_SCOPED_CATEGORY_THREAD;
try
{
work();
}
catch (...)
{
// No matter what goes wrong with any individual work item, the worker
// thread must go on! Log our own instance name with the exception.
LOG_UNHANDLED_EXCEPTION(getKey());
}
}
void LL::WorkQueueBase::error(const std::string& msg)
{
LL_ERRS("WorkQueue") << msg << LL_ENDL;
}
void LL::WorkQueueBase::checkCoroutine(const std::string& method)
{
// By convention, the default coroutine on each thread has an empty name
// string. See also LLCoros::logname().
if (LLCoros::getName().empty())
{
LLTHROW(Error("Do not call " + method + " from a thread's default coroutine"));
}
}
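A consumer-side sketch of the shared worker API above, assuming a queue has been registered under the name "mainloop" and can be found through the LLInstanceTracker lookup (that name and the 5 ms budget are illustrative): runUntil() drains pending work but stops at the deadline, so a frame loop can bound how long it services the queue.

#include "workqueue.h"
#include <chrono>

void serviceMainQueue()   // hypothetical per-frame call
{
    auto queue = LL::WorkQueue::getInstance("mainloop");
    if (queue)
    {
        queue->runUntil(LL::WorkQueue::TimePoint::clock::now()
                        + std::chrono::milliseconds(5));
    }
}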
/*****************************************************************************
* WorkQueue
*****************************************************************************/
LL::WorkQueue::WorkQueue(const std::string& name, size_t capacity):
super(name),
mQueue(capacity)
{
}
void LL::WorkQueue::close() void LL::WorkQueue::close()
{ {
mQueue.close(); mQueue.close();
@ -54,105 +161,85 @@ bool LL::WorkQueue::done()
return mQueue.done(); return mQueue.done();
} }
void LL::WorkQueue::runUntilClose() bool LL::WorkQueue::post(const Work& callable)
{ {
try return mQueue.pushIfOpen(callable);
{
for (;;)
{
LL_PROFILE_ZONE_SCOPED_CATEGORY_THREAD;
callWork(mQueue.pop());
}
}
catch (const Queue::Closed&)
{
}
} }
bool LL::WorkQueue::runPending() bool LL::WorkQueue::tryPost(const Work& callable)
{ {
LL_PROFILE_ZONE_SCOPED_CATEGORY_THREAD; return mQueue.tryPush(callable);
for (Work work; mQueue.tryPop(work); )
{
callWork(work);
}
return ! mQueue.done();
} }
bool LL::WorkQueue::runOne() LL::WorkQueue::Work LL::WorkQueue::pop_()
{ {
Work work; return mQueue.pop();
if (mQueue.tryPop(work))
{
callWork(work);
}
return ! mQueue.done();
} }
bool LL::WorkQueue::runUntil(const TimePoint& until) bool LL::WorkQueue::tryPop_(Work& work)
{ {
LL_PROFILE_ZONE_SCOPED_CATEGORY_THREAD; return mQueue.tryPop(work);
// Should we subtract some slop to allow for typical Work execution time?
// How much slop?
// runUntil() is simply a time-bounded runPending().
for (Work work; TimePoint::clock::now() < until && mQueue.tryPop(work); )
{
callWork(work);
}
return ! mQueue.done();
} }
std::string LL::WorkQueue::makeName(const std::string& name) /*****************************************************************************
* WorkSchedule
*****************************************************************************/
LL::WorkSchedule::WorkSchedule(const std::string& name, size_t capacity):
super(name),
mQueue(capacity)
{ {
if (! name.empty())
return name;
static U32 discriminator = 0;
static Mutex mutex;
U32 num;
{
// Protect discriminator from concurrent access by different threads.
// It can't be thread_local, else two racing threads will come up with
// the same name.
Lock lk(mutex);
num = discriminator++;
}
return STRINGIZE("WorkQueue" << num);
} }
void LL::WorkQueue::callWork(const Queue::DataTuple& work) void LL::WorkSchedule::close()
{ {
// ThreadSafeSchedule::pop() always delivers a tuple, even when mQueue.close();
// there's only one data field per item, as for us.
callWork(std::get<0>(work));
} }
void LL::WorkQueue::callWork(const Work& work) size_t LL::WorkSchedule::size()
{ {
LL_PROFILE_ZONE_SCOPED_CATEGORY_THREAD; return mQueue.size();
try
{
work();
}
catch (...)
{
// No matter what goes wrong with any individual work item, the worker
// thread must go on! Log our own instance name with the exception.
LOG_UNHANDLED_EXCEPTION(getKey());
}
} }
void LL::WorkQueue::error(const std::string& msg) bool LL::WorkSchedule::isClosed()
{ {
LL_ERRS("WorkQueue") << msg << LL_ENDL; return mQueue.isClosed();
} }
void LL::WorkQueue::checkCoroutine(const std::string& method) bool LL::WorkSchedule::done()
{ {
// By convention, the default coroutine on each thread has an empty name return mQueue.done();
// string. See also LLCoros::logname(). }
if (LLCoros::getName().empty())
{ bool LL::WorkSchedule::post(const Work& callable)
LLTHROW(Error("Do not call " + method + " from a thread's default coroutine")); {
} // Use TimePoint::clock::now() instead of TimePoint's representation of
// the epoch because this WorkSchedule may contain a mix of past-due
// TimedWork items and TimedWork items scheduled for the future. Sift this
// new item into the correct place.
return post(callable, TimePoint::clock::now());
}
bool LL::WorkSchedule::post(const Work& callable, const TimePoint& time)
{
return mQueue.pushIfOpen(TimedWork(time, callable));
}
bool LL::WorkSchedule::tryPost(const Work& callable)
{
return tryPost(callable, TimePoint::clock::now());
}
bool LL::WorkSchedule::tryPost(const Work& callable, const TimePoint& time)
{
return mQueue.tryPush(TimedWork(time, callable));
}
LL::WorkSchedule::Work LL::WorkSchedule::pop_()
{
return std::get<0>(mQueue.pop());
}
bool LL::WorkSchedule::tryPop_(Work& work)
{
return mQueue.tryPop(work);
} }
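Producer-side sketch of the contract implemented above, under the bounded-queue assumption stated in the ThreadPool header (the helper name is illustrative): tryPost() declines when the queue is full, while post() may block on a full queue and returns false only once the queue is closed.

#include "workqueue.h"

bool enqueueWork(LL::WorkQueue& queue, const LL::WorkQueue::Work& work)
{
    if (queue.tryPost(work))
    {
        return true;    // accepted without blocking
    }
    // Queue full (or already closed): fall back to post(), which may block
    // while the queue is full and returns false only if it has been closed.
    return queue.post(work);
}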


@ -15,6 +15,7 @@
#include "llcoros.h" #include "llcoros.h"
#include "llexception.h" #include "llexception.h"
#include "llinstancetracker.h" #include "llinstancetracker.h"
#include "llinstancetrackersubclass.h"
#include "threadsafeschedule.h" #include "threadsafeschedule.h"
#include <chrono> #include <chrono>
#include <exception> // std::current_exception #include <exception> // std::current_exception
@ -23,27 +24,23 @@
namespace LL namespace LL
{ {
/*****************************************************************************
* WorkQueueBase: API for WorkQueue and WorkSchedule
*****************************************************************************/
/** /**
* A typical WorkQueue has a string name that can be used to find it. * A typical WorkQueue has a string name that can be used to find it.
*/ */
class WorkQueue: public LLInstanceTracker<WorkQueue, std::string> class WorkQueueBase: public LLInstanceTracker<WorkQueueBase, std::string>
{ {
private: private:
using super = LLInstanceTracker<WorkQueue, std::string>; using super = LLInstanceTracker<WorkQueueBase, std::string>;
public: public:
using Work = std::function<void()>; using Work = std::function<void()>;
using Closed = LLThreadSafeQueueInterrupt;
private: // for runFor()
using Queue = ThreadSafeSchedule<Work>; using TimePoint = std::chrono::steady_clock::time_point;
// helper for postEvery()
template <typename Rep, typename Period, typename CALLABLE>
class BackJack;
public:
using TimePoint = Queue::TimePoint;
using TimedWork = Queue::TimeTuple;
using Closed = Queue::Closed;
struct Error: public LLException struct Error: public LLException
{ {
@ -51,18 +48,18 @@ namespace LL
}; };
/** /**
* You may omit the WorkQueue name, in which case a unique name is * You may omit the WorkQueueBase name, in which case a unique name is
* synthesized; for practical purposes that makes it anonymous. * synthesized; for practical purposes that makes it anonymous.
*/ */
WorkQueue(const std::string& name = std::string(), size_t capacity=1024); WorkQueueBase(const std::string& name);
/** /**
* Since the point of WorkQueue is to pass work to some other worker * Since the point of WorkQueue is to pass work to some other worker
* thread(s) asynchronously, it's important that the WorkQueue continue * thread(s) asynchronously, it's important that it continue to exist
* to exist until the worker thread(s) have drained it. To communicate * until the worker thread(s) have drained it. To communicate that
* that it's time for them to quit, close() the queue. * it's time for them to quit, close() the queue.
*/ */
void close(); virtual void close() = 0;
/** /**
* WorkQueue supports multiple producers and multiple consumers. In * WorkQueue supports multiple producers and multiple consumers. In
@ -78,152 +75,57 @@ namespace LL
* * If you're the only consumer, noticing that size() > 0 is * * If you're the only consumer, noticing that size() > 0 is
* meaningful. * meaningful.
*/ */
size_t size(); virtual size_t size() = 0;
/// producer end: are we prevented from pushing any additional items? /// producer end: are we prevented from pushing any additional items?
bool isClosed(); virtual bool isClosed() = 0;
/// consumer end: are we done, is the queue entirely drained? /// consumer end: are we done, is the queue entirely drained?
bool done(); virtual bool done() = 0;
/*---------------------- fire and forget API -----------------------*/ /*---------------------- fire and forget API -----------------------*/
/// fire-and-forget, but at a particular (future?) time
template <typename CALLABLE>
void post(const TimePoint& time, CALLABLE&& callable)
{
// Defer reifying an arbitrary CALLABLE until we hit this or
// postIfOpen(). All other methods should accept CALLABLEs of
// arbitrary type to avoid multiple levels of std::function
// indirection.
mQueue.push(TimedWork(time, std::move(callable)));
}
/// fire-and-forget
template <typename CALLABLE>
void post(CALLABLE&& callable)
{
// We use TimePoint::clock::now() instead of TimePoint's
// representation of the epoch because this WorkQueue may contain
// a mix of past-due TimedWork items and TimedWork items scheduled
// for the future. Sift this new item into the correct place.
post(TimePoint::clock::now(), std::move(callable));
}
/**
* post work for a particular time, unless the queue is closed before
* we can post
*/
template <typename CALLABLE>
bool postIfOpen(const TimePoint& time, CALLABLE&& callable)
{
// Defer reifying an arbitrary CALLABLE until we hit this or
// post(). All other methods should accept CALLABLEs of arbitrary
// type to avoid multiple levels of std::function indirection.
return mQueue.pushIfOpen(TimedWork(time, std::move(callable)));
}
/** /**
* post work, unless the queue is closed before we can post * post work, unless the queue is closed before we can post
*/ */
template <typename CALLABLE> virtual bool post(const Work&) = 0;
bool postIfOpen(CALLABLE&& callable)
{
return postIfOpen(TimePoint::clock::now(), std::move(callable));
}
/** /**
* Post work to be run at a specified time to another WorkQueue, which * post work, unless the queue is full
* may or may not still exist and be open. Return true if we were able
* to post.
*/ */
template <typename CALLABLE> virtual bool tryPost(const Work&) = 0;
static bool postMaybe(weak_t target, const TimePoint& time, CALLABLE&& callable);
/** /**
* Post work to another WorkQueue, which may or may not still exist * Post work to another WorkQueue, which may or may not still exist
* and be open. Return true if we were able to post. * and be open. Support any post() overload. Return true if we were
* able to post.
*/ */
template <typename CALLABLE> template <typename... ARGS>
static bool postMaybe(weak_t target, CALLABLE&& callable) static bool postMaybe(weak_t target, ARGS&&... args);
{
return postMaybe(target, TimePoint::clock::now(),
std::forward<CALLABLE>(callable));
}
/**
* Launch a callable returning bool that will trigger repeatedly at
* specified interval, until the callable returns false.
*
* If you need to signal that callable from outside, DO NOT bind a
* reference to a simple bool! That's not thread-safe. Instead, bind
* an LLCond variant, e.g. LLOneShotCond or LLBoolCond.
*/
template <typename Rep, typename Period, typename CALLABLE>
void postEvery(const std::chrono::duration<Rep, Period>& interval,
CALLABLE&& callable);
template <typename CALLABLE>
bool tryPost(CALLABLE&& callable)
{
return mQueue.tryPush(TimedWork(TimePoint::clock::now(), std::move(callable)));
}
/*------------------------- handshake API --------------------------*/ /*------------------------- handshake API --------------------------*/
/**
* Post work to another WorkQueue to be run at a specified time,
* requesting a specific callback to be run on this WorkQueue on
* completion.
*
* Returns true if able to post, false if the other WorkQueue is
* inaccessible.
*/
// Apparently some Microsoft header file defines a macro CALLBACK? The
// natural template argument name CALLBACK produces very weird Visual
// Studio compile errors that seem utterly unrelated to this source
// code.
template <typename CALLABLE, typename FOLLOWUP>
bool postTo(weak_t target,
const TimePoint& time, CALLABLE&& callable, FOLLOWUP&& callback);
/** /**
* Post work to another WorkQueue, requesting a specific callback to * Post work to another WorkQueue, requesting a specific callback to
* be run on this WorkQueue on completion. * be run on this WorkQueue on completion. Optional final argument is
* TimePoint for WorkSchedule.
* *
* Returns true if able to post, false if the other WorkQueue is * Returns true if able to post, false if the other WorkQueue is
* inaccessible. * inaccessible.
*/ */
template <typename CALLABLE, typename FOLLOWUP> template <typename CALLABLE, typename FOLLOWUP, typename... ARGS>
bool postTo(weak_t target, CALLABLE&& callable, FOLLOWUP&& callback) bool postTo(weak_t target, CALLABLE&& callable, FOLLOWUP&& callback,
{ ARGS&&... args);
return postTo(target, TimePoint::clock::now(),
std::move(callable), std::move(callback));
}
/**
* Post work to another WorkQueue to be run at a specified time,
* blocking the calling coroutine until then, returning the result to
* caller on completion.
*
* In general, we assume that each thread's default coroutine is busy
* servicing its WorkQueue or whatever. To try to prevent mistakes, we
* forbid calling waitForResult() from a thread's default coroutine.
*/
template <typename CALLABLE>
auto waitForResult(const TimePoint& time, CALLABLE&& callable);
/** /**
* Post work to another WorkQueue, blocking the calling coroutine * Post work to another WorkQueue, blocking the calling coroutine
* until then, returning the result to caller on completion. * until then, returning the result to caller on completion. Optional
* final argument is TimePoint for WorkSchedule.
* *
* In general, we assume that each thread's default coroutine is busy * In general, we assume that each thread's default coroutine is busy
* servicing its WorkQueue or whatever. To try to prevent mistakes, we * servicing its WorkQueue or whatever. To try to prevent mistakes, we
* forbid calling waitForResult() from a thread's default coroutine. * forbid calling waitForResult() from a thread's default coroutine.
*/ */
template <typename CALLABLE> template <typename CALLABLE, typename... ARGS>
auto waitForResult(CALLABLE&& callable) auto waitForResult(CALLABLE&& callable, ARGS&&... args);
{
return waitForResult(TimePoint::clock::now(), std::move(callable));
}
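Handshake sketch for the variadic postTo(), assuming an existing background queue reachable through a weak handle; the Thing payload, computeThing() and the consuming lambda are illustrative. The first callable runs on the target queue; the callback runs back on the posting queue with the result.

#include "workqueue.h"

struct Thing { int value; };                        // illustrative payload

static Thing computeThing() { return Thing{ 42 }; }

void requestThing(LL::WorkQueue& self, LL::WorkQueue::weak_t background)
{
    bool ok = self.postTo(
        background,
        []{ return computeThing(); },               // runs on the target queue
        [](Thing result)                            // runs back on 'self'
        {
            (void)result.value;                     // consume the result here
        });
    if (! ok)
    {
        // the target queue no longer exists or could not accept the work
    }
}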
/*--------------------------- worker API ---------------------------*/ /*--------------------------- worker API ---------------------------*/
@ -270,7 +172,7 @@ namespace LL
*/ */
bool runUntil(const TimePoint& until); bool runUntil(const TimePoint& until);
private: protected:
template <typename CALLABLE, typename FOLLOWUP> template <typename CALLABLE, typename FOLLOWUP>
static auto makeReplyLambda(CALLABLE&& callable, FOLLOWUP&& callback); static auto makeReplyLambda(CALLABLE&& callable, FOLLOWUP&& callback);
/// general case: arbitrary C++ return type /// general case: arbitrary C++ return type
@ -290,13 +192,170 @@ namespace LL
static void checkCoroutine(const std::string& method); static void checkCoroutine(const std::string& method);
static void error(const std::string& msg); static void error(const std::string& msg);
static std::string makeName(const std::string& name); static std::string makeName(const std::string& name);
void callWork(const Queue::DataTuple& work);
void callWork(const Work& work); void callWork(const Work& work);
private:
virtual Work pop_() = 0;
virtual bool tryPop_(Work&) = 0;
};
/*****************************************************************************
* WorkQueue: no timestamped task support
*****************************************************************************/
class WorkQueue: public LLInstanceTrackerSubclass<WorkQueue, WorkQueueBase>
{
private:
using super = LLInstanceTrackerSubclass<WorkQueue, WorkQueueBase>;
public:
/**
* You may omit the WorkQueue name, in which case a unique name is
* synthesized; for practical purposes that makes it anonymous.
*/
WorkQueue(const std::string& name = std::string(), size_t capacity=1024);
/**
* Since the point of WorkQueue is to pass work to some other worker
* thread(s) asynchronously, it's important that it continue to exist
* until the worker thread(s) have drained it. To communicate that
* it's time for them to quit, close() the queue.
*/
void close() override;
/**
* WorkQueue supports multiple producers and multiple consumers. In
* the general case it's misleading to test size(), since any other
* thread might change it the nanosecond the lock is released. On that
* basis, some might argue against publishing a size() method at all.
*
* But there are two specific cases in which a test based on size()
* might be reasonable:
*
* * If you're the only producer, noticing that size() == 0 is
* meaningful.
* * If you're the only consumer, noticing that size() > 0 is
* meaningful.
*/
size_t size() override;
/// producer end: are we prevented from pushing any additional items?
bool isClosed() override;
/// consumer end: are we done, is the queue entirely drained?
bool done() override;
/*---------------------- fire and forget API -----------------------*/
/**
* post work, unless the queue is closed before we can post
*/
bool post(const Work&) override;
/**
* post work, unless the queue is full
*/
bool tryPost(const Work&) override;
private:
using Queue = LLThreadSafeQueue<Work>;
Queue mQueue; Queue mQueue;
Work pop_() override;
bool tryPop_(Work&) override;
};
/*****************************************************************************
* WorkSchedule: add support for timestamped tasks
*****************************************************************************/
class WorkSchedule: public LLInstanceTrackerSubclass<WorkSchedule, WorkQueueBase>
{
private:
using super = LLInstanceTrackerSubclass<WorkSchedule, WorkQueueBase>;
using Queue = ThreadSafeSchedule<Work>;
// helper for postEvery()
template <typename Rep, typename Period, typename CALLABLE>
class BackJack;
public:
using TimePoint = Queue::TimePoint;
using TimedWork = Queue::TimeTuple;
/**
* You may omit the WorkSchedule name, in which case a unique name is
* synthesized; for practical purposes that makes it anonymous.
*/
WorkSchedule(const std::string& name = std::string(), size_t capacity=1024);
/**
* Since the point of WorkSchedule is to pass work to some other worker
* thread(s) asynchronously, it's important that the WorkSchedule continue
* to exist until the worker thread(s) have drained it. To communicate
* that it's time for them to quit, close() the queue.
*/
void close() override;
/**
* WorkSchedule supports multiple producers and multiple consumers. In
* the general case it's misleading to test size(), since any other
* thread might change it the nanosecond the lock is released. On that
* basis, some might argue against publishing a size() method at all.
*
* But there are two specific cases in which a test based on size()
* might be reasonable:
*
* * If you're the only producer, noticing that size() == 0 is
* meaningful.
* * If you're the only consumer, noticing that size() > 0 is
* meaningful.
*/
size_t size() override;
/// producer end: are we prevented from pushing any additional items?
bool isClosed() override;
/// consumer end: are we done, is the queue entirely drained?
bool done() override;
/*---------------------- fire and forget API -----------------------*/
/**
* post work, unless the queue is closed before we can post
*/
bool post(const Work& callable) override;
/**
* post work for a particular time, unless the queue is closed before
* we can post
*/
bool post(const Work& callable, const TimePoint& time);
/**
* post work, unless the queue is full
*/
bool tryPost(const Work& callable) override;
/**
* post work for a particular time, unless the queue is full
*/
bool tryPost(const Work& callable, const TimePoint& time);
/**
* Launch a callable returning bool that will trigger repeatedly at
* specified interval, until the callable returns false.
*
* If you need to signal that callable from outside, DO NOT bind a
* reference to a simple bool! That's not thread-safe. Instead, bind
* an LLCond variant, e.g. LLOneShotCond or LLBoolCond.
*/
template <typename Rep, typename Period, typename CALLABLE>
bool postEvery(const std::chrono::duration<Rep, Period>& interval,
CALLABLE&& callable);
private:
Queue mQueue;
Work pop_() override;
bool tryPop_(Work&) override;
}; };
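Repetition sketch for postEvery(), assuming a WorkSchedule instance is at hand. The class comment above recommends binding an LLCond variant rather than a plain bool for outside signalling; to keep this sketch self-contained, a shared atomic flag stands in for that, and the periodic work itself is elided.

#include "workqueue.h"
#include <atomic>
#include <chrono>
#include <memory>

void startHeartbeat(LL::WorkSchedule& sched,
                    std::shared_ptr<std::atomic<bool>> stop)
{
    bool scheduled = sched.postEvery(
        std::chrono::seconds(30),
        [stop]() -> bool
        {
            // ... do the periodic work here ...
            // returning false ends the postEvery() cycle
            return ! stop->load();
        });
    (void)scheduled;    // false if the schedule was already closed
}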
/** /**
* BackJack is, in effect, a hand-rolled lambda, binding a WorkQueue, a * BackJack is, in effect, a hand-rolled lambda, binding a WorkSchedule, a
* CALLABLE that returns bool, a TimePoint and an interval at which to * CALLABLE that returns bool, a TimePoint and an interval at which to
* relaunch it. As long as the callable continues returning true, BackJack * relaunch it. As long as the callable continues returning true, BackJack
* keeps resubmitting it to the target WorkQueue. * keeps resubmitting it to the target WorkQueue.
@ -305,7 +364,7 @@ namespace LL
// class method gets its own 'this' pointer -- which we need to resubmit // class method gets its own 'this' pointer -- which we need to resubmit
// the whole BackJack callable. // the whole BackJack callable.
template <typename Rep, typename Period, typename CALLABLE> template <typename Rep, typename Period, typename CALLABLE>
class WorkQueue::BackJack class WorkSchedule::BackJack
{ {
public: public:
// bind the desired data // bind the desired data
@ -319,9 +378,10 @@ namespace LL
mCallable(std::move(callable)) mCallable(std::move(callable))
{} {}
// Call by target WorkQueue -- note that although WE require a // This operator() method, called by target WorkSchedule, is what
// callable returning bool, WorkQueue wants a void callable. We // makes this object a Work item. Although WE require a callable
// consume the bool. // returning bool, WorkSchedule wants a void callable. We consume the
// bool.
void operator()() void operator()()
{ {
// If mCallable() throws an exception, don't catch it here: if it // If mCallable() throws an exception, don't catch it here: if it
@ -337,7 +397,7 @@ namespace LL
// register our intent to fire at exact mIntervals. // register our intent to fire at exact mIntervals.
mStart += mInterval; mStart += mInterval;
// We're being called at this moment by the target WorkQueue. // We're being called at this moment by the target WorkSchedule.
// Assume it still exists, rather than checking the result of // Assume it still exists, rather than checking the result of
// lock(). // lock().
// Resubmit the whole *this callable: that's why we're a class // Resubmit the whole *this callable: that's why we're a class
@ -345,14 +405,10 @@ namespace LL
// move-only callable; but naturally this statement must be // move-only callable; but naturally this statement must be
// the last time we reference this instance, which may become // the last time we reference this instance, which may become
// moved-from. // moved-from.
try auto target{ std::dynamic_pointer_cast<WorkSchedule>(mTarget.lock()) };
{ // Discard bool return: once this queue is closed, oh well,
mTarget.lock()->post(mStart, std::move(*this)); // just stop
} target->post(std::move(*this), mStart);
catch (const Closed&)
{
// Once this queue is closed, oh well, just stop
}
} }
} }
@ -364,8 +420,8 @@ namespace LL
}; };
template <typename Rep, typename Period, typename CALLABLE> template <typename Rep, typename Period, typename CALLABLE>
void WorkQueue::postEvery(const std::chrono::duration<Rep, Period>& interval, bool WorkSchedule::postEvery(const std::chrono::duration<Rep, Period>& interval,
CALLABLE&& callable) CALLABLE&& callable)
{ {
if (interval.count() <= 0) if (interval.count() <= 0)
{ {
@ -381,14 +437,14 @@ namespace LL
// Instantiate and post a suitable BackJack, binding a weak_ptr to // Instantiate and post a suitable BackJack, binding a weak_ptr to
// self, the current time, the desired interval and the desired // self, the current time, the desired interval and the desired
// callable. // callable.
post( return post(
BackJack<Rep, Period, CALLABLE>( BackJack<Rep, Period, CALLABLE>(
getWeak(), TimePoint::clock::now(), interval, std::move(callable))); getWeak(), TimePoint::clock::now(), interval, std::move(callable)));
} }
/// general case: arbitrary C++ return type /// general case: arbitrary C++ return type
template <typename CALLABLE, typename FOLLOWUP, typename RETURNTYPE> template <typename CALLABLE, typename FOLLOWUP, typename RETURNTYPE>
struct WorkQueue::MakeReplyLambda struct WorkQueueBase::MakeReplyLambda
{ {
auto operator()(CALLABLE&& callable, FOLLOWUP&& callback) auto operator()(CALLABLE&& callable, FOLLOWUP&& callback)
{ {
@ -409,7 +465,7 @@ namespace LL
/// specialize for CALLABLE returning void /// specialize for CALLABLE returning void
template <typename CALLABLE, typename FOLLOWUP> template <typename CALLABLE, typename FOLLOWUP>
struct WorkQueue::MakeReplyLambda<CALLABLE, FOLLOWUP, void> struct WorkQueueBase::MakeReplyLambda<CALLABLE, FOLLOWUP, void>
{ {
auto operator()(CALLABLE&& callable, FOLLOWUP&& callback) auto operator()(CALLABLE&& callable, FOLLOWUP&& callback)
{ {
@ -421,16 +477,16 @@ namespace LL
}; };
template <typename CALLABLE, typename FOLLOWUP> template <typename CALLABLE, typename FOLLOWUP>
auto WorkQueue::makeReplyLambda(CALLABLE&& callable, FOLLOWUP&& callback) auto WorkQueueBase::makeReplyLambda(CALLABLE&& callable, FOLLOWUP&& callback)
{ {
return MakeReplyLambda<CALLABLE, FOLLOWUP, return MakeReplyLambda<CALLABLE, FOLLOWUP,
decltype(std::forward<CALLABLE>(callable)())>() decltype(std::forward<CALLABLE>(callable)())>()
(std::move(callable), std::move(callback)); (std::move(callable), std::move(callback));
} }
template <typename CALLABLE, typename FOLLOWUP> template <typename CALLABLE, typename FOLLOWUP, typename... ARGS>
bool WorkQueue::postTo(weak_t target, bool WorkQueueBase::postTo(weak_t target, CALLABLE&& callable, FOLLOWUP&& callback,
const TimePoint& time, CALLABLE&& callable, FOLLOWUP&& callback) ARGS&&... args)
{ {
LL_PROFILE_ZONE_SCOPED; LL_PROFILE_ZONE_SCOPED;
// We're being asked to post to the WorkQueue at target. // We're being asked to post to the WorkQueue at target.
@ -443,13 +499,12 @@ namespace LL
// Here we believe target WorkQueue still exists. Post to it a // Here we believe target WorkQueue still exists. Post to it a
// lambda that packages our callable, our callback and a weak_ptr // lambda that packages our callable, our callback and a weak_ptr
// to this originating WorkQueue. // to this originating WorkQueue.
tptr->post( return tptr->post(
time,
[reply = super::getWeak(), [reply = super::getWeak(),
callable = std::move(callable), callable = std::move(callable),
callback = std::move(callback)] callback = std::move(callback)]
() () mutable
mutable { {
// Use postMaybe() below in case this originating WorkQueue // Use postMaybe() below in case this originating WorkQueue
// has been closed or destroyed. Remember, the outer lambda is // has been closed or destroyed. Remember, the outer lambda is
// now running on a thread servicing the target WorkQueue, and // now running on a thread servicing the target WorkQueue, and
@ -472,44 +527,34 @@ namespace LL
// originating WorkQueue. Once there, rethrow it. // originating WorkQueue. Once there, rethrow it.
[exc = std::current_exception()](){ std::rethrow_exception(exc); }); [exc = std::current_exception()](){ std::rethrow_exception(exc); });
} }
}); },
// if caller passed a TimePoint, pass it along to post()
// looks like we were able to post() std::forward<ARGS>(args)...);
return true;
} }
template <typename CALLABLE> template <typename... ARGS>
bool WorkQueue::postMaybe(weak_t target, const TimePoint& time, CALLABLE&& callable) bool WorkQueueBase::postMaybe(weak_t target, ARGS&&... args)
{ {
LL_PROFILE_ZONE_SCOPED; LL_PROFILE_ZONE_SCOPED;
// target is a weak_ptr: have to lock it to check it // target is a weak_ptr: have to lock it to check it
auto tptr = target.lock(); auto tptr = target.lock();
if (tptr) if (tptr)
{ {
try return tptr->post(std::forward<ARGS>(args)...);
{
tptr->post(time, std::forward<CALLABLE>(callable));
// we were able to post()
return true;
}
catch (const Closed&)
{
// target WorkQueue still exists, but is Closed
}
} }
// either target no longer exists, or its WorkQueue is Closed // target no longer exists
return false; return false;
} }
/// general case: arbitrary C++ return type /// general case: arbitrary C++ return type
template <typename CALLABLE, typename RETURNTYPE> template <typename CALLABLE, typename RETURNTYPE>
struct WorkQueue::WaitForResult struct WorkQueueBase::WaitForResult
{ {
auto operator()(WorkQueue* self, const TimePoint& time, CALLABLE&& callable) template <typename... ARGS>
auto operator()(WorkQueueBase* self, CALLABLE&& callable, ARGS&&... args)
{ {
LLCoros::Promise<RETURNTYPE> promise; LLCoros::Promise<RETURNTYPE> promise;
self->post( bool posted = self->post(
time,
// We dare to bind a reference to Promise because it's // We dare to bind a reference to Promise because it's
// specifically designed for cross-thread communication. // specifically designed for cross-thread communication.
[&promise, callable = std::move(callable)]() [&promise, callable = std::move(callable)]()
@ -523,7 +568,13 @@ namespace LL
{ {
promise.set_exception(std::current_exception()); promise.set_exception(std::current_exception());
} }
}); },
// if caller passed a TimePoint, pass it to post()
std::forward<ARGS>(args)...);
if (! posted)
{
LLTHROW(WorkQueueBase::Closed());
}
auto future{ LLCoros::getFuture(promise) }; auto future{ LLCoros::getFuture(promise) };
// now, on the calling thread, wait for that result // now, on the calling thread, wait for that result
LLCoros::TempStatus st("waiting for WorkQueue::waitForResult()"); LLCoros::TempStatus st("waiting for WorkQueue::waitForResult()");
@ -533,13 +584,13 @@ namespace LL
/// specialize for CALLABLE returning void /// specialize for CALLABLE returning void
template <typename CALLABLE> template <typename CALLABLE>
struct WorkQueue::WaitForResult<CALLABLE, void> struct WorkQueueBase::WaitForResult<CALLABLE, void>
{ {
void operator()(WorkQueue* self, const TimePoint& time, CALLABLE&& callable) template <typename... ARGS>
void operator()(WorkQueueBase* self, CALLABLE&& callable, ARGS&&... args)
{ {
LLCoros::Promise<void> promise; LLCoros::Promise<void> promise;
self->post( bool posted = self->post(
time,
// &promise is designed for cross-thread access // &promise is designed for cross-thread access
[&promise, callable = std::move(callable)]() [&promise, callable = std::move(callable)]()
mutable { mutable {
@ -552,7 +603,13 @@ namespace LL
{ {
promise.set_exception(std::current_exception()); promise.set_exception(std::current_exception());
} }
}); },
// if caller passed a TimePoint, pass it to post()
std::forward<ARGS>(args)...);
if (! posted)
{
LLTHROW(WorkQueueBase::Closed());
}
auto future{ LLCoros::getFuture(promise) }; auto future{ LLCoros::getFuture(promise) };
// block until set_value() // block until set_value()
LLCoros::TempStatus st("waiting for void WorkQueue::waitForResult()"); LLCoros::TempStatus st("waiting for void WorkQueue::waitForResult()");
@ -560,13 +617,13 @@ namespace LL
} }
}; };
template <typename CALLABLE> template <typename CALLABLE, typename... ARGS>
auto WorkQueue::waitForResult(const TimePoint& time, CALLABLE&& callable) auto WorkQueueBase::waitForResult(CALLABLE&& callable, ARGS&&... args)
{ {
checkCoroutine("waitForResult()"); checkCoroutine("waitForResult()");
// derive callable's return type so we can specialize for void // derive callable's return type so we can specialize for void
return WaitForResult<CALLABLE, decltype(std::forward<CALLABLE>(callable)())>() return WaitForResult<CALLABLE, decltype(std::forward<CALLABLE>(callable)())>()
(this, time, std::forward<CALLABLE>(callable)); (this, std::forward<CALLABLE>(callable), std::forward<ARGS>(args)...);
} }
} // namespace LL } // namespace LL
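Blocking-call sketch for waitForResult(), to be called from a coroutine other than a thread's default coroutine (checkCoroutine() rejects the latter); the background queue reference and the trivial payload are illustrative. With the new bool-returning post(), a closed queue now surfaces as a WorkQueueBase::Closed exception.

#include "workqueue.h"

int fetchValueBlocking(LL::WorkQueue& background)
{
    try
    {
        // Posts the lambda to 'background' and suspends this coroutine until
        // a worker thread runs it and fulfills the promise.
        return background.waitForResult([]{ return 17; });
    }
    catch (const LL::WorkQueueBase::Closed&)
    {
        return -1;      // the queue was closed before the work could be posted
    }
}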


@ -113,6 +113,7 @@ void HttpLibcurl::shutdown()
void HttpLibcurl::start(int policy_count) void HttpLibcurl::start(int policy_count)
{ {
LL_PROFILE_ZONE_SCOPED_CATEGORY_NETWORK;
llassert_always(policy_count <= HTTP_POLICY_CLASS_LIMIT); llassert_always(policy_count <= HTTP_POLICY_CLASS_LIMIT);
llassert_always(! mMultiHandles); // One-time call only llassert_always(! mMultiHandles); // One-time call only
@ -143,6 +144,7 @@ void HttpLibcurl::start(int policy_count)
// sleep otherwise ask for a normal polling interval. // sleep otherwise ask for a normal polling interval.
HttpService::ELoopSpeed HttpLibcurl::processTransport() HttpService::ELoopSpeed HttpLibcurl::processTransport()
{ {
LL_PROFILE_ZONE_SCOPED_CATEGORY_NETWORK;
HttpService::ELoopSpeed ret(HttpService::REQUEST_SLEEP); HttpService::ELoopSpeed ret(HttpService::REQUEST_SLEEP);
// Give libcurl some cycles to do I/O & callbacks // Give libcurl some cycles to do I/O & callbacks
@ -168,6 +170,7 @@ HttpService::ELoopSpeed HttpLibcurl::processTransport()
CURLMcode status(CURLM_CALL_MULTI_PERFORM); CURLMcode status(CURLM_CALL_MULTI_PERFORM);
do do
{ {
LL_PROFILE_ZONE_NAMED_CATEGORY_NETWORK("httppt - curl_multi_perform");
running = 0; running = 0;
status = curl_multi_perform(mMultiHandles[policy_class], &running); status = curl_multi_perform(mMultiHandles[policy_class], &running);
} }
@ -176,31 +179,34 @@ HttpService::ELoopSpeed HttpLibcurl::processTransport()
// Run completion on anything done // Run completion on anything done
CURLMsg * msg(NULL); CURLMsg * msg(NULL);
int msgs_in_queue(0); int msgs_in_queue(0);
while ((msg = curl_multi_info_read(mMultiHandles[policy_class], &msgs_in_queue))) {
{ LL_PROFILE_ZONE_NAMED_CATEGORY_NETWORK("httppt - curl_multi_info_read");
if (CURLMSG_DONE == msg->msg) while ((msg = curl_multi_info_read(mMultiHandles[policy_class], &msgs_in_queue)))
{ {
CURL * handle(msg->easy_handle); if (CURLMSG_DONE == msg->msg)
CURLcode result(msg->data.result); {
CURL* handle(msg->easy_handle);
CURLcode result(msg->data.result);
completeRequest(mMultiHandles[policy_class], handle, result); completeRequest(mMultiHandles[policy_class], handle, result);
handle = NULL; // No longer valid on return handle = NULL; // No longer valid on return
ret = HttpService::NORMAL; // If anything completes, we may have a free slot. ret = HttpService::NORMAL; // If anything completes, we may have a free slot.
// Turning around quickly reduces connection gap by 7-10mS. // Turning around quickly reduces connection gap by 7-10mS.
} }
else if (CURLMSG_NONE == msg->msg) else if (CURLMSG_NONE == msg->msg)
{ {
// Ignore this... it shouldn't mean anything. // Ignore this... it shouldn't mean anything.
; ;
} }
else else
{ {
LL_WARNS_ONCE(LOG_CORE) << "Unexpected message from libcurl. Msg code: " LL_WARNS_ONCE(LOG_CORE) << "Unexpected message from libcurl. Msg code: "
<< msg->msg << msg->msg
<< LL_ENDL; << LL_ENDL;
} }
msgs_in_queue = 0; msgs_in_queue = 0;
} }
}
} }
if (! mActiveOps.empty()) if (! mActiveOps.empty())
@ -214,6 +220,7 @@ HttpService::ELoopSpeed HttpLibcurl::processTransport()
// Caller has provided us with a ref count on op. // Caller has provided us with a ref count on op.
void HttpLibcurl::addOp(const HttpOpRequest::ptr_t &op) void HttpLibcurl::addOp(const HttpOpRequest::ptr_t &op)
{ {
LL_PROFILE_ZONE_SCOPED_CATEGORY_NETWORK;
llassert_always(op->mReqPolicy < mPolicyCount); llassert_always(op->mReqPolicy < mPolicyCount);
llassert_always(mMultiHandles[op->mReqPolicy] != NULL); llassert_always(mMultiHandles[op->mReqPolicy] != NULL);
@ -257,6 +264,7 @@ void HttpLibcurl::addOp(const HttpOpRequest::ptr_t &op)
// method to kill the request. // method to kill the request.
bool HttpLibcurl::cancel(HttpHandle handle) bool HttpLibcurl::cancel(HttpHandle handle)
{ {
LL_PROFILE_ZONE_SCOPED_CATEGORY_NETWORK;
HttpOpRequest::ptr_t op = HttpOpRequest::fromHandle<HttpOpRequest>(handle); HttpOpRequest::ptr_t op = HttpOpRequest::fromHandle<HttpOpRequest>(handle);
active_set_t::iterator it(mActiveOps.find(op)); active_set_t::iterator it(mActiveOps.find(op));
if (mActiveOps.end() == it) if (mActiveOps.end() == it)
@ -282,6 +290,7 @@ bool HttpLibcurl::cancel(HttpHandle handle)
// op to the reply queue with refcount intact. // op to the reply queue with refcount intact.
void HttpLibcurl::cancelRequest(const HttpOpRequest::ptr_t &op) void HttpLibcurl::cancelRequest(const HttpOpRequest::ptr_t &op)
{ {
LL_PROFILE_ZONE_SCOPED_CATEGORY_NETWORK;
// Deactivate request // Deactivate request
op->mCurlActive = false; op->mCurlActive = false;
@ -308,6 +317,7 @@ void HttpLibcurl::cancelRequest(const HttpOpRequest::ptr_t &op)
// Keep them synchronized as necessary. // Keep them synchronized as necessary.
bool HttpLibcurl::completeRequest(CURLM * multi_handle, CURL * handle, CURLcode status) bool HttpLibcurl::completeRequest(CURLM * multi_handle, CURL * handle, CURLcode status)
{ {
LL_PROFILE_ZONE_SCOPED_CATEGORY_NETWORK;
HttpHandle ophandle(NULL); HttpHandle ophandle(NULL);
CURLcode ccode(CURLE_OK); CURLcode ccode(CURLE_OK);
@ -445,6 +455,7 @@ int HttpLibcurl::getActiveCountInClass(int policy_class) const
void HttpLibcurl::policyUpdated(int policy_class) void HttpLibcurl::policyUpdated(int policy_class)
{ {
LL_PROFILE_ZONE_SCOPED_CATEGORY_NETWORK;
if (policy_class < 0 || policy_class >= mPolicyCount || ! mMultiHandles) if (policy_class < 0 || policy_class >= mPolicyCount || ! mMultiHandles)
{ {
return; return;


@ -62,7 +62,6 @@ HttpOperation::HttpOperation():
mReplyQueue(), mReplyQueue(),
mUserHandler(), mUserHandler(),
mReqPolicy(HttpRequest::DEFAULT_POLICY_ID), mReqPolicy(HttpRequest::DEFAULT_POLICY_ID),
mReqPriority(0U),
mTracing(HTTP_TRACE_OFF), mTracing(HTTP_TRACE_OFF),
mMyHandle(LLCORE_HTTP_HANDLE_INVALID) mMyHandle(LLCORE_HTTP_HANDLE_INVALID)
{ {


@ -181,7 +181,6 @@ protected:
public: public:
// Request Data // Request Data
HttpRequest::policy_t mReqPolicy; HttpRequest::policy_t mReqPolicy;
HttpRequest::priority_t mReqPriority;
// Reply Data // Reply Data
HttpStatus mStatus; HttpStatus mStatus;


@ -200,6 +200,7 @@ HttpOpRequest::~HttpOpRequest()
void HttpOpRequest::stageFromRequest(HttpService * service) void HttpOpRequest::stageFromRequest(HttpService * service)
{ {
LL_PROFILE_ZONE_SCOPED_CATEGORY_NETWORK;
HttpOpRequest::ptr_t self(boost::dynamic_pointer_cast<HttpOpRequest>(shared_from_this())); HttpOpRequest::ptr_t self(boost::dynamic_pointer_cast<HttpOpRequest>(shared_from_this()));
service->getPolicy().addOp(self); // transfers refcount service->getPolicy().addOp(self); // transfers refcount
} }
@ -207,6 +208,7 @@ void HttpOpRequest::stageFromRequest(HttpService * service)
void HttpOpRequest::stageFromReady(HttpService * service) void HttpOpRequest::stageFromReady(HttpService * service)
{ {
LL_PROFILE_ZONE_SCOPED_CATEGORY_NETWORK;
HttpOpRequest::ptr_t self(boost::dynamic_pointer_cast<HttpOpRequest>(shared_from_this())); HttpOpRequest::ptr_t self(boost::dynamic_pointer_cast<HttpOpRequest>(shared_from_this()));
service->getTransport().addOp(self); // transfers refcount service->getTransport().addOp(self); // transfers refcount
} }
@ -214,6 +216,7 @@ void HttpOpRequest::stageFromReady(HttpService * service)
void HttpOpRequest::stageFromActive(HttpService * service) void HttpOpRequest::stageFromActive(HttpService * service)
{ {
LL_PROFILE_ZONE_SCOPED_CATEGORY_NETWORK;
if (mReplyLength) if (mReplyLength)
{ {
// If non-zero, we received and processed a Content-Range // If non-zero, we received and processed a Content-Range
@ -250,6 +253,7 @@ void HttpOpRequest::stageFromActive(HttpService * service)
void HttpOpRequest::visitNotifier(HttpRequest * request) void HttpOpRequest::visitNotifier(HttpRequest * request)
{ {
LL_PROFILE_ZONE_SCOPED_CATEGORY_NETWORK;
if (mUserHandler) if (mUserHandler)
{ {
HttpResponse * response = new HttpResponse(); HttpResponse * response = new HttpResponse();
@ -292,6 +296,7 @@ void HttpOpRequest::visitNotifier(HttpRequest * request)
HttpStatus HttpOpRequest::cancel() HttpStatus HttpOpRequest::cancel()
{ {
LL_PROFILE_ZONE_SCOPED_CATEGORY_NETWORK;
mStatus = HttpStatus(HttpStatus::LLCORE, HE_OP_CANCELED); mStatus = HttpStatus(HttpStatus::LLCORE, HE_OP_CANCELED);
addAsReply(); addAsReply();
@ -301,12 +306,12 @@ HttpStatus HttpOpRequest::cancel()
HttpStatus HttpOpRequest::setupGet(HttpRequest::policy_t policy_id, HttpStatus HttpOpRequest::setupGet(HttpRequest::policy_t policy_id,
HttpRequest::priority_t priority,
const std::string & url, const std::string & url,
const HttpOptions::ptr_t & options, const HttpOptions::ptr_t & options,
const HttpHeaders::ptr_t & headers) const HttpHeaders::ptr_t & headers)
{ {
setupCommon(policy_id, priority, url, NULL, options, headers); LL_PROFILE_ZONE_SCOPED_CATEGORY_NETWORK;
setupCommon(policy_id, url, NULL, options, headers);
mReqMethod = HOR_GET; mReqMethod = HOR_GET;
return HttpStatus(); return HttpStatus();
@ -314,14 +319,14 @@ HttpStatus HttpOpRequest::setupGet(HttpRequest::policy_t policy_id,
HttpStatus HttpOpRequest::setupGetByteRange(HttpRequest::policy_t policy_id, HttpStatus HttpOpRequest::setupGetByteRange(HttpRequest::policy_t policy_id,
HttpRequest::priority_t priority,
const std::string & url, const std::string & url,
size_t offset, size_t offset,
size_t len, size_t len,
const HttpOptions::ptr_t & options, const HttpOptions::ptr_t & options,
const HttpHeaders::ptr_t & headers) const HttpHeaders::ptr_t & headers)
{ {
setupCommon(policy_id, priority, url, NULL, options, headers); LL_PROFILE_ZONE_SCOPED_CATEGORY_NETWORK;
setupCommon(policy_id, url, NULL, options, headers);
mReqMethod = HOR_GET; mReqMethod = HOR_GET;
mReqOffset = offset; mReqOffset = offset;
mReqLength = len; mReqLength = len;
@ -335,13 +340,13 @@ HttpStatus HttpOpRequest::setupGetByteRange(HttpRequest::policy_t policy_id,
HttpStatus HttpOpRequest::setupPost(HttpRequest::policy_t policy_id, HttpStatus HttpOpRequest::setupPost(HttpRequest::policy_t policy_id,
HttpRequest::priority_t priority,
const std::string & url, const std::string & url,
BufferArray * body, BufferArray * body,
const HttpOptions::ptr_t & options, const HttpOptions::ptr_t & options,
const HttpHeaders::ptr_t & headers) const HttpHeaders::ptr_t & headers)
{ {
setupCommon(policy_id, priority, url, body, options, headers); LL_PROFILE_ZONE_SCOPED_CATEGORY_NETWORK;
setupCommon(policy_id, url, body, options, headers);
mReqMethod = HOR_POST; mReqMethod = HOR_POST;
return HttpStatus(); return HttpStatus();
@ -349,13 +354,13 @@ HttpStatus HttpOpRequest::setupPost(HttpRequest::policy_t policy_id,
HttpStatus HttpOpRequest::setupPut(HttpRequest::policy_t policy_id, HttpStatus HttpOpRequest::setupPut(HttpRequest::policy_t policy_id,
HttpRequest::priority_t priority,
const std::string & url, const std::string & url,
BufferArray * body, BufferArray * body,
const HttpOptions::ptr_t & options, const HttpOptions::ptr_t & options,
const HttpHeaders::ptr_t & headers) const HttpHeaders::ptr_t & headers)
{ {
setupCommon(policy_id, priority, url, body, options, headers); LL_PROFILE_ZONE_SCOPED_CATEGORY_NETWORK;
setupCommon(policy_id, url, body, options, headers);
mReqMethod = HOR_PUT; mReqMethod = HOR_PUT;
return HttpStatus(); return HttpStatus();
@ -363,12 +368,12 @@ HttpStatus HttpOpRequest::setupPut(HttpRequest::policy_t policy_id,
HttpStatus HttpOpRequest::setupDelete(HttpRequest::policy_t policy_id, HttpStatus HttpOpRequest::setupDelete(HttpRequest::policy_t policy_id,
HttpRequest::priority_t priority,
const std::string & url, const std::string & url,
const HttpOptions::ptr_t & options, const HttpOptions::ptr_t & options,
const HttpHeaders::ptr_t & headers) const HttpHeaders::ptr_t & headers)
{ {
setupCommon(policy_id, priority, url, NULL, options, headers); LL_PROFILE_ZONE_SCOPED_CATEGORY_NETWORK;
setupCommon(policy_id, url, NULL, options, headers);
mReqMethod = HOR_DELETE; mReqMethod = HOR_DELETE;
return HttpStatus(); return HttpStatus();
@ -376,13 +381,13 @@ HttpStatus HttpOpRequest::setupDelete(HttpRequest::policy_t policy_id,
HttpStatus HttpOpRequest::setupPatch(HttpRequest::policy_t policy_id, HttpStatus HttpOpRequest::setupPatch(HttpRequest::policy_t policy_id,
HttpRequest::priority_t priority,
const std::string & url, const std::string & url,
BufferArray * body, BufferArray * body,
const HttpOptions::ptr_t & options, const HttpOptions::ptr_t & options,
const HttpHeaders::ptr_t & headers) const HttpHeaders::ptr_t & headers)
{ {
setupCommon(policy_id, priority, url, body, options, headers); LL_PROFILE_ZONE_SCOPED_CATEGORY_NETWORK;
setupCommon(policy_id, url, body, options, headers);
mReqMethod = HOR_PATCH; mReqMethod = HOR_PATCH;
return HttpStatus(); return HttpStatus();
@ -390,12 +395,12 @@ HttpStatus HttpOpRequest::setupPatch(HttpRequest::policy_t policy_id,
HttpStatus HttpOpRequest::setupCopy(HttpRequest::policy_t policy_id, HttpStatus HttpOpRequest::setupCopy(HttpRequest::policy_t policy_id,
HttpRequest::priority_t priority,
const std::string & url, const std::string & url,
const HttpOptions::ptr_t & options, const HttpOptions::ptr_t & options,
const HttpHeaders::ptr_t &headers) const HttpHeaders::ptr_t &headers)
{ {
setupCommon(policy_id, priority, url, NULL, options, headers); LL_PROFILE_ZONE_SCOPED_CATEGORY_NETWORK;
setupCommon(policy_id, url, NULL, options, headers);
mReqMethod = HOR_COPY; mReqMethod = HOR_COPY;
return HttpStatus(); return HttpStatus();
@ -403,12 +408,12 @@ HttpStatus HttpOpRequest::setupCopy(HttpRequest::policy_t policy_id,
HttpStatus HttpOpRequest::setupMove(HttpRequest::policy_t policy_id, HttpStatus HttpOpRequest::setupMove(HttpRequest::policy_t policy_id,
HttpRequest::priority_t priority,
const std::string & url, const std::string & url,
const HttpOptions::ptr_t & options, const HttpOptions::ptr_t & options,
const HttpHeaders::ptr_t &headers) const HttpHeaders::ptr_t &headers)
{ {
setupCommon(policy_id, priority, url, NULL, options, headers); LL_PROFILE_ZONE_SCOPED_CATEGORY_NETWORK;
setupCommon(policy_id, url, NULL, options, headers);
mReqMethod = HOR_MOVE; mReqMethod = HOR_MOVE;
return HttpStatus(); return HttpStatus();
@ -416,15 +421,14 @@ HttpStatus HttpOpRequest::setupMove(HttpRequest::policy_t policy_id,
void HttpOpRequest::setupCommon(HttpRequest::policy_t policy_id, void HttpOpRequest::setupCommon(HttpRequest::policy_t policy_id,
HttpRequest::priority_t priority,
const std::string & url, const std::string & url,
BufferArray * body, BufferArray * body,
const HttpOptions::ptr_t & options, const HttpOptions::ptr_t & options,
const HttpHeaders::ptr_t & headers) const HttpHeaders::ptr_t & headers)
{ {
LL_PROFILE_ZONE_SCOPED_CATEGORY_NETWORK;
mProcFlags = 0U; mProcFlags = 0U;
mReqPolicy = policy_id; mReqPolicy = policy_id;
mReqPriority = priority;
mReqURL = url; mReqURL = url;
if (body) if (body)
{ {
@ -465,6 +469,7 @@ void HttpOpRequest::setupCommon(HttpRequest::policy_t policy_id,
// *TODO: Move this to _httplibcurl where it belongs. // *TODO: Move this to _httplibcurl where it belongs.
HttpStatus HttpOpRequest::prepareRequest(HttpService * service) HttpStatus HttpOpRequest::prepareRequest(HttpService * service)
{ {
LL_PROFILE_ZONE_SCOPED_CATEGORY_NETWORK;
// Scrub transport and result data for retried op case // Scrub transport and result data for retried op case
mCurlActive = false; mCurlActive = false;
mCurlHandle = NULL; mCurlHandle = NULL;
@ -773,6 +778,7 @@ HttpStatus HttpOpRequest::prepareRequest(HttpService * service)
size_t HttpOpRequest::writeCallback(void * data, size_t size, size_t nmemb, void * userdata) size_t HttpOpRequest::writeCallback(void * data, size_t size, size_t nmemb, void * userdata)
{ {
LL_PROFILE_ZONE_SCOPED_CATEGORY_NETWORK;
HttpOpRequest::ptr_t op(HttpOpRequest::fromHandle<HttpOpRequest>(userdata)); HttpOpRequest::ptr_t op(HttpOpRequest::fromHandle<HttpOpRequest>(userdata));
if (! op->mReplyBody) if (! op->mReplyBody)
@ -788,6 +794,7 @@ size_t HttpOpRequest::writeCallback(void * data, size_t size, size_t nmemb, void
size_t HttpOpRequest::readCallback(void * data, size_t size, size_t nmemb, void * userdata) size_t HttpOpRequest::readCallback(void * data, size_t size, size_t nmemb, void * userdata)
{ {
LL_PROFILE_ZONE_SCOPED_CATEGORY_NETWORK;
HttpOpRequest::ptr_t op(HttpOpRequest::fromHandle<HttpOpRequest>(userdata)); HttpOpRequest::ptr_t op(HttpOpRequest::fromHandle<HttpOpRequest>(userdata));
if (! op->mReqBody) if (! op->mReqBody)
@ -819,6 +826,7 @@ size_t HttpOpRequest::readCallback(void * data, size_t size, size_t nmemb, void
int HttpOpRequest::seekCallback(void *userdata, curl_off_t offset, int origin) int HttpOpRequest::seekCallback(void *userdata, curl_off_t offset, int origin)
{ {
LL_PROFILE_ZONE_SCOPED_CATEGORY_NETWORK;
HttpOpRequest::ptr_t op(HttpOpRequest::fromHandle<HttpOpRequest>(userdata)); HttpOpRequest::ptr_t op(HttpOpRequest::fromHandle<HttpOpRequest>(userdata));
if (!op->mReqBody) if (!op->mReqBody)
@ -850,6 +858,7 @@ int HttpOpRequest::seekCallback(void *userdata, curl_off_t offset, int origin)
size_t HttpOpRequest::headerCallback(void * data, size_t size, size_t nmemb, void * userdata) size_t HttpOpRequest::headerCallback(void * data, size_t size, size_t nmemb, void * userdata)
{ {
LL_PROFILE_ZONE_SCOPED_CATEGORY_NETWORK;
static const char status_line[] = "HTTP/"; static const char status_line[] = "HTTP/";
static const size_t status_line_len = sizeof(status_line) - 1; static const size_t status_line_len = sizeof(status_line) - 1;
static const char con_ran_line[] = "content-range"; static const char con_ran_line[] = "content-range";
@ -999,6 +1008,7 @@ size_t HttpOpRequest::headerCallback(void * data, size_t size, size_t nmemb, voi
CURLcode HttpOpRequest::curlSslCtxCallback(CURL *curl, void *sslctx, void *userdata) CURLcode HttpOpRequest::curlSslCtxCallback(CURL *curl, void *sslctx, void *userdata)
{ {
LL_PROFILE_ZONE_SCOPED_CATEGORY_NETWORK;
HttpOpRequest::ptr_t op(HttpOpRequest::fromHandle<HttpOpRequest>(userdata)); HttpOpRequest::ptr_t op(HttpOpRequest::fromHandle<HttpOpRequest>(userdata));
if (op->mCallbackSSLVerify) if (op->mCallbackSSLVerify)
@ -1025,6 +1035,7 @@ CURLcode HttpOpRequest::curlSslCtxCallback(CURL *curl, void *sslctx, void *userd
int HttpOpRequest::sslCertVerifyCallback(X509_STORE_CTX *ctx, void *param) int HttpOpRequest::sslCertVerifyCallback(X509_STORE_CTX *ctx, void *param)
{ {
LL_PROFILE_ZONE_SCOPED_CATEGORY_NETWORK;
HttpOpRequest::ptr_t op(HttpOpRequest::fromHandle<HttpOpRequest>(param)); HttpOpRequest::ptr_t op(HttpOpRequest::fromHandle<HttpOpRequest>(param));
if (op->mCallbackSSLVerify) if (op->mCallbackSSLVerify)
@ -1037,6 +1048,7 @@ int HttpOpRequest::sslCertVerifyCallback(X509_STORE_CTX *ctx, void *param)
int HttpOpRequest::debugCallback(CURL * handle, curl_infotype info, char * buffer, size_t len, void * userdata) int HttpOpRequest::debugCallback(CURL * handle, curl_infotype info, char * buffer, size_t len, void * userdata)
{ {
LL_PROFILE_ZONE_SCOPED_CATEGORY_NETWORK;
HttpOpRequest::ptr_t op(HttpOpRequest::fromHandle<HttpOpRequest>(userdata)); HttpOpRequest::ptr_t op(HttpOpRequest::fromHandle<HttpOpRequest>(userdata));
std::string safe_line; std::string safe_line;


@ -105,13 +105,11 @@ public:
/// Threading: called by application thread /// Threading: called by application thread
/// ///
HttpStatus setupGet(HttpRequest::policy_t policy_id, HttpStatus setupGet(HttpRequest::policy_t policy_id,
HttpRequest::priority_t priority,
const std::string & url, const std::string & url,
const HttpOptions::ptr_t & options, const HttpOptions::ptr_t & options,
const HttpHeaders::ptr_t & headers); const HttpHeaders::ptr_t & headers);
HttpStatus setupGetByteRange(HttpRequest::policy_t policy_id, HttpStatus setupGetByteRange(HttpRequest::policy_t policy_id,
HttpRequest::priority_t priority,
const std::string & url, const std::string & url,
size_t offset, size_t offset,
size_t len, size_t len,
@ -119,40 +117,34 @@ public:
const HttpHeaders::ptr_t & headers); const HttpHeaders::ptr_t & headers);
HttpStatus setupPost(HttpRequest::policy_t policy_id, HttpStatus setupPost(HttpRequest::policy_t policy_id,
HttpRequest::priority_t priority,
const std::string & url, const std::string & url,
BufferArray * body, BufferArray * body,
const HttpOptions::ptr_t & options, const HttpOptions::ptr_t & options,
const HttpHeaders::ptr_t & headers); const HttpHeaders::ptr_t & headers);
HttpStatus setupPut(HttpRequest::policy_t policy_id, HttpStatus setupPut(HttpRequest::policy_t policy_id,
HttpRequest::priority_t priority,
const std::string & url, const std::string & url,
BufferArray * body, BufferArray * body,
const HttpOptions::ptr_t & options, const HttpOptions::ptr_t & options,
const HttpHeaders::ptr_t & headers); const HttpHeaders::ptr_t & headers);
HttpStatus setupDelete(HttpRequest::policy_t policy_id, HttpStatus setupDelete(HttpRequest::policy_t policy_id,
HttpRequest::priority_t priority,
const std::string & url, const std::string & url,
const HttpOptions::ptr_t & options, const HttpOptions::ptr_t & options,
const HttpHeaders::ptr_t & headers); const HttpHeaders::ptr_t & headers);
HttpStatus setupPatch(HttpRequest::policy_t policy_id, HttpStatus setupPatch(HttpRequest::policy_t policy_id,
HttpRequest::priority_t priority,
const std::string & url, const std::string & url,
BufferArray * body, BufferArray * body,
const HttpOptions::ptr_t & options, const HttpOptions::ptr_t & options,
const HttpHeaders::ptr_t & headers); const HttpHeaders::ptr_t & headers);
HttpStatus setupCopy(HttpRequest::policy_t policy_id, HttpStatus setupCopy(HttpRequest::policy_t policy_id,
HttpRequest::priority_t priority,
const std::string & url, const std::string & url,
const HttpOptions::ptr_t & options, const HttpOptions::ptr_t & options,
const HttpHeaders::ptr_t & headers); const HttpHeaders::ptr_t & headers);
HttpStatus setupMove(HttpRequest::policy_t policy_id, HttpStatus setupMove(HttpRequest::policy_t policy_id,
HttpRequest::priority_t priority,
const std::string & url, const std::string & url,
const HttpOptions::ptr_t & options, const HttpOptions::ptr_t & options,
const HttpHeaders::ptr_t & headers); const HttpHeaders::ptr_t & headers);
@ -172,7 +164,6 @@ protected:
// Threading: called by application thread // Threading: called by application thread
// //
void setupCommon(HttpRequest::policy_t policy_id, void setupCommon(HttpRequest::policy_t policy_id,
HttpRequest::priority_t priority,
const std::string & url, const std::string & url,
BufferArray * body, BufferArray * body,
const HttpOptions::ptr_t & options, const HttpOptions::ptr_t & options,
@ -239,19 +230,6 @@ public:
/// HttpOpRequestCompare isn't an operation but a uniform comparison
/// functor for STL containers that order by priority. Mainly
/// used for the ready queue container but defined here.
class HttpOpRequestCompare
{
public:
bool operator()(const HttpOpRequest * lhs, const HttpOpRequest * rhs)
{
return lhs->mReqPriority > rhs->mReqPriority;
}
}; // end class HttpOpRequestCompare
// --------------------------------------- // ---------------------------------------
// Free functions // Free functions
// --------------------------------------- // ---------------------------------------
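The header change mirrors the .cpp side: every setup*() overload loses its HttpRequest::priority_t argument, and with HttpOpRequestCompare deleted the ready queue no longer orders requests by priority. Inside the library a setup call now reads as below (a sketch; policy_id, url, options and headers are whatever the caller already holds):

    HttpOpRequest::ptr_t op(new HttpOpRequest());
    // priority argument removed from every setup*() overload
    HttpStatus status = op->setupGet(policy_id, url, options, headers);
    if (!status)
    {
        // setup failed; status carries the reason and the op is discarded
    }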


@ -24,6 +24,7 @@
* $/LicenseInfo$ * $/LicenseInfo$
*/ */
#if 0 // DEPRECATED
#include "_httpopsetpriority.h" #include "_httpopsetpriority.h"
#include "httpresponse.h" #include "httpresponse.h"
@ -61,3 +62,5 @@ void HttpOpSetPriority::stageFromRequest(HttpService * service)
} // end namespace LLCore } // end namespace LLCore
#endif


@ -27,7 +27,7 @@
#ifndef _LLCORE_HTTP_SETPRIORITY_H_ #ifndef _LLCORE_HTTP_SETPRIORITY_H_
#define _LLCORE_HTTP_SETPRIORITY_H_ #define _LLCORE_HTTP_SETPRIORITY_H_
#if 0 // DEPRECATED
#include "httpcommon.h" #include "httpcommon.h"
#include "httprequest.h" #include "httprequest.h"
#include "_httpoperation.h" #include "_httpoperation.h"
@ -49,7 +49,7 @@ namespace LLCore
class HttpOpSetPriority : public HttpOperation class HttpOpSetPriority : public HttpOperation
{ {
public: public:
HttpOpSetPriority(HttpHandle handle, HttpRequest::priority_t priority); HttpOpSetPriority(HttpHandle handle);
virtual ~HttpOpSetPriority(); virtual ~HttpOpSetPriority();
@ -63,10 +63,10 @@ public:
protected: protected:
// Request Data // Request Data
HttpHandle mHandle; HttpHandle mHandle;
HttpRequest::priority_t mPriority;
}; // end class HttpOpSetPriority }; // end class HttpOpSetPriority
} // end namespace LLCore } // end namespace LLCore
#endif
#endif // _LLCORE_HTTP_SETPRIORITY_H_ #endif // _LLCORE_HTTP_SETPRIORITY_H_
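HttpOpSetPriority is retired without deleting its files: both the header and the .cpp are wrapped in #if 0 // DEPRECATED blocks, so the class drops out of the build while the source stays in place for reference. The resulting header shape, condensed:

    #ifndef _LLCORE_HTTP_SETPRIORITY_H_
    #define _LLCORE_HTTP_SETPRIORITY_H_
    #if 0 // DEPRECATED -- priority support removed from LLCore::HttpRequest

    // ... former HttpOpSetPriority declaration, kept for reference but never compiled ...

    #endif
    #endif // _LLCORE_HTTP_SETPRIORITY_H_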


@ -330,37 +330,6 @@ HttpService::ELoopSpeed HttpPolicy::processReadyQueue()
return result; return result;
} }
bool HttpPolicy::changePriority(HttpHandle handle, HttpRequest::priority_t priority)
{
for (int policy_class(0); policy_class < mClasses.size(); ++policy_class)
{
ClassState & state(*mClasses[policy_class]);
// We don't scan retry queue because a priority change there
// is meaningless. The request will be issued based on retry
// intervals not priority value, which is now moot.
// Scan ready queue for requests that match policy
HttpReadyQueue::container_type & c(state.mReadyQueue.get_container());
for (HttpReadyQueue::container_type::iterator iter(c.begin()); c.end() != iter;)
{
HttpReadyQueue::container_type::iterator cur(iter++);
if ((*cur)->getHandle() == handle)
{
HttpOpRequest::ptr_t op(*cur);
c.erase(cur); // All iterators are now invalidated
op->mReqPriority = priority;
state.mReadyQueue.push(op); // Re-insert using adapter class
return true;
}
}
}
return false;
}
bool HttpPolicy::cancel(HttpHandle handle) bool HttpPolicy::cancel(HttpHandle handle)
{ {
for (int policy_class(0); policy_class < mClasses.size(); ++policy_class) for (int policy_class(0); policy_class < mClasses.size(); ++policy_class)


@ -110,12 +110,6 @@ public:
/// Threading: called by worker thread /// Threading: called by worker thread
void retryOp(const opReqPtr_t &); void retryOp(const opReqPtr_t &);
/// Attempt to change the priority of an earlier request.
/// Request that Shadows HttpService's method
///
/// Threading: called by worker thread
bool changePriority(HttpHandle handle, HttpRequest::priority_t priority);
/// Attempt to cancel a previous request. /// Attempt to cancel a previous request.
/// Shadows HttpService's method as well /// Shadows HttpService's method as well
/// ///


@ -80,6 +80,7 @@ HttpService::HttpService()
HttpService::~HttpService() HttpService::~HttpService()
{ {
LL_PROFILE_ZONE_SCOPED_CATEGORY_NETWORK;
mExitRequested = 1U; mExitRequested = 1U;
if (RUNNING == sState) if (RUNNING == sState)
{ {
@ -131,6 +132,7 @@ HttpService::~HttpService()
void HttpService::init(HttpRequestQueue * queue) void HttpService::init(HttpRequestQueue * queue)
{ {
LL_PROFILE_ZONE_SCOPED_CATEGORY_NETWORK;
llassert_always(! sInstance); llassert_always(! sInstance);
llassert_always(NOT_INITIALIZED == sState); llassert_always(NOT_INITIALIZED == sState);
sInstance = new HttpService(); sInstance = new HttpService();
@ -145,6 +147,7 @@ void HttpService::init(HttpRequestQueue * queue)
void HttpService::term() void HttpService::term()
{ {
LL_PROFILE_ZONE_SCOPED_CATEGORY_NETWORK;
if (sInstance) if (sInstance)
{ {
if (RUNNING == sState && sInstance->mThread) if (RUNNING == sState && sInstance->mThread)
@ -196,6 +199,7 @@ bool HttpService::isStopped()
/// Threading: callable by consumer thread *once*. /// Threading: callable by consumer thread *once*.
void HttpService::startThread() void HttpService::startThread()
{ {
LL_PROFILE_ZONE_SCOPED_CATEGORY_NETWORK;
llassert_always(! mThread || STOPPED == sState); llassert_always(! mThread || STOPPED == sState);
llassert_always(INITIALIZED == sState || STOPPED == sState); llassert_always(INITIALIZED == sState || STOPPED == sState);
@ -220,22 +224,6 @@ void HttpService::stopRequested()
} }
/// Threading: callable by worker thread.
bool HttpService::changePriority(HttpHandle handle, HttpRequest::priority_t priority)
{
bool found(false);
// Skip the request queue as we currently don't leave earlier
// requests sitting there. Start with the ready queue...
found = mPolicy->changePriority(handle, priority);
// If not there, we could try the transport/active queue but priority
// doesn't really have much effect there so we don't waste cycles.
return found;
}
/// Try to find the given request handle on any of the request /// Try to find the given request handle on any of the request
/// queues and cancel the operation. /// queues and cancel the operation.
/// ///
@ -244,6 +232,7 @@ bool HttpService::changePriority(HttpHandle handle, HttpRequest::priority_t prio
/// Threading: callable by worker thread. /// Threading: callable by worker thread.
bool HttpService::cancel(HttpHandle handle) bool HttpService::cancel(HttpHandle handle)
{ {
LL_PROFILE_ZONE_SCOPED_CATEGORY_NETWORK;
bool canceled(false); bool canceled(false);
// Request can't be on request queue so skip that. // Request can't be on request queue so skip that.
@ -264,6 +253,7 @@ bool HttpService::cancel(HttpHandle handle)
/// Threading: callable by worker thread. /// Threading: callable by worker thread.
void HttpService::shutdown() void HttpService::shutdown()
{ {
LL_PROFILE_ZONE_SCOPED_CATEGORY_NETWORK;
// Disallow future enqueue of requests // Disallow future enqueue of requests
mRequestQueue->stopQueue(); mRequestQueue->stopQueue();
@ -293,6 +283,8 @@ void HttpService::shutdown()
// requested to stop. // requested to stop.
void HttpService::threadRun(LLCoreInt::HttpThread * thread) void HttpService::threadRun(LLCoreInt::HttpThread * thread)
{ {
LL_PROFILER_SET_THREAD_NAME("HttpService");
boost::this_thread::disable_interruption di; boost::this_thread::disable_interruption di;
LLThread::registerThreadID(); LLThread::registerThreadID();
@ -300,6 +292,7 @@ void HttpService::threadRun(LLCoreInt::HttpThread * thread)
ELoopSpeed loop(REQUEST_SLEEP); ELoopSpeed loop(REQUEST_SLEEP);
while (! mExitRequested) while (! mExitRequested)
{ {
LL_PROFILE_ZONE_SCOPED_CATEGORY_NETWORK;
try try
{ {
loop = processRequestQueue(loop); loop = processRequestQueue(loop);
@ -344,6 +337,7 @@ void HttpService::threadRun(LLCoreInt::HttpThread * thread)
HttpService::ELoopSpeed HttpService::processRequestQueue(ELoopSpeed loop) HttpService::ELoopSpeed HttpService::processRequestQueue(ELoopSpeed loop)
{ {
LL_PROFILE_ZONE_SCOPED_CATEGORY_NETWORK;
HttpRequestQueue::OpContainer ops; HttpRequestQueue::OpContainer ops;
const bool wait_for_req(REQUEST_SLEEP == loop); const bool wait_for_req(REQUEST_SLEEP == loop);
@ -384,6 +378,7 @@ HttpService::ELoopSpeed HttpService::processRequestQueue(ELoopSpeed loop)
HttpStatus HttpService::getPolicyOption(HttpRequest::EPolicyOption opt, HttpRequest::policy_t pclass, HttpStatus HttpService::getPolicyOption(HttpRequest::EPolicyOption opt, HttpRequest::policy_t pclass,
long * ret_value) long * ret_value)
{ {
LL_PROFILE_ZONE_SCOPED_CATEGORY_NETWORK;
if (opt < HttpRequest::PO_CONNECTION_LIMIT // option must be in range if (opt < HttpRequest::PO_CONNECTION_LIMIT // option must be in range
|| opt >= HttpRequest::PO_LAST // ditto || opt >= HttpRequest::PO_LAST // ditto
|| (! sOptionDesc[opt].mIsLong) // datatype is long || (! sOptionDesc[opt].mIsLong) // datatype is long
@ -416,6 +411,7 @@ HttpStatus HttpService::getPolicyOption(HttpRequest::EPolicyOption opt, HttpRequ
HttpStatus HttpService::getPolicyOption(HttpRequest::EPolicyOption opt, HttpRequest::policy_t pclass, HttpStatus HttpService::getPolicyOption(HttpRequest::EPolicyOption opt, HttpRequest::policy_t pclass,
std::string * ret_value) std::string * ret_value)
{ {
LL_PROFILE_ZONE_SCOPED_CATEGORY_NETWORK;
HttpStatus status(HttpStatus::LLCORE, LLCore::HE_INVALID_ARG); HttpStatus status(HttpStatus::LLCORE, LLCore::HE_INVALID_ARG);
if (opt < HttpRequest::PO_CONNECTION_LIMIT // option must be in range if (opt < HttpRequest::PO_CONNECTION_LIMIT // option must be in range
@ -443,6 +439,7 @@ HttpStatus HttpService::getPolicyOption(HttpRequest::EPolicyOption opt, HttpRequ
HttpStatus HttpService::getPolicyOption(HttpRequest::EPolicyOption opt, HttpRequest::policy_t pclass, HttpStatus HttpService::getPolicyOption(HttpRequest::EPolicyOption opt, HttpRequest::policy_t pclass,
HttpRequest::policyCallback_t * ret_value) HttpRequest::policyCallback_t * ret_value)
{ {
LL_PROFILE_ZONE_SCOPED_CATEGORY_NETWORK;
HttpStatus status(HttpStatus::LLCORE, LLCore::HE_INVALID_ARG); HttpStatus status(HttpStatus::LLCORE, LLCore::HE_INVALID_ARG);
if (opt < HttpRequest::PO_CONNECTION_LIMIT // option must be in range if (opt < HttpRequest::PO_CONNECTION_LIMIT // option must be in range
@ -472,6 +469,7 @@ HttpStatus HttpService::getPolicyOption(HttpRequest::EPolicyOption opt, HttpRequ
HttpStatus HttpService::setPolicyOption(HttpRequest::EPolicyOption opt, HttpRequest::policy_t pclass, HttpStatus HttpService::setPolicyOption(HttpRequest::EPolicyOption opt, HttpRequest::policy_t pclass,
long value, long * ret_value) long value, long * ret_value)
{ {
LL_PROFILE_ZONE_SCOPED_CATEGORY_NETWORK;
HttpStatus status(HttpStatus::LLCORE, LLCore::HE_INVALID_ARG); HttpStatus status(HttpStatus::LLCORE, LLCore::HE_INVALID_ARG);
if (opt < HttpRequest::PO_CONNECTION_LIMIT // option must be in range if (opt < HttpRequest::PO_CONNECTION_LIMIT // option must be in range
@ -517,6 +515,7 @@ HttpStatus HttpService::setPolicyOption(HttpRequest::EPolicyOption opt, HttpRequ
HttpStatus HttpService::setPolicyOption(HttpRequest::EPolicyOption opt, HttpRequest::policy_t pclass, HttpStatus HttpService::setPolicyOption(HttpRequest::EPolicyOption opt, HttpRequest::policy_t pclass,
const std::string & value, std::string * ret_value) const std::string & value, std::string * ret_value)
{ {
LL_PROFILE_ZONE_SCOPED_CATEGORY_NETWORK;
HttpStatus status(HttpStatus::LLCORE, LLCore::HE_INVALID_ARG); HttpStatus status(HttpStatus::LLCORE, LLCore::HE_INVALID_ARG);
if (opt < HttpRequest::PO_CONNECTION_LIMIT // option must be in range if (opt < HttpRequest::PO_CONNECTION_LIMIT // option must be in range
@ -548,6 +547,7 @@ HttpStatus HttpService::setPolicyOption(HttpRequest::EPolicyOption opt, HttpRequ
HttpStatus HttpService::setPolicyOption(HttpRequest::EPolicyOption opt, HttpRequest::policy_t pclass, HttpStatus HttpService::setPolicyOption(HttpRequest::EPolicyOption opt, HttpRequest::policy_t pclass,
HttpRequest::policyCallback_t value, HttpRequest::policyCallback_t * ret_value) HttpRequest::policyCallback_t value, HttpRequest::policyCallback_t * ret_value)
{ {
LL_PROFILE_ZONE_SCOPED_CATEGORY_NETWORK;
HttpStatus status(HttpStatus::LLCORE, LLCore::HE_INVALID_ARG); HttpStatus status(HttpStatus::LLCORE, LLCore::HE_INVALID_ARG);
if (opt < HttpRequest::PO_CONNECTION_LIMIT // option must be in range if (opt < HttpRequest::PO_CONNECTION_LIMIT // option must be in range
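Beyond removing changePriority(), _httpservice.cpp names its worker thread for the profiler and opens a network zone on every pass of the service loop, which is what makes the per-call zones above attributable to this thread. A condensed sketch of the new threadRun() shape (registerThreadID(), the try/catch and the shutdown handshake are elided):

    void HttpService::threadRun(LLCoreInt::HttpThread* thread)
    {
        LL_PROFILER_SET_THREAD_NAME("HttpService");    // label the thread in profiler captures
        ELoopSpeed loop(REQUEST_SLEEP);
        while (!mExitRequested)
        {
            LL_PROFILE_ZONE_SCOPED_CATEGORY_NETWORK;   // one zone per service-loop iteration
            loop = processRequestQueue(loop);          // policy and transport stages elided
        }
    }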


@ -146,15 +146,6 @@ public:
/// Threading: callable by worker thread. /// Threading: callable by worker thread.
void shutdown(); void shutdown();
/// Try to find the given request handle on any of the request
/// queues and reset the priority (and queue position) of the
/// request if found.
///
/// @return True if the request was found somewhere.
///
/// Threading: callable by worker thread.
bool changePriority(HttpHandle handle, HttpRequest::priority_t priority);
/// Try to find the given request handle on any of the request /// Try to find the given request handle on any of the request
/// queues and cancel the operation. /// queues and cancel the operation.
/// ///


@ -469,11 +469,11 @@ bool WorkingSet::reload(LLCore::HttpRequest * hr, LLCore::HttpOptions::ptr_t & o
LLCore::HttpHandle handle; LLCore::HttpHandle handle;
if (offset || length) if (offset || length)
{ {
handle = hr->requestGetByteRange(0, 0, buffer, offset, length, opt, mHeaders, LLCore::HttpHandler::ptr_t(this, NoOpDeletor)); handle = hr->requestGetByteRange(0, buffer, offset, length, opt, mHeaders, LLCore::HttpHandler::ptr_t(this, NoOpDeletor));
} }
else else
{ {
handle = hr->requestGet(0, 0, buffer, opt, mHeaders, LLCore::HttpHandler::ptr_t(this, NoOpDeletor)); handle = hr->requestGet(0, buffer, opt, mHeaders, LLCore::HttpHandler::ptr_t(this, NoOpDeletor));
} }
if (! handle) if (! handle)
{ {
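Client code changes mechanically: the old priority argument (always a literal 0 or 0U at these call sites) is simply removed, with everything else positionally unchanged. The updated byte-range request from WorkingSet::reload(), with its usual invalid-handle check:

    LLCore::HttpHandle handle =
        hr->requestGetByteRange(0,                 // policy id; priority argument is gone
                                buffer,            // URL string, named as in the snippet above
                                offset, length,
                                opt, mHeaders,
                                LLCore::HttpHandler::ptr_t(this, NoOpDeletor));
    if (!handle)
    {
        // rejected up front; hr->getStatus() explains why
    }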


@ -32,7 +32,6 @@
#include "_httppolicy.h" #include "_httppolicy.h"
#include "_httpoperation.h" #include "_httpoperation.h"
#include "_httpoprequest.h" #include "_httpoprequest.h"
#include "_httpopsetpriority.h"
#include "_httpopcancel.h" #include "_httpopcancel.h"
#include "_httpopsetget.h" #include "_httpopsetget.h"
@ -183,16 +182,16 @@ HttpStatus HttpRequest::getStatus() const
HttpHandle HttpRequest::requestGet(policy_t policy_id, HttpHandle HttpRequest::requestGet(policy_t policy_id,
priority_t priority,
const std::string & url, const std::string & url,
const HttpOptions::ptr_t & options, const HttpOptions::ptr_t & options,
const HttpHeaders::ptr_t & headers, const HttpHeaders::ptr_t & headers,
HttpHandler::ptr_t user_handler) HttpHandler::ptr_t user_handler)
{ {
LL_PROFILE_ZONE_SCOPED_CATEGORY_NETWORK;
HttpStatus status; HttpStatus status;
HttpOpRequest::ptr_t op(new HttpOpRequest()); HttpOpRequest::ptr_t op(new HttpOpRequest());
if (! (status = op->setupGet(policy_id, priority, url, options, headers))) if (! (status = op->setupGet(policy_id, url, options, headers)))
{ {
mLastReqStatus = status; mLastReqStatus = status;
return LLCORE_HTTP_HANDLE_INVALID; return LLCORE_HTTP_HANDLE_INVALID;
@ -210,7 +209,6 @@ HttpHandle HttpRequest::requestGet(policy_t policy_id,
HttpHandle HttpRequest::requestGetByteRange(policy_t policy_id, HttpHandle HttpRequest::requestGetByteRange(policy_t policy_id,
priority_t priority,
const std::string & url, const std::string & url,
size_t offset, size_t offset,
size_t len, size_t len,
@ -218,10 +216,11 @@ HttpHandle HttpRequest::requestGetByteRange(policy_t policy_id,
const HttpHeaders::ptr_t & headers, const HttpHeaders::ptr_t & headers,
HttpHandler::ptr_t user_handler) HttpHandler::ptr_t user_handler)
{ {
LL_PROFILE_ZONE_SCOPED_CATEGORY_NETWORK;
HttpStatus status; HttpStatus status;
HttpOpRequest::ptr_t op(new HttpOpRequest()); HttpOpRequest::ptr_t op(new HttpOpRequest());
if (! (status = op->setupGetByteRange(policy_id, priority, url, offset, len, options, headers))) if (! (status = op->setupGetByteRange(policy_id, url, offset, len, options, headers)))
{ {
mLastReqStatus = status; mLastReqStatus = status;
return LLCORE_HTTP_HANDLE_INVALID; return LLCORE_HTTP_HANDLE_INVALID;
@ -239,7 +238,6 @@ HttpHandle HttpRequest::requestGetByteRange(policy_t policy_id,
HttpHandle HttpRequest::requestPost(policy_t policy_id, HttpHandle HttpRequest::requestPost(policy_t policy_id,
priority_t priority,
const std::string & url, const std::string & url,
BufferArray * body, BufferArray * body,
const HttpOptions::ptr_t & options, const HttpOptions::ptr_t & options,
@ -249,7 +247,7 @@ HttpHandle HttpRequest::requestPost(policy_t policy_id,
HttpStatus status; HttpStatus status;
HttpOpRequest::ptr_t op(new HttpOpRequest()); HttpOpRequest::ptr_t op(new HttpOpRequest());
if (! (status = op->setupPost(policy_id, priority, url, body, options, headers))) if (! (status = op->setupPost(policy_id, url, body, options, headers)))
{ {
mLastReqStatus = status; mLastReqStatus = status;
return LLCORE_HTTP_HANDLE_INVALID; return LLCORE_HTTP_HANDLE_INVALID;
@ -267,7 +265,6 @@ HttpHandle HttpRequest::requestPost(policy_t policy_id,
HttpHandle HttpRequest::requestPut(policy_t policy_id, HttpHandle HttpRequest::requestPut(policy_t policy_id,
priority_t priority,
const std::string & url, const std::string & url,
BufferArray * body, BufferArray * body,
const HttpOptions::ptr_t & options, const HttpOptions::ptr_t & options,
@ -277,7 +274,7 @@ HttpHandle HttpRequest::requestPut(policy_t policy_id,
HttpStatus status; HttpStatus status;
HttpOpRequest::ptr_t op (new HttpOpRequest()); HttpOpRequest::ptr_t op (new HttpOpRequest());
if (! (status = op->setupPut(policy_id, priority, url, body, options, headers))) if (! (status = op->setupPut(policy_id, url, body, options, headers)))
{ {
mLastReqStatus = status; mLastReqStatus = status;
return LLCORE_HTTP_HANDLE_INVALID; return LLCORE_HTTP_HANDLE_INVALID;
@ -294,7 +291,6 @@ HttpHandle HttpRequest::requestPut(policy_t policy_id,
} }
HttpHandle HttpRequest::requestDelete(policy_t policy_id, HttpHandle HttpRequest::requestDelete(policy_t policy_id,
priority_t priority,
const std::string & url, const std::string & url,
const HttpOptions::ptr_t & options, const HttpOptions::ptr_t & options,
const HttpHeaders::ptr_t & headers, const HttpHeaders::ptr_t & headers,
@ -303,7 +299,7 @@ HttpHandle HttpRequest::requestDelete(policy_t policy_id,
HttpStatus status; HttpStatus status;
HttpOpRequest::ptr_t op(new HttpOpRequest()); HttpOpRequest::ptr_t op(new HttpOpRequest());
if (!(status = op->setupDelete(policy_id, priority, url, options, headers))) if (!(status = op->setupDelete(policy_id, url, options, headers)))
{ {
mLastReqStatus = status; mLastReqStatus = status;
return LLCORE_HTTP_HANDLE_INVALID; return LLCORE_HTTP_HANDLE_INVALID;
@ -320,7 +316,6 @@ HttpHandle HttpRequest::requestDelete(policy_t policy_id,
} }
HttpHandle HttpRequest::requestPatch(policy_t policy_id, HttpHandle HttpRequest::requestPatch(policy_t policy_id,
priority_t priority,
const std::string & url, const std::string & url,
BufferArray * body, BufferArray * body,
const HttpOptions::ptr_t & options, const HttpOptions::ptr_t & options,
@ -330,7 +325,7 @@ HttpHandle HttpRequest::requestPatch(policy_t policy_id,
HttpStatus status; HttpStatus status;
HttpOpRequest::ptr_t op (new HttpOpRequest()); HttpOpRequest::ptr_t op (new HttpOpRequest());
if (!(status = op->setupPatch(policy_id, priority, url, body, options, headers))) if (!(status = op->setupPatch(policy_id, url, body, options, headers)))
{ {
mLastReqStatus = status; mLastReqStatus = status;
return LLCORE_HTTP_HANDLE_INVALID; return LLCORE_HTTP_HANDLE_INVALID;
@ -347,7 +342,6 @@ HttpHandle HttpRequest::requestPatch(policy_t policy_id,
} }
HttpHandle HttpRequest::requestCopy(policy_t policy_id, HttpHandle HttpRequest::requestCopy(policy_t policy_id,
priority_t priority,
const std::string & url, const std::string & url,
const HttpOptions::ptr_t & options, const HttpOptions::ptr_t & options,
const HttpHeaders::ptr_t & headers, const HttpHeaders::ptr_t & headers,
@ -356,7 +350,7 @@ HttpHandle HttpRequest::requestCopy(policy_t policy_id,
HttpStatus status; HttpStatus status;
HttpOpRequest::ptr_t op(new HttpOpRequest()); HttpOpRequest::ptr_t op(new HttpOpRequest());
if (!(status = op->setupCopy(policy_id, priority, url, options, headers))) if (!(status = op->setupCopy(policy_id, url, options, headers)))
{ {
mLastReqStatus = status; mLastReqStatus = status;
return LLCORE_HTTP_HANDLE_INVALID; return LLCORE_HTTP_HANDLE_INVALID;
@ -374,7 +368,6 @@ HttpHandle HttpRequest::requestCopy(policy_t policy_id,
} }
HttpHandle HttpRequest::requestMove(policy_t policy_id, HttpHandle HttpRequest::requestMove(policy_t policy_id,
priority_t priority,
const std::string & url, const std::string & url,
const HttpOptions::ptr_t & options, const HttpOptions::ptr_t & options,
const HttpHeaders::ptr_t & headers, const HttpHeaders::ptr_t & headers,
@ -383,7 +376,7 @@ HttpHandle HttpRequest::requestMove(policy_t policy_id,
HttpStatus status; HttpStatus status;
HttpOpRequest::ptr_t op (new HttpOpRequest()); HttpOpRequest::ptr_t op (new HttpOpRequest());
if (!(status = op->setupMove(policy_id, priority, url, options, headers))) if (!(status = op->setupMove(policy_id, url, options, headers)))
{ {
mLastReqStatus = status; mLastReqStatus = status;
return LLCORE_HTTP_HANDLE_INVALID; return LLCORE_HTTP_HANDLE_INVALID;
@ -483,24 +476,6 @@ HttpHandle HttpRequest::requestCancel(HttpHandle request, HttpHandler::ptr_t use
} }
HttpHandle HttpRequest::requestSetPriority(HttpHandle request, priority_t priority,
HttpHandler::ptr_t handler)
{
HttpStatus status;
HttpOperation::ptr_t op (new HttpOpSetPriority(request, priority));
op->setReplyPath(mReplyQueue, handler);
if (! (status = mRequestQueue->addOp(op))) // transfers refcount
{
mLastReqStatus = status;
return LLCORE_HTTP_HANDLE_INVALID;
}
mLastReqStatus = status;
return op->getHandle();
}
// ==================================== // ====================================
// Utility Methods // Utility Methods
// ==================================== // ====================================
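For consumers of LLCore::HttpRequest, the visible change is that every request*() entry point drops the U32-style priority and requestSetPriority() is removed entirely. A hedged sketch of issuing a GET through the new signatures (the URL and handler are placeholders, not taken from the viewer):

    LLCore::HttpRequest::ptr_t req(new LLCore::HttpRequest());
    LLCore::HttpOptions::ptr_t opts(new LLCore::HttpOptions());

    LLCore::HttpHandle h = req->requestGet(LLCore::HttpRequest::DEFAULT_POLICY_ID,
                                           "https://example.invalid/asset",   // placeholder URL
                                           opts,
                                           LLCore::HttpHeaders::ptr_t(),      // no extra headers
                                           handler);                          // caller's HttpHandler::ptr_t
    if (h == LLCORE_HTTP_HANDLE_INVALID)
    {
        LLCore::HttpStatus status = req->getStatus();   // why the request was rejected
    }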


@ -95,7 +95,6 @@ private:
public: public:
typedef unsigned int policy_t; typedef unsigned int policy_t;
typedef unsigned int priority_t;
typedef boost::shared_ptr<HttpRequest> ptr_t; typedef boost::shared_ptr<HttpRequest> ptr_t;
typedef boost::weak_ptr<HttpRequest> wptr_t; typedef boost::weak_ptr<HttpRequest> wptr_t;
@ -316,8 +315,6 @@ public:
/// ///
/// @param policy_id Default or user-defined policy class under /// @param policy_id Default or user-defined policy class under
/// which this request is to be serviced. /// which this request is to be serviced.
/// @param priority Standard priority scheme inherited from
/// Indra code base (U32-type scheme).
/// @param url URL with any encoded query parameters to /// @param url URL with any encoded query parameters to
/// be accessed. /// be accessed.
/// @param options Optional instance of an HttpOptions object /// @param options Optional instance of an HttpOptions object
@ -346,7 +343,6 @@ public:
/// case, @see getStatus() will return more info. /// case, @see getStatus() will return more info.
/// ///
HttpHandle requestGet(policy_t policy_id, HttpHandle requestGet(policy_t policy_id,
priority_t priority,
const std::string & url, const std::string & url,
const HttpOptions::ptr_t & options, const HttpOptions::ptr_t & options,
const HttpHeaders::ptr_t & headers, const HttpHeaders::ptr_t & headers,
@ -377,7 +373,6 @@ public:
/// - Referer: /// - Referer:
/// ///
/// @param policy_id @see requestGet() /// @param policy_id @see requestGet()
/// @param priority "
/// @param url " /// @param url "
/// @param offset Offset of first byte into resource to be returned. /// @param offset Offset of first byte into resource to be returned.
/// @param len Count of bytes to be returned /// @param len Count of bytes to be returned
@ -387,7 +382,6 @@ public:
/// @return " /// @return "
/// ///
HttpHandle requestGetByteRange(policy_t policy_id, HttpHandle requestGetByteRange(policy_t policy_id,
priority_t priority,
const std::string & url, const std::string & url,
size_t offset, size_t offset,
size_t len, size_t len,
@ -418,7 +412,6 @@ public:
/// - Expect: /// - Expect:
/// ///
/// @param policy_id @see requestGet() /// @param policy_id @see requestGet()
/// @param priority "
/// @param url " /// @param url "
/// @param body Byte stream to be sent as the body. No /// @param body Byte stream to be sent as the body. No
/// further encoding or escaping will be done /// further encoding or escaping will be done
@ -429,7 +422,6 @@ public:
/// @return " /// @return "
/// ///
HttpHandle requestPost(policy_t policy_id, HttpHandle requestPost(policy_t policy_id,
priority_t priority,
const std::string & url, const std::string & url,
BufferArray * body, BufferArray * body,
const HttpOptions::ptr_t & options, const HttpOptions::ptr_t & options,
@ -459,7 +451,6 @@ public:
/// - Content-Type: /// - Content-Type:
/// ///
/// @param policy_id @see requestGet() /// @param policy_id @see requestGet()
/// @param priority "
/// @param url " /// @param url "
/// @param body Byte stream to be sent as the body. No /// @param body Byte stream to be sent as the body. No
/// further encoding or escaping will be done /// further encoding or escaping will be done
@ -470,7 +461,6 @@ public:
/// @return " /// @return "
/// ///
HttpHandle requestPut(policy_t policy_id, HttpHandle requestPut(policy_t policy_id,
priority_t priority,
const std::string & url, const std::string & url,
BufferArray * body, BufferArray * body,
const HttpOptions::ptr_t & options, const HttpOptions::ptr_t & options,
@ -483,7 +473,6 @@ public:
/// encoding and communicating the content types. /// encoding and communicating the content types.
/// ///
/// @param policy_id @see requestGet() /// @param policy_id @see requestGet()
/// @param priority "
/// @param url " /// @param url "
/// @param options @see requestGet()K(optional) /// @param options @see requestGet()K(optional)
/// @param headers " /// @param headers "
@ -491,7 +480,6 @@ public:
/// @return " /// @return "
/// ///
HttpHandle requestDelete(policy_t policy_id, HttpHandle requestDelete(policy_t policy_id,
priority_t priority,
const std::string & url, const std::string & url,
const HttpOptions::ptr_t & options, const HttpOptions::ptr_t & options,
const HttpHeaders::ptr_t & headers, const HttpHeaders::ptr_t & headers,
@ -502,7 +490,6 @@ public:
/// encoding and communicating the content types. /// encoding and communicating the content types.
/// ///
/// @param policy_id @see requestGet() /// @param policy_id @see requestGet()
/// @param priority "
/// @param url " /// @param url "
/// @param body Byte stream to be sent as the body. No /// @param body Byte stream to be sent as the body. No
/// further encoding or escaping will be done /// further encoding or escaping will be done
@ -513,7 +500,6 @@ public:
/// @return " /// @return "
/// ///
HttpHandle requestPatch(policy_t policy_id, HttpHandle requestPatch(policy_t policy_id,
priority_t priority,
const std::string & url, const std::string & url,
BufferArray * body, BufferArray * body,
const HttpOptions::ptr_t & options, const HttpOptions::ptr_t & options,
@ -525,7 +511,6 @@ public:
/// encoding and communicating the content types. /// encoding and communicating the content types.
/// ///
/// @param policy_id @see requestGet() /// @param policy_id @see requestGet()
/// @param priority "
/// @param url " /// @param url "
/// @param options @see requestGet()K(optional) /// @param options @see requestGet()K(optional)
/// @param headers " /// @param headers "
@ -533,7 +518,6 @@ public:
/// @return " /// @return "
/// ///
HttpHandle requestCopy(policy_t policy_id, HttpHandle requestCopy(policy_t policy_id,
priority_t priority,
const std::string & url, const std::string & url,
const HttpOptions::ptr_t & options, const HttpOptions::ptr_t & options,
const HttpHeaders::ptr_t & headers, const HttpHeaders::ptr_t & headers,
@ -544,7 +528,6 @@ public:
/// encoding and communicating the content types. /// encoding and communicating the content types.
/// ///
/// @param policy_id @see requestGet() /// @param policy_id @see requestGet()
/// @param priority "
/// @param url " /// @param url "
/// @param options @see requestGet()K(optional) /// @param options @see requestGet()K(optional)
/// @param headers " /// @param headers "
@ -552,7 +535,6 @@ public:
/// @return " /// @return "
/// ///
HttpHandle requestMove(policy_t policy_id, HttpHandle requestMove(policy_t policy_id,
priority_t priority,
const std::string & url, const std::string & url,
const HttpOptions::ptr_t & options, const HttpOptions::ptr_t & options,
const HttpHeaders::ptr_t & headers, const HttpHeaders::ptr_t & headers,
@ -593,18 +575,6 @@ public:
HttpHandle requestCancel(HttpHandle request, HttpHandler::ptr_t); HttpHandle requestCancel(HttpHandle request, HttpHandler::ptr_t);
/// Request that a previously-issued request be reprioritized.
/// The status of whether the change itself succeeded arrives
/// via notification.
///
/// @param request Handle of previously-issued request to
/// be changed.
/// @param priority New priority value.
/// @param handler @see requestGet()
/// @return "
///
HttpHandle requestSetPriority(HttpHandle request, priority_t priority, HttpHandler::ptr_t handler);
/// @} /// @}
/// @name UtilityMethods /// @name UtilityMethods


@ -614,7 +614,6 @@ void HttpRequestTestObjectType::test<7>()
// Issue a GET that can't connect // Issue a GET that can't connect
mStatus = HttpStatus(HttpStatus::EXT_CURL_EASY, CURLE_COULDNT_CONNECT); mStatus = HttpStatus(HttpStatus::EXT_CURL_EASY, CURLE_COULDNT_CONNECT);
HttpHandle handle = req->requestGetByteRange(HttpRequest::DEFAULT_POLICY_ID, HttpHandle handle = req->requestGetByteRange(HttpRequest::DEFAULT_POLICY_ID,
0U,
"http://127.0.0.1:2/nothing/here", "http://127.0.0.1:2/nothing/here",
0, 0,
0, 0,
@ -716,7 +715,6 @@ void HttpRequestTestObjectType::test<8>()
// Issue a GET that *can* connect // Issue a GET that *can* connect
mStatus = HttpStatus(200); mStatus = HttpStatus(200);
HttpHandle handle = req->requestGet(HttpRequest::DEFAULT_POLICY_ID, HttpHandle handle = req->requestGet(HttpRequest::DEFAULT_POLICY_ID,
0U,
url_base, url_base,
HttpOptions::ptr_t(), HttpOptions::ptr_t(),
HttpHeaders::ptr_t(), HttpHeaders::ptr_t(),
@ -812,7 +810,6 @@ void HttpRequestTestObjectType::test<9>()
// Issue a GET that *can* connect // Issue a GET that *can* connect
mStatus = HttpStatus(200); mStatus = HttpStatus(200);
HttpHandle handle = req->requestGetByteRange(HttpRequest::DEFAULT_POLICY_ID, HttpHandle handle = req->requestGetByteRange(HttpRequest::DEFAULT_POLICY_ID,
0U,
url_base, url_base,
0, 0,
0, 0,
@ -913,7 +910,6 @@ void HttpRequestTestObjectType::test<10>()
body->append(body_text, strlen(body_text)); body->append(body_text, strlen(body_text));
mStatus = HttpStatus(200); mStatus = HttpStatus(200);
HttpHandle handle = req->requestPut(HttpRequest::DEFAULT_POLICY_ID, HttpHandle handle = req->requestPut(HttpRequest::DEFAULT_POLICY_ID,
0U,
url_base, url_base,
body, body,
HttpOptions::ptr_t(), HttpOptions::ptr_t(),
@ -1020,7 +1016,6 @@ void HttpRequestTestObjectType::test<11>()
body->append(body_text, strlen(body_text)); body->append(body_text, strlen(body_text));
mStatus = HttpStatus(200); mStatus = HttpStatus(200);
HttpHandle handle = req->requestPost(HttpRequest::DEFAULT_POLICY_ID, HttpHandle handle = req->requestPost(HttpRequest::DEFAULT_POLICY_ID,
0U,
url_base, url_base,
body, body,
HttpOptions::ptr_t(), HttpOptions::ptr_t(),
@ -1127,7 +1122,6 @@ void HttpRequestTestObjectType::test<12>()
// Issue a GET that *can* connect // Issue a GET that *can* connect
mStatus = HttpStatus(200); mStatus = HttpStatus(200);
HttpHandle handle = req->requestGetByteRange(HttpRequest::DEFAULT_POLICY_ID, HttpHandle handle = req->requestGetByteRange(HttpRequest::DEFAULT_POLICY_ID,
0U,
url_base, url_base,
0, 0,
0, 0,
@ -1240,7 +1234,6 @@ void HttpRequestTestObjectType::test<13>()
regex_container_t::value_type(boost::regex("X-LL-Special", boost::regex::icase), regex_container_t::value_type(boost::regex("X-LL-Special", boost::regex::icase),
boost::regex(".*", boost::regex::icase))); boost::regex(".*", boost::regex::icase)));
HttpHandle handle = req->requestGetByteRange(HttpRequest::DEFAULT_POLICY_ID, HttpHandle handle = req->requestGetByteRange(HttpRequest::DEFAULT_POLICY_ID,
0U,
url_base, url_base,
0, 0,
0, 0,
@ -1346,7 +1339,6 @@ void HttpRequestTestObjectType::test<14>()
// Issue a GET that sleeps // Issue a GET that sleeps
mStatus = HttpStatus(HttpStatus::EXT_CURL_EASY, CURLE_OPERATION_TIMEDOUT); mStatus = HttpStatus(HttpStatus::EXT_CURL_EASY, CURLE_OPERATION_TIMEDOUT);
HttpHandle handle = req->requestGetByteRange(HttpRequest::DEFAULT_POLICY_ID, HttpHandle handle = req->requestGetByteRange(HttpRequest::DEFAULT_POLICY_ID,
0U,
url_base, url_base,
0, 0,
0, 0,
@ -1454,7 +1446,6 @@ void HttpRequestTestObjectType::test<15>()
mStatus = HttpStatus(200); mStatus = HttpStatus(200);
handler.mCheckContentType = "application/llsd+xml"; handler.mCheckContentType = "application/llsd+xml";
HttpHandle handle = req->requestGet(HttpRequest::DEFAULT_POLICY_ID, HttpHandle handle = req->requestGet(HttpRequest::DEFAULT_POLICY_ID,
0U,
url_base, url_base,
HttpOptions::ptr_t(), HttpOptions::ptr_t(),
HttpHeaders::ptr_t(), HttpHeaders::ptr_t(),
@ -1609,7 +1600,6 @@ void HttpRequestTestObjectType::test<16>()
boost::regex("X-Reflect-content-encoding", boost::regex::icase), boost::regex("X-Reflect-content-encoding", boost::regex::icase),
boost::regex(".*", boost::regex::icase))); boost::regex(".*", boost::regex::icase)));
HttpHandle handle = req->requestGet(HttpRequest::DEFAULT_POLICY_ID, HttpHandle handle = req->requestGet(HttpRequest::DEFAULT_POLICY_ID,
0U,
url_base + "reflect/", url_base + "reflect/",
options, options,
HttpHeaders::ptr_t(), HttpHeaders::ptr_t(),
@ -1684,7 +1674,6 @@ void HttpRequestTestObjectType::test<16>()
boost::regex("X-Reflect-content-encoding", boost::regex::icase), boost::regex("X-Reflect-content-encoding", boost::regex::icase),
boost::regex(".*", boost::regex::icase))); boost::regex(".*", boost::regex::icase)));
handle = req->requestGetByteRange(HttpRequest::DEFAULT_POLICY_ID, handle = req->requestGetByteRange(HttpRequest::DEFAULT_POLICY_ID,
0U,
url_base + "reflect/", url_base + "reflect/",
0, 0,
47, 47,
@ -1863,7 +1852,6 @@ void HttpRequestTestObjectType::test<17>()
boost::regex("X-Reflect-transfer_encoding", boost::regex::icase), boost::regex("X-Reflect-transfer_encoding", boost::regex::icase),
boost::regex(".*chunked.*", boost::regex::icase))); boost::regex(".*chunked.*", boost::regex::icase)));
HttpHandle handle = req->requestPost(HttpRequest::DEFAULT_POLICY_ID, HttpHandle handle = req->requestPost(HttpRequest::DEFAULT_POLICY_ID,
0U,
url_base + "reflect/", url_base + "reflect/",
ba, ba,
options, options,
@ -2049,7 +2037,6 @@ void HttpRequestTestObjectType::test<18>()
boost::regex(".*", boost::regex::icase))); boost::regex(".*", boost::regex::icase)));
HttpHandle handle = req->requestPut(HttpRequest::DEFAULT_POLICY_ID, HttpHandle handle = req->requestPut(HttpRequest::DEFAULT_POLICY_ID,
0U,
url_base + "reflect/", url_base + "reflect/",
ba, ba,
options, options,
@ -2249,7 +2236,6 @@ void HttpRequestTestObjectType::test<19>()
boost::regex("X-Reflect-content-encoding", boost::regex::icase), boost::regex("X-Reflect-content-encoding", boost::regex::icase),
boost::regex(".*", boost::regex::icase))); boost::regex(".*", boost::regex::icase)));
HttpHandle handle = req->requestGet(HttpRequest::DEFAULT_POLICY_ID, HttpHandle handle = req->requestGet(HttpRequest::DEFAULT_POLICY_ID,
0U,
url_base + "reflect/", url_base + "reflect/",
options, options,
headers, headers,
@ -2457,7 +2443,6 @@ void HttpRequestTestObjectType::test<20>()
boost::regex(".*", boost::regex::icase))); boost::regex(".*", boost::regex::icase)));
HttpHandle handle = req->requestPost(HttpRequest::DEFAULT_POLICY_ID, HttpHandle handle = req->requestPost(HttpRequest::DEFAULT_POLICY_ID,
0U,
url_base + "reflect/", url_base + "reflect/",
ba, ba,
options, options,
@ -2666,7 +2651,6 @@ void HttpRequestTestObjectType::test<21>()
boost::regex("X-Reflect-content-type", boost::regex::icase), boost::regex("X-Reflect-content-type", boost::regex::icase),
boost::regex("text/html", boost::regex::icase))); boost::regex("text/html", boost::regex::icase)));
HttpHandle handle = req->requestPut(HttpRequest::DEFAULT_POLICY_ID, HttpHandle handle = req->requestPut(HttpRequest::DEFAULT_POLICY_ID,
0U,
url_base + "reflect/", url_base + "reflect/",
ba, ba,
options, options,
@ -2797,7 +2781,6 @@ void HttpRequestTestObjectType::test<22>()
char buffer[128]; char buffer[128];
sprintf(buffer, "/bug2295/%d/", i); sprintf(buffer, "/bug2295/%d/", i);
HttpHandle handle = req->requestGetByteRange(HttpRequest::DEFAULT_POLICY_ID, HttpHandle handle = req->requestGetByteRange(HttpRequest::DEFAULT_POLICY_ID,
0U,
url_base + buffer, url_base + buffer,
0, 0,
25, 25,
@ -2829,7 +2812,6 @@ void HttpRequestTestObjectType::test<22>()
char buffer[128]; char buffer[128];
sprintf(buffer, "/bug2295/00000012/%d/", i); sprintf(buffer, "/bug2295/00000012/%d/", i);
HttpHandle handle = req->requestGetByteRange(HttpRequest::DEFAULT_POLICY_ID, HttpHandle handle = req->requestGetByteRange(HttpRequest::DEFAULT_POLICY_ID,
0U,
url_base + buffer, url_base + buffer,
0, 0,
25, 25,
@ -2861,7 +2843,6 @@ void HttpRequestTestObjectType::test<22>()
char buffer[128]; char buffer[128];
sprintf(buffer, "/bug2295/inv_cont_range/%d/", i); sprintf(buffer, "/bug2295/inv_cont_range/%d/", i);
HttpHandle handle = req->requestGetByteRange(HttpRequest::DEFAULT_POLICY_ID, HttpHandle handle = req->requestGetByteRange(HttpRequest::DEFAULT_POLICY_ID,
0U,
url_base + buffer, url_base + buffer,
0, 0,
25, 25,
@ -2984,7 +2965,6 @@ void HttpRequestTestObjectType::test<23>()
std::ostringstream url; std::ostringstream url;
url << url_base << i << "/"; url << url_base << i << "/";
HttpHandle handle = req->requestGetByteRange(HttpRequest::DEFAULT_POLICY_ID, HttpHandle handle = req->requestGetByteRange(HttpRequest::DEFAULT_POLICY_ID,
0U,
url.str(), url.str(),
0, 0,
0, 0,


@ -418,7 +418,7 @@ bool LLCrashLogger::runCrashLogPost(std::string host, LLSD data, std::string msg
updateApplication(llformat("%s, try %d...", msg.c_str(), i+1)); updateApplication(llformat("%s, try %d...", msg.c_str(), i+1));
LL_INFOS("CRASHREPORT") << "POST crash data to " << host << LL_ENDL; LL_INFOS("CRASHREPORT") << "POST crash data to " << host << LL_ENDL;
LLCore::HttpHandle handle = LLCoreHttpUtil::requestPostWithLLSD(httpRequest.get(), LLCore::HttpRequest::DEFAULT_POLICY_ID, 0, LLCore::HttpHandle handle = LLCoreHttpUtil::requestPostWithLLSD(httpRequest.get(), LLCore::HttpRequest::DEFAULT_POLICY_ID,
host, data, httpOpts, LLCore::HttpHeaders::ptr_t(), LLCore::HttpHandler::ptr_t(new LLCrashLoggerHandler)); host, data, httpOpts, LLCore::HttpHeaders::ptr_t(), LLCore::HttpHandler::ptr_t(new LLCrashLoggerHandler));
if (handle == LLCORE_HTTP_HANDLE_INVALID) if (handle == LLCORE_HTTP_HANDLE_INVALID)


@ -221,6 +221,7 @@ const std::string LLDiskCache::assetTypeToString(LLAssetType::EType at)
{ LLAssetType::AT_PERSON, "PERSON" }, { LLAssetType::AT_PERSON, "PERSON" },
{ LLAssetType::AT_MESH, "MESH" }, { LLAssetType::AT_MESH, "MESH" },
{ LLAssetType::AT_SETTINGS, "SETTINGS" }, { LLAssetType::AT_SETTINGS, "SETTINGS" },
{ LLAssetType::AT_MATERIAL, "MATERIAL" },
{ LLAssetType::AT_UNKNOWN, "UNKNOWN" } { LLAssetType::AT_UNKNOWN, "UNKNOWN" }
}; };
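The LLDiskCache name table gains an entry for the new material asset type, so assetTypeToString() can label material assets instead of presumably falling through to "UNKNOWN". A trivial usage sketch:

    // Hypothetical call site inside LLDiskCache:
    const std::string type_str = assetTypeToString(LLAssetType::AT_MATERIAL);   // "MATERIAL"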


@ -45,8 +45,7 @@ void LLLFSThread::initClass(bool local_is_threaded)
//static //static
S32 LLLFSThread::updateClass(U32 ms_elapsed) S32 LLLFSThread::updateClass(U32 ms_elapsed)
{ {
sLocal->update((F32)ms_elapsed); return sLocal->update((F32)ms_elapsed);
return sLocal->getPending();
} }
//static //static
@ -58,6 +57,7 @@ void LLLFSThread::cleanupClass()
{ {
sLocal->update(0); sLocal->update(0);
} }
sLocal->shutdown();
delete sLocal; delete sLocal;
sLocal = NULL; sLocal = NULL;
} }
@ -65,8 +65,7 @@ void LLLFSThread::cleanupClass()
//---------------------------------------------------------------------------- //----------------------------------------------------------------------------
LLLFSThread::LLLFSThread(bool threaded) : LLLFSThread::LLLFSThread(bool threaded) :
LLQueuedThread("LFS", threaded), LLQueuedThread("LFS", threaded)
mPriorityCounter(PRIORITY_LOWBITS)
{ {
if(!mLocalAPRFilePoolp) if(!mLocalAPRFilePoolp)
{ {
@ -84,14 +83,12 @@ LLLFSThread::~LLLFSThread()
LLLFSThread::handle_t LLLFSThread::read(const std::string& filename, /* Flawfinder: ignore */ LLLFSThread::handle_t LLLFSThread::read(const std::string& filename, /* Flawfinder: ignore */
U8* buffer, S32 offset, S32 numbytes, U8* buffer, S32 offset, S32 numbytes,
Responder* responder, U32 priority) Responder* responder)
{ {
LL_PROFILE_ZONE_SCOPED;
handle_t handle = generateHandle(); handle_t handle = generateHandle();
if (priority == 0) priority = PRIORITY_NORMAL | priorityCounter(); Request* req = new Request(this, handle,
else if (priority < PRIORITY_LOW) priority |= PRIORITY_LOW; // All reads are at least PRIORITY_LOW
Request* req = new Request(this, handle, priority,
FILE_READ, filename, FILE_READ, filename,
buffer, offset, numbytes, buffer, offset, numbytes,
responder); responder);
@ -107,13 +104,12 @@ LLLFSThread::handle_t LLLFSThread::read(const std::string& filename, /* Flawfind
LLLFSThread::handle_t LLLFSThread::write(const std::string& filename, LLLFSThread::handle_t LLLFSThread::write(const std::string& filename,
U8* buffer, S32 offset, S32 numbytes, U8* buffer, S32 offset, S32 numbytes,
Responder* responder, U32 priority) Responder* responder)
{ {
LL_PROFILE_ZONE_SCOPED;
handle_t handle = generateHandle(); handle_t handle = generateHandle();
if (priority == 0) priority = PRIORITY_LOW | priorityCounter(); Request* req = new Request(this, handle,
Request* req = new Request(this, handle, priority,
FILE_WRITE, filename, FILE_WRITE, filename,
buffer, offset, numbytes, buffer, offset, numbytes,
responder); responder);
@ -130,11 +126,11 @@ LLLFSThread::handle_t LLLFSThread::write(const std::string& filename,
//============================================================================ //============================================================================
LLLFSThread::Request::Request(LLLFSThread* thread, LLLFSThread::Request::Request(LLLFSThread* thread,
handle_t handle, U32 priority, handle_t handle,
operation_t op, const std::string& filename, operation_t op, const std::string& filename,
U8* buffer, S32 offset, S32 numbytes, U8* buffer, S32 offset, S32 numbytes,
Responder* responder) : Responder* responder) :
QueuedRequest(handle, priority, FLAG_AUTO_COMPLETE), QueuedRequest(handle, FLAG_AUTO_COMPLETE),
mThread(thread), mThread(thread),
mOperation(op), mOperation(op),
mFileName(filename), mFileName(filename),
@ -157,6 +153,7 @@ LLLFSThread::Request::~Request()
// virtual, called from own thread // virtual, called from own thread
void LLLFSThread::Request::finishRequest(bool completed) void LLLFSThread::Request::finishRequest(bool completed)
{ {
LL_PROFILE_ZONE_SCOPED;
if (mResponder.notNull()) if (mResponder.notNull())
{ {
mResponder->completed(completed ? mBytesRead : 0); mResponder->completed(completed ? mBytesRead : 0);
@ -166,6 +163,7 @@ void LLLFSThread::Request::finishRequest(bool completed)
void LLLFSThread::Request::deleteRequest() void LLLFSThread::Request::deleteRequest()
{ {
LL_PROFILE_ZONE_SCOPED;
if (getStatus() == STATUS_QUEUED) if (getStatus() == STATUS_QUEUED)
{ {
LL_ERRS() << "Attempt to delete a queued LLLFSThread::Request!" << LL_ENDL; LL_ERRS() << "Attempt to delete a queued LLLFSThread::Request!" << LL_ENDL;
@ -180,6 +178,7 @@ void LLLFSThread::Request::deleteRequest()
bool LLLFSThread::Request::processRequest() bool LLLFSThread::Request::processRequest()
{ {
LL_PROFILE_ZONE_SCOPED;
bool complete = false; bool complete = false;
if (mOperation == FILE_READ) if (mOperation == FILE_READ)
{ {


@ -68,7 +68,7 @@ public:
public: public:
Request(LLLFSThread* thread, Request(LLLFSThread* thread,
handle_t handle, U32 priority, handle_t handle,
operation_t op, const std::string& filename, operation_t op, const std::string& filename,
U8* buffer, S32 offset, S32 numbytes, U8* buffer, S32 offset, S32 numbytes,
Responder* responder); Responder* responder);
@ -120,22 +120,15 @@ public:
// Return a Request handle // Return a Request handle
handle_t read(const std::string& filename, /* Flawfinder: ignore */ handle_t read(const std::string& filename, /* Flawfinder: ignore */
U8* buffer, S32 offset, S32 numbytes, U8* buffer, S32 offset, S32 numbytes,
Responder* responder, U32 pri=0); Responder* responder);
handle_t write(const std::string& filename, handle_t write(const std::string& filename,
U8* buffer, S32 offset, S32 numbytes, U8* buffer, S32 offset, S32 numbytes,
Responder* responder, U32 pri=0); Responder* responder);
// Misc
U32 priorityCounter() { return mPriorityCounter-- & PRIORITY_LOWBITS; } // Use to order IO operations
// static initializers // static initializers
static void initClass(bool local_is_threaded = TRUE); // Setup sLocal static void initClass(bool local_is_threaded = TRUE); // Setup sLocal
static S32 updateClass(U32 ms_elapsed); static S32 updateClass(U32 ms_elapsed);
static void cleanupClass(); // Delete sLocal static void cleanupClass(); // Delete sLocal
private:
U32 mPriorityCounter;
public: public:
static LLLFSThread* sLocal; // Default local file thread static LLLFSThread* sLocal; // Default local file thread
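LLLFSThread requests are no longer priority-tagged: read() and write() lose their trailing U32 priority and the priority counter is removed, so file operations are handled in submission order. A hedged caller-side sketch (the Responder subclass and its completed() signature are assumptions inferred from the completed() call shown in finishRequest()):

    class DumpResponder : public LLLFSThread::Responder
    {
    public:
        void completed(S32 bytes) override   // signature assumed from the finishRequest() call
        {
            LL_INFOS() << bytes << " bytes read" << LL_ENDL;
        }
    };

    U8* buffer = new U8[4096];
    LLLFSThread::handle_t h =
        LLLFSThread::sLocal->read("cache/index.db",      // placeholder path
                                  buffer, /*offset*/ 0, /*numbytes*/ 4096,
                                  new DumpResponder());  // note: no priority argument any more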


@ -798,7 +798,6 @@ U8* LLImageBase::allocateDataSize(S32 width, S32 height, S32 ncomponents, S32 si
// LLImageRaw // LLImageRaw
//--------------------------------------------------------------------------- //---------------------------------------------------------------------------
S32 LLImageRaw::sGlobalRawMemory = 0;
S32 LLImageRaw::sRawImageCount = 0; S32 LLImageRaw::sRawImageCount = 0;
LLImageRaw::LLImageRaw() LLImageRaw::LLImageRaw()
@ -815,6 +814,15 @@ LLImageRaw::LLImageRaw(U16 width, U16 height, S8 components)
++sRawImageCount; ++sRawImageCount;
} }
LLImageRaw::LLImageRaw(const U8* data, U16 width, U16 height, S8 components)
: LLImageBase()
{
if (allocateDataSize(width, height, components))
{
memcpy(getData(), data, width * height * components);
}
}
LLImageRaw::LLImageRaw(U8 *data, U16 width, U16 height, S8 components, bool no_copy) LLImageRaw::LLImageRaw(U8 *data, U16 width, U16 height, S8 components, bool no_copy)
: LLImageBase() : LLImageBase()
{ {
@ -847,16 +855,13 @@ LLImageRaw::~LLImageRaw()
U8* LLImageRaw::allocateData(S32 size) U8* LLImageRaw::allocateData(S32 size)
{ {
U8* res = LLImageBase::allocateData(size); U8* res = LLImageBase::allocateData(size);
sGlobalRawMemory += getDataSize();
return res; return res;
} }
// virtual // virtual
U8* LLImageRaw::reallocateData(S32 size) U8* LLImageRaw::reallocateData(S32 size)
{ {
sGlobalRawMemory -= getDataSize();
U8* res = LLImageBase::reallocateData(size); U8* res = LLImageBase::reallocateData(size);
sGlobalRawMemory += getDataSize();
return res; return res;
} }
@ -869,7 +874,6 @@ void LLImageRaw::releaseData()
// virtual // virtual
void LLImageRaw::deleteData() void LLImageRaw::deleteData()
{ {
sGlobalRawMemory -= getDataSize();
LLImageBase::deleteData(); LLImageBase::deleteData();
} }
@ -985,6 +989,43 @@ void LLImageRaw::verticalFlip()
} }
bool LLImageRaw::optimizeAwayAlpha()
{
if (getComponents() == 4)
{
U8* data = getData();
U32 pixels = getWidth() * getHeight();
// check alpha channel for all 255
for (U32 i = 0; i < pixels; ++i)
{
if (data[i * 4 + 3] != 255)
{
return false;
}
}
// alpha channel is all 255, make a new copy of data without alpha channel
U8* new_data = (U8*) ll_aligned_malloc_16(getWidth() * getHeight() * 3);
for (U32 i = 0; i < pixels; ++i)
{
U32 di = i * 3;
U32 si = i * 4;
for (U32 j = 0; j < 3; ++j)
{
new_data[di+j] = data[si+j];
}
}
setDataAndSize(new_data, getWidth(), getHeight(), 3);
return true;
}
return false;
}
void LLImageRaw::expandToPowerOfTwo(S32 max_dim, bool scale_image) void LLImageRaw::expandToPowerOfTwo(S32 max_dim, bool scale_image)
{ {
// Find new sizes // Find new sizes


@ -184,6 +184,7 @@ protected:
public: public:
LLImageRaw(); LLImageRaw();
LLImageRaw(U16 width, U16 height, S8 components); LLImageRaw(U16 width, U16 height, S8 components);
LLImageRaw(const U8* data, U16 width, U16 height, S8 components);
LLImageRaw(U8 *data, U16 width, U16 height, S8 components, bool no_copy = false); LLImageRaw(U8 *data, U16 width, U16 height, S8 components, bool no_copy = false);
// Construct using createFromFile (used by tools) // Construct using createFromFile (used by tools)
//LLImageRaw(const std::string& filename, bool j2c_lowest_mip_only = false); //LLImageRaw(const std::string& filename, bool j2c_lowest_mip_only = false);
@ -207,6 +208,10 @@ public:
void clear(U8 r=0, U8 g=0, U8 b=0, U8 a=255); void clear(U8 r=0, U8 g=0, U8 b=0, U8 a=255);
void verticalFlip(); void verticalFlip();
// if the alpha channel is all 100% opaque, delete it
// returns true if alpha channel was deleted
bool optimizeAwayAlpha();
static S32 biasedDimToPowerOfTwo(S32 curr_dim, S32 max_dim = MAX_IMAGE_SIZE); static S32 biasedDimToPowerOfTwo(S32 curr_dim, S32 max_dim = MAX_IMAGE_SIZE);
static S32 expandDimToPowerOfTwo(S32 curr_dim, S32 max_dim = MAX_IMAGE_SIZE); static S32 expandDimToPowerOfTwo(S32 curr_dim, S32 max_dim = MAX_IMAGE_SIZE);
@ -275,7 +280,6 @@ protected:
void setDataAndSize(U8 *data, S32 width, S32 height, S8 components) ; void setDataAndSize(U8 *data, S32 width, S32 height, S8 components) ;
public: public:
static S32 sGlobalRawMemory;
static S32 sRawImageCount; static S32 sRawImageCount;
private: private:
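Two LLImageRaw additions show up here: a constructor that copies pixel data from a const buffer, and optimizeAwayAlpha(), which rewrites a 4-component image as RGB when every alpha byte is 255 (the global raw-memory counter is dropped at the same time). A small usage sketch with made-up pixel data:

    // 2x2 fully opaque RGBA pixels, copied into the image by the new constructor.
    const U8 pixels[2 * 2 * 4] = {
        255,   0,   0, 255,      0, 255,   0, 255,
          0,   0, 255, 255,    255, 255, 255, 255,
    };
    LLPointer<LLImageRaw> raw = new LLImageRaw(pixels, 2, 2, 4);

    if (raw->optimizeAwayAlpha())            // true: alpha was uniformly 255
    {
        llassert(raw->getComponents() == 3); // image is now plain RGB
    }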


@ -146,6 +146,7 @@ bool LLImageJ2C::initEncode(LLImageRaw &raw_image, int blocks_size, int precinct
bool LLImageJ2C::decode(LLImageRaw *raw_imagep, F32 decode_time) bool LLImageJ2C::decode(LLImageRaw *raw_imagep, F32 decode_time)
{ {
LL_PROFILE_ZONE_SCOPED_CATEGORY_TEXTURE;
return decodeChannels(raw_imagep, decode_time, 0, 4); return decodeChannels(raw_imagep, decode_time, 0, 4);
} }
@ -153,6 +154,7 @@ bool LLImageJ2C::decode(LLImageRaw *raw_imagep, F32 decode_time)
// Returns true to mean done, whether successful or not. // Returns true to mean done, whether successful or not.
bool LLImageJ2C::decodeChannels(LLImageRaw *raw_imagep, F32 decode_time, S32 first_channel, S32 max_channel_count ) bool LLImageJ2C::decodeChannels(LLImageRaw *raw_imagep, F32 decode_time, S32 first_channel, S32 max_channel_count )
{ {
LL_PROFILE_ZONE_SCOPED_CATEGORY_TEXTURE;
LLTimer elapsed; LLTimer elapsed;
bool res = true; bool res = true;


@@ -28,64 +28,93 @@
 #include "llimageworker.h"
 #include "llimagedxt.h"
+#include "threadpool.h"

+/*--------------------------------------------------------------------------*/
+class ImageRequest
+{
+public:
+    ImageRequest(const LLPointer<LLImageFormatted>& image,
+                 S32 discard, BOOL needs_aux,
+                 const LLPointer<LLImageDecodeThread::Responder>& responder);
+    virtual ~ImageRequest();
+
+    /*virtual*/ bool processRequest();
+    /*virtual*/ void finishRequest(bool completed);
+
+private:
+    // LLPointers stored in ImageRequest MUST be LLPointer instances rather
+    // than references: we need to increment the refcount when storing these.
+    // input
+    LLPointer<LLImageFormatted> mFormattedImage;
+    S32 mDiscardLevel;
+    BOOL mNeedsAux;
+    // output
+    LLPointer<LLImageRaw> mDecodedImageRaw;
+    LLPointer<LLImageRaw> mDecodedImageAux;
+    BOOL mDecodedRaw;
+    BOOL mDecodedAux;
+    LLPointer<LLImageDecodeThread::Responder> mResponder;
+};
+
 //----------------------------------------------------------------------------
 // MAIN THREAD
-LLImageDecodeThread::LLImageDecodeThread(bool threaded)
-    : LLQueuedThread("imagedecode", threaded)
+LLImageDecodeThread::LLImageDecodeThread(bool /*threaded*/)
 {
-    mCreationMutex = new LLMutex();
+    mThreadPool.reset(new LL::ThreadPool("ImageDecode", 8));
+    mThreadPool->start();
 }

 //virtual
 LLImageDecodeThread::~LLImageDecodeThread()
-{
-    delete mCreationMutex ;
-}
+{}

 // MAIN THREAD
 // virtual
 size_t LLImageDecodeThread::update(F32 max_time_ms)
 {
     LL_PROFILE_ZONE_SCOPED_CATEGORY_TEXTURE;
-    LLMutexLock lock(mCreationMutex);
-    for (creation_list_t::iterator iter = mCreationList.begin();
-         iter != mCreationList.end(); ++iter)
-    {
-        creation_info& info = *iter;
-        ImageRequest* req = new ImageRequest(info.handle, info.image,
-            info.priority, info.discard, info.needs_aux,
-            info.responder);
-        bool res = addRequest(req);
-        if (!res)
-        {
-            LL_ERRS() << "request added after LLLFSThread::cleanupClass()" << LL_ENDL;
-        }
-    }
-    mCreationList.clear();
-    S32 res = LLQueuedThread::update(max_time_ms);
-    return res;
+    return getPending();
 }

+size_t LLImageDecodeThread::getPending()
+{
+    return mThreadPool->getQueue().size();
+}
+
-LLImageDecodeThread::handle_t LLImageDecodeThread::decodeImage(LLImageFormatted* image,
-    U32 priority, S32 discard, BOOL needs_aux, Responder* responder)
+LLImageDecodeThread::handle_t LLImageDecodeThread::decodeImage(
+    const LLPointer<LLImageFormatted>& image,
+    S32 discard,
+    BOOL needs_aux,
+    const LLPointer<LLImageDecodeThread::Responder>& responder)
 {
     LL_PROFILE_ZONE_SCOPED_CATEGORY_TEXTURE;
-    LLMutexLock lock(mCreationMutex);
-    handle_t handle = generateHandle();
-    mCreationList.push_back(creation_info(handle, image, priority, discard, needs_aux, responder));
-    return handle;
+    // Instantiate the ImageRequest right in the lambda, why not?
+    bool posted = mThreadPool->getQueue().post(
+        [req = ImageRequest(image, discard, needs_aux, responder)]
+        () mutable
+        {
+            auto done = req.processRequest();
+            req.finishRequest(done);
+        });
+    if (! posted)
+    {
+        LL_DEBUGS() << "Tried to start decoding on shutdown" << LL_ENDL;
+        // should this return 0?
+    }
+    // It's important to our consumer (LLTextureFetchWorker) that we return a
+    // nonzero handle. It is NOT important that the nonzero handle be unique:
+    // nothing is ever done with it except to compare it to zero, or zero it.
+    return 17;
 }

-// Used by unit test only
-// Returns the size of the mutex guarded list as an indication of sanity
-S32 LLImageDecodeThread::tut_size()
+void LLImageDecodeThread::shutdown()
 {
-    LLMutexLock lock(mCreationMutex);
-    S32 res = mCreationList.size();
-    return res;
+    mThreadPool->close();
 }

 LLImageDecodeThread::Responder::~Responder()
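Taken together, this hunk replaces the old queued-request plumbing with a post-to-ThreadPool model: decodeImage() packages an ImageRequest into a lambda, posts it to the "ImageDecode" queue, and hands back a dummy nonzero handle; update() now just reports the queue depth via getPending(), and shutdown() closes the pool. A minimal sketch of how a caller might drive this API follows; only the LLImageDecodeThread calls come from this diff, while ExampleResponder, example_decode and the atomic flag are hypothetical illustration:

    #include <atomic>
    #include <thread>

    // Hypothetical responder: the smallest thing that satisfies
    // LLImageDecodeThread::Responder, not viewer code.
    class ExampleResponder : public LLImageDecodeThread::Responder
    {
    public:
        ExampleResponder(std::atomic<bool>* done) : mDone(done) {}

        // Runs on the pool thread, after processRequest()/finishRequest().
        void completed(bool success, LLImageRaw* raw, LLImageRaw* aux) override
        {
            mSucceeded = success && raw;
            mDone->store(true);
        }

    private:
        std::atomic<bool>* mDone;
        bool mSucceeded = false;
    };

    // 'image' is assumed to be an already-populated LLImageFormatted.
    void example_decode(const LLPointer<LLImageFormatted>& image)
    {
        LLImageDecodeThread decoder;        // builds and starts the "ImageDecode" pool
        std::atomic<bool> done{false};

        // The returned handle is only ever compared against zero.
        LLImageDecodeThread::handle_t handle =
            decoder.decodeImage(image, /*discard=*/0, /*needs_aux=*/FALSE,
                                new ExampleResponder(&done));
        if (!handle)
        {
            return;                         // nothing was queued
        }

        while (!done.load())
        {
            std::this_thread::yield();      // a real caller does frame work here
        }
        decoder.shutdown();                 // close() the pool before destruction
    }

A busy-wait is used here only to keep the sketch short; in the viewer the texture fetch machinery simply re-checks completion on its own update cycle.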
@@ -94,11 +123,10 @@ LLImageDecodeThread::Responder::~Responder()
 //----------------------------------------------------------------------------

-LLImageDecodeThread::ImageRequest::ImageRequest(handle_t handle, LLImageFormatted* image,
-    U32 priority, S32 discard, BOOL needs_aux,
-    LLImageDecodeThread::Responder* responder)
-    : LLQueuedThread::QueuedRequest(handle, priority, FLAG_AUTO_COMPLETE),
-      mFormattedImage(image),
+ImageRequest::ImageRequest(const LLPointer<LLImageFormatted>& image,
+    S32 discard, BOOL needs_aux,
+    const LLPointer<LLImageDecodeThread::Responder>& responder)
+    : mFormattedImage(image),
       mDiscardLevel(discard),
       mNeedsAux(needs_aux),
       mDecodedRaw(FALSE),
@@ -107,7 +135,7 @@ LLImageDecodeThread::ImageRequest::ImageRequest(handle_t handle, LLImageFormatte
 {
 }

-LLImageDecodeThread::ImageRequest::~ImageRequest()
+ImageRequest::~ImageRequest()
 {
     mDecodedImageRaw = NULL;
     mDecodedImageAux = NULL;
@@ -118,10 +146,10 @@ LLImageDecodeThread::ImageRequest::~ImageRequest()
 // Returns true when done, whether or not decode was successful.
-bool LLImageDecodeThread::ImageRequest::processRequest()
+bool ImageRequest::processRequest()
 {
     LL_PROFILE_ZONE_SCOPED_CATEGORY_TEXTURE;
-    const F32 decode_time_slice = .1f;
+    const F32 decode_time_slice = 0.f; //disable time slicing
     bool done = true;
     if (!mDecodedRaw && mFormattedImage.notNull())
     {
@@ -145,7 +173,7 @@ bool LLImageDecodeThread::ImageRequest::processRequest()
                 mFormattedImage->getHeight(),
                 mFormattedImage->getComponents());
         }
-        done = mFormattedImage->decode(mDecodedImageRaw, decode_time_slice); // 1ms
+        done = mFormattedImage->decode(mDecodedImageRaw, decode_time_slice);
         // some decoders are removing data when task is complete and there were errors
         mDecodedRaw = done && mDecodedImageRaw->getData();
     }
@@ -158,14 +186,14 @@ bool LLImageDecodeThread::ImageRequest::processRequest()
                 mFormattedImage->getHeight(),
                 1);
         }
-        done = mFormattedImage->decodeChannels(mDecodedImageAux, decode_time_slice, 4, 4); // 1ms
+        done = mFormattedImage->decodeChannels(mDecodedImageAux, decode_time_slice, 4, 4);
         mDecodedAux = done && mDecodedImageAux->getData();
     }

     return done;
 }
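Changing decode_time_slice from .1f to 0.f drops the per-call budget: each ImageRequest now decodes to completion in a single processRequest() pass on its pool thread instead of returning partway and depending on being re-run. A small self-contained sketch of the difference, under the assumption (matching the "disable time slicing" comment) that a non-positive budget means "no limit"; ExampleDecoder is a stand-in, not a viewer class:

    #include <algorithm>
    #include <cstdio>

    // Stand-in for an incremental decoder in the LLImageFormatted style.
    struct ExampleDecoder
    {
        static constexpr int kTotalRows = 1000;
        int mRowsDone = 0;

        // Returns true once fully decoded. A positive budget decodes only a
        // slice per call; a non-positive budget decodes everything at once.
        bool decode(float decode_time_slice)
        {
            int rows_this_call = (decode_time_slice > 0.f) ? 100 : kTotalRows;
            mRowsDone = std::min(kTotalRows, mRowsDone + rows_this_call);
            return mRowsDone == kTotalRows;
        }
    };

    int main()
    {
        ExampleDecoder sliced;
        int calls = 1;
        while (!sliced.decode(0.1f)) { ++calls; }   // old behaviour: many passes

        ExampleDecoder oneshot;
        bool done = oneshot.decode(0.f);            // new behaviour: one blocking pass

        std::printf("sliced: %d calls, one-shot done: %s\n", calls, done ? "yes" : "no");
        return 0;
    }

Blocking is acceptable now because the work runs on a dedicated pool thread rather than being interleaved on a shared queued-thread loop.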
-void LLImageDecodeThread::ImageRequest::finishRequest(bool completed)
+void ImageRequest::finishRequest(bool completed)
 {
     LL_PROFILE_ZONE_SCOPED_CATEGORY_TEXTURE;
     if (mResponder.notNull())
@@ -175,10 +203,3 @@ void LLImageDecodeThread::ImageRequest::finishRequest(bool completed)
     }
     // Will automatically be deleted
 }
-
-// Used by unit test only
-// Checks that a responder exists for this instance so that something can happen when completion is reached
-bool LLImageDecodeThread::ImageRequest::tut_isOK()
-{
-    return mResponder.notNull();
-}


@@ -29,9 +29,9 @@
 #include "llimage.h"
 #include "llpointer.h"
-#include "llworkerthread.h"
+#include "threadpool_fwd.h"

-class LLImageDecodeThread : public LLQueuedThread
+class LLImageDecodeThread
 {
 public:
     class Responder : public LLThreadSafeRefCount
@@ -42,63 +42,24 @@ public:
         virtual void completed(bool success, LLImageRaw* raw, LLImageRaw* aux) = 0;
     };

-    class ImageRequest : public LLQueuedThread::QueuedRequest
-    {
-    protected:
-        virtual ~ImageRequest(); // use deleteRequest()
-
-    public:
-        ImageRequest(handle_t handle, LLImageFormatted* image,
-                     U32 priority, S32 discard, BOOL needs_aux,
-                     LLImageDecodeThread::Responder* responder);
-
-        /*virtual*/ bool processRequest();
-        /*virtual*/ void finishRequest(bool completed);
-
-        // Used by unit tests to check the consitency of the request instance
-        bool tut_isOK();
-
-    private:
-        // input
-        LLPointer<LLImageFormatted> mFormattedImage;
-        S32 mDiscardLevel;
-        BOOL mNeedsAux;
-        // output
-        LLPointer<LLImageRaw> mDecodedImageRaw;
-        LLPointer<LLImageRaw> mDecodedImageAux;
-        BOOL mDecodedRaw;
-        BOOL mDecodedAux;
-        LLPointer<LLImageDecodeThread::Responder> mResponder;
-    };
-
 public:
     LLImageDecodeThread(bool threaded = true);
     virtual ~LLImageDecodeThread();

-    handle_t decodeImage(LLImageFormatted* image,
-        U32 priority, S32 discard, BOOL needs_aux,
-        Responder* responder);
+    // meant to resemble LLQueuedThread::handle_t
+    typedef U32 handle_t;
+    handle_t decodeImage(const LLPointer<LLImageFormatted>& image,
+                         S32 discard, BOOL needs_aux,
+                         const LLPointer<Responder>& responder);
+    size_t getPending();
     size_t update(F32 max_time_ms);
+    void shutdown();

-    // Used by unit tests to check the consistency of the thread instance
-    S32 tut_size();
-
 private:
-    struct creation_info
-    {
-        handle_t handle;
-        LLPointer<LLImageFormatted> image;
-        U32 priority;
-        S32 discard;
-        BOOL needs_aux;
-        LLPointer<Responder> responder;
-        creation_info(handle_t h, LLImageFormatted* i, U32 p, S32 d, BOOL aux, Responder* r)
-            : handle(h), image(i), priority(p), discard(d), needs_aux(aux), responder(r)
-        {}
-    };
-    typedef std::list<creation_info> creation_list_t;
-    creation_list_t mCreationList;
-    LLMutex* mCreationMutex;
+    // As of SL-17483, LLImageDecodeThread is no longer itself an
+    // LLQueuedThread - instead this is the API by which we submit work to the
+    // "ImageDecode" ThreadPool.
+    std::unique_ptr<LL::ThreadPool> mThreadPool;
 };

 #endif
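Including threadpool_fwd.h instead of a full ThreadPool header works because the class only names LL::ThreadPool through a std::unique_ptr member; the destructor is declared here and defined out of line in the .cpp, where the complete type is visible, which is why the empty ~LLImageDecodeThread() body lives in the source file rather than being defaulted in the header. The general shape of that pattern, with hypothetical names (Widget, Holder, widget_fwd.h are not viewer files):

    // widget_fwd.h -- forward declarations only
    namespace example { class Widget; }

    // holder.h -- safe to include widely without pulling in Widget's definition
    #include <memory>
    class Holder
    {
    public:
        Holder();
        ~Holder();                 // declared here, defined where Widget is complete
    private:
        std::unique_ptr<example::Widget> mWidget;
    };

    // holder.cpp -- the one translation unit that needs the full Widget type
    // #include "widget.h"         // hypothetical full definition
    Holder::Holder() : mWidget(std::make_unique<example::Widget>()) {}
    Holder::~Holder() = default;   // unique_ptr's deleter is instantiated here

This keeps the image-worker header light and avoids rebuilding its many includers whenever the ThreadPool implementation changes.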


@@ -125,42 +125,11 @@ namespace tut
        }
    };

-   // Test wrapper declaration : image worker
-   // Note: this class is not meant to be instantiated outside an LLImageDecodeThread instance
-   // but it's not a bad idea to get its public API a good shake as part of a thorough unit test set.
-   // Some gotcha with the destructor though (see below).
-   struct imagerequest_test
-   {
-       // Instance to be tested
-       LLImageDecodeThread::ImageRequest* mRequest;
-       bool done;
-
-       // Constructor and destructor of the test wrapper
-       imagerequest_test()
-       {
-           done = false;
-           mRequest = new LLImageDecodeThread::ImageRequest(0, 0,
-               LLQueuedThread::PRIORITY_NORMAL, 0, FALSE,
-               new responder_test(&done));
-       }
-       ~imagerequest_test()
-       {
-           // We should delete the object *but*, because its destructor is protected, that cannot be
-           // done from outside an LLImageDecodeThread instance... So we leak memory here... It's fine...
-           //delete mRequest;
-       }
-   };
-
    // Tut templating thingamagic: test group, object and test instance
    typedef test_group<imagedecodethread_test> imagedecodethread_t;
    typedef imagedecodethread_t::object imagedecodethread_object_t;
    tut::imagedecodethread_t tut_imagedecodethread("LLImageDecodeThread");

-   typedef test_group<imagerequest_test> imagerequest_t;
-   typedef imagerequest_t::object imagerequest_object_t;
-   tut::imagerequest_t tut_imagerequest("LLImageRequest");
-
    // ---------------------------------------------------------------------------------------
    // Test functions
    // Notes:
@@ -172,64 +141,18 @@ namespace tut
    // ---------------------------------------------------------------------------------------
    // Test the LLImageDecodeThread interface
    // ---------------------------------------------------------------------------------------
-   //
-   // Note on Unit Testing Queued Thread Classes
-   //
-   // Since methods on such a class are called on a separate loop and that we can't insert tut
-   // ensure() calls in there, we exercise the class with 2 sets of tests:
-   // - 1: Test as a single threaded instance: We declare the class but ask for no thread
-   //      to be spawned (easy with LLThreads since there's a boolean argument on the constructor
-   //      just for that). We can then unit test each public method like we do on a normal class.
-   // - 2: Test as a threaded instance: We let the thread launch and check that its external
-   //      behavior is as expected (i.e. it runs, can accept a work order and processes
-   //      it). Typically though there's no guarantee that this exercises all the methods of the
-   //      class which is why we also need the previous "non threaded" set of unit tests for
-   //      complete coverage.
-   //
-   // ---------------------------------------------------------------------------------------
    template<> template<>
    void imagedecodethread_object_t::test<1>()
-   {
-       // Test a *non threaded* instance of the class
-       mThread = new LLImageDecodeThread(false);
-       ensure("LLImageDecodeThread: non threaded constructor failed", mThread != NULL);
-       // Test that we start with an empty list right at creation
-       ensure("LLImageDecodeThread: non threaded init state incorrect", mThread->tut_size() == 0);
-       // Insert something in the queue
-       bool done = false;
-       LLImageDecodeThread::handle_t decodeHandle = mThread->decodeImage(NULL, LLQueuedThread::PRIORITY_NORMAL, 0, FALSE, new responder_test(&done));
-       // Verifies we got a valid handle
-       ensure("LLImageDecodeThread: non threaded decodeImage(), returned handle is null", decodeHandle != 0);
-       // Verifies that we do now have something in the queued list
-       ensure("LLImageDecodeThread: non threaded decodeImage() insertion in threaded list failed", mThread->tut_size() == 1);
-       // Trigger queue handling "manually" (on a threaded instance, this is done on the thread loop)
-       S32 res = mThread->update(0);
-       // Verifies that we successfully handled the list
-       ensure("LLImageDecodeThread: non threaded update() list handling test failed", res == 0);
-       // Verifies that the list is now empty
-       ensure("LLImageDecodeThread: non threaded update() list emptying test failed", mThread->tut_size() == 0);
-   }
-
-   template<> template<>
-   void imagedecodethread_object_t::test<2>()
    {
        // Test a *threaded* instance of the class
        mThread = new LLImageDecodeThread(true);
        ensure("LLImageDecodeThread: threaded constructor failed", mThread != NULL);
-       // Test that we start with an empty list right at creation
-       ensure("LLImageDecodeThread: threaded init state incorrect", mThread->tut_size() == 0);
        // Insert something in the queue
        bool done = false;
-       LLImageDecodeThread::handle_t decodeHandle = mThread->decodeImage(NULL, LLQueuedThread::PRIORITY_NORMAL, 0, FALSE, new responder_test(&done));
+       LLImageDecodeThread::handle_t decodeHandle = mThread->decodeImage(NULL, 0, FALSE, new responder_test(&done));
        // Verifies we get back a valid handle
        ensure("LLImageDecodeThread: threaded decodeImage(), returned handle is null", decodeHandle != 0);
-       // Wait a little so to simulate the main thread doing something on its main loop...
-       ms_sleep(500);  // 500 milliseconds
-       // Verifies that the responder has *not* been called yet in the meantime
-       ensure("LLImageDecodeThread: responder creation failed", done == false);
-       // Ask the thread to update: that means tells the queue to check itself and creates work requests
-       mThread->update(1);
        // Wait till the thread has time to handle the work order (though it doesn't do much per work order...)
        const U32 INCREMENT_TIME = 500;             // 500 milliseconds
        const U32 MAX_TIME = 20 * INCREMENT_TIME;   // Do the loop 20 times max, i.e. wait 10 seconds but no more
@@ -242,24 +165,4 @@ namespace tut
        // Verifies that the responder has now been called
        ensure("LLImageDecodeThread: threaded work unit not processed", done == true);
    }
-
-   // ---------------------------------------------------------------------------------------
-   // Test the LLImageDecodeThread::ImageRequest interface
-   // ---------------------------------------------------------------------------------------
-   template<> template<>
-   void imagerequest_object_t::test<1>()
-   {
-       // Test that we start with a correct request at creation
-       ensure("LLImageDecodeThread::ImageRequest::ImageRequest() constructor test failed", mRequest->tut_isOK());
-       bool res = mRequest->processRequest();
-       // Verifies that we processed the request successfully
-       ensure("LLImageDecodeThread::ImageRequest::processRequest() processing request test failed", res == true);
-       // Check that we can call the finishing call safely
-       try {
-           mRequest->finishRequest(false);
-       } catch (...) {
-           fail("LLImageDecodeThread::ImageRequest::finishRequest() test failed");
-       }
-   }
 }
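The part of the surviving test elided between the last two hunks is a bounded poll-and-sleep wait governed by INCREMENT_TIME and MAX_TIME before the final ensure(). The shape of such a wait, as an illustration only and not the viewer's verbatim test body:

    #include <atomic>
    #include <chrono>
    #include <thread>

    // Polls a completion flag in fixed increments up to a ceiling, then lets
    // the caller assert on the result (as the test does with ensure()).
    bool wait_until_done(const std::atomic<bool>& done,
                         unsigned increment_ms = 500,    // mirrors INCREMENT_TIME
                         unsigned max_ms = 20 * 500)     // mirrors MAX_TIME
    {
        unsigned waited = 0;
        while (!done.load() && waited < max_ms)
        {
            std::this_thread::sleep_for(std::chrono::milliseconds(increment_ms));
            waited += increment_ms;
        }
        return done.load();
    }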


@@ -726,6 +726,7 @@ bool LLImageJ2COJ::initDecode(LLImageJ2C &base, LLImageRaw &raw_image, int disca
 bool LLImageJ2COJ::initEncode(LLImageJ2C &base, LLImageRaw &raw_image, int blocks_size, int precincts_size, int levels)
 {
+    LL_PROFILE_ZONE_SCOPED_CATEGORY_TEXTURE;
     // No specific implementation for this method in the OpenJpeg case
     return false;
 }


@@ -122,6 +122,7 @@ LLFolderDictionary::LLFolderDictionary()
     addEntry(LLFolderType::FT_MARKETPLACE_VERSION, new FolderEntry("version", FALSE, FALSE, FALSE));
     addEntry(LLFolderType::FT_SETTINGS, new FolderEntry("settings", TRUE, FALSE, TRUE));
+    addEntry(LLFolderType::FT_MATERIAL, new FolderEntry("material", TRUE, FALSE, TRUE));
     addEntry(LLFolderType::FT_NONE, new FolderEntry("-1", FALSE, FALSE, FALSE));
 };
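The new FT_MATERIAL entry simply follows the established FolderEntry pattern: a protocol name string plus the same three boolean flags carried by the neighbouring entries (their exact meaning is defined where FolderEntry is declared, outside this diff), and it presumes a matching FT_MATERIAL value in the LLFolderType enum. A further folder type would be registered the same way; FT_EXAMPLE below is hypothetical:

    // Hypothetical: not part of this change, only showing the addEntry() pattern.
    addEntry(LLFolderType::FT_EXAMPLE, new FolderEntry("example", TRUE, FALSE, TRUE));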
