Merge with LL 2.6.9, initial work. Needs tests and additional XUI merging for particular skins.

master
Arrehn 2011-08-04 11:53:40 -04:00
commit e277b8a4ac
866 changed files with 39338 additions and 19798 deletions


@ -13,7 +13,10 @@ LICENSES
scripts/install.cache.*
indra/newview/res/viewerRes.rc
indra/.distcc
indra/build-darwin-*
build-linux-*
build-darwin-*
build-vc80/
build-vc100/
indra/build-vc[0-9]*
indra/build-VC[0-9]*
indra/CMakeFiles

.hgtags

@ -57,14 +57,9 @@ a82e5b1e22c7f90e3c7977d146b80588f004ed0d 2.5.0-start
425f96b1e81e01644bf5e951961e7d1023bffb89 RLVa-1.2.0
fc0cbb86f5bd6e7737159e35aea2c4cf9f619b62 RLVa-1.2.1
76f586a8e22b1abe6b2339758c8ac0fa718975de 76f586a8e22b
76f586a8e22b1abe6b2339758c8ac0fa718975de 76f586a8e22b
0000000000000000000000000000000000000000 76f586a8e22b
0000000000000000000000000000000000000000 76f586a8e22b
345b17e7cf630db77e840b4fe3451bd476d750a3 76f586a8e22b
345b17e7cf630db77e840b4fe3451bd476d750a3 2.5.0-beta1
345b17e7cf630db77e840b4fe3451bd476d750a3 76f586a8e22b
0000000000000000000000000000000000000000 76f586a8e22b
54d772d8687c69b1d773f6ce14bbc7bdc9d6c05f 2.5.0-beta2
b542f8134a2bb5dd054ff4e509a44b2ee463b1bf nat-eventapi2-base
7076e22f9f43f479a4ea75eac447a36364bead5a DRTVWR-5_2.2.0-beta1
9822eb3e25f7fe0c28ffd8aba45c507caa383cbc DRTVWR-3_2.2.0-beta2
b0cd7e150009809a0b5b0a9d5785cd4bb230413a DRTVWR-7_2.2.0-beta3
@ -83,80 +78,41 @@ b723921b5c711bd24dbe77dc76ef488b544dac78 DRTVWR-34_2.5.0-beta3
b723921b5c711bd24dbe77dc76ef488b544dac78 2.5.0-release
b723921b5c711bd24dbe77dc76ef488b544dac78 DRTVWR-31_2.5.0-release
92e58e51776a4f8c29069b1a62ff21454d2085f0 2.6.0-start
3178e311da3a8739a85363665006ea3c4610cad4 dons-headless-hackathon-work
63a6aedfce785a6c760377bf685b2dae616797d2 2.5.1-start
4dede9ae1ec74d41f6887719f6f1de7340d8578d 2.5.1-release
4dede9ae1ec74d41f6887719f6f1de7340d8578d DRTVWR-37_2.5.1-release
b53a0576eec80614d7767ed72b40ed67aeff27c9 DRTVWR-38_2.5.2-release
b53a0576eec80614d7767ed72b40ed67aeff27c9 2.5.2-release
0000000000000000000000000000000000000000 2.1.1-release
0000000000000000000000000000000000000000 v2start
0000000000000000000000000000000000000000 2-1rn1
0000000000000000000000000000000000000000 2-1-beta-2
0000000000000000000000000000000000000000 beta-5
0000000000000000000000000000000000000000 beta-3-5
0000000000000000000000000000000000000000 2-1-1-beta-1
0000000000000000000000000000000000000000 2-1-beta-3
0000000000000000000000000000000000000000 beta-4
0000000000000000000000000000000000000000 2-1-beta-1
0000000000000000000000000000000000000000 viewer-2-1-0-start
0000000000000000000000000000000000000000 viewer-2-0-0
0000000000000000000000000000000000000000 alpha-3
0000000000000000000000000000000000000000 viewer-2-0-1
0000000000000000000000000000000000000000 2-1-1-beta-2
0000000000000000000000000000000000000000 alpha-4
0000000000000000000000000000000000000000 fork to viewer-20qa
0000000000000000000000000000000000000000 viewer-2-0-2-start
0000000000000000000000000000000000000000 2-1-release
0000000000000000000000000000000000000000 alpha-6
0000000000000000000000000000000000000000 2-1-1-release
0000000000000000000000000000000000000000 2-1-beta-4
0000000000000000000000000000000000000000 howard-demo
0000000000000000000000000000000000000000 alpha-5
0000000000000000000000000000000000000000 fork to viewer-2-0
0000000000000000000000000000000000000000 last_sprint
0000000000000000000000000000000000000000 beta_2.1.3
0000000000000000000000000000000000000000 2.2.0-beta1
0000000000000000000000000000000000000000 2.2.0-beta2
0000000000000000000000000000000000000000 2.2.0-beta3
0000000000000000000000000000000000000000 2.2.0-beta4
0000000000000000000000000000000000000000 2.3.0-beta1
0000000000000000000000000000000000000000 2.2.0-release
0000000000000000000000000000000000000000 2.3.0-start
0000000000000000000000000000000000000000 2.3.0-beta1
0000000000000000000000000000000000000000 2.3.0-beta2
0000000000000000000000000000000000000000 2.3.0-beta3
0000000000000000000000000000000000000000 2.3.0-release
0000000000000000000000000000000000000000 2.4.0-start
0000000000000000000000000000000000000000 2.4.0-beta1
0000000000000000000000000000000000000000 2.4.0-beta1
0000000000000000000000000000000000000000 2.4.0-beta2
0000000000000000000000000000000000000000 2.4.0-release
0000000000000000000000000000000000000000 2.5.0-start
0000000000000000000000000000000000000000 2.5.0-beta1
0000000000000000000000000000000000000000 2.5.0-beta2
0000000000000000000000000000000000000000 DRTVWR-5_2.2.0-beta1
0000000000000000000000000000000000000000 DRTVWR-3_2.2.0-beta2
0000000000000000000000000000000000000000 DRTVWR-7_2.2.0-beta3
0000000000000000000000000000000000000000 DRTVWR-8_2.2.0-release
0000000000000000000000000000000000000000 DRTVWR-14_2.3.0-beta1
0000000000000000000000000000000000000000 DRTVWR-17_2.3.0-beta2
0000000000000000000000000000000000000000 DRTVWR-20_2.3.0-beta3
0000000000000000000000000000000000000000 DRTVWR-13_2.3.0-release
0000000000000000000000000000000000000000 DRTVWR-26_2.4.0-beta1
0000000000000000000000000000000000000000 DRTVWR-27_2.4.0-beta2
0000000000000000000000000000000000000000 DRTVWR-25_2.4.0-release
0000000000000000000000000000000000000000 DRTVWR-32_2.5.0-beta1
0000000000000000000000000000000000000000 DRTVWR-33_2.5.0-beta2
0000000000000000000000000000000000000000 2.5.0-beta3
0000000000000000000000000000000000000000 DRTVWR-34_2.5.0-beta3
0000000000000000000000000000000000000000 2.5.0-release
0000000000000000000000000000000000000000 DRTVWR-31_2.5.0-release
0000000000000000000000000000000000000000 2.6.0-start
0000000000000000000000000000000000000000 2.5.1-start
0000000000000000000000000000000000000000 2.5.1-release
0000000000000000000000000000000000000000 DRTVWR-37_2.5.1-release
0000000000000000000000000000000000000000 DRTVWR-38_2.5.2-release
0000000000000000000000000000000000000000 2.5.2-release
8f16fc4647d4a97b02042094a213b1fdea5c9462 FSmerge-2.5.2
43cb7dc1804de1a25c0b2b3f0715584af1f8b470 RLVa-1.2.2
89532c8dfd5b6c29f1cb032665b44a74a52452e1 RLVa-1.3.0
4e9eec6a347f89b2b3f295beb72f1cf7837dff66 2.6.0-start
9283d6d1d7eb71dfe4c330e7c9144857e7356bde 2.6.0-beta1
9283d6d1d7eb71dfe4c330e7c9144857e7356bde DRTVWR-40_2.6.0-beta1
9e4641f4a7870c0f565a25a2971368d5a29516a1 DRTVWR-41_2.6.0-beta2
9e4641f4a7870c0f565a25a2971368d5a29516a1 2.6.0-beta2
c5bdef3aaa2744626aef3c217ce29e1900d357b3 2.6.1-start
c5bdef3aaa2744626aef3c217ce29e1900d357b3 DRTVWR-43_2.6.1-beta1
c5bdef3aaa2744626aef3c217ce29e1900d357b3 2.6.1-beta1
c9182ed77d427c759cfacf49a7b71a2e20d522aa DRTVWR-42_2.6.1-release
c9182ed77d427c759cfacf49a7b71a2e20d522aa 2.6.1-release
56b2778c743c2a964d82e1caf11084d76a87de2c 2.6.2-start
42f32494bac475d0737799346f6831558ae8bf5d DRTVWR-39_2.6.0-release
42f32494bac475d0737799346f6831558ae8bf5d 2.6.0-release
d1203046bb653b763f835b04d184646949d8dd5c DRTVWR-45_2.6.2-beta1
d1203046bb653b763f835b04d184646949d8dd5c 2.6.2-beta1
214180ad5714ce8392b82bbebcc92f4babd98300 DRTVWR-44_2.6.2-release
214180ad5714ce8392b82bbebcc92f4babd98300 2.6.2-release
52b2263ab28f0976c689fd0b76c55a9eb027cdbf end-of-develop.py
ec32f1045e7c2644015245df3a9933620aa194b8 2.6.3-start
d7fcefabdf32bb61a9ea6d6037c1bb26190a85bc DRTVWR-47_2.6.3-beta1
d7fcefabdf32bb61a9ea6d6037c1bb26190a85bc 2.6.3-beta1
0630e977504af5ea320c58d33cae4e1ddee793e9 DRTVWR-48_2.6.3-beta2
0630e977504af5ea320c58d33cae4e1ddee793e9 2.6.3-beta2
7db558aaa7c176f2022b3e9cfe38ac72f6d1fccd DRTVWR-50_2.6.5-beta1
7db558aaa7c176f2022b3e9cfe38ac72f6d1fccd 2.6.5-beta1
800cefce8d364ffdd2f383cbecb91294da3ea424 2.6.6-start
bb1075286b3b147b1dae2e3d6b2d56f04ff03f35 DRTVWR-52_2.6.6-beta1
bb1075286b3b147b1dae2e3d6b2d56f04ff03f35 2.6.6-beta1
e67da2c6e3125966dd49eef98b36317afac1fcfe 2.6.9-start


@ -48,6 +48,12 @@ viewer-beta.login_channel = "Second Life Beta Viewer"
viewer-beta.build_debug_release_separately = true
viewer-beta.build_viewer_update_version_manager = true
viewer-pre-beta.viewer_channel = "Second Life Beta Viewer"
viewer-pre-beta.login_channel = "Second Life Beta Viewer"
viewer-pre-beta.build_debug_release_separately = true
viewer-pre-beta.build_viewer_update_version_manager = true
# ========================================
# Viewer Release
# ========================================
@ -55,26 +61,18 @@ viewer-release.viewer_channel = "Second Life Release"
viewer-release.login_channel = "Second Life Release"
viewer-release.build_debug_release_separately = true
viewer-release.build_viewer_update_version_manager = true
viewer-release.release-viewer.jira = DRTVWR-13
viewer-pre-release.viewer_channel = "Second Life Release"
viewer-pre-release.login_channel = "Second Life Release"
viewer-pre-release.build_debug_release_separately = true
viewer-pre-release.build_viewer_update_version_manager = true
#viewer-pre-release.release-viewer.jira = DRTVWR-13
viewer-pre-release.release-viewer.jira = DRTVWR-46
# ========================================
# aimee
# ========================================
viewer-development-import.build_debug_release_separately = true
viewer-development-fixes.build_debug_release_separately = true
viewer-development-tweaks.build_debug_release_separately = true
# =======================================
# brad
# ========================================
debug-halting.email = cg@lindenlab.com
debug-halting.build_server = false
debug-halting.build_server_tests = false
@ -158,56 +156,13 @@ media.build_viewer_update_version_manager = false
# oz
# ================
oz_viewer-review1_coverity.coverity_product = viewer
oz_viewer-devreview.build_debug_release_separately = true
oz_project-1.build_debug_release_separately = true
oz_project-2.build_debug_release_separately = true
oz-project-3.build_debug_release_separately = true
oz_viewer-review1.build_Linux = true
oz_viewer-review1_debug.build_Linux = false
oz_viewer-review1_coverity.build_Linux = false
oz_viewer-review1.build_Darwin = true
oz_viewer-review1_debug.build_Darwin = false
oz_viewer-review1_coverity.build_Darwin = false
oz_viewer-review1.build_CYGWIN = true
oz_viewer-review1.build_CYGWIN_Debug = false
oz_viewer-review1.build_CYGWIN_RelWithDebInfo = false
oz_viewer-review1.build_CYGWIN_Release = true
oz_viewer-review1_debug.build_CYGWIN_Debug = true
oz_viewer-review1_debug.build_CYGWIN_RelWithDebInfo = true
oz_viewer-review1_debug.build_CYGWIN_Release = false
oz_viewer-review1_coverity.build_coverity = true
oz_viewer-review1_coverity.build_CYGWIN_Debug = false
oz_viewer-review1_coverity.build_CYGWIN_RelWithDebInfo = false
oz_viewer-review1_coverity.build_CYGWIN_Release = false
oz_viewer-review2_coverity.coverity_product = viewer
oz_viewer-review2.build_Linux = true
oz_viewer-review2_debug.build_Linux = false
oz_viewer-review2_coverity.build_Linux = false
oz_viewer-review2.build_Darwin = true
oz_viewer-review2_debug.build_Darwin = false
oz_viewer-review2_coverity.build_Darwin = false
oz_viewer-review2.build_CYGWIN = true
oz_viewer-review2.build_CYGWIN_Debug = false
oz_viewer-review2.build_CYGWIN_RelWithDebInfo = false
oz_viewer-review2.build_CYGWIN_Release = true
oz_viewer-review2_debug.build_CYGWIN_Debug = true
oz_viewer-review2_debug.build_CYGWIN_RelWithDebInfo = true
oz_viewer-review2_debug.build_CYGWIN_Release = false
oz_viewer-review2_coverity.build_coverity = true
oz_viewer-review2_coverity.build_CYGWIN_Debug = false
oz_viewer-review2_coverity.build_CYGWIN_RelWithDebInfo = false
oz_viewer-review2_coverity.build_CYGWIN_Release = false
# ========================================
# tofu
# ========================================
tofu_viewer-development-staging.email = tofu.linden@lindenlab.com
oz_viewer-beta-review.build_debug_release_separately = true
# ========================================
# enus
# ========================================
@ -216,13 +171,6 @@ viewer-tut-teamcity.email = enus@lindenlab.com
viewer-tut-teamcity.build_server = false
viewer-tut-teamcity.build_server_tests = false
# ========================================
# experience
# ========================================
viewer-experience.public_build = false
viewer-experience.viewer_channel = "Second Life SkyLight Viewer"
viewer-experience.login_channel = "Second Life SkyLight Viewer"
# =================================================================
# asset delivery 2010 projects
# =================================================================
@ -240,5 +188,23 @@ viewer-asset-delivery-metrics.email = monty@lindenlab.com
viewer-asset-delivery-metrics.build_server = false
viewer-asset-delivery-metrics.build_server_tests = false
#==============================================================================
# autobuild viewers
#==============================================================================
viewer-autobuild2010.build_debug_release_separately = true
viewer-autobuild2010.viewer_channel = "Project Viewer - VS2010"
viewer-autobuild2010.login_channel = "Project Viewer - VS2010"
viewer-autobuild2010.viewer_grid = agni
viewer-autobuild2010.build_link_parallel = false
#========================================
# VS2010
#========================================
viewer-vs2010.viewer_channel = "Project Viewer - VS2010"
viewer-vs2010.login_channel = "Project Viewer - VS2010"
viewer-vs2010.viewer_grid = agni
viewer-vs2010.build_debug_release_separately = true
viewer-vs2010.build_viewer_update_version_manager = false
viewer-vs2010.build_link_parallel = false
# eof
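
For reference, every entry in this file follows the pattern <branch>.<setting> = <value>. A purely hypothetical stanza for a new branch (the branch and channel names below are illustrative, not part of this commit) would look like:

#========================================
# example only (hypothetical branch)
#========================================
viewer-example.viewer_channel = "Project Viewer - Example"
viewer-example.login_channel = "Project Viewer - Example"
viewer-example.build_debug_release_separately = true
viewer-example.build_viewer_update_version_manager = false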

README_BUILD_FIRESTORM_LINUX.txt Normal file → Executable file

@ -1,15 +1,40 @@
First set up your system as described in the snowstorm linux wiki.
Note: You must manually install fmod as described there.
- Additionally, make sure gcc-4.4 ang g++-4.4 are installed.
- You should do non-standalone builds. If you try standalone, you will most likely run into trouble.
Run ./build_firestorm_linux.sh
By default your build will be set to use channel private-(your build machine). If you want to change this,
you can use pass the option "--chan private-SomeNameYouPrefer" to the build command above.
NOTE: IF you receive build failures related to libUUID, copy your system libUUID library over the download supplied by SL:
cd /your/firestorm/code/tree
cp /lib/libuuid.so.1.3.0 libraries/i686-linux/lib_release_client/libuuid.so
cp /lib/libuuid.so.1.3.0 libraries/i686-linux/lib_release_client/libuuid.so.1
First, make sure gcc-4.4 and g++-4.4 are installed.
32-bit build platforms are better tested at this point and strongly recommended.
Ensure you can build a stock viewer-development tree as described in the SL wiki. Before asking for any help
compiling Firestorm, make sure you can build viewer-development first. If you skip this step, you may
receive much less help. http://wiki.secondlife.com/wiki/Compiling_the_viewer_(Linux)
If you want to use licensed FMOD or KDU build libraries (they are optional) you have to provision these yourself.
If you're licensing these with Phoenix/Firestorm, ask for the libraries for fmod and kdu. Put them into:
/opt/firestorm
If you're a community builder, you'll need to build these libraries yourself, then change your autobuild.xml file to
point to your own versions, or create a different autobuild.xml with your customizations and use it with autobuild
instead of our default autobuild.xml (a short example follows after the output notes below). There are some examples
of how to build FMOD on the LL wiki and the opensource-dev mailing list. Ask Arrehn Oberlander about joining
"Firestorm Self Compilers" for additional community help.
To build Firestorm:
autobuild build -c ReleaseFS
Other examples:
autobuild configure -c ReleaseFS # basic configuration step, don't build, just configure
autobuild configure -c ReleaseFS -- --clean # clean the output area first, then configure
autobuild configure -c ReleaseFS -- --fmod --kdu # configure with fmod, kdu enabled
autobuild configure -c ReleaseFS -- --chan Private-Yourname # configure with a custom channel
autobuild build -c ReleaseFS --no-configure # default quick rebuild
autobuild build -c ReleaseFS --no-configure -- --clean # clean rebuild
Any of the configure options can also be used (and do the same thing) with the build options.
Logs:
Look for logs in build-linux-i686/logs
Output:
Look for output in build-linux-i686/newview/Release
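
A minimal sketch of the community-builder path described above (the file name my_autobuild.xml and the --config-file option are assumptions -- check your autobuild version's help, or set the AUTOBUILD_CONFIG_FILE environment variable instead):

cp autobuild.xml my_autobuild.xml
# edit my_autobuild.xml so the fmod/kdu entries point at packages you built yourself
autobuild configure -c ReleaseFS --config-file my_autobuild.xml -- --fmod --kdu
autobuild build -c ReleaseFS --config-file my_autobuild.xml --no-configure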

README_BUILD_FIRESTORM_MACOSX.txt Normal file → Executable file

@ -1,11 +1,44 @@
Make sure xcode is installed, it's a free download from apple.
Ensure you use Xcode version 3, not version 4. You may need to use xcode-select to change the active version.
Make sure cmake is installed, use at least a 2.8.x version.
- Additionally, patch your source directory with fmodmacapi per the older snowglobe instructions.
- run ./build_firestorm_macosx.sh
By default your build will be set to use channel private-(your build machine). If you want to change this,
you can use pass the option "--chan private-SomeNameYouPrefer" to the build command above.
Make sure Xcode is installed; it's a (sometimes) free download from Apple.
Make sure cmake is installed; use at least a 2.8.x version (a toolchain-check example appears after the output notes below).
Ensure you can build a stock viewer-development tree as described in the SL wiki. Before asking for any help
compiling Firestorm, make sure you can build viewer-development first. If you skip this step, you may
receive much less help. http://wiki.secondlife.com/wiki/Compiling_the_viewer_(Mac_OS_X)
If you want to use licensed FMOD or KDU build libraries (they are optional) you have to provision these yourself.
If you're licensing these with Phoenix/Firestorm, ask for the libraries for fmod and kdu. Put them into:
/opt/firestorm
If you're a community builder, you'll need to build these libraries yourself, then change your autobuild.xml file to
point to your own versions, or create a different autobuild.xml with your customizations and use it with autobuild
instead of our default autobuild.xml. There are some examples of how to build FMOD on the LL wiki and the
opensource-dev mailing list. Ask Arrehn Oberlander about joining "Firestorm Self Compilers" for additional community help.
Additionally, you will need to procure an updated Vivox build package and place it in:
/opt/firestorm
No licenses are required for this, but it is not yet directly available. Ask someone in Firestorm for a copy until
this can be updated.
To build Firestorm:
autobuild build -c ReleaseFS
Other examples:
autobuild configure -c ReleaseFS # basic configuration step, don't build, just configure
autobuild configure -c ReleaseFS -- --clean # clean the output area first, then configure
autobuild configure -c ReleaseFS -- --fmod --kdu # configure with fmod, kdu enabled
autobuild configure -c ReleaseFS -- --chan Private-Yourname # configure with a custom channel
autobuild build -c ReleaseFS --no-configure # default quick rebuild
autobuild build -c ReleaseFS --no-configure -- --clean # clean rebuild
Any of the configure options can also be used (and do the same thing) with the build options.
Logs:
Look for logs in build-darwin-i386/logs
Output:
Look for output in build-darwin-i386/newview/Release
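
A minimal sketch of checking the toolchain requirements above (the /Developer-Xcode3 path is hypothetical -- substitute wherever your Xcode 3 install actually lives):

xcode-select -print-path                    # show which Xcode the command-line tools currently use
sudo xcode-select -switch /Developer-Xcode3 # point the tools at an Xcode 3 install (path is an assumption)
cmake --version                             # confirm cmake is at least 2.8.x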

README_BUILD_FIRESTORM_WIN32.txt Normal file → Executable file

@ -1,80 +1,43 @@
Before you start configuring your Windows build system, be aware of our tested configurations:
Memory: You will need at least 2GB RAM
Memory: You will need at least 2GB RAM, 4GB strongly recommended.
CPU: Multiple CPUs are strongly recommended.
A build can take over an hour.
Tested Build Environments:
WinXPSP3, 32bit, 2GB RAM, Visual Studio Pro 2005 SP1,
latest VC++ runtime installed
Visual Studio 2010.
If you are not using something that closely matches a tested configuration, you
may run into trouble, particularly with different versions of Visual Studio. If you are running VS2008, you may be able to build the viewer for your local machine, but not package it into an installer.
Ensure you can build a stock viewer-development tree as described in the SL wiki. Before asking for any help
compiling Firestorm, make sure you can build viewer-development first. If you skip this step, you may
receive much less help. http://wiki.secondlife.com/wiki/Viewer_2_Microsoft_Windows_Builds
A free download of VS2005Express can be used to compile firestorm. You can download this at http://download.microsoft.com/download/8/3/a/83aad8f9-38ba-4503-b3cd-ba28c360c27b/ENU/vcsetup.exe
If you want to use licensed FMOD or KDU build libraries (they are optional) you have to provision these yourself.
If you're licensing these with Phoenix/Firestorm, ask for the libraries for fmod and kdu. Put them into:
To get started, follow the snowstorm instructions for setting up a windows build environment at this page: http://wiki.secondlife.com/wiki/Viewer_2_Microsoft_Windows_Builds
c:\cygwin\opt\firestorm
If you're a community builder, you'll need to build these libraries yourself, then change your autobuild.xml file to
point to your own versions, or create a different autobuild.xml with your customizations and use it with autobuild
instead of our default autobuild.xml. There are some examples of how to build FMOD on the LL wiki and the
opensource-dev mailing list. Ask Arrehn Oberlander about joining "Firestorm Self Compilers" for additional community help.
GET THE PHOENIX SOURCE
======================
To build firestorm:
Open up cygwin and run the following commands one at a time
mkdir /cygdrive/c/code
cd /cygdrive/c/code
hg clone http://hg.phoenixviewer.com/phoenix-firestorm-lgpl/
autobuild build -c ReleaseFS
Make sure to copy fmod.dll into your indra/ folder as well as into libraries/i686-win32/Release and libraries/RelWithDebInfo
Other examples:
COMMAND LINE BUILDS
===================
autobuild configure -c ReleaseFS # basic configuration step, don't build, just configure
autobuild configure -c ReleaseFS -- --clean # clean the output area first, then configure
autobuild configure -c ReleaseFS -- --fmod --kdu # configure with fmod, kdu enabled
autobuild configure -c ReleaseFS -- --chan Private-Yourname # configure with a custom channel
Open up cygwin and navigate to your code directory. Example:
cd /cygdrive/c/my/path/to/phoenix-firestorm-lgpl
autobuild build -c ReleaseFS --no-configure # default quick rebuild
Execute the command to build firestorm in the cygwin window:
Any of the configure options can also be used (and do the same thing) with the build options.
./build_firestorm_win32.sh
Logs:
This will do a clean compile. Rebuilds should be possible by specifying --rebuild.
Look for logs in build-vc100/logs
By default your build will be set to use channel private-(your build machine). If you want to change this,
you can use pass the option "--chan private-SomeNameYouPrefer" to the build command above.
Output:
NOTE: It is normal to see errors about ambiguous include/library paths at this time. They will not cause the build to fail.
Look for output in build-vc100/newview/Release
A log for the build will be placed in logs/build_firestorm_windows.log
When the build completes, your output installer will be in indra/VC80/newview/Release, look for a <product-build>-Setup.exe file
VISUAL STUDIO BUILDS
====================
0. Open up a regular CMD.exe command window. Navigate to your downloaded source code.
1. Run the command "develop.py -G vc80 -t Release configure -DLL_TESTS:BOOL=OFF" Change vc80 to vc90 for VS2008 or to VC100 for VS2010
(*as of Jan 1st 2011, VS2008 and VS2010 do not work, however, LL is working on supporting VS2010 in the future.)
1. Launch Visual Studio and open up <your downloaded phoenix code>\indra\build-vc80\Secondlife.sln
2. Set the build type to Release
3. Select the "firestorm-bin" target
4. Build.
5. Your output installer will be in indra/VC80/newview/Release, look for a <product-build>-Setup.exe file
BUILD ERRORS
============
1. Google Breakpad
If your build fails because of an error in 'dump_syms.exe', download a new version of this executable that is statically linked:
http://google-breakpad.googlecode.com/svn-history/r595/trunk/src/tools/windows/binaries/dump_syms.exe
Place this file under libraries/i686-win32/bin/dump_syms.exe (a download example follows after this section).
2. "Manifest multiple bindings error"
If your build fails to package with an error like the above, it is because you do not have an up to date C++ runtime library installed. You should enable windows auto-updates and install ALL required updates for your platform to resolve this error.
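
A minimal sketch of the Breakpad fix from item 1 above, run from the root of the source tree in cygwin (assumes curl is available in your cygwin environment; wget works just as well):

curl -L -o libraries/i686-win32/bin/dump_syms.exe \
    http://google-breakpad.googlecode.com/svn-history/r595/trunk/src/tools/windows/binaries/dump_syms.exe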
COMMITTING CHANGES
==================
When committing changes back to the phoenix-firestorm-lgpl repository, you must include the string "lgpl" or "LGPL" somewhere in your most recent commit message. Also, ensure all code you commit to this repository is LGPL licensed!

autobuild.xml Executable file

File diff suppressed because it is too large.

build.sh

@ -22,12 +22,12 @@ build_dir_Darwin()
build_dir_Linux()
{
echo viewer-linux-i686-$(echo $1 | tr A-Z a-z)
echo build-linux-i686
}
build_dir_CYGWIN()
{
echo build-vc80
echo build-vc100
}
installer_Darwin()
@ -50,47 +50,29 @@ installer_CYGWIN()
pre_build()
{
local variant="$1"
local build_dir="$2"
begin_section "Pre$variant"
#export PATH="/cygdrive/c/Program Files/Microsoft Visual Studio 8/Common7/IDE/:$PATH"
python develop.py \
--incredibuild \
--unattended \
-t $variant \
-G "$cmake_generator" \
configure \
-DGRID:STRING="$viewer_grid" \
-DVIEWER_CHANNEL:STRING="$viewer_channel" \
-DVIEWER_LOGIN_CHANNEL:STRING="$login_channel" \
-DINSTALL_PROPRIETARY:BOOL=ON \
-DRELEASE_CRASH_REPORTING:BOOL=ON \
-DLOCALIZESETUP:BOOL=ON \
-DPACKAGE:BOOL=ON \
-DCMAKE_VERBOSE_MAKEFILE:BOOL=TRUE \
-DLL_TESTS:BOOL="$run_tests"
end_section "Pre$variant"
[ -n "$master_message_template_checkout" ] \
&& [ -r "$master_message_template_checkout/message_template.msg" ] \
&& template_verifier_master_url="-DTEMPLATE_VERIFIER_MASTER_URL=file://$master_message_template_checkout/message_template.msg"
"$AUTOBUILD" configure -c $variant -- \
-DPACKAGE:BOOL=ON \
-DRELEASE_CRASH_REPORTING:BOOL=ON \
-DVIEWER_CHANNEL:STRING="\"$viewer_channel\"" \
-DVIEWER_LOGIN_CHANNEL:STRING="\"$viewer_login_channel\"" \
-DGRID:STRING="\"$viewer_grid\"" \
-DLL_TESTS:BOOL="$run_tests" \
-DTEMPLATE_VERIFIER_OPTIONS:STRING="$template_verifier_options" $template_verifier_master_url
end_section "Pre$variant"
}
build()
{
local variant="$1"
local build_dir="$2"
if $build_viewer
then
begin_section "Viewer$variant"
if python develop.py \
--incredibuild \
--unattended \
-t $variant \
-G "$cmake_generator" \
build package
# && \
# python develop.py \
# --incredibuild \
# --unattended \
# -t $variant \
# -G "$cmake_generator" \
# build package
if "$AUTOBUILD" build --no-configure -c $variant
then
echo true >"$build_dir"/build_ok
else
@ -110,6 +92,7 @@ build_docs()
end_section Docs
}
# Check to see if we were invoked from the wrapper, if not, re-exec ourselves from there
if [ "x$arch" = x ]
then
@ -117,15 +100,11 @@ then
if [ -x "$top/../buildscripts/hg/bin/build.sh" ]
then
exec "$top/../buildscripts/hg/bin/build.sh" "$top"
elif [ -r "$top/README" ]
then
cat "$top/README"
exit 1
else
cat <<EOF
This script, if called in a development environment, requires that the branch
independent build script repository be checked out next to this repository.
This repository is located at http://hg.secondlife.com/buildscripts
This repository is located at http://hg.lindenlab.com/parabuild/buildscripts
EOF
exit 1
fi
@ -138,21 +117,66 @@ eval '$build_'"$arch" || pass
# File no longer exists in code-sep branch, so let's make sure it exists in order to use it.
if test -f scripts/update_version_files.py ; then
begin_section UpdateVer
scripts/update_version_files.py \
--channel="$viewer_channel" \
--server_channel="$server_channel" \
--revision=$revision \
--verbose \
eval $(python scripts/update_version_files.py \
--channel="$viewer_channel" \
--server_channel="$server_channel" \
--revision=$revision \
--verbose \
| sed -n -e "s,Setting viewer channel/version: '\([^']*\)' / '\([^']*\)',VIEWER_CHANNEL='\1';VIEWER_VERSION='\2',p")\
|| fail update_version_files.py
echo "{\"Type\":\"viewer\",\"Version\":\"${VIEWER_VERSION}\"}" > summary.json
end_section UpdateVer
fi
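The eval/sed pipeline above turns the "Setting viewer channel/version:" status line printed by update_version_files.py into shell assignments; a worked example (the version string is illustrative only):

echo "Setting viewer channel/version: 'Second Life Release' / '2.6.9.12345'" \
  | sed -n -e "s,Setting viewer channel/version: '\([^']*\)' / '\([^']*\)',VIEWER_CHANNEL='\1';VIEWER_VERSION='\2',p"
# prints: VIEWER_CHANNEL='Second Life Release';VIEWER_VERSION='2.6.9.12345'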
# Now retrieve the version for use in the version manager
# First three parts only, $revision will be appended automatically.
build_viewer_update_version_manager_version=`scripts/get_version.py --viewer-version | sed 's/\.[0-9]*$//'`
build_viewer_update_version_manager_version=`python scripts/get_version.py --viewer-version | sed 's/\.[0-9]*$//'`
if [ -z "$AUTOBUILD" ]
then
export autobuild_dir="$here/../../../autobuild/bin/"
if [ -d "$autobuild_dir" ]
then
export AUTOBUILD="$autobuild_dir"autobuild
if [ -x "$AUTOBUILD" ]
then
# *HACK - bash doesn't know how to pass real pathnames to native windows python
case "$arch" in
CYGWIN) AUTOBUILD=$(cygpath -u $AUTOBUILD.cmd) ;;
esac
else
record_failure "Not executable: $AUTOBUILD"
exit 1
fi
else
record_failure "Not found: $autobuild_dir"
exit 1
fi
fi
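Note that the discovery block above only runs when AUTOBUILD is unset, so a developer who already has autobuild installed can bypass it; a minimal sketch, assuming autobuild is on your PATH:

export AUTOBUILD="$(which autobuild)"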
# load autobuild-provided shell functions and variables
# Merov: going back to the previous code, which passes even when it fails to catch a failure
# TODO: use the correct code below and fix the llbase import in the python code
#if "$AUTOBUILD" source_environment > source_environment
#then
# . source_environment
#else
# dump environment variables for debugging
# env|sort
# record_failure "autobuild source_environment failed"
# cat source_environment >&3
# exit 1
#fi
eval "$("$AUTOBUILD" source_environment)"
# dump environment variables for debugging
env|sort
# Install packages.
"$AUTOBUILD" install --skip-license-check
# Now run the build
cd indra
succeeded=true
build_processes=
last_built_variant=
@ -169,59 +193,11 @@ do
build_dir_stubs="$build_dir/win_setup/$variant"
rm -rf "$build_dir"
mkdir -p "$build_dir"
mkdir -p "$build_dir/tmp"
#export TMP="$build_dir/tmp"
if pre_build "$variant" "$build_dir" >> "$build_log" 2>&1
then
if $build_coverity
then
mkdir -p "$build_dir/cvbuild"
coverity_config=`cygpath --windows "$coverity_dir/config/coverity_config.xml"`
coverity_tmpdir=`cygpath --windows "$build_dir/cvbuild"`
coverity_root=`cygpath --windows "$top/latest"`
case "$variant" in
Release)
begin_section Coverity
begin_section CovBuild
"$coverity_dir"/bin/cov-build\
--verbose 4 \
--config "$coverity_config"\
--dir "$coverity_tmpdir"\
python develop.py -t $variant -G "$cmake_generator" build "$coverity_product"\
>> "$build_log" 2>&1\
&&\
end_section CovBuild\
&&\
begin_section CovAnalyze\
&&\
"$coverity_dir"/bin/cov-analyze\
--security\
--concurrency\
--dir "$coverity_tmpdir"\
>> "$build_log" 2>&1\
&&\
end_section CovAnalyze\
&&\
begin_section CovCommit\
&&\
"$coverity_dir"/bin/cov-commit-defects\
--stream "$coverity_product"\
--dir "$coverity_tmpdir"\
--host "$coverity_server"\
--strip-path "$coverity_root"\
--target "$branch/$arch"\
--version "$revision"\
--description "$repo: $variant $revision"\
--user admin --password coverity\
>> "$build_log" 2>&1\
|| record_failure "Coverity Build Failed"
# since any step could have failed, rely on the enclosing block to close any pending sub-blocks
end_section Coverity
;;
esac
if test -r "$build_dir"/cvbuild/build-log.txt
then
upload_item log "$build_dir"/cvbuild/build-log.txt text/plain
fi
elif $build_link_parallel
if $build_link_parallel
then
begin_section BuildParallel
( build "$variant" "$build_dir" > "$build_dir/build.log" 2>&1 ) &
@ -229,10 +205,7 @@ do
end_section BuildParallel
else
begin_section "Build$variant"
build "$variant" "$build_dir" > "$build_log" 2>&1
begin_section Tests
grep --line-buffered "^##teamcity" "$build_log"
end_section Tests
build "$variant" "$build_dir" 2>&1 | tee -a "$build_log" | grep --line-buffered "^##teamcity"
if `cat "$build_dir/build_ok"`
then
echo so far so good.
@ -261,15 +234,13 @@ then
begin_section "Build$variant"
build_dir=`build_dir_$arch $variant`
build_dir_stubs="$build_dir/win_setup/$variant"
tee -a $build_log < "$build_dir/build.log" | grep --line-buffered "^##teamcity"
if `cat "$build_dir/build_ok"`
then
echo so far so good.
else
record_failure "Parallel build of \"$variant\" failed."
fi
begin_section Tests
tee -a $build_log < "$build_dir/build.log" | grep --line-buffered "^##teamcity"
end_section Tests
end_section "Build$variant"
done
end_section WaitParallel
@ -291,6 +262,7 @@ then
else
upload_item installer "$package" binary/octet-stream
upload_item quicklink "$package" binary/octet-stream
[ -f summary.json ] && upload_item installer summary.json text/plain
# Upload crash reporter files.
case "$last_built_variant" in


@ -1,265 +0,0 @@
#!/bin/bash
###
### Constants
###
TRUE=0 # Map the shell's idea of truth to a variable for better documentation
FALSE=1
LOG="`pwd`/logs/build_linux.log"
###
### Global Variables
###
WANTS_CLEAN=$FALSE
WANTS_CONFIG=$FALSE
WANTS_BUILD=$FALSE
WANTS_PACKAGE=$FALSE
WANTS_VERSION=$FALSE
WANTS_FMOD=$FALSE
WANTS_KDU=$FALSE
BTYPE="Release"
CHANNEL="private-`hostname`"
###
### Helper Functions
###
showUsage()
{
echo
echo "Usage: "
echo "========================"
echo
echo " --clean : Remove past builds & configuration"
echo " --config : General a new architecture-specific config"
echo " --version : Update version number"
echo " --rebuild : Build, reusing unchanged projects to save time"
echo " --chan [Release|Beta|Private] : Private is the default, sets channel"
echo " --btype [Release|RelWithDebInfo] : Release is default, whether to use symbols"
echo " --fmod : Build with fmod"
echo " --kdu : Build with kdu"
}
getArgs()
# $* = the options passed in from main
{
if [ $# -gt 0 ]; then
while getoptex "clean config version fmod kdu rebuild help chan: btype:" "$@" ; do
# ensure options are valid
if [ -z "$OPTOPT" ] ; then
showUsage
exit 1
fi
case "$OPTOPT" in
clean) WANTS_CLEAN=$TRUE;;
config) WANTS_CONFIG=$TRUE;;
version) WANTS_VERSION=$TRUE;;
rebuild) WANTS_BUILD=$TRUE
WANTS_VERSION=$TRUE
WANTS_PACKAGE=$TRUE;;
chan) CHANNEL="$OPTARG";;
btype) BTYPE="$OPTARG";;
fmod) WANTS_FMOD=$TRUE;;
kdu) WANTS_KDU=$TRUE;;
help) showUsage && exit 0;;
-*) showUsage && exit 1;;
*) showUsage && exit 1;;
esac
done
shift $[OPTIND-1]
if [ $OPTIND -le 1 ] ; then
showUsage && exit 1
fi
fi
if [ $WANTS_CLEAN -ne $TRUE ] && [ $WANTS_CONFIG -ne $TRUE ] && \
[ $WANTS_BUILD -ne $TRUE ] && [ $WANTS_VERSION -ne $TRUE ] && \
[ $WANTS_PACKAGE -ne $TRUE ] ; then
# the user didn't say what to do, so assume he wants to do everything
WANTS_CLEAN=$TRUE
WANTS_CONFIG=$TRUE
WANTS_BUILD=$TRUE
WANTS_VERSION=$TRUE
WANTS_PACKAGE=$TRUE
fi
}
function getoptex()
{
let $# || return 1
local optlist="${1#;}"
let OPTIND || OPTIND=1
[ $OPTIND -lt $# ] || return 1
shift $OPTIND
if [ "$1" != "-" -a "$1" != "${1#-}" ]
then OPTIND=$[OPTIND+1]; if [ "$1" != "--" ]
then
local o
o="-${1#-$OPTOFS}"
for opt in ${optlist#;}
do
OPTOPT="${opt%[;.:]}"
unset OPTARG
local opttype="${opt##*[^;:.]}"
[ -z "$opttype" ] && opttype=";"
if [ ${#OPTOPT} -gt 1 ]
then # long-named option
case $o in
"--$OPTOPT")
if [ "$opttype" != ":" ]; then return 0; fi
OPTARG="$2"
if [ -z "$OPTARG" ];
then # error: must have an argument
let OPTERR && echo "$0: error: $OPTOPT must have an argument" >&2
OPTARG="$OPTOPT";
OPTOPT="?"
return 1;
fi
OPTIND=$[OPTIND+1] # skip option's argument
return 0
;;
"--$OPTOPT="*)
if [ "$opttype" = ";" ];
then # error: must not have arguments
let OPTERR && echo "$0: error: $OPTOPT must not have arguments" >&2
OPTARG="$OPTOPT"
OPTOPT="?"
return 1
fi
OPTARG=${o#"--$OPTOPT="}
return 0
;;
esac
else # short-named option
case "$o" in
"-$OPTOPT")
unset OPTOFS
[ "$opttype" != ":" ] && return 0
OPTARG="$2"
if [ -z "$OPTARG" ]
then
echo "$0: error: -$OPTOPT must have an argument" >&2
OPTARG="$OPTOPT"
OPTOPT="?"
return 1
fi
OPTIND=$[OPTIND+1] # skip option's argument
return 0
;;
"-$OPTOPT"*)
if [ $opttype = ";" ]
then # an option with no argument is in a chain of options
OPTOFS="$OPTOFS?" # move to the next option in the chain
OPTIND=$[OPTIND-1] # the chain still has other options
return 0
else
unset OPTOFS
OPTARG="${o#-$OPTOPT}"
return 0
fi
;;
esac
fi
done
echo "$0: error: invalid option: $o"
showUsage
exit 1
fi; fi
OPTOPT="?"
unset OPTARG
return 1
}
function optlistex
{
local l="$1"
local m # mask
local r # to store result
while [ ${#m} -lt $[${#l}-1] ]; do m="$m?"; done # create a "???..." mask
while [ -n "$l" ]
do
r="${r:+"$r "}${l%$m}" # append the first character of $l to $r
l="${l#?}" # cut the first charecter from $l
m="${m#?}" # cut one "?" sign from m
if [ -n "${l%%[^:.;]*}" ]
then # a special character (";", ".", or ":") was found
r="$r${l%$m}" # append it to $r
l="${l#?}" # cut the special character from l
m="${m#?}" # cut one more "?" sign
fi
done
echo $r
}
function getopt()
{
local optlist=`optlistex "$1"`
shift
getoptex "$optlist" "$@"
return $?
}
###
### Main Logic
###
getArgs $*
if [ -z $CC ] ; then
export CC=/usr/bin/gcc-4.4
fi
if [ -z $CXX ] ; then
export CXX=/usr/bin/g++-4.4
fi
export CMAKE_CXX_FLAGS_RELEASE="-O3 -msse -msse2"
if [ ! -d `dirname $LOG` ] ; then
mkdir -p `dirname $LOG`
fi
pushd indra > /dev/null
if [ $WANTS_CLEAN -eq $TRUE ] ; then
./develop.py -t release clean
find . -name "*.pyc" -exec rm {} \;
fi
if [ $WANTS_VERSION -eq $TRUE ] ; then
buildVer=`hg summary | head -1 | cut -d " " -f 2 | cut -d : -f 1`
majorVer=`cat Version | cut -d "=" -f 2 | cut -d "." -f 1`
minorVer=`cat Version | cut -d "=" -f 2 | cut -d "." -f 2`
patchVer=`cat Version | cut -d "=" -f 2 | cut -d "." -f 3`
echo "Building $CHANN- ${majorVer}.${minorVer}.${patchVer}.${buildVer}"
sed -e "s#LL_VERSION_BUILD = .*\$#LL_VERSION_BUILD = ${buildVer};#" \
-e "s#LL_VERSION_MAJOR = .*\$#LL_VERSION_MAJOR = ${majorVer};#" \
-e "s#LL_VERSION_MINOR = .*\$#LL_VERSION_MINOR = ${minorVer};#" \
-e "s#LL_VERSION_PATCH = .*\$#LL_VERSION_PATCH = ${patchVer};#" \
-e "s#LL_CHANNEL = .*\$#LL_CHANNEL = \"Firestorm-$CHANNEL\";#" llcommon/llversionviewer.cpp.in > llcommon/llversionviewer.cpp
fi
if [ $WANTS_CONFIG -eq $TRUE ] ; then
if [ $WANTS_FMOD -eq $TRUE ] ; then FMOD="-DFMOD:BOOL=ON" ; else FMOD="-DFMOD:BOOL=OFF" ; fi
if [ $WANTS_KDU -eq $TRUE ] ; then KDU="-DUSE_KDU:BOOL=ON" ; else KDU="-DUSE_KDU:BOOL=OFF" ; fi
mkdir -p ../logs > /dev/null 2>&1
./develop.py -t $BTYPE configure $KDU $FMOD -DFIRECYG:BOOL=ON -DPACKAGE:BOOL=ON -DLL_TESTS:BOOL=OFF -DVIEWER_CHANNEL:STRING=Firestorm-$CHANNEL -DVIEWER_LOGIN_CHANNEL:STRING=Firestorm-$CHANNEL 2>&1 | tee $LOG
./develop.py -t $BTYPE cmake $KDU $FMOD -DFIRECYG:BOOL=ON -DPACKAGE:BOOL=ON -DLL_TESTS:BOOL=OFF -DVIEWER_CHANNEL:STRING=Firestorm-$CHANNEL -DVIEWER_LOGIN_CHANNEL:STRING=Firestorm-$CHANNEL 2>&1 | tee $LOG
fi
if [ $WANTS_BUILD -eq $TRUE ] ; then
echo "Building in progress. Check $LOG for verbose status."
./develop.py -t $BTYPE build 2>&1 | tee -a "$LOG"
trap - INT TERM EXIT
echo "Complete"
fi
popd > /dev/null


@ -1,281 +0,0 @@
#!/bin/bash
###
### Constants
###
TRUE=0 # Map the shell's idea of truth to a variable for better documentation
FALSE=1
# If you have a version of python installed other than 2.6, do not edit this file.
# Add a line like this to ~/.bashrc:
# export WINPYTHON="/cygdrive/c/path_to_your_python.exe"
# And then close and restart cygwin
if [ -z $WINPYTHON ] ; then
WINPYTHON="/cygdrive/c/Python26/python.exe"
fi
LOG="`pwd`/logs/build_win32.log"
###
### Global Variables
###
WANTS_CLEAN=$FALSE
WANTS_CONFIG=$FALSE
WANTS_LAA=$TRUE
WANTS_BUILD=$FALSE
WANTS_PACKAGE=$FALSE
WANTS_VERSION=$FALSE
WANTS_KDU=$FALSE
BTYPE="Release"
CHANNEL="private-`hostname`"
###
### Helper Functions
###
showUsage()
{
echo
echo "Usage: "
echo "========================"
echo
echo " --clean : Remove past builds & configuration"
echo " --config : General a new architecture-specific config"
echo " --version : Update version number"
echo " --rebuild : Build, reusing unchanged projects to save time"
echo " --kdu : Build with KDU"
echo " --chan [Release|Beta|Private] : Private is the default, sets channel"
echo " --btype [Release|RelWithDebInfo] : Release is default, whether to use symbols"
echo " --laa [on|off] : Default is on, enables the viewer to use more than 2 GB ram"
}
getArgs()
# $* = the options passed in from main
{
if [ $# -gt 0 ]; then
while getoptex "clean config version kdu fmod rebuild help chan: btype: laa:" "$@" ; do
# ensure options are valid
if [ -z "$OPTOPT" ] ; then
showUsage
exit 1
fi
case "$OPTOPT" in
clean) WANTS_CLEAN=$TRUE;;
config) WANTS_CONFIG=$TRUE;;
version) WANTS_VERSION=$TRUE;;
rebuild) WANTS_BUILD=$TRUE
WANTS_VERSION=$TRUE
WANTS_PACKAGE=$TRUE;;
chan) CHANNEL="$OPTARG";;
btype) BTYPE="$OPTARG";;
fmod) WANTS_FMOD=$TRUE;;
kdu) WANTS_KDU=$TRUE;;
laa) if [ "$OPTARG" == "on" ] ; then
WANTS_LAA=$TRUE
elif [ "$OPTARG" == "off" ] ; then
WANTS_LAA=$FALSE
else
showUsage && exit 1
fi;;
help) showUsage && exit 0;;
-*) showUsage && exit 1;;
*) showUsage && exit 1;;
esac
done
shift $[OPTIND-1]
if [ $OPTIND -le 1 ] ; then
showUsage && exit 1
fi
fi
if [ $WANTS_CLEAN -ne $TRUE ] && [ $WANTS_CONFIG -ne $TRUE ] && \
[ $WANTS_BUILD -ne $TRUE ] && [ $WANTS_VERSION -ne $TRUE ] && \
[ $WANTS_PACKAGE -ne $TRUE ] ; then
# the user didn't say what to do, so assume he wants to do everything
WANTS_CLEAN=$TRUE
WANTS_CONFIG=$TRUE
WANTS_BUILD=$TRUE
WANTS_VERSION=$TRUE
WANTS_PACKAGE=$TRUE
fi
}
function getoptex()
{
let $# || return 1
local optlist="${1#;}"
let OPTIND || OPTIND=1
[ $OPTIND -lt $# ] || return 1
shift $OPTIND
if [ "$1" != "-" -a "$1" != "${1#-}" ]
then OPTIND=$[OPTIND+1]; if [ "$1" != "--" ]
then
local o
o="-${1#-$OPTOFS}"
for opt in ${optlist#;}
do
OPTOPT="${opt%[;.:]}"
unset OPTARG
local opttype="${opt##*[^;:.]}"
[ -z "$opttype" ] && opttype=";"
if [ ${#OPTOPT} -gt 1 ]
then # long-named option
case $o in
"--$OPTOPT")
if [ "$opttype" != ":" ]; then return 0; fi
OPTARG="$2"
if [ -z "$OPTARG" ];
then # error: must have an argument
let OPTERR && echo "$0: error: $OPTOPT must have an argument" >&2
OPTARG="$OPTOPT";
OPTOPT="?"
return 1;
fi
OPTIND=$[OPTIND+1] # skip option's argument
return 0
;;
"--$OPTOPT="*)
if [ "$opttype" = ";" ];
then # error: must not have arguments
let OPTERR && echo "$0: error: $OPTOPT must not have arguments" >&2
OPTARG="$OPTOPT"
OPTOPT="?"
return 1
fi
OPTARG=${o#"--$OPTOPT="}
return 0
;;
esac
else # short-named option
case "$o" in
"-$OPTOPT")
unset OPTOFS
[ "$opttype" != ":" ] && return 0
OPTARG="$2"
if [ -z "$OPTARG" ]
then
echo "$0: error: -$OPTOPT must have an argument" >&2
OPTARG="$OPTOPT"
OPTOPT="?"
return 1
fi
OPTIND=$[OPTIND+1] # skip option's argument
return 0
;;
"-$OPTOPT"*)
if [ $opttype = ";" ]
then # an option with no argument is in a chain of options
OPTOFS="$OPTOFS?" # move to the next option in the chain
OPTIND=$[OPTIND-1] # the chain still has other options
return 0
else
unset OPTOFS
OPTARG="${o#-$OPTOPT}"
return 0
fi
;;
esac
fi
done
echo "$0: error: invalid option: $o"
showUsage
exit 1
fi; fi
OPTOPT="?"
unset OPTARG
return 1
}
function optlistex
{
local l="$1"
local m # mask
local r # to store result
while [ ${#m} -lt $[${#l}-1] ]; do m="$m?"; done # create a "???..." mask
while [ -n "$l" ]
do
r="${r:+"$r "}${l%$m}" # append the first character of $l to $r
l="${l#?}" # cut the first charecter from $l
m="${m#?}" # cut one "?" sign from m
if [ -n "${l%%[^:.;]*}" ]
then # a special character (";", ".", or ":") was found
r="$r${l%$m}" # append it to $r
l="${l#?}" # cut the special character from l
m="${m#?}" # cut one more "?" sign
fi
done
echo $r
}
function getopt()
{
local optlist=`optlistex "$1"`
shift
getoptex "$optlist" "$@"
return $?
}
###
### Main Logic
###
MAIN_START_TIME=`date +%s`
getArgs $*
path=$WINPATH:/usr/local/bin:/usr/bin:/bin
if [ ! -f "$WINPYTHON" ] ; then
echo "ERROR: You need to edit .bashrc and set WINPYTHON at the top to point at the path of your windows python executable."
exit 1
fi
pushd indra > /dev/null
if [ $WANTS_CLEAN -eq $TRUE ] ; then
$WINPYTHON develop.py clean
find . -name "*.pyc" -exec rm {} \;
fi
if [ $WANTS_VERSION -eq $TRUE ] ; then
buildVer=`hg summary | head -1 | cut -d " " -f 2 | cut -d : -f 1`
majorVer=`cat Version | cut -d "=" -f 2 | cut -d "." -f 1`
minorVer=`cat Version | cut -d "=" -f 2 | cut -d "." -f 2`
patchVer=`cat Version | cut -d "=" -f 2 | cut -d "." -f 3`
echo "Building $CHANN- ${majorVer}.${minorVer}.${patchVer}.${buildVer}"
sed -e "s#LL_VERSION_BUILD = .*\$#LL_VERSION_BUILD = ${buildVer};#" \
-e "s#LL_VERSION_MAJOR = .*\$#LL_VERSION_MAJOR = ${majorVer};#" \
-e "s#LL_VERSION_MINOR = .*\$#LL_VERSION_MINOR = ${minorVer};#" \
-e "s#LL_VERSION_PATCH = .*\$#LL_VERSION_PATCH = ${patchVer};#" \
-e "s#LL_CHANNEL = .*\$#LL_CHANNEL = \"Firestorm-$CHANNEL\";#" llcommon/llversionviewer.cpp.in > llcommon/llversionviewer.cpp
fi
if [ $WANTS_CONFIG -eq $TRUE ] ; then
mkdir -p ../logs > /dev/null 2>&1
if [ $WANTS_LAA -eq $TRUE ] ; then LAA=ON ; else LAA=OFF ; fi
if [ $WANTS_KDU -eq $TRUE ] ; then KDU=ON ; else KDU=OFF ; fi
$WINPYTHON develop.py -G vc80 -t $BTYPE configure -DFIRECYG:BOOL=ON -DPACKAGE:BOOL=ON -DLL_TESTS:BOOL=OFF -DVIEWER_CHANNEL:STRING=Firestorm-$CHANNEL -DVIEWER_LOGIN_CHANNEL:STRING=Firestorm-$CHANNEL -DLAA:BOOL=$LAA -DUSE_KDU:BOOL=$KDU 2>&1 | tee $LOG
mkdir -p build-vc80/sharedlibs/RelWithDebInfo
mkdir -p build-vc80/sharedlibs/Release
cp -f ../libraries/i686-win32/lib/release/fmod.dll .
cp fmod.dll ./build-vc80/sharedlibs/RelWithDebInfo
cp fmod.dll ./build-vc80/sharedlibs/Release
fi
if [ $WANTS_BUILD -eq $TRUE ] ; then
echo "Building in progress. Check $LOG for verbose status."
$WINPYTHON develop.py -G vc80 -t $BTYPE build 2>&1 | tee -a $LOG | grep Build
trap - INT TERM EXIT
# Save the .h file we built with in case of errors in compile.
# Except during the build process, the .h file should ALWAYS be the
# same as existed in the source repository to avoid merge conflicts
# during updates.
echo "Complete"
fi
popd > /dev/null


@ -1,8 +1,7 @@
Linden Lab would like to acknowledge source code contributions from the
following residents. The Second Life resident name is given below,
along with the issue identifier corresponding to the patches we've
received from them. To see more about these contributions, visit the
browsable version: http://wiki.secondlife.com/wiki/Source_contributions
received from them.
Able Whitman
VWR-650
@ -84,10 +83,12 @@ Aleric Inglewood
VWR-24315
VWR-24317
VWR-24320
VWR-24321
VWR-24321
VWR-24337
VWR-24354
VWR-24366
VWR-24519
VWR-24520
SNOW-84
SNOW-477
SNOW-744
@ -154,8 +155,11 @@ Angus Boyd
VWR-592
Ann Congrejo
CT-193
Ansariel Hiller
STORM-1101
Ardy Lay
VWR-19499
VWR-24917
Argent Stonecutter
VWR-68
Armin Weatherwax
@ -192,6 +196,7 @@ blino Nakamura
VWR-17
Boroondas Gupte
OPEN-29
OPEN-39
SNOW-278
SNOW-503
SNOW-510
@ -200,10 +205,12 @@ Boroondas Gupte
SNOW-624
SNOW-737
STORM-318
STORM-1182
VWR-233
VWR-20583
VWR-20891
VWR-23455
VWR-24487
WEB-262
Bulli Schumann
CT-218
@ -237,6 +244,8 @@ Coaldust Numbers
VWR-1095
Cron Stardust
VWR-10579
VWR-25120
STORM-1075
Cypren Christenson
STORM-417
Dale Glass
@ -355,6 +364,9 @@ Ian Kas
[NO JIRA] (Ukrainian localization)
CT-322
CT-325
Ima Mechanique
OPEN-50
OPEN-61
Irene Muni
CT-324
CT-352
@ -403,8 +415,25 @@ Jonathan Yap
STORM-977
STORM-979
STORM-980
STORM-1040
VWR-17801
VWR-24347
STORM-975
STORM-1019
STORM-844
STORM-643
STORM-1020
STORM-1064
STORM-960
STORM-1101
STORM-1108
STORM-1094
STORM-1077
STORM-953
STORM-1128
STORM-956
STORM-1095
STORM-1236
Kage Pixel
VWR-11
Ken March
@ -421,10 +450,13 @@ Kitty Barnett
STORM-288
STORM-799
STORM-800
STORM-1001
VWR-24217
Kunnis Basiat
VWR-82
VWR-102
Lance Corrimal
VWR-25269
Latif Khalifa
VWR-5370
leliel Mirihi
@ -566,6 +598,10 @@ Nicholaz Beresford
VWR-2412
VWR-2682
VWR-2684
Nicky Perian
OPEN-1
STORM-1087
STORM-1090
Nounouch Hapmouche
VWR-238
Patric Mills
@ -635,6 +671,9 @@ Robin Cornelius
SNOW-747
STORM-422
STORM-960
STORM-1019
STORM-1095
STORM-1128
VWR-2488
VWR-9557
VWR-10579
@ -685,6 +724,7 @@ Shawn Kaufmat
SNOW-240
Siana Gearz
STORM-960
STORM-1088
SignpostMarv Martin
VWR-153
VWR-154
@ -776,6 +816,8 @@ Thickbrick Sleaford
VWR-13483
VWR-13947
VWR-24420
STORM-956
STORM-1147
Thraxis Epsilon
SVC-371
VWR-383
@ -837,11 +879,14 @@ Whoops Babii
Wilton Lundquist
VWR-7682
WolfPup Lowenhar
OPEN-1
OPEN-37
SNOW-622
SNOW-772
STORM-102
STORM-103
STORM-143
STORM-236
STORM-255
STORM-256
STORM-288
@ -851,6 +896,7 @@ WolfPup Lowenhar
STORM-674
STORM-776
STORM-825
STORM-1098
VWR-20741
VWR-20933
Zai Lynch


@ -20,10 +20,6 @@ project(${ROOT_PROJECT_NAME})
set(CMAKE_MODULE_PATH "${CMAKE_SOURCE_DIR}/cmake")
include(Variables)
include(Utils)
# Load version data and build data
load_version_data()
if (DARWIN)
# 2.6.4 fixes a Mac bug in get_target_property(... "SLPlugin" LOCATION):
@ -39,8 +35,10 @@ endif (NOT CMAKE_BUILD_TYPE)
# For the library installation process;
# see cmake/Prebuild.cmake for the counterpart code.
file(MAKE_DIRECTORY ${CMAKE_BINARY_DIR}/temp)
file(WRITE ${CMAKE_BINARY_DIR}/temp/sentinel_installed "0")
if ("${CMAKE_SOURCE_DIR}/../autobuild.xml" IS_NEWER_THAN "${CMAKE_BINARY_DIR}/temp/sentinel_installed")
file(MAKE_DIRECTORY ${CMAKE_BINARY_DIR}/temp)
file(WRITE ${CMAKE_BINARY_DIR}/temp/sentinel_installed "0")
endif ("${CMAKE_SOURCE_DIR}/../autobuild.xml" IS_NEWER_THAN "${CMAKE_BINARY_DIR}/temp/sentinel_installed")
add_subdirectory(cmake)
add_subdirectory(${LIBS_OPEN_PREFIX}llaudio)
@ -107,7 +105,7 @@ if (VIEWER)
endif (VIEWER)
# Linux builds the viewer and server in 2 separate projects
# In order for ./develop.py build server to work on linux,
# In order for build server to work on linux,
# the viewer project needs a server target.
# This is not true for mac and windows.
if (LINUX)


@ -1 +1 @@
VERSION_VIEWER=2.5.3
VERSION_VIEWER=2.6.9

indra/cmake/00-Common.cmake Normal file → Executable file

@ -7,10 +7,10 @@ include(Variables)
# Portable compilation flags.
set(CMAKE_CXX_FLAGS_DEBUG "-D_DEBUG -DLL_DEBUG=1")
set(CMAKE_CXX_FLAGS_RELEASE
"-DLL_RELEASE=1 -DLL_RELEASE_FOR_DOWNLOAD=1 -D_SECURE_SCL=0 -DNDEBUG")
"-DLL_RELEASE=1 -DLL_RELEASE_FOR_DOWNLOAD=1 -DNDEBUG")
set(CMAKE_CXX_FLAGS_RELWITHDEBINFO
"-DLL_RELEASE=1 -D_SECURE_SCL=0 -DNDEBUG -DLL_RELEASE_WITH_DEBUG_INFO=1")
"-DLL_RELEASE=1 -DNDEBUG -DLL_RELEASE_WITH_DEBUG_INFO=1")
# Configure crash reporting
set(RELEASE_CRASH_REPORTING OFF CACHE BOOL "Enable use of crash reporting in release builds")
@ -36,18 +36,23 @@ if (WINDOWS)
# Don't build DLLs.
set(BUILD_SHARED_LIBS OFF)
set(CMAKE_CXX_FLAGS_DEBUG "${CMAKE_CXX_FLAGS_DEBUG} /Od /Zi /MD /MP"
# for "backwards compatibility", cmake sneaks in the Zm1000 option which royally
# screws incredibuild. this hack disables it.
# for details see: http://connect.microsoft.com/VisualStudio/feedback/details/368107/clxx-fatal-error-c1027-inconsistent-values-for-ym-between-creation-and-use-of-precompiled-headers
# http://www.ogre3d.org/forums/viewtopic.php?f=2&t=60015
# http://www.cmake.org/pipermail/cmake/2009-September/032143.html
string(REPLACE "/Zm1000" " " CMAKE_CXX_FLAGS ${CMAKE_CXX_FLAGS})
set(CMAKE_CXX_FLAGS_DEBUG "${CMAKE_CXX_FLAGS_DEBUG} /Od /Zi /MDd /MP -D_SCL_SECURE_NO_WARNINGS=1"
CACHE STRING "C++ compiler debug options" FORCE)
set(CMAKE_CXX_FLAGS_RELWITHDEBINFO
"${CMAKE_CXX_FLAGS_RELWITHDEBINFO} /Od /Zi /MD /MP /Ob2 /arch:SSE2"
"${CMAKE_CXX_FLAGS_RELWITHDEBINFO} /Od /Zi /MD /MP /Ob2 -D_SECURE_STL=0"
CACHE STRING "C++ compiler release-with-debug options" FORCE)
set(CMAKE_CXX_FLAGS_RELEASE
"${CMAKE_CXX_FLAGS_RELEASE} ${LL_CXX_FLAGS} /O2 /Zi /MD /MP /Ob2 /Oi /GF /Gy /arch:SSE2"
"${CMAKE_CXX_FLAGS_RELEASE} ${LL_CXX_FLAGS} /O2 /Zi /MD /MP /Ob2 -D_SECURE_STL=0 -D_HAS_ITERATOR_DEBUGGING=0 /arch:SSE2"
CACHE STRING "C++ compiler release options" FORCE)
if (LAA)
set(CMAKE_EXE_LINKER_FLAGS "${CMAKE_EXE_LINKER_FLAGS} /LARGEADDRESSAWARE")
endif (LAA)
set(CMAKE_EXE_LINKER_FLAGS "${CMAKE_EXE_LINKER_FLAGS} /LARGEADDRESSAWARE /INCREMENTAL")
set(CMAKE_CXX_STANDARD_LIBRARIES "")
set(CMAKE_C_STANDARD_LIBRARIES "")
@ -63,18 +68,9 @@ if (WINDOWS)
/Zc:forScope
/nologo
/Oy-
)
if(MSVC80 OR MSVC90)
set(CMAKE_CXX_FLAGS_RELEASE
"${CMAKE_CXX_FLAGS_RELEASE} -D_SECURE_STL=0 -D_HAS_ITERATOR_DEBUGGING=0"
CACHE STRING "C++ compiler release options" FORCE)
add_definitions(
/Zc:wchar_t-
)
endif (MSVC80 OR MSVC90)
# Are we using the crummy Visual Studio KDU build workaround?
if (NOT VS_DISABLE_FATAL_WARNINGS)
add_definitions(/WX)
@ -83,20 +79,6 @@ if (WINDOWS)
# configure win32 API for windows XP+ compatibility
set(WINVER "0x0501" CACHE STRING "Win32 API Target version (see http://msdn.microsoft.com/en-us/library/aa383745%28v=VS.85%29.aspx)")
add_definitions("/DWINVER=${WINVER}" "/D_WIN32_WINNT=${WINVER}")
# Various libs are compiler specific, generate some variables here we can just use
# when we require them instead of reimplementing the test each time.
if (MSVC71)
set(MSVC_DIR 7.1)
set(MSVC_SUFFIX 71)
elseif (MSVC80)
set(MSVC_DIR 8.0)
set(MSVC_SUFFIX 80)
elseif (MSVC90)
set(MSVC_DIR 9.0)
set(MSVC_SUFFIX 90)
endif (MSVC71)
endif (WINDOWS)
@ -200,7 +182,7 @@ if (LINUX)
endif (VIEWER)
set(CMAKE_CXX_FLAGS_DEBUG "-fno-inline ${CMAKE_CXX_FLAGS_DEBUG}")
set(CMAKE_CXX_FLAGS_RELEASE "-O3 ${CMAKE_CXX_FLAGS_RELEASE}")
set(CMAKE_CXX_FLAGS_RELEASE "-O2 ${CMAKE_CXX_FLAGS_RELEASE}")
endif (LINUX)


@ -32,27 +32,21 @@ else (STANDALONE)
)
elseif (DARWIN)
if (LLCOMMON_LINK_SHARED)
set(APR_selector "0.3.7.dylib")
set(APRUTIL_selector "0.3.8.dylib")
set(APR_selector "0.dylib")
set(APRUTIL_selector "0.dylib")
else (LLCOMMON_LINK_SHARED)
set(APR_selector "a")
set(APRUTIL_selector "a")
endif (LLCOMMON_LINK_SHARED)
set(APR_LIBRARIES
debug ${ARCH_PREBUILT_DIRS_DEBUG}/libapr-1.${APR_selector}
optimized ${ARCH_PREBUILT_DIRS_RELEASE}/libapr-1.${APR_selector}
)
set(APRUTIL_LIBRARIES
debug ${ARCH_PREBUILT_DIRS_DEBUG}/libaprutil-1.${APRUTIL_selector}
optimized ${ARCH_PREBUILT_DIRS_RELEASE}/libaprutil-1.${APRUTIL_selector}
)
set(APR_LIBRARIES libapr-1.${APR_selector})
set(APRUTIL_LIBRARIES libaprutil-1.${APRUTIL_selector})
set(APRICONV_LIBRARIES iconv)
else (WINDOWS)
set(APR_LIBRARIES apr-1)
set(APRUTIL_LIBRARIES aprutil-1)
set(APRICONV_LIBRARIES iconv)
endif (WINDOWS)
set(APR_INCLUDE_DIR ${LIBS_PREBUILT_DIR}/${LL_ARCH_DIR}/include/apr-1)
set(APR_INCLUDE_DIR ${LIBS_PREBUILT_DIR}/include/apr-1)
if (LINUX)
if (VIEWER)


@ -8,7 +8,8 @@ if (STANDALONE)
else (STANDALONE)
if (LINUX)
# Need to add the pthread dependency explicitly to support ld.gold.
set(DB_LIBRARIES db-4.2 pthread)
use_prebuilt_binary(db)
set(DB_LIBRARIES db-5.1 pthread)
else (LINUX)
set(DB_LIBRARIES db-4.2)
endif (LINUX)


@ -10,43 +10,49 @@ if (STANDALONE)
set(BOOST_PROGRAM_OPTIONS_LIBRARY boost_program_options-mt)
set(BOOST_REGEX_LIBRARY boost_regex-mt)
set(BOOST_SIGNALS_LIBRARY boost_signals-mt)
set(BOOST_SYSTEM_LIBRARY boost_system-mt)
set(BOOST_FILESYSTEM_LIBRARY boost_filesystem-mt)
else (STANDALONE)
use_prebuilt_binary(boost)
set(Boost_INCLUDE_DIRS ${LIBS_PREBUILT_DIR}/include)
if (WINDOWS)
set(BOOST_VERSION 1_39)
# SNOW-788
# 00-Common.cmake already sets MSVC_SUFFIX to be correct for the VS we are using, e.g. VC71, VC80, VC90, etc.
# The precompiled boost libs for VC71 use a different suffix than those for VS80 and VS90.
# This code should ensure the cmake rules are valid for any VS being used in future as long as the appropriate
# boost libs are available - RC.
if (MSVC71)
set(BOOST_OPTIM_SUFFIX mt-s)
set(BOOST_DEBUG_SUFFIX mt-sgd)
else (MSVC71)
set(BOOST_OPTIM_SUFFIX mt)
set(BOOST_DEBUG_SUFFIX mt-gd)
endif (MSVC71)
set(BOOST_PROGRAM_OPTIONS_LIBRARY
optimized libboost_program_options-vc${MSVC_SUFFIX}-${BOOST_OPTIM_SUFFIX}-${BOOST_VERSION}
debug libboost_program_options-vc${MSVC_SUFFIX}-${BOOST_DEBUG_SUFFIX}-${BOOST_VERSION})
set(BOOST_REGEX_LIBRARY
optimized libboost_regex-vc${MSVC_SUFFIX}-${BOOST_OPTIM_SUFFIX}-${BOOST_VERSION}
debug libboost_regex-vc${MSVC_SUFFIX}-${BOOST_DEBUG_SUFFIX}-${BOOST_VERSION})
set(BOOST_SIGNALS_LIBRARY
optimized libboost_signals-vc${MSVC_SUFFIX}-${BOOST_OPTIM_SUFFIX}-${BOOST_VERSION}
debug libboost_signals-vc${MSVC_SUFFIX}-${BOOST_DEBUG_SUFFIX}-${BOOST_VERSION})
elseif (DARWIN)
set(BOOST_PROGRAM_OPTIONS_LIBRARY boost_program_options-xgcc40-mt)
set(BOOST_REGEX_LIBRARY boost_regex-xgcc40-mt)
set(BOOST_SIGNALS_LIBRARY boost_signals-xgcc40-mt)
elseif (LINUX)
set(BOOST_PROGRAM_OPTIONS_LIBRARY boost_program_options-gcc41-mt)
set(BOOST_REGEX_LIBRARY boost_regex-gcc41-mt)
set(BOOST_SIGNALS_LIBRARY boost_signals-gcc41-mt)
set(BOOST_VERSION 1_45)
if(MSVC80)
set(BOOST_PROGRAM_OPTIONS_LIBRARY
optimized libboost_program_options-vc80-mt-${BOOST_VERSION}
debug libboost_program_options-vc80-mt-gd-${BOOST_VERSION})
set(BOOST_REGEX_LIBRARY
optimized libboost_regex-vc80-mt-${BOOST_VERSION}
debug libboost_regex-vc80-mt-gd-${BOOST_VERSION})
set(BOOST_SIGNALS_LIBRARY
optimized libboost_signals-vc80-mt-${BOOST_VERSION}
debug libboost_signals-vc80-mt-gd-${BOOST_VERSION})
set(BOOST_SYSTEM_LIBRARY
optimized libboost_system-vc80-mt-${BOOST_VERSION}
debug libboost_system-vc80-mt-gd-${BOOST_VERSION})
set(BOOST_FILESYSTEM_LIBRARY
optimized libboost_filesystem-vc80-mt-${BOOST_VERSION}
debug libboost_filesystem-vc80-mt-gd-${BOOST_VERSION})
else(MSVC80)
# MSVC 10.0 config
set(BOOST_PROGRAM_OPTIONS_LIBRARY
optimized libboost_program_options-vc100-mt-${BOOST_VERSION}
debug libboost_program_options-vc100-mt-gd-${BOOST_VERSION})
set(BOOST_REGEX_LIBRARY
optimized libboost_regex-vc100-mt-${BOOST_VERSION}
debug libboost_regex-vc100-mt-gd-${BOOST_VERSION})
set(BOOST_SYSTEM_LIBRARY
optimized libboost_system-vc100-mt-${BOOST_VERSION}
debug libboost_system-vc100-mt-gd-${BOOST_VERSION})
set(BOOST_FILESYSTEM_LIBRARY
optimized libboost_filesystem-vc100-mt-${BOOST_VERSION}
debug libboost_filesystem-vc100-mt-gd-${BOOST_VERSION})
endif (MSVC80)
elseif (DARWIN OR LINUX)
set(BOOST_PROGRAM_OPTIONS_LIBRARY boost_program_options)
set(BOOST_REGEX_LIBRARY boost_regex)
set(BOOST_SYSTEM_LIBRARY boost_system)
set(BOOST_FILESYSTEM_LIBRARY boost_filesystem)
endif (WINDOWS)
endif (STANDALONE)

indra/cmake/BuildVersion.cmake Normal file → Executable file

@ -5,7 +5,7 @@ include(Python)
macro (build_version _target)
execute_process(
COMMAND ${PYTHON_EXECUTABLE} ${SCRIPTS_DIR}/build_version.py
llversion${_target}.cpp ${LLCOMMON_INCLUDE_DIRS}
llversion${_target}.h ${LLCOMMON_INCLUDE_DIRS}
OUTPUT_VARIABLE ${_target}_VERSION
OUTPUT_STRIP_TRAILING_WHITESPACE
)
@ -16,3 +16,19 @@ macro (build_version _target)
message(SEND_ERROR "Could not determine ${_target} version")
endif (${_target}_VERSION)
endmacro (build_version)
macro (build_channel _target)
execute_process(
COMMAND ${PYTHON_EXECUTABLE} ${SCRIPTS_DIR}/build_channel.py
llversion${_target}.h ${LLCOMMON_INCLUDE_DIRS}
OUTPUT_VARIABLE VIEWER_CHANNEL
OUTPUT_STRIP_TRAILING_WHITESPACE
)
if (VIEWER_CHANNEL)
message(STATUS "Channel is ${VIEWER_CHANNEL}")
else (VIEWER_CHANNEL)
message(SEND_ERROR "Could not determine channel")
endif (VIEWER_CHANNEL)
endmacro (build_channel)
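For reference, the new macro is invoked the same way as build_version; a minimal sketch, assuming the viewer target name that the Versions.cmake hunk further down already uses:

    include(BuildVersion)
    build_version(viewer)    # runs scripts/build_version.py on llversionviewer.h and sets viewer_VERSION
    build_channel(viewer)    # runs scripts/build_channel.py on llversionviewer.h and sets VIEWER_CHANNEL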


@ -13,10 +13,7 @@ else (STANDALONE)
if (WINDOWS)
set(CARES_LIBRARIES areslib)
elseif (DARWIN)
set(CARES_LIBRARIES
optimized ${ARCH_PREBUILT_DIRS_RELEASE}/libcares.a
debug ${ARCH_PREBUILT_DIRS_DEBUG}/libcares.a
)
set(CARES_LIBRARIES cares)
else (WINDOWS)
set(CARES_LIBRARIES cares)
endif (WINDOWS)


@ -20,7 +20,6 @@ set(cmake_SOURCE_FILES
CSharpMacros.cmake
DBusGlib.cmake
DirectX.cmake
ELFIO.cmake
EXPAT.cmake
FindAPR.cmake
FindBerkeleyDB.cmake
@ -29,7 +28,6 @@ set(cmake_SOURCE_FILES
FindFMOD.cmake
FindGooglePerfTools.cmake
FindMono.cmake
FindMT.cmake
FindMySQL.cmake
FindOpenJPEG.cmake
FindXmlRpcEpi.cmake
@ -77,7 +75,6 @@ set(cmake_SOURCE_FILES
UI.cmake
UnixInstall.cmake
Variables.cmake
Versions.cmake
XmlRpcEpi.cmake
ZLIB.cmake
)
@ -86,7 +83,6 @@ source_group("Shared Rules" FILES ${cmake_SOURCE_FILES})
set(master_SOURCE_FILES
../CMakeLists.txt
../develop.py
)
if (SERVER)

indra/cmake/Copy3rdPartyLibs.cmake Normal file → Executable file

@ -5,6 +5,7 @@
# VisualStudio.
include(CMakeCopyIfDifferent)
include(Linking)
###################################################################
# set up platform specific lists of files that need to be copied
@ -16,36 +17,40 @@ if(WINDOWS)
#*******************************
# VIVOX - *NOTE: no debug version
set(vivox_src_dir "${CMAKE_SOURCE_DIR}/newview/vivox-runtime/i686-win32")
set(vivox_src_dir "${ARCH_PREBUILT_DIRS_RELEASE}")
set(vivox_files
SLVoice.exe
libsndfile-1.dll
vivoxplatform.dll
vivoxsdk.dll
ortp.dll
alut.dll
wrap_oal.dll
alut.dll
wrap_oal.dll
zlib1.dll
vivoxoal.dll
)
#*******************************
# Misc shared libs
# *TODO - update this to use LIBS_PREBUILT_DIR and LL_ARCH_DIR variables
# or ARCH_PREBUILT_DIRS
set(debug_src_dir "${CMAKE_SOURCE_DIR}/../libraries/i686-win32/lib/debug")
set(debug_src_dir "${ARCH_PREBUILT_DIRS_DEBUG}")
set(debug_files
openjpegd.dll
libapr-1.dll
libaprutil-1.dll
libapriconv-1.dll
ssleay32.dll
libeay32.dll
)
# *TODO - update this to use LIBS_PREBUILT_DIR and LL_ARCH_DIR variables
# or ARCH_PREBUILT_DIRS
set(release_src_dir "${CMAKE_SOURCE_DIR}/../libraries/i686-win32/lib/release")
set(release_src_dir "${ARCH_PREBUILT_DIRS_RELEASE}")
set(release_files
openjpeg.dll
libapr-1.dll
libaprutil-1.dll
libapriconv-1.dll
ssleay32.dll
libeay32.dll
)
if(USE_GOOGLE_PERFTOOLS)
@ -119,6 +124,64 @@ if (MSVC80)
set(third_party_targets ${third_party_targets} ${out_targets})
endif (EXISTS ${release_msvc8_redist_path})
elseif (MSVC_VERSION EQUAL 1600) # VisualStudio 2010
FIND_PATH(debug_msvc10_redist_path msvcr100d.dll
PATHS
${MSVC_DEBUG_REDIST_PATH}
[HKEY_LOCAL_MACHINE\\SOFTWARE\\Microsoft\\VisualStudio\\10.0\\Setup\\VC;ProductDir]/redist/Debug_NonRedist/x86/Microsoft.VC100.DebugCRT
[HKEY_LOCAL_MACHINE\\SYSTEM\\CurrentControlSet\\Control\\Windows;Directory]/SysWOW64
[HKEY_LOCAL_MACHINE\\SYSTEM\\CurrentControlSet\\Control\\Windows;Directory]/System32
NO_DEFAULT_PATH
)
if(EXISTS ${debug_msvc10_redist_path})
set(debug_msvc10_files
msvcr100d.dll
msvcp100d.dll
)
copy_if_different(
${debug_msvc10_redist_path}
"${SHARED_LIB_STAGING_DIR_DEBUG}"
out_targets
${debug_msvc10_files}
)
set(third_party_targets ${third_party_targets} ${out_targets})
endif ()
FIND_PATH(release_msvc10_redist_path msvcr100.dll
PATHS
${MSVC_REDIST_PATH}
[HKEY_LOCAL_MACHINE\\SOFTWARE\\Microsoft\\VisualStudio\\10.0\\Setup\\VC;ProductDir]/redist/x86/Microsoft.VC100.CRT
[HKEY_LOCAL_MACHINE\\SYSTEM\\CurrentControlSet\\Control\\Windows;Directory]/SysWOW64
[HKEY_LOCAL_MACHINE\\SYSTEM\\CurrentControlSet\\Control\\Windows;Directory]/System32
NO_DEFAULT_PATH
)
if(EXISTS ${release_msvc10_redist_path})
set(release_msvc10_files
msvcr100.dll
msvcp100.dll
)
copy_if_different(
${release_msvc10_redist_path}
"${SHARED_LIB_STAGING_DIR_RELEASE}"
out_targets
${release_msvc10_files}
)
set(third_party_targets ${third_party_targets} ${out_targets})
copy_if_different(
${release_msvc10_redist_path}
"${SHARED_LIB_STAGING_DIR_RELWITHDEBINFO}"
out_targets
${release_msvc10_files}
)
set(third_party_targets ${third_party_targets} ${out_targets})
endif ()
endif (MSVC80)
elseif(DARWIN)
@ -126,28 +189,26 @@ elseif(DARWIN)
set(SHARED_LIB_STAGING_DIR_RELWITHDEBINFO "${SHARED_LIB_STAGING_DIR}/RelWithDebInfo/Resources")
set(SHARED_LIB_STAGING_DIR_RELEASE "${SHARED_LIB_STAGING_DIR}/Release/Resources")
set(vivox_src_dir "${CMAKE_SOURCE_DIR}/newview/vivox-runtime/universal-darwin")
set(vivox_src_dir "${ARCH_PREBUILT_DIRS_RELEASE}")
set(vivox_files
SLVoice
libalut.dylib
libopenal.dylib
libsndfile.dylib
libvivoxoal.dylib
libortp.dylib
libalut.dylib
libvivoxplatform.dylib
libvivoxsdk.dylib
)
# *TODO - update this to use LIBS_PREBUILT_DIR and LL_ARCH_DIR variables
# or ARCH_PREBUILT_DIRS
set(debug_src_dir "${CMAKE_SOURCE_DIR}/../libraries/universal-darwin/lib_debug")
set(debug_src_dir "${ARCH_PREBUILT_DIRS_DEBUG}")
set(debug_files
)
# *TODO - update this to use LIBS_PREBUILT_DIR and LL_ARCH_DIR variables
# or ARCH_PREBUILT_DIRS
set(release_src_dir "${CMAKE_SOURCE_DIR}/../libraries/universal-darwin/lib_release")
set(release_src_dir "${ARCH_PREBUILT_DIRS_RELEASE}")
set(release_files
libapr-1.0.3.7.dylib
libapr-1.0.dylib
libapr-1.dylib
libaprutil-1.0.3.8.dylib
libaprutil-1.0.dylib
libaprutil-1.dylib
libexpat.0.5.0.dylib
libexpat.1.5.2.dylib
libexpat.dylib
libllqtwebkit.dylib
libndofdev.dylib
@ -164,7 +225,7 @@ elseif(LINUX)
set(SHARED_LIB_STAGING_DIR_RELWITHDEBINFO "${SHARED_LIB_STAGING_DIR}")
set(SHARED_LIB_STAGING_DIR_RELEASE "${SHARED_LIB_STAGING_DIR}")
set(vivox_src_dir "${CMAKE_SOURCE_DIR}/newview/vivox-runtime/i686-linux")
set(vivox_src_dir "${ARCH_PREBUILT_DIRS_RELEASE}")
set(vivox_files
libsndfile.so.1
libortp.so
@ -175,20 +236,20 @@ elseif(LINUX)
)
# *TODO - update this to use LIBS_PREBUILT_DIR and LL_ARCH_DIR variables
# or ARCH_PREBUILT_DIRS
set(debug_src_dir "${CMAKE_SOURCE_DIR}/../libraries/i686-linux/lib_debug")
set(debug_src_dir "${ARCH_PREBUILT_DIRS_DEBUG}")
set(debug_files
)
# *TODO - update this to use LIBS_PREBUILT_DIR and LL_ARCH_DIR variables
# or ARCH_PREBUILT_DIRS
set(release_src_dir "${CMAKE_SOURCE_DIR}/../libraries/i686-linux/lib_release_client")
set(release_src_dir "${ARCH_PREBUILT_DIRS_RELEASE}")
# *FIX - figure out what to do with duplicate libalut.so here -brad
set(release_files
libapr-1.so.0
libaprutil-1.so.0
libatk-1.0.so
libbreakpad_client.so.0
libcrypto.so.0.9.7
libdb-4.2.so
libcrypto.so.1.0.0
libdb-5.1.so
libexpat.so
libexpat.so.1
libgmock_main.so
@ -200,10 +261,11 @@ elseif(LINUX)
libopenal.so
libopenjpeg.so
libssl.so
libstacktrace.so
libtcmalloc.so
libuuid.so.1
libssl.so.0.9.7
libuuid.so.16
libuuid.so.16.0.22
libssl.so.1.0.0
libfontconfig.so.1.4.4
)
if (FMOD)


@ -10,7 +10,7 @@ elseif (LINUX)
use_prebuilt_binary(dbusglib)
set(DBUSGLIB_FOUND ON FORCE BOOL)
set(DBUSGLIB_INCLUDE_DIRS
${LIBS_PREBUILT_DIR}/${LL_ARCH_DIR}/include/glib-2.0
${LIBS_PREBUILT_DIR}/include/dbus
)
# We don't need to explicitly link against dbus-glib itself, because
# the viewer probes for the system's copy at runtime.


@ -3,14 +3,14 @@
if (VIEWER AND WINDOWS)
find_path(DIRECTX_INCLUDE_DIR dxdiag.h
"$ENV{DXSDK_DIR}/Include"
"$ENV{PROGRAMFILES}/Microsoft DirectX SDK (June 2010)/Include"
"$ENV{PROGRAMFILES}/Microsoft DirectX SDK (February 2010)/Include"
"$ENV{PROGRAMFILES}/Microsoft DirectX SDK (August 2009)/Include"
"$ENV{PROGRAMFILES}/Microsoft DirectX SDK (March 2009)/Include"
"$ENV{PROGRAMFILES}/Microsoft DirectX SDK (August 2008)/Include"
"$ENV{PROGRAMFILES}/Microsoft DirectX SDK (June 2008)/Include"
"$ENV{PROGRAMFILES}/Microsoft DirectX SDK (March 2008)/Include"
"$ENV{PROGRAMFILES}/Microsoft DirectX SDK (November 2007)/Include"
"$ENV{PROGRAMFILES}/Microsoft DirectX SDK (August 2007)/Include"
"C:/DX90SDK/Include"
"$ENV{PROGRAMFILES}/DX90SDK/Include"
)
if (DIRECTX_INCLUDE_DIR)
@ -25,14 +25,14 @@ if (VIEWER AND WINDOWS)
find_path(DIRECTX_LIBRARY_DIR dxguid.lib
"$ENV{DXSDK_DIR}/Lib/x86"
"$ENV{PROGRAMFILES}/Microsoft DirectX SDK (June 2010)/Lib/x86"
"$ENV{PROGRAMFILES}/Microsoft DirectX SDK (February 2010)/Lib/x86"
"$ENV{PROGRAMFILES}/Microsoft DirectX SDK (August 2009)/Lib/x86"
"$ENV{PROGRAMFILES}/Microsoft DirectX SDK (March 2009)/Lib/x86"
"$ENV{PROGRAMFILES}/Microsoft DirectX SDK (August 2008)/Lib/x86"
"$ENV{PROGRAMFILES}/Microsoft DirectX SDK (June 2008)/Lib/x86"
"$ENV{PROGRAMFILES}/Microsoft DirectX SDK (March 2008)/Lib/x86"
"$ENV{PROGRAMFILES}/Microsoft DirectX SDK (November 2007)/Lib/x86"
"$ENV{PROGRAMFILES}/Microsoft DirectX SDK (August 2007)/Lib/x86"
"C:/DX90SDK/Lib"
"$ENV{PROGRAMFILES}/DX90SDK/Lib"
)
if (DIRECTX_LIBRARY_DIR)


@ -1,19 +0,0 @@
# -*- cmake -*-
include(Prebuilt)
set(ELFIO_FIND_QUIETLY ON)
if (STANDALONE)
include(FindELFIO)
elseif (LINUX)
use_prebuilt_binary(elfio)
set(ELFIO_LIBRARIES ELFIO)
set(ELFIO_INCLUDE_DIR ${LIBS_PREBUILT_DIR}/include)
set(ELFIO_FOUND "YES")
endif (STANDALONE)
if (ELFIO_FOUND)
add_definitions(-DLL_ELFBIN=1)
else (ELFIO_FOUND)
set(ELFIO_INCLUDE_DIR "")
endif (ELFIO_FOUND)

indra/cmake/FMOD.cmake Normal file → Executable file

@ -1,64 +1,39 @@
# -*- cmake -*-
include(Linking)
if(INSTALL_PROPRIETARY)
include(Prebuilt)
use_prebuilt_binary(fmod)
endif(INSTALL_PROPRIETARY)
find_library(FMOD_LIBRARY_RELEASE
NAMES fmod fmodvc fmod-3.75
PATHS
${ARCH_PREBUILT_DIRS_RELEASE}
)
find_library(FMOD_LIBRARY_DEBUG
NAMES fmod fmodvc fmod-3.75
PATHS
${ARCH_PREBUILT_DIRS_DEBUG}
)
if (FMOD_LIBRARY_RELEASE AND FMOD_LIBRARY_DEBUG)
set(FMOD_LIBRARY
debug ${FMOD_LIBRARY_DEBUG}
optimized ${FMOD_LIBRARY_RELEASE})
elseif (FMOD_LIBRARY_RELEASE)
set(FMOD_LIBRARY ${FMOD_LIBRARY_RELEASE})
endif (FMOD_LIBRARY_RELEASE AND FMOD_LIBRARY_DEBUG)
if (NOT FMOD_LIBRARY)
set(FMOD_SDK_DIR CACHE PATH "Path to the FMOD SDK.")
if (FMOD_SDK_DIR)
find_library(FMOD_LIBRARY
NAMES fmodvc fmod-3.75 fmod
PATHS
${FMOD_SDK_DIR}/api/lib
${FMOD_SDK_DIR}/api
${FMOD_SDK_DIR}/lib
${FMOD_SDK_DIR}
)
endif (FMOD_SDK_DIR)
endif (NOT FMOD_LIBRARY)
find_path(FMOD_INCLUDE_DIR fmod.h
${LIBS_PREBUILT_DIR}/include
${FMOD_SDK_DIR}/api/inc
${FMOD_SDK_DIR}/inc
${FMOD_SDK_DIR}
)
if (FMOD_LIBRARY AND FMOD_INCLUDE_DIR)
set(FMOD ON CACHE BOOL "Use closed source FMOD sound library.")
else (FMOD_LIBRARY AND FMOD_INCLUDE_DIR)
set(FMOD_LIBRARY "")
set(FMOD_INCLUDE_DIR "")
if (FMOD)
message(STATUS "No support for FMOD audio (need to set FMOD_SDK_DIR?)")
endif (FMOD)
set(FMOD OFF CACHE BOOL "Use closed source FMOD sound library.")
endif (FMOD_LIBRARY AND FMOD_INCLUDE_DIR)
if (FMOD)
message(STATUS "Building with FMOD audio support")
endif (FMOD)
# -*- cmake -*-
# FMOD can be set when launching the make using the argument -DFMOD:BOOL=ON
# When building using proprietary binaries though (i.e. having access to LL private servers),
# we always build with FMOD.
# Open source devs should then pass -DFMOD:BOOL=ON if they want to build with FMOD, whether
# they are using STANDALONE or not.
if (INSTALL_PROPRIETARY)
set(FMOD ON CACHE BOOL "Use FMOD sound library.")
endif (INSTALL_PROPRIETARY)
if (FMOD)
if (STANDALONE)
# In that case, we use the version of the library installed on the system
set(FMOD_FIND_REQUIRED ON)
include(FindFMOD)
else (STANDALONE)
if (FMOD_LIBRARY AND FMOD_INCLUDE_DIR)
# If the paths have been specified in the arguments, use them
set(FMOD_LIBRARIES ${FMOD_LIBRARY})
MESSAGE(STATUS "Using FMOD path: ${FMOD_LIBRARIES}, ${FMOD_INCLUDE_DIR}")
else (FMOD_LIBRARY AND FMOD_INCLUDE_DIR)
# If not, we're going to try to get the package listed in autobuild.xml
# Note: if you're not using INSTALL_PROPRIETARY, the package URL should be local (file:/// URL)
# as accessing the private LL location will fail if you don't have the credentials
include(Prebuilt)
use_prebuilt_binary(fmod)
if (WINDOWS)
set(FMOD_LIBRARY fmod)
elseif (DARWIN)
set(FMOD_LIBRARY fmod)
elseif (LINUX)
set(FMOD_LIBRARY fmod-3.75)
endif (WINDOWS)
set(FMOD_LIBRARIES ${FMOD_LIBRARY})
set(FMOD_INCLUDE_DIR ${LIBS_PREBUILT_DIR}/include)
endif (FMOD_LIBRARY AND FMOD_INCLUDE_DIR)
endif (STANDALONE)
endif (FMOD)
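A hedged sketch of how a consumer target might pick up the results of this logic; the media_plugin_fmod target name is illustrative and not part of this change:

    if (FMOD)
      include_directories(${FMOD_INCLUDE_DIR})
      target_link_libraries(media_plugin_fmod ${FMOD_LIBRARIES})  # hypothetical target name
    endif (FMOD)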


@ -0,0 +1,41 @@
# -*- cmake -*-
#
# Find the autobuild tool
#
# Output variables:
#
# AUTOBUILD_EXECUTABLE - path to autobuild or pautobuild executable
# *TODO - if cmake was executed by autobuild, autobuild will have set the AUTOBUILD env var
# update this to check for that case
IF (NOT AUTOBUILD_EXECUTABLE)
IF(WIN32)
SET(AUTOBUILD_EXE_NAMES autobuild.cmd autobuild.exe)
ELSE(WIN32)
SET(AUTOBUILD_EXE_NAMES autobuild)
ENDIF(WIN32)
SET(AUTOBUILD_EXECUTABLE)
FIND_PROGRAM(
AUTOBUILD_EXECUTABLE
NAMES ${AUTOBUILD_EXE_NAMES}
PATHS
ENV PATH
${CMAKE_SOURCE_DIR}/..
${CMAKE_SOURCE_DIR}/../..
${CMAKE_SOURCE_DIR}/../../..
PATH_SUFFIXES "/autobuild/bin/"
)
IF (AUTOBUILD_EXECUTABLE)
GET_FILENAME_COMPONENT(_autobuild_name ${AUTOBUILD_EXECUTABLE} NAME_WE)
MESSAGE(STATUS "Using autobuild at: ${AUTOBUILD_EXECUTABLE}")
ELSE (AUTOBUILD_EXECUTABLE)
IF (AUTOBUILD_FIND_REQUIRED)
MESSAGE(FATAL_ERROR "Could not find autobuild executable")
ENDIF (AUTOBUILD_FIND_REQUIRED)
ENDIF (AUTOBUILD_EXECUTABLE)
MARK_AS_ADVANCED(AUTOBUILD_EXECUTABLE)
ENDIF (NOT AUTOBUILD_EXECUTABLE)
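A minimal sketch of using the variable this module exports, mirroring the call the Prebuilt.cmake hunk below makes; the package name here is a placeholder:

    include(FindAutobuild)
    if (AUTOBUILD_EXECUTABLE)
      execute_process(COMMAND "${AUTOBUILD_EXECUTABLE}" install
                              --install-dir=${AUTOBUILD_INSTALL_DIR}
                              --skip-license-check some_package   # some_package is a placeholder
                      WORKING_DIRECTORY "${CMAKE_SOURCE_DIR}"
                      RESULT_VARIABLE some_package_installed)
    endif (AUTOBUILD_EXECUTABLE)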


@ -11,7 +11,7 @@
FIND_PATH(FMOD_INCLUDE_DIR fmod.h PATH_SUFFIXES fmod)
SET(FMOD_NAMES ${FMOD_NAMES} fmod fmodvc fmod-3.75)
SET(FMOD_NAMES ${FMOD_NAMES} fmod fmodvc fmodex fmod-3.75)
FIND_LIBRARY(FMOD_LIBRARY
NAMES ${FMOD_NAMES}
PATH_SUFFIXES fmod


@ -22,9 +22,9 @@ if (PKG_CONFIG_FOUND)
else (LLQtWebkit_FIND_REQUIRED AND LLQtWebkit_FIND_VERSION)
set(_PACKAGE_ARGS libllqtwebkit)
endif (LLQtWebkit_FIND_REQUIRED AND LLQtWebkit_FIND_VERSION)
if (NOT "${CMAKE_MAJOR_VERSION}.${CMAKE_MINOR_VERSION}" VERSION_LESS "2.8")
if (NOT "${CMAKE_MAJOR_VERSION}.${CMAKE_MINOR_VERSION}.${CMAKE_PATCH_VERSION}" VERSION_LESS "2.8.2")
# As virtually nobody will have a pkg-config file for this, always do this check quietly.
# Unfortunately cmake 2.8 or higher is required for pkg_check_modules to have a 'QUIET'.
# Unfortunately cmake 2.8.2 or higher is required for pkg_check_modules to have a 'QUIET'.
set(_PACKAGE_ARGS ${_PACKAGE_ARGS} QUIET)
endif ()
pkg_check_modules(LLQTWEBKIT ${_PACKAGE_ARGS})


@ -9,7 +9,7 @@ else (STANDALONE)
use_prebuilt_binary(freetype)
if (LINUX)
set(FREETYPE_INCLUDE_DIRS
${LIBS_PREBUILT_DIR}/${LL_ARCH_DIR}/include)
${LIBS_PREBUILT_DIR}/include)
else (LINUX)
set(FREETYPE_INCLUDE_DIRS ${LIBS_PREBUILT_DIR}/include)
endif (LINUX)


@ -13,9 +13,9 @@ elseif (LINUX)
set(GSTREAMER010_FOUND ON FORCE BOOL)
set(GSTREAMER010_PLUGINS_BASE_FOUND ON FORCE BOOL)
set(GSTREAMER010_INCLUDE_DIRS
${LIBS_PREBUILT_DIR}/${LL_ARCH_DIR}/include/gstreamer-0.10
${LIBS_PREBUILT_DIR}/${LL_ARCH_DIR}/include/glib-2.0
${LIBS_PREBUILT_DIR}/${LL_ARCH_DIR}/include/libxml2
${LIBS_PREBUILT_DIR}/include/gstreamer-0.10
${LIBS_PREBUILT_DIR}/include/glib-2.0
${LIBS_PREBUILT_DIR}/include/libxml2
)
# We don't need to explicitly link against gstreamer itself, because
# LLMediaImplGStreamer probes for the system's copy at runtime.


@ -4,7 +4,6 @@ include(Prebuilt)
if (STANDALONE)
include(FindGooglePerfTools)
else (STANDALONE)
use_prebuilt_binary(google)
if (WINDOWS)
use_prebuilt_binary(google-perftools)
set(TCMALLOC_LIBRARIES
@ -13,11 +12,11 @@ else (STANDALONE)
set(GOOGLE_PERFTOOLS_FOUND "YES")
endif (WINDOWS)
if (LINUX)
use_prebuilt_binary(google-perftools)
set(TCMALLOC_LIBRARIES tcmalloc)
set(STACKTRACE_LIBRARIES stacktrace)
set(PROFILER_LIBRARIES profiler)
set(GOOGLE_PERFTOOLS_INCLUDE_DIR
${LIBS_PREBUILT_DIR}/${LL_ARCH_DIR}/include)
${LIBS_PREBUILT_DIR}/include)
set(GOOGLE_PERFTOOLS_FOUND "YES")
endif (LINUX)
endif (STANDALONE)


@ -12,10 +12,7 @@ else (STANDALONE)
if (LINUX)
set(JPEG_LIBRARIES jpeg)
elseif (DARWIN)
set(JPEG_LIBRARIES
optimized ${ARCH_PREBUILT_DIRS_RELEASE}/liblljpeg.a
debug ${ARCH_PREBUILT_DIRS_DEBUG}/liblljpeg.a
)
set(JPEG_LIBRARIES jpeg)
elseif (WINDOWS)
set(JPEG_LIBRARIES jpeglib)
endif (LINUX)


@ -11,12 +11,12 @@ else (STANDALONE)
use_prebuilt_binary(jsoncpp)
if (WINDOWS)
set(JSONCPP_LIBRARIES
debug json_vc80d
optimized json_vc80)
debug json_vc100debug_libmt.lib
optimized json_vc100_libmt)
elseif (DARWIN)
set(JSONCPP_LIBRARIES json_mac-universal-gcc_libmt)
set(JSONCPP_LIBRARIES libjson_linux-gcc-4.0.1_libmt.a)
elseif (LINUX)
set(JSONCPP_LIBRARIES jsoncpp)
set(JSONCPP_LIBRARIES libjson_linux-gcc-4.1.3_libmt.a)
endif (WINDOWS)
set(JSONCPP_INCLUDE_DIRS ${LIBS_PREBUILT_DIR}/include/jsoncpp)
set(JSONCPP_INCLUDE_DIRS "${LIBS_PREBUILT_DIR}/include/jsoncpp" "${LIBS_PREBUILT_DIR}/include/json")
endif (STANDALONE)


@ -1,6 +1,7 @@
# -*- cmake -*-
include(LLTestCommand)
include(GoogleMock)
include(Tut)
MACRO(LL_ADD_PROJECT_UNIT_TESTS project sources)
# Given a project name and a list of sourcefiles (with optional properties on each),
@ -94,7 +95,7 @@ INCLUDE(GoogleMock)
IF(${name}_test_additional_INCLUDE_DIRS MATCHES NOTFOUND)
SET(${name}_test_additional_INCLUDE_DIRS "")
ENDIF(${name}_test_additional_INCLUDE_DIRS MATCHES NOTFOUND)
INCLUDE_DIRECTORIES(${alltest_INCLUDE_DIRS} ${name}_test_additional_INCLUDE_DIRS )
INCLUDE_DIRECTORIES(${alltest_INCLUDE_DIRS} ${${name}_test_additional_INCLUDE_DIRS} )
IF(LL_TEST_VERBOSE)
MESSAGE("LL_ADD_PROJECT_UNIT_TESTS ${name}_test_additional_INCLUDE_DIRS ${${name}_test_additional_INCLUDE_DIRS}")
ENDIF(LL_TEST_VERBOSE)
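The INCLUDE_DIRECTORIES change above adds the missing outer dereference: the old form passed the literal token <name>_test_additional_INCLUDE_DIRS as an include path, while ${${name}_test_additional_INCLUDE_DIRS} first substitutes ${name} and then expands the resulting variable. A small sketch of the difference, with purely illustrative values:

    set(name llmath)
    set(llmath_test_additional_INCLUDE_DIRS /opt/extra/include)
    message(STATUS "${name}_test_additional_INCLUDE_DIRS")      # prints llmath_test_additional_INCLUDE_DIRS
    message(STATUS "${${name}_test_additional_INCLUDE_DIRS}")   # prints /opt/extra/include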


@ -1,21 +1,21 @@
# -*- cmake -*-
include(Prebuilt)
# USE_KDU can be set when launching cmake or develop.py as an option using the argument -DUSE_KDU:BOOL=ON
# When building using proprietary binaries though (i.e. having access to LL private servers), we always build with KDU
if (INSTALL_PROPRIETARY AND NOT STANDALONE)
set(USE_KDU ON)
endif (INSTALL_PROPRIETARY AND NOT STANDALONE)
# USE_KDU can be set when launching cmake as an option using the argument -DUSE_KDU:BOOL=ON
# When building using proprietary binaries though (i.e. having access to LL private servers),
# we always build with KDU
if (INSTALL_PROPRIETARY)
set(USE_KDU ON CACHE BOOL "Use Kakadu library.")
endif (INSTALL_PROPRIETARY)
if (USE_KDU)
# AO: Don't download KDU, assume it is provisioned out of band locally.
#use_prebuilt_binary(kdu)
include(Prebuilt)
use_prebuilt_binary(kdu)
if (WINDOWS)
set(KDU_LIBRARY kdu.lib)
else (WINDOWS)
set(KDU_LIBRARY libkdu.a)
endif (WINDOWS)
set(KDU_INCLUDE_DIR ${LIBS_PREBUILT_DIR}/include/kdu)
set(KDU_INCLUDE_DIR ${AUTOBUILD_INSTALL_DIR}/include/kdu)
set(LLKDU_INCLUDE_DIRS ${LIBS_OPEN_DIR}/llkdu)
set(LLKDU_LIBRARIES llkdu)
endif (USE_KDU)
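As with FMOD, USE_KDU can also be forced from the configure line with -DUSE_KDU:BOOL=ON. A hedged consumer sketch, where the image-codec target name is hypothetical:

    if (USE_KDU)
      include_directories(${KDU_INCLUDE_DIR} ${LLKDU_INCLUDE_DIRS})
      target_link_libraries(llimage_codec ${LLKDU_LIBRARIES} ${KDU_LIBRARY})  # llimage_codec is a placeholder
    endif (USE_KDU)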


@ -18,7 +18,7 @@ else (STANDALONE)
use_prebuilt_binary(SDL)
set (SDL_FOUND TRUE)
set (SDL_INCLUDE_DIR ${LIBS_PREBUILT_DIR}/i686-linux)
set (SDL_LIBRARY SDL)
set (SDL_LIBRARY SDL directfb fusion direct)
endif (LINUX AND VIEWER)
endif (STANDALONE)


@ -1,32 +1,43 @@
# -*- cmake -*-
include(Variables)
if (NOT STANDALONE)
set(ARCH_PREBUILT_DIRS ${AUTOBUILD_INSTALL_DIR}/lib)
set(ARCH_PREBUILT_DIRS_RELEASE ${AUTOBUILD_INSTALL_DIR}/lib/release)
set(ARCH_PREBUILT_DIRS_DEBUG ${AUTOBUILD_INSTALL_DIR}/lib/debug)
if (WINDOWS)
set(ARCH_PREBUILT_DIRS ${LIBS_PREBUILT_DIR}/${LL_ARCH_DIR}/lib)
set(ARCH_PREBUILT_DIRS_RELEASE ${LIBS_PREBUILT_DIR}/${LL_ARCH_DIR}/lib/release)
set(ARCH_PREBUILT_DIRS_DEBUG ${LIBS_PREBUILT_DIR}/${LL_ARCH_DIR}/lib/debug)
set(SHARED_LIB_STAGING_DIR ${CMAKE_BINARY_DIR}/sharedlibs CACHE FILEPATH "Location of staged DLLs")
set(EXE_STAGING_DIR ${CMAKE_BINARY_DIR}/sharedlibs CACHE FILEPATH "Location of staged executables")
set(SHARED_LIB_STAGING_DIR ${CMAKE_BINARY_DIR}/sharedlibs)
set(EXE_STAGING_DIR ${CMAKE_BINARY_DIR}/sharedlibs)
elseif (LINUX)
if (VIEWER)
set(ARCH_PREBUILT_DIRS ${LIBS_PREBUILT_DIR}/${LL_ARCH_DIR}/lib_release_client)
else (VIEWER)
set(ARCH_PREBUILT_DIRS ${LIBS_PREBUILT_DIR}/${LL_ARCH_DIR}/lib_release)
endif (VIEWER)
set(ARCH_PREBUILT_DIRS_RELEASE ${ARCH_PREBUILT_DIRS})
set(ARCH_PREBUILT_DIRS_DEBUG ${ARCH_PREBUILT_DIRS})
set(SHARED_LIB_STAGING_DIR ${CMAKE_BINARY_DIR}/sharedlibs/lib CACHE FILEPATH "Location of staged .sos")
set(EXE_STAGING_DIR ${CMAKE_BINARY_DIR}/sharedlibs/bin CACHE FILEPATH "Location of staged executables")
set(SHARED_LIB_STAGING_DIR ${CMAKE_BINARY_DIR}/sharedlibs/lib)
set(EXE_STAGING_DIR ${CMAKE_BINARY_DIR}/sharedlibs/bin)
elseif (DARWIN)
set(ARCH_PREBUILT_DIRS_RELEASE ${LIBS_PREBUILT_DIR}/${LL_ARCH_DIR}/lib_release)
set(ARCH_PREBUILT_DIRS ${ARCH_PREBUILT_DIRS_RELEASE})
set(ARCH_PREBUILT_DIRS_DEBUG ${ARCH_PREBUILT_DIRS_RELEASE})
set(SHARED_LIB_STAGING_DIR ${CMAKE_BINARY_DIR}/sharedlibs CACHE FILEPATH "Location of staged DLLs")
set(EXE_STAGING_DIR "${CMAKE_BINARY_DIR}/sharedlibs/\$(CONFIGURATION)" CACHE FILEPATH "Location of staged executables")
set(SHARED_LIB_STAGING_DIR ${CMAKE_BINARY_DIR}/sharedlibs)
set(EXE_STAGING_DIR "${CMAKE_BINARY_DIR}/sharedlibs/\$(CONFIGURATION)")
endif (WINDOWS)
endif (NOT STANDALONE)
link_directories(${ARCH_PREBUILT_DIRS})
# Autobuild packages must provide 'release' versions of libraries, but may provide versions for
# specific build types. AUTOBUILD_LIBS_INSTALL_DIRS lists first the build type directory and then
# the 'release' directory (as a default fallback).
# *NOTE - we have to take special care to use CMAKE_CFG_INTDIR on IDE generators (like mac and
# windows) and CMAKE_BUILD_TYPE on Makefile based generators (like linux). The reason for this is
# that CMAKE_BUILD_TYPE is essentially meaningless at configuration time for IDE generators and
# CMAKE_CFG_INTDIR is meaningless at build time for Makefile generators
if(WINDOWS OR DARWIN)
# the cmake xcode and VS generators implicitly append ${CMAKE_CFG_INTDIR} to the library paths for us
# fortunately both windows and darwin are case insensitive filesystems so this works.
set(AUTOBUILD_LIBS_INSTALL_DIRS "${AUTOBUILD_INSTALL_DIR}/lib/")
else(WINDOWS OR DARWIN)
# else block is for linux and any other makefile based generators
string(TOLOWER ${CMAKE_BUILD_TYPE} CMAKE_BUILD_TYPE_LOWER)
set(AUTOBUILD_LIBS_INSTALL_DIRS ${AUTOBUILD_INSTALL_DIR}/lib/${CMAKE_BUILD_TYPE_LOWER})
endif(WINDOWS OR DARWIN)
list(APPEND AUTOBUILD_LIBS_INSTALL_DIRS ${ARCH_PREBUILT_DIRS_RELEASE})
link_directories(${AUTOBUILD_LIBS_INSTALL_DIRS})
if (LINUX)
set(DL_LIBRARY dl)
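Given the comment block above and the AUTOBUILD_INSTALL_DIR default of ${CMAKE_BINARY_DIR}/packages set in the Variables.cmake hunk below, the resulting search order on a Linux RelWithDebInfo Makefile build should look roughly like this; a sketch, not output captured from the change itself:

    message(STATUS "libs: ${AUTOBUILD_LIBS_INSTALL_DIRS}")
    # expected to print something like:
    #   libs: <build>/packages/lib/relwithdebinfo;<build>/packages/lib/release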


@ -37,9 +37,9 @@ IF (DARWIN)
ELSE (DARWIN)
SET(MONO_INCLUDE_DIR ${LIBS_PREBUILT_DIR}/${LL_ARCH_DIR}/include)
SET(MONO_INCLUDE_DIR ${LIBS_PREBUILT_DIR}/include)
SET(GLIB_2_0_PLATFORM_INCLUDE_DIR
${LIBS_PREBUILT_DIR}/${LL_ARCH_DIR}/include/glib-2.0)
${LIBS_PREBUILT_DIR}/include/glib-2.0)
SET(GLIB_2_0_INCLUDE_DIR ${LIBS_PREBUILT_DIR}/include/glib-2.0)
INCLUDE_DIRECTORIES(


@ -7,7 +7,7 @@ use_prebuilt_binary(mysql)
if (LINUX)
if (WORD_SIZE EQUAL 32 OR DEBIAN_VERSION STREQUAL "3.1")
set(MYSQL_LIBRARIES mysqlclient)
set(MYSQL_INCLUDE_DIR ${LIBS_PREBUILT_DIR}/${LL_ARCH_DIR}/include)
set(MYSQL_INCLUDE_DIR ${LIBS_PREBUILT_DIR}/include)
else (WORD_SIZE EQUAL 32 OR DEBIAN_VERSION STREQUAL "3.1")
# Use the native MySQL library on a 64-bit system.
set(MYSQL_FIND_QUIETLY ON)
@ -16,9 +16,9 @@ if (LINUX)
endif (WORD_SIZE EQUAL 32 OR DEBIAN_VERSION STREQUAL "3.1")
elseif (WINDOWS)
set(MYSQL_LIBRARIES mysqlclient)
set(MYSQL_INCLUDE_DIR ${LIBS_PREBUILT_DIR}/${LL_ARCH_DIR}/include)
set(MYSQL_INCLUDE_DIR ${LIBS_PREBUILT_DIR}/include)
elseif (DARWIN)
set(MYSQL_INCLUDE_DIR ${LIBS_PREBUILT_DIR}/${LL_ARCH_DIR}/include)
set(MYSQL_INCLUDE_DIR ${LIBS_PREBUILT_DIR}/include)
set(MYSQL_LIBRARIES
optimized ${ARCH_PREBUILT_DIRS_RELEASE}/libmysqlclient.a
debug ${ARCH_PREBUILT_DIRS_DEBUG}/libmysqlclient.a


@ -9,18 +9,26 @@ else (LINUX)
endif (LINUX)
if (OPENAL)
set(OPENAL_LIB_INCLUDE_DIRS "${LIBS_PREBUILT_DIR}/include/AL")
if (STANDALONE)
include(FindPkgConfig)
include(FindOpenAL)
pkg_check_modules(OPENAL_LIB REQUIRED openal)
pkg_check_modules(FREEALUT_LIB REQUIRED freealut)
else (STANDALONE)
use_prebuilt_binary(openal-soft)
use_prebuilt_binary(openal_soft)
endif (STANDALONE)
set(OPENAL_LIBRARIES
openal
alut
if(WINDOWS)
set(OPENAL_LIBRARIES
OpenAL32
alut
)
else()
set(OPENAL_LIBRARIES
openal
alut
)
endif()
endif (OPENAL)
if (OPENAL)


@ -5,5 +5,5 @@ if (NOT STANDALONE)
use_prebuilt_binary(GL)
# possible glh_linear should have its own .cmake file instead
use_prebuilt_binary(glh_linear)
set(GLEXT_INCLUDE_DIR ${LIBS_PREBUILT_DIR}/${LL_ARCH_DIR}/include)
set(GLEXT_INCLUDE_DIR ${LIBS_PREBUILT_DIR}/include)
endif (NOT STANDALONE)


@ -13,11 +13,11 @@ else (STANDALONE)
else (WINDOWS)
set(OPENSSL_LIBRARIES ssl)
endif (WINDOWS)
set(OPENSSL_INCLUDE_DIRS ${LIBS_PREBUILT_DIR}/${LL_ARCH_DIR}/include)
set(OPENSSL_INCLUDE_DIRS ${LIBS_PREBUILT_DIR}/include)
endif (STANDALONE)
if (LINUX)
set(CRYPTO_LIBRARIES crypto)
elseif (DARWIN)
set(CRYPTO_LIBRARIES llcrypto)
set(CRYPTO_LIBRARIES crypto)
endif (LINUX)


@ -8,6 +8,14 @@ if (STANDALONE)
include(FindPNG)
else (STANDALONE)
use_prebuilt_binary(libpng)
set(PNG_LIBRARIES png12)
set(PNG_INCLUDE_DIRS ${LIBS_PREBUILT_DIR}/include/libpng12)
if (WINDOWS)
set(PNG_LIBRARIES libpng15)
set(PNG_INCLUDE_DIRS ${LIBS_PREBUILT_DIR}/include/libpng15)
elseif(DARWIN)
set(PNG_LIBRARIES png15)
set(PNG_INCLUDE_DIRS ${LIBS_PREBUILT_DIR}/include/libpng15)
else()
set(PNG_LIBRARIES png15)
set(PNG_INCLUDE_DIRS ${LIBS_PREBUILT_DIR}/include/libpng15)
endif()
endif (STANDALONE)


@ -1,44 +1,53 @@
# -*- cmake -*-
include(Python)
include(FindSCP)
include(FindAutobuild)
if(INSTALL_PROPRIETARY)
include(FindSCP)
endif(INSTALL_PROPRIETARY)
# The use_prebuilt_binary macro handles automated installation of package
# dependencies using autobuild. The goal is that 'autobuild install' should
# only be run when we know we need to install a new package. This should be
# the case in a clean checkout, or if autobuild.xml has been updated since the
# last run (encapsulated by the file ${CMAKE_BINARY_DIR}/temp/sentinel_installed),
# or if a previous attempt to install the package has failed (the exit status
# of previous attempts is serialized in the file
# ${CMAKE_BINARY_DIR}/temp/${_binary}_installed)
macro (use_prebuilt_binary _binary)
if (NOT STANDALONE)
if(${CMAKE_BINARY_DIR}/temp/sentinel_installed IS_NEWER_THAN ${CMAKE_BINARY_DIR}/temp/${_binary}_installed)
if(INSTALL_PROPRIETARY)
include(FindSCP)
if(DEBUG_PREBUILT)
message("cd ${SCRIPTS_DIR} && ${PYTHON_EXECUTABLE} install.py --install-dir=${CMAKE_SOURCE_DIR}/.. --scp=${SCP_EXECUTABLE} ${_binary}")
endif(DEBUG_PREBUILT)
execute_process(COMMAND ${PYTHON_EXECUTABLE}
install.py
--install-dir=${CMAKE_SOURCE_DIR}/..
--scp=${SCP_EXECUTABLE}
${_binary}
WORKING_DIRECTORY ${SCRIPTS_DIR}
RESULT_VARIABLE ${_binary}_installed
)
else(INSTALL_PROPRIETARY)
if(DEBUG_PREBUILT)
message("cd ${SCRIPTS_DIR} && ${PYTHON_EXECUTABLE} install.py --install-dir=${CMAKE_SOURCE_DIR}/.. ${_binary}")
endif(DEBUG_PREBUILT)
execute_process(COMMAND ${PYTHON_EXECUTABLE}
install.py
--install-dir=${CMAKE_SOURCE_DIR}/..
${_binary}
WORKING_DIRECTORY ${SCRIPTS_DIR}
RESULT_VARIABLE ${_binary}_installed
)
endif(INSTALL_PROPRIETARY)
if (NOT DEFINED STANDALONE_${_binary})
set(STANDALONE_${_binary} ${STANDALONE})
endif (NOT DEFINED STANDALONE_${_binary})
if (NOT STANDALONE_${_binary})
if("${${_binary}_installed}" STREQUAL "" AND EXISTS "${CMAKE_BINARY_DIR}/temp/${_binary}_installed")
file(READ ${CMAKE_BINARY_DIR}/temp/${_binary}_installed "${_binary}_installed")
if(DEBUG_PREBUILT)
message(STATUS "${_binary}_installed: \"${${_binary}_installed}\"")
endif(DEBUG_PREBUILT)
endif("${${_binary}_installed}" STREQUAL "" AND EXISTS "${CMAKE_BINARY_DIR}/temp/${_binary}_installed")
if(${CMAKE_BINARY_DIR}/temp/sentinel_installed IS_NEWER_THAN ${CMAKE_BINARY_DIR}/temp/${_binary}_installed OR NOT ${${_binary}_installed} EQUAL 0)
if(DEBUG_PREBUILT)
message("cd ${CMAKE_SOURCE_DIR} && ${AUTOBUILD_EXECUTABLE} install
--install-dir=${AUTOBUILD_INSTALL_DIR}
--skip-license-check
${_binary} ")
endif(DEBUG_PREBUILT)
execute_process(COMMAND "${AUTOBUILD_EXECUTABLE}"
install
--install-dir=${AUTOBUILD_INSTALL_DIR}
--skip-license-check
${_binary}
WORKING_DIRECTORY "${CMAKE_SOURCE_DIR}"
RESULT_VARIABLE ${_binary}_installed
)
file(WRITE ${CMAKE_BINARY_DIR}/temp/${_binary}_installed "${${_binary}_installed}")
else(${CMAKE_BINARY_DIR}/temp/sentinel_installed IS_NEWER_THAN ${CMAKE_BINARY_DIR}/temp/${_binary}_installed)
set(${_binary}_installed 0)
endif(${CMAKE_BINARY_DIR}/temp/sentinel_installed IS_NEWER_THAN ${CMAKE_BINARY_DIR}/temp/${_binary}_installed)
endif(${CMAKE_BINARY_DIR}/temp/sentinel_installed IS_NEWER_THAN ${CMAKE_BINARY_DIR}/temp/${_binary}_installed OR NOT ${${_binary}_installed} EQUAL 0)
if(NOT ${_binary}_installed EQUAL 0)
message(FATAL_ERROR
"Failed to download or unpack prebuilt '${_binary}'."
" Process returned ${${_binary}_installed}.")
endif (NOT ${_binary}_installed EQUAL 0)
endif (NOT STANDALONE)
endif (NOT STANDALONE_${_binary})
endmacro (use_prebuilt_binary _binary)
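A short usage sketch of the reworked macro; both package names already appear elsewhere in this merge. Each call re-runs 'autobuild install' only when autobuild.xml is newer than the sentinel or the recorded exit status of the previous attempt was non-zero:

    include(Prebuilt)
    use_prebuilt_binary(openal_soft)   # runs 'autobuild install' and records its exit status
    use_prebuilt_binary(jsoncpp)       # skipped on later configures while its sentinel is up to date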


@ -1,21 +0,0 @@
# -*- cmake -*-
include(Prebuilt)
set(PTH_FIND_QUIETLY ON)
set(PTH_FIND_REQUIRED ON)
if (STANDALONE)
# ?? How would I construct FindPTH.cmake? This file was cloned from
# CURL.cmake, which uses include(FindCURL), but there's no FindCURL.cmake?
# include(FindPTH)
else (STANDALONE)
# This library is only needed to support Boost.Coroutine, and only on Mac.
if (DARWIN)
use_prebuilt_binary(pth)
set(PTH_LIBRARIES pth)
set(PTH_INCLUDE_DIRS ${LIBS_PREBUILT_DIR}/include)
else (DARWIN)
set(PTH_LIBRARIES)
set(PTH_INCLUDE_DIRS)
endif (DARWIN)
endif (STANDALONE)


@ -33,7 +33,7 @@ elseif (WINDOWS)
endif (DEBUG_QUICKTIME_LIBRARY AND RELEASE_QUICKTIME_LIBRARY)
include_directories(
${LIBS_PREBUILT_DIR}/${LL_ARCH_DIR}/include/quicktime
${LIBS_PREBUILT_DIR}/include/quicktime
"${QUICKTIME_SDK_DIR}\\CIncludes"
)
endif (DARWIN)


@ -5,11 +5,10 @@ include(Python)
macro (check_message_template _target)
add_custom_command(
TARGET ${_target}
POST_BUILD
PRE_LINK
COMMAND ${PYTHON_EXECUTABLE}
ARGS ${SCRIPTS_DIR}/md5check.py
3f19d130400c547de36278a6b6f9b028
${SCRIPTS_DIR}/messages/message_template.msg
ARGS ${SCRIPTS_DIR}/template_verifier.py
--mode=development --cache_master --master_url=${TEMPLATE_VERIFIER_MASTER_URL} ${TEMPLATE_VERIFIER_OPTIONS}
COMMENT "Verifying message template - See http://wiki.secondlife.com/wiki/Template_verifier.py"
)
endmacro (check_message_template)
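The macro now shells out to scripts/template_verifier.py as a PRE_LINK step instead of md5-checking a local template. A hedged usage sketch; the target name is used only as an example and the include name assumes the macro stays in its current file:

    include(TemplateCheck)                  # assumption: the macro lives in TemplateCheck.cmake
    check_message_template(firestorm-bin)   # firestorm-bin stands in for whatever target links the messages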


@ -51,11 +51,11 @@ else (STANDALONE)
endif (LINUX)
include_directories (
${LIBS_PREBUILT_DIR}/${LL_ARCH_DIR}/include
${LIBS_PREBUILT_DIR}/include
${LIBS_PREBUILT_DIR}/include
)
foreach(include ${${LL_ARCH}_INCLUDES})
include_directories(${LIBS_PREBUILT_DIR}/${LL_ARCH_DIR}/include/${include})
include_directories(${LIBS_PREBUILT_DIR}/include/${include})
endforeach(include)
endif (STANDALONE)


@ -17,6 +17,10 @@
# Relative and absolute paths to subtrees.
if(NOT DEFINED COMMON_CMAKE_DIR)
set(COMMON_CMAKE_DIR "${CMAKE_SOURCE_DIR}/cmake")
endif(NOT DEFINED COMMON_CMAKE_DIR)
set(LIBS_CLOSED_PREFIX)
set(LIBS_OPEN_PREFIX)
set(LIBS_SERVER_PREFIX)
@ -26,21 +30,39 @@ set(VIEWER_PREFIX)
set(INTEGRATION_TESTS_PREFIX)
set(LL_TESTS OFF CACHE BOOL "Build and run unit and integration tests (disable for build timing runs to reduce variation)")
set(LIBS_CLOSED_DIR ${CMAKE_SOURCE_DIR}/${LIBS_CLOSED_PREFIX})
set(LIBS_OPEN_DIR ${CMAKE_SOURCE_DIR}/${LIBS_OPEN_PREFIX})
if(LIBS_CLOSED_DIR)
file(TO_CMAKE_PATH "${LIBS_CLOSED_DIR}" LIBS_CLOSED_DIR)
else(LIBS_CLOSED_DIR)
set(LIBS_CLOSED_DIR ${CMAKE_SOURCE_DIR}/${LIBS_CLOSED_PREFIX})
endif(LIBS_CLOSED_DIR)
if(LIBS_COMMON_DIR)
file(TO_CMAKE_PATH "${LIBS_COMMON_DIR}" LIBS_COMMON_DIR)
else(LIBS_COMMON_DIR)
set(LIBS_COMMON_DIR ${CMAKE_SOURCE_DIR}/${LIBS_OPEN_PREFIX})
endif(LIBS_COMMON_DIR)
set(LIBS_OPEN_DIR ${LIBS_COMMON_DIR})
set(LIBS_SERVER_DIR ${CMAKE_SOURCE_DIR}/${LIBS_SERVER_PREFIX})
set(SCRIPTS_DIR ${CMAKE_SOURCE_DIR}/${SCRIPTS_PREFIX})
set(SERVER_DIR ${CMAKE_SOURCE_DIR}/${SERVER_PREFIX})
set(VIEWER_DIR ${CMAKE_SOURCE_DIR}/${VIEWER_PREFIX})
set(LIBS_PREBUILT_DIR ${CMAKE_SOURCE_DIR}/../libraries CACHE PATH
set(AUTOBUILD_INSTALL_DIR ${CMAKE_BINARY_DIR}/packages)
set(LIBS_PREBUILT_DIR ${AUTOBUILD_INSTALL_DIR} CACHE PATH
"Location of prebuilt libraries.")
if (EXISTS ${CMAKE_SOURCE_DIR}/Server.cmake)
# We use this as a marker that you can try to use the proprietary libraries.
set(INSTALL_PROPRIETARY ON CACHE BOOL "Install proprietary binaries")
endif (EXISTS ${CMAKE_SOURCE_DIR}/Server.cmake)
set(TEMPLATE_VERIFIER_OPTIONS "" CACHE STRING "Options for scripts/template_verifier.py")
set(TEMPLATE_VERIFIER_MASTER_URL "http://bitbucket.org/lindenlab/master-message-template/raw/tip/message_template.msg" CACHE STRING "Location of the master message template")
if (NOT CMAKE_BUILD_TYPE)
set(CMAKE_BUILD_TYPE RelWithDebInfo CACHE STRING
"Build type. One of: Debug Release RelWithDebInfo" FORCE)
endif (NOT CMAKE_BUILD_TYPE)
if (${CMAKE_SYSTEM_NAME} MATCHES "Windows")
set(WINDOWS ON BOOL FORCE)
@ -54,20 +76,19 @@ if (${CMAKE_SYSTEM_NAME} MATCHES "Linux")
set(LINUX ON BOOl FORCE)
# If someone has specified a word size, use that to determine the
# architecture. Otherwise, let the compiler specify the word size.
# Using uname will break under chroots and other cross arch compiles. RC
# architecture. Otherwise, let the architecture specify the word size.
if (WORD_SIZE EQUAL 32)
set(ARCH i686)
elseif (WORD_SIZE EQUAL 64)
set(ARCH x86_64)
else (WORD_SIZE EQUAL 32)
if(CMAKE_SIZEOF_VOID_P MATCHES 4)
set(ARCH i686)
set(WORD_SIZE 32)
else(CMAKE_SIZEOF_VOID_P MATCHES 4)
set(ARCH x86_64)
execute_process(COMMAND uname -m COMMAND sed s/i.86/i686/
OUTPUT_VARIABLE ARCH OUTPUT_STRIP_TRAILING_WHITESPACE)
if (ARCH STREQUAL x86_64)
set(WORD_SIZE 64)
endif(CMAKE_SIZEOF_VOID_P MATCHES 4)
else (ARCH STREQUAL x86_64)
set(WORD_SIZE 32)
endif (ARCH STREQUAL x86_64)
endif (WORD_SIZE EQUAL 32)
set(LL_ARCH ${ARCH}_linux)
@ -76,25 +97,12 @@ endif (${CMAKE_SYSTEM_NAME} MATCHES "Linux")
if (${CMAKE_SYSTEM_NAME} MATCHES "Darwin")
set(DARWIN 1)
# NOTE: If specifying a different SDK with CMAKE_OSX_SYSROOT at configure
# time you should also specify CMAKE_OSX_DEPLOYMENT_TARGET explicitly,
# otherwise CMAKE_OSX_SYSROOT will be overridden here. We can't just check
# for it being unset, as it gets set to the system default :(
# Default to building against the 10.4 SDK if no deployment target is
# specified.
if (NOT CMAKE_OSX_DEPLOYMENT_TARGET)
# NOTE: setting -isysroot is NOT adequate: http://lists.apple.com/archives/Xcode-users/2007/Oct/msg00696.html
# see http://public.kitware.com/Bug/view.php?id=9959 + poppy
set(CMAKE_OSX_SYSROOT /Developer/SDKs/MacOSX10.5.sdk)
set(CMAKE_OSX_DEPLOYMENT_TARGET 10.4)
endif (NOT CMAKE_OSX_DEPLOYMENT_TARGET)
# GCC 4.2 is incompatible with the MacOSX 10.4 SDK
if (${CMAKE_OSX_SYSROOT} MATCHES "10.4u")
set(CMAKE_XCODE_ATTRIBUTE_GCC_VERSION "4.0")
endif (${CMAKE_OSX_SYSROOT} MATCHES "10.4u")
# To support a different SDK update these Xcode settings:
set(CMAKE_OSX_DEPLOYMENT_TARGET 10.5)
set(CMAKE_OSX_SYSROOT /Developer/SDKs/MacOSX10.5.sdk)
set(CMAKE_XCODE_ATTRIBUTE_GCC_VERSION "4.2")
set(CMAKE_XCODE_ATTRIBUTE_DEBUG_INFORMATION_FORMAT dwarf-with-dsym)
# NOTE: To attempt an i386/PPC Universal build, add this on the configure line:
# -DCMAKE_OSX_ARCHITECTURES:STRING='i386;ppc'
@ -122,14 +130,16 @@ endif (${CMAKE_SYSTEM_NAME} MATCHES "Darwin")
set(GRID agni CACHE STRING "Target Grid")
set(VIEWER ON CACHE BOOL "Build Firestorm viewer.")
set(VIEWER_CHANNEL "FirestormPrivate" CACHE STRING "Viewer Channel Name")
set(VIEWER_CHANNEL "Firestorm-Private" CACHE STRING "Viewer Channel Name")
set(VIEWER_LOGIN_CHANNEL ${VIEWER_CHANNEL} CACHE STRING "Fake login channel for A/B Testing")
# Flickr API keys.
set(FLICKR_API_KEY "daaabff93a967e0f37fa18863bb43b29")
set(FLICKR_API_SECRET "846f0958020b553e")
set(VERSION_BUILD "0" CACHE STRING "Revision number passed in from the outside")
set(STANDALONE OFF CACHE BOOL "Do not use Linden-supplied prebuilt libraries.")
set(UNATTENDED OFF CACHE BOOL "Should be set to ON for building with VC Express editions.")
if (NOT STANDALONE AND EXISTS ${CMAKE_SOURCE_DIR}/llphysics)
set(SERVER ON CACHE BOOL "Build Second Life server software.")
@ -148,3 +158,4 @@ endif (LINUX AND SERVER AND VIEWER)
set(USE_PRECOMPILED_HEADERS ON CACHE BOOL "Enable use of precompiled header directives where supported.")
source_group("CMake Rules" FILES CMakeLists.txt)

indra/cmake/Versions.cmake Normal file → Executable file

@ -2,4 +2,5 @@ include(BuildVersion)
if(VIEWER)
build_version(viewer)
build_channel(viewer)
endif(VIEWER)


@ -62,16 +62,13 @@ elseif (LINUX)
else (STANDALONE)
set(WEBKIT_PLUGIN_LIBRARIES
llqtwebkit
qgif
qjpeg
QtWebKit
QtOpenGL
QtNetwork
QtGui
QtCore
qgif
qjpeg
jpeg
fontconfig
X11


@ -9,7 +9,10 @@ if (STANDALONE)
else (STANDALONE)
use_prebuilt_binary(xmlrpc-epi)
if (WINDOWS)
set(XMLRPCEPI_LIBRARIES xmlrpcepi)
set(XMLRPCEPI_LIBRARIES
debug xmlrpc-epid
optimized xmlrpc-epi
)
else (WINDOWS)
set(XMLRPCEPI_LIBRARIES xmlrpc-epi)
endif (WINDOWS)

indra/cmake/run_build_test.py Normal file → Executable file

@ -1,863 +0,0 @@
#!/usr/bin/env python
"""\
@file develop.py
@authors Bryan O'Sullivan, Mark Palange, Aaron Brashears
@brief Fire and forget script to appropriately configure cmake for SL.
$LicenseInfo:firstyear=2007&license=viewerlgpl$
Second Life Viewer Source Code
Copyright (C) 2007-2011, Linden Research, Inc.
This library is free software; you can redistribute it and/or
modify it under the terms of the GNU Lesser General Public
License as published by the Free Software Foundation;
version 2.1 of the License only.
This library is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General Public
License along with this library; if not, write to the Free Software
Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
Linden Research, Inc., 945 Battery Street, San Francisco, CA 94111 USA
$/LicenseInfo$
"""
import errno
import getopt
import os
import random
import re
import shutil
import socket
import sys
import commands
import subprocess
class CommandError(Exception):
pass
def mkdir(path):
try:
os.mkdir(path)
return path
except OSError, err:
if err.errno != errno.EEXIST or not os.path.isdir(path):
raise
def getcwd():
cwd = os.getcwd()
if 'a' <= cwd[0] <= 'z' and cwd[1] == ':':
# CMake wants DOS drive letters to be in uppercase. The above
# condition never asserts on platforms whose full path names
# always begin with a slash, so we don't need to test whether
# we are running on Windows.
cwd = cwd[0].upper() + cwd[1:]
return cwd
def quote(opts):
return '"' + '" "'.join([ opt.replace('"', '') for opt in opts ]) + '"'
class PlatformSetup(object):
generator = None
build_types = {}
for t in ('Debug', 'Release', 'RelWithDebInfo'):
build_types[t.lower()] = t
build_type = build_types['relwithdebinfo']
standalone = 'OFF'
unattended = 'OFF'
universal = 'OFF'
project_name = 'SecondLife'
distcc = True
cmake_opts = []
word_size = 32
using_express = False
def __init__(self):
self.script_dir = os.path.realpath(
os.path.dirname(__import__(__name__).__file__))
def os(self):
'''Return the name of the OS.'''
raise NotImplemented('os')
def arch(self):
'''Return the CPU architecture.'''
return None
def platform(self):
'''Return a stringified two-tuple of the OS name and CPU
architecture.'''
ret = self.os()
if self.arch():
ret += '-' + self.arch()
return ret
def build_dirs(self):
'''Return the top-level directories in which builds occur.
This can return more than one directory, e.g. if doing a
32-bit viewer and server build on Linux.'''
return ['build-' + self.platform()]
def cmake_commandline(self, src_dir, build_dir, opts, simple):
'''Return the command line to run cmake with.'''
args = dict(
dir=src_dir,
generator=self.generator,
opts=quote(opts),
standalone=self.standalone,
unattended=self.unattended,
word_size=self.word_size,
type=self.build_type.upper(),
)
#if simple:
# return 'cmake %(opts)s %(dir)r' % args
return ('cmake -DCMAKE_BUILD_TYPE:STRING=%(type)s '
'-DSTANDALONE:BOOL=%(standalone)s '
'-DUNATTENDED:BOOL=%(unattended)s '
'-DWORD_SIZE:STRING=%(word_size)s '
'-G %(generator)r %(opts)s %(dir)r' % args)
def run_cmake(self, args=[]):
'''Run cmake.'''
# do a sanity check to make sure we have a generator
if not hasattr(self, 'generator'):
raise "No generator available for '%s'" % (self.__name__,)
cwd = getcwd()
created = []
try:
for d in self.build_dirs():
simple = True
if mkdir(d):
created.append(d)
simple = False
try:
os.chdir(d)
cmd = self.cmake_commandline(cwd, d, args, simple)
print 'Running %r in %r' % (cmd, d)
self.run(cmd, 'cmake')
finally:
os.chdir(cwd)
except:
# If we created a directory in which to run cmake and
# something went wrong, the directory probably just
# contains garbage, so delete it.
os.chdir(cwd)
for d in created:
print 'Cleaning %r' % d
shutil.rmtree(d)
raise
def parse_build_opts(self, arguments):
opts, targets = getopt.getopt(arguments, 'o:', ['option='])
build_opts = []
for o, a in opts:
if o in ('-o', '--option'):
build_opts.append(a)
return build_opts, targets
def run_build(self, opts, targets):
'''Build the default targets for this platform.'''
raise NotImplemented('run_build')
def cleanup(self):
'''Delete all build directories.'''
cleaned = 0
for d in self.build_dirs():
if os.path.isdir(d):
print 'Cleaning %r' % d
shutil.rmtree(d)
cleaned += 1
if not cleaned:
print 'Nothing to clean up!'
def is_internal_tree(self):
'''Indicate whether we are building in an internal source tree.'''
return os.path.isdir(os.path.join(self.script_dir, 'newsim'))
def find_in_path(self, name, defval=None, basename=False):
for ext in self.exe_suffixes:
name_ext = name + ext
if os.sep in name_ext:
path = os.path.abspath(name_ext)
if os.access(path, os.X_OK):
return [basename and os.path.basename(path) or path]
for p in os.getenv('PATH', self.search_path).split(os.pathsep):
path = os.path.join(p, name_ext)
if os.access(path, os.X_OK):
return [basename and os.path.basename(path) or path]
if defval:
return [defval]
return []
class UnixSetup(PlatformSetup):
'''Generic Unixy build instructions.'''
search_path = '/usr/bin:/usr/local/bin'
exe_suffixes = ('',)
def __init__(self):
super(UnixSetup, self).__init__()
self.generator = 'Unix Makefiles'
def os(self):
return 'unix'
def arch(self):
cpu = os.uname()[-1]
if cpu.endswith('386'):
cpu = 'i386'
elif cpu.endswith('86'):
cpu = 'i686'
elif cpu in ('athlon',):
cpu = 'i686'
elif cpu == 'Power Macintosh':
cpu = 'ppc'
elif cpu == 'x86_64' and self.word_size == 32:
cpu = 'i686'
return cpu
def run(self, command, name=None):
'''Run a program. If the program fails, raise an exception.'''
sys.stdout.flush()
ret = os.system(command)
if ret:
if name is None:
name = command.split(None, 1)[0]
if os.WIFEXITED(ret):
st = os.WEXITSTATUS(ret)
if st == 127:
event = 'was not found'
else:
event = 'exited with status %d' % st
elif os.WIFSIGNALED(ret):
event = 'was killed by signal %d' % os.WTERMSIG(ret)
else:
event = 'died unexpectedly (!?) with 16-bit status %d' % ret
raise CommandError('the command %r %s' %
(name, event))
class LinuxSetup(UnixSetup):
def __init__(self):
super(LinuxSetup, self).__init__()
try:
self.debian_sarge = open('/etc/debian_version').read().strip() == '3.1'
except:
self.debian_sarge = False
def os(self):
return 'linux'
def build_dirs(self):
# Only build the server code if we have it.
platform_build = '%s-%s' % (self.platform(), self.build_type.lower())
if self.arch() == 'i686' and self.is_internal_tree():
return ['viewer-' + platform_build, 'server-' + platform_build]
elif self.arch() == 'x86_64' and self.is_internal_tree():
# the viewer does not build in 64bit -- kdu5 issues
# we can either use openjpeg, or overhaul our viewer to handle kdu5 or higher
# doug knows about kdu issues
return ['server-' + platform_build]
else:
return ['viewer-' + platform_build]
def cmake_commandline(self, src_dir, build_dir, opts, simple):
args = dict(
dir=src_dir,
generator=self.generator,
opts=quote(opts),
standalone=self.standalone,
unattended=self.unattended,
type=self.build_type.upper(),
project_name=self.project_name,
word_size=self.word_size,
)
if not self.is_internal_tree():
args.update({'cxx':'g++', 'server':'OFF', 'viewer':'ON'})
else:
if self.distcc:
distcc = self.find_in_path('distcc')
baseonly = True
else:
distcc = []
baseonly = False
if 'server' in build_dir:
gcc = distcc + self.find_in_path(
self.debian_sarge and 'g++-3.3' or 'g++-4.1',
'g++', baseonly)
args.update({'cxx': ' '.join(gcc), 'server': 'ON',
'viewer': 'OFF'})
else:
gcc41 = distcc + self.find_in_path('g++-4.1', 'g++', baseonly)
args.update({'cxx': ' '.join(gcc41),
'server': 'OFF',
'viewer': 'ON'})
cmd = (('cmake -DCMAKE_BUILD_TYPE:STRING=%(type)s '
'-G %(generator)r -DSERVER:BOOL=%(server)s '
'-DVIEWER:BOOL=%(viewer)s -DSTANDALONE:BOOL=%(standalone)s '
'-DUNATTENDED:BOOL=%(unattended)s '
'-DWORD_SIZE:STRING=%(word_size)s '
'-DROOT_PROJECT_NAME:STRING=%(project_name)s '
'%(opts)s %(dir)r')
% args)
if 'CXX' not in os.environ:
args.update({'cmd':cmd})
cmd = ('CXX=%(cxx)r %(cmd)s' % args)
return cmd
def run_build(self, opts, targets):
job_count = None
for i in range(len(opts)):
if opts[i].startswith('-j'):
try:
job_count = int(opts[i][2:])
except ValueError:
try:
job_count = int(opts[i+1])
except ValueError:
job_count = True
def get_cpu_count():
count = 0
for line in open('/proc/cpuinfo'):
if re.match(r'processor\s*:', line):
count += 1
return count
def localhost():
count = get_cpu_count()
return 'localhost/' + str(count), count
def get_distcc_hosts():
try:
hosts = []
name = os.getenv('DISTCC_DIR', '/etc/distcc') + '/hosts'
for l in open(name):
l = l[l.find('#')+1:].strip()
if l: hosts.append(l)
return hosts
except IOError:
return (os.getenv('DISTCC_HOSTS', '').split() or
[localhost()[0]])
def count_distcc_hosts():
cpus = 0
hosts = 0
for host in get_distcc_hosts():
m = re.match(r'.*/(\d+)', host)
hosts += 1
cpus += m and int(m.group(1)) or 1
return hosts, cpus
def mk_distcc_hosts(basename, range, num_cpus):
'''Generate a list of LL-internal machines to build on.'''
loc_entry, cpus = localhost()
hosts = [loc_entry]
dead = []
stations = [s for s in xrange(range) if s not in dead]
random.shuffle(stations)
hosts += ['%s%d.lindenlab.com/%d,lzo' % (basename, s, num_cpus) for s in stations]
cpus += 2 * len(stations)
return ' '.join(hosts), cpus
if job_count is None:
hosts, job_count = count_distcc_hosts()
hostname = socket.gethostname()
if hosts == 1:
if hostname.startswith('station'):
hosts, job_count = mk_distcc_hosts('station', 36, 2)
os.environ['DISTCC_HOSTS'] = hosts
if hostname.startswith('eniac'):
hosts, job_count = mk_distcc_hosts('eniac', 71, 2)
os.environ['DISTCC_HOSTS'] = hosts
if hostname.startswith('build'):
max_jobs = 6
else:
max_jobs = 12
if job_count > max_jobs:
job_count = max_jobs;
opts.extend(['-j', str(job_count)])
if targets:
targets = ' '.join(targets)
else:
targets = 'all'
for d in self.build_dirs():
cmd = 'make -C %r %s %s' % (d, ' '.join(opts), targets)
print 'Running %r' % cmd
self.run(cmd)
class DarwinSetup(UnixSetup):
def __init__(self):
super(DarwinSetup, self).__init__()
self.generator = 'Xcode'
def os(self):
return 'darwin'
def arch(self):
if self.universal == 'ON':
return 'universal'
else:
return UnixSetup.arch(self)
def cmake_commandline(self, src_dir, build_dir, opts, simple):
args = dict(
dir=src_dir,
generator=self.generator,
opts=quote(opts),
standalone=self.standalone,
word_size=self.word_size,
unattended=self.unattended,
project_name=self.project_name,
universal=self.universal,
type=self.build_type.upper(),
)
if self.universal == 'ON':
args['universal'] = '-DCMAKE_OSX_ARCHITECTURES:STRING=\'i386;ppc\''
#if simple:
# return 'cmake %(opts)s %(dir)r' % args
return ('cmake -G %(generator)r '
'-DCMAKE_BUILD_TYPE:STRING=%(type)s '
'-DSTANDALONE:BOOL=%(standalone)s '
'-DUNATTENDED:BOOL=%(unattended)s '
'-DWORD_SIZE:STRING=%(word_size)s '
'-DROOT_PROJECT_NAME:STRING=%(project_name)s '
'%(universal)s '
'%(opts)s %(dir)r' % args)
def run_build(self, opts, targets):
cwd = getcwd()
if targets:
targets = ' '.join(['-target ' + repr(t) for t in targets])
else:
targets = ''
cmd = ('xcodebuild -configuration %s %s %s | grep -v "^[[:space:]]*setenv" ; exit ${PIPESTATUS[0]}' %
(self.build_type, ' '.join(opts), targets))
for d in self.build_dirs():
try:
os.chdir(d)
print 'Running %r in %r' % (cmd, d)
self.run(cmd)
finally:
os.chdir(cwd)
class WindowsSetup(PlatformSetup):
gens = {
'vc71' : {
'gen' : r'Visual Studio 7 .NET 2003',
'ver' : r'7.1'
},
'vc80' : {
'gen' : r'Visual Studio 8 2005',
'ver' : r'8.0'
},
'vc90' : {
'gen' : r'Visual Studio 9 2008',
'ver' : r'9.0'
}
}
gens['vs2003'] = gens['vc71']
gens['vs2005'] = gens['vc80']
gens['vs2008'] = gens['vc90']
search_path = r'C:\windows'
exe_suffixes = ('.exe', '.bat', '.com')
def __init__(self):
super(WindowsSetup, self).__init__()
self._generator = None
self.incredibuild = False
def _get_generator(self):
if self._generator is None:
for version in 'vc80 vc90 vc71'.split():
if self.find_visual_studio(version):
self._generator = version
print 'Building with ', self.gens[version]['gen']
break
else:
print >> sys.stderr, 'Cannot find a Visual Studio installation, testing for express editions'
for version in 'vc80 vc90 vc71'.split():
if self.find_visual_studio_express(version):
self._generator = version
self.using_express = True
print 'Building with ', self.gens[version]['gen'] , "Express edition"
break
else:
print >> sys.stderr, 'Cannot find any Visual Studio installation'
sys.exit(1)
return self._generator
def _set_generator(self, gen):
self._generator = gen
generator = property(_get_generator, _set_generator)
def os(self):
return 'win32'
def build_dirs(self):
return ['build-' + self.generator]
def cmake_commandline(self, src_dir, build_dir, opts, simple):
args = dict(
dir=src_dir,
generator=self.gens[self.generator.lower()]['gen'],
opts=quote(opts),
standalone=self.standalone,
unattended=self.unattended,
project_name=self.project_name,
word_size=self.word_size,
)
#if simple:
# return 'cmake %(opts)s "%(dir)s"' % args
return ('cmake -G "%(generator)s" '
'-DSTANDALONE:BOOL=%(standalone)s '
'-DUNATTENDED:BOOL=%(unattended)s '
'-DWORD_SIZE:STRING=%(word_size)s '
'-DROOT_PROJECT_NAME:STRING=%(project_name)s '
'%(opts)s "%(dir)s"' % args)
def get_HKLM_registry_value(self, key_str, value_str):
import _winreg
reg = _winreg.ConnectRegistry(None, _winreg.HKEY_LOCAL_MACHINE)
key = _winreg.OpenKey(reg, key_str)
value = _winreg.QueryValueEx(key, value_str)[0]
print 'Found: %s' % value
return value
def find_visual_studio(self, gen=None):
if gen is None:
gen = self._generator
gen = gen.lower()
value_str = (r'EnvironmentDirectory')
key_str = (r'SOFTWARE\Microsoft\VisualStudio\%s\Setup\VS' %
self.gens[gen]['ver'])
print ('Reading VS environment from HKEY_LOCAL_MACHINE\%s\%s' %
(key_str, value_str))
try:
return self.get_HKLM_registry_value(key_str, value_str)
except WindowsError, err:
key_str = (r'SOFTWARE\Wow6432Node\Microsoft\VisualStudio\%s\Setup\VS' %
self.gens[gen]['ver'])
try:
return self.get_HKLM_registry_value(key_str, value_str)
except:
print >> sys.stderr, "Didn't find ", self.gens[gen]['gen']
return ''
def find_visual_studio_express(self, gen=None):
if gen is None:
gen = self._generator
gen = gen.lower()
try:
import _winreg
key_str = (r'SOFTWARE\Microsoft\VCEXpress\%s\Setup\VC' %
self.gens[gen]['ver'])
value_str = (r'ProductDir')
print ('Reading VS environment from HKEY_LOCAL_MACHINE\%s\%s' %
(key_str, value_str))
print key_str
reg = _winreg.ConnectRegistry(None, _winreg.HKEY_LOCAL_MACHINE)
key = _winreg.OpenKey(reg, key_str)
value = _winreg.QueryValueEx(key, value_str)[0]+"IDE"
print 'Found: %s' % value
return value
except WindowsError, err:
print >> sys.stderr, "Didn't find ", self.gens[gen]['gen']
return ''
def get_build_cmd(self):
if self.incredibuild:
config = self.build_type
if self.gens[self.generator]['ver'] in [ r'8.0', r'9.0' ]:
config = '\"%s|Win32\"' % config
executable = 'buildconsole'
cmd = "%(bin)s %(prj)s.sln /build /cfg=%(cfg)s" % {'prj': self.project_name, 'cfg': config, 'bin': executable}
return (executable, cmd)
environment = self.find_visual_studio()
if environment == '':
environment = self.find_visual_studio_express()
if environment == '':
print >> sys.stderr, "Something went very wrong during build stage, could not find a Visual Studio installation."
else:
build_dirs=self.build_dirs();
print >> sys.stderr, "\nSolution generation complete, it can can now be found in:", build_dirs[0]
print >> sys.stderr, "\nPlease see https://wiki.secondlife.com/wiki/Microsoft_Visual_Studio#Extra_steps_for_Visual_Studio_Express_editions for express specific information"
exit(0)
# devenv.com is CLI friendly, devenv.exe... not so much.
executable = '%sdevenv.com' % (self.find_visual_studio(),)
cmd = ('"%s" %s.sln /build %s' %
(executable, self.project_name, self.build_type))
return (executable, cmd)
def run(self, command, name=None, retry_on=None, retries=1):
'''Run a program. If the program fails, raise an exception.'''
assert name is not None, 'On windows an executable path must be given in name. [DEV-44838]'
if os.path.isfile(name):
path = name
else:
path = self.find_in_path(name)[0]
while retries:
retries = retries - 1
print "develop.py tries to run:", command
ret = subprocess.call(command, executable=path)
print "got ret", ret, "from", command
if ret == 0:
break
else:
error = 'exited with status %d' % ret
if retry_on is not None and retry_on == ret:
print "Retrying... the command %r %s" % (name, error)
else:
raise CommandError('the command %r %s' % (name, error))
def run_cmake(self, args=[]):
'''Override to add the vstool.exe call after running cmake.'''
PlatformSetup.run_cmake(self, args)
if self.unattended == 'OFF':
if self.using_express == False:
self.run_vstool()
def run_vstool(self):
for build_dir in self.build_dirs():
stamp = os.path.join(build_dir, 'vstool.txt')
try:
prev_build = open(stamp).read().strip()
except IOError:
prev_build = ''
if prev_build == self.build_type:
# Only run vstool if the build type has changed.
continue
executable = os.path.join('tools','vstool','VSTool.exe')
vstool_cmd = (executable +
' --solution ' +
os.path.join(build_dir,'SecondLife.sln') +
' --config ' + self.build_type +
' --startup firestorm-bin')
print 'Running %r in %r' % (vstool_cmd, getcwd())
self.run(vstool_cmd, name=executable)
print >> open(stamp, 'w'), self.build_type
def run_build(self, opts, targets):
for t in targets:
assert t.strip(), 'Unexpected empty targets: ' + repr(targets)
cwd = getcwd()
executable, build_cmd = self.get_build_cmd()
for d in self.build_dirs():
try:
os.chdir(d)
if targets:
for t in targets:
cmd = '%s /project %s %s' % (build_cmd, t, ' '.join(opts))
print 'Running %r in %r' % (cmd, d)
self.run(cmd, name=executable, retry_on=4, retries=3)
else:
cmd = '%s %s' % (build_cmd, ' '.join(opts))
print 'Running %r in %r' % (cmd, d)
self.run(cmd, name=executable, retry_on=4, retries=3)
finally:
os.chdir(cwd)
class CygwinSetup(WindowsSetup):
def __init__(self):
super(CygwinSetup, self).__init__()
self.generator = 'vc80'
def cmake_commandline(self, src_dir, build_dir, opts, simple):
dos_dir = commands.getoutput("cygpath -w %s" % src_dir)
args = dict(
dir=dos_dir,
generator=self.gens[self.generator.lower()]['gen'],
opts=quote(opts),
standalone=self.standalone,
unattended=self.unattended,
project_name=self.project_name,
word_size=self.word_size,
)
#if simple:
# return 'cmake %(opts)s "%(dir)s"' % args
return ('cmake -G "%(generator)s" '
'-DUNATTENDED:BOOl=%(unattended)s '
'-DSTANDALONE:BOOL=%(standalone)s '
'-DWORD_SIZE:STRING=%(word_size)s '
'-DROOT_PROJECT_NAME:STRING=%(project_name)s '
'%(opts)s "%(dir)s"' % args)
setup_platform = {
'darwin': DarwinSetup,
'linux2': LinuxSetup,
'win32' : WindowsSetup,
'cygwin' : CygwinSetup
}
usage_msg = '''
Usage: develop.py [options] [command [command-options]]
Options:
-h | --help print this help message
--standalone build standalone, without Linden prebuild libraries
--unattended build unattended, do not invoke any tools requiring
a human response
--universal build a universal binary on Mac OS X (unsupported)
-t | --type=NAME build type ("Debug", "Release", or "RelWithDebInfo")
-m32 | -m64 build architecture (32-bit or 64-bit)
-N | --no-distcc disable use of distcc
-G | --generator=NAME generator name
Windows: VC71 or VS2003 (default), VC80 (VS2005) or
VC90 (VS2008)
Mac OS X: Xcode (default), Unix Makefiles
Linux: Unix Makefiles (default), KDevelop3
-p | --project=NAME set the root project name. (Doesn't affect makefiles)
Commands:
build configure and build default target
clean delete all build directories, does not affect sources
configure configure project by running cmake (default if none given)
printbuilddirs print the build directory that will be used
Command-options for "configure":
We use cmake variables to change the build configuration.
-DSERVER:BOOL=OFF Don't configure simulator/dataserver/etc
-DVIEWER:BOOL=OFF Don't configure the viewer
-DPACKAGE:BOOL=ON Create "package" target to make installers
-DLOCALIZESETUP:BOOL=ON Create one win_setup target per supported language
Examples:
Set up a viewer-only project for your system:
develop.py configure -DSERVER:BOOL=OFF
Set up a Visual Studio 2005 project with "package" target:
develop.py -G vc80 configure -DPACKAGE:BOOL=ON
'''
def main(arguments):
setup = setup_platform[sys.platform]()
try:
opts, args = getopt.getopt(
arguments,
'?hNt:p:G:m:',
['help', 'standalone', 'no-distcc', 'unattended', 'universal', 'type=', 'incredibuild', 'generator=', 'project='])
except getopt.GetoptError, err:
print >> sys.stderr, 'Error:', err
print >> sys.stderr, """
Note: You must pass -D options to cmake after the "configure" command
For example: develop.py configure -DSERVER:BOOL=OFF"""
print >> sys.stderr, usage_msg.strip()
sys.exit(1)
for o, a in opts:
if o in ('-?', '-h', '--help'):
print usage_msg.strip()
sys.exit(0)
elif o in ('--standalone',):
setup.standalone = 'ON'
elif o in ('--unattended',):
setup.unattended = 'ON'
elif o in ('--universal',):
setup.universal = 'ON'
elif o in ('-m',):
if a in ('32', '64'):
setup.word_size = int(a)
else:
print >> sys.stderr, 'Error: unknown word size', repr(a)
print >> sys.stderr, 'Supported word sizes: 32, 64'
sys.exit(1)
elif o in ('-t', '--type'):
try:
setup.build_type = setup.build_types[a.lower()]
except KeyError:
print >> sys.stderr, 'Error: unknown build type', repr(a)
print >> sys.stderr, 'Supported build types:'
types = setup.build_types.values()
types.sort()
for t in types:
print ' ', t
sys.exit(1)
elif o in ('-G', '--generator'):
setup.generator = a
elif o in ('-N', '--no-distcc'):
setup.distcc = False
elif o in ('-p', '--project'):
setup.project_name = a
elif o in ('--incredibuild'):
setup.incredibuild = True
else:
print >> sys.stderr, 'INTERNAL ERROR: unhandled option', repr(o)
sys.exit(1)
if not args:
setup.run_cmake()
return
try:
cmd = args.pop(0)
if cmd in ('cmake', 'configure'):
setup.run_cmake(args)
elif cmd == 'build':
if os.getenv('DISTCC_DIR') is None:
distcc_dir = os.path.join(getcwd(), '.distcc')
if not os.path.exists(distcc_dir):
os.mkdir(distcc_dir)
print "setting DISTCC_DIR to %s" % distcc_dir
os.environ['DISTCC_DIR'] = distcc_dir
else:
print "DISTCC_DIR is set to %s" % os.getenv('DISTCC_DIR')
for d in setup.build_dirs():
if not os.path.exists(d):
raise CommandError('run "develop.py cmake" first')
setup.run_cmake()
opts, targets = setup.parse_build_opts(args)
setup.run_build(opts, targets)
elif cmd == 'clean':
if args:
raise CommandError('clean takes no arguments')
setup.cleanup()
elif cmd == 'printbuilddirs':
for d in setup.build_dirs():
print >> sys.stdout, d
else:
print >> sys.stderr, 'Error: unknown subcommand', repr(cmd)
print >> sys.stderr, "(run 'develop.py --help' for help)"
sys.exit(1)
except getopt.GetoptError, err:
print >> sys.stderr, 'Error with %r subcommand: %s' % (cmd, err)
sys.exit(1)
if __name__ == '__main__':
try:
main(sys.argv[1:])
except CommandError, err:
print >> sys.stderr, 'Error:', err
sys.exit(1)

View File

@ -1,3 +1,8 @@
# -*- cmake -*-
add_subdirectory(llui_libtest)
IF (LLIMAGE_LIBTEST)
MESSAGE(STATUS "Build llimage_libtest")
add_subdirectory(llimage_libtest)
ELSE (LLIMAGE_LIBTEST)
MESSAGE(STATUS "Skip llimage_libtest")
ENDIF (LLIMAGE_LIBTEST)

View File

@ -0,0 +1,125 @@
# -*- cmake -*-
# Integration tests of the llimage library (JPEG2000, PNG, jpeg, etc... images reading and writing)
project (llimage_libtest)
include(00-Common)
include(LLCommon)
include(LLImage)
include(LLImageJ2COJ)
include(LLKDU)
include(LLVFS)
include_directories(
${LLCOMMON_INCLUDE_DIRS}
${LLVFS_INCLUDE_DIRS}
${LLIMAGE_INCLUDE_DIRS}
)
set(llimage_libtest_SOURCE_FILES
llimage_libtest.cpp
)
set(llimage_libtest_HEADER_FILES
CMakeLists.txt
llimage_libtest.h
)
set_source_files_properties(${llimage_libtest_HEADER_FILES}
PROPERTIES HEADER_FILE_ONLY TRUE)
list(APPEND llimage_libtest_SOURCE_FILES ${llimage_libtest_HEADER_FILES})
add_executable(llimage_libtest
WIN32
MACOSX_BUNDLE
${llimage_libtest_SOURCE_FILES}
)
set_target_properties(llimage_libtest
PROPERTIES
WIN32_EXECUTABLE
FALSE
)
# OS-specific libraries
if (DARWIN)
include(CMakeFindFrameworks)
find_library(COREFOUNDATION_LIBRARY CoreFoundation)
set(OS_LIBRARIES ${COREFOUNDATION_LIBRARY})
elseif (WINDOWS)
set(OS_LIBRARIES)
elseif (LINUX)
set(OS_LIBRARIES)
else (DARWIN)
message(FATAL_ERROR "Unknown platform")
endif (DARWIN)
# Libraries on which this application depends
# Sort by high-level to low-level
target_link_libraries(llimage_libtest
${LLCOMMON_LIBRARIES}
${LLVFS_LIBRARIES}
${LLIMAGE_LIBRARIES}
${LLKDU_LIBRARIES}
${KDU_LIBRARY}
${LLIMAGEJ2COJ_LIBRARIES}
${OS_LIBRARIES}
)
if (DARWIN)
# Path inside the app bundle where we'll need to copy libraries
set(LLIMAGE_LIBTEST_DESTINATION_DIR
${CMAKE_CURRENT_BINARY_DIR}/${CMAKE_CFG_INTDIR}/llimage_libtest.app/Contents/Resources
)
# Create the Contents/Resources directory
add_custom_command(
TARGET llimage_libtest POST_BUILD
COMMAND ${CMAKE_COMMAND}
ARGS
-E
make_directory
${LLIMAGE_LIBTEST_DESTINATION_DIR}
COMMENT "Creating Resources directory in app bundle."
)
else (DARWIN)
set(LLIMAGE_LIBTEST_DESTINATION_DIR
${CMAKE_CURRENT_BINARY_DIR}/${CMAKE_CFG_INTDIR}/
)
endif (DARWIN)
get_target_property(BUILT_LLCOMMON llcommon LOCATION)
add_custom_command(TARGET llimage_libtest POST_BUILD
COMMAND ${CMAKE_COMMAND} -E copy ${BUILT_LLCOMMON} ${LLIMAGE_LIBTEST_DESTINATION_DIR}
DEPENDS ${BUILT_LLCOMMON}
)
if (DARWIN)
# Copy the required libraries to the package app
add_custom_command(TARGET llimage_libtest POST_BUILD
COMMAND ${CMAKE_COMMAND} -E copy ${CMAKE_SOURCE_DIR}/../build-darwin-i386/packages/lib/release/libapr-1.0.dylib ${LLIMAGE_LIBTEST_DESTINATION_DIR}
DEPENDS ${CMAKE_SOURCE_DIR}/../build-darwin-i386/packages/lib/release/libapr-1.0.dylib
)
add_custom_command(TARGET llimage_libtest POST_BUILD
COMMAND ${CMAKE_COMMAND} -E copy ${CMAKE_SOURCE_DIR}/../build-darwin-i386/packages/lib/release/libaprutil-1.0.dylib ${LLIMAGE_LIBTEST_DESTINATION_DIR}
DEPENDS ${CMAKE_SOURCE_DIR}/../build-darwin-i386/packages/lib/release/libaprutil-1.0.dylib
)
add_custom_command(TARGET llimage_libtest POST_BUILD
COMMAND ${CMAKE_COMMAND} -E copy ${CMAKE_SOURCE_DIR}/../build-darwin-i386/packages/lib/release/libexception_handler.dylib ${LLIMAGE_LIBTEST_DESTINATION_DIR}
DEPENDS ${CMAKE_SOURCE_DIR}/../build-darwin-i386/packages/lib/release/libexception_handler.dylib
)
add_custom_command(TARGET llimage_libtest POST_BUILD
COMMAND ${CMAKE_COMMAND} -E copy ${CMAKE_SOURCE_DIR}/../build-darwin-i386/packages/lib/release/libexpat.1.5.2.dylib ${LLIMAGE_LIBTEST_DESTINATION_DIR}
DEPENDS ${CMAKE_SOURCE_DIR}/../build-darwin-i386/packages/lib/release/libexpat.1.5.2.dylib
)
endif (DARWIN)
if (WINDOWS)
# Check indra/test_apps/llplugintest/CMakeLists.txt for an example of what to copy over for Windows and how
endif (WINDOWS)
# Ensure people working on the viewer don't break this library
# *NOTE: This could be removed, or only built by TeamCity, if the build
# and link times become too long.
add_dependencies(viewer llimage_libtest)

View File

@ -0,0 +1,559 @@
/**
* @file llimage_libtest.cpp
* @author Merov Linden
* @brief Integration test for the llimage library
*
* $LicenseInfo:firstyear=2011&license=viewerlgpl$
* Second Life Viewer Source Code
* Copyright (C) 2011, Linden Research, Inc.
*
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation;
* version 2.1 of the License only.
*
* This library is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with this library; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*
* Linden Research, Inc., 945 Battery Street, San Francisco, CA 94111 USA
* $/LicenseInfo$
*/
#include "linden_common.h"
#include "llpointer.h"
#include "lltimer.h"
#include "llimage_libtest.h"
// Linden library includes
#include "llimage.h"
#include "llimagejpeg.h"
#include "llimagepng.h"
#include "llimagebmp.h"
#include "llimagetga.h"
#include "llimagej2c.h"
#include "lldir.h"
// system libraries
#include <iostream>
// doc string provided when invoking the program with --help
static const char USAGE[] = "\n"
"usage:\tllimage_libtest [options]\n"
"\n"
" -h, --help\n"
" Print this help\n"
" -i, --input <file1 .. file2>\n"
" List of image files to load and convert. Patterns with wild cards can be used.\n"
" -o, --output <file1 .. file2> OR <type>\n"
" List of image files to create (assumes same order as for input files)\n"
" OR 3 letters file type extension to convert each input file into.\n"
" -r, --region <x0, y0, x1, y1>\n"
" Crop region applied to the input files in pixels.\n"
" Only used for j2c images. Default is no region cropping.\n"
" -d, --discard_level <n>\n"
" Discard level max used on input. 0 is highest resolution. Max discard level is 5.\n"
" This allows the input image to be clamped in resolution when loading.\n"
" Only valid for j2c images. Default is no discard.\n"
" -p, --precincts <n>\n"
" Dimension of precincts in pixels. Precincts are assumed square and identical for\n"
" all levels. Note that this option also add PLT and tile markers to the codestream, \n"
" and uses RPCL order. Power of 2 must be used.\n"
" Only valid for output j2c images. Default is no precincts used.\n"
" -b, --blocks <n>\n"
" Dimension of coding blocks in pixels. Blocks are assumed square. Power of 2 must\n"
" be used. Blocks must be smaller than precincts. Like precincts, this option adds\n"
" PLT, tile markers and uses RPCL.\n"
" Only valid for output j2c images. Default is 64.\n"
" -l, --levels <n>\n"
" Number of decomposition levels (aka discard levels) in the output image.\n"
" The maximum number of levels authorized is 32.\n"
" Only valid for output j2c images. Default is 5.\n"
" -rev, --reversible\n"
" Set the compression to be lossless (reversible in j2c parlance).\n"
" Only valid for output j2c images.\n"
" -log, --logmetrics <metric>\n"
" Log performance data for <metric>. Results in <metric>.slp\n"
" Note: so far, only ImageCompressionTester has been tested.\n"
" -a, --analyzeperformance\n"
" Create a report comparing <metric>_baseline.slp with current <metric>.slp\n"
" Results in <metric>_report.csv\n"
" -s, --image-stats\n"
" Output stats for each input and output image.\n"
"\n";
// true when all image loading is done. Used by the metric logging thread to know when to stop.
static bool sAllDone = false;
// Create an empty formatted image instance of the correct type from the filename
LLPointer<LLImageFormatted> create_image(const std::string &filename)
{
std::string exten = gDirUtilp->getExtension(filename);
LLPointer<LLImageFormatted> image = LLImageFormatted::createFromExtension(exten);
return image;
}
void output_image_stats(LLPointer<LLImageFormatted> image, const std::string &filename)
{
// Print out some statistical data on the image
std::cout << "Image stats for : " << filename << ", extension : " << image->getExtension() << std::endl;
std::cout << " with : " << (int)(image->getWidth()) << ", height : " << (int)(image->getHeight()) << std::endl;
std::cout << " comp : " << (int)(image->getComponents()) << ", levels : " << (int)(image->getDiscardLevel()) << std::endl;
std::cout << " head : " << (int)(image->calcHeaderSize()) << ", data : " << (int)(image->getDataSize()) << std::endl;
return;
}
// Load an image from file and return a raw (decompressed) instance of its data
LLPointer<LLImageRaw> load_image(const std::string &src_filename, int discard_level, int* region, bool output_stats)
{
LLPointer<LLImageFormatted> image = create_image(src_filename);
// This just loads the image file stream into a buffer. No decoding done.
if (!image->load(src_filename))
{
return NULL;
}
if( (image->getComponents() != 3) && (image->getComponents() != 4) )
{
std::cout << "Image files with less than 3 or more than 4 components are not supported\n";
return NULL;
}
if (output_stats)
{
output_image_stats(image, src_filename);
}
LLPointer<LLImageRaw> raw_image = new LLImageRaw;
// Set the image restriction on load in the case of a j2c image
if ((image->getCodec() == IMG_CODEC_J2C) && ((discard_level != -1) || (region != NULL)))
{
// That method doesn't exist (and likely, doesn't make sense) for any other image file format
// hence the required cryptic cast.
((LLImageJ2C*)(image.get()))->initDecode(*raw_image, discard_level, region);
}
if (!image->decode(raw_image, 0.0f))
{
return NULL;
}
return raw_image;
}
// Save a raw image instance into a file
bool save_image(const std::string &dest_filename, LLPointer<LLImageRaw> raw_image, int blocks_size, int precincts_size, int levels, bool reversible, bool output_stats)
{
LLPointer<LLImageFormatted> image = create_image(dest_filename);
// Set the image codestream parameters on output in the case of a j2c image
if (image->getCodec() == IMG_CODEC_J2C)
{
// That method doesn't exist (and likely, doesn't make sense) for any other image file format
// hence the required cryptic cast.
if ((blocks_size != -1) || (precincts_size != -1) || (levels != 0))
{
((LLImageJ2C*)(image.get()))->initEncode(*raw_image, blocks_size, precincts_size, levels);
}
((LLImageJ2C*)(image.get()))->setReversible(reversible);
}
if (!image->encode(raw_image, 0.0f))
{
return false;
}
if (output_stats)
{
output_image_stats(image, dest_filename);
}
return image->save(dest_filename);
}
void store_input_file(std::list<std::string> &input_filenames, const std::string &path)
{
// Break the incoming path in its components
std::string dir = gDirUtilp->getDirName(path);
std::string name = gDirUtilp->getBaseFileName(path);
std::string exten = gDirUtilp->getExtension(path);
// std::cout << "store_input_file : " << path << ", dir : " << dir << ", name : " << name << ", exten : " << exten << std::endl;
// If extension is not an image type or "*", exit
// Note: we don't support complex patterns for the extension like "j??"
// Note: on most shells, pattern expansion is done by the shell itself, so this limitation is rarely a problem in practice
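// e.g. (illustrative) a quoted "textures/*.tga" reaches this code unexpanded and iterates over every
// .tga file in textures/, while "image.j??" fails the extension check below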
if ((exten.compare("*") != 0) && (LLImageBase::getCodecFromExtension(exten) == IMG_CODEC_INVALID))
{
return;
}
if ((name.find('*') != std::string::npos) || (name.find('?') != std::string::npos))
{
// If file name is a pattern, iterate to get each file name and store
std::string next_name;
while (gDirUtilp->getNextFileInDir(dir,name,next_name))
{
std::string file_name = dir + gDirUtilp->getDirDelimiter() + next_name;
input_filenames.push_back(file_name);
}
}
else
{
// Verify that the file does exist before storing
if (gDirUtilp->fileExists(path))
{
input_filenames.push_back(path);
}
else
{
std::cout << "store_input_file : the file " << path << " could not be found" << std::endl;
}
}
}
void store_output_file(std::list<std::string> &output_filenames, std::list<std::string> &input_filenames, const std::string &path)
{
// Break the incoming path in its components
std::string dir = gDirUtilp->getDirName(path);
std::string name = gDirUtilp->getBaseFileName(path);
std::string exten = gDirUtilp->getExtension(path);
// std::cout << "store_output_file : " << path << ", dir : " << dir << ", name : " << name << ", exten : " << exten << std::endl;
if (dir.empty() && exten.empty())
{
// If dir and exten are empty, we interpret the name as a file extension type name and will iterate through input list to populate the output list
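// e.g. (illustrative) "-i foo.png bar.tga -o j2c" produces the output files "foo.j2c" and "bar.j2c"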
exten = name;
// Make sure the extension is an image type
if (LLImageBase::getCodecFromExtension(exten) == IMG_CODEC_INVALID)
{
return;
}
std::string delim = gDirUtilp->getDirDelimiter();
std::list<std::string>::iterator in_file = input_filenames.begin();
std::list<std::string>::iterator end = input_filenames.end();
for (; in_file != end; ++in_file)
{
dir = gDirUtilp->getDirName(*in_file);
name = gDirUtilp->getBaseFileName(*in_file,true);
std::string file_name;
if (!dir.empty())
{
file_name = dir + delim + name + "." + exten;
}
else
{
file_name = name + "." + exten;
}
output_filenames.push_back(file_name);
}
}
else
{
// Make sure the extension is an image type
if (LLImageBase::getCodecFromExtension(exten) == IMG_CODEC_INVALID)
{
return;
}
// Store the path
output_filenames.push_back(path);
}
}
// Holds the metric gathering output in a thread safe way
class LogThread : public LLThread
{
public:
std::string mFile;
LogThread(std::string& test_name) : LLThread("llimage_libtest log")
{
std::string file_name = test_name + std::string(".slp");
mFile = file_name;
}
void run()
{
std::ofstream os(mFile.c_str());
while (!sAllDone)
{
LLFastTimer::writeLog(os);
os.flush();
ms_sleep(32);
}
LLFastTimer::writeLog(os);
os.flush();
os.close();
}
};
int main(int argc, char** argv)
{
// List of input and output files
std::list<std::string> input_filenames;
std::list<std::string> output_filenames;
// Other optional parsed arguments
bool analyze_performance = false;
bool image_stats = false;
int* region = NULL;
int discard_level = -1;
int precincts_size = -1;
int blocks_size = -1;
int levels = 0;
bool reversible = false;
// Init whatever is necessary
ll_init_apr();
LLImage::initClass();
LogThread* fast_timer_log_thread = NULL; // For performance and metric gathering
// Analyze command line arguments
for (int arg = 1; arg < argc; ++arg)
{
if (!strcmp(argv[arg], "--help") || !strcmp(argv[arg], "-h"))
{
// Send the usage to standard out
std::cout << USAGE << std::endl;
return 0;
}
else if ((!strcmp(argv[arg], "--input") || !strcmp(argv[arg], "-i")) && arg < argc-1)
{
std::string file_name = argv[arg+1];
while (file_name[0] != '-') // if arg starts with '-', we assume it is not a file name but another option
{
// std::cout << "input file name : " << file_name << std::endl;
store_input_file(input_filenames, file_name);
arg += 1; // Skip that arg now we know it's a file name
if ((arg + 1) == argc) // Break out of the loop if we reach the end of the arg list
break;
file_name = argv[arg+1]; // Next argument and loop over
}
}
else if ((!strcmp(argv[arg], "--output") || !strcmp(argv[arg], "-o")) && arg < argc-1)
{
std::string file_name = argv[arg+1];
while (file_name[0] != '-') // if arg starts with '-', we assume it is not a file name but another option
{
// std::cout << "output file name : " << file_name << std::endl;
store_output_file(output_filenames, input_filenames, file_name);
arg += 1; // Skip that arg now we know it's a file name
if ((arg + 1) == argc) // Break out of the loop if we reach the end of the arg list
break;
file_name = argv[arg+1]; // Next argument and loop over
}
}
else if ((!strcmp(argv[arg], "--region") || !strcmp(argv[arg], "-r")) && arg < argc-1)
{
std::string value_str = argv[arg+1];
int index = 0;
region = new int[4];
while (value_str[0] != '-') // if arg starts with '-', it's the next option
{
int value = atoi(value_str.c_str());
region[index++] = value;
arg += 1; // Definitely skip that arg now we know it's a number
if ((arg + 1) == argc) // Break out of the loop if we reach the end of the arg list
break;
if (index == 4) // Break out of the loop if we captured 4 values already
break;
value_str = argv[arg+1]; // Next argument and loop over
}
if (index != 4)
{
std::cout << "--region arguments invalid" << std::endl;
delete [] region;
region = NULL;
}
}
else if (!strcmp(argv[arg], "--discard_level") || !strcmp(argv[arg], "-d"))
{
std::string value_str;
if ((arg + 1) < argc)
{
value_str = argv[arg+1];
}
if (((arg + 1) >= argc) || (value_str[0] == '-'))
{
std::cout << "No valid --discard_level argument given, discard_level ignored" << std::endl;
}
else
{
discard_level = atoi(value_str.c_str());
// Clamp to the values accepted by the viewer
discard_level = llclamp(discard_level,0,5);
}
}
else if (!strcmp(argv[arg], "--precincts") || !strcmp(argv[arg], "-p"))
{
std::string value_str;
if ((arg + 1) < argc)
{
value_str = argv[arg+1];
}
if (((arg + 1) >= argc) || (value_str[0] == '-'))
{
std::cout << "No valid --precincts argument given, precincts ignored" << std::endl;
}
else
{
precincts_size = atoi(value_str.c_str());
}
}
else if (!strcmp(argv[arg], "--blocks") || !strcmp(argv[arg], "-b"))
{
std::string value_str;
if ((arg + 1) < argc)
{
value_str = argv[arg+1];
}
if (((arg + 1) >= argc) || (value_str[0] == '-'))
{
std::cout << "No valid --blocks argument given, blocks ignored" << std::endl;
}
else
{
blocks_size = atoi(value_str.c_str());
}
}
else if (!strcmp(argv[arg], "--levels") || !strcmp(argv[arg], "-l"))
{
std::string value_str;
if ((arg + 1) < argc)
{
value_str = argv[arg+1];
}
if (((arg + 1) >= argc) || (value_str[0] == '-'))
{
std::cout << "No valid --levels argument given, default (5) will be used" << std::endl;
}
else
{
levels = atoi(value_str.c_str());
}
}
else if (!strcmp(argv[arg], "--reversible") || !strcmp(argv[arg], "-rev"))
{
reversible = true;
}
else if (!strcmp(argv[arg], "--logmetrics") || !strcmp(argv[arg], "-log"))
{
// '--logmetrics' needs to be specified with a named test metric argument
// Note: for the moment, only ImageCompressionTester has been tested
std::string test_name;
if ((arg + 1) < argc)
{
test_name = argv[arg+1];
}
if (((arg + 1) >= argc) || (test_name[0] == '-'))
{
// We don't have an argument left in the arg list or the next argument is another option
std::cout << "No --logmetrics argument given, no perf data will be gathered" << std::endl;
}
else
{
LLFastTimer::sMetricLog = TRUE;
LLFastTimer::sLogName = test_name;
arg += 1; // Skip that arg now we know it's a valid test name
if ((arg + 1) == argc) // Break out of the loop if we reach the end of the arg list
break;
}
}
else if (!strcmp(argv[arg], "--analyzeperformance") || !strcmp(argv[arg], "-a"))
{
analyze_performance = true;
}
else if (!strcmp(argv[arg], "--image-stats") || !strcmp(argv[arg], "-s"))
{
image_stats = true;
}
}
// Check arguments consistency. Exit with proper message if inconsistent.
if (input_filenames.size() == 0)
{
std::cout << "No input file, nothing to do -> exit" << std::endl;
return 0;
}
if (analyze_performance && !LLFastTimer::sMetricLog)
{
std::cout << "Cannot create perf report if no perf gathered (i.e. use argument -log <perf> with -a) -> exit" << std::endl;
return 0;
}
// Create the logging thread if required
if (LLFastTimer::sMetricLog)
{
LLFastTimer::sLogLock = new LLMutex(NULL);
fast_timer_log_thread = new LogThread(LLFastTimer::sLogName);
fast_timer_log_thread->start();
}
// Perform action on each input file
std::list<std::string>::iterator in_file = input_filenames.begin();
std::list<std::string>::iterator out_file = output_filenames.begin();
std::list<std::string>::iterator in_end = input_filenames.end();
std::list<std::string>::iterator out_end = output_filenames.end();
for (; in_file != in_end; ++in_file, ++out_file)
{
// Load file
LLPointer<LLImageRaw> raw_image = load_image(*in_file, discard_level, region, image_stats);
if (!raw_image)
{
std::cout << "Error: Image " << *in_file << " could not be loaded" << std::endl;
continue;
}
// Save file
if (out_file != out_end)
{
if (!save_image(*out_file, raw_image, blocks_size, precincts_size, levels, reversible, image_stats))
{
std::cout << "Error: Image " << *out_file << " could not be saved" << std::endl;
}
else
{
std::cout << *in_file << " -> " << *out_file << std::endl;
}
}
}
// Output perf data if requested by user
if (analyze_performance)
{
std::string baseline_name = LLFastTimer::sLogName + "_baseline.slp";
std::string current_name = LLFastTimer::sLogName + ".slp";
std::string report_name = LLFastTimer::sLogName + "_report.csv";
std::cout << "Analyzing performance, check report in : " << report_name << std::endl;
LLMetricPerformanceTesterBasic::doAnalysisMetrics(baseline_name, current_name, report_name);
}
// Stop the perf gathering system if needed
if (LLFastTimer::sMetricLog)
{
LLMetricPerformanceTesterBasic::deleteTester(LLFastTimer::sLogName);
sAllDone = true;
}
// Cleanup and exit
LLImage::cleanupClass();
if (fast_timer_log_thread)
{
fast_timer_log_thread->shutdown();
}
return 0;
}

View File

@ -1,10 +1,9 @@
/**
* @file llversionviewer.h
* @brief
* @file llimage_libtest.h
*
* $LicenseInfo:firstyear=2002&license=viewerlgpl$
* $LicenseInfo:firstyear=2011&license=viewerlgpl$
* Second Life Viewer Source Code
* Copyright (C) 2010, Linden Research, Inc.
* Copyright (C) 2011, Linden Research, Inc.
*
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
@ -23,13 +22,8 @@
* Linden Research, Inc., 945 Battery Street, San Francisco, CA 94111 USA
* $/LicenseInfo$
*/
#include "linden_common.h"
#include "llversionviewer.h"
#ifndef LLIMAGE_LIBTEST_H
#define LLIMAGE_LIBTEST_H
const S32 LL_VERSION_MAJOR = @VERSION_VIEWER_MAJOR@;
const S32 LL_VERSION_MINOR = @VERSION_VIEWER_MINOR@;
const S32 LL_VERSION_PATCH = @VERSION_VIEWER_PATCH@;
const S32 LL_VERSION_BUILD = @VERSION_BUILD@;
const char * const LL_CHANNEL = "Firestorm-private";
#endif

View File

@ -91,14 +91,14 @@ if (WINDOWS)
# Copy over OpenJPEG.dll
# *NOTE: On Windows with VS2005, only the first comment prints
set(OPENJPEG_RELEASE
"${CMAKE_SOURCE_DIR}/../libraries/i686-win32/lib/release/openjpeg.dll")
"${ARCH_PREBUILT_DIRS_RELEASE}/openjpeg.dll")
add_custom_command( TARGET llui_libtest POST_BUILD
COMMAND ${CMAKE_COMMAND} -E copy_if_different
${OPENJPEG_RELEASE} ${CMAKE_CURRENT_BINARY_DIR}
COMMENT "Copying OpenJPEG DLLs to binary directory"
)
set(OPENJPEG_DEBUG
"${CMAKE_SOURCE_DIR}/../libraries/i686-win32/lib/debug/openjpegd.dll")
"${ARCH_PREBUILT_DIRS_DEBUG}/openjpegd.dll")
add_custom_command( TARGET llui_libtest POST_BUILD
COMMAND ${CMAKE_COMMAND} -E copy_if_different
${OPENJPEG_DEBUG} ${CMAKE_CURRENT_BINARY_DIR}

View File

@ -33,6 +33,7 @@
// linden library includes
#include "llcontrol.h" // LLControlGroup
#include "lldir.h"
#include "lldiriterator.h"
#include "llerrorcontrol.h"
#include "llfloater.h"
#include "llfontfreetype.h"
@ -185,7 +186,9 @@ void export_test_floaters()
std::string delim = gDirUtilp->getDirDelimiter();
std::string xui_dir = get_xui_dir() + "en" + delim;
std::string filename;
while (gDirUtilp->getNextFileInDir(xui_dir, "floater_test_*.xml", filename))
LLDirIterator iter(xui_dir, "floater_test_*.xml");
while (iter.next(filename))
{
if (filename.find("_new.xml") != std::string::npos)
{

2
indra/lib/python/indra/util/llmanifest.py Normal file → Executable file
View File

@ -78,7 +78,7 @@ def get_default_platform(dummy):
def get_default_version(srctree):
# look up llversion.h and parse out the version info
paths = [os.path.join(srctree, x, 'llversionviewer.cpp') for x in ['llcommon', '../llcommon', '../../indra/llcommon.h']]
paths = [os.path.join(srctree, x, 'llversionviewer.h') for x in ['llcommon', '../llcommon', '../../indra/llcommon.h']]
for p in paths:
if os.path.exists(p):
contents = open(p, 'r').read()

View File

@ -33,6 +33,7 @@
#include "llerrorcontrol.h"
#include "llfile.h"
#include "lldir.h"
#include "lldiriterator.h"
#include "llxmlnode.h"
#include "lltrans.h"
@ -55,6 +56,8 @@ typedef struct _updater_app_state {
std::string strings_dirs;
std::string strings_file;
LLDirIterator *image_dir_iter;
GtkWidget *window;
GtkWidget *progress_bar;
GtkWidget *image;
@ -88,9 +91,16 @@ bool translate_init(std::string comma_delim_path_list,
std::vector<std::string> paths;
LLStringUtil::getTokens(comma_delim_path_list, paths, ","); // split over ','
for(std::vector<std::string>::iterator it = paths.begin(), end_it = paths.end();
it != end_it;
++it)
{
(*it) = gDirUtilp->findSkinnedFilename(*it, base_xml_name);
}
// suck the translation xml files into memory
LLXMLNodePtr root;
bool success = LLXMLNode::getLayeredXMLNode(base_xml_name, root, paths);
bool success = LLXMLNode::getLayeredXMLNode(root, paths);
if (!success)
{
// couldn't load string table XML
@ -108,7 +118,7 @@ bool translate_init(std::string comma_delim_path_list,
void updater_app_ui_init(void);
void updater_app_quit(UpdaterAppState *app_state);
void parse_args_and_init(int argc, char **argv, UpdaterAppState *app_state);
std::string next_image_filename(std::string& image_path);
std::string next_image_filename(std::string& image_path, LLDirIterator& iter);
void display_error(GtkWidget *parent, std::string title, std::string message);
BOOL install_package(std::string package_file, std::string destination);
BOOL spawn_viewer(UpdaterAppState *app_state);
@ -174,7 +184,7 @@ void updater_app_ui_init(UpdaterAppState *app_state)
// load the first image
app_state->image = gtk_image_new_from_file
(next_image_filename(app_state->image_dir).c_str());
(next_image_filename(app_state->image_dir, *app_state->image_dir_iter).c_str());
gtk_widget_set_size_request(app_state->image, 340, 310);
gtk_container_add(GTK_CONTAINER(frame), app_state->image);
@ -205,7 +215,7 @@ gboolean rotate_image_cb(gpointer data)
llassert(data != NULL);
app_state = (UpdaterAppState *) data;
filename = next_image_filename(app_state->image_dir);
filename = next_image_filename(app_state->image_dir, *app_state->image_dir_iter);
gdk_threads_enter();
gtk_image_set_from_file(GTK_IMAGE(app_state->image), filename.c_str());
@ -214,10 +224,10 @@ gboolean rotate_image_cb(gpointer data)
return TRUE;
}
std::string next_image_filename(std::string& image_path)
std::string next_image_filename(std::string& image_path, LLDirIterator& iter)
{
std::string image_filename;
gDirUtilp->getNextFileInDir(image_path, "/*.jpg", image_filename);
iter.next(image_filename);
return image_path + "/" + image_filename;
}
@ -741,6 +751,7 @@ void parse_args_and_init(int argc, char **argv, UpdaterAppState *app_state)
else if ((!strcmp(argv[i], "--image-dir")) && (++i < argc))
{
app_state->image_dir = argv[i];
app_state->image_dir_iter = new LLDirIterator(argv[i], "/*.jpg");
}
else if ((!strcmp(argv[i], "--dest")) && (++i < argc))
{
@ -825,6 +836,7 @@ int main(int argc, char **argv)
}
bool success = !app_state->failure;
delete app_state->image_dir_iter;
delete app_state;
return success ? 0 : 1;
}

View File

@ -14,7 +14,6 @@ include(LLVFS)
include_directories(
${LLAUDIO_INCLUDE_DIRS}
${FMOD_INCLUDE_DIR}
${LLCOMMON_INCLUDE_DIRS}
${LLMATH_INCLUDE_DIRS}
${LLMESSAGE_INCLUDE_DIRS}
@ -25,6 +24,7 @@ include_directories(
${VORBIS_INCLUDE_DIRS}
${OPENAL_LIB_INCLUDE_DIRS}
${FREEAULT_LIB_INCLUDE_DIRS}
${FMOD_INCLUDE_DIR}
)
set(llaudio_SOURCE_FILES
@ -45,6 +45,10 @@ set(llaudio_HEADER_FILES
)
if (FMOD)
include_directories(
${FMOD_INCLUDE_DIR}
)
list(APPEND llaudio_SOURCE_FILES
llaudioengine_fmod.cpp
lllistener_fmod.cpp

View File

@ -680,4 +680,10 @@ BOOL LLAudioDecodeMgr::addDecodeRequest(const LLUUID &uuid)
return FALSE;
}
#if LL_DARWIN || LL_LINUX
// HACK: to fool the compiler into not emitting unused warnings.
namespace {
const ov_callbacks callback_array[4] = {OV_CALLBACKS_DEFAULT, OV_CALLBACKS_NOCLOSE, OV_CALLBACKS_STREAMONLY,
OV_CALLBACKS_STREAMONLY_NOCLOSE};
}
#endif

View File

@ -32,6 +32,8 @@
#include "lllistener_openal.h"
const float LLAudioEngine_OpenAL::WIND_BUFFER_SIZE_SEC = 0.05f;
LLAudioEngine_OpenAL::LLAudioEngine_OpenAL()
:
mWindGen(NULL),

View File

@ -67,7 +67,7 @@ class LLAudioEngine_OpenAL : public LLAudioEngine
int mNumEmptyWindALBuffers;
static const int MAX_NUM_WIND_BUFFERS = 80;
static const float WIND_BUFFER_SIZE_SEC = 0.05f; // 1/20th sec
static const float WIND_BUFFER_SIZE_SEC; // 1/20th sec
};
class LLAudioChannelOpenAL : public LLAudioChannel

View File

@ -42,10 +42,10 @@ using namespace std;
#define INCHES_TO_METERS 0.02540005f
const F32 POSITION_KEYFRAME_THRESHOLD = 0.03f;
const F32 POSITION_KEYFRAME_THRESHOLD_SQUARED = 0.03f * 0.03f;
const F32 ROTATION_KEYFRAME_THRESHOLD = 0.01f;
const F32 POSITION_MOTION_THRESHOLD = 0.001f;
const F32 POSITION_MOTION_THRESHOLD_SQUARED = 0.001f * 0.001f;
const F32 ROTATION_MOTION_THRESHOLD = 0.001f;
char gInFile[1024]; /* Flawfinder: ignore */
@ -1196,7 +1196,7 @@ void LLBVHLoader::optimize()
if (ki_prev == ki_last_good_pos)
{
joint->mNumPosKeys++;
if (dist_vec(LLVector3(ki_prev->mPos), first_frame_pos) > POSITION_MOTION_THRESHOLD)
if (dist_vec_squared(LLVector3(ki_prev->mPos), first_frame_pos) > POSITION_MOTION_THRESHOLD_SQUARED)
{
pos_changed = TRUE;
}
@ -1209,12 +1209,12 @@ void LLBVHLoader::optimize()
LLVector3 current_pos(ki->mPos);
LLVector3 interp_pos = lerp(current_pos, last_good_pos, 1.f / (F32)numPosFramesConsidered);
if (dist_vec(current_pos, first_frame_pos) > POSITION_MOTION_THRESHOLD)
if (dist_vec_squared(current_pos, first_frame_pos) > POSITION_MOTION_THRESHOLD_SQUARED)
{
pos_changed = TRUE;
}
if (dist_vec(interp_pos, test_pos) < POSITION_KEYFRAME_THRESHOLD)
if (dist_vec_squared(interp_pos, test_pos) < POSITION_KEYFRAME_THRESHOLD_SQUARED)
{
ki_prev->mIgnorePos = TRUE;
numPosFramesConsidered++;

View File

@ -3,22 +3,20 @@
project(llcommon)
include(00-Common)
include(LLCommon)
include(Linking)
include(Boost)
include(Pth)
include(LLSharedLibs)
include(GoogleBreakpad)
include(GooglePerfTools)
include(Copy3rdPartyLibs)
include(ZLIB)
include_directories(
${EXPAT_INCLUDE_DIRS}
${LLCOMMON_INCLUDE_DIRS}
${ZLIB_INCLUDE_DIRS}
${PTH_INCLUDE_DIRS}
)
# add_executable(lltreeiterators lltreeiterators.cpp)
@ -63,6 +61,7 @@ set(llcommon_SOURCE_FILES
llformat.cpp
llframetimer.cpp
llheartbeat.cpp
llinstancetracker.cpp
llliveappconfig.cpp
lllivefile.cpp
lllog.cpp
@ -258,25 +257,6 @@ set_source_files_properties(${llcommon_HEADER_FILES}
list(APPEND llcommon_SOURCE_FILES ${llcommon_HEADER_FILES})
if(WINDOWS)
# Add resource files to the project.
set(llcommon_RESOURCE_FILES
res/llcommon.rc
)
SOURCE_GROUP("Resource Files" FILES ${llcommon_RESOURCE_FILES})
list(APPEND llcommon_SOURCE_FILES ${llcommon_RESOURCE_FILES})
endif(WINDOWS)
if (WINDOWS AND NOT FIRECYG)
configure_files(llcommon_GENERATED_SOURCE_FILES
llversionviewer.cpp
)
endif(WINDOWS AND NOT FIRECYG)
list(APPEND llcommon_SOURCE_FILES ${llcommon_GENERATED_SOURCE_FILES} llversionviewer.cpp)
if(LLCOMMON_LINK_SHARED)
add_library (llcommon SHARED ${llcommon_SOURCE_FILES})
if(NOT WORD_SIZE EQUAL 32)
@ -286,6 +266,10 @@ if(LLCOMMON_LINK_SHARED)
add_definitions(-fPIC)
endif(WINDOWS)
endif(NOT WORD_SIZE EQUAL 32)
if(WINDOWS)
# always generate llcommon.pdb, even for "Release" builds
set_target_properties(llcommon PROPERTIES LINK_FLAGS "/DEBUG")
endif(WINDOWS)
ll_stage_sharedlib(llcommon)
else(LLCOMMON_LINK_SHARED)
add_library (llcommon ${llcommon_SOURCE_FILES})
@ -301,10 +285,15 @@ target_link_libraries(
${WINDOWS_LIBRARIES}
${BOOST_PROGRAM_OPTIONS_LIBRARY}
${BOOST_REGEX_LIBRARY}
${PTH_LIBRARIES}
${GOOGLE_PERFTOOLS_LIBRARIES}
)
if (DARWIN)
include(CMakeFindFrameworks)
find_library(CARBON_LIBRARY Carbon)
target_link_libraries(llcommon ${CARBON_LIBRARY})
endif (DARWIN)
add_dependencies(llcommon stage_third_party_libs)
if (LL_TESTS)
@ -333,6 +322,7 @@ if (LL_TESTS)
LL_ADD_INTEGRATION_TEST(lluri "" "${test_libs}")
LL_ADD_INTEGRATION_TEST(reflection "" "${test_libs}")
LL_ADD_INTEGRATION_TEST(stringize "" "${test_libs}")
LL_ADD_INTEGRATION_TEST(lleventdispatcher "" "${test_libs}")
# *TODO - reenable these once tcmalloc libs no longer break the build.
#ADD_BUILD_TEST(llallocator llcommon)

View File

@ -312,6 +312,14 @@ const F32 CHAT_SHOUT_RADIUS = 100.f;
const F32 CHAT_MAX_RADIUS = CHAT_SHOUT_RADIUS;
const F32 CHAT_MAX_RADIUS_BY_TWO = CHAT_MAX_RADIUS / 2.f;
// squared versions of the above, for distance checks that avoid a sqrt
const F32 CHAT_WHISPER_RADIUS_SQUARED = CHAT_WHISPER_RADIUS * CHAT_WHISPER_RADIUS;
const F32 CHAT_NORMAL_RADIUS_SQUARED = CHAT_NORMAL_RADIUS * CHAT_NORMAL_RADIUS;
const F32 CHAT_SHOUT_RADIUS_SQUARED = CHAT_SHOUT_RADIUS * CHAT_SHOUT_RADIUS;
const F32 CHAT_MAX_RADIUS_SQUARED = CHAT_SHOUT_RADIUS_SQUARED;
const F32 CHAT_MAX_RADIUS_BY_TWO_SQUARED = CHAT_MAX_RADIUS_BY_TWO * CHAT_MAX_RADIUS_BY_TWO;
// multiplying the above by this factor gives the barely audible radius
const F32 CHAT_BARELY_AUDIBLE_FACTOR = 2.0f;

View File

@ -24,6 +24,10 @@
* $/LicenseInfo$
*/
#include "linden_common.h"
#include "llapp.h"
#include <cstdlib>
#ifdef LL_DARWIN
@ -32,9 +36,6 @@
#include <sys/sysctl.h>
#endif
#include "linden_common.h"
#include "llapp.h"
#include "llcommon.h"
#include "llapr.h"
#include "llerrorcontrol.h"

View File

@ -41,6 +41,354 @@
#include "llevents.h"
#include "llerror.h"
#include "llsdutil.h"
#include "stringize.h"
#include <memory> // std::auto_ptr
/*****************************************************************************
* LLSDArgsSource
*****************************************************************************/
/**
* Store an LLSD array, producing its elements one at a time. Die with LL_ERRS
* if the consumer requests more elements than the array contains.
*/
class LL_COMMON_API LLSDArgsSource
{
public:
LLSDArgsSource(const std::string function, const LLSD& args);
~LLSDArgsSource();
LLSD next();
void done() const;
private:
std::string _function;
LLSD _args;
LLSD::Integer _index;
};
LLSDArgsSource::LLSDArgsSource(const std::string function, const LLSD& args):
_function(function),
_args(args),
_index(0)
{
if (! (_args.isUndefined() || _args.isArray()))
{
LL_ERRS("LLSDArgsSource") << _function << " needs an args array instead of "
<< _args << LL_ENDL;
}
}
LLSDArgsSource::~LLSDArgsSource()
{
done();
}
LLSD LLSDArgsSource::next()
{
if (_index >= _args.size())
{
LL_ERRS("LLSDArgsSource") << _function << " requires more arguments than the "
<< _args.size() << " provided: " << _args << LL_ENDL;
}
return _args[_index++];
}
void LLSDArgsSource::done() const
{
if (_index < _args.size())
{
LL_WARNS("LLSDArgsSource") << _function << " only consumed " << _index
<< " of the " << _args.size() << " arguments provided: "
<< _args << LL_ENDL;
}
}
/*****************************************************************************
* LLSDArgsMapper
*****************************************************************************/
/**
* From a formal parameters description and a map of arguments, construct an
* arguments array.
*
* That is, given:
* - an LLSD array of length n containing parameter-name strings,
* corresponding to the arguments of a function of interest
* - an LLSD collection specifying default parameter values, either:
* - an LLSD array of length m <= n, matching the rightmost m params, or
* - an LLSD map explicitly stating default name=value pairs
* - an LLSD map of parameter names and actual values for a particular
* function call
* construct an LLSD array of actual argument values for this function call.
*
* The parameter-names array and the defaults collection describe the function
* being called. The map might vary with every call, providing argument values
* for the described parameters.
*
* The array of parameter names must match the number of parameters expected
* by the function of interest.
*
* If you pass a map of default parameter values, it provides default values
* as you might expect. It is an error to specify a default value for a name
* not listed in the parameters array.
*
* If you pass an array of default parameter values, it is mapped to the
* rightmost m of the n parameter names. It is an error if the default-values
* array is longer than the parameter-names array. Consider the following
* parameter names: ["a", "b", "c", "d"].
*
* - An empty array of default values (or an isUndefined() value) asserts that
* every one of the above parameter names is required.
* - An array of four default values [1, 2, 3, 4] asserts that every one of
* the above parameters is optional. If the current parameter map is empty,
* they will be passed to the function as [1, 2, 3, 4].
* - An array of two default values [11, 12] asserts that parameters "a" and
* "b" are required, while "c" and "d" are optional, having default values
* "c"=11 and "d"=12.
*
* The arguments array is constructed as follows:
*
* - Arguments-map keys not found in the parameter-names array are ignored.
* - Entries from the map provide values for an improper subset of the
* parameters named in the parameter-names array. This results in a
* tentative values array with "holes." (size of map) + (number of holes) =
* (size of names array)
* - Holes are filled with the default values.
* - Any remaining holes constitute an error.
*/
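// Worked example (illustrative values): with names ["a", "b", "c", "d"] and array
// defaults [11, 12] (so "c"=11 and "d"=12 are optional), an argsmap of
// {"a":1, "b":2, "d":3} maps to the args array [1, 2, 11, 3]; omitting "a" or "b"
// would be an error since they have no defaults.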
class LL_COMMON_API LLSDArgsMapper
{
public:
/// Accept description of function: function name, param names, param
/// default values
LLSDArgsMapper(const std::string& function, const LLSD& names, const LLSD& defaults);
/// Given arguments map, return LLSD::Array of parameter values, or LL_ERRS.
LLSD map(const LLSD& argsmap) const;
private:
static std::string formatlist(const LLSD&);
// The function-name string is purely descriptive. We want error messages
// to be able to indicate which function's LLSDArgsMapper has the problem.
std::string _function;
// Store the names array pretty much as given.
LLSD _names;
// Though we're handed an array of name strings, it's more useful to us to
// store it as a map from name string to position index. Of course that's
// easy to generate from the incoming names array, but why do it more than
// once?
typedef std::map<LLSD::String, LLSD::Integer> IndexMap;
IndexMap _indexes;
// Generated array of default values, aligned with the array of param names.
LLSD _defaults;
// Indicate whether we have a default value for each param.
typedef std::vector<char> FilledVector;
FilledVector _has_dft;
};
LLSDArgsMapper::LLSDArgsMapper(const std::string& function,
const LLSD& names, const LLSD& defaults):
_function(function),
_names(names),
_has_dft(names.size())
{
if (! (_names.isUndefined() || _names.isArray()))
{
LL_ERRS("LLSDArgsMapper") << function << " names must be an array, not " << names << LL_ENDL;
}
LLSD::Integer nparams(_names.size());
// From _names generate _indexes.
for (LLSD::Integer ni = 0, nend = _names.size(); ni < nend; ++ni)
{
_indexes[_names[ni]] = ni;
}
// Presize the _defaults array so we don't have to resize it more than once.
// All entries are initialized to LLSD(); but since _has_dft is still all
// 0, they're all "holes" for now.
if (nparams)
{
_defaults[nparams - 1] = LLSD();
}
if (defaults.isUndefined() || defaults.isArray())
{
LLSD::Integer ndefaults = defaults.size();
// defaults is a (possibly empty) array. Right-align it with names.
if (ndefaults > nparams)
{
LL_ERRS("LLSDArgsMapper") << function << " names array " << names
<< " shorter than defaults array " << defaults << LL_ENDL;
}
// Offset by which we slide defaults array right to right-align with
// _names array
LLSD::Integer offset = nparams - ndefaults;
// Fill rightmost _defaults entries from defaults, and mark them as
// filled
for (LLSD::Integer i = 0, iend = ndefaults; i < iend; ++i)
{
_defaults[i + offset] = defaults[i];
_has_dft[i + offset] = 1;
}
}
else if (defaults.isMap())
{
// defaults is a map. Use it to populate the _defaults array.
LLSD bogus;
for (LLSD::map_const_iterator mi(defaults.beginMap()), mend(defaults.endMap());
mi != mend; ++mi)
{
IndexMap::const_iterator ixit(_indexes.find(mi->first));
if (ixit == _indexes.end())
{
bogus.append(mi->first);
continue;
}
LLSD::Integer pos = ixit->second;
// Store default value at that position in the _defaults array.
_defaults[pos] = mi->second;
// Don't forget to record the fact that we've filled this
// position.
_has_dft[pos] = 1;
}
if (bogus.size())
{
LL_ERRS("LLSDArgsMapper") << function << " defaults specified for nonexistent params "
<< formatlist(bogus) << LL_ENDL;
}
}
else
{
LL_ERRS("LLSDArgsMapper") << function << " defaults must be a map or an array, not "
<< defaults << LL_ENDL;
}
}
LLSD LLSDArgsMapper::map(const LLSD& argsmap) const
{
if (! (argsmap.isUndefined() || argsmap.isMap() || argsmap.isArray()))
{
LL_ERRS("LLSDArgsMapper") << _function << " map() needs a map or array, not "
<< argsmap << LL_ENDL;
}
// Initialize the args array. Indexing a non-const LLSD array grows it
// to appropriate size, but we don't want to resize this one on each
// new operation. Just make it as big as we need before we start
// stuffing values into it.
LLSD args(LLSD::emptyArray());
if (_defaults.size() == 0)
{
// If this function requires no arguments, fast exit. (Don't try to
// assign to args[-1].)
return args;
}
args[_defaults.size() - 1] = LLSD();
// Get a vector of chars to indicate holes. It's tempting to just scan
// for LLSD::isUndefined() values after filling the args array from
// the map, but it's plausible for caller to explicitly pass
// isUndefined() as the value of some parameter name. That's legal
// since isUndefined() has well-defined conversions (default value)
// for LLSD data types. So use a whole separate array for detecting
// holes. (Avoid std::vector<bool> which is known to be odd -- can we
// iterate?)
FilledVector filled(args.size());
if (argsmap.isArray())
{
// Fill args from array. If there are too many args in passed array,
// ignore the rest.
LLSD::Integer size(argsmap.size());
if (size > args.size())
{
// We don't just use std::min() because we want to sneak in this
// warning if caller passes too many args.
LL_WARNS("LLSDArgsMapper") << _function << " needs " << args.size()
<< " params, ignoring last " << (size - args.size())
<< " of passed " << size << ": " << argsmap << LL_ENDL;
size = args.size();
}
for (LLSD::Integer i(0); i < size; ++i)
{
// Copy the actual argument from argsmap
args[i] = argsmap[i];
// Note that it's been filled
filled[i] = 1;
}
}
else
{
// argsmap is in fact a map. Walk the map.
for (LLSD::map_const_iterator mi(argsmap.beginMap()), mend(argsmap.endMap());
mi != mend; ++mi)
{
// mi->first is a parameter-name string, with mi->second its
// value. Look up the name's position index in _indexes.
IndexMap::const_iterator ixit(_indexes.find(mi->first));
if (ixit == _indexes.end())
{
// Allow for a map containing more params than were passed in
// our names array. Caller typically receives a map containing
// the function name, cruft such as reqid, etc. Ignore keys
// not defined in _indexes.
LL_DEBUGS("LLSDArgsMapper") << _function << " ignoring "
<< mi->first << "=" << mi->second << LL_ENDL;
continue;
}
LLSD::Integer pos = ixit->second;
// Store the value at that position in the args array.
args[pos] = mi->second;
// Don't forget to record the fact that we've filled this
// position.
filled[pos] = 1;
}
}
// Fill any remaining holes from _defaults.
LLSD unfilled(LLSD::emptyArray());
for (LLSD::Integer i = 0, iend = args.size(); i < iend; ++i)
{
if (! filled[i])
{
// If there's no default value for this parameter, that's an
// error.
if (! _has_dft[i])
{
unfilled.append(_names[i]);
}
else
{
args[i] = _defaults[i];
}
}
}
// If any required args -- args without defaults -- were left unfilled
// by argsmap, that's a problem.
if (unfilled.size())
{
LL_ERRS("LLSDArgsMapper") << _function << " missing required arguments "
<< formatlist(unfilled) << " from " << argsmap << LL_ENDL;
}
// done
return args;
}
std::string LLSDArgsMapper::formatlist(const LLSD& list)
{
std::ostringstream out;
const char* delim = "";
for (LLSD::array_const_iterator li(list.beginArray()), lend(list.endArray());
li != lend; ++li)
{
out << delim << li->asString();
delim = ", ";
}
return out.str();
}
LLEventDispatcher::LLEventDispatcher(const std::string& desc, const std::string& key):
mDesc(desc),
@ -52,12 +400,181 @@ LLEventDispatcher::~LLEventDispatcher()
{
}
/**
* DispatchEntry subclass used for callables accepting(const LLSD&)
*/
struct LLEventDispatcher::LLSDDispatchEntry: public LLEventDispatcher::DispatchEntry
{
LLSDDispatchEntry(const std::string& desc, const Callable& func, const LLSD& required):
DispatchEntry(desc),
mFunc(func),
mRequired(required)
{}
Callable mFunc;
LLSD mRequired;
virtual void call(const std::string& desc, const LLSD& event) const
{
// Validate the syntax of the event itself.
std::string mismatch(llsd_matches(mRequired, event));
if (! mismatch.empty())
{
LL_ERRS("LLEventDispatcher") << desc << ": bad request: " << mismatch << LL_ENDL;
}
// Event syntax looks good, go for it!
mFunc(event);
}
virtual LLSD addMetadata(LLSD meta) const
{
meta["required"] = mRequired;
return meta;
}
};
/**
* DispatchEntry subclass for passing LLSD to functions accepting
* arbitrary argument types (convertible via LLSDParam)
*/
struct LLEventDispatcher::ParamsDispatchEntry: public LLEventDispatcher::DispatchEntry
{
ParamsDispatchEntry(const std::string& desc, const invoker_function& func):
DispatchEntry(desc),
mInvoker(func)
{}
invoker_function mInvoker;
virtual void call(const std::string& desc, const LLSD& event) const
{
LLSDArgsSource src(desc, event);
mInvoker(boost::bind(&LLSDArgsSource::next, boost::ref(src)));
}
};
/**
* DispatchEntry subclass for dispatching LLSD::Array to functions accepting
* arbitrary argument types (convertible via LLSDParam)
*/
struct LLEventDispatcher::ArrayParamsDispatchEntry: public LLEventDispatcher::ParamsDispatchEntry
{
ArrayParamsDispatchEntry(const std::string& desc, const invoker_function& func,
LLSD::Integer arity):
ParamsDispatchEntry(desc, func),
mArity(arity)
{}
LLSD::Integer mArity;
virtual LLSD addMetadata(LLSD meta) const
{
LLSD array(LLSD::emptyArray());
// Resize to number of arguments required
if (mArity)
array[mArity - 1] = LLSD();
llassert_always(array.size() == mArity);
meta["required"] = array;
return meta;
}
};
/**
* DispatchEntry subclass for dispatching LLSD::Map to functions accepting
* arbitrary argument types (convertible via LLSDParam)
*/
struct LLEventDispatcher::MapParamsDispatchEntry: public LLEventDispatcher::ParamsDispatchEntry
{
MapParamsDispatchEntry(const std::string& name, const std::string& desc,
const invoker_function& func,
const LLSD& params, const LLSD& defaults):
ParamsDispatchEntry(desc, func),
mMapper(name, params, defaults),
mRequired(LLSD::emptyMap())
{
// Build the set of all param keys, then delete the ones that are
// optional. What's left are the ones that are required.
for (LLSD::array_const_iterator pi(params.beginArray()), pend(params.endArray());
pi != pend; ++pi)
{
mRequired[pi->asString()] = LLSD();
}
if (defaults.isArray() || defaults.isUndefined())
{
// Right-align the params and defaults arrays.
LLSD::Integer offset = params.size() - defaults.size();
// Now the name of every defaults[i] is at params[i + offset].
for (LLSD::Integer i(0), iend(defaults.size()); i < iend; ++i)
{
// Erase this optional param from mRequired.
mRequired.erase(params[i + offset].asString());
// Instead, make an entry in mOptional with the default
// param's name and value.
mOptional[params[i + offset].asString()] = defaults[i];
}
}
else if (defaults.isMap())
{
// if defaults is already a map, then it's already in the form we
// intend to deliver in metadata
mOptional = defaults;
// Just delete from mRequired every key appearing in mOptional.
for (LLSD::map_const_iterator mi(mOptional.beginMap()), mend(mOptional.endMap());
mi != mend; ++mi)
{
mRequired.erase(mi->first);
}
}
}
LLSDArgsMapper mMapper;
LLSD mRequired;
LLSD mOptional;
virtual void call(const std::string& desc, const LLSD& event) const
{
// Just convert from LLSD::Map to LLSD::Array using mMapper, then pass
// to base-class call() method.
ParamsDispatchEntry::call(desc, mMapper.map(event));
}
virtual LLSD addMetadata(LLSD meta) const
{
meta["required"] = mRequired;
meta["optional"] = mOptional;
return meta;
}
};
void LLEventDispatcher::addArrayParamsDispatchEntry(const std::string& name,
const std::string& desc,
const invoker_function& invoker,
LLSD::Integer arity)
{
mDispatch.insert(
DispatchMap::value_type(name, DispatchMap::mapped_type(
new ArrayParamsDispatchEntry(desc, invoker, arity))));
}
void LLEventDispatcher::addMapParamsDispatchEntry(const std::string& name,
const std::string& desc,
const invoker_function& invoker,
const LLSD& params,
const LLSD& defaults)
{
mDispatch.insert(
DispatchMap::value_type(name, DispatchMap::mapped_type(
new MapParamsDispatchEntry(name, desc, invoker, params, defaults))));
}
/// Register a callable by name
void LLEventDispatcher::add(const std::string& name, const std::string& desc,
const Callable& callable, const LLSD& required)
{
mDispatch.insert(DispatchMap::value_type(name,
DispatchMap::mapped_type(callable, desc, required)));
mDispatch.insert(
DispatchMap::value_type(name, DispatchMap::mapped_type(
new LLSDDispatchEntry(desc, callable, required))));
}
void LLEventDispatcher::addFail(const std::string& name, const std::string& classname) const
@ -83,7 +600,7 @@ bool LLEventDispatcher::remove(const std::string& name)
/// such callable exists, die with LL_ERRS.
void LLEventDispatcher::operator()(const std::string& name, const LLSD& event) const
{
if (! attemptCall(name, event))
if (! try_call(name, event))
{
LL_ERRS("LLEventDispatcher") << "LLEventDispatcher(" << mDesc << "): '" << name
<< "' not found" << LL_ENDL;
@ -98,44 +615,29 @@ void LLEventDispatcher::operator()(const LLSD& event) const
// This could/should be implemented in terms of the two-arg overload.
// However -- we can produce a more informative error message.
std::string name(event[mKey]);
if (! attemptCall(name, event))
if (! try_call(name, event))
{
LL_ERRS("LLEventDispatcher") << "LLEventDispatcher(" << mDesc << "): bad " << mKey
<< " value '" << name << "'" << LL_ENDL;
}
}
bool LLEventDispatcher::attemptCall(const std::string& name, const LLSD& event) const
bool LLEventDispatcher::try_call(const LLSD& event) const
{
DispatchMap::const_iterator found = mDispatch.find(name);
if (found == mDispatch.end())
{
// The reason we only return false, leaving it up to our caller to die
// with LL_ERRS, is that different callers have different amounts of
// available information.
return false;
}
// Found the name, so it's plausible to even attempt the call. But first,
// validate the syntax of the event itself.
std::string mismatch(llsd_matches(found->second.mRequired, event));
if (! mismatch.empty())
{
LL_ERRS("LLEventDispatcher") << "LLEventDispatcher(" << mDesc << ") calling '" << name
<< "': bad request: " << mismatch << LL_ENDL;
}
// Event syntax looks good, go for it!
(found->second.mFunc)(event);
return true; // tell caller we were able to call
return try_call(event[mKey], event);
}
LLEventDispatcher::Callable LLEventDispatcher::get(const std::string& name) const
bool LLEventDispatcher::try_call(const std::string& name, const LLSD& event) const
{
DispatchMap::const_iterator found = mDispatch.find(name);
if (found == mDispatch.end())
{
return Callable();
return false;
}
return found->second.mFunc;
// Found the name, so it's plausible to even attempt the call.
found->second->call(STRINGIZE("LLEventDispatcher(" << mDesc << ") calling '" << name << "'"),
event);
return true; // tell caller we were able to call
}
LLSD LLEventDispatcher::getMetadata(const std::string& name) const
@ -147,9 +649,8 @@ LLSD LLEventDispatcher::getMetadata(const std::string& name) const
}
LLSD meta;
meta["name"] = name;
meta["desc"] = found->second.mDesc;
meta["required"] = found->second.mRequired;
return meta;
meta["desc"] = found->second->mDesc;
return found->second->addMetadata(meta);
}
LLDispatchListener::LLDispatchListener(const std::string& pumpname, const std::string& key):
@ -164,3 +665,8 @@ bool LLDispatchListener::process(const LLSD& event)
(*this)(event);
return false;
}
LLEventDispatcher::DispatchEntry::DispatchEntry(const std::string& desc):
mDesc(desc)
{}

View File

@ -27,18 +27,56 @@
*
* Linden Research, Inc., 945 Battery Street, San Francisco, CA 94111 USA
* $/LicenseInfo$
*
* The invoker machinery that constructs a boost::fusion argument list for use
* with boost::fusion::invoke() is derived from
* http://www.boost.org/doc/libs/1_45_0/libs/function_types/example/interpreter.hpp
* whose license information is copied below:
*
* "(C) Copyright Tobias Schwinger
*
* Use modification and distribution are subject to the boost Software License,
* Version 1.0. (See http://www.boost.org/LICENSE_1_0.txt)."
*/
#if ! defined(LL_LLEVENTDISPATCHER_H)
#define LL_LLEVENTDISPATCHER_H
// nil is too generic a term to be allowed to be a global macro. In
// particular, boost::fusion defines a 'class nil' (properly encapsulated in a
// namespace) that a global 'nil' macro breaks badly.
#if defined(nil)
// Capture the value of the macro 'nil', hoping int is an appropriate type.
static const int nil_(nil);
// Now forget the macro.
#undef nil
// Finally, reintroduce 'nil' as a properly-scoped alias for the previously-
// defined const 'nil_'. Make it static since otherwise it produces duplicate-
// symbol link errors later.
static const int& nil(nil_);
#endif
#include <string>
#include <map>
#include <boost/shared_ptr.hpp>
#include <boost/function.hpp>
#include <boost/bind.hpp>
#include <boost/iterator/transform_iterator.hpp>
#include <boost/utility/enable_if.hpp>
#include <boost/function_types/is_nonmember_callable_builtin.hpp>
#include <boost/function_types/parameter_types.hpp>
#include <boost/function_types/function_arity.hpp>
#include <boost/type_traits/remove_cv.hpp>
#include <boost/type_traits/remove_reference.hpp>
#include <boost/fusion/include/push_back.hpp>
#include <boost/fusion/include/cons.hpp>
#include <boost/fusion/include/invoke.hpp>
#include <boost/mpl/begin.hpp>
#include <boost/mpl/end.hpp>
#include <boost/mpl/next.hpp>
#include <boost/mpl/deref.hpp>
#include <typeinfo>
#include "llevents.h"
#include "llsdutil.h"
class LLSD;
@ -54,12 +92,18 @@ public:
LLEventDispatcher(const std::string& desc, const std::string& key);
virtual ~LLEventDispatcher();
/// Accept any C++ callable, typically a boost::bind() expression
/// @name Register functions accepting (const LLSD&)
//@{
/// Accept any C++ callable with the right signature, typically a
/// boost::bind() expression
typedef boost::function<void(const LLSD&)> Callable;
/**
* Register a @a callable by @a name. The optional @a required parameter
* is used to validate the structure of each incoming event (see
* Register a @a callable by @a name. The passed @a callable accepts a
* single LLSD value and uses it in any way desired, e.g. extract
* parameters and call some other function. The optional @a required
* parameter is used to validate the structure of each incoming event (see
* llsd_matches()).
*/
void add(const std::string& name,
@ -67,10 +111,24 @@ public:
const Callable& callable,
const LLSD& required=LLSD());
/**
* The case of a free function (or static method) accepting (const LLSD&)
* could also be intercepted by the arbitrary-args overload below. Ensure
* that it's directed to the Callable overload above instead.
*/
void add(const std::string& name,
const std::string& desc,
void (*f)(const LLSD&),
const LLSD& required=LLSD())
{
add(name, desc, Callable(f), required);
}
/**
* Special case: a subclass of this class can pass an unbound member
* function pointer without explicitly specifying the
* <tt>boost::bind()</tt> expression.
* function pointer (of an LLEventDispatcher subclass) without explicitly
* specifying the <tt>boost::bind()</tt> expression. The passed @a method
* accepts a single LLSD value, presumably containing other parameters.
*/
template <class CLASS>
void add(const std::string& name,
@ -81,7 +139,8 @@ public:
addMethod<CLASS>(name, desc, method, required);
}
/// Overload for both const and non-const methods
/// Overload for both const and non-const methods. The passed @a method
/// accepts a single LLSD value, presumably containing other parameters.
template <class CLASS>
void add(const std::string& name,
const std::string& desc,
@ -91,15 +150,106 @@ public:
addMethod<CLASS>(name, desc, method, required);
}
/// Convenience: for LLEventDispatcher, not every callable needs a
/// documentation string.
template <typename CALLABLE>
void add(const std::string& name,
CALLABLE callable,
const LLSD& required=LLSD())
{
add(name, "", callable, required);
}
//@}
/// @name Register functions with arbitrary param lists
//@{
/**
* Register a free function with arbitrary parameters. (This also works
* for static class methods.)
*
* @note This supports functions with up to about 6 parameters -- after
* that you start getting dismaying compile errors in which
* boost::fusion::joint_view is mentioned a surprising number of times.
*
* When calling this name, pass an LLSD::Array. Each entry in turn will be
* converted to the corresponding parameter type using LLSDParam.
*/
template<typename Function>
typename boost::enable_if< boost::function_types::is_nonmember_callable_builtin<Function>
>::type add(const std::string& name,
const std::string& desc,
Function f);
/**
* Register a nonstatic class method with arbitrary parameters.
*
* @note This supports functions with up to about 6 parameters -- after
* that you start getting dismaying compile errors in which
* boost::fusion::joint_view is mentioned a surprising number of times.
*
* To cover cases such as a method on an LLSingleton we don't yet want to
* instantiate, instead of directly storing an instance pointer, accept a
* nullary callable returning a pointer/reference to the desired class
* instance. If you already have an instance in hand,
* boost::lambda::var(instance) or boost::lambda::constant(instance_ptr)
* produce suitable callables.
*
* When calling this name, pass an LLSD::Array. Each entry in turn will be
* converted to the corresponding parameter type using LLSDParam.
*/
template<typename Method, typename InstanceGetter>
typename boost::enable_if< boost::function_types::is_member_function_pointer<Method>
>::type add(const std::string& name,
const std::string& desc,
Method f,
const InstanceGetter& getter);
/**
* Register a free function with arbitrary parameters. (This also works
* for static class methods.)
*
* @note This supports functions with up to about 6 parameters -- after
* that you start getting dismaying compile errors in which
* boost::fusion::joint_view is mentioned a surprising number of times.
*
* Pass an LLSD::Array of parameter names, and optionally another
* LLSD::Array of default parameter values, a la LLSDArgsMapper.
*
* When calling this name, pass an LLSD::Map. We will internally generate
* an LLSD::Array using LLSDArgsMapper and then convert each entry in turn
* to the corresponding parameter type using LLSDParam.
*/
template<typename Function>
typename boost::enable_if< boost::function_types::is_nonmember_callable_builtin<Function>
>::type add(const std::string& name,
const std::string& desc,
Function f,
const LLSD& params,
const LLSD& defaults=LLSD());
/**
* Register a nonstatic class method with arbitrary parameters.
*
* @note This supports functions with up to about 6 parameters -- after
* that you start getting dismaying compile errors in which
* boost::fusion::joint_view is mentioned a surprising number of times.
*
* To cover cases such as a method on an LLSingleton we don't yet want to
* instantiate, instead of directly storing an instance pointer, accept a
* nullary callable returning a pointer/reference to the desired class
* instance. If you already have an instance in hand,
* boost::lambda::var(instance) or boost::lambda::constant(instance_ptr)
* produce suitable callables.
*
* Pass an LLSD::Array of parameter names, and optionally another
* LLSD::Array of default parameter values, a la LLSDArgsMapper.
*
* When calling this name, pass an LLSD::Map. We will internally generate
* an LLSD::Array using LLSDArgsMapper and then convert each entry in turn
* to the corresponding parameter type using LLSDParam.
*/
template<typename Method, typename InstanceGetter>
typename boost::enable_if< boost::function_types::is_member_function_pointer<Method>
>::type add(const std::string& name,
const std::string& desc,
Method f,
const InstanceGetter& getter,
const LLSD& params,
const LLSD& defaults=LLSD());
//@}
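As a rough usage sketch (the function and entry names here are illustrative, not part of this changeset), the named-parameter overload above can be exercised like so, assuming "lleventdispatcher.h" and "llsdutil.h" are included:
float scale(float value, float factor) { return value * factor; }
LLEventDispatcher disp("demo", "op");
// Named-parameter style: callers pass an LLSD::Map keyed by "value" and
// "factor"; both parameters carry defaults for anything the caller omits.
disp.add("scale", "multiply value by factor", &scale,
         LLSDArray("value")("factor"), LLSDArray(1.0)(2.0));
// The map is remapped internally to an LLSD::Array and each entry is
// converted with LLSDParam<float>; "factor" falls back to its default here.
disp("scale", LLSDMap("value", 3.5));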
/// Unregister a callable
bool remove(const std::string& name);
@ -109,12 +259,25 @@ public:
/// the @a required prototype specified at add() time, die with LL_ERRS.
void operator()(const std::string& name, const LLSD& event) const;
/// Call a registered callable with an explicitly-specified name and
/// return <tt>true</tt>. If no such callable exists, return
/// <tt>false</tt>. If the @a event fails to match the @a required
/// prototype specified at add() time, die with LL_ERRS.
bool try_call(const std::string& name, const LLSD& event) const;
/// Extract the @a key value from the incoming @a event, and call the
/// callable whose name is specified by that map @a key. If no such
/// callable exists, die with LL_ERRS. If the @a event fails to match the
/// @a required prototype specified at add() time, die with LL_ERRS.
void operator()(const LLSD& event) const;
/// Extract the @a key value from the incoming @a event, call the callable
/// whose name is specified by that map @a key and return <tt>true</tt>.
/// If no such callable exists, return <tt>false</tt>. If the @a event
/// fails to match the @a required prototype specified at add() time, die
/// with LL_ERRS.
bool try_call(const LLSD& event) const;
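For illustration (the handler and operation names are hypothetical), given a dispatcher constructed as LLEventDispatcher disp("demo", "op"), an entry registered through the Callable overload can be reached either way:
void echo(const LLSD& event) { llinfos << event << llendl; }
// The optional 'required' prototype insists that events carry a string "msg".
disp.add("echo", "log the incoming event", &echo,
         LLSDMap("msg", LLSD::String()));
// Explicit name: dies with LL_ERRS if "echo" is unknown.
disp("echo", LLSDMap("msg", "hello"));
// Name taken from the dispatcher's key ("op"): returns false instead of
// dying when the name is unknown.
if (! disp.try_call(LLSDMap("op", "echo")("msg", "hello")))
{
    llwarns << "no such operation" << llendl;
}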
/// @name Iterate over defined names
//@{
typedef std::pair<std::string, std::string> NameDesc;
@ -122,16 +285,22 @@ public:
private:
struct DispatchEntry
{
DispatchEntry(const Callable& func, const std::string& desc, const LLSD& required):
mFunc(func),
mDesc(desc),
mRequired(required)
{}
Callable mFunc;
DispatchEntry(const std::string& desc);
virtual ~DispatchEntry() {} // suppress MSVC warning, sigh
std::string mDesc;
LLSD mRequired;
virtual void call(const std::string& desc, const LLSD& event) const = 0;
virtual LLSD addMetadata(LLSD) const = 0;
};
typedef std::map<std::string, DispatchEntry> DispatchMap;
// Tried using boost::ptr_map<std::string, DispatchEntry>, but ptr_map<>
// wants its value type to be "clonable," even just to dereference an
// iterator. I don't want to clone entries -- if I have to copy an entry
// around, I want it to continue pointing to the same DispatchEntry
// subclass object. However, I definitely want DispatchMap to destroy
// DispatchEntry if no references are outstanding at the time an entry is
// removed. This looks like a job for boost::shared_ptr.
typedef std::map<std::string, boost::shared_ptr<DispatchEntry> > DispatchMap;
public:
/// We want the flexibility to redefine what data we store per name,
@ -149,10 +318,6 @@ public:
}
//@}
/// Fetch the Callable for the specified name. If no such name was
/// registered, return an empty() Callable.
Callable get(const std::string& name) const;
/// Get information about a specific Callable
LLSD getMetadata(const std::string& name) const;
@ -175,18 +340,184 @@ private:
}
}
void addFail(const std::string& name, const std::string& classname) const;
/// try to dispatch, return @c true if success
bool attemptCall(const std::string& name, const LLSD& event) const;
std::string mDesc, mKey;
DispatchMap mDispatch;
static NameDesc makeNameDesc(const DispatchMap::value_type& item)
{
return NameDesc(item.first, item.second.mDesc);
return NameDesc(item.first, item.second->mDesc);
}
struct LLSDDispatchEntry;
struct ParamsDispatchEntry;
struct ArrayParamsDispatchEntry;
struct MapParamsDispatchEntry;
// Step 2 of parameter analysis. Instantiating invoker<some_function_type>
// implicitly sets its From and To parameters to the (compile time) begin
// and end iterators over that function's parameter types.
template< typename Function
, class From = typename boost::mpl::begin< boost::function_types::parameter_types<Function> >::type
, class To = typename boost::mpl::end< boost::function_types::parameter_types<Function> >::type
>
struct invoker;
// deliver LLSD arguments one at a time
typedef boost::function<LLSD()> args_source;
// obtain args from an args_source to build param list and call target
// function
typedef boost::function<void(const args_source&)> invoker_function;
template <typename Function>
invoker_function make_invoker(Function f);
template <typename Method, typename InstanceGetter>
invoker_function make_invoker(Method f, const InstanceGetter& getter);
void addArrayParamsDispatchEntry(const std::string& name,
const std::string& desc,
const invoker_function& invoker,
LLSD::Integer arity);
void addMapParamsDispatchEntry(const std::string& name,
const std::string& desc,
const invoker_function& invoker,
const LLSD& params,
const LLSD& defaults);
};
/*****************************************************************************
* LLEventDispatcher template implementation details
*****************************************************************************/
// Step 3 of parameter analysis, the recursive case.
template<typename Function, class From, class To>
struct LLEventDispatcher::invoker
{
template<typename T>
struct remove_cv_ref
: boost::remove_cv< typename boost::remove_reference<T>::type >
{ };
// apply() accepts an arbitrary boost::fusion sequence as args. It
// examines the next parameter type in the parameter-types sequence
// bounded by From and To, obtains the next LLSD object from the passed
// args_source and constructs an LLSDParam of appropriate type to try
// to convert the value. It then recurs with the next parameter-types
// iterator, passing the args sequence thus far.
template<typename Args>
static inline
void apply(Function func, const args_source& argsrc, Args const & args)
{
typedef typename boost::mpl::deref<From>::type arg_type;
typedef typename boost::mpl::next<From>::type next_iter_type;
typedef typename remove_cv_ref<arg_type>::type plain_arg_type;
invoker<Function, next_iter_type, To>::apply
( func, argsrc, boost::fusion::push_back(args, LLSDParam<plain_arg_type>(argsrc())));
}
// Special treatment for instance (first) parameter of a non-static member
// function. Accept the instance-getter callable, calling that to produce
// the first args value. Since we know we're at the top of the recursion
// chain, we need not also require a partial args sequence from our caller.
template <typename InstanceGetter>
static inline
void method_apply(Function func, const args_source& argsrc, const InstanceGetter& getter)
{
typedef typename boost::mpl::next<From>::type next_iter_type;
// Instead of grabbing the first item from argsrc and making an
// LLSDParam of it, call getter() and pass that as the instance param.
invoker<Function, next_iter_type, To>::apply
( func, argsrc, boost::fusion::push_back(boost::fusion::nil(), boost::ref(getter())));
}
};
// Step 4 of parameter analysis, the leaf case. When the general
// invoker<Function, From, To> logic has advanced From until it matches To,
// the compiler will pick this template specialization.
template<typename Function, class To>
struct LLEventDispatcher::invoker<Function,To,To>
{
// the argument list is complete, now call the function
template<typename Args>
static inline
void apply(Function func, const args_source&, Args const & args)
{
boost::fusion::invoke(func, args);
}
};
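To make the recursion concrete, here is roughly what the machinery above boils down to for a hypothetical two-parameter target; this is the shape of the expansion, not actual generated code:
void set_pos(float x, float y);
// invoker<void(*)(float, float)>::apply(f, argsrc, boost::fusion::nil())
// peels one parameter type per step, pulling one LLSD value from argsrc
// each time, so by the time the <To, To> leaf specialization runs it
// performs approximately:
boost::fusion::invoke(&set_pos,
    boost::fusion::push_back(
        boost::fusion::push_back(boost::fusion::nil(),
                                 LLSDParam<float>(argsrc())),  // x
        LLSDParam<float>(argsrc())));                          // y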
template<typename Function>
typename boost::enable_if< boost::function_types::is_nonmember_callable_builtin<Function> >::type
LLEventDispatcher::add(const std::string& name, const std::string& desc, Function f)
{
// Construct an invoker_function, a callable accepting const args_source&.
// Add to DispatchMap an ArrayParamsDispatchEntry that will handle the
// caller's LLSD::Array.
addArrayParamsDispatchEntry(name, desc, make_invoker(f),
boost::function_types::function_arity<Function>::value);
}
template<typename Method, typename InstanceGetter>
typename boost::enable_if< boost::function_types::is_member_function_pointer<Method> >::type
LLEventDispatcher::add(const std::string& name, const std::string& desc, Method f,
const InstanceGetter& getter)
{
// Subtract 1 from the compile-time arity because the getter takes care of
// the first parameter. We only need (arity - 1) additional arguments.
addArrayParamsDispatchEntry(name, desc, make_invoker(f, getter),
boost::function_types::function_arity<Method>::value - 1);
}
template<typename Function>
typename boost::enable_if< boost::function_types::is_nonmember_callable_builtin<Function> >::type
LLEventDispatcher::add(const std::string& name, const std::string& desc, Function f,
const LLSD& params, const LLSD& defaults)
{
// See comments for previous is_nonmember_callable_builtin add().
addMapParamsDispatchEntry(name, desc, make_invoker(f), params, defaults);
}
template<typename Method, typename InstanceGetter>
typename boost::enable_if< boost::function_types::is_member_function_pointer<Method> >::type
LLEventDispatcher::add(const std::string& name, const std::string& desc, Method f,
const InstanceGetter& getter,
const LLSD& params, const LLSD& defaults)
{
addMapParamsDispatchEntry(name, desc, make_invoker(f, getter), params, defaults);
}
template <typename Function>
LLEventDispatcher::invoker_function
LLEventDispatcher::make_invoker(Function f)
{
// Step 1 of parameter analysis, the top of the recursion. Passing a
// suitable f (see add()'s enable_if condition) to this method causes it
// to infer the function type; specifying that function type to invoker<>
// causes it to fill in the begin/end MPL iterators over the function's
// list of parameter types.
// While normally invoker::apply() could infer its template type from the
// boost::fusion::nil parameter value, here we must be explicit since
// we're boost::bind()ing it rather than calling it directly.
return boost::bind(&invoker<Function>::template apply<boost::fusion::nil>,
f,
_1,
boost::fusion::nil());
}
template <typename Method, typename InstanceGetter>
LLEventDispatcher::invoker_function
LLEventDispatcher::make_invoker(Method f, const InstanceGetter& getter)
{
// Use invoker::method_apply() to treat the instance (first) arg specially.
return boost::bind(&invoker<Method>::template method_apply<InstanceGetter>,
f,
_1,
getter);
}
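A sketch of the instance-getter pattern described in the add() documentation (the class and entry names are hypothetical, and 'disp' is the dispatcher from the earlier sketches); boost::lambda::var() supplies the instance, the LLSD::Array supplies the remaining parameters:
#include <boost/lambda/lambda.hpp>
struct Turret
{
    void aim(float pitch, float yaw);
};
Turret gTurret;
disp.add("aim", "aim the turret", &Turret::aim, boost::lambda::var(gTurret));
// function_arity counts 'this', so the entry's arity is 3 - 1 = 2 and
// callers pass a two-entry LLSD::Array.
disp("aim", LLSDArray(30.0)(45.0));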
/*****************************************************************************
* LLDispatchListener
*****************************************************************************/
/**
* Bundle an LLEventPump and a listener with an LLEventDispatcher. A class
* that contains (or derives from) LLDispatchListener need only specify the

View File

@ -588,3 +588,16 @@ void LLReqID::stamp(LLSD& response) const
}
response["reqid"] = mReqid;
}
bool sendReply(const LLSD& reply, const LLSD& request, const std::string& replyKey)
{
// Copy 'reply' to modify it.
LLSD newreply(reply);
// Get the ["reqid"] element from request
LLReqID reqID(request);
// and copy it to 'newreply'.
reqID.stamp(newreply);
// Send reply on LLEventPump named in request[replyKey]. Don't forget to
// send the modified 'newreply' instead of the original 'reply'.
return LLEventPumps::instance().obtain(request[replyKey]).post(newreply);
}

View File

@ -691,6 +691,20 @@ private:
LLSD mReqid;
};
/**
* Conventionally send a reply to a request event.
*
* @a reply is the LLSD reply event to send
* @a request is the corresponding LLSD request event
* @a replyKey is the key in the @a request event, conventionally ["reply"],
* whose value is the name of the LLEventPump on which to send the reply.
*
* Before sending the reply event, sendReply() copies the ["reqid"] item from
* the request to the reply.
*/
LL_COMMON_API bool sendReply(const LLSD& reply, const LLSD& request,
const std::string& replyKey="reply");
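A typical request handler (the handler name is hypothetical) following the convention above:
void handleQuery(const LLSD& request)
{
    LLSD reply;
    reply["status"] = "ok";
    // Copies request["reqid"] into the reply, then posts it on the
    // LLEventPump named by request["reply"].
    sendReply(reply, request);
}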
/**
* Base class for LLListenerWrapper. See visit_and_connect() and llwrap(). We
* provide virtual @c accept_xxx() methods, customization points allowing a

View File

@ -32,6 +32,17 @@
// external library headers
// other Linden headers
// llinstancetracker.h is presently header-only. This file exists only because our CMake
// test macro ADD_BUILD_TEST requires it.
int dummy = 0;
//static
void * & LLInstanceTrackerBase::getInstances(std::type_info const & info)
{
static std::map<std::string, void *> instances;
std::string k = info.name();
if(instances.find(k) == instances.end())
{
instances[k] = NULL;
}
return instances[k];
}

View File

@ -37,14 +37,21 @@
#include <boost/iterator/transform_iterator.hpp>
#include <boost/iterator/indirect_iterator.hpp>
class LL_COMMON_API LLInstanceTrackerBase : public boost::noncopyable
{
protected:
static void * & getInstances(std::type_info const & info);
};
/// This mix-in class adds support for tracking all instances of the specified class parameter T
/// The (optional) key associates a value of type KEY with a given instance of T, for quick lookup
/// If KEY is not provided, then instances are stored in a simple set
/// @NOTE: see explicit specialization below for default KEY==T* case
template<typename T, typename KEY = T*>
class LLInstanceTracker : boost::noncopyable
class LLInstanceTracker : public LLInstanceTrackerBase
{
typedef typename std::map<KEY, T*> InstanceMap;
typedef LLInstanceTracker<T, KEY> MyT;
typedef boost::function<const KEY&(typename InstanceMap::value_type&)> KeyGetter;
typedef boost::function<T*(typename InstanceMap::value_type&)> InstancePtrGetter;
public:
@ -99,25 +106,26 @@ private:
static InstanceMap& getMap_()
{
if (! sInstances)
void * & instances = getInstances(typeid(MyT));
if (! instances)
{
sInstances = new InstanceMap;
instances = new InstanceMap;
}
return *sInstances;
return * static_cast<InstanceMap*>(instances);
}
private:
KEY mKey;
static InstanceMap* sInstances;
};
/// explicit specialization for default case where KEY is T*
/// use a simple std::set<T*>
template<typename T>
class LLInstanceTracker<T, T*>
class LLInstanceTracker<T, T*> : public LLInstanceTrackerBase
{
typedef typename std::set<T*> InstanceSet;
typedef LLInstanceTracker<T, T*> MyT;
public:
/// Dereferencing key_iter gives you a T* (since T* is the key)
typedef typename InstanceSet::iterator key_iter;
@ -172,19 +180,17 @@ protected:
static InstanceSet& getSet_()
{
if (! sInstances)
void * & instances = getInstances(typeid(MyT));
if (! instances)
{
sInstances = new InstanceSet;
instances = new InstanceSet;
}
return *sInstances;
return * static_cast<InstanceSet *>(instances);
}
static InstanceSet* sInstances;
static S32 sIterationNestDepth;
};
template <typename T, typename KEY> typename LLInstanceTracker<T, KEY>::InstanceMap* LLInstanceTracker<T, KEY>::sInstances = NULL;
template <typename T> typename LLInstanceTracker<T, T*>::InstanceSet* LLInstanceTracker<T, T*>::sInstances = NULL;
template <typename T> S32 LLInstanceTracker<T, T*>::sIterationNestDepth = 0;
#endif
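For illustration, a class opts into per-type tracking simply by deriving; Widget is hypothetical, and the getInstance() lookup is assumed from the rest of this template rather than shown in this hunk:
class Widget : public LLInstanceTracker<Widget, std::string>
{
public:
    Widget(const std::string& name):
        LLInstanceTracker<Widget, std::string>(name)
    {}
};
Widget a("alpha"), b("beta");
Widget* found = Widget::getInstance("alpha");  // NULL if "alpha" was never registered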

View File

@ -63,7 +63,18 @@ BOOL LLMetricPerformanceTesterBasic::addTester(LLMetricPerformanceTesterBasic* t
sTesterMap.insert(std::make_pair(name, tester));
return TRUE;
}
/*static*/
void LLMetricPerformanceTesterBasic::deleteTester(std::string name)
{
name_tester_map_t::iterator tester = sTesterMap.find(name);
if (tester != sTesterMap.end())
{
delete tester->second;
sTesterMap.erase(tester);
}
}
/*static*/
LLMetricPerformanceTesterBasic* LLMetricPerformanceTesterBasic::getTester(std::string name)
{
@ -83,7 +94,78 @@ BOOL LLMetricPerformanceTesterBasic::isMetricLogRequested(std::string name)
return (LLFastTimer::sMetricLog && ((LLFastTimer::sLogName == name) || (LLFastTimer::sLogName == DEFAULT_METRIC_NAME)));
}
/*static*/
LLSD LLMetricPerformanceTesterBasic::analyzeMetricPerformanceLog(std::istream& is)
{
LLSD ret;
LLSD cur;
while (!is.eof() && LLSDSerialize::fromXML(cur, is))
{
for (LLSD::map_iterator iter = cur.beginMap(); iter != cur.endMap(); ++iter)
{
std::string label = iter->first;
LLMetricPerformanceTesterBasic* tester = LLMetricPerformanceTesterBasic::getTester(iter->second["Name"].asString()) ;
if(tester)
{
ret[label]["Name"] = iter->second["Name"] ;
S32 num_of_metrics = tester->getNumberOfMetrics() ;
for(S32 index = 0 ; index < num_of_metrics ; index++)
{
ret[label][ tester->getMetricName(index) ] = iter->second[ tester->getMetricName(index) ] ;
}
}
}
}
return ret;
}
/*static*/
void LLMetricPerformanceTesterBasic::doAnalysisMetrics(std::string baseline, std::string target, std::string output)
{
if(!LLMetricPerformanceTesterBasic::hasMetricPerformanceTesters())
{
return ;
}
// Open baseline and current target; bail out if either file is missing
std::ifstream base_is(baseline.c_str());
std::ifstream target_is(target.c_str());
if (!base_is.is_open() || !target_is.is_open())
{
llwarns << "'-analyzeperformance' error : baseline or current target file inexistent" << llendl;
base_is.close();
target_is.close();
return;
}
//analyze baseline
LLSD base = analyzeMetricPerformanceLog(base_is);
base_is.close();
//analyze current
LLSD current = analyzeMetricPerformanceLog(target_is);
target_is.close();
//output comparison
std::ofstream os(output.c_str());
os << "Label, Metric, Base(B), Target(T), Diff(T-B), Percentage(100*T/B)\n";
for(LLMetricPerformanceTesterBasic::name_tester_map_t::iterator iter = LLMetricPerformanceTesterBasic::sTesterMap.begin() ;
iter != LLMetricPerformanceTesterBasic::sTesterMap.end() ; ++iter)
{
LLMetricPerformanceTesterBasic* tester = ((LLMetricPerformanceTesterBasic*)iter->second) ;
tester->analyzePerformance(&os, &base, &current) ;
}
os.flush();
os.close();
}
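Invocation sketch (the file names are hypothetical): compare a saved baseline run against the current one and emit the CSV comparison described by the header row above.
LLMetricPerformanceTesterBasic::doAnalysisMetrics("baseline_metric.xml",
                                                  "current_metric.xml",
                                                  "metric_report.csv");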
//----------------------------------------------------------------------------------------------
// LLMetricPerformanceTesterBasic : Tester instance methods
//----------------------------------------------------------------------------------------------

View File

@ -62,6 +62,8 @@ public:
*/
virtual void analyzePerformance(std::ofstream* os, LLSD* base, LLSD* current) ;
static void doAnalysisMetrics(std::string baseline, std::string target, std::string output) ;
/**
* @return Returns the number of the test metrics in this tester instance.
*/
@ -116,6 +118,7 @@ protected:
private:
void preOutputTestResults(LLSD* sd) ;
void postOutputTestResults(LLSD* sd) ;
static LLSD analyzeMetricPerformanceLog(std::istream& is) ;
std::string mName ; // Name of this tester instance
S32 mCount ; // Current record count
@ -134,6 +137,12 @@ public:
*/
static LLMetricPerformanceTesterBasic* getTester(std::string name) ;
/**
* Deletes the named tester from the list.
* @param[in] name - Name of the tester instance to delete.
*/
static void deleteTester(std::string name);
/**
* @return Returns TRUE if that metric *or* the default catch all metric has been requested to be logged
* @param[in] name - Name of the tester queried.

View File

@ -103,10 +103,30 @@ int LLProcessLauncher::launch(void)
char *args2 = new char[args.size() + 1];
strcpy(args2, args.c_str());
if( ! CreateProcessA( NULL, args2, NULL, NULL, FALSE, 0, NULL, NULL, &sinfo, &pinfo ) )
const char * working_directory = 0;
if(!mWorkingDir.empty()) working_directory = mWorkingDir.c_str();
if( ! CreateProcessA( NULL, args2, NULL, NULL, FALSE, 0, NULL, working_directory, &sinfo, &pinfo ) )
{
// TODO: do better than returning the OS-specific error code on failure...
result = GetLastError();
LPTSTR error_str = 0;
if(
FormatMessage( FORMAT_MESSAGE_ALLOCATE_BUFFER | FORMAT_MESSAGE_FROM_SYSTEM,
NULL,
result,
0,
(LPTSTR)&error_str,
0,
NULL)
!= 0)
{
char message[256];
wcstombs(message, error_str, 256);
message[255] = 0;
llwarns << "CreateProcessA failed: " << message << llendl;
LocalFree(error_str);
}
if(result == 0)
{
// Make absolutely certain we return a non-zero value on failure.

View File

@ -41,6 +41,7 @@
#include "llsdserialize.h"
#include "stringize.h"
#include "is_approx_equal_fraction.h"
#include <map>
#include <set>
@ -571,7 +572,7 @@ std::string llsd_matches(const LLSD& prototype, const LLSD& data, const std::str
return match_types(prototype.type(), TypeVector(), data.type(), pfx);
}
bool llsd_equals(const LLSD& lhs, const LLSD& rhs)
bool llsd_equals(const LLSD& lhs, const LLSD& rhs, int bits)
{
// We're comparing strict equality of LLSD representation rather than
// performing any conversions. So if the types aren't equal, the LLSD
@ -588,6 +589,20 @@ bool llsd_equals(const LLSD& lhs, const LLSD& rhs)
// Both are TypeUndefined. There's nothing more to know.
return true;
case LLSD::TypeReal:
// This is where the 'bits' argument comes in handy. If passed
// explicitly, it means to use is_approx_equal_fraction() to compare.
if (bits >= 0)
{
return is_approx_equal_fraction(lhs.asReal(), rhs.asReal(), bits);
}
// Otherwise we compare bit representations, and the usual caveats
// about comparing floating-point numbers apply. Omitting 'bits' when
// comparing Real values is only useful when we expect identical bit
// representation for a given Real value, e.g. for integer-valued
// Reals.
return (lhs.asReal() == rhs.asReal());
#define COMPARE_SCALAR(type) \
case LLSD::Type##type: \
/* LLSD::URI has operator!=() but not operator==() */ \
@ -596,10 +611,6 @@ bool llsd_equals(const LLSD& lhs, const LLSD& rhs)
COMPARE_SCALAR(Boolean);
COMPARE_SCALAR(Integer);
// The usual caveats about comparing floating-point numbers apply. This is
// only useful when we expect identical bit representation for a given
// Real value, e.g. for integer-valued Reals.
COMPARE_SCALAR(Real);
COMPARE_SCALAR(String);
COMPARE_SCALAR(UUID);
COMPARE_SCALAR(Date);
@ -617,7 +628,7 @@ bool llsd_equals(const LLSD& lhs, const LLSD& rhs)
for ( ; lai != laend && rai != raend; ++lai, ++rai)
{
// If any one array element is unequal, the arrays are unequal.
if (! llsd_equals(*lai, *rai))
if (! llsd_equals(*lai, *rai, bits))
return false;
}
// Here we've reached the end of one or the other array. They're equal
@ -644,7 +655,7 @@ bool llsd_equals(const LLSD& lhs, const LLSD& rhs)
if (rhskeys.erase(lmi->first) != 1)
return false;
// Both maps have the current key. Compare values.
if (! llsd_equals(lmi->second, rhs[lmi->first]))
if (! llsd_equals(lmi->second, rhs[lmi->first], bits))
return false;
}
// We've now established that all the lhs keys have equal values in
@ -657,7 +668,7 @@ bool llsd_equals(const LLSD& lhs, const LLSD& rhs)
// We expect that every possible type() value is specifically handled
// above. Failing to extend this switch to support a new LLSD type is
// an error that must be brought to the coder's attention.
LL_ERRS("llsd_equals") << "llsd_equals(" << lhs << ", " << rhs << "): "
LL_ERRS("llsd_equals") << "llsd_equals(" << lhs << ", " << rhs << ", " << bits << "): "
"unknown type " << lhs.type() << LL_ENDL;
return false; // pacify the compiler
}

View File

@ -123,8 +123,10 @@ LL_COMMON_API BOOL compare_llsd_with_template(
*/
LL_COMMON_API std::string llsd_matches(const LLSD& prototype, const LLSD& data, const std::string& pfx="");
/// Deep equality
LL_COMMON_API bool llsd_equals(const LLSD& lhs, const LLSD& rhs);
/// Deep equality. If you want to compare LLSD::Real values for approximate
/// equality rather than bitwise equality, pass @a bits as for
/// is_approx_equal_fraction().
LL_COMMON_API bool llsd_equals(const LLSD& lhs, const LLSD& rhs, int bits=-1);
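A quick illustration of the new 'bits' parameter (the values are arbitrary):
LLSD a(1.0), b(1.0 + 1.0e-12);
bool exact  = llsd_equals(a, b);      // false: Real values compared exactly
bool approx = llsd_equals(a, b, 20);  // true: compared via is_approx_equal_fraction()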
// Simple function to copy data out of input & output iterators if
// there is no need for casting.
@ -138,4 +140,283 @@ template<typename Input> LLSD llsd_copy_array(Input iter, Input end)
return dest;
}
/*****************************************************************************
* LLSDArray
*****************************************************************************/
/**
* Construct an LLSD::Array inline, with implicit conversion to LLSD. Usage:
*
* @code
* void somefunc(const LLSD&);
* ...
* somefunc(LLSDArray("text")(17)(3.14));
* @endcode
*
* For completeness, LLSDArray() with no args constructs an empty array, so
* <tt>LLSDArray()("text")(17)(3.14)</tt> produces an array equivalent to the
* above. But for most purposes, LLSD() is already equivalent to an empty
* array, and if you explicitly want an empty isArray(), there's
* LLSD::emptyArray(). However, supporting a no-args LLSDArray() constructor
* follows the principle of least astonishment.
*/
class LLSDArray
{
public:
LLSDArray():
_data(LLSD::emptyArray())
{}
/**
* Need an explicit copy constructor. Consider the following:
*
* @code
* LLSD array_of_arrays(LLSDArray(LLSDArray(17)(34))
* (LLSDArray("x")("y")));
* @endcode
*
* The coder intends to construct [[17, 34], ["x", "y"]].
*
* With the compiler's implicit copy constructor, s/he gets instead
* [17, 34, ["x", "y"]].
*
* The expression LLSDArray(17)(34) constructs an LLSDArray with those two
* values. The reader assumes it should be converted to LLSD, as we always
* want with LLSDArray, before passing it to the @em outer LLSDArray
* constructor! This copy constructor makes that happen.
*/
LLSDArray(const LLSDArray& inner):
_data(LLSD::emptyArray())
{
_data.append(inner);
}
LLSDArray(const LLSD& value):
_data(LLSD::emptyArray())
{
_data.append(value);
}
LLSDArray& operator()(const LLSD& value)
{
_data.append(value);
return *this;
}
operator LLSD() const { return _data; }
LLSD get() const { return _data; }
private:
LLSD _data;
};
/*****************************************************************************
* LLSDMap
*****************************************************************************/
/**
* Construct an LLSD::Map inline, with implicit conversion to LLSD. Usage:
*
* @code
* void somefunc(const LLSD&);
* ...
* somefunc(LLSDMap("alpha", "abc")("number", 17)("pi", 3.14));
* @endcode
*
* For completeness, LLSDMap() with no args constructs an empty map, so
* <tt>LLSDMap()("alpha", "abc")("number", 17)("pi", 3.14)</tt> produces a map
* equivalent to the above. But for most purposes, LLSD() is already
* equivalent to an empty map, and if you explicitly want an empty isMap(),
* there's LLSD::emptyMap(). However, supporting a no-args LLSDMap()
* constructor follows the principle of least astonishment.
*/
class LLSDMap
{
public:
LLSDMap():
_data(LLSD::emptyMap())
{}
LLSDMap(const LLSD::String& key, const LLSD& value):
_data(LLSD::emptyMap())
{
_data[key] = value;
}
LLSDMap& operator()(const LLSD::String& key, const LLSD& value)
{
_data[key] = value;
return *this;
}
operator LLSD() const { return _data; }
LLSD get() const { return _data; }
private:
LLSD _data;
};
/*****************************************************************************
* LLSDParam
*****************************************************************************/
/**
* LLSDParam is a customization point for passing LLSD values to function
* parameters of more or less arbitrary type. LLSD provides a small set of
* native conversions; but if a generic algorithm explicitly constructs an
* LLSDParam object in the function's argument list, a consumer can provide
* LLSDParam specializations to support more different parameter types than
* LLSD's native conversions.
*
* Usage:
*
* @code
* void somefunc(const paramtype&);
* ...
* somefunc(..., LLSDParam<paramtype>(someLLSD), ...);
* @endcode
*/
template <typename T>
class LLSDParam
{
public:
/**
* Default implementation converts to T on construction, saves converted
* value for later retrieval
*/
LLSDParam(const LLSD& value):
_value(value)
{}
operator T() const { return _value; }
private:
T _value;
};
/**
* Turns out that several target types could accept an LLSD param using any of
* a few different conversions, e.g. LLUUID's constructor can accept LLUUID or
* std::string. Therefore, the compiler can't decide which LLSD conversion
* operator to choose, even though to us it seems obvious. But that's okay, we
* can specialize LLSDParam for such target types, explicitly specifying the
* desired conversion -- that's part of what LLSDParam is all about. Turns out
* we have to do that enough to make it worthwhile generalizing. Use a macro
* because I need to specify one of the asReal, etc., explicit conversion
* methods as well as a type. If I'm overlooking a clever way to implement
* that using a template instead, feel free to reimplement.
*/
#define LLSDParam_for(T, AS) \
template <> \
class LLSDParam<T> \
{ \
public: \
LLSDParam(const LLSD& value): \
_value(value.AS()) \
{} \
\
operator T() const { return _value; } \
\
private: \
T _value; \
}
LLSDParam_for(float, asReal);
LLSDParam_for(LLUUID, asUUID);
LLSDParam_for(LLDate, asDate);
LLSDParam_for(LLURI, asURI);
LLSDParam_for(LLSD::Binary, asBinary);
/**
* LLSDParam<const char*> is an example of the kind of conversion you can
* support with LLSDParam beyond native LLSD conversions. Normally you can't
* pass an LLSD object to a function accepting const char* -- but you can
* safely pass an LLSDParam<const char*>(yourLLSD).
*/
template <>
class LLSDParam<const char*>
{
private:
// The difference here is that we store a std::string rather than a const
// char*. It's important that the LLSDParam object own the std::string.
std::string _value;
// We don't bother storing the incoming LLSD object, but we do have to
// distinguish whether _value is an empty string because the LLSD object
// contains an empty string or because it's isUndefined().
bool _undefined;
public:
LLSDParam(const LLSD& value):
_value(value),
_undefined(value.isUndefined())
{}
// The const char* we retrieve is for storage owned by our _value member.
// That's how we guarantee that the const char* is valid for the lifetime
// of this LLSDParam object. Constructing your LLSDParam in the argument
// list should ensure that the LLSDParam object will persist for the
// duration of the function call.
operator const char*() const
{
if (_undefined)
{
// By default, an isUndefined() LLSD object's asString() method
// will produce an empty string. But for a function accepting
// const char*, it's often important to be able to pass NULL, and
// isUndefined() seems like the best way. If you want to pass an
// empty string, you can still pass LLSD(""). Without this special
// case, though, no LLSD value could pass NULL.
return NULL;
}
return _value.c_str();
}
};
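Putting the pieces together (describe() and its parameters are hypothetical), LLSDParam supplies the per-parameter conversions that a bare LLSD argument could not resolve on its own:
void describe(const char* label, float value, const LLUUID& id);
LLSD data = LLSDMap("label", "radius")("value", 1.5)("id", LLUUID());
describe(LLSDParam<const char*>(data["label"]),  // owns the backing std::string
         LLSDParam<float>(data["value"]),        // converts via asReal()
         LLSDParam<LLUUID>(data["id"]));         // converts via asUUID()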
namespace llsd
{
/*****************************************************************************
* BOOST_FOREACH() helpers for LLSD
*****************************************************************************/
/// Usage: BOOST_FOREACH(LLSD item, inArray(someLLSDarray)) { ... }
class inArray
{
public:
inArray(const LLSD& array):
_array(array)
{}
typedef LLSD::array_const_iterator const_iterator;
typedef LLSD::array_iterator iterator;
iterator begin() { return _array.beginArray(); }
iterator end() { return _array.endArray(); }
const_iterator begin() const { return _array.beginArray(); }
const_iterator end() const { return _array.endArray(); }
private:
LLSD _array;
};
/// MapEntry is what you get from dereferencing an LLSD::map_[const_]iterator.
typedef std::map<LLSD::String, LLSD>::value_type MapEntry;
/// Usage: BOOST_FOREACH([const] MapEntry& e, inMap(someLLSDmap)) { ... }
class inMap
{
public:
inMap(const LLSD& map):
_map(map)
{}
typedef LLSD::map_const_iterator const_iterator;
typedef LLSD::map_iterator iterator;
iterator begin() { return _map.beginMap(); }
iterator end() { return _map.endMap(); }
const_iterator begin() const { return _map.beginMap(); }
const_iterator end() const { return _map.endMap(); }
private:
LLSD _map;
};
} // namespace llsd
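Usage sketch, assuming <boost/foreach.hpp> is available; the LLSD contents are arbitrary:
#include <boost/foreach.hpp>
LLSD numbers = LLSDArray(1)(2)(3);
BOOST_FOREACH(LLSD item, llsd::inArray(numbers))
{
    llinfos << item.asInteger() << llendl;
}
LLSD settings = LLSDMap("width", 2048)("reversible", false);
BOOST_FOREACH(const llsd::MapEntry& e, llsd::inMap(settings))
{
    llinfos << e.first << " = " << e.second << llendl;
}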
#endif // LL_LLSDUTIL_H

View File

@ -46,6 +46,7 @@
# include <sys/sysctl.h>
# include <sys/utsname.h>
# include <stdint.h>
# include <Carbon/Carbon.h>
#elif LL_LINUX
# include <errno.h>
# include <sys/utsname.h>
@ -328,7 +329,58 @@ LLOSInfo::LLOSInfo() :
}
mOSString += compatibility_mode;
#elif LL_DARWIN
// Initialize mOSStringSimple to something like:
// "Mac OS X 10.6.7"
{
const char * DARWIN_PRODUCT_NAME = "Mac OS X";
SInt32 major_version, minor_version, bugfix_version;
OSErr r1 = Gestalt(gestaltSystemVersionMajor, &major_version);
OSErr r2 = Gestalt(gestaltSystemVersionMinor, &minor_version);
OSErr r3 = Gestalt(gestaltSystemVersionBugFix, &bugfix_version);
if((r1 == noErr) && (r2 == noErr) && (r3 == noErr))
{
mMajorVer = major_version;
mMinorVer = minor_version;
mBuild = bugfix_version;
std::stringstream os_version_string;
os_version_string << DARWIN_PRODUCT_NAME << " " << mMajorVer << "." << mMinorVer << "." << mBuild;
// Put it in the OS string we are compiling
mOSStringSimple.append(os_version_string.str());
}
else
{
mOSStringSimple.append("Unable to collect OS info");
}
}
// Initialize mOSString to something like:
// "Mac OS X 10.6.7 Darwin Kernel Version 10.7.0: Sat Jan 29 15:17:16 PST 2011; root:xnu-1504.9.37~1/RELEASE_I386 i386"
struct utsname un;
if(uname(&un) != -1)
{
mOSString = mOSStringSimple;
mOSString.append(" ");
mOSString.append(un.sysname);
mOSString.append(" ");
mOSString.append(un.release);
mOSString.append(" ");
mOSString.append(un.version);
mOSString.append(" ");
mOSString.append(un.machine);
}
else
{
mOSString = mOSStringSimple;
}
#else
struct utsname un;
if(uname(&un) != -1)
{
@ -344,15 +396,7 @@ LLOSInfo::LLOSInfo() :
// Simplify 'Simple'
std::string ostype = mOSStringSimple.substr(0, mOSStringSimple.find_first_of(" ", 0));
if (ostype == "Darwin")
{
// Only care about major Darwin versions, truncate at first '.'
S32 idx1 = mOSStringSimple.find_first_of(".", 0);
std::string simple = mOSStringSimple.substr(0, idx1);
if (simple.length() > 0)
mOSStringSimple = simple;
}
else if (ostype == "Linux")
if (ostype == "Linux")
{
// Only care about major and minor Linux versions, truncate at second '.'
std::string::size_type idx1 = mOSStringSimple.find_first_of(".", 0);

View File

@ -28,9 +28,9 @@
#define LL_LLVERSIONSERVER_H
const S32 LL_VERSION_MAJOR = 2;
const S32 LL_VERSION_MINOR = 1;
const S32 LL_VERSION_PATCH = 0;
const S32 LL_VERSION_BUILD = 13828;
const S32 LL_VERSION_MINOR = 6;
const S32 LL_VERSION_PATCH = 9;
const S32 LL_VERSION_BUILD = 16953;
const char * const LL_CHANNEL = "Second Life Server";

View File

@ -27,12 +27,12 @@
#ifndef LL_LLVERSIONVIEWER_H
#define LL_LLVERSIONVIEWER_H
LL_COMMON_API extern const S32 LL_VERSION_MAJOR;
LL_COMMON_API extern const S32 LL_VERSION_MINOR;
LL_COMMON_API extern const S32 LL_VERSION_PATCH;
LL_COMMON_API extern const S32 LL_VERSION_BUILD;
const S32 LL_VERSION_MAJOR = 2;
const S32 LL_VERSION_MINOR = 6;
const S32 LL_VERSION_PATCH = 9;
const S32 LL_VERSION_BUILD = 16953;
LL_COMMON_API extern const char * const LL_CHANNEL;
const char * const LL_CHANNEL = "Firestorm-private-Romana";
#if LL_DARWIN
const char * const LL_VERSION_BUNDLE_ID = "com.phoenixviewer.firestorm.viewer";

View File

@ -24,8 +24,8 @@ LANGUAGE LANG_ENGLISH, SUBLANG_ENGLISH_US
//
VS_VERSION_INFO VERSIONINFO
FILEVERSION 2,1,1,0
PRODUCTVERSION 2,1,1,0
FILEVERSION 2,6,9,16953
PRODUCTVERSION 2,6,9,16953
FILEFLAGSMASK 0x3fL
#ifdef _DEBUG
FILEFLAGS 0x1L
@ -42,12 +42,12 @@ BEGIN
BEGIN
VALUE "CompanyName", "Phoenix Viewer"
VALUE "FileDescription", "Phoenix Firestorm Viewer Common Library"
VALUE "FileVersion", "2.1.1.0"
VALUE "FileVersion", "2.6.9.16953"
VALUE "InternalName", "llcommon.dll"
VALUE "LegalCopyright", "Copyright © 2010 Phoenix Viewer"
VALUE "OriginalFilename", "llcommon.dll"
VALUE "ProductName", "Phoenix Firestorm Viewer"
VALUE "ProductVersion", "2.1.1.0"
VALUE "ProductVersion", "2.6.9.16953"
END
END
BLOCK "VarFileInfo"

View File

@ -258,10 +258,10 @@ namespace tut
++const_iterator;
ensure_equals(const_iterator->first, "def");
ensure_equals(const_iterator->second, 2);
NameIndexDeps::node_range node_range(nideps.get_node_range());
ensure_equals(instance_from_range<std::vector<int> >(node_range), make< std::vector<int> >(list_of(1)(2)(3)));
*node_range.begin() = 0;
*node_range.begin() = 1;
// NameIndexDeps::node_range node_range(nideps.get_node_range());
// ensure_equals(instance_from_range<std::vector<int> >(node_range), make< std::vector<int> >(list_of(1)(2)(3)));
// *node_range.begin() = 0;
// *node_range.begin() = 1;
NameIndexDeps::const_node_range const_node_range(const_nideps.get_node_range());
ensure_equals(instance_from_range<std::vector<int> >(const_node_range), make< std::vector<int> >(list_of(1)(2)(3)));
NameIndexDeps::const_key_range const_key_range(const_nideps.get_key_range());
@ -278,8 +278,8 @@ namespace tut
def);
ensure_equals(instance_from_range<StringList>(const_nideps.get_after_range(const_nideps.get_range().begin())),
def);
ensure_equals(instance_from_range<StringList>(nideps.get_after_range(nideps.get_node_range().begin())),
def);
// ensure_equals(instance_from_range<StringList>(nideps.get_after_range(nideps.get_node_range().begin())),
// def);
ensure_equals(instance_from_range<StringList>(const_nideps.get_after_range(const_nideps.get_node_range().begin())),
def);
ensure_equals(instance_from_range<StringList>(nideps.get_after_range(nideps.get_key_range().begin())),

View File

@ -48,7 +48,10 @@ namespace
{
static bool fatalWasCalled;
void fatalCall(const std::string&) { fatalWasCalled = true; }
}
namespace tut
{
class TestRecorder : public LLError::Recorder
{
public:
@ -56,7 +59,7 @@ namespace
~TestRecorder() { LLError::removeRecorder(this); }
void recordMessage(LLError::ELevel level,
const std::string& message)
const std::string& message)
{
mMessages.push_back(message);
}
@ -66,12 +69,12 @@ namespace
void setWantsTime(bool t) { mWantsTime = t; }
bool wantsTime() { return mWantsTime; }
std::string message(int n)
{
std::ostringstream test_name;
test_name << "testing message " << n << ", not enough messages";
tut::ensure(test_name.str(), n < countMessages());
return mMessages[n];
}
@ -82,10 +85,7 @@ namespace
bool mWantsTime;
};
}
namespace tut
{
struct ErrorTestData
{
TestRecorder mRecorder;
@ -381,7 +381,7 @@ namespace
}
typedef std::string (*LogFromFunction)(bool);
void testLogName(TestRecorder& recorder, LogFromFunction f,
void testLogName(tut::TestRecorder& recorder, LogFromFunction f,
const std::string& class_name = "")
{
recorder.clearMessages();

File diff suppressed because it is too large Load Diff

View File

@ -452,7 +452,7 @@ namespace tut
checkRoundTrip(msg + " nested arrays", v);
v = LLSD::emptyMap();
fillmap(v, 10, 6); // 10^6 maps
fillmap(v, 10, 3); // 10^3 maps
checkRoundTrip(msg + " many nested maps", v);
}

View File

@ -1254,28 +1254,7 @@ bool LLImageRaw::createFromFile(const std::string &filename, bool j2c_lowest_mip
return false;
}
LLPointer<LLImageFormatted> image;
switch(codec)
{
//case IMG_CODEC_RGB:
case IMG_CODEC_BMP:
image = new LLImageBMP();
break;
case IMG_CODEC_TGA:
image = new LLImageTGA();
break;
case IMG_CODEC_JPEG:
image = new LLImageJPEG();
break;
case IMG_CODEC_J2C:
image = new LLImageJ2C();
break;
case IMG_CODEC_DXT:
image = new LLImageDXT();
break;
default:
return false;
}
LLPointer<LLImageFormatted> image = LLImageFormatted::createFromType(codec);
llassert(image.notNull());
U8 *buffer = image->allocateData(length);

View File

@ -35,8 +35,21 @@
const S32 MIN_IMAGE_MIP = 2; // 4x4, only used for expand/contract power of 2
const S32 MAX_IMAGE_MIP = 11; // 2048x2048
// *TODO : Use MAX_IMAGE_MIP as max discard level and modify j2c management so that the number
// of levels is read from the file's header, not inferred from its size.
const S32 MAX_DISCARD_LEVEL = 5;
// JPEG2000 size constraints
// These are declared here because they are germane to the other image constraints used in the viewer
// and declared in this header. Some come from the JPEG2000 spec, some are conventions specific to SL.
const S32 MAX_DECOMPOSITION_LEVELS = 32; // Number of decomposition levels cannot exceed 32 according to jpeg2000 spec
const S32 MIN_DECOMPOSITION_LEVELS = 5; // the SL viewer will *crash* trying to decode images with fewer than 5 decomposition levels (unless image is small that is)
const S32 MAX_PRECINCT_SIZE = 2048; // No reason to be bigger than MAX_IMAGE_SIZE
const S32 MIN_PRECINCT_SIZE = 4; // Can't be smaller than MIN_BLOCK_SIZE
const S32 MAX_BLOCK_SIZE = 64; // Max total block size is 4096, hence 64x64 when using square blocks
const S32 MIN_BLOCK_SIZE = 4; // Min block dim is 4 according to jpeg2000 spec
const S32 MIN_IMAGE_SIZE = (1<<MIN_IMAGE_MIP); // 4, only used for expand/contract power of 2
const S32 MAX_IMAGE_SIZE = (1<<MAX_IMAGE_MIP); // 2048
const S32 MIN_IMAGE_AREA = MIN_IMAGE_SIZE * MIN_IMAGE_SIZE;
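As a sketch of how these bounds are meant to be applied (mirroring the initEncode() clamping later in this changeset; get_lower_power_two() and llmax() come from llmath):
S32 precincts_size = 3000;  // arbitrary caller request
precincts_size = get_lower_power_two(precincts_size, MAX_PRECINCT_SIZE);  // expect 2048
precincts_size = llmax(precincts_size, MIN_PRECINCT_SIZE);                // never below MIN_PRECINCT_SIZE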
@ -266,13 +279,13 @@ public:
// subclasses must return a prefered file extension (lowercase without a leading dot)
virtual std::string getExtension() = 0;
// calcHeaderSize() returns the maximum size of header;
// 0 indicates we don't know have a header and have to lead the entire file
// 0 indicates we don't have a header and have to read the entire file
virtual S32 calcHeaderSize() { return 0; };
// calcDataSize() returns how many bytes to read to load discard_level (including header)
virtual S32 calcDataSize(S32 discard_level);
// calcDiscardLevelBytes() returns the smallest valid discard level based on the number of input bytes
virtual S32 calcDiscardLevelBytes(S32 bytes);
// getRawDiscardLevel()by default returns mDiscardLevel, but may be overridden (LLImageJ2C)
// getRawDiscardLevel() by default returns mDiscardLevel, but may be overridden (LLImageJ2C)
virtual S8 getRawDiscardLevel() { return mDiscardLevel; }
BOOL load(const std::string& filename);

View File

@ -94,7 +94,7 @@ bool LLImageDimensionsInfo::getImageDimensionsBmp()
}
// Read image dimensions.
mInfile.seek(APR_CUR, 16); // BMP header (14) + DIB header size (4) - signature (2)
mInfile.seek(APR_CUR, 16);
mWidth = read_reverse_s32();
mHeight = read_reverse_s32();
@ -172,8 +172,8 @@ bool LLImageDimensionsInfo::getImageDimensionsJpeg()
/* Make sure this is a JPEG file. */
const size_t JPEG_MAGIC_SIZE = 2;
const U8 jpeg_magic[JPEG_MAGIC_SIZE] = {0xFF, 0xD8};
U8 signature[JPEG_MAGIC_SIZE];
const uint8_t jpeg_magic[JPEG_MAGIC_SIZE] = {0xFF, 0xD8};
uint8_t signature[JPEG_MAGIC_SIZE];
if (fread(signature, sizeof(signature), 1, fp) != 1)
{

View File

@ -139,6 +139,15 @@ BOOL LLImageJ2C::updateData()
return res;
}
BOOL LLImageJ2C::initDecode(LLImageRaw &raw_image, int discard_level, int* region)
{
return mImpl->initDecode(*this,raw_image,discard_level,region);
}
BOOL LLImageJ2C::initEncode(LLImageRaw &raw_image, int blocks_size, int precincts_size, int levels)
{
return mImpl->initEncode(*this,raw_image,blocks_size,precincts_size,levels);
}
BOOL LLImageJ2C::decode(LLImageRaw *raw_imagep, F32 decode_time)
{
@ -251,6 +260,9 @@ S32 LLImageJ2C::calcHeaderSizeJ2C()
//static
S32 LLImageJ2C::calcDataSizeJ2C(S32 w, S32 h, S32 comp, S32 discard_level, F32 rate)
{
// Note: this only provides an *estimate* of the size in bytes of an image level
// *TODO: find a way to read the true size (when available) and convey the fact
// that the result is an estimate in the other cases
if (rate <= 0.f) rate = .125f;
while (discard_level > 0)
{
@ -474,6 +486,7 @@ LLImageCompressionTester::LLImageCompressionTester() : LLMetricPerformanceTester
LLImageCompressionTester::~LLImageCompressionTester()
{
outputTestResults();
LLImageJ2C::sTesterp = NULL;
}

View File

@ -56,6 +56,8 @@ public:
/*virtual*/ void resetLastError();
/*virtual*/ void setLastError(const std::string& message, const std::string& filename = std::string());
BOOL initDecode(LLImageRaw &raw_image, int discard_level, int* region);
BOOL initEncode(LLImageRaw &raw_image, int blocks_size, int precincts_size, int levels);
// Encode with comment text
BOOL encode(const LLImageRaw *raw_imagep, const char* comment_text, F32 encode_time=0.0);
@ -117,6 +119,8 @@ protected:
virtual BOOL decodeImpl(LLImageJ2C &base, LLImageRaw &raw_image, F32 decode_time, S32 first_channel, S32 max_channel_count) = 0;
virtual BOOL encodeImpl(LLImageJ2C &base, const LLImageRaw &raw_image, const char* comment_text, F32 encode_time=0.0,
BOOL reversible=FALSE) = 0;
virtual BOOL initDecode(LLImageJ2C &base, LLImageRaw &raw_image, int discard_level = -1, int* region = NULL) = 0;
virtual BOOL initEncode(LLImageJ2C &base, LLImageRaw &raw_image, int blocks_size = -1, int precincts_size = -1, int levels = 0) = 0;
friend class LLImageJ2C;
};

View File

@ -107,6 +107,17 @@ LLImageJ2COJ::~LLImageJ2COJ()
{
}
BOOL LLImageJ2COJ::initDecode(LLImageJ2C &base, LLImageRaw &raw_image, int discard_level, int* region)
{
// No specific implementation for this method in the OpenJpeg case
return FALSE;
}
BOOL LLImageJ2COJ::initEncode(LLImageJ2C &base, LLImageRaw &raw_image, int blocks_size, int precincts_size, int levels)
{
// No specific implementation for this method in the OpenJpeg case
return FALSE;
}
BOOL LLImageJ2COJ::decodeImpl(LLImageJ2C &base, LLImageRaw &raw_image, F32 decode_time, S32 first_channel, S32 max_channel_count)
{

View File

@ -39,6 +39,8 @@ protected:
/*virtual*/ BOOL decodeImpl(LLImageJ2C &base, LLImageRaw &raw_image, F32 decode_time, S32 first_channel, S32 max_channel_count);
/*virtual*/ BOOL encodeImpl(LLImageJ2C &base, const LLImageRaw &raw_image, const char* comment_text, F32 encode_time=0.0,
BOOL reversible = FALSE);
/*virtual*/ BOOL initDecode(LLImageJ2C &base, LLImageRaw &raw_image, int discard_level = -1, int* region = NULL);
/*virtual*/ BOOL initEncode(LLImageJ2C &base, LLImageRaw &raw_image, int blocks_size = -1, int precincts_size = -1, int levels = 0);
};
#endif

View File

@ -72,6 +72,7 @@ static const std::string PARCEL_CATEGORY_STRING[LLParcel::C_COUNT] =
"shopping",
"stage",
"other",
"rental"
};
static const std::string PARCEL_CATEGORY_UI_STRING[LLParcel::C_COUNT + 1] =
{
@ -89,6 +90,7 @@ static const std::string PARCEL_CATEGORY_UI_STRING[LLParcel::C_COUNT + 1] =
"Shopping",
"Stage",
"Other",
"Rental",
"Any", // valid string for parcel searches
};
@ -683,7 +685,7 @@ void LLParcel::packMessage(LLSD& msg)
msg["auto_scale"] = getMediaAutoScale();
msg["media_loop"] = getMediaLoop();
msg["media_current_url"] = getMediaCurrentURL();
msg["obscure_media"] = false; // OBSOLETE - no longer used
msg["obscure_media"] = false; // OBSOLETE - no longer used
msg["obscure_music"] = false; // OBSOLETE - no longer used
msg["media_id"] = getMediaID();
msg["media_allow_navigate"] = getMediaAllowNavigate();

View File

@ -165,6 +165,7 @@ public:
C_SHOPPING,
C_STAGE,
C_OTHER,
C_RENTAL,
C_COUNT,
C_ANY = -1 // only useful in queries
};

View File

@ -19,6 +19,7 @@ include_directories(
${LLCOMMON_INCLUDE_DIRS}
${LLIMAGE_INCLUDE_DIRS}
${KDU_INCLUDE_DIR}
${LLKDU_INCLUDE_DIRS}
${LLMATH_INCLUDE_DIRS}
)
@ -49,6 +50,15 @@ if (USE_KDU)
SET(llkdu_TEST_SOURCE_FILES
llimagej2ckdu.cpp
)
SET(llkdu_test_additional_HEADER_FILES
llimagej2ckdu.h
llkdumem.h
lltut.h
)
SET(llkdu_test_additional_INCLUDE_DIRS
${KDU_INCLUDE_DIR}
${LLKDU_INCLUDE_DIRS}
)
LL_ADD_PROJECT_UNIT_TESTS(llkdu "${llkdu_TEST_SOURCE_FILES}")
endif (LL_TESTS)

View File

@ -29,40 +29,41 @@
#include "lltimer.h"
#include "llpointer.h"
#include "llmath.h"
#include "llkdumem.h"
class kdc_flow_control {
public: // Member functions
kdc_flow_control(kdu_image_in_base *img_in, kdu_codestream codestream);
~kdc_flow_control();
bool advance_components();
void process_components();
public:
kdc_flow_control(kdu_image_in_base *img_in, kdu_codestream codestream);
~kdc_flow_control();
bool advance_components();
void process_components();
private: // Data
struct kdc_component_flow_control {
public: // Data
kdu_image_in_base *reader;
int vert_subsampling;
int ratio_counter; /* Initialized to 0, decremented by `count_delta';
private:
struct kdc_component_flow_control {
public:
kdu_image_in_base *reader;
int vert_subsampling;
int ratio_counter; /* Initialized to 0, decremented by `count_delta';
when < 0, a new line must be processed, after
which it is incremented by `vert_subsampling'. */
int initial_lines;
int remaining_lines;
kdu_line_buf *line;
};
kdu_codestream codestream;
kdu_dims valid_tile_indices;
kdu_coords tile_idx;
kdu_tile tile;
int num_components;
kdc_component_flow_control *components;
int count_delta; // Holds the minimum of the `vert_subsampling' fields
kdu_multi_analysis engine;
kdu_long max_buffer_memory;
int initial_lines;
int remaining_lines;
kdu_line_buf *line;
};
kdu_codestream codestream;
kdu_dims valid_tile_indices;
kdu_coords tile_idx;
kdu_tile tile;
int num_components;
kdc_component_flow_control *components;
int count_delta; // Holds the minimum of the `vert_subsampling' fields
kdu_multi_analysis engine;
kdu_long max_buffer_memory;
};
//
@ -72,7 +73,8 @@ void set_default_colour_weights(kdu_params *siz);
const char* engineInfoLLImageJ2CKDU()
{
return "KDU v6.4.1";
// Keep the formatted string in a static so the returned c_str() pointer
// remains valid after this function returns.
static const std::string version = llformat("KDU %s", KDU_CORE_VERSION);
return version.c_str();
}
LLImageJ2CKDU* createLLImageJ2CKDU()
@ -105,7 +107,11 @@ const char* fallbackEngineInfoLLImageJ2CImpl()
class LLKDUDecodeState
{
public:
LLKDUDecodeState(kdu_tile tile, kdu_byte *buf, S32 row_gap);
~LLKDUDecodeState();
BOOL processTileDecode(F32 decode_time, BOOL limit_time = TRUE);
private:
S32 mNumComponents;
BOOL mUseYCC;
kdu_dims mDims;
@ -113,22 +119,12 @@ public:
kdu_tile_comp mComps[4];
kdu_line_buf mLines[4];
kdu_pull_ifc mEngines[4];
bool mReversible[4]; // Some components may be reversible and others not.
int mBitDepths[4]; // Original bit-depth may be quite different from 8.
bool mReversible[4]; // Some components may be reversible and others not
int mBitDepths[4]; // Original bit-depth may be quite different from 8
kdu_tile mTile;
kdu_byte *mBuf;
S32 mRowGap;
LLKDUDecodeState(kdu_tile tile, kdu_byte *buf, S32 row_gap);
~LLKDUDecodeState();
BOOL processTileDecode(F32 decode_time, BOOL limit_time = TRUE);
public:
int *AssignLayerBytes(siz_params *siz, int &num_specs);
void setupCodeStream(BOOL keep_codestream, LLImageJ2CKDU::ECodeStreamMode mode);
BOOL initDecode(LLImageRaw &raw_image, F32 decode_time, LLImageJ2CKDU::ECodeStreamMode mode, S32 first_channel, S32 max_channel_count );
};
void ll_kdu_error( void )
@ -153,7 +149,7 @@ class LLKDUMessageError : public kdu_message
public:
/*virtual*/ void put_text(const char *s);
/*virtual*/ void put_text(const kdu_uint16 *s);
/*virtual*/ void flush(bool end_of_message=false);
/*virtual*/ void flush(bool end_of_message = false);
static LLKDUMessageError sDefaultMessage;
};
@ -179,7 +175,7 @@ void LLKDUMessageError::put_text(const kdu_uint16 *s)
void LLKDUMessageError::flush(bool end_of_message)
{
if( end_of_message )
if (end_of_message)
{
throw "KDU throwing an exception";
}
@ -195,7 +191,10 @@ mCodeStreamp(NULL),
mTPosp(NULL),
mTileIndicesp(NULL),
mRawImagep(NULL),
mDecodeState(NULL)
mDecodeState(NULL),
mBlocksSize(-1),
mPrecinctsSize(-1),
mLevels(0)
{
}
@ -210,7 +209,7 @@ void transfer_bytes(kdu_byte *dest, kdu_line_buf &src, int gap, int precision);
void LLImageJ2CKDU::setupCodeStream(LLImageJ2C &base, BOOL keep_codestream, ECodeStreamMode mode)
{
S32 data_size = base.getDataSize();
S32 max_bytes = base.getMaxBytes() ? base.getMaxBytes() : data_size;
S32 max_bytes = (base.getMaxBytes() ? base.getMaxBytes() : data_size);
//
// Initialization
@ -247,21 +246,21 @@ void LLImageJ2CKDU::setupCodeStream(LLImageJ2C &base, BOOL keep_codestream, ECod
// Set the maximum number of bytes to use from the codestream
mCodeStreamp->set_max_bytes(max_bytes);
// If you want to flip or rotate the image for some reason, change
// If you want to flip or rotate the image for some reason, change
// the resolution, or identify a restricted region of interest, this is
// the place to do it. You may use "kdu_codestream::change_appearance"
// and "kdu_codestream::apply_input_restrictions" for this purpose.
// If you wish to truncate the code-stream prior to decompression, you
// If you wish to truncate the code-stream prior to decompression, you
// may use "kdu_codestream::set_max_bytes".
// If you wish to retain all compressed data so that the material
// If you wish to retain all compressed data so that the material
// can be decompressed multiple times, possibly with different appearance
// parameters, you should call "kdu_codestream::set_persistent" here.
// There are a variety of other features which must be enabled at
// There are a variety of other features which must be enabled at
// this point if you want to take advantage of them. See the
// descriptions appearing with the "kdu_codestream" interface functions
// in "kdu_compressed.h" for an itemized account of these capabilities.
switch( mode )
switch (mode)
{
case MODE_FAST:
mCodeStreamp->set_fast();
@ -326,7 +325,38 @@ void LLImageJ2CKDU::cleanupCodeStream()
mTileIndicesp = NULL;
}
BOOL LLImageJ2CKDU::initDecode(LLImageJ2C &base, LLImageRaw &raw_image, F32 decode_time, ECodeStreamMode mode, S32 first_channel, S32 max_channel_count )
BOOL LLImageJ2CKDU::initDecode(LLImageJ2C &base, LLImageRaw &raw_image, int discard_level, int* region)
{
return initDecode(base,raw_image,0.0f,MODE_FAST,0,4,discard_level,region);
}
BOOL LLImageJ2CKDU::initEncode(LLImageJ2C &base, LLImageRaw &raw_image, int blocks_size, int precincts_size, int levels)
{
mPrecinctsSize = precincts_size;
if (mPrecinctsSize != -1)
{
mPrecinctsSize = get_lower_power_two(mPrecinctsSize,MAX_PRECINCT_SIZE);
mPrecinctsSize = llmax(mPrecinctsSize,MIN_PRECINCT_SIZE);
}
mBlocksSize = blocks_size;
if (mBlocksSize != -1)
{
mBlocksSize = get_lower_power_two(mBlocksSize,MAX_BLOCK_SIZE);
mBlocksSize = llmax(mBlocksSize,MIN_BLOCK_SIZE);
if (mPrecinctsSize != -1)
{
mBlocksSize = llmin(mBlocksSize,mPrecinctsSize);	// blocks *must* be no larger than precincts
}
}
mLevels = levels;
if (mLevels != 0)
{
mLevels = llclamp(mLevels,MIN_DECOMPOSITION_LEVELS,MAX_DECOMPOSITION_LEVELS);	// clamp to the supported range (MAX_DECOMPOSITION_LEVELS assumed to be the matching upper bound)
}
return TRUE;
}
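
As a rough illustration of the clamping done in initEncode() above, here is a minimal standalone sketch of a "round down to a power of two, capped at a maximum" helper; the viewer's actual get_lower_power_two() may behave differently, so treat the function below and the example bounds as assumptions rather than the shipped implementation.

	// Hypothetical stand-in for get_lower_power_two(): returns the largest power
	// of two that is <= value, never exceeding max_power_two.
	static int lower_power_two_sketch(int value, int max_power_two)
	{
		int result = 1;
		while ((result << 1) <= value && (result << 1) <= max_power_two)
		{
			result <<= 1;
		}
		return result;
	}

	// Example: with illustrative min/max block sizes of 16/64, a requested
	// blocks_size of 100 would become 64, and 20 would become 16, before the
	// llmax()/llmin() adjustments against the precinct size shown above.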
BOOL LLImageJ2CKDU::initDecode(LLImageJ2C &base, LLImageRaw &raw_image, F32 decode_time, ECodeStreamMode mode, S32 first_channel, S32 max_channel_count, int discard_level, int* region)
{
base.resetLastError();
@ -339,17 +369,38 @@ BOOL LLImageJ2CKDU::initDecode(LLImageJ2C &base, LLImageRaw &raw_image, F32 deco
mRawImagep = &raw_image;
mCodeStreamp->change_appearance(false, true, false);
mCodeStreamp->apply_input_restrictions(first_channel,max_channel_count,base.getRawDiscardLevel(),0,NULL);
kdu_dims dims; mCodeStreamp->get_dims(0,dims);
S32 channels = base.getComponents() - first_channel;
if( channels > max_channel_count )
// Apply loading discard level and cropping if required
kdu_dims* region_kdu = NULL;
if (region != NULL)
{
channels = max_channel_count;
region_kdu = new kdu_dims;
region_kdu->pos.x = region[0];
region_kdu->pos.y = region[1];
region_kdu->size.x = region[2] - region[0];
region_kdu->size.y = region[3] - region[1];
}
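	// Note on the assumed convention: the caller's region[] appears to be
	// {x0, y0, x1, y1} in source image coordinates, which the block above maps
	// to a kdu_dims with pos = (x0, y0) and size = (x1 - x0, y1 - y0).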
int discard = (discard_level != -1 ? discard_level : base.getRawDiscardLevel());
// Apply loading restrictions
mCodeStreamp->apply_input_restrictions( first_channel, max_channel_count, discard, 0, region_kdu);
// Clean-up
if (region_kdu)
{
delete region_kdu;
region_kdu = NULL;
}
raw_image.resize(dims.size.x, dims.size.y, channels);
// llinfos << "Resizing to " << dims.size.x << ":" << dims.size.y << llendl;
// Resize raw_image according to the image to be decoded
kdu_dims dims; mCodeStreamp->get_dims(0,dims);
// *TODO: Use the real number of levels read from the file throughout the code instead of relying on a value inferred from the dimensions
//S32 levels = mCodeStreamp->get_min_dwt_levels();
S32 channels = base.getComponents() - first_channel;
channels = llmin(channels,max_channel_count);
raw_image.resize(dims.size.x, dims.size.y, channels);
//llinfos << "j2c image dimension: width = " << dims.size.x << ", height = " << dims.size.y << ", channels = " << channels << ", levels = " << levels << llendl;
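	// For reference: get_dims() reports dimensions at the discard level applied
	// above, and each discard level halves width and height, so a 1024x1024
	// source decoded with discard = 2 is resized to 256x256 here.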
if (!mTileIndicesp)
{
mTileIndicesp = new kdu_dims;
@ -426,7 +477,7 @@ BOOL LLImageJ2CKDU::decodeImpl(LLImageJ2C &base, LLImageRaw &raw_image, F32 deco
// canvas coordinate system. Comparing the two tells
// us where the current tile is in the buffer.
S32 channels = base.getComponents() - first_channel;
if( channels > max_channel_count )
if (channels > max_channel_count)
{
channels = max_channel_count;
}
@ -452,14 +503,14 @@ BOOL LLImageJ2CKDU::decodeImpl(LLImageJ2C &base, LLImageRaw &raw_image, F32 deco
return FALSE;
}
}
catch( const char* msg )
catch (const char* msg)
{
base.setLastError(ll_safe_string(msg));
base.decodeFailed();
cleanupCodeStream();
return TRUE; // done
}
catch( ... )
catch (...)
{
base.setLastError( "Unknown J2C error" );
base.decodeFailed();
@ -482,28 +533,17 @@ BOOL LLImageJ2CKDU::decodeImpl(LLImageJ2C &base, LLImageRaw &raw_image, F32 deco
BOOL LLImageJ2CKDU::encodeImpl(LLImageJ2C &base, const LLImageRaw &raw_image, const char* comment_text, F32 encode_time, BOOL reversible)
{
// Collect simple arguments.
bool transpose, vflip, hflip;
bool allow_rate_prediction, mem, quiet, no_weights;
int cpu_iterations;
std::ostream *record_stream;
transpose = false;
record_stream = NULL;
allow_rate_prediction = true;
no_weights = false;
cpu_iterations = -1;
mem = false;
quiet = false;
vflip = true;
hflip = false;
// Declare and set simple arguments
bool transpose = false;
bool vflip = true;
bool hflip = false;
try
{
// Set up input image files.
// Set up input image files
siz_params siz;
// Should set rate someplace here.
// Should set rate someplace here
LLKDUMemIn mem_in(raw_image.getData(),
raw_image.getDataSize(),
raw_image.getWidth(),
@ -521,26 +561,17 @@ BOOL LLImageJ2CKDU::encodeImpl(LLImageJ2C &base, const LLImageRaw &raw_image, co
siz.set(Sprecision,0,0,8); // Image samples have original bit-depth of 8
siz.set(Ssigned,0,0,false); // Image samples are originally unsigned
kdu_params *siz_ref = &siz; siz_ref->finalize();
siz_params transformed_siz; // Use this one to construct code-strea
kdu_params *siz_ref = &siz;
siz_ref->finalize();
siz_params transformed_siz; // Use this one to construct code-stream
transformed_siz.copy_from(&siz,-1,-1,-1,0,transpose,false,false);
// Construct the `kdu_codestream' object and parse all remaining arguments.
// Construct the `kdu_codestream' object and parse all remaining arguments
U32 max_output_size = base.getWidth()*base.getHeight()*base.getComponents();
if (max_output_size < 1000)
{
max_output_size = 1000;
}
max_output_size = (max_output_size < 1000 ? 1000 : max_output_size);
U8 *output_buffer = new U8[max_output_size];
U32 output_size = max_output_size; // gets modified
LLKDUMemTarget output(output_buffer, output_size, base.getWidth()*base.getHeight()*base.getComponents());
if (output_size > max_output_size)
{
llerrs << llformat("LLImageJ2C::encode output_size(%d) > max_output_size(%d)",
output_size,max_output_size) << llendl;
}
U32 output_size = 0; // Updated by LLKDUMemTarget with the final compressed buffer size
LLKDUMemTarget output(output_buffer, output_size, max_output_size);
kdu_codestream codestream;
codestream.create(&transformed_siz,&output);
@ -558,16 +589,22 @@ BOOL LLImageJ2CKDU::encodeImpl(LLImageJ2C &base, const LLImageRaw &raw_image, co
kdu_long layer_bytes[64];
U32 max_bytes = 0;
if ((num_components >= 3) && !no_weights)
if (num_components >= 3)
{
// Note that we always use YCC and not YUV
// *TODO: Verify this doesn't screw up reversible textures (like sculpties), as YCC is not reversible but YUV is...
set_default_colour_weights(codestream.access_siz());
}
if (reversible)
{
// If we're doing reversible, assume we're not using quality layers.
// Yes, I know this is incorrect!
codestream.access_siz()->parse_string("Creversible=yes");
// *TODO: we should use YUV in reversible mode, and a single level, since those images are small.
// Don't turn this on for now though, as both create problems on decoding at the moment
//codestream.access_siz()->parse_string("Clevels=1");
//codestream.access_siz()->parse_string("Cycc=no");
// If we're doing reversible (i.e. lossless) compression, assume we're not using quality layers.
// *TODO: this is incorrect and unnecessary. Try using the regular layer setting.
codestream.access_siz()->parse_string("Clayers=1");
num_layer_specs = 1;
layer_bytes[0] = 0;
@ -577,6 +614,7 @@ BOOL LLImageJ2CKDU::encodeImpl(LLImageJ2C &base, const LLImageRaw &raw_image, co
// Rate is the argument passed into the LLImageJ2C which
// specifies the target compression rate. The default is 8:1.
// Possibly if max_bytes < 500, we should just use the default setting?
// *TODO: mRate is actually always 8:1 in the viewer. Test different values. Also force to reversible for small (< 500 bytes) textures.
if (base.mRate != 0.f)
{
max_bytes = (U32)(base.mRate*base.getWidth()*base.getHeight()*base.getComponents());
@ -617,42 +655,61 @@ BOOL LLImageJ2CKDU::encodeImpl(LLImageJ2C &base, const LLImageRaw &raw_image, co
codestream.access_siz()->parse_string(layer_string.c_str());
}
}
codestream.access_siz()->finalize_all();
if (cpu_iterations >= 0)
// Set up data ordering, markers, etc... if precincts or blocks specified
if ((mBlocksSize != -1) || (mPrecinctsSize != -1))
{
codestream.collect_timing_stats(cpu_iterations);
if (mPrecinctsSize != -1)
{
std::string precincts_string = llformat("Cprecincts={%d,%d}",mPrecinctsSize,mPrecinctsSize);
codestream.access_siz()->parse_string(precincts_string.c_str());
}
if (mBlocksSize != -1)
{
std::string blocks_string = llformat("Cblk={%d,%d}",mBlocksSize,mBlocksSize);
codestream.access_siz()->parse_string(blocks_string.c_str());
}
std::string ordering_string = llformat("Corder=RPCL");
codestream.access_siz()->parse_string(ordering_string.c_str());
std::string PLT_string = llformat("ORGgen_plt=yes");
codestream.access_siz()->parse_string(PLT_string.c_str());
std::string Parts_string = llformat("ORGtparts=R");
codestream.access_siz()->parse_string(Parts_string.c_str());
}
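	// For reference, the Kakadu switches set above: Cprecincts/Cblk fix the
	// precinct and code-block geometry, Corder=RPCL selects a
	// Resolution-Position-Component-Layer progression, ORGgen_plt=yes writes PLT
	// (packet length) marker segments, and ORGtparts=R splits tiles into
	// tile-parts per resolution, all of which make partial and region-based
	// decoding of the stream cheaper.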
if (mLevels != 0)
{
std::string levels_string = llformat("Clevels=%d",mLevels);
codestream.access_siz()->parse_string(levels_string.c_str());
}
codestream.access_siz()->finalize_all();
codestream.change_appearance(transpose,vflip,hflip);
// Now we are ready for sample data processing.
kdc_flow_control *tile = new kdc_flow_control(&mem_in,codestream);
bool done = false;
while (!done)
{
// Process line by line
done = true;
if (tile->advance_components())
{
done = false;
tile->process_components();
}
}
kdc_flow_control *tile = new kdc_flow_control(&mem_in,codestream);
bool done = false;
while (!done)
{
// Process line by line
if (tile->advance_components())
{
tile->process_components();
}
else
{
done = true;
}
}
// Produce the compressed output
codestream.flush(layer_bytes,num_layer_specs);
codestream.flush(layer_bytes,num_layer_specs);
// Cleanup
delete tile;
delete tile;
codestream.destroy();
if (record_stream != NULL)
{
delete record_stream;
}
// Now that we're done encoding, create the new data buffer for the compressed
// image and stick it there.
base.copyData(output_buffer, output_size);
base.updateData(); // set width, height
delete[] output_buffer;
@ -674,19 +731,19 @@ BOOL LLImageJ2CKDU::encodeImpl(LLImageJ2C &base, const LLImageRaw &raw_image, co
BOOL LLImageJ2CKDU::getMetadata(LLImageJ2C &base)
{
// *FIX: kdu calls our callback function if there's an error, and
// then bombs. To regain control, we throw an exception, and
// then bombs. To regain control, we throw an exception, and
// catch it here.
try
{
setupCodeStream(base, FALSE, MODE_FAST);
return TRUE;
}
catch( const char* msg )
catch (const char* msg)
{
base.setLastError(ll_safe_string(msg));
return FALSE;
}
catch( ... )
catch (...)
{
base.setLastError( "Unknown J2C error" );
return FALSE;
@ -699,37 +756,49 @@ void set_default_colour_weights(kdu_params *siz)
assert(cod != NULL);
bool can_use_ycc = true;
bool rev0=false;
int depth0=0, sub_x0=1, sub_y0=1;
for (int c=0; c < 3; c++)
bool rev0 = false;
int depth0 = 0, sub_x0 = 1, sub_y0 = 1;
for (int c = 0; c < 3; c++)
{
int depth=0; siz->get(Sprecision,c,0,depth);
int sub_y=1; siz->get(Ssampling,c,0,sub_y);
int sub_x=1; siz->get(Ssampling,c,1,sub_x);
int depth = 0; siz->get(Sprecision,c,0,depth);
int sub_y = 1; siz->get(Ssampling,c,0,sub_y);
int sub_x = 1; siz->get(Ssampling,c,1,sub_x);
kdu_params *coc = cod->access_relation(-1,c);
bool rev=false; coc->get(Creversible,0,0,rev);
bool rev = false; coc->get(Creversible,0,0,rev);
if (c == 0)
{ rev0=rev; depth0=depth; sub_x0=sub_x; sub_y0=sub_y; }
else if ((rev != rev0) || (depth != depth0) ||
(sub_x != sub_x0) || (sub_y != sub_y0))
{
rev0 = rev; depth0 = depth; sub_x0 = sub_x; sub_y0 = sub_y;
}
else if ((rev != rev0) || (depth != depth0) ||
(sub_x != sub_x0) || (sub_y != sub_y0))
{
can_use_ycc = false;
}
}
if (!can_use_ycc)
{
return;
}
bool use_ycc;
if (!cod->get(Cycc,0,0,use_ycc))
{
cod->set(Cycc,0,0,use_ycc=true);
}
if (!use_ycc)
{
return;
}
float weight;
if (cod->get(Clev_weights,0,0,weight) ||
cod->get(Cband_weights,0,0,weight))
return; // Weights already specified explicitly.
if (cod->get(Clev_weights,0,0,weight) || cod->get(Cband_weights,0,0,weight))
{
// Weights already specified explicitly -> nothing to do
return;
}
/* These example weights are adapted from numbers generated by Marcus Nadenau
at EPFL, for a viewing distance of 15 cm and a display resolution of
300 DPI. */
// These example weights are adapted from numbers generated by Marcus Nadenau
// at EPFL, for a viewing distance of 15 cm and a display resolution of
// 300 DPI.
cod->parse_string("Cband_weights:C0="
"{0.0901},{0.2758},{0.2758},"
@ -775,7 +844,7 @@ all necessary level shifting, type conversion, rounding and truncation. */
val += 128;
if (val & ((-1)<<8))
{
val = (val<0)?0:255;
val = (val < 0 ? 0 : 255);
}
*dest = (kdu_byte) val;
}
@ -793,7 +862,7 @@ all necessary level shifting, type conversion, rounding and truncation. */
val += 128;
if (val & ((-1)<<8))
{
val = (val<0)?0:255;
val = (val < 0 ? 0 : 255);
}
*dest = (kdu_byte) val;
}
@ -816,7 +885,7 @@ all necessary level shifting, type conversion, rounding and truncation. */
val += 128;
if (val & ((-1)<<8))
{
val = (val<0)?0:255;
val = (val < 0 ? 0 : 255);
}
*dest = (kdu_byte) val;
}
@ -835,7 +904,7 @@ all necessary level shifting, type conversion, rounding and truncation. */
val += 128;
if (val & ((-1)<<8))
{
val = (val<0)?0:(256-(1<<upshift));
val = (val < 0 ? 0 : 256 - (1<<upshift));
}
*dest = (kdu_byte) val;
}
@ -857,7 +926,7 @@ all necessary level shifting, type conversion, rounding and truncation. */
val += 128;
if (val & ((-1)<<8))
{
val = (val<0)?0:255;
val = (val < 0 ? 0 : 255);
}
*dest = (kdu_byte) val;
}
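	// A short aside on the range check used in these transfer loops: on a
	// two's-complement 32-bit int, ((-1)<<8) is the mask 0xFFFFFF00, so
	// (val & ((-1)<<8)) is non-zero exactly when val is negative or greater
	// than 255. Worked example: val = 300 = 0x12C, and 0x12C & 0xFFFFFF00 =
	// 0x100 != 0, so the value is clamped to 255 before the cast to kdu_byte.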
@ -873,7 +942,7 @@ all necessary level shifting, type conversion, rounding and truncation. */
val += 128;
if (val & ((-1)<<8))
{
val = (val<0)?0:(256-(1<<upshift));
val = (val < 0 ? 0 : 256 - (1<<upshift));
}
*dest = (kdu_byte) val;
}
@ -892,17 +961,17 @@ LLKDUDecodeState::LLKDUDecodeState(kdu_tile tile, kdu_byte *buf, S32 row_gap)
mNumComponents = tile.get_num_components();
llassert(mNumComponents<=4);
llassert(mNumComponents <= 4);
mUseYCC = tile.get_ycc();
for (c=0; c<4; ++c)
for (c = 0; c < 4; ++c)
{
mReversible[c] = false;
mBitDepths[c] = 0;
}
// Open tile-components and create processing engines and resources
for (c=0; c < mNumComponents; c++)
for (c = 0; c < mNumComponents; c++)
{
mComps[c] = mTile.access_component(c);
mReversible[c] = mComps[c].get_reversible();
@ -929,7 +998,7 @@ LLKDUDecodeState::LLKDUDecodeState(kdu_tile tile, kdu_byte *buf, S32 row_gap)
}
}
mAllocator.finalize(); // Actually creates buffering resources
for (c=0; c < mNumComponents; c++)
for (c = 0; c < mNumComponents; c++)
{
mLines[c].create(); // Grabs resources from the allocator.
}
@ -937,13 +1006,11 @@ LLKDUDecodeState::LLKDUDecodeState(kdu_tile tile, kdu_byte *buf, S32 row_gap)
LLKDUDecodeState::~LLKDUDecodeState()
{
S32 c;
// Cleanup
for (c=0; c < mNumComponents; c++)
for (S32 c = 0; c < mNumComponents; c++)
{
mEngines[c].destroy(); // engines are interfaces; no default destructors
}
mTile.close();
}
@ -962,7 +1029,7 @@ separation between consecutive rows in the real buffer. */
LLTimer decode_timer;
while (mDims.size.y--)
{
for (c=0; c < mNumComponents; c++)
for (c = 0; c < mNumComponents; c++)
{
mEngines[c].pull(mLines[c],true);
}
@ -970,7 +1037,7 @@ separation between consecutive rows in the real buffer. */
{
kdu_convert_ycc_to_rgb(mLines[0],mLines[1],mLines[2]);
}
for (c=0; c < mNumComponents; c++)
for (c = 0; c < mNumComponents; c++)
{
transfer_bytes(mBuf+c,mLines[c],mNumComponents,mBitDepths[c]);
}
@ -990,96 +1057,100 @@ separation between consecutive rows in the real buffer. */
kdc_flow_control::kdc_flow_control (kdu_image_in_base *img_in, kdu_codestream codestream)
{
int n;
this->codestream = codestream;
codestream.get_valid_tiles(valid_tile_indices);
tile_idx = valid_tile_indices.pos;
tile = codestream.open_tile(tile_idx,NULL);
// Set up the individual components
num_components = codestream.get_num_components(true);
components = new kdc_component_flow_control[num_components];
count_delta = 0;
kdc_component_flow_control *comp = components;
for (n = 0; n < num_components; n++, comp++)
{
comp->line = NULL;
comp->reader = img_in;
kdu_coords subsampling;
codestream.get_subsampling(n,subsampling,true);
kdu_dims dims;
codestream.get_tile_dims(tile_idx,n,dims,true);
comp->vert_subsampling = subsampling.y;
if ((n == 0) || (comp->vert_subsampling < count_delta))
{
count_delta = comp->vert_subsampling;
}
comp->ratio_counter = 0;
comp->remaining_lines = comp->initial_lines = dims.size.y;
}
assert(num_components >= 0);
tile.set_components_of_interest(num_components);
max_buffer_memory = engine.create(codestream,tile,false,NULL,false,1,NULL,NULL,false);
int n;
this->codestream = codestream;
codestream.get_valid_tiles(valid_tile_indices);
tile_idx = valid_tile_indices.pos;
tile = codestream.open_tile(tile_idx,NULL);
// Set up the individual components
num_components = codestream.get_num_components(true);
components = new kdc_component_flow_control[num_components];
count_delta = 0;
kdc_component_flow_control *comp = components;
for (n = 0; n < num_components; n++, comp++)
{
comp->line = NULL;
comp->reader = img_in;
kdu_coords subsampling;
codestream.get_subsampling(n,subsampling,true);
kdu_dims dims;
codestream.get_tile_dims(tile_idx,n,dims,true);
comp->vert_subsampling = subsampling.y;
if ((n == 0) || (comp->vert_subsampling < count_delta))
{
count_delta = comp->vert_subsampling;
}
comp->ratio_counter = 0;
comp->remaining_lines = comp->initial_lines = dims.size.y;
}
assert(num_components >= 0);
tile.set_components_of_interest(num_components);
max_buffer_memory = engine.create(codestream,tile,false,NULL,false,1,NULL,NULL,false);
}
kdc_flow_control::~kdc_flow_control()
{
if (components != NULL)
delete[] components;
if (engine.exists())
engine.destroy();
if (components != NULL)
{
delete[] components;
}
if (engine.exists())
{
engine.destroy();
}
}
bool kdc_flow_control::advance_components()
{
bool found_line = false;
while (!found_line)
{
bool all_done = true;
kdc_component_flow_control *comp = components;
for (int n = 0; n < num_components; n++, comp++)
{
assert(comp->ratio_counter >= 0);
if (comp->remaining_lines > 0)
{
all_done = false;
comp->ratio_counter -= count_delta;
if (comp->ratio_counter < 0)
{
found_line = true;
comp->line = engine.exchange_line(n,NULL,NULL);
assert(comp->line != NULL);
bool found_line = false;
while (!found_line)
{
bool all_done = true;
kdc_component_flow_control *comp = components;
for (int n = 0; n < num_components; n++, comp++)
{
assert(comp->ratio_counter >= 0);
if (comp->remaining_lines > 0)
{
all_done = false;
comp->ratio_counter -= count_delta;
if (comp->ratio_counter < 0)
{
found_line = true;
comp->line = engine.exchange_line(n,NULL,NULL);
assert(comp->line != NULL);
if (comp->line->get_width())
{
comp->reader->get(n,*(comp->line),0);
}
}
}
}
if (all_done)
{
return false;
}
}
return true;
}
}
}
if (all_done)
{
return false;
}
}
return true;
}
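
To make the scheduling above concrete, here is a minimal standalone sketch (not viewer code) that replays the ratio_counter / count_delta bookkeeping for three hypothetical components with vertical subsampling factors {1, 2, 2}: component 0 yields a line on every pass, while components 1 and 2 yield one every other pass.

	#include <cstdio>

	int main()
	{
		const int num_components = 3;
		int vert_subsampling[num_components] = {1, 2, 2};
		int ratio_counter[num_components]    = {0, 0, 0};
		const int count_delta = 1; // minimum of the subsampling factors, as in the constructor above

		for (int pass = 0; pass < 4; ++pass)
		{
			std::printf("pass %d:", pass);
			for (int n = 0; n < num_components; ++n)
			{
				ratio_counter[n] -= count_delta;             // mirrors advance_components()
				if (ratio_counter[n] < 0)                    // this component needs a new line
				{
					std::printf(" c%d", n);
					ratio_counter[n] += vert_subsampling[n]; // mirrors process_components()
				}
			}
			std::printf("\n");
		}
		return 0; // prints: pass 0: c0 c1 c2 / pass 1: c0 / pass 2: c0 c1 c2 / pass 3: c0
	}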
void kdc_flow_control::process_components()
{
kdc_component_flow_control *comp = components;
for (int n = 0; n < num_components; n++, comp++)
{
if (comp->ratio_counter < 0)
{
comp->ratio_counter += comp->vert_subsampling;
assert(comp->ratio_counter >= 0);
assert(comp->remaining_lines > 0);
comp->remaining_lines--;
assert(comp->line != NULL);
engine.exchange_line(n,comp->line,NULL);
comp->line = NULL;
}
}
kdc_component_flow_control *comp = components;
for (int n = 0; n < num_components; n++, comp++)
{
if (comp->ratio_counter < 0)
{
comp->ratio_counter += comp->vert_subsampling;
assert(comp->ratio_counter >= 0);
assert(comp->remaining_lines > 0);
comp->remaining_lines--;
assert(comp->line != NULL);
engine.exchange_line(n,comp->line,NULL);
comp->line = NULL;
}
}
}
