From ac5d59b9392dbd107ff6e8ac05ab8a3cd48fd694 Mon Sep 17 00:00:00 2001 From: TommyTheTerrible <81168766+TommyTheTerrible@users.noreply.github.com> Date: Mon, 5 May 2025 03:00:36 -0400 Subject: [PATCH 01/24] calcDataSizeJ2C Adjust curve for more than 6 layers (#4018, #4020) KDU is uploading 2k files with 7 and 8 layers which is shifting the location of discard 1 and 2. To accommodate, this commit adds a max_layer check based on max_dimension and the MAX_BLOCK_SIZE to allow the extra layers for 2k. Also shifted the starting size to the MIN_LAYER_SIZE instead of MAX_BLOCK_SIZE's area to allow smaller files to be decoded at discard 5 completely. Finally able to walk around Fantasy Faire without any gray blobs! --- indra/llimage/llimagej2c.cpp | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) diff --git a/indra/llimage/llimagej2c.cpp b/indra/llimage/llimagej2c.cpp index aa161709a1..5a941dc958 100644 --- a/indra/llimage/llimagej2c.cpp +++ b/indra/llimage/llimagej2c.cpp @@ -281,10 +281,11 @@ S32 LLImageJ2C::calcDataSizeJ2C(S32 w, S32 h, S32 comp, S32 discard_level, F32 r S32 height = (h > 0) ? h : 2048; S32 max_dimension = llmax(width, height); // Find largest dimension S32 block_area = MAX_BLOCK_SIZE * MAX_BLOCK_SIZE; // Calculated initial block area from established max block size (currently 64) - block_area *= llmax((max_dimension / MAX_BLOCK_SIZE / max_components), 1); // Adjust initial block area by ratio of largest dimension to block size per component - S32 totalbytes = (S32) (block_area * max_components * precision); // First block layer computed before loop without compression rate - S32 block_layers = 1; // Start at layer 1 since first block layer is computed outside loop - while (block_layers < 6) // Walk five layers for the five discards in JPEG2000 + S32 max_layers = (S32)llmax(llround(log2f((float)max_dimension) - log2f((float)MAX_BLOCK_SIZE)), 4); // Find number of powers of two between extents and block size to a minimum of 4 + block_area *= llmax(max_layers, 1); // Adjust initial block area by max number of layers + S32 totalbytes = (S32) (MIN_LAYER_SIZE * max_components * precision); // Start estimation with a minimum reasonable size + S32 block_layers = 0; + while (block_layers <= max_layers) // Walk the layers { if (block_layers <= (5 - discard_level)) // Walk backwards from discard 5 to required discard layer. totalbytes += (S32) (block_area * max_components * precision * rate); // Add each block layer reduced by assumed compression rate From 89512d44f82dcb9679067bb2303acc40b7b43951 Mon Sep 17 00:00:00 2001 From: TommyTheTerrible <81168766+TommyTheTerrible@users.noreply.github.com> Date: Mon, 5 May 2025 04:52:58 -0400 Subject: [PATCH 02/24] updateImageDecodePriority - Avoid Long Face Loop (#4019, #4021) * updateImageDecodePriority - Avoid Long Face Loop To avoid running a long loop on thousands of faces, some textures were being set to a BOOST level to avoid the updateImageDecodePriority function entirely but this was causing many of them to never be deleted over the course of a user's travels. Instead of relying on BOOST, this commit changes the logic of the texture channel loop such that the face loop will only run if the number of faces is below the threshold. To do this, we move the face_count incrementing outside of the face loop into the channel loop and increment it using the getNumFaces function instead. 
We then check face_count against the maximum number of faces we want to check, and if it exceeds that maximum we set the number of faces for the face loop to check to zero. This avoids branch prediction misses and the long face loop. Later, if face_count is above the threshold, we assign the maximum virtual size. I personally believe max_faces_to_check should be lower than 1024, but I left that value in for continuity. I use 64 faces as my maximum in my own build of the viewer without any noticeable impact on memory use. * updateImageDecodePriority - Face Loop Increment Swap It looks like compilers prefer having the increment expressed in the for-loop statement itself, since it helps optimization and parallelization. Sorry for the tiny commit. * updateImageDecodePriority - Suggested Cleanup Remove trailing white-space. Co-authored-by: Andrey Lihatskiy --- indra/newview/llviewertexturelist.cpp | 12 +++++++----- 1 file changed, 7 insertions(+), 5 deletions(-) diff --git a/indra/newview/llviewertexturelist.cpp b/indra/newview/llviewertexturelist.cpp index 63d5a2d778..fb8e8e7bf3 100644 --- a/indra/newview/llviewertexturelist.cpp +++ b/indra/newview/llviewertexturelist.cpp @@ -911,6 +911,7 @@ void LLViewerTextureList::updateImageDecodePriority(LLViewerFetchedTexture* imag bool on_screen = false; U32 face_count = 0; + U32 max_faces_to_check = 1024; // get adjusted bias based on image resolution LLImageGL* img = imagep->getGLTexture(); @@ -923,13 +924,15 @@ void LLViewerTextureList::updateImageDecodePriority(LLViewerFetchedTexture* imag LL_PROFILE_ZONE_SCOPED_CATEGORY_TEXTURE; for (U32 i = 0; i < LLRender::NUM_TEXTURE_CHANNELS; ++i) { - for (S32 fi = 0; fi < imagep->getNumFaces(i); ++fi) + face_count += imagep->getNumFaces(i); + S32 faces_to_check = (face_count > max_faces_to_check) ? 0 : imagep->getNumFaces(i); + + for (S32 fi = 0; fi < faces_to_check; ++fi) { LLFace* face = (*(imagep->getFaceList(i)))[fi]; if (face && face->getViewerObject()) { - ++face_count; F32 radius; F32 cos_angle_to_view_dir; @@ -992,11 +995,10 @@ void LLViewerTextureList::updateImageDecodePriority(LLViewerFetchedTexture* imag } } - if (face_count > 1024) + if (face_count > max_faces_to_check) { // this texture is used in so many places we should just boost it and not bother checking its vsize // this is especially important because the above is not time sliced and can hit multiple ms for a single texture - imagep->setBoostLevel(LLViewerFetchedTexture::BOOST_HIGH); - // Do we ever remove it? This also sets texture nodelete!
+ max_vsize = MAX_IMAGE_AREA; } if (imagep->getType() == LLViewerTexture::LOD_TEXTURE && imagep->getBoostLevel() == LLViewerTexture::BOOST_NONE) From 6e6e105f01726208d60c060c4de653cd15470db3 Mon Sep 17 00:00:00 2001 From: Andrey Kleshchev Date: Fri, 3 Jan 2025 21:20:38 +0200 Subject: [PATCH 03/24] #3317 Fix full screen issue with Windows 11 24H2 --- indra/llwindow/llwindow.cpp | 1 - indra/llwindow/llwindow.h | 1 - indra/llwindow/llwindowwin32.cpp | 21 +++++++-------------- indra/llwindow/llwindowwin32.h | 2 +- 4 files changed, 8 insertions(+), 17 deletions(-) diff --git a/indra/llwindow/llwindow.cpp b/indra/llwindow/llwindow.cpp index 378e633cd2..eb11a28360 100644 --- a/indra/llwindow/llwindow.cpp +++ b/indra/llwindow/llwindow.cpp @@ -103,7 +103,6 @@ LLWindow::LLWindow(LLWindowCallbacks* callbacks, bool fullscreen, U32 flags) mFullscreen(fullscreen), mFullscreenWidth(0), mFullscreenHeight(0), - mFullscreenBits(0), mFullscreenRefresh(0), mSupportedResolutions(NULL), mNumSupportedResolutions(0), diff --git a/indra/llwindow/llwindow.h b/indra/llwindow/llwindow.h index 5e06e665f3..151028113a 100644 --- a/indra/llwindow/llwindow.h +++ b/indra/llwindow/llwindow.h @@ -223,7 +223,6 @@ protected: bool mFullscreen; S32 mFullscreenWidth; S32 mFullscreenHeight; - S32 mFullscreenBits; S32 mFullscreenRefresh; LLWindowResolution* mSupportedResolutions; S32 mNumSupportedResolutions; diff --git a/indra/llwindow/llwindowwin32.cpp b/indra/llwindow/llwindowwin32.cpp index 1cac6ffe08..5d42258068 100644 --- a/indra/llwindow/llwindowwin32.cpp +++ b/indra/llwindow/llwindowwin32.cpp @@ -695,8 +695,7 @@ LLWindowWin32::LLWindowWin32(LLWindowCallbacks* callbacks, } if (dev_mode.dmPelsWidth == width && - dev_mode.dmPelsHeight == height && - dev_mode.dmBitsPerPel == BITS_PER_PIXEL) + dev_mode.dmPelsHeight == height) { success = true; if ((dev_mode.dmDisplayFrequency - current_refresh) @@ -736,7 +735,7 @@ LLWindowWin32::LLWindowWin32(LLWindowCallbacks* callbacks, // If we found a good resolution, use it. if (success) { - success = setDisplayResolution(width, height, BITS_PER_PIXEL, closest_refresh); + success = setDisplayResolution(width, height, closest_refresh); } // Keep a copy of the actual current device mode in case we minimize @@ -749,7 +748,6 @@ LLWindowWin32::LLWindowWin32(LLWindowCallbacks* callbacks, mFullscreen = true; mFullscreenWidth = dev_mode.dmPelsWidth; mFullscreenHeight = dev_mode.dmPelsHeight; - mFullscreenBits = dev_mode.dmBitsPerPel; mFullscreenRefresh = dev_mode.dmDisplayFrequency; LL_INFOS("Window") << "Running at " << dev_mode.dmPelsWidth @@ -763,7 +761,6 @@ LLWindowWin32::LLWindowWin32(LLWindowCallbacks* callbacks, mFullscreen = false; mFullscreenWidth = -1; mFullscreenHeight = -1; - mFullscreenBits = -1; mFullscreenRefresh = -1; std::map args; @@ -1185,7 +1182,7 @@ bool LLWindowWin32::switchContext(bool fullscreen, const LLCoordScreen& size, bo // If we found a good resolution, use it. 
if (success) { - success = setDisplayResolution(width, height, BITS_PER_PIXEL, closest_refresh); + success = setDisplayResolution(width, height, closest_refresh); } // Keep a copy of the actual current device mode in case we minimize @@ -1197,7 +1194,6 @@ bool LLWindowWin32::switchContext(bool fullscreen, const LLCoordScreen& size, bo mFullscreen = true; mFullscreenWidth = dev_mode.dmPelsWidth; mFullscreenHeight = dev_mode.dmPelsHeight; - mFullscreenBits = dev_mode.dmBitsPerPel; mFullscreenRefresh = dev_mode.dmDisplayFrequency; LL_INFOS("Window") << "Running at " << dev_mode.dmPelsWidth @@ -1223,7 +1219,6 @@ bool LLWindowWin32::switchContext(bool fullscreen, const LLCoordScreen& size, bo mFullscreen = false; mFullscreenWidth = -1; mFullscreenHeight = -1; - mFullscreenBits = -1; mFullscreenRefresh = -1; LL_INFOS("Window") << "Unable to run fullscreen at " << width << "x" << height << LL_ENDL; @@ -3517,7 +3512,7 @@ F32 LLWindowWin32::getPixelAspectRatio() // Change display resolution. Returns true if successful. // protected -bool LLWindowWin32::setDisplayResolution(S32 width, S32 height, S32 bits, S32 refresh) +bool LLWindowWin32::setDisplayResolution(S32 width, S32 height, S32 refresh) { DEVMODE dev_mode; ::ZeroMemory(&dev_mode, sizeof(DEVMODE)); @@ -3529,7 +3524,6 @@ bool LLWindowWin32::setDisplayResolution(S32 width, S32 height, S32 bits, S32 re { if (dev_mode.dmPelsWidth == width && dev_mode.dmPelsHeight == height && - dev_mode.dmBitsPerPel == bits && dev_mode.dmDisplayFrequency == refresh ) { // ...display mode identical, do nothing @@ -3541,9 +3535,8 @@ bool LLWindowWin32::setDisplayResolution(S32 width, S32 height, S32 bits, S32 re dev_mode.dmSize = sizeof(dev_mode); dev_mode.dmPelsWidth = width; dev_mode.dmPelsHeight = height; - dev_mode.dmBitsPerPel = bits; dev_mode.dmDisplayFrequency = refresh; - dev_mode.dmFields = DM_BITSPERPEL | DM_PELSWIDTH | DM_PELSHEIGHT | DM_DISPLAYFREQUENCY; + dev_mode.dmFields = DM_PELSWIDTH | DM_PELSHEIGHT | DM_DISPLAYFREQUENCY; // CDS_FULLSCREEN indicates that this is a temporary change to the device mode. LONG cds_result = ChangeDisplaySettings(&dev_mode, CDS_FULLSCREEN); @@ -3553,7 +3546,7 @@ bool LLWindowWin32::setDisplayResolution(S32 width, S32 height, S32 bits, S32 re if (!success) { LL_WARNS("Window") << "setDisplayResolution failed, " - << width << "x" << height << "x" << bits << " @ " << refresh << LL_ENDL; + << width << "x" << height << " @ " << refresh << LL_ENDL; } return success; @@ -3564,7 +3557,7 @@ bool LLWindowWin32::setFullscreenResolution() { if (mFullscreen) { - return setDisplayResolution( mFullscreenWidth, mFullscreenHeight, mFullscreenBits, mFullscreenRefresh); + return setDisplayResolution( mFullscreenWidth, mFullscreenHeight, mFullscreenRefresh); } else { diff --git a/indra/llwindow/llwindowwin32.h b/indra/llwindow/llwindowwin32.h index 36e89e4586..561f07d388 100644 --- a/indra/llwindow/llwindowwin32.h +++ b/indra/llwindow/llwindowwin32.h @@ -150,7 +150,7 @@ protected: virtual LLSD getNativeKeyData(); // Changes display resolution. Returns true if successful - bool setDisplayResolution(S32 width, S32 height, S32 bits, S32 refresh); + bool setDisplayResolution(S32 width, S32 height, S32 refresh); // Go back to last fullscreen display resolution. 
bool setFullscreenResolution(); From efdb86dcbb467232803c370a635c2bf12729361f Mon Sep 17 00:00:00 2001 From: Hecklezz Date: Mon, 5 May 2025 19:46:35 +1000 Subject: [PATCH 04/24] Fixes fullscreen mode in Windows 11 24H2 --- indra/newview/llappviewerwin32.cpp | 23 +++++++++++++++++++++++ indra/newview/llappviewerwin32.h | 1 + 2 files changed, 24 insertions(+) diff --git a/indra/newview/llappviewerwin32.cpp b/indra/newview/llappviewerwin32.cpp index aaf2a7ea3e..94fc097b0f 100644 --- a/indra/newview/llappviewerwin32.cpp +++ b/indra/newview/llappviewerwin32.cpp @@ -816,6 +816,29 @@ bool LLAppViewerWin32::reportCrashToBugsplat(void* pExcepInfo) return false; } +bool LLAppViewerWin32::initWindow() +{ + // This is a workaround/hotfix for a change in Windows 11 24H2 (and possibly later) + // Where the window width and height need to correctly reflect an available FullScreen size + if (gSavedSettings.getBOOL("FullScreen")) + { + DEVMODE dev_mode; + ::ZeroMemory(&dev_mode, sizeof(DEVMODE)); + dev_mode.dmSize = sizeof(DEVMODE); + if (EnumDisplaySettings(NULL, ENUM_CURRENT_SETTINGS, &dev_mode)) + { + gSavedSettings.setU32("WindowWidth", dev_mode.dmPelsWidth); + gSavedSettings.setU32("WindowHeight", dev_mode.dmPelsHeight); + } + else + { + LL_WARNS("AppInit") << "Unable to set WindowWidth and WindowHeight for FullScreen mode" << LL_ENDL; + } + } + + return LLAppViewer::initWindow(); +} + void LLAppViewerWin32::initLoggingAndGetLastDuration() { LLAppViewer::initLoggingAndGetLastDuration(); diff --git a/indra/newview/llappviewerwin32.h b/indra/newview/llappviewerwin32.h index 250e72edf3..3fad53ec72 100644 --- a/indra/newview/llappviewerwin32.h +++ b/indra/newview/llappviewerwin32.h @@ -46,6 +46,7 @@ public: bool reportCrashToBugsplat(void* pExcepInfo) override; protected: + bool initWindow() override; // Override to initialize the viewer's window. void initLoggingAndGetLastDuration() override; // Override to clean stack_trace info. void initConsole() override; // Initialize OS level debugging console. bool initHardwareTest() override; // Win32 uses DX9 to test hardware. From f68a5b2363b14c98e74a09a548571486e74f8510 Mon Sep 17 00:00:00 2001 From: AtlasLinden <114031241+AtlasLinden@users.noreply.github.com> Date: Mon, 5 May 2025 15:08:10 -0700 Subject: [PATCH 05/24] Introduce workflow dispatch and mac functionality to qatest.yaml A workflow dispatch has been added in an attempt to not only manually trigger this workflow but to also test this from a different branch without having to first merge to develop. Also steps have been added to allow this workflow to run on mac runners when added. Mac runner info currently commented out. --- .github/workflows/qatest.yaml | 271 +++++++++++++++++++++++++++++----- 1 file changed, 236 insertions(+), 35 deletions(-) diff --git a/.github/workflows/qatest.yaml b/.github/workflows/qatest.yaml index 6a4ca440ed..5a609fdc5b 100644 --- a/.github/workflows/qatest.yaml +++ b/.github/workflows/qatest.yaml @@ -5,10 +5,20 @@ on: workflows: ["Build"] types: - completed + workflow_dispatch: + inputs: + branch_name: + description: 'Branch name to simulate workflow (e.g. develop)' + required: true + default: 'develop' + build_id: + description: 'Build workflow run ID (e.g. 
For github.com/secondlife/viewer/actions/runs/1234567890 the ID is 1234567890)' + required: true + default: '14806728332' concurrency: - group: qa-test-run - cancel-in-progress: true # Cancels any queued job when a new one starts + group: qa-test-run-${{ matrix.runner }} + cancel-in-progress: false # Prevents cancellation of in-progress jobs jobs: debug-workflow: @@ -26,39 +36,75 @@ jobs: echo "GitHub Workflow Name: ${{ github.workflow }}" install-viewer-and-run-tests: - runs-on: [self-hosted, qa-machine] - # Run test only on successful builds of Second_Life_X branches + strategy: + matrix: + include: + - os: windows + runner: qa-windows-atlas + artifact: Windows-installer + install-path: 'C:\viewer-sikulix-main' + - os: windows + runner: qa-dan-asus + artifact: Windows-installer + install-path: 'C:\viewer-sikulix-main' + # Commented out until mac runner is available + # - os: mac + # runner: qa-mac + # artifact: Mac-installer + # install-path: 'HOME/Documents/viewer-sikulix-main' + fail-fast: false + + runs-on: [self-hosted, "${{ matrix.runner }}"] + # Run test only on successful builds of Second_Life_X branches or on manual dispatch if: > + (github.event_name == 'workflow_run' && github.event.workflow_run.conclusion == 'success' && - ( - startsWith(github.event.workflow_run.head_branch, 'Second_Life') - ) + startsWith(github.event.workflow_run.head_branch, 'Second_Life')) || + github.event_name == 'workflow_dispatch' steps: - - name: Temporarily Allow PowerShell Scripts (Process Scope) + # Common steps for both OSes + - name: Set Build ID + shell: bash + run: | + if [[ "${{ github.event_name }}" == "workflow_dispatch" ]]; then + echo "BUILD_ID=${{ github.event.inputs.build_id }}" >> $GITHUB_ENV + echo "ARTIFACTS_URL=https://api.github.com/repos/secondlife/viewer/actions/runs/${{ github.event.inputs.build_id }}/artifacts" >> $GITHUB_ENV + else + echo "BUILD_ID=${{ github.event.workflow_run.id }}" >> $GITHUB_ENV + echo "ARTIFACTS_URL=https://api.github.com/repos/secondlife/viewer/actions/runs/${{ github.event.workflow_run.id }}/artifacts" >> $GITHUB_ENV + fi + + # Windows-specific steps + - name: Temporarily Allow PowerShell Scripts (Windows) + if: matrix.os == 'windows' + shell: pwsh run: | Set-ExecutionPolicy RemoteSigned -Scope Process -Force - - name: Verify viewer-sikulix-main Exists + - name: Verify viewer-sikulix-main Exists (Windows) + if: matrix.os == 'windows' + shell: pwsh run: | - if (-Not (Test-Path -Path 'C:\viewer-sikulix-main')) { + if (-Not (Test-Path -Path '${{ matrix.install-path }}')) { Write-Host '❌ Error: viewer-sikulix not found on runner!' exit 1 } Write-Host '✅ viewer-sikulix is already available.' - - name: Fetch & Download Windows Installer Artifact + - name: Fetch & Download Installer Artifact (Windows) + if: matrix.os == 'windows' shell: pwsh run: | - $BUILD_ID = "${{ github.event.workflow_run.id }}" - $ARTIFACTS_URL = "https://api.github.com/repos/secondlife/viewer/actions/runs/$BUILD_ID/artifacts" + $BUILD_ID = "${{ env.BUILD_ID }}" + $ARTIFACTS_URL = "${{ env.ARTIFACTS_URL }}" # Fetch the correct artifact URL $response = Invoke-RestMethod -Headers @{Authorization="token ${{ secrets.GITHUB_TOKEN }}" } -Uri $ARTIFACTS_URL - $ARTIFACT_NAME = ($response.artifacts | Where-Object { $_.name -eq "Windows-installer" }).archive_download_url + $ARTIFACT_NAME = ($response.artifacts | Where-Object { $_.name -eq "${{ matrix.artifact }}" }).archive_download_url if (-Not $ARTIFACT_NAME) { - Write-Host "❌ Error: Windows-installer artifact not found!" 
+ Write-Host "❌ Error: ${{ matrix.artifact }} artifact not found!" exit 1 } @@ -74,16 +120,19 @@ jobs: # Ensure download succeeded if (-Not (Test-Path $InstallerPath)) { - Write-Host "❌ Error: Failed to download Windows-installer.zip" + Write-Host "❌ Error: Failed to download ${{ matrix.artifact }}.zip" exit 1 } - - name: Extract Installer & Locate Executable + # Set the path for other steps + echo "DOWNLOAD_PATH=$DownloadPath" | Out-File -FilePath $env:GITHUB_ENV -Append + + - name: Extract Installer & Locate Executable (Windows) + if: matrix.os == 'windows' shell: pwsh run: | - # Explicitly set BUILD_ID again (since it does not appear to persist across steps) - $BUILD_ID = "${{ github.event.workflow_run.id }}" - $ExtractPath = "$env:TEMP\secondlife-build-$BUILD_ID" + $BUILD_ID = "${{ env.BUILD_ID }}" + $ExtractPath = "${{ env.DOWNLOAD_PATH }}" $InstallerZip = "$ExtractPath\installer.zip" # Print paths for debugging @@ -113,16 +162,19 @@ jobs: Write-Host "✅ Installer found: $INSTALLER_PATH" echo "INSTALLER_PATH=$INSTALLER_PATH" | Out-File -FilePath $env:GITHUB_ENV -Append - - name: Install Second Life Using Task Scheduler (Bypass UAC) + - name: Install Second Life (Windows) + if: matrix.os == 'windows' shell: pwsh run: | + # Windows - Use Task Scheduler to bypass UAC $action = New-ScheduledTaskAction -Execute "${{ env.INSTALLER_PATH }}" -Argument "/S" $principal = New-ScheduledTaskPrincipal -UserId "SYSTEM" -LogonType ServiceAccount -RunLevel Highest $task = New-ScheduledTask -Action $action -Principal $principal Register-ScheduledTask -TaskName "SilentSLInstaller" -InputObject $task -Force Start-ScheduledTask -TaskName "SilentSLInstaller" - - name: Wait for Installation to Complete + - name: Wait for Installation to Complete (Windows) + if: matrix.os == 'windows' shell: pwsh run: | Write-Host "Waiting for the Second Life installer to finish..." @@ -133,18 +185,16 @@ jobs: Write-Host "✅ Installation completed!" - - name: Cleanup Task Scheduler Entry + - name: Cleanup After Installation (Windows) + if: matrix.os == 'windows' shell: pwsh run: | + # Cleanup Task Scheduler Entry Unregister-ScheduledTask -TaskName "SilentSLInstaller" -Confirm:$false Write-Host "✅ Task Scheduler entry removed." - - name: Delete Installer ZIP - shell: pwsh - run: | - # Explicitly set BUILD_ID again - $BUILD_ID = "${{ github.event.workflow_run.id }}" - $DeletePath = "$env:TEMP\secondlife-build-$BUILD_ID\installer.zip" + # Delete Installer ZIP + $DeletePath = "${{ env.DOWNLOAD_PATH }}\installer.zip" Write-Host "Checking if installer ZIP exists: $DeletePath" @@ -156,13 +206,164 @@ jobs: Write-Host "⚠️ Warning: ZIP file does not exist, skipping deletion." } - - name: Run QA Test Script + - name: Run QA Test Script (Windows) + if: matrix.os == 'windows' + shell: pwsh run: | - Write-Host "Running QA Test script..." - python C:\viewer-sikulix-main\runTests.py + Write-Host "Running QA Test script on Windows runner: ${{ matrix.runner }}..." + python "${{ matrix.install-path }}\runTests.py" + + # Mac-specific steps + - name: Verify viewer-sikulix-main Exists (Mac) + if: matrix.os == 'mac' + shell: bash + run: | + if [ ! -d "${{ matrix.install-path }}" ]; then + echo "❌ Error: viewer-sikulix not found on runner!" + exit 1 + fi + echo "✅ viewer-sikulix is already available." 
+ + - name: Fetch & Download Installer Artifact (Mac) + if: matrix.os == 'mac' + shell: bash + run: | + # Mac-specific Bash commands + response=$(curl -H "Authorization: token ${{ secrets.GITHUB_TOKEN }}" -s ${{ env.ARTIFACTS_URL }}) + ARTIFACT_NAME=$(echo $response | jq -r '.artifacts[] | select(.name=="${{ matrix.artifact }}") | .archive_download_url') + + if [ -z "$ARTIFACT_NAME" ]; then + echo "❌ Error: ${{ matrix.artifact }} artifact not found!" + exit 1 + fi + + echo "✅ Artifact found: $ARTIFACT_NAME" + + # Secure download path + DOWNLOAD_PATH="/tmp/secondlife-build-${{ env.BUILD_ID }}" + mkdir -p $DOWNLOAD_PATH + INSTALLER_PATH="$DOWNLOAD_PATH/installer.zip" + + # Download the ZIP + curl -H "Authorization: token ${{ secrets.GITHUB_TOKEN }}" -L $ARTIFACT_NAME -o $INSTALLER_PATH + + # Ensure download succeeded + if [ ! -f "$INSTALLER_PATH" ]; then + echo "❌ Error: Failed to download ${{ matrix.artifact }}.zip" + exit 1 + fi + + # Set the path for other steps + echo "DOWNLOAD_PATH=$DOWNLOAD_PATH" >> $GITHUB_ENV + + - name: Extract Installer & Locate Executable (Mac) + if: matrix.os == 'mac' + shell: bash + run: | + EXTRACT_PATH="${{ env.DOWNLOAD_PATH }}" + INSTALLER_ZIP="$EXTRACT_PATH/installer.zip" + + # Debug output + echo "Extract Path: $EXTRACT_PATH" + echo "Installer ZIP Path: $INSTALLER_ZIP" + + # Verify ZIP exists + if [ ! -f "$INSTALLER_ZIP" ]; then + echo "❌ Error: ZIP file not found at $INSTALLER_ZIP!" + exit 1 + fi + + echo "✅ ZIP file exists and is valid. Extracting..." + + # Extract the ZIP + unzip -o "$INSTALLER_ZIP" -d "$EXTRACT_PATH" + + # Find DMG file + INSTALLER_PATH=$(find "$EXTRACT_PATH" -name "*.dmg" -type f | head -1) + + if [ -z "$INSTALLER_PATH" ]; then + echo "❌ Error: No installer DMG found in the extracted files!" + echo "📂 Extracted Files:" + ls -la "$EXTRACT_PATH" + exit 1 + fi + + echo "✅ Installer found: $INSTALLER_PATH" + echo "INSTALLER_PATH=$INSTALLER_PATH" >> $GITHUB_ENV + + - name: Install Second Life (Mac) + if: matrix.os == 'mac' + shell: bash + run: | + # Mac installation + echo "Mounting DMG installer..." + MOUNT_POINT="/tmp/secondlife-dmg" + mkdir -p "$MOUNT_POINT" + + # Mount the DMG + hdiutil attach "${{ env.INSTALLER_PATH }}" -mountpoint "$MOUNT_POINT" -nobrowse + + echo "✅ DMG mounted at $MOUNT_POINT" + + # Find the app in the mounted DMG + APP_PATH=$(find "$MOUNT_POINT" -name "*.app" -type d | head -1) + + if [ -z "$APP_PATH" ]; then + echo "❌ Error: No .app bundle found in the mounted DMG!" + exit 1 + fi + + echo "Installing application to Applications folder..." + + # Copy the app to the Applications folder (or specified install path) + cp -R "$APP_PATH" "${{ matrix.install-path }}" + + # Verify the app was copied successfully + if [ ! -d "${{ matrix.install-path }}/$(basename "$APP_PATH")" ]; then + echo "❌ Error: Failed to install application to ${{ matrix.install-path }}!" + exit 1 + fi + + echo "✅ Application installed successfully to ${{ matrix.install-path }}" + + # Save mount point for cleanup + echo "MOUNT_POINT=$MOUNT_POINT" >> $GITHUB_ENV + + - name: Wait for Installation to Complete (Mac) + if: matrix.os == 'mac' + shell: bash + run: | + echo "Waiting for installation to complete..." + # Sleep to allow installation to finish (adjust as needed) + sleep 30 + echo "✅ Installation completed" + + - name: Cleanup After Installation (Mac) + if: matrix.os == 'mac' + shell: bash + run: | + # Mac cleanup + # Unmount the DMG + echo "Unmounting DMG..." 
+ hdiutil detach "${{ env.MOUNT_POINT }}" -force + + # Clean up temporary files + echo "Cleaning up temporary files..." + rm -rf "${{ env.DOWNLOAD_PATH }}" + rm -rf "${{ env.MOUNT_POINT }}" + + echo "✅ Cleanup completed" + + - name: Run QA Test Script (Mac) + if: matrix.os == 'mac' + shell: bash + run: | + echo "Running QA Test script on Mac runner: ${{ matrix.runner }}..." + python "${{ matrix.install-path }}/runTests.py" # - name: Upload Test Results - # uses: actions/upload-artifact@v3 + # if: always() + # uses: actions/upload-artifact@v4 # with: - # name: test-results - # path: C:\viewer-sikulix-main\regressionTest\test_results.html + # name: test-results-${{ matrix.runner }} + # path: ${{ matrix.install-path }}/regressionTest/test_results.html From a30342a78b9309167c8c8d35a59049ce455b56b8 Mon Sep 17 00:00:00 2001 From: Ayane Date: Tue, 6 May 2025 14:26:35 +0200 Subject: [PATCH 06/24] Fix and optimize openjpeg J2C encoder (#4017, #4032) --- indra/llimagej2coj/llimagej2coj.cpp | 199 ++++++++++++++++------------ indra/llimagej2coj/llimagej2coj.h | 2 + 2 files changed, 116 insertions(+), 85 deletions(-) diff --git a/indra/llimagej2coj/llimagej2coj.cpp b/indra/llimagej2coj/llimagej2coj.cpp index c027aecfc9..c56e94aaa4 100644 --- a/indra/llimagej2coj/llimagej2coj.cpp +++ b/indra/llimagej2coj/llimagej2coj.cpp @@ -32,8 +32,6 @@ #include "event.h" #include "cio.h" -#define MAX_ENCODED_DISCARD_LEVELS 5 - // Factory function: see declaration in llimagej2c.cpp LLImageJ2CImpl* fallbackCreateLLImageJ2CImpl() { @@ -132,73 +130,96 @@ static void opj_error(const char* msg, void* user_data) static OPJ_SIZE_T opj_read(void * buffer, OPJ_SIZE_T bytes, void* user_data) { - llassert(user_data); + llassert(user_data && buffer); + JPEG2KBase* jpeg_codec = static_cast(user_data); - OPJ_SIZE_T remainder = (jpeg_codec->size - jpeg_codec->offset); - if (remainder <= 0) + + if (jpeg_codec->offset < 0 || static_cast(jpeg_codec->offset) >= jpeg_codec->size) { jpeg_codec->offset = jpeg_codec->size; - // Indicate end of stream (hacky?) - return (OPJ_OFF_T)-1; + return static_cast(-1); // Indicate EOF } - OPJ_SIZE_T to_read = llclamp(U32(bytes), U32(0), U32(remainder)); + + OPJ_SIZE_T remainder = jpeg_codec->size - static_cast(jpeg_codec->offset); + OPJ_SIZE_T to_read = (bytes < remainder) ? bytes : remainder; + memcpy(buffer, jpeg_codec->buffer + jpeg_codec->offset, to_read); jpeg_codec->offset += to_read; + return to_read; } static OPJ_SIZE_T opj_write(void * buffer, OPJ_SIZE_T bytes, void* user_data) { - llassert(user_data); + llassert(user_data && buffer); + JPEG2KBase* jpeg_codec = static_cast(user_data); - OPJ_SIZE_T remainder = jpeg_codec->size - jpeg_codec->offset; - if (remainder < bytes) + OPJ_OFF_T required_offset = jpeg_codec->offset + static_cast(bytes); + + // Overflow check + if (required_offset < jpeg_codec->offset) + return 0; // Overflow detected + + // Resize if needed (exponential growth) + if (required_offset > static_cast(jpeg_codec->size)) { - OPJ_SIZE_T new_size = jpeg_codec->size + (bytes - remainder); + OPJ_SIZE_T new_size = jpeg_codec->size ? 
jpeg_codec->size : 1024; + while (required_offset > static_cast(new_size)) + new_size *= 2; + + const OPJ_SIZE_T MAX_BUFFER_SIZE = 512 * 1024 * 1024; // 512 MB, increase if needed + if (new_size > MAX_BUFFER_SIZE) return 0; + U8* new_buffer = (U8*)ll_aligned_malloc_16(new_size); - memcpy(new_buffer, jpeg_codec->buffer, jpeg_codec->offset); - U8* old_buffer = jpeg_codec->buffer; + if (!new_buffer) return 0; // Allocation failed + + if (jpeg_codec->offset > 0) + memcpy(new_buffer, jpeg_codec->buffer, static_cast(jpeg_codec->offset)); + + ll_aligned_free_16(jpeg_codec->buffer); jpeg_codec->buffer = new_buffer; - ll_aligned_free_16(old_buffer); jpeg_codec->size = new_size; } - memcpy(jpeg_codec->buffer + jpeg_codec->offset, buffer, bytes); - jpeg_codec->offset += bytes; + + memcpy(jpeg_codec->buffer + jpeg_codec->offset, buffer, static_cast(bytes)); + jpeg_codec->offset = required_offset; return bytes; } static OPJ_OFF_T opj_skip(OPJ_OFF_T bytes, void* user_data) { + llassert(user_data); JPEG2KBase* jpeg_codec = static_cast(user_data); - jpeg_codec->offset += bytes; - if (jpeg_codec->offset > (OPJ_OFF_T)jpeg_codec->size) + OPJ_OFF_T new_offset = jpeg_codec->offset + bytes; + + if (new_offset < 0 || new_offset > static_cast(jpeg_codec->size)) { - jpeg_codec->offset = jpeg_codec->size; - // Indicate end of stream - return (OPJ_OFF_T)-1; - } - - if (jpeg_codec->offset < 0) - { - // Shouldn't be possible? - jpeg_codec->offset = 0; + // Clamp and indicate EOF or error + jpeg_codec->offset = llclamp(new_offset, 0, static_cast(jpeg_codec->size)); return (OPJ_OFF_T)-1; } + jpeg_codec->offset = new_offset; return bytes; } -static OPJ_BOOL opj_seek(OPJ_OFF_T bytes, void * user_data) +static OPJ_BOOL opj_seek(OPJ_OFF_T offset, void * user_data) { + llassert(user_data); JPEG2KBase* jpeg_codec = static_cast(user_data); - jpeg_codec->offset = bytes; - jpeg_codec->offset = llclamp(U32(jpeg_codec->offset), U32(0), U32(jpeg_codec->size)); + + if (offset < 0 || offset > static_cast(jpeg_codec->size)) + return OPJ_FALSE; + + jpeg_codec->offset = offset; return OPJ_TRUE; } static void opj_free_user_data(void * user_data) { + llassert(user_data); + JPEG2KBase* jpeg_codec = static_cast(user_data); // Don't free, data is managed externally jpeg_codec->buffer = nullptr; @@ -208,14 +229,54 @@ static void opj_free_user_data(void * user_data) static void opj_free_user_data_write(void * user_data) { + llassert(user_data); + JPEG2KBase* jpeg_codec = static_cast(user_data); // Free, data was allocated here - ll_aligned_free_16(jpeg_codec->buffer); - jpeg_codec->buffer = nullptr; + if (jpeg_codec->buffer) + { + ll_aligned_free_16(jpeg_codec->buffer); + jpeg_codec->buffer = nullptr; + } jpeg_codec->size = 0; jpeg_codec->offset = 0; } +/** + * Estimates the number of layers necessary depending on the image surface (w x h) + */ +static U32 estimate_num_layers(U32 surface) +{ + if (surface <= 1024) return 2; // Tiny (≤32×32) + else if (surface <= 16384) return 3; // Small (≤128×128) + else if (surface <= 262144) return 4; // Medium (≤512×512) + else if (surface <= 1048576) return 5; // Up to ~1MP + else return 6; // Up to ~1.5–2MP +} + +/** + * Sets the parameters.tcp_rates according to the number of layers and a last tcp_rate value (which equals to the final compression ratio). 
+ * + * Example for 6 layers: + * + * i = 5, parameters.tcp_rates[6 - 1 - 5] = 8.0f * (1 << (5 << 1)) = 8192 // Layer 5 (lowest quality) + * i = 4, parameters.tcp_rates[6 - 1 - 4] = 8.0f * (1 << (4 << 1)) = 2048 // Layer 4 + * i = 3, parameters.tcp_rates[6 - 1 - 3] = 8.0f * (1 << (3 << 1)) = 512 // Layer 3 + * i = 2, parameters.tcp_rates[6 - 1 - 2] = 8.0f * (1 << (2 << 1)) = 128 // Layer 2 + * i = 1, parameters.tcp_rates[6 - 1 - 1] = 8.0f * (1 << (1 << 1)) = 32 // Layer 1 + * i = 0, parameters.tcp_rates[6 - 1 - 0] = 8.0f * (1 << (0 << 1)) = 8 // Layer 0 (highest quality) + * + */ +static void set_tcp_rates(opj_cparameters_t* parameters, U32 num_layers = 1, F32 last_tcp_rate = LAST_TCP_RATE) +{ + parameters->tcp_numlayers = num_layers; + + for (int i = num_layers - 1; i >= 0; i--) + { + parameters->tcp_rates[num_layers - 1 - i] = last_tcp_rate * static_cast(1 << (i << 1)); + } +} + class JPEG2KDecode : public JPEG2KBase { public: @@ -430,15 +491,16 @@ public: opj_set_default_encoder_parameters(¶meters); parameters.cod_format = OPJ_CODEC_J2K; - parameters.cp_disto_alloc = 1; + parameters.prog_order = OPJ_RLCP; // should be the default, but, just in case + parameters.cp_disto_alloc = 1; // enable rate allocation by distortion + parameters.max_cs_size = 0; // do not cap max size because we're using tcp_rates and also irrelevant with lossless. if (reversible) { - parameters.max_cs_size = 0; // do not limit size for reversible compression parameters.irreversible = 0; // should be the default, but, just in case parameters.tcp_numlayers = 1; /* documentation seems to be wrong, should be 0.0f for lossless, not 1.0f - see https://github.com/uclouvain/openjpeg/blob/39e8c50a2f9bdcf36810ee3d41bcbf1cc78968ae/src/lib/openjp2/j2k.c#L7755 + see https://github.com/uclouvain/openjpeg/blob/e7453e398b110891778d8da19209792c69ca7169/src/lib/openjp2/j2k.c#L7817 */ parameters.tcp_rates[0] = 0.0f; } @@ -493,53 +555,22 @@ public: encoder = opj_create_compress(OPJ_CODEC_J2K); - parameters.tcp_mct = (image->numcomps >= 3) ? 1 : 0; - parameters.cod_format = OPJ_CODEC_J2K; - parameters.prog_order = OPJ_RLCP; - parameters.cp_disto_alloc = 1; + parameters.tcp_mct = (image->numcomps >= 3) ? 
1 : 0; // no color transform for RGBA images + // if not lossless compression, computes tcp_numlayers and max_cs_size depending on the image dimensions - if( parameters.irreversible ) { + if( parameters.irreversible ) + { // computes a number of layers U32 surface = rawImageIn.getWidth() * rawImageIn.getHeight(); - U32 nb_layers = 1; - U32 s = 64*64; - while (surface > s) - { - nb_layers++; - s *= 4; - } - nb_layers = llclamp(nb_layers, 1, 6); - parameters.tcp_numlayers = nb_layers; - parameters.tcp_rates[nb_layers - 1] = (U32)(1.f / DEFAULT_COMPRESSION_RATE); // 1:8 by default + // gets the necessary number of layers + U32 nb_layers = estimate_num_layers(surface); - // for each subsequent layer, computes its rate and adds surface * numcomps * 1/rate to the max_cs_size - U32 max_cs_size = (U32)(surface * image->numcomps * DEFAULT_COMPRESSION_RATE); - U32 multiplier; - for (int i = nb_layers - 2; i >= 0; i--) - { - if( i == nb_layers - 2 ) - { - multiplier = 15; - } - else if( i == nb_layers - 3 ) - { - multiplier = 4; - } - else - { - multiplier = 2; - } - parameters.tcp_rates[i] = parameters.tcp_rates[i + 1] * multiplier; - max_cs_size += (U32)(surface * image->numcomps * (1 / parameters.tcp_rates[i])); - } + // fills parameters.tcp_rates and updates parameters.tcp_numlayers + set_tcp_rates(¶meters, nb_layers, LAST_TCP_RATE); - //ensure that we have at least a minimal size - max_cs_size = llmax(max_cs_size, (U32)FIRST_PACKET_SIZE); - - parameters.max_cs_size = max_cs_size; } if (!opj_setup_encoder(encoder, ¶meters, image)) @@ -579,7 +610,7 @@ public: opj_stream_destroy(stream); } - stream = opj_stream_create(data_size_guess, false); + stream = opj_stream_create(data_size_guess, OPJ_FALSE); if (!stream) { return false; @@ -620,17 +651,15 @@ public: void setImage(const LLImageRaw& raw) { - opj_image_cmptparm_t cmptparm[MAX_ENCODED_DISCARD_LEVELS]; - memset(&cmptparm[0], 0, MAX_ENCODED_DISCARD_LEVELS * sizeof(opj_image_cmptparm_t)); - S32 numcomps = raw.getComponents(); - S32 width = raw.getWidth(); - S32 height = raw.getHeight(); + S32 width = raw.getWidth(); + S32 height = raw.getHeight(); + + std::vector cmptparm(numcomps); for (S32 c = 0; c < numcomps; c++) { - cmptparm[c].prec = 8; - cmptparm[c].bpp = 8; + cmptparm[c].prec = 8; // replaces .bpp cmptparm[c].sgnd = 0; cmptparm[c].dx = parameters.subsampling_dx; cmptparm[c].dy = parameters.subsampling_dy; @@ -638,7 +667,7 @@ public: cmptparm[c].h = height; } - image = opj_image_create(numcomps, &cmptparm[0], OPJ_CLRSPC_SRGB); + image = opj_image_create(numcomps, cmptparm.data(), OPJ_CLRSPC_SRGB); image->x1 = width; image->y1 = height; @@ -650,7 +679,7 @@ public: { for (S32 x = 0; x < width; x++) { - const U8 *pixel = src_datap + (y*width + x) * numcomps; + const U8 *pixel = src_datap + (y * width + x) * numcomps; for (S32 c = 0; c < numcomps; c++) { image->comps[c].data[i] = *pixel; diff --git a/indra/llimagej2coj/llimagej2coj.h b/indra/llimagej2coj/llimagej2coj.h index 498502451a..da49597302 100644 --- a/indra/llimagej2coj/llimagej2coj.h +++ b/indra/llimagej2coj/llimagej2coj.h @@ -29,6 +29,8 @@ #include "llimagej2c.h" +const F32 LAST_TCP_RATE = 1.f/DEFAULT_COMPRESSION_RATE; // should be 8, giving a 1:8 ratio + class LLImageJ2COJ : public LLImageJ2CImpl { public: From 4bb51a5f276ba96c08ed8b204ab8a32feb9af037 Mon Sep 17 00:00:00 2001 From: AtlasLinden <114031241+AtlasLinden@users.noreply.github.com> Date: Tue, 6 May 2025 06:28:57 -0700 Subject: [PATCH 07/24] Add permissions to QA Workflow Co-authored-by: Copilot Autofix powered by AI 
<62310815+github-advanced-security[bot]@users.noreply.github.com> --- .github/workflows/qatest.yaml | 3 +++ 1 file changed, 3 insertions(+) diff --git a/.github/workflows/qatest.yaml b/.github/workflows/qatest.yaml index 5a609fdc5b..f1835a4b7f 100644 --- a/.github/workflows/qatest.yaml +++ b/.github/workflows/qatest.yaml @@ -1,5 +1,8 @@ name: Run QA Test # Runs automated tests on a self-hosted QA machine +permissions: + contents: read + on: workflow_run: workflows: ["Build"] From 2c176c75fc6951388668e41bd8bb59a5190b0f07 Mon Sep 17 00:00:00 2001 From: AtlasLinden <114031241+AtlasLinden@users.noreply.github.com> Date: Tue, 6 May 2025 11:02:57 -0700 Subject: [PATCH 08/24] Resolve qatest.yaml concurrency group error Error: "The workflow is not valid. .github/workflows/qatest.yaml (Line: 23, Col: 10): Unrecognized named-value: 'matrix'. Located at position 1 within expression: matrix.runner" --- .github/workflows/qatest.yaml | 8 ++------ 1 file changed, 2 insertions(+), 6 deletions(-) diff --git a/.github/workflows/qatest.yaml b/.github/workflows/qatest.yaml index f1835a4b7f..96ce672d4c 100644 --- a/.github/workflows/qatest.yaml +++ b/.github/workflows/qatest.yaml @@ -1,4 +1,4 @@ -name: Run QA Test # Runs automated tests on a self-hosted QA machine +name: Run QA Test # Runs automated tests on self-hosted QA machines permissions: contents: read @@ -10,17 +10,13 @@ on: - completed workflow_dispatch: inputs: - branch_name: - description: 'Branch name to simulate workflow (e.g. develop)' - required: true - default: 'develop' build_id: description: 'Build workflow run ID (e.g. For github.com/secondlife/viewer/actions/runs/1234567890 the ID is 1234567890)' required: true default: '14806728332' concurrency: - group: qa-test-run-${{ matrix.runner }} + group: ${{ github.workflow }}-${{ github.ref }} cancel-in-progress: false # Prevents cancellation of in-progress jobs jobs: From 469730f18798f0c4baba17759bdedbb3dd342214 Mon Sep 17 00:00:00 2001 From: AtlasLinden <114031241+AtlasLinden@users.noreply.github.com> Date: Tue, 6 May 2025 12:31:18 -0700 Subject: [PATCH 09/24] Separate Build ID step for each OS --- .github/workflows/qatest.yaml | 18 +++++++++++++++--- 1 file changed, 15 insertions(+), 3 deletions(-) diff --git a/.github/workflows/qatest.yaml b/.github/workflows/qatest.yaml index 96ce672d4c..f3f93a9c55 100644 --- a/.github/workflows/qatest.yaml +++ b/.github/workflows/qatest.yaml @@ -62,9 +62,10 @@ jobs: github.event_name == 'workflow_dispatch' steps: - # Common steps for both OSes + # Windows-specific steps - name: Set Build ID - shell: bash + if: matrix.os == 'windows' + shell: pwsh run: | if [[ "${{ github.event_name }}" == "workflow_dispatch" ]]; then echo "BUILD_ID=${{ github.event.inputs.build_id }}" >> $GITHUB_ENV @@ -74,7 +75,6 @@ jobs: echo "ARTIFACTS_URL=https://api.github.com/repos/secondlife/viewer/actions/runs/${{ github.event.workflow_run.id }}/artifacts" >> $GITHUB_ENV fi - # Windows-specific steps - name: Temporarily Allow PowerShell Scripts (Windows) if: matrix.os == 'windows' shell: pwsh @@ -213,6 +213,18 @@ jobs: python "${{ matrix.install-path }}\runTests.py" # Mac-specific steps + - name: Set Build ID (Mac) + if: matrix.os == 'mac' + shell: bash + run: | + if [[ "${{ github.event_name }}" == "workflow_dispatch" ]]; then + echo "BUILD_ID=${{ github.event.inputs.build_id }}" >> $GITHUB_ENV + echo "ARTIFACTS_URL=https://api.github.com/repos/secondlife/viewer/actions/runs/${{ github.event.inputs.build_id }}/artifacts" >> $GITHUB_ENV + else + echo "BUILD_ID=${{ 
github.event.workflow_run.id }}" >> $GITHUB_ENV + echo "ARTIFACTS_URL=https://api.github.com/repos/secondlife/viewer/actions/runs/${{ github.event.workflow_run.id }}/artifacts" >> $GITHUB_ENV + fi + - name: Verify viewer-sikulix-main Exists (Mac) if: matrix.os == 'mac' shell: bash From 8c5df1ad9d812a50e0ed5281bcafbfb867d670eb Mon Sep 17 00:00:00 2001 From: AtlasLinden <114031241+AtlasLinden@users.noreply.github.com> Date: Tue, 6 May 2025 12:41:08 -0700 Subject: [PATCH 10/24] Update Windows Build ID step to use pwsh syntax --- .github/workflows/qatest.yaml | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/.github/workflows/qatest.yaml b/.github/workflows/qatest.yaml index f3f93a9c55..43fdb86b7e 100644 --- a/.github/workflows/qatest.yaml +++ b/.github/workflows/qatest.yaml @@ -67,13 +67,13 @@ jobs: if: matrix.os == 'windows' shell: pwsh run: | - if [[ "${{ github.event_name }}" == "workflow_dispatch" ]]; then - echo "BUILD_ID=${{ github.event.inputs.build_id }}" >> $GITHUB_ENV - echo "ARTIFACTS_URL=https://api.github.com/repos/secondlife/viewer/actions/runs/${{ github.event.inputs.build_id }}/artifacts" >> $GITHUB_ENV - else - echo "BUILD_ID=${{ github.event.workflow_run.id }}" >> $GITHUB_ENV - echo "ARTIFACTS_URL=https://api.github.com/repos/secondlife/viewer/actions/runs/${{ github.event.workflow_run.id }}/artifacts" >> $GITHUB_ENV - fi + if ("${{ github.event_name }}" -eq "workflow_dispatch") { + echo "BUILD_ID=${{ github.event.inputs.build_id }}" | Out-File -FilePath $env:GITHUB_ENV -Append + echo "ARTIFACTS_URL=https://api.github.com/repos/secondlife/viewer/actions/runs/${{ github.event.inputs.build_id }}/artifacts" | Out-File -FilePath $env:GITHUB_ENV -Append + } else { + echo "BUILD_ID=${{ github.event.workflow_run.id }}" | Out-File -FilePath $env:GITHUB_ENV -Append + echo "ARTIFACTS_URL=https://api.github.com/repos/secondlife/viewer/actions/runs/${{ github.event.workflow_run.id }}/artifacts" | Out-File -FilePath $env:GITHUB_ENV -Append + } - name: Temporarily Allow PowerShell Scripts (Windows) if: matrix.os == 'windows' From 2c0e90a72de6fe79713db2390d91d6d0b9ecfded Mon Sep 17 00:00:00 2001 From: TJ Date: Wed, 7 May 2025 19:48:03 +1000 Subject: [PATCH 11/24] Fixed World Map Find sending two requests and possibly showing wrong results (#4037, #4038) --- indra/newview/skins/default/xui/en/floater_world_map.xml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/indra/newview/skins/default/xui/en/floater_world_map.xml b/indra/newview/skins/default/xui/en/floater_world_map.xml index 5ab0177de6..f65d95e344 100644 --- a/indra/newview/skins/default/xui/en/floater_world_map.xml +++ b/indra/newview/skins/default/xui/en/floater_world_map.xml @@ -556,7 +556,7 @@ name="DoSearch" tool_tip="Search for region" width="62"> -