First HTTP pipelining viewer. Enable pipelining for GetTexture and GetMesh2 at a pipeline depth of 5. Create a global debug option, HttpPipelining, to enable and disable HTTP pipelining (defaults to true). Tweak texture and mesh low- and high-water request levels based on pipelining status and depth. Fix up the texture console, which was damaged in a recent release. Split logging of the no-request HTTP error case into two cases: one for a missing URL in the HTTP request, one for an HTTP request that was not created. A refactor in llcorehttp is coming: all libcurl-using code will be moved into libcurl-specific modules.

Branch: master
Parent: d16e1b1b55
Commit: 5cca78e718
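For orientation, the heart of the change is turning on libcurl's HTTP/1.1 pipelining per multi handle. The sketch below is editorial, not code from this commit; the helper name and the depth/connection numbers are invented, but the CURLMOPT_* options are the same ones applied per policy class in the httplibcurl changes further down.

    // Minimal sketch, assuming a libcurl with pipelining support (7.30+):
    // configure a multi handle the way the diff below does per policy class.
    #include <curl/curl.h>

    CURLM * make_pipelined_multi(long depth, long per_host, long total)
    {
        CURLM * multi(curl_multi_init());
        if (! multi)
            return NULL;
        curl_multi_setopt(multi, CURLMOPT_PIPELINING, 1L);                  // allow request pipelining
        curl_multi_setopt(multi, CURLMOPT_MAX_PIPELINE_LENGTH, depth);      // outstanding requests per connection (e.g. 5)
        curl_multi_setopt(multi, CURLMOPT_MAX_HOST_CONNECTIONS, per_host);  // per-host connection cap
        curl_multi_setopt(multi, CURLMOPT_MAX_TOTAL_CONNECTIONS, total);    // overall connection cap
        return multi;
    }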
@@ -145,8 +145,11 @@ const int HTTP_CONNECTION_LIMIT_DEFAULT = 8;
const int HTTP_CONNECTION_LIMIT_MIN = 1;
const int HTTP_CONNECTION_LIMIT_MAX = 256;

// Miscellaneous defaults
// Pipelining limits
const long HTTP_PIPELINING_DEFAULT = 0L;
const long HTTP_PIPELINING_MAX = 20L;

// Miscellaneous defaults
const bool HTTP_USE_RETRY_AFTER_DEFAULT = true;
const long HTTP_THROTTLE_RATE_DEFAULT = 0L;

@@ -4,7 +4,7 @@
*
* $LicenseInfo:firstyear=2012&license=viewerlgpl$
* Second Life Viewer Source Code
* Copyright (C) 2012-2013, Linden Research, Inc.
* Copyright (C) 2012-2014, Linden Research, Inc.
*
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public

@@ -33,6 +33,15 @@
#include "llhttpconstants.h"

namespace
{

// Error testing and reporting for libcurl status codes
void check_curl_multi_code(CURLMcode code);
void check_curl_multi_code(CURLMcode code, int curl_setopt_option);

} // end anonymous namespace

namespace LLCore
{

@@ -92,14 +101,44 @@ void HttpLibcurl::start(int policy_count)
llassert_always(policy_count <= HTTP_POLICY_CLASS_LIMIT);
llassert_always(! mMultiHandles); // One-time call only

HttpPolicy & policy(mService->getPolicy());
mPolicyCount = policy_count;
mMultiHandles = new CURLM * [mPolicyCount];
mActiveHandles = new int [mPolicyCount];

for (int policy_class(0); policy_class < mPolicyCount; ++policy_class)
{
mMultiHandles[policy_class] = curl_multi_init();
HttpPolicyClass & options(policy.getClassOptions(policy_class));

mActiveHandles[policy_class] = 0;
if (NULL == (mMultiHandles[policy_class] = curl_multi_init()))
{
LL_ERRS("CoreHttp") << "Failed to allocate multi handle in libcurl."
<< LL_ENDL;
}

if (options.mPipelining > 1)
{
CURLMcode code;

// We'll try to do pipelining on this multihandle
code = curl_multi_setopt(mMultiHandles[policy_class],
CURLMOPT_PIPELINING,
1L);
check_curl_multi_code(code, CURLMOPT_PIPELINING);
code = curl_multi_setopt(mMultiHandles[policy_class],
CURLMOPT_MAX_PIPELINE_LENGTH,
long(options.mPipelining));
check_curl_multi_code(code, CURLMOPT_MAX_PIPELINE_LENGTH);
code = curl_multi_setopt(mMultiHandles[policy_class],
CURLMOPT_MAX_HOST_CONNECTIONS,
long(options.mPerHostConnectionLimit));
check_curl_multi_code(code, CURLMOPT_MAX_HOST_CONNECTIONS);
code = curl_multi_setopt(mMultiHandles[policy_class],
CURLMOPT_MAX_TOTAL_CONNECTIONS,
long(options.mConnectionLimit));
check_curl_multi_code(code, CURLMOPT_MAX_TOTAL_CONNECTIONS);
}
}
}

@@ -376,3 +415,29 @@ struct curl_slist * append_headers_to_slist(const HttpHeaders * headers, struct

} // end namespace LLCore

namespace
{

void check_curl_multi_code(CURLMcode code, int curl_setopt_option)
{
if (CURLM_OK != code)
{
LL_WARNS("CoreHttp") << "libcurl multi error detected: " << curl_multi_strerror(code)
<< ", curl_multi_setopt option: " << curl_setopt_option
<< LL_ENDL;
}
}

void check_curl_multi_code(CURLMcode code)
{
if (CURLM_OK != code)
{
LL_WARNS("CoreHttp") << "libcurl multi error detected: " << curl_multi_strerror(code)
<< LL_ENDL;
}
}

} // end anonymous namespace

@@ -4,7 +4,7 @@
*
* $LicenseInfo:firstyear=2012&license=viewerlgpl$
* Second Life Viewer Source Code
* Copyright (C) 2012-2013, Linden Research, Inc.
* Copyright (C) 2012-2014, Linden Research, Inc.
*
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public

@@ -128,7 +128,8 @@ void HttpPolicy::shutdown()

void HttpPolicy::start()
{}
{
}

void HttpPolicy::addOp(HttpOpRequest * op)

@@ -234,7 +235,11 @@ HttpService::ELoopSpeed HttpPolicy::processReadyQueue()
}

int active(transport.getActiveCountInClass(policy_class));
int needed(state.mOptions.mConnectionLimit - active); // Expect negatives here
int active_limit(state.mOptions.mPipelining > 1L
? (state.mOptions.mPerHostConnectionLimit
* state.mOptions.mPipelining)
: state.mOptions.mConnectionLimit);
int needed(active_limit - active); // Expect negatives here

if (needed > 0)
{

@@ -4,7 +4,7 @@
*
* $LicenseInfo:firstyear=2012&license=viewerlgpl$
* Second Life Viewer Source Code
* Copyright (C) 2012-2013, Linden Research, Inc.
* Copyright (C) 2012-2014, Linden Research, Inc.
*
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public

@@ -78,8 +78,8 @@ HttpStatus HttpPolicyClass::set(HttpRequest::EPolicyOption opt, long value)
mPerHostConnectionLimit = llclamp(value, long(HTTP_CONNECTION_LIMIT_MIN), mConnectionLimit);
break;

case HttpRequest::PO_ENABLE_PIPELINING:
mPipelining = llclamp(value, 0L, 1L);
case HttpRequest::PO_PIPELINING_DEPTH:
mPipelining = llclamp(value, 0L, HTTP_PIPELINING_MAX);
break;

case HttpRequest::PO_THROTTLE_RATE:

@@ -106,7 +106,7 @@ HttpStatus HttpPolicyClass::get(HttpRequest::EPolicyOption opt, long * value) co
*value = mPerHostConnectionLimit;
break;

case HttpRequest::PO_ENABLE_PIPELINING:
case HttpRequest::PO_PIPELINING_DEPTH:
*value = mPipelining;
break;

@@ -4,7 +4,7 @@
*
* $LicenseInfo:firstyear=2012&license=viewerlgpl$
* Second Life Viewer Source Code
* Copyright (C) 2012-2013, Linden Research, Inc.
* Copyright (C) 2012-2014, Linden Research, Inc.
*
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public

@@ -183,11 +183,38 @@ public:
/// Global only
PO_TRACE,

/// Suitable requests are allowed to pipeline on their
/// connections when they ask for it.
/// If greater than 1, suitable requests are allowed to
/// pipeline on their connections when they ask for it.
/// Value gives the maximum number of outstanding requests
/// on a connection.
///
/// There is some interaction between PO_CONNECTION_LIMIT,
/// PO_PER_HOST_CONNECTION_LIMIT, and PO_PIPELINING_DEPTH.
/// When PIPELINING_DEPTH is 0 or 1 (no pipelining), this
/// library manages connection lifecycle and honors the
/// PO_CONNECTION_LIMIT setting as the maximum in-flight
/// request limit. Libcurl itself may be caching additional
/// connections under its connection cache policy.
///
/// When PIPELINING_DEPTH is 2 or more, libcurl performs
/// connection management and both PO_CONNECTION_LIMIT and
/// PO_PER_HOST_CONNECTION_LIMIT should be set and non-zero.
/// In this case (as of libcurl 7.37.0), libcurl will
/// open new connections in preference to pipelining, up
/// to the above limits at which time pipelining begins.
/// And as usual, an additional cache of open but inactive
/// connections may still be maintained within libcurl.
/// For SL, a good rule-of-thumb is to set
/// PO_PER_HOST_CONNECTION_LIMIT to the user-visible
/// concurrency value and PO_CONNECTION_LIMIT to twice
/// that for baked texture loads and region crossings where
/// additional connection load will be tolerated. If
/// either limit is 0, libcurl will prefer pipelining
/// over connection creation, which is still interesting,
/// but won't be pursued at this time.
///
/// Per-class only
PO_ENABLE_PIPELINING,
PO_PIPELINING_DEPTH,

/// Controls whether client-side throttling should be
/// performed on this policy class. Positive values

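The rule of thumb above translates into a few option calls. A hedged sketch only, reusing the calls that appear in the llappcorehttp changes below; 'req', 'pclass', and the concrete numbers (per-host 8, total 16, depth 5) are illustrative:

    // Assumed: 'pclass' came from LLCore::HttpRequest::createPolicyClass()
    // and 'req' is an LLCore::HttpRequest instance.
    long concurrency(8L);
    LLCore::HttpRequest::setStaticPolicyOption(LLCore::HttpRequest::PO_PIPELINING_DEPTH,
                                               pclass, 5L, NULL);          // depth > 1 turns pipelining on
    req->setPolicyOption(LLCore::HttpRequest::PO_PER_HOST_CONNECTION_LIMIT,
                         pclass, concurrency, NULL);                       // steady-state per-host cap
    req->setPolicyOption(LLCore::HttpRequest::PO_CONNECTION_LIMIT,
                         pclass, 2 * concurrency, NULL);                   // headroom for transitions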
@@ -4467,6 +4467,17 @@
<key>Value</key>
<string />
</map>
<key>HttpPipelining</key>
<map>
<key>Comment</key>
<string>If true, viewer will pipeline HTTP requests to servers. Static.</string>
<key>Persist</key>
<integer>1</integer>
<key>Type</key>
<string>Boolean</string>
<key>Value</key>
<integer>1</integer>
</map>
<key>IMShowTimestamps</key>
<map>
<key>Comment</key>

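The new debug setting is read once at startup. A hedged sketch of that read, mirroring the LLAppCoreHttp::init() change below (the local variable name is illustrative):

    bool pipelined(true);                                   // default to true when the setting is absent
    if (gSavedSettings.controlExists("HttpPipelining"))
    {
        pipelined = gSavedSettings.getBOOL("HttpPipelining");
    }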
@@ -4,7 +4,7 @@
*
* $LicenseInfo:firstyear=2012&license=viewerlgpl$
* Second Life Viewer Source Code
* Copyright (C) 2012-2013, Linden Research, Inc.
* Copyright (C) 2012-2014, Linden Research, Inc.
*
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public

@@ -40,6 +40,8 @@
// be open at a time.

const F64 LLAppCoreHttp::MAX_THREAD_WAIT_TIME(10.0);
const long LLAppCoreHttp::PIPELINING_DEPTH(5L);

static const struct
{
LLAppCoreHttp::EAppPolicy mPolicy;

@@ -47,42 +49,43 @@ static const struct
U32 mMin;
U32 mMax;
U32 mRate;
bool mPipelined;
std::string mKey;
const char * mUsage;
} init_data[] = // Default and dynamic values for classes
{
{
LLAppCoreHttp::AP_DEFAULT, 8, 8, 8, 0,
LLAppCoreHttp::AP_DEFAULT, 8, 8, 8, 0, false,
"",
"other"
},
{
LLAppCoreHttp::AP_TEXTURE, 8, 1, 12, 0,
LLAppCoreHttp::AP_TEXTURE, 8, 1, 12, 0, true,
"TextureFetchConcurrency",
"texture fetch"
},
{
LLAppCoreHttp::AP_MESH1, 32, 1, 128, 100,
LLAppCoreHttp::AP_MESH1, 32, 1, 128, 100, false,
"MeshMaxConcurrentRequests",
"mesh fetch"
},
{
LLAppCoreHttp::AP_MESH2, 8, 1, 32, 100,
LLAppCoreHttp::AP_MESH2, 8, 1, 32, 100, true,
"Mesh2MaxConcurrentRequests",
"mesh2 fetch"
},
{
LLAppCoreHttp::AP_LARGE_MESH, 2, 1, 8, 0,
LLAppCoreHttp::AP_LARGE_MESH, 2, 1, 8, 0, false,
"",
"large mesh fetch"
},
{
LLAppCoreHttp::AP_UPLOADS, 2, 1, 8, 0,
LLAppCoreHttp::AP_UPLOADS, 2, 1, 8, 0, false,
"",
"asset upload"
},
{
LLAppCoreHttp::AP_LONG_POLL, 32, 32, 32, 0,
LLAppCoreHttp::AP_LONG_POLL, 32, 32, 32, 0, false,
"",
"long poll"
}

@@ -91,18 +94,20 @@ static const struct
static void setting_changed();

LLAppCoreHttp::HttpClass::HttpClass()
: mPolicy(LLCore::HttpRequest::DEFAULT_POLICY_ID),
mConnLimit(0U),
mPipelined(false)
{}

LLAppCoreHttp::LLAppCoreHttp()
: mRequest(NULL),
mStopHandle(LLCORE_HTTP_HANDLE_INVALID),
mStopRequested(0.0),
mStopped(false)
{
for (int i(0); i < LL_ARRAY_SIZE(mPolicies); ++i)
{
mPolicies[i] = LLCore::HttpRequest::DEFAULT_POLICY_ID;
mSettings[i] = 0U;
}
}
mStopped(false),
mPipelined(true)
{}

LLAppCoreHttp::~LLAppCoreHttp()

@@ -121,6 +126,14 @@ void LLAppCoreHttp::init()
<< LL_ENDL;
}

// Global pipelining preference from settings
static const std::string http_pipelining("HttpPipelining");
if (gSavedSettings.controlExists(http_pipelining))
{
// Default to true if absent.
mPipelined = gSavedSettings.getBOOL(http_pipelining);
}

// Point to our certs or SSH/https: will fail on connect
status = LLCore::HttpRequest::setStaticPolicyOption(LLCore::HttpRequest::PO_CA_FILE,
LLCore::HttpRequest::GLOBAL_POLICY_ID,

@@ -157,27 +170,27 @@ void LLAppCoreHttp::init()
}

// Setup default policy and constrain if directed to
mPolicies[AP_DEFAULT] = LLCore::HttpRequest::DEFAULT_POLICY_ID;
mHttpClasses[AP_DEFAULT].mPolicy = LLCore::HttpRequest::DEFAULT_POLICY_ID;

// Setup additional policies based on table and some special rules
for (int i(0); i < LL_ARRAY_SIZE(init_data); ++i)
{
const EAppPolicy policy(init_data[i].mPolicy);
const EAppPolicy app_policy(init_data[i].mPolicy);

if (AP_DEFAULT == policy)
if (AP_DEFAULT == app_policy)
{
// Pre-created
continue;
}

mPolicies[policy] = LLCore::HttpRequest::createPolicyClass();
if (! mPolicies[policy])
mHttpClasses[app_policy].mPolicy = LLCore::HttpRequest::createPolicyClass();
if (! mHttpClasses[app_policy].mPolicy)
{
// Use default policy (but don't accidentally modify default)
LL_WARNS("Init") << "Failed to create HTTP policy class for " << init_data[i].mUsage
<< ". Using default policy."
<< LL_ENDL;
mPolicies[policy] = mPolicies[AP_DEFAULT];
mHttpClasses[app_policy].mPolicy = mHttpClasses[AP_DEFAULT].mPolicy;
continue;
}
}

@@ -196,6 +209,9 @@ void LLAppCoreHttp::init()
<< LL_ENDL;
}

// *NOTE: Pipelining isn't dynamic yet. When it is, add a global
// signal for the setting here.

// Register signals for settings and state changes
for (int i(0); i < LL_ARRAY_SIZE(init_data); ++i)
{

@@ -209,7 +225,7 @@ void LLAppCoreHttp::init()
}
else
{
mSettingsSignal[i] = cntrl_ptr->getCommitSignal()->connect(boost::bind(&setting_changed));
mHttpClasses[i].mSettingsSignal = cntrl_ptr->getCommitSignal()->connect(boost::bind(&setting_changed));
}
}
}

@@ -261,9 +277,9 @@ void LLAppCoreHttp::cleanup()
}
}

for (int i(0); i < LL_ARRAY_SIZE(init_data); ++i)
for (int i(0); i < LL_ARRAY_SIZE(mHttpClasses); ++i)
{
mSettingsSignal[i].disconnect();
mHttpClasses[i].mSettingsSignal.disconnect();
}

delete mRequest;

@@ -278,30 +294,57 @@ void LLAppCoreHttp::cleanup()
}
}

void LLAppCoreHttp::refreshSettings(bool initial)
{
LLCore::HttpStatus status;

for (int i(0); i < LL_ARRAY_SIZE(init_data); ++i)
{
const EAppPolicy policy(init_data[i].mPolicy);
const EAppPolicy app_policy(init_data[i].mPolicy);

// Set any desired throttle
if (initial && init_data[i].mRate)
if (initial)
{
// Init-time only, can use the static setters here
status = LLCore::HttpRequest::setStaticPolicyOption(LLCore::HttpRequest::PO_THROTTLE_RATE,
mPolicies[policy],
init_data[i].mRate,
NULL);
if (! status)
// Init-time only settings, can use the static setters here

if (init_data[i].mRate)
{
LL_WARNS("Init") << "Unable to set " << init_data[i].mUsage
<< " throttle rate. Reason: " << status.toString()
<< LL_ENDL;
// Set any desired throttle
status = LLCore::HttpRequest::setStaticPolicyOption(LLCore::HttpRequest::PO_THROTTLE_RATE,
mHttpClasses[app_policy].mPolicy,
init_data[i].mRate,
NULL);
if (! status)
{
LL_WARNS("Init") << "Unable to set " << init_data[i].mUsage
<< " throttle rate. Reason: " << status.toString()
<< LL_ENDL;
}
}

mHttpClasses[app_policy].mPipelined = false;
if (mPipelined && init_data[i].mPipelined)
{
// Pipelining election is currently static (init-time).
status = LLCore::HttpRequest::setStaticPolicyOption(LLCore::HttpRequest::PO_PIPELINING_DEPTH,
mHttpClasses[app_policy].mPolicy,
PIPELINING_DEPTH,
NULL);
if (! status)
{
LL_WARNS("Init") << "Unable to set " << init_data[i].mUsage
<< " to pipelined mode. Reason: " << status.toString()
<< LL_ENDL;
}
else
{
mHttpClasses[app_policy].mPipelined = true;
}
}
}

// Init- or run-time settings

// Get target connection concurrency value
U32 setting(init_data[i].mDefault);
if (! init_data[i].mKey.empty() && gSavedSettings.controlExists(init_data[i].mKey))

@@ -314,19 +357,31 @@ void LLAppCoreHttp::refreshSettings(bool initial)
}
}

if (! initial && setting == mSettings[policy])
if (! initial && setting == mHttpClasses[app_policy].mConnLimit)
{
// Unchanged, try next setting
continue;
}

// Set it and report
// *TODO: These are intended to be per-host limits when we can
// support that in llcorehttp/libcurl.
// Set it and report. Strategies depend on pipelining:
//
// No Pipelining. Llcorehttp manages connections itself based
// on the PO_CONNECTION_LIMIT setting. Set both limits to the
// same value for logical consistency. In the future, may
// hand over connection management to libcurl after the
// connection cache has been better vetted.
//
// Pipelining. Libcurl is allowed to manage connections to a
// great degree. Steady state will connection limit based on
// the per-host setting. Transitions (region crossings, new
// avatars, etc.) can request additional outbound connections
// to other servers via 2X total connection limit.
//
LLCore::HttpHandle handle;
handle = mRequest->setPolicyOption(LLCore::HttpRequest::PO_CONNECTION_LIMIT,
mPolicies[policy],
setting, NULL);
mHttpClasses[app_policy].mPolicy,
(mHttpClasses[app_policy].mPipelined ? 2 * setting : setting),
NULL);
if (LLCORE_HTTP_HANDLE_INVALID == handle)
{
status = mRequest->getStatus();

@@ -336,16 +391,30 @@ void LLAppCoreHttp::refreshSettings(bool initial)
}
else
{
LL_DEBUGS("Init") << "Changed " << init_data[i].mUsage
<< " concurrency. New value: " << setting
<< LL_ENDL;
mSettings[policy] = setting;
if (initial && setting != init_data[i].mDefault)
handle = mRequest->setPolicyOption(LLCore::HttpRequest::PO_PER_HOST_CONNECTION_LIMIT,
mHttpClasses[app_policy].mPolicy,
setting,
NULL);
if (LLCORE_HTTP_HANDLE_INVALID == handle)
{
LL_INFOS("Init") << "Application settings overriding default " << init_data[i].mUsage
<< " concurrency. New value: " << setting
status = mRequest->getStatus();
LL_WARNS("Init") << "Unable to set " << init_data[i].mUsage
<< " per-host concurrency. Reason: " << status.toString()
<< LL_ENDL;
}
else
{
LL_DEBUGS("Init") << "Changed " << init_data[i].mUsage
<< " concurrency. New value: " << setting
<< LL_ENDL;
mHttpClasses[app_policy].mConnLimit = setting;
if (initial && setting != init_data[i].mDefault)
{
LL_INFOS("Init") << "Application settings overriding default " << init_data[i].mUsage
<< " concurrency. New value: " << setting
<< LL_ENDL;
}
}
}
}
}

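As a worked example of the strategy above (editorial, using this commit's defaults): with HttpPipelining enabled, AP_TEXTURE keeps PO_PER_HOST_CONNECTION_LIMIT at the texture concurrency value (8 by default per init_data), PO_CONNECTION_LIMIT at twice that (16), and a pipeline depth of PIPELINING_DEPTH (5), so a single texture host sees at most 8 connections with up to 5 requests queued on each; with pipelining disabled, both limits fall back to the setting value and llcorehttp manages the connections itself.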
@@ -4,7 +4,7 @@
*
* $LicenseInfo:firstyear=2012&license=viewerlgpl$
* Second Life Viewer Source Code
* Copyright (C) 2012-2013, Linden Research, Inc.
* Copyright (C) 2012-2014, Linden Research, Inc.
*
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public

@@ -41,6 +41,8 @@
class LLAppCoreHttp : public LLCore::HttpHandler
{
public:
static const long PIPELINING_DEPTH;

typedef LLCore::HttpRequest::policy_t policy_t;

enum EAppPolicy

@@ -70,7 +72,7 @@ public:
/// Long poll: no
/// Concurrency: high
/// Request rate: high
/// Pipelined: soon
/// Pipelined: yes
AP_TEXTURE,

/// Legacy mesh fetching policy class. Used to

@@ -98,7 +100,7 @@ public:
/// Long poll: no
/// Concurrency: high
/// Request rate: high
/// Pipelined: soon
/// Pipelined: yes
AP_MESH2,

/// Large mesh fetching policy class. Used to

@@ -116,7 +118,7 @@ public:
/// Long poll: no
/// Concurrency: low
/// Request rate: low
/// Pipelined: soon
/// Pipelined: no
AP_LARGE_MESH,

/// Asset upload policy class. Used to store

@@ -180,7 +182,13 @@ public:
// application function.
policy_t getPolicy(EAppPolicy policy) const
{
return mPolicies[policy];
return mHttpClasses[policy].mPolicy;
}

// Return whether a policy is using pipelined operations.
bool isPipelined(EAppPolicy policy) const
{
return mHttpClasses[policy].mPipelined;
}

// Apply initial or new settings from the environment.

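A hedged consumer-side sketch of these accessors; it mirrors the lltexturefetch changes later in this commit, and the water-mark numbers are the constants introduced there:

    LLAppCoreHttp & app_core_http(LLAppViewer::instance()->getAppCoreHttp());
    LLCore::HttpRequest::policy_t texture_policy(app_core_http.getPolicy(LLAppCoreHttp::AP_TEXTURE));
    // Pick deeper request water marks when the class is pipelined.
    S32 high_water(app_core_http.isPipelined(LLAppCoreHttp::AP_TEXTURE) ? 100 : 40);
    S32 low_water(app_core_http.isPipelined(LLAppCoreHttp::AP_TEXTURE) ? 50 : 20);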
@@ -190,13 +198,26 @@ private:
static const F64 MAX_THREAD_WAIT_TIME;

private:
LLCore::HttpRequest * mRequest; // Request queue to issue shutdowns

// PODish container for per-class settings and state.
struct HttpClass
{
public:
HttpClass();

public:
policy_t mPolicy; // Policy class id for the class
U32 mConnLimit;
bool mPipelined;
boost::signals2::connection mSettingsSignal; // Signal to global setting that affect this class (if any)
};

LLCore::HttpRequest * mRequest; // Request queue to issue shutdowns
LLCore::HttpHandle mStopHandle;
F64 mStopRequested;
bool mStopped;
policy_t mPolicies[AP_COUNT]; // Policy class id for each connection set
U32 mSettings[AP_COUNT];
boost::signals2::connection mSettingsSignal[AP_COUNT]; // Signals to global settings that affect us
HttpClass mHttpClasses[AP_COUNT];
bool mPipelined; // Global setting
};

@@ -343,9 +343,9 @@ const S32 REQUEST_HIGH_WATER_MAX = 150; // Should remain under 2X throttle
const S32 REQUEST_LOW_WATER_MIN = 16;
const S32 REQUEST_LOW_WATER_MAX = 75;
const S32 REQUEST2_HIGH_WATER_MIN = 32; // Limits for GetMesh2 regions
const S32 REQUEST2_HIGH_WATER_MAX = 80;
const S32 REQUEST2_HIGH_WATER_MAX = 100;
const S32 REQUEST2_LOW_WATER_MIN = 16;
const S32 REQUEST2_LOW_WATER_MAX = 40;
const S32 REQUEST2_LOW_WATER_MAX = 50;
const U32 LARGE_MESH_FETCH_THRESHOLD = 1U << 21; // Size at which requests goes to narrow/slow queue
const long SMALL_MESH_XFER_TIMEOUT = 120L; // Seconds to complete xfer, small mesh downloads
const long LARGE_MESH_XFER_TIMEOUT = 600L; // Seconds to complete xfer, large downloads

@@ -754,7 +754,9 @@ LLMeshRepoThread::LLMeshRepoThread()
mHttpLargePolicyClass(LLCore::HttpRequest::DEFAULT_POLICY_ID),
mHttpPriority(0),
mGetMeshVersion(2)
{
{
LLAppCoreHttp & app_core_http(LLAppViewer::instance()->getAppCoreHttp());

mMutex = new LLMutex(NULL);
mHeaderMutex = new LLMutex(NULL);
mSignal = new LLCondition(NULL);

@@ -767,10 +769,10 @@ LLMeshRepoThread::LLMeshRepoThread()
mHttpLargeOptions->setUseRetryAfter(gSavedSettings.getBOOL("MeshUseHttpRetryAfter"));
mHttpHeaders = new LLCore::HttpHeaders;
mHttpHeaders->append("Accept", "application/vnd.ll.mesh");
mHttpPolicyClass = LLAppViewer::instance()->getAppCoreHttp().getPolicy(LLAppCoreHttp::AP_MESH2);
mHttpLegacyPolicyClass = LLAppViewer::instance()->getAppCoreHttp().getPolicy(LLAppCoreHttp::AP_MESH1);
mHttpLargePolicyClass = LLAppViewer::instance()->getAppCoreHttp().getPolicy(LLAppCoreHttp::AP_LARGE_MESH);
}
mHttpPolicyClass = app_core_http.getPolicy(LLAppCoreHttp::AP_MESH2);
mHttpLegacyPolicyClass = app_core_http.getPolicy(LLAppCoreHttp::AP_MESH1);
mHttpLargePolicyClass = app_core_http.getPolicy(LLAppCoreHttp::AP_LARGE_MESH);
}

LLMeshRepoThread::~LLMeshRepoThread()

@@ -846,48 +848,49 @@ void LLMeshRepoThread::run()
{
// Dispatch all HttpHandler notifications
mHttpRequest->update(0L);
}
}
sRequestWaterLevel = mHttpRequestSet.size(); // Stats data update

// NOTE: order of queue processing intentionally favors LOD requests over header requests

while (!mLODReqQ.empty() && mHttpRequestSet.size() < sRequestHighWater)
{
{
if (! mMutex)
{
{
break;
}
mMutex->lock();
LODRequest req = mLODReqQ.front();
mLODReqQ.pop();
LLMeshRepository::sLODProcessing--;
mMutex->unlock();
mMutex->lock();
LODRequest req = mLODReqQ.front();
mLODReqQ.pop();
LLMeshRepository::sLODProcessing--;
mMutex->unlock();

if (!fetchMeshLOD(req.mMeshParams, req.mLOD)) // failed, resubmit
{
mMutex->lock();
mLODReqQ.push(req);
{
mMutex->lock();
mLODReqQ.push(req);
++LLMeshRepository::sLODProcessing;
mMutex->unlock();
}
}
mMutex->unlock();
}
}

while (!mHeaderReqQ.empty() && mHttpRequestSet.size() < sRequestHighWater)
{
{
if (! mMutex)
{
{
break;
}
mMutex->lock();
HeaderRequest req = mHeaderReqQ.front();
mHeaderReqQ.pop();
mMutex->unlock();
mMutex->lock();
HeaderRequest req = mHeaderReqQ.front();
mHeaderReqQ.pop();
mMutex->unlock();
if (!fetchMeshHeader(req.mMeshParams))//failed, resubmit
{
mMutex->lock();
mHeaderReqQ.push(req) ;
mMutex->unlock();
}
}
{
mMutex->lock();
mHeaderReqQ.push(req) ;
mMutex->unlock();
}
}

// For the final three request lists, similar goal to above but
// slightly different queue structures. Stay off the mutex when

@@ -983,7 +986,7 @@ void LLMeshRepoThread::run()
}
}
mMutex->unlock();
}
}

// For dev purposes only. A dynamic change could make this false
// and that shouldn't assert.

@@ -1250,7 +1253,6 @@ bool LLMeshRepoThread::fetchMeshSkinInfo(const LLUUID& mesh_id)
<< LL_ENDL;
delete handler;
ret = false;

}
else
{

@@ -1860,7 +1862,7 @@ LLMeshUploadThread::LLMeshUploadThread(LLMeshUploadThread::instance_list& data,
bool upload_skin, bool upload_joints, const std::string & upload_url, bool do_upload,
LLHandle<LLWholeModelFeeObserver> fee_observer,
LLHandle<LLWholeModelUploadObserver> upload_observer)
: LLThread("mesh upload"),
: LLThread("mesh upload"),
LLCore::HttpHandler(),
mDiscarded(false),
mDoUpload(do_upload),

@@ -3198,9 +3200,13 @@ void LLMeshRepository::notifyLoadedMeshes()
else
{
// GetMesh2 operation with keepalives, etc. With pipelining,
// we'll increase this.
// we'll increase this. See llappcorehttp and llcorehttp for
// discussion on connection strategies.
LLAppCoreHttp & app_core_http(LLAppViewer::instance()->getAppCoreHttp());
S32 scale(app_core_http.isPipelined(LLAppCoreHttp::AP_MESH2) ? 10 : 5);

LLMeshRepoThread::sMaxConcurrentRequests = gSavedSettings.getU32("Mesh2MaxConcurrentRequests");
LLMeshRepoThread::sRequestHighWater = llclamp(5 * S32(LLMeshRepoThread::sMaxConcurrentRequests),
LLMeshRepoThread::sRequestHighWater = llclamp(scale * S32(LLMeshRepoThread::sMaxConcurrentRequests),
REQUEST2_HIGH_WATER_MIN,
REQUEST2_HIGH_WATER_MAX);
LLMeshRepoThread::sRequestLowWater = llclamp(LLMeshRepoThread::sRequestHighWater / 2,

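A worked example of the clamp above (editorial; a Mesh2MaxConcurrentRequests value of 8 is assumed): with pipelining on, the scale is 10, so sRequestHighWater = llclamp(10 * 8, 32, 100) = 80 and sRequestLowWater = llclamp(80 / 2, 16, 50) = 40; with pipelining off the scale stays at 5, giving 40 and 20 as before.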
@@ -241,8 +241,10 @@ LLTrace::EventStatHandle<F64Milliseconds > LLTextureFetch::sCacheReadLatency("te

// Tuning/Parameterization Constants

static const S32 HTTP_REQUESTS_IN_QUEUE_HIGH_WATER = 40; // Maximum requests to have active in HTTP
static const S32 HTTP_REQUESTS_IN_QUEUE_LOW_WATER = 20; // Active level at which to refill
static const S32 HTTP_PIPE_REQUESTS_HIGH_WATER = 100; // Maximum requests to have active in HTTP (pipelined)
static const S32 HTTP_PIPE_REQUESTS_LOW_WATER = 50; // Active level at which to refill
static const S32 HTTP_NONPIPE_REQUESTS_HIGH_WATER = 40;
static const S32 HTTP_NONPIPE_REQUESTS_LOW_WATER = 20;

// BUG-3323/SH-4375
// *NOTE: This is a heuristic value. Texture fetches have a habit of using a

@@ -608,16 +610,16 @@ private:

LLCore::HttpHandle mHttpHandle; // Handle of any active request
LLCore::BufferArray * mHttpBufferArray; // Refcounted pointer to response data
S32 mHttpPolicyClass;
S32 mHttpPolicyClass;
bool mHttpActive; // Active request to http library
U32 mHttpReplySize, // Actual received data size
mHttpReplyOffset; // Actual received data offset
U32 mHttpReplySize, // Actual received data size
mHttpReplyOffset; // Actual received data offset
bool mHttpHasResource; // Counts against Fetcher's mHttpSemaphore

// State history
U32 mCacheReadCount,
mCacheWriteCount,
mResourceWaitCount; // Requests entering WAIT_HTTP_RESOURCE2
U32 mCacheReadCount,
mCacheWriteCount,
mResourceWaitCount; // Requests entering WAIT_HTTP_RESOURCE2
};

//////////////////////////////////////////////////////////////////////////////

@@ -1525,36 +1527,49 @@ bool LLTextureFetchWorker::doWork(S32 param)
mRequestedOffset -= 1;
mRequestedSize += 1;
}

mHttpHandle = LLCORE_HTTP_HANDLE_INVALID;
if (!mUrl.empty())
{
mRequestedTimer.reset();
mLoaded = FALSE;
mGetStatus = LLCore::HttpStatus();
mGetReason.clear();
LL_DEBUGS(LOG_TXT) << "HTTP GET: " << mID << " Offset: " << mRequestedOffset
<< " Bytes: " << mRequestedSize
<< " Bandwidth(kbps): " << mFetcher->getTextureBandwidth() << "/" << mFetcher->mMaxBandwidth
<< LL_ENDL;

// Will call callbackHttpGet when curl request completes
// Only server bake images use the returned headers currently, for getting retry-after field.
LLCore::HttpOptions *options = (mFTType == FTT_SERVER_BAKE) ? mFetcher->mHttpOptionsWithHeaders: mFetcher->mHttpOptions;
mHttpHandle = mFetcher->mHttpRequest->requestGetByteRange(mHttpPolicyClass,
mWorkPriority,
mUrl,
mRequestedOffset,
(mRequestedOffset + mRequestedSize) > HTTP_REQUESTS_RANGE_END_MAX
? 0
: mRequestedSize,
options,
mFetcher->mHttpHeaders,
this);
if (mUrl.empty())
{
// *FIXME: This should not be reachable except it has become
// so after some recent 'work'. Need to track this down
// and illuminate the unenlightened.
LL_WARNS(LOG_TXT) << "HTTP GET request failed for " << mID
<< " on empty URL." << LL_ENDL;
resetFormattedData();
releaseHttpSemaphore();
return true; // failed
}

mRequestedTimer.reset();
mLoaded = FALSE;
mGetStatus = LLCore::HttpStatus();
mGetReason.clear();
LL_DEBUGS(LOG_TXT) << "HTTP GET: " << mID << " Offset: " << mRequestedOffset
<< " Bytes: " << mRequestedSize
<< " Bandwidth(kbps): " << mFetcher->getTextureBandwidth() << "/" << mFetcher->mMaxBandwidth
<< LL_ENDL;

// Will call callbackHttpGet when curl request completes
// Only server bake images use the returned headers currently, for getting retry-after field.
LLCore::HttpOptions *options = (mFTType == FTT_SERVER_BAKE) ? mFetcher->mHttpOptionsWithHeaders: mFetcher->mHttpOptions;
mHttpHandle = mFetcher->mHttpRequest->requestGetByteRange(mHttpPolicyClass,
mWorkPriority,
mUrl,
mRequestedOffset,
(mRequestedOffset + mRequestedSize) > HTTP_REQUESTS_RANGE_END_MAX
? 0
: mRequestedSize,
options,
mFetcher->mHttpHeaders,
this);
if (LLCORE_HTTP_HANDLE_INVALID == mHttpHandle)
{
LL_WARNS(LOG_TXT) << "HTTP GET request failed for " << mID << LL_ENDL;
LLCore::HttpStatus status(mFetcher->mHttpRequest->getStatus());
LL_WARNS(LOG_TXT) << "HTTP GET request failed for " << mID
<< ", Status: " << status.toTerseString()
<< " Reason: '" << status.toString() << "'"
<< LL_ENDL;
resetFormattedData();
releaseHttpSemaphore();
return true; // failed

@@ -1610,10 +1625,6 @@ bool LLTextureFetchWorker::doWork(S32 param)
else if (http_service_unavail == mGetStatus)
{
LL_INFOS_ONCE(LOG_TXT) << "Texture server busy (503): " << mUrl << LL_ENDL;
LL_INFOS(LOG_TXT) << "503: HTTP GET failed for: " << mUrl
<< " Status: " << mGetStatus.toHex()
<< " Reason: '" << mGetReason << "'"
<< LL_ENDL;
}
else if (http_not_sat == mGetStatus)
{

@@ -2482,7 +2493,6 @@ LLTextureFetch::LLTextureFetch(LLTextureCache* cache, LLImageDecodeThread* image
mHttpHeaders(NULL),
mHttpMetricsHeaders(NULL),
mHttpPolicyClass(LLCore::HttpRequest::DEFAULT_POLICY_ID),
mHttpSemaphore(HTTP_REQUESTS_IN_QUEUE_HIGH_WATER),
mTotalCacheReadCount(0U),
mTotalCacheWriteCount(0U),
mTotalResourceWaitCount(0U),

@@ -2494,6 +2504,30 @@ LLTextureFetch::LLTextureFetch(LLTextureCache* cache, LLImageDecodeThread* image
mMaxBandwidth = gSavedSettings.getF32("ThrottleBandwidthKBPS");
mTextureInfo.setUpLogging(gSavedSettings.getBOOL("LogTextureDownloadsToViewerLog"), gSavedSettings.getBOOL("LogTextureDownloadsToSimulator"), U32Bytes(gSavedSettings.getU32("TextureLoggingThreshold")));

mHttpRequest = new LLCore::HttpRequest;
mHttpOptions = new LLCore::HttpOptions;
mHttpOptionsWithHeaders = new LLCore::HttpOptions;
mHttpOptionsWithHeaders->setWantHeaders(true);
mHttpHeaders = new LLCore::HttpHeaders;
mHttpHeaders->append("Accept", "image/x-j2c");
mHttpMetricsHeaders = new LLCore::HttpHeaders;
mHttpMetricsHeaders->append("Content-Type", "application/llsd+xml");
LLAppCoreHttp & app_core_http(LLAppViewer::instance()->getAppCoreHttp());
mHttpPolicyClass = app_core_http.getPolicy(LLAppCoreHttp::AP_TEXTURE);
if (app_core_http.isPipelined(LLAppCoreHttp::AP_TEXTURE))
{
mHttpHighWater = HTTP_PIPE_REQUESTS_HIGH_WATER;
mHttpLowWater = HTTP_PIPE_REQUESTS_LOW_WATER;
}
else
{
mHttpHighWater = HTTP_NONPIPE_REQUESTS_HIGH_WATER;
mHttpLowWater = HTTP_NONPIPE_REQUESTS_LOW_WATER;
}
mHttpSemaphore = mHttpHighWater;

// Conditionally construct debugger object after 'this' is
// fully initialized.
LLTextureFetchDebugger::sDebuggerEnabled = gSavedSettings.getBOOL("TextureFetchDebuggerEnabled");
if(LLTextureFetchDebugger::isEnabled())
{

@@ -2506,16 +2540,6 @@ LLTextureFetch::LLTextureFetch(LLTextureCache* cache, LLImageDecodeThread* image
}
mOriginFetchSource = mFetchSource;
}

mHttpRequest = new LLCore::HttpRequest;
mHttpOptions = new LLCore::HttpOptions;
mHttpOptionsWithHeaders = new LLCore::HttpOptions;
mHttpOptionsWithHeaders->setWantHeaders(true);
mHttpHeaders = new LLCore::HttpHeaders;
mHttpHeaders->append("Accept", "image/x-j2c");
mHttpMetricsHeaders = new LLCore::HttpHeaders;
mHttpMetricsHeaders->append("Content-Type", "application/llsd+xml");
mHttpPolicyClass = LLAppViewer::instance()->getAppCoreHttp().getPolicy(LLAppCoreHttp::AP_TEXTURE);
}

LLTextureFetch::~LLTextureFetch()

@@ -3645,7 +3669,7 @@ void LLTextureFetch::releaseHttpWaiters()
{
// Use mHttpSemaphore rather than mHTTPTextureQueue.size()
// to avoid a lock.
if (mHttpSemaphore < (HTTP_REQUESTS_IN_QUEUE_HIGH_WATER - HTTP_REQUESTS_IN_QUEUE_LOW_WATER))
if (mHttpSemaphore < (mHttpHighWater - mHttpLowWater))
return;

// Quickly make a copy of all the LLUIDs. Get off the

@@ -4538,7 +4562,7 @@ S32 LLTextureFetchDebugger::fillCurlQueue()
mNbCurlCompleted = mFetchingHistory.size();
return 0;
}
if (mNbCurlRequests > HTTP_REQUESTS_IN_QUEUE_LOW_WATER)
if (mNbCurlRequests > HTTP_NONPIPE_REQUESTS_LOW_WATER)
{
return mNbCurlRequests;
}

@@ -4571,7 +4595,7 @@ S32 LLTextureFetchDebugger::fillCurlQueue()
mFetchingHistory[i].mHttpHandle = handle;
mFetchingHistory[i].mCurlState = FetchEntry::CURL_IN_PROGRESS;
mNbCurlRequests++;
if (mNbCurlRequests >= HTTP_REQUESTS_IN_QUEUE_HIGH_WATER) // emulate normal pipeline
if (mNbCurlRequests >= HTTP_NONPIPE_REQUESTS_HIGH_WATER) // emulate normal pipeline
{
break;
}

@@ -356,7 +356,9 @@ private:
LLCore::HttpHeaders * mHttpHeaders; // Ttf
LLCore::HttpHeaders * mHttpMetricsHeaders; // Ttf
LLCore::HttpRequest::policy_t mHttpPolicyClass; // T*

S32 mHttpHighWater; // T* (ro)
S32 mHttpLowWater; // T* (ro)

// We use a resource semaphore to keep HTTP requests in
// WAIT_HTTP_RESOURCE2 if there aren't sufficient slots in the
// transport. This keeps them near where they can be cheaply