+
Example Domain
+
This domain is established to be used for illustrative examples in documents. You may use this
+ domain in examples without prior coordination or asking for permission.
+
More information...
+
+
+
+----------------------------------------------------------------------------
+
+
+  You'll also get a detailed trace of the HTTP operation itself.  Note
+  the HEADEROUT line, which shows the additional header added to the
+  request.
+
+
+----------------------------------------------------------------------------
+HttpService::processRequestQueue: TRACE, FromRequestQueue, Handle: 086D3148
+HttpLibcurl::addOp: TRACE, ToActiveQueue, Handle: 086D3148, Actives: 0, Readies: 0
+HttpOpRequest::debugCallback: TRACE, LibcurlDebug, Handle: 086D3148, Type: TEXT, Data: About to connect() to www.example.com port 80 (#0)
+HttpOpRequest::debugCallback: TRACE, LibcurlDebug, Handle: 086D3148, Type: TEXT, Data: Trying 93.184.216.119...
+HttpOpRequest::debugCallback: TRACE, LibcurlDebug, Handle: 086D3148, Type: TEXT, Data: Connected to www.example.com (93.184.216.119) port 80 (#0)
+HttpOpRequest::debugCallback: TRACE, LibcurlDebug, Handle: 086D3148, Type: TEXT, Data: Connected to www.example.com (93.184.216.119) port 80 (#0)
+HttpOpRequest::debugCallback: TRACE, LibcurlDebug, Handle: 086D3148, Type: HEADEROUT, Data: GET / HTTP/1.1 Host: www.example.com Accept-Encoding: deflate, gzip Connection: keep-alive Keep-alive: 300 Accept: text/html, application/llsd+xml
+HttpOpRequest::debugCallback: TRACE, LibcurlDebug, Handle: 086D3148, Type: HEADERIN, Data: HTTP/1.1 200 OK
+HttpOpRequest::debugCallback: TRACE, LibcurlDebug, Handle: 086D3148, Type: HEADERIN, Data: Accept-Ranges: bytes
+HttpOpRequest::debugCallback: TRACE, LibcurlDebug, Handle: 086D3148, Type: HEADERIN, Data: Cache-Control: max-age=604800
+HttpOpRequest::debugCallback: TRACE, LibcurlDebug, Handle: 086D3148, Type: HEADERIN, Data: Content-Type: text/html
+HttpOpRequest::debugCallback: TRACE, LibcurlDebug, Handle: 086D3148, Type: HEADERIN, Data: Date: Tue, 17 Sep 2013 20:26:56 GMT
+HttpOpRequest::debugCallback: TRACE, LibcurlDebug, Handle: 086D3148, Type: HEADERIN, Data: Etag: "3012602696"
+HttpOpRequest::debugCallback: TRACE, LibcurlDebug, Handle: 086D3148, Type: HEADERIN, Data: Expires: Tue, 24 Sep 2013 20:26:56 GMT
+HttpOpRequest::debugCallback: TRACE, LibcurlDebug, Handle: 086D3148, Type: HEADERIN, Data: Last-Modified: Fri, 09 Aug 2013 23:54:35 GMT
+HttpOpRequest::debugCallback: TRACE, LibcurlDebug, Handle: 086D3148, Type: HEADERIN, Data: Server: ECS (ewr/1590)
+HttpOpRequest::debugCallback: TRACE, LibcurlDebug, Handle: 086D3148, Type: HEADERIN, Data: X-Cache: HIT
+HttpOpRequest::debugCallback: TRACE, LibcurlDebug, Handle: 086D3148, Type: HEADERIN, Data: x-ec-custom-error: 1
+HttpOpRequest::debugCallback: TRACE, LibcurlDebug, Handle: 086D3148, Type: HEADERIN, Data: Content-Length: 1270
+HttpOpRequest::debugCallback: TRACE, LibcurlDebug, Handle: 086D3148, Type: HEADERIN, Data:
+HttpOpRequest::debugCallback: TRACE, LibcurlDebug, Handle: 086D3148, Type: DATAIN, Data: 256 Bytes
+HttpOpRequest::debugCallback: TRACE, LibcurlDebug, Handle: 086D3148, Type: TEXT, Data: Connection #0 to host www.example.com left intact
+HttpLibcurl::completeRequest: TRACE, RequestComplete, Handle: 086D3148, Status: Http_200
+HttpOperation::addAsReply: TRACE, ToReplyQueue, Handle: 086D3148
+----------------------------------------------------------------------------
+
+
+4. What Does All That Mean, Part 2
+
+  HttpStatus.  The HttpStatus object encodes errors from libcurl,
+  errors from the library itself and HTTP status values.  It does
+  this to avoid collapsing all non-HTTP errors into a single '499'
+  HTTP status and to keep the different error sources distinct.
+
+ To aid programming, the usual bool conversions are available so that
+ you can write 'if (status)' and the expected thing will happen
+ whether it's an HTTP, libcurl or library error. There's also
+ provision to override the treatment of HTTP errors (making 404 a
+ success, say).
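+
+  As a concrete illustration, here's a minimal sketch of status
+  handling inside an onCompleted() override (MyHandler is a
+  hypothetical HttpHandler subclass; only getStatus() and the bool
+  conversion described here are relied upon):
+
+
+      void MyHandler::onCompleted(LLCore::HttpHandle handle,
+                                  LLCore::HttpResponse * response)
+      {
+          LLCore::HttpStatus status = response->getStatus();
+          if (status)
+          {
+              // Success:  an HTTP 2xx or a libcurl/library success code
+          }
+          else
+          {
+              // Failure:  HTTP, libcurl or library error, still distinguishable
+          }
+      }
+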
+
+ Share data, don't copy it. The library was started with the goal of
+ avoiding data copies as much as possible. Instead, read-only data
+ sharing across threads with atomic reference counts is used for a
+ number of data types. These currently are:
+
+ * BufferArray. Linked list of data blocks/HTTP bodies.
+ * HttpHeaders. Shared headers for both requests and responses.
+ * HttpOptions. Request-only data modifying HTTP behavior.
+ * HttpResponse. HTTP response description given to onCompleted.
+
+  Using objects of these types requires following a few rules (a
+  short sketch follows the list):
+
+ * Constructor always gives a reference to caller.
+ * References are dropped with release() not delete.
+ * Additional references may be taken out with addRef().
+ * Unless otherwise stated, once an object is shared with another
+ thread it should be treated as read-only. There's no
+ synchronization on the objects themselves.
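+
+  A minimal sketch of those rules in practice, using HttpHeaders
+  purely as an illustration:
+
+
+      LLCore::HttpHeaders * headers = new LLCore::HttpHeaders();
+      // The constructor gave us one reference.  Fill in the headers,
+      // then hand the object to one or more requests.  From that
+      // point on, treat it as read-only.
+
+      headers->release();     // drop our reference with release(), never delete;
+                              // requests that were given the object hold their own
+      headers = NULL;
+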
+
+ HttpResponse. You'll encounter this mainly in onCompleted() methods.
+ Commonly-used interfaces on this object:
+
+ * getStatus() to return the final status of the request.
+ * getBody() to retrieve the response body which may be NULL or
+ zero-length.
+ * getContentType() to return the value of the 'Content-Type'
+ header or an empty string if none was sent.
+
+  This is a reference-counted object, so you can call addRef() on it
+  and hold onto the response for an arbitrary time.  But you'll
+  usually just call a few methods and return from onCompleted();
+  its caller will then release the object.
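+
+  For example, a minimal sketch of holding a response beyond the
+  notification (mSavedResponse is a hypothetical member used only
+  for illustration):
+
+
+      // Inside onCompleted():
+      response->addRef();              // keep the object alive after returning
+      mSavedResponse = response;       // remember to release() it later
+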
+
+  BufferArray.  The core data representation for request and response
+  bodies.  In HTTP responses, it's fetched with the getBody() method
+  and may be NULL, or non-NULL with zero length.  Code handling a
+  successful response should check both conditions before attempting
+  to fetch data from the object.  The data access model uses simple
+  read/write semantics:
+
+ * append()
+ * size()
+ * read()
+ * write()
+
+ (There is a more sophisticated stream adapter that extends these
+ methods and will be covered below.) So, one way to retrieve data
+ from a request is as follows:
+
+
+      LLCore::BufferArray * data = response->getBody();
+      if (data && data->size())
+      {
+          size_t data_len = data->size();
+          char * data_blob = new char [data_len + 1];
+          data->read(0, data_blob, data_len);
+          data_blob[data_len] = '\0';
+
+          // ... use data_blob ...
+
+          delete [] data_blob;
+      }
+
+
+  HttpOptions and HttpHeaders.  Really just simple containers of POD
+  and std::string pairs.  But they're reference counted, and the rule
+  about not modifying an object after sharing it must be followed.
+  You'll have the urge to change options dynamically at some point,
+  and you'll try to do that by just writing new values to the shared
+  object.  In tests everything will appear to work.  Then you ship,
+  and people in the real world start hitting read/write races in
+  strings and crashing.  Don't be lazy.
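+
+  The safe pattern is to build a fresh options object whenever the
+  values need to change, use it for new requests and release your
+  reference.  A minimal sketch (setTrace() is used here as a
+  representative setter; substitute whichever option you need):
+
+
+      LLCore::HttpOptions * options = new LLCore::HttpOptions();
+      options->setTrace(2);            // safe:  not yet shared
+
+      // ... issue requests using 'options' ...
+
+      options->release();              // and never write to it again
+      options = NULL;
+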
+
+ HttpHandle. Uniquely identifies a request and can be used to
+ identify it in an onCompleted() method or cancel it if it's still
+ queued. But as soon as a request's onCompleted() invocation
+ returns, the handle becomes invalid and may be reused immediately
+ for new requests. Don't hold on to handles after notification.
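+
+  A minimal sketch of the intended handle usage (mOutstanding,
+  mRequest and mHandler are hypothetical members; the requestCancel()
+  arguments are shown as an assumption, so check HttpRequest for the
+  exact signature):
+
+
+      // When queuing:
+      LLCore::HttpHandle handle = mRequest->requestGet(...);
+      mOutstanding.insert(handle);
+
+      // Later, if the request is still outstanding:
+      mRequest->requestCancel(handle, mHandler);
+
+      // In onCompleted(), once notified:
+      mOutstanding.erase(handle);          // never touch the handle again
+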
+
+
+5. And Still More Refinements
+
+ (Note: The following refinements are just code fragments. They
+ don't directly fit into the working example above. But they
+ demonstrate several idioms you'll want to copy.)
+
+ LLSD, std::streambuf, std::iostream. The read(), write() and
+ append() methods may be adequate for your purposes. But we use a
+ lot of LLSD. Its interfaces aren't particularly compatible with
+ BufferArray. And so two adapters are available to give
+ stream-like behaviors: BufferArrayStreamBuf and BufferArrayStream,
+ which implement the std::streambuf and std::iostream interfaces,
+ respectively.
+
+  A std::streambuf interface isn't something you'll want to use
+  directly.  Instead, you'll use the much friendlier std::iostream
+  interface found in BufferArrayStream.  This adapter gives you all
+  the '>>' and '<<' operators you'll want, and it works directly
+  with the LLSD conversion helpers (LLSDSerialize, as shown below).
+
+ Some new headers:
+
+
+ #include "bufferstream.h"
+ #include "llsdserialize.h"
+
+
+ And an updated fragment based on onCompleted() above:
+
+
+ // Successful request. Try to fetch the data
+ LLCore::BufferArray * data = response->getBody();
+ LLSD resp_llsd;
+
+ if (data && data->size())
+ {
+ // There's some data and we expect this to be
+ // LLSD. Checking of content type and validation
+ // during parsing would be admirable additions.
+ // But we'll forgo that now.
+ LLCore::BufferArrayStream data_stream(data);
+ LLSDSerialize::fromXML(resp_llsd, data_stream);
+ }
+ LL_INFOS("Hack") << "LLSD Received: " << resp_llsd << LL_ENDL;
+ }
+ else
+ {
+
+
+ Converting an LLSD object into an XML stream stored in a
+ BufferArray is just the reverse of the above:
+
+
+ BufferArray * data = new BufferArray();
+ LLCore::BufferArrayStream data_stream(data);
+
+ LLSD src_llsd;
+ src_llsd["foo"] = "bar";
+
+ LLSDSerialize::toXML(src_llsd, data_stream);
+
+ // 'data' now contains an XML payload and can be sent
+ // to a web service using the requestPut() or requestPost()
+ // methods.
+ ... requestPost(...);
+
+ // And don't forget to release the BufferArray.
+ data->release();
+ data = NULL;
+
+
+ LLSD will often go hand-in-hand with BufferArray and data
+ transport. But you can also do all the streaming I/O you'd expect
+ of a std::iostream object:
+
+
+ BufferArray * data = new BufferArray();
+ LLCore::BufferArrayStream data_stream(data);
+
+ data_stream << "Hello, World!" << 29.4 << '\n';
+ std::string str;
+ data_stream >> str;
+ std::cout << str << std::endl;
+
+ data->release();
+ // Actual delete will occur when 'data_stream'
+ // falls out of scope and is destructed.
+
+
+  Scoping objects and cleaning up.  The examples haven't bothered
+  with cleanup of objects that are no longer needed.  Instead, most
+  objects have been allocated as if they were global and eternal.
+  In real code, you'll scope these objects to more appropriate
+  feature objects and clean them up as a group.  Here's a checklist
+  of actions you may need to take on cleanup (a sketch follows the
+  list):
+
+ * Call delete on:
+ o HttpHandlers created on the heap
+ o HttpRequest objects
+ * Call release() on:
+ o BufferArray objects
+ o HttpHeaders objects
+ o HttpOptions objects
+ o HttpResponse objects
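+
+  As a sketch, a feature object's destructor following that
+  checklist might look like this (member names are illustrative):
+
+
+      MyFeature::~MyFeature()
+      {
+          if (mHeaders)
+          {
+              mHeaders->release();     // reference-counted:  release(), not delete
+              mHeaders = NULL;
+          }
+          delete mHandler;             // heap-allocated HttpHandler:  delete
+          mHandler = NULL;
+          delete mRequest;             // HttpRequest object:  delete
+          mRequest = NULL;
+      }
+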
+
+ On program exit, as threads wind down, the library continues to
+ operate safely. Threads don't interact via the library and even
+ dangling references to HttpHandler objects are safe. If you don't
+ call HttpRequest::update(), handler references are never
+ dereferenced.
+
+  You can take a more thorough approach to wind-down.  Keep a list
+  of the HttpHandles (not HttpHandlers) of outstanding requests.
+  For each of these, call HttpRequest::requestCancel() to cancel the
+  operation.  (Don't add the cancel requests' handles to the list.)
+  This will cancel the outstanding requests that haven't completed.
+  Canceled or completed, all requests will queue notifications.  You
+  can now cycle, calling update() and discarding responses.  Continue
+  until all requests have notified or a few seconds have passed.
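+
+  A minimal sketch of that wind-down idiom.  Here mOutstanding is a
+  hypothetical std::set<LLCore::HttpHandle> maintained by the
+  feature (its onCompleted() erases handles as they notify), and the
+  update() argument and ms_sleep() call are illustrative:
+
+
+      // Cancel everything still outstanding.
+      for (std::set<LLCore::HttpHandle>::iterator it(mOutstanding.begin());
+           mOutstanding.end() != it; ++it)
+      {
+          mRequest->requestCancel(*it, mHandler);   // don't track these cancel handles
+      }
+
+      // Cycle update() until everything has notified or we give up.
+      int passes(50);
+      while (! mOutstanding.empty() && --passes >= 0)
+      {
+          mRequest->update(0);
+          ms_sleep(100);
+      }
+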
+
+ Global startup and shutdown is handled in the viewer. But you can
+ learn about it in the code or in the documentation in the headers.
+
+
+6. Choosing a Policy Class
+
+ Now it's time to get rid of the default policy class. Take a look
+ at the policy class definitions in newview/llappcorehttp.h.
+ Ideally, you'll find one that's compatible with what you're doing.
+ Some of the compatibility guidelines are:
+
+ * Destination: Pair of host and port. Mixing requests with
+ different destinations may cause more connection setup and tear
+ down.
+
+ * Method: http or https. Usually moot given destination. But
+ mixing these may also cause connection churn.
+
+  * Transfer size:  If you're moving 100MB at a time and you issue
+    your requests to the same policy class as a lot of small, fast
+    event traffic, that fast traffic is going to get stuck behind
+    you and someone's experience is going to be miserable.
+
+  * Long poll requests:  These are long-lived, must-do operations.
+    They have a special home called AP_LONG_POLL.
+
+  * Concurrency:  High concurrency (5 or more) and large transfer
+    sizes are incompatible.  Another head-of-the-line problem.  High
+    concurrency is tolerated when maximal throughput is the goal:
+    mesh and texture downloads, for example.
+
+  * Pipelined:  If your requests are not idempotent, stay away from
+    anything marked 'soon' or 'yes'.  Hidden retries may be a
+    problem for you.  For now, we'd also recommend keeping PUT and
+    POST requests out of classes that may be pipelined.  Support for
+    that is still a bit new.
+
+  If you haven't found a compatible match, you can either create a
+  new class (llappcorehttp.*) or just use AP_DEFAULT, the catchall
+  class when all else fails.  Inventory query operations might be a
+  candidate for a new class that supports pipelining on https:.
+  Same with display name lookups and other bursty-at-login
+  operations.  For other things, AP_DEFAULT will do what it can and
+  will, in some way or another, tolerate any usage.  Whether the
+  users' experiences are good is for you to determine.
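+
+  Once you've settled on a class, one way to obtain its run-time
+  policy class id is through the viewer's LLAppCoreHttp instance.  A
+  minimal sketch, assuming its getPolicy() accessor and the usual
+  accessor on LLAppViewer (check newview/llappcorehttp.h for the
+  current interface):
+
+
+      LLAppCoreHttp & app_http(LLAppViewer::instance()->getAppCoreHttp());
+      LLCore::HttpRequest::policy_t policy_class(
+          app_http.getPolicy(LLAppCoreHttp::AP_LONG_POLL));
+
+      // Use 'policy_class' in the requestGet()/requestPost()/etc.
+      // calls made for this feature.
+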
+
+
+7. FAQ
+
+ Q1. What do these policy classes achieve?
+
+  A1.  Previously, HTTP-using code in the viewer was written as if
+  it were some isolated, local operation that didn't have to
+  consider resources, contention or impact on services and the
+  larger environment.  The result was an application with on the
+  order of 100 HTTP launch points in its codebase that could create
+  dozens or even hundreds of TCP connections zeroing in on grid
+  services and disrupting networking equipment, web services and
+  innocent users.  The use of policy classes (modeled on
+  http://en.wikipedia.org/wiki/Class-based_queueing) is a means to
+  restrict connection concurrency, good and necessary in itself.  In
+  turn, that reduces demands on an expensive resource (connection
+  setup and concurrency), which relieves strain on network points.
+  That enables connection keepalive and opportunities for true
+  improvements in throughput and user experience.
+
+  Another aspect of the classes is that they give some control over
+  how competing demands for the network will be apportioned.  If
+  mesh fetches, texture fetches and inventory queries are all being
+  made at once, the relative weights of their classes' concurrency
+  limits establish that apportionment.  We now have an opportunity
+  to balance the entire viewer system.
+
+ Q2. How's that data sharing with refcounts working for you?
+
+  A2.  Meh.  It does reduce memory churn and the frequency at which
+  free blocks must be moved between threads.  But it's also a design
+  suited to static configuration, while dynamic reconfiguration (not
+  requiring a restart) is now favored.  Creating new options for
+  every request isn't too bad:  it's a sequence of "new, fill,
+  request, release" for each requested operation, in contrast to
+  doing the "new, fill, release" once at startup.  The bad part
+  comes in getting at the source data.  One rule in this work was
+  "no new thread problems," and one source of those is pulling
+  setting values out of gSettings in threads.  None of that is
+  thread safe, though we tend to get away with it.
+
+ Q3. What needs to be done?
+
+  A3.  There's a To-Do list in _httpinternal.h.  It has both large
+  and small projects if someone would like to try making changes.
diff --git a/indra/llcorehttp/_httpinternal.h b/indra/llcorehttp/_httpinternal.h
index 008e4fd95c..f80d7f60f5 100755
--- a/indra/llcorehttp/_httpinternal.h
+++ b/indra/llcorehttp/_httpinternal.h
@@ -4,7 +4,7 @@
*
* $LicenseInfo:firstyear=2012&license=viewerlgpl$
* Second Life Viewer Source Code
- * Copyright (C) 2012, Linden Research, Inc.
+ * Copyright (C) 2012-2013, Linden Research, Inc.
*
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
@@ -36,7 +36,8 @@
// General library to-do list
//
// - Implement policy classes. Structure is mostly there just didn't
-// need it for the first consumer.
+// need it for the first consumer. [Classes are there. More
+// advanced features, like borrowing, aren't there yet.]
// - Consider Removing 'priority' from the request interface. Its use
// in an always active class can lead to starvation of low-priority
// requests. Requires coodination of priority values across all
@@ -46,6 +47,7 @@
// may not really need it.
// - Set/get for global policy and policy classes is clumsy. Rework
// it heading in a direction that allows for more dynamic behavior.
+// [Mostly fixed]
// - Move HttpOpRequest::prepareRequest() to HttpLibcurl for the
// pedantic.
// - Update downloader and other long-duration services are going to
@@ -64,6 +66,12 @@
// This won't help in the face of the router problems we've looked
// at, however. Detect starvation due to UDP activity and provide
// feedback to it.
+// - Change the transfer timeout scheme. We're less interested in
+// absolute time, in most cases, than in continuous progress.
+// - Many of the policy class settings are currently applied to the
+// entire class. Some, like connection limits, would be better
+// applied to each destination target making multiple targets
+// independent.
//
// Integration to-do list
// - LLTextureFetch still needs a major refactor. The use of
@@ -73,7 +81,6 @@
// the main source file.
// - Expand areas of usage eventually leading to the removal of LLCurl.
// Rough order of expansion:
-// . Mesh fetch
// . Avatar names
// . Group membership lists
// . Caps access in general
@@ -97,8 +104,8 @@ namespace LLCore
{
// Maxium number of policy classes that can be defined.
-// *TODO: Currently limited to the default class, extend.
-const int HTTP_POLICY_CLASS_LIMIT = 1;
+// *TODO: Currently limited to the default class + 1, extend.
+const int HTTP_POLICY_CLASS_LIMIT = 8;
// Debug/informational tracing. Used both
// as a global option and in per-request traces.
@@ -129,6 +136,7 @@ const int HTTP_REDIRECTS_DEFAULT = 10;
// Retries and time-on-queue are not included and aren't
// accounted for.
const long HTTP_REQUEST_TIMEOUT_DEFAULT = 30L;
+const long HTTP_REQUEST_XFER_TIMEOUT_DEFAULT = 0L;
const long HTTP_REQUEST_TIMEOUT_MIN = 0L;
const long HTTP_REQUEST_TIMEOUT_MAX = 3600L;
@@ -137,6 +145,11 @@ const int HTTP_CONNECTION_LIMIT_DEFAULT = 8;
const int HTTP_CONNECTION_LIMIT_MIN = 1;
const int HTTP_CONNECTION_LIMIT_MAX = 256;
+// Miscellaneous defaults
+const long HTTP_PIPELINING_DEFAULT = 0L;
+const bool HTTP_USE_RETRY_AFTER_DEFAULT = true;
+const long HTTP_THROTTLE_RATE_DEFAULT = 0L;
+
// Tuning parameters
// Time worker thread sleeps after a pass through the
diff --git a/indra/llcorehttp/_httplibcurl.cpp b/indra/llcorehttp/_httplibcurl.cpp
index 6fe0bfc7d1..fc257fb0c1 100755
--- a/indra/llcorehttp/_httplibcurl.cpp
+++ b/indra/llcorehttp/_httplibcurl.cpp
@@ -4,7 +4,7 @@
*
* $LicenseInfo:firstyear=2012&license=viewerlgpl$
* Second Life Viewer Source Code
- * Copyright (C) 2012, Linden Research, Inc.
+ * Copyright (C) 2012-2013, Linden Research, Inc.
*
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
@@ -41,7 +41,8 @@ namespace LLCore
HttpLibcurl::HttpLibcurl(HttpService * service)
: mService(service),
mPolicyCount(0),
- mMultiHandles(NULL)
+ mMultiHandles(NULL),
+ mActiveHandles(NULL)
{}
@@ -77,6 +78,9 @@ void HttpLibcurl::shutdown()
delete [] mMultiHandles;
mMultiHandles = NULL;
+
+ delete [] mActiveHandles;
+ mActiveHandles = NULL;
}
mPolicyCount = 0;
@@ -90,9 +94,12 @@ void HttpLibcurl::start(int policy_count)
mPolicyCount = policy_count;
mMultiHandles = new CURLM * [mPolicyCount];
+ mActiveHandles = new int [mPolicyCount];
+
for (int policy_class(0); policy_class < mPolicyCount; ++policy_class)
{
mMultiHandles[policy_class] = curl_multi_init();
+ mActiveHandles[policy_class] = 0;
}
}
@@ -110,8 +117,10 @@ HttpService::ELoopSpeed HttpLibcurl::processTransport()
// Give libcurl some cycles to do I/O & callbacks
for (int policy_class(0); policy_class < mPolicyCount; ++policy_class)
{
- if (! mMultiHandles[policy_class])
+ if (! mActiveHandles[policy_class] || ! mMultiHandles[policy_class])
+ {
continue;
+ }
int running(0);
CURLMcode status(CURLM_CALL_MULTI_PERFORM);
@@ -132,12 +141,10 @@ HttpService::ELoopSpeed HttpLibcurl::processTransport()
CURL * handle(msg->easy_handle);
CURLcode result(msg->data.result);
- if (completeRequest(mMultiHandles[policy_class], handle, result))
- {
- // Request is still active, don't get too sleepy
- ret = HttpService::NORMAL;
- }
- handle = NULL; // No longer valid on return
+ completeRequest(mMultiHandles[policy_class], handle, result);
+ handle = NULL; // No longer valid on return
+ ret = HttpService::NORMAL; // If anything completes, we may have a free slot.
+ // Turning around quickly reduces connection gap by 7-10mS.
}
else if (CURLMSG_NONE == msg->msg)
{
@@ -193,6 +200,7 @@ void HttpLibcurl::addOp(HttpOpRequest * op)
// On success, make operation active
mActiveOps.insert(op);
+ ++mActiveHandles[op->mReqPolicy];
}
@@ -214,6 +222,7 @@ bool HttpLibcurl::cancel(HttpHandle handle)
// Drop references
mActiveOps.erase(it);
+ --mActiveHandles[op->mReqPolicy];
op->release();
return true;
@@ -240,7 +249,7 @@ void HttpLibcurl::cancelRequest(HttpOpRequest * op)
{
LL_INFOS("CoreHttp") << "TRACE, RequestCanceled, Handle: "
<< static_cast