Result of svn merge -r65183:65337 svn+ssh://svn/svn/linden/branches/python-shuffle into release. Also includes untabification of many python files.
parent
88d7d45316
commit
7964c6f7a5
|
|
@ -1,5 +1,7 @@
|
|||
# @file __init__.py
|
||||
# @brief Initialization file for the indra module.
|
||||
#
|
||||
# Copyright (c) 2006-$CurrentYear$, Linden Research, Inc.
|
||||
# $License$
|
||||
"""\
|
||||
@file __init__.py
|
||||
@brief Initialization file for the indra module.
|
||||
|
||||
Copyright (c) 2006-2007, Linden Research, Inc.
|
||||
$License$
|
||||
"""
|
||||
|
|
|
|||
|
|
@ -0,0 +1,7 @@
|
|||
"""\
|
||||
@file __init__.py
|
||||
@brief Initialization file for the indra.base module.
|
||||
|
||||
Copyright (c) 2007, Linden Research, Inc.
|
||||
$License$
|
||||
"""
|
||||
|
|
@ -0,0 +1,46 @@
|
|||
"""\
|
||||
@file config.py
|
||||
@brief Utility module for parsing and accessing the indra.xml config file.
|
||||
|
||||
Copyright (c) 2006-2007, Linden Research, Inc.
|
||||
$License$
|
||||
"""
|
||||
|
||||
from os.path import dirname, join, realpath
|
||||
import types
|
||||
from indra.base import llsd
|
||||
|
||||
_g_config_dict = None
|
||||
|
||||
def load(indra_xml_file=None):
    """Load the indra.xml config file into the module-level config dict.

    @param indra_xml_file optional explicit path to the config file; when
        omitted, the path is derived from this module's own location.

    The parsed map is cached in _g_config_dict; once a config has been
    loaded, subsequent calls are no-ops.
    """
    global _g_config_dict
    if _g_config_dict is None:
        if indra_xml_file is None:
            ## going from:
            ## "/opt/linden/indra/lib/python/indra/base/config.py"
            ## to:
            ## "/opt/linden/etc/indra.xml"
            # Use join() so a separator is inserted between the module
            # directory and the relative path; plain string concatenation
            # produced ".../base../../.." which resolved to the wrong file.
            indra_xml_file = realpath(
                join(dirname(realpath(__file__)),
                     "../../../../../../etc/indra.xml"))
        config_file = open(indra_xml_file)
        try:
            _g_config_dict = llsd.LLSD().parse(config_file.read())
        finally:
            # Close even if parsing raises.
            config_file.close()
        #print "loaded config from",indra_xml_file,"into",_g_config_dict
|
||||
|
||||
def update(xml_file):
    """Load an XML file and apply its map as overrides or additions
    to the existing config.  The dataserver does this with indra.xml
    and dataserver.xml.

    @param xml_file path to an LLSD XML file whose top-level map is
        merged into the already-loaded config.
    @raise Exception if load() has not been called first.
    """
    global _g_config_dict
    if _g_config_dict is None:
        raise Exception("no config loaded before config.update()")
    config_file = open(xml_file)
    try:
        overrides = llsd.LLSD().parse(config_file.read())
    finally:
        # Close even if parsing raises.
        config_file.close()
    _g_config_dict.update(overrides)
|
||||
|
||||
def get(key):
    """Return the config value stored under key.

    Loads the default config file first if nothing has been loaded yet.

    @param key config map key to look up.
    @return the associated value, or None if the key is absent.
    """
    global _g_config_dict
    if _g_config_dict is None:
        load()
    return _g_config_dict.get(key)
|
||||
|
|
@ -0,0 +1,808 @@
|
|||
"""\
|
||||
@file llsd.py
|
||||
@brief Types as well as parsing and formatting functions for handling LLSD.
|
||||
|
||||
Copyright (c) 2006-2007, Linden Research, Inc.
|
||||
$License$
|
||||
"""
|
||||
|
||||
import datetime
|
||||
import base64
|
||||
import struct
|
||||
import time
|
||||
import types
|
||||
import re
|
||||
|
||||
#from cElementTree import fromstring ## This does not work under Windows
|
||||
try:
|
||||
## This is the old name of elementtree, for use with 2.3
|
||||
from elementtree.ElementTree import fromstring
|
||||
except ImportError:
|
||||
## This is the name of elementtree under python 2.5
|
||||
from xml.etree.ElementTree import fromstring
|
||||
|
||||
from indra.base import lluuid
|
||||
|
||||
int_regex = re.compile("[-+]?\d+")
|
||||
real_regex = re.compile("[-+]?(\d+(\.\d*)?|\d*\.\d+)([eE][-+]?\d+)?")
|
||||
alpha_regex = re.compile("[a-zA-Z]+")
|
||||
date_regex = re.compile("(?P<year>\d{4})-(?P<month>\d{2})-(?P<day>\d{2})T(?P<hour>\d{2}):(?P<minute>\d{2}):(?P<second>\d{2})(?P<second_float>\.\d{2})?Z")
|
||||
#date: d"YYYY-MM-DDTHH:MM:SS.FFZ"
|
||||
|
||||
class LLSDParseError(Exception):
    """Raised when a buffer cannot be parsed as LLSD."""
    pass

class LLSDSerializationError(Exception):
    """Raised when a python object cannot be serialized as LLSD."""
    pass


class binary(str):
    """Marker subclass of str identifying LLSD binary data."""
    pass

class uri(str):
    """Marker subclass of str identifying an LLSD URI value."""
    pass


# Text forms accepted as boolean values when converting <boolean> nodes.
BOOL_TRUE = ('1', '1.0', 'true')
BOOL_FALSE = ('0', '0.0', 'false', '')
|
||||
|
||||
|
||||
def bool_to_python(node):
    """Convert a <boolean> element into a python bool."""
    text = node.text or ''
    # Anything not explicitly a true-form is treated as False.
    return text in BOOL_TRUE
|
||||
|
||||
def int_to_python(node):
    """Convert an <integer> element into a python int (0 when empty)."""
    text = (node.text or '').strip()
    if text:
        return int(text)
    return 0
|
||||
|
||||
def real_to_python(node):
    """Convert a <real> element into a python float (0.0 when empty)."""
    text = (node.text or '').strip()
    if text:
        return float(text)
    return 0.0
|
||||
|
||||
def uuid_to_python(node):
    """Convert a <uuid> element into an lluuid.UUID instance."""
    return lluuid.UUID(node.text)
|
||||
|
||||
def str_to_python(node):
    """Convert a <string> element into a utf8-encoded byte string."""
    # NOTE: relies on the Python 2 builtin `unicode`.
    return unicode(node.text or '').encode('utf8', 'replace')
|
||||
|
||||
def bin_to_python(node):
    """Convert a <binary> element (base64 text) into a binary instance."""
    return binary(base64.decodestring(node.text or ''))
|
||||
|
||||
def date_to_python(node):
    """Convert a <date> element into a datetime (epoch when empty).

    Any fractional-second portion is not accepted by the strptime
    format used here.
    """
    text = node.text or ''
    if not text:
        text = "1970-01-01T00:00:00Z"
    parsed = time.strptime(text, '%Y-%m-%dT%H:%M:%SZ')
    return datetime.datetime(*parsed[:6])
|
||||
|
||||
def uri_to_python(node):
    """Convert a <uri> element into a uri instance, or None when empty."""
    text = node.text or ''
    if text:
        return uri(text)
    return None
|
||||
|
||||
def map_to_python(node):
    """Convert a <map> element (alternating key/value children) to a dict."""
    result = {}
    index = 0
    # Children alternate: <key> element, then the value element.
    while index < len(node):
        result[node[index].text] = to_python(node[index + 1])
        index += 2
    return result
|
||||
|
||||
def array_to_python(node):
    """Convert an <array> element into a python list."""
    result = []
    for child in node:
        result.append(to_python(child))
    return result
|
||||
|
||||
|
||||
# Dispatch table mapping LLSD XML tag names to their converter functions.
NODE_HANDLERS = dict(
    undef=lambda x: None,
    boolean=bool_to_python,
    integer=int_to_python,
    real=real_to_python,
    uuid=uuid_to_python,
    string=str_to_python,
    binary=bin_to_python,
    date=date_to_python,
    uri=uri_to_python,
    map=map_to_python,
    array=array_to_python,
    )
|
||||
|
||||
def to_python(node):
    """Convert a parsed LLSD XML element into the equivalent python object.

    Raises KeyError for an unrecognized element tag.
    """
    return NODE_HANDLERS[node.tag](node)
|
||||
|
||||
class Nothing(object):
    """Sentinel placeholder class; not referenced elsewhere in this file."""
    pass
|
||||
|
||||
|
||||
class LLSDXMLFormatter(object):
    """Serialize python objects as LLSD XML.

    type_map maps python types to the bound emitter methods below;
    generate() dispatches through it and format() wraps the result in
    the <llsd> document element.
    """
    def __init__(self):
        self.type_map = {
            type(None) : self.UNDEF,
            bool : self.BOOLEAN,
            int : self.INTEGER,
            long : self.INTEGER,
            float : self.REAL,
            lluuid.UUID : self.UUID,
            binary : self.BINARY,
            str : self.STRING,
            unicode : self.STRING,
            uri : self.URI,
            datetime.datetime : self.DATE,
            list : self.ARRAY,
            tuple : self.ARRAY,
            types.GeneratorType : self.ARRAY,
            dict : self.MAP
        }

    def elt(self, name, contents=None):
        """Return an XML element named name; self-closing when empty."""
        # Compare with == rather than 'is': identity comparison against a
        # string literal only worked by the accident of string interning.
        if contents is None or contents == '':
            return "<%s />" % (name,)
        else:
            return "<%s>%s</%s>" % (name, contents, name)

    def xml_esc(self, v):
        """Escape the XML metacharacters &, < and > in v."""
        # Escape '&' first so already-emitted entities are not re-escaped.
        # (Previously each replace mapped a character to itself, so no
        # escaping happened at all and malformed XML could be produced.)
        return v.replace('&', '&amp;').replace('<', '&lt;').replace('>', '&gt;')

    def UNDEF(self, v):
        return self.elt('undef')
    def BOOLEAN(self, v):
        if v:
            return self.elt('boolean', 'true')
        else:
            return self.elt('boolean', 'false')
    def INTEGER(self, v):
        return self.elt('integer', v)
    def REAL(self, v):
        return self.elt('real', v)
    def UUID(self, v):
        # A null uuid is emitted as an empty element.
        if v.isNull():
            return self.elt('uuid')
        else:
            return self.elt('uuid', v)
    def BINARY(self, v):
        return self.elt('binary', base64.encodestring(v))
    def STRING(self, v):
        return self.elt('string', self.xml_esc(v))
    def URI(self, v):
        return self.elt('uri', self.xml_esc(str(v)))
    def DATE(self, v):
        return self.elt('date', v.strftime('%Y-%m-%dT%H:%M:%SZ'))
    def ARRAY(self, v):
        return self.elt('array', ''.join([self.generate(item) for item in v]))
    def MAP(self, v):
        return self.elt(
            'map',
            ''.join(["%s%s" % (self.elt('key', key), self.generate(value))
                     for key, value in v.items()]))

    def generate(self, something):
        """Return the XML serialization of something.

        @raise LLSDSerializationError for unsupported types.
        """
        t = type(something)
        if self.type_map.has_key(t):
            return self.type_map[t](something)
        else:
            raise LLSDSerializationError("Cannot serialize unknown type: %s (%s)" % (
                t, something))

    def format(self, something):
        """Return something serialized as a complete LLSD XML document."""
        return '<?xml version="1.0" ?>' + self.elt("llsd", self.generate(something))
|
||||
|
||||
def format_xml(something):
    """Serialize a python object as an LLSD XML string."""
    return LLSDXMLFormatter().format(something)
|
||||
|
||||
class LLSDNotationFormatter(object):
    """Serialize python objects as LLSD notation strings.

    type_map maps python types to the bound emitter methods below;
    generate() dispatches through it.
    """
    def __init__(self):
        self.type_map = {
            type(None) : self.UNDEF,
            bool : self.BOOLEAN,
            int : self.INTEGER,
            long : self.INTEGER,
            float : self.REAL,
            lluuid.UUID : self.UUID,
            binary : self.BINARY,
            str : self.STRING,
            unicode : self.STRING,
            uri : self.URI,
            datetime.datetime : self.DATE,
            list : self.ARRAY,
            tuple : self.ARRAY,
            types.GeneratorType : self.ARRAY,
            dict : self.MAP
        }

    def UNDEF(self, v):
        # undef: !
        return '!'
    def BOOLEAN(self, v):
        # boolean: true | false
        if v:
            return 'true'
        else:
            return 'false'
    def INTEGER(self, v):
        # integer: i####
        return "i%s" % v
    def REAL(self, v):
        # real: r####
        return "r%s" % v
    def UUID(self, v):
        # uuid: u####
        return "u%s" % v
    def BINARY(self, v):
        raise LLSDSerializationError("binary notation not yet supported")
    def STRING(self, v):
        # string: single-quoted, backslash-escaping \ and '
        return "'%s'" % v.replace("\\", "\\\\").replace("'", "\\'")
    def URI(self, v):
        # uri: l"escaped"
        return 'l"%s"' % str(v).replace("\\", "\\\\").replace('"', '\\"')
    def DATE(self, v):
        # date: d"YYYY-MM-DDTHH:MM:SS[.FF]Z"; fractional seconds are
        # emitted only when the datetime carries microseconds.
        second_str = ""
        if v.microsecond > 0:
            seconds = v.second + float(v.microsecond) / 1000000
            second_str = "%05.2f" % seconds
        else:
            second_str = "%d" % v.second
        return 'd"%s%sZ"' % (v.strftime('%Y-%m-%dT%H:%M:'), second_str)
    def ARRAY(self, v):
        # array: [ object, object ]
        return "[%s]" % ','.join([self.generate(item) for item in v])
    def MAP(self, v):
        # map: { 'key':object, ... } -- keys get string-style escaping.
        return "{%s}" % ','.join(["'%s':%s" % (key.replace("\\", "\\\\").replace("'", "\\'"), self.generate(value))
             for key, value in v.items()])

    def generate(self, something):
        """Return the notation serialization of something.

        @raise LLSDSerializationError for unsupported types.
        """
        t = type(something)
        if self.type_map.has_key(t):
            return self.type_map[t](something)
        else:
            raise LLSDSerializationError("Cannot serialize unknown type: %s (%s)" % (
                t, something))

    def format(self, something):
        """Return something serialized as an LLSD notation string."""
        return self.generate(something)
|
||||
|
||||
def format_notation(something):
    """Serialize a python object as an LLSD notation string."""
    return LLSDNotationFormatter().format(something)
|
||||
|
||||
def _hex_as_nybble(hex):
    """Return the integer value (0-15) of a single hex digit character.

    @param hex a one-character string in [0-9a-fA-F].
    @raise ValueError if the character is not a hex digit.  (Previously
        a non-hex character silently fell through and returned None,
        which surfaced later as an obscure TypeError in the caller's
        byte arithmetic.)
    """
    if (hex >= '0') and (hex <= '9'):
        return ord(hex) - ord('0')
    elif (hex >= 'a') and (hex <= 'f'):
        return 10 + ord(hex) - ord('a')
    elif (hex >= 'A') and (hex <= 'F'):
        return 10 + ord(hex) - ord('A')
    raise ValueError("invalid hex digit: %r" % (hex,))
|
||||
|
||||
class LLSDBinaryParser(object):
    """Parse the LLSD binary encoding into python objects.

    Each value starts with a one-byte type tag; sizes and numbers are
    big-endian ("network order"), per the struct '!' formats below.
    The parser keeps a cursor (_index) into the input (_buffer).
    """
    def __init__(self):
        pass

    def parse(self, buffer, ignore_binary = False):
        """
        This is the basic public interface for parsing.

        @param buffer the binary data to parse in an indexable sequence.
        @param ignore_binary parser throws away data in llsd binary nodes.
        @return returns a python object.
        """
        self._buffer = buffer
        self._index = 0
        self._keep_binary = not ignore_binary
        return self._parse()

    def _parse(self):
        """Parse and return the value at the current cursor position."""
        cc = self._buffer[self._index]
        self._index += 1
        if cc == '{':
            return self._parse_map()
        elif cc == '[':
            return self._parse_array()
        elif cc == '!':
            return None
        elif cc == '0':
            return False
        elif cc == '1':
            return True
        elif cc == 'i':
            # 'i' = integer
            idx = self._index
            self._index += 4
            return struct.unpack("!i", self._buffer[idx:idx+4])[0]
        elif cc == ('r'):
            # 'r' = real number
            idx = self._index
            self._index += 8
            return struct.unpack("!d", self._buffer[idx:idx+8])[0]
        elif cc == 'u':
            # 'u' = uuid
            idx = self._index
            self._index += 16
            return lluuid.uuid_bits_to_uuid(self._buffer[idx:idx+16])
        elif cc == 's':
            # 's' = string
            return self._parse_string()
        elif cc in ("'", '"'):
            # delimited/escaped string
            return self._parse_string_delim(cc)
        elif cc == 'l':
            # 'l' = uri
            return uri(self._parse_string())
        elif cc == ('d'):
            # 'd' = date in seconds since epoch
            idx = self._index
            self._index += 8
            seconds = struct.unpack("!d", self._buffer[idx:idx+8])[0]
            return datetime.datetime.fromtimestamp(seconds)
        elif cc == 'b':
            binary = self._parse_string()
            if self._keep_binary:
                return binary
            # *NOTE: maybe have a binary placeholder which has the
            # length.
            return None
        else:
            raise LLSDParseError("invalid binary token at byte %d: %d" % (
                self._index - 1, ord(cc)))

    def _parse_map(self):
        """Parse a map: 4-byte count, then key/value pairs, then '}'.

        Keys arrive either as 'k' + length-prefixed string or as a
        delimited string.
        """
        rv = {}
        size = struct.unpack("!i", self._buffer[self._index:self._index+4])[0]
        self._index += 4
        count = 0
        cc = self._buffer[self._index]
        self._index += 1
        key = ''
        while (cc != '}') and (count < size):
            if cc == 'k':
                key = self._parse_string()
            elif cc in ("'", '"'):
                key = self._parse_string_delim(cc)
            else:
                raise LLSDParseError("invalid map key at byte %d." % (
                    self._index - 1,))
            value = self._parse()
            #print "kv:",key,value
            rv[key] = value
            count += 1
            cc = self._buffer[self._index]
            self._index += 1
        if cc != '}':
            raise LLSDParseError("invalid map close token at byte %d." % (
                self._index,))
        return rv

    def _parse_array(self):
        """Parse an array: 4-byte count, then values, then ']'."""
        rv = []
        size = struct.unpack("!i", self._buffer[self._index:self._index+4])[0]
        self._index += 4
        count = 0
        cc = self._buffer[self._index]
        while (cc != ']') and (count < size):
            rv.append(self._parse())
            count += 1
            cc = self._buffer[self._index]
        if cc != ']':
            raise LLSDParseError("invalid array close token at byte %d." % (
                self._index,))
        self._index += 1
        return rv

    def _parse_string(self):
        """Parse a length-prefixed string: 4-byte size, then raw bytes."""
        size = struct.unpack("!i", self._buffer[self._index:self._index+4])[0]
        self._index += 4
        rv = self._buffer[self._index:self._index+size]
        self._index += size
        return rv

    def _parse_string_delim(self, delim):
        """Parse a delimited string with backslash escapes.

        Supports the C-style single-character escapes (\\a \\b \\f \\n
        \\r \\t \\v) and two-digit \\xNN hex escapes; any other escaped
        character stands for itself.
        """
        list = []
        found_escape = False
        found_hex = False
        found_digit = False
        byte = 0
        while True:
            cc = self._buffer[self._index]
            self._index += 1
            if found_escape:
                if found_hex:
                    if found_digit:
                        # Second hex digit completes the byte.
                        found_escape = False
                        found_hex = False
                        found_digit = False
                        byte <<= 4
                        byte |= _hex_as_nybble(cc)
                        list.append(chr(byte))
                        byte = 0
                    else:
                        # First hex digit after '\x'.
                        found_digit = True
                        byte = _hex_as_nybble(cc)
                elif cc == 'x':
                    found_hex = True
                else:
                    if cc == 'a':
                        list.append('\a')
                    elif cc == 'b':
                        list.append('\b')
                    elif cc == 'f':
                        list.append('\f')
                    elif cc == 'n':
                        list.append('\n')
                    elif cc == 'r':
                        list.append('\r')
                    elif cc == 't':
                        list.append('\t')
                    elif cc == 'v':
                        list.append('\v')
                    else:
                        # Unknown escape: keep the character literally.
                        list.append(cc)
                    found_escape = False
            elif cc == '\\':
                found_escape = True
            elif cc == delim:
                break
            else:
                list.append(cc)
        return ''.join(list)
|
||||
|
||||
class LLSDNotationParser(object):
    """ Parse LLSD notation:
    map: { string:object, string:object }
    array: [ object, object, object ]
    undef: !
    boolean: true | false | 1 | 0 | T | F | t | f | TRUE | FALSE
    integer: i####
    real: r####
    uuid: u####
    string: "g'day" | 'have a "nice" day' | s(size)"raw data"
    uri: l"escaped"
    date: d"YYYY-MM-DDTHH:MM:SS.FFZ"
    binary: b##"ff3120ab1" | b(size)"raw data" """
    def __init__(self):
        pass

    def parse(self, buffer, ignore_binary = False):
        """
        This is the basic public interface for parsing.

        @param buffer the notation string to parse.
        @param ignore_binary parser throws away data in llsd binary nodes.
        @return returns a python object.
        """
        # NOTE(review): an empty buffer yields False rather than None --
        # confirm callers depend on this before changing it.
        if buffer == "":
            return False

        self._buffer = buffer
        self._index = 0
        return self._parse()

    def _parse(self):
        """Parse and return the value starting at the current cursor.

        Dispatches on the one-character type tag.
        """
        cc = self._buffer[self._index]
        self._index += 1
        if cc == '{':
            return self._parse_map()
        elif cc == '[':
            return self._parse_array()
        elif cc == '!':
            return None
        elif cc == '0':
            return False
        elif cc == '1':
            return True
        elif cc in ('F', 'f'):
            # false / FALSE / F -- consume the rest of the word.
            self._skip_alpha()
            return False
        elif cc in ('T', 't'):
            # true / TRUE / T -- consume the rest of the word.
            self._skip_alpha()
            return True
        elif cc == 'i':
            # 'i' = integer
            return self._parse_integer()
        elif cc == ('r'):
            # 'r' = real number
            return self._parse_real()
        elif cc == 'u':
            # 'u' = uuid
            return self._parse_uuid()
        elif cc in ("'", '"', 's'):
            return self._parse_string(cc)
        elif cc == 'l':
            # 'l' = uri
            delim = self._buffer[self._index]
            self._index += 1
            val = uri(self._parse_string(delim))
            if len(val) == 0:
                return None
            return val
        elif cc == ('d'):
            # 'd' = date in seconds since epoch
            return self._parse_date()
        elif cc == 'b':
            raise LLSDParseError("binary notation not yet supported")
        else:
            print cc
            raise LLSDParseError("invalid token at index %d: %d" % (
                self._index - 1, ord(cc)))

    def _parse_map(self):
        """ map: { string:object, string:object }

        Two-phase loop: scan for a key string, then for the ':' and the
        value; whitespace and ',' separators are skipped in between.
        """
        rv = {}
        cc = self._buffer[self._index]
        self._index += 1
        key = ''
        found_key = False
        while (cc != '}'):
            if not found_key:
                if cc in ("'", '"', 's'):
                    key = self._parse_string(cc)
                    found_key = True
                    #print "key:",key
                elif cc.isspace() or cc == ',':
                    cc = self._buffer[self._index]
                    self._index += 1
                else:
                    raise LLSDParseError("invalid map key at byte %d." % (
                        self._index - 1,))
            else:
                if cc.isspace() or cc == ':':
                    #print "skipping whitespace '%s'" % cc
                    cc = self._buffer[self._index]
                    self._index += 1
                    continue
                self._index += 1
                value = self._parse()
                #print "kv:",key,value
                rv[key] = value
                found_key = False
                cc = self._buffer[self._index]
                self._index += 1
                #if cc == '}':
                #    break
                #cc = self._buffer[self._index]
                #self._index += 1

        return rv

    def _parse_array(self):
        """ array: [ object, object, object ]

        Whitespace and ',' between elements are skipped.
        """
        rv = []
        cc = self._buffer[self._index]
        while (cc != ']'):
            if cc.isspace() or cc == ',':
                self._index += 1
                cc = self._buffer[self._index]
                continue
            rv.append(self._parse())
            cc = self._buffer[self._index]

        if cc != ']':
            raise LLSDParseError("invalid array close token at index %d." % (
                self._index,))
        self._index += 1
        return rv

    def _parse_uuid(self):
        """Parse a uuid body (the 'u' tag is already consumed)."""
        match = re.match(lluuid.UUID.uuid_regex, self._buffer[self._index:])
        if not match:
            raise LLSDParseError("invalid uuid token at index %d." % self._index)

        (start, end) = match.span()
        start += self._index
        end += self._index
        self._index = end
        return lluuid.UUID(self._buffer[start:end])

    def _skip_alpha(self):
        """Advance the cursor past any run of ascii letters."""
        match = re.match(alpha_regex, self._buffer[self._index:])
        if match:
            self._index += match.end()

    def _parse_date(self):
        """Parse a d"..." date; an empty string means the epoch."""
        delim = self._buffer[self._index]
        self._index += 1
        datestr = self._parse_string(delim)

        if datestr == "":
            return datetime.datetime(1970, 1, 1)

        match = re.match(date_regex, datestr)
        if not match:
            raise LLSDParseError("invalid date string '%s'." % datestr)

        year = int(match.group('year'))
        month = int(match.group('month'))
        day = int(match.group('day'))
        hour = int(match.group('hour'))
        minute = int(match.group('minute'))
        second = int(match.group('second'))
        seconds_float = match.group('second_float')
        microsecond = 0
        if seconds_float:
            # Two fractional digits (hundredths) scaled to microseconds.
            microsecond = int(seconds_float[1:]) * 10000
        return datetime.datetime(year, month, day, hour, minute, second, microsecond)

    def _parse_real(self):
        """Parse an r#### real number body."""
        match = re.match(real_regex, self._buffer[self._index:])
        if not match:
            raise LLSDParseError("invalid real token at index %d." % self._index)

        (start, end) = match.span()
        start += self._index
        end += self._index
        self._index = end
        return float( self._buffer[start:end] )

    def _parse_integer(self):
        """Parse an i#### integer body."""
        match = re.match(int_regex, self._buffer[self._index:])
        if not match:
            raise LLSDParseError("invalid integer token at index %d." % self._index)

        (start, end) = match.span()
        start += self._index
        end += self._index
        self._index = end
        return int( self._buffer[start:end] )

    def _parse_string(self, delim):
        """ string: "g'day" | 'have a "nice" day' | s(size)"raw data" """
        rv = ""

        if delim in ("'", '"'):
            rv = self._parse_string_delim(delim)
        elif delim == 's':
            rv = self._parse_string_raw()
        else:
            raise LLSDParseError("invalid string token at index %d." % self._index)

        return rv


    def _parse_string_delim(self, delim):
        """ string: "g'day" | 'have a "nice" day'

        Handles backslash escapes: C-style single characters (\\a \\b
        \\f \\n \\r \\t \\v), two-digit \\xNN hex escapes, and literal
        pass-through for anything else.
        """
        list = []
        found_escape = False
        found_hex = False
        found_digit = False
        byte = 0
        while True:
            cc = self._buffer[self._index]
            self._index += 1
            if found_escape:
                if found_hex:
                    if found_digit:
                        # Second hex digit completes the byte.
                        found_escape = False
                        found_hex = False
                        found_digit = False
                        byte <<= 4
                        byte |= _hex_as_nybble(cc)
                        list.append(chr(byte))
                        byte = 0
                    else:
                        # First hex digit after '\x'.
                        found_digit = True
                        byte = _hex_as_nybble(cc)
                elif cc == 'x':
                    found_hex = True
                else:
                    if cc == 'a':
                        list.append('\a')
                    elif cc == 'b':
                        list.append('\b')
                    elif cc == 'f':
                        list.append('\f')
                    elif cc == 'n':
                        list.append('\n')
                    elif cc == 'r':
                        list.append('\r')
                    elif cc == 't':
                        list.append('\t')
                    elif cc == 'v':
                        list.append('\v')
                    else:
                        # Unknown escape: keep the character literally.
                        list.append(cc)
                    found_escape = False
            elif cc == '\\':
                found_escape = True
            elif cc == delim:
                break
            else:
                list.append(cc)
        return ''.join(list)

    def _parse_string_raw(self):
        """ string: s(size)"raw data" """
        # Read the (size) portion.
        cc = self._buffer[self._index]
        self._index += 1
        if cc != '(':
            raise LLSDParseError("invalid string token at index %d." % self._index)

        rparen = self._buffer.find(')', self._index)
        if rparen == -1:
            raise LLSDParseError("invalid string token at index %d." % self._index)

        size = int(self._buffer[self._index:rparen])

        self._index = rparen + 1
        delim = self._buffer[self._index]
        self._index += 1
        if delim not in ("'", '"'):
            raise LLSDParseError("invalid string token at index %d." % self._index)

        rv = self._buffer[self._index:(self._index + size)]
        self._index += size
        cc = self._buffer[self._index]
        self._index += 1
        if cc != delim:
            raise LLSDParseError("invalid string token at index %d." % self._index)

        return rv
|
||||
|
||||
def format_binary(something):
    """Serialize something as binary LLSD, prefixed with the
    '<?llsd/binary?>' header line."""
    return '<?llsd/binary?>\n' + _format_binary_recurse(something)
|
||||
|
||||
def _format_binary_recurse(something):
    """Return the binary LLSD serialization of something, without the
    document header.

    Sizes and numbers use network byte order ('!' struct formats).
    @raise LLSDSerializationError for unsupported types.
    """
    if something is None:
        return '!'
    elif isinstance(something, bool):
        # bool before int: bool is a subclass of int.
        if something:
            return '1'
        else:
            return '0'
    elif isinstance(something, (int, long)):
        return 'i' + struct.pack('!i', something)
    elif isinstance(something, float):
        return 'r' + struct.pack('!d', something)
    elif isinstance(something, lluuid.UUID):
        return 'u' + something._bits
    elif isinstance(something, binary):
        # binary before str: binary is a subclass of str.
        return 'b' + struct.pack('!i', len(something)) + something
    elif isinstance(something, (str, unicode)):
        return 's' + struct.pack('!i', len(something)) + something
    elif isinstance(something, uri):
        # NOTE(review): uri also subclasses str, so uri values match the
        # str branch above first and this case looks unreachable --
        # confirm whether uris are meant to serialize with the 'l' tag.
        return 'l' + struct.pack('!i', len(something)) + something
    elif isinstance(something, datetime.datetime):
        seconds_since_epoch = time.mktime(something.timetuple())
        return 'd' + struct.pack('!d', seconds_since_epoch)
    elif isinstance(something, (list, tuple)):
        array_builder = []
        array_builder.append('[' + struct.pack('!i', len(something)))
        for item in something:
            array_builder.append(_format_binary_recurse(item))
        array_builder.append(']')
        return ''.join(array_builder)
    elif isinstance(something, dict):
        map_builder = []
        map_builder.append('{' + struct.pack('!i', len(something)))
        for key, value in something.items():
            map_builder.append('k' + struct.pack('!i', len(key)) + key)
            map_builder.append(_format_binary_recurse(value))
        map_builder.append('}')
        return ''.join(map_builder)
    else:
        raise LLSDSerializationError("Cannot serialize unknown type: %s (%s)" % (
            type(something), something))
|
||||
|
||||
|
||||
def parse(something):
    """Parse a string holding LLSD in binary, XML, or notation encoding.

    The encoding is sniffed from the leading characters of the input.
    """
    try:
        if something.startswith('<?llsd/binary?>'):
            # Drop the header line; the binary payload follows it.
            payload = something.split('\n', 1)[1]
            return LLSDBinaryParser().parse(payload)
        # This should be better.
        if something.startswith('<'):
            root = fromstring(something)
            return to_python(root[0])
        return LLSDNotationParser().parse(something)
    except KeyError:
        raise Exception('LLSD could not be parsed: %s' % (something,))
|
||||
|
||||
class LLSD(object):
    """Convenience wrapper pairing a python object with LLSD conversions.

    str(LLSD(x)) yields the XML serialization of x; the static methods
    expose the module-level parse/format functions.
    """
    def __init__(self, thing=None):
        self.thing = thing

    def __str__(self):
        return self.toXML(self.thing)

    parse = staticmethod(parse)
    toXML = staticmethod(format_xml)
    toBinary = staticmethod(format_binary)
    toNotation = staticmethod(format_notation)


# Shared instance representing the LLSD "undef" value.
undef = LLSD(None)
|
||||
|
||||
|
|
@ -0,0 +1,265 @@
|
|||
"""\
|
||||
@file lluuid.py
|
||||
@brief UUID parser/generator.
|
||||
|
||||
Copyright (c) 2004-2007, Linden Research, Inc.
|
||||
$License$
|
||||
"""
|
||||
|
||||
import md5, random, socket, string, time, re
|
||||
|
||||
def _int2binstr(i, l):
    """Return integer i packed big-endian into an l-byte string."""
    out = ''
    for _ in range(l):
        # Prepend the low byte, then shift it out.
        out = chr(i & 0xFF) + out
        i >>= 8
    return out
|
||||
|
||||
def _binstr2int(s):
    """Return the big-endian integer encoded by byte string s."""
    total = long(0)
    for c in s:
        total = (total << 8) + ord(c)
    return total
|
||||
|
||||
class UUID(object):
|
||||
"""
|
||||
A class which represents a 16 byte integer. Stored as a 16 byte 8
|
||||
bit character string.
|
||||
|
||||
The string version is to be of the form:
|
||||
AAAAAAAA-AAAA-BBBB-BBBB-BBBBBBCCCCCC (a 128-bit number in hex)
|
||||
where A=network address, B=timestamp, C=random.
|
||||
"""
|
||||
|
||||
NULL_STR = "00000000-0000-0000-0000-000000000000"
|
||||
|
||||
# the UUIDREGEX_STRING is helpful for parsing UUID's in text
|
||||
hex_wildcard = r"[0-9a-fA-F]"
|
||||
word = hex_wildcard + r"{4,4}-"
|
||||
long_word = hex_wildcard + r"{8,8}-"
|
||||
very_long_word = hex_wildcard + r"{12,12}"
|
||||
UUID_REGEX_STRING = long_word + word + word + word + very_long_word
|
||||
uuid_regex = re.compile(UUID_REGEX_STRING)
|
||||
|
||||
rand = random.Random()
|
||||
ip = ''
|
||||
try:
|
||||
ip = socket.gethostbyname(socket.gethostname())
|
||||
except(socket.gaierror):
|
||||
# no ip address, so just default to somewhere in 10.x.x.x
|
||||
ip = '10'
|
||||
for i in range(3):
|
||||
ip += '.' + str(rand.randrange(1,254))
|
||||
hexip = ''.join(["%04x" % long(i) for i in ip.split('.')])
|
||||
lastid = ''
|
||||
|
||||
def __init__(self, string_with_uuid=None):
|
||||
"""
|
||||
Initialize to first valid UUID in string argument,
|
||||
or to null UUID if none found or string is not supplied.
|
||||
"""
|
||||
self._bits = "\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0"
|
||||
if string_with_uuid:
|
||||
uuid_match = UUID.uuid_regex.search(string_with_uuid)
|
||||
if uuid_match:
|
||||
uuid_string = uuid_match.group()
|
||||
s = string.replace(uuid_string, '-', '')
|
||||
self._bits = _int2binstr(string.atol(s[:8],16),4) + \
|
||||
_int2binstr(string.atol(s[8:16],16),4) + \
|
||||
_int2binstr(string.atol(s[16:24],16),4) + \
|
||||
_int2binstr(string.atol(s[24:],16),4)
|
||||
|
||||
def __len__(self):
|
||||
"""
|
||||
Used by the len() builtin.
|
||||
"""
|
||||
return 36
|
||||
|
||||
def __nonzero__(self):
|
||||
return self._bits != "\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0"
|
||||
|
||||
def __str__(self):
|
||||
uuid_string = self.toString()
|
||||
return uuid_string
|
||||
|
||||
__repr__ = __str__
|
||||
|
||||
def __getitem__(self, index):
|
||||
return str(self)[index]
|
||||
|
||||
def __eq__(self, other):
|
||||
if isinstance(other, (str, unicode)):
|
||||
other = UUID(other)
|
||||
return self._bits == getattr(other, '_bits', '')
|
||||
|
||||
def __le__(self, other):
|
||||
return self._bits <= other._bits
|
||||
|
||||
def __ge__(self, other):
|
||||
return self._bits >= other._bits
|
||||
|
||||
def __lt__(self, other):
|
||||
return self._bits < other._bits
|
||||
|
||||
def __gt__(self, other):
|
||||
return self._bits > other._bits
|
||||
|
||||
def __hash__(self):
|
||||
return hash(self._bits)
|
||||
|
||||
def set(self, uuid):
|
||||
self._bits = uuid._bits
|
||||
|
||||
def setFromString(self, uuid_string):
|
||||
"""
|
||||
Given a string version of a uuid, set self bits
|
||||
appropriately. Returns self.
|
||||
"""
|
||||
s = string.replace(uuid_string, '-', '')
|
||||
self._bits = _int2binstr(string.atol(s[:8],16),4) + \
|
||||
_int2binstr(string.atol(s[8:16],16),4) + \
|
||||
_int2binstr(string.atol(s[16:24],16),4) + \
|
||||
_int2binstr(string.atol(s[24:],16),4)
|
||||
return self
|
||||
|
||||
def setFromMemoryDump(self, gdb_string):
|
||||
"""
|
||||
We expect to get gdb_string as four hex units. eg:
|
||||
0x147d54db 0xc34b3f1b 0x714f989b 0x0a892fd2
|
||||
Which will be translated to:
|
||||
db547d14-1b3f4bc3-9b984f71-d22f890a
|
||||
Returns self.
|
||||
"""
|
||||
s = string.replace(gdb_string, '0x', '')
|
||||
s = string.replace(s, ' ', '')
|
||||
t = ''
|
||||
for i in range(8,40,8):
|
||||
for j in range(0,8,2):
|
||||
t = t + s[i-j-2:i-j]
|
||||
self.setFromString(t)
|
||||
|
||||
def toString(self):
|
||||
"""
|
||||
Return as a string matching the LL standard
|
||||
AAAAAAAA-AAAA-BBBB-BBBB-BBBBBBCCCCCC (a 128-bit number in hex)
|
||||
where A=network address, B=timestamp, C=random.
|
||||
"""
|
||||
return uuid_bits_to_string(self._bits)
|
||||
|
||||
def getAsString(self):
|
||||
"""
|
||||
Return a different string representation of the form
|
||||
AAAAAAAA-AAAABBBB-BBBBBBBB-BBCCCCCC (a 128-bit number in hex)
|
||||
where A=network address, B=timestamp, C=random.
|
||||
"""
|
||||
i1 = _binstr2int(self._bits[0:4])
|
||||
i2 = _binstr2int(self._bits[4:8])
|
||||
i3 = _binstr2int(self._bits[8:12])
|
||||
i4 = _binstr2int(self._bits[12:16])
|
||||
return '%08lx-%08lx-%08lx-%08lx' % (i1,i2,i3,i4)
|
||||
|
||||
def generate(self):
    """
    Generate a new uuid. This algorithm is slightly different
    from c++ implementation for portability reasons.
    Returns self.
    """
    # Spin until the millisecond timestamp + random suffix differs
    # from the last id handed out, so ids stay unique even when
    # several are generated within one millisecond.
    # NOTE(review): lastid, hexip and rand are class attributes
    # declared outside this view -- confirm their format there.
    newid = self.__class__.lastid
    while newid == self.__class__.lastid:
        now = long(time.time() * 1000)
        newid = ("%016x" % now) + self.__class__.hexip
        newid += ("%03x" % (self.__class__.rand.randrange(0,4095)))
    self.__class__.lastid = newid
    # Hash the timestamp/ip/random string down to 16 opaque bytes.
    m = md5.new()
    m.update(newid)
    self._bits = m.digest()
    return self
|
||||
|
||||
def isNull(self):
    """Report whether this UUID equals the all-zero (default) uuid.

    True when every one of the 16 bytes is NUL.
    """
    null_bits = "\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0"
    return self._bits == null_bits
|
||||
|
||||
def xor(self, rhs):
    """XOR this UUID's bits with rhs's bits in place, 32 bits at a time."""
    mixed = []
    for off in (0, 4, 8, 12):
        word = (_binstr2int(self._bits[off:off + 4])
                ^ _binstr2int(rhs._bits[off:off + 4]))
        mixed.append(_int2binstr(word, 4))
    self._bits = ''.join(mixed)
|
||||
|
||||
def printTranslatedMemory(four_hex_uints):
    """
    We expect to get the string as four hex units. eg:
    0x147d54db 0xc34b3f1b 0x714f989b 0x0a892fd2
    Which will be translated to:
    db547d14-1b3f4bc3-9b984f71-d22f890a
    """
    # Debugger/REPL convenience: parse a gdb-style memory dump and
    # print the uuid in its canonical dashed form.
    uuid = UUID()
    uuid.setFromMemoryDump(four_hex_uints)
    print uuid.toString()
|
||||
|
||||
def isPossiblyID(id_str):
    """
    This function returns 1 if the string passed has some uuid-like
    characteristics. Otherwise returns 0.
    """
    # Reject obvious non-candidates by length before doing regex work.
    if not id_str or len(id_str) < 5 or len(id_str) > 36:
        return 0

    # An actual UUID instance, or an exact regex match, is a clear yes.
    # NOTE(review): a UUID instance reaches the len() call above -- this
    # assumes UUID supports len(); confirm against the class definition.
    if isinstance(id_str, UUID) or UUID.uuid_regex.match(id_str):
        return 1
    # Otherwise build a regex accepting any *prefix* of the dashed
    # 8-4-4-4-12 uuid layout that is exactly as long as id_str.
    # build a string which matches every character.
    hex_wildcard = r"[0-9a-fA-F]"
    chars = len(id_str)
    # First group: up to 8 hex digits.
    next = min(chars, 8)
    matcher = hex_wildcard+"{"+str(next)+","+str(next)+"}"
    chars = chars - next
    if chars > 0:
        matcher = matcher + "-"
        chars = chars - 1
    # Three middle groups of up to 4 hex digits, dash-separated.
    for block in range(3):
        next = max(min(chars, 4), 0)
        if next:
            matcher = matcher + hex_wildcard+"{"+str(next)+","+str(next)+"}"
            chars = chars - next
        if chars > 0:
            matcher = matcher + "-"
            chars = chars - 1
    # Whatever remains must fit the final 12-digit group.
    if chars > 0:
        next = min(chars, 12)
        matcher = matcher + hex_wildcard+"{"+str(next)+","+str(next)+"}"
    #print matcher
    uuid_matcher = re.compile(matcher)
    if uuid_matcher.match(id_str):
        return 1
    return 0
|
||||
|
||||
def uuid_bits_to_string(bits):
    """Format 16 raw uuid bytes as the canonical dashed 8-4-4-4-12 hex string."""
    bounds = ((0, 4), (4, 6), (6, 8), (8, 10), (10, 12), (12, 16))
    fields = tuple(_binstr2int(bits[lo:hi]) for lo, hi in bounds)
    return '%08lx-%04lx-%04lx-%04lx-%04lx%08lx' % fields
|
||||
|
||||
def uuid_bits_to_uuid(bits):
    """Build a UUID object from 16 raw bytes."""
    return UUID(uuid_bits_to_string(bits))
|
||||
|
||||
|
||||
# Optional integration: if the mulib serialization framework is present,
# teach it to render UUID objects as plain strings; otherwise just warn.
try:
    from mulib import mu
except ImportError:
    print "Couldn't import mulib, not registering UUID converter"
else:
    def convertUUID(req, uuid):
        # mulib converter hook: serialize a UUID as its string form.
        return str(uuid)

    mu.registerConverter(UUID, convertUUID)
|
||||
|
|
@ -0,0 +1,7 @@
|
|||
"""\
|
||||
@file __init__.py
|
||||
@brief Initialization file for the indra ipc module.
|
||||
|
||||
Copyright (c) 2006-2007, Linden Research, Inc.
|
||||
$License$
|
||||
"""
|
||||
|
|
@ -0,0 +1,103 @@
|
|||
"""\
|
||||
@file compatibility.py
|
||||
@brief Classes that manage compatibility states.
|
||||
|
||||
Copyright (c) 2007, Linden Research, Inc.
|
||||
$License$
|
||||
"""
|
||||
|
||||
|
||||
"""Compatibility combination table:
|
||||
|
||||
I M O N S
|
||||
-- -- -- -- --
|
||||
I: I I I I I
|
||||
M: I M M M M
|
||||
O: I M O M O
|
||||
N: I M M N N
|
||||
S: I M O N S
|
||||
|
||||
"""
|
||||
|
||||
class _Compatibility(object):
|
||||
def __init__(self, reason):
|
||||
self.reasons = [ ]
|
||||
if reason:
|
||||
self.reasons.append(reason)
|
||||
|
||||
def combine(self, other):
|
||||
if self._level() <= other._level():
|
||||
return self._buildclone(other)
|
||||
else:
|
||||
return other._buildclone(self)
|
||||
|
||||
def prefix(self, leadin):
|
||||
self.reasons = [ leadin + r for r in self.reasons ]
|
||||
|
||||
def same(self): return self._level() >= 1
|
||||
def deployable(self): return self._level() > 0
|
||||
def resolved(self): return self._level() > -1
|
||||
def compatible(self): return self._level() > -2
|
||||
|
||||
def explain(self):
|
||||
return self.__class__.__name__ + "\n" + "\n".join(self.reasons) + "\n"
|
||||
|
||||
def _buildclone(self, other=None):
|
||||
c = self._buildinstance()
|
||||
c.reasons = self.reasons
|
||||
if other:
|
||||
c.reasons = c.reasons + other.reasons
|
||||
return c
|
||||
|
||||
def _buildinstance(self):
|
||||
return self.__class__(None)
|
||||
|
||||
# def _level(self):
|
||||
# raise RuntimeError('implement in subclass')
|
||||
|
||||
|
||||
class Incompatible(_Compatibility):
    """Verdict: the two versions cannot interoperate at all."""

    def _level(self):
        return -2
|
||||
|
||||
class Mixed(_Compatibility):
    """Verdict: some parts are newer and some older than the base."""

    def __init__(self, *inputs):
        _Compatibility.__init__(self, None)
        # Pool the reasons from every contributing verdict.
        for verdict in inputs:
            self.reasons += verdict.reasons

    def _buildinstance(self):
        # Mixed's constructor takes varargs, not a single reason.
        return self.__class__()

    def _level(self):
        return -1
|
||||
|
||||
class _Aged(_Compatibility):
    """Shared combine() logic for the directional Older/Newer verdicts."""

    def combine(self, other):
        mine = self._level()
        theirs = other._level()
        # Same direction: ordinary merge.
        if mine == theirs:
            return self._buildclone(other)
        # Opposite directions (fractional levels truncate equal): Mixed.
        if int(mine) == int(theirs):
            return Mixed(self, other)
        return _Compatibility.combine(self, other)
|
||||
|
||||
class Older(_Aged):
    """Verdict: this side is older than the base."""

    def _level(self):
        return -0.25
|
||||
|
||||
class Newer(_Aged):
    """Verdict: this side is newer than the base."""

    def _level(self):
        return 0.25
|
||||
|
||||
class Same(_Compatibility):
    """Verdict: the two versions are identical."""

    def __init__(self):
        _Compatibility.__init__(self, None)

    def _buildinstance(self):
        # Same's constructor takes no reason argument.
        return self.__class__()

    def _level(self):
        return 1
|
||||
|
||||
|
||||
|
||||
|
||||
|
|
@ -0,0 +1,94 @@
|
|||
"""\
|
||||
@file httputil.py
|
||||
@brief HTTP utilities. HTTP date conversion and non-blocking HTTP
|
||||
client support.
|
||||
|
||||
Copyright (c) 2006-2007, Linden Research, Inc.
|
||||
$License$
|
||||
"""
|
||||
|
||||
|
||||
import os
|
||||
import time
|
||||
import urlparse
|
||||
|
||||
import httplib
|
||||
# Prefer mx.DateTime's parser when installed; fall back to dateutil.
# Either way, parse_date turns an HTTP date string into a datetime-like
# object.
# NOTE(review): from_http_time below calls .gmticks(), an mx.DateTime
# API; dateutil results may not provide it -- confirm the fallback path.
try:
    from mx.DateTime import Parser

    parse_date = Parser.DateTimeFromString
except ImportError:
    from dateutil import parser

    parse_date = parser.parse
|
||||
|
||||
|
||||
# RFC 1123 date layout used in HTTP headers.
HTTP_TIME_FORMAT = '%a, %d %b %Y %H:%M:%S GMT'


def to_http_time(t):
    """Format a Unix timestamp t as an RFC 1123 HTTP date string (GMT)."""
    # IMPROVED: plain defs instead of lambda assignments (PEP 8 E731) --
    # better tracebacks and docstrings; call sites are unchanged.
    return time.strftime(HTTP_TIME_FORMAT, time.gmtime(t))


def from_http_time(t):
    """Parse an HTTP date string t into an integer Unix timestamp."""
    return int(parse_date(t).gmticks())
|
||||
|
||||
def host_and_port_from_url(url):
    """@brief Simple function to get host and port from an http url.
    @return Returns host, port and port may be None.
    """
    host = None
    port = None
    parsed_url = urlparse.urlparse(url)
    try:
        host, port = parsed_url[1].split(':')
    except ValueError:
        # BUG FIX: the original assigned the *list* returned by split()
        # to host here; when the netloc has no ':' the host is simply
        # the whole netloc string.
        host = parsed_url[1]
    return host, port
|
||||
|
||||
|
||||
def better_putrequest(self, method, url, skip_host=0):
    # Patched-in replacement for HTTPConnection.putrequest: record the
    # method and path on the connection (used for error reporting) and
    # then delegate to the saved original implementation.
    self.method = method
    self.path = url
    self.old_putrequest(method, url, skip_host)
|
||||
|
||||
|
||||
class HttpClient(httplib.HTTPConnection):
    """A subclass of httplib.HTTPConnection which works around a bug
    in the interaction between eventlet sockets and httplib. httplib relies
    on gc to close the socket, causing the socket to be closed too early.

    This is an awful hack and the bug should be fixed properly ASAP.
    """
    def __init__(self, host, port=None, strict=None):
        httplib.HTTPConnection.__init__(self, host, port, strict)

    def close(self):
        # Deliberately a no-op: keep the socket open (see class docstring).
        pass

    # Capture the stock putrequest, then substitute the variant that
    # records method/path on the connection for error reporting.
    old_putrequest = httplib.HTTPConnection.putrequest
    putrequest = better_putrequest
|
||||
|
||||
|
||||
class HttpsClient(httplib.HTTPSConnection):
    """HTTPS twin of HttpClient: no-op close() plus a putrequest that
    records method/path; see HttpClient for the rationale."""
    def close(self):
        # Deliberately a no-op; see HttpClient.
        pass
    old_putrequest = httplib.HTTPSConnection.putrequest
    putrequest = better_putrequest
|
||||
|
||||
|
||||
|
||||
# Map of url scheme -> connection factory, used by makeConnection() below.
scheme_to_factory_map = {
    'http': HttpClient,
    'https': HttpsClient,
    }
|
||||
|
||||
|
||||
def makeConnection(scheme, location, use_proxy):
    """Build a connection object for scheme, honoring proxy env vars.

    When use_proxy is set, the connection is pointed at the proxy named
    by $http_proxy or $ALL_PROXY, defaulting to a local squid.
    """
    if use_proxy:
        for variable in ("http_proxy", "ALL_PROXY"):
            if variable in os.environ:
                location = os.environ[variable]
                break
        else:
            location = "localhost:3128" #default to local squid
        prefix = "http://"
        if location.startswith(prefix):
            location = location[len(prefix):]
    return scheme_to_factory_map[scheme](location)
|
||||
|
||||
|
|
@ -0,0 +1,353 @@
|
|||
"""\
|
||||
@file llmessage.py
|
||||
@brief Message template parsing and compatibility
|
||||
|
||||
Copyright (c) 2007, Linden Research, Inc.
|
||||
$License$
|
||||
"""
|
||||
|
||||
from sets import Set, ImmutableSet
|
||||
|
||||
from compatibility import Incompatible, Older, Newer, Same
|
||||
from tokenstream import TokenStream
|
||||
|
||||
###
|
||||
### Message Template
|
||||
###
|
||||
|
||||
class Template:
    """A parsed message template: a name-indexed collection of Messages."""

    def __init__(self):
        self.messages = { }

    def addMessage(self, m):
        self.messages[m.name] = m

    def compatibleWithBase(self, base):
        """Compare this template against base, message by message.

        Messages present on only one side rate Older/Newer; shared
        messages are compared individually, and all verdicts are folded
        into a single combined result.
        """
        all_names = (
            ImmutableSet(self.messages.keys())
            | ImmutableSet(base.messages.keys())
            )

        verdict = Same()
        for name in all_names:
            ours = self.messages.get(name, None)
            theirs = base.messages.get(name, None)

            if not ours:
                c = Older("missing message %s, did you mean to deprecate?" % name)
            elif not theirs:
                c = Newer("added message %s" % name)
            else:
                c = ours.compatibleWithBase(theirs)
                c.prefix("in message %s: " % name)

            verdict = verdict.combine(c)

        return verdict
|
||||
|
||||
|
||||
|
||||
class Message:
    """One message definition from the template: identity, wire attributes
    (priority, trust, encoding), deprecation state, and ordered blocks."""
    HIGH = "High"
    MEDIUM = "Medium"
    LOW = "Low"
    FIXED = "Fixed"
    priorities = [ HIGH, MEDIUM, LOW, FIXED ]
    # Priorities that carry an explicit number even in v1 templates.
    prioritieswithnumber = [ FIXED ]

    TRUSTED = "Trusted"
    NOTTRUSTED = "NotTrusted"
    trusts = [ TRUSTED, NOTTRUSTED ]

    UNENCODED = "Unencoded"
    ZEROCODED = "Zerocoded"
    encodings = [ UNENCODED, ZEROCODED ]

    NOTDEPRECATED = "NotDeprecated"
    DEPRECATED = "Deprecated"
    UDPDEPRECATED = "UDPDeprecated"
    deprecations = [ NOTDEPRECATED, UDPDEPRECATED, DEPRECATED ]
    # in order of increasing deprecation

    def __init__(self, name, number, priority, trust, coding):
        self.name = name
        self.number = number
        self.priority = priority
        self.trust = trust
        self.coding = coding
        # Index into deprecations; 0 means not deprecated.
        self.deprecateLevel = 0
        self.blocks = [ ]

    def deprecated(self):
        return self.deprecateLevel != 0

    def deprecate(self, deprecation):
        self.deprecateLevel = self.deprecations.index(deprecation)

    def addBlock(self, block):
        self.blocks.append(block)

    def compatibleWithBase(self, base):
        """Compare this message to its base-template counterpart.

        Any change to the wire identity (name/priority/trust/coding/
        number) is Incompatible; deprecation changes rate Older/Newer;
        blocks are compared pairwise by position, with extra or missing
        trailing blocks rated Newer/Older.
        """
        if self.name != base.name:
            # this should never happen in real life because of the
            # way Template matches up messages by name
            return Incompatible("has different name: %s vs. %s in base"
                        % (self.name, base.name))
        if self.priority != base.priority:
            return Incompatible("has different priority: %s vs. %s in base"
                        % (self.priority, base.priority))
        if self.trust != base.trust:
            return Incompatible("has different trust: %s vs. %s in base"
                        % (self.trust, base.trust))
        if self.coding != base.coding:
            return Incompatible("has different coding: %s vs. %s in base"
                        % (self.coding, base.coding))
        if self.number != base.number:
            return Incompatible("has different number: %s vs. %s in base"
                        % (self.number, base.number))

        compatibility = Same()

        # Deprecation may differ without breaking the wire format; it
        # only marks which side is ahead.
        if self.deprecateLevel != base.deprecateLevel:
            if self.deprecateLevel < base.deprecateLevel:
                c = Older("is less deprecated: %s vs. %s in base" % (
                    self.deprecations[self.deprecateLevel],
                    self.deprecations[base.deprecateLevel]))
            else:
                c = Newer("is more deprecated: %s vs. %s in base" % (
                    self.deprecations[self.deprecateLevel],
                    self.deprecations[base.deprecateLevel]))
            compatibility = compatibility.combine(c)

        selflen = len(self.blocks)
        baselen = len(base.blocks)
        samelen = min(selflen, baselen)

        # Blocks are matched positionally, not by name.
        for i in xrange(0, samelen):
            selfblock = self.blocks[i]
            baseblock = base.blocks[i]

            c = selfblock.compatibleWithBase(baseblock)
            if not c.same():
                c = Incompatible("block %d isn't identical" % i)
            compatibility = compatibility.combine(c)

        if selflen > baselen:
            c = Newer("has %d extra blocks" % (selflen - baselen))
        elif selflen < baselen:
            c = Older("missing %d extra blocks" % (baselen - selflen))
        else:
            c = Same()

        compatibility = compatibility.combine(c)
        return compatibility
|
||||
|
||||
|
||||
|
||||
class Block(object):
    """One block within a message: a name, a repeat mode (with an
    optional fixed count), and an ordered list of variables."""
    SINGLE = "Single"
    MULTIPLE = "Multiple"
    VARIABLE = "Variable"
    repeats = [ SINGLE, MULTIPLE, VARIABLE ]
    # Repeat modes that carry an explicit repeat count.
    repeatswithcount = [ MULTIPLE ]

    def __init__(self, name, repeat, count=None):
        self.name = name
        self.repeat = repeat
        self.count = count
        self.variables = [ ]

    def addVariable(self, variable):
        self.variables.append(variable)

    def compatibleWithBase(self, base):
        """Compare this block against its base counterpart.

        Any change to name/repeat/count is Incompatible; variables are
        matched by position, with extra or missing trailing variables
        rated Newer/Older.
        """
        if self.name != base.name:
            return Incompatible("has different name: %s vs. %s in base"
                        % (self.name, base.name))
        if self.repeat != base.repeat:
            return Incompatible("has different repeat: %s vs. %s in base"
                        % (self.repeat, base.repeat))
        # Count only matters for repeat modes that declare one.
        if self.repeat in Block.repeatswithcount:
            if self.count != base.count:
                return Incompatible("has different count: %s vs. %s in base"
                            % (self.count, base.count))

        compatibility = Same()

        selflen = len(self.variables)
        baselen = len(base.variables)

        for i in xrange(0, min(selflen, baselen)):
            selfvar = self.variables[i]
            basevar = base.variables[i]

            c = selfvar.compatibleWithBase(basevar)
            if not c.same():
                c = Incompatible("variable %d isn't identical" % i)
            compatibility = compatibility.combine(c)

        if selflen > baselen:
            c = Newer("has %d extra variables" % (selflen - baselen))
        elif selflen < baselen:
            c = Older("missing %d extra variables" % (baselen - selflen))
        else:
            c = Same()

        compatibility = compatibility.combine(c)
        return compatibility
|
||||
|
||||
|
||||
|
||||
class Variable:
    """One field of a message block: a name, a wire type, and (for
    fixed/variable types) an explicit size."""

    U8 = "U8"; U16 = "U16"; U32 = "U32"; U64 = "U64"
    S8 = "S8"; S16 = "S16"; S32 = "S32"; S64 = "S64"
    F32 = "F32"; F64 = "F64"
    LLVECTOR3 = "LLVector3"; LLVECTOR3D = "LLVector3d"; LLVECTOR4 = "LLVector4"
    LLQUATERNION = "LLQuaternion"
    LLUUID = "LLUUID"
    BOOL = "BOOL"
    IPADDR = "IPADDR"; IPPORT = "IPPORT"
    FIXED = "Fixed"
    VARIABLE = "Variable"
    types = [ U8, U16, U32, U64, S8, S16, S32, S64, F32, F64,
              LLVECTOR3, LLVECTOR3D, LLVECTOR4, LLQUATERNION,
              LLUUID, BOOL, IPADDR, IPPORT, FIXED, VARIABLE ]
    typeswithsize = [ FIXED, VARIABLE ]

    def __init__(self, name, type, size):
        self.name = name
        self.type = type
        self.size = size

    def compatibleWithBase(self, base):
        """Variables must match exactly; any difference is Incompatible."""
        mismatch = None
        if self.name != base.name:
            mismatch = ("name", self.name, base.name)
        elif self.type != base.type:
            mismatch = ("type", self.type, base.type)
        elif self.type in Variable.typeswithsize and self.size != base.size:
            # Size only matters for the Fixed/Variable wire types.
            mismatch = ("size", self.size, base.size)
        if mismatch:
            return Incompatible("has different %s: %s vs. %s in base"
                                % mismatch)
        return Same()
|
||||
|
||||
|
||||
|
||||
###
|
||||
### Parsing Message Templates
|
||||
###
|
||||
|
||||
class TemplateParser:
    """Recursive-descent parser turning a TokenStream into a Template."""

    def __init__(self, tokens):
        self._tokens = tokens
        # Template format version; affects numbering and EOF strictness.
        self._version = 0
        # Per-priority counters for implicitly numbered (v1) messages.
        self._numbers = { }
        for p in Message.priorities:
            self._numbers[p] = 0

    def parseTemplate(self):
        """Parse the whole stream -- an optional version line followed
        by message definitions -- and return the populated Template."""
        tokens = self._tokens
        t = Template()
        while True:
            if tokens.want("version"):
                v = float(tokens.require(tokens.wantFloat()))
                self._version = v
                t.version = v
                continue

            m = self.parseMessage()
            if m:
                t.addMessage(m)
                continue

            if self._version >= 2.0:
                # v2 templates must end cleanly at EOF.
                tokens.require(tokens.wantEOF())
                break
            else:
                if tokens.wantEOF():
                    break

                # v1: skip unrecognized tokens.
                tokens.consume()
                # just assume (gulp) that this is a comment
                # line 468: "sim -> dataserver"
        return t


    def parseMessage(self):
        """Parse one '{ name priority [number] trust coding ... }'
        message, or return None if the stream doesn't open one."""
        tokens = self._tokens
        if not tokens.want("{"):
            return None

        name = tokens.require(tokens.wantSymbol())
        priority = tokens.require(tokens.wantOneOf(Message.priorities))

        if self._version >= 2.0 or priority in Message.prioritieswithnumber:
            # Explicit number; base 0 lets int() accept hex/octal forms.
            number = int("+" + tokens.require(tokens.wantInteger()), 0)
        else:
            # v1 implicit numbering: sequential within each priority.
            self._numbers[priority] += 1
            number = self._numbers[priority]

        trust = tokens.require(tokens.wantOneOf(Message.trusts))
        coding = tokens.require(tokens.wantOneOf(Message.encodings))

        m = Message(name, number, priority, trust, coding)

        if self._version >= 2.0:
            # Optional deprecation marker (v2 only).
            d = tokens.wantOneOf(Message.deprecations)
            if d:
                m.deprecate(d)

        while True:
            b = self.parseBlock()
            if not b:
                break
            m.addBlock(b)

        tokens.require(tokens.want("}"))

        return m


    def parseBlock(self):
        """Parse one '{ name repeat [count] ... }' block, or return
        None if the stream doesn't open one."""
        tokens = self._tokens
        if not tokens.want("{"):
            return None
        name = tokens.require(tokens.wantSymbol())
        repeat = tokens.require(tokens.wantOneOf(Block.repeats))
        if repeat in Block.repeatswithcount:
            count = int(tokens.require(tokens.wantInteger()))
        else:
            count = None

        b = Block(name, repeat, count)

        while True:
            v = self.parseVariable()
            if not v:
                break
            b.addVariable(v)

        tokens.require(tokens.want("}"))
        return b


    def parseVariable(self):
        """Parse one '{ name type [size] }' variable, or return None
        if the stream doesn't open one."""
        tokens = self._tokens
        if not tokens.want("{"):
            return None
        name = tokens.require(tokens.wantSymbol())
        type = tokens.require(tokens.wantOneOf(Variable.types))
        if type in Variable.typeswithsize:
            size = tokens.require(tokens.wantInteger())
        else:
            # Tolerate (and discard) a stray trailing integer.
            tokens.wantInteger() # in LandStatRequest: "{ ParcelLocalID S32 1 }"
            size = None
        tokens.require(tokens.want("}"))
        return Variable(name, type, size)
|
||||
|
||||
def parseTemplateString(s):
    """Parse a message template from the string s; returns a Template."""
    stream = TokenStream().fromString(s)
    return TemplateParser(stream).parseTemplate()
|
||||
|
||||
def parseTemplateFile(f):
    """Parse a message template from the open file f; returns a Template."""
    stream = TokenStream().fromFile(f)
    return TemplateParser(stream).parseTemplate()
|
||||
|
|
@ -0,0 +1,257 @@
|
|||
"""\
|
||||
@file llsdhttp.py
|
||||
@brief Functions to ease moving llsd over http
|
||||
|
||||
Copyright (c) 2006-2007, Linden Research, Inc.
|
||||
$License$
|
||||
"""
|
||||
|
||||
import os.path
|
||||
import os
|
||||
import os
|
||||
import urlparse
|
||||
|
||||
from indra.base import llsd
|
||||
from indra.ipc import httputil
|
||||
LLSD = llsd.LLSD()
|
||||
|
||||
|
||||
class ConnectionError(Exception):
    """Raised when an HTTP request fails.

    Carries the full request/response context: method, host, port,
    path, status, reason and response body.
    """

    def __init__(self, method, host, port, path, status, reason, body):
        self.method = method
        self.host = host
        self.port = port
        self.path = path
        self.status = status
        self.reason = reason
        self.body = body
        Exception.__init__(self)

    def __str__(self):
        details = (type(self).__name__,
                   self.method, self.host, self.port,
                   self.path, self.status, self.reason, self.body)
        return "%s(%r, %r, %r, %r, %r, %r, %r)" % details
|
||||
|
||||
|
||||
class NotFound(ConnectionError):
    """HTTP 404: the requested resource does not exist."""
    pass
|
||||
|
||||
class Forbidden(ConnectionError):
    """HTTP 403: access to the resource was refused."""
    pass
|
||||
|
||||
class MalformedResponse(ConnectionError):
    """The response body could not be parsed as LLSD."""
    pass
|
||||
|
||||
class NotImplemented(ConnectionError):
    """HTTP 501. NOTE: shadows the NotImplemented builtin in this module."""
    pass
|
||||
|
||||
def runConnection(connection, raise_parse_errors=True):
    """Read the response off an already-issued request, parsed as LLSD.

    Raises a ConnectionError subclass for non-2xx statuses. Returns
    None for empty bodies. When raise_parse_errors is False, an
    unparseable body also yields None instead of MalformedResponse.
    """
    response = connection.getresponse()
    if (response.status not in [200, 201, 204]):
        # Map well-known failure statuses to dedicated exception classes.
        klass = {404:NotFound,
                 403:Forbidden,
                 501:NotImplemented}.get(response.status, ConnectionError)
        raise klass(
            connection.method, connection.host, connection.port,
            connection.path, response.status, response.reason, response.read())
    content_length = response.getheader('content-length')

    if content_length: # Check to see whether it is not None
        content_length = int(content_length)

    if content_length: # Check to see whether the length is not 0
        body = response.read(content_length)
    else:
        body = ''

    if not body.strip():
        #print "No body:", `body`
        return None
    try:
        return LLSD.parse(body)
    except Exception, e:
        if raise_parse_errors:
            print "Exception: %s, Could not parse LLSD: " % (e), body
            raise MalformedResponse(
                connection.method, connection.host, connection.port,
                connection.path, response.status, response.reason, body)
        else:
            return None
|
||||
|
||||
class FileScheme(object):
    """file:// adapter mimicking the HTTP connection/response interface
    against the local filesystem, so callers can treat local files and
    web resources uniformly. The object doubles as its own response."""
    host = '<file>'
    port = '<file>'
    reason = '<none>'

    def __init__(self, location):
        pass

    def request(self, method, fullpath, body='', headers=None):
        # Map the HTTP verb onto a filesystem operation; self.status
        # mimics the HTTP status code for runConnection's benefit.
        self.status = 200
        self.path = fullpath.split('?')[0]
        self.method = method = method.lower()
        assert method in ('get', 'put', 'delete')
        if method == 'delete':
            try:
                os.remove(self.path)
            except OSError:
                pass  # don't complain if already deleted
        elif method == 'put':
            try:
                f = file(self.path, 'w')
                f.write(body)
                f.close()
            except IOError, e:
                self.status = 500
                self.raise_connection_error()
        elif method == 'get':
            if not os.path.exists(self.path):
                self.status = 404
                self.raise_connection_error(NotFound)

    def connect(self):
        # No connection needed for local files.
        pass

    def getresponse(self):
        # This object is its own response.
        return self

    def getheader(self, header):
        # Only content-length is supported; other headers yield None.
        # NOTE(review): returns an int where httplib returns a string;
        # runConnection tolerates both -- confirm no other callers care.
        if header == 'content-length':
            try:
                return os.path.getsize(self.path)
            except OSError:
                return 0

    def read(self, howmuch=None):
        if self.method == 'get':
            try:
                return file(self.path, 'r').read(howmuch)
            except IOError:
                self.status = 500
                self.raise_connection_error()
        return ''

    def raise_connection_error(self, klass=ConnectionError):
        # Surface filesystem failures through the same exception family
        # the real HTTP path uses.
        raise klass(
            self.method, self.host, self.port,
            self.path, self.status, self.reason, '')
|
||||
|
||||
# Map of url scheme -> connection factory for makeConnection() below;
# like httputil's map, but with file:// support added.
scheme_to_factory_map = {
    'http': httputil.HttpClient,
    'https': httputil.HttpsClient,
    'file': FileScheme,
    }
|
||||
|
||||
def makeConnection(scheme, location, use_proxy):
    """Build a connection object for scheme, honoring proxy env vars.

    When use_proxy is set, the connection is pointed at the proxy named
    by $http_proxy or $ALL_PROXY, defaulting to a local squid.
    NOTE(review): near-duplicate of httputil.makeConnection, kept
    separate because this module's scheme map also handles file://.
    """
    if use_proxy:
        for variable in ("http_proxy", "ALL_PROXY"):
            if variable in os.environ:
                location = os.environ[variable]
                break
        else:
            location = "localhost:3128" #default to local squid
        prefix = "http://"
        if location.startswith(prefix):
            location = location[len(prefix):]
    return scheme_to_factory_map[scheme](location)
|
||||
|
||||
|
||||
def get(url, headers=None, use_proxy=False):
    """GET url and parse the response body as LLSD.

    @param headers optional dict of extra request headers.
    @param use_proxy route through $http_proxy/$ALL_PROXY (see
        makeConnection); proxied requests send the absolute url plus an
        explicit Host header.
    @return parsed LLSD body, or None for an empty body.
    """
    if headers is None:
        headers = {}
    scheme, location, path, params, query, id = urlparse.urlparse(url)
    connection = makeConnection(scheme, location, use_proxy=use_proxy)
    fullpath = path
    if query:
        fullpath += "?"+query
    connection.connect()
    # IMPROVED: removed a dead second "if headers is None" re-check --
    # headers was already normalized to a dict above.
    if use_proxy:
        headers.update({ "Host" : location })
        connection.request("GET", url, headers=headers)
    else:
        connection.request("GET", fullpath, headers=headers)

    return runConnection(connection)
|
||||
|
||||
def put(url, data, headers=None):
    """PUT data (serialized as LLSD XML) to url.

    Parse errors in the reply are swallowed (returns None instead).
    """
    payload = LLSD.toXML(data)
    scheme, location, path, params, query, id = urlparse.urlparse(url)
    conn = makeConnection(scheme, location, use_proxy=False)
    if headers is None:
        headers = {}
    headers['Content-Type'] = 'application/xml'
    target = path
    if query:
        target = target + "?" + query
    conn.request("PUT", target, payload, headers)
    return runConnection(conn, raise_parse_errors=False)
|
||||
|
||||
def delete(url):
    """DELETE url; parse any LLSD reply (parse errors ignored)."""
    scheme, location, path, params, query, id = urlparse.urlparse(url)
    conn = makeConnection(scheme, location, use_proxy=False)
    # NOTE(review): always appends '?', even when the query is empty.
    conn.request("DELETE", path + "?" + query)
    return runConnection(conn, raise_parse_errors=False)
|
||||
|
||||
def post(url, data=None):
    """POST data as LLSD XML to url and parse the LLSD response."""
    payload = LLSD.toXML(data)
    scheme, location, path, params, query, id = urlparse.urlparse(url)
    conn = makeConnection(scheme, location, use_proxy=False)
    conn.request("POST", path + "?" + query, payload,
                 {'Content-Type': 'application/xml'})
    return runConnection(conn)
|
||||
|
||||
def postFile(url, filename):
    """POST the raw contents of filename to url as an octet-stream."""
    handle = open(filename)
    contents = handle.read()
    handle.close()
    scheme, location, path, params, query, id = urlparse.urlparse(url)
    conn = makeConnection(scheme, location, use_proxy=False)
    conn.request("POST", path + "?" + query, contents,
                 {'Content-Type': 'application/octet-stream'})
    return runConnection(conn)
|
||||
|
||||
def getStatus(url, use_proxy=False):
    """GET url and return only the HTTP status code."""
    scheme, location, path, params, query, id = urlparse.urlparse(url)
    conn = makeConnection(scheme, location, use_proxy=use_proxy)
    if use_proxy:
        # Proxies get the absolute url plus an explicit Host header.
        conn.request("GET", url, headers={ "Host" : location })
    else:
        conn.request("GET", path + "?" + query)
    return conn.getresponse().status
|
||||
|
||||
def putStatus(url, data):
    """PUT data as LLSD XML; return only the HTTP status code."""
    payload = LLSD.toXML(data)
    scheme, location, path, params, query, id = urlparse.urlparse(url)
    conn = makeConnection(scheme, location, use_proxy=False)
    conn.request("PUT", path + "?" + query, payload,
                 {'Content-Type': 'application/xml'})
    return conn.getresponse().status
|
||||
|
||||
def deleteStatus(url):
    """DELETE url and return only the HTTP status code."""
    scheme, location, path, params, query, id = urlparse.urlparse(url)
    conn = makeConnection(scheme, location, use_proxy=False)
    conn.request("DELETE", path + "?" + query)
    return conn.getresponse().status
|
||||
|
||||
def postStatus(url, data):
    """POST data as LLSD XML; return only the HTTP status code.

    NOTE(review): unlike post(), no Content-Type header is sent --
    confirm whether that is intentional.
    """
    payload = LLSD.toXML(data)
    scheme, location, path, params, query, id = urlparse.urlparse(url)
    conn = makeConnection(scheme, location, use_proxy=False)
    conn.request("POST", path + "?" + query, payload)
    return conn.getresponse().status
|
||||
|
||||
def postFileStatus(url, filename):
    """POST the contents of filename; return (status, response body)."""
    handle = open(filename)
    contents = handle.read()
    handle.close()
    scheme, location, path, params, query, id = urlparse.urlparse(url)
    conn = makeConnection(scheme, location, use_proxy=False)
    conn.request("POST", path + "?" + query, contents,
                 {'Content-Type': 'application/octet-stream'})
    response = conn.getresponse()
    return response.status, response.read()
|
||||
|
||||
def getFromSimulator(path, use_proxy=False):
    # GET a path from the simulator host.
    # NOTE(review): simulatorHostAndPort is not defined anywhere in this
    # module -- callers presumably inject it as a module global; confirm.
    return get('http://' + simulatorHostAndPort + path, use_proxy=use_proxy)
|
||||
|
||||
def postToSimulator(path, data=None):
    # POST LLSD data to a path on the simulator host.
    # NOTE(review): simulatorHostAndPort is not defined in this module;
    # see getFromSimulator.
    return post('http://' + simulatorHostAndPort + path, data)
|
||||
|
|
@ -0,0 +1,123 @@
|
|||
"""\
|
||||
@file russ.py
|
||||
@brief Recursive URL Substitution Syntax helpers
|
||||
@author Phoenix
|
||||
|
||||
Many details on how this should work are available on the wiki:
|
||||
https://wiki.secondlife.com/wiki/Recursive_URL_Substitution_Syntax
|
||||
|
||||
Adding features to this should be reflected in that page in the
|
||||
implementations section.
|
||||
|
||||
Copyright (c) 2007, Linden Research, Inc.
|
||||
$License$
|
||||
"""
|
||||
|
||||
import urllib
|
||||
from indra.ipc import llsdhttp
|
||||
|
||||
class UnbalancedBraces(Exception):
    """Raised when a format string's braces do not pair up."""
|
||||
|
||||
class UnknownDirective(Exception):
    """Raised for a braced directive that is not $, %, http or file."""
|
||||
|
||||
class BadDirective(Exception):
    """Raised for a directive that is recognized but malformed."""
|
||||
|
||||
def format(format_str, context):
    """@brief Format format string according to rules for RUSS.
    @see https://osiris.lindenlab.com/mediawiki/index.php/Recursive_URL_Substitution_Syntax
    @param format_str The input string to format.
    @param context A map used for string substitutions.
    @return Returns the formatted string. If no match, the braces remain intact.
    """
    while True:
        # Re-scan each pass: substitutions may reveal new directives.
        all_matches = _find_sub_matches(format_str)
        if not all_matches:
            break
        substitutions = 0
        while True:
            # Deepest-nested matches are resolved first.
            matches = all_matches.pop()
            # we work from right to left to make sure we do not
            # invalidate positions earlier in format_str
            matches.reverse()
            for pos in matches:
                # Use index since _find_sub_matches should have raised
                # an exception, and failure to find now is an exception.
                end = format_str.index('}', pos)
                if format_str[pos + 1] == '$':
                    # {$key}: straight lookup in context.
                    value = context.get(format_str[pos + 2:end])
                elif format_str[pos + 1] == '%':
                    # {%key}: context value rendered as a '?' query string.
                    value = _build_query_string(
                        context.get(format_str[pos + 2:end]))
                elif format_str[pos+1:pos+5] == 'http' or format_str[pos+1:pos+5] == 'file':
                    # {http...|...} / {file...|...}: fetch the url's contents.
                    value = _fetch_url_directive(format_str[pos + 1:end])
                else:
                    # IMPROVED: call-style raise; the old two-expression
                    # raise statement syntax is deprecated.
                    raise UnknownDirective(format_str[pos:end + 1])
                # IMPROVED: identity test instead of 'not value == None'.
                if value is not None:
                    format_str = format_str[:pos]+str(value)+format_str[end+1:]
                    substitutions += 1

            # If there were any substitutions at this depth, re-parse
            # since this may have revealed new things to substitute
            if substitutions:
                break
            if not all_matches:
                break

        # If there were no substitutions at all, and we have exhausted
        # the possible matches, bail.
        if not substitutions:
            break
    return format_str
|
||||
|
||||
def _find_sub_matches(format_str):
|
||||
"""@brief Find all of the substitution matches.
|
||||
@param format_str the RUSS conformant format string.
|
||||
@return Returns an array of depths of arrays of positional matches in input.
|
||||
"""
|
||||
depth = 0
|
||||
matches = []
|
||||
for pos in range(len(format_str)):
|
||||
if format_str[pos] == '{':
|
||||
depth += 1
|
||||
if not len(matches) == depth:
|
||||
matches.append([])
|
||||
matches[depth - 1].append(pos)
|
||||
continue
|
||||
if format_str[pos] == '}':
|
||||
depth -= 1
|
||||
continue
|
||||
if not depth == 0:
|
||||
raise UnbalancedBraces, format_str
|
||||
return matches
|
||||
|
||||
def _build_query_string(query_dict):
    """\
    @brief Given a dict, return a query string. Utility wrapper for urllib.
    @param query_dict input query dict
    @returns Returns an urlencoded query string including leading '?',
        or '' for an empty/None dict.
    """
    if not query_dict:
        return ''
    return '?' + urllib.urlencode(query_dict)
|
||||
|
||||
def _fetch_url_directive(directive):
|
||||
"*FIX: This only supports GET"
|
||||
commands = directive.split('|')
|
||||
resource = llsdhttp.get(commands[0])
|
||||
if len(commands) == 3:
|
||||
resource = _walk_resource(resource, commands[2])
|
||||
return resource
|
||||
|
||||
def _walk_resource(resource, path):
|
||||
path = path.split('/')
|
||||
for child in path:
|
||||
if not child:
|
||||
continue
|
||||
resource = resource[child]
|
||||
return resource
|
||||
|
|
@ -0,0 +1,52 @@
|
|||
"""\
|
||||
@file servicebuilder.py
|
||||
@author Phoenix
|
||||
@brief Class which will generate service urls.
|
||||
|
||||
Copyright (c) 2007, Linden Research, Inc.
|
||||
$License$
|
||||
"""
|
||||
|
||||
from indra.base import config
|
||||
from indra.ipc import llsdhttp
|
||||
from indra.ipc import russ
|
||||
|
||||
# Global services configuration map, fetched once at import time.
services_config = {}
try:
    # Pull the services definition from the URL named in the indra config.
    # NOTE(review): the bare except makes this deliberately best-effort --
    # if the config key is missing or the fetch fails, importers still get
    # an empty dict instead of an import-time error.  Consider narrowing
    # the exception types once the failure modes are known.
    services_config = llsdhttp.get(config.get('services-config'))
except:
    pass
|
||||
|
||||
class ServiceBuilder(object):
    """Builds full service URLs from the services definition (services.xml)."""

    def __init__(self, services_definition = services_config):
        """\
        @brief Create a ServiceBuilder.
        @param services_definition Complete services definition, services.xml.
        """
        # Keep the services section around; holding the reference is cheap.
        self.services = services_definition['services']
        self.builders = {}
        for svc in self.services:
            builder_spec = svc.get('service-builder')
            if not builder_spec:
                continue
            if isinstance(builder_spec, dict):
                # One service can declare several named builders; register
                # each under "<service-name>-<builder-name>".
                for builder_name, fmt in builder_spec.items():
                    self.builders[svc['name'] + '-' + builder_name] = fmt
            else:
                # A single builder is registered under the bare service name.
                self.builders[svc['name']] = builder_spec

    def buildServiceURL(self, name, context):
        """\
        @brief Given the environment on construction, return a service URL.
        @param name The name of the service.
        @param context A dict of name value lookups for the service.
        @returns Returns the full URL for the named service.
        """
        base_url = config.get('services-base-url')
        service_path = russ.format(self.builders[name], context)
        return base_url + service_path
|
||||
|
|
@ -0,0 +1,134 @@
|
|||
"""\
|
||||
@file tokenstream.py
|
||||
@brief Message template parsing utility class
|
||||
|
||||
Copyright (c) 2007, Linden Research, Inc.
|
||||
$License$
|
||||
"""
|
||||
|
||||
import re
|
||||
|
||||
class _EOF(object):
    # Private sentinel type for end-of-stream.
    pass

# Singleton end-of-file marker returned by TokenStream when it runs dry.
EOF = _EOF()

class _LineMarker(int):
    # int subclass used to tag line numbers in the token list, so they can
    # be told apart from ordinary string tokens.
    pass

# Token syntax for the message template language.
_commentRE = re.compile(r'//.*')
_symbolRE = re.compile(r'[a-zA-Z_][a-zA-Z_0-9]*')
_integerRE = re.compile(r'(0x[0-9A-Fa-f]+|0\d*|[1-9]\d*)')
_floatRE = re.compile(r'\d+(\.\d*)?')
|
||||
|
||||
|
||||
class ParseError(Exception):
    """Raised (or returned) when a TokenStream expectation is not met.

    Captures the stream's current line and a few upcoming tokens so the
    message can show where parsing stopped.  Instances are falsy (Python 2
    __nonzero__) so `want`-style methods can return one as a failure value.
    """

    def __init__(self, stream, reason):
        self.line = stream.line
        self.context = stream._context()
        self.reason = reason

    def _contextString(self):
        # Show upcoming tokens only up to the next line marker.
        tokens_before_newline = []
        for token in self.context:
            if isinstance(token, _LineMarker):
                break
            tokens_before_newline.append(token)
        return " ".join(tokens_before_newline)

    def __str__(self):
        return "line %d: %s @ ... %s" % (
            self.line, self.reason, self._contextString())

    def __nonzero__(self):
        return False
|
||||
|
||||
|
||||
def _optionText(options):
|
||||
n = len(options)
|
||||
if n == 1:
|
||||
return '"%s"' % options[0]
|
||||
return '"' + '", "'.join(options[0:(n-1)]) + '" or "' + options[-1] + '"'
|
||||
|
||||
|
||||
class TokenStream(object):
    """Whitespace-tokenized view of a message template file.

    Tokens are plain strings interleaved with _LineMarker entries; markers
    are consumed transparently and update self.line, so error messages can
    report the current line number.  `want*` methods return either the
    consumed token or a (falsy) ParseError; `require` turns that into a
    raised exception.
    """

    def __init__(self):
        self.line = 0       # line number of the most recent token
        self.tokens = [ ]   # pending tokens, including _LineMarker entries

    def fromString(self, string):
        """Tokenize a whole template given as one string."""
        return self.fromLines(string.split('\n'))

    def fromFile(self, file):
        """Tokenize a template read from an open file (iterated by line)."""
        return self.fromLines(file)

    def fromLines(self, lines):
        """Tokenize an iterable of lines; returns self for chaining.

        Each line is prefixed with a _LineMarker carrying its 1-based
        number; '//' comments are blanked out before splitting.
        """
        i = 0
        for line in lines:
            i += 1
            self.tokens.append(_LineMarker(i))
            self.tokens.extend(_commentRE.sub(" ", line).split())
        self._consumeLines()
        return self

    def consume(self):
        """Pop and return the next token, or the EOF sentinel when empty."""
        if not self.tokens:
            return EOF
        t = self.tokens.pop(0)
        self._consumeLines()
        return t

    def _consumeLines(self):
        # Swallow any leading line markers, tracking the current line, so
        # peek()/consume() only ever surface real tokens.
        while self.tokens and isinstance(self.tokens[0], _LineMarker):
            self.line = self.tokens.pop(0)

    def peek(self):
        """Return the next token without consuming it (EOF when empty)."""
        if not self.tokens:
            return EOF
        return self.tokens[0]

    def want(self, t):
        """Consume and return the next token if it equals t, else return
        a ParseError (falsy) without consuming."""
        if t == self.peek():
            return self.consume()
        return ParseError(self, 'expected "%s"' % t)

    def wantOneOf(self, options):
        """Consume the next token if it is one of options, else ParseError."""
        assert len(options)
        if self.peek() in options:
            return self.consume()
        return ParseError(self, 'expected one of %s' % _optionText(options))

    def wantEOF(self):
        """Succeed only when the stream is exhausted."""
        return self.want(EOF)

    def wantRE(self, re, message=None):
        """Consume the next token if the compiled pattern `re` matches it
        entirely; else return a ParseError with `message`.

        NOTE(review): the parameter shadows the `re` module inside this
        method; it is a compiled pattern object, not the module.
        """
        t = self.peek()
        if t != EOF:
            m = re.match(t)
            # Require the match to cover the whole token, not just a prefix.
            if m and m.end() == len(t):
                return self.consume()
        if not message:
            message = "expected match for r'%s'" % re.pattern
        return ParseError(self, message)

    def wantSymbol(self):
        """Consume an identifier-shaped token."""
        return self.wantRE(_symbolRE, "expected symbol")

    def wantInteger(self):
        """Consume a decimal, octal, or 0x-hex integer token."""
        return self.wantRE(_integerRE, "expected integer")

    def wantFloat(self):
        """Consume a float-shaped token (digits with optional fraction)."""
        return self.wantRE(_floatRE, "expected float")

    def _context(self):
        # Up to the next 5 pending tokens, used by ParseError messages.
        n = min(5, len(self.tokens))
        return self.tokens[0:n]

    def require(self, t):
        """Unwrap a want*() result: return the token, or raise the
        ParseError it produced (or a generic one for other falsy values)."""
        if t:
            return t
        if isinstance(t, ParseError):
            raise t
        else:
            raise ParseError(self, "unmet requirement")
|
||||
|
||||
|
|
@ -0,0 +1,253 @@
|
|||
"""\
|
||||
@file xml_rpc.py
|
||||
@brief An implementation of a parser/generator for the XML-RPC xml format.
|
||||
|
||||
Copyright (c) 2006-2007, Linden Research, Inc.
|
||||
$License$
|
||||
"""
|
||||
|
||||
|
||||
from greenlet import greenlet
|
||||
|
||||
from mulib import mu
|
||||
|
||||
from xml.sax import handler
|
||||
from xml.sax import parseString
|
||||
|
||||
|
||||
# States
|
||||
class Expected(object):
    """Parser-state marker naming an expected XML tag.

    Attribute access mints a new marker of the same class for that tag,
    e.g. START.methodcall is START('methodcall').
    """

    def __init__(self, tag):
        self.tag = tag

    def __getattr__(self, name):
        # Any unknown attribute becomes a fresh marker for that tag name.
        marker_class = type(self)
        return marker_class(name)

    def __repr__(self):
        return '%s(%r)' % (type(self).__name__, self.tag)
|
||||
|
||||
|
||||
class START(Expected):
    # Marker class: expecting an opening tag.
    pass


class END(Expected):
    # Marker class: expecting a closing tag.
    pass


class STR(object):
    # Marker class: expecting character data.  `tag` exists only so STR can
    # be handled uniformly with START/END in XMLParser.assertState.
    tag = ''


# Wildcard instances (shadowing the classes): an empty tag matches any
# element name, and attribute access mints tag-specific markers.
START = START('')
END = END('')


class Malformed(Exception):
    # Raised when the incoming XML does not follow the XML-RPC grammar.
    pass
|
||||
|
||||
|
||||
class XMLParser(handler.ContentHandler):
    """SAX handler that feeds events into an xml_rpc greenlet state machine.

    At every switch the greenlet hands back the state(s) it will accept
    next; each SAX event is validated against that set before being
    forwarded.  A protocol violation raises Malformed.
    """
    def __init__(self, state_machine, next_states):
        handler.ContentHandler.__init__(self)
        self.state_machine = state_machine
        # Normalize a single expected state into a 1-tuple.
        if not isinstance(next_states, tuple):
            next_states = (next_states, )
        self.next_states = next_states
        self._character_buffer = ''

    def assertState(self, state, name, *rest):
        """Check that (state, name) is an allowed transition; raise
        Malformed otherwise."""
        if not isinstance(self.next_states, tuple):
            self.next_states = (self.next_states, )
        for next in self.next_states:
            if type(state) == type(next):
                # An empty expected tag acts as a wildcard for any name.
                if next.tag and next.tag != name:
                    raise Malformed(
                        "Expected %s, got %s %s %s" % (
                            next, state, name, rest))
                break
        else:
            raise Malformed(
                "Expected %s, got %s %s %s" % (
                    self.next_states, state, name, rest))

    def startElement(self, name, attrs):
        # Element names are case-folded; attributes are passed as a dict.
        self.assertState(START, name.lower(), attrs)
        self.next_states = self.state_machine.switch(START, (name.lower(), dict(attrs)))

    def endElement(self, name):
        # Deliver any buffered character data before the close-tag event.
        if self._character_buffer.strip():
            characters = self._character_buffer.strip()
            self._character_buffer = ''
            self.assertState(STR, characters)
            self.next_states = self.state_machine.switch(characters)
        self.assertState(END, name.lower())
        self.next_states = self.state_machine.switch(END, name.lower())

    def error(self, exc):
        # Record a recoverable SAX error (feedparser-style bozo flag).
        self.bozo = 1
        self.exc = exc

    def fatalError(self, exc):
        self.error(exc)
        raise exc

    def characters(self, characters):
        # Character data may arrive in pieces; accumulate until endElement.
        self._character_buffer += characters
|
||||
|
||||
|
||||
def parse(what):
    """Parse an XML-RPC request string into (method_name, params).

    Runs the xml_rpc state machine on its own greenlet and drives it with
    SAX events.  On a grammar violation the offending input is printed and
    Malformed propagates to the caller.
    """
    child = greenlet(xml_rpc)
    me = greenlet.getcurrent()
    # First switch hands the child our greenlet and yields the states it
    # expects first (the opening <methodcall>).
    startup_states = child.switch(me)
    parser = XMLParser(child, startup_states)
    try:
        parseString(what, parser)
    except Malformed:
        print what
        raise
    # Final switch resumes the child past its last checkpoint and returns
    # its (method_name, params) result.
    return child.switch()
|
||||
|
||||
|
||||
def xml_rpc(yielder):
    """Greenlet body implementing the XML-RPC <methodCall> grammar.

    Each yielder.switch(expected...) suspends until the SAX handler has an
    event; the arguments passed tell the handler which events are legal
    next.  Returns (method_name, params) through the final switch.
    """
    yielder.switch(START.methodcall)
    yielder.switch(START.methodname)
    methodName = yielder.switch(STR)
    yielder.switch(END.methodname)

    yielder.switch(START.params)

    # NOTE(review): `root` is never used in this function.
    root = None
    params = []
    while True:
        # Either another <param> begins, or </params> closes the list.
        state, _ = yielder.switch(START.param, END.params)
        if state == END:
            break

        yielder.switch(START.value)

        # handle() consumes the typed payload inside <value>.
        params.append(
            handle(yielder))

        yielder.switch(END.value)
        yielder.switch(END.param)

    yielder.switch(END.methodcall)
    ## Resume parse
    yielder.switch()
    ## Return result to parse
    return methodName.strip(), params
|
||||
|
||||
|
||||
def handle(yielder):
    """Parse one XML-RPC typed payload (inside <value>) into a Python object.

    Expects the state machine to be positioned just inside a <value> tag;
    consumes events up to and including the close tag of the typed element.
    <struct> and <array> recurse for their members.

    @param yielder the parser greenlet driving the SAX events.
    @return int, bool, str, float, decoded bytes, dict, or list.
    """
    # Bug fix: base64.b64decode was referenced below, but this module never
    # imported base64, so the <base64> branch raised NameError.  Import it
    # locally to keep the fix self-contained.
    import base64

    _, (tag, attrs) = yielder.switch(START)
    if tag in ['int', 'i4']:
        result = int(yielder.switch(STR))
    elif tag == 'boolean':
        result = bool(int(yielder.switch(STR)))
    elif tag == 'string':
        result = yielder.switch(STR)
    elif tag == 'double':
        result = float(yielder.switch(STR))
    elif tag == 'datetime.iso8601':
        # *TODO: convert to a real datetime value if callers ever need one.
        result = yielder.switch(STR)
    elif tag == 'base64':
        result = base64.b64decode(yielder.switch(STR))
    elif tag == 'struct':
        result = {}
        while True:
            # Either another <member> begins, or </struct> closes the map.
            state, _ = yielder.switch(START.member, END.struct)
            if state == END:
                break

            yielder.switch(START.name)
            key = yielder.switch(STR)
            yielder.switch(END.name)

            yielder.switch(START.value)
            result[key] = handle(yielder)
            yielder.switch(END.value)

            yielder.switch(END.member)
        ## We already handled </struct> above, don't want to handle it below
        return result
    elif tag == 'array':
        result = []
        yielder.switch(START.data)
        while True:
            # Either another <value> begins, or </data> ends the items.
            state, _ = yielder.switch(START.value, END.data)
            if state == END:
                break

            result.append(handle(yielder))

            yielder.switch(END.value)

    # Consume the close tag of the typed element (e.g. </int>, </array>).
    yielder.switch(getattr(END, tag))

    return result
|
||||
|
||||
|
||||
# XML-RPC element constructors built on mulib's mu tag factory; used by
# _generate()/generate() to build <methodResponse> trees.
VALUE = mu.tagFactory('value')
BOOLEAN = mu.tagFactory('boolean')
INT = mu.tagFactory('int')
STRUCT = mu.tagFactory('struct')
MEMBER = mu.tagFactory('member')
NAME = mu.tagFactory('name')
ARRAY = mu.tagFactory('array')
DATA = mu.tagFactory('data')
STRING = mu.tagFactory('string')
DOUBLE = mu.tagFactory('double')
METHODRESPONSE = mu.tagFactory('methodResponse')
PARAMS = mu.tagFactory('params')
PARAM = mu.tagFactory('param')

# Render these leaf elements inline (no pretty-printed child indentation),
# so their text content is not polluted with whitespace.
mu.inline_elements['string'] = True
mu.inline_elements['boolean'] = True
mu.inline_elements['name'] = True
|
||||
|
||||
|
||||
def _generate(something):
    """Recursively convert a Python value into an XML-RPC <value> tree.

    @param something dict, list, string, bool, int, or float.
    @return a mu VALUE element.  NOTE(review): unsupported types fall off
        the chain and return None silently.
    """
    if isinstance(something, dict):
        result = STRUCT()
        for key, value in something.items():
            # mu's __getitem__ appends the child element to `result`.
            result[
                MEMBER[
                    NAME[key], _generate(value)]]
        return VALUE[result]
    elif isinstance(something, list):
        result = DATA()
        for item in something:
            result[_generate(item)]
        return VALUE[ARRAY[[result]]]
    elif isinstance(something, basestring):
        return VALUE[STRING[something]]
    elif isinstance(something, bool):
        # bool is checked before int on purpose: isinstance(True, int) is
        # True, so the order of these two branches is load-bearing.
        if something:
            return VALUE[BOOLEAN['1']]
        return VALUE[BOOLEAN['0']]
    elif isinstance(something, int):
        return VALUE[INT[something]]
    elif isinstance(something, float):
        return VALUE[DOUBLE[something]]
|
||||
|
||||
def generate(*args):
    """Build a complete XML-RPC <methodResponse> tree from Python values.

    Each positional argument becomes one <param> in the response.
    """
    response_params = PARAMS()
    for value in args:
        # mu's __getitem__ appends the child element to response_params.
        response_params[PARAM[_generate(value)]]
    return METHODRESPONSE[response_params]
|
||||
|
||||
|
||||
if __name__ == '__main__':
    # Smoke test: parse the canonical example call from the XML-RPC spec.
    print parse("""<?xml version="1.0"?> <methodCall> <methodName>examples.getStateName</methodName> <params> <param> <value><i4>41</i4></value> </param> </params> </methodCall>
""")
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
|
@ -0,0 +1,7 @@
|
|||
"""\
|
||||
@file __init__.py
|
||||
@brief Initialization file for the indra util module.
|
||||
|
||||
Copyright (c) 2006-2007, Linden Research, Inc.
|
||||
$License$
|
||||
"""
|
||||
|
|
@ -0,0 +1,32 @@
|
|||
"""\
|
||||
@file helpformatter.py
|
||||
@author Phoenix
|
||||
@brief Class for formatting optparse descriptions.
|
||||
|
||||
Copyright (c) 2007, Linden Research, Inc.
|
||||
$License$
|
||||
"""
|
||||
|
||||
import optparse
|
||||
import textwrap
|
||||
|
||||
class Formatter(optparse.IndentedHelpFormatter):
    """Help formatter for optparse that preserves line breaks.

    optparse's default formatter re-wraps the whole description as one
    paragraph; this one wraps each newline-separated line independently,
    indented two spaces past the current option indent.
    """
    def __init__(
        self,
        p_indentIncrement = 2,
        p_maxHelpPosition = 24,
        p_width = 79,
        p_shortFirst = 1) :
        # Fix: initialize through the immediate base class instead of
        # skipping to the grandparent optparse.HelpFormatter -- the direct
        # call silently bypassed IndentedHelpFormatter's initializer.
        optparse.IndentedHelpFormatter.__init__(
            self,
            p_indentIncrement,
            p_maxHelpPosition,
            p_width,
            p_shortFirst)

    def format_description(self, p_description):
        """Wrap each line of p_description separately.

        @param p_description the raw description text (may contain '\\n').
        @return the indented, wrapped description.
        """
        t_descWidth = self.width - self.current_indent
        t_indent = " " * (self.current_indent + 2)
        return "\n".join(
            [textwrap.fill(descr, t_descWidth, initial_indent = t_indent,
                           subsequent_indent = t_indent)
             for descr in p_description.split("\n")] )
|
||||
|
|
@ -0,0 +1,562 @@
|
|||
#!/usr/bin/python
|
||||
"""\
|
||||
@file llmanifest.py
|
||||
@author Ryan Williams
|
||||
@brief Library for specifying operations on a set of files.
|
||||
|
||||
Copyright (c) 2007, Linden Research, Inc.
|
||||
$License$
|
||||
"""
|
||||
|
||||
import commands
|
||||
import filecmp
|
||||
import fnmatch
|
||||
import getopt
|
||||
import glob
|
||||
import os
|
||||
import os.path
|
||||
import re
|
||||
import shutil
|
||||
import sys
|
||||
import tarfile
|
||||
|
||||
def path_ancestors(path):
    """Return path and each of its ancestors, deepest first.

    e.g. "a/b/c" -> ["a/b/c", "a/b", "a"].  The input is normalized first.

    Bug fix: os.path.split("/") (and a bare drive root on Windows) returns
    the same path again, so the original loop never terminated for absolute
    paths.  We now stop as soon as splitting makes no further progress.
    """
    path = os.path.normpath(path)
    result = []
    while len(path) > 0:
        result.append(path)
        parent, sub = os.path.split(path)
        if parent == path:
            # Reached the filesystem root; splitting it is a no-op.
            break
        path = parent
    return result
|
||||
|
||||
def proper_windows_path(path, current_platform = sys.platform):
    """ This function takes an absolute Windows or Cygwin path and
    returns a path appropriately formatted for the platform it's
    running on (as determined by sys.platform)"""
    path = path.strip()
    drive_letter = None
    rel = None
    # Try the cygwin form first (/cygdrive/c/...), then the native
    # windows form (C:\...).  Anything else is not an absolute path.
    absolute_match = re.match("/cygdrive/([a-z])/(.*)", path)
    if not absolute_match:
        absolute_match = re.match('([a-zA-Z]):\\\(.*)', path)
        if not absolute_match:
            return None # not an absolute path
    drive_letter = absolute_match.group(1)
    rel = absolute_match.group(2)
    if current_platform == "cygwin":
        return "/cygdrive/" + drive_letter.lower() + '/' + rel.replace('\\', '/')
    else:
        return drive_letter.upper() + ':\\' + rel.replace('/', '\\')
|
||||
|
||||
def get_default_platform(dummy):
    """Map sys.platform to the manifest platform name.

    @param dummy unused; present so this can serve as an ARGUMENTS
        'default' callable, which is invoked with the source tree path.
    @return 'linux', 'windows', or 'darwin'.

    Fix: newer kernels/interpreters report 'linux' or 'linux3' rather than
    'linux2', which used to KeyError; any 'linux*' value is now accepted
    (backward compatible with 'linux1'/'linux2').
    """
    if sys.platform.startswith('linux'):
        return 'linux'
    return {'cygwin':'windows',
            'win32':'windows',
            'darwin':'darwin'
            }[sys.platform]
|
||||
|
||||
def get_default_version(srctree):
    """Parse the viewer version out of llversion.h in the source tree.

    @param srctree root of the source tree to search.
    @return (major, minor, patch, build) as strings, or None when no
        llversion.h was found in any candidate location.
    """
    # look up llversion.h and parse out the version info.
    # Fix: the last candidate used to be '../../indra/llcommon.h', which,
    # once joined with 'llversion.h', could never name a real file; it is
    # now the '../../indra/llcommon' directory as clearly intended.
    paths = [os.path.join(srctree, x, 'llversion.h')
             for x in ['llcommon', '../llcommon', '../../indra/llcommon']]
    for p in paths:
        if os.path.exists(p):
            f = open(p, 'r')
            try:
                contents = f.read()
            finally:
                # Fix: close the handle instead of leaking it.
                f.close()
            major = re.search("LL_VERSION_MAJOR\s=\s([0-9]+)", contents).group(1)
            minor = re.search("LL_VERSION_MINOR\s=\s([0-9]+)", contents).group(1)
            patch = re.search("LL_VERSION_PATCH\s=\s([0-9]+)", contents).group(1)
            build = re.search("LL_VERSION_BUILD\s=\s([0-9]+)", contents).group(1)
            return major, minor, patch, build
|
||||
|
||||
# Channel name baked into packages when --channel is not given.
DEFAULT_CHANNEL = 'Second Life Release'

# Command-line arguments understood by main().  Each entry carries the
# option name, its help text (rendered by usage()), and a default: either
# a literal value or a callable invoked with the source tree path.
ARGUMENTS=[
    dict(name='actions',
         description="""This argument specifies the actions that are to be taken when the
        script is run.  The meaningful actions are currently:
          copy - copies the files specified by the manifest into the
                 destination directory.
          package - bundles up the files in the destination directory into
                    an installer for the current platform
          unpacked - bundles up the files in the destination directory into
                     a simple tarball
        Example use: %(name)s --actions="copy unpacked" """,
         default="copy package"),
    dict(name='arch',
         description="""This argument is appended to the platform string for
        determining which manifest class to run.
        Example use: %(name)s --arch=i686
        On Linux this would try to use Linux_i686Manifest.""",
         default=""),
    dict(name='configuration',
         description="""The build configuration used.  Only used on OS X for
        now, but it could be used for other platforms as well.""",
         default="Universal"),
    dict(name='grid',
         description="""Which grid the client will try to connect to.  Even
        though it's not strictly a grid, 'firstlook' is also an acceptable
        value for this parameter.""",
         default=""),
    dict(name='channel',
         description="""The channel to use for updates.""",
         default=DEFAULT_CHANNEL),
    dict(name='installer_name',
         description=""" The name of the file that the installer should be
        packaged up into. Only used on Linux at the moment.""",
         default=None),
    dict(name='login_url',
         description="""The url that the login screen displays in the client.""",
         default=None),
    dict(name='platform',
         description="""The current platform, to be used for looking up which
        manifest class to run.""",
         default=get_default_platform),
    dict(name='version',
         description="""This specifies the version of Second Life that is
        being packaged up.""",
         default=get_default_version)
    ]
|
||||
|
||||
def usage(srctree=""):
|
||||
nd = {'name':sys.argv[0]}
|
||||
print """Usage:
|
||||
%(name)s [options] [destdir]
|
||||
Options:
|
||||
""" % nd
|
||||
for arg in ARGUMENTS:
|
||||
default = arg['default']
|
||||
if hasattr(default, '__call__'):
|
||||
default = "(computed value) \"" + str(default(srctree)) + '"'
|
||||
elif default is not None:
|
||||
default = '"' + default + '"'
|
||||
print "\t--%s Default: %s\n\t%s\n" % (
|
||||
arg['name'],
|
||||
default,
|
||||
arg['description'] % nd)
|
||||
|
||||
def main(argv=None, srctree='.', dsttree='./dst'):
    """Parse options, fill in defaults from ARGUMENTS, then instantiate the
    platform-appropriate manifest class and run the requested actions.

    @param argv argument vector; defaults to sys.argv.
    @param srctree source tree root.
    @param dsttree destination root; overridden by the first positional arg.
    @return 0 on success (None after printing --help).
    """
    if(argv == None):
        argv = sys.argv

    option_names = [arg['name'] + '=' for arg in ARGUMENTS]
    option_names.append('help')
    options, remainder = getopt.getopt(argv[1:], "", option_names)
    if len(remainder) >= 1:
        dsttree = remainder[0]

    print "Source tree:", srctree
    print "Destination tree:", dsttree

    # convert options to a hash
    args = {}
    for opt in options:
        args[opt[0].replace("--", "")] = opt[1]

    # early out for help
    if args.has_key('help'):
        # *TODO: it is a huge hack to pass around the srctree like this
        usage(srctree)
        return

    # defaults: anything not given on the command line comes from
    # ARGUMENTS; callables are evaluated against the source tree.
    for arg in ARGUMENTS:
        if not args.has_key(arg['name']):
            default = arg['default']
            if hasattr(default, '__call__'):
                default = default(srctree)
            if default is not None:
                args[arg['name']] = default

    # fix up version: a dotted string becomes a list of components
    if args.has_key('version') and type(args['version']) == str:
        args['version'] = args['version'].split('.')

    # default and agni are default
    if args['grid'] in ['default', 'agni']:
        args['grid'] = ''

    if args.has_key('actions'):
        args['actions'] = args['actions'].split()

    # debugging
    for opt in args:
        print "Option:", opt, "=", args[opt]

    # Look up the manifest class for platform[_arch] and run the actions.
    wm = LLManifest.for_platform(args['platform'], args.get('arch'))(srctree, dsttree, args)
    wm.do(*args['actions'])
    return 0
|
||||
class LLManifestRegistry(type):
    """Metaclass that auto-registers every *Manifest subclass.

    A class named e.g. 'WindowsManifest' is recorded in its inherited
    `manifests` dict under the key 'windows'.
    """
    def __init__(cls, name, bases, dct):
        super(LLManifestRegistry, cls).__init__(name, bases, dct)
        prefix_match = re.match("(\w+)Manifest", name)
        if prefix_match is not None:
            platform_key = prefix_match.group(1).lower()
            cls.manifests[platform_key] = cls
|
||||
|
||||
class LLManifest(object):
|
||||
__metaclass__ = LLManifestRegistry
|
||||
manifests = {}
|
||||
def for_platform(self, platform, arch = None):
    """Look up the registered manifest class for a platform.

    @param platform base platform name, e.g. 'linux' or 'windows'.
    @param arch optional architecture suffix; 'linux' + 'i686' looks up
        the 'linux_i686' registration.
    """
    key = platform if not arch else platform + '_' + arch
    return self.manifests[key.lower()]
for_platform = classmethod(for_platform)
|
||||
|
||||
def __init__(self, srctree, dsttree, args):
    """Create a manifest rooted at srctree/dsttree.

    @param srctree source tree root; becomes the bottom of the source
        prefix stack.
    @param dsttree destination tree root; bottom of the destination stack.
    @param args parsed command-line option dict (see ARGUMENTS).
    """
    super(LLManifest, self).__init__()
    self.args = args
    self.file_list = []          # [src, dst] pairs processed so far
    self.excludes = []           # glob patterns skipped by includes()
    self.actions = []            # active action names (set elsewhere; see main's wm.do)
    self.src_prefix = [srctree]  # stack of source path components
    self.dst_prefix = [dsttree]  # stack of destination path components
    self.created_paths = []      # everything created, for cleanup_finish()
|
||||
|
||||
def default_grid(self):
    """True when no --grid was given (i.e. building for the main grid)."""
    grid = self.args.get('grid', None)
    return grid == ''
|
||||
def default_channel(self):
    """True when the channel is the stock release channel."""
    channel = self.args.get('channel', None)
    return channel == DEFAULT_CHANNEL
|
||||
|
||||
def construct(self):
    """ Meant to be overriden by LLManifest implementors with code that
    constructs the complete destination hierarchy."""
    # Intentionally a no-op in the base class.
    pass # override this method
|
||||
|
||||
def exclude(self, glob):
    """ Excludes all files that match the glob from being included
    in the file list by path().

    @param glob an fnmatch-style pattern tested against source paths.
    """
    self.excludes.append(glob)
|
||||
|
||||
def prefix(self, src='', dst=None):
    """Push a (src, dst) component pair onto the prefix stacks.

    Until end_prefix() is called, all relevant path operations include the
    entire prefix stack.  If dst is omitted it mirrors src; to make only
    one side a no-op, pass an empty string explicitly rather than None.
    """
    effective_dst = src if dst == None else dst
    self.src_prefix.append(src)
    self.dst_prefix.append(effective_dst)
    # Returning True lets callers write `if self.prefix(...):` purely to
    # get an indented block in their manifest code.
    return True
|
||||
|
||||
def end_prefix(self, descr=None):
    """Pop one prefix pair off the stacks, optionally checking nesting.

    If descr is given and matches neither the popped source nor the popped
    destination component, the prefix()/end_prefix() calls were mis-nested
    and ValueError is raised.
    """
    popped_src = self.src_prefix.pop()
    popped_dst = self.dst_prefix.pop()
    mismatched = descr and popped_src != descr and popped_dst != descr
    if mismatched:
        raise ValueError("End prefix '" + descr + "' didn't match '" + popped_src + "' or '" + popped_dst + "'")
|
||||
|
||||
def get_src_prefix(self):
    """Return the current source prefix stack joined into one path."""
    components = self.src_prefix
    return os.path.join(*components)
|
||||
|
||||
def get_dst_prefix(self):
    """Return the current destination prefix stack joined into one path."""
    components = self.dst_prefix
    return os.path.join(*components)
|
||||
|
||||
def src_path_of(self, relpath):
    """Return relpath resolved against the current source prefix."""
    base = self.get_src_prefix()
    return os.path.join(base, relpath)
|
||||
|
||||
def dst_path_of(self, relpath):
    """Return relpath resolved against the current destination prefix."""
    base = self.get_dst_prefix()
    return os.path.join(base, relpath)
|
||||
|
||||
def ensure_src_dir(self, reldir):
    """Construct the path for a directory relative to the
    source path, and ensures that it exists.  Returns the
    full path."""
    path = os.path.join(self.get_src_prefix(), reldir)
    self.cmakedirs(path)
    return path
|
||||
|
||||
def ensure_dst_dir(self, reldir):
    """Construct the path for a directory relative to the
    destination path, and ensures that it exists.  Returns the
    full path."""
    path = os.path.join(self.get_dst_prefix(), reldir)
    self.cmakedirs(path)
    return path
|
||||
|
||||
def run_command(self, command):
    """ Runs an external command and returns its output.  Raises a
    RuntimeError if the command returns a nonzero status code.  For
    debugging/informational purposes, prints the command's output as
    it is received."""
    print "Running command:", command
    fd = os.popen(command, 'r')
    lines = []
    while True:
        lines.append(fd.readline())
        # readline() returns '' only at EOF.
        if(lines[-1] == ''):
            break
        else:
            print lines[-1],
    output = ''.join(lines)
    # For os.popen handles, close() returns the exit status (or None).
    status = fd.close()
    if(status):
        raise RuntimeError, "Command " + command + " returned non-zero status (" + str(status) + ")"
    return output
|
||||
|
||||
def created_path(self, path):
    """ Declare that you've created a path in order to
    a) verify that you really have created it
    b) schedule it for cleanup"""
    exists = os.path.exists(path)
    if not exists:
        raise RuntimeError("Should be something at path " + path)
    self.created_paths.append(path)
|
||||
|
||||
def put_in_file(self, contents, dst):
    """Write contents to dst (relative to the destination prefix),
    in binary mode."""
    # write contents as dst
    f = open(self.dst_path_of(dst), "wb")
    f.write(contents)
    f.close()
|
||||
|
||||
def replace_in(self, src, dst=None, searchdict={}):
    """Copy src to dst, replacing each searchdict key with its value.

    @param src source file, relative to the source prefix.
    @param dst destination, relative to the destination prefix;
        defaults to src.
    @param searchdict old -> new literal string replacements.
        NOTE(review): mutable default argument is shared across calls;
        harmless here only because it is never mutated.
    """
    if(dst == None):
        dst = src
    # read src
    f = open(self.src_path_of(src), "rbU")
    contents = f.read()
    f.close()
    # apply dict replacements
    for old, new in searchdict.iteritems():
        contents = contents.replace(old, new)
    self.put_in_file(contents, dst)
    self.created_paths.append(dst)
|
||||
|
||||
def copy_action(self, src, dst):
    """Per-file handler for the 'copy' action: copy src (file, symlink,
    or directory tree) to dst, creating parent directories as needed."""
    if(src and (os.path.exists(src) or os.path.islink(src))):
        # ensure that destination path exists
        self.cmakedirs(os.path.dirname(dst))
        self.created_paths.append(dst)
        if(not os.path.isdir(src)):
            self.ccopy(src,dst)
        else:
            # src is a dir
            self.ccopytree(src,dst)
    else:
        print "Doesn't exist:", src
|
||||
|
||||
def package_action(self, src, dst):
    # Per-file hook for the 'package' action; packaging happens wholesale
    # in package_finish(), so there is nothing to do per file.
    pass

def copy_finish(self):
    # Hook called after the 'copy' action has visited every file.
    pass

def package_finish(self):
    # Hook for platform subclasses to build the actual installer.
    pass
|
||||
|
||||
def unpacked_finish(self):
    """Finish hook for the 'unpacked' action: tar up the entire
    destination tree as unpacked_<platform>_<version>.tar in the
    source tree."""
    unpacked_file_name = "unpacked_%(plat)s_%(vers)s.tar" % {
        'plat':self.args['platform'],
        'vers':'_'.join(self.args['version'])}
    print "Creating unpacked file:", unpacked_file_name
    # could add a gz here but that doubles the time it takes to do this step
    tf = tarfile.open(self.src_path_of(unpacked_file_name), 'w:')
    # add the entire installation package, at the very top level
    tf.add(self.get_dst_prefix(), "")
    tf.close()
|
||||
|
||||
def cleanup_finish(self):
    """ Delete paths that were specified to have been created by this script"""
    # NOTE(review): this currently only prints the paths; nothing is
    # actually deleted yet.
    for c in self.created_paths:
        # *TODO is this gonna be useful?
        print "Cleaning up " + c
|
||||
|
||||
def process_file(self, src, dst):
    """Run every active action's per-file handler (<action>_action) on
    the (src, dst) pair, then record the pair in file_list.  Excluded
    files are reported and skipped."""
    if(self.includes(src, dst)):
        # print src, "=>", dst
        for action in self.actions:
            methodname = action + "_action"
            # Actions without a per-file handler are simply skipped.
            method = getattr(self, methodname, None)
            if method is not None:
                method(src, dst)
        self.file_list.append([src, dst])
    else:
        print "Excluding: ", src, dst
|
||||
|
||||
|
||||
def process_directory(self, src, dst):
    """Recursively run process_file() on everything under src, mirroring
    the layout into dst (which is created first).  An excluded directory
    is skipped entirely."""
    if(not self.includes(src, dst)):
        print "Excluding: ", src, dst
        return
    names = os.listdir(src)
    self.cmakedirs(dst)
    # NOTE(review): `errors` is never populated or used here.
    errors = []
    for name in names:
        srcname = os.path.join(src, name)
        dstname = os.path.join(dst, name)
        if os.path.isdir(srcname):
            self.process_directory(srcname, dstname)
        else:
            self.process_file(srcname, dstname)
|
||||
|
||||
|
||||
|
||||
def includes(self, src, dst):
    """Return False when src matches one of the exclude globs, else True
    (including when src is empty).  dst is accepted for interface
    symmetry but not consulted."""
    if not src:
        return True
    excluded = any(fnmatch.fnmatch(src, pattern) for pattern in self.excludes)
    return not excluded
|
||||
|
||||
def remove(self, *paths):
    """Delete each existing path; directories are removed recursively,
    non-existent paths are silently ignored."""
    for path in paths:
        if(os.path.exists(path)):
            print "Removing path", path
            if(os.path.isdir(path)):
                shutil.rmtree(path)
            else:
                os.remove(path)
|
||||
|
||||
def ccopy(self, src, dst):
    """ Copy a single file or symlink.  Uses filecmp to skip copying for existing files."""
    if os.path.islink(src):
        # Re-create the symlink rather than copying its target.
        linkto = os.readlink(src)
        if(os.path.islink(dst) or os.path.exists(dst)):
            os.remove(dst)  # because symlinking over an existing link fails
        os.symlink(linkto, dst)
    else:
        # Don't recopy file if it's up-to-date.
        # If we seem to be not not overwriting files that have been
        # updated, set the last arg to False, but it will take longer.
        if(os.path.exists(dst) and filecmp.cmp(src, dst, True)):
            return
        # only copy if it's not excluded
        if(self.includes(src, dst)):
            shutil.copy2(src, dst)
|
||||
def ccopytree(self, src, dst):
    """Direct copy of shutil.copytree with the additional
    feature that the destination directory can exist.  It
    is so dumb that Python doesn't come with this.  Also it
    implements the excludes functionality."""
    if(not self.includes(src, dst)):
        return
    names = os.listdir(src)
    self.cmakedirs(dst)
    errors = []
    for name in names:
        srcname = os.path.join(src, name)
        dstname = os.path.join(dst, name)
        try:
            if os.path.isdir(srcname):
                self.ccopytree(srcname, dstname)
            else:
                self.ccopy(srcname, dstname)
                # XXX What about devices, sockets etc.?
        except (IOError, os.error), why:
            # Collect failures and report them all at once at the end.
            errors.append((srcname, dstname, why))
    if errors:
        raise RuntimeError, errors
|
||||
|
||||
|
||||
def cmakedirs(self, path):
    """Ensures that a directory exists, and doesn't throw an exception
    if you call it on an existing directory."""
    # print "making path: ", path
    path = os.path.normpath(path)
    # Record the path for cleanup even when it already existed.
    self.created_paths.append(path)
    if not os.path.exists(path):
        os.makedirs(path)
|
||||
|
||||
def find_existing_file(self, *candidates):
    """Return the first path in candidates that exists on disk.

    If none exist, the last candidate is returned anyway so the caller
    still gets a plausible path to work with; returns None when called
    with no candidates at all.

    Note: the varargs parameter was renamed from 'list', which shadowed
    the builtin; callers are unaffected since varargs are positional.
    """
    for candidate in candidates:
        if os.path.exists(candidate):
            return candidate
    # Didn't find anything: fall back to the last item, if any.
    if candidates:
        return candidates[-1]
    return None
def contents_of_tar(self, src_tar, dst_dir):
    """Extract every member of a tarfile into a destination directory.

    src_tar is relative to the source prefix; dst_dir is relative to
    the destination prefix.  Each extracted member is also recorded in
    self.file_list as [src_tar, destination-path].
    """
    self.check_file_exists(src_tar)
    archive = tarfile.open(self.src_path_of(src_tar), 'r')
    for entry in archive.getmembers():
        # ensure_dst_dir is invoked per member so the destination
        # directory is guaranteed to exist before each extract.
        archive.extract(entry, self.ensure_dst_dir(dst_dir))
        # TODO: get actions working on extracted files; perhaps extract
        # to a temporary directory and then run process_directory on it?
        self.file_list.append(
            [src_tar, self.dst_path_of(os.path.join(dst_dir, entry.name))])
    archive.close()
def wildcard_regex(self, src_glob, dst_glob):
    """Convert a (source glob, destination glob) pair into a
    (compiled regex, substitution template) pair for re.sub.

    Each '*' in src_glob becomes a capturing group; each '*' in
    dst_glob becomes the corresponding backreference \\g<1>, \\g<2>, ...
    so matched source names can be rewritten into destination names.
    """
    pattern = re.escape(src_glob).replace('\\*', '([-a-zA-Z0-9._ ]+)')
    template = dst_glob
    group = 1
    # Replace one '*' at a time so each gets its own group number.
    while '*' in template:
        template = template.replace('*', '\\g<%d>' % group, 1)
        group += 1
    return re.compile(pattern), template
def check_file_exists(self, path):
    """Raise RuntimeError unless path names an existing file/directory
    or a symlink (broken symlinks count as present)."""
    if os.path.exists(path) or os.path.islink(path):
        return
    full = os.path.normpath(os.path.join(os.getcwd(), path))
    raise RuntimeError("Path %s doesn't exist" % (full,))
# Matches any literal '*' in a path argument; used by path() to decide
# whether a src spec needs glob expansion via expand_globs().
wildcard_pattern = re.compile('\*')
def expand_globs(self, src, dst):
    """Generate (src, dst) path pairs by globbing src and rewriting
    each match through the wildcard substitution implied by dst.

    Paths are normalized to forward slashes for the regex work, then
    converted back to the native separator before being yielded.
    """
    def to_forward(path):
        return path.replace('\\', '/')

    def to_native(path):
        return path.replace('/', os.path.sep)

    src_pattern = to_forward(src)
    dst_pattern = to_forward(dst)
    matcher, template = self.wildcard_regex(src_pattern, dst_pattern)
    for match in glob.glob(src_pattern):
        match = to_forward(match)
        mapped = matcher.sub(template, match)
        yield to_native(match), to_native(mapped)
def path(self, src, dst=None):
    """Process one manifest entry: copy src (relative to the source
    prefix) to dst (relative to the destination prefix; defaults to
    the same relative path as src).

    If src contains a '*' wildcard it is expanded via expand_globs()
    and each match is processed individually; otherwise src must exist
    and is dispatched to process_directory() or process_file().
    """
    print "Processing", src, "=>", dst
    if src == None:
        raise RuntimeError("No source file, dst is " + dst)
    if dst == None:
        dst = src
    # Resolve both sides against their configured prefixes.
    dst = os.path.join(self.get_dst_prefix(), dst)
    src = os.path.join(self.get_src_prefix(), src)

    # expand globs
    if(self.wildcard_pattern.search(src)):
        # NOTE(review): every glob match is handed to process_file, even
        # when the match is a directory -- confirm that is intended.
        for s,d in self.expand_globs(src, dst):
            self.process_file(s, d)
    else:
        # if we're specifying a single path (not a glob),
        # we should error out if it doesn't exist
        self.check_file_exists(src)
        # if it's a directory, recurse through it
        if(os.path.isdir(src)):
            self.process_directory(src, dst)
        else:
            self.process_file(src, dst)
def do(self, *actions):
    """Run the manifest: record the requested actions, build the file
    list via construct(), then invoke each '<action>_finish' hook the
    subclass defines.  Returns the accumulated file_list."""
    self.actions = actions
    self.construct()
    # Give every requested action a chance to run its finish hook;
    # actions without a '<name>_finish' method are simply skipped.
    for name in self.actions:
        finish_hook = getattr(self, name + "_finish", None)
        if finish_hook is not None:
            finish_hook()
    return self.file_list
|
@ -12,7 +12,7 @@ import re
|
|||
import tarfile
|
||||
viewer_dir = os.path.dirname(__file__)
|
||||
# add llmanifest library to our path so we don't have to muck with PYTHONPATH
|
||||
sys.path.append(os.path.join(viewer_dir, '../lib/python/indra'))
|
||||
sys.path.append(os.path.join(viewer_dir, '../lib/python/indra/util'))
|
||||
from llmanifest import LLManifest, main, proper_windows_path, path_ancestors
|
||||
|
||||
class ViewerManifest(LLManifest):
|
||||
|
|
|
|||
|
|
@ -6,7 +6,7 @@
|
|||
# Copyright (c) 2006-$CurrentYear$, Linden Research, Inc.
|
||||
# $License$
|
||||
|
||||
from indra.util import llmanifest
|
||||
from indra import llmanifest
|
||||
import os.path
|
||||
import os
|
||||
import unittest
|
||||
|
|
|
|||
|
|
@ -22,8 +22,8 @@ import os
|
|||
import sys
|
||||
import urllib
|
||||
|
||||
from indra import compatibility
|
||||
from indra import llmessage
|
||||
from indra.ipc import compatibility
|
||||
from indra.ipc import llmessage
|
||||
|
||||
def die(msg):
|
||||
print >>sys.stderr, msg
|
||||
|
|
|
|||
Loading…
Reference in New Issue