author     imarom <imarom@cisco.com>   2015-08-30 11:41:42 +0300
committer  imarom <imarom@cisco.com>   2015-08-30 11:41:42 +0300
commit     c9381643e7bf9b3dc690bf3e012ad6176ee32b8c (patch)
tree       ff0e91ee5c38f2caaeaa53340ecf2db2a326455a /scripts
parent     05a529031e962d61ab977393fb3d153931feff34 (diff)
parent     53f0e28d7f30c7175cbb15884c309613593859d8 (diff)
Merge branch 'master' into rpc
Conflicts:
linux/ws_main.py
linux_dpdk/ws_main.py
Diffstat (limited to 'scripts')
278 files changed, 24329 insertions, 6243 deletions
diff --git a/scripts/automation/trex_control_plane/client/outer_packages.py b/scripts/automation/trex_control_plane/client/outer_packages.py
index a7c34e48..5facad20 100755
--- a/scripts/automation/trex_control_plane/client/outer_packages.py
+++ b/scripts/automation/trex_control_plane/client/outer_packages.py
@@ -1,29 +1,30 @@
 #!/router/bin/python
-import sys,site
-import platform,os
+import sys
+import site
+import os
-CURRENT_PATH = os.path.dirname(os.path.realpath(__file__))
+CURRENT_PATH = os.path.dirname(os.path.realpath(__file__))
ROOT_PATH = os.path.abspath(os.path.join(CURRENT_PATH, os.pardir)) # path to trex_control_plane directory
-PATH_TO_PYTHON_LIB = os.path.abspath(os.path.join(ROOT_PATH, 'python_lib'))
-
+PATH_TO_PYTHON_LIB = os.path.abspath(os.path.join(ROOT_PATH, os.pardir, os.pardir, 'external_libs'))
CLIENT_MODULES = ['enum34-1.0.4',
- # 'jsonrpclib-0.1.3',
- 'jsonrpclib-pelix-0.2.5',
- 'termstyle',
- 'rpc_exceptions-0.1'
- ]
+ 'jsonrpclib-pelix-0.2.5',
+ 'termstyle',
+ 'rpc_exceptions-0.1'
+ ]
+
-def import_client_modules ():
+def import_client_modules():
sys.path.append(ROOT_PATH)
import_module_list(CLIENT_MODULES)
-def import_module_list (modules_list):
+
+def import_module_list(modules_list):
assert(isinstance(modules_list, list))
for p in modules_list:
- full_path = os.path.join(PATH_TO_PYTHON_LIB, p)
- fix_path = os.path.normcase(full_path) #CURRENT_PATH+p)
+ full_path = os.path.join(PATH_TO_PYTHON_LIB, p)
+ fix_path = os.path.normcase(full_path) # (CURRENT_PATH+p)
site.addsitedir(full_path)
import_client_modules()
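
The rewritten outer_packages.py above illustrates the bootstrap pattern the control plane uses for vendored dependencies: resolve the shared external_libs directory relative to the current file, then register each bundled package with site.addsitedir(). A minimal standalone sketch of the same idea — the package names and directory layout here are illustrative assumptions, not taken from the diff:

    #!/router/bin/python
    # sketch: register vendored packages from a repo-local directory
    import os
    import site
    import sys

    CURRENT_PATH = os.path.dirname(os.path.realpath(__file__))
    # two levels up from this file to the shared external_libs directory
    LIB_PATH = os.path.abspath(os.path.join(CURRENT_PATH, os.pardir, os.pardir, 'external_libs'))

    VENDORED_PACKAGES = ['enum34-1.0.4', 'termstyle']  # example entries

    def register_vendored_packages():
        sys.path.append(CURRENT_PATH)
        for pkg in VENDORED_PACKAGES:
            # addsitedir adds the directory to sys.path and processes any .pth files in it
            site.addsitedir(os.path.join(LIB_PATH, pkg))

    register_vendored_packages()

Because addsitedir() also processes .pth files, bundled packages that ship their own path hooks keep working without per-package special cases.
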
diff --git a/scripts/automation/trex_control_plane/client/trex_client.py b/scripts/automation/trex_control_plane/client/trex_client.py index 1f297538..0fbb4719 100755 --- a/scripts/automation/trex_control_plane/client/trex_client.py +++ b/scripts/automation/trex_control_plane/client/trex_client.py @@ -35,7 +35,7 @@ class CTRexClient(object): def __init__(self, trex_host, max_history_size = 100, trex_daemon_port = 8090, trex_zmq_port = 4500, verbose = False): """ - Instatiate a T-Rex client object, and connecting it to listening deamon-server + Instantiate a T-Rex client object, and connecting it to listening daemon-server :parameters: trex_host : str @@ -45,15 +45,15 @@ class CTRexClient(object): default value : **100** trex_daemon_port : int - the port number on which the trex-deamon server can be reached + the port number on which the trex-daemon server can be reached default value: **8090** trex_zmq_port : int - the port number on which trex's zmq module will interact with deamon server + the port number on which trex's zmq module will interact with daemon server default value: **4500** verbose : bool - sets a verbose output on suported class method. + sets a verbose output on supported class method. default value : **False** @@ -153,7 +153,7 @@ class CTRexClient(object): """ Request to stop a T-Rex run on server. - The request is only valid if the stop intitiator is the same client as the T-Rex run intitiator. + The request is only valid if the stop initiator is the same client as the T-Rex run initiator. :parameters: None @@ -223,7 +223,7 @@ class CTRexClient(object): """ Block the client application until T-Rex changes state from 'Starting' to either 'Idle' or 'Running' - The request is only valid if the stop intitiator is the same client as the T-Rex run intitiator. + The request is only valid if the stop initiator is the same client as the T-Rex run initiator. :parameters: timeout : int diff --git a/scripts/automation/trex_control_plane/client_utils/general_utils.py b/scripts/automation/trex_control_plane/client_utils/general_utils.py index 5544eabc..b5912628 100755 --- a/scripts/automation/trex_control_plane/client_utils/general_utils.py +++ b/scripts/automation/trex_control_plane/client_utils/general_utils.py @@ -1,6 +1,9 @@ #!/router/bin/python -import sys,site +import sys +import site +import string +import random import os try: @@ -50,7 +53,27 @@ def find_path_to_pardir (pardir, base_path = os.getcwd() ): """ components = base_path.split(os.sep) return str.join(os.sep, components[:components.index(pardir)+1]) - + + +def random_id_gen(length=8): + """ + A generator for creating a random chars id of specific length + + :parameters: + length : int + the desired length of the generated id + + default: 8 + + :return: + a random id with each next() request. 
+ """ + id_chars = string.ascii_lowercase + string.digits + while True: + return_id = '' + for i in range(length): + return_id += random.choice(id_chars) + yield return_id if __name__ == "__main__": diff --git a/scripts/automation/trex_control_plane/client_utils/jsonrpc_client.py b/scripts/automation/trex_control_plane/client_utils/jsonrpc_client.py new file mode 100644 index 00000000..1631c494 --- /dev/null +++ b/scripts/automation/trex_control_plane/client_utils/jsonrpc_client.py @@ -0,0 +1,186 @@ +#!/router/bin/python + +import outer_packages +import zmq +import json +import general_utils +from time import sleep + +class JsonRpcClient(object): + + def __init__ (self, default_server, default_port): + self.verbose = False + self.connected = False + + # default values + self.port = default_port + self.server = default_server + self.id_gen = general_utils.random_id_gen() + + def get_connection_details (self): + rc = {} + rc['server'] = self.server + rc['port'] = self.port + + return rc + + def pretty_json (self, json_str): + return json.dumps(json.loads(json_str), indent = 4, separators=(',', ': '), sort_keys = True) + + def verbose_msg (self, msg): + if not self.verbose: + return + + print "[verbose] " + msg + + + def create_jsonrpc_v2 (self, method_name, params = {}, id = None): + msg = {} + msg["jsonrpc"] = "2.0" + msg["method"] = method_name + + msg["params"] = params + + msg["id"] = id + + return json.dumps(msg) + + def invoke_rpc_method (self, method_name, params = {}, block = False): + rc, msg = self._invoke_rpc_method(method_name, params, block) + if not rc: + self.disconnect() + + return rc, msg + + def _invoke_rpc_method (self, method_name, params = {}, block = False): + if not self.connected: + return False, "Not connected to server" + + id = self.id_gen.next() + msg = self.create_jsonrpc_v2(method_name, params, id = id) + + self.verbose_msg("Sending Request To Server:\n\n" + self.pretty_json(msg) + "\n") + + if block: + self.socket.send(msg) + else: + try: + self.socket.send(msg, flags = zmq.NOBLOCK) + except zmq.error.ZMQError as e: + return False, "Failed To Get Send Message" + + got_response = False + + if block: + response = self.socket.recv() + got_response = True + else: + for i in xrange(0 ,10): + try: + response = self.socket.recv(flags = zmq.NOBLOCK) + got_response = True + break + except zmq.Again: + sleep(0.2) + + if not got_response: + return False, "Failed To Get Server Response" + + self.verbose_msg("Server Response:\n\n" + self.pretty_json(response) + "\n") + + # decode + response_json = json.loads(response) + + if (response_json.get("jsonrpc") != "2.0"): + return False, "Malfromed Response ({0})".format(str(response)) + + if (response_json.get("id") != id): + return False, "Server Replied With Bad ID ({0})".format(str(response)) + + # error reported by server + if ("error" in response_json): + return True, response_json["error"]["message"] + + # if no error there should be a result + if ("result" not in response_json): + return False, "Malfromed Response ({0})".format(str(response)) + + return True, response_json["result"] + + + def ping_rpc_server(self): + + return self.invoke_rpc_method("ping", block = False) + + def get_rpc_server_status (self): + return self.invoke_rpc_method("get_status") + + def query_rpc_server(self): + return self.invoke_rpc_method("get_reg_cmds") + + + def set_verbose(self, mode): + self.verbose = mode + + def disconnect (self): + if self.connected: + self.socket.close(linger = 0) + self.context.destroy(linger = 0) + self.connected = 
False + return True, "" + else: + return False, "Not connected to server" + + def connect(self, server = None, port = None): + if self.connected: + self.disconnect() + + self.context = zmq.Context() + + self.server = (server if server else self.server) + self.port = (port if port else self.port) + + # Socket to talk to server + self.transport = "tcp://{0}:{1}".format(self.server, self.port) + + print "\nConnecting To RPC Server On {0}".format(self.transport) + + self.socket = self.context.socket(zmq.REQ) + try: + self.socket.connect(self.transport) + except zmq.error.ZMQError as e: + return False, "ZMQ Error: Bad server or port name: " + str(e) + + + self.connected = True + + # ping the server + rc, err = self.ping_rpc_server() + if not rc: + self.disconnect() + return rc, err + + return True, "" + + + def reconnect(self): + # connect using current values + return self.connect() + + if not self.connected: + return False, "Not connected to server" + + # reconnect + return self.connect(self.server, self.port) + + + def is_connected(self): + return self.connected + + + def __del__(self): + print "Shutting down RPC client\n" + self.context.destroy(linger=0) + +if __name__ == "__main__": + pass diff --git a/scripts/automation/trex_control_plane/client_utils/outer_packages.py b/scripts/automation/trex_control_plane/client_utils/outer_packages.py new file mode 100644 index 00000000..c489fd3d --- /dev/null +++ b/scripts/automation/trex_control_plane/client_utils/outer_packages.py @@ -0,0 +1,29 @@ +#!/router/bin/python + +import sys +import site +import os + +CURRENT_PATH = os.path.dirname(os.path.realpath(__file__)) +ROOT_PATH = os.path.abspath(os.path.join(CURRENT_PATH, os.pardir)) # path to trex_control_plane directory +PATH_TO_PYTHON_LIB = os.path.abspath(os.path.join(ROOT_PATH, os.pardir, os.pardir, 'external_libs')) + +CLIENT_UTILS_MODULES = ['zmq'] + + +def import_client_utils_modules(): + # must be in a higher priority + sys.path.insert(0, PATH_TO_PYTHON_LIB) + sys.path.append(ROOT_PATH) + import_module_list(CLIENT_UTILS_MODULES) + + +def import_module_list(modules_list): + assert(isinstance(modules_list, list)) + for p in modules_list: + full_path = os.path.join(PATH_TO_PYTHON_LIB, p) + fix_path = os.path.normcase(full_path) + site.addsitedir(full_path) + +import_client_utils_modules() + diff --git a/scripts/automation/trex_control_plane/console/trex_console.py b/scripts/automation/trex_control_plane/console/trex_console.py new file mode 100644 index 00000000..6514a51c --- /dev/null +++ b/scripts/automation/trex_control_plane/console/trex_console.py @@ -0,0 +1,252 @@ +#!/usr/bin/env python +# -*- coding: utf-8 -*- +import cmd +import json +import ast +import argparse +import sys +import trex_root_path +from client_utils.jsonrpc_client import JsonRpcClient +import trex_status + +class TrexConsole(cmd.Cmd): + """Trex Console""" + + def __init__(self, rpc_client): + cmd.Cmd.__init__(self) + + self.rpc_client = rpc_client + + self.do_connect("") + + self.intro = "\n-=TRex Console V1.0=-\n" + self.intro += "\nType 'help' or '?' 
for supported actions\n" + + self.verbose = False + + self.postcmd(False, "") + + + # a cool hack - i stole this function and added space + def completenames(self, text, *ignored): + dotext = 'do_'+text + return [a[3:]+' ' for a in self.get_names() if a.startswith(dotext)] + + # set verbose on / off + def do_verbose (self, line): + '''shows or set verbose mode\n''' + if line == "": + print "\nverbose is " + ("on\n" if self.verbose else "off\n") + + elif line == "on": + self.verbose = True + self.rpc_client.set_verbose(True) + print "\nverbose set to on\n" + + elif line == "off": + self.verbose = False + self.rpc_client.set_verbose(False) + print "\nverbose set to off\n" + + else: + print "\nplease specify 'on' or 'off'\n" + + # query the server for registered commands + def do_query_server(self, line): + '''query the RPC server for supported remote commands\n''' + + rc, msg = self.rpc_client.query_rpc_server() + if not rc: + print "\n*** " + msg + "\n" + return + + print "\nRPC server supports the following commands: \n\n" + for func in msg: + if func: + print func + print "\n" + + def do_ping (self, line): + '''Pings the RPC server\n''' + + print "\n-> Pinging RPC server" + + rc, msg = self.rpc_client.ping_rpc_server() + if rc: + print "[SUCCESS]\n" + else: + print "\n*** " + msg + "\n" + return + + def do_connect (self, line): + '''Connects to the server\n''' + + if line == "": + rc, msg = self.rpc_client.connect() + else: + sp = line.split() + if (len(sp) != 2): + print "\n[usage] connect [server] [port] or without parameters\n" + return + + rc, msg = self.rpc_client.connect(sp[0], sp[1]) + + if rc: + print "[SUCCESS]\n" + else: + print "\n*** " + msg + "\n" + return + + rc, msg = self.rpc_client.query_rpc_server() + + if rc: + self.supported_rpc = [str(x) for x in msg if x] + + def do_rpc (self, line): + '''Launches a RPC on the server\n''' + + if line == "": + print "\nUsage: [method name] [param dict as string]\n" + print "Example: rpc test_add {'x': 12, 'y': 17}\n" + return + + sp = line.split(' ', 1) + method = sp[0] + + params = None + bad_parse = False + if len(sp) > 1: + + try: + params = ast.literal_eval(sp[1]) + if not isinstance(params, dict): + bad_parse = True + + except ValueError as e1: + bad_parse = True + except SyntaxError as e2: + bad_parse = True + + if bad_parse: + print "\nValue should be a valid dict: '{0}'".format(sp[1]) + print "\nUsage: [method name] [param dict as string]\n" + print "Example: rpc test_add {'x': 12, 'y': 17}\n" + return + + rc, msg = self.rpc_client.invoke_rpc_method(method, params) + if rc: + print "\nServer Response:\n\n" + json.dumps(msg) + "\n" + else: + print "\n*** " + msg + "\n" + #print "Please try 'reconnect' to reconnect to server" + + + def complete_rpc (self, text, line, begidx, endidx): + return [x for x in self.supported_rpc if x.startswith(text)] + + def do_status (self, line): + '''Shows a graphical console\n''' + + self.do_verbose('off') + trex_status.show_trex_status(self.rpc_client) + + def do_quit(self, line): + '''exit the client\n''' + return True + + def do_disconnect (self, line): + '''Disconnect from the server\n''' + if not self.rpc_client.is_connected(): + print "Not connected to server\n" + return + + rc, msg = self.rpc_client.disconnect() + if rc: + print "[SUCCESS]\n" + else: + print msg + "\n" + + def postcmd(self, stop, line): + if self.rpc_client.is_connected(): + self.prompt = "TRex > " + else: + self.supported_rpc = None + self.prompt = "TRex (offline) > " + + return stop + + def default(self, line): + print 
"'{0}' is an unrecognized command. type 'help' or '?' for a list\n".format(line) + + def do_help (self, line): + '''Shows This Help Screen\n''' + if line: + try: + func = getattr(self, 'help_' + line) + except AttributeError: + try: + doc = getattr(self, 'do_' + line).__doc__ + if doc: + self.stdout.write("%s\n"%str(doc)) + return + except AttributeError: + pass + self.stdout.write("%s\n"%str(self.nohelp % (line,))) + return + func() + return + + print "\nSupported Console Commands:" + print "----------------------------\n" + + cmds = [x[3:] for x in self.get_names() if x.startswith("do_")] + for cmd in cmds: + if cmd == "EOF": + continue + + try: + doc = getattr(self, 'do_' + cmd).__doc__ + if doc: + help = str(doc) + else: + help = "*** Undocumented Function ***\n" + except AttributeError: + help = "*** Undocumented Function ***\n" + + print "{:<30} {:<30}".format(cmd + " - ", help) + + + # aliasing + do_exit = do_EOF = do_q = do_quit + +def setParserOptions (): + parser = argparse.ArgumentParser(prog="trex_console.py") + + parser.add_argument("-s", "--server", help = "T-Rex Server [default is localhost]", + default = "localhost", + type = str) + + parser.add_argument("-p", "--port", help = "T-Rex Server Port [default is 5050]\n", + default = 5050, + type = int) + + return parser + +def main (): + parser = setParserOptions() + options = parser.parse_args(sys.argv[1:]) + + # RPC client + rpc_client = JsonRpcClient(options.server, options.port) + + # console + try: + console = TrexConsole(rpc_client) + console.cmdloop() + except KeyboardInterrupt as e: + print "\n\n*** Caught Ctrl + C... Exiting...\n\n" + return + +if __name__ == '__main__': + main() + diff --git a/scripts/automation/trex_control_plane/console/trex_root_path.py b/scripts/automation/trex_control_plane/console/trex_root_path.py new file mode 100644 index 00000000..de4ec03b --- /dev/null +++ b/scripts/automation/trex_control_plane/console/trex_root_path.py @@ -0,0 +1,15 @@ +#!/router/bin/python + +import os +import sys + +def add_root_to_path (): + """adds trex_control_plane root dir to script path, up to `depth` parent dirs""" + root_dirname = 'trex_control_plane' + file_path = os.path.dirname(os.path.realpath(__file__)) + + components = file_path.split(os.sep) + sys.path.append( str.join(os.sep, components[:components.index(root_dirname)+1]) ) + return + +add_root_to_path() diff --git a/scripts/automation/trex_control_plane/console/trex_status.py b/scripts/automation/trex_control_plane/console/trex_status.py new file mode 100644 index 00000000..8ee669b5 --- /dev/null +++ b/scripts/automation/trex_control_plane/console/trex_status.py @@ -0,0 +1,212 @@ +from time import sleep + +import os + +import curses +from curses import panel +import random +import collections +import operator +import datetime + +g_curses_active = False + +# +def percentage (a, total): + x = int ((float(a) / total) * 100) + return str(x) + "%" + +# panel object +class TrexStatusPanel(): + def __init__ (self, h, l, y, x, headline): + self.h = h + self.l = l + self.y = y + self.x = x + self.headline = headline + + self.win = curses.newwin(h, l, y, x) + self.win.erase() + self.win.box() + + self.win.addstr(1, 2, headline, curses.A_UNDERLINE) + self.win.refresh() + + panel.new_panel(self.win) + self.panel = panel.new_panel(self.win) + self.panel.top() + + def clear (self): + self.win.erase() + self.win.box() + self.win.addstr(1, 2, self.headline, curses.A_UNDERLINE) + + def getwin (self): + return self.win + +def float_to_human_readable (size, suffix = 
"bps"): + for unit in ['','K','M','G']: + if abs(size) < 1024.0: + return "%3.1f %s%s" % (size, unit, suffix) + size /= 1024.0 + return "NaN" + +# status object +class TrexStatus(): + def __init__ (self, stdscr, rpc_client): + self.stdscr = stdscr + self.log = [] + self.rpc_client = rpc_client + + self.get_server_info() + + def get_server_info (self): + rc, msg = self.rpc_client.get_rpc_server_status() + + if rc: + self.server_status = msg + else: + self.server_status = None + + def add_log_event (self, msg): + self.log.append("[{0}] {1}".format(str(datetime.datetime.now().time()), msg)) + + def add_panel (self, h, l, y, x, headline): + win = curses.newwin(h, l, y, x) + win.erase() + win.box() + + win.addstr(1, 2, headline) + win.refresh() + + panel.new_panel(win) + panel1 = panel.new_panel(win) + panel1.top() + + return win, panel1 + + # static info panel + def update_info (self): + if self.server_status == None: + return + + self.info_panel.clear() + + connection_details = self.rpc_client.get_connection_details() + + self.info_panel.getwin().addstr(3, 2, "{:<30} {:30}".format("Server:", connection_details['server'] + ":" + str(connection_details['port']))) + self.info_panel.getwin().addstr(4, 2, "{:<30} {:30}".format("Version:", self.server_status["general"]["version"])) + self.info_panel.getwin().addstr(5, 2, "{:<30} {:30}".format("Build:", + self.server_status["general"]["build_date"] + " @ " + self.server_status["general"]["build_time"] + " by " + self.server_status["general"]["version_user"])) + + self.info_panel.getwin().addstr(6, 2, "{:<30} {:30}".format("Server Uptime:", self.server_status["general"]["uptime"])) + + # general stats + def update_general (self, gen_stats): + pass + + # control panel + def update_control (self): + self.control_panel.clear() + + self.control_panel.getwin().addstr(1, 2, "'f' - freeze, 'c' - clear stats, 'p' - ping server, 'q' - quit") + + index = 3 + + cut = len(self.log) - 4 + if cut < 0: + cut = 0 + + for l in self.log[cut:]: + self.control_panel.getwin().addstr(index, 2, l) + index += 1 + + def generate_layout (self): + self.max_y = self.stdscr.getmaxyx()[0] + self.max_x = self.stdscr.getmaxyx()[1] + + # create cls panel + self.main_panel = TrexStatusPanel(int(self.max_y * 0.8), self.max_x / 2, 0,0, "Trex Activity:") + + self.general_panel = TrexStatusPanel(int(self.max_y * 0.6), self.max_x / 2, 0, self.max_x /2, "General Statistics:") + + self.info_panel = TrexStatusPanel(int(self.max_y * 0.2), self.max_x / 2, int(self.max_y * 0.6), self.max_x /2, "Server Info:") + + self.control_panel = TrexStatusPanel(int(self.max_y * 0.2), self.max_x , int(self.max_y * 0.8), 0, "") + + panel.update_panels(); self.stdscr.refresh() + + def wait_for_key_input (self): + ch = self.stdscr.getch() + + if (ch != curses.ERR): + # stop/start status + if (ch == ord('f')): + self.update_active = not self.update_active + self.add_log_event("Update continued" if self.update_active else "Update stopped") + + elif (ch == ord('p')): + self.add_log_event("Pinging RPC server") + rc, msg = self.rpc_client.ping_rpc_server() + if rc: + self.add_log_event("Server replied: '{0}'".format(msg)) + else: + self.add_log_event("Failed to get reply") + + # c - clear stats + elif (ch == ord('c')): + self.add_log_event("Statistics cleared") + + elif (ch == ord('q')): + return False + else: + self.add_log_event("Unknown key pressed {0}".format("'" + chr(ch) + "'" if chr(ch).isalpha() else "")) + + return True + + # main run entry point + def run (self): + try: + curses.curs_set(0) + except: + 
pass + + curses.use_default_colors() + self.stdscr.nodelay(1) + curses.nonl() + curses.noecho() + + self.generate_layout() + + self.update_active = True + while (True): + + rc = self.wait_for_key_input() + if not rc: + break + + self.update_control() + self.update_info() + + panel.update_panels(); + self.stdscr.refresh() + sleep(0.1) + + +def show_trex_status_internal (stdscr, rpc_client): + trex_status = TrexStatus(stdscr, rpc_client) + trex_status.run() + +def show_trex_status (rpc_client): + + try: + curses.wrapper(show_trex_status_internal, rpc_client) + except KeyboardInterrupt: + curses.endwin() + +def cleanup (): + try: + curses.endwin() + except: + pass + diff --git a/scripts/automation/trex_control_plane/examples/client_interactive_example.py b/scripts/automation/trex_control_plane/examples/client_interactive_example.py index e8d358a9..10735221 100755 --- a/scripts/automation/trex_control_plane/examples/client_interactive_example.py +++ b/scripts/automation/trex_control_plane/examples/client_interactive_example.py @@ -4,7 +4,7 @@ import trex_root_path from client.trex_client import * from common.trex_exceptions import * import cmd -from python_lib.termstyle import termstyle +import termstyle import os from argparse import ArgumentParser from pprint import pprint @@ -23,14 +23,13 @@ class InteractiveTRexClient(cmd.Cmd): cmd.Cmd.__init__(self) self.verbose = verbose_mode self.trex = CTRexClient(trex_host, max_history_size, trex_daemon_port = trex_port, verbose = verbose_mode) - self.DEFAULT_RUN_PARAMS = dict(c = 4, - m = 1.5, + self.DEFAULT_RUN_PARAMS = dict( m = 1.5, nc = True, p = True, d = 100, f = 'avl/sfr_delay_10_1g.yaml', l = 1000) - self.run_params = self.DEFAULT_RUN_PARAMS + self.run_params = dict(self.DEFAULT_RUN_PARAMS) self.decoder = json.JSONDecoder() @@ -112,7 +111,7 @@ class InteractiveTRexClient(cmd.Cmd): def do_restore_run_default (self, line): """Restores original T-Rex running configuration""" - self.run_params = self.DEFAULT_RUN_PARAMS + self.run_params = dict(self.DEFAULT_RUN_PARAMS) print termstyle.green("*** End of restoring default run parameters ***") def do_run_until_finish (self, sample_rate): diff --git a/scripts/automation/trex_control_plane/python_lib/jsonrpclib-0.1.3/PKG-INFO b/scripts/automation/trex_control_plane/python_lib/jsonrpclib-0.1.3/PKG-INFO deleted file mode 100755 index 7082747b..00000000 --- a/scripts/automation/trex_control_plane/python_lib/jsonrpclib-0.1.3/PKG-INFO +++ /dev/null @@ -1,10 +0,0 @@ -Metadata-Version: 1.0 -Name: jsonrpclib -Version: 0.1.3 -Summary: This project is an implementation of the JSON-RPC v2.0 specification (backwards-compatible) as a client library. -Home-page: http://github.com/joshmarshall/jsonrpclib/ -Author: Josh Marshall -Author-email: catchjosh@gmail.com -License: http://www.apache.org/licenses/LICENSE-2.0 -Description: UNKNOWN -Platform: UNKNOWN diff --git a/scripts/automation/trex_control_plane/python_lib/jsonrpclib-0.1.3/README.txt b/scripts/automation/trex_control_plane/python_lib/jsonrpclib-0.1.3/README.txt deleted file mode 100755 index 9d431a48..00000000 --- a/scripts/automation/trex_control_plane/python_lib/jsonrpclib-0.1.3/README.txt +++ /dev/null @@ -1,203 +0,0 @@ -JSONRPClib -========== -This library is an implementation of the JSON-RPC specification. -It supports both the original 1.0 specification, as well as the -new (proposed) 2.0 spec, which includes batch submission, keyword -arguments, etc. 
- -It is licensed under the Apache License, Version 2.0 -(http://www.apache.org/licenses/LICENSE-2.0.html). - -Communication -------------- -Feel free to send any questions, comments, or patches to our Google Group -mailing list (you'll need to join to send a message): -http://groups.google.com/group/jsonrpclib - -Summary -------- -This library implements the JSON-RPC 2.0 proposed specification in pure Python. -It is designed to be as compatible with the syntax of xmlrpclib as possible -(it extends where possible), so that projects using xmlrpclib could easily be -modified to use JSON and experiment with the differences. - -It is backwards-compatible with the 1.0 specification, and supports all of the -new proposed features of 2.0, including: - -* Batch submission (via MultiCall) -* Keyword arguments -* Notifications (both in a batch and 'normal') -* Class translation using the 'jsonclass' key. - -I've added a "SimpleJSONRPCServer", which is intended to emulate the -"SimpleXMLRPCServer" from the default Python distribution. - -Requirements ------------- -It supports cjson and simplejson, and looks for the parsers in that order -(searching first for cjson, then for the "built-in" simplejson as json in 2.6+, -and then the simplejson external library). One of these must be installed to -use this library, although if you have a standard distribution of 2.6+, you -should already have one. Keep in mind that cjson is supposed to be the -quickest, I believe, so if you are going for full-on optimization you may -want to pick it up. - -Client Usage ------------- - -This is (obviously) taken from a console session. - - >>> import jsonrpclib - >>> server = jsonrpclib.Server('http://localhost:8080') - >>> server.add(5,6) - 11 - >>> print jsonrpclib.history.request - {"jsonrpc": "2.0", "params": [5, 6], "id": "gb3c9g37", "method": "add"} - >>> print jsonrpclib.history.response - {'jsonrpc': '2.0', 'result': 11, 'id': 'gb3c9g37'} - >>> server.add(x=5, y=10) - 15 - >>> server._notify.add(5,6) - # No result returned... - >>> batch = jsonrpclib.MultiCall(server) - >>> batch.add(5, 6) - >>> batch.ping({'key':'value'}) - >>> batch._notify.add(4, 30) - >>> results = batch() - >>> for result in results: - >>> ... print result - 11 - {'key': 'value'} - # Note that there are only two responses -- this is according to spec. - -If you need 1.0 functionality, there are a bunch of places you can pass that -in, although the best is just to change the value on -jsonrpclib.config.version: - - >>> import jsonrpclib - >>> jsonrpclib.config.version - 2.0 - >>> jsonrpclib.config.version = 1.0 - >>> server = jsonrpclib.Server('http://localhost:8080') - >>> server.add(7, 10) - 17 - >>> print jsonrpclib..history.request - {"params": [7, 10], "id": "thes7tl2", "method": "add"} - >>> print jsonrpclib.history.response - {'id': 'thes7tl2', 'result': 17, 'error': None} - >>> - -The equivalent loads and dumps functions also exist, although with minor -modifications. The dumps arguments are almost identical, but it adds three -arguments: rpcid for the 'id' key, version to specify the JSON-RPC -compatibility, and notify if it's a request that you want to be a -notification. - -Additionally, the loads method does not return the params and method like -xmlrpclib, but instead a.) parses for errors, raising ProtocolErrors, and -b.) returns the entire structure of the request / response for manual parsing. 
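
As a concrete sketch of the dumps/loads behavior just described — assuming the 0.1.3 API from the sources removed below; the method name and arguments are made up:

    from jsonrpclib import dumps, loads

    # dumps() extends xmlrpclib's version with rpcid, version and notify
    request = dumps([5, 6], 'add', rpcid='abc123', version=2.0)

    # loads() hands back the entire request/response structure as a dict,
    # instead of the (params, method) pair xmlrpclib returns
    parsed = loads(request)
    print parsed['method'], parsed['params']   # add [5, 6]
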
- -SimpleJSONRPCServer -------------------- -This is identical in usage (or should be) to the SimpleXMLRPCServer in the default Python install. Some of the differences in features are that it obviously supports notification, batch calls, class translation (if left on), etc. Note: The import line is slightly different from the regular SimpleXMLRPCServer, since the SimpleJSONRPCServer is distributed within the jsonrpclib library. - - from jsonrpclib.SimpleJSONRPCServer import SimpleJSONRPCServer - - server = SimpleJSONRPCServer(('localhost', 8080)) - server.register_function(pow) - server.register_function(lambda x,y: x+y, 'add') - server.register_function(lambda x: x, 'ping') - server.serve_forever() - -Class Translation ------------------ -I've recently added "automatic" class translation support, although it is -turned off by default. This can be devastatingly slow if improperly used, so -the following is just a short list of things to keep in mind when using it. - -* Keep It (the object) Simple Stupid. (for exceptions, keep reading.) -* Do not require init params (for exceptions, keep reading) -* Getter properties without setters could be dangerous (read: not tested) - -If any of the above are issues, use the _serialize method. (see usage below) -The server and client must BOTH have use_jsonclass configuration item on and -they must both have access to the same libraries used by the objects for -this to work. - -If you have excessively nested arguments, it would be better to turn off the -translation and manually invoke it on specific objects using -jsonrpclib.jsonclass.dump / jsonrpclib.jsonclass.load (since the default -behavior recursively goes through attributes and lists / dicts / tuples). - -[test_obj.py] - - # This object is /very/ simple, and the system will look through the - # attributes and serialize what it can. - class TestObj(object): - foo = 'bar' - - # This object requires __init__ params, so it uses the _serialize method - # and returns a tuple of init params and attribute values (the init params - # can be a dict or a list, but the attribute values must be a dict.) - class TestSerial(object): - foo = 'bar' - def __init__(self, *args): - self.args = args - def _serialize(self): - return (self.args, {'foo':self.foo,}) - -[usage] - - import jsonrpclib - import test_obj - - jsonrpclib.config.use_jsonclass = True - - testobj1 = test_obj.TestObj() - testobj2 = test_obj.TestSerial() - server = jsonrpclib.Server('http://localhost:8080') - # The 'ping' just returns whatever is sent - ping1 = server.ping(testobj1) - ping2 = server.ping(testobj2) - print jsonrpclib.history.request - # {"jsonrpc": "2.0", "params": [{"__jsonclass__": ["test_obj.TestSerial", ["foo"]]}], "id": "a0l976iv", "method": "ping"} - print jsonrpclib.history.result - # {'jsonrpc': '2.0', 'result': <test_obj.TestSerial object at 0x2744590>, 'id': 'a0l976iv'} - -To turn on this behaviour, just set jsonrpclib.config.use_jsonclass to True. -If you want to use a different method for serialization, just set -jsonrpclib.config.serialize_method to the method name. Finally, if you are -using classes that you have defined in the implementation (as in, not a -separate library), you'll need to add those (on BOTH the server and the -client) using the jsonrpclib.config.classes.add() method. -(Examples forthcoming.) - -Feedback on this "feature" is very, VERY much appreciated. - -Why JSON-RPC? 
-------------- -In my opinion, there are several reasons to choose JSON over XML for RPC: - -* Much simpler to read (I suppose this is opinion, but I know I'm right. :) -* Size / Bandwidth - Main reason, a JSON object representation is just much smaller. -* Parsing - JSON should be much quicker to parse than XML. -* Easy class passing with jsonclass (when enabled) - -In the interest of being fair, there are also a few reasons to choose XML -over JSON: - -* Your server doesn't do JSON (rather obvious) -* Wider XML-RPC support across APIs (can we change this? :)) -* Libraries are more established, i.e. more stable (Let's change this too.) - -TESTS ------ -I've dropped almost-verbatim tests from the JSON-RPC spec 2.0 page. -You can run it with: - - python tests.py - -TODO ----- -* Use HTTP error codes on SimpleJSONRPCServer -* Test, test, test and optimize
\ No newline at end of file diff --git a/scripts/automation/trex_control_plane/python_lib/jsonrpclib-0.1.3/jsonrpclib/SimpleJSONRPCServer.py b/scripts/automation/trex_control_plane/python_lib/jsonrpclib-0.1.3/jsonrpclib/SimpleJSONRPCServer.py deleted file mode 100755 index d76da73e..00000000 --- a/scripts/automation/trex_control_plane/python_lib/jsonrpclib-0.1.3/jsonrpclib/SimpleJSONRPCServer.py +++ /dev/null @@ -1,229 +0,0 @@ -import jsonrpclib -from jsonrpclib import Fault -from jsonrpclib.jsonrpc import USE_UNIX_SOCKETS -import SimpleXMLRPCServer -import SocketServer -import socket -import logging -import os -import types -import traceback -import sys -try: - import fcntl -except ImportError: - # For Windows - fcntl = None - -def get_version(request): - # must be a dict - if 'jsonrpc' in request.keys(): - return 2.0 - if 'id' in request.keys(): - return 1.0 - return None - -def validate_request(request): - if type(request) is not types.DictType: - fault = Fault( - -32600, 'Request must be {}, not %s.' % type(request) - ) - return fault - rpcid = request.get('id', None) - version = get_version(request) - if not version: - fault = Fault(-32600, 'Request %s invalid.' % request, rpcid=rpcid) - return fault - request.setdefault('params', []) - method = request.get('method', None) - params = request.get('params') - param_types = (types.ListType, types.DictType, types.TupleType) - if not method or type(method) not in types.StringTypes or \ - type(params) not in param_types: - fault = Fault( - -32600, 'Invalid request parameters or method.', rpcid=rpcid - ) - return fault - return True - -class SimpleJSONRPCDispatcher(SimpleXMLRPCServer.SimpleXMLRPCDispatcher): - - def __init__(self, encoding=None): - SimpleXMLRPCServer.SimpleXMLRPCDispatcher.__init__(self, - allow_none=True, - encoding=encoding) - - def _marshaled_dispatch(self, data, dispatch_method = None): - response = None - try: - request = jsonrpclib.loads(data) - except Exception, e: - fault = Fault(-32700, 'Request %s invalid. 
(%s)' % (data, e)) - response = fault.response() - return response - if not request: - fault = Fault(-32600, 'Request invalid -- no request data.') - return fault.response() - if type(request) is types.ListType: - # This SHOULD be a batch, by spec - responses = [] - for req_entry in request: - result = validate_request(req_entry) - if type(result) is Fault: - responses.append(result.response()) - continue - resp_entry = self._marshaled_single_dispatch(req_entry) - if resp_entry is not None: - responses.append(resp_entry) - if len(responses) > 0: - response = '[%s]' % ','.join(responses) - else: - response = '' - else: - result = validate_request(request) - if type(result) is Fault: - return result.response() - response = self._marshaled_single_dispatch(request) - return response - - def _marshaled_single_dispatch(self, request): - # TODO - Use the multiprocessing and skip the response if - # it is a notification - # Put in support for custom dispatcher here - # (See SimpleXMLRPCServer._marshaled_dispatch) - method = request.get('method') - params = request.get('params') - try: - response = self._dispatch(method, params) - except: - exc_type, exc_value, exc_tb = sys.exc_info() - fault = Fault(-32603, '%s:%s' % (exc_type, exc_value)) - return fault.response() - if 'id' not in request.keys() or request['id'] == None: - # It's a notification - return None - try: - response = jsonrpclib.dumps(response, - methodresponse=True, - rpcid=request['id'] - ) - return response - except: - exc_type, exc_value, exc_tb = sys.exc_info() - fault = Fault(-32603, '%s:%s' % (exc_type, exc_value)) - return fault.response() - - def _dispatch(self, method, params): - func = None - try: - func = self.funcs[method] - except KeyError: - if self.instance is not None: - if hasattr(self.instance, '_dispatch'): - return self.instance._dispatch(method, params) - else: - try: - func = SimpleXMLRPCServer.resolve_dotted_attribute( - self.instance, - method, - True - ) - except AttributeError: - pass - if func is not None: - try: - if type(params) is types.ListType: - response = func(*params) - else: - response = func(**params) - return response - except TypeError: - return Fault(-32602, 'Invalid parameters.') - except: - err_lines = traceback.format_exc().splitlines() - trace_string = '%s | %s' % (err_lines[-3], err_lines[-1]) - fault = jsonrpclib.Fault(-32603, 'Server error: %s' % - trace_string) - return fault - else: - return Fault(-32601, 'Method %s not supported.' 
% method) - -class SimpleJSONRPCRequestHandler( - SimpleXMLRPCServer.SimpleXMLRPCRequestHandler): - - def do_POST(self): - if not self.is_rpc_path_valid(): - self.report_404() - return - try: - max_chunk_size = 10*1024*1024 - size_remaining = int(self.headers["content-length"]) - L = [] - while size_remaining: - chunk_size = min(size_remaining, max_chunk_size) - L.append(self.rfile.read(chunk_size)) - size_remaining -= len(L[-1]) - data = ''.join(L) - response = self.server._marshaled_dispatch(data) - self.send_response(200) - except Exception, e: - self.send_response(500) - err_lines = traceback.format_exc().splitlines() - trace_string = '%s | %s' % (err_lines[-3], err_lines[-1]) - fault = jsonrpclib.Fault(-32603, 'Server error: %s' % trace_string) - response = fault.response() - if response == None: - response = '' - self.send_header("Content-type", "application/json-rpc") - self.send_header("Content-length", str(len(response))) - self.end_headers() - self.wfile.write(response) - self.wfile.flush() - self.connection.shutdown(1) - -class SimpleJSONRPCServer(SocketServer.TCPServer, SimpleJSONRPCDispatcher): - - allow_reuse_address = True - - def __init__(self, addr, requestHandler=SimpleJSONRPCRequestHandler, - logRequests=True, encoding=None, bind_and_activate=True, - address_family=socket.AF_INET): - self.logRequests = logRequests - SimpleJSONRPCDispatcher.__init__(self, encoding) - # TCPServer.__init__ has an extra parameter on 2.6+, so - # check Python version and decide on how to call it - vi = sys.version_info - self.address_family = address_family - if USE_UNIX_SOCKETS and address_family == socket.AF_UNIX: - # Unix sockets can't be bound if they already exist in the - # filesystem. The convention of e.g. X11 is to unlink - # before binding again. 
- if os.path.exists(addr): - try: - os.unlink(addr) - except OSError: - logging.warning("Could not unlink socket %s", addr) - # if python 2.5 and lower - if vi[0] < 3 and vi[1] < 6: - SocketServer.TCPServer.__init__(self, addr, requestHandler) - else: - SocketServer.TCPServer.__init__(self, addr, requestHandler, - bind_and_activate) - if fcntl is not None and hasattr(fcntl, 'FD_CLOEXEC'): - flags = fcntl.fcntl(self.fileno(), fcntl.F_GETFD) - flags |= fcntl.FD_CLOEXEC - fcntl.fcntl(self.fileno(), fcntl.F_SETFD, flags) - -class CGIJSONRPCRequestHandler(SimpleJSONRPCDispatcher): - - def __init__(self, encoding=None): - SimpleJSONRPCDispatcher.__init__(self, encoding) - - def handle_jsonrpc(self, request_text): - response = self._marshaled_dispatch(request_text) - print 'Content-Type: application/json-rpc' - print 'Content-Length: %d' % len(response) - print - sys.stdout.write(response) - - handle_xmlrpc = handle_jsonrpc diff --git a/scripts/automation/trex_control_plane/python_lib/jsonrpclib-0.1.3/jsonrpclib/__init__.py b/scripts/automation/trex_control_plane/python_lib/jsonrpclib-0.1.3/jsonrpclib/__init__.py deleted file mode 100755 index 6e884b83..00000000 --- a/scripts/automation/trex_control_plane/python_lib/jsonrpclib-0.1.3/jsonrpclib/__init__.py +++ /dev/null @@ -1,6 +0,0 @@ -from jsonrpclib.config import Config -config = Config.instance() -from jsonrpclib.history import History -history = History.instance() -from jsonrpclib.jsonrpc import Server, MultiCall, Fault -from jsonrpclib.jsonrpc import ProtocolError, loads, dumps diff --git a/scripts/automation/trex_control_plane/python_lib/jsonrpclib-0.1.3/jsonrpclib/config.py b/scripts/automation/trex_control_plane/python_lib/jsonrpclib-0.1.3/jsonrpclib/config.py deleted file mode 100755 index 4d28f1b1..00000000 --- a/scripts/automation/trex_control_plane/python_lib/jsonrpclib-0.1.3/jsonrpclib/config.py +++ /dev/null @@ -1,38 +0,0 @@ -import sys - -class LocalClasses(dict): - def add(self, cls): - self[cls.__name__] = cls - -class Config(object): - """ - This is pretty much used exclusively for the 'jsonclass' - functionality... set use_jsonclass to False to turn it off. - You can change serialize_method and ignore_attribute, or use - the local_classes.add(class) to include "local" classes. - """ - use_jsonclass = True - # Change to False to keep __jsonclass__ entries raw. - serialize_method = '_serialize' - # The serialize_method should be a string that references the - # method on a custom class object which is responsible for - # returning a tuple of the constructor arguments and a dict of - # attributes. - ignore_attribute = '_ignore' - # The ignore attribute should be a string that references the - # attribute on a custom class object which holds strings and / or - # references of the attributes the class translator should ignore. - classes = LocalClasses() - # The list of classes to use for jsonclass translation. - version = 2.0 - # Version of the JSON-RPC spec to support - user_agent = 'jsonrpclib/0.1 (Python %s)' % \ - '.'.join([str(ver) for ver in sys.version_info[0:3]]) - # User agent to use for calls. 
- _instance = None - - @classmethod - def instance(cls): - if not cls._instance: - cls._instance = cls() - return cls._instance diff --git a/scripts/automation/trex_control_plane/python_lib/jsonrpclib-0.1.3/jsonrpclib/history.py b/scripts/automation/trex_control_plane/python_lib/jsonrpclib-0.1.3/jsonrpclib/history.py deleted file mode 100755 index d11863dc..00000000 --- a/scripts/automation/trex_control_plane/python_lib/jsonrpclib-0.1.3/jsonrpclib/history.py +++ /dev/null @@ -1,40 +0,0 @@ -class History(object): - """ - This holds all the response and request objects for a - session. A server using this should call "clear" after - each request cycle in order to keep it from clogging - memory. - """ - requests = [] - responses = [] - _instance = None - - @classmethod - def instance(cls): - if not cls._instance: - cls._instance = cls() - return cls._instance - - def add_response(self, response_obj): - self.responses.append(response_obj) - - def add_request(self, request_obj): - self.requests.append(request_obj) - - @property - def request(self): - if len(self.requests) == 0: - return None - else: - return self.requests[-1] - - @property - def response(self): - if len(self.responses) == 0: - return None - else: - return self.responses[-1] - - def clear(self): - del self.requests[:] - del self.responses[:] diff --git a/scripts/automation/trex_control_plane/python_lib/jsonrpclib-0.1.3/jsonrpclib/jsonclass.py b/scripts/automation/trex_control_plane/python_lib/jsonrpclib-0.1.3/jsonrpclib/jsonclass.py deleted file mode 100755 index 298c3da3..00000000 --- a/scripts/automation/trex_control_plane/python_lib/jsonrpclib-0.1.3/jsonrpclib/jsonclass.py +++ /dev/null @@ -1,145 +0,0 @@ -import types -import inspect -import re -import traceback - -from jsonrpclib import config - -iter_types = [ - types.DictType, - types.ListType, - types.TupleType -] - -string_types = [ - types.StringType, - types.UnicodeType -] - -numeric_types = [ - types.IntType, - types.LongType, - types.FloatType -] - -value_types = [ - types.BooleanType, - types.NoneType -] - -supported_types = iter_types+string_types+numeric_types+value_types -invalid_module_chars = r'[^a-zA-Z0-9\_\.]' - -class TranslationError(Exception): - pass - -def dump(obj, serialize_method=None, ignore_attribute=None, ignore=[]): - if not serialize_method: - serialize_method = config.serialize_method - if not ignore_attribute: - ignore_attribute = config.ignore_attribute - obj_type = type(obj) - # Parse / return default "types"... - if obj_type in numeric_types+string_types+value_types: - return obj - if obj_type in iter_types: - if obj_type in (types.ListType, types.TupleType): - new_obj = [] - for item in obj: - new_obj.append(dump(item, serialize_method, - ignore_attribute, ignore)) - if obj_type is types.TupleType: - new_obj = tuple(new_obj) - return new_obj - # It's a dict... - else: - new_obj = {} - for key, value in obj.iteritems(): - new_obj[key] = dump(value, serialize_method, - ignore_attribute, ignore) - return new_obj - # It's not a standard type, so it needs __jsonclass__ - module_name = inspect.getmodule(obj).__name__ - class_name = obj.__class__.__name__ - json_class = class_name - if module_name not in ['', '__main__']: - json_class = '%s.%s' % (module_name, json_class) - return_obj = {"__jsonclass__":[json_class,]} - # If a serialization method is defined.. - if serialize_method in dir(obj): - # Params can be a dict (keyword) or list (positional) - # Attrs MUST be a dict. 
- serialize = getattr(obj, serialize_method) - params, attrs = serialize() - return_obj['__jsonclass__'].append(params) - return_obj.update(attrs) - return return_obj - # Otherwise, try to figure it out - # Obviously, we can't assume to know anything about the - # parameters passed to __init__ - return_obj['__jsonclass__'].append([]) - attrs = {} - ignore_list = getattr(obj, ignore_attribute, [])+ignore - for attr_name, attr_value in obj.__dict__.iteritems(): - if type(attr_value) in supported_types and \ - attr_name not in ignore_list and \ - attr_value not in ignore_list: - attrs[attr_name] = dump(attr_value, serialize_method, - ignore_attribute, ignore) - return_obj.update(attrs) - return return_obj - -def load(obj): - if type(obj) in string_types+numeric_types+value_types: - return obj - if type(obj) is types.ListType: - return_list = [] - for entry in obj: - return_list.append(load(entry)) - return return_list - # Othewise, it's a dict type - if '__jsonclass__' not in obj.keys(): - return_dict = {} - for key, value in obj.iteritems(): - new_value = load(value) - return_dict[key] = new_value - return return_dict - # It's a dict, and it's a __jsonclass__ - orig_module_name = obj['__jsonclass__'][0] - params = obj['__jsonclass__'][1] - if orig_module_name == '': - raise TranslationError('Module name empty.') - json_module_clean = re.sub(invalid_module_chars, '', orig_module_name) - if json_module_clean != orig_module_name: - raise TranslationError('Module name %s has invalid characters.' % - orig_module_name) - json_module_parts = json_module_clean.split('.') - json_class = None - if len(json_module_parts) == 1: - # Local class name -- probably means it won't work - if json_module_parts[0] not in config.classes.keys(): - raise TranslationError('Unknown class or module %s.' % - json_module_parts[0]) - json_class = config.classes[json_module_parts[0]] - else: - json_class_name = json_module_parts.pop() - json_module_tree = '.'.join(json_module_parts) - try: - temp_module = __import__(json_module_tree) - except ImportError: - raise TranslationError('Could not import %s from module %s.' % - (json_class_name, json_module_tree)) - json_class = getattr(temp_module, json_class_name) - # Creating the object... - new_obj = None - if type(params) is types.ListType: - new_obj = json_class(*params) - elif type(params) is types.DictType: - new_obj = json_class(**params) - else: - raise TranslationError('Constructor args must be a dict or list.') - for key, value in obj.iteritems(): - if key == '__jsonclass__': - continue - setattr(new_obj, key, value) - return new_obj diff --git a/scripts/automation/trex_control_plane/python_lib/jsonrpclib-0.1.3/jsonrpclib/jsonrpc.py b/scripts/automation/trex_control_plane/python_lib/jsonrpclib-0.1.3/jsonrpclib/jsonrpc.py deleted file mode 100755 index e11939ae..00000000 --- a/scripts/automation/trex_control_plane/python_lib/jsonrpclib-0.1.3/jsonrpclib/jsonrpc.py +++ /dev/null @@ -1,556 +0,0 @@ -""" -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
- -============================ -JSONRPC Library (jsonrpclib) -============================ - -This library is a JSON-RPC v.2 (proposed) implementation which -follows the xmlrpclib API for portability between clients. It -uses the same Server / ServerProxy, loads, dumps, etc. syntax, -while providing features not present in XML-RPC like: - -* Keyword arguments -* Notifications -* Versioning -* Batches and batch notifications - -Eventually, I'll add a SimpleXMLRPCServer compatible library, -and other things to tie the thing off nicely. :) - -For a quick-start, just open a console and type the following, -replacing the server address, method, and parameters -appropriately. ->>> import jsonrpclib ->>> server = jsonrpclib.Server('http://localhost:8181') ->>> server.add(5, 6) -11 ->>> server._notify.add(5, 6) ->>> batch = jsonrpclib.MultiCall(server) ->>> batch.add(3, 50) ->>> batch.add(2, 3) ->>> batch._notify.add(3, 5) ->>> batch() -[53, 5] - -See http://code.google.com/p/jsonrpclib/ for more info. -""" - -import types -import sys -from xmlrpclib import Transport as XMLTransport -from xmlrpclib import SafeTransport as XMLSafeTransport -from xmlrpclib import ServerProxy as XMLServerProxy -from xmlrpclib import _Method as XML_Method -import time -import string -import random - -# Library includes -import jsonrpclib -from jsonrpclib import config -from jsonrpclib import history - -# JSON library importing -cjson = None -json = None -try: - import cjson -except ImportError: - try: - import json - except ImportError: - try: - import simplejson as json - except ImportError: - raise ImportError( - 'You must have the cjson, json, or simplejson ' + - 'module(s) available.' - ) - -IDCHARS = string.ascii_lowercase+string.digits - -class UnixSocketMissing(Exception): - """ - Just a properly named Exception if Unix Sockets usage is - attempted on a platform that doesn't support them (Windows) - """ - pass - -#JSON Abstractions - -def jdumps(obj, encoding='utf-8'): - # Do 'serialize' test at some point for other classes - global cjson - if cjson: - return cjson.encode(obj) - else: - return json.dumps(obj, encoding=encoding) - -def jloads(json_string): - global cjson - if cjson: - return cjson.decode(json_string) - else: - return json.loads(json_string) - - -# XMLRPClib re-implementations - -class ProtocolError(Exception): - pass - -class TransportMixIn(object): - """ Just extends the XMLRPC transport where necessary. 
""" - user_agent = config.user_agent - # for Python 2.7 support - _connection = None - - def send_content(self, connection, request_body): - connection.putheader("Content-Type", "application/json-rpc") - connection.putheader("Content-Length", str(len(request_body))) - connection.endheaders() - if request_body: - connection.send(request_body) - - def getparser(self): - target = JSONTarget() - return JSONParser(target), target - -class JSONParser(object): - def __init__(self, target): - self.target = target - - def feed(self, data): - self.target.feed(data) - - def close(self): - pass - -class JSONTarget(object): - def __init__(self): - self.data = [] - - def feed(self, data): - self.data.append(data) - - def close(self): - return ''.join(self.data) - -class Transport(TransportMixIn, XMLTransport): - pass - -class SafeTransport(TransportMixIn, XMLSafeTransport): - pass -from httplib import HTTP, HTTPConnection -from socket import socket - -USE_UNIX_SOCKETS = False - -try: - from socket import AF_UNIX, SOCK_STREAM - USE_UNIX_SOCKETS = True -except ImportError: - pass - -if (USE_UNIX_SOCKETS): - - class UnixHTTPConnection(HTTPConnection): - def connect(self): - self.sock = socket(AF_UNIX, SOCK_STREAM) - self.sock.connect(self.host) - - class UnixHTTP(HTTP): - _connection_class = UnixHTTPConnection - - class UnixTransport(TransportMixIn, XMLTransport): - def make_connection(self, host): - import httplib - host, extra_headers, x509 = self.get_host_info(host) - return UnixHTTP(host) - - -class ServerProxy(XMLServerProxy): - """ - Unfortunately, much more of this class has to be copied since - so much of it does the serialization. - """ - - def __init__(self, uri, transport=None, encoding=None, - verbose=0, version=None): - import urllib - if not version: - version = config.version - self.__version = version - schema, uri = urllib.splittype(uri) - if schema not in ('http', 'https', 'unix'): - raise IOError('Unsupported JSON-RPC protocol.') - if schema == 'unix': - if not USE_UNIX_SOCKETS: - # Don't like the "generic" Exception... - raise UnixSocketMissing("Unix sockets not available.") - self.__host = uri - self.__handler = '/' - else: - self.__host, self.__handler = urllib.splithost(uri) - if not self.__handler: - # Not sure if this is in the JSON spec? 
- #self.__handler = '/' - self.__handler == '/' - if transport is None: - if schema == 'unix': - transport = UnixTransport() - elif schema == 'https': - transport = SafeTransport() - else: - transport = Transport() - self.__transport = transport - self.__encoding = encoding - self.__verbose = verbose - - def _request(self, methodname, params, rpcid=None): - request = dumps(params, methodname, encoding=self.__encoding, - rpcid=rpcid, version=self.__version) - response = self._run_request(request) - check_for_errors(response) - return response['result'] - - def _request_notify(self, methodname, params, rpcid=None): - request = dumps(params, methodname, encoding=self.__encoding, - rpcid=rpcid, version=self.__version, notify=True) - response = self._run_request(request, notify=True) - check_for_errors(response) - return - - def _run_request(self, request, notify=None): - history.add_request(request) - - response = self.__transport.request( - self.__host, - self.__handler, - request, - verbose=self.__verbose - ) - - # Here, the XMLRPC library translates a single list - # response to the single value -- should we do the - # same, and require a tuple / list to be passed to - # the response object, or expect the Server to be - # outputting the response appropriately? - - history.add_response(response) - if not response: - return None - return_obj = loads(response) - return return_obj - - def __getattr__(self, name): - # Same as original, just with new _Method reference - return _Method(self._request, name) - - @property - def _notify(self): - # Just like __getattr__, but with notify namespace. - return _Notify(self._request_notify) - - -class _Method(XML_Method): - - def __call__(self, *args, **kwargs): - if len(args) > 0 and len(kwargs) > 0: - raise ProtocolError('Cannot use both positional ' + - 'and keyword arguments (according to JSON-RPC spec.)') - if len(args) > 0: - return self.__send(self.__name, args) - else: - return self.__send(self.__name, kwargs) - - def __getattr__(self, name): - self.__name = '%s.%s' % (self.__name, name) - return self - # The old method returned a new instance, but this seemed wasteful. - # The only thing that changes is the name. 
- #return _Method(self.__send, "%s.%s" % (self.__name, name)) - -class _Notify(object): - def __init__(self, request): - self._request = request - - def __getattr__(self, name): - return _Method(self._request, name) - -# Batch implementation - -class MultiCallMethod(object): - - def __init__(self, method, notify=False): - self.method = method - self.params = [] - self.notify = notify - - def __call__(self, *args, **kwargs): - if len(kwargs) > 0 and len(args) > 0: - raise ProtocolError('JSON-RPC does not support both ' + - 'positional and keyword arguments.') - if len(kwargs) > 0: - self.params = kwargs - else: - self.params = args - - def request(self, encoding=None, rpcid=None): - return dumps(self.params, self.method, version=2.0, - encoding=encoding, rpcid=rpcid, notify=self.notify) - - def __repr__(self): - return '%s' % self.request() - - def __getattr__(self, method): - new_method = '%s.%s' % (self.method, method) - self.method = new_method - return self - -class MultiCallNotify(object): - - def __init__(self, multicall): - self.multicall = multicall - - def __getattr__(self, name): - new_job = MultiCallMethod(name, notify=True) - self.multicall._job_list.append(new_job) - return new_job - -class MultiCallIterator(object): - - def __init__(self, results): - self.results = results - - def __iter__(self): - for i in range(0, len(self.results)): - yield self[i] - raise StopIteration - - def __getitem__(self, i): - item = self.results[i] - check_for_errors(item) - return item['result'] - - def __len__(self): - return len(self.results) - -class MultiCall(object): - - def __init__(self, server): - self._server = server - self._job_list = [] - - def _request(self): - if len(self._job_list) < 1: - # Should we alert? This /is/ pretty obvious. - return - request_body = '[ %s ]' % ','.join([job.request() for - job in self._job_list]) - responses = self._server._run_request(request_body) - del self._job_list[:] - if not responses: - responses = [] - return MultiCallIterator(responses) - - @property - def _notify(self): - return MultiCallNotify(self) - - def __getattr__(self, name): - new_job = MultiCallMethod(name) - self._job_list.append(new_job) - return new_job - - __call__ = _request - -# These lines conform to xmlrpclib's "compatibility" line. -# Not really sure if we should include these, but oh well. 
-Server = ServerProxy - -class Fault(object): - # JSON-RPC error class - def __init__(self, code=-32000, message='Server error', rpcid=None): - self.faultCode = code - self.faultString = message - self.rpcid = rpcid - - def error(self): - return {'code':self.faultCode, 'message':self.faultString} - - def response(self, rpcid=None, version=None): - if not version: - version = config.version - if rpcid: - self.rpcid = rpcid - return dumps( - self, methodresponse=True, rpcid=self.rpcid, version=version - ) - - def __repr__(self): - return '<Fault %s: %s>' % (self.faultCode, self.faultString) - -def random_id(length=8): - return_id = '' - for i in range(length): - return_id += random.choice(IDCHARS) - return return_id - -class Payload(dict): - def __init__(self, rpcid=None, version=None): - if not version: - version = config.version - self.id = rpcid - self.version = float(version) - - def request(self, method, params=[]): - if type(method) not in types.StringTypes: - raise ValueError('Method name must be a string.') - if not self.id: - self.id = random_id() - request = { 'id':self.id, 'method':method } - if params: - request['params'] = params - if self.version >= 2: - request['jsonrpc'] = str(self.version) - return request - - def notify(self, method, params=[]): - request = self.request(method, params) - if self.version >= 2: - del request['id'] - else: - request['id'] = None - return request - - def response(self, result=None): - response = {'result':result, 'id':self.id} - if self.version >= 2: - response['jsonrpc'] = str(self.version) - else: - response['error'] = None - return response - - def error(self, code=-32000, message='Server error.'): - error = self.response() - if self.version >= 2: - del error['result'] - else: - error['result'] = None - error['error'] = {'code':code, 'message':message} - return error - -def dumps(params=[], methodname=None, methodresponse=None, - encoding=None, rpcid=None, version=None, notify=None): - """ - This differs from the Python implementation in that it implements - the rpcid argument since the 2.0 spec requires it for responses. - """ - if not version: - version = config.version - valid_params = (types.TupleType, types.ListType, types.DictType) - if methodname in types.StringTypes and \ - type(params) not in valid_params and \ - not isinstance(params, Fault): - """ - If a method, and params are not in a listish or a Fault, - error out. 
- """ - raise TypeError('Params must be a dict, list, tuple or Fault ' + - 'instance.') - # Begin parsing object - payload = Payload(rpcid=rpcid, version=version) - if not encoding: - encoding = 'utf-8' - if type(params) is Fault: - response = payload.error(params.faultCode, params.faultString) - return jdumps(response, encoding=encoding) - if type(methodname) not in types.StringTypes and methodresponse != True: - raise ValueError('Method name must be a string, or methodresponse '+ - 'must be set to True.') - if config.use_jsonclass == True: - from jsonrpclib import jsonclass - params = jsonclass.dump(params) - if methodresponse is True: - if rpcid is None: - raise ValueError('A method response must have an rpcid.') - response = payload.response(params) - return jdumps(response, encoding=encoding) - request = None - if notify == True: - request = payload.notify(methodname, params) - else: - request = payload.request(methodname, params) - return jdumps(request, encoding=encoding) - -def loads(data): - """ - This differs from the Python implementation, in that it returns - the request structure in Dict format instead of the method, params. - It will return a list in the case of a batch request / response. - """ - if data == '': - # notification - return None - result = jloads(data) - # if the above raises an error, the implementing server code - # should return something like the following: - # { 'jsonrpc':'2.0', 'error': fault.error(), id: None } - if config.use_jsonclass == True: - from jsonrpclib import jsonclass - result = jsonclass.load(result) - return result - -def check_for_errors(result): - if not result: - # Notification - return result - if type(result) is not types.DictType: - raise TypeError('Response is not a dict.') - if 'jsonrpc' in result.keys() and float(result['jsonrpc']) > 2.0: - raise NotImplementedError('JSON-RPC version not yet supported.') - if 'result' not in result.keys() and 'error' not in result.keys(): - raise ValueError('Response does not have a result or error key.') - if 'error' in result.keys() and result['error'] != None: - code = result['error']['code'] - message = result['error']['message'] - raise ProtocolError((code, message)) - return result - -def isbatch(result): - if type(result) not in (types.ListType, types.TupleType): - return False - if len(result) < 1: - return False - if type(result[0]) is not types.DictType: - return False - if 'jsonrpc' not in result[0].keys(): - return False - try: - version = float(result[0]['jsonrpc']) - except ValueError: - raise ProtocolError('"jsonrpc" key must be a float(able) value.') - if version < 2: - return False - return True - -def isnotification(request): - if 'id' not in request.keys(): - # 2.0 notification - return True - if request['id'] == None: - # 1.0 notification - return True - return False diff --git a/scripts/automation/trex_control_plane/python_lib/jsonrpclib-0.1.3/setup.py b/scripts/automation/trex_control_plane/python_lib/jsonrpclib-0.1.3/setup.py deleted file mode 100755 index 569b6367..00000000 --- a/scripts/automation/trex_control_plane/python_lib/jsonrpclib-0.1.3/setup.py +++ /dev/null @@ -1,28 +0,0 @@ -#!/usr/bin/env/python -""" -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -""" - -import distutils.core - -distutils.core.setup( - name = "jsonrpclib", - version = "0.1.3", - packages = ["jsonrpclib"], - author = "Josh Marshall", - author_email = "catchjosh@gmail.com", - url = "http://github.com/joshmarshall/jsonrpclib/", - license = "http://www.apache.org/licenses/LICENSE-2.0", - description = "This project is an implementation of the JSON-RPC v2.0 " + - "specification (backwards-compatible) as a client library.", -) diff --git a/scripts/automation/trex_control_plane/python_lib/jsonrpclib-pelix-0.2.5/LICENSE.txt b/scripts/automation/trex_control_plane/python_lib/jsonrpclib-pelix-0.2.5/LICENSE.txt deleted file mode 100755 index eb0864bd..00000000 --- a/scripts/automation/trex_control_plane/python_lib/jsonrpclib-pelix-0.2.5/LICENSE.txt +++ /dev/null @@ -1,11 +0,0 @@ -Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
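Stepping back from the license boilerplate: the Payload and dumps/loads helpers shown earlier reduce to four message shapes on the wire. The literals below are illustrative (the id is the kind of 8-character string random_id() produces):

    # JSON-RPC 2.0 shapes built by Payload.request / notify / response / error
    request      = {'jsonrpc': '2.0', 'id': 'a3f7Kq9Z',
                    'method': 'subtract', 'params': [42, 23]}
    notification = {'jsonrpc': '2.0', 'method': 'subtract', 'params': [42, 23]}  # no id
    response     = {'jsonrpc': '2.0', 'id': 'a3f7Kq9Z', 'result': 19}
    error        = {'jsonrpc': '2.0', 'id': 'a3f7Kq9Z',
                    'error': {'code': -32000, 'message': 'Server error'}}

A Fault serializes to the error shape via payload.error(), and check_for_errors() raises ProtocolError((code, message)) when it sees one; loads() deliberately hands these dicts back unparsed instead of unpacking method and params the way xmlrpclib does.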
diff --git a/scripts/automation/trex_control_plane/python_lib/rednose-0.4.1/rednose.py b/scripts/automation/trex_control_plane/python_lib/rednose-0.4.1/rednose.py deleted file mode 100755 index 1ff892ad..00000000 --- a/scripts/automation/trex_control_plane/python_lib/rednose-0.4.1/rednose.py +++ /dev/null @@ -1,387 +0,0 @@ -# Copyright (c) 2009, Tim Cuthbertson # All rights reserved. -# -# Redistribution and use in source and binary forms, with or without -# modification, are permitted provided that the following conditions -# are met: -# -# * Redistributions of source code must retain the above copyright -# notice, this list of conditions and the following disclaimer. -# * Redistributions in binary form must reproduce the above -# copyright notice, this list of conditions and the following -# disclaimer in the documentation and/or other materials provided -# with the distribution. -# * Neither the name of the organisation nor the names of its -# contributors may be used to endorse or promote products derived -# from this software without specific prior written permission. -# -# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS -# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE -# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, -# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, -# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS -# OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED -# AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT -# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY -# WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE -# POSSIBILITY OF SUCH DAMAGE. 
- -from __future__ import print_function -import os -import sys -import linecache -import re -import time - -import nose - -import termstyle - -failure = 'FAILED' -error = 'ERROR' -success = 'passed' -skip = 'skipped' -line_length = 77 - -PY3 = sys.version_info[0] >= 3 -if PY3: - to_unicode = str -else: - def to_unicode(s): - try: - return unicode(s) - except UnicodeDecodeError: - return unicode(repr(str(s))) - -BLACKLISTED_WRITERS = [ - 'nose[\\/]result\\.pyc?$', - 'unittest[\\/]runner\\.pyc?$' -] -REDNOSE_DEBUG = False - - -class RedNose(nose.plugins.Plugin): - env_opt = 'NOSE_REDNOSE' - env_opt_color = 'NOSE_REDNOSE_COLOR' - score = 199 # just under the `coverage` module - - def __init__(self, *args): - super(RedNose, self).__init__(*args) - self.reports = [] - self.error = self.success = self.failure = self.skip = 0 - self.total = 0 - self.stream = None - self.verbose = False - self.enabled = False - self.tree = False - - def options(self, parser, env=os.environ): - global REDNOSE_DEBUG - rednose_on = bool(env.get(self.env_opt, False)) - rednose_color = env.get(self.env_opt_color, 'auto') - REDNOSE_DEBUG = bool(env.get('REDNOSE_DEBUG', False)) - - parser.add_option( - "--rednose", - action="store_true", - default=rednose_on, - dest="rednose", - help="enable colour output (alternatively, set $%s=1)" % (self.env_opt,) - ) - parser.add_option( - "--no-color", - action="store_false", - dest="rednose", - help="disable colour output" - ) - parser.add_option( - "--force-color", - action="store_const", - dest='rednose_color', - default=rednose_color, - const='force', - help="force colour output when not using a TTY (alternatively, set $%s=force)" % (self.env_opt_color,) - ) - parser.add_option( - "--immediate", - action="store_true", - default=False, - help="print errors and failures as they happen, as well as at the end" - ) - - def configure(self, options, conf): - if options.rednose: - self.enabled = True - termstyle_init = { - 'force': termstyle.enable, - 'off': termstyle.disable - }.get(options.rednose_color, termstyle.auto) - termstyle_init() - - self.immediate = options.immediate - self.verbose = options.verbosity >= 2 - - def begin(self): - self.start_time = time.time() - self._in_test = False - - def _format_test_name(self, test): - return test.shortDescription() or to_unicode(test) - - def prepareTestResult(self, result): - result.stream = FilteringStream(self.stream, BLACKLISTED_WRITERS) - - def beforeTest(self, test): - self._in_test = True - if self.verbose: - self._out(self._format_test_name(test) + ' ... ') - - def afterTest(self, test): - if self._in_test: - self.addSkip() - - def _print_test(self, type_, color): - self.total += 1 - if self.verbose: - self._outln(color(type_)) - else: - if type_ == failure: - short_ = 'F' - elif type_ == error: - short_ = 'X' - elif type_ == skip: - short_ = '-' - else: - short_ = '.' 
- self._out(color(short_)) - if self.total % line_length == 0: - self._outln() - self._in_test = False - - def _add_report(self, report): - failure_type, test, err = report - self.reports.append(report) - if self.immediate: - self._outln() - self._report_test(len(self.reports), *report) - - def addFailure(self, test, err): - self.failure += 1 - self._add_report((failure, test, err)) - self._print_test(failure, termstyle.red) - - def addError(self, test, err): - if err[0].__name__ == 'SkipTest': - self.addSkip(test, err) - return - self.error += 1 - self._add_report((error, test, err)) - self._print_test(error, termstyle.yellow) - - def addSuccess(self, test): - self.success += 1 - self._print_test(success, termstyle.green) - - def addSkip(self, test=None, err=None): - self.skip += 1 - self._print_test(skip, termstyle.blue) - - def setOutputStream(self, stream): - self.stream = stream - - def report(self, stream): - """report on all registered failures and errors""" - self._outln() - if self.immediate: - for x in range(0, 5): - self._outln() - report_num = 0 - if len(self.reports) > 0: - for report_num, report in enumerate(self.reports): - self._report_test(report_num + 1, *report) - self._outln() - - self._summarize() - - def _summarize(self): - """summarize all tests - the number of failures, errors and successes""" - self._line(termstyle.black) - self._out("%s test%s run in %0.1f seconds" % ( - self.total, - self._plural(self.total), - time.time() - self.start_time)) - if self.total > self.success: - self._outln(". ") - additionals = [] - if self.failure > 0: - additionals.append(termstyle.red("%s FAILED" % ( - self.failure,))) - if self.error > 0: - additionals.append(termstyle.yellow("%s error%s" % ( - self.error, - self._plural(self.error) ))) - if self.skip > 0: - additionals.append(termstyle.blue("%s skipped" % ( - self.skip))) - self._out(', '.join(additionals)) - - self._out(termstyle.green(" (%s test%s passed)" % ( - self.success, - self._plural(self.success) ))) - self._outln() - - def _report_test(self, report_num, type_, test, err): - """report the results of a single (failing or errored) test""" - self._line(termstyle.black) - self._out("%s) " % (report_num)) - if type_ == failure: - color = termstyle.red - self._outln(color('FAIL: %s' % (self._format_test_name(test),))) - else: - color = termstyle.yellow - self._outln(color('ERROR: %s' % (self._format_test_name(test),))) - - exc_type, exc_instance, exc_trace = err - - self._outln() - self._outln(self._fmt_traceback(exc_trace)) - self._out(color(' ', termstyle.bold(color(exc_type.__name__)), ": ")) - self._outln(self._fmt_message(exc_instance, color)) - self._outln() - - def _relative_path(self, path): - """ - If path is a child of the current working directory, the relative - path is returned surrounded by bold xterm escape sequences. 
- If path is not a child of the working directory, path is returned - """ - try: - here = os.path.abspath(os.path.realpath(os.getcwd())) - fullpath = os.path.abspath(os.path.realpath(path)) - except OSError: - return path - if fullpath.startswith(here): - return termstyle.bold(fullpath[len(here)+1:]) - return path - - def _file_line(self, tb): - """formats the file / lineno / function line of a traceback element""" - prefix = "file://" - prefix = "" - - f = tb.tb_frame - if '__unittest' in f.f_globals: - # this is the magical flag that prevents unittest internal - # code from junking up the stacktrace - return None - - filename = f.f_code.co_filename - lineno = tb.tb_lineno - linecache.checkcache(filename) - function_name = f.f_code.co_name - - line_contents = linecache.getline(filename, lineno, f.f_globals).strip() - - return " %s line %s in %s\n %s" % ( - termstyle.blue(prefix, self._relative_path(filename)), - lineno, - termstyle.cyan(function_name), - line_contents) - - def _fmt_traceback(self, trace): - """format a traceback""" - ret = [] - ret.append(termstyle.default(" Traceback (most recent call last):")) - current_trace = trace - while current_trace is not None: - line = self._file_line(current_trace) - if line is not None: - ret.append(line) - current_trace = current_trace.tb_next - return '\n'.join(ret) - - def _fmt_message(self, exception, color): - orig_message_lines = to_unicode(exception).splitlines() - - if len(orig_message_lines) == 0: - return '' - message_lines = [color(orig_message_lines[0])] - for line in orig_message_lines[1:]: - match = re.match('^---.* begin captured stdout.*----$', line) - if match: - color = None - message_lines.append('') - line = ' ' + line - message_lines.append(color(line) if color is not None else line) - return '\n'.join(message_lines) - - def _out(self, msg='', newline=False): - self.stream.write(msg) - if newline: - self.stream.write('\n') - - def _outln(self, msg=''): - self._out(msg, True) - - def _plural(self, num): - return '' if num == 1 else 's' - - def _line(self, color=termstyle.reset, char='-'): - """ - print a line of separator characters (default '-') - in the given colour (default black) - """ - self._outln(color(char * line_length)) - - -import traceback -import sys - - -class FilteringStream(object): - """ - A wrapper for a stream that will filter - calls to `write` and `writeln` to ignore calls - from blacklisted callers - (implemented as a regex on their filename, according - to traceback.extract_stack()) - - It's super hacky, but there seems to be no other way - to suppress nose's default output - """ - def __init__(self, stream, excludes): - self.__stream = stream - self.__excludes = list(map(re.compile, excludes)) - - def __should_filter(self): - try: - stack = traceback.extract_stack(limit=3)[0] - filename = stack[0] - pattern_matches_filename = lambda pattern: pattern.search(filename) - should_filter = any(map(pattern_matches_filename, self.__excludes)) - if REDNOSE_DEBUG: - print >> sys.stderr, "REDNOSE_DEBUG: got write call from %s, should_filter = %s" % ( - filename, should_filter) - return should_filter - except StandardError as e: - if REDNOSE_DEBUG: - print("\nError in rednose filtering: %s" % (e,), file=sys.stderr) - traceback.print_exc(sys.stderr) - return False - - def write(self, *a): - if self.__should_filter(): - return - return self.__stream.write(*a) - - def writeln(self, *a): - if self.__should_filter(): - return - return self.__stream.writeln(*a) - - # pass non-known methods through to self.__stream - 
def __getattr__(self, name): - if REDNOSE_DEBUG: - print("REDNOSE_DEBUG: getting attr %s" % (name,), file=sys.stderr) - return getattr(self.__stream, name) diff --git a/scripts/automation/trex_control_plane/python_lib/rednose-0.4.1/setup.py b/scripts/automation/trex_control_plane/python_lib/rednose-0.4.1/setup.py deleted file mode 100755 index 34cded4b..00000000 --- a/scripts/automation/trex_control_plane/python_lib/rednose-0.4.1/setup.py +++ /dev/null @@ -1,29 +0,0 @@ -#!/usr/bin/env python - -## NOTE: ## -## this setup.py was generated by zero2pypi: -## http://gfxmonk.net/dist/0install/zero2pypi.xml - -from setuptools import * -setup( - packages = find_packages(exclude=['test', 'test.*']), - description='coloured output for nosetests', - entry_points={'nose.plugins.0.10': ['NOSETESTS_PLUGINS = rednose:RedNose']}, - install_requires=['setuptools', 'python-termstyle >=0.1.7'], - long_description="\n**Note**: This package has been built automatically by\n`zero2pypi <http://gfxmonk.net/dist/0install/zero2pypi.xml>`_.\nIf possible, you should use the zero-install feed instead:\nhttp://gfxmonk.net/dist/0install/rednose.xml\n\n----------------\n\n=========\nrednose\n=========\n\nrednose is a `nosetests`_\nplugin for adding colour (and readability) to nosetest console results.\n\nInstallation:\n-------------\n::\n\n\teasy_install rednose\n\t\nor from the source::\n\n\t./setup.py develop\n\nUsage:\n------\n::\n\n\tnosetests --rednose\n\nor::\n\n\texport NOSE_REDNOSE=1\n\tnosetests\n\nRednose by default uses auto-colouring, which will only use\ncolour if you're running it on a terminal (i.e not piping it\nto a file). To control colouring, use one of::\n\n\tnosetests --rednose --force-color\n\tnosetests --no-color\n\n(you can also control this by setting the environment variable NOSE_REDNOSE_COLOR to 'force' or 'no')\n\n.. _nosetests: http://somethingaboutorange.com/mrl/projects/nose/\n", - name='rednose', - py_modules=['rednose'], - url='http://gfxmonk.net/dist/0install/rednose.xml', - version='0.4.1', -classifiers=[ - "License :: OSI Approved :: BSD License", - "Programming Language :: Python", - "Programming Language :: Python :: 3", - "Development Status :: 4 - Beta", - "Intended Audience :: Developers", - "Topic :: Software Development :: Libraries :: Python Modules", - "Topic :: Software Development :: Testing", - ], - keywords='test nosetests nose nosetest output colour console', - license='BSD', -) diff --git a/scripts/automation/trex_control_plane/python_lib/zmq_fedora.tar.gz b/scripts/automation/trex_control_plane/python_lib/zmq_fedora.tar.gz Binary files differdeleted file mode 100755 index 4f36749b..00000000 --- a/scripts/automation/trex_control_plane/python_lib/zmq_fedora.tar.gz +++ /dev/null diff --git a/scripts/automation/trex_control_plane/server/extended_daemon_runner.py b/scripts/automation/trex_control_plane/server/extended_daemon_runner.py index 07eedd9f..2ce1eb06 100755 --- a/scripts/automation/trex_control_plane/server/extended_daemon_runner.py +++ b/scripts/automation/trex_control_plane/server/extended_daemon_runner.py @@ -8,18 +8,17 @@ import os, sys from argparse import ArgumentParser
from trex_server import trex_parser
try:
- from python_lib.termstyle import termstyle
+ from termstyle import termstyle
except ImportError:
import termstyle
-
-def daemonize_parser (parser_obj, action_funcs, help_menu):
+def daemonize_parser(parser_obj, action_funcs, help_menu):
"""Update the regular process parser to deal with daemon process options"""
parser_obj.description += " (as a daemon process)"
parser_obj.usage = None
- parser_obj.add_argument("action", choices = action_funcs,
- action="store", help = help_menu )
+ parser_obj.add_argument("action", choices=action_funcs,
+ action="store", help=help_menu)
return
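A short sketch of what this buys, assuming daemonize_parser is importable from this module; the action names below are an illustrative subset matching the ones the runner registers further down:

    from argparse import ArgumentParser

    parser = ArgumentParser(description="T-Rex server")
    daemonize_parser(parser,
                     action_funcs=['start', 'stop', 'start-live', 'show'],
                     help_menu="daemon action to perform")
    args = parser.parse_args(['start-live'])
    print(args.action)   # -> 'start-live'; the parser's description now ends
                         #    with "(as a daemon process)"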
@@ -42,7 +41,7 @@ class ExtendedDaemonRunner(runner.DaemonRunner): (*) start-live : start the application in live mode (no daemon process).
"""
- def __init__ (self, app, parser_obj):
+ def __init__(self, app, parser_obj):
""" Set up the parameters of a new runner.
THIS METHOD INTENTIONALLY DOES NOT INVOKE THE SUPER __init__() METHOD
@@ -78,8 +77,8 @@ class ExtendedDaemonRunner(runner.DaemonRunner): self.daemon_context = daemon.DaemonContext()
self.daemon_context.stdin = open(app.stdin_path, 'rt')
self.daemon_context.stdout = open(app.stdout_path, 'w+t')
- self.daemon_context.stderr = open(
- app.stderr_path, 'a+t', buffering=0)
+ self.daemon_context.stderr = open(app.stderr_path,
+ 'a+t', buffering=0)
self.pidfile = None
if app.pidfile_path is not None:
@@ -87,23 +86,22 @@ class ExtendedDaemonRunner(runner.DaemonRunner): self.daemon_context.pidfile = self.pidfile
# mask out all arguments that aren't relevant to main app script
-
- def update_action_funcs (self):
+ def update_action_funcs(self):
self.action_funcs.update({u'start-live': self._start_live, u'show': self._show}) # add key (=action), value (=desired func)
@staticmethod
- def _start_live (self):
+ def _start_live(self):
self.app.run()
@staticmethod
- def _show (self):
+ def _show(self):
if self.pidfile.is_locked():
print termstyle.red("T-Rex server daemon is running")
else:
print termstyle.red("T-Rex server daemon is NOT running")
- def do_action (self):
+ def do_action(self):
self.__prevent_duplicate_runs()
self.__prompt_init_msg()
try:
@@ -117,7 +115,7 @@ class ExtendedDaemonRunner(runner.DaemonRunner): self.do_action()
- def __prevent_duplicate_runs (self):
+ def __prevent_duplicate_runs(self):
if self.action == 'start' and self.pidfile.is_locked():
print termstyle.green("Server daemon is already running")
exit(1)
@@ -125,13 +123,13 @@ class ExtendedDaemonRunner(runner.DaemonRunner): print termstyle.green("Server daemon is not running")
exit(1)
- def __prompt_init_msg (self):
+ def __prompt_init_msg(self):
if self.action == 'start':
print termstyle.green("Starting daemon server...")
elif self.action == 'stop':
print termstyle.green("Stopping daemon server...")
- def __verify_termination (self):
+ def __verify_termination(self):
pass
# import time
# while self.pidfile.is_locked():
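Taken together, the runner is driven like this; `app` is hypothetical here and stands for the daemon application object (stdin/stdout/stderr paths, a pidfile_path, and a run() method) that the server builds elsewhere, while trex_parser is the parser imported at the top of the file:

    runner = ExtendedDaemonRunner(app, trex_parser)  # parses argv for the action
    runner.do_action()
    # 'start' / 'stop'  -> daemonize or terminate via the pidfile
    # 'start-live'      -> app.run() in the foreground, no daemon context
    # 'show'            -> print whether the server daemon is currently running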
diff --git a/scripts/automation/trex_control_plane/server/outer_packages.py b/scripts/automation/trex_control_plane/server/outer_packages.py index ab25ea68..976e478d 100755 --- a/scripts/automation/trex_control_plane/server/outer_packages.py +++ b/scripts/automation/trex_control_plane/server/outer_packages.py @@ -1,66 +1,34 @@ #!/router/bin/python -import sys,site -import platform,os -import tarfile -import errno -import pwd +import sys +import site +import os CURRENT_PATH = os.path.dirname(os.path.realpath(__file__)) -ROOT_PATH = os.path.abspath(os.path.join(CURRENT_PATH, os.pardir)) # path to trex_control_plane directory -PATH_TO_PYTHON_LIB = os.path.abspath(os.path.join(ROOT_PATH, 'python_lib')) +ROOT_PATH = os.path.abspath(os.path.join(CURRENT_PATH, os.pardir)) # path to trex_control_plane directory +PATH_TO_PYTHON_LIB = os.path.abspath(os.path.join(ROOT_PATH, os.pardir, os.pardir, 'external_libs')) SERVER_MODULES = ['enum34-1.0.4', - # 'jsonrpclib-0.1.3', - 'jsonrpclib-pelix-0.2.5', - 'zmq', - 'python-daemon-2.0.5', - 'lockfile-0.10.2', - 'termstyle' - ] + 'jsonrpclib-pelix-0.2.5', + 'zmq', + 'python-daemon-2.0.5', + 'lockfile-0.10.2', + 'termstyle' + ] -def extract_zmq_package (): - """make sure zmq package is available""" - os.chdir(PATH_TO_PYTHON_LIB) - if not os.path.exists('zmq'): - if os.path.exists('zmq_fedora.tar.gz'): # make sure tar file is available for extraction - try: - tar = tarfile.open("zmq_fedora.tar.gz") - # finally, extract the tarfile locally - tar.extractall() - except OSError as err: - if err.errno == errno.EACCES: - # fall back. try extracting using currently logged in user - stat_info = os.stat(PATH_TO_PYTHON_LIB) - uid = stat_info.st_uid - logged_user = pwd.getpwuid(uid).pw_name - if logged_user != 'root': - try: - os.system("sudo -u {user} tar -zxvf zmq_fedora.tar.gz".format(user = logged_user)) - except: - raise OSError(13, 'Permission denied: Please make sure that logged user have sudo access and writing privileges to `python_lib` directory.') - else: - raise OSError(13, 'Permission denied: Please make sure that logged user have sudo access and writing privileges to `python_lib` directory.') - finally: - tar.close() - else: - raise IOError("File 'zmq_fedora.tar.gz' couldn't be located at python_lib directory.") - os.chdir(CURRENT_PATH) - -def import_server_modules (): +def import_server_modules(): # must be in a higher priority sys.path.insert(0, PATH_TO_PYTHON_LIB) sys.path.append(ROOT_PATH) - extract_zmq_package() import_module_list(SERVER_MODULES) -def import_module_list (modules_list): + +def import_module_list(modules_list): assert(isinstance(modules_list, list)) for p in modules_list: - full_path = os.path.join(PATH_TO_PYTHON_LIB, p) - fix_path = os.path.normcase(full_path) + full_path = os.path.join(PATH_TO_PYTHON_LIB, p) + fix_path = os.path.normcase(full_path) site.addsitedir(full_path) - import_server_modules() diff --git a/scripts/automation/trex_control_plane/server/zmq_monitor_thread.py b/scripts/automation/trex_control_plane/server/zmq_monitor_thread.py index 28e154ee..7a278af8 100755 --- a/scripts/automation/trex_control_plane/server/zmq_monitor_thread.py +++ b/scripts/automation/trex_control_plane/server/zmq_monitor_thread.py @@ -13,25 +13,23 @@ from common.trex_status_e import TRexStatus CCustomLogger.setup_custom_logger('TRexServer')
logger = logging.getLogger('TRexServer')
+
class ZmqMonitorSession(threading.Thread):
def __init__(self, trexObj , zmq_port):
super(ZmqMonitorSession, self).__init__()
self.stoprequest = threading.Event()
-# self.terminateFlag = False
self.first_dump = True
self.zmq_port = zmq_port
- self.zmq_publisher = "tcp://localhost:{port}".format( port = self.zmq_port )
-# self.context = zmq.Context()
-# self.socket = self.context.socket(zmq.SUB)
+ self.zmq_publisher = "tcp://localhost:{port}".format(port=self.zmq_port)
self.trexObj = trexObj
self.expect_trex = self.trexObj.expect_trex # used to signal if T-Rex is expected to run and if data should be considered
self.decoder = JSONDecoder()
logger.info("ZMQ monitor initialization finished")
- def run (self):
+ def run(self):
self.context = zmq.Context()
self.socket = self.context.socket(zmq.SUB)
- logger.info("ZMQ monitor started listening @ {pub}".format( pub = self.zmq_publisher ) )
+ logger.info("ZMQ monitor started listening @ {pub}".format(pub=self.zmq_publisher))
self.socket.connect(self.zmq_publisher)
self.socket.setsockopt(zmq.SUBSCRIBE, '')
@@ -46,10 +44,10 @@ class ZmqMonitorSession(threading.Thread): # allow this exception since it comes from ZMQ monitor termination
pass
else:
- logger.error("ZMQ monitor thrown an exception. Received exception: {ex}".format(ex = e))
+ logger.error("ZMQ monitor thrown an exception. Received exception: {ex}".format(ex=e))
raise
- def join (self, timeout = None):
+ def join(self, timeout=None):
self.stoprequest.set()
logger.debug("Handling termination of ZMQ monitor thread")
self.socket.close()
@@ -57,15 +55,15 @@ class ZmqMonitorSession(threading.Thread): logger.info("ZMQ monitor resources have been freed.")
super(ZmqMonitorSession, self).join(timeout)
- def parse_and_update_zmq_dump (self, zmq_dump):
+ def parse_and_update_zmq_dump(self, zmq_dump):
try:
dict_obj = self.decoder.decode(zmq_dump)
except ValueError:
- logger.error("ZMQ dump failed JSON-RPC decode. Ignoring. Bad dump was: {dump}".format(dump = zmq_dump))
+ logger.error("ZMQ dump failed JSON-RPC decode. Ignoring. Bad dump was: {dump}".format(dump=zmq_dump))
dict_obj = None
# add the latest ZMQ dump to trex_obj, keyed by its 'name' header
- if dict_obj is not None and dict_obj!={}:
+ if dict_obj is not None and dict_obj != {}:
self.trexObj.zmq_dump[dict_obj['name']] = dict_obj
if self.first_dump:
# change TRexStatus from starting to Running once the first ZMQ dump is obtained and parsed successfully
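At its core the monitor is a plain ZMQ SUB loop around exactly the decode step above. A self-contained sketch of that loop, assuming a publisher on tcp://localhost:4500 (the port and payloads here are illustrative):

    import json
    import zmq

    context = zmq.Context()
    socket = context.socket(zmq.SUB)
    socket.connect("tcp://localhost:4500")
    socket.setsockopt(zmq.SUBSCRIBE, b'')   # empty prefix: subscribe to everything
    try:
        while True:
            zmq_dump = socket.recv()        # blocks until the publisher sends
            print(json.loads(zmq_dump))     # the decode parse_and_update_zmq_dump performs
    except KeyboardInterrupt:
        socket.close()
        context.term()

The thread above replaces the interrupt with a stoprequest event and closes the socket from join(), which is why its run() tolerates the termination exception raised out of recv.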
diff --git a/scripts/external_libs/PyYAML-3.01/LICENSE b/scripts/external_libs/PyYAML-3.01/LICENSE new file mode 100644 index 00000000..050ced23 --- /dev/null +++ b/scripts/external_libs/PyYAML-3.01/LICENSE @@ -0,0 +1,19 @@ +Copyright (c) 2006 Kirill Simonov + +Permission is hereby granted, free of charge, to any person obtaining a copy of +this software and associated documentation files (the "Software"), to deal in +the Software without restriction, including without limitation the rights to +use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies +of the Software, and to permit persons to whom the Software is furnished to do +so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. diff --git a/scripts/external_libs/PyYAML-3.01/PKG-INFO b/scripts/external_libs/PyYAML-3.01/PKG-INFO new file mode 100644 index 00000000..6ec73b1f --- /dev/null +++ b/scripts/external_libs/PyYAML-3.01/PKG-INFO @@ -0,0 +1,28 @@ +Metadata-Version: 1.0 +Name: PyYAML +Version: 3.01 +Summary: YAML parser and emitter for Python +Home-page: http://pyyaml.org/wiki/PyYAML +Author: Kirill Simonov +Author-email: xi@resolvent.net +License: MIT +Download-URL: http://pyyaml.org/download/pyyaml/PyYAML-3.01.tar.gz +Description: YAML is a data serialization format designed for human readability and + interaction with scripting languages. PyYAML is a YAML parser and + emitter for Python. + + PyYAML features a complete YAML 1.1 parser, Unicode support, pickle + support, capable extension API, and sensible error messages. PyYAML + supports standard YAML tags and provides Python-specific tags that allow + to represent an arbitrary Python object. + + PyYAML is applicable for a broad range of tasks from complex + configuration files to object serialization and persistance. +Platform: Any +Classifier: Development Status :: 4 - Beta +Classifier: Intended Audience :: Developers +Classifier: License :: OSI Approved :: MIT License +Classifier: Operating System :: OS Independent +Classifier: Programming Language :: Python +Classifier: Topic :: Software Development :: Libraries :: Python Modules +Classifier: Topic :: Text Processing :: Markup diff --git a/scripts/external_libs/PyYAML-3.01/README b/scripts/external_libs/PyYAML-3.01/README new file mode 100644 index 00000000..8a6dec77 --- /dev/null +++ b/scripts/external_libs/PyYAML-3.01/README @@ -0,0 +1,18 @@ +PyYAML 3000 - The next generation YAML parser and emitter for Python. + +To install, type 'python setup.py install'. + +For more information, check the PyYAML homepage: +'http://pyyaml.org/wiki/PyYAML'. + +Documentation (rough and incomplete though): +'http://pyyaml.org/wiki/PyYAMLDocumentation'. + +Post your questions and opinions to the YAML-Core mailing list: +'http://lists.sourceforge.net/lists/listinfo/yaml-core'. + +Submit bug reports and feature requests to the PyYAML bug tracker: +'http://pyyaml.org/newticket?component=pyyaml'. 
+ +PyYAML 3000 is written by Kirill Simonov <xi@resolvent.net>. It is released +under the MIT license. See the file LICENSE for more details. diff --git a/scripts/external_libs/PyYAML-3.01/lib/yaml/__init__.py b/scripts/external_libs/PyYAML-3.01/lib/yaml/__init__.py new file mode 100644 index 00000000..c30973a3 --- /dev/null +++ b/scripts/external_libs/PyYAML-3.01/lib/yaml/__init__.py @@ -0,0 +1,284 @@ + +from error import * + +from tokens import * +from events import * +from nodes import * + +from loader import * +from dumper import * + +def scan(stream, Loader=Loader): + """ + Scan a YAML stream and produce scanning tokens. + """ + loader = Loader(stream) + while loader.check_token(): + yield loader.get_token() + +def parse(stream, Loader=Loader): + """ + Parse a YAML stream and produce parsing events. + """ + loader = Loader(stream) + while loader.check_event(): + yield loader.get_event() + +def compose(stream, Loader=Loader): + """ + Parse the first YAML document in a stream + and produce the corresponding representation tree. + """ + loader = Loader(stream) + if loader.check_node(): + return loader.get_node() + +def compose_all(stream, Loader=Loader): + """ + Parse all YAML documents in a stream + and produce corresponsing representation trees. + """ + loader = Loader(stream) + while loader.check_node(): + yield loader.get_node() + +def load_all(stream, Loader=Loader): + """ + Parse all YAML documents in a stream + and produce corresponding Python objects. + """ + loader = Loader(stream) + while loader.check_data(): + yield loader.get_data() + +def load(stream, Loader=Loader): + """ + Parse the first YAML document in a stream + and produce the corresponding Python object. + """ + loader = Loader(stream) + if loader.check_data(): + return loader.get_data() + +def safe_load_all(stream): + """ + Parse all YAML documents in a stream + and produce corresponding Python objects. + Resolve only basic YAML tags. + """ + return load_all(stream, SafeLoader) + +def safe_load(stream): + """ + Parse the first YAML document in a stream + and produce the corresponding Python object. + Resolve only basic YAML tags. + """ + return load(stream, SafeLoader) + +def emit(events, stream=None, Dumper=Dumper, + canonical=None, indent=None, width=None, + allow_unicode=None, line_break=None): + """ + Emit YAML parsing events into a stream. + If stream is None, return the produced string instead. + """ + getvalue = None + if stream is None: + try: + from cStringIO import StringIO + except ImportError: + from StringIO import StringIO + stream = StringIO() + getvalue = stream.getvalue + dumper = Dumper(stream, canonical=canonical, indent=indent, width=width, + allow_unicode=allow_unicode, line_break=line_break) + for event in events: + dumper.emit(event) + if getvalue: + return getvalue() + +def serialize_all(nodes, stream=None, Dumper=Dumper, + canonical=None, indent=None, width=None, + allow_unicode=None, line_break=None, + encoding='utf-8', explicit_start=None, explicit_end=None, + version=None, tags=None): + """ + Serialize a sequence of representation trees into a YAML stream. + If stream is None, return the produced string instead. 
+ """ + getvalue = None + if stream is None: + try: + from cStringIO import StringIO + except ImportError: + from StringIO import StringIO + stream = StringIO() + getvalue = stream.getvalue + dumper = Dumper(stream, canonical=canonical, indent=indent, width=width, + allow_unicode=allow_unicode, line_break=line_break, + encoding=encoding, version=version, tags=tags, + explicit_start=explicit_start, explicit_end=explicit_end) + dumper.open() + for node in nodes: + dumper.serialize(node) + dumper.close() + if getvalue: + return getvalue() + +def serialize(node, stream=None, Dumper=Dumper, **kwds): + """ + Serialize a representation tree into a YAML stream. + If stream is None, return the produced string instead. + """ + return serialize_all([node], stream, Dumper=Dumper, **kwds) + +def dump_all(documents, stream=None, Dumper=Dumper, + default_style=None, default_flow_style=None, + canonical=None, indent=None, width=None, + allow_unicode=None, line_break=None, + encoding='utf-8', explicit_start=None, explicit_end=None, + version=None, tags=None): + """ + Serialize a sequence of Python objects into a YAML stream. + If stream is None, return the produced string instead. + """ + getvalue = None + if stream is None: + try: + from cStringIO import StringIO + except ImportError: + from StringIO import StringIO + stream = StringIO() + getvalue = stream.getvalue + dumper = Dumper(stream, default_style=default_style, + default_flow_style=default_flow_style, + canonical=canonical, indent=indent, width=width, + allow_unicode=allow_unicode, line_break=line_break, + encoding=encoding, version=version, tags=tags, + explicit_start=explicit_start, explicit_end=explicit_end) + dumper.open() + for data in documents: + dumper.represent(data) + dumper.close() + if getvalue: + return getvalue() + +def dump(data, stream=None, Dumper=Dumper, **kwds): + """ + Serialize a Python object into a YAML stream. + If stream is None, return the produced string instead. + """ + return dump_all([data], stream, Dumper=Dumper, **kwds) + +def safe_dump_all(documents, stream=None, **kwds): + """ + Serialize a sequence of Python objects into a YAML stream. + Produce only basic YAML tags. + If stream is None, return the produced string instead. + """ + return dump_all(documents, stream, Dumper=SafeDumper, **kwds) + +def safe_dump(data, stream=None, **kwds): + """ + Serialize a Python object into a YAML stream. + Produce only basic YAML tags. + If stream is None, return the produced string instead. + """ + return dump_all([data], stream, Dumper=SafeDumper, **kwds) + +def add_implicit_resolver(tag, regexp, first=None, + Loader=Loader, Dumper=Dumper): + """ + Add an implicit scalar detector. + If an implicit scalar value matches the given regexp, + the corresponding tag is assigned to the scalar. + first is a sequence of possible initial characters or None. + """ + Loader.add_implicit_resolver(tag, regexp, first) + Dumper.add_implicit_resolver(tag, regexp, first) + +def add_path_resolver(tag, path, kind=None, Loader=Loader, Dumper=Dumper): + """ + Add a path based resolver for the given tag. + A path is a list of keys that forms a path + to a node in the representation tree. + Keys can be string values, integers, or None. + """ + Loader.add_path_resolver(tag, path, kind) + Dumper.add_path_resolver(tag, path, kind) + +def add_constructor(tag, constructor, Loader=Loader): + """ + Add a constructor for the given tag. + Constructor is a function that accepts a Loader instance + and a node object and produces the corresponding Python object. 
+ """ + Loader.add_constructor(tag, constructor) + +def add_multi_constructor(tag_prefix, multi_constructor, Loader=Loader): + """ + Add a multi-constructor for the given tag prefix. + Multi-constructor is called for a node if its tag starts with tag_prefix. + Multi-constructor accepts a Loader instance, a tag suffix, + and a node object and produces the corresponding Python object. + """ + Loader.add_multi_constructor(tag_prefix, multi_constructor) + +def add_representer(data_type, representer, Dumper=Dumper): + """ + Add a representer for the given type. + Representer is a function accepting a Dumper instance + and an instance of the given data type + and producing the corresponding representation node. + """ + Dumper.add_representer(data_type, representer) + +def add_multi_representer(data_type, multi_representer, Dumper=Dumper): + """ + Add a representer for the given type. + Multi-representer is a function accepting a Dumper instance + and an instance of the given data type or subtype + and producing the corresponding representation node. + """ + Dumper.add_multi_representer(data_type, multi_representer) + +class YAMLObjectMetaclass(type): + """ + The metaclass for YAMLObject. + """ + def __init__(cls, name, bases, kwds): + super(YAMLObjectMetaclass, cls).__init__(name, bases, kwds) + if 'yaml_tag' in kwds and kwds['yaml_tag'] is not None: + cls.yaml_loader.add_constructor(cls.yaml_tag, cls.from_yaml) + cls.yaml_dumper.add_representer(cls, cls.to_yaml) + +class YAMLObject(object): + """ + An object that can dump itself to a YAML stream + and load itself from a YAML stream. + """ + + __metaclass__ = YAMLObjectMetaclass + + yaml_loader = Loader + yaml_dumper = Dumper + + yaml_tag = None + yaml_flow_style = None + + def from_yaml(cls, loader, node): + """ + Convert a representation node to a Python object. + """ + return loader.construct_yaml_object(node, cls) + from_yaml = classmethod(from_yaml) + + def to_yaml(cls, dumper, data): + """ + Convert a Python object to a representation node. + """ + return dumper.represent_yaml_object(cls.yaml_tag, data, cls, + flow_style=cls.yaml_flow_style) + to_yaml = classmethod(to_yaml) + diff --git a/scripts/external_libs/PyYAML-3.01/lib/yaml/composer.py b/scripts/external_libs/PyYAML-3.01/lib/yaml/composer.py new file mode 100644 index 00000000..d256b054 --- /dev/null +++ b/scripts/external_libs/PyYAML-3.01/lib/yaml/composer.py @@ -0,0 +1,123 @@ + +__all__ = ['Composer', 'ComposerError'] + +from error import MarkedYAMLError +from events import * +from nodes import * + +class ComposerError(MarkedYAMLError): + pass + +class Composer: + + def __init__(self): + self.anchors = {} + + def check_node(self): + # If there are more documents available? + return not self.check_event(StreamEndEvent) + + def get_node(self): + # Get the root node of the next document. + if not self.check_event(StreamEndEvent): + return self.compose_document() + + def __iter__(self): + # Iterator protocol. + while not self.check_event(StreamEndEvent): + yield self.compose_document() + + def compose_document(self): + + # Drop the STREAM-START event. + if self.check_event(StreamStartEvent): + self.get_event() + + # Drop the DOCUMENT-START event. + self.get_event() + + # Compose the root node. + node = self.compose_node(None, None) + + # Drop the DOCUMENT-END event. 
+ self.get_event() + + self.complete_anchors = {} + return node + + def compose_node(self, parent, index): + if self.check_event(AliasEvent): + event = self.get_event() + anchor = event.anchor + if anchor not in self.anchors: + raise ComposerError(None, None, "found undefined alias %r" + % anchor.encode('utf-8'), event.start_mark) + return self.anchors[anchor] + event = self.peek_event() + anchor = event.anchor + if anchor is not None: + if anchor in self.anchors: + raise ComposerError("found duplicate anchor %r; first occurence" + % anchor.encode('utf-8'), self.anchors[anchor].start_mark, + "second occurence", event.start_mark) + self.descend_resolver(parent, index) + if self.check_event(ScalarEvent): + node = self.compose_scalar_node(anchor) + elif self.check_event(SequenceStartEvent): + node = self.compose_sequence_node(anchor) + elif self.check_event(MappingStartEvent): + node = self.compose_mapping_node(anchor) + self.ascend_resolver() + return node + + def compose_scalar_node(self, anchor): + event = self.get_event() + tag = event.tag + if tag is None or tag == u'!': + tag = self.resolve(ScalarNode, event.value, event.implicit) + node = ScalarNode(tag, event.value, + event.start_mark, event.end_mark, style=event.style) + if anchor is not None: + self.anchors[anchor] = node + return node + + def compose_sequence_node(self, anchor): + start_event = self.get_event() + tag = start_event.tag + if tag is None or tag == u'!': + tag = self.resolve(SequenceNode, None, start_event.implicit) + node = SequenceNode(tag, [], + start_event.start_mark, None, + flow_style=start_event.flow_style) + if anchor is not None: + self.anchors[anchor] = node + index = 0 + while not self.check_event(SequenceEndEvent): + node.value.append(self.compose_node(node, index)) + index += 1 + end_event = self.get_event() + node.end_mark = end_event.end_mark + return node + + def compose_mapping_node(self, anchor): + start_event = self.get_event() + tag = start_event.tag + if tag is None or tag == u'!': + tag = self.resolve(MappingNode, None, start_event.implicit) + node = MappingNode(tag, {}, + start_event.start_mark, None, + flow_style=start_event.flow_style) + if anchor is not None: + self.anchors[anchor] = node + while not self.check_event(MappingEndEvent): + key_event = self.peek_event() + item_key = self.compose_node(node, None) + if item_key in node.value: + raise ComposerError("while composing a mapping", start_event.start_mark, + "found duplicate key", key_event.start_mark) + item_value = self.compose_node(node, item_key) + node.value[item_key] = item_value + end_event = self.get_event() + node.end_mark = end_event.end_mark + return node + diff --git a/scripts/external_libs/PyYAML-3.01/lib/yaml/constructor.py b/scripts/external_libs/PyYAML-3.01/lib/yaml/constructor.py new file mode 100644 index 00000000..57ad53d1 --- /dev/null +++ b/scripts/external_libs/PyYAML-3.01/lib/yaml/constructor.py @@ -0,0 +1,638 @@ + +__all__ = ['BaseConstructor', 'SafeConstructor', 'Constructor', + 'ConstructorError'] + +from error import * +from nodes import * +from composer import * + +try: + import datetime + datetime_available = True +except ImportError: + datetime_available = False + +try: + set +except NameError: + from sets import Set as set + +import binascii, re, sys + +class ConstructorError(MarkedYAMLError): + pass + +class BaseConstructor(Composer): + + yaml_constructors = {} + yaml_multi_constructors = {} + + def __init__(self): + self.constructed_objects = {} + self.recursive_objects = {} + + def check_data(self): + # If 
there are more documents available? + return self.check_node() + + def get_data(self): + # Construct and return the next document. + if self.check_node(): + return self.construct_document(self.get_node()) + + def __iter__(self): + # Iterator protocol. + while self.check_node(): + yield self.construct_document(self.get_node()) + + def construct_document(self, node): + data = self.construct_object(node) + self.constructed_objects = {} + self.recursive_objects = {} + return data + + def construct_object(self, node): + if node in self.constructed_objects: + return self.constructed_objects[node] + if node in self.recursive_objects: + raise ConstructorError(None, None, + "found recursive node", node.start_mark) + self.recursive_objects[node] = None + constructor = None + if node.tag in self.yaml_constructors: + constructor = lambda node: self.yaml_constructors[node.tag](self, node) + else: + for tag_prefix in self.yaml_multi_constructors: + if node.tag.startswith(tag_prefix): + tag_suffix = node.tag[len(tag_prefix):] + constructor = lambda node: \ + self.yaml_multi_constructors[tag_prefix](self, tag_suffix, node) + break + else: + if None in self.yaml_multi_constructors: + constructor = lambda node: \ + self.yaml_multi_constructors[None](self, node.tag, node) + elif None in self.yaml_constructors: + constructor = lambda node: \ + self.yaml_constructors[None](self, node) + elif isinstance(node, ScalarNode): + constructor = self.construct_scalar + elif isinstance(node, SequenceNode): + constructor = self.construct_sequence + elif isinstance(node, MappingNode): + constructor = self.construct_mapping + else: + print node.tag + data = constructor(node) + self.constructed_objects[node] = data + del self.recursive_objects[node] + return data + + def construct_scalar(self, node): + if not isinstance(node, ScalarNode): + if isinstance(node, MappingNode): + for key_node in node.value: + if key_node.tag == u'tag:yaml.org,2002:value': + return self.construct_scalar(node.value[key_node]) + raise ConstructorError(None, None, + "expected a scalar node, but found %s" % node.id, + node.start_mark) + return node.value + + def construct_sequence(self, node): + if not isinstance(node, SequenceNode): + raise ConstructorError(None, None, + "expected a sequence node, but found %s" % node.id, + node.start_mark) + return [self.construct_object(child) for child in node.value] + + def construct_mapping(self, node): + if not isinstance(node, MappingNode): + raise ConstructorError(None, None, + "expected a mapping node, but found %s" % node.id, + node.start_mark) + mapping = {} + merge = None + for key_node in node.value: + if key_node.tag == u'tag:yaml.org,2002:merge': + if merge is not None: + raise ConstructorError("while constructing a mapping", node.start_mark, + "found duplicate merge key", key_node.start_mark) + value_node = node.value[key_node] + if isinstance(value_node, MappingNode): + merge = [self.construct_mapping(value_node)] + elif isinstance(value_node, SequenceNode): + merge = [] + for subnode in value_node.value: + if not isinstance(subnode, MappingNode): + raise ConstructorError("while constructing a mapping", + node.start_mark, + "expected a mapping for merging, but found %s" + % subnode.id, subnode.start_mark) + merge.append(self.construct_mapping(subnode)) + merge.reverse() + else: + raise ConstructorError("while constructing a mapping", node.start_mark, + "expected a mapping or list of mappings for merging, but found %s" + % value_node.id, value_node.start_mark) + elif key_node.tag == 
u'tag:yaml.org,2002:value': + if '=' in mapping: + raise ConstructorError("while construction a mapping", node.start_mark, + "found duplicate value key", key_node.start_mark) + value = self.construct_object(node.value[key_node]) + mapping['='] = value + else: + key = self.construct_object(key_node) + try: + duplicate_key = key in mapping + except TypeError, exc: + raise ConstructorError("while constructing a mapping", node.start_mark, + "found unacceptable key (%s)" % exc, key_node.start_mark) + if duplicate_key: + raise ConstructorError("while constructing a mapping", node.start_mark, + "found duplicate key", key_node.start_mark) + value = self.construct_object(node.value[key_node]) + mapping[key] = value + if merge is not None: + merge.append(mapping) + mapping = {} + for submapping in merge: + mapping.update(submapping) + return mapping + + def construct_pairs(self, node): + if not isinstance(node, MappingNode): + raise ConstructorError(None, None, + "expected a mapping node, but found %s" % node.id, + node.start_mark) + pairs = [] + for key_node in node.value: + key = self.construct_object(key_node) + value = self.construct_object(node.value[key_node]) + pairs.append((key, value)) + return pairs + + def add_constructor(cls, tag, constructor): + if not 'yaml_constructors' in cls.__dict__: + cls.yaml_constructors = cls.yaml_constructors.copy() + cls.yaml_constructors[tag] = constructor + add_constructor = classmethod(add_constructor) + + def add_multi_constructor(cls, tag_prefix, multi_constructor): + if not 'yaml_multi_constructors' in cls.__dict__: + cls.yaml_multi_constructors = cls.yaml_multi_constructors.copy() + cls.yaml_multi_constructors[tag_prefix] = multi_constructor + add_multi_constructor = classmethod(add_multi_constructor) + +class SafeConstructor(BaseConstructor): + + def construct_yaml_null(self, node): + self.construct_scalar(node) + return None + + bool_values = { + u'yes': True, + u'no': False, + u'true': True, + u'false': False, + u'on': True, + u'off': False, + } + + def construct_yaml_bool(self, node): + value = self.construct_scalar(node) + return self.bool_values[value.lower()] + + def construct_yaml_int(self, node): + value = str(self.construct_scalar(node)) + value = value.replace('_', '') + sign = +1 + if value[0] == '-': + sign = -1 + if value[0] in '+-': + value = value[1:] + if value == '0': + return 0 + elif value.startswith('0b'): + return sign*int(value[2:], 2) + elif value.startswith('0x'): + return sign*int(value[2:], 16) + elif value[0] == '0': + return sign*int(value, 8) + elif ':' in value: + digits = [int(part) for part in value.split(':')] + digits.reverse() + base = 1 + value = 0 + for digit in digits: + value += digit*base + base *= 60 + return sign*value + else: + return sign*int(value) + + inf_value = 1e300000 + nan_value = inf_value/inf_value + + def construct_yaml_float(self, node): + value = str(self.construct_scalar(node)) + value = value.replace('_', '') + sign = +1 + if value[0] == '-': + sign = -1 + if value[0] in '+-': + value = value[1:] + if value.lower() == '.inf': + return sign*self.inf_value + elif value.lower() == '.nan': + return self.nan_value + elif ':' in value: + digits = [float(part) for part in value.split(':')] + digits.reverse() + base = 1 + value = 0.0 + for digit in digits: + value += digit*base + base *= 60 + return sign*value + else: + return float(value) + + def construct_yaml_binary(self, node): + value = self.construct_scalar(node) + try: + return str(value).decode('base64') + except (binascii.Error, 
UnicodeEncodeError), exc: + raise ConstructorError(None, None, + "failed to decode base64 data: %s" % exc, node.start_mark) + + timestamp_regexp = re.compile( + ur'''^(?P<year>[0-9][0-9][0-9][0-9]) + -(?P<month>[0-9][0-9]?) + -(?P<day>[0-9][0-9]?) + (?:(?:[Tt]|[ \t]+) + (?P<hour>[0-9][0-9]?) + :(?P<minute>[0-9][0-9]) + :(?P<second>[0-9][0-9]) + (?:\.(?P<fraction>[0-9]*))? + (?:[ \t]*(?:Z|(?P<tz_hour>[-+][0-9][0-9]?) + (?::(?P<tz_minute>[0-9][0-9])?)?))?)?$''', re.X) + + def construct_yaml_timestamp(self, node): + value = self.construct_scalar(node) + match = self.timestamp_regexp.match(node.value) + values = match.groupdict() + for key in values: + if values[key]: + values[key] = int(values[key]) + else: + values[key] = 0 + fraction = values['fraction'] + if fraction: + while 10*fraction < 1000000: + fraction *= 10 + values['fraction'] = fraction + stamp = datetime.datetime(values['year'], values['month'], values['day'], + values['hour'], values['minute'], values['second'], values['fraction']) + diff = datetime.timedelta(hours=values['tz_hour'], minutes=values['tz_minute']) + return stamp-diff + + def construct_yaml_omap(self, node): + # Note: we do not check for duplicate keys, because it's too + # CPU-expensive. + if not isinstance(node, SequenceNode): + raise ConstructorError("while constructing an ordered map", node.start_mark, + "expected a sequence, but found %s" % node.id, node.start_mark) + omap = [] + for subnode in node.value: + if not isinstance(subnode, MappingNode): + raise ConstructorError("while constructing an ordered map", node.start_mark, + "expected a mapping of length 1, but found %s" % subnode.id, + subnode.start_mark) + if len(subnode.value) != 1: + raise ConstructorError("while constructing an ordered map", node.start_mark, + "expected a single mapping item, but found %d items" % len(subnode.value), + subnode.start_mark) + key_node = subnode.value.keys()[0] + key = self.construct_object(key_node) + value = self.construct_object(subnode.value[key_node]) + omap.append((key, value)) + return omap + + def construct_yaml_pairs(self, node): + # Note: the same code as `construct_yaml_omap`. 
+ if not isinstance(node, SequenceNode): + raise ConstructorError("while constructing pairs", node.start_mark, + "expected a sequence, but found %s" % node.id, node.start_mark) + pairs = [] + for subnode in node.value: + if not isinstance(subnode, MappingNode): + raise ConstructorError("while constructing pairs", node.start_mark, + "expected a mapping of length 1, but found %s" % subnode.id, + subnode.start_mark) + if len(subnode.value) != 1: + raise ConstructorError("while constructing pairs", node.start_mark, + "expected a single mapping item, but found %d items" % len(subnode.value), + subnode.start_mark) + key_node = subnode.value.keys()[0] + key = self.construct_object(key_node) + value = self.construct_object(subnode.value[key_node]) + pairs.append((key, value)) + return pairs + + def construct_yaml_set(self, node): + value = self.construct_mapping(node) + return set(value) + + def construct_yaml_str(self, node): + value = self.construct_scalar(node) + try: + return str(value) + except UnicodeEncodeError: + return value + + def construct_yaml_seq(self, node): + return self.construct_sequence(node) + + def construct_yaml_map(self, node): + return self.construct_mapping(node) + + def construct_yaml_object(self, node, cls): + state = self.construct_mapping(node) + data = cls.__new__(cls) + if hasattr(data, '__setstate__'): + data.__setstate__(state) + else: + data.__dict__.update(state) + return data + + def construct_undefined(self, node): + raise ConstructorError(None, None, + "could not determine a constructor for the tag %r" % node.tag.encode('utf-8'), + node.start_mark) + +SafeConstructor.add_constructor( + u'tag:yaml.org,2002:null', + SafeConstructor.construct_yaml_null) + +SafeConstructor.add_constructor( + u'tag:yaml.org,2002:bool', + SafeConstructor.construct_yaml_bool) + +SafeConstructor.add_constructor( + u'tag:yaml.org,2002:int', + SafeConstructor.construct_yaml_int) + +SafeConstructor.add_constructor( + u'tag:yaml.org,2002:float', + SafeConstructor.construct_yaml_float) + +SafeConstructor.add_constructor( + u'tag:yaml.org,2002:binary', + SafeConstructor.construct_yaml_binary) + +if datetime_available: + SafeConstructor.add_constructor( + u'tag:yaml.org,2002:timestamp', + SafeConstructor.construct_yaml_timestamp) + +SafeConstructor.add_constructor( + u'tag:yaml.org,2002:omap', + SafeConstructor.construct_yaml_omap) + +SafeConstructor.add_constructor( + u'tag:yaml.org,2002:pairs', + SafeConstructor.construct_yaml_pairs) + +SafeConstructor.add_constructor( + u'tag:yaml.org,2002:set', + SafeConstructor.construct_yaml_set) + +SafeConstructor.add_constructor( + u'tag:yaml.org,2002:str', + SafeConstructor.construct_yaml_str) + +SafeConstructor.add_constructor( + u'tag:yaml.org,2002:seq', + SafeConstructor.construct_yaml_seq) + +SafeConstructor.add_constructor( + u'tag:yaml.org,2002:map', + SafeConstructor.construct_yaml_map) + +SafeConstructor.add_constructor(None, + SafeConstructor.construct_undefined) + +class Constructor(SafeConstructor): + + def construct_python_str(self, node): + return self.construct_scalar(node).encode('utf-8') + + def construct_python_unicode(self, node): + return self.construct_scalar(node) + + def construct_python_long(self, node): + return long(self.construct_yaml_int(node)) + + def construct_python_complex(self, node): + return complex(self.construct_scalar(node)) + + def construct_python_tuple(self, node): + return tuple(self.construct_yaml_seq(node)) + + def find_python_module(self, name, mark): + if not name: + raise ConstructorError("while 
constructing a Python module", mark, + "expected non-empty name appended to the tag", mark) + try: + __import__(name) + except ImportError, exc: + raise ConstructorError("while constructing a Python module", mark, + "cannot find module %r (%s)" % (name.encode('utf-8'), exc), mark) + return sys.modules[name] + + def find_python_name(self, name, mark): + if not name: + raise ConstructorError("while constructing a Python object", mark, + "expected non-empty name appended to the tag", mark) + if u'.' in name: + # Python 2.4 only + #module_name, object_name = name.rsplit('.', 1) + items = name.split('.') + object_name = items.pop() + module_name = '.'.join(items) + else: + module_name = '__builtin__' + object_name = name + try: + __import__(module_name) + except ImportError, exc: + raise ConstructorError("while constructing a Python object", mark, + "cannot find module %r (%s)" % (module_name.encode('utf-8'), exc), mark) + module = sys.modules[module_name] + if not hasattr(module, object_name): + raise ConstructorError("while constructing a Python object", mark, + "cannot find %r in the module %r" % (object_name.encode('utf-8'), + module.__name__), mark) + return getattr(module, object_name) + + def construct_python_name(self, suffix, node): + value = self.construct_scalar(node) + if value: + raise ConstructorError("while constructing a Python name", node.start_mark, + "expected the empty value, but found %r" % value.encode('utf-8'), + node.start_mark) + return self.find_python_name(suffix, node.start_mark) + + def construct_python_module(self, suffix, node): + value = self.construct_scalar(node) + if value: + raise ConstructorError("while constructing a Python module", node.start_mark, + "expected the empty value, but found %r" % value.encode('utf-8'), + node.start_mark) + return self.find_python_module(suffix, node.start_mark) + + class classobj: pass + + def make_python_instance(self, suffix, node, + args=None, kwds=None, newobj=False): + if not args: + args = [] + if not kwds: + kwds = {} + cls = self.find_python_name(suffix, node.start_mark) + if newobj and isinstance(cls, type(self.classobj)) \ + and not args and not kwds: + instance = self.classobj() + instance.__class__ = cls + return instance + elif newobj and isinstance(cls, type): + return cls.__new__(cls, *args, **kwds) + else: + return cls(*args, **kwds) + + def set_python_instance_state(self, instance, state): + if hasattr(instance, '__setstate__'): + instance.__setstate__(state) + else: + slotstate = {} + if isinstance(state, tuple) and len(state) == 2: + state, slotstate = state + if hasattr(instance, '__dict__'): + instance.__dict__.update(state) + elif state: + slotstate.update(state) + for key, value in slotstate.items(): + setattr(object, key, value) + + def construct_python_object(self, suffix, node): + # Format: + # !!python/object:module.name { ... state ... } + instance = self.make_python_instance(suffix, node, newobj=True) + state = self.construct_mapping(node) + self.set_python_instance_state(instance, state) + return instance + + def construct_python_object_apply(self, suffix, node, newobj=False): + # Format: + # !!python/object/apply # (or !!python/object/new) + # args: [ ... arguments ... ] + # kwds: { ... keywords ... } + # state: ... state ... + # listitems: [ ... listitems ... ] + # dictitems: { ... dictitems ... } + # or short format: + # !!python/object/apply [ ... arguments ... 
] + # The difference between !!python/object/apply and !!python/object/new + # is how an object is created, check make_python_instance for details. + if isinstance(node, SequenceNode): + args = self.construct_sequence(node) + kwds = {} + state = {} + listitems = [] + dictitems = {} + else: + value = self.construct_mapping(node) + args = value.get('args', []) + kwds = value.get('kwds', {}) + state = value.get('state', {}) + listitems = value.get('listitems', []) + dictitems = value.get('dictitems', {}) + instance = self.make_python_instance(suffix, node, args, kwds, newobj) + if state: + self.set_python_instance_state(instance, state) + if listitems: + instance.extend(listitems) + if dictitems: + for key in dictitems: + instance[key] = dictitems[key] + return instance + + def construct_python_object_new(self, suffix, node): + return self.construct_python_object_apply(suffix, node, newobj=True) + + +Constructor.add_constructor( + u'tag:yaml.org,2002:python/none', + Constructor.construct_yaml_null) + +Constructor.add_constructor( + u'tag:yaml.org,2002:python/bool', + Constructor.construct_yaml_bool) + +Constructor.add_constructor( + u'tag:yaml.org,2002:python/str', + Constructor.construct_python_str) + +Constructor.add_constructor( + u'tag:yaml.org,2002:python/unicode', + Constructor.construct_python_unicode) + +Constructor.add_constructor( + u'tag:yaml.org,2002:python/int', + Constructor.construct_yaml_int) + +Constructor.add_constructor( + u'tag:yaml.org,2002:python/long', + Constructor.construct_python_long) + +Constructor.add_constructor( + u'tag:yaml.org,2002:python/float', + Constructor.construct_yaml_float) + +Constructor.add_constructor( + u'tag:yaml.org,2002:python/complex', + Constructor.construct_python_complex) + +Constructor.add_constructor( + u'tag:yaml.org,2002:python/list', + Constructor.construct_yaml_seq) + +Constructor.add_constructor( + u'tag:yaml.org,2002:python/tuple', + Constructor.construct_python_tuple) + +Constructor.add_constructor( + u'tag:yaml.org,2002:python/dict', + Constructor.construct_yaml_map) + +Constructor.add_multi_constructor( + u'tag:yaml.org,2002:python/name:', + Constructor.construct_python_name) + +Constructor.add_multi_constructor( + u'tag:yaml.org,2002:python/module:', + Constructor.construct_python_module) + +Constructor.add_multi_constructor( + u'tag:yaml.org,2002:python/object:', + Constructor.construct_python_object) + +Constructor.add_multi_constructor( + u'tag:yaml.org,2002:python/object/apply:', + Constructor.construct_python_object_apply) + +Constructor.add_multi_constructor( + u'tag:yaml.org,2002:python/object/new:', + Constructor.construct_python_object_new) + diff --git a/scripts/external_libs/PyYAML-3.01/lib/yaml/dumper.py b/scripts/external_libs/PyYAML-3.01/lib/yaml/dumper.py new file mode 100644 index 00000000..355c1e2f --- /dev/null +++ b/scripts/external_libs/PyYAML-3.01/lib/yaml/dumper.py @@ -0,0 +1,62 @@ + +__all__ = ['BaseDumper', 'SafeDumper', 'Dumper'] + +from emitter import * +from serializer import * +from representer import * +from resolver import * + +class BaseDumper(Emitter, Serializer, BaseRepresenter, BaseResolver): + + def __init__(self, stream, + default_style=None, default_flow_style=None, + canonical=None, indent=None, width=None, + allow_unicode=None, line_break=None, + encoding=None, explicit_start=None, explicit_end=None, + version=None, tags=None): + Emitter.__init__(self, stream, canonical=canonical, + indent=indent, width=width, + allow_unicode=allow_unicode, line_break=line_break) + Serializer.__init__(self, 
encoding=encoding, + explicit_start=explicit_start, explicit_end=explicit_end, + version=version, tags=tags) + BaseRepresenter.__init__(self, default_style=default_style, + default_flow_style=default_flow_style) + BaseResolver.__init__(self) + +class SafeDumper(Emitter, Serializer, SafeRepresenter, Resolver): + + def __init__(self, stream, + default_style=None, default_flow_style=None, + canonical=None, indent=None, width=None, + allow_unicode=None, line_break=None, + encoding=None, explicit_start=None, explicit_end=None, + version=None, tags=None): + Emitter.__init__(self, stream, canonical=canonical, + indent=indent, width=width, + allow_unicode=allow_unicode, line_break=line_break) + Serializer.__init__(self, encoding=encoding, + explicit_start=explicit_start, explicit_end=explicit_end, + version=version, tags=tags) + SafeRepresenter.__init__(self, default_style=default_style, + default_flow_style=default_flow_style) + Resolver.__init__(self) + +class Dumper(Emitter, Serializer, Representer, Resolver): + + def __init__(self, stream, + default_style=None, default_flow_style=None, + canonical=None, indent=None, width=None, + allow_unicode=None, line_break=None, + encoding=None, explicit_start=None, explicit_end=None, + version=None, tags=None): + Emitter.__init__(self, stream, canonical=canonical, + indent=indent, width=width, + allow_unicode=allow_unicode, line_break=line_break) + Serializer.__init__(self, encoding=encoding, + explicit_start=explicit_start, explicit_end=explicit_end, + version=version, tags=tags) + Representer.__init__(self, default_style=default_style, + default_flow_style=default_flow_style) + Resolver.__init__(self) + diff --git a/scripts/external_libs/PyYAML-3.01/lib/yaml/emitter.py b/scripts/external_libs/PyYAML-3.01/lib/yaml/emitter.py new file mode 100644 index 00000000..a34c4526 --- /dev/null +++ b/scripts/external_libs/PyYAML-3.01/lib/yaml/emitter.py @@ -0,0 +1,1162 @@ + +# Emitter expects events obeying the following grammar: +# stream ::= STREAM-START document* STREAM-END +# document ::= DOCUMENT-START node DOCUMENT-END +# node ::= SCALAR | sequence | mapping +# sequence ::= SEQUENCE-START node* SEQUENCE-END +# mapping ::= MAPPING-START (node node)* MAPPING-END + +__all__ = ['Emitter', 'EmitterError'] + +from error import YAMLError +from events import * + +import re + +class EmitterError(YAMLError): + pass + +class ScalarAnalysis: + def __init__(self, scalar, empty, multiline, + allow_flow_plain, allow_block_plain, + allow_single_quoted, allow_double_quoted, + allow_block): + self.scalar = scalar + self.empty = empty + self.multiline = multiline + self.allow_flow_plain = allow_flow_plain + self.allow_block_plain = allow_block_plain + self.allow_single_quoted = allow_single_quoted + self.allow_double_quoted = allow_double_quoted + self.allow_block = allow_block + +class Emitter: + + DEFAULT_TAG_PREFIXES = { + u'!' : u'!', + u'tag:yaml.org,2002:' : u'!!', + } + + def __init__(self, stream, canonical=None, indent=None, width=None, + allow_unicode=None, line_break=None): + + # The stream should have the methods `write` and possibly `flush`. + self.stream = stream + + # Encoding can be overridden by STREAM-START. + self.encoding = None + + # Emitter is a state machine with a stack of states to handle nested + # structures. + self.states = [] + self.state = self.expect_stream_start + + # Current event and the event queue. + self.events = [] + self.event = None + + # The current indentation level and the stack of previous indents. 
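# (`indent` stays None until the first node is opened; `increase_indent`
# below pushes the previous value so nested collections can restore it.)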
+ self.indents = [] + self.indent = None + + # Flow level. + self.flow_level = 0 + + # Contexts. + self.root_context = False + self.sequence_context = False + self.mapping_context = False + self.simple_key_context = False + + # Characteristics of the last emitted character: + # - current position. + # - is it a whitespace? + # - is it an indention character + # (indentation space, '-', '?', or ':')? + self.line = 0 + self.column = 0 + self.whitespace = True + self.indention = True + + # Formatting details. + self.canonical = canonical + self.allow_unicode = allow_unicode + self.best_indent = 2 + if indent and 1 < indent < 10: + self.best_indent = indent + self.best_width = 80 + if width and width > self.best_indent*2: + self.best_width = width + self.best_line_break = u'\n' + if line_break in [u'\r', u'\n', u'\r\n']: + self.best_line_break = line_break + + # Tag prefixes. + self.tag_prefixes = None + + # Prepared anchor and tag. + self.prepared_anchor = None + self.prepared_tag = None + + # Scalar analysis and style. + self.analysis = None + self.style = None + + def emit(self, event): + self.events.append(event) + while not self.need_more_events(): + self.event = self.events.pop(0) + self.state() + self.event = None + + # In some cases, we wait for a few next events before emitting. + + def need_more_events(self): + if not self.events: + return True + event = self.events[0] + if isinstance(event, DocumentStartEvent): + return self.need_events(1) + elif isinstance(event, SequenceStartEvent): + return self.need_events(2) + elif isinstance(event, MappingStartEvent): + return self.need_events(3) + else: + return False + + def need_events(self, count): + level = 0 + for event in self.events[1:]: + if isinstance(event, (DocumentStartEvent, CollectionStartEvent)): + level += 1 + elif isinstance(event, (DocumentEndEvent, CollectionEndEvent)): + level -= 1 + elif isinstance(event, StreamEndEvent): + level = -1 + if level < 0: + return False + return (len(self.events) < count+1) + + def increase_indent(self, flow=False, indentless=False): + self.indents.append(self.indent) + if self.indent is None: + if flow: + self.indent = self.best_indent + else: + self.indent = 0 + elif not indentless: + self.indent += self.best_indent + + # States. + + # Stream handlers. + + def expect_stream_start(self): + if isinstance(self.event, StreamStartEvent): + if self.event.encoding: + self.encoding = self.event.encoding + self.write_stream_start() + self.state = self.expect_first_document_start + else: + raise EmitterError("expected StreamStartEvent, but got %s" + % self.event) + + def expect_nothing(self): + raise EmitterError("expected nothing, but got %s" % self.event) + + # Document handlers. 
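# A sketch of driving this state machine directly, assuming the package's
# top level exposes an `emit` helper that feeds events to an Emitter:
#
#     >>> from yaml.events import *
#     >>> yaml.emit([StreamStartEvent(), DocumentStartEvent(),
#     ...            ScalarEvent(None, None, (True, True), u'hello'),
#     ...            DocumentEndEvent(), StreamEndEvent()])
#     'hello\n'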
+ + def expect_first_document_start(self): + return self.expect_document_start(first=True) + + def expect_document_start(self, first=False): + if isinstance(self.event, DocumentStartEvent): + if self.event.version: + version_text = self.prepare_version(self.event.version) + self.write_version_directive(version_text) + self.tag_prefixes = self.DEFAULT_TAG_PREFIXES.copy() + if self.event.tags: + handles = self.event.tags.keys() + handles.sort() + for handle in handles: + prefix = self.event.tags[handle] + self.tag_prefixes[prefix] = handle + handle_text = self.prepare_tag_handle(handle) + prefix_text = self.prepare_tag_prefix(prefix) + self.write_tag_directive(handle_text, prefix_text) + implicit = (first and not self.event.explicit and not self.canonical + and not self.event.version and not self.event.tags + and not self.check_empty_document()) + if not implicit: + self.write_indent() + self.write_indicator(u'---', True) + if self.canonical: + self.write_indent() + self.state = self.expect_document_root + elif isinstance(self.event, StreamEndEvent): + self.write_stream_end() + self.state = self.expect_nothing + else: + raise EmitterError("expected DocumentStartEvent, but got %s" + % self.event) + + def expect_document_end(self): + if isinstance(self.event, DocumentEndEvent): + self.write_indent() + if self.event.explicit: + self.write_indicator(u'...', True) + self.write_indent() + self.flush_stream() + self.state = self.expect_document_start + else: + raise EmitterError("expected DocumentEndEvent, but got %s" + % self.event) + + def expect_document_root(self): + self.states.append(self.expect_document_end) + self.expect_node(root=True) + + # Node handlers. + + def expect_node(self, root=False, sequence=False, mapping=False, + simple_key=False): + self.root_context = root + self.sequence_context = sequence + self.mapping_context = mapping + self.simple_key_context = simple_key + if isinstance(self.event, AliasEvent): + self.expect_alias() + elif isinstance(self.event, (ScalarEvent, CollectionStartEvent)): + self.process_anchor(u'&') + self.process_tag() + if isinstance(self.event, ScalarEvent): + self.expect_scalar() + elif isinstance(self.event, SequenceStartEvent): + if self.flow_level or self.canonical or self.event.flow_style \ + or self.check_empty_sequence(): + self.expect_flow_sequence() + else: + self.expect_block_sequence() + elif isinstance(self.event, MappingStartEvent): + if self.flow_level or self.canonical or self.event.flow_style \ + or self.check_empty_mapping(): + self.expect_flow_mapping() + else: + self.expect_block_mapping() + else: + raise EmitterError("expected NodeEvent, but got %s" % self.event) + + def expect_alias(self): + if self.event.anchor is None: + raise EmitterError("anchor is not specified for alias") + self.process_anchor(u'*') + self.state = self.states.pop() + + def expect_scalar(self): + self.increase_indent(flow=True) + self.process_scalar() + self.indent = self.indents.pop() + self.state = self.states.pop() + + # Flow sequence handlers. 
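# The flow handlers produce the bracketed style, e.g. (a sketch, assuming
# `yaml.dump` is exported by the package):
#
#     >>> yaml.dump({'a': [1, 2]}, default_flow_style=True)
#     '{a: [1, 2]}\n'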
+ + def expect_flow_sequence(self): + self.write_indicator(u'[', True, whitespace=True) + self.flow_level += 1 + self.increase_indent(flow=True) + self.state = self.expect_first_flow_sequence_item + + def expect_first_flow_sequence_item(self): + if isinstance(self.event, SequenceEndEvent): + self.indent = self.indents.pop() + self.flow_level -= 1 + self.write_indicator(u']', False) + self.state = self.states.pop() + else: + if self.canonical or self.column > self.best_width: + self.write_indent() + self.states.append(self.expect_flow_sequence_item) + self.expect_node(sequence=True) + + def expect_flow_sequence_item(self): + if isinstance(self.event, SequenceEndEvent): + self.indent = self.indents.pop() + self.flow_level -= 1 + if self.canonical: + self.write_indicator(u',', False) + self.write_indent() + self.write_indicator(u']', False) + self.state = self.states.pop() + else: + self.write_indicator(u',', False) + if self.canonical or self.column > self.best_width: + self.write_indent() + self.states.append(self.expect_flow_sequence_item) + self.expect_node(sequence=True) + + # Flow mapping handlers. + + def expect_flow_mapping(self): + self.write_indicator(u'{', True, whitespace=True) + self.flow_level += 1 + self.increase_indent(flow=True) + self.state = self.expect_first_flow_mapping_key + + def expect_first_flow_mapping_key(self): + if isinstance(self.event, MappingEndEvent): + self.indent = self.indents.pop() + self.flow_level -= 1 + self.write_indicator(u'}', False) + self.state = self.states.pop() + else: + if self.canonical or self.column > self.best_width: + self.write_indent() + if not self.canonical and self.check_simple_key(): + self.states.append(self.expect_flow_mapping_simple_value) + self.expect_node(mapping=True, simple_key=True) + else: + self.write_indicator(u'?', True) + self.states.append(self.expect_flow_mapping_value) + self.expect_node(mapping=True) + + def expect_flow_mapping_key(self): + if isinstance(self.event, MappingEndEvent): + self.indent = self.indents.pop() + self.flow_level -= 1 + if self.canonical: + self.write_indicator(u',', False) + self.write_indent() + self.write_indicator(u'}', False) + self.state = self.states.pop() + else: + self.write_indicator(u',', False) + if self.canonical or self.column > self.best_width: + self.write_indent() + if not self.canonical and self.check_simple_key(): + self.states.append(self.expect_flow_mapping_simple_value) + self.expect_node(mapping=True, simple_key=True) + else: + self.write_indicator(u'?', True) + self.states.append(self.expect_flow_mapping_value) + self.expect_node(mapping=True) + + def expect_flow_mapping_simple_value(self): + self.write_indicator(u':', False) + self.states.append(self.expect_flow_mapping_key) + self.expect_node(mapping=True) + + def expect_flow_mapping_value(self): + if self.canonical or self.column > self.best_width: + self.write_indent() + self.write_indicator(u':', True) + self.states.append(self.expect_flow_mapping_key) + self.expect_node(mapping=True) + + # Block sequence handlers. 
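# The block handlers produce the indented style, e.g. (same assumption):
#
#     >>> yaml.dump({'a': [1, 2]}, default_flow_style=False)
#     'a:\n- 1\n- 2\n'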
+ + def expect_block_sequence(self): + indentless = (self.mapping_context and not self.indention) + self.increase_indent(flow=False, indentless=indentless) + self.state = self.expect_first_block_sequence_item + + def expect_first_block_sequence_item(self): + return self.expect_block_sequence_item(first=True) + + def expect_block_sequence_item(self, first=False): + if not first and isinstance(self.event, SequenceEndEvent): + self.indent = self.indents.pop() + self.state = self.states.pop() + else: + self.write_indent() + self.write_indicator(u'-', True, indention=True) + self.states.append(self.expect_block_sequence_item) + self.expect_node(sequence=True) + + # Block mapping handlers. + + def expect_block_mapping(self): + self.increase_indent(flow=False) + self.state = self.expect_first_block_mapping_key + + def expect_first_block_mapping_key(self): + return self.expect_block_mapping_key(first=True) + + def expect_block_mapping_key(self, first=False): + if not first and isinstance(self.event, MappingEndEvent): + self.indent = self.indents.pop() + self.state = self.states.pop() + else: + self.write_indent() + if self.check_simple_key(): + self.states.append(self.expect_block_mapping_simple_value) + self.expect_node(mapping=True, simple_key=True) + else: + self.write_indicator(u'?', True, indention=True) + self.states.append(self.expect_block_mapping_value) + self.expect_node(mapping=True) + + def expect_block_mapping_simple_value(self): + self.write_indicator(u':', False) + self.states.append(self.expect_block_mapping_key) + self.expect_node(mapping=True) + + def expect_block_mapping_value(self): + self.write_indent() + self.write_indicator(u':', True, indention=True) + self.states.append(self.expect_block_mapping_key) + self.expect_node(mapping=True) + + # Checkers. + + def check_empty_sequence(self): + return (isinstance(self.event, SequenceStartEvent) and self.events + and isinstance(self.events[0], SequenceEndEvent)) + + def check_empty_mapping(self): + return (isinstance(self.event, MappingStartEvent) and self.events + and isinstance(self.events[0], MappingEndEvent)) + + def check_empty_document(self): + if not isinstance(self.event, DocumentStartEvent) or not self.events: + return False + event = self.events[0] + return (isinstance(event, ScalarEvent) and event.anchor is None + and event.tag is None and event.implicit and event.value == u'') + + def check_simple_key(self): + length = 0 + if isinstance(self.event, NodeEvent) and self.event.anchor is not None: + if self.prepared_anchor is None: + self.prepared_anchor = self.prepare_anchor(self.event.anchor) + length += len(self.prepared_anchor) + if isinstance(self.event, (ScalarEvent, CollectionStartEvent)) \ + and self.event.tag is not None: + if self.prepared_tag is None: + self.prepared_tag = self.prepare_tag(self.event.tag) + length += len(self.prepared_tag) + if isinstance(self.event, ScalarEvent): + if self.analysis is None: + self.analysis = self.analyze_scalar(self.event.value) + length += len(self.analysis.scalar) + return (length < 128 and (isinstance(self.event, AliasEvent) + or (isinstance(self.event, ScalarEvent) + and not self.analysis.empty and not self.analysis.multiline) + or self.check_empty_sequence() or self.check_empty_mapping())) + + # Anchor, Tag, and Scalar processors. 
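# These decide when '&anchor', '*alias' and explicit tags get written;
# dumping one object referenced twice shows both (a sketch):
#
#     >>> shared = ['x']
#     >>> yaml.dump([shared, shared])
#     '- &id001 [x]\n- *id001\n'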
+ + def process_anchor(self, indicator): + if self.event.anchor is None: + self.prepared_anchor = None + return + if self.prepared_anchor is None: + self.prepared_anchor = self.prepare_anchor(self.event.anchor) + if self.prepared_anchor: + self.write_indicator(indicator+self.prepared_anchor, True) + self.prepared_anchor = None + + def process_tag(self): + tag = self.event.tag + if isinstance(self.event, ScalarEvent): + if self.style is None: + self.style = self.choose_scalar_style() + if ((not self.canonical or tag is None) and + ((self.style == '' and self.event.implicit[0]) + or (self.style != '' and self.event.implicit[1]))): + self.prepared_tag = None + return + if self.event.implicit[0] and tag is None: + tag = u'!' + self.prepared_tag = None + else: + if (not self.canonical or tag is None) and self.event.implicit: + self.prepared_tag = None + return + if tag is None: + raise EmitterError("tag is not specified") + if self.prepared_tag is None: + self.prepared_tag = self.prepare_tag(tag) + if self.prepared_tag: + self.write_indicator(self.prepared_tag, True) + self.prepared_tag = None + + def choose_scalar_style(self): + if self.analysis is None: + self.analysis = self.analyze_scalar(self.event.value) + if self.event.style == '"' or self.canonical: + return '"' + if not self.event.style and self.event.implicit[0]: + if (not (self.simple_key_context and + (self.analysis.empty or self.analysis.multiline)) + and (self.flow_level and self.analysis.allow_flow_plain + or (not self.flow_level and self.analysis.allow_block_plain))): + return '' + if self.event.style and self.event.style in '|>': + if not self.flow_level and self.analysis.allow_block: + return self.event.style + if not self.event.style or self.event.style == '\'': + if (self.analysis.allow_single_quoted and + not (self.simple_key_context and self.analysis.multiline)): + return '\'' + return '"' + + def process_scalar(self): + if self.analysis is None: + self.analysis = self.analyze_scalar(self.event.value) + if self.style is None: + self.style = self.choose_scalar_style() + split = (not self.simple_key_context) + #if self.analysis.multiline and split \ + # and (not self.style or self.style in '\'\"'): + # self.write_indent() + if self.style == '"': + self.write_double_quoted(self.analysis.scalar, split) + elif self.style == '\'': + self.write_single_quoted(self.analysis.scalar, split) + elif self.style == '>': + self.write_folded(self.analysis.scalar) + elif self.style == '|': + self.write_literal(self.analysis.scalar) + else: + self.write_plain(self.analysis.scalar, split) + self.analysis = None + self.style = None + + # Analyzers. + + def prepare_version(self, version): + major, minor = version + if major != 1: + raise EmitterError("unsupported YAML version: %d.%d" % (major, minor)) + return u'%d.%d' % (major, minor) + + def prepare_tag_handle(self, handle): + if not handle: + raise EmitterError("tag handle must not be empty") + if handle[0] != u'!' 
or handle[-1] != u'!': + raise EmitterError("tag handle must start and end with '!': %r" + % (handle.encode('utf-8'))) + for ch in handle[1:-1]: + if not (u'0' <= ch <= u'9' or u'A' <= ch <= 'Z' or u'a' <= ch <= 'z' \ + or ch in u'-_'): + raise EmitterError("invalid character %r in the tag handle: %r" + % (ch.encode('utf-8'), handle.encode('utf-8'))) + return handle + + def prepare_tag_prefix(self, prefix): + if not prefix: + raise EmitterError("tag prefix must not be empty") + chunks = [] + start = end = 0 + if prefix[0] == u'!': + end = 1 + while end < len(prefix): + ch = prefix[end] + if u'0' <= ch <= u'9' or u'A' <= ch <= 'Z' or u'a' <= ch <= 'z' \ + or ch in u'-;/?!:@&=+$,_.~*\'()[]': + end += 1 + else: + if start < end: + chunks.append(prefix[start:end]) + start = end = end+1 + data = ch.encode('utf-8') + for ch in data: + chunks.append(u'%%%02X' % ord(ch)) + if start < end: + chunks.append(prefix[start:end]) + return u''.join(chunks) + + def prepare_tag(self, tag): + if not tag: + raise EmitterError("tag must not be empty") + if tag == u'!': + return tag + handle = None + suffix = tag + for prefix in self.tag_prefixes: + if tag.startswith(prefix) \ + and (prefix == u'!' or len(prefix) < len(tag)): + handle = self.tag_prefixes[prefix] + suffix = tag[len(prefix):] + chunks = [] + start = end = 0 + while end < len(suffix): + ch = suffix[end] + if u'0' <= ch <= u'9' or u'A' <= ch <= 'Z' or u'a' <= ch <= 'z' \ + or ch in u'-;/?:@&=+$,_.~*\'()[]' \ + or (ch == u'!' and handle != u'!'): + end += 1 + else: + if start < end: + chunks.append(suffix[start:end]) + start = end = end+1 + data = ch.encode('utf-8') + for ch in data: + chunks.append(u'%%%02X' % ord(ch)) + if start < end: + chunks.append(suffix[start:end]) + suffix_text = u''.join(chunks) + if handle: + return u'%s%s' % (handle, suffix_text) + else: + return u'!<%s>' % suffix_text + + def prepare_anchor(self, anchor): + if not anchor: + raise EmitterError("anchor must not be empty") + for ch in anchor: + if not (u'0' <= ch <= u'9' or u'A' <= ch <= 'Z' or u'a' <= ch <= 'z' \ + or ch in u'-_'): + raise EmitterError("invalid character %r in the anchor: %r" + % (ch.encode('utf-8'), anchor.encode('utf-8'))) + return anchor + + def analyze_scalar(self, scalar): + + # Empty scalar is a special case. + if not scalar: + return ScalarAnalysis(scalar=scalar, empty=True, multiline=False, + allow_flow_plain=False, allow_block_plain=True, + allow_single_quoted=True, allow_double_quoted=True, + allow_block=False) + + # Indicators and special characters. + block_indicators = False + flow_indicators = False + line_breaks = False + special_characters = False + + # Whitespaces. + inline_spaces = False # non-space space+ non-space + inline_breaks = False # non-space break+ non-space + leading_spaces = False # ^ space+ (non-space | $) + leading_breaks = False # ^ break+ (non-space | $) + trailing_spaces = False # (^ | non-space) space+ $ + trailing_breaks = False # (^ | non-space) break+ $ + inline_breaks_spaces = False # non-space break+ space+ non-space + mixed_breaks_spaces = False # anything else + + # Check document indicators. + if scalar.startswith(u'---') or scalar.startswith(u'...'): + block_indicators = True + flow_indicators = True + + # First character or preceded by a whitespace. + preceeded_by_space = True + + # Last character or followed by a whitespace. + followed_by_space = (len(scalar) == 1 or + scalar[1] in u'\0 \t\r\n\x85\u2028\u2029') + + # The current series of whitespaces contain plain spaces. 
+ spaces = False + + # The current series of whitespaces contain line breaks. + breaks = False + + # The current series of whitespaces contain a space followed by a + # break. + mixed = False + + # The current series of whitespaces start at the beginning of the + # scalar. + leading = False + + index = 0 + while index < len(scalar): + ch = scalar[index] + + # Check for indicators. + + if index == 0: + # Leading indicators are special characters. + if ch in u'#,[]{}#&*!|>\'\"%@`': + flow_indicators = True + block_indicators = True + if ch in u'?:': + flow_indicators = True + if followed_by_space: + block_indicators = True + if ch == u'-' and followed_by_space: + flow_indicators = True + block_indicators = True + else: + # Some indicators cannot appear within a scalar as well. + if ch in u',?[]{}': + flow_indicators = True + if ch == u':': + flow_indicators = True + if followed_by_space: + block_indicators = True + if ch == u'#' and preceeded_by_space: + flow_indicators = True + block_indicators = True + + # Check for line breaks, special, and unicode characters. + + if ch in u'\n\x85\u2028\u2029': + line_breaks = True + if not (ch == u'\n' or u'\x20' <= ch <= u'\x7E'): + if (ch == u'\x85' or u'\xA0' <= ch <= u'\uD7FF' + or u'\uE000' <= ch <= u'\uFFFD') and ch != u'\uFEFF': + unicode_characters = True + if not self.allow_unicode: + special_characters = True + else: + special_characters = True + + # Spaces, line breaks, and how they are mixed. State machine. + + # Start or continue series of whitespaces. + if ch in u' \n\x85\u2028\u2029': + if spaces and breaks: + if ch != u' ': # break+ (space+ break+) => mixed + mixed = True + elif spaces: + if ch != u' ': # (space+ break+) => mixed + breaks = True + mixed = True + elif breaks: + if ch == u' ': # break+ space+ + spaces = True + else: + leading = (index == 0) + if ch == u' ': # space+ + spaces = True + else: # break+ + breaks = True + + # Series of whitespaces ended with a non-space. + elif spaces or breaks: + if leading: + if spaces and breaks: + mixed_breaks_spaces = True + elif spaces: + leading_spaces = True + elif breaks: + leading_breaks = True + else: + if mixed: + mixed_breaks_spaces = True + elif spaces and breaks: + inline_breaks_spaces = True + elif spaces: + inline_spaces = True + elif breaks: + inline_breaks = True + spaces = breaks = mixed = leading = False + + # Series of whitespaces reach the end. + if (spaces or breaks) and (index == len(scalar)-1): + if spaces and breaks: + mixed_breaks_spaces = True + elif spaces: + trailing_spaces = True + if leading: + leading_spaces = True + elif breaks: + trailing_breaks = True + if leading: + leading_breaks = True + spaces = breaks = mixed = leading = False + + # Prepare for the next character. + index += 1 + preceeded_by_space = (ch in u'\0 \t\r\n\x85\u2028\u2029') + followed_by_space = (index+1 >= len(scalar) or + scalar[index+1] in u'\0 \t\r\n\x85\u2028\u2029') + + # Let's decide what styles are allowed. + allow_flow_plain = True + allow_block_plain = True + allow_single_quoted = True + allow_double_quoted = True + allow_block = True + + # Leading and trailing whitespace are bad for plain scalars. We also + # do not want to mess with leading whitespaces for block scalars. + if leading_spaces or leading_breaks or trailing_spaces: + allow_flow_plain = allow_block_plain = allow_block = False + + # Trailing breaks are fine for block scalars, but unacceptable for + # plain scalars. 
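# (For example, u'text\n' ends with a break: it can still be written as a
# literal block scalar, but not as a plain scalar.)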
+ if trailing_breaks: + allow_flow_plain = allow_block_plain = False + + # The combination of (space+ break+) is only acceptable for block + # scalars. + if inline_breaks_spaces: + allow_flow_plain = allow_block_plain = allow_single_quoted = False + + # Mixed spaces and breaks, as well as special character are only + # allowed for double quoted scalars. + if mixed_breaks_spaces or special_characters: + allow_flow_plain = allow_block_plain = \ + allow_single_quoted = allow_block = False + + # We don't emit multiline plain scalars. + if line_breaks: + allow_flow_plain = allow_block_plain = False + + # Flow indicators are forbidden for flow plain scalars. + if flow_indicators: + allow_flow_plain = False + + # Block indicators are forbidden for block plain scalars. + if block_indicators: + allow_block_plain = False + + return ScalarAnalysis(scalar=scalar, + empty=False, multiline=line_breaks, + allow_flow_plain=allow_flow_plain, + allow_block_plain=allow_block_plain, + allow_single_quoted=allow_single_quoted, + allow_double_quoted=allow_double_quoted, + allow_block=allow_block) + + # Writers. + + def flush_stream(self): + if hasattr(self.stream, 'flush'): + self.stream.flush() + + def write_stream_start(self): + # Write BOM if needed. + if self.encoding and self.encoding.startswith('utf-16'): + self.stream.write(u'\xFF\xFE'.encode(self.encoding)) + + def write_stream_end(self): + self.flush_stream() + + def write_indicator(self, indicator, need_whitespace, + whitespace=False, indention=False): + if self.whitespace or not need_whitespace: + data = indicator + else: + data = u' '+indicator + self.whitespace = whitespace + self.indention = self.indention and indention + self.column += len(data) + if self.encoding: + data = data.encode(self.encoding) + self.stream.write(data) + + def write_indent(self): + indent = self.indent or 0 + if not self.indention or self.column > indent \ + or (self.column == indent and not self.whitespace): + self.write_line_break() + if self.column < indent: + self.whitespace = True + data = u' '*(indent-self.column) + self.column = indent + if self.encoding: + data = data.encode(self.encoding) + self.stream.write(data) + + def write_line_break(self, data=None): + if data is None: + data = self.best_line_break + self.whitespace = True + self.indention = True + self.line += 1 + self.column = 0 + if self.encoding: + data = data.encode(self.encoding) + self.stream.write(data) + + def write_version_directive(self, version_text): + data = u'%%YAML %s' % version_text + if self.encoding: + data = data.encode(self.encoding) + self.stream.write(data) + self.write_line_break() + + def write_tag_directive(self, handle_text, prefix_text): + data = u'%%TAG %s %s' % (handle_text, prefix_text) + if self.encoding: + data = data.encode(self.encoding) + self.stream.write(data) + self.write_line_break() + + # Scalar streams. 
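# These writers realize the style chosen above, e.g. (a sketch):
#
#     >>> yaml.dump("plain text")
#     'plain text\n'
#     >>> yaml.dump("has: colon")
#     "'has: colon'\n"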
+ + def write_single_quoted(self, text, split=True): + self.write_indicator(u'\'', True) + spaces = False + breaks = False + start = end = 0 + while end <= len(text): + ch = None + if end < len(text): + ch = text[end] + if spaces: + if ch is None or ch != u' ': + if start+1 == end and self.column > self.best_width and split \ + and start != 0 and end != len(text): + self.write_indent() + else: + data = text[start:end] + self.column += len(data) + if self.encoding: + data = data.encode(self.encoding) + self.stream.write(data) + start = end + elif breaks: + if ch is None or ch not in u'\n\x85\u2028\u2029': + if text[start] == u'\n': + self.write_line_break() + for br in text[start:end]: + if br == u'\n': + self.write_line_break() + else: + self.write_line_break(br) + self.write_indent() + start = end + else: + if ch is None or ch in u' \n\x85\u2028\u2029' or ch == u'\'': + if start < end: + data = text[start:end] + self.column += len(data) + if self.encoding: + data = data.encode(self.encoding) + self.stream.write(data) + start = end + if ch == u'\'': + data = u'\'\'' + self.column += 2 + if self.encoding: + data = data.encode(self.encoding) + self.stream.write(data) + start = end + 1 + if ch is not None: + spaces = (ch == u' ') + breaks = (ch in u'\n\x85\u2028\u2029') + end += 1 + self.write_indicator(u'\'', False) + + ESCAPE_REPLACEMENTS = { + u'\0': u'0', + u'\x07': u'a', + u'\x08': u'b', + u'\x09': u't', + u'\x0A': u'n', + u'\x0B': u'v', + u'\x0C': u'f', + u'\x0D': u'r', + u'\x1B': u'e', + u'\"': u'\"', + u'\\': u'\\', + u'\x85': u'N', + u'\xA0': u'_', + u'\u2028': u'L', + u'\u2029': u'P', + } + + def write_double_quoted(self, text, split=True): + self.write_indicator(u'"', True) + start = end = 0 + while end <= len(text): + ch = None + if end < len(text): + ch = text[end] + if ch is None or ch in u'"\\\x85\u2028\u2029\uFEFF' \ + or not (u'\x20' <= ch <= u'\x7E' + or (self.allow_unicode + and (u'\xA0' <= ch <= u'\uD7FF' + or u'\uE000' <= ch <= u'\uFFFD'))): + if start < end: + data = text[start:end] + self.column += len(data) + if self.encoding: + data = data.encode(self.encoding) + self.stream.write(data) + start = end + if ch is not None: + if ch in self.ESCAPE_REPLACEMENTS: + data = u'\\'+self.ESCAPE_REPLACEMENTS[ch] + elif ch <= u'\xFF': + data = u'\\x%02X' % ord(ch) + elif ch <= u'\uFFFF': + data = u'\\u%04X' % ord(ch) + else: + data = u'\\U%08X' % ord(ch) + self.column += len(data) + if self.encoding: + data = data.encode(self.encoding) + self.stream.write(data) + start = end+1 + if 0 < end < len(text)-1 and (ch == u' ' or start >= end) \ + and self.column+(end-start) > self.best_width and split: + data = text[start:end]+u'\\' + if start < end: + start = end + self.column += len(data) + if self.encoding: + data = data.encode(self.encoding) + self.stream.write(data) + self.write_indent() + self.whitespace = False + self.indention = False + if text[start] == u' ': + data = u'\\' + self.column += len(data) + if self.encoding: + data = data.encode(self.encoding) + self.stream.write(data) + end += 1 + self.write_indicator(u'"', False) + + def determine_chomp(self, text): + tail = text[-2:] + while len(tail) < 2: + tail = u' '+tail + if tail[-1] in u'\n\x85\u2028\u2029': + if tail[-2] in u'\n\x85\u2028\u2029': + return u'+' + else: + return u'' + else: + return u'-' + + def write_folded(self, text): + chomp = self.determine_chomp(text) + self.write_indicator(u'>'+chomp, True) + self.write_indent() + leading_space = False + spaces = False + breaks = False + start = end = 0 + while end <= 
len(text): + ch = None + if end < len(text): + ch = text[end] + if breaks: + if ch is None or ch not in u'\n\x85\u2028\u2029': + if not leading_space and ch is not None and ch != u' ' \ + and text[start] == u'\n': + self.write_line_break() + leading_space = (ch == u' ') + for br in text[start:end]: + if br == u'\n': + self.write_line_break() + else: + self.write_line_break(br) + if ch is not None: + self.write_indent() + start = end + elif spaces: + if ch != u' ': + if start+1 == end and self.column > self.best_width: + self.write_indent() + else: + data = text[start:end] + self.column += len(data) + if self.encoding: + data = data.encode(self.encoding) + self.stream.write(data) + start = end + else: + if ch is None or ch in u' \n\x85\u2028\u2029': + data = text[start:end] + if self.encoding: + data = data.encode(self.encoding) + self.stream.write(data) + if ch is None: + self.write_line_break() + start = end + if ch is not None: + breaks = (ch in u'\n\x85\u2028\u2029') + spaces = (ch == u' ') + end += 1 + + def write_literal(self, text): + chomp = self.determine_chomp(text) + self.write_indicator(u'|'+chomp, True) + self.write_indent() + breaks = False + start = end = 0 + while end <= len(text): + ch = None + if end < len(text): + ch = text[end] + if breaks: + if ch is None or ch not in u'\n\x85\u2028\u2029': + for br in text[start:end]: + if br == u'\n': + self.write_line_break() + else: + self.write_line_break(br) + if ch is not None: + self.write_indent() + start = end + else: + if ch is None or ch in u'\n\x85\u2028\u2029': + data = text[start:end] + if self.encoding: + data = data.encode(self.encoding) + self.stream.write(data) + if ch is None: + self.write_line_break() + start = end + if ch is not None: + breaks = (ch in u'\n\x85\u2028\u2029') + end += 1 + + def write_plain(self, text, split=True): + if not text: + return + if not self.whitespace: + data = u' ' + self.column += len(data) + if self.encoding: + data = data.encode(self.encoding) + self.stream.write(data) + self.whitespace = False + self.indention = False + spaces = False + breaks = False + start = end = 0 + while end <= len(text): + ch = None + if end < len(text): + ch = text[end] + if spaces: + if ch != u' ': + if start+1 == end and self.column > self.best_width and split: + self.write_indent() + self.whitespace = False + self.indention = False + else: + data = text[start:end] + self.column += len(data) + if self.encoding: + data = data.encode(self.encoding) + self.stream.write(data) + start = end + elif breaks: + if ch not in u'\n\x85\u2028\u2029': + if text[start] == u'\n': + self.write_line_break() + for br in text[start:end]: + if br == u'\n': + self.write_line_break() + else: + self.write_line_break(br) + self.write_indent() + self.whitespace = False + self.indention = False + start = end + else: + if ch is None or ch in u' \n\x85\u2028\u2029': + data = text[start:end] + self.column += len(data) + if self.encoding: + data = data.encode(self.encoding) + self.stream.write(data) + start = end + if ch is not None: + spaces = (ch == u' ') + breaks = (ch in u'\n\x85\u2028\u2029') + end += 1 + diff --git a/scripts/external_libs/PyYAML-3.01/lib/yaml/error.py b/scripts/external_libs/PyYAML-3.01/lib/yaml/error.py new file mode 100644 index 00000000..8fa916b2 --- /dev/null +++ b/scripts/external_libs/PyYAML-3.01/lib/yaml/error.py @@ -0,0 +1,75 @@ + +__all__ = ['Mark', 'YAMLError', 'MarkedYAMLError'] + +class Mark: + + def __init__(self, name, index, line, column, buffer, pointer): + self.name = name + self.index = index + 
self.line = line + self.column = column + self.buffer = buffer + self.pointer = pointer + + def get_snippet(self, indent=4, max_length=75): + if self.buffer is None: + return None + head = '' + start = self.pointer + while start > 0 and self.buffer[start-1] not in u'\0\r\n\x85\u2028\u2029': + start -= 1 + if self.pointer-start > max_length/2-1: + head = ' ... ' + start += 5 + break + tail = '' + end = self.pointer + while end < len(self.buffer) and self.buffer[end] not in u'\0\r\n\x85\u2028\u2029': + end += 1 + if end-self.pointer > max_length/2-1: + tail = ' ... ' + end -= 5 + break + snippet = self.buffer[start:end].encode('utf-8') + return ' '*indent + head + snippet + tail + '\n' \ + + ' '*(indent+self.pointer-start+len(head)) + '^' + + def __str__(self): + snippet = self.get_snippet() + where = " in \"%s\", line %d, column %d" \ + % (self.name, self.line+1, self.column+1) + if snippet is not None: + where += ":\n"+snippet + return where + +class YAMLError(Exception): + pass + +class MarkedYAMLError(YAMLError): + + def __init__(self, context=None, context_mark=None, + problem=None, problem_mark=None, note=None): + self.context = context + self.context_mark = context_mark + self.problem = problem + self.problem_mark = problem_mark + self.note = note + + def __str__(self): + lines = [] + if self.context is not None: + lines.append(self.context) + if self.context_mark is not None \ + and (self.problem is None or self.problem_mark is None + or self.context_mark.name != self.problem_mark.name + or self.context_mark.line != self.problem_mark.line + or self.context_mark.column != self.problem_mark.column): + lines.append(str(self.context_mark)) + if self.problem is not None: + lines.append(self.problem) + if self.problem_mark is not None: + lines.append(str(self.problem_mark)) + if self.note is not None: + lines.append(self.note) + return '\n'.join(lines) + diff --git a/scripts/external_libs/PyYAML-3.01/lib/yaml/events.py b/scripts/external_libs/PyYAML-3.01/lib/yaml/events.py new file mode 100644 index 00000000..3f244fa0 --- /dev/null +++ b/scripts/external_libs/PyYAML-3.01/lib/yaml/events.py @@ -0,0 +1,86 @@ + +# Abstract classes. + +class Event: + def __init__(self, start_mark=None, end_mark=None): + self.start_mark = start_mark + self.end_mark = end_mark + def __repr__(self): + attributes = [key for key in ['anchor', 'tag', 'implicit', 'value'] + if hasattr(self, key)] + arguments = ', '.join(['%s=%r' % (key, getattr(self, key)) + for key in attributes]) + return '%s(%s)' % (self.__class__.__name__, arguments) + +class NodeEvent(Event): + def __init__(self, anchor, start_mark=None, end_mark=None): + self.anchor = anchor + self.start_mark = start_mark + self.end_mark = end_mark + +class CollectionStartEvent(NodeEvent): + def __init__(self, anchor, tag, implicit, start_mark=None, end_mark=None, + flow_style=None): + self.anchor = anchor + self.tag = tag + self.implicit = implicit + self.start_mark = start_mark + self.end_mark = end_mark + self.flow_style = flow_style + +class CollectionEndEvent(Event): + pass + +# Implementations. 
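# A sketch of these events as the parser produces them (assuming the
# package exposes `parse`):
#
#     >>> for event in yaml.parse("[1, 2]"): print event
#     StreamStartEvent()
#     DocumentStartEvent()
#     SequenceStartEvent(anchor=None, tag=None, implicit=True)
#     ScalarEvent(anchor=None, tag=None, implicit=(True, False), value=u'1')
#     ScalarEvent(anchor=None, tag=None, implicit=(True, False), value=u'2')
#     SequenceEndEvent()
#     DocumentEndEvent()
#     StreamEndEvent()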
+ +class StreamStartEvent(Event): + def __init__(self, start_mark=None, end_mark=None, encoding=None): + self.start_mark = start_mark + self.end_mark = end_mark + self.encoding = encoding + +class StreamEndEvent(Event): + pass + +class DocumentStartEvent(Event): + def __init__(self, start_mark=None, end_mark=None, + explicit=None, version=None, tags=None): + self.start_mark = start_mark + self.end_mark = end_mark + self.explicit = explicit + self.version = version + self.tags = tags + +class DocumentEndEvent(Event): + def __init__(self, start_mark=None, end_mark=None, + explicit=None): + self.start_mark = start_mark + self.end_mark = end_mark + self.explicit = explicit + +class AliasEvent(NodeEvent): + pass + +class ScalarEvent(NodeEvent): + def __init__(self, anchor, tag, implicit, value, + start_mark=None, end_mark=None, style=None): + self.anchor = anchor + self.tag = tag + self.implicit = implicit + self.value = value + self.start_mark = start_mark + self.end_mark = end_mark + self.style = style + +class SequenceStartEvent(CollectionStartEvent): + pass + +class SequenceEndEvent(CollectionEndEvent): + pass + +class MappingStartEvent(CollectionStartEvent): + pass + +class MappingEndEvent(CollectionEndEvent): + pass + diff --git a/scripts/external_libs/PyYAML-3.01/lib/yaml/loader.py b/scripts/external_libs/PyYAML-3.01/lib/yaml/loader.py new file mode 100644 index 00000000..293ff467 --- /dev/null +++ b/scripts/external_libs/PyYAML-3.01/lib/yaml/loader.py @@ -0,0 +1,40 @@ + +__all__ = ['BaseLoader', 'SafeLoader', 'Loader'] + +from reader import * +from scanner import * +from parser import * +from composer import * +from constructor import * +from resolver import * + +class BaseLoader(Reader, Scanner, Parser, Composer, BaseConstructor, BaseResolver): + + def __init__(self, stream): + Reader.__init__(self, stream) + Scanner.__init__(self) + Parser.__init__(self) + Composer.__init__(self) + BaseConstructor.__init__(self) + BaseResolver.__init__(self) + +class SafeLoader(Reader, Scanner, Parser, Composer, SafeConstructor, Resolver): + + def __init__(self, stream): + Reader.__init__(self, stream) + Scanner.__init__(self) + Parser.__init__(self) + Composer.__init__(self) + SafeConstructor.__init__(self) + Resolver.__init__(self) + +class Loader(Reader, Scanner, Parser, Composer, Constructor, Resolver): + + def __init__(self, stream): + Reader.__init__(self, stream) + Scanner.__init__(self) + Parser.__init__(self) + Composer.__init__(self) + Constructor.__init__(self) + Resolver.__init__(self) + diff --git a/scripts/external_libs/PyYAML-3.01/lib/yaml/nodes.py b/scripts/external_libs/PyYAML-3.01/lib/yaml/nodes.py new file mode 100644 index 00000000..cb8c1cba --- /dev/null +++ b/scripts/external_libs/PyYAML-3.01/lib/yaml/nodes.py @@ -0,0 +1,49 @@ + +class Node: + def __init__(self, tag, value, start_mark, end_mark): + self.tag = tag + self.value = value + self.start_mark = start_mark + self.end_mark = end_mark + def __repr__(self): + value = self.value + #if isinstance(value, list): + # if len(value) == 0: + # value = '<empty>' + # elif len(value) == 1: + # value = '<1 item>' + # else: + # value = '<%d items>' % len(value) + #else: + # if len(value) > 75: + # value = repr(value[:70]+u' ... 
') + # else: + # value = repr(value) + value = repr(value) + return '%s(tag=%r, value=%s)' % (self.__class__.__name__, self.tag, value) + +class ScalarNode(Node): + id = 'scalar' + def __init__(self, tag, value, + start_mark=None, end_mark=None, style=None): + self.tag = tag + self.value = value + self.start_mark = start_mark + self.end_mark = end_mark + self.style = style + +class CollectionNode(Node): + def __init__(self, tag, value, + start_mark=None, end_mark=None, flow_style=None): + self.tag = tag + self.value = value + self.start_mark = start_mark + self.end_mark = end_mark + self.flow_style = flow_style + +class SequenceNode(CollectionNode): + id = 'sequence' + +class MappingNode(CollectionNode): + id = 'mapping' + diff --git a/scripts/external_libs/PyYAML-3.01/lib/yaml/parser.py b/scripts/external_libs/PyYAML-3.01/lib/yaml/parser.py new file mode 100644 index 00000000..2aec0fe3 --- /dev/null +++ b/scripts/external_libs/PyYAML-3.01/lib/yaml/parser.py @@ -0,0 +1,484 @@ + +# YAML can be parsed by an LL(1) parser! +# +# We use the following production rules: +# stream ::= STREAM-START implicit_document? explicit_document* STREAM-END +# explicit_document ::= DIRECTIVE* DOCUMENT-START block_node? DOCUMENT-END? +# implicit_document ::= block_node DOCUMENT-END? +# block_node ::= ALIAS | properties? block_content +# flow_node ::= ALIAS | properties? flow_content +# properties ::= TAG ANCHOR? | ANCHOR TAG? +# block_content ::= block_collection | flow_collection | SCALAR +# flow_content ::= flow_collection | SCALAR +# block_collection ::= block_sequence | block_mapping +# block_sequence ::= BLOCK-SEQUENCE-START (BLOCK-ENTRY block_node?)* BLOCK-END +# block_mapping ::= BLOCK-MAPPING_START ((KEY block_node_or_indentless_sequence?)? (VALUE block_node_or_indentless_sequence?)?)* BLOCK-END +# block_node_or_indentless_sequence ::= ALIAS | properties? (block_content | indentless_block_sequence) +# indentless_block_sequence ::= (BLOCK-ENTRY block_node?)+ +# flow_collection ::= flow_sequence | flow_mapping +# flow_sequence ::= FLOW-SEQUENCE-START (flow_sequence_entry FLOW-ENTRY)* flow_sequence_entry? FLOW-SEQUENCE-END +# flow_mapping ::= FLOW-MAPPING-START (flow_mapping_entry FLOW-ENTRY)* flow_mapping_entry? FLOW-MAPPING-END +# flow_sequence_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)? +# flow_mapping_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)? + +# TODO: support for BOM within a stream. +# stream ::= (BOM? implicit_document)? (BOM? 
explicit_document)* STREAM-END + +# FIRST sets: +# stream: { STREAM-START } +# explicit_document: { DIRECTIVE DOCUMENT-START } +# implicit_document: FIRST(block_node) +# block_node: { ALIAS TAG ANCHOR SCALAR BLOCK-SEQUENCE-START BLOCK-MAPPING-START FLOW-SEQUENCE-START FLOW-MAPPING-START } +# flow_node: { ALIAS ANCHOR TAG SCALAR FLOW-SEQUENCE-START FLOW-MAPPING-START } +# block_content: { BLOCK-SEQUENCE-START BLOCK-MAPPING-START FLOW-SEQUENCE-START FLOW-MAPPING-START SCALAR } +# flow_content: { FLOW-SEQUENCE-START FLOW-MAPPING-START SCALAR } +# block_collection: { BLOCK-SEQUENCE-START BLOCK-MAPPING-START } +# flow_collection: { FLOW-SEQUENCE-START FLOW-MAPPING-START } +# block_sequence: { BLOCK-SEQUENCE-START } +# block_mapping: { BLOCK-MAPPING-START } +# block_node_or_indentless_sequence: { ALIAS ANCHOR TAG SCALAR BLOCK-SEQUENCE-START BLOCK-MAPPING-START FLOW-SEQUENCE-START FLOW-MAPPING-START BLOCK-ENTRY } +# indentless_sequence: { ENTRY } +# flow_collection: { FLOW-SEQUENCE-START FLOW-MAPPING-START } +# flow_sequence: { FLOW-SEQUENCE-START } +# flow_mapping: { FLOW-MAPPING-START } +# flow_sequence_entry: { ALIAS ANCHOR TAG SCALAR FLOW-SEQUENCE-START FLOW-MAPPING-START KEY } +# flow_mapping_entry: { ALIAS ANCHOR TAG SCALAR FLOW-SEQUENCE-START FLOW-MAPPING-START KEY } + +__all__ = ['Parser', 'ParserError'] + +from error import MarkedYAMLError +from tokens import * +from events import * +from scanner import * + +class ParserError(MarkedYAMLError): + pass + +class Parser: + # Since writing a recursive-descendant parser is a straightforward task, we + # do not give many comments here. + # Note that we use Python generators. If you rewrite the parser in another + # language, you may replace all 'yield'-s with event handler calls. + + DEFAULT_TAGS = { + u'!': u'!', + u'!!': u'tag:yaml.org,2002:', + } + + def __init__(self): + self.current_event = None + self.yaml_version = None + self.tag_handles = {} + self.event_generator = self.parse_stream() + + def check_event(self, *choices): + # Check the type of the next event. + if self.current_event is None: + try: + self.current_event = self.event_generator.next() + except StopIteration: + pass + if self.current_event is not None: + if not choices: + return True + for choice in choices: + if isinstance(self.current_event, choice): + return True + return False + + def peek_event(self): + # Get the next event. + if self.current_event is None: + try: + self.current_event = self.event_generator.next() + except StopIteration: + pass + return self.current_event + + def get_event(self): + # Get the next event. + if self.current_event is None: + try: + self.current_event = self.event_generator.next() + except StopIteration: + pass + value = self.current_event + self.current_event = None + return value + + def __iter__(self): + # Iterator protocol. + return self.event_generator + + def parse_stream(self): + # STREAM-START implicit_document? explicit_document* STREAM-END + + # Parse start of stream. + token = self.get_token() + yield StreamStartEvent(token.start_mark, token.end_mark, + encoding=token.encoding) + + # Parse implicit document. 
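# (A stream such as "a: 1\n" begins with no '---', so it is wrapped in a
# DocumentStartEvent/DocumentEndEvent pair with explicit=False.)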
+ if not self.check_token(DirectiveToken, DocumentStartToken, + StreamEndToken): + self.tag_handles = self.DEFAULT_TAGS + token = self.peek_token() + start_mark = end_mark = token.start_mark + yield DocumentStartEvent(start_mark, end_mark, + explicit=False) + for event in self.parse_block_node(): + yield event + token = self.peek_token() + start_mark = end_mark = token.start_mark + explicit = False + while self.check_token(DocumentEndToken): + token = self.get_token() + end_mark = token.end_mark + explicit = True + yield DocumentEndEvent(start_mark, end_mark, + explicit=explicit) + + # Parse explicit documents. + while not self.check_token(StreamEndToken): + token = self.peek_token() + start_mark = token.start_mark + version, tags = self.process_directives() + if not self.check_token(DocumentStartToken): + raise ParserError(None, None, + "expected '<document start>', but found %r" + % self.peek_token().id, + self.peek_token().start_mark) + token = self.get_token() + end_mark = token.end_mark + yield DocumentStartEvent(start_mark, end_mark, + explicit=True, version=version, tags=tags) + if self.check_token(DirectiveToken, + DocumentStartToken, DocumentEndToken, StreamEndToken): + yield self.process_empty_scalar(token.end_mark) + else: + for event in self.parse_block_node(): + yield event + token = self.peek_token() + start_mark = end_mark = token.start_mark + explicit = False + while self.check_token(DocumentEndToken): + token = self.get_token() + end_mark = token.end_mark + explicit=True + yield DocumentEndEvent(start_mark, end_mark, + explicit=explicit) + + # Parse end of stream. + token = self.get_token() + yield StreamEndEvent(token.start_mark, token.end_mark) + + def process_directives(self): + # DIRECTIVE* + self.yaml_version = None + self.tag_handles = {} + while self.check_token(DirectiveToken): + token = self.get_token() + if token.name == u'YAML': + if self.yaml_version is not None: + raise ParserError(None, None, + "found duplicate YAML directive", token.start_mark) + major, minor = token.value + if major != 1: + raise ParserError(None, None, + "found incompatible YAML document (version 1.* is required)", + token.start_mark) + self.yaml_version = token.value + elif token.name == u'TAG': + handle, prefix = token.value + if handle in self.tag_handles: + raise ParserError(None, None, + "duplicate tag handle %r" % handle.encode('utf-8'), + token.start_mark) + self.tag_handles[handle] = prefix + if self.tag_handles: + value = self.yaml_version, self.tag_handles.copy() + else: + value = self.yaml_version, None + for key in self.DEFAULT_TAGS: + if key not in self.tag_handles: + self.tag_handles[key] = self.DEFAULT_TAGS[key] + return value + + def parse_block_node(self): + return self.parse_node(block=True) + + def parse_flow_node(self): + return self.parse_node() + + def parse_block_node_or_indentless_sequence(self): + return self.parse_node(block=True, indentless_sequence=True) + + def parse_node(self, block=False, indentless_sequence=False): + # block_node ::= ALIAS | properties? block_content + # flow_node ::= ALIAS | properties? flow_content + # properties ::= TAG ANCHOR? | ANCHOR TAG? + # block_content ::= block_collection | flow_collection | SCALAR + # flow_content ::= flow_collection | SCALAR + # block_collection ::= block_sequence | block_mapping + # block_node_or_indentless_sequence ::= ALIAS | properties? 
+ # (block_content | indentless_block_sequence) + if self.check_token(AliasToken): + token = self.get_token() + yield AliasEvent(token.value, token.start_mark, token.end_mark) + else: + anchor = None + tag = None + start_mark = end_mark = tag_mark = None + if self.check_token(AnchorToken): + token = self.get_token() + start_mark = token.start_mark + end_mark = token.end_mark + anchor = token.value + if self.check_token(TagToken): + token = self.get_token() + tag_mark = token.start_mark + end_mark = token.end_mark + tag = token.value + elif self.check_token(TagToken): + token = self.get_token() + start_mark = tag_mark = token.start_mark + end_mark = token.end_mark + tag = token.value + if self.check_token(AnchorToken): + token = self.get_token() + end_mark = token.end_mark + anchor = token.value + if tag is not None and tag != u'!': + handle, suffix = tag + if handle is not None: + if handle not in self.tag_handles: + raise ParserError("while parsing a node", start_mark, + "found undefined tag handle %r" % handle.encode('utf-8'), + tag_mark) + tag = self.tag_handles[handle]+suffix + else: + tag = suffix + #if tag == u'!': + # raise ParserError("while parsing a node", start_mark, + # "found non-specific tag '!'", tag_mark, + # "Please check 'http://pyyaml.org/wiki/YAMLNonSpecificTag' and share your opinion.") + if start_mark is None: + start_mark = end_mark = self.peek_token().start_mark + event = None + collection_events = None + implicit = (tag is None or tag == u'!') + if indentless_sequence and self.check_token(BlockEntryToken): + end_mark = self.peek_token().end_mark + event = SequenceStartEvent(anchor, tag, implicit, + start_mark, end_mark) + collection_events = self.parse_indentless_sequence() + else: + if self.check_token(ScalarToken): + token = self.get_token() + end_mark = token.end_mark + if (token.plain and tag is None) or tag == u'!': + implicit = (True, False) + elif tag is None: + implicit = (False, True) + else: + implicit = (False, False) + event = ScalarEvent(anchor, tag, implicit, token.value, + start_mark, end_mark, style=token.style) + elif self.check_token(FlowSequenceStartToken): + end_mark = self.peek_token().end_mark + event = SequenceStartEvent(anchor, tag, implicit, + start_mark, end_mark, flow_style=True) + collection_events = self.parse_flow_sequence() + elif self.check_token(FlowMappingStartToken): + end_mark = self.peek_token().end_mark + event = MappingStartEvent(anchor, tag, implicit, + start_mark, end_mark, flow_style=True) + collection_events = self.parse_flow_mapping() + elif block and self.check_token(BlockSequenceStartToken): + end_mark = self.peek_token().start_mark + event = SequenceStartEvent(anchor, tag, implicit, + start_mark, end_mark, flow_style=False) + collection_events = self.parse_block_sequence() + elif block and self.check_token(BlockMappingStartToken): + end_mark = self.peek_token().start_mark + event = MappingStartEvent(anchor, tag, implicit, + start_mark, end_mark, flow_style=False) + collection_events = self.parse_block_mapping() + elif anchor is not None or tag is not None: + # Empty scalars are allowed even if a tag or an anchor is + # specified. 
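+                # For example, a block sequence entry written as just
+                # `- &a` (nothing after the anchor) yields an anchored
+                # empty scalar event here.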
+ event = ScalarEvent(anchor, tag, (implicit, False), u'', + start_mark, end_mark) + else: + if block: + node = 'block' + else: + node = 'flow' + token = self.peek_token() + raise ParserError("while scanning a %s node" % node, start_mark, + "expected the node content, but found %r" % token.id, + token.start_mark) + yield event + if collection_events is not None: + for event in collection_events: + yield event + + def parse_block_sequence(self): + # BLOCK-SEQUENCE-START (BLOCK-ENTRY block_node?)* BLOCK-END + token = self.get_token() + start_mark = token.start_mark + while self.check_token(BlockEntryToken): + token = self.get_token() + if not self.check_token(BlockEntryToken, BlockEndToken): + for event in self.parse_block_node(): + yield event + else: + yield self.process_empty_scalar(token.end_mark) + if not self.check_token(BlockEndToken): + token = self.peek_token() + raise ParserError("while scanning a block collection", start_mark, + "expected <block end>, but found %r" % token.id, token.start_mark) + token = self.get_token() + yield SequenceEndEvent(token.start_mark, token.end_mark) + + def parse_indentless_sequence(self): + # (BLOCK-ENTRY block_node?)+ + while self.check_token(BlockEntryToken): + token = self.get_token() + if not self.check_token(BlockEntryToken, + KeyToken, ValueToken, BlockEndToken): + for event in self.parse_block_node(): + yield event + else: + yield self.process_empty_scalar(token.end_mark) + token = self.peek_token() + yield SequenceEndEvent(token.start_mark, token.start_mark) + + def parse_block_mapping(self): + # BLOCK-MAPPING_START + # ((KEY block_node_or_indentless_sequence?)? + # (VALUE block_node_or_indentless_sequence?)?)* + # BLOCK-END + token = self.get_token() + start_mark = token.start_mark + while self.check_token(KeyToken, ValueToken): + if self.check_token(KeyToken): + token = self.get_token() + if not self.check_token(KeyToken, ValueToken, BlockEndToken): + for event in self.parse_block_node_or_indentless_sequence(): + yield event + else: + yield self.process_empty_scalar(token.end_mark) + if self.check_token(ValueToken): + token = self.get_token() + if not self.check_token(KeyToken, ValueToken, BlockEndToken): + for event in self.parse_block_node_or_indentless_sequence(): + yield event + else: + yield self.process_empty_scalar(token.end_mark) + else: + token = self.peek_token() + yield self.process_empty_scalar(token.start_mark) + if not self.check_token(BlockEndToken): + token = self.peek_token() + raise ParserError("while scanning a block mapping", start_mark, + "expected <block end>, but found %r" % token.id, token.start_mark) + token = self.get_token() + yield MappingEndEvent(token.start_mark, token.end_mark) + + def parse_flow_sequence(self): + # flow_sequence ::= FLOW-SEQUENCE-START + # (flow_sequence_entry FLOW-ENTRY)* + # flow_sequence_entry? + # FLOW-SEQUENCE-END + # flow_sequence_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)? + # + # Note that while production rules for both flow_sequence_entry and + # flow_mapping_entry are equal, their interpretations are different. + # For `flow_sequence_entry`, the part `KEY flow_node? (VALUE flow_node?)?` + # generate an inline mapping (set syntax). 
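+        # For example, `[ one, two: three ]` is a sequence whose second
+        # entry is parsed as the single-pair mapping { two: three }.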
+ token = self.get_token() + start_mark = token.start_mark + while not self.check_token(FlowSequenceEndToken): + if self.check_token(KeyToken): + token = self.get_token() + yield MappingStartEvent(None, None, True, + token.start_mark, token.end_mark, + flow_style=True) + if not self.check_token(ValueToken, + FlowEntryToken, FlowSequenceEndToken): + for event in self.parse_flow_node(): + yield event + else: + yield self.process_empty_scalar(token.end_mark) + if self.check_token(ValueToken): + token = self.get_token() + if not self.check_token(FlowEntryToken, FlowSequenceEndToken): + for event in self.parse_flow_node(): + yield event + else: + yield self.process_empty_scalar(token.end_mark) + else: + token = self.peek_token() + yield self.process_empty_scalar(token.start_mark) + token = self.peek_token() + yield MappingEndEvent(token.start_mark, token.start_mark) + else: + for event in self.parse_flow_node(): + yield event + if not self.check_token(FlowEntryToken, FlowSequenceEndToken): + token = self.peek_token() + raise ParserError("while scanning a flow sequence", start_mark, + "expected ',' or ']', but got %r" % token.id, token.start_mark) + if self.check_token(FlowEntryToken): + self.get_token() + token = self.get_token() + yield SequenceEndEvent(token.start_mark, token.end_mark) + + def parse_flow_mapping(self): + # flow_mapping ::= FLOW-MAPPING-START + # (flow_mapping_entry FLOW-ENTRY)* + # flow_mapping_entry? + # FLOW-MAPPING-END + # flow_mapping_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)? + token = self.get_token() + start_mark = token.start_mark + while not self.check_token(FlowMappingEndToken): + if self.check_token(KeyToken): + token = self.get_token() + if not self.check_token(ValueToken, + FlowEntryToken, FlowMappingEndToken): + for event in self.parse_flow_node(): + yield event + else: + yield self.process_empty_scalar(token.end_mark) + if self.check_token(ValueToken): + token = self.get_token() + if not self.check_token(FlowEntryToken, FlowMappingEndToken): + for event in self.parse_flow_node(): + yield event + else: + yield self.process_empty_scalar(token.end_mark) + else: + token = self.peek_token() + yield self.process_empty_scalar(token.start_mark) + else: + for event in self.parse_flow_node(): + yield event + yield self.process_empty_scalar(self.peek_token().start_mark) + if not self.check_token(FlowEntryToken, FlowMappingEndToken): + token = self.peek_token() + raise ParserError("while scanning a flow mapping", start_mark, + "expected ',' or '}', but got %r" % token.id, token.start_mark) + if self.check_token(FlowEntryToken): + self.get_token() + if not self.check_token(FlowMappingEndToken): + token = self.peek_token() + raise ParserError("while scanning a flow mapping", start_mark, + "expected '}', but found %r" % token.id, token.start_mark) + token = self.get_token() + yield MappingEndEvent(token.start_mark, token.end_mark) + + def process_empty_scalar(self, mark): + return ScalarEvent(None, None, (True, False), u'', mark, mark) + diff --git a/scripts/external_libs/PyYAML-3.01/lib/yaml/reader.py b/scripts/external_libs/PyYAML-3.01/lib/yaml/reader.py new file mode 100644 index 00000000..beb76d0a --- /dev/null +++ b/scripts/external_libs/PyYAML-3.01/lib/yaml/reader.py @@ -0,0 +1,222 @@ +# This module contains abstractions for the input stream. You don't have to +# looks further, there are no pretty code. +# +# We define two classes here. +# +# Mark(source, line, column) +# It's just a record and its only use is producing nice error messages. 
+# Parser does not use it for any other purposes.
+#
+# Mark(source, line, column)
+# It's just a record and its only use is producing nice error messages.
+#
+# Reader(source, data)
+# Reader determines the encoding of `data` and converts it to unicode.
+# Reader provides the following methods and attributes:
+# reader.peek(length=1) - return the next `length` characters.
+# reader.forward(length=1) - move the current position `length` characters forward.
+# reader.index - the number of the current character.
+# reader.line, reader.column - the line and the column of the current character.
+
+__all__ = ['Reader', 'ReaderError']
+
+from error import YAMLError, Mark
+
+import codecs, re
+
+# Unfortunately, the codec functions in Python 2.3 do not support the `finish`
+# argument, so we have to write our own wrappers.
+
+try:
+    codecs.utf_8_decode('', 'strict', False)
+    from codecs import utf_8_decode, utf_16_le_decode, utf_16_be_decode
+
+except TypeError:
+
+    def utf_16_le_decode(data, errors, finish=False):
+        if not finish and len(data) % 2 == 1:
+            data = data[:-1]
+        return codecs.utf_16_le_decode(data, errors)
+
+    def utf_16_be_decode(data, errors, finish=False):
+        if not finish and len(data) % 2 == 1:
+            data = data[:-1]
+        return codecs.utf_16_be_decode(data, errors)
+
+    def utf_8_decode(data, errors, finish=False):
+        if not finish:
+            # We are trying to remove a possible incomplete multibyte character
+            # from the suffix of the data.
+            # The first byte of a multi-byte sequence is in the range 0xc0 to 0xfd.
+            # All further bytes are in the range 0x80 to 0xbf.
+            # UTF-8 encoded UCS characters may be up to six bytes long.
+            count = 0
+            while count < 5 and count < len(data) \
+                    and '\x80' <= data[-count-1] <= '\xBF':
+                count += 1
+            if count < 5 and count < len(data) \
+                    and '\xC0' <= data[-count-1] <= '\xFD':
+                data = data[:-count-1]
+        return codecs.utf_8_decode(data, errors)
+
+class ReaderError(YAMLError):
+
+    def __init__(self, name, position, character, encoding, reason):
+        self.name = name
+        self.character = character
+        self.position = position
+        self.encoding = encoding
+        self.reason = reason
+
+    def __str__(self):
+        if isinstance(self.character, str):
+            return "'%s' codec can't decode byte #x%02x: %s\n" \
+                    "  in \"%s\", position %d" \
+                    % (self.encoding, ord(self.character), self.reason,
+                            self.name, self.position)
+        else:
+            return "unacceptable character #x%04x: %s\n" \
+                    "  in \"%s\", position %d" \
+                    % (ord(self.character), self.reason,
+                            self.name, self.position)
+
+class Reader:
+    # Reader:
+    # - determines the data encoding and converts it to unicode,
+    # - checks if characters are within the allowed range,
+    # - adds '\0' to the end.
+
+    # Reader accepts
+    #  - a `str` object,
+    #  - a `unicode` object,
+    #  - a file-like object with its `read` method returning `str`,
+    #  - a file-like object with its `read` method returning `unicode`.
+
+    # Yeah, it's ugly and slow.
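+
+    # A minimal usage sketch (Python 2, where `unicode` exists):
+    #
+    #     reader = Reader(u'- a\n- b\n')
+    #     reader.peek()       # -> u'-'
+    #     reader.forward(2)
+    #     reader.peek()       # -> u'a'
+    #     reader.get_mark()   # Mark at line 0, column 2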
+ + def __init__(self, stream): + self.name = None + self.stream = None + self.stream_pointer = 0 + self.eof = True + self.buffer = u'' + self.pointer = 0 + self.raw_buffer = None + self.raw_decode = None + self.encoding = None + self.index = 0 + self.line = 0 + self.column = 0 + if isinstance(stream, unicode): + self.name = "<unicode string>" + self.check_printable(stream) + self.buffer = stream+u'\0' + elif isinstance(stream, str): + self.name = "<string>" + self.raw_buffer = stream + self.determine_encoding() + else: + self.stream = stream + self.name = getattr(stream, 'name', "<file>") + self.eof = False + self.raw_buffer = '' + self.determine_encoding() + + def peek(self, index=0): + if self.pointer+index+1 >= len(self.buffer): + self.update(index+1) + return self.buffer[self.pointer+index] + + def prefix(self, length=1): + if self.pointer+length >= len(self.buffer): + self.update(length) + return self.buffer[self.pointer:self.pointer+length] + + def forward(self, length=1): + if self.pointer+length+1 >= len(self.buffer): + self.update(length+1) + for k in range(length): + ch = self.buffer[self.pointer] + self.pointer += 1 + self.index += 1 + if ch in u'\n\x85\u2028\u2029' \ + or (ch == u'\r' and self.buffer[self.pointer+1] != u'\n'): + self.line += 1 + self.column = 0 + elif ch != u'\uFEFF': + self.column += 1 + + def get_mark(self): + if self.stream is None: + return Mark(self.name, self.index, self.line, self.column, + self.buffer, self.pointer) + else: + return Mark(self.name, self.index, self.line, self.column, + None, None) + + def determine_encoding(self): + while not self.eof and len(self.raw_buffer) < 2: + self.update_raw() + if not isinstance(self.raw_buffer, unicode): + if self.raw_buffer.startswith(codecs.BOM_UTF16_LE): + self.raw_decode = utf_16_le_decode + self.encoding = 'utf-16-le' + elif self.raw_buffer.startswith(codecs.BOM_UTF16_BE): + self.raw_decode = utf_16_be_decode + self.encoding = 'utf-16-be' + else: + self.raw_decode = utf_8_decode + self.encoding = 'utf-8' + self.update(1) + + NON_PRINTABLE = re.compile(u'[^\x09\x0A\x0D\x20-\x7E\x85\xA0-\uD7FF\uE000-\uFFFD]') + def check_printable(self, data): + match = self.NON_PRINTABLE.search(data) + if match: + character = match.group() + position = self.index+(len(self.buffer)-self.pointer)+match.start() + raise ReaderError(self.name, position, character, + 'unicode', "special characters are not allowed") + + def update(self, length): + if self.raw_buffer is None: + return + self.buffer = self.buffer[self.pointer:] + self.pointer = 0 + while len(self.buffer) < length: + if not self.eof: + self.update_raw() + if self.raw_decode is not None: + try: + data, converted = self.raw_decode(self.raw_buffer, + 'strict', self.eof) + except UnicodeDecodeError, exc: + character = exc.object[exc.start] + if self.stream is not None: + position = self.stream_pointer-len(self.raw_buffer)+exc.start + else: + position = exc.start + raise ReaderError(self.name, position, character, + exc.encoding, exc.reason) + else: + data = self.raw_buffer + converted = len(data) + self.check_printable(data) + self.buffer += data + self.raw_buffer = self.raw_buffer[converted:] + if self.eof: + self.buffer += u'\0' + self.raw_buffer = None + break + + def update_raw(self, size=1024): + data = self.stream.read(size) + if data: + self.raw_buffer += data + self.stream_pointer += len(data) + else: + self.eof = True + +#try: +# import psyco +# psyco.bind(Reader) +#except ImportError: +# pass + diff --git 
a/scripts/external_libs/PyYAML-3.01/lib/yaml/representer.py b/scripts/external_libs/PyYAML-3.01/lib/yaml/representer.py new file mode 100644 index 00000000..cb37169d --- /dev/null +++ b/scripts/external_libs/PyYAML-3.01/lib/yaml/representer.py @@ -0,0 +1,501 @@ + +__all__ = ['BaseRepresenter', 'SafeRepresenter', 'Representer', + 'RepresenterError'] + +from error import * +from nodes import * + +try: + import datetime + datetime_available = True +except ImportError: + datetime_available = False + +try: + set +except NameError: + from sets import Set as set + +import sys, copy_reg + +class RepresenterError(YAMLError): + pass + +class BaseRepresenter: + + yaml_representers = {} + yaml_multi_representers = {} + + def __init__(self, default_style=None, default_flow_style=None): + self.default_style = default_style + self.default_flow_style = default_flow_style + self.represented_objects = {} + + def represent(self, data): + node = self.represent_data(data) + self.serialize(node) + self.represented_objects = {} + + class C: pass + c = C() + def f(): pass + classobj_type = type(C) + instance_type = type(c) + function_type = type(f) + builtin_function_type = type(abs) + module_type = type(sys) + del C, c, f + + def get_classobj_bases(self, cls): + bases = [cls] + for base in cls.__bases__: + bases.extend(self.get_classobj_bases(base)) + return bases + + def represent_data(self, data): + if self.ignore_aliases(data): + alias_key = None + else: + alias_key = id(data) + if alias_key is not None: + if alias_key in self.represented_objects: + node = self.represented_objects[alias_key] + if node is None: + raise RepresenterError("recursive objects are not allowed: %r" % data) + return node + self.represented_objects[alias_key] = None + data_types = type(data).__mro__ + if type(data) is self.instance_type: + data_types = self.get_classobj_bases(data.__class__)+list(data_types) + if data_types[0] in self.yaml_representers: + node = self.yaml_representers[data_types[0]](self, data) + else: + for data_type in data_types: + if data_type in self.yaml_multi_representers: + node = self.yaml_multi_representers[data_type](self, data) + break + else: + if None in self.yaml_multi_representers: + node = self.yaml_multi_representers[None](self, data) + elif None in self.yaml_representers: + node = self.yaml_representers[None](self, data) + else: + node = ScalarNode(None, unicode(data)) + if alias_key is not None: + self.represented_objects[alias_key] = node + return node + + def add_representer(cls, data_type, representer): + if not 'yaml_representers' in cls.__dict__: + cls.yaml_representers = cls.yaml_representers.copy() + cls.yaml_representers[data_type] = representer + add_representer = classmethod(add_representer) + + def add_multi_representer(cls, data_type, representer): + if not 'yaml_multi_representers' in cls.__dict__: + cls.yaml_multi_representers = cls.yaml_multi_representers.copy() + cls.yaml_multi_representers[data_type] = representer + add_multi_representer = classmethod(add_multi_representer) + + def represent_scalar(self, tag, value, style=None): + if style is None: + style = self.default_style + return ScalarNode(tag, value, style=style) + + def represent_sequence(self, tag, sequence, flow_style=None): + best_style = True + value = [] + for item in sequence: + node_item = self.represent_data(item) + if not (isinstance(node_item, ScalarNode) and not node_item.style): + best_style = False + value.append(self.represent_data(item)) + if flow_style is None: + flow_style = self.default_flow_style + if 
flow_style is None: + flow_style = best_style + return SequenceNode(tag, value, flow_style=flow_style) + + def represent_mapping(self, tag, mapping, flow_style=None): + best_style = True + if hasattr(mapping, 'keys'): + value = {} + for item_key in mapping.keys(): + item_value = mapping[item_key] + node_key = self.represent_data(item_key) + node_value = self.represent_data(item_value) + if not (isinstance(node_key, ScalarNode) and not node_key.style): + best_style = False + if not (isinstance(node_value, ScalarNode) and not node_value.style): + best_style = False + value[node_key] = node_value + else: + value = [] + for item_key, item_value in mapping: + node_key = self.represent_data(item_key) + node_value = self.represent_data(item_value) + if not (isinstance(node_key, ScalarNode) and not node_key.style): + best_style = False + if not (isinstance(node_value, ScalarNode) and not node_value.style): + best_style = False + value.append((node_key, node_value)) + if flow_style is None: + flow_style = self.default_flow_style + if flow_style is None: + flow_style = best_style + return MappingNode(tag, value, flow_style=flow_style) + + def ignore_aliases(self, data): + return False + +class SafeRepresenter(BaseRepresenter): + + def ignore_aliases(self, data): + if data in [None, ()]: + return True + if isinstance(data, (str, unicode, bool, int, float)): + return True + + def represent_none(self, data): + return self.represent_scalar(u'tag:yaml.org,2002:null', + u'null') + + def represent_str(self, data): + tag = None + style = None + try: + data = unicode(data, 'ascii') + tag = u'tag:yaml.org,2002:str' + except UnicodeDecodeError: + try: + data = unicode(data, 'utf-8') + tag = u'tag:yaml.org,2002:str' + except UnicodeDecodeError: + data = data.encode('base64') + tag = u'tag:yaml.org,2002:binary' + style = '|' + return self.represent_scalar(tag, data, style=style) + + def represent_unicode(self, data): + return self.represent_scalar(u'tag:yaml.org,2002:str', data) + + def represent_bool(self, data): + if data: + value = u'true' + else: + value = u'false' + return self.represent_scalar(u'tag:yaml.org,2002:bool', value) + + def represent_int(self, data): + return self.represent_scalar(u'tag:yaml.org,2002:int', unicode(data)) + + def represent_long(self, data): + return self.represent_scalar(u'tag:yaml.org,2002:int', unicode(data)) + + repr_pos_inf = repr(1e300000) + repr_neg_inf = repr(-1e300000) + repr_nan = repr(1e300000/1e300000) + + def represent_float(self, data): + repr_data = repr(data) + if repr_data == self.repr_pos_inf: + value = u'.inf' + elif repr_data == self.repr_neg_inf: + value = u'-.inf' + elif repr_data == self.repr_nan: + value = u'.nan' + else: + value = unicode(repr_data) + return self.represent_scalar(u'tag:yaml.org,2002:float', value) + + def represent_list(self, data): + pairs = (len(data) > 0 and isinstance(data, list)) + if pairs: + for item in data: + if not isinstance(item, tuple) or len(item) != 2: + pairs = False + break + if not pairs: + return self.represent_sequence(u'tag:yaml.org,2002:seq', data) + value = [] + for item_key, item_value in data: + value.append(self.represent_mapping(u'tag:yaml.org,2002:map', + [(item_key, item_value)])) + return SequenceNode(u'tag:yaml.org,2002:pairs', value) + + def represent_dict(self, data): + return self.represent_mapping(u'tag:yaml.org,2002:map', data) + + def represent_set(self, data): + value = {} + for key in data: + value[key] = None + return self.represent_mapping(u'tag:yaml.org,2002:set', value) + + def 
represent_date(self, data): + value = u'%04d-%02d-%02d' % (data.year, data.month, data.day) + return self.represent_scalar(u'tag:yaml.org,2002:timestamp', value) + + def represent_datetime(self, data): + value = u'%04d-%02d-%02d %02d:%02d:%02d' \ + % (data.year, data.month, data.day, + data.hour, data.minute, data.second) + if data.microsecond: + value += u'.' + unicode(data.microsecond/1000000.0).split(u'.')[1] + if data.utcoffset(): + value += unicode(data.utcoffset()) + return self.represent_scalar(u'tag:yaml.org,2002:timestamp', value) + + def represent_yaml_object(self, tag, data, cls, flow_style=None): + if hasattr(data, '__getstate__'): + state = data.__getstate__() + else: + state = data.__dict__.copy() + if isinstance(state, dict): + state = state.items() + state.sort() + return self.represent_mapping(tag, state, flow_style=flow_style) + + def represent_undefined(self, data): + raise RepresenterError("cannot represent an object: %s" % data) + +SafeRepresenter.add_representer(type(None), + SafeRepresenter.represent_none) + +SafeRepresenter.add_representer(str, + SafeRepresenter.represent_str) + +SafeRepresenter.add_representer(unicode, + SafeRepresenter.represent_unicode) + +SafeRepresenter.add_representer(bool, + SafeRepresenter.represent_bool) + +SafeRepresenter.add_representer(int, + SafeRepresenter.represent_int) + +SafeRepresenter.add_representer(long, + SafeRepresenter.represent_long) + +SafeRepresenter.add_representer(float, + SafeRepresenter.represent_float) + +SafeRepresenter.add_representer(list, + SafeRepresenter.represent_list) + +SafeRepresenter.add_representer(tuple, + SafeRepresenter.represent_list) + +SafeRepresenter.add_representer(dict, + SafeRepresenter.represent_dict) + +SafeRepresenter.add_representer(set, + SafeRepresenter.represent_set) + +if datetime_available: + SafeRepresenter.add_representer(datetime.date, + SafeRepresenter.represent_date) + SafeRepresenter.add_representer(datetime.datetime, + SafeRepresenter.represent_datetime) + +SafeRepresenter.add_representer(None, + SafeRepresenter.represent_undefined) + +class Representer(SafeRepresenter): + + def represent_str(self, data): + tag = None + style = None + try: + data = unicode(data, 'ascii') + tag = u'tag:yaml.org,2002:str' + except UnicodeDecodeError: + try: + data = unicode(data, 'utf-8') + tag = u'tag:yaml.org,2002:python/str' + except UnicodeDecodeError: + data = data.encode('base64') + tag = u'tag:yaml.org,2002:binary' + style = '|' + return self.represent_scalar(tag, data, style=style) + + def represent_unicode(self, data): + tag = None + try: + data.encode('ascii') + tag = u'tag:yaml.org,2002:python/unicode' + except UnicodeEncodeError: + tag = u'tag:yaml.org,2002:str' + return self.represent_scalar(tag, data) + + def represent_long(self, data): + tag = u'tag:yaml.org,2002:int' + if int(data) is not data: + tag = u'tag:yaml.org,2002:python/long' + return self.represent_scalar(tag, unicode(data)) + + def represent_complex(self, data): + if data.imag == 0.0: + data = u'%r' % data.real + elif data.real == 0.0: + data = u'%rj' % data.imag + elif data.imag > 0: + data = u'%r+%rj' % (data.real, data.imag) + else: + data = u'%r%rj' % (data.real, data.imag) + return self.represent_scalar(u'tag:yaml.org,2002:python/complex', data) + + def represent_tuple(self, data): + return self.represent_sequence(u'tag:yaml.org,2002:python/tuple', data) + + def represent_name(self, data): + name = u'%s.%s' % (data.__module__, data.__name__) + return self.represent_scalar(u'tag:yaml.org,2002:python/name:'+name, u'') + 
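+    # For example, on Python 2 `represent_name(abs)` produces a scalar node
+    # tagged tag:yaml.org,2002:python/name:__builtin__.abs with an empty
+    # value, which dumps as `!!python/name:__builtin__.abs ''`.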
+ def represent_module(self, data): + return self.represent_scalar( + u'tag:yaml.org,2002:python/module:'+data.__name__, u'') + + def represent_instance(self, data): + # For instances of classic classes, we use __getinitargs__ and + # __getstate__ to serialize the data. + + # If data.__getinitargs__ exists, the object must be reconstructed by + # calling cls(**args), where args is a tuple returned by + # __getinitargs__. Otherwise, the cls.__init__ method should never be + # called and the class instance is created by instantiating a trivial + # class and assigning to the instance's __class__ variable. + + # If data.__getstate__ exists, it returns the state of the object. + # Otherwise, the state of the object is data.__dict__. + + # We produce either a !!python/object or !!python/object/new node. + # If data.__getinitargs__ does not exist and state is a dictionary, we + # produce a !!python/object node . Otherwise we produce a + # !!python/object/new node. + + cls = data.__class__ + class_name = u'%s.%s' % (cls.__module__, cls.__name__) + args = None + state = None + if hasattr(data, '__getinitargs__'): + args = list(data.__getinitargs__()) + if hasattr(data, '__getstate__'): + state = data.__getstate__() + else: + state = data.__dict__ + if args is None and isinstance(state, dict): + state = state.items() + state.sort() + return self.represent_mapping( + u'tag:yaml.org,2002:python/object:'+class_name, state) + if isinstance(state, dict) and not state: + return self.represent_sequence( + u'tag:yaml.org,2002:python/object/new:'+class_name, args) + value = {} + if args: + value['args'] = args + value['state'] = state + return self.represent_mapping( + u'tag:yaml.org,2002:python/object/new:'+class_name, value) + + def represent_object(self, data): + # We use __reduce__ API to save the data. data.__reduce__ returns + # a tuple of length 2-5: + # (function, args, state, listitems, dictitems) + + # For reconstructing, we calls function(*args), then set its state, + # listitems, and dictitems if they are not None. + + # A special case is when function.__name__ == '__newobj__'. In this + # case we create the object with args[0].__new__(*args). + + # Another special case is when __reduce__ returns a string - we don't + # support it. + + # We produce a !!python/object, !!python/object/new or + # !!python/object/apply node. 
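+        # For example, an instance whose __reduce_ex__ returns the
+        # '__newobj__' form with no extra args and a plain dict state is
+        # emitted as `!!python/object:module.Class {attr: value, ...}`.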
+ + cls = type(data) + if cls in copy_reg.dispatch_table: + reduce = copy_reg.dispatch_table[cls] + elif hasattr(data, '__reduce_ex__'): + reduce = data.__reduce_ex__(2) + elif hasattr(data, '__reduce__'): + reduce = data.__reduce__() + else: + raise RepresenterError("cannot represent object: %r" % data) + reduce = (list(reduce)+[None]*5)[:5] + function, args, state, listitems, dictitems = reduce + args = list(args) + if state is None: + state = {} + if listitems is not None: + listitems = list(listitems) + if dictitems is not None: + dictitems = dict(dictitems) + if function.__name__ == '__newobj__': + function = args[0] + args = args[1:] + tag = u'tag:yaml.org,2002:python/object/new:' + newobj = True + else: + tag = u'tag:yaml.org,2002:python/object/apply:' + newobj = False + function_name = u'%s.%s' % (function.__module__, function.__name__) + if not args and not listitems and not dictitems \ + and isinstance(state, dict) and newobj: + state = state.items() + state.sort() + return self.represent_mapping( + u'tag:yaml.org,2002:python/object:'+function_name, state) + if not listitems and not dictitems \ + and isinstance(state, dict) and not state: + return self.represent_sequence(tag+function_name, args) + value = {} + if args: + value['args'] = args + if state or not isinstance(state, dict): + value['state'] = state + if listitems: + value['listitems'] = listitems + if dictitems: + value['dictitems'] = dictitems + return self.represent_mapping(tag+function_name, value) + +Representer.add_representer(str, + Representer.represent_str) + +Representer.add_representer(unicode, + Representer.represent_unicode) + +Representer.add_representer(long, + Representer.represent_long) + +Representer.add_representer(complex, + Representer.represent_complex) + +Representer.add_representer(tuple, + Representer.represent_tuple) + +Representer.add_representer(type, + Representer.represent_name) + +Representer.add_representer(Representer.classobj_type, + Representer.represent_name) + +Representer.add_representer(Representer.function_type, + Representer.represent_name) + +Representer.add_representer(Representer.builtin_function_type, + Representer.represent_name) + +Representer.add_representer(Representer.module_type, + Representer.represent_module) + +Representer.add_multi_representer(Representer.instance_type, + Representer.represent_instance) + +Representer.add_multi_representer(object, + Representer.represent_object) + diff --git a/scripts/external_libs/PyYAML-3.01/lib/yaml/resolver.py b/scripts/external_libs/PyYAML-3.01/lib/yaml/resolver.py new file mode 100644 index 00000000..7e580e98 --- /dev/null +++ b/scripts/external_libs/PyYAML-3.01/lib/yaml/resolver.py @@ -0,0 +1,205 @@ + +__all__ = ['BaseResolver', 'Resolver'] + +from error import * +from nodes import * + +import re + +class ResolverError(YAMLError): + pass + +class BaseResolver: + + DEFAULT_SCALAR_TAG = u'tag:yaml.org,2002:str' + DEFAULT_SEQUENCE_TAG = u'tag:yaml.org,2002:seq' + DEFAULT_MAPPING_TAG = u'tag:yaml.org,2002:map' + + yaml_implicit_resolvers = {} + yaml_path_resolvers = {} + + def __init__(self): + self.resolver_exact_paths = [] + self.resolver_prefix_paths = [] + + def add_implicit_resolver(cls, tag, regexp, first): + if not 'yaml_implicit_resolvers' in cls.__dict__: + cls.yaml_implicit_resolvers = cls.yaml_implicit_resolvers.copy() + if first is None: + first = [None] + for ch in first: + cls.yaml_implicit_resolvers.setdefault(ch, []).append((tag, regexp)) + add_implicit_resolver = classmethod(add_implicit_resolver) + + def 
add_path_resolver(cls, tag, path, kind=None): + if not 'yaml_path_resolvers' in cls.__dict__: + cls.yaml_path_resolvers = cls.yaml_path_resolvers.copy() + new_path = [] + for element in path: + if isinstance(element, (list, tuple)): + if len(element) == 2: + node_check, index_check = element + elif len(element) == 1: + node_check = element[0] + index_check = True + else: + raise ResolverError("Invalid path element: %s" % element) + else: + node_check = None + index_check = element + if node_check is str: + node_check = ScalarNode + elif node_check is list: + node_check = SequenceNode + elif node_check is map: + node_check = MappingNode + elif node_check not in [ScalarNode, SequenceNode, MappingNode] \ + and not isinstance(node_check, basestring) \ + and node_check is not None: + raise ResolverError("Invalid node checker: %s" % node_check) + if not isinstance(index_check, (basestring, int)) \ + and index_check is not None: + raise ResolverError("Invalid index checker: %s" % index_check) + new_path.append((node_check, index_check)) + if kind is str: + kind = ScalarNode + elif kind is list: + kind = SequenceNode + elif kind is map: + kind = MappingNode + elif kind not in [ScalarNode, SequenceNode, MappingNode] \ + and kind is not None: + raise ResolverError("Invalid node kind: %s" % kind) + cls.yaml_path_resolvers[tuple(new_path), kind] = tag + add_path_resolver = classmethod(add_path_resolver) + + def descend_resolver(self, current_node, current_index): + exact_paths = {} + prefix_paths = [] + if current_node: + depth = len(self.resolver_prefix_paths) + for path, kind in self.resolver_prefix_paths[-1]: + if self.check_resolver_prefix(depth, path, kind, + current_node, current_index): + if len(path) > depth: + prefix_paths.append((path, kind)) + else: + exact_paths[kind] = self.yaml_path_resolvers[path, kind] + else: + for path, kind in self.yaml_path_resolvers: + if not path: + exact_paths[kind] = self.yaml_path_resolvers[path, kind] + else: + prefix_paths.append((path, kind)) + self.resolver_exact_paths.append(exact_paths) + self.resolver_prefix_paths.append(prefix_paths) + + def ascend_resolver(self): + self.resolver_exact_paths.pop() + self.resolver_prefix_paths.pop() + + def check_resolver_prefix(self, depth, path, kind, + current_node, current_index): + node_check, index_check = path[depth-1] + if isinstance(node_check, basestring): + if current_node.tag != node_check: + return + elif node_check is not None: + if not isinstance(current_node, node_check): + return + if index_check is True and current_index is not None: + return + if index_check in [False, None] and current_index is None: + return + if isinstance(index_check, basestring): + if not (isinstance(current_index, ScalarNode) + and index_check == current_index.value): + return + elif isinstance(index_check, int): + if index_check != current_index: + return + return True + + def resolve(self, kind, value, implicit): + if kind is ScalarNode and implicit[0]: + if value == u'': + resolvers = self.yaml_implicit_resolvers.get(u'', []) + else: + resolvers = self.yaml_implicit_resolvers.get(value[0], []) + resolvers += self.yaml_implicit_resolvers.get(None, []) + for tag, regexp in resolvers: + if regexp.match(value): + return tag + implicit = implicit[1] + exact_paths = self.resolver_exact_paths[-1] + if kind in exact_paths: + return exact_paths[kind] + if None in exact_paths: + return exact_paths[None] + if kind is ScalarNode: + return self.DEFAULT_SCALAR_TAG + elif kind is SequenceNode: + return self.DEFAULT_SEQUENCE_TAG + elif kind 
is MappingNode: + return self.DEFAULT_MAPPING_TAG + +class Resolver(BaseResolver): + pass + +Resolver.add_implicit_resolver( + u'tag:yaml.org,2002:bool', + re.compile(ur'''^(?:yes|Yes|YES|n|N|no|No|NO + |true|True|TRUE|false|False|FALSE + |on|On|ON|off|Off|OFF)$''', re.X), + list(u'yYnNtTfFoO')) + +Resolver.add_implicit_resolver( + u'tag:yaml.org,2002:float', + re.compile(ur'''^(?:[-+]?(?:[0-9][0-9_]*)?\.[0-9_]*(?:[eE][-+][0-9]+)? + |[-+]?[0-9][0-9_]*(?::[0-5]?[0-9])+\.[0-9_]* + |[-+]?\.(?:inf|Inf|INF) + |\.(?:nan|NaN|NAN))$''', re.X), + list(u'-+0123456789.')) + +Resolver.add_implicit_resolver( + u'tag:yaml.org,2002:int', + re.compile(ur'''^(?:[-+]?0b[0-1_]+ + |[-+]?0[0-7_]+ + |[-+]?(?:0|[1-9][0-9_]*) + |[-+]?0x[0-9a-fA-F_]+ + |[-+]?[1-9][0-9_]*(?::[0-5]?[0-9])+)$''', re.X), + list(u'-+0123456789')) + +Resolver.add_implicit_resolver( + u'tag:yaml.org,2002:merge', + re.compile(ur'^(?:<<)$'), + ['<']) + +Resolver.add_implicit_resolver( + u'tag:yaml.org,2002:null', + re.compile(ur'''^(?: ~ + |null|Null|NULL + | )$''', re.X), + [u'~', u'n', u'N', u'']) + +Resolver.add_implicit_resolver( + u'tag:yaml.org,2002:timestamp', + re.compile(ur'''^(?:[0-9][0-9][0-9][0-9]-[0-9][0-9]-[0-9][0-9] + |[0-9][0-9][0-9][0-9] -[0-9][0-9]? -[0-9][0-9]? + (?:[Tt]|[ \t]+)[0-9][0-9]? + :[0-9][0-9] :[0-9][0-9] (?:\.[0-9]*)? + (?:[ \t]*(?:Z|[-+][0-9][0-9]?(?::[0-9][0-9])?))?)$''', re.X), + list(u'0123456789')) + +Resolver.add_implicit_resolver( + u'tag:yaml.org,2002:value', + re.compile(ur'^(?:=)$'), + ['=']) + +# The following resolver is only for documentation purposes. It cannot work +# because plain scalars cannot start with '!', '&', or '*'. +Resolver.add_implicit_resolver( + u'tag:yaml.org,2002:yaml', + re.compile(ur'^(?:!|&|\*)$'), + list(u'!&*')) + diff --git a/scripts/external_libs/PyYAML-3.01/lib/yaml/scanner.py b/scripts/external_libs/PyYAML-3.01/lib/yaml/scanner.py new file mode 100644 index 00000000..cf2478f9 --- /dev/null +++ b/scripts/external_libs/PyYAML-3.01/lib/yaml/scanner.py @@ -0,0 +1,1458 @@ + +# Scanner produces tokens of the following types: +# STREAM-START +# STREAM-END +# DIRECTIVE(name, value) +# DOCUMENT-START +# DOCUMENT-END +# BLOCK-SEQUENCE-START +# BLOCK-MAPPING-START +# BLOCK-END +# FLOW-SEQUENCE-START +# FLOW-MAPPING-START +# FLOW-SEQUENCE-END +# FLOW-MAPPING-END +# BLOCK-ENTRY +# FLOW-ENTRY +# KEY +# VALUE +# ALIAS(value) +# ANCHOR(value) +# TAG(value) +# SCALAR(value, plain) +# +# Read comments in the Scanner code for more details. +# + +__all__ = ['Scanner', 'ScannerError'] + +from error import MarkedYAMLError +from tokens import * + +class ScannerError(MarkedYAMLError): + pass + +class SimpleKey: + # See below simple keys treatment. + + def __init__(self, token_number, required, index, line, column, mark): + self.token_number = token_number + self.required = required + self.index = index + self.line = line + self.column = column + self.mark = mark + +class Scanner: + + def __init__(self): + """Initialize the scanner.""" + # It is assumed that Scanner and Reader will have a common descendant. + # Reader do the dirty work of checking for BOM and converting the + # input data to Unicode. It also adds NUL to the end. + # + # Reader supports the following methods + # self.peek(i=0) # peek the next i-th character + # self.prefix(l=1) # peek the next l characters + # self.forward(l=1) # read the next l characters and move the pointer. + + # Had we reached the end of the stream? + self.done = False + + # The number of unclosed '{' and '['. `flow_level == 0` means block + # context. 
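+        # E.g. while scanning `{ a: [b, c] }` the flow level rises to 2
+        # inside the inner list and drops back to 0 after the closing '}'.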
+ self.flow_level = 0 + + # List of processed tokens that are not yet emitted. + self.tokens = [] + + # Add the STREAM-START token. + self.fetch_stream_start() + + # Number of tokens that were emitted through the `get_token` method. + self.tokens_taken = 0 + + # The current indentation level. + self.indent = -1 + + # Past indentation levels. + self.indents = [] + + # Variables related to simple keys treatment. + + # A simple key is a key that is not denoted by the '?' indicator. + # Example of simple keys: + # --- + # block simple key: value + # ? not a simple key: + # : { flow simple key: value } + # We emit the KEY token before all keys, so when we find a potential + # simple key, we try to locate the corresponding ':' indicator. + # Simple keys should be limited to a single line and 1024 characters. + + # Can a simple key start at the current position? A simple key may + # start: + # - at the beginning of the line, not counting indentation spaces + # (in block context), + # - after '{', '[', ',' (in the flow context), + # - after '?', ':', '-' (in the block context). + # In the block context, this flag also signifies if a block collection + # may start at the current position. + self.allow_simple_key = True + + # Keep track of possible simple keys. This is a dictionary. The key + # is `flow_level`; there can be no more that one possible simple key + # for each level. The value is a SimpleKey record: + # (token_number, required, index, line, column, mark) + # A simple key may start with ALIAS, ANCHOR, TAG, SCALAR(flow), + # '[', or '{' tokens. + self.possible_simple_keys = {} + + # Public methods. + + def check_token(self, *choices): + # Check if the next token is one of the given types. + while self.need_more_tokens(): + self.fetch_more_tokens() + if self.tokens: + if not choices: + return True + for choice in choices: + if isinstance(self.tokens[0], choice): + return True + return False + + def peek_token(self): + # Return the next token, but do not delete if from the queue. + while self.need_more_tokens(): + self.fetch_more_tokens() + if self.tokens: + return self.tokens[0] + + def get_token(self): + # Return the next token. + while self.need_more_tokens(): + self.fetch_more_tokens() + if self.tokens: + self.tokens_taken += 1 + return self.tokens.pop(0) + + def __iter__(self): + # Iterator protocol. + while self.need_more_tokens(): + self.fetch_more_tokens() + while self.tokens: + self.tokens_taken += 1 + yield self.tokens.pop(0) + while self.need_more_tokens(): + self.fetch_more_tokens() + + # Private methods. + + def need_more_tokens(self): + if self.done: + return False + if not self.tokens: + return True + # The current token may be a potential simple key, so we + # need to look further. + self.stale_possible_simple_keys() + if self.next_possible_simple_key() == self.tokens_taken: + return True + + def fetch_more_tokens(self): + + # Eat whitespaces and comments until we reach the next token. + self.scan_to_next_token() + + # Remove obsolete possible simple keys. + self.stale_possible_simple_keys() + + # Compare the current indentation and column. It may add some tokens + # and decrease the current indentation level. + self.unwind_indent(self.column) + + # Peek the next character. + ch = self.peek() + + # Is it the end of stream? + if ch == u'\0': + return self.fetch_stream_end() + + # Is it a directive? + if ch == u'%' and self.check_directive(): + return self.fetch_directive() + + # Is it the document start? 
+ if ch == u'-' and self.check_document_start(): + return self.fetch_document_start() + + # Is it the document end? + if ch == u'.' and self.check_document_end(): + return self.fetch_document_end() + + # TODO: support for BOM within a stream. + #if ch == u'\uFEFF': + # return self.fetch_bom() <-- issue BOMToken + + # Note: the order of the following checks is NOT significant. + + # Is it the flow sequence start indicator? + if ch == u'[': + return self.fetch_flow_sequence_start() + + # Is it the flow mapping start indicator? + if ch == u'{': + return self.fetch_flow_mapping_start() + + # Is it the flow sequence end indicator? + if ch == u']': + return self.fetch_flow_sequence_end() + + # Is it the flow mapping end indicator? + if ch == u'}': + return self.fetch_flow_mapping_end() + + # Is it the flow entry indicator? + if ch in u',': + return self.fetch_flow_entry() + + # Is it the block entry indicator? + if ch in u'-' and self.check_block_entry(): + return self.fetch_block_entry() + + # Is it the key indicator? + if ch == u'?' and self.check_key(): + return self.fetch_key() + + # Is it the value indicator? + if ch == u':' and self.check_value(): + return self.fetch_value() + + # Is it an alias? + if ch == u'*': + return self.fetch_alias() + + # Is it an anchor? + if ch == u'&': + return self.fetch_anchor() + + # Is it a tag? + if ch == u'!': + return self.fetch_tag() + + # Is it a literal scalar? + if ch == u'|' and not self.flow_level: + return self.fetch_literal() + + # Is it a folded scalar? + if ch == u'>' and not self.flow_level: + return self.fetch_folded() + + # Is it a single quoted scalar? + if ch == u'\'': + return self.fetch_single() + + # Is it a double quoted scalar? + if ch == u'\"': + return self.fetch_double() + + # It must be a plain scalar then. + if self.check_plain(): + return self.fetch_plain() + + # No? It's an error. Let's produce a nice error message. + raise ScannerError("while scanning for the next token", None, + "found character %r that cannot start any token" + % ch.encode('utf-8'), self.get_mark()) + + # Simple keys treatment. + + def next_possible_simple_key(self): + # Return the number of the nearest possible simple key. Actually we + # don't need to loop through the whole dictionary. We may replace it + # with the following code: + # if not self.possible_simple_keys: + # return None + # return self.possible_simple_keys[ + # min(self.possible_simple_keys.keys())].token_number + min_token_number = None + for level in self.possible_simple_keys: + key = self.possible_simple_keys[level] + if min_token_number is None or key.token_number < min_token_number: + min_token_number = key.token_number + return min_token_number + + def stale_possible_simple_keys(self): + # Remove entries that are no longer possible simple keys. According to + # the YAML specification, simple keys + # - should be limited to a single line, + # - should be no longer than 1024 characters. + # Disabling this procedure will allow simple keys of any length and + # height (may cause problems if indentation is broken though). + for level in self.possible_simple_keys.keys(): + key = self.possible_simple_keys[level] + if key.line != self.line \ + or self.index-key.index > 1024: + if key.required: + raise ScannerError("while scanning a simple key", key.mark, + "could not found expected ':'", self.get_mark()) + del self.possible_simple_keys[level] + + def save_possible_simple_key(self): + # The next token may start a simple key. We check if it's possible + # and save its position. 
This function is called for + # ALIAS, ANCHOR, TAG, SCALAR(flow), '[', and '{'. + + # Check if a simple key is required at the current position. + required = not self.flow_level and self.indent == self.column + + # A simple key is required only if it is the first token in the current + # line. Therefore it is always allowed. + assert self.allow_simple_key or not required + + # The next token might be a simple key. Let's save it's number and + # position. + if self.allow_simple_key: + self.remove_possible_simple_key() + token_number = self.tokens_taken+len(self.tokens) + key = SimpleKey(token_number, required, + self.index, self.line, self.column, self.get_mark()) + self.possible_simple_keys[self.flow_level] = key + + def remove_possible_simple_key(self): + # Remove the saved possible key position at the current flow level. + if self.flow_level in self.possible_simple_keys: + key = self.possible_simple_keys[self.flow_level] + + # I don't think it's possible, but I could be wrong. + assert not key.required + #if key.required: + # raise ScannerError("while scanning a simple key", key.mark, + # "could not found expected ':'", self.get_mark()) + + # Indentation functions. + + def unwind_indent(self, column): + + ## In flow context, tokens should respect indentation. + ## Actually the condition should be `self.indent >= column` according to + ## the spec. But this condition will prohibit intuitively correct + ## constructions such as + ## key : { + ## } + #if self.flow_level and self.indent > column: + # raise ScannerError(None, None, + # "invalid intendation or unclosed '[' or '{'", + # self.get_mark()) + + # In the flow context, indentation is ignored. We make the scanner less + # restrictive then specification requires. + if self.flow_level: + return + + # In block context, we may need to issue the BLOCK-END tokens. + while self.indent > column: + mark = self.get_mark() + self.indent = self.indents.pop() + self.tokens.append(BlockEndToken(mark, mark)) + + def add_indent(self, column): + # Check if we need to increase indentation. + if self.indent < column: + self.indents.append(self.indent) + self.indent = column + return True + return False + + # Fetchers. + + def fetch_stream_start(self): + # We always add STREAM-START as the first token and STREAM-END as the + # last token. + + # Read the token. + mark = self.get_mark() + + # Add STREAM-START. + self.tokens.append(StreamStartToken(mark, mark, + encoding=self.encoding)) + + + def fetch_stream_end(self): + + # Set the current intendation to -1. + self.unwind_indent(-1) + + # Reset everything (not really needed). + self.allow_simple_key = False + self.possible_simple_keys = {} + + # Read the token. + mark = self.get_mark() + + # Add STREAM-END. + self.tokens.append(StreamEndToken(mark, mark)) + + # The steam is finished. + self.done = True + + def fetch_directive(self): + + # Set the current intendation to -1. + self.unwind_indent(-1) + + # Reset simple keys. + self.remove_possible_simple_key() + self.allow_simple_key = False + + # Scan and add DIRECTIVE. + self.tokens.append(self.scan_directive()) + + def fetch_document_start(self): + self.fetch_document_indicator(DocumentStartToken) + + def fetch_document_end(self): + self.fetch_document_indicator(DocumentEndToken) + + def fetch_document_indicator(self, TokenClass): + + # Set the current intendation to -1. + self.unwind_indent(-1) + + # Reset simple keys. Note that there could not be a block collection + # after '---'. 
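+        # (Indentation was already unwound to -1 above, so any open block
+        # scopes have received their BLOCK-END tokens by this point.)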
+ self.remove_possible_simple_key() + self.allow_simple_key = False + + # Add DOCUMENT-START or DOCUMENT-END. + start_mark = self.get_mark() + self.forward(3) + end_mark = self.get_mark() + self.tokens.append(TokenClass(start_mark, end_mark)) + + def fetch_flow_sequence_start(self): + self.fetch_flow_collection_start(FlowSequenceStartToken) + + def fetch_flow_mapping_start(self): + self.fetch_flow_collection_start(FlowMappingStartToken) + + def fetch_flow_collection_start(self, TokenClass): + + # '[' and '{' may start a simple key. + self.save_possible_simple_key() + + # Increase the flow level. + self.flow_level += 1 + + # Simple keys are allowed after '[' and '{'. + self.allow_simple_key = True + + # Add FLOW-SEQUENCE-START or FLOW-MAPPING-START. + start_mark = self.get_mark() + self.forward() + end_mark = self.get_mark() + self.tokens.append(TokenClass(start_mark, end_mark)) + + def fetch_flow_sequence_end(self): + self.fetch_flow_collection_end(FlowSequenceEndToken) + + def fetch_flow_mapping_end(self): + self.fetch_flow_collection_end(FlowMappingEndToken) + + def fetch_flow_collection_end(self, TokenClass): + + # Reset possible simple key on the current level. + self.remove_possible_simple_key() + + # Decrease the flow level. + self.flow_level -= 1 + + # No simple keys after ']' or '}'. + self.allow_simple_key = False + + # Add FLOW-SEQUENCE-END or FLOW-MAPPING-END. + start_mark = self.get_mark() + self.forward() + end_mark = self.get_mark() + self.tokens.append(TokenClass(start_mark, end_mark)) + + def fetch_flow_entry(self): + + # Simple keys are allowed after ','. + self.allow_simple_key = True + + # Reset possible simple key on the current level. + self.remove_possible_simple_key() + + # Add FLOW-ENTRY. + start_mark = self.get_mark() + self.forward() + end_mark = self.get_mark() + self.tokens.append(FlowEntryToken(start_mark, end_mark)) + + def fetch_block_entry(self): + + # Block context needs additional checks. + if not self.flow_level: + + # Are we allowed to start a new entry? + if not self.allow_simple_key: + raise ScannerError(None, None, + "sequence entries are not allowed here", + self.get_mark()) + + # We may need to add BLOCK-SEQUENCE-START. + if self.add_indent(self.column): + mark = self.get_mark() + self.tokens.append(BlockSequenceStartToken(mark, mark)) + + # It's an error for the block entry to occur in the flow context, + # but we let the parser detect this. + else: + pass + + # Simple keys are allowed after '-'. + self.allow_simple_key = True + + # Reset possible simple key on the current level. + self.remove_possible_simple_key() + + # Add BLOCK-ENTRY. + start_mark = self.get_mark() + self.forward() + end_mark = self.get_mark() + self.tokens.append(BlockEntryToken(start_mark, end_mark)) + + def fetch_key(self): + + # Block context needs additional checks. + if not self.flow_level: + + # Are we allowed to start a key (not nessesary a simple)? + if not self.allow_simple_key: + raise ScannerError(None, None, + "mapping keys are not allowed here", + self.get_mark()) + + # We may need to add BLOCK-MAPPING-START. + if self.add_indent(self.column): + mark = self.get_mark() + self.tokens.append(BlockMappingStartToken(mark, mark)) + + # Simple keys are allowed after '?' in the block context. + self.allow_simple_key = not self.flow_level + + # Reset possible simple key on the current level. + self.remove_possible_simple_key() + + # Add KEY. 
+ start_mark = self.get_mark() + self.forward() + end_mark = self.get_mark() + self.tokens.append(KeyToken(start_mark, end_mark)) + + def fetch_value(self): + + # Do we determine a simple key? + if self.flow_level in self.possible_simple_keys: + + # Add KEY. + key = self.possible_simple_keys[self.flow_level] + del self.possible_simple_keys[self.flow_level] + self.tokens.insert(key.token_number-self.tokens_taken, + KeyToken(key.mark, key.mark)) + + # If this key starts a new block mapping, we need to add + # BLOCK-MAPPING-START. + if not self.flow_level: + if self.add_indent(key.column): + self.tokens.insert(key.token_number-self.tokens_taken, + BlockMappingStartToken(key.mark, key.mark)) + + # There cannot be two simple keys one after another. + self.allow_simple_key = False + + # It must be a part of a complex key. + else: + + # Block context needs additional checks. + # (Do we really need them? They will be catched by the parser + # anyway.) + if not self.flow_level: + + # We are allowed to start a complex value if and only if + # we can start a simple key. + if not self.allow_simple_key: + raise ScannerError(None, None, + "mapping values are not allowed here", + self.get_mark()) + + # Simple keys are allowed after ':' in the block context. + self.allow_simple_key = not self.flow_level + + # Reset possible simple key on the current level. + self.remove_possible_simple_key() + + # Add VALUE. + start_mark = self.get_mark() + self.forward() + end_mark = self.get_mark() + self.tokens.append(ValueToken(start_mark, end_mark)) + + def fetch_alias(self): + + # ALIAS could be a simple key. + self.save_possible_simple_key() + + # No simple keys after ALIAS. + self.allow_simple_key = False + + # Scan and add ALIAS. + self.tokens.append(self.scan_anchor(AliasToken)) + + def fetch_anchor(self): + + # ANCHOR could start a simple key. + self.save_possible_simple_key() + + # No simple keys after ANCHOR. + self.allow_simple_key = False + + # Scan and add ANCHOR. + self.tokens.append(self.scan_anchor(AnchorToken)) + + def fetch_tag(self): + + # TAG could start a simple key. + self.save_possible_simple_key() + + # No simple keys after TAG. + self.allow_simple_key = False + + # Scan and add TAG. + self.tokens.append(self.scan_tag()) + + def fetch_literal(self): + self.fetch_block_scalar(style='|') + + def fetch_folded(self): + self.fetch_block_scalar(style='>') + + def fetch_block_scalar(self, style): + + # A simple key may follow a block scalar. + self.allow_simple_key = True + + # Reset possible simple key on the current level. + self.remove_possible_simple_key() + + # Scan and add SCALAR. + self.tokens.append(self.scan_block_scalar(style)) + + def fetch_single(self): + self.fetch_flow_scalar(style='\'') + + def fetch_double(self): + self.fetch_flow_scalar(style='"') + + def fetch_flow_scalar(self, style): + + # A flow scalar could be a simple key. + self.save_possible_simple_key() + + # No simple keys after flow scalars. + self.allow_simple_key = False + + # Scan and add SCALAR. + self.tokens.append(self.scan_flow_scalar(style)) + + def fetch_plain(self): + + # A plain scalar could be a simple key. + self.save_possible_simple_key() + + # No simple keys after plain scalars. But note that `scan_plain` will + # change this flag if the scan is finished at the beginning of the + # line. + self.allow_simple_key = False + + # Scan and add SCALAR. May change `allow_simple_key`. + self.tokens.append(self.scan_plain()) + + # Checkers. + + def check_directive(self): + + # DIRECTIVE: ^ '%' ... 
+ # The '%' indicator is already checked. + if self.column == 0: + return True + + def check_document_start(self): + + # DOCUMENT-START: ^ '---' (' '|'\n') + if self.column == 0: + if self.prefix(3) == u'---' \ + and self.peek(3) in u'\0 \t\r\n\x85\u2028\u2029': + return True + + def check_document_end(self): + + # DOCUMENT-END: ^ '...' (' '|'\n') + if self.column == 0: + if self.prefix(3) == u'...' \ + and self.peek(3) in u'\0 \t\r\n\x85\u2028\u2029': + return True + + def check_block_entry(self): + + # BLOCK-ENTRY: '-' (' '|'\n') + return self.peek(1) in u'\0 \t\r\n\x85\u2028\u2029' + + def check_key(self): + + # KEY(flow context): '?' + if self.flow_level: + return True + + # KEY(block context): '?' (' '|'\n') + else: + return self.peek(1) in u'\0 \t\r\n\x85\u2028\u2029' + + def check_value(self): + + # VALUE(flow context): ':' + if self.flow_level: + return True + + # VALUE(block context): ':' (' '|'\n') + else: + return self.peek(1) in u'\0 \t\r\n\x85\u2028\u2029' + + def check_plain(self): + + # A plain scalar may start with any non-space character except: + # '-', '?', ':', ',', '[', ']', '{', '}', + # '#', '&', '*', '!', '|', '>', '\'', '\"', + # '%', '@', '`'. + # + # It may also start with + # '-', '?', ':' + # if it is followed by a non-space character. + # + # Note that we limit the last rule to the block context (except the + # '-' character) because we want the flow context to be space + # independent. + ch = self.peek() + return ch not in u'\0 \t\r\n\x85\u2028\u2029-?:,[]{}#&*!|>\'\"%@`' \ + or (self.peek(1) not in u'\0 \t\r\n\x85\u2028\u2029' + and (ch == u'-' or (not self.flow_level and ch in u'?:'))) + + # Scanners. + + def scan_to_next_token(self): + # We ignore spaces, line breaks and comments. + # If we find a line break in the block context, we set the flag + # `allow_simple_key` on. + # The byte order mark is stripped if it's the first character in the + # stream. We do not yet support BOM inside the stream as the + # specification requires. Any such mark will be considered as a part + # of the document. + # + # TODO: We need to make tab handling rules more sane. A good rule is + # Tabs cannot precede tokens + # BLOCK-SEQUENCE-START, BLOCK-MAPPING-START, BLOCK-END, + # KEY(block), VALUE(block), BLOCK-ENTRY + # So the checking code is + # if <TAB>: + # self.allow_simple_keys = False + # We also need to add the check for `allow_simple_keys == True` to + # `unwind_indent` before issuing BLOCK-END. + # Scanners for block, flow, and plain scalars need to be modified. + + if self.index == 0 and self.peek() == u'\uFEFF': + self.forward() + found = False + while not found: + while self.peek() == u' ': + self.forward() + if self.peek() == u'#': + while self.peek() not in u'\0\r\n\x85\u2028\u2029': + self.forward() + if self.scan_line_break(): + if not self.flow_level: + self.allow_simple_key = True + else: + found = True + + def scan_directive(self): + # See the specification for details. 
+ start_mark = self.get_mark() + self.forward() + name = self.scan_directive_name(start_mark) + value = None + if name == u'YAML': + value = self.scan_yaml_directive_value(start_mark) + end_mark = self.get_mark() + elif name == u'TAG': + value = self.scan_tag_directive_value(start_mark) + end_mark = self.get_mark() + else: + end_mark = self.get_mark() + while self.peek() not in u'\0\r\n\x85\u2028\u2029': + self.forward() + self.scan_directive_ignored_line(start_mark) + return DirectiveToken(name, value, start_mark, end_mark) + + def scan_directive_name(self, start_mark): + # See the specification for details. + length = 0 + ch = self.peek(length) + while u'0' <= ch <= u'9' or u'A' <= ch <= 'Z' or u'a' <= ch <= 'z' \ + or ch in u'-_': + length += 1 + ch = self.peek(length) + if not length: + raise ScannerError("while scanning a directive", start_mark, + "expected alphabetic or numeric character, but found %r" + % ch.encode('utf-8'), self.get_mark()) + value = self.prefix(length) + self.forward(length) + ch = self.peek() + if ch not in u'\0 \r\n\x85\u2028\u2029': + raise ScannerError("while scanning a directive", start_mark, + "expected alphabetic or numeric character, but found %r" + % ch.encode('utf-8'), self.get_mark()) + return value + + def scan_yaml_directive_value(self, start_mark): + # See the specification for details. + while self.peek() == u' ': + self.forward() + major = self.scan_yaml_directive_number(start_mark) + if self.peek() != '.': + raise ScannerError("while scanning a directive", start_mark, + "expected a digit or '.', but found %r" + % self.peek().encode('utf-8'), + self.get_mark()) + self.forward() + minor = self.scan_yaml_directive_number(start_mark) + if self.peek() not in u'\0 \r\n\x85\u2028\u2029': + raise ScannerError("while scanning a directive", start_mark, + "expected a digit or ' ', but found %r" + % self.peek().encode('utf-8'), + self.get_mark()) + return (major, minor) + + def scan_yaml_directive_number(self, start_mark): + # See the specification for details. + ch = self.peek() + if not (u'0' <= ch <= '9'): + raise ScannerError("while scanning a directive", start_mark, + "expected a digit, but found %r" % ch.encode('utf-8'), + self.get_mark()) + length = 0 + while u'0' <= self.peek(length) <= u'9': + length += 1 + value = int(self.prefix(length)) + self.forward(length) + return value + + def scan_tag_directive_value(self, start_mark): + # See the specification for details. + while self.peek() == u' ': + self.forward() + handle = self.scan_tag_directive_handle(start_mark) + while self.peek() == u' ': + self.forward() + prefix = self.scan_tag_directive_prefix(start_mark) + return (handle, prefix) + + def scan_tag_directive_handle(self, start_mark): + # See the specification for details. + value = self.scan_tag_handle('directive', start_mark) + ch = self.peek() + if ch != u' ': + raise ScannerError("while scanning a directive", start_mark, + "expected ' ', but found %r" % ch.encode('utf-8'), + self.get_mark()) + return value + + def scan_tag_directive_prefix(self, start_mark): + # See the specification for details. + value = self.scan_tag_uri('directive', start_mark) + ch = self.peek() + if ch not in u'\0 \r\n\x85\u2028\u2029': + raise ScannerError("while scanning a directive", start_mark, + "expected ' ', but found %r" % ch.encode('utf-8'), + self.get_mark()) + return value + + def scan_directive_ignored_line(self, start_mark): + # See the specification for details. 
+ while self.peek() == u' ':
+ self.forward()
+ if self.peek() == u'#':
+ while self.peek() not in u'\0\r\n\x85\u2028\u2029':
+ self.forward()
+ ch = self.peek()
+ if ch not in u'\0\r\n\x85\u2028\u2029':
+ raise ScannerError("while scanning a directive", start_mark,
+ "expected a comment or a line break, but found %r"
+ % ch.encode('utf-8'), self.get_mark())
+ self.scan_line_break()
+
+ def scan_anchor(self, TokenClass):
+ # The specification does not restrict characters for anchors and
+ # aliases. This may lead to problems, for instance, the document:
+ # [ *alias, value ]
+ # can be interpreted in two ways, as
+ # [ "value" ]
+ # and
+ # [ *alias , "value" ]
+ # Therefore we restrict aliases to numbers and ASCII letters.
+ start_mark = self.get_mark()
+ indicator = self.peek()
+ if indicator == '*':
+ name = 'alias'
+ else:
+ name = 'anchor'
+ self.forward()
+ length = 0
+ ch = self.peek(length)
+ while u'0' <= ch <= u'9' or u'A' <= ch <= 'Z' or u'a' <= ch <= 'z' \
+ or ch in u'-_':
+ length += 1
+ ch = self.peek(length)
+ if not length:
+ raise ScannerError("while scanning an %s" % name, start_mark,
+ "expected alphabetic or numeric character, but found %r"
+ % ch.encode('utf-8'), self.get_mark())
+ value = self.prefix(length)
+ self.forward(length)
+ ch = self.peek()
+ if ch not in u'\0 \t\r\n\x85\u2028\u2029?:,]}%@`':
+ raise ScannerError("while scanning an %s" % name, start_mark,
+ "expected alphabetic or numeric character, but found %r"
+ % ch.encode('utf-8'), self.get_mark())
+ end_mark = self.get_mark()
+ return TokenClass(value, start_mark, end_mark)
+
+ def scan_tag(self):
+ # See the specification for details.
+ start_mark = self.get_mark()
+ ch = self.peek(1)
+ if ch == u'<':
+ handle = None
+ self.forward(2)
+ suffix = self.scan_tag_uri('tag', start_mark)
+ if self.peek() != u'>':
+ raise ScannerError("while parsing a tag", start_mark,
+ "expected '>', but found %r" % self.peek().encode('utf-8'),
+ self.get_mark())
+ self.forward()
+ elif ch in u'\0 \t\r\n\x85\u2028\u2029':
+ handle = None
+ suffix = u'!'
+ self.forward()
+ else:
+ length = 1
+ use_handle = False
+ while ch not in u'\0 \r\n\x85\u2028\u2029':
+ if ch == u'!':
+ use_handle = True
+ break
+ length += 1
+ ch = self.peek(length)
+ handle = u'!'
+ if use_handle:
+ handle = self.scan_tag_handle('tag', start_mark)
+ else:
+ handle = u'!'
+ self.forward()
+ suffix = self.scan_tag_uri('tag', start_mark)
+ ch = self.peek()
+ if ch not in u'\0 \r\n\x85\u2028\u2029':
+ raise ScannerError("while scanning a tag", start_mark,
+ "expected ' ', but found %r" % ch.encode('utf-8'),
+ self.get_mark())
+ value = (handle, suffix)
+ end_mark = self.get_mark()
+ return TagToken(value, start_mark, end_mark)
+
+ def scan_block_scalar(self, style):
+ # See the specification for details.
+
+ if style == '>':
+ folded = True
+ else:
+ folded = False
+
+ chunks = []
+ start_mark = self.get_mark()
+
+ # Scan the header.
+ self.forward()
+ chomping, increment = self.scan_block_scalar_indicators(start_mark)
+ self.scan_block_scalar_ignored_line(start_mark)
+
+ # Determine the indentation level and go to the first non-empty line.
+ min_indent = self.indent+1
+ if min_indent < 1:
+ min_indent = 1
+ if increment is None:
+ breaks, max_indent, end_mark = self.scan_block_scalar_indentation()
+ indent = max(min_indent, max_indent)
+ else:
+ indent = min_indent+increment-1
+ breaks, end_mark = self.scan_block_scalar_breaks(indent)
+ line_break = u''
+
+ # Scan the inner part of the block scalar.
+ while self.column == indent and self.peek() != u'\0': + chunks.extend(breaks) + leading_non_space = self.peek() not in u' \t' + length = 0 + while self.peek(length) not in u'\0\r\n\x85\u2028\u2029': + length += 1 + chunks.append(self.prefix(length)) + self.forward(length) + line_break = self.scan_line_break() + breaks, end_mark = self.scan_block_scalar_breaks(indent) + if self.column == indent and self.peek() != u'\0': + + # Unfortunately, folding rules are ambiguous. + # + # This is the folding according to the specification: + + if folded and line_break == u'\n' \ + and leading_non_space and self.peek() not in u' \t': + if not breaks: + chunks.append(u' ') + else: + chunks.append(line_break) + + # This is Clark Evans's interpretation (also in the spec + # examples): + # + #if folded and line_break == u'\n': + # if not breaks: + # if self.peek() not in ' \t': + # chunks.append(u' ') + # else: + # chunks.append(line_break) + #else: + # chunks.append(line_break) + else: + break + + # Chomp the tail. + if chomping is not False: + chunks.append(line_break) + if chomping is True: + chunks.extend(breaks) + + # We are done. + return ScalarToken(u''.join(chunks), False, start_mark, end_mark, + style) + + def scan_block_scalar_indicators(self, start_mark): + # See the specification for details. + chomping = None + increment = None + ch = self.peek() + if ch in u'+-': + if ch == '+': + chomping = True + else: + chomping = False + self.forward() + ch = self.peek() + if ch in u'0123456789': + increment = int(ch) + if increment == 0: + raise ScannerError("while scanning a block scalar", start_mark, + "expected indentation indicator in the range 1-9, but found 0", + self.get_mark()) + self.forward() + elif ch in u'0123456789': + increment = int(ch) + if increment == 0: + raise ScannerError("while scanning a block scalar", start_mark, + "expected indentation indicator in the range 1-9, but found 0", + self.get_mark()) + self.forward() + ch = self.peek() + if ch in u'+-': + if ch == '+': + chomping = True + else: + chomping = False + self.forward() + ch = self.peek() + if ch not in u'\0 \r\n\x85\u2028\u2029': + raise ScannerError("while scanning a block scalar", start_mark, + "expected chomping or indentation indicators, but found %r" + % ch.encode('utf-8'), self.get_mark()) + return chomping, increment + + def scan_block_scalar_ignored_line(self, start_mark): + # See the specification for details. + while self.peek() == u' ': + self.forward() + if self.peek() == u'#': + while self.peek() not in u'\0\r\n\x85\u2028\u2029': + self.forward() + ch = self.peek() + if ch not in u'\0\r\n\x85\u2028\u2029': + raise ScannerError("while scanning a block scalar", start_mark, + "expected a comment or a line break, but found %r" + % ch.encode('utf-8'), self.get_mark()) + self.scan_line_break() + + def scan_block_scalar_indentation(self): + # See the specification for details. + chunks = [] + max_indent = 0 + end_mark = self.get_mark() + while self.peek() in u' \r\n\x85\u2028\u2029': + if self.peek() != u' ': + chunks.append(self.scan_line_break()) + end_mark = self.get_mark() + else: + self.forward() + if self.column > max_indent: + max_indent = self.column + return chunks, max_indent, end_mark + + def scan_block_scalar_breaks(self, indent): + # See the specification for details. 
+ chunks = []
+ end_mark = self.get_mark()
+ while self.column < indent and self.peek() == u' ':
+ self.forward()
+ while self.peek() in u'\r\n\x85\u2028\u2029':
+ chunks.append(self.scan_line_break())
+ end_mark = self.get_mark()
+ while self.column < indent and self.peek() == u' ':
+ self.forward()
+ return chunks, end_mark
+
+ def scan_flow_scalar(self, style):
+ # See the specification for details.
+ # Note that we lose indentation rules for quoted scalars. Quoted
+ # scalars don't need to adhere to indentation because " and ' clearly
+ # mark the beginning and the end of them. Therefore we are less
+ # restrictive than the specification requires. We only need to check
+ # that document separators are not included in scalars.
+ if style == '"':
+ double = True
+ else:
+ double = False
+ chunks = []
+ start_mark = self.get_mark()
+ quote = self.peek()
+ self.forward()
+ chunks.extend(self.scan_flow_scalar_non_spaces(double, start_mark))
+ while self.peek() != quote:
+ chunks.extend(self.scan_flow_scalar_spaces(double, start_mark))
+ chunks.extend(self.scan_flow_scalar_non_spaces(double, start_mark))
+ self.forward()
+ end_mark = self.get_mark()
+ return ScalarToken(u''.join(chunks), False, start_mark, end_mark,
+ style)
+
+ ESCAPE_REPLACEMENTS = {
+ u'0': u'\0',
+ u'a': u'\x07',
+ u'b': u'\x08',
+ u't': u'\x09',
+ u'\t': u'\x09',
+ u'n': u'\x0A',
+ u'v': u'\x0B',
+ u'f': u'\x0C',
+ u'r': u'\x0D',
+ u'e': u'\x1B',
+ u' ': u'\x20',
+ u'\"': u'\"',
+ u'\\': u'\\',
+ u'N': u'\x85',
+ u'_': u'\xA0',
+ u'L': u'\u2028',
+ u'P': u'\u2029',
+ }
+
+ ESCAPE_CODES = {
+ u'x': 2,
+ u'u': 4,
+ u'U': 8,
+ }
+
+ def scan_flow_scalar_non_spaces(self, double, start_mark):
+ # See the specification for details.
+ chunks = []
+ while True:
+ length = 0
+ while self.peek(length) not in u'\'\"\\\0 \t\r\n\x85\u2028\u2029':
+ length += 1
+ if length:
+ chunks.append(self.prefix(length))
+ self.forward(length)
+ ch = self.peek()
+ if not double and ch == u'\'' and self.peek(1) == u'\'':
+ chunks.append(u'\'')
+ self.forward(2)
+ elif (double and ch == u'\'') or (not double and ch in u'\"\\'):
+ chunks.append(ch)
+ self.forward()
+ elif double and ch == u'\\':
+ self.forward()
+ ch = self.peek()
+ if ch in self.ESCAPE_REPLACEMENTS:
+ chunks.append(self.ESCAPE_REPLACEMENTS[ch])
+ self.forward()
+ elif ch in self.ESCAPE_CODES:
+ length = self.ESCAPE_CODES[ch]
+ self.forward()
+ for k in range(length):
+ if self.peek(k) not in u'0123456789ABCDEFabcdef':
+ raise ScannerError("while scanning a double-quoted scalar", start_mark,
+ "expected escape sequence of %d hexadecimal numbers, but found %r" %
+ (length, self.peek(k).encode('utf-8')), self.get_mark())
+ code = int(self.prefix(length), 16)
+ chunks.append(unichr(code))
+ self.forward(length)
+ elif ch in u'\r\n\x85\u2028\u2029':
+ self.scan_line_break()
+ chunks.extend(self.scan_flow_scalar_breaks(double, start_mark))
+ else:
+ raise ScannerError("while scanning a double-quoted scalar", start_mark,
+ "found unknown escape character %r" % ch.encode('utf-8'), self.get_mark())
+ else:
+ return chunks
+
+ def scan_flow_scalar_spaces(self, double, start_mark):
+ # See the specification for details.
+ chunks = []
+ length = 0
+ while self.peek(length) in u' \t':
+ length += 1
+ whitespaces = self.prefix(length)
+ self.forward(length)
+ ch = self.peek()
+ if ch == u'\0':
+ raise ScannerError("while scanning a quoted scalar", start_mark,
+ "found unexpected end of stream", self.get_mark())
+ elif ch in u'\r\n\x85\u2028\u2029':
+ line_break = self.scan_line_break()
+ breaks = self.scan_flow_scalar_breaks(double, start_mark)
+ if line_break != u'\n':
+ chunks.append(line_break)
+ elif not breaks:
+ chunks.append(u' ')
+ chunks.extend(breaks)
+ else:
+ chunks.append(whitespaces)
+ return chunks
+
+ def scan_flow_scalar_breaks(self, double, start_mark):
+ # See the specification for details.
+ chunks = []
+ while True:
+ # Instead of checking indentation, we check for document
+ # separators.
+ prefix = self.prefix(3)
+ if (prefix == u'---' or prefix == u'...') \
+ and self.peek(3) in u'\0 \t\r\n\x85\u2028\u2029':
+ raise ScannerError("while scanning a quoted scalar", start_mark,
+ "found unexpected document separator", self.get_mark())
+ while self.peek() in u' \t':
+ self.forward()
+ if self.peek() in u'\r\n\x85\u2028\u2029':
+ chunks.append(self.scan_line_break())
+ else:
+ return chunks
+
+ def scan_plain(self):
+ # See the specification for details.
+ # We add an additional restriction for the flow context:
+ # plain scalars in the flow context cannot contain ',', ':' and '?'.
+ # We also keep track of the `allow_simple_key` flag here.
+ # Indentation rules are loosened for the flow context.
+ chunks = []
+ start_mark = self.get_mark()
+ end_mark = start_mark
+ indent = self.indent+1
+ # We allow zero indentation for scalars, but then we need to check for
+ # document separators at the beginning of the line.
+ #if indent == 0:
+ # indent = 1
+ spaces = []
+ while True:
+ length = 0
+ if self.peek() == u'#':
+ break
+ while True:
+ ch = self.peek(length)
+ if ch in u'\0 \t\r\n\x85\u2028\u2029' \
+ or (not self.flow_level and ch == u':' and
+ self.peek(length+1) in u'\0 \t\r\n\x85\u2028\u2029') \
+ or (self.flow_level and ch in u',:?[]{}'):
+ break
+ length += 1
+ # It's not clear what we should do with ':' in the flow context.
+ if (self.flow_level and ch == u':'
+ and self.peek(length+1) not in u'\0 \t\r\n\x85\u2028\u2029,[]{}'):
+ self.forward(length)
+ raise ScannerError("while scanning a plain scalar", start_mark,
+ "found unexpected ':'", self.get_mark(),
+ "Please check http://pyyaml.org/wiki/YAMLColonInFlowContext for details.")
+ if length == 0:
+ break
+ self.allow_simple_key = False
+ chunks.extend(spaces)
+ chunks.append(self.prefix(length))
+ self.forward(length)
+ end_mark = self.get_mark()
+ spaces = self.scan_plain_spaces(indent, start_mark)
+ if not spaces or self.peek() == u'#' \
+ or (not self.flow_level and self.column < indent):
+ break
+ return ScalarToken(u''.join(chunks), True, start_mark, end_mark)
+
+ def scan_plain_spaces(self, indent, start_mark):
+ # See the specification for details.
+ # The specification is really confusing about tabs in plain scalars.
+ # We just forbid them completely. Do not use tabs in YAML!
+ chunks = [] + length = 0 + while self.peek(length) in u' ': + length += 1 + whitespaces = self.prefix(length) + self.forward(length) + ch = self.peek() + if ch in u'\r\n\x85\u2028\u2029': + line_break = self.scan_line_break() + self.allow_simple_key = True + prefix = self.prefix(3) + if (prefix == u'---' or prefix == u'...') \ + and self.peek(3) in u'\0 \t\r\n\x85\u2028\u2029': + return + breaks = [] + while self.peek() in u' \r\n\x85\u2028\u2029': + if self.peek() == ' ': + self.forward() + else: + breaks.append(self.scan_line_break()) + prefix = self.prefix(3) + if (prefix == u'---' or prefix == u'...') \ + and self.peek(3) in u'\0 \t\r\n\x85\u2028\u2029': + return + if line_break != u'\n': + chunks.append(line_break) + elif not breaks: + chunks.append(u' ') + chunks.extend(breaks) + elif whitespaces: + chunks.append(whitespaces) + return chunks + + def scan_tag_handle(self, name, start_mark): + # See the specification for details. + # For some strange reasons, the specification does not allow '_' in + # tag handles. I have allowed it anyway. + ch = self.peek() + if ch != u'!': + raise ScannerError("while scanning a %s" % name, start_mark, + "expected '!', but found %r" % ch.encode('utf-8'), + self.get_mark()) + length = 1 + ch = self.peek(length) + if ch != u' ': + while u'0' <= ch <= u'9' or u'A' <= ch <= 'Z' or u'a' <= ch <= 'z' \ + or ch in u'-_': + length += 1 + ch = self.peek(length) + if ch != u'!': + self.forward(length) + raise ScannerError("while scanning a %s" % name, start_mark, + "expected '!', but found %r" % ch.encode('utf-8'), + self.get_mark()) + length += 1 + value = self.prefix(length) + self.forward(length) + return value + + def scan_tag_uri(self, name, start_mark): + # See the specification for details. + # Note: we do not check if URI is well-formed. + chunks = [] + length = 0 + ch = self.peek(length) + while u'0' <= ch <= u'9' or u'A' <= ch <= 'Z' or u'a' <= ch <= 'z' \ + or ch in u'-;/?:@&=+$,_.!~*\'()[]%': + if ch == u'%': + chunks.append(self.prefix(length)) + self.forward(length) + length = 0 + chunks.append(self.scan_uri_escapes(name, start_mark)) + else: + length += 1 + ch = self.peek(length) + if length: + chunks.append(self.prefix(length)) + self.forward(length) + length = 0 + if not chunks: + raise ScannerError("while parsing a %s" % name, start_mark, + "expected URI, but found %r" % ch.encode('utf-8'), + self.get_mark()) + return u''.join(chunks) + + def scan_uri_escapes(self, name, start_mark): + # See the specification for details. 
+ bytes = []
+ mark = self.get_mark()
+ while self.peek() == u'%':
+ self.forward()
+ for k in range(2):
+ if self.peek(k) not in u'0123456789ABCDEFabcdef':
+ raise ScannerError("while scanning a %s" % name, start_mark,
+ "expected URI escape sequence of 2 hexadecimal numbers, but found %r" %
+ (self.peek(k).encode('utf-8')), self.get_mark())
+ bytes.append(chr(int(self.prefix(2), 16)))
+ self.forward(2)
+ try:
+ value = unicode(''.join(bytes), 'utf-8')
+ except UnicodeDecodeError, exc:
+ raise ScannerError("while scanning a %s" % name, start_mark, str(exc), mark)
+ return value
+
+ def scan_line_break(self):
+ # Transforms:
+ # '\r\n' : '\n'
+ # '\r' : '\n'
+ # '\n' : '\n'
+ # '\x85' : '\n'
+ # '\u2028' : '\u2028'
+ # '\u2029' : '\u2029'
+ # default : ''
+ ch = self.peek()
+ if ch in u'\r\n\x85':
+ if self.prefix(2) == u'\r\n':
+ self.forward(2)
+ else:
+ self.forward()
+ return u'\n'
+ elif ch in u'\u2028\u2029':
+ self.forward()
+ return ch
+ return u''
+
+#try:
+# import psyco
+# psyco.bind(Scanner)
+#except ImportError:
+# pass
+
diff --git a/scripts/external_libs/PyYAML-3.01/lib/yaml/serializer.py b/scripts/external_libs/PyYAML-3.01/lib/yaml/serializer.py new file mode 100644 index 00000000..937be9a9 --- /dev/null +++ b/scripts/external_libs/PyYAML-3.01/lib/yaml/serializer.py @@ -0,0 +1,121 @@
+
+__all__ = ['Serializer', 'SerializerError']
+
+from error import YAMLError
+from events import *
+from nodes import *
+
+class SerializerError(YAMLError):
+ pass
+
+class Serializer:
+
+ ANCHOR_TEMPLATE = u'id%03d'
+
+ def __init__(self, encoding=None,
+ explicit_start=None, explicit_end=None, version=None, tags=None):
+ self.use_encoding = encoding
+ self.use_explicit_start = explicit_start
+ self.use_explicit_end = explicit_end
+ self.use_version = version
+ self.use_tags = tags
+ self.serialized_nodes = {}
+ self.anchors = {}
+ self.last_anchor_id = 0
+ self.closed = None
+
+ def open(self):
+ if self.closed is None:
+ self.emit(StreamStartEvent(encoding=self.use_encoding))
+ self.closed = False
+ elif self.closed:
+ raise SerializerError("serializer is closed")
+ else:
+ raise SerializerError("serializer is already opened")
+
+ def close(self):
+ if self.closed is None:
+ raise SerializerError("serializer is not opened")
+ elif not self.closed:
+ self.emit(StreamEndEvent())
+ self.closed = True
+
+ #def __del__(self):
+ # self.close()
+
+ def serialize(self, node):
+ if self.closed is None:
+ raise SerializerError("serializer is not opened")
+ elif self.closed:
+ raise SerializerError("serializer is closed")
+ self.emit(DocumentStartEvent(explicit=self.use_explicit_start,
+ version=self.use_version, tags=self.use_tags))
+ self.anchor_node(node)
+ self.serialize_node(node, None, None)
+ self.emit(DocumentEndEvent(explicit=self.use_explicit_end))
+ self.serialized_nodes = {}
+ self.anchors = {}
+ self.last_alias_id = 0
+
+ def anchor_node(self, node):
+ if node in self.anchors:
+ if self.anchors[node] is None:
+ self.anchors[node] = self.generate_anchor(node)
+ else:
+ self.anchors[node] = None
+ if isinstance(node, SequenceNode):
+ for item in node.value:
+ self.anchor_node(item)
+ elif isinstance(node, MappingNode):
+ if hasattr(node.value, 'keys'):
+ for key in node.value.keys():
+ self.anchor_node(key)
+ self.anchor_node(node.value[key])
+ else:
+ for key, value in node.value:
+ self.anchor_node(key)
+ self.anchor_node(value)
+
+ def generate_anchor(self, node):
+ self.last_anchor_id += 1
+ return self.ANCHOR_TEMPLATE % self.last_anchor_id
+
+ def serialize_node(self, node, parent,
index): + alias = self.anchors[node] + if node in self.serialized_nodes: + self.emit(AliasEvent(alias)) + else: + self.serialized_nodes[node] = True + self.descend_resolver(parent, index) + if isinstance(node, ScalarNode): + detected_tag = self.resolve(ScalarNode, node.value, (True, False)) + default_tag = self.resolve(ScalarNode, node.value, (False, True)) + implicit = (node.tag == detected_tag), (node.tag == default_tag) + self.emit(ScalarEvent(alias, node.tag, implicit, node.value, + style=node.style)) + elif isinstance(node, SequenceNode): + implicit = (node.tag + == self.resolve(SequenceNode, node.value, True)) + self.emit(SequenceStartEvent(alias, node.tag, implicit, + flow_style=node.flow_style)) + index = 0 + for item in node.value: + self.serialize_node(item, node, index) + index += 1 + self.emit(SequenceEndEvent()) + elif isinstance(node, MappingNode): + implicit = (node.tag + == self.resolve(MappingNode, node.value, True)) + self.emit(MappingStartEvent(alias, node.tag, implicit, + flow_style=node.flow_style)) + if hasattr(node.value, 'keys'): + for key in node.value.keys(): + self.serialize_node(key, node, None) + self.serialize_node(node.value[key], node, key) + else: + for key, value in node.value: + self.serialize_node(key, node, None) + self.serialize_node(value, node, key) + self.emit(MappingEndEvent()) + self.ascend_resolver() + diff --git a/scripts/external_libs/PyYAML-3.01/lib/yaml/tokens.py b/scripts/external_libs/PyYAML-3.01/lib/yaml/tokens.py new file mode 100644 index 00000000..4fe4522e --- /dev/null +++ b/scripts/external_libs/PyYAML-3.01/lib/yaml/tokens.py @@ -0,0 +1,104 @@ + +class Token: + def __init__(self, start_mark, end_mark): + self.start_mark = start_mark + self.end_mark = end_mark + def __repr__(self): + attributes = [key for key in self.__dict__ + if not key.endswith('_mark')] + attributes.sort() + arguments = ', '.join(['%s=%r' % (key, getattr(self, key)) + for key in attributes]) + return '%s(%s)' % (self.__class__.__name__, arguments) + +#class BOMToken(Token): +# id = '<byte order mark>' + +class DirectiveToken(Token): + id = '<directive>' + def __init__(self, name, value, start_mark, end_mark): + self.name = name + self.value = value + self.start_mark = start_mark + self.end_mark = end_mark + +class DocumentStartToken(Token): + id = '<document start>' + +class DocumentEndToken(Token): + id = '<document end>' + +class StreamStartToken(Token): + id = '<stream start>' + def __init__(self, start_mark=None, end_mark=None, + encoding=None): + self.start_mark = start_mark + self.end_mark = end_mark + self.encoding = encoding + +class StreamEndToken(Token): + id = '<stream end>' + +class BlockSequenceStartToken(Token): + id = '<block sequence start>' + +class BlockMappingStartToken(Token): + id = '<block mapping start>' + +class BlockEndToken(Token): + id = '<block end>' + +class FlowSequenceStartToken(Token): + id = '[' + +class FlowMappingStartToken(Token): + id = '{' + +class FlowSequenceEndToken(Token): + id = ']' + +class FlowMappingEndToken(Token): + id = '}' + +class KeyToken(Token): + id = '?' 
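A minimal sketch of how these Token classes surface in practice, assuming the
package's top-level yaml.scan() helper is available (it drives the Scanner
above and yields one Token instance per construct):

    import yaml
    # Tokens come back in source order, e.g. StreamStartToken,
    # BlockMappingStartToken, KeyToken, ScalarToken(value=u'key'), ...
    for token in yaml.scan(u'key: [1, 2]\n'):
        print token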
+
+class ValueToken(Token):
+ id = ':'
+
+class BlockEntryToken(Token):
+ id = '-'
+
+class FlowEntryToken(Token):
+ id = ','
+
+class AliasToken(Token):
+ id = '<alias>'
+ def __init__(self, value, start_mark, end_mark):
+ self.value = value
+ self.start_mark = start_mark
+ self.end_mark = end_mark
+
+class AnchorToken(Token):
+ id = '<anchor>'
+ def __init__(self, value, start_mark, end_mark):
+ self.value = value
+ self.start_mark = start_mark
+ self.end_mark = end_mark
+
+class TagToken(Token):
+ id = '<tag>'
+ def __init__(self, value, start_mark, end_mark):
+ self.value = value
+ self.start_mark = start_mark
+ self.end_mark = end_mark
+
+class ScalarToken(Token):
+ id = '<scalar>'
+ def __init__(self, value, plain, start_mark, end_mark, style=None):
+ self.value = value
+ self.plain = plain
+ self.start_mark = start_mark
+ self.end_mark = end_mark
+ self.style = style
+
diff --git a/scripts/external_libs/PyYAML-3.01/setup.py b/scripts/external_libs/PyYAML-3.01/setup.py new file mode 100644 index 00000000..23c1efac --- /dev/null +++ b/scripts/external_libs/PyYAML-3.01/setup.py @@ -0,0 +1,52 @@
+
+NAME = 'PyYAML'
+VERSION = '3.01'
+DESCRIPTION = "YAML parser and emitter for Python"
+LONG_DESCRIPTION = """\
+YAML is a data serialization format designed for human readability and
+interaction with scripting languages. PyYAML is a YAML parser and
+emitter for Python.
+
+PyYAML features a complete YAML 1.1 parser, Unicode support, pickle
+support, capable extension API, and sensible error messages. PyYAML
+supports standard YAML tags and provides Python-specific tags that allow
+representing an arbitrary Python object.
+
+PyYAML is applicable for a broad range of tasks from complex
+configuration files to object serialization and persistence."""
+AUTHOR = "Kirill Simonov"
+AUTHOR_EMAIL = 'xi@resolvent.net'
+LICENSE = "MIT"
+PLATFORMS = "Any"
+URL = "http://pyyaml.org/wiki/PyYAML"
+DOWNLOAD_URL = "http://pyyaml.org/download/pyyaml/%s-%s.tar.gz" % (NAME, VERSION)
+CLASSIFIERS = [
+ "Development Status :: 4 - Beta",
+ "Intended Audience :: Developers",
+ "License :: OSI Approved :: MIT License",
+ "Operating System :: OS Independent",
+ "Programming Language :: Python",
+ "Topic :: Software Development :: Libraries :: Python Modules",
+ "Topic :: Text Processing :: Markup",
+]
+
+
+from distutils.core import setup
+
+setup(
+ name=NAME,
+ version=VERSION,
+ description=DESCRIPTION,
+ long_description=LONG_DESCRIPTION,
+ author=AUTHOR,
+ author_email=AUTHOR_EMAIL,
+ license=LICENSE,
+ platforms=PLATFORMS,
+ url=URL,
+ download_url=DOWNLOAD_URL,
+ classifiers=CLASSIFIERS,
+
+ package_dir={'': 'lib'},
+ packages=['yaml'],
+)
+
diff --git a/scripts/automation/trex_control_plane/python_lib/jsonrpclib-pelix-0.2.5/jsonrpclib_pelix.egg-info/dependency_links.txt b/scripts/external_libs/__init__.py index 8b137891..8b137891 100755..100644 --- a/scripts/automation/trex_control_plane/python_lib/jsonrpclib-pelix-0.2.5/jsonrpclib_pelix.egg-info/dependency_links.txt +++ b/scripts/external_libs/__init__.py diff --git a/scripts/automation/trex_control_plane/python_lib/enum34-1.0.4/PKG-INFO b/scripts/external_libs/enum34-1.0.4/PKG-INFO index 428ce0e3..428ce0e3 100755..100644 --- a/scripts/automation/trex_control_plane/python_lib/enum34-1.0.4/PKG-INFO +++ b/scripts/external_libs/enum34-1.0.4/PKG-INFO diff --git a/scripts/automation/trex_control_plane/python_lib/enum34-1.0.4/enum/LICENSE b/scripts/external_libs/enum34-1.0.4/enum/LICENSE index 9003b885..9003b885 100755..100644 --- 
a/scripts/automation/trex_control_plane/python_lib/enum34-1.0.4/enum/LICENSE +++ b/scripts/external_libs/enum34-1.0.4/enum/LICENSE diff --git a/scripts/automation/trex_control_plane/python_lib/enum34-1.0.4/enum/README b/scripts/external_libs/enum34-1.0.4/enum/README index 511af984..511af984 100755..100644 --- a/scripts/automation/trex_control_plane/python_lib/enum34-1.0.4/enum/README +++ b/scripts/external_libs/enum34-1.0.4/enum/README diff --git a/scripts/automation/trex_control_plane/python_lib/enum34-1.0.4/enum/__init__.py b/scripts/external_libs/enum34-1.0.4/enum/__init__.py index 6a327a8a..6a327a8a 100755..100644 --- a/scripts/automation/trex_control_plane/python_lib/enum34-1.0.4/enum/__init__.py +++ b/scripts/external_libs/enum34-1.0.4/enum/__init__.py diff --git a/scripts/automation/trex_control_plane/python_lib/enum34-1.0.4/enum/doc/enum.rst b/scripts/external_libs/enum34-1.0.4/enum/doc/enum.rst index 0d429bfc..0d429bfc 100755..100644 --- a/scripts/automation/trex_control_plane/python_lib/enum34-1.0.4/enum/doc/enum.rst +++ b/scripts/external_libs/enum34-1.0.4/enum/doc/enum.rst diff --git a/scripts/automation/trex_control_plane/python_lib/enum34-1.0.4/enum/enum.py b/scripts/external_libs/enum34-1.0.4/enum/enum.py index 6a327a8a..6a327a8a 100755..100644 --- a/scripts/automation/trex_control_plane/python_lib/enum34-1.0.4/enum/enum.py +++ b/scripts/external_libs/enum34-1.0.4/enum/enum.py diff --git a/scripts/automation/trex_control_plane/python_lib/enum34-1.0.4/enum/test_enum.py b/scripts/external_libs/enum34-1.0.4/enum/test_enum.py index d7a97942..d7a97942 100755..100644 --- a/scripts/automation/trex_control_plane/python_lib/enum34-1.0.4/enum/test_enum.py +++ b/scripts/external_libs/enum34-1.0.4/enum/test_enum.py diff --git a/scripts/automation/trex_control_plane/python_lib/enum34-1.0.4/setup.py b/scripts/external_libs/enum34-1.0.4/setup.py index ecb4944f..4cb9c691 100755..100644 --- a/scripts/automation/trex_control_plane/python_lib/enum34-1.0.4/setup.py +++ b/scripts/external_libs/enum34-1.0.4/setup.py @@ -1,44 +1,44 @@ -import os
-import sys
-from distutils.core import setup
-
-if sys.version_info[:2] < (2, 7):
- required = ['ordereddict']
-else:
- required = []
-
-long_desc = open('enum/doc/enum.rst').read()
-
-setup( name='enum34',
- version='1.0.4',
- url='https://pypi.python.org/pypi/enum34',
- packages=['enum'],
- package_data={
- 'enum' : [
- 'LICENSE',
- 'README',
- 'doc/enum.rst',
- 'doc/enum.pdf',
- 'test_enum.py',
- ]
- },
- license='BSD License',
- description='Python 3.4 Enum backported to 3.3, 3.2, 3.1, 2.7, 2.6, 2.5, and 2.4',
- long_description=long_desc,
- provides=['enum'],
- install_requires=required,
- author='Ethan Furman',
- author_email='ethan@stoneleaf.us',
- classifiers=[
- 'Development Status :: 5 - Production/Stable',
- 'Intended Audience :: Developers',
- 'License :: OSI Approved :: BSD License',
- 'Programming Language :: Python',
- 'Topic :: Software Development',
- 'Programming Language :: Python :: 2.4',
- 'Programming Language :: Python :: 2.5',
- 'Programming Language :: Python :: 2.6',
- 'Programming Language :: Python :: 2.7',
- 'Programming Language :: Python :: 3',
- ],
- )
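For reference, the package this script installs is the Python 3.4 Enum
backported to earlier interpreters, so a minimal smoke test of an installed
copy (illustrative only) is:

    from enum import Enum

    class Color(Enum):
        red = 1
        green = 2

    assert Color.red.value == 1
    assert Color(2) is Color.green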
+import os +import sys +from distutils.core import setup + +if sys.version_info[:2] < (2, 7): + required = ['ordereddict'] +else: + required = [] + +long_desc = open('enum/doc/enum.rst').read() + +setup( name='enum34', + version='1.0.4', + url='https://pypi.python.org/pypi/enum34', + packages=['enum'], + package_data={ + 'enum' : [ + 'LICENSE', + 'README', + 'doc/enum.rst', + 'doc/enum.pdf', + 'test_enum.py', + ] + }, + license='BSD License', + description='Python 3.4 Enum backported to 3.3, 3.2, 3.1, 2.7, 2.6, 2.5, and 2.4', + long_description=long_desc, + provides=['enum'], + install_requires=required, + author='Ethan Furman', + author_email='ethan@stoneleaf.us', + classifiers=[ + 'Development Status :: 5 - Production/Stable', + 'Intended Audience :: Developers', + 'License :: OSI Approved :: BSD License', + 'Programming Language :: Python', + 'Topic :: Software Development', + 'Programming Language :: Python :: 2.4', + 'Programming Language :: Python :: 2.5', + 'Programming Language :: Python :: 2.6', + 'Programming Language :: Python :: 2.7', + 'Programming Language :: Python :: 3', + ], + ) diff --git a/scripts/automation/trex_control_plane/python_lib/jsonrpclib-0.1.3/LICENSE.txt b/scripts/external_libs/jsonrpclib-pelix-0.2.5/LICENSE.txt index 51fca54c..51fca54c 100755..100644 --- a/scripts/automation/trex_control_plane/python_lib/jsonrpclib-0.1.3/LICENSE.txt +++ b/scripts/external_libs/jsonrpclib-pelix-0.2.5/LICENSE.txt diff --git a/scripts/automation/trex_control_plane/python_lib/jsonrpclib-pelix-0.2.5/MANIFEST.in b/scripts/external_libs/jsonrpclib-pelix-0.2.5/MANIFEST.in index 42f4acf5..eb0014ad 100755..100644 --- a/scripts/automation/trex_control_plane/python_lib/jsonrpclib-pelix-0.2.5/MANIFEST.in +++ b/scripts/external_libs/jsonrpclib-pelix-0.2.5/MANIFEST.in @@ -1,2 +1,2 @@ -include *.txt
-include README.rst
+include *.txt +include README.rst diff --git a/scripts/automation/trex_control_plane/python_lib/jsonrpclib-pelix-0.2.5/PKG-INFO b/scripts/external_libs/jsonrpclib-pelix-0.2.5/PKG-INFO index 9d0f3fca..5dce6b1c 100755..100644 --- a/scripts/automation/trex_control_plane/python_lib/jsonrpclib-pelix-0.2.5/PKG-INFO +++ b/scripts/external_libs/jsonrpclib-pelix-0.2.5/PKG-INFO @@ -1,460 +1,460 @@ -Metadata-Version: 1.1
-Name: jsonrpclib-pelix
-Version: 0.2.5
-Summary: This project is an implementation of the JSON-RPC v2.0 specification (backwards-compatible) as a client library, for Python 2.6+ and Python 3.This version is a fork of jsonrpclib by Josh Marshall, usable with Pelix remote services.
-Home-page: http://github.com/tcalmant/jsonrpclib/
-Author: Thomas Calmant
-Author-email: thomas.calmant+github@gmail.com
-License: Apache License 2.0
-Description: JSONRPClib (patched for Pelix)
- ##############################
-
- .. image:: https://pypip.in/license/jsonrpclib-pelix/badge.svg
- :target: https://pypi.python.org/pypi/jsonrpclib-pelix/
-
- .. image:: https://travis-ci.org/tcalmant/jsonrpclib.svg?branch=master
- :target: https://travis-ci.org/tcalmant/jsonrpclib
-
- .. image:: https://coveralls.io/repos/tcalmant/jsonrpclib/badge.svg?branch=master
- :target: https://coveralls.io/r/tcalmant/jsonrpclib?branch=master
-
-
- This library is an implementation of the JSON-RPC specification.
- It supports both the original 1.0 specification, as well as the
- new (proposed) 2.0 specification, which includes batch submission, keyword
- arguments, etc.
-
- It is licensed under the Apache License, Version 2.0
- (http://www.apache.org/licenses/LICENSE-2.0.html).
-
-
- About this version
- ******************
-
- This is a patched version of the original ``jsonrpclib`` project by
- Josh Marshall, available at https://github.com/joshmarshall/jsonrpclib.
-
- The suffix *-pelix* only indicates that this version works with Pelix Remote
- Services, but it is **not** a Pelix specific implementation.
-
- * This version adds support for Python 3, staying compatible with Python 2.
- * It is now possible to use the dispatch_method argument while extending
- the SimpleJSONRPCDispatcher, to use a custom dispatcher.
- This allows Pelix Remote Services to use this package.
- * It can use thread pools to control the number of threads spawned to handle
- notification requests and client connections.
- * The modifications added in other forks of this project have been added:
-
- * From https://github.com/drdaeman/jsonrpclib:
-
- * Improved JSON-RPC 1.0 support
- * Less strict error response handling
-
- * From https://github.com/tuomassalo/jsonrpclib:
-
- * In case of a non-pre-defined error, raise an AppError and give access to
- *error.data*
-
- * From https://github.com/dejw/jsonrpclib:
-
- * Custom headers can be sent with request and associated tests
-
- * The support for Unix sockets has been removed, as it is not trivial to convert
- to Python 3 (and I don't use them)
- * This version cannot be installed with the original ``jsonrpclib``, as it uses
- the same package name.
-
-
- Summary
- *******
-
- This library implements the JSON-RPC 2.0 proposed specification in pure Python.
- It is designed to be as compatible with the syntax of ``xmlrpclib`` as possible
- (it extends where possible), so that projects using ``xmlrpclib`` could easily
- be modified to use JSON and experiment with the differences.
-
- It is backwards-compatible with the 1.0 specification, and supports all of the
- new proposed features of 2.0, including:
-
- * Batch submission (via MultiCall)
- * Keyword arguments
- * Notifications (both in a batch and 'normal')
- * Class translation using the ``__jsonclass__`` key.
-
- I've added a "SimpleJSONRPCServer", which is intended to emulate the
- "SimpleXMLRPCServer" from the default Python distribution.
-
-
- Requirements
- ************
-
- It supports ``cjson`` and ``simplejson``, and looks for the parsers in that
- order (searching first for ``cjson``, then for the *built-in* ``json`` in 2.6+,
- and then the ``simplejson`` external library).
- One of these must be installed to use this library, although if you have a
- standard distribution of 2.6+, you should already have one.
- Keep in mind that ``cjson`` is supposed to be the quickest, I believe, so if
- you are going for full-on optimization you may want to pick it up.
-
- Since the library uses the ``contextlib`` module, you should have at least Python 2.5
- installed.
-
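The lookup order described above amounts to the following import cascade (an
illustrative sketch; the library's own detection code may differ):

 .. code-block:: python

     try:
         import cjson        # C parser, preferred when installed
     except ImportError:
         try:
             import json     # built-in since Python 2.6
         except ImportError:
             import simplejson  # external fallback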
-
- Installation
- ************
-
- You can install this from PyPI with one of the following commands (sudo
- may be required):
-
- .. code-block:: console
-
- easy_install jsonrpclib-pelix
- pip install jsonrpclib-pelix
-
- Alternatively, you can download the source from the GitHub repository
- at http://github.com/tcalmant/jsonrpclib and manually install it
- with the following commands:
-
- .. code-block:: console
-
- git clone git://github.com/tcalmant/jsonrpclib.git
- cd jsonrpclib
- python setup.py install
-
-
- SimpleJSONRPCServer
- *******************
-
- This is identical in usage (or should be) to the SimpleXMLRPCServer in the
- Python standard library. Some of the differences in features are that it
- obviously supports notification, batch calls, class translation (if left on),
- etc.
- Note: The import line is slightly different from the regular SimpleXMLRPCServer,
- since the SimpleJSONRPCServer is distributed within the ``jsonrpclib`` library.
-
- .. code-block:: python
-
- from jsonrpclib.SimpleJSONRPCServer import SimpleJSONRPCServer
-
- server = SimpleJSONRPCServer(('localhost', 8080))
- server.register_function(pow)
- server.register_function(lambda x,y: x+y, 'add')
- server.register_function(lambda x: x, 'ping')
- server.serve_forever()
-
- To protect the server with SSL, use the following snippet:
-
- .. code-block:: python
-
- from jsonrpclib.SimpleJSONRPCServer import SimpleJSONRPCServer
- import ssl
-
- # Setup the SSL socket
- server = SimpleJSONRPCServer(('localhost', 8080), bind_and_activate=False)
- server.socket = ssl.wrap_socket(server.socket, certfile='server.pem',
- server_side=True)
- server.server_bind()
- server.server_activate()
-
- # ... register functions
- # Start the server
- server.serve_forever()
-
-
- Notification Thread Pool
- ========================
-
- By default, notification calls are handled in the request handling thread.
- It is possible to use a thread pool to handle them, by giving it to the server
- using the ``set_notification_pool()`` method:
-
- .. code-block:: python
-
- from jsonrpclib.SimpleJSONRPCServer import SimpleJSONRPCServer
- from jsonrpclib.threadpool import ThreadPool
-
- # Setup the thread pool: between 0 and 10 threads
- pool = ThreadPool(max_threads=10, min_threads=0)
-
- # Don't forget to start it
- pool.start()
-
- # Setup the server
- server = SimpleJSONRPCServer(('localhost', 8080), config)
- server.set_notification_pool(pool)
-
- # Register methods
- server.register_function(pow)
- server.register_function(lambda x,y: x+y, 'add')
- server.register_function(lambda x: x, 'ping')
-
- try:
- server.serve_forever()
- finally:
- # Stop the thread pool (let threads finish their current task)
- pool.stop()
- server.set_notification_pool(None)
-
-
- Threaded server
- ===============
-
- It is also possible to use a thread pool to handle client requests, using the
- ``PooledJSONRPCServer`` class.
- By default, this class uses a pool of 0 to 30 threads. A custom pool can be given
- with the ``thread_pool`` parameter of the class constructor.
-
- The notification pool and the request pool are different: by default, a server
- with a request pool doesn't have a notification pool.
-
- .. code-block:: python
-
- from jsonrpclib.SimpleJSONRPCServer import PooledJSONRPCServer
- from jsonrpclib.threadpool import ThreadPool
-
- # Setup the notification and request pools
- notif_pool = ThreadPool(max_threads=10, min_threads=0)
- request_pool = ThreadPool(max_threads=50, min_threads=10)
-
- # Don't forget to start them
- notif_pool.start()
- request_pool.start()
-
- # Setup the server
- server = PooledJSONRPCServer(('localhost', 8080), config,
- thread_pool=request_pool)
- server.set_notification_pool(notif_pool)
-
- # Register methods
- server.register_function(pow)
- server.register_function(lambda x,y: x+y, 'add')
- server.register_function(lambda x: x, 'ping')
-
- try:
- server.serve_forever()
- finally:
- # Stop the thread pools (let threads finish their current task)
- request_pool.stop()
- notif_pool.stop()
- server.set_notification_pool(None)
-
- Client Usage
- ************
-
- This is (obviously) taken from a console session.
-
- .. code-block:: python
-
- >>> import jsonrpclib
- >>> server = jsonrpclib.ServerProxy('http://localhost:8080')
- >>> server.add(5,6)
- 11
- >>> server.add(x=5, y=10)
- 15
- >>> server._notify.add(5,6)
- # No result returned...
- >>> batch = jsonrpclib.MultiCall(server)
- >>> batch.add(5, 6)
- >>> batch.ping({'key':'value'})
- >>> batch._notify.add(4, 30)
- >>> results = batch()
- >>> for result in results:
- >>> ... print(result)
- 11
- {'key': 'value'}
- # Note that there are only two responses -- this is according to spec.
-
- # Clean up
- >>> server('close')()
-
- # Using client history
- >>> history = jsonrpclib.history.History()
- >>> server = jsonrpclib.ServerProxy('http://localhost:8080', history=history)
- >>> server.add(5,6)
- 11
- >>> print(history.request)
- {"id": "f682b956-c8e1-4506-9db4-29fe8bc9fcaa", "jsonrpc": "2.0",
- "method": "add", "params": [5, 6]}
- >>> print(history.response)
- {"id": "f682b956-c8e1-4506-9db4-29fe8bc9fcaa", "jsonrpc": "2.0",
- "result": 11}
-
- # Clean up
- >>> server('close')()
-
- If you need 1.0 functionality, there are a bunch of places you can pass that in,
- although the best is just to give a specific configuration to
- ``jsonrpclib.ServerProxy``:
-
- .. code-block:: python
-
- >>> import jsonrpclib
- >>> jsonrpclib.config.DEFAULT.version
- 2.0
- >>> config = jsonrpclib.config.Config(version=1.0)
- >>> history = jsonrpclib.history.History()
- >>> server = jsonrpclib.ServerProxy('http://localhost:8080', config=config,
- history=history)
- >>> server.add(7, 10)
- 17
- >>> print(history.request)
- {"id": "827b2923-5b37-49a5-8b36-e73920a16d32",
- "method": "add", "params": [7, 10]}
- >>> print(history.response)
- {"id": "827b2923-5b37-49a5-8b36-e73920a16d32", "error": null, "result": 17}
- >>> server('close')()
-
- The equivalent ``loads`` and ``dumps`` functions also exist, although with minor
- modifications. The ``dumps`` arguments are almost identical, but it adds three
- arguments: ``rpcid`` for the 'id' key, ``version`` to specify the JSON-RPC
- compatibility, and ``notify`` if it's a request that you want to be a
- notification.
-
- Additionally, the ``loads`` method does not return the params and method like
- ``xmlrpclib``, but instead a.) parses for errors, raising ProtocolErrors, and
- b.) returns the entire structure of the request / response for manual parsing.
-
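For instance, the three extra ``dumps`` arguments map onto the request like
this (a sketch; key order in the produced string may vary):

 .. code-block:: python

     import jsonrpclib

     # rpcid fills the 'id' key, version selects the 1.0/2.0 framing;
     # passing notify would instead drop the id to make a notification.
     payload = jsonrpclib.dumps([5, 6], methodname='add', rpcid='req-1', version=2.0)
     # '{"id": "req-1", "jsonrpc": "2.0", "method": "add", "params": [5, 6]}'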
-
- Additional headers
- ******************
-
- If your remote service requires custom headers in requests, you can pass them
- as a ``headers`` keyword argument when creating the ``ServerProxy``:
-
- .. code-block:: python
-
- >>> import jsonrpclib
- >>> server = jsonrpclib.ServerProxy("http://localhost:8080",
- headers={'X-Test' : 'Test'})
-
- You can also put additional request headers only for certain method invocations:
-
- .. code-block:: python
-
- >>> import jsonrpclib
- >>> server = jsonrpclib.Server("http://localhost:8080")
- >>> with server._additional_headers({'X-Test' : 'Test'}) as test_server:
- ... test_server.ping(42)
- ...
- >>> # X-Test header will be no longer sent in requests
-
- Of course ``_additional_headers`` contexts can be nested as well.
-
-
- Class Translation
- *****************
-
- I've recently added "automatic" class translation support, although it is
- turned off by default. This can be devastatingly slow if improperly used, so
- the following is just a short list of things to keep in mind when using it.
-
- * Keep It (the object) Simple Stupid. (for exceptions, keep reading.)
- * Do not require init params (for exceptions, keep reading)
- * Getter properties without setters could be dangerous (read: not tested)
-
- If any of the above are issues, use the _serialize method. (see usage below)
- The server and client must BOTH have the use_jsonclass configuration item on and
- they must both have access to the same libraries used by the objects for
- this to work.
-
- If you have excessively nested arguments, it would be better to turn off the
- translation and manually invoke it on specific objects using
- ``jsonrpclib.jsonclass.dump`` / ``jsonrpclib.jsonclass.load`` (since the default
- behavior recursively goes through attributes and lists / dicts / tuples).
-
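Manual, per-object translation with the calls named above then looks like
this (a sketch; ``some_obj`` stands for any translatable instance):

 .. code-block:: python

     from jsonrpclib import jsonclass

     blob = jsonclass.dump(some_obj)  # JSON-friendly dict carrying a __jsonclass__ key
     copy = jsonclass.load(blob)      # rebuilds an equivalent object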
- Sample file: *test_obj.py*
-
- .. code-block:: python
-
- # This object is /very/ simple, and the system will look through the
- # attributes and serialize what it can.
- class TestObj(object):
- foo = 'bar'
-
- # This object requires __init__ params, so it uses the _serialize method
- # and returns a tuple of init params and attribute values (the init params
- # can be a dict or a list, but the attribute values must be a dict.)
- class TestSerial(object):
- foo = 'bar'
- def __init__(self, *args):
- self.args = args
- def _serialize(self):
- return (self.args, {'foo':self.foo,})
-
- * Sample usage
-
- .. code-block:: python
-
- >>> import jsonrpclib
- >>> import test_obj
-
- # History is used only to print the serialized form of beans
- >>> history = jsonrpclib.history.History()
- >>> testobj1 = test_obj.TestObj()
- >>> testobj2 = test_obj.TestSerial()
- >>> server = jsonrpclib.Server('http://localhost:8080', history=history)
-
- # The 'ping' just returns whatever is sent
- >>> ping1 = server.ping(testobj1)
- >>> ping2 = server.ping(testobj2)
-
- >>> print(history.request)
- {"id": "7805f1f9-9abd-49c6-81dc-dbd47229fe13", "jsonrpc": "2.0",
- "method": "ping", "params": [{"__jsonclass__":
- ["test_obj.TestSerial", []], "foo": "bar"}
- ]}
- >>> print(history.response)
- {"id": "7805f1f9-9abd-49c6-81dc-dbd47229fe13", "jsonrpc": "2.0",
- "result": {"__jsonclass__": ["test_obj.TestSerial", []], "foo": "bar"}}
-
- This behavior is turned on by default. To deactivate it, just set the
- ``use_jsonclass`` member of a server ``Config`` to False.
- If you want to use a per-class serialization method, set its name in the
- ``serialize_method`` member of a server ``Config``.
- Finally, if you are using classes that you have defined in the implementation
- (as in, not a separate library), you'll need to add those (on BOTH the server
- and the client) using the ``config.classes.add()`` method.
-
- Feedback on this "feature" is very, VERY much appreciated.
-
- Why JSON-RPC?
- *************
-
- In my opinion, there are several reasons to choose JSON over XML for RPC:
-
- * Much simpler to read (I suppose this is opinion, but I know I'm right. :)
- * Size / Bandwidth - Main reason, a JSON object representation is just much smaller.
- * Parsing - JSON should be much quicker to parse than XML.
- * Easy class passing with ``jsonclass`` (when enabled)
-
- In the interest of being fair, there are also a few reasons to choose XML
- over JSON:
-
- * Your server doesn't do JSON (rather obvious)
- * Wider XML-RPC support across APIs (can we change this? :))
- * Libraries are more established, i.e. more stable (Let's change this too.)
-
- Tests
- *****
-
- Tests are an almost-verbatim drop from the JSON-RPC specification 2.0 page.
- They can be run using *unittest* or *nosetest*:
-
- .. code-block:: console
-
- python -m unittest discover tests
- python3 -m unittest discover tests
- nosetests tests
-
-Platform: UNKNOWN
-Classifier: Development Status :: 5 - Production/Stable
-Classifier: Intended Audience :: Developers
-Classifier: License :: OSI Approved :: Apache Software License
-Classifier: Operating System :: OS Independent
-Classifier: Programming Language :: Python :: 2.6
-Classifier: Programming Language :: Python :: 2.7
-Classifier: Programming Language :: Python :: 3
-Classifier: Programming Language :: Python :: 3.0
-Classifier: Programming Language :: Python :: 3.1
-Classifier: Programming Language :: Python :: 3.2
-Classifier: Programming Language :: Python :: 3.3
-Classifier: Programming Language :: Python :: 3.4
+Metadata-Version: 1.1
+Name: jsonrpclib-pelix
+Version: 0.2.5
+Summary: This project is an implementation of the JSON-RPC v2.0 specification (backwards-compatible) as a client library, for Python 2.6+ and Python 3. This version is a fork of jsonrpclib by Josh Marshall, usable with Pelix remote services.
+Home-page: http://github.com/tcalmant/jsonrpclib/
+Author: Thomas Calmant
+Author-email: thomas.calmant+github@gmail.com
+License: Apache License 2.0
+Description: JSONRPClib (patched for Pelix)
+ ##############################
+
+ .. image:: https://pypip.in/license/jsonrpclib-pelix/badge.svg
+ :target: https://pypi.python.org/pypi/jsonrpclib-pelix/
+
+ .. image:: https://travis-ci.org/tcalmant/jsonrpclib.svg?branch=master
+ :target: https://travis-ci.org/tcalmant/jsonrpclib
+
+ .. image:: https://coveralls.io/repos/tcalmant/jsonrpclib/badge.svg?branch=master
+ :target: https://coveralls.io/r/tcalmant/jsonrpclib?branch=master
+
+
+ This library is an implementation of the JSON-RPC specification.
+ It supports both the original 1.0 specification, as well as the
+ new (proposed) 2.0 specification, which includes batch submission, keyword
+ arguments, etc.
+
+ It is licensed under the Apache License, Version 2.0
+ (http://www.apache.org/licenses/LICENSE-2.0.html).
+
+
+ About this version
+ ******************
+
+ This is a patched version of the original ``jsonrpclib`` project by
+ Josh Marshall, available at https://github.com/joshmarshall/jsonrpclib.
+
+ The suffix *-pelix* only indicates that this version works with Pelix Remote
+ Services, but it is **not** a Pelix specific implementation.
+
+ * This version adds support for Python 3, staying compatible with Python 2.
+ * It is now possible to use the dispatch_method argument while extending
+ the SimpleJSONRPCDispatcher, to use a custom dispatcher.
+ This allows Pelix Remote Services to use this package.
+ * It can use thread pools to control the number of threads spawned to handle
+ notification requests and client connections.
+ * The modifications added in other forks of this project have been added:
+
+ * From https://github.com/drdaeman/jsonrpclib:
+
+ * Improved JSON-RPC 1.0 support
+ * Less strict error response handling
+
+ * From https://github.com/tuomassalo/jsonrpclib:
+
+ * In case of a non-pre-defined error, raise an AppError and give access to
+ *error.data*
+
+ * From https://github.com/dejw/jsonrpclib:
+
+ * Custom headers can be sent with request and associated tests
+
+ * The support for Unix sockets has been removed, as it is not trivial to convert
+ to Python 3 (and I don't use them)
+ * This version cannot be installed with the original ``jsonrpclib``, as it uses
+ the same package name.
+
+
+ Summary
+ *******
+
+ This library implements the JSON-RPC 2.0 proposed specification in pure Python.
+ It is designed to be as compatible with the syntax of ``xmlrpclib`` as possible
+ (it extends where possible), so that projects using ``xmlrpclib`` could easily
+ be modified to use JSON and experiment with the differences.
+
+ It is backwards-compatible with the 1.0 specification, and supports all of the
+ new proposed features of 2.0, including:
+
+ * Batch submission (via MultiCall)
+ * Keyword arguments
+ * Notifications (both in a batch and 'normal')
+ * Class translation using the ``__jsonclass__`` key.
+
+ I've added a "SimpleJSONRPCServer", which is intended to emulate the
+ "SimpleXMLRPCServer" from the default Python distribution.
+ + + Requirements + ************ + + It supports ``cjson`` and ``simplejson``, and looks for the parsers in that + order (searching first for ``cjson``, then for the *built-in* ``json`` in 2.6+, + and then the ``simplejson`` external library). + One of these must be installed to use this library, although if you have a + standard distribution of 2.6+, you should already have one. + Keep in mind that ``cjson`` is supposed to be the quickest, I believe, so if + you are going for full-on optimization you may want to pick it up. + + Since library uses ``contextlib`` module, you should have at least Python 2.5 + installed. + + + Installation + ************ + + You can install this from PyPI with one of the following commands (sudo + may be required): + + .. code-block:: console + + easy_install jsonrpclib-pelix + pip install jsonrpclib-pelix + + Alternatively, you can download the source from the GitHub repository + at http://github.com/tcalmant/jsonrpclib and manually install it + with the following commands: + + .. code-block:: console + + git clone git://github.com/tcalmant/jsonrpclib.git + cd jsonrpclib + python setup.py install + + + SimpleJSONRPCServer + ******************* + + This is identical in usage (or should be) to the SimpleXMLRPCServer in the + Python standard library. Some of the differences in features are that it + obviously supports notification, batch calls, class translation (if left on), + etc. + Note: The import line is slightly different from the regular SimpleXMLRPCServer, + since the SimpleJSONRPCServer is distributed within the ``jsonrpclib`` library. + + .. code-block:: python + + from jsonrpclib.SimpleJSONRPCServer import SimpleJSONRPCServer + + server = SimpleJSONRPCServer(('localhost', 8080)) + server.register_function(pow) + server.register_function(lambda x,y: x+y, 'add') + server.register_function(lambda x: x, 'ping') + server.serve_forever() + + To start protect the server with SSL, use the following snippet: + + .. code-block:: python + + from jsonrpclib.SimpleJSONRPCServer import SimpleJSONRPCServer + + # Setup the SSL socket + server = SimpleJSONRPCServer(('localhost', 8080), bind_and_activate=False) + server.socket = ssl.wrap_socket(server.socket, certfile='server.pem', + server_side=True) + server.server_bind() + server.server_activate() + + # ... register functions + # Start the server + server.serve_forever() + + + Notification Thread Pool + ======================== + + By default, notification calls are handled in the request handling thread. + It is possible to use a thread pool to handle them, by giving it to the server + using the ``set_notification_pool()`` method: + + .. code-block:: python + + from jsonrpclib.SimpleJSONRPCServer import SimpleJSONRPCServer + from jsonrpclib.threadpool import ThreadPool + + # Setup the thread pool: between 0 and 10 threads + pool = ThreadPool(max_threads=10, min_threads=0) + + # Don't forget to start it + pool.start() + + # Setup the server + server = SimpleJSONRPCServer(('localhost', 8080), config) + server.set_notification_pool(pool) + + # Register methods + server.register_function(pow) + server.register_function(lambda x,y: x+y, 'add') + server.register_function(lambda x: x, 'ping') + + try: + server.serve_forever() + finally: + # Stop the thread pool (let threads finish their current task) + pool.stop() + server.set_notification_pool(None) + + + Threaded server + =============== + + It is also possible to use a thread pool to handle clients requests, using the + ``PooledJSONRPCServer`` class. 
+ By default, this class uses pool of 0 to 30 threads. A custom pool can be given + with the ``thread_pool`` parameter of the class constructor. + + The notification pool and the request pool are different: by default, a server + with a request pool doesn't have a notification pool. + + .. code-block:: python + + from jsonrpclib.SimpleJSONRPCServer import PooledJSONRPCServer + from jsonrpclib.threadpool import ThreadPool + + # Setup the notification and request pools + nofif_pool = ThreadPool(max_threads=10, min_threads=0) + request_pool = ThreadPool(max_threads=50, min_threads=10) + + # Don't forget to start them + nofif_pool.start() + request_pool.start() + + # Setup the server + server = PooledJSONRPCServer(('localhost', 8080), config, + thread_pool=request_pool) + server.set_notification_pool(nofif_pool) + + # Register methods + server.register_function(pow) + server.register_function(lambda x,y: x+y, 'add') + server.register_function(lambda x: x, 'ping') + + try: + server.serve_forever() + finally: + # Stop the thread pools (let threads finish their current task) + request_pool.stop() + nofif_pool.stop() + server.set_notification_pool(None) + + Client Usage + ************ + + This is (obviously) taken from a console session. + + .. code-block:: python + + >>> import jsonrpclib + >>> server = jsonrpclib.ServerProxy('http://localhost:8080') + >>> server.add(5,6) + 11 + >>> server.add(x=5, y=10) + 15 + >>> server._notify.add(5,6) + # No result returned... + >>> batch = jsonrpclib.MultiCall(server) + >>> batch.add(5, 6) + >>> batch.ping({'key':'value'}) + >>> batch._notify.add(4, 30) + >>> results = batch() + >>> for result in results: + >>> ... print(result) + 11 + {'key': 'value'} + # Note that there are only two responses -- this is according to spec. + + # Clean up + >>> server('close')() + + # Using client history + >>> history = jsonrpclib.history.History() + >>> server = jsonrpclib.ServerProxy('http://localhost:8080', history=history) + >>> server.add(5,6) + 11 + >>> print(history.request) + {"id": "f682b956-c8e1-4506-9db4-29fe8bc9fcaa", "jsonrpc": "2.0", + "method": "add", "params": [5, 6]} + >>> print(history.response) + {"id": "f682b956-c8e1-4506-9db4-29fe8bc9fcaa", "jsonrpc": "2.0", + "result": 11} + + # Clean up + >>> server('close')() + + If you need 1.0 functionality, there are a bunch of places you can pass that in, + although the best is just to give a specific configuration to + ``jsonrpclib.ServerProxy``: + + .. code-block:: python + + >>> import jsonrpclib + >>> jsonrpclib.config.DEFAULT.version + 2.0 + >>> config = jsonrpclib.config.Config(version=1.0) + >>> history = jsonrpclib.history.History() + >>> server = jsonrpclib.ServerProxy('http://localhost:8080', config=config, + history=history) + >>> server.add(7, 10) + 17 + >>> print(history.request) + {"id": "827b2923-5b37-49a5-8b36-e73920a16d32", + "method": "add", "params": [7, 10]} + >>> print(history.response) + {"id": "827b2923-5b37-49a5-8b36-e73920a16d32", "error": null, "result": 17} + >>> server('close')() + + The equivalent ``loads`` and ``dumps`` functions also exist, although with minor + modifications. The ``dumps`` arguments are almost identical, but it adds three + arguments: ``rpcid`` for the 'id' key, ``version`` to specify the JSON-RPC + compatibility, and ``notify`` if it's a request that you want to be a + notification. + + Additionally, the ``loads`` method does not return the params and method like + ``xmlrpclib``, but instead a.) parses for errors, raising ProtocolErrors, and + b.) 
returns the entire structure of the request / response for manual parsing. + + + Additional headers + ****************** + + If your remote service requires custom headers in request, you can pass them + as as a ``headers`` keyword argument, when creating the ``ServerProxy``: + + .. code-block:: python + + >>> import jsonrpclib + >>> server = jsonrpclib.ServerProxy("http://localhost:8080", + headers={'X-Test' : 'Test'}) + + You can also put additional request headers only for certain method invocation: + + .. code-block:: python + + >>> import jsonrpclib + >>> server = jsonrpclib.Server("http://localhost:8080") + >>> with server._additional_headers({'X-Test' : 'Test'}) as test_server: + ... test_server.ping(42) + ... + >>> # X-Test header will be no longer sent in requests + + Of course ``_additional_headers`` contexts can be nested as well. + + + Class Translation + ***************** + + I've recently added "automatic" class translation support, although it is + turned off by default. This can be devastatingly slow if improperly used, so + the following is just a short list of things to keep in mind when using it. + + * Keep It (the object) Simple Stupid. (for exceptions, keep reading.) + * Do not require init params (for exceptions, keep reading) + * Getter properties without setters could be dangerous (read: not tested) + + If any of the above are issues, use the _serialize method. (see usage below) + The server and client must BOTH have use_jsonclass configuration item on and + they must both have access to the same libraries used by the objects for + this to work. + + If you have excessively nested arguments, it would be better to turn off the + translation and manually invoke it on specific objects using + ``jsonrpclib.jsonclass.dump`` / ``jsonrpclib.jsonclass.load`` (since the default + behavior recursively goes through attributes and lists / dicts / tuples). + + Sample file: *test_obj.py* + + .. code-block:: python + + # This object is /very/ simple, and the system will look through the + # attributes and serialize what it can. + class TestObj(object): + foo = 'bar' + + # This object requires __init__ params, so it uses the _serialize method + # and returns a tuple of init params and attribute values (the init params + # can be a dict or a list, but the attribute values must be a dict.) + class TestSerial(object): + foo = 'bar' + def __init__(self, *args): + self.args = args + def _serialize(self): + return (self.args, {'foo':self.foo,}) + + * Sample usage + + .. code-block:: python + + >>> import jsonrpclib + >>> import test_obj + + # History is used only to print the serialized form of beans + >>> history = jsonrpclib.history.History() + >>> testobj1 = test_obj.TestObj() + >>> testobj2 = test_obj.TestSerial() + >>> server = jsonrpclib.Server('http://localhost:8080', history=history) + + # The 'ping' just returns whatever is sent + >>> ping1 = server.ping(testobj1) + >>> ping2 = server.ping(testobj2) + + >>> print(history.request) + {"id": "7805f1f9-9abd-49c6-81dc-dbd47229fe13", "jsonrpc": "2.0", + "method": "ping", "params": [{"__jsonclass__": + ["test_obj.TestSerial", []], "foo": "bar"} + ]} + >>> print(history.response) + {"id": "7805f1f9-9abd-49c6-81dc-dbd47229fe13", "jsonrpc": "2.0", + "result": {"__jsonclass__": ["test_obj.TestSerial", []], "foo": "bar"}} + + This behavior is turned by default. To deactivate it, just set the + ``use_jsonclass`` member of a server ``Config`` to False. 
+ If you want to use a per-class serialization method, set its name in the + ``serialize_method`` member of a server ``Config``. + Finally, if you are using classes that you have defined in the implementation + (as in, not a separate library), you'll need to add those (on BOTH the server + and the client) using the ``config.classes.add()`` method. + + Feedback on this "feature" is very, VERY much appreciated. + + Why JSON-RPC? + ************* + + In my opinion, there are several reasons to choose JSON over XML for RPC: + + * Much simpler to read (I suppose this is opinion, but I know I'm right. :) + * Size / Bandwidth - Main reason, a JSON object representation is just much smaller. + * Parsing - JSON should be much quicker to parse than XML. + * Easy class passing with ``jsonclass`` (when enabled) + + In the interest of being fair, there are also a few reasons to choose XML + over JSON: + + * Your server doesn't do JSON (rather obvious) + * Wider XML-RPC support across APIs (can we change this? :)) + * Libraries are more established, i.e. more stable (Let's change this too.) + + Tests + ***** + + Tests are an almost-verbatim drop from the JSON-RPC specification 2.0 page. + They can be run using *unittest* or *nosetest*: + + .. code-block:: console + + python -m unittest discover tests + python3 -m unittest discover tests + nosetests tests + +Platform: UNKNOWN +Classifier: Development Status :: 5 - Production/Stable +Classifier: Intended Audience :: Developers +Classifier: License :: OSI Approved :: Apache Software License +Classifier: Operating System :: OS Independent +Classifier: Programming Language :: Python :: 2.6 +Classifier: Programming Language :: Python :: 2.7 +Classifier: Programming Language :: Python :: 3 +Classifier: Programming Language :: Python :: 3.0 +Classifier: Programming Language :: Python :: 3.1 +Classifier: Programming Language :: Python :: 3.2 +Classifier: Programming Language :: Python :: 3.3 +Classifier: Programming Language :: Python :: 3.4 diff --git a/scripts/automation/trex_control_plane/python_lib/jsonrpclib-pelix-0.2.5/README.rst b/scripts/external_libs/jsonrpclib-pelix-0.2.5/README.rst index 29da2708..19001933 100755..100644 --- a/scripts/automation/trex_control_plane/python_lib/jsonrpclib-pelix-0.2.5/README.rst +++ b/scripts/external_libs/jsonrpclib-pelix-0.2.5/README.rst @@ -1,438 +1,438 @@ -JSONRPClib (patched for Pelix)
-##############################
-
-.. image:: https://pypip.in/license/jsonrpclib-pelix/badge.svg
- :target: https://pypi.python.org/pypi/jsonrpclib-pelix/
-
-.. image:: https://travis-ci.org/tcalmant/jsonrpclib.svg?branch=master
- :target: https://travis-ci.org/tcalmant/jsonrpclib
-
-.. image:: https://coveralls.io/repos/tcalmant/jsonrpclib/badge.svg?branch=master
- :target: https://coveralls.io/r/tcalmant/jsonrpclib?branch=master
-
-
-This library is an implementation of the JSON-RPC specification.
-It supports both the original 1.0 specification, as well as the
-new (proposed) 2.0 specification, which includes batch submission, keyword
-arguments, etc.
-
-It is licensed under the Apache License, Version 2.0
-(http://www.apache.org/licenses/LICENSE-2.0.html).
-
-
-About this version
-******************
-
-This is a patched version of the original ``jsonrpclib`` project by
-Josh Marshall, available at https://github.com/joshmarshall/jsonrpclib.
-
-The suffix *-pelix* only indicates that this version works with Pelix Remote
-Services, but it is **not** a Pelix-specific implementation.
-
-* This version adds support for Python 3, staying compatible with Python 2.
-* It is now possible to use the dispatch_method argument while extending
- the SimpleJSONRPCDispatcher, in order to use a custom dispatcher.
- This allows Pelix Remote Services to use this package (see the sketch
- after this list).
-* It can use thread pools to control the number of threads spawned to handle
- notification requests and client connections.
-* The modifications made in other forks of this project have been merged:
-
- * From https://github.com/drdaeman/jsonrpclib:
-
- * Improved JSON-RPC 1.0 support
- * Less strict error response handling
-
- * From https://github.com/tuomassalo/jsonrpclib:
-
- * In case of a non-pre-defined error, raise an AppError and give access to
- *error.data*
-
- * From https://github.com/dejw/jsonrpclib:
-
- * Custom headers can be sent with requests, with associated tests
-
-* The support for Unix sockets has been removed, as it is not trivial to convert
- to Python 3 (and I don't use them)
-* This version cannot be installed with the original ``jsonrpclib``, as it uses
- the same package name.
-
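-For illustration, a minimal sketch of that extension point, based on the
-``_marshaled_dispatch(data, dispatch_method, path)`` signature visible in
-``SimpleJSONRPCServer.py`` further down in this diff (the ``EchoDispatcher``
-class and its resolver are hypothetical):
-
-.. code-block:: python
-
-    from jsonrpclib.SimpleJSONRPCServer import SimpleJSONRPCDispatcher
-
-    class EchoDispatcher(SimpleJSONRPCDispatcher):
-        def handle_request(self, data):
-            # dispatch_method replaces the registered-functions lookup
-            # and is called as dispatch_method(method, params)
-            def resolve(method, params):
-                return {'method': method, 'params': params}
-            return self._marshaled_dispatch(data, dispatch_method=resolve)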
-
-Summary
-*******
-
-This library implements the JSON-RPC 2.0 proposed specification in pure Python.
-It is designed to be as compatible with the syntax of ``xmlrpclib`` as possible
-(it extends where possible), so that projects using ``xmlrpclib`` could easily
-be modified to use JSON and experiment with the differences.
-
-It is backwards-compatible with the 1.0 specification, and supports all of the
-new proposed features of 2.0, including:
-
-* Batch submission (via MultiCall)
-* Keyword arguments
-* Notifications (both in a batch and 'normal')
-* Class translation using the ``__jsonclass__`` key.
-
-I've added a "SimpleJSONRPCServer", which is intended to emulate the
-"SimpleXMLRPCServer" from the default Python distribution.
-
-
-Requirements
-************
-
-It supports ``cjson`` and ``simplejson``, and looks for the parsers in that
-order (searching first for ``cjson``, then for the *built-in* ``json`` in 2.6+,
-and then the ``simplejson`` external library).
-One of these must be installed to use this library, although if you have a
-standard distribution of 2.6+, you should already have one.
-Keep in mind that ``cjson`` is supposed to be the quickest, I believe, so if
-you are going for full-on optimization you may want to pick it up.
-
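-As a sketch, the lookup order described above amounts to an import fallback
-of this shape (the ``json_parser`` alias is illustrative):
-
-.. code-block:: python
-
-    try:
-        import cjson as json_parser           # quickest, if installed
-    except ImportError:
-        try:
-            import json as json_parser        # built-in since Python 2.6
-        except ImportError:
-            import simplejson as json_parser  # external fallback
-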
-Since this library uses the ``contextlib`` module, you should have at least
-installed.
-
-
-Installation
-************
-
-You can install this from PyPI with one of the following commands (sudo
-may be required):
-
-.. code-block:: console
-
- easy_install jsonrpclib-pelix
- pip install jsonrpclib-pelix
-
-Alternatively, you can download the source from the GitHub repository
-at http://github.com/tcalmant/jsonrpclib and manually install it
-with the following commands:
-
-.. code-block:: console
-
- git clone git://github.com/tcalmant/jsonrpclib.git
- cd jsonrpclib
- python setup.py install
-
-
-SimpleJSONRPCServer
-*******************
-
-This is identical in usage (or should be) to the SimpleXMLRPCServer in the
-Python standard library. The main feature differences are that it also
-supports notifications, batch calls, class translation (if left on),
-etc.
-Note: The import line is slightly different from the regular SimpleXMLRPCServer,
-since the SimpleJSONRPCServer is distributed within the ``jsonrpclib`` library.
-
-.. code-block:: python
-
- from jsonrpclib.SimpleJSONRPCServer import SimpleJSONRPCServer
-
- server = SimpleJSONRPCServer(('localhost', 8080))
- server.register_function(pow)
- server.register_function(lambda x,y: x+y, 'add')
- server.register_function(lambda x: x, 'ping')
- server.serve_forever()
-
-To protect the server with SSL, use the following snippet:
-
-.. code-block:: python
-
- import ssl
-
- from jsonrpclib.SimpleJSONRPCServer import SimpleJSONRPCServer
-
- # Setup the SSL socket
- server = SimpleJSONRPCServer(('localhost', 8080), bind_and_activate=False)
- server.socket = ssl.wrap_socket(server.socket, certfile='server.pem',
- server_side=True)
- server.server_bind()
- server.server_activate()
-
- # ... register functions
- # Start the server
- server.serve_forever()
-
-
-Notification Thread Pool
-========================
-
-By default, notification calls are handled in the request handling thread.
-It is possible to use a thread pool to handle them, by giving it to the server
-using the ``set_notification_pool()`` method:
-
-.. code-block:: python
-
- from jsonrpclib.SimpleJSONRPCServer import SimpleJSONRPCServer
- from jsonrpclib.threadpool import ThreadPool
-
- # Setup the thread pool: between 0 and 10 threads
- pool = ThreadPool(max_threads=10, min_threads=0)
-
- # Don't forget to start it
- pool.start()
-
- # Setup the server
- server = SimpleJSONRPCServer(('localhost', 8080))
- server.set_notification_pool(pool)
-
- # Register methods
- server.register_function(pow)
- server.register_function(lambda x,y: x+y, 'add')
- server.register_function(lambda x: x, 'ping')
-
- try:
- server.serve_forever()
- finally:
- # Stop the thread pool (let threads finish their current task)
- pool.stop()
- server.set_notification_pool(None)
-
-
-Threaded server
-===============
-
-It is also possible to use a thread pool to handle client requests, using the
-``PooledJSONRPCServer`` class.
-By default, this class uses a pool of 0 to 30 threads. A custom pool can be given
-with the ``thread_pool`` parameter of the class constructor.
-
-The notification pool and the request pool are different: by default, a server
-with a request pool doesn't have a notification pool.
-
-.. code-block:: python
-
- from jsonrpclib.SimpleJSONRPCServer import PooledJSONRPCServer
- from jsonrpclib.threadpool import ThreadPool
-
- # Setup the notification and request pools
- notif_pool = ThreadPool(max_threads=10, min_threads=0)
- request_pool = ThreadPool(max_threads=50, min_threads=10)
-
- # Don't forget to start them
- notif_pool.start()
- request_pool.start()
-
- # Setup the server
- server = PooledJSONRPCServer(('localhost', 8080),
- thread_pool=request_pool)
- server.set_notification_pool(notif_pool)
-
- # Register methods
- server.register_function(pow)
- server.register_function(lambda x,y: x+y, 'add')
- server.register_function(lambda x: x, 'ping')
-
- try:
- server.serve_forever()
- finally:
- # Stop the thread pools (let threads finish their current task)
- request_pool.stop()
- notif_pool.stop()
- server.set_notification_pool(None)
-
-Client Usage
-************
-
-This is (obviously) taken from a console session.
-
-.. code-block:: python
-
- >>> import jsonrpclib
- >>> server = jsonrpclib.ServerProxy('http://localhost:8080')
- >>> server.add(5,6)
- 11
- >>> server.add(x=5, y=10)
- 15
- >>> server._notify.add(5,6)
- # No result returned...
- >>> batch = jsonrpclib.MultiCall(server)
- >>> batch.add(5, 6)
- >>> batch.ping({'key':'value'})
- >>> batch._notify.add(4, 30)
- >>> results = batch()
- >>> for result in results:
- ...     print(result)
- 11
- {'key': 'value'}
- # Note that there are only two responses -- this is according to spec.
-
- # Clean up
- >>> server('close')()
-
- # Using client history
- >>> history = jsonrpclib.history.History()
- >>> server = jsonrpclib.ServerProxy('http://localhost:8080', history=history)
- >>> server.add(5,6)
- 11
- >>> print(history.request)
- {"id": "f682b956-c8e1-4506-9db4-29fe8bc9fcaa", "jsonrpc": "2.0",
- "method": "add", "params": [5, 6]}
- >>> print(history.response)
- {"id": "f682b956-c8e1-4506-9db4-29fe8bc9fcaa", "jsonrpc": "2.0",
- "result": 11}
-
- # Clean up
- >>> server('close')()
-
-If you need 1.0 functionality, there are a bunch of places you can pass that in,
-although the best is just to give a specific configuration to
-``jsonrpclib.ServerProxy``:
-
-.. code-block:: python
-
- >>> import jsonrpclib
- >>> jsonrpclib.config.DEFAULT.version
- 2.0
- >>> config = jsonrpclib.config.Config(version=1.0)
- >>> history = jsonrpclib.history.History()
- >>> server = jsonrpclib.ServerProxy('http://localhost:8080', config=config,
- history=history)
- >>> server.add(7, 10)
- 17
- >>> print(history.request)
- {"id": "827b2923-5b37-49a5-8b36-e73920a16d32",
- "method": "add", "params": [7, 10]}
- >>> print(history.response)
- {"id": "827b2923-5b37-49a5-8b36-e73920a16d32", "error": null, "result": 17}
- >>> server('close')()
-
-The equivalent ``loads`` and ``dumps`` functions also exist, although with minor
-modifications. The ``dumps`` arguments are almost identical, but it adds three
-arguments: ``rpcid`` for the 'id' key, ``version`` to specify the JSON-RPC
-compatibility, and ``notify`` if it's a request that you want to be a
-notification.
-
-Additionally, the ``loads`` method does not return the params and method like
-``xmlrpclib``, but instead a.) parses for errors, raising ProtocolErrors, and
-b.) returns the entire structure of the request / response for manual parsing.
-
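-As a sketch (assuming ``dumps`` mirrors ``xmlrpclib.dumps`` plus the three
-extra arguments described above):
-
-.. code-block:: python
-
-    >>> import jsonrpclib
-    >>> request = jsonrpclib.dumps((5, 6), 'add', rpcid='1', version=2.0)
-    >>> # loads() returns the whole parsed structure, not (params, method)
-    >>> jsonrpclib.loads(request)['method']
-    'add'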
-
-Additional headers
-******************
-
-If your remote service requires custom headers in requests, you can pass them
-as a ``headers`` keyword argument when creating the ``ServerProxy``:
-
-.. code-block:: python
-
- >>> import jsonrpclib
- >>> server = jsonrpclib.ServerProxy("http://localhost:8080",
- headers={'X-Test' : 'Test'})
-
-You can also send additional request headers only for certain method invocations:
-
-.. code-block:: python
-
- >>> import jsonrpclib
- >>> server = jsonrpclib.Server("http://localhost:8080")
- >>> with server._additional_headers({'X-Test' : 'Test'}) as test_server:
- ... test_server.ping(42)
- ...
- >>> # The X-Test header will no longer be sent in requests
-
-Of course ``_additional_headers`` contexts can be nested as well.
-
-
-Class Translation
-*****************
-
-I've recently added "automatic" class translation support; it is turned on by
-default. This can be devastatingly slow if improperly used, so the following
-is a short list of things to keep in mind when using it.
-
-* Keep It (the object) Simple Stupid. (for exceptions, keep reading.)
-* Do not require init params (for exceptions, keep reading)
-* Getter properties without setters could be dangerous (read: not tested)
-
-If any of the above are issues, use the _serialize method. (see usage below)
-The server and client must BOTH have the use_jsonclass configuration item on and
-they must both have access to the same libraries used by the objects for
-this to work.
-
-If you have excessively nested arguments, it would be better to turn off the
-translation and manually invoke it on specific objects using
-``jsonrpclib.jsonclass.dump`` / ``jsonrpclib.jsonclass.load`` (since the default
-behavior recursively goes through attributes and lists / dicts / tuples).
-
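-A manual round-trip might look like this sketch (``some_bean`` stands for
-any translatable object):
-
-.. code-block:: python
-
-    from jsonrpclib import jsonclass
-
-    # Serialize a single object explicitly instead of letting the
-    # library recurse through every argument
-    as_dict = jsonclass.dump(some_bean)
-    restored = jsonclass.load(as_dict)
-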
- Sample file: *test_obj.py*
-
-.. code-block:: python
-
- # This object is /very/ simple, and the system will look through the
- # attributes and serialize what it can.
- class TestObj(object):
- foo = 'bar'
-
- # This object requires __init__ params, so it uses the _serialize method
- # and returns a tuple of init params and attribute values (the init params
- # can be a dict or a list, but the attribute values must be a dict.)
- class TestSerial(object):
- foo = 'bar'
- def __init__(self, *args):
- self.args = args
- def _serialize(self):
- return (self.args, {'foo':self.foo,})
-
-* Sample usage
-
-.. code-block:: python
-
- >>> import jsonrpclib
- >>> import test_obj
-
- # History is used only to print the serialized form of beans
- >>> history = jsonrpclib.history.History()
- >>> testobj1 = test_obj.TestObj()
- >>> testobj2 = test_obj.TestSerial()
- >>> server = jsonrpclib.Server('http://localhost:8080', history=history)
-
- # The 'ping' just returns whatever is sent
- >>> ping1 = server.ping(testobj1)
- >>> ping2 = server.ping(testobj2)
-
- >>> print(history.request)
- {"id": "7805f1f9-9abd-49c6-81dc-dbd47229fe13", "jsonrpc": "2.0",
- "method": "ping", "params": [{"__jsonclass__":
- ["test_obj.TestSerial", []], "foo": "bar"}
- ]}
- >>> print(history.response)
- {"id": "7805f1f9-9abd-49c6-81dc-dbd47229fe13", "jsonrpc": "2.0",
- "result": {"__jsonclass__": ["test_obj.TestSerial", []], "foo": "bar"}}
-
-This behavior is turned on by default. To deactivate it, just set the
-``use_jsonclass`` member of a server ``Config`` to False.
-If you want to use a per-class serialization method, set its name in the
-``serialize_method`` member of a server ``Config``.
-Finally, if you are using classes that you have defined in the implementation
-(as in, not a separate library), you'll need to add those (on BOTH the server
-and the client) using the ``config.classes.add()`` method.
-
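-For instance (``MyBean`` stands for such an implementation class):
-
-.. code-block:: python
-
-    import jsonrpclib.config
-
-    config = jsonrpclib.config.Config()
-    # Register on BOTH sides, with the same class
-    config.classes.add(MyBean)
-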
-Feedback on this "feature" is very, VERY much appreciated.
-
-Why JSON-RPC?
-*************
-
-In my opinion, there are several reasons to choose JSON over XML for RPC:
-
-* Much simpler to read (I suppose this is opinion, but I know I'm right. :)
-* Size / Bandwidth - Main reason, a JSON object representation is just much smaller.
-* Parsing - JSON should be much quicker to parse than XML.
-* Easy class passing with ``jsonclass`` (when enabled)
-
-In the interest of being fair, there are also a few reasons to choose XML
-over JSON:
-
-* Your server doesn't do JSON (rather obvious)
-* Wider XML-RPC support across APIs (can we change this? :))
-* Libraries are more established, i.e. more stable (Let's change this too.)
-
-Tests
-*****
-
-Tests are an almost-verbatim drop from the JSON-RPC specification 2.0 page.
-They can be run using *unittest* or *nosetests*:
-
-.. code-block:: console
-
- python -m unittest discover tests
- python3 -m unittest discover tests
- nosetests tests
+JSONRPClib (patched for Pelix)
+##############################
+[... the remaining added lines repeat the removed README.rst lines above verbatim; the file moves from python_lib to external_libs and its mode changes from 100755 to 100644 ...]
diff --git a/scripts/automation/trex_control_plane/python_lib/jsonrpclib-pelix-0.2.5/jsonrpclib/SimpleJSONRPCServer.py b/scripts/external_libs/jsonrpclib-pelix-0.2.5/jsonrpclib/SimpleJSONRPCServer.py
index f7a7b652..e9fe4e68 100755..100644
--- a/scripts/automation/trex_control_plane/python_lib/jsonrpclib-pelix-0.2.5/jsonrpclib/SimpleJSONRPCServer.py
+++ b/scripts/external_libs/jsonrpclib-pelix-0.2.5/jsonrpclib/SimpleJSONRPCServer.py
@@ -1,602 +1,602 @@
-#!/usr/bin/python
-# -- Content-Encoding: UTF-8 --
-"""
-Defines a request dispatcher, an HTTP request handler, an HTTP server and a
-CGI request handler.
-
-:authors: Josh Marshall, Thomas Calmant
-:copyright: Copyright 2015, isandlaTech
-:license: Apache License 2.0
-:version: 0.2.5
-
-..
-
- Copyright 2015 isandlaTech
-
- Licensed under the Apache License, Version 2.0 (the "License");
- you may not use this file except in compliance with the License.
- You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
- Unless required by applicable law or agreed to in writing, software
- distributed under the License is distributed on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- See the License for the specific language governing permissions and
- limitations under the License.
-"""
-
-# Module version
-__version_info__ = (0, 2, 5)
-__version__ = ".".join(str(x) for x in __version_info__)
-
-# Documentation strings format
-__docformat__ = "restructuredtext en"
-
-# ------------------------------------------------------------------------------
-# Local modules
-from jsonrpclib import Fault
-import jsonrpclib.config
-import jsonrpclib.utils as utils
-import jsonrpclib.threadpool
-
-# Standard library
-import logging
-import socket
-import sys
-import traceback
-
-# Prepare the logger
-_logger = logging.getLogger(__name__)
-
-try:
- # Python 3
- # pylint: disable=F0401,E0611
- import xmlrpc.server as xmlrpcserver
- import socketserver
-except (ImportError, AttributeError):
- # Python 2 or IronPython
- # pylint: disable=F0401,E0611
- import SimpleXMLRPCServer as xmlrpcserver
- import SocketServer as socketserver
-
-try:
- # Un*x systems (fcntl is not available on Windows)
- import fcntl
-except ImportError:
- # Windows and other systems without fcntl
- # pylint: disable=C0103
- fcntl = None
-
-# ------------------------------------------------------------------------------
-
-
-def get_version(request):
- """
- Computes the JSON-RPC version
-
- :param request: A request dictionary
- :return: The JSON-RPC version or None
- """
- if 'jsonrpc' in request:
- return 2.0
- elif 'id' in request:
- return 1.0
-
- return None
-
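-# For example (illustrative values):
-#   get_version({'jsonrpc': '2.0', 'method': 'a'}) -> 2.0
-#   get_version({'id': 42, 'method': 'a'})         -> 1.0
-#   get_version({'method': 'a'})                   -> None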
-
-def validate_request(request, json_config):
- """
- Validates the format of a request dictionary
-
- :param request: A request dictionary
- :param json_config: A JSONRPClib Config instance
- :return: True if the dictionary is valid, else a Fault object
- """
- if not isinstance(request, utils.DictType):
- # Invalid request type
- fault = Fault(-32600, 'Request must be a dict, not {0}'
- .format(type(request).__name__),
- config=json_config)
- _logger.warning("Invalid request content: %s", fault)
- return fault
-
- # Get the request ID
- rpcid = request.get('id', None)
-
- # Check request version
- version = get_version(request)
- if not version:
- fault = Fault(-32600, 'Request {0} invalid.'.format(request),
- rpcid=rpcid, config=json_config)
- _logger.warning("No version in request: %s", fault)
- return fault
-
- # Default parameters: empty list
- request.setdefault('params', [])
-
- # Check parameters
- method = request.get('method', None)
- params = request.get('params')
- param_types = (utils.ListType, utils.DictType, utils.TupleType)
-
- if not method or not isinstance(method, utils.string_types) or \
- not isinstance(params, param_types):
- # Invalid type of method name or parameters
- fault = Fault(-32600, 'Invalid request parameters or method.',
- rpcid=rpcid, config=json_config)
- _logger.warning("Invalid request content: %s", fault)
- return fault
-
- # Valid request
- return True
-
-# ------------------------------------------------------------------------------
-
-
-class NoMulticallResult(Exception):
- """
- No result in multicall
- """
- pass
-
-
-class SimpleJSONRPCDispatcher(xmlrpcserver.SimpleXMLRPCDispatcher, object):
- """
- Mix-in class that dispatches JSON-RPC requests.
-
- This class is used to register JSON-RPC method handlers
- and then to dispatch them. This class doesn't need to be
- instantiated directly when used by SimpleJSONRPCServer.
- """
- def __init__(self, encoding=None, config=jsonrpclib.config.DEFAULT):
- """
- Sets up the dispatcher with the given encoding.
- None values are allowed.
- """
- xmlrpcserver.SimpleXMLRPCDispatcher.__init__(
- self, allow_none=True, encoding=encoding or "UTF-8")
- self.json_config = config
-
- # Notification thread pool
- self.__notification_pool = None
-
- def set_notification_pool(self, thread_pool):
- """
- Sets the thread pool to use to handle notifications
- """
- self.__notification_pool = thread_pool
-
- def _unmarshaled_dispatch(self, request, dispatch_method=None):
- """
- Loads the request dictionary (unmarshaled), calls the method(s)
- accordingly and returns a JSON-RPC dictionary (not marshaled)
-
- :param request: JSON-RPC request dictionary (or list of)
- :param dispatch_method: Custom dispatch method (for method resolution)
- :return: A JSON-RPC dictionary (or an array of) or None if the request
- was a notification
- :raise NoMulticallResult: No result in batch
- """
- if not request:
- # Invalid request dictionary
- fault = Fault(-32600, 'Request invalid -- no request data.',
- config=self.json_config)
- _logger.warning("Invalid request: %s", fault)
- return fault.dump()
-
- if isinstance(request, utils.ListType):
- # This SHOULD be a batch, by spec
- responses = []
- for req_entry in request:
- # Validate the request
- result = validate_request(req_entry, self.json_config)
- if isinstance(result, Fault):
- responses.append(result.dump())
- continue
-
- # Call the method
- resp_entry = self._marshaled_single_dispatch(req_entry,
- dispatch_method)
-
- # Store its result
- if isinstance(resp_entry, Fault):
- # pylint: disable=E1103
- responses.append(resp_entry.dump())
- elif resp_entry is not None:
- responses.append(resp_entry)
-
- if not responses:
- # No non-None result
- _logger.error("No result in Multicall")
- raise NoMulticallResult("No result")
-
- return responses
-
- else:
- # Single call
- result = validate_request(request, self.json_config)
- if isinstance(result, Fault):
- return result.dump()
-
- # Call the method
- response = self._marshaled_single_dispatch(request,
- dispatch_method)
- if isinstance(response, Fault):
- # pylint: disable=E1103
- return response.dump()
-
- return response
-
- def _marshaled_dispatch(self, data, dispatch_method=None, path=None):
- """
- Parses the request data (marshaled), calls method(s) and returns a
- JSON string (marshaled)
-
- :param data: A JSON request string
- :param dispatch_method: Custom dispatch method (for method resolution)
- :param path: Unused parameter, to keep compatibility with xmlrpclib
- :return: A JSON-RPC response string (marshaled)
- """
- # Parse the request
- try:
- request = jsonrpclib.loads(data, self.json_config)
- except Exception as ex:
- # Parsing/loading error
- fault = Fault(-32700, 'Request {0} invalid. ({1}:{2})'
- .format(data, type(ex).__name__, ex),
- config=self.json_config)
- _logger.warning("Error parsing request: %s", fault)
- return fault.response()
-
- # Get the response dictionary
- try:
- response = self._unmarshaled_dispatch(request, dispatch_method)
- if response is not None:
- # Compute the string representation of the dictionary/list
- return jsonrpclib.jdumps(response, self.encoding)
- else:
- # No result (notification)
- return ''
- except NoMulticallResult:
- # Return an empty string (jsonrpclib internal behaviour)
- return ''
-
- def _marshaled_single_dispatch(self, request, dispatch_method=None):
- """
- Dispatches a single method call
-
- :param request: A validated request dictionary
- :param dispatch_method: Custom dispatch method (for method resolution)
- :return: A JSON-RPC response dictionary, or None if it was a
- notification request
- """
- method = request.get('method')
- params = request.get('params')
-
- # Prepare a request-specific configuration
- if 'jsonrpc' not in request and self.json_config.version >= 2:
- # JSON-RPC 1.0 request on a JSON-RPC 2.0 server
- # => compatibility needed
- config = self.json_config.copy()
- config.version = 1.0
- else:
- # Keep server configuration as is
- config = self.json_config
-
- # Test if this is a notification request
- is_notification = 'id' not in request or request['id'] in (None, '')
- if is_notification and self.__notification_pool is not None:
- # Use the thread pool for notifications
- if dispatch_method is not None:
- self.__notification_pool.enqueue(dispatch_method,
- method, params)
- else:
- self.__notification_pool.enqueue(self._dispatch,
- method, params, config)
-
- # Return immediately
- return None
- else:
- # Synchronous call
- try:
- # Call the method
- if dispatch_method is not None:
- response = dispatch_method(method, params)
- else:
- response = self._dispatch(method, params, config)
- except Exception as ex:
- # Return a fault
- fault = Fault(-32603, '{0}:{1}'.format(type(ex).__name__, ex),
- config=config)
- _logger.error("Error calling method %s: %s", method, fault)
- return fault.dump()
-
- if is_notification:
- # It's a notification, no result needed
- # Do not use 'not id' as it might be the integer 0
- return None
-
- # Prepare a JSON-RPC dictionary
- try:
- return jsonrpclib.dump(response, rpcid=request['id'],
- is_response=True, config=config)
- except Exception as ex:
- # JSON conversion exception
- fault = Fault(-32603, '{0}:{1}'.format(type(ex).__name__, ex),
- config=config)
- _logger.error("Error preparing JSON-RPC result: %s", fault)
- return fault.dump()
-
- def _dispatch(self, method, params, config=None):
- """
- Default method resolver and caller
-
- :param method: Name of the method to call
- :param params: List of arguments to give to the method
- :param config: Request-specific configuration
- :return: The result of the method
- """
- config = config or self.json_config
-
- func = None
- try:
- # Look into registered methods
- func = self.funcs[method]
- except KeyError:
- if self.instance is not None:
- # Try with the registered instance
- try:
- # Instance has a custom dispatcher
- return getattr(self.instance, '_dispatch')(method, params)
- except AttributeError:
- # Resolve the method name in the instance
- try:
- func = xmlrpcserver.resolve_dotted_attribute(
- self.instance, method, True)
- except AttributeError:
- # Unknown method
- pass
-
- if func is not None:
- try:
- # Call the method
- if isinstance(params, utils.ListType):
- return func(*params)
- else:
- return func(**params)
- except TypeError as ex:
- # Maybe the parameters are wrong
- fault = Fault(-32602, 'Invalid parameters: {0}'.format(ex),
- config=config)
- _logger.warning("Invalid call parameters: %s", fault)
- return fault
- except:
- # Method exception
- err_lines = traceback.format_exc().splitlines()
- trace_string = '{0} | {1}'.format(err_lines[-3], err_lines[-1])
- fault = Fault(-32603, 'Server error: {0}'.format(trace_string),
- config=config)
- _logger.exception("Server-side exception: %s", fault)
- return fault
- else:
- # Unknown method
- fault = Fault(-32601, 'Method {0} not supported.'.format(method),
- config=config)
- _logger.warning("Unknown method: %s", fault)
- return fault
-
-# ------------------------------------------------------------------------------
-
-
-class SimpleJSONRPCRequestHandler(xmlrpcserver.SimpleXMLRPCRequestHandler):
- """
- HTTP request handler.
-
- The server that receives the requests must have a json_config member,
- containing a JSONRPClib Config instance
- """
- def do_POST(self):
- """
- Handles POST requests
- """
- if not self.is_rpc_path_valid():
- self.report_404()
- return
-
- # Retrieve the configuration
- config = getattr(self.server, 'json_config', jsonrpclib.config.DEFAULT)
-
- try:
- # Read the request body
- max_chunk_size = 10 * 1024 * 1024
- size_remaining = int(self.headers["content-length"])
- chunks = []
- while size_remaining:
- chunk_size = min(size_remaining, max_chunk_size)
- raw_chunk = self.rfile.read(chunk_size)
- if not raw_chunk:
- break
- chunks.append(utils.from_bytes(raw_chunk))
- size_remaining -= len(chunks[-1])
- data = ''.join(chunks)
-
- try:
- # Decode content
- data = self.decode_request_content(data)
- if data is None:
- # Unknown encoding, response has been sent
- return
- except AttributeError:
- # Available since Python 2.7
- pass
-
- # Execute the method
- response = self.server._marshaled_dispatch(
- data, getattr(self, '_dispatch', None), self.path)
-
- # No exception: send a 200 OK
- self.send_response(200)
- except:
- # Exception: send 500 Server Error
- self.send_response(500)
- err_lines = traceback.format_exc().splitlines()
- trace_string = '{0} | {1}'.format(err_lines[-3], err_lines[-1])
- fault = jsonrpclib.Fault(-32603, 'Server error: {0}'
- .format(trace_string), config=config)
- _logger.exception("Server-side error: %s", fault)
- response = fault.response()
-
- if response is None:
- # Avoid sending None
- response = ''
-
- # Convert the response to the valid string format
- response = utils.to_bytes(response)
-
- # Send it
- self.send_header("Content-type", config.content_type)
- self.send_header("Content-length", str(len(response)))
- self.end_headers()
- if response:
- self.wfile.write(response)
-
-# ------------------------------------------------------------------------------
-
-
-class SimpleJSONRPCServer(socketserver.TCPServer, SimpleJSONRPCDispatcher):
- """
- JSON-RPC server (and dispatcher)
- """
- # This simplifies server restart after error
- allow_reuse_address = True
-
- # pylint: disable=C0103
- def __init__(self, addr, requestHandler=SimpleJSONRPCRequestHandler,
- logRequests=True, encoding=None, bind_and_activate=True,
- address_family=socket.AF_INET,
- config=jsonrpclib.config.DEFAULT):
- """
- Sets up the server and the dispatcher
-
- :param addr: The server listening address
- :param requestHandler: Custom request handler
- :param logRequests: Flag to (de)activate request logging
- :param encoding: The dispatcher request encoding
- :param bind_and_activate: If True, starts the server immediately
- :param address_family: The server listening address family
- :param config: A JSONRPClib Config instance
- """
- # Set up the dispatcher fields
- SimpleJSONRPCDispatcher.__init__(self, encoding, config)
-
- # Prepare the server configuration
- # logRequests is used by SimpleXMLRPCRequestHandler
- self.logRequests = logRequests
- self.address_family = address_family
- self.json_config = config
-
- # Work on the request handler
- class RequestHandlerWrapper(requestHandler, object):
- """
- Wraps the request handler to give it access to the configuration
- """
- def __init__(self, *args, **kwargs):
- """
- Constructs the wrapper after having stored the configuration
- """
- self.config = config
- super(RequestHandlerWrapper, self).__init__(*args, **kwargs)
-
- # Set up the server with the configuration-aware handler wrapper
- socketserver.TCPServer.__init__(self, addr, RequestHandlerWrapper,
- bind_and_activate)
-
- # POSIX only: set FD_CLOEXEC so the socket is not inherited by exec'd children (fcntl is None on Windows)
- if fcntl is not None and hasattr(fcntl, 'FD_CLOEXEC'):
- flags = fcntl.fcntl(self.fileno(), fcntl.F_GETFD)
- flags |= fcntl.FD_CLOEXEC
- fcntl.fcntl(self.fileno(), fcntl.F_SETFD, flags)
-
-# ------------------------------------------------------------------------------
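For reference, the canonical way to stand up the server class above (a sketch; host, port and method names are illustrative, and the module path jsonrpclib.SimpleJSONRPCServer is assumed):

from jsonrpclib.SimpleJSONRPCServer import SimpleJSONRPCServer

server = SimpleJSONRPCServer(('localhost', 8080))
server.register_function(pow)
server.register_function(lambda x, y: x + y, 'add')
server.serve_forever()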
-
-
-class PooledJSONRPCServer(SimpleJSONRPCServer, socketserver.ThreadingMixIn):
- """
- JSON-RPC server based on a thread pool
- """
- def __init__(self, addr, requestHandler=SimpleJSONRPCRequestHandler,
- logRequests=True, encoding=None, bind_and_activate=True,
- address_family=socket.AF_INET,
- config=jsonrpclib.config.DEFAULT, thread_pool=None):
- """
- Sets up the server and the dispatcher
-
- :param addr: The server listening address
- :param requestHandler: Custom request handler
- :param logRequests: Flag to (de)activate request logging
- :param encoding: The dispatcher request encoding
- :param bind_and_activate: If True, starts the server immediately
- :param address_family: The server listening address family
- :param config: A JSONRPClib Config instance
- :param thread_pool: A ThreadPool object. The pool must be started.
- """
- # Normalize the thread pool
- if thread_pool is None:
- # Start a thread pool with at most 30 threads and no minimum
- thread_pool = jsonrpclib.threadpool.ThreadPool(
- 30, 0, logname="PooledJSONRPCServer")
- thread_pool.start()
-
- # Store the thread pool
- self.__request_pool = thread_pool
-
- # Prepare the server
- SimpleJSONRPCServer.__init__(self, addr, requestHandler, logRequests,
- encoding, bind_and_activate,
- address_family, config)
-
- def process_request(self, request, client_address):
- """
- Handle a client request: queue it in the thread pool
- """
- self.__request_pool.enqueue(self.process_request_thread,
- request, client_address)
-
- def server_close(self):
- """
- Clean up the server
- """
- SimpleJSONRPCServer.server_close(self)
- self.__request_pool.stop()
-
-# ------------------------------------------------------------------------------
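When thread_pool is None the constructor above creates and starts its own 30-thread pool; handing in a pre-started pool makes the bounds explicit. A hedged sketch (pool sizes, log name and address are illustrative):

import jsonrpclib.threadpool
from jsonrpclib.SimpleJSONRPCServer import PooledJSONRPCServer

# Same ThreadPool(max_threads, min_threads, ...) call the constructor
# above uses for its default pool
pool = jsonrpclib.threadpool.ThreadPool(8, 2, logname="rpc-pool")
pool.start()  # the pool must be started, per the docstring above

server = PooledJSONRPCServer(('localhost', 8080), thread_pool=pool)
server.register_function(lambda x, y: x + y, 'add')
try:
    server.serve_forever()
finally:
    server.server_close()  # stops the request pool as well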
-
-
-class CGIJSONRPCRequestHandler(SimpleJSONRPCDispatcher):
- """
- JSON-RPC CGI handler (and dispatcher)
- """
- def __init__(self, encoding=None, config=jsonrpclib.config.DEFAULT):
- """
- Sets up the dispatcher
-
- :param encoding: Dispatcher encoding
- :param config: A JSONRPClib Config instance
- """
- SimpleJSONRPCDispatcher.__init__(self, encoding, config)
-
- def handle_jsonrpc(self, request_text):
- """
- Handle a JSON-RPC request
- """
- response = self._marshaled_dispatch(request_text)
- sys.stdout.write('Content-Type: {0}\r\n'
- .format(self.json_config.content_type))
- sys.stdout.write('Content-Length: {0:d}\r\n'.format(len(response)))
- sys.stdout.write('\r\n')
- sys.stdout.write(response)
-
- # XML-RPC alias
- handle_xmlrpc = handle_jsonrpc
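A hedged sketch of the CGI entry point this last class enables, assuming a plain CGI script that reads the request body from stdin (method name is illustrative):

import sys
from jsonrpclib.SimpleJSONRPCServer import CGIJSONRPCRequestHandler

handler = CGIJSONRPCRequestHandler()
handler.register_function(lambda x, y: x + y, 'add')
# handle_jsonrpc() writes the Content-Type/Content-Length headers
# and the marshaled response to stdout, as defined above
handler.handle_jsonrpc(sys.stdin.read())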
diff --git a/scripts/automation/trex_control_plane/python_lib/jsonrpclib-pelix-0.2.5/jsonrpclib/__init__.py b/scripts/external_libs/jsonrpclib-pelix-0.2.5/jsonrpclib/__init__.py index 2c7dc1c5..a92774ab 100755..100644 --- a/scripts/automation/trex_control_plane/python_lib/jsonrpclib-pelix-0.2.5/jsonrpclib/__init__.py +++ b/scripts/external_libs/jsonrpclib-pelix-0.2.5/jsonrpclib/__init__.py @@ -1,34 +1,34 @@ -#!/usr/bin/python
-# -- Content-Encoding: UTF-8 --
-"""
-Aliases to ease access to jsonrpclib classes
-
-:authors: Josh Marshall, Thomas Calmant
-:copyright: Copyright 2015, isandlaTech
-:license: Apache License 2.0
-:version: 0.2.5
-
-..
-
- Copyright 2015 isandlaTech
-
- Licensed under the Apache License, Version 2.0 (the "License");
- you may not use this file except in compliance with the License.
- You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
- Unless required by applicable law or agreed to in writing, software
- distributed under the License is distributed on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- See the License for the specific language governing permissions and
- limitations under the License.
-"""
-
-# Easy access to utility methods and classes
-from jsonrpclib.jsonrpc import Server, ServerProxy
-from jsonrpclib.jsonrpc import MultiCall, Fault, ProtocolError, AppError
-from jsonrpclib.jsonrpc import loads, dumps, load, dump
-from jsonrpclib.jsonrpc import jloads, jdumps
-import jsonrpclib.history as history
-import jsonrpclib.utils as utils
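These aliases are what make the package usable without touching submodules; the pattern mirrors the quick-start documented in jsonrpc.py further below (server address and method names are illustrative):

import jsonrpclib

proxy = jsonrpclib.Server('http://localhost:8080')  # alias of ServerProxy
print(proxy.add(5, 6))  # -> 11

batch = jsonrpclib.MultiCall(proxy)
batch.add(3, 50)
batch.add(2, 3)
print(list(batch()))  # -> [53, 5]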
diff --git a/scripts/automation/trex_control_plane/python_lib/jsonrpclib-pelix-0.2.5/jsonrpclib/config.py b/scripts/external_libs/jsonrpclib-pelix-0.2.5/jsonrpclib/config.py index d2c5a811..77838d4e 100755..100644 --- a/scripts/automation/trex_control_plane/python_lib/jsonrpclib-pelix-0.2.5/jsonrpclib/config.py +++ b/scripts/external_libs/jsonrpclib-pelix-0.2.5/jsonrpclib/config.py @@ -1,141 +1,141 @@ -#!/usr/bin/python
-# -- Content-Encoding: UTF-8 --
-"""
-The configuration module.
-
-:copyright: Copyright 2015, isandlaTech
-:license: Apache License 2.0
-:version: 0.2.5
-
-..
-
- Copyright 2015 isandlaTech
-
- Licensed under the Apache License, Version 2.0 (the "License");
- you may not use this file except in compliance with the License.
- You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
- Unless required by applicable law or agreed to in writing, software
- distributed under the License is distributed on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- See the License for the specific language governing permissions and
- limitations under the License.
-"""
-
-# Module version
-__version_info__ = (0, 2, 5)
-__version__ = ".".join(str(x) for x in __version_info__)
-
-# Documentation strings format
-__docformat__ = "restructuredtext en"
-
-# ------------------------------------------------------------------------------
-
-import sys
-
-# ------------------------------------------------------------------------------
-
-
-class LocalClasses(dict):
- """
- Associates local classes with their names (used in the jsonclass module)
- """
- def add(self, cls, name=None):
- """
- Stores a local class
-
- :param cls: A class
- :param name: Custom name used in the __jsonclass__ attribute
- """
- if not name:
- name = cls.__name__
- self[name] = cls
-
-# ------------------------------------------------------------------------------
-
-
-class Config(object):
- """
- This is pretty much used exclusively for the 'jsonclass'
- functionality... set use_jsonclass to False to turn it off.
- You can change serialize_method and ignore_attribute, or use
- classes.add(cls) to register "local" classes.
- """
- def __init__(self, version=2.0, content_type="application/json-rpc",
- user_agent=None, use_jsonclass=True,
- serialize_method='_serialize',
- ignore_attribute='_ignore',
- serialize_handlers=None):
- """
- Sets up a configuration of JSONRPClib
-
- :param version: JSON-RPC specification version
- :param content_type: HTTP content type header value
- :param user_agent: The HTTP request user agent
- :param use_jsonclass: Allow bean marshalling
- :param serialize_method: A string that references the method on a
- custom class object which is responsible for
- returning a tuple of the arguments and a dict
- of attributes.
- :param ignore_attribute: A string that references the attribute on a
- custom class object which holds strings and/or
- references of the attributes the class
- translator should ignore.
- :param serialize_handlers: A dictionary of dump handler functions by
- type for additional type support and for
- overriding dump of built-in types in utils
- """
- # JSON-RPC specification
- self.version = version
-
- # Change to False to keep __jsonclass__ entries raw.
- self.use_jsonclass = use_jsonclass
-
- # it SHOULD be 'application/json-rpc'
- # but MAY be 'application/json' or 'application/jsonrequest'
- self.content_type = content_type
-
- # Default user agent
- if user_agent is None:
- user_agent = 'jsonrpclib/{0} (Python {1})'.format(
- __version__, '.'.join(str(ver)
- for ver in sys.version_info[0:3]))
- self.user_agent = user_agent
-
- # The list of classes to use for jsonclass translation.
- self.classes = LocalClasses()
-
- # The serialize_method should be a string that references the
- # method on a custom class object which is responsible for
- # returning a tuple of the constructor arguments and a dict of
- # attributes.
- self.serialize_method = serialize_method
-
- # The ignore attribute should be a string that references the
- # attribute on a custom class object which holds strings and / or
- # references of the attributes the class translator should ignore.
- self.ignore_attribute = ignore_attribute
-
- # The list of serialize handler functions for jsonclass dump.
- # Used for handling additional types and overriding built-in types.
- # Functions are expected to have the same parameters as jsonclass dump
- # (possibility to call standard jsonclass dump function within).
- self.serialize_handlers = serialize_handlers or {}
-
- def copy(self):
- """
- Returns a shallow copy of this configuration bean
-
- :return: A shallow copy of this configuration
- """
- new_config = Config(self.version, self.content_type, self.user_agent,
- self.use_jsonclass, self.serialize_method,
- self.ignore_attribute, None)
- new_config.classes = self.classes.copy()
- new_config.serialize_handlers = self.serialize_handlers.copy()
- return new_config
-
-# Default configuration
-DEFAULT = Config()
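A hedged sketch of adjusting these settings: copy the shared DEFAULT instance rather than mutating it, since every client and server falls back to it (the ServerProxy config keyword is assumed from this library version; the address is illustrative):

import jsonrpclib
import jsonrpclib.config

config = jsonrpclib.config.DEFAULT.copy()
config.version = 1.0                      # speak JSON-RPC 1.0
config.content_type = 'application/json'  # one of the tolerated values

proxy = jsonrpclib.ServerProxy('http://localhost:8080', config=config)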
diff --git a/scripts/automation/trex_control_plane/python_lib/jsonrpclib-pelix-0.2.5/jsonrpclib/history.py b/scripts/external_libs/jsonrpclib-pelix-0.2.5/jsonrpclib/history.py index 7062ab66..288d9539 100755..100644 --- a/scripts/automation/trex_control_plane/python_lib/jsonrpclib-pelix-0.2.5/jsonrpclib/history.py +++ b/scripts/external_libs/jsonrpclib-pelix-0.2.5/jsonrpclib/history.py @@ -1,95 +1,95 @@ -#!/usr/bin/python
-# -- Content-Encoding: UTF-8 --
-"""
-The history module.
-
-:authors: Josh Marshall, Thomas Calmant
-:copyright: Copyright 2015, isandlaTech
-:license: Apache License 2.0
-:version: 0.2.5
-
-..
-
- Copyright 2015 isandlaTech
-
- Licensed under the Apache License, Version 2.0 (the "License");
- you may not use this file except in compliance with the License.
- You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
- Unless required by applicable law or agreed to in writing, software
- distributed under the License is distributed on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- See the License for the specific language governing permissions and
- limitations under the License.
-"""
-
-# Module version
-__version_info__ = (0, 2, 5)
-__version__ = ".".join(str(x) for x in __version_info__)
-
-# Documentation strings format
-__docformat__ = "restructuredtext en"
-
-# ------------------------------------------------------------------------------
-
-
-class History(object):
- """
- This holds all the response and request objects for a
- session. A server using this should call "clear" after
- each request cycle in order to keep it from clogging
- memory.
- """
- def __init__(self):
- """
- Sets up members
- """
- self.requests = []
- self.responses = []
-
- def add_response(self, response_obj):
- """
- Adds a response to the history
-
- :param response_obj: Response content
- """
- self.responses.append(response_obj)
-
- def add_request(self, request_obj):
- """
- Adds a request to the history
-
- :param request_obj: A request object
- """
- self.requests.append(request_obj)
-
- @property
- def request(self):
- """
- Returns the latest stored request or None
- """
- try:
- return self.requests[-1]
-
- except IndexError:
- return None
-
- @property
- def response(self):
- """
- Returns the latest stored response or None
- """
- try:
- return self.responses[-1]
-
- except IndexError:
- return None
-
- def clear(self):
- """
- Clears the history lists
- """
- del self.requests[:]
- del self.responses[:]
+#!/usr/bin/python +# -- Content-Encoding: UTF-8 -- +""" +The history module. + +:authors: Josh Marshall, Thomas Calmant +:copyright: Copyright 2015, isandlaTech +:license: Apache License 2.0 +:version: 0.2.5 + +.. + + Copyright 2015 isandlaTech + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +""" + +# Module version +__version_info__ = (0, 2, 5) +__version__ = ".".join(str(x) for x in __version_info__) + +# Documentation strings format +__docformat__ = "restructuredtext en" + +# ------------------------------------------------------------------------------ + + +class History(object): + """ + This holds all the response and request objects for a + session. A server using this should call "clear" after + each request cycle in order to keep it from clogging + memory. + """ + def __init__(self): + """ + Sets up members + """ + self.requests = [] + self.responses = [] + + def add_response(self, response_obj): + """ + Adds a response to the history + + :param response_obj: Response content + """ + self.responses.append(response_obj) + + def add_request(self, request_obj): + """ + Adds a request to the history + + :param request_obj: A request object + """ + self.requests.append(request_obj) + + @property + def request(self): + """ + Returns the latest stored request or None + """ + try: + return self.requests[-1] + + except IndexError: + return None + + @property + def response(self): + """ + Returns the latest stored response or None + """ + try: + return self.responses[-1] + + except IndexError: + return None + + def clear(self): + """ + Clears the history lists + """ + del self.requests[:] + del self.responses[:] diff --git a/scripts/automation/trex_control_plane/python_lib/jsonrpclib-pelix-0.2.5/jsonrpclib/jsonclass.py b/scripts/external_libs/jsonrpclib-pelix-0.2.5/jsonrpclib/jsonclass.py index c7cc4c35..6bcbeab7 100755..100644 --- a/scripts/automation/trex_control_plane/python_lib/jsonrpclib-pelix-0.2.5/jsonrpclib/jsonclass.py +++ b/scripts/external_libs/jsonrpclib-pelix-0.2.5/jsonrpclib/jsonclass.py @@ -1,295 +1,295 @@ -#!/usr/bin/python
-# -- Content-Encoding: UTF-8 --
-"""
-The serialization module
-
-:authors: Josh Marshall, Thomas Calmant
-:copyright: Copyright 2015, isandlaTech
-:license: Apache License 2.0
-:version: 0.2.5
-
-..
-
- Copyright 2015 isandlaTech
-
- Licensed under the Apache License, Version 2.0 (the "License");
- you may not use this file except in compliance with the License.
- You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
- Unless required by applicable law or agreed to in writing, software
- distributed under the License is distributed on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- See the License for the specific language governing permissions and
- limitations under the License.
-"""
-
-# Module version
-__version_info__ = (0, 2, 5)
-__version__ = ".".join(str(x) for x in __version_info__)
-
-# Documentation strings format
-__docformat__ = "restructuredtext en"
-
-# ------------------------------------------------------------------------------
-
-# Local package
-import jsonrpclib.config
-import jsonrpclib.utils as utils
-
-# Standard library
-import inspect
-import re
-
-# ------------------------------------------------------------------------------
-
-# Supported transmitted code
-SUPPORTED_TYPES = (utils.DictType,) + utils.iterable_types \
- + utils.primitive_types
-
-# Regex of invalid module characters
-INVALID_MODULE_CHARS = r'[^a-zA-Z0-9\_\.]'
-
-# ------------------------------------------------------------------------------
-
-
-class TranslationError(Exception):
- """
- Unmarshaling exception
- """
- pass
-
-
-def _slots_finder(clazz, fields_set):
- """
- Recursively visits the class hierarchy to find all slots
-
- :param clazz: Class to analyze
- :param fields_set: Set where to store __slots__ content
- """
- # ... class level
- try:
- fields_set.update(clazz.__slots__)
- except AttributeError:
- pass
-
- # ... parent classes level
- for base_class in clazz.__bases__:
- _slots_finder(base_class, fields_set)
-
-
-def _find_fields(obj):
- """
- Returns the names of the fields of the given object
-
- :param obj: An object to analyze
- :return: A set of field names
- """
- # Find fields...
- fields = set()
-
- # ... using __dict__
- try:
- fields.update(obj.__dict__)
- except AttributeError:
- pass
-
- # ... using __slots__
- _slots_finder(obj.__class__, fields)
- return fields
-
-
-def dump(obj, serialize_method=None, ignore_attribute=None, ignore=None,
- config=jsonrpclib.config.DEFAULT):
- """
- Transforms the given object into a JSON-RPC compliant form.
- Converts beans into dictionaries with a __jsonclass__ entry.
- Doesn't change primitive types.
-
- :param obj: An object to convert
- :param serialize_method: Custom serialization method
- :param ignore_attribute: Name of the object attribute containing the names
- of members to ignore
- :param ignore: A list of members to ignore
- :param config: A JSONRPClib Config instance
- :return: A JSON-RPC compliant object
- """
- # Normalize arguments
- serialize_method = serialize_method or config.serialize_method
- ignore_attribute = ignore_attribute or config.ignore_attribute
- ignore = ignore or []
-
- # Parse / return default "types"...
- # Apply additional types, override built-in types
- # (reminder: config.serialize_handlers is a dict)
- try:
- serializer = config.serialize_handlers[type(obj)]
- except KeyError:
- # Not a serializer
- pass
- else:
- if serializer is not None:
- return serializer(obj, serialize_method, ignore_attribute,
- ignore, config)
-
- # Primitive
- if isinstance(obj, utils.primitive_types):
- return obj
-
- # Iterative
- elif isinstance(obj, utils.iterable_types):
- # List, set or tuple
- return [dump(item, serialize_method, ignore_attribute, ignore, config)
- for item in obj]
-
- elif isinstance(obj, utils.DictType):
- # Dictionary
- return dict((key, dump(value, serialize_method,
- ignore_attribute, ignore, config))
- for key, value in obj.items())
-
- # It's not a standard type, so it needs __jsonclass__
- module_name = inspect.getmodule(type(obj)).__name__
- json_class = obj.__class__.__name__
-
- if module_name not in ('', '__main__'):
- json_class = '{0}.{1}'.format(module_name, json_class)
-
- # Keep the class name in the returned object
- return_obj = {"__jsonclass__": [json_class]}
-
- # If a serialization method is defined...
- if hasattr(obj, serialize_method):
- # Params can be a dict (keyword) or list (positional)
- # Attrs MUST be a dict.
- serialize = getattr(obj, serialize_method)
- params, attrs = serialize()
- return_obj['__jsonclass__'].append(params)
- return_obj.update(attrs)
- return return_obj
-
- else:
- # Otherwise, try to figure it out
- # Obviously, we can't assume to know anything about the
- # parameters passed to __init__
- return_obj['__jsonclass__'].append([])
-
- # Prepare filtering lists
- known_types = SUPPORTED_TYPES + tuple(config.serialize_handlers)
- ignore_list = getattr(obj, ignore_attribute, []) + ignore
-
- # Find fields and filter them by name
- fields = _find_fields(obj)
- fields.difference_update(ignore_list)
-
- # Dump field values
- attrs = {}
- for attr_name in fields:
- attr_value = getattr(obj, attr_name)
- if isinstance(attr_value, known_types) and \
- attr_value not in ignore_list:
- attrs[attr_name] = dump(attr_value, serialize_method,
- ignore_attribute, ignore, config)
- return_obj.update(attrs)
- return return_obj
-
-# ------------------------------------------------------------------------------
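The serialize_handlers lookup at the top of dump() allows per-type overrides. A hedged sketch that renders datetime objects as ISO strings; the datetime policy is an assumption for illustration, not library behaviour:

import datetime

import jsonrpclib.config
import jsonrpclib.jsonclass as jsonclass

def dump_datetime(obj, serialize_method, ignore_attribute, ignore, config):
    # Handlers receive the same parameters as jsonclass.dump()
    return obj.isoformat()

config = jsonrpclib.config.DEFAULT.copy()
config.serialize_handlers[datetime.datetime] = dump_datetime

print(jsonclass.dump(datetime.datetime(2015, 8, 30), config=config))
# -> '2015-08-30T00:00:00'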
-
-
-def load(obj, classes=None):
- """
- If 'obj' is a dictionary containing a __jsonclass__ entry, converts the
- dictionary item into a bean of this class.
-
- :param obj: An object from a JSON-RPC dictionary
- :param classes: A custom {name: class} dictionary
- :return: The loaded object
- """
- # Primitive
- if isinstance(obj, utils.primitive_types):
- return obj
-
- # List, set or tuple
- elif isinstance(obj, utils.iterable_types):
- # This comes from a JSON parser, so it can only be a list...
- return [load(entry) for entry in obj]
-
- # Otherwise, it's a dict type
- elif '__jsonclass__' not in obj:
- return dict((key, load(value)) for key, value in obj.items())
-
- # It's a dictionary, and it has a __jsonclass__
- orig_module_name = obj['__jsonclass__'][0]
- params = obj['__jsonclass__'][1]
-
- # Validate the module name
- if not orig_module_name:
- raise TranslationError('Module name empty.')
-
- json_module_clean = re.sub(INVALID_MODULE_CHARS, '', orig_module_name)
- if json_module_clean != orig_module_name:
- raise TranslationError('Module name {0} has invalid characters.'
- .format(orig_module_name))
-
- # Load the class
- json_module_parts = json_module_clean.split('.')
- json_class = None
- if classes and len(json_module_parts) == 1:
- # Bare class name: it can only be resolved through the custom classes dictionary
- try:
- json_class = classes[json_module_parts[0]]
- except KeyError:
- raise TranslationError('Unknown class or module {0}.'
- .format(json_module_parts[0]))
-
- else:
- # Module + class
- json_class_name = json_module_parts.pop()
- json_module_tree = '.'.join(json_module_parts)
- try:
- # Use fromlist to load the module itself, not the package
- temp_module = __import__(json_module_tree,
- fromlist=[json_class_name])
- except ImportError:
- raise TranslationError('Could not import {0} from module {1}.'
- .format(json_class_name, json_module_tree))
-
- try:
- json_class = getattr(temp_module, json_class_name)
- except AttributeError:
- raise TranslationError("Unknown class {0}.{1}."
- .format(json_module_tree, json_class_name))
-
- # Create the object
- new_obj = None
- if isinstance(params, utils.ListType):
- try:
- new_obj = json_class(*params)
- except TypeError as ex:
- raise TranslationError("Error instantiating {0}: {1}"
- .format(json_class.__name__, ex))
-
- elif isinstance(params, utils.DictType):
- try:
- new_obj = json_class(**params)
- except TypeError as ex:
- raise TranslationError("Error instantiating {0}: {1}"
- .format(json_class.__name__, ex))
-
- else:
- raise TranslationError("Constructor args must be a dict or a list, "
- "not {0}".format(type(params).__name__))
-
- # Remove the class information, as it must be ignored during the
- # reconstruction of the object
- raw_jsonclass = obj.pop('__jsonclass__')
-
- for key, value in obj.items():
- # Recursive loading
- setattr(new_obj, key, load(value, classes))
-
- # Restore the class information for further usage
- obj['__jsonclass__'] = raw_jsonclass
-
- return new_obj
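Putting dump() and load() together: a hedged round-trip for a bean defined in the calling script, whose bare class name therefore has to be supplied through the classes mapping handled above:

import jsonrpclib.jsonclass as jsonclass

class Point(object):
    def __init__(self, x=0, y=0):
        self.x = x
        self.y = y

flat = jsonclass.dump(Point(3, 4))
# -> {'__jsonclass__': ['Point', []], 'x': 3, 'y': 4} when run as a script

restored = jsonclass.load(flat, classes={'Point': Point})
print(restored.x, restored.y)  # -> 3 4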
diff --git a/scripts/automation/trex_control_plane/python_lib/jsonrpclib-pelix-0.2.5/jsonrpclib/jsonrpc.py b/scripts/external_libs/jsonrpclib-pelix-0.2.5/jsonrpclib/jsonrpc.py index 8ee902b0..8ea3a9c8 100755..100644 --- a/scripts/automation/trex_control_plane/python_lib/jsonrpclib-pelix-0.2.5/jsonrpclib/jsonrpc.py +++ b/scripts/external_libs/jsonrpclib-pelix-0.2.5/jsonrpclib/jsonrpc.py @@ -1,1192 +1,1192 @@ -#!/usr/bin/python
-# -- Content-Encoding: UTF-8 --
-"""
-============================
-JSONRPC Library (jsonrpclib)
-============================
-
-This library is a JSON-RPC v2.0 implementation which
-follows the xmlrpclib API for portability between clients. It
-uses the same Server / ServerProxy, loads, dumps, etc. syntax,
-while providing features not present in XML-RPC like:
-
-* Keyword arguments
-* Notifications
-* Versioning
-* Batches and batch notifications
-
-Eventually, I'll add a SimpleXMLRPCServer compatible library,
-and other things to tie the thing off nicely. :)
-
-For a quick-start, just open a console and type the following,
-replacing the server address, method, and parameters
-appropriately.
->>> import jsonrpclib
->>> server = jsonrpclib.Server('http://localhost:8181')
->>> server.add(5, 6)
-11
->>> server._notify.add(5, 6)
->>> batch = jsonrpclib.MultiCall(server)
->>> batch.add(3, 50)
->>> batch.add(2, 3)
->>> batch._notify.add(3, 5)
->>> batch()
-[53, 5]
-
-See https://github.com/tcalmant/jsonrpclib for more info.
-
-:authors: Josh Marshall, Thomas Calmant
-:copyright: Copyright 2015, isandlaTech
-:license: Apache License 2.0
-:version: 0.2.5
-
-..
-
- Copyright 2015 isandlaTech
-
- Licensed under the Apache License, Version 2.0 (the "License");
- you may not use this file except in compliance with the License.
- You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
- Unless required by applicable law or agreed to in writing, software
- distributed under the License is distributed on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- See the License for the specific language governing permissions and
- limitations under the License.
-"""
-
-# Module version
-__version_info__ = (0, 2, 5)
-__version__ = ".".join(str(x) for x in __version_info__)
-
-# Documentation strings format
-__docformat__ = "restructuredtext en"
-
-# ------------------------------------------------------------------------------
-
-# Library includes
-import jsonrpclib.config
-import jsonrpclib.utils as utils
-
-# Standard library
-import contextlib
-import logging
-import sys
-import uuid
-
-# Create the logger
-_logger = logging.getLogger(__name__)
-
-try:
- # Python 3
- # pylint: disable=F0401,E0611
- from urllib.parse import splittype
- from urllib.parse import splithost
- from xmlrpc.client import Transport as XMLTransport
- from xmlrpc.client import SafeTransport as XMLSafeTransport
- from xmlrpc.client import ServerProxy as XMLServerProxy
- from xmlrpc.client import _Method as XML_Method
-
-except ImportError:
- # Python 2
- # pylint: disable=F0401,E0611
- from urllib import splittype
- from urllib import splithost
- from xmlrpclib import Transport as XMLTransport
- from xmlrpclib import SafeTransport as XMLSafeTransport
- from xmlrpclib import ServerProxy as XMLServerProxy
- from xmlrpclib import _Method as XML_Method
-
-# ------------------------------------------------------------------------------
-# JSON library import
-
-# JSON class serialization
-from jsonrpclib import jsonclass
-
-try:
- # pylint: disable=F0401,E0611
- # Using cjson
- import cjson
- _logger.debug("Using cjson as JSON library")
-
- # Declare cjson methods
- def jdumps(obj, encoding='utf-8'):
- """
- Serializes ``obj`` to a JSON formatted string, using cjson.
- """
- return cjson.encode(obj)
-
- def jloads(json_string):
- """
- Deserializes ``json_string`` (a string containing a JSON document)
- to a Python object, using cjson.
- """
- return cjson.decode(json_string)
-
-except ImportError:
- # pylint: disable=F0401,E0611
- # Use json or simplejson
- try:
- import json
- _logger.debug("Using json as JSON library")
-
- except ImportError:
- try:
- import simplejson as json
- _logger.debug("Using simplejson as JSON library")
- except ImportError:
- _logger.error("No supported JSON library found")
- raise ImportError('You must have the cjson, json, or simplejson '
- 'module(s) available.')
-
- # Declare json methods
- if sys.version_info[0] < 3:
- def jdumps(obj, encoding='utf-8'):
- """
- Serializes ``obj`` to a JSON formatted string.
- """
- # Python 2 (explicit encoding)
- return json.dumps(obj, encoding=encoding)
-
- else:
- # Python 3
- def jdumps(obj, encoding='utf-8'):
- """
- Serializes ``obj`` to a JSON formatted string.
- """
- # Python 3 (the encoding parameter has been removed)
- return json.dumps(obj)
-
- def jloads(json_string):
- """
- Deserializes ``json_string`` (a string containing a JSON document)
- to a Python object.
- """
- return json.loads(json_string)
-
-# ------------------------------------------------------------------------------
-# XMLRPClib re-implementations
-
-
-class ProtocolError(Exception):
- """
- JSON-RPC error
-
- ProtocolError.args[0] can be:
- * an error message (string)
- * a (code, message) tuple
- """
- pass
-
-
-class AppError(ProtocolError):
- """
- Application error: the error code is not one of the pre-defined codes
-
- AppError.args[0][0]: Error code
- AppError.args[0][1]: Error message or trace
- AppError.args[0][2]: Associated data
- """
- def data(self):
- """
- Retrieves the value found in the 'data' entry of the error, or None
-
- :return: The data associated with the error, or None
- """
- return self.args[0][2]
-
-
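# A minimal sketch (illustrative only, not part of the library) of how the
# two exception types above are meant to be unpacked; ProtocolError.args[0]
# may be either a plain message string or a (code, message) tuple.
try:
    raise AppError((-32099, 'custom failure', {'detail': 42}))
except AppError as err:
    code, message, data = err.args[0]
    assert err.data() == {'detail': 42}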
-class JSONParser(object):
- """
- Default JSON parser
- """
- def __init__(self, target):
- """
- Associates the target loader with the parser
-
- :param target: a JSONTarget instance
- """
- self.target = target
-
- def feed(self, data):
- """
- Feeds the associated target with the given data
- """
- self.target.feed(data)
-
- def close(self):
- """
- Does nothing
- """
- pass
-
-
-class JSONTarget(object):
- """
- Unmarshalls stream data to a string
- """
- def __init__(self):
- """
- Sets up the unmarshaller
- """
- self.data = []
-
- def feed(self, data):
- """
- Stores the given raw data into a buffer
- """
- # Store raw data: a chunk might end in the middle of a wide character
- self.data.append(data)
-
- def close(self):
- """
- Unmarshalls the buffered data
- """
- if not self.data:
- return ''
- else:
- # Use type to have a valid join (str vs. bytes)
- data = type(self.data[0])().join(self.data)
- try:
- # Convert the whole final string
- data = utils.from_bytes(data)
- except:
- # Try a pass-through
- pass
-
- return data
-
-
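# A minimal usage sketch (illustrative, not part of the library): raw chunks
# are fed through the parser into the target, and close() joins and decodes
# the buffered data.
target = JSONTarget()
parser = JSONParser(target)
for chunk in (b'{"jsonrpc": "2.0", ', b'"result": 11, "id": "1"}'):
    parser.feed(chunk)
assert target.close() == '{"jsonrpc": "2.0", "result": 11, "id": "1"}'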
-class TransportMixIn(object):
- """ Just extends the XMLRPC transport where necessary. """
- # for Python 2.7 support
- _connection = None
-
- # List of non-overridable headers
- # Use the configuration to change the content-type
- readonly_headers = ('content-length', 'content-type')
-
- def __init__(self, config=jsonrpclib.config.DEFAULT, context=None):
- """
- Sets up the transport
-
- :param config: A JSONRPClib Config instance
- """
- # Store the configuration
- self._config = config
-
- # Store the SSL context
- self.context = context
-
- # Set up the user agent
- self.user_agent = config.user_agent
-
- # Additional headers: list of dictionaries
- self.additional_headers = []
-
- def push_headers(self, headers):
- """
- Adds a dictionary of headers to the additional headers list
-
- :param headers: A dictionary
- """
- self.additional_headers.append(headers)
-
- def pop_headers(self, headers):
- """
- Removes the given dictionary from the additional headers list.
- Also validates that the given headers are on top of the stack
-
- :param headers: Headers to remove
- :raise AssertionError: The given dictionary is not the latest one
- stored in the additional headers list
- """
- assert self.additional_headers[-1] == headers
- self.additional_headers.pop()
-
- def emit_additional_headers(self, connection):
- """
- Puts headers as-is in the request, filtering out read-only headers
-
- :param connection: The request connection
- """
- additional_headers = {}
-
- # Prepare the merged dictionary
- for headers in self.additional_headers:
- additional_headers.update(headers)
-
- # Remove forbidden keys
- for forbidden in self.readonly_headers:
- additional_headers.pop(forbidden, None)
-
- # When the same header was pushed more than once, the value pushed
- # last wins, since it overwrote the earlier ones in the merge above
- for key, value in additional_headers.items():
- key = str(key)
- if key.lower() not in self.readonly_headers:
- # Only accept replaceable headers
- connection.putheader(str(key), str(value))
-
- def send_content(self, connection, request_body):
- """
- Completes the request headers and sends the request body of a JSON-RPC
- request over an HTTPConnection
-
- :param connection: An HTTPConnection object
- :param request_body: JSON-RPC request body
- """
- # Convert the body first
- request_body = utils.to_bytes(request_body)
-
- # "static" headers
- connection.putheader("Content-Type", self._config.content_type)
- connection.putheader("Content-Length", str(len(request_body)))
-
- # Emit additional headers here in order not to override content-length
- self.emit_additional_headers(connection)
-
- connection.endheaders()
- if request_body:
- connection.send(request_body)
-
- def getparser(self):
- """
- Create an instance of the parser, and attach it to an unmarshalling
- object. Return both objects.
-
- :return: The parser and unmarshaller instances
- """
- target = JSONTarget()
- return JSONParser(target), target
-
-
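# A minimal sketch (illustrative, not part of the library) of the header
# stack semantics above: the merge walks the stack in push order, so a value
# pushed later overrides an earlier one, and read-only headers are dropped.
transport = TransportMixIn()
transport.push_headers({'X-Token': 'old', 'Content-Length': '999'})
transport.push_headers({'X-Token': 'new'})
# emit_additional_headers() would now send only 'X-Token: new';
# 'Content-Length' is read-only and filtered out of the merged dictionary.
transport.pop_headers({'X-Token': 'new'})
transport.pop_headers({'X-Token': 'old', 'Content-Length': '999'})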
-class Transport(TransportMixIn, XMLTransport):
- """
- Mixed-in HTTP transport
- """
- pass
-
-
-class SafeTransport(TransportMixIn, XMLSafeTransport):
- """
- Mixed-in HTTPS transport
- """
- pass
-
-# ------------------------------------------------------------------------------
-
-
-class ServerProxy(XMLServerProxy):
- """
- Unfortunately, much more of this class has to be copied since
- so much of it does the serialization.
- """
- def __init__(self, uri, transport=None, encoding=None,
- verbose=0, version=None, headers=None, history=None,
- config=jsonrpclib.config.DEFAULT, context=None):
- """
- Sets up the server proxy
-
- :param uri: Request URI
- :param transport: Custom transport handler
- :param encoding: Specified encoding
- :param verbose: Log verbosity level
- :param version: JSON-RPC specification version
- :param headers: Custom additional headers for each request
- :param history: History object (for tests)
- :param config: A JSONRPClib Config instance
- :param context: The optional SSLContext to use
- """
- # Store the configuration
- self._config = config
- self.__version = version or config.version
-
- schema, uri = splittype(uri)
- if schema not in ('http', 'https'):
- _logger.error("jsonrpclib only support http(s) URIs, not %s",
- schema)
- raise IOError('Unsupported JSON-RPC protocol.')
-
- self.__host, self.__handler = splithost(uri)
- if not self.__handler:
- # Not sure if this is in the JSON spec?
- self.__handler = '/'
-
- if transport is None:
- if schema == 'https':
- transport = SafeTransport(config=config, context=context)
- else:
- transport = Transport(config=config)
- self.__transport = transport
-
- self.__encoding = encoding
- self.__verbose = verbose
- self.__history = history
-
- # Global custom headers are injected into Transport
- self.__transport.push_headers(headers or {})
-
- def _request(self, methodname, params, rpcid=None):
- """
- Calls a method on the remote server
-
- :param methodname: Name of the method to call
- :param params: Method parameters
- :param rpcid: ID of the remote call
- :return: The parsed result of the call
- """
- request = dumps(params, methodname, encoding=self.__encoding,
- rpcid=rpcid, version=self.__version,
- config=self._config)
- response = self._run_request(request)
- check_for_errors(response)
- return response['result']
-
- def _request_notify(self, methodname, params, rpcid=None):
- """
- Calls a method as a notification
-
- :param methodname: Name of the method to call
- :param params: Method parameters
- :param rpcid: ID of the remote call
- """
- request = dumps(params, methodname, encoding=self.__encoding,
- rpcid=rpcid, version=self.__version, notify=True,
- config=self._config)
- response = self._run_request(request, notify=True)
- check_for_errors(response)
-
- def _run_request(self, request, notify=False):
- """
- Sends the given request to the remote server
-
- :param request: The request to send
- :param notify: Notification request flag (unused)
- :return: The response as a parsed JSON object
- """
- if self.__history is not None:
- self.__history.add_request(request)
-
- response = self.__transport.request(
- self.__host,
- self.__handler,
- request,
- verbose=self.__verbose
- )
-
- # Here, the XMLRPC library translates a single list
- # response to the single value -- should we do the
- # same, and require a tuple / list to be passed to
- # the response object, or expect the Server to be
- # outputting the response appropriately?
-
- if self.__history is not None:
- self.__history.add_response(response)
-
- if not response:
- return None
- else:
- return_obj = loads(response, self._config)
- return return_obj
-
- def __getattr__(self, name):
- """
- Returns a callable object to call the remote service
- """
- # Same as original, just with new _Method reference
- return _Method(self._request, name)
-
- def __close(self):
- """
- Closes the transport layer
- """
- try:
- self.__transport.close()
- except AttributeError:
- # Not available in Python 2.6
- pass
-
- def __call__(self, attr):
- """
- A workaround to get special attributes on the ServerProxy
- without interfering with the magic __getattr__
-
- (code from xmlrpclib in Python 2.7)
- """
- if attr == "close":
- return self.__close
-
- elif attr == "transport":
- return self.__transport
-
- raise AttributeError("Attribute {0} not found".format(attr))
-
- @property
- def _notify(self):
- """
- Like __getattr__, but sending a notification request instead of a call
- """
- return _Notify(self._request_notify)
-
- @contextlib.contextmanager
- def _additional_headers(self, headers):
- """
- Allows specifying additional headers, to be added inside the with
- block.
- Example of usage:
-
- >>> with client._additional_headers({'X-Test' : 'Test'}) as new_client:
- ... new_client.method()
- ...
- >>> # Here old headers are restored
- """
- self.__transport.push_headers(headers)
- yield self
- self.__transport.pop_headers(headers)
-
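# A minimal sketch (illustrative, not part of the library): since plain
# attribute access on the proxy is routed to the remote service, the special
# attributes above are reached by calling the proxy itself (the URI below is
# a placeholder).
proxy = ServerProxy('http://localhost:8181')
transport = proxy('transport')   # the underlying Transport instance
proxy('close')()                 # closes the transport layer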
-# ------------------------------------------------------------------------------
-
-
-class _Method(XML_Method):
- """
- Some magic to bind a JSON-RPC method to an RPC server.
- """
- def __call__(self, *args, **kwargs):
- """
- Sends an RPC request and returns the unmarshalled result
- """
- if args and kwargs:
- raise ProtocolError("Cannot use both positional and keyword "
- "arguments (according to JSON-RPC spec.)")
- if args:
- return self.__send(self.__name, args)
- else:
- return self.__send(self.__name, kwargs)
-
- def __getattr__(self, name):
- """
- Returns a Method object for nested calls
- """
- if name == "__name__":
- return self.__name
- return _Method(self.__send, "{0}.{1}".format(self.__name, name))
-
-
-class _Notify(object):
- """
- Same as _Method, but to send notifications
- """
- def __init__(self, request):
- """
- Sets the method to call to send a request to the server
- """
- self._request = request
-
- def __getattr__(self, name):
- """
- Returns a Method object, to be called as a notification
- """
- return _Method(self._request, name)
-
-# ------------------------------------------------------------------------------
-# Batch implementation
-
-
-class MultiCallMethod(object):
- """
- Stores calls made to a MultiCall object for batch execution
- """
- def __init__(self, method, notify=False, config=jsonrpclib.config.DEFAULT):
- """
- Sets up the store
-
- :param method: Name of the method to call
- :param notify: Notification flag
- :param config: Request configuration
- """
- self.method = method
- self.params = []
- self.notify = notify
- self._config = config
-
- def __call__(self, *args, **kwargs):
- """
- Normalizes call parameters
- """
- if kwargs and args:
- raise ProtocolError('JSON-RPC does not support both ' +
- 'positional and keyword arguments.')
- if kwargs:
- self.params = kwargs
- else:
- self.params = args
-
- def request(self, encoding=None, rpcid=None):
- """
- Returns the request object as a JSON-formatted string
- """
- return dumps(self.params, self.method, version=2.0,
- encoding=encoding, rpcid=rpcid, notify=self.notify,
- config=self._config)
-
- def __repr__(self):
- """
- String representation
- """
- return str(self.request())
-
- def __getattr__(self, method):
- """
- Updates the object for a nested call
- """
- self.method = "{0}.{1}".format(self.method, method)
- return self
-
-
-class MultiCallNotify(object):
- """
- Same as MultiCallMethod but for notifications
- """
- def __init__(self, multicall, config=jsonrpclib.config.DEFAULT):
- """
- Sets up the store
-
- :param multicall: The parent MultiCall instance
- :param config: Request configuration
- """
- self.multicall = multicall
- self._config = config
-
- def __getattr__(self, name):
- """
- Returns the MultiCallMethod to use as a notification
- """
- new_job = MultiCallMethod(name, notify=True, config=self._config)
- self.multicall._job_list.append(new_job)
- return new_job
-
-
-class MultiCallIterator(object):
- """
- Iterates over the results of a MultiCall.
- Exceptions are raised in response to JSON-RPC faults
- """
- def __init__(self, results):
- """
- Sets up the results store
- """
- self.results = results
-
- def __get_result(self, item):
- """
- Checks for errors and returns the "real" result stored in a MultiCall
- result.
- """
- check_for_errors(item)
- return item['result']
-
- def __iter__(self):
- """
- Iterates over all results
- """
- for item in self.results:
- yield self.__get_result(item)
-
- def __getitem__(self, i):
- """
- Returns the i-th object of the results
- """
- return self.__get_result(self.results[i])
-
- def __len__(self):
- """
- Returns the number of results stored
- """
- return len(self.results)
-
-
-class MultiCall(object):
- """
- server -> an object used to boxcar method calls, where server should be a
- ServerProxy object.
-
- Methods can be added to the MultiCall using normal
- method call syntax e.g.:
-
- multicall = MultiCall(server_proxy)
- multicall.add(2,3)
- multicall.get_address("Guido")
-
- To execute the multicall, call the MultiCall object e.g.:
-
- add_result, address = multicall()
- """
- def __init__(self, server, config=jsonrpclib.config.DEFAULT):
- """
- Sets up the multicall
-
- :param server: A ServerProxy object
- :param config: Request configuration
- """
- self._server = server
- self._job_list = []
- self._config = config
-
- def _request(self):
- """
- Sends the request to the server and returns the responses
-
- :return: A MultiCallIterator object
- """
- if len(self._job_list) < 1:
- # Should we alert? This /is/ pretty obvious.
- return
- request_body = "[ {0} ]".format(
- ','.join(job.request() for job in self._job_list))
- responses = self._server._run_request(request_body)
- del self._job_list[:]
- if not responses:
- responses = []
- return MultiCallIterator(responses)
-
- @property
- def _notify(self):
- """
- Prepares a notification call
- """
- return MultiCallNotify(self, self._config)
-
- def __getattr__(self, name):
- """
- Registers a method call
- """
- new_job = MultiCallMethod(name, config=self._config)
- self._job_list.append(new_job)
- return new_job
-
- __call__ = _request
-
-# These lines conform to xmlrpclib's "compatibility" line.
-# Not really sure if we should include these, but oh well.
-Server = ServerProxy
-
-# ------------------------------------------------------------------------------
-
-
-class Fault(object):
- """
- JSON-RPC error class
- """
- def __init__(self, code=-32000, message='Server error', rpcid=None,
- config=jsonrpclib.config.DEFAULT, data=None):
- """
- Sets up the error description
-
- :param code: Fault code
- :param message: Associated message
- :param rpcid: Request ID
- :param config: A JSONRPClib Config instance
- :param data: Extra information added to an error description
- """
- self.faultCode = code
- self.faultString = message
- self.rpcid = rpcid
- self.config = config
- self.data = data
-
- def error(self):
- """
- Returns the error as a dictionary
-
- :returns: A {'code', 'message'} dictionary
- """
- return {'code': self.faultCode, 'message': self.faultString,
- 'data': self.data}
-
- def response(self, rpcid=None, version=None):
- """
- Returns the error as a JSON-RPC response string
-
- :param rpcid: Forced request ID
- :param version: JSON-RPC version
- :return: A JSON-RPC response string
- """
- if not version:
- version = self.config.version
-
- if rpcid:
- self.rpcid = rpcid
-
- return dumps(self, methodresponse=True, rpcid=self.rpcid,
- version=version, config=self.config)
-
- def dump(self, rpcid=None, version=None):
- """
- Returns the error as a JSON-RPC response dictionary
-
- :param rpcid: Forced request ID
- :param version: JSON-RPC version
- :return: A JSON-RPC response dictionary
- """
- if not version:
- version = self.config.version
-
- if rpcid:
- self.rpcid = rpcid
-
- return dump(self, is_response=True, rpcid=self.rpcid,
- version=version, config=self.config)
-
- def __repr__(self):
- """
- String representation
- """
- return '<Fault {0}: {1}>'.format(self.faultCode, self.faultString)
-
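# A minimal sketch (illustrative, not part of the library): building a Fault
# and rendering it with the methods above (the values are arbitrary).
fault = Fault(code=-32601, message='Method not found', rpcid='42')
fault.error()      # -> {'code': -32601, 'message': 'Method not found', 'data': None}
fault.response()   # -> the corresponding JSON-RPC error response string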
-
-class Payload(object):
- """
- JSON-RPC content handler
- """
- def __init__(self, rpcid=None, version=None,
- config=jsonrpclib.config.DEFAULT):
- """
- Sets up the JSON-RPC handler
-
- :param rpcid: Request ID
- :param version: JSON-RPC version
- :param config: A JSONRPClib Config instance
- """
- if not version:
- version = config.version
-
- self.id = rpcid
- self.version = float(version)
-
- def request(self, method, params=None):
- """
- Prepares a method call request
-
- :param method: Method name
- :param params: Method parameters
- :return: A JSON-RPC request dictionary
- """
- if not isinstance(method, utils.string_types):
- raise ValueError('Method name must be a string.')
-
- if not self.id:
- # Generate a request ID
- self.id = str(uuid.uuid4())
-
- request = {'id': self.id, 'method': method}
- if params or self.version < 1.1:
- request['params'] = params or []
-
- if self.version >= 2:
- request['jsonrpc'] = str(self.version)
-
- return request
-
- def notify(self, method, params=None):
- """
- Prepares a notification request
-
- :param method: Notification name
- :param params: Notification parameters
- :return: A JSON-RPC notification dictionary
- """
- # Prepare the request dictionary
- request = self.request(method, params)
-
- # Remove the request ID, as it's a notification
- if self.version >= 2:
- del request['id']
- else:
- request['id'] = None
-
- return request
-
- def response(self, result=None):
- """
- Prepares a response dictionary
-
- :param result: The result of the method call
- :return: A JSON-RPC response dictionary
- """
- response = {'result': result, 'id': self.id}
-
- if self.version >= 2:
- response['jsonrpc'] = str(self.version)
- else:
- response['error'] = None
-
- return response
-
- def error(self, code=-32000, message='Server error.', data=None):
- """
- Prepares an error dictionary
-
- :param code: Error code
- :param message: Error message
- :param data: Extra error information (optional)
- :return: A JSON-RPC error dictionary
- """
- error = self.response()
- if self.version >= 2:
- del error['result']
- else:
- error['result'] = None
- error['error'] = {'code': code, 'message': message}
- if data is not None:
- error['error']['data'] = data
- return error
-
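# A minimal sketch (illustrative, not part of the library) of the
# dictionaries Payload produces for version 2.0; the rpcid is arbitrary,
# and a uuid4 string is generated when none is given.
payload = Payload(rpcid='1', version=2.0)
payload.request('add', [2, 3])
# -> {'id': '1', 'method': 'add', 'params': [2, 3], 'jsonrpc': '2.0'}
payload.notify('add', [2, 3])
# -> {'method': 'add', 'params': [2, 3], 'jsonrpc': '2.0'}
payload.response(5)
# -> {'result': 5, 'id': '1', 'jsonrpc': '2.0'}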
-# ------------------------------------------------------------------------------
-
-
-def dump(params=None, methodname=None, rpcid=None, version=None,
- is_response=None, is_notify=None, config=jsonrpclib.config.DEFAULT):
- """
- Prepares a JSON-RPC dictionary (request, notification, response or error)
-
- :param params: Method parameters (if a method name is given) or a Fault
- :param methodname: Method name
- :param rpcid: Request ID
- :param version: JSON-RPC version
- :param is_response: If True, this is a response dictionary
- :param is_notify: If True, this is a notification request
- :param config: A JSONRPClib Config instance
- :return: A JSON-RPC dictionary
- """
- # Default version
- if not version:
- version = config.version
-
- if not is_response and params is None:
- params = []
-
- # Validate method name and parameters
- valid_params = [utils.TupleType, utils.ListType, utils.DictType, Fault]
- if is_response:
- valid_params.append(type(None))
-
- if isinstance(methodname, utils.string_types) and \
- not isinstance(params, tuple(valid_params)):
- """
- If a method name is given and params is not a list-like object,
- a dict or a Fault, error out.
- """
- raise TypeError("Params must be a dict, list, tuple "
- "or Fault instance.")
-
- # Prepares the JSON-RPC content
- payload = Payload(rpcid=rpcid, version=version)
-
- if isinstance(params, Fault):
- # Prepare an error dictionary
- # pylint: disable=E1103
- return payload.error(params.faultCode, params.faultString, params.data)
-
- if not isinstance(methodname, utils.string_types) and not is_response:
- # Neither a request nor a response
- raise ValueError('Method name must be a string, or is_response '
- 'must be set to True.')
-
- if config.use_jsonclass:
- # Use jsonclass to convert the parameters
- params = jsonclass.dump(params, config=config)
-
- if is_response:
- # Prepare a response dictionary
- if rpcid is None:
- # A response must have a request ID
- raise ValueError('A method response must have an rpcid.')
- return payload.response(params)
-
- if is_notify:
- # Prepare a notification dictionary
- return payload.notify(methodname, params)
- else:
- # Prepare a method call dictionary
- return payload.request(methodname, params)
-
-
-def dumps(params=None, methodname=None, methodresponse=None,
- encoding=None, rpcid=None, version=None, notify=None,
- config=jsonrpclib.config.DEFAULT):
- """
- Prepares a JSON-RPC request/response string
-
- :param params: Method parameters (if a method name is given) or a Fault
- :param methodname: Method name
- :param methodresponse: If True, this is a response dictionary
- :param encoding: Result string encoding
- :param rpcid: Request ID
- :param version: JSON-RPC version
- :param notify: If True, this is a notification request
- :param config: A JSONRPClib Config instance
- :return: A JSON-RPC string
- """
- # Prepare the dictionary
- request = dump(params, methodname, rpcid, version, methodresponse, notify,
- config)
-
- # Returns it as a JSON string
- return jdumps(request, encoding=encoding or "UTF-8")
-
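# A minimal sketch (illustrative, not part of the library): for the same
# arguments, dump() returns the dictionary and dumps() its serialized form
# (key order in the string depends on the JSON backend).
dump([2, 3], 'add', rpcid='1', version=2.0)
# -> {'id': '1', 'method': 'add', 'params': [2, 3], 'jsonrpc': '2.0'}
dumps([2, 3], 'add', rpcid='1', version=2.0)
# -> '{"id": "1", "method": "add", "params": [2, 3], "jsonrpc": "2.0"}'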
-
-def load(data, config=jsonrpclib.config.DEFAULT):
- """
- Loads a JSON-RPC request/response dictionary. Calls jsonclass to load beans
-
- :param data: A JSON-RPC dictionary
- :param config: A JSONRPClib Config instance (or None for default values)
- :return: A parsed dictionary or None
- """
- if data is None:
- # Notification
- return None
-
- # if the above raises an error, the implementing server code
- # should return something like the following:
- # { 'jsonrpc':'2.0', 'error': fault.error(), id: None }
- if config.use_jsonclass:
- # Convert beans
- data = jsonclass.load(data, config.classes)
-
- return data
-
-
-def loads(data, config=jsonrpclib.config.DEFAULT):
- """
- Loads a JSON-RPC request/response string. Calls jsonclass to load beans
-
- :param data: A JSON-RPC string
- :param config: A JSONRPClib Config instance (or None for default values)
- :return: A parsed dictionary or None
- """
- if data == '':
- # Notification
- return None
-
- # Parse the JSON dictionary
- result = jloads(data)
-
- # Load the beans
- return load(result, config)
-
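# A minimal sketch (illustrative, not part of the library):
loads('{"jsonrpc": "2.0", "result": 11, "id": "1"}')
# -> {'jsonrpc': '2.0', 'result': 11, 'id': '1'}
loads('')   # -> None (an empty string is treated as a notification)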
-# ------------------------------------------------------------------------------
-
-
-def check_for_errors(result):
- """
- Checks if a result dictionary signals an error
-
- :param result: A result dictionary
- :raise TypeError: Invalid parameter
- :raise NotImplementedError: Unknown JSON-RPC version
- :raise ValueError: Invalid dictionary content
- :raise ProtocolError: An error occurred on the server side
- :return: The result parameter
- """
- if not result:
- # Notification
- return result
-
- if not isinstance(result, utils.DictType):
- # Invalid argument
- raise TypeError('Response is not a dict.')
-
- if 'jsonrpc' in result and float(result['jsonrpc']) > 2.0:
- # Unknown JSON-RPC version
- raise NotImplementedError('JSON-RPC version not yet supported.')
-
- if 'result' not in result and 'error' not in result:
- # Invalid dictionary content
- raise ValueError('Response does not have a result or error key.')
-
- if 'error' in result and result['error']:
- # Server-side error
- if 'code' in result['error']:
- # Code + Message
- code = result['error']['code']
- try:
- # Get the message (jsonrpclib)
- message = result['error']['message']
- except KeyError:
- # Get the trace (jabsorb)
- message = result['error'].get('trace', '<no error message>')
-
- if -32700 <= code <= -32000:
- # Pre-defined errors
- # See http://www.jsonrpc.org/specification#error_object
- raise ProtocolError((code, message))
- else:
- # Application error
- data = result['error'].get('data', None)
- raise AppError((code, message, data))
-
- elif isinstance(result['error'], dict) and len(result['error']) == 1:
- # Error with a single entry ('reason', ...): use its content
- error_key = next(iter(result['error']))  # single key; works on Python 2 and 3
- raise ProtocolError(result['error'][error_key])
-
- else:
- # Use the raw error content
- raise ProtocolError(result['error'])
-
- return result
-
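# A minimal sketch (illustrative, not part of the library) of both outcomes:
check_for_errors({'jsonrpc': '2.0', 'result': 11, 'id': '1'})
# -> returns the dictionary unchanged
check_for_errors({'jsonrpc': '2.0', 'id': '1',
                  'error': {'code': -32601, 'message': 'Method not found'}})
# -> raises ProtocolError((-32601, 'Method not found')), a pre-defined code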
-
-def isbatch(request):
- """
- Tests if the given request is a batch call, i.e. a list of multiple calls
- :param request: a JSON-RPC request object
- :return: True if the request is a batch call
- """
- if not isinstance(request, (utils.ListType, utils.TupleType)):
- # Not a list: not a batch call
- return False
- elif len(request) < 1:
- # An empty list: not a batch call
- return False
- elif not isinstance(request[0], utils.DictType):
- # One of the requests is not a dictionary, i.e. a JSON Object
- # therefore it is not a valid JSON-RPC request
- return False
- elif 'jsonrpc' not in request[0].keys():
- # No "jsonrpc" version in the JSON object: not a request
- return False
-
- try:
- version = float(request[0]['jsonrpc'])
- except ValueError:
- # Bad version of JSON-RPC
- raise ProtocolError('"jsonrpc" key must be a float(able) value.')
-
- if version < 2:
- # Batch calls were not supported before JSON-RPC 2.0
- return False
-
- return True
-
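# A minimal sketch (illustrative, not part of the library): only a non-empty
# list of JSON-RPC 2.0 objects counts as a batch call.
isbatch([{'jsonrpc': '2.0', 'method': 'add', 'params': [2, 3], 'id': '1'}])
# -> True
isbatch({'jsonrpc': '2.0', 'method': 'add', 'id': '1'})
# -> False (a single request object, not a list)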
-
-def isnotification(request):
- """
- Tests if the given request is a notification
-
- :param request: A request dictionary
- :return: True if the request is a notification
- """
- if 'id' not in request:
- # 2.0 notification
- return True
-
- if request['id'] is None:
- # 1.0 notification
- return True
-
- return False
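# A minimal sketch (illustrative, not part of the library): both the 2.0 form
# (no 'id' key) and the 1.0 form ('id' set to None) count as notifications.
isnotification({'jsonrpc': '2.0', 'method': 'log'})               # -> True
isnotification({'method': 'log', 'id': None})                     # -> True
isnotification({'jsonrpc': '2.0', 'method': 'add', 'id': '1'})    # -> False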
diff --git a/scripts/automation/trex_control_plane/python_lib/jsonrpclib-pelix-0.2.5/jsonrpclib/threadpool.py b/scripts/external_libs/jsonrpclib-pelix-0.2.5/jsonrpclib/threadpool.py
index 3919c105..a38b5b83 100755..100644
--- a/scripts/automation/trex_control_plane/python_lib/jsonrpclib-pelix-0.2.5/jsonrpclib/threadpool.py
+++ b/scripts/external_libs/jsonrpclib-pelix-0.2.5/jsonrpclib/threadpool.py
@@ -1,490 +1,490 @@
-#!/usr/bin/env python
-# -- Content-Encoding: UTF-8 --
-"""
-Cached thread pool, inspired from Pelix/iPOPO Thread Pool
-
-:author: Thomas Calmant
-:copyright: Copyright 2015, isandlaTech
-:license: Apache License 2.0
-:version: 0.2.5
-
-..
-
- Copyright 2015 isandlaTech
-
- Licensed under the Apache License, Version 2.0 (the "License");
- you may not use this file except in compliance with the License.
- You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
- Unless required by applicable law or agreed to in writing, software
- distributed under the License is distributed on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- See the License for the specific language governing permissions and
- limitations under the License.
-"""
-
-# Documentation strings format
-__docformat__ = "restructuredtext en"
-
-# Module version
-__version_info__ = (0, 2, 5)
-__version__ = ".".join(str(x) for x in __version_info__)
-
-# ------------------------------------------------------------------------------
-
-# Standard library
-import logging
-import threading
-
-try:
- # Python 3
- # pylint: disable=F0401
- import queue
-except ImportError:
- # Python 2
- # pylint: disable=F0401
- import Queue as queue
-
-# ------------------------------------------------------------------------------
-
-
-class EventData(object):
- """
- A threading event with some associated data
- """
- def __init__(self):
- """
- Sets up the event
- """
- self.__event = threading.Event()
- self.__data = None
- self.__exception = None
-
- @property
- def data(self):
- """
- Returns the associated value
- """
- return self.__data
-
- @property
- def exception(self):
- """
- Returns the exception used to stop the wait() method
- """
- return self.__exception
-
- def clear(self):
- """
- Clears the event
- """
- self.__event.clear()
- self.__data = None
- self.__exception = None
-
- def is_set(self):
- """
- Checks if the event is set
- """
- return self.__event.is_set()
-
- def set(self, data=None):
- """
- Sets the event
- """
- self.__data = data
- self.__exception = None
- self.__event.set()
-
- def raise_exception(self, exception):
- """
- Raises an exception in wait()
-
- :param exception: An Exception object
- """
- self.__data = None
- self.__exception = exception
- self.__event.set()
-
- def wait(self, timeout=None):
- """
- Waits for the event or for the timeout
-
- :param timeout: Wait timeout (in seconds)
-        :return: True if the event has been set, else False
- """
- # The 'or' part is for Python 2.6
- result = self.__event.wait(timeout) or self.__event.is_set()
- # pylint: disable=E0702
- # Pylint seems to miss the "is None" check below
- if self.__exception is None:
- return result
- else:
- raise self.__exception
-
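(A minimal usage sketch of EventData, not part of the original file; the
worker function and timeout value are illustrative assumptions.)

    import threading
    from jsonrpclib.threadpool import EventData

    event = EventData()

    def worker():
        event.set(data=42)  # wakes any thread blocked in wait()

    threading.Thread(target=worker).start()
    assert event.wait(timeout=1.0)  # True once the event has been set
    assert event.data == 42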
-
-class FutureResult(object):
- """
- An object to wait for the result of a threaded execution
- """
- def __init__(self, logger=None):
- """
- Sets up the FutureResult object
-
- :param logger: The Logger to use in case of error (optional)
- """
- self._logger = logger or logging.getLogger(__name__)
- self._done_event = EventData()
- self.__callback = None
- self.__extra = None
-
- def __notify(self):
- """
- Notify the given callback about the result of the execution
- """
- if self.__callback is not None:
- try:
- self.__callback(self._done_event.data,
- self._done_event.exception,
- self.__extra)
- except Exception as ex:
- self._logger.exception("Error calling back method: %s", ex)
-
- def set_callback(self, method, extra=None):
- """
- Sets a callback method, called once the result has been computed or in
- case of exception.
-
- The callback method must have the following signature:
- ``callback(result, exception, extra)``.
-
-        :param method: The method to call back at the end of the execution
- :param extra: Extra parameter to be given to the callback method
- """
- self.__callback = method
- self.__extra = extra
- if self._done_event.is_set():
- # The execution has already finished
- self.__notify()
-
- def execute(self, method, args, kwargs):
- """
-        Executes the given method and stores its result.
- The result is considered "done" even if the method raises an exception
-
- :param method: The method to execute
- :param args: Method positional arguments
- :param kwargs: Method keyword arguments
- :raise Exception: The exception raised by the method
- """
- # Normalize arguments
- if args is None:
- args = []
-
- if kwargs is None:
- kwargs = {}
-
- try:
- # Call the method
- result = method(*args, **kwargs)
- except Exception as ex:
- # Something went wrong: propagate to the event and to the caller
- self._done_event.raise_exception(ex)
- raise
- else:
- # Store the result
- self._done_event.set(result)
- finally:
- # In any case: notify the call back (if any)
- self.__notify()
-
- def done(self):
- """
- Returns True if the job has finished, else False
- """
- return self._done_event.is_set()
-
- def result(self, timeout=None):
- """
-        Waits up to timeout for the result of the threaded job.
-        Returns the result immediately if the job has already finished.
-
- :param timeout: The maximum time to wait for a result (in seconds)
-        :raise OSError: The timeout was reached before the job finished
- :raise Exception: The exception encountered during the call, if any
- """
- if self._done_event.wait(timeout):
- return self._done_event.data
- else:
- raise OSError("Timeout raised")
-
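(A hedged sketch of FutureResult in action, not part of the original file;
the choice of pow and the timeout are assumptions.)

    import threading
    from jsonrpclib.threadpool import FutureResult

    future = FutureResult()
    # execute() calls the method and stores its result (or exception)
    threading.Thread(target=future.execute, args=(pow, (2, 10), {})).start()
    print(future.result(timeout=1.0))  # -> 1024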
-# ------------------------------------------------------------------------------
-
-
-class ThreadPool(object):
- """
- Executes the tasks stored in a FIFO in a thread pool
- """
- def __init__(self, max_threads, min_threads=1, queue_size=0, timeout=60,
- logname=None):
- """
- Sets up the thread pool.
-
- Threads are kept alive 60 seconds (timeout argument).
-
- :param max_threads: Maximum size of the thread pool
- :param min_threads: Minimum size of the thread pool
- :param queue_size: Size of the task queue (0 for infinite)
- :param timeout: Queue timeout (in seconds, 60s by default)
- :param logname: Name of the logger
- :raise ValueError: Invalid number of threads
- """
- # Validate parameters
- try:
- max_threads = int(max_threads)
- if max_threads < 1:
- raise ValueError("Pool size must be greater than 0")
- except (TypeError, ValueError) as ex:
- raise ValueError("Invalid pool size: {0}".format(ex))
-
- try:
- min_threads = int(min_threads)
- if min_threads < 0:
- min_threads = 0
- elif min_threads > max_threads:
- min_threads = max_threads
- except (TypeError, ValueError) as ex:
- raise ValueError("Invalid pool size: {0}".format(ex))
-
- # The logger
- self._logger = logging.getLogger(logname or __name__)
-
- # The loop control event
- self._done_event = threading.Event()
- self._done_event.set()
-
- # The task queue
- try:
- queue_size = int(queue_size)
- except (TypeError, ValueError):
- # Not a valid integer
- queue_size = 0
-
- self._queue = queue.Queue(queue_size)
- self._timeout = timeout
- self.__lock = threading.RLock()
-
- # The thread pool
- self._min_threads = min_threads
- self._max_threads = max_threads
- self._threads = []
-
- # Thread count
- self._thread_id = 0
-
- # Current number of threads, active and alive
- self.__nb_threads = 0
- self.__nb_active_threads = 0
-
- def start(self):
- """
- Starts the thread pool. Does nothing if the pool is already started.
- """
- if not self._done_event.is_set():
- # Stop event not set: we're running
- return
-
- # Clear the stop event
- self._done_event.clear()
-
- # Compute the number of threads to start to handle pending tasks
- nb_pending_tasks = self._queue.qsize()
- if nb_pending_tasks > self._max_threads:
- nb_threads = self._max_threads
- elif nb_pending_tasks < self._min_threads:
- nb_threads = self._min_threads
- else:
- nb_threads = nb_pending_tasks
-
- # Create the threads
- for _ in range(nb_threads):
- self.__start_thread()
-
- def __start_thread(self):
- """
- Starts a new thread, if possible
- """
- with self.__lock:
- if self.__nb_threads >= self._max_threads:
- # Can't create more threads
- return False
-
- if self._done_event.is_set():
- # We're stopped: do nothing
- return False
-
- # Prepare thread and start it
- name = "{0}-{1}".format(self._logger.name, self._thread_id)
- self._thread_id += 1
-
- thread = threading.Thread(target=self.__run, name=name)
- thread.daemon = True
- self._threads.append(thread)
- thread.start()
- return True
-
- def stop(self):
- """
- Stops the thread pool. Does nothing if the pool is already stopped.
- """
- if self._done_event.is_set():
- # Stop event set: we're stopped
- return
-
- # Set the stop event
- self._done_event.set()
-
- with self.__lock:
- # Add something in the queue (to unlock the join())
- try:
- for _ in self._threads:
- self._queue.put(self._done_event, True, self._timeout)
- except queue.Full:
- # There is already something in the queue
- pass
-
- # Copy the list of threads to wait for
- threads = self._threads[:]
-
- # Join threads outside the lock
- for thread in threads:
- while thread.is_alive():
- # Wait 3 seconds
- thread.join(3)
- if thread.is_alive():
- # Thread is still alive: something might be wrong
- self._logger.warning("Thread %s is still alive...",
- thread.name)
-
- # Clear storage
- del self._threads[:]
- self.clear()
-
- def enqueue(self, method, *args, **kwargs):
- """
- Queues a task in the pool
-
- :param method: Method to call
- :return: A FutureResult object, to get the result of the task
- :raise ValueError: Invalid method
- :raise Full: The task queue is full
- """
- if not hasattr(method, '__call__'):
- raise ValueError("{0} has no __call__ member."
- .format(method.__name__))
-
- # Prepare the future result object
- future = FutureResult(self._logger)
-
- # Use a lock, as we might be "resetting" the queue
- with self.__lock:
- # Add the task to the queue
- self._queue.put((method, args, kwargs, future), True,
- self._timeout)
-
- if self.__nb_active_threads == self.__nb_threads:
- # All threads are taken: start a new one
- self.__start_thread()
-
- return future
-
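(To make enqueue() concrete, a minimal sketch; the pool sizes and the use
of pow are illustrative assumptions, not part of the original file.)

    from jsonrpclib.threadpool import ThreadPool

    pool = ThreadPool(max_threads=4, min_threads=1)
    pool.start()
    future = pool.enqueue(pow, 2, 8)  # pow(2, 8) runs on a worker thread
    print(future.result(timeout=5))   # -> 256
    pool.stop()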
- def clear(self):
- """
- Empties the current queue content.
-        Returns once the queue has been emptied.
- """
- with self.__lock:
- # Empty the current queue
- try:
- while True:
- self._queue.get_nowait()
- self._queue.task_done()
- except queue.Empty:
- # Queue is now empty
- pass
-
- # Wait for the tasks currently executed
- self.join()
-
- def join(self, timeout=None):
- """
- Waits for all the tasks to be executed
-
- :param timeout: Maximum time to wait (in seconds)
- :return: True if the queue has been emptied, else False
- """
- if self._queue.empty():
- # Nothing to wait for...
- return True
- elif timeout is None:
- # Use the original join
- self._queue.join()
- return True
- else:
- # Wait for the condition
- with self._queue.all_tasks_done:
- self._queue.all_tasks_done.wait(timeout)
- return not bool(self._queue.unfinished_tasks)
-
- def __run(self):
- """
- The main loop
- """
- with self.__lock:
- self.__nb_threads += 1
-
- while not self._done_event.is_set():
- try:
- # Wait for an action (blocking)
- task = self._queue.get(True, self._timeout)
- if task is self._done_event:
- # Stop event in the queue: get out
- self._queue.task_done()
- with self.__lock:
- self.__nb_threads -= 1
- return
- except queue.Empty:
- # Nothing to do yet
- pass
- else:
- with self.__lock:
- self.__nb_active_threads += 1
-
- # Extract elements
- method, args, kwargs, future = task
- try:
- # Call the method
- future.execute(method, args, kwargs)
- except Exception as ex:
- self._logger.exception("Error executing %s: %s",
- method.__name__, ex)
- finally:
- # Mark the action as executed
- self._queue.task_done()
-
- # Thread is not active anymore
- self.__nb_active_threads -= 1
-
- # Clean up thread if necessary
- with self.__lock:
- if self.__nb_threads > self._min_threads:
- # No more work for this thread, and we're above the
- # minimum number of threads: stop this one
- self.__nb_threads -= 1
- return
-
- with self.__lock:
- # Thread stops
- self.__nb_threads -= 1
+#!/usr/bin/env python +# -- Content-Encoding: UTF-8 -- +""" +Cached thread pool, inspired from Pelix/iPOPO Thread Pool + +:author: Thomas Calmant +:copyright: Copyright 2015, isandlaTech +:license: Apache License 2.0 +:version: 0.2.5 + +.. + + Copyright 2015 isandlaTech + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +""" + +# Documentation strings format +__docformat__ = "restructuredtext en" + +# Module version +__version_info__ = (0, 2, 5) +__version__ = ".".join(str(x) for x in __version_info__) + +# ------------------------------------------------------------------------------ + +# Standard library +import logging +import threading + +try: + # Python 3 + # pylint: disable=F0401 + import queue +except ImportError: + # Python 2 + # pylint: disable=F0401 + import Queue as queue + +# ------------------------------------------------------------------------------ + + +class EventData(object): + """ + A threading event with some associated data + """ + def __init__(self): + """ + Sets up the event + """ + self.__event = threading.Event() + self.__data = None + self.__exception = None + + @property + def data(self): + """ + Returns the associated value + """ + return self.__data + + @property + def exception(self): + """ + Returns the exception used to stop the wait() method + """ + return self.__exception + + def clear(self): + """ + Clears the event + """ + self.__event.clear() + self.__data = None + self.__exception = None + + def is_set(self): + """ + Checks if the event is set + """ + return self.__event.is_set() + + def set(self, data=None): + """ + Sets the event + """ + self.__data = data + self.__exception = None + self.__event.set() + + def raise_exception(self, exception): + """ + Raises an exception in wait() + + :param exception: An Exception object + """ + self.__data = None + self.__exception = exception + self.__event.set() + + def wait(self, timeout=None): + """ + Waits for the event or for the timeout + + :param timeout: Wait timeout (in seconds) + :return: True if the event as been set, else False + """ + # The 'or' part is for Python 2.6 + result = self.__event.wait(timeout) or self.__event.is_set() + # pylint: disable=E0702 + # Pylint seems to miss the "is None" check below + if self.__exception is None: + return result + else: + raise self.__exception + + +class FutureResult(object): + """ + An object to wait for the result of a threaded execution + """ + def __init__(self, logger=None): + """ + Sets up the FutureResult object + + :param logger: The Logger to use in case of error (optional) + """ + self._logger = logger or logging.getLogger(__name__) + self._done_event = EventData() + self.__callback = None + self.__extra = None + + def __notify(self): + """ + Notify the given callback about the result of the execution + """ + if self.__callback is not None: + try: + self.__callback(self._done_event.data, + self._done_event.exception, + self.__extra) + except Exception as ex: + self._logger.exception("Error calling back method: %s", ex) + + def set_callback(self, method, extra=None): + 
""" + Sets a callback method, called once the result has been computed or in + case of exception. + + The callback method must have the following signature: + ``callback(result, exception, extra)``. + + :param method: The method to call back in the end of the execution + :param extra: Extra parameter to be given to the callback method + """ + self.__callback = method + self.__extra = extra + if self._done_event.is_set(): + # The execution has already finished + self.__notify() + + def execute(self, method, args, kwargs): + """ + Execute the given method and stores its result. + The result is considered "done" even if the method raises an exception + + :param method: The method to execute + :param args: Method positional arguments + :param kwargs: Method keyword arguments + :raise Exception: The exception raised by the method + """ + # Normalize arguments + if args is None: + args = [] + + if kwargs is None: + kwargs = {} + + try: + # Call the method + result = method(*args, **kwargs) + except Exception as ex: + # Something went wrong: propagate to the event and to the caller + self._done_event.raise_exception(ex) + raise + else: + # Store the result + self._done_event.set(result) + finally: + # In any case: notify the call back (if any) + self.__notify() + + def done(self): + """ + Returns True if the job has finished, else False + """ + return self._done_event.is_set() + + def result(self, timeout=None): + """ + Waits up to timeout for the result the threaded job. + Returns immediately the result if the job has already been done. + + :param timeout: The maximum time to wait for a result (in seconds) + :raise OSError: The timeout raised before the job finished + :raise Exception: The exception encountered during the call, if any + """ + if self._done_event.wait(timeout): + return self._done_event.data + else: + raise OSError("Timeout raised") + +# ------------------------------------------------------------------------------ + + +class ThreadPool(object): + """ + Executes the tasks stored in a FIFO in a thread pool + """ + def __init__(self, max_threads, min_threads=1, queue_size=0, timeout=60, + logname=None): + """ + Sets up the thread pool. + + Threads are kept alive 60 seconds (timeout argument). 
+ + :param max_threads: Maximum size of the thread pool + :param min_threads: Minimum size of the thread pool + :param queue_size: Size of the task queue (0 for infinite) + :param timeout: Queue timeout (in seconds, 60s by default) + :param logname: Name of the logger + :raise ValueError: Invalid number of threads + """ + # Validate parameters + try: + max_threads = int(max_threads) + if max_threads < 1: + raise ValueError("Pool size must be greater than 0") + except (TypeError, ValueError) as ex: + raise ValueError("Invalid pool size: {0}".format(ex)) + + try: + min_threads = int(min_threads) + if min_threads < 0: + min_threads = 0 + elif min_threads > max_threads: + min_threads = max_threads + except (TypeError, ValueError) as ex: + raise ValueError("Invalid pool size: {0}".format(ex)) + + # The logger + self._logger = logging.getLogger(logname or __name__) + + # The loop control event + self._done_event = threading.Event() + self._done_event.set() + + # The task queue + try: + queue_size = int(queue_size) + except (TypeError, ValueError): + # Not a valid integer + queue_size = 0 + + self._queue = queue.Queue(queue_size) + self._timeout = timeout + self.__lock = threading.RLock() + + # The thread pool + self._min_threads = min_threads + self._max_threads = max_threads + self._threads = [] + + # Thread count + self._thread_id = 0 + + # Current number of threads, active and alive + self.__nb_threads = 0 + self.__nb_active_threads = 0 + + def start(self): + """ + Starts the thread pool. Does nothing if the pool is already started. + """ + if not self._done_event.is_set(): + # Stop event not set: we're running + return + + # Clear the stop event + self._done_event.clear() + + # Compute the number of threads to start to handle pending tasks + nb_pending_tasks = self._queue.qsize() + if nb_pending_tasks > self._max_threads: + nb_threads = self._max_threads + elif nb_pending_tasks < self._min_threads: + nb_threads = self._min_threads + else: + nb_threads = nb_pending_tasks + + # Create the threads + for _ in range(nb_threads): + self.__start_thread() + + def __start_thread(self): + """ + Starts a new thread, if possible + """ + with self.__lock: + if self.__nb_threads >= self._max_threads: + # Can't create more threads + return False + + if self._done_event.is_set(): + # We're stopped: do nothing + return False + + # Prepare thread and start it + name = "{0}-{1}".format(self._logger.name, self._thread_id) + self._thread_id += 1 + + thread = threading.Thread(target=self.__run, name=name) + thread.daemon = True + self._threads.append(thread) + thread.start() + return True + + def stop(self): + """ + Stops the thread pool. Does nothing if the pool is already stopped. 
+ """ + if self._done_event.is_set(): + # Stop event set: we're stopped + return + + # Set the stop event + self._done_event.set() + + with self.__lock: + # Add something in the queue (to unlock the join()) + try: + for _ in self._threads: + self._queue.put(self._done_event, True, self._timeout) + except queue.Full: + # There is already something in the queue + pass + + # Copy the list of threads to wait for + threads = self._threads[:] + + # Join threads outside the lock + for thread in threads: + while thread.is_alive(): + # Wait 3 seconds + thread.join(3) + if thread.is_alive(): + # Thread is still alive: something might be wrong + self._logger.warning("Thread %s is still alive...", + thread.name) + + # Clear storage + del self._threads[:] + self.clear() + + def enqueue(self, method, *args, **kwargs): + """ + Queues a task in the pool + + :param method: Method to call + :return: A FutureResult object, to get the result of the task + :raise ValueError: Invalid method + :raise Full: The task queue is full + """ + if not hasattr(method, '__call__'): + raise ValueError("{0} has no __call__ member." + .format(method.__name__)) + + # Prepare the future result object + future = FutureResult(self._logger) + + # Use a lock, as we might be "resetting" the queue + with self.__lock: + # Add the task to the queue + self._queue.put((method, args, kwargs, future), True, + self._timeout) + + if self.__nb_active_threads == self.__nb_threads: + # All threads are taken: start a new one + self.__start_thread() + + return future + + def clear(self): + """ + Empties the current queue content. + Returns once the queue have been emptied. + """ + with self.__lock: + # Empty the current queue + try: + while True: + self._queue.get_nowait() + self._queue.task_done() + except queue.Empty: + # Queue is now empty + pass + + # Wait for the tasks currently executed + self.join() + + def join(self, timeout=None): + """ + Waits for all the tasks to be executed + + :param timeout: Maximum time to wait (in seconds) + :return: True if the queue has been emptied, else False + """ + if self._queue.empty(): + # Nothing to wait for... 
+ return True + elif timeout is None: + # Use the original join + self._queue.join() + return True + else: + # Wait for the condition + with self._queue.all_tasks_done: + self._queue.all_tasks_done.wait(timeout) + return not bool(self._queue.unfinished_tasks) + + def __run(self): + """ + The main loop + """ + with self.__lock: + self.__nb_threads += 1 + + while not self._done_event.is_set(): + try: + # Wait for an action (blocking) + task = self._queue.get(True, self._timeout) + if task is self._done_event: + # Stop event in the queue: get out + self._queue.task_done() + with self.__lock: + self.__nb_threads -= 1 + return + except queue.Empty: + # Nothing to do yet + pass + else: + with self.__lock: + self.__nb_active_threads += 1 + + # Extract elements + method, args, kwargs, future = task + try: + # Call the method + future.execute(method, args, kwargs) + except Exception as ex: + self._logger.exception("Error executing %s: %s", + method.__name__, ex) + finally: + # Mark the action as executed + self._queue.task_done() + + # Thread is not active anymore + self.__nb_active_threads -= 1 + + # Clean up thread if necessary + with self.__lock: + if self.__nb_threads > self._min_threads: + # No more work for this thread, and we're above the + # minimum number of threads: stop this one + self.__nb_threads -= 1 + return + + with self.__lock: + # Thread stops + self.__nb_threads -= 1 diff --git a/scripts/automation/trex_control_plane/python_lib/jsonrpclib-pelix-0.2.5/jsonrpclib/utils.py b/scripts/external_libs/jsonrpclib-pelix-0.2.5/jsonrpclib/utils.py index 01b71fce..31183742 100755..100644 --- a/scripts/automation/trex_control_plane/python_lib/jsonrpclib-pelix-0.2.5/jsonrpclib/utils.py +++ b/scripts/external_libs/jsonrpclib-pelix-0.2.5/jsonrpclib/utils.py @@ -1,122 +1,122 @@ -#!/usr/bin/python
-# -- Content-Encoding: UTF-8 --
-"""
-Utility methods, for compatibility between Python versions
-
-:author: Thomas Calmant
-:copyright: Copyright 2015, isandlaTech
-:license: Apache License 2.0
-:version: 0.2.5
-
-..
-
- Copyright 2015 isandlaTech
-
- Licensed under the Apache License, Version 2.0 (the "License");
- you may not use this file except in compliance with the License.
- You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
- Unless required by applicable law or agreed to in writing, software
- distributed under the License is distributed on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- See the License for the specific language governing permissions and
- limitations under the License.
-"""
-
-# Module version
-__version_info__ = (0, 2, 5)
-__version__ = ".".join(str(x) for x in __version_info__)
-
-# Documentation strings format
-__docformat__ = "restructuredtext en"
-
-# ------------------------------------------------------------------------------
-
-import sys
-
-# ------------------------------------------------------------------------------
-
-if sys.version_info[0] < 3:
- # Python 2
- import types
- try:
- string_types = (
- types.StringType,
- types.UnicodeType
- )
- except NameError:
- # Python built without unicode support
- string_types = (types.StringType,)
-
- numeric_types = (
- types.IntType,
- types.LongType,
- types.FloatType
- )
-
- def to_bytes(string):
- """
- Converts the given string into bytes
- """
- if type(string) is unicode:
- return str(string)
- return string
-
- def from_bytes(data):
- """
- Converts the given bytes into a string
- """
- if type(data) is str:
- return data
- return str(data)
-
-else:
- # Python 3
- string_types = (
- bytes,
- str
- )
-
- numeric_types = (
- int,
- float
- )
-
- def to_bytes(string):
- """
- Converts the given string into bytes
- """
- if type(string) is bytes:
- return string
- return bytes(string, "UTF-8")
-
- def from_bytes(data):
- """
- Converts the given bytes into a string
- """
- if type(data) is str:
- return data
- return str(data, "UTF-8")
-
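(A quick sketch of the Python 3 branch above, added for illustration; the
sample values are assumptions.)

    to_bytes("caf\u00e9")       # -> b'caf\xc3\xa9' (str is UTF-8 encoded)
    from_bytes(b"caf\xc3\xa9")  # -> 'café' (bytes are UTF-8 decoded)
    to_bytes(b"kept")           # -> b'kept' (bytes pass through unchanged)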
-# ------------------------------------------------------------------------------
-# Common
-
-DictType = dict
-
-ListType = list
-TupleType = tuple
-
-iterable_types = (
- list,
- set, frozenset,
- tuple
-)
-
-value_types = (
- bool,
- type(None)
-)
-
-primitive_types = string_types + numeric_types + value_types
+#!/usr/bin/python +# -- Content-Encoding: UTF-8 -- +""" +Utility methods, for compatibility between Python version + +:author: Thomas Calmant +:copyright: Copyright 2015, isandlaTech +:license: Apache License 2.0 +:version: 0.2.5 + +.. + + Copyright 2015 isandlaTech + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +""" + +# Module version +__version_info__ = (0, 2, 5) +__version__ = ".".join(str(x) for x in __version_info__) + +# Documentation strings format +__docformat__ = "restructuredtext en" + +# ------------------------------------------------------------------------------ + +import sys + +# ------------------------------------------------------------------------------ + +if sys.version_info[0] < 3: + # Python 2 + import types + try: + string_types = ( + types.StringType, + types.UnicodeType + ) + except NameError: + # Python built without unicode support + string_types = (types.StringType,) + + numeric_types = ( + types.IntType, + types.LongType, + types.FloatType + ) + + def to_bytes(string): + """ + Converts the given string into bytes + """ + if type(string) is unicode: + return str(string) + return string + + def from_bytes(data): + """ + Converts the given bytes into a string + """ + if type(data) is str: + return data + return str(data) + +else: + # Python 3 + string_types = ( + bytes, + str + ) + + numeric_types = ( + int, + float + ) + + def to_bytes(string): + """ + Converts the given string into bytes + """ + if type(string) is bytes: + return string + return bytes(string, "UTF-8") + + def from_bytes(data): + """ + Converts the given bytes into a string + """ + if type(data) is str: + return data + return str(data, "UTF-8") + +# ------------------------------------------------------------------------------ +# Common + +DictType = dict + +ListType = list +TupleType = tuple + +iterable_types = ( + list, + set, frozenset, + tuple +) + +value_types = ( + bool, + type(None) +) + +primitive_types = string_types + numeric_types + value_types diff --git a/scripts/automation/trex_control_plane/python_lib/jsonrpclib-pelix-0.2.5/jsonrpclib_pelix.egg-info/PKG-INFO b/scripts/external_libs/jsonrpclib-pelix-0.2.5/jsonrpclib_pelix.egg-info/PKG-INFO index 9d0f3fca..5dce6b1c 100755..100644 --- a/scripts/automation/trex_control_plane/python_lib/jsonrpclib-pelix-0.2.5/jsonrpclib_pelix.egg-info/PKG-INFO +++ b/scripts/external_libs/jsonrpclib-pelix-0.2.5/jsonrpclib_pelix.egg-info/PKG-INFO @@ -1,460 +1,460 @@ -Metadata-Version: 1.1
-Name: jsonrpclib-pelix
-Version: 0.2.5
-Summary: This project is an implementation of the JSON-RPC v2.0 specification (backwards-compatible) as a client library, for Python 2.6+ and Python 3. This version is a fork of jsonrpclib by Josh Marshall, usable with Pelix remote services.
-Home-page: http://github.com/tcalmant/jsonrpclib/
-Author: Thomas Calmant
-Author-email: thomas.calmant+github@gmail.com
-License: Apache License 2.0
-Description: JSONRPClib (patched for Pelix)
- ##############################
-
- .. image:: https://pypip.in/license/jsonrpclib-pelix/badge.svg
- :target: https://pypi.python.org/pypi/jsonrpclib-pelix/
-
- .. image:: https://travis-ci.org/tcalmant/jsonrpclib.svg?branch=master
- :target: https://travis-ci.org/tcalmant/jsonrpclib
-
- .. image:: https://coveralls.io/repos/tcalmant/jsonrpclib/badge.svg?branch=master
- :target: https://coveralls.io/r/tcalmant/jsonrpclib?branch=master
-
-
- This library is an implementation of the JSON-RPC specification.
-        It supports both the original 1.0 specification and the
- new (proposed) 2.0 specification, which includes batch submission, keyword
- arguments, etc.
-
- It is licensed under the Apache License, Version 2.0
- (http://www.apache.org/licenses/LICENSE-2.0.html).
-
-
- About this version
- ******************
-
- This is a patched version of the original ``jsonrpclib`` project by
- Josh Marshall, available at https://github.com/joshmarshall/jsonrpclib.
-
- The suffix *-pelix* only indicates that this version works with Pelix Remote
- Services, but it is **not** a Pelix specific implementation.
-
- * This version adds support for Python 3, staying compatible with Python 2.
- * It is now possible to use the dispatch_method argument while extending
- the SimpleJSONRPCDispatcher, to use a custom dispatcher.
-          This allows Pelix Remote Services to use this package.
- * It can use thread pools to control the number of threads spawned to handle
-          notification requests and client connections.
- * The modifications added in other forks of this project have been added:
-
- * From https://github.com/drdaeman/jsonrpclib:
-
- * Improved JSON-RPC 1.0 support
- * Less strict error response handling
-
- * From https://github.com/tuomassalo/jsonrpclib:
-
- * In case of a non-pre-defined error, raise an AppError and give access to
- *error.data*
-
- * From https://github.com/dejw/jsonrpclib:
-
- * Custom headers can be sent with request and associated tests
-
- * The support for Unix sockets has been removed, as it is not trivial to convert
- to Python 3 (and I don't use them)
- * This version cannot be installed with the original ``jsonrpclib``, as it uses
- the same package name.
-
-
- Summary
- *******
-
- This library implements the JSON-RPC 2.0 proposed specification in pure Python.
- It is designed to be as compatible with the syntax of ``xmlrpclib`` as possible
- (it extends where possible), so that projects using ``xmlrpclib`` could easily
- be modified to use JSON and experiment with the differences.
-
- It is backwards-compatible with the 1.0 specification, and supports all of the
- new proposed features of 2.0, including:
-
- * Batch submission (via MultiCall)
- * Keyword arguments
- * Notifications (both in a batch and 'normal')
- * Class translation using the ``__jsonclass__`` key.
-
- I've added a "SimpleJSONRPCServer", which is intended to emulate the
- "SimpleXMLRPCServer" from the default Python distribution.
-
-
- Requirements
- ************
-
- It supports ``cjson`` and ``simplejson``, and looks for the parsers in that
- order (searching first for ``cjson``, then for the *built-in* ``json`` in 2.6+,
- and then the ``simplejson`` external library).
- One of these must be installed to use this library, although if you have a
- standard distribution of 2.6+, you should already have one.
- Keep in mind that ``cjson`` is supposed to be the quickest, I believe, so if
- you are going for full-on optimization you may want to pick it up.
-
-        Since the library uses the ``contextlib`` module, you should have at
-        least Python 2.5 installed.
-
-
- Installation
- ************
-
- You can install this from PyPI with one of the following commands (sudo
- may be required):
-
- .. code-block:: console
-
- easy_install jsonrpclib-pelix
- pip install jsonrpclib-pelix
-
- Alternatively, you can download the source from the GitHub repository
- at http://github.com/tcalmant/jsonrpclib and manually install it
- with the following commands:
-
- .. code-block:: console
-
- git clone git://github.com/tcalmant/jsonrpclib.git
- cd jsonrpclib
- python setup.py install
-
-
- SimpleJSONRPCServer
- *******************
-
- This is identical in usage (or should be) to the SimpleXMLRPCServer in the
- Python standard library. Some of the differences in features are that it
- obviously supports notification, batch calls, class translation (if left on),
- etc.
- Note: The import line is slightly different from the regular SimpleXMLRPCServer,
- since the SimpleJSONRPCServer is distributed within the ``jsonrpclib`` library.
-
- .. code-block:: python
-
- from jsonrpclib.SimpleJSONRPCServer import SimpleJSONRPCServer
-
- server = SimpleJSONRPCServer(('localhost', 8080))
- server.register_function(pow)
- server.register_function(lambda x,y: x+y, 'add')
- server.register_function(lambda x: x, 'ping')
- server.serve_forever()
-
-        To protect the server with SSL, use the following snippet:
-
- .. code-block:: python
-
-            from jsonrpclib.SimpleJSONRPCServer import SimpleJSONRPCServer
-            import ssl
-
- # Setup the SSL socket
- server = SimpleJSONRPCServer(('localhost', 8080), bind_and_activate=False)
- server.socket = ssl.wrap_socket(server.socket, certfile='server.pem',
- server_side=True)
- server.server_bind()
- server.server_activate()
-
- # ... register functions
- # Start the server
- server.serve_forever()
-
-
- Notification Thread Pool
- ========================
-
- By default, notification calls are handled in the request handling thread.
- It is possible to use a thread pool to handle them, by giving it to the server
- using the ``set_notification_pool()`` method:
-
- .. code-block:: python
-
- from jsonrpclib.SimpleJSONRPCServer import SimpleJSONRPCServer
- from jsonrpclib.threadpool import ThreadPool
-
- # Setup the thread pool: between 0 and 10 threads
- pool = ThreadPool(max_threads=10, min_threads=0)
-
- # Don't forget to start it
- pool.start()
-
- # Setup the server
-            server = SimpleJSONRPCServer(('localhost', 8080))
- server.set_notification_pool(pool)
-
- # Register methods
- server.register_function(pow)
- server.register_function(lambda x,y: x+y, 'add')
- server.register_function(lambda x: x, 'ping')
-
- try:
- server.serve_forever()
- finally:
- # Stop the thread pool (let threads finish their current task)
- pool.stop()
- server.set_notification_pool(None)
-
-
- Threaded server
- ===============
-
-        It is also possible to use a thread pool to handle client requests, using the
- ``PooledJSONRPCServer`` class.
-        By default, this class uses a pool of 0 to 30 threads. A custom pool can be given
- with the ``thread_pool`` parameter of the class constructor.
-
- The notification pool and the request pool are different: by default, a server
- with a request pool doesn't have a notification pool.
-
- .. code-block:: python
-
- from jsonrpclib.SimpleJSONRPCServer import PooledJSONRPCServer
- from jsonrpclib.threadpool import ThreadPool
-
- # Setup the notification and request pools
-            notif_pool = ThreadPool(max_threads=10, min_threads=0)
- request_pool = ThreadPool(max_threads=50, min_threads=10)
-
- # Don't forget to start them
-            notif_pool.start()
- request_pool.start()
-
- # Setup the server
-            server = PooledJSONRPCServer(('localhost', 8080),
-                                         thread_pool=request_pool)
-            server.set_notification_pool(notif_pool)
-
- # Register methods
- server.register_function(pow)
- server.register_function(lambda x,y: x+y, 'add')
- server.register_function(lambda x: x, 'ping')
-
- try:
- server.serve_forever()
- finally:
- # Stop the thread pools (let threads finish their current task)
- request_pool.stop()
-                notif_pool.stop()
- server.set_notification_pool(None)
-
- Client Usage
- ************
-
- This is (obviously) taken from a console session.
-
- .. code-block:: python
-
- >>> import jsonrpclib
- >>> server = jsonrpclib.ServerProxy('http://localhost:8080')
- >>> server.add(5,6)
- 11
- >>> server.add(x=5, y=10)
- 15
- >>> server._notify.add(5,6)
- # No result returned...
- >>> batch = jsonrpclib.MultiCall(server)
- >>> batch.add(5, 6)
- >>> batch.ping({'key':'value'})
- >>> batch._notify.add(4, 30)
- >>> results = batch()
- >>> for result in results:
-            ... print(result)
- 11
- {'key': 'value'}
- # Note that there are only two responses -- this is according to spec.
-
- # Clean up
- >>> server('close')()
-
- # Using client history
- >>> history = jsonrpclib.history.History()
- >>> server = jsonrpclib.ServerProxy('http://localhost:8080', history=history)
- >>> server.add(5,6)
- 11
- >>> print(history.request)
- {"id": "f682b956-c8e1-4506-9db4-29fe8bc9fcaa", "jsonrpc": "2.0",
- "method": "add", "params": [5, 6]}
- >>> print(history.response)
- {"id": "f682b956-c8e1-4506-9db4-29fe8bc9fcaa", "jsonrpc": "2.0",
- "result": 11}
-
- # Clean up
- >>> server('close')()
-
- If you need 1.0 functionality, there are a bunch of places you can pass that in,
- although the best is just to give a specific configuration to
- ``jsonrpclib.ServerProxy``:
-
- .. code-block:: python
-
- >>> import jsonrpclib
- >>> jsonrpclib.config.DEFAULT.version
- 2.0
- >>> config = jsonrpclib.config.Config(version=1.0)
- >>> history = jsonrpclib.history.History()
- >>> server = jsonrpclib.ServerProxy('http://localhost:8080', config=config,
- history=history)
- >>> server.add(7, 10)
- 17
- >>> print(history.request)
- {"id": "827b2923-5b37-49a5-8b36-e73920a16d32",
- "method": "add", "params": [7, 10]}
- >>> print(history.response)
- {"id": "827b2923-5b37-49a5-8b36-e73920a16d32", "error": null, "result": 17}
- >>> server('close')()
-
- The equivalent ``loads`` and ``dumps`` functions also exist, although with minor
- modifications. The ``dumps`` arguments are almost identical, but it adds three
- arguments: ``rpcid`` for the 'id' key, ``version`` to specify the JSON-RPC
- compatibility, and ``notify`` if it's a request that you want to be a
- notification.
-
- Additionally, the ``loads`` method does not return the params and method like
- ``xmlrpclib``, but instead a.) parses for errors, raising ProtocolErrors, and
- b.) returns the entire structure of the request / response for manual parsing.
-
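        As a minimal sketch of such a round trip (editorial example; the
        ``rpcid`` value is an arbitrary assumption):

        .. code-block:: python

            >>> import jsonrpclib
            >>> raw = jsonrpclib.dumps([5, 6], methodname='add', rpcid='1')
            >>> parsed = jsonrpclib.loads('{"id": "1", "jsonrpc": "2.0", "result": 11}')
            >>> parsed['result']
            11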
-
- Additional headers
- ******************
-
- If your remote service requires custom headers in request, you can pass them
-        as a ``headers`` keyword argument, when creating the ``ServerProxy``:
-
- .. code-block:: python
-
- >>> import jsonrpclib
- >>> server = jsonrpclib.ServerProxy("http://localhost:8080",
- headers={'X-Test' : 'Test'})
-
-        You can also put additional request headers only for certain method invocations:
-
- .. code-block:: python
-
- >>> import jsonrpclib
- >>> server = jsonrpclib.Server("http://localhost:8080")
- >>> with server._additional_headers({'X-Test' : 'Test'}) as test_server:
- ... test_server.ping(42)
- ...
-            >>> # X-Test header will no longer be sent in requests
-
- Of course ``_additional_headers`` contexts can be nested as well.
-
-
- Class Translation
- *****************
-
- I've recently added "automatic" class translation support, although it is
- turned off by default. This can be devastatingly slow if improperly used, so
- the following is just a short list of things to keep in mind when using it.
-
- * Keep It (the object) Simple Stupid. (for exceptions, keep reading.)
- * Do not require init params (for exceptions, keep reading)
- * Getter properties without setters could be dangerous (read: not tested)
-
- If any of the above are issues, use the _serialize method. (see usage below)
-        The server and client must BOTH have the use_jsonclass configuration item on and
- they must both have access to the same libraries used by the objects for
- this to work.
-
- If you have excessively nested arguments, it would be better to turn off the
- translation and manually invoke it on specific objects using
- ``jsonrpclib.jsonclass.dump`` / ``jsonrpclib.jsonclass.load`` (since the default
- behavior recursively goes through attributes and lists / dicts / tuples).
-
- Sample file: *test_obj.py*
-
- .. code-block:: python
-
- # This object is /very/ simple, and the system will look through the
- # attributes and serialize what it can.
- class TestObj(object):
- foo = 'bar'
-
- # This object requires __init__ params, so it uses the _serialize method
- # and returns a tuple of init params and attribute values (the init params
- # can be a dict or a list, but the attribute values must be a dict.)
- class TestSerial(object):
- foo = 'bar'
- def __init__(self, *args):
- self.args = args
- def _serialize(self):
- return (self.args, {'foo':self.foo,})
-
- * Sample usage
-
- .. code-block:: python
-
- >>> import jsonrpclib
- >>> import test_obj
-
- # History is used only to print the serialized form of beans
- >>> history = jsonrpclib.history.History()
- >>> testobj1 = test_obj.TestObj()
- >>> testobj2 = test_obj.TestSerial()
- >>> server = jsonrpclib.Server('http://localhost:8080', history=history)
-
- # The 'ping' just returns whatever is sent
- >>> ping1 = server.ping(testobj1)
- >>> ping2 = server.ping(testobj2)
-
- >>> print(history.request)
- {"id": "7805f1f9-9abd-49c6-81dc-dbd47229fe13", "jsonrpc": "2.0",
- "method": "ping", "params": [{"__jsonclass__":
- ["test_obj.TestSerial", []], "foo": "bar"}
- ]}
- >>> print(history.response)
- {"id": "7805f1f9-9abd-49c6-81dc-dbd47229fe13", "jsonrpc": "2.0",
- "result": {"__jsonclass__": ["test_obj.TestSerial", []], "foo": "bar"}}
-
-        This behavior is turned on by default. To deactivate it, just set the
- ``use_jsonclass`` member of a server ``Config`` to False.
- If you want to use a per-class serialization method, set its name in the
- ``serialize_method`` member of a server ``Config``.
- Finally, if you are using classes that you have defined in the implementation
- (as in, not a separate library), you'll need to add those (on BOTH the server
- and the client) using the ``config.classes.add()`` method.
-
- Feedback on this "feature" is very, VERY much appreciated.
-
- Why JSON-RPC?
- *************
-
- In my opinion, there are several reasons to choose JSON over XML for RPC:
-
- * Much simpler to read (I suppose this is opinion, but I know I'm right. :)
- * Size / Bandwidth - Main reason, a JSON object representation is just much smaller.
- * Parsing - JSON should be much quicker to parse than XML.
- * Easy class passing with ``jsonclass`` (when enabled)
-
- In the interest of being fair, there are also a few reasons to choose XML
- over JSON:
-
- * Your server doesn't do JSON (rather obvious)
- * Wider XML-RPC support across APIs (can we change this? :))
- * Libraries are more established, i.e. more stable (Let's change this too.)
-
- Tests
- *****
-
- Tests are an almost-verbatim drop from the JSON-RPC specification 2.0 page.
-        They can be run using *unittest* or *nosetests*:
-
- .. code-block:: console
-
- python -m unittest discover tests
- python3 -m unittest discover tests
- nosetests tests
-
-Platform: UNKNOWN
-Classifier: Development Status :: 5 - Production/Stable
-Classifier: Intended Audience :: Developers
-Classifier: License :: OSI Approved :: Apache Software License
-Classifier: Operating System :: OS Independent
-Classifier: Programming Language :: Python :: 2.6
-Classifier: Programming Language :: Python :: 2.7
-Classifier: Programming Language :: Python :: 3
-Classifier: Programming Language :: Python :: 3.0
-Classifier: Programming Language :: Python :: 3.1
-Classifier: Programming Language :: Python :: 3.2
-Classifier: Programming Language :: Python :: 3.3
-Classifier: Programming Language :: Python :: 3.4
+Metadata-Version: 1.1 +Name: jsonrpclib-pelix +Version: 0.2.5 +Summary: This project is an implementation of the JSON-RPC v2.0 specification (backwards-compatible) as a client library, for Python 2.6+ and Python 3.This version is a fork of jsonrpclib by Josh Marshall, usable with Pelix remote services. +Home-page: http://github.com/tcalmant/jsonrpclib/ +Author: Thomas Calmant +Author-email: thomas.calmant+github@gmail.com +License: Apache License 2.0 +Description: JSONRPClib (patched for Pelix) + ############################## + + .. image:: https://pypip.in/license/jsonrpclib-pelix/badge.svg + :target: https://pypi.python.org/pypi/jsonrpclib-pelix/ + + .. image:: https://travis-ci.org/tcalmant/jsonrpclib.svg?branch=master + :target: https://travis-ci.org/tcalmant/jsonrpclib + + .. image:: https://coveralls.io/repos/tcalmant/jsonrpclib/badge.svg?branch=master + :target: https://coveralls.io/r/tcalmant/jsonrpclib?branch=master + + + This library is an implementation of the JSON-RPC specification. + It supports both the original 1.0 specification, as well as the + new (proposed) 2.0 specification, which includes batch submission, keyword + arguments, etc. + + It is licensed under the Apache License, Version 2.0 + (http://www.apache.org/licenses/LICENSE-2.0.html). + + + About this version + ****************** + + This is a patched version of the original ``jsonrpclib`` project by + Josh Marshall, available at https://github.com/joshmarshall/jsonrpclib. + + The suffix *-pelix* only indicates that this version works with Pelix Remote + Services, but it is **not** a Pelix specific implementation. + + * This version adds support for Python 3, staying compatible with Python 2. + * It is now possible to use the dispatch_method argument while extending + the SimpleJSONRPCDispatcher, to use a custom dispatcher. + This allows to use this package by Pelix Remote Services. + * It can use thread pools to control the number of threads spawned to handle + notification requests and clients connections. + * The modifications added in other forks of this project have been added: + + * From https://github.com/drdaeman/jsonrpclib: + + * Improved JSON-RPC 1.0 support + * Less strict error response handling + + * From https://github.com/tuomassalo/jsonrpclib: + + * In case of a non-pre-defined error, raise an AppError and give access to + *error.data* + + * From https://github.com/dejw/jsonrpclib: + + * Custom headers can be sent with request and associated tests + + * The support for Unix sockets has been removed, as it is not trivial to convert + to Python 3 (and I don't use them) + * This version cannot be installed with the original ``jsonrpclib``, as it uses + the same package name. + + + Summary + ******* + + This library implements the JSON-RPC 2.0 proposed specification in pure Python. + It is designed to be as compatible with the syntax of ``xmlrpclib`` as possible + (it extends where possible), so that projects using ``xmlrpclib`` could easily + be modified to use JSON and experiment with the differences. + + It is backwards-compatible with the 1.0 specification, and supports all of the + new proposed features of 2.0, including: + + * Batch submission (via MultiCall) + * Keyword arguments + * Notifications (both in a batch and 'normal') + * Class translation using the ``__jsonclass__`` key. + + I've added a "SimpleJSONRPCServer", which is intended to emulate the + "SimpleXMLRPCServer" from the default Python distribution. 
+ + + Requirements + ************ + + It supports ``cjson`` and ``simplejson``, and looks for the parsers in that + order (searching first for ``cjson``, then for the *built-in* ``json`` in 2.6+, + and then the ``simplejson`` external library). + One of these must be installed to use this library, although if you have a + standard distribution of 2.6+, you should already have one. + Keep in mind that ``cjson`` is supposed to be the quickest, I believe, so if + you are going for full-on optimization you may want to pick it up. + + Since library uses ``contextlib`` module, you should have at least Python 2.5 + installed. + + + Installation + ************ + + You can install this from PyPI with one of the following commands (sudo + may be required): + + .. code-block:: console + + easy_install jsonrpclib-pelix + pip install jsonrpclib-pelix + + Alternatively, you can download the source from the GitHub repository + at http://github.com/tcalmant/jsonrpclib and manually install it + with the following commands: + + .. code-block:: console + + git clone git://github.com/tcalmant/jsonrpclib.git + cd jsonrpclib + python setup.py install + + + SimpleJSONRPCServer + ******************* + + This is identical in usage (or should be) to the SimpleXMLRPCServer in the + Python standard library. Some of the differences in features are that it + obviously supports notification, batch calls, class translation (if left on), + etc. + Note: The import line is slightly different from the regular SimpleXMLRPCServer, + since the SimpleJSONRPCServer is distributed within the ``jsonrpclib`` library. + + .. code-block:: python + + from jsonrpclib.SimpleJSONRPCServer import SimpleJSONRPCServer + + server = SimpleJSONRPCServer(('localhost', 8080)) + server.register_function(pow) + server.register_function(lambda x,y: x+y, 'add') + server.register_function(lambda x: x, 'ping') + server.serve_forever() + + To start protect the server with SSL, use the following snippet: + + .. code-block:: python + + from jsonrpclib.SimpleJSONRPCServer import SimpleJSONRPCServer + + # Setup the SSL socket + server = SimpleJSONRPCServer(('localhost', 8080), bind_and_activate=False) + server.socket = ssl.wrap_socket(server.socket, certfile='server.pem', + server_side=True) + server.server_bind() + server.server_activate() + + # ... register functions + # Start the server + server.serve_forever() + + + Notification Thread Pool + ======================== + + By default, notification calls are handled in the request handling thread. + It is possible to use a thread pool to handle them, by giving it to the server + using the ``set_notification_pool()`` method: + + .. code-block:: python + + from jsonrpclib.SimpleJSONRPCServer import SimpleJSONRPCServer + from jsonrpclib.threadpool import ThreadPool + + # Setup the thread pool: between 0 and 10 threads + pool = ThreadPool(max_threads=10, min_threads=0) + + # Don't forget to start it + pool.start() + + # Setup the server + server = SimpleJSONRPCServer(('localhost', 8080), config) + server.set_notification_pool(pool) + + # Register methods + server.register_function(pow) + server.register_function(lambda x,y: x+y, 'add') + server.register_function(lambda x: x, 'ping') + + try: + server.serve_forever() + finally: + # Stop the thread pool (let threads finish their current task) + pool.stop() + server.set_notification_pool(None) + + + Threaded server + =============== + + It is also possible to use a thread pool to handle clients requests, using the + ``PooledJSONRPCServer`` class. 
+        By default, this class uses a pool of 0 to 30 threads. A custom pool
+        can be given with the ``thread_pool`` parameter of the class
+        constructor.
+
+        The notification pool and the request pool are different: by default, a
+        server with a request pool doesn't have a notification pool.
+
+        .. code-block:: python
+
+            from jsonrpclib.SimpleJSONRPCServer import PooledJSONRPCServer
+            from jsonrpclib.threadpool import ThreadPool
+
+            # Set up the notification and request pools
+            notif_pool = ThreadPool(max_threads=10, min_threads=0)
+            request_pool = ThreadPool(max_threads=50, min_threads=10)
+
+            # Don't forget to start them
+            notif_pool.start()
+            request_pool.start()
+
+            # Set up the server
+            server = PooledJSONRPCServer(('localhost', 8080),
+                                         thread_pool=request_pool)
+            server.set_notification_pool(notif_pool)
+
+            # Register methods
+            server.register_function(pow)
+            server.register_function(lambda x, y: x + y, 'add')
+            server.register_function(lambda x: x, 'ping')
+
+            try:
+                server.serve_forever()
+            finally:
+                # Stop the thread pools (let threads finish their current task)
+                request_pool.stop()
+                notif_pool.stop()
+                server.set_notification_pool(None)
+
+        Client Usage
+        ************
+
+        This is (obviously) taken from a console session.
+
+        .. code-block:: python
+
+            >>> import jsonrpclib
+            >>> server = jsonrpclib.ServerProxy('http://localhost:8080')
+            >>> server.add(5, 6)
+            11
+            >>> server.add(x=5, y=10)
+            15
+            >>> server._notify.add(5, 6)
+            # No result returned...
+            >>> batch = jsonrpclib.MultiCall(server)
+            >>> batch.add(5, 6)
+            >>> batch.ping({'key': 'value'})
+            >>> batch._notify.add(4, 30)
+            >>> results = batch()
+            >>> for result in results:
+            ...     print(result)
+            11
+            {'key': 'value'}
+            # Note that there are only two responses -- this is according to spec.
+
+            # Clean up
+            >>> server('close')()
+
+            # Using client history
+            >>> history = jsonrpclib.history.History()
+            >>> server = jsonrpclib.ServerProxy('http://localhost:8080', history=history)
+            >>> server.add(5, 6)
+            11
+            >>> print(history.request)
+            {"id": "f682b956-c8e1-4506-9db4-29fe8bc9fcaa", "jsonrpc": "2.0",
+             "method": "add", "params": [5, 6]}
+            >>> print(history.response)
+            {"id": "f682b956-c8e1-4506-9db4-29fe8bc9fcaa", "jsonrpc": "2.0",
+             "result": 11}
+
+            # Clean up
+            >>> server('close')()
+
+        If you need 1.0 functionality, there are a bunch of places you can pass
+        that in, although the best is just to give a specific configuration to
+        ``jsonrpclib.ServerProxy``:
+
+        .. code-block:: python
+
+            >>> import jsonrpclib
+            >>> jsonrpclib.config.DEFAULT.version
+            2.0
+            >>> config = jsonrpclib.config.Config(version=1.0)
+            >>> history = jsonrpclib.history.History()
+            >>> server = jsonrpclib.ServerProxy('http://localhost:8080', config=config,
+            ...                                 history=history)
+            >>> server.add(7, 10)
+            17
+            >>> print(history.request)
+            {"id": "827b2923-5b37-49a5-8b36-e73920a16d32",
+             "method": "add", "params": [7, 10]}
+            >>> print(history.response)
+            {"id": "827b2923-5b37-49a5-8b36-e73920a16d32", "error": null, "result": 17}
+            >>> server('close')()
+
+        The equivalent ``loads`` and ``dumps`` functions also exist, although
+        with minor modifications. The ``dumps`` arguments are almost identical,
+        but it adds three arguments: ``rpcid`` for the 'id' key, ``version`` to
+        specify the JSON-RPC compatibility, and ``notify`` if it's a request
+        that you want to be a notification.
+
+        Additionally, the ``loads`` method does not return the params and
+        method like ``xmlrpclib``, but instead (a) parses for errors, raising
+        ProtocolErrors, and (b) returns the entire structure of the request /
+        response for manual parsing.
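+
+        As a minimal sketch of those two functions (not part of the upstream
+        README; it assumes ``dumps`` and ``loads`` are exposed at the package
+        level with ``xmlrpclib``-style positional arguments, plus the three
+        additions named above):
+
+        .. code-block:: python
+
+            >>> import jsonrpclib
+            >>> # Build a raw JSON-RPC 2.0 request string by hand
+            >>> data = jsonrpclib.dumps((5, 6), 'add', rpcid='42', version=2.0)
+            >>> # loads returns the whole structure, not (params, method)
+            >>> parsed = jsonrpclib.loads(data)
+            >>> parsed['method']
+            'add'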
+
+
+        Additional headers
+        ******************
+
+        If your remote service requires custom headers in requests, you can
+        pass them as a ``headers`` keyword argument when creating the
+        ``ServerProxy``:
+
+        .. code-block:: python
+
+            >>> import jsonrpclib
+            >>> server = jsonrpclib.ServerProxy("http://localhost:8080",
+            ...                                 headers={'X-Test': 'Test'})
+
+        You can also add request headers for certain method invocations only:
+
+        .. code-block:: python
+
+            >>> import jsonrpclib
+            >>> server = jsonrpclib.Server("http://localhost:8080")
+            >>> with server._additional_headers({'X-Test': 'Test'}) as test_server:
+            ...     test_server.ping(42)
+            ...
+            >>> # The X-Test header will no longer be sent in requests
+
+        Of course ``_additional_headers`` contexts can be nested as well.
+
+
+        Class Translation
+        *****************
+
+        I've recently added "automatic" class translation support, although it
+        is turned off by default. This can be devastatingly slow if improperly
+        used, so the following is just a short list of things to keep in mind
+        when using it.
+
+        * Keep It (the object) Simple Stupid. (for exceptions, keep reading.)
+        * Do not require init params (for exceptions, keep reading)
+        * Getter properties without setters could be dangerous (read: not tested)
+
+        If any of the above are issues, use the ``_serialize`` method (see the
+        usage below). The server and client must BOTH have the
+        ``use_jsonclass`` configuration item on, and they must both have access
+        to the same libraries used by the objects, for this to work.
+
+        If you have excessively nested arguments, it would be better to turn
+        off the translation and manually invoke it on specific objects using
+        ``jsonrpclib.jsonclass.dump`` / ``jsonrpclib.jsonclass.load`` (since
+        the default behavior recursively goes through attributes and lists /
+        dicts / tuples).
+
+        Sample file: *test_obj.py*
+
+        .. code-block:: python
+
+            # This object is /very/ simple, and the system will look through
+            # the attributes and serialize what it can.
+            class TestObj(object):
+                foo = 'bar'
+
+            # This object requires __init__ params, so it uses the _serialize
+            # method and returns a tuple of init params and attribute values
+            # (the init params can be a dict or a list, but the attribute
+            # values must be a dict.)
+            class TestSerial(object):
+                foo = 'bar'
+
+                def __init__(self, *args):
+                    self.args = args
+
+                def _serialize(self):
+                    return (self.args, {'foo': self.foo})
+
+        Sample usage:
+
+        .. code-block:: python
+
+            >>> import jsonrpclib
+            >>> import test_obj
+
+            # History is used only to print the serialized form of beans
+            >>> history = jsonrpclib.history.History()
+            >>> testobj1 = test_obj.TestObj()
+            >>> testobj2 = test_obj.TestSerial()
+            >>> server = jsonrpclib.Server('http://localhost:8080', history=history)
+
+            # The 'ping' just returns whatever is sent
+            >>> ping1 = server.ping(testobj1)
+            >>> ping2 = server.ping(testobj2)
+
+            >>> print(history.request)
+            {"id": "7805f1f9-9abd-49c6-81dc-dbd47229fe13", "jsonrpc": "2.0",
+             "method": "ping", "params": [{"__jsonclass__":
+             ["test_obj.TestSerial", []], "foo": "bar"}]}
+            >>> print(history.response)
+            {"id": "7805f1f9-9abd-49c6-81dc-dbd47229fe13", "jsonrpc": "2.0",
+             "result": {"__jsonclass__": ["test_obj.TestSerial", []], "foo": "bar"}}
+
+        This behavior is turned on by default. To deactivate it, just set the
+        ``use_jsonclass`` member of a server ``Config`` to False.
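+
+        For example (a minimal sketch, not part of the upstream README; the
+        ``use_jsonclass`` keyword to the ``Config`` constructor is assumed from
+        the deactivation note above and the earlier ``Config`` example):
+
+        .. code-block:: python
+
+            >>> import jsonrpclib
+            >>> config = jsonrpclib.config.Config(use_jsonclass=False)
+            >>> server = jsonrpclib.ServerProxy('http://localhost:8080',
+            ...                                 config=config)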
+        If you want to use a per-class serialization method, set its name in
+        the ``serialize_method`` member of a server ``Config``.
+        Finally, if you are using classes that you have defined in the
+        implementation (as in, not a separate library), you'll need to add
+        those (on BOTH the server and the client) using the
+        ``config.classes.add()`` method.
+
+        Feedback on this "feature" is very, VERY much appreciated.
+
+        Why JSON-RPC?
+        *************
+
+        In my opinion, there are several reasons to choose JSON over XML for RPC:
+
+        * Much simpler to read (I suppose this is opinion, but I know I'm right. :)
+        * Size / Bandwidth - Main reason, a JSON object representation is just much smaller.
+        * Parsing - JSON should be much quicker to parse than XML.
+        * Easy class passing with ``jsonclass`` (when enabled)
+
+        In the interest of being fair, there are also a few reasons to choose
+        XML over JSON:
+
+        * Your server doesn't do JSON (rather obvious)
+        * Wider XML-RPC support across APIs (can we change this? :))
+        * Libraries are more established, i.e. more stable (Let's change this too.)
+
+        Tests
+        *****
+
+        Tests are an almost-verbatim drop from the JSON-RPC specification 2.0 page.
+        They can be run using *unittest* or *nosetests*:
+
+        .. code-block:: console
+
+            python -m unittest discover tests
+            python3 -m unittest discover tests
+            nosetests tests
+
+Platform: UNKNOWN
+Classifier: Development Status :: 5 - Production/Stable
+Classifier: Intended Audience :: Developers
+Classifier: License :: OSI Approved :: Apache Software License
+Classifier: Operating System :: OS Independent
+Classifier: Programming Language :: Python :: 2.6
+Classifier: Programming Language :: Python :: 2.7
+Classifier: Programming Language :: Python :: 3
+Classifier: Programming Language :: Python :: 3.0
+Classifier: Programming Language :: Python :: 3.1
+Classifier: Programming Language :: Python :: 3.2
+Classifier: Programming Language :: Python :: 3.3
+Classifier: Programming Language :: Python :: 3.4
diff --git a/scripts/automation/trex_control_plane/python_lib/jsonrpclib-pelix-0.2.5/jsonrpclib_pelix.egg-info/SOURCES.txt b/scripts/external_libs/jsonrpclib-pelix-0.2.5/jsonrpclib_pelix.egg-info/SOURCES.txt
index f5714032..f5714032 100755..100644
--- a/scripts/automation/trex_control_plane/python_lib/jsonrpclib-pelix-0.2.5/jsonrpclib_pelix.egg-info/SOURCES.txt
+++ b/scripts/external_libs/jsonrpclib-pelix-0.2.5/jsonrpclib_pelix.egg-info/SOURCES.txt
diff --git a/scripts/automation/trex_control_plane/python_lib/lockfile-0.10.2/lockfile.egg-info/dependency_links.txt b/scripts/external_libs/jsonrpclib-pelix-0.2.5/jsonrpclib_pelix.egg-info/dependency_links.txt
index 8b137891..8b137891 100755..100644
--- a/scripts/automation/trex_control_plane/python_lib/lockfile-0.10.2/lockfile.egg-info/dependency_links.txt
+++ b/scripts/external_libs/jsonrpclib-pelix-0.2.5/jsonrpclib_pelix.egg-info/dependency_links.txt
diff --git a/scripts/automation/trex_control_plane/python_lib/jsonrpclib-pelix-0.2.5/jsonrpclib_pelix.egg-info/top_level.txt b/scripts/external_libs/jsonrpclib-pelix-0.2.5/jsonrpclib_pelix.egg-info/top_level.txt
index 1410b2ff..1410b2ff 100755..100644
--- a/scripts/automation/trex_control_plane/python_lib/jsonrpclib-pelix-0.2.5/jsonrpclib_pelix.egg-info/top_level.txt
+++ b/scripts/external_libs/jsonrpclib-pelix-0.2.5/jsonrpclib_pelix.egg-info/top_level.txt
diff --git a/scripts/automation/trex_control_plane/python_lib/jsonrpclib-pelix-0.2.5/setup.cfg b/scripts/external_libs/jsonrpclib-pelix-0.2.5/setup.cfg
index 26c67942..7633f817 100755..100644
--- a/scripts/automation/trex_control_plane/python_lib/jsonrpclib-pelix-0.2.5/setup.cfg
+++ b/scripts/external_libs/jsonrpclib-pelix-0.2.5/setup.cfg
@@ -1,8 +1,8 @@
-[bdist_wheel]
-universal = 1
-
-[egg_info]
-tag_date = 0
-tag_svn_revision = 0
-tag_build =
-
+[bdist_wheel] +universal = 1 + +[egg_info] +tag_date = 0 +tag_svn_revision = 0 +tag_build = + diff --git a/scripts/automation/trex_control_plane/python_lib/jsonrpclib-pelix-0.2.5/setup.py b/scripts/external_libs/jsonrpclib-pelix-0.2.5/setup.py index a64f2fb0..fb28d630 100755..100644 --- a/scripts/automation/trex_control_plane/python_lib/jsonrpclib-pelix-0.2.5/setup.py +++ b/scripts/external_libs/jsonrpclib-pelix-0.2.5/setup.py @@ -1,74 +1,74 @@ -#!/usr/bin/env python
-# -- Content-Encoding: UTF-8 --
-"""
-Installation script
-
-:authors: Josh Marshall, Thomas Calmant
-:copyright: Copyright 2015, isandlaTech
-:license: Apache License 2.0
-:version: 0.2.5
-
-..
-
- Copyright 2015 isandlaTech
-
- Licensed under the Apache License, Version 2.0 (the "License");
- you may not use this file except in compliance with the License.
- You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
- Unless required by applicable law or agreed to in writing, software
- distributed under the License is distributed on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- See the License for the specific language governing permissions and
- limitations under the License.
-"""
-
-# Module version
-__version_info__ = (0, 2, 5)
-__version__ = ".".join(str(x) for x in __version_info__)
-
-# Documentation strings format
-__docformat__ = "restructuredtext en"
-
-# ------------------------------------------------------------------------------
-
-import sys
-
-try:
- from setuptools import setup
-except ImportError:
- from distutils.core import setup
-
-# ------------------------------------------------------------------------------
-
-setup(
- name="jsonrpclib-pelix",
- version=__version__,
- license="Apache License 2.0",
- author="Thomas Calmant",
- author_email="thomas.calmant+github@gmail.com",
- url="http://github.com/tcalmant/jsonrpclib/",
- description=
- "This project is an implementation of the JSON-RPC v2.0 specification "
- "(backwards-compatible) as a client library, for Python 2.6+ and Python 3."
- "This version is a fork of jsonrpclib by Josh Marshall, "
- "usable with Pelix remote services.",
- long_description=open("README.rst").read(),
- packages=["jsonrpclib"],
- classifiers=[
- 'Development Status :: 5 - Production/Stable',
- 'Intended Audience :: Developers',
- 'License :: OSI Approved :: Apache Software License',
- 'Operating System :: OS Independent',
- 'Programming Language :: Python :: 2.6',
- 'Programming Language :: Python :: 2.7',
- 'Programming Language :: Python :: 3',
- 'Programming Language :: Python :: 3.0',
- 'Programming Language :: Python :: 3.1',
- 'Programming Language :: Python :: 3.2',
- 'Programming Language :: Python :: 3.3',
- 'Programming Language :: Python :: 3.4'],
- tests_require=['unittest2'] if sys.version_info < (2, 7) else []
-)
+#!/usr/bin/env python +# -- Content-Encoding: UTF-8 -- +""" +Installation script + +:authors: Josh Marshall, Thomas Calmant +:copyright: Copyright 2015, isandlaTech +:license: Apache License 2.0 +:version: 0.2.5 + +.. + + Copyright 2015 isandlaTech + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +""" + +# Module version +__version_info__ = (0, 2, 5) +__version__ = ".".join(str(x) for x in __version_info__) + +# Documentation strings format +__docformat__ = "restructuredtext en" + +# ------------------------------------------------------------------------------ + +import sys + +try: + from setuptools import setup +except ImportError: + from distutils.core import setup + +# ------------------------------------------------------------------------------ + +setup( + name="jsonrpclib-pelix", + version=__version__, + license="Apache License 2.0", + author="Thomas Calmant", + author_email="thomas.calmant+github@gmail.com", + url="http://github.com/tcalmant/jsonrpclib/", + description= + "This project is an implementation of the JSON-RPC v2.0 specification " + "(backwards-compatible) as a client library, for Python 2.6+ and Python 3." + "This version is a fork of jsonrpclib by Josh Marshall, " + "usable with Pelix remote services.", + long_description=open("README.rst").read(), + packages=["jsonrpclib"], + classifiers=[ + 'Development Status :: 5 - Production/Stable', + 'Intended Audience :: Developers', + 'License :: OSI Approved :: Apache Software License', + 'Operating System :: OS Independent', + 'Programming Language :: Python :: 2.6', + 'Programming Language :: Python :: 2.7', + 'Programming Language :: Python :: 3', + 'Programming Language :: Python :: 3.0', + 'Programming Language :: Python :: 3.1', + 'Programming Language :: Python :: 3.2', + 'Programming Language :: Python :: 3.3', + 'Programming Language :: Python :: 3.4'], + tests_require=['unittest2'] if sys.version_info < (2, 7) else [] +) diff --git a/scripts/automation/trex_control_plane/python_lib/lockfile-0.10.2/ACKS b/scripts/external_libs/lockfile-0.10.2/ACKS index 44519d17..44519d17 100755..100644 --- a/scripts/automation/trex_control_plane/python_lib/lockfile-0.10.2/ACKS +++ b/scripts/external_libs/lockfile-0.10.2/ACKS diff --git a/scripts/automation/trex_control_plane/python_lib/lockfile-0.10.2/AUTHORS b/scripts/external_libs/lockfile-0.10.2/AUTHORS index fda721cd..fda721cd 100755..100644 --- a/scripts/automation/trex_control_plane/python_lib/lockfile-0.10.2/AUTHORS +++ b/scripts/external_libs/lockfile-0.10.2/AUTHORS diff --git a/scripts/automation/trex_control_plane/python_lib/lockfile-0.10.2/ChangeLog b/scripts/external_libs/lockfile-0.10.2/ChangeLog index 3ba36a7d..3ba36a7d 100755..100644 --- a/scripts/automation/trex_control_plane/python_lib/lockfile-0.10.2/ChangeLog +++ b/scripts/external_libs/lockfile-0.10.2/ChangeLog diff --git a/scripts/automation/trex_control_plane/python_lib/lockfile-0.10.2/LICENSE b/scripts/external_libs/lockfile-0.10.2/LICENSE index 610c0793..610c0793 100755..100644 --- 
a/scripts/automation/trex_control_plane/python_lib/lockfile-0.10.2/LICENSE +++ b/scripts/external_libs/lockfile-0.10.2/LICENSE diff --git a/scripts/automation/trex_control_plane/python_lib/lockfile-0.10.2/PKG-INFO b/scripts/external_libs/lockfile-0.10.2/PKG-INFO index 9f72376f..9f72376f 100755..100644 --- a/scripts/automation/trex_control_plane/python_lib/lockfile-0.10.2/PKG-INFO +++ b/scripts/external_libs/lockfile-0.10.2/PKG-INFO diff --git a/scripts/automation/trex_control_plane/python_lib/lockfile-0.10.2/README b/scripts/external_libs/lockfile-0.10.2/README index 5f7acbc4..5f7acbc4 100755..100644 --- a/scripts/automation/trex_control_plane/python_lib/lockfile-0.10.2/README +++ b/scripts/external_libs/lockfile-0.10.2/README diff --git a/scripts/automation/trex_control_plane/python_lib/lockfile-0.10.2/RELEASE-NOTES b/scripts/external_libs/lockfile-0.10.2/RELEASE-NOTES index 8b452ed1..8b452ed1 100755..100644 --- a/scripts/automation/trex_control_plane/python_lib/lockfile-0.10.2/RELEASE-NOTES +++ b/scripts/external_libs/lockfile-0.10.2/RELEASE-NOTES diff --git a/scripts/automation/trex_control_plane/python_lib/lockfile-0.10.2/doc/source/Makefile b/scripts/external_libs/lockfile-0.10.2/doc/source/Makefile index 1b1e8d28..1b1e8d28 100755..100644 --- a/scripts/automation/trex_control_plane/python_lib/lockfile-0.10.2/doc/source/Makefile +++ b/scripts/external_libs/lockfile-0.10.2/doc/source/Makefile diff --git a/scripts/automation/trex_control_plane/python_lib/lockfile-0.10.2/doc/source/conf.py b/scripts/external_libs/lockfile-0.10.2/doc/source/conf.py index 623edcb5..623edcb5 100755..100644 --- a/scripts/automation/trex_control_plane/python_lib/lockfile-0.10.2/doc/source/conf.py +++ b/scripts/external_libs/lockfile-0.10.2/doc/source/conf.py diff --git a/scripts/automation/trex_control_plane/python_lib/lockfile-0.10.2/doc/source/index.rst b/scripts/external_libs/lockfile-0.10.2/doc/source/index.rst index f76173dc..f76173dc 100755..100644 --- a/scripts/automation/trex_control_plane/python_lib/lockfile-0.10.2/doc/source/index.rst +++ b/scripts/external_libs/lockfile-0.10.2/doc/source/index.rst diff --git a/scripts/automation/trex_control_plane/python_lib/lockfile-0.10.2/lockfile.egg-info/PKG-INFO b/scripts/external_libs/lockfile-0.10.2/lockfile.egg-info/PKG-INFO index 9f72376f..9f72376f 100755..100644 --- a/scripts/automation/trex_control_plane/python_lib/lockfile-0.10.2/lockfile.egg-info/PKG-INFO +++ b/scripts/external_libs/lockfile-0.10.2/lockfile.egg-info/PKG-INFO diff --git a/scripts/automation/trex_control_plane/python_lib/lockfile-0.10.2/lockfile.egg-info/SOURCES.txt b/scripts/external_libs/lockfile-0.10.2/lockfile.egg-info/SOURCES.txt index 4b289f3a..4b289f3a 100755..100644 --- a/scripts/automation/trex_control_plane/python_lib/lockfile-0.10.2/lockfile.egg-info/SOURCES.txt +++ b/scripts/external_libs/lockfile-0.10.2/lockfile.egg-info/SOURCES.txt diff --git a/scripts/automation/trex_control_plane/python_lib/python-daemon-2.0.5/python_daemon.egg-info/dependency_links.txt b/scripts/external_libs/lockfile-0.10.2/lockfile.egg-info/dependency_links.txt index 8b137891..8b137891 100755..100644 --- a/scripts/automation/trex_control_plane/python_lib/python-daemon-2.0.5/python_daemon.egg-info/dependency_links.txt +++ b/scripts/external_libs/lockfile-0.10.2/lockfile.egg-info/dependency_links.txt diff --git a/scripts/automation/trex_control_plane/python_lib/lockfile-0.10.2/lockfile.egg-info/not-zip-safe b/scripts/external_libs/lockfile-0.10.2/lockfile.egg-info/not-zip-safe index 8b137891..8b137891 
100755..100644 --- a/scripts/automation/trex_control_plane/python_lib/lockfile-0.10.2/lockfile.egg-info/not-zip-safe +++ b/scripts/external_libs/lockfile-0.10.2/lockfile.egg-info/not-zip-safe diff --git a/scripts/automation/trex_control_plane/python_lib/lockfile-0.10.2/lockfile.egg-info/top_level.txt b/scripts/external_libs/lockfile-0.10.2/lockfile.egg-info/top_level.txt index 5a13159a..5a13159a 100755..100644 --- a/scripts/automation/trex_control_plane/python_lib/lockfile-0.10.2/lockfile.egg-info/top_level.txt +++ b/scripts/external_libs/lockfile-0.10.2/lockfile.egg-info/top_level.txt diff --git a/scripts/automation/trex_control_plane/python_lib/lockfile-0.10.2/lockfile/__init__.py b/scripts/external_libs/lockfile-0.10.2/lockfile/__init__.py index d905af96..d905af96 100755..100644 --- a/scripts/automation/trex_control_plane/python_lib/lockfile-0.10.2/lockfile/__init__.py +++ b/scripts/external_libs/lockfile-0.10.2/lockfile/__init__.py diff --git a/scripts/automation/trex_control_plane/python_lib/lockfile-0.10.2/lockfile/linklockfile.py b/scripts/external_libs/lockfile-0.10.2/lockfile/linklockfile.py index 9c506734..9c506734 100755..100644 --- a/scripts/automation/trex_control_plane/python_lib/lockfile-0.10.2/lockfile/linklockfile.py +++ b/scripts/external_libs/lockfile-0.10.2/lockfile/linklockfile.py diff --git a/scripts/automation/trex_control_plane/python_lib/lockfile-0.10.2/lockfile/mkdirlockfile.py b/scripts/external_libs/lockfile-0.10.2/lockfile/mkdirlockfile.py index 8d2c801f..8d2c801f 100755..100644 --- a/scripts/automation/trex_control_plane/python_lib/lockfile-0.10.2/lockfile/mkdirlockfile.py +++ b/scripts/external_libs/lockfile-0.10.2/lockfile/mkdirlockfile.py diff --git a/scripts/automation/trex_control_plane/python_lib/lockfile-0.10.2/lockfile/pidlockfile.py b/scripts/external_libs/lockfile-0.10.2/lockfile/pidlockfile.py index e92f9ead..e92f9ead 100755..100644 --- a/scripts/automation/trex_control_plane/python_lib/lockfile-0.10.2/lockfile/pidlockfile.py +++ b/scripts/external_libs/lockfile-0.10.2/lockfile/pidlockfile.py diff --git a/scripts/automation/trex_control_plane/python_lib/lockfile-0.10.2/lockfile/sqlitelockfile.py b/scripts/external_libs/lockfile-0.10.2/lockfile/sqlitelockfile.py index 7dee4a85..7dee4a85 100755..100644 --- a/scripts/automation/trex_control_plane/python_lib/lockfile-0.10.2/lockfile/sqlitelockfile.py +++ b/scripts/external_libs/lockfile-0.10.2/lockfile/sqlitelockfile.py diff --git a/scripts/automation/trex_control_plane/python_lib/lockfile-0.10.2/lockfile/symlinklockfile.py b/scripts/external_libs/lockfile-0.10.2/lockfile/symlinklockfile.py index 57551a36..57551a36 100755..100644 --- a/scripts/automation/trex_control_plane/python_lib/lockfile-0.10.2/lockfile/symlinklockfile.py +++ b/scripts/external_libs/lockfile-0.10.2/lockfile/symlinklockfile.py diff --git a/scripts/automation/trex_control_plane/python_lib/lockfile-0.10.2/setup.cfg b/scripts/external_libs/lockfile-0.10.2/setup.cfg index c1fb3984..c1fb3984 100755..100644 --- a/scripts/automation/trex_control_plane/python_lib/lockfile-0.10.2/setup.cfg +++ b/scripts/external_libs/lockfile-0.10.2/setup.cfg diff --git a/scripts/automation/trex_control_plane/python_lib/lockfile-0.10.2/setup.py b/scripts/external_libs/lockfile-0.10.2/setup.py index 73637574..73637574 100755..100644 --- a/scripts/automation/trex_control_plane/python_lib/lockfile-0.10.2/setup.py +++ b/scripts/external_libs/lockfile-0.10.2/setup.py diff --git 
a/scripts/automation/trex_control_plane/python_lib/lockfile-0.10.2/test-requirements.txt b/scripts/external_libs/lockfile-0.10.2/test-requirements.txt index 2e087ff1..2e087ff1 100755..100644 --- a/scripts/automation/trex_control_plane/python_lib/lockfile-0.10.2/test-requirements.txt +++ b/scripts/external_libs/lockfile-0.10.2/test-requirements.txt diff --git a/scripts/automation/trex_control_plane/python_lib/lockfile-0.10.2/test/compliancetest.py b/scripts/external_libs/lockfile-0.10.2/test/compliancetest.py index e0258b11..e0258b11 100755..100644 --- a/scripts/automation/trex_control_plane/python_lib/lockfile-0.10.2/test/compliancetest.py +++ b/scripts/external_libs/lockfile-0.10.2/test/compliancetest.py diff --git a/scripts/automation/trex_control_plane/python_lib/lockfile-0.10.2/test/test_lockfile.py b/scripts/external_libs/lockfile-0.10.2/test/test_lockfile.py index e1f4f72f..e1f4f72f 100755..100644 --- a/scripts/automation/trex_control_plane/python_lib/lockfile-0.10.2/test/test_lockfile.py +++ b/scripts/external_libs/lockfile-0.10.2/test/test_lockfile.py diff --git a/scripts/automation/trex_control_plane/python_lib/lockfile-0.10.2/tox.ini b/scripts/external_libs/lockfile-0.10.2/tox.ini index b0a868a3..b0a868a3 100755..100644 --- a/scripts/automation/trex_control_plane/python_lib/lockfile-0.10.2/tox.ini +++ b/scripts/external_libs/lockfile-0.10.2/tox.ini diff --git a/scripts/automation/trex_control_plane/python_lib/python-daemon-2.0.5/ChangeLog b/scripts/external_libs/python-daemon-2.0.5/ChangeLog index 4975f781..4975f781 100755..100644 --- a/scripts/automation/trex_control_plane/python_lib/python-daemon-2.0.5/ChangeLog +++ b/scripts/external_libs/python-daemon-2.0.5/ChangeLog diff --git a/scripts/automation/trex_control_plane/python_lib/python-daemon-2.0.5/LICENSE.ASF-2 b/scripts/external_libs/python-daemon-2.0.5/LICENSE.ASF-2 index d6456956..d6456956 100755..100644 --- a/scripts/automation/trex_control_plane/python_lib/python-daemon-2.0.5/LICENSE.ASF-2 +++ b/scripts/external_libs/python-daemon-2.0.5/LICENSE.ASF-2 diff --git a/scripts/automation/trex_control_plane/python_lib/python-daemon-2.0.5/LICENSE.GPL-3 b/scripts/external_libs/python-daemon-2.0.5/LICENSE.GPL-3 index 94a9ed02..94a9ed02 100755..100644 --- a/scripts/automation/trex_control_plane/python_lib/python-daemon-2.0.5/LICENSE.GPL-3 +++ b/scripts/external_libs/python-daemon-2.0.5/LICENSE.GPL-3 diff --git a/scripts/automation/trex_control_plane/python_lib/python-daemon-2.0.5/MANIFEST.in b/scripts/external_libs/python-daemon-2.0.5/MANIFEST.in index d3d4341e..d3d4341e 100755..100644 --- a/scripts/automation/trex_control_plane/python_lib/python-daemon-2.0.5/MANIFEST.in +++ b/scripts/external_libs/python-daemon-2.0.5/MANIFEST.in diff --git a/scripts/automation/trex_control_plane/python_lib/python-daemon-2.0.5/PKG-INFO b/scripts/external_libs/python-daemon-2.0.5/PKG-INFO index fd81f509..fd81f509 100755..100644 --- a/scripts/automation/trex_control_plane/python_lib/python-daemon-2.0.5/PKG-INFO +++ b/scripts/external_libs/python-daemon-2.0.5/PKG-INFO diff --git a/scripts/automation/trex_control_plane/python_lib/python-daemon-2.0.5/daemon/__init__.py b/scripts/external_libs/python-daemon-2.0.5/daemon/__init__.py index 4731a6ef..4731a6ef 100755..100644 --- a/scripts/automation/trex_control_plane/python_lib/python-daemon-2.0.5/daemon/__init__.py +++ b/scripts/external_libs/python-daemon-2.0.5/daemon/__init__.py diff --git a/scripts/automation/trex_control_plane/python_lib/python-daemon-2.0.5/daemon/_metadata.py 
b/scripts/external_libs/python-daemon-2.0.5/daemon/_metadata.py index 6d22a2b7..6d22a2b7 100755..100644 --- a/scripts/automation/trex_control_plane/python_lib/python-daemon-2.0.5/daemon/_metadata.py +++ b/scripts/external_libs/python-daemon-2.0.5/daemon/_metadata.py diff --git a/scripts/automation/trex_control_plane/python_lib/python-daemon-2.0.5/daemon/daemon.py b/scripts/external_libs/python-daemon-2.0.5/daemon/daemon.py index 07810cf1..07810cf1 100755..100644 --- a/scripts/automation/trex_control_plane/python_lib/python-daemon-2.0.5/daemon/daemon.py +++ b/scripts/external_libs/python-daemon-2.0.5/daemon/daemon.py diff --git a/scripts/automation/trex_control_plane/python_lib/python-daemon-2.0.5/daemon/pidfile.py b/scripts/external_libs/python-daemon-2.0.5/daemon/pidfile.py index 4517ee0e..4517ee0e 100755..100644 --- a/scripts/automation/trex_control_plane/python_lib/python-daemon-2.0.5/daemon/pidfile.py +++ b/scripts/external_libs/python-daemon-2.0.5/daemon/pidfile.py diff --git a/scripts/automation/trex_control_plane/python_lib/python-daemon-2.0.5/daemon/runner.py b/scripts/external_libs/python-daemon-2.0.5/daemon/runner.py index 6973cf1c..6973cf1c 100755..100644 --- a/scripts/automation/trex_control_plane/python_lib/python-daemon-2.0.5/daemon/runner.py +++ b/scripts/external_libs/python-daemon-2.0.5/daemon/runner.py diff --git a/scripts/automation/trex_control_plane/python_lib/python-daemon-2.0.5/doc/CREDITS b/scripts/external_libs/python-daemon-2.0.5/doc/CREDITS index feb65d5e..feb65d5e 100755..100644 --- a/scripts/automation/trex_control_plane/python_lib/python-daemon-2.0.5/doc/CREDITS +++ b/scripts/external_libs/python-daemon-2.0.5/doc/CREDITS diff --git a/scripts/automation/trex_control_plane/python_lib/python-daemon-2.0.5/doc/FAQ b/scripts/external_libs/python-daemon-2.0.5/doc/FAQ index 1fcc4658..1fcc4658 100755..100644 --- a/scripts/automation/trex_control_plane/python_lib/python-daemon-2.0.5/doc/FAQ +++ b/scripts/external_libs/python-daemon-2.0.5/doc/FAQ diff --git a/scripts/automation/trex_control_plane/python_lib/python-daemon-2.0.5/doc/TODO b/scripts/external_libs/python-daemon-2.0.5/doc/TODO index 81b41481..81b41481 100755..100644 --- a/scripts/automation/trex_control_plane/python_lib/python-daemon-2.0.5/doc/TODO +++ b/scripts/external_libs/python-daemon-2.0.5/doc/TODO diff --git a/scripts/automation/trex_control_plane/python_lib/python-daemon-2.0.5/doc/hacking.txt b/scripts/external_libs/python-daemon-2.0.5/doc/hacking.txt index 9484dbd0..9484dbd0 100755..100644 --- a/scripts/automation/trex_control_plane/python_lib/python-daemon-2.0.5/doc/hacking.txt +++ b/scripts/external_libs/python-daemon-2.0.5/doc/hacking.txt diff --git a/scripts/automation/trex_control_plane/python_lib/python-daemon-2.0.5/python_daemon.egg-info/PKG-INFO b/scripts/external_libs/python-daemon-2.0.5/python_daemon.egg-info/PKG-INFO index fd81f509..fd81f509 100755..100644 --- a/scripts/automation/trex_control_plane/python_lib/python-daemon-2.0.5/python_daemon.egg-info/PKG-INFO +++ b/scripts/external_libs/python-daemon-2.0.5/python_daemon.egg-info/PKG-INFO diff --git a/scripts/automation/trex_control_plane/python_lib/python-daemon-2.0.5/python_daemon.egg-info/SOURCES.txt b/scripts/external_libs/python-daemon-2.0.5/python_daemon.egg-info/SOURCES.txt index 6e176719..6e176719 100755..100644 --- a/scripts/automation/trex_control_plane/python_lib/python-daemon-2.0.5/python_daemon.egg-info/SOURCES.txt +++ b/scripts/external_libs/python-daemon-2.0.5/python_daemon.egg-info/SOURCES.txt diff --git 
a/scripts/automation/trex_control_plane/python_lib/python-daemon-2.0.5/python_daemon.egg-info/not-zip-safe b/scripts/external_libs/python-daemon-2.0.5/python_daemon.egg-info/dependency_links.txt index 8b137891..8b137891 100755..100644 --- a/scripts/automation/trex_control_plane/python_lib/python-daemon-2.0.5/python_daemon.egg-info/not-zip-safe +++ b/scripts/external_libs/python-daemon-2.0.5/python_daemon.egg-info/dependency_links.txt diff --git a/scripts/automation/trex_control_plane/python_lib/__init__.py b/scripts/external_libs/python-daemon-2.0.5/python_daemon.egg-info/not-zip-safe index d3f5a12f..8b137891 100755..100644 --- a/scripts/automation/trex_control_plane/python_lib/__init__.py +++ b/scripts/external_libs/python-daemon-2.0.5/python_daemon.egg-info/not-zip-safe @@ -1 +1 @@ -
+ diff --git a/scripts/automation/trex_control_plane/python_lib/python-daemon-2.0.5/python_daemon.egg-info/requires.txt b/scripts/external_libs/python-daemon-2.0.5/python_daemon.egg-info/requires.txt index d1496b02..d1496b02 100755..100644 --- a/scripts/automation/trex_control_plane/python_lib/python-daemon-2.0.5/python_daemon.egg-info/requires.txt +++ b/scripts/external_libs/python-daemon-2.0.5/python_daemon.egg-info/requires.txt diff --git a/scripts/automation/trex_control_plane/python_lib/python-daemon-2.0.5/python_daemon.egg-info/top_level.txt b/scripts/external_libs/python-daemon-2.0.5/python_daemon.egg-info/top_level.txt index 28e3ee0c..28e3ee0c 100755..100644 --- a/scripts/automation/trex_control_plane/python_lib/python-daemon-2.0.5/python_daemon.egg-info/top_level.txt +++ b/scripts/external_libs/python-daemon-2.0.5/python_daemon.egg-info/top_level.txt diff --git a/scripts/automation/trex_control_plane/python_lib/python-daemon-2.0.5/python_daemon.egg-info/version_info.json b/scripts/external_libs/python-daemon-2.0.5/python_daemon.egg-info/version_info.json index bac1b84f..bac1b84f 100755..100644 --- a/scripts/automation/trex_control_plane/python_lib/python-daemon-2.0.5/python_daemon.egg-info/version_info.json +++ b/scripts/external_libs/python-daemon-2.0.5/python_daemon.egg-info/version_info.json diff --git a/scripts/automation/trex_control_plane/python_lib/python-daemon-2.0.5/setup.cfg b/scripts/external_libs/python-daemon-2.0.5/setup.cfg index 9d3d2c02..9d3d2c02 100755..100644 --- a/scripts/automation/trex_control_plane/python_lib/python-daemon-2.0.5/setup.cfg +++ b/scripts/external_libs/python-daemon-2.0.5/setup.cfg diff --git a/scripts/automation/trex_control_plane/python_lib/python-daemon-2.0.5/setup.py b/scripts/external_libs/python-daemon-2.0.5/setup.py index 16a6a6a6..16a6a6a6 100755..100644 --- a/scripts/automation/trex_control_plane/python_lib/python-daemon-2.0.5/setup.py +++ b/scripts/external_libs/python-daemon-2.0.5/setup.py diff --git a/scripts/automation/trex_control_plane/python_lib/python-daemon-2.0.5/test/__init__.py b/scripts/external_libs/python-daemon-2.0.5/test/__init__.py index 398519f1..398519f1 100755..100644 --- a/scripts/automation/trex_control_plane/python_lib/python-daemon-2.0.5/test/__init__.py +++ b/scripts/external_libs/python-daemon-2.0.5/test/__init__.py diff --git a/scripts/automation/trex_control_plane/python_lib/python-daemon-2.0.5/test/scaffold.py b/scripts/external_libs/python-daemon-2.0.5/test/scaffold.py index 9a4f1150..9a4f1150 100755..100644 --- a/scripts/automation/trex_control_plane/python_lib/python-daemon-2.0.5/test/scaffold.py +++ b/scripts/external_libs/python-daemon-2.0.5/test/scaffold.py diff --git a/scripts/automation/trex_control_plane/python_lib/python-daemon-2.0.5/test/test_daemon.py b/scripts/external_libs/python-daemon-2.0.5/test/test_daemon.py index a911858a..a911858a 100755..100644 --- a/scripts/automation/trex_control_plane/python_lib/python-daemon-2.0.5/test/test_daemon.py +++ b/scripts/external_libs/python-daemon-2.0.5/test/test_daemon.py diff --git a/scripts/automation/trex_control_plane/python_lib/python-daemon-2.0.5/test/test_metadata.py b/scripts/external_libs/python-daemon-2.0.5/test/test_metadata.py index 692753f4..692753f4 100755..100644 --- a/scripts/automation/trex_control_plane/python_lib/python-daemon-2.0.5/test/test_metadata.py +++ b/scripts/external_libs/python-daemon-2.0.5/test/test_metadata.py diff --git a/scripts/automation/trex_control_plane/python_lib/python-daemon-2.0.5/test/test_pidfile.py 
b/scripts/external_libs/python-daemon-2.0.5/test/test_pidfile.py index 9b636ec8..9b636ec8 100755..100644 --- a/scripts/automation/trex_control_plane/python_lib/python-daemon-2.0.5/test/test_pidfile.py +++ b/scripts/external_libs/python-daemon-2.0.5/test/test_pidfile.py diff --git a/scripts/automation/trex_control_plane/python_lib/python-daemon-2.0.5/test/test_runner.py b/scripts/external_libs/python-daemon-2.0.5/test/test_runner.py index 4c0c714a..4c0c714a 100755..100644 --- a/scripts/automation/trex_control_plane/python_lib/python-daemon-2.0.5/test/test_runner.py +++ b/scripts/external_libs/python-daemon-2.0.5/test/test_runner.py diff --git a/scripts/automation/trex_control_plane/python_lib/python-daemon-2.0.5/test_version.py b/scripts/external_libs/python-daemon-2.0.5/test_version.py index b52f521d..b52f521d 100755..100644 --- a/scripts/automation/trex_control_plane/python_lib/python-daemon-2.0.5/test_version.py +++ b/scripts/external_libs/python-daemon-2.0.5/test_version.py diff --git a/scripts/automation/trex_control_plane/python_lib/python-daemon-2.0.5/version.py b/scripts/external_libs/python-daemon-2.0.5/version.py index 7e4c4202..7e4c4202 100755..100644 --- a/scripts/automation/trex_control_plane/python_lib/python-daemon-2.0.5/version.py +++ b/scripts/external_libs/python-daemon-2.0.5/version.py diff --git a/scripts/automation/trex_control_plane/python_lib/termstyle/MANIFEST.in b/scripts/external_libs/termstyle/MANIFEST.in index 14dafaf3..14dafaf3 100755..100644 --- a/scripts/automation/trex_control_plane/python_lib/termstyle/MANIFEST.in +++ b/scripts/external_libs/termstyle/MANIFEST.in diff --git a/scripts/automation/trex_control_plane/python_lib/termstyle/Makefile b/scripts/external_libs/termstyle/Makefile index 02151dca..02151dca 100755..100644 --- a/scripts/automation/trex_control_plane/python_lib/termstyle/Makefile +++ b/scripts/external_libs/termstyle/Makefile diff --git a/scripts/automation/trex_control_plane/python_lib/termstyle/README.rst b/scripts/external_libs/termstyle/README.rst index f3dfa0ab..f3dfa0ab 100755..100644 --- a/scripts/automation/trex_control_plane/python_lib/termstyle/README.rst +++ b/scripts/external_libs/termstyle/README.rst diff --git a/scripts/automation/trex_control_plane/python_lib/termstyle/VERSION b/scripts/external_libs/termstyle/VERSION index 345f8cc0..345f8cc0 100755..100644 --- a/scripts/automation/trex_control_plane/python_lib/termstyle/VERSION +++ b/scripts/external_libs/termstyle/VERSION diff --git a/scripts/external_libs/termstyle/__init__.py b/scripts/external_libs/termstyle/__init__.py new file mode 100644 index 00000000..e69de29b --- /dev/null +++ b/scripts/external_libs/termstyle/__init__.py diff --git a/scripts/automation/trex_control_plane/python_lib/termstyle/python-termstyle.xml b/scripts/external_libs/termstyle/python-termstyle.xml index b6b08bd7..b6b08bd7 100755..100644 --- a/scripts/automation/trex_control_plane/python_lib/termstyle/python-termstyle.xml +++ b/scripts/external_libs/termstyle/python-termstyle.xml diff --git a/scripts/automation/trex_control_plane/python_lib/termstyle/setup.py b/scripts/external_libs/termstyle/setup.py index 69b11cbb..69b11cbb 100755..100644 --- a/scripts/automation/trex_control_plane/python_lib/termstyle/setup.py +++ b/scripts/external_libs/termstyle/setup.py diff --git a/scripts/automation/trex_control_plane/python_lib/termstyle/termstyle.py b/scripts/external_libs/termstyle/termstyle.py index 62a3a920..62a3a920 100755..100644 --- 
a/scripts/automation/trex_control_plane/python_lib/termstyle/termstyle.py +++ b/scripts/external_libs/termstyle/termstyle.py diff --git a/scripts/automation/trex_control_plane/python_lib/termstyle/test2.py b/scripts/external_libs/termstyle/test2.py index 2d84c375..2d84c375 100755..100644 --- a/scripts/automation/trex_control_plane/python_lib/termstyle/test2.py +++ b/scripts/external_libs/termstyle/test2.py diff --git a/scripts/automation/trex_control_plane/python_lib/termstyle/test3.py b/scripts/external_libs/termstyle/test3.py index 861c44f9..861c44f9 100755..100644 --- a/scripts/automation/trex_control_plane/python_lib/termstyle/test3.py +++ b/scripts/external_libs/termstyle/test3.py diff --git a/scripts/automation/trex_control_plane/python_lib/termstyle/test_all.sh b/scripts/external_libs/termstyle/test_all.sh index d28545a9..d28545a9 100755..100644 --- a/scripts/automation/trex_control_plane/python_lib/termstyle/test_all.sh +++ b/scripts/external_libs/termstyle/test_all.sh diff --git a/scripts/external_libs/zmq/__init__.py b/scripts/external_libs/zmq/__init__.py new file mode 100644 index 00000000..3408b3ba --- /dev/null +++ b/scripts/external_libs/zmq/__init__.py @@ -0,0 +1,64 @@ +"""Python bindings for 0MQ.""" + +# Copyright (C) PyZMQ Developers +# Distributed under the terms of the Modified BSD License. + +import os +import sys +import glob + +# load bundled libzmq, if there is one: + +here = os.path.dirname(__file__) + +bundled = [] +bundled_sodium = [] +for ext in ('pyd', 'so', 'dll', 'dylib'): + bundled_sodium.extend(glob.glob(os.path.join(here, 'libsodium*.%s*' % ext))) + bundled.extend(glob.glob(os.path.join(here, 'libzmq*.%s*' % ext))) + +if bundled: + import ctypes + if bundled_sodium: + if bundled[0].endswith('.pyd'): + # a Windows Extension + _libsodium = ctypes.cdll.LoadLibrary(bundled_sodium[0]) + else: + _libsodium = ctypes.CDLL(bundled_sodium[0], mode=ctypes.RTLD_GLOBAL) + if bundled[0].endswith('.pyd'): + # a Windows Extension + _libzmq = ctypes.cdll.LoadLibrary(bundled[0]) + else: + _libzmq = ctypes.CDLL(bundled[0], mode=ctypes.RTLD_GLOBAL) + del ctypes +else: + import zipimport + try: + if isinstance(__loader__, zipimport.zipimporter): + # a zipped pyzmq egg + from zmq import libzmq as _libzmq + except (NameError, ImportError): + pass + finally: + del zipimport + +del os, sys, glob, here, bundled, bundled_sodium, ext + +# zmq top-level imports + +from zmq import backend +from zmq.backend import * +from zmq import sugar +from zmq.sugar import * +from zmq import devices + +def get_includes(): + """Return a list of directories to include for linking against pyzmq with cython.""" + from os.path import join, dirname, abspath, pardir + base = dirname(__file__) + parent = abspath(join(base, pardir)) + return [ parent ] + [ join(parent, base, subdir) for subdir in ('utils',) ] + + +__all__ = ['get_includes'] + sugar.__all__ + backend.__all__ + diff --git a/scripts/external_libs/zmq/auth/__init__.py b/scripts/external_libs/zmq/auth/__init__.py new file mode 100644 index 00000000..11d3ad6b --- /dev/null +++ b/scripts/external_libs/zmq/auth/__init__.py @@ -0,0 +1,10 @@ +"""Utilities for ZAP authentication. + +To run authentication in a background thread, see :mod:`zmq.auth.thread`. +For integration with the tornado eventloop, see :mod:`zmq.auth.ioloop`. + +.. 
versionadded:: 14.1 +""" + +from .base import * +from .certs import * diff --git a/scripts/external_libs/zmq/auth/base.py b/scripts/external_libs/zmq/auth/base.py new file mode 100644 index 00000000..9b4aaed7 --- /dev/null +++ b/scripts/external_libs/zmq/auth/base.py @@ -0,0 +1,272 @@ +"""Base implementation of 0MQ authentication.""" + +# Copyright (C) PyZMQ Developers +# Distributed under the terms of the Modified BSD License. + +import logging + +import zmq +from zmq.utils import z85 +from zmq.utils.strtypes import bytes, unicode, b, u +from zmq.error import _check_version + +from .certs import load_certificates + + +CURVE_ALLOW_ANY = '*' +VERSION = b'1.0' + +class Authenticator(object): + """Implementation of ZAP authentication for zmq connections. + + Note: + - libzmq provides four levels of security: default NULL (which the Authenticator does + not see), and authenticated NULL, PLAIN, and CURVE, which the Authenticator can see. + - until you add policies, all incoming NULL connections are allowed + (classic ZeroMQ behavior), and all PLAIN and CURVE connections are denied. + """ + + def __init__(self, context=None, encoding='utf-8', log=None): + _check_version((4,0), "security") + self.context = context or zmq.Context.instance() + self.encoding = encoding + self.allow_any = False + self.zap_socket = None + self.whitelist = set() + self.blacklist = set() + # passwords is a dict keyed by domain and contains values + # of dicts with username:password pairs. + self.passwords = {} + # certs is dict keyed by domain and contains values + # of dicts keyed by the public keys from the specified location. + self.certs = {} + self.log = log or logging.getLogger('zmq.auth') + + def start(self): + """Create and bind the ZAP socket""" + self.zap_socket = self.context.socket(zmq.REP) + self.zap_socket.linger = 1 + self.zap_socket.bind("inproc://zeromq.zap.01") + + def stop(self): + """Close the ZAP socket""" + if self.zap_socket: + self.zap_socket.close() + self.zap_socket = None + + def allow(self, *addresses): + """Allow (whitelist) IP address(es). + + Connections from addresses not in the whitelist will be rejected. + + - For NULL, all clients from this address will be accepted. + - For PLAIN and CURVE, they will be allowed to continue with authentication. + + whitelist is mutually exclusive with blacklist. + """ + if self.blacklist: + raise ValueError("Only use a whitelist or a blacklist, not both") + self.whitelist.update(addresses) + + def deny(self, *addresses): + """Deny (blacklist) IP address(es). + + Addresses not in the blacklist will be allowed to continue with authentication. + + Blacklist is mutually exclusive with whitelist. + """ + if self.whitelist: + raise ValueError("Only use a whitelist or a blacklist, not both") + self.blacklist.update(addresses) + + def configure_plain(self, domain='*', passwords=None): + """Configure PLAIN authentication for a given domain. + + PLAIN authentication uses a plain-text password file. + To cover all domains, use "*". + You can modify the password file at any time; it is reloaded automatically. + """ + if passwords: + self.passwords[domain] = passwords + + def configure_curve(self, domain='*', location=None): + """Configure CURVE authentication for a given domain. + + CURVE authentication uses a directory that holds all public client certificates, + i.e. their public keys. + + To cover all domains, use "*". + + You can add and remove certificates in that directory at any time. 
+
+        To allow all client keys without checking, specify CURVE_ALLOW_ANY for the location.
+        """
+        # If location is CURVE_ALLOW_ANY then allow all clients. Otherwise
+        # treat location as a directory that holds the certificates.
+        if location == CURVE_ALLOW_ANY:
+            self.allow_any = True
+        else:
+            self.allow_any = False
+            try:
+                self.certs[domain] = load_certificates(location)
+            except Exception as e:
+                self.log.error("Failed to load CURVE certs from %s: %s", location, e)
+
+    def handle_zap_message(self, msg):
+        """Perform ZAP authentication"""
+        if len(msg) < 6:
+            self.log.error("Invalid ZAP message, not enough frames: %r", msg)
+            if len(msg) < 2:
+                self.log.error("Not enough information to reply")
+            else:
+                self._send_zap_reply(msg[1], b"400", b"Not enough frames")
+            return
+
+        version, request_id, domain, address, identity, mechanism = msg[:6]
+        credentials = msg[6:]
+
+        domain = u(domain, self.encoding, 'replace')
+        address = u(address, self.encoding, 'replace')
+
+        if (version != VERSION):
+            self.log.error("Invalid ZAP version: %r", msg)
+            self._send_zap_reply(request_id, b"400", b"Invalid version")
+            return
+
+        self.log.debug("version: %r, request_id: %r, domain: %r,"
+                       " address: %r, identity: %r, mechanism: %r",
+                       version, request_id, domain,
+                       address, identity, mechanism,
+                       )
+
+
+        # Is the address explicitly whitelisted or blacklisted?
+        allowed = False
+        denied = False
+        reason = b"NO ACCESS"
+
+        if self.whitelist:
+            if address in self.whitelist:
+                allowed = True
+                self.log.debug("PASSED (whitelist) address=%s", address)
+            else:
+                denied = True
+                reason = b"Address not in whitelist"
+                self.log.debug("DENIED (not in whitelist) address=%s", address)
+
+        elif self.blacklist:
+            if address in self.blacklist:
+                denied = True
+                reason = b"Address is blacklisted"
+                self.log.debug("DENIED (blacklist) address=%s", address)
+            else:
+                allowed = True
+                self.log.debug("PASSED (not in blacklist) address=%s", address)
+
+        # Perform authentication mechanism-specific checks if necessary
+        username = u("user")
+        if not denied:
+
+            if mechanism == b'NULL' and not allowed:
+                # For NULL, we allow if the address wasn't blacklisted
+                self.log.debug("ALLOWED (NULL)")
+                allowed = True
+
+            elif mechanism == b'PLAIN':
+                # For PLAIN, even a whitelisted address must authenticate
+                if len(credentials) != 2:
+                    self.log.error("Invalid PLAIN credentials: %r", credentials)
+                    self._send_zap_reply(request_id, b"400", b"Invalid credentials")
+                    return
+                username, password = [ u(c, self.encoding, 'replace') for c in credentials ]
+                allowed, reason = self._authenticate_plain(domain, username, password)
+
+            elif mechanism == b'CURVE':
+                # For CURVE, even a whitelisted address must authenticate
+                if len(credentials) != 1:
+                    self.log.error("Invalid CURVE credentials: %r", credentials)
+                    self._send_zap_reply(request_id, b"400", b"Invalid credentials")
+                    return
+                key = credentials[0]
+                allowed, reason = self._authenticate_curve(domain, key)
+
+        if allowed:
+            self._send_zap_reply(request_id, b"200", b"OK", username)
+        else:
+            self._send_zap_reply(request_id, b"400", reason)
+
+    def _authenticate_plain(self, domain, username, password):
+        """PLAIN ZAP authentication"""
+        allowed = False
+        reason = b""
+        if self.passwords:
+            # If no domain is specified, use the default domain
+            if not domain:
+                domain = '*'
+
+            if domain in self.passwords:
+                if username in self.passwords[domain]:
+                    if password == self.passwords[domain][username]:
+                        allowed = True
+                    else:
+                        reason = b"Invalid password"
+                else:
+                    reason = b"Invalid username"
+ else: + reason = b"Invalid domain" + + if allowed: + self.log.debug("ALLOWED (PLAIN) domain=%s username=%s password=%s", + domain, username, password, + ) + else: + self.log.debug("DENIED %s", reason) + + else: + reason = b"No passwords defined" + self.log.debug("DENIED (PLAIN) %s", reason) + + return allowed, reason + + def _authenticate_curve(self, domain, client_key): + """CURVE ZAP authentication""" + allowed = False + reason = b"" + if self.allow_any: + allowed = True + reason = b"OK" + self.log.debug("ALLOWED (CURVE allow any client)") + else: + # If no explicit domain is specified then use the default domain + if not domain: + domain = '*' + + if domain in self.certs: + # The certs dict stores keys in z85 format, convert binary key to z85 bytes + z85_client_key = z85.encode(client_key) + if z85_client_key in self.certs[domain] or self.certs[domain] == b'OK': + allowed = True + reason = b"OK" + else: + reason = b"Unknown key" + + status = "ALLOWED" if allowed else "DENIED" + self.log.debug("%s (CURVE) domain=%s client_key=%s", + status, domain, z85_client_key, + ) + else: + reason = b"Unknown domain" + + return allowed, reason + + def _send_zap_reply(self, request_id, status_code, status_text, user_id='user'): + """Send a ZAP reply to finish the authentication.""" + user_id = user_id if status_code == b'200' else b'' + if isinstance(user_id, unicode): + user_id = user_id.encode(self.encoding, 'replace') + metadata = b'' # not currently used + self.log.debug("ZAP reply code=%s text=%s", status_code, status_text) + reply = [VERSION, request_id, status_code, status_text, user_id, metadata] + self.zap_socket.send_multipart(reply) + +__all__ = ['Authenticator', 'CURVE_ALLOW_ANY'] diff --git a/scripts/external_libs/zmq/auth/certs.py b/scripts/external_libs/zmq/auth/certs.py new file mode 100644 index 00000000..4d26ad7b --- /dev/null +++ b/scripts/external_libs/zmq/auth/certs.py @@ -0,0 +1,119 @@ +"""0MQ authentication related functions and classes.""" + +# Copyright (C) PyZMQ Developers +# Distributed under the terms of the Modified BSD License. + + +import datetime +import glob +import io +import os +import zmq +from zmq.utils.strtypes import bytes, unicode, b, u + + +_cert_secret_banner = u("""# **** Generated on {0} by pyzmq **** +# ZeroMQ CURVE **Secret** Certificate +# DO NOT PROVIDE THIS FILE TO OTHER USERS nor change its permissions. + +""") + +_cert_public_banner = u("""# **** Generated on {0} by pyzmq **** +# ZeroMQ CURVE Public Certificate +# Exchange securely, or use a secure mechanism to verify the contents +# of this file after exchange. Store public certificates in your home +# directory, in the .curve subdirectory. + +""") + +def _write_key_file(key_filename, banner, public_key, secret_key=None, metadata=None, encoding='utf-8'): + """Create a certificate file""" + if isinstance(public_key, bytes): + public_key = public_key.decode(encoding) + if isinstance(secret_key, bytes): + secret_key = secret_key.decode(encoding) + with io.open(key_filename, 'w', encoding='utf8') as f: + f.write(banner.format(datetime.datetime.now())) + + f.write(u('metadata\n')) + if metadata: + for k, v in metadata.items(): + if isinstance(v, bytes): + v = v.decode(encoding) + f.write(u(" {0} = {1}\n").format(k, v)) + + f.write(u('curve\n')) + f.write(u(" public-key = \"{0}\"\n").format(public_key)) + + if secret_key: + f.write(u(" secret-key = \"{0}\"\n").format(secret_key)) + + +def create_certificates(key_dir, name, metadata=None): + """Create zmq certificates. 
+ + Returns the file paths to the public and secret certificate files. + """ + public_key, secret_key = zmq.curve_keypair() + base_filename = os.path.join(key_dir, name) + secret_key_file = "{0}.key_secret".format(base_filename) + public_key_file = "{0}.key".format(base_filename) + now = datetime.datetime.now() + + _write_key_file(public_key_file, + _cert_public_banner.format(now), + public_key) + + _write_key_file(secret_key_file, + _cert_secret_banner.format(now), + public_key, + secret_key=secret_key, + metadata=metadata) + + return public_key_file, secret_key_file + + +def load_certificate(filename): + """Load public and secret key from a zmq certificate. + + Returns (public_key, secret_key) + + If the certificate file only contains the public key, + secret_key will be None. + """ + public_key = None + secret_key = None + if not os.path.exists(filename): + raise IOError("Invalid certificate file: {0}".format(filename)) + + with open(filename, 'rb') as f: + for line in f: + line = line.strip() + if line.startswith(b'#'): + continue + if line.startswith(b'public-key'): + public_key = line.split(b"=", 1)[1].strip(b' \t\'"') + if line.startswith(b'secret-key'): + secret_key = line.split(b"=", 1)[1].strip(b' \t\'"') + if public_key and secret_key: + break + + return public_key, secret_key + + +def load_certificates(directory='.'): + """Load public keys from all certificates in a directory""" + certs = {} + if not os.path.isdir(directory): + raise IOError("Invalid certificate directory: {0}".format(directory)) + # Follow czmq pattern of public keys stored in *.key files. + glob_string = os.path.join(directory, "*.key") + + cert_files = glob.glob(glob_string) + for cert_file in cert_files: + public_key, _ = load_certificate(cert_file) + if public_key: + certs[public_key] = 'OK' + return certs + +__all__ = ['create_certificates', 'load_certificate', 'load_certificates'] diff --git a/scripts/external_libs/zmq/auth/ioloop.py b/scripts/external_libs/zmq/auth/ioloop.py new file mode 100644 index 00000000..1f448b47 --- /dev/null +++ b/scripts/external_libs/zmq/auth/ioloop.py @@ -0,0 +1,34 @@ +"""ZAP Authenticator integrated with the tornado IOLoop. + +.. versionadded:: 14.1 +""" + +# Copyright (C) PyZMQ Developers +# Distributed under the terms of the Modified BSD License. + +from zmq.eventloop import ioloop, zmqstream +from .base import Authenticator + + +class IOLoopAuthenticator(Authenticator): + """ZAP authentication for use in the tornado IOLoop""" + + def __init__(self, context=None, encoding='utf-8', log=None, io_loop=None): + super(IOLoopAuthenticator, self).__init__(context) + self.zap_stream = None + self.io_loop = io_loop or ioloop.IOLoop.instance() + + def start(self): + """Start ZAP authentication""" + super(IOLoopAuthenticator, self).start() + self.zap_stream = zmqstream.ZMQStream(self.zap_socket, self.io_loop) + self.zap_stream.on_recv(self.handle_zap_message) + + def stop(self): + """Stop ZAP authentication""" + if self.zap_stream: + self.zap_stream.close() + self.zap_stream = None + super(IOLoopAuthenticator, self).stop() + +__all__ = ['IOLoopAuthenticator'] diff --git a/scripts/external_libs/zmq/auth/thread.py b/scripts/external_libs/zmq/auth/thread.py new file mode 100644 index 00000000..8c3355a9 --- /dev/null +++ b/scripts/external_libs/zmq/auth/thread.py @@ -0,0 +1,184 @@ +"""ZAP Authenticator in a Python Thread. + +.. versionadded:: 14.1 +""" + +# Copyright (C) PyZMQ Developers +# Distributed under the terms of the Modified BSD License. 
+ +import logging +from threading import Thread + +import zmq +from zmq.utils import jsonapi +from zmq.utils.strtypes import bytes, unicode, b, u + +from .base import Authenticator + +class AuthenticationThread(Thread): + """A Thread for running a zmq Authenticator + + This is run in the background by ThreadedAuthenticator + """ + + def __init__(self, context, endpoint, encoding='utf-8', log=None): + super(AuthenticationThread, self).__init__() + self.context = context or zmq.Context.instance() + self.encoding = encoding + self.log = log = log or logging.getLogger('zmq.auth') + self.authenticator = Authenticator(context, encoding=encoding, log=log) + + # create a socket to communicate back to main thread. + self.pipe = context.socket(zmq.PAIR) + self.pipe.linger = 1 + self.pipe.connect(endpoint) + + def run(self): + """ Start the Authentication Agent thread task """ + self.authenticator.start() + zap = self.authenticator.zap_socket + poller = zmq.Poller() + poller.register(self.pipe, zmq.POLLIN) + poller.register(zap, zmq.POLLIN) + while True: + try: + socks = dict(poller.poll()) + except zmq.ZMQError: + break # interrupted + + if self.pipe in socks and socks[self.pipe] == zmq.POLLIN: + terminate = self._handle_pipe() + if terminate: + break + + if zap in socks and socks[zap] == zmq.POLLIN: + self._handle_zap() + + self.pipe.close() + self.authenticator.stop() + + def _handle_zap(self): + """ + Handle a message from the ZAP socket. + """ + msg = self.authenticator.zap_socket.recv_multipart() + if not msg: return + self.authenticator.handle_zap_message(msg) + + def _handle_pipe(self): + """ + Handle a message from front-end API. + """ + terminate = False + + # Get the whole message off the pipe in one go + msg = self.pipe.recv_multipart() + + if msg is None: + terminate = True + return terminate + + command = msg[0] + self.log.debug("auth received API command %r", command) + + if command == b'ALLOW': + addresses = [u(m, self.encoding) for m in msg[1:]] + try: + self.authenticator.allow(*addresses) + except Exception as e: + self.log.exception("Failed to allow %s", addresses) + + elif command == b'DENY': + addresses = [u(m, self.encoding) for m in msg[1:]] + try: + self.authenticator.deny(*addresses) + except Exception as e: + self.log.exception("Failed to deny %s", addresses) + + elif command == b'PLAIN': + domain = u(msg[1], self.encoding) + json_passwords = msg[2] + self.authenticator.configure_plain(domain, jsonapi.loads(json_passwords)) + + elif command == b'CURVE': + # For now we don't do anything with domains + domain = u(msg[1], self.encoding) + + # If location is CURVE_ALLOW_ANY, allow all clients. Otherwise + # treat location as a directory that holds the certificates. 
+ location = u(msg[2], self.encoding) + self.authenticator.configure_curve(domain, location) + + elif command == b'TERMINATE': + terminate = True + + else: + self.log.error("Invalid auth command from API: %r", command) + + return terminate + +def _inherit_docstrings(cls): + """inherit docstrings from Authenticator, so we don't duplicate them""" + for name, method in cls.__dict__.items(): + if name.startswith('_'): + continue + upstream_method = getattr(Authenticator, name, None) + if not method.__doc__: + method.__doc__ = upstream_method.__doc__ + return cls + +@_inherit_docstrings +class ThreadAuthenticator(object): + """Run ZAP authentication in a background thread""" + + def __init__(self, context=None, encoding='utf-8', log=None): + self.context = context or zmq.Context.instance() + self.log = log + self.encoding = encoding + self.pipe = None + self.pipe_endpoint = "inproc://{0}.inproc".format(id(self)) + self.thread = None + + def allow(self, *addresses): + self.pipe.send_multipart([b'ALLOW'] + [b(a, self.encoding) for a in addresses]) + + def deny(self, *addresses): + self.pipe.send_multipart([b'DENY'] + [b(a, self.encoding) for a in addresses]) + + def configure_plain(self, domain='*', passwords=None): + self.pipe.send_multipart([b'PLAIN', b(domain, self.encoding), jsonapi.dumps(passwords or {})]) + + def configure_curve(self, domain='*', location=''): + domain = b(domain, self.encoding) + location = b(location, self.encoding) + self.pipe.send_multipart([b'CURVE', domain, location]) + + def start(self): + """Start the authentication thread""" + # create a socket to communicate with auth thread. + self.pipe = self.context.socket(zmq.PAIR) + self.pipe.linger = 1 + self.pipe.bind(self.pipe_endpoint) + self.thread = AuthenticationThread(self.context, self.pipe_endpoint, encoding=self.encoding, log=self.log) + self.thread.start() + + def stop(self): + """Stop the authentication thread""" + if self.pipe: + self.pipe.send(b'TERMINATE') + if self.is_alive(): + self.thread.join() + self.thread = None + self.pipe.close() + self.pipe = None + + def is_alive(self): + """Is the ZAP thread currently running?""" + if self.thread and self.thread.is_alive(): + return True + return False + + def __del__(self): + self.stop() + +__all__ = ['ThreadAuthenticator'] diff --git a/scripts/external_libs/zmq/backend/__init__.py b/scripts/external_libs/zmq/backend/__init__.py new file mode 100644 index 00000000..7cac725c --- /dev/null +++ b/scripts/external_libs/zmq/backend/__init__.py @@ -0,0 +1,45 @@ +"""Import basic exposure of libzmq C API as a backend""" + +# Copyright (C) PyZMQ Developers +# Distributed under the terms of the Modified BSD License. + + +import os +import platform +import sys + +from zmq.utils.sixcerpt import reraise + +from .select import public_api, select_backend + +if 'PYZMQ_BACKEND' in os.environ: + backend = os.environ['PYZMQ_BACKEND'] + if backend in ('cython', 'cffi'): + backend = 'zmq.backend.%s' % backend + _ns = select_backend(backend) +else: + # default to cython, fallback to cffi + # (reverse on PyPy) + if platform.python_implementation() == 'PyPy': + first, second = ('zmq.backend.cffi', 'zmq.backend.cython') + else: + first, second = ('zmq.backend.cython', 'zmq.backend.cffi') + + try: + _ns = select_backend(first) + except Exception: + exc_info = sys.exc_info() + exc = exc_info[1] + try: + _ns = select_backend(second) + except ImportError: + # prevent 'During handling of the above exception...' on py3 + # can't use `raise ... 
from` on Python 2 + if hasattr(exc, '__cause__'): + exc.__cause__ = None + # raise the *first* error, not the fallback + reraise(*exc_info) + +globals().update(_ns) + +__all__ = public_api diff --git a/scripts/external_libs/zmq/backend/cffi/__init__.py b/scripts/external_libs/zmq/backend/cffi/__init__.py new file mode 100644 index 00000000..ca3164d3 --- /dev/null +++ b/scripts/external_libs/zmq/backend/cffi/__init__.py @@ -0,0 +1,22 @@ +"""CFFI backend (for PyPY)""" + +# Copyright (C) PyZMQ Developers +# Distributed under the terms of the Modified BSD License. + +from zmq.backend.cffi import (constants, error, message, context, socket, + _poll, devices, utils) + +__all__ = [] +for submod in (constants, error, message, context, socket, + _poll, devices, utils): + __all__.extend(submod.__all__) + +from .constants import * +from .error import * +from .message import * +from .context import * +from .socket import * +from .devices import * +from ._poll import * +from ._cffi import zmq_version_info, ffi +from .utils import * diff --git a/scripts/external_libs/zmq/backend/cffi/_cdefs.h b/scripts/external_libs/zmq/backend/cffi/_cdefs.h new file mode 100644 index 00000000..d3300575 --- /dev/null +++ b/scripts/external_libs/zmq/backend/cffi/_cdefs.h @@ -0,0 +1,68 @@ +void zmq_version(int *major, int *minor, int *patch); + +void* zmq_socket(void *context, int type); +int zmq_close(void *socket); + +int zmq_bind(void *socket, const char *endpoint); +int zmq_connect(void *socket, const char *endpoint); + +int zmq_errno(void); +const char * zmq_strerror(int errnum); + +void* zmq_stopwatch_start(void); +unsigned long zmq_stopwatch_stop(void *watch); +void zmq_sleep(int seconds_); +int zmq_device(int device, void *frontend, void *backend); + +int zmq_unbind(void *socket, const char *endpoint); +int zmq_disconnect(void *socket, const char *endpoint); +void* zmq_ctx_new(); +int zmq_ctx_destroy(void *context); +int zmq_ctx_get(void *context, int opt); +int zmq_ctx_set(void *context, int opt, int optval); +int zmq_proxy(void *frontend, void *backend, void *capture); +int zmq_socket_monitor(void *socket, const char *addr, int events); + +int zmq_curve_keypair (char *z85_public_key, char *z85_secret_key); +int zmq_has (const char *capability); + +typedef struct { ...; } zmq_msg_t; +typedef ... 
zmq_free_fn; + +int zmq_msg_init(zmq_msg_t *msg); +int zmq_msg_init_size(zmq_msg_t *msg, size_t size); +int zmq_msg_init_data(zmq_msg_t *msg, + void *data, + size_t size, + zmq_free_fn *ffn, + void *hint); + +size_t zmq_msg_size(zmq_msg_t *msg); +void *zmq_msg_data(zmq_msg_t *msg); +int zmq_msg_close(zmq_msg_t *msg); + +int zmq_msg_send(zmq_msg_t *msg, void *socket, int flags); +int zmq_msg_recv(zmq_msg_t *msg, void *socket, int flags); + +int zmq_getsockopt(void *socket, + int option_name, + void *option_value, + size_t *option_len); + +int zmq_setsockopt(void *socket, + int option_name, + const void *option_value, + size_t option_len); +typedef struct +{ + void *socket; + int fd; + short events; + short revents; +} zmq_pollitem_t; + +int zmq_poll(zmq_pollitem_t *items, int nitems, long timeout); + +// miscellany +void * memcpy(void *restrict s1, const void *restrict s2, size_t n); +int get_ipc_path_max_len(void); diff --git a/scripts/external_libs/zmq/backend/cffi/_cffi.py b/scripts/external_libs/zmq/backend/cffi/_cffi.py new file mode 100644 index 00000000..c73ebf83 --- /dev/null +++ b/scripts/external_libs/zmq/backend/cffi/_cffi.py @@ -0,0 +1,127 @@ +# coding: utf-8 +"""The main CFFI wrapping of libzmq""" + +# Copyright (C) PyZMQ Developers +# Distributed under the terms of the Modified BSD License. + + +import json +import os +from os.path import dirname, join +from cffi import FFI + +from zmq.utils.constant_names import all_names, no_prefix + + +base_zmq_version = (3,2,2) + +def load_compiler_config(): + """load pyzmq compiler arguments""" + import zmq + zmq_dir = dirname(zmq.__file__) + zmq_parent = dirname(zmq_dir) + + fname = join(zmq_dir, 'utils', 'compiler.json') + if os.path.exists(fname): + with open(fname) as f: + cfg = json.load(f) + else: + cfg = {} + + cfg.setdefault("include_dirs", []) + cfg.setdefault("library_dirs", []) + cfg.setdefault("runtime_library_dirs", []) + cfg.setdefault("libraries", ["zmq"]) + + # cast to str, because cffi can't handle unicode paths (?!) + cfg['libraries'] = [str(lib) for lib in cfg['libraries']] + for key in ("include_dirs", "library_dirs", "runtime_library_dirs"): + # interpret paths relative to parent of zmq (like source tree) + abs_paths = [] + for p in cfg[key]: + if p.startswith('zmq'): + p = join(zmq_parent, p) + abs_paths.append(str(p)) + cfg[key] = abs_paths + return cfg + + +def zmq_version_info(): + """Get libzmq version as tuple of ints""" + major = ffi.new('int*') + minor = ffi.new('int*') + patch = ffi.new('int*') + + C.zmq_version(major, minor, patch) + + return (int(major[0]), int(minor[0]), int(patch[0])) + + +cfg = load_compiler_config() +ffi = FFI() + +def _make_defines(names): + _names = [] + for name in names: + define_line = "#define %s ..." 
% (name) + _names.append(define_line) + + return "\n".join(_names) + +c_constant_names = [] +for name in all_names: + if no_prefix(name): + c_constant_names.append(name) + else: + c_constant_names.append("ZMQ_" + name) + +# load ffi definitions +here = os.path.dirname(__file__) +with open(os.path.join(here, '_cdefs.h')) as f: + _cdefs = f.read() + +with open(os.path.join(here, '_verify.c')) as f: + _verify = f.read() + +ffi.cdef(_cdefs) +ffi.cdef(_make_defines(c_constant_names)) + +try: + C = ffi.verify(_verify, + modulename='_cffi_ext', + libraries=cfg['libraries'], + include_dirs=cfg['include_dirs'], + library_dirs=cfg['library_dirs'], + runtime_library_dirs=cfg['runtime_library_dirs'], + ) + _version_info = zmq_version_info() +except Exception as e: + raise ImportError("PyZMQ CFFI backend couldn't find zeromq: %s\n" + "Please check that you have zeromq headers and libraries." % e) + +if _version_info < (3,2,2): + raise ImportError("PyZMQ CFFI backend requires zeromq >= 3.2.2," + " but found %i.%i.%i" % _version_info + ) + +nsp = new_sizet_pointer = lambda length: ffi.new('size_t*', length) + +new_uint64_pointer = lambda: (ffi.new('uint64_t*'), + nsp(ffi.sizeof('uint64_t'))) +new_int64_pointer = lambda: (ffi.new('int64_t*'), + nsp(ffi.sizeof('int64_t'))) +new_int_pointer = lambda: (ffi.new('int*'), + nsp(ffi.sizeof('int'))) +new_binary_data = lambda length: (ffi.new('char[%d]' % (length)), + nsp(ffi.sizeof('char') * length)) + +value_uint64_pointer = lambda val : (ffi.new('uint64_t*', val), + ffi.sizeof('uint64_t')) +value_int64_pointer = lambda val: (ffi.new('int64_t*', val), + ffi.sizeof('int64_t')) +value_int_pointer = lambda val: (ffi.new('int*', val), + ffi.sizeof('int')) +value_binary_data = lambda val, length: (ffi.new('char[%d]' % (length + 1), val), + ffi.sizeof('char') * length) + +IPC_PATH_MAX_LEN = C.get_ipc_path_max_len() diff --git a/scripts/external_libs/zmq/backend/cffi/_poll.py b/scripts/external_libs/zmq/backend/cffi/_poll.py new file mode 100644 index 00000000..9bca34ca --- /dev/null +++ b/scripts/external_libs/zmq/backend/cffi/_poll.py @@ -0,0 +1,56 @@ +# coding: utf-8 +"""zmq poll function""" + +# Copyright (C) PyZMQ Developers +# Distributed under the terms of the Modified BSD License. 
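Each pointer helper defined above returns a (pointer, size) pair shaped for zmq_getsockopt/zmq_setsockopt. A stripped-down illustration of that convention with a bare cffi FFI, independent of the compiled C module (the 1000 ms LINGER value is illustrative):

    from cffi import FFI

    ffi = FFI()

    # value_int_pointer(1000) above is equivalent to building this pair:
    opt_value = ffi.new('int*', 1000)   # option value, e.g. a LINGER of 1000 ms
    opt_size = ffi.sizeof('int')        # matching byte count, typically 4

    assert opt_value[0] == 1000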
+ +from ._cffi import C, ffi, zmq_version_info + +from .constants import * + +from zmq.error import _check_rc + + +def _make_zmq_pollitem(socket, flags): + zmq_socket = socket._zmq_socket + zmq_pollitem = ffi.new('zmq_pollitem_t*') + zmq_pollitem.socket = zmq_socket + zmq_pollitem.fd = 0 + zmq_pollitem.events = flags + zmq_pollitem.revents = 0 + return zmq_pollitem[0] + +def _make_zmq_pollitem_fromfd(socket_fd, flags): + zmq_pollitem = ffi.new('zmq_pollitem_t*') + zmq_pollitem.socket = ffi.NULL + zmq_pollitem.fd = socket_fd + zmq_pollitem.events = flags + zmq_pollitem.revents = 0 + return zmq_pollitem[0] + +def zmq_poll(sockets, timeout): + cffi_pollitem_list = [] + low_level_to_socket_obj = {} + for item in sockets: + if isinstance(item[0], int): + low_level_to_socket_obj[item[0]] = item + cffi_pollitem_list.append(_make_zmq_pollitem_fromfd(item[0], item[1])) + else: + low_level_to_socket_obj[item[0]._zmq_socket] = item + cffi_pollitem_list.append(_make_zmq_pollitem(item[0], item[1])) + items = ffi.new('zmq_pollitem_t[]', cffi_pollitem_list) + list_length = ffi.cast('int', len(cffi_pollitem_list)) + c_timeout = ffi.cast('long', timeout) + rc = C.zmq_poll(items, list_length, c_timeout) + _check_rc(rc) + result = [] + for index in range(len(items)): + if not items[index].socket == ffi.NULL: + if items[index].revents > 0: + result.append((low_level_to_socket_obj[items[index].socket][0], + items[index].revents)) + else: + result.append((items[index].fd, items[index].revents)) + return result + +__all__ = ['zmq_poll'] diff --git a/scripts/external_libs/zmq/backend/cffi/_verify.c b/scripts/external_libs/zmq/backend/cffi/_verify.c new file mode 100644 index 00000000..547840eb --- /dev/null +++ b/scripts/external_libs/zmq/backend/cffi/_verify.c @@ -0,0 +1,12 @@ +#include <stdio.h> +#include <sys/un.h> +#include <string.h> + +#include <zmq.h> +#include <zmq_utils.h> +#include "zmq_compat.h" + +int get_ipc_path_max_len(void) { + struct sockaddr_un *dummy; + return sizeof(dummy->sun_path) - 1; +} diff --git a/scripts/external_libs/zmq/backend/cffi/constants.py b/scripts/external_libs/zmq/backend/cffi/constants.py new file mode 100644 index 00000000..ee293e74 --- /dev/null +++ b/scripts/external_libs/zmq/backend/cffi/constants.py @@ -0,0 +1,15 @@ +# coding: utf-8 +"""zmq constants""" + +from ._cffi import C, c_constant_names +from zmq.utils.constant_names import all_names + +g = globals() +for cname in c_constant_names: + if cname.startswith("ZMQ_"): + name = cname[4:] + else: + name = cname + g[name] = getattr(C, cname) + +__all__ = all_names diff --git a/scripts/external_libs/zmq/backend/cffi/context.py b/scripts/external_libs/zmq/backend/cffi/context.py new file mode 100644 index 00000000..16a7b257 --- /dev/null +++ b/scripts/external_libs/zmq/backend/cffi/context.py @@ -0,0 +1,100 @@ +# coding: utf-8 +"""zmq Context class""" + +# Copyright (C) PyZMQ Developers +# Distributed under the terms of the Modified BSD License. 
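The zmq_poll wrapper above takes (socket, flags) pairs and hands back (socket, revents) pairs, passing raw file descriptors through unchanged; zmq.Poller is the user-facing layer on top of it. A sketch of the equivalent call through the public API, with an illustrative endpoint:

    import zmq

    ctx = zmq.Context.instance()
    rep = ctx.socket(zmq.REP)
    rep.bind("tcp://127.0.0.1:5555")

    poller = zmq.Poller()
    poller.register(rep, zmq.POLLIN)

    # poll() takes milliseconds and returns only the ready sockets.
    events = dict(poller.poll(timeout=100))
    if rep in events and events[rep] & zmq.POLLIN:
        request = rep.recv()
        rep.send(b"ack")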
+ +import weakref + +from ._cffi import C, ffi + +from .socket import * +from .constants import * + +from zmq.error import ZMQError, _check_rc + +class Context(object): + _zmq_ctx = None + _iothreads = None + _closed = None + _sockets = None + _shadow = False + + def __init__(self, io_threads=1, shadow=None): + + if shadow: + self._zmq_ctx = ffi.cast("void *", shadow) + self._shadow = True + else: + self._shadow = False + if not io_threads >= 0: + raise ZMQError(EINVAL) + + self._zmq_ctx = C.zmq_ctx_new() + if self._zmq_ctx == ffi.NULL: + raise ZMQError(C.zmq_errno()) + if not shadow: + C.zmq_ctx_set(self._zmq_ctx, IO_THREADS, io_threads) + self._closed = False + self._sockets = set() + + @property + def underlying(self): + """The address of the underlying libzmq context""" + return int(ffi.cast('size_t', self._zmq_ctx)) + + @property + def closed(self): + return self._closed + + def _add_socket(self, socket): + ref = weakref.ref(socket) + self._sockets.add(ref) + return ref + + def _rm_socket(self, ref): + if ref in self._sockets: + self._sockets.remove(ref) + + def set(self, option, value): + """set a context option + + see zmq_ctx_set + """ + rc = C.zmq_ctx_set(self._zmq_ctx, option, value) + _check_rc(rc) + + def get(self, option): + """get context option + + see zmq_ctx_get + """ + rc = C.zmq_ctx_get(self._zmq_ctx, option) + _check_rc(rc) + return rc + + def term(self): + if self.closed: + return + + C.zmq_ctx_destroy(self._zmq_ctx) + + self._zmq_ctx = None + self._closed = True + + def destroy(self, linger=None): + if self.closed: + return + + sockets = self._sockets + self._sockets = set() + for s in sockets: + s = s() + if s and not s.closed: + if linger: + s.setsockopt(LINGER, linger) + s.close() + + self.term() + +__all__ = ['Context'] diff --git a/scripts/external_libs/zmq/backend/cffi/devices.py b/scripts/external_libs/zmq/backend/cffi/devices.py new file mode 100644 index 00000000..c7a514a8 --- /dev/null +++ b/scripts/external_libs/zmq/backend/cffi/devices.py @@ -0,0 +1,24 @@ +# coding: utf-8 +"""zmq device functions""" + +# Copyright (C) PyZMQ Developers +# Distributed under the terms of the Modified BSD License. + +from ._cffi import C, ffi, zmq_version_info +from .socket import Socket +from zmq.error import ZMQError, _check_rc + +def device(device_type, frontend, backend): + rc = C.zmq_proxy(frontend._zmq_socket, backend._zmq_socket, ffi.NULL) + _check_rc(rc) + +def proxy(frontend, backend, capture=None): + if isinstance(capture, Socket): + capture = capture._zmq_socket + else: + capture = ffi.NULL + + rc = C.zmq_proxy(frontend._zmq_socket, backend._zmq_socket, capture) + _check_rc(rc) + +__all__ = ['device', 'proxy'] diff --git a/scripts/external_libs/zmq/backend/cffi/error.py b/scripts/external_libs/zmq/backend/cffi/error.py new file mode 100644 index 00000000..3bb64de0 --- /dev/null +++ b/scripts/external_libs/zmq/backend/cffi/error.py @@ -0,0 +1,13 @@ +"""zmq error functions""" + +# Copyright (C) PyZMQ Developers +# Distributed under the terms of the Modified BSD License. 
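Note the split in the Context class above: term() only terminates the libzmq context, and will block while sockets remain open, whereas destroy() first closes every tracked socket (optionally overriding LINGER) and then terminates. A usage sketch via the public API, with an illustrative endpoint:

    import zmq

    ctx = zmq.Context()
    push = ctx.socket(zmq.PUSH)
    push.connect("tcp://127.0.0.1:5556")
    push.send(b"bye")             # queued locally until a peer appears

    # destroy() closes the socket for us; linger=0 discards the unsent
    # message instead of letting termination block on it.
    ctx.destroy(linger=0)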
+ +from ._cffi import C, ffi + +def strerror(errno): + return ffi.string(C.zmq_strerror(errno)) + +zmq_errno = C.zmq_errno + +__all__ = ['strerror', 'zmq_errno'] diff --git a/scripts/external_libs/zmq/backend/cffi/message.py b/scripts/external_libs/zmq/backend/cffi/message.py new file mode 100644 index 00000000..c35decb6 --- /dev/null +++ b/scripts/external_libs/zmq/backend/cffi/message.py @@ -0,0 +1,69 @@ +"""Dummy Frame object""" + +# Copyright (C) PyZMQ Developers +# Distributed under the terms of the Modified BSD License. + +from ._cffi import ffi, C + +import zmq +from zmq.utils.strtypes import unicode + +try: + view = memoryview +except NameError: + view = buffer + +_content = lambda x: x.tobytes() if type(x) == memoryview else x + +class Frame(object): + _data = None + tracker = None + closed = False + more = False + buffer = None + + + def __init__(self, data, track=False): + try: + view(data) + except TypeError: + raise + + self._data = data + + if isinstance(data, unicode): + raise TypeError("Unicode objects not allowed. Only: str/bytes, " + + "buffer interfaces.") + + self.more = False + self.tracker = None + self.closed = False + if track: + self.tracker = zmq.MessageTracker() + + self.buffer = view(self.bytes) + + @property + def bytes(self): + data = _content(self._data) + return data + + def __len__(self): + return len(self.bytes) + + def __eq__(self, other): + return self.bytes == _content(other) + + def __str__(self): + if str is unicode: + return self.bytes.decode() + else: + return self.bytes + + @property + def done(self): + return True + +Message = Frame + +__all__ = ['Frame', 'Message'] diff --git a/scripts/external_libs/zmq/backend/cffi/socket.py b/scripts/external_libs/zmq/backend/cffi/socket.py new file mode 100644 index 00000000..3c427739 --- /dev/null +++ b/scripts/external_libs/zmq/backend/cffi/socket.py @@ -0,0 +1,244 @@ +# coding: utf-8 +"""zmq Socket class""" + +# Copyright (C) PyZMQ Developers +# Distributed under the terms of the Modified BSD License. 
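The Frame above is what recv(copy=False) hands back: .bytes holds the payload and .more marks a multipart continuation. A short sketch of both receive modes over an illustrative inproc pair:

    import zmq

    ctx = zmq.Context.instance()
    a = ctx.socket(zmq.PAIR)
    b = ctx.socket(zmq.PAIR)
    a.bind("inproc://frames")     # inproc requires bind before connect
    b.connect("inproc://frames")

    a.send_multipart([b"head", b"tail"])

    frame = b.recv(copy=False)    # Frame object
    assert frame.bytes == b"head" and frame.more

    data = b.recv()               # copy=True returns plain bytes
    assert data == b"tail"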
+ +import random +import codecs + +import errno as errno_mod + +from ._cffi import (C, ffi, new_uint64_pointer, new_int64_pointer, + new_int_pointer, new_binary_data, value_uint64_pointer, + value_int64_pointer, value_int_pointer, value_binary_data, + IPC_PATH_MAX_LEN) + +from .message import Frame +from .constants import * + +import zmq +from zmq.error import ZMQError, _check_rc, _check_version +from zmq.utils.strtypes import unicode + + +def new_pointer_from_opt(option, length=0): + from zmq.sugar.constants import ( + int64_sockopts, bytes_sockopts, + ) + if option in int64_sockopts: + return new_int64_pointer() + elif option in bytes_sockopts: + return new_binary_data(length) + else: + # default + return new_int_pointer() + +def value_from_opt_pointer(option, opt_pointer, length=0): + from zmq.sugar.constants import ( + int64_sockopts, bytes_sockopts, + ) + if option in int64_sockopts: + return int(opt_pointer[0]) + elif option in bytes_sockopts: + return ffi.buffer(opt_pointer, length)[:] + else: + return int(opt_pointer[0]) + +def initialize_opt_pointer(option, value, length=0): + from zmq.sugar.constants import ( + int64_sockopts, bytes_sockopts, + ) + if option in int64_sockopts: + return value_int64_pointer(value) + elif option in bytes_sockopts: + return value_binary_data(value, length) + else: + return value_int_pointer(value) + + +class Socket(object): + context = None + socket_type = None + _zmq_socket = None + _closed = None + _ref = None + _shadow = False + + def __init__(self, context=None, socket_type=None, shadow=None): + self.context = context + if shadow is not None: + self._zmq_socket = ffi.cast("void *", shadow) + self._shadow = True + else: + self._shadow = False + self._zmq_socket = C.zmq_socket(context._zmq_ctx, socket_type) + if self._zmq_socket == ffi.NULL: + raise ZMQError() + self._closed = False + if context: + self._ref = context._add_socket(self) + + @property + def underlying(self): + """The address of the underlying libzmq socket""" + return int(ffi.cast('size_t', self._zmq_socket)) + + @property + def closed(self): + return self._closed + + def close(self, linger=None): + rc = 0 + if not self._closed and hasattr(self, '_zmq_socket'): + if self._zmq_socket is not None: + rc = C.zmq_close(self._zmq_socket) + self._closed = True + if self.context: + self.context._rm_socket(self._ref) + return rc + + def bind(self, address): + if isinstance(address, unicode): + address = address.encode('utf8') + rc = C.zmq_bind(self._zmq_socket, address) + if rc < 0: + if IPC_PATH_MAX_LEN and C.zmq_errno() == errno_mod.ENAMETOOLONG: + # py3compat: address is bytes, but msg wants str + if str is unicode: + address = address.decode('utf-8', 'replace') + path = address.split('://', 1)[-1] + msg = ('ipc path "{0}" is longer than {1} ' + 'characters (sizeof(sockaddr_un.sun_path)).' 
+                   .format(path, IPC_PATH_MAX_LEN))
+                raise ZMQError(C.zmq_errno(), msg=msg)
+            else:
+                _check_rc(rc)
+
+    def unbind(self, address):
+        _check_version((3,2), "unbind")
+        if isinstance(address, unicode):
+            address = address.encode('utf8')
+        rc = C.zmq_unbind(self._zmq_socket, address)
+        _check_rc(rc)
+
+    def connect(self, address):
+        if isinstance(address, unicode):
+            address = address.encode('utf8')
+        rc = C.zmq_connect(self._zmq_socket, address)
+        _check_rc(rc)
+
+    def disconnect(self, address):
+        _check_version((3,2), "disconnect")
+        if isinstance(address, unicode):
+            address = address.encode('utf8')
+        rc = C.zmq_disconnect(self._zmq_socket, address)
+        _check_rc(rc)
+
+    def set(self, option, value):
+        length = None
+        if isinstance(value, unicode):
+            raise TypeError("unicode not allowed, use bytes")
+
+        if isinstance(value, bytes):
+            if option not in zmq.constants.bytes_sockopts:
+                raise TypeError("not a bytes sockopt: %s" % option)
+            length = len(value)
+
+        c_data = initialize_opt_pointer(option, value, length)
+
+        c_value_pointer = c_data[0]
+        c_sizet = c_data[1]
+
+        rc = C.zmq_setsockopt(self._zmq_socket,
+                              option,
+                              ffi.cast('void*', c_value_pointer),
+                              c_sizet)
+        _check_rc(rc)
+
+    def get(self, option):
+        c_data = new_pointer_from_opt(option, length=255)
+
+        c_value_pointer = c_data[0]
+        c_sizet_pointer = c_data[1]
+
+        rc = C.zmq_getsockopt(self._zmq_socket,
+                              option,
+                              c_value_pointer,
+                              c_sizet_pointer)
+        _check_rc(rc)
+
+        sz = c_sizet_pointer[0]
+        v = value_from_opt_pointer(option, c_value_pointer, sz)
+        if option != zmq.IDENTITY and option in zmq.constants.bytes_sockopts and v.endswith(b'\0'):
+            v = v[:-1]
+        return v
+
+    def send(self, message, flags=0, copy=False, track=False):
+        if isinstance(message, unicode):
+            raise TypeError("Message must be in bytes, not a unicode object")
+
+        if isinstance(message, Frame):
+            message = message.bytes
+
+        zmq_msg = ffi.new('zmq_msg_t*')
+        c_message = ffi.new('char[]', message)
+        rc = C.zmq_msg_init_size(zmq_msg, len(message))
+        C.memcpy(C.zmq_msg_data(zmq_msg), c_message, len(message))
+
+        rc = C.zmq_msg_send(zmq_msg, self._zmq_socket, flags)
+        C.zmq_msg_close(zmq_msg)
+        _check_rc(rc)
+
+        if track:
+            return zmq.MessageTracker()
+
+    def recv(self, flags=0, copy=True, track=False):
+        zmq_msg = ffi.new('zmq_msg_t*')
+        C.zmq_msg_init(zmq_msg)
+
+        rc = C.zmq_msg_recv(zmq_msg, self._zmq_socket, flags)
+
+        if rc < 0:
+            C.zmq_msg_close(zmq_msg)
+            _check_rc(rc)
+
+        _buffer = ffi.buffer(C.zmq_msg_data(zmq_msg), C.zmq_msg_size(zmq_msg))
+        value = _buffer[:]
+        C.zmq_msg_close(zmq_msg)
+
+        frame = Frame(value, track=track)
+        frame.more = self.getsockopt(RCVMORE)
+
+        if copy:
+            return frame.bytes
+        else:
+            return frame
+
+    def monitor(self, addr, events=-1):
+        """s.monitor(addr, flags)
+
+        Start publishing socket events on inproc.
+        See libzmq docs for zmq_monitor for details.
+
+        Note: requires libzmq >= 3.2
+
+        Parameters
+        ----------
+        addr : str
+            The inproc url used for monitoring. Passing None as
+            the addr will cause an existing socket monitor to be
+            deregistered.
+        events : int [default: zmq.EVENT_ALL]
+            The zmq event bitmask for which events will be sent to the monitor.
+        """
+
+        _check_version((3,2), "monitor")
+        if events < 0:
+            events = zmq.EVENT_ALL
+        if addr is None:
+            addr = ffi.NULL
+        rc = C.zmq_socket_monitor(self._zmq_socket, addr, events)
+
+
+__all__ = ['Socket', 'IPC_PATH_MAX_LEN']
diff --git a/scripts/external_libs/zmq/backend/cffi/utils.py b/scripts/external_libs/zmq/backend/cffi/utils.py
new file mode 100644
index 00000000..fde7827b
--- /dev/null
+++ b/scripts/external_libs/zmq/backend/cffi/utils.py
@@ -0,0 +1,62 @@
+# coding: utf-8
+"""miscellaneous zmq_utils wrapping"""
+
+# Copyright (C) PyZMQ Developers
+# Distributed under the terms of the Modified BSD License.
+
+from ._cffi import ffi, C
+
+from zmq.error import ZMQError, _check_rc, _check_version
+from zmq.utils.strtypes import unicode
+
+def has(capability):
+    """Check for zmq capability by name (e.g. 'ipc', 'curve')
+
+    .. versionadded:: libzmq-4.1
+    .. versionadded:: 14.1
+    """
+    _check_version((4,1), 'zmq.has')
+    if isinstance(capability, unicode):
+        capability = capability.encode('utf8')
+    return bool(C.zmq_has(capability))
+
+def curve_keypair():
+    """generate a Z85 keypair for use with zmq.CURVE security
+
+    Requires libzmq (≥ 4.0) to have been linked with libsodium.
+
+    Returns
+    -------
+    (public, secret) : two bytestrings
+        The public and private keypair as 40 byte z85-encoded bytestrings.
+    """
+    _check_version((4,0), "curve_keypair")
+    public = ffi.new('char[64]')
+    private = ffi.new('char[64]')
+    rc = C.zmq_curve_keypair(public, private)
+    _check_rc(rc)
+    return ffi.buffer(public)[:40], ffi.buffer(private)[:40]
+
+
+class Stopwatch(object):
+    def __init__(self):
+        self.watch = ffi.NULL
+
+    def start(self):
+        if self.watch == ffi.NULL:
+            self.watch = C.zmq_stopwatch_start()
+        else:
+            raise ZMQError('Stopwatch is already running.')
+
+    def stop(self):
+        if self.watch == ffi.NULL:
+            raise ZMQError('Must start the Stopwatch before calling stop.')
+        else:
+            time = C.zmq_stopwatch_stop(self.watch)
+            self.watch = ffi.NULL
+            return time
+
+    def sleep(self, seconds):
+        C.zmq_sleep(seconds)
+
+__all__ = ['has', 'curve_keypair', 'Stopwatch']
diff --git a/scripts/external_libs/zmq/backend/cython/__init__.py b/scripts/external_libs/zmq/backend/cython/__init__.py
new file mode 100644
index 00000000..e5358185
--- /dev/null
+++ b/scripts/external_libs/zmq/backend/cython/__init__.py
@@ -0,0 +1,23 @@
+"""Python bindings for core 0MQ objects."""
+
+# Copyright (C) PyZMQ Developers
+# Distributed under the terms of the Lesser GNU Public License (LGPL).
+
+from .
import (constants, error, message, context, + socket, utils, _poll, _version, _device ) + +__all__ = [] +for submod in (constants, error, message, context, + socket, utils, _poll, _version, _device): + __all__.extend(submod.__all__) + +from .constants import * +from .error import * +from .message import * +from .context import * +from .socket import * +from ._poll import * +from .utils import * +from ._device import * +from ._version import * + diff --git a/scripts/external_libs/zmq/backend/cython/_device.py b/scripts/external_libs/zmq/backend/cython/_device.py new file mode 100644 index 00000000..3368ca2c --- /dev/null +++ b/scripts/external_libs/zmq/backend/cython/_device.py @@ -0,0 +1,7 @@ +def __bootstrap__(): + global __bootstrap__, __loader__, __file__ + import sys, pkg_resources, imp + __file__ = pkg_resources.resource_filename(__name__,'_device.so') + __loader__ = None; del __bootstrap__, __loader__ + imp.load_dynamic(__name__,__file__) +__bootstrap__() diff --git a/scripts/external_libs/zmq/backend/cython/_device.so b/scripts/external_libs/zmq/backend/cython/_device.so Binary files differnew file mode 100644 index 00000000..5957f8e0 --- /dev/null +++ b/scripts/external_libs/zmq/backend/cython/_device.so diff --git a/scripts/external_libs/zmq/backend/cython/_poll.py b/scripts/external_libs/zmq/backend/cython/_poll.py new file mode 100644 index 00000000..cb1d5d77 --- /dev/null +++ b/scripts/external_libs/zmq/backend/cython/_poll.py @@ -0,0 +1,7 @@ +def __bootstrap__(): + global __bootstrap__, __loader__, __file__ + import sys, pkg_resources, imp + __file__ = pkg_resources.resource_filename(__name__,'_poll.so') + __loader__ = None; del __bootstrap__, __loader__ + imp.load_dynamic(__name__,__file__) +__bootstrap__() diff --git a/scripts/external_libs/zmq/backend/cython/_poll.so b/scripts/external_libs/zmq/backend/cython/_poll.so Binary files differnew file mode 100644 index 00000000..d93d0f10 --- /dev/null +++ b/scripts/external_libs/zmq/backend/cython/_poll.so diff --git a/scripts/external_libs/zmq/backend/cython/_version.py b/scripts/external_libs/zmq/backend/cython/_version.py new file mode 100644 index 00000000..08262706 --- /dev/null +++ b/scripts/external_libs/zmq/backend/cython/_version.py @@ -0,0 +1,7 @@ +def __bootstrap__(): + global __bootstrap__, __loader__, __file__ + import sys, pkg_resources, imp + __file__ = pkg_resources.resource_filename(__name__,'_version.so') + __loader__ = None; del __bootstrap__, __loader__ + imp.load_dynamic(__name__,__file__) +__bootstrap__() diff --git a/scripts/external_libs/zmq/backend/cython/_version.so b/scripts/external_libs/zmq/backend/cython/_version.so Binary files differnew file mode 100644 index 00000000..40dd3dc8 --- /dev/null +++ b/scripts/external_libs/zmq/backend/cython/_version.so diff --git a/scripts/external_libs/zmq/backend/cython/checkrc.pxd b/scripts/external_libs/zmq/backend/cython/checkrc.pxd new file mode 100644 index 00000000..3bf69fc3 --- /dev/null +++ b/scripts/external_libs/zmq/backend/cython/checkrc.pxd @@ -0,0 +1,23 @@ +from libc.errno cimport EINTR, EAGAIN +from cpython cimport PyErr_CheckSignals +from libzmq cimport zmq_errno, ZMQ_ETERM + +cdef inline int _check_rc(int rc) except -1: + """internal utility for checking zmq return condition + + and raising the appropriate Exception class + """ + cdef int errno = zmq_errno() + PyErr_CheckSignals() + if rc < 0: + if errno == EAGAIN: + from zmq.error import Again + raise Again(errno) + elif errno == ZMQ_ETERM: + from zmq.error import ContextTerminated + raise 
ContextTerminated(errno)
+        else:
+            from zmq.error import ZMQError
+            raise ZMQError(errno)
+    # return -1
+    return 0
diff --git a/scripts/external_libs/zmq/backend/cython/constants.py b/scripts/external_libs/zmq/backend/cython/constants.py
new file mode 100644
index 00000000..ea772ac0
--- /dev/null
+++ b/scripts/external_libs/zmq/backend/cython/constants.py
@@ -0,0 +1,7 @@
+def __bootstrap__():
+    global __bootstrap__, __loader__, __file__
+    import sys, pkg_resources, imp
+    __file__ = pkg_resources.resource_filename(__name__,'constants.so')
+    __loader__ = None; del __bootstrap__, __loader__
+    imp.load_dynamic(__name__,__file__)
+__bootstrap__()
diff --git a/scripts/external_libs/zmq/backend/cython/constants.so b/scripts/external_libs/zmq/backend/cython/constants.so
Binary files differ
new file mode 100644
index 00000000..cf44c07e
--- /dev/null
+++ b/scripts/external_libs/zmq/backend/cython/constants.so
diff --git a/scripts/external_libs/zmq/backend/cython/context.pxd b/scripts/external_libs/zmq/backend/cython/context.pxd
new file mode 100644
index 00000000..9c9267a5
--- /dev/null
+++ b/scripts/external_libs/zmq/backend/cython/context.pxd
@@ -0,0 +1,41 @@
+"""0MQ Context class declaration."""
+
+#
+# Copyright (c) 2010-2011 Brian E. Granger & Min Ragan-Kelley
+#
+# This file is part of pyzmq.
+#
+# pyzmq is free software; you can redistribute it and/or modify it under
+# the terms of the Lesser GNU General Public License as published by
+# the Free Software Foundation; either version 3 of the License, or
+# (at your option) any later version.
+#
+# pyzmq is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# Lesser GNU General Public License for more details.
+#
+# You should have received a copy of the Lesser GNU General Public License
+# along with this program.  If not, see <http://www.gnu.org/licenses/>.
+#
+
+#-----------------------------------------------------------------------------
+# Code
+#-----------------------------------------------------------------------------
+
+cdef class Context:
+
+    cdef object __weakref__     # enable weakref
+    cdef void *handle           # The C handle for the underlying zmq object.
+    cdef bint _shadow           # whether the Context is a shadow wrapper of another
+    cdef void **_sockets        # A C-array containing socket handles
+    cdef size_t _n_sockets      # the number of sockets
+    cdef size_t _max_sockets    # the size of the _sockets array
+    cdef int _pid               # the pid of the process which created me (for fork safety)
+
+    cdef public bint closed     # bool property for a closed context.
+ cdef inline int _term(self) + # helpers for events on _sockets in Socket.__cinit__()/close() + cdef inline void _add_socket(self, void* handle) + cdef inline void _remove_socket(self, void* handle) + diff --git a/scripts/external_libs/zmq/backend/cython/context.py b/scripts/external_libs/zmq/backend/cython/context.py new file mode 100644 index 00000000..19f8ec7c --- /dev/null +++ b/scripts/external_libs/zmq/backend/cython/context.py @@ -0,0 +1,7 @@ +def __bootstrap__(): + global __bootstrap__, __loader__, __file__ + import sys, pkg_resources, imp + __file__ = pkg_resources.resource_filename(__name__,'context.so') + __loader__ = None; del __bootstrap__, __loader__ + imp.load_dynamic(__name__,__file__) +__bootstrap__() diff --git a/scripts/external_libs/zmq/backend/cython/context.so b/scripts/external_libs/zmq/backend/cython/context.so Binary files differnew file mode 100644 index 00000000..ef9b9699 --- /dev/null +++ b/scripts/external_libs/zmq/backend/cython/context.so diff --git a/scripts/external_libs/zmq/backend/cython/error.py b/scripts/external_libs/zmq/backend/cython/error.py new file mode 100644 index 00000000..d3a4ea0e --- /dev/null +++ b/scripts/external_libs/zmq/backend/cython/error.py @@ -0,0 +1,7 @@ +def __bootstrap__(): + global __bootstrap__, __loader__, __file__ + import sys, pkg_resources, imp + __file__ = pkg_resources.resource_filename(__name__,'error.so') + __loader__ = None; del __bootstrap__, __loader__ + imp.load_dynamic(__name__,__file__) +__bootstrap__() diff --git a/scripts/external_libs/zmq/backend/cython/error.so b/scripts/external_libs/zmq/backend/cython/error.so Binary files differnew file mode 100644 index 00000000..360da9dd --- /dev/null +++ b/scripts/external_libs/zmq/backend/cython/error.so diff --git a/scripts/external_libs/zmq/backend/cython/libzmq.pxd b/scripts/external_libs/zmq/backend/cython/libzmq.pxd new file mode 100644 index 00000000..e42f6d6b --- /dev/null +++ b/scripts/external_libs/zmq/backend/cython/libzmq.pxd @@ -0,0 +1,110 @@ +"""All the C imports for 0MQ""" + +# +# Copyright (c) 2010 Brian E. Granger & Min Ragan-Kelley +# +# This file is part of pyzmq. +# +# pyzmq is free software; you can redistribute it and/or modify it under +# the terms of the Lesser GNU General Public License as published by +# the Free Software Foundation; either version 3 of the License, or +# (at your option) any later version. +# +# pyzmq is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# Lesser GNU General Public License for more details. +# +# You should have received a copy of the Lesser GNU General Public License +# along with this program. If not, see <http://www.gnu.org/licenses/>. 
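The _check_rc helper shown earlier maps EAGAIN to zmq.Again and ETERM to zmq.ContextTerminated, so callers can tell "retry later" apart from "context shut down". Typical handling at the application level, with an illustrative endpoint:

    import zmq

    ctx = zmq.Context.instance()
    pull = ctx.socket(zmq.PULL)
    pull.bind("inproc://rc-demo")

    try:
        pull.recv(zmq.NOBLOCK)        # nothing queued yet
    except zmq.Again:
        pass                          # EAGAIN: poll or retry later
    except zmq.ContextTerminated:
        pull.close()                  # ETERM: clean up and exit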
+# + +#----------------------------------------------------------------------------- +# Imports +#----------------------------------------------------------------------------- + +#----------------------------------------------------------------------------- +# Import the C header files +#----------------------------------------------------------------------------- + +cdef extern from *: + ctypedef void* const_void_ptr "const void *" + ctypedef char* const_char_ptr "const char *" + +cdef extern from "zmq_compat.h": + ctypedef signed long long int64_t "pyzmq_int64_t" + +include "constant_enums.pxi" + +cdef extern from "zmq.h" nogil: + + void _zmq_version "zmq_version"(int *major, int *minor, int *patch) + + ctypedef int fd_t "ZMQ_FD_T" + + enum: errno + char *zmq_strerror (int errnum) + int zmq_errno() + + void *zmq_ctx_new () + int zmq_ctx_destroy (void *context) + int zmq_ctx_set (void *context, int option, int optval) + int zmq_ctx_get (void *context, int option) + void *zmq_init (int io_threads) + int zmq_term (void *context) + + # blackbox def for zmq_msg_t + ctypedef void * zmq_msg_t "zmq_msg_t" + + ctypedef void zmq_free_fn(void *data, void *hint) + + int zmq_msg_init (zmq_msg_t *msg) + int zmq_msg_init_size (zmq_msg_t *msg, size_t size) + int zmq_msg_init_data (zmq_msg_t *msg, void *data, + size_t size, zmq_free_fn *ffn, void *hint) + int zmq_msg_send (zmq_msg_t *msg, void *s, int flags) + int zmq_msg_recv (zmq_msg_t *msg, void *s, int flags) + int zmq_msg_close (zmq_msg_t *msg) + int zmq_msg_move (zmq_msg_t *dest, zmq_msg_t *src) + int zmq_msg_copy (zmq_msg_t *dest, zmq_msg_t *src) + void *zmq_msg_data (zmq_msg_t *msg) + size_t zmq_msg_size (zmq_msg_t *msg) + int zmq_msg_more (zmq_msg_t *msg) + int zmq_msg_get (zmq_msg_t *msg, int option) + int zmq_msg_set (zmq_msg_t *msg, int option, int optval) + const_char_ptr zmq_msg_gets (zmq_msg_t *msg, const_char_ptr property) + int zmq_has (const_char_ptr capability) + + void *zmq_socket (void *context, int type) + int zmq_close (void *s) + int zmq_setsockopt (void *s, int option, void *optval, size_t optvallen) + int zmq_getsockopt (void *s, int option, void *optval, size_t *optvallen) + int zmq_bind (void *s, char *addr) + int zmq_connect (void *s, char *addr) + int zmq_unbind (void *s, char *addr) + int zmq_disconnect (void *s, char *addr) + + int zmq_socket_monitor (void *s, char *addr, int flags) + + # send/recv + int zmq_sendbuf (void *s, const_void_ptr buf, size_t n, int flags) + int zmq_recvbuf (void *s, void *buf, size_t n, int flags) + + ctypedef struct zmq_pollitem_t: + void *socket + int fd + short events + short revents + + int zmq_poll (zmq_pollitem_t *items, int nitems, long timeout) + + int zmq_device (int device_, void *insocket_, void *outsocket_) + int zmq_proxy (void *frontend, void *backend, void *capture) + +cdef extern from "zmq_utils.h" nogil: + + void *zmq_stopwatch_start () + unsigned long zmq_stopwatch_stop (void *watch_) + void zmq_sleep (int seconds_) + int zmq_curve_keypair (char *z85_public_key, char *z85_secret_key) + diff --git a/scripts/external_libs/zmq/backend/cython/message.pxd b/scripts/external_libs/zmq/backend/cython/message.pxd new file mode 100644 index 00000000..4781195f --- /dev/null +++ b/scripts/external_libs/zmq/backend/cython/message.pxd @@ -0,0 +1,63 @@ +"""0MQ Message related class declarations.""" + +# +# Copyright (c) 2010-2011 Brian E. Granger & Min Ragan-Kelley +# +# This file is part of pyzmq. 
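Among the declarations above are zmq_version and zmq_has, which surface in the Python API as module-level helpers; zmq.has() needs libzmq >= 4.1, matching the capability API it wraps. A guarded sketch:

    import zmq

    print(zmq.zmq_version_info())     # linked libzmq, e.g. (4, 0, 5)
    print(zmq.pyzmq_version())        # the binding's own version

    # zmq.has() requires libzmq >= 4.1; guard it on older libraries.
    if zmq.zmq_version_info() >= (4, 1):
        print(zmq.has('curve'))       # capability probe, e.g. CURVE support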
+# +# pyzmq is free software; you can redistribute it and/or modify it under +# the terms of the Lesser GNU General Public License as published by +# the Free Software Foundation; either version 3 of the License, or +# (at your option) any later version. +# +# pyzmq is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# Lesser GNU General Public License for more details. +# +# You should have received a copy of the Lesser GNU General Public License +# along with this program. If not, see <http://www.gnu.org/licenses/>. +# + +#----------------------------------------------------------------------------- +# Imports +#----------------------------------------------------------------------------- + +from cpython cimport PyBytes_FromStringAndSize + +from libzmq cimport zmq_msg_t, zmq_msg_data, zmq_msg_size + +#----------------------------------------------------------------------------- +# Code +#----------------------------------------------------------------------------- + +cdef class MessageTracker(object): + + cdef set events # Message Event objects to track. + cdef set peers # Other Message or MessageTracker objects. + + +cdef class Frame: + + cdef zmq_msg_t zmq_msg + cdef object _data # The actual message data as a Python object. + cdef object _buffer # A Python Buffer/View of the message contents + cdef object _bytes # A bytes/str copy of the message. + cdef bint _failed_init # Flag to handle failed zmq_msg_init + cdef public object tracker_event # Event for use with zmq_free_fn. + cdef public object tracker # MessageTracker object. + cdef public bint more # whether RCVMORE was set + + cdef Frame fast_copy(self) # Create shallow copy of Message object. + cdef object _getbuffer(self) # Construct self._buffer. + + +cdef inline object copy_zmq_msg_bytes(zmq_msg_t *zmq_msg): + """ Copy the data from a zmq_msg_t """ + cdef char *data_c = NULL + cdef Py_ssize_t data_len_c + data_c = <char *>zmq_msg_data(zmq_msg) + data_len_c = zmq_msg_size(zmq_msg) + return PyBytes_FromStringAndSize(data_c, data_len_c) + + diff --git a/scripts/external_libs/zmq/backend/cython/message.py b/scripts/external_libs/zmq/backend/cython/message.py new file mode 100644 index 00000000..5e423b62 --- /dev/null +++ b/scripts/external_libs/zmq/backend/cython/message.py @@ -0,0 +1,7 @@ +def __bootstrap__(): + global __bootstrap__, __loader__, __file__ + import sys, pkg_resources, imp + __file__ = pkg_resources.resource_filename(__name__,'message.so') + __loader__ = None; del __bootstrap__, __loader__ + imp.load_dynamic(__name__,__file__) +__bootstrap__() diff --git a/scripts/external_libs/zmq/backend/cython/message.so b/scripts/external_libs/zmq/backend/cython/message.so Binary files differnew file mode 100644 index 00000000..f674489f --- /dev/null +++ b/scripts/external_libs/zmq/backend/cython/message.so diff --git a/scripts/external_libs/zmq/backend/cython/socket.pxd b/scripts/external_libs/zmq/backend/cython/socket.pxd new file mode 100644 index 00000000..b8a331e2 --- /dev/null +++ b/scripts/external_libs/zmq/backend/cython/socket.pxd @@ -0,0 +1,47 @@ +"""0MQ Socket class declaration.""" + +# +# Copyright (c) 2010-2011 Brian E. Granger & Min Ragan-Kelley +# +# This file is part of pyzmq. 
+#
+# pyzmq is free software; you can redistribute it and/or modify it under
+# the terms of the Lesser GNU General Public License as published by
+# the Free Software Foundation; either version 3 of the License, or
+# (at your option) any later version.
+#
+# pyzmq is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# Lesser GNU General Public License for more details.
+#
+# You should have received a copy of the Lesser GNU General Public License
+# along with this program.  If not, see <http://www.gnu.org/licenses/>.
+#
+
+#-----------------------------------------------------------------------------
+# Imports
+#-----------------------------------------------------------------------------
+
+from context cimport Context
+
+#-----------------------------------------------------------------------------
+# Code
+#-----------------------------------------------------------------------------
+
+
+cdef class Socket:
+
+    cdef object __weakref__     # enable weakref
+    cdef void *handle           # The C handle for the underlying zmq object.
+    cdef bint _shadow           # whether the Socket is a shadow wrapper of another
+    # Hold on to a reference to the context to make sure it is not garbage
+    # collected until the socket is done with it.
+    cdef public Context context # The zmq Context object that owns this.
+    cdef public bint _closed    # bool property for a closed socket.
+    cdef int _pid               # the pid of the process which created me (for fork safety)
+
+    # cpdef methods for direct-cython access:
+    cpdef object send(self, object data, int flags=*, copy=*, track=*)
+    cpdef object recv(self, int flags=*, copy=*, track=*)
+
diff --git a/scripts/external_libs/zmq/backend/cython/socket.py b/scripts/external_libs/zmq/backend/cython/socket.py
new file mode 100644
index 00000000..faef8bee
--- /dev/null
+++ b/scripts/external_libs/zmq/backend/cython/socket.py
@@ -0,0 +1,7 @@
+def __bootstrap__():
+    global __bootstrap__, __loader__, __file__
+    import sys, pkg_resources, imp
+    __file__ = pkg_resources.resource_filename(__name__,'socket.so')
+    __loader__ = None; del __bootstrap__, __loader__
+    imp.load_dynamic(__name__,__file__)
+__bootstrap__()
diff --git a/scripts/external_libs/zmq/backend/cython/socket.so b/scripts/external_libs/zmq/backend/cython/socket.so
Binary files differ
new file mode 100644
index 00000000..1c927042
--- /dev/null
+++ b/scripts/external_libs/zmq/backend/cython/socket.so
diff --git a/scripts/external_libs/zmq/backend/cython/utils.pxd b/scripts/external_libs/zmq/backend/cython/utils.pxd
new file mode 100644
index 00000000..1d7117f1
--- /dev/null
+++ b/scripts/external_libs/zmq/backend/cython/utils.pxd
@@ -0,0 +1,29 @@
+"""Wrap zmq_utils.h"""
+
+#
+# Copyright (c) 2010 Brian E. Granger & Min Ragan-Kelley
+#
+# This file is part of pyzmq.
+#
+# pyzmq is free software; you can redistribute it and/or modify it under
+# the terms of the Lesser GNU General Public License as published by
+# the Free Software Foundation; either version 3 of the License, or
+# (at your option) any later version.
+#
+# pyzmq is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# Lesser GNU General Public License for more details.
+#
+# You should have received a copy of the Lesser GNU General Public License
+# along with this program.  If not, see <http://www.gnu.org/licenses/>.
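utils.pxd, opened above, declares the Cython side of the Stopwatch whose CFFI implementation appeared earlier in this diff: start() arms libzmq's microsecond timer, and stop() returns the elapsed microseconds and resets the watch. A sketch:

    import zmq

    watch = zmq.Stopwatch()
    watch.start()
    watch.sleep(1)               # wraps zmq_sleep(); whole seconds only
    elapsed_us = watch.stop()    # microseconds since start(); watch is reset
    print("slept %d us" % elapsed_us)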
+# + +#----------------------------------------------------------------------------- +# Code +#----------------------------------------------------------------------------- + + +cdef class Stopwatch: + cdef void *watch # The C handle for the underlying zmq object + diff --git a/scripts/external_libs/zmq/backend/cython/utils.py b/scripts/external_libs/zmq/backend/cython/utils.py new file mode 100644 index 00000000..fe928300 --- /dev/null +++ b/scripts/external_libs/zmq/backend/cython/utils.py @@ -0,0 +1,7 @@ +def __bootstrap__(): + global __bootstrap__, __loader__, __file__ + import sys, pkg_resources, imp + __file__ = pkg_resources.resource_filename(__name__,'utils.so') + __loader__ = None; del __bootstrap__, __loader__ + imp.load_dynamic(__name__,__file__) +__bootstrap__() diff --git a/scripts/external_libs/zmq/backend/cython/utils.so b/scripts/external_libs/zmq/backend/cython/utils.so Binary files differnew file mode 100644 index 00000000..b4e5b283 --- /dev/null +++ b/scripts/external_libs/zmq/backend/cython/utils.so diff --git a/scripts/external_libs/zmq/backend/select.py b/scripts/external_libs/zmq/backend/select.py new file mode 100644 index 00000000..0a2e09a2 --- /dev/null +++ b/scripts/external_libs/zmq/backend/select.py @@ -0,0 +1,39 @@ +"""Import basic exposure of libzmq C API as a backend""" + +# Copyright (C) PyZMQ Developers +# Distributed under the terms of the Modified BSD License. + +public_api = [ + 'Context', + 'Socket', + 'Frame', + 'Message', + 'Stopwatch', + 'device', + 'proxy', + 'zmq_poll', + 'strerror', + 'zmq_errno', + 'has', + 'curve_keypair', + 'constants', + 'zmq_version_info', + 'IPC_PATH_MAX_LEN', +] + +def select_backend(name): + """Select the pyzmq backend""" + try: + mod = __import__(name, fromlist=public_api) + except ImportError: + raise + except Exception as e: + import sys + from zmq.utils.sixcerpt import reraise + exc_info = sys.exc_info() + reraise(ImportError, ImportError("Importing %s failed with %s" % (name, e)), exc_info[2]) + + ns = {} + for key in public_api: + ns[key] = getattr(mod, key) + return ns diff --git a/scripts/external_libs/zmq/devices/__init__.py b/scripts/external_libs/zmq/devices/__init__.py new file mode 100644 index 00000000..23715963 --- /dev/null +++ b/scripts/external_libs/zmq/devices/__init__.py @@ -0,0 +1,16 @@ +"""0MQ Device classes for running in background threads or processes.""" + +# Copyright (C) PyZMQ Developers +# Distributed under the terms of the Modified BSD License. + +from zmq import device +from zmq.devices import basedevice, proxydevice, monitoredqueue, monitoredqueuedevice + +from zmq.devices.basedevice import * +from zmq.devices.proxydevice import * +from zmq.devices.monitoredqueue import * +from zmq.devices.monitoredqueuedevice import * + +__all__ = ['device'] +for submod in (basedevice, proxydevice, monitoredqueue, monitoredqueuedevice): + __all__.extend(submod.__all__) diff --git a/scripts/external_libs/zmq/devices/basedevice.py b/scripts/external_libs/zmq/devices/basedevice.py new file mode 100644 index 00000000..7ba1b7ac --- /dev/null +++ b/scripts/external_libs/zmq/devices/basedevice.py @@ -0,0 +1,229 @@ +"""Classes for running 0MQ Devices in the background.""" + +# Copyright (C) PyZMQ Developers +# Distributed under the terms of the Modified BSD License. + + +import time +from threading import Thread +from multiprocessing import Process + +from zmq import device, QUEUE, Context, ETERM, ZMQError + + +class Device: + """A 0MQ Device to be run in the background. 
+ + You do not pass Socket instances to this, but rather Socket types:: + + Device(device_type, in_socket_type, out_socket_type) + + For instance:: + + dev = Device(zmq.QUEUE, zmq.DEALER, zmq.ROUTER) + + Similar to zmq.device, but socket types instead of sockets themselves are + passed, and the sockets are created in the work thread, to avoid issues + with thread safety. As a result, additional bind_{in|out} and + connect_{in|out} methods and setsockopt_{in|out} allow users to specify + connections for the sockets. + + Parameters + ---------- + device_type : int + The 0MQ Device type + {in|out}_type : int + zmq socket types, to be passed later to context.socket(). e.g. + zmq.PUB, zmq.SUB, zmq.REQ. If out_type is < 0, then in_socket is used + for both in_socket and out_socket. + + Methods + ------- + bind_{in_out}(iface) + passthrough for ``{in|out}_socket.bind(iface)``, to be called in the thread + connect_{in_out}(iface) + passthrough for ``{in|out}_socket.connect(iface)``, to be called in the + thread + setsockopt_{in_out}(opt,value) + passthrough for ``{in|out}_socket.setsockopt(opt, value)``, to be called in + the thread + + Attributes + ---------- + daemon : int + sets whether the thread should be run as a daemon + Default is true, because if it is false, the thread will not + exit unless it is killed + context_factory : callable (class attribute) + Function for creating the Context. This will be Context.instance + in ThreadDevices, and Context in ProcessDevices. The only reason + it is not instance() in ProcessDevices is that there may be a stale + Context instance already initialized, and the forked environment + should *never* try to use it. + """ + + context_factory = Context.instance + """Callable that returns a context. Typically either Context.instance or Context, + depending on whether the device should share the global instance or not. + """ + + def __init__(self, device_type=QUEUE, in_type=None, out_type=None): + self.device_type = device_type + if in_type is None: + raise TypeError("in_type must be specified") + if out_type is None: + raise TypeError("out_type must be specified") + self.in_type = in_type + self.out_type = out_type + self._in_binds = [] + self._in_connects = [] + self._in_sockopts = [] + self._out_binds = [] + self._out_connects = [] + self._out_sockopts = [] + self.daemon = True + self.done = False + + def bind_in(self, addr): + """Enqueue ZMQ address for binding on in_socket. + + See zmq.Socket.bind for details. + """ + self._in_binds.append(addr) + + def connect_in(self, addr): + """Enqueue ZMQ address for connecting on in_socket. + + See zmq.Socket.connect for details. + """ + self._in_connects.append(addr) + + def setsockopt_in(self, opt, value): + """Enqueue setsockopt(opt, value) for in_socket + + See zmq.Socket.setsockopt for details. + """ + self._in_sockopts.append((opt, value)) + + def bind_out(self, addr): + """Enqueue ZMQ address for binding on out_socket. + + See zmq.Socket.bind for details. + """ + self._out_binds.append(addr) + + def connect_out(self, addr): + """Enqueue ZMQ address for connecting on out_socket. + + See zmq.Socket.connect for details. + """ + self._out_connects.append(addr) + + def setsockopt_out(self, opt, value): + """Enqueue setsockopt(opt, value) for out_socket + + See zmq.Socket.setsockopt for details. 
+ """ + self._out_sockopts.append((opt, value)) + + def _setup_sockets(self): + ctx = self.context_factory() + + self._context = ctx + + # create the sockets + ins = ctx.socket(self.in_type) + if self.out_type < 0: + outs = ins + else: + outs = ctx.socket(self.out_type) + + # set sockopts (must be done first, in case of zmq.IDENTITY) + for opt,value in self._in_sockopts: + ins.setsockopt(opt, value) + for opt,value in self._out_sockopts: + outs.setsockopt(opt, value) + + for iface in self._in_binds: + ins.bind(iface) + for iface in self._out_binds: + outs.bind(iface) + + for iface in self._in_connects: + ins.connect(iface) + for iface in self._out_connects: + outs.connect(iface) + + return ins,outs + + def run_device(self): + """The runner method. + + Do not call me directly, instead call ``self.start()``, just like a Thread. + """ + ins,outs = self._setup_sockets() + device(self.device_type, ins, outs) + + def run(self): + """wrap run_device in try/catch ETERM""" + try: + self.run_device() + except ZMQError as e: + if e.errno == ETERM: + # silence TERM errors, because this should be a clean shutdown + pass + else: + raise + finally: + self.done = True + + def start(self): + """Start the device. Override me in subclass for other launchers.""" + return self.run() + + def join(self,timeout=None): + """wait for me to finish, like Thread.join. + + Reimplemented appropriately by subclasses.""" + tic = time.time() + toc = tic + while not self.done and not (timeout is not None and toc-tic > timeout): + time.sleep(.001) + toc = time.time() + + +class BackgroundDevice(Device): + """Base class for launching Devices in background processes and threads.""" + + launcher=None + _launch_class=None + + def start(self): + self.launcher = self._launch_class(target=self.run) + self.launcher.daemon = self.daemon + return self.launcher.start() + + def join(self, timeout=None): + return self.launcher.join(timeout=timeout) + + +class ThreadDevice(BackgroundDevice): + """A Device that will be run in a background Thread. + + See Device for details. + """ + _launch_class=Thread + +class ProcessDevice(BackgroundDevice): + """A Device that will be run in a background Process. + + See Device for details. + """ + _launch_class=Process + context_factory = Context + """Callable that returns a context. Typically either Context.instance or Context, + depending on whether the device should share the global instance or not. + """ + + +__all__ = ['Device', 'ThreadDevice', 'ProcessDevice'] diff --git a/scripts/external_libs/zmq/devices/monitoredqueue.pxd b/scripts/external_libs/zmq/devices/monitoredqueue.pxd new file mode 100644 index 00000000..1e26ed86 --- /dev/null +++ b/scripts/external_libs/zmq/devices/monitoredqueue.pxd @@ -0,0 +1,177 @@ +"""MonitoredQueue class declarations. + +Authors +------- +* MinRK +* Brian Granger +""" + +# +# Copyright (c) 2010 Min Ragan-Kelley, Brian Granger +# +# This file is part of pyzmq, but is derived and adapted from zmq_queue.cpp +# originally from libzmq-2.1.6, used under LGPLv3 +# +# pyzmq is free software; you can redistribute it and/or modify it under +# the terms of the Lesser GNU General Public License as published by +# the Free Software Foundation; either version 3 of the License, or +# (at your option) any later version. +# +# pyzmq is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# Lesser GNU General Public License for more details. 
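The Device classes above only queue up bind/connect/sockopt requests; the sockets themselves are created inside run_device(), on the thread or process that runs the device, which is why socket types rather than sockets are passed in. A typical ThreadDevice forwarder, with illustrative endpoints:

    import zmq
    from zmq.devices import ThreadDevice

    # Shared queue: clients connect to the in side, workers to the out side.
    dev = ThreadDevice(zmq.QUEUE, zmq.ROUTER, zmq.DEALER)
    dev.bind_in("tcp://127.0.0.1:5559")
    dev.bind_out("tcp://127.0.0.1:5560")
    dev.setsockopt_in(zmq.LINGER, 0)    # queued now, applied in the device thread
    dev.setsockopt_out(zmq.LINGER, 0)
    dev.start()                         # daemon thread; returns immediately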
+# +# You should have received a copy of the Lesser GNU General Public License +# along with this program. If not, see <http://www.gnu.org/licenses/>. +# + +#----------------------------------------------------------------------------- +# Imports +#----------------------------------------------------------------------------- + +from libzmq cimport * + +#----------------------------------------------------------------------------- +# MonitoredQueue C functions +#----------------------------------------------------------------------------- + +cdef inline int _relay(void *insocket_, void *outsocket_, void *sidesocket_, + zmq_msg_t msg, zmq_msg_t side_msg, zmq_msg_t id_msg, + bint swap_ids) nogil: + cdef int rc + cdef int64_t flag_2 + cdef int flag_3 + cdef int flags + cdef bint more + cdef size_t flagsz + cdef void * flag_ptr + + if ZMQ_VERSION_MAJOR < 3: + flagsz = sizeof (int64_t) + flag_ptr = &flag_2 + else: + flagsz = sizeof (int) + flag_ptr = &flag_3 + + if swap_ids:# both router, must send second identity first + # recv two ids into msg, id_msg + rc = zmq_msg_recv(&msg, insocket_, 0) + if rc < 0: return rc + + rc = zmq_msg_recv(&id_msg, insocket_, 0) + if rc < 0: return rc + + # send second id (id_msg) first + #!!!! always send a copy before the original !!!! + rc = zmq_msg_copy(&side_msg, &id_msg) + if rc < 0: return rc + rc = zmq_msg_send(&side_msg, outsocket_, ZMQ_SNDMORE) + if rc < 0: return rc + rc = zmq_msg_send(&id_msg, sidesocket_, ZMQ_SNDMORE) + if rc < 0: return rc + # send first id (msg) second + rc = zmq_msg_copy(&side_msg, &msg) + if rc < 0: return rc + rc = zmq_msg_send(&side_msg, outsocket_, ZMQ_SNDMORE) + if rc < 0: return rc + rc = zmq_msg_send(&msg, sidesocket_, ZMQ_SNDMORE) + if rc < 0: return rc + while (True): + rc = zmq_msg_recv(&msg, insocket_, 0) + if rc < 0: return rc + # assert (rc == 0) + rc = zmq_getsockopt (insocket_, ZMQ_RCVMORE, flag_ptr, &flagsz) + if rc < 0: return rc + flags = 0 + if ZMQ_VERSION_MAJOR < 3: + if flag_2: + flags |= ZMQ_SNDMORE + else: + if flag_3: + flags |= ZMQ_SNDMORE + # LABEL has been removed: + # rc = zmq_getsockopt (insocket_, ZMQ_RCVLABEL, flag_ptr, &flagsz) + # if flag_3: + # flags |= ZMQ_SNDLABEL + # assert (rc == 0) + + rc = zmq_msg_copy(&side_msg, &msg) + if rc < 0: return rc + if flags: + rc = zmq_msg_send(&side_msg, outsocket_, flags) + if rc < 0: return rc + # only SNDMORE for side-socket + rc = zmq_msg_send(&msg, sidesocket_, ZMQ_SNDMORE) + if rc < 0: return rc + else: + rc = zmq_msg_send(&side_msg, outsocket_, 0) + if rc < 0: return rc + rc = zmq_msg_send(&msg, sidesocket_, 0) + if rc < 0: return rc + break + return rc + +# the MonitoredQueue C function, adapted from zmq::queue.cpp : +cdef inline int c_monitored_queue (void *insocket_, void *outsocket_, + void *sidesocket_, zmq_msg_t *in_msg_ptr, + zmq_msg_t *out_msg_ptr, int swap_ids) nogil: + """The actual C function for a monitored queue device. + + See ``monitored_queue()`` for details. + """ + + cdef zmq_msg_t msg + cdef int rc = zmq_msg_init (&msg) + cdef zmq_msg_t id_msg + rc = zmq_msg_init (&id_msg) + if rc < 0: return rc + cdef zmq_msg_t side_msg + rc = zmq_msg_init (&side_msg) + if rc < 0: return rc + + cdef zmq_pollitem_t items [2] + items [0].socket = insocket_ + items [0].fd = 0 + items [0].events = ZMQ_POLLIN + items [0].revents = 0 + items [1].socket = outsocket_ + items [1].fd = 0 + items [1].events = ZMQ_POLLIN + items [1].revents = 0 + # I don't think sidesocket should be polled? 
+ # items [2].socket = sidesocket_
+ # items [2].fd = 0
+ # items [2].events = ZMQ_POLLIN
+ # items [2].revents = 0
+
+ while (True):
+
+ # // Wait while there are either requests or replies to process.
+ rc = zmq_poll (&items [0], 2, -1)
+ if rc < 0: return rc
+ # // The algorithm below assumes the ratio of requests and replies processed
+ # // under full load to be 1:1. Although processing replies first is
+ # // tempting, it is susceptible to DoS attacks (overloading the system
+ # // with unsolicited replies).
+ #
+ # // Process a request.
+ if (items [0].revents & ZMQ_POLLIN):
+ # send in_prefix to side socket
+ rc = zmq_msg_copy(&side_msg, in_msg_ptr)
+ if rc < 0: return rc
+ rc = zmq_msg_send(&side_msg, sidesocket_, ZMQ_SNDMORE)
+ if rc < 0: return rc
+ # relay the rest of the message
+ rc = _relay(insocket_, outsocket_, sidesocket_, msg, side_msg, id_msg, swap_ids)
+ if rc < 0: return rc
+ if (items [1].revents & ZMQ_POLLIN):
+ # send out_prefix to side socket
+ rc = zmq_msg_copy(&side_msg, out_msg_ptr)
+ if rc < 0: return rc
+ rc = zmq_msg_send(&side_msg, sidesocket_, ZMQ_SNDMORE)
+ if rc < 0: return rc
+ # relay the rest of the message
+ rc = _relay(outsocket_, insocket_, sidesocket_, msg, side_msg, id_msg, swap_ids)
+ if rc < 0: return rc
+ return rc
diff --git a/scripts/external_libs/zmq/devices/monitoredqueue.py b/scripts/external_libs/zmq/devices/monitoredqueue.py new file mode 100644 index 00000000..6d714e51 --- /dev/null +++ b/scripts/external_libs/zmq/devices/monitoredqueue.py @@ -0,0 +1,7 @@
+def __bootstrap__():
+ global __bootstrap__, __loader__, __file__
+ import sys, pkg_resources, imp
+ __file__ = pkg_resources.resource_filename(__name__,'monitoredqueue.so')
+ __loader__ = None; del __bootstrap__, __loader__
+ imp.load_dynamic(__name__,__file__)
+__bootstrap__()
diff --git a/scripts/external_libs/zmq/devices/monitoredqueue.so b/scripts/external_libs/zmq/devices/monitoredqueue.so Binary files differ
new file mode 100644 index 00000000..edca8a4b --- /dev/null +++ b/scripts/external_libs/zmq/devices/monitoredqueue.so diff --git a/scripts/external_libs/zmq/devices/monitoredqueuedevice.py b/scripts/external_libs/zmq/devices/monitoredqueuedevice.py new file mode 100644 index 00000000..9723f866 --- /dev/null +++ b/scripts/external_libs/zmq/devices/monitoredqueuedevice.py @@ -0,0 +1,66 @@
+"""MonitoredQueue classes and functions."""
+
+# Copyright (C) PyZMQ Developers
+# Distributed under the terms of the Modified BSD License.
+
+
+from zmq import ZMQError, PUB
+from zmq.devices.proxydevice import ProxyBase, Proxy, ThreadProxy, ProcessProxy
+from zmq.devices.monitoredqueue import monitored_queue
+
+
+class MonitoredQueueBase(ProxyBase):
+ """Base class for overriding methods."""
+
+ _in_prefix = b''
+ _out_prefix = b''
+
+ def __init__(self, in_type, out_type, mon_type=PUB, in_prefix=b'in', out_prefix=b'out'):
+
+ ProxyBase.__init__(self, in_type=in_type, out_type=out_type, mon_type=mon_type)
+
+ self._in_prefix = in_prefix
+ self._out_prefix = out_prefix
+
+ def run_device(self):
+ ins,outs,mons = self._setup_sockets()
+ monitored_queue(ins, outs, mons, self._in_prefix, self._out_prefix)
+
+
+class MonitoredQueue(MonitoredQueueBase, Proxy):
+ """Class for running monitored_queue in the background.
+
+ See zmq.devices.Device for most of the spec. MonitoredQueue differs from Proxy
+ only in that it adds a ``prefix`` to messages sent on the monitor socket,
+ with a different prefix for each direction.
+
+ MQ also supports ROUTER on both sides, which zmq.proxy does not.
+
+ If a message arrives on `in_sock`, it will be prefixed with `in_prefix` on the monitor socket.
+ If it arrives on out_sock, it will be prefixed with `out_prefix`.
+
+ A PUB socket is the most logical choice for the mon_socket, but it is not required.
+ """
+ pass
+
+
+class ThreadMonitoredQueue(MonitoredQueueBase, ThreadProxy):
+ """Run zmq.monitored_queue in a background thread.
+
+ See MonitoredQueue and Proxy for details.
+ """
+ pass
+
+
+class ProcessMonitoredQueue(MonitoredQueueBase, ProcessProxy):
+ """Run zmq.monitored_queue in a background process.
+
+ See MonitoredQueue and Proxy for details.
+ """
+
+
+__all__ = [
+ 'MonitoredQueue',
+ 'ThreadMonitoredQueue',
+ 'ProcessMonitoredQueue'
+]
diff --git a/scripts/external_libs/zmq/devices/proxydevice.py b/scripts/external_libs/zmq/devices/proxydevice.py new file mode 100644 index 00000000..68be3f15 --- /dev/null +++ b/scripts/external_libs/zmq/devices/proxydevice.py @@ -0,0 +1,90 @@
+"""Proxy classes and functions."""
+
+# Copyright (C) PyZMQ Developers
+# Distributed under the terms of the Modified BSD License.
+
+import zmq
+from zmq.devices.basedevice import Device, ThreadDevice, ProcessDevice
+
+
+class ProxyBase(object):
+ """Base class for overriding methods."""
+
+ def __init__(self, in_type, out_type, mon_type=zmq.PUB):
+
+ Device.__init__(self, in_type=in_type, out_type=out_type)
+ self.mon_type = mon_type
+ self._mon_binds = []
+ self._mon_connects = []
+ self._mon_sockopts = []
+
+ def bind_mon(self, addr):
+ """Enqueue ZMQ address for binding on mon_socket.
+
+ See zmq.Socket.bind for details.
+ """
+ self._mon_binds.append(addr)
+
+ def connect_mon(self, addr):
+ """Enqueue ZMQ address for connecting on mon_socket.
+
+ See zmq.Socket.connect for details.
+ """
+ self._mon_connects.append(addr)
+
+ def setsockopt_mon(self, opt, value):
+ """Enqueue setsockopt(opt, value) for mon_socket
+
+ See zmq.Socket.setsockopt for details.
+ """
+ self._mon_sockopts.append((opt, value))
+
+ def _setup_sockets(self):
+ ins,outs = Device._setup_sockets(self)
+ ctx = self._context
+ mons = ctx.socket(self.mon_type)
+
+ # set sockopts (must be done first, in case of zmq.IDENTITY)
+ for opt,value in self._mon_sockopts:
+ mons.setsockopt(opt, value)
+
+ for iface in self._mon_binds:
+ mons.bind(iface)
+
+ for iface in self._mon_connects:
+ mons.connect(iface)
+
+ return ins,outs,mons
+
+ def run_device(self):
+ ins,outs,mons = self._setup_sockets()
+ zmq.proxy(ins, outs, mons)
+
+class Proxy(ProxyBase, Device):
+ """Threadsafe Proxy object.
+
+ See zmq.devices.Device for most of the spec. This subclass adds a
+ <method>_mon version of each <method>_{in|out} method, for configuring the
+ monitor socket.
+
+ A Proxy is a 3-socket ZMQ Device that functions just like a
+ QUEUE, except each message is also sent out on the monitor socket.
+
+ A PUB socket is the most logical choice for the mon_socket, but it is not required.
+ """
+ pass
+
+class ThreadProxy(ProxyBase, ThreadDevice):
+ """Proxy in a Thread. See Proxy for more."""
+ pass
+
+class ProcessProxy(ProxyBase, ProcessDevice):
+ """Proxy in a Process. See Proxy for more."""
+ pass
+
+
+__all__ = [
+ 'Proxy',
+ 'ThreadProxy',
+ 'ProcessProxy',
+]
diff --git a/scripts/external_libs/zmq/error.py b/scripts/external_libs/zmq/error.py new file mode 100644 index 00000000..48cdaafa --- /dev/null +++ b/scripts/external_libs/zmq/error.py @@ -0,0 +1,164 @@
+"""0MQ Error classes and functions."""
+
+# Copyright (C) PyZMQ Developers
+# Distributed under the terms of the Modified BSD License.
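A usage sketch for the proxy and monitored-queue classes defined above; the endpoints, prefixes, and socket types are illustrative assumptions::

    import zmq
    from zmq.devices import ThreadMonitoredQueue

    # a ROUTER/DEALER queue whose traffic is mirrored on a PUB monitor socket
    dev = ThreadMonitoredQueue(zmq.ROUTER, zmq.DEALER,
                               in_prefix=b'req', out_prefix=b'rep')
    dev.bind_in('tcp://127.0.0.1:5555')     # hypothetical endpoints
    dev.bind_out('tcp://127.0.0.1:5556')
    dev.bind_mon('tcp://127.0.0.1:5557')    # SUB sockets here see b'req'/b'rep'-prefixed copies
    dev.start()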
+ + +class ZMQBaseError(Exception): + """Base exception class for 0MQ errors in Python.""" + pass + +class ZMQError(ZMQBaseError): + """Wrap an errno style error. + + Parameters + ---------- + errno : int + The ZMQ errno or None. If None, then ``zmq_errno()`` is called and + used. + msg : string + Description of the error or None. + """ + errno = None + + def __init__(self, errno=None, msg=None): + """Wrap an errno style error. + + Parameters + ---------- + errno : int + The ZMQ errno or None. If None, then ``zmq_errno()`` is called and + used. + msg : string + Description of the error or None. + """ + from zmq.backend import strerror, zmq_errno + if errno is None: + errno = zmq_errno() + if isinstance(errno, int): + self.errno = errno + if msg is None: + self.strerror = strerror(errno) + else: + self.strerror = msg + else: + if msg is None: + self.strerror = str(errno) + else: + self.strerror = msg + # flush signals, because there could be a SIGINT + # waiting to pounce, resulting in uncaught exceptions. + # Doing this here means getting SIGINT during a blocking + # libzmq call will raise a *catchable* KeyboardInterrupt + # PyErr_CheckSignals() + + def __str__(self): + return self.strerror + + def __repr__(self): + return "ZMQError('%s')"%self.strerror + + +class ZMQBindError(ZMQBaseError): + """An error for ``Socket.bind_to_random_port()``. + + See Also + -------- + .Socket.bind_to_random_port + """ + pass + + +class NotDone(ZMQBaseError): + """Raised when timeout is reached while waiting for 0MQ to finish with a Message + + See Also + -------- + .MessageTracker.wait : object for tracking when ZeroMQ is done + """ + pass + + +class ContextTerminated(ZMQError): + """Wrapper for zmq.ETERM + + .. versionadded:: 13.0 + """ + pass + + +class Again(ZMQError): + """Wrapper for zmq.EAGAIN + + .. versionadded:: 13.0 + """ + pass + + +def _check_rc(rc, errno=None): + """internal utility for checking zmq return condition + + and raising the appropriate Exception class + """ + if rc < 0: + from zmq.backend import zmq_errno + if errno is None: + errno = zmq_errno() + from zmq import EAGAIN, ETERM + if errno == EAGAIN: + raise Again(errno) + elif errno == ETERM: + raise ContextTerminated(errno) + else: + raise ZMQError(errno) + +_zmq_version_info = None +_zmq_version = None + +class ZMQVersionError(NotImplementedError): + """Raised when a feature is not provided by the linked version of libzmq. + + .. versionadded:: 14.2 + """ + min_version = None + def __init__(self, min_version, msg='Feature'): + global _zmq_version + if _zmq_version is None: + from zmq import zmq_version + _zmq_version = zmq_version() + self.msg = msg + self.min_version = min_version + self.version = _zmq_version + + def __repr__(self): + return "ZMQVersionError('%s')" % str(self) + + def __str__(self): + return "%s requires libzmq >= %s, have %s" % (self.msg, self.min_version, self.version) + + +def _check_version(min_version_info, msg='Feature'): + """Check for libzmq + + raises ZMQVersionError if current zmq version is not at least min_version + + min_version_info is a tuple of integers, and will be compared against zmq.zmq_version_info(). 
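To illustrate the version guard defined here, a minimal sketch; the feature name and version tuple are illustrative placeholders, and ``_check_version`` is a module-private helper::

    from zmq.error import ZMQVersionError, _check_version

    try:
        _check_version((4, 1), 'CURVE security')    # hypothetical feature/version
    except ZMQVersionError as e:
        print(e)    # e.g. "CURVE security requires libzmq >= 4.1, have <version>"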
+ """ + global _zmq_version_info + if _zmq_version_info is None: + from zmq import zmq_version_info + _zmq_version_info = zmq_version_info() + if _zmq_version_info < min_version_info: + min_version = '.'.join(str(v) for v in min_version_info) + raise ZMQVersionError(min_version, msg) + + +__all__ = [ + 'ZMQBaseError', + 'ZMQBindError', + 'ZMQError', + 'NotDone', + 'ContextTerminated', + 'Again', + 'ZMQVersionError', +] diff --git a/scripts/external_libs/zmq/eventloop/__init__.py b/scripts/external_libs/zmq/eventloop/__init__.py new file mode 100644 index 00000000..568e8e8d --- /dev/null +++ b/scripts/external_libs/zmq/eventloop/__init__.py @@ -0,0 +1,5 @@ +"""A Tornado based event loop for PyZMQ.""" + +from zmq.eventloop.ioloop import IOLoop + +__all__ = ['IOLoop']
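Returning to error.py for a moment: a sketch of how these wrappers typically surface in application code (the endpoint is an assumption)::

    import zmq

    ctx = zmq.Context()
    sock = ctx.socket(zmq.PULL)
    sock.connect('tcp://127.0.0.1:5555')    # hypothetical endpoint
    try:
        msg = sock.recv(zmq.NOBLOCK)        # raises Again if nothing is queued
    except zmq.Again:
        msg = None                          # EAGAIN: try again later
    except zmq.ContextTerminated:
        sock.close()                        # ETERM: the context was terminated elsewhere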
\ No newline at end of file diff --git a/scripts/external_libs/zmq/eventloop/ioloop.py b/scripts/external_libs/zmq/eventloop/ioloop.py new file mode 100644 index 00000000..35f4c418 --- /dev/null +++ b/scripts/external_libs/zmq/eventloop/ioloop.py @@ -0,0 +1,193 @@ +# coding: utf-8 +"""tornado IOLoop API with zmq compatibility + +If you have tornado ≥ 3.0, this is a subclass of tornado's IOLoop, +otherwise we ship a minimal subset of tornado in zmq.eventloop.minitornado. + +The minimal shipped version of tornado's IOLoop does not include +support for concurrent futures - this will only be available if you +have tornado ≥ 3.0. +""" + +# Copyright (C) PyZMQ Developers +# Distributed under the terms of the Modified BSD License. + +from __future__ import absolute_import, division, with_statement + +import os +import time +import warnings + +from zmq import ( + Poller, + POLLIN, POLLOUT, POLLERR, + ZMQError, ETERM, +) + +try: + import tornado + tornado_version = tornado.version_info +except (ImportError, AttributeError): + tornado_version = () + +try: + # tornado ≥ 3 + from tornado.ioloop import PollIOLoop, PeriodicCallback + from tornado.log import gen_log +except ImportError: + from .minitornado.ioloop import PollIOLoop, PeriodicCallback + from .minitornado.log import gen_log + + +class DelayedCallback(PeriodicCallback): + """Schedules the given callback to be called once. + + The callback is called once, after callback_time milliseconds. + + `start` must be called after the DelayedCallback is created. + + The timeout is calculated from when `start` is called. + """ + def __init__(self, callback, callback_time, io_loop=None): + # PeriodicCallback require callback_time to be positive + warnings.warn("""DelayedCallback is deprecated. + Use loop.add_timeout instead.""", DeprecationWarning) + callback_time = max(callback_time, 1e-3) + super(DelayedCallback, self).__init__(callback, callback_time, io_loop) + + def start(self): + """Starts the timer.""" + self._running = True + self._firstrun = True + self._next_timeout = time.time() + self.callback_time / 1000.0 + self.io_loop.add_timeout(self._next_timeout, self._run) + + def _run(self): + if not self._running: return + self._running = False + try: + self.callback() + except Exception: + gen_log.error("Error in delayed callback", exc_info=True) + + +class ZMQPoller(object): + """A poller that can be used in the tornado IOLoop. + + This simply wraps a regular zmq.Poller, scaling the timeout + by 1000, so that it is in seconds rather than milliseconds. + """ + + def __init__(self): + self._poller = Poller() + + @staticmethod + def _map_events(events): + """translate IOLoop.READ/WRITE/ERROR event masks into zmq.POLLIN/OUT/ERR""" + z_events = 0 + if events & IOLoop.READ: + z_events |= POLLIN + if events & IOLoop.WRITE: + z_events |= POLLOUT + if events & IOLoop.ERROR: + z_events |= POLLERR + return z_events + + @staticmethod + def _remap_events(z_events): + """translate zmq.POLLIN/OUT/ERR event masks into IOLoop.READ/WRITE/ERROR""" + events = 0 + if z_events & POLLIN: + events |= IOLoop.READ + if z_events & POLLOUT: + events |= IOLoop.WRITE + if z_events & POLLERR: + events |= IOLoop.ERROR + return events + + def register(self, fd, events): + return self._poller.register(fd, self._map_events(events)) + + def modify(self, fd, events): + return self._poller.modify(fd, self._map_events(events)) + + def unregister(self, fd): + return self._poller.unregister(fd) + + def poll(self, timeout): + """poll in seconds rather than milliseconds. 
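As the deprecation warning in DelayedCallback above suggests, new code should schedule one-shot timers directly on the loop; a minimal migration sketch with an illustrative callback::

    from zmq.eventloop.ioloop import IOLoop

    loop = IOLoop.instance()

    def on_timer():                          # hypothetical callback
        print("fired once, about 1s after scheduling")

    # instead of DelayedCallback(on_timer, 1000, loop).start():
    loop.add_timeout(loop.time() + 1.0, on_timer)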
+ + Event masks will be IOLoop.READ/WRITE/ERROR + """ + z_events = self._poller.poll(1000*timeout) + return [ (fd,self._remap_events(evt)) for (fd,evt) in z_events ] + + def close(self): + pass + + +class ZMQIOLoop(PollIOLoop): + """ZMQ subclass of tornado's IOLoop""" + def initialize(self, impl=None, **kwargs): + impl = ZMQPoller() if impl is None else impl + super(ZMQIOLoop, self).initialize(impl=impl, **kwargs) + + @staticmethod + def instance(): + """Returns a global `IOLoop` instance. + + Most applications have a single, global `IOLoop` running on the + main thread. Use this method to get this instance from + another thread. To get the current thread's `IOLoop`, use `current()`. + """ + # install ZMQIOLoop as the active IOLoop implementation + # when using tornado 3 + if tornado_version >= (3,): + PollIOLoop.configure(ZMQIOLoop) + return PollIOLoop.instance() + + def start(self): + try: + super(ZMQIOLoop, self).start() + except ZMQError as e: + if e.errno == ETERM: + # quietly return on ETERM + pass + else: + raise e + + +if tornado_version >= (3,0) and tornado_version < (3,1): + def backport_close(self, all_fds=False): + """backport IOLoop.close to 3.0 from 3.1 (supports fd.close() method)""" + from zmq.eventloop.minitornado.ioloop import PollIOLoop as mini_loop + return mini_loop.close.__get__(self)(all_fds) + ZMQIOLoop.close = backport_close + + +# public API name +IOLoop = ZMQIOLoop + + +def install(): + """set the tornado IOLoop instance with the pyzmq IOLoop. + + After calling this function, tornado's IOLoop.instance() and pyzmq's + IOLoop.instance() will return the same object. + + An assertion error will be raised if tornado's IOLoop has been initialized + prior to calling this function. + """ + from tornado import ioloop + # check if tornado's IOLoop is already initialized to something other + # than the pyzmq IOLoop instance: + assert (not ioloop.IOLoop.initialized()) or \ + ioloop.IOLoop.instance() is IOLoop.instance(), "tornado IOLoop already initialized" + + if tornado_version >= (3,): + # tornado 3 has an official API for registering new defaults, yay! + ioloop.IOLoop.configure(ZMQIOLoop) + else: + # we have to set the global instance explicitly + ioloop.IOLoop._instance = IOLoop.instance() + diff --git a/scripts/external_libs/zmq/eventloop/minitornado/__init__.py b/scripts/external_libs/zmq/eventloop/minitornado/__init__.py new file mode 100644 index 00000000..e69de29b --- /dev/null +++ b/scripts/external_libs/zmq/eventloop/minitornado/__init__.py diff --git a/scripts/external_libs/zmq/eventloop/minitornado/concurrent.py b/scripts/external_libs/zmq/eventloop/minitornado/concurrent.py new file mode 100644 index 00000000..519b23d5 --- /dev/null +++ b/scripts/external_libs/zmq/eventloop/minitornado/concurrent.py @@ -0,0 +1,11 @@ +"""pyzmq does not ship tornado's futures, +this just raises informative NotImplementedErrors to avoid having to change too much code. +""" + +class NotImplementedFuture(object): + def __init__(self, *args, **kwargs): + raise NotImplementedError("pyzmq does not ship tornado's Futures, " + "install tornado >= 3.0 for future support." 
+ ) + +Future = TracebackFuture = NotImplementedFuture diff --git a/scripts/external_libs/zmq/eventloop/minitornado/ioloop.py b/scripts/external_libs/zmq/eventloop/minitornado/ioloop.py new file mode 100644 index 00000000..710a3ecb --- /dev/null +++ b/scripts/external_libs/zmq/eventloop/minitornado/ioloop.py @@ -0,0 +1,829 @@ +#!/usr/bin/env python +# +# Copyright 2009 Facebook +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +"""An I/O event loop for non-blocking sockets. + +Typical applications will use a single `IOLoop` object, in the +`IOLoop.instance` singleton. The `IOLoop.start` method should usually +be called at the end of the ``main()`` function. Atypical applications may +use more than one `IOLoop`, such as one `IOLoop` per thread, or per `unittest` +case. + +In addition to I/O events, the `IOLoop` can also schedule time-based events. +`IOLoop.add_timeout` is a non-blocking alternative to `time.sleep`. +""" + +from __future__ import absolute_import, division, print_function, with_statement + +import datetime +import errno +import functools +import heapq +import logging +import numbers +import os +import select +import sys +import threading +import time +import traceback + +from .concurrent import Future, TracebackFuture +from .log import app_log, gen_log +from . import stack_context +from .util import Configurable + +try: + import signal +except ImportError: + signal = None + +try: + import thread # py2 +except ImportError: + import _thread as thread # py3 + +from .platform.auto import set_close_exec, Waker + + +class TimeoutError(Exception): + pass + + +class IOLoop(Configurable): + """A level-triggered I/O loop. + + We use ``epoll`` (Linux) or ``kqueue`` (BSD and Mac OS X) if they + are available, or else we fall back on select(). If you are + implementing a system that needs to handle thousands of + simultaneous connections, you should use a system that supports + either ``epoll`` or ``kqueue``. 
+
+ Example usage for a simple TCP server::
+
+ import errno
+ import functools
+ import ioloop
+ import socket
+
+ def connection_ready(sock, fd, events):
+ while True:
+ try:
+ connection, address = sock.accept()
+ except socket.error as e:
+ if e.args[0] not in (errno.EWOULDBLOCK, errno.EAGAIN):
+ raise
+ return
+ connection.setblocking(0)
+ handle_connection(connection, address)
+
+ sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM, 0)
+ sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
+ sock.setblocking(0)
+ sock.bind(("", port))
+ sock.listen(128)
+
+ io_loop = ioloop.IOLoop.instance()
+ callback = functools.partial(connection_ready, sock)
+ io_loop.add_handler(sock.fileno(), callback, io_loop.READ)
+ io_loop.start()
+
+ """
+ # Constants from the epoll module
+ _EPOLLIN = 0x001
+ _EPOLLPRI = 0x002
+ _EPOLLOUT = 0x004
+ _EPOLLERR = 0x008
+ _EPOLLHUP = 0x010
+ _EPOLLRDHUP = 0x2000
+ _EPOLLONESHOT = (1 << 30)
+ _EPOLLET = (1 << 31)
+
+ # Our events map exactly to the epoll events
+ NONE = 0
+ READ = _EPOLLIN
+ WRITE = _EPOLLOUT
+ ERROR = _EPOLLERR | _EPOLLHUP
+
+ # Global lock for creating global IOLoop instance
+ _instance_lock = threading.Lock()
+
+ _current = threading.local()
+
+ @staticmethod
+ def instance():
+ """Returns a global `IOLoop` instance.
+
+ Most applications have a single, global `IOLoop` running on the
+ main thread. Use this method to get this instance from
+ another thread. To get the current thread's `IOLoop`, use `current()`.
+ """
+ if not hasattr(IOLoop, "_instance"):
+ with IOLoop._instance_lock:
+ if not hasattr(IOLoop, "_instance"):
+ # New instance after double check
+ IOLoop._instance = IOLoop()
+ return IOLoop._instance
+
+ @staticmethod
+ def initialized():
+ """Returns true if the singleton instance has been created."""
+ return hasattr(IOLoop, "_instance")
+
+ def install(self):
+ """Installs this `IOLoop` object as the singleton instance.
+
+ This is normally not necessary as `instance()` will create
+ an `IOLoop` on demand, but you may want to call `install` to use
+ a custom subclass of `IOLoop`.
+ """
+ assert not IOLoop.initialized()
+ IOLoop._instance = self
+
+ @staticmethod
+ def current():
+ """Returns the current thread's `IOLoop`.
+
+ If an `IOLoop` is currently running or has been marked as current
+ by `make_current`, returns that instance. Otherwise returns
+ `IOLoop.instance()`, i.e. the main thread's `IOLoop`.
+
+ A common pattern for classes that depend on ``IOLoops`` is to use
+ a default argument to enable programs with multiple ``IOLoops``
+ but not require the argument for simpler applications::
+
+ class MyClass(object):
+ def __init__(self, io_loop=None):
+ self.io_loop = io_loop or IOLoop.current()
+
+ In general you should use `IOLoop.current` as the default when
+ constructing an asynchronous object, and use `IOLoop.instance`
+ when you mean to communicate to the main thread from a different
+ one.
+ """
+ current = getattr(IOLoop._current, "instance", None)
+ if current is None:
+ return IOLoop.instance()
+ return current
+
+ def make_current(self):
+ """Makes this the `IOLoop` for the current thread.
+
+ An `IOLoop` automatically becomes current for its thread
+ when it is started, but it is sometimes useful to call
+ `make_current` explicitly before starting the `IOLoop`,
+ so that code run at startup time can find the right
+ instance.
+ """ + IOLoop._current.instance = self + + @staticmethod + def clear_current(): + IOLoop._current.instance = None + + @classmethod + def configurable_base(cls): + return IOLoop + + @classmethod + def configurable_default(cls): + # this is the only patch to IOLoop: + from zmq.eventloop.ioloop import ZMQIOLoop + return ZMQIOLoop + # the remainder of this method is unused, + # but left for preservation reasons + if hasattr(select, "epoll"): + from tornado.platform.epoll import EPollIOLoop + return EPollIOLoop + if hasattr(select, "kqueue"): + # Python 2.6+ on BSD or Mac + from tornado.platform.kqueue import KQueueIOLoop + return KQueueIOLoop + from tornado.platform.select import SelectIOLoop + return SelectIOLoop + + def initialize(self): + pass + + def close(self, all_fds=False): + """Closes the `IOLoop`, freeing any resources used. + + If ``all_fds`` is true, all file descriptors registered on the + IOLoop will be closed (not just the ones created by the + `IOLoop` itself). + + Many applications will only use a single `IOLoop` that runs for the + entire lifetime of the process. In that case closing the `IOLoop` + is not necessary since everything will be cleaned up when the + process exits. `IOLoop.close` is provided mainly for scenarios + such as unit tests, which create and destroy a large number of + ``IOLoops``. + + An `IOLoop` must be completely stopped before it can be closed. This + means that `IOLoop.stop()` must be called *and* `IOLoop.start()` must + be allowed to return before attempting to call `IOLoop.close()`. + Therefore the call to `close` will usually appear just after + the call to `start` rather than near the call to `stop`. + + .. versionchanged:: 3.1 + If the `IOLoop` implementation supports non-integer objects + for "file descriptors", those objects will have their + ``close`` method when ``all_fds`` is true. + """ + raise NotImplementedError() + + def add_handler(self, fd, handler, events): + """Registers the given handler to receive the given events for fd. + + The ``events`` argument is a bitwise or of the constants + ``IOLoop.READ``, ``IOLoop.WRITE``, and ``IOLoop.ERROR``. + + When an event occurs, ``handler(fd, events)`` will be run. + """ + raise NotImplementedError() + + def update_handler(self, fd, events): + """Changes the events we listen for fd.""" + raise NotImplementedError() + + def remove_handler(self, fd): + """Stop listening for events on fd.""" + raise NotImplementedError() + + def set_blocking_signal_threshold(self, seconds, action): + """Sends a signal if the `IOLoop` is blocked for more than + ``s`` seconds. + + Pass ``seconds=None`` to disable. Requires Python 2.6 on a unixy + platform. + + The action parameter is a Python signal handler. Read the + documentation for the `signal` module for more information. + If ``action`` is None, the process will be killed if it is + blocked for too long. + """ + raise NotImplementedError() + + def set_blocking_log_threshold(self, seconds): + """Logs a stack trace if the `IOLoop` is blocked for more than + ``s`` seconds. + + Equivalent to ``set_blocking_signal_threshold(seconds, + self.log_stack)`` + """ + self.set_blocking_signal_threshold(seconds, self.log_stack) + + def log_stack(self, signal, frame): + """Signal handler to log the stack trace of the current thread. + + For use with `set_blocking_signal_threshold`. + """ + gen_log.warning('IOLoop blocked for %f seconds in\n%s', + self._blocking_signal_threshold, + ''.join(traceback.format_stack(frame))) + + def start(self): + """Starts the I/O loop. 
+ + The loop will run until one of the callbacks calls `stop()`, which + will make the loop stop after the current event iteration completes. + """ + raise NotImplementedError() + + def stop(self): + """Stop the I/O loop. + + If the event loop is not currently running, the next call to `start()` + will return immediately. + + To use asynchronous methods from otherwise-synchronous code (such as + unit tests), you can start and stop the event loop like this:: + + ioloop = IOLoop() + async_method(ioloop=ioloop, callback=ioloop.stop) + ioloop.start() + + ``ioloop.start()`` will return after ``async_method`` has run + its callback, whether that callback was invoked before or + after ``ioloop.start``. + + Note that even after `stop` has been called, the `IOLoop` is not + completely stopped until `IOLoop.start` has also returned. + Some work that was scheduled before the call to `stop` may still + be run before the `IOLoop` shuts down. + """ + raise NotImplementedError() + + def run_sync(self, func, timeout=None): + """Starts the `IOLoop`, runs the given function, and stops the loop. + + If the function returns a `.Future`, the `IOLoop` will run + until the future is resolved. If it raises an exception, the + `IOLoop` will stop and the exception will be re-raised to the + caller. + + The keyword-only argument ``timeout`` may be used to set + a maximum duration for the function. If the timeout expires, + a `TimeoutError` is raised. + + This method is useful in conjunction with `tornado.gen.coroutine` + to allow asynchronous calls in a ``main()`` function:: + + @gen.coroutine + def main(): + # do stuff... + + if __name__ == '__main__': + IOLoop.instance().run_sync(main) + """ + future_cell = [None] + + def run(): + try: + result = func() + except Exception: + future_cell[0] = TracebackFuture() + future_cell[0].set_exc_info(sys.exc_info()) + else: + if isinstance(result, Future): + future_cell[0] = result + else: + future_cell[0] = Future() + future_cell[0].set_result(result) + self.add_future(future_cell[0], lambda future: self.stop()) + self.add_callback(run) + if timeout is not None: + timeout_handle = self.add_timeout(self.time() + timeout, self.stop) + self.start() + if timeout is not None: + self.remove_timeout(timeout_handle) + if not future_cell[0].done(): + raise TimeoutError('Operation timed out after %s seconds' % timeout) + return future_cell[0].result() + + def time(self): + """Returns the current time according to the `IOLoop`'s clock. + + The return value is a floating-point number relative to an + unspecified time in the past. + + By default, the `IOLoop`'s time function is `time.time`. However, + it may be configured to use e.g. `time.monotonic` instead. + Calls to `add_timeout` that pass a number instead of a + `datetime.timedelta` should use this function to compute the + appropriate time, so they can work no matter what time function + is chosen. + """ + return time.time() + + def add_timeout(self, deadline, callback): + """Runs the ``callback`` at the time ``deadline`` from the I/O loop. + + Returns an opaque handle that may be passed to + `remove_timeout` to cancel. + + ``deadline`` may be a number denoting a time (on the same + scale as `IOLoop.time`, normally `time.time`), or a + `datetime.timedelta` object for a deadline relative to the + current time. + + Note that it is not safe to call `add_timeout` from other threads. + Instead, you must use `add_callback` to transfer control to the + `IOLoop`'s thread, and then call `add_timeout` from there. 
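A sketch of the pattern that note describes, with an illustrative worker thread (the loop must actually be running for the callbacks to fire)::

    import threading
    from zmq.eventloop.ioloop import IOLoop

    loop = IOLoop.instance()

    def arm_timer():
        # runs on the loop's thread, where add_timeout is safe
        loop.add_timeout(loop.time() + 5.0, loop.stop)

    # from any other thread, hop onto the loop's thread first
    threading.Thread(target=lambda: loop.add_callback(arm_timer)).start()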
+ """ + raise NotImplementedError() + + def remove_timeout(self, timeout): + """Cancels a pending timeout. + + The argument is a handle as returned by `add_timeout`. It is + safe to call `remove_timeout` even if the callback has already + been run. + """ + raise NotImplementedError() + + def add_callback(self, callback, *args, **kwargs): + """Calls the given callback on the next I/O loop iteration. + + It is safe to call this method from any thread at any time, + except from a signal handler. Note that this is the **only** + method in `IOLoop` that makes this thread-safety guarantee; all + other interaction with the `IOLoop` must be done from that + `IOLoop`'s thread. `add_callback()` may be used to transfer + control from other threads to the `IOLoop`'s thread. + + To add a callback from a signal handler, see + `add_callback_from_signal`. + """ + raise NotImplementedError() + + def add_callback_from_signal(self, callback, *args, **kwargs): + """Calls the given callback on the next I/O loop iteration. + + Safe for use from a Python signal handler; should not be used + otherwise. + + Callbacks added with this method will be run without any + `.stack_context`, to avoid picking up the context of the function + that was interrupted by the signal. + """ + raise NotImplementedError() + + def add_future(self, future, callback): + """Schedules a callback on the ``IOLoop`` when the given + `.Future` is finished. + + The callback is invoked with one argument, the + `.Future`. + """ + assert isinstance(future, Future) + callback = stack_context.wrap(callback) + future.add_done_callback( + lambda future: self.add_callback(callback, future)) + + def _run_callback(self, callback): + """Runs a callback with error handling. + + For use in subclasses. + """ + try: + callback() + except Exception: + self.handle_callback_exception(callback) + + def handle_callback_exception(self, callback): + """This method is called whenever a callback run by the `IOLoop` + throws an exception. + + By default simply logs the exception as an error. Subclasses + may override this method to customize reporting of exceptions. + + The exception itself is not passed explicitly, but is available + in `sys.exc_info`. + """ + app_log.error("Exception in callback %r", callback, exc_info=True) + + +class PollIOLoop(IOLoop): + """Base class for IOLoops built around a select-like function. + + For concrete implementations, see `tornado.platform.epoll.EPollIOLoop` + (Linux), `tornado.platform.kqueue.KQueueIOLoop` (BSD and Mac), or + `tornado.platform.select.SelectIOLoop` (all platforms). 
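Since handle_callback_exception is the documented hook for callback errors, a minimal subclass sketch; the class name and handling are illustrative::

    import sys
    from zmq.eventloop.ioloop import ZMQIOLoop

    class LoggingLoop(ZMQIOLoop):            # hypothetical subclass
        def handle_callback_exception(self, callback):
            # the exception is available via sys.exc_info(), not as an argument
            etype, evalue, tb = sys.exc_info()
            print("callback %r raised %s" % (callback, evalue))

    loop = LoggingLoop()                     # or install() it as the singleton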
+ """ + def initialize(self, impl, time_func=None): + super(PollIOLoop, self).initialize() + self._impl = impl + if hasattr(self._impl, 'fileno'): + set_close_exec(self._impl.fileno()) + self.time_func = time_func or time.time + self._handlers = {} + self._events = {} + self._callbacks = [] + self._callback_lock = threading.Lock() + self._timeouts = [] + self._cancellations = 0 + self._running = False + self._stopped = False + self._closing = False + self._thread_ident = None + self._blocking_signal_threshold = None + + # Create a pipe that we send bogus data to when we want to wake + # the I/O loop when it is idle + self._waker = Waker() + self.add_handler(self._waker.fileno(), + lambda fd, events: self._waker.consume(), + self.READ) + + def close(self, all_fds=False): + with self._callback_lock: + self._closing = True + self.remove_handler(self._waker.fileno()) + if all_fds: + for fd in self._handlers.keys(): + try: + close_method = getattr(fd, 'close', None) + if close_method is not None: + close_method() + else: + os.close(fd) + except Exception: + gen_log.debug("error closing fd %s", fd, exc_info=True) + self._waker.close() + self._impl.close() + + def add_handler(self, fd, handler, events): + self._handlers[fd] = stack_context.wrap(handler) + self._impl.register(fd, events | self.ERROR) + + def update_handler(self, fd, events): + self._impl.modify(fd, events | self.ERROR) + + def remove_handler(self, fd): + self._handlers.pop(fd, None) + self._events.pop(fd, None) + try: + self._impl.unregister(fd) + except Exception: + gen_log.debug("Error deleting fd from IOLoop", exc_info=True) + + def set_blocking_signal_threshold(self, seconds, action): + if not hasattr(signal, "setitimer"): + gen_log.error("set_blocking_signal_threshold requires a signal module " + "with the setitimer method") + return + self._blocking_signal_threshold = seconds + if seconds is not None: + signal.signal(signal.SIGALRM, + action if action is not None else signal.SIG_DFL) + + def start(self): + if not logging.getLogger().handlers: + # The IOLoop catches and logs exceptions, so it's + # important that log output be visible. However, python's + # default behavior for non-root loggers (prior to python + # 3.2) is to print an unhelpful "no handlers could be + # found" message rather than the actual log entry, so we + # must explicitly configure logging if we've made it this + # far without anything. + logging.basicConfig() + if self._stopped: + self._stopped = False + return + old_current = getattr(IOLoop._current, "instance", None) + IOLoop._current.instance = self + self._thread_ident = thread.get_ident() + self._running = True + + # signal.set_wakeup_fd closes a race condition in event loops: + # a signal may arrive at the beginning of select/poll/etc + # before it goes into its interruptible sleep, so the signal + # will be consumed without waking the select. The solution is + # for the (C, synchronous) signal handler to write to a pipe, + # which will then be seen by select. + # + # In python's signal handling semantics, this only matters on the + # main thread (fortunately, set_wakeup_fd only works on the main + # thread and will raise a ValueError otherwise). + # + # If someone has already set a wakeup fd, we don't want to + # disturb it. This is an issue for twisted, which does its + # SIGCHILD processing in response to its own wakeup fd being + # written to. As long as the wakeup fd is registered on the IOLoop, + # the loop will still wake up and everything should work. 
+ old_wakeup_fd = None + if hasattr(signal, 'set_wakeup_fd') and os.name == 'posix': + # requires python 2.6+, unix. set_wakeup_fd exists but crashes + # the python process on windows. + try: + old_wakeup_fd = signal.set_wakeup_fd(self._waker.write_fileno()) + if old_wakeup_fd != -1: + # Already set, restore previous value. This is a little racy, + # but there's no clean get_wakeup_fd and in real use the + # IOLoop is just started once at the beginning. + signal.set_wakeup_fd(old_wakeup_fd) + old_wakeup_fd = None + except ValueError: # non-main thread + pass + + while True: + poll_timeout = 3600.0 + + # Prevent IO event starvation by delaying new callbacks + # to the next iteration of the event loop. + with self._callback_lock: + callbacks = self._callbacks + self._callbacks = [] + for callback in callbacks: + self._run_callback(callback) + + if self._timeouts: + now = self.time() + while self._timeouts: + if self._timeouts[0].callback is None: + # the timeout was cancelled + heapq.heappop(self._timeouts) + self._cancellations -= 1 + elif self._timeouts[0].deadline <= now: + timeout = heapq.heappop(self._timeouts) + self._run_callback(timeout.callback) + else: + seconds = self._timeouts[0].deadline - now + poll_timeout = min(seconds, poll_timeout) + break + if (self._cancellations > 512 + and self._cancellations > (len(self._timeouts) >> 1)): + # Clean up the timeout queue when it gets large and it's + # more than half cancellations. + self._cancellations = 0 + self._timeouts = [x for x in self._timeouts + if x.callback is not None] + heapq.heapify(self._timeouts) + + if self._callbacks: + # If any callbacks or timeouts called add_callback, + # we don't want to wait in poll() before we run them. + poll_timeout = 0.0 + + if not self._running: + break + + if self._blocking_signal_threshold is not None: + # clear alarm so it doesn't fire while poll is waiting for + # events. + signal.setitimer(signal.ITIMER_REAL, 0, 0) + + try: + event_pairs = self._impl.poll(poll_timeout) + except Exception as e: + # Depending on python version and IOLoop implementation, + # different exception types may be thrown and there are + # two ways EINTR might be signaled: + # * e.errno == errno.EINTR + # * e.args is like (errno.EINTR, 'Interrupted system call') + if (getattr(e, 'errno', None) == errno.EINTR or + (isinstance(getattr(e, 'args', None), tuple) and + len(e.args) == 2 and e.args[0] == errno.EINTR)): + continue + else: + raise + + if self._blocking_signal_threshold is not None: + signal.setitimer(signal.ITIMER_REAL, + self._blocking_signal_threshold, 0) + + # Pop one fd at a time from the set of pending fds and run + # its handler. 
Since that handler may perform actions on + # other file descriptors, there may be reentrant calls to + # this IOLoop that update self._events + self._events.update(event_pairs) + while self._events: + fd, events = self._events.popitem() + try: + self._handlers[fd](fd, events) + except (OSError, IOError) as e: + if e.args[0] == errno.EPIPE: + # Happens when the client closes the connection + pass + else: + app_log.error("Exception in I/O handler for fd %s", + fd, exc_info=True) + except Exception: + app_log.error("Exception in I/O handler for fd %s", + fd, exc_info=True) + # reset the stopped flag so another start/stop pair can be issued + self._stopped = False + if self._blocking_signal_threshold is not None: + signal.setitimer(signal.ITIMER_REAL, 0, 0) + IOLoop._current.instance = old_current + if old_wakeup_fd is not None: + signal.set_wakeup_fd(old_wakeup_fd) + + def stop(self): + self._running = False + self._stopped = True + self._waker.wake() + + def time(self): + return self.time_func() + + def add_timeout(self, deadline, callback): + timeout = _Timeout(deadline, stack_context.wrap(callback), self) + heapq.heappush(self._timeouts, timeout) + return timeout + + def remove_timeout(self, timeout): + # Removing from a heap is complicated, so just leave the defunct + # timeout object in the queue (see discussion in + # http://docs.python.org/library/heapq.html). + # If this turns out to be a problem, we could add a garbage + # collection pass whenever there are too many dead timeouts. + timeout.callback = None + self._cancellations += 1 + + def add_callback(self, callback, *args, **kwargs): + with self._callback_lock: + if self._closing: + raise RuntimeError("IOLoop is closing") + list_empty = not self._callbacks + self._callbacks.append(functools.partial( + stack_context.wrap(callback), *args, **kwargs)) + if list_empty and thread.get_ident() != self._thread_ident: + # If we're in the IOLoop's thread, we know it's not currently + # polling. If we're not, and we added the first callback to an + # empty list, we may need to wake it up (it may wake up on its + # own, but an occasional extra wake is harmless). Waking + # up a polling IOLoop is relatively expensive, so we try to + # avoid it when we can. + self._waker.wake() + + def add_callback_from_signal(self, callback, *args, **kwargs): + with stack_context.NullContext(): + if thread.get_ident() != self._thread_ident: + # if the signal is handled on another thread, we can add + # it normally (modulo the NullContext) + self.add_callback(callback, *args, **kwargs) + else: + # If we're on the IOLoop's thread, we cannot use + # the regular add_callback because it may deadlock on + # _callback_lock. Blindly insert into self._callbacks. + # This is safe because the GIL makes list.append atomic. + # One subtlety is that if the signal interrupted the + # _callback_lock block in IOLoop.start, we may modify + # either the old or new version of self._callbacks, + # but either way will work. 
+ self._callbacks.append(functools.partial( + stack_context.wrap(callback), *args, **kwargs)) + + +class _Timeout(object): + """An IOLoop timeout, a UNIX timestamp and a callback""" + + # Reduce memory overhead when there are lots of pending callbacks + __slots__ = ['deadline', 'callback'] + + def __init__(self, deadline, callback, io_loop): + if isinstance(deadline, numbers.Real): + self.deadline = deadline + elif isinstance(deadline, datetime.timedelta): + self.deadline = io_loop.time() + _Timeout.timedelta_to_seconds(deadline) + else: + raise TypeError("Unsupported deadline %r" % deadline) + self.callback = callback + + @staticmethod + def timedelta_to_seconds(td): + """Equivalent to td.total_seconds() (introduced in python 2.7).""" + return (td.microseconds + (td.seconds + td.days * 24 * 3600) * 10 ** 6) / float(10 ** 6) + + # Comparison methods to sort by deadline, with object id as a tiebreaker + # to guarantee a consistent ordering. The heapq module uses __le__ + # in python2.5, and __lt__ in 2.6+ (sort() and most other comparisons + # use __lt__). + def __lt__(self, other): + return ((self.deadline, id(self)) < + (other.deadline, id(other))) + + def __le__(self, other): + return ((self.deadline, id(self)) <= + (other.deadline, id(other))) + + +class PeriodicCallback(object): + """Schedules the given callback to be called periodically. + + The callback is called every ``callback_time`` milliseconds. + + `start` must be called after the `PeriodicCallback` is created. + """ + def __init__(self, callback, callback_time, io_loop=None): + self.callback = callback + if callback_time <= 0: + raise ValueError("Periodic callback must have a positive callback_time") + self.callback_time = callback_time + self.io_loop = io_loop or IOLoop.current() + self._running = False + self._timeout = None + + def start(self): + """Starts the timer.""" + self._running = True + self._next_timeout = self.io_loop.time() + self._schedule_next() + + def stop(self): + """Stops the timer.""" + self._running = False + if self._timeout is not None: + self.io_loop.remove_timeout(self._timeout) + self._timeout = None + + def _run(self): + if not self._running: + return + try: + self.callback() + except Exception: + app_log.error("Error in periodic callback", exc_info=True) + self._schedule_next() + + def _schedule_next(self): + if self._running: + current_time = self.io_loop.time() + while self._next_timeout <= current_time: + self._next_timeout += self.callback_time / 1000.0 + self._timeout = self.io_loop.add_timeout(self._next_timeout, self._run) diff --git a/scripts/external_libs/zmq/eventloop/minitornado/log.py b/scripts/external_libs/zmq/eventloop/minitornado/log.py new file mode 100644 index 00000000..49051e89 --- /dev/null +++ b/scripts/external_libs/zmq/eventloop/minitornado/log.py @@ -0,0 +1,6 @@ +"""minimal subset of tornado.log for zmq.eventloop.minitornado""" + +import logging + +app_log = logging.getLogger("tornado.application") +gen_log = logging.getLogger("tornado.general") diff --git a/scripts/external_libs/zmq/eventloop/minitornado/platform/__init__.py b/scripts/external_libs/zmq/eventloop/minitornado/platform/__init__.py new file mode 100644 index 00000000..e69de29b --- /dev/null +++ b/scripts/external_libs/zmq/eventloop/minitornado/platform/__init__.py diff --git a/scripts/external_libs/zmq/eventloop/minitornado/platform/auto.py b/scripts/external_libs/zmq/eventloop/minitornado/platform/auto.py new file mode 100644 index 00000000..b40ccd94 --- /dev/null +++ 
b/scripts/external_libs/zmq/eventloop/minitornado/platform/auto.py @@ -0,0 +1,45 @@ +#!/usr/bin/env python +# +# Copyright 2011 Facebook +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +"""Implementation of platform-specific functionality. + +For each function or class described in `tornado.platform.interface`, +the appropriate platform-specific implementation exists in this module. +Most code that needs access to this functionality should do e.g.:: + + from tornado.platform.auto import set_close_exec +""" + +from __future__ import absolute_import, division, print_function, with_statement + +import os + +if os.name == 'nt': + from .common import Waker + from .windows import set_close_exec +else: + from .posix import set_close_exec, Waker + +try: + # monotime monkey-patches the time module to have a monotonic function + # in versions of python before 3.3. + import monotime +except ImportError: + pass +try: + from time import monotonic as monotonic_time +except ImportError: + monotonic_time = None diff --git a/scripts/external_libs/zmq/eventloop/minitornado/platform/common.py b/scripts/external_libs/zmq/eventloop/minitornado/platform/common.py new file mode 100644 index 00000000..2d75dc1e --- /dev/null +++ b/scripts/external_libs/zmq/eventloop/minitornado/platform/common.py @@ -0,0 +1,91 @@ +"""Lowest-common-denominator implementations of platform functionality.""" +from __future__ import absolute_import, division, print_function, with_statement + +import errno +import socket + +from . import interface + + +class Waker(interface.Waker): + """Create an OS independent asynchronous pipe. + + For use on platforms that don't have os.pipe() (or where pipes cannot + be passed to select()), but do have sockets. This includes Windows + and Jython. + """ + def __init__(self): + # Based on Zope async.py: http://svn.zope.org/zc.ngi/trunk/src/zc/ngi/async.py + + self.writer = socket.socket() + # Disable buffering -- pulling the trigger sends 1 byte, + # and we want that sent immediately, to wake up ASAP. + self.writer.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1) + + count = 0 + while 1: + count += 1 + # Bind to a local port; for efficiency, let the OS pick + # a free port for us. + # Unfortunately, stress tests showed that we may not + # be able to connect to that port ("Address already in + # use") despite that the OS picked it. This appears + # to be a race bug in the Windows socket implementation. + # So we loop until a connect() succeeds (almost always + # on the first try). See the long thread at + # http://mail.zope.org/pipermail/zope/2005-July/160433.html + # for hideous details. 
+ a = socket.socket() + a.bind(("127.0.0.1", 0)) + a.listen(1) + connect_address = a.getsockname() # assigned (host, port) pair + try: + self.writer.connect(connect_address) + break # success + except socket.error as detail: + if (not hasattr(errno, 'WSAEADDRINUSE') or + detail[0] != errno.WSAEADDRINUSE): + # "Address already in use" is the only error + # I've seen on two WinXP Pro SP2 boxes, under + # Pythons 2.3.5 and 2.4.1. + raise + # (10048, 'Address already in use') + # assert count <= 2 # never triggered in Tim's tests + if count >= 10: # I've never seen it go above 2 + a.close() + self.writer.close() + raise socket.error("Cannot bind trigger!") + # Close `a` and try again. Note: I originally put a short + # sleep() here, but it didn't appear to help or hurt. + a.close() + + self.reader, addr = a.accept() + self.reader.setblocking(0) + self.writer.setblocking(0) + a.close() + self.reader_fd = self.reader.fileno() + + def fileno(self): + return self.reader.fileno() + + def write_fileno(self): + return self.writer.fileno() + + def wake(self): + try: + self.writer.send(b"x") + except (IOError, socket.error): + pass + + def consume(self): + try: + while True: + result = self.reader.recv(1024) + if not result: + break + except (IOError, socket.error): + pass + + def close(self): + self.reader.close() + self.writer.close() diff --git a/scripts/external_libs/zmq/eventloop/minitornado/platform/interface.py b/scripts/external_libs/zmq/eventloop/minitornado/platform/interface.py new file mode 100644 index 00000000..07da6bab --- /dev/null +++ b/scripts/external_libs/zmq/eventloop/minitornado/platform/interface.py @@ -0,0 +1,63 @@ +#!/usr/bin/env python +# +# Copyright 2011 Facebook +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +"""Interfaces for platform-specific functionality. + +This module exists primarily for documentation purposes and as base classes +for other tornado.platform modules. Most code should import the appropriate +implementation from `tornado.platform.auto`. +""" + +from __future__ import absolute_import, division, print_function, with_statement + + +def set_close_exec(fd): + """Sets the close-on-exec bit (``FD_CLOEXEC``)for a file descriptor.""" + raise NotImplementedError() + + +class Waker(object): + """A socket-like object that can wake another thread from ``select()``. + + The `~tornado.ioloop.IOLoop` will add the Waker's `fileno()` to + its ``select`` (or ``epoll`` or ``kqueue``) calls. When another + thread wants to wake up the loop, it calls `wake`. Once it has woken + up, it will call `consume` to do any necessary per-wake cleanup. When + the ``IOLoop`` is closed, it closes its waker too. + """ + def fileno(self): + """Returns the read file descriptor for this waker. + + Must be suitable for use with ``select()`` or equivalent on the + local platform. 
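This interface is consumed by the loop in exactly the way PollIOLoop.initialize above wires it up; a condensed sketch of that pattern::

    from zmq.eventloop.ioloop import IOLoop
    from zmq.eventloop.minitornado.platform.auto import Waker

    loop = IOLoop.instance()
    waker = Waker()
    # drain the wake byte(s) whenever another thread pokes the loop
    loop.add_handler(waker.fileno(), lambda fd, events: waker.consume(), loop.READ)
    # ... later, from any thread:
    waker.wake()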
+ """ + raise NotImplementedError() + + def write_fileno(self): + """Returns the write file descriptor for this waker.""" + raise NotImplementedError() + + def wake(self): + """Triggers activity on the waker's file descriptor.""" + raise NotImplementedError() + + def consume(self): + """Called after the listen has woken up to do any necessary cleanup.""" + raise NotImplementedError() + + def close(self): + """Closes the waker's file descriptor(s).""" + raise NotImplementedError() diff --git a/scripts/external_libs/zmq/eventloop/minitornado/platform/posix.py b/scripts/external_libs/zmq/eventloop/minitornado/platform/posix.py new file mode 100644 index 00000000..ccffbb66 --- /dev/null +++ b/scripts/external_libs/zmq/eventloop/minitornado/platform/posix.py @@ -0,0 +1,70 @@ +#!/usr/bin/env python +# +# Copyright 2011 Facebook +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +"""Posix implementations of platform-specific functionality.""" + +from __future__ import absolute_import, division, print_function, with_statement + +import fcntl +import os + +from . import interface + + +def set_close_exec(fd): + flags = fcntl.fcntl(fd, fcntl.F_GETFD) + fcntl.fcntl(fd, fcntl.F_SETFD, flags | fcntl.FD_CLOEXEC) + + +def _set_nonblocking(fd): + flags = fcntl.fcntl(fd, fcntl.F_GETFL) + fcntl.fcntl(fd, fcntl.F_SETFL, flags | os.O_NONBLOCK) + + +class Waker(interface.Waker): + def __init__(self): + r, w = os.pipe() + _set_nonblocking(r) + _set_nonblocking(w) + set_close_exec(r) + set_close_exec(w) + self.reader = os.fdopen(r, "rb", 0) + self.writer = os.fdopen(w, "wb", 0) + + def fileno(self): + return self.reader.fileno() + + def write_fileno(self): + return self.writer.fileno() + + def wake(self): + try: + self.writer.write(b"x") + except IOError: + pass + + def consume(self): + try: + while True: + result = self.reader.read() + if not result: + break + except IOError: + pass + + def close(self): + self.reader.close() + self.writer.close() diff --git a/scripts/external_libs/zmq/eventloop/minitornado/platform/windows.py b/scripts/external_libs/zmq/eventloop/minitornado/platform/windows.py new file mode 100644 index 00000000..817bdca1 --- /dev/null +++ b/scripts/external_libs/zmq/eventloop/minitornado/platform/windows.py @@ -0,0 +1,20 @@ +# NOTE: win32 support is currently experimental, and not recommended +# for production use. 
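Before the Windows variant below, a quick round-trip with the POSIX Waker just defined, using plain select to stand in for the loop's poller::

    import select
    from zmq.eventloop.minitornado.platform.posix import Waker

    w = Waker()
    r, _, _ = select.select([w.fileno()], [], [], 0)
    assert r == []                    # nothing pending yet
    w.wake()                          # typically called from another thread
    r, _, _ = select.select([w.fileno()], [], [], 0)
    assert r == [w.fileno()]          # the read end of the pipe is now ready
    w.consume()                       # drain it for the next wake
    w.close()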
+
+
+from __future__ import absolute_import, division, print_function, with_statement
+import ctypes
+import ctypes.wintypes
+
+# See: http://msdn.microsoft.com/en-us/library/ms724935(VS.85).aspx
+SetHandleInformation = ctypes.windll.kernel32.SetHandleInformation
+SetHandleInformation.argtypes = (ctypes.wintypes.HANDLE, ctypes.wintypes.DWORD, ctypes.wintypes.DWORD)
+SetHandleInformation.restype = ctypes.wintypes.BOOL
+
+HANDLE_FLAG_INHERIT = 0x00000001
+
+
+def set_close_exec(fd):
+    success = SetHandleInformation(fd, HANDLE_FLAG_INHERIT, 0)
+    if not success:
+        # GetLastError() returns a plain int, which cannot be raised;
+        # WinError() wraps it in a raisable OSError subclass.
+        raise ctypes.WinError()
diff --git a/scripts/external_libs/zmq/eventloop/minitornado/stack_context.py b/scripts/external_libs/zmq/eventloop/minitornado/stack_context.py
new file mode 100644
index 00000000..226d8042
--- /dev/null
+++ b/scripts/external_libs/zmq/eventloop/minitornado/stack_context.py
@@ -0,0 +1,376 @@
+#!/usr/bin/env python
+#
+# Copyright 2010 Facebook
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""`StackContext` allows applications to maintain threadlocal-like state
+that follows execution as it moves to other execution contexts.
+
+The motivating examples are to eliminate the need for explicit
+``async_callback`` wrappers (as in `tornado.web.RequestHandler`), and to
+allow some additional context to be kept for logging.
+
+This is slightly magic, but it's an extension of the idea that an
+exception handler is a kind of stack-local state and when that stack
+is suspended and resumed in a new context that state needs to be
+preserved. `StackContext` shifts the burden of restoring that state
+from each call site (e.g. wrapping each `.AsyncHTTPClient` callback
+in ``async_callback``) to the mechanisms that transfer control from
+one context to another (e.g. `.AsyncHTTPClient` itself, `.IOLoop`,
+thread pools, etc).
+
+Example usage::
+
+    @contextlib.contextmanager
+    def die_on_error():
+        try:
+            yield
+        except Exception:
+            logging.error("exception in asynchronous operation", exc_info=True)
+            sys.exit(1)
+
+    with StackContext(die_on_error):
+        # Any exception thrown here *or in callback and its descendants*
+        # will cause the process to exit instead of spinning endlessly
+        # in the ioloop.
+        http_client.fetch(url, callback)
+    ioloop.start()
+
+Most applications shouldn't have to work with `StackContext` directly.
+Here are a few rules of thumb for when it's necessary:
+
+* If you're writing an asynchronous library that doesn't rely on a
+  stack_context-aware library like `tornado.ioloop` or `tornado.iostream`
+  (for example, if you're writing a thread pool), use
+  `.stack_context.wrap()` before any asynchronous operations to capture the
+  stack context from where the operation was started.
+
+* If you're writing an asynchronous library that has some shared
+  resources (such as a connection pool), create those shared resources
+  within a ``with stack_context.NullContext():`` block.  This will prevent
+  ``StackContexts`` from leaking from one request to another.
+ +* If you want to write something like an exception handler that will + persist across asynchronous calls, create a new `StackContext` (or + `ExceptionStackContext`), and make your asynchronous calls in a ``with`` + block that references your `StackContext`. +""" + +from __future__ import absolute_import, division, print_function, with_statement + +import sys +import threading + +from .util import raise_exc_info + + +class StackContextInconsistentError(Exception): + pass + + +class _State(threading.local): + def __init__(self): + self.contexts = (tuple(), None) +_state = _State() + + +class StackContext(object): + """Establishes the given context as a StackContext that will be transferred. + + Note that the parameter is a callable that returns a context + manager, not the context itself. That is, where for a + non-transferable context manager you would say:: + + with my_context(): + + StackContext takes the function itself rather than its result:: + + with StackContext(my_context): + + The result of ``with StackContext() as cb:`` is a deactivation + callback. Run this callback when the StackContext is no longer + needed to ensure that it is not propagated any further (note that + deactivating a context does not affect any instances of that + context that are currently pending). This is an advanced feature + and not necessary in most applications. + """ + def __init__(self, context_factory): + self.context_factory = context_factory + self.contexts = [] + self.active = True + + def _deactivate(self): + self.active = False + + # StackContext protocol + def enter(self): + context = self.context_factory() + self.contexts.append(context) + context.__enter__() + + def exit(self, type, value, traceback): + context = self.contexts.pop() + context.__exit__(type, value, traceback) + + # Note that some of this code is duplicated in ExceptionStackContext + # below. ExceptionStackContext is more common and doesn't need + # the full generality of this class. + def __enter__(self): + self.old_contexts = _state.contexts + self.new_contexts = (self.old_contexts[0] + (self,), self) + _state.contexts = self.new_contexts + + try: + self.enter() + except: + _state.contexts = self.old_contexts + raise + + return self._deactivate + + def __exit__(self, type, value, traceback): + try: + self.exit(type, value, traceback) + finally: + final_contexts = _state.contexts + _state.contexts = self.old_contexts + + # Generator coroutines and with-statements with non-local + # effects interact badly. Check here for signs of + # the stack getting out of sync. + # Note that this check comes after restoring _state.context + # so that if it fails things are left in a (relatively) + # consistent state. + if final_contexts is not self.new_contexts: + raise StackContextInconsistentError( + 'stack_context inconsistency (may be caused by yield ' + 'within a "with StackContext" block)') + + # Break up a reference to itself to allow for faster GC on CPython. + self.new_contexts = None + + +class ExceptionStackContext(object): + """Specialization of StackContext for exception handling. + + The supplied ``exception_handler`` function will be called in the + event of an uncaught exception in this context. The semantics are + similar to a try/finally clause, and intended use cases are to log + an error, close a socket, or similar cleanup actions. The + ``exc_info`` triple ``(type, value, traceback)`` will be passed to the + exception_handler function. 
+
+    If the exception handler returns true, the exception will be
+    consumed and will not be propagated to other exception handlers.
+    """
+    def __init__(self, exception_handler):
+        self.exception_handler = exception_handler
+        self.active = True
+
+    def _deactivate(self):
+        self.active = False
+
+    def exit(self, type, value, traceback):
+        if type is not None:
+            return self.exception_handler(type, value, traceback)
+
+    def __enter__(self):
+        self.old_contexts = _state.contexts
+        self.new_contexts = (self.old_contexts[0], self)
+        _state.contexts = self.new_contexts
+
+        return self._deactivate
+
+    def __exit__(self, type, value, traceback):
+        try:
+            if type is not None:
+                return self.exception_handler(type, value, traceback)
+        finally:
+            final_contexts = _state.contexts
+            _state.contexts = self.old_contexts
+
+            if final_contexts is not self.new_contexts:
+                raise StackContextInconsistentError(
+                    'stack_context inconsistency (may be caused by yield '
+                    'within a "with StackContext" block)')
+
+            # Break up a reference to itself to allow for faster GC on CPython.
+            self.new_contexts = None
+
+
+class NullContext(object):
+    """Resets the `StackContext`.
+
+    Useful when creating a shared resource on demand (e.g. an
+    `.AsyncHTTPClient`) where the stack that caused the creation is
+    not relevant to future operations.
+    """
+    def __enter__(self):
+        self.old_contexts = _state.contexts
+        _state.contexts = (tuple(), None)
+
+    def __exit__(self, type, value, traceback):
+        _state.contexts = self.old_contexts
+
+
+def _remove_deactivated(contexts):
+    """Remove deactivated handlers from the chain"""
+    # Clean ctx handlers
+    stack_contexts = tuple([h for h in contexts[0] if h.active])
+
+    # Find new head
+    head = contexts[1]
+    while head is not None and not head.active:
+        head = head.old_contexts[1]
+
+    # Process chain
+    ctx = head
+    while ctx is not None:
+        parent = ctx.old_contexts[1]
+
+        while parent is not None:
+            if parent.active:
+                break
+            ctx.old_contexts = parent.old_contexts
+            parent = parent.old_contexts[1]
+
+        ctx = parent
+
+    return (stack_contexts, head)
+
+
+def wrap(fn):
+    """Returns a callable object that will restore the current `StackContext`
+    when executed.
+
+    Use this whenever saving a callback to be executed later in a
+    different execution context (either in a different thread or
+    asynchronously in the same thread).
+    """
+    # Check if function is already wrapped
+    if fn is None or hasattr(fn, '_wrapped'):
+        return fn
+
+    # Capture current stack head
+    # TODO: Any other better way to store contexts and update them in wrapped function?
+    cap_contexts = [_state.contexts]
+
+    def wrapped(*args, **kwargs):
+        ret = None
+        try:
+            # Capture old state
+            current_state = _state.contexts
+
+            # Remove deactivated items
+            cap_contexts[0] = contexts = _remove_deactivated(cap_contexts[0])
+
+            # Force new state
+            _state.contexts = contexts
+
+            # Current exception
+            exc = (None, None, None)
+            top = None
+
+            # Apply stack contexts
+            last_ctx = 0
+            stack = contexts[0]
+
+            # Apply state
+            for n in stack:
+                try:
+                    n.enter()
+                    last_ctx += 1
+                except:
+                    # Exception happened. Record exception info and store top-most handler
+                    exc = sys.exc_info()
+                    top = n.old_contexts[1]
+
+            # Execute callback if no exception happened while restoring state
+            if top is None:
+                try:
+                    ret = fn(*args, **kwargs)
+                except:
+                    exc = sys.exc_info()
+                    top = contexts[1]
+
+            # If there was an exception, try to handle it by going through the exception chain
+            if top is not None:
+                exc = _handle_exception(top, exc)
+            else:
+                # Otherwise take the shorter path and run stack contexts in reverse order
+                while last_ctx > 0:
+                    last_ctx -= 1
+                    c = stack[last_ctx]
+
+                    try:
+                        c.exit(*exc)
+                    except:
+                        exc = sys.exc_info()
+                        top = c.old_contexts[1]
+                        break
+                else:
+                    top = None
+
+                # If an exception happened while unrolling, take the longer exception handler path
+                if top is not None:
+                    exc = _handle_exception(top, exc)
+
+            # If exception was not handled, raise it
+            if exc != (None, None, None):
+                raise_exc_info(exc)
+        finally:
+            _state.contexts = current_state
+        return ret
+
+    wrapped._wrapped = True
+    return wrapped
+
+
+def _handle_exception(tail, exc):
+    while tail is not None:
+        try:
+            if tail.exit(*exc):
+                exc = (None, None, None)
+        except:
+            exc = sys.exc_info()
+
+        tail = tail.old_contexts[1]
+
+    return exc
+
+
+def run_with_stack_context(context, func):
+    """Run a coroutine ``func`` in the given `StackContext`.
+
+    It is not safe to have a ``yield`` statement within a ``with StackContext``
+    block, so it is difficult to use stack context with `.gen.coroutine`.
+    This helper function runs the function in the correct context while
+    keeping the ``yield`` and ``with`` statements syntactically separate.
+
+    Example::
+
+        @gen.coroutine
+        def incorrect():
+            with StackContext(ctx):
+                # ERROR: this will raise StackContextInconsistentError
+                yield other_coroutine()
+
+        @gen.coroutine
+        def correct():
+            yield run_with_stack_context(StackContext(ctx), other_coroutine)
+
+    .. versionadded:: 3.1
+    """
+    with context:
+        return func()
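A sketch of what this module buys in practice, assuming the vendored IOLoop wraps scheduled callbacks with ``stack_context.wrap`` the way upstream tornado does: a handler installed when work is scheduled still sees an exception raised later, when the callback actually runs.

    import logging
    from zmq.eventloop.ioloop import IOLoop
    from zmq.eventloop.minitornado.stack_context import ExceptionStackContext

    def on_error(typ, value, tb):
        logging.error("callback failed: %s", value)
        return True  # consume the exception instead of re-raising

    loop = IOLoop.instance()
    with ExceptionStackContext(on_error):
        loop.add_callback(lambda: 1 / 0)        # fails only when the loop runs it
    loop.add_timeout(loop.time() + 0.1, loop.stop)
    loop.start()                                # logs the error instead of dying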
diff --git a/scripts/external_libs/zmq/eventloop/minitornado/util.py b/scripts/external_libs/zmq/eventloop/minitornado/util.py
new file mode 100644
index 00000000..c1e2eb95
--- /dev/null
+++ b/scripts/external_libs/zmq/eventloop/minitornado/util.py
@@ -0,0 +1,184 @@
+"""Miscellaneous utility functions and classes.
+
+This module is used internally by Tornado. It is not necessarily expected
+that the functions and classes defined here will be useful to other
+applications, but they are documented here in case they are.
+
+The one public-facing part of this module is the `Configurable` class
+and its `~Configurable.configure` method, which becomes a part of the
+interface of its subclasses, including `.AsyncHTTPClient`, `.IOLoop`,
+and `.Resolver`.
+"""
+
+from __future__ import absolute_import, division, print_function, with_statement
+
+import sys
+
+
+def import_object(name):
+    """Imports an object by name.
+
+    import_object('x') is equivalent to 'import x'.
+    import_object('x.y.z') is equivalent to 'from x.y import z'.
+
+    >>> import tornado.escape
+    >>> import_object('tornado.escape') is tornado.escape
+    True
+    >>> import_object('tornado.escape.utf8') is tornado.escape.utf8
+    True
+    >>> import_object('tornado') is tornado
+    True
+    >>> import_object('tornado.missing_module')
+    Traceback (most recent call last):
+        ...
+    ImportError: No module named missing_module
+    """
+    if name.count('.') == 0:
+        return __import__(name, None, None)
+
+    parts = name.split('.')
+    obj = __import__('.'.join(parts[:-1]), None, None, [parts[-1]], 0)
+    try:
+        return getattr(obj, parts[-1])
+    except AttributeError:
+        raise ImportError("No module named %s" % parts[-1])
+
+
+# Fake unicode literal support: Python 3.2 doesn't have the u'' marker for
+# literal strings, and alternative solutions like "from __future__ import
+# unicode_literals" have other problems (see PEP 414). u() can be applied
+# to ascii strings that include \u escapes (but they must not contain
+# literal non-ascii characters).
+if type('') is not type(b''):
+    def u(s):
+        return s
+    bytes_type = bytes
+    unicode_type = str
+    basestring_type = str
+else:
+    def u(s):
+        return s.decode('unicode_escape')
+    bytes_type = str
+    unicode_type = unicode
+    basestring_type = basestring
+
+
+if sys.version_info > (3,):
+    exec("""
+def raise_exc_info(exc_info):
+    raise exc_info[1].with_traceback(exc_info[2])
+
+def exec_in(code, glob, loc=None):
+    if isinstance(code, str):
+        code = compile(code, '<string>', 'exec', dont_inherit=True)
+    exec(code, glob, loc)
+""")
+else:
+    exec("""
+def raise_exc_info(exc_info):
+    raise exc_info[0], exc_info[1], exc_info[2]
+
+def exec_in(code, glob, loc=None):
+    if isinstance(code, basestring):
+        # exec(string) inherits the caller's future imports; compile
+        # the string first to prevent that.
+        code = compile(code, '<string>', 'exec', dont_inherit=True)
+    exec code in glob, loc
+""")
+
+
+class Configurable(object):
+    """Base class for configurable interfaces.
+
+    A configurable interface is an (abstract) class whose constructor
+    acts as a factory function for one of its implementation subclasses.
+    The implementation subclass as well as optional keyword arguments to
+    its initializer can be set globally at runtime with `configure`.
+
+    By using the constructor as the factory method, the interface
+    looks like a normal class, `isinstance` works as usual, etc. This
+    pattern is most useful when the choice of implementation is likely
+    to be a global decision (e.g. when `~select.epoll` is available,
+    always use it instead of `~select.select`), or when a
+    previously-monolithic class has been split into specialized
+    subclasses.
+
+    Configurable subclasses must define the class methods
+    `configurable_base` and `configurable_default`, and use the instance
+    method `initialize` instead of ``__init__``.
+    """
+    __impl_class = None
+    __impl_kwargs = None
+
+    def __new__(cls, **kwargs):
+        base = cls.configurable_base()
+        args = {}
+        if cls is base:
+            impl = cls.configured_class()
+            if base.__impl_kwargs:
+                args.update(base.__impl_kwargs)
+        else:
+            impl = cls
+        args.update(kwargs)
+        instance = super(Configurable, cls).__new__(impl)
+        # initialize vs __init__ chosen for compatibility with AsyncHTTPClient
+        # singleton magic. If we get rid of that we can switch to __init__
+        # here too.
+        instance.initialize(**args)
+        return instance
+
+    @classmethod
+    def configurable_base(cls):
+        """Returns the base class of a configurable hierarchy.
+
+        This will normally return the class in which it is defined
+        (which is *not* necessarily the same as the cls classmethod parameter).
+        """
+        raise NotImplementedError()
+
+    @classmethod
+    def configurable_default(cls):
+        """Returns the implementation class to be used if none is configured."""
+        raise NotImplementedError()
+
+    def initialize(self):
+        """Initialize a `Configurable` subclass instance.
+ + Configurable classes should use `initialize` instead of ``__init__``. + """ + + @classmethod + def configure(cls, impl, **kwargs): + """Sets the class to use when the base class is instantiated. + + Keyword arguments will be saved and added to the arguments passed + to the constructor. This can be used to set global defaults for + some parameters. + """ + base = cls.configurable_base() + if isinstance(impl, (unicode_type, bytes_type)): + impl = import_object(impl) + if impl is not None and not issubclass(impl, cls): + raise ValueError("Invalid subclass of %s" % cls) + base.__impl_class = impl + base.__impl_kwargs = kwargs + + @classmethod + def configured_class(cls): + """Returns the currently configured class.""" + base = cls.configurable_base() + if cls.__impl_class is None: + base.__impl_class = cls.configurable_default() + return base.__impl_class + + @classmethod + def _save_configuration(cls): + base = cls.configurable_base() + return (base.__impl_class, base.__impl_kwargs) + + @classmethod + def _restore_configuration(cls, saved): + base = cls.configurable_base() + base.__impl_class = saved[0] + base.__impl_kwargs = saved[1] + diff --git a/scripts/external_libs/zmq/eventloop/zmqstream.py b/scripts/external_libs/zmq/eventloop/zmqstream.py new file mode 100644 index 00000000..86a97e44 --- /dev/null +++ b/scripts/external_libs/zmq/eventloop/zmqstream.py @@ -0,0 +1,529 @@ +# +# Copyright 2009 Facebook +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +"""A utility class to send to and recv from a non-blocking socket.""" + +from __future__ import with_statement + +import sys + +import zmq +from zmq.utils import jsonapi + +try: + import cPickle as pickle +except ImportError: + import pickle + +from .ioloop import IOLoop + +try: + # gen_log will only import from >= 3.0 + from tornado.log import gen_log + from tornado import stack_context +except ImportError: + from .minitornado.log import gen_log + from .minitornado import stack_context + +try: + from queue import Queue +except ImportError: + from Queue import Queue + +from zmq.utils.strtypes import bytes, unicode, basestring + +try: + callable +except NameError: + callable = lambda obj: hasattr(obj, '__call__') + + +class ZMQStream(object): + """A utility class to register callbacks when a zmq socket sends and receives + + For use with zmq.eventloop.ioloop + + There are three main methods + + Methods: + + * **on_recv(callback, copy=True):** + register a callback to be run every time the socket has something to receive + * **on_send(callback):** + register a callback to be run every time you call send + * **send(self, msg, flags=0, copy=False, callback=None):** + perform a send that will trigger the callback + if callback is passed, on_send is also called. + + There are also send_multipart(), send_json(), send_pyobj() + + Three other methods for deactivating the callbacks: + + * **stop_on_recv():** + turn off the recv callback + * **stop_on_send():** + turn off the send callback + + which simply call ``on_<evt>(None)``. 
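For orientation, the class is typically driven as in this minimal echo server (socket type and port are illustrative, not taken from this diff):

    import zmq
    from zmq.eventloop import ioloop, zmqstream

    ctx = zmq.Context.instance()
    sock = ctx.socket(zmq.REP)
    sock.bind("tcp://127.0.0.1:5555")

    stream = zmqstream.ZMQStream(sock)
    stream.on_recv(lambda msg: stream.send_multipart(msg))  # echo each request

    ioloop.IOLoop.instance().start()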
+ + The entire socket interface, excluding direct recv methods, is also + provided, primarily through direct-linking the methods. + e.g. + + >>> stream.bind is stream.socket.bind + True + + """ + + socket = None + io_loop = None + poller = None + + def __init__(self, socket, io_loop=None): + self.socket = socket + self.io_loop = io_loop or IOLoop.instance() + self.poller = zmq.Poller() + + self._send_queue = Queue() + self._recv_callback = None + self._send_callback = None + self._close_callback = None + self._recv_copy = False + self._flushed = False + + self._state = self.io_loop.ERROR + self._init_io_state() + + # shortcircuit some socket methods + self.bind = self.socket.bind + self.bind_to_random_port = self.socket.bind_to_random_port + self.connect = self.socket.connect + self.setsockopt = self.socket.setsockopt + self.getsockopt = self.socket.getsockopt + self.setsockopt_string = self.socket.setsockopt_string + self.getsockopt_string = self.socket.getsockopt_string + self.setsockopt_unicode = self.socket.setsockopt_unicode + self.getsockopt_unicode = self.socket.getsockopt_unicode + + + def stop_on_recv(self): + """Disable callback and automatic receiving.""" + return self.on_recv(None) + + def stop_on_send(self): + """Disable callback on sending.""" + return self.on_send(None) + + def stop_on_err(self): + """DEPRECATED, does nothing""" + gen_log.warn("on_err does nothing, and will be removed") + + def on_err(self, callback): + """DEPRECATED, does nothing""" + gen_log.warn("on_err does nothing, and will be removed") + + def on_recv(self, callback, copy=True): + """Register a callback for when a message is ready to recv. + + There can be only one callback registered at a time, so each + call to `on_recv` replaces previously registered callbacks. + + on_recv(None) disables recv event polling. + + Use on_recv_stream(callback) instead, to register a callback that will receive + both this ZMQStream and the message, instead of just the message. + + Parameters + ---------- + + callback : callable + callback must take exactly one argument, which will be a + list, as returned by socket.recv_multipart() + if callback is None, recv callbacks are disabled. + copy : bool + copy is passed directly to recv, so if copy is False, + callback will receive Message objects. If copy is True, + then callback will receive bytes/str objects. + + Returns : None + """ + + self._check_closed() + assert callback is None or callable(callback) + self._recv_callback = stack_context.wrap(callback) + self._recv_copy = copy + if callback is None: + self._drop_io_state(self.io_loop.READ) + else: + self._add_io_state(self.io_loop.READ) + + def on_recv_stream(self, callback, copy=True): + """Same as on_recv, but callback will get this stream as first argument + + callback must take exactly two arguments, as it will be called as:: + + callback(stream, msg) + + Useful when a single callback should be used with multiple streams. + """ + if callback is None: + self.stop_on_recv() + else: + self.on_recv(lambda msg: callback(self, msg), copy=copy) + + def on_send(self, callback): + """Register a callback to be called on each send + + There will be two arguments:: + + callback(msg, status) + + * `msg` will be the list of sendable objects that was just sent + * `status` will be the return result of socket.send_multipart(msg) - + MessageTracker or None. + + Non-copying sends return a MessageTracker object whose + `done` attribute will be True when the send is complete. 
+        This allows users to track when an object is safe to write to
+        again.
+
+        The second argument will always be None if copy=True
+        on the send.
+
+        Use on_send_stream(callback) to register a callback that will be passed
+        this ZMQStream as the first argument, in addition to the other two.
+
+        on_send(None) disables send callbacks.
+
+        Parameters
+        ----------
+
+        callback : callable
+            callback must take exactly two arguments, which will be
+            the message being sent (always a list),
+            and the return result of socket.send_multipart(msg) -
+            MessageTracker or None.
+
+            if callback is None, send callbacks are disabled.
+        """
+
+        self._check_closed()
+        assert callback is None or callable(callback)
+        self._send_callback = stack_context.wrap(callback)
+
+
+    def on_send_stream(self, callback):
+        """Same as on_send, but callback will get this stream as first argument
+
+        Callback will be passed three arguments::
+
+            callback(stream, msg, status)
+
+        Useful when a single callback should be used with multiple streams.
+        """
+        if callback is None:
+            self.stop_on_send()
+        else:
+            self.on_send(lambda msg, status: callback(self, msg, status))
+
+
+    def send(self, msg, flags=0, copy=True, track=False, callback=None):
+        """Send a message, optionally also register a new callback for sends.
+        See zmq.socket.send for details.
+        """
+        return self.send_multipart([msg], flags=flags, copy=copy, track=track, callback=callback)
+
+    def send_multipart(self, msg, flags=0, copy=True, track=False, callback=None):
+        """Send a multipart message, optionally also register a new callback for sends.
+        See zmq.socket.send_multipart for details.
+        """
+        kwargs = dict(flags=flags, copy=copy, track=track)
+        self._send_queue.put((msg, kwargs))
+        callback = callback or self._send_callback
+        if callback is not None:
+            self.on_send(callback)
+        else:
+            # noop callback
+            self.on_send(lambda *args: None)
+        self._add_io_state(self.io_loop.WRITE)
+
+    def send_string(self, u, flags=0, encoding='utf-8', callback=None):
+        """Send a unicode message with an encoding.
+        See zmq.socket.send_unicode for details.
+        """
+        if not isinstance(u, basestring):
+            raise TypeError("unicode/str objects only")
+        return self.send(u.encode(encoding), flags=flags, callback=callback)
+
+    send_unicode = send_string
+
+    def send_json(self, obj, flags=0, callback=None):
+        """Send json-serialized version of an object.
+        See zmq.socket.send_json for details.
+        """
+        if jsonapi is None:
+            raise ImportError('jsonlib{1,2}, json or simplejson library is required.')
+        else:
+            msg = jsonapi.dumps(obj)
+            return self.send(msg, flags=flags, callback=callback)
+
+    def send_pyobj(self, obj, flags=0, protocol=-1, callback=None):
+        """Send a Python object as a message using pickle to serialize.
+
+        See zmq.socket.send_pyobj for details.
+        """
+        msg = pickle.dumps(obj, protocol)
+        return self.send(msg, flags, callback=callback)
+
+    def _finish_flush(self):
+        """callback for unsetting _flushed flag."""
+        self._flushed = False
+
+    def flush(self, flag=zmq.POLLIN|zmq.POLLOUT, limit=None):
+        """Flush pending messages.
+
+        This method safely handles all pending incoming and/or outgoing messages,
+        bypassing the inner loop, passing them to the registered callbacks.
+
+        A limit can be specified, to prevent blocking under high load.
+
+        flush will return the first time ANY of these conditions are met:
+            * No more events matching the flag are pending.
+            * the total number of events handled reaches the limit.
+ + Note that if ``flag|POLLIN != 0``, recv events will be flushed even if no callback + is registered, unlike normal IOLoop operation. This allows flush to be + used to remove *and ignore* incoming messages. + + Parameters + ---------- + flag : int, default=POLLIN|POLLOUT + 0MQ poll flags. + If flag|POLLIN, recv events will be flushed. + If flag|POLLOUT, send events will be flushed. + Both flags can be set at once, which is the default. + limit : None or int, optional + The maximum number of messages to send or receive. + Both send and recv count against this limit. + + Returns + ------- + int : count of events handled (both send and recv) + """ + self._check_closed() + # unset self._flushed, so callbacks will execute, in case flush has + # already been called this iteration + already_flushed = self._flushed + self._flushed = False + # initialize counters + count = 0 + def update_flag(): + """Update the poll flag, to prevent registering POLLOUT events + if we don't have pending sends.""" + return flag & zmq.POLLIN | (self.sending() and flag & zmq.POLLOUT) + flag = update_flag() + if not flag: + # nothing to do + return 0 + self.poller.register(self.socket, flag) + events = self.poller.poll(0) + while events and (not limit or count < limit): + s,event = events[0] + if event & zmq.POLLIN: # receiving + self._handle_recv() + count += 1 + if self.socket is None: + # break if socket was closed during callback + break + if event & zmq.POLLOUT and self.sending(): + self._handle_send() + count += 1 + if self.socket is None: + # break if socket was closed during callback + break + + flag = update_flag() + if flag: + self.poller.register(self.socket, flag) + events = self.poller.poll(0) + else: + events = [] + if count: # only bypass loop if we actually flushed something + # skip send/recv callbacks this iteration + self._flushed = True + # reregister them at the end of the loop + if not already_flushed: # don't need to do it again + self.io_loop.add_callback(self._finish_flush) + elif already_flushed: + self._flushed = True + + # update ioloop poll state, which may have changed + self._rebuild_io_state() + return count + + def set_close_callback(self, callback): + """Call the given callback when the stream is closed.""" + self._close_callback = stack_context.wrap(callback) + + def close(self, linger=None): + """Close this stream.""" + if self.socket is not None: + self.io_loop.remove_handler(self.socket) + self.socket.close(linger) + self.socket = None + if self._close_callback: + self._run_callback(self._close_callback) + + def receiving(self): + """Returns True if we are currently receiving from the stream.""" + return self._recv_callback is not None + + def sending(self): + """Returns True if we are currently sending to the stream.""" + return not self._send_queue.empty() + + def closed(self): + return self.socket is None + + def _run_callback(self, callback, *args, **kwargs): + """Wrap running callbacks in try/except to allow us to + close our socket.""" + try: + # Use a NullContext to ensure that all StackContexts are run + # inside our blanket exception handler rather than outside. 
+ with stack_context.NullContext(): + callback(*args, **kwargs) + except: + gen_log.error("Uncaught exception, closing connection.", + exc_info=True) + # Close the socket on an uncaught exception from a user callback + # (It would eventually get closed when the socket object is + # gc'd, but we don't want to rely on gc happening before we + # run out of file descriptors) + self.close() + # Re-raise the exception so that IOLoop.handle_callback_exception + # can see it and log the error + raise + + def _handle_events(self, fd, events): + """This method is the actual handler for IOLoop, that gets called whenever + an event on my socket is posted. It dispatches to _handle_recv, etc.""" + # print "handling events" + if not self.socket: + gen_log.warning("Got events for closed stream %s", fd) + return + try: + # dispatch events: + if events & IOLoop.ERROR: + gen_log.error("got POLLERR event on ZMQStream, which doesn't make sense") + return + if events & IOLoop.READ: + self._handle_recv() + if not self.socket: + return + if events & IOLoop.WRITE: + self._handle_send() + if not self.socket: + return + + # rebuild the poll state + self._rebuild_io_state() + except: + gen_log.error("Uncaught exception, closing connection.", + exc_info=True) + self.close() + raise + + def _handle_recv(self): + """Handle a recv event.""" + if self._flushed: + return + try: + msg = self.socket.recv_multipart(zmq.NOBLOCK, copy=self._recv_copy) + except zmq.ZMQError as e: + if e.errno == zmq.EAGAIN: + # state changed since poll event + pass + else: + gen_log.error("RECV Error: %s"%zmq.strerror(e.errno)) + else: + if self._recv_callback: + callback = self._recv_callback + # self._recv_callback = None + self._run_callback(callback, msg) + + # self.update_state() + + + def _handle_send(self): + """Handle a send event.""" + if self._flushed: + return + if not self.sending(): + gen_log.error("Shouldn't have handled a send event") + return + + msg, kwargs = self._send_queue.get() + try: + status = self.socket.send_multipart(msg, **kwargs) + except zmq.ZMQError as e: + gen_log.error("SEND Error: %s", e) + status = e + if self._send_callback: + callback = self._send_callback + self._run_callback(callback, msg, status) + + # self.update_state() + + def _check_closed(self): + if not self.socket: + raise IOError("Stream is closed") + + def _rebuild_io_state(self): + """rebuild io state based on self.sending() and receiving()""" + if self.socket is None: + return + state = self.io_loop.ERROR + if self.receiving(): + state |= self.io_loop.READ + if self.sending(): + state |= self.io_loop.WRITE + if state != self._state: + self._state = state + self._update_handler(state) + + def _add_io_state(self, state): + """Add io_state to poller.""" + if not self._state & state: + self._state = self._state | state + self._update_handler(self._state) + + def _drop_io_state(self, state): + """Stop poller from watching an io_state.""" + if self._state & state: + self._state = self._state & (~state) + self._update_handler(self._state) + + def _update_handler(self, state): + """Update IOLoop handler with state.""" + if self.socket is None: + return + self.io_loop.update_handler(self.socket, state) + + def _init_io_state(self): + """initialize the ioloop event handler""" + with stack_context.NullContext(): + self.io_loop.add_handler(self.socket, self._handle_events, self._state) + diff --git a/scripts/external_libs/zmq/green/__init__.py b/scripts/external_libs/zmq/green/__init__.py new file mode 100644 index 00000000..ff7e5965 --- /dev/null +++ 
b/scripts/external_libs/zmq/green/__init__.py @@ -0,0 +1,40 @@ +# -*- coding: utf-8 -*- +#----------------------------------------------------------------------------- +# Copyright (C) 2011-2012 Travis Cline +# +# This file is part of pyzmq +# It is adapted from upstream project zeromq_gevent under the New BSD License +# +# Distributed under the terms of the New BSD License. The full license is in +# the file COPYING.BSD, distributed as part of this software. +#----------------------------------------------------------------------------- + +"""zmq.green - gevent compatibility with zeromq. + +Usage +----- + +Instead of importing zmq directly, do so in the following manner: + +.. + + import zmq.green as zmq + + +Any calls that would have blocked the current thread will now only block the +current green thread. + +This compatibility is accomplished by ensuring the nonblocking flag is set +before any blocking operation and the ØMQ file descriptor is polled internally +to trigger needed events. +""" + +from zmq import * +from zmq.green.core import _Context, _Socket +from zmq.green.poll import _Poller +Context = _Context +Socket = _Socket +Poller = _Poller + +from zmq.green.device import device + diff --git a/scripts/external_libs/zmq/green/core.py b/scripts/external_libs/zmq/green/core.py new file mode 100644 index 00000000..9fc73e32 --- /dev/null +++ b/scripts/external_libs/zmq/green/core.py @@ -0,0 +1,287 @@ +#----------------------------------------------------------------------------- +# Copyright (C) 2011-2012 Travis Cline +# +# This file is part of pyzmq +# It is adapted from upstream project zeromq_gevent under the New BSD License +# +# Distributed under the terms of the New BSD License. The full license is in +# the file COPYING.BSD, distributed as part of this software. +#----------------------------------------------------------------------------- + +"""This module wraps the :class:`Socket` and :class:`Context` found in :mod:`pyzmq <zmq>` to be non blocking +""" + +from __future__ import print_function + +import sys +import time +import warnings + +import zmq + +from zmq import Context as _original_Context +from zmq import Socket as _original_Socket +from .poll import _Poller + +import gevent +from gevent.event import AsyncResult +from gevent.hub import get_hub + +if hasattr(zmq, 'RCVTIMEO'): + TIMEOS = (zmq.RCVTIMEO, zmq.SNDTIMEO) +else: + TIMEOS = () + +def _stop(evt): + """simple wrapper for stopping an Event, allowing for method rename in gevent 1.0""" + try: + evt.stop() + except AttributeError as e: + # gevent<1.0 compat + evt.cancel() + +class _Socket(_original_Socket): + """Green version of :class:`zmq.Socket` + + The following methods are overridden: + + * send + * recv + + To ensure that the ``zmq.NOBLOCK`` flag is set and that sending or receiving + is deferred to the hub if a ``zmq.EAGAIN`` (retry) error is raised. + + The `__state_changed` method is triggered when the zmq.FD for the socket is + marked as readable and triggers the necessary read and write events (which + are waited for in the recv and send methods). + + Some double underscore prefixes are used to minimize pollution of + :class:`zmq.Socket`'s namespace. 
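A sketch of the drop-in usage the package docstring above promises (the endpoint and payload are invented for the example); each blocking call parks only the calling greenlet:

    import gevent
    import zmq.green as zmq

    ctx = zmq.Context()

    def server():
        rep = ctx.socket(zmq.REP)
        rep.bind("tcp://127.0.0.1:5556")
        rep.send(rep.recv())        # recv() yields to the gevent hub, not the OS

    def client():
        req = ctx.socket(zmq.REQ)
        req.connect("tcp://127.0.0.1:5556")
        req.send(b"ping")
        print(req.recv())

    gevent.joinall([gevent.spawn(server), gevent.spawn(client)])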
+ """ + __in_send_multipart = False + __in_recv_multipart = False + __writable = None + __readable = None + _state_event = None + _gevent_bug_timeout = 11.6 # timeout for not trusting gevent + _debug_gevent = False # turn on if you think gevent is missing events + _poller_class = _Poller + + def __init__(self, context, socket_type): + _original_Socket.__init__(self, context, socket_type) + self.__in_send_multipart = False + self.__in_recv_multipart = False + self.__setup_events() + + + def __del__(self): + self.close() + + def close(self, linger=None): + super(_Socket, self).close(linger) + self.__cleanup_events() + + def __cleanup_events(self): + # close the _state_event event, keeps the number of active file descriptors down + if getattr(self, '_state_event', None): + _stop(self._state_event) + self._state_event = None + # if the socket has entered a close state resume any waiting greenlets + self.__writable.set() + self.__readable.set() + + def __setup_events(self): + self.__readable = AsyncResult() + self.__writable = AsyncResult() + self.__readable.set() + self.__writable.set() + + try: + self._state_event = get_hub().loop.io(self.getsockopt(zmq.FD), 1) # read state watcher + self._state_event.start(self.__state_changed) + except AttributeError: + # for gevent<1.0 compatibility + from gevent.core import read_event + self._state_event = read_event(self.getsockopt(zmq.FD), self.__state_changed, persist=True) + + def __state_changed(self, event=None, _evtype=None): + if self.closed: + self.__cleanup_events() + return + try: + # avoid triggering __state_changed from inside __state_changed + events = super(_Socket, self).getsockopt(zmq.EVENTS) + except zmq.ZMQError as exc: + self.__writable.set_exception(exc) + self.__readable.set_exception(exc) + else: + if events & zmq.POLLOUT: + self.__writable.set() + if events & zmq.POLLIN: + self.__readable.set() + + def _wait_write(self): + assert self.__writable.ready(), "Only one greenlet can be waiting on this event" + self.__writable = AsyncResult() + # timeout is because libzmq cannot be trusted to properly signal a new send event: + # this is effectively a maximum poll interval of 1s + tic = time.time() + dt = self._gevent_bug_timeout + if dt: + timeout = gevent.Timeout(seconds=dt) + else: + timeout = None + try: + if timeout: + timeout.start() + self.__writable.get(block=True) + except gevent.Timeout as t: + if t is not timeout: + raise + toc = time.time() + # gevent bug: get can raise timeout even on clean return + # don't display zmq bug warning for gevent bug (this is getting ridiculous) + if self._debug_gevent and timeout and toc-tic > dt and \ + self.getsockopt(zmq.EVENTS) & zmq.POLLOUT: + print("BUG: gevent may have missed a libzmq send event on %i!" % self.FD, file=sys.stderr) + finally: + if timeout: + timeout.cancel() + self.__writable.set() + + def _wait_read(self): + assert self.__readable.ready(), "Only one greenlet can be waiting on this event" + self.__readable = AsyncResult() + # timeout is because libzmq cannot always be trusted to play nice with libevent. + # I can only confirm that this actually happens for send, but lets be symmetrical + # with our dirty hacks. 
+ # this is effectively a maximum poll interval of 1s + tic = time.time() + dt = self._gevent_bug_timeout + if dt: + timeout = gevent.Timeout(seconds=dt) + else: + timeout = None + try: + if timeout: + timeout.start() + self.__readable.get(block=True) + except gevent.Timeout as t: + if t is not timeout: + raise + toc = time.time() + # gevent bug: get can raise timeout even on clean return + # don't display zmq bug warning for gevent bug (this is getting ridiculous) + if self._debug_gevent and timeout and toc-tic > dt and \ + self.getsockopt(zmq.EVENTS) & zmq.POLLIN: + print("BUG: gevent may have missed a libzmq recv event on %i!" % self.FD, file=sys.stderr) + finally: + if timeout: + timeout.cancel() + self.__readable.set() + + def send(self, data, flags=0, copy=True, track=False): + """send, which will only block current greenlet + + state_changed always fires exactly once (success or fail) at the + end of this method. + """ + + # if we're given the NOBLOCK flag act as normal and let the EAGAIN get raised + if flags & zmq.NOBLOCK: + try: + msg = super(_Socket, self).send(data, flags, copy, track) + finally: + if not self.__in_send_multipart: + self.__state_changed() + return msg + # ensure the zmq.NOBLOCK flag is part of flags + flags |= zmq.NOBLOCK + while True: # Attempt to complete this operation indefinitely, blocking the current greenlet + try: + # attempt the actual call + msg = super(_Socket, self).send(data, flags, copy, track) + except zmq.ZMQError as e: + # if the raised ZMQError is not EAGAIN, reraise + if e.errno != zmq.EAGAIN: + if not self.__in_send_multipart: + self.__state_changed() + raise + else: + if not self.__in_send_multipart: + self.__state_changed() + return msg + # defer to the event loop until we're notified the socket is writable + self._wait_write() + + def recv(self, flags=0, copy=True, track=False): + """recv, which will only block current greenlet + + state_changed always fires exactly once (success or fail) at the + end of this method. 
+ """ + if flags & zmq.NOBLOCK: + try: + msg = super(_Socket, self).recv(flags, copy, track) + finally: + if not self.__in_recv_multipart: + self.__state_changed() + return msg + + flags |= zmq.NOBLOCK + while True: + try: + msg = super(_Socket, self).recv(flags, copy, track) + except zmq.ZMQError as e: + if e.errno != zmq.EAGAIN: + if not self.__in_recv_multipart: + self.__state_changed() + raise + else: + if not self.__in_recv_multipart: + self.__state_changed() + return msg + self._wait_read() + + def send_multipart(self, *args, **kwargs): + """wrap send_multipart to prevent state_changed on each partial send""" + self.__in_send_multipart = True + try: + msg = super(_Socket, self).send_multipart(*args, **kwargs) + finally: + self.__in_send_multipart = False + self.__state_changed() + return msg + + def recv_multipart(self, *args, **kwargs): + """wrap recv_multipart to prevent state_changed on each partial recv""" + self.__in_recv_multipart = True + try: + msg = super(_Socket, self).recv_multipart(*args, **kwargs) + finally: + self.__in_recv_multipart = False + self.__state_changed() + return msg + + def get(self, opt): + """trigger state_changed on getsockopt(EVENTS)""" + if opt in TIMEOS: + warnings.warn("TIMEO socket options have no effect in zmq.green", UserWarning) + optval = super(_Socket, self).get(opt) + if opt == zmq.EVENTS: + self.__state_changed() + return optval + + def set(self, opt, val): + """set socket option""" + if opt in TIMEOS: + warnings.warn("TIMEO socket options have no effect in zmq.green", UserWarning) + return super(_Socket, self).set(opt, val) + + +class _Context(_original_Context): + """Replacement for :class:`zmq.Context` + + Ensures that the greened Socket above is used in calls to `socket`. + """ + _socket_class = _Socket diff --git a/scripts/external_libs/zmq/green/device.py b/scripts/external_libs/zmq/green/device.py new file mode 100644 index 00000000..4b070237 --- /dev/null +++ b/scripts/external_libs/zmq/green/device.py @@ -0,0 +1,32 @@ +# Copyright (C) PyZMQ Developers +# Distributed under the terms of the Modified BSD License. + +import zmq +from zmq.green import Poller + +def device(device_type, isocket, osocket): + """Start a zeromq device (gevent-compatible). + + Unlike the true zmq.device, this does not release the GIL. + + Parameters + ---------- + device_type : (QUEUE, FORWARDER, STREAMER) + The type of device to start (ignored). + isocket : Socket + The Socket instance for the incoming traffic. + osocket : Socket + The Socket instance for the outbound traffic. + """ + p = Poller() + if osocket == -1: + osocket = isocket + p.register(isocket, zmq.POLLIN) + p.register(osocket, zmq.POLLIN) + + while True: + events = dict(p.poll()) + if isocket in events: + osocket.send_multipart(isocket.recv_multipart()) + if osocket in events: + isocket.send_multipart(osocket.recv_multipart()) diff --git a/scripts/external_libs/zmq/green/eventloop/__init__.py b/scripts/external_libs/zmq/green/eventloop/__init__.py new file mode 100644 index 00000000..c5150efe --- /dev/null +++ b/scripts/external_libs/zmq/green/eventloop/__init__.py @@ -0,0 +1,3 @@ +from zmq.green.eventloop.ioloop import IOLoop + +__all__ = ['IOLoop']
\ No newline at end of file
diff --git a/scripts/external_libs/zmq/green/eventloop/ioloop.py b/scripts/external_libs/zmq/green/eventloop/ioloop.py
new file mode 100644
index 00000000..e12fd5e9
--- /dev/null
+++ b/scripts/external_libs/zmq/green/eventloop/ioloop.py
@@ -0,0 +1,33 @@
+from zmq.eventloop.ioloop import *
+from zmq.green import Poller
+
+RealIOLoop = IOLoop
+RealZMQPoller = ZMQPoller
+
+class IOLoop(RealIOLoop):
+
+    def initialize(self, impl=None):
+        impl = _poll() if impl is None else impl
+        super(IOLoop, self).initialize(impl)
+
+    @staticmethod
+    def instance():
+        """Returns a global `IOLoop` instance.
+
+        Most applications have a single, global `IOLoop` running on the
+        main thread. Use this method to get this instance from
+        another thread. To get the current thread's `IOLoop`, use `current()`.
+        """
+        # install this class as the active IOLoop implementation
+        # when using tornado 3
+        if tornado_version >= (3,):
+            PollIOLoop.configure(IOLoop)
+        return PollIOLoop.instance()
+
+
+class ZMQPoller(RealZMQPoller):
+    """gevent-compatible version of ioloop.ZMQPoller"""
+    def __init__(self):
+        self._poller = Poller()
+
+_poll = ZMQPoller
diff --git a/scripts/external_libs/zmq/green/eventloop/zmqstream.py b/scripts/external_libs/zmq/green/eventloop/zmqstream.py
new file mode 100644
index 00000000..90fbd1f5
--- /dev/null
+++ b/scripts/external_libs/zmq/green/eventloop/zmqstream.py
@@ -0,0 +1,11 @@
+from zmq.eventloop.zmqstream import *
+
+from zmq.green.eventloop.ioloop import IOLoop
+
+RealZMQStream = ZMQStream
+
+class ZMQStream(RealZMQStream):
+
+    def __init__(self, socket, io_loop=None):
+        io_loop = io_loop or IOLoop.instance()
+        super(ZMQStream, self).__init__(socket, io_loop=io_loop)
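Taken together, these two shims let the stream API from earlier in this diff run under gevent; a hypothetical PULL consumer (endpoint invented):

    import zmq.green as zmq
    from zmq.green.eventloop import IOLoop
    from zmq.green.eventloop.zmqstream import ZMQStream

    ctx = zmq.Context()
    sock = ctx.socket(zmq.PULL)
    sock.bind("tcp://127.0.0.1:5557")

    def dump(msg):
        print(msg)

    stream = ZMQStream(sock)   # defaults to the gevent-compatible IOLoop.instance()
    stream.on_recv(dump)
    IOLoop.instance().start()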
diff --git a/scripts/external_libs/zmq/green/poll.py b/scripts/external_libs/zmq/green/poll.py
new file mode 100644
index 00000000..8f016129
--- /dev/null
+++ b/scripts/external_libs/zmq/green/poll.py
@@ -0,0 +1,95 @@
+import zmq
+import gevent
+from gevent import select
+
+from zmq import Poller as _original_Poller
+
+
+class _Poller(_original_Poller):
+    """Replacement for :class:`zmq.Poller`
+
+    Ensures that the greened Poller below is used in calls to
+    :meth:`zmq.Poller.poll`.
+    """
+    _gevent_bug_timeout = 1.33 # minimum poll interval, for working around gevent bug
+
+    def _get_descriptors(self):
+        """Returns a three-element tuple of socket descriptors ready
+        for gevent.select.select
+        """
+        rlist = []
+        wlist = []
+        xlist = []
+
+        for socket, flags in self.sockets:
+            if isinstance(socket, zmq.Socket):
+                rlist.append(socket.getsockopt(zmq.FD))
+                continue
+            elif isinstance(socket, int):
+                fd = socket
+            elif hasattr(socket, 'fileno'):
+                try:
+                    fd = int(socket.fileno())
+                except:
+                    raise ValueError('fileno() must return a valid integer fd')
+            else:
+                raise TypeError('Socket must be a 0MQ socket, an integer fd '
+                                'or have a fileno() method: %r' % socket)
+
+            if flags & zmq.POLLIN:
+                rlist.append(fd)
+            if flags & zmq.POLLOUT:
+                wlist.append(fd)
+            if flags & zmq.POLLERR:
+                xlist.append(fd)
+
+        return (rlist, wlist, xlist)
+
+    def poll(self, timeout=-1):
+        """Overridden method to ensure that the green version of
+        Poller is used.
+
+        Behaves the same as :meth:`zmq.core.Poller.poll`
+        """
+
+        if timeout is None:
+            timeout = -1
+
+        if timeout < 0:
+            timeout = -1
+
+        rlist = None
+        wlist = None
+        xlist = None
+
+        if timeout > 0:
+            tout = gevent.Timeout.start_new(timeout/1000.0)
+
+        try:
+            # Loop until timeout or events available
+            rlist, wlist, xlist = self._get_descriptors()
+            while True:
+                events = super(_Poller, self).poll(0)
+                if events or timeout == 0:
+                    return events
+
+                # wait for activity on sockets in a green way
+                # set a minimum poll frequency,
+                # because gevent < 1.0 cannot be trusted to catch edge-triggered FD events
+                _bug_timeout = gevent.Timeout.start_new(self._gevent_bug_timeout)
+                try:
+                    select.select(rlist, wlist, xlist)
+                except gevent.Timeout as t:
+                    if t is not _bug_timeout:
+                        raise
+                finally:
+                    _bug_timeout.cancel()
+
+        except gevent.Timeout as t:
+            if t is not tout:
+                raise
+            return []
+        finally:
+            if timeout > 0:
+                tout.cancel()
+
diff --git a/scripts/external_libs/zmq/libzmq.so b/scripts/external_libs/zmq/libzmq.so
new file mode 100644
index 00000000..16980c27
--- /dev/null
+++ b/scripts/external_libs/zmq/libzmq.so
Binary files differ
diff --git a/scripts/external_libs/zmq/log/__init__.py b/scripts/external_libs/zmq/log/__init__.py
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/scripts/external_libs/zmq/log/__init__.py
diff --git a/scripts/external_libs/zmq/log/handlers.py b/scripts/external_libs/zmq/log/handlers.py
new file mode 100644
index 00000000..5ff21bf3
--- /dev/null
+++ b/scripts/external_libs/zmq/log/handlers.py
@@ -0,0 +1,146 @@
+"""pyzmq logging handlers.
+
+This mainly defines the PUBHandler object for publishing logging messages over
+a zmq.PUB socket.
+
+The PUBHandler can be used with the regular logging module, as in::
+
+    >>> import logging
+    >>> handler = PUBHandler('tcp://127.0.0.1:12345')
+    >>> handler.root_topic = 'foo'
+    >>> logger = logging.getLogger('foobar')
+    >>> logger.setLevel(logging.DEBUG)
+    >>> logger.addHandler(handler)
+
+After this point, all messages logged by ``logger`` will be published on the
+PUB socket.
+
+Code adapted from StarCluster:
+
+    http://github.com/jtriley/StarCluster/blob/master/starcluster/logger.py
+"""
+
+# Copyright (C) PyZMQ Developers
+# Distributed under the terms of the Modified BSD License.
+
+
+import logging
+from logging import INFO, DEBUG, WARN, ERROR, FATAL
+
+import zmq
+from zmq.utils.strtypes import bytes, unicode, cast_bytes
+
+
+TOPIC_DELIM="::" # delimiter for splitting topics on the receiving end.
+
+
+class PUBHandler(logging.Handler):
+    """A basic logging handler that emits log messages through a PUB socket.
+
+    Takes a PUB socket already bound to interfaces or an interface to bind to.
+
+    Example::
+
+        sock = context.socket(zmq.PUB)
+        sock.bind('inproc://log')
+        handler = PUBHandler(sock)
+
+    Or::
+
+        handler = PUBHandler('inproc://log')
+
+    These are equivalent.
+
+    Log messages handled by this handler are broadcast with ZMQ topics:
+    ``this.root_topic`` comes first, followed by the log level
+    (DEBUG,INFO,etc.), followed by any additional subtopics specified in the
+    message by: log.debug("subtopic.subsub::the real message")
+    """
+    root_topic=""
+    socket = None
+
+    formatters = {
+        logging.DEBUG: logging.Formatter(
+            "%(levelname)s %(filename)s:%(lineno)d - %(message)s\n"),
+        logging.INFO: logging.Formatter("%(message)s\n"),
+        logging.WARN: logging.Formatter(
+            "%(levelname)s %(filename)s:%(lineno)d - %(message)s\n"),
+        logging.ERROR: logging.Formatter(
+            "%(levelname)s %(filename)s:%(lineno)d - %(message)s - %(exc_info)s\n"),
+        logging.CRITICAL: logging.Formatter(
+            "%(levelname)s %(filename)s:%(lineno)d - %(message)s\n")}
+
+    def __init__(self, interface_or_socket, context=None):
+        logging.Handler.__init__(self)
+        if isinstance(interface_or_socket, zmq.Socket):
+            self.socket = interface_or_socket
+            self.ctx = self.socket.context
+        else:
+            self.ctx = context or zmq.Context()
+            self.socket = self.ctx.socket(zmq.PUB)
+            self.socket.bind(interface_or_socket)
+
+    def format(self,record):
+        """Format a record."""
+        return self.formatters[record.levelno].format(record)
+
+    def emit(self, record):
+        """Emit a log message on my socket."""
+        try:
+            topic, record.msg = record.msg.split(TOPIC_DELIM,1)
+        except Exception:
+            topic = ""
+        try:
+            bmsg = cast_bytes(self.format(record))
+        except Exception:
+            self.handleError(record)
+            return
+
+        topic_list = []
+
+        if self.root_topic:
+            topic_list.append(self.root_topic)
+
+        topic_list.append(record.levelname)
+
+        if topic:
+            topic_list.append(topic)
+
+        btopic = b'.'.join(cast_bytes(t) for t in topic_list)
+
+        self.socket.send_multipart([btopic, bmsg])
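The receiving side of the handler above is an ordinary SUB socket. A sketch (the address is reused from the docstring; the root topic and the sleep that dodges the slow-joiner race are illustrative):

    import logging
    import time
    import zmq
    from zmq.log.handlers import PUBHandler

    handler = PUBHandler("tcp://127.0.0.1:12345")
    handler.root_topic = "app"
    logging.getLogger().addHandler(handler)

    sub = zmq.Context.instance().socket(zmq.SUB)
    sub.connect("tcp://127.0.0.1:12345")
    sub.setsockopt(zmq.SUBSCRIBE, b"app")      # topics arrive as b'app.LEVEL[.sub]'
    time.sleep(0.1)                            # let the subscription propagate
    logging.getLogger().warning("sub.topic::hello")
    topic, message = sub.recv_multipart()      # b'app.WARNING.sub.topic', formatted text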
+
+
+class TopicLogger(logging.Logger):
+    """A simple wrapper that takes an additional argument to log methods.
+
+    All the regular methods exist, but instead of one msg argument, two
+    arguments: topic, msg are passed.
+
+    That is::
+
+        logger.debug('msg')
+
+    Would become::
+
+        logger.debug('topic.sub', 'msg')
+    """
+    def log(self, level, topic, msg, *args, **kwargs):
+        """Log 'msg % args' with level and topic.
+
+        To pass exception information, use the keyword argument exc_info
+        with a True value::
+
+            logger.log(level, "zmq.fun", "We have a %s",
+                    "mysterious problem", exc_info=1)
+        """
+        logging.Logger.log(self, level, '%s::%s'%(topic,msg), *args, **kwargs)
+
+# Generate the methods of TopicLogger, since they are just folding a
+# topic prefix into the message before calling the plain Logger method.
+def _make_topic_method(meth):
+    # bind meth per generated method; a bare closure in the loop below
+    # would late-bind and leave every method calling the last one.
+    def log_method(self, topic, msg, *args, **kwargs):
+        return meth(self, topic + TOPIC_DELIM + msg, *args, **kwargs)
+    return log_method
+
+for name in "debug warn warning error critical fatal".split():
+    setattr(TopicLogger, name, _make_topic_method(getattr(logging.Logger, name)))
+
diff --git a/scripts/external_libs/zmq/ssh/__init__.py b/scripts/external_libs/zmq/ssh/__init__.py
new file mode 100644
index 00000000..57f09568
--- /dev/null
+++ b/scripts/external_libs/zmq/ssh/__init__.py
@@ -0,0 +1 @@
+from zmq.ssh.tunnel import *
diff --git a/scripts/external_libs/zmq/ssh/forward.py b/scripts/external_libs/zmq/ssh/forward.py
new file mode 100644
index 00000000..2d619462
--- /dev/null
+++ b/scripts/external_libs/zmq/ssh/forward.py
@@ -0,0 +1,91 @@
+#
+# This file is adapted from a paramiko demo, and thus licensed under LGPL 2.1.
+# Original Copyright (C) 2003-2007 Robey Pointer <robeypointer@gmail.com>
+# Edits Copyright (C) 2010 The IPython Team
+#
+# Paramiko is free software; you can redistribute it and/or modify it under the
+# terms of the GNU Lesser General Public License as published by the Free
+# Software Foundation; either version 2.1 of the License, or (at your option)
+# any later version.
+#
+# Paramiko is distributed in the hope that it will be useful, but WITHOUT ANY
+# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR
+# A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
+# details.
+#
+# You should have received a copy of the GNU Lesser General Public License
+# along with Paramiko; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA 02111-1301 USA.
+
+"""
+Sample script showing how to do local port forwarding over paramiko.
+
+This script connects to the requested SSH server and sets up local port
+forwarding (the openssh -L option) from a local port through a tunneled
+connection to a destination reachable from the SSH server machine.
+"""
+
+from __future__ import print_function
+
+import logging
+import select
+try:  # Python 3
+    import socketserver
+except ImportError:  # Python 2
+    import SocketServer as socketserver
+
+logger = logging.getLogger('ssh')
+
+class ForwardServer (socketserver.ThreadingTCPServer):
+    daemon_threads = True
+    allow_reuse_address = True
+
+
+class Handler (socketserver.BaseRequestHandler):
+
+    def handle(self):
+        try:
+            chan = self.ssh_transport.open_channel('direct-tcpip',
+                                                   (self.chain_host, self.chain_port),
+                                                   self.request.getpeername())
+        except Exception as e:
+            logger.debug('Incoming request to %s:%d failed: %s' % (self.chain_host,
+                                                                   self.chain_port,
+                                                                   repr(e)))
+            return
+        if chan is None:
+            logger.debug('Incoming request to %s:%d was rejected by the SSH server.' %
+                         (self.chain_host, self.chain_port))
+            return
+
+        logger.debug('Connected! Tunnel open %r -> %r -> %r' % (self.request.getpeername(),
+                                                                chan.getpeername(), (self.chain_host, self.chain_port)))
+        while True:
+            r, w, x = select.select([self.request, chan], [], [])
+            if self.request in r:
+                data = self.request.recv(1024)
+                if len(data) == 0:
+                    break
+                chan.send(data)
+            if chan in r:
+                data = chan.recv(1024)
+                if len(data) == 0:
+                    break
+                self.request.send(data)
+        chan.close()
+        self.request.close()
+        logger.debug('Tunnel closed ')
+
+
+def forward_tunnel(local_port, remote_host, remote_port, transport):
+    # this is a little convoluted, but lets me configure things for the Handler
+    # object. (SocketServer doesn't give Handlers any way to access the outer
+    # server normally.)
+    class SubHandler (Handler):
+        chain_host = remote_host
+        chain_port = remote_port
+        ssh_transport = transport
+    ForwardServer(('127.0.0.1', local_port), SubHandler).serve_forever()
+
+
+__all__ = ['forward_tunnel']
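A sketch of how ``forward_tunnel`` is typically driven (host names are invented for the example); note that it blocks in ``serve_forever``, so real callers run it in a thread or child process:

    import paramiko
    from zmq.ssh.forward import forward_tunnel

    client = paramiko.SSHClient()
    client.load_system_host_keys()
    client.connect("gateway.example.com", username="me")

    # forward local port 5555 through the gateway to remote-host:5555
    forward_tunnel(5555, "remote-host", 5555, client.get_transport())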
+
+
+from __future__ import print_function
+
+import atexit
+import os
+import signal
+import socket
+import sys
+import warnings
+from getpass import getpass, getuser
+from multiprocessing import Process
+
+try:
+    with warnings.catch_warnings():
+        warnings.simplefilter('ignore', DeprecationWarning)
+        import paramiko
+    SSHException = paramiko.ssh_exception.SSHException
+except ImportError:
+    paramiko = None
+    class SSHException(Exception):
+        pass
+else:
+    from .forward import forward_tunnel
+
+try:
+    import pexpect
+except ImportError:
+    pexpect = None
+
+
+_random_ports = set()
+
+def select_random_ports(n):
+    """Select and return n random ports that are available."""
+    ports = []
+    for i in range(n):
+        sock = socket.socket()
+        sock.bind(('', 0))
+        while sock.getsockname()[1] in _random_ports:
+            sock.close()
+            sock = socket.socket()
+            sock.bind(('', 0))
+        ports.append(sock)
+    for i, sock in enumerate(ports):
+        port = sock.getsockname()[1]
+        sock.close()
+        ports[i] = port
+        _random_ports.add(port)
+    return ports
+
+
+#-----------------------------------------------------------------------------
+# Check for passwordless login
+#-----------------------------------------------------------------------------
+
+def try_passwordless_ssh(server, keyfile, paramiko=None):
+    """Attempt to make an ssh connection without a password.
+    This is mainly used for requiring password input only once
+    when many tunnels may be connected to the same server.
+
+    If paramiko is None, the default for the platform is chosen.
+    """
+    if paramiko is None:
+        paramiko = sys.platform == 'win32'
+    if not paramiko:
+        f = _try_passwordless_openssh
+    else:
+        f = _try_passwordless_paramiko
+    return f(server, keyfile)
+
+def _try_passwordless_openssh(server, keyfile):
+    """Try passwordless login with shell ssh command."""
+    if pexpect is None:
+        raise ImportError("pexpect unavailable, use paramiko")
+    cmd = 'ssh -f ' + server
+    if keyfile:
+        cmd += ' -i ' + keyfile
+    cmd += ' exit'
+
+    # pop SSH_ASKPASS from env
+    env = os.environ.copy()
+    env.pop('SSH_ASKPASS', None)
+
+    ssh_newkey = 'Are you sure you want to continue connecting'
+    p = pexpect.spawn(cmd, env=env)
+    while True:
+        try:
+            i = p.expect([ssh_newkey, '[Pp]assword:'], timeout=.1)
+            if i == 0:
+                raise SSHException('The authenticity of the host can\'t be established.')
+        except pexpect.TIMEOUT:
+            continue
+        except pexpect.EOF:
+            return True
+        else:
+            return False
+
+def _try_passwordless_paramiko(server, keyfile):
+    """Try passwordless login with paramiko."""
+    if paramiko is None:
+        msg = "Paramiko unavailable, "
+        if sys.platform == 'win32':
+            msg += "paramiko is required for ssh tunneled connections on Windows."
+        else:
+            msg += "use OpenSSH."
+        raise ImportError(msg)
+    username, server, port = _split_server(server)
+    client = paramiko.SSHClient()
+    client.load_system_host_keys()
+    client.set_missing_host_key_policy(paramiko.WarningPolicy())
+    try:
+        client.connect(server, port, username=username, key_filename=keyfile,
+                       look_for_keys=True)
+    except paramiko.AuthenticationException:
+        return False
+    else:
+        client.close()
+        return True
+
+
+def tunnel_connection(socket, addr, server, keyfile=None, password=None, paramiko=None, timeout=60):
+    """Connect a socket to an address via an ssh tunnel.
+
+    This is a wrapper for socket.connect(addr), when addr is not accessible
+    from the local machine.
It simply creates an ssh tunnel using the remaining args, + and calls socket.connect('tcp://localhost:lport') where lport is the randomly + selected local port of the tunnel. + + """ + new_url, tunnel = open_tunnel(addr, server, keyfile=keyfile, password=password, paramiko=paramiko, timeout=timeout) + socket.connect(new_url) + return tunnel + + +def open_tunnel(addr, server, keyfile=None, password=None, paramiko=None, timeout=60): + """Open a tunneled connection from a 0MQ url. + + For use inside tunnel_connection. + + Returns + ------- + + (url, tunnel) : (str, object) + The 0MQ url that has been forwarded, and the tunnel object + """ + + lport = select_random_ports(1)[0] + transport, addr = addr.split('://') + ip,rport = addr.split(':') + rport = int(rport) + if paramiko is None: + paramiko = sys.platform == 'win32' + if paramiko: + tunnelf = paramiko_tunnel + else: + tunnelf = openssh_tunnel + + tunnel = tunnelf(lport, rport, server, remoteip=ip, keyfile=keyfile, password=password, timeout=timeout) + return 'tcp://127.0.0.1:%i'%lport, tunnel + +def openssh_tunnel(lport, rport, server, remoteip='127.0.0.1', keyfile=None, password=None, timeout=60): + """Create an ssh tunnel using command-line ssh that connects port lport + on this machine to localhost:rport on server. The tunnel + will automatically close when not in use, remaining open + for a minimum of timeout seconds for an initial connection. + + This creates a tunnel redirecting `localhost:lport` to `remoteip:rport`, + as seen from `server`. + + keyfile and password may be specified, but ssh config is checked for defaults. + + Parameters + ---------- + + lport : int + local port for connecting to the tunnel from this machine. + rport : int + port on the remote machine to connect to. + server : str + The ssh server to connect to. The full ssh server string will be parsed. + user@server:port + remoteip : str [Default: 127.0.0.1] + The remote ip, specifying the destination of the tunnel. + Default is localhost, which means that the tunnel would redirect + localhost:lport on this machine to localhost:rport on the *server*. + + keyfile : str; path to public key file + This specifies a key to be used in ssh login, default None. + Regular default ssh keys will be used without specifying this argument. + password : str; + Your ssh password to the ssh server. Note that if this is left None, + you will be prompted for it if passwordless key based login is unavailable. + timeout : int [default: 60] + The time (in seconds) after which no activity will result in the tunnel + closing. This prevents orphaned tunnels from running forever. 
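+
+    Examples
+    --------
+    A sketch of typical use; the server string is illustrative::
+
+        lport = select_random_ports(1)[0]
+        # forward localhost:lport to localhost:5555 as seen from user@gateway
+        openssh_tunnel(lport, 5555, 'user@gateway')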
+ """ + if pexpect is None: + raise ImportError("pexpect unavailable, use paramiko_tunnel") + ssh="ssh " + if keyfile: + ssh += "-i " + keyfile + + if ':' in server: + server, port = server.split(':') + ssh += " -p %s" % port + + cmd = "%s -O check %s" % (ssh, server) + (output, exitstatus) = pexpect.run(cmd, withexitstatus=True) + if not exitstatus: + pid = int(output[output.find("(pid=")+5:output.find(")")]) + cmd = "%s -O forward -L 127.0.0.1:%i:%s:%i %s" % ( + ssh, lport, remoteip, rport, server) + (output, exitstatus) = pexpect.run(cmd, withexitstatus=True) + if not exitstatus: + atexit.register(_stop_tunnel, cmd.replace("-O forward", "-O cancel", 1)) + return pid + cmd = "%s -f -S none -L 127.0.0.1:%i:%s:%i %s sleep %i" % ( + ssh, lport, remoteip, rport, server, timeout) + + # pop SSH_ASKPASS from env + env = os.environ.copy() + env.pop('SSH_ASKPASS', None) + + ssh_newkey = 'Are you sure you want to continue connecting' + tunnel = pexpect.spawn(cmd, env=env) + failed = False + while True: + try: + i = tunnel.expect([ssh_newkey, '[Pp]assword:'], timeout=.1) + if i==0: + raise SSHException('The authenticity of the host can\'t be established.') + except pexpect.TIMEOUT: + continue + except pexpect.EOF: + if tunnel.exitstatus: + print(tunnel.exitstatus) + print(tunnel.before) + print(tunnel.after) + raise RuntimeError("tunnel '%s' failed to start"%(cmd)) + else: + return tunnel.pid + else: + if failed: + print("Password rejected, try again") + password=None + if password is None: + password = getpass("%s's password: "%(server)) + tunnel.sendline(password) + failed = True + +def _stop_tunnel(cmd): + pexpect.run(cmd) + +def _split_server(server): + if '@' in server: + username,server = server.split('@', 1) + else: + username = getuser() + if ':' in server: + server, port = server.split(':') + port = int(port) + else: + port = 22 + return username, server, port + +def paramiko_tunnel(lport, rport, server, remoteip='127.0.0.1', keyfile=None, password=None, timeout=60): + """launch a tunner with paramiko in a subprocess. This should only be used + when shell ssh is unavailable (e.g. Windows). + + This creates a tunnel redirecting `localhost:lport` to `remoteip:rport`, + as seen from `server`. + + If you are familiar with ssh tunnels, this creates the tunnel: + + ssh server -L localhost:lport:remoteip:rport + + keyfile and password may be specified, but ssh config is checked for defaults. + + + Parameters + ---------- + + lport : int + local port for connecting to the tunnel from this machine. + rport : int + port on the remote machine to connect to. + server : str + The ssh server to connect to. The full ssh server string will be parsed. + user@server:port + remoteip : str [Default: 127.0.0.1] + The remote ip, specifying the destination of the tunnel. + Default is localhost, which means that the tunnel would redirect + localhost:lport on this machine to localhost:rport on the *server*. + + keyfile : str; path to public key file + This specifies a key to be used in ssh login, default None. + Regular default ssh keys will be used without specifying this argument. + password : str; + Your ssh password to the ssh server. Note that if this is left None, + you will be prompted for it if passwordless key based login is unavailable. + timeout : int [default: 60] + The time (in seconds) after which no activity will result in the tunnel + closing. This prevents orphaned tunnels from running forever. 
+ + """ + if paramiko is None: + raise ImportError("Paramiko not available") + + if password is None: + if not _try_passwordless_paramiko(server, keyfile): + password = getpass("%s's password: "%(server)) + + p = Process(target=_paramiko_tunnel, + args=(lport, rport, server, remoteip), + kwargs=dict(keyfile=keyfile, password=password)) + p.daemon=False + p.start() + atexit.register(_shutdown_process, p) + return p + +def _shutdown_process(p): + if p.is_alive(): + p.terminate() + +def _paramiko_tunnel(lport, rport, server, remoteip, keyfile=None, password=None): + """Function for actually starting a paramiko tunnel, to be passed + to multiprocessing.Process(target=this), and not called directly. + """ + username, server, port = _split_server(server) + client = paramiko.SSHClient() + client.load_system_host_keys() + client.set_missing_host_key_policy(paramiko.WarningPolicy()) + + try: + client.connect(server, port, username=username, key_filename=keyfile, + look_for_keys=True, password=password) +# except paramiko.AuthenticationException: +# if password is None: +# password = getpass("%s@%s's password: "%(username, server)) +# client.connect(server, port, username=username, password=password) +# else: +# raise + except Exception as e: + print('*** Failed to connect to %s:%d: %r' % (server, port, e)) + sys.exit(1) + + # Don't let SIGINT kill the tunnel subprocess + signal.signal(signal.SIGINT, signal.SIG_IGN) + + try: + forward_tunnel(lport, remoteip, rport, client.get_transport()) + except KeyboardInterrupt: + print('SIGINT: Port forwarding stopped cleanly') + sys.exit(0) + except Exception as e: + print("Port forwarding stopped uncleanly: %s"%e) + sys.exit(255) + +if sys.platform == 'win32': + ssh_tunnel = paramiko_tunnel +else: + ssh_tunnel = openssh_tunnel + + +__all__ = ['tunnel_connection', 'ssh_tunnel', 'openssh_tunnel', 'paramiko_tunnel', 'try_passwordless_ssh'] + + diff --git a/scripts/external_libs/zmq/sugar/__init__.py b/scripts/external_libs/zmq/sugar/__init__.py new file mode 100644 index 00000000..d0510a44 --- /dev/null +++ b/scripts/external_libs/zmq/sugar/__init__.py @@ -0,0 +1,27 @@ +"""pure-Python sugar wrappers for core 0MQ objects.""" + +# Copyright (C) PyZMQ Developers +# Distributed under the terms of the Modified BSD License. + + +from zmq.sugar import ( + constants, context, frame, poll, socket, tracker, version +) +from zmq import error + +__all__ = ['constants'] +for submod in ( + constants, context, error, frame, poll, socket, tracker, version +): + __all__.extend(submod.__all__) + +from zmq.error import * +from zmq.sugar.context import * +from zmq.sugar.tracker import * +from zmq.sugar.socket import * +from zmq.sugar.constants import * +from zmq.sugar.frame import * +from zmq.sugar.poll import * +# from zmq.sugar.stopwatch import * +# from zmq.sugar._device import * +from zmq.sugar.version import * diff --git a/scripts/external_libs/zmq/sugar/attrsettr.py b/scripts/external_libs/zmq/sugar/attrsettr.py new file mode 100644 index 00000000..4bbd36d6 --- /dev/null +++ b/scripts/external_libs/zmq/sugar/attrsettr.py @@ -0,0 +1,52 @@ +# coding: utf-8 +"""Mixin for mapping set/getattr to self.set/get""" + +# Copyright (C) PyZMQ Developers +# Distributed under the terms of the Modified BSD License. + + +from . 
import constants + +class AttributeSetter(object): + + def __setattr__(self, key, value): + """set zmq options by attribute""" + + # regular setattr only allowed for class-defined attributes + for obj in [self] + self.__class__.mro(): + if key in obj.__dict__: + object.__setattr__(self, key, value) + return + + upper_key = key.upper() + try: + opt = getattr(constants, upper_key) + except AttributeError: + raise AttributeError("%s has no such option: %s" % ( + self.__class__.__name__, upper_key) + ) + else: + self._set_attr_opt(upper_key, opt, value) + + def _set_attr_opt(self, name, opt, value): + """override if setattr should do something other than call self.set""" + self.set(opt, value) + + def __getattr__(self, key): + """get zmq options by attribute""" + upper_key = key.upper() + try: + opt = getattr(constants, upper_key) + except AttributeError: + raise AttributeError("%s has no such option: %s" % ( + self.__class__.__name__, upper_key) + ) + else: + return self._get_attr_opt(upper_key, opt) + + def _get_attr_opt(self, name, opt): + """override if getattr should do something other than call self.get""" + return self.get(opt) + + +__all__ = ['AttributeSetter'] diff --git a/scripts/external_libs/zmq/sugar/constants.py b/scripts/external_libs/zmq/sugar/constants.py new file mode 100644 index 00000000..88281176 --- /dev/null +++ b/scripts/external_libs/zmq/sugar/constants.py @@ -0,0 +1,98 @@ +"""0MQ Constants.""" + +# Copyright (c) PyZMQ Developers. +# Distributed under the terms of the Modified BSD License. + +from zmq.backend import constants +from zmq.utils.constant_names import ( + base_names, + switched_sockopt_names, + int_sockopt_names, + int64_sockopt_names, + bytes_sockopt_names, + fd_sockopt_names, + ctx_opt_names, + msg_opt_names, +) + +#----------------------------------------------------------------------------- +# Python module level constants +#----------------------------------------------------------------------------- + +__all__ = [ + 'int_sockopts', + 'int64_sockopts', + 'bytes_sockopts', + 'ctx_opts', + 'ctx_opt_names', + ] + +int_sockopts = set() +int64_sockopts = set() +bytes_sockopts = set() +fd_sockopts = set() +ctx_opts = set() +msg_opts = set() + + +if constants.VERSION < 30000: + int64_sockopt_names.extend(switched_sockopt_names) +else: + int_sockopt_names.extend(switched_sockopt_names) + +_UNDEFINED = -9999 + +def _add_constant(name, container=None): + """add a constant to be defined + + optionally add it to one of the sets for use in get/setopt checkers + """ + c = getattr(constants, name, _UNDEFINED) + if c == _UNDEFINED: + return + globals()[name] = c + __all__.append(name) + if container is not None: + container.add(c) + return c + +for name in base_names: + _add_constant(name) + +for name in int_sockopt_names: + _add_constant(name, int_sockopts) + +for name in int64_sockopt_names: + _add_constant(name, int64_sockopts) + +for name in bytes_sockopt_names: + _add_constant(name, bytes_sockopts) + +for name in fd_sockopt_names: + _add_constant(name, fd_sockopts) + +for name in ctx_opt_names: + _add_constant(name, ctx_opts) + +for name in msg_opt_names: + _add_constant(name, msg_opts) + +# ensure some aliases are always defined +aliases = [ + ('DONTWAIT', 'NOBLOCK'), + ('XREQ', 'DEALER'), + ('XREP', 'ROUTER'), +] +for group in aliases: + undefined = set() + found = None + for name in group: + value = getattr(constants, name, -1) + if value != -1: + found = value + else: + undefined.add(name) + if found is not None: + for name in undefined: + globals()[name] = 
found + __all__.append(name) diff --git a/scripts/external_libs/zmq/sugar/context.py b/scripts/external_libs/zmq/sugar/context.py new file mode 100644 index 00000000..86a9c5dc --- /dev/null +++ b/scripts/external_libs/zmq/sugar/context.py @@ -0,0 +1,192 @@ +# coding: utf-8 +"""Python bindings for 0MQ.""" + +# Copyright (C) PyZMQ Developers +# Distributed under the terms of the Modified BSD License. + +import atexit +import weakref + +from zmq.backend import Context as ContextBase +from . import constants +from .attrsettr import AttributeSetter +from .constants import ENOTSUP, ctx_opt_names +from .socket import Socket +from zmq.error import ZMQError + +from zmq.utils.interop import cast_int_addr + + +class Context(ContextBase, AttributeSetter): + """Create a zmq Context + + A zmq Context creates sockets via its ``ctx.socket`` method. + """ + sockopts = None + _instance = None + _shadow = False + _exiting = False + + def __init__(self, io_threads=1, **kwargs): + super(Context, self).__init__(io_threads=io_threads, **kwargs) + if kwargs.get('shadow', False): + self._shadow = True + else: + self._shadow = False + self.sockopts = {} + + self._exiting = False + if not self._shadow: + ctx_ref = weakref.ref(self) + def _notify_atexit(): + ctx = ctx_ref() + if ctx is not None: + ctx._exiting = True + atexit.register(_notify_atexit) + + def __del__(self): + """deleting a Context should terminate it, without trying non-threadsafe destroy""" + if not self._shadow and not self._exiting: + self.term() + + def __enter__(self): + return self + + def __exit__(self, *args, **kwargs): + self.term() + + @classmethod + def shadow(cls, address): + """Shadow an existing libzmq context + + address is the integer address of the libzmq context + or an FFI pointer to it. + + .. versionadded:: 14.1 + """ + address = cast_int_addr(address) + return cls(shadow=address) + + @classmethod + def shadow_pyczmq(cls, ctx): + """Shadow an existing pyczmq context + + ctx is the FFI `zctx_t *` pointer + + .. versionadded:: 14.1 + """ + from pyczmq import zctx + + underlying = zctx.underlying(ctx) + address = cast_int_addr(underlying) + return cls(shadow=address) + + # static method copied from tornado IOLoop.instance + @classmethod + def instance(cls, io_threads=1): + """Returns a global Context instance. + + Most single-threaded applications have a single, global Context. + Use this method instead of passing around Context instances + throughout your code. + + A common pattern for classes that depend on Contexts is to use + a default argument to enable programs with multiple Contexts + but not require the argument for simpler applications: + + class MyClass(object): + def __init__(self, context=None): + self.context = context or Context.instance() + """ + if cls._instance is None or cls._instance.closed: + cls._instance = cls(io_threads=io_threads) + return cls._instance + + #------------------------------------------------------------------------- + # Hooks for ctxopt completion + #------------------------------------------------------------------------- + + def __dir__(self): + keys = dir(self.__class__) + + for collection in ( + ctx_opt_names, + ): + keys.extend(collection) + return keys + + #------------------------------------------------------------------------- + # Creating Sockets + #------------------------------------------------------------------------- + + @property + def _socket_class(self): + return Socket + + def socket(self, socket_type): + """Create a Socket associated with this Context. 
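+
+        For example (the socket type here is illustrative)::
+
+            ctx = zmq.Context.instance()
+            req = ctx.socket(zmq.REQ)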
+ + Parameters + ---------- + socket_type : int + The socket type, which can be any of the 0MQ socket types: + REQ, REP, PUB, SUB, PAIR, DEALER, ROUTER, PULL, PUSH, etc. + """ + if self.closed: + raise ZMQError(ENOTSUP) + s = self._socket_class(self, socket_type) + for opt, value in self.sockopts.items(): + try: + s.setsockopt(opt, value) + except ZMQError: + # ignore ZMQErrors, which are likely for socket options + # that do not apply to a particular socket type, e.g. + # SUBSCRIBE for non-SUB sockets. + pass + return s + + def setsockopt(self, opt, value): + """set default socket options for new sockets created by this Context + + .. versionadded:: 13.0 + """ + self.sockopts[opt] = value + + def getsockopt(self, opt): + """get default socket options for new sockets created by this Context + + .. versionadded:: 13.0 + """ + return self.sockopts[opt] + + def _set_attr_opt(self, name, opt, value): + """set default sockopts as attributes""" + if name in constants.ctx_opt_names: + return self.set(opt, value) + else: + self.sockopts[opt] = value + + def _get_attr_opt(self, name, opt): + """get default sockopts as attributes""" + if name in constants.ctx_opt_names: + return self.get(opt) + else: + if opt not in self.sockopts: + raise AttributeError(name) + else: + return self.sockopts[opt] + + def __delattr__(self, key): + """delete default sockopts as attributes""" + key = key.upper() + try: + opt = getattr(constants, key) + except AttributeError: + raise AttributeError("no such socket option: %s" % key) + else: + if opt not in self.sockopts: + raise AttributeError(key) + else: + del self.sockopts[opt] + +__all__ = ['Context'] diff --git a/scripts/external_libs/zmq/sugar/frame.py b/scripts/external_libs/zmq/sugar/frame.py new file mode 100644 index 00000000..9f556c86 --- /dev/null +++ b/scripts/external_libs/zmq/sugar/frame.py @@ -0,0 +1,19 @@ +# coding: utf-8 +"""0MQ Frame pure Python methods.""" + +# Copyright (C) PyZMQ Developers +# Distributed under the terms of the Modified BSD License. + + +from .attrsettr import AttributeSetter +from zmq.backend import Frame as FrameBase + + +class Frame(FrameBase, AttributeSetter): + def __getitem__(self, key): + # map Frame['User-Id'] to Frame.get('User-Id') + return self.get(key) + +# keep deprecated alias +Message = Frame +__all__ = ['Frame', 'Message']
\ No newline at end of file
diff --git a/scripts/external_libs/zmq/sugar/poll.py b/scripts/external_libs/zmq/sugar/poll.py
new file mode 100644
index 00000000..c7b1d1bb
--- /dev/null
+++ b/scripts/external_libs/zmq/sugar/poll.py
@@ -0,0 +1,161 @@
+"""0MQ polling related functions and classes."""
+
+# Copyright (C) PyZMQ Developers
+# Distributed under the terms of the Modified BSD License.
+
+
+import zmq
+from zmq.backend import zmq_poll
+from .constants import POLLIN, POLLOUT, POLLERR
+
+#-----------------------------------------------------------------------------
+# Polling related methods
+#-----------------------------------------------------------------------------
+
+
+class Poller(object):
+    """A stateful poll interface that mirrors Python's built-in poll."""
+    sockets = None
+    _map = {}
+
+    def __init__(self):
+        self.sockets = []
+        self._map = {}
+
+    def __contains__(self, socket):
+        return socket in self._map
+
+    def register(self, socket, flags=POLLIN|POLLOUT):
+        """p.register(socket, flags=POLLIN|POLLOUT)
+
+        Register a 0MQ socket or native fd for I/O monitoring.
+
+        register(s, 0) is equivalent to unregister(s).
+
+        Parameters
+        ----------
+        socket : zmq.Socket or native socket
+            A zmq.Socket or any Python object having a ``fileno()``
+            method that returns a valid file descriptor.
+        flags : int
+            The events to watch for.  Can be POLLIN, POLLOUT or POLLIN|POLLOUT.
+            If `flags=0`, socket will be unregistered.
+        """
+        if flags:
+            if socket in self._map:
+                idx = self._map[socket]
+                self.sockets[idx] = (socket, flags)
+            else:
+                idx = len(self.sockets)
+                self.sockets.append((socket, flags))
+                self._map[socket] = idx
+        elif socket in self._map:
+            # unregister sockets registered with no events
+            self.unregister(socket)
+        else:
+            # ignore new sockets with no events
+            pass
+
+    def modify(self, socket, flags=POLLIN|POLLOUT):
+        """Modify the flags for an already registered 0MQ socket or native fd."""
+        self.register(socket, flags)
+
+    def unregister(self, socket):
+        """Remove a 0MQ socket or native fd for I/O monitoring.
+
+        Parameters
+        ----------
+        socket : Socket
+            The socket instance to stop polling.
+        """
+        idx = self._map.pop(socket)
+        self.sockets.pop(idx)
+        # shift indices after deletion
+        for socket, flags in self.sockets[idx:]:
+            self._map[socket] -= 1
+
+    def poll(self, timeout=None):
+        """Poll the registered 0MQ or native fds for I/O.
+
+        Parameters
+        ----------
+        timeout : float, int
+            The timeout in milliseconds. If None, no `timeout` (infinite). This
+            is in milliseconds to be compatible with ``select.poll()``. The
+            underlying zmq_poll uses microseconds and we convert to that in
+            this function.
+
+        Returns
+        -------
+        events : list of tuples
+            The list of events that are ready to be processed.
+            This is a list of tuples of the form ``(socket, event)``, where the 0MQ Socket
+            or integer fd is the first element, and the poll event mask (POLLIN, POLLOUT) is the second.
+            It is common to call ``events = dict(poller.poll())``,
+            which turns the list of tuples into a mapping of ``socket : event``.
+        """
+        if timeout is None or timeout < 0:
+            timeout = -1
+        elif isinstance(timeout, float):
+            timeout = int(timeout)
+        return zmq_poll(self.sockets, timeout=timeout)
+
+
+def select(rlist, wlist, xlist, timeout=None):
+    """select(rlist, wlist, xlist, timeout=None) -> (rlist, wlist, xlist)
+
+    Return the result of poll as lists of sockets ready for r/w/exception.
+
+    This has the same interface as Python's built-in ``select.select()`` function.
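+
+    For example, with ``sock`` an illustrative socket::
+
+        rlist, wlist, xlist = zmq.select([sock], [sock], [], timeout=1.0)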
+ + Parameters + ---------- + timeout : float, int, optional + The timeout in seconds. If None, no timeout (infinite). This is in seconds to be + compatible with ``select.select()``. The underlying zmq_poll uses microseconds + and we convert to that in this function. + rlist : list of sockets/FDs + sockets/FDs to be polled for read events + wlist : list of sockets/FDs + sockets/FDs to be polled for write events + xlist : list of sockets/FDs + sockets/FDs to be polled for error events + + Returns + ------- + (rlist, wlist, xlist) : tuple of lists of sockets (length 3) + Lists correspond to sockets available for read/write/error events respectively. + """ + if timeout is None: + timeout = -1 + # Convert from sec -> us for zmq_poll. + # zmq_poll accepts 3.x style timeout in ms + timeout = int(timeout*1000.0) + if timeout < 0: + timeout = -1 + sockets = [] + for s in set(rlist + wlist + xlist): + flags = 0 + if s in rlist: + flags |= POLLIN + if s in wlist: + flags |= POLLOUT + if s in xlist: + flags |= POLLERR + sockets.append((s, flags)) + return_sockets = zmq_poll(sockets, timeout) + rlist, wlist, xlist = [], [], [] + for s, flags in return_sockets: + if flags & POLLIN: + rlist.append(s) + if flags & POLLOUT: + wlist.append(s) + if flags & POLLERR: + xlist.append(s) + return rlist, wlist, xlist + +#----------------------------------------------------------------------------- +# Symbols to export +#----------------------------------------------------------------------------- + +__all__ = [ 'Poller', 'select' ] diff --git a/scripts/external_libs/zmq/sugar/socket.py b/scripts/external_libs/zmq/sugar/socket.py new file mode 100644 index 00000000..c91589d7 --- /dev/null +++ b/scripts/external_libs/zmq/sugar/socket.py @@ -0,0 +1,495 @@ +# coding: utf-8 +"""0MQ Socket pure Python methods.""" + +# Copyright (C) PyZMQ Developers +# Distributed under the terms of the Modified BSD License. + + +import codecs +import random +import warnings + +import zmq +from zmq.backend import Socket as SocketBase +from .poll import Poller +from . import constants +from .attrsettr import AttributeSetter +from zmq.error import ZMQError, ZMQBindError +from zmq.utils import jsonapi +from zmq.utils.strtypes import bytes,unicode,basestring +from zmq.utils.interop import cast_int_addr + +from .constants import ( + SNDMORE, ENOTSUP, POLLIN, + int64_sockopt_names, + int_sockopt_names, + bytes_sockopt_names, + fd_sockopt_names, +) +try: + import cPickle + pickle = cPickle +except: + cPickle = None + import pickle + +try: + DEFAULT_PROTOCOL = pickle.DEFAULT_PROTOCOL +except AttributeError: + DEFAULT_PROTOCOL = pickle.HIGHEST_PROTOCOL + + +class Socket(SocketBase, AttributeSetter): + """The ZMQ socket object + + To create a Socket, first create a Context:: + + ctx = zmq.Context.instance() + + then call ``ctx.socket(socket_type)``:: + + s = ctx.socket(zmq.ROUTER) + + """ + _shadow = False + + def __del__(self): + if not self._shadow: + self.close() + + # socket as context manager: + def __enter__(self): + """Sockets are context managers + + .. versionadded:: 14.4 + """ + return self + + def __exit__(self, *args, **kwargs): + self.close() + + #------------------------------------------------------------------------- + # Socket creation + #------------------------------------------------------------------------- + + @classmethod + def shadow(cls, address): + """Shadow an existing libzmq socket + + address is the integer address of the libzmq socket + or an FFI pointer to it. + + .. 
versionadded:: 14.1 + """ + address = cast_int_addr(address) + return cls(shadow=address) + + #------------------------------------------------------------------------- + # Deprecated aliases + #------------------------------------------------------------------------- + + @property + def socket_type(self): + warnings.warn("Socket.socket_type is deprecated, use Socket.type", + DeprecationWarning + ) + return self.type + + #------------------------------------------------------------------------- + # Hooks for sockopt completion + #------------------------------------------------------------------------- + + def __dir__(self): + keys = dir(self.__class__) + for collection in ( + bytes_sockopt_names, + int_sockopt_names, + int64_sockopt_names, + fd_sockopt_names, + ): + keys.extend(collection) + return keys + + #------------------------------------------------------------------------- + # Getting/Setting options + #------------------------------------------------------------------------- + setsockopt = SocketBase.set + getsockopt = SocketBase.get + + def set_string(self, option, optval, encoding='utf-8'): + """set socket options with a unicode object + + This is simply a wrapper for setsockopt to protect from encoding ambiguity. + + See the 0MQ documentation for details on specific options. + + Parameters + ---------- + option : int + The name of the option to set. Can be any of: SUBSCRIBE, + UNSUBSCRIBE, IDENTITY + optval : unicode string (unicode on py2, str on py3) + The value of the option to set. + encoding : str + The encoding to be used, default is utf8 + """ + if not isinstance(optval, unicode): + raise TypeError("unicode strings only") + return self.set(option, optval.encode(encoding)) + + setsockopt_unicode = setsockopt_string = set_string + + def get_string(self, option, encoding='utf-8'): + """get the value of a socket option + + See the 0MQ documentation for details on specific options. + + Parameters + ---------- + option : int + The option to retrieve. + + Returns + ------- + optval : unicode string (unicode on py2, str on py3) + The value of the option as a unicode string. + """ + + if option not in constants.bytes_sockopts: + raise TypeError("option %i will not return a string to be decoded"%option) + return self.getsockopt(option).decode(encoding) + + getsockopt_unicode = getsockopt_string = get_string + + def bind_to_random_port(self, addr, min_port=49152, max_port=65536, max_tries=100): + """bind this socket to a random port in a range + + Parameters + ---------- + addr : str + The address string without the port to pass to ``Socket.bind()``. + min_port : int, optional + The minimum port in the range of ports to try (inclusive). + max_port : int, optional + The maximum port in the range of ports to try (exclusive). + max_tries : int, optional + The maximum number of bind attempts to make. + + Returns + ------- + port : int + The port the socket was bound to. 
+ + Raises + ------ + ZMQBindError + if `max_tries` reached before successful bind + """ + for i in range(max_tries): + try: + port = random.randrange(min_port, max_port) + self.bind('%s:%s' % (addr, port)) + except ZMQError as exception: + if not exception.errno == zmq.EADDRINUSE: + raise + else: + return port + raise ZMQBindError("Could not bind socket to random port.") + + def get_hwm(self): + """get the High Water Mark + + On libzmq ≥ 3, this gets SNDHWM if available, otherwise RCVHWM + """ + major = zmq.zmq_version_info()[0] + if major >= 3: + # return sndhwm, fallback on rcvhwm + try: + return self.getsockopt(zmq.SNDHWM) + except zmq.ZMQError as e: + pass + + return self.getsockopt(zmq.RCVHWM) + else: + return self.getsockopt(zmq.HWM) + + def set_hwm(self, value): + """set the High Water Mark + + On libzmq ≥ 3, this sets both SNDHWM and RCVHWM + """ + major = zmq.zmq_version_info()[0] + if major >= 3: + raised = None + try: + self.sndhwm = value + except Exception as e: + raised = e + try: + self.rcvhwm = value + except Exception: + raised = e + + if raised: + raise raised + else: + return self.setsockopt(zmq.HWM, value) + + hwm = property(get_hwm, set_hwm, + """property for High Water Mark + + Setting hwm sets both SNDHWM and RCVHWM as appropriate. + It gets SNDHWM if available, otherwise RCVHWM. + """ + ) + + #------------------------------------------------------------------------- + # Sending and receiving messages + #------------------------------------------------------------------------- + + def send_multipart(self, msg_parts, flags=0, copy=True, track=False): + """send a sequence of buffers as a multipart message + + The zmq.SNDMORE flag is added to all msg parts before the last. + + Parameters + ---------- + msg_parts : iterable + A sequence of objects to send as a multipart message. Each element + can be any sendable object (Frame, bytes, buffer-providers) + flags : int, optional + SNDMORE is handled automatically for frames before the last. + copy : bool, optional + Should the frame(s) be sent in a copying or non-copying manner. + track : bool, optional + Should the frame(s) be tracked for notification that ZMQ has + finished with it (ignored if copy=True). + + Returns + ------- + None : if copy or not track + MessageTracker : if track and not copy + a MessageTracker object, whose `pending` property will + be True until the last send is completed. + """ + for msg in msg_parts[:-1]: + self.send(msg, SNDMORE|flags, copy=copy, track=track) + # Send the last part without the extra SNDMORE flag. + return self.send(msg_parts[-1], flags, copy=copy, track=track) + + def recv_multipart(self, flags=0, copy=True, track=False): + """receive a multipart message as a list of bytes or Frame objects + + Parameters + ---------- + flags : int, optional + Any supported flag: NOBLOCK. If NOBLOCK is set, this method + will raise a ZMQError with EAGAIN if a message is not ready. + If NOBLOCK is not set, then this method will block until a + message arrives. + copy : bool, optional + Should the message frame(s) be received in a copying or non-copying manner? + If False a Frame object is returned for each part, if True a copy of + the bytes is made for each frame. + track : bool, optional + Should the message frame(s) be tracked for notification that ZMQ has + finished with it? (ignored if copy=True) + + Returns + ------- + msg_parts : list + A list of frames in the multipart message; either Frames or bytes, + depending on `copy`. 
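+
+        For example, echoing every frame of a message back to its
+        sender (``sock`` is illustrative)::
+
+            parts = sock.recv_multipart()
+            sock.send_multipart(parts)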
+ + """ + parts = [self.recv(flags, copy=copy, track=track)] + # have first part already, only loop while more to receive + while self.getsockopt(zmq.RCVMORE): + part = self.recv(flags, copy=copy, track=track) + parts.append(part) + + return parts + + def send_string(self, u, flags=0, copy=True, encoding='utf-8'): + """send a Python unicode string as a message with an encoding + + 0MQ communicates with raw bytes, so you must encode/decode + text (unicode on py2, str on py3) around 0MQ. + + Parameters + ---------- + u : Python unicode string (unicode on py2, str on py3) + The unicode string to send. + flags : int, optional + Any valid send flag. + encoding : str [default: 'utf-8'] + The encoding to be used + """ + if not isinstance(u, basestring): + raise TypeError("unicode/str objects only") + return self.send(u.encode(encoding), flags=flags, copy=copy) + + send_unicode = send_string + + def recv_string(self, flags=0, encoding='utf-8'): + """receive a unicode string, as sent by send_string + + Parameters + ---------- + flags : int + Any valid recv flag. + encoding : str [default: 'utf-8'] + The encoding to be used + + Returns + ------- + s : unicode string (unicode on py2, str on py3) + The Python unicode string that arrives as encoded bytes. + """ + b = self.recv(flags=flags) + return b.decode(encoding) + + recv_unicode = recv_string + + def send_pyobj(self, obj, flags=0, protocol=DEFAULT_PROTOCOL): + """send a Python object as a message using pickle to serialize + + Parameters + ---------- + obj : Python object + The Python object to send. + flags : int + Any valid send flag. + protocol : int + The pickle protocol number to use. The default is pickle.DEFAULT_PROTOCOl + where defined, and pickle.HIGHEST_PROTOCOL elsewhere. + """ + msg = pickle.dumps(obj, protocol) + return self.send(msg, flags) + + def recv_pyobj(self, flags=0): + """receive a Python object as a message using pickle to serialize + + Parameters + ---------- + flags : int + Any valid recv flag. + + Returns + ------- + obj : Python object + The Python object that arrives as a message. + """ + s = self.recv(flags) + return pickle.loads(s) + + def send_json(self, obj, flags=0, **kwargs): + """send a Python object as a message using json to serialize + + Keyword arguments are passed on to json.dumps + + Parameters + ---------- + obj : Python object + The Python object to send + flags : int + Any valid send flag + """ + msg = jsonapi.dumps(obj, **kwargs) + return self.send(msg, flags) + + def recv_json(self, flags=0, **kwargs): + """receive a Python object as a message using json to serialize + + Keyword arguments are passed on to json.loads + + Parameters + ---------- + flags : int + Any valid recv flag. + + Returns + ------- + obj : Python object + The Python object that arrives as a message. + """ + msg = self.recv(flags) + return jsonapi.loads(msg, **kwargs) + + _poller_class = Poller + + def poll(self, timeout=None, flags=POLLIN): + """poll the socket for events + + The default is to poll forever for incoming + events. Timeout is in milliseconds, if specified. + + Parameters + ---------- + timeout : int [default: None] + The timeout (in milliseconds) to wait for an event. If unspecified + (or specified None), will wait forever for an event. + flags : bitfield (int) [default: POLLIN] + The event flags to poll for (any combination of POLLIN|POLLOUT). + The default is to check for incoming events (POLLIN). + + Returns + ------- + events : bitfield (int) + The events that are ready and waiting. 
+            Will be 0 if no events were ready by the time timeout was reached.
+        """
+
+        if self.closed:
+            raise ZMQError(ENOTSUP)
+
+        p = self._poller_class()
+        p.register(self, flags)
+        evts = dict(p.poll(timeout))
+        # return 0 if no events, otherwise return event bitfield
+        return evts.get(self, 0)
+
+    def get_monitor_socket(self, events=None, addr=None):
+        """Return a connected PAIR socket ready to receive the event notifications.
+
+        .. versionadded:: libzmq-4.0
+        .. versionadded:: 14.0
+
+        Parameters
+        ----------
+        events : bitfield (int) [default: ZMQ_EVENTS_ALL]
+            The bitmask defining which events are wanted.
+        addr : string [default: None]
+            The optional endpoint for the monitoring sockets.
+
+        Returns
+        -------
+        socket : (PAIR)
+            The socket is already connected and ready to receive messages.
+        """
+        # safe-guard, method only available on libzmq >= 4
+        if zmq.zmq_version_info() < (4,):
+            raise NotImplementedError("get_monitor_socket requires libzmq >= 4, have %s" % zmq.zmq_version())
+        if addr is None:
+            # create endpoint name from internal fd
+            addr = "inproc://monitor.s-%d" % self.FD
+        if events is None:
+            # use all events
+            events = zmq.EVENT_ALL
+        # attach monitoring socket
+        self.monitor(addr, events)
+        # create new PAIR socket and connect it
+        ret = self.context.socket(zmq.PAIR)
+        ret.connect(addr)
+        return ret
+
+    def disable_monitor(self):
+        """Shut down the PAIR socket (created using get_monitor_socket)
+        that is serving socket events.
+
+        .. versionadded:: 14.4
+        """
+        self.monitor(None, 0)
+
+
+__all__ = ['Socket']
diff --git a/scripts/external_libs/zmq/sugar/tracker.py b/scripts/external_libs/zmq/sugar/tracker.py
new file mode 100644
index 00000000..fb8c007f
--- /dev/null
+++ b/scripts/external_libs/zmq/sugar/tracker.py
@@ -0,0 +1,120 @@
+"""Tracker for zero-copy messages with 0MQ."""
+
+# Copyright (C) PyZMQ Developers
+# Distributed under the terms of the Modified BSD License.
+
+import time
+
+try:
+    # below 3.3
+    from threading import _Event as Event
+except (ImportError, AttributeError):
+    # python throws ImportError, cython throws AttributeError
+    from threading import Event
+
+from zmq.error import NotDone
+from zmq.backend import Frame
+
+class MessageTracker(object):
+    """MessageTracker(*towatch)
+
+    A class for tracking if 0MQ is done using one or more messages.
+
+    When you send a 0MQ message, it is not sent immediately. The 0MQ IO thread
+    sends the message at some later time. Often you want to know when 0MQ has
+    actually sent the message though. This is complicated by the fact that
+    a single 0MQ message can be sent multiple times using different sockets.
+    This class allows you to track all of the 0MQ usages of a message.
+
+    Parameters
+    ----------
+    *towatch : tuple of Event, MessageTracker, Message instances.
+        This list of objects to track. This class can track the low-level
+        Events used by the Message class, other MessageTrackers or
+        actual Messages.
+    """
+    events = None
+    peers = None
+
+    def __init__(self, *towatch):
+        """MessageTracker(*towatch)
+
+        Create a message tracker to track a set of messages.
+
+        Parameters
+        ----------
+        *towatch : tuple of Event, MessageTracker, Message instances.
+            This list of objects to track.  This class can track the low-level
+            Events used by the Message class, other MessageTrackers or
+            actual Messages.
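+
+        Trackers are normally obtained from a non-copying send,
+        for example (``sock`` and the payload are illustrative)::
+
+            tracker = sock.send(b'data', copy=False, track=True)
+            tracker.wait(1)  # raises zmq.NotDone if 0MQ still holds the buffer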
+ """ + self.events = set() + self.peers = set() + for obj in towatch: + if isinstance(obj, Event): + self.events.add(obj) + elif isinstance(obj, MessageTracker): + self.peers.add(obj) + elif isinstance(obj, Frame): + if not obj.tracker: + raise ValueError("Not a tracked message") + self.peers.add(obj.tracker) + else: + raise TypeError("Require Events or Message Frames, not %s"%type(obj)) + + @property + def done(self): + """Is 0MQ completely done with the message(s) being tracked?""" + for evt in self.events: + if not evt.is_set(): + return False + for pm in self.peers: + if not pm.done: + return False + return True + + def wait(self, timeout=-1): + """mt.wait(timeout=-1) + + Wait for 0MQ to be done with the message or until `timeout`. + + Parameters + ---------- + timeout : float [default: -1, wait forever] + Maximum time in (s) to wait before raising NotDone. + + Returns + ------- + None + if done before `timeout` + + Raises + ------ + NotDone + if `timeout` reached before I am done. + """ + tic = time.time() + if timeout is False or timeout < 0: + remaining = 3600*24*7 # a week + else: + remaining = timeout + done = False + for evt in self.events: + if remaining < 0: + raise NotDone + evt.wait(timeout=remaining) + if not evt.is_set(): + raise NotDone + toc = time.time() + remaining -= (toc-tic) + tic = toc + + for peer in self.peers: + if remaining < 0: + raise NotDone + peer.wait(timeout=remaining) + toc = time.time() + remaining -= (toc-tic) + tic = toc + +__all__ = ['MessageTracker']
\ No newline at end of file diff --git a/scripts/external_libs/zmq/sugar/version.py b/scripts/external_libs/zmq/sugar/version.py new file mode 100644 index 00000000..ea8fbbc4 --- /dev/null +++ b/scripts/external_libs/zmq/sugar/version.py @@ -0,0 +1,48 @@ +"""PyZMQ and 0MQ version functions.""" + +# Copyright (C) PyZMQ Developers +# Distributed under the terms of the Modified BSD License. + + +from zmq.backend import zmq_version_info + + +VERSION_MAJOR = 14 +VERSION_MINOR = 5 +VERSION_PATCH = 0 +VERSION_EXTRA = "" +__version__ = '%i.%i.%i' % (VERSION_MAJOR, VERSION_MINOR, VERSION_PATCH) + +if VERSION_EXTRA: + __version__ = "%s-%s" % (__version__, VERSION_EXTRA) + version_info = (VERSION_MAJOR, VERSION_MINOR, VERSION_PATCH, float('inf')) +else: + version_info = (VERSION_MAJOR, VERSION_MINOR, VERSION_PATCH) + +__revision__ = '' + +def pyzmq_version(): + """return the version of pyzmq as a string""" + if __revision__: + return '@'.join([__version__,__revision__[:6]]) + else: + return __version__ + +def pyzmq_version_info(): + """return the pyzmq version as a tuple of at least three numbers + + If pyzmq is a development version, `inf` will be appended after the third integer. + """ + return version_info + + +def zmq_version(): + """return the version of libzmq as a string""" + return "%i.%i.%i" % zmq_version_info() + + +__all__ = ['zmq_version', 'zmq_version_info', + 'pyzmq_version','pyzmq_version_info', + '__version__', '__revision__' +] + diff --git a/scripts/external_libs/zmq/tests/__init__.py b/scripts/external_libs/zmq/tests/__init__.py new file mode 100644 index 00000000..325a3f19 --- /dev/null +++ b/scripts/external_libs/zmq/tests/__init__.py @@ -0,0 +1,211 @@ +# Copyright (c) PyZMQ Developers. +# Distributed under the terms of the Modified BSD License. + +import functools +import sys +import time +from threading import Thread + +from unittest import TestCase + +import zmq +from zmq.utils import jsonapi + +try: + import gevent + from zmq import green as gzmq + have_gevent = True +except ImportError: + have_gevent = False + +try: + from unittest import SkipTest +except ImportError: + try: + from nose import SkipTest + except ImportError: + class SkipTest(Exception): + pass + +PYPY = 'PyPy' in sys.version + +#----------------------------------------------------------------------------- +# skip decorators (directly from unittest) +#----------------------------------------------------------------------------- + +_id = lambda x: x + +def skip(reason): + """ + Unconditionally skip a test. + """ + def decorator(test_item): + if not (isinstance(test_item, type) and issubclass(test_item, TestCase)): + @functools.wraps(test_item) + def skip_wrapper(*args, **kwargs): + raise SkipTest(reason) + test_item = skip_wrapper + + test_item.__unittest_skip__ = True + test_item.__unittest_skip_why__ = reason + return test_item + return decorator + +def skip_if(condition, reason="Skipped"): + """ + Skip a test if the condition is true. 
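+
+    For example (the condition and reason are illustrative)::
+
+        @skip_if(sys.platform == 'win32', "does not run on Windows")
+        def test_something(self):
+            pass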
+ """ + if condition: + return skip(reason) + return _id + +skip_pypy = skip_if(PYPY, "Doesn't work on PyPy") + +#----------------------------------------------------------------------------- +# Base test class +#----------------------------------------------------------------------------- + +class BaseZMQTestCase(TestCase): + green = False + + @property + def Context(self): + if self.green: + return gzmq.Context + else: + return zmq.Context + + def socket(self, socket_type): + s = self.context.socket(socket_type) + self.sockets.append(s) + return s + + def setUp(self): + if self.green and not have_gevent: + raise SkipTest("requires gevent") + self.context = self.Context.instance() + self.sockets = [] + + def tearDown(self): + contexts = set([self.context]) + while self.sockets: + sock = self.sockets.pop() + contexts.add(sock.context) # in case additional contexts are created + sock.close(0) + for ctx in contexts: + t = Thread(target=ctx.term) + t.daemon = True + t.start() + t.join(timeout=2) + if t.is_alive(): + # reset Context.instance, so the failure to term doesn't corrupt subsequent tests + zmq.sugar.context.Context._instance = None + raise RuntimeError("context could not terminate, open sockets likely remain in test") + + def create_bound_pair(self, type1=zmq.PAIR, type2=zmq.PAIR, interface='tcp://127.0.0.1'): + """Create a bound socket pair using a random port.""" + s1 = self.context.socket(type1) + s1.setsockopt(zmq.LINGER, 0) + port = s1.bind_to_random_port(interface) + s2 = self.context.socket(type2) + s2.setsockopt(zmq.LINGER, 0) + s2.connect('%s:%s' % (interface, port)) + self.sockets.extend([s1,s2]) + return s1, s2 + + def ping_pong(self, s1, s2, msg): + s1.send(msg) + msg2 = s2.recv() + s2.send(msg2) + msg3 = s1.recv() + return msg3 + + def ping_pong_json(self, s1, s2, o): + if jsonapi.jsonmod is None: + raise SkipTest("No json library") + s1.send_json(o) + o2 = s2.recv_json() + s2.send_json(o2) + o3 = s1.recv_json() + return o3 + + def ping_pong_pyobj(self, s1, s2, o): + s1.send_pyobj(o) + o2 = s2.recv_pyobj() + s2.send_pyobj(o2) + o3 = s1.recv_pyobj() + return o3 + + def assertRaisesErrno(self, errno, func, *args, **kwargs): + try: + func(*args, **kwargs) + except zmq.ZMQError as e: + self.assertEqual(e.errno, errno, "wrong error raised, expected '%s' \ +got '%s'" % (zmq.ZMQError(errno), zmq.ZMQError(e.errno))) + else: + self.fail("Function did not raise any error") + + def _select_recv(self, multipart, socket, **kwargs): + """call recv[_multipart] in a way that raises if there is nothing to receive""" + if zmq.zmq_version_info() >= (3,1,0): + # zmq 3.1 has a bug, where poll can return false positives, + # so we wait a little bit just in case + # See LIBZMQ-280 on JIRA + time.sleep(0.1) + + r,w,x = zmq.select([socket], [], [], timeout=5) + assert len(r) > 0, "Should have received a message" + kwargs['flags'] = zmq.DONTWAIT | kwargs.get('flags', 0) + + recv = socket.recv_multipart if multipart else socket.recv + return recv(**kwargs) + + def recv(self, socket, **kwargs): + """call recv in a way that raises if there is nothing to receive""" + return self._select_recv(False, socket, **kwargs) + + def recv_multipart(self, socket, **kwargs): + """call recv_multipart in a way that raises if there is nothing to receive""" + return self._select_recv(True, socket, **kwargs) + + +class PollZMQTestCase(BaseZMQTestCase): + pass + +class GreenTest: + """Mixin for making green versions of test classes""" + green = True + + def assertRaisesErrno(self, errno, func, *args, **kwargs): + if 
errno == zmq.EAGAIN: + raise SkipTest("Skipping because we're green.") + try: + func(*args, **kwargs) + except zmq.ZMQError: + e = sys.exc_info()[1] + self.assertEqual(e.errno, errno, "wrong error raised, expected '%s' \ +got '%s'" % (zmq.ZMQError(errno), zmq.ZMQError(e.errno))) + else: + self.fail("Function did not raise any error") + + def tearDown(self): + contexts = set([self.context]) + while self.sockets: + sock = self.sockets.pop() + contexts.add(sock.context) # in case additional contexts are created + sock.close() + try: + gevent.joinall([gevent.spawn(ctx.term) for ctx in contexts], timeout=2, raise_error=True) + except gevent.Timeout: + raise RuntimeError("context could not terminate, open sockets likely remain in test") + + def skip_green(self): + raise SkipTest("Skipping because we are green") + +def skip_green(f): + def skipping_test(self, *args, **kwargs): + if self.green: + raise SkipTest("Skipping because we are green") + else: + return f(self, *args, **kwargs) + return skipping_test diff --git a/scripts/external_libs/zmq/tests/test_auth.py b/scripts/external_libs/zmq/tests/test_auth.py new file mode 100644 index 00000000..d350f61f --- /dev/null +++ b/scripts/external_libs/zmq/tests/test_auth.py @@ -0,0 +1,431 @@ +# -*- coding: utf8 -*- + +# Copyright (C) PyZMQ Developers +# Distributed under the terms of the Modified BSD License. + +import logging +import os +import shutil +import sys +import tempfile + +import zmq.auth +from zmq.auth.ioloop import IOLoopAuthenticator +from zmq.auth.thread import ThreadAuthenticator + +from zmq.eventloop import ioloop, zmqstream +from zmq.tests import (BaseZMQTestCase, SkipTest) + +class BaseAuthTestCase(BaseZMQTestCase): + def setUp(self): + if zmq.zmq_version_info() < (4,0): + raise SkipTest("security is new in libzmq 4.0") + try: + zmq.curve_keypair() + except zmq.ZMQError: + raise SkipTest("security requires libzmq to be linked against libsodium") + super(BaseAuthTestCase, self).setUp() + # enable debug logging while we run tests + logging.getLogger('zmq.auth').setLevel(logging.DEBUG) + self.auth = self.make_auth() + self.auth.start() + self.base_dir, self.public_keys_dir, self.secret_keys_dir = self.create_certs() + + def make_auth(self): + raise NotImplementedError() + + def tearDown(self): + if self.auth: + self.auth.stop() + self.auth = None + self.remove_certs(self.base_dir) + super(BaseAuthTestCase, self).tearDown() + + def create_certs(self): + """Create CURVE certificates for a test""" + + # Create temporary CURVE keypairs for this test run. We create all keys in a + # temp directory and then move them into the appropriate private or public + # directory. 
+ + base_dir = tempfile.mkdtemp() + keys_dir = os.path.join(base_dir, 'certificates') + public_keys_dir = os.path.join(base_dir, 'public_keys') + secret_keys_dir = os.path.join(base_dir, 'private_keys') + + os.mkdir(keys_dir) + os.mkdir(public_keys_dir) + os.mkdir(secret_keys_dir) + + server_public_file, server_secret_file = zmq.auth.create_certificates(keys_dir, "server") + client_public_file, client_secret_file = zmq.auth.create_certificates(keys_dir, "client") + + for key_file in os.listdir(keys_dir): + if key_file.endswith(".key"): + shutil.move(os.path.join(keys_dir, key_file), + os.path.join(public_keys_dir, '.')) + + for key_file in os.listdir(keys_dir): + if key_file.endswith(".key_secret"): + shutil.move(os.path.join(keys_dir, key_file), + os.path.join(secret_keys_dir, '.')) + + return (base_dir, public_keys_dir, secret_keys_dir) + + def remove_certs(self, base_dir): + """Remove certificates for a test""" + shutil.rmtree(base_dir) + + def load_certs(self, secret_keys_dir): + """Return server and client certificate keys""" + server_secret_file = os.path.join(secret_keys_dir, "server.key_secret") + client_secret_file = os.path.join(secret_keys_dir, "client.key_secret") + + server_public, server_secret = zmq.auth.load_certificate(server_secret_file) + client_public, client_secret = zmq.auth.load_certificate(client_secret_file) + + return server_public, server_secret, client_public, client_secret + + +class TestThreadAuthentication(BaseAuthTestCase): + """Test authentication running in a thread""" + + def make_auth(self): + return ThreadAuthenticator(self.context) + + def can_connect(self, server, client): + """Check if client can connect to server using tcp transport""" + result = False + iface = 'tcp://127.0.0.1' + port = server.bind_to_random_port(iface) + client.connect("%s:%i" % (iface, port)) + msg = [b"Hello World"] + server.send_multipart(msg) + if client.poll(1000): + rcvd_msg = client.recv_multipart() + self.assertEqual(rcvd_msg, msg) + result = True + return result + + def test_null(self): + """threaded auth - NULL""" + # A default NULL connection should always succeed, and not + # go through our authentication infrastructure at all. + self.auth.stop() + self.auth = None + + server = self.socket(zmq.PUSH) + client = self.socket(zmq.PULL) + self.assertTrue(self.can_connect(server, client)) + + # By setting a domain we switch on authentication for NULL sockets, + # though no policies are configured yet. The client connection + # should still be allowed. + server = self.socket(zmq.PUSH) + server.zap_domain = b'global' + client = self.socket(zmq.PULL) + self.assertTrue(self.can_connect(server, client)) + + def test_blacklist(self): + """threaded auth - Blacklist""" + # Blacklist 127.0.0.1, connection should fail + self.auth.deny('127.0.0.1') + server = self.socket(zmq.PUSH) + # By setting a domain we switch on authentication for NULL sockets, + # though no policies are configured yet. + server.zap_domain = b'global' + client = self.socket(zmq.PULL) + self.assertFalse(self.can_connect(server, client)) + + def test_whitelist(self): + """threaded auth - Whitelist""" + # Whitelist 127.0.0.1, connection should pass" + self.auth.allow('127.0.0.1') + server = self.socket(zmq.PUSH) + # By setting a domain we switch on authentication for NULL sockets, + # though no policies are configured yet. 
+ server.zap_domain = b'global' + client = self.socket(zmq.PULL) + self.assertTrue(self.can_connect(server, client)) + + def test_plain(self): + """threaded auth - PLAIN""" + + # Try PLAIN authentication - without configuring server, connection should fail + server = self.socket(zmq.PUSH) + server.plain_server = True + client = self.socket(zmq.PULL) + client.plain_username = b'admin' + client.plain_password = b'Password' + self.assertFalse(self.can_connect(server, client)) + + # Try PLAIN authentication - with server configured, connection should pass + server = self.socket(zmq.PUSH) + server.plain_server = True + client = self.socket(zmq.PULL) + client.plain_username = b'admin' + client.plain_password = b'Password' + self.auth.configure_plain(domain='*', passwords={'admin': 'Password'}) + self.assertTrue(self.can_connect(server, client)) + + # Try PLAIN authentication - with bogus credentials, connection should fail + server = self.socket(zmq.PUSH) + server.plain_server = True + client = self.socket(zmq.PULL) + client.plain_username = b'admin' + client.plain_password = b'Bogus' + self.assertFalse(self.can_connect(server, client)) + + # Remove authenticator and check that a normal connection works + self.auth.stop() + self.auth = None + + server = self.socket(zmq.PUSH) + client = self.socket(zmq.PULL) + self.assertTrue(self.can_connect(server, client)) + client.close() + server.close() + + def test_curve(self): + """threaded auth - CURVE""" + self.auth.allow('127.0.0.1') + certs = self.load_certs(self.secret_keys_dir) + server_public, server_secret, client_public, client_secret = certs + + #Try CURVE authentication - without configuring server, connection should fail + server = self.socket(zmq.PUSH) + server.curve_publickey = server_public + server.curve_secretkey = server_secret + server.curve_server = True + client = self.socket(zmq.PULL) + client.curve_publickey = client_public + client.curve_secretkey = client_secret + client.curve_serverkey = server_public + self.assertFalse(self.can_connect(server, client)) + + #Try CURVE authentication - with server configured to CURVE_ALLOW_ANY, connection should pass + self.auth.configure_curve(domain='*', location=zmq.auth.CURVE_ALLOW_ANY) + server = self.socket(zmq.PUSH) + server.curve_publickey = server_public + server.curve_secretkey = server_secret + server.curve_server = True + client = self.socket(zmq.PULL) + client.curve_publickey = client_public + client.curve_secretkey = client_secret + client.curve_serverkey = server_public + self.assertTrue(self.can_connect(server, client)) + + # Try CURVE authentication - with server configured, connection should pass + self.auth.configure_curve(domain='*', location=self.public_keys_dir) + server = self.socket(zmq.PUSH) + server.curve_publickey = server_public + server.curve_secretkey = server_secret + server.curve_server = True + client = self.socket(zmq.PULL) + client.curve_publickey = client_public + client.curve_secretkey = client_secret + client.curve_serverkey = server_public + self.assertTrue(self.can_connect(server, client)) + + # Remove authenticator and check that a normal connection works + self.auth.stop() + self.auth = None + + # Try connecting using NULL and no authentication enabled, connection should pass + server = self.socket(zmq.PUSH) + client = self.socket(zmq.PULL) + self.assertTrue(self.can_connect(server, client)) + + +def with_ioloop(method, expect_success=True): + """decorator for running tests with an IOLoop""" + def test_method(self): + r = method(self) + + loop = 
self.io_loop
+ if expect_success:
+ self.pullstream.on_recv(self.on_message_succeed)
+ else:
+ self.pullstream.on_recv(self.on_message_fail)
+
+ t = loop.time()
+ loop.add_callback(self.attempt_connection)
+ loop.add_callback(self.send_msg)
+ if expect_success:
+ loop.add_timeout(t + 1, self.on_test_timeout_fail)
+ else:
+ loop.add_timeout(t + 1, self.on_test_timeout_succeed)
+
+ loop.start()
+ if self.fail_msg:
+ self.fail(self.fail_msg)
+
+ return r
+ return test_method
+
+def should_auth(method):
+ return with_ioloop(method, True)
+
+def should_not_auth(method):
+ return with_ioloop(method, False)
+
+class TestIOLoopAuthentication(BaseAuthTestCase):
+ """Test authentication running in ioloop"""
+
+ def setUp(self):
+ self.fail_msg = None
+ self.io_loop = ioloop.IOLoop()
+ super(TestIOLoopAuthentication, self).setUp()
+ self.server = self.socket(zmq.PUSH)
+ self.client = self.socket(zmq.PULL)
+ self.pushstream = zmqstream.ZMQStream(self.server, self.io_loop)
+ self.pullstream = zmqstream.ZMQStream(self.client, self.io_loop)
+
+ def make_auth(self):
+ return IOLoopAuthenticator(self.context, io_loop=self.io_loop)
+
+ def tearDown(self):
+ if self.auth:
+ self.auth.stop()
+ self.auth = None
+ self.io_loop.close(all_fds=True)
+ super(TestIOLoopAuthentication, self).tearDown()
+
+ def attempt_connection(self):
+ """Check if client can connect to server using tcp transport"""
+ iface = 'tcp://127.0.0.1'
+ port = self.server.bind_to_random_port(iface)
+ self.client.connect("%s:%i" % (iface, port))
+
+ def send_msg(self):
+ """Send a message from server to a client"""
+ msg = [b"Hello World"]
+ self.pushstream.send_multipart(msg)
+
+ def on_message_succeed(self, frames):
+ """A message was received, as expected."""
+ if frames != [b"Hello World"]:
+ self.fail_msg = "Unexpected message received"
+ self.io_loop.stop()
+
+ def on_message_fail(self, frames):
+ """A message was received, unexpectedly."""
+ self.fail_msg = 'Received message unexpectedly; security failed'
+ self.io_loop.stop()
+
+ def on_test_timeout_succeed(self):
+ """Test timer expired, indicating test success"""
+ self.io_loop.stop()
+
+ def on_test_timeout_fail(self):
+ """Test timer expired, indicating test failure"""
+ self.fail_msg = 'Test timed out'
+ self.io_loop.stop()
+
+ @should_auth
+ def test_none(self):
+ """ioloop auth - NONE"""
+ # A default NULL connection should always succeed, and not
+ # go through our authentication infrastructure at all.
+ # no auth should be running
+ self.auth.stop()
+ self.auth = None
+
+ @should_auth
+ def test_null(self):
+ """ioloop auth - NULL"""
+ # By setting a domain we switch on authentication for NULL sockets,
+ # though no policies are configured yet. The client connection
+ # should still be allowed.
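# --- aside: the event-loop counterpart used by the class above, as a sketch.
# Assumes the zmq.auth.ioloop module shipped with this pyzmq snapshot.
import zmq
from zmq.eventloop import ioloop
from zmq.auth.ioloop import IOLoopAuthenticator

ctx = zmq.Context.instance()
loop = ioloop.IOLoop()
auth = IOLoopAuthenticator(ctx, io_loop=loop)
auth.start()   # ZAP requests are served as loop callbacks, not in a thread
# ---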
+ self.server.zap_domain = b'global' + + @should_not_auth + def test_blacklist(self): + """ioloop auth - Blacklist""" + # Blacklist 127.0.0.1, connection should fail + self.auth.deny('127.0.0.1') + self.server.zap_domain = b'global' + + @should_auth + def test_whitelist(self): + """ioloop auth - Whitelist""" + # Whitelist 127.0.0.1, which overrides the blacklist, connection should pass" + self.auth.allow('127.0.0.1') + + self.server.setsockopt(zmq.ZAP_DOMAIN, b'global') + + @should_not_auth + def test_plain_unconfigured_server(self): + """ioloop auth - PLAIN, unconfigured server""" + self.client.plain_username = b'admin' + self.client.plain_password = b'Password' + # Try PLAIN authentication - without configuring server, connection should fail + self.server.plain_server = True + + @should_auth + def test_plain_configured_server(self): + """ioloop auth - PLAIN, configured server""" + self.client.plain_username = b'admin' + self.client.plain_password = b'Password' + # Try PLAIN authentication - with server configured, connection should pass + self.server.plain_server = True + self.auth.configure_plain(domain='*', passwords={'admin': 'Password'}) + + @should_not_auth + def test_plain_bogus_credentials(self): + """ioloop auth - PLAIN, bogus credentials""" + self.client.plain_username = b'admin' + self.client.plain_password = b'Bogus' + self.server.plain_server = True + + self.auth.configure_plain(domain='*', passwords={'admin': 'Password'}) + + @should_not_auth + def test_curve_unconfigured_server(self): + """ioloop auth - CURVE, unconfigured server""" + certs = self.load_certs(self.secret_keys_dir) + server_public, server_secret, client_public, client_secret = certs + + self.auth.allow('127.0.0.1') + + self.server.curve_publickey = server_public + self.server.curve_secretkey = server_secret + self.server.curve_server = True + + self.client.curve_publickey = client_public + self.client.curve_secretkey = client_secret + self.client.curve_serverkey = server_public + + @should_auth + def test_curve_allow_any(self): + """ioloop auth - CURVE, CURVE_ALLOW_ANY""" + certs = self.load_certs(self.secret_keys_dir) + server_public, server_secret, client_public, client_secret = certs + + self.auth.allow('127.0.0.1') + self.auth.configure_curve(domain='*', location=zmq.auth.CURVE_ALLOW_ANY) + + self.server.curve_publickey = server_public + self.server.curve_secretkey = server_secret + self.server.curve_server = True + + self.client.curve_publickey = client_public + self.client.curve_secretkey = client_secret + self.client.curve_serverkey = server_public + + @should_auth + def test_curve_configured_server(self): + """ioloop auth - CURVE, configured server""" + self.auth.allow('127.0.0.1') + certs = self.load_certs(self.secret_keys_dir) + server_public, server_secret, client_public, client_secret = certs + + self.auth.configure_curve(domain='*', location=self.public_keys_dir) + + self.server.curve_publickey = server_public + self.server.curve_secretkey = server_secret + self.server.curve_server = True + + self.client.curve_publickey = client_public + self.client.curve_secretkey = client_secret + self.client.curve_serverkey = server_public diff --git a/scripts/external_libs/zmq/tests/test_cffi_backend.py b/scripts/external_libs/zmq/tests/test_cffi_backend.py new file mode 100644 index 00000000..1f85eebf --- /dev/null +++ b/scripts/external_libs/zmq/tests/test_cffi_backend.py @@ -0,0 +1,310 @@ +# -*- coding: utf8 -*- + +import sys +import time + +from unittest import TestCase + +from zmq.tests import 
BaseZMQTestCase, SkipTest + +try: + from zmq.backend.cffi import ( + zmq_version_info, + PUSH, PULL, IDENTITY, + REQ, REP, POLLIN, POLLOUT, + ) + from zmq.backend.cffi._cffi import ffi, C + have_ffi_backend = True +except ImportError: + have_ffi_backend = False + + +class TestCFFIBackend(TestCase): + + def setUp(self): + if not have_ffi_backend or not 'PyPy' in sys.version: + raise SkipTest('PyPy Tests Only') + + def test_zmq_version_info(self): + version = zmq_version_info() + + assert version[0] in range(2,11) + + def test_zmq_ctx_new_destroy(self): + ctx = C.zmq_ctx_new() + + assert ctx != ffi.NULL + assert 0 == C.zmq_ctx_destroy(ctx) + + def test_zmq_socket_open_close(self): + ctx = C.zmq_ctx_new() + socket = C.zmq_socket(ctx, PUSH) + + assert ctx != ffi.NULL + assert ffi.NULL != socket + assert 0 == C.zmq_close(socket) + assert 0 == C.zmq_ctx_destroy(ctx) + + def test_zmq_setsockopt(self): + ctx = C.zmq_ctx_new() + socket = C.zmq_socket(ctx, PUSH) + + identity = ffi.new('char[3]', 'zmq') + ret = C.zmq_setsockopt(socket, IDENTITY, ffi.cast('void*', identity), 3) + + assert ret == 0 + assert ctx != ffi.NULL + assert ffi.NULL != socket + assert 0 == C.zmq_close(socket) + assert 0 == C.zmq_ctx_destroy(ctx) + + def test_zmq_getsockopt(self): + ctx = C.zmq_ctx_new() + socket = C.zmq_socket(ctx, PUSH) + + identity = ffi.new('char[]', 'zmq') + ret = C.zmq_setsockopt(socket, IDENTITY, ffi.cast('void*', identity), 3) + assert ret == 0 + + option_len = ffi.new('size_t*', 3) + option = ffi.new('char*') + ret = C.zmq_getsockopt(socket, + IDENTITY, + ffi.cast('void*', option), + option_len) + + assert ret == 0 + assert ffi.string(ffi.cast('char*', option))[0] == "z" + assert ffi.string(ffi.cast('char*', option))[1] == "m" + assert ffi.string(ffi.cast('char*', option))[2] == "q" + assert ctx != ffi.NULL + assert ffi.NULL != socket + assert 0 == C.zmq_close(socket) + assert 0 == C.zmq_ctx_destroy(ctx) + + def test_zmq_bind(self): + ctx = C.zmq_ctx_new() + socket = C.zmq_socket(ctx, 8) + + assert 0 == C.zmq_bind(socket, 'tcp://*:4444') + assert ctx != ffi.NULL + assert ffi.NULL != socket + assert 0 == C.zmq_close(socket) + assert 0 == C.zmq_ctx_destroy(ctx) + + def test_zmq_bind_connect(self): + ctx = C.zmq_ctx_new() + + socket1 = C.zmq_socket(ctx, PUSH) + socket2 = C.zmq_socket(ctx, PULL) + + assert 0 == C.zmq_bind(socket1, 'tcp://*:4444') + assert 0 == C.zmq_connect(socket2, 'tcp://127.0.0.1:4444') + assert ctx != ffi.NULL + assert ffi.NULL != socket1 + assert ffi.NULL != socket2 + assert 0 == C.zmq_close(socket1) + assert 0 == C.zmq_close(socket2) + assert 0 == C.zmq_ctx_destroy(ctx) + + def test_zmq_msg_init_close(self): + zmq_msg = ffi.new('zmq_msg_t*') + + assert ffi.NULL != zmq_msg + assert 0 == C.zmq_msg_init(zmq_msg) + assert 0 == C.zmq_msg_close(zmq_msg) + + def test_zmq_msg_init_size(self): + zmq_msg = ffi.new('zmq_msg_t*') + + assert ffi.NULL != zmq_msg + assert 0 == C.zmq_msg_init_size(zmq_msg, 10) + assert 0 == C.zmq_msg_close(zmq_msg) + + def test_zmq_msg_init_data(self): + zmq_msg = ffi.new('zmq_msg_t*') + message = ffi.new('char[5]', 'Hello') + + assert 0 == C.zmq_msg_init_data(zmq_msg, + ffi.cast('void*', message), + 5, + ffi.NULL, + ffi.NULL) + + assert ffi.NULL != zmq_msg + assert 0 == C.zmq_msg_close(zmq_msg) + + def test_zmq_msg_data(self): + zmq_msg = ffi.new('zmq_msg_t*') + message = ffi.new('char[]', 'Hello') + assert 0 == C.zmq_msg_init_data(zmq_msg, + ffi.cast('void*', message), + 5, + ffi.NULL, + ffi.NULL) + + data = C.zmq_msg_data(zmq_msg) + + assert ffi.NULL != zmq_msg + 
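# --- aside: the raw-handle lifecycle that every CFFI test above repeats,
# isolated as a sketch (assumes the CFFI backend is importable, i.e. PyPy;
# ffi/C come from the private module the tests themselves import).
from zmq.backend.cffi import PUSH
from zmq.backend.cffi._cffi import ffi, C

ctx = C.zmq_ctx_new()            # void* context handle
sock = C.zmq_socket(ctx, PUSH)   # void* socket handle
assert sock != ffi.NULL
assert 0 == C.zmq_close(sock)    # handles are released explicitly, in order
assert 0 == C.zmq_ctx_destroy(ctx)
# ---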
assert ffi.string(ffi.cast("char*", data)) == 'Hello' + assert 0 == C.zmq_msg_close(zmq_msg) + + + def test_zmq_send(self): + ctx = C.zmq_ctx_new() + + sender = C.zmq_socket(ctx, REQ) + receiver = C.zmq_socket(ctx, REP) + + assert 0 == C.zmq_bind(receiver, 'tcp://*:7777') + assert 0 == C.zmq_connect(sender, 'tcp://127.0.0.1:7777') + + time.sleep(0.1) + + zmq_msg = ffi.new('zmq_msg_t*') + message = ffi.new('char[5]', 'Hello') + + C.zmq_msg_init_data(zmq_msg, + ffi.cast('void*', message), + ffi.cast('size_t', 5), + ffi.NULL, + ffi.NULL) + + assert 5 == C.zmq_msg_send(zmq_msg, sender, 0) + assert 0 == C.zmq_msg_close(zmq_msg) + assert C.zmq_close(sender) == 0 + assert C.zmq_close(receiver) == 0 + assert C.zmq_ctx_destroy(ctx) == 0 + + def test_zmq_recv(self): + ctx = C.zmq_ctx_new() + + sender = C.zmq_socket(ctx, REQ) + receiver = C.zmq_socket(ctx, REP) + + assert 0 == C.zmq_bind(receiver, 'tcp://*:2222') + assert 0 == C.zmq_connect(sender, 'tcp://127.0.0.1:2222') + + time.sleep(0.1) + + zmq_msg = ffi.new('zmq_msg_t*') + message = ffi.new('char[5]', 'Hello') + + C.zmq_msg_init_data(zmq_msg, + ffi.cast('void*', message), + ffi.cast('size_t', 5), + ffi.NULL, + ffi.NULL) + + zmq_msg2 = ffi.new('zmq_msg_t*') + C.zmq_msg_init(zmq_msg2) + + assert 5 == C.zmq_msg_send(zmq_msg, sender, 0) + assert 5 == C.zmq_msg_recv(zmq_msg2, receiver, 0) + assert 5 == C.zmq_msg_size(zmq_msg2) + assert b"Hello" == ffi.buffer(C.zmq_msg_data(zmq_msg2), + C.zmq_msg_size(zmq_msg2))[:] + assert C.zmq_close(sender) == 0 + assert C.zmq_close(receiver) == 0 + assert C.zmq_ctx_destroy(ctx) == 0 + + def test_zmq_poll(self): + ctx = C.zmq_ctx_new() + + sender = C.zmq_socket(ctx, REQ) + receiver = C.zmq_socket(ctx, REP) + + r1 = C.zmq_bind(receiver, 'tcp://*:3333') + r2 = C.zmq_connect(sender, 'tcp://127.0.0.1:3333') + + zmq_msg = ffi.new('zmq_msg_t*') + message = ffi.new('char[5]', 'Hello') + + C.zmq_msg_init_data(zmq_msg, + ffi.cast('void*', message), + ffi.cast('size_t', 5), + ffi.NULL, + ffi.NULL) + + receiver_pollitem = ffi.new('zmq_pollitem_t*') + receiver_pollitem.socket = receiver + receiver_pollitem.fd = 0 + receiver_pollitem.events = POLLIN | POLLOUT + receiver_pollitem.revents = 0 + + ret = C.zmq_poll(ffi.NULL, 0, 0) + assert ret == 0 + + ret = C.zmq_poll(receiver_pollitem, 1, 0) + assert ret == 0 + + ret = C.zmq_msg_send(zmq_msg, sender, 0) + print(ffi.string(C.zmq_strerror(C.zmq_errno()))) + assert ret == 5 + + time.sleep(0.2) + + ret = C.zmq_poll(receiver_pollitem, 1, 0) + assert ret == 1 + + assert int(receiver_pollitem.revents) & POLLIN + assert not int(receiver_pollitem.revents) & POLLOUT + + zmq_msg2 = ffi.new('zmq_msg_t*') + C.zmq_msg_init(zmq_msg2) + + ret_recv = C.zmq_msg_recv(zmq_msg2, receiver, 0) + assert ret_recv == 5 + + assert 5 == C.zmq_msg_size(zmq_msg2) + assert b"Hello" == ffi.buffer(C.zmq_msg_data(zmq_msg2), + C.zmq_msg_size(zmq_msg2))[:] + + sender_pollitem = ffi.new('zmq_pollitem_t*') + sender_pollitem.socket = sender + sender_pollitem.fd = 0 + sender_pollitem.events = POLLIN | POLLOUT + sender_pollitem.revents = 0 + + ret = C.zmq_poll(sender_pollitem, 1, 0) + assert ret == 0 + + zmq_msg_again = ffi.new('zmq_msg_t*') + message_again = ffi.new('char[11]', 'Hello Again') + + C.zmq_msg_init_data(zmq_msg_again, + ffi.cast('void*', message_again), + ffi.cast('size_t', 11), + ffi.NULL, + ffi.NULL) + + assert 11 == C.zmq_msg_send(zmq_msg_again, receiver, 0) + + time.sleep(0.2) + + assert 0 <= C.zmq_poll(sender_pollitem, 1, 0) + assert int(sender_pollitem.revents) & POLLIN + assert 11 == 
C.zmq_msg_recv(zmq_msg2, sender, 0) + assert 11 == C.zmq_msg_size(zmq_msg2) + assert b"Hello Again" == ffi.buffer(C.zmq_msg_data(zmq_msg2), + int(C.zmq_msg_size(zmq_msg2)))[:] + assert 0 == C.zmq_close(sender) + assert 0 == C.zmq_close(receiver) + assert 0 == C.zmq_ctx_destroy(ctx) + assert 0 == C.zmq_msg_close(zmq_msg) + assert 0 == C.zmq_msg_close(zmq_msg2) + assert 0 == C.zmq_msg_close(zmq_msg_again) + + def test_zmq_stopwatch_functions(self): + stopwatch = C.zmq_stopwatch_start() + ret = C.zmq_stopwatch_stop(stopwatch) + + assert ffi.NULL != stopwatch + assert 0 < int(ret) + + def test_zmq_sleep(self): + try: + C.zmq_sleep(1) + except Exception as e: + raise AssertionError("Error executing zmq_sleep(int)") + diff --git a/scripts/external_libs/zmq/tests/test_constants.py b/scripts/external_libs/zmq/tests/test_constants.py new file mode 100644 index 00000000..d32b2b48 --- /dev/null +++ b/scripts/external_libs/zmq/tests/test_constants.py @@ -0,0 +1,104 @@ +# Copyright (C) PyZMQ Developers +# Distributed under the terms of the Modified BSD License. + +import json +from unittest import TestCase + +import zmq + +from zmq.utils import constant_names +from zmq.sugar import constants as sugar_constants +from zmq.backend import constants as backend_constants + +all_set = set(constant_names.all_names) + +class TestConstants(TestCase): + + def _duplicate_test(self, namelist, listname): + """test that a given list has no duplicates""" + dupes = {} + for name in set(namelist): + cnt = namelist.count(name) + if cnt > 1: + dupes[name] = cnt + if dupes: + self.fail("The following names occur more than once in %s: %s" % (listname, json.dumps(dupes, indent=2))) + + def test_duplicate_all(self): + return self._duplicate_test(constant_names.all_names, "all_names") + + def _change_key(self, change, version): + """return changed-in key""" + return "%s-in %d.%d.%d" % tuple([change] + list(version)) + + def test_duplicate_changed(self): + all_changed = [] + for change in ("new", "removed"): + d = getattr(constant_names, change + "_in") + for version, namelist in d.items(): + all_changed.extend(namelist) + self._duplicate_test(namelist, self._change_key(change, version)) + + self._duplicate_test(all_changed, "all-changed") + + def test_changed_in_all(self): + missing = {} + for change in ("new", "removed"): + d = getattr(constant_names, change + "_in") + for version, namelist in d.items(): + key = self._change_key(change, version) + for name in namelist: + if name not in all_set: + if key not in missing: + missing[key] = [] + missing[key].append(name) + + if missing: + self.fail( + "The following names are missing in `all_names`: %s" % json.dumps(missing, indent=2) + ) + + def test_no_negative_constants(self): + for name in sugar_constants.__all__: + self.assertNotEqual(getattr(zmq, name), sugar_constants._UNDEFINED) + + def test_undefined_constants(self): + all_aliases = [] + for alias_group in sugar_constants.aliases: + all_aliases.extend(alias_group) + + for name in all_set.difference(all_aliases): + raw = getattr(backend_constants, name) + if raw == sugar_constants._UNDEFINED: + self.assertRaises(AttributeError, getattr, zmq, name) + else: + self.assertEqual(getattr(zmq, name), raw) + + def test_new(self): + zmq_version = zmq.zmq_version_info() + for version, new_names in constant_names.new_in.items(): + should_have = zmq_version >= version + for name in new_names: + try: + value = getattr(zmq, name) + except AttributeError: + if should_have: + self.fail("AttributeError: zmq.%s" % name) + else: + if not 
should_have: + self.fail("Shouldn't have: zmq.%s=%s" % (name, value)) + + def test_removed(self): + zmq_version = zmq.zmq_version_info() + for version, new_names in constant_names.removed_in.items(): + should_have = zmq_version < version + for name in new_names: + try: + value = getattr(zmq, name) + except AttributeError: + if should_have: + self.fail("AttributeError: zmq.%s" % name) + else: + if not should_have: + self.fail("Shouldn't have: zmq.%s=%s" % (name, value)) + diff --git a/scripts/external_libs/zmq/tests/test_context.py b/scripts/external_libs/zmq/tests/test_context.py new file mode 100644 index 00000000..e3280778 --- /dev/null +++ b/scripts/external_libs/zmq/tests/test_context.py @@ -0,0 +1,257 @@ +# Copyright (C) PyZMQ Developers +# Distributed under the terms of the Modified BSD License. + +import gc +import sys +import time +from threading import Thread, Event + +import zmq +from zmq.tests import ( + BaseZMQTestCase, have_gevent, GreenTest, skip_green, PYPY, SkipTest, +) + + +class TestContext(BaseZMQTestCase): + + def test_init(self): + c1 = self.Context() + self.assert_(isinstance(c1, self.Context)) + del c1 + c2 = self.Context() + self.assert_(isinstance(c2, self.Context)) + del c2 + c3 = self.Context() + self.assert_(isinstance(c3, self.Context)) + del c3 + + def test_dir(self): + ctx = self.Context() + self.assertTrue('socket' in dir(ctx)) + if zmq.zmq_version_info() > (3,): + self.assertTrue('IO_THREADS' in dir(ctx)) + ctx.term() + + def test_term(self): + c = self.Context() + c.term() + self.assert_(c.closed) + + def test_context_manager(self): + with self.Context() as c: + pass + self.assert_(c.closed) + + def test_fail_init(self): + self.assertRaisesErrno(zmq.EINVAL, self.Context, -1) + + def test_term_hang(self): + rep,req = self.create_bound_pair(zmq.ROUTER, zmq.DEALER) + req.setsockopt(zmq.LINGER, 0) + req.send(b'hello', copy=False) + req.close() + rep.close() + self.context.term() + + def test_instance(self): + ctx = self.Context.instance() + c2 = self.Context.instance(io_threads=2) + self.assertTrue(c2 is ctx) + c2.term() + c3 = self.Context.instance() + c4 = self.Context.instance() + self.assertFalse(c3 is c2) + self.assertFalse(c3.closed) + self.assertTrue(c3 is c4) + + def test_many_sockets(self): + """opening and closing many sockets shouldn't cause problems""" + ctx = self.Context() + for i in range(16): + sockets = [ ctx.socket(zmq.REP) for i in range(65) ] + [ s.close() for s in sockets ] + # give the reaper a chance + time.sleep(1e-2) + ctx.term() + + def test_sockopts(self): + """setting socket options with ctx attributes""" + ctx = self.Context() + ctx.linger = 5 + self.assertEqual(ctx.linger, 5) + s = ctx.socket(zmq.REQ) + self.assertEqual(s.linger, 5) + self.assertEqual(s.getsockopt(zmq.LINGER), 5) + s.close() + # check that subscribe doesn't get set on sockets that don't subscribe: + ctx.subscribe = b'' + s = ctx.socket(zmq.REQ) + s.close() + + ctx.term() + + + def test_destroy(self): + """Context.destroy should close sockets""" + ctx = self.Context() + sockets = [ ctx.socket(zmq.REP) for i in range(65) ] + + # close half of the sockets + [ s.close() for s in sockets[::2] ] + + ctx.destroy() + # reaper is not instantaneous + time.sleep(1e-2) + for s in sockets: + self.assertTrue(s.closed) + + def test_destroy_linger(self): + """Context.destroy should set linger on closing sockets""" + req,rep = self.create_bound_pair(zmq.REQ, zmq.REP) + req.send(b'hi') + time.sleep(1e-2) + self.context.destroy(linger=0) + # reaper is not instantaneous + 
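# --- aside: term() vs destroy(), the distinction these context tests pin
# down (sketch using only the public API).
import zmq

ctx = zmq.Context()
s = ctx.socket(zmq.PUSH)
ctx.destroy(linger=0)   # closes s with LINGER=0, then terminates ctx;
                        # plain ctx.term() would block until s is closed
assert s.closed and ctx.closed
# ---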
time.sleep(1e-2) + for s in (req,rep): + self.assertTrue(s.closed) + + def test_term_noclose(self): + """Context.term won't close sockets""" + ctx = self.Context() + s = ctx.socket(zmq.REQ) + self.assertFalse(s.closed) + t = Thread(target=ctx.term) + t.start() + t.join(timeout=0.1) + self.assertTrue(t.is_alive(), "Context should be waiting") + s.close() + t.join(timeout=0.1) + self.assertFalse(t.is_alive(), "Context should have closed") + + def test_gc(self): + """test close&term by garbage collection alone""" + if PYPY: + raise SkipTest("GC doesn't work ") + + # test credit @dln (GH #137): + def gcf(): + def inner(): + ctx = self.Context() + s = ctx.socket(zmq.PUSH) + inner() + gc.collect() + t = Thread(target=gcf) + t.start() + t.join(timeout=1) + self.assertFalse(t.is_alive(), "Garbage collection should have cleaned up context") + + def test_cyclic_destroy(self): + """ctx.destroy should succeed when cyclic ref prevents gc""" + # test credit @dln (GH #137): + class CyclicReference(object): + def __init__(self, parent=None): + self.parent = parent + + def crash(self, sock): + self.sock = sock + self.child = CyclicReference(self) + + def crash_zmq(): + ctx = self.Context() + sock = ctx.socket(zmq.PULL) + c = CyclicReference() + c.crash(sock) + ctx.destroy() + + crash_zmq() + + def test_term_thread(self): + """ctx.term should not crash active threads (#139)""" + ctx = self.Context() + evt = Event() + evt.clear() + + def block(): + s = ctx.socket(zmq.REP) + s.bind_to_random_port('tcp://127.0.0.1') + evt.set() + try: + s.recv() + except zmq.ZMQError as e: + self.assertEqual(e.errno, zmq.ETERM) + return + finally: + s.close() + self.fail("recv should have been interrupted with ETERM") + t = Thread(target=block) + t.start() + + evt.wait(1) + self.assertTrue(evt.is_set(), "sync event never fired") + time.sleep(0.01) + ctx.term() + t.join(timeout=1) + self.assertFalse(t.is_alive(), "term should have interrupted s.recv()") + + def test_destroy_no_sockets(self): + ctx = self.Context() + s = ctx.socket(zmq.PUB) + s.bind_to_random_port('tcp://127.0.0.1') + s.close() + ctx.destroy() + assert s.closed + assert ctx.closed + + def test_ctx_opts(self): + if zmq.zmq_version_info() < (3,): + raise SkipTest("context options require libzmq 3") + ctx = self.Context() + ctx.set(zmq.MAX_SOCKETS, 2) + self.assertEqual(ctx.get(zmq.MAX_SOCKETS), 2) + ctx.max_sockets = 100 + self.assertEqual(ctx.max_sockets, 100) + self.assertEqual(ctx.get(zmq.MAX_SOCKETS), 100) + + def test_shadow(self): + ctx = self.Context() + ctx2 = self.Context.shadow(ctx.underlying) + self.assertEqual(ctx.underlying, ctx2.underlying) + s = ctx.socket(zmq.PUB) + s.close() + del ctx2 + self.assertFalse(ctx.closed) + s = ctx.socket(zmq.PUB) + ctx2 = self.Context.shadow(ctx.underlying) + s2 = ctx2.socket(zmq.PUB) + s.close() + s2.close() + ctx.term() + self.assertRaisesErrno(zmq.EFAULT, ctx2.socket, zmq.PUB) + del ctx2 + + def test_shadow_pyczmq(self): + try: + from pyczmq import zctx, zsocket, zstr + except Exception: + raise SkipTest("Requires pyczmq") + + ctx = zctx.new() + a = zsocket.new(ctx, zmq.PUSH) + zsocket.bind(a, "inproc://a") + ctx2 = self.Context.shadow_pyczmq(ctx) + b = ctx2.socket(zmq.PULL) + b.connect("inproc://a") + zstr.send(a, b'hi') + rcvd = self.recv(b) + self.assertEqual(rcvd, b'hi') + b.close() + + +if False: # disable green context tests + class TestContextGreen(GreenTest, TestContext): + """gevent subclass of context tests""" + # skip tests that use real threads: + test_gc = GreenTest.skip_green + test_term_thread = 
GreenTest.skip_green + test_destroy_linger = GreenTest.skip_green diff --git a/scripts/external_libs/zmq/tests/test_device.py b/scripts/external_libs/zmq/tests/test_device.py new file mode 100644 index 00000000..f8305074 --- /dev/null +++ b/scripts/external_libs/zmq/tests/test_device.py @@ -0,0 +1,146 @@ +# Copyright (C) PyZMQ Developers +# Distributed under the terms of the Modified BSD License. + +import time + +import zmq +from zmq import devices +from zmq.tests import BaseZMQTestCase, SkipTest, have_gevent, GreenTest, PYPY +from zmq.utils.strtypes import (bytes,unicode,basestring) + +if PYPY: + # cleanup of shared Context doesn't work on PyPy + devices.Device.context_factory = zmq.Context + +class TestDevice(BaseZMQTestCase): + + def test_device_types(self): + for devtype in (zmq.STREAMER, zmq.FORWARDER, zmq.QUEUE): + dev = devices.Device(devtype, zmq.PAIR, zmq.PAIR) + self.assertEqual(dev.device_type, devtype) + del dev + + def test_device_attributes(self): + dev = devices.Device(zmq.QUEUE, zmq.SUB, zmq.PUB) + self.assertEqual(dev.in_type, zmq.SUB) + self.assertEqual(dev.out_type, zmq.PUB) + self.assertEqual(dev.device_type, zmq.QUEUE) + self.assertEqual(dev.daemon, True) + del dev + + def test_tsdevice_attributes(self): + dev = devices.Device(zmq.QUEUE, zmq.SUB, zmq.PUB) + self.assertEqual(dev.in_type, zmq.SUB) + self.assertEqual(dev.out_type, zmq.PUB) + self.assertEqual(dev.device_type, zmq.QUEUE) + self.assertEqual(dev.daemon, True) + del dev + + + def test_single_socket_forwarder_connect(self): + dev = devices.ThreadDevice(zmq.QUEUE, zmq.REP, -1) + req = self.context.socket(zmq.REQ) + port = req.bind_to_random_port('tcp://127.0.0.1') + dev.connect_in('tcp://127.0.0.1:%i'%port) + dev.start() + time.sleep(.25) + msg = b'hello' + req.send(msg) + self.assertEqual(msg, self.recv(req)) + del dev + req.close() + dev = devices.ThreadDevice(zmq.QUEUE, zmq.REP, -1) + req = self.context.socket(zmq.REQ) + port = req.bind_to_random_port('tcp://127.0.0.1') + dev.connect_out('tcp://127.0.0.1:%i'%port) + dev.start() + time.sleep(.25) + msg = b'hello again' + req.send(msg) + self.assertEqual(msg, self.recv(req)) + del dev + req.close() + + def test_single_socket_forwarder_bind(self): + dev = devices.ThreadDevice(zmq.QUEUE, zmq.REP, -1) + # select random port: + binder = self.context.socket(zmq.REQ) + port = binder.bind_to_random_port('tcp://127.0.0.1') + binder.close() + time.sleep(0.1) + req = self.context.socket(zmq.REQ) + req.connect('tcp://127.0.0.1:%i'%port) + dev.bind_in('tcp://127.0.0.1:%i'%port) + dev.start() + time.sleep(.25) + msg = b'hello' + req.send(msg) + self.assertEqual(msg, self.recv(req)) + del dev + req.close() + dev = devices.ThreadDevice(zmq.QUEUE, zmq.REP, -1) + # select random port: + binder = self.context.socket(zmq.REQ) + port = binder.bind_to_random_port('tcp://127.0.0.1') + binder.close() + time.sleep(0.1) + req = self.context.socket(zmq.REQ) + req.connect('tcp://127.0.0.1:%i'%port) + dev.bind_in('tcp://127.0.0.1:%i'%port) + dev.start() + time.sleep(.25) + msg = b'hello again' + req.send(msg) + self.assertEqual(msg, self.recv(req)) + del dev + req.close() + + def test_proxy(self): + if zmq.zmq_version_info() < (3,2): + raise SkipTest("Proxies only in libzmq >= 3") + dev = devices.ThreadProxy(zmq.PULL, zmq.PUSH, zmq.PUSH) + binder = self.context.socket(zmq.REQ) + iface = 'tcp://127.0.0.1' + port = binder.bind_to_random_port(iface) + port2 = binder.bind_to_random_port(iface) + port3 = binder.bind_to_random_port(iface) + binder.close() + time.sleep(0.1) + 
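# --- aside: the port-reservation idiom used just above, isolated (sketch).
# Binding a throwaway socket lets the OS pick free ports; closing it frees
# them again for the device, at the cost of a small rebind race.
import zmq

ctx = zmq.Context.instance()
tmp = ctx.socket(zmq.REQ)
port = tmp.bind_to_random_port('tcp://127.0.0.1')   # OS-assigned free port
tmp.close()   # the device may now bind this port (racily)
# ---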
dev.bind_in("%s:%i" % (iface, port)) + dev.bind_out("%s:%i" % (iface, port2)) + dev.bind_mon("%s:%i" % (iface, port3)) + dev.start() + time.sleep(0.25) + msg = b'hello' + push = self.context.socket(zmq.PUSH) + push.connect("%s:%i" % (iface, port)) + pull = self.context.socket(zmq.PULL) + pull.connect("%s:%i" % (iface, port2)) + mon = self.context.socket(zmq.PULL) + mon.connect("%s:%i" % (iface, port3)) + push.send(msg) + self.sockets.extend([push, pull, mon]) + self.assertEqual(msg, self.recv(pull)) + self.assertEqual(msg, self.recv(mon)) + +if have_gevent: + import gevent + import zmq.green + + class TestDeviceGreen(GreenTest, BaseZMQTestCase): + + def test_green_device(self): + rep = self.context.socket(zmq.REP) + req = self.context.socket(zmq.REQ) + self.sockets.extend([req, rep]) + port = rep.bind_to_random_port('tcp://127.0.0.1') + g = gevent.spawn(zmq.green.device, zmq.QUEUE, rep, rep) + req.connect('tcp://127.0.0.1:%i' % port) + req.send(b'hi') + timeout = gevent.Timeout(3) + timeout.start() + receiver = gevent.spawn(req.recv) + self.assertEqual(receiver.get(2), b'hi') + timeout.cancel() + g.kill(block=True) + diff --git a/scripts/external_libs/zmq/tests/test_error.py b/scripts/external_libs/zmq/tests/test_error.py new file mode 100644 index 00000000..a2eee14a --- /dev/null +++ b/scripts/external_libs/zmq/tests/test_error.py @@ -0,0 +1,43 @@ +# -*- coding: utf8 -*- +# Copyright (C) PyZMQ Developers +# Distributed under the terms of the Modified BSD License. + +import sys +import time + +import zmq +from zmq import ZMQError, strerror, Again, ContextTerminated +from zmq.tests import BaseZMQTestCase + +if sys.version_info[0] >= 3: + long = int + +class TestZMQError(BaseZMQTestCase): + + def test_strerror(self): + """test that strerror gets the right type.""" + for i in range(10): + e = strerror(i) + self.assertTrue(isinstance(e, str)) + + def test_zmqerror(self): + for errno in range(10): + e = ZMQError(errno) + self.assertEqual(e.errno, errno) + self.assertEqual(str(e), strerror(errno)) + + def test_again(self): + s = self.context.socket(zmq.REP) + self.assertRaises(Again, s.recv, zmq.NOBLOCK) + self.assertRaisesErrno(zmq.EAGAIN, s.recv, zmq.NOBLOCK) + s.close() + + def atest_ctxterm(self): + s = self.context.socket(zmq.REP) + t = Thread(target=self.context.term) + t.start() + self.assertRaises(ContextTerminated, s.recv, zmq.NOBLOCK) + self.assertRaisesErrno(zmq.TERM, s.recv, zmq.NOBLOCK) + s.close() + t.join() + diff --git a/scripts/external_libs/zmq/tests/test_etc.py b/scripts/external_libs/zmq/tests/test_etc.py new file mode 100644 index 00000000..ad224064 --- /dev/null +++ b/scripts/external_libs/zmq/tests/test_etc.py @@ -0,0 +1,15 @@ +# Copyright (c) PyZMQ Developers. +# Distributed under the terms of the Modified BSD License. + +import sys + +import zmq + +from . import skip_if + +@skip_if(zmq.zmq_version_info() < (4,1), "libzmq < 4.1") +def test_has(): + assert not zmq.has('something weird') + has_ipc = zmq.has('ipc') + not_windows = not sys.platform.startswith('win') + assert has_ipc == not_windows diff --git a/scripts/external_libs/zmq/tests/test_imports.py b/scripts/external_libs/zmq/tests/test_imports.py new file mode 100644 index 00000000..c0ddfaac --- /dev/null +++ b/scripts/external_libs/zmq/tests/test_imports.py @@ -0,0 +1,62 @@ +# Copyright (C) PyZMQ Developers +# Distributed under the terms of the Modified BSD License. 
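# --- aside: the file below is a pure import smoke test; the same idea in a
# few lines (importlib is stdlib, module names taken from the tests below).
import importlib

for name in ('zmq', 'zmq.devices', 'zmq.eventloop.ioloop', 'zmq.log.handlers'):
    importlib.import_module(name)   # version-incompatible syntax raises here
# ---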
+ +import sys +from unittest import TestCase + +class TestImports(TestCase): + """Test Imports - the quickest test to ensure that we haven't + introduced version-incompatible syntax errors.""" + + def test_toplevel(self): + """test toplevel import""" + import zmq + + def test_core(self): + """test core imports""" + from zmq import Context + from zmq import Socket + from zmq import Poller + from zmq import Frame + from zmq import constants + from zmq import device, proxy + from zmq import Stopwatch + from zmq import ( + zmq_version, + zmq_version_info, + pyzmq_version, + pyzmq_version_info, + ) + + def test_devices(self): + """test device imports""" + import zmq.devices + from zmq.devices import basedevice + from zmq.devices import monitoredqueue + from zmq.devices import monitoredqueuedevice + + def test_log(self): + """test log imports""" + import zmq.log + from zmq.log import handlers + + def test_eventloop(self): + """test eventloop imports""" + import zmq.eventloop + from zmq.eventloop import ioloop + from zmq.eventloop import zmqstream + from zmq.eventloop.minitornado.platform import auto + from zmq.eventloop.minitornado import ioloop + + def test_utils(self): + """test util imports""" + import zmq.utils + from zmq.utils import strtypes + from zmq.utils import jsonapi + + def test_ssh(self): + """test ssh imports""" + from zmq.ssh import tunnel + + + diff --git a/scripts/external_libs/zmq/tests/test_ioloop.py b/scripts/external_libs/zmq/tests/test_ioloop.py new file mode 100644 index 00000000..2a8b1153 --- /dev/null +++ b/scripts/external_libs/zmq/tests/test_ioloop.py @@ -0,0 +1,113 @@ +# Copyright (C) PyZMQ Developers +# Distributed under the terms of the Modified BSD License. + +import time +import os +import threading + +import zmq +from zmq.tests import BaseZMQTestCase +from zmq.eventloop import ioloop +from zmq.eventloop.minitornado.ioloop import _Timeout +try: + from tornado.ioloop import PollIOLoop, IOLoop as BaseIOLoop +except ImportError: + from zmq.eventloop.minitornado.ioloop import IOLoop as BaseIOLoop + + +def printer(): + os.system("say hello") + raise Exception + print (time.time()) + + +class Delay(threading.Thread): + def __init__(self, f, delay=1): + self.f=f + self.delay=delay + self.aborted=False + self.cond=threading.Condition() + super(Delay, self).__init__() + + def run(self): + self.cond.acquire() + self.cond.wait(self.delay) + self.cond.release() + if not self.aborted: + self.f() + + def abort(self): + self.aborted=True + self.cond.acquire() + self.cond.notify() + self.cond.release() + + +class TestIOLoop(BaseZMQTestCase): + + def test_simple(self): + """simple IOLoop creation test""" + loop = ioloop.IOLoop() + dc = ioloop.PeriodicCallback(loop.stop, 200, loop) + pc = ioloop.PeriodicCallback(lambda : None, 10, loop) + pc.start() + dc.start() + t = Delay(loop.stop,1) + t.start() + loop.start() + if t.isAlive(): + t.abort() + else: + self.fail("IOLoop failed to exit") + + def test_timeout_compare(self): + """test timeout comparisons""" + loop = ioloop.IOLoop() + t = _Timeout(1, 2, loop) + t2 = _Timeout(1, 3, loop) + self.assertEqual(t < t2, id(t) < id(t2)) + t2 = _Timeout(2,1, loop) + self.assertTrue(t < t2) + + def test_poller_events(self): + """Tornado poller implementation maps events correctly""" + req,rep = self.create_bound_pair(zmq.REQ, zmq.REP) + poller = ioloop.ZMQPoller() + poller.register(req, ioloop.IOLoop.READ) + poller.register(rep, ioloop.IOLoop.READ) + events = dict(poller.poll(0)) + self.assertEqual(events.get(rep), None) + 
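# --- aside: ZMQPoller maps tornado's READ/WRITE flags onto zmq.POLLIN and
# zmq.POLLOUT; the equivalent check with the plain API (sketch, req/rep as
# in the test above):
p = zmq.Poller()
p.register(req, zmq.POLLIN)
p.register(rep, zmq.POLLIN)
events = dict(p.poll(0))   # empty here: neither socket is readable yet
# ---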
self.assertEqual(events.get(req), None) + + poller.register(req, ioloop.IOLoop.WRITE) + poller.register(rep, ioloop.IOLoop.WRITE) + events = dict(poller.poll(1)) + self.assertEqual(events.get(req), ioloop.IOLoop.WRITE) + self.assertEqual(events.get(rep), None) + + poller.register(rep, ioloop.IOLoop.READ) + req.send(b'hi') + events = dict(poller.poll(1)) + self.assertEqual(events.get(rep), ioloop.IOLoop.READ) + self.assertEqual(events.get(req), None) + + def test_instance(self): + """Test IOLoop.instance returns the right object""" + loop = ioloop.IOLoop.instance() + self.assertEqual(loop.__class__, ioloop.IOLoop) + loop = BaseIOLoop.instance() + self.assertEqual(loop.__class__, ioloop.IOLoop) + + def test_close_all(self): + """Test close(all_fds=True)""" + loop = ioloop.IOLoop.instance() + req,rep = self.create_bound_pair(zmq.REQ, zmq.REP) + loop.add_handler(req, lambda msg: msg, ioloop.IOLoop.READ) + loop.add_handler(rep, lambda msg: msg, ioloop.IOLoop.READ) + self.assertEqual(req.closed, False) + self.assertEqual(rep.closed, False) + loop.close(all_fds=True) + self.assertEqual(req.closed, True) + self.assertEqual(rep.closed, True) + + diff --git a/scripts/external_libs/zmq/tests/test_log.py b/scripts/external_libs/zmq/tests/test_log.py new file mode 100644 index 00000000..9206f095 --- /dev/null +++ b/scripts/external_libs/zmq/tests/test_log.py @@ -0,0 +1,116 @@ +# encoding: utf-8 + +# Copyright (C) PyZMQ Developers +# Distributed under the terms of the Modified BSD License. + + +import logging +import time +from unittest import TestCase + +import zmq +from zmq.log import handlers +from zmq.utils.strtypes import b, u +from zmq.tests import BaseZMQTestCase + + +class TestPubLog(BaseZMQTestCase): + + iface = 'inproc://zmqlog' + topic= 'zmq' + + @property + def logger(self): + # print dir(self) + logger = logging.getLogger('zmqtest') + logger.setLevel(logging.DEBUG) + return logger + + def connect_handler(self, topic=None): + topic = self.topic if topic is None else topic + logger = self.logger + pub,sub = self.create_bound_pair(zmq.PUB, zmq.SUB) + handler = handlers.PUBHandler(pub) + handler.setLevel(logging.DEBUG) + handler.root_topic = topic + logger.addHandler(handler) + sub.setsockopt(zmq.SUBSCRIBE, b(topic)) + time.sleep(0.1) + return logger, handler, sub + + def test_init_iface(self): + logger = self.logger + ctx = self.context + handler = handlers.PUBHandler(self.iface) + self.assertFalse(handler.ctx is ctx) + self.sockets.append(handler.socket) + # handler.ctx.term() + handler = handlers.PUBHandler(self.iface, self.context) + self.sockets.append(handler.socket) + self.assertTrue(handler.ctx is ctx) + handler.setLevel(logging.DEBUG) + handler.root_topic = self.topic + logger.addHandler(handler) + sub = ctx.socket(zmq.SUB) + self.sockets.append(sub) + sub.setsockopt(zmq.SUBSCRIBE, b(self.topic)) + sub.connect(self.iface) + import time; time.sleep(0.25) + msg1 = 'message' + logger.info(msg1) + + (topic, msg2) = sub.recv_multipart() + self.assertEqual(topic, b'zmq.INFO') + self.assertEqual(msg2, b(msg1)+b'\n') + logger.removeHandler(handler) + + def test_init_socket(self): + pub,sub = self.create_bound_pair(zmq.PUB, zmq.SUB) + logger = self.logger + handler = handlers.PUBHandler(pub) + handler.setLevel(logging.DEBUG) + handler.root_topic = self.topic + logger.addHandler(handler) + + self.assertTrue(handler.socket is pub) + self.assertTrue(handler.ctx is pub.context) + self.assertTrue(handler.ctx is self.context) + sub.setsockopt(zmq.SUBSCRIBE, b(self.topic)) + import time; 
time.sleep(0.1) + msg1 = 'message' + logger.info(msg1) + + (topic, msg2) = sub.recv_multipart() + self.assertEqual(topic, b'zmq.INFO') + self.assertEqual(msg2, b(msg1)+b'\n') + logger.removeHandler(handler) + + def test_root_topic(self): + logger, handler, sub = self.connect_handler() + handler.socket.bind(self.iface) + sub2 = sub.context.socket(zmq.SUB) + self.sockets.append(sub2) + sub2.connect(self.iface) + sub2.setsockopt(zmq.SUBSCRIBE, b'') + handler.root_topic = b'twoonly' + msg1 = 'ignored' + logger.info(msg1) + self.assertRaisesErrno(zmq.EAGAIN, sub.recv, zmq.NOBLOCK) + topic,msg2 = sub2.recv_multipart() + self.assertEqual(topic, b'twoonly.INFO') + self.assertEqual(msg2, b(msg1)+b'\n') + + logger.removeHandler(handler) + + def test_unicode_message(self): + logger, handler, sub = self.connect_handler() + base_topic = b(self.topic + '.INFO') + for msg, expected in [ + (u('hello'), [base_topic, b('hello\n')]), + (u('héllo'), [base_topic, b('héllo\n')]), + (u('tøpic::héllo'), [base_topic + b('.tøpic'), b('héllo\n')]), + ]: + logger.info(msg) + received = sub.recv_multipart() + self.assertEqual(received, expected) + diff --git a/scripts/external_libs/zmq/tests/test_message.py b/scripts/external_libs/zmq/tests/test_message.py new file mode 100644 index 00000000..d8770bdf --- /dev/null +++ b/scripts/external_libs/zmq/tests/test_message.py @@ -0,0 +1,362 @@ +# -*- coding: utf8 -*- +# Copyright (C) PyZMQ Developers +# Distributed under the terms of the Modified BSD License. + + +import copy +import sys +try: + from sys import getrefcount as grc +except ImportError: + grc = None + +import time +from pprint import pprint +from unittest import TestCase + +import zmq +from zmq.tests import BaseZMQTestCase, SkipTest, skip_pypy, PYPY +from zmq.utils.strtypes import unicode, bytes, b, u + + +# some useful constants: + +x = b'x' + +try: + view = memoryview +except NameError: + view = buffer + +if grc: + rc0 = grc(x) + v = view(x) + view_rc = grc(x) - rc0 + +def await_gc(obj, rc): + """wait for refcount on an object to drop to an expected value + + Necessary because of the zero-copy gc thread, + which can take some time to receive its DECREF message. 
+ """ + for i in range(50): + # rc + 2 because of the refs in this function + if grc(obj) <= rc + 2: + return + time.sleep(0.05) + +class TestFrame(BaseZMQTestCase): + + @skip_pypy + def test_above_30(self): + """Message above 30 bytes are never copied by 0MQ.""" + for i in range(5, 16): # 32, 64,..., 65536 + s = (2**i)*x + self.assertEqual(grc(s), 2) + m = zmq.Frame(s) + self.assertEqual(grc(s), 4) + del m + await_gc(s, 2) + self.assertEqual(grc(s), 2) + del s + + def test_str(self): + """Test the str representations of the Frames.""" + for i in range(16): + s = (2**i)*x + m = zmq.Frame(s) + m_str = str(m) + m_str_b = b(m_str) # py3compat + self.assertEqual(s, m_str_b) + + def test_bytes(self): + """Test the Frame.bytes property.""" + for i in range(1,16): + s = (2**i)*x + m = zmq.Frame(s) + b = m.bytes + self.assertEqual(s, m.bytes) + if not PYPY: + # check that it copies + self.assert_(b is not s) + # check that it copies only once + self.assert_(b is m.bytes) + + def test_unicode(self): + """Test the unicode representations of the Frames.""" + s = u('asdf') + self.assertRaises(TypeError, zmq.Frame, s) + for i in range(16): + s = (2**i)*u('§') + m = zmq.Frame(s.encode('utf8')) + self.assertEqual(s, unicode(m.bytes,'utf8')) + + def test_len(self): + """Test the len of the Frames.""" + for i in range(16): + s = (2**i)*x + m = zmq.Frame(s) + self.assertEqual(len(s), len(m)) + + @skip_pypy + def test_lifecycle1(self): + """Run through a ref counting cycle with a copy.""" + for i in range(5, 16): # 32, 64,..., 65536 + s = (2**i)*x + rc = 2 + self.assertEqual(grc(s), rc) + m = zmq.Frame(s) + rc += 2 + self.assertEqual(grc(s), rc) + m2 = copy.copy(m) + rc += 1 + self.assertEqual(grc(s), rc) + buf = m2.buffer + + rc += view_rc + self.assertEqual(grc(s), rc) + + self.assertEqual(s, b(str(m))) + self.assertEqual(s, bytes(m2)) + self.assertEqual(s, m.bytes) + # self.assert_(s is str(m)) + # self.assert_(s is str(m2)) + del m2 + rc -= 1 + self.assertEqual(grc(s), rc) + rc -= view_rc + del buf + self.assertEqual(grc(s), rc) + del m + rc -= 2 + await_gc(s, rc) + self.assertEqual(grc(s), rc) + self.assertEqual(rc, 2) + del s + + @skip_pypy + def test_lifecycle2(self): + """Run through a different ref counting cycle with a copy.""" + for i in range(5, 16): # 32, 64,..., 65536 + s = (2**i)*x + rc = 2 + self.assertEqual(grc(s), rc) + m = zmq.Frame(s) + rc += 2 + self.assertEqual(grc(s), rc) + m2 = copy.copy(m) + rc += 1 + self.assertEqual(grc(s), rc) + buf = m.buffer + rc += view_rc + self.assertEqual(grc(s), rc) + self.assertEqual(s, b(str(m))) + self.assertEqual(s, bytes(m2)) + self.assertEqual(s, m2.bytes) + self.assertEqual(s, m.bytes) + # self.assert_(s is str(m)) + # self.assert_(s is str(m2)) + del buf + self.assertEqual(grc(s), rc) + del m + # m.buffer is kept until m is del'd + rc -= view_rc + rc -= 1 + self.assertEqual(grc(s), rc) + del m2 + rc -= 2 + await_gc(s, rc) + self.assertEqual(grc(s), rc) + self.assertEqual(rc, 2) + del s + + @skip_pypy + def test_tracker(self): + m = zmq.Frame(b'asdf', track=True) + self.assertFalse(m.tracker.done) + pm = zmq.MessageTracker(m) + self.assertFalse(pm.done) + del m + for i in range(10): + if pm.done: + break + time.sleep(0.1) + self.assertTrue(pm.done) + + def test_no_tracker(self): + m = zmq.Frame(b'asdf', track=False) + self.assertEqual(m.tracker, None) + m2 = copy.copy(m) + self.assertEqual(m2.tracker, None) + self.assertRaises(ValueError, zmq.MessageTracker, m) + + @skip_pypy + def test_multi_tracker(self): + m = zmq.Frame(b'asdf', track=True) + m2 = 
zmq.Frame(b'whoda', track=True) + mt = zmq.MessageTracker(m,m2) + self.assertFalse(m.tracker.done) + self.assertFalse(mt.done) + self.assertRaises(zmq.NotDone, mt.wait, 0.1) + del m + time.sleep(0.1) + self.assertRaises(zmq.NotDone, mt.wait, 0.1) + self.assertFalse(mt.done) + del m2 + self.assertTrue(mt.wait() is None) + self.assertTrue(mt.done) + + + def test_buffer_in(self): + """test using a buffer as input""" + ins = b("§§¶•ªº˜µ¬˚…∆˙åß∂©œ∑´†≈ç√") + m = zmq.Frame(view(ins)) + + def test_bad_buffer_in(self): + """test using a bad object""" + self.assertRaises(TypeError, zmq.Frame, 5) + self.assertRaises(TypeError, zmq.Frame, object()) + + def test_buffer_out(self): + """receiving buffered output""" + ins = b("§§¶•ªº˜µ¬˚…∆˙åß∂©œ∑´†≈ç√") + m = zmq.Frame(ins) + outb = m.buffer + self.assertTrue(isinstance(outb, view)) + self.assert_(outb is m.buffer) + self.assert_(m.buffer is m.buffer) + + def test_multisend(self): + """ensure that a message remains intact after multiple sends""" + a,b = self.create_bound_pair(zmq.PAIR, zmq.PAIR) + s = b"message" + m = zmq.Frame(s) + self.assertEqual(s, m.bytes) + + a.send(m, copy=False) + time.sleep(0.1) + self.assertEqual(s, m.bytes) + a.send(m, copy=False) + time.sleep(0.1) + self.assertEqual(s, m.bytes) + a.send(m, copy=True) + time.sleep(0.1) + self.assertEqual(s, m.bytes) + a.send(m, copy=True) + time.sleep(0.1) + self.assertEqual(s, m.bytes) + for i in range(4): + r = b.recv() + self.assertEqual(s,r) + self.assertEqual(s, m.bytes) + + def test_buffer_numpy(self): + """test non-copying numpy array messages""" + try: + import numpy + except ImportError: + raise SkipTest("numpy required") + rand = numpy.random.randint + shapes = [ rand(2,16) for i in range(5) ] + for i in range(1,len(shapes)+1): + shape = shapes[:i] + A = numpy.random.random(shape) + m = zmq.Frame(A) + if view.__name__ == 'buffer': + self.assertEqual(A.data, m.buffer) + B = numpy.frombuffer(m.buffer,dtype=A.dtype).reshape(A.shape) + else: + self.assertEqual(memoryview(A), m.buffer) + B = numpy.array(m.buffer,dtype=A.dtype).reshape(A.shape) + self.assertEqual((A==B).all(), True) + + def test_memoryview(self): + """test messages from memoryview""" + major,minor = sys.version_info[:2] + if not (major >= 3 or (major == 2 and minor >= 7)): + raise SkipTest("memoryviews only in python >= 2.7") + + s = b'carrotjuice' + v = memoryview(s) + m = zmq.Frame(s) + buf = m.buffer + s2 = buf.tobytes() + self.assertEqual(s2,s) + self.assertEqual(m.bytes,s) + + def test_noncopying_recv(self): + """check for clobbering message buffers""" + null = b'\0'*64 + sa,sb = self.create_bound_pair(zmq.PAIR, zmq.PAIR) + for i in range(32): + # try a few times + sb.send(null, copy=False) + m = sa.recv(copy=False) + mb = m.bytes + # buf = view(m) + buf = m.buffer + del m + for i in range(5): + ff=b'\xff'*(40 + i*10) + sb.send(ff, copy=False) + m2 = sa.recv(copy=False) + if view.__name__ == 'buffer': + b = bytes(buf) + else: + b = buf.tobytes() + self.assertEqual(b, null) + self.assertEqual(mb, null) + self.assertEqual(m2.bytes, ff) + + @skip_pypy + def test_buffer_numpy(self): + """test non-copying numpy array messages""" + try: + import numpy + except ImportError: + raise SkipTest("requires numpy") + if sys.version_info < (2,7): + raise SkipTest("requires new-style buffer interface (py >= 2.7)") + rand = numpy.random.randint + shapes = [ rand(2,5) for i in range(5) ] + a,b = self.create_bound_pair(zmq.PAIR, zmq.PAIR) + dtypes = [int, float, '>i4', 'B'] + for i in range(1,len(shapes)+1): + shape = shapes[:i] + for dt 
in dtypes: + A = numpy.empty(shape, dtype=dt) + while numpy.isnan(A).any(): + # don't let nan sneak in + A = numpy.ndarray(shape, dtype=dt) + a.send(A, copy=False) + msg = b.recv(copy=False) + + B = numpy.frombuffer(msg, A.dtype).reshape(A.shape) + self.assertEqual(A.shape, B.shape) + self.assertTrue((A==B).all()) + A = numpy.empty(shape, dtype=[('a', int), ('b', float), ('c', 'a32')]) + A['a'] = 1024 + A['b'] = 1e9 + A['c'] = 'hello there' + a.send(A, copy=False) + msg = b.recv(copy=False) + + B = numpy.frombuffer(msg, A.dtype).reshape(A.shape) + self.assertEqual(A.shape, B.shape) + self.assertTrue((A==B).all()) + + def test_frame_more(self): + """test Frame.more attribute""" + frame = zmq.Frame(b"hello") + self.assertFalse(frame.more) + sa,sb = self.create_bound_pair(zmq.PAIR, zmq.PAIR) + sa.send_multipart([b'hi', b'there']) + frame = self.recv(sb, copy=False) + self.assertTrue(frame.more) + if zmq.zmq_version_info()[0] >= 3 and not PYPY: + self.assertTrue(frame.get(zmq.MORE)) + frame = self.recv(sb, copy=False) + self.assertFalse(frame.more) + if zmq.zmq_version_info()[0] >= 3 and not PYPY: + self.assertFalse(frame.get(zmq.MORE)) + diff --git a/scripts/external_libs/zmq/tests/test_monitor.py b/scripts/external_libs/zmq/tests/test_monitor.py new file mode 100644 index 00000000..4f035388 --- /dev/null +++ b/scripts/external_libs/zmq/tests/test_monitor.py @@ -0,0 +1,71 @@ +# -*- coding: utf-8 -*- +# Copyright (C) PyZMQ Developers +# Distributed under the terms of the Modified BSD License. + + +import sys +import time +import struct + +from unittest import TestCase + +import zmq +from zmq.tests import BaseZMQTestCase, skip_if, skip_pypy +from zmq.utils.monitor import recv_monitor_message + +skip_lt_4 = skip_if(zmq.zmq_version_info() < (4,), "requires zmq >= 4") + +class TestSocketMonitor(BaseZMQTestCase): + + @skip_lt_4 + def test_monitor(self): + """Test monitoring interface for sockets.""" + s_rep = self.context.socket(zmq.REP) + s_req = self.context.socket(zmq.REQ) + self.sockets.extend([s_rep, s_req]) + s_req.bind("tcp://127.0.0.1:6666") + # try monitoring the REP socket + + s_rep.monitor("inproc://monitor.rep", zmq.EVENT_ALL) + # create listening socket for monitor + s_event = self.context.socket(zmq.PAIR) + self.sockets.append(s_event) + s_event.connect("inproc://monitor.rep") + s_event.linger = 0 + # test receive event for connect event + s_rep.connect("tcp://127.0.0.1:6666") + m = recv_monitor_message(s_event) + if m['event'] == zmq.EVENT_CONNECT_DELAYED: + self.assertEqual(m['endpoint'], b"tcp://127.0.0.1:6666") + # test receive event for connected event + m = recv_monitor_message(s_event) + self.assertEqual(m['event'], zmq.EVENT_CONNECTED) + self.assertEqual(m['endpoint'], b"tcp://127.0.0.1:6666") + + # test monitor can be disabled. 
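# --- aside: each monitor event arrives as a two-frame message that
# recv_monitor_message() decodes into a dict (sketch; libzmq >= 4 wire
# format, s_event as in the test above):
from zmq.utils.monitor import recv_monitor_message

evt = recv_monitor_message(s_event)   # keys: 'event', 'value', 'endpoint'
if evt['event'] == zmq.EVENT_CONNECTED:
    print(evt['endpoint'])            # e.g. b'tcp://127.0.0.1:6666'
# ---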
+ s_rep.disable_monitor()
+ m = recv_monitor_message(s_event)
+ self.assertEqual(m['event'], zmq.EVENT_MONITOR_STOPPED)
+
+
+ @skip_lt_4
+ def test_monitor_connected(self):
+ """Test connected monitoring socket."""
+ s_rep = self.context.socket(zmq.REP)
+ s_req = self.context.socket(zmq.REQ)
+ self.sockets.extend([s_rep, s_req])
+ s_req.bind("tcp://127.0.0.1:6667")
+ # try monitoring the REP socket
+ # create listening socket for monitor
+ s_event = s_rep.get_monitor_socket()
+ s_event.linger = 0
+ self.sockets.append(s_event)
+ # test receive event for connect event
+ s_rep.connect("tcp://127.0.0.1:6667")
+ m = recv_monitor_message(s_event)
+ if m['event'] == zmq.EVENT_CONNECT_DELAYED:
+ self.assertEqual(m['endpoint'], b"tcp://127.0.0.1:6667")
+ # test receive event for connected event
+ m = recv_monitor_message(s_event)
+ self.assertEqual(m['event'], zmq.EVENT_CONNECTED)
+ self.assertEqual(m['endpoint'], b"tcp://127.0.0.1:6667") diff --git a/scripts/external_libs/zmq/tests/test_monqueue.py b/scripts/external_libs/zmq/tests/test_monqueue.py new file mode 100644 index 00000000..e855602e --- /dev/null +++ b/scripts/external_libs/zmq/tests/test_monqueue.py @@ -0,0 +1,227 @@ +# Copyright (C) PyZMQ Developers
+# Distributed under the terms of the Modified BSD License.
+
+import time
+from unittest import TestCase
+
+import zmq
+from zmq import devices
+
+from zmq.tests import BaseZMQTestCase, SkipTest, PYPY
+from zmq.utils.strtypes import unicode
+
+
+if PYPY or zmq.zmq_version_info() >= (4,1):
+ # cleanup of shared Context doesn't work on PyPy
+ # there also seems to be a bug in cleanup in libzmq-4.1 (zeromq/libzmq#1052)
+ devices.Device.context_factory = zmq.Context
+
+
+class TestMonitoredQueue(BaseZMQTestCase):
+
+ sockets = []
+
+ def build_device(self, mon_sub=b"", in_prefix=b'in', out_prefix=b'out'):
+ self.device = devices.ThreadMonitoredQueue(zmq.PAIR, zmq.PAIR, zmq.PUB,
+ in_prefix, out_prefix)
+ alice = self.context.socket(zmq.PAIR)
+ bob = self.context.socket(zmq.PAIR)
+ mon = self.context.socket(zmq.SUB)
+
+ aport = alice.bind_to_random_port('tcp://127.0.0.1')
+ bport = bob.bind_to_random_port('tcp://127.0.0.1')
+ mport = mon.bind_to_random_port('tcp://127.0.0.1')
+ mon.setsockopt(zmq.SUBSCRIBE, mon_sub)
+
+ self.device.connect_in("tcp://127.0.0.1:%i"%aport)
+ self.device.connect_out("tcp://127.0.0.1:%i"%bport)
+ self.device.connect_mon("tcp://127.0.0.1:%i"%mport)
+ self.device.start()
+ time.sleep(.2)
+ try:
+ # this is currently necessary to ensure no dropped monitor messages
+ # see LIBZMQ-248 for more info
+ mon.recv_multipart(zmq.NOBLOCK)
+ except zmq.ZMQError:
+ pass
+ self.sockets.extend([alice, bob, mon])
+ return alice, bob, mon
+
+
+ def teardown_device(self):
+ for socket in self.sockets:
+ socket.close()
+ del socket
+ del self.device
+
+ def test_reply(self):
+ alice, bob, mon = self.build_device()
+ alices = b"hello bob".split()
+ alice.send_multipart(alices)
+ bobs = self.recv_multipart(bob)
+ self.assertEqual(alices, bobs)
+ bobs = b"hello alice".split()
+ bob.send_multipart(bobs)
+ alices = self.recv_multipart(alice)
+ self.assertEqual(alices, bobs)
+ self.teardown_device()
+
+ def test_queue(self):
+ alice, bob, mon = self.build_device()
+ alices = b"hello bob".split()
+ alice.send_multipart(alices)
+ alices2 = b"hello again".split()
+ alice.send_multipart(alices2)
+ alices3 = b"hello again and again".split()
+ alice.send_multipart(alices3)
+ bobs = self.recv_multipart(bob)
+ self.assertEqual(alices, bobs)
+ bobs = self.recv_multipart(bob)
+
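# --- aside: what build_device() above constructs, reduced to its core
# (sketch; the endpoints here are hypothetical):
import zmq
from zmq import devices

# a QUEUE device plus a PUB tap that republishes every frame, prefixed
# b'in' or b'out' according to direction
dev = devices.ThreadMonitoredQueue(zmq.PAIR, zmq.PAIR, zmq.PUB, b'in', b'out')
dev.connect_in('tcp://127.0.0.1:5555')
dev.connect_out('tcp://127.0.0.1:5556')
dev.connect_mon('tcp://127.0.0.1:5557')
dev.start()
# ---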
self.assertEqual(alices2, bobs) + bobs = self.recv_multipart(bob) + self.assertEqual(alices3, bobs) + bobs = b"hello alice".split() + bob.send_multipart(bobs) + alices = self.recv_multipart(alice) + self.assertEqual(alices, bobs) + self.teardown_device() + + def test_monitor(self): + alice, bob, mon = self.build_device() + alices = b"hello bob".split() + alice.send_multipart(alices) + alices2 = b"hello again".split() + alice.send_multipart(alices2) + alices3 = b"hello again and again".split() + alice.send_multipart(alices3) + bobs = self.recv_multipart(bob) + self.assertEqual(alices, bobs) + mons = self.recv_multipart(mon) + self.assertEqual([b'in']+bobs, mons) + bobs = self.recv_multipart(bob) + self.assertEqual(alices2, bobs) + bobs = self.recv_multipart(bob) + self.assertEqual(alices3, bobs) + mons = self.recv_multipart(mon) + self.assertEqual([b'in']+alices2, mons) + bobs = b"hello alice".split() + bob.send_multipart(bobs) + alices = self.recv_multipart(alice) + self.assertEqual(alices, bobs) + mons = self.recv_multipart(mon) + self.assertEqual([b'in']+alices3, mons) + mons = self.recv_multipart(mon) + self.assertEqual([b'out']+bobs, mons) + self.teardown_device() + + def test_prefix(self): + alice, bob, mon = self.build_device(b"", b'foo', b'bar') + alices = b"hello bob".split() + alice.send_multipart(alices) + alices2 = b"hello again".split() + alice.send_multipart(alices2) + alices3 = b"hello again and again".split() + alice.send_multipart(alices3) + bobs = self.recv_multipart(bob) + self.assertEqual(alices, bobs) + mons = self.recv_multipart(mon) + self.assertEqual([b'foo']+bobs, mons) + bobs = self.recv_multipart(bob) + self.assertEqual(alices2, bobs) + bobs = self.recv_multipart(bob) + self.assertEqual(alices3, bobs) + mons = self.recv_multipart(mon) + self.assertEqual([b'foo']+alices2, mons) + bobs = b"hello alice".split() + bob.send_multipart(bobs) + alices = self.recv_multipart(alice) + self.assertEqual(alices, bobs) + mons = self.recv_multipart(mon) + self.assertEqual([b'foo']+alices3, mons) + mons = self.recv_multipart(mon) + self.assertEqual([b'bar']+bobs, mons) + self.teardown_device() + + def test_monitor_subscribe(self): + alice, bob, mon = self.build_device(b"out") + alices = b"hello bob".split() + alice.send_multipart(alices) + alices2 = b"hello again".split() + alice.send_multipart(alices2) + alices3 = b"hello again and again".split() + alice.send_multipart(alices3) + bobs = self.recv_multipart(bob) + self.assertEqual(alices, bobs) + bobs = self.recv_multipart(bob) + self.assertEqual(alices2, bobs) + bobs = self.recv_multipart(bob) + self.assertEqual(alices3, bobs) + bobs = b"hello alice".split() + bob.send_multipart(bobs) + alices = self.recv_multipart(alice) + self.assertEqual(alices, bobs) + mons = self.recv_multipart(mon) + self.assertEqual([b'out']+bobs, mons) + self.teardown_device() + + def test_router_router(self): + """test router-router MQ devices""" + dev = devices.ThreadMonitoredQueue(zmq.ROUTER, zmq.ROUTER, zmq.PUB, b'in', b'out') + self.device = dev + dev.setsockopt_in(zmq.LINGER, 0) + dev.setsockopt_out(zmq.LINGER, 0) + dev.setsockopt_mon(zmq.LINGER, 0) + + binder = self.context.socket(zmq.DEALER) + porta = binder.bind_to_random_port('tcp://127.0.0.1') + portb = binder.bind_to_random_port('tcp://127.0.0.1') + binder.close() + time.sleep(0.1) + a = self.context.socket(zmq.DEALER) + a.identity = b'a' + b = self.context.socket(zmq.DEALER) + b.identity = b'b' + self.sockets.extend([a, b]) + + a.connect('tcp://127.0.0.1:%i'%porta) + 
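# --- aside: with ROUTER sockets on both device ends, callers address peers
# by identity explicitly; the envelope convention in one sketch (a/b as in
# the test above):
a.send_multipart([b'b', b'hello'])    # first frame names the destination
ident, payload = b.recv_multipart()   # b receives [b'a', b'hello']
# ---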
dev.bind_in('tcp://127.0.0.1:%i'%porta) + b.connect('tcp://127.0.0.1:%i'%portb) + dev.bind_out('tcp://127.0.0.1:%i'%portb) + dev.start() + time.sleep(0.2) + if zmq.zmq_version_info() >= (3,1,0): + # flush erroneous poll state, due to LIBZMQ-280 + ping_msg = [ b'ping', b'pong' ] + for s in (a,b): + s.send_multipart(ping_msg) + try: + s.recv(zmq.NOBLOCK) + except zmq.ZMQError: + pass + msg = [ b'hello', b'there' ] + a.send_multipart([b'b']+msg) + bmsg = self.recv_multipart(b) + self.assertEqual(bmsg, [b'a']+msg) + b.send_multipart(bmsg) + amsg = self.recv_multipart(a) + self.assertEqual(amsg, [b'b']+msg) + self.teardown_device() + + def test_default_mq_args(self): + self.device = dev = devices.ThreadMonitoredQueue(zmq.ROUTER, zmq.DEALER, zmq.PUB) + dev.setsockopt_in(zmq.LINGER, 0) + dev.setsockopt_out(zmq.LINGER, 0) + dev.setsockopt_mon(zmq.LINGER, 0) + # this will raise if default args are wrong + dev.start() + self.teardown_device() + + def test_mq_check_prefix(self): + ins = self.context.socket(zmq.ROUTER) + outs = self.context.socket(zmq.DEALER) + mons = self.context.socket(zmq.PUB) + self.sockets.extend([ins, outs, mons]) + + ins = unicode('in') + outs = unicode('out') + self.assertRaises(TypeError, devices.monitoredqueue, ins, outs, mons) diff --git a/scripts/external_libs/zmq/tests/test_multipart.py b/scripts/external_libs/zmq/tests/test_multipart.py new file mode 100644 index 00000000..24d41be0 --- /dev/null +++ b/scripts/external_libs/zmq/tests/test_multipart.py @@ -0,0 +1,35 @@ +# Copyright (C) PyZMQ Developers +# Distributed under the terms of the Modified BSD License. + + +import zmq + + +from zmq.tests import BaseZMQTestCase, SkipTest, have_gevent, GreenTest + + +class TestMultipart(BaseZMQTestCase): + + def test_router_dealer(self): + router, dealer = self.create_bound_pair(zmq.ROUTER, zmq.DEALER) + + msg1 = b'message1' + dealer.send(msg1) + ident = self.recv(router) + more = router.rcvmore + self.assertEqual(more, True) + msg2 = self.recv(router) + self.assertEqual(msg1, msg2) + more = router.rcvmore + self.assertEqual(more, False) + + def test_basic_multipart(self): + a,b = self.create_bound_pair(zmq.PAIR, zmq.PAIR) + msg = [ b'hi', b'there', b'b'] + a.send_multipart(msg) + recvd = b.recv_multipart() + self.assertEqual(msg, recvd) + +if have_gevent: + class TestMultipartGreen(GreenTest, TestMultipart): + pass diff --git a/scripts/external_libs/zmq/tests/test_pair.py b/scripts/external_libs/zmq/tests/test_pair.py new file mode 100644 index 00000000..e88c1e8b --- /dev/null +++ b/scripts/external_libs/zmq/tests/test_pair.py @@ -0,0 +1,53 @@ +# Copyright (C) PyZMQ Developers +# Distributed under the terms of the Modified BSD License. 
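+# (A note on the pattern under test: zmq.PAIR is an exclusive one-to-one
+# transport with no routing envelopes or subscription machinery, which is
+# why the round-trip tests below can stay this small.)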
+ + +import zmq + + +from zmq.tests import BaseZMQTestCase, have_gevent, GreenTest + + +x = b' ' +class TestPair(BaseZMQTestCase): + + def test_basic(self): + s1, s2 = self.create_bound_pair(zmq.PAIR, zmq.PAIR) + + msg1 = b'message1' + msg2 = self.ping_pong(s1, s2, msg1) + self.assertEqual(msg1, msg2) + + def test_multiple(self): + s1, s2 = self.create_bound_pair(zmq.PAIR, zmq.PAIR) + + for i in range(10): + msg = i*x + s1.send(msg) + + for i in range(10): + msg = i*x + s2.send(msg) + + for i in range(10): + msg = s1.recv() + self.assertEqual(msg, i*x) + + for i in range(10): + msg = s2.recv() + self.assertEqual(msg, i*x) + + def test_json(self): + s1, s2 = self.create_bound_pair(zmq.PAIR, zmq.PAIR) + o = dict(a=10,b=list(range(10))) + o2 = self.ping_pong_json(s1, s2, o) + + def test_pyobj(self): + s1, s2 = self.create_bound_pair(zmq.PAIR, zmq.PAIR) + o = dict(a=10,b=range(10)) + o2 = self.ping_pong_pyobj(s1, s2, o) + +if have_gevent: + class TestReqRepGreen(GreenTest, TestPair): + pass + diff --git a/scripts/external_libs/zmq/tests/test_poll.py b/scripts/external_libs/zmq/tests/test_poll.py new file mode 100644 index 00000000..57346c89 --- /dev/null +++ b/scripts/external_libs/zmq/tests/test_poll.py @@ -0,0 +1,229 @@ +# Copyright (C) PyZMQ Developers +# Distributed under the terms of the Modified BSD License. + + +import time +from unittest import TestCase + +import zmq + +from zmq.tests import PollZMQTestCase, have_gevent, GreenTest + +def wait(): + time.sleep(.25) + + +class TestPoll(PollZMQTestCase): + + Poller = zmq.Poller + + # This test is failing due to this issue: + # http://github.com/sustrik/zeromq2/issues#issue/26 + def test_pair(self): + s1, s2 = self.create_bound_pair(zmq.PAIR, zmq.PAIR) + + # Sleep to allow sockets to connect. + wait() + + poller = self.Poller() + poller.register(s1, zmq.POLLIN|zmq.POLLOUT) + poller.register(s2, zmq.POLLIN|zmq.POLLOUT) + # Poll result should contain both sockets + socks = dict(poller.poll()) + # Now make sure that both are send ready. + self.assertEqual(socks[s1], zmq.POLLOUT) + self.assertEqual(socks[s2], zmq.POLLOUT) + # Now do a send on both, wait and test for zmq.POLLOUT|zmq.POLLIN + s1.send(b'msg1') + s2.send(b'msg2') + wait() + socks = dict(poller.poll()) + self.assertEqual(socks[s1], zmq.POLLOUT|zmq.POLLIN) + self.assertEqual(socks[s2], zmq.POLLOUT|zmq.POLLIN) + # Make sure that both are in POLLOUT after recv. + s1.recv() + s2.recv() + socks = dict(poller.poll()) + self.assertEqual(socks[s1], zmq.POLLOUT) + self.assertEqual(socks[s2], zmq.POLLOUT) + + poller.unregister(s1) + poller.unregister(s2) + + # Wait for everything to finish. + wait() + + def test_reqrep(self): + s1, s2 = self.create_bound_pair(zmq.REP, zmq.REQ) + + # Sleep to allow sockets to connect. + wait() + + poller = self.Poller() + poller.register(s1, zmq.POLLIN|zmq.POLLOUT) + poller.register(s2, zmq.POLLIN|zmq.POLLOUT) + + # Make sure that s1 is in state 0 and s2 is in POLLOUT + socks = dict(poller.poll()) + self.assertEqual(s1 in socks, 0) + self.assertEqual(socks[s2], zmq.POLLOUT) + + # Make sure that s2 goes immediately into state 0 after send. + s2.send(b'msg1') + socks = dict(poller.poll()) + self.assertEqual(s2 in socks, 0) + + # Make sure that s1 goes into POLLIN state after a time.sleep(). + time.sleep(0.5) + socks = dict(poller.poll()) + self.assertEqual(socks[s1], zmq.POLLIN) + + # Make sure that s1 goes into POLLOUT after recv. 
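+        # (REQ/REP sockets alternate send and recv, so each side polls as
+        # POLLIN or POLLOUT but never both at once, unlike the PAIR pair above.)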
+ s1.recv() + socks = dict(poller.poll()) + self.assertEqual(socks[s1], zmq.POLLOUT) + + # Make sure s1 goes into state 0 after send. + s1.send(b'msg2') + socks = dict(poller.poll()) + self.assertEqual(s1 in socks, 0) + + # Wait and then see that s2 is in POLLIN. + time.sleep(0.5) + socks = dict(poller.poll()) + self.assertEqual(socks[s2], zmq.POLLIN) + + # Make sure that s2 is in POLLOUT after recv. + s2.recv() + socks = dict(poller.poll()) + self.assertEqual(socks[s2], zmq.POLLOUT) + + poller.unregister(s1) + poller.unregister(s2) + + # Wait for everything to finish. + wait() + + def test_no_events(self): + s1, s2 = self.create_bound_pair(zmq.PAIR, zmq.PAIR) + poller = self.Poller() + poller.register(s1, zmq.POLLIN|zmq.POLLOUT) + poller.register(s2, 0) + self.assertTrue(s1 in poller) + self.assertFalse(s2 in poller) + poller.register(s1, 0) + self.assertFalse(s1 in poller) + + def test_pubsub(self): + s1, s2 = self.create_bound_pair(zmq.PUB, zmq.SUB) + s2.setsockopt(zmq.SUBSCRIBE, b'') + + # Sleep to allow sockets to connect. + wait() + + poller = self.Poller() + poller.register(s1, zmq.POLLIN|zmq.POLLOUT) + poller.register(s2, zmq.POLLIN) + + # Now make sure that both are send ready. + socks = dict(poller.poll()) + self.assertEqual(socks[s1], zmq.POLLOUT) + self.assertEqual(s2 in socks, 0) + # Make sure that s1 stays in POLLOUT after a send. + s1.send(b'msg1') + socks = dict(poller.poll()) + self.assertEqual(socks[s1], zmq.POLLOUT) + + # Make sure that s2 is POLLIN after waiting. + wait() + socks = dict(poller.poll()) + self.assertEqual(socks[s2], zmq.POLLIN) + + # Make sure that s2 goes into 0 after recv. + s2.recv() + socks = dict(poller.poll()) + self.assertEqual(s2 in socks, 0) + + poller.unregister(s1) + poller.unregister(s2) + + # Wait for everything to finish. + wait() + def test_timeout(self): + """make sure Poller.poll timeout has the right units (milliseconds).""" + s1, s2 = self.create_bound_pair(zmq.PAIR, zmq.PAIR) + poller = self.Poller() + poller.register(s1, zmq.POLLIN) + tic = time.time() + evt = poller.poll(.005) + toc = time.time() + self.assertTrue(toc-tic < 0.1) + tic = time.time() + evt = poller.poll(5) + toc = time.time() + self.assertTrue(toc-tic < 0.1) + self.assertTrue(toc-tic > .001) + tic = time.time() + evt = poller.poll(500) + toc = time.time() + self.assertTrue(toc-tic < 1) + self.assertTrue(toc-tic > 0.1) + +class TestSelect(PollZMQTestCase): + + def test_pair(self): + s1, s2 = self.create_bound_pair(zmq.PAIR, zmq.PAIR) + + # Sleep to allow sockets to connect. 
+ wait() + + rlist, wlist, xlist = zmq.select([s1, s2], [s1, s2], [s1, s2]) + self.assert_(s1 in wlist) + self.assert_(s2 in wlist) + self.assert_(s1 not in rlist) + self.assert_(s2 not in rlist) + + def test_timeout(self): + """make sure select timeout has the right units (seconds).""" + s1, s2 = self.create_bound_pair(zmq.PAIR, zmq.PAIR) + tic = time.time() + r,w,x = zmq.select([s1,s2],[],[],.005) + toc = time.time() + self.assertTrue(toc-tic < 1) + self.assertTrue(toc-tic > 0.001) + tic = time.time() + r,w,x = zmq.select([s1,s2],[],[],.25) + toc = time.time() + self.assertTrue(toc-tic < 1) + self.assertTrue(toc-tic > 0.1) + + +if have_gevent: + import gevent + from zmq import green as gzmq + + class TestPollGreen(GreenTest, TestPoll): + Poller = gzmq.Poller + + def test_wakeup(self): + s1, s2 = self.create_bound_pair(zmq.PAIR, zmq.PAIR) + poller = self.Poller() + poller.register(s2, zmq.POLLIN) + + tic = time.time() + r = gevent.spawn(lambda: poller.poll(10000)) + s = gevent.spawn(lambda: s1.send(b'msg1')) + r.join() + toc = time.time() + self.assertTrue(toc-tic < 1) + + def test_socket_poll(self): + s1, s2 = self.create_bound_pair(zmq.PAIR, zmq.PAIR) + + tic = time.time() + r = gevent.spawn(lambda: s2.poll(10000)) + s = gevent.spawn(lambda: s1.send(b'msg1')) + r.join() + toc = time.time() + self.assertTrue(toc-tic < 1) + diff --git a/scripts/external_libs/zmq/tests/test_pubsub.py b/scripts/external_libs/zmq/tests/test_pubsub.py new file mode 100644 index 00000000..a3ee22aa --- /dev/null +++ b/scripts/external_libs/zmq/tests/test_pubsub.py @@ -0,0 +1,41 @@ +# Copyright (C) PyZMQ Developers +# Distributed under the terms of the Modified BSD License. + + +import time +from unittest import TestCase + +import zmq + +from zmq.tests import BaseZMQTestCase, have_gevent, GreenTest + + +class TestPubSub(BaseZMQTestCase): + + pass + + # We are disabling this test while an issue is being resolved. + def test_basic(self): + s1, s2 = self.create_bound_pair(zmq.PUB, zmq.SUB) + s2.setsockopt(zmq.SUBSCRIBE,b'') + time.sleep(0.1) + msg1 = b'message' + s1.send(msg1) + msg2 = s2.recv() # This is blocking! + self.assertEqual(msg1, msg2) + + def test_topic(self): + s1, s2 = self.create_bound_pair(zmq.PUB, zmq.SUB) + s2.setsockopt(zmq.SUBSCRIBE, b'x') + time.sleep(0.1) + msg1 = b'message' + s1.send(msg1) + self.assertRaisesErrno(zmq.EAGAIN, s2.recv, zmq.NOBLOCK) + msg1 = b'xmessage' + s1.send(msg1) + msg2 = s2.recv() + self.assertEqual(msg1, msg2) + +if have_gevent: + class TestPubSubGreen(GreenTest, TestPubSub): + pass diff --git a/scripts/external_libs/zmq/tests/test_reqrep.py b/scripts/external_libs/zmq/tests/test_reqrep.py new file mode 100644 index 00000000..de17f2b3 --- /dev/null +++ b/scripts/external_libs/zmq/tests/test_reqrep.py @@ -0,0 +1,62 @@ +# Copyright (C) PyZMQ Developers +# Distributed under the terms of the Modified BSD License. 
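+# The REQ/REP pair enforces a strict send -> recv -> send lockstep; calling
+# out of turn fails with errno EFSM, which test_bad_send_recv below relies on.
+# A minimal sketch of that failure mode (illustrative, not part of the suite):
+#
+#     import zmq
+#     req = zmq.Context.instance().socket(zmq.REQ)
+#     req.recv(zmq.NOBLOCK)    # raises zmq.ZMQError with errno == zmq.EFSM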
+ + +from unittest import TestCase + +import zmq +from zmq.tests import BaseZMQTestCase, have_gevent, GreenTest + + +class TestReqRep(BaseZMQTestCase): + + def test_basic(self): + s1, s2 = self.create_bound_pair(zmq.REQ, zmq.REP) + + msg1 = b'message 1' + msg2 = self.ping_pong(s1, s2, msg1) + self.assertEqual(msg1, msg2) + + def test_multiple(self): + s1, s2 = self.create_bound_pair(zmq.REQ, zmq.REP) + + for i in range(10): + msg1 = i*b' ' + msg2 = self.ping_pong(s1, s2, msg1) + self.assertEqual(msg1, msg2) + + def test_bad_send_recv(self): + s1, s2 = self.create_bound_pair(zmq.REQ, zmq.REP) + + if zmq.zmq_version() != '2.1.8': + # this doesn't work on 2.1.8 + for copy in (True,False): + self.assertRaisesErrno(zmq.EFSM, s1.recv, copy=copy) + self.assertRaisesErrno(zmq.EFSM, s2.send, b'asdf', copy=copy) + + # I have to have this or we die on an Abort trap. + msg1 = b'asdf' + msg2 = self.ping_pong(s1, s2, msg1) + self.assertEqual(msg1, msg2) + + def test_json(self): + s1, s2 = self.create_bound_pair(zmq.REQ, zmq.REP) + o = dict(a=10,b=list(range(10))) + o2 = self.ping_pong_json(s1, s2, o) + + def test_pyobj(self): + s1, s2 = self.create_bound_pair(zmq.REQ, zmq.REP) + o = dict(a=10,b=range(10)) + o2 = self.ping_pong_pyobj(s1, s2, o) + + def test_large_msg(self): + s1, s2 = self.create_bound_pair(zmq.REQ, zmq.REP) + msg1 = 10000*b'X' + + for i in range(10): + msg2 = self.ping_pong(s1, s2, msg1) + self.assertEqual(msg1, msg2) + +if have_gevent: + class TestReqRepGreen(GreenTest, TestReqRep): + pass diff --git a/scripts/external_libs/zmq/tests/test_security.py b/scripts/external_libs/zmq/tests/test_security.py new file mode 100644 index 00000000..687b7e0f --- /dev/null +++ b/scripts/external_libs/zmq/tests/test_security.py @@ -0,0 +1,212 @@ +"""Test libzmq security (libzmq >= 3.3.0)""" +# -*- coding: utf8 -*- + +# Copyright (C) PyZMQ Developers +# Distributed under the terms of the Modified BSD License. 
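+# ZAP (the ZeroMQ Authentication Protocol) in brief, as exercised by
+# zap_handler below: libzmq connects to a handler bound at
+# inproc://zeromq.zap.01 and sends a multipart request of the form
+#
+#     [version, sequence, domain, address, identity, mechanism, credentials...]
+#
+# and the handler answers
+#
+#     [version, sequence, status_code, status_text, user_id, metadata]
+#
+# where status b"200" accepts the connection and b"400" rejects it.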
+ +import os +from threading import Thread + +import zmq +from zmq.tests import ( + BaseZMQTestCase, SkipTest, PYPY +) +from zmq.utils import z85 + + +USER = b"admin" +PASS = b"password" + +class TestSecurity(BaseZMQTestCase): + + def setUp(self): + if zmq.zmq_version_info() < (4,0): + raise SkipTest("security is new in libzmq 4.0") + try: + zmq.curve_keypair() + except zmq.ZMQError: + raise SkipTest("security requires libzmq to be linked against libsodium") + super(TestSecurity, self).setUp() + + + def zap_handler(self): + socket = self.context.socket(zmq.REP) + socket.bind("inproc://zeromq.zap.01") + try: + msg = self.recv_multipart(socket) + + version, sequence, domain, address, identity, mechanism = msg[:6] + if mechanism == b'PLAIN': + username, password = msg[6:] + elif mechanism == b'CURVE': + key = msg[6] + + self.assertEqual(version, b"1.0") + self.assertEqual(identity, b"IDENT") + reply = [version, sequence] + if mechanism == b'CURVE' or \ + (mechanism == b'PLAIN' and username == USER and password == PASS) or \ + (mechanism == b'NULL'): + reply.extend([ + b"200", + b"OK", + b"anonymous", + b"\5Hello\0\0\0\5World", + ]) + else: + reply.extend([ + b"400", + b"Invalid username or password", + b"", + b"", + ]) + socket.send_multipart(reply) + finally: + socket.close() + + def start_zap(self): + self.zap_thread = Thread(target=self.zap_handler) + self.zap_thread.start() + + def stop_zap(self): + self.zap_thread.join() + + def bounce(self, server, client, test_metadata=True): + msg = [os.urandom(64), os.urandom(64)] + client.send_multipart(msg) + frames = self.recv_multipart(server, copy=False) + recvd = list(map(lambda x: x.bytes, frames)) + + try: + if test_metadata and not PYPY: + for frame in frames: + self.assertEqual(frame.get('User-Id'), 'anonymous') + self.assertEqual(frame.get('Hello'), 'World') + self.assertEqual(frame['Socket-Type'], 'DEALER') + except zmq.ZMQVersionError: + pass + + self.assertEqual(recvd, msg) + server.send_multipart(recvd) + msg2 = self.recv_multipart(client) + self.assertEqual(msg2, msg) + + def test_null(self): + """test NULL (default) security""" + server = self.socket(zmq.DEALER) + client = self.socket(zmq.DEALER) + self.assertEqual(client.MECHANISM, zmq.NULL) + self.assertEqual(server.mechanism, zmq.NULL) + self.assertEqual(client.plain_server, 0) + self.assertEqual(server.plain_server, 0) + iface = 'tcp://127.0.0.1' + port = server.bind_to_random_port(iface) + client.connect("%s:%i" % (iface, port)) + self.bounce(server, client, False) + + def test_plain(self): + """test PLAIN authentication""" + server = self.socket(zmq.DEALER) + server.identity = b'IDENT' + client = self.socket(zmq.DEALER) + self.assertEqual(client.plain_username, b'') + self.assertEqual(client.plain_password, b'') + client.plain_username = USER + client.plain_password = PASS + self.assertEqual(client.getsockopt(zmq.PLAIN_USERNAME), USER) + self.assertEqual(client.getsockopt(zmq.PLAIN_PASSWORD), PASS) + self.assertEqual(client.plain_server, 0) + self.assertEqual(server.plain_server, 0) + server.plain_server = True + self.assertEqual(server.mechanism, zmq.PLAIN) + self.assertEqual(client.mechanism, zmq.PLAIN) + + assert not client.plain_server + assert server.plain_server + + self.start_zap() + + iface = 'tcp://127.0.0.1' + port = server.bind_to_random_port(iface) + client.connect("%s:%i" % (iface, port)) + self.bounce(server, client) + self.stop_zap() + + def skip_plain_inauth(self): + """test PLAIN failed authentication""" + server = self.socket(zmq.DEALER) + server.identity = 
b'IDENT'
+        client = self.socket(zmq.DEALER)
+        self.sockets.extend([server, client])
+        client.plain_username = USER
+        client.plain_password = b'incorrect'
+        server.plain_server = True
+        self.assertEqual(server.mechanism, zmq.PLAIN)
+        self.assertEqual(client.mechanism, zmq.PLAIN)
+
+        self.start_zap()
+
+        iface = 'tcp://127.0.0.1'
+        port = server.bind_to_random_port(iface)
+        client.connect("%s:%i" % (iface, port))
+        client.send(b'ping')
+        server.rcvtimeo = 250
+        self.assertRaisesErrno(zmq.EAGAIN, server.recv)
+        self.stop_zap()
+
+    def test_keypair(self):
+        """test curve_keypair"""
+        try:
+            public, secret = zmq.curve_keypair()
+        except zmq.ZMQError:
+            raise SkipTest("CURVE unsupported")
+
+        self.assertEqual(type(secret), bytes)
+        self.assertEqual(type(public), bytes)
+        self.assertEqual(len(secret), 40)
+        self.assertEqual(len(public), 40)
+
+        # verify that it is indeed Z85
+        bsecret, bpublic = [ z85.decode(key) for key in (secret, public) ]
+        self.assertEqual(type(bsecret), bytes)
+        self.assertEqual(type(bpublic), bytes)
+        self.assertEqual(len(bsecret), 32)
+        self.assertEqual(len(bpublic), 32)
+
+
+    def test_curve(self):
+        """test CURVE encryption"""
+        server = self.socket(zmq.DEALER)
+        server.identity = b'IDENT'
+        client = self.socket(zmq.DEALER)
+        self.sockets.extend([server, client])
+        try:
+            server.curve_server = True
+        except zmq.ZMQError as e:
+            # will raise EINVAL if not linked against libsodium
+            if e.errno == zmq.EINVAL:
+                raise SkipTest("CURVE unsupported")
+
+        server_public, server_secret = zmq.curve_keypair()
+        client_public, client_secret = zmq.curve_keypair()
+
+        server.curve_secretkey = server_secret
+        server.curve_publickey = server_public
+        client.curve_serverkey = server_public
+        client.curve_publickey = client_public
+        client.curve_secretkey = client_secret
+
+        self.assertEqual(server.mechanism, zmq.CURVE)
+        self.assertEqual(client.mechanism, zmq.CURVE)
+
+        self.assertEqual(server.get(zmq.CURVE_SERVER), True)
+        self.assertEqual(client.get(zmq.CURVE_SERVER), False)
+
+        self.start_zap()
+
+        iface = 'tcp://127.0.0.1'
+        port = server.bind_to_random_port(iface)
+        client.connect("%s:%i" % (iface, port))
+        self.bounce(server, client)
+        self.stop_zap()
+
diff --git a/scripts/external_libs/zmq/tests/test_socket.py b/scripts/external_libs/zmq/tests/test_socket.py
new file mode 100644
index 00000000..5c842edc
--- /dev/null
+++ b/scripts/external_libs/zmq/tests/test_socket.py
@@ -0,0 +1,450 @@
+# -*- coding: utf8 -*-
+# Copyright (C) PyZMQ Developers
+# Distributed under the terms of the Modified BSD License.
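+# The tests below exercise the three equivalent spellings pyzmq offers for
+# every socket option (a usage sketch, assuming a socket `s`):
+#
+#     s.setsockopt(zmq.LINGER, 0)    # classic libzmq-style call
+#     s.set(zmq.LINGER, 0)           # get/set by constant
+#     s.linger = 0                   # attribute access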
+
+import time
+import warnings
+
+import zmq
+from zmq.tests import (
+    BaseZMQTestCase, SkipTest, have_gevent, GreenTest, skip_pypy, skip_if
+)
+from zmq.utils.strtypes import bytes, unicode
+
+
+class TestSocket(BaseZMQTestCase):
+
+    def test_create(self):
+        ctx = self.Context()
+        s = ctx.socket(zmq.PUB)
+        # Superluminal protocol not yet implemented
+        self.assertRaisesErrno(zmq.EPROTONOSUPPORT, s.bind, 'ftl://a')
+        self.assertRaisesErrno(zmq.EPROTONOSUPPORT, s.connect, 'ftl://a')
+        self.assertRaisesErrno(zmq.EINVAL, s.bind, 'tcp://')
+        s.close()
+        del ctx
+
+    def test_context_manager(self):
+        url = 'inproc://a'
+        with self.Context() as ctx:
+            with ctx.socket(zmq.PUSH) as a:
+                a.bind(url)
+                with ctx.socket(zmq.PULL) as b:
+                    b.connect(url)
+                    msg = b'hi'
+                    a.send(msg)
+                    rcvd = self.recv(b)
+                    self.assertEqual(rcvd, msg)
+                self.assertEqual(b.closed, True)
+            self.assertEqual(a.closed, True)
+        self.assertEqual(ctx.closed, True)
+
+    def test_dir(self):
+        ctx = self.Context()
+        s = ctx.socket(zmq.PUB)
+        self.assertTrue('send' in dir(s))
+        self.assertTrue('IDENTITY' in dir(s))
+        self.assertTrue('AFFINITY' in dir(s))
+        self.assertTrue('FD' in dir(s))
+        s.close()
+        ctx.term()
+
+    def test_bind_unicode(self):
+        s = self.socket(zmq.PUB)
+        p = s.bind_to_random_port(unicode("tcp://*"))
+
+    def test_connect_unicode(self):
+        s = self.socket(zmq.PUB)
+        s.connect(unicode("tcp://127.0.0.1:5555"))
+
+    def test_bind_to_random_port(self):
+        # Check that bind_to_random_port does not hide useful exceptions
+        ctx = self.Context()
+        c = ctx.socket(zmq.PUB)
+        # Invalid format
+        try:
+            c.bind_to_random_port('tcp:*')
+        except zmq.ZMQError as e:
+            self.assertEqual(e.errno, zmq.EINVAL)
+        # Invalid protocol
+        try:
+            c.bind_to_random_port('rand://*')
+        except zmq.ZMQError as e:
+            self.assertEqual(e.errno, zmq.EPROTONOSUPPORT)
+
+    def test_identity(self):
+        s = self.context.socket(zmq.PULL)
+        self.sockets.append(s)
+        ident = b'identity\0\0'
+        s.identity = ident
+        self.assertEqual(s.get(zmq.IDENTITY), ident)
+
+    def test_unicode_sockopts(self):
+        """test setting/getting sockopts with unicode strings"""
+        topic = "tést"
+        if str is not unicode:
+            topic = topic.decode('utf8')
+        p,s = self.create_bound_pair(zmq.PUB, zmq.SUB)
+        self.assertEqual(s.send_unicode, s.send_unicode)
+        self.assertEqual(p.recv_unicode, p.recv_unicode)
+        self.assertRaises(TypeError, s.setsockopt, zmq.SUBSCRIBE, topic)
+        self.assertRaises(TypeError, s.setsockopt, zmq.IDENTITY, topic)
+        s.setsockopt_unicode(zmq.IDENTITY, topic, 'utf16')
+        self.assertRaises(TypeError, s.setsockopt, zmq.AFFINITY, topic)
+        s.setsockopt_unicode(zmq.SUBSCRIBE, topic)
+        self.assertRaises(TypeError, s.getsockopt_unicode, zmq.AFFINITY)
+        self.assertRaisesErrno(zmq.EINVAL, s.getsockopt_unicode, zmq.SUBSCRIBE)
+
+        identb = s.getsockopt(zmq.IDENTITY)
+        identu = identb.decode('utf16')
+        identu2 = s.getsockopt_unicode(zmq.IDENTITY, 'utf16')
+        self.assertEqual(identu, identu2)
+        time.sleep(0.1) # wait for connection/subscription
+        p.send_unicode(topic,zmq.SNDMORE)
+        p.send_unicode(topic*2, encoding='latin-1')
+        self.assertEqual(topic, s.recv_unicode())
+        self.assertEqual(topic*2, s.recv_unicode(encoding='latin-1'))
+
+    def test_int_sockopts(self):
+        "test integer sockopts"
+        v = zmq.zmq_version_info()
+        if v < (3,0):
+            default_hwm = 0
+        else:
+            default_hwm = 1000
+        p,s = self.create_bound_pair(zmq.PUB, zmq.SUB)
+        p.setsockopt(zmq.LINGER, 0)
+        self.assertEqual(p.getsockopt(zmq.LINGER), 0)
+        p.setsockopt(zmq.LINGER, -1)
+        self.assertEqual(p.getsockopt(zmq.LINGER), -1)
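+        # hwm below is the pyzmq convenience alias: on libzmq >= 3 it maps to
+        # both SNDHWM and RCVHWM (default 1000); on 2.x it maps to HWM (default 0).
+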
self.assertEqual(p.hwm, default_hwm) + p.hwm = 11 + self.assertEqual(p.hwm, 11) + # p.setsockopt(zmq.EVENTS, zmq.POLLIN) + self.assertEqual(p.getsockopt(zmq.EVENTS), zmq.POLLOUT) + self.assertRaisesErrno(zmq.EINVAL, p.setsockopt,zmq.EVENTS, 2**7-1) + self.assertEqual(p.getsockopt(zmq.TYPE), p.socket_type) + self.assertEqual(p.getsockopt(zmq.TYPE), zmq.PUB) + self.assertEqual(s.getsockopt(zmq.TYPE), s.socket_type) + self.assertEqual(s.getsockopt(zmq.TYPE), zmq.SUB) + + # check for overflow / wrong type: + errors = [] + backref = {} + constants = zmq.constants + for name in constants.__all__: + value = getattr(constants, name) + if isinstance(value, int): + backref[value] = name + for opt in zmq.constants.int_sockopts.union(zmq.constants.int64_sockopts): + sopt = backref[opt] + if sopt.startswith(( + 'ROUTER', 'XPUB', 'TCP', 'FAIL', + 'REQ_', 'CURVE_', 'PROBE_ROUTER', + 'IPC_FILTER', 'GSSAPI', + )): + # some sockopts are write-only + continue + try: + n = p.getsockopt(opt) + except zmq.ZMQError as e: + errors.append("getsockopt(zmq.%s) raised '%s'."%(sopt, e)) + else: + if n > 2**31: + errors.append("getsockopt(zmq.%s) returned a ridiculous value." + " It is probably the wrong type."%sopt) + if errors: + self.fail('\n'.join([''] + errors)) + + def test_bad_sockopts(self): + """Test that appropriate errors are raised on bad socket options""" + s = self.context.socket(zmq.PUB) + self.sockets.append(s) + s.setsockopt(zmq.LINGER, 0) + # unrecognized int sockopts pass through to libzmq, and should raise EINVAL + self.assertRaisesErrno(zmq.EINVAL, s.setsockopt, 9999, 5) + self.assertRaisesErrno(zmq.EINVAL, s.getsockopt, 9999) + # but only int sockopts are allowed through this way, otherwise raise a TypeError + self.assertRaises(TypeError, s.setsockopt, 9999, b"5") + # some sockopts are valid in general, but not on every socket: + self.assertRaisesErrno(zmq.EINVAL, s.setsockopt, zmq.SUBSCRIBE, b'hi') + + def test_sockopt_roundtrip(self): + "test set/getsockopt roundtrip." 
+ p = self.context.socket(zmq.PUB) + self.sockets.append(p) + p.setsockopt(zmq.LINGER, 11) + self.assertEqual(p.getsockopt(zmq.LINGER), 11) + + def test_send_unicode(self): + "test sending unicode objects" + a,b = self.create_bound_pair(zmq.PAIR, zmq.PAIR) + self.sockets.extend([a,b]) + u = "çπ§" + if str is not unicode: + u = u.decode('utf8') + self.assertRaises(TypeError, a.send, u,copy=False) + self.assertRaises(TypeError, a.send, u,copy=True) + a.send_unicode(u) + s = b.recv() + self.assertEqual(s,u.encode('utf8')) + self.assertEqual(s.decode('utf8'),u) + a.send_unicode(u,encoding='utf16') + s = b.recv_unicode(encoding='utf16') + self.assertEqual(s,u) + + @skip_pypy + def test_tracker(self): + "test the MessageTracker object for tracking when zmq is done with a buffer" + addr = 'tcp://127.0.0.1' + a = self.context.socket(zmq.PUB) + port = a.bind_to_random_port(addr) + a.close() + iface = "%s:%i"%(addr,port) + a = self.context.socket(zmq.PAIR) + # a.setsockopt(zmq.IDENTITY, b"a") + b = self.context.socket(zmq.PAIR) + self.sockets.extend([a,b]) + a.connect(iface) + time.sleep(0.1) + p1 = a.send(b'something', copy=False, track=True) + self.assertTrue(isinstance(p1, zmq.MessageTracker)) + self.assertFalse(p1.done) + p2 = a.send_multipart([b'something', b'else'], copy=False, track=True) + self.assert_(isinstance(p2, zmq.MessageTracker)) + self.assertEqual(p2.done, False) + self.assertEqual(p1.done, False) + + b.bind(iface) + msg = b.recv_multipart() + for i in range(10): + if p1.done: + break + time.sleep(0.1) + self.assertEqual(p1.done, True) + self.assertEqual(msg, [b'something']) + msg = b.recv_multipart() + for i in range(10): + if p2.done: + break + time.sleep(0.1) + self.assertEqual(p2.done, True) + self.assertEqual(msg, [b'something', b'else']) + m = zmq.Frame(b"again", track=True) + self.assertEqual(m.tracker.done, False) + p1 = a.send(m, copy=False) + p2 = a.send(m, copy=False) + self.assertEqual(m.tracker.done, False) + self.assertEqual(p1.done, False) + self.assertEqual(p2.done, False) + msg = b.recv_multipart() + self.assertEqual(m.tracker.done, False) + self.assertEqual(msg, [b'again']) + msg = b.recv_multipart() + self.assertEqual(m.tracker.done, False) + self.assertEqual(msg, [b'again']) + self.assertEqual(p1.done, False) + self.assertEqual(p2.done, False) + pm = m.tracker + del m + for i in range(10): + if p1.done: + break + time.sleep(0.1) + self.assertEqual(p1.done, True) + self.assertEqual(p2.done, True) + m = zmq.Frame(b'something', track=False) + self.assertRaises(ValueError, a.send, m, copy=False, track=True) + + + def test_close(self): + ctx = self.Context() + s = ctx.socket(zmq.PUB) + s.close() + self.assertRaisesErrno(zmq.ENOTSOCK, s.bind, b'') + self.assertRaisesErrno(zmq.ENOTSOCK, s.connect, b'') + self.assertRaisesErrno(zmq.ENOTSOCK, s.setsockopt, zmq.SUBSCRIBE, b'') + self.assertRaisesErrno(zmq.ENOTSOCK, s.send, b'asdf') + self.assertRaisesErrno(zmq.ENOTSOCK, s.recv) + del ctx + + def test_attr(self): + """set setting/getting sockopts as attributes""" + s = self.context.socket(zmq.DEALER) + self.sockets.append(s) + linger = 10 + s.linger = linger + self.assertEqual(linger, s.linger) + self.assertEqual(linger, s.getsockopt(zmq.LINGER)) + self.assertEqual(s.fd, s.getsockopt(zmq.FD)) + + def test_bad_attr(self): + s = self.context.socket(zmq.DEALER) + self.sockets.append(s) + try: + s.apple='foo' + except AttributeError: + pass + else: + self.fail("bad setattr should have raised AttributeError") + try: + s.apple + except AttributeError: + pass + else: + 
self.fail("bad getattr should have raised AttributeError") + + def test_subclass(self): + """subclasses can assign attributes""" + class S(zmq.Socket): + a = None + def __init__(self, *a, **kw): + self.a=-1 + super(S, self).__init__(*a, **kw) + + s = S(self.context, zmq.REP) + self.sockets.append(s) + self.assertEqual(s.a, -1) + s.a=1 + self.assertEqual(s.a, 1) + a=s.a + self.assertEqual(a, 1) + + def test_recv_multipart(self): + a,b = self.create_bound_pair() + msg = b'hi' + for i in range(3): + a.send(msg) + time.sleep(0.1) + for i in range(3): + self.assertEqual(b.recv_multipart(), [msg]) + + def test_close_after_destroy(self): + """s.close() after ctx.destroy() should be fine""" + ctx = self.Context() + s = ctx.socket(zmq.REP) + ctx.destroy() + # reaper is not instantaneous + time.sleep(1e-2) + s.close() + self.assertTrue(s.closed) + + def test_poll(self): + a,b = self.create_bound_pair() + tic = time.time() + evt = a.poll(50) + self.assertEqual(evt, 0) + evt = a.poll(50, zmq.POLLOUT) + self.assertEqual(evt, zmq.POLLOUT) + msg = b'hi' + a.send(msg) + evt = b.poll(50) + self.assertEqual(evt, zmq.POLLIN) + msg2 = self.recv(b) + evt = b.poll(50) + self.assertEqual(evt, 0) + self.assertEqual(msg2, msg) + + def test_ipc_path_max_length(self): + """IPC_PATH_MAX_LEN is a sensible value""" + if zmq.IPC_PATH_MAX_LEN == 0: + raise SkipTest("IPC_PATH_MAX_LEN undefined") + + msg = "Surprising value for IPC_PATH_MAX_LEN: %s" % zmq.IPC_PATH_MAX_LEN + self.assertTrue(zmq.IPC_PATH_MAX_LEN > 30, msg) + self.assertTrue(zmq.IPC_PATH_MAX_LEN < 1025, msg) + + def test_ipc_path_max_length_msg(self): + if zmq.IPC_PATH_MAX_LEN == 0: + raise SkipTest("IPC_PATH_MAX_LEN undefined") + + s = self.context.socket(zmq.PUB) + self.sockets.append(s) + try: + s.bind('ipc://{0}'.format('a' * (zmq.IPC_PATH_MAX_LEN + 1))) + except zmq.ZMQError as e: + self.assertTrue(str(zmq.IPC_PATH_MAX_LEN) in e.strerror) + + def test_hwm(self): + zmq3 = zmq.zmq_version_info()[0] >= 3 + for stype in (zmq.PUB, zmq.ROUTER, zmq.SUB, zmq.REQ, zmq.DEALER): + s = self.context.socket(stype) + s.hwm = 100 + self.assertEqual(s.hwm, 100) + if zmq3: + try: + self.assertEqual(s.sndhwm, 100) + except AttributeError: + pass + try: + self.assertEqual(s.rcvhwm, 100) + except AttributeError: + pass + s.close() + + def test_shadow(self): + p = self.socket(zmq.PUSH) + p.bind("tcp://127.0.0.1:5555") + p2 = zmq.Socket.shadow(p.underlying) + self.assertEqual(p.underlying, p2.underlying) + s = self.socket(zmq.PULL) + s2 = zmq.Socket.shadow(s.underlying) + self.assertNotEqual(s.underlying, p.underlying) + self.assertEqual(s.underlying, s2.underlying) + s2.connect("tcp://127.0.0.1:5555") + sent = b'hi' + p2.send(sent) + rcvd = self.recv(s2) + self.assertEqual(rcvd, sent) + + def test_shadow_pyczmq(self): + try: + from pyczmq import zctx, zsocket + except Exception: + raise SkipTest("Requires pyczmq") + + ctx = zctx.new() + ca = zsocket.new(ctx, zmq.PUSH) + cb = zsocket.new(ctx, zmq.PULL) + a = zmq.Socket.shadow(ca) + b = zmq.Socket.shadow(cb) + a.bind("inproc://a") + b.connect("inproc://a") + a.send(b'hi') + rcvd = self.recv(b) + self.assertEqual(rcvd, b'hi') + + +if have_gevent: + import gevent + + class TestSocketGreen(GreenTest, TestSocket): + test_bad_attr = GreenTest.skip_green + test_close_after_destroy = GreenTest.skip_green + + def test_timeout(self): + a,b = self.create_bound_pair() + g = gevent.spawn_later(0.5, lambda: a.send(b'hi')) + timeout = gevent.Timeout(0.1) + timeout.start() + self.assertRaises(gevent.Timeout, b.recv) + g.kill() + + 
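+    # (Green sockets implement send/recv timeouts with gevent rather than
+    # libzmq's RCVTIMEO/SNDTIMEO, so touching those options emits a
+    # UserWarning, which the next two tests assert.)
+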
@skip_if(not hasattr(zmq, 'RCVTIMEO')) + def test_warn_set_timeo(self): + s = self.context.socket(zmq.REQ) + with warnings.catch_warnings(record=True) as w: + s.rcvtimeo = 5 + s.close() + self.assertEqual(len(w), 1) + self.assertEqual(w[0].category, UserWarning) + + + @skip_if(not hasattr(zmq, 'SNDTIMEO')) + def test_warn_get_timeo(self): + s = self.context.socket(zmq.REQ) + with warnings.catch_warnings(record=True) as w: + s.sndtimeo + s.close() + self.assertEqual(len(w), 1) + self.assertEqual(w[0].category, UserWarning) diff --git a/scripts/external_libs/zmq/tests/test_stopwatch.py b/scripts/external_libs/zmq/tests/test_stopwatch.py new file mode 100644 index 00000000..49fb79f2 --- /dev/null +++ b/scripts/external_libs/zmq/tests/test_stopwatch.py @@ -0,0 +1,42 @@ +# -*- coding: utf8 -*- +# Copyright (C) PyZMQ Developers +# Distributed under the terms of the Modified BSD License. + + +import sys +import time + +from unittest import TestCase + +from zmq import Stopwatch, ZMQError + +if sys.version_info[0] >= 3: + long = int + +class TestStopWatch(TestCase): + + def test_stop_long(self): + """Ensure stop returns a long int.""" + watch = Stopwatch() + watch.start() + us = watch.stop() + self.assertTrue(isinstance(us, long)) + + def test_stop_microseconds(self): + """Test that stop/sleep have right units.""" + watch = Stopwatch() + watch.start() + tic = time.time() + watch.sleep(1) + us = watch.stop() + toc = time.time() + self.assertAlmostEqual(us/1e6,(toc-tic),places=0) + + def test_double_stop(self): + """Test error raised on multiple calls to stop.""" + watch = Stopwatch() + watch.start() + watch.stop() + self.assertRaises(ZMQError, watch.stop) + self.assertRaises(ZMQError, watch.stop) + diff --git a/scripts/external_libs/zmq/tests/test_version.py b/scripts/external_libs/zmq/tests/test_version.py new file mode 100644 index 00000000..6ebebf30 --- /dev/null +++ b/scripts/external_libs/zmq/tests/test_version.py @@ -0,0 +1,44 @@ +# Copyright (C) PyZMQ Developers +# Distributed under the terms of the Modified BSD License. 
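+# Conventions checked below, in brief: pyzmq_version() is a string such as
+# '14.5.0' (suffixed with '@<revision>' for dev checkouts), and
+# pyzmq_version_info() is the matching int tuple, padded with float('inf')
+# when a pre-release VERSION_EXTRA is present so tuple comparisons still work.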
+
+
+from unittest import TestCase
+import zmq
+from zmq.sugar import version
+
+
+class TestVersion(TestCase):
+
+    def test_pyzmq_version(self):
+        vs = zmq.pyzmq_version()
+        vs2 = zmq.__version__
+        self.assertTrue(isinstance(vs, str))
+        if zmq.__revision__:
+            self.assertEqual(vs, '@'.join([vs2, zmq.__revision__]))
+        else:
+            self.assertEqual(vs, vs2)
+        if version.VERSION_EXTRA:
+            self.assertTrue(version.VERSION_EXTRA in vs)
+            self.assertTrue(version.VERSION_EXTRA in vs2)
+
+    def test_pyzmq_version_info(self):
+        info = zmq.pyzmq_version_info()
+        self.assertTrue(isinstance(info, tuple))
+        for n in info[:3]:
+            self.assertTrue(isinstance(n, int))
+        if version.VERSION_EXTRA:
+            self.assertEqual(len(info), 4)
+            self.assertEqual(info[-1], float('inf'))
+        else:
+            self.assertEqual(len(info), 3)
+
+    def test_zmq_version_info(self):
+        info = zmq.zmq_version_info()
+        self.assertTrue(isinstance(info, tuple))
+        for n in info[:3]:
+            self.assertTrue(isinstance(n, int))
+
+    def test_zmq_version(self):
+        v = zmq.zmq_version()
+        self.assertTrue(isinstance(v, str))
+
diff --git a/scripts/external_libs/zmq/tests/test_win32_shim.py b/scripts/external_libs/zmq/tests/test_win32_shim.py
new file mode 100644
index 00000000..55657bda
--- /dev/null
+++ b/scripts/external_libs/zmq/tests/test_win32_shim.py
@@ -0,0 +1,56 @@
+from __future__ import print_function
+
+import os
+
+from functools import wraps
+from zmq.tests import BaseZMQTestCase
+from zmq.utils.win32 import allow_interrupt
+
+
+def count_calls(f):
+    @wraps(f)
+    def _(*args, **kwds):
+        try:
+            return f(*args, **kwds)
+        finally:
+            _.__calls__ += 1
+    _.__calls__ = 0
+    return _
+
+
+class TestWindowsConsoleControlHandler(BaseZMQTestCase):
+
+    def test_handler(self):
+        @count_calls
+        def interrupt_polling():
+            print('Caught CTRL-C!')
+
+        if os.name == 'nt':
+            from ctypes import windll
+            from ctypes.wintypes import BOOL, DWORD
+
+            kernel32 = windll.LoadLibrary('kernel32')
+
+            # <http://msdn.microsoft.com/en-us/library/ms683155.aspx>
+            GenerateConsoleCtrlEvent = kernel32.GenerateConsoleCtrlEvent
+            GenerateConsoleCtrlEvent.argtypes = (DWORD, DWORD)
+            GenerateConsoleCtrlEvent.restype = BOOL
+
+            try:
+                # Simulate CTRL-C event while handler is active.
+                with allow_interrupt(interrupt_polling):
+                    result = GenerateConsoleCtrlEvent(0, 0)
+                    if result == 0:
+                        raise WindowsError
+            except KeyboardInterrupt:
+                pass
+            else:
+                self.fail('Expecting `KeyboardInterrupt` exception!')
+
+            # Make sure our handler was called.
+            self.assertEqual(interrupt_polling.__calls__, 1)
+        else:
+            # On non-Windows systems, this utility is just a no-op!
+            with allow_interrupt(interrupt_polling):
+                pass
+            self.assertEqual(interrupt_polling.__calls__, 0)
diff --git a/scripts/external_libs/zmq/tests/test_z85.py b/scripts/external_libs/zmq/tests/test_z85.py
new file mode 100644
index 00000000..8a73cb4d
--- /dev/null
+++ b/scripts/external_libs/zmq/tests/test_z85.py
@@ -0,0 +1,63 @@
+# -*- coding: utf8 -*-
+"""Test Z85 encoding
+
+Confirm values and roundtrip with test values from the reference implementation.
+"""
+
+# Copyright (C) PyZMQ Developers
+# Distributed under the terms of the Modified BSD License.
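+# Z85 maps every 4 raw bytes to 5 printable ASCII characters, so the 32-byte
+# CURVE keys below round-trip as 40-character strings. A minimal sketch:
+#
+#     from zmq.utils import z85
+#     raw = bytes(bytearray(range(32)))   # any 32-byte key
+#     enc = z85.encode(raw)               # 40 ASCII bytes
+#     assert z85.decode(enc) == raw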
+ +from unittest import TestCase +from zmq.utils import z85 + + +class TestZ85(TestCase): + + def test_client_public(self): + client_public = \ + b"\xBB\x88\x47\x1D\x65\xE2\x65\x9B" \ + b"\x30\xC5\x5A\x53\x21\xCE\xBB\x5A" \ + b"\xAB\x2B\x70\xA3\x98\x64\x5C\x26" \ + b"\xDC\xA2\xB2\xFC\xB4\x3F\xC5\x18" + encoded = z85.encode(client_public) + + self.assertEqual(encoded, b"Yne@$w-vo<fVvi]a<NY6T1ed:M$fCG*[IaLV{hID") + decoded = z85.decode(encoded) + self.assertEqual(decoded, client_public) + + def test_client_secret(self): + client_secret = \ + b"\x7B\xB8\x64\xB4\x89\xAF\xA3\x67" \ + b"\x1F\xBE\x69\x10\x1F\x94\xB3\x89" \ + b"\x72\xF2\x48\x16\xDF\xB0\x1B\x51" \ + b"\x65\x6B\x3F\xEC\x8D\xFD\x08\x88" + encoded = z85.encode(client_secret) + + self.assertEqual(encoded, b"D:)Q[IlAW!ahhC2ac:9*A}h:p?([4%wOTJ%JR%cs") + decoded = z85.decode(encoded) + self.assertEqual(decoded, client_secret) + + def test_server_public(self): + server_public = \ + b"\x54\xFC\xBA\x24\xE9\x32\x49\x96" \ + b"\x93\x16\xFB\x61\x7C\x87\x2B\xB0" \ + b"\xC1\xD1\xFF\x14\x80\x04\x27\xC5" \ + b"\x94\xCB\xFA\xCF\x1B\xC2\xD6\x52" + encoded = z85.encode(server_public) + + self.assertEqual(encoded, b"rq:rM>}U?@Lns47E1%kR.o@n%FcmmsL/@{H8]yf7") + decoded = z85.decode(encoded) + self.assertEqual(decoded, server_public) + + def test_server_secret(self): + server_secret = \ + b"\x8E\x0B\xDD\x69\x76\x28\xB9\x1D" \ + b"\x8F\x24\x55\x87\xEE\x95\xC5\xB0" \ + b"\x4D\x48\x96\x3F\x79\x25\x98\x77" \ + b"\xB4\x9C\xD9\x06\x3A\xEA\xD3\xB7" + encoded = z85.encode(server_secret) + + self.assertEqual(encoded, b"JTKVSB%%)wK0E.X)V>+}o?pNmC{O&4W4b!Ni{Lh6") + decoded = z85.decode(encoded) + self.assertEqual(decoded, server_secret) + diff --git a/scripts/external_libs/zmq/tests/test_zmqstream.py b/scripts/external_libs/zmq/tests/test_zmqstream.py new file mode 100644 index 00000000..cdb3a171 --- /dev/null +++ b/scripts/external_libs/zmq/tests/test_zmqstream.py @@ -0,0 +1,34 @@ +# -*- coding: utf8 -*- +# Copyright (C) PyZMQ Developers +# Distributed under the terms of the Modified BSD License. + + +import sys +import time + +from unittest import TestCase + +import zmq +from zmq.eventloop import ioloop, zmqstream + +class TestZMQStream(TestCase): + + def setUp(self): + self.context = zmq.Context() + self.socket = self.context.socket(zmq.REP) + self.loop = ioloop.IOLoop.instance() + self.stream = zmqstream.ZMQStream(self.socket) + + def tearDown(self): + self.socket.close() + self.context.term() + + def test_callable_check(self): + """Ensure callable check works (py3k).""" + + self.stream.on_send(lambda *args: None) + self.stream.on_recv(lambda *args: None) + self.assertRaises(AssertionError, self.stream.on_recv, 1) + self.assertRaises(AssertionError, self.stream.on_send, 1) + self.assertRaises(AssertionError, self.stream.on_recv, zmq) + diff --git a/scripts/external_libs/zmq/utils/__init__.py b/scripts/external_libs/zmq/utils/__init__.py new file mode 100644 index 00000000..e69de29b --- /dev/null +++ b/scripts/external_libs/zmq/utils/__init__.py diff --git a/scripts/external_libs/zmq/utils/buffers.pxd b/scripts/external_libs/zmq/utils/buffers.pxd new file mode 100644 index 00000000..998aa551 --- /dev/null +++ b/scripts/external_libs/zmq/utils/buffers.pxd @@ -0,0 +1,313 @@ +"""Python version-independent methods for C/Python buffers. + +This file was copied and adapted from mpi4py. + +Authors +------- +* MinRK +""" + +#----------------------------------------------------------------------------- +# Copyright (c) 2010 Lisandro Dalcin +# All rights reserved. 
+# Used under BSD License: http://www.opensource.org/licenses/bsd-license.php +# +# Retrieval: +# Jul 23, 2010 18:00 PST (r539) +# http://code.google.com/p/mpi4py/source/browse/trunk/src/MPI/asbuffer.pxi +# +# Modifications from original: +# Copyright (c) 2010-2012 Brian Granger, Min Ragan-Kelley +# +# Distributed under the terms of the New BSD License. The full license is in +# the file COPYING.BSD, distributed as part of this software. +#----------------------------------------------------------------------------- + + +#----------------------------------------------------------------------------- +# Python includes. +#----------------------------------------------------------------------------- + +# get version-independent aliases: +cdef extern from "pyversion_compat.h": + pass + +# Python 3 buffer interface (PEP 3118) +cdef extern from "Python.h": + int PY_MAJOR_VERSION + int PY_MINOR_VERSION + ctypedef int Py_ssize_t + ctypedef struct PyMemoryViewObject: + pass + ctypedef struct Py_buffer: + void *buf + Py_ssize_t len + int readonly + char *format + int ndim + Py_ssize_t *shape + Py_ssize_t *strides + Py_ssize_t *suboffsets + Py_ssize_t itemsize + void *internal + cdef enum: + PyBUF_SIMPLE + PyBUF_WRITABLE + PyBUF_FORMAT + PyBUF_ANY_CONTIGUOUS + int PyObject_CheckBuffer(object) + int PyObject_GetBuffer(object, Py_buffer *, int) except -1 + void PyBuffer_Release(Py_buffer *) + + int PyBuffer_FillInfo(Py_buffer *view, object obj, void *buf, + Py_ssize_t len, int readonly, int infoflags) except -1 + object PyMemoryView_FromBuffer(Py_buffer *info) + + object PyMemoryView_FromObject(object) + +# Python 2 buffer interface (legacy) +cdef extern from "Python.h": + ctypedef void const_void "const void" + Py_ssize_t Py_END_OF_BUFFER + int PyObject_CheckReadBuffer(object) + int PyObject_AsReadBuffer (object, const_void **, Py_ssize_t *) except -1 + int PyObject_AsWriteBuffer(object, void **, Py_ssize_t *) except -1 + + object PyBuffer_FromMemory(void *ptr, Py_ssize_t s) + object PyBuffer_FromReadWriteMemory(void *ptr, Py_ssize_t s) + + object PyBuffer_FromObject(object, Py_ssize_t offset, Py_ssize_t size) + object PyBuffer_FromReadWriteObject(object, Py_ssize_t offset, Py_ssize_t size) + + +#----------------------------------------------------------------------------- +# asbuffer: C buffer from python object +#----------------------------------------------------------------------------- + + +cdef inline int memoryview_available(): + return PY_MAJOR_VERSION >= 3 or (PY_MAJOR_VERSION >=2 and PY_MINOR_VERSION >= 7) + +cdef inline int oldstyle_available(): + return PY_MAJOR_VERSION < 3 + + +cdef inline int check_buffer(object ob): + """Version independent check for whether an object is a buffer. + + Parameters + ---------- + object : object + Any Python object + + Returns + ------- + int : 0 if no buffer interface, 3 if newstyle buffer interface, 2 if oldstyle. + """ + if PyObject_CheckBuffer(ob): + return 3 + if oldstyle_available(): + return PyObject_CheckReadBuffer(ob) and 2 + return 0 + + +cdef inline object asbuffer(object ob, int writable, int format, + void **base, Py_ssize_t *size, + Py_ssize_t *itemsize): + """Turn an object into a C buffer in a Python version-independent way. + + Parameters + ---------- + ob : object + The object to be turned into a buffer. + Must provide a Python Buffer interface + writable : int + Whether the resulting buffer should be allowed to write + to the object. + format : int + The format of the buffer. See Python buffer docs. 
+ base : void ** + The pointer that will be used to store the resulting C buffer. + size : Py_ssize_t * + The size of the buffer(s). + itemsize : Py_ssize_t * + The size of an item, if the buffer is non-contiguous. + + Returns + ------- + An object describing the buffer format. Generally a str, such as 'B'. + """ + + cdef void *bptr = NULL + cdef Py_ssize_t blen = 0, bitemlen = 0 + cdef Py_buffer view + cdef int flags = PyBUF_SIMPLE + cdef int mode = 0 + + bfmt = None + + mode = check_buffer(ob) + if mode == 0: + raise TypeError("%r does not provide a buffer interface."%ob) + + if mode == 3: + flags = PyBUF_ANY_CONTIGUOUS + if writable: + flags |= PyBUF_WRITABLE + if format: + flags |= PyBUF_FORMAT + PyObject_GetBuffer(ob, &view, flags) + bptr = view.buf + blen = view.len + if format: + if view.format != NULL: + bfmt = view.format + bitemlen = view.itemsize + PyBuffer_Release(&view) + else: # oldstyle + if writable: + PyObject_AsWriteBuffer(ob, &bptr, &blen) + else: + PyObject_AsReadBuffer(ob, <const_void **>&bptr, &blen) + if format: + try: # numpy.ndarray + dtype = ob.dtype + bfmt = dtype.char + bitemlen = dtype.itemsize + except AttributeError: + try: # array.array + bfmt = ob.typecode + bitemlen = ob.itemsize + except AttributeError: + if isinstance(ob, bytes): + bfmt = b"B" + bitemlen = 1 + else: + # nothing found + bfmt = None + bitemlen = 0 + if base: base[0] = <void *>bptr + if size: size[0] = <Py_ssize_t>blen + if itemsize: itemsize[0] = <Py_ssize_t>bitemlen + + if PY_MAJOR_VERSION >= 3 and bfmt is not None: + return bfmt.decode('ascii') + return bfmt + + +cdef inline object asbuffer_r(object ob, void **base, Py_ssize_t *size): + """Wrapper for standard calls to asbuffer with a readonly buffer.""" + asbuffer(ob, 0, 0, base, size, NULL) + return ob + + +cdef inline object asbuffer_w(object ob, void **base, Py_ssize_t *size): + """Wrapper for standard calls to asbuffer with a writable buffer.""" + asbuffer(ob, 1, 0, base, size, NULL) + return ob + +#------------------------------------------------------------------------------ +# frombuffer: python buffer/view from C buffer +#------------------------------------------------------------------------------ + + +cdef inline object frombuffer_3(void *ptr, Py_ssize_t s, int readonly): + """Python 3 version of frombuffer. + + This is the Python 3 model, but will work on Python >= 2.6. Currently, + we use it only on >= 3.0. + """ + cdef Py_buffer pybuf + cdef Py_ssize_t *shape = [s] + cdef str astr="" + PyBuffer_FillInfo(&pybuf, astr, ptr, s, readonly, PyBUF_SIMPLE) + pybuf.format = "B" + pybuf.shape = shape + return PyMemoryView_FromBuffer(&pybuf) + + +cdef inline object frombuffer_2(void *ptr, Py_ssize_t s, int readonly): + """Python 2 version of frombuffer. + + This must be used for Python <= 2.6, but we use it for all Python < 3. + """ + + if oldstyle_available(): + if readonly: + return PyBuffer_FromMemory(ptr, s) + else: + return PyBuffer_FromReadWriteMemory(ptr, s) + else: + raise NotImplementedError("Old style buffers not available.") + + +cdef inline object frombuffer(void *ptr, Py_ssize_t s, int readonly): + """Create a Python Buffer/View of a C array. + + Parameters + ---------- + ptr : void * + Pointer to the array to be copied. + s : size_t + Length of the buffer. + readonly : int + whether the resulting object should be allowed to write to the buffer. + + Returns + ------- + Python Buffer/View of the C buffer. 
+    """
+    # oldstyle first priority for now
+    if oldstyle_available():
+        return frombuffer_2(ptr, s, readonly)
+    else:
+        return frombuffer_3(ptr, s, readonly)
+
+
+cdef inline object frombuffer_r(void *ptr, Py_ssize_t s):
+    """Wrapper for readonly view frombuffer."""
+    return frombuffer(ptr, s, 1)
+
+
+cdef inline object frombuffer_w(void *ptr, Py_ssize_t s):
+    """Wrapper for writable view frombuffer."""
+    return frombuffer(ptr, s, 0)
+
+#------------------------------------------------------------------------------
+# viewfromobject: python buffer/view from python object, refcounts intact
+# frombuffer(asbuffer(obj)) would lose track of refs
+#------------------------------------------------------------------------------
+
+cdef inline object viewfromobject(object obj, int readonly):
+    """Construct a Python Buffer/View object from another Python object.
+
+    This works in a Python version-independent manner.
+
+    Parameters
+    ----------
+    obj : object
+        The input object to be cast as a buffer
+    readonly : int
+        Whether the result should be prevented from overwriting the original.
+
+    Returns
+    -------
+    Buffer/View of the original object.
+    """
+    if not memoryview_available():
+        if readonly:
+            return PyBuffer_FromObject(obj, 0, Py_END_OF_BUFFER)
+        else:
+            return PyBuffer_FromReadWriteObject(obj, 0, Py_END_OF_BUFFER)
+    else:
+        return PyMemoryView_FromObject(obj)
+
+
+cdef inline object viewfromobject_r(object obj):
+    """Wrapper for readonly viewfromobject."""
+    return viewfromobject(obj, 1)
+
+
+cdef inline object viewfromobject_w(object obj):
+    """Wrapper for writable viewfromobject."""
+    return viewfromobject(obj, 0)
diff --git a/scripts/external_libs/zmq/utils/compiler.json b/scripts/external_libs/zmq/utils/compiler.json
new file mode 100644
index 00000000..e58fc130
--- /dev/null
+++ b/scripts/external_libs/zmq/utils/compiler.json
@@ -0,0 +1,24 @@
+{
+  "extra_link_args": [],
+  "define_macros": [
+    [
+      "HAVE_SYS_UN_H",
+      1
+    ]
+  ],
+  "runtime_library_dirs": [
+    "$ORIGIN/.."
+  ],
+  "libraries": [
+    "zmq"
+  ],
+  "library_dirs": [
+    "zmq"
+  ],
+  "include_dirs": [
+    "/auto/srg-sce-swinfra-usr/emb/users/hhaim/work/depot/asr1k/emb/private/bpsim/main/src/zmq/include",
+    "zmq/utils",
+    "zmq/backend/cython",
+    "zmq/devices"
+  ]
+}
\ No newline at end of file diff --git a/scripts/external_libs/zmq/utils/config.json b/scripts/external_libs/zmq/utils/config.json new file mode 100644 index 00000000..1e4611f9 --- /dev/null +++ b/scripts/external_libs/zmq/utils/config.json @@ -0,0 +1,10 @@ +{ + "have_sys_un_h": true, + "zmq_prefix": "/auto/srg-sce-swinfra-usr/emb/users/hhaim/work/depot/asr1k/emb/private/bpsim/main/src/zmq", + "no_libzmq_extension": true, + "libzmq_extension": false, + "easy_install": {}, + "bdist_egg": {}, + "skip_check_zmq": false, + "build_ext": {} +}
\ No newline at end of file diff --git a/scripts/external_libs/zmq/utils/constant_names.py b/scripts/external_libs/zmq/utils/constant_names.py new file mode 100644 index 00000000..47da9dc2 --- /dev/null +++ b/scripts/external_libs/zmq/utils/constant_names.py @@ -0,0 +1,365 @@ +"""0MQ Constant names""" + +# Copyright (C) PyZMQ Developers +# Distributed under the terms of the Modified BSD License. + +# dictionaries of constants new or removed in particular versions + +new_in = { + (2,2,0) : [ + 'RCVTIMEO', + 'SNDTIMEO', + ], + (3,2,2) : [ + # errnos + 'EMSGSIZE', + 'EAFNOSUPPORT', + 'ENETUNREACH', + 'ECONNABORTED', + 'ECONNRESET', + 'ENOTCONN', + 'ETIMEDOUT', + 'EHOSTUNREACH', + 'ENETRESET', + + # ctx opts + 'IO_THREADS', + 'MAX_SOCKETS', + 'IO_THREADS_DFLT', + 'MAX_SOCKETS_DFLT', + + # socket opts + 'ROUTER_BEHAVIOR', + 'ROUTER_MANDATORY', + 'FAIL_UNROUTABLE', + 'TCP_KEEPALIVE', + 'TCP_KEEPALIVE_CNT', + 'TCP_KEEPALIVE_IDLE', + 'TCP_KEEPALIVE_INTVL', + 'DELAY_ATTACH_ON_CONNECT', + 'XPUB_VERBOSE', + + # msg opts + 'MORE', + + 'EVENT_CONNECTED', + 'EVENT_CONNECT_DELAYED', + 'EVENT_CONNECT_RETRIED', + 'EVENT_LISTENING', + 'EVENT_BIND_FAILED', + 'EVENT_ACCEPTED', + 'EVENT_ACCEPT_FAILED', + 'EVENT_CLOSED', + 'EVENT_CLOSE_FAILED', + 'EVENT_DISCONNECTED', + 'EVENT_ALL', + ], + (4,0,0) : [ + # socket types + 'STREAM', + + # socket opts + 'IMMEDIATE', + 'ROUTER_RAW', + 'IPV6', + 'MECHANISM', + 'PLAIN_SERVER', + 'PLAIN_USERNAME', + 'PLAIN_PASSWORD', + 'CURVE_SERVER', + 'CURVE_PUBLICKEY', + 'CURVE_SECRETKEY', + 'CURVE_SERVERKEY', + 'PROBE_ROUTER', + 'REQ_RELAXED', + 'REQ_CORRELATE', + 'CONFLATE', + 'ZAP_DOMAIN', + + # security + 'NULL', + 'PLAIN', + 'CURVE', + + # events + 'EVENT_MONITOR_STOPPED', + ], + (4,1,0) : [ + # ctx opts + 'SOCKET_LIMIT', + 'THREAD_PRIORITY', + 'THREAD_PRIORITY_DFLT', + 'THREAD_SCHED_POLICY', + 'THREAD_SCHED_POLICY_DFLT', + + # socket opts + 'ROUTER_HANDOVER', + 'TOS', + 'IPC_FILTER_PID', + 'IPC_FILTER_UID', + 'IPC_FILTER_GID', + 'CONNECT_RID', + 'GSSAPI_SERVER', + 'GSSAPI_PRINCIPAL', + 'GSSAPI_SERVICE_PRINCIPAL', + 'GSSAPI_PLAINTEXT', + 'HANDSHAKE_IVL', + 'IDENTITY_FD', + 'XPUB_NODROP', + 'SOCKS_PROXY', + + # msg opts + 'SRCFD', + 'SHARED', + + # security + 'GSSAPI', + + ], +} + + +removed_in = { + (3,2,2) : [ + 'UPSTREAM', + 'DOWNSTREAM', + + 'HWM', + 'SWAP', + 'MCAST_LOOP', + 'RECOVERY_IVL_MSEC', + ] +} + +# collections of zmq constant names based on their role +# base names have no specific use +# opt names are validated in get/set methods of various objects + +base_names = [ + # base + 'VERSION', + 'VERSION_MAJOR', + 'VERSION_MINOR', + 'VERSION_PATCH', + 'NOBLOCK', + 'DONTWAIT', + + 'POLLIN', + 'POLLOUT', + 'POLLERR', + + 'SNDMORE', + + 'STREAMER', + 'FORWARDER', + 'QUEUE', + + 'IO_THREADS_DFLT', + 'MAX_SOCKETS_DFLT', + 'POLLITEMS_DFLT', + 'THREAD_PRIORITY_DFLT', + 'THREAD_SCHED_POLICY_DFLT', + + # socktypes + 'PAIR', + 'PUB', + 'SUB', + 'REQ', + 'REP', + 'DEALER', + 'ROUTER', + 'XREQ', + 'XREP', + 'PULL', + 'PUSH', + 'XPUB', + 'XSUB', + 'UPSTREAM', + 'DOWNSTREAM', + 'STREAM', + + # events + 'EVENT_CONNECTED', + 'EVENT_CONNECT_DELAYED', + 'EVENT_CONNECT_RETRIED', + 'EVENT_LISTENING', + 'EVENT_BIND_FAILED', + 'EVENT_ACCEPTED', + 'EVENT_ACCEPT_FAILED', + 'EVENT_CLOSED', + 'EVENT_CLOSE_FAILED', + 'EVENT_DISCONNECTED', + 'EVENT_ALL', + 'EVENT_MONITOR_STOPPED', + + # security + 'NULL', + 'PLAIN', + 'CURVE', + 'GSSAPI', + + ## ERRNO + # Often used (these are alse in errno.) 
+ 'EAGAIN', + 'EINVAL', + 'EFAULT', + 'ENOMEM', + 'ENODEV', + 'EMSGSIZE', + 'EAFNOSUPPORT', + 'ENETUNREACH', + 'ECONNABORTED', + 'ECONNRESET', + 'ENOTCONN', + 'ETIMEDOUT', + 'EHOSTUNREACH', + 'ENETRESET', + + # For Windows compatability + 'HAUSNUMERO', + 'ENOTSUP', + 'EPROTONOSUPPORT', + 'ENOBUFS', + 'ENETDOWN', + 'EADDRINUSE', + 'EADDRNOTAVAIL', + 'ECONNREFUSED', + 'EINPROGRESS', + 'ENOTSOCK', + + # 0MQ Native + 'EFSM', + 'ENOCOMPATPROTO', + 'ETERM', + 'EMTHREAD', +] + +int64_sockopt_names = [ + 'AFFINITY', + 'MAXMSGSIZE', + + # sockopts removed in 3.0.0 + 'HWM', + 'SWAP', + 'MCAST_LOOP', + 'RECOVERY_IVL_MSEC', +] + +bytes_sockopt_names = [ + 'IDENTITY', + 'SUBSCRIBE', + 'UNSUBSCRIBE', + 'LAST_ENDPOINT', + 'TCP_ACCEPT_FILTER', + + 'PLAIN_USERNAME', + 'PLAIN_PASSWORD', + + 'CURVE_PUBLICKEY', + 'CURVE_SECRETKEY', + 'CURVE_SERVERKEY', + 'ZAP_DOMAIN', + 'CONNECT_RID', + 'GSSAPI_PRINCIPAL', + 'GSSAPI_SERVICE_PRINCIPAL', + 'SOCKS_PROXY', +] + +fd_sockopt_names = [ + 'FD', + 'IDENTITY_FD', +] + +int_sockopt_names = [ + # sockopts + 'RECONNECT_IVL_MAX', + + # sockopts new in 2.2.0 + 'SNDTIMEO', + 'RCVTIMEO', + + # new in 3.x + 'SNDHWM', + 'RCVHWM', + 'MULTICAST_HOPS', + 'IPV4ONLY', + + 'ROUTER_BEHAVIOR', + 'TCP_KEEPALIVE', + 'TCP_KEEPALIVE_CNT', + 'TCP_KEEPALIVE_IDLE', + 'TCP_KEEPALIVE_INTVL', + 'DELAY_ATTACH_ON_CONNECT', + 'XPUB_VERBOSE', + + 'EVENTS', + 'TYPE', + 'LINGER', + 'RECONNECT_IVL', + 'BACKLOG', + + 'ROUTER_MANDATORY', + 'FAIL_UNROUTABLE', + + 'ROUTER_RAW', + 'IMMEDIATE', + 'IPV6', + 'MECHANISM', + 'PLAIN_SERVER', + 'CURVE_SERVER', + 'PROBE_ROUTER', + 'REQ_RELAXED', + 'REQ_CORRELATE', + 'CONFLATE', + 'ROUTER_HANDOVER', + 'TOS', + 'IPC_FILTER_PID', + 'IPC_FILTER_UID', + 'IPC_FILTER_GID', + 'GSSAPI_SERVER', + 'GSSAPI_PLAINTEXT', + 'HANDSHAKE_IVL', + 'XPUB_NODROP', +] + +switched_sockopt_names = [ + 'RATE', + 'RECOVERY_IVL', + 'SNDBUF', + 'RCVBUF', + 'RCVMORE', +] + +ctx_opt_names = [ + 'IO_THREADS', + 'MAX_SOCKETS', + 'SOCKET_LIMIT', + 'THREAD_PRIORITY', + 'THREAD_SCHED_POLICY', +] + +msg_opt_names = [ + 'MORE', + 'SRCFD', + 'SHARED', +] + +from itertools import chain + +all_names = list(chain( + base_names, + ctx_opt_names, + bytes_sockopt_names, + fd_sockopt_names, + int_sockopt_names, + int64_sockopt_names, + switched_sockopt_names, + msg_opt_names, +)) + +del chain + +def no_prefix(name): + """does the given constant have a ZMQ_ prefix?""" + return name.startswith('E') and not name.startswith('EVENT') + diff --git a/scripts/external_libs/zmq/utils/garbage.py b/scripts/external_libs/zmq/utils/garbage.py new file mode 100644 index 00000000..80a8725a --- /dev/null +++ b/scripts/external_libs/zmq/utils/garbage.py @@ -0,0 +1,180 @@ +"""Garbage collection thread for representing zmq refcount of Python objects +used in zero-copy sends. +""" + +# Copyright (C) PyZMQ Developers +# Distributed under the terms of the Modified BSD License. 
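+# The zero-copy bookkeeping protocol, in brief: a tracked send stores its
+# buffer in gc.refs keyed by id; when libzmq releases the buffer, that key is
+# sent, packed as a native size_t, to the PULL socket bound at
+# inproc://pyzmq.gc.01, where the gc thread pops the ref and fires any tracker
+# event. Roughly, on the receiving side (a sketch of the loop below):
+#
+#     fmt = 'L' if len(msg) == 4 else 'Q'
+#     key = struct.unpack(fmt, msg)[0]
+#     gc.refs.pop(key).event.set()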
+ + +import atexit +import struct + +from os import getpid +from collections import namedtuple +from threading import Thread, Event, Lock +import warnings + +import zmq + + +gcref = namedtuple('gcref', ['obj', 'event']) + +class GarbageCollectorThread(Thread): + """Thread in which garbage collection actually happens.""" + def __init__(self, gc): + super(GarbageCollectorThread, self).__init__() + self.gc = gc + self.daemon = True + self.pid = getpid() + self.ready = Event() + + def run(self): + # detect fork at beginning of the thread + if getpid is None or getpid() != self.pid: + self.ready.set() + return + try: + s = self.gc.context.socket(zmq.PULL) + s.linger = 0 + s.bind(self.gc.url) + finally: + self.ready.set() + + while True: + # detect fork + if getpid is None or getpid() != self.pid: + return + msg = s.recv() + if msg == b'DIE': + break + fmt = 'L' if len(msg) == 4 else 'Q' + key = struct.unpack(fmt, msg)[0] + tup = self.gc.refs.pop(key, None) + if tup and tup.event: + tup.event.set() + del tup + s.close() + + +class GarbageCollector(object): + """PyZMQ Garbage Collector + + Used for representing the reference held by libzmq during zero-copy sends. + This object holds a dictionary, keyed by Python id, + of the Python objects whose memory is currently in use by zeromq. + + When zeromq is done with the memory, it sends a message on an inproc PUSH socket + containing the packed size_t (32 or 64-bit unsigned int), + which is the key in the dict. + When the PULL socket in the gc thread receives that message, + the reference is popped from the dict, + and any tracker events that should be signaled fire. + """ + + refs = None + _context = None + _lock = None + url = "inproc://pyzmq.gc.01" + + def __init__(self, context=None): + super(GarbageCollector, self).__init__() + self.refs = {} + self.pid = None + self.thread = None + self._context = context + self._lock = Lock() + self._stay_down = False + atexit.register(self._atexit) + + @property + def context(self): + if self._context is None: + self._context = zmq.Context() + return self._context + + @context.setter + def context(self, ctx): + if self.is_alive(): + if self.refs: + warnings.warn("Replacing gc context while gc is running", RuntimeWarning) + self.stop() + self._context = ctx + + def _atexit(self): + """atexit callback + + sets _stay_down flag so that gc doesn't try to start up again in other atexit handlers + """ + self._stay_down = True + self.stop() + + def stop(self): + """stop the garbage-collection thread""" + if not self.is_alive(): + return + self._stop() + + def _stop(self): + push = self.context.socket(zmq.PUSH) + push.connect(self.url) + push.send(b'DIE') + push.close() + self.thread.join() + self.context.term() + self.refs.clear() + self.context = None + + def start(self): + """Start a new garbage collection thread. + + Creates a new zmq Context used for garbage collection. + Under most circumstances, this will only be called once per process. + """ + if self.thread is not None and self.pid != getpid(): + # It's re-starting, must free earlier thread's context + # since a fork probably broke it + self._stop() + self.pid = getpid() + self.refs = {} + self.thread = GarbageCollectorThread(self) + self.thread.start() + self.thread.ready.wait() + + def is_alive(self): + """Is the garbage collection thread currently running? + + Includes checks for process shutdown or fork.
+ """ + if (getpid is None or + getpid() != self.pid or + self.thread is None or + not self.thread.is_alive() + ): + return False + return True + + def store(self, obj, event=None): + """store an object and (optionally) event for zero-copy""" + if not self.is_alive(): + if self._stay_down: + return 0 + # safely start the gc thread + # use lock and double check, + # so we don't start multiple threads + with self._lock: + if not self.is_alive(): + self.start() + tup = gcref(obj, event) + theid = id(tup) + self.refs[theid] = tup + return theid + + def __del__(self): + if not self.is_alive(): + return + try: + self.stop() + except Exception as e: + raise e + +gc = GarbageCollector()
+ +from zmq.utils.strtypes import bytes, unicode + +jsonmod = None + +priority = ['simplejson', 'jsonlib2', 'json'] +for mod in priority: + try: + jsonmod = __import__(mod) + except ImportError: + pass + else: + break + +def dumps(o, **kwargs): + """Serialize object to JSON bytes (utf-8). + + See jsonapi.jsonmod.dumps for details on kwargs. + """ + + if 'separators' not in kwargs: + kwargs['separators'] = (',', ':') + + s = jsonmod.dumps(o, **kwargs) + + if isinstance(s, unicode): + s = s.encode('utf8') + + return s + +def loads(s, **kwargs): + """Load object from JSON bytes (utf-8). + + See jsonapi.jsonmod.loads for details on kwargs. + """ + + if str is unicode and isinstance(s, bytes): + s = s.decode('utf8') + + return jsonmod.loads(s, **kwargs) + +__all__ = ['jsonmod', 'dumps', 'loads'] + diff --git a/scripts/external_libs/zmq/utils/monitor.py b/scripts/external_libs/zmq/utils/monitor.py new file mode 100644 index 00000000..734d54b1 --- /dev/null +++ b/scripts/external_libs/zmq/utils/monitor.py @@ -0,0 +1,68 @@ +# -*- coding: utf-8 -*- +"""Module holding utility and convenience functions for zmq event monitoring.""" + +# Copyright (C) PyZMQ Developers +# Distributed under the terms of the Modified BSD License. + +import struct +import zmq +from zmq.error import _check_version + +def parse_monitor_message(msg): + """decode zmq_monitor event messages. + + Parameters + ---------- + msg : list(bytes) + zmq multipart message that has arrived on a monitor PAIR socket. + + First frame is:: + + 16 bit event id + 32 bit event value + no padding + + Second frame is the endpoint as a bytestring + + Returns + ------- + event : dict + event description as dict with the keys `event`, `value`, and `endpoint`. + """ + + if len(msg) != 2 or len(msg[0]) != 6: + raise RuntimeError("Invalid event message format: %s" % msg) + event = {} + event['event'], event['value'] = struct.unpack("=hi", msg[0]) + event['endpoint'] = msg[1] + return event + +def recv_monitor_message(socket, flags=0): + """Receive and decode the given raw message from the monitoring socket and return a dict. + + Requires libzmq ≥ 4.0 + + The returned dict will have the following entries: + event : int, the event id as described in libzmq.zmq_socket_monitor + value : int, the event value associated with the event, see libzmq.zmq_socket_monitor + endpoint : string, the affected endpoint + + Parameters + ---------- + socket : zmq PAIR socket + The PAIR socket (created by other.get_monitor_socket()) on which to recv the message + flags : bitfield (int) + standard zmq recv flags + + Returns + ------- + event : dict + event description as dict with the keys `event`, `value`, and `endpoint`. 
+ """ + _check_version((4,0), 'libzmq event API') + # will always return a list + msg = socket.recv_multipart(flags) + # 4.0-style event API + return parse_monitor_message(msg) + +__all__ = ['parse_monitor_message', 'recv_monitor_message'] diff --git a/scripts/external_libs/zmq/utils/pyversion_compat.h b/scripts/external_libs/zmq/utils/pyversion_compat.h new file mode 100644 index 00000000..fac09046 --- /dev/null +++ b/scripts/external_libs/zmq/utils/pyversion_compat.h @@ -0,0 +1,25 @@ +#include "Python.h" + +#if PY_VERSION_HEX < 0x02070000 + #define PyMemoryView_FromBuffer(info) (PyErr_SetString(PyExc_NotImplementedError, \ + "new buffer interface is not available"), (PyObject *)NULL) + #define PyMemoryView_FromObject(object) (PyErr_SetString(PyExc_NotImplementedError, \ + "new buffer interface is not available"), (PyObject *)NULL) +#endif + +#if PY_VERSION_HEX >= 0x03000000 + // for buffers + #define Py_END_OF_BUFFER ((Py_ssize_t) 0) + + #define PyObject_CheckReadBuffer(object) (0) + + #define PyBuffer_FromMemory(ptr, s) (PyErr_SetString(PyExc_NotImplementedError, \ + "old buffer interface is not available"), (PyObject *)NULL) + #define PyBuffer_FromReadWriteMemory(ptr, s) (PyErr_SetString(PyExc_NotImplementedError, \ + "old buffer interface is not available"), (PyObject *)NULL) + #define PyBuffer_FromObject(object, offset, size) (PyErr_SetString(PyExc_NotImplementedError, \ + "old buffer interface is not available"), (PyObject *)NULL) + #define PyBuffer_FromReadWriteObject(object, offset, size) (PyErr_SetString(PyExc_NotImplementedError, \ + "old buffer interface is not available"), (PyObject *)NULL) + +#endif diff --git a/scripts/external_libs/zmq/utils/sixcerpt.py b/scripts/external_libs/zmq/utils/sixcerpt.py new file mode 100644 index 00000000..5492fd59 --- /dev/null +++ b/scripts/external_libs/zmq/utils/sixcerpt.py @@ -0,0 +1,52 @@ +"""Excerpts of six.py""" + +# Copyright (C) 2010-2014 Benjamin Peterson +# +# Permission is hereby granted, free of charge, to any person obtaining a copy +# of this software and associated documentation files (the "Software"), to deal +# in the Software without restriction, including without limitation the rights +# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +# copies of the Software, and to permit persons to whom the Software is +# furnished to do so, subject to the following conditions: +# +# The above copyright notice and this permission notice shall be included in all +# copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +# SOFTWARE. + +import sys + +# Useful for very coarse version differentiation. 
+PY2 = sys.version_info[0] == 2 +PY3 = sys.version_info[0] == 3 + +if PY3: + + def reraise(tp, value, tb=None): + if value.__traceback__ is not tb: + raise value.with_traceback(tb) + raise value + +else: + def exec_(_code_, _globs_=None, _locs_=None): + """Execute code in a namespace.""" + if _globs_ is None: + frame = sys._getframe(1) + _globs_ = frame.f_globals + if _locs_ is None: + _locs_ = frame.f_locals + del frame + elif _locs_ is None: + _locs_ = _globs_ + exec("""exec _code_ in _globs_, _locs_""") + + + exec_("""def reraise(tp, value, tb=None): + raise tp, value, tb +""") diff --git a/scripts/external_libs/zmq/utils/strtypes.py b/scripts/external_libs/zmq/utils/strtypes.py new file mode 100644 index 00000000..548410dc --- /dev/null +++ b/scripts/external_libs/zmq/utils/strtypes.py @@ -0,0 +1,45 @@ +"""Declare basic string types unambiguously for various Python versions. + +Authors +------- +* MinRK +""" + +# Copyright (C) PyZMQ Developers +# Distributed under the terms of the Modified BSD License. + +import sys + +if sys.version_info[0] >= 3: + bytes = bytes + unicode = str + basestring = (bytes, unicode) +else: + unicode = unicode + bytes = str + basestring = basestring + +def cast_bytes(s, encoding='utf8', errors='strict'): + """cast unicode or bytes to bytes""" + if isinstance(s, bytes): + return s + elif isinstance(s, unicode): + return s.encode(encoding, errors) + else: + raise TypeError("Expected unicode or bytes, got %r" % s) + +def cast_unicode(s, encoding='utf8', errors='strict'): + """cast bytes or unicode to unicode""" + if isinstance(s, bytes): + return s.decode(encoding, errors) + elif isinstance(s, unicode): + return s + else: + raise TypeError("Expected unicode or bytes, got %r" % s) + +# give short 'b' alias for cast_bytes, so that we can use fake b('stuff') +# to simulate b'stuff' +b = asbytes = cast_bytes +u = cast_unicode + +__all__ = ['asbytes', 'bytes', 'unicode', 'basestring', 'b', 'u', 'cast_bytes', 'cast_unicode'] diff --git a/scripts/external_libs/zmq/utils/win32.py b/scripts/external_libs/zmq/utils/win32.py new file mode 100644 index 00000000..ea758299 --- /dev/null +++ b/scripts/external_libs/zmq/utils/win32.py @@ -0,0 +1,132 @@ +"""Win32 compatibility utilities.""" + +#----------------------------------------------------------------------------- +# Copyright (C) PyZMQ Developers +# Distributed under the terms of the Modified BSD License. +#----------------------------------------------------------------------------- + +import os + +# No-op implementation for other platforms. +class _allow_interrupt(object): + """Utility for fixing CTRL-C events on Windows. + + On Windows, the Python interpreter intercepts CTRL-C events in order to + translate them into ``KeyboardInterrupt`` exceptions. It (presumably) + does this by setting a flag in its "console control handler" and + checking it later at a convenient location in the interpreter. + + However, when the Python interpreter is blocked waiting for the ZMQ + poll operation to complete, it must wait for ZMQ's ``select()`` + operation to complete before translating the CTRL-C event into the + ``KeyboardInterrupt`` exception. + + The only way to fix this seems to be to add our own "console control + handler" and perform some application-defined operation that will + unblock the ZMQ polling operation in order to force ZMQ to pass control + back to the Python interpreter. + + This context manager performs all that Windows-y stuff, providing you + with a hook that is called when a CTRL-C event is intercepted.
This + hook allows you to unblock your ZMQ poll operation immediately, which + will then result in the expected ``KeyboardInterrupt`` exception. + + Without this context manager, your ZMQ-based application will not + respond normally to CTRL-C events on Windows. If a CTRL-C event occurs + while blocked on ZMQ socket polling, the translation to a + ``KeyboardInterrupt`` exception will be delayed until the I/O completes + and control returns to the Python interpreter (this may never happen if + you use an infinite timeout). + + A no-op implementation is provided on non-Win32 systems so that the + application does not have to use it conditionally. + + Example usage: + + .. sourcecode:: python + + def stop_my_application(): + # ... + + with allow_interrupt(stop_my_application): + # main polling loop. + + In a typical ZMQ application, you would use the "self pipe trick" to + send a message to a ``PAIR`` socket in order to interrupt your blocking + socket polling operation. + + In a Tornado event loop, you can use the ``IOLoop.stop`` method to + unblock your I/O loop. + """ + + def __init__(self, action=None): + """Translate ``action`` into a CTRL-C handler. + + ``action`` is a callable that takes no arguments and returns no + value (returned value is ignored). It must *NEVER* raise an + exception. + + If unspecified, a no-op will be used. + """ + self._init_action(action) + + def _init_action(self, action): + pass + + def __enter__(self): + return self + + def __exit__(self, *args): + return + +if os.name == 'nt': + from ctypes import WINFUNCTYPE, windll + from ctypes.wintypes import BOOL, DWORD + + kernel32 = windll.LoadLibrary('kernel32') + + # <http://msdn.microsoft.com/en-us/library/ms686016.aspx> + PHANDLER_ROUTINE = WINFUNCTYPE(BOOL, DWORD) + SetConsoleCtrlHandler = kernel32.SetConsoleCtrlHandler + SetConsoleCtrlHandler.argtypes = (PHANDLER_ROUTINE, BOOL) + SetConsoleCtrlHandler.restype = BOOL + + class allow_interrupt(_allow_interrupt): + __doc__ = _allow_interrupt.__doc__ + + def _init_action(self, action): + if action is None: + action = lambda: None + self.action = action + @PHANDLER_ROUTINE + def handle(event): + if event == 0: # CTRL_C_EVENT + action() + # Typical C implementations would return 1 to indicate that + # the event was processed and other control handlers in the + # stack should not be executed. However, that would + # prevent the Python interpreter's handler from translating + # CTRL-C to a `KeyboardInterrupt` exception, so we pretend + # that we didn't handle it.
+ return 0 + self.handle = handle + + def __enter__(self): + """Install the custom CTRL-C handler.""" + result = SetConsoleCtrlHandler(self.handle, 1) + if result == 0: + # Have standard library automatically call `GetLastError()` and + # `FormatMessage()` into a nice exception object :-) + raise WindowsError() + + def __exit__(self, *args): + """Remove the custom CTRL-C handler.""" + result = SetConsoleCtrlHandler(self.handle, 0) + if result == 0: + # Have standard library automatically call `GetLastError()` and + # `FormatMessage()` into a nice exception object :-) + raise WindowsError() +else: + class allow_interrupt(_allow_interrupt): + __doc__ = _allow_interrupt.__doc__ + pass diff --git a/scripts/external_libs/zmq/utils/z85.py b/scripts/external_libs/zmq/utils/z85.py new file mode 100644 index 00000000..1bb1784e --- /dev/null +++ b/scripts/external_libs/zmq/utils/z85.py @@ -0,0 +1,56 @@ +"""Python implementation of Z85 base-85 encoding + +Z85 encoding is a plaintext encoding for a bytestring interpreted as 32bit integers. +Since the chunks are 32bit, a bytestring must be a multiple of 4 bytes. +See ZMQ RFC 32 for details. + + +""" + +# Copyright (C) PyZMQ Developers +# Distributed under the terms of the Modified BSD License. + +import sys +import struct + +PY3 = sys.version_info[0] >= 3 +# Z85CHARS is the base 85 symbol table +Z85CHARS = b"0123456789abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ.-:+=^!/*?&<>()[]{}@%$#" +# Z85MAP maps characters in Z85CHARS to their integer value in [0,84] +Z85MAP = dict([(c, idx) for idx, c in enumerate(Z85CHARS)]) + +_85s = [ 85**i for i in range(5) ][::-1] + +def encode(rawbytes): + """encode raw bytes into Z85""" + # Accepts only bytestrings whose length is a multiple of 4 + if len(rawbytes) % 4: + raise ValueError("length must be multiple of 4, not %i" % len(rawbytes)) + + nvalues = len(rawbytes) // 4 + + values = struct.unpack('>%dI' % nvalues, rawbytes) + encoded = [] + for v in values: + for offset in _85s: + encoded.append(Z85CHARS[(v // offset) % 85]) + + # In Python 3, encoded is a list of integers (obviously?!) + if PY3: + return bytes(encoded) + else: + return b''.join(encoded) + +def decode(z85bytes): + """decode Z85 bytes to raw bytes""" + if len(z85bytes) % 5: + raise ValueError("Z85 length must be multiple of 5, not %i" % len(z85bytes)) + + nvalues = len(z85bytes) // 5 + values = [] + for i in range(0, len(z85bytes), 5): + value = 0 + for j, offset in enumerate(_85s): + value += Z85MAP[z85bytes[i+j]] * offset + values.append(value) + return struct.pack('>%dI' % nvalues, *values)
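A round-trip sketch; the 32-byte input stands in for a CURVE key, the main use of Z85 in zmq:

    from zmq.utils import z85

    raw = bytes(bytearray(range(32)))  # e.g. a 32-byte CURVE key
    encoded = z85.encode(raw)          # 5 Z85 characters per 4 raw bytes
    assert len(encoded) == 40
    assert z85.decode(encoded) == raw  # lossless round-trip

diff --git a/scripts/external_libs/zmq/utils/zmq_compat.h b/scripts/external_libs/zmq/utils/zmq_compat.h new file mode 100644 index 00000000..81c57b69 --- /dev/null +++ b/scripts/external_libs/zmq/utils/zmq_compat.h @@ -0,0 +1,80 @@ +//----------------------------------------------------------------------------- +// Copyright (c) 2010 Brian Granger, Min Ragan-Kelley +// +// Distributed under the terms of the New BSD License. The full license is in +// the file COPYING.BSD, distributed as part of this software.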
+//----------------------------------------------------------------------------- + +#if defined(_MSC_VER) +#define pyzmq_int64_t __int64 +#else +#include <stdint.h> +#define pyzmq_int64_t int64_t +#endif + + +#include "zmq.h" +// version compatibility for constants: +#include "zmq_constants.h" + +#define _missing (-1) + + +// define fd type (from libzmq's fd.hpp) +#ifdef _WIN32 + #if defined(_MSC_VER) && _MSC_VER <= 1400 + #define ZMQ_FD_T UINT_PTR + #else + #define ZMQ_FD_T SOCKET + #endif +#else + #define ZMQ_FD_T int +#endif + +// use unambiguous aliases for zmq_send/recv functions + +#if ZMQ_VERSION_MAJOR >= 4 +// nothing to remove +#else + #define zmq_curve_keypair(z85_public_key, z85_secret_key) _missing +#endif + +#if ZMQ_VERSION_MAJOR >= 4 && ZMQ_VERSION_MINOR >= 1 +// nothing to remove +#else + #define zmq_msg_gets(msg, prop) _missing + #define zmq_has(capability) _missing +#endif + +#if ZMQ_VERSION_MAJOR >= 3 + #define zmq_sendbuf zmq_send + #define zmq_recvbuf zmq_recv + + // 3.x deprecations - these symbols haven't been removed, + // but let's protect against their planned removal + #define zmq_device(device_type, isocket, osocket) _missing + #define zmq_init(io_threads) ((void*)NULL) + #define zmq_term zmq_ctx_destroy +#else + #define zmq_ctx_set(ctx, opt, val) _missing + #define zmq_ctx_get(ctx, opt) _missing + #define zmq_ctx_destroy zmq_term + #define zmq_ctx_new() ((void*)NULL) + + #define zmq_proxy(a,b,c) _missing + + #define zmq_disconnect(s, addr) _missing + #define zmq_unbind(s, addr) _missing + + #define zmq_msg_more(msg) _missing + #define zmq_msg_get(msg, opt) _missing + #define zmq_msg_set(msg, opt, val) _missing + #define zmq_msg_send(msg, s, flags) zmq_send(s, msg, flags) + #define zmq_msg_recv(msg, s, flags) zmq_recv(s, msg, flags) + + #define zmq_sendbuf(s, buf, len, flags) _missing + #define zmq_recvbuf(s, buf, len, flags) _missing + + #define zmq_socket_monitor(s, addr, flags) _missing + +#endif diff --git a/scripts/external_libs/zmq/utils/zmq_constants.h b/scripts/external_libs/zmq/utils/zmq_constants.h new file mode 100644 index 00000000..97683022 --- /dev/null +++ b/scripts/external_libs/zmq/utils/zmq_constants.h @@ -0,0 +1,622 @@ +#ifndef _PYZMQ_CONSTANT_DEFS +#define _PYZMQ_CONSTANT_DEFS + +#define _PYZMQ_UNDEFINED (-9999) +#ifndef ZMQ_VERSION + #define ZMQ_VERSION (_PYZMQ_UNDEFINED) +#endif + +#ifndef ZMQ_VERSION_MAJOR + #define ZMQ_VERSION_MAJOR (_PYZMQ_UNDEFINED) +#endif + +#ifndef ZMQ_VERSION_MINOR + #define ZMQ_VERSION_MINOR (_PYZMQ_UNDEFINED) +#endif + +#ifndef ZMQ_VERSION_PATCH + #define ZMQ_VERSION_PATCH (_PYZMQ_UNDEFINED) +#endif + +#ifndef ZMQ_NOBLOCK + #define ZMQ_NOBLOCK (_PYZMQ_UNDEFINED) +#endif + +#ifndef ZMQ_DONTWAIT + #define ZMQ_DONTWAIT (_PYZMQ_UNDEFINED) +#endif + +#ifndef ZMQ_POLLIN + #define ZMQ_POLLIN (_PYZMQ_UNDEFINED) +#endif + +#ifndef ZMQ_POLLOUT + #define ZMQ_POLLOUT (_PYZMQ_UNDEFINED) +#endif + +#ifndef ZMQ_POLLERR + #define ZMQ_POLLERR (_PYZMQ_UNDEFINED) +#endif + +#ifndef ZMQ_SNDMORE + #define ZMQ_SNDMORE (_PYZMQ_UNDEFINED) +#endif + +#ifndef ZMQ_STREAMER + #define ZMQ_STREAMER (_PYZMQ_UNDEFINED) +#endif + +#ifndef ZMQ_FORWARDER + #define ZMQ_FORWARDER (_PYZMQ_UNDEFINED) +#endif + +#ifndef ZMQ_QUEUE + #define ZMQ_QUEUE (_PYZMQ_UNDEFINED) +#endif + +#ifndef ZMQ_IO_THREADS_DFLT + #define ZMQ_IO_THREADS_DFLT (_PYZMQ_UNDEFINED) +#endif + +#ifndef ZMQ_MAX_SOCKETS_DFLT + #define ZMQ_MAX_SOCKETS_DFLT (_PYZMQ_UNDEFINED) +#endif + +#ifndef ZMQ_POLLITEMS_DFLT + #define ZMQ_POLLITEMS_DFLT (_PYZMQ_UNDEFINED) +#endif + +#ifndef
ZMQ_THREAD_PRIORITY_DFLT + #define ZMQ_THREAD_PRIORITY_DFLT (_PYZMQ_UNDEFINED) +#endif + +#ifndef ZMQ_THREAD_SCHED_POLICY_DFLT + #define ZMQ_THREAD_SCHED_POLICY_DFLT (_PYZMQ_UNDEFINED) +#endif + +#ifndef ZMQ_PAIR + #define ZMQ_PAIR (_PYZMQ_UNDEFINED) +#endif + +#ifndef ZMQ_PUB + #define ZMQ_PUB (_PYZMQ_UNDEFINED) +#endif + +#ifndef ZMQ_SUB + #define ZMQ_SUB (_PYZMQ_UNDEFINED) +#endif + +#ifndef ZMQ_REQ + #define ZMQ_REQ (_PYZMQ_UNDEFINED) +#endif + +#ifndef ZMQ_REP + #define ZMQ_REP (_PYZMQ_UNDEFINED) +#endif + +#ifndef ZMQ_DEALER + #define ZMQ_DEALER (_PYZMQ_UNDEFINED) +#endif + +#ifndef ZMQ_ROUTER + #define ZMQ_ROUTER (_PYZMQ_UNDEFINED) +#endif + +#ifndef ZMQ_XREQ + #define ZMQ_XREQ (_PYZMQ_UNDEFINED) +#endif + +#ifndef ZMQ_XREP + #define ZMQ_XREP (_PYZMQ_UNDEFINED) +#endif + +#ifndef ZMQ_PULL + #define ZMQ_PULL (_PYZMQ_UNDEFINED) +#endif + +#ifndef ZMQ_PUSH + #define ZMQ_PUSH (_PYZMQ_UNDEFINED) +#endif + +#ifndef ZMQ_XPUB + #define ZMQ_XPUB (_PYZMQ_UNDEFINED) +#endif + +#ifndef ZMQ_XSUB + #define ZMQ_XSUB (_PYZMQ_UNDEFINED) +#endif + +#ifndef ZMQ_UPSTREAM + #define ZMQ_UPSTREAM (_PYZMQ_UNDEFINED) +#endif + +#ifndef ZMQ_DOWNSTREAM + #define ZMQ_DOWNSTREAM (_PYZMQ_UNDEFINED) +#endif + +#ifndef ZMQ_STREAM + #define ZMQ_STREAM (_PYZMQ_UNDEFINED) +#endif + +#ifndef ZMQ_EVENT_CONNECTED + #define ZMQ_EVENT_CONNECTED (_PYZMQ_UNDEFINED) +#endif + +#ifndef ZMQ_EVENT_CONNECT_DELAYED + #define ZMQ_EVENT_CONNECT_DELAYED (_PYZMQ_UNDEFINED) +#endif + +#ifndef ZMQ_EVENT_CONNECT_RETRIED + #define ZMQ_EVENT_CONNECT_RETRIED (_PYZMQ_UNDEFINED) +#endif + +#ifndef ZMQ_EVENT_LISTENING + #define ZMQ_EVENT_LISTENING (_PYZMQ_UNDEFINED) +#endif + +#ifndef ZMQ_EVENT_BIND_FAILED + #define ZMQ_EVENT_BIND_FAILED (_PYZMQ_UNDEFINED) +#endif + +#ifndef ZMQ_EVENT_ACCEPTED + #define ZMQ_EVENT_ACCEPTED (_PYZMQ_UNDEFINED) +#endif + +#ifndef ZMQ_EVENT_ACCEPT_FAILED + #define ZMQ_EVENT_ACCEPT_FAILED (_PYZMQ_UNDEFINED) +#endif + +#ifndef ZMQ_EVENT_CLOSED + #define ZMQ_EVENT_CLOSED (_PYZMQ_UNDEFINED) +#endif + +#ifndef ZMQ_EVENT_CLOSE_FAILED + #define ZMQ_EVENT_CLOSE_FAILED (_PYZMQ_UNDEFINED) +#endif + +#ifndef ZMQ_EVENT_DISCONNECTED + #define ZMQ_EVENT_DISCONNECTED (_PYZMQ_UNDEFINED) +#endif + +#ifndef ZMQ_EVENT_ALL + #define ZMQ_EVENT_ALL (_PYZMQ_UNDEFINED) +#endif + +#ifndef ZMQ_EVENT_MONITOR_STOPPED + #define ZMQ_EVENT_MONITOR_STOPPED (_PYZMQ_UNDEFINED) +#endif + +#ifndef ZMQ_NULL + #define ZMQ_NULL (_PYZMQ_UNDEFINED) +#endif + +#ifndef ZMQ_PLAIN + #define ZMQ_PLAIN (_PYZMQ_UNDEFINED) +#endif + +#ifndef ZMQ_CURVE + #define ZMQ_CURVE (_PYZMQ_UNDEFINED) +#endif + +#ifndef ZMQ_GSSAPI + #define ZMQ_GSSAPI (_PYZMQ_UNDEFINED) +#endif + +#ifndef EAGAIN + #define EAGAIN (_PYZMQ_UNDEFINED) +#endif + +#ifndef EINVAL + #define EINVAL (_PYZMQ_UNDEFINED) +#endif + +#ifndef EFAULT + #define EFAULT (_PYZMQ_UNDEFINED) +#endif + +#ifndef ENOMEM + #define ENOMEM (_PYZMQ_UNDEFINED) +#endif + +#ifndef ENODEV + #define ENODEV (_PYZMQ_UNDEFINED) +#endif + +#ifndef EMSGSIZE + #define EMSGSIZE (_PYZMQ_UNDEFINED) +#endif + +#ifndef EAFNOSUPPORT + #define EAFNOSUPPORT (_PYZMQ_UNDEFINED) +#endif + +#ifndef ENETUNREACH + #define ENETUNREACH (_PYZMQ_UNDEFINED) +#endif + +#ifndef ECONNABORTED + #define ECONNABORTED (_PYZMQ_UNDEFINED) +#endif + +#ifndef ECONNRESET + #define ECONNRESET (_PYZMQ_UNDEFINED) +#endif + +#ifndef ENOTCONN + #define ENOTCONN (_PYZMQ_UNDEFINED) +#endif + +#ifndef ETIMEDOUT + #define ETIMEDOUT (_PYZMQ_UNDEFINED) +#endif + +#ifndef EHOSTUNREACH + #define EHOSTUNREACH (_PYZMQ_UNDEFINED) +#endif + +#ifndef ENETRESET + #define 
ENETRESET (_PYZMQ_UNDEFINED) +#endif + +#ifndef ZMQ_HAUSNUMERO + #define ZMQ_HAUSNUMERO (_PYZMQ_UNDEFINED) +#endif + +#ifndef ENOTSUP + #define ENOTSUP (_PYZMQ_UNDEFINED) +#endif + +#ifndef EPROTONOSUPPORT + #define EPROTONOSUPPORT (_PYZMQ_UNDEFINED) +#endif + +#ifndef ENOBUFS + #define ENOBUFS (_PYZMQ_UNDEFINED) +#endif + +#ifndef ENETDOWN + #define ENETDOWN (_PYZMQ_UNDEFINED) +#endif + +#ifndef EADDRINUSE + #define EADDRINUSE (_PYZMQ_UNDEFINED) +#endif + +#ifndef EADDRNOTAVAIL + #define EADDRNOTAVAIL (_PYZMQ_UNDEFINED) +#endif + +#ifndef ECONNREFUSED + #define ECONNREFUSED (_PYZMQ_UNDEFINED) +#endif + +#ifndef EINPROGRESS + #define EINPROGRESS (_PYZMQ_UNDEFINED) +#endif + +#ifndef ENOTSOCK + #define ENOTSOCK (_PYZMQ_UNDEFINED) +#endif + +#ifndef EFSM + #define EFSM (_PYZMQ_UNDEFINED) +#endif + +#ifndef ENOCOMPATPROTO + #define ENOCOMPATPROTO (_PYZMQ_UNDEFINED) +#endif + +#ifndef ETERM + #define ETERM (_PYZMQ_UNDEFINED) +#endif + +#ifndef EMTHREAD + #define EMTHREAD (_PYZMQ_UNDEFINED) +#endif + +#ifndef ZMQ_IO_THREADS + #define ZMQ_IO_THREADS (_PYZMQ_UNDEFINED) +#endif + +#ifndef ZMQ_MAX_SOCKETS + #define ZMQ_MAX_SOCKETS (_PYZMQ_UNDEFINED) +#endif + +#ifndef ZMQ_SOCKET_LIMIT + #define ZMQ_SOCKET_LIMIT (_PYZMQ_UNDEFINED) +#endif + +#ifndef ZMQ_THREAD_PRIORITY + #define ZMQ_THREAD_PRIORITY (_PYZMQ_UNDEFINED) +#endif + +#ifndef ZMQ_THREAD_SCHED_POLICY + #define ZMQ_THREAD_SCHED_POLICY (_PYZMQ_UNDEFINED) +#endif + +#ifndef ZMQ_IDENTITY + #define ZMQ_IDENTITY (_PYZMQ_UNDEFINED) +#endif + +#ifndef ZMQ_SUBSCRIBE + #define ZMQ_SUBSCRIBE (_PYZMQ_UNDEFINED) +#endif + +#ifndef ZMQ_UNSUBSCRIBE + #define ZMQ_UNSUBSCRIBE (_PYZMQ_UNDEFINED) +#endif + +#ifndef ZMQ_LAST_ENDPOINT + #define ZMQ_LAST_ENDPOINT (_PYZMQ_UNDEFINED) +#endif + +#ifndef ZMQ_TCP_ACCEPT_FILTER + #define ZMQ_TCP_ACCEPT_FILTER (_PYZMQ_UNDEFINED) +#endif + +#ifndef ZMQ_PLAIN_USERNAME + #define ZMQ_PLAIN_USERNAME (_PYZMQ_UNDEFINED) +#endif + +#ifndef ZMQ_PLAIN_PASSWORD + #define ZMQ_PLAIN_PASSWORD (_PYZMQ_UNDEFINED) +#endif + +#ifndef ZMQ_CURVE_PUBLICKEY + #define ZMQ_CURVE_PUBLICKEY (_PYZMQ_UNDEFINED) +#endif + +#ifndef ZMQ_CURVE_SECRETKEY + #define ZMQ_CURVE_SECRETKEY (_PYZMQ_UNDEFINED) +#endif + +#ifndef ZMQ_CURVE_SERVERKEY + #define ZMQ_CURVE_SERVERKEY (_PYZMQ_UNDEFINED) +#endif + +#ifndef ZMQ_ZAP_DOMAIN + #define ZMQ_ZAP_DOMAIN (_PYZMQ_UNDEFINED) +#endif + +#ifndef ZMQ_CONNECT_RID + #define ZMQ_CONNECT_RID (_PYZMQ_UNDEFINED) +#endif + +#ifndef ZMQ_GSSAPI_PRINCIPAL + #define ZMQ_GSSAPI_PRINCIPAL (_PYZMQ_UNDEFINED) +#endif + +#ifndef ZMQ_GSSAPI_SERVICE_PRINCIPAL + #define ZMQ_GSSAPI_SERVICE_PRINCIPAL (_PYZMQ_UNDEFINED) +#endif + +#ifndef ZMQ_SOCKS_PROXY + #define ZMQ_SOCKS_PROXY (_PYZMQ_UNDEFINED) +#endif + +#ifndef ZMQ_FD + #define ZMQ_FD (_PYZMQ_UNDEFINED) +#endif + +#ifndef ZMQ_IDENTITY_FD + #define ZMQ_IDENTITY_FD (_PYZMQ_UNDEFINED) +#endif + +#ifndef ZMQ_RECONNECT_IVL_MAX + #define ZMQ_RECONNECT_IVL_MAX (_PYZMQ_UNDEFINED) +#endif + +#ifndef ZMQ_SNDTIMEO + #define ZMQ_SNDTIMEO (_PYZMQ_UNDEFINED) +#endif + +#ifndef ZMQ_RCVTIMEO + #define ZMQ_RCVTIMEO (_PYZMQ_UNDEFINED) +#endif + +#ifndef ZMQ_SNDHWM + #define ZMQ_SNDHWM (_PYZMQ_UNDEFINED) +#endif + +#ifndef ZMQ_RCVHWM + #define ZMQ_RCVHWM (_PYZMQ_UNDEFINED) +#endif + +#ifndef ZMQ_MULTICAST_HOPS + #define ZMQ_MULTICAST_HOPS (_PYZMQ_UNDEFINED) +#endif + +#ifndef ZMQ_IPV4ONLY + #define ZMQ_IPV4ONLY (_PYZMQ_UNDEFINED) +#endif + +#ifndef ZMQ_ROUTER_BEHAVIOR + #define ZMQ_ROUTER_BEHAVIOR (_PYZMQ_UNDEFINED) +#endif + +#ifndef ZMQ_TCP_KEEPALIVE + #define ZMQ_TCP_KEEPALIVE (_PYZMQ_UNDEFINED) 
+#endif + +#ifndef ZMQ_TCP_KEEPALIVE_CNT + #define ZMQ_TCP_KEEPALIVE_CNT (_PYZMQ_UNDEFINED) +#endif + +#ifndef ZMQ_TCP_KEEPALIVE_IDLE + #define ZMQ_TCP_KEEPALIVE_IDLE (_PYZMQ_UNDEFINED) +#endif + +#ifndef ZMQ_TCP_KEEPALIVE_INTVL + #define ZMQ_TCP_KEEPALIVE_INTVL (_PYZMQ_UNDEFINED) +#endif + +#ifndef ZMQ_DELAY_ATTACH_ON_CONNECT + #define ZMQ_DELAY_ATTACH_ON_CONNECT (_PYZMQ_UNDEFINED) +#endif + +#ifndef ZMQ_XPUB_VERBOSE + #define ZMQ_XPUB_VERBOSE (_PYZMQ_UNDEFINED) +#endif + +#ifndef ZMQ_EVENTS + #define ZMQ_EVENTS (_PYZMQ_UNDEFINED) +#endif + +#ifndef ZMQ_TYPE + #define ZMQ_TYPE (_PYZMQ_UNDEFINED) +#endif + +#ifndef ZMQ_LINGER + #define ZMQ_LINGER (_PYZMQ_UNDEFINED) +#endif + +#ifndef ZMQ_RECONNECT_IVL + #define ZMQ_RECONNECT_IVL (_PYZMQ_UNDEFINED) +#endif + +#ifndef ZMQ_BACKLOG + #define ZMQ_BACKLOG (_PYZMQ_UNDEFINED) +#endif + +#ifndef ZMQ_ROUTER_MANDATORY + #define ZMQ_ROUTER_MANDATORY (_PYZMQ_UNDEFINED) +#endif + +#ifndef ZMQ_FAIL_UNROUTABLE + #define ZMQ_FAIL_UNROUTABLE (_PYZMQ_UNDEFINED) +#endif + +#ifndef ZMQ_ROUTER_RAW + #define ZMQ_ROUTER_RAW (_PYZMQ_UNDEFINED) +#endif + +#ifndef ZMQ_IMMEDIATE + #define ZMQ_IMMEDIATE (_PYZMQ_UNDEFINED) +#endif + +#ifndef ZMQ_IPV6 + #define ZMQ_IPV6 (_PYZMQ_UNDEFINED) +#endif + +#ifndef ZMQ_MECHANISM + #define ZMQ_MECHANISM (_PYZMQ_UNDEFINED) +#endif + +#ifndef ZMQ_PLAIN_SERVER + #define ZMQ_PLAIN_SERVER (_PYZMQ_UNDEFINED) +#endif + +#ifndef ZMQ_CURVE_SERVER + #define ZMQ_CURVE_SERVER (_PYZMQ_UNDEFINED) +#endif + +#ifndef ZMQ_PROBE_ROUTER + #define ZMQ_PROBE_ROUTER (_PYZMQ_UNDEFINED) +#endif + +#ifndef ZMQ_REQ_RELAXED + #define ZMQ_REQ_RELAXED (_PYZMQ_UNDEFINED) +#endif + +#ifndef ZMQ_REQ_CORRELATE + #define ZMQ_REQ_CORRELATE (_PYZMQ_UNDEFINED) +#endif + +#ifndef ZMQ_CONFLATE + #define ZMQ_CONFLATE (_PYZMQ_UNDEFINED) +#endif + +#ifndef ZMQ_ROUTER_HANDOVER + #define ZMQ_ROUTER_HANDOVER (_PYZMQ_UNDEFINED) +#endif + +#ifndef ZMQ_TOS + #define ZMQ_TOS (_PYZMQ_UNDEFINED) +#endif + +#ifndef ZMQ_IPC_FILTER_PID + #define ZMQ_IPC_FILTER_PID (_PYZMQ_UNDEFINED) +#endif + +#ifndef ZMQ_IPC_FILTER_UID + #define ZMQ_IPC_FILTER_UID (_PYZMQ_UNDEFINED) +#endif + +#ifndef ZMQ_IPC_FILTER_GID + #define ZMQ_IPC_FILTER_GID (_PYZMQ_UNDEFINED) +#endif + +#ifndef ZMQ_GSSAPI_SERVER + #define ZMQ_GSSAPI_SERVER (_PYZMQ_UNDEFINED) +#endif + +#ifndef ZMQ_GSSAPI_PLAINTEXT + #define ZMQ_GSSAPI_PLAINTEXT (_PYZMQ_UNDEFINED) +#endif + +#ifndef ZMQ_HANDSHAKE_IVL + #define ZMQ_HANDSHAKE_IVL (_PYZMQ_UNDEFINED) +#endif + +#ifndef ZMQ_XPUB_NODROP + #define ZMQ_XPUB_NODROP (_PYZMQ_UNDEFINED) +#endif + +#ifndef ZMQ_AFFINITY + #define ZMQ_AFFINITY (_PYZMQ_UNDEFINED) +#endif + +#ifndef ZMQ_MAXMSGSIZE + #define ZMQ_MAXMSGSIZE (_PYZMQ_UNDEFINED) +#endif + +#ifndef ZMQ_HWM + #define ZMQ_HWM (_PYZMQ_UNDEFINED) +#endif + +#ifndef ZMQ_SWAP + #define ZMQ_SWAP (_PYZMQ_UNDEFINED) +#endif + +#ifndef ZMQ_MCAST_LOOP + #define ZMQ_MCAST_LOOP (_PYZMQ_UNDEFINED) +#endif + +#ifndef ZMQ_RECOVERY_IVL_MSEC + #define ZMQ_RECOVERY_IVL_MSEC (_PYZMQ_UNDEFINED) +#endif + +#ifndef ZMQ_RATE + #define ZMQ_RATE (_PYZMQ_UNDEFINED) +#endif + +#ifndef ZMQ_RECOVERY_IVL + #define ZMQ_RECOVERY_IVL (_PYZMQ_UNDEFINED) +#endif + +#ifndef ZMQ_SNDBUF + #define ZMQ_SNDBUF (_PYZMQ_UNDEFINED) +#endif + +#ifndef ZMQ_RCVBUF + #define ZMQ_RCVBUF (_PYZMQ_UNDEFINED) +#endif + +#ifndef ZMQ_RCVMORE + #define ZMQ_RCVMORE (_PYZMQ_UNDEFINED) +#endif + +#ifndef ZMQ_MORE + #define ZMQ_MORE (_PYZMQ_UNDEFINED) +#endif + +#ifndef ZMQ_SRCFD + #define ZMQ_SRCFD (_PYZMQ_UNDEFINED) +#endif + +#ifndef ZMQ_SHARED + #define ZMQ_SHARED (_PYZMQ_UNDEFINED) 
+#endif + + +#endif // ifndef _PYZMQ_CONSTANT_DEFS diff --git a/scripts/libzmq.so.3 b/scripts/libzmq.so.3 index 16980c27..16980c27 100755..100644 --- a/scripts/libzmq.so.3 +++ b/scripts/libzmq.so.3 Binary files differ diff --git a/scripts/libzmq.so.3.1.0 b/scripts/libzmq.so.3.1.0 index 16980c27..16980c27 100755..100644 --- a/scripts/libzmq.so.3.1.0 +++ b/scripts/libzmq.so.3.1.0 Binary files differ diff --git a/scripts/trex-console b/scripts/trex-console index 50e097e7..6eab77dd 100755 --- a/scripts/trex-console +++ b/scripts/trex-console @@ -1,2 +1,2 @@ #!/bin/bash -../src/console/trex_console.py $@ +../scripts/automation/trex_control_plane/console/trex_console.py "$@"
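Together, zmq_compat.h and zmq_constants.h let pyzmq build against one libzmq release and degrade gracefully at runtime against another. The same version gating is available from Python through pyzmq's documented version helpers; a small sketch:

    import zmq

    # Gate optional features on the libzmq actually loaded at runtime.
    if zmq.zmq_version_info() >= (4, 0):
        print("libzmq", zmq.zmq_version(), "- 4.0 constants (CURVE, STREAM, ...) available")
    else:
        print("libzmq", zmq.zmq_version(), "predates the 4.0 security mechanisms")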