Diffstat (limited to 'testing/mochitest/pywebsocket/mod_pywebsocket')
-rw-r--r--  testing/mochitest/pywebsocket/mod_pywebsocket/__init__.py  224
-rw-r--r--  testing/mochitest/pywebsocket/mod_pywebsocket/_stream_base.py  181
-rw-r--r--  testing/mochitest/pywebsocket/mod_pywebsocket/_stream_hixie75.py  229
-rw-r--r--  testing/mochitest/pywebsocket/mod_pywebsocket/_stream_hybi.py  894
-rw-r--r--  testing/mochitest/pywebsocket/mod_pywebsocket/common.py  301
-rw-r--r--  testing/mochitest/pywebsocket/mod_pywebsocket/dispatch.py  393
-rw-r--r--  testing/mochitest/pywebsocket/mod_pywebsocket/extensions.py  764
-rw-r--r--  testing/mochitest/pywebsocket/mod_pywebsocket/fast_masking.i  98
-rw-r--r--  testing/mochitest/pywebsocket/mod_pywebsocket/handshake/__init__.py  110
-rw-r--r--  testing/mochitest/pywebsocket/mod_pywebsocket/handshake/_base.py  182
-rw-r--r--  testing/mochitest/pywebsocket/mod_pywebsocket/handshake/hybi.py  428
-rw-r--r--  testing/mochitest/pywebsocket/mod_pywebsocket/handshake/hybi00.py  293
-rw-r--r--  testing/mochitest/pywebsocket/mod_pywebsocket/headerparserhandler.py  254
-rw-r--r--  testing/mochitest/pywebsocket/mod_pywebsocket/http_header_util.py  263
-rw-r--r--  testing/mochitest/pywebsocket/mod_pywebsocket/memorizingfile.py  99
-rw-r--r--  testing/mochitest/pywebsocket/mod_pywebsocket/msgutil.py  219
-rw-r--r--  testing/mochitest/pywebsocket/mod_pywebsocket/mux.py  1889
-rw-r--r--  testing/mochitest/pywebsocket/mod_pywebsocket/stream.py  57
-rw-r--r--  testing/mochitest/pywebsocket/mod_pywebsocket/util.py  416
-rw-r--r--  testing/mochitest/pywebsocket/mod_pywebsocket/xhr_benchmark_handler.py  109
20 files changed, 7403 insertions, 0 deletions
diff --git a/testing/mochitest/pywebsocket/mod_pywebsocket/__init__.py b/testing/mochitest/pywebsocket/mod_pywebsocket/__init__.py
new file mode 100644
index 000000000..70933a220
--- /dev/null
+++ b/testing/mochitest/pywebsocket/mod_pywebsocket/__init__.py
@@ -0,0 +1,224 @@
+# Copyright 2011, Google Inc.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+
+"""WebSocket extension for Apache HTTP Server.
+
+mod_pywebsocket is a WebSocket extension for Apache HTTP Server
+intended for testing or experimental purposes. mod_python is required.
+
+
+Installation
+============
+
+0. Prepare an Apache HTTP Server with mod_python enabled.
+
+1. Specify the following Apache HTTP Server directives to suit your
+ configuration.
+
+ If mod_pywebsocket is not in the Python path, specify the following.
+ <websock_lib> is the directory where mod_pywebsocket is installed.
+
+ PythonPath "sys.path+['<websock_lib>']"
+
+ Always specify the following. <websock_handlers> is the directory where
+ user-written WebSocket handlers are placed.
+
+ PythonOption mod_pywebsocket.handler_root <websock_handlers>
+ PythonHeaderParserHandler mod_pywebsocket.headerparserhandler
+
+ To limit the search for WebSocket handlers to a directory <scan_dir>
+ under <websock_handlers>, configure as follows:
+
+ PythonOption mod_pywebsocket.handler_scan <scan_dir>
+
+   <scan_dir> is useful for saving scan time when <websock_handlers>
+ contains many non-WebSocket handler files.
+
+   If you want to allow handlers whose canonical path is not under the root
+   directory (i.e. a symbolic link is in the root directory but its target
+   is not), configure as follows:
+
+ PythonOption mod_pywebsocket.allow_handlers_outside_root_dir On
+
+ Example snippet of httpd.conf:
+ (mod_pywebsocket is in /websock_lib, WebSocket handlers are in
+ /websock_handlers, port is 80 for ws, 443 for wss.)
+
+ <IfModule python_module>
+ PythonPath "sys.path+['/websock_lib']"
+ PythonOption mod_pywebsocket.handler_root /websock_handlers
+ PythonHeaderParserHandler mod_pywebsocket.headerparserhandler
+ </IfModule>
+
+2. Tune Apache parameters for serving WebSocket. At a minimum, the TimeOut
+   directive (a core feature) and the RequestReadTimeout directive (from
+   mod_reqtimeout) should be adjusted so that connections are not killed
+   after only a few seconds of idle time, as in the sketch below.
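+
+   For example (a minimal sketch; the timeout values are only illustrative
+   and should be tuned for your deployment):
+
+     TimeOut 86400
+     RequestReadTimeout header=20-40,MinRate=500 body=0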
+
+3. Verify installation. You can use example/console.html to poke the server.
+
+
+Writing WebSocket handlers
+==========================
+
+When a WebSocket request comes in, the resource name
+specified in the handshake is treated as a file path under
+<websock_handlers>, and the handler defined in
+<websock_handlers>/<resource_name>_wsh.py is invoked.
+
+For example, if the resource name is /example/chat, the handler defined in
+<websock_handlers>/example/chat_wsh.py is invoked.
+
+A WebSocket handler is composed of the following three functions:
+
+ web_socket_do_extra_handshake(request)
+ web_socket_transfer_data(request)
+ web_socket_passive_closing_handshake(request)
+
+where:
+ request: mod_python request.
+
+web_socket_do_extra_handshake is called during the handshake after the
+headers are successfully parsed and WebSocket properties (ws_location,
+ws_origin, and ws_resource) are added to request. A handler
+can reject the request by raising an exception.
+
+A request object has the following properties that you can use during the
+extra handshake (web_socket_do_extra_handshake):
+- ws_resource
+- ws_origin
+- ws_version
+- ws_location (HyBi 00 only)
+- ws_extensions (HyBi 06 and later)
+- ws_deflate (HyBi 06 and later)
+- ws_protocol
+- ws_requested_protocols (HyBi 06 and later)
+
+The last two are a bit tricky. See the next subsection.
+
+
+Subprotocol Negotiation
+-----------------------
+
+For HyBi 06 and later, ws_protocol is always set to None when
+web_socket_do_extra_handshake is called. If ws_requested_protocols is not
+None, you must choose one subprotocol from that list and assign it to
+ws_protocol, as in the sketch below.
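+
+For example (a minimal sketch; the 'chat' subprotocol name is only
+illustrative):
+
+    def web_socket_do_extra_handshake(request):
+        if request.ws_requested_protocols is not None:
+            if 'chat' not in request.ws_requested_protocols:
+                raise Exception('Unsupported subprotocol')
+            request.ws_protocol = 'chat'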
+
+For HyBi 00, when web_socket_do_extra_handshake is called,
+ws_protocol is set to the value given by the client in the
+Sec-WebSocket-Protocol header, or to None if no
+such header was found in the opening handshake request. Finish the extra
+handshake with ws_protocol untouched to accept the requested subprotocol.
+The Sec-WebSocket-Protocol header will then be sent to
+the client in the response with the same value as requested. Raise an
+exception in web_socket_do_extra_handshake to reject the requested
+subprotocol.
+
+
+Data Transfer
+-------------
+
+web_socket_transfer_data is called after the handshake has completed
+successfully. A handler can receive messages from and send messages to the
+client using request. The mod_pywebsocket.msgutil module provides utilities
+for data transfer.
+
+You can receive a message with the following statement:
+
+ message = request.ws_stream.receive_message()
+
+This call blocks until a complete text frame arrives, and the payload data
+of the incoming frame is stored in message. When you're using the IETF
+HyBi 00 or later protocol, receive_message() returns None on receiving a
+client-initiated closing handshake. When any error occurs, receive_message()
+raises an exception.
+
+You can send a message with the following statement:
+
+ request.ws_stream.send_message(message)
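+
+Putting these together, a minimal echo handler (a sketch only; the file name
+echo_wsh.py is just an example) that echoes text messages could look like
+this (web_socket_passive_closing_handshake is discussed under Closing
+Connection below):
+
+    def web_socket_do_extra_handshake(request):
+        pass  # Accept the handshake as-is.
+
+    def web_socket_transfer_data(request):
+        while True:
+            message = request.ws_stream.receive_message()
+            if message is None:
+                # The client initiated the closing handshake.
+                return
+            # Echo the received text message back to the client.
+            request.ws_stream.send_message(message)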
+
+
+Closing Connection
+------------------
+
+Executing the following statement, or simply returning from
+web_socket_transfer_data, causes the connection to close.
+
+ request.ws_stream.close_connection()
+
+close_connection waits for the closing handshake acknowledgement coming
+from the client. If it cannot receive a valid acknowledgement, it raises
+an exception.
+
+web_socket_passive_closing_handshake is called immediately after the server
+receives an incoming closing frame from the client peer. You can specify the
+code and reason via return values; they are sent as an outgoing closing
+frame from the server (see the sketch after the list below). A request
+object has the following properties that you can use in
+web_socket_passive_closing_handshake.
+- ws_close_code
+- ws_close_reason
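+
+For example (a minimal sketch; status code 1000 means normal closure, and
+the reason string is only illustrative):
+
+    def web_socket_passive_closing_handshake(request):
+        # request.ws_close_code and request.ws_close_reason hold what the
+        # client sent; reply with our own code and reason.
+        return 1000, 'done'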
+
+
+Threading
+---------
+
+A WebSocket handler must be thread-safe if the server (Apache or
+standalone.py) is configured to use threads.
+
+
+Configuring WebSocket Extension Processors
+------------------------------------------
+
+See extensions.py for supported WebSocket extensions. Note that they are
+unstable and their APIs are subject to change substantially.
+
+A request object has the following extension-processing-related attributes:
+
+- ws_requested_extensions:
+
+ A list of common.ExtensionParameter instances representing extension
+ parameters received from the client in the client's opening handshake.
+ You shouldn't modify it manually.
+
+- ws_extensions:
+
+ A list of common.ExtensionParameter instances representing extension
+ parameters to send back to the client in the server's opening handshake.
+ You shouldn't touch it directly. Instead, call methods on extension
+ processors.
+
+- ws_extension_processors:
+
+ A list of loaded extension processors. Find the processor for the
+ extension you want to configure from it, and call its methods.
+"""
+
+
+# vi:sts=4 sw=4 et tw=72
diff --git a/testing/mochitest/pywebsocket/mod_pywebsocket/_stream_base.py b/testing/mochitest/pywebsocket/mod_pywebsocket/_stream_base.py
new file mode 100644
index 000000000..8235666bb
--- /dev/null
+++ b/testing/mochitest/pywebsocket/mod_pywebsocket/_stream_base.py
@@ -0,0 +1,181 @@
+# Copyright 2011, Google Inc.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+
+"""Base stream class.
+"""
+
+
+# Note: request.connection.write/read are used in this module, even though
+# the mod_python documentation says that they should be used only in connection
+# handlers. Unfortunately, we have no other options. For example,
+# request.write/read are not suitable because they don't allow direct raw bytes
+# writing/reading.
+
+
+import socket
+
+from mod_pywebsocket import util
+
+
+# Exceptions
+
+
+class ConnectionTerminatedException(Exception):
+ """This exception will be raised when a connection is terminated
+ unexpectedly.
+ """
+
+ pass
+
+
+class InvalidFrameException(ConnectionTerminatedException):
+ """This exception will be raised when we received an invalid frame we
+ cannot parse.
+ """
+
+ pass
+
+
+class BadOperationException(Exception):
+ """This exception will be raised when send_message() is called on
+ server-terminated connection or receive_message() is called on
+ client-terminated connection.
+ """
+
+ pass
+
+
+class UnsupportedFrameException(Exception):
+ """This exception will be raised when we receive a frame with flag, opcode
+ we cannot handle. Handlers can just catch and ignore this exception and
+ call receive_message() again to continue processing the next frame.
+ """
+
+ pass
+
+
+class InvalidUTF8Exception(Exception):
+ """This exception will be raised when we receive a text frame which
+ contains invalid UTF-8 strings.
+ """
+
+ pass
+
+
+class StreamBase(object):
+ """Base stream class."""
+
+ def __init__(self, request):
+ """Construct an instance.
+
+ Args:
+ request: mod_python request.
+ """
+
+ self._logger = util.get_class_logger(self)
+
+ self._request = request
+
+ def _read(self, length):
+ """Reads length bytes from connection. In case we catch any exception,
+ prepends remote address to the exception message and raise again.
+
+ Raises:
+ ConnectionTerminatedException: when read returns empty string.
+ """
+
+ try:
+ read_bytes = self._request.connection.read(length)
+ if not read_bytes:
+ raise ConnectionTerminatedException(
+ 'Receiving %d byte failed. Peer (%r) closed connection' %
+ (length, (self._request.connection.remote_addr,)))
+ return read_bytes
+ except socket.error, e:
+ # Catch a socket.error. Because it's not a child class of the
+ # IOError prior to Python 2.6, we cannot omit this except clause.
+ # Use %s rather than %r for the exception to use human friendly
+ # format.
+ raise ConnectionTerminatedException(
+ 'Receiving %d byte failed. socket.error (%s) occurred' %
+ (length, e))
+ except IOError, e:
+ # Also catch an IOError because mod_python throws it.
+ raise ConnectionTerminatedException(
+ 'Receiving %d byte failed. IOError (%s) occurred' %
+ (length, e))
+
+ def _write(self, bytes_to_write):
+ """Writes given bytes to connection. In case we catch any exception,
+ prepends remote address to the exception message and raise again.
+ """
+
+ try:
+ self._request.connection.write(bytes_to_write)
+ except Exception, e:
+ util.prepend_message_to_exception(
+ 'Failed to send message to %r: ' %
+ (self._request.connection.remote_addr,),
+ e)
+ raise
+
+ def receive_bytes(self, length):
+ """Receives multiple bytes. Retries read when we couldn't receive the
+ specified amount.
+
+ Raises:
+ ConnectionTerminatedException: when read returns empty string.
+ """
+
+ read_bytes = []
+ while length > 0:
+ new_read_bytes = self._read(length)
+ read_bytes.append(new_read_bytes)
+ length -= len(new_read_bytes)
+ return ''.join(read_bytes)
+
+ def _read_until(self, delim_char):
+ """Reads bytes until we encounter delim_char. The result will not
+ contain delim_char.
+
+ Raises:
+ ConnectionTerminatedException: when read returns empty string.
+ """
+
+ read_bytes = []
+ while True:
+ ch = self._read(1)
+ if ch == delim_char:
+ break
+ read_bytes.append(ch)
+ return ''.join(read_bytes)
+
+
+# vi:sts=4 sw=4 et
diff --git a/testing/mochitest/pywebsocket/mod_pywebsocket/_stream_hixie75.py b/testing/mochitest/pywebsocket/mod_pywebsocket/_stream_hixie75.py
new file mode 100644
index 000000000..94cf5b31b
--- /dev/null
+++ b/testing/mochitest/pywebsocket/mod_pywebsocket/_stream_hixie75.py
@@ -0,0 +1,229 @@
+# Copyright 2011, Google Inc.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+
+"""This file provides a class for parsing/building frames of the WebSocket
+protocol version HyBi 00 and Hixie 75.
+
+Specification:
+- HyBi 00 http://tools.ietf.org/html/draft-ietf-hybi-thewebsocketprotocol-00
+- Hixie 75 http://tools.ietf.org/html/draft-hixie-thewebsocketprotocol-75
+"""
+
+
+from mod_pywebsocket import common
+from mod_pywebsocket._stream_base import BadOperationException
+from mod_pywebsocket._stream_base import ConnectionTerminatedException
+from mod_pywebsocket._stream_base import InvalidFrameException
+from mod_pywebsocket._stream_base import StreamBase
+from mod_pywebsocket._stream_base import UnsupportedFrameException
+from mod_pywebsocket import util
+
+
+class StreamHixie75(StreamBase):
+ """A class for parsing/building frames of the WebSocket protocol version
+ HyBi 00 and Hixie 75.
+ """
+
+ def __init__(self, request, enable_closing_handshake=False):
+ """Construct an instance.
+
+ Args:
+ request: mod_python request.
+ enable_closing_handshake: to let StreamHixie75 perform closing
+ handshake as specified in HyBi 00, set
+ this option to True.
+ """
+
+ StreamBase.__init__(self, request)
+
+ self._logger = util.get_class_logger(self)
+
+ self._enable_closing_handshake = enable_closing_handshake
+
+ self._request.client_terminated = False
+ self._request.server_terminated = False
+
+ def send_message(self, message, end=True, binary=False):
+ """Send message.
+
+ Args:
+ message: unicode string to send.
+ binary: not used in hixie75.
+
+ Raises:
+ BadOperationException: when called on a server-terminated
+ connection.
+ """
+
+ if not end:
+ raise BadOperationException(
+ 'StreamHixie75 doesn\'t support send_message with end=False')
+
+ if binary:
+ raise BadOperationException(
+ 'StreamHixie75 doesn\'t support send_message with binary=True')
+
+ if self._request.server_terminated:
+ raise BadOperationException(
+ 'Requested send_message after sending out a closing handshake')
+
+ self._write(''.join(['\x00', message.encode('utf-8'), '\xff']))
+
+ def _read_payload_length_hixie75(self):
+ """Reads a length header in a Hixie75 version frame with length.
+
+ Raises:
+ ConnectionTerminatedException: when read returns empty string.
+ """
+
+ length = 0
+ while True:
+ b_str = self._read(1)
+ b = ord(b_str)
+ length = length * 128 + (b & 0x7f)
+ if (b & 0x80) == 0:
+ break
+ return length
+
+ def receive_message(self):
+ """Receive a WebSocket frame and return its payload an unicode string.
+
+ Returns:
+ payload unicode string in a WebSocket frame.
+
+ Raises:
+ ConnectionTerminatedException: when read returns empty
+ string.
+ BadOperationException: when called on a client-terminated
+ connection.
+ """
+
+ if self._request.client_terminated:
+ raise BadOperationException(
+ 'Requested receive_message after receiving a closing '
+ 'handshake')
+
+ while True:
+ # Read 1 byte.
+ # mp_conn.read will block if no bytes are available.
+ # Timeout is controlled by TimeOut directive of Apache.
+ frame_type_str = self.receive_bytes(1)
+ frame_type = ord(frame_type_str)
+ if (frame_type & 0x80) == 0x80:
+ # The payload length is specified in the frame.
+ # Read and discard.
+ length = self._read_payload_length_hixie75()
+ if length > 0:
+ _ = self.receive_bytes(length)
+ # 5.3 3. 12. if /type/ is 0xFF and /length/ is 0, then set the
+ # /client terminated/ flag and abort these steps.
+ if not self._enable_closing_handshake:
+ continue
+
+ if frame_type == 0xFF and length == 0:
+ self._request.client_terminated = True
+
+ if self._request.server_terminated:
+ self._logger.debug(
+ 'Received ack for server-initiated closing '
+ 'handshake')
+ return None
+
+ self._logger.debug(
+ 'Received client-initiated closing handshake')
+
+ self._send_closing_handshake()
+ self._logger.debug(
+ 'Sent ack for client-initiated closing handshake')
+ return None
+ else:
+ # The payload is delimited with \xff.
+ bytes = self._read_until('\xff')
+ # The WebSocket protocol section 4.4 specifies that invalid
+ # characters must be replaced with U+fffd REPLACEMENT
+ # CHARACTER.
+ message = bytes.decode('utf-8', 'replace')
+ if frame_type == 0x00:
+ return message
+ # Discard data of other types.
+
+ def _send_closing_handshake(self):
+ if not self._enable_closing_handshake:
+ raise BadOperationException(
+ 'Closing handshake is not supported in Hixie 75 protocol')
+
+ self._request.server_terminated = True
+
+ # 5.3 the server may decide to terminate the WebSocket connection by
+ # running through the following steps:
+ # 1. send a 0xFF byte and a 0x00 byte to the client to indicate the
+ # start of the closing handshake.
+ self._write('\xff\x00')
+
+ def close_connection(self, unused_code='', unused_reason=''):
+ """Closes a WebSocket connection.
+
+ Raises:
+            ConnectionTerminatedException: when the closing handshake was
+            not successful.
+ """
+
+ if self._request.server_terminated:
+ self._logger.debug(
+ 'Requested close_connection but server is already terminated')
+ return
+
+ if not self._enable_closing_handshake:
+ self._request.server_terminated = True
+ self._logger.debug('Connection closed')
+ return
+
+ self._send_closing_handshake()
+ self._logger.debug('Sent server-initiated closing handshake')
+
+ # TODO(ukai): 2. wait until the /client terminated/ flag has been set,
+ # or until a server-defined timeout expires.
+ #
+ # For now, we expect receiving closing handshake right after sending
+ # out closing handshake, and if we couldn't receive non-handshake
+ # frame, we take it as ConnectionTerminatedException.
+ message = self.receive_message()
+ if message is not None:
+ raise ConnectionTerminatedException(
+ 'Didn\'t receive valid ack for closing handshake')
+ # TODO: 3. close the WebSocket connection.
+ # note: mod_python Connection (mp_conn) doesn't have close method.
+
+ def send_ping(self, body):
+ raise BadOperationException(
+ 'StreamHixie75 doesn\'t support send_ping')
+
+
+# vi:sts=4 sw=4 et
diff --git a/testing/mochitest/pywebsocket/mod_pywebsocket/_stream_hybi.py b/testing/mochitest/pywebsocket/mod_pywebsocket/_stream_hybi.py
new file mode 100644
index 000000000..104221b4c
--- /dev/null
+++ b/testing/mochitest/pywebsocket/mod_pywebsocket/_stream_hybi.py
@@ -0,0 +1,894 @@
+# Copyright 2012, Google Inc.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+
+"""This file provides classes and helper functions for parsing/building frames
+of the WebSocket protocol (RFC 6455).
+
+Specification:
+http://tools.ietf.org/html/rfc6455
+"""
+
+
+from collections import deque
+import logging
+import os
+import struct
+import time
+
+from mod_pywebsocket import common
+from mod_pywebsocket import util
+from mod_pywebsocket._stream_base import BadOperationException
+from mod_pywebsocket._stream_base import ConnectionTerminatedException
+from mod_pywebsocket._stream_base import InvalidFrameException
+from mod_pywebsocket._stream_base import InvalidUTF8Exception
+from mod_pywebsocket._stream_base import StreamBase
+from mod_pywebsocket._stream_base import UnsupportedFrameException
+
+
+_NOOP_MASKER = util.NoopMasker()
+
+
+class Frame(object):
+
+ def __init__(self, fin=1, rsv1=0, rsv2=0, rsv3=0,
+ opcode=None, payload=''):
+ self.fin = fin
+ self.rsv1 = rsv1
+ self.rsv2 = rsv2
+ self.rsv3 = rsv3
+ self.opcode = opcode
+ self.payload = payload
+
+
+# Helper functions made public to be used for writing unittests for WebSocket
+# clients.
+
+
+def create_length_header(length, mask):
+ """Creates a length header.
+
+ Args:
+ length: Frame length. Must be less than 2^63.
+ mask: Mask bit. Must be boolean.
+
+ Raises:
+ ValueError: when bad data is given.
+ """
+
+ if mask:
+ mask_bit = 1 << 7
+ else:
+ mask_bit = 0
+
+ if length < 0:
+ raise ValueError('length must be non negative integer')
+ elif length <= 125:
+ return chr(mask_bit | length)
+ elif length < (1 << 16):
+ return chr(mask_bit | 126) + struct.pack('!H', length)
+ elif length < (1 << 63):
+ return chr(mask_bit | 127) + struct.pack('!Q', length)
+ else:
+ raise ValueError('Payload is too big for one frame')
+
+
+def create_header(opcode, payload_length, fin, rsv1, rsv2, rsv3, mask):
+ """Creates a frame header.
+
+ Raises:
+        ValueError: when bad data is given.
+ """
+
+ if opcode < 0 or 0xf < opcode:
+ raise ValueError('Opcode out of range')
+
+ if payload_length < 0 or (1 << 63) <= payload_length:
+ raise ValueError('payload_length out of range')
+
+ if (fin | rsv1 | rsv2 | rsv3) & ~1:
+ raise ValueError('FIN bit and Reserved bit parameter must be 0 or 1')
+
+ header = ''
+
+ first_byte = ((fin << 7)
+ | (rsv1 << 6) | (rsv2 << 5) | (rsv3 << 4)
+ | opcode)
+ header += chr(first_byte)
+ header += create_length_header(payload_length, mask)
+
+ return header
+
+
+def _build_frame(header, body, mask):
+ if not mask:
+ return header + body
+
+ masking_nonce = os.urandom(4)
+ masker = util.RepeatedXorMasker(masking_nonce)
+
+ return header + masking_nonce + masker.mask(body)
+
+
+def _filter_and_format_frame_object(frame, mask, frame_filters):
+ for frame_filter in frame_filters:
+ frame_filter.filter(frame)
+
+ header = create_header(
+ frame.opcode, len(frame.payload), frame.fin,
+ frame.rsv1, frame.rsv2, frame.rsv3, mask)
+ return _build_frame(header, frame.payload, mask)
+
+
+def create_binary_frame(
+ message, opcode=common.OPCODE_BINARY, fin=1, mask=False, frame_filters=[]):
+ """Creates a simple binary frame with no extension, reserved bit."""
+
+ frame = Frame(fin=fin, opcode=opcode, payload=message)
+ return _filter_and_format_frame_object(frame, mask, frame_filters)
+
+
+def create_text_frame(
+ message, opcode=common.OPCODE_TEXT, fin=1, mask=False, frame_filters=[]):
+ """Creates a simple text frame with no extension, reserved bit."""
+
+ encoded_message = message.encode('utf-8')
+ return create_binary_frame(encoded_message, opcode, fin, mask,
+ frame_filters)
+
+
+def parse_frame(receive_bytes, logger=None,
+ ws_version=common.VERSION_HYBI_LATEST,
+ unmask_receive=True):
+ """Parses a frame. Returns a tuple containing each header field and
+ payload.
+
+ Args:
+ receive_bytes: a function that reads frame data from a stream or
+            something similar. The function takes the number of bytes to be
+ read. The function must raise ConnectionTerminatedException if
+ there is not enough data to be read.
+ logger: a logging object.
+ ws_version: the version of WebSocket protocol.
+        unmask_receive: unmask received frames. When an unmasked frame is
+            received while this is set, InvalidFrameException is raised.
+
+ Raises:
+ ConnectionTerminatedException: when receive_bytes raises it.
+ InvalidFrameException: when the frame contains invalid data.
+ """
+
+ if not logger:
+ logger = logging.getLogger()
+
+ logger.log(common.LOGLEVEL_FINE, 'Receive the first 2 octets of a frame')
+
+ received = receive_bytes(2)
+
+ first_byte = ord(received[0])
+ fin = (first_byte >> 7) & 1
+ rsv1 = (first_byte >> 6) & 1
+ rsv2 = (first_byte >> 5) & 1
+ rsv3 = (first_byte >> 4) & 1
+ opcode = first_byte & 0xf
+
+ second_byte = ord(received[1])
+ mask = (second_byte >> 7) & 1
+ payload_length = second_byte & 0x7f
+
+ logger.log(common.LOGLEVEL_FINE,
+ 'FIN=%s, RSV1=%s, RSV2=%s, RSV3=%s, opcode=%s, '
+ 'Mask=%s, Payload_length=%s',
+ fin, rsv1, rsv2, rsv3, opcode, mask, payload_length)
+
+ if (mask == 1) != unmask_receive:
+ raise InvalidFrameException(
+            'Mask bit on the received frame didn\'t match masking '
+ 'configuration for received frames')
+
+ # The HyBi and later specs disallow putting a value in 0x0-0xFFFF
+    # into the 8-octet extended payload length field (or 0x0-0x7D in the
+    # 2-octet field).
+ valid_length_encoding = True
+ length_encoding_bytes = 1
+ if payload_length == 127:
+ logger.log(common.LOGLEVEL_FINE,
+ 'Receive 8-octet extended payload length')
+
+ extended_payload_length = receive_bytes(8)
+ payload_length = struct.unpack(
+ '!Q', extended_payload_length)[0]
+ if payload_length > 0x7FFFFFFFFFFFFFFF:
+ raise InvalidFrameException(
+ 'Extended payload length >= 2^63')
+ if ws_version >= 13 and payload_length < 0x10000:
+ valid_length_encoding = False
+ length_encoding_bytes = 8
+
+ logger.log(common.LOGLEVEL_FINE,
+ 'Decoded_payload_length=%s', payload_length)
+ elif payload_length == 126:
+ logger.log(common.LOGLEVEL_FINE,
+ 'Receive 2-octet extended payload length')
+
+ extended_payload_length = receive_bytes(2)
+ payload_length = struct.unpack(
+ '!H', extended_payload_length)[0]
+ if ws_version >= 13 and payload_length < 126:
+ valid_length_encoding = False
+ length_encoding_bytes = 2
+
+ logger.log(common.LOGLEVEL_FINE,
+ 'Decoded_payload_length=%s', payload_length)
+
+ if not valid_length_encoding:
+ logger.warning(
+ 'Payload length is not encoded using the minimal number of '
+ 'bytes (%d is encoded using %d bytes)',
+ payload_length,
+ length_encoding_bytes)
+
+ if mask == 1:
+ logger.log(common.LOGLEVEL_FINE, 'Receive mask')
+
+ masking_nonce = receive_bytes(4)
+ masker = util.RepeatedXorMasker(masking_nonce)
+
+ logger.log(common.LOGLEVEL_FINE, 'Mask=%r', masking_nonce)
+ else:
+ masker = _NOOP_MASKER
+
+ logger.log(common.LOGLEVEL_FINE, 'Receive payload data')
+ if logger.isEnabledFor(common.LOGLEVEL_FINE):
+ receive_start = time.time()
+
+ raw_payload_bytes = receive_bytes(payload_length)
+
+ if logger.isEnabledFor(common.LOGLEVEL_FINE):
+ logger.log(
+ common.LOGLEVEL_FINE,
+ 'Done receiving payload data at %s MB/s',
+ payload_length / (time.time() - receive_start) / 1000 / 1000)
+ logger.log(common.LOGLEVEL_FINE, 'Unmask payload data')
+
+ if logger.isEnabledFor(common.LOGLEVEL_FINE):
+ unmask_start = time.time()
+
+ unmasked_bytes = masker.mask(raw_payload_bytes)
+
+ if logger.isEnabledFor(common.LOGLEVEL_FINE):
+ logger.log(
+ common.LOGLEVEL_FINE,
+ 'Done unmasking payload data at %s MB/s',
+ payload_length / (time.time() - unmask_start) / 1000 / 1000)
+
+ return opcode, unmasked_bytes, fin, rsv1, rsv2, rsv3
+
+
+class FragmentedFrameBuilder(object):
+ """A stateful class to send a message as fragments."""
+
+ def __init__(self, mask, frame_filters=[], encode_utf8=True):
+ """Constructs an instance."""
+
+ self._mask = mask
+ self._frame_filters = frame_filters
+ # This is for skipping UTF-8 encoding when building text type frames
+ # from compressed data.
+ self._encode_utf8 = encode_utf8
+
+ self._started = False
+
+        # Holds the opcode of the first frame of a message to verify that the
+        # types of the other frames in the message are all the same.
+ self._opcode = common.OPCODE_TEXT
+
+ def build(self, payload_data, end, binary):
+ if binary:
+ frame_type = common.OPCODE_BINARY
+ else:
+ frame_type = common.OPCODE_TEXT
+ if self._started:
+ if self._opcode != frame_type:
+ raise ValueError('Message types are different in frames for '
+ 'the same message')
+ opcode = common.OPCODE_CONTINUATION
+ else:
+ opcode = frame_type
+ self._opcode = frame_type
+
+ if end:
+ self._started = False
+ fin = 1
+ else:
+ self._started = True
+ fin = 0
+
+ if binary or not self._encode_utf8:
+ return create_binary_frame(
+ payload_data, opcode, fin, self._mask, self._frame_filters)
+ else:
+ return create_text_frame(
+ payload_data, opcode, fin, self._mask, self._frame_filters)
+
+
+def _create_control_frame(opcode, body, mask, frame_filters):
+ frame = Frame(opcode=opcode, payload=body)
+
+ for frame_filter in frame_filters:
+ frame_filter.filter(frame)
+
+ if len(frame.payload) > 125:
+ raise BadOperationException(
+ 'Payload data size of control frames must be 125 bytes or less')
+
+ header = create_header(
+ frame.opcode, len(frame.payload), frame.fin,
+ frame.rsv1, frame.rsv2, frame.rsv3, mask)
+ return _build_frame(header, frame.payload, mask)
+
+
+def create_ping_frame(body, mask=False, frame_filters=[]):
+ return _create_control_frame(common.OPCODE_PING, body, mask, frame_filters)
+
+
+def create_pong_frame(body, mask=False, frame_filters=[]):
+ return _create_control_frame(common.OPCODE_PONG, body, mask, frame_filters)
+
+
+def create_close_frame(body, mask=False, frame_filters=[]):
+ return _create_control_frame(
+ common.OPCODE_CLOSE, body, mask, frame_filters)
+
+
+def create_closing_handshake_body(code, reason):
+ body = ''
+ if code is not None:
+ if (code > common.STATUS_USER_PRIVATE_MAX or
+ code < common.STATUS_NORMAL_CLOSURE):
+ raise BadOperationException('Status code is out of range')
+ if (code == common.STATUS_NO_STATUS_RECEIVED or
+ code == common.STATUS_ABNORMAL_CLOSURE or
+ code == common.STATUS_TLS_HANDSHAKE):
+ raise BadOperationException('Status code is reserved pseudo '
+ 'code')
+ encoded_reason = reason.encode('utf-8')
+ body = struct.pack('!H', code) + encoded_reason
+ return body
+
+
+class StreamOptions(object):
+ """Holds option values to configure Stream objects."""
+
+ def __init__(self):
+ """Constructs StreamOptions."""
+
+ # Filters applied to frames.
+ self.outgoing_frame_filters = []
+ self.incoming_frame_filters = []
+
+ # Filters applied to messages. Control frames are not affected by them.
+ self.outgoing_message_filters = []
+ self.incoming_message_filters = []
+
+ self.encode_text_message_to_utf8 = True
+ self.mask_send = False
+ self.unmask_receive = True
+
+
+class Stream(StreamBase):
+ """A class for parsing/building frames of the WebSocket protocol
+ (RFC 6455).
+ """
+
+ def __init__(self, request, options):
+ """Constructs an instance.
+
+ Args:
+ request: mod_python request.
+ """
+
+ StreamBase.__init__(self, request)
+
+ self._logger = util.get_class_logger(self)
+
+ self._options = options
+
+ self._request.client_terminated = False
+ self._request.server_terminated = False
+
+ # Holds body of received fragments.
+ self._received_fragments = []
+ # Holds the opcode of the first fragment.
+ self._original_opcode = None
+
+ self._writer = FragmentedFrameBuilder(
+ self._options.mask_send, self._options.outgoing_frame_filters,
+ self._options.encode_text_message_to_utf8)
+
+ self._ping_queue = deque()
+
+ def _receive_frame(self):
+ """Receives a frame and return data in the frame as a tuple containing
+ each header field and payload separately.
+
+ Raises:
+ ConnectionTerminatedException: when read returns empty
+ string.
+ InvalidFrameException: when the frame contains invalid data.
+ """
+
+ def _receive_bytes(length):
+ return self.receive_bytes(length)
+
+ return parse_frame(receive_bytes=_receive_bytes,
+ logger=self._logger,
+ ws_version=self._request.ws_version,
+ unmask_receive=self._options.unmask_receive)
+
+ def _receive_frame_as_frame_object(self):
+ opcode, unmasked_bytes, fin, rsv1, rsv2, rsv3 = self._receive_frame()
+
+ return Frame(fin=fin, rsv1=rsv1, rsv2=rsv2, rsv3=rsv3,
+ opcode=opcode, payload=unmasked_bytes)
+
+ def receive_filtered_frame(self):
+ """Receives a frame and applies frame filters and message filters.
+        The frame to be received must satisfy the following conditions:
+ - The frame is not fragmented.
+ - The opcode of the frame is TEXT or BINARY.
+
+        DO NOT USE this method except for testing purposes.
+ """
+
+ frame = self._receive_frame_as_frame_object()
+ if not frame.fin:
+ raise InvalidFrameException(
+ 'Segmented frames must not be received via '
+ 'receive_filtered_frame()')
+ if (frame.opcode != common.OPCODE_TEXT and
+ frame.opcode != common.OPCODE_BINARY):
+ raise InvalidFrameException(
+ 'Control frames must not be received via '
+ 'receive_filtered_frame()')
+
+ for frame_filter in self._options.incoming_frame_filters:
+ frame_filter.filter(frame)
+ for message_filter in self._options.incoming_message_filters:
+ frame.payload = message_filter.filter(frame.payload)
+ return frame
+
+ def send_message(self, message, end=True, binary=False):
+ """Send message.
+
+ Args:
+ message: text in unicode or binary in str to send.
+ binary: send message as binary frame.
+
+ Raises:
+ BadOperationException: when called on a server-terminated
+ connection or called with inconsistent message type or
+ binary parameter.
+ """
+
+ if self._request.server_terminated:
+ raise BadOperationException(
+ 'Requested send_message after sending out a closing handshake')
+
+ if binary and isinstance(message, unicode):
+ raise BadOperationException(
+ 'Message for binary frame must be instance of str')
+
+ for message_filter in self._options.outgoing_message_filters:
+ message = message_filter.filter(message, end, binary)
+
+ try:
+ # Set this to any positive integer to limit maximum size of data in
+ # payload data of each frame.
+ MAX_PAYLOAD_DATA_SIZE = -1
+
+ if MAX_PAYLOAD_DATA_SIZE <= 0:
+ self._write(self._writer.build(message, end, binary))
+ return
+
+ bytes_written = 0
+ while True:
+ end_for_this_frame = end
+ bytes_to_write = len(message) - bytes_written
+ if (MAX_PAYLOAD_DATA_SIZE > 0 and
+ bytes_to_write > MAX_PAYLOAD_DATA_SIZE):
+ end_for_this_frame = False
+ bytes_to_write = MAX_PAYLOAD_DATA_SIZE
+
+ frame = self._writer.build(
+ message[bytes_written:bytes_written + bytes_to_write],
+ end_for_this_frame,
+ binary)
+ self._write(frame)
+
+ bytes_written += bytes_to_write
+
+ # This if must be placed here (the end of while block) so that
+ # at least one frame is sent.
+ if len(message) <= bytes_written:
+ break
+ except ValueError, e:
+ raise BadOperationException(e)
+
+ def _get_message_from_frame(self, frame):
+ """Gets a message from frame. If the message is composed of fragmented
+ frames and the frame is not the last fragmented frame, this method
+ returns None. The whole message will be returned when the last
+ fragmented frame is passed to this method.
+
+ Raises:
+ InvalidFrameException: when the frame doesn't match defragmentation
+ context, or the frame contains invalid data.
+ """
+
+ if frame.opcode == common.OPCODE_CONTINUATION:
+ if not self._received_fragments:
+ if frame.fin:
+ raise InvalidFrameException(
+ 'Received a termination frame but fragmentation '
+ 'not started')
+ else:
+ raise InvalidFrameException(
+ 'Received an intermediate frame but '
+ 'fragmentation not started')
+
+ if frame.fin:
+ # End of fragmentation frame
+ self._received_fragments.append(frame.payload)
+ message = ''.join(self._received_fragments)
+ self._received_fragments = []
+ return message
+ else:
+ # Intermediate frame
+ self._received_fragments.append(frame.payload)
+ return None
+ else:
+ if self._received_fragments:
+ if frame.fin:
+ raise InvalidFrameException(
+ 'Received an unfragmented frame without '
+ 'terminating existing fragmentation')
+ else:
+ raise InvalidFrameException(
+ 'New fragmentation started without terminating '
+ 'existing fragmentation')
+
+ if frame.fin:
+ # Unfragmented frame
+
+ self._original_opcode = frame.opcode
+ return frame.payload
+ else:
+ # Start of fragmentation frame
+
+ if common.is_control_opcode(frame.opcode):
+ raise InvalidFrameException(
+ 'Control frames must not be fragmented')
+
+ self._original_opcode = frame.opcode
+ self._received_fragments.append(frame.payload)
+ return None
+
+ def _process_close_message(self, message):
+ """Processes close message.
+
+ Args:
+ message: close message.
+
+ Raises:
+ InvalidFrameException: when the message is invalid.
+ """
+
+ self._request.client_terminated = True
+
+ # Status code is optional. We can have status reason only if we
+ # have status code. Status reason can be empty string. So,
+ # allowed cases are
+ # - no application data: no code no reason
+ # - 2 octet of application data: has code but no reason
+ # - 3 or more octet of application data: both code and reason
+ if len(message) == 0:
+ self._logger.debug('Received close frame (empty body)')
+ self._request.ws_close_code = (
+ common.STATUS_NO_STATUS_RECEIVED)
+ elif len(message) == 1:
+ raise InvalidFrameException(
+ 'If a close frame has status code, the length of '
+ 'status code must be 2 octet')
+ elif len(message) >= 2:
+ self._request.ws_close_code = struct.unpack(
+ '!H', message[0:2])[0]
+ self._request.ws_close_reason = message[2:].decode(
+ 'utf-8', 'replace')
+ self._logger.debug(
+ 'Received close frame (code=%d, reason=%r)',
+ self._request.ws_close_code,
+ self._request.ws_close_reason)
+
+ # As we've received a close frame, no more data is coming over the
+ # socket. We can now safely close the socket without worrying about
+ # RST sending.
+
+ if self._request.server_terminated:
+ self._logger.debug(
+ 'Received ack for server-initiated closing handshake')
+ return
+
+ self._logger.debug(
+ 'Received client-initiated closing handshake')
+
+ code = common.STATUS_NORMAL_CLOSURE
+ reason = ''
+ if hasattr(self._request, '_dispatcher'):
+ dispatcher = self._request._dispatcher
+ code, reason = dispatcher.passive_closing_handshake(
+ self._request)
+ if code is None and reason is not None and len(reason) > 0:
+ self._logger.warning(
+ 'Handler specified reason despite code being None')
+ reason = ''
+ if reason is None:
+ reason = ''
+ self._send_closing_handshake(code, reason)
+ self._logger.debug(
+ 'Acknowledged closing handshake initiated by the peer '
+ '(code=%r, reason=%r)', code, reason)
+
+ def _process_ping_message(self, message):
+ """Processes ping message.
+
+ Args:
+ message: ping message.
+ """
+
+ try:
+ handler = self._request.on_ping_handler
+ if handler:
+ handler(self._request, message)
+ return
+ except AttributeError, e:
+ pass
+ self._send_pong(message)
+
+ def _process_pong_message(self, message):
+ """Processes pong message.
+
+ Args:
+ message: pong message.
+ """
+
+ # TODO(tyoshino): Add ping timeout handling.
+
+ inflight_pings = deque()
+
+ while True:
+ try:
+ expected_body = self._ping_queue.popleft()
+ if expected_body == message:
+ # inflight_pings contains pings ignored by the
+ # other peer. Just forget them.
+ self._logger.debug(
+ 'Ping %r is acked (%d pings were ignored)',
+ expected_body, len(inflight_pings))
+ break
+ else:
+ inflight_pings.append(expected_body)
+ except IndexError, e:
+                # The received pong was an unsolicited pong. Keep the
+ # ping queue as is.
+ self._ping_queue = inflight_pings
+                self._logger.debug('Received an unsolicited pong')
+ break
+
+ try:
+ handler = self._request.on_pong_handler
+ if handler:
+ handler(self._request, message)
+ except AttributeError, e:
+ pass
+
+ def receive_message(self):
+ """Receive a WebSocket frame and return its payload as a text in
+ unicode or a binary in str.
+
+ Returns:
+ payload data of the frame
+ - as unicode instance if received text frame
+ - as str instance if received binary frame
+ or None iff received closing handshake.
+ Raises:
+ BadOperationException: when called on a client-terminated
+ connection.
+ ConnectionTerminatedException: when read returns empty
+ string.
+ InvalidFrameException: when the frame contains invalid
+ data.
+            UnsupportedFrameException: when the received frame has a
+                flag or opcode we cannot handle. You can ignore this
+ exception and continue receiving the next frame.
+ """
+
+ if self._request.client_terminated:
+ raise BadOperationException(
+ 'Requested receive_message after receiving a closing '
+ 'handshake')
+
+ while True:
+ # mp_conn.read will block if no bytes are available.
+ # Timeout is controlled by TimeOut directive of Apache.
+
+ frame = self._receive_frame_as_frame_object()
+
+ # Check the constraint on the payload size for control frames
+ # before extension processes the frame.
+ # See also http://tools.ietf.org/html/rfc6455#section-5.5
+ if (common.is_control_opcode(frame.opcode) and
+ len(frame.payload) > 125):
+ raise InvalidFrameException(
+ 'Payload data size of control frames must be 125 bytes or '
+ 'less')
+
+ for frame_filter in self._options.incoming_frame_filters:
+ frame_filter.filter(frame)
+
+ if frame.rsv1 or frame.rsv2 or frame.rsv3:
+ raise UnsupportedFrameException(
+ 'Unsupported flag is set (rsv = %d%d%d)' %
+ (frame.rsv1, frame.rsv2, frame.rsv3))
+
+ message = self._get_message_from_frame(frame)
+ if message is None:
+ continue
+
+ for message_filter in self._options.incoming_message_filters:
+ message = message_filter.filter(message)
+
+ if self._original_opcode == common.OPCODE_TEXT:
+ # The WebSocket protocol section 4.4 specifies that invalid
+ # characters must be replaced with U+fffd REPLACEMENT
+ # CHARACTER.
+ try:
+ return message.decode('utf-8')
+ except UnicodeDecodeError, e:
+ raise InvalidUTF8Exception(e)
+ elif self._original_opcode == common.OPCODE_BINARY:
+ return message
+ elif self._original_opcode == common.OPCODE_CLOSE:
+ self._process_close_message(message)
+ return None
+ elif self._original_opcode == common.OPCODE_PING:
+ self._process_ping_message(message)
+ elif self._original_opcode == common.OPCODE_PONG:
+ self._process_pong_message(message)
+ else:
+ raise UnsupportedFrameException(
+ 'Opcode %d is not supported' % self._original_opcode)
+
+ def _send_closing_handshake(self, code, reason):
+ body = create_closing_handshake_body(code, reason)
+ frame = create_close_frame(
+ body, mask=self._options.mask_send,
+ frame_filters=self._options.outgoing_frame_filters)
+
+ self._request.server_terminated = True
+
+ self._write(frame)
+
+ def close_connection(self, code=common.STATUS_NORMAL_CLOSURE, reason='',
+ wait_response=True):
+ """Closes a WebSocket connection. Note that this method blocks until
+ it receives acknowledgement to the closing handshake.
+
+ Args:
+ code: Status code for close frame. If code is None, a close
+ frame with empty body will be sent.
+ reason: string representing close reason.
+            wait_response: True if the caller wants to wait for the response.
+ Raises:
+            BadOperationException: when reason is specified with code None,
+            or reason is an instance of neither str nor unicode.
+ """
+
+ if self._request.server_terminated:
+ self._logger.debug(
+ 'Requested close_connection but server is already terminated')
+ return
+
+ # When we receive a close frame, we call _process_close_message().
+ # _process_close_message() immediately acknowledges to the
+ # server-initiated closing handshake and sets server_terminated to
+ # True. So, here we can assume that we haven't received any close
+ # frame. We're initiating a closing handshake.
+
+ if code is None:
+ if reason is not None and len(reason) > 0:
+ raise BadOperationException(
+ 'close reason must not be specified if code is None')
+ reason = ''
+ else:
+ if not isinstance(reason, str) and not isinstance(reason, unicode):
+ raise BadOperationException(
+ 'close reason must be an instance of str or unicode')
+
+ self._send_closing_handshake(code, reason)
+ self._logger.debug(
+ 'Initiated closing handshake (code=%r, reason=%r)',
+ code, reason)
+
+ if (code == common.STATUS_GOING_AWAY or
+ code == common.STATUS_PROTOCOL_ERROR) or not wait_response:
+ # It doesn't make sense to wait for a close frame if the reason is
+            # a protocol error or the server going away. For some other
+            # reasons, it might also not make sense to wait for a close frame,
+            # but that isn't clear yet.
+ return
+
+ # TODO(ukai): 2. wait until the /client terminated/ flag has been set,
+ # or until a server-defined timeout expires.
+ #
+ # For now, we expect receiving closing handshake right after sending
+ # out closing handshake.
+ message = self.receive_message()
+ if message is not None:
+ raise ConnectionTerminatedException(
+ 'Didn\'t receive valid ack for closing handshake')
+ # TODO: 3. close the WebSocket connection.
+ # note: mod_python Connection (mp_conn) doesn't have close method.
+
+ def send_ping(self, body=''):
+ frame = create_ping_frame(
+ body,
+ self._options.mask_send,
+ self._options.outgoing_frame_filters)
+ self._write(frame)
+
+ self._ping_queue.append(body)
+
+ def _send_pong(self, body):
+ frame = create_pong_frame(
+ body,
+ self._options.mask_send,
+ self._options.outgoing_frame_filters)
+ self._write(frame)
+
+ def get_last_received_opcode(self):
+ """Returns the opcode of the WebSocket message which the last received
+        frame belongs to. The return value is valid only immediately after a
+        receive_message() call.
+ """
+
+ return self._original_opcode
+
+
+# vi:sts=4 sw=4 et
diff --git a/testing/mochitest/pywebsocket/mod_pywebsocket/common.py b/testing/mochitest/pywebsocket/mod_pywebsocket/common.py
new file mode 100644
index 000000000..8c1524284
--- /dev/null
+++ b/testing/mochitest/pywebsocket/mod_pywebsocket/common.py
@@ -0,0 +1,301 @@
+# Copyright 2012, Google Inc.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+
+"""This file must not depend on any module specific to the WebSocket protocol.
+"""
+
+
+from mod_pywebsocket import http_header_util
+
+
+# Additional log level definitions.
+LOGLEVEL_FINE = 9
+
+# Constants indicating WebSocket protocol version.
+VERSION_HIXIE75 = -1
+VERSION_HYBI00 = 0
+VERSION_HYBI01 = 1
+VERSION_HYBI02 = 2
+VERSION_HYBI03 = 2
+VERSION_HYBI04 = 4
+VERSION_HYBI05 = 5
+VERSION_HYBI06 = 6
+VERSION_HYBI07 = 7
+VERSION_HYBI08 = 8
+VERSION_HYBI09 = 8
+VERSION_HYBI10 = 8
+VERSION_HYBI11 = 8
+VERSION_HYBI12 = 8
+VERSION_HYBI13 = 13
+VERSION_HYBI14 = 13
+VERSION_HYBI15 = 13
+VERSION_HYBI16 = 13
+VERSION_HYBI17 = 13
+
+# Constants indicating WebSocket protocol latest version.
+VERSION_HYBI_LATEST = VERSION_HYBI13
+
+# Port numbers
+DEFAULT_WEB_SOCKET_PORT = 80
+DEFAULT_WEB_SOCKET_SECURE_PORT = 443
+
+# Schemes
+WEB_SOCKET_SCHEME = 'ws'
+WEB_SOCKET_SECURE_SCHEME = 'wss'
+
+# Frame opcodes defined in the spec.
+OPCODE_CONTINUATION = 0x0
+OPCODE_TEXT = 0x1
+OPCODE_BINARY = 0x2
+OPCODE_CLOSE = 0x8
+OPCODE_PING = 0x9
+OPCODE_PONG = 0xa
+
+# UUIDs used by HyBi 04 and later opening handshake and frame masking.
+WEBSOCKET_ACCEPT_UUID = '258EAFA5-E914-47DA-95CA-C5AB0DC85B11'
+
+# Opening handshake header names and expected values.
+UPGRADE_HEADER = 'Upgrade'
+WEBSOCKET_UPGRADE_TYPE = 'websocket'
+WEBSOCKET_UPGRADE_TYPE_HIXIE75 = 'WebSocket'
+CONNECTION_HEADER = 'Connection'
+UPGRADE_CONNECTION_TYPE = 'Upgrade'
+HOST_HEADER = 'Host'
+ORIGIN_HEADER = 'Origin'
+SEC_WEBSOCKET_ORIGIN_HEADER = 'Sec-WebSocket-Origin'
+SEC_WEBSOCKET_KEY_HEADER = 'Sec-WebSocket-Key'
+SEC_WEBSOCKET_ACCEPT_HEADER = 'Sec-WebSocket-Accept'
+SEC_WEBSOCKET_VERSION_HEADER = 'Sec-WebSocket-Version'
+SEC_WEBSOCKET_PROTOCOL_HEADER = 'Sec-WebSocket-Protocol'
+SEC_WEBSOCKET_EXTENSIONS_HEADER = 'Sec-WebSocket-Extensions'
+SEC_WEBSOCKET_DRAFT_HEADER = 'Sec-WebSocket-Draft'
+SEC_WEBSOCKET_KEY1_HEADER = 'Sec-WebSocket-Key1'
+SEC_WEBSOCKET_KEY2_HEADER = 'Sec-WebSocket-Key2'
+SEC_WEBSOCKET_LOCATION_HEADER = 'Sec-WebSocket-Location'
+
+# Extensions
+DEFLATE_FRAME_EXTENSION = 'deflate-frame'
+PERMESSAGE_DEFLATE_EXTENSION = 'permessage-deflate'
+X_WEBKIT_DEFLATE_FRAME_EXTENSION = 'x-webkit-deflate-frame'
+MUX_EXTENSION = 'mux_DO_NOT_USE'
+
+# Status codes
+# The codes STATUS_NO_STATUS_RECEIVED, STATUS_ABNORMAL_CLOSURE, and
+# STATUS_TLS_HANDSHAKE are pseudo codes that indicate specific error cases.
+# They cannot be used as codes in actual closing frames.
+# Application-level errors must use codes in the range
+# STATUS_USER_REGISTERED_BASE to STATUS_USER_PRIVATE_MAX. The codes in the
+# range STATUS_USER_REGISTERED_BASE to STATUS_USER_REGISTERED_MAX are managed
+# by IANA. Usually an application should define its protocol-level errors in
+# the range STATUS_USER_PRIVATE_BASE to STATUS_USER_PRIVATE_MAX.
+STATUS_NORMAL_CLOSURE = 1000
+STATUS_GOING_AWAY = 1001
+STATUS_PROTOCOL_ERROR = 1002
+STATUS_UNSUPPORTED_DATA = 1003
+STATUS_NO_STATUS_RECEIVED = 1005
+STATUS_ABNORMAL_CLOSURE = 1006
+STATUS_INVALID_FRAME_PAYLOAD_DATA = 1007
+STATUS_POLICY_VIOLATION = 1008
+STATUS_MESSAGE_TOO_BIG = 1009
+STATUS_MANDATORY_EXTENSION = 1010
+STATUS_INTERNAL_ENDPOINT_ERROR = 1011
+STATUS_TLS_HANDSHAKE = 1015
+STATUS_USER_REGISTERED_BASE = 3000
+STATUS_USER_REGISTERED_MAX = 3999
+STATUS_USER_PRIVATE_BASE = 4000
+STATUS_USER_PRIVATE_MAX = 4999
+# Following definitions are aliases to keep compatibility. Applications must
+# not use these obsoleted definitions anymore.
+STATUS_NORMAL = STATUS_NORMAL_CLOSURE
+STATUS_UNSUPPORTED = STATUS_UNSUPPORTED_DATA
+STATUS_CODE_NOT_AVAILABLE = STATUS_NO_STATUS_RECEIVED
+STATUS_ABNORMAL_CLOSE = STATUS_ABNORMAL_CLOSURE
+STATUS_INVALID_FRAME_PAYLOAD = STATUS_INVALID_FRAME_PAYLOAD_DATA
+STATUS_MANDATORY_EXT = STATUS_MANDATORY_EXTENSION
+
+# HTTP status codes
+HTTP_STATUS_BAD_REQUEST = 400
+HTTP_STATUS_FORBIDDEN = 403
+HTTP_STATUS_NOT_FOUND = 404
+
+
+def is_control_opcode(opcode):
+ return (opcode >> 3) == 1
+
+
+class ExtensionParameter(object):
+ """Holds information about an extension which is exchanged on extension
+ negotiation in opening handshake.
+ """
+
+ def __init__(self, name):
+ self._name = name
+ # TODO(tyoshino): Change the data structure to more efficient one such
+ # as dict when the spec changes to say like
+ # - Parameter names must be unique
+ # - The order of parameters is not significant
+ self._parameters = []
+
+ def name(self):
+ return self._name
+
+ def add_parameter(self, name, value):
+ self._parameters.append((name, value))
+
+ def get_parameters(self):
+ return self._parameters
+
+ def get_parameter_names(self):
+ return [name for name, unused_value in self._parameters]
+
+ def has_parameter(self, name):
+ for param_name, param_value in self._parameters:
+ if param_name == name:
+ return True
+ return False
+
+ def get_parameter_value(self, name):
+ for param_name, param_value in self._parameters:
+ if param_name == name:
+ return param_value
+
+
+class ExtensionParsingException(Exception):
+ def __init__(self, name):
+ super(ExtensionParsingException, self).__init__(name)
+
+
+def _parse_extension_param(state, definition):
+ param_name = http_header_util.consume_token(state)
+
+ if param_name is None:
+ raise ExtensionParsingException('No valid parameter name found')
+
+ http_header_util.consume_lwses(state)
+
+ if not http_header_util.consume_string(state, '='):
+ definition.add_parameter(param_name, None)
+ return
+
+ http_header_util.consume_lwses(state)
+
+ # TODO(tyoshino): Add code to validate that parsed param_value is token
+ param_value = http_header_util.consume_token_or_quoted_string(state)
+ if param_value is None:
+ raise ExtensionParsingException(
+ 'No valid parameter value found on the right-hand side of '
+ 'parameter %r' % param_name)
+
+ definition.add_parameter(param_name, param_value)
+
+
+def _parse_extension(state):
+ extension_token = http_header_util.consume_token(state)
+ if extension_token is None:
+ return None
+
+ extension = ExtensionParameter(extension_token)
+
+ while True:
+ http_header_util.consume_lwses(state)
+
+ if not http_header_util.consume_string(state, ';'):
+ break
+
+ http_header_util.consume_lwses(state)
+
+ try:
+ _parse_extension_param(state, extension)
+ except ExtensionParsingException, e:
+ raise ExtensionParsingException(
+ 'Failed to parse parameter for %r (%r)' %
+ (extension_token, e))
+
+ return extension
+
+
+def parse_extensions(data):
+ """Parses Sec-WebSocket-Extensions header value returns a list of
+ ExtensionParameter objects.
+
+ Leading LWSes must be trimmed.
+ """
+
+ state = http_header_util.ParsingState(data)
+
+ extension_list = []
+ while True:
+ extension = _parse_extension(state)
+ if extension is not None:
+ extension_list.append(extension)
+
+ http_header_util.consume_lwses(state)
+
+ if http_header_util.peek(state) is None:
+ break
+
+ if not http_header_util.consume_string(state, ','):
+ raise ExtensionParsingException(
+ 'Failed to parse Sec-WebSocket-Extensions header: '
+ 'Expected a comma but found %r' %
+ http_header_util.peek(state))
+
+ http_header_util.consume_lwses(state)
+
+ if len(extension_list) == 0:
+ raise ExtensionParsingException(
+ 'No valid extension entry found')
+
+ return extension_list
+
+
+def format_extension(extension):
+ """Formats an ExtensionParameter object."""
+
+ formatted_params = [extension.name()]
+ for param_name, param_value in extension.get_parameters():
+ if param_value is None:
+ formatted_params.append(param_name)
+ else:
+ quoted_value = http_header_util.quote_if_necessary(param_value)
+ formatted_params.append('%s=%s' % (param_name, quoted_value))
+ return '; '.join(formatted_params)
+
+
+def format_extensions(extension_list):
+ """Formats a list of ExtensionParameter objects."""
+
+ formatted_extension_list = []
+ for extension in extension_list:
+ formatted_extension_list.append(format_extension(extension))
+ return ', '.join(formatted_extension_list)
+
+
+# vi:sts=4 sw=4 et
diff --git a/testing/mochitest/pywebsocket/mod_pywebsocket/dispatch.py b/testing/mochitest/pywebsocket/mod_pywebsocket/dispatch.py
new file mode 100644
index 000000000..96c91e0c9
--- /dev/null
+++ b/testing/mochitest/pywebsocket/mod_pywebsocket/dispatch.py
@@ -0,0 +1,393 @@
+# Copyright 2012, Google Inc.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+
+"""Dispatch WebSocket request.
+"""
+
+
+import logging
+import os
+import re
+
+from mod_pywebsocket import common
+from mod_pywebsocket import handshake
+from mod_pywebsocket import msgutil
+from mod_pywebsocket import mux
+from mod_pywebsocket import stream
+from mod_pywebsocket import util
+
+
+_SOURCE_PATH_PATTERN = re.compile(r'(?i)_wsh\.py$')
+_SOURCE_SUFFIX = '_wsh.py'
+_DO_EXTRA_HANDSHAKE_HANDLER_NAME = 'web_socket_do_extra_handshake'
+_TRANSFER_DATA_HANDLER_NAME = 'web_socket_transfer_data'
+_PASSIVE_CLOSING_HANDSHAKE_HANDLER_NAME = (
+ 'web_socket_passive_closing_handshake')
+
+
+class DispatchException(Exception):
+ """Exception in dispatching WebSocket request."""
+
+ def __init__(self, name, status=common.HTTP_STATUS_NOT_FOUND):
+ super(DispatchException, self).__init__(name)
+ self.status = status
+
+
+def _default_passive_closing_handshake_handler(request):
+ """Default web_socket_passive_closing_handshake handler."""
+
+ return common.STATUS_NORMAL_CLOSURE, ''
+
+
+def _normalize_path(path):
+ """Normalize path.
+
+ Args:
+ path: the path to normalize.
+
+    The path is converted to an absolute path.
+ The input path can use either '\\' or '/' as the separator.
+ The normalized path always uses '/' regardless of the platform.
+ """
+
+ path = path.replace('\\', os.path.sep)
+ path = os.path.realpath(path)
+ path = path.replace('\\', '/')
+ return path
+
+
+def _create_path_to_resource_converter(base_dir):
+ """Returns a function that converts the path of a WebSocket handler source
+ file to a resource string by removing the path to the base directory from
+ its head, removing _SOURCE_SUFFIX from its tail, and replacing path
+ separators in it with '/'.
+
+ Args:
+ base_dir: the path to the base directory.
+ """
+
+ base_dir = _normalize_path(base_dir)
+
+ base_len = len(base_dir)
+ suffix_len = len(_SOURCE_SUFFIX)
+
+ def converter(path):
+ if not path.endswith(_SOURCE_SUFFIX):
+ return None
+        # _normalize_path must not be used here because resolving symlinks
+        # breaks the subsequent path check.
+ path = path.replace('\\', '/')
+ if not path.startswith(base_dir):
+ return None
+ return path[base_len:-suffix_len]
+
+ return converter
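
A small sketch (the paths are hypothetical, and it assumes no symlinks along the base path) of how the returned converter maps a handler file path to a resource name:

    from mod_pywebsocket.dispatch import _create_path_to_resource_converter

    convert = _create_path_to_resource_converter('/srv/ws-handlers')
    # A handler at <base>/echo_wsh.py is served as the resource '/echo'.
    print(convert('/srv/ws-handlers/echo_wsh.py'))   # -> '/echo'
    # Files that don't end with '_wsh.py' or live outside the base map to None.
    print(convert('/srv/ws-handlers/readme.txt'))    # -> None
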
+
+
+def _enumerate_handler_file_paths(directory):
+ """Returns a generator that enumerates WebSocket Handler source file names
+ in the given directory.
+ """
+
+ for root, unused_dirs, files in os.walk(directory):
+ for base in files:
+ path = os.path.join(root, base)
+ if _SOURCE_PATH_PATTERN.search(path):
+ yield path
+
+
+class _HandlerSuite(object):
+ """A handler suite holder class."""
+
+ def __init__(self, do_extra_handshake, transfer_data,
+ passive_closing_handshake):
+ self.do_extra_handshake = do_extra_handshake
+ self.transfer_data = transfer_data
+ self.passive_closing_handshake = passive_closing_handshake
+
+
+def _source_handler_file(handler_definition):
+ """Source a handler definition string.
+
+ Args:
+ handler_definition: a string containing Python statements that define
+ handler functions.
+ """
+
+ global_dic = {}
+ try:
+ exec handler_definition in global_dic
+ except Exception:
+ raise DispatchException('Error in sourcing handler:' +
+ util.get_stack_trace())
+ passive_closing_handshake_handler = None
+ try:
+ passive_closing_handshake_handler = _extract_handler(
+ global_dic, _PASSIVE_CLOSING_HANDSHAKE_HANDLER_NAME)
+ except Exception:
+ passive_closing_handshake_handler = (
+ _default_passive_closing_handshake_handler)
+ return _HandlerSuite(
+ _extract_handler(global_dic, _DO_EXTRA_HANDSHAKE_HANDLER_NAME),
+ _extract_handler(global_dic, _TRANSFER_DATA_HANDLER_NAME),
+ passive_closing_handshake_handler)
+
+
+def _extract_handler(dic, name):
+ """Extracts a callable with the specified name from the given dictionary
+ dic.
+ """
+
+ if name not in dic:
+ raise DispatchException('%s is not defined.' % name)
+ handler = dic[name]
+ if not callable(handler):
+ raise DispatchException('%s is not callable.' % name)
+ return handler
+
+
+class Dispatcher(object):
+ """Dispatches WebSocket requests.
+
+ This class maintains a map from resource name to handlers.
+ """
+
+ def __init__(
+ self, root_dir, scan_dir=None,
+ allow_handlers_outside_root_dir=True):
+ """Construct an instance.
+
+ Args:
+ root_dir: The directory where handler definition files are
+ placed.
+ scan_dir: The directory where handler definition files are
+ searched. scan_dir must be a directory under root_dir,
+ including root_dir itself. If scan_dir is None,
+ root_dir is used as scan_dir. scan_dir can be useful
+ in saving scan time when root_dir contains many
+ subdirectories.
+ allow_handlers_outside_root_dir: Scans handler files even if their
+ canonical path is not under root_dir.
+ """
+
+ self._logger = util.get_class_logger(self)
+
+ self._handler_suite_map = {}
+ self._source_warnings = []
+ if scan_dir is None:
+ scan_dir = root_dir
+ if not os.path.realpath(scan_dir).startswith(
+ os.path.realpath(root_dir)):
+ raise DispatchException('scan_dir:%s must be a directory under '
+ 'root_dir:%s.' % (scan_dir, root_dir))
+ self._source_handler_files_in_dir(
+ root_dir, scan_dir, allow_handlers_outside_root_dir)
+
+ def add_resource_path_alias(self,
+ alias_resource_path, existing_resource_path):
+ """Add resource path alias.
+
+        Once added, requests to alias_resource_path are handled by the
+        handler registered for existing_resource_path.
+
+ Args:
+ alias_resource_path: alias resource path
+ existing_resource_path: existing resource path
+ """
+ try:
+ handler_suite = self._handler_suite_map[existing_resource_path]
+ self._handler_suite_map[alias_resource_path] = handler_suite
+ except KeyError:
+ raise DispatchException('No handler for: %r' %
+ existing_resource_path)
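
A hedged usage sketch (the directory and resource names are hypothetical): build a Dispatcher over a handler directory, report sourcing warnings, and alias one resource to another.

    from mod_pywebsocket import dispatch

    # Scans /srv/ws-handlers for *_wsh.py files and builds the resource map.
    dispatcher = dispatch.Dispatcher('/srv/ws-handlers')
    for warning in dispatcher.source_warnings():
        print('warning: %s' % warning)
    # Requests for '/echo-alias' are then served by the '/echo' handler suite;
    # this raises DispatchException if no handler is registered for '/echo'.
    dispatcher.add_resource_path_alias('/echo-alias', '/echo')
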
+
+ def source_warnings(self):
+ """Return warnings in sourcing handlers."""
+
+ return self._source_warnings
+
+ def do_extra_handshake(self, request):
+ """Do extra checking in WebSocket handshake.
+
+ Select a handler based on request.uri and call its
+ web_socket_do_extra_handshake function.
+
+ Args:
+ request: mod_python request.
+
+ Raises:
+            DispatchException: when no handler is found
+            AbortedByUserException: when the user handler aborts the connection
+            HandshakeException: when the opening handshake fails
+ """
+
+ handler_suite = self.get_handler_suite(request.ws_resource)
+ if handler_suite is None:
+ raise DispatchException('No handler for: %r' % request.ws_resource)
+ do_extra_handshake_ = handler_suite.do_extra_handshake
+ try:
+ do_extra_handshake_(request)
+ except handshake.AbortedByUserException, e:
+ # Re-raise to tell the caller of this function to finish this
+ # connection without sending any error.
+ self._logger.debug('%s', util.get_stack_trace())
+ raise
+ except Exception, e:
+ util.prepend_message_to_exception(
+ '%s raised exception for %s: ' % (
+ _DO_EXTRA_HANDSHAKE_HANDLER_NAME,
+ request.ws_resource),
+ e)
+ raise handshake.HandshakeException(e, common.HTTP_STATUS_FORBIDDEN)
+
+ def transfer_data(self, request):
+ """Let a handler transfer_data with a WebSocket client.
+
+ Select a handler based on request.ws_resource and call its
+ web_socket_transfer_data function.
+
+ Args:
+ request: mod_python request.
+
+ Raises:
+            DispatchException: when no handler is found
+            AbortedByUserException: when the user handler aborts the connection
+ """
+
+ # TODO(tyoshino): Terminate underlying TCP connection if possible.
+ try:
+ if mux.use_mux(request):
+ mux.start(request, self)
+ else:
+ handler_suite = self.get_handler_suite(request.ws_resource)
+ if handler_suite is None:
+ raise DispatchException('No handler for: %r' %
+ request.ws_resource)
+ transfer_data_ = handler_suite.transfer_data
+ transfer_data_(request)
+
+ if not request.server_terminated:
+ request.ws_stream.close_connection()
+ # Catch non-critical exceptions the handler didn't handle.
+ except handshake.AbortedByUserException, e:
+ self._logger.debug('%s', util.get_stack_trace())
+ raise
+ except msgutil.BadOperationException, e:
+ self._logger.debug('%s', e)
+ request.ws_stream.close_connection(
+ common.STATUS_INTERNAL_ENDPOINT_ERROR)
+ except msgutil.InvalidFrameException, e:
+            # InvalidFrameException must be caught before
+            # ConnectionTerminatedException, which is its superclass and would
+            # otherwise catch it as well.
+ self._logger.debug('%s', e)
+ request.ws_stream.close_connection(common.STATUS_PROTOCOL_ERROR)
+ except msgutil.UnsupportedFrameException, e:
+ self._logger.debug('%s', e)
+ request.ws_stream.close_connection(common.STATUS_UNSUPPORTED_DATA)
+ except stream.InvalidUTF8Exception, e:
+ self._logger.debug('%s', e)
+ request.ws_stream.close_connection(
+ common.STATUS_INVALID_FRAME_PAYLOAD_DATA)
+ except msgutil.ConnectionTerminatedException, e:
+ self._logger.debug('%s', e)
+ except Exception, e:
+ # Any other exceptions are forwarded to the caller of this
+ # function.
+ util.prepend_message_to_exception(
+ '%s raised exception for %s: ' % (
+ _TRANSFER_DATA_HANDLER_NAME, request.ws_resource),
+ e)
+ raise
+
+ def passive_closing_handshake(self, request):
+ """Prepare code and reason for responding client initiated closing
+ handshake.
+ """
+
+ handler_suite = self.get_handler_suite(request.ws_resource)
+ if handler_suite is None:
+ return _default_passive_closing_handshake_handler(request)
+ return handler_suite.passive_closing_handshake(request)
+
+ def get_handler_suite(self, resource):
+ """Retrieves two handlers (one for extra handshake processing, and one
+ for data transfer) for the given request as a HandlerSuite object.
+ """
+
+ fragment = None
+ if '#' in resource:
+ resource, fragment = resource.split('#', 1)
+ if '?' in resource:
+ resource = resource.split('?', 1)[0]
+ handler_suite = self._handler_suite_map.get(resource)
+ if handler_suite and fragment:
+ raise DispatchException('Fragment identifiers MUST NOT be used on '
+ 'WebSocket URIs',
+ common.HTTP_STATUS_BAD_REQUEST)
+ return handler_suite
+
+ def _source_handler_files_in_dir(
+ self, root_dir, scan_dir, allow_handlers_outside_root_dir):
+ """Source all the handler source files in the scan_dir directory.
+
+ The resource path is determined relative to root_dir.
+ """
+
+ # We build a map from resource to handler code assuming that there's
+ # only one path from root_dir to scan_dir and it can be obtained by
+ # comparing realpath of them.
+
+ # Here we cannot use abspath. See
+ # https://bugs.webkit.org/show_bug.cgi?id=31603
+
+ convert = _create_path_to_resource_converter(root_dir)
+ scan_realpath = os.path.realpath(scan_dir)
+ root_realpath = os.path.realpath(root_dir)
+ for path in _enumerate_handler_file_paths(scan_realpath):
+ if (not allow_handlers_outside_root_dir and
+ (not os.path.realpath(path).startswith(root_realpath))):
+ self._logger.debug(
+ 'Canonical path of %s is not under root directory' %
+ path)
+ continue
+ try:
+ handler_suite = _source_handler_file(open(path).read())
+ except DispatchException, e:
+ self._source_warnings.append('%s: %s' % (path, e))
+ continue
+ resource = convert(path)
+ if resource is None:
+ self._logger.debug(
+ 'Path to resource conversion on %s failed' % path)
+ else:
+ self._handler_suite_map[convert(path)] = handler_suite
+
+
+# vi:sts=4 sw=4 et
diff --git a/testing/mochitest/pywebsocket/mod_pywebsocket/extensions.py b/testing/mochitest/pywebsocket/mod_pywebsocket/extensions.py
new file mode 100644
index 000000000..1edd988f6
--- /dev/null
+++ b/testing/mochitest/pywebsocket/mod_pywebsocket/extensions.py
@@ -0,0 +1,764 @@
+# Copyright 2012, Google Inc.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+
+from mod_pywebsocket import common
+from mod_pywebsocket import util
+from mod_pywebsocket.http_header_util import quote_if_necessary
+
+
+# The list of available server side extension processor classes.
+_available_processors = {}
+_compression_extension_names = []
+
+
+class ExtensionProcessorInterface(object):
+
+ def __init__(self, request):
+ self._logger = util.get_class_logger(self)
+
+ self._request = request
+ self._active = True
+
+ def request(self):
+ return self._request
+
+ def name(self):
+ return None
+
+ def check_consistency_with_other_processors(self, processors):
+ pass
+
+ def set_active(self, active):
+ self._active = active
+
+ def is_active(self):
+ return self._active
+
+ def _get_extension_response_internal(self):
+ return None
+
+ def get_extension_response(self):
+ if not self._active:
+ self._logger.debug('Extension %s is deactivated', self.name())
+ return None
+
+ response = self._get_extension_response_internal()
+ if response is None:
+ self._active = False
+ return response
+
+ def _setup_stream_options_internal(self, stream_options):
+ pass
+
+ def setup_stream_options(self, stream_options):
+ if self._active:
+ self._setup_stream_options_internal(stream_options)
+
+
+def _log_outgoing_compression_ratio(
+ logger, original_bytes, filtered_bytes, average_ratio):
+ # Print inf when ratio is not available.
+ ratio = float('inf')
+ if original_bytes != 0:
+ ratio = float(filtered_bytes) / original_bytes
+
+ logger.debug('Outgoing compression ratio: %f (average: %f)' %
+ (ratio, average_ratio))
+
+
+def _log_incoming_compression_ratio(
+ logger, received_bytes, filtered_bytes, average_ratio):
+ # Print inf when ratio is not available.
+ ratio = float('inf')
+ if filtered_bytes != 0:
+ ratio = float(received_bytes) / filtered_bytes
+
+ logger.debug('Incoming compression ratio: %f (average: %f)' %
+ (ratio, average_ratio))
+
+
+def _parse_window_bits(bits):
+ """Return parsed integer value iff the given string conforms to the
+ grammar of the window bits extension parameters.
+ """
+
+ if bits is None:
+ raise ValueError('Value is required')
+
+ # For non integer values such as "10.0", ValueError will be raised.
+ int_bits = int(bits)
+
+ # First condition is to drop leading zero case e.g. "08".
+ if bits != str(int_bits) or int_bits < 8 or int_bits > 15:
+ raise ValueError('Invalid value: %r' % bits)
+
+ return int_bits
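
A short sketch of the validation above: only the integers 8 through 15, written without leading zeros, are accepted; everything else raises ValueError.

    from mod_pywebsocket import extensions

    for bits in ('8', '15', '08', '16', '10.0', None):
        try:
            print('%r -> %d' % (bits, extensions._parse_window_bits(bits)))
        except ValueError as e:
            print('%r rejected: %s' % (bits, e))
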
+
+
+class _AverageRatioCalculator(object):
+ """Stores total bytes of original and result data, and calculates average
+ result / original ratio.
+ """
+
+ def __init__(self):
+ self._total_original_bytes = 0
+ self._total_result_bytes = 0
+
+ def add_original_bytes(self, value):
+ self._total_original_bytes += value
+
+ def add_result_bytes(self, value):
+ self._total_result_bytes += value
+
+ def get_average_ratio(self):
+ if self._total_original_bytes != 0:
+ return (float(self._total_result_bytes) /
+ self._total_original_bytes)
+ else:
+ return float('inf')
+
+
+class DeflateFrameExtensionProcessor(ExtensionProcessorInterface):
+ """deflate-frame extension processor.
+
+ Specification:
+ http://tools.ietf.org/html/draft-tyoshino-hybi-websocket-perframe-deflate
+ """
+
+ _WINDOW_BITS_PARAM = 'max_window_bits'
+ _NO_CONTEXT_TAKEOVER_PARAM = 'no_context_takeover'
+
+ def __init__(self, request):
+ ExtensionProcessorInterface.__init__(self, request)
+ self._logger = util.get_class_logger(self)
+
+ self._response_window_bits = None
+ self._response_no_context_takeover = False
+ self._bfinal = False
+
+ # Calculates
+ # (Total outgoing bytes supplied to this filter) /
+ # (Total bytes sent to the network after applying this filter)
+ self._outgoing_average_ratio_calculator = _AverageRatioCalculator()
+
+ # Calculates
+ # (Total bytes received from the network) /
+ # (Total incoming bytes obtained after applying this filter)
+ self._incoming_average_ratio_calculator = _AverageRatioCalculator()
+
+ def name(self):
+ return common.DEFLATE_FRAME_EXTENSION
+
+ def _get_extension_response_internal(self):
+ # Any unknown parameter will be just ignored.
+
+ window_bits = None
+ if self._request.has_parameter(self._WINDOW_BITS_PARAM):
+ window_bits = self._request.get_parameter_value(
+ self._WINDOW_BITS_PARAM)
+ try:
+ window_bits = _parse_window_bits(window_bits)
+ except ValueError, e:
+ return None
+
+ no_context_takeover = self._request.has_parameter(
+ self._NO_CONTEXT_TAKEOVER_PARAM)
+ if (no_context_takeover and
+ self._request.get_parameter_value(
+ self._NO_CONTEXT_TAKEOVER_PARAM) is not None):
+ return None
+
+ self._rfc1979_deflater = util._RFC1979Deflater(
+ window_bits, no_context_takeover)
+
+ self._rfc1979_inflater = util._RFC1979Inflater()
+
+ self._compress_outgoing = True
+
+ response = common.ExtensionParameter(self._request.name())
+
+ if self._response_window_bits is not None:
+ response.add_parameter(
+ self._WINDOW_BITS_PARAM, str(self._response_window_bits))
+ if self._response_no_context_takeover:
+ response.add_parameter(
+ self._NO_CONTEXT_TAKEOVER_PARAM, None)
+
+ self._logger.debug(
+ 'Enable %s extension ('
+ 'request: window_bits=%s; no_context_takeover=%r, '
+ 'response: window_wbits=%s; no_context_takeover=%r)' %
+ (self._request.name(),
+ window_bits,
+ no_context_takeover,
+ self._response_window_bits,
+ self._response_no_context_takeover))
+
+ return response
+
+ def _setup_stream_options_internal(self, stream_options):
+
+ class _OutgoingFilter(object):
+
+ def __init__(self, parent):
+ self._parent = parent
+
+ def filter(self, frame):
+ self._parent._outgoing_filter(frame)
+
+ class _IncomingFilter(object):
+
+ def __init__(self, parent):
+ self._parent = parent
+
+ def filter(self, frame):
+ self._parent._incoming_filter(frame)
+
+ stream_options.outgoing_frame_filters.append(
+ _OutgoingFilter(self))
+ stream_options.incoming_frame_filters.insert(
+ 0, _IncomingFilter(self))
+
+ def set_response_window_bits(self, value):
+ self._response_window_bits = value
+
+ def set_response_no_context_takeover(self, value):
+ self._response_no_context_takeover = value
+
+ def set_bfinal(self, value):
+ self._bfinal = value
+
+ def enable_outgoing_compression(self):
+ self._compress_outgoing = True
+
+ def disable_outgoing_compression(self):
+ self._compress_outgoing = False
+
+ def _outgoing_filter(self, frame):
+ """Transform outgoing frames. This method is called only by
+ an _OutgoingFilter instance.
+ """
+
+ original_payload_size = len(frame.payload)
+ self._outgoing_average_ratio_calculator.add_original_bytes(
+ original_payload_size)
+
+ if (not self._compress_outgoing or
+ common.is_control_opcode(frame.opcode)):
+ self._outgoing_average_ratio_calculator.add_result_bytes(
+ original_payload_size)
+ return
+
+ frame.payload = self._rfc1979_deflater.filter(
+ frame.payload, bfinal=self._bfinal)
+ frame.rsv1 = 1
+
+ filtered_payload_size = len(frame.payload)
+ self._outgoing_average_ratio_calculator.add_result_bytes(
+ filtered_payload_size)
+
+ _log_outgoing_compression_ratio(
+ self._logger,
+ original_payload_size,
+ filtered_payload_size,
+ self._outgoing_average_ratio_calculator.get_average_ratio())
+
+ def _incoming_filter(self, frame):
+ """Transform incoming frames. This method is called only by
+ an _IncomingFilter instance.
+ """
+
+ received_payload_size = len(frame.payload)
+ self._incoming_average_ratio_calculator.add_result_bytes(
+ received_payload_size)
+
+ if frame.rsv1 != 1 or common.is_control_opcode(frame.opcode):
+ self._incoming_average_ratio_calculator.add_original_bytes(
+ received_payload_size)
+ return
+
+ frame.payload = self._rfc1979_inflater.filter(frame.payload)
+ frame.rsv1 = 0
+
+ filtered_payload_size = len(frame.payload)
+ self._incoming_average_ratio_calculator.add_original_bytes(
+ filtered_payload_size)
+
+ _log_incoming_compression_ratio(
+ self._logger,
+ received_payload_size,
+ filtered_payload_size,
+ self._incoming_average_ratio_calculator.get_average_ratio())
+
+
+_available_processors[common.DEFLATE_FRAME_EXTENSION] = (
+ DeflateFrameExtensionProcessor)
+_compression_extension_names.append(common.DEFLATE_FRAME_EXTENSION)
+
+_available_processors[common.X_WEBKIT_DEFLATE_FRAME_EXTENSION] = (
+ DeflateFrameExtensionProcessor)
+_compression_extension_names.append(common.X_WEBKIT_DEFLATE_FRAME_EXTENSION)
+
+
+class PerMessageDeflateExtensionProcessor(ExtensionProcessorInterface):
+ """permessage-deflate extension processor.
+
+ Specification:
+ http://tools.ietf.org/html/draft-ietf-hybi-permessage-compression-08
+ """
+
+ _SERVER_MAX_WINDOW_BITS_PARAM = 'server_max_window_bits'
+ _SERVER_NO_CONTEXT_TAKEOVER_PARAM = 'server_no_context_takeover'
+ _CLIENT_MAX_WINDOW_BITS_PARAM = 'client_max_window_bits'
+ _CLIENT_NO_CONTEXT_TAKEOVER_PARAM = 'client_no_context_takeover'
+
+ def __init__(self, request, draft08=True):
+ """Construct PerMessageDeflateExtensionProcessor
+
+ Args:
+            draft08: Enforce the constraints on parameters that are specified
+                for permessage-deflate in
+                draft-ietf-hybi-permessage-compression-08 but not for
+                permessage-compress.
+ """
+
+ ExtensionProcessorInterface.__init__(self, request)
+ self._logger = util.get_class_logger(self)
+
+ self._preferred_client_max_window_bits = None
+ self._client_no_context_takeover = False
+
+ self._draft08 = draft08
+
+ def name(self):
+ # This method returns "deflate" (not "permessage-deflate") for
+ # compatibility.
+ return 'deflate'
+
+ def _get_extension_response_internal(self):
+ if self._draft08:
+ for name in self._request.get_parameter_names():
+ if name not in [self._SERVER_MAX_WINDOW_BITS_PARAM,
+ self._SERVER_NO_CONTEXT_TAKEOVER_PARAM,
+ self._CLIENT_MAX_WINDOW_BITS_PARAM]:
+ self._logger.debug('Unknown parameter: %r', name)
+ return None
+ else:
+ # Any unknown parameter will be just ignored.
+ pass
+
+ server_max_window_bits = None
+ if self._request.has_parameter(self._SERVER_MAX_WINDOW_BITS_PARAM):
+ server_max_window_bits = self._request.get_parameter_value(
+ self._SERVER_MAX_WINDOW_BITS_PARAM)
+ try:
+ server_max_window_bits = _parse_window_bits(
+ server_max_window_bits)
+ except ValueError, e:
+ self._logger.debug('Bad %s parameter: %r',
+ self._SERVER_MAX_WINDOW_BITS_PARAM,
+ e)
+ return None
+
+ server_no_context_takeover = self._request.has_parameter(
+ self._SERVER_NO_CONTEXT_TAKEOVER_PARAM)
+ if (server_no_context_takeover and
+ self._request.get_parameter_value(
+ self._SERVER_NO_CONTEXT_TAKEOVER_PARAM) is not None):
+ self._logger.debug('%s parameter must not have a value: %r',
+ self._SERVER_NO_CONTEXT_TAKEOVER_PARAM,
+ server_no_context_takeover)
+ return None
+
+        # The client_max_window_bits parameter in a client's offer indicates
+        # whether the client can accept client_max_window_bits from the server.
+ client_client_max_window_bits = self._request.has_parameter(
+ self._CLIENT_MAX_WINDOW_BITS_PARAM)
+ if (self._draft08 and
+ client_client_max_window_bits and
+ self._request.get_parameter_value(
+ self._CLIENT_MAX_WINDOW_BITS_PARAM) is not None):
+ self._logger.debug('%s parameter must not have a value in a '
+ 'client\'s opening handshake: %r',
+ self._CLIENT_MAX_WINDOW_BITS_PARAM,
+ client_client_max_window_bits)
+ return None
+
+ self._rfc1979_deflater = util._RFC1979Deflater(
+ server_max_window_bits, server_no_context_takeover)
+
+ # Note that we prepare for incoming messages compressed with window
+        # bits up to 15 regardless of the client_max_window_bits value to be
+ # sent to the client.
+ self._rfc1979_inflater = util._RFC1979Inflater()
+
+ self._framer = _PerMessageDeflateFramer(
+ server_max_window_bits, server_no_context_takeover)
+ self._framer.set_bfinal(False)
+ self._framer.set_compress_outgoing_enabled(True)
+
+ response = common.ExtensionParameter(self._request.name())
+
+ if server_max_window_bits is not None:
+ response.add_parameter(
+ self._SERVER_MAX_WINDOW_BITS_PARAM,
+ str(server_max_window_bits))
+
+ if server_no_context_takeover:
+ response.add_parameter(
+ self._SERVER_NO_CONTEXT_TAKEOVER_PARAM, None)
+
+ if self._preferred_client_max_window_bits is not None:
+ if self._draft08 and not client_client_max_window_bits:
+ self._logger.debug('Processor is configured to use %s but '
+ 'the client cannot accept it',
+ self._CLIENT_MAX_WINDOW_BITS_PARAM)
+ return None
+ response.add_parameter(
+ self._CLIENT_MAX_WINDOW_BITS_PARAM,
+ str(self._preferred_client_max_window_bits))
+
+ if self._client_no_context_takeover:
+ response.add_parameter(
+ self._CLIENT_NO_CONTEXT_TAKEOVER_PARAM, None)
+
+ self._logger.debug(
+ 'Enable %s extension ('
+ 'request: server_max_window_bits=%s; '
+ 'server_no_context_takeover=%r, '
+ 'response: client_max_window_bits=%s; '
+ 'client_no_context_takeover=%r)' %
+ (self._request.name(),
+ server_max_window_bits,
+ server_no_context_takeover,
+ self._preferred_client_max_window_bits,
+ self._client_no_context_takeover))
+
+ return response
+
+ def _setup_stream_options_internal(self, stream_options):
+ self._framer.setup_stream_options(stream_options)
+
+ def set_client_max_window_bits(self, value):
+ """If this option is specified, this class adds the
+ client_max_window_bits extension parameter to the handshake response,
+ but doesn't reduce the LZ77 sliding window size of its inflater.
+        I.e., you can use this for testing a client implementation, but it
+        does not reduce this class's memory usage.
+
+ If this method has been called with True and an offer without the
+ client_max_window_bits extension parameter is received,
+ - (When processing the permessage-deflate extension) this processor
+ declines the request.
+ - (When processing the permessage-compress extension) this processor
+ accepts the request.
+ """
+
+ self._preferred_client_max_window_bits = value
+
+ def set_client_no_context_takeover(self, value):
+ """If this option is specified, this class adds the
+ client_no_context_takeover extension parameter to the handshake
+        response, but doesn't reset the inflater for each message. I.e., you
+        can use this for testing a client implementation, but it does not
+        reduce this class's memory usage.
+ """
+
+ self._client_no_context_takeover = value
+
+ def set_bfinal(self, value):
+ self._framer.set_bfinal(value)
+
+ def enable_outgoing_compression(self):
+ self._framer.set_compress_outgoing_enabled(True)
+
+ def disable_outgoing_compression(self):
+ self._framer.set_compress_outgoing_enabled(False)
+
+
+class _PerMessageDeflateFramer(object):
+ """A framer for extensions with per-message DEFLATE feature."""
+
+ def __init__(self, deflate_max_window_bits, deflate_no_context_takeover):
+ self._logger = util.get_class_logger(self)
+
+ self._rfc1979_deflater = util._RFC1979Deflater(
+ deflate_max_window_bits, deflate_no_context_takeover)
+
+ self._rfc1979_inflater = util._RFC1979Inflater()
+
+ self._bfinal = False
+
+ self._compress_outgoing_enabled = False
+
+ # True if a message is fragmented and compression is ongoing.
+ self._compress_ongoing = False
+
+ # Calculates
+ # (Total outgoing bytes supplied to this filter) /
+ # (Total bytes sent to the network after applying this filter)
+ self._outgoing_average_ratio_calculator = _AverageRatioCalculator()
+
+ # Calculates
+ # (Total bytes received from the network) /
+ # (Total incoming bytes obtained after applying this filter)
+ self._incoming_average_ratio_calculator = _AverageRatioCalculator()
+
+ def set_bfinal(self, value):
+ self._bfinal = value
+
+ def set_compress_outgoing_enabled(self, value):
+ self._compress_outgoing_enabled = value
+
+ def _process_incoming_message(self, message, decompress):
+ if not decompress:
+ return message
+
+ received_payload_size = len(message)
+ self._incoming_average_ratio_calculator.add_result_bytes(
+ received_payload_size)
+
+ message = self._rfc1979_inflater.filter(message)
+
+ filtered_payload_size = len(message)
+ self._incoming_average_ratio_calculator.add_original_bytes(
+ filtered_payload_size)
+
+ _log_incoming_compression_ratio(
+ self._logger,
+ received_payload_size,
+ filtered_payload_size,
+ self._incoming_average_ratio_calculator.get_average_ratio())
+
+ return message
+
+ def _process_outgoing_message(self, message, end, binary):
+ if not binary:
+ message = message.encode('utf-8')
+
+ if not self._compress_outgoing_enabled:
+ return message
+
+ original_payload_size = len(message)
+ self._outgoing_average_ratio_calculator.add_original_bytes(
+ original_payload_size)
+
+ message = self._rfc1979_deflater.filter(
+ message, end=end, bfinal=self._bfinal)
+
+ filtered_payload_size = len(message)
+ self._outgoing_average_ratio_calculator.add_result_bytes(
+ filtered_payload_size)
+
+ _log_outgoing_compression_ratio(
+ self._logger,
+ original_payload_size,
+ filtered_payload_size,
+ self._outgoing_average_ratio_calculator.get_average_ratio())
+
+ if not self._compress_ongoing:
+ self._outgoing_frame_filter.set_compression_bit()
+ self._compress_ongoing = not end
+ return message
+
+ def _process_incoming_frame(self, frame):
+ if frame.rsv1 == 1 and not common.is_control_opcode(frame.opcode):
+ self._incoming_message_filter.decompress_next_message()
+ frame.rsv1 = 0
+
+ def _process_outgoing_frame(self, frame, compression_bit):
+ if (not compression_bit or
+ common.is_control_opcode(frame.opcode)):
+ return
+
+ frame.rsv1 = 1
+
+ def setup_stream_options(self, stream_options):
+ """Creates filters and sets them to the StreamOptions."""
+
+ class _OutgoingMessageFilter(object):
+
+ def __init__(self, parent):
+ self._parent = parent
+
+ def filter(self, message, end=True, binary=False):
+ return self._parent._process_outgoing_message(
+ message, end, binary)
+
+ class _IncomingMessageFilter(object):
+
+ def __init__(self, parent):
+ self._parent = parent
+ self._decompress_next_message = False
+
+ def decompress_next_message(self):
+ self._decompress_next_message = True
+
+ def filter(self, message):
+ message = self._parent._process_incoming_message(
+ message, self._decompress_next_message)
+ self._decompress_next_message = False
+ return message
+
+ self._outgoing_message_filter = _OutgoingMessageFilter(self)
+ self._incoming_message_filter = _IncomingMessageFilter(self)
+ stream_options.outgoing_message_filters.append(
+ self._outgoing_message_filter)
+ stream_options.incoming_message_filters.append(
+ self._incoming_message_filter)
+
+ class _OutgoingFrameFilter(object):
+
+ def __init__(self, parent):
+ self._parent = parent
+ self._set_compression_bit = False
+
+ def set_compression_bit(self):
+ self._set_compression_bit = True
+
+ def filter(self, frame):
+ self._parent._process_outgoing_frame(
+ frame, self._set_compression_bit)
+ self._set_compression_bit = False
+
+ class _IncomingFrameFilter(object):
+
+ def __init__(self, parent):
+ self._parent = parent
+
+ def filter(self, frame):
+ self._parent._process_incoming_frame(frame)
+
+ self._outgoing_frame_filter = _OutgoingFrameFilter(self)
+ self._incoming_frame_filter = _IncomingFrameFilter(self)
+ stream_options.outgoing_frame_filters.append(
+ self._outgoing_frame_filter)
+ stream_options.incoming_frame_filters.append(
+ self._incoming_frame_filter)
+
+ stream_options.encode_text_message_to_utf8 = False
+
+
+_available_processors[common.PERMESSAGE_DEFLATE_EXTENSION] = (
+ PerMessageDeflateExtensionProcessor)
+# TODO(tyoshino): Reorganize class names.
+_compression_extension_names.append('deflate')
+
+
+class MuxExtensionProcessor(ExtensionProcessorInterface):
+ """WebSocket multiplexing extension processor."""
+
+ _QUOTA_PARAM = 'quota'
+
+ def __init__(self, request):
+ ExtensionProcessorInterface.__init__(self, request)
+ self._quota = 0
+ self._extensions = []
+
+ def name(self):
+ return common.MUX_EXTENSION
+
+ def check_consistency_with_other_processors(self, processors):
+ before_mux = True
+ for processor in processors:
+ name = processor.name()
+ if name == self.name():
+ before_mux = False
+ continue
+ if not processor.is_active():
+ continue
+ if before_mux:
+ # Mux extension cannot be used after extensions
+ # that depend on frame boundary, extension data field, or any
+ # reserved bits which are attributed to each frame.
+ if (name == common.DEFLATE_FRAME_EXTENSION or
+ name == common.X_WEBKIT_DEFLATE_FRAME_EXTENSION):
+ self.set_active(False)
+ return
+ else:
+ # Mux extension should not be applied before any history-based
+ # compression extension.
+ if (name == 'deflate' or
+ name == common.DEFLATE_FRAME_EXTENSION or
+ name == common.X_WEBKIT_DEFLATE_FRAME_EXTENSION):
+ self.set_active(False)
+ return
+
+ def _get_extension_response_internal(self):
+ self._active = False
+ quota = self._request.get_parameter_value(self._QUOTA_PARAM)
+ if quota is not None:
+ try:
+ quota = int(quota)
+ except ValueError, e:
+ return None
+ if quota < 0 or quota >= 2 ** 32:
+ return None
+ self._quota = quota
+
+ self._active = True
+ return common.ExtensionParameter(common.MUX_EXTENSION)
+
+ def _setup_stream_options_internal(self, stream_options):
+ pass
+
+ def set_quota(self, quota):
+ self._quota = quota
+
+ def quota(self):
+ return self._quota
+
+ def set_extensions(self, extensions):
+ self._extensions = extensions
+
+ def extensions(self):
+ return self._extensions
+
+
+_available_processors[common.MUX_EXTENSION] = MuxExtensionProcessor
+
+
+def get_extension_processor(extension_request):
+ """Given an ExtensionParameter representing an extension offer received
+ from a client, configures and returns an instance of the corresponding
+ extension processor class.
+ """
+
+ processor_class = _available_processors.get(extension_request.name())
+ if processor_class is None:
+ return None
+ return processor_class(extension_request)
+
+
+def is_compression_extension(extension_name):
+ return extension_name in _compression_extension_names
+
+
+# vi:sts=4 sw=4 et
diff --git a/testing/mochitest/pywebsocket/mod_pywebsocket/fast_masking.i b/testing/mochitest/pywebsocket/mod_pywebsocket/fast_masking.i
new file mode 100644
index 000000000..ddaad27f5
--- /dev/null
+++ b/testing/mochitest/pywebsocket/mod_pywebsocket/fast_masking.i
@@ -0,0 +1,98 @@
+// Copyright 2013, Google Inc.
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+
+%module fast_masking
+
+%include "cstring.i"
+
+%{
+#include <cstring>
+
+#ifdef __SSE2__
+#include <emmintrin.h>
+#endif
+%}
+
+%apply (char *STRING, int LENGTH) {
+ (const char* payload, int payload_length),
+ (const char* masking_key, int masking_key_length) };
+%cstring_output_allocate_size(
+ char** result, int* result_length, delete [] *$1);
+
+%inline %{
+
+void mask(
+ const char* payload, int payload_length,
+ const char* masking_key, int masking_key_length,
+ int masking_key_index,
+ char** result, int* result_length) {
+ *result = new char[payload_length];
+ *result_length = payload_length;
+ memcpy(*result, payload, payload_length);
+
+ char* cursor = *result;
+ char* cursor_end = *result + *result_length;
+
+#ifdef __SSE2__
+ while ((cursor < cursor_end) &&
+ (reinterpret_cast<size_t>(cursor) & 0xf)) {
+ *cursor ^= masking_key[masking_key_index];
+ ++cursor;
+ masking_key_index = (masking_key_index + 1) % masking_key_length;
+ }
+ if (cursor == cursor_end) {
+ return;
+ }
+
+ const int kBlockSize = 16;
+ __m128i masking_key_block;
+ for (int i = 0; i < kBlockSize; ++i) {
+ *(reinterpret_cast<char*>(&masking_key_block) + i) =
+ masking_key[masking_key_index];
+ masking_key_index = (masking_key_index + 1) % masking_key_length;
+ }
+
+ while (cursor + kBlockSize <= cursor_end) {
+ __m128i payload_block =
+ _mm_load_si128(reinterpret_cast<__m128i*>(cursor));
+ _mm_stream_si128(reinterpret_cast<__m128i*>(cursor),
+ _mm_xor_si128(payload_block, masking_key_block));
+ cursor += kBlockSize;
+ }
+#endif
+
+ while (cursor < cursor_end) {
+ *cursor ^= masking_key[masking_key_index];
+ ++cursor;
+ masking_key_index = (masking_key_index + 1) % masking_key_length;
+ }
+}
+
+%}
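
For comparison, a hedged pure-Python sketch (not part of the patch) of the same XOR masking the SWIG/SSE2 helper above implements; it trades speed for clarity and walks the payload byte-by-byte with the cycling masking key.

    def mask_slow(payload, masking_key, masking_key_index=0):
        # XOR each payload byte with the masking key, cycling through the key.
        masked = bytearray(payload)
        key = bytearray(masking_key)
        for i in range(len(masked)):
            masked[i] ^= key[(masking_key_index + i) % len(key)]
        return bytes(masked)

    # Masking is symmetric: applying the same key twice restores the input.
    assert mask_slow(mask_slow(b'Hello', b'\x01\x02\x03\x04'),
                     b'\x01\x02\x03\x04') == b'Hello'
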
diff --git a/testing/mochitest/pywebsocket/mod_pywebsocket/handshake/__init__.py b/testing/mochitest/pywebsocket/mod_pywebsocket/handshake/__init__.py
new file mode 100644
index 000000000..194f6b395
--- /dev/null
+++ b/testing/mochitest/pywebsocket/mod_pywebsocket/handshake/__init__.py
@@ -0,0 +1,110 @@
+# Copyright 2011, Google Inc.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+
+"""WebSocket opening handshake processor. This class try to apply available
+opening handshake processors for each protocol version until a connection is
+successfully established.
+"""
+
+
+import logging
+
+from mod_pywebsocket import common
+from mod_pywebsocket.handshake import hybi00
+from mod_pywebsocket.handshake import hybi
+# Export AbortedByUserException, HandshakeException, and VersionException
+# symbol from this module.
+from mod_pywebsocket.handshake._base import AbortedByUserException
+from mod_pywebsocket.handshake._base import HandshakeException
+from mod_pywebsocket.handshake._base import VersionException
+
+
+_LOGGER = logging.getLogger(__name__)
+
+
+def do_handshake(request, dispatcher, allowDraft75=False, strict=False):
+ """Performs WebSocket handshake.
+
+ Args:
+ request: mod_python request.
+ dispatcher: Dispatcher (dispatch.Dispatcher).
+ allowDraft75: obsolete argument. ignored.
+ strict: obsolete argument. ignored.
+
+ Handshaker will add attributes such as ws_resource in performing
+ handshake.
+ """
+
+ _LOGGER.debug('Client\'s opening handshake resource: %r', request.uri)
+    # To print mimetools.Message as an escaped one-line string, we convert
+    # headers_in to a dict object. Without the conversion, %r just prints the
+    # type and address, and %s prints the original header string as multiple
+    # lines.
+    #
+    # Both mimetools.Message and mod_python's MpTable_Type can be converted
+    # to dict.
+    #
+    # mimetools.Message.__str__ returns the original header string, and
+    # dict(mimetools.Message object) returns the map from header names to
+    # header values. MpTable_Type has no such __str__, only a __repr__ that
+    # formats it like a dictionary object.
+ _LOGGER.debug(
+ 'Client\'s opening handshake headers: %r', dict(request.headers_in))
+
+ handshakers = []
+ handshakers.append(
+ ('RFC 6455', hybi.Handshaker(request, dispatcher)))
+ handshakers.append(
+ ('HyBi 00', hybi00.Handshaker(request, dispatcher)))
+
+ for name, handshaker in handshakers:
+ _LOGGER.debug('Trying protocol version %s', name)
+ try:
+ handshaker.do_handshake()
+ _LOGGER.info('Established (%s protocol)', name)
+ return
+ except HandshakeException, e:
+ _LOGGER.debug(
+ 'Failed to complete opening handshake as %s protocol: %r',
+ name, e)
+ if e.status:
+ raise e
+ except AbortedByUserException, e:
+ raise
+ except VersionException, e:
+ raise
+
+ # TODO(toyoshim): Add a test to cover the case all handshakers fail.
+ raise HandshakeException(
+ 'Failed to complete opening handshake for all available protocols',
+ status=common.HTTP_STATUS_BAD_REQUEST)
+
+
+# vi:sts=4 sw=4 et
diff --git a/testing/mochitest/pywebsocket/mod_pywebsocket/handshake/_base.py b/testing/mochitest/pywebsocket/mod_pywebsocket/handshake/_base.py
new file mode 100644
index 000000000..c993a584b
--- /dev/null
+++ b/testing/mochitest/pywebsocket/mod_pywebsocket/handshake/_base.py
@@ -0,0 +1,182 @@
+# Copyright 2012, Google Inc.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+
+"""Common functions and exceptions used by WebSocket opening handshake
+processors.
+"""
+
+
+from mod_pywebsocket import common
+from mod_pywebsocket import http_header_util
+
+
+class AbortedByUserException(Exception):
+ """Exception for aborting a connection intentionally.
+
+ If this exception is raised in do_extra_handshake handler, the connection
+ will be abandoned. No other WebSocket or HTTP(S) handler will be invoked.
+
+ If this exception is raised in transfer_data_handler, the connection will
+ be closed without closing handshake. No other WebSocket or HTTP(S) handler
+ will be invoked.
+ """
+
+ pass
+
+
+class HandshakeException(Exception):
+ """This exception will be raised when an error occurred while processing
+ WebSocket initial handshake.
+ """
+
+ def __init__(self, name, status=None):
+ super(HandshakeException, self).__init__(name)
+ self.status = status
+
+
+class VersionException(Exception):
+ """This exception will be raised when a version of client request does not
+ match with version the server supports.
+ """
+
+ def __init__(self, name, supported_versions=''):
+ """Construct an instance.
+
+ Args:
+            supported_versions: a str object listing the supported hybi
+            versions (e.g. '8, 13').
+ """
+ super(VersionException, self).__init__(name)
+ self.supported_versions = supported_versions
+
+
+def get_default_port(is_secure):
+ if is_secure:
+ return common.DEFAULT_WEB_SOCKET_SECURE_PORT
+ else:
+ return common.DEFAULT_WEB_SOCKET_PORT
+
+
+def validate_subprotocol(subprotocol):
+ """Validate a value in the Sec-WebSocket-Protocol field.
+
+ See the Section 4.1., 4.2.2., and 4.3. of RFC 6455.
+ """
+
+ if not subprotocol:
+ raise HandshakeException('Invalid subprotocol name: empty')
+
+ # Parameter should be encoded HTTP token.
+ state = http_header_util.ParsingState(subprotocol)
+ token = http_header_util.consume_token(state)
+ rest = http_header_util.peek(state)
+ # If |rest| is not None, |subprotocol| is not one token or invalid. If
+ # |rest| is None, |token| must not be None because |subprotocol| is
+ # concatenation of |token| and |rest| and is not None.
+ if rest is not None:
+ raise HandshakeException('Invalid non-token string in subprotocol '
+ 'name: %r' % rest)
+
+
+def parse_host_header(request):
+ fields = request.headers_in[common.HOST_HEADER].split(':', 1)
+ if len(fields) == 1:
+ return fields[0], get_default_port(request.is_https())
+ try:
+ return fields[0], int(fields[1])
+ except ValueError, e:
+ raise HandshakeException('Invalid port number format: %r' % e)
+
+
+def format_header(name, value):
+ return '%s: %s\r\n' % (name, value)
+
+
+def get_mandatory_header(request, key):
+ value = request.headers_in.get(key)
+ if value is None:
+ raise HandshakeException('Header %s is not defined' % key)
+ return value
+
+
+def validate_mandatory_header(request, key, expected_value, fail_status=None):
+ value = get_mandatory_header(request, key)
+
+ if value.lower() != expected_value.lower():
+ raise HandshakeException(
+ 'Expected %r for header %s but found %r (case-insensitive)' %
+ (expected_value, key, value), status=fail_status)
+
+
+def check_request_line(request):
+ # 5.1 1. The three character UTF-8 string "GET".
+ # 5.1 2. A UTF-8-encoded U+0020 SPACE character (0x20 byte).
+ if request.method != 'GET':
+ raise HandshakeException('Method is not GET: %r' % request.method)
+
+ if request.protocol != 'HTTP/1.1':
+ raise HandshakeException('Version is not HTTP/1.1: %r' %
+ request.protocol)
+
+
+def parse_token_list(data):
+ """Parses a header value which follows 1#token and returns parsed elements
+ as a list of strings.
+
+ Leading LWSes must be trimmed.
+ """
+
+ state = http_header_util.ParsingState(data)
+
+ token_list = []
+
+ while True:
+ token = http_header_util.consume_token(state)
+ if token is not None:
+ token_list.append(token)
+
+ http_header_util.consume_lwses(state)
+
+ if http_header_util.peek(state) is None:
+ break
+
+ if not http_header_util.consume_string(state, ','):
+ raise HandshakeException(
+ 'Expected a comma but found %r' % http_header_util.peek(state))
+
+ http_header_util.consume_lwses(state)
+
+ if len(token_list) == 0:
+ raise HandshakeException('No valid token found')
+
+ return token_list
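
A brief sketch of the parser above on an illustrative Connection-style header value:

    from mod_pywebsocket.handshake._base import parse_token_list

    # Commas separate tokens; surrounding linear whitespace is ignored.
    print(parse_token_list('keep-alive, Upgrade'))   # -> ['keep-alive', 'Upgrade']
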
+
+
+# vi:sts=4 sw=4 et
diff --git a/testing/mochitest/pywebsocket/mod_pywebsocket/handshake/hybi.py b/testing/mochitest/pywebsocket/mod_pywebsocket/handshake/hybi.py
new file mode 100644
index 000000000..44e71f179
--- /dev/null
+++ b/testing/mochitest/pywebsocket/mod_pywebsocket/handshake/hybi.py
@@ -0,0 +1,428 @@
+# Copyright 2012, Google Inc.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+
+"""This file provides the opening handshake processor for the WebSocket
+protocol (RFC 6455).
+
+Specification:
+http://tools.ietf.org/html/rfc6455
+"""
+
+
+# Note: request.connection.write is used in this module, even though the
+# mod_python documentation says that it should be used only in connection
+# handlers. Unfortunately, we have no other option. For example, request.write
+# is not suitable because it doesn't allow writing raw bytes directly.
+
+
+import base64
+import logging
+import os
+import re
+
+from mod_pywebsocket import common
+from mod_pywebsocket.extensions import get_extension_processor
+from mod_pywebsocket.extensions import is_compression_extension
+from mod_pywebsocket.handshake._base import check_request_line
+from mod_pywebsocket.handshake._base import format_header
+from mod_pywebsocket.handshake._base import get_mandatory_header
+from mod_pywebsocket.handshake._base import HandshakeException
+from mod_pywebsocket.handshake._base import parse_token_list
+from mod_pywebsocket.handshake._base import validate_mandatory_header
+from mod_pywebsocket.handshake._base import validate_subprotocol
+from mod_pywebsocket.handshake._base import VersionException
+from mod_pywebsocket.stream import Stream
+from mod_pywebsocket.stream import StreamOptions
+from mod_pywebsocket import util
+
+
+# Used to validate the value in the Sec-WebSocket-Key header strictly. RFC 4648
+# disallows non-zero padding, so the character right before == must be one of
+# A, Q, g or w.
+_SEC_WEBSOCKET_KEY_REGEX = re.compile('^[+/0-9A-Za-z]{21}[AQgw]==$')
+
+# Defining aliases for values used frequently.
+_VERSION_LATEST = common.VERSION_HYBI_LATEST
+_VERSION_LATEST_STRING = str(_VERSION_LATEST)
+_SUPPORTED_VERSIONS = [
+ _VERSION_LATEST,
+]
+
+
+def compute_accept(key):
+ """Computes value for the Sec-WebSocket-Accept header from value of the
+ Sec-WebSocket-Key header.
+ """
+
+ accept_binary = util.sha1_hash(
+ key + common.WEBSOCKET_ACCEPT_UUID).digest()
+ accept = base64.b64encode(accept_binary)
+
+ return (accept, accept_binary)
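+
+
+# Illustrative sketch (not part of the upstream module): with the sample nonce
+# from RFC 6455 section 1.3, compute_accept yields the familiar accept value:
+#
+#   >>> compute_accept('dGhlIHNhbXBsZSBub25jZQ==')[0]
+#   's3pPLMBiTxaQ9kYGzzhZRbK+xOo='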
+
+
+class Handshaker(object):
+ """Opening handshake processor for the WebSocket protocol (RFC 6455)."""
+
+ def __init__(self, request, dispatcher):
+ """Construct an instance.
+
+ Args:
+ request: mod_python request.
+ dispatcher: Dispatcher (dispatch.Dispatcher).
+
+ Handshaker will add attributes such as ws_resource during handshake.
+ """
+
+ self._logger = util.get_class_logger(self)
+
+ self._request = request
+ self._dispatcher = dispatcher
+
+ def _validate_connection_header(self):
+ connection = get_mandatory_header(
+ self._request, common.CONNECTION_HEADER)
+
+ try:
+ connection_tokens = parse_token_list(connection)
+ except HandshakeException, e:
+ raise HandshakeException(
+ 'Failed to parse %s: %s' % (common.CONNECTION_HEADER, e))
+
+ connection_is_valid = False
+ for token in connection_tokens:
+ if token.lower() == common.UPGRADE_CONNECTION_TYPE.lower():
+ connection_is_valid = True
+ break
+ if not connection_is_valid:
+ raise HandshakeException(
+ '%s header doesn\'t contain "%s"' %
+ (common.CONNECTION_HEADER, common.UPGRADE_CONNECTION_TYPE))
+
+ def do_handshake(self):
+ self._request.ws_close_code = None
+ self._request.ws_close_reason = None
+
+ # Parsing.
+
+ check_request_line(self._request)
+
+ validate_mandatory_header(
+ self._request,
+ common.UPGRADE_HEADER,
+ common.WEBSOCKET_UPGRADE_TYPE)
+
+ self._validate_connection_header()
+
+ self._request.ws_resource = self._request.uri
+
+ unused_host = get_mandatory_header(self._request, common.HOST_HEADER)
+
+ self._request.ws_version = self._check_version()
+
+ try:
+ self._get_origin()
+ self._set_protocol()
+ self._parse_extensions()
+
+ # Key validation, response generation.
+
+ key = self._get_key()
+ (accept, accept_binary) = compute_accept(key)
+ self._logger.debug(
+ '%s: %r (%s)',
+ common.SEC_WEBSOCKET_ACCEPT_HEADER,
+ accept,
+ util.hexify(accept_binary))
+
+ self._logger.debug('Protocol version is RFC 6455')
+
+ # Setup extension processors.
+
+ processors = []
+ if self._request.ws_requested_extensions is not None:
+ for extension_request in self._request.ws_requested_extensions:
+ processor = get_extension_processor(extension_request)
+ # Unknown extension requests are just ignored.
+ if processor is not None:
+ processors.append(processor)
+ self._request.ws_extension_processors = processors
+
+ # List of extra headers. The extra handshake handler may add header
+ # data as name/value pairs to this list and pywebsocket appends
+ # them to the WebSocket handshake.
+ self._request.extra_headers = []
+
+ # Extra handshake handler may modify/remove processors.
+ self._dispatcher.do_extra_handshake(self._request)
+ processors = filter(lambda processor: processor is not None,
+ self._request.ws_extension_processors)
+
+            # Ask each processor whether there are extensions on the request
+            # which cannot co-exist with it. When a processor decides that
+            # other processors cannot co-exist with it, it marks them (or
+            # itself) as "inactive". The first extension processor has the
+            # right to make the final call.
+ for processor in reversed(processors):
+ if processor.is_active():
+ processor.check_consistency_with_other_processors(
+ processors)
+ processors = filter(lambda processor: processor.is_active(),
+ processors)
+
+ accepted_extensions = []
+
+            # We need to take the mux extension into account here.
+            # If the mux extension exists:
+            # - Remove processors of extensions for logical channels, i.e.
+            #   processors located before the mux processor
+            # - Pass extension requests for logical channels to the mux
+            #   processor
+            # - Attach the mux processor to the request. The dispatcher refers
+            #   to it to decide whether it should use the mux handler or not.
+ mux_index = -1
+ for i, processor in enumerate(processors):
+ if processor.name() == common.MUX_EXTENSION:
+ mux_index = i
+ break
+ if mux_index >= 0:
+ logical_channel_extensions = []
+ for processor in processors[:mux_index]:
+ logical_channel_extensions.append(processor.request())
+ processor.set_active(False)
+ self._request.mux_processor = processors[mux_index]
+ self._request.mux_processor.set_extensions(
+ logical_channel_extensions)
+ processors = filter(lambda processor: processor.is_active(),
+ processors)
+
+ stream_options = StreamOptions()
+
+ for index, processor in enumerate(processors):
+ if not processor.is_active():
+ continue
+
+ extension_response = processor.get_extension_response()
+ if extension_response is None:
+ # Rejected.
+ continue
+
+ accepted_extensions.append(extension_response)
+
+ processor.setup_stream_options(stream_options)
+
+ if not is_compression_extension(processor.name()):
+ continue
+
+ # Inactivate all of the following compression extensions.
+ for j in xrange(index + 1, len(processors)):
+ if is_compression_extension(processors[j].name()):
+ processors[j].set_active(False)
+
+ if len(accepted_extensions) > 0:
+ self._request.ws_extensions = accepted_extensions
+ self._logger.debug(
+ 'Extensions accepted: %r',
+ map(common.ExtensionParameter.name, accepted_extensions))
+ else:
+ self._request.ws_extensions = None
+
+ self._request.ws_stream = self._create_stream(stream_options)
+
+ if self._request.ws_requested_protocols is not None:
+ if self._request.ws_protocol is None:
+ raise HandshakeException(
+ 'do_extra_handshake must choose one subprotocol from '
+ 'ws_requested_protocols and set it to ws_protocol')
+ validate_subprotocol(self._request.ws_protocol)
+
+ self._logger.debug(
+ 'Subprotocol accepted: %r',
+ self._request.ws_protocol)
+ else:
+ if self._request.ws_protocol is not None:
+ raise HandshakeException(
+ 'ws_protocol must be None when the client didn\'t '
+ 'request any subprotocol')
+
+ self._send_handshake(accept)
+ except HandshakeException, e:
+ if not e.status:
+ # Fallback to 400 bad request by default.
+ e.status = common.HTTP_STATUS_BAD_REQUEST
+ raise e
+
+ def _get_origin(self):
+ origin_header = common.ORIGIN_HEADER
+ origin = self._request.headers_in.get(origin_header)
+ if origin is None:
+ self._logger.debug('Client request does not have origin header')
+ self._request.ws_origin = origin
+
+ def _check_version(self):
+ version = get_mandatory_header(self._request,
+ common.SEC_WEBSOCKET_VERSION_HEADER)
+ if version == _VERSION_LATEST_STRING:
+ return _VERSION_LATEST
+
+ if version.find(',') >= 0:
+ raise HandshakeException(
+ 'Multiple versions (%r) are not allowed for header %s' %
+ (version, common.SEC_WEBSOCKET_VERSION_HEADER),
+ status=common.HTTP_STATUS_BAD_REQUEST)
+ raise VersionException(
+ 'Unsupported version %r for header %s' %
+ (version, common.SEC_WEBSOCKET_VERSION_HEADER),
+ supported_versions=', '.join(map(str, _SUPPORTED_VERSIONS)))
+
+ def _set_protocol(self):
+ self._request.ws_protocol = None
+ # MOZILLA
+ self._request.sts = None
+ # /MOZILLA
+
+ protocol_header = self._request.headers_in.get(
+ common.SEC_WEBSOCKET_PROTOCOL_HEADER)
+
+ if protocol_header is None:
+ self._request.ws_requested_protocols = None
+ return
+
+ self._request.ws_requested_protocols = parse_token_list(
+ protocol_header)
+ self._logger.debug('Subprotocols requested: %r',
+ self._request.ws_requested_protocols)
+
+ def _parse_extensions(self):
+ extensions_header = self._request.headers_in.get(
+ common.SEC_WEBSOCKET_EXTENSIONS_HEADER)
+ if not extensions_header:
+ self._request.ws_requested_extensions = None
+ return
+
+ try:
+ self._request.ws_requested_extensions = common.parse_extensions(
+ extensions_header)
+ except common.ExtensionParsingException, e:
+ raise HandshakeException(
+ 'Failed to parse Sec-WebSocket-Extensions header: %r' % e)
+
+ self._logger.debug(
+ 'Extensions requested: %r',
+ map(common.ExtensionParameter.name,
+ self._request.ws_requested_extensions))
+
+ def _validate_key(self, key):
+ if key.find(',') >= 0:
+ raise HandshakeException('Request has multiple %s header lines or '
+ 'contains illegal character \',\': %r' %
+ (common.SEC_WEBSOCKET_KEY_HEADER, key))
+
+ # Validate
+ key_is_valid = False
+ try:
+            # Validate the key with a quick regex match before parsing it with
+            # the base64 module. Because the base64 module skips invalid
+            # characters, we have to do this in advance to make this server
+            # strictly reject illegal keys.
+ if _SEC_WEBSOCKET_KEY_REGEX.match(key):
+ decoded_key = base64.b64decode(key)
+ if len(decoded_key) == 16:
+ key_is_valid = True
+ except TypeError, e:
+ pass
+
+ if not key_is_valid:
+ raise HandshakeException(
+ 'Illegal value for header %s: %r' %
+ (common.SEC_WEBSOCKET_KEY_HEADER, key))
+
+ return decoded_key
+
+ def _get_key(self):
+ key = get_mandatory_header(
+ self._request, common.SEC_WEBSOCKET_KEY_HEADER)
+
+ decoded_key = self._validate_key(key)
+
+ self._logger.debug(
+ '%s: %r (%s)',
+ common.SEC_WEBSOCKET_KEY_HEADER,
+ key,
+ util.hexify(decoded_key))
+
+ return key
+
+ def _create_stream(self, stream_options):
+ return Stream(self._request, stream_options)
+
+ def _create_handshake_response(self, accept):
+ response = []
+
+ response.append('HTTP/1.1 101 Switching Protocols\r\n')
+
+ # WebSocket headers
+ response.append(format_header(
+ common.UPGRADE_HEADER, common.WEBSOCKET_UPGRADE_TYPE))
+ response.append(format_header(
+ common.CONNECTION_HEADER, common.UPGRADE_CONNECTION_TYPE))
+ response.append(format_header(
+ common.SEC_WEBSOCKET_ACCEPT_HEADER, accept))
+ if self._request.ws_protocol is not None:
+ response.append(format_header(
+ common.SEC_WEBSOCKET_PROTOCOL_HEADER,
+ self._request.ws_protocol))
+ if (self._request.ws_extensions is not None and
+ len(self._request.ws_extensions) != 0):
+ response.append(format_header(
+ common.SEC_WEBSOCKET_EXTENSIONS_HEADER,
+ common.format_extensions(self._request.ws_extensions)))
+ # MOZILLA: Add HSTS header if requested to
+ if self._request.sts is not None:
+ response.append(format_header("Strict-Transport-Security",
+ self._request.sts))
+ # /MOZILLA
+
+ # Headers not specific for WebSocket
+ for name, value in self._request.extra_headers:
+ response.append(format_header(name, value))
+
+ response.append('\r\n')
+
+ return ''.join(response)
+
+ def _send_handshake(self, accept):
+ raw_response = self._create_handshake_response(accept)
+ self._request.connection.write(raw_response)
+ self._logger.debug('Sent server\'s opening handshake: %r',
+ raw_response)
+
+
+# vi:sts=4 sw=4 et
diff --git a/testing/mochitest/pywebsocket/mod_pywebsocket/handshake/hybi00.py b/testing/mochitest/pywebsocket/mod_pywebsocket/handshake/hybi00.py
new file mode 100644
index 000000000..8757717a6
--- /dev/null
+++ b/testing/mochitest/pywebsocket/mod_pywebsocket/handshake/hybi00.py
@@ -0,0 +1,293 @@
+# Copyright 2011, Google Inc.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+
+"""This file provides the opening handshake processor for the WebSocket
+protocol version HyBi 00.
+
+Specification:
+http://tools.ietf.org/html/draft-ietf-hybi-thewebsocketprotocol-00
+"""
+
+
+# Note: request.connection.write/read are used in this module, even though the
+# mod_python documentation says that they should be used only in connection
+# handlers. Unfortunately, we have no other options. For example,
+# request.write/read are not suitable because they don't allow reading and
+# writing raw bytes directly.
+
+
+import logging
+import re
+import struct
+
+from mod_pywebsocket import common
+from mod_pywebsocket.stream import StreamHixie75
+from mod_pywebsocket import util
+from mod_pywebsocket.handshake._base import HandshakeException
+from mod_pywebsocket.handshake._base import check_request_line
+from mod_pywebsocket.handshake._base import format_header
+from mod_pywebsocket.handshake._base import get_default_port
+from mod_pywebsocket.handshake._base import get_mandatory_header
+from mod_pywebsocket.handshake._base import parse_host_header
+from mod_pywebsocket.handshake._base import validate_mandatory_header
+
+
+_MANDATORY_HEADERS = [
+ # key, expected value or None
+ [common.UPGRADE_HEADER, common.WEBSOCKET_UPGRADE_TYPE_HIXIE75],
+ [common.CONNECTION_HEADER, common.UPGRADE_CONNECTION_TYPE],
+]
+
+
+def _validate_subprotocol(subprotocol):
+ """Checks if characters in subprotocol are in range between U+0020 and
+ U+007E. A value in the Sec-WebSocket-Protocol field need to satisfy this
+ requirement.
+
+ See the Section 4.1. Opening handshake of the spec.
+ """
+
+ if not subprotocol:
+ raise HandshakeException('Invalid subprotocol name: empty')
+
+ # Parameter should be in the range U+0020 to U+007E.
+ for c in subprotocol:
+ if not 0x20 <= ord(c) <= 0x7e:
+ raise HandshakeException(
+ 'Illegal character in subprotocol name: %r' % c)
+
+
+def _check_header_lines(request, mandatory_headers):
+ check_request_line(request)
+
+ # The expected field names, and the meaning of their corresponding
+ # values, are as follows.
+ # |Upgrade| and |Connection|
+ for key, expected_value in mandatory_headers:
+ validate_mandatory_header(request, key, expected_value)
+
+
+def _build_location(request):
+ """Build WebSocket location for request."""
+
+ location_parts = []
+ if request.is_https():
+ location_parts.append(common.WEB_SOCKET_SECURE_SCHEME)
+ else:
+ location_parts.append(common.WEB_SOCKET_SCHEME)
+ location_parts.append('://')
+ host, port = parse_host_header(request)
+ connection_port = request.connection.local_addr[1]
+ if port != connection_port:
+ raise HandshakeException('Header/connection port mismatch: %d/%d' %
+ (port, connection_port))
+ location_parts.append(host)
+ if (port != get_default_port(request.is_https())):
+ location_parts.append(':')
+ location_parts.append(str(port))
+ location_parts.append(request.unparsed_uri)
+ return ''.join(location_parts)
+
+
+class Handshaker(object):
+ """Opening handshake processor for the WebSocket protocol version HyBi 00.
+ """
+
+ def __init__(self, request, dispatcher):
+ """Construct an instance.
+
+ Args:
+ request: mod_python request.
+ dispatcher: Dispatcher (dispatch.Dispatcher).
+
+        Handshaker will add attributes such as ws_resource while performing
+        the handshake.
+ """
+
+ self._logger = util.get_class_logger(self)
+
+ self._request = request
+ self._dispatcher = dispatcher
+
+ def do_handshake(self):
+ """Perform WebSocket Handshake.
+
+ On _request, we set
+ ws_resource, ws_protocol, ws_location, ws_origin, ws_challenge,
+ ws_challenge_md5: WebSocket handshake information.
+ ws_stream: Frame generation/parsing class.
+ ws_version: Protocol version.
+
+ Raises:
+ HandshakeException: when any error happened in parsing the opening
+ handshake request.
+ """
+
+ # 5.1 Reading the client's opening handshake.
+ # dispatcher sets it in self._request.
+ _check_header_lines(self._request, _MANDATORY_HEADERS)
+ self._set_resource()
+ self._set_subprotocol()
+ self._set_location()
+ self._set_origin()
+ self._set_challenge_response()
+ self._set_protocol_version()
+
+ self._dispatcher.do_extra_handshake(self._request)
+
+ self._send_handshake()
+
+ def _set_resource(self):
+ self._request.ws_resource = self._request.uri
+
+ def _set_subprotocol(self):
+ # |Sec-WebSocket-Protocol|
+ subprotocol = self._request.headers_in.get(
+ common.SEC_WEBSOCKET_PROTOCOL_HEADER)
+ if subprotocol is not None:
+ _validate_subprotocol(subprotocol)
+ self._request.ws_protocol = subprotocol
+
+ def _set_location(self):
+ # |Host|
+ host = self._request.headers_in.get(common.HOST_HEADER)
+ if host is not None:
+ self._request.ws_location = _build_location(self._request)
+ # TODO(ukai): check host is this host.
+
+ def _set_origin(self):
+ # |Origin|
+ origin = self._request.headers_in.get(common.ORIGIN_HEADER)
+ if origin is not None:
+ self._request.ws_origin = origin
+
+ def _set_protocol_version(self):
+ # |Sec-WebSocket-Draft|
+ draft = self._request.headers_in.get(common.SEC_WEBSOCKET_DRAFT_HEADER)
+ if draft is not None and draft != '0':
+ raise HandshakeException('Illegal value for %s: %s' %
+ (common.SEC_WEBSOCKET_DRAFT_HEADER,
+ draft))
+
+ self._logger.debug('Protocol version is HyBi 00')
+ self._request.ws_version = common.VERSION_HYBI00
+ self._request.ws_stream = StreamHixie75(self._request, True)
+
+ def _set_challenge_response(self):
+ # 5.2 4-8.
+ self._request.ws_challenge = self._get_challenge()
+        # 5.2 9. let /response/ be the MD5 fingerprint of /challenge/
+ self._request.ws_challenge_md5 = util.md5_hash(
+ self._request.ws_challenge).digest()
+ self._logger.debug(
+ 'Challenge: %r (%s)',
+ self._request.ws_challenge,
+ util.hexify(self._request.ws_challenge))
+ self._logger.debug(
+ 'Challenge response: %r (%s)',
+ self._request.ws_challenge_md5,
+ util.hexify(self._request.ws_challenge_md5))
+
+ def _get_key_value(self, key_field):
+ key_value = get_mandatory_header(self._request, key_field)
+
+ self._logger.debug('%s: %r', key_field, key_value)
+
+ # 5.2 4. let /key-number_n/ be the digits (characters in the range
+ # U+0030 DIGIT ZERO (0) to U+0039 DIGIT NINE (9)) in /key_n/,
+ # interpreted as a base ten integer, ignoring all other characters
+ # in /key_n/.
+ try:
+ key_number = int(re.sub("\\D", "", key_value))
+ except:
+ raise HandshakeException('%s field contains no digit' % key_field)
+ # 5.2 5. let /spaces_n/ be the number of U+0020 SPACE characters
+ # in /key_n/.
+ spaces = re.subn(" ", "", key_value)[1]
+ if spaces == 0:
+ raise HandshakeException('%s field contains no space' % key_field)
+
+ self._logger.debug(
+ '%s: Key-number is %d and number of spaces is %d',
+ key_field, key_number, spaces)
+
+ # 5.2 6. if /key-number_n/ is not an integral multiple of /spaces_n/
+ # then abort the WebSocket connection.
+ if key_number % spaces != 0:
+ raise HandshakeException(
+ '%s: Key-number (%d) is not an integral multiple of spaces '
+ '(%d)' % (key_field, key_number, spaces))
+ # 5.2 7. let /part_n/ be /key-number_n/ divided by /spaces_n/.
+ part = key_number / spaces
+ self._logger.debug('%s: Part is %d', key_field, part)
+ return part
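+
+    # A worked example (informal, not part of the upstream module): for a
+    # hypothetical key header value '1 2 300', the digits form the key-number
+    # 12300, the value contains 2 spaces, 12300 is divisible by 2, and the
+    # resulting part is 12300 / 2 = 6150.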
+
+ def _get_challenge(self):
+ # 5.2 4-7.
+ key1 = self._get_key_value(common.SEC_WEBSOCKET_KEY1_HEADER)
+ key2 = self._get_key_value(common.SEC_WEBSOCKET_KEY2_HEADER)
+ # 5.2 8. let /challenge/ be the concatenation of /part_1/,
+ challenge = ''
+ challenge += struct.pack('!I', key1) # network byteorder int
+ challenge += struct.pack('!I', key2) # network byteorder int
+ challenge += self._request.connection.read(8)
+ return challenge
+
+ def _send_handshake(self):
+ response = []
+
+ # 5.2 10. send the following line.
+ response.append('HTTP/1.1 101 WebSocket Protocol Handshake\r\n')
+
+ # 5.2 11. send the following fields to the client.
+ response.append(format_header(
+ common.UPGRADE_HEADER, common.WEBSOCKET_UPGRADE_TYPE_HIXIE75))
+ response.append(format_header(
+ common.CONNECTION_HEADER, common.UPGRADE_CONNECTION_TYPE))
+ response.append(format_header(
+ common.SEC_WEBSOCKET_LOCATION_HEADER, self._request.ws_location))
+ response.append(format_header(
+ common.SEC_WEBSOCKET_ORIGIN_HEADER, self._request.ws_origin))
+ if self._request.ws_protocol:
+ response.append(format_header(
+ common.SEC_WEBSOCKET_PROTOCOL_HEADER,
+ self._request.ws_protocol))
+ # 5.2 12. send two bytes 0x0D 0x0A.
+ response.append('\r\n')
+ # 5.2 13. send /response/
+ response.append(self._request.ws_challenge_md5)
+
+ raw_response = ''.join(response)
+ self._request.connection.write(raw_response)
+ self._logger.debug('Sent server\'s opening handshake: %r',
+ raw_response)
+
+
+# vi:sts=4 sw=4 et
diff --git a/testing/mochitest/pywebsocket/mod_pywebsocket/headerparserhandler.py b/testing/mochitest/pywebsocket/mod_pywebsocket/headerparserhandler.py
new file mode 100644
index 000000000..c244421cf
--- /dev/null
+++ b/testing/mochitest/pywebsocket/mod_pywebsocket/headerparserhandler.py
@@ -0,0 +1,254 @@
+# Copyright 2011, Google Inc.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+
+"""PythonHeaderParserHandler for mod_pywebsocket.
+
+Apache HTTP Server and mod_python must be configured such that this
+function is called to handle WebSocket requests.
+"""
+
+
+import logging
+
+from mod_python import apache
+
+from mod_pywebsocket import common
+from mod_pywebsocket import dispatch
+from mod_pywebsocket import handshake
+from mod_pywebsocket import util
+
+
+# PythonOption to specify the handler root directory.
+_PYOPT_HANDLER_ROOT = 'mod_pywebsocket.handler_root'
+
+# PythonOption to specify the handler scan directory.
+# This must be a directory under the root directory.
+# The default is the root directory.
+_PYOPT_HANDLER_SCAN = 'mod_pywebsocket.handler_scan'
+
+# PythonOption to allow handlers whose canonical path is
+# not under the root directory. It's disallowed by default.
+# Set this option to 'yes' to allow such handlers.
+_PYOPT_ALLOW_HANDLERS_OUTSIDE_ROOT = (
+ 'mod_pywebsocket.allow_handlers_outside_root_dir')
+# Map from values to their meanings. 'Yes' and 'No' are allowed just for
+# compatibility.
+_PYOPT_ALLOW_HANDLERS_OUTSIDE_ROOT_DEFINITION = {
+ 'off': False, 'no': False, 'on': True, 'yes': True}
+
+# (Obsolete option. Ignored.)
+# PythonOption to specify whether to allow the handshake defined in the
+# Hixie 75 version of the protocol. The default is None (off).
+_PYOPT_ALLOW_DRAFT75 = 'mod_pywebsocket.allow_draft75'
+# Map from values to their meanings.
+_PYOPT_ALLOW_DRAFT75_DEFINITION = {'off': False, 'on': True}
+
+
+class ApacheLogHandler(logging.Handler):
+ """Wrapper logging.Handler to emit log message to apache's error.log."""
+
+ _LEVELS = {
+ logging.DEBUG: apache.APLOG_DEBUG,
+ logging.INFO: apache.APLOG_INFO,
+ logging.WARNING: apache.APLOG_WARNING,
+ logging.ERROR: apache.APLOG_ERR,
+ logging.CRITICAL: apache.APLOG_CRIT,
+ }
+
+ def __init__(self, request=None):
+ logging.Handler.__init__(self)
+ self._log_error = apache.log_error
+ if request is not None:
+ self._log_error = request.log_error
+
+ # Time and level will be printed by Apache.
+ self._formatter = logging.Formatter('%(name)s: %(message)s')
+
+ def emit(self, record):
+ apache_level = apache.APLOG_DEBUG
+ if record.levelno in ApacheLogHandler._LEVELS:
+ apache_level = ApacheLogHandler._LEVELS[record.levelno]
+
+ msg = self._formatter.format(record)
+
+ # "server" parameter must be passed to have "level" parameter work.
+ # If only "level" parameter is passed, nothing shows up on Apache's
+ # log. However, at this point, we cannot get the server object of the
+ # virtual host which will process WebSocket requests. The only server
+ # object we can get here is apache.main_server. But Wherever (server
+ # configuration context or virtual host context) we put
+ # PythonHeaderParserHandler directive, apache.main_server just points
+ # the main server instance (not any of virtual server instance). Then,
+ # Apache follows LogLevel directive in the server configuration context
+ # to filter logs. So, we need to specify LogLevel in the server
+ # configuration context. Even if we specify "LogLevel debug" in the
+ # virtual host context which actually handles WebSocket connections,
+ # DEBUG level logs never show up unless "LogLevel debug" is specified
+ # in the server configuration context.
+ #
+ # TODO(tyoshino): Provide logging methods on request object. When
+ # request is mp_request object (when used together with Apache), the
+ # methods call request.log_error indirectly. When request is
+ # _StandaloneRequest, the methods call Python's logging facility which
+ # we create in standalone.py.
+ self._log_error(msg, apache_level, apache.main_server)
+
+
+def _configure_logging():
+ logger = logging.getLogger()
+    # Logs are filtered by Apache based on the LogLevel directive in the
+    # Apache configuration file. We just pass logs for all levels to
+    # ApacheLogHandler.
+ logger.setLevel(logging.DEBUG)
+ logger.addHandler(ApacheLogHandler())
+
+
+_configure_logging()
+
+_LOGGER = logging.getLogger(__name__)
+
+
+def _parse_option(name, value, definition):
+ if value is None:
+ return False
+
+ meaning = definition.get(value.lower())
+ if meaning is None:
+ raise Exception('Invalid value for PythonOption %s: %r' %
+ (name, value))
+ return meaning
+
+
+def _create_dispatcher():
+ _LOGGER.info('Initializing Dispatcher')
+
+ options = apache.main_server.get_options()
+
+ handler_root = options.get(_PYOPT_HANDLER_ROOT, None)
+ if not handler_root:
+ raise Exception('PythonOption %s is not defined' % _PYOPT_HANDLER_ROOT,
+ apache.APLOG_ERR)
+
+ handler_scan = options.get(_PYOPT_HANDLER_SCAN, handler_root)
+
+ allow_handlers_outside_root = _parse_option(
+ _PYOPT_ALLOW_HANDLERS_OUTSIDE_ROOT,
+ options.get(_PYOPT_ALLOW_HANDLERS_OUTSIDE_ROOT),
+ _PYOPT_ALLOW_HANDLERS_OUTSIDE_ROOT_DEFINITION)
+
+ dispatcher = dispatch.Dispatcher(
+ handler_root, handler_scan, allow_handlers_outside_root)
+
+ for warning in dispatcher.source_warnings():
+ apache.log_error(
+ 'mod_pywebsocket: Warning in source loading: %s' % warning,
+ apache.APLOG_WARNING)
+
+ return dispatcher
+
+
+# Initialize
+_dispatcher = _create_dispatcher()
+
+
+def headerparserhandler(request):
+ """Handle request.
+
+ Args:
+ request: mod_python request.
+
+ This function is named headerparserhandler because it is the default
+ name for a PythonHeaderParserHandler.
+ """
+
+ handshake_is_done = False
+ try:
+ # Fallback to default http handler for request paths for which
+ # we don't have request handlers.
+ if not _dispatcher.get_handler_suite(request.uri):
+ request.log_error(
+ 'mod_pywebsocket: No handler for resource: %r' % request.uri,
+ apache.APLOG_INFO)
+ request.log_error(
+ 'mod_pywebsocket: Fallback to Apache', apache.APLOG_INFO)
+ return apache.DECLINED
+ except dispatch.DispatchException, e:
+ request.log_error(
+ 'mod_pywebsocket: Dispatch failed for error: %s' % e,
+ apache.APLOG_INFO)
+ if not handshake_is_done:
+ return e.status
+
+ try:
+ allow_draft75 = _parse_option(
+ _PYOPT_ALLOW_DRAFT75,
+ apache.main_server.get_options().get(_PYOPT_ALLOW_DRAFT75),
+ _PYOPT_ALLOW_DRAFT75_DEFINITION)
+
+ try:
+ handshake.do_handshake(
+ request, _dispatcher, allowDraft75=allow_draft75)
+ except handshake.VersionException, e:
+ request.log_error(
+ 'mod_pywebsocket: Handshake failed for version error: %s' % e,
+ apache.APLOG_INFO)
+ request.err_headers_out.add(common.SEC_WEBSOCKET_VERSION_HEADER,
+ e.supported_versions)
+ return apache.HTTP_BAD_REQUEST
+ except handshake.HandshakeException, e:
+ # Handshake for ws/wss failed.
+ # Send http response with error status.
+ request.log_error(
+ 'mod_pywebsocket: Handshake failed for error: %s' % e,
+ apache.APLOG_INFO)
+ return e.status
+
+ handshake_is_done = True
+ request._dispatcher = _dispatcher
+ _dispatcher.transfer_data(request)
+ except handshake.AbortedByUserException, e:
+ request.log_error('mod_pywebsocket: Aborted: %s' % e, apache.APLOG_INFO)
+ except Exception, e:
+        # DispatchException can also be thrown if something is wrong in the
+        # pywebsocket code. In that case it is caught here.
+
+ request.log_error('mod_pywebsocket: Exception occurred: %s\n%s' %
+ (e, util.get_stack_trace()),
+ apache.APLOG_ERR)
+        # Unknown exceptions before the handshake mean Apache must handle the
+        # request with another handler.
+ if not handshake_is_done:
+ return apache.DECLINED
+ # Set assbackwards to suppress response header generation by Apache.
+ request.assbackwards = 1
+ return apache.DONE # Return DONE such that no other handlers are invoked.
+
+
+# vi:sts=4 sw=4 et
diff --git a/testing/mochitest/pywebsocket/mod_pywebsocket/http_header_util.py b/testing/mochitest/pywebsocket/mod_pywebsocket/http_header_util.py
new file mode 100644
index 000000000..b77465393
--- /dev/null
+++ b/testing/mochitest/pywebsocket/mod_pywebsocket/http_header_util.py
@@ -0,0 +1,263 @@
+# Copyright 2011, Google Inc.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+
+"""Utilities for parsing and formatting headers that follow the grammar defined
+in HTTP RFC http://www.ietf.org/rfc/rfc2616.txt.
+"""
+
+
+import urlparse
+
+
+_SEPARATORS = '()<>@,;:\\"/[]?={} \t'
+
+
+def _is_char(c):
+ """Returns true iff c is in CHAR as specified in HTTP RFC."""
+
+ return ord(c) <= 127
+
+
+def _is_ctl(c):
+ """Returns true iff c is in CTL as specified in HTTP RFC."""
+
+ return ord(c) <= 31 or ord(c) == 127
+
+
+class ParsingState(object):
+
+ def __init__(self, data):
+ self.data = data
+ self.head = 0
+
+
+def peek(state, pos=0):
+ """Peeks the character at pos from the head of data."""
+
+ if state.head + pos >= len(state.data):
+ return None
+
+ return state.data[state.head + pos]
+
+
+def consume(state, amount=1):
+ """Consumes specified amount of bytes from the head and returns the
+ consumed bytes. If there's not enough bytes to consume, returns None.
+ """
+
+ if state.head + amount > len(state.data):
+ return None
+
+ result = state.data[state.head:state.head + amount]
+ state.head = state.head + amount
+ return result
+
+
+def consume_string(state, expected):
+ """Given a parsing state and a expected string, consumes the string from
+ the head. Returns True if consumed successfully. Otherwise, returns
+ False.
+ """
+
+ pos = 0
+
+ for c in expected:
+ if c != peek(state, pos):
+ return False
+ pos += 1
+
+ consume(state, pos)
+ return True
+
+
+def consume_lws(state):
+ """Consumes a LWS from the head. Returns True if any LWS is consumed.
+ Otherwise, returns False.
+
+ LWS = [CRLF] 1*( SP | HT )
+ """
+
+ original_head = state.head
+
+ consume_string(state, '\r\n')
+
+ pos = 0
+
+ while True:
+ c = peek(state, pos)
+ if c == ' ' or c == '\t':
+ pos += 1
+ else:
+ if pos == 0:
+ state.head = original_head
+ return False
+ else:
+ consume(state, pos)
+ return True
+
+
+def consume_lwses(state):
+ """Consumes *LWS from the head."""
+
+ while consume_lws(state):
+ pass
+
+
+def consume_token(state):
+ """Consumes a token from the head. Returns the token or None if no token
+ was found.
+ """
+
+ pos = 0
+
+ while True:
+ c = peek(state, pos)
+ if c is None or c in _SEPARATORS or _is_ctl(c) or not _is_char(c):
+ if pos == 0:
+ return None
+
+ return consume(state, pos)
+ else:
+ pos += 1
+
+
+def consume_token_or_quoted_string(state):
+ """Consumes a token or a quoted-string, and returns the token or unquoted
+ string. If no token or quoted-string was found, returns None.
+ """
+
+ original_head = state.head
+
+ if not consume_string(state, '"'):
+ return consume_token(state)
+
+ result = []
+
+ expect_quoted_pair = False
+
+ while True:
+ if not expect_quoted_pair and consume_lws(state):
+ result.append(' ')
+ continue
+
+ c = consume(state)
+ if c is None:
+ # quoted-string is not enclosed with double quotation
+ state.head = original_head
+ return None
+ elif expect_quoted_pair:
+ expect_quoted_pair = False
+ if _is_char(c):
+ result.append(c)
+ else:
+ # Non CHAR character found in quoted-pair
+ state.head = original_head
+ return None
+ elif c == '\\':
+ expect_quoted_pair = True
+ elif c == '"':
+ return ''.join(result)
+ elif _is_ctl(c):
+ # Invalid character %r found in qdtext
+ state.head = original_head
+ return None
+ else:
+ result.append(c)
+
+
+def quote_if_necessary(s):
+ """Quotes arbitrary string into quoted-string."""
+
+ quote = False
+ if s == '':
+ return '""'
+
+ result = []
+ for c in s:
+ if c == '"' or c in _SEPARATORS or _is_ctl(c) or not _is_char(c):
+ quote = True
+
+ if c == '"' or _is_ctl(c):
+ result.append('\\' + c)
+ else:
+ result.append(c)
+
+ if quote:
+ return '"' + ''.join(result) + '"'
+ else:
+ return ''.join(result)
+
+
+def parse_uri(uri):
+ """Parse absolute URI then return host, port and resource."""
+
+ parsed = urlparse.urlsplit(uri)
+ if parsed.scheme != 'wss' and parsed.scheme != 'ws':
+ # |uri| must be a relative URI.
+ # TODO(toyoshim): Should validate |uri|.
+ return None, None, uri
+
+ if parsed.hostname is None:
+ return None, None, None
+
+ port = None
+ try:
+ port = parsed.port
+ except ValueError, e:
+        # The port property raises ValueError on an invalid null port
+        # description like 'ws://host:/path'.
+ return None, None, None
+
+ if port is None:
+ if parsed.scheme == 'ws':
+ port = 80
+ else:
+ port = 443
+
+ path = parsed.path
+ if not path:
+ path += '/'
+ if parsed.query:
+ path += '?' + parsed.query
+ if parsed.fragment:
+ path += '#' + parsed.fragment
+
+ return parsed.hostname, port, path
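+
+
+# Illustrative behaviour (informal sketch, not part of the upstream module):
+#
+#   >>> parse_uri('ws://example.com/chat?room=1')
+#   ('example.com', 80, '/chat?room=1')
+#   >>> parse_uri('wss://example.com:8443/echo')
+#   ('example.com', 8443, '/echo')
+#   >>> parse_uri('/relative/path')
+#   (None, None, '/relative/path')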
+
+
+try:
+ urlparse.uses_netloc.index('ws')
+except ValueError, e:
+    # urlparse in Python 2.5.1 doesn't have 'ws' and 'wss' entries.
+ urlparse.uses_netloc.append('ws')
+ urlparse.uses_netloc.append('wss')
+
+
+# vi:sts=4 sw=4 et
diff --git a/testing/mochitest/pywebsocket/mod_pywebsocket/memorizingfile.py b/testing/mochitest/pywebsocket/mod_pywebsocket/memorizingfile.py
new file mode 100644
index 000000000..4d4cd9585
--- /dev/null
+++ b/testing/mochitest/pywebsocket/mod_pywebsocket/memorizingfile.py
@@ -0,0 +1,99 @@
+#!/usr/bin/env python
+#
+# Copyright 2011, Google Inc.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+
+"""Memorizing file.
+
+A memorizing file wraps a file and memorizes lines read by readline.
+"""
+
+
+import sys
+
+
+class MemorizingFile(object):
+ """MemorizingFile wraps a file and memorizes lines read by readline.
+
+    Note that data read by other methods is not memorized. This behavior
+    is good enough for memorizing the lines SimpleHTTPServer reads before
+    control reaches WebSocketRequestHandler.
+ """
+
+ def __init__(self, file_, max_memorized_lines=sys.maxint):
+ """Construct an instance.
+
+ Args:
+ file_: the file object to wrap.
+ max_memorized_lines: the maximum number of lines to memorize.
+ Only the first max_memorized_lines are memorized.
+ Default: sys.maxint.
+ """
+
+ self._file = file_
+ self._memorized_lines = []
+ self._max_memorized_lines = max_memorized_lines
+ self._buffered = False
+ self._buffered_line = None
+
+ def __getattribute__(self, name):
+ if name in ('_file', '_memorized_lines', '_max_memorized_lines',
+ '_buffered', '_buffered_line', 'readline',
+ 'get_memorized_lines'):
+ return object.__getattribute__(self, name)
+ return self._file.__getattribute__(name)
+
+ def readline(self, size=-1):
+ """Override file.readline and memorize the line read.
+
+        Note that even if size is specified and smaller than the actual line
+        length, the whole line will be read from the underlying file object;
+        the remainder is returned by subsequent readline calls.
+ """
+
+ if self._buffered:
+ line = self._buffered_line
+ self._buffered = False
+ else:
+ line = self._file.readline()
+ if line and len(self._memorized_lines) < self._max_memorized_lines:
+ self._memorized_lines.append(line)
+ if size >= 0 and size < len(line):
+ self._buffered = True
+ self._buffered_line = line[size:]
+ return line[:size]
+ return line
+
+ def get_memorized_lines(self):
+ """Get lines memorized so far."""
+ return self._memorized_lines
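+
+
+# Illustrative usage (informal sketch, not part of the upstream module):
+#
+#   from StringIO import StringIO
+#
+#   wrapped = MemorizingFile(StringIO('GET / HTTP/1.1\r\nHost: x\r\n\r\n'))
+#   wrapped.readline()             # 'GET / HTTP/1.1\r\n'
+#   wrapped.readline()             # 'Host: x\r\n'
+#   wrapped.get_memorized_lines()  # the two lines above, in order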
+
+
+# vi:sts=4 sw=4 et
diff --git a/testing/mochitest/pywebsocket/mod_pywebsocket/msgutil.py b/testing/mochitest/pywebsocket/mod_pywebsocket/msgutil.py
new file mode 100644
index 000000000..4c1a0114b
--- /dev/null
+++ b/testing/mochitest/pywebsocket/mod_pywebsocket/msgutil.py
@@ -0,0 +1,219 @@
+# Copyright 2011, Google Inc.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+
+"""Message related utilities.
+
+Note: request.connection.write/read are used in this module, even though
+the mod_python documentation says that they should be used only in connection
+handlers. Unfortunately, we have no other options. For example,
+request.write/read are not suitable because they don't allow reading and
+writing raw bytes directly.
+"""
+
+
+import Queue
+import threading
+
+
+# Export Exception symbols from msgutil for backward compatibility
+from mod_pywebsocket._stream_base import ConnectionTerminatedException
+from mod_pywebsocket._stream_base import InvalidFrameException
+from mod_pywebsocket._stream_base import BadOperationException
+from mod_pywebsocket._stream_base import UnsupportedFrameException
+
+
+# An API for handler to send/receive WebSocket messages.
+def close_connection(request):
+ """Close connection.
+
+ Args:
+ request: mod_python request.
+ """
+ request.ws_stream.close_connection()
+
+
+def send_message(request, payload_data, end=True, binary=False):
+ """Send a message (or part of a message).
+
+ Args:
+ request: mod_python request.
+ payload_data: unicode text or str binary to send.
+ end: True to terminate a message.
+ False to send payload_data as part of a message that is to be
+ terminated by next or later send_message call with end=True.
+ binary: send payload_data as binary frame(s).
+ Raises:
+        BadOperationException: when the server has already terminated.
+ """
+ request.ws_stream.send_message(payload_data, end, binary)
+
+
+def receive_message(request):
+ """Receive a WebSocket frame and return its payload as a text in
+ unicode or a binary in str.
+
+ Args:
+ request: mod_python request.
+ Raises:
+ InvalidFrameException: when client send invalid frame.
+ UnsupportedFrameException: when client send unsupported frame e.g. some
+ of reserved bit is set but no extension can
+ recognize it.
+ InvalidUTF8Exception: when client send a text frame containing any
+ invalid UTF-8 string.
+ ConnectionTerminatedException: when the connection is closed
+ unexpectedly.
+ BadOperationException: when client already terminated.
+ """
+ return request.ws_stream.receive_message()
+
+
+def send_ping(request, body=''):
+ request.ws_stream.send_ping(body)
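+
+
+# A minimal handler sketch (assumed layout, not part of the upstream module)
+# showing how these helpers are typically used from a pywebsocket resource
+# handler that echoes every received message back to the client:
+#
+#   from mod_pywebsocket import msgutil
+#
+#   def web_socket_do_extra_handshake(request):
+#       pass  # accept the handshake as-is
+#
+#   def web_socket_transfer_data(request):
+#       while True:
+#           message = msgutil.receive_message(request)
+#           msgutil.send_message(request, message)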
+
+
+class MessageReceiver(threading.Thread):
+ """This class receives messages from the client.
+
+ This class provides three ways to receive messages: blocking,
+ non-blocking, and via callback. Callback has the highest precedence.
+
+ Note: This class should not be used with the standalone server for wss
+ because pyOpenSSL used by the server raises a fatal error if the socket
+ is accessed from multiple threads.
+ """
+
+ def __init__(self, request, onmessage=None):
+ """Construct an instance.
+
+ Args:
+ request: mod_python request.
+ onmessage: a function to be called when a message is received.
+ May be None. If not None, the function is called on
+ another thread. In that case, MessageReceiver.receive
+ and MessageReceiver.receive_nowait are useless
+ because they will never return any messages.
+ """
+
+ threading.Thread.__init__(self)
+ self._request = request
+ self._queue = Queue.Queue()
+ self._onmessage = onmessage
+ self._stop_requested = False
+ self.setDaemon(True)
+ self.start()
+
+ def run(self):
+ try:
+ while not self._stop_requested:
+ message = receive_message(self._request)
+ if self._onmessage:
+ self._onmessage(message)
+ else:
+ self._queue.put(message)
+ finally:
+ close_connection(self._request)
+
+ def receive(self):
+ """ Receive a message from the channel, blocking.
+
+ Returns:
+ message as a unicode string.
+ """
+ return self._queue.get()
+
+ def receive_nowait(self):
+ """ Receive a message from the channel, non-blocking.
+
+ Returns:
+ message as a unicode string if available. None otherwise.
+ """
+ try:
+ message = self._queue.get_nowait()
+ except Queue.Empty:
+ message = None
+ return message
+
+ def stop(self):
+ """Request to stop this instance.
+
+ The instance will be stopped after receiving the next message.
+ This method may not be very useful, but there is no clean way
+ in Python to forcefully stop a running thread.
+ """
+ self._stop_requested = True
+
+
+class MessageSender(threading.Thread):
+ """This class sends messages to the client.
+
+ This class provides both synchronous and asynchronous ways to send
+ messages.
+
+ Note: This class should not be used with the standalone server for wss
+ because pyOpenSSL used by the server raises a fatal error if the socket
+ is accessed from multiple threads.
+ """
+
+ def __init__(self, request):
+ """Construct an instance.
+
+ Args:
+ request: mod_python request.
+ """
+ threading.Thread.__init__(self)
+ self._request = request
+ self._queue = Queue.Queue()
+ self.setDaemon(True)
+ self.start()
+
+ def run(self):
+ while True:
+ message, condition = self._queue.get()
+ condition.acquire()
+ send_message(self._request, message)
+ condition.notify()
+ condition.release()
+
+ def send(self, message):
+ """Send a message, blocking."""
+
+ condition = threading.Condition()
+ condition.acquire()
+ self._queue.put((message, condition))
+ condition.wait()
+
+ def send_nowait(self, message):
+ """Send a message, non-blocking."""
+
+ self._queue.put((message, threading.Condition()))
+
+
+# vi:sts=4 sw=4 et
diff --git a/testing/mochitest/pywebsocket/mod_pywebsocket/mux.py b/testing/mochitest/pywebsocket/mod_pywebsocket/mux.py
new file mode 100644
index 000000000..76334685b
--- /dev/null
+++ b/testing/mochitest/pywebsocket/mod_pywebsocket/mux.py
@@ -0,0 +1,1889 @@
+# Copyright 2012, Google Inc.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+
+"""This file provides classes and helper functions for multiplexing extension.
+
+Specification:
+http://tools.ietf.org/html/draft-ietf-hybi-websocket-multiplexing-06
+"""
+
+
+import collections
+import copy
+import email
+import email.parser
+import logging
+import math
+import struct
+import threading
+import traceback
+
+from mod_pywebsocket import common
+from mod_pywebsocket import handshake
+from mod_pywebsocket import util
+from mod_pywebsocket._stream_base import BadOperationException
+from mod_pywebsocket._stream_base import ConnectionTerminatedException
+from mod_pywebsocket._stream_base import InvalidFrameException
+from mod_pywebsocket._stream_hybi import Frame
+from mod_pywebsocket._stream_hybi import Stream
+from mod_pywebsocket._stream_hybi import StreamOptions
+from mod_pywebsocket._stream_hybi import create_binary_frame
+from mod_pywebsocket._stream_hybi import create_closing_handshake_body
+from mod_pywebsocket._stream_hybi import create_header
+from mod_pywebsocket._stream_hybi import create_length_header
+from mod_pywebsocket._stream_hybi import parse_frame
+from mod_pywebsocket.handshake import hybi
+
+
+_CONTROL_CHANNEL_ID = 0
+_DEFAULT_CHANNEL_ID = 1
+
+_MUX_OPCODE_ADD_CHANNEL_REQUEST = 0
+_MUX_OPCODE_ADD_CHANNEL_RESPONSE = 1
+_MUX_OPCODE_FLOW_CONTROL = 2
+_MUX_OPCODE_DROP_CHANNEL = 3
+_MUX_OPCODE_NEW_CHANNEL_SLOT = 4
+
+_MAX_CHANNEL_ID = 2 ** 29 - 1
+
+_INITIAL_NUMBER_OF_CHANNEL_SLOTS = 64
+_INITIAL_QUOTA_FOR_CLIENT = 8 * 1024
+
+_HANDSHAKE_ENCODING_IDENTITY = 0
+_HANDSHAKE_ENCODING_DELTA = 1
+
+# We only need these status codes for now.
+_HTTP_BAD_RESPONSE_MESSAGES = {
+ common.HTTP_STATUS_BAD_REQUEST: 'Bad Request',
+}
+
+# DropChannel reason code
+# TODO(bashi): Define all reason codes defined in the -05 draft.
+_DROP_CODE_NORMAL_CLOSURE = 1000
+
+_DROP_CODE_INVALID_ENCAPSULATING_MESSAGE = 2001
+_DROP_CODE_CHANNEL_ID_TRUNCATED = 2002
+_DROP_CODE_ENCAPSULATED_FRAME_IS_TRUNCATED = 2003
+_DROP_CODE_UNKNOWN_MUX_OPCODE = 2004
+_DROP_CODE_INVALID_MUX_CONTROL_BLOCK = 2005
+_DROP_CODE_CHANNEL_ALREADY_EXISTS = 2006
+_DROP_CODE_NEW_CHANNEL_SLOT_VIOLATION = 2007
+_DROP_CODE_UNKNOWN_REQUEST_ENCODING = 2010
+
+_DROP_CODE_SEND_QUOTA_VIOLATION = 3005
+_DROP_CODE_SEND_QUOTA_OVERFLOW = 3006
+_DROP_CODE_ACKNOWLEDGED = 3008
+_DROP_CODE_BAD_FRAGMENTATION = 3009
+
+
+class MuxUnexpectedException(Exception):
+ """Exception in handling multiplexing extension."""
+ pass
+
+
+# Temporary
+class MuxNotImplementedException(Exception):
+ """Raised when a flow enters unimplemented code path."""
+ pass
+
+
+class LogicalConnectionClosedException(Exception):
+ """Raised when logical connection is gracefully closed."""
+ pass
+
+
+class PhysicalConnectionError(Exception):
+ """Raised when there is a physical connection error."""
+ def __init__(self, drop_code, message=''):
+ super(PhysicalConnectionError, self).__init__(
+ 'code=%d, message=%r' % (drop_code, message))
+ self.drop_code = drop_code
+ self.message = message
+
+
+class LogicalChannelError(Exception):
+ """Raised when there is a logical channel error."""
+ def __init__(self, channel_id, drop_code, message=''):
+ super(LogicalChannelError, self).__init__(
+ 'channel_id=%d, code=%d, message=%r' % (
+ channel_id, drop_code, message))
+ self.channel_id = channel_id
+ self.drop_code = drop_code
+ self.message = message
+
+
+def _encode_channel_id(channel_id):
+ if channel_id < 0:
+ raise ValueError('Channel id %d must not be negative' % channel_id)
+
+ if channel_id < 2 ** 7:
+ return chr(channel_id)
+ if channel_id < 2 ** 14:
+ return struct.pack('!H', 0x8000 + channel_id)
+ if channel_id < 2 ** 21:
+ first = chr(0xc0 + (channel_id >> 16))
+ return first + struct.pack('!H', channel_id & 0xffff)
+ if channel_id < 2 ** 29:
+ return struct.pack('!L', 0xe0000000 + channel_id)
+
+ raise ValueError('Channel id %d is too large' % channel_id)
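+
+
+# Illustrative encoded sizes (informal sketch, not part of the upstream
+# module): channel id 3 encodes to the single byte '\x03', id 1000 to the two
+# bytes '\x83\xe8' (0x8000 + 1000), and id 1000000 to the three bytes
+# '\xcf\x42\x40' (0xc0 plus the high 5 bits, then the low 16 bits).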
+
+
+def _encode_number(number):
+ return create_length_header(number, False)
+
+
+def _create_add_channel_response(channel_id, encoded_handshake,
+ encoding=0, rejected=False):
+ if encoding != 0 and encoding != 1:
+ raise ValueError('Invalid encoding %d' % encoding)
+
+ first_byte = ((_MUX_OPCODE_ADD_CHANNEL_RESPONSE << 5) |
+ (rejected << 4) | encoding)
+ block = (chr(first_byte) +
+ _encode_channel_id(channel_id) +
+ _encode_number(len(encoded_handshake)) +
+ encoded_handshake)
+ return block
+
+
+def _create_drop_channel(channel_id, code=None, message=''):
+ if len(message) > 0 and code is None:
+ raise ValueError('Code must be specified if message is specified')
+
+ first_byte = _MUX_OPCODE_DROP_CHANNEL << 5
+ block = chr(first_byte) + _encode_channel_id(channel_id)
+ if code is None:
+ block += _encode_number(0) # Reason size
+ else:
+ reason = struct.pack('!H', code) + message
+ reason_size = _encode_number(len(reason))
+ block += reason_size + reason
+
+ return block
+
+
+def _create_flow_control(channel_id, replenished_quota):
+ first_byte = _MUX_OPCODE_FLOW_CONTROL << 5
+ block = (chr(first_byte) +
+ _encode_channel_id(channel_id) +
+ _encode_number(replenished_quota))
+ return block
+
+
+def _create_new_channel_slot(slots, send_quota):
+ if slots < 0 or send_quota < 0:
+ raise ValueError('slots and send_quota must be non-negative.')
+ first_byte = _MUX_OPCODE_NEW_CHANNEL_SLOT << 5
+ block = (chr(first_byte) +
+ _encode_number(slots) +
+ _encode_number(send_quota))
+ return block
+
+
+def _create_fallback_new_channel_slot():
+ first_byte = (_MUX_OPCODE_NEW_CHANNEL_SLOT << 5) | 1 # Set the F flag
+ block = (chr(first_byte) + _encode_number(0) + _encode_number(0))
+ return block
+
+
+def _parse_request_text(request_text):
+ request_line, header_lines = request_text.split('\r\n', 1)
+
+ words = request_line.split(' ')
+ if len(words) != 3:
+ raise ValueError('Bad Request-Line syntax %r' % request_line)
+ [command, path, version] = words
+ if version != 'HTTP/1.1':
+ raise ValueError('Bad request version %r' % version)
+
+ # email.parser.Parser() parses RFC 2822 (RFC 822) style headers.
+    # RFC 6455 refers to RFC 2616 for handshake parsing, and RFC 2616 refers
+    # to RFC 822.
+ headers = email.parser.Parser().parsestr(header_lines)
+ return command, path, version, headers
+
+
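+# For example, _parse_request_text() applied to an encoded handshake such as
+#
+#   'GET /echo HTTP/1.1\r\nHost: example.com\r\nUpgrade: websocket\r\n\r\n'
+#
+# (path and host are placeholder values) returns
+# ('GET', '/echo', 'HTTP/1.1', headers), where headers is the parsed
+# email.message.Message object holding the 'Host' and 'Upgrade' fields.
+
+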
+class _ControlBlock(object):
+    """A structure that holds the parsing result of a multiplexing control
+    block. Control block specific attributes will be added by
+    _MuxFramePayloadParser (e.g. encoded_handshake will be added for
+    AddChannelRequest and AddChannelResponse).
+    """
+
+ def __init__(self, opcode):
+ self.opcode = opcode
+
+
+class _MuxFramePayloadParser(object):
+ """A class that parses multiplexed frame payload."""
+
+ def __init__(self, payload):
+ self._data = payload
+ self._read_position = 0
+ self._logger = util.get_class_logger(self)
+
+ def read_channel_id(self):
+ """Reads channel id.
+
+ Raises:
+ ValueError: when the payload doesn't contain
+ valid channel id.
+ """
+
+ remaining_length = len(self._data) - self._read_position
+ pos = self._read_position
+ if remaining_length == 0:
+ raise ValueError('Invalid channel id format')
+
+ channel_id = ord(self._data[pos])
+ channel_id_length = 1
+ if channel_id & 0xe0 == 0xe0:
+ if remaining_length < 4:
+ raise ValueError('Invalid channel id format')
+ channel_id = struct.unpack('!L',
+ self._data[pos:pos+4])[0] & 0x1fffffff
+ channel_id_length = 4
+ elif channel_id & 0xc0 == 0xc0:
+ if remaining_length < 3:
+ raise ValueError('Invalid channel id format')
+ channel_id = (((channel_id & 0x1f) << 16) +
+ struct.unpack('!H', self._data[pos+1:pos+3])[0])
+ channel_id_length = 3
+ elif channel_id & 0x80 == 0x80:
+ if remaining_length < 2:
+ raise ValueError('Invalid channel id format')
+ channel_id = struct.unpack('!H',
+ self._data[pos:pos+2])[0] & 0x3fff
+ channel_id_length = 2
+ self._read_position += channel_id_length
+
+ return channel_id
+
+ def read_inner_frame(self):
+ """Reads an inner frame.
+
+ Raises:
+ PhysicalConnectionError: when the inner frame is invalid.
+ """
+
+ if len(self._data) == self._read_position:
+ raise PhysicalConnectionError(
+ _DROP_CODE_ENCAPSULATED_FRAME_IS_TRUNCATED)
+
+ bits = ord(self._data[self._read_position])
+ self._read_position += 1
+ fin = (bits & 0x80) == 0x80
+ rsv1 = (bits & 0x40) == 0x40
+ rsv2 = (bits & 0x20) == 0x20
+ rsv3 = (bits & 0x10) == 0x10
+ opcode = bits & 0xf
+ payload = self.remaining_data()
+ # Consume rest of the message which is payload data of the original
+ # frame.
+ self._read_position = len(self._data)
+ return fin, rsv1, rsv2, rsv3, opcode, payload
+
+ def _read_number(self):
+ if self._read_position + 1 > len(self._data):
+ raise ValueError(
+ 'Cannot read the first byte of number field')
+
+ number = ord(self._data[self._read_position])
+ if number & 0x80 == 0x80:
+ raise ValueError(
+ 'The most significant bit of the first byte of number should '
+ 'be unset')
+ self._read_position += 1
+ pos = self._read_position
+ if number == 127:
+ if pos + 8 > len(self._data):
+ raise ValueError('Invalid number field')
+ self._read_position += 8
+ number = struct.unpack('!Q', self._data[pos:pos+8])[0]
+ if number > 0x7FFFFFFFFFFFFFFF:
+ raise ValueError('Encoded number(%d) >= 2^63' % number)
+ if number <= 0xFFFF:
+ raise ValueError(
+ '%d should not be encoded by 9 bytes encoding' % number)
+ return number
+ if number == 126:
+ if pos + 2 > len(self._data):
+ raise ValueError('Invalid number field')
+ self._read_position += 2
+ number = struct.unpack('!H', self._data[pos:pos+2])[0]
+ if number <= 125:
+ raise ValueError(
+ '%d should not be encoded by 3 bytes encoding' % number)
+ return number
+
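+    # For example, with the number encoding consumed by _read_number() above,
+    # '\x7d' decodes to 125 (single-byte form) and '\x7e\x01\x2c' decodes to
+    # 300 (the 126 prefix followed by a 16-bit big-endian value).
+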
+ def _read_size_and_contents(self):
+        """Reads data that consists of the following:
+            - the size of the contents, encoded the same way as the payload
+              length of the WebSocket Protocol with 1 bit of padding at the
+              head.
+            - the contents.
+ """
+
+ try:
+ size = self._read_number()
+ except ValueError, e:
+ raise PhysicalConnectionError(_DROP_CODE_INVALID_MUX_CONTROL_BLOCK,
+ str(e))
+ pos = self._read_position
+ if pos + size > len(self._data):
+ raise PhysicalConnectionError(
+ _DROP_CODE_INVALID_MUX_CONTROL_BLOCK,
+ 'Cannot read %d bytes data' % size)
+
+ self._read_position += size
+ return self._data[pos:pos+size]
+
+ def _read_add_channel_request(self, first_byte, control_block):
+ reserved = (first_byte >> 2) & 0x7
+ if reserved != 0:
+ raise PhysicalConnectionError(
+ _DROP_CODE_INVALID_MUX_CONTROL_BLOCK,
+ 'Reserved bits must be unset')
+
+ # Invalid encoding will be handled by MuxHandler.
+ encoding = first_byte & 0x3
+ try:
+ control_block.channel_id = self.read_channel_id()
+ except ValueError, e:
+ raise PhysicalConnectionError(_DROP_CODE_INVALID_MUX_CONTROL_BLOCK)
+ control_block.encoding = encoding
+ encoded_handshake = self._read_size_and_contents()
+ control_block.encoded_handshake = encoded_handshake
+ return control_block
+
+ def _read_add_channel_response(self, first_byte, control_block):
+ reserved = (first_byte >> 2) & 0x3
+ if reserved != 0:
+ raise PhysicalConnectionError(
+ _DROP_CODE_INVALID_MUX_CONTROL_BLOCK,
+ 'Reserved bits must be unset')
+
+ control_block.accepted = (first_byte >> 4) & 1
+ control_block.encoding = first_byte & 0x3
+ try:
+ control_block.channel_id = self.read_channel_id()
+ except ValueError, e:
+ raise PhysicalConnectionError(_DROP_CODE_INVALID_MUX_CONTROL_BLOCK)
+ control_block.encoded_handshake = self._read_size_and_contents()
+ return control_block
+
+ def _read_flow_control(self, first_byte, control_block):
+ reserved = first_byte & 0x1f
+ if reserved != 0:
+ raise PhysicalConnectionError(
+ _DROP_CODE_INVALID_MUX_CONTROL_BLOCK,
+ 'Reserved bits must be unset')
+
+ try:
+ control_block.channel_id = self.read_channel_id()
+ control_block.send_quota = self._read_number()
+ except ValueError, e:
+ raise PhysicalConnectionError(_DROP_CODE_INVALID_MUX_CONTROL_BLOCK,
+ str(e))
+
+ return control_block
+
+ def _read_drop_channel(self, first_byte, control_block):
+ reserved = first_byte & 0x1f
+ if reserved != 0:
+ raise PhysicalConnectionError(
+ _DROP_CODE_INVALID_MUX_CONTROL_BLOCK,
+ 'Reserved bits must be unset')
+
+ try:
+ control_block.channel_id = self.read_channel_id()
+ except ValueError, e:
+ raise PhysicalConnectionError(_DROP_CODE_INVALID_MUX_CONTROL_BLOCK)
+ reason = self._read_size_and_contents()
+ if len(reason) == 0:
+ control_block.drop_code = None
+ control_block.drop_message = ''
+ elif len(reason) >= 2:
+ control_block.drop_code = struct.unpack('!H', reason[:2])[0]
+ control_block.drop_message = reason[2:]
+ else:
+ raise PhysicalConnectionError(
+ _DROP_CODE_INVALID_MUX_CONTROL_BLOCK,
+                'Received DropChannel that contains only a 1-byte reason')
+ return control_block
+
+ def _read_new_channel_slot(self, first_byte, control_block):
+ reserved = first_byte & 0x1e
+ if reserved != 0:
+ raise PhysicalConnectionError(
+ _DROP_CODE_INVALID_MUX_CONTROL_BLOCK,
+ 'Reserved bits must be unset')
+ control_block.fallback = first_byte & 1
+ try:
+ control_block.slots = self._read_number()
+ control_block.send_quota = self._read_number()
+ except ValueError, e:
+ raise PhysicalConnectionError(_DROP_CODE_INVALID_MUX_CONTROL_BLOCK,
+ str(e))
+ return control_block
+
+ def read_control_blocks(self):
+ """Reads control block(s).
+
+ Raises:
+ PhysicalConnectionError: when the payload contains invalid control
+ block(s).
+            StopIteration: when no control blocks are left.
+ """
+
+ while self._read_position < len(self._data):
+ first_byte = ord(self._data[self._read_position])
+ self._read_position += 1
+ opcode = (first_byte >> 5) & 0x7
+ control_block = _ControlBlock(opcode=opcode)
+ if opcode == _MUX_OPCODE_ADD_CHANNEL_REQUEST:
+ yield self._read_add_channel_request(first_byte, control_block)
+ elif opcode == _MUX_OPCODE_ADD_CHANNEL_RESPONSE:
+ yield self._read_add_channel_response(
+ first_byte, control_block)
+ elif opcode == _MUX_OPCODE_FLOW_CONTROL:
+ yield self._read_flow_control(first_byte, control_block)
+ elif opcode == _MUX_OPCODE_DROP_CHANNEL:
+ yield self._read_drop_channel(first_byte, control_block)
+ elif opcode == _MUX_OPCODE_NEW_CHANNEL_SLOT:
+ yield self._read_new_channel_slot(first_byte, control_block)
+ else:
+ raise PhysicalConnectionError(
+ _DROP_CODE_UNKNOWN_MUX_OPCODE,
+ 'Invalid opcode %d' % opcode)
+
+ assert self._read_position == len(self._data)
+ raise StopIteration
+
+ def remaining_data(self):
+ """Returns remaining data."""
+
+ return self._data[self._read_position:]
+
+
+class _LogicalRequest(object):
+ """Mimics mod_python request."""
+
+ def __init__(self, channel_id, command, path, protocol, headers,
+ connection):
+ """Constructs an instance.
+
+ Args:
+ channel_id: the channel id of the logical channel.
+ command: HTTP request command.
+            path: HTTP request path.
+            protocol: HTTP request protocol (e.g. 'HTTP/1.1').
+            headers: HTTP headers.
+ connection: _LogicalConnection instance.
+ """
+
+ self.channel_id = channel_id
+ self.method = command
+ self.uri = path
+ self.protocol = protocol
+ self.headers_in = headers
+ self.connection = connection
+ self.server_terminated = False
+ self.client_terminated = False
+
+ def is_https(self):
+ """Mimics request.is_https(). Returns False because this method is
+ used only by old protocols (hixie and hybi00).
+ """
+
+ return False
+
+
+class _LogicalConnection(object):
+ """Mimics mod_python mp_conn."""
+
+ # For details, see the comment of set_read_state().
+ STATE_ACTIVE = 1
+ STATE_GRACEFULLY_CLOSED = 2
+ STATE_TERMINATED = 3
+
+ def __init__(self, mux_handler, channel_id):
+ """Constructs an instance.
+
+ Args:
+ mux_handler: _MuxHandler instance.
+ channel_id: channel id of this connection.
+ """
+
+ self._mux_handler = mux_handler
+ self._channel_id = channel_id
+ self._incoming_data = ''
+
+ # - Protects _waiting_write_completion
+ # - Signals the thread waiting for completion of write by mux handler
+ self._write_condition = threading.Condition()
+ self._waiting_write_completion = False
+
+ self._read_condition = threading.Condition()
+ self._read_state = self.STATE_ACTIVE
+
+ def get_local_addr(self):
+ """Getter to mimic mp_conn.local_addr."""
+
+ return self._mux_handler.physical_connection.get_local_addr()
+ local_addr = property(get_local_addr)
+
+ def get_remote_addr(self):
+ """Getter to mimic mp_conn.remote_addr."""
+
+ return self._mux_handler.physical_connection.get_remote_addr()
+ remote_addr = property(get_remote_addr)
+
+ def get_memorized_lines(self):
+ """Gets memorized lines. Not supported."""
+
+ raise MuxUnexpectedException('_LogicalConnection does not support '
+ 'get_memorized_lines')
+
+ def write(self, data):
+        """Writes data. mux_handler sends data asynchronously. The caller will
+        be suspended until the write is done.
+
+ Args:
+ data: data to be written.
+
+ Raises:
+ MuxUnexpectedException: when called before finishing the previous
+ write.
+ """
+
+ try:
+ self._write_condition.acquire()
+ if self._waiting_write_completion:
+ raise MuxUnexpectedException(
+                    'Logical connection %d is already waiting for the '
+                    'completion of a write' % self._channel_id)
+
+ self._waiting_write_completion = True
+ self._mux_handler.send_data(self._channel_id, data)
+ self._write_condition.wait()
+ # TODO(tyoshino): Raise an exception if woke up by on_writer_done.
+ finally:
+ self._write_condition.release()
+
+ def write_control_data(self, data):
+        """Writes data via the control channel. Does not wait for the write to
+        finish because this method can be called by the mux dispatcher.
+
+ Args:
+ data: data to be written.
+ """
+
+ self._mux_handler.send_control_data(data)
+
+ def on_write_data_done(self):
+ """Called when sending data is completed."""
+
+ try:
+ self._write_condition.acquire()
+ if not self._waiting_write_completion:
+ raise MuxUnexpectedException(
+ 'Invalid call of on_write_data_done for logical '
+ 'connection %d' % self._channel_id)
+ self._waiting_write_completion = False
+ self._write_condition.notify()
+ finally:
+ self._write_condition.release()
+
+ def on_writer_done(self):
+ """Called by the mux handler when the writer thread has finished."""
+
+ try:
+ self._write_condition.acquire()
+ self._waiting_write_completion = False
+ self._write_condition.notify()
+ finally:
+ self._write_condition.release()
+
+ def append_frame_data(self, frame_data):
+ """Appends incoming frame data. Called when mux_handler dispatches
+ frame data to the corresponding application.
+
+ Args:
+ frame_data: incoming frame data.
+ """
+
+ self._read_condition.acquire()
+ self._incoming_data += frame_data
+ self._read_condition.notify()
+ self._read_condition.release()
+
+ def read(self, length):
+ """Reads data. Blocks until enough data has arrived via physical
+ connection.
+
+ Args:
+ length: length of data to be read.
+ Raises:
+ LogicalConnectionClosedException: when closing handshake for this
+ logical channel has been received.
+ ConnectionTerminatedException: when the physical connection has
+ closed, or an error is caused on the reader thread.
+ """
+
+ self._read_condition.acquire()
+ while (self._read_state == self.STATE_ACTIVE and
+ len(self._incoming_data) < length):
+ self._read_condition.wait()
+
+ try:
+ if self._read_state == self.STATE_GRACEFULLY_CLOSED:
+ raise LogicalConnectionClosedException(
+ 'Logical channel %d has closed.' % self._channel_id)
+ elif self._read_state == self.STATE_TERMINATED:
+ raise ConnectionTerminatedException(
+ 'Receiving %d byte failed. Logical channel (%d) closed' %
+ (length, self._channel_id))
+
+ value = self._incoming_data[:length]
+ self._incoming_data = self._incoming_data[length:]
+ finally:
+ self._read_condition.release()
+
+ return value
+
+ def set_read_state(self, new_state):
+ """Sets the state of this connection. Called when an event for this
+ connection has occurred.
+
+ Args:
+            new_state: state to be set. new_state must be one of the
+                following:
+                - STATE_GRACEFULLY_CLOSED: when the closing handshake for this
+                    connection has been received.
+                - STATE_TERMINATED: when the physical connection has closed or
+                    a DropChannel for this connection has been received.
+ """
+
+ self._read_condition.acquire()
+ self._read_state = new_state
+ self._read_condition.notify()
+ self._read_condition.release()
+
+
+class _InnerMessage(object):
+ """Holds the result of _InnerMessageBuilder.build().
+ """
+
+ def __init__(self, opcode, payload):
+ self.opcode = opcode
+ self.payload = payload
+
+
+class _InnerMessageBuilder(object):
+ """A class that holds the context of inner message fragmentation and
+ builds a message from fragmented inner frame(s).
+ """
+
+ def __init__(self):
+ self._control_opcode = None
+ self._pending_control_fragments = []
+ self._message_opcode = None
+ self._pending_message_fragments = []
+ self._frame_handler = self._handle_first
+
+ def _handle_first(self, frame):
+ if frame.opcode == common.OPCODE_CONTINUATION:
+ raise InvalidFrameException('Sending invalid continuation opcode')
+
+ if common.is_control_opcode(frame.opcode):
+ return self._process_first_fragmented_control(frame)
+ else:
+ return self._process_first_fragmented_message(frame)
+
+ def _process_first_fragmented_control(self, frame):
+ self._control_opcode = frame.opcode
+ self._pending_control_fragments.append(frame.payload)
+ if not frame.fin:
+ self._frame_handler = self._handle_fragmented_control
+ return None
+ return self._reassemble_fragmented_control()
+
+ def _process_first_fragmented_message(self, frame):
+ self._message_opcode = frame.opcode
+ self._pending_message_fragments.append(frame.payload)
+ if not frame.fin:
+ self._frame_handler = self._handle_fragmented_message
+ return None
+ return self._reassemble_fragmented_message()
+
+ def _handle_fragmented_control(self, frame):
+ if frame.opcode != common.OPCODE_CONTINUATION:
+ raise InvalidFrameException(
+ 'Sending invalid opcode %d while sending fragmented control '
+ 'message' % frame.opcode)
+ self._pending_control_fragments.append(frame.payload)
+ if not frame.fin:
+ return None
+ return self._reassemble_fragmented_control()
+
+ def _reassemble_fragmented_control(self):
+ opcode = self._control_opcode
+ payload = ''.join(self._pending_control_fragments)
+ self._control_opcode = None
+ self._pending_control_fragments = []
+ if self._message_opcode is not None:
+ self._frame_handler = self._handle_fragmented_message
+ else:
+ self._frame_handler = self._handle_first
+ return _InnerMessage(opcode, payload)
+
+ def _handle_fragmented_message(self, frame):
+ # Sender can interleave a control message while sending fragmented
+ # messages.
+ if common.is_control_opcode(frame.opcode):
+ if self._control_opcode is not None:
+ raise MuxUnexpectedException(
+ 'Should not reach here(Bug in builder)')
+ return self._process_first_fragmented_control(frame)
+
+ if frame.opcode != common.OPCODE_CONTINUATION:
+ raise InvalidFrameException(
+ 'Sending invalid opcode %d while sending fragmented message' %
+ frame.opcode)
+ self._pending_message_fragments.append(frame.payload)
+ if not frame.fin:
+ return None
+ return self._reassemble_fragmented_message()
+
+ def _reassemble_fragmented_message(self):
+ opcode = self._message_opcode
+ payload = ''.join(self._pending_message_fragments)
+ self._message_opcode = None
+ self._pending_message_fragments = []
+ self._frame_handler = self._handle_first
+ return _InnerMessage(opcode, payload)
+
+ def build(self, frame):
+ """Build an inner message. Returns an _InnerMessage instance when
+ the given frame is the last fragmented frame. Returns None otherwise.
+
+ Args:
+ frame: an inner frame.
+ Raises:
+            InvalidFrameException: when an invalid opcode is received (e.g.
+                receiving a non-continuation data opcode while the fin flag
+                of the previous inner frame was not set).
+ """
+
+ return self._frame_handler(frame)
+
+
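+# A minimal usage sketch of the builder above (fragment payloads are
+# placeholders): a non-final text frame followed by a final continuation frame
+# is reassembled into a single _InnerMessage.
+#
+#   builder = _InnerMessageBuilder()
+#   builder.build(Frame(fin=0, opcode=common.OPCODE_TEXT, payload='Hel'))
+#   # -> None (more fragments expected)
+#   builder.build(Frame(fin=1, opcode=common.OPCODE_CONTINUATION, payload='lo'))
+#   # -> _InnerMessage(common.OPCODE_TEXT, 'Hello')
+
+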
+class _LogicalStream(Stream):
+ """Mimics the Stream class. This class interprets multiplexed WebSocket
+ frames.
+ """
+
+ def __init__(self, request, stream_options, send_quota, receive_quota):
+ """Constructs an instance.
+
+ Args:
+ request: _LogicalRequest instance.
+ stream_options: StreamOptions instance.
+ send_quota: Initial send quota.
+ receive_quota: Initial receive quota.
+ """
+
+ # Physical stream is responsible for masking.
+ stream_options.unmask_receive = False
+ Stream.__init__(self, request, stream_options)
+
+ self._send_closed = False
+ self._send_quota = send_quota
+ # - Protects _send_closed and _send_quota
+ # - Signals the thread waiting for send quota replenished
+ self._send_condition = threading.Condition()
+
+ # The opcode of the first frame in messages.
+ self._message_opcode = common.OPCODE_TEXT
+ # True when the last message was fragmented.
+ self._last_message_was_fragmented = False
+
+ self._receive_quota = receive_quota
+ self._write_inner_frame_semaphore = threading.Semaphore()
+
+ self._inner_message_builder = _InnerMessageBuilder()
+
+ def _create_inner_frame(self, opcode, payload, end=True):
+ frame = Frame(fin=end, opcode=opcode, payload=payload)
+ for frame_filter in self._options.outgoing_frame_filters:
+ frame_filter.filter(frame)
+
+ if len(payload) != len(frame.payload):
+ raise MuxUnexpectedException(
+ 'Mux extension must not be used after extensions which change '
+                'frame boundary')
+
+ first_byte = ((frame.fin << 7) | (frame.rsv1 << 6) |
+ (frame.rsv2 << 5) | (frame.rsv3 << 4) | frame.opcode)
+ return chr(first_byte) + frame.payload
+
+ def _write_inner_frame(self, opcode, payload, end=True):
+ payload_length = len(payload)
+ write_position = 0
+
+ try:
+            # An inner frame will be fragmented if there is not enough send
+ # quota. This semaphore ensures that fragmented inner frames are
+ # sent in order on the logical channel.
+ # Note that frames that come from other logical channels or
+ # multiplexing control blocks can be inserted between fragmented
+ # inner frames on the physical channel.
+ self._write_inner_frame_semaphore.acquire()
+
+ # Consume an octet quota when this is the first fragmented frame.
+ if opcode != common.OPCODE_CONTINUATION:
+ try:
+ self._send_condition.acquire()
+ while (not self._send_closed) and self._send_quota == 0:
+ self._send_condition.wait()
+
+ if self._send_closed:
+ raise BadOperationException(
+ 'Logical connection %d is closed' %
+ self._request.channel_id)
+
+ self._send_quota -= 1
+ finally:
+ self._send_condition.release()
+
+ while write_position < payload_length:
+ try:
+ self._send_condition.acquire()
+ while (not self._send_closed) and self._send_quota == 0:
+ self._logger.debug(
+ 'No quota. Waiting FlowControl message for %d.' %
+ self._request.channel_id)
+ self._send_condition.wait()
+
+ if self._send_closed:
+ raise BadOperationException(
+ 'Logical connection %d is closed' %
+                            self._request.channel_id)
+
+ remaining = payload_length - write_position
+ write_length = min(self._send_quota, remaining)
+ inner_frame_end = (
+ end and
+ (write_position + write_length == payload_length))
+
+ inner_frame = self._create_inner_frame(
+ opcode,
+ payload[write_position:write_position+write_length],
+ inner_frame_end)
+ self._send_quota -= write_length
+ self._logger.debug('Consumed quota=%d, remaining=%d' %
+ (write_length, self._send_quota))
+ finally:
+ self._send_condition.release()
+
+ # Writing data will block the worker so we need to release
+ # _send_condition before writing.
+ self._logger.debug('Sending inner frame: %r' % inner_frame)
+ self._request.connection.write(inner_frame)
+ write_position += write_length
+
+ opcode = common.OPCODE_CONTINUATION
+
+ except ValueError, e:
+ raise BadOperationException(e)
+ finally:
+ self._write_inner_frame_semaphore.release()
+
+ def replenish_send_quota(self, send_quota):
+ """Replenish send quota."""
+
+ try:
+ self._send_condition.acquire()
+ if self._send_quota + send_quota > 0x7FFFFFFFFFFFFFFF:
+ self._send_quota = 0
+ raise LogicalChannelError(
+ self._request.channel_id, _DROP_CODE_SEND_QUOTA_OVERFLOW)
+ self._send_quota += send_quota
+ self._logger.debug('Replenished send quota for channel id %d: %d' %
+ (self._request.channel_id, self._send_quota))
+ finally:
+ self._send_condition.notify()
+ self._send_condition.release()
+
+ def consume_receive_quota(self, amount):
+ """Consumes receive quota. Returns False on failure."""
+
+ if self._receive_quota < amount:
+ self._logger.debug('Violate quota on channel id %d: %d < %d' %
+ (self._request.channel_id,
+ self._receive_quota, amount))
+ return False
+ self._receive_quota -= amount
+ return True
+
+ def send_message(self, message, end=True, binary=False):
+ """Override Stream.send_message."""
+
+ if self._request.server_terminated:
+ raise BadOperationException(
+ 'Requested send_message after sending out a closing handshake')
+
+ if binary and isinstance(message, unicode):
+ raise BadOperationException(
+ 'Message for binary frame must be instance of str')
+
+ if binary:
+ opcode = common.OPCODE_BINARY
+ else:
+ opcode = common.OPCODE_TEXT
+ message = message.encode('utf-8')
+
+ for message_filter in self._options.outgoing_message_filters:
+ message = message_filter.filter(message, end, binary)
+
+ if self._last_message_was_fragmented:
+ if opcode != self._message_opcode:
+ raise BadOperationException('Message types are different in '
+ 'frames for the same message')
+ opcode = common.OPCODE_CONTINUATION
+ else:
+ self._message_opcode = opcode
+
+ self._write_inner_frame(opcode, message, end)
+ self._last_message_was_fragmented = not end
+
+ def _receive_frame(self):
+ """Overrides Stream._receive_frame.
+
+        In addition to calling Stream._receive_frame, this method adds the
+        amount of payload to the receive quota and sends a FlowControl message
+        to the client. We need to do this here because
+        Stream.receive_message() handles control frames internally.
+ """
+
+ opcode, payload, fin, rsv1, rsv2, rsv3 = Stream._receive_frame(self)
+ amount = len(payload)
+        # Replenish one extra octet when receiving the first fragmented frame.
+ if opcode != common.OPCODE_CONTINUATION:
+ amount += 1
+ self._receive_quota += amount
+ frame_data = _create_flow_control(self._request.channel_id,
+ amount)
+ self._logger.debug('Sending flow control for %d, replenished=%d' %
+ (self._request.channel_id, amount))
+ self._request.connection.write_control_data(frame_data)
+ return opcode, payload, fin, rsv1, rsv2, rsv3
+
+ def _get_message_from_frame(self, frame):
+ """Overrides Stream._get_message_from_frame.
+ """
+
+ try:
+ inner_message = self._inner_message_builder.build(frame)
+ except InvalidFrameException:
+ raise LogicalChannelError(
+ self._request.channel_id, _DROP_CODE_BAD_FRAGMENTATION)
+
+ if inner_message is None:
+ return None
+ self._original_opcode = inner_message.opcode
+ return inner_message.payload
+
+ def receive_message(self):
+ """Overrides Stream.receive_message."""
+
+ # Just call Stream.receive_message(), but catch
+ # LogicalConnectionClosedException, which is raised when the logical
+ # connection has closed gracefully.
+ try:
+ return Stream.receive_message(self)
+ except LogicalConnectionClosedException, e:
+ self._logger.debug('%s', e)
+ return None
+
+ def _send_closing_handshake(self, code, reason):
+ """Overrides Stream._send_closing_handshake."""
+
+ body = create_closing_handshake_body(code, reason)
+ self._logger.debug('Sending closing handshake for %d: (%r, %r)' %
+ (self._request.channel_id, code, reason))
+ self._write_inner_frame(common.OPCODE_CLOSE, body, end=True)
+
+ self._request.server_terminated = True
+
+ def send_ping(self, body=''):
+ """Overrides Stream.send_ping"""
+
+ self._logger.debug('Sending ping on logical channel %d: %r' %
+ (self._request.channel_id, body))
+ self._write_inner_frame(common.OPCODE_PING, body, end=True)
+
+ self._ping_queue.append(body)
+
+ def _send_pong(self, body):
+ """Overrides Stream._send_pong"""
+
+ self._logger.debug('Sending pong on logical channel %d: %r' %
+ (self._request.channel_id, body))
+ self._write_inner_frame(common.OPCODE_PONG, body, end=True)
+
+ def close_connection(self, code=common.STATUS_NORMAL_CLOSURE, reason=''):
+ """Overrides Stream.close_connection."""
+
+ # TODO(bashi): Implement
+ self._logger.debug('Closing logical connection %d' %
+ self._request.channel_id)
+ self._request.server_terminated = True
+
+ def stop_sending(self):
+        """Stops accepting new send operations (_write_inner_frame)."""
+
+ self._send_condition.acquire()
+ self._send_closed = True
+ self._send_condition.notify()
+ self._send_condition.release()
+
+
+class _OutgoingData(object):
+    """A structure that holds data to be sent via the physical connection and
+    the origin of the data.
+ """
+
+ def __init__(self, channel_id, data):
+ self.channel_id = channel_id
+ self.data = data
+
+
+class _PhysicalConnectionWriter(threading.Thread):
+ """A thread that is responsible for writing data to physical connection.
+
+ TODO(bashi): Make sure there is no thread-safety problem when the reader
+    thread reads data from the same socket at the same time.
+ """
+
+ def __init__(self, mux_handler):
+ """Constructs an instance.
+
+ Args:
+ mux_handler: _MuxHandler instance.
+ """
+
+ threading.Thread.__init__(self)
+ self._logger = util.get_class_logger(self)
+ self._mux_handler = mux_handler
+ self.setDaemon(True)
+
+ # When set, make this thread stop accepting new data, flush pending
+ # data and exit.
+ self._stop_requested = False
+ # The close code of the physical connection.
+ self._close_code = common.STATUS_NORMAL_CLOSURE
+ # Deque for passing write data. It's protected by _deque_condition
+ # until _stop_requested is set.
+ self._deque = collections.deque()
+ # - Protects _deque, _stop_requested and _close_code
+ # - Signals threads waiting for them to be available
+ self._deque_condition = threading.Condition()
+
+ def put_outgoing_data(self, data):
+ """Puts outgoing data.
+
+ Args:
+ data: _OutgoingData instance.
+
+ Raises:
+ BadOperationException: when the thread has been requested to
+ terminate.
+ """
+
+ try:
+ self._deque_condition.acquire()
+ if self._stop_requested:
+ raise BadOperationException('Cannot write data anymore')
+
+ self._deque.append(data)
+ self._deque_condition.notify()
+ finally:
+ self._deque_condition.release()
+
+ def _write_data(self, outgoing_data):
+ message = (_encode_channel_id(outgoing_data.channel_id) +
+ outgoing_data.data)
+ try:
+ self._mux_handler.physical_stream.send_message(
+ message=message, end=True, binary=True)
+ except Exception, e:
+ util.prepend_message_to_exception(
+ 'Failed to send message to %r: ' %
+ (self._mux_handler.physical_connection.remote_addr,), e)
+ raise
+
+ # TODO(bashi): It would be better to block the thread that sends
+ # control data as well.
+ if outgoing_data.channel_id != _CONTROL_CHANNEL_ID:
+ self._mux_handler.notify_write_data_done(outgoing_data.channel_id)
+
+ def run(self):
+ try:
+ self._deque_condition.acquire()
+ while not self._stop_requested:
+ if len(self._deque) == 0:
+ self._deque_condition.wait()
+ continue
+
+ outgoing_data = self._deque.popleft()
+
+ self._deque_condition.release()
+ self._write_data(outgoing_data)
+ self._deque_condition.acquire()
+
+ # Flush deque.
+ #
+ # At this point, self._deque_condition is always acquired.
+ try:
+ while len(self._deque) > 0:
+ outgoing_data = self._deque.popleft()
+ self._write_data(outgoing_data)
+ finally:
+ self._deque_condition.release()
+
+ # Close physical connection.
+ try:
+            # Don't wait for the response here. The response will be read
+ # by the reader thread.
+ self._mux_handler.physical_stream.close_connection(
+ self._close_code, wait_response=False)
+ except Exception, e:
+ util.prepend_message_to_exception(
+                'Failed to close the physical connection: ', e)
+ raise
+ finally:
+ self._mux_handler.notify_writer_done()
+
+ def stop(self, close_code=common.STATUS_NORMAL_CLOSURE):
+ """Stops the writer thread."""
+
+ self._deque_condition.acquire()
+ self._stop_requested = True
+ self._close_code = close_code
+ self._deque_condition.notify()
+ self._deque_condition.release()
+
+
+class _PhysicalConnectionReader(threading.Thread):
+ """A thread that is responsible for reading data from physical connection.
+ """
+
+ def __init__(self, mux_handler):
+ """Constructs an instance.
+
+ Args:
+ mux_handler: _MuxHandler instance.
+ """
+
+ threading.Thread.__init__(self)
+ self._logger = util.get_class_logger(self)
+ self._mux_handler = mux_handler
+ self.setDaemon(True)
+
+ def run(self):
+ while True:
+ try:
+ physical_stream = self._mux_handler.physical_stream
+ message = physical_stream.receive_message()
+ if message is None:
+ break
+ # Below happens only when a data message is received.
+ opcode = physical_stream.get_last_received_opcode()
+ if opcode != common.OPCODE_BINARY:
+ self._mux_handler.fail_physical_connection(
+ _DROP_CODE_INVALID_ENCAPSULATING_MESSAGE,
+ 'Received a text message on physical connection')
+ break
+
+ except ConnectionTerminatedException, e:
+ self._logger.debug('%s', e)
+ break
+
+ try:
+ self._mux_handler.dispatch_message(message)
+ except PhysicalConnectionError, e:
+ self._mux_handler.fail_physical_connection(
+ e.drop_code, e.message)
+ break
+ except LogicalChannelError, e:
+ self._mux_handler.fail_logical_channel(
+ e.channel_id, e.drop_code, e.message)
+ except Exception, e:
+ self._logger.debug(traceback.format_exc())
+ break
+
+ self._mux_handler.notify_reader_done()
+
+
+class _Worker(threading.Thread):
+ """A thread that is responsible for running the corresponding application
+ handler.
+ """
+
+ def __init__(self, mux_handler, request):
+ """Constructs an instance.
+
+ Args:
+ mux_handler: _MuxHandler instance.
+ request: _LogicalRequest instance.
+ """
+
+ threading.Thread.__init__(self)
+ self._logger = util.get_class_logger(self)
+ self._mux_handler = mux_handler
+ self._request = request
+ self.setDaemon(True)
+
+ def run(self):
+ self._logger.debug('Logical channel worker started. (id=%d)' %
+ self._request.channel_id)
+ try:
+ # Non-critical exceptions will be handled by dispatcher.
+ self._mux_handler.dispatcher.transfer_data(self._request)
+ except LogicalChannelError, e:
+ self._mux_handler.fail_logical_channel(
+ e.channel_id, e.drop_code, e.message)
+ finally:
+ self._mux_handler.notify_worker_done(self._request.channel_id)
+
+
+class _MuxHandshaker(hybi.Handshaker):
+ """Opening handshake processor for multiplexing."""
+
+ _DUMMY_WEBSOCKET_KEY = 'dGhlIHNhbXBsZSBub25jZQ=='
+
+ def __init__(self, request, dispatcher, send_quota, receive_quota):
+ """Constructs an instance.
+
+        Args:
+ request: _LogicalRequest instance.
+ dispatcher: Dispatcher instance (dispatch.Dispatcher).
+ send_quota: Initial send quota.
+ receive_quota: Initial receive quota.
+ """
+
+ hybi.Handshaker.__init__(self, request, dispatcher)
+ self._send_quota = send_quota
+ self._receive_quota = receive_quota
+
+ # Append headers which should not be included in handshake field of
+ # AddChannelRequest.
+        # TODO(bashi): Decide whether we should raise an exception when these
+        # headers are already included.
+ request.headers_in[common.UPGRADE_HEADER] = (
+ common.WEBSOCKET_UPGRADE_TYPE)
+ request.headers_in[common.SEC_WEBSOCKET_VERSION_HEADER] = (
+ str(common.VERSION_HYBI_LATEST))
+ request.headers_in[common.SEC_WEBSOCKET_KEY_HEADER] = (
+ self._DUMMY_WEBSOCKET_KEY)
+
+ def _create_stream(self, stream_options):
+ """Override hybi.Handshaker._create_stream."""
+
+ self._logger.debug('Creating logical stream for %d' %
+ self._request.channel_id)
+ return _LogicalStream(
+ self._request, stream_options, self._send_quota,
+ self._receive_quota)
+
+ def _create_handshake_response(self, accept):
+ """Override hybi._create_handshake_response."""
+
+ response = []
+
+ response.append('HTTP/1.1 101 Switching Protocols\r\n')
+
+ # Upgrade and Sec-WebSocket-Accept should be excluded.
+ response.append('%s: %s\r\n' % (
+ common.CONNECTION_HEADER, common.UPGRADE_CONNECTION_TYPE))
+ if self._request.ws_protocol is not None:
+ response.append('%s: %s\r\n' % (
+ common.SEC_WEBSOCKET_PROTOCOL_HEADER,
+ self._request.ws_protocol))
+ if (self._request.ws_extensions is not None and
+ len(self._request.ws_extensions) != 0):
+ response.append('%s: %s\r\n' % (
+ common.SEC_WEBSOCKET_EXTENSIONS_HEADER,
+ common.format_extensions(self._request.ws_extensions)))
+ response.append('\r\n')
+
+ return ''.join(response)
+
+ def _send_handshake(self, accept):
+ """Override hybi.Handshaker._send_handshake."""
+
+ # Don't send handshake response for the default channel
+ if self._request.channel_id == _DEFAULT_CHANNEL_ID:
+ return
+
+ handshake_response = self._create_handshake_response(accept)
+ frame_data = _create_add_channel_response(
+ self._request.channel_id,
+ handshake_response)
+ self._logger.debug('Sending handshake response for %d: %r' %
+ (self._request.channel_id, frame_data))
+ self._request.connection.write_control_data(frame_data)
+
+
+class _LogicalChannelData(object):
+    """A structure that holds information about a logical channel.
+ """
+
+ def __init__(self, request, worker):
+ self.request = request
+ self.worker = worker
+ self.drop_code = _DROP_CODE_NORMAL_CLOSURE
+ self.drop_message = ''
+
+
+class _HandshakeDeltaBase(object):
+ """A class that holds information for delta-encoded handshake."""
+
+ def __init__(self, headers):
+ self._headers = headers
+
+ def create_headers(self, delta=None):
+        """Creates request headers for an AddChannelRequest that has a
+        delta-encoded handshake.
+
+ Args:
+            delta: headers to be overridden.
+ """
+
+ headers = copy.copy(self._headers)
+ if delta:
+ for key, value in delta.items():
+ # The spec requires that a header with an empty value is
+ # removed from the delta base.
+ if len(value) == 0 and headers.has_key(key):
+ del headers[key]
+ else:
+ headers[key] = value
+ return headers
+
+
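+# A small illustration of the delta encoding handled above (header values are
+# placeholders): with a base of
+#   {'Host': 'server.example.com', 'Sec-WebSocket-Protocol': 'chat'}
+# a delta of
+#   {'Sec-WebSocket-Protocol': '', 'Sec-WebSocket-Extensions': 'foo'}
+# yields headers in which 'Sec-WebSocket-Protocol' is removed (empty value in
+# the delta) and 'Sec-WebSocket-Extensions' is added, while 'Host' is carried
+# over unchanged.
+
+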
+class _MuxHandler(object):
+ """Multiplexing handler. When a handler starts, it launches three
+    threads: the reader thread, the writer thread, and a worker thread.
+
+ The reader thread reads data from the physical stream, i.e., the
+ ws_stream object of the underlying websocket connection. The reader
+ thread interprets multiplexed frames and dispatches them to logical
+ channels. Methods of this class are mostly called by the reader thread.
+
+ The writer thread sends multiplexed frames which are created by
+ logical channels via the physical connection.
+
+ The worker thread launched at the starting point handles the
+ "Implicitly Opened Connection". If multiplexing handler receives
+ an AddChannelRequest and accepts it, the handler will launch a new worker
+ thread and dispatch the request to it.
+ """
+
+ def __init__(self, request, dispatcher):
+ """Constructs an instance.
+
+ Args:
+ request: mod_python request of the physical connection.
+ dispatcher: Dispatcher instance (dispatch.Dispatcher).
+ """
+
+ self.original_request = request
+ self.dispatcher = dispatcher
+ self.physical_connection = request.connection
+ self.physical_stream = request.ws_stream
+ self._logger = util.get_class_logger(self)
+ self._logical_channels = {}
+ self._logical_channels_condition = threading.Condition()
+ # Holds client's initial quota
+ self._channel_slots = collections.deque()
+ self._handshake_base = None
+ self._worker_done_notify_received = False
+ self._reader = None
+ self._writer = None
+
+ def start(self):
+ """Starts the handler.
+
+ Raises:
+            MuxUnexpectedException: when the handler has already started, or
+                when the opening handshake of the default channel fails.
+ """
+
+ if self._reader or self._writer:
+ raise MuxUnexpectedException('MuxHandler already started')
+
+ self._reader = _PhysicalConnectionReader(self)
+ self._writer = _PhysicalConnectionWriter(self)
+ self._reader.start()
+ self._writer.start()
+
+ # Create "Implicitly Opened Connection".
+ logical_connection = _LogicalConnection(self, _DEFAULT_CHANNEL_ID)
+ headers = copy.copy(self.original_request.headers_in)
+ # Add extensions for logical channel.
+ headers[common.SEC_WEBSOCKET_EXTENSIONS_HEADER] = (
+ common.format_extensions(
+ self.original_request.mux_processor.extensions()))
+ self._handshake_base = _HandshakeDeltaBase(headers)
+ logical_request = _LogicalRequest(
+ _DEFAULT_CHANNEL_ID,
+ self.original_request.method,
+ self.original_request.uri,
+ self.original_request.protocol,
+ self._handshake_base.create_headers(),
+ logical_connection)
+ # Client's send quota for the implicitly opened connection is zero,
+ # but we will send FlowControl later so set the initial quota to
+ # _INITIAL_QUOTA_FOR_CLIENT.
+ self._channel_slots.append(_INITIAL_QUOTA_FOR_CLIENT)
+ send_quota = self.original_request.mux_processor.quota()
+ if not self._do_handshake_for_logical_request(
+ logical_request, send_quota=send_quota):
+ raise MuxUnexpectedException(
+ 'Failed handshake on the default channel id')
+ self._add_logical_channel(logical_request)
+
+ # Send FlowControl for the implicitly opened connection.
+ frame_data = _create_flow_control(_DEFAULT_CHANNEL_ID,
+ _INITIAL_QUOTA_FOR_CLIENT)
+ logical_request.connection.write_control_data(frame_data)
+
+ def add_channel_slots(self, slots, send_quota):
+ """Adds channel slots.
+
+ Args:
+ slots: number of slots to be added.
+ send_quota: initial send quota for slots.
+ """
+
+ self._channel_slots.extend([send_quota] * slots)
+ # Send NewChannelSlot to client.
+ frame_data = _create_new_channel_slot(slots, send_quota)
+ self.send_control_data(frame_data)
+
+ def wait_until_done(self, timeout=None):
+        """Waits until all workers are done. Returns False if a timeout
+        occurred and True on success.
+
+ Args:
+ timeout: timeout in sec.
+ """
+
+ self._logical_channels_condition.acquire()
+ try:
+ while len(self._logical_channels) > 0:
+ self._logger.debug('Waiting workers(%d)...' %
+ len(self._logical_channels))
+ self._worker_done_notify_received = False
+ self._logical_channels_condition.wait(timeout)
+ if not self._worker_done_notify_received:
+ self._logger.debug('Waiting worker(s) timed out')
+ return False
+ finally:
+ self._logical_channels_condition.release()
+
+ # Flush pending outgoing data
+ self._writer.stop()
+ self._writer.join()
+
+ return True
+
+ def notify_write_data_done(self, channel_id):
+        """Called by the writer thread when a write operation has completed.
+
+ Args:
+            channel_id: target channel id.
+ """
+
+ try:
+ self._logical_channels_condition.acquire()
+ if channel_id in self._logical_channels:
+ channel_data = self._logical_channels[channel_id]
+ channel_data.request.connection.on_write_data_done()
+ else:
+ self._logger.debug('Seems that logical channel for %d has gone'
+ % channel_id)
+ finally:
+ self._logical_channels_condition.release()
+
+ def send_control_data(self, data):
+ """Sends data via the control channel.
+
+ Args:
+ data: data to be sent.
+ """
+
+ self._writer.put_outgoing_data(_OutgoingData(
+ channel_id=_CONTROL_CHANNEL_ID, data=data))
+
+ def send_data(self, channel_id, data):
+ """Sends data via given logical channel. This method is called by
+ worker threads.
+
+ Args:
+            channel_id: target channel id.
+ data: data to be sent.
+ """
+
+ self._writer.put_outgoing_data(_OutgoingData(
+ channel_id=channel_id, data=data))
+
+ def _send_drop_channel(self, channel_id, code=None, message=''):
+ frame_data = _create_drop_channel(channel_id, code, message)
+ self._logger.debug(
+ 'Sending drop channel for channel id %d' % channel_id)
+ self.send_control_data(frame_data)
+
+ def _send_error_add_channel_response(self, channel_id, status=None):
+ if status is None:
+ status = common.HTTP_STATUS_BAD_REQUEST
+
+ if status in _HTTP_BAD_RESPONSE_MESSAGES:
+ message = _HTTP_BAD_RESPONSE_MESSAGES[status]
+ else:
+ self._logger.debug('Response message for %d is not found' % status)
+ message = '???'
+
+ response = 'HTTP/1.1 %d %s\r\n\r\n' % (status, message)
+ frame_data = _create_add_channel_response(channel_id,
+ encoded_handshake=response,
+ encoding=0, rejected=True)
+ self.send_control_data(frame_data)
+
+ def _create_logical_request(self, block):
+ if block.channel_id == _CONTROL_CHANNEL_ID:
+ # TODO(bashi): Raise PhysicalConnectionError with code 2006
+ # instead of MuxUnexpectedException.
+ raise MuxUnexpectedException(
+ 'Received the control channel id (0) as objective channel '
+ 'id for AddChannel')
+
+ if block.encoding > _HANDSHAKE_ENCODING_DELTA:
+ raise PhysicalConnectionError(
+ _DROP_CODE_UNKNOWN_REQUEST_ENCODING)
+
+ method, path, version, headers = _parse_request_text(
+ block.encoded_handshake)
+ if block.encoding == _HANDSHAKE_ENCODING_DELTA:
+ headers = self._handshake_base.create_headers(headers)
+
+ connection = _LogicalConnection(self, block.channel_id)
+ request = _LogicalRequest(block.channel_id, method, path, version,
+ headers, connection)
+ return request
+
+ def _do_handshake_for_logical_request(self, request, send_quota=0):
+ try:
+ receive_quota = self._channel_slots.popleft()
+ except IndexError:
+ raise LogicalChannelError(
+ request.channel_id, _DROP_CODE_NEW_CHANNEL_SLOT_VIOLATION)
+
+ handshaker = _MuxHandshaker(request, self.dispatcher,
+ send_quota, receive_quota)
+ try:
+ handshaker.do_handshake()
+ except handshake.VersionException, e:
+ self._logger.info('%s', e)
+ self._send_error_add_channel_response(
+ request.channel_id, status=common.HTTP_STATUS_BAD_REQUEST)
+ return False
+ except handshake.HandshakeException, e:
+ # TODO(bashi): Should we _Fail the Logical Channel_ with 3001
+ # instead?
+ self._logger.info('%s', e)
+ self._send_error_add_channel_response(request.channel_id,
+ status=e.status)
+ return False
+ except handshake.AbortedByUserException, e:
+ self._logger.info('%s', e)
+ self._send_error_add_channel_response(request.channel_id)
+ return False
+
+ return True
+
+ def _add_logical_channel(self, logical_request):
+ try:
+ self._logical_channels_condition.acquire()
+ if logical_request.channel_id in self._logical_channels:
+ self._logger.debug('Channel id %d already exists' %
+ logical_request.channel_id)
+ raise PhysicalConnectionError(
+ _DROP_CODE_CHANNEL_ALREADY_EXISTS,
+ 'Channel id %d already exists' %
+ logical_request.channel_id)
+ worker = _Worker(self, logical_request)
+ channel_data = _LogicalChannelData(logical_request, worker)
+ self._logical_channels[logical_request.channel_id] = channel_data
+ worker.start()
+ finally:
+ self._logical_channels_condition.release()
+
+ def _process_add_channel_request(self, block):
+ try:
+ logical_request = self._create_logical_request(block)
+ except ValueError, e:
+ self._logger.debug('Failed to create logical request: %r' % e)
+ self._send_error_add_channel_response(
+ block.channel_id, status=common.HTTP_STATUS_BAD_REQUEST)
+ return
+ if self._do_handshake_for_logical_request(logical_request):
+ if block.encoding == _HANDSHAKE_ENCODING_IDENTITY:
+ # Update handshake base.
+ # TODO(bashi): Make sure this is the right place to update
+ # handshake base.
+ self._handshake_base = _HandshakeDeltaBase(
+ logical_request.headers_in)
+ self._add_logical_channel(logical_request)
+ else:
+ self._send_error_add_channel_response(
+ block.channel_id, status=common.HTTP_STATUS_BAD_REQUEST)
+
+ def _process_flow_control(self, block):
+ try:
+ self._logical_channels_condition.acquire()
+ if not block.channel_id in self._logical_channels:
+ return
+ channel_data = self._logical_channels[block.channel_id]
+ channel_data.request.ws_stream.replenish_send_quota(
+ block.send_quota)
+ finally:
+ self._logical_channels_condition.release()
+
+ def _process_drop_channel(self, block):
+ self._logger.debug(
+ 'DropChannel received for %d: code=%r, reason=%r' %
+ (block.channel_id, block.drop_code, block.drop_message))
+ try:
+ self._logical_channels_condition.acquire()
+ if not block.channel_id in self._logical_channels:
+ return
+ channel_data = self._logical_channels[block.channel_id]
+ channel_data.drop_code = _DROP_CODE_ACKNOWLEDGED
+
+ # Close the logical channel
+ channel_data.request.connection.set_read_state(
+ _LogicalConnection.STATE_TERMINATED)
+ channel_data.request.ws_stream.stop_sending()
+ finally:
+ self._logical_channels_condition.release()
+
+ def _process_control_blocks(self, parser):
+ for control_block in parser.read_control_blocks():
+ opcode = control_block.opcode
+ self._logger.debug('control block received, opcode: %d' % opcode)
+ if opcode == _MUX_OPCODE_ADD_CHANNEL_REQUEST:
+ self._process_add_channel_request(control_block)
+ elif opcode == _MUX_OPCODE_ADD_CHANNEL_RESPONSE:
+ raise PhysicalConnectionError(
+ _DROP_CODE_INVALID_MUX_CONTROL_BLOCK,
+ 'Received AddChannelResponse')
+ elif opcode == _MUX_OPCODE_FLOW_CONTROL:
+ self._process_flow_control(control_block)
+ elif opcode == _MUX_OPCODE_DROP_CHANNEL:
+ self._process_drop_channel(control_block)
+ elif opcode == _MUX_OPCODE_NEW_CHANNEL_SLOT:
+ raise PhysicalConnectionError(
+ _DROP_CODE_INVALID_MUX_CONTROL_BLOCK,
+ 'Received NewChannelSlot')
+ else:
+ raise MuxUnexpectedException(
+ 'Unexpected opcode %r' % opcode)
+
+ def _process_logical_frame(self, channel_id, parser):
+ self._logger.debug('Received a frame. channel id=%d' % channel_id)
+ try:
+ self._logical_channels_condition.acquire()
+ if not channel_id in self._logical_channels:
+ # We must ignore the message for an inactive channel.
+ return
+ channel_data = self._logical_channels[channel_id]
+ fin, rsv1, rsv2, rsv3, opcode, payload = parser.read_inner_frame()
+ consuming_byte = len(payload)
+ if opcode != common.OPCODE_CONTINUATION:
+ consuming_byte += 1
+ if not channel_data.request.ws_stream.consume_receive_quota(
+ consuming_byte):
+ # The client violates quota. Close logical channel.
+ raise LogicalChannelError(
+ channel_id, _DROP_CODE_SEND_QUOTA_VIOLATION)
+ header = create_header(opcode, len(payload), fin, rsv1, rsv2, rsv3,
+ mask=False)
+ frame_data = header + payload
+ channel_data.request.connection.append_frame_data(frame_data)
+ finally:
+ self._logical_channels_condition.release()
+
+ def dispatch_message(self, message):
+ """Dispatches message. The reader thread calls this method.
+
+ Args:
+            message: a message that contains an encapsulated frame.
+ Raises:
+ PhysicalConnectionError: if the message contains physical
+ connection level errors.
+ LogicalChannelError: if the message contains logical channel
+ level errors.
+ """
+
+ parser = _MuxFramePayloadParser(message)
+ try:
+ channel_id = parser.read_channel_id()
+ except ValueError, e:
+ raise PhysicalConnectionError(_DROP_CODE_CHANNEL_ID_TRUNCATED)
+ if channel_id == _CONTROL_CHANNEL_ID:
+ self._process_control_blocks(parser)
+ else:
+ self._process_logical_frame(channel_id, parser)
+
+ def notify_worker_done(self, channel_id):
+ """Called when a worker has finished.
+
+ Args:
+            channel_id: channel id associated with the worker.
+ """
+
+ self._logger.debug('Worker for channel id %d terminated' % channel_id)
+ try:
+ self._logical_channels_condition.acquire()
+ if not channel_id in self._logical_channels:
+ raise MuxUnexpectedException(
+ 'Channel id %d not found' % channel_id)
+ channel_data = self._logical_channels.pop(channel_id)
+ finally:
+ self._worker_done_notify_received = True
+ self._logical_channels_condition.notify()
+ self._logical_channels_condition.release()
+
+ if not channel_data.request.server_terminated:
+ self._send_drop_channel(
+ channel_id, code=channel_data.drop_code,
+ message=channel_data.drop_message)
+
+ def notify_reader_done(self):
+ """This method is called by the reader thread when the reader has
+ finished.
+ """
+
+ self._logger.debug(
+            'Terminating all logical connections waiting for incoming data '
+ '...')
+ self._logical_channels_condition.acquire()
+ for channel_data in self._logical_channels.values():
+ try:
+ channel_data.request.connection.set_read_state(
+ _LogicalConnection.STATE_TERMINATED)
+ except Exception:
+ self._logger.debug(traceback.format_exc())
+ self._logical_channels_condition.release()
+
+ def notify_writer_done(self):
+ """This method is called by the writer thread when the writer has
+ finished.
+ """
+
+ self._logger.debug(
+            'Terminating all logical connections waiting for write '
+ 'completion ...')
+ self._logical_channels_condition.acquire()
+ for channel_data in self._logical_channels.values():
+ try:
+ channel_data.request.connection.on_writer_done()
+ except Exception:
+ self._logger.debug(traceback.format_exc())
+ self._logical_channels_condition.release()
+
+ def fail_physical_connection(self, code, message):
+ """Fail the physical connection.
+
+ Args:
+ code: drop reason code.
+ message: drop message.
+ """
+
+ self._logger.debug('Failing the physical connection...')
+ self._send_drop_channel(_CONTROL_CHANNEL_ID, code, message)
+ self._writer.stop(common.STATUS_INTERNAL_ENDPOINT_ERROR)
+
+ def fail_logical_channel(self, channel_id, code, message):
+ """Fail a logical channel.
+
+ Args:
+ channel_id: channel id.
+ code: drop reason code.
+ message: drop message.
+ """
+
+ self._logger.debug('Failing logical channel %d...' % channel_id)
+ try:
+ self._logical_channels_condition.acquire()
+ if channel_id in self._logical_channels:
+ channel_data = self._logical_channels[channel_id]
+ # Close the logical channel. notify_worker_done() will be
+ # called later and it will send DropChannel.
+ channel_data.drop_code = code
+ channel_data.drop_message = message
+
+ channel_data.request.connection.set_read_state(
+ _LogicalConnection.STATE_TERMINATED)
+ channel_data.request.ws_stream.stop_sending()
+ else:
+ self._send_drop_channel(channel_id, code, message)
+ finally:
+ self._logical_channels_condition.release()
+
+
+def use_mux(request):
+ return hasattr(request, 'mux_processor') and (
+ request.mux_processor.is_active())
+
+
+def start(request, dispatcher):
+ mux_handler = _MuxHandler(request, dispatcher)
+ mux_handler.start()
+
+ mux_handler.add_channel_slots(_INITIAL_NUMBER_OF_CHANNEL_SLOTS,
+ _INITIAL_QUOTA_FOR_CLIENT)
+
+ mux_handler.wait_until_done()
+
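+
+# A hedged sketch of how this module is typically driven (the actual call site
+# lives in the dispatcher, outside this file): once the opening handshake has
+# negotiated the mux extension, the caller checks use_mux(request) and, if it
+# returns True, hands the physical connection over to start(), e.g.
+#
+#   if use_mux(request):
+#       start(request, dispatcher)  # dispatcher: dispatch.Dispatcher instance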
+
+# vi:sts=4 sw=4 et
diff --git a/testing/mochitest/pywebsocket/mod_pywebsocket/stream.py b/testing/mochitest/pywebsocket/mod_pywebsocket/stream.py
new file mode 100644
index 000000000..edc533279
--- /dev/null
+++ b/testing/mochitest/pywebsocket/mod_pywebsocket/stream.py
@@ -0,0 +1,57 @@
+# Copyright 2011, Google Inc.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+
+"""This file exports public symbols.
+"""
+
+
+from mod_pywebsocket._stream_base import BadOperationException
+from mod_pywebsocket._stream_base import ConnectionTerminatedException
+from mod_pywebsocket._stream_base import InvalidFrameException
+from mod_pywebsocket._stream_base import InvalidUTF8Exception
+from mod_pywebsocket._stream_base import UnsupportedFrameException
+from mod_pywebsocket._stream_hixie75 import StreamHixie75
+from mod_pywebsocket._stream_hybi import Frame
+from mod_pywebsocket._stream_hybi import Stream
+from mod_pywebsocket._stream_hybi import StreamOptions
+
+# These methods are intended to be used by WebSocket client developers to have
+# their implementations receive broken data in tests.
+from mod_pywebsocket._stream_hybi import create_close_frame
+from mod_pywebsocket._stream_hybi import create_header
+from mod_pywebsocket._stream_hybi import create_length_header
+from mod_pywebsocket._stream_hybi import create_ping_frame
+from mod_pywebsocket._stream_hybi import create_pong_frame
+from mod_pywebsocket._stream_hybi import create_binary_frame
+from mod_pywebsocket._stream_hybi import create_text_frame
+from mod_pywebsocket._stream_hybi import create_closing_handshake_body
+
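+
+# For illustration only (the handler and message below are placeholders):
+# server-side code normally does not construct these classes directly; a
+# handler receives an already-initialized Stream as request.ws_stream, e.g.
+#
+#   def web_socket_transfer_data(request):
+#       message = request.ws_stream.receive_message()
+#       if message is not None:
+#           request.ws_stream.send_message(message, binary=False)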
+
+# vi:sts=4 sw=4 et
diff --git a/testing/mochitest/pywebsocket/mod_pywebsocket/util.py b/testing/mochitest/pywebsocket/mod_pywebsocket/util.py
new file mode 100644
index 000000000..d224ae394
--- /dev/null
+++ b/testing/mochitest/pywebsocket/mod_pywebsocket/util.py
@@ -0,0 +1,416 @@
+# Copyright 2011, Google Inc.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+
+"""WebSocket utilities.
+"""
+
+
+import array
+import errno
+
+# Import hash classes from a module available and recommended for each Python
+# version and re-export those symbols. Use the sha and md5 modules in Python
+# 2.4, and the hashlib module in Python 2.6.
+try:
+ import hashlib
+ md5_hash = hashlib.md5
+ sha1_hash = hashlib.sha1
+except ImportError:
+ import md5
+ import sha
+ md5_hash = md5.md5
+ sha1_hash = sha.sha
+
+import StringIO
+import logging
+import os
+import re
+import socket
+import traceback
+import zlib
+
+try:
+ from mod_pywebsocket import fast_masking
+except ImportError:
+ pass
+
+
+def get_stack_trace():
+    """Get the current stack trace as a string.
+
+ This is needed to support Python 2.3.
+ TODO: Remove this when we only support Python 2.4 and above.
+ Use traceback.format_exc instead.
+ """
+
+ out = StringIO.StringIO()
+ traceback.print_exc(file=out)
+ return out.getvalue()
+
+
+def prepend_message_to_exception(message, exc):
+ """Prepend message to the exception."""
+
+ exc.args = (message + str(exc),)
+ return
+
+
+def __translate_interp(interp, cygwin_path):
+    """Translate an interpreter program path so that Win32 Python can run a
+    Cygwin program (e.g. perl). Note that paths containing spaces are not
+    supported, which is normally fine on Unix, where #!-scripts are written.
+    For Win32 Python, cygwin_path is the directory containing the Cygwin
+    binaries.
+
+    Args:
+        interp: interpreter command line
+        cygwin_path: directory name of the Cygwin binaries, or None
+    Returns:
+        translated interpreter command line.
+    """
+ if not cygwin_path:
+ return interp
+ m = re.match('^[^ ]*/([^ ]+)( .*)?', interp)
+ if m:
+ cmd = os.path.join(cygwin_path, m.group(1))
+ return cmd + m.group(2)
+ return interp
+
+
+def get_script_interp(script_path, cygwin_path=None):
+    """Gets the #!-interpreter command line from the script.
+
+    It also fixes the command path. When Cygwin Python is used, e.g. in
+    WebKit, it can run "/usr/bin/perl -wT hello.pl" directly.
+    When Win32 Python is used, e.g. in Chromium, it cannot, so
+    "/usr/bin/perl" is rewritten to "<cygwin_path>\perl.exe".
+
+    Args:
+        script_path: pathname of the script
+        cygwin_path: directory name of the Cygwin binaries, or None
+    Returns:
+        #!-interpreter command line, or None if the file is not a #!-script.
+    """
+ fp = open(script_path)
+ line = fp.readline()
+ fp.close()
+ m = re.match('^#!(.*)', line)
+ if m:
+ return __translate_interp(m.group(1), cygwin_path)
+ return None
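+
+
+# A usage sketch (the script and Cygwin path below are made-up examples):
+# for a script whose first line is "#!/usr/bin/perl -wT",
+#   get_script_interp('hello.pl')
+# returns the "/usr/bin/perl -wT" command line, while
+#   get_script_interp('hello.pl', r'C:\cygwin\bin')
+# rewrites "/usr/bin/perl" to the corresponding binary under C:\cygwin\bin so
+# that Win32 Python can spawn it.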
+
+
+def wrap_popen3_for_win(cygwin_path):
+ """Wrap popen3 to support #!-script on Windows.
+
+ Args:
+        cygwin_path: directory of the Cygwin binaries if the command path
+                     needs to be translated. None if no translation is
+                     required.
+ """
+
+ __orig_popen3 = os.popen3
+
+ def __wrap_popen3(cmd, mode='t', bufsize=-1):
+ cmdline = cmd.split(' ')
+ interp = get_script_interp(cmdline[0], cygwin_path)
+ if interp:
+ cmd = interp + ' ' + cmd
+ return __orig_popen3(cmd, mode, bufsize)
+
+ os.popen3 = __wrap_popen3
+
+
+def hexify(s):
+ return ' '.join(map(lambda x: '%02x' % ord(x), s))
+
+
+def get_class_logger(o):
+ return logging.getLogger(
+ '%s.%s' % (o.__class__.__module__, o.__class__.__name__))
+
+
+class NoopMasker(object):
+ """A masking object that has the same interface as RepeatedXorMasker but
+ just returns the string passed in without making any change.
+ """
+
+ def __init__(self):
+ pass
+
+ def mask(self, s):
+ return s
+
+
+class RepeatedXorMasker(object):
+    """A masking object that repeatedly XORs the string given to its mask
+    method with the masking bytes given to the constructor. The object
+    remembers the position in the masking bytes where the last mask call
+    ended and resumes from that point on the next call.
+ """
+
+ def __init__(self, masking_key):
+ self._masking_key = masking_key
+ self._masking_key_index = 0
+
+ def _mask_using_swig(self, s):
+ masked_data = fast_masking.mask(
+ s, self._masking_key, self._masking_key_index)
+ self._masking_key_index = (
+ (self._masking_key_index + len(s)) % len(self._masking_key))
+ return masked_data
+
+ def _mask_using_array(self, s):
+ result = array.array('B')
+ result.fromstring(s)
+
+        # Use temporary local variables to avoid the cost of repeated
+        # attribute access.
+ masking_key = map(ord, self._masking_key)
+ masking_key_size = len(masking_key)
+ masking_key_index = self._masking_key_index
+
+ for i in xrange(len(result)):
+ result[i] ^= masking_key[masking_key_index]
+ masking_key_index = (masking_key_index + 1) % masking_key_size
+
+ self._masking_key_index = masking_key_index
+
+ return result.tostring()
+
+ if 'fast_masking' in globals():
+ mask = _mask_using_swig
+ else:
+ mask = _mask_using_array
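+
+
+# A minimal usage sketch of RepeatedXorMasker (the 4-byte key below is an
+# arbitrary example). Per RFC 6455, each client-to-server payload is XOR
+# masked with a 4-byte key; applying the same key again from the same offset
+# restores the original bytes:
+#
+#   masker = RepeatedXorMasker('\x12\x34\x56\x78')
+#   masked = masker.mask('hello')
+#   # Unmasking must restart at key offset 0, so use a fresh instance:
+#   assert RepeatedXorMasker('\x12\x34\x56\x78').mask(masked) == 'hello'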
+
+
+# By making the wbits option negative, we suppress the CMF/FLG (2 octets) and
+# ADLER32 (4 octets) fields of zlib so that the zlib module can be used as a
+# plain deflate library. DICTID is not added as long as we don't set a
+# dictionary. An LZ77 window of 32K is used for both compression and
+# decompression. For decompression, 32K covers any window size the sender may
+# have used. For compression, using 32K means receivers must support 32K.
+#
+# The compression level is Z_DEFAULT_COMPRESSION. Decoding does not need to
+# match the level used for encoding.
+#
+# See zconf.h, deflate.c, inflate.c of the zlib library, and zlibmodule.c of
+# Python. See also RFC1950 (ZLIB 3.3).
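+#
+# A quick illustration with the zlib module directly (a sketch; not used by
+# the code below):
+#
+#   compress = zlib.compressobj(zlib.Z_DEFAULT_COMPRESSION, zlib.DEFLATED,
+#                               -zlib.MAX_WBITS)
+#   raw = compress.compress('hello') + compress.flush(zlib.Z_SYNC_FLUSH)
+#   decompress = zlib.decompressobj(-zlib.MAX_WBITS)
+#   assert decompress.decompress(raw) == 'hello'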
+
+
+class _Deflater(object):
+
+ def __init__(self, window_bits):
+ self._logger = get_class_logger(self)
+
+ self._compress = zlib.compressobj(
+ zlib.Z_DEFAULT_COMPRESSION, zlib.DEFLATED, -window_bits)
+
+ def compress(self, bytes):
+ compressed_bytes = self._compress.compress(bytes)
+ self._logger.debug('Compress input %r', bytes)
+ self._logger.debug('Compress result %r', compressed_bytes)
+ return compressed_bytes
+
+ def compress_and_flush(self, bytes):
+ compressed_bytes = self._compress.compress(bytes)
+ compressed_bytes += self._compress.flush(zlib.Z_SYNC_FLUSH)
+ self._logger.debug('Compress input %r', bytes)
+ self._logger.debug('Compress result %r', compressed_bytes)
+ return compressed_bytes
+
+ def compress_and_finish(self, bytes):
+ compressed_bytes = self._compress.compress(bytes)
+ compressed_bytes += self._compress.flush(zlib.Z_FINISH)
+ self._logger.debug('Compress input %r', bytes)
+ self._logger.debug('Compress result %r', compressed_bytes)
+ return compressed_bytes
+
+
+class _Inflater(object):
+
+ def __init__(self, window_bits):
+ self._logger = get_class_logger(self)
+ self._window_bits = window_bits
+
+ self._unconsumed = ''
+
+ self.reset()
+
+ def decompress(self, size):
+ if not (size == -1 or size > 0):
+ raise Exception('size must be -1 or positive')
+
+ data = ''
+
+ while True:
+ if size == -1:
+ data += self._decompress.decompress(self._unconsumed)
+                # See Python bug http://bugs.python.org/issue12050 to
+                # understand why the same code cannot be used to update
+                # self._unconsumed both here and in the else block.
+ self._unconsumed = ''
+ else:
+ data += self._decompress.decompress(
+ self._unconsumed, size - len(data))
+ self._unconsumed = self._decompress.unconsumed_tail
+ if self._decompress.unused_data:
+ # Encountered a last block (i.e. a block with BFINAL = 1) and
+ # found a new stream (unused_data). We cannot use the same
+ # zlib.Decompress object for the new stream. Create a new
+ # Decompress object to decompress the new one.
+ #
+ # It's fine to ignore unconsumed_tail if unused_data is not
+ # empty.
+ self._unconsumed = self._decompress.unused_data
+ self.reset()
+ if size >= 0 and len(data) == size:
+ # data is filled. Don't call decompress again.
+ break
+ else:
+ # Re-invoke Decompress.decompress to try to decompress all
+ # available bytes before invoking read which blocks until
+ # any new byte is available.
+ continue
+ else:
+ # Here, since unused_data is empty, even if unconsumed_tail is
+ # not empty, bytes of requested length are already in data. We
+ # don't have to "continue" here.
+ break
+
+ if data:
+ self._logger.debug('Decompressed %r', data)
+ return data
+
+ def append(self, data):
+ self._logger.debug('Appended %r', data)
+ self._unconsumed += data
+
+ def reset(self):
+ self._logger.debug('Reset')
+ self._decompress = zlib.decompressobj(-self._window_bits)
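+
+
+# A round-trip sketch pairing _Deflater with _Inflater (both defined above):
+#
+#   deflater = _Deflater(zlib.MAX_WBITS)
+#   inflater = _Inflater(zlib.MAX_WBITS)
+#   inflater.append(deflater.compress_and_flush('hello'))
+#   assert inflater.decompress(-1) == 'hello'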
+
+
+# Compresses/decompresses given octets using the method introduced in RFC1979.
+
+
+class _RFC1979Deflater(object):
+ """A compressor class that applies DEFLATE to given byte sequence and
+    """A compressor class that applies DEFLATE to a given byte sequence and
+    flushes using the algorithm described in RFC1979 section 2.1.
+
+ def __init__(self, window_bits, no_context_takeover):
+ self._deflater = None
+ if window_bits is None:
+ window_bits = zlib.MAX_WBITS
+ self._window_bits = window_bits
+ self._no_context_takeover = no_context_takeover
+
+ def filter(self, bytes, end=True, bfinal=False):
+ if self._deflater is None:
+ self._deflater = _Deflater(self._window_bits)
+
+ if bfinal:
+ result = self._deflater.compress_and_finish(bytes)
+ # Add a padding block with BFINAL = 0 and BTYPE = 0.
+ result = result + chr(0)
+ self._deflater = None
+ return result
+
+ result = self._deflater.compress_and_flush(bytes)
+ if end:
+            # Strip the last 4 octets, which are the LEN and NLEN fields of
+            # the non-compressed block added for Z_SYNC_FLUSH.
+ result = result[:-4]
+
+ if self._no_context_takeover and end:
+ self._deflater = None
+
+ return result
+
+
+class _RFC1979Inflater(object):
+    """A decompressor class for byte sequences compressed and flushed
+    following the algorithm described in RFC1979 section 2.1.
+ """
+
+ def __init__(self, window_bits=zlib.MAX_WBITS):
+ self._inflater = _Inflater(window_bits)
+
+ def filter(self, bytes):
+        # Restore the stripped LEN and NLEN fields of the non-compressed
+        # block added for Z_SYNC_FLUSH.
+ self._inflater.append(bytes + '\x00\x00\xff\xff')
+ return self._inflater.decompress(-1)
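+
+
+# A round-trip sketch for the RFC1979-style filters (both defined above):
+#
+#   deflater = _RFC1979Deflater(None, False)
+#   inflater = _RFC1979Inflater()
+#   assert inflater.filter(deflater.filter('hello')) == 'hello'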
+
+
+class DeflateSocket(object):
+    """A wrapper class for a socket object that intercepts send and recv to
+    perform deflate compression and decompression transparently.
+ """
+
+ # Size of the buffer passed to recv to receive compressed data.
+ _RECV_SIZE = 4096
+
+ def __init__(self, socket):
+ self._socket = socket
+
+ self._logger = get_class_logger(self)
+
+ self._deflater = _Deflater(zlib.MAX_WBITS)
+ self._inflater = _Inflater(zlib.MAX_WBITS)
+
+ def recv(self, size):
+        """Receives data from the socket specified at construction, up to the
+        specified size. Once any data is available, returns it even if it's
+        smaller than the specified size.
+ """
+
+ # TODO(tyoshino): Allow call with size=0. It should block until any
+ # decompressed data is available.
+ if size <= 0:
+ raise Exception('Non-positive size passed')
+ while True:
+ data = self._inflater.decompress(size)
+ if len(data) != 0:
+ return data
+
+ read_data = self._socket.recv(DeflateSocket._RECV_SIZE)
+ if not read_data:
+ return ''
+ self._inflater.append(read_data)
+
+ def sendall(self, bytes):
+ self.send(bytes)
+
+ def send(self, bytes):
+ self._socket.sendall(self._deflater.compress_and_flush(bytes))
+ return len(bytes)
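+
+
+# A usage sketch for DeflateSocket over a local socket pair (assumption:
+# a Unix-like platform where socket.socketpair is available; the socket
+# module is already imported above):
+#
+#   plain_a, plain_b = socket.socketpair()
+#   a, b = DeflateSocket(plain_a), DeflateSocket(plain_b)
+#   a.send('hello')               # compressed bytes travel over the wire
+#   assert b.recv(5) == 'hello'   # decompressed transparently on receipt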
+
+
+# vi:sts=4 sw=4 et
diff --git a/testing/mochitest/pywebsocket/mod_pywebsocket/xhr_benchmark_handler.py b/testing/mochitest/pywebsocket/mod_pywebsocket/xhr_benchmark_handler.py
new file mode 100644
index 000000000..6735c7e2a
--- /dev/null
+++ b/testing/mochitest/pywebsocket/mod_pywebsocket/xhr_benchmark_handler.py
@@ -0,0 +1,109 @@
+# Copyright 2014 Google Inc. All rights reserved.
+#
+# Use of this source code is governed by a BSD-style
+# license that can be found in the COPYING file or at
+# https://developers.google.com/open-source/licenses/bsd
+
+
+from mod_pywebsocket import util
+
+
+class XHRBenchmarkHandler(object):
+ def __init__(self, headers, rfile, wfile):
+ self._logger = util.get_class_logger(self)
+
+ self.headers = headers
+ self.rfile = rfile
+ self.wfile = wfile
+
+ def do_send(self):
+ content_length = int(self.headers.getheader('Content-Length'))
+
+ self._logger.debug('Requested to receive %s bytes', content_length)
+
+ RECEIVE_BLOCK_SIZE = 1024 * 1024
+
+ bytes_to_receive = content_length
+ while bytes_to_receive > 0:
+ bytes_to_receive_in_this_loop = bytes_to_receive
+ if bytes_to_receive_in_this_loop > RECEIVE_BLOCK_SIZE:
+ bytes_to_receive_in_this_loop = RECEIVE_BLOCK_SIZE
+ received_data = self.rfile.read(bytes_to_receive_in_this_loop)
+ if received_data != ('a' * bytes_to_receive_in_this_loop):
+ self._logger.debug('Request body verification failed')
+ return
+ bytes_to_receive -= len(received_data)
+ if bytes_to_receive < 0:
+ self._logger.debug('Received %d more bytes than expected' %
+ (-bytes_to_receive))
+ return
+
+ # Return the number of received bytes back to the client.
+ response_body = '%d' % content_length
+ self.wfile.write(
+ 'HTTP/1.1 200 OK\r\n'
+ 'Content-Type: text/html\r\n'
+ 'Content-Length: %d\r\n'
+ '\r\n%s' % (len(response_body), response_body))
+ self.wfile.flush()
+
+ def do_receive(self):
+ content_length = int(self.headers.getheader('Content-Length'))
+ request_body = self.rfile.read(content_length)
+
+ request_array = request_body.split(' ')
+ if len(request_array) < 2:
+ self._logger.debug('Malformed request body: %r', request_body)
+ return
+
+ # Parse the size parameter.
+ bytes_to_send = request_array[0]
+ try:
+ bytes_to_send = int(bytes_to_send)
+        except ValueError:
+ self._logger.debug('Malformed size parameter: %r', bytes_to_send)
+ return
+ self._logger.debug('Requested to send %s bytes', bytes_to_send)
+
+ # Parse the transfer encoding parameter.
+ chunked_mode = False
+ mode_parameter = request_array[1]
+ if mode_parameter == 'chunked':
+ self._logger.debug('Requested chunked transfer encoding')
+ chunked_mode = True
+ elif mode_parameter != 'none':
+ self._logger.debug('Invalid mode parameter: %r', mode_parameter)
+ return
+
+ # Write a header
+ response_header = (
+ 'HTTP/1.1 200 OK\r\n'
+ 'Content-Type: application/octet-stream\r\n')
+ if chunked_mode:
+ response_header += 'Transfer-Encoding: chunked\r\n\r\n'
+ else:
+ response_header += (
+ 'Content-Length: %d\r\n\r\n' % bytes_to_send)
+ self.wfile.write(response_header)
+ self.wfile.flush()
+
+ # Write a body
+ SEND_BLOCK_SIZE = 1024 * 1024
+
+ while bytes_to_send > 0:
+ bytes_to_send_in_this_loop = bytes_to_send
+ if bytes_to_send_in_this_loop > SEND_BLOCK_SIZE:
+ bytes_to_send_in_this_loop = SEND_BLOCK_SIZE
+
+ if chunked_mode:
+ self.wfile.write('%x\r\n' % bytes_to_send_in_this_loop)
+ self.wfile.write('a' * bytes_to_send_in_this_loop)
+ if chunked_mode:
+ self.wfile.write('\r\n')
+ self.wfile.flush()
+
+ bytes_to_send -= bytes_to_send_in_this_loop
+
+ if chunked_mode:
+ self.wfile.write('0\r\n\r\n')
+ self.wfile.flush()
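+
+
+# A minimal usage sketch (assumptions: Python 2, in-memory file objects stand
+# in for the real request and response streams, and mimetools.Message is used
+# to build a headers object that exposes getheader):
+#
+#   import StringIO
+#   import mimetools
+#   headers = mimetools.Message(
+#       StringIO.StringIO('Content-Length: 5\r\n\r\n'))
+#   handler = XHRBenchmarkHandler(
+#       headers, StringIO.StringIO('aaaaa'), StringIO.StringIO())
+#   handler.do_send()
+#   # handler.wfile.getvalue() now holds a complete HTTP/1.1 200 response
+#   # whose body is '5', the number of bytes received.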