uncompyled files

This commit is contained in:
Nico Melone
2025-04-30 08:48:49 -05:00
parent fa1dfeb4be
commit 658e970ce0
399 changed files with 127983 additions and 1021 deletions

File diff suppressed because it is too large Load Diff

View File

@@ -0,0 +1,7 @@
# uncompyle6 version 3.9.2
# Python bytecode version base 3.7.0 (3394)
# Decompiled from: Python 3.8.19 (default, Mar 20 2024, 15:27:52)
# [Clang 14.0.6 ]
# Embedded file name: /var/user/app/device_supervisorbak/device_supervisor/lib/Pyro4/__init__.py
# Compiled at: 2024-04-18 03:12:55
# Size of source mod 2**32: 2853 bytes

View File

@@ -0,0 +1,159 @@
# uncompyle6 version 3.9.2
# Python bytecode version base 3.7.0 (3394)
# Decompiled from: Python 3.8.19 (default, Mar 20 2024, 15:27:52)
# [Clang 14.0.6 ]
# Embedded file name: /var/user/app/device_supervisorbak/device_supervisor/lib/Pyro4/configuration.py
# Compiled at: 2024-04-18 03:12:55
# Size of source mod 2**32: 6892 bytes
"""
Configuration settings.
Pyro - Python Remote Objects. Copyright by Irmen de Jong (irmen@razorvine.net).
"""
from __future__ import print_function
import os, platform, pickle, socket
from Pyro4 import constants
class Configuration(object):
    """
    Holds all Pyro configuration settings as instance attributes.

    The recognized item names are listed in ``__slots__``; defaults are
    established by :meth:`reset`, which can also pull overrides from
    ``PYRO_``-prefixed environment variables.
    NOTE(review): decompiled source — comments reconstructed from behavior.
    """
    # Every recognized config item name (also the accepted PYRO_* env var suffixes).
    __slots__ = ('HOST', 'NS_HOST', 'NS_PORT', 'NS_BCPORT', 'NS_BCHOST', 'NS_AUTOCLEAN',
                 'COMPRESSION', 'SERVERTYPE', 'COMMTIMEOUT', 'POLLTIMEOUT', 'ONEWAY_THREADED',
                 'DETAILED_TRACEBACK', 'SOCK_REUSE', 'SOCK_NODELAY', 'PREFER_IP_VERSION',
                 'THREADPOOL_SIZE', 'THREADPOOL_SIZE_MIN', 'AUTOPROXY', 'PICKLE_PROTOCOL_VERSION',
                 'BROADCAST_ADDRS', 'NATHOST', 'NATPORT', 'MAX_MESSAGE_SIZE', 'FLAME_ENABLED',
                 'SERIALIZER', 'SERIALIZERS_ACCEPTED', 'LOGWIRE', 'METADATA', 'REQUIRE_EXPOSE',
                 'USE_MSG_WAITALL', 'JSON_MODULE', 'MAX_RETRIES', 'DILL_PROTOCOL_VERSION',
                 'ITER_STREAMING', 'ITER_STREAM_LIFETIME', 'ITER_STREAM_LINGER',
                 'SSL', 'SSL_REQUIRECLIENTCERT', 'SSL_CACERTS', 'SSL_SERVERCERT',
                 'SSL_SERVERKEY', 'SSL_SERVERKEYPASSWD', 'SSL_CLIENTCERT', 'SSL_CLIENTKEY',
                 'SSL_CLIENTKEYPASSWD')

    def __init__(self):
        # Populate every slot with its default (and possible env var override).
        self.reset()

    def reset(self, useenvironment=True):
        """
        Set default config items.
        If useenvironment is False, won't read environment variables settings (useful if you can't trust your env).
        """
        self.HOST = "localhost"
        self.NS_HOST = self.HOST
        self.NS_PORT = 9090
        self.NS_BCPORT = 9091
        self.NS_BCHOST = None
        self.NS_AUTOCLEAN = 0.0
        self.NATHOST = None
        self.NATPORT = 0
        self.COMPRESSION = False
        self.SERVERTYPE = "thread"
        self.COMMTIMEOUT = 0.0
        self.POLLTIMEOUT = 2.0
        self.SOCK_REUSE = True
        self.SOCK_NODELAY = False
        self.ONEWAY_THREADED = True
        self.DETAILED_TRACEBACK = False
        self.THREADPOOL_SIZE = 40
        self.THREADPOOL_SIZE_MIN = 4
        self.AUTOPROXY = True
        self.MAX_MESSAGE_SIZE = 0
        self.BROADCAST_ADDRS = "<broadcast>, 0.0.0.0"
        self.FLAME_ENABLED = False
        self.PREFER_IP_VERSION = 4
        self.SERIALIZER = "serpent"
        self.SERIALIZERS_ACCEPTED = "serpent,marshal,json"
        self.LOGWIRE = False
        self.PICKLE_PROTOCOL_VERSION = pickle.HIGHEST_PROTOCOL
        # dill is optional; -1 marks "dill not available".
        try:
            import dill
            self.DILL_PROTOCOL_VERSION = dill.HIGHEST_PROTOCOL
        except ImportError:
            self.DILL_PROTOCOL_VERSION = -1
        self.METADATA = True
        self.REQUIRE_EXPOSE = True
        # MSG_WAITALL is broken on Windows, so only use it elsewhere when present.
        self.USE_MSG_WAITALL = hasattr(socket, "MSG_WAITALL") and platform.system() != "Windows"
        self.JSON_MODULE = "json"
        self.MAX_RETRIES = 0
        self.ITER_STREAMING = True
        self.ITER_STREAM_LIFETIME = 0.0
        self.ITER_STREAM_LINGER = 30.0
        self.SSL = False
        self.SSL_SERVERCERT = ""
        self.SSL_SERVERKEY = ""
        self.SSL_SERVERKEYPASSWD = ""
        self.SSL_REQUIRECLIENTCERT = False
        self.SSL_CLIENTCERT = ""
        self.SSL_CLIENTKEY = ""
        self.SSL_CLIENTKEYPASSWD = ""
        self.SSL_CACERTS = ""
        if useenvironment:
            # Environment variables (PYRO_<NAME>) override the defaults above.
            PREFIX = "PYRO_"
            for symbol in self.__slots__:
                if PREFIX + symbol in os.environ:
                    value = getattr(self, symbol)
                    envvalue = os.environ[PREFIX + symbol]
                    if value is not None:
                        valuetype = type(value)
                        if valuetype is bool:
                            # Booleans can't be cast directly from strings.
                            envvalue = envvalue.lower()
                            if envvalue in ('0', 'off', 'no', 'false'):
                                envvalue = False
                            else:
                                if envvalue in ('1', 'yes', 'on', 'true'):
                                    envvalue = True
                                else:
                                    raise ValueError("invalid boolean value: %s%s=%s" % (PREFIX, symbol, envvalue))
                        else:
                            # Cast the env string to the type of the default value.
                            envvalue = valuetype(envvalue)
                    setattr(self, symbol, envvalue)
        # Store the accepted serializers as a set for fast membership tests.
        self.SERIALIZERS_ACCEPTED = set(self.SERIALIZERS_ACCEPTED.split(","))

    def asDict(self):
        """returns the current config as a regular dictionary"""
        result = {}
        for item in self.__slots__:
            result[item] = getattr(self, item)
        return result

    def parseAddressesString(self, addresses):
        """
        Parses the addresses string which contains one or more ip addresses separated by a comma.
        Returns a sequence of these addresses. '' is replaced by the empty string.
        """
        result = []
        for addr in addresses.split(","):
            addr = addr.strip()
            if addr == "''":
                addr = ""
            result.append(addr)
        return result

    def dump(self):
        # Render a human-readable report of versions and all active settings.
        if hasattr(platform, "python_implementation"):
            implementation = platform.python_implementation()
        else:
            implementation = "???"
        config = self.asDict()
        # The log settings live only in the environment, not in __slots__.
        config["LOGFILE"] = os.environ.get("PYRO_LOGFILE")
        config["LOGLEVEL"] = os.environ.get("PYRO_LOGLEVEL")
        result = ["Pyro version: %s" % constants.VERSION,
            "Loaded from: %s" % os.path.dirname(__file__),
            "Python version: %s %s (%s, %s)" % (implementation, platform.python_version(), platform.system(), os.name),
            "Protocol version: %d" % constants.PROTOCOL_VERSION,
            "Currently active configuration settings:"]
        for n, v in sorted(config.items()):
            result.append("%s = %s" % (n, v))
        return "\n".join(result)
# The module-level configuration singleton the rest of Pyro4 imports and uses.
config = Configuration()


def main():
    """Command-line entry point: print the active Pyro configuration."""
    print(config.dump())


if __name__ == "__main__":
    main()

View File

@@ -0,0 +1,17 @@
# uncompyle6 version 3.9.2
# Python bytecode version base 3.7.0 (3394)
# Decompiled from: Python 3.8.19 (default, Mar 20 2024, 15:27:52)
# [Clang 14.0.6 ]
# Embedded file name: /var/user/app/device_supervisorbak/device_supervisor/lib/Pyro4/constants.py
# Compiled at: 2024-04-18 03:12:55
# Size of source mod 2**32: 497 bytes
"""
Definitions of various hard coded constants.
Pyro - Python Remote Objects. Copyright by Irmen de Jong (irmen@razorvine.net).
"""
VERSION = "4.82"  # Pyro library version string
DAEMON_NAME = "Pyro.Daemon"  # reserved object name of the daemon itself
NAMESERVER_NAME = "Pyro.NameServer"  # reserved object name of the name server
FLAME_NAME = "Pyro.Flame"  # reserved object name of the Flame server
PROTOCOL_VERSION = 48  # version number of the wire protocol spoken

File diff suppressed because it is too large Load Diff

View File

@@ -0,0 +1,51 @@
# uncompyle6 version 3.9.2
# Python bytecode version base 3.7.0 (3394)
# Decompiled from: Python 3.8.19 (default, Mar 20 2024, 15:27:52)
# [Clang 14.0.6 ]
# Embedded file name: /var/user/app/device_supervisorbak/device_supervisor/lib/Pyro4/errors.py
# Compiled at: 2024-04-18 03:12:55
# Size of source mod 2**32: 1392 bytes
"""
Definition of the various exceptions that are used in Pyro.
Pyro - Python Remote Objects. Copyright by Irmen de Jong (irmen@razorvine.net).
"""
class PyroError(Exception):
    """Generic base of all Pyro-specific errors."""


class CommunicationError(PyroError):
    """Base class for the errors related to network communication problems."""


class ConnectionClosedError(CommunicationError):
    """The connection was unexpectedly closed."""


class TimeoutError(CommunicationError):
    """
    A call could not be completed within the set timeout period,
    or the network caused a timeout.
    """


class ProtocolError(CommunicationError):
    """Pyro received a message that didn't match the active Pyro network protocol, or there was a protocol related error."""


class MessageTooLargeError(ProtocolError):
    """Pyro received a message or was trying to send a message that exceeds the maximum message size as configured."""


class NamingError(PyroError):
    """There was a problem related to the name server or object names."""


class DaemonError(PyroError):
    """The Daemon encountered a problem."""


class SecurityError(PyroError):
    """A security related error occurred."""


class SerializeError(ProtocolError):
    """Something went wrong while (de)serializing data."""

View File

@@ -0,0 +1,212 @@
# uncompyle6 version 3.9.2
# Python bytecode version base 3.7.0 (3394)
# Decompiled from: Python 3.8.19 (default, Mar 20 2024, 15:27:52)
# [Clang 14.0.6 ]
# Embedded file name: /var/user/app/device_supervisorbak/device_supervisor/lib/Pyro4/futures.py
# Compiled at: 2024-04-18 03:12:55
# Size of source mod 2**32: 8749 bytes
"""
Support for Futures (asynchronously executed callables).
If you're using Python 3.2 or newer, also see
http://docs.python.org/3/library/concurrent.futures.html#future-objects
Pyro - Python Remote Objects. Copyright by Irmen de Jong (irmen@razorvine.net).
"""
import sys, functools, logging, threading, time
# Public API of this module.
__all__ = [
 "Future", "FutureResult", "_ExceptionWrapper"]
# Module-level logger.
log = logging.getLogger("Pyro4.futures")
class Future(object):
    """
    Holds a callable that will be executed asynchronously and provide its
    result value some time in the future.
    This is a more general implementation than the AsyncRemoteMethod, which
    only works with Pyro proxies (and provides a bit different syntax).
    This class has a few extra features as well (delay, canceling).
    """

    def __init__(self, somecallable):
        self.callable = somecallable
        self.chain = []             # (callable, args, kwargs) follow-ups, run on the result
        self.exceptionhandler = None
        self.call_delay = 0         # seconds to wait before actually evaluating
        self.cancelled = False
        self.completed = False

    def __call__(self, *args, **kwargs):
        """
        Start the future call with the provided arguments.
        Control flow returns immediately, with a FutureResult object.
        """
        # BUGFIX: the decompiled source had `if not (self.completed or hasattr(self, "chain"))`,
        # which inverted the guard; a future may only ever be evaluated once.
        if self.completed or not hasattr(self, "chain"):
            raise RuntimeError("the future has already been evaluated")
        if self.cancelled:
            raise RuntimeError("the future has been cancelled")
        chain = self.chain
        del self.chain      # make sure the chain can't be modified anymore once started
        result = FutureResult()
        thread = threading.Thread(target=self.__asynccall, args=(result, chain, args, kwargs))
        thread.daemon = True
        thread.start()
        return result

    def __asynccall(self, asyncresult, chain, args, kwargs):
        # Honor the delay, but wake up at least every 2 seconds to notice cancellation.
        # BUGFIX: the decompiled source computed `delay = self.cancelled or min(...)`,
        # which sleeps for a bool once cancelled; restore the proper loop condition.
        while self.call_delay > 0 and not self.cancelled:
            delay = min(self.call_delay, 2)
            time.sleep(delay)
            self.call_delay -= delay
        if self.cancelled:
            self.completed = True
            asyncresult.set_cancelled()
            return
        try:
            self.completed = True
            self.cancelled = False
            value = self.callable(*args, **kwargs)
            # Run the follow-up calls in the chain, passing each the previous result.
            for call, args, kwargs in chain:
                call = functools.partial(call, value)
                value = call(*args, **kwargs)
            asyncresult.value = value
        except Exception as x:
            if self.exceptionhandler:
                self.exceptionhandler(x)
            asyncresult.value = _ExceptionWrapper(x)

    def delay(self, seconds):
        """
        Delay the evaluation of the future for the given number of seconds.
        Return True if successful otherwise False if the future has already been evaluated.
        """
        if self.completed:
            return False
        self.call_delay = seconds
        return True

    def cancel(self):
        """
        Cancels the execution of the future altogether.
        If the execution hasn't been started yet, the cancellation is successful and returns True.
        Otherwise, it failed and returns False.
        """
        if self.completed:
            return False
        self.cancelled = True
        return True

    def then(self, call, *args, **kwargs):
        """
        Add a callable to the call chain, to be invoked when the results become available.
        The result of the current call will be used as the first argument for the next call.
        Optional extra arguments can be provided in args and kwargs.
        Returns self so you can easily chain then() calls.
        """
        self.chain.append((call, args, kwargs))
        return self

    def iferror(self, exceptionhandler):
        """
        Specify the exception handler to be invoked (with the exception object as only
        argument) when calculating the result raises an exception.
        If no exception handler is set, any exception raised in the asynchronous call will be silently ignored.
        Returns self so you can easily chain other calls.
        """
        self.exceptionhandler = exceptionhandler
        return self
class FutureResult(object):
    """
    The result object for asynchronous Pyro calls.
    Unfortunately it should be similar to the more general Future class but
    it is still somewhat limited (no delay, no canceling).
    """

    def __init__(self):
        self.__ready = threading.Event()    # set once the value (or exception) arrives
        self.callchain = []                 # follow-up calls queued before the value exists
        self.valueLock = threading.Lock()
        self.exceptionhandler = None

    def wait(self, timeout=None):
        """
        Wait for the result to become available, with optional timeout (in seconds).
        Returns True if the result is ready, or False if it still isn't ready.
        """
        flag = self.__ready.wait(timeout)
        if flag is None:
            # very old Pythons returned None from Event.wait()
            return self.__ready.is_set()
        return flag

    @property
    def ready(self):
        """Boolean that contains the readiness of the asynchronous result"""
        return self.__ready.is_set()

    def get_value(self):
        # Block until the value arrives; re-raise if it turned out to be an exception.
        self.__ready.wait()
        if isinstance(self.__value, _ExceptionWrapper):
            self.__value.raiseIt()
        else:
            return self.__value

    def set_value(self, value):
        with self.valueLock:
            self.__value = value
            if isinstance(value, _ExceptionWrapper):
                # the call failed: hand the wrapped exception to the handler, if any
                if self.exceptionhandler:
                    self.exceptionhandler(value.exception)
            else:
                # feed the result through the queued follow-up calls
                for call, cargs, ckwargs in self.callchain:
                    bound = functools.partial(call, self.__value)
                    self.__value = bound(*cargs, **ckwargs)
                    if isinstance(self.__value, _ExceptionWrapper):
                        break
                self.callchain = []
            self.__ready.set()

    value = property(get_value, set_value, None, "The result value of the call. Reading it will block if not available yet.")

    def set_cancelled(self):
        # Deliver a cancellation as a wrapped RuntimeError result.
        self.set_value(_ExceptionWrapper(RuntimeError("future has been cancelled")))

    def then(self, call, *args, **kwargs):
        """
        Add a callable to the call chain, to be invoked when the results become available.
        The result of the current call will be used as the first argument for the next call.
        Optional extra arguments can be provided in args and kwargs.
        Returns self so you can easily chain then() calls.
        """
        with self.valueLock:
            if self.__ready.is_set():
                # value already arrived: run the follow-up immediately
                bound = functools.partial(call, self.__value)
                self.__value = bound(*args, **kwargs)
            else:
                self.callchain.append((call, args, kwargs))
        return self

    def iferror(self, exceptionhandler):
        """
        Specify the exception handler to be invoked (with the exception object as only
        argument) when asking for the result raises an exception.
        If no exception handler is set, any exception result will be silently ignored (unless
        you explicitly ask for the value). Returns self so you can easily chain other calls.
        """
        self.exceptionhandler = exceptionhandler
        return self
class _ExceptionWrapper(object):
__doc__ = "Class that wraps a remote exception. If this is returned, Pyro will\n re-throw the exception on the receiving side. Usually this is taken care of\n by a special response message flag, but in the case of batched calls this\n flag is useless and another mechanism was needed."
def __init__(self, exception):
self.exception = exception
def raiseIt(self):
from Pyro4.util import fixIronPythonExceptionForPickle
if sys.platform == "cli":
fixIronPythonExceptionForPickle(self.exception, False)
raise self.exception
def __serialized_dict__(self):
"""serialized form as a dictionary"""
from Pyro4.util import SerializerBase
return {'__class__':"Pyro4.futures._ExceptionWrapper",
'exception':(SerializerBase.class_to_dict)(self.exception)}

View File

@@ -0,0 +1,199 @@
# uncompyle6 version 3.9.2
# Python bytecode version base 3.7.0 (3394)
# Decompiled from: Python 3.8.19 (default, Mar 20 2024, 15:27:52)
# [Clang 14.0.6 ]
# Embedded file name: /var/user/app/device_supervisorbak/device_supervisor/lib/Pyro4/message.py
# Compiled at: 2024-04-18 03:12:55
# Size of source mod 2**32: 11082 bytes
"""
The pyro wire protocol message.
Pyro - Python Remote Objects. Copyright by Irmen de Jong (irmen@razorvine.net).
"""
import hashlib, hmac, struct, logging, sys, zlib
from Pyro4 import errors, constants
from Pyro4.configuration import config
# Public API of this module.
__all__ = [
 "Message", "secure_compare"]
log = logging.getLogger("Pyro4.message")
# Numeric codes for the wire message types.
MSG_CONNECT = 1
MSG_CONNECTOK = 2
MSG_CONNECTFAIL = 3
MSG_INVOKE = 4
MSG_RESULT = 5
MSG_PING = 6
# Bit values for the message flags header field (combined with | and tested with &).
FLAGS_EXCEPTION = 1
FLAGS_COMPRESSED = 2
FLAGS_ONEWAY = 4
FLAGS_BATCH = 8
FLAGS_META_ON_CONNECT = 16
FLAGS_ITEMSTREAMRESULT = 32
FLAGS_KEEPSERIALIZED = 64
class Message(object):
    """
    Pyro wire protocol message.

    Wire messages consist of a fixed size header, an optional set of annotation
    chunks, and then the payload data. This class doesn't deal with the payload
    data: (de)serialization and handling of that data is done elsewhere.
    Annotation chunks are only parsed, except the 'HMAC' chunk: that is created
    and validated because it is used as a message digest.

    The header format is (struct format "!4sHHHHiHHHH")::

        4   id ('PYRO')
        2   protocol version
        2   message type
        2   message flags
        2   sequence number
        4   data length (i.e. 2 Gb data size limitation)
        2   data serialization format (serializer id)
        2   annotations length (total of all chunks, 0 if no annotation chunks present)
        2   (reserved)
        2   checksum

    After the header, zero or more annotation chunks may follow, of the format::

        4   id (ASCII)
        2   chunk length
        x   annotation chunk databytes

    After that, the actual payload data bytes follow.

    The sequence number is used to check if response messages correspond to the
    actual request message. This prevents the situation where Pyro would perhaps
    return the response data from another remote call, for instance if the
    socket data stream gets out of sync.

    The header checksum is a simple sum of the header fields to make reasonably
    sure that we are dealing with an actual correct PYRO protocol header and not
    some random data that happens to start with the 'PYRO' identifier.

    Pyro reserves two annotation chunks: 'HMAC' contains the hmac digest of the
    message data bytes and all annotation chunk data bytes (except those of the
    HMAC chunk itself); 'CORR' contains the correlation id (guid bytes). Other
    chunk names are free to use for custom purposes.
    """
    __slots__ = ['type', 'flags', 'seq', 'data', 'data_size', 'serializer_id', 'annotations',
                 'annotations_size', 'hmac_key']
    header_format = "!4sHHHHiHHHH"
    header_size = struct.calcsize(header_format)
    checksum_magic = 13545  # folded into the checksum so all-zero headers don't validate

    def __init__(self, msgType, databytes, serializer_id, flags, seq, annotations=None, hmac_key=None):
        """Build a message; adds the 'HMAC' annotation when a hmac_key is given."""
        self.type = msgType
        self.flags = flags
        self.seq = seq
        self.data = databytes
        self.data_size = len(self.data)
        self.serializer_id = serializer_id
        self.annotations = dict(annotations or {})
        self.hmac_key = hmac_key
        if self.hmac_key:
            self.annotations["HMAC"] = self.hmac()
        # every annotation chunk carries a 6-byte header (4 byte id + 2 byte length)
        self.annotations_size = sum([6 + len(v) for v in self.annotations.values()])
        if 0 < config.MAX_MESSAGE_SIZE < self.data_size + self.annotations_size:
            raise errors.MessageTooLargeError("max message size exceeded (%d where max=%d)" % (
                self.data_size + self.annotations_size, config.MAX_MESSAGE_SIZE))

    def __repr__(self):
        return "<%s.%s at %x; type=%d flags=%d seq=%d datasize=%d #ann=%d>" % (
            self.__module__, self.__class__.__name__, id(self), self.type, self.flags, self.seq, self.data_size, len(self.annotations))

    def to_bytes(self):
        """creates a byte stream containing the header followed by annotations (if any) followed by the data"""
        return self.__header_bytes() + self.__annotations_bytes() + self.data

    def __header_bytes(self):
        if not 0 <= self.data_size <= 2147483647:
            raise ValueError("invalid message size (outside range 0..2Gb)")
        # additive 16-bit checksum over the header fields
        checksum = (self.type + constants.PROTOCOL_VERSION + self.data_size + self.annotations_size +
                    self.serializer_id + self.flags + self.seq + self.checksum_magic) & 65535
        return struct.pack(self.header_format, b'PYRO', constants.PROTOCOL_VERSION, self.type, self.flags,
                           self.seq, self.data_size, self.serializer_id, self.annotations_size, 0, checksum)

    def __annotations_bytes(self):
        if self.annotations:
            a = []
            for k, v in self.annotations.items():
                if len(k) != 4:
                    raise errors.ProtocolError("annotation key must be of length 4")
                if sys.version_info >= (3, 0):
                    k = k.encode("ASCII")
                a.append(struct.pack("!4sH", k, len(v)))
                a.append(v)
            return b''.join(a)
        return b''

    def send(self, connection):
        """send the message as bytes over the connection"""
        connection.send(self.__header_bytes())
        if self.annotations:
            connection.send(self.__annotations_bytes())
        connection.send(self.data)

    @classmethod
    def from_header(cls, headerData):
        """Parses a message header. Does not yet process the annotations chunks and message data."""
        if not headerData or len(headerData) != cls.header_size:
            raise errors.ProtocolError("header data size mismatch")
        tag, ver, msg_type, flags, seq, data_size, serializer_id, anns_size, _, checksum = struct.unpack(cls.header_format, headerData)
        if tag != b'PYRO' or ver != constants.PROTOCOL_VERSION:
            raise errors.ProtocolError("invalid data or unsupported protocol version")
        if checksum != (msg_type + ver + data_size + anns_size + flags + serializer_id + seq + cls.checksum_magic) & 65535:
            raise errors.ProtocolError("header checksum mismatch")
        msg = Message(msg_type, b'', serializer_id, flags, seq)
        msg.data_size = data_size
        msg.annotations_size = anns_size
        return msg

    @classmethod
    def recv(cls, connection, requiredMsgTypes=None, hmac_key=None):
        """
        Receives a pyro message from a given connection.
        Accepts the given message types (None=any, or pass a sequence).
        Also reads annotation chunks and the actual payload data.
        Validates a HMAC chunk if present.
        """
        msg = cls.from_header(connection.recv(cls.header_size))
        msg.hmac_key = hmac_key
        if 0 < config.MAX_MESSAGE_SIZE < msg.data_size + msg.annotations_size:
            errorMsg = "max message size exceeded (%d where max=%d)" % (msg.data_size + msg.annotations_size, config.MAX_MESSAGE_SIZE)
            log.error("connection " + str(connection) + ": " + errorMsg)
            connection.close()      # message stream is now unusable, drop the connection
            exc = errors.MessageTooLargeError(errorMsg)
            exc.pyroMsg = msg
            raise exc
        if requiredMsgTypes and msg.type not in requiredMsgTypes:
            err = "invalid msg type %d received" % msg.type
            log.error(err)
            exc = errors.ProtocolError(err)
            exc.pyroMsg = msg
            raise exc
        if msg.annotations_size:
            # read and parse all annotation chunks
            annotations_data = connection.recv(msg.annotations_size)
            msg.annotations = {}
            i = 0
            while i < msg.annotations_size:
                # BUGFIX: the decompiled source sliced with annotations_data[i[:i + 6]]
                # (subscripting an int — a guaranteed TypeError); restore proper slices.
                anno, length = struct.unpack("!4sH", annotations_data[i:i + 6])
                if sys.version_info >= (3, 0):
                    anno = anno.decode("ASCII")
                msg.annotations[anno] = annotations_data[i + 6:i + 6 + length]
                if sys.platform == "cli":
                    msg.annotations[anno] = bytes(msg.annotations[anno])
                i += 6 + length
        msg.data = connection.recv(msg.data_size)
        if "HMAC" in msg.annotations and hmac_key:
            # BUGFIX: the decompiled source did `exc = secure_compare(...) or SecurityError(...)`
            # and raised unconditionally — even for a *matching* digest. Only raise on mismatch.
            if not secure_compare(msg.annotations["HMAC"], msg.hmac()):
                exc = errors.SecurityError("message hmac mismatch")
                exc.pyroMsg = msg
                raise exc
        elif ("HMAC" in msg.annotations) != bool(hmac_key):
            # not allowed: message contains a hmac but we have no key, or vice versa
            err = "hmac key config not symmetric"
            log.warning(err)
            exc = errors.SecurityError(err)
            exc.pyroMsg = msg
            raise exc
        return msg

    def hmac(self):
        """returns the hmac of the data and the annotation chunk values (except HMAC chunk itself)"""
        mac = hmac.new(self.hmac_key, self.data, digestmod=hashlib.sha1)
        for k, v in sorted(self.annotations.items()):
            if k != "HMAC":
                mac.update(v)
        if sys.platform != "cli":
            return mac.digest()
        return bytes(mac.digest())

    @staticmethod
    def ping(pyroConnection, hmac_key=None):
        """Convenience method to send a 'ping' message and wait for the 'pong' response"""
        ping = Message(MSG_PING, b'ping', 42, 0, 0, hmac_key=hmac_key)
        pyroConnection.send(ping.to_bytes())
        Message.recv(pyroConnection, [MSG_PING])

    def decompress_if_needed(self):
        """Decompress the message data if it is compressed."""
        if self.flags & FLAGS_COMPRESSED:
            self.data = zlib.decompress(self.data)
            self.flags &= ~FLAGS_COMPRESSED
            self.data_size = len(self.data)
        return self
try:
    # the stdlib constant-time comparison (Python 2.7.7+/3.3+)
    from hmac import compare_digest as secure_compare
except ImportError:
    import operator
    try:
        reduce
    except NameError:
        from functools import reduce

    def secure_compare(a, b):
        """Fallback comparison for ancient Pythons without hmac.compare_digest."""
        if type(a) != type(b):
            raise TypeError("arguments must both be same type")
        # evaluate every position so timing doesn't leak the first mismatch
        return len(a) == len(b) and reduce(operator.and_, map(operator.eq, a, b), True)

View File

@@ -0,0 +1,7 @@
# uncompyle6 version 3.9.2
# Python bytecode version base 3.7.0 (3394)
# Decompiled from: Python 3.8.19 (default, Mar 20 2024, 15:27:52)
# [Clang 14.0.6 ]
# Embedded file name: /var/user/app/device_supervisorbak/device_supervisor/lib/Pyro4/naming.py
# Compiled at: 2024-04-18 03:12:55
# Size of source mod 2**32: 25442 bytes

View File

@@ -0,0 +1,496 @@
# uncompyle6 version 3.9.2
# Python bytecode version base 3.7.0 (3394)
# Decompiled from: Python 3.8.19 (default, Mar 20 2024, 15:27:52)
# [Clang 14.0.6 ]
# Embedded file name: /var/user/app/device_supervisorbak/device_supervisor/lib/Pyro4/naming_storage.py
# Compiled at: 2024-04-18 03:12:55
# Size of source mod 2**32: 18133 bytes
"""
Name Server persistent storage implementations.
Pyro - Python Remote Objects. Copyright by Irmen de Jong (irmen@razorvine.net).
"""
import re, logging, sys, threading
if sys.version_info <= (3, 4):
from collections import MutableMapping
else:
from collections.abc import MutableMapping
from contextlib import closing
from Pyro4.errors import NamingError
try:
import anydbm as dbm
except ImportError:
try:
import dbm
except ImportError:
dbm = None
except Exception as x:
try:
dbm = None
finally:
x = None
del x
try:
import sqlite3
except ImportError:
sqlite3 = None
log = logging.getLogger("Pyro4.naming_storage")
class SqlStorage(MutableMapping):
__doc__ = "\n Sqlite-based storage.\n It is just a single (name,uri) table for the names and another table for the metadata.\n Sqlite db connection objects aren't thread-safe, so a new connection is created in every method.\n "
def __init__(self, dbfile):
if dbfile == ":memory:":
raise ValueError("We don't support the sqlite :memory: database type. Just use the default volatile in-memory store.")
self.dbfile = dbfile
with closing(sqlite3.connect(dbfile)) as db:
db.execute("PRAGMA foreign_keys=ON")
try:
db.execute("SELECT COUNT(*) FROM pyro_names").fetchone()
except sqlite3.OperationalError:
self._create_schema(db)
else:
try:
db.execute("SELECT COUNT(*) FROM pyro_metadata").fetchone()
except sqlite3.OperationalError:
db.execute("ALTER TABLE pyro_names RENAME TO pyro_names_old")
self._create_schema(db)
db.execute("INSERT INTO pyro_names(name, uri) SELECT name, uri FROM pyro_names_old")
db.execute("DROP TABLE pyro_names_old")
db.commit()
def _create_schema(self, db):
db.execute("CREATE TABLE pyro_names\n (\n id integer PRIMARY KEY,\n name nvarchar NOT NULL UNIQUE,\n uri nvarchar NOT NULL\n );")
db.execute("CREATE TABLE pyro_metadata\n (\n object integer NOT NULL,\n metadata nvarchar NOT NULL,\n FOREIGN KEY(object) REFERENCES pyro_names(id)\n );")
def __getattr__(self, item):
raise NotImplementedError("SqlStorage doesn't implement method/attribute '" + item + "'")
def __getitem__(self, item):
try:
with closing(sqlite3.connect(self.dbfile)) as db:
result = db.execute("SELECT id, uri FROM pyro_names WHERE name=?", (item,)).fetchone()
if result:
dbid, uri = result
metadata = {m[0] for m in db.execute("SELECT metadata FROM pyro_metadata WHERE object=?", (dbid,)).fetchall()}
return (uri, metadata)
raise KeyError(item)
except sqlite3.DatabaseError as e:
try:
raise NamingError("sqlite error in getitem: " + str(e))
finally:
e = None
del e
def __setitem__(self, key, value):
uri, metadata = value
try:
with closing(sqlite3.connect(self.dbfile)) as db:
cursor = db.cursor()
cursor.execute("PRAGMA foreign_keys=ON")
dbid = cursor.execute("SELECT id FROM pyro_names WHERE name=?", (key,)).fetchone()
if dbid:
dbid = dbid[0]
cursor.execute("DELETE FROM pyro_metadata WHERE object=?", (dbid,))
cursor.execute("DELETE FROM pyro_names WHERE id=?", (dbid,))
cursor.execute("INSERT INTO pyro_names(name, uri) VALUES(?,?)", (key, uri))
if metadata:
object_id = cursor.lastrowid
for m in metadata:
cursor.execute("INSERT INTO pyro_metadata(object, metadata) VALUES (?,?)", (object_id, m))
cursor.close()
db.commit()
except sqlite3.DatabaseError as e:
try:
raise NamingError("sqlite error in setitem: " + str(e))
finally:
e = None
del e
def __len__(self):
try:
with closing(sqlite3.connect(self.dbfile)) as db:
return db.execute("SELECT count(*) FROM pyro_names").fetchone()[0]
except sqlite3.DatabaseError as e:
try:
raise NamingError("sqlite error in len: " + str(e))
finally:
e = None
del e
def __contains__(self, item):
try:
with closing(sqlite3.connect(self.dbfile)) as db:
return db.execute("SELECT EXISTS(SELECT 1 FROM pyro_names WHERE name=? LIMIT 1)", (item,)).fetchone()[0]
except sqlite3.DatabaseError as e:
try:
raise NamingError("sqlite error in contains: " + str(e))
finally:
e = None
del e
def __delitem__(self, key):
try:
with closing(sqlite3.connect(self.dbfile)) as db:
db.execute("PRAGMA foreign_keys=ON")
dbid = db.execute("SELECT id FROM pyro_names WHERE name=?", (key,)).fetchone()
if dbid:
dbid = dbid[0]
db.execute("DELETE FROM pyro_metadata WHERE object=?", (dbid,))
db.execute("DELETE FROM pyro_names WHERE id=?", (dbid,))
db.commit()
except sqlite3.DatabaseError as e:
try:
raise NamingError("sqlite error in delitem: " + str(e))
finally:
e = None
del e
def __iter__(self):
try:
with closing(sqlite3.connect(self.dbfile)) as db:
result = db.execute("SELECT name FROM pyro_names")
return iter([n[0] for n in result.fetchall()])
except sqlite3.DatabaseError as e:
try:
raise NamingError("sqlite error in iter: " + str(e))
finally:
e = None
del e
def clear(self):
try:
with closing(sqlite3.connect(self.dbfile)) as db:
db.execute("PRAGMA foreign_keys=ON")
db.execute("DELETE FROM pyro_metadata")
db.execute("DELETE FROM pyro_names")
db.commit()
with closing(sqlite3.connect((self.dbfile), isolation_level=None)) as db:
db.execute("VACUUM")
except sqlite3.DatabaseError as e:
try:
raise NamingError("sqlite error in clear: " + str(e))
finally:
e = None
del e
def optimized_prefix_list(self, prefix, return_metadata=False):
try:
with closing(sqlite3.connect(self.dbfile)) as db:
names = {}
if return_metadata:
for dbid, name, uri in db.execute("SELECT id, name, uri FROM pyro_names WHERE name LIKE ?", (prefix + "%",)).fetchall():
metadata = {m[0] for m in db.execute("SELECT metadata FROM pyro_metadata WHERE object=?", (dbid,)).fetchall()}
names[name] = (uri, metadata)
else:
for name, uri in db.execute("SELECT name, uri FROM pyro_names WHERE name LIKE ?", (prefix + "%",)).fetchall():
names[name] = uri
return names
except sqlite3.DatabaseError as e:
try:
raise NamingError("sqlite error in optimized_prefix_list: " + str(e))
finally:
e = None
del e
def optimized_regex_list(self, regex, return_metadata=False):
pass
def optimized_metadata_search(self, metadata_all=None, metadata_any=None, return_metadata=False):
try:
with closing(sqlite3.connect(self.dbfile)) as db:
if metadata_any:
params = list(metadata_any)
sql = "SELECT id, name, uri FROM pyro_names WHERE id IN (SELECT object FROM pyro_metadata WHERE metadata IN ({seq}))".format(seq=(",".join(["?"] * len(metadata_any))))
else:
params = list(metadata_all)
params.append(len(metadata_all))
sql = "SELECT id, name, uri FROM pyro_names WHERE id IN (SELECT object FROM pyro_metadata WHERE metadata IN ({seq}) GROUP BY object HAVING COUNT(metadata)=?)".format(seq=(",".join(["?"] * len(metadata_all))))
result = db.execute(sql, params).fetchall()
if return_metadata:
names = {}
for dbid, name, uri in result:
metadata = {m[0] for m in db.execute("SELECT metadata FROM pyro_metadata WHERE object=?", (dbid,)).fetchall()}
names[name] = (uri, metadata)
else:
names = {name: uri for dbid, name, uri in result}
return names
except sqlite3.DatabaseError as e:
try:
raise NamingError("sqlite error in optimized_metadata_search: " + str(e))
finally:
e = None
del e
def remove_items(self, items):
try:
with closing(sqlite3.connect(self.dbfile)) as db:
db.execute("PRAGMA foreign_keys=ON")
for item in items:
dbid = db.execute("SELECT id FROM pyro_names WHERE name=?", (item,)).fetchone()
if dbid:
dbid = dbid[0]
db.execute("DELETE FROM pyro_metadata WHERE object=?", (dbid,))
db.execute("DELETE FROM pyro_names WHERE id=?", (dbid,))
db.commit()
except sqlite3.DatabaseError as e:
try:
raise NamingError("sqlite error in remove_items: " + str(e))
finally:
e = None
del e
def everything(self, return_metadata=False):
try:
with closing(sqlite3.connect(self.dbfile)) as db:
names = {}
if return_metadata:
for dbid, name, uri in db.execute("SELECT id, name, uri FROM pyro_names").fetchall():
metadata = {m[0] for m in db.execute("SELECT metadata FROM pyro_metadata WHERE object=?", (dbid,)).fetchall()}
names[name] = (uri, metadata)
else:
for name, uri in db.execute("SELECT name, uri FROM pyro_names").fetchall():
names[name] = uri
return names
except sqlite3.DatabaseError as e:
try:
raise NamingError("sqlite error in everything: " + str(e))
finally:
e = None
del e
    def close(self):
        # Nothing to release: every operation opens (and closes) its own
        # short-lived sqlite connection, so there is no persistent handle.
        pass
class DbmStorage(MutableMapping):
    """
    Storage implementation that uses a persistent dbm file.
    Because dbm only supports strings as key/value, we encode/decode them in utf-8.
    Dbm files cannot be accessed concurrently, so a strict concurrency model
    is used where only one operation is processed at the same time
    (this is very slow when compared to the in-memory storage)
    DbmStorage does NOT support storing metadata! It only accepts empty metadata,
    and always returns empty metadata.
    """

    def __init__(self, dbmfile):
        self.dbmfile = dbmfile
        # Touch the file once so it exists (384 == 0o600: owner read/write only).
        handle = dbm.open(self.dbmfile, "c", mode=384)
        handle.close()
        self.lock = threading.Lock()

    def __getattr__(self, item):
        # Fail loudly for any storage API this backend doesn't provide.
        raise NotImplementedError("DbmStorage doesn't implement method/attribute '" + item + "'")

    def __getitem__(self, item):
        """Look up a name; returns (uri, empty-frozenset) since dbm stores no metadata."""
        encoded = item.encode("utf-8")
        with self.lock:
            try:
                with closing(dbm.open(self.dbmfile)) as db:
                    return (db[encoded].decode("utf-8"), frozenset())
            except dbm.error as e:
                raise NamingError("dbm error in getitem: " + str(e))

    def __setitem__(self, key, value):
        """Store a (uri, metadata) pair under the given name; metadata is discarded."""
        uri, metadata = value
        if metadata:
            log.warning("DbmStorage doesn't support metadata, silently discarded")
        key_bytes = key.encode("utf-8")
        uri_bytes = uri.encode("utf-8")
        with self.lock:
            try:
                with closing(dbm.open(self.dbmfile, "w")) as db:
                    db[key_bytes] = uri_bytes
            except dbm.error as e:
                raise NamingError("dbm error in setitem: " + str(e))

    def __len__(self):
        with self.lock:
            try:
                with closing(dbm.open(self.dbmfile)) as db:
                    return len(db)
            except dbm.error as e:
                raise NamingError("dbm error in len: " + str(e))

    def __contains__(self, item):
        encoded = item.encode("utf-8")
        with self.lock:
            try:
                with closing(dbm.open(self.dbmfile)) as db:
                    return encoded in db
            except dbm.error as e:
                raise NamingError("dbm error in contains: " + str(e))

    def __delitem__(self, key):
        encoded = key.encode("utf-8")
        with self.lock:
            try:
                with closing(dbm.open(self.dbmfile, "w")) as db:
                    del db[encoded]
            except dbm.error as e:
                raise NamingError("dbm error in delitem: " + str(e))

    def __iter__(self):
        with self.lock:
            try:
                with closing(dbm.open(self.dbmfile)) as db:
                    # materialize the keys before the db is closed again
                    return iter([raw.decode("utf-8") for raw in db.keys()])
            except dbm.error as e:
                raise NamingError("dbm error in iter: " + str(e))

    def clear(self):
        """Remove every registration from the dbm file."""
        with self.lock:
            try:
                with closing(dbm.open(self.dbmfile, "w")) as db:
                    if hasattr(db, "clear"):
                        db.clear()
                    else:
                        # some dbm flavors lack clear(); delete key by key
                        for raw in db.keys():
                            del db[raw]
            except dbm.error as e:
                raise NamingError("dbm error in clear: " + str(e))

    def _filtered_entries(self, db, keep, return_metadata):
        """Collect entries whose decoded key satisfies the keep predicate."""
        matches = {}
        if hasattr(db, "items"):
            for raw_key, raw_uri in db.items():
                name = raw_key.decode("utf-8")
                if keep(name):
                    uri = raw_uri.decode("utf-8")
                    matches[name] = (uri, frozenset()) if return_metadata else uri
        else:
            # fall back to keys() + lookups for dbm flavors without items()
            for raw_key in db.keys():
                name = raw_key.decode("utf-8")
                if keep(name):
                    uri = db[raw_key].decode("utf-8")
                    matches[name] = (uri, frozenset()) if return_metadata else uri
        return matches

    def optimized_prefix_list(self, prefix, return_metadata=False):
        """Return all registrations whose name starts with the given prefix."""
        with self.lock:
            try:
                with closing(dbm.open(self.dbmfile)) as db:
                    return self._filtered_entries(db, lambda name: name.startswith(prefix), return_metadata)
            except dbm.error as e:
                raise NamingError("dbm error in optimized_prefix_list: " + str(e))

    def optimized_regex_list(self, regex, return_metadata=False):
        """Return all registrations whose full name matches the given regex."""
        try:
            # anchor at the end so the pattern must match the whole name
            matcher = re.compile(regex + "$")
        except re.error as x:
            raise NamingError("invalid regex: " + str(x))
        with self.lock:
            try:
                with closing(dbm.open(self.dbmfile)) as db:
                    return self._filtered_entries(db, lambda name: matcher.match(name), return_metadata)
            except dbm.error as e:
                raise NamingError("dbm error in optimized_regex_list: " + str(e))

    def optimized_metadata_search(self, metadata_all=None, metadata_any=None, return_metadata=False):
        """Metadata search: only the empty search is supported by this backend."""
        if metadata_all or metadata_any:
            raise NamingError("DbmStorage doesn't support metadata")
        return self.everything(return_metadata)

    def remove_items(self, items):
        """Delete the given names; names that are not registered are ignored."""
        with self.lock:
            try:
                with closing(dbm.open(self.dbmfile, "w")) as db:
                    for name in items:
                        try:
                            del db[name.encode("utf-8")]
                        except KeyError:
                            pass
            except dbm.error as e:
                raise NamingError("dbm error in remove_items: " + str(e))

    def everything(self, return_metadata=False):
        """Return every registration (metadata is always an empty frozenset)."""
        with self.lock:
            try:
                with closing(dbm.open(self.dbmfile)) as db:
                    return self._filtered_entries(db, lambda name: True, return_metadata)
            except dbm.error as e:
                raise NamingError("dbm error in everything: " + str(e))

    def close(self):
        # The dbm file is reopened per operation; nothing is held open here.
        pass

View File

@@ -0,0 +1,163 @@
# uncompyle6 version 3.9.2
# Python bytecode version base 3.7.0 (3394)
# Decompiled from: Python 3.8.19 (default, Mar 20 2024, 15:27:52)
# [Clang 14.0.6 ]
# Embedded file name: /var/user/app/device_supervisorbak/device_supervisor/lib/Pyro4/nsc.py
# Compiled at: 2024-04-18 03:12:55
# Size of source mod 2**32: 5712 bytes
"""
Name server control tool.
Pyro - Python Remote Objects. Copyright by Irmen de Jong (irmen@razorvine.net).
"""
from __future__ import print_function
import sys, os, warnings
from Pyro4 import errors, naming
# Python 2 compatibility: raw_input() is the py2 equivalent of py3's input()
# (returns the typed line as a string without evaluating it).
if sys.version_info < (3, 0):
    input = raw_input
def handleCommand(nameserver, options, args):
    """Execute one nsc command (args[0]) with its arguments against the given name server proxy.

    Any exception raised while executing the command is caught and printed;
    this function does not propagate name server errors to the caller.
    (SystemExit from argument validation still propagates, by design.)
    """
    def printListResult(resultdict, title=''):
        # resultdict maps name -> (uri, metadata-set)
        print("--------START LIST %s" % title)
        for name, (uri, metadata) in sorted(resultdict.items()):
            print("%s --> %s" % (name, uri))
            if metadata:
                print(" metadata:", metadata)
        print("--------END LIST %s" % title)

    def cmd_ping():
        nameserver.ping()
        print("Name server ping ok.")

    def cmd_listprefix():
        if len(args) == 1:
            printListResult(nameserver.list(return_metadata=True))
        else:
            printListResult(nameserver.list(prefix=args[1], return_metadata=True), "- prefix '%s'" % args[1])

    def cmd_listregex():
        if len(args) != 2:
            raise SystemExit("requires one argument: pattern")
        printListResult(nameserver.list(regex=args[1], return_metadata=True), "- regex '%s'" % args[1])

    def cmd_lookup():
        if len(args) != 2:
            raise SystemExit("requires one argument: name")
        uri, metadata = nameserver.lookup(args[1], return_metadata=True)
        print(uri)
        if metadata:
            print("metadata:", metadata)

    def cmd_register():
        if len(args) != 3:
            raise SystemExit("requires two arguments: name uri")
        nameserver.register(args[1], args[2], safe=True)
        print("Registered %s" % args[1])

    def cmd_remove():
        if len(args) != 2:
            raise SystemExit("requires one argument: name")
        count = nameserver.remove(args[1])
        if count > 0:
            print("Removed %s" % args[1])
        else:
            print("Nothing removed")

    def cmd_removeregex():
        if len(args) != 2:
            raise SystemExit("requires one argument: pattern")
        # ask for confirmation: a regex removal can wipe many registrations at once
        sure = input("Potentially removing lots of items from the Name server. Are you sure (y/n)?").strip()
        if sure in ('y', 'Y'):
            count = nameserver.remove(regex=args[1])
            print("%d items removed." % count)

    def cmd_setmeta():
        if len(args) < 2:
            raise SystemExit("requires at least 2 arguments: uri and zero or more meta tags")
        # bugfix: the decompiled source had args[2[:None]] which raises
        # TypeError ('int' object is not subscriptable); the intended slice
        # is args[2:] (every tag after the object name).
        metadata = set(args[2:])
        nameserver.set_metadata(args[1], metadata)
        if metadata:
            print("Metadata updated")
        else:
            print("Metadata cleared")

    def cmd_listmeta_all():
        if len(args) < 2:
            raise SystemExit("requires at least one metadata tag argument")
        metadata = set(args[1:])   # bugfix: was the broken slice args[1[:None]]
        printListResult(nameserver.list(metadata_all=metadata, return_metadata=True), " - searched by metadata")

    def cmd_listmeta_any():
        if len(args) < 2:
            raise SystemExit("requires at least one metadata tag argument")
        metadata = set(args[1:])   # bugfix: was the broken slice args[1[:None]]
        printListResult(nameserver.list(metadata_any=metadata, return_metadata=True), " - searched by metadata")

    commands = {
        'ping': cmd_ping,
        'list': cmd_listprefix,
        'listmatching': cmd_listregex,
        'listmeta_all': cmd_listmeta_all,
        'listmeta_any': cmd_listmeta_any,
        'lookup': cmd_lookup,
        'register': cmd_register,
        'remove': cmd_remove,
        'removematching': cmd_removeregex,
        'setmeta': cmd_setmeta}
    try:
        commands[args[0]]()
    except Exception as x:
        print("Error: %s - %s" % (type(x).__name__, x))
def main(args=None):
    """Command line entry point of the nsc tool: parse options, locate the
    name server, then dispatch the requested command via handleCommand()."""
    from optparse import OptionParser
    usage = "usage: %prog [options] command [arguments]\nCommands: register remove removematching lookup list listmatching\n listmeta_all listmeta_any setmeta ping"
    parser = OptionParser(usage=usage)
    parser.add_option("-n", "--host", dest="host", help="hostname of the NS")
    parser.add_option("-p", "--port", dest="port", type="int", help="port of the NS (or bc-port if host isn't specified)")
    parser.add_option("-u", "--unixsocket", help="Unix domain socket name of the NS")
    parser.add_option("-k", "--key", help="the HMAC key to use (deprecated)")
    parser.add_option("-v", "--verbose", action="store_true", dest="verbose", help="verbose output")
    options, args = parser.parse_args(args)
    if options.key:
        warnings.warn("using -k to supply HMAC key on the command line is a security problem and is deprecated since Pyro 4.72. See the documentation for an alternative.")
    if "PYRO_HMAC_KEY" in os.environ:
        # environment variable takes over, but never together with -k
        if options.key:
            raise SystemExit("error: don't use -k and PYRO_HMAC_KEY at the same time")
        options.key = os.environ["PYRO_HMAC_KEY"]
    valid_commands = ('register', 'remove', 'removematching', 'list',
                      'listmatching', 'lookup', 'listmeta_all', 'listmeta_any',
                      'setmeta', 'ping')
    if not args or args[0] not in valid_commands:
        parser.error("invalid or missing command")
    if options.verbose:
        print("Locating name server...")
    if options.unixsocket:
        options.host = "./u:" + options.unixsocket
    try:
        nameserver = naming.locateNS(options.host, options.port, hmac_key=options.key)
    except errors.PyroError as err:
        print("Error: %s" % err)
        return
    if options.verbose:
        print("Name server found: %s" % nameserver._pyroUri)
    handleCommand(nameserver, options, args)
    if options.verbose:
        print("Done.")

View File

@@ -0,0 +1,13 @@
# uncompyle6 version 3.9.2
# Python bytecode version base 3.7.0 (3394)
# Decompiled from: Python 3.8.19 (default, Mar 20 2024, 15:27:52)
# [Clang 14.0.6 ]
# Embedded file name: /var/user/app/device_supervisorbak/device_supervisor/lib/Pyro4/socketserver/__init__.py
# Compiled at: 2024-04-18 03:12:55
# Size of source mod 2**32: 128 bytes
"""
Package for the various server types.
Pyro - Python Remote Objects. Copyright by Irmen de Jong (irmen@razorvine.net).
"""
pass

View File

@@ -0,0 +1,121 @@
# uncompyle6 version 3.9.2
# Python bytecode version base 3.7.0 (3394)
# Decompiled from: Python 3.8.19 (default, Mar 20 2024, 15:27:52)
# [Clang 14.0.6 ]
# Embedded file name: /var/user/app/device_supervisorbak/device_supervisor/lib/Pyro4/socketserver/existingconnectionserver.py
# Compiled at: 2024-04-18 03:12:55
# Size of source mod 2**32: 4127 bytes
"""
Socket server for a the special case of a single, already existing, connection.
Pyro - Python Remote Objects. Copyright by Irmen de Jong (irmen@razorvine.net).
"""
from __future__ import print_function
import socket, sys, logging, ssl
from Pyro4 import socketutil, errors, util
from Pyro4.configuration import config
log = logging.getLogger("Pyro4.existingconnectionserver")
class SocketServer_ExistingConnection(object):
    """Pyro transport 'server' for the special case of a single, already
    connected, user-supplied socket.  It serves exactly that one connection
    and shuts itself down when the client disconnects."""

    def __init__(self):
        self.sock = self.daemon = self.locationStr = self.conn = None
        self.shutting_down = False

    def init(self, daemon, connected_socket):
        """Adopt the given connected socket and derive locationStr from its sockname.

        NOTE(review): control flow reconstructed from upstream Pyro4; the
        decompiled version attached the daemon assignment to an else-branch.
        """
        connected_socket.getpeername()   # raises if the socket is not actually connected
        if config.SSL and not isinstance(connected_socket, ssl.SSLSocket):
            raise socket.error("SSL configured for Pyro but existing socket is not a SSL socket")
        self.daemon = daemon
        self.sock = connected_socket
        log.info("starting server on user-supplied connected socket " + str(connected_socket))
        sn = connected_socket.getsockname()
        if hasattr(socket, "AF_UNIX") and connected_socket.family == socket.AF_UNIX:
            self.locationStr = "./u:" + (sn or "<<not-bound>>")
        else:
            # bugfix: the decompiled source read sn[None[:2]], which raises
            # TypeError for every TCP socket; the intended expression is
            # sn[:2] (host and port from the sockname tuple).
            host, port = sn[:2]
            if ":" in host:   # ipv6 address literal needs brackets
                self.locationStr = "[%s]:%d" % (host, port)
            else:
                self.locationStr = "%s:%d" % (host, port)
        self.conn = socketutil.SocketConnection(connected_socket)

    def __repr__(self):
        return "<%s on %s>" % (self.__class__.__name__, self.locationStr)

    def __del__(self):
        if self.sock is not None:
            # we don't own the socket, so just drop our references
            self.sock = None
            self.conn = None

    @property
    def selector(self):
        raise TypeError("single-connection server doesn't have multiplexing selector")

    @property
    def sockets(self):
        return [self.sock]

    def combine_loop(self, server):
        raise errors.PyroError("cannot combine servers when using user-supplied connected socket")

    def events(self, eventsockets):
        raise errors.PyroError("cannot combine events when using user-supplied connected socket")

    def shutdown(self):
        self.shutting_down = True
        self.close()
        self.sock = None
        self.conn = None

    def close(self):
        # the socket was supplied by the user; we never close it ourselves
        self.sock = None
        self.conn = None

    def handleRequest(self):
        """Handles a single connection request event and returns if the connection is still active"""
        try:
            self.daemon.handleRequest(self.conn)
            return True
        except (socket.error, errors.ConnectionClosedError, errors.SecurityError):
            # client went away (or security violation): log and stop serving
            try:
                peername = self.conn.sock.getpeername()
                log.debug("disconnected %s", peername)
            except socket.error:
                log.debug("disconnected a client")
            self.shutdown()
            return False
        except errors.TimeoutError as x:
            log.warning("error during handleRequest: %s" % x)
            return False
        except:
            # anything else: log with traceback, but keep the process alive
            ex_t, ex_v, ex_tb = sys.exc_info()
            tb = util.formatTraceback(ex_t, ex_v, ex_tb)
            msg = "error during handleRequest: %s; %s" % (ex_v, "".join(tb))
            log.warning(msg)
            return False

    def loop(self, loopCondition=(lambda: True)):
        """Serve requests on the single connection until it closes or loopCondition fails."""
        log.debug("entering requestloop")
        while loopCondition() and self.sock:
            try:
                self.handleRequest()
                self.daemon._housekeeping()
            except socket.timeout:
                pass   # just continue the loop on a timeout
            except KeyboardInterrupt:
                log.debug("stopping on break signal")
                break

View File

@@ -0,0 +1,221 @@
# uncompyle6 version 3.9.2
# Python bytecode version base 3.7.0 (3394)
# Decompiled from: Python 3.8.19 (default, Mar 20 2024, 15:27:52)
# [Clang 14.0.6 ]
# Embedded file name: /var/user/app/device_supervisorbak/device_supervisor/lib/Pyro4/socketserver/multiplexserver.py
# Compiled at: 2024-04-18 03:12:55
# Size of source mod 2**32: 9876 bytes
"""
Socket server based on socket multiplexing. Doesn't use threads.
Uses the best available selector (kqueue, poll, select).
Pyro - Python Remote Objects. Copyright by Irmen de Jong (irmen@razorvine.net).
"""
from __future__ import print_function
import socket, time, sys, logging, os
from collections import defaultdict
from Pyro4 import socketutil, errors, util
from Pyro4.configuration import config
if sys.version_info >= (3, 5):
import selectors
else:
try:
import selectors2 as selectors
except ImportError:
if sys.version_info >= (3, 4):
import selectors
else:
try:
import selectors34 as selectors
except ImportError:
selectors = None
log = logging.getLogger("Pyro4.multiplexserver")
class SocketServer_Multiplex(object):
    """Multiplexed transport server for socket connections (uses select, poll, kqueue, ...)"""

    def __init__(self):
        self.sock = self.daemon = self.locationStr = None
        if selectors is None:
            raise RuntimeError("This Python installation doesn't have the 'selectors2' or 'selectors34' module installed, which is required to use Pyro's multiplex server. Install it, or use the threadpool server instead.")
        self.selector = selectors.DefaultSelector()
        self.shutting_down = False

    def init(self, daemon, host, port, unixsocket=None):
        """Create and bind the server socket and register it with the selector."""
        log.info("starting multiplexed socketserver")
        log.debug("selector implementation: %s.%s", self.selector.__class__.__module__, self.selector.__class__.__name__)
        self.sock = None
        bind_location = unixsocket if unixsocket else (host, port)
        if config.SSL:
            sslContext = socketutil.getSSLcontext(servercert=config.SSL_SERVERCERT, serverkey=config.SSL_SERVERKEY,
                                                  keypassword=config.SSL_SERVERKEYPASSWD,
                                                  cacerts=config.SSL_CACERTS)
            log.info("using SSL, cert=%s key=%s cacerts=%s", config.SSL_SERVERCERT, config.SSL_SERVERKEY, config.SSL_CACERTS)
        else:
            sslContext = None
            log.info("not using SSL")
        self.sock = socketutil.createSocket(bind=bind_location, reuseaddr=config.SOCK_REUSE,
                                            timeout=config.COMMTIMEOUT,
                                            noinherit=True,
                                            nodelay=config.SOCK_NODELAY,
                                            sslContext=sslContext)
        self.daemon = daemon
        self._socketaddr = sockaddr = self.sock.getsockname()
        # bugfix: the decompiled if/elif/else chain never assigned locationStr
        # for plain tcp sockets, and evaluated (host is None or host.lower())
        # != "localhost" which crashes with AttributeError when host is None.
        # Restored the upstream Pyro4 logic:
        if not unixsocket and sockaddr[0].startswith("127."):
            if host is None or host.lower() != "localhost" and not host.startswith("127."):
                log.warning("weird DNS setup: %s resolves to localhost (127.x.x.x)", host)
        if unixsocket:
            self.locationStr = "./u:" + unixsocket
        else:
            host = host or sockaddr[0]
            port = port or sockaddr[1]
            if ":" in host:   # ipv6 address literal needs brackets
                self.locationStr = "[%s]:%d" % (host, port)
            else:
                self.locationStr = "%s:%d" % (host, port)
        self.selector.register(self.sock, selectors.EVENT_READ, self)

    def __repr__(self):
        # the -1 excludes our own server socket from the connection count
        return "<%s on %s; %d connections>" % (self.__class__.__name__, self.locationStr, len(self.selector.get_map()) - 1)

    def __del__(self):
        if self.sock is not None:
            self.selector.close()
            self.sock.close()
            self.sock = None

    def events(self, eventsockets):
        """Handle events that occur on one of the sockets of this server.

        NOTE(review): the decompiler failed on this method ("Parse error at or
        near COME_FROM"); the body is reconstructed from the upstream Pyro4
        multiplexserver implementation -- verify against the deployed version.
        """
        for s in eventsockets:
            if s is self.sock:
                # server socket, means a new connection is waiting
                conn = self._handleConnection(self.sock)
                if conn:
                    self.selector.register(conn, selectors.EVENT_READ, self)
            else:
                # client socket, means an incoming remote call
                active = self.handleRequest(s)
                if not active:
                    try:
                        self.selector.unregister(s)
                    except KeyError:
                        pass   # already unregistered
                    s.close()
        self.daemon._housekeeping()

    def _handleConnection(self, sock):
        """Accept a new client connection and do the Pyro handshake.

        Returns the new SocketConnection, or None when the connect/handshake failed.
        """
        try:
            if sock is None:
                return
            csock, caddr = sock.accept()
            if hasattr(csock, "getpeercert"):
                log.debug("connected %s - SSL", caddr)
            else:
                log.debug("connected %s - unencrypted", caddr)
            if config.COMMTIMEOUT:
                csock.settimeout(config.COMMTIMEOUT)
        except (socket.error, OSError) as x:
            err = getattr(x, "errno", x.args[0])
            if err in socketutil.ERRNO_BADF or err in socketutil.ERRNO_ENOTSOCK:
                # our server socket got destroyed
                raise errors.ConnectionClosedError("server socket closed")
            # an accept() failure shouldn't abort the whole server; log and continue
            log.warning("accept() failed '%s' with errno=%d, shouldn't happen", x, err)
            return
        try:
            conn = socketutil.SocketConnection(csock)
            if self.daemon._handshake(conn):
                return conn
            conn.close()
        except:
            ex_t, ex_v, ex_tb = sys.exc_info()
            tb = util.formatTraceback(ex_t, ex_v, ex_tb)
            log.warning("error during connect/handshake: %s; %s", ex_v, "\n".join(tb))
            try:
                csock.shutdown(socket.SHUT_RDWR)
            except (OSError, socket.error):
                pass
            csock.close()

    def shutdown(self):
        self.shutting_down = True
        self.wakeup()
        time.sleep(0.05)
        self.close()
        self.sock = None

    def close(self):
        self.selector.close()
        if self.sock:
            sockname = None
            try:
                sockname = self.sock.getsockname()
            except (socket.error, OSError):
                pass
            self.sock.close()
            if type(sockname) is str:
                # unix domain socket: also remove the socket file
                if os.path.exists(sockname):
                    os.remove(sockname)
            self.sock = None

    @property
    def sockets(self):
        registrations = self.selector.get_map()
        if registrations:
            return [sk.fileobj for sk in registrations.values()]
        return []

    def wakeup(self):
        """bit of a hack to trigger a blocking server to get out of the loop, useful at clean shutdowns"""
        socketutil.interruptSocket(self._socketaddr)

    def handleRequest(self, conn):
        """Handles a single connection request event and returns if the connection is still active"""
        try:
            self.daemon.handleRequest(conn)
            return True
        except (socket.error, errors.ConnectionClosedError, errors.SecurityError):
            # client went away or caused a security error
            try:
                peername = conn.sock.getpeername()
                log.debug("disconnected %s", peername)
            except socket.error:
                log.debug("disconnected a client")
            return False
        except errors.TimeoutError as x:
            log.warning("error during handleRequest: %s" % x)
            return False
        except:
            ex_t, ex_v, ex_tb = sys.exc_info()
            tb = util.formatTraceback(ex_t, ex_v, ex_tb)
            msg = "error during handleRequest: %s; %s" % (ex_v, "".join(tb))
            log.warning(msg)
            return False

    def loop(self, loopCondition=(lambda: True)):
        """Run the multiplexed request loop until loopCondition fails or interrupted."""
        log.debug("entering multiplexed requestloop")
        while loopCondition():
            try:
                try:
                    events = self.selector.select(config.POLLTIMEOUT)
                except OSError:
                    events = []
                # group readable sockets per owning server so combined loops work
                events_per_server = defaultdict(list)
                for key, mask in events:
                    if mask & selectors.EVENT_READ:
                        events_per_server[key.data].append(key.fileobj)
                for server, fileobjs in events_per_server.items():
                    server.events(fileobjs)
                if not events_per_server:
                    self.daemon._housekeeping()
            except socket.timeout:
                pass   # just continue the loop on a timeout
            except KeyboardInterrupt:
                log.debug("stopping on break signal")
                break

    def combine_loop(self, server):
        """Register another server's sockets with our selector so one loop serves both."""
        for sock in server.sockets:
            self.selector.register(sock, selectors.EVENT_READ, server)
        server.selector = self.selector

View File

@@ -0,0 +1,142 @@
# uncompyle6 version 3.9.2
# Python bytecode version base 3.7.0 (3394)
# Decompiled from: Python 3.8.19 (default, Mar 20 2024, 15:27:52)
# [Clang 14.0.6 ]
# Embedded file name: /var/user/app/device_supervisorbak/device_supervisor/lib/Pyro4/socketserver/threadpool.py
# Compiled at: 2024-04-18 03:12:55
# Size of source mod 2**32: 4327 bytes
"""
Thread pool job processor with variable number of worker threads (between max/min amount).
Pyro - Python Remote Objects. Copyright by Irmen de Jong (irmen@razorvine.net).
"""
from __future__ import with_statement
import time, logging, threading
from Pyro4.configuration import config
log = logging.getLogger("Pyro4.threadpool")
class PoolError(Exception):
    """Base exception for thread pool errors (e.g. submitting to a closed pool)."""
    pass
class NoFreeWorkersError(PoolError):
    """Raised when a job is submitted but no worker is idle and the pool is at its maximum size."""
    pass
class Worker(threading.Thread):
    """Daemon thread that executes jobs handed to it, one at a time, by a Pool.

    A job is any callable; handing it None tells the thread to terminate.
    After each completed job the worker reports back via pool.notify_done().
    """

    def __init__(self, pool):
        super(Worker, self).__init__()
        self.daemon = True
        self.name = "Pyro-Worker-%d" % id(self)
        self.job_available = threading.Event()
        self.job = None
        self.pool = pool

    def process(self, job):
        """Hand a job (callable, or None as stop signal) to this worker."""
        self.job = job
        self.job_available.set()

    def run(self):
        while True:
            # block until process() delivers something
            self.job_available.wait()
            self.job_available.clear()
            if self.job is None:
                break   # stop signal received
            try:
                self.job()
            except Exception as exc:
                log.exception("unhandled exception from job in worker thread %s: %s", self.name, exc)
            self.job = None
            self.pool.notify_done(self)
        self.pool = None
class Pool(object):
    __doc__ = "\n    A job processing pool that is using a pool of worker threads.\n    The amount of worker threads in the pool is configurable and scales between min/max size.\n    "

    def __init__(self):
        # Validate the configured bounds before starting any threads.
        if config.THREADPOOL_SIZE < 1 or config.THREADPOOL_SIZE_MIN < 1:
            raise ValueError("threadpool sizes must be greater than zero")
        if config.THREADPOOL_SIZE_MIN > config.THREADPOOL_SIZE:
            raise ValueError("minimum threadpool size must be less than or equal to max size")
        self.idle = set()    # workers currently waiting for a job
        self.busy = set()    # workers currently executing a job
        self.closed = False
        # pre-start the minimum number of workers
        for _ in range(config.THREADPOOL_SIZE_MIN):
            worker = Worker(self)
            self.idle.add(worker)
            worker.start()
        log.debug("worker pool created with initial size %d", self.num_workers())
        self.count_lock = threading.Lock()

    def __enter__(self):
        return self

    def __exit__(self, exc_type, exc_val, exc_tb):
        # context-manager support: closing the pool on exit
        self.close()

    def close(self):
        """Stop all workers and mark the pool closed (idempotent)."""
        if not self.closed:
            log.debug("closing down")
            # send the stop signal (a None job) to every worker
            for w in list(self.busy):
                w.process(None)
            for w in list(self.idle):
                w.process(None)
            self.closed = True
            time.sleep(0.1)
            # swap out the worker sets, then join the old workers briefly;
            # never join the current thread (a worker may close its own pool)
            idle, self.idle = self.idle, set()
            busy, self.busy = self.busy, set()
            current_thread = threading.current_thread()
            while idle:
                p = idle.pop()
                if p is not current_thread:
                    p.join(timeout=0.1)
            while busy:
                p = busy.pop()
                if p is not current_thread:
                    p.join(timeout=0.1)

    def __repr__(self):
        return "<%s.%s at 0x%x; %d busy workers; %d idle workers>" % (
            self.__class__.__module__, self.__class__.__name__, id(self), len(self.busy), len(self.idle))

    def num_workers(self):
        # total worker count, busy and idle combined
        return len(self.busy) + len(self.idle)

    def process(self, job):
        """Dispatch a job to an idle worker, growing the pool up to the configured maximum.

        Raises PoolError when the pool is closed, NoFreeWorkersError when at capacity.
        """
        if self.closed:
            raise PoolError("job queue is closed")
        elif self.idle:
            worker = self.idle.pop()
        else:
            if self.num_workers() < config.THREADPOOL_SIZE:
                # grow the pool with a fresh worker
                worker = Worker(self)
                worker.start()
            else:
                raise NoFreeWorkersError("no free workers available, increase thread pool size")
        self.busy.add(worker)
        worker.process(job)
        log.debug("worker counts: %d busy, %d idle", len(self.busy), len(self.idle))

    def notify_done(self, worker):
        """Called by a worker when its job finished; recycle or retire the worker."""
        if worker in self.busy:
            self.busy.remove(worker)
        # NOTE(review): upstream Pyro4 uses a separate `if self.closed:` here,
        # so a busy worker of a closed pool also gets the stop signal; the
        # decompiled `elif` skips that when the first branch was taken -- verify.
        elif self.closed:
            worker.process(None)
            return
        if len(self.idle) >= config.THREADPOOL_SIZE_MIN:
            # enough idle workers already: retire this one
            worker.process(None)
        else:
            self.idle.add(worker)
        log.debug("worker counts: %d busy, %d idle", len(self.busy), len(self.idle))

View File

@@ -0,0 +1,262 @@
# uncompyle6 version 3.9.2
# Python bytecode version base 3.7.0 (3394)
# Decompiled from: Python 3.8.19 (default, Mar 20 2024, 15:27:52)
# [Clang 14.0.6 ]
# Embedded file name: /var/user/app/device_supervisorbak/device_supervisor/lib/Pyro4/socketserver/threadpoolserver.py
# Compiled at: 2024-04-18 03:12:55
# Size of source mod 2**32: 10057 bytes
"""
Socket server based on a worker thread pool. Doesn't use select.
Uses a single worker thread per client connection.
Pyro - Python Remote Objects. Copyright by Irmen de Jong (irmen@razorvine.net).
"""
from __future__ import print_function
import socket, logging, sys, time, threading, os
from Pyro4 import socketutil, errors, util
from Pyro4.configuration import config
from .threadpool import Pool, NoFreeWorkersError
from .multiplexserver import selectors
log = logging.getLogger("Pyro4.threadpoolserver")
# Serializes the daemon's client-disconnect callback across all connection worker threads.
_client_disconnect_lock = threading.Lock()
class ClientConnectionJob(object):
    __doc__ = "\n    Takes care of a single client connection and all requests\n    that may arrive during its life span.\n    "

    def __init__(self, clientSocket, clientAddr, daemon):
        # wrap the raw socket in Pyro's connection abstraction
        self.csock = socketutil.SocketConnection(clientSocket)
        self.caddr = clientAddr
        self.daemon = daemon

    def __call__(self):
        """Run the connection's whole life cycle: handshake, then serve
        requests until the client disconnects or an error occurs.
        Always notifies the daemon of the disconnect and closes the socket."""
        if self.handleConnection():
            try:
                while True:
                    try:
                        self.daemon.handleRequest(self.csock)
                    except (socket.error, errors.ConnectionClosedError):
                        # client went away; normal end of the connection
                        log.debug("disconnected %s", self.caddr)
                        break
                    except errors.SecurityError:
                        log.debug("security error on client %s", self.caddr)
                        break
                    except errors.TimeoutError as x:
                        # (the x=None/del x dance is a decompiler artifact of
                        # the implicit cleanup of `except ... as x` blocks)
                        try:
                            log.warning("error during handleRequest: %s" % x)
                            break
                        finally:
                            x = None
                            del x
                    except:
                        # unexpected error: log full traceback, then drop the connection
                        ex_t, ex_v, ex_tb = sys.exc_info()
                        tb = util.formatTraceback(ex_t, ex_v, ex_tb)
                        msg = "error during handleRequest: %s; %s" % (ex_v, "".join(tb))
                        log.warning(msg)
                        break
            finally:
                # the disconnect callback is serialized across all worker threads
                with _client_disconnect_lock:
                    try:
                        self.daemon._clientDisconnect(self.csock)
                    except Exception as x:
                        try:
                            log.warning("Error in clientDisconnect: " + str(x))
                        finally:
                            x = None
                            del x
                self.csock.close()

    def handleConnection(self):
        """Perform the Pyro handshake; returns True when the connection is accepted."""
        try:
            if self.daemon._handshake(self.csock):
                return True
            self.csock.close()
        except:
            ex_t, ex_v, ex_tb = sys.exc_info()
            tb = util.formatTraceback(ex_t, ex_v, ex_tb)
            log.warning("error during connect/handshake: %s; %s", ex_v, "\n".join(tb))
            self.csock.close()
        return False

    def denyConnection(self, reason):
        """Reject the client with the given reason (used when the pool is full)."""
        log.warning("client connection was denied: " + reason)
        self.daemon._handshake(self.csock, denied_reason=reason)
        self.csock.close()
class Housekeeper(threading.Thread):
    """Background daemon thread that periodically triggers the Pyro daemon's
    housekeeping, until its stop event is set."""

    def __init__(self, daemon):
        super(Housekeeper, self).__init__(name="housekeeper")
        self.pyroDaemon = daemon
        self.stop = threading.Event()
        self.daemon = True
        # housekeeping interval derived from the configured timeouts
        self.waittime = min(config.POLLTIMEOUT or 0, max(config.COMMTIMEOUT or 0, 5))

    def run(self):
        # each timed-out wait is one housekeeping tick; a set stop event ends the loop
        while not self.stop.wait(self.waittime):
            self.pyroDaemon._housekeeping()
class SocketServer_Threadpool(object):
    """transport server for socket connections, worker thread pool version."""

    def __init__(self):
        self.daemon = self.sock = self._socketaddr = self.locationStr = self.pool = None
        self.shutting_down = False
        self.housekeeper = None
        # optional selector so events() can wait with a timeout before accept()
        self._selector = selectors.DefaultSelector() if selectors else None

    def init(self, daemon, host, port, unixsocket=None):
        """Create and bind the server socket, start the worker pool and the housekeeper."""
        log.info("starting thread pool socketserver")
        self.daemon = daemon
        self.sock = None
        bind_location = unixsocket if unixsocket else (host, port)
        if config.SSL:
            sslContext = socketutil.getSSLcontext(servercert=config.SSL_SERVERCERT, serverkey=config.SSL_SERVERKEY,
                                                  keypassword=config.SSL_SERVERKEYPASSWD,
                                                  cacerts=config.SSL_CACERTS)
            log.info("using SSL, cert=%s key=%s cacerts=%s", config.SSL_SERVERCERT, config.SSL_SERVERKEY, config.SSL_CACERTS)
        else:
            sslContext = None
            log.info("not using SSL")
        self.sock = socketutil.createSocket(bind=bind_location, reuseaddr=config.SOCK_REUSE,
                                            timeout=config.COMMTIMEOUT,
                                            noinherit=True,
                                            nodelay=config.SOCK_NODELAY,
                                            sslContext=sslContext)
        self._socketaddr = self.sock.getsockname()
        # bugfix: the decompiled if/elif/else chain never assigned locationStr
        # for plain tcp sockets (the elif/else were unreachable when unixsocket
        # was None) and evaluated (host is None or host.lower()) != "localhost",
        # which crashes with AttributeError when host is None.  Restored the
        # upstream Pyro4 logic:
        if not unixsocket and self._socketaddr[0].startswith("127."):
            if host is None or host.lower() != "localhost" and not host.startswith("127."):
                log.warning("weird DNS setup: %s resolves to localhost (127.x.x.x)", host)
        if unixsocket:
            self.locationStr = "./u:" + unixsocket
        else:
            host = host or self._socketaddr[0]
            port = port or self._socketaddr[1]
            if ":" in host:   # ipv6 address literal needs brackets
                self.locationStr = "[%s]:%d" % (host, port)
            else:
                self.locationStr = "%s:%d" % (host, port)
        self.pool = Pool()
        self.housekeeper = Housekeeper(daemon)
        self.housekeeper.start()
        if self._selector:
            self._selector.register(self.sock, selectors.EVENT_READ, self)

    def __del__(self):
        if self.sock is not None:
            self.sock.close()
            self.sock = None
        if self.pool is not None:
            self.pool.close()
            self.pool = None
        if self.housekeeper:
            self.housekeeper.stop.set()
            self.housekeeper.join()
            self.housekeeper = None

    def __repr__(self):
        return "<%s on %s; %d workers>" % (self.__class__.__name__, self.locationStr, self.pool.num_workers())

    def loop(self, loopCondition=(lambda: True)):
        """Accept-and-dispatch loop; runs until the socket closes, shutdown, or loopCondition fails."""
        log.debug("threadpool server requestloop")
        # bugfix: the decompiled loop only *tested* loopCondition() per iteration
        # without breaking, so a false condition made it spin forever; the
        # condition belongs in the while clause.
        while self.sock is not None and not self.shutting_down and loopCondition():
            try:
                self.events([self.sock])
            except (socket.error, OSError) as x:
                if not loopCondition():
                    break   # socket error because we're being shut down
                err = getattr(x, "errno", x.args[0])
                log.warning("socket error '%s' with errno=%d, shouldn't happen", x, err)
                continue
            except KeyboardInterrupt:
                log.debug("stopping on break signal")
                break

    def combine_loop(self, server):
        raise TypeError("You can't use the loop combiner on the threadpool server type")

    def events(self, eventsockets):
        """used for external event loops: handle events that occur on one of the sockets of this server

        NOTE(review): nesting reconstructed from the flattened decompiler
        output; matches the upstream Pyro4 threadpoolserver flow -- verify.
        """
        assert self.sock in eventsockets
        try:
            if self._selector:
                # wait (with timeout) until the server socket is readable,
                # so the accept() below won't block indefinitely
                events = self._selector.select(config.POLLTIMEOUT)
                if not events:
                    return
            csock, caddr = self.sock.accept()
            if self.shutting_down:
                csock.close()
                return
            if hasattr(csock, "getpeercert"):
                log.debug("connected %s - SSL", caddr)
            else:
                log.debug("connected %s - unencrypted", caddr)
            if config.COMMTIMEOUT:
                csock.settimeout(config.COMMTIMEOUT)
            job = ClientConnectionJob(csock, caddr, self.daemon)
            try:
                self.pool.process(job)
            except NoFreeWorkersError:
                job.denyConnection("no free workers, increase server threadpool size")
        except socket.timeout:
            pass   # just continue the loop when accept() times out

    def shutdown(self):
        self.shutting_down = True
        self.wakeup()
        time.sleep(0.05)
        self.close()
        self.sock = None

    def close(self):
        """Stop the housekeeper, close the server socket (removing a unix socket file) and the pool."""
        if self.housekeeper:
            self.housekeeper.stop.set()
            self.housekeeper.join()
            self.housekeeper = None
        if self.sock:
            sockname = None
            try:
                sockname = self.sock.getsockname()
            except (socket.error, OSError):
                pass
            try:
                self.sock.close()
                if type(sockname) is str:
                    # unix domain socket: also remove the socket file
                    if os.path.exists(sockname):
                        os.remove(sockname)
            except Exception:
                pass
            self.sock = None
        self.pool.close()

    @property
    def sockets(self):
        return [
            self.sock]

    @property
    def selector(self):
        raise TypeError("threadpool server doesn't have multiplexing selector")

    def wakeup(self):
        # poke our own listening socket to break a blocking accept/select
        socketutil.interruptSocket(self._socketaddr)

View File

@@ -0,0 +1,641 @@
# uncompyle6 version 3.9.2
# Python bytecode version base 3.7.0 (3394)
# Decompiled from: Python 3.8.19 (default, Mar 20 2024, 15:27:52)
# [Clang 14.0.6 ]
# Embedded file name: /var/user/app/device_supervisorbak/device_supervisor/lib/Pyro4/socketutil.py
# Compiled at: 2024-04-18 03:12:55
# Size of source mod 2**32: 23920 bytes
"""
Low level socket utilities.
Pyro - Python Remote Objects. Copyright by Irmen de Jong (irmen@razorvine.net).
"""
import os, socket, errno, time, sys, select, weakref
try:
import ssl
except ImportError:
ssl = None
from Pyro4.configuration import config
from Pyro4.errors import CommunicationError, TimeoutError, ConnectionClosedError
# Python 2 compatibility: InterruptedError is a builtin only since Python 3.3;
# define a stand-in so the retry logic below can catch it uniformly.
try:
    InterruptedError()
except NameError:
    class InterruptedError(Exception):
        pass
# errno values signalling a transient condition: the socket operation should
# simply be retried (the WSA* variants are the Windows/Winsock equivalents).
ERRNO_RETRIES = [
 errno.EINTR, errno.EAGAIN, errno.EWOULDBLOCK, errno.EINPROGRESS]
if hasattr(errno, "WSAEINTR"):
    ERRNO_RETRIES.append(errno.WSAEINTR)
if hasattr(errno, "WSAEWOULDBLOCK"):
    ERRNO_RETRIES.append(errno.WSAEWOULDBLOCK)
if hasattr(errno, "WSAEINPROGRESS"):
    ERRNO_RETRIES.append(errno.WSAEINPROGRESS)
# errno values for "bad file descriptor" / "not a socket" conditions
ERRNO_BADF = [errno.EBADF]
if hasattr(errno, "WSAEBADF"):
    ERRNO_BADF.append(errno.WSAEBADF)
ERRNO_ENOTSOCK = [errno.ENOTSOCK]
if hasattr(errno, "WSAENOTSOCK"):
    ERRNO_ENOTSOCK.append(errno.WSAENOTSOCK)
# some platforms lack SOL_TCP; IPPROTO_TCP is the equivalent option level
if not hasattr(socket, "SOL_TCP"):
    socket.SOL_TCP = socket.IPPROTO_TCP
ERRNO_EADDRNOTAVAIL = [errno.EADDRNOTAVAIL]
if hasattr(errno, "WSAEADDRNOTAVAIL"):
    ERRNO_EADDRNOTAVAIL.append(errno.WSAEADDRNOTAVAIL)
ERRNO_EADDRINUSE = [errno.EADDRINUSE]
if hasattr(errno, "WSAEADDRINUSE"):
    ERRNO_EADDRINUSE.append(errno.WSAEADDRINUSE)
# Python 3 has no basestring; alias it so isinstance checks below work on 2 and 3
if sys.version_info >= (3, 0):
    basestring = str
def getIpVersion(hostnameOrAddress):
    """
    Determine what the IP version is of the given hostname or ip address (4 or 6).
    First, it resolves the hostname or address to get an IP address.
    Then, if the resolved IP contains a ':' it is considered to be an ipv6 address,
    and if it contains a '.', it is ipv4.
    """
    resolved = getIpAddress(hostnameOrAddress)
    if "." in resolved:
        return 4
    elif ":" in resolved:
        return 6
    else:
        raise CommunicationError("Unknown IP address format" + resolved)
def getIpAddress(hostname, workaround127=False, ipVersion=None):
    """
    Returns the IP address for the given host. If you enable the workaround,
    it will use a little hack if the ip address is found to be the loopback address.
    The hack tries to discover an externally visible ip address instead (this only works for ipv4 addresses).
    Set ipVersion=6 to return ipv6 addresses, 4 to return ipv4, 0 to let OS choose the best one or None to use config.PREFER_IP_VERSION.
    """
    def getaddr(ipVersion):
        # resolve through getaddrinfo for the requested address family
        if ipVersion == 6:
            family = socket.AF_INET6
        elif ipVersion == 4:
            family = socket.AF_INET
        elif ipVersion == 0:
            family = socket.AF_UNSPEC
        else:
            raise ValueError("unknown value for argument ipVersion.")
        ip = socket.getaddrinfo(hostname or socket.gethostname(), 80, family, socket.SOCK_STREAM, socket.SOL_TCP)[0][4][0]
        if workaround127 and (ip.startswith("127.") or ip == "0.0.0.0"):
            # loopback resolved: probe for an externally visible address instead
            ip = getInterfaceAddress("4.2.2.2")
        return ip

    try:
        if hostname and ":" in hostname and ipVersion is None:
            # a literal ipv6 address was passed; let the OS pick the family
            ipVersion = 0
        return getaddr(config.PREFER_IP_VERSION) if ipVersion is None else getaddr(ipVersion)
    except socket.gaierror:
        # BUG FIX (decompilation artifact): the original tested
        # "(ipVersion == 6 or ipVersion) is None", which is never true, so the
        # intended "unable to determine IPV6 address" error was never raised.
        if ipVersion == 6 or (ipVersion is None and config.PREFER_IP_VERSION == 6):
            raise socket.error("unable to determine IPV6 address")
        return getaddr(0)
def getInterfaceAddress(ip_address):
    """tries to find the ip address of the interface that connects to the given host's address"""
    version = getIpVersion(ip_address)
    family = socket.AF_INET if version == 4 else socket.AF_INET6
    probe = socket.socket(family, socket.SOCK_DGRAM)
    try:
        # connect() on a UDP socket sends no packets; it just selects the
        # outgoing interface, whose address we then read back.
        probe.connect((ip_address, 53))
        return probe.getsockname()[0]
    finally:
        probe.close()
def __nextRetrydelay(delay):
    # Backoff schedule for socket retries: 0 -> 1 ms -> 10 ms, then +100 ms per step.
    schedule = {0.0: 0.001, 0.001: 0.01}
    return schedule.get(delay, delay + 0.1)
def receiveData(sock, size):
    """Retrieve a given number of bytes from a socket.
    It is expected the socket is able to supply that number of bytes.
    If it isn't, an exception is raised (you will not get a zero length result
    or a result that is smaller than what you asked for). The partial data that
    has been received however is stored in the 'partialData' attribute of
    the exception object."""
    try:
        retrydelay = 0.0
        msglen = 0
        chunks = []
        # Fast path: MSG_WAITALL asks the kernel to return only when the full
        # size is available.  Skipped for SSL sockets (getpeercert present),
        # which don't support recv flags.
        if config.USE_MSG_WAITALL:
            if not hasattr(sock, "getpeercert"):
                while True:
                    try:
                        data = sock.recv(size, socket.MSG_WAITALL)
                        if len(data) == size:
                            return data
                        # short read: fall through to the chunked loop below
                        msglen = len(data)
                        chunks = [data]
                        break
                    except socket.timeout:
                        raise TimeoutError("receiving: timeout")
                    except socket.error as x:
                        try:
                            err = getattr(x, "errno", x.args[0])
                            if err not in ERRNO_RETRIES:
                                raise ConnectionClosedError("receiving: connection lost: " + str(x))
                            # transient errno: back off briefly and retry
                            time.sleep(1e-05 + retrydelay)
                            retrydelay = __nextRetrydelay(retrydelay)
                        finally:
                            # decompiler residue of py3 except-variable cleanup; harmless
                            x = None
                            del x
        # General path: accumulate chunks until the requested size is reached.
        while True:
            try:
                while msglen < size:
                    # 60000-byte cap avoids problems on some OSes with huge recv sizes
                    chunk = sock.recv(min(60000, size - msglen))
                    if not chunk:
                        break
                    chunks.append(chunk)
                    msglen += len(chunk)
                data = (b'').join(chunks)
                del chunks
                if len(data) != size:
                    # peer closed early; expose what we did get on the exception
                    err = ConnectionClosedError("receiving: not enough data")
                    err.partialData = data
                    raise err
                return data
            except socket.timeout:
                raise TimeoutError("receiving: timeout")
            except socket.error:
                x = sys.exc_info()[1]
                err = getattr(x, "errno", x.args[0])
                if err not in ERRNO_RETRIES:
                    raise ConnectionClosedError("receiving: connection lost: " + str(x))
                time.sleep(1e-05 + retrydelay)
                retrydelay = __nextRetrydelay(retrydelay)
    except socket.timeout:
        raise TimeoutError("receiving: timeout")
def sendData(sock, data):
    """
    Send some data over a socket.
    Some systems have problems with ``sendall()`` when the socket is in non-blocking mode.
    For instance, Mac OS X seems to be happy to throw EAGAIN errors too often.
    This function falls back to using a regular send loop if needed.
    """
    if sock.gettimeout() is None:
        # blocking socket: sendall() is reliable here
        try:
            sock.sendall(data)
            return
        except socket.timeout:
            raise TimeoutError("sending: timeout")
        except socket.error as x:
            raise ConnectionClosedError("sending: connection lost: " + str(x))
    else:
        # non-blocking / timeout socket: manual send loop with retry backoff
        retrydelay = 0.0
        while data:
            try:
                sent = sock.send(data)
                # BUG FIX (decompilation artifact): original read
                # "data = data[sent[:None]]", which raises TypeError; the unsent
                # remainder is simply data[sent:].
                data = data[sent:]
            except socket.timeout:
                raise TimeoutError("sending: timeout")
            except socket.error as x:
                err = getattr(x, "errno", x.args[0])
                if err not in ERRNO_RETRIES:
                    raise ConnectionClosedError("sending: connection lost: " + str(x))
                # transient errno: back off briefly and retry
                time.sleep(1e-05 + retrydelay)
                retrydelay = __nextRetrydelay(retrydelay)
# unique sentinel meaning "caller did not specify a timeout"
_GLOBAL_DEFAULT_TIMEOUT = object()
def createSocket(bind=None, connect=None, reuseaddr=False, keepalive=True, timeout=_GLOBAL_DEFAULT_TIMEOUT, noinherit=False, ipv6=False, nodelay=True, sslContext=None):
"""
Create a socket. Default socket options are keepalive and IPv4 family, and nodelay (nagle disabled).
If 'bind' or 'connect' is a string, it is assumed a Unix domain socket is requested.
Otherwise, a normal tcp/ip socket is used.
Set ipv6=True to create an IPv6 socket rather than IPv4.
Set ipv6=None to use the PREFER_IP_VERSION config setting.
"""
if bind:
if connect:
raise ValueError("bind and connect cannot both be specified at the same time")
forceIPv6 = ipv6 or ipv6 is None and config.PREFER_IP_VERSION == 6
if isinstance(bind, basestring) or isinstance(connect, basestring):
family = socket.AF_UNIX
else:
if not bind:
if not connect:
family = socket.AF_INET6 if forceIPv6 else socket.AF_INET
else:
if type(bind) is tuple:
if not bind[0]:
family = socket.AF_INET6 if forceIPv6 else socket.AF_INET
else:
if getIpVersion(bind[0]) == 4:
if forceIPv6:
raise ValueError("IPv4 address is used bind argument with forceIPv6 argument:" + bind[0] + ".")
family = socket.AF_INET
else:
if getIpVersion(bind[0]) == 6:
family = socket.AF_INET6
bind = (
bind[0], bind[1], 0, 0)
else:
raise ValueError("unknown bind format.")
else:
if type(connect) is tuple:
if not connect[0]:
family = socket.AF_INET6 if forceIPv6 else socket.AF_INET
else:
if getIpVersion(connect[0]) == 4:
if forceIPv6:
raise ValueError("IPv4 address is used in connect argument with forceIPv6 argument:" + bind[0] + ".")
family = socket.AF_INET
else:
if getIpVersion(connect[0]) == 6:
family = socket.AF_INET6
connect = (
connect[0], connect[1], 0, 0)
else:
raise ValueError("unknown connect format.")
else:
raise ValueError("unknown bind or connect format.")
else:
sock = socket.socket(family, socket.SOCK_STREAM)
if sslContext:
if bind:
sock = sslContext.wrap_socket(sock, server_side=True)
else:
if connect:
sock = sslContext.wrap_socket(sock, server_side=False, server_hostname=(connect[0]))
else:
sock = sslContext.wrap_socket(sock, server_side=False)
if nodelay:
setNoDelay(sock)
if reuseaddr:
setReuseAddr(sock)
if noinherit:
setNoInherit(sock)
if timeout == 0:
timeout = None
if timeout is not _GLOBAL_DEFAULT_TIMEOUT:
sock.settimeout(timeout)
if bind:
if type(bind) is tuple and bind[1] == 0:
bindOnUnusedPort(sock, bind[0])
else:
sock.bind(bind)
try:
sock.listen(100)
except (OSError, IOError):
pass
if connect:
try:
sock.connect(connect)
except socket.error:
xv = sys.exc_info()[1]
errno = getattr(xv, "errno", 0)
if errno in ERRNO_RETRIES and not timeout is _GLOBAL_DEFAULT_TIMEOUT:
if timeout < 0.1:
timeout = 0.1
while True:
try:
sr, sw, se = select.select([], [sock], [sock], timeout)
except InterruptedError:
continue
if sock in sw:
break
elif sock in se:
sock.close()
raise socket.error("connect failed")
else:
sock.close()
raise
if keepalive:
setKeepalive(sock)
return sock
def createBroadcastSocket(bind=None, reuseaddr=False, timeout=_GLOBAL_DEFAULT_TIMEOUT, ipv6=False):
"""
Create a udp broadcast socket.
Set ipv6=True to create an IPv6 socket rather than IPv4.
Set ipv6=None to use the PREFER_IP_VERSION config setting.
"""
forceIPv6 = ipv6 or ipv6 is None and config.PREFER_IP_VERSION == 6
if not bind:
family = socket.AF_INET6 if forceIPv6 else socket.AF_INET
else:
if type(bind) is tuple:
if not bind[0]:
family = socket.AF_INET6 if forceIPv6 else socket.AF_INET
else:
if getIpVersion(bind[0]) == 4:
if forceIPv6:
raise ValueError("IPv4 address is used with forceIPv6 option:" + bind[0] + ".")
family = socket.AF_INET
else:
if getIpVersion(bind[0]) == 6:
family = socket.AF_INET6
bind = (bind[0], bind[1], 0, 0)
else:
raise ValueError("unknown bind format: %r" % (bind,))
else:
raise ValueError("unknown bind format: %r" % (bind,))
sock = socket.socket(family, socket.SOCK_DGRAM)
if family == socket.AF_INET:
sock.setsockopt(socket.SOL_SOCKET, socket.SO_BROADCAST, 1)
if reuseaddr:
setReuseAddr(sock)
if timeout is None:
sock.settimeout(None)
else:
if timeout is not _GLOBAL_DEFAULT_TIMEOUT:
sock.settimeout(timeout)
elif bind:
host = bind[0] or ""
port = bind[1]
if port == 0:
bindOnUnusedPort(sock, host)
else:
if len(bind) == 2:
sock.bind((host, port))
else:
if len(bind) == 4:
sock.bind((host, port, 0, 0))
else:
raise ValueError("bind must be None, 2-tuple or 4-tuple")
return sock
def setReuseAddr(sock):
    """Enable SO_REUSEADDR on the socket; failures are silently ignored."""
    try:
        sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
    except Exception:
        # best effort only: some platforms/socket types reject the option
        pass
def setNoDelay(sock):
    """Enable TCP_NODELAY (disable Nagle's algorithm); failures are silently ignored."""
    try:
        sock.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)
    except Exception:
        # best effort only: non-TCP sockets and some platforms reject this
        pass
def setKeepalive(sock):
    """Enable SO_KEEPALIVE on the socket; failures are silently ignored."""
    try:
        sock.setsockopt(socket.SOL_SOCKET, socket.SO_KEEPALIVE, 1)
    except Exception:
        # best effort only
        pass
# Pick a platform-specific implementation of setNoInherit at import time:
# POSIX uses fcntl FD_CLOEXEC, Windows uses SetHandleInformation, and
# platforms with neither get a no-op.
try:
    import fcntl

    def setNoInherit(sock):
        """Mark the given socket fd as non-inheritable to child processes"""
        fd = sock.fileno()
        flags = fcntl.fcntl(fd, fcntl.F_GETFD)
        fcntl.fcntl(fd, fcntl.F_SETFD, flags | fcntl.FD_CLOEXEC)
except ImportError:
    try:
        if sys.platform == "cli":
            raise NotImplementedError("IronPython can't obtain a proper HANDLE from a socket")
        from ctypes import windll, WinError, wintypes
        # kernel32.SetHandleInformation(handle, HANDLE_FLAG_INHERIT=1, 0) clears inheritance
        _SetHandleInformation = windll.kernel32.SetHandleInformation
        _SetHandleInformation.argtypes = [wintypes.HANDLE, wintypes.DWORD, wintypes.DWORD]
        _SetHandleInformation.restype = wintypes.BOOL

        def setNoInherit(sock):
            """Mark the given socket fd as non-inheritable to child processes"""
            if not _SetHandleInformation(sock.fileno(), 1, 0):
                raise WinError()
    except (ImportError, NotImplementedError):
        def setNoInherit(sock):
            """Mark the given socket fd as non-inheritable to child processes (dummy)"""
            pass
class SocketConnection(object):
    """A wrapper class for plain sockets, containing various methods such as :meth:`send` and :meth:`recv`"""

    def __init__(self, sock, objectId=None, keep_open=False):
        self.sock = sock
        self.objectId = objectId
        self.pyroInstances = {}                      # pyro objects living on this connection
        self.tracked_resources = weakref.WeakSet()   # resources closed together with the connection
        self.keep_open = keep_open

    def __del__(self):
        self.close()

    def __enter__(self):
        return self

    def __exit__(self, exc_type, exc_val, exc_tb):
        self.close()

    def send(self, data):
        sendData(self.sock, data)

    def recv(self, size):
        return receiveData(self.sock, size)

    def close(self):
        """Shut down and close the socket and release tracked resources (unless keep_open)."""
        if self.keep_open:
            return
        try:
            self.sock.shutdown(socket.SHUT_RDWR)
        except:
            pass
        try:
            self.sock.close()
        except:
            pass
        self.pyroInstances.clear()
        for rsc in self.tracked_resources:
            try:
                rsc.close()
            except Exception:
                pass
        self.tracked_resources.clear()

    def fileno(self):
        return self.sock.fileno()

    def family(self):
        return family_str(self.sock)

    def setTimeout(self, timeout):
        self.sock.settimeout(timeout)

    def getTimeout(self):
        return self.sock.gettimeout()

    def getpeercert(self):
        # plain (non-SSL) sockets have no getpeercert attribute
        try:
            return self.sock.getpeercert()
        except AttributeError:
            return None

    timeout = property(getTimeout, setTimeout)
def family_str(sock):
    """Return a short human-readable name for the socket's address family."""
    names = {socket.AF_INET: "IPv4", socket.AF_INET6: "IPv6"}
    if hasattr(socket, "AF_UNIX"):
        names[socket.AF_UNIX] = "Unix"
    return names.get(sock.family, "???")
def findProbablyUnusedPort(family=socket.AF_INET, socktype=socket.SOCK_STREAM):
    """Returns an unused port that should be suitable for binding (likely, but not guaranteed).
    This code is copied from the stdlib's test.test_support module."""
    tempsock = socket.socket(family, socktype)
    try:
        port = bindOnUnusedPort(tempsock)
    finally:
        tempsock.close()
    # IronPython quirk: the just-released port may still be unusable
    return port + 1 if sys.platform == "cli" else port
def bindOnUnusedPort(sock, host='localhost'):
    """Bind the socket to a free port and return the port number.
    This code is based on the code in the stdlib's test.test_support module."""
    if sock.family in (socket.AF_INET, socket.AF_INET6) and sock.type == socket.SOCK_STREAM:
        if hasattr(socket, "SO_EXCLUSIVEADDRUSE"):
            # Windows: claim exclusive use so another process can't hijack the port
            try:
                sock.setsockopt(socket.SOL_SOCKET, socket.SO_EXCLUSIVEADDRUSE, 1)
            except socket.error:
                pass
    if sock.family == socket.AF_INET:
        if host == "localhost":
            sock.bind(('127.0.0.1', 0))
        else:
            sock.bind((host, 0))
    elif sock.family == socket.AF_INET6:
        if host == "localhost":
            sock.bind(('::1', 0, 0, 0))
        else:
            sock.bind((host, 0, 0, 0))
    else:
        # BUG FIX: the original concatenated the address-family constant directly
        # onto the string, raising TypeError instead of the intended error.
        raise CommunicationError("unsupported socket family: " + str(sock.family))
    return sock.getsockname()[1]
def interruptSocket(address):
    """bit of a hack to trigger a blocking server to get out of the loop, useful at clean shutdowns"""
    try:
        # connect to the server's own listening socket so its accept() returns
        sock = createSocket(connect=address, keepalive=False, timeout=None)
        try:
            # payload content is irrelevant; the connection itself is the wakeup
            sock.sendall(b'!!!!!!!!!!!!!!!!')
        except (socket.error, AttributeError):
            pass
        try:
            sock.shutdown(socket.SHUT_RDWR)
        except (OSError, socket.error):
            pass
        sock.close()
    except socket.error:
        # server already gone; nothing to interrupt
        pass
__ssl_server_context = None
__ssl_client_context = None
def getSSLcontext(servercert='', serverkey='', clientcert='', clientkey='', cacerts='', keypassword=''):
    """creates an SSL context and caches it, so you have to set the parameters correctly before doing anything"""
    global __ssl_client_context
    global __ssl_server_context
    if not ssl:
        raise ValueError("SSL requested but ssl module is not available")
    else:
        if sys.version_info < (2, 7, 11):
            raise RuntimeError("need Python 2.7.11 or newer to properly use SSL")
        else:
            if servercert:
                # server-side context (mutually exclusive with a client cert)
                if clientcert:
                    raise ValueError("can't have both server cert and client cert")
                elif __ssl_server_context:
                    # already built once: return the cached context
                    return __ssl_server_context
                if not os.path.isfile(servercert):
                    raise IOError("server cert file not found")
                if serverkey:
                    if not os.path.isfile(serverkey):
                        raise IOError("server key file not found")
                __ssl_server_context = ssl.create_default_context(ssl.Purpose.CLIENT_AUTH)
                __ssl_server_context.load_cert_chain(servercert, serverkey or None, keypassword or None)
                if cacerts:
                    # cacerts may be a directory of certs or a single bundle file
                    if os.path.isdir(cacerts):
                        __ssl_server_context.load_verify_locations(capath=cacerts)
                    else:
                        __ssl_server_context.load_verify_locations(cafile=cacerts)
                if config.SSL_REQUIRECLIENTCERT:
                    __ssl_server_context.verify_mode = ssl.CERT_REQUIRED
                else:
                    __ssl_server_context.verify_mode = ssl.CERT_NONE
                return __ssl_server_context
            # client-side context (also cached)
            if __ssl_client_context:
                return __ssl_client_context
            __ssl_client_context = ssl.create_default_context(ssl.Purpose.SERVER_AUTH)
            if clientcert:
                if not os.path.isfile(clientcert):
                    raise IOError("client cert file not found")
                else:
                    __ssl_client_context.load_cert_chain(clientcert, clientkey or None, keypassword or None)
            if cacerts:
                if os.path.isdir(cacerts):
                    __ssl_client_context.load_verify_locations(capath=cacerts)
                else:
                    __ssl_client_context.load_verify_locations(cafile=cacerts)
            return __ssl_client_context

View File

@@ -0,0 +1,8 @@
# uncompyle6 version 3.9.2
# Python bytecode version base 3.7.0 (3394)
# Decompiled from: Python 3.8.19 (default, Mar 20 2024, 15:27:52)
# [Clang 14.0.6 ]
# Embedded file name: /var/user/app/device_supervisorbak/device_supervisor/lib/Pyro4/test/__init__.py
# Compiled at: 2024-04-18 03:12:55
# Size of source mod 2**32: 31 bytes
pass

View File

@@ -0,0 +1,208 @@
# uncompyle6 version 3.9.2
# Python bytecode version base 3.7.0 (3394)
# Decompiled from: Python 3.8.19 (default, Mar 20 2024, 15:27:52)
# [Clang 14.0.6 ]
# Embedded file name: /var/user/app/device_supervisorbak/device_supervisor/lib/Pyro4/test/echoserver.py
# Compiled at: 2024-04-18 03:12:55
# Size of source mod 2**32: 7561 bytes
"""
Echo server for test purposes.
This is usually invoked by starting this module as a script:
:command:`python -m Pyro4.test.echoserver`
or simply: :command:`pyro4-test-echoserver`
It is also possible to use the :class:`EchoServer` in user code
but that is not terribly useful.
Pyro - Python Remote Objects. Copyright by Irmen de Jong (irmen@razorvine.net).
"""
from __future__ import print_function
import sys, os, time, warnings, threading
from optparse import OptionParser
from Pyro4 import core, naming
from Pyro4.configuration import config
__all__ = [
"EchoServer"]
@core.expose
class EchoServer(object):
    """
    The echo server object that is provided as a Pyro object by this module.
    If its :attr:`verbose` attribute is set to ``True``, it will print messages as it receives calls.
    """
    _verbose = False
    _must_shutdown = False

    def echo(self, message):
        """return the message"""
        if self._verbose:
            enc = sys.stdout.encoding
            message_str = repr(message).encode(enc, errors="replace").decode(enc)
            print("%s - echo: %s" % (time.asctime(), message_str))
        return message

    def error(self):
        """generates a simple exception without text"""
        if self._verbose:
            print("%s - error: generating exception" % time.asctime())
        raise ValueError("expected error from echoserver error() method")

    def error_with_text(self):
        """generates a simple exception with message"""
        if self._verbose:
            print("%s - error: generating exception" % time.asctime())
        raise ValueError("the message of the error")

    @core.oneway
    def oneway_echo(self, message):
        """just like echo, but oneway; the client won't wait for response"""
        if self._verbose:
            enc = sys.stdout.encoding
            message_str = repr(message).encode(enc, errors="replace").decode(enc)
            print("%s - oneway_echo: %s" % (time.asctime(), message_str))
        return "bogus return value"

    def slow(self):
        """returns (and prints) a message after a certain delay"""
        if self._verbose:
            print("%s - slow: waiting a bit..." % time.asctime())
        time.sleep(5)
        if self._verbose:
            print("%s - slow: returning result" % time.asctime())
        return "Finally, an answer!"

    def generator(self):
        """a generator function that returns some elements on demand"""
        for item in ("one", "two", "three"):
            yield item

    def nan(self):
        return float("nan")

    def inf(self):
        return float("inf")

    @core.oneway
    def oneway_slow(self):
        """prints a message after a certain delay, and returns; but the client won't wait for it"""
        if self._verbose:
            print("%s - oneway_slow: waiting a bit..." % time.asctime())
        time.sleep(5)
        if self._verbose:
            print("%s - oneway_slow: returning result" % time.asctime())
        return "bogus return value"

    def _private(self):
        """a 'private' method that should not be accessible"""
        return "should not be allowed"

    def __private(self):
        """another 'private' method that should not be accessible"""
        return "should not be allowed"

    def __dunder__(self):
        """a double underscore method that should be accessible normally"""
        return "should be allowed (dunder)"

    def shutdown(self):
        """called to signal the echo server to shut down"""
        if self._verbose:
            print("%s - shutting down" % time.asctime())
        self._must_shutdown = True

    @property
    def verbose(self):
        return self._verbose

    @verbose.setter
    def verbose(self, onoff):
        self._verbose = bool(onoff)
class NameServer(threading.Thread):
    """Daemon thread that hosts a Pyro name server (plus optional broadcast server)."""

    def __init__(self, hostname, hmac=None):
        super(NameServer, self).__init__()
        self.daemon = True              # same effect as the old setDaemon(1)
        self.hostname = hostname
        self.hmac = hmac
        self.started = threading.Event()  # set once the NS daemon is up

    def run(self):
        self.uri, self.ns_daemon, self.bc_server = naming.startNS(self.hostname, hmac=self.hmac)
        self.started.set()
        if self.bc_server:
            self.bc_server.runInThread()
        self.ns_daemon.requestLoop()
def startNameServer(host, hmac=None):
    """Start a NameServer thread and block until it is ready to serve."""
    server = NameServer(host, hmac=hmac)
    server.start()
    server.started.wait()  # don't return before the daemon is listening
    return server
def main(args=None, returnWithoutLooping=False):
    # Command-line entry point: parse options, optionally start a name server,
    # then run the echo server daemon until it is told to shut down.
    parser = OptionParser()
    parser.add_option("-H", "--host", default="localhost", help="hostname to bind server on (default=%default)")
    parser.add_option("-p", "--port", type="int", default=0, help="port to bind server on")
    parser.add_option("-u", "--unixsocket", help="Unix domain socket name to bind server on")
    parser.add_option("-n", "--naming", action="store_true", default=False, help="register with nameserver")
    parser.add_option("-N", "--nameserver", action="store_true", default=False, help="also start a nameserver")
    parser.add_option("-v", "--verbose", action="store_true", default=False, help="verbose output")
    parser.add_option("-q", "--quiet", action="store_true", default=False, help="don't output anything")
    parser.add_option("-k", "--key", help="the HMAC key to use (deprecated)")
    options, args = parser.parse_args(args)
    if options.key:
        warnings.warn("using -k to supply HMAC key on the command line is a security problem and is deprecated since Pyro 4.72. See the documentation for an alternative.")
    # the environment variable takes over as the preferred way to pass the key
    if "PYRO_HMAC_KEY" in os.environ:
        if options.key:
            raise SystemExit("error: don't use -k and PYRO_HMAC_KEY at the same time")
        options.key = os.environ["PYRO_HMAC_KEY"]
    if options.verbose:
        options.quiet = False
    if not options.quiet:
        print("Starting Pyro's built-in test echo server.")
    config.SERVERTYPE = "multiplex"
    hmac = (options.key or "").encode("utf-8")
    if not hmac:
        if not options.quiet:
            print("Warning: HMAC key not set. Anyone can connect to this server!")
    nameserver = None
    if options.nameserver:
        # -N implies -n: start our own NS and register with it
        options.naming = True
        nameserver = startNameServer((options.host), hmac=hmac)
    d = core.Daemon(host=(options.host), port=(options.port), unixsocket=(options.unixsocket))
    if hmac:
        d._pyroHmacKey = hmac
    echo = EchoServer()
    echo._verbose = options.verbose
    objectName = "test.echoserver"
    uri = d.register(echo, objectName)
    if options.naming:
        # locate the NS (ours if we started one, otherwise via discovery)
        host, port = (None, None)
        if nameserver is not None:
            host, port = nameserver.uri.host, nameserver.uri.port
        ns = naming.locateNS(host, port, hmac_key=hmac)
        ns.register(objectName, uri)
        if options.verbose:
            print("using name server at %s" % ns._pyroUri)
            if nameserver is not None:
                if nameserver.bc_server:
                    print("broadcast server running at %s" % nameserver.bc_server.locationStr)
                else:
                    print("not using a broadcast server")
    else:
        if options.verbose:
            print("not using a name server.")
    if not options.quiet:
        print("object name: %s" % objectName)
        print("echo uri: %s" % uri)
        print("echoserver running.")
    if returnWithoutLooping:
        # used by tests: hand back the daemon instead of blocking in the loop
        return (
         d, echo, uri)
    # serve until the remote shutdown() call flips _must_shutdown
    d.requestLoop(loopCondition=(lambda: not echo._must_shutdown))
    d.close()
# script entry point: run the echo server with command-line options
if __name__ == "__main__":
    main()

View File

@@ -0,0 +1,976 @@
# uncompyle6 version 3.9.2
# Python bytecode version base 3.7.0 (3394)
# Decompiled from: Python 3.8.19 (default, Mar 20 2024, 15:27:52)
# [Clang 14.0.6 ]
# Embedded file name: /var/user/app/device_supervisorbak/device_supervisor/lib/Pyro4/util.py
# Compiled at: 2024-04-18 03:12:55
# Size of source mod 2**32: 41036 bytes
"""
Miscellaneous utilities, and serializers.
Pyro - Python Remote Objects. Copyright by Irmen de Jong (irmen@razorvine.net).
"""
import array, sys, zlib, uuid, logging, linecache, traceback, inspect, struct, datetime, decimal, numbers
from Pyro4 import errors
from Pyro4.configuration import config
try:
import copyreg
except ImportError:
import copy_reg as copyreg
log = logging.getLogger("Pyro4.util")
def getPyroTraceback(ex_type=None, ex_value=None, ex_tb=None):
    """Returns a list of strings that form the traceback information of a
    Pyro exception. Any remote Pyro exception information is included.
    Traceback information is automatically obtained via ``sys.exc_info()`` if
    you do not supply the objects yourself."""
    def formatRemoteTraceback(remote_tb_lines):
        # indent the remote traceback so it reads as a nested section
        result = [
         " +--- This exception occured remotely (Pyro) - Remote traceback:"]
        for line in remote_tb_lines:
            if line.endswith("\n"):
                # BUG FIX (decompilation artifact): original read
                # "line[None[:-1]]", which raises TypeError; strip the
                # trailing newline with a plain slice.
                line = line[:-1]
            lines = line.split("\n")
            for line2 in lines:
                result.append("\n | ")
                result.append(line2)
        result.append("\n +--- End of remote traceback\n")
        return result

    try:
        if ex_type is not None and ex_value is None and ex_tb is None:
            # only the first argument was supplied: that's not a valid combination
            if type(ex_type) is not type:
                raise TypeError("invalid argument: ex_type should be an exception type, or just supply no arguments at all")
        if ex_type is None and ex_tb is None:
            ex_type, ex_value, ex_tb = sys.exc_info()
        remote_tb = getattr(ex_value, "_pyroTraceback", None)
        local_tb = formatTraceback(ex_type, ex_value, ex_tb, config.DETAILED_TRACEBACK)
        if remote_tb:
            remote_tb = formatRemoteTraceback(remote_tb)
            return local_tb + remote_tb
        return local_tb
    finally:
        # break reference cycles through the traceback objects
        del ex_type
        del ex_value
        del ex_tb
def formatTraceback(ex_type=None, ex_value=None, ex_tb=None, detailed=False):
    """Formats an exception traceback. If you ask for detailed formatting,
    the result will contain info on the variables in each stack frame.
    You don't have to provide the exception info objects, if you omit them,
    this function will obtain them itself using ``sys.exc_info()``."""
    if ex_type is not None:
        if ex_value is None:
            if ex_tb is None:
                # only the first argument was supplied: reject invalid usage
                if type(ex_type) is not type:
                    raise TypeError("invalid argument: ex_type should be an exception type, or just supply no arguments at all")
    if ex_type is None:
        if ex_tb is None:
            ex_type, ex_value, ex_tb = sys.exc_info()
    # detailed mode walks the frames by hand; not supported on IronPython ("cli")
    if detailed and sys.platform != "cli":
        def makeStrValue(value):
            # best-effort stringification: repr, then str, then a placeholder
            try:
                return repr(value)
            except:
                try:
                    return str(value)
                except:
                    return "<ERROR>"
        try:
            result = [
             "----------------------------------------------------\n"]
            result.append(" EXCEPTION %s: %s\n" % (ex_type, ex_value))
            result.append(" Extended stacktrace follows (most recent call last)\n")
            # the outermost frame's locals are skipped (usually uninteresting)
            skipLocals = True
            while ex_tb:
                frame = ex_tb.tb_frame
                sourceFileName = frame.f_code.co_filename
                if "self" in frame.f_locals:
                    # method frame: prefix the location with the class name
                    location = "%s.%s" % (frame.f_locals["self"].__class__.__name__, frame.f_code.co_name)
                else:
                    location = frame.f_code.co_name
                result.append("----------------------------------------------------\n")
                result.append('File "%s", line %d, in %s\n' % (sourceFileName, ex_tb.tb_lineno, location))
                result.append("Source code:\n")
                result.append("    " + linecache.getline(sourceFileName, ex_tb.tb_lineno).strip() + "\n")
                if not skipLocals:
                    # collect every name the code object could reference
                    names = set()
                    names.update(getattr(frame.f_code, "co_varnames", ()))
                    names.update(getattr(frame.f_code, "co_names", ()))
                    names.update(getattr(frame.f_code, "co_cellvars", ()))
                    names.update(getattr(frame.f_code, "co_freevars", ()))
                    result.append("Local values:\n")
                    for name2 in sorted(names):
                        if name2 in frame.f_locals:
                            value = frame.f_locals[name2]
                            result.append("    %s = %s\n" % (name2, makeStrValue(value)))
                            if name2 == "self":
                                # also dump the instance attributes
                                for name3, value in vars(value).items():
                                    result.append("    self.%s = %s\n" % (name3, makeStrValue(value)))
                skipLocals = False
                ex_tb = ex_tb.tb_next
            result.append("----------------------------------------------------\n")
            result.append(" EXCEPTION %s: %s\n" % (ex_type, ex_value))
            result.append("----------------------------------------------------\n")
            return result
        except Exception:
            # building the fancy traceback failed: fall back to both tracebacks
            return [
             "----------------------------------------------------\nError building extended traceback!!! :\n",
             "".join((traceback.format_exception)(*sys.exc_info())) + "----------------------------------------------------" + "\n",
             "Original Exception follows:\n",
             "".join(traceback.format_exception(ex_type, ex_value, ex_tb))]
    else:
        # plain mode: defer to the stdlib formatter
        return traceback.format_exception(ex_type, ex_value, ex_tb)
# Build a name -> class registry of all builtin exception types (plus Pyro's
# own errors below); serializers use it to reconstruct exceptions received
# over the wire.
all_exceptions = {}
if sys.version_info < (3, 0):
    import exceptions
    for name, t in vars(exceptions).items():
        if type(t) is type and issubclass(t, BaseException):
            all_exceptions[name] = t
else:
    import builtins
    for name, t in vars(builtins).items():
        if type(t) is type and issubclass(t, BaseException):
            all_exceptions[name] = t
    # py3 alias for the removed py2 'buffer' type
    buffer = bytearray
for name, t in vars(errors).items():
    if type(t) is type and issubclass(t, errors.PyroError):
        all_exceptions[name] = t
class SerializerBase(object):
__doc__ = "Base class for (de)serializer implementations (which must be thread safe)"
_SerializerBase__custom_class_to_dict_registry = {}
_SerializerBase__custom_dict_to_class_registry = {}
def serializeData(self, data, compress=False):
"""Serialize the given data object, try to compress if told so.
Returns a tuple of the serialized data (bytes) and a bool indicating if it is compressed or not."""
data = self.dumps(data)
return self._SerializerBase__compressdata(data, compress)
def deserializeData(self, data, compressed=False):
"""Deserializes the given data (bytes). Set compressed to True to decompress the data first."""
if compressed:
if sys.version_info < (3, 0):
data = self._convertToBytes(data)
data = zlib.decompress(data)
return self.loads(data)
def serializeCall(self, obj, method, vargs, kwargs, compress=False):
"""Serialize the given method call parameters, try to compress if told so.
Returns a tuple of the serialized data and a bool indicating if it is compressed or not."""
data = self.dumpsCall(obj, method, vargs, kwargs)
return self._SerializerBase__compressdata(data, compress)
def deserializeCall(self, data, compressed=False):
"""Deserializes the given call data back to (object, method, vargs, kwargs) tuple.
Set compressed to True to decompress the data first."""
if compressed:
if sys.version_info < (3, 0):
data = self._convertToBytes(data)
data = zlib.decompress(data)
return self.loadsCall(data)
    # --- abstract serializer protocol; concrete serializers implement these ---
    def loads(self, data):
        # deserialize raw bytes into an object
        raise NotImplementedError("implement in subclass")
    def loadsCall(self, data):
        # deserialize raw bytes into an (object, method, vargs, kwargs) call tuple
        raise NotImplementedError("implement in subclass")
    def dumps(self, data):
        # serialize an object to raw bytes
        raise NotImplementedError("implement in subclass")
    def dumpsCall(self, obj, method, vargs, kwargs):
        # serialize a method call to raw bytes
        raise NotImplementedError("implement in subclass")
def _convertToBytes(self, data):
t = type(data)
if t is not bytes:
if t in (bytearray, buffer):
return bytes(data)
if t is memoryview:
return data.tobytes()
return data
def __compressdata(self, data, compress):
if not compress or len(data) < 200:
return (
data, False)
compressed = zlib.compress(data)
if len(compressed) < len(data):
return (
compressed, True)
return (
data, False)
    @classmethod
    def register_type_replacement(cls, object_type, replacement_function):
        # abstract: each serializer registers its own type-replacement hook
        raise NotImplementedError("implement in subclass")
    @classmethod
    def register_class_to_dict(cls, clazz, converter, serpent_too=True):
        """Registers a custom function that returns a dict representation of objects of the given class.
        The function is called with a single parameter; the object to be converted to a dict."""
        cls._SerializerBase__custom_class_to_dict_registry[clazz] = converter
        if serpent_too:
            try:
                # only touch serpent when the serpent serializer is registered;
                # get_serializer_by_id raises ProtocolError otherwise
                get_serializer_by_id(SerpentSerializer.serializer_id)
                import serpent
                def serpent_converter(obj, serializer, stream, level):
                    # adapt the dict converter to serpent's streaming hook
                    d = converter(obj)
                    serializer.ser_builtins_dict(d, stream, level)
                serpent.register_class(clazz, serpent_converter)
            except errors.ProtocolError:
                pass
@classmethod
def unregister_class_to_dict(cls, clazz):
    """Removes the to-dict conversion function registered for the given class. Objects of the class
    will be serialized by the default mechanism again."""
    # NOTE: the decompiler expanded the name-mangled attribute __custom_class_to_dict_registry
    if clazz in cls._SerializerBase__custom_class_to_dict_registry:
        del cls._SerializerBase__custom_class_to_dict_registry[clazz]
    try:
        # also remove the serpent-side registration if the serpent serializer is available
        get_serializer_by_id(SerpentSerializer.serializer_id)
        import serpent
        serpent.unregister_class(clazz)
    except errors.ProtocolError:
        pass
@classmethod
def register_dict_to_class(cls, classname, converter):
    """
    Registers a custom converter function that creates objects from a dict with the given classname tag in it.
    The function is called with two parameters: the classname and the dictionary to convert to an instance of the class.
    This mechanism is not used for the pickle serializer.
    """
    # NOTE: the decompiler expanded the name-mangled attribute __custom_dict_to_class_registry
    cls._SerializerBase__custom_dict_to_class_registry[classname] = converter
@classmethod
def unregister_dict_to_class(cls, classname):
    """
    Removes the converter registered for the given classname. Dicts with that classname tag
    will be deserialized by the default mechanism again.
    This mechanism is not used for the pickle serializer.
    """
    if classname in cls._SerializerBase__custom_dict_to_class_registry:
        del cls._SerializerBase__custom_dict_to_class_registry[classname]
@classmethod
def class_to_dict(cls, obj):
    """
    Convert a non-serializable object to a dict. Partly borrowed from serpent.
    Not used for the pickle serializer.

    Fixed: the decompiler nested the ``vars()`` fallback inside the
    ``__getstate__`` try/else, so any object *without* a ``__getstate__``
    method skipped the vars()/__slots__ fallbacks and wrongly raised
    SerializeError. Restored the upstream control flow.
    """
    # user-registered converters take precedence
    for clazz in cls._SerializerBase__custom_class_to_dict_registry:
        if isinstance(obj, clazz):
            return cls._SerializerBase__custom_class_to_dict_registry[clazz](obj)
    if type(obj) in (set, dict, tuple, list):
        # these should have been handled by the serializer itself
        raise ValueError("can't serialize type " + str(obj.__class__) + " into a dict")
    if hasattr(obj, "_pyroDaemon"):
        # never serialize a reference to the daemon along with the object
        obj._pyroDaemon = None
    if isinstance(obj, BaseException):
        return {'__class__': obj.__class__.__module__ + "." + obj.__class__.__name__,
                '__exception__': True,
                'args': obj.args,
                'attributes': vars(obj)}
    # prefer the object's own __getstate__ if it yields a dict
    try:
        value = obj.__getstate__()
    except AttributeError:
        pass
    else:
        if isinstance(value, dict):
            return value
    # fall back to the instance __dict__, then to __slots__
    try:
        value = dict(vars(obj))
        value["__class__"] = obj.__class__.__module__ + "." + obj.__class__.__name__
        return value
    except TypeError:
        if hasattr(obj, "__slots__"):
            value = {slot: getattr(obj, slot) for slot in obj.__slots__}
            value["__class__"] = obj.__class__.__module__ + "." + obj.__class__.__name__
            return value
        raise errors.SerializeError("don't know how to serialize class " + str(obj.__class__) + " using serializer " + str(cls.__name__) + ". Give it vars() or an appropriate __getstate__")
@classmethod
def dict_to_class(cls, data):
    """
    Recreate an object out of a dict containing the class name and the attributes.
    Only a fixed set of classes are recognized.
    Not used for the pickle serializer.
    """
    from Pyro4 import core, futures  # local import to avoid a circular module dependency
    classname = data.get("__class__", "<unknown>")
    if isinstance(classname, bytes):
        # some serializers hand us the class tag as raw bytes
        classname = classname.decode("utf-8")
    # user-registered converters take precedence over the hardcoded cases below
    # NOTE: the decompiler expanded the name-mangled attribute __custom_dict_to_class_registry
    if classname in cls._SerializerBase__custom_dict_to_class_registry:
        converter = cls._SerializerBase__custom_dict_to_class_registry[classname]
        return converter(classname, data)
    if "__" in classname:
        # block dunder tricks such as "builtins.__import__"
        raise errors.SecurityError("refused to deserialize types with double underscores in their name: " + classname)
    # hardcoded reconstruction of the well-known Pyro types
    if classname.startswith("Pyro4.core."):
        if classname == "Pyro4.core.URI":
            uri = core.URI.__new__(core.URI)
            uri.__setstate_from_dict__(data["state"])
            return uri
        if classname == "Pyro4.core.Proxy":
            proxy = core.Proxy.__new__(core.Proxy)
            proxy.__setstate_from_dict__(data["state"])
            return proxy
        if classname == "Pyro4.core.Daemon":
            daemon = core.Daemon.__new__(core.Daemon)
            daemon.__setstate_from_dict__(data["state"])
            return daemon
    else:
        if classname.startswith("Pyro4.util."):
            if classname == "Pyro4.util.SerpentSerializer":
                return SerpentSerializer()
            if classname == "Pyro4.util.PickleSerializer":
                return PickleSerializer()
            if classname == "Pyro4.util.MarshalSerializer":
                return MarshalSerializer()
            if classname == "Pyro4.util.JsonSerializer":
                return JsonSerializer()
            if classname == "Pyro4.util.MsgpackSerializer":
                return MsgpackSerializer()
            if classname == "Pyro4.util.CloudpickleSerializer":
                return CloudpickleSerializer()
            if classname == "Pyro4.util.DillSerializer":
                return DillSerializer()
        elif classname.startswith("Pyro4.errors."):
            errortype = getattr(errors, classname.split(".", 2)[2])
            if issubclass(errortype, errors.PyroError):
                return SerializerBase.make_exception(errortype, data)
        elif classname == "Pyro4.futures._ExceptionWrapper":
            ex = data["exception"]
            if isinstance(ex, dict):
                if "__class__" in ex:
                    # the wrapped exception was itself serialized as a dict
                    ex = SerializerBase.dict_to_class(ex)
            return futures._ExceptionWrapper(ex)
    if data.get("__exception__", False):
        if classname in all_exceptions:
            return SerializerBase.make_exception(all_exceptions[classname], data)
        # python 2 tags exceptions as "exceptions.X", python 3 as "builtins.X"
        namespace, short_classname = classname.split(".", 1)
        if namespace in ('builtins', 'exceptions'):
            if sys.version_info < (3, 0):
                exceptiontype = getattr(exceptions, short_classname)
                if issubclass(exceptiontype, BaseException):
                    return SerializerBase.make_exception(exceptiontype, data)
            else:
                exceptiontype = getattr(builtins, short_classname)
                if issubclass(exceptiontype, BaseException):
                    return SerializerBase.make_exception(exceptiontype, data)
        else:
            if namespace == "sqlite3":
                if short_classname.endswith("Error"):
                    # only allow the *Error classes from sqlite3, nothing else
                    import sqlite3
                    exceptiontype = getattr(sqlite3, short_classname)
                    if issubclass(exceptiontype, BaseException):
                        return SerializerBase.make_exception(exceptiontype, data)
    log.warning("unsupported serialized class: " + classname)
    raise errors.SerializeError("unsupported serialized class: " + classname)
@staticmethod
def make_exception(exceptiontype, data):
    """Recreate an exception instance of the given type from its serialized
    ``args`` and optional ``attributes``."""
    exc = exceptiontype(*data["args"])
    for name, val in data.get("attributes", {}).items():
        setattr(exc, name, val)
    return exc
def recreate_classes(self, literal):
    """Recursively walk a deserialized literal structure and replace any dict
    carrying a ``__class__`` tag with the reconstructed object.

    Reconstructed from upstream Pyro4: the decompiler emitted a parse-error
    stub here ("LOAD_SETCOMP" at offset 20) instead of the method body.
    """
    t = type(literal)
    if t is set:
        return {self.recreate_classes(x) for x in literal}
    if t is tuple:
        return tuple(self.recreate_classes(x) for x in literal)
    if t is list:
        return [self.recreate_classes(x) for x in literal]
    if t is dict:
        if "__class__" in literal:
            return self.dict_to_class(literal)
        result = {}
        for key, value in literal.items():
            result[key] = self.recreate_classes(value)
        return result
    return literal
def __eq__(self, other):
    """this equality method is only to support the unit tests of this class"""
    # two serializers are equal when they are the same kind with the same state
    return isinstance(other, SerializerBase) and vars(self) == vars(other)
def __ne__(self, other):
    # Python 2 does not derive __ne__ from __eq__ automatically
    return not self.__eq__(other)

# defining __eq__ would otherwise set __hash__ to None; keep the default identity hash
__hash__ = object.__hash__
class PickleSerializer(SerializerBase):
    """
    A (de)serializer that wraps the Pickle serialization protocol.
    It can optionally compress the serialized data, and is thread safe.
    """
    serializer_id = 4

    def dumpsCall(self, obj, method, vargs, kwargs):
        return pickle.dumps((obj, method, vargs, kwargs), config.PICKLE_PROTOCOL_VERSION)

    def dumps(self, data):
        return pickle.dumps(data, config.PICKLE_PROTOCOL_VERSION)

    def loadsCall(self, data):
        # SECURITY NOTE: pickle.loads can execute arbitrary code; only use with trusted peers
        return pickle.loads(self._convertToBytes(data))

    def loads(self, data):
        return pickle.loads(self._convertToBytes(data))

    @classmethod
    def register_type_replacement(cls, object_type, replacement_function):
        """Have pickle serialize instances of object_type via their replacement's __reduce__."""
        if not (object_type is type or inspect.isclass(object_type)):
            raise ValueError("refusing to register replacement for a non-type or the type 'type' itself")

        def reduce_via_replacement(obj):
            return replacement_function(obj).__reduce__()

        try:
            copyreg.pickle(object_type, reduce_via_replacement)
        except TypeError:
            pass
class CloudpickleSerializer(SerializerBase):
    """
    A (de)serializer that wraps the Cloudpickle serialization protocol.
    It can optionally compress the serialized data, and is thread safe.
    """
    serializer_id = 7

    def dumpsCall(self, obj, method, vargs, kwargs):
        return cloudpickle.dumps((obj, method, vargs, kwargs), config.PICKLE_PROTOCOL_VERSION)

    def dumps(self, data):
        return cloudpickle.dumps(data, config.PICKLE_PROTOCOL_VERSION)

    def loadsCall(self, data):
        # SECURITY NOTE: cloudpickle deserialization can execute arbitrary code
        return cloudpickle.loads(data)

    def loads(self, data):
        return cloudpickle.loads(data)

    @classmethod
    def register_type_replacement(cls, object_type, replacement_function):
        """Have cloudpickle serialize instances of object_type via their replacement's __reduce__."""
        if not (object_type is type or inspect.isclass(object_type)):
            raise ValueError("refusing to register replacement for a non-type or the type 'type' itself")

        def reduce_via_replacement(obj):
            return replacement_function(obj).__reduce__()

        try:
            copyreg.pickle(object_type, reduce_via_replacement)
        except TypeError:
            pass
class DillSerializer(SerializerBase):
    """
    A (de)serializer that wraps the Dill serialization protocol.
    It can optionally compress the serialized data, and is thread safe.
    """
    serializer_id = 5

    def dumpsCall(self, obj, method, vargs, kwargs):
        return dill.dumps((obj, method, vargs, kwargs), config.DILL_PROTOCOL_VERSION)

    def dumps(self, data):
        return dill.dumps(data, config.DILL_PROTOCOL_VERSION)

    def loadsCall(self, data):
        # SECURITY NOTE: dill deserialization can execute arbitrary code
        return dill.loads(data)

    def loads(self, data):
        return dill.loads(data)

    @classmethod
    def register_type_replacement(cls, object_type, replacement_function):
        """Have dill serialize instances of object_type via their replacement's __reduce__."""
        if not (object_type is type or inspect.isclass(object_type)):
            raise ValueError("refusing to register replacement for a non-type or the type 'type' itself")

        def reduce_via_replacement(obj):
            return replacement_function(obj).__reduce__()

        try:
            copyreg.pickle(object_type, reduce_via_replacement)
        except TypeError:
            pass
class MarshalSerializer(SerializerBase):
    """(de)serializer that wraps the marshal serialization protocol."""
    serializer_id = 3

    def dumpsCall(self, obj, method, vargs, kwargs):
        # Reconstructed from upstream Pyro4: the decompiler emitted a parse-error
        # stub here ("LOAD_DICTCOMP" at offset 22) instead of the method body.
        # marshal only handles primitive types, so convert the call arguments first.
        vargs = [self.convert_obj_into_marshallable(value) for value in vargs]
        kwargs = {key: self.convert_obj_into_marshallable(value) for key, value in kwargs.items()}
        return marshal.dumps((obj, method, vargs, kwargs))

    def dumps(self, data):
        return marshal.dumps(self.convert_obj_into_marshallable(data))

    if sys.platform == "cli":
        # IronPython: marshal wants str rather than bytes-like data
        def loadsCall(self, data):
            if type(data) is not str:
                data = str(data)
            obj, method, vargs, kwargs = marshal.loads(data)
            vargs = self.recreate_classes(vargs)
            kwargs = self.recreate_classes(kwargs)
            return (obj, method, vargs, kwargs)

        def loads(self, data):
            if type(data) is not str:
                data = str(data)
            return self.recreate_classes(marshal.loads(data))
    else:
        def loadsCall(self, data):
            data = self._convertToBytes(data)
            obj, method, vargs, kwargs = marshal.loads(data)
            vargs = self.recreate_classes(vargs)
            kwargs = self.recreate_classes(kwargs)
            return (obj, method, vargs, kwargs)

        def loads(self, data):
            data = self._convertToBytes(data)
            return self.recreate_classes(marshal.loads(data))

    # the types marshal can serialize natively; everything else goes through class_to_dict
    marshalable_types = (str, int, float, type(None), bool, complex, bytes, bytearray,
                         tuple, set, frozenset, list, dict)
    if sys.version_info < (3, 0):
        marshalable_types += (unicode,)

    def convert_obj_into_marshallable(self, obj):
        """Convert *obj* into something marshal can handle (pass-through for native types)."""
        if isinstance(obj, self.marshalable_types):
            return obj
        if isinstance(obj, array.array):
            if obj.typecode == "c":
                return obj.tostring()
            if obj.typecode == "u":
                return obj.tounicode()
            return obj.tolist()
        return self.class_to_dict(obj)

    @classmethod
    def class_to_dict(cls, obj):
        # UUIDs are common enough to special-case as plain strings
        if isinstance(obj, uuid.UUID):
            return str(obj)
        return super(MarshalSerializer, cls).class_to_dict(obj)

    @classmethod
    def register_type_replacement(cls, object_type, replacement_function):
        pass  # marshal doesn't support per-type replacements
class SerpentSerializer(SerializerBase):
    """(de)serializer that wraps the serpent serialization protocol."""
    serializer_id = 1

    def dumpsCall(self, obj, method, vargs, kwargs):
        return serpent.dumps((obj, method, vargs, kwargs), module_in_classname=True)

    def dumps(self, data):
        return serpent.dumps(data, module_in_classname=True)

    def loadsCall(self, data):
        obj, method, vargs, kwargs = serpent.loads(data)
        return (obj, method, self.recreate_classes(vargs), self.recreate_classes(kwargs))

    def loads(self, data):
        return self.recreate_classes(serpent.loads(data))

    @classmethod
    def register_type_replacement(cls, object_type, replacement_function):
        """Register a serpent converter that serializes the replacement instead of the original."""
        if not (object_type is type or inspect.isclass(object_type)):
            raise ValueError("refusing to register replacement for a non-type or the type 'type' itself")

        def custom_serializer(object, serpent_serializer, outputstream, indentlevel):
            replaced = replacement_function(object)
            if replaced is object:
                # no replacement took place: serialize as a regular class
                serpent_serializer.ser_default_class(replaced, outputstream, indentlevel)
            else:
                serpent_serializer._serialize(replaced, outputstream, indentlevel)

        serpent.register_class(object_type, custom_serializer)

    @classmethod
    def dict_to_class(cls, data):
        # presumably serpent's special float-as-dict form (e.g. NaN) -- TODO confirm
        if data.get("__class__") == "float":
            return float(data["value"])
        return super(SerpentSerializer, cls).dict_to_class(data)
class JsonSerializer(SerializerBase):
    """(de)serializer that wraps the json serialization protocol."""
    serializer_id = 2
    __type_replacements = {}

    def dumpsCall(self, obj, method, vargs, kwargs):
        call = {'object': obj, 'method': method, 'params': vargs, 'kwargs': kwargs}
        return json.dumps(call, ensure_ascii=False, default=self.default).encode("utf-8")

    def dumps(self, data):
        return json.dumps(data, ensure_ascii=False, default=self.default).encode("utf-8")

    def loadsCall(self, data):
        call = json.loads(self._convertToBytes(data).decode("utf-8"))
        params = self.recreate_classes(call["params"])
        kwargs = self.recreate_classes(call["kwargs"])
        return (call["object"], call["method"], params, kwargs)

    def loads(self, data):
        text = self._convertToBytes(data).decode("utf-8")
        return self.recreate_classes(json.loads(text))

    def default(self, obj):
        """Fallback converter handed to json.dumps for non-JSON-native objects."""
        # registered replacements get first crack at the object
        replacer = self.__type_replacements.get(type(obj))
        if replacer:
            obj = replacer(obj)
        if isinstance(obj, set):
            return tuple(obj)
        if isinstance(obj, uuid.UUID):
            return str(obj)
        if isinstance(obj, (datetime.datetime, datetime.date)):
            return obj.isoformat()
        if isinstance(obj, decimal.Decimal):
            return str(obj)
        if isinstance(obj, array.array):
            if obj.typecode == "c":
                return obj.tostring()
            if obj.typecode == "u":
                return obj.tounicode()
            return obj.tolist()
        return self.class_to_dict(obj)

    @classmethod
    def register_type_replacement(cls, object_type, replacement_function):
        if not (object_type is type or inspect.isclass(object_type)):
            raise ValueError("refusing to register replacement for a non-type or the type 'type' itself")
        cls.__type_replacements[object_type] = replacement_function
class MsgpackSerializer(SerializerBase):
    __doc__ = "(de)serializer that wraps the msgpack serialization protocol."
    serializer_id = 6
    # registry of per-type replacement functions
    # NOTE: the decompiler expanded the name-mangled attribute __type_replacements
    _MsgpackSerializer__type_replacements = {}

    def dumpsCall(self, obj, method, vargs, kwargs):
        return msgpack.packb((obj, method, vargs, kwargs), use_bin_type=True, default=(self.default))

    def dumps(self, data):
        return msgpack.packb(data, use_bin_type=True, default=(self.default))

    def loadsCall(self, data):
        data = self._convertToBytes(data)
        obj, method, vargs, kwargs = msgpack.unpackb(data, raw=False, object_hook=(self.object_hook))
        return (obj, method, vargs, kwargs)

    def loads(self, data):
        data = self._convertToBytes(data)
        return msgpack.unpackb(data, raw=False, object_hook=(self.object_hook), ext_hook=(self.ext_hook))

    def default(self, obj):
        # fallback converter for objects msgpack can't encode natively;
        # ext type codes used below: 48=complex, 49=number-as-text, 50=datetime, 51=date
        replacer = self._MsgpackSerializer__type_replacements.get(type(obj), None)
        if replacer:
            obj = replacer(obj)
        if isinstance(obj, set):
            return tuple(obj)
        if isinstance(obj, uuid.UUID):
            return str(obj)
        if isinstance(obj, bytearray):
            return bytes(obj)
        if isinstance(obj, complex):
            return msgpack.ExtType(48, struct.pack("dd", obj.real, obj.imag))
        if isinstance(obj, datetime.datetime):
            if obj.tzinfo:
                raise errors.SerializeError("msgpack cannot serialize datetime with timezone info")
            return msgpack.ExtType(50, struct.pack("d", obj.timestamp()))
        if isinstance(obj, datetime.date):
            # NOTE(review): "l" is platform-sized; presumably both peers share the layout -- verify
            return msgpack.ExtType(51, struct.pack("l", obj.toordinal()))
        if isinstance(obj, decimal.Decimal):
            return str(obj)
        if isinstance(obj, numbers.Number):
            # arbitrary-precision numbers travel as ascii text
            return msgpack.ExtType(49, str(obj).encode("ascii"))
        if isinstance(obj, array.array):
            if obj.typecode == "c":
                return obj.tostring()
            if obj.typecode == "u":
                return obj.tounicode()
            return obj.tolist()
        return self.class_to_dict(obj)

    def object_hook(self, obj):
        # maps tagged dicts back to objects during unpacking
        if "__class__" in obj:
            return self.dict_to_class(obj)
        return obj

    def ext_hook(self, code, data):
        # inverse of the ExtType encodings produced in default()
        if code == 48:
            real, imag = struct.unpack("dd", data)
            return complex(real, imag)
        if code == 49:
            return int(data)
        if code == 50:
            return datetime.datetime.fromtimestamp(struct.unpack("d", data)[0])
        if code == 51:
            return datetime.date.fromordinal(struct.unpack("l", data)[0])
        raise errors.SerializeError("invalid ext code for msgpack: " + str(code))

    @classmethod
    def register_type_replacement(cls, object_type, replacement_function):
        if not (object_type is type or inspect.isclass(object_type)):
            raise ValueError("refusing to register replacement for a non-type or the type 'type' itself")
        cls._MsgpackSerializer__type_replacements[object_type] = replacement_function
# registries of serializer instances, by name and by numeric wire-protocol id
_serializers = {}
_serializers_by_id = {}


def get_serializer(name):
    """Look up a serializer instance by name (e.g. "pickle", "json")."""
    try:
        return _serializers[name]
    except KeyError:
        raise errors.SerializeError("serializer '%s' is unknown or not available" % name)


def get_serializer_by_id(sid):
    """Look up a serializer instance by its numeric wire-protocol id."""
    try:
        return _serializers_by_id[sid]
    except KeyError:
        raise errors.SerializeError("no serializer available for id %d" % sid)

# determine which serializers are supported; pickle and marshal are always available,
# the others register only when their library imports cleanly
try:
    import cPickle as pickle  # Python 2: prefer the faster C implementation
except ImportError:
    import pickle
assert config.PICKLE_PROTOCOL_VERSION >= 2, "pickle protocol needs to be 2 or higher"
_ser = PickleSerializer()
_serializers["pickle"] = _ser
_serializers_by_id[_ser.serializer_id] = _ser
import marshal
_ser = MarshalSerializer()
_serializers["marshal"] = _ser
_serializers_by_id[_ser.serializer_id] = _ser
try:
    import cloudpickle
    _ser = CloudpickleSerializer()
    _serializers["cloudpickle"] = _ser
    _serializers_by_id[_ser.serializer_id] = _ser
except ImportError:
    pass
try:
    import dill
    _ser = DillSerializer()
    _serializers["dill"] = _ser
    _serializers_by_id[_ser.serializer_id] = _ser
except ImportError:
    pass
try:
    try:
        import importlib
        # the json implementation is configurable (e.g. "json" or "simplejson")
        json = importlib.import_module(config.JSON_MODULE)
    except ImportError:
        json = __import__(config.JSON_MODULE)
    _ser = JsonSerializer()
    _serializers["json"] = _ser
    _serializers_by_id[_ser.serializer_id] = _ser
except ImportError:
    pass
try:
    import serpent
    _ser = SerpentSerializer()
    _serializers["serpent"] = _ser
    _serializers_by_id[_ser.serializer_id] = _ser
except ImportError:
    # serpent is Pyro4's default serializer, hence the warning (others fail silently)
    log.warning("serpent serializer is not available")
try:
    import msgpack
    _ser = MsgpackSerializer()
    _serializers["msgpack"] = _ser
    _serializers_by_id[_ser.serializer_id] = _ser
except ImportError:
    pass
del _ser
def getAttribute(obj, attr):
    """
    Resolves an attribute name to an object. Raises
    an AttributeError if any attribute in the chain starts with a '``_``'.
    Doesn't resolve a dotted name, because that is a security vulnerability.
    It treats it as a single attribute name (and the lookup will likely fail).
    """
    if is_private_attribute(attr):
        raise AttributeError("attempt to access private attribute '%s'" % attr)
    value = getattr(obj, attr)
    # when REQUIRE_EXPOSE is on, only attributes tagged by @expose may be returned
    if not config.REQUIRE_EXPOSE or getattr(value, "_pyroExposed", False):
        return value
    raise AttributeError("attempt to access unexposed attribute '%s'" % attr)
def excepthook(ex_type, ex_value, ex_tb):
    """An exception hook you can use for ``sys.excepthook``, to automatically print remote Pyro tracebacks"""
    sys.stderr.write("".join(getPyroTraceback(ex_type, ex_value, ex_tb)))
def fixIronPythonExceptionForPickle(exceptionObject, addAttributes):
    """
    Function to hack around a bug in IronPython where it doesn't pickle
    exception attributes. We piggyback them into the exception's args.
    Bug report is at https://github.com/IronLanguages/main/issues/943
    Bug is still present in Ironpython 2.7.7

    With addAttributes=True the attribute dict is appended to args; with
    False the piggybacked dict (if any) is stripped from args and merged
    back into the exception's __dict__.

    Fixed: the decompiler produced ``args[None[:-1]]`` (a TypeError at
    runtime); the intended slice is ``args[:-1]``.
    """
    if hasattr(exceptionObject, "args"):
        if addAttributes:
            # piggyback the attributes, tagged so the receiving side can recognize them
            ironpythonArgs = vars(exceptionObject)
            ironpythonArgs["__ironpythonargs__"] = True
            exceptionObject.args += (ironpythonArgs,)
        else:
            # check for, and restore, piggybacked attributes
            if len(exceptionObject.args) > 0:
                piggyback = exceptionObject.args[-1]
                if type(piggyback) is dict:
                    if piggyback.get("__ironpythonargs__"):
                        del piggyback["__ironpythonargs__"]
                        exceptionObject.args = exceptionObject.args[:-1]
                        exceptionObject.__dict__.update(piggyback)
# cache of computed exposed-member metadata, keyed by (class, only_exposed, as_lists)
__exposed_member_cache = {}


def reset_exposed_members(obj, only_exposed=True, as_lists=False):
    """Delete any cached exposed members forcing recalculation on next request"""
    if not inspect.isclass(obj):
        obj = obj.__class__
    cache_key = (obj, only_exposed, as_lists)
    __exposed_member_cache.pop(cache_key, None)
def get_exposed_members(obj, only_exposed=True, as_lists=False, use_cache=True):
    """
    Return public and exposed members of the given object's class.
    You can also provide a class directly.
    Private members are ignored no matter what (names starting with underscore).
    If only_exposed is True, only members tagged with the @expose decorator are
    returned. If it is False, all public members are returned.
    The return value consists of the exposed methods, exposed attributes, and methods
    tagged as @oneway.
    (All this is used as meta data that Pyro sends to the proxy if it asks for it)
    as_lists is meant for python 2 compatibility.
    """
    if not inspect.isclass(obj):
        obj = obj.__class__
    cache_key = (obj, only_exposed, as_lists)
    if use_cache:
        if cache_key in __exposed_member_cache:
            return __exposed_member_cache[cache_key]
    methods = set()
    oneway = set()
    attrs = set()
    for m in dir(obj):
        if is_private_attribute(m):
            continue
        v = getattr(obj, m)
        if inspect.ismethod(v) or inspect.isfunction(v) or inspect.ismethoddescriptor(v):
            # callables: include when exposed (or when everything public counts)
            if getattr(v, "_pyroExposed", not only_exposed):
                methods.add(m)
                # also record whether the method is marked with the @oneway decorator
                if getattr(v, "_pyroOneway", False):
                    oneway.add(m)
        elif inspect.isdatadescriptor(v):
            # properties: exposure is tagged on whichever accessor exists
            func = getattr(v, "fget", None) or getattr(v, "fset", None) or getattr(v, "fdel", None)
            if func is not None:
                if getattr(func, "_pyroExposed", not only_exposed):
                    attrs.add(m)
    if as_lists:
        methods = list(methods)
        oneway = list(oneway)
        attrs = list(attrs)
    result = {'methods': methods, 'oneway': oneway,
              'attrs': attrs}
    __exposed_member_cache[cache_key] = result
    return result
def get_exposed_property_value(obj, propname, only_exposed=True):
    """
    Return the value of an @exposed @property.
    If the requested property is not a @property or not exposed,
    an AttributeError is raised instead.
    """
    descriptor = getattr(obj.__class__, propname)
    if (inspect.isdatadescriptor(descriptor) and descriptor.fget
            and getattr(descriptor.fget, "_pyroExposed", not only_exposed)):
        return descriptor.fget(obj)
    raise AttributeError("attempt to access unexposed or unknown remote attribute '%s'" % propname)
def set_exposed_property_value(obj, propname, value, only_exposed=True):
    """
    Sets the value of an @exposed @property.
    If the requested property is not a @property or not exposed,
    an AttributeError is raised instead.
    """
    descriptor = getattr(obj.__class__, propname)
    if inspect.isdatadescriptor(descriptor):
        # NOTE: exposure is checked on the first available accessor (getter first),
        # since @expose tags the accessor the property was decorated with
        accessor = descriptor.fget or descriptor.fset or descriptor.fdel
        if descriptor.fset and getattr(accessor, "_pyroExposed", not only_exposed):
            return descriptor.fset(obj, value)
    raise AttributeError("attempt to access unexposed or unknown remote attribute '%s'" % propname)
# dunder names that are always treated as private, even though they would match
# the "longer than 4 chars, starts and ends with __" public-dunder rule in
# is_private_attribute() below
_private_dunder_methods = frozenset([
    '__init__', '__init_subclass__', '__class__', '__module__', '__weakref__',
    '__call__',
    '__new__', '__del__', '__repr__', '__unicode__',
    '__str__', '__format__',
    '__nonzero__', '__bool__', '__coerce__',
    '__cmp__', '__eq__', '__ne__',
    '__hash__', '__ge__', '__gt__', '__le__', '__lt__',
    '__dir__', '__enter__',
    '__exit__', '__copy__', '__deepcopy__', '__sizeof__',
    '__getattr__', '__setattr__',
    '__hasattr__', '__getattribute__', '__delattr__',
    '__instancecheck__',
    '__subclasscheck__', '__getinitargs__', '__getnewargs__',
    '__getstate__',
    '__setstate__', '__reduce__', '__reduce_ex__',
    '__getstate_for_dict__',
    '__setstate_from_dict__', '__subclasshook__'])
def is_private_attribute(attr_name):
    """returns if the attribute name is to be considered private or not.

    Private: anything in _private_dunder_methods, and any name starting with
    an underscore -- EXCEPT long dunder names (``__x..x__`` longer than 4
    chars), which follow Python convention and are considered public here.

    Fixed: the flattened decompiled dump left the final ``return True``
    ambiguously nested, which would have made short underscore names like
    ``_x`` fall through and return None; restored the upstream semantics.
    """
    if attr_name in _private_dunder_methods:
        return True
    if not attr_name.startswith("_"):
        return False
    if len(attr_name) > 4 and attr_name.startswith("__") and attr_name.endswith("__"):
        return False
    return True

View File

@@ -0,0 +1,8 @@
# uncompyle6 version 3.9.2
# Python bytecode version base 3.7.0 (3394)
# Decompiled from: Python 3.8.19 (default, Mar 20 2024, 15:27:52)
# [Clang 14.0.6 ]
# Embedded file name: /var/user/app/device_supervisorbak/device_supervisor/lib/Pyro4/utils/__init__.py
# Compiled at: 2024-04-18 03:12:55
# Size of source mod 2**32: 31 bytes
pass

View File

@@ -0,0 +1,305 @@
# uncompyle6 version 3.9.2
# Python bytecode version base 3.7.0 (3394)
# Decompiled from: Python 3.8.19 (default, Mar 20 2024, 15:27:52)
# [Clang 14.0.6 ]
# Embedded file name: /var/user/app/device_supervisorbak/device_supervisor/lib/Pyro4/utils/flame.py
# Compiled at: 2024-04-18 03:12:55
# Size of source mod 2**32: 11884 bytes
"""
Pyro FLAME: Foreign Location Automatic Module Exposer.
Easy but potentially very dangerous way of exposing remote modules and builtins.
Flame requires the pickle serializer to be used.
Pyro - Python Remote Objects. Copyright by Irmen de Jong (irmen@razorvine.net).
"""
from __future__ import print_function
import sys, types, code, os, stat
from Pyro4 import constants, errors, core
from Pyro4.configuration import config
try:
import importlib
except ImportError:
importlib = None
try:
import builtins
except ImportError:
import __builtin__ as builtins
try:
from cStringIO import StringIO
except ImportError:
from io import StringIO
__all__ = [
"connect", "start", "createModule", "Flame"]
if sys.version_info > (3, 0):
    def exec_function(source, filename, global_map):
        """Compile and execute *source* in *global_map* (py3: exec is a function)."""
        source = fixExecSourceNewlines(source)
        exec(compile(source, filename, "exec"), global_map)
else:
    # Python 2: "exec" is a statement and would be a SyntaxError in a py3 source file,
    # so the py2 variant of exec_function is itself compiled from a string at import time
    eval(compile('def exec_function(source, filename, global_map):\n source=fixExecSourceNewlines(source)\n exec compile(source, filename, "exec") in global_map\n', "<exec_function>", "exec"))
def fixExecSourceNewlines(source):
    """Normalize newlines and trailing whitespace so compile() accepts *source*.

    Fixed: the decompiler produced ``sys.version_info[None[:2]]`` (a TypeError
    at runtime); the intended expression is ``sys.version_info[:2]``.
    """
    if sys.version_info < (2, 7) or sys.version_info[:2] in ((3, 0), (3, 1)):
        # old compile() implementations are picky: they want unix newlines
        # and a trailing newline at the end of the source
        source = source.replace("\r\n", "\n")
        source = source.rstrip() + "\n"
    # remove trailing whitespace that might cause compilation problems
    source = source.rstrip()
    return source
class FlameModule(object):
    __doc__ = "Proxy to a remote module."

    def __init__(self, flameserver, module):
        # hold our own proxy to the flame server, and remember the module name
        self.flameserver = core.Proxy(flameserver._pyroDaemon.uriFor(flameserver))
        self.module = module

    def __getattr__(self, item):
        if item in ('__getnewargs__', '__getnewargs_ex__', '__getinitargs__'):
            # don't turn copy/pickle protocol probes into remote calls
            raise AttributeError(item)
        # any other attribute access becomes a remote call of "<module>.<item>"
        # NOTE: the decompiler expanded the name-mangled __invoke method reference
        return core._RemoteMethod(self._FlameModule__invoke, "%s.%s" % (self.module, item), 0)

    def __getstate__(self):
        return self.__dict__

    def __setstate__(self, args):
        self.__dict__ = args

    def __invoke(self, module, args, kwargs):
        # actual transport: forwards the dotted call to the flame server
        return self.flameserver.invokeModule(module, args, kwargs)

    def __enter__(self):
        return self

    def __exit__(self, exc_type, exc_value, traceback):
        self.flameserver._pyroRelease()

    def __repr__(self):
        return "<%s.%s at 0x%x; module '%s' at %s>" % (self.__class__.__module__, self.__class__.__name__,
                                                       id(self), self.module, self.flameserver._pyroUri.location)
class FlameBuiltin(object):
    __doc__ = "Proxy to a remote builtin function."

    def __init__(self, flameserver, builtin):
        # hold our own proxy to the flame server, and remember the builtin's name
        self.flameserver = core.Proxy(flameserver._pyroDaemon.uriFor(flameserver))
        self.builtin = builtin

    def __call__(self, *args, **kwargs):
        # calling the proxy invokes the builtin remotely on the server
        return self.flameserver.invokeBuiltin(self.builtin, args, kwargs)

    def __enter__(self):
        return self

    def __exit__(self, exc_type, exc_value, traceback):
        self.flameserver._pyroRelease()

    def __repr__(self):
        return "<%s.%s at 0x%x; builtin '%s' at %s>" % (self.__class__.__module__, self.__class__.__name__,
                                                        id(self), self.builtin, self.flameserver._pyroUri.location)
class RemoteInteractiveConsole(object):
    __doc__ = "Proxy to a remote interactive console."

    class LineSendingConsole(code.InteractiveConsole):
        __doc__ = "makes sure the lines are sent to the remote console"

        def __init__(self, remoteconsole):
            code.InteractiveConsole.__init__(self, filename="<remoteconsole>")
            self.remoteconsole = remoteconsole

        def push(self, line):
            # forward the input line to the server; echo whatever output it produced
            output, more = self.remoteconsole.push_and_get_output(line)
            if output:
                sys.stdout.write(output)
            return more

    def __init__(self, remoteconsoleuri):
        self.remoteconsole = core.Proxy(remoteconsoleuri)

    def interact(self):
        # run a local REPL loop that transparently executes on the server
        console = self.LineSendingConsole(self.remoteconsole)
        console.interact(banner=(self.remoteconsole.get_banner()))
        print("(Remote session ended)")

    def close(self):
        # tear down the server-side console and release the proxy connection
        self.remoteconsole.terminate()
        self.remoteconsole._pyroRelease()

    def terminate(self):
        self.close()

    def __repr__(self):
        return "<%s.%s at 0x%x; for %s>" % (self.__class__.__module__, self.__class__.__name__,
                                            id(self), self.remoteconsole._pyroUri.location)

    def __enter__(self):
        return self

    def __exit__(self, exc_type, exc_value, traceback):
        self.close()
@core.expose
class InteractiveConsole(code.InteractiveConsole):
    __doc__ = "Interactive console wrapper that saves output written to stdout so it can be returned as value"

    def push_and_get_output(self, line):
        """Feed one input line to the interpreter, capturing anything it prints.
        Returns (output_text, more_input_needed)."""
        output, more = ('', False)
        stdout_save = sys.stdout
        try:
            sys.stdout = StringIO()
            more = self.push(line)  # SECURITY: executes arbitrary code sent by the remote user
            output = sys.stdout.getvalue()
            sys.stdout.close()
        finally:
            sys.stdout = stdout_save  # always restore, even if the pushed code raised
        return (
            output, more)

    def get_banner(self):
        # the banner attribute is assigned externally by Flame.console()
        return self.banner

    def write(self, data):
        sys.stdout.write(data)

    def terminate(self):
        # unregister from the Pyro daemon and discard any partial input
        self._pyroDaemon.unregister(self)
        self.resetbuffer()
@core.expose
class Flame(object):
    __doc__ = "\n    The actual FLAME server logic.\n    Usually created by using :py:meth:`core.Daemon.startFlame`.\n    Be *very* cautious before starting this: it allows the clients full access to everything on your system.\n    "

    def __init__(self):
        # flame only works with pickle, which can round-trip arbitrary objects
        if set(config.SERIALIZERS_ACCEPTED) != {"pickle"}:
            raise RuntimeError("flame requires the pickle serializer exclusively")

    def module(self, name):
        """
        Import a module on the server given by the module name and returns a proxy to it.
        The returned proxy does not support direct attribute access, if you want that,
        you should use the ``evaluate`` method instead.
        """
        if importlib:
            importlib.import_module(name)
        else:
            __import__(name)
        return FlameModule(self, name)

    def builtin(self, name):
        """returns a proxy to the given builtin on the server"""
        return FlameBuiltin(self, name)

    def execute(self, code):
        """execute a piece of code"""
        # SECURITY: runs arbitrary remote-supplied code in this module's globals (by design)
        exec_function(code, "<remote-code>", globals())

    def evaluate(self, expression):
        """evaluate an expression and return its result"""
        # SECURITY: eval of remote-supplied input (by design -- flame is wide open)
        return eval(expression)

    def sendmodule(self, modulename, modulesource):
        """
        Send the source of a module to the server and make the server load it.
        Note that you still have to actually ``import`` it on the server to access it.
        Sending a module again will replace the previous one with the new.
        """
        createModule(modulename, modulesource)

    def getmodule(self, modulename):
        """obtain the source code from a module on the server"""
        import inspect
        module = __import__(modulename, globals={}, locals={})
        return inspect.getsource(module)

    def sendfile(self, filename, filedata):
        """store a new file on the server"""
        with open(filename, "wb") as targetfile:
            # restrict the new file to owner read/write before writing the data
            os.chmod(filename, stat.S_IRUSR | stat.S_IWUSR)
            targetfile.write(filedata)

    def getfile(self, filename):
        """read any accessible file from the server"""
        with open(filename, "rb") as diskfile:
            return diskfile.read()

    def console(self):
        """get a proxy for a remote interactive console session"""
        console = InteractiveConsole(filename="<remoteconsole>")
        uri = self._pyroDaemon.register(console)
        console.banner = "Python %s on %s\n(Remote console on %s)" % (sys.version, sys.platform, uri.location)
        return RemoteInteractiveConsole(uri)

    @core.expose
    def invokeBuiltin(self, builtin, args, kwargs):
        # look the builtin up by name and call it with the supplied arguments
        return (getattr(builtins, builtin))(*args, **kwargs)

    @core.expose
    def invokeModule(self, dottedname, args, kwargs):
        # dottedname is something like "os.path.join": walk the attribute chain
        modulename, dottedname = dottedname.split(".", 1)
        module = sys.modules[modulename]
        method = module
        for attr in dottedname.split("."):
            method = getattr(method, attr)
        return method(*args, **kwargs)
def createModule(name, source, filename='<dynamic-module>', namespace=None):
    """
    Utility function to create a new module with the given name (dotted notation allowed), directly from the source string.
    Adds it to sys.modules, and returns the new module object.
    If you provide a namespace dict (such as ``globals()``), it will import the module into that namespace too.

    Fixed: the decompiler produced ``path[1[:None]]`` (a TypeError at runtime);
    the intended expression is ``path[1:]`` (strip the leading dot).
    """
    path = ""
    components = name.split(".")
    module = types.ModuleType("pyro-flame-module-context")
    for component in components:
        # build the dotted path one component at a time, creating missing parent modules
        path += "." + component
        real_path = path[1:]
        if real_path in sys.modules:
            module = sys.modules[real_path]
        else:
            setattr(module, component, types.ModuleType(real_path))
            module = getattr(module, component)
            sys.modules[real_path] = module
    # run the supplied source inside the (innermost) module's namespace
    exec_function(source, filename, module.__dict__)
    if namespace is not None:
        namespace[components[0]] = __import__(name)
    return module
def start(daemon):
    """
    Create and register a Flame server in the given daemon.
    Be *very* cautious before starting this: it allows the clients full access to everything on your system.
    """
    # guard clauses: refuse unless explicitly enabled and pickle-only
    if not config.FLAME_ENABLED:
        raise errors.SecurityError("Flame is disabled in the server configuration")
    if set(config.SERIALIZERS_ACCEPTED) != {"pickle"}:
        raise errors.SerializeError("Flame requires the pickle serializer exclusively")
    return daemon.register(Flame(), constants.FLAME_NAME)
def connect(location, hmac_key=None):
    """
    Connect to a Flame server on the given location, for instance localhost:9999 or ./u:unixsock
    This is just a convenience function that creates an appropriate Pyro proxy.
    """
    if config.SERIALIZER != "pickle":
        raise errors.SerializeError("Flame requires the pickle serializer")
    uri = "PYRO:%s@%s" % (constants.FLAME_NAME, location)
    proxy = core.Proxy(uri)
    if hmac_key:
        proxy._pyroHmacKey = hmac_key
    proxy._pyroBind()
    return proxy

View File

@@ -0,0 +1,68 @@
# uncompyle6 version 3.9.2
# Python bytecode version base 3.7.0 (3394)
# Decompiled from: Python 3.8.19 (default, Mar 20 2024, 15:27:52)
# [Clang 14.0.6 ]
# Embedded file name: /var/user/app/device_supervisorbak/device_supervisor/lib/Pyro4/utils/flameserver.py
# Compiled at: 2024-04-18 03:12:55
# Size of source mod 2**32: 2593 bytes
"""
Pyro FLAME: Foreign Location Automatic Module Exposer.
Easy but potentially very dangerous way of exposing remote modules and builtins.
This is the commandline server.
You can start this module as a script from the command line, to easily get a
flame server running:
:command:`python -m Pyro4.utils.flameserver`
or simply: :command:`pyro4-flameserver`
You have to explicitly enable Flame first though by setting the FLAME_ENABLED config item.
Pyro - Python Remote Objects. Copyright by Irmen de Jong (irmen@razorvine.net).
"""
from __future__ import print_function
import sys, os, warnings
from Pyro4.configuration import config
from Pyro4 import core
from Pyro4.utils import flame
def main(args=None, returnWithoutLooping=False):
    """Command line entry point: parse options, start a Flame daemon and serve requests.

    When *returnWithoutLooping* is true, returns (daemon, uri) instead of
    entering the request loop (useful for embedding/testing).
    """
    from optparse import OptionParser
    parser = OptionParser()
    parser.add_option("-H", "--host", default="localhost", help="hostname to bind server on (default=%default)")
    parser.add_option("-p", "--port", type="int", default=0, help="port to bind server on")
    parser.add_option("-u", "--unixsocket", help="Unix domain socket name to bind server on")
    parser.add_option("-q", "--quiet", action="store_true", default=False, help="don't output anything")
    parser.add_option("-k", "--key", help="the HMAC key to use (deprecated)")
    options, args = parser.parse_args(args)
    quiet = options.quiet
    if options.key:
        warnings.warn("using -k to supply HMAC key on the command line is a security problem and is deprecated since Pyro 4.72. See the documentation for an alternative.")
    # environment variable takes over, but refuses to be combined with -k
    if "PYRO_HMAC_KEY" in os.environ:
        if options.key:
            raise SystemExit("error: don't use -k and PYRO_HMAC_KEY at the same time")
        options.key = os.environ["PYRO_HMAC_KEY"]
    if not quiet:
        print("Starting Pyro Flame server.")
    hmac_key = (options.key or "").encode("utf-8")
    if not hmac_key and not quiet:
        print("Warning: HMAC key not set. Anyone can connect to this server!")
    # Flame only works over the pickle serializer
    config.SERIALIZERS_ACCEPTED = {
        "pickle"}
    daemon = core.Daemon(host=options.host, port=options.port, unixsocket=options.unixsocket)
    if hmac_key:
        daemon._pyroHmacKey = hmac_key
    uri = flame.start(daemon)
    if not quiet:
        print("server uri: %s" % uri)
        print("server is running.")
    if returnWithoutLooping:
        return (daemon, uri)
    daemon.requestLoop()
    daemon.close()
    return 0
# Allow running this module directly as a script (also exposed as pyro4-flameserver).
if __name__ == "__main__":
    sys.exit(main())

View File

@@ -0,0 +1,276 @@
# uncompyle6 version 3.9.2
# Python bytecode version base 3.7.0 (3394)
# Decompiled from: Python 3.8.19 (default, Mar 20 2024, 15:27:52)
# [Clang 14.0.6 ]
# Embedded file name: /var/user/app/device_supervisorbak/device_supervisor/lib/Pyro4/utils/httpgateway.py
# Compiled at: 2024-04-18 03:12:55
# Size of source mod 2**32: 16750 bytes
"""
HTTP gateway: connects the web browser's world of javascript+http and Pyro.
Creates a stateless HTTP server that essentially is a proxy for the Pyro objects behind it.
It exposes the Pyro objects through a HTTP interface and uses the JSON serializer,
so that you can immediately process the response data in the browser.
You can start this module as a script from the command line, to easily get a
http gateway server running:
:command:`python -m Pyro4.utils.httpgateway`
or simply: :command:`pyro4-httpgateway`
It is also possible to import the 'pyro_app' function and stick that into a WSGI
server of your choice, to have more control.
The javascript code in the web page of the gateway server works with the same-origin
browser policy because it is served by the gateway itself. If you want to access it
from scripts in different sites, you have to work around this or embed the gateway app
in your site. Non-browser clients that access the http api have no problems.
See the `http` example for two of such clients (node.js and python).
Pyro - Python Remote Objects. Copyright by Irmen de Jong (irmen@razorvine.net).
"""
from __future__ import print_function
import sys, re, cgi, os, uuid, warnings
from wsgiref.simple_server import make_server
import traceback
from Pyro4.util import json
from Pyro4.configuration import config
from Pyro4 import constants, errors, core, message, util, naming
__all__ = [
"pyro_app", "main"]
_nameserver = None
def get_nameserver(hmac=None):
    """Return a (cached) proxy to the Pyro name server, reconnecting if the
    cached connection turns out to be dead."""
    global _nameserver
    if not _nameserver:
        # no cached proxy yet (or it was invalidated): locate the name server
        _nameserver = naming.locateNS(hmac_key=hmac)
    try:
        # cheap liveness check on the cached connection
        _nameserver.ping()
        return _nameserver
    except errors.ConnectionClosedError:
        # stale connection: drop the cache and retry once via recursion
        _nameserver = None
        print("Connection with nameserver lost, reconnecting...")
        return get_nameserver(hmac)
def invalid_request(start_response):
    """Respond with http 405 for an unsupported request method."""
    headers = [('Content-Type', 'text/plain')]
    start_response("405 Method Not Allowed", headers)
    return [b'Error 405: Method Not Allowed']
def not_found(start_response):
    """Respond with http 404 when the requested url does not exist."""
    headers = [('Content-Type', 'text/plain')]
    start_response("404 Not Found", headers)
    return [b'Error 404: Not Found']
def redirect(start_response, target):
    """Respond with an http 302 redirect pointing at *target*; empty body."""
    headers = [("Location", target)]
    start_response("302 Found", headers)
    return []
index_page_template = '<!DOCTYPE html>\n<html>\n<head>\n <title>Pyro HTTP gateway</title>\n <style type="text/css">\n body {{ margin: 1em; }}\n table, th, td {{border: 1px solid #bbf; padding: 4px;}}\n table {{border-collapse: collapse;}}\n pre {{border: 1px solid #bbf; padding: 1ex; margin: 1ex; white-space: pre-wrap;}}\n #title-logo {{ float: left; margin: 0 1em 0 0; }}\n </style>\n</head>\n<body>\n <script src="//code.jquery.com/jquery-2.1.3.min.js"></script>\n <script>\n "use strict";\n function pyro_call(name, method, params) {{\n $.ajax({{\n url: name+"/"+method,\n type: "GET",\n data: params,\n dataType: "json",\n // headers: {{ "X-Pyro-Correlation-Id": "11112222-1111-2222-3333-222244449999" }},\n // headers: {{ "X-Pyro-Gateway-Key": "secret-key" }},\n // headers: {{ "X-Pyro-Options": "oneway" }},\n beforeSend: function(xhr, settings) {{\n $("#pyro_call").text(settings.type+" "+settings.url);\n }},\n error: function(xhr, status, error) {{\n var errormessage = "ERROR: "+xhr.status+" "+error+" \\n"+xhr.responseText;\n $("#pyro_response").text(errormessage);\n }},\n success: function(data) {{\n $("#pyro_response").text(JSON.stringify(data, null, 4));\n }}\n }});\n }}\n </script>\n<div id="title-logo"><img src="http://pyro4.readthedocs.io/en/stable/_static/pyro.png"></div>\n<div id="title-text">\n<h1>Pyro HTTP gateway</h1>\n<p>\n Use http+json to talk to Pyro objects.\n <a href="http://pyro4.readthedocs.io/en/stable/tipstricks.html#pyro-via-http-and-json">Docs.</a>\n</p>\n</div>\n<p><em>Note: performance isn\'t a key concern here; it is a stateless server.\n It does a name lookup and uses a new Pyro proxy for each request.</em></p>\n<h2>Currently exposed contents of name server on {hostname}:</h2>\n<p>(Limited to 10 entries, exposed name pattern = \'{ns_regex}\')</p>\n{name_server_contents_list}\n<p>Name server examples: (these examples are working if you expose the Pyro.NameServer object)</p>\n<ul>\n<li><a href="Pyro.NameServer/$meta" 
onclick="pyro_call(\'Pyro.NameServer\',\'$meta\'); return false;">Pyro.NameServer/$meta</a>\n -- gives meta info of the name server (methods)</li>\n<li><a href="Pyro.NameServer/list" onclick="pyro_call(\'Pyro.NameServer\',\'list\'); return false;">Pyro.NameServer/list</a>\n -- lists the contents of the name server</li>\n<li><a href="Pyro.NameServer/list?prefix=test."\n onclick="pyro_call(\'Pyro.NameServer\',\'list\', {{\'prefix\':\'test.\'}}); return false;">\n Pyro.NameServer/list?prefix=test.</a> -- lists the contents of the name server starting with \'test.\'</li>\n<li><a href="Pyro.NameServer/lookup?name=Pyro.NameServer"\n onclick="pyro_call(\'Pyro.NameServer\',\'lookup\', {{\'name\':\'Pyro.NameServer\'}}); return false;">\n Pyro.NameServer/lookup?name=Pyro.NameServer</a> -- perform lookup method of the name server</li>\n<li><a href="Pyro.NameServer/lookup?name=test.echoserver"\n onclick="pyro_call(\'Pyro.NameServer\',\'lookup\', {{\'name\':\'test.echoserver\'}}); return false;">\n Pyro.NameServer/lookup?name=test.echoserver</a> -- perform lookup method of the echo server</li>\n</ul>\n<p>Echoserver examples: (these examples are working if you expose the test.echoserver object)</p>\n<ul>\n<li><a href="test.echoserver/error" onclick="pyro_call(\'test.echoserver\',\'error\'); return false;">test.echoserver/error</a>\n -- perform error call on echoserver</li>\n<li><a href="test.echoserver/echo?message=Hi there, browser script!"\n onclick="pyro_call(\'test.echoserver\',\'echo\', {{\'message\':\'Hi there, browser script!\'}}); return false;">\n test.echoserver/echo?message=Hi there, browser script!</a> -- perform echo call on echoserver</li>\n</ul>\n<h2>Pyro response data (via Ajax):</h2>\nCall: <pre id="pyro_call"> &nbsp; </pre>\nResponse: <pre id="pyro_response"> &nbsp; </pre>\n<p>Pyro version: {pyro_version} &mdash; &copy; Irmen de Jong</p>\n</body>\n</html>\n'
def return_homepage(environ, start_response):
    """Render the gateway's html index page, listing up to 10 exposed name
    server entries with their methods and attributes."""
    try:
        nameserver = get_nameserver(hmac=pyro_app.hmac_key)
    except errors.NamingError as x:
        print("Name server error:", x)
        start_response("500 Internal Server Error", [('Content-Type', 'text/plain')])
        return [b'Cannot connect to the Pyro name server. Is it running? Refresh page to retry.']
    start_response("200 OK", [('Content-Type', 'text/html')])
    nslist = ["<table><tr><th>Name</th><th>methods</th><th>attributes (zero-param methods)</th></tr>"]
    # BUGFIX: decompiled form used [None[:10]] which raises TypeError; the page
    # itself documents the intent: limit the listing to 10 entries.
    names = sorted(list(nameserver.list(regex=pyro_app.ns_regex).keys())[:10])
    with core.batch(nameserver) as nsbatch:
        # one batched round-trip to resolve all names to uris
        for name in names:
            nsbatch.lookup(name)
        for name, uri in zip(names, nsbatch()):
            attributes = "-"
            try:
                with core.Proxy(uri) as proxy:
                    proxy._pyroHmacKey = pyro_app.hmac_key
                    proxy._pyroBind()
                    methods = " &nbsp; ".join(proxy._pyroMethods) or "-"
                    attributes = ['<a href="{name}/{attribute}" onclick="pyro_call(\'{name}\',\'{attribute}\'); return false;">{attribute}</a>'.format(name=name, attribute=attribute) for attribute in proxy._pyroAttrs]
                    attributes = " &nbsp; ".join(attributes) or "-"
            except errors.PyroError as x:
                # log the failure to the wsgi error stream but keep rendering the page
                stderr = environ["wsgi.errors"]
                print(("ERROR getting metadata for {0}:".format(uri)), file=stderr)
                traceback.print_exc(file=stderr)
                methods = "??error:%s??" % str(x)
            nslist.append('<tr><td><a href="{name}/$meta" onclick="pyro_call(\'{name}\',\'$meta\'); return false;">{name}</a></td><td>{methods}</td><td>{attributes}</td></tr>'.format(name=name,
                methods=methods,
                attributes=attributes))
    nslist.append("</table>")
    index_page = index_page_template.format(ns_regex=pyro_app.ns_regex, name_server_contents_list="".join(nslist),
                                            pyro_version=constants.VERSION,
                                            hostname=nameserver._pyroUri.location)
    return [index_page.encode("utf-8")]
def process_pyro_request(environ, path, parameters, start_response):
    """Handle one gateway request: look up <object>/<method> in the name
    server, invoke it through a Pyro proxy, and return the raw json reply."""
    pyro_options = environ.get("HTTP_X_PYRO_OPTIONS", "").split(",")
    if not path:
        return return_homepage(environ, start_response)
    matches = re.match("(.+)/(.+)", path)
    # BUGFIX: decompiled form was "return matches or not_found(...)" which
    # returned the match object itself; only bail out when there is no match.
    if not matches:
        return not_found(start_response)
    object_name, method = matches.groups()
    if pyro_app.gateway_key:
        # the gateway's own api key may come from a header or a $key query param
        gateway_key = environ.get("HTTP_X_PYRO_GATEWAY_KEY", "") or parameters.get("$key", "")
        gateway_key = gateway_key.encode("utf-8")
        if gateway_key != pyro_app.gateway_key:
            start_response("403 Forbidden", [('Content-Type', 'text/plain')])
            return [b'403 Forbidden - incorrect gateway api key']
        if "$key" in parameters:
            del parameters["$key"]
    if pyro_app.ns_regex and not re.match(pyro_app.ns_regex, object_name):
        start_response("403 Forbidden", [('Content-Type', 'text/plain')])
        return [b'403 Forbidden - access to the requested object has been denied']
    try:
        nameserver = get_nameserver(hmac=pyro_app.hmac_key)
        uri = nameserver.lookup(object_name)
        with core.Proxy(uri) as proxy:
            # propagate (or create) a correlation id for this call
            header_corr_id = environ.get("HTTP_X_PYRO_CORRELATION_ID", "")
            if header_corr_id:
                core.current_context.correlation_id = uuid.UUID(header_corr_id)
            else:
                core.current_context.correlation_id = uuid.uuid4()
            proxy._pyroHmacKey = pyro_app.hmac_key
            proxy._pyroGetMetadata()
            if "oneway" in pyro_options:
                proxy._pyroOneway.add(method)
            # BUGFIX: this was decompiled as "elif", which made $meta unreachable
            # whenever the oneway option was set; the checks are independent.
            if method == "$meta":
                result = {'methods': tuple(proxy._pyroMethods),
                          'attributes': tuple(proxy._pyroAttrs)}
                reply = json.dumps(result).encode("utf-8")
                start_response("200 OK", [('Content-Type', 'application/json; charset=utf-8'),
                                          ("X-Pyro-Correlation-Id", str(core.current_context.correlation_id))])
                return [reply]
            # pass the wire message through untouched; it is already json
            proxy._pyroRawWireResponse = True
            if method in proxy._pyroAttrs:
                if parameters:
                    raise AssertionError("attribute lookup can't have query parameters")
                msg = getattr(proxy, method)
            else:
                msg = (getattr(proxy, method))(**parameters)
            if msg is None or "oneway" in pyro_options:
                # oneway calls (and attribute reads of None) have no reply payload
                start_response("200 OK", [('Content-Type', 'application/json; charset=utf-8'),
                                          ("X-Pyro-Correlation-Id", str(core.current_context.correlation_id))])
                return []
            if msg.flags & message.FLAGS_EXCEPTION:
                # remote call raised: forward the serialized exception as a 500
                start_response("500 Internal Server Error", [('Content-Type', 'application/json; charset=utf-8')])
                return [msg.data]
            start_response("200 OK", [('Content-Type', 'application/json; charset=utf-8'),
                                      ("X-Pyro-Correlation-Id", str(core.current_context.correlation_id))])
            return [msg.data]
    except Exception as x:
        # gateway-side failure: log it and return the exception as json
        stderr = environ["wsgi.errors"]
        print(("ERROR handling {0} with params {1}:".format(path, parameters)), file=stderr)
        traceback.print_exc(file=stderr)
        start_response("500 Internal Server Error", [('Content-Type', 'application/json; charset=utf-8')])
        reply = json.dumps(util.SerializerBase.class_to_dict(x)).encode("utf-8")
        return [reply]
def pyro_app(environ, start_response):
    """
    The WSGI app function that is used to process the requests.
    You can stick this into a wsgi server of your choice, or use the main() method
    to use the default wsgiref server.
    """
    # the gateway always talks json to the browser side
    config.SERIALIZER = "json"
    config.COMMTIMEOUT = pyro_app.comm_timeout
    method = environ.get("REQUEST_METHOD")
    path = environ.get("PATH_INFO", "").lstrip("/")
    if not path:
        return redirect(start_response, "/pyro/")
    if path.startswith("pyro/"):
        if method in ('GET', 'POST'):
            parameters = singlyfy_parameters(cgi.parse(environ["wsgi.input"], environ))
            # BUGFIX: decompiled form was path[5[:None]] (TypeError at runtime);
            # the intent is to strip the "pyro/" prefix.
            return process_pyro_request(environ, path[5:], parameters, start_response)
        return invalid_request(start_response)
    return not_found(start_response)
def singlyfy_parameters(parameters):
    """
    Makes a cgi-parsed parameter dictionary into a dict where the values that
    are just a list of a single value, are converted to just that single value.
    """
    # mutate in place; iterate a snapshot of the keys so reassignment is safe
    for key in list(parameters):
        value = parameters[key]
        if isinstance(value, (list, tuple)) and len(value) == 1:
            parameters[key] = value[0]
    return parameters
# Default settings for the WSGI app; main() overrides these from command-line options.
pyro_app.ns_regex = "http\\."  # only objects whose name matches this regex are exposed
pyro_app.hmac_key = None  # Pyro HMAC key (bytes) used when talking to the exposed objects
pyro_app.gateway_key = None  # api key that clients must present to this gateway itself
pyro_app.comm_timeout = config.COMMTIMEOUT  # Pyro COMMTIMEOUT applied per request
def main(args=None):
    """Command line entry point: parse options and run the http gateway on the
    default wsgiref server until interrupted."""
    from optparse import OptionParser
    parser = OptionParser()
    parser.add_option("-H", "--host", default="localhost", help="hostname to bind server on (default=%default)")
    parser.add_option("-p", "--port", type="int", default=8080, help="port to bind server on (default=%default)")
    parser.add_option("-e", "--expose", default=(pyro_app.ns_regex), help="a regex of object names to expose (default=%default)")
    parser.add_option("-k", "--pyrokey", help="the HMAC key to use to connect with Pyro (deprecated)")
    parser.add_option("-g", "--gatewaykey", help="the api key to use to connect to the gateway itself")
    parser.add_option("-t", "--timeout", type="float", default=(pyro_app.comm_timeout), help="Pyro timeout value to use (COMMTIMEOUT setting, default=%default)")
    options, args = parser.parse_args(args)
    if options.pyrokey or options.gatewaykey:
        warnings.warn("using -k and/or -g to supply keys on the command line is a security problem and is deprecated since Pyro 4.72. See the documentation for an alternative.")
    # environment variables take over, but refuse to be combined with the options
    if "PYRO_HMAC_KEY" in os.environ:
        if options.pyrokey:
            raise SystemExit("error: don't use -k and PYRO_HMAC_KEY at the same time")
        options.pyrokey = os.environ["PYRO_HMAC_KEY"]
    elif "PYRO_HTTPGATEWAY_KEY" in os.environ:
        if options.gatewaykey:
            raise SystemExit("error: don't use -g and PYRO_HTTPGATEWAY_KEY at the same time")
        options.gatewaykey = os.environ["PYRO_HTTPGATEWAY_KEY"]
    # publish the effective settings on the wsgi app function itself
    pyro_app.hmac_key = (options.pyrokey or "").encode("utf-8")
    pyro_app.gateway_key = (options.gatewaykey or "").encode("utf-8")
    pyro_app.ns_regex = options.expose
    pyro_app.comm_timeout = config.COMMTIMEOUT = options.timeout
    if pyro_app.ns_regex:
        print("Exposing objects with names matching: ", pyro_app.ns_regex)
    else:
        print("Warning: exposing all objects (no expose regex set)")
    # connecting to the name server here is just an eager check; the app
    # reconnects per request anyway
    try:
        ns = get_nameserver(hmac=(pyro_app.hmac_key))
    except errors.PyroError:
        print("Not yet connected to a name server.")
    else:
        print("Connected to name server at: ", ns._pyroUri)
    server = make_server(options.host, options.port, pyro_app)
    print(("Pyro HTTP gateway running on http://{0}:{1}/pyro/".format)(*server.socket.getsockname()))
    server.serve_forever()
    server.server_close()
    return 0
# Allow running this module directly as a script (also exposed as pyro4-httpgateway).
if __name__ == "__main__":
    sys.exit(main())

File diff suppressed because it is too large Load Diff

View File

@@ -0,0 +1,21 @@
# uncompyle6 version 3.9.2
# Python bytecode version base 3.7.0 (3394)
# Decompiled from: Python 3.8.19 (default, Mar 20 2024, 15:27:52)
# [Clang 14.0.6 ]
# Embedded file name: /var/user/app/device_supervisorbak/device_supervisor/lib/azure/iot/device/__init__.py
# Compiled at: 2024-04-18 03:12:58
# Size of source mod 2**32: 1962 bytes
""" Azure IoT Device Library
This library provides clients and associated models for communicating with Azure IoT services
from an IoT device.
"""
# Re-export the public API of the subpackages at the azure.iot.device level.
from .iothub import *
from .provisioning import *
from .common import *
from . import iothub
from . import provisioning
from . import common
from . import patch_documentation
# Apply the hard-coded docstring patches to the synchronous client classes.
patch_documentation.execute_patch_for_sync()
# The package's public API is the union of the subpackages' __all__ lists.
__all__ = iothub.__all__ + provisioning.__all__ + common.__all__

View File

@@ -0,0 +1,18 @@
# uncompyle6 version 3.9.2
# Python bytecode version base 3.7.0 (3394)
# Decompiled from: Python 3.8.19 (default, Mar 20 2024, 15:27:52)
# [Clang 14.0.6 ]
# Embedded file name: /var/user/app/device_supervisorbak/device_supervisor/lib/azure/iot/device/aio/__init__.py
# Compiled at: 2024-04-18 03:12:58
# Size of source mod 2**32: 1836 bytes
"""Azure IoT Device Library - Asynchronous
This library provides asynchronous clients for communicating with Azure IoT services
from an IoT device.
"""
# Re-export the asynchronous client APIs at the azure.iot.device.aio level.
from azure.iot.device.iothub.aio import *
from azure.iot.device.provisioning.aio import *
import azure.iot.device.iothub.aio, azure.iot.device.provisioning.aio
from . import patch_documentation
# Apply the hard-coded docstring patches to the asynchronous client classes.
patch_documentation.execute_patch_for_async()
__all__ = azure.iot.device.iothub.aio.__all__ + azure.iot.device.provisioning.aio.__all__

View File

@@ -0,0 +1,189 @@
# uncompyle6 version 3.9.2
# Python bytecode version base 3.7.0 (3394)
# Decompiled from: Python 3.8.19 (default, Mar 20 2024, 15:27:52)
# [Clang 14.0.6 ]
# Embedded file name: /var/user/app/device_supervisorbak/device_supervisor/lib/azure/iot/device/aio/patch_documentation.py
# Compiled at: 2024-04-18 03:12:58
# Size of source mod 2**32: 10588 bytes
"""This module provides hard coded patches used to modify items from the libraries.
Currently we have to do like this so that we don't use exec anywhere"""
def execute_patch_for_async():
    """Re-install the async client methods with their docstrings re-attached.

    Each wrapper below simply delegates to the superclass implementation,
    copies the original method's docstring onto itself, and is then bound
    back onto the class via setattr.  Done this way (rather than with exec)
    per the module docstring.
    """
    # ---- IoTHubDeviceClient: instance methods ----
    from azure.iot.device.iothub.aio.async_clients import IoTHubDeviceClient as IoTHubDeviceClient_
    async def connect(self):
        return await super(IoTHubDeviceClient_, self).connect()
    connect.__doc__ = IoTHubDeviceClient_.connect.__doc__
    setattr(IoTHubDeviceClient_, "connect", connect)
    async def disconnect(self):
        return await super(IoTHubDeviceClient_, self).disconnect()
    disconnect.__doc__ = IoTHubDeviceClient_.disconnect.__doc__
    setattr(IoTHubDeviceClient_, "disconnect", disconnect)
    async def get_twin(self):
        return await super(IoTHubDeviceClient_, self).get_twin()
    get_twin.__doc__ = IoTHubDeviceClient_.get_twin.__doc__
    setattr(IoTHubDeviceClient_, "get_twin", get_twin)
    async def patch_twin_reported_properties(self, reported_properties_patch):
        return await super(IoTHubDeviceClient_, self).patch_twin_reported_properties(reported_properties_patch)
    patch_twin_reported_properties.__doc__ = IoTHubDeviceClient_.patch_twin_reported_properties.__doc__
    setattr(IoTHubDeviceClient_, "patch_twin_reported_properties", patch_twin_reported_properties)
    async def receive_method_request(self, method_name=None):
        return await super(IoTHubDeviceClient_, self).receive_method_request(method_name)
    receive_method_request.__doc__ = IoTHubDeviceClient_.receive_method_request.__doc__
    setattr(IoTHubDeviceClient_, "receive_method_request", receive_method_request)
    async def receive_twin_desired_properties_patch(self):
        return await super(IoTHubDeviceClient_, self).receive_twin_desired_properties_patch()
    receive_twin_desired_properties_patch.__doc__ = IoTHubDeviceClient_.receive_twin_desired_properties_patch.__doc__
    setattr(IoTHubDeviceClient_, "receive_twin_desired_properties_patch", receive_twin_desired_properties_patch)
    async def send_message(self, message):
        return await super(IoTHubDeviceClient_, self).send_message(message)
    send_message.__doc__ = IoTHubDeviceClient_.send_message.__doc__
    setattr(IoTHubDeviceClient_, "send_message", send_message)
    async def send_method_response(self, method_response):
        return await super(IoTHubDeviceClient_, self).send_method_response(method_response)
    send_method_response.__doc__ = IoTHubDeviceClient_.send_method_response.__doc__
    setattr(IoTHubDeviceClient_, "send_method_response", send_method_response)
    def update_sastoken(self, sastoken):
        return super(IoTHubDeviceClient_, self).update_sastoken(sastoken)
    update_sastoken.__doc__ = IoTHubDeviceClient_.update_sastoken.__doc__
    setattr(IoTHubDeviceClient_, "update_sastoken", update_sastoken)
    # ---- IoTHubDeviceClient: alternate constructors (classmethods) ----
    def create_from_connection_string(cls, connection_string, **kwargs):
        return (super(IoTHubDeviceClient_, cls).create_from_connection_string)(
            connection_string, **kwargs)
    create_from_connection_string.__doc__ = IoTHubDeviceClient_.create_from_connection_string.__doc__
    setattr(IoTHubDeviceClient_, "create_from_connection_string", classmethod(create_from_connection_string))
    def create_from_sastoken(cls, sastoken, **kwargs):
        return (super(IoTHubDeviceClient_, cls).create_from_sastoken)(sastoken, **kwargs)
    create_from_sastoken.__doc__ = IoTHubDeviceClient_.create_from_sastoken.__doc__
    setattr(IoTHubDeviceClient_, "create_from_sastoken", classmethod(create_from_sastoken))
    def create_from_symmetric_key(cls, symmetric_key, hostname, device_id, **kwargs):
        return (super(IoTHubDeviceClient_, cls).create_from_symmetric_key)(
            symmetric_key, hostname, device_id, **kwargs)
    create_from_symmetric_key.__doc__ = IoTHubDeviceClient_.create_from_symmetric_key.__doc__
    setattr(IoTHubDeviceClient_, "create_from_symmetric_key", classmethod(create_from_symmetric_key))
    def create_from_x509_certificate(cls, x509, hostname, device_id, **kwargs):
        return (super(IoTHubDeviceClient_, cls).create_from_x509_certificate)(
            x509, hostname, device_id, **kwargs)
    create_from_x509_certificate.__doc__ = IoTHubDeviceClient_.create_from_x509_certificate.__doc__
    setattr(IoTHubDeviceClient_, "create_from_x509_certificate", classmethod(create_from_x509_certificate))
    # ---- IoTHubModuleClient: instance methods ----
    from azure.iot.device.iothub.aio.async_clients import IoTHubModuleClient as IoTHubModuleClient_
    async def connect(self):
        return await super(IoTHubModuleClient_, self).connect()
    connect.__doc__ = IoTHubModuleClient_.connect.__doc__
    setattr(IoTHubModuleClient_, "connect", connect)
    async def disconnect(self):
        return await super(IoTHubModuleClient_, self).disconnect()
    disconnect.__doc__ = IoTHubModuleClient_.disconnect.__doc__
    setattr(IoTHubModuleClient_, "disconnect", disconnect)
    async def get_twin(self):
        return await super(IoTHubModuleClient_, self).get_twin()
    get_twin.__doc__ = IoTHubModuleClient_.get_twin.__doc__
    setattr(IoTHubModuleClient_, "get_twin", get_twin)
    async def patch_twin_reported_properties(self, reported_properties_patch):
        return await super(IoTHubModuleClient_, self).patch_twin_reported_properties(reported_properties_patch)
    patch_twin_reported_properties.__doc__ = IoTHubModuleClient_.patch_twin_reported_properties.__doc__
    setattr(IoTHubModuleClient_, "patch_twin_reported_properties", patch_twin_reported_properties)
    async def receive_method_request(self, method_name=None):
        return await super(IoTHubModuleClient_, self).receive_method_request(method_name)
    receive_method_request.__doc__ = IoTHubModuleClient_.receive_method_request.__doc__
    setattr(IoTHubModuleClient_, "receive_method_request", receive_method_request)
    async def receive_twin_desired_properties_patch(self):
        return await super(IoTHubModuleClient_, self).receive_twin_desired_properties_patch()
    receive_twin_desired_properties_patch.__doc__ = IoTHubModuleClient_.receive_twin_desired_properties_patch.__doc__
    setattr(IoTHubModuleClient_, "receive_twin_desired_properties_patch", receive_twin_desired_properties_patch)
    async def send_message(self, message):
        return await super(IoTHubModuleClient_, self).send_message(message)
    send_message.__doc__ = IoTHubModuleClient_.send_message.__doc__
    setattr(IoTHubModuleClient_, "send_message", send_message)
    async def send_method_response(self, method_response):
        return await super(IoTHubModuleClient_, self).send_method_response(method_response)
    send_method_response.__doc__ = IoTHubModuleClient_.send_method_response.__doc__
    setattr(IoTHubModuleClient_, "send_method_response", send_method_response)
    def update_sastoken(self, sastoken):
        return super(IoTHubModuleClient_, self).update_sastoken(sastoken)
    update_sastoken.__doc__ = IoTHubModuleClient_.update_sastoken.__doc__
    setattr(IoTHubModuleClient_, "update_sastoken", update_sastoken)
    # ---- IoTHubModuleClient: alternate constructors (classmethods) ----
    def create_from_connection_string(cls, connection_string, **kwargs):
        return (super(IoTHubModuleClient_, cls).create_from_connection_string)(
            connection_string, **kwargs)
    create_from_connection_string.__doc__ = IoTHubModuleClient_.create_from_connection_string.__doc__
    setattr(IoTHubModuleClient_, "create_from_connection_string", classmethod(create_from_connection_string))
    def create_from_edge_environment(cls, **kwargs):
        return (super(IoTHubModuleClient_, cls).create_from_edge_environment)(**kwargs)
    create_from_edge_environment.__doc__ = IoTHubModuleClient_.create_from_edge_environment.__doc__
    setattr(IoTHubModuleClient_, "create_from_edge_environment", classmethod(create_from_edge_environment))
    def create_from_sastoken(cls, sastoken, **kwargs):
        return (super(IoTHubModuleClient_, cls).create_from_sastoken)(sastoken, **kwargs)
    create_from_sastoken.__doc__ = IoTHubModuleClient_.create_from_sastoken.__doc__
    setattr(IoTHubModuleClient_, "create_from_sastoken", classmethod(create_from_sastoken))
    def create_from_x509_certificate(cls, x509, hostname, device_id, module_id, **kwargs):
        return (super(IoTHubModuleClient_, cls).create_from_x509_certificate)(
            x509, hostname, device_id, module_id, **kwargs)
    create_from_x509_certificate.__doc__ = IoTHubModuleClient_.create_from_x509_certificate.__doc__
    setattr(IoTHubModuleClient_, "create_from_x509_certificate", classmethod(create_from_x509_certificate))
    # ---- ProvisioningDeviceClient: alternate constructors (classmethods) ----
    from azure.iot.device.provisioning.aio.async_provisioning_device_client import ProvisioningDeviceClient as ProvisioningDeviceClient_
    def create_from_symmetric_key(cls, provisioning_host, registration_id, id_scope, symmetric_key, **kwargs):
        return (super(ProvisioningDeviceClient_, cls).create_from_symmetric_key)(
            provisioning_host, registration_id, id_scope, symmetric_key, **kwargs)
    create_from_symmetric_key.__doc__ = ProvisioningDeviceClient_.create_from_symmetric_key.__doc__
    setattr(ProvisioningDeviceClient_, "create_from_symmetric_key", classmethod(create_from_symmetric_key))
    def create_from_x509_certificate(cls, provisioning_host, registration_id, id_scope, x509, **kwargs):
        return (super(ProvisioningDeviceClient_, cls).create_from_x509_certificate)(
            provisioning_host, registration_id, id_scope, x509, **kwargs)
    create_from_x509_certificate.__doc__ = ProvisioningDeviceClient_.create_from_x509_certificate.__doc__
    setattr(ProvisioningDeviceClient_, "create_from_x509_certificate", classmethod(create_from_x509_certificate))

View File

@@ -0,0 +1,16 @@
# uncompyle6 version 3.9.2
# Python bytecode version base 3.7.0 (3394)
# Decompiled from: Python 3.8.19 (default, Mar 20 2024, 15:27:52)
# [Clang 14.0.6 ]
# Embedded file name: /var/user/app/device_supervisorbak/device_supervisor/lib/azure/iot/device/common/__init__.py
# Compiled at: 2024-04-18 03:12:58
# Size of source mod 2**32: 218 bytes
"""Azure IoT Device Common
This package provides shared modules for use with various Azure IoT device-side clients.
INTERNAL USAGE ONLY
"""
from .models import X509, ProxyOptions
__all__ = [
"X509", "ProxyOptions"]

View File

@@ -0,0 +1,77 @@
# uncompyle6 version 3.9.2
# Python bytecode version base 3.7.0 (3394)
# Decompiled from: Python 3.8.19 (default, Mar 20 2024, 15:27:52)
# [Clang 14.0.6 ]
# Embedded file name: /var/user/app/device_supervisorbak/device_supervisor/lib/azure/iot/device/common/async_adapter.py
# Compiled at: 2024-04-18 03:12:58
# Size of source mod 2**32: 3754 bytes
"""This module contains tools for adapting sync code for use in async coroutines."""
import functools, logging, traceback
import azure.iot.device.common.asyncio_compat as asyncio_compat
logger = logging.getLogger(__name__)
def emulate_async(fn):
    """Returns a coroutine function that calls a given function with emulated asynchronous
    behavior via use of mulithreading.
    Can be applied as a decorator.
    :param fn: The sync function to be run in async.
    :returns: A coroutine function that will call the given sync function.
    """
    @functools.wraps(fn)
    async def run_in_thread(*args, **kwargs):
        # Execute fn in the loop's default executor (a worker thread) so the
        # event loop is never blocked by the synchronous call.
        event_loop = asyncio_compat.get_running_loop()
        bound_call = functools.partial(fn, *args, **kwargs)
        return await event_loop.run_in_executor(None, bound_call)
    return run_in_thread
class AwaitableCallback(object):
    __doc__ = "A sync callback whose completion can be waited upon.\n    "
    def __init__(self, return_arg_name=None):
        """Creates an instance of an AwaitableCallback

        :param return_arg_name: optional name of the keyword argument that the
            callback will receive its result under; must be a string if given.
        """
        if return_arg_name:
            if not isinstance(return_arg_name, str):
                raise TypeError("internal error: return_arg_name must be a string")
        # Capture the loop that created this object; the callback itself may be
        # invoked from a different (worker) thread, hence call_soon_threadsafe below.
        loop = asyncio_compat.get_running_loop()
        self.future = asyncio_compat.create_future(loop)
        def wrapping_callback(*args, **kwargs):
            # A truthy kwargs["error"] takes precedence over any result.
            if "error" in kwargs and kwargs["error"]:
                exception = kwargs["error"]
            else:
                if return_arg_name:
                    if return_arg_name in kwargs:
                        exception = None
                        result = kwargs[return_arg_name]
                    else:
                        # The caller promised a named result argument but didn't deliver it.
                        raise TypeError("internal error: excepected argument with name '{}', did not get".format(return_arg_name))
                else:
                    exception = None
                    result = None
            # Complete the future on its owning loop's thread.
            if exception:
                logger.info("Callback completed with error {}".format(exception))
                logger.info(traceback.format_exception_only(type(exception), exception))
                loop.call_soon_threadsafe(self.future.set_exception, exception)
            else:
                logger.debug("Callback completed with result {}".format(result))
                loop.call_soon_threadsafe(self.future.set_result, result)
        self.callback = wrapping_callback
    def __call__(self, *args, **kwargs):
        """Calls the callback. Returns the result.
        """
        return (self.callback)(*args, **kwargs)
    async def completion(self):
        """Awaitable coroutine method that will return once the AwaitableCallback
        has been completed.
        :returns: Result of the callback when it was called.
        """
        return await self.future

View File

@@ -0,0 +1,79 @@
# uncompyle6 version 3.9.2
# Python bytecode version base 3.7.0 (3394)
# Decompiled from: Python 3.8.19 (default, Mar 20 2024, 15:27:52)
# [Clang 14.0.6 ]
# Embedded file name: /var/user/app/device_supervisorbak/device_supervisor/lib/azure/iot/device/common/asyncio_compat.py
# Compiled at: 2024-04-18 03:12:58
# Size of source mod 2**32: 2455 bytes
"""This module contains compatibility tools for bridging different versions of asyncio"""
import asyncio, sys
def get_running_loop():
    """Gets the currently running event loop

    Uses asyncio.get_running_loop() if available (Python 3.7+) or a backported
    version of the same function in 3.5/3.6.
    """
    try:
        return asyncio.get_running_loop()
    except AttributeError:
        # Pre-3.7: the private accessor returns None instead of raising
        # when called outside of a running loop.
        running = asyncio._get_running_loop()
        if running is None:
            raise RuntimeError("no running event loop")
        return running
def create_task(coro):
    """Creates a Task object.

    If available (Python 3.7+), use asyncio.create_task, which is preferred as
    it is more specific for the goal of immediately scheduling a task from a
    coroutine. If not available, fall back to the more general purpose
    asyncio.ensure_future.

    :returns: A new Task object.
    """
    try:
        return asyncio.create_task(coro)
    except AttributeError:
        # Python < 3.7
        return asyncio.ensure_future(coro)
def create_future(loop):
    """Creates a Future object.

    Uses loop.create_future if it is available (3.5.2+); preferred because it
    allows third-party event loops to provide their own Future implementation.
    Otherwise the Future is constructed directly.

    :returns: A new Future object.
    """
    try:
        return loop.create_future()
    except AttributeError:
        # Older loop implementations without create_future()
        return asyncio.Future(loop=loop)
def run(coro):
    """Execute the coroutine coro and return the result.

    It creates a new event loop and closes it at the end. Cannot be called
    when another asyncio event loop is running in the same thread.

    If available (Python 3.7+) use asyncio.run. If not available, use a
    custom implementation that achieves the same thing.
    """
    if sys.version_info >= (3, 7):
        return asyncio.run(coro)
    # Backport: create, install, use and tear down a dedicated loop.
    event_loop = asyncio.new_event_loop()
    asyncio.set_event_loop(event_loop)
    try:
        return event_loop.run_until_complete(coro)
    finally:
        event_loop.close()
        asyncio.set_event_loop(None)

View File

@@ -0,0 +1,8 @@
# uncompyle6 version 3.9.2
# Python bytecode version base 3.7.0 (3394)
# Decompiled from: Python 3.8.19 (default, Mar 20 2024, 15:27:52)
# [Clang 14.0.6 ]
# Embedded file name: /var/user/app/device_supervisorbak/device_supervisor/lib/azure/iot/device/common/auth/__init__.py
# Compiled at: 2024-04-18 03:12:58
# Size of source mod 2**32: 303 bytes
from .signing_mechanism import SymmetricKeySigningMechanism

View File

@@ -0,0 +1,93 @@
# uncompyle6 version 3.9.2
# Python bytecode version base 3.7.0 (3394)
# Decompiled from: Python 3.8.19 (default, Mar 20 2024, 15:27:52)
# [Clang 14.0.6 ]
# Embedded file name: /var/user/app/device_supervisorbak/device_supervisor/lib/azure/iot/device/common/auth/connection_string.py
# Compiled at: 2024-04-18 03:12:58
# Size of source mod 2**32: 3582 bytes
"""This module contains tools for working with Connection Strings"""
__all__ = ["ConnectionString"]

# Connection string syntax markers
CS_DELIMITER = ";"
CS_VAL_SEPARATOR = "="

# Recognized connection string keys
HOST_NAME = "HostName"
SHARED_ACCESS_KEY_NAME = "SharedAccessKeyName"
SHARED_ACCESS_KEY = "SharedAccessKey"
SHARED_ACCESS_SIGNATURE = "SharedAccessSignature"
DEVICE_ID = "DeviceId"
MODULE_ID = "ModuleId"
GATEWAY_HOST_NAME = "GatewayHostName"

_valid_keys = [
    HOST_NAME,
    SHARED_ACCESS_KEY_NAME,
    SHARED_ACCESS_KEY,
    SHARED_ACCESS_SIGNATURE,
    DEVICE_ID,
    MODULE_ID,
    GATEWAY_HOST_NAME,
]


def _parse_connection_string(connection_string):
    """Return a dictionary of values contained in a given connection string

    :raises TypeError: if connection_string is not a string
    :raises ValueError: if the string cannot be parsed, contains duplicate or
        invalid keys, or an incomplete key combination
    """
    try:
        segments = connection_string.split(CS_DELIMITER)
    except (AttributeError, TypeError):
        raise TypeError("Connection String must be of type str")
    try:
        parsed = dict(segment.split(CS_VAL_SEPARATOR, 1) for segment in segments)
    except ValueError:
        # A segment without a separator produces a 1-item sequence,
        # which dict() rejects.
        raise ValueError("Invalid Connection String - Unable to parse")
    if len(segments) != len(parsed):
        # Duplicate keys collapsed during dict construction
        raise ValueError("Invalid Connection String - Unable to parse")
    if any(key not in _valid_keys for key in parsed):
        raise ValueError("Invalid Connection String - Invalid Key")
    _validate_keys(parsed)
    return parsed


def _validate_keys(d):
    """Raise ValueError if incorrect combination of keys in dict d"""
    host_name = d.get(HOST_NAME)
    shared_access_key_name = d.get(SHARED_ACCESS_KEY_NAME)
    shared_access_key = d.get(SHARED_ACCESS_KEY)
    device_id = d.get(DEVICE_ID)
    # Either device authentication or shared-access-policy authentication
    # must be fully specified.
    device_auth = host_name and device_id and shared_access_key
    policy_auth = host_name and shared_access_key and shared_access_key_name
    if not (device_auth or policy_auth):
        raise ValueError("Invalid Connection String - Incomplete")


class ConnectionString(object):
    """Key/value mappings for connection details.
    Uses the same syntax as dictionary
    """

    def __init__(self, connection_string):
        """Initializer for ConnectionString

        :param str connection_string: String with connection details provided by Azure
        :raises: ValueError if provided connection_string is invalid
        """
        self._dict = _parse_connection_string(connection_string)
        self._strrep = connection_string

    def __getitem__(self, key):
        return self._dict[key]

    def __repr__(self):
        return self._strrep

    def get(self, key, default=None):
        """Return the value for key if key is in the dictionary, else default

        :param str key: The key to retrieve a value for
        :param str default: The default value returned if a key is not found
        :returns: The value for the given key
        """
        try:
            return self._dict[key]
        except KeyError:
            return default

View File

@@ -0,0 +1,130 @@
# uncompyle6 version 3.9.2
# Python bytecode version base 3.7.0 (3394)
# Decompiled from: Python 3.8.19 (default, Mar 20 2024, 15:27:52)
# [Clang 14.0.6 ]
# Embedded file name: /var/user/app/device_supervisorbak/device_supervisor/lib/azure/iot/device/common/auth/sastoken.py
# Compiled at: 2024-04-18 03:12:58
# Size of source mod 2**32: 5796 bytes
"""This module contains tools for working with Shared Access Signature (SAS) Tokens"""
import base64, hmac, hashlib, time
import six.moves.urllib as urllib
from azure.iot.device.common.chainable_exception import ChainableException
class SasTokenError(ChainableException):
    """Error in SasToken"""
class RenewableSasToken(object):
    """Renewable Shared Access Signature Token used to authenticate a request.

    This token is 'renewable', which means that it can be updated when
    necessary to prevent expiry, by using the .refresh() method.

    Data Attributes:
        expiry_time (int): Time that token will expire (in UTC, since epoch)
        ttl (int): Time to live for the token, in seconds
    """

    _auth_rule_token_format = "SharedAccessSignature sr={resource}&sig={signature}&se={expiry}&skn={keyname}"
    _simple_token_format = "SharedAccessSignature sr={resource}&sig={signature}&se={expiry}"

    def __init__(self, uri, signing_mechanism, key_name=None, ttl=3600):
        """
        :param str uri: URI of the resouce to be accessed
        :param signing_mechanism: The signing mechanism to use in the SasToken
        :type signing_mechanism: Child classes of :class:`azure.iot.common.SigningMechanism`
        :param str key_name: Symmetric Key Name (optional)
        :param int ttl: Time to live for the token, in seconds (default 3600)
        :raises: SasTokenError if an error occurs building a SasToken
        """
        self._uri = uri
        self._signing_mechanism = signing_mechanism
        self._key_name = key_name
        self._expiry_time = None  # set by refresh() below
        self._token = None  # set by refresh() below
        self.ttl = ttl
        self.refresh()

    def __str__(self):
        return self._token

    def refresh(self):
        """
        Refresh the SasToken lifespan, giving it a new expiry time, and generating a new token.
        """
        self._expiry_time = int(time.time() + self.ttl)
        self._token = self._build_token()

    def _build_token(self):
        """Build SasToken representation

        :returns: String representation of the token
        """
        encoded_uri = urllib.parse.quote(self._uri, safe="")
        message = encoded_uri + "\n" + str(self.expiry_time)
        try:
            signature = self._signing_mechanism.sign(message)
        except Exception as e:
            raise SasTokenError("Unable to build SasToken from given values", e)
        encoded_signature = urllib.parse.quote(signature, safe="")
        if self._key_name:
            return self._auth_rule_token_format.format(
                resource=encoded_uri,
                signature=encoded_signature,
                expiry=str(self.expiry_time),
                keyname=self._key_name,
            )
        return self._simple_token_format.format(
            resource=encoded_uri,
            signature=encoded_signature,
            expiry=str(self.expiry_time),
        )

    @property
    def expiry_time(self):
        """Expiry Time is READ ONLY"""
        return self._expiry_time
class NonRenewableSasToken(object):
    """NonRenewable Shared Access Signature Token used to authenticate a request.

    This token is 'non-renewable', which means that it is invalid once it
    expires, and there is no way to keep it alive. Instead, a new token must
    be created.

    Data Attributes:
        expiry_time (int): Time that token will expire (in UTC, since epoch)
        resource_uri (str): URI for the resource the Token provides authentication to access
    """

    def __init__(self, sastoken_string):
        """
        :param str sastoken_string: A string representation of a SAS token
        """
        self._token = sastoken_string
        # Parsed field map; raises SasTokenError on malformed input
        self._token_info = get_sastoken_info_from_string(self._token)

    def __str__(self):
        return self._token

    @property
    def expiry_time(self):
        """Expiry Time is READ ONLY"""
        return int(self._token_info["se"])

    @property
    def resource_uri(self):
        """Resource URI is READ ONLY"""
        return urllib.parse.unquote(self._token_info["sr"])
# Fields that must appear in every SAS token string
REQUIRED_SASTOKEN_FIELDS = ["sr", "sig", "se"]
# All fields that may legally appear ("skn" is optional)
VALID_SASTOKEN_FIELDS = REQUIRED_SASTOKEN_FIELDS + ["skn"]


def get_sastoken_info_from_string(sastoken_string):
    """Parse a SAS token string into a dict mapping field names to values.

    :param str sastoken_string: String of the form
        "SharedAccessSignature sr=...&sig=...&se=..." (optionally "&skn=...")
    :raises SasTokenError: if the string is not a well-formed SAS token
    :returns: dict of the token's fields
    """
    segments = sastoken_string.split("SharedAccessSignature ")
    if len(segments) != 2:
        raise SasTokenError("Invalid SasToken string: Not a SasToken ")
    try:
        token_info = dict(
            map(str.strip, field.split("=", 1)) for field in segments[1].split("&")
        )
    except Exception as e:
        raise SasTokenError("Invalid SasToken string: Incorrectly formatted", e)
    for required_field in REQUIRED_SASTOKEN_FIELDS:
        if required_field not in token_info:
            raise SasTokenError("Invalid SasToken string: Not all required fields present")
    for field in token_info:
        if field not in VALID_SASTOKEN_FIELDS:
            raise SasTokenError("Invalid SasToken string: Unexpected fields present")
    return token_info

View File

@@ -0,0 +1,63 @@
# uncompyle6 version 3.9.2
# Python bytecode version base 3.7.0 (3394)
# Decompiled from: Python 3.8.19 (default, Mar 20 2024, 15:27:52)
# [Clang 14.0.6 ]
# Embedded file name: /var/user/app/device_supervisorbak/device_supervisor/lib/azure/iot/device/common/auth/signing_mechanism.py
# Compiled at: 2024-04-18 03:12:58
# Size of source mod 2**32: 2316 bytes
"""This module defines an abstract SigningMechanism, as well as common child implementations of it
"""
import six, abc, hmac, hashlib, base64
from six.moves import urllib
@six.add_metaclass(abc.ABCMeta)
class SigningMechanism(object):
    """Abstract interface for objects that can sign a data string."""

    @abc.abstractmethod
    def sign(self, data_str):
        """Sign a data string and return the signed result."""
class SymmetricKeySigningMechanism(SigningMechanism):
    """Signing mechanism backed by a base64-encoded symmetric key."""

    def __init__(self, key):
        """
        A mechanism that signs data using a symmetric key

        :param key: Symmetric Key (base64 encoded)
        :type key: str or bytes
        :raises ValueError: if the key is not valid base64
        """
        # Normalize to bytes; str keys are utf-8 encoded first.
        try:
            key_bytes = key.encode("utf-8")
        except AttributeError:
            key_bytes = key
        try:
            self._signing_key = base64.b64decode(key_bytes)
        except (base64.binascii.Error, TypeError):
            raise ValueError("Invalid Symmetric Key")

    def sign(self, data_str):
        """
        Sign a data string with symmetric key and the HMAC-SHA256 algorithm.

        :param data_str: Data string to be signed
        :type data_str: str or bytes
        :returns: The signed data
        :rtype: str
        :raises ValueError: if the data cannot be signed with the stored key
        """
        # Normalize to bytes; str payloads are utf-8 encoded first.
        try:
            payload = data_str.encode("utf-8")
        except AttributeError:
            payload = data_str
        try:
            digest = hmac.HMAC(
                key=self._signing_key, msg=payload, digestmod=hashlib.sha256
            ).digest()
            signed_data = base64.b64encode(digest)
        except TypeError:
            raise ValueError("Unable to sign string using the provided symmetric key")
        return signed_data.decode("utf-8")

View File

@@ -0,0 +1,29 @@
# uncompyle6 version 3.9.2
# Python bytecode version base 3.7.0 (3394)
# Decompiled from: Python 3.8.19 (default, Mar 20 2024, 15:27:52)
# [Clang 14.0.6 ]
# Embedded file name: /var/user/app/device_supervisorbak/device_supervisor/lib/azure/iot/device/common/callable_weak_method.py
# Compiled at: 2024-04-18 03:12:58
# Size of source mod 2**32: 3180 bytes
import weakref
class CallableWeakMethod(object):
    """Callable object holding a weak reference to a method of another object.

    Similar to weakref.WeakMethod, but works on Python 2.7 and the resulting
    object is directly callable.

    Used primarily for callbacks: when object a holds object b and b holds a
    bound-method callback into a, the reference cycle plus a finalizer makes
    the pair uncollectable on Python 2.7. Holding this weak wrapper instead of
    the bound method breaks the cycle so both objects can be collected.

    When all supported interpreters implement PEP 442 this object is no
    longer needed.

    ref: https://www.python.org/dev/peps/pep-0442/
    """

    def __init__(self, object, method_name):
        # NOTE: parameter name 'object' shadows the builtin, but is kept for
        # interface compatibility with existing keyword callers.
        self.object_weakref = weakref.ref(object)
        self.method_name = method_name

    def _get_method(self):
        # Returns None once the referent has been garbage collected.
        return getattr(self.object_weakref(), self.method_name, None)

    def __call__(self, *args, **kwargs):
        method = self._get_method()
        return method(*args, **kwargs)

    def __eq__(self, other):
        return self._get_method() == other

    def __repr__(self):
        if not self.object_weakref():
            return "CallableWeakMethod for {} (DEAD)".format(self.method_name)
        return "CallableWeakMethod for {}".format(self._get_method())

View File

@@ -0,0 +1,20 @@
# uncompyle6 version 3.9.2
# Python bytecode version base 3.7.0 (3394)
# Decompiled from: Python 3.8.19 (default, Mar 20 2024, 15:27:52)
# [Clang 14.0.6 ]
# Embedded file name: /var/user/app/device_supervisorbak/device_supervisor/lib/azure/iot/device/common/chainable_exception.py
# Compiled at: 2024-04-18 03:12:58
# Size of source mod 2**32: 1013 bytes
class ChainableException(Exception):
    """This exception stores a reference to a previous exception which has
    caused the current one"""

    def __init__(self, message=None, cause=None):
        # Store the causing exception in the standard dunder used by
        # 'raise ... from ...' so normal traceback chaining applies.
        self.__cause__ = cause
        super(ChainableException, self).__init__(message)

    def __str__(self):
        base_repr = super(ChainableException, self).__repr__()
        if not self.__cause__:
            return base_repr
        return "{} caused by {}".format(base_repr, self.__cause__.__repr__())

View File

@@ -0,0 +1,58 @@
# uncompyle6 version 3.9.2
# Python bytecode version base 3.7.0 (3394)
# Decompiled from: Python 3.8.19 (default, Mar 20 2024, 15:27:52)
# [Clang 14.0.6 ]
# Embedded file name: /var/user/app/device_supervisorbak/device_supervisor/lib/azure/iot/device/common/evented_callback.py
# Compiled at: 2024-04-18 03:12:58
# Size of source mod 2**32: 2555 bytes
import threading, logging, six, traceback
logger = logging.getLogger(__name__)
class EventedCallback(object):
    """
    A sync callback whose completion can be waited upon.
    """

    def __init__(self, return_arg_name=None):
        """
        Creates an instance of an EventedCallback.

        :param str return_arg_name: Optional name of the kwarg that carries the
            callback's result value.
        :raises TypeError: if return_arg_name is provided but is not a string.
        """
        if return_arg_name and not isinstance(return_arg_name, six.string_types):
            raise TypeError("internal error: return_arg_name must be a string")
        self.completion_event = threading.Event()
        self.exception = None
        self.result = None

        def wrapping_callback(*args, **kwargs):
            # Stage 1: record the outcome of this invocation.
            if "error" in kwargs and kwargs["error"]:
                self.exception = kwargs["error"]
            elif return_arg_name:
                if return_arg_name in kwargs:
                    self.result = kwargs[return_arg_name]
                else:
                    raise TypeError("internal error: excepected argument with name '{}', did not get".format(return_arg_name))
            # Stage 2: log the outcome and signal completion.
            # BUG FIX: the decompiled version folded this logging into the
            # branch above, so it was skipped whenever an error or a named
            # return value was recorded; restored to the two-stage structure
            # used by the sibling AwaitableCallback.
            if self.exception:
                logger.info("Callback completed with error {}".format(self.exception))
                logger.info(traceback.format_exc())
            else:
                logger.debug("Callback completed with result {}".format(self.result))
            self.completion_event.set()

        self.callback = wrapping_callback

    def __call__(self, *args, **kwargs):
        """
        Calls the callback.
        """
        self.callback(*args, **kwargs)

    def wait_for_completion(self, *args, **kwargs):
        """
        Wait for the callback to be called, and return the results.

        :raises: the exception recorded by the callback, if any.
        :returns: the result recorded by the callback.
        """
        self.completion_event.wait(*args, **kwargs)
        if self.exception:
            raise self.exception
        return self.result

View File

@@ -0,0 +1,54 @@
# uncompyle6 version 3.9.2
# Python bytecode version base 3.7.0 (3394)
# Decompiled from: Python 3.8.19 (default, Mar 20 2024, 15:27:52)
# [Clang 14.0.6 ]
# Embedded file name: /var/user/app/device_supervisorbak/device_supervisor/lib/azure/iot/device/common/handle_exceptions.py
# Compiled at: 2024-04-18 03:12:58
# Size of source mod 2**32: 2233 bytes
import logging
import traceback

logger = logging.getLogger(__name__)


def handle_background_exception(e):
    """
    Function which handles exceptions that are caught in background threads.
    This is typically called from the callback thread inside the pipeline.
    These exceptions need special handling because callback functions are
    typically called inside a non-application thread in response to
    non-user-initiated actions, so there's nobody else to catch them.

    This function gets called from inside an arbitrary thread context, so
    code that runs from this function should be limited to the bare minimum.

    :param Error e: Exception object raised from inside a background thread
    """
    logger.error(msg="Exception caught in background thread. Unable to handle.")
    logger.error(traceback.format_exception_only(type(e), e))


def swallow_unraised_exception(e, log_msg=None, log_lvl="warning"):
    """Swallow and log an exception object.

    Convenience function for logging, as exceptions can only be logged
    correctly from within a except block.

    :param Exception e: Exception object to be swallowed.
    :param str log_msg: Optional message to use when logging.
    :param str log_lvl: The log level to use for logging. Default "warning".
    """
    # Map the requested level name to a logger method; anything
    # unrecognized falls back to debug (same as the original chain).
    log_fn = {
        "warning": logger.warning,
        "error": logger.error,
        "info": logger.info,
    }.get(log_lvl, logger.debug)
    # Raise and immediately catch so traceback.format_exc() has a frame.
    try:
        raise e
    except Exception:
        log_fn(log_msg)
        log_fn(traceback.format_exc())

View File

@@ -0,0 +1,104 @@
# uncompyle6 version 3.9.2
# Python bytecode version base 3.7.0 (3394)
# Decompiled from: Python 3.8.19 (default, Mar 20 2024, 15:27:52)
# [Clang 14.0.6 ]
# Embedded file name: /var/user/app/device_supervisorbak/device_supervisor/lib/azure/iot/device/common/http_transport.py
# Compiled at: 2024-04-18 03:12:58
# Size of source mod 2**32: 6031 bytes
import logging, uuid, threading, json, ssl
from . import transport_exceptions as exceptions
from .pipeline import pipeline_thread
from six.moves import http_client
logger = logging.getLogger(__name__)
class HTTPTransport(object):
    """
    A wrapper class that provides an implementation-agnostic HTTP interface.
    """

    def __init__(self, hostname, server_verification_cert=None, x509_cert=None, cipher=None):
        """
        Constructor to instantiate an HTTP protocol wrapper.

        :param str hostname: Hostname or IP address of the remote host.
        :param str server_verification_cert: Certificate which can be used to validate a server-side TLS connection (optional).
        :param str cipher: Cipher string in OpenSSL cipher list format (optional)
        :param x509_cert: Certificate which can be used to authenticate connection to a server in lieu of a password (optional).
        """
        self._hostname = hostname
        self._server_verification_cert = server_verification_cert
        self._x509_cert = x509_cert
        self._cipher = cipher
        self._ssl_context = self._create_ssl_context()

    def _create_ssl_context(self):
        """
        Create the SSLContext object used to authenticate the connection.

        The generated context is used by the http_client and is necessary when
        authenticating using a self-signed X509 cert or trusted X509 cert.
        """
        logger.debug("creating a SSL context")
        ssl_context = ssl.SSLContext(protocol=ssl.PROTOCOL_TLSv1_2)
        if self._server_verification_cert:
            # Trust only the caller-supplied CA data
            ssl_context.load_verify_locations(cadata=self._server_verification_cert)
        else:
            ssl_context.load_default_certs()
        if self._cipher:
            try:
                ssl_context.set_ciphers(self._cipher)
            except ssl.SSLError as e:
                # Fail fast at construction time on a bad cipher string
                raise e
        if self._x509_cert is not None:
            logger.debug("configuring SSL context with client-side certificate and key")
            ssl_context.load_cert_chain(
                self._x509_cert.certificate_file,
                self._x509_cert.key_file,
                self._x509_cert.pass_phrase,
            )
        ssl_context.verify_mode = ssl.CERT_REQUIRED
        ssl_context.check_hostname = True
        return ssl_context

    @pipeline_thread.invoke_on_http_thread_nowait
    def request(self, method, path, callback, body="", headers=None, query_params=""):
        """
        Connect to the remote host, send a request, and wait for and read the
        response, reporting the outcome via the callback.

        :param str method: The request method (e.g. "POST")
        :param str path: The path for the URL
        :param Function callback: The function that gets called when this operation is complete or has failed. The callback function must accept an error and a response dictionary, where the response dictionary contains a status code, a reason, and a response string.
        :param str body: The body of the HTTP request to be sent following the headers.
        :param dict headers: A dictionary that provides extra HTTP headers to be sent with the request.
        :param str query_params: The optional query parameters to be appended at the end of the URL.
        """
        # BUG FIX: 'headers' previously defaulted to a shared mutable dict
        # ({}); use None and create a fresh dict per call instead.
        if headers is None:
            headers = {}
        logger.info("sending https {} request to {} .".format(method, path))
        try:
            logger.debug("creating an https connection")
            connection = http_client.HTTPSConnection(self._hostname, context=self._ssl_context)
            logger.debug("connecting to host tcp socket")
            connection.connect()
            logger.debug("connection succeeded")
            url = "https://{hostname}/{path}{query_params}".format(
                hostname=self._hostname,
                path=path,
                query_params="?" + query_params if query_params else "",
            )
            logger.debug("Sending Request to HTTP URL: {}".format(url))
            logger.debug("HTTP Headers: {}".format(headers))
            logger.debug("HTTP Body: {}".format(body))
            connection.request(method, url, body=body, headers=headers)
            response = connection.getresponse()
            status_code = response.status
            reason = response.reason
            response_string = response.read()
            logger.debug("response received")
            logger.debug("closing connection to https host")
            connection.close()
            logger.debug("connection closed")
            logger.info("https {} request sent to {}, and {} response received.".format(method, path, status_code))
            response_obj = {
                'status_code': status_code,
                'reason': reason,
                'resp': response_string,
            }
            callback(response=response_obj)
        except Exception as e:
            # Report (not raise) errors: this runs on a pipeline thread, so
            # the caller only learns of failure through the callback.
            logger.info("Error in HTTP Transport: {}".format(e))
            callback(error=exceptions.ProtocolClientError(
                message="Unexpected HTTPS failure during connect",
                cause=e,
            ))

View File

@@ -0,0 +1,13 @@
# uncompyle6 version 3.9.2
# Python bytecode version base 3.7.0 (3394)
# Decompiled from: Python 3.8.19 (default, Mar 20 2024, 15:27:52)
# [Clang 14.0.6 ]
# Embedded file name: /var/user/app/device_supervisorbak/device_supervisor/lib/azure/iot/device/common/models/__init__.py
# Compiled at: 2024-04-18 03:12:58
# Size of source mod 2**32: 206 bytes
"""Azure Device Models
This package provides object models for use within the Azure Provisioning Device SDK and Azure IoTHub Device SDK.
"""
from .x509 import X509
from .proxy_options import ProxyOptions

View File

@@ -0,0 +1,49 @@
# uncompyle6 version 3.9.2
# Python bytecode version base 3.7.0 (3394)
# Decompiled from: Python 3.8.19 (default, Mar 20 2024, 15:27:52)
# [Clang 14.0.6 ]
# Embedded file name: /var/user/app/device_supervisorbak/device_supervisor/lib/azure/iot/device/common/models/proxy_options.py
# Compiled at: 2024-04-18 03:12:58
# Size of source mod 2**32: 2074 bytes
"""
This module represents proxy options to enable sending traffic through proxy servers.
"""
class ProxyOptions(object):
    """
    A class containing various options to send traffic through proxy servers
    by enabling proxying of MQTT connection.
    """

    def __init__(self, proxy_type, proxy_addr, proxy_port, proxy_username=None, proxy_password=None):
        """
        Initializer for proxy options.

        :param proxy_type: The type of the proxy server. This can be one of three possible choices: socks.HTTP, socks.SOCKS4, or socks.SOCKS5
        :param proxy_addr: IP address or DNS name of proxy server
        :param proxy_port: The port of the proxy server. Defaults to 1080 for socks and 8080 for http.
        :param proxy_username: (optional) username for SOCKS5 proxy, or userid for SOCKS4 proxy. This parameter is ignored if an HTTP server is being used. If it is not provided, authentication will not be used (servers may accept unauthenticated requests).
        :param proxy_password: (optional) This parameter is valid only for SOCKS5 servers and specifies the respective password for the username provided.
        """
        # Values are stored privately and exposed via read-only properties.
        self._proxy_type = proxy_type
        self._proxy_addr = proxy_addr
        self._proxy_port = proxy_port
        self._proxy_username = proxy_username
        self._proxy_password = proxy_password

    @property
    def proxy_type(self):
        """The type of the proxy server (read-only)."""
        return self._proxy_type

    @property
    def proxy_address(self):
        """IP address or DNS name of the proxy server (read-only)."""
        return self._proxy_addr

    @property
    def proxy_port(self):
        """The port of the proxy server (read-only)."""
        return self._proxy_port

    @property
    def proxy_username(self):
        """Username/userid for the proxy, if any (read-only)."""
        return self._proxy_username

    @property
    def proxy_password(self):
        """Password for the proxy username, if any (read-only)."""
        return self._proxy_password

View File

@@ -0,0 +1,37 @@
# uncompyle6 version 3.9.2
# Python bytecode version base 3.7.0 (3394)
# Decompiled from: Python 3.8.19 (default, Mar 20 2024, 15:27:52)
# [Clang 14.0.6 ]
# Embedded file name: /var/user/app/device_supervisorbak/device_supervisor/lib/azure/iot/device/common/models/x509.py
# Compiled at: 2024-04-18 03:12:58
# Size of source mod 2**32: 1463 bytes
"""This module represents a certificate that is responsible for providing client provided x509 certificates
that will eventually establish the authenticity of devices to IoTHub and Provisioning Services.
"""
class X509(object):
    """
    A class with references to the certificate, key, and optional pass-phrase
    used to authenticate a TLS connection using x509 certificates
    """

    def __init__(self, cert_file, key_file, pass_phrase=None):
        """
        Initializer for X509 Certificate

        :param cert_file: The file path to contents of the certificate (or certificate chain)
            used to authenticate the device.
        :param key_file: The file path to the key associated with the certificate
        :param pass_phrase: (optional) The pass_phrase used to encode the key file
        """
        # Stored privately; exposed via read-only properties below.
        self._cert_file = cert_file
        self._key_file = key_file
        self._pass_phrase = pass_phrase

    @property
    def certificate_file(self):
        """File path of the certificate (read-only)."""
        return self._cert_file

    @property
    def key_file(self):
        """File path of the key (read-only)."""
        return self._key_file

    @property
    def pass_phrase(self):
        """Pass-phrase for the key file, if any (read-only)."""
        return self._pass_phrase

View File

@@ -0,0 +1,513 @@
# uncompyle6 version 3.9.2
# Python bytecode version base 3.7.0 (3394)
# Decompiled from: Python 3.8.19 (default, Mar 20 2024, 15:27:52)
# [Clang 14.0.6 ]
# Embedded file name: /var/user/app/device_supervisorbak/device_supervisor/lib/azure/iot/device/common/mqtt_transport.py
# Compiled at: 2024-04-18 03:12:58
# Size of source mod 2**32: 28817 bytes
import paho.mqtt.client as mqtt
import logging, ssl, sys, threading, traceback, weakref, socket
from . import transport_exceptions as exceptions
import socks
logger = logging.getLogger(__name__)
# Maps paho CONNACK result codes to the transport exception type used to
# report them (consumed by _create_error_from_connack_rc_code).
paho_connack_rc_to_error = {(mqtt.CONNACK_REFUSED_PROTOCOL_VERSION): (exceptions.ProtocolClientError),
(mqtt.CONNACK_REFUSED_IDENTIFIER_REJECTED): (exceptions.ProtocolClientError),
(mqtt.CONNACK_REFUSED_SERVER_UNAVAILABLE): (exceptions.ConnectionFailedError),
(mqtt.CONNACK_REFUSED_BAD_USERNAME_PASSWORD): (exceptions.UnauthorizedError),
(mqtt.CONNACK_REFUSED_NOT_AUTHORIZED): (exceptions.UnauthorizedError)}
# Maps generic paho operation result codes (MQTT_ERR_*) to the transport
# exception type used to report them (consumed by _create_error_from_rc_code).
paho_rc_to_error = {(mqtt.MQTT_ERR_NOMEM): (exceptions.ProtocolClientError),
(mqtt.MQTT_ERR_PROTOCOL): (exceptions.ProtocolClientError),
(mqtt.MQTT_ERR_INVAL): (exceptions.ProtocolClientError),
(mqtt.MQTT_ERR_NO_CONN): (exceptions.ConnectionDroppedError),
(mqtt.MQTT_ERR_CONN_REFUSED): (exceptions.ConnectionFailedError),
(mqtt.MQTT_ERR_NOT_FOUND): (exceptions.ConnectionFailedError),
(mqtt.MQTT_ERR_CONN_LOST): (exceptions.ConnectionDroppedError),
(mqtt.MQTT_ERR_TLS): (exceptions.UnauthorizedError),
(mqtt.MQTT_ERR_PAYLOAD_SIZE): (exceptions.ProtocolClientError),
(mqtt.MQTT_ERR_NOT_SUPPORTED): (exceptions.ProtocolClientError),
(mqtt.MQTT_ERR_AUTH): (exceptions.UnauthorizedError),
(mqtt.MQTT_ERR_ACL_DENIED): (exceptions.UnauthorizedError),
(mqtt.MQTT_ERR_UNKNOWN): (exceptions.ProtocolClientError),
(mqtt.MQTT_ERR_ERRNO): (exceptions.ProtocolClientError),
(mqtt.MQTT_ERR_QUEUE_SIZE): (exceptions.ProtocolClientError)}
def _create_error_from_connack_rc_code(rc):
    """
    Given a paho CONNACK rc code, return an Exception that can be raised
    """
    message = mqtt.connack_string(rc)
    error_cls = paho_connack_rc_to_error.get(rc)
    if error_cls is not None:
        return error_cls(message)
    return exceptions.ProtocolClientError("Unknown CONNACK rc={}".format(rc))
def _create_error_from_rc_code(rc):
    """
    Given a paho rc code, return an Exception that can be raised
    """
    if rc == 1:
        # rc==1 (MQTT_ERR_NOMEM) is special-cased before the table lookup —
        # presumably because paho reports dropped connections with this code
        # in practice. TODO confirm against paho behavior.
        return exceptions.ConnectionDroppedError("Paho returned rc==1")
    if rc in paho_rc_to_error:
        message = mqtt.error_string(rc)
        return paho_rc_to_error[rc](message)
    # BUG FIX: message previously said "Unknown CONNACK rc==", but this helper
    # handles generic paho rc codes; CONNACK codes are handled by
    # _create_error_from_connack_rc_code.
    return exceptions.ProtocolClientError("Unknown rc=={}".format(rc))
class MQTTTransport(object):
    """
    A wrapper class that provides an implementation-agnostic MQTT message broker interface.

    :ivar on_mqtt_connected_handler: Event handler callback, called upon establishing a connection.
    :type on_mqtt_connected_handler: Function
    :ivar on_mqtt_disconnected_handler: Event handler callback, called upon a disconnection.
    :type on_mqtt_disconnected_handler: Function
    :ivar on_mqtt_message_received_handler: Event handler callback, called upon receiving a message.
    :type on_mqtt_message_received_handler: Function
    :ivar on_mqtt_connection_failure_handler: Event handler callback, called upon a connection failure.
    :type on_mqtt_connection_failure_handler: Function
    """

    def __init__(self, client_id, hostname, username, server_verification_cert=None, x509_cert=None, websockets=False, cipher=None, proxy_options=None, keep_alive=None):
        """
        Constructor to instantiate an MQTT protocol wrapper.

        :param str client_id: The id of the client connecting to the broker.
        :param str hostname: Hostname or IP address of the remote broker.
        :param str username: Username for login to the remote broker.
        :param str server_verification_cert: Certificate which can be used to validate a server-side TLS connection (optional).
        :param x509_cert: Certificate which can be used to authenticate connection to a server in lieu of a password (optional).
        :param bool websockets: Indicates whether or not to enable a websockets connection in the Transport.
        :param str cipher: Cipher string in OpenSSL cipher list format
        :param proxy_options: Options for sending traffic through proxy servers.
        :param int keep_alive: Keepalive interval in seconds, forwarded to paho's connect() (optional).
        """
        self._client_id = client_id
        self._hostname = hostname
        self._username = username
        self._mqtt_client = None
        self._server_verification_cert = server_verification_cert
        self._x509_cert = x509_cert
        self._websockets = websockets
        self._cipher = cipher
        self._proxy_options = proxy_options
        self._keep_alive = keep_alive

        # Event handlers - the owner of this transport assigns these after construction.
        self.on_mqtt_connected_handler = None
        self.on_mqtt_disconnected_handler = None
        self.on_mqtt_message_received_handler = None
        self.on_mqtt_connection_failure_handler = None

        # Tracks in-flight paho MIDs and their completion callbacks.
        self._op_manager = OperationManager()

        self._mqtt_client = self._create_mqtt_client()

    def _create_mqtt_client(self):
        """
        Create the MQTT client object and assign all necessary event handler callbacks.
        """
        logger.debug("creating mqtt client")
        if self._websockets:
            logger.info("Creating client for connecting using MQTT over websockets")
            mqtt_client = mqtt.Client(
                client_id=self._client_id,
                clean_session=False,
                protocol=mqtt.MQTTv311,
                transport="websockets",
            )
            mqtt_client.ws_set_options(path="/$iothub/websocket")
        else:
            logger.info("Creating client for connecting using MQTT over TCP")
            mqtt_client = mqtt.Client(
                client_id=self._client_id,
                clean_session=False,
                protocol=mqtt.MQTTv311,
            )

        if self._proxy_options:
            logger.info("Setting custom proxy options on mqtt client")
            mqtt_client.proxy_set(
                proxy_type=self._proxy_options.proxy_type,
                proxy_addr=self._proxy_options.proxy_address,
                proxy_port=self._proxy_options.proxy_port,
                proxy_username=self._proxy_options.proxy_username,
                proxy_password=self._proxy_options.proxy_password,
            )

        mqtt_client.enable_logger(logging.getLogger("paho"))
        ssl_context = self._create_ssl_context()
        mqtt_client.tls_set_context(context=ssl_context)

        # Hold only a weak reference to self inside the paho callbacks so the
        # paho network thread cannot keep this transport alive after its owner
        # has released it.
        self_weakref = weakref.ref(self)

        def on_connect(client, userdata, flags, rc):
            this = self_weakref()
            logger.info("connected with result code: {}".format(rc))
            if rc:
                # Nonzero CONNACK rc: the broker refused the connection.
                if this.on_mqtt_connection_failure_handler:
                    try:
                        this.on_mqtt_connection_failure_handler(_create_error_from_connack_rc_code(rc))
                    except Exception:
                        logger.error("Unexpected error calling on_mqtt_connection_failure_handler")
                        logger.error(traceback.format_exc())
                else:
                    logger.error("connection failed, but no on_mqtt_connection_failure_handler handler callback provided")
            elif this.on_mqtt_connected_handler:
                try:
                    this.on_mqtt_connected_handler()
                except Exception:
                    logger.error("Unexpected error calling on_mqtt_connected_handler")
                    logger.error(traceback.format_exc())
            else:
                logger.error("No event handler callback set for on_mqtt_connected_handler")

        def on_disconnect(client, userdata, rc):
            # BUG FIX (decompilation artifact): the decompiled control flow here
            # was "if this: ... elif not this: ... else: ...", whose final else
            # branch is unreachable, so the disconnect handler could never run.
            # Restored to the intended structure: determine the cause (and stop
            # paho's auto-reconnect) on an error rc, then notify the owner.
            this = self_weakref()
            logger.info("disconnected with result code: {}".format(rc))
            cause = None
            if rc:
                # rc != 0 means an unexpected drop. Capture the cause and force
                # paho to stop so upper layers control reconnect timing (see
                # _cleanup_transport_on_error).
                logger.debug("".join(traceback.format_stack()))
                cause = _create_error_from_rc_code(rc)
                if this:
                    this._cleanup_transport_on_error()
            if not this:
                logger.info("on_disconnect called with transport==None. Transport must have been garbage collected. stopping loop")
                client.loop_stop()
            elif this.on_mqtt_disconnected_handler:
                try:
                    this.on_mqtt_disconnected_handler(cause)
                except Exception:
                    logger.error("Unexpected error calling on_mqtt_disconnected_handler")
                    logger.error(traceback.format_exc())
            else:
                logger.error("No event handler callback set for on_mqtt_disconnected_handler")

        def on_subscribe(client, userdata, mid, granted_qos):
            this = self_weakref()
            logger.info("suback received for {}".format(mid))
            this._op_manager.complete_operation(mid)

        def on_unsubscribe(client, userdata, mid):
            this = self_weakref()
            logger.info("UNSUBACK received for {}".format(mid))
            this._op_manager.complete_operation(mid)

        def on_publish(client, userdata, mid):
            this = self_weakref()
            logger.info("payload published for {}".format(mid))
            this._op_manager.complete_operation(mid)

        def on_message(client, userdata, mqtt_message):
            this = self_weakref()
            logger.info("message received on {}".format(mqtt_message.topic))
            if this.on_mqtt_message_received_handler:
                try:
                    this.on_mqtt_message_received_handler(mqtt_message.topic, mqtt_message.payload)
                except Exception:
                    logger.error("Unexpected error calling on_mqtt_message_received_handler")
                    logger.error(traceback.format_exc())
            else:
                logger.error("No event handler callback set for on_mqtt_message_received_handler - DROPPING MESSAGE")

        mqtt_client.on_connect = on_connect
        mqtt_client.on_disconnect = on_disconnect
        mqtt_client.on_subscribe = on_subscribe
        mqtt_client.on_unsubscribe = on_unsubscribe
        mqtt_client.on_publish = on_publish
        mqtt_client.on_message = on_message

        # Effectively disable paho's automatic reconnect by making the delay
        # very large (2 hours); reconnection is driven by upper layers instead.
        mqtt_client.reconnect_delay_set(7200)
        logger.debug("Created MQTT protocol client, assigned callbacks")
        return mqtt_client

    def _cleanup_transport_on_error(self):
        """
        After disconnecting because of an error, Paho was designed to keep the loop running and
        to try reconnecting after the reconnect interval. We don't want Paho to reconnect because
        we want to control the timing of the reconnect, so we force the loop to stop.

        We are relying on intimate knowledge of Paho behavior here. If this becomes a problem,
        it may be necessary to write our own Paho thread and stop using thread_start()/thread_stop().
        This is certainly supported by Paho, but the thread that Paho provides works well enough
        (so far) and making our own would be more complex than is currently justified.
        """
        logger.info("Forcing paho disconnect to prevent it from automatically reconnecting")
        self._mqtt_client.disconnect()
        self._mqtt_client.loop_stop()
        if threading.current_thread() == self._mqtt_client._thread:
            # loop_stop() is a no-op when called from inside the paho thread
            # itself; nulling _thread lets a future loop_start() succeed.
            logger.debug("in paho thread. nulling _thread")
            self._mqtt_client._thread = None
        logger.debug("Done forcing paho disconnect")

    def _create_ssl_context(self):
        """
        This method creates the SSLContext object used by Paho to authenticate the connection.
        """
        logger.debug("creating a SSL context")
        ssl_context = ssl.SSLContext(protocol=ssl.PROTOCOL_TLSv1_2)
        if self._server_verification_cert:
            logger.debug("configuring SSL context with custom server verification cert")
            ssl_context.load_verify_locations(cadata=self._server_verification_cert)
        else:
            logger.debug("configuring SSL context with default certs")
            ssl_context.load_default_certs()
        if self._cipher:
            try:
                logger.debug("configuring SSL context with cipher suites")
                ssl_context.set_ciphers(self._cipher)
            except ssl.SSLError as e:
                # Invalid cipher string; re-raise for the caller to surface.
                raise e
        if self._x509_cert is not None:
            logger.debug("configuring SSL context with client-side certificate and key")
            ssl_context.load_cert_chain(self._x509_cert.certificate_file, self._x509_cert.key_file, self._x509_cert.pass_phrase)
        ssl_context.verify_mode = ssl.CERT_REQUIRED
        ssl_context.check_hostname = True
        return ssl_context

    def connect(self, password=None):
        """
        Connect to the MQTT broker, using hostname and username set at instantiation.

        This method should be called as an entry point before sending any telemetry.
        The password is not required if the transport was instantiated with an x509 certificate.

        If MQTT connection has been proxied, connection will take a bit longer to allow negotiation
        with the proxy server. Any errors in the proxy connection process will trigger exceptions

        :param str password: The password for connecting with the MQTT broker (Optional).

        :raises: ConnectionFailedError if connection could not be established.
        :raises: ConnectionDroppedError if connection is dropped during execution.
        :raises: UnauthorizedError if there is an error authenticating.
        :raises: ProtocolClientError if there is some other client error.
        """
        logger.debug("connecting to mqtt broker")
        self._mqtt_client.username_pw_set(username=self._username, password=password)
        try:
            if self._websockets:
                logger.info("Connect using port 443 (websockets)")
                rc = self._mqtt_client.connect(host=self._hostname, port=443, keepalive=self._keep_alive)
            else:
                logger.info("Connect using port 8883 (TCP)")
                rc = self._mqtt_client.connect(host=self._hostname, port=8883, keepalive=self._keep_alive)
        except socket.error as e:
            self._cleanup_transport_on_error()
            # A TLS verification failure surfaces as an SSLError (a subclass of
            # socket.error) whose strerror mentions CERTIFICATE_VERIFY_FAILED.
            if isinstance(e, ssl.SSLError) and e.strerror is not None and "CERTIFICATE_VERIFY_FAILED" in e.strerror:
                raise exceptions.TlsExchangeAuthError(cause=e)
            elif isinstance(e, socks.ProxyError):
                # Some socks.ProxyError variants also derive from socket.error,
                # so proxy failures must be distinguished here as well.
                if isinstance(e, socks.SOCKS5AuthError):
                    raise exceptions.UnauthorizedError(cause=e)
                else:
                    raise exceptions.ProtocolProxyError(cause=e)
            else:
                raise exceptions.ConnectionFailedError(cause=e)
        except socks.ProxyError as pe:
            self._cleanup_transport_on_error()
            if isinstance(pe, socks.SOCKS5AuthError):
                raise exceptions.UnauthorizedError(cause=pe)
            else:
                raise exceptions.ProtocolProxyError(cause=pe)
        except Exception as e:
            self._cleanup_transport_on_error()
            raise exceptions.ProtocolClientError(message="Unexpected Paho failure during connect", cause=e)
        logger.debug("_mqtt_client.connect returned rc={}".format(rc))
        if rc:
            raise _create_error_from_rc_code(rc)
        self._mqtt_client.loop_start()

    def disconnect(self):
        """
        Disconnect from the MQTT broker.

        :raises: ProtocolClientError if there is some client error.
        """
        logger.info("disconnecting MQTT client")
        try:
            try:
                rc = self._mqtt_client.disconnect()
            except Exception as e:
                raise exceptions.ProtocolClientError(message="Unexpected Paho failure during disconnect", cause=e)
        finally:
            # Always stop the network loop, even if disconnect() raised.
            self._mqtt_client.loop_stop()
            if threading.current_thread() == self._mqtt_client._thread:
                logger.debug("in paho thread. nulling _thread")
                self._mqtt_client._thread = None
        logger.debug("_mqtt_client.disconnect returned rc={}".format(rc))
        if rc:
            err = _create_error_from_rc_code(rc)
            raise err

    def subscribe(self, topic, qos=1, callback=None):
        """
        This method subscribes the client to one topic from the MQTT broker.

        :param str topic: a single string specifying the subscription topic to subscribe to
        :param int qos: the desired quality of service level for the subscription. Defaults to 1.
        :param callback: A callback to be triggered upon completion (Optional).

        :return: message ID for the subscribe request.

        :raises: ValueError if qos is not 0, 1 or 2.
        :raises: ValueError if topic is None or has zero string length.
        :raises: ConnectionDroppedError if connection is dropped during execution.
        :raises: ProtocolClientError if there is some other client error.
        """
        logger.info("subscribing to {} with qos {}".format(topic, qos))
        try:
            rc, mid = self._mqtt_client.subscribe(topic, qos=qos)
        except ValueError:
            # Invalid topic/qos - propagate unchanged per this method's contract.
            raise
        except Exception as e:
            raise exceptions.ProtocolClientError(message="Unexpected Paho failure during subscribe", cause=e)
        logger.debug("_mqtt_client.subscribe returned rc={}".format(rc))
        if rc:
            raise _create_error_from_rc_code(rc)
        self._op_manager.establish_operation(mid, callback)

    def unsubscribe(self, topic, callback=None):
        """
        Unsubscribe the client from one topic on the MQTT broker.

        :param str topic: a single string which is the subscription topic to unsubscribe from.
        :param callback: A callback to be triggered upon completion (Optional).

        :raises: ValueError if topic is None or has zero string length.
        :raises: ConnectionDroppedError if connection is dropped during execution.
        :raises: ProtocolClientError if there is some other client error.
        """
        logger.info("unsubscribing from {}".format(topic))
        try:
            rc, mid = self._mqtt_client.unsubscribe(topic)
        except ValueError:
            raise
        except Exception as e:
            raise exceptions.ProtocolClientError(message="Unexpected Paho failure during unsubscribe", cause=e)
        logger.debug("_mqtt_client.unsubscribe returned rc={}".format(rc))
        if rc:
            raise _create_error_from_rc_code(rc)
        self._op_manager.establish_operation(mid, callback)

    def publish(self, topic, payload, qos=1, callback=None):
        """
        Send a message via the MQTT broker.

        :param str topic: topic: The topic that the message should be published on.
        :param payload: The actual message to send.
        :type payload: str, bytes, int, float or None
        :param int qos: the desired quality of service level for the subscription. Defaults to 1.
        :param callback: A callback to be triggered upon completion (Optional).

        :raises: ValueError if qos is not 0, 1 or 2
        :raises: ValueError if topic is None or has zero string length
        :raises: ValueError if topic contains a wildcard ("+")
        :raises: ValueError if the length of the payload is greater than 268435455 bytes
        :raises: TypeError if payload is not a valid type
        :raises: ConnectionDroppedError if connection is dropped during execution.
        :raises: ProtocolClientError if there is some other client error.
        """
        logger.info("publishing on {}".format(topic))
        try:
            rc, mid = self._mqtt_client.publish(topic=topic, payload=payload, qos=qos)
        except ValueError:
            raise
        except TypeError:
            raise
        except Exception as e:
            raise exceptions.ProtocolClientError(message="Unexpected Paho failure during publish", cause=e)
        logger.debug("_mqtt_client.publish returned rc={}".format(rc))
        if rc:
            raise _create_error_from_rc_code(rc)
        self._op_manager.establish_operation(mid, callback)
class OperationManager(object):
    """Tracks pending operations and their associated callbacks until completion."""

    def __init__(self):
        # mid -> callback for operations awaiting a broker response.
        self._pending_operation_callbacks = {}
        # mid -> mid for responses that arrived before the operation was established.
        self._unknown_operation_completions = {}
        self._lock = threading.Lock()

    def establish_operation(self, mid, callback=None):
        """Establish a pending operation identified by MID, and store its completion callback.

        If the operation has already been completed, the callback will be triggered.
        """
        completed_early = False
        with self._lock:
            if mid in self._unknown_operation_completions:
                # Response beat us here; consume the stored completion record.
                del self._unknown_operation_completions[mid]
                completed_early = True
            else:
                self._pending_operation_callbacks[mid] = callback
                logger.debug("Waiting for response on MID: {}".format(mid))
        if not completed_early:
            return
        logger.debug("Response for MID: {} was received early - triggering callback".format(mid))
        if not callback:
            logger.debug("No callback for MID: {}".format(mid))
            return
        try:
            callback()
        except Exception:
            logger.error("Unexpected error calling callback for MID: {}".format(mid))
            logger.error(traceback.format_exc())

    def complete_operation(self, mid):
        """Complete an operation identified by MID and trigger the associated completion callback.

        If the operation MID is unknown, the completion status will be stored until
        the operation is established.
        """
        recognized = False
        callback = None
        with self._lock:
            if mid in self._pending_operation_callbacks:
                callback = self._pending_operation_callbacks.pop(mid)
                recognized = True
            else:
                logger.debug("Response received for unknown MID: {}".format(mid))
                self._unknown_operation_completions[mid] = mid
        if not recognized:
            return
        logger.debug("Response received for recognized MID: {} - triggering callback".format(mid))
        if not callback:
            logger.debug("No callback set for MID: {}".format(mid))
            return
        try:
            callback()
        except Exception:
            logger.error("Unexpected error calling callback for MID: {}".format(mid))
            logger.error(traceback.format_exc())

View File

@@ -0,0 +1,17 @@
# uncompyle6 version 3.9.2
# Python bytecode version base 3.7.0 (3394)
# Decompiled from: Python 3.8.19 (default, Mar 20 2024, 15:27:52)
# [Clang 14.0.6 ]
# Embedded file name: /var/user/app/device_supervisorbak/device_supervisor/lib/azure/iot/device/common/pipeline/__init__.py
# Compiled at: 2024-04-18 03:12:58
# Size of source mod 2**32: 342 bytes
"""Azure IoT Hub Device SDK Pipeline
This package provides pipeline objects for use with the Azure IoT Hub Device SDK.
INTERNAL USAGE ONLY
"""
from .pipeline_events_base import PipelineEvent
from .pipeline_ops_base import PipelineOperation
from .pipeline_stages_base import PipelineStage
from .pipeline_exceptions import OperationCancelled

View File

@@ -0,0 +1,41 @@
# uncompyle6 version 3.9.2
# Python bytecode version base 3.7.0 (3394)
# Decompiled from: Python 3.8.19 (default, Mar 20 2024, 15:27:52)
# [Clang 14.0.6 ]
# Embedded file name: /var/user/app/device_supervisorbak/device_supervisor/lib/azure/iot/device/common/pipeline/config.py
# Compiled at: 2024-04-18 03:12:58
# Size of source mod 2**32: 3947 bytes
import logging, six, abc
from azure.iot.device import constant
logger = logging.getLogger(__name__)
DEFAULT_KEEPALIVE = 60
@six.add_metaclass(abc.ABCMeta)
class BasePipelineConfig(object):
    """A base class for storing all configurations/options shared across the Azure IoT Python Device Client Library.
    More specific configurations such as those that only apply to the IoT Hub Client will be found in the respective
    config files.
    """

    def __init__(self, websockets=False, cipher="", proxy_options=None, keep_alive=DEFAULT_KEEPALIVE):
        """Initializer for BasePipelineConfig.

        NOTE(review): the original __init__ failed to decompile ("Parse error at
        or near `LOAD_FAST`" was left in its place, which is invalid syntax).
        This body is reconstructed from the validators defined on this class and
        the public SDK signature - confirm against the deployed bytecode.

        :param bool websockets: Enabling/disabling websockets in MQTT. This feature is relevant
            if a firewall blocks port 8883 from use.
        :param cipher: Optional cipher suite(s) for TLS/SSL, as a string in OpenSSL cipher list
            format, or as a list of cipher suite strings.
        :type cipher: str or list(str)
        :param proxy_options: Details of proxy configuration.
        :param int keep_alive: Maximum period in seconds between communications with the broker.
        """
        self.websockets = websockets
        self.cipher = self._sanitize_cipher(cipher)
        self.proxy_options = proxy_options
        self.keep_alive = self._validate_keep_alive(keep_alive)

    @staticmethod
    def _sanitize_cipher(cipher):
        """Sanitize the cipher input and convert to a string in OpenSSL list format.

        :param cipher: Cipher suite(s) as a list of strings or a single string.
        :returns: A single colon-separated, upper-cased OpenSSL cipher list string.
        :raises: TypeError if cipher is neither a list nor a string.
        """
        if isinstance(cipher, list):
            cipher = ":".join(cipher)
        elif isinstance(cipher, str):
            cipher = cipher.upper()
            cipher = cipher.replace("_", "-")
        else:
            raise TypeError("Invalid type for 'cipher'")
        return cipher

    @staticmethod
    def _validate_keep_alive(keep_alive):
        """Validate and normalize the keep-alive interval.

        :param keep_alive: Candidate keep-alive value; must be coercible to int.
        :returns: The keep-alive value as an int.
        :raises: ValueError if not int-coercible, non-positive, or above MAX_KEEP_ALIVE_SECS.
        """
        try:
            keep_alive = int(keep_alive)
        except (ValueError, TypeError):
            raise ValueError("Invalid type for 'keep alive'. Permissible types are integer.")
        if keep_alive <= 0 or keep_alive > constant.MAX_KEEP_ALIVE_SECS:
            raise ValueError("'keep alive' can not be zero OR negative AND can not be more than 29 minutes. It is recommended to choose 'keep alive' around 60 secs.")
        return keep_alive

View File

@@ -0,0 +1,38 @@
# uncompyle6 version 3.9.2
# Python bytecode version base 3.7.0 (3394)
# Decompiled from: Python 3.8.19 (default, Mar 20 2024, 15:27:52)
# [Clang 14.0.6 ]
# Embedded file name: /var/user/app/device_supervisorbak/device_supervisor/lib/azure/iot/device/common/pipeline/pipeline_events_base.py
# Compiled at: 2024-04-18 03:12:58
# Size of source mod 2**32: 3234 bytes
class PipelineEvent(object):
    """
    A base class for data objects representing events that travel up the pipeline.

    PipelineEvent objects are used for anything that happens inside the pipeline that
    cannot be attributed to a specific operation, such as a spontaneous disconnect.

    PipelineEvents flow up the pipeline until they reach the client. Every stage
    has the opportunity to handle a given event; unhandled events should be passed
    up to the next stage (the default behavior). Stages may tie a PipelineEvent to
    a PipelineOperation object if they are waiting for a response for that
    particular operation.

    :ivar name: The name of the event. This is used primarily for logging.
    :type name: str
    """

    def __init__(self):
        """
        Initializer for PipelineEvent objects.

        :raises: TypeError when instantiated directly rather than via a subclass.
        """
        if type(self) is PipelineEvent:
            raise TypeError("Cannot instantiate PipelineEvent object. You need to use a derived class")
        self.name = type(self).__name__
class ResponseEvent(PipelineEvent):
    """
    A PipelineEvent object which is the second part of a RequestAndResponseOperation
    (the response). The RequestAndResponseOperation represents the common operation of
    sending a request to iothub with a request_id ($rid) value and waiting for a response
    with the same $rid value. This convention is used by both Twin and Provisioning features.

    The response represented by this event has not yet been matched to the corresponding
    RequestOperation; that matching is done by the CoordinateRequestAndResponseStage,
    which takes the contents of this event and puts it into the RequestAndResponseOperation
    with the matching $rid value.

    :ivar request_id: The request ID which will eventually be used to match a
        RequestOperation to this event.
    :type request_id: str
    :ivar status_code: The status code returned by the response. Any value under 300 is
        considered success.
    :type status_code: int
    :ivar response_body: The body of the response.
    :type response_body: str
    :ivar retry_after: A retry interval value that was extracted from the topic.
    :type retry_after: int
    """

    def __init__(self, request_id, status_code, response_body, retry_after=None):
        super(ResponseEvent, self).__init__()
        self.retry_after = retry_after
        self.request_id = request_id
        self.status_code = status_code
        self.response_body = response_body
class ConnectedEvent(PipelineEvent):
    """
    A PipelineEvent object indicating a connection has been established.
    """
class DisconnectedEvent(PipelineEvent):
    """
    A PipelineEvent object indicating a connection has been dropped.
    """

View File

@@ -0,0 +1,22 @@
# uncompyle6 version 3.9.2
# Python bytecode version base 3.7.0 (3394)
# Decompiled from: Python 3.8.19 (default, Mar 20 2024, 15:27:52)
# [Clang 14.0.6 ]
# Embedded file name: /var/user/app/device_supervisorbak/device_supervisor/lib/azure/iot/device/common/pipeline/pipeline_events_mqtt.py
# Compiled at: 2024-04-18 03:12:58
# Size of source mod 2**32: 890 bytes
from . import PipelineEvent
class IncomingMQTTMessageEvent(PipelineEvent):
    """
    A PipelineEvent object which represents an incoming MQTT message on some MQTT topic.
    """

    def __init__(self, topic, payload):
        """
        Initializer for IncomingMQTTMessageEvent objects.

        :param str topic: The name of the topic that the incoming message arrived on.
        :param str payload: The payload of the message
        """
        super(IncomingMQTTMessageEvent, self).__init__()
        self.payload = payload
        self.topic = topic

View File

@@ -0,0 +1,28 @@
# uncompyle6 version 3.9.2
# Python bytecode version base 3.7.0 (3394)
# Decompiled from: Python 3.8.19 (default, Mar 20 2024, 15:27:52)
# [Clang 14.0.6 ]
# Embedded file name: /var/user/app/device_supervisorbak/device_supervisor/lib/azure/iot/device/common/pipeline/pipeline_exceptions.py
# Compiled at: 2024-04-18 03:12:58
# Size of source mod 2**32: 961 bytes
"""This module defines exceptions that may be raised from a pipeline"""
from azure.iot.device.common.chainable_exception import ChainableException
class PipelineException(ChainableException):
    """Generic pipeline exception"""
class OperationCancelled(PipelineException):
    """Operation was cancelled"""
class OperationError(PipelineException):
    """Error while executing an Operation"""
class PipelineTimeoutError(PipelineException):
    """
    Pipeline operation timed out
    """
class PipelineError(PipelineException):
    """Error caused by incorrect pipeline configuration"""

View File

@@ -0,0 +1,273 @@
# uncompyle6 version 3.9.2
# Python bytecode version base 3.7.0 (3394)
# Decompiled from: Python 3.8.19 (default, Mar 20 2024, 15:27:52)
# [Clang 14.0.6 ]
# Embedded file name: /var/user/app/device_supervisorbak/device_supervisor/lib/azure/iot/device/common/pipeline/pipeline_ops_base.py
# Compiled at: 2024-04-18 03:12:58
# Size of source mod 2**32: 19917 bytes
import sys, logging, traceback
from . import pipeline_exceptions
from . import pipeline_thread
from azure.iot.device.common import handle_exceptions
logger = logging.getLogger(__name__)
class PipelineOperation(object):
    __doc__ = '\n    A base class for data objects representing operations that travels down the pipeline.\n\n    Each PipelineOperation object represents a single asyncroneous operation that is performed\n    by the pipeline. The PipelineOperation objects travel through "stages" of the pipeline,\n    and each stage has the opportunity to act on each specific operation that it\n    receives. If a stage does not handle a particular operation, it needs to pass it to the\n    next stage. If the operation gets to the end of the pipeline without being handled\n    (completed), then it is treated as an error.\n\n    :ivar name: The name of the operation. This is used primarily for logging\n    :type name: str\n    :ivar callback: The callback that is called when the operation is completed, either\n    successfully or with a failure.\n    :type callback: Function\n    :ivar needs_connection: This is an attribute that indicates whether a particular operation\n    requires a connection to operate. This is currently used by the AutoConnectStage\n    stage, but this functionality will be revamped shortly.\n    :type needs_connection: Boolean\n    :ivar error: The presence of a value in the error attribute indicates that the operation failed,\n    absence of this value indicates that the operation either succeeded or hasn\'t been handled yet.\n    :type error: Error\n    '

    def __init__(self, callback):
        """
        Initializer for PipelineOperation objects.

        :param Function callback: The function that gets called when this operation is complete or has
            failed. The callback function must accept A PipelineOperation object which indicates
            the specific operation which has completed or failed.
        """
        # Abstract base: only subclasses may be instantiated.
        if self.__class__ == PipelineOperation:
            raise TypeError("Cannot instantiate PipelineOperation object. You need to use a derived class")
        self.name = self.__class__.__name__
        # Callbacks are resolved in LIFO order on completion (see complete()).
        self.callback_stack = []
        self.needs_connection = False
        # completed: fully resolved; completing: resolution currently in progress.
        self.completed = False
        self.completing = False
        self.error = None
        self.add_callback(callback)

    def add_callback(self, callback):
        """Adds a callback to the Operation that will be triggered upon Operation completion.

        When an Operation is completed, all callbacks will be resolved in LIFO order.

        Callbacks cannot be added to an already completed operation, or an operation that is
        currently undergoing a completion process.

        :param callback: The callback to add to the operation.

        :raises: OperationError if the operation is already completed, or is in the process of
            completing.
        """
        if self.completed:
            raise pipeline_exceptions.OperationError("{}: Attempting to add a callback to an already-completed operation!".format(self.name))
        elif self.completing:
            raise pipeline_exceptions.OperationError("{}: Attempting to add a callback to a operation with completion in progress!".format(self.name))
        else:
            self.callback_stack.append(callback)

    @pipeline_thread.runs_on_pipeline_thread
    def complete(self, error=None):
        """ Complete the operation, and trigger all callbacks in LIFO order.

        The operation is completed successfully be default, or completed unsucessfully if an error
        is provided.

        An operation that is already fully completed, or in the process of completion cannot be
        completed again.

        This process can be halted if a callback for the operation invokes the .halt_completion()
        method on this Operation.

        :param error: Optionally provide an Exception object indicating the error that caused
            the completion. Providing an error indicates that the operation was unsucessful.
        """
        if error:
            logger.debug("{}: completing with error {}".format(self.name, error))
        else:
            logger.debug("{}: completing without error".format(self.name))
        if self.completed or self.completing:
            # Double-completion is a background error, not a raised one.
            e = pipeline_exceptions.OperationError("Attempting to complete an already-completed operation: {}".format(self.name))
            handle_exceptions.handle_background_exception(e)
        else:
            self.completing = True
            self.error = error
            # Drain callbacks LIFO; a callback may call halt_completion(),
            # which clears self.completing and stops the drain.
            while self.callback_stack:
                if not self.completing:
                    logger.debug("{}: Completion halted!".format(self.name))
                    break
                if self.completed:
                    # A callback must not fully complete the op while we are
                    # still resolving; report and stop.
                    e = pipeline_exceptions.OperationError("Operation reached fully completed state while still resolving completion: {}".format(self.name))
                    handle_exceptions.handle_background_exception(e)
                    break
                callback = self.callback_stack.pop()
                try:
                    callback(op=self, error=error)
                except Exception as e:
                    try:
                        logger.warning("Unhandled error while triggering callback for {}".format(self.name))
                        handle_exceptions.handle_background_exception(e)
                    finally:
                        e = None
                        del e
            if self.completing:
                # Not halted: transition to the fully-completed state.
                self.completing = False
                self.completed = True

    @pipeline_thread.runs_on_pipeline_thread
    def halt_completion(self):
        """Halt the completion of an operation that is currently undergoing a completion process
        as a result of a call to .complete().

        Completion cannot be halted if there is no currently ongoing completion process. The only
        way to successfully invoke this method is from within a callback on the Operation in
        question.

        This method will leave any yet-untriggered callbacks on the Operation to be triggered upon
        a later completion.

        This method will clear any error associated with the currently ongoing completion process
        from the Operation.
        """
        if not self.completing:
            e = pipeline_exceptions.OperationError("Attempting to halt completion of an operation not in the process of completion: {}".format(self.name))
            handle_exceptions.handle_background_exception(e)
        else:
            logger.debug("{}: Halting completion...".format(self.name))
            self.completing = False
            self.error = None

    @pipeline_thread.runs_on_pipeline_thread
    def spawn_worker_op(self, worker_op_type, **kwargs):
        """Create and return a new operation, which, when completed, will complete the operation
        it was spawned from.

        :param worker_op_type: The type (class) of the new worker operation.
        :param **kwargs: The arguments to instantiate the new worker operation with. Note that a
            callback is not required, but if provided, will be triggered prior to completing the
            operation that spawned the worker operation.

        :returns: A new worker operation of the type specified in the worker_op_type parameter.
        """
        logger.debug("{}: creating worker op of type {}".format(self.name, worker_op_type.__name__))

        @pipeline_thread.runs_on_pipeline_thread
        def on_worker_op_complete(op, error):
            # Completing the worker completes the parent (this) operation.
            logger.debug("{}: Worker op ({}) has been completed".format(self.name, op.name))
            self.complete(error=error)

        if "callback" in kwargs:
            # A user-provided callback runs before the parent is completed,
            # so it is added AFTER on_worker_op_complete (LIFO resolution).
            provided_callback = kwargs["callback"]
            kwargs["callback"] = on_worker_op_complete
            worker_op = worker_op_type(**kwargs)
            worker_op.add_callback(provided_callback)
        else:
            kwargs["callback"] = on_worker_op_complete
            worker_op = worker_op_type(**kwargs)
        return worker_op
class InitializePipelineOperation(PipelineOperation):
    """
    A PipelineOperation for doing initial setup of the pipeline.

    Attributes can be dynamically added to this operation for use in other stages if necessary
    (e.g. initialization requires a derived value).
    """
class ConnectOperation(PipelineOperation):
    """
    A PipelineOperation object which tells the pipeline to connect to whatever service it
    needs to connect to.

    This operation is in the group of base operations because connecting is a common operation
    that many clients might need to do. Even though this is a base operation, it will most
    likely be handled by a more specific stage (such as an IoTHub or MQTT stage).
    """

    def __init__(self, callback):
        # Set before super().__init__ so the attribute exists even if a callback fires early.
        self.watchdog_timer = None
        super(ConnectOperation, self).__init__(callback)
class ReauthorizeConnectionOperation(PipelineOperation):
    """
    A PipelineOperation object which tells the pipeline to reauthorize the connection to
    whatever service it is connected to.

    Clients will most-likely submit a ReauthorizeConnectionOperation when some credential
    (such as a sas token) has changed and the protocol client needs to re-establish the
    connection to refresh the credentials.

    This operation is in the group of base operations because reauthorizing is a common
    operation that many clients might need to do. Even though this is a base operation,
    it will most likely be handled by a more specific stage (such as an IoTHub or MQTT stage).
    """

    def __init__(self, callback):
        # Set before super().__init__ so the attribute exists even if a callback fires early.
        self.watchdog_timer = None
        super(ReauthorizeConnectionOperation, self).__init__(callback)
class DisconnectOperation(PipelineOperation):
    """
    A PipelineOperation object which tells the pipeline to disconnect from whatever service
    it might be connected to.

    This operation is in the group of base operations because disconnecting is a common
    operation that many clients might need to do. Even though this is a base operation, it
    will most likely be handled by a more specific stage (such as an IoTHub or MQTT stage).
    """
class EnableFeatureOperation(PipelineOperation):
    """
    A PipelineOperation object which tells the pipeline to "enable" a particular feature.

    A "feature" is just a string which represents some set of functionality that needs to be
    enabled, such as "C2D" or "Twin".

    This object has no notion of what it means to "enable" a feature.  That knowledge is
    handled by stages in the pipeline which might convert this operation to a more specific
    operation (such as an MQTT subscribe operation with a specific topic name).

    Even though this is a base operation, it will most likely be handled by a more specific
    stage (such as an IoTHub or MQTT stage).
    """

    def __init__(self, feature_name, callback):
        """
        Initializer for EnableFeatureOperation objects.

        :param str feature_name: Name of the feature being enabled.  The meaning of this
            string is defined by the stage which handles this operation.
        :param Function callback: Function called when this operation completes or fails.
            It must accept the PipelineOperation object that completed or failed.
        """
        super().__init__(callback=callback)
        self.feature_name = feature_name
class DisableFeatureOperation(PipelineOperation):
    """
    A PipelineOperation object which tells the pipeline to "disable" a particular feature.

    A "feature" is just a string which represents some set of functionality that needs to be
    disabled, such as "C2D" or "Twin".

    This object has no notion of what it means to "disable" a feature.  That knowledge is
    handled by stages in the pipeline which might convert this operation to a more specific
    operation (such as an MQTT unsubscribe operation with a specific topic name).

    Even though this is a base operation, it will most likely be handled by a more specific
    stage (such as an IoTHub or MQTT stage).
    """

    def __init__(self, feature_name, callback):
        """
        Initializer for DisableFeatureOperation objects.

        :param str feature_name: Name of the feature being disabled.  The meaning of this
            string is defined by the stage which handles this operation.
        :param Function callback: Function called when this operation completes or fails.
            It must accept the PipelineOperation object that completed or failed.
        """
        super().__init__(callback=callback)
        self.feature_name = feature_name
class RequestAndResponseOperation(PipelineOperation):
    """
    A PipelineOperation object which wraps the common operation of sending a request to
    iothub with a request_id ($rid) value and waiting for a response with the same $rid
    value.  This convention is used by both Twin and Provisioning features.

    Even though this is a base operation, it will most likely be generated and also handled
    by more specific stages (such as IoTHub or MQTT stages).

    The type of the request payload and the response payload is undefined at this level and
    depends on the type of request being executed.  Any necessary type conversion is the
    responsibility of the stage that creates this operation and the stage that executes it.

    :ivar status_code: The status code returned by the response.  Any value under 300 is
        considered success.
    :type status_code: int
    :ivar response_body: The body of the response.
    :type response_body: Undefined
    :ivar query_params: Any query parameters that need to be sent with the request.
        Example is the id of the operation as returned by the initial provisioning request.
    """

    def __init__(self, request_type, method, resource_location, request_body, callback, query_params=None):
        """
        Initializer for RequestAndResponseOperation objects.

        :param str request_type: The type of request, used by protocol-specific stages to
            generate the actual request.  For example, if request_type is "twin", then the
            iothub_mqtt stage will convert the request into an MQTT publish with a topic
            that begins with $iothub/twin.
        :param str method: The method for the request, in the REST sense of the word, such
            as "POST", "GET", etc.
        :param str resource_location: The resource that the method is acting on, in the REST
            sense of the word.  For a twin request with method "GET", this is most likely
            the string "/" which retrieves the entire twin.
        :param request_body: The body of the request.  This is a required field; a single
            space can be used to denote an empty body.
        :type request_body: Undefined
        :param Function callback: Function called when this operation completes or fails.
            It must accept the PipelineOperation object that completed or failed.
        :param query_params: Optional query parameters to send with the request.
        """
        super().__init__(callback=callback)
        # Request fields, supplied by the caller.
        self.request_type = request_type
        self.method = method
        self.resource_location = resource_location
        self.request_body = request_body
        self.query_params = query_params
        # Response fields, populated when the matching response arrives.
        self.status_code = None
        self.response_body = None
class RequestOperation(PipelineOperation):
    """
    A PipelineOperation object which is the first part of a RequestAndResponseOperation
    operation (the request).  The second part of the RequestAndResponseOperation operation
    (the response) is returned via a ResponseEvent event.

    Even though this is a base operation, it will most likely be generated and also handled
    by more specific stages (such as IoTHub or MQTT stages).
    """

    def __init__(self, request_type, method, resource_location, request_body, request_id, callback, query_params=None):
        """
        Initializer for RequestOperation objects.

        :param str request_type: The type of request, used by protocol-specific stages to
            generate the actual request.  For example, if request_type is "twin", then the
            iothub_mqtt stage will convert the request into an MQTT publish with a topic
            that begins with $iothub/twin.
        :param str method: The method for the request, in the REST sense of the word, such
            as "POST", "GET", etc.
        :param str resource_location: The resource that the method is acting on, in the REST
            sense of the word.  For a twin request with method "GET", this is most likely
            the string "/" which retrieves the entire twin.
        :param request_body: The body of the request.  This is a required field; a single
            space can be used to denote an empty body.
        :type request_body: dict, str, int, float, bool, or None (JSON compatible values)
        :param str request_id: The id ($rid) used to correlate the eventual response with
            this request.
        :param Function callback: Function called when this operation completes or fails.
            It must accept the PipelineOperation object that completed or failed.
        :param query_params: Optional query parameters to send with the request.  Example is
            the id of the operation as returned by the initial provisioning request.
        """
        super().__init__(callback=callback)
        self.request_type = request_type
        self.method = method
        self.resource_location = resource_location
        self.request_body = request_body
        self.request_id = request_id
        self.query_params = query_params

View File

@@ -0,0 +1,33 @@
# uncompyle6 version 3.9.2
# Python bytecode version base 3.7.0 (3394)
# Decompiled from: Python 3.8.19 (default, Mar 20 2024, 15:27:52)
# [Clang 14.0.6 ]
# Embedded file name: /var/user/app/device_supervisorbak/device_supervisor/lib/azure/iot/device/common/pipeline/pipeline_ops_http.py
# Compiled at: 2024-04-18 03:12:58
# Size of source mod 2**32: 1765 bytes
from . import PipelineOperation
class HTTPRequestAndResponseOperation(PipelineOperation):
    """
    A PipelineOperation object which contains arguments used to connect to a server using
    the HTTP protocol.

    This operation is in the group of HTTP operations because its attributes are very
    specific to the HTTP protocol.
    """

    def __init__(self, method, path, headers, body, query_params, callback):
        """
        Initializer for HTTPRequestAndResponseOperation objects.

        :param str method: The HTTP method used in the request
        :param str path: The path to be used in the request url
        :param dict headers: The headers to be used in the HTTP request
        :param str body: The body to be provided with the HTTP request
        :param str query_params: The query parameters to be used in the request url
        :param Function callback: Function called when this operation completes or fails.
            It must accept the PipelineOperation object that completed or failed.
        """
        super().__init__(callback=callback)
        # Request fields, supplied by the caller.
        self.method = method
        self.path = path
        self.headers = headers
        self.body = body
        self.query_params = query_params
        # Response fields, populated by the HTTP transport when the request completes.
        self.status_code = None
        self.response_body = None
        self.reason = None

View File

@@ -0,0 +1,65 @@
# uncompyle6 version 3.9.2
# Python bytecode version base 3.7.0 (3394)
# Decompiled from: Python 3.8.19 (default, Mar 20 2024, 15:27:52)
# [Clang 14.0.6 ]
# Embedded file name: /var/user/app/device_supervisorbak/device_supervisor/lib/azure/iot/device/common/pipeline/pipeline_ops_mqtt.py
# Compiled at: 2024-04-18 03:12:58
# Size of source mod 2**32: 3316 bytes
from . import PipelineOperation
class MQTTPublishOperation(PipelineOperation):
    """
    A PipelineOperation object which contains arguments used to publish a specific payload
    on a specific topic using the MQTT protocol.

    This operation is in the group of MQTT operations because its attributes are very
    specific to the MQTT protocol.
    """

    def __init__(self, topic, payload, callback):
        """
        Initializer for MQTTPublishOperation objects.

        :param str topic: The name of the topic to publish to
        :param str payload: The payload to publish
        :param Function callback: Function called when this operation completes or fails.
            It must accept the PipelineOperation object that completed or failed.
        """
        super().__init__(callback=callback)
        self.topic = topic
        self.payload = payload
        # Publishing requires a live connection; AutoConnectStage reads this flag.
        self.needs_connection = True
        # Timer slot used by retry logic in later pipeline stages; inactive at creation.
        self.retry_timer = None
class MQTTSubscribeOperation(PipelineOperation):
    """
    A PipelineOperation object which contains arguments used to subscribe to a specific MQTT
    topic using the MQTT protocol.

    This operation is in the group of MQTT operations because its attributes are very
    specific to the MQTT protocol.
    """

    def __init__(self, topic, callback):
        """
        Initializer for MQTTSubscribeOperation objects.

        :param str topic: The name of the topic to subscribe to
        :param Function callback: Function called when this operation completes or fails.
            It must accept the PipelineOperation object that completed or failed.
        """
        super().__init__(callback=callback)
        self.topic = topic
        # Subscribing requires a live connection; AutoConnectStage reads this flag.
        self.needs_connection = True
        # Timer slots used by timeout/retry logic in later stages; inactive at creation.
        self.timeout_timer = None
        self.retry_timer = None
class MQTTUnsubscribeOperation(PipelineOperation):
    """
    A PipelineOperation object which contains arguments used to unsubscribe from a specific
    MQTT topic using the MQTT protocol.

    This operation is in the group of MQTT operations because its attributes are very
    specific to the MQTT protocol.
    """

    def __init__(self, topic, callback):
        """
        Initializer for MQTTUnsubscribeOperation objects.

        :param str topic: The name of the topic to unsubscribe from
        :param Function callback: Function called when this operation completes or fails.
            It must accept the PipelineOperation object that completed or failed.
        """
        super().__init__(callback=callback)
        self.topic = topic
        # Unsubscribing requires a live connection; AutoConnectStage reads this flag.
        self.needs_connection = True
        # Timer slots used by timeout/retry logic in later stages; inactive at creation.
        self.timeout_timer = None
        self.retry_timer = None

View File

@@ -0,0 +1,685 @@
# uncompyle6 version 3.9.2
# Python bytecode version base 3.7.0 (3394)
# Decompiled from: Python 3.8.19 (default, Mar 20 2024, 15:27:52)
# [Clang 14.0.6 ]
# Embedded file name: /var/user/app/device_supervisorbak/device_supervisor/lib/azure/iot/device/common/pipeline/pipeline_stages_base.py
# Compiled at: 2024-04-18 03:12:58
# Size of source mod 2**32: 53129 bytes
import logging, abc, six, sys, time, traceback, uuid, weakref, threading
from six.moves import queue
from . import pipeline_events_base
from . import pipeline_ops_base, pipeline_ops_mqtt
from . import pipeline_thread
from . import pipeline_exceptions
from azure.iot.device.common import handle_exceptions, transport_exceptions
from azure.iot.device.common.auth import sastoken as st
from azure.iot.device.common.callable_weak_method import CallableWeakMethod
logger = logging.getLogger(__name__)
@six.add_metaclass(abc.ABCMeta)
class PipelineStage(object):
    """
    Base class representing a stage in the processing pipeline.

    Each stage receives PipelineOperation objects from the stage above it, possibly acts on
    them, and possibly passes them down; likewise it receives PipelineEvent objects from the
    stage below it, possibly acts on them, and possibly passes them up.  A stage that does
    not act on an operation or event simply forwards it, so the pipeline implements the
    "chain of responsibility" design pattern (Gamma, et.al. "Design Patterns". Addison
    Wesley. 1995): each stage encodes one rule or policy and is ignorant of the stages
    before or after it.

    Stages should act on the smallest set of rules possible, keeping them small and easily
    testable.  Complex logic should operate on the most generic type of operation possible;
    "converter" stages translate specific operations into more generic ones (and vice
    versa) so that generic logic can be reused.  For example, UseSkAuthProviderStage turns
    a specific operation (use an auth provider) into generic ones (device_id plus a SAS
    token), while IoTHubMQTTTranslationStage turns IoTHub operations (such as
    SendD2CMessageOperation) into MQTT operations (such as Publish).

    :ivar name: The name of the stage.  This is used primarily for logging.
    :type name: str
    :ivar next: The next stage in the pipeline, or None for the last stage.
    :type next: PipelineStage
    :ivar previous: The previous stage in the pipeline, or None for the first stage.
    :type previous: PipelineStage
    :ivar pipeline_root: The first stage (root) of the pipeline, useful if a stage wants to
        submit an operation to the pipeline starting at the root.
    :type pipeline_root: PipelineStage
    """

    def __init__(self):
        """Initializer for PipelineStage objects."""
        self.name = self.__class__.__name__
        self.next = None
        self.previous = None
        self.pipeline_root = None

    @pipeline_thread.runs_on_pipeline_thread
    def run_op(self, op):
        """
        Run the given operation.

        This is the public entry point; derived classes override the private _run_op
        method instead.  When run_op returns, the operation has not necessarily executed
        to completion -- it has merely been set in motion (queued, sent over the network,
        etc.), and its callback will fire when it eventually completes or fails.

        :param PipelineOperation op: The operation to run.
        """
        try:
            self._run_op(op)
        except Exception as e:
            # A stage must never leak an exception upward; convert it into a failed op
            # so the error is reported through the op's callback chain.
            logger.warning("Unexpected error in {}._run_op() call".format(self))
            logger.warning(traceback.format_exc())
            op.complete(error=e)

    @pipeline_thread.runs_on_pipeline_thread
    def _run_op(self, op):
        """
        Stage-specific implementation of .run_op().  Override this (not .run_op()) in
        child classes to change how a stage behaves when running an operation.

        The default behavior is to pass the operation to the next stage untouched.

        :param PipelineOperation op: The operation to run.
        """
        self.send_op_down(op)

    @pipeline_thread.runs_on_pipeline_thread
    def handle_pipeline_event(self, event):
        """
        Handle a pipeline event that arrives from the stage below.

        Derived classes should not override this function; stage-specific handling belongs
        in the private _handle_pipeline_event override.

        :param PipelineEvent event: The event being passed back up the pipeline.
        """
        try:
            self._handle_pipeline_event(event)
        except Exception as e:
            # Events have no callback to fail, so report unexpected errors through the
            # background-exception channel instead.
            logger.error("Unexpected error in {}._handle_pipeline_event() call".format(self))
            handle_exceptions.handle_background_exception(e)

    @pipeline_thread.runs_on_pipeline_thread
    def _handle_pipeline_event(self, event):
        """
        Stage-specific implementation of .handle_pipeline_event().  Override in stages that
        need to act on specific events; the default passes the event up untouched.

        :param PipelineEvent event: The event being passed back up the pipeline.
        """
        self.send_event_up(event)

    @pipeline_thread.runs_on_pipeline_thread
    def send_op_down(self, op):
        """
        Continue an operation by passing it to the next stage in the pipeline.  If there is
        no next stage, the operation is completed with a PipelineError so the failure flows
        back up the pipeline.

        :param PipelineOperation op: Operation which is being passed on.
        """
        if self.next:
            self.next.run_op(op)
            return
        logger.error("{}({}): no next stage. completing with error".format(self.name, op.name))
        error = pipeline_exceptions.PipelineError("{} not handled after {} stage with no next stage".format(op.name, self.name))
        op.complete(error=error)

    @pipeline_thread.runs_on_pipeline_thread
    def send_event_up(self, event):
        """
        Pass an event to the previous stage of the pipeline.  This is the default travel
        direction of events: they start somewhere (maybe the bottom) and move up until they
        are handled.  An event that reaches past the first stage is reported as a
        background PipelineError.
        """
        if self.previous:
            self.previous.handle_pipeline_event(event)
            return
        error = pipeline_exceptions.PipelineError("{} unhandled at {} stage with no previous stage".format(event.name, self.name))
        handle_exceptions.handle_background_exception(error)
class PipelineRootStage(PipelineStage):
    """
    Object representing the root of a pipeline.  This is where the functions to build the
    pipeline exist, and where clients attach handlers to receive events from the pipeline.

    :ivar on_pipeline_event_handler: Handler set by pipeline users to receive any
        "unsolicited" PipelineEvent objects (such as C2D messages).
    :type on_pipeline_event_handler: Function
    :ivar on_connected_handler: Handler called every time the underlying transport connects.
    :type on_connected_handler: Function
    :ivar on_disconnected_handler: Handler called every time the underlying transport
        disconnects.
    :type on_disconnected_handler: Function
    """

    def __init__(self, pipeline_configuration):
        super().__init__()
        self.on_pipeline_event_handler = None
        self.on_connected_handler = None
        self.on_disconnected_handler = None
        # Tracks transport connection state, updated from Connected/Disconnected events.
        self.connected = False
        self.pipeline_configuration = pipeline_configuration

    def run_op(self, op):
        """
        Run an operation, marshalling it onto the pipeline thread and wrapping the
        user-supplied callback so it fires on the callback thread.
        """
        op.callback_stack[0] = pipeline_thread.invoke_on_callback_thread_nowait(op.callback_stack[0])
        pipeline_thread.invoke_on_pipeline_thread(super().run_op)(op)

    def append_stage(self, new_stage):
        """
        Add a stage to the end of the pipeline.  Returns the root so calls can be chained
        while building the pipeline.

        :param PipelineStage new_stage: Stage to add to the end of the pipeline
        :returns: The root of the pipeline.
        """
        tail = self
        while tail.next:
            tail = tail.next
        tail.next = new_stage
        new_stage.previous = tail
        new_stage.pipeline_root = self
        return self

    @pipeline_thread.runs_on_pipeline_thread
    def _handle_pipeline_event(self, event):
        """
        Override of the PipelineEvent handler.  Because this is the root of the pipeline,
        events are delivered to the caller here: connection-state events update
        self.connected and invoke the connect/disconnect handlers, and all other events go
        to on_pipeline_event_handler (if provided).

        :param PipelineEvent event: Event to be handled, i.e. returned to the caller.
        """
        if isinstance(event, pipeline_events_base.ConnectedEvent):
            logger.debug("{}: ConnectedEvent received. Calling on_connected_handler".format(self.name))
            self.connected = True
            if self.on_connected_handler:
                pipeline_thread.invoke_on_callback_thread_nowait(self.on_connected_handler)()
        elif isinstance(event, pipeline_events_base.DisconnectedEvent):
            logger.debug("{}: DisconnectedEvent received. Calling on_disconnected_handler".format(self.name))
            self.connected = False
            if self.on_disconnected_handler:
                pipeline_thread.invoke_on_callback_thread_nowait(self.on_disconnected_handler)()
        elif self.on_pipeline_event_handler:
            pipeline_thread.invoke_on_callback_thread_nowait(self.on_pipeline_event_handler)(event)
        else:
            logger.error("incoming {} event with no handler. dropping.".format(event.name))
class SasTokenRenewalStage(PipelineStage):
    """
    Stage that keeps a RenewableSasToken fresh: when the pipeline is initialized it starts
    a timer that refreshes the token shortly before expiry and, if connected, reauthorizes
    the connection.
    """

    # Renew this many seconds before the token's TTL elapses.
    DEFAULT_TOKEN_RENEWAL_MARGIN = 120

    def __init__(self):
        super().__init__()
        # threading.Timer driving the next renewal; None when no renewal is scheduled.
        self._token_renewal_timer = None

    @pipeline_thread.runs_on_pipeline_thread
    def _run_op(self, op):
        # Begin the renewal cycle when the pipeline is initialized with a renewable token;
        # every op (including that one) continues down the pipeline unchanged.
        if isinstance(op, pipeline_ops_base.InitializePipelineOperation) and isinstance(self.pipeline_root.pipeline_configuration.sastoken, st.RenewableSasToken):
            self._start_renewal_timer()
        self.send_op_down(op)

    @pipeline_thread.runs_on_pipeline_thread
    def _cancel_token_renewal_timer(self):
        """Cancel and delete any pending renewal timer"""
        timer, self._token_renewal_timer = self._token_renewal_timer, None
        if timer:
            logger.debug("Cancelling SAS Token renewal timer")
            timer.cancel()

    @pipeline_thread.runs_on_pipeline_thread
    def _start_renewal_timer(self):
        """Begin a renewal timer.

        When the timer expires and the token is renewed, a new timer will be set."""
        self._cancel_token_renewal_timer()
        seconds_until_renewal = self.pipeline_root.pipeline_configuration.sastoken.ttl - self.DEFAULT_TOKEN_RENEWAL_MARGIN
        if seconds_until_renewal < 0:
            # Token lifetime is shorter than the renewal margin -- can't schedule a sane
            # renewal, so report it as a background error.
            handle_exceptions.handle_background_exception(pipeline_exceptions.PipelineError("SasToken TTL less than Renewal Margin!"))
            return
        logger.debug("Scheduling SAS Token renewal for {} seconds in the future".format(seconds_until_renewal))
        # Weak reference prevents the timer (and its closures) from keeping this stage alive.
        weak_self = weakref.ref(self)

        @pipeline_thread.runs_on_pipeline_thread
        def on_reauthorize_complete(op, error):
            this = weak_self()
            if error:
                logger.info("{}({}): reauthorize connection operation failed. Error={}".format(this.name, op.name, error))
                handle_exceptions.handle_background_exception(error)
            else:
                logger.info("{}({}): reauthorize connection operation is complete".format(this.name, op.name))

        @pipeline_thread.invoke_on_pipeline_thread_nowait
        def renew_token():
            this = weak_self()
            logger.info("Renewing SAS Token")
            # Refresh the token in place; reauthorize only if currently connected, then
            # schedule the next renewal.
            sastoken = this.pipeline_root.pipeline_configuration.sastoken
            sastoken.refresh()
            if this.pipeline_root.connected:
                this.send_op_down(pipeline_ops_base.ReauthorizeConnectionOperation(callback=on_reauthorize_complete))
            this._start_renewal_timer()

        self._token_renewal_timer = threading.Timer(seconds_until_renewal, renew_token)
        self._token_renewal_timer.daemon = True
        self._token_renewal_timer.start()
class AutoConnectStage(PipelineStage):
    """
    This stage is responsible for ensuring that the protocol is connected when
    it needs to be connected.
    """

    @pipeline_thread.runs_on_pipeline_thread
    def _run_op(self, op):
        # Ops that don't need a connection pass straight through.
        if not op.needs_connection:
            self.send_op_down(op)
        elif self.pipeline_root.connected:

            @pipeline_thread.runs_on_pipeline_thread
            def check_for_connection_failure(op, error):
                # If the op failed and we are no longer connected, the failure was likely
                # caused by the connection dropping, so halt completion and re-run the op
                # (which will trigger a reconnect via the not-connected path).
                if error and not self.pipeline_root.connected:
                    logger.debug("{}({}): op failed with {} and we're not conencted. Re-submitting.".format(self.name, op.name, error))
                    op.halt_completion()
                    self.run_op(op)

            op.add_callback(check_for_connection_failure)
            logger.debug("{}({}): Connected. Sending down and adding callback to check result".format(self.name, op.name))
            self.send_op_down(op)
        else:
            logger.debug("{}({}): Op needs connection. Queueing this op and starting a ConnectionOperation".format(self.name, op.name))
            self._do_connect(op)

    @pipeline_thread.runs_on_pipeline_thread
    def _do_connect(self, op):
        """
        Start connecting the transport in response to some operation
        """
        # Hold the triggering op in a closure; it is run (or failed) once the connect
        # operation finishes.
        triggering_op = op

        @pipeline_thread.runs_on_pipeline_thread
        def on_connect_op_complete(op, error):
            if error:
                logger.debug("{}({}): Connection failed. Completing with failure because of connection failure: {}".format(self.name, triggering_op.name, error))
                triggering_op.complete(error=error)
            else:
                logger.debug("{}({}): connection is complete. Running op that triggered connection.".format(self.name, triggering_op.name))
                self.run_op(triggering_op)

        logger.debug("{}({}): calling down with Connect operation".format(self.name, op.name))
        self.send_op_down(pipeline_ops_base.ConnectOperation(callback=on_connect_op_complete))
class ConnectionLockStage(PipelineStage):
    """
    This stage is responsible for serializing connect, disconnect, and reauthorize ops on
    the pipeline, such that only a single one of these ops can go past this stage at a
    time.  This way, we don't have to worry about cases like "what happens if we try to
    disconnect if we're in the middle of reauthorizing."  This stage will wait for the
    reauthorize to complete before letting the disconnect past.
    """

    def __init__(self):
        super(ConnectionLockStage, self).__init__()
        # Ops that arrive while a connect/disconnect/reauthorize is in flight wait here.
        self.queue = queue.Queue()
        # True while a connect/disconnect/reauthorize op is in flight below this stage.
        self.blocked = False

    @pipeline_thread.runs_on_pipeline_thread
    def _run_op(self, op):
        # NOTE(review): the decompiled original mangled this branch chain -- it completed
        # DisconnectOperation unconditionally (even while connected) and never serialized
        # disconnects.  Restored per the upstream azure-iot-device SDK logic.
        if self.blocked:
            logger.debug("{}({}): pipeline is blocked waiting for a prior connect/disconnect/reauthorize to complete. queueing.".format(self.name, op.name))
            self.queue.put_nowait(op)
        elif isinstance(op, pipeline_ops_base.ConnectOperation) and self.pipeline_root.connected:
            # Already connected: nothing to do.
            logger.info("{}({}): Transport is already connected. Completing.".format(self.name, op.name))
            op.complete()
        elif isinstance(op, pipeline_ops_base.DisconnectOperation) and not self.pipeline_root.connected:
            # Already disconnected: nothing to do.
            logger.info("{}({}): Transport is already disconnected. Completing.".format(self.name, op.name))
            op.complete()
        elif isinstance(op, (
            pipeline_ops_base.ConnectOperation,
            pipeline_ops_base.DisconnectOperation,
            pipeline_ops_base.ReauthorizeConnectionOperation,
        )):
            # A connection-state-changing op: block the stage until it completes, then
            # release anything that queued up behind it.
            self._block(op)

            @pipeline_thread.runs_on_pipeline_thread
            def on_operation_complete(op, error):
                if error:
                    logger.debug("{}({}): op failed. Unblocking queue with error: {}".format(self.name, op.name, error))
                else:
                    logger.debug("{}({}): op succeeded. Unblocking queue".format(self.name, op.name))
                self._unblock(op, error)

            op.add_callback(on_operation_complete)
            self.send_op_down(op)
        else:
            # Any other op passes through unserialized.
            self.send_op_down(op)

    @pipeline_thread.runs_on_pipeline_thread
    def _block(self, op):
        """
        block this stage while we're waiting for the connect/disconnect/reauthorize operation to complete.
        """
        logger.debug("{}({}): blocking".format(self.name, op.name))
        self.blocked = True

    @pipeline_thread.runs_on_pipeline_thread
    def _unblock(self, op, error):
        """
        Unblock this stage after the connect/disconnect/reauthorize operation is complete.  This also means
        releasing all the operations that were queued up.
        """
        logger.debug("{}({}): unblocking and releasing queued ops.".format(self.name, op.name))
        self.blocked = False
        logger.debug("{}({}): processing {} items in queue for error={}".format(self.name, op.name, self.queue.qsize(), error))
        # Swap in a fresh queue first: released ops may re-block this stage and re-queue,
        # and we must not re-process those in this loop.
        old_queue = self.queue
        self.queue = queue.Queue()
        while not old_queue.empty():
            op_to_release = old_queue.get_nowait()
            if error:
                # The blocking op failed, so everything waiting on it fails too.
                logger.debug("{}({}): failing {} op because of error".format(self.name, op.name, op_to_release.name))
                op_to_release.complete(error=error)
            else:
                logger.debug("{}({}): releasing {} op.".format(self.name, op.name, op_to_release.name))
                self.run_op(op_to_release)
class CoordinateRequestAndResponseStage(PipelineStage):
    """
    Pipeline stage which is responsible for coordinating RequestAndResponseOperation operations.  For each
    RequestAndResponseOperation operation, this stage passes down a RequestOperation operation and waits for
    a ResponseEvent event.  All other events are passed down unmodified.
    """

    def __init__(self):
        super(CoordinateRequestAndResponseStage, self).__init__()
        # Maps request_id -> the RequestAndResponseOperation awaiting that response.
        self.pending_responses = {}

    @pipeline_thread.runs_on_pipeline_thread
    def _run_op(self, op):
        if isinstance(op, pipeline_ops_base.RequestAndResponseOperation):
            # Tag the request with a unique id so the eventual ResponseEvent can be
            # matched back to this op.
            request_id = str(uuid.uuid4())
            logger.debug("{}({}): adding request {} to pending list".format(self.name, op.name, request_id))
            self.pending_responses[request_id] = op
            self._send_request_down(request_id, op)
        else:
            self.send_op_down(op)

    @pipeline_thread.runs_on_pipeline_thread
    def _send_request_down(self, request_id, op):
        # The waiting op is completed later, when the matching ResponseEvent arrives in
        # _handle_pipeline_event -- NOT when the RequestOperation below completes.
        op_waiting_for_response = op

        @pipeline_thread.runs_on_pipeline_thread
        def on_send_request_done(op, error):
            # NOTE(review): the decompiler could not recover this callback body ("Parse
            # error at or near COME_FROM").  Reconstructed from the upstream
            # azure-iot-device SDK: if sending the request failed, remove the pending
            # entry and fail the waiting op; on success, keep waiting for the response.
            logger.debug("{}({}): Finished sending {} request to {} resource {}".format(self.name, op_waiting_for_response.name, op_waiting_for_response.request_type, op_waiting_for_response.method, op_waiting_for_response.resource_location))
            if error:
                logger.debug("{}({}): removing request {} from pending list".format(self.name, op_waiting_for_response.name, request_id))
                del self.pending_responses[request_id]
                op_waiting_for_response.complete(error=error)

        logger.debug("{}({}): Sending {} request to {} resource {}".format(self.name, op.name, op.request_type, op.method, op.resource_location))
        new_op = pipeline_ops_base.RequestOperation(method=(op.method),
          resource_location=(op.resource_location),
          request_body=(op.request_body),
          request_id=request_id,
          request_type=(op.request_type),
          callback=on_send_request_done,
          query_params=(op.query_params))
        self.send_op_down(new_op)

    @pipeline_thread.runs_on_pipeline_thread
    def _handle_pipeline_event(self, event):
        if isinstance(event, pipeline_events_base.ResponseEvent):
            # Match the response back to its pending op by request_id and complete it.
            logger.debug("{}({}): Handling event with request_id {}".format(self.name, event.name, event.request_id))
            if event.request_id in self.pending_responses:
                op = self.pending_responses[event.request_id]
                del self.pending_responses[event.request_id]
                op.status_code = event.status_code
                op.response_body = event.response_body
                op.retry_after = event.retry_after
                logger.debug("{}({}): Completing {} request to {} resource {} with status {}".format(self.name, op.name, op.request_type, op.method, op.resource_location, op.status_code))
                op.complete()
            else:
                logger.info("{}({}): request_id {} not found in pending list. Nothing to do. Dropping".format(self.name, event.name, event.request_id))
        elif isinstance(event, pipeline_events_base.ConnectedEvent):
            # On reconnect, re-send every request that is still awaiting a response,
            # since the response may have been lost while disconnected.
            self.send_event_up(event)
            for request_id in self.pending_responses:
                logger.info("{stage}: ConnectedEvent: re-publishing request {id} for {method} {type} ".format(stage=(self.name),
                  id=request_id,
                  method=(self.pending_responses[request_id].method),
                  type=(self.pending_responses[request_id].request_type)))
                self._send_request_down(request_id, self.pending_responses[request_id])
        else:
            self.send_event_up(event)
class OpTimeoutStage(PipelineStage):
    """
    Pipeline stage that attaches a timeout to selected operation types.

    ``timeout_intervals`` maps watched op types to a timeout in seconds (currently
    hard-coded to MQTT subscribe/unsubscribe at 10s). A ``threading.Timer`` is hung
    on each watched op; if it fires before the op completes, the op is failed with
    a PipelineTimeoutError so a higher stage can decide to retry or surface it.
    Timed-out ops are considered "lost" — no attempt is made to cancel them at the
    protocol level.
    """

    def __init__(self):
        super(OpTimeoutStage, self).__init__()
        # Watched op types -> timeout in seconds.
        self.timeout_intervals = {
            pipeline_ops_mqtt.MQTTSubscribeOperation: 10,
            pipeline_ops_mqtt.MQTTUnsubscribeOperation: 10,
        }

    @pipeline_thread.runs_on_pipeline_thread
    def _run_op(self, op):
        interval = self.timeout_intervals.get(type(op))
        if interval is None:
            # Not a watched op type: pass through untouched.
            self.send_op_down(op)
            return

        weak_self = weakref.ref(self)

        @pipeline_thread.invoke_on_pipeline_thread_nowait
        def on_timeout():
            this = weak_self()
            logger.info("{}({}): returning timeout error".format(this.name, op.name))
            op.complete(error=(pipeline_exceptions.PipelineTimeoutError("operation timed out before protocol client could respond")))

        logger.debug("{}({}): Creating timer".format(self.name, op.name))
        op.timeout_timer = threading.Timer(interval, on_timeout)
        op.timeout_timer.start()
        # Ensure the timer is cancelled as soon as the op completes normally.
        op.add_callback(self._clear_timer)
        logger.debug("{}({}): Sending down".format(self.name, op.name))
        self.send_op_down(op)

    @pipeline_thread.runs_on_pipeline_thread
    def _clear_timer(self, op, error):
        # Completion callback: stop the watchdog timer once the op finishes.
        if op.timeout_timer:
            logger.debug("{}({}): Cancelling timer".format(self.name, op.name))
            op.timeout_timer.cancel()
            op.timeout_timer = None
class RetryStage(PipelineStage):
    __doc__ = '\n    The purpose of the retry stage is to watch specific operations for specific\n    errors and retry the operations as appropriate.\n\n    Unlike the OpTimeoutStage, this stage will never need to worry about cancelling\n    failed operations. When an operation is retried at this stage, it is already\n    considered "failed", so no cancellation needs to be done.\n    '

    def __init__(self):
        super(RetryStage, self).__init__()
        # Watched op types -> delay (seconds) before the op is re-run after a
        # retryable failure.
        self.retry_intervals = {(pipeline_ops_mqtt.MQTTSubscribeOperation): 20,
         (pipeline_ops_mqtt.MQTTUnsubscribeOperation): 20,
         (pipeline_ops_mqtt.MQTTPublishOperation): 20}
        # Ops parked while their retry timer runs; the list also keeps them alive.
        self.ops_waiting_to_retry = []

    @pipeline_thread.runs_on_pipeline_thread
    def _run_op(self, op):
        """
        Send all ops down and intercept their return to "watch for retry"
        """
        if self._should_watch_for_retry(op):
            # Completion callback will decide whether to retry (see
            # _do_retry_if_necessary).
            op.add_callback(self._do_retry_if_necessary)
            self.send_op_down(op)
        else:
            self.send_op_down(op)

    @pipeline_thread.runs_on_pipeline_thread
    def _should_watch_for_retry(self, op):
        """
        Return True if this op needs to be watched for retry. This can be
        called before the op runs.
        """
        return type(op) in self.retry_intervals

    @pipeline_thread.runs_on_pipeline_thread
    def _should_retry(self, op, error):
        """
        Return True if this op needs to be retried. This must be called after
        the op completes.
        """
        # Only watched op types that failed with a PipelineTimeoutError are
        # retried; all other errors propagate. Returns None (falsy) otherwise.
        if error:
            if self._should_watch_for_retry(op):
                if isinstance(error, pipeline_exceptions.PipelineTimeoutError):
                    return True
        return False

    @pipeline_thread.runs_on_pipeline_thread
    def _do_retry_if_necessary(self, op, error):
        """
        Handler which gets called when operations are complete. This function
        is where we check to see if a retry is necessary and set a "retry timer"
        which can be used to send the op down again.
        """
        if self._should_retry(op, error):
            self_weakref = weakref.ref(self)

            @pipeline_thread.invoke_on_pipeline_thread_nowait
            def do_retry():
                # Timer callback: un-park the op and run it through this stage again.
                this = self_weakref()
                logger.debug("{}({}): retrying".format(this.name, op.name))
                op.retry_timer.cancel()
                op.retry_timer = None
                this.ops_waiting_to_retry.remove(op)
                this.run_op(op)

            interval = self.retry_intervals[type(op)]
            logger.info("{}({}): Op needs retry with interval {} because of {}. Setting timer.".format(self.name, op.name, interval, error))
            # halt_completion stops the failure from propagating up while the op
            # waits to be re-run.
            op.halt_completion()
            self.ops_waiting_to_retry.append(op)
            op.retry_timer = threading.Timer(self.retry_intervals[type(op)], do_retry)
            op.retry_timer.start()
        else:
            # Op completed (successfully or with a non-retryable error): clean up
            # any timer left over from a previous retry cycle.
            if op.retry_timer:
                op.retry_timer.cancel()
                op.retry_timer = None
# Connection errors considered temporary: a connect attempt that fails with one
# of these exact types may be retried by ReconnectStage (matched via type(), not
# isinstance — see ReconnectStage._send_new_connect_op_down). Any other error is
# treated as permanent.
transient_connect_errors = [
    pipeline_exceptions.OperationCancelled,
    pipeline_exceptions.PipelineTimeoutError,
    pipeline_exceptions.OperationError,
    transport_exceptions.ConnectionFailedError,
    transport_exceptions.ConnectionDroppedError]
class ReconnectState(object):
    """
    Holder of reconnect states as class variables — reads like an enum without
    using an enum.

    WAITING_TO_RECONNECT: a waiting period before reconnecting. Implies the user
    wants the pipeline connected; after a successful connection the state moves
    to LOGICALLY_CONNECTED.

    LOGICALLY_CONNECTED: the client wants the pipeline connected. Independent of
    the physical connection state — the pipeline can be logically connected yet
    physically disconnected (temporarily; in that case it should be waiting to
    reconnect).

    LOGICALLY_DISCONNECTED: the client does not want the pipeline connected, or
    a permanent error forced a disconnect. In this state the pipeline should
    also be physically disconnected.
    """

    WAITING_TO_RECONNECT = "WAITING_TO_RECONNECT"
    LOGICALLY_CONNECTED = "LOGICALLY_CONNECTED"
    LOGICALLY_DISCONNECTED = "LOGICALLY_DISCONNECTED"
class ReconnectStage(PipelineStage):
    """
    Pipeline stage which transparently reconnects the pipeline when the connection
    unexpectedly drops, and retries transient connection failures.

    Logical state is tracked via ReconnectState, independently of the physical
    connection. ConnectOperations that arrive while a reconnect is pending are
    parked in ``waiting_connect_ops`` and completed together once a connection
    attempt finally succeeds or fails permanently.
    """

    def __init__(self):
        super(ReconnectStage, self).__init__()
        # Timer delaying the next reconnect attempt (None when no retry pending).
        self.reconnect_timer = None
        self.state = ReconnectState.LOGICALLY_DISCONNECTED
        # True until the first successful connect. Failures before we have ever
        # connected are treated as permanent (no retry).
        self.never_connected = True
        # Seconds to wait between reconnect attempts on transient errors.
        self.reconnect_delay = 10
        # ConnectOperations waiting for the in-progress (re)connect to finish.
        self.waiting_connect_ops = []

    @pipeline_thread.runs_on_pipeline_thread
    def _run_op(self, op):
        if isinstance(op, pipeline_ops_base.ConnectOperation):
            if self.state == ReconnectState.WAITING_TO_RECONNECT:
                # A reconnect is already scheduled: park the op instead of sending
                # a competing connect down.
                logger.debug("{}({}): State is {}. Adding to wait list".format(self.name, op.name, self.state))
                self.waiting_connect_ops.append(op)
            else:
                logger.info("{}({}): State changes {}->LOGICALLY_CONNECTED. Adding to wait list and sending new connect op down".format(self.name, op.name, self.state))
                self.state = ReconnectState.LOGICALLY_CONNECTED
                self.waiting_connect_ops.append(op)
                self._send_new_connect_op_down()
        elif isinstance(op, pipeline_ops_base.DisconnectOperation):
            if self.state == ReconnectState.WAITING_TO_RECONNECT:
                logger.info("{}({}): State changes {}->LOGICALLY_DISCONNECTED. Canceling waiting ops and sending disconnect down.".format(self.name, op.name, self.state))
                self.state = ReconnectState.LOGICALLY_DISCONNECTED
                self._clear_reconnect_timer()
                self._complete_waiting_connect_ops(pipeline_exceptions.OperationCancelled("Explicit disconnect invoked"))
                # While waiting to reconnect there is no physical connection, so
                # the disconnect op completes without being sent down.
                op.complete()
            else:
                logger.info("{}({}): State changes {}->LOGICALLY_DISCONNECTED. Sending op down.".format(self.name, op.name, self.state))
                self.state = ReconnectState.LOGICALLY_DISCONNECTED
                self.send_op_down(op)
        else:
            self.send_op_down(op)

    @pipeline_thread.runs_on_pipeline_thread
    def _handle_pipeline_event(self, event):
        if isinstance(event, pipeline_events_base.DisconnectedEvent):
            logger.debug("{}({}): State is {} Connected is {}.".format(self.name, event.name, self.state, self.pipeline_root.connected))
            if self.pipeline_root.connected and self.state == ReconnectState.LOGICALLY_CONNECTED:
                # Unexpected drop while logically connected: swallow the event and
                # schedule a near-immediate reconnect.
                self.state = ReconnectState.WAITING_TO_RECONNECT
                self._start_reconnect_timer(0.01)
            else:
                self.send_event_up(event)
        else:
            self.send_event_up(event)

    @pipeline_thread.runs_on_pipeline_thread
    def _send_new_connect_op_down(self):
        self_weakref = weakref.ref(self)

        @pipeline_thread.runs_on_pipeline_thread
        def on_connect_complete(op, error):
            this = self_weakref()
            if this:
                logger.debug("{}({}): on_connect_complete error={} state={} never_connected={} connected={} ".format(this.name, op.name, error, this.state, this.never_connected, this.pipeline_root.connected))
                if error:
                    if this.never_connected:
                        # Never managed to connect at all: every error is permanent.
                        this.state = ReconnectState.LOGICALLY_DISCONNECTED
                        this._clear_reconnect_timer()
                        this._complete_waiting_connect_ops(error)
                    elif type(error) in transient_connect_errors:
                        # BUGFIX: use the weakref-resolved `this` instead of `self`.
                        # Referencing `self` here made the closure hold a strong
                        # reference to the stage (defeating the weakref) and was
                        # inconsistent with the rest of this callback.
                        this.state = ReconnectState.WAITING_TO_RECONNECT
                        this._start_reconnect_timer(this.reconnect_delay)
                    else:
                        this.state = ReconnectState.LOGICALLY_DISCONNECTED
                        this._clear_reconnect_timer()
                        this._complete_waiting_connect_ops(error)
                else:
                    this.never_connected = False
                    this.state = ReconnectState.LOGICALLY_CONNECTED
                    this._clear_reconnect_timer()
                    this._complete_waiting_connect_ops()

        logger.debug("{}: sending new connect op down".format(self.name))
        op = pipeline_ops_base.ConnectOperation(callback=on_connect_complete)
        self.send_op_down(op)

    @pipeline_thread.runs_on_pipeline_thread
    def _start_reconnect_timer(self, delay):
        """
        Set a timer to reconnect after some period of time
        """
        logger.debug("{}: State is {}. Connected={} Starting reconnect timer".format(self.name, self.state, self.pipeline_root.connected))
        self._clear_reconnect_timer()
        self_weakref = weakref.ref(self)

        @pipeline_thread.invoke_on_pipeline_thread_nowait
        def on_reconnect_timer_expired():
            this = self_weakref()
            if not this:
                # BUGFIX: guard against the stage having been garbage collected
                # before the timer fired (the weakref then resolves to None; the
                # old code would have raised AttributeError on None).
                return
            logger.debug("{}: Reconnect timer expired. State is {} Connected is {}.".format(this.name, this.state, this.pipeline_root.connected))
            this.reconnect_timer = None
            if this.state == ReconnectState.WAITING_TO_RECONNECT and not this.pipeline_root.connected:
                # BUGFIX: consistently use `this` rather than `self` so this timer
                # callback does not strongly capture the stage.
                this.state = ReconnectState.LOGICALLY_CONNECTED
                this._send_new_connect_op_down()

        self.reconnect_timer = threading.Timer(delay, on_reconnect_timer_expired)
        self.reconnect_timer.start()

    @pipeline_thread.runs_on_pipeline_thread
    def _clear_reconnect_timer(self):
        """
        Clear any previous reconnect timer
        """
        if self.reconnect_timer:
            logger.debug("{}: clearing reconnect timer".format(self.name))
            self.reconnect_timer.cancel()
            self.reconnect_timer = None

    @pipeline_thread.runs_on_pipeline_thread
    def _complete_waiting_connect_ops(self, error=None):
        """
        A note of explanation: when we are waiting to reconnect, we need to keep a list of
        all connect ops that come through here. We do this for 2 reasons:
        1. We don't want to pass them down immediately because we want to honor the waiting
           period. If we passed them down immediately, we'd try to reconnect immediately
           instead of waiting until reconnect_timer fires.
        2. When we're retrying, there are new ConnectOperation ops sent down regularly.
           Any of the ops could be the one that succeeds. When that happens, we need a
           way to to complete all of the ops that are patiently waiting for the connection.
        Right now, we only need to do this with ConnectOperation ops because these are the
        only ops that need to wait because these are the only ops that cause a connection
        to be established. Other ops pass through this stage, and might fail in later
        stages, but that's OK. If they needed a connection, the AutoConnectStage before
        this stage should be taking care of that.
        """
        logger.debug("{}: completing waiting ops with error={}".format(self.name, error))
        # Swap the list out first so completion callbacks that enqueue new connect
        # ops do not see (or mutate) the batch being completed.
        list_copy = self.waiting_connect_ops
        self.waiting_connect_ops = []
        for op in list_copy:
            op.complete(error)

View File

@@ -0,0 +1,67 @@
# uncompyle6 version 3.9.2
# Python bytecode version base 3.7.0 (3394)
# Decompiled from: Python 3.8.19 (default, Mar 20 2024, 15:27:52)
# [Clang 14.0.6 ]
# Embedded file name: /var/user/app/device_supervisorbak/device_supervisor/lib/azure/iot/device/common/pipeline/pipeline_stages_http.py
# Compiled at: 2024-04-18 03:12:58
# Size of source mod 2**32: 5150 bytes
import logging, six, traceback, copy
from . import pipeline_ops_base, PipelineStage, pipeline_ops_http, pipeline_thread, pipeline_exceptions
from azure.iot.device.common.http_transport import HTTPTransport
from azure.iot.device.common import handle_exceptions, transport_exceptions
from azure.iot.device.common.callable_weak_method import CallableWeakMethod
logger = logging.getLogger(__name__)
class HTTPTransportStage(PipelineStage):
    """
    PipelineStage object which is responsible for interfacing with the HTTP protocol wrapper object.
    This stage handles all HTTP operations that are not specific to IoT Hub.
    """

    def __init__(self):
        super(HTTPTransportStage, self).__init__()
        # NOTE(review): sas_token is initialized but never assigned elsewhere in
        # this class — presumably kept for parity with other transport stages.
        self.sas_token = None
        self.transport = None

    @pipeline_thread.runs_on_pipeline_thread
    def _run_op(self, op):
        if isinstance(op, pipeline_ops_base.InitializePipelineOperation):
            pipeline_config = self.pipeline_root.pipeline_configuration
            # Prefer the gateway hostname (e.g. an edge gateway) when configured.
            if pipeline_config.gateway_hostname:
                logger.debug("Gateway Hostname Present. Setting Hostname to: {}".format(pipeline_config.gateway_hostname))
                target_hostname = pipeline_config.gateway_hostname
            else:
                logger.debug("Gateway Hostname not present. Setting Hostname to: {}".format(pipeline_config.hostname))
                target_hostname = pipeline_config.hostname
            logger.debug("{}({}): got connection args".format(self.name, op.name))
            self.transport = HTTPTransport(hostname=target_hostname,
              server_verification_cert=(pipeline_config.server_verification_cert),
              x509_cert=(pipeline_config.x509),
              cipher=(pipeline_config.cipher))
            self.pipeline_root.transport = self.transport
            op.complete()
        elif isinstance(op, pipeline_ops_http.HTTPRequestAndResponseOperation):
            logger.debug("{}({}): Generating HTTP request and setting callback before completing.".format(self.name, op.name))

            @pipeline_thread.invoke_on_pipeline_thread_nowait
            def on_request_completed(error=None, response=None):
                if error:
                    logger.debug("{}({}): Error passed to on_request_completed. Error={}".format(self.name, op.name, error))
                    op.complete(error=error)
                else:
                    logger.debug("{}({}): Request completed. Completing op.".format(self.name, op.name))
                    logger.debug("HTTP Response Status: {}".format(response["status_code"]))
                    logger.debug("HTTP Response: {}".format(response["resp"].decode("utf-8")))
                    # Copy the transport's response dict fields onto the op.
                    op.response_body = response["resp"]
                    op.status_code = response["status_code"]
                    op.reason = response["reason"]
                    op.complete()

            # Deep-copy the headers so the auth header added below never leaks
            # back into the caller's dict.
            request_headers = copy.deepcopy(op.headers)
            if self.pipeline_root.pipeline_configuration.sastoken:
                request_headers["Authorization"] = str(self.pipeline_root.pipeline_configuration.sastoken)
            self.transport.request(method=(op.method),
              path=(op.path),
              headers=request_headers,
              query_params=(op.query_params),
              body=(op.body),
              callback=on_request_completed)
        else:
            self.send_op_down(op)

View File

@@ -0,0 +1,151 @@
# uncompyle6 version 3.9.2
# Python bytecode version base 3.7.0 (3394)
# Decompiled from: Python 3.8.19 (default, Mar 20 2024, 15:27:52)
# [Clang 14.0.6 ]
# Embedded file name: /var/user/app/device_supervisorbak/device_supervisor/lib/azure/iot/device/common/pipeline/pipeline_stages_mqtt.py
# Compiled at: 2024-04-18 03:12:58
# Size of source mod 2**32: 17042 bytes
import logging, six, traceback, threading, weakref
from . import pipeline_ops_base, PipelineStage, pipeline_ops_mqtt, pipeline_events_mqtt, pipeline_thread, pipeline_exceptions, pipeline_events_base
from azure.iot.device.common.mqtt_transport import MQTTTransport
from azure.iot.device.common import handle_exceptions, transport_exceptions
from azure.iot.device.common.callable_weak_method import CallableWeakMethod
logger = logging.getLogger(__name__)
WATCHDOG_INTERVAL = 10
class MQTTTransportStage(PipelineStage):
    """
    PipelineStage object which is responsible for interfacing with the MQTT protocol wrapper object.
    This stage handles all MQTT operations and any other operations (such as ConnectOperation) which
    is not in the MQTT group of operations, but can only be run at the protocol level.
    """

    def __init__(self):
        super(MQTTTransportStage, self).__init__()
        self.transport = None
        # At most one connect/disconnect/reauthorize op may be pending at a time.
        self._pending_connection_op = None

    @pipeline_thread.runs_on_pipeline_thread
    def _cancel_pending_connection_op(self, error=None):
        """
        Cancel any running connect, disconnect or reauthorize_connection op. Since our ability to "cancel" is fairly limited,
        all this does (for now) is to fail the operation
        """
        op = self._pending_connection_op
        if op:
            if not error:
                error = pipeline_exceptions.OperationCancelled("Cancelling because new ConnectOperation, DisconnectOperation, or ReauthorizeConnectionOperation was issued")
            self._cancel_connection_watchdog(op)
            op.complete(error=error)
            self._pending_connection_op = None

    @pipeline_thread.runs_on_pipeline_thread
    def _start_connection_watchdog(self, connection_op):
        # Arms a daemon timer that force-disconnects the transport and cancels the
        # connection op if it is still pending after WATCHDOG_INTERVAL seconds.
        logger.debug("{}({}): Starting watchdog".format(self.name, connection_op.name))
        self_weakref = weakref.ref(self)
        op_weakref = weakref.ref(connection_op)

        @pipeline_thread.invoke_on_pipeline_thread
        def watchdog_function():
            this = self_weakref()
            op = op_weakref()
            # Only act if stage and op are still alive and the op is still pending.
            if this and op and this._pending_connection_op is op:
                logger.info("{}({}): Connection watchdog expired. Cancelling op".format(this.name, op.name))
                this.transport.disconnect()
                if this.pipeline_root.connected:
                    logger.info("{}({}): Pipeline is still connected on watchdog expiration. Sending DisconnectedEvent".format(this.name, op.name))
                    this.send_event_up(pipeline_events_base.DisconnectedEvent())
                this._cancel_pending_connection_op(error=(pipeline_exceptions.OperationCancelled("Transport timeout on connection operation")))

        connection_op.watchdog_timer = threading.Timer(WATCHDOG_INTERVAL, watchdog_function)
        connection_op.watchdog_timer.daemon = True
        connection_op.watchdog_timer.start()

    @pipeline_thread.runs_on_pipeline_thread
    def _cancel_connection_watchdog(self, op):
        # AttributeError is expected for ops that never had a watchdog started.
        try:
            if op.watchdog_timer:
                logger.debug("{}({}): cancelling watchdog".format(self.name, op.name))
                op.watchdog_timer.cancel()
                op.watchdog_timer = None
        except AttributeError:
            pass

    # NOTE(review): the decompiler destroyed this method ("Parse error at or near
    # COME_FROM"), leaving invalid syntax in the dump. The implementation below is
    # reconstructed from the upstream azure-iot-device v2.4.0 sources (the package
    # version in this dump is 2.4.0). Attribute names on the ops and the
    # MQTTTransport constructor kwargs must be verified against the original
    # bytecode before shipping.
    @pipeline_thread.runs_on_pipeline_thread
    def _run_op(self, op):
        if isinstance(op, pipeline_ops_base.InitializePipelineOperation):
            # Prefer the gateway hostname (e.g. an edge gateway) when configured.
            if self.pipeline_root.pipeline_configuration.gateway_hostname:
                logger.debug("Gateway Hostname Present. Setting Hostname to: {}".format(self.pipeline_root.pipeline_configuration.gateway_hostname))
                hostname = self.pipeline_root.pipeline_configuration.gateway_hostname
            else:
                logger.debug("Gateway Hostname not present. Setting Hostname to: {}".format(self.pipeline_root.pipeline_configuration.hostname))
                hostname = self.pipeline_root.pipeline_configuration.hostname
            logger.debug("{}({}): got connection args".format(self.name, op.name))
            # NOTE(review): client_id/username are assumed to be set on the op by a
            # higher stage, per upstream v2.4.0 — confirm against bytecode.
            self.transport = MQTTTransport(client_id=(op.client_id),
              hostname=hostname,
              username=(op.username),
              server_verification_cert=(self.pipeline_root.pipeline_configuration.server_verification_cert),
              x509_cert=(self.pipeline_root.pipeline_configuration.x509),
              websockets=(self.pipeline_root.pipeline_configuration.websockets),
              cipher=(self.pipeline_root.pipeline_configuration.cipher),
              proxy_options=(self.pipeline_root.pipeline_configuration.proxy_options))
            # CallableWeakMethod keeps the transport's handler references from
            # creating a reference cycle with this stage.
            self.transport.on_mqtt_connected_handler = CallableWeakMethod(self, "_on_mqtt_connected")
            self.transport.on_mqtt_connection_failure_handler = CallableWeakMethod(self, "_on_mqtt_connection_failure")
            self.transport.on_mqtt_disconnected_handler = CallableWeakMethod(self, "_on_mqtt_disconnected")
            self.transport.on_mqtt_message_received_handler = CallableWeakMethod(self, "_on_mqtt_message_received")
            self._pending_connection_op = None
            self.pipeline_root.transport = self.transport
            op.complete()
        elif isinstance(op, pipeline_ops_base.ConnectOperation):
            logger.info("{}({}): connecting".format(self.name, op.name))
            self._cancel_pending_connection_op()
            self._pending_connection_op = op
            self._start_connection_watchdog(op)
            # Use the SasToken as the password when present; with x509 auth no
            # password is needed.
            if self.pipeline_root.pipeline_configuration.sastoken:
                password = str(self.pipeline_root.pipeline_configuration.sastoken)
            else:
                password = None
            try:
                self.transport.connect(password=password)
            except Exception as e:
                logger.error("transport.connect raised error")
                logger.error(traceback.format_exc())
                self._cancel_connection_watchdog(op)
                self._pending_connection_op = None
                op.complete(error=e)
        elif isinstance(op, pipeline_ops_base.ReauthorizeConnectionOperation):
            logger.info("{}({}): reauthorizing".format(self.name, op.name))
            self._cancel_pending_connection_op()
            self._pending_connection_op = op
            self._start_connection_watchdog(op)
            if self.pipeline_root.pipeline_configuration.sastoken:
                password = str(self.pipeline_root.pipeline_configuration.sastoken)
            else:
                password = None
            try:
                self.transport.reauthorize_connection(password=password)
            except Exception as e:
                logger.error("transport.reauthorize_connection raised error")
                logger.error(traceback.format_exc())
                self._cancel_connection_watchdog(op)
                self._pending_connection_op = None
                op.complete(error=e)
        elif isinstance(op, pipeline_ops_base.DisconnectOperation):
            logger.info("{}({}): disconnecting".format(self.name, op.name))
            self._cancel_pending_connection_op()
            self._pending_connection_op = op
            try:
                self.transport.disconnect()
            except Exception as e:
                logger.error("transport.disconnect raised error")
                logger.error(traceback.format_exc())
                self._pending_connection_op = None
                op.complete(error=e)
        elif isinstance(op, pipeline_ops_mqtt.MQTTPublishOperation):
            logger.info("{}({}): publishing on {}".format(self.name, op.name, op.topic))

            @pipeline_thread.invoke_on_pipeline_thread_nowait
            def on_published():
                logger.debug("{}({}): PUBACK received. completing op.".format(self.name, op.name))
                op.complete()

            self.transport.publish(topic=(op.topic), payload=(op.payload), callback=on_published)
        elif isinstance(op, pipeline_ops_mqtt.MQTTSubscribeOperation):
            logger.info("{}({}): subscribing to {}".format(self.name, op.name, op.topic))

            @pipeline_thread.invoke_on_pipeline_thread_nowait
            def on_subscribed():
                logger.debug("{}({}): SUBACK received. completing op.".format(self.name, op.name))
                op.complete()

            self.transport.subscribe(topic=(op.topic), qos=1, callback=on_subscribed)
        elif isinstance(op, pipeline_ops_mqtt.MQTTUnsubscribeOperation):
            logger.info("{}({}): unsubscribing from {}".format(self.name, op.name, op.topic))

            @pipeline_thread.invoke_on_pipeline_thread_nowait
            def on_unsubscribed():
                logger.debug("{}({}): UNSUBACK received. completing op.".format(self.name, op.name))
                op.complete()

            self.transport.unsubscribe(topic=(op.topic), callback=on_unsubscribed)
        else:
            self.send_op_down(op)

    @pipeline_thread.invoke_on_pipeline_thread_nowait
    def _on_mqtt_message_received(self, topic, payload):
        """
        Handler that gets called by the protocol library when an incoming message arrives.
        Convert that message into a pipeline event and pass it up for someone to handle.
        """
        logger.debug("{}: message received on topic {}".format(self.name, topic))
        self.send_event_up(pipeline_events_mqtt.IncomingMQTTMessageEvent(topic=topic, payload=payload))

    @pipeline_thread.invoke_on_pipeline_thread_nowait
    def _on_mqtt_connected(self):
        """
        Handler that gets called by the transport when it connects.
        """
        logger.info("_on_mqtt_connected called")
        self.send_event_up(pipeline_events_base.ConnectedEvent())
        if isinstance(self._pending_connection_op, pipeline_ops_base.ConnectOperation):
            logger.debug("completing connect op")
            op = self._pending_connection_op
            self._cancel_connection_watchdog(op)
            self._pending_connection_op = None
            op.complete()
        else:
            # Connection arrived with no pending connect op (e.g. a spurious
            # transport callback) — log and ignore.
            logger.info("Connection was unexpected")

    @pipeline_thread.invoke_on_pipeline_thread_nowait
    def _on_mqtt_connection_failure(self, cause):
        """
        Handler that gets called by the transport when a connection fails.
        :param Exception cause: The Exception that caused the connection failure.
        """
        logger.info("{}: _on_mqtt_connection_failure called: {}".format(self.name, cause))
        if isinstance(self._pending_connection_op, pipeline_ops_base.ConnectOperation):
            logger.debug("{}: failing connect op".format(self.name))
            op = self._pending_connection_op
            self._cancel_connection_watchdog(op)
            self._pending_connection_op = None
            op.complete(error=cause)
        else:
            logger.info("{}: Connection failure was unexpected".format(self.name))
            handle_exceptions.swallow_unraised_exception(cause,
              log_msg="Unexpected connection failure. Safe to ignore.", log_lvl="info")

    @pipeline_thread.invoke_on_pipeline_thread_nowait
    def _on_mqtt_disconnected(self, cause=None):
        """
        Handler that gets called by the transport when the transport disconnects.
        :param Exception cause: The Exception that caused the disconnection, if any (optional)
        """
        if cause:
            logger.info("{}: _on_mqtt_disconnect called: {}".format(self.name, cause))
        else:
            logger.info("{}: _on_mqtt_disconnect called".format(self.name))
        self.send_event_up(pipeline_events_base.DisconnectedEvent())
        if self._pending_connection_op:
            logger.debug("{}: completing pending {} op".format(self.name, self._pending_connection_op.name))
            op = self._pending_connection_op
            self._cancel_connection_watchdog(op)
            self._pending_connection_op = None
            if isinstance(op, pipeline_ops_base.DisconnectOperation) or isinstance(op, pipeline_ops_base.ReauthorizeConnectionOperation):
                # A disconnect was expected here; an error cause is merely logged.
                if cause:
                    handle_exceptions.swallow_unraised_exception(cause,
                      log_msg="Unexpected disconnect with error while disconnecting - swallowing error")
                op.complete()
            elif cause:
                op.complete(error=cause)
            else:
                op.complete(error=(transport_exceptions.ConnectionDroppedError("transport disconnected")))
        else:
            logger.info("{}: disconnection was unexpected".format(self.name))
            # Unexpected drops are swallowed; higher stages handle reconnect.
            e = transport_exceptions.ConnectionDroppedError(cause=cause)
            handle_exceptions.swallow_unraised_exception(e,
              log_msg="Unexpected disconnection. Safe to ignore since other stages will reconnect.",
              log_lvl="info")

View File

@@ -0,0 +1,134 @@
# uncompyle6 version 3.9.2
# Python bytecode version base 3.7.0 (3394)
# Decompiled from: Python 3.8.19 (default, Mar 20 2024, 15:27:52)
# [Clang 14.0.6 ]
# Embedded file name: /var/user/app/device_supervisorbak/device_supervisor/lib/azure/iot/device/common/pipeline/pipeline_thread.py
# Compiled at: 2024-04-18 03:12:58
# Size of source mod 2**32: 9542 bytes
import functools, logging, threading, traceback
from multiprocessing.pool import ThreadPool
from concurrent.futures import ThreadPoolExecutor
from azure.iot.device.common import handle_exceptions
logger = logging.getLogger(__name__)
_executors = {}
def _get_named_executor(thread_name):
    """
    Return the single-worker ThreadPoolExecutor registered under *thread_name*,
    creating and caching it on first use.
    """
    executor = _executors.get(thread_name)
    if executor is None:
        logger.debug("Creating {} executor".format(thread_name))
        executor = ThreadPoolExecutor(max_workers=1)
        _executors[thread_name] = executor
    return executor
def _invoke_on_executor_thread(func, thread_name, block=True):
    """
    Return wrapper to run the function on a given thread. If block==False,
    the call returns immediately without waiting for the decorated function to complete.
    If block==True, the call waits for the decorated function to complete before returning.
    """
    try:
        function_name = func.__name__
        function_has_name = True
    except AttributeError:
        # Some callables (e.g. functools.partial) have no __name__.
        function_name = str(func)
        function_has_name = False

    def wrapper(*args, **kwargs):
        # BUGFIX: compare thread names with `!=` instead of `is not`. Identity
        # comparison on strings only worked by interning coincidence; two equal
        # but distinct name strings would wrongly re-submit to the single-worker
        # executor from inside its own thread and, with block=True, deadlock.
        if threading.current_thread().name != thread_name:
            logger.debug("Starting {} in {} thread".format(function_name, thread_name))

            def thread_proc():
                threading.current_thread().name = thread_name
                try:
                    return func(*args, **kwargs)
                except Exception as e:
                    if not block:
                        # Fire-and-forget call: nobody awaits the future, so
                        # route the error to the background-exception handler.
                        handle_exceptions.handle_background_exception(e)
                    else:
                        raise
                except BaseException:
                    if not block:
                        logger.critical("Unhandled exception in background thread")
                        logger.critical("This may cause the background thread to abort and may result in system instability.")
                        traceback.print_exc()
                    raise

            future = _get_named_executor(thread_name).submit(thread_proc)
            if block:
                return future.result()
            return future
        else:
            # Already on the target thread: call directly to avoid deadlocking
            # the single-worker executor.
            logger.debug("Already in {} thread for {}".format(thread_name, function_name))
            return func(*args, **kwargs)

    if function_has_name:
        return functools.update_wrapper(wrapped=func, wrapper=wrapper)
    wrapper.__wrapped__ = func
    return wrapper
def invoke_on_pipeline_thread(func):
    """
    Run the decorated function on the pipeline thread, blocking until it completes.
    """
    return _invoke_on_executor_thread(func=func, thread_name="pipeline")
def invoke_on_pipeline_thread_nowait(func):
    """
    Run the decorated function on the pipeline thread, but don't wait for it to complete
    """
    return _invoke_on_executor_thread(func=func, thread_name="pipeline", block=False)
def invoke_on_callback_thread_nowait(func):
    """
    Run the decorated function on the callback thread, but don't wait for it to complete
    """
    return _invoke_on_executor_thread(func=func, thread_name="callback", block=False)
def invoke_on_http_thread_nowait(func):
    """
    Run the decorated function on the http thread, but don't wait for it to complete
    """
    return _invoke_on_executor_thread(func=func, thread_name="azure_iot_http", block=False)
def _assert_executor_thread(func, thread_name):
    """
    Decorator which asserts that the given function only gets called inside the given
    thread.

    NOTE: the check uses `assert`, so it is stripped when Python runs with -O;
    it is a development-time guard, not a runtime enforcement mechanism.
    """
    @functools.wraps(func)
    def wrapper(*args, **kwargs):
        # Thread identity is tracked by thread *name*, which the invoke_* helpers
        # assign when they enter the executor.
        assert threading.current_thread().name == thread_name, "\n    Function {function_name} is not running inside {thread_name} thread.\n    It should be. You should use invoke_on_{thread_name}_thread(_nowait) to enter the\n    {thread_name} thread before calling this function.  If you're hitting this from\n    inside a test function, you may need to add the fake_pipeline_thread fixture to\n    your test.  (generally applied on the global pytestmark in a module) ".format(function_name=(func.__name__),
          thread_name=thread_name)
        return func(*args, **kwargs)
    return wrapper
def runs_on_pipeline_thread(func):
    """
    Decorator marking a function as one that must only run on the pipeline thread.
    """
    return _assert_executor_thread(func, "pipeline")


def runs_on_http_thread(func):
    """
    Decorator marking a function as one that must only run on the http thread.
    """
    return _assert_executor_thread(func, "azure_iot_http")

View File

@@ -0,0 +1,32 @@
# uncompyle6 version 3.9.2
# Python bytecode version base 3.7.0 (3394)
# Decompiled from: Python 3.8.19 (default, Mar 20 2024, 15:27:52)
# [Clang 14.0.6 ]
# Embedded file name: /var/user/app/device_supervisorbak/device_supervisor/lib/azure/iot/device/common/transport_exceptions.py
# Compiled at: 2024-04-18 03:12:58
# Size of source mod 2**32: 1275 bytes
"""This module defines errors that may be raised from a transport"""
from .chainable_exception import ChainableException
class ConnectionFailedError(ChainableException):
    """
    Connection failed to be established
    """


class ConnectionDroppedError(ChainableException):
    """
    Previously established connection was dropped
    """


class UnauthorizedError(ChainableException):
    """
    Authorization was rejected
    """


class ProtocolClientError(ChainableException):
    """
    Error returned from protocol client library
    """


class TlsExchangeAuthError(ChainableException):
    """
    Error returned when transport layer exchanges
    result in a SSLCertVerification error.
    """


class ProtocolProxyError(ChainableException):
    """
    All proxy-related errors.
    TODO : Not sure what to name it here. There is a class called Proxy Error already in Pysocks
    """

View File

@@ -0,0 +1,31 @@
# uncompyle6 version 3.9.2
# Python bytecode version base 3.7.0 (3394)
# Decompiled from: Python 3.8.19 (default, Mar 20 2024, 15:27:52)
# [Clang 14.0.6 ]
# Embedded file name: /var/user/app/device_supervisorbak/device_supervisor/lib/azure/iot/device/common/version_compat.py
# Compiled at: 2024-04-18 03:12:58
# Size of source mod 2**32: 1561 bytes
"""This module defines functions intended for providing compatibility between
different versions of Python"""
from six.moves import urllib
def urlencode(query, quote_via=urllib.parse.quote_plus, safe=""):
""" Custom implementation of urllib.parse.urlencode().
This is necessary because prior to Python 3.5, urlencode() always encodes via
quote_plus() rather than quote(). This is generally not desirable for MQTT, as
it will translate ' ' into '+' rather than '%20', and '+' is not allowed in the
topic strings for MQTT publish.
Starting in 3.5, the included library function for urlencode() allows you to specify
which style of encoding you want, however this feature is not available in 2.7 and so
we must implement it ourselves.
"""
if isinstance(query, list):
encoded = "&".join(["{}={}".format(quote_via(k, safe=safe), quote_via(v, safe=safe)) for k, v in query])
else:
if isinstance(query, dict):
encoded = "&".join(["{}={}".format(quote_via(k, safe=safe), quote_via(v, safe=safe)) for k, v in query.items()])
else:
raise TypeError("Invalid type for 'query'")
return encoded

View File

@@ -0,0 +1,20 @@
# uncompyle6 version 3.9.2
# Python bytecode version base 3.7.0 (3394)
# Decompiled from: Python 3.8.19 (default, Mar 20 2024, 15:27:52)
# [Clang 14.0.6 ]
# Embedded file name: /var/user/app/device_supervisorbak/device_supervisor/lib/azure/iot/device/constant.py
# Compiled at: 2024-04-18 03:12:58
# Size of source mod 2**32: 1015 bytes
"""This module defines constants for use across the azure-iot-device package
"""
VERSION = "2.4.0"
IOTHUB_IDENTIFIER = "azure-iot-device-iothub-py"
PROVISIONING_IDENTIFIER = "azure-iot-device-provisioning-py"
IOTHUB_API_VERSION = "2019-10-01"
PROVISIONING_API_VERSION = "2019-03-31"
SECURITY_MESSAGE_INTERFACE_ID = "urn:azureiot:Security:SecurityAgent:1"
TELEMETRY_MESSAGE_SIZE_LIMIT = 262144
MAX_KEEP_ALIVE_SECS = 1740
DIGITAL_TWIN_PREFIX = "dtmi"
DIGITAL_TWIN_API_VERSION = "2020-09-30"
DIGITAL_TWIN_QUERY_HEADER = "model-id"

View File

@@ -0,0 +1,32 @@
# uncompyle6 version 3.9.2
# Python bytecode version base 3.7.0 (3394)
# Decompiled from: Python 3.8.19 (default, Mar 20 2024, 15:27:52)
# [Clang 14.0.6 ]
# Embedded file name: /var/user/app/device_supervisorbak/device_supervisor/lib/azure/iot/device/exceptions.py
# Compiled at: 2024-04-18 03:12:58
# Size of source mod 2**32: 3770 bytes
"""This module defines an exception surface, exposed as part of the azure.iot.device library API"""
from azure.iot.device.common.chainable_exception import ChainableException
class OperationCancelled(ChainableException):
    """An operation was cancelled"""


class ClientError(ChainableException):
    """Generic error for a client"""


class ConnectionFailedError(ClientError):
    """Failed to establish a connection"""


class ConnectionDroppedError(ClientError):
    """Lost connection while executing operation"""


class CredentialError(ClientError):
    """Could not connect client using given credentials"""


class ServiceError(ChainableException):
    """Error received from an Azure IoT service"""

View File

@@ -0,0 +1,16 @@
# uncompyle6 version 3.9.2
# Python bytecode version base 3.7.0 (3394)
# Decompiled from: Python 3.8.19 (default, Mar 20 2024, 15:27:52)
# [Clang 14.0.6 ]
# Embedded file name: /var/user/app/device_supervisorbak/device_supervisor/lib/azure/iot/device/iothub/__init__.py
# Compiled at: 2024-04-18 03:12:58
# Size of source mod 2**32: 364 bytes
"""Azure IoT Hub Device Library
This library provides functionality for communicating with the Azure IoT Hub
as a Device or Module.
"""
from .sync_clients import IoTHubDeviceClient, IoTHubModuleClient
from .models import Message, MethodRequest, MethodResponse
# Names re-exported as the package's public API.
__all__ = [
    'IoTHubDeviceClient', 'IoTHubModuleClient', 'Message', 'MethodRequest', 'MethodResponse']

View File

@@ -0,0 +1,621 @@
# uncompyle6 version 3.9.2
# Python bytecode version base 3.7.0 (3394)
# Decompiled from: Python 3.8.19 (default, Mar 20 2024, 15:27:52)
# [Clang 14.0.6 ]
# Embedded file name: /var/user/app/device_supervisorbak/device_supervisor/lib/azure/iot/device/iothub/abstract_clients.py
# Compiled at: 2024-04-18 03:12:58
# Size of source mod 2**32: 32494 bytes
"""This module contains abstract classes for the various clients of the Azure IoT Hub Device SDK
"""
import six, abc, logging, threading, os, io, time
from . import pipeline
from azure.iot.device.common.auth import connection_string as cs
from azure.iot.device.common.auth import sastoken as st
from azure.iot.device import exceptions
from azure.iot.device.common import auth
from . import edge_hsm
logger = logging.getLogger(__name__)
def _validate_kwargs(exclude=[], **kwargs):
"""Helper function to validate user provided kwargs.
Raises TypeError if an invalid option has been provided"""
valid_kwargs = [
'product_info',
'websockets',
'cipher',
'server_verification_cert',
'proxy_options',
'sastoken_ttl',
'keep_alive']
for kwarg in kwargs:
if kwarg not in valid_kwargs or kwarg in exclude:
raise TypeError("Unsupported keyword argument: '{}'".format(kwarg))
def _get_config_kwargs(**kwargs):
"""Get the subset of kwargs which pertain the config object"""
valid_config_kwargs = [
'product_info',
'websockets',
'cipher',
'server_verification_cert',
'proxy_options',
'keep_alive']
config_kwargs = {}
for kwarg in kwargs:
if kwarg in valid_config_kwargs:
config_kwargs[kwarg] = kwargs[kwarg]
return config_kwargs
def _form_sas_uri(hostname, device_id, module_id=None):
if module_id:
return "{hostname}/devices/{device_id}/modules/{module_id}".format(hostname=hostname,
device_id=device_id,
module_id=module_id)
return "{hostname}/devices/{device_id}".format(hostname=hostname, device_id=device_id)
def _extract_sas_uri_values(uri):
d = {}
items = uri.split("/")
if len(items) != 3:
if len(items) != 5:
raise ValueError("Invalid SAS URI")
if items[1] != "devices":
raise ValueError("Cannot extract device id from SAS URI")
if len(items) > 3:
if items[3] != "modules":
raise ValueError("Cannot extract module id from SAS URI")
d["hostname"] = items[0]
d["device_id"] = items[2]
try:
d["module_id"] = items[4]
except IndexError:
d["module_id"] = None
return d
# Sentinel values tracking which receive mechanism a client instance has
# committed to. A client starts as NONE_SET and is locked to either the
# polling APIs or the handler properties on first use (see
# _check_receive_mode_is_api / _check_receive_mode_is_handler).
RECEIVE_TYPE_NONE_SET = "none_set"
RECEIVE_TYPE_HANDLER = "handler"
RECEIVE_TYPE_API = "api"
@six.add_metaclass(abc.ABCMeta)
class AbstractIoTHubClient(object):
    """A superclass representing a generic IoTHub client.
    This class needs to be extended for specific clients.
    """

    def __init__(self, mqtt_pipeline, http_pipeline):
        """Initializer for a generic client.

        :param mqtt_pipeline: The pipeline used to connect to the IoTHub endpoint.
        :type mqtt_pipeline: :class:`azure.iot.device.iothub.pipeline.MQTTPipeline`
        :param http_pipeline: The pipeline used for HTTP operations.
        """
        self._mqtt_pipeline = mqtt_pipeline
        self._http_pipeline = http_pipeline
        # Concrete subclasses are expected to install real inbox/handler managers.
        self._inbox_manager = None
        self._handler_manager = None
        # Receive mode starts undecided; first use of APIs or handlers locks it in.
        self._receive_type = RECEIVE_TYPE_NONE_SET
        self._client_lock = threading.Lock()

    def _on_connected(self):
        """Helper handler that is called upon an iothub pipeline connect"""
        logger.info("Connection State - Connected")
        self._handler_manager.ensure_running()

    def _on_disconnected(self):
        """Helper handler that is called upon an iothub pipeline disconnect"""
        logger.info("Connection State - Disconnected")
        # Pending method requests can no longer be answered once the connection drops.
        self._inbox_manager.clear_all_method_requests()
        logger.info("Cleared all pending method requests due to disconnect")

    def _check_receive_mode_is_api(self):
        """Lock the client into API (polling) receive mode, or raise if it is
        already committed to handler mode.

        NOTE(review): the decompiler could not recover this body ("Parse error
        at or near POP_BLOCK"); reconstructed from the upstream
        azure-iot-device sources -- confirm against the original bytecode.
        """
        with self._client_lock:
            if self._receive_type is RECEIVE_TYPE_NONE_SET:
                # Lock the client to ONLY use receive APIs (i.e. polling).
                self._receive_type = RECEIVE_TYPE_API
            elif self._receive_type is RECEIVE_TYPE_HANDLER:
                raise exceptions.ClientError(
                    "Client is already configured for receiving via handlers")

    def _check_receive_mode_is_handler(self):
        """Lock the client into handler (callback) receive mode, or raise if it
        is already committed to API mode.

        NOTE(review): the decompiler could not recover this body ("Parse error
        at or near POP_BLOCK"); reconstructed from the upstream
        azure-iot-device sources -- confirm against the original bytecode.
        """
        with self._client_lock:
            if self._receive_type is RECEIVE_TYPE_NONE_SET:
                # Lock the client to ONLY use receive handlers.
                self._receive_type = RECEIVE_TYPE_HANDLER
                # Route all messages through the unified message inbox.
                self._inbox_manager.use_unified_msg_mode = True
            elif self._receive_type is RECEIVE_TYPE_API:
                raise exceptions.ClientError(
                    "Client is already configured for receiving via APIs")

    def _replace_user_supplied_sastoken(self, sastoken_str):
        """Replace the pipeline's NonRenewableSasToken with a new one based on a
        provided sastoken string, validating it against the client's identity.
        This helper only updates the PipelineConfig - it does not reauthorize
        the connection.

        :raises: :class:`azure.iot.device.exceptions.ClientError` if the client
            was not created with a user-supplied SAS token.
        :raises: ValueError if the token is malformed, mismatched, or expired.
        """
        if not isinstance(
            self._mqtt_pipeline.pipeline_configuration.sastoken, st.NonRenewableSasToken
        ):
            raise exceptions.ClientError("Cannot update sastoken when client was not created with one")
        try:
            new_token_o = st.NonRenewableSasToken(sastoken_str)
        except st.SasTokenError as e:
            raise ValueError("Invalid SasToken provided") from e
        vals = _extract_sas_uri_values(new_token_o.resource_uri)
        # The new token must be for the same identity the client was created with.
        if type(self).__name__ == "IoTHubDeviceClient" and vals["module_id"]:
            raise ValueError("Provided SasToken is for a module")
        if type(self).__name__ == "IoTHubModuleClient" and not vals["module_id"]:
            raise ValueError("Provided SasToken is for a device")
        if self._mqtt_pipeline.pipeline_configuration.device_id != vals["device_id"]:
            raise ValueError("Provided SasToken does not match existing device id")
        if self._mqtt_pipeline.pipeline_configuration.module_id != vals["module_id"]:
            raise ValueError("Provided SasToken does not match existing module id")
        if self._mqtt_pipeline.pipeline_configuration.hostname != vals["hostname"]:
            raise ValueError("Provided SasToken does not match existing hostname")
        if new_token_o.expiry_time < int(time.time()):
            raise ValueError("Provided SasToken has already expired")
        self._mqtt_pipeline.pipeline_configuration.sastoken = new_token_o

    @classmethod
    def create_from_connection_string(cls, connection_string, **kwargs):
        """Instantiate the client from a IoTHub device or module connection string.

        :param str connection_string: The connection string for the IoTHub you wish to connect to.

        Optional configuration kwargs: server_verification_cert, websockets,
        cipher, product_info, proxy_options, sastoken_ttl (default 3600
        seconds), keep_alive.

        :raises: ValueError if given an invalid connection_string.
        :raises: TypeError if given an unsupported parameter.
        :returns: An instance of an IoTHub client that uses a connection string for authentication.
        """
        _validate_kwargs(**kwargs)
        # Parse the connection string and derive the SAS signing inputs from it.
        connection_string = cs.ConnectionString(connection_string)
        uri = _form_sas_uri(
            hostname=connection_string[cs.HOST_NAME],
            device_id=connection_string[cs.DEVICE_ID],
            module_id=connection_string.get(cs.MODULE_ID),
        )
        signing_mechanism = auth.SymmetricKeySigningMechanism(
            key=connection_string[cs.SHARED_ACCESS_KEY]
        )
        token_ttl = kwargs.get("sastoken_ttl", 3600)
        try:
            sastoken = st.RenewableSasToken(uri, signing_mechanism, ttl=token_ttl)
        except st.SasTokenError as e:
            raise ValueError("Could not create a SasToken using provided values") from e
        # Pipeline Config setup.
        config_kwargs = _get_config_kwargs(**kwargs)
        pipeline_configuration = pipeline.IoTHubPipelineConfig(
            device_id=connection_string[cs.DEVICE_ID],
            module_id=connection_string.get(cs.MODULE_ID),
            hostname=connection_string[cs.HOST_NAME],
            gateway_hostname=connection_string.get(cs.GATEWAY_HOST_NAME),
            sastoken=sastoken,
            **config_kwargs
        )
        if cls.__name__ == "IoTHubDeviceClient":
            # Blob upload is a device-only feature.
            pipeline_configuration.blob_upload = True
        # Pipeline setup.
        http_pipeline = pipeline.HTTPPipeline(pipeline_configuration)
        mqtt_pipeline = pipeline.MQTTPipeline(pipeline_configuration)
        return cls(mqtt_pipeline, http_pipeline)

    @classmethod
    def create_from_sastoken(cls, sastoken, **kwargs):
        """Instantiate the client from a pre-created SAS Token string.

        :param str sastoken: The SAS Token string

        Optional configuration kwargs: server_verification_cert, websockets,
        cipher, product_info, proxy_options, keep_alive. ``sastoken_ttl`` is
        NOT valid here because the token's lifetime is fixed by the caller.

        :raises: TypeError if given an unsupported parameter.
        :raises: ValueError if the sastoken parameter is invalid.
        """
        excluded_kwargs = [
            "sastoken_ttl"]
        _validate_kwargs(exclude=excluded_kwargs, **kwargs)
        try:
            sastoken_o = st.NonRenewableSasToken(sastoken)
        except st.SasTokenError as e:
            raise ValueError("Invalid SasToken provided") from e
        vals = _extract_sas_uri_values(sastoken_o.resource_uri)
        # The token's identity must match the kind of client being created.
        if cls.__name__ == "IoTHubDeviceClient" and vals["module_id"]:
            raise ValueError("Provided SasToken is for a module")
        if cls.__name__ == "IoTHubModuleClient" and not vals["module_id"]:
            raise ValueError("Provided SasToken is for a device")
        if sastoken_o.expiry_time < int(time.time()):
            raise ValueError("Provided SasToken has already expired")
        # Pipeline Config setup.
        config_kwargs = _get_config_kwargs(**kwargs)
        pipeline_configuration = pipeline.IoTHubPipelineConfig(
            device_id=vals["device_id"],
            module_id=vals["module_id"],
            hostname=vals["hostname"],
            sastoken=sastoken_o,
            **config_kwargs
        )
        if cls.__name__ == "IoTHubDeviceClient":
            # Blob upload is a device-only feature.
            pipeline_configuration.blob_upload = True
        # Pipeline setup.
        http_pipeline = pipeline.HTTPPipeline(pipeline_configuration)
        mqtt_pipeline = pipeline.MQTTPipeline(pipeline_configuration)
        return cls(mqtt_pipeline, http_pipeline)

    # --- Abstract operations to be provided by sync/async subclasses. ---

    @abc.abstractmethod
    def connect(self):
        pass

    @abc.abstractmethod
    def disconnect(self):
        pass

    @abc.abstractmethod
    def update_sastoken(self, sastoken):
        pass

    @abc.abstractmethod
    def send_message(self, message):
        pass

    @abc.abstractmethod
    def receive_method_request(self, method_name=None):
        pass

    @abc.abstractmethod
    def send_method_response(self, method_request, payload, status):
        pass

    @abc.abstractmethod
    def get_twin(self):
        pass

    @abc.abstractmethod
    def patch_twin_reported_properties(self, reported_properties_patch):
        pass

    @abc.abstractmethod
    def receive_twin_desired_properties_patch(self):
        pass

    @property
    def connected(self):
        """
        Read-only property to indicate if the transport is connected or not.
        """
        return self._mqtt_pipeline.connected

    @abc.abstractproperty
    def on_message_received(self):
        pass

    @abc.abstractproperty
    def on_method_request_received(self):
        pass

    @abc.abstractproperty
    def on_twin_desired_properties_patch_received(self):
        pass
@six.add_metaclass(abc.ABCMeta)
class AbstractIoTHubDeviceClient(AbstractIoTHubClient):
    """Abstract client for a device identity; adds device-only operations
    (C2D message receive, blob upload) on top of the generic client."""

    @classmethod
    def create_from_x509_certificate(cls, x509, hostname, device_id, **kwargs):
        """Instantiate a client using X509 certificate authentication.

        :param x509: The complete x509 certificate object (base64-encoded if it
            comes from a CER file).
        :type x509: :class:`azure.iot.device.X509`
        :param str hostname: Host running the IotHub.
        :param str device_id: The ID used to uniquely identify a device in the IoTHub.

        Optional configuration kwargs: server_verification_cert, websockets,
        cipher, product_info, proxy_options, keep_alive. ``sastoken_ttl`` is
        NOT valid because X509 auth does not use SAS tokens.

        :raises: TypeError if given an unsupported parameter.
        :returns: An instance of an IoTHub client that uses an X509 certificate for authentication.
        """
        excluded_kwargs = [
            "sastoken_ttl"]
        _validate_kwargs(exclude=excluded_kwargs, **kwargs)
        config_kwargs = _get_config_kwargs(**kwargs)
        pipeline_configuration = pipeline.IoTHubPipelineConfig(
            device_id=device_id, hostname=hostname, x509=x509, **config_kwargs
        )
        # Blob upload is a device-only feature.
        pipeline_configuration.blob_upload = True
        http_pipeline = pipeline.HTTPPipeline(pipeline_configuration)
        mqtt_pipeline = pipeline.MQTTPipeline(pipeline_configuration)
        return cls(mqtt_pipeline, http_pipeline)

    @classmethod
    def create_from_symmetric_key(cls, symmetric_key, hostname, device_id, **kwargs):
        """Instantiate a client using symmetric key authentication.

        :param symmetric_key: The symmetric key.
        :param str hostname: Host running the IotHub.
        :param str device_id: The device ID.

        Optional configuration kwargs: server_verification_cert, websockets,
        cipher, product_info, proxy_options, sastoken_ttl (default 3600
        seconds), keep_alive.

        :raises: TypeError if given an unsupported parameter.
        :raises: ValueError if the provided parameters are invalid.
        :return: An instance of an IoTHub client that uses a symmetric key for authentication.
        """
        _validate_kwargs(**kwargs)
        # Derive the SAS signing inputs from the device identity and key.
        uri = _form_sas_uri(hostname=hostname, device_id=device_id)
        signing_mechanism = auth.SymmetricKeySigningMechanism(key=symmetric_key)
        token_ttl = kwargs.get("sastoken_ttl", 3600)
        try:
            sastoken = st.RenewableSasToken(uri, signing_mechanism, ttl=token_ttl)
        except st.SasTokenError as e:
            raise ValueError("Could not create a SasToken using provided values") from e
        config_kwargs = _get_config_kwargs(**kwargs)
        pipeline_configuration = pipeline.IoTHubPipelineConfig(
            device_id=device_id, hostname=hostname, sastoken=sastoken, **config_kwargs
        )
        # Blob upload is a device-only feature.
        pipeline_configuration.blob_upload = True
        http_pipeline = pipeline.HTTPPipeline(pipeline_configuration)
        mqtt_pipeline = pipeline.MQTTPipeline(pipeline_configuration)
        return cls(mqtt_pipeline, http_pipeline)

    # --- Device-only abstract operations. ---

    @abc.abstractmethod
    def receive_message(self):
        pass

    @abc.abstractmethod
    def get_storage_info_for_blob(self, blob_name):
        pass

    @abc.abstractmethod
    def notify_blob_upload_status(self, correlation_id, is_success, status_code, status_description):
        pass
@six.add_metaclass(abc.ABCMeta)
class AbstractIoTHubModuleClient(AbstractIoTHubClient):
    """Abstract client for a module identity; adds module-only operations
    (input/output messages, direct method invocation) on top of the generic
    client."""

    @classmethod
    def create_from_edge_environment(cls, **kwargs):
        """Instantiate the client from the IoT Edge environment.

        This method can only be run from inside an IoT Edge container, or in a
        debugging environment configured for Edge development (e.g. Visual
        Studio, Visual Studio Code).

        Optional configuration kwargs: websockets, cipher, product_info,
        proxy_options, sastoken_ttl (default 3600 seconds), keep_alive.
        ``server_verification_cert`` is NOT valid because the cert comes from
        the Edge environment itself.

        :raises: OSError if the IoT Edge container is not configured correctly.
        :raises: ValueError if debug variables are invalid.
        :raises: TypeError if given an unsupported parameter.
        :returns: An instance of an IoTHub client that uses the IoT Edge
            environment for authentication.
        """
        excluded_kwargs = [
            "server_verification_cert"]
        _validate_kwargs(exclude=excluded_kwargs, **kwargs)
        try:
            # Real Edge container: identity comes from well-known env variables.
            hostname = os.environ["IOTEDGE_IOTHUBHOSTNAME"]
            device_id = os.environ["IOTEDGE_DEVICEID"]
            module_id = os.environ["IOTEDGE_MODULEID"]
            gateway_hostname = os.environ["IOTEDGE_GATEWAYHOSTNAME"]
            module_generation_id = os.environ["IOTEDGE_MODULEGENERATIONID"]
            workload_uri = os.environ["IOTEDGE_WORKLOADURI"]
            api_version = os.environ["IOTEDGE_APIVERSION"]
        except KeyError:
            # Debug/simulator scenario: fall back to a connection string and a
            # CA certificate file provided via env variables.
            try:
                connection_string = os.environ["EdgeHubConnectionString"]
                ca_cert_filepath = os.environ["EdgeModuleCACertificateFile"]
            except KeyError as e:
                raise OSError("IoT Edge environment not configured correctly") from e
            try:
                with io.open(ca_cert_filepath, mode="r") as ca_cert_file:
                    server_verification_cert = ca_cert_file.read()
            except (OSError, IOError) as e:
                # In Python 2, a non-existent file raises IOError (not OSError).
                raise ValueError("Invalid CA certificate file") from e
            connection_string = cs.ConnectionString(connection_string)
            try:
                device_id = connection_string[cs.DEVICE_ID]
                module_id = connection_string[cs.MODULE_ID]
                hostname = connection_string[cs.HOST_NAME]
                gateway_hostname = connection_string[cs.GATEWAY_HOST_NAME]
            except KeyError:
                raise ValueError("Invalid Connection String")
            signing_mechanism = auth.SymmetricKeySigningMechanism(key=(connection_string[cs.SHARED_ACCESS_KEY]))
        else:
            # Real Edge container: sign tokens and fetch the CA cert via the
            # Edge workload API (HSM).
            hsm = edge_hsm.IoTEdgeHsm(module_id=module_id,
              generation_id=module_generation_id,
              workload_uri=workload_uri,
              api_version=api_version)
            try:
                server_verification_cert = hsm.get_certificate()
            except edge_hsm.IoTEdgeError as e:
                raise OSError("Unexpected failure in IoTEdge") from e
            signing_mechanism = hsm
        uri = _form_sas_uri(hostname=hostname, device_id=device_id, module_id=module_id)
        token_ttl = kwargs.get("sastoken_ttl", 3600)
        try:
            sastoken = st.RenewableSasToken(uri, signing_mechanism, ttl=token_ttl)
        except st.SasTokenError as e:
            raise ValueError("Could not create a SasToken using the values provided, or in the Edge environment") from e
        # Pipeline Config setup.
        # NOTE(review): the decompiler emitted invalid "(**, **config_kwargs)"
        # here; keyword arguments reconstructed from the upstream
        # azure-iot-device sources -- confirm against the original bytecode.
        config_kwargs = _get_config_kwargs(**kwargs)
        pipeline_configuration = pipeline.IoTHubPipelineConfig(
            device_id=device_id,
            module_id=module_id,
            hostname=hostname,
            gateway_hostname=gateway_hostname,
            sastoken=sastoken,
            server_verification_cert=server_verification_cert,
            **config_kwargs
        )
        # Method invoking is an Edge-module-only feature.
        pipeline_configuration.method_invoke = True
        http_pipeline = pipeline.HTTPPipeline(pipeline_configuration)
        mqtt_pipeline = pipeline.MQTTPipeline(pipeline_configuration)
        return cls(mqtt_pipeline, http_pipeline)

    @classmethod
    def create_from_x509_certificate(cls, x509, hostname, device_id, module_id, **kwargs):
        """Instantiate a client using X509 certificate authentication.

        :param x509: The complete x509 certificate object (base64-encoded if it
            comes from a CER file).
        :type x509: :class:`azure.iot.device.X509`
        :param str hostname: Host running the IotHub.
        :param str device_id: The ID used to uniquely identify a device in the IoTHub.
        :param str module_id: The ID used to uniquely identify a module on a device on the IoTHub.

        Optional configuration kwargs: server_verification_cert, websockets,
        cipher, product_info, proxy_options, keep_alive. ``sastoken_ttl`` is
        NOT valid because X509 auth does not use SAS tokens.

        :raises: TypeError if given an unsupported parameter.
        :returns: An instance of an IoTHub client that uses an X509 certificate for authentication.
        """
        excluded_kwargs = [
            "sastoken_ttl"]
        _validate_kwargs(exclude=excluded_kwargs, **kwargs)
        config_kwargs = _get_config_kwargs(**kwargs)
        # NOTE(review): the decompiler emitted invalid "(**, **config_kwargs)"
        # here; keyword arguments reconstructed from the upstream
        # azure-iot-device sources -- confirm against the original bytecode.
        pipeline_configuration = pipeline.IoTHubPipelineConfig(
            device_id=device_id, module_id=module_id, hostname=hostname, x509=x509, **config_kwargs
        )
        http_pipeline = pipeline.HTTPPipeline(pipeline_configuration)
        mqtt_pipeline = pipeline.MQTTPipeline(pipeline_configuration)
        return cls(mqtt_pipeline, http_pipeline)

    # --- Module-only abstract operations. ---

    @abc.abstractmethod
    def send_message_to_output(self, message, output_name):
        pass

    @abc.abstractmethod
    def receive_message_on_input(self, input_name):
        pass

    @abc.abstractmethod
    def invoke_method(self, method_params, device_id, module_id=None):
        pass

View File

@@ -0,0 +1,15 @@
# uncompyle6 version 3.9.2
# Python bytecode version base 3.7.0 (3394)
# Decompiled from: Python 3.8.19 (default, Mar 20 2024, 15:27:52)
# [Clang 14.0.6 ]
# Embedded file name: /var/user/app/device_supervisorbak/device_supervisor/lib/azure/iot/device/iothub/aio/__init__.py
# Compiled at: 2024-04-18 03:12:58
# Size of source mod 2**32: 280 bytes
"""Azure IoT Hub Device SDK - Asynchronous
This SDK provides asynchronous functionality for communicating with the Azure IoT Hub
as a Device or Module.
"""
from .async_clients import IoTHubDeviceClient, IoTHubModuleClient
# Names re-exported as the package's public API.
__all__ = [
    "IoTHubDeviceClient", "IoTHubModuleClient"]

View File

@@ -0,0 +1,583 @@
# uncompyle6 version 3.9.2
# Python bytecode version base 3.7.0 (3394)
# Decompiled from: Python 3.8.19 (default, Mar 20 2024, 15:27:52)
# [Clang 14.0.6 ]
# Embedded file name: /var/user/app/device_supervisorbak/device_supervisor/lib/azure/iot/device/iothub/aio/async_clients.py
# Compiled at: 2024-04-18 03:12:58
# Size of source mod 2**32: 32314 bytes
"""This module contains user-facing asynchronous clients for the
Azure IoTHub Device SDK for Python.
"""
import logging, asyncio, deprecation
from azure.iot.device.common import async_adapter
from azure.iot.device.iothub.abstract_clients import AbstractIoTHubClient, AbstractIoTHubDeviceClient, AbstractIoTHubModuleClient
from azure.iot.device.iothub.models import Message
from azure.iot.device.iothub.pipeline import constant
from azure.iot.device.iothub.pipeline import exceptions as pipeline_exceptions
from azure.iot.device import exceptions
from azure.iot.device.iothub.inbox_manager import InboxManager
from .async_inbox import AsyncClientInbox
from . import async_handler_manager, loop_management
from azure.iot.device import constant as device_constant
logger = logging.getLogger(__name__)
async def handle_result(callback):
    """Await the completion of a pipeline operation callback, translating
    pipeline-level errors into their user-facing client exception equivalents.

    The decompiled ``try/finally: e = None; del e`` blocks were artifacts of
    Python 3's implicit except-variable cleanup; plain except clauses have
    identical semantics.

    :param callback: An AwaitableCallback whose ``completion()`` resolves with
        the operation result or raises a pipeline exception.
    :returns: The pipeline operation's result.
    :raises: A subclass of :class:`azure.iot.device.exceptions.ClientError`
        corresponding to the pipeline failure.
    """
    try:
        return await callback.completion()
    except pipeline_exceptions.ConnectionDroppedError as e:
        raise exceptions.ConnectionDroppedError(message="Lost connection to IoTHub", cause=e)
    except pipeline_exceptions.ConnectionFailedError as e:
        raise exceptions.ConnectionFailedError(message="Could not connect to IoTHub", cause=e)
    except pipeline_exceptions.UnauthorizedError as e:
        raise exceptions.CredentialError(message="Credentials invalid, could not connect", cause=e)
    except pipeline_exceptions.ProtocolClientError as e:
        raise exceptions.ClientError(message="Error in the IoTHub client", cause=e)
    except pipeline_exceptions.TlsExchangeAuthError as e:
        raise exceptions.ClientError(message="Error in the IoTHub client due to TLS exchanges.",
          cause=e)
    except pipeline_exceptions.ProtocolProxyError as e:
        raise exceptions.ClientError(message="Error in the IoTHub client raised due to proxy connections.",
          cause=e)
    except Exception as e:
        # Catch-all boundary: surface anything unexpected as a ClientError.
        raise exceptions.ClientError(message="Unexpected failure", cause=e)
class GenericIoTHubClient(AbstractIoTHubClient):
__doc__ = "A super class representing a generic asynchronous client.\n This class needs to be extended for specific clients.\n "
def __init__(self, **kwargs):
"""Initializer for a generic asynchronous client.
This initializer should not be called directly.
Instead, use one of the 'create_from_' classmethods to instantiate
:param mqtt_pipeline: The MQTTPipeline used for the client
:type mqtt_pipeline: :class:`azure.iot.device.iothub.pipeline.MQTTPipeline`
:param http_pipeline: The HTTPPipeline used for the client
:type http_pipeline: :class:`azure.iot.device.iothub.pipeline.HTTPPipeline`
"""
(super().__init__)(**kwargs)
self._inbox_manager = InboxManager(inbox_type=AsyncClientInbox)
self._handler_manager = async_handler_manager.AsyncHandlerManager(self._inbox_manager)
self._mqtt_pipeline.on_connected = self._on_connected
self._mqtt_pipeline.on_disconnected = self._on_disconnected
self._mqtt_pipeline.on_method_request_received = self._inbox_manager.route_method_request
self._mqtt_pipeline.on_twin_patch_received = self._inbox_manager.route_twin_patch
async def _enable_feature(self, feature_name):
"""Enable an Azure IoT Hub feature
:param feature_name: The name of the feature to enable.
See azure.iot.device.common.pipeline.constant for possible values.
"""
logger.info("Enabling feature:" + feature_name + "...")
if not self._mqtt_pipeline.feature_enabled[feature_name]:
enable_feature_async = async_adapter.emulate_async(self._mqtt_pipeline.enable_feature)
callback = async_adapter.AwaitableCallback()
await enable_feature_async(feature_name, callback=callback)
await handle_result(callback)
logger.info("Successfully enabled feature:" + feature_name)
else:
logger.info("Feature ({}) already enabled - skipping".format(feature_name))
async def _disable_feature(self, feature_name):
"""Disable an Azure IoT Hub feature
:param feature_name: The name of the feature to enable.
See azure.iot.device.common.pipeline.constant for possible values.
"""
logger.info("Disabling feature: {}...".format(feature_name))
if self._mqtt_pipeline.feature_enabled[feature_name]:
disable_feature_async = async_adapter.emulate_async(self._mqtt_pipeline.disable_feature)
callback = async_adapter.AwaitableCallback()
await disable_feature_async(feature_name, callback=callback)
await handle_result(callback)
logger.info("Successfully disabled feature: {}".format(feature_name))
else:
logger.info("Feature ({}) already disabled - skipping".format(feature_name))
    async def connect(self):
        """Connects the client to an Azure IoT Hub or Azure IoT Edge Hub instance.
        The destination is chosen based on the credentials passed via the auth_provider parameter
        that was provided when this object was initialized.

        :raises: :class:`azure.iot.device.exceptions.CredentialError` if credentials are invalid
            and a connection cannot be established.
        :raises: :class:`azure.iot.device.exceptions.ConnectionFailedError` if a establishing a
            connection results in failure.
        :raises: :class:`azure.iot.device.exceptions.ConnectionDroppedError` if connection is lost
            during execution.
        :raises: :class:`azure.iot.device.exceptions.ClientError` if there is an unexpected failure
            during execution.
        """
        logger.info("Connecting to Hub...")
        # Wrap the callback-based pipeline op so it can be awaited, then
        # translate any pipeline error via handle_result.
        connect_async = async_adapter.emulate_async(self._mqtt_pipeline.connect)
        callback = async_adapter.AwaitableCallback()
        await connect_async(callback=callback)
        await handle_result(callback)
        logger.info("Successfully connected to Hub")
    async def disconnect(self):
        """Disconnect the client from the Azure IoT Hub or Azure IoT Edge Hub instance.
        It is recommended that you make sure to call this coroutine when you are completely done
        with the your client instance.

        :raises: :class:`azure.iot.device.exceptions.ClientError` if there is an unexpected failure
            during execution.
        """
        logger.info("Disconnecting from Hub...")
        # First disconnect stops inbound traffic before handlers are shut down.
        logger.debug("Executing initial disconnect")
        disconnect_async = async_adapter.emulate_async(self._mqtt_pipeline.disconnect)
        callback = async_adapter.AwaitableCallback()
        await disconnect_async(callback=callback)
        await handle_result(callback)
        logger.debug("Successfully executed initial disconnect")
        logger.debug("Stopping handlers...")
        self._handler_manager.stop()
        logger.debug("Successfully stopped handlers")
        # NOTE(review): the second disconnect appears to clean up any
        # connection re-established while handlers were draining -- confirm
        # intent before changing this ordering.
        logger.debug("Executing secondary disconnect...")
        disconnect_async = async_adapter.emulate_async(self._mqtt_pipeline.disconnect)
        callback = async_adapter.AwaitableCallback()
        await disconnect_async(callback=callback)
        await handle_result(callback)
        logger.debug("Successfully executed secondary disconnect")
        logger.info("Successfully disconnected from Hub")
async def update_sastoken(self, sastoken):
    """
    Update the client's SAS Token used for authentication, then reauthorizes the connection.

    This API can only be used if the client was initially created with a SAS Token.

    Note also that this API may return before the reauthorization/reconnection is completed.
    This means that some errors that may occur as part of the reconnection could occur in the
    background, and will not be raised by this method.

    :param str sastoken: The new SAS Token string for the client to use

    :raises: :class:`azure.iot.device.exceptions.ClientError` if the client was not initially
        created with a SAS token.
    :raises: :class:`azure.iot.device.exceptions.ClientError` if there is an unexpected failure
        during execution.
    :raises: ValueError if the sastoken parameter is invalid
    """
    # Swap in the new token first (raises if the client wasn't SAS-based).
    self._replace_user_supplied_sastoken(sastoken)

    # Force the pipeline to re-authorize with the new credential.
    logger.info("Reauthorizing connection with Hub...")
    reauthorize = async_adapter.emulate_async(self._mqtt_pipeline.reauthorize_connection)
    cb = async_adapter.AwaitableCallback()
    await reauthorize(callback=cb)
    await handle_result(cb)
    logger.info("Successfully reauthorized connection to Hub")
async def send_message(self, message):
    """Sends a message to the default events endpoint on the Azure IoT Hub or Azure IoT Edge Hub instance.

    If the connection to the service has not previously been opened by a call to connect, this
    function will open the connection before sending the event.

    :param message: The actual message to send. Anything passed that is not an instance of the
        Message class will be converted to Message object.
    :type message: :class:`azure.iot.device.Message` or str

    :raises: :class:`azure.iot.device.exceptions.CredentialError` if credentials are invalid
        and a connection cannot be established.
    :raises: :class:`azure.iot.device.exceptions.ConnectionFailedError` if a establishing a
        connection results in failure.
    :raises: :class:`azure.iot.device.exceptions.ConnectionDroppedError` if connection is lost
        during execution.
    :raises: :class:`azure.iot.device.exceptions.ClientError` if there is an unexpected failure
        during execution.
    :raises: ValueError if the message fails size validation.
    """
    # Coerce non-Message input into a Message object.
    outgoing = message if isinstance(message, Message) else Message(message)

    # Telemetry messages are capped by the service; validate before sending.
    if outgoing.get_size() > device_constant.TELEMETRY_MESSAGE_SIZE_LIMIT:
        raise ValueError("Size of telemetry message can not exceed 256 KB.")

    logger.info("Sending message to Hub...")
    do_send = async_adapter.emulate_async(self._mqtt_pipeline.send_message)
    cb = async_adapter.AwaitableCallback()
    await do_send(outgoing, callback=cb)
    await handle_result(cb)
    logger.info("Successfully sent message to Hub")
@deprecation.deprecated(
    deprecated_in="2.3.0",
    current_version=device_constant.VERSION,
    details="We recommend that you use the .on_method_request_received property to set a handler instead",
)
async def receive_method_request(self, method_name=None):
    """Receive a method request via the Azure IoT Hub or Azure IoT Edge Hub.

    If no method request is yet available, will wait until it is available.

    :param str method_name: Optionally provide the name of the method to receive requests for.
        If this parameter is not given, all methods not already being specifically targeted by
        a different call to receive_method will be received.
    :returns: MethodRequest object representing the received method request.
    :rtype: :class:`azure.iot.device.MethodRequest`
    """
    self._check_receive_mode_is_api()
    # Lazily enable the methods feature on first use.
    if not self._mqtt_pipeline.feature_enabled[constant.METHODS]:
        await self._enable_feature(constant.METHODS)
    inbox = self._inbox_manager.get_method_request_inbox(method_name)
    logger.info("Waiting for method request...")
    request = await inbox.get()
    logger.info("Received method request")
    return request
async def send_method_response(self, method_response):
    """Send a response to a method request via the Azure IoT Hub or Azure IoT Edge Hub.

    If the connection to the service has not previously been opened by a call to connect, this
    function will open the connection before sending the event.

    :param method_response: The MethodResponse to send
    :type method_response: :class:`azure.iot.device.MethodResponse`

    :raises: :class:`azure.iot.device.exceptions.CredentialError` if credentials are invalid
        and a connection cannot be established.
    :raises: :class:`azure.iot.device.exceptions.ConnectionFailedError` if a establishing a
        connection results in failure.
    :raises: :class:`azure.iot.device.exceptions.ConnectionDroppedError` if connection is lost
        during execution.
    :raises: :class:`azure.iot.device.exceptions.ClientError` if there is an unexpected failure
        during execution.
    """
    logger.info("Sending method response to Hub...")
    do_send = async_adapter.emulate_async(self._mqtt_pipeline.send_method_response)
    cb = async_adapter.AwaitableCallback()
    await do_send(method_response, callback=cb)
    await handle_result(cb)
    logger.info("Successfully sent method response to Hub")
async def get_twin(self):
    """
    Gets the device or module twin from the Azure IoT Hub or Azure IoT Edge Hub service.

    :returns: Complete Twin as a JSON dict
    :rtype: dict

    :raises: :class:`azure.iot.device.exceptions.CredentialError` if credentials are invalid
        and a connection cannot be established.
    :raises: :class:`azure.iot.device.exceptions.ConnectionFailedError` if a establishing a
        connection results in failure.
    :raises: :class:`azure.iot.device.exceptions.ConnectionDroppedError` if connection is lost
        during execution.
    :raises: :class:`azure.iot.device.exceptions.ClientError` if there is an unexpected failure
        during execution.
    """
    logger.info("Getting twin")
    # Twin feature must be enabled before any twin operation.
    if not self._mqtt_pipeline.feature_enabled[constant.TWIN]:
        await self._enable_feature(constant.TWIN)
    fetch_twin = async_adapter.emulate_async(self._mqtt_pipeline.get_twin)
    cb = async_adapter.AwaitableCallback(return_arg_name="twin")
    await fetch_twin(callback=cb)
    result = await handle_result(cb)
    logger.info("Successfully retrieved twin")
    return result
async def patch_twin_reported_properties(self, reported_properties_patch):
    """
    Update reported properties with the Azure IoT Hub or Azure IoT Edge Hub service.

    If the service returns an error on the patch operation, this function will raise the
    appropriate error.

    :param reported_properties_patch: Twin Reported Properties patch as a JSON dict
    :type reported_properties_patch: dict

    :raises: :class:`azure.iot.device.exceptions.CredentialError` if credentials are invalid
        and a connection cannot be established.
    :raises: :class:`azure.iot.device.exceptions.ConnectionFailedError` if a establishing a
        connection results in failure.
    :raises: :class:`azure.iot.device.exceptions.ConnectionDroppedError` if connection is lost
        during execution.
    :raises: :class:`azure.iot.device.exceptions.ClientError` if there is an unexpected failure
        during execution.
    """
    logger.info("Patching twin reported properties")
    # Twin feature must be enabled before any twin operation.
    if not self._mqtt_pipeline.feature_enabled[constant.TWIN]:
        await self._enable_feature(constant.TWIN)
    send_patch = async_adapter.emulate_async(self._mqtt_pipeline.patch_twin_reported_properties)
    cb = async_adapter.AwaitableCallback()
    await send_patch(patch=reported_properties_patch, callback=cb)
    await handle_result(cb)
    logger.info("Successfully sent twin patch")
@deprecation.deprecated(
    deprecated_in="2.3.0",
    current_version=device_constant.VERSION,
    details="We recommend that you use the .on_twin_desired_properties_patch_received property to set a handler instead",
)
async def receive_twin_desired_properties_patch(self):
    """
    Receive a desired property patch via the Azure IoT Hub or Azure IoT Edge Hub.

    If no method request is yet available, will wait until it is available.

    :returns: Twin Desired Properties patch as a JSON dict
    :rtype: dict
    """
    self._check_receive_mode_is_api()
    # Lazily enable the twin-patch feature on first use.
    if not self._mqtt_pipeline.feature_enabled[constant.TWIN_PATCHES]:
        await self._enable_feature(constant.TWIN_PATCHES)
    patch_inbox = self._inbox_manager.get_twin_patch_inbox()
    logger.info("Waiting for twin patches...")
    desired_patch = await patch_inbox.get()
    logger.info("twin patch received")
    return desired_patch
def _generic_handler_setter(self, handler_name, feature_name, new_handler):
    """Set a receive handler and synchronize the corresponding pipeline feature.

    Stores ``new_handler`` on the handler manager, then enables the pipeline
    feature when a handler is being set (and the feature is not yet enabled),
    or disables it when the handler is being removed (and the feature is
    currently enabled). Enable/disable coroutines are executed on the client's
    internal event loop so this synchronous setter can block on the result.

    BUG FIX (decompilation artifact): the previous code computed
    ``loop = self._mqtt_pipeline.feature_enabled[feature_name] or
    loop_management.get_client_internal_loop()``, which (a) passed ``True``
    instead of an event loop to ``run_coroutine_threadsafe`` whenever the
    feature was already enabled, and (b) redundantly re-enabled an
    already-enabled feature. The correct guard (matching the upstream SDK) is
    to enable only when the feature is NOT yet enabled.

    :param str handler_name: Name of the handler attribute on the handler manager.
    :param str feature_name: Pipeline feature flag associated with the handler.
    :param new_handler: The handler function/coroutine to set, or None to remove.
    """
    self._check_receive_mode_is_handler()
    # Set the handler on the handler manager.
    setattr(self._handler_manager, handler_name, new_handler)

    if new_handler is not None:
        # Enable the feature if it is not already enabled.
        if not self._mqtt_pipeline.feature_enabled[feature_name]:
            loop = loop_management.get_client_internal_loop()
            fut = asyncio.run_coroutine_threadsafe(self._enable_feature(feature_name), loop=loop)
            fut.result()
    elif self._mqtt_pipeline.feature_enabled[feature_name]:
        # Handler removed: disable the feature if it is currently enabled.
        loop = loop_management.get_client_internal_loop()
        fut = asyncio.run_coroutine_threadsafe(self._disable_feature(feature_name), loop=loop)
        fut.result()
@property
def on_twin_desired_properties_patch_received(self):
    """The handler function or coroutine that will be called when a twin desired properties
    patch is received.

    The function or coroutine definition should take one positional argument (the twin patch
    in the form of a JSON dictionary object)"""
    manager = self._handler_manager
    return manager.on_twin_desired_properties_patch_received

@on_twin_desired_properties_patch_received.setter
def on_twin_desired_properties_patch_received(self, value):
    # Delegate to the shared setter, which also toggles the TWIN_PATCHES feature.
    self._generic_handler_setter(
        "on_twin_desired_properties_patch_received", constant.TWIN_PATCHES, value
    )
@property
def on_method_request_received(self):
    """The handler function or coroutine that will be called when a method request is received.

    The function or coroutine definition should take one positional argument (the
    :class:`azure.iot.device.MethodRequest` object)"""
    manager = self._handler_manager
    return manager.on_method_request_received

@on_method_request_received.setter
def on_method_request_received(self, value):
    # Delegate to the shared setter, which also toggles the METHODS feature.
    self._generic_handler_setter("on_method_request_received", constant.METHODS, value)
class IoTHubDeviceClient(GenericIoTHubClient, AbstractIoTHubDeviceClient):
    """An asynchronous device client that connects to an Azure IoT Hub instance.

    Intended for usage with Python 3.5.3+
    """

    def __init__(self, mqtt_pipeline, http_pipeline):
        """Initializer for a IoTHubDeviceClient.

        This initializer should not be called directly.
        Instead, use one of the 'create_from_' classmethods to instantiate

        :param mqtt_pipeline: The pipeline used to connect to the IoTHub endpoint.
        :type mqtt_pipeline: :class:`azure.iot.device.iothub.pipeline.MQTTPipeline`
        """
        super().__init__(mqtt_pipeline=mqtt_pipeline, http_pipeline=http_pipeline)
        # Route incoming cloud-to-device messages into the inbox system.
        self._mqtt_pipeline.on_c2d_message_received = self._inbox_manager.route_c2d_message

    @deprecation.deprecated(
        deprecated_in="2.3.0",
        current_version=device_constant.VERSION,
        details="We recommend that you use the .on_message_received property to set a handler instead",
    )
    async def receive_message(self):
        """Receive a message that has been sent from the Azure IoT Hub.

        If no message is yet available, will wait until an item is available.

        :returns: Message that was sent from the Azure IoT Hub.
        :rtype: :class:`azure.iot.device.Message`
        """
        self._check_receive_mode_is_api()
        # Lazily enable C2D messaging the first time a receive is attempted.
        if not self._mqtt_pipeline.feature_enabled[constant.C2D_MSG]:
            await self._enable_feature(constant.C2D_MSG)
        inbox = self._inbox_manager.get_c2d_message_inbox()
        logger.info("Waiting for message from Hub...")
        received = await inbox.get()
        logger.info("Message received")
        return received

    async def get_storage_info_for_blob(self, blob_name):
        """Sends a POST request over HTTP to an IoTHub endpoint that will return information for uploading via the Azure Storage Account linked to the IoTHub your device is connected to.

        :param str blob_name: The name in string format of the blob that will be uploaded using the storage API. This name will be used to generate the proper credentials for Storage, and needs to match what will be used with the Azure Storage SDK to perform the blob upload.

        :returns: A JSON-like (dictionary) object from IoT Hub that will contain relevant information including: correlationId, hostName, containerName, blobName, sasToken.
        """
        fetch_info = async_adapter.emulate_async(self._http_pipeline.get_storage_info_for_blob)
        cb = async_adapter.AwaitableCallback(return_arg_name="storage_info")
        await fetch_info(blob_name=blob_name, callback=cb)
        info = await handle_result(cb)
        logger.info("Successfully retrieved storage_info")
        return info

    async def notify_blob_upload_status(self, correlation_id, is_success, status_code, status_description):
        """When the upload is complete, the device sends a POST request to the IoT Hub endpoint with information on the status of an upload to blob attempt. This is used by IoT Hub to notify listening clients.

        :param str correlation_id: Provided by IoT Hub on get_storage_info_for_blob request.
        :param bool is_success: A boolean that indicates whether the file was uploaded successfully.
        :param int status_code: A numeric status code that is the status for the upload of the fiel to storage.
        :param str status_description: A description that corresponds to the status_code.
        """
        notify = async_adapter.emulate_async(self._http_pipeline.notify_blob_upload_status)
        cb = async_adapter.AwaitableCallback()
        await notify(
            correlation_id=correlation_id,
            is_success=is_success,
            status_code=status_code,
            status_description=status_description,
            callback=cb,
        )
        await handle_result(cb)
        logger.info("Successfully notified blob upload status")

    @property
    def on_message_received(self):
        """The handler function or coroutine that will be called when a message is received.

        The function or coroutine definition should take one positional argument (the
        :class:`azure.iot.device.Message` object)"""
        return self._handler_manager.on_message_received

    @on_message_received.setter
    def on_message_received(self, value):
        # Delegate to the shared setter, which also toggles the C2D_MSG feature.
        self._generic_handler_setter("on_message_received", constant.C2D_MSG, value)
class IoTHubModuleClient(GenericIoTHubClient, AbstractIoTHubModuleClient):
    """An asynchronous module client that connects to an Azure IoT Hub or Azure IoT Edge instance.

    Intended for usage with Python 3.5.3+
    """

    def __init__(self, mqtt_pipeline, http_pipeline):
        """Intializer for a IoTHubModuleClient.

        This initializer should not be called directly.
        Instead, use one of the 'create_from_' classmethods to instantiate

        :param mqtt_pipeline: The pipeline used to connect to the IoTHub endpoint.
        :type mqtt_pipeline: :class:`azure.iot.device.iothub.pipeline.MQTTPipeline`
        """
        super().__init__(mqtt_pipeline=mqtt_pipeline, http_pipeline=http_pipeline)
        # Route incoming module-to-module input messages into the inbox system.
        self._mqtt_pipeline.on_input_message_received = self._inbox_manager.route_input_message

    async def send_message_to_output(self, message, output_name):
        """Sends an event/message to the given module output.

        These are outgoing events and are meant to be "output events"

        If the connection to the service has not previously been opened by a call to connect, this
        function will open the connection before sending the event.

        :param message: Message to send to the given output. Anything passed that is not an
            instance of the Message class will be converted to Message object.
        :type message: :class:`azure.iot.device.Message` or str
        :param str output_name: Name of the output to send the event to.

        :raises: :class:`azure.iot.device.exceptions.CredentialError` if credentials are invalid
            and a connection cannot be established.
        :raises: :class:`azure.iot.device.exceptions.ConnectionFailedError` if a establishing a
            connection results in failure.
        :raises: :class:`azure.iot.device.exceptions.ConnectionDroppedError` if connection is lost
            during execution.
        :raises: :class:`azure.iot.device.exceptions.ClientError` if there is an unexpected failure
            during execution.
        :raises: ValueError if the message fails size validation.
        """
        # Coerce non-Message input into a Message object.
        outgoing = message if isinstance(message, Message) else Message(message)

        # Output messages share the telemetry size limit; validate before sending.
        if outgoing.get_size() > device_constant.TELEMETRY_MESSAGE_SIZE_LIMIT:
            raise ValueError("Size of message can not exceed 256 KB.")
        outgoing.output_name = output_name

        logger.info("Sending message to output:" + output_name + "...")
        do_send = async_adapter.emulate_async(self._mqtt_pipeline.send_output_message)
        cb = async_adapter.AwaitableCallback()
        await do_send(outgoing, callback=cb)
        await handle_result(cb)
        logger.info("Successfully sent message to output: " + output_name)

    @deprecation.deprecated(
        deprecated_in="2.3.0",
        current_version=device_constant.VERSION,
        details="We recommend that you use the .on_message_received property to set a handler instead",
    )
    async def receive_message_on_input(self, input_name):
        """Receive an input message that has been sent from another Module to a specific input.

        If no message is yet available, will wait until an item is available.

        :param str input_name: The input name to receive a message on.
        :returns: Message that was sent to the specified input.
        :rtype: :class:`azure.iot.device.Message`
        """
        self._check_receive_mode_is_api()
        # Lazily enable input messaging on first use.
        if not self._mqtt_pipeline.feature_enabled[constant.INPUT_MSG]:
            await self._enable_feature(constant.INPUT_MSG)
        inbox = self._inbox_manager.get_input_message_inbox(input_name)
        logger.info("Waiting for input message on: " + input_name + "...")
        received = await inbox.get()
        logger.info("Input message received on: " + input_name)
        return received

    async def invoke_method(self, method_params, device_id, module_id=None):
        """Invoke a method from your client onto a device or module client, and receive the response to the method call.

        :param dict method_params: Should contain a methodName (str), payload (str),
            connectTimeoutInSeconds (int), responseTimeoutInSeconds (int).
        :param str device_id: Device ID of the target device where the method will be invoked.
        :param str module_id: Module ID of the target module where the method will be invoked. (Optional)
        :returns: method_result should contain a status, and a payload
        :rtype: dict
        """
        logger.info("Invoking {} method on {}{}".format(method_params["methodName"], device_id, module_id))
        do_invoke = async_adapter.emulate_async(self._http_pipeline.invoke_method)
        cb = async_adapter.AwaitableCallback(return_arg_name="invoke_method_response")
        await do_invoke(device_id, method_params, callback=cb, module_id=module_id)
        response = await handle_result(cb)
        logger.info("Successfully invoked method")
        return response

    @property
    def on_message_received(self):
        """The handler function or coroutine that will be called when an input message is received.

        The function or coroutine definition should take one positional argument (the
        :class:`azure.iot.device.Message` object)"""
        return self._handler_manager.on_message_received

    @on_message_received.setter
    def on_message_received(self, value):
        # Delegate to the shared setter, which also toggles the INPUT_MSG feature.
        self._generic_handler_setter("on_message_received", constant.INPUT_MSG, value)

View File

@@ -0,0 +1,115 @@
# uncompyle6 version 3.9.2
# Python bytecode version base 3.7.0 (3394)
# Decompiled from: Python 3.8.19 (default, Mar 20 2024, 15:27:52)
# [Clang 14.0.6 ]
# Embedded file name: /var/user/app/device_supervisorbak/device_supervisor/lib/azure/iot/device/iothub/aio/async_handler_manager.py
# Compiled at: 2024-04-18 03:12:58
# Size of source mod 2**32: 8569 bytes
""" This module contains the manager for handler methods used by the aio clients"""
import asyncio, logging, inspect, threading, concurrent.futures
from azure.iot.device.common import asyncio_compat, handle_exceptions
from azure.iot.device.iothub.sync_handler_manager import AbstractHandlerManager, HandlerManagerException, HandlerRunnerKillerSentinel
from . import loop_management
logger = logging.getLogger(__name__)
class AsyncHandlerManager(AbstractHandlerManager):
    # NOTE: decompiled source — the original class docstring survives as a
    # plain __doc__ assignment.
    __doc__ = "Handler manager for use with asynchronous clients"

    async def _inbox_handler_runner(self, inbox, handler_name):
        """Run infinite loop that waits for an inbox to receive an object from it, then calls
        the handler with that object
        """
        logger.debug("HANDLER RUNNER ({}): Starting runner".format(handler_name))

        def _handler_callback(future):
            # Done-callback attached to every handler invocation (coroutine or
            # thread-pool).  Surfaces any raised exception as a background
            # exception rather than letting it vanish inside the executor/loop.
            try:
                e = future.exception(timeout=0)
            except Exception as raised_e:
                # Could not even retrieve the result from the future.
                try:
                    new_err = HandlerManagerException(message=("HANDLER ({}): Unable to retrieve exception data from incomplete invocation".format(handler_name)),
                      cause=raised_e)
                    handle_exceptions.handle_background_exception(new_err)
                finally:
                    # Decompiler artifact of the implicit `except ... as` name cleanup.
                    raised_e = None
                    del raised_e
            else:
                if e:
                    # Handler ran but raised; report it as a background error.
                    new_err = HandlerManagerException(message=("HANDLER ({}): Error during invocation".format(handler_name)),
                      cause=e)
                    handle_exceptions.handle_background_exception(new_err)
                else:
                    logger.debug("HANDLER ({}): Successfully completed invocation".format(handler_name))

        # Thread pool used to run synchronous (non-coroutine) handlers.
        tpe = concurrent.futures.ThreadPoolExecutor(max_workers=4)
        while True:
            handler_arg = await inbox.get()
            if isinstance(handler_arg, HandlerRunnerKillerSentinel):
                # Sentinel signals the runner to stop; clean up and exit loop.
                logger.debug("HANDLER RUNNER ({}): HandlerRunnerKillerSentinel found in inbox. Exiting.".format(handler_name))
                tpe.shutdown()
                break
            # Re-fetch the handler each iteration so a handler swapped at
            # runtime takes effect immediately.
            handler = getattr(self, handler_name)
            logger.debug("HANDLER RUNNER ({}): Invoking handler".format(handler_name))
            if inspect.iscoroutinefunction(handler):
                # Coroutine handlers run on the dedicated handler event loop.
                handler_loop = loop_management.get_client_handler_loop()
                fut = asyncio.run_coroutine_threadsafe(handler(handler_arg), handler_loop)
                fut.add_done_callback(_handler_callback)
            else:
                # Synchronous handlers run on the thread pool.
                fut = tpe.submit(handler, handler_arg)
                fut.add_done_callback(_handler_callback)

    async def _event_handler_runner(self, handler_name):
        # Placeholder: event-based (non-inbox) handler runners are not implemented.
        logger.error("._event_handler_runner() not yet implemented")

    def _start_handler_runner(self, handler_name):
        """Create, and store a task for running a handler
        """
        if self._handler_runners[handler_name] is not None:
            # Only one runner per handler may exist at a time.
            raise HandlerManagerException("Cannot create task for handler runner: {}. Task already exists".format(handler_name))
        else:
            inbox = self._get_inbox_for_handler(handler_name)
            if inbox:
                coro = self._inbox_handler_runner(inbox, handler_name)
            else:
                coro = self._event_handler_runner(handler_name)
            # Runner coroutines execute on a dedicated background loop/thread.
            runner_loop = loop_management.get_client_handler_runner_loop()
            future = asyncio.run_coroutine_threadsafe(coro, runner_loop)

            def _handler_runner_callback(completed_future):
                # Inspect how the runner task ended; restart it if it crashed.
                try:
                    e = completed_future.exception(timeout=0)
                except Exception as raised_e:
                    try:
                        new_err = HandlerManagerException(message=("HANDLER RUNNER ({}): Unable to retrieve exception data from incomplete task".format(handler_name)),
                          cause=raised_e)
                        handle_exceptions.handle_background_exception(new_err)
                    finally:
                        # Decompiler artifact of the implicit `except ... as` name cleanup.
                        raised_e = None
                        del raised_e
                else:
                    if e:
                        new_err = HandlerManagerException(message=("HANDLER RUNNER ({}): Unexpected error during task".format(handler_name)),
                          cause=e)
                        handle_exceptions.handle_background_exception(new_err)
                        # Crash recovery: clear the dead runner and start a new one.
                        self._handler_runners[handler_name] = None
                        self._start_handler_runner(handler_name)
                    else:
                        logger.debug("HANDLER RUNNER ({}): Task successfully completed without exception".format(handler_name))

            future.add_done_callback(_handler_runner_callback)
            self._handler_runners[handler_name] = future
            logger.debug("Future for Handler Runner ({}) was stored".format(handler_name))

    def _stop_handler_runner(self, handler_name):
        """Stop and remove a handler runner task.
        All pending items in the corresponding inbox will be handled by the handler before stoppage.
        """
        logger.debug("Adding HandlerRunnerKillerSentinel to inbox corresponding to {} handler runner".format(handler_name))
        inbox = self._get_inbox_for_handler(handler_name)
        inbox._put(HandlerRunnerKillerSentinel())
        # Block until the runner coroutine drains the inbox and exits.
        logger.debug("Waiting for {} handler runner to exit...".format(handler_name))
        future = self._handler_runners[handler_name]
        future.result()
        self._handler_runners[handler_name] = None
        logger.debug("Handler runner for {} has been stopped".format(handler_name))

View File

@@ -0,0 +1,69 @@
# uncompyle6 version 3.9.2
# Python bytecode version base 3.7.0 (3394)
# Decompiled from: Python 3.8.19 (default, Mar 20 2024, 15:27:52)
# [Clang 14.0.6 ]
# Embedded file name: /var/user/app/device_supervisorbak/device_supervisor/lib/azure/iot/device/iothub/aio/async_inbox.py
# Compiled at: 2024-04-18 03:12:58
# Size of source mod 2**32: 3526 bytes
"""This module contains an Inbox class for use with an asynchronous client"""
import asyncio, threading, janus
from azure.iot.device.iothub.sync_inbox import AbstractInbox
from . import loop_management
class AsyncClientInbox(AbstractInbox):
    """Holds generic incoming data for an asynchronous client.

    All methods implemented in this class are threadsafe.
    """

    def __init__(self):
        """Initializer for AsyncClientInbox."""
        # A janus.Queue must be created on a running event loop, so build it on
        # the client's internal loop and block here until it is ready.
        async def make_queue():
            return janus.Queue()

        internal_loop = loop_management.get_client_internal_loop()
        pending = asyncio.run_coroutine_threadsafe(make_queue(), internal_loop)
        self._queue = pending.result()

    def __contains__(self, item):
        """Return True if item is in Inbox, False otherwise"""
        # Reach into janus internals so the membership check happens under the
        # queue's synchronous mutex.
        with self._queue._sync_mutex:
            return item in self._queue._queue

    def _put(self, item):
        """Put an item into the Inbox.

        Block if necessary until a free slot is available.
        Only to be used by the InboxManager.

        :param item: The item to be put in the Inbox.
        """
        self._queue.sync_q.put(item)

    async def get(self):
        """Remove and return an item from the Inbox.

        If Inbox is empty, wait until an item is available.

        :returns: An item from the Inbox.
        """
        # The async side of the janus queue lives on the internal loop, so the
        # get must be scheduled there and awaited via a wrapped future.
        internal_loop = loop_management.get_client_internal_loop()
        pending = asyncio.run_coroutine_threadsafe(self._queue.async_q.get(), internal_loop)
        return await asyncio.wrap_future(pending)

    def empty(self):
        """Returns True if the inbox is empty, False otherwise

        Note that there is a race condition here, and this may not be accurate. This is because
        the .empty() operation on a janus queue is not threadsafe.

        :returns: Boolean indicating if the inbox is empty
        """
        return self._queue.async_q.empty()

    def clear(self):
        """Remove all items from the inbox."""
        # Drain synchronously until the queue reports empty.
        draining = True
        while draining:
            try:
                self._queue.sync_q.get_nowait()
            except janus.SyncQueueEmpty:
                draining = False

View File

@@ -0,0 +1,57 @@
# uncompyle6 version 3.9.2
# Python bytecode version base 3.7.0 (3394)
# Decompiled from: Python 3.8.19 (default, Mar 20 2024, 15:27:52)
# [Clang 14.0.6 ]
# Embedded file name: /var/user/app/device_supervisorbak/device_supervisor/lib/azure/iot/device/iothub/aio/loop_management.py
# Compiled at: 2024-04-18 03:12:58
# Size of source mod 2**32: 2400 bytes
""" This module contains functions of managing event loops for the IoTHub client
"""
import asyncio, threading, logging
from azure.iot.device.common import asyncio_compat
logger = logging.getLogger(__name__)
# Registry of named background event loops; each entry is created lazily by
# the corresponding get_client_*_loop() accessor below.
loops = {'CLIENT_HANDLER_LOOP':None, 
 'CLIENT_INTERNAL_LOOP':None, 
 'CLIENT_HANDLER_RUNNER_LOOP':None}
def _cleanup():
    """Clear all running loops and end respective threads.

    ONLY FOR TESTING USAGE

    By using this function, you can wipe all global loops.
    DO NOT USE THIS IN PRODUCTION CODE
    """
    for name in loops:
        running = loops[name]
        if running is None:
            continue
        logger.debug("Stopping event loop - {}".format(name))
        # Stopping the loop ends the daemon thread's run_forever() call.
        running.call_soon_threadsafe(running.stop)
        loops[name] = None
def _make_new_loop(loop_name):
    """Create a fresh event loop, run it forever on a daemon thread, and register it."""
    logger.debug("Creating new event loop - {}".format(loop_name))
    fresh_loop = asyncio.new_event_loop()
    # Daemon thread so the loop never blocks interpreter shutdown.
    runner = threading.Thread(target=fresh_loop.run_forever, daemon=True)
    runner.start()
    loops[loop_name] = fresh_loop
def get_client_internal_loop():
    """Return the loop for internal client operations"""
    key = "CLIENT_INTERNAL_LOOP"
    # Create lazily on first request.
    if loops[key] is None:
        _make_new_loop(key)
    return loops[key]
def get_client_handler_runner_loop():
    """Return the loop for handler runners"""
    key = "CLIENT_HANDLER_RUNNER_LOOP"
    # Create lazily on first request.
    if loops[key] is None:
        _make_new_loop(key)
    return loops[key]
def get_client_handler_loop():
    """Return the loop for invoking user-provided handlers on the client"""
    key = "CLIENT_HANDLER_LOOP"
    # Create lazily on first request.
    if loops[key] is None:
        _make_new_loop(key)
    return loops[key]

View File

@@ -0,0 +1,164 @@
# uncompyle6 version 3.9.2
# Python bytecode version base 3.7.0 (3394)
# Decompiled from: Python 3.8.19 (default, Mar 20 2024, 15:27:52)
# [Clang 14.0.6 ]
# Embedded file name: /var/user/app/device_supervisorbak/device_supervisor/lib/azure/iot/device/iothub/edge_hsm.py
# Compiled at: 2024-04-18 03:12:58
# Size of source mod 2**32: 6242 bytes
import logging, json, base64, requests, requests_unixsocket
from six.moves import urllib, http_client
from azure.iot.device.common.chainable_exception import ChainableException
from azure.iot.device.common.auth.signing_mechanism import SigningMechanism
from azure.iot.device import user_agent
requests_unixsocket.monkeypatch()
logger = logging.getLogger(__name__)
class IoTEdgeError(ChainableException):
    """Error raised when communication with the IoT Edge workload API (HSM) fails."""
    pass
class IoTEdgeHsm(SigningMechanism):
    """
    Constructor for instantiating a iot hsm object. This is an object that
    communicates with the Azure IoT Edge HSM in order to get connection credentials
    for an Azure IoT Edge module. The credentials that this object return come in
    two forms:

    1. The trust bundle, which is a certificate that can be used as a trusted cert
       to authenticate the SSL connection between the IoE Edge module and IoT Edge
    2. A signing function, which can be used to create the sig field for a
       SharedAccessSignature string which can be used to authenticate with Iot Edge
    """

    def __init__(self, module_id, generation_id, workload_uri, api_version):
        """
        Constructor for instantiating a Azure IoT Edge HSM object

        :param str module_id: The module id
        :param str api_version: The API version
        :param str generation_id: The module generation id
        :param str workload_uri: The workload uri
        """
        # Module id is URI-encoded because it is used as a URL path segment.
        self.module_id = urllib.parse.quote(module_id, safe="")
        self.api_version = api_version
        self.generation_id = generation_id
        # Normalize the socket URI into the requests_unixsocket form.
        self.workload_uri = _format_socket_uri(workload_uri)

    def get_certificate(self):
        """
        Return the server verification certificate from the trust bundle that can be used to
        validate the server-side SSL TLS connection that we use to talk to Edge

        :return: The server verification certificate to use for connections to the Azure IoT Edge
            instance, as a PEM certificate in string form.

        :raises: IoTEdgeError if unable to retrieve the certificate.
        """
        response = requests.get(
            self.workload_uri + "trust-bundle",
            params={"api-version": self.api_version},
            headers={"User-Agent": urllib.parse.quote_plus(user_agent.get_iothub_user_agent())},
        )
        try:
            response.raise_for_status()
        except requests.exceptions.HTTPError as e:
            raise IoTEdgeError(message="Unable to get trust bundle from Edge", cause=e)
        try:
            bundle = response.json()
        except ValueError as e:
            raise IoTEdgeError(message="Unable to decode trust bundle", cause=e)
        try:
            return bundle["certificate"]
        except KeyError as e:
            raise IoTEdgeError(message="No certificate in trust bundle", cause=e)

    def sign(self, data_str):
        """
        Use the IoTEdge HSM to sign a piece of string data. The caller should then insert the
        returned value (the signature) into the 'sig' field of a SharedAccessSignature string.

        :param str data_str: The data string to sign

        :return: The signature, as a URI-encoded and base64-encoded value that is ready to
            directly insert into the SharedAccessSignature string.

        :raises: IoTEdgeError if unable to sign the data.
        """
        # The workload API expects the payload base64-encoded.
        payload = base64.b64encode(data_str.encode("utf-8")).decode()
        url = "{workload_uri}modules/{module_id}/genid/{gen_id}/sign".format(
            workload_uri=self.workload_uri,
            module_id=self.module_id,
            gen_id=self.generation_id,
        )
        request_body = {'keyId':"primary", 
         'algo':"HMACSHA256", 'data':payload}
        response = requests.post(
            url=url,
            params={"api-version": self.api_version},
            headers={"User-Agent": urllib.parse.quote(user_agent.get_iothub_user_agent(), safe="")},
            data=json.dumps(request_body),
        )
        try:
            response.raise_for_status()
        except requests.exceptions.HTTPError as e:
            raise IoTEdgeError(message="Unable to sign data", cause=e)
        try:
            sign_response = response.json()
        except ValueError as e:
            raise IoTEdgeError(message="Unable to decode signed data", cause=e)
        try:
            return sign_response["digest"]
        except KeyError as e:
            raise IoTEdgeError(message="No signed data received", cause=e)
def _format_socket_uri(old_uri):
"""
This function takes a socket URI in one form and converts it into another form.
The source form is based on what we receive inside the IOTEDGE_WORKLOADURI
environment variable, and it looks like this:
"unix:///var/run/iotedge/workload.sock"
The destination form is based on what the requests_unixsocket library expects
and it looks like this:
"http+unix://%2Fvar%2Frun%2Fiotedge%2Fworkload.sock/"
The function changes the prefix, uri-encodes the path, and adds a slash
at the end.
If the socket URI does not start with unix:// this function only adds
a slash at the end.
:param old_uri: The URI in IOTEDGE_WORKLOADURI form
:return: The URI in requests_unixsocket form
"""
old_prefix = "unix://"
new_prefix = "http+unix://"
if old_uri.startswith(old_prefix):
stripped_uri = old_uri[len(old_prefix)[:None]]
if stripped_uri.endswith("/"):
stripped_uri = stripped_uri[None[:-1]]
new_uri = new_prefix + urllib.parse.quote(stripped_uri, safe="")
else:
new_uri = old_uri
if not new_uri.endswith("/"):
new_uri += "/"
return new_uri

View File

@@ -0,0 +1,163 @@
# uncompyle6 version 3.9.2
# Python bytecode version base 3.7.0 (3394)
# Decompiled from: Python 3.8.19 (default, Mar 20 2024, 15:27:52)
# [Clang 14.0.6 ]
# Embedded file name: /var/user/app/device_supervisorbak/device_supervisor/lib/azure/iot/device/iothub/inbox_manager.py
# Compiled at: 2024-04-18 03:12:58
# Size of source mod 2**32: 7035 bytes
"""This module contains a manager for inboxes."""
import logging
logger = logging.getLogger(__name__)
class InboxManager(object):
    """Manages the various Inboxes for a client.

    :ivar c2d_message_inbox: The C2D message Inbox.
    :ivar input_message_inboxes: A dictionary mapping input names to input message Inboxes.
    :ivar generic_method_request_inbox: The generic method request Inbox.
    :ivar named_method_request_inboxes: A dictionary mapping method names to method request Inboxes.
    """

    def __init__(self, inbox_type):
        """Initializer for the InboxManager.

        :param inbox_type: An Inbox class that the manager will use to create Inboxes.
        """
        self._create_inbox = inbox_type
        # Singleton inboxes are created eagerly; per-input and per-method
        # inboxes are created lazily on first access.
        self.unified_message_inbox = self._create_inbox()
        self.generic_method_request_inbox = self._create_inbox()
        self.twin_patch_inbox = self._create_inbox()
        self.c2d_message_inbox = self._create_inbox()
        self.input_message_inboxes = {}
        self.named_method_request_inboxes = {}
        self.use_unified_msg_mode = False

    def get_unified_message_inbox(self):
        """Retrieve the Inbox for all messages (C2D and Input)."""
        return self.unified_message_inbox

    def get_input_message_inbox(self, input_name):
        """Retrieve the input message Inbox for a given input.

        If the Inbox does not already exist, it will be created.

        :param str input_name: The name of the input for which the associated Inbox is desired.
        :returns: An Inbox for input messages on the selected input.
        """
        if input_name not in self.input_message_inboxes:
            self.input_message_inboxes[input_name] = self._create_inbox()
        return self.input_message_inboxes[input_name]

    def get_c2d_message_inbox(self):
        """Retrieve the Inbox for C2D messages.

        :returns: An Inbox for C2D messages.
        """
        return self.c2d_message_inbox

    def get_method_request_inbox(self, method_name=None):
        """Retrieve the method request Inbox for a given method name if provided,
        or for generic method requests if not.

        If the named Inbox does not already exist, it will be created.

        :param str method_name: Optional. The name of the method for which the
            associated Inbox is desired.
        :returns: An Inbox for method requests.
        """
        if not method_name:
            return self.generic_method_request_inbox
        if method_name not in self.named_method_request_inboxes:
            self.named_method_request_inboxes[method_name] = self._create_inbox()
        return self.named_method_request_inboxes[method_name]

    def get_twin_patch_inbox(self):
        """Retrieve the Inbox for twin patches that arrive from the service.

        :returns: An Inbox for twin patches
        """
        return self.twin_patch_inbox

    def clear_all_method_requests(self):
        """Delete all method requests currently in inboxes."""
        self.generic_method_request_inbox.clear()
        for named_inbox in self.named_method_request_inboxes.values():
            named_inbox.clear()

    def route_input_message(self, incoming_message):
        """Route an incoming input message.

        In unified message mode, route to the unified message inbox.
        In standard mode, route to the corresponding input message Inbox. If the
        input is unknown, the message will be dropped.

        :param incoming_message: The message to be routed.
        :returns: Boolean indicating if message was successfully routed or not.
        """
        input_name = incoming_message.input_name
        if self.use_unified_msg_mode:
            self.unified_message_inbox._put(incoming_message)
            return True
        target = self.input_message_inboxes.get(input_name)
        if target is None:
            logger.warning("No input message inbox for {} - dropping message".format(input_name))
            return False
        target._put(incoming_message)
        logger.debug("Input message sent to {} inbox".format(input_name))
        return True

    def route_c2d_message(self, incoming_message):
        """Route an incoming C2D message.

        In unified message mode, route to the unified message inbox.
        In standard mode, route to the C2D message Inbox.

        :param incoming_message: The message to be routed.
        :returns: Boolean indicating if message was successfully routed or not.
        """
        if self.use_unified_msg_mode:
            self.unified_message_inbox._put(incoming_message)
        else:
            self.c2d_message_inbox._put(incoming_message)
            logger.debug("C2D message sent to inbox")
        return True

    def route_method_request(self, incoming_method_request):
        """Route an incoming method request to the correct method request Inbox.

        If the method name is recognized, it will be routed to a method-specific
        Inbox. Otherwise, it will be routed to the generic method request Inbox.

        :param incoming_method_request: The method request to be routed.
        :returns: Boolean indicating if the method request was successfully routed or not.
        """
        target = self.named_method_request_inboxes.get(
            incoming_method_request.name, self.generic_method_request_inbox
        )
        target._put(incoming_method_request)
        return True

    def route_twin_patch(self, incoming_patch):
        """Route an incoming twin patch to the twin patch Inbox.

        :param incoming_patch: The patch to be routed.
        :returns: Boolean indicating if patch was successfully routed or not.
        """
        self.twin_patch_inbox._put(incoming_patch)
        logger.debug("twin patch message sent to inbox")
        return True

View File

@@ -0,0 +1,13 @@
# uncompyle6 version 3.9.2
# Python bytecode version base 3.7.0 (3394)
# Decompiled from: Python 3.8.19 (default, Mar 20 2024, 15:27:52)
# [Clang 14.0.6 ]
# Embedded file name: /var/user/app/device_supervisorbak/device_supervisor/lib/azure/iot/device/iothub/models/__init__.py
# Compiled at: 2024-04-18 03:12:58
# Size of source mod 2**32: 202 bytes
"""Azure IoT Hub Device SDK Models
This package provides object models for use within the Azure IoT Hub Device SDK.
"""
from .message import Message
from .methods import MethodRequest, MethodResponse

View File

@@ -0,0 +1,58 @@
# uncompyle6 version 3.9.2
# Python bytecode version base 3.7.0 (3394)
# Decompiled from: Python 3.8.19 (default, Mar 20 2024, 15:27:52)
# [Clang 14.0.6 ]
# Embedded file name: /var/user/app/device_supervisorbak/device_supervisor/lib/azure/iot/device/iothub/models/message.py
# Compiled at: 2024-04-18 03:12:58
# Size of source mod 2**32: 3686 bytes
"""This module contains a class representing messages that are sent or received.
"""
from azure.iot.device import constant
import sys
class Message(object):
    """Represents a message to or from IoTHub.

    :ivar data: The data that constitutes the payload.
    :ivar custom_properties: Dictionary of custom message properties. The keys
        and values of these properties will always be string.
    :ivar message_id: A user-settable identifier for the message used for
        request-reply patterns. Format: A case-sensitive string (up to 128
        characters long) of ASCII 7-bit alphanumeric characters plus
        {'-', ':', '.', '+', '%', '_', '#', '*', '?', '!', '(', ')', ',', '=',
        '@', ';', '$'}.
    :ivar expiry_time_utc: Date and time of message expiration in UTC format.
    :ivar correlation_id: A property in a response message that typically
        contains the message_id of the request, in request-reply patterns.
    :ivar user_id: An ID to specify the origin of messages.
    :ivar content_encoding: Content encoding of the message data. Can be
        'utf-8', 'utf-16' or 'utf-32'.
    :ivar content_type: Content type property used to route messages with the
        message-body. Can be 'application/json'.
    :ivar output_name: Name of the output that the message is being sent to.
    :ivar input_name: Name of the input that the message was received on.
    """

    def __init__(self, data, message_id=None, content_encoding=None, content_type=None, output_name=None):
        """Initializer for Message.

        :param data: The data that constitutes the payload.
        :param str message_id: A user-settable identifier for the message used
            for request-reply patterns.
        :param str content_encoding: Content encoding of the message data.
            Can be 'utf-8', 'utf-16' or 'utf-32'.
        :param str content_type: Content type property used to route messages
            with the message body.
        :param str output_name: Name of the output that the message is being
            sent to.
        """
        self.data = data
        self.custom_properties = {}
        self.message_id = message_id
        self.expiry_time_utc = None
        self.correlation_id = None
        self.user_id = None
        self.content_encoding = content_encoding
        self.content_type = content_type
        self.output_name = output_name
        self.input_name = None
        self._iothub_interface_id = None

    @property
    def iothub_interface_id(self):
        """The IoTHub interface id (read-only)."""
        return self._iothub_interface_id

    def set_as_security_message(self):
        """Set the message as a security message.

        This is a provisional API. Functionality not yet guaranteed.
        """
        self._iothub_interface_id = constant.SECURITY_MESSAGE_INTERFACE_ID

    def __str__(self):
        return str(self.data)

    def get_size(self):
        """Return an approximate size of the message in bytes.

        Sums sys.getsizeof over all non-None instance attributes (excluding
        the custom-properties dict object itself) plus all non-None custom
        property values.  Note: sys.getsizeof is shallow.
        """
        attr_size = sum(
            sys.getsizeof(value)
            for value in self.__dict__.values()
            if value is not None and value is not self.custom_properties
        )
        prop_size = 0
        if self.custom_properties:
            prop_size = sum(
                sys.getsizeof(value)
                for value in self.custom_properties.values()
                if value is not None
            )
        return attr_size + prop_size

View File

@@ -0,0 +1,62 @@
# uncompyle6 version 3.9.2
# Python bytecode version base 3.7.0 (3394)
# Decompiled from: Python 3.8.19 (default, Mar 20 2024, 15:27:52)
# [Clang 14.0.6 ]
# Embedded file name: /var/user/app/device_supervisorbak/device_supervisor/lib/azure/iot/device/iothub/models/methods.py
# Compiled at: 2024-04-18 03:12:58
# Size of source mod 2**32: 2728 bytes
"""This module contains classes related to direct method invocations.
"""
class MethodRequest(object):
    """Represents a request to invoke a direct method.

    :ivar str request_id: The request id.
    :ivar str name: The name of the method to be invoked.
    :ivar dict payload: The JSON payload being sent with the request.
    """

    def __init__(self, request_id, name, payload):
        """Initializer for a MethodRequest.

        :param str request_id: The request id.
        :param str name: The name of the method to be invoked.
        :param dict payload: The JSON payload being sent with the request.
        """
        # Stored privately; exposed read-only via the properties below.
        self._request_id = request_id
        self._name = name
        self._payload = payload

    @property
    def request_id(self):
        """The request id (read-only)."""
        return self._request_id

    @property
    def name(self):
        """The method name (read-only)."""
        return self._name

    @property
    def payload(self):
        """The JSON payload (read-only)."""
        return self._payload
class MethodResponse(object):
    """Represents a response to a direct method.

    :ivar str request_id: The request id of the MethodRequest being responded to.
    :ivar int status: The status of the execution of the MethodRequest.
    :ivar payload: The JSON payload to be sent with the response.
    :type payload: dict, str, int, float, bool, or None (JSON compatible values)
    """

    def __init__(self, request_id, status, payload=None):
        """Initializer for MethodResponse.

        :param str request_id: The request id of the MethodRequest being responded to.
        :param int status: The status of the execution of the MethodRequest.
        :param payload: The JSON payload to be sent with the response. (OPTIONAL)
        :type payload: dict, str, int, float, bool, or None (JSON compatible values)
        """
        self.request_id = request_id
        self.status = status
        self.payload = payload

    @classmethod
    def create_from_method_request(cls, method_request, status, payload=None):
        """Factory method for creating a MethodResponse from a MethodRequest.

        :param method_request: The MethodRequest object to respond to.
        :type method_request: MethodRequest
        :param int status: The status of the execution of the MethodRequest.
        :param payload: The JSON payload to be sent with the response. (OPTIONAL)
        :type payload: dict, str, int, float, bool, or None (JSON compatible values)
        """
        return cls(
            request_id=method_request.request_id,
            status=status,
            payload=payload,
        )

View File

@@ -0,0 +1,18 @@
# uncompyle6 version 3.9.2
# Python bytecode version base 3.7.0 (3394)
# Decompiled from: Python 3.8.19 (default, Mar 20 2024, 15:27:52)
# [Clang 14.0.6 ]
# Embedded file name: /var/user/app/device_supervisorbak/device_supervisor/lib/azure/iot/device/iothub/models/twin.py
# Compiled at: 2024-04-18 03:12:58
# Size of source mod 2**32: 1329 bytes
"""This module contains classes related to device twin and module twin functionality
"""
class Twin(object):
    """Represents a device twin or module twin.

    :ivar desired_properties: The desired properties for the Twin. These are properties
        which are sent _to_ the device or module to indicate the _desired_ state of the
        device or module.
    :type desired_properties: dict, str, int, float, bool, or None (JSON compatible values)
    :ivar reported_properties: The reported properties for the Twin. These are properties
        which are sent _from_ the device or module to indicate the _actual_ state of the device.
    :type reported_properties: dict, str, int, float, bool, or None (JSON compatible values)
    """

    def __init__(self):
        """Initializer for a Twin object."""
        # Fix: attribute was misspelled "desiried_properties", which did not
        # match the documented name "desired_properties" in the class docstring.
        self.desired_properties = None
        self.reported_properties = None

View File

@@ -0,0 +1,16 @@
# uncompyle6 version 3.9.2
# Python bytecode version base 3.7.0 (3394)
# Decompiled from: Python 3.8.19 (default, Mar 20 2024, 15:27:52)
# [Clang 14.0.6 ]
# Embedded file name: /var/user/app/device_supervisorbak/device_supervisor/lib/azure/iot/device/iothub/pipeline/__init__.py
# Compiled at: 2024-04-18 03:12:58
# Size of source mod 2**32: 270 bytes
"""Azure IoT Hub Device SDK Pipeline
This package provides a protocol pipeline for use with the Azure IoT Hub Device SDK.
INTERNAL USAGE ONLY
"""
from .mqtt_pipeline import MQTTPipeline
from .http_pipeline import HTTPPipeline
from .config import IoTHubPipelineConfig

View File

@@ -0,0 +1,30 @@
# uncompyle6 version 3.9.2
# Python bytecode version base 3.7.0 (3394)
# Decompiled from: Python 3.8.19 (default, Mar 20 2024, 15:27:52)
# [Clang 14.0.6 ]
# Embedded file name: /var/user/app/device_supervisorbak/device_supervisor/lib/azure/iot/device/iothub/pipeline/config.py
# Compiled at: 2024-04-18 03:12:58
# Size of source mod 2**32: 1962 bytes
import logging
from azure.iot.device.common.pipeline.config import BasePipelineConfig
logger = logging.getLogger(__name__)
class IoTHubPipelineConfig(BasePipelineConfig):
    """A class for storing all configurations/options for IoTHub clients in the
    Azure IoT Python Device Client Library.
    """

    def __init__(self, hostname, device_id, module_id=None, product_info='', **kwargs):
        """Initializer for IoTHubPipelineConfig which passes all unrecognized
        keyword-args down to BasePipelineConfig to be evaluated. This stacked
        options setting is to allow for unique configuration options to exist
        between the multiple clients, while maintaining a base configuration
        class with shared config options.

        :param str hostname: The hostname of the IoTHub to connect to
        :param str device_id: The device identity being used with the IoTHub
        :param str module_id: The module identity being used with the IoTHub
        :param str product_info: A custom identification string for the type of
            device connecting to Azure IoT Hub.
        """
        super(IoTHubPipelineConfig, self).__init__(hostname=hostname, **kwargs)
        self.device_id = device_id
        self.module_id = module_id
        self.product_info = product_info
        # Feature flags; both start disabled and are switched on elsewhere
        # depending on the client type in use.
        self.blob_upload = False
        self.method_invoke = False

View File

@@ -0,0 +1,14 @@
# uncompyle6 version 3.9.2
# Python bytecode version base 3.7.0 (3394)
# Decompiled from: Python 3.8.19 (default, Mar 20 2024, 15:27:52)
# [Clang 14.0.6 ]
# Embedded file name: /var/user/app/device_supervisorbak/device_supervisor/lib/azure/iot/device/iothub/pipeline/constant.py
# Compiled at: 2024-04-18 03:12:58
# Size of source mod 2**32: 498 bytes
"""This module contains constants realted to the pipeline package.
"""
# Feature-name constants. These strings are used as keys in the pipeline's
# feature_enabled map (see MQTTPipeline.__init__) to track which message
# features have been enabled on the transport.
C2D_MSG = "c2d"  # cloud-to-device messages
INPUT_MSG = "input"  # input messages (module input routing)
METHODS = "methods"  # direct method requests
TWIN = "twin"  # twin request/response
TWIN_PATCHES = "twin_patches"  # twin desired-properties patches

View File

@@ -0,0 +1,10 @@
# uncompyle6 version 3.9.2
# Python bytecode version base 3.7.0 (3394)
# Decompiled from: Python 3.8.19 (default, Mar 20 2024, 15:27:52)
# [Clang 14.0.6 ]
# Embedded file name: /var/user/app/device_supervisorbak/device_supervisor/lib/azure/iot/device/iothub/pipeline/exceptions.py
# Compiled at: 2024-04-18 03:12:58
# Size of source mod 2**32: 1125 bytes
"""This module defines an exception surface, exposed as part of the pipeline API"""
from azure.iot.device.common.pipeline.pipeline_exceptions import *
from azure.iot.device.common.transport_exceptions import ConnectionFailedError, ConnectionDroppedError, UnauthorizedError, ProtocolClientError, TlsExchangeAuthError, ProtocolProxyError

View File

@@ -0,0 +1,61 @@
# uncompyle6 version 3.9.2
# Python bytecode version base 3.7.0 (3394)
# Decompiled from: Python 3.8.19 (default, Mar 20 2024, 15:27:52)
# [Clang 14.0.6 ]
# Embedded file name: /var/user/app/device_supervisorbak/device_supervisor/lib/azure/iot/device/iothub/pipeline/http_map_error.py
# Compiled at: 2024-04-18 03:12:58
# Size of source mod 2**32: 3301 bytes
def translate_error(sc, reason):
    """Map an HTTP status code and reason phrase to an error description string.

    Codes_SRS_NODE_IOTHUB_REST_API_CLIENT_16_012: [Any error object returned by translate_error shall inherit from the generic Error Javascript object and have 3 properties:
    - response shall contain the IncomingMessage object returned by the HTTP layer.
    - reponseBody shall contain the content of the HTTP response.
    - message shall contain a human-readable error message.]

    :param int sc: The HTTP status code.
    :param str reason: The HTTP reason phrase.
    :return: A string of the form "<ErrorName>(Error: <reason>)".
    """
    message = "Error: {}".format(reason)
    # Status codes with a direct one-to-one error-name mapping.
    simple_codes = {
        400: "ArgumentError",
        401: "UnauthorizedError",
        403: "TooManyDevicesError",
        408: "DeviceTimeoutError",
        409: "DeviceAlreadyExistsError",
        412: "InvalidEtagError",
        429: "ThrottlingError",
        500: "InternalServerError",
        502: "BadDeviceResponseError",
        503: "ServiceUnavailableError",
        504: "GatewayTimeoutError",
    }
    if sc == 404:
        # 404 is disambiguated by the exact reason phrase.
        if reason == "Device Not Found":
            return "DeviceNotFoundError({})".format(message)
        if reason == "IoTHub Not Found":
            return "IotHubNotFoundError({})".format(message)
        return "Error('Not found')"
    return "{}({})".format(simple_codes.get(sc, "Error"), message)

View File

@@ -0,0 +1,40 @@
# uncompyle6 version 3.9.2
# Python bytecode version base 3.7.0 (3394)
# Decompiled from: Python 3.8.19 (default, Mar 20 2024, 15:27:52)
# [Clang 14.0.6 ]
# Embedded file name: /var/user/app/device_supervisorbak/device_supervisor/lib/azure/iot/device/iothub/pipeline/http_path_iothub.py
# Compiled at: 2024-04-18 03:12:58
# Size of source mod 2**32: 1833 bytes
import logging
import six.moves.urllib as urllib
logger = logging.getLogger(__name__)
def get_method_invoke_path(device_id, module_id=None):
    """Return the relative path for invoking a method on a device or module.

    The result is of the format
    twins/uri_encode($device_id)/modules/uri_encode($module_id)/methods
    (the "modules" segment is omitted when no module_id is given).

    :param str device_id: The target device id.
    :param str module_id: Optional target module id.
    :return: The URL path, with ids encoded via quote_plus.
    """
    encoded_device = urllib.parse.quote_plus(device_id)
    if not module_id:
        return "twins/{device_id}/methods".format(device_id=encoded_device)
    return "twins/{device_id}/modules/{module_id}/methods".format(
        device_id=encoded_device,
        module_id=urllib.parse.quote_plus(module_id),
    )
def get_storage_info_for_blob_path(device_id):
    """Return the path for getting the storage sdk credential information from IoT Hub.

    This does not take a module_id since get_storage_info_for_blob_path should
    only ever be invoked on device clients.

    :param str device_id: The device id, which will be URL-encoded.
    :return: The path, of the format devices/uri_encode($device_id)/files
    """
    encoded_device = urllib.parse.quote_plus(device_id)
    return "devices/" + encoded_device + "/files"
def get_notify_blob_upload_status_path(device_id):
    """Return the path for notifying IoT Hub of blob upload status.

    This does not take a module_id since get_notify_blob_upload_status_path
    should only ever be invoked on device clients.

    :param str device_id: The device id, which will be URL-encoded.
    :return: The path, of the format devices/uri_encode($device_id)/files/notifications
    """
    encoded_device = urllib.parse.quote_plus(device_id)
    return "devices/" + encoded_device + "/files/notifications"

View File

@@ -0,0 +1,115 @@
# uncompyle6 version 3.9.2
# Python bytecode version base 3.7.0 (3394)
# Decompiled from: Python 3.8.19 (default, Mar 20 2024, 15:27:52)
# [Clang 14.0.6 ]
# Embedded file name: /var/user/app/device_supervisorbak/device_supervisor/lib/azure/iot/device/iothub/pipeline/http_pipeline.py
# Compiled at: 2024-04-18 03:12:58
# Size of source mod 2**32: 8011 bytes
import logging, sys
from azure.iot.device.common.evented_callback import EventedCallback
from azure.iot.device.common.pipeline import pipeline_stages_base, pipeline_ops_base, pipeline_stages_http
from azure.iot.device.iothub.pipeline import exceptions as pipeline_exceptions
from . import constant, pipeline_stages_iothub, pipeline_ops_iothub, pipeline_ops_iothub_http, pipeline_stages_iothub_http
logger = logging.getLogger(__name__)
class HTTPPipeline(object):
    """Pipeline to communicate with Edge.
    Uses HTTP.
    """

    def __init__(self, pipeline_configuration):
        """
        Constructor for instantiating a pipeline adapter object.

        :param pipeline_configuration: The configuration generated based on user inputs
        """
        # Stage order matters: the IoTHub translation stage runs before the
        # HTTP transport stage.
        self._pipeline = (
            pipeline_stages_base.PipelineRootStage(pipeline_configuration)
            .append_stage(pipeline_stages_iothub_http.IoTHubHTTPTranslationStage())
            .append_stage(pipeline_stages_http.HTTPTransportStage())
        )
        # Run pipeline initialization synchronously before returning.
        init_done = EventedCallback()
        self._pipeline.run_op(pipeline_ops_base.InitializePipelineOperation(callback=init_done))
        init_done.wait_for_completion()

    def invoke_method(self, device_id, method_params, callback, module_id=None):
        """
        Send a request to the service to invoke a method on a target device or module.

        :param device_id: The target device id
        :param method_params: The method parameters to be invoked on the target client
        :param callback: callback which is called when request has been fulfilled.
            On success, this callback is called with the error=None.
            On failure, this callback is called with error set to the cause of the failure.
        :param module_id: The target module id

        The following exceptions are not "raised", but rather returned via the "error"
        parameter when invoking "callback":

        :raises: :class:`azure.iot.device.iothub.pipeline.exceptions.ProtocolClientError`
        """
        logger.debug("HTTPPipeline invoke_method called")
        # Guard: method invoke is only wired up for Edge-generated module clients.
        if not self._pipeline.pipeline_configuration.method_invoke:
            return callback(
                error=pipeline_exceptions.PipelineError(
                    "invoke_method called, but it is only supported on module clients generated from an edge environment. If you are not using a module generated from an edge environment, you cannot use invoke_method"
                )
            )

        def handle_complete(op, error):
            callback(error=error, invoke_method_response=op.method_response)

        self._pipeline.run_op(
            pipeline_ops_iothub_http.MethodInvokeOperation(
                target_device_id=device_id,
                target_module_id=module_id,
                method_params=method_params,
                callback=handle_complete,
            )
        )

    def get_storage_info_for_blob(self, blob_name, callback):
        """
        Sends a POST request to the IoT Hub service endpoint to retrieve an object
        that contains information for uploading via the Storage SDK.

        :param blob_name: The name of the blob that will be uploaded via the Azure Storage SDK.
        :param callback: callback which is called when request has been fulfilled.
            On success, this callback is called with the error=None, and the storage_info
            set to the information JSON received from the service.
            On failure, this callback is called with error set to the cause of the failure,
            and the storage_info=None.

        The following exceptions are not "raised", but rather returned via the "error"
        parameter when invoking "callback":

        :raises: :class:`azure.iot.device.iothub.pipeline.exceptions.ProtocolClientError`
        """
        logger.debug("HTTPPipeline get_storage_info_for_blob called")
        # Guard: blob upload is only wired up for device clients.
        if not self._pipeline.pipeline_configuration.blob_upload:
            return callback(
                error=pipeline_exceptions.PipelineError(
                    "get_storage_info_for_blob called, but it is only supported for use with device clients. Ensure you are using a device client."
                )
            )

        def handle_complete(op, error):
            callback(error=error, storage_info=op.storage_info)

        self._pipeline.run_op(
            pipeline_ops_iothub_http.GetStorageInfoOperation(
                blob_name=blob_name,
                callback=handle_complete,
            )
        )

    def notify_blob_upload_status(self, correlation_id, is_success, status_code, status_description, callback):
        """
        Sends a POST request to a IoT Hub service endpoint to notify the status of
        the Storage SDK call for a blob upload.

        :param str correlation_id: Provided by IoT Hub on get_storage_info_for_blob request.
        :param bool is_success: A boolean that indicates whether the file was uploaded successfully.
        :param int status_code: A numeric status code that is the status for the upload of the
            file to storage.
        :param str status_description: A description that corresponds to the status_code.
        :param callback: callback which is called when request has been fulfilled.
            On success, this callback is called with the error=None.
            On failure, this callback is called with error set to the cause of the failure.

        The following exceptions are not "raised", but rather returned via the "error"
        parameter when invoking "callback":

        :raises: :class:`azure.iot.device.iothub.pipeline.exceptions.ProtocolClientError`
        """
        logger.debug("HTTPPipeline notify_blob_upload_status called")
        # Guard: blob upload is only wired up for device clients.
        if not self._pipeline.pipeline_configuration.blob_upload:
            return callback(
                error=pipeline_exceptions.PipelineError(
                    "notify_blob_upload_status called, but it is only supported for use with device clients. Ensure you are using a device client."
                )
            )

        def handle_complete(op, error):
            callback(error=error)

        self._pipeline.run_op(
            pipeline_ops_iothub_http.NotifyBlobUploadStatusOperation(
                correlation_id=correlation_id,
                is_success=is_success,
                status_code=status_code,
                status_description=status_description,
                callback=handle_complete,
            )
        )

View File

@@ -0,0 +1,308 @@
# uncompyle6 version 3.9.2
# Python bytecode version base 3.7.0 (3394)
# Decompiled from: Python 3.8.19 (default, Mar 20 2024, 15:27:52)
# [Clang 14.0.6 ]
# Embedded file name: /var/user/app/device_supervisorbak/device_supervisor/lib/azure/iot/device/iothub/pipeline/mqtt_pipeline.py
# Compiled at: 2024-04-18 03:12:58
# Size of source mod 2**32: 18148 bytes
import logging, sys
from azure.iot.device.common.evented_callback import EventedCallback
from azure.iot.device.common.pipeline import pipeline_stages_base, pipeline_ops_base, pipeline_stages_mqtt
from . import constant, pipeline_stages_iothub, pipeline_events_iothub, pipeline_ops_iothub, pipeline_stages_iothub_mqtt
logger = logging.getLogger(__name__)
class MQTTPipeline(object):
def __init__(self, pipeline_configuration):
"""
Constructor for instantiating a pipeline adapter object
:param auth_provider: The authentication provider
:param pipeline_configuration: The configuration generated based on user inputs
"""
self.feature_enabled = {(constant.C2D_MSG): False,
(constant.INPUT_MSG): False,
(constant.METHODS): False,
(constant.TWIN): False,
(constant.TWIN_PATCHES): False}
self.on_connected = None
self.on_disconnected = None
self.on_c2d_message_received = None
self.on_input_message_received = None
self.on_method_request_received = None
self.on_twin_patch_received = None
self._pipeline = pipeline_stages_base.PipelineRootStage(pipeline_configuration).append_stage(pipeline_stages_base.SasTokenRenewalStage()).append_stage(pipeline_stages_iothub.EnsureDesiredPropertiesStage()).append_stage(pipeline_stages_iothub.TwinRequestResponseStage()).append_stage(pipeline_stages_base.CoordinateRequestAndResponseStage()).append_stage(pipeline_stages_iothub_mqtt.IoTHubMQTTTranslationStage()).append_stage(pipeline_stages_base.AutoConnectStage()).append_stage(pipeline_stages_base.ReconnectStage()).append_stage(pipeline_stages_base.ConnectionLockStage()).append_stage(pipeline_stages_base.RetryStage()).append_stage(pipeline_stages_base.OpTimeoutStage()).append_stage(pipeline_stages_mqtt.MQTTTransportStage())
def _on_pipeline_event(event):
if isinstance(event, pipeline_events_iothub.C2DMessageEvent):
if self.on_c2d_message_received:
self.on_c2d_message_received(event.message)
else:
logger.error("C2D message event received with no handler. dropping.")
else:
if isinstance(event, pipeline_events_iothub.InputMessageEvent):
if self.on_input_message_received:
self.on_input_message_received(event.message)
else:
logger.error("input message event received with no handler. dropping.")
else:
if isinstance(event, pipeline_events_iothub.MethodRequestEvent):
if self.on_method_request_received:
self.on_method_request_received(event.method_request)
else:
logger.error("Method request event received with no handler. Dropping.")
else:
if isinstance(event, pipeline_events_iothub.TwinDesiredPropertiesPatchEvent):
if self.on_twin_patch_received:
self.on_twin_patch_received(event.patch)
else:
logger.error("Twin patch event received with no handler. Dropping.")
else:
logger.error("Dropping unknown pipeline event {}".format(event.name))
def _on_connected():
if self.on_connected:
self.on_connected()
def _on_disconnected():
if self.on_disconnected:
self.on_disconnected()
self._pipeline.on_pipeline_event_handler = _on_pipeline_event
self._pipeline.on_connected_handler = _on_connected
self._pipeline.on_disconnected_handler = _on_disconnected
callback = EventedCallback()
op = pipeline_ops_base.InitializePipelineOperation(callback=callback)
self._pipeline.run_op(op)
callback.wait_for_completion()
def connect(self, callback):
"""
Connect to the service.
:param callback: callback which is called when the connection to the service is complete.
The following exceptions are not "raised", but rather returned via the "error" parameter
when invoking "callback":
:raises: :class:`azure.iot.device.iothub.pipeline.exceptions.ConnectionFailedError`
:raises: :class:`azure.iot.device.iothub.pipeline.exceptions.ConnectionDroppedError`
:raises: :class:`azure.iot.device.iothub.pipeline.exceptions.UnauthorizedError`
:raises: :class:`azure.iot.device.iothub.pipeline.exceptions.ProtocolClientError`
"""
logger.debug("Starting ConnectOperation on the pipeline")
def on_complete(op, error):
callback(error=error)
self._pipeline.run_op(pipeline_ops_base.ConnectOperation(callback=on_complete))
def disconnect(self, callback):
"""
Disconnect from the service.
:param callback: callback which is called when the connection to the service has been disconnected
The following exceptions are not "raised", but rather returned via the "error" parameter
when invoking "callback":
:raises: :class:`azure.iot.device.iothub.pipeline.exceptions.ProtocolClientError`
"""
logger.debug("Starting DisconnectOperation on the pipeline")
def on_complete(op, error):
callback(error=error)
self._pipeline.run_op(pipeline_ops_base.DisconnectOperation(callback=on_complete))
def reauthorize_connection(self, callback):
    """
    Reauthorize connection to the service.

    Technically, this function will return upon disconnection. The disconnection will then
    immediately trigger a reconnect, but this function will not wait for that to return.
    This is (unfortunately) necessary while supporting MQTT3.

    :param callback: callback which is called when the connection to the service has been
        disconnected. Errors are not "raised"; they are delivered via the callback's
        "error" parameter:

    :raises: :class:`azure.iot.device.iothub.pipeline.exceptions.ProtocolClientError`
    """
    logger.debug("Starting ReauthorizeConnectionOperation on the pipeline")

    def _finished(op, error):
        callback(error=error)

    reauth_op = pipeline_ops_base.ReauthorizeConnectionOperation(callback=_finished)
    self._pipeline.run_op(reauth_op)
def send_message(self, message, callback):
    """
    Send a telemetry message to the service.

    :param message: message to send.
    :param callback: callback which is called when the message publish has been acknowledged
        by the service. Errors are not "raised"; they are delivered via the callback's
        "error" parameter:

    :raises: :class:`azure.iot.device.iothub.pipeline.exceptions.ConnectionFailedError`
    :raises: :class:`azure.iot.device.iothub.pipeline.exceptions.ConnectionDroppedError`
    :raises: :class:`azure.iot.device.iothub.pipeline.exceptions.UnauthorizedError`
    :raises: :class:`azure.iot.device.iothub.pipeline.exceptions.ProtocolClientError`
    """

    def _finished(op, error):
        callback(error=error)

    send_op = pipeline_ops_iothub.SendD2CMessageOperation(message=message, callback=_finished)
    self._pipeline.run_op(send_op)
def send_output_message(self, message, callback):
    """
    Send an output message to the service.

    :param message: message to send.
    :param callback: callback which is called when the message publish has been acknowledged
        by the service. Errors are not "raised"; they are delivered via the callback's
        "error" parameter:

    :raises: :class:`azure.iot.device.iothub.pipeline.exceptions.ConnectionFailedError`
    :raises: :class:`azure.iot.device.iothub.pipeline.exceptions.ConnectionDroppedError`
    :raises: :class:`azure.iot.device.iothub.pipeline.exceptions.UnauthorizedError`
    :raises: :class:`azure.iot.device.iothub.pipeline.exceptions.ProtocolClientError`
    """

    def _finished(op, error):
        callback(error=error)

    send_op = pipeline_ops_iothub.SendOutputMessageOperation(message=message, callback=_finished)
    self._pipeline.run_op(send_op)
def send_method_response(self, method_response, callback):
    """
    Send a method response to the service.

    :param method_response: the method response to send
    :param callback: callback which is called when response has been acknowledged by the
        service. Errors are not "raised"; they are delivered via the callback's "error"
        parameter:

    :raises: :class:`azure.iot.device.iothub.pipeline.exceptions.ConnectionFailedError`
    :raises: :class:`azure.iot.device.iothub.pipeline.exceptions.ConnectionDroppedError`
    :raises: :class:`azure.iot.device.iothub.pipeline.exceptions.UnauthorizedError`
    :raises: :class:`azure.iot.device.iothub.pipeline.exceptions.ProtocolClientError`
    """
    logger.debug("MQTTPipeline send_method_response called")

    def _finished(op, error):
        callback(error=error)

    response_op = pipeline_ops_iothub.SendMethodResponseOperation(
        method_response=method_response, callback=_finished)
    self._pipeline.run_op(response_op)
def get_twin(self, callback):
    """
    Send a request for a full twin to the service.

    :param callback: callback which is called when request has been acknowledged by the
        service. This callback should have two parameters. On success, it is called with
        the requested twin and error=None. On failure, it is called with twin=None and
        error set to the cause of the failure. Errors are not "raised"; they are delivered
        via the callback's "error" parameter:

    :raises: :class:`azure.iot.device.iothub.pipeline.exceptions.ConnectionFailedError`
    :raises: :class:`azure.iot.device.iothub.pipeline.exceptions.ConnectionDroppedError`
    :raises: :class:`azure.iot.device.iothub.pipeline.exceptions.UnauthorizedError`
    :raises: :class:`azure.iot.device.iothub.pipeline.exceptions.ProtocolClientError`
    """

    def _finished(op, error):
        if not error:
            callback(twin=op.twin)
        else:
            callback(error=error, twin=None)

    self._pipeline.run_op(pipeline_ops_iothub.GetTwinOperation(callback=_finished))
def patch_twin_reported_properties(self, patch, callback):
    """
    Send a patch for a twin's reported properties to the service.

    :param patch: the reported properties patch to send
    :param callback: callback which is called when request has been acknowledged by the
        service. Errors are not "raised"; they are delivered via the callback's "error"
        parameter:

    :raises: :class:`azure.iot.device.iothub.pipeline.exceptions.ConnectionFailedError`
    :raises: :class:`azure.iot.device.iothub.pipeline.exceptions.ConnectionDroppedError`
    :raises: :class:`azure.iot.device.iothub.pipeline.exceptions.UnauthorizedError`
    :raises: :class:`azure.iot.device.iothub.pipeline.exceptions.ProtocolClientError`
    """

    def _finished(op, error):
        callback(error=error)

    patch_op = pipeline_ops_iothub.PatchTwinReportedPropertiesOperation(
        patch=patch, callback=_finished)
    self._pipeline.run_op(patch_op)
def enable_feature(self, feature_name, callback):
    """
    Enable the given feature by subscribing to the appropriate topics.

    :param feature_name: one of the feature name constants from constant.py
    :param callback: callback which is called when the feature is enabled
    :raises: ValueError if feature_name is invalid
    """
    logger.debug("enable_feature {} called".format(feature_name))
    if feature_name not in self.feature_enabled:
        raise ValueError("Invalid feature_name")

    def _on_subscribe_done(op, error):
        # Only flip the flag on once the underlying subscribe succeeded.
        if error:
            logger.error("Subscribe for {} failed. Not enabling feature".format(feature_name))
        else:
            self.feature_enabled[feature_name] = True
        callback(error=error)

    enable_op = pipeline_ops_base.EnableFeatureOperation(
        feature_name=feature_name, callback=_on_subscribe_done)
    self._pipeline.run_op(enable_op)
def disable_feature(self, feature_name, callback):
    """
    Disable the given feature.

    :param feature_name: one of the feature name constants from constant.py
    :param callback: callback which is called when the feature is disabled
    :raises: ValueError if feature_name is invalid
    """
    logger.debug("disable_feature {} called".format(feature_name))
    if feature_name not in self.feature_enabled:
        raise ValueError("Invalid feature_name")
    # The flag is cleared immediately, before the pipeline acknowledges the disable.
    self.feature_enabled[feature_name] = False

    def _finished(op, error):
        callback(error=error)

    disable_op = pipeline_ops_base.DisableFeatureOperation(
        feature_name=feature_name, callback=_finished)
    self._pipeline.run_op(disable_op)
@property
def pipeline_configuration(self):
    """
    Pipeline Configuration for the pipeline. Note that while a new config object cannot be
    provided (read-only), the values stored in the config object CAN be changed.

    :returns: the configuration object held by the underlying pipeline.
    """
    return self._pipeline.pipeline_configuration
@property
def connected(self):
    """
    Read-only property to indicate if the transport is connected or not.

    :returns: the underlying pipeline's current connection state.
    """
    return self._pipeline.connected

View File

@@ -0,0 +1,342 @@
# uncompyle6 version 3.9.2
# Python bytecode version base 3.7.0 (3394)
# Decompiled from: Python 3.8.19 (default, Mar 20 2024, 15:27:52)
# [Clang 14.0.6 ]
# Embedded file name: /var/user/app/device_supervisorbak/device_supervisor/lib/azure/iot/device/iothub/pipeline/mqtt_topic_iothub.py
# Compiled at: 2024-04-18 03:12:58
# Size of source mod 2**32: 15230 bytes
import logging
from datetime import date
import six.moves.urllib as urllib
from azure.iot.device.common import version_compat
logger = logging.getLogger(__name__)
def _get_topic_base(device_id, module_id=None):
"""
return the string that is at the beginning of all topics for this
device/module
"""
topic = "devices/" + str(device_id)
if module_id:
topic = topic + "/modules/" + str(module_id)
return topic
def get_c2d_topic_for_subscribe(device_id):
    """
    :return: The topic for cloud to device messages. It is of the format
        "devices/<deviceid>/messages/devicebound/#"
    """
    base = _get_topic_base(device_id)
    return "{}/messages/devicebound/#".format(base)
def get_input_topic_for_subscribe(device_id, module_id):
    """
    :return: The topic for input messages. It is of the format
        "devices/<deviceId>/modules/<moduleId>/inputs/#"
    """
    base = _get_topic_base(device_id, module_id)
    return "{}/inputs/#".format(base)
def get_method_topic_for_subscribe():
    """
    :return: The topic for ALL incoming methods. It is of the format
        "$iothub/methods/POST/#"
    """
    # Single wildcard subscription covering every direct-method request.
    return "$iothub/methods/POST/#"
def get_twin_response_topic_for_subscribe():
    """
    :return: The topic for ALL incoming twin responses. It is of the format
        "$iothub/twin/res/#"
    """
    # Single wildcard subscription covering every twin GET/PATCH response.
    return "$iothub/twin/res/#"
def get_twin_patch_topic_for_subscribe():
    """
    :return: The topic for ALL incoming twin patches. It is of the format
        "$iothub/twin/PATCH/properties/desired/#"
    """
    # Single wildcard subscription covering every desired-properties patch.
    return "$iothub/twin/PATCH/properties/desired/#"
def get_telemetry_topic_for_publish(device_id, module_id):
    """
    Return the topic string used to publish telemetry.
    """
    base = _get_topic_base(device_id, module_id)
    return "{}/messages/events/".format(base)
def get_method_topic_for_publish(request_id, status):
    """
    :return: The topic for publishing method responses. It is of the format
        "$iothub/methods/res/<status>/?$rid=<requestId>"
    """
    # Both pieces are fully percent-encoded (safe="") before insertion.
    encoded_status = urllib.parse.quote(str(status), safe="")
    encoded_rid = urllib.parse.quote(str(request_id), safe="")
    return "$iothub/methods/res/{}/?$rid={}".format(encoded_status, encoded_rid)
def get_twin_topic_for_publish(method, resource_location, request_id):
    """
    :return: The topic for publishing twin requests / patches. It is of the format
        "$iothub/twin/<method><resourceLocation>?$rid=<requestId>"
    """
    # Only the request id is percent-encoded; method/resource are inserted as-is.
    encoded_rid = urllib.parse.quote(str(request_id), safe="")
    return "$iothub/twin/{}{}?$rid={}".format(method, resource_location, encoded_rid)
def is_c2d_topic(topic, device_id):
    """
    Topics for c2d message are of the following format:
    devices/<deviceId>/messages/devicebound

    :param topic: The topic string
    :return: True when the topic is a C2D topic for this device.
    """
    marker = "devices/{}/messages/devicebound".format(device_id)
    return marker in topic
def is_input_topic(topic, device_id, module_id):
    """
    Topics for inputs are of the following format:
    devices/<deviceId>/modules/<moduleId>/inputs/<inputName>

    :param topic: The topic string
    :return: True when the topic is an input topic for this device/module.

    BUG FIX: the decompiled body did `return device_id and module_id or False`,
    which returned the (truthy) module_id string itself and never inspected the
    topic; the topic check below was unreachable. Restored the intended guard.
    """
    if not device_id or not module_id:
        return False
    if "devices/{}/modules/{}/inputs/".format(device_id, module_id) in topic:
        return True
    return False
def is_method_topic(topic):
    """
    Topics for methods are of the following format:
    "$iothub/methods/POST/{method name}/?$rid={request id}"

    :param str topic: The topic string.
    :return: True when the topic carries an incoming method request.
    """
    return "$iothub/methods/POST" in topic
def is_twin_response_topic(topic):
    """Topics for twin responses are of the following format:
    $iothub/twin/res/{status}/?$rid={rid}

    :param str topic: The topic string
    :return: True when the topic carries a twin response.
    """
    prefix = "$iothub/twin/res/"
    return topic.startswith(prefix)
def is_twin_desired_property_patch_topic(topic):
    """Topics for twin desired property patches are of the following format:
    $iothub/twin/PATCH/properties/desired

    :param str topic: The topic string
    :return: True when the topic carries a desired-properties patch.
    """
    prefix = "$iothub/twin/PATCH/properties/desired"
    return topic.startswith(prefix)
def get_input_name_from_topic(topic):
    """
    Extract the input channel from the topic name.
    Topics for inputs are of the following format:
    devices/<deviceId>/modules/<moduleId>/inputs/<inputName>

    :param topic: The topic string
    :raises: ValueError when the topic is not an input topic
    """
    segments = topic.split("/")
    if len(segments) > 5 and segments[4] == "inputs":
        return urllib.parse.unquote(segments[5])
    raise ValueError("topic has incorrect format")
def get_method_name_from_topic(topic):
    """
    Extract the method name from the method topic.
    Topics for methods are of the following format:
    "$iothub/methods/POST/{method name}/?$rid={request id}"

    :param str topic: The topic string
    :raises: ValueError when the topic is not a well-formed method topic
    """
    segments = topic.split("/")
    if is_method_topic(topic) and len(segments) >= 4:
        # Segment 3 holds the (url-encoded) method name.
        return urllib.parse.unquote(segments[3])
    raise ValueError("topic has incorrect format")
def get_method_request_id_from_topic(topic):
    """
    Extract the Request ID (RID) from the method topic.
    Topics for methods are of the following format:
    "$iothub/methods/POST/{method name}/?$rid={request id}"

    :param str topic: the topic string
    :raises: ValueError if topic has incorrect format
    :returns: request id from topic string
    """
    segments = topic.split("/")
    if is_method_topic(topic) and len(segments) >= 4:
        # Everything after "?" is the property string; "rid" is mandatory there.
        query = topic.split("?")[1]
        return _extract_properties(query)["rid"]
    raise ValueError("topic has incorrect format")
def get_twin_request_id_from_topic(topic):
    """
    Extract the Request ID (RID) from the twin response topic.
    Topics for twin response are in the following format:
    "$iothub/twin/res/{status}/?$rid={rid}"

    :param str topic: The topic string
    :raises: ValueError if topic has incorrect format
    :returns: request id from topic string
    """
    segments = topic.split("/")
    if is_twin_response_topic(topic) and len(segments) >= 4:
        # Everything after "?" is the property string; "rid" is mandatory there.
        query = topic.split("?")[1]
        return _extract_properties(query)["rid"]
    raise ValueError("topic has incorrect format")
def get_twin_status_code_from_topic(topic):
    """
    Extract the status code from the twin response topic.
    Topics for twin response are in the following format:
    "$iothub/twin/res/{status}/?$rid={rid}"

    :param str topic: The topic string
    :raises: ValueError if the topic has incorrect format
    :returns: status code from topic string
    """
    segments = topic.split("/")
    if is_twin_response_topic(topic) and len(segments) >= 4:
        # Segment 3 holds the (url-encoded) HTTP-style status code.
        return urllib.parse.unquote(segments[3])
    raise ValueError("topic has incorrect format")
def extract_message_properties_from_topic(topic, message_received):
    """
    Extract key=value pairs from custom properties and set the properties on the received message.

    For extracting values corresponding to keys the following rules are followed:-
    If there is NO "=", the value is None
    If there is "=" with no value, the value is an empty string
    For anything else the value after "=" and before `&` is considered as the proper value

    :param topic: The topic string
    :param message_received: The message received with the payload in bytes
    :raises: ValueError when the topic is neither an input-message nor a C2D topic

    BUG FIX: the decompiled control flow left `properties` undefined (NameError)
    for a C2D topic that carries properties (len(parts) > 4 but parts[4] != "inputs")
    and for malformed short topics; restored the if/elif/else structure.
    """
    parts = topic.split("/")
    # Input topic: devices/<did>/modules/<mid>/inputs/<input>[/<props>]
    if len(parts) > 4 and parts[4] == "inputs":
        properties = parts[6] if len(parts) > 6 else None
    # C2D topic: devices/<did>/messages/devicebound[/<props>]
    elif len(parts) > 3 and parts[3] == "devicebound":
        properties = parts[4] if len(parts) > 4 else None
    else:
        raise ValueError("topic has incorrect format")

    # Transport-internal properties that must not be surfaced on the message.
    ignored_extraction_values = ["iothub-ack", "$.to"]
    if properties:
        key_value_pairs = properties.split("&")
        for entry in key_value_pairs:
            pair = entry.split("=")
            key = urllib.parse.unquote(pair[0])
            if len(pair) > 1:
                value = urllib.parse.unquote(pair[1])
            else:
                value = None
            if key in ignored_extraction_values:
                continue
            elif key == "$.mid":
                message_received.message_id = value
            elif key == "$.cid":
                message_received.correlation_id = value
            elif key == "$.uid":
                message_received.user_id = value
            elif key == "$.ct":
                message_received.content_type = value
            elif key == "$.ce":
                message_received.content_encoding = value
            elif key == "$.exp":
                message_received.expiry_time_utc = value
            else:
                message_received.custom_properties[key] = value
def encode_message_properties_in_topic(message_to_send, topic):
    """
    uri-encode the system properties of a message as key-value pairs on the topic with defined keys.
    Additionally if the message has user defined properties, the property keys and values shall be
    uri-encoded and appended at the end of the above topic with the following convention:
    '<key>=<value>&<key2>=<value2>&<key3>=<value3>(...)'

    :param message_to_send: The message to send
    :param topic: The topic which has not been encoded yet. For a device it looks like
        "devices/<deviceId>/messages/events/" and for a module it looks like
        "devices/<deviceId>/modules/<moduleId>/messages/events/"
    :return: The topic which has been uri-encoded
    :raises: ValueError when custom properties collide after str() conversion
    """
    system_properties = []
    if message_to_send.output_name:
        system_properties.append(("$.on", str(message_to_send.output_name)))
    # BUG FIX: this was previously an `elif`, which silently dropped $.mid from
    # any message that also had an output name set. message_id and output_name
    # are independent system properties and must both be encoded.
    if message_to_send.message_id:
        system_properties.append(("$.mid", str(message_to_send.message_id)))
    if message_to_send.correlation_id:
        system_properties.append(("$.cid", str(message_to_send.correlation_id)))
    if message_to_send.user_id:
        system_properties.append(("$.uid", str(message_to_send.user_id)))
    if message_to_send.content_type:
        system_properties.append(("$.ct", str(message_to_send.content_type)))
    if message_to_send.content_encoding:
        system_properties.append(("$.ce", str(message_to_send.content_encoding)))
    if message_to_send.iothub_interface_id:
        system_properties.append(("$.ifid", str(message_to_send.iothub_interface_id)))
    if message_to_send.expiry_time_utc:
        # date/datetime values are serialized as ISO-8601; strings pass through.
        system_properties.append((
            "$.exp",
            message_to_send.expiry_time_utc.isoformat()
            if isinstance(message_to_send.expiry_time_utc, date)
            else message_to_send.expiry_time_utc))

    system_properties_encoded = version_compat.urlencode(
        system_properties, quote_via=urllib.parse.quote)
    topic += system_properties_encoded

    if message_to_send.custom_properties and len(message_to_send.custom_properties) > 0:
        if system_properties:
            topic += "&"
        # Sort so the resulting topic is deterministic regardless of dict order.
        custom_prop_seq = sorted(
            (str(k), str(v)) for k, v in message_to_send.custom_properties.items())
        keys = [k for k, _ in custom_prop_seq]
        if len(keys) != len(set(keys)):
            raise ValueError("Duplicate keys in custom properties!")
        user_properties_encoded = version_compat.urlencode(
            custom_prop_seq, quote_via=urllib.parse.quote)
        topic += user_properties_encoded
    return topic
def _extract_properties(properties_str):
"""Return a dictionary of properties from a string in the format
${key1}={value1}&${key2}={value2}...&${keyn}={valuen}
"""
d = {}
kv_pairs = properties_str.split("&")
for entry in kv_pairs:
pair = entry.split("=")
key = urllib.parse.unquote(pair[0]).lstrip("$")
value = urllib.parse.unquote(pair[1])
d[key] = value
return d

View File

@@ -0,0 +1,50 @@
# uncompyle6 version 3.9.2
# Python bytecode version base 3.7.0 (3394)
# Decompiled from: Python 3.8.19 (default, Mar 20 2024, 15:27:52)
# [Clang 14.0.6 ]
# Embedded file name: /var/user/app/device_supervisorbak/device_supervisor/lib/azure/iot/device/iothub/pipeline/pipeline_events_iothub.py
# Compiled at: 2024-04-18 03:12:58
# Size of source mod 2**32: 2120 bytes
from azure.iot.device.common.pipeline import PipelineEvent
class C2DMessageEvent(PipelineEvent):
    """
    A PipelineEvent object which represents an incoming C2D event. This object is probably
    created by some converter stage based on a protocol-specific event
    """

    def __init__(self, message):
        """
        Initializer for C2DMessageEvent objects.

        :param Message message: The Message object for the message that was received.
        """
        super().__init__()
        self.message = message
class InputMessageEvent(PipelineEvent):
    """
    A PipelineEvent object which represents an incoming input message event. This object is
    probably created by some converter stage based on a protocol-specific event
    """

    def __init__(self, message):
        """
        Initializer for InputMessageEvent objects.

        :param Message message: The Message object for the message that was received. This
            message is expected to have had the .input_name attribute set
        """
        super().__init__()
        self.message = message
class MethodRequestEvent(PipelineEvent):
    """
    A PipelineEvent object which represents an incoming MethodRequest event.
    This object is probably created by some converter stage based on a protocol-specific event.
    """

    def __init__(self, method_request):
        """
        :param method_request: The incoming method request to deliver up the pipeline.
        """
        super().__init__()
        self.method_request = method_request
class TwinDesiredPropertiesPatchEvent(PipelineEvent):
    """
    A PipelineEvent object which represents an incoming twin desired properties patch. This
    object is probably created by some converter stage based on a protocol-specific event.
    """

    def __init__(self, patch):
        """
        :param patch: The desired-properties patch to deliver up the pipeline.
        """
        super().__init__()
        self.patch = patch

View File

@@ -0,0 +1,83 @@
# uncompyle6 version 3.9.2
# Python bytecode version base 3.7.0 (3394)
# Decompiled from: Python 3.8.19 (default, Mar 20 2024, 15:27:52)
# [Clang 14.0.6 ]
# Embedded file name: /var/user/app/device_supervisorbak/device_supervisor/lib/azure/iot/device/iothub/pipeline/pipeline_ops_iothub.py
# Compiled at: 2024-04-18 03:12:58
# Size of source mod 2**32: 4380 bytes
from azure.iot.device.common.pipeline import PipelineOperation
class SendD2CMessageOperation(PipelineOperation):
    """
    A PipelineOperation object which contains arguments used to send a telemetry message to
    an IoTHub or EdgeHub server.

    This operation is in the group of IoTHub operations because it is very specific to the
    IoTHub client
    """

    def __init__(self, message, callback):
        """
        Initializer for SendD2CMessageOperation objects.

        :param Message message: The message that we're sending to the service
        :param Function callback: The function that gets called when this operation is
            complete or has failed. It receives the PipelineOperation that finished.
        """
        super().__init__(callback=callback)
        self.message = message
class SendOutputMessageOperation(PipelineOperation):
    """
    A PipelineOperation object which contains arguments used to send an output message to an
    EdgeHub server.

    This operation is in the group of IoTHub operations because it is very specific to the
    IoTHub client
    """

    def __init__(self, message, callback):
        """
        Initializer for SendOutputMessageOperation objects.

        :param Message message: The output message that we're sending to the service. The
            name of the output is expected to be stored in the output_name attribute of
            this object
        :param Function callback: The function that gets called when this operation is
            complete or has failed. It receives the PipelineOperation that finished.
        """
        super().__init__(callback=callback)
        self.message = message
class SendMethodResponseOperation(PipelineOperation):
    """
    A PipelineOperation object which contains arguments used to send a method response to an
    IoTHub or EdgeHub server.

    This operation is in the group of IoTHub operations because it is very specific to the
    IoTHub client.
    """

    def __init__(self, method_response, callback):
        """
        Initializer for SendMethodResponseOperation objects.

        :param method_response: The method response to be sent to IoTHub/EdgeHub
        :type method_response: MethodResponse
        :param callback: The function that gets called when this operation is complete or
            has failed. It receives the PipelineOperation that finished.
        :type callback: Function/callable
        """
        super().__init__(callback=callback)
        self.method_response = method_response
class GetTwinOperation(PipelineOperation):
    """
    A PipelineOperation object which represents a request to get a device twin or a module
    twin from an Azure IoT Hub or Azure IoT Edge Hub service.

    :ivar twin: Upon completion, this contains the twin which was retrieved from the service.
    :type twin: Twin
    """

    def __init__(self, callback):
        """
        Initializer for GetTwinOperation objects.

        :param callback: Function invoked when this operation completes or fails.
        """
        super().__init__(callback=callback)
        # Filled in by a lower stage once the service responds.
        self.twin = None
class PatchTwinReportedPropertiesOperation(PipelineOperation):
    """
    A PipelineOperation object which contains arguments used to send a reported properties
    patch to the Azure IoT Hub or Azure IoT Edge Hub service.
    """

    def __init__(self, patch, callback):
        """
        Initializer for PatchTwinReportedPropertiesOperation object

        :param patch: The reported properties patch to send to the service.
        :type patch: dict, str, int, float, bool, or None (JSON compatible values)
        :param callback: Function invoked when this operation completes or fails.
        """
        super().__init__(callback=callback)
        self.patch = patch

View File

@@ -0,0 +1,72 @@
# uncompyle6 version 3.9.2
# Python bytecode version base 3.7.0 (3394)
# Decompiled from: Python 3.8.19 (default, Mar 20 2024, 15:27:52)
# [Clang 14.0.6 ]
# Embedded file name: /var/user/app/device_supervisorbak/device_supervisor/lib/azure/iot/device/iothub/pipeline/pipeline_ops_iothub_http.py
# Compiled at: 2024-04-18 03:12:58
# Size of source mod 2**32: 3939 bytes
from azure.iot.device.common.pipeline import PipelineOperation
class MethodInvokeOperation(PipelineOperation):
    """
    A PipelineOperation object which contains arguments used to send a method invoke to an
    IoTHub or EdgeHub server.

    This operation is in the group of EdgeHub operations because it is very specific to the
    EdgeHub client.
    """

    def __init__(self, target_device_id, target_module_id, method_params, callback):
        """
        Initializer for MethodInvokeOperation objects.

        :param str target_device_id: The device id of the target device/module
        :param str target_module_id: The module id of the target module
        :param method_params: The parameters used to invoke the method, as defined by the
            IoT Hub specification.
        :param callback: Function invoked when this operation completes or fails; it
            receives the PipelineOperation that finished.
        :type callback: Function/callable
        """
        super().__init__(callback=callback)
        self.target_device_id = target_device_id
        self.target_module_id = target_module_id
        self.method_params = method_params
        # Populated with the invoked method's response upon completion.
        self.method_response = None
class GetStorageInfoOperation(PipelineOperation):
    """
    A PipelineOperation object which contains arguments used to get the storage information
    from IoT Hub.
    """

    def __init__(self, blob_name, callback):
        """
        Initializer for GetStorageInfoOperation objects.

        :param str blob_name: The name of the blob that will be created in Azure Storage
        :param callback: Function invoked when this operation completes or fails; it
            receives the PipelineOperation that finished.
        :type callback: Function/callable
        :ivar dict storage_info: Upon completion, this contains the storage information
            which was retrieved from the service.
        """
        super().__init__(callback=callback)
        self.blob_name = blob_name
        # Populated with the service's storage info upon completion.
        self.storage_info = None
class NotifyBlobUploadStatusOperation(PipelineOperation):
    """
    A PipelineOperation object which contains arguments used to notify IoT Hub of the
    outcome of a blob upload.
    """

    def __init__(self, correlation_id, is_success, status_code, status_description, callback):
        """
        Initializer for NotifyBlobUploadStatusOperation objects.

        :param str correlation_id: Provided by IoT Hub on get_storage_info_for_blob request.
        :param bool is_success: A boolean that indicates whether the file was uploaded
            successfully.
        :param int status_code: A numeric status code for the upload of the file to storage.
        :param str status_description: A description that corresponds to the status_code.
        :param callback: Function invoked when this operation completes or fails; it
            receives the PipelineOperation that finished.
        :type callback: Function/callable
        """
        super().__init__(callback=callback)
        self.correlation_id = correlation_id
        self.is_success = is_success
        # NOTE: the parameter is named status_code but is stored (and consumed
        # downstream) as request_status_code.
        self.request_status_code = status_code
        self.status_description = status_description

View File

@@ -0,0 +1,128 @@
# uncompyle6 version 3.9.2
# Python bytecode version base 3.7.0 (3394)
# Decompiled from: Python 3.8.19 (default, Mar 20 2024, 15:27:52)
# [Clang 14.0.6 ]
# Embedded file name: /var/user/app/device_supervisorbak/device_supervisor/lib/azure/iot/device/iothub/pipeline/pipeline_stages_iothub.py
# Compiled at: 2024-04-18 03:12:58
# Size of source mod 2**32: 9111 bytes
import json, logging
from azure.iot.device.common.pipeline import pipeline_events_base, pipeline_ops_base, PipelineStage, pipeline_thread
from azure.iot.device import exceptions
from azure.iot.device.common import handle_exceptions
from azure.iot.device.common.callable_weak_method import CallableWeakMethod
from . import pipeline_events_iothub, pipeline_ops_iothub
from . import constant
logger = logging.getLogger(__name__)
class EnsureDesiredPropertiesStage(PipelineStage):
    """
    Pipeline stage responsible for making sure that desired properties are always kept up to
    date. It does this by sending down a GetTwinOperation after a connection is
    reestablished, and, if the desired properties have changed since the last time a patch
    was received, it will send up an artificial patch event to send those updated properties
    to the app.
    """

    def __init__(self):
        # $version of the most recent desired-properties patch seen.
        # None = twin patches not enabled; -1 = enabled but nothing received yet.
        self.last_version_seen = None
        # The in-flight GetTwinOperation, if any (prevents duplicate GETs).
        self.pending_get_request = None
        super(EnsureDesiredPropertiesStage, self).__init__()

    @pipeline_thread.runs_on_pipeline_thread
    def _run_op(self, op):
        # Start tracking versions as soon as the app enables twin patches.
        if isinstance(op, pipeline_ops_base.EnableFeatureOperation):
            if op.feature_name == constant.TWIN_PATCHES:
                logger.debug("{}: enabling twin patches. setting last_version_seen".format(self.name))
                self.last_version_seen = -1
        self.send_op_down(op)

    @pipeline_thread.runs_on_pipeline_thread
    def _ensure_get_op(self):
        """
        Function which makes sure we have a GetTwin operation in progress. If we've
        already sent one down and we're waiting for it to return, we don't want to send
        a new one down. This is because layers below us (especially CoordinateRequestAndResponseStage)
        will do everything they can to ensure we get a response on the already-pending
        GetTwinOperation.
        """
        if not self.pending_get_request:
            logger.info("{}: sending twin GET to ensure freshness".format(self.name))
            self.pending_get_request = pipeline_ops_iothub.GetTwinOperation(
                callback=(CallableWeakMethod(self, "_on_get_twin_complete")))
            self.send_op_down(self.pending_get_request)
        else:
            logger.debug("{}: Outstanding twin GET already exists. Not sending anything".format(self.name))

    @pipeline_thread.runs_on_pipeline_thread
    def _on_get_twin_complete(self, op, error):
        """
        Function that gets called when a GetTwinOperation _that_we_initiated_ is complete.
        This is where we compare $version values and decide if we want to create an artificial
        TwinDesiredPropertiesPatchEvent or not.
        """
        self.pending_get_request = None
        if error:
            # Keep retrying until a GET succeeds; freshness must be restored.
            logger.debug("{}: Twin GET failed with error {}. Resubmitting.".format(self, error))
            self._ensure_get_op()
        else:
            logger.debug("{} Twin GET response received. Checking versions".format(self))
            new_version = op.twin["desired"]["$version"]
            logger.debug("{}: old version = {}, new version = {}".format(
                self.name, self.last_version_seen, new_version))
            if self.last_version_seen != new_version:
                # Desired properties changed while we were disconnected; synthesize
                # a patch event so the app sees the new values.
                logger.debug("{}: Version changed. Sending up new patch event".format(self.name))
                self.last_version_seen = new_version
                self.send_event_up(
                    pipeline_events_iothub.TwinDesiredPropertiesPatchEvent(op.twin["desired"]))

    @pipeline_thread.runs_on_pipeline_thread
    def _handle_pipeline_event(self, event):
        if isinstance(event, pipeline_events_iothub.TwinDesiredPropertiesPatchEvent):
            version = event.patch["$version"]
            logger.debug("{}: Desired patch received. Saving $version={}".format(self.name, version))
            self.last_version_seen = version
        elif isinstance(event, pipeline_events_base.ConnectedEvent):
            if self.last_version_seen:
                # BUG FIX: the placeholder was previously logged unformatted
                # (missing .format(self.name)).
                logger.info("{}: Reconnected. Getting twin".format(self.name))
                self._ensure_get_op()
        self.send_event_up(event)
class TwinRequestResponseStage(PipelineStage):
    __doc__ = "\n PipelineStage which handles twin operations. In particular, it converts twin GET and PATCH\n operations into RequestAndResponseOperation operations. This is done at the IoTHub level because\n there is nothing protocol-specific about this code. The protocol-specific implementation\n for twin requests and responses is handled inside IoTHubMQTTTranslationStage, when it converts\n the RequestOperation to a protocol-specific send operation and when it converts the\n protocol-specific receive event into an ResponseEvent event.\n "

    @pipeline_thread.runs_on_pipeline_thread
    def _run_op(self, op):
        # Translate twin GET/PATCH ops into generic RequestAndResponseOperations;
        # everything else is passed through unchanged.

        def map_twin_error(error, twin_op):
            # Prefer a transport-level error if one occurred; otherwise treat any
            # HTTP-style status >= 300 on the twin response as a ServiceError.
            # Returns None implicitly on success.
            if error:
                return error
            if twin_op.status_code >= 300:
                logger.info("Error {} received from twin operation".format(twin_op.status_code))
                logger.info("response body: {}".format(twin_op.response_body))
                return exceptions.ServiceError("twin operation returned status {}".format(twin_op.status_code))

        if isinstance(op, pipeline_ops_iothub.GetTwinOperation):
            # Keep a handle on the original op: inside the nested callback the
            # name `op` is rebound to the inner RequestAndResponseOperation.
            op_waiting_for_response = op

            def on_twin_response(op, error):
                # `op` here is the inner RequestAndResponseOperation.
                logger.debug("{}({}): Got response for GetTwinOperation".format(self.name, op.name))
                error = map_twin_error(error=error, twin_op=op)
                if not error:
                    # Response body is JSON-encoded bytes; decode into the twin dict.
                    op_waiting_for_response.twin = json.loads(op.response_body.decode("utf-8"))
                op_waiting_for_response.complete(error=error)

            self.send_op_down(pipeline_ops_base.RequestAndResponseOperation(request_type=(constant.TWIN),
              method="GET",
              resource_location="/",
              request_body=" ",
              callback=on_twin_response))
        else:
            if isinstance(op, pipeline_ops_iothub.PatchTwinReportedPropertiesOperation):
                # Same rebinding caveat as above for the nested callback.
                op_waiting_for_response = op

                def on_twin_response(op, error):
                    logger.debug("{}({}): Got response for PatchTwinReportedPropertiesOperation operation".format(self.name, op.name))
                    error = map_twin_error(error=error, twin_op=op)
                    op_waiting_for_response.complete(error=error)

                logger.debug("{}({}): Sending reported properties patch: {}".format(self.name, op.name, op.patch))
                self.send_op_down(pipeline_ops_base.RequestAndResponseOperation(request_type=(constant.TWIN),
                  method="PATCH",
                  resource_location="/properties/reported/",
                  request_body=(json.dumps(op.patch)),
                  callback=on_twin_response))
            else:
                # Not a twin op; let the base stage forward it down the pipeline.
                super(TwinRequestResponseStage, self)._run_op(op)

View File

@@ -0,0 +1,114 @@
# uncompyle6 version 3.9.2
# Python bytecode version base 3.7.0 (3394)
# Decompiled from: Python 3.8.19 (default, Mar 20 2024, 15:27:52)
# [Clang 14.0.6 ]
# Embedded file name: /var/user/app/device_supervisorbak/device_supervisor/lib/azure/iot/device/iothub/pipeline/pipeline_stages_iothub_http.py
# Compiled at: 2024-04-18 03:12:58
# Size of source mod 2**32: 8258 bytes
import logging, json
import six.moves.urllib as urllib
from azure.iot.device.common.pipeline import pipeline_events_base, pipeline_ops_base, pipeline_ops_http, PipelineStage, pipeline_thread
from . import pipeline_ops_iothub, pipeline_ops_iothub_http, http_path_iothub, http_map_error
from azure.iot.device import exceptions
from azure.iot.device import constant as pkg_constant
from azure.iot.device import user_agent
logger = logging.getLogger(__name__)
@pipeline_thread.runs_on_pipeline_thread
def map_http_error(error, http_op):
    """
    Map the outcome of a completed HTTP operation onto an exception.

    Returns the given transport error unchanged if there was one, a
    ServiceError for HTTP status codes of 300 and above, and None on success.

    :param error: Transport-level error from the operation, or None.
    :param http_op: The completed HTTP operation carrying status_code/reason.
    """
    if error:
        return error
    if http_op.status_code < 300:
        return None
    translated_error = http_map_error.translate_error(http_op.status_code, http_op.reason)
    return exceptions.ServiceError("HTTP operation returned: {} {}".format(http_op.status_code, translated_error))
class IoTHubHTTPTranslationStage(PipelineStage):
    """
    PipelineStage which converts other IoT and EdgeHub operations into HTTP
    operations.  This stage also converts http pipeline events into IoT and
    EdgeHub pipeline events.
    """

    @pipeline_thread.runs_on_pipeline_thread
    def _run_op(self, op):
        if isinstance(op, pipeline_ops_iothub_http.MethodInvokeOperation):
            logger.debug("{}({}): Translating Method Invoke Operation for HTTP.".format(self.name, op.name))
            query_params = "api-version={apiVersion}".format(apiVersion=pkg_constant.IOTHUB_API_VERSION)
            body = json.dumps(op.method_params)
            path = http_path_iothub.get_method_invoke_path(op.target_device_id, op.target_module_id)
            # Edge modules identify themselves to EdgeHub via this header value.
            x_ms_edge_string = "{deviceId}/{moduleId}".format(
                deviceId=self.pipeline_root.pipeline_configuration.device_id,
                moduleId=self.pipeline_root.pipeline_configuration.module_id)
            user_agent_string = urllib.parse.quote_plus(
                user_agent.get_iothub_user_agent()
                + str(self.pipeline_root.pipeline_configuration.product_info))
            headers = {
                "Host": self.pipeline_root.pipeline_configuration.gateway_hostname,
                "Content-Type": "application/json",
                "Content-Length": len(str(body)),
                "x-ms-edge-moduleId": x_ms_edge_string,
                "User-Agent": user_agent_string,
            }
            op_waiting_for_response = op

            def on_request_response(op, error):
                logger.debug("{}({}): Got response for MethodInvokeOperation".format(self.name, op.name))
                error = map_http_error(error=error, http_op=op)
                if not error:
                    op_waiting_for_response.method_response = json.loads(op.response_body.decode("utf-8"))
                op_waiting_for_response.complete(error=error)

            self.send_op_down(pipeline_ops_http.HTTPRequestAndResponseOperation(
                method="POST",
                path=path,
                headers=headers,
                body=body,
                query_params=query_params,
                callback=on_request_response))

        elif isinstance(op, pipeline_ops_iothub_http.GetStorageInfoOperation):
            logger.debug("{}({}): Translating Get Storage Info Operation to HTTP.".format(self.name, op.name))
            query_params = "api-version={apiVersion}".format(apiVersion=pkg_constant.IOTHUB_API_VERSION)
            path = http_path_iothub.get_storage_info_for_blob_path(self.pipeline_root.pipeline_configuration.device_id)
            body = json.dumps({"blobName": op.blob_name})
            user_agent_string = urllib.parse.quote_plus(
                user_agent.get_iothub_user_agent()
                + str(self.pipeline_root.pipeline_configuration.product_info))
            headers = {
                "Host": self.pipeline_root.pipeline_configuration.hostname,
                "Accept": "application/json",
                "Content-Type": "application/json",
                "Content-Length": len(str(body)),
                "User-Agent": user_agent_string,
            }
            op_waiting_for_response = op

            def on_request_response(op, error):
                logger.debug("{}({}): Got response for GetStorageInfoOperation".format(self.name, op.name))
                error = map_http_error(error=error, http_op=op)
                if not error:
                    op_waiting_for_response.storage_info = json.loads(op.response_body.decode("utf-8"))
                op_waiting_for_response.complete(error=error)

            self.send_op_down(pipeline_ops_http.HTTPRequestAndResponseOperation(
                method="POST",
                path=path,
                headers=headers,
                body=body,
                query_params=query_params,
                callback=on_request_response))

        elif isinstance(op, pipeline_ops_iothub_http.NotifyBlobUploadStatusOperation):
            # BUG FIX: both log messages in this branch were copy-pasted from
            # the GetStorageInfoOperation branch and misreported the operation.
            logger.debug("{}({}): Translating Notify Blob Upload Status Operation to HTTP.".format(self.name, op.name))
            query_params = "api-version={apiVersion}".format(apiVersion=pkg_constant.IOTHUB_API_VERSION)
            path = http_path_iothub.get_notify_blob_upload_status_path(self.pipeline_root.pipeline_configuration.device_id)
            body = json.dumps({
                "correlationId": op.correlation_id,
                "isSuccess": op.is_success,
                "statusCode": op.request_status_code,
                "statusDescription": op.status_description,
            })
            user_agent_string = urllib.parse.quote_plus(
                user_agent.get_iothub_user_agent()
                + str(self.pipeline_root.pipeline_configuration.product_info))
            headers = {
                "Host": self.pipeline_root.pipeline_configuration.hostname,
                "Content-Type": "application/json; charset=utf-8",
                "Content-Length": len(str(body)),
                "User-Agent": user_agent_string,
            }
            op_waiting_for_response = op

            def on_request_response(op, error):
                logger.debug("{}({}): Got response for NotifyBlobUploadStatusOperation".format(self.name, op.name))
                error = map_http_error(error=error, http_op=op)
                op_waiting_for_response.complete(error=error)

            self.send_op_down(pipeline_ops_http.HTTPRequestAndResponseOperation(
                method="POST",
                path=path,
                headers=headers,
                body=body,
                query_params=query_params,
                callback=on_request_response))

        else:
            # Not an operation this stage understands; pass it down unchanged.
            self.send_op_down(op)

View File

@@ -0,0 +1,149 @@
# uncompyle6 version 3.9.2
# Python bytecode version base 3.7.0 (3394)
# Decompiled from: Python 3.8.19 (default, Mar 20 2024, 15:27:52)
# [Clang 14.0.6 ]
# Embedded file name: /var/user/app/device_supervisorbak/device_supervisor/lib/azure/iot/device/iothub/pipeline/pipeline_stages_iothub_mqtt.py
# Compiled at: 2024-04-18 03:12:58
# Size of source mod 2**32: 11208 bytes
import logging, json
from six.moves import urllib
from azure.iot.device.common import version_compat
from azure.iot.device.common.pipeline import pipeline_events_base, pipeline_ops_base, pipeline_ops_mqtt, pipeline_events_mqtt, PipelineStage, pipeline_thread
from azure.iot.device.iothub.models import Message, MethodRequest
from . import pipeline_ops_iothub, pipeline_events_iothub, mqtt_topic_iothub
from . import constant as pipeline_constant
from . import exceptions as pipeline_exceptions
from azure.iot.device import constant as pkg_constant
from azure.iot.device import user_agent
logger = logging.getLogger(__name__)
class IoTHubMQTTTranslationStage(PipelineStage):
    """
    PipelineStage which converts other IoT and IoTHub operations into MQTT
    operations, and converts incoming MQTT pipeline events back into IoT and
    IoTHub pipeline events.
    """

    @pipeline_thread.runs_on_pipeline_thread
    def _run_op(self, op):
        if isinstance(op, pipeline_ops_base.InitializePipelineOperation):
            # Build the MQTT client id ("<device>" or "<device>/<module>") and
            # the CONNECT username (hostname/client_id/?<query params>).
            cfg = self.pipeline_root.pipeline_configuration
            if cfg.module_id:
                client_id = "{}/{}".format(cfg.device_id, cfg.module_id)
            else:
                client_id = cfg.device_id

            query_param_seq = []
            custom_product_info = str(cfg.product_info)
            if custom_product_info.startswith(pkg_constant.DIGITAL_TWIN_PREFIX):
                # Digital Twin clients use a different api-version and carry
                # the product info in a dedicated query parameter.
                query_param_seq.append(("api-version", pkg_constant.DIGITAL_TWIN_API_VERSION))
                query_param_seq.append(("DeviceClientType", user_agent.get_iothub_user_agent()))
                query_param_seq.append((pkg_constant.DIGITAL_TWIN_QUERY_HEADER, custom_product_info))
            else:
                query_param_seq.append(("api-version", pkg_constant.IOTHUB_API_VERSION))
                query_param_seq.append(("DeviceClientType", user_agent.get_iothub_user_agent() + custom_product_info))

            op.username = "{hostname}/{client_id}/?{query_params}".format(
                hostname=cfg.hostname,
                client_id=client_id,
                query_params=version_compat.urlencode(query_param_seq, quote_via=urllib.parse.quote),
            )
            op.client_id = client_id
            self.send_op_down(op)

        elif isinstance(op, (pipeline_ops_iothub.SendD2CMessageOperation, pipeline_ops_iothub.SendOutputMessageOperation)):
            # Telemetry: message properties are encoded into the publish topic.
            telemetry_topic = mqtt_topic_iothub.get_telemetry_topic_for_publish(
                device_id=self.pipeline_root.pipeline_configuration.device_id,
                module_id=self.pipeline_root.pipeline_configuration.module_id,
            )
            publish_topic = mqtt_topic_iothub.encode_message_properties_in_topic(op.message, telemetry_topic)
            self.send_op_down(op.spawn_worker_op(
                worker_op_type=pipeline_ops_mqtt.MQTTPublishOperation,
                topic=publish_topic,
                payload=op.message.data,
            ))

        elif isinstance(op, pipeline_ops_iothub.SendMethodResponseOperation):
            publish_topic = mqtt_topic_iothub.get_method_topic_for_publish(op.method_response.request_id, op.method_response.status)
            self.send_op_down(op.spawn_worker_op(
                worker_op_type=pipeline_ops_mqtt.MQTTPublishOperation,
                topic=publish_topic,
                payload=json.dumps(op.method_response.payload),
            ))

        elif isinstance(op, pipeline_ops_base.EnableFeatureOperation):
            self.send_op_down(op.spawn_worker_op(
                worker_op_type=pipeline_ops_mqtt.MQTTSubscribeOperation,
                topic=self._get_feature_subscription_topic(op.feature_name),
            ))

        elif isinstance(op, pipeline_ops_base.DisableFeatureOperation):
            self.send_op_down(op.spawn_worker_op(
                worker_op_type=pipeline_ops_mqtt.MQTTUnsubscribeOperation,
                topic=self._get_feature_subscription_topic(op.feature_name),
            ))

        elif isinstance(op, pipeline_ops_base.RequestOperation):
            # Only twin requests travel over MQTT in this pipeline.
            if op.request_type == pipeline_constant.TWIN:
                publish_topic = mqtt_topic_iothub.get_twin_topic_for_publish(
                    method=op.method,
                    resource_location=op.resource_location,
                    request_id=op.request_id,
                )
                self.send_op_down(op.spawn_worker_op(
                    worker_op_type=pipeline_ops_mqtt.MQTTPublishOperation,
                    topic=publish_topic,
                    payload=op.request_body,
                ))
            else:
                raise pipeline_exceptions.OperationError("RequestOperation request_type {} not supported".format(op.request_type))

        else:
            super(IoTHubMQTTTranslationStage, self)._run_op(op)

    @pipeline_thread.runs_on_pipeline_thread
    def _get_feature_subscription_topic(self, feature):
        """Return the MQTT topic to (un)subscribe for the given pipeline feature.

        :raises: OperationError if the feature is not a recognized value.
        """
        cfg = self.pipeline_root.pipeline_configuration
        if feature == pipeline_constant.C2D_MSG:
            topic = mqtt_topic_iothub.get_c2d_topic_for_subscribe(cfg.device_id)
        elif feature == pipeline_constant.INPUT_MSG:
            topic = mqtt_topic_iothub.get_input_topic_for_subscribe(cfg.device_id, cfg.module_id)
        elif feature == pipeline_constant.METHODS:
            topic = mqtt_topic_iothub.get_method_topic_for_subscribe()
        elif feature == pipeline_constant.TWIN:
            topic = mqtt_topic_iothub.get_twin_response_topic_for_subscribe()
        elif feature == pipeline_constant.TWIN_PATCHES:
            topic = mqtt_topic_iothub.get_twin_patch_topic_for_subscribe()
        else:
            logger.error("Cannot retrieve MQTT topic for subscription to invalid feature")
            raise pipeline_exceptions.OperationError("Trying to enable/disable invalid feature - {}".format(feature))
        return topic

    @pipeline_thread.runs_on_pipeline_thread
    def _handle_pipeline_event(self, event):
        """
        Pipeline Event handler function to convert incoming MQTT messages into
        the appropriate IoTHub events, based on the topic of the message.
        """
        if not isinstance(event, pipeline_events_mqtt.IncomingMQTTMessageEvent):
            # Not an MQTT message; forward unchanged.
            self.send_event_up(event)
            return

        topic = event.topic
        device_id = self.pipeline_root.pipeline_configuration.device_id
        module_id = self.pipeline_root.pipeline_configuration.module_id

        if mqtt_topic_iothub.is_c2d_topic(topic, device_id):
            message = Message(event.payload)
            mqtt_topic_iothub.extract_message_properties_from_topic(topic, message)
            self.send_event_up(pipeline_events_iothub.C2DMessageEvent(message))
        elif mqtt_topic_iothub.is_input_topic(topic, device_id, module_id):
            message = Message(event.payload)
            mqtt_topic_iothub.extract_message_properties_from_topic(topic, message)
            message.input_name = mqtt_topic_iothub.get_input_name_from_topic(topic)
            self.send_event_up(pipeline_events_iothub.InputMessageEvent(message))
        elif mqtt_topic_iothub.is_method_topic(topic):
            method_received = MethodRequest(
                request_id=mqtt_topic_iothub.get_method_request_id_from_topic(topic),
                name=mqtt_topic_iothub.get_method_name_from_topic(topic),
                payload=json.loads(event.payload.decode("utf-8")),
            )
            self.send_event_up(pipeline_events_iothub.MethodRequestEvent(method_received))
        elif mqtt_topic_iothub.is_twin_response_topic(topic):
            self.send_event_up(pipeline_events_base.ResponseEvent(
                request_id=mqtt_topic_iothub.get_twin_request_id_from_topic(topic),
                status_code=int(mqtt_topic_iothub.get_twin_status_code_from_topic(topic)),
                response_body=event.payload,
            ))
        elif mqtt_topic_iothub.is_twin_desired_property_patch_topic(topic):
            self.send_event_up(pipeline_events_iothub.TwinDesiredPropertiesPatchEvent(patch=json.loads(event.payload.decode("utf-8"))))
        else:
            logger.debug("Unknown topic: {} passing up to next handler".format(topic))
            self.send_event_up(event)

View File

@@ -0,0 +1,627 @@
# uncompyle6 version 3.9.2
# Python bytecode version base 3.7.0 (3394)
# Decompiled from: Python 3.8.19 (default, Mar 20 2024, 15:27:52)
# [Clang 14.0.6 ]
# Embedded file name: /var/user/app/device_supervisorbak/device_supervisor/lib/azure/iot/device/iothub/sync_clients.py
# Compiled at: 2024-04-18 03:12:58
# Size of source mod 2**32: 33436 bytes
"""This module contains user-facing synchronous clients for the
Azure IoTHub Device SDK for Python.
"""
import logging, deprecation
from .abstract_clients import AbstractIoTHubClient, AbstractIoTHubDeviceClient, AbstractIoTHubModuleClient
from .models import Message
from .inbox_manager import InboxManager
from .sync_inbox import SyncClientInbox, InboxEmpty
from . import sync_handler_manager
from .pipeline import constant as pipeline_constant
from .pipeline import exceptions as pipeline_exceptions
from azure.iot.device import exceptions
from azure.iot.device.common.evented_callback import EventedCallback
from azure.iot.device.common.callable_weak_method import CallableWeakMethod
from azure.iot.device import constant as device_constant
logger = logging.getLogger(__name__)
def handle_result(callback):
    """Wait for a pipeline callback to complete and translate any pipeline
    exception it raises into the corresponding user-facing exception.

    :param callback: An EventedCallback whose wait_for_completion() result is
        returned on success.
    :returns: Whatever the callback's wait_for_completion() returns.
    :raises: azure.iot.device.exceptions.* translated from the pipeline error.
    """
    # NOTE: the decompiled original contained `finally: e = None; del e`
    # blocks; those are just CPython's automatic `except ... as e` scope
    # cleanup and need not be written explicitly.
    try:
        return callback.wait_for_completion()
    except pipeline_exceptions.ConnectionDroppedError as e:
        raise exceptions.ConnectionDroppedError(message="Lost connection to IoTHub", cause=e)
    except pipeline_exceptions.ConnectionFailedError as e:
        raise exceptions.ConnectionFailedError(message="Could not connect to IoTHub", cause=e)
    except pipeline_exceptions.UnauthorizedError as e:
        raise exceptions.CredentialError(message="Credentials invalid, could not connect", cause=e)
    except pipeline_exceptions.ProtocolClientError as e:
        raise exceptions.ClientError(message="Error in the IoTHub client", cause=e)
    except pipeline_exceptions.TlsExchangeAuthError as e:
        raise exceptions.ClientError(message="Error in the IoTHub client due to TLS exchanges.",
                                     cause=e)
    except pipeline_exceptions.ProtocolProxyError as e:
        raise exceptions.ClientError(message="Error in the IoTHub client raised due to proxy connections.",
                                     cause=e)
    except Exception as e:
        raise exceptions.ClientError(message="Unexpected failure", cause=e)
class GenericIoTHubClient(AbstractIoTHubClient):
__doc__ = "A superclass representing a generic synchronous client.\n This class needs to be extended for specific clients.\n "
def __init__(self, **kwargs):
"""Initializer for a generic synchronous client.
This initializer should not be called directly.
Instead, use one of the 'create_from_' classmethods to instantiate
:param mqtt_pipeline: The MQTTPipeline used for the client
:type mqtt_pipeline: :class:`azure.iot.device.iothub.pipeline.MQTTPipeline`
:param http_pipeline: The HTTPPipeline used for the client
:type http_pipeline: :class:`azure.iot.device.iothub.pipeline.HTTPPipeline`
"""
(super(GenericIoTHubClient, self).__init__)(**kwargs)
self._inbox_manager = InboxManager(inbox_type=SyncClientInbox)
self._handler_manager = sync_handler_manager.SyncHandlerManager(self._inbox_manager)
self._mqtt_pipeline.on_connected = CallableWeakMethod(self, "_on_connected")
self._mqtt_pipeline.on_disconnected = CallableWeakMethod(self, "_on_disconnected")
self._mqtt_pipeline.on_method_request_received = CallableWeakMethod(self._inbox_manager, "route_method_request")
self._mqtt_pipeline.on_twin_patch_received = CallableWeakMethod(self._inbox_manager, "route_twin_patch")
def _enable_feature(self, feature_name):
"""Enable an Azure IoT Hub feature.
This is a synchronous call, meaning that this function will not return until the feature
has been enabled.
:param feature_name: The name of the feature to enable.
See azure.iot.device.common.pipeline.constant for possible values
"""
logger.info("Enabling feature:" + feature_name + "...")
if not self._mqtt_pipeline.feature_enabled[feature_name]:
callback = EventedCallback()
self._mqtt_pipeline.enable_feature(feature_name, callback=callback)
callback.wait_for_completion()
logger.info("Successfully enabled feature:" + feature_name)
else:
logger.info("Feature ({}) already disabled - skipping".format(feature_name))
def _disable_feature(self, feature_name):
"""Disable an Azure IoT Hub feature
This is a synchronous call, meaning that this function will not return until the feature
has been disabled.
:param feature_name: The name of the feature to disable.
See azure.iot.device.common.pipeline.constant for possible values
"""
logger.info("Disabling feature: {}...".format(feature_name))
if self._mqtt_pipeline.feature_enabled[feature_name]:
callback = EventedCallback()
self._mqtt_pipeline.disable_feature(feature_name, callback=callback)
callback.wait_for_completion()
logger.info("Successfully disabled feature: {}".format(feature_name))
else:
logger.info("Feature ({}) already disabled - skipping".format(feature_name))
def connect(self):
"""Connects the client to an Azure IoT Hub or Azure IoT Edge Hub instance.
The destination is chosen based on the credentials passed via the auth_provider parameter
that was provided when this object was initialized.
This is a synchronous call, meaning that this function will not return until the connection
to the service has been completely established.
:raises: :class:`azure.iot.device.exceptions.CredentialError` if credentials are invalid
and a connection cannot be established.
:raises: :class:`azure.iot.device.exceptions.ConnectionFailedError` if a establishing a
connection results in failure.
:raises: :class:`azure.iot.device.exceptions.ConnectionDroppedError` if connection is lost
during execution.
:raises: :class:`azure.iot.device.exceptions.ClientError` if there is an unexpected failure
during execution.
"""
logger.info("Connecting to Hub...")
callback = EventedCallback()
self._mqtt_pipeline.connect(callback=callback)
handle_result(callback)
logger.info("Successfully connected to Hub")
def disconnect(self):
"""Disconnect the client from the Azure IoT Hub or Azure IoT Edge Hub instance.
It is recommended that you make sure to call this function when you are completely done
with the your client instance.
This is a synchronous call, meaning that this function will not return until the connection
to the service has been completely closed.
:raises: :class:`azure.iot.device.exceptions.ClientError` if there is an unexpected failure
during execution.
"""
logger.info("Disconnecting from Hub...")
logger.debug("Executing initial disconnect")
callback = EventedCallback()
self._mqtt_pipeline.disconnect(callback=callback)
handle_result(callback)
logger.debug("Successfully executed initial disconnect")
logger.debug("Stopping handlers...")
self._handler_manager.stop()
logger.debug("Successfully stopped handlers")
logger.debug("Executing secondary disconnect...")
callback = EventedCallback()
self._mqtt_pipeline.disconnect(callback=callback)
handle_result(callback)
logger.debug("Successfully executed secondary disconnect")
logger.info("Successfully disconnected from Hub")
def update_sastoken(self, sastoken):
"""
Update the client's SAS Token used for authentication, then reauthorizes the connection.
This API can only be used if the client was initially created with a SAS Token.
Note also that this API may return before the reauthorization/reconnection is completed.
This means that some errors that may occur as part of the reconnection could occur in the
background, and will not be raised by this method.
:param str sastoken: The new SAS Token string for the client to use
:raises: :class:`azure.iot.device.exceptions.ClientError` if the client was not initially
created with a SAS token.
:raises: :class:`azure.iot.device.exceptions.ClientError` if there is an unexpected failure
during execution.
:raises: ValueError if the sastoken parameter is invalid
"""
self._replace_user_supplied_sastoken(sastoken)
logger.info("Reauthorizing connection with Hub...")
callback = EventedCallback()
self._mqtt_pipeline.reauthorize_connection(callback=callback)
handle_result(callback)
logger.info("Successfully reauthorized connection to Hub")
def send_message(self, message):
"""Sends a message to the default events endpoint on the Azure IoT Hub or Azure IoT Edge Hub instance.
This is a synchronous event, meaning that this function will not return until the event
has been sent to the service and the service has acknowledged receipt of the event.
If the connection to the service has not previously been opened by a call to connect, this
function will open the connection before sending the event.
:param message: The actual message to send. Anything passed that is not an instance of the
Message class will be converted to Message object.
:type message: :class:`azure.iot.device.Message` or str
:raises: :class:`azure.iot.device.exceptions.CredentialError` if credentials are invalid
and a connection cannot be established.
:raises: :class:`azure.iot.device.exceptions.ConnectionFailedError` if a establishing a
connection results in failure.
:raises: :class:`azure.iot.device.exceptions.ConnectionDroppedError` if connection is lost
during execution.
:raises: :class:`azure.iot.device.exceptions.ClientError` if there is an unexpected failure
during execution.
:raises: ValueError if the message fails size validation.
"""
if not isinstance(message, Message):
message = Message(message)
if message.get_size() > device_constant.TELEMETRY_MESSAGE_SIZE_LIMIT:
raise ValueError("Size of telemetry message can not exceed 256 KB.")
logger.info("Sending message to Hub...")
callback = EventedCallback()
self._mqtt_pipeline.send_message(message, callback=callback)
handle_result(callback)
logger.info("Successfully sent message to Hub")
@deprecation.deprecated(deprecated_in="2.3.0",
current_version=(device_constant.VERSION),
details="We recommend that you use the .on_method_request_received property to set a handler instead")
def receive_method_request(self, method_name=None, block=True, timeout=None):
"""Receive a method request via the Azure IoT Hub or Azure IoT Edge Hub.
:param str method_name: Optionally provide the name of the method to receive requests for.
If this parameter is not given, all methods not already being specifically targeted by
a different request to receive_method will be received.
:param bool block: Indicates if the operation should block until a request is received.
:param int timeout: Optionally provide a number of seconds until blocking times out.
:returns: MethodRequest object representing the received method request, or None if
no method request has been received by the end of the blocking period.
"""
self._check_receive_mode_is_api()
if not self._mqtt_pipeline.feature_enabled[pipeline_constant.METHODS]:
self._enable_feature(pipeline_constant.METHODS)
method_inbox = self._inbox_manager.get_method_request_inbox(method_name)
logger.info("Waiting for method request...")
try:
method_request = method_inbox.get(block=block, timeout=timeout)
except InboxEmpty:
method_request = None
logger.info("Received method request")
return method_request
def send_method_response(self, method_response):
"""Send a response to a method request via the Azure IoT Hub or Azure IoT Edge Hub.
This is a synchronous event, meaning that this function will not return until the event
has been sent to the service and the service has acknowledged receipt of the event.
If the connection to the service has not previously been opened by a call to connect, this
function will open the connection before sending the event.
:param method_response: The MethodResponse to send.
:type method_response: :class:`azure.iot.device.MethodResponse`
:raises: :class:`azure.iot.device.exceptions.CredentialError` if credentials are invalid
and a connection cannot be established.
:raises: :class:`azure.iot.device.exceptions.ConnectionFailedError` if a establishing a
connection results in failure.
:raises: :class:`azure.iot.device.exceptions.ConnectionDroppedError` if connection is lost
during execution.
:raises: :class:`azure.iot.device.exceptions.ClientError` if there is an unexpected failure
during execution.
"""
logger.info("Sending method response to Hub...")
callback = EventedCallback()
self._mqtt_pipeline.send_method_response(method_response, callback=callback)
handle_result(callback)
logger.info("Successfully sent method response to Hub")
def get_twin(self):
"""
Gets the device or module twin from the Azure IoT Hub or Azure IoT Edge Hub service.
This is a synchronous call, meaning that this function will not return until the twin
has been retrieved from the service.
:returns: Complete Twin as a JSON dict
:rtype: dict
:raises: :class:`azure.iot.device.exceptions.CredentialError` if credentials are invalid
and a connection cannot be established.
:raises: :class:`azure.iot.device.exceptions.ConnectionFailedError` if a establishing a
connection results in failure.
:raises: :class:`azure.iot.device.exceptions.ConnectionDroppedError` if connection is lost
during execution.
:raises: :class:`azure.iot.device.exceptions.ClientError` if there is an unexpected failure
during execution.
"""
if not self._mqtt_pipeline.feature_enabled[pipeline_constant.TWIN]:
self._enable_feature(pipeline_constant.TWIN)
callback = EventedCallback(return_arg_name="twin")
self._mqtt_pipeline.get_twin(callback=callback)
twin = handle_result(callback)
logger.info("Successfully retrieved twin")
return twin
def patch_twin_reported_properties(self, reported_properties_patch):
"""
Update reported properties with the Azure IoT Hub or Azure IoT Edge Hub service.
This is a synchronous call, meaning that this function will not return until the patch
has been sent to the service and acknowledged.
If the service returns an error on the patch operation, this function will raise the
appropriate error.
:param reported_properties_patch: Twin Reported Properties patch as a JSON dict
:type reported_properties_patch: dict
:raises: :class:`azure.iot.device.exceptions.CredentialError` if credentials are invalid
and a connection cannot be established.
:raises: :class:`azure.iot.device.exceptions.ConnectionFailedError` if a establishing a
connection results in failure.
:raises: :class:`azure.iot.device.exceptions.ConnectionDroppedError` if connection is lost
during execution.
:raises: :class:`azure.iot.device.exceptions.ClientError` if there is an unexpected failure
during execution.
"""
if not self._mqtt_pipeline.feature_enabled[pipeline_constant.TWIN]:
self._enable_feature(pipeline_constant.TWIN)
callback = EventedCallback()
self._mqtt_pipeline.patch_twin_reported_properties(patch=reported_properties_patch,
callback=callback)
handle_result(callback)
logger.info("Successfully patched twin")
@deprecation.deprecated(deprecated_in="2.3.0",
current_version=(device_constant.VERSION),
details="We recommend that you use the .on_twin_desired_properties_patch_received property to set a handler instead")
def receive_twin_desired_properties_patch(self, block=True, timeout=None):
"""
Receive a desired property patch via the Azure IoT Hub or Azure IoT Edge Hub.
This is a synchronous call, which means the following:
1. If block=True, this function will block until one of the following happens:
* a desired proprety patch is received from the Azure IoT Hub or Azure IoT Edge Hub.
* the timeout period, if provided, elapses. If a timeout happens, this function will
raise a InboxEmpty exception
2. If block=False, this function will return any desired property patches which may have
been received by the pipeline, but not yet returned to the application. If no
desired property patches have been received by the pipeline, this function will raise
an InboxEmpty exception
:param bool block: Indicates if the operation should block until a request is received.
:param int timeout: Optionally provide a number of seconds until blocking times out.
:returns: Twin Desired Properties patch as a JSON dict, or None if no patch has been
received by the end of the blocking period
:rtype: dict or None
"""
self._check_receive_mode_is_api()
if not self._mqtt_pipeline.feature_enabled[pipeline_constant.TWIN_PATCHES]:
self._enable_feature(pipeline_constant.TWIN_PATCHES)
twin_patch_inbox = self._inbox_manager.get_twin_patch_inbox()
logger.info("Waiting for twin patches...")
try:
patch = twin_patch_inbox.get(block=block, timeout=timeout)
except InboxEmpty:
return
else:
logger.info("twin patch received")
return patch
def _generic_handler_setter(self, handler_name, feature_name, new_handler):
self._check_receive_mode_is_handler()
setattr(self._handler_manager, handler_name, new_handler)
if new_handler is not None:
self._mqtt_pipeline.feature_enabled[feature_name] or self._enable_feature(feature_name)
else:
if new_handler is None:
if self._mqtt_pipeline.feature_enabled[feature_name]:
self._disable_feature(feature_name)
    @property
    def on_twin_desired_properties_patch_received(self):
        """The handler function that will be called when a twin desired properties patch
        is received.

        The function definition should take one positional argument (the twin patch in the form
        of a JSON dictionary object)"""
        return self._handler_manager.on_twin_desired_properties_patch_received

    @on_twin_desired_properties_patch_received.setter
    def on_twin_desired_properties_patch_received(self, value):
        # Delegates to _generic_handler_setter, which also enables/disables the
        # TWIN_PATCHES pipeline feature to match whether a handler is set.
        self._generic_handler_setter("on_twin_desired_properties_patch_received", pipeline_constant.TWIN_PATCHES, value)
@property
def on_method_request_received(self):
"""The handler function that will be called when a method request is received.
The function definition should take one positional argument (the
:class:`azure.iot.device.MethodRequest` object)"""
return self._handler_manager.on_method_request_received
@on_method_request_received.setter
def on_method_request_received(self, value):
self._generic_handler_setter("on_method_request_received", pipeline_constant.METHODS, value)
class IoTHubDeviceClient(GenericIoTHubClient, AbstractIoTHubDeviceClient):
    """A synchronous device client that connects to an Azure IoT Hub instance.

    Intended for usage with Python 2.7 or compatibility scenarios for Python 3.5.3+.
    """

    def __init__(self, mqtt_pipeline, http_pipeline):
        """Initializer for a IoTHubDeviceClient.

        This initializer should not be called directly.
        Instead, use one of the 'create_from_' classmethods to instantiate.

        :param mqtt_pipeline: The pipeline used to connect to the IoTHub endpoint.
        :type mqtt_pipeline: :class:`azure.iot.device.iothub.pipeline.MQTTPipeline`
        :param http_pipeline: The pipeline used to connect to the IoTHub endpoint via HTTP.
        :type http_pipeline: :class:`azure.iot.device.iothub.pipeline.HTTPPipeline`
        """
        super(IoTHubDeviceClient, self).__init__(mqtt_pipeline=mqtt_pipeline,
          http_pipeline=http_pipeline)
        # CallableWeakMethod avoids a strong reference cycle between the
        # pipeline and this client (via the inbox manager).
        self._mqtt_pipeline.on_c2d_message_received = CallableWeakMethod(self._inbox_manager, "route_c2d_message")

    @deprecation.deprecated(deprecated_in="2.3.0",
      current_version=(device_constant.VERSION),
      details="We recommend that you use the .on_message_received property to set a handler instead")
    def receive_message(self, block=True, timeout=None):
        """Receive a message that has been sent from the Azure IoT Hub.

        :param bool block: Indicates if the operation should block until a message is received.
        :param int timeout: Optionally provide a number of seconds until blocking times out.
        :returns: Message that was sent from the Azure IoT Hub, or None if
            no message has been received by the end of the blocking period.
        :rtype: :class:`azure.iot.device.Message` or None
        """
        self._check_receive_mode_is_api()
        if not self._mqtt_pipeline.feature_enabled[pipeline_constant.C2D_MSG]:
            self._enable_feature(pipeline_constant.C2D_MSG)
        c2d_inbox = self._inbox_manager.get_c2d_message_inbox()
        logger.info("Waiting for message from Hub...")
        try:
            message = c2d_inbox.get(block=block, timeout=timeout)
        except InboxEmpty:
            message = None
        else:
            # Fix: only log receipt when an item was actually retrieved.
            # Previously this logged "Message received" even on an empty inbox,
            # inconsistent with receive_twin_desired_properties_patch.
            logger.info("Message received")
        return message

    def get_storage_info_for_blob(self, blob_name):
        """Sends a POST request over HTTP to an IoTHub endpoint that will return information for uploading via the Azure Storage Account linked to the IoTHub your device is connected to.

        :param str blob_name: The name in string format of the blob that will be uploaded using the storage API. This name will be used to generate the proper credentials for Storage, and needs to match what will be used with the Azure Storage SDK to perform the blob upload.
        :returns: A JSON-like (dictionary) object from IoT Hub that will contain relevant information including: correlationId, hostName, containerName, blobName, sasToken.
        """
        callback = EventedCallback(return_arg_name="storage_info")
        self._http_pipeline.get_storage_info_for_blob(blob_name, callback=callback)
        # Blocks until the HTTP pipeline completes (or fails) the request.
        storage_info = handle_result(callback)
        logger.info("Successfully retrieved storage_info")
        return storage_info

    def notify_blob_upload_status(self, correlation_id, is_success, status_code, status_description):
        """When the upload is complete, the device sends a POST request to the IoT Hub endpoint with information on the status of an upload to blob attempt. This is used by IoT Hub to notify listening clients.

        :param str correlation_id: Provided by IoT Hub on get_storage_info_for_blob request.
        :param bool is_success: A boolean that indicates whether the file was uploaded successfully.
        :param int status_code: A numeric status code that is the status for the upload of the file to storage.
        :param str status_description: A description that corresponds to the status_code.
        """
        callback = EventedCallback()
        self._http_pipeline.notify_blob_upload_status(correlation_id=correlation_id,
          is_success=is_success,
          status_code=status_code,
          status_description=status_description,
          callback=callback)
        # Blocks until the notification has been acknowledged (or fails).
        handle_result(callback)
        logger.info("Successfully notified blob upload status")

    @property
    def on_message_received(self):
        """The handler function that will be called when a message is received.

        The function definition should take one positional argument (the
        :class:`azure.iot.device.Message` object)."""
        return self._handler_manager.on_message_received

    @on_message_received.setter
    def on_message_received(self, value):
        self._generic_handler_setter("on_message_received", pipeline_constant.C2D_MSG, value)
class IoTHubModuleClient(GenericIoTHubClient, AbstractIoTHubModuleClient):
    """A synchronous module client that connects to an Azure IoT Hub or Azure IoT Edge instance.

    Intended for usage with Python 2.7 or compatibility scenarios for Python 3.5.3+.
    """

    def __init__(self, mqtt_pipeline, http_pipeline):
        """Initializer for a IoTHubModuleClient.

        This initializer should not be called directly.
        Instead, use one of the 'create_from_' classmethods to instantiate.

        :param mqtt_pipeline: The pipeline used to connect to the IoTHub endpoint.
        :type mqtt_pipeline: :class:`azure.iot.device.iothub.pipeline.MQTTPipeline`
        :param http_pipeline: The pipeline used to connect to the IoTHub endpoint via HTTP.
        :type http_pipeline: :class:`azure.iot.device.iothub.pipeline.HTTPPipeline`
        """
        super(IoTHubModuleClient, self).__init__(mqtt_pipeline=mqtt_pipeline,
          http_pipeline=http_pipeline)
        # CallableWeakMethod avoids a strong reference cycle between the
        # pipeline and this client (via the inbox manager).
        self._mqtt_pipeline.on_input_message_received = CallableWeakMethod(self._inbox_manager, "route_input_message")

    def send_message_to_output(self, message, output_name):
        """Sends an event/message to the given module output.

        These are outgoing events and are meant to be "output events".
        This is a synchronous event, meaning that this function will not return until the event
        has been sent to the service and the service has acknowledged receipt of the event.
        If the connection to the service has not previously been opened by a call to connect, this
        function will open the connection before sending the event.

        :param message: Message to send to the given output. Anything passed that is not an instance of the
            Message class will be converted to Message object.
        :type message: :class:`azure.iot.device.Message` or str
        :param str output_name: Name of the output to send the event to.

        :raises: :class:`azure.iot.device.exceptions.CredentialError` if credentials are invalid
            and a connection cannot be established.
        :raises: :class:`azure.iot.device.exceptions.ConnectionFailedError` if a establishing a
            connection results in failure.
        :raises: :class:`azure.iot.device.exceptions.ConnectionDroppedError` if connection is lost
            during execution.
        :raises: :class:`azure.iot.device.exceptions.ClientError` if there is an unexpected failure
            during execution.
        :raises: ValueError if the message fails size validation.
        """
        if not isinstance(message, Message):
            message = Message(message)
        # Enforce the service's telemetry size limit before attempting a send.
        if message.get_size() > device_constant.TELEMETRY_MESSAGE_SIZE_LIMIT:
            raise ValueError("Size of message can not exceed 256 KB.")
        message.output_name = output_name
        logger.info("Sending message to output:" + output_name + "...")
        callback = EventedCallback()
        self._mqtt_pipeline.send_output_message(message, callback=callback)
        handle_result(callback)
        logger.info("Successfully sent message to output: " + output_name)

    @deprecation.deprecated(deprecated_in="2.3.0",
      current_version=(device_constant.VERSION),
      details="We recommend that you use the .on_message_received property to set a handler instead")
    def receive_message_on_input(self, input_name, block=True, timeout=None):
        """Receive an input message that has been sent from another Module to a specific input.

        :param str input_name: The input name to receive a message on.
        :param bool block: Indicates if the operation should block until a message is received.
        :param int timeout: Optionally provide a number of seconds until blocking times out.
        :returns: Message that was sent to the specified input, or None if
            no message has been received by the end of the blocking period.
        """
        self._check_receive_mode_is_api()
        if not self._mqtt_pipeline.feature_enabled[pipeline_constant.INPUT_MSG]:
            self._enable_feature(pipeline_constant.INPUT_MSG)
        input_inbox = self._inbox_manager.get_input_message_inbox(input_name)
        logger.info("Waiting for input message on: " + input_name + "...")
        try:
            message = input_inbox.get(block=block, timeout=timeout)
        except InboxEmpty:
            message = None
        else:
            # Fix: only log receipt when an item was actually retrieved.
            # Previously this logged receipt even on an empty inbox.
            logger.info("Input message received on: " + input_name)
        return message

    def invoke_method(self, method_params, device_id, module_id=None):
        """Invoke a method from your client onto a device or module client, and receive the response to the method call.

        :param dict method_params: Should contain a methodName (str), payload (str),
            connectTimeoutInSeconds (int), responseTimeoutInSeconds (int).
        :param str device_id: Device ID of the target device where the method will be invoked.
        :param str module_id: Module ID of the target module where the method will be invoked. (Optional)

        :returns: method_result should contain a status, and a payload
        :rtype: dict
        """
        # Fix: previously the log rendered e.g. "mydeviceNone" when no module_id
        # was given; format the target explicitly instead.
        if module_id is None:
            target = device_id
        else:
            target = "{}/{}".format(device_id, module_id)
        logger.info("Invoking {} method on {}".format(method_params["methodName"], target))
        callback = EventedCallback(return_arg_name="invoke_method_response")
        self._http_pipeline.invoke_method(device_id,
          method_params, callback=callback, module_id=module_id)
        invoke_method_response = handle_result(callback)
        logger.info("Successfully invoked method")
        return invoke_method_response

    @property
    def on_message_received(self):
        """The handler function that will be called when an input message is received.

        The function definition should take one positional argument (the
        :class:`azure.iot.device.Message` object)."""
        return self._handler_manager.on_message_received

    @on_message_received.setter
    def on_message_received(self, value):
        self._generic_handler_setter("on_message_received", pipeline_constant.INPUT_MSG, value)

View File

@@ -0,0 +1,197 @@
# uncompyle6 version 3.9.2
# Python bytecode version base 3.7.0 (3394)
# Decompiled from: Python 3.8.19 (default, Mar 20 2024, 15:27:52)
# [Clang 14.0.6 ]
# Embedded file name: /var/user/app/device_supervisorbak/device_supervisor/lib/azure/iot/device/iothub/sync_handler_manager.py
# Compiled at: 2024-04-18 03:12:58
# Size of source mod 2**32: 10716 bytes
"""This module contains the manager for handler methods used by the callback client"""
import logging, threading, abc, six
from azure.iot.device.common import handle_exceptions
from azure.iot.device.common.chainable_exception import ChainableException
from azure.iot.device.iothub.sync_inbox import InboxEmpty
import concurrent.futures
logger = logging.getLogger(__name__)
MESSAGE = "_on_message_received"
METHOD = "_on_method_request_received"
TWIN_DP_PATCH = "_on_twin_desired_properties_patch_received"
class HandlerManagerException(ChainableException):
    """An exception raised by a HandlerManager."""
class HandlerRunnerKillerSentinel(object):
    """An object that functions according to the sentinel design pattern.

    Insert into an Inbox in order to indicate that the Handler Runner
    associated with that Inbox should be stopped.
    """
@six.add_metaclass(abc.ABCMeta)
class AbstractHandlerManager(object):
    """Partial class that defines handler manager functionality shared between sync/async."""

    def __init__(self, inbox_manager):
        """Initializer for the handler manager.

        :param inbox_manager: The InboxManager providing the inboxes handlers read from.
        """
        self._inbox_manager = inbox_manager
        # Maps handler name -> currently active runner (thread/task), or None.
        self._handler_runners = {MESSAGE: None,
         METHOD: None,
         TWIN_DP_PATCH: None}
        # Handler callables (None means "not set").
        self._on_message_received = None
        self._on_method_request_received = None
        self._on_twin_desired_properties_patch_received = None

    def _get_inbox_for_handler(self, handler_name):
        """Retrieve the inbox relevant to the handler.

        :returns: The corresponding inbox, or None if the handler has no inbox.
        """
        if handler_name == METHOD:
            return self._inbox_manager.get_method_request_inbox()
        if handler_name == TWIN_DP_PATCH:
            return self._inbox_manager.get_twin_patch_inbox()
        if handler_name == MESSAGE:
            return self._inbox_manager.get_unified_message_inbox()
        return None

    @abc.abstractmethod
    def _inbox_handler_runner(self, inbox, handler_name):
        """Run infinite loop that waits for an inbox to receive an object from it, then calls
        the handler with that object.
        """
        pass

    @abc.abstractmethod
    def _event_handler_runner(self, handler_name):
        """Run a handler in response to events (implementation-defined)."""
        pass

    @abc.abstractmethod
    def _start_handler_runner(self, handler_name):
        """Create, and store a handler runner."""
        pass

    @abc.abstractmethod
    def _stop_handler_runner(self, handler_name):
        """Cancel and remove a handler runner."""
        pass

    def _generic_handler_setter(self, handler_name, new_handler):
        """Set a handler, starting or stopping its runner as appropriate.

        :param str handler_name: Name of the handler attribute to set.
        :param new_handler: The new handler callable, or None to unset it.
        """
        curr_handler = getattr(self, handler_name)
        if new_handler is not None and curr_handler is None:
            # First time this handler is set - begin running it.
            logger.debug("Creating new handler runner for handler: {}".format(handler_name))
            setattr(self, handler_name, new_handler)
            self._start_handler_runner(handler_name)
        elif new_handler is None and curr_handler is not None:
            # Handler removed - stop the associated runner first.
            logger.debug("Removing handler runner for handler: {}".format(handler_name))
            self._stop_handler_runner(handler_name)
            setattr(self, handler_name, new_handler)
        else:
            # Handler replaced (or remains unset) - no runner change needed.
            logger.debug("Updating set handler: {}".format(handler_name))
            setattr(self, handler_name, new_handler)

    def stop(self):
        """Stop the process of invoking handlers in response to events.

        All pending items will be handled prior to stoppage.
        """
        for handler_name in self._handler_runners:
            if self._handler_runners[handler_name] is not None:
                self._stop_handler_runner(handler_name)

    def ensure_running(self):
        """Ensure the process of invoking handlers in response to events is running."""
        for handler_name in self._handler_runners:
            if self._handler_runners[handler_name] is None and getattr(self, handler_name) is not None:
                self._start_handler_runner(handler_name)

    @property
    def on_message_received(self):
        """Handler called when a message is received."""
        return self._on_message_received

    @on_message_received.setter
    def on_message_received(self, value):
        self._generic_handler_setter(MESSAGE, value)

    @property
    def on_method_request_received(self):
        """Handler called when a method request is received."""
        return self._on_method_request_received

    @on_method_request_received.setter
    def on_method_request_received(self, value):
        self._generic_handler_setter(METHOD, value)

    @property
    def on_twin_desired_properties_patch_received(self):
        """Handler called when a twin desired properties patch is received."""
        return self._on_twin_desired_properties_patch_received

    @on_twin_desired_properties_patch_received.setter
    def on_twin_desired_properties_patch_received(self, value):
        self._generic_handler_setter(TWIN_DP_PATCH, value)
class SyncHandlerManager(AbstractHandlerManager):
    """Handler manager for use with synchronous clients."""

    def _inbox_handler_runner(self, inbox, handler_name):
        """Run infinite loop that waits for an inbox to receive an object from it, then calls
        the handler with that object.
        """
        logger.debug("HANDLER RUNNER ({}): Starting runner".format(handler_name))

        def _handler_callback(future):
            # Completion callback: surface any exception raised during the
            # handler invocation as a background exception.
            try:
                e = future.exception(timeout=0)
            except Exception as raised_e:
                # future.exception() itself failed (e.g. invocation incomplete).
                # Note: decompiler residue (raised_e = None; del raised_e) removed.
                new_err = HandlerManagerException(message=("HANDLER ({}): Unable to retrieve exception data from incomplete invocation".format(handler_name)),
                  cause=raised_e)
                handle_exceptions.handle_background_exception(new_err)
            else:
                if e:
                    new_err = HandlerManagerException(message=("HANDLER ({}): Error during invocation".format(handler_name)),
                      cause=e)
                    handle_exceptions.handle_background_exception(new_err)
                else:
                    logger.debug("HANDLER ({}): Successfully completed invocation".format(handler_name))

        # Handlers run on a small worker pool so a slow handler does not block
        # retrieval of further inbox items.
        tpe = concurrent.futures.ThreadPoolExecutor(max_workers=4)
        while True:
            handler_arg = inbox.get()
            if isinstance(handler_arg, HandlerRunnerKillerSentinel):
                # Sentinel means the runner should drain and exit.
                logger.debug("HANDLER RUNNER ({}): HandlerRunnerKillerSentinel found in inbox. Exiting.".format(handler_name))
                tpe.shutdown()
                break
            handler = getattr(self, handler_name)
            logger.debug("HANDLER RUNNER ({}): Invoking handler".format(handler_name))
            fut = tpe.submit(handler, handler_arg)
            fut.add_done_callback(_handler_callback)

    def _event_handler_runner(self, handler_name):
        # Event-based handlers are not yet supported by the sync client.
        logger.error(".event_handler_runner() not yet implemented")

    def _start_handler_runner(self, handler_name):
        """Start and store a handler runner thread.

        :raises: HandlerManagerException if a runner already exists for the handler.
        """
        if self._handler_runners[handler_name] is not None:
            raise HandlerManagerException("Cannot create thread for handler runner: {}. Runner thread already exists".format(handler_name))
        inbox = self._get_inbox_for_handler(handler_name)
        if inbox:
            thread = threading.Thread(target=(self._inbox_handler_runner), args=[inbox, handler_name])
        else:
            thread = threading.Thread(target=(self._event_handler_runner), args=[handler_name])
        # Daemon thread so a hung handler cannot block interpreter exit.
        thread.daemon = True
        self._handler_runners[handler_name] = thread
        thread.start()

    def _stop_handler_runner(self, handler_name):
        """Stop and remove a handler runner task.

        All pending items in the corresponding inbox will be handled by the handler before stoppage.
        """
        logger.debug("Adding HandlerRunnerKillerSentinel to inbox corresponding to {} handler runner".format(handler_name))
        inbox = self._get_inbox_for_handler(handler_name)
        inbox._put(HandlerRunnerKillerSentinel())
        # Wait for the runner to drain the inbox and observe the sentinel.
        logger.debug("Waiting for {} handler runner to exit...".format(handler_name))
        thread = self._handler_runners[handler_name]
        thread.join()
        self._handler_runners[handler_name] = None
        logger.debug("Handler runner for {} has been stopped".format(handler_name))

View File

@@ -0,0 +1,119 @@
# uncompyle6 version 3.9.2
# Python bytecode version base 3.7.0 (3394)
# Decompiled from: Python 3.8.19 (default, Mar 20 2024, 15:27:52)
# [Clang 14.0.6 ]
# Embedded file name: /var/user/app/device_supervisorbak/device_supervisor/lib/azure/iot/device/iothub/sync_inbox.py
# Compiled at: 2024-04-18 03:12:58
# Size of source mod 2**32: 3726 bytes
"""This module contains an Inbox class for use with a synchronous client."""
from six.moves import queue
import six
from abc import ABCMeta, abstractmethod
class InboxEmpty(Exception):
    """Raised when an item is requested from an Inbox that has none to give."""
@six.add_metaclass(ABCMeta)
class AbstractInbox:
    """Abstract Base Class for Inbox.

    Holds generic incoming data for a client.

    All methods, when implemented, should be threadsafe.
    """

    @abstractmethod
    def _put(self, item):
        """Put an item into the Inbox.

        Implementation should block until a free slot is available, and MUST be
        a synchronous function. Only to be used by the InboxManager.

        :param item: The item to put in the Inbox.
        """
        pass

    @abstractmethod
    def get(self):
        """Remove and return an item from the inbox.

        Implementation should have the capability to block until an item is
        available, and may be a synchronous function or an asynchronous coroutine.

        :returns: An item from the Inbox.
        """
        pass

    @abstractmethod
    def empty(self):
        """Return True if the inbox is empty, False otherwise.

        :returns: Boolean indicating if the inbox is empty.
        """
        pass

    @abstractmethod
    def clear(self):
        """Remove all items from the inbox."""
        pass
class SyncClientInbox(AbstractInbox):
    """Holds generic incoming data for a synchronous client.

    All methods implemented in this class are threadsafe.
    """

    def __init__(self):
        """Initializer for SyncClientInbox."""
        self._queue = queue.Queue()

    def __contains__(self, item):
        """Return True if item is in Inbox, False otherwise."""
        # Hold the queue's mutex so the membership check is atomic.
        with self._queue.mutex:
            return item in self._queue.queue

    def _put(self, item):
        """Put an item into the inbox, blocking if necessary until a free slot
        is available. Only to be used by the InboxManager.

        :param item: The item to put in the inbox.
        """
        self._queue.put(item)

    def get(self, block=True, timeout=None):
        """Remove and return an item from the inbox.

        :param bool block: Indicates if the operation should block until an item is available.
            Default True.
        :param int timeout: Optionally provide a number of seconds until blocking times out.
        :raises: InboxEmpty if timeout occurs because the inbox is empty
        :raises: InboxEmpty if inbox is empty in non-blocking mode
        :returns: An item from the Inbox
        """
        try:
            retrieved = self._queue.get(block=block, timeout=timeout)
        except queue.Empty:
            raise InboxEmpty("Inbox is empty")
        return retrieved

    def empty(self):
        """Return True if the inbox is empty, False otherwise.

        Note that there is a race condition here: the queue size may change
        while this operation is occurring, so the result may not be accurate.

        :returns: Boolean indicating if the inbox is empty
        """
        return self._queue.empty()

    def join(self):
        """Block until all items in the inbox have been gotten and processed.

        Only really used for test code.
        """
        return self._queue.join()

    def clear(self):
        """Remove all items from the inbox."""
        # Hold the queue's mutex so the clear is atomic.
        with self._queue.mutex:
            self._queue.queue.clear()

View File

@@ -0,0 +1,88 @@
# uncompyle6 version 3.9.2
# Python bytecode version base 3.7.0 (3394)
# Decompiled from: Python 3.8.19 (default, Mar 20 2024, 15:27:52)
# [Clang 14.0.6 ]
# Embedded file name: /var/user/app/device_supervisorbak/device_supervisor/lib/azure/iot/device/patch.py
# Compiled at: 2024-04-18 03:12:58
# Size of source mod 2**32: 7347 bytes
"""This module provides patches used to dynamically modify items from the libraries"""
import sys, inspect, logging
logger = logging.getLogger(__name__)
shim_scope = {}
def add_shims_for_inherited_methods(target_class):
    """Dynamically add overriding, pass-through shim methods for all public inherited methods
    on a child class, which simply call into the parent class implementation of the same method.
    These shim methods will include the same docstrings as the method from the parent class.
    This currently only works for Python 3.5+
    Using DEBUG logging will allow you to see output of all dynamic operations that occur within
    for debugging purposes.
    :param target_class: The child class to add shim methods to
    """
    # Collect all functions and bound classmethods defined on (or inherited by)
    # the target class, plus attribute metadata to identify each defining class.
    class_functions = inspect.getmembers(target_class, predicate=(inspect.isfunction))
    class_methods = inspect.getmembers(target_class, predicate=(inspect.ismethod))
    all_methods = class_functions + class_methods
    class_attributes = inspect.classify_class_attrs(target_class)
    # Choose an alias for the class that does not collide with names already
    # used in the shared shim_scope (append underscores until unique).
    classname_alias = target_class.__name__
    while classname_alias in shim_scope:
        classname_alias += "_"
    class_module = inspect.getmodule(target_class)
    # Build an import command string that would bring the class into scope
    # under its alias.
    # NOTE(review): only the command strings are built and logged here - no
    # exec(...) calls appear in this (decompiled) body, so the shims are never
    # actually attached. Presumably the original source exec'd each cmdstr;
    # verify against the upstream azure-iot-device patch.py.
    import_cmdstr = "from {module} import {target_class} as {alias}".format(module=(class_module.__name__),
      target_class=(target_class.__name__),
      alias=classname_alias)
    logger.debug("exec: " + import_cmdstr)
    for method in all_methods:
        method_name = method[0]
        method_obj = method[1]
        # Find this method's metadata to learn which class defined it.
        method_attribute = [att for att in class_attributes if att.name == method_name][0]
        originating_class_obj = method_attribute.defining_class
        # Only shim public methods that are inherited (not defined on target_class).
        if method_name[0] != "_":
            if originating_class_obj != target_class:
                method_sig = inspect.signature(method_obj)
                sig_params = method_sig.parameters
                if inspect.ismethod(method_obj):
                    # Bound classmethod: re-add the implicit "cls" parameter for
                    # the shim's definition signature.
                    complete_params = []
                    complete_params.append(inspect.Parameter("cls", inspect.Parameter.POSITIONAL_OR_KEYWORD))
                    complete_params += list(sig_params.values())
                    method_sig = method_sig.replace(parameters=complete_params)
                else:
                    # Plain function: build the invocation signature by stripping
                    # defaults and the self/cls parameter.
                    # NOTE(review): in this decompiled structure invoke_params is
                    # only bound on this branch, yet it is used below for both
                    # branches - a classmethod would hit an unbound name. Confirm
                    # against the original source before relying on this path.
                    invoke_params_list = []
                    for param in sig_params.values():
                        if param.name != "self" and param.name != "cls":
                            new_param = param.replace(default=(inspect.Parameter.empty))
                            invoke_params_list.append(new_param)
                    invoke_params = method_sig.replace(parameters=invoke_params_list)
                # "cls" for classmethod shims, "self" for instance-method shims.
                if inspect.ismethod(method_obj):
                    obj_or_type = "cls"
                else:
                    obj_or_type = "self"
                # Preserve async-ness of the original method in the shim text.
                if inspect.iscoroutine(method_obj) or inspect.iscoroutinefunction(method_obj):
                    def_syntax = "async def"
                    ret_syntax = "return await"
                else:
                    def_syntax = "def"
                    ret_syntax = "return"
                # Command string defining a shim that delegates to super().
                fn_def_cmdstr = "{def_syntax} {method_name}{signature}: {ret_syntax} super({leaf_class}, {object_or_type}).{method_name}{invocation}".format(def_syntax=def_syntax,
                  method_name=method_name,
                  signature=(str(method_sig)),
                  ret_syntax=ret_syntax,
                  leaf_class=classname_alias,
                  object_or_type=obj_or_type,
                  invocation=(str(invoke_params)))
                logger.debug("exec: " + fn_def_cmdstr)
                # Command string copying the parent's docstring onto the shim.
                set_doc_cmdstr = "{method_name}.__doc__ = {leaf_class}.{method_name}.__doc__".format(method_name=method_name,
                  leaf_class=classname_alias)
                logger.debug("exec: " + set_doc_cmdstr)
                # Command string attaching the shim to the class (wrapped in
                # classmethod() when the original was a classmethod).
                if inspect.ismethod(method_obj):
                    attach_shim_cmdstr = "setattr({leaf_class}, '{method_name}', classmethod({method_name}))".format(leaf_class=classname_alias,
                      method_name=method_name)
                else:
                    attach_shim_cmdstr = "setattr({leaf_class}, '{method_name}', {method_name})".format(leaf_class=classname_alias,
                      method_name=method_name)
                logger.debug("exec: " + attach_shim_cmdstr)

View File

@@ -0,0 +1,189 @@
# uncompyle6 version 3.9.2
# Python bytecode version base 3.7.0 (3394)
# Decompiled from: Python 3.8.19 (default, Mar 20 2024, 15:27:52)
# [Clang 14.0.6 ]
# Embedded file name: /var/user/app/device_supervisorbak/device_supervisor/lib/azure/iot/device/patch_documentation.py
# Compiled at: 2024-04-18 03:12:58
# Size of source mod 2**32: 10406 bytes
"""This module provides hard coded patches used to modify items from the libraries.
Currently we have to do like this so that we don't use exec anywhere"""
def execute_patch_for_sync():
from azure.iot.device.iothub.sync_clients import IoTHubDeviceClient
def connect(self):
return super(IoTHubDeviceClient, self).connect()
connect.__doc__ = IoTHubDeviceClient.connect.__doc__
setattr(IoTHubDeviceClient, "connect", connect)
def disconnect(self):
return super(IoTHubDeviceClient, self).disconnect()
disconnect.__doc__ = IoTHubDeviceClient.disconnect.__doc__
setattr(IoTHubDeviceClient, "disconnect", disconnect)
def get_twin(self):
return super(IoTHubDeviceClient, self).get_twin()
get_twin.__doc__ = IoTHubDeviceClient.get_twin.__doc__
setattr(IoTHubDeviceClient, "get_twin", get_twin)
def patch_twin_reported_properties(self, reported_properties_patch):
return super(IoTHubDeviceClient, self).patch_twin_reported_properties(reported_properties_patch)
patch_twin_reported_properties.__doc__ = IoTHubDeviceClient.patch_twin_reported_properties.__doc__
setattr(IoTHubDeviceClient, "patch_twin_reported_properties", patch_twin_reported_properties)
def receive_method_request(self, method_name=None, block=True, timeout=None):
return super(IoTHubDeviceClient, self).receive_method_request(method_name, block, timeout)
receive_method_request.__doc__ = IoTHubDeviceClient.receive_method_request.__doc__
setattr(IoTHubDeviceClient, "receive_method_request", receive_method_request)
def receive_twin_desired_properties_patch(self, block=True, timeout=None):
return super(IoTHubDeviceClient, self).receive_twin_desired_properties_patch(block, timeout)
receive_twin_desired_properties_patch.__doc__ = IoTHubDeviceClient.receive_twin_desired_properties_patch.__doc__
setattr(IoTHubDeviceClient, "receive_twin_desired_properties_patch", receive_twin_desired_properties_patch)
def send_message(self, message):
return super(IoTHubDeviceClient, self).send_message(message)
send_message.__doc__ = IoTHubDeviceClient.send_message.__doc__
setattr(IoTHubDeviceClient, "send_message", send_message)
def send_method_response(self, method_response):
return super(IoTHubDeviceClient, self).send_method_response(method_response)
send_method_response.__doc__ = IoTHubDeviceClient.send_method_response.__doc__
setattr(IoTHubDeviceClient, "send_method_response", send_method_response)
def update_sastoken(self, sastoken):
return super(IoTHubDeviceClient, self).update_sastoken(sastoken)
update_sastoken.__doc__ = IoTHubDeviceClient.update_sastoken.__doc__
setattr(IoTHubDeviceClient, "update_sastoken", update_sastoken)
def create_from_connection_string(cls, connection_string, **kwargs):
return (super(IoTHubDeviceClient, cls).create_from_connection_string)(
connection_string, **kwargs)
create_from_connection_string.__doc__ = IoTHubDeviceClient.create_from_connection_string.__doc__
setattr(IoTHubDeviceClient, "create_from_connection_string", classmethod(create_from_connection_string))
def create_from_sastoken(cls, sastoken, **kwargs):
return (super(IoTHubDeviceClient, cls).create_from_sastoken)(sastoken, **kwargs)
create_from_sastoken.__doc__ = IoTHubDeviceClient.create_from_sastoken.__doc__
setattr(IoTHubDeviceClient, "create_from_sastoken", classmethod(create_from_sastoken))
def create_from_symmetric_key(cls, symmetric_key, hostname, device_id, **kwargs):
return (super(IoTHubDeviceClient, cls).create_from_symmetric_key)(
symmetric_key, hostname, device_id, **kwargs)
create_from_symmetric_key.__doc__ = IoTHubDeviceClient.create_from_symmetric_key.__doc__
setattr(IoTHubDeviceClient, "create_from_symmetric_key", classmethod(create_from_symmetric_key))
def create_from_x509_certificate(cls, x509, hostname, device_id, **kwargs):
return (super(IoTHubDeviceClient, cls).create_from_x509_certificate)(
x509, hostname, device_id, **kwargs)
create_from_x509_certificate.__doc__ = IoTHubDeviceClient.create_from_x509_certificate.__doc__
setattr(IoTHubDeviceClient, "create_from_x509_certificate", classmethod(create_from_x509_certificate))
from azure.iot.device.iothub.sync_clients import IoTHubModuleClient
def connect(self):
return super(IoTHubModuleClient, self).connect()
connect.__doc__ = IoTHubModuleClient.connect.__doc__
setattr(IoTHubModuleClient, "connect", connect)
def disconnect(self):
return super(IoTHubModuleClient, self).disconnect()
disconnect.__doc__ = IoTHubModuleClient.disconnect.__doc__
setattr(IoTHubModuleClient, "disconnect", disconnect)
def get_twin(self):
return super(IoTHubModuleClient, self).get_twin()
get_twin.__doc__ = IoTHubModuleClient.get_twin.__doc__
setattr(IoTHubModuleClient, "get_twin", get_twin)
def patch_twin_reported_properties(self, reported_properties_patch):
return super(IoTHubModuleClient, self).patch_twin_reported_properties(reported_properties_patch)
patch_twin_reported_properties.__doc__ = IoTHubModuleClient.patch_twin_reported_properties.__doc__
setattr(IoTHubModuleClient, "patch_twin_reported_properties", patch_twin_reported_properties)
def receive_method_request(self, method_name=None, block=True, timeout=None):
return super(IoTHubModuleClient, self).receive_method_request(method_name, block, timeout)
receive_method_request.__doc__ = IoTHubModuleClient.receive_method_request.__doc__
setattr(IoTHubModuleClient, "receive_method_request", receive_method_request)
def receive_twin_desired_properties_patch(self, block=True, timeout=None):
return super(IoTHubModuleClient, self).receive_twin_desired_properties_patch(block, timeout)
receive_twin_desired_properties_patch.__doc__ = IoTHubModuleClient.receive_twin_desired_properties_patch.__doc__
setattr(IoTHubModuleClient, "receive_twin_desired_properties_patch", receive_twin_desired_properties_patch)
def send_message(self, message):
return super(IoTHubModuleClient, self).send_message(message)
send_message.__doc__ = IoTHubModuleClient.send_message.__doc__
setattr(IoTHubModuleClient, "send_message", send_message)
def send_method_response(self, method_response):
return super(IoTHubModuleClient, self).send_method_response(method_response)
send_method_response.__doc__ = IoTHubModuleClient.send_method_response.__doc__
setattr(IoTHubModuleClient, "send_method_response", send_method_response)
def update_sastoken(self, sastoken):
return super(IoTHubModuleClient, self).update_sastoken(sastoken)
update_sastoken.__doc__ = IoTHubModuleClient.update_sastoken.__doc__
setattr(IoTHubModuleClient, "update_sastoken", update_sastoken)
def create_from_connection_string(cls, connection_string, **kwargs):
return (super(IoTHubModuleClient, cls).create_from_connection_string)(
connection_string, **kwargs)
create_from_connection_string.__doc__ = IoTHubModuleClient.create_from_connection_string.__doc__
setattr(IoTHubModuleClient, "create_from_connection_string", classmethod(create_from_connection_string))
def create_from_edge_environment(cls, **kwargs):
return (super(IoTHubModuleClient, cls).create_from_edge_environment)(**kwargs)
create_from_edge_environment.__doc__ = IoTHubModuleClient.create_from_edge_environment.__doc__
setattr(IoTHubModuleClient, "create_from_edge_environment", classmethod(create_from_edge_environment))
def create_from_sastoken(cls, sastoken, **kwargs):
    # Delegate to the parent classmethod; rebound on the class below.
    base = super(IoTHubModuleClient, cls)
    return base.create_from_sastoken(sastoken, **kwargs)

# Preserve the original docstring before rebinding as a classmethod.
create_from_sastoken.__doc__ = IoTHubModuleClient.create_from_sastoken.__doc__
IoTHubModuleClient.create_from_sastoken = classmethod(create_from_sastoken)
def create_from_x509_certificate(cls, x509, hostname, device_id, module_id, **kwargs):
    # Delegate to the parent classmethod; rebound on the class below.
    base = super(IoTHubModuleClient, cls)
    return base.create_from_x509_certificate(x509, hostname, device_id, module_id, **kwargs)

# Preserve the original docstring before rebinding as a classmethod.
create_from_x509_certificate.__doc__ = IoTHubModuleClient.create_from_x509_certificate.__doc__
IoTHubModuleClient.create_from_x509_certificate = classmethod(create_from_x509_certificate)
from azure.iot.device.provisioning.provisioning_device_client import ProvisioningDeviceClient
def create_from_symmetric_key(cls, provisioning_host, registration_id, id_scope, symmetric_key, **kwargs):
    # Delegate to the parent classmethod; rebound on the class below.
    base = super(ProvisioningDeviceClient, cls)
    return base.create_from_symmetric_key(provisioning_host, registration_id, id_scope, symmetric_key, **kwargs)

# Preserve the original docstring before rebinding as a classmethod.
create_from_symmetric_key.__doc__ = ProvisioningDeviceClient.create_from_symmetric_key.__doc__
ProvisioningDeviceClient.create_from_symmetric_key = classmethod(create_from_symmetric_key)
def create_from_x509_certificate(cls, provisioning_host, registration_id, id_scope, x509, **kwargs):
    # Delegate to the parent classmethod; rebound on the class below.
    base = super(ProvisioningDeviceClient, cls)
    return base.create_from_x509_certificate(provisioning_host, registration_id, id_scope, x509, **kwargs)

# Preserve the original docstring before rebinding as a classmethod.
create_from_x509_certificate.__doc__ = ProvisioningDeviceClient.create_from_x509_certificate.__doc__
ProvisioningDeviceClient.create_from_x509_certificate = classmethod(create_from_x509_certificate)

View File

@@ -0,0 +1,17 @@
# uncompyle6 version 3.9.2
# Python bytecode version base 3.7.0 (3394)
# Decompiled from: Python 3.8.19 (default, Mar 20 2024, 15:27:52)
# [Clang 14.0.6 ]
# Embedded file name: /var/user/app/device_supervisorbak/device_supervisor/lib/azure/iot/device/provisioning/__init__.py
# Compiled at: 2024-04-18 03:12:59
# Size of source mod 2**32: 440 bytes
"""Azure Provisioning Device Library
This library provides functionality that enables zero-touch, just-in-time provisioning to the right IoT hub without requiring
human intervention, enabling customers to provision millions of devices in a secure and scalable manner.
"""
from .provisioning_device_client import ProvisioningDeviceClient
from .models import RegistrationResult
__all__ = [
"ProvisioningDeviceClient", "RegistrationResult"]

View File

@@ -0,0 +1,197 @@
# uncompyle6 version 3.9.2
# Python bytecode version base 3.7.0 (3394)
# Decompiled from: Python 3.8.19 (default, Mar 20 2024, 15:27:52)
# [Clang 14.0.6 ]
# Embedded file name: /var/user/app/device_supervisorbak/device_supervisor/lib/azure/iot/device/provisioning/abstract_provisioning_device_client.py
# Compiled at: 2024-04-18 03:12:59
# Size of source mod 2**32: 10465 bytes
"""
This module provides an abstract interface representing clients which can communicate with the
Device Provisioning Service.
"""
import abc, six, logging
from azure.iot.device.provisioning import pipeline
from azure.iot.device.common.auth import sastoken as st
from azure.iot.device.common import auth
logger = logging.getLogger(__name__)
def _validate_kwargs(exclude=[], **kwargs):
"""Helper function to validate user provided kwargs.
Raises TypeError if an invalid option has been provided"""
valid_kwargs = [
'websockets', 'cipher', 'proxy_options', 'sastoken_ttl', 'keep_alive']
for kwarg in kwargs:
if kwarg not in valid_kwargs or kwarg in exclude:
raise TypeError("Unsupported keyword argument '{}'".format(kwarg))
def validate_registration_id(reg_id):
    """Reject a registration id that is None, empty, or whitespace-only.

    :param reg_id: Candidate registration id string.
    :raises: ValueError if the id is missing or blank.
    """
    # De Morgan form of the original `not (reg_id and reg_id.strip())`.
    if not reg_id or not reg_id.strip():
        raise ValueError("Registration Id can not be none, empty or blank.")
def _get_config_kwargs(**kwargs):
"""Get the subset of kwargs which pertain the config object"""
valid_config_kwargs = [
"websockets", "cipher", "proxy_options", "keep_alive"]
config_kwargs = {}
for kwarg in kwargs:
if kwarg in valid_config_kwargs:
config_kwargs[kwarg] = kwargs[kwarg]
return config_kwargs
def _form_sas_uri(id_scope, registration_id):
return "{id_scope}/registrations/{registration_id}".format(id_scope=id_scope,
registration_id=registration_id)
@six.add_metaclass(abc.ABCMeta)
class AbstractProvisioningDeviceClient(object):
    """
    Super class for any client that can be used to register devices to Device Provisioning Service.
    """

    def __init__(self, pipeline):
        """
        Initializes the provisioning client.

        NOTE: This initializer should not be called directly.
        Instead, the class methods that start with `create_from_` should be used to create a
        client object.

        :param pipeline: Instance of the provisioning pipeline object.
        :type pipeline: :class:`azure.iot.device.provisioning.pipeline.MQTTPipeline`
        """
        self._pipeline = pipeline
        self._provisioning_payload = None

    @classmethod
    def create_from_symmetric_key(cls, provisioning_host, registration_id, id_scope, symmetric_key, **kwargs):
        """
        Create a client which can be used to run the registration of a device with provisioning service
        using Symmetric Key authentication.

        :param str provisioning_host: Host running the Device Provisioning Service.
            Can be found in the Azure portal in the Overview tab as the string Global device endpoint.
        :param str registration_id: The registration ID used to uniquely identify a device in the
            Device Provisioning Service. The registration ID is alphanumeric, lowercase string
            and may contain hyphens.
        :param str id_scope: The ID scope used to uniquely identify the specific provisioning
            service the device will register through. The ID scope is assigned to a
            Device Provisioning Service when it is created by the user and is generated by the
            service and is immutable, guaranteeing uniqueness.
        :param str symmetric_key: The key which will be used to create the shared access signature
            token to authenticate the device with the Device Provisioning Service. By default,
            the Device Provisioning Service creates new symmetric keys with a default length of
            32 bytes when new enrollments are saved with the Auto-generate keys option enabled.
            Users can provide their own symmetric keys for enrollments by disabling this option,
            provided the keys are within 16 bytes and 64 bytes and in valid Base64 format.
        :param bool websockets: Configuration Option. Default is False. Set to true if using MQTT
            over websockets.
        :param cipher: Configuration Option. Cipher suite(s) for TLS/SSL, as a string in
            "OpenSSL cipher list format" or as a list of cipher suite strings.
        :type cipher: str or list(str)
        :param proxy_options: Options for sending traffic through proxy servers.
        :type proxy_options: :class:`azure.iot.device.ProxyOptions`
        :param int keep_alive: Maximum period in seconds between communications with the
            broker. If no other messages are being exchanged, this controls the
            rate at which the client will send ping messages to the broker.
            If not provided default value of 60 secs will be used.
        :raises: TypeError if given an unrecognized parameter.
        :raises: ValueError if the registration id is blank or a SAS token cannot be created.
        :returns: A ProvisioningDeviceClient instance which can register via Symmetric Key.
        """
        validate_registration_id(registration_id)
        _validate_kwargs(**kwargs)
        uri = _form_sas_uri(id_scope=id_scope, registration_id=registration_id)
        signing_mechanism = auth.SymmetricKeySigningMechanism(key=symmetric_key)
        token_ttl = kwargs.get("sastoken_ttl", 3600)
        try:
            sastoken = st.RenewableSasToken(uri, signing_mechanism, ttl=token_ttl)
        except st.SasTokenError as e:
            # Surface SAS failures as ValueError with the original error chained as cause.
            raise ValueError("Could not create a SasToken using the provided values") from e
        config_kwargs = _get_config_kwargs(**kwargs)
        # NOTE(review): the decompiled source contained invalid syntax here
        # ("(**, **config_kwargs)"). The keyword arguments below are reconstructed
        # from the upstream azure-iot-device SDK -- confirm against the installed version.
        pipeline_configuration = pipeline.ProvisioningPipelineConfig(
            hostname=provisioning_host,
            registration_id=registration_id,
            id_scope=id_scope,
            sastoken=sastoken,
            **config_kwargs
        )
        mqtt_provisioning_pipeline = pipeline.MQTTPipeline(pipeline_configuration)
        return cls(mqtt_provisioning_pipeline)

    @classmethod
    def create_from_x509_certificate(cls, provisioning_host, registration_id, id_scope, x509, **kwargs):
        """
        Create a client which can be used to run the registration of a device with
        provisioning service using X509 certificate authentication.

        :param str provisioning_host: Host running the Device Provisioning Service. Can be found in
            the Azure portal in the Overview tab as the string Global device endpoint.
        :param str registration_id: The registration ID used to uniquely identify a device in the
            Device Provisioning Service. The registration ID is alphanumeric, lowercase string
            and may contain hyphens.
        :param str id_scope: The ID scope is used to uniquely identify the specific
            provisioning service the device will register through. The ID scope is assigned to a
            Device Provisioning Service when it is created by the user and is generated by the
            service and is immutable, guaranteeing uniqueness.
        :param x509: The x509 certificate. To use the certificate the enrollment object needs to
            contain cert (either the root certificate or one of the intermediate CA certificates).
            If the cert comes from a CER file, it needs to be base64 encoded.
        :type x509: :class:`azure.iot.device.X509`
        :param bool websockets: Configuration Option. Default is False. Set to true if using MQTT
            over websockets.
        :param cipher: Configuration Option. Cipher suite(s) for TLS/SSL, as a string in
            "OpenSSL cipher list format" or as a list of cipher suite strings.
        :type cipher: str or list(str)
        :param proxy_options: Options for sending traffic through proxy servers.
        :type proxy_options: :class:`azure.iot.device.ProxyOptions`
        :param int keep_alive: Maximum period in seconds between communications with the
            broker. If no other messages are being exchanged, this controls the
            rate at which the client will send ping messages to the broker.
            If not provided default value of 60 secs will be used.
        :raises: TypeError if given an unrecognized parameter.
        :raises: ValueError if the registration id is blank.
        :returns: A ProvisioningDeviceClient which can register via X509 client certificates.
        """
        validate_registration_id(registration_id)
        # SAS token TTL is meaningless for X509 authentication, so disallow it.
        excluded_kwargs = [
            "sastoken_ttl"]
        _validate_kwargs(exclude=excluded_kwargs, **kwargs)
        config_kwargs = _get_config_kwargs(**kwargs)
        # NOTE(review): reconstructed keyword arguments; see create_from_symmetric_key.
        pipeline_configuration = pipeline.ProvisioningPipelineConfig(
            hostname=provisioning_host,
            registration_id=registration_id,
            id_scope=id_scope,
            x509=x509,
            **config_kwargs
        )
        mqtt_provisioning_pipeline = pipeline.MQTTPipeline(pipeline_configuration)
        return cls(mqtt_provisioning_pipeline)

    @abc.abstractmethod
    def register(self):
        """
        Register the device with the Device Provisioning Service.
        """
        pass

    @property
    def provisioning_payload(self):
        # The custom payload (if any) that will accompany the registration request.
        return self._provisioning_payload

    @provisioning_payload.setter
    def provisioning_payload(self, provisioning_payload):
        """
        Set the payload that will form the request payload in a registration request.

        :param provisioning_payload: The payload that can be supplied by the user.
        :type provisioning_payload: This can be an object or dictionary or a string or an integer.
        """
        self._provisioning_payload = provisioning_payload
def log_on_register_complete(result=None):
    """Log the outcome of a registration attempt based on ``result.status``.

    A ``None`` result logs nothing.
    """
    # Guard clause instead of nesting the whole body under `if result is not None`.
    if result is None:
        return
    if result.status == "assigned":
        logger.info("Successfully registered with Provisioning Service")
    else:
        logger.info("Failed registering with Provisioning Service")

View File

@@ -0,0 +1,15 @@
# uncompyle6 version 3.9.2
# Python bytecode version base 3.7.0 (3394)
# Decompiled from: Python 3.8.19 (default, Mar 20 2024, 15:27:52)
# [Clang 14.0.6 ]
# Embedded file name: /var/user/app/device_supervisorbak/device_supervisor/lib/azure/iot/device/provisioning/aio/__init__.py
# Compiled at: 2024-04-18 03:12:59
# Size of source mod 2**32: 282 bytes
"""Azure IoT Provisioning Service SDK - Asynchronous
This SDK provides asynchronous functionality for communicating with the Azure Provisioning Service
as a Device.
"""
from .async_provisioning_device_client import ProvisioningDeviceClient
__all__ = [
"ProvisioningDeviceClient"]

View File

@@ -0,0 +1,103 @@
# uncompyle6 version 3.9.2
# Python bytecode version base 3.7.0 (3394)
# Decompiled from: Python 3.8.19 (default, Mar 20 2024, 15:27:52)
# [Clang 14.0.6 ]
# Embedded file name: /var/user/app/device_supervisorbak/device_supervisor/lib/azure/iot/device/provisioning/aio/async_provisioning_device_client.py
# Compiled at: 2024-04-18 03:12:59
# Size of source mod 2**32: 4263 bytes
"""
This module contains user-facing asynchronous Provisioning Device Client for Azure Provisioning
Device SDK. This client uses Symmetric Key and X509 authentication to register devices with an
IoT Hub via the Device Provisioning Service.
"""
import logging
from azure.iot.device.common import async_adapter
from azure.iot.device.provisioning.abstract_provisioning_device_client import AbstractProvisioningDeviceClient
from azure.iot.device.provisioning.abstract_provisioning_device_client import log_on_register_complete
from azure.iot.device.provisioning.pipeline import exceptions as pipeline_exceptions
from azure.iot.device import exceptions
from azure.iot.device.provisioning.pipeline import constant as dps_constant
logger = logging.getLogger(__name__)
async def handle_result(callback):
    """Await *callback*'s completion, translating pipeline-layer errors into
    the public client exception types (original error chained via ``cause``)."""
    try:
        return await callback.completion()
    except pipeline_exceptions.ConnectionDroppedError as e:
        raise exceptions.ConnectionDroppedError(message="Lost connection to IoTHub", cause=e)
    except pipeline_exceptions.ConnectionFailedError as e:
        raise exceptions.ConnectionFailedError(message="Could not connect to IoTHub", cause=e)
    except pipeline_exceptions.UnauthorizedError as e:
        raise exceptions.CredentialError(message="Credentials invalid, could not connect", cause=e)
    except pipeline_exceptions.ProtocolClientError as e:
        raise exceptions.ClientError(message="Error in the IoTHub client", cause=e)
    except Exception as e:
        # Catch-all so callers only ever see the public exception hierarchy.
        raise exceptions.ClientError(message="Unexpected failure", cause=e)
class ProvisioningDeviceClient(AbstractProvisioningDeviceClient):
    """
    Client which can be used to run the registration of a device with provisioning service
    using Symmetric Key or X509 authentication.
    """

    async def register(self):
        """
        Register the device with the provisioning service.

        Before returning the client will also disconnect from the provisioning service.
        If a registration attempt is made while a previous registration is in progress it may
        throw an error.

        :returns: RegistrationResult indicating the result of the registration.
        :rtype: :class:`azure.iot.device.RegistrationResult`
        :raises: :class:`azure.iot.device.exceptions.CredentialError` if credentials are invalid
            and a connection cannot be established.
        :raises: :class:`azure.iot.device.exceptions.ConnectionFailedError` if establishing a
            connection results in failure.
        :raises: :class:`azure.iot.device.exceptions.ConnectionDroppedError` if connection is lost
            during execution.
        :raises: :class:`azure.iot.device.exceptions.ClientError` if there is an unexpected failure
            during execution.
        """
        logger.info("Registering with Provisioning Service...")
        # Make sure the pipeline is subscribed to register responses first.
        if not self._pipeline.responses_enabled[dps_constant.REGISTER]:
            await self._enable_responses()
        run_register = async_adapter.emulate_async(self._pipeline.register)
        completion_cb = async_adapter.AwaitableCallback(return_arg_name="result")
        await run_register(payload=self._provisioning_payload, callback=completion_cb)
        registration_result = await handle_result(completion_cb)
        log_on_register_complete(registration_result)
        return registration_result

    async def _enable_responses(self):
        """Enable to receive responses from Device Provisioning Service."""
        logger.info("Enabling reception of response from Device Provisioning Service...")
        run_subscribe = async_adapter.emulate_async(self._pipeline.enable_responses)
        subscribe_done = async_adapter.AwaitableCallback()
        await run_subscribe(callback=subscribe_done)
        await handle_result(subscribe_done)
        logger.info("Successfully subscribed to Device Provisioning Service to receive responses")

View File

@@ -0,0 +1,12 @@
# uncompyle6 version 3.9.2
# Python bytecode version base 3.7.0 (3394)
# Decompiled from: Python 3.8.19 (default, Mar 20 2024, 15:27:52)
# [Clang 14.0.6 ]
# Embedded file name: /var/user/app/device_supervisorbak/device_supervisor/lib/azure/iot/device/provisioning/models/__init__.py
# Compiled at: 2024-04-18 03:12:59
# Size of source mod 2**32: 180 bytes
"""Azure Provisioning Device Models
This package provides object models for use within the Azure Provisioning Device SDK.
"""
from .registration_result import RegistrationResult

View File

@@ -0,0 +1,93 @@
# uncompyle6 version 3.9.2
# Python bytecode version base 3.7.0 (3394)
# Decompiled from: Python 3.8.19 (default, Mar 20 2024, 15:27:52)
# [Clang 14.0.6 ]
# Embedded file name: /var/user/app/device_supervisorbak/device_supervisor/lib/azure/iot/device/provisioning/models/registration_result.py
# Compiled at: 2024-04-18 03:12:59
# Size of source mod 2**32: 4292 bytes
import json
class RegistrationResult(object):
    """
    The final result of a completed or failed registration attempt.

    :ivar operation_id: The id of the operation as returned by the registration request.
    :ivar status: The status of the registration process as returned by the provisioning service.
        Values can be "unassigned", "assigning", "assigned", "failed", "disabled".
    :ivar registration_state: Details like device id, assigned hub, date times etc. returned
        from the provisioning service.
    """

    def __init__(self, operation_id, status, registration_state=None):
        """
        :param operation_id: The id of the operation as returned by the initial registration request.
        :param status: The status of the registration process.
            Values can be "unassigned", "assigning", "assigned", "failed", "disabled".
        :param registration_state: Details like device id, assigned hub, date times etc. returned
            from the provisioning service.
        """
        self._operation_id = operation_id
        self._status = status
        self._registration_state = registration_state

    @property
    def operation_id(self):
        # Read-only accessor; the result is immutable once constructed.
        return self._operation_id

    @property
    def status(self):
        # Read-only accessor for the service-reported status string.
        return self._status

    @property
    def registration_state(self):
        # Read-only accessor for the detailed registration state (may be None).
        return self._registration_state

    def __str__(self):
        # Two lines: the stringified state followed by the status.
        return str(self.registration_state) + "\n" + self.status
class RegistrationState(object):
    """
    The registration state regarding the device.

    :ivar device_id: Desired device id for the provisioned device
    :ivar assigned_hub: Desired IoT Hub to which the device is linked.
    :ivar sub_status: Substatus for 'Assigned' devices. Possible values are
        "initialAssignment", "deviceDataMigrated", "deviceDataReset"
    :ivar created_date_time: Registration create date time (in UTC).
    :ivar last_update_date_time: Last updated date time (in UTC).
    :ivar etag: The entity tag associated with the resource.
    """

    def __init__(self, device_id=None, assigned_hub=None, sub_status=None, created_date_time=None, last_update_date_time=None, etag=None, payload=None):
        """
        :param device_id: Desired device id for the provisioned device
        :param assigned_hub: Desired IoT Hub to which the device is linked.
        :param sub_status: Substatus for 'Assigned' devices. Possible values are
            "initialAssignment", "deviceDataMigrated", "deviceDataReset"
        :param created_date_time: Registration create date time (in UTC).
        :param last_update_date_time: Last updated date time (in UTC).
        :param etag: The entity tag associated with the resource.
        :param payload: The payload with which hub is responding
        """
        self._device_id = device_id
        self._assigned_hub = assigned_hub
        self._sub_status = sub_status
        self._created_date_time = created_date_time
        self._last_update_date_time = last_update_date_time
        self._etag = etag
        self._response_payload = payload

    @property
    def device_id(self):
        return self._device_id

    @property
    def assigned_hub(self):
        return self._assigned_hub

    @property
    def sub_status(self):
        return self._sub_status

    @property
    def created_date_time(self):
        return self._created_date_time

    @property
    def last_update_date_time(self):
        return self._last_update_date_time

    @property
    def etag(self):
        return self._etag

    @property
    def response_payload(self):
        # Serialize arbitrary payload objects via their __dict__; sort_keys gives
        # deterministic output. None serializes to the JSON literal "null".
        return json.dumps(self._response_payload, default=(lambda o: o.__dict__), sort_keys=True)

    def __str__(self):
        # Coerce each field with str() so that unset (None) fields no longer make
        # "\n".join raise TypeError -- previously str() on a default-constructed
        # instance crashed.
        return "\n".join(str(part) for part in (
            self.device_id, self.assigned_hub, self.sub_status, self.response_payload))

View File

@@ -0,0 +1,15 @@
# uncompyle6 version 3.9.2
# Python bytecode version base 3.7.0 (3394)
# Decompiled from: Python 3.8.19 (default, Mar 20 2024, 15:27:52)
# [Clang 14.0.6 ]
# Embedded file name: /var/user/app/device_supervisorbak/device_supervisor/lib/azure/iot/device/provisioning/pipeline/__init__.py
# Compiled at: 2024-04-18 03:12:58
# Size of source mod 2**32: 244 bytes
"""Azure Provisioning Device Communication Pipeline
This package provides pipeline for use with the Azure Provisioning Device SDK.
INTERNAL USAGE ONLY
"""
from .mqtt_pipeline import MQTTPipeline
from .config import ProvisioningPipelineConfig

Some files were not shown because too many files have changed in this diff Show More