Skip to content

Onionbalance API

common

argparser

get_common_argparser()

Parses and returns command line arguments.

Source code in repos/onionbalance/onionbalance/common/argparser.py
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
def get_common_argparser():
    """
    Parses and returns command line arguments.

    Returns:
        argparse.ArgumentParser: parser pre-populated with the command line
        options shared by the onionbalance daemons (Tor controller location,
        config file path, logging verbosity, etc.).
    """

    # BUG FIX: description said "a Tor hidden services" (number mismatch).
    parser = argparse.ArgumentParser(
        description="onionbalance distributes the requests for a Tor hidden "
        "service across multiple Tor instances.")

    parser.add_argument("--hs-version", type=str, default='v3',
                        choices=('v3',),
                        help="Onion service version (only v3 is supported)")

    parser.add_argument("-i", "--ip", type=str, default='127.0.0.1',
                        help="Tor controller IP address")

    parser.add_argument("-p", "--port", type=int, default=9051,
                        help="Tor controller port")

    parser.add_argument("-s", "--socket", type=str, default=TOR_CONTROL_SOCKET,
                        help="Tor unix domain control socket location")

    parser.add_argument("-c", "--config", type=str,
                        default=os.environ.get('ONIONBALANCE_CONFIG',
                                               "config.yaml"),
                        help="Config file location")

    # BUG FIX: the help text previously contained an unmatched closing
    # parenthesis after "critical".
    parser.add_argument("-v", "--verbosity", type=str, default=None,
                        help="Minimum verbosity level for logging.  Available "
                             "in ascending order: debug, info, warning, "
                             "error, critical.  The default is info.")

    parser.add_argument('--version', action='version',
                        version='onionbalance %s' % onionbalance.__version__)

    parser.add_argument("--is-testnet", action='store_true',
                        help="Is this onionbalance on a test net? (Default: no)")

    return parser

descriptor

upload_descriptor(controller, signed_descriptor, hsdirs=None, v3_onion_address=None)

Upload descriptor via the Tor control port

If no HSDirs are specified, Tor will upload to what it thinks are the responsible directories

If 'v3_onion_address' is set, this is a v3 HSPOST request, and the address needs to be embedded in the request.

Source code in repos/onionbalance/onionbalance/common/descriptor.py
 8
 9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
def upload_descriptor(controller, signed_descriptor, hsdirs=None, v3_onion_address=None):
    """
    Upload descriptor via the Tor control port

    If no HSDirs are specified, Tor will upload to what it thinks are the
    responsible directories

    If 'v3_onion_address' is set, this is a v3 HSPOST request, and the address
    needs to be embedded in the request.
    """
    logger.debug("Beginning service descriptor upload.")

    # Build the optional SERVER=... arguments for the HSPOST command.
    args = []
    if hsdirs:
        args.extend("SERVER={}".format(hsdir) for hsdir in hsdirs)
    server_args = ' '.join(args)

    if v3_onion_address:
        server_args += " HSADDRESS=%s" % v3_onion_address.replace(".onion", "")

    # Stem will insert the leading + and trailing '\r\n.\r\n'
    response = controller.msg("HSPOST %s\n%s" %
                              (server_args, signed_descriptor))

    code, _divider, content = response.content()[0]
    if response.is_ok():
        return
    if code == "552":
        raise stem.InvalidRequest(code, content)
    raise stem.ProtocolError("HSPOST returned unexpected response "
                             "code: %s\n%s" % (code, content))

instance

Instance

Bases: object

Instance represents a back-end load balancing hidden service.

Source code in repos/onionbalance/onionbalance/common/instance.py
 51
 52
 53
 54
 55
 56
 57
 58
 59
 60
 61
 62
 63
 64
 65
 66
 67
 68
 69
 70
 71
 72
 73
 74
 75
 76
 77
 78
 79
 80
 81
 82
 83
 84
 85
 86
 87
 88
 89
 90
 91
 92
 93
 94
 95
 96
 97
 98
 99
100
101
102
103
class Instance(object):
    """
    A single back-end hidden service taking part in the load balancing.
    """

    def __init__(self, controller, onion_address, authentication_cookie=None):
        """
        Initialise an Instance object.
        """
        self.controller = controller

        # Store the address without any '.onion' suffix.
        self.onion_address = (onion_address.replace('.onion', '')
                              if onion_address else onion_address)

        # Set when this instance's introduction points change; a fresh
        # master descriptor is then published.
        self.intro_set_changed_since_published = False

    def fetch_descriptor(self):
        """
        Try fetch a fresh descriptor for this service instance from the HSDirs
        """
        logger.debug("Trying to fetch a descriptor for instance %s.onion.",
                     self.onion_address)
        try:
            self.controller.get_hidden_service_descriptor(self.onion_address,
                                                          await_result=False)
        except stem.SocketClosed:
            # Tor may be restarting; let the caller handle reconnection.
            raise
        except stem.DescriptorUnavailable:
            # The descriptor could not be found on the HSDirs.
            self.received = None
            logger.warning("No descriptor received for instance %s.onion, "
                           "the instance may be offline.", self.onion_address)

    def __eq__(self, other):
        """
        Two instances are equal when their onion addresses match.
        """
        return (isinstance(other, Instance)
                and self.onion_address == other.onion_address)

    def __hash__(self):
        """
        Hash on the onion address so instances can live in sets.
        """
        return hash(self.onion_address)
__eq__(other)

Instance objects are equal if they have the same onion address.

Source code in repos/onionbalance/onionbalance/common/instance.py
90
91
92
93
94
95
96
97
def __eq__(self, other):
    """
    Instance objects are equal if they have the same onion address.
    """
    return (isinstance(other, Instance)
            and self.onion_address == other.onion_address)
__hash__()

Define __hash__ method allowing for set comparison between instances.

Source code in repos/onionbalance/onionbalance/common/instance.py
 99
100
101
102
103
def __hash__(self):
    """
    Define __hash__ method allowing for set comparison between instances.
    """
    return hash(self.onion_address)
__init__(controller, onion_address, authentication_cookie=None)

Initialise an Instance object.

Source code in repos/onionbalance/onionbalance/common/instance.py
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
def __init__(self, controller, onion_address, authentication_cookie=None):
    """
    Initialise an Instance object.
    """
    self.controller = controller

    # Onion address for the service instance.
    if onion_address:
        onion_address = onion_address.replace('.onion', '')
    self.onion_address = onion_address

    # Flag this instance with its introduction points change. A new
    # master descriptor will then be published as the introduction
    # points have changed.
    self.intro_set_changed_since_published = False
fetch_descriptor()

Try fetch a fresh descriptor for this service instance from the HSDirs

Source code in repos/onionbalance/onionbalance/common/instance.py
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
def fetch_descriptor(self):
    """
    Try fetch a fresh descriptor for this service instance from the HSDirs

    The fetch is issued asynchronously (await_result=False); the result,
    if any, is presumably delivered later through the controller's event
    stream — TODO confirm against the caller's HS_DESC listeners.
    """
    logger.debug("Trying to fetch a descriptor for instance %s.onion.",
                 self.onion_address)
    try:
        self.controller.get_hidden_service_descriptor(self.onion_address,
                                                      await_result=False)
    except stem.SocketClosed:
        # Tor maybe restarting.
        raise
    except stem.DescriptorUnavailable:
        # Could not find the descriptor on the HSDir
        # NOTE(review): 'received' is not initialised in __init__, only
        # cleared here — confirm callers tolerate its absence.
        self.received = None
        logger.warning("No descriptor received for instance %s.onion, "
                       "the instance may be offline.", self.onion_address)

helper_fetch_all_instance_descriptors(controller, instances, control_password=None)

Try fetch fresh descriptors for all HS instances

Source code in repos/onionbalance/onionbalance/common/instance.py
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
def helper_fetch_all_instance_descriptors(controller, instances, control_password=None):
    """
    Try fetch fresh descriptors for all HS instances

    Args:
        controller: stem controller connected to the Tor control port.
        instances: iterable of Instance objects to refresh.
        control_password: optional password used when re-authenticating
            after the control socket drops.
    """
    logger.info("Initiating fetch of descriptors for all service instances.")

    # pylint: disable=no-member

    while True:
        try:
            # Clear Tor descriptor cache before making fetches by sending
            # the NEWNYM signal (comment typo fixed; redundant 'pass' removed)
            controller.signal(stem.control.Signal.NEWNYM)
            time.sleep(5)  # Sleep to allow Tor time to build new circuits
        except stem.SocketClosed:
            logger.error("Failed to send NEWNYM signal, socket is closed.")
            onionbalance.common.util.reauthenticate(controller, logger, control_password)
        else:
            break

    unique_instances = set(instances)

    # Only try to retrieve the descriptor once for each unique instance
    # address. An instance may be configured under multiple master
    # addresses. We do not want to request the same instance descriptor
    # multiple times.
    # Onionbalance will update all of the matching instances when a
    # descriptor is received.
    for instance in unique_instances:
        while True:
            try:
                instance.fetch_descriptor()
            except stem.SocketClosed:
                logger.error("Failed to fetch descriptor, socket is closed")
                onionbalance.common.util.reauthenticate(controller, logger, control_password)
            else:
                break

intro_point_set

IntroductionPointSet

Bases: object

A set of introduction points to be included in a HS descriptor.

Provided with a list of available introduction points for each backend instance for an onionbalance service. This object will store the set of available introduction points and allow IPs to be selected from the available set.

This class tracks which introduction points have already been provided and tries to provide the most diverse set of IPs.

Source code in repos/onionbalance/onionbalance/common/intro_point_set.py
 5
 6
 7
 8
 9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
class IntroductionPointSet(object):
    """
    A pool of introduction points available for inclusion in a HS descriptor.

    Built from one list of introduction points per backend instance of an
    onionbalance service. The pool stores the available points and hands
    them out on request.

    It remembers which points have already been provided and tries to
    offer the most diverse selection possible.
    """

    def __init__(self, intro_points):
        """
        'intro_points' is a list of lists that looks like this:
        [
          [<intro#1 of Instance#1, intro#2 of Instance#1...>],
          [<intro#1 of Instance#2, intro#2 of Instance#2...>],
          ...
        ]
        """
        # Shuffle up front so later .choose() calls can be deterministic.
        for per_instance in intro_points:
            random.shuffle(per_instance)
        random.shuffle(intro_points)

        self.intro_points = intro_points
        self._intro_point_generator = self._get_intro_point()

    def __len__(self):
        """Total number of introduction points currently available."""
        total = 0
        for per_instance in self.intro_points:
            total += len(per_instance)
        return total

    def _get_intro_point(self):
        """
        [Private function]

        Generator yielding introduction points breadth-first across the
        backend instances. The stream is wrapped in `itertools.cycle`,
        so it never runs out.
        """
        # Interleave the per-instance lists, then flatten; zip_longest
        # pads with None, which is filtered before yielding.
        interleaved = itertools.chain.from_iterable(
            itertools.zip_longest(*self.intro_points))
        for candidate in itertools.cycle(interleaved):
            if candidate:
                yield candidate

    def choose(self, count=10, shuffle=True):
        """
        [Public API]

        Retrieve N introduction points from the set of IPs

        When more than `count` IPs are available, the selection aims for
        the widest spread across the available backend instances.

        Return a list of IntroductionPoints.
        """
        # Cap `count` at the pool size to avoid handing out repeats.
        chosen = list(itertools.islice(self._intro_point_generator,
                                       min(len(self), count)))
        if shuffle:
            random.shuffle(chosen)
        return chosen
__init__(intro_points)

'intro_points' is a list of lists: one list of introduction points per backend instance, e.g. [[intro#1 of Instance#1, intro#2 of Instance#1, ...], [intro#1 of Instance#2, ...], ...]

Source code in repos/onionbalance/onionbalance/common/intro_point_set.py
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
def __init__(self, intro_points):
    """
    'intro_points' is a list of lists that looks like this:
    [
      [<intro#1 of Instance#1, intro#2 of Instance#1...>],
      [<intro#1 of Instance#2, intro#2 of Instance#2...>],
      [<intro#1 of Instance#3, intro#2 of Instance#3...>],
      ...
    ]
    """
    # Shuffle the introduction point order before selecting IPs.
    # Randomizing now allows later calls to .choose() to be
    # deterministic.
    for instance_intro_points in intro_points:
        random.shuffle(instance_intro_points)
    random.shuffle(intro_points)

    self.intro_points = intro_points
    self._intro_point_generator = self._get_intro_point()
__len__()

Provide the total number of available introduction points

Source code in repos/onionbalance/onionbalance/common/intro_point_set.py
38
39
40
def __len__(self):
    """Provide the total number of available introduction points"""
    return sum(len(ips) for ips in self.intro_points)
choose(count=10, shuffle=True)

[Public API]

Retrieve N introduction points from the set of IPs

Where more than count IPs are available, introduction points are selected to try and achieve the greatest distribution of introduction points across all of the available backend instances.

Return a list of IntroductionPoints.

Source code in repos/onionbalance/onionbalance/common/intro_point_set.py
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
def choose(self, count=10, shuffle=True):
    """
    [Public API]

    Retrieve N introduction points from the set of IPs

    Where more than `count` IPs are available, introduction points are
    selected to try and achieve the greatest distribution of introduction
    points across all of the available backend instances.

    Return a list of IntroductionPoints.
    """
    # Never hand out more points than exist, to avoid repeats.
    limit = min(len(self), count)
    chosen = list(itertools.islice(self._intro_point_generator, limit))

    if shuffle:
        random.shuffle(chosen)
    return chosen

log

get_config_generator_logger()

Simplified logger for interactive config generator CLI

Source code in repos/onionbalance/onionbalance/common/log.py
32
33
34
35
36
37
38
39
40
41
42
43
def get_config_generator_logger():
    """
    Simplified logger for interactive config generator CLI
    """
    stream_handler = logging.StreamHandler()
    stream_handler.setFormatter(
        logging.Formatter(fmt="[%(levelname)s]: %(message)s"))

    cli_logger = logging.getLogger("onionbalance-config")
    cli_logger.addHandler(stream_handler)
    cli_logger.setLevel(logging.INFO)
    return cli_logger

get_logger()

Returns a logger.

Source code in repos/onionbalance/onionbalance/common/log.py
14
15
16
17
18
def get_logger():
    """
    Returns a logger.
    """
    # Hand back the shared module-level logger instance.
    return logger

setup_file_logger(log_file)

Add log file handler to the existing logger

Source code in repos/onionbalance/onionbalance/common/log.py
21
22
23
24
25
26
27
28
29
def setup_file_logger(log_file):
    """
    Add log file handler to the existing logger
    """
    # Rotate at 10 MiB, keeping up to three old log files.
    file_handler = logging.handlers.RotatingFileHandler(
        log_file, maxBytes=10485760, backupCount=3)
    fmt = "%(asctime)s [%(levelname)s]: %(message)s"
    file_handler.setFormatter(logging.Formatter(fmt=fmt))
    logging.getLogger('onionbalance').addHandler(file_handler)

scheduler

Simple scheduler for running jobs at regular intervals

Job

Bases: object

Object to represent a scheduled job task

Source code in repos/onionbalance/onionbalance/common/scheduler.py
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
class Job(object):
    """
    A single task scheduled to run at a fixed interval.
    """

    def __init__(self, interval, job_func, *job_args, **job_kwargs):
        self.interval = interval
        self.planned_run_time = time.time()

        # Bind the target callable together with its call arguments.
        self.job_func = functools.partial(job_func, *job_args, **job_kwargs)
        functools.update_wrapper(self.job_func, job_func)

    def __lt__(self, other):
        """
        Order jobs by their next scheduled run time.
        """
        return self.planned_run_time < other.planned_run_time

    @property
    def should_run(self):
        """
        True once the job's scheduled time has arrived.
        """
        return time.time() >= self.planned_run_time

    def run(self, override_run_time=None):
        """
        Execute the job, then schedule its next run.
        """
        result = self.job_func()

        # If forced to run early (via run_all), pretend it was due now.
        if override_run_time:
            self.planned_run_time = time.time()
        self.planned_run_time += self.interval

        return result

    def __repr__(self):
        """
        Human readable representation of the job and its arguments.
        """
        parts = [repr(a) for a in self.job_func.args]
        parts += ["{}={}".format(key, repr(val))
                  for key, val in self.job_func.keywords.items()]
        return "{}({})".format(self.job_func.__name__, ', '.join(parts))
should_run property

Check if the job should be run now

__lt__(other)

Jobs are sorted based on their next scheduled run time

Source code in repos/onionbalance/onionbalance/common/scheduler.py
28
29
30
31
32
def __lt__(self, other):
    """
    Jobs are sorted based on their next scheduled run time
    """
    return self.planned_run_time < other.planned_run_time
__repr__()

Return human readable representation of the Job and arguments

Source code in repos/onionbalance/onionbalance/common/scheduler.py
54
55
56
57
58
59
60
61
62
def __repr__(self):
    """
    Return human readable representation of the Job and arguments
    """
    args = [repr(x) for x in self.job_func.args]
    kwargs = ["{}={}".format(k, repr(v)) for
              k, v in self.job_func.keywords.items()]
    return "{}({})".format(self.job_func.__name__,
                           ', '.join(args + kwargs))
run(override_run_time=None)

Run job then reschedule it in the job list

Source code in repos/onionbalance/onionbalance/common/scheduler.py
41
42
43
44
45
46
47
48
49
50
51
52
def run(self, override_run_time=None):
    """
    Run job then reschedule it in the job list
    """
    result = self.job_func()

    # If we were forced to run early (run_all), pretend the job had been
    # scheduled for right now before pushing it forward by one interval.
    if override_run_time:
        self.planned_run_time = time.time()
    self.planned_run_time += self.interval

    return result

add_job(interval, function, *job_args, **job_kwargs)

Add a job to be executed at regular intervals

The interval value is in seconds, starting from now.

Source code in repos/onionbalance/onionbalance/common/scheduler.py
65
66
67
68
69
70
71
72
def add_job(interval, function, *job_args, **job_kwargs):
    """
    Add a job to be executed at regular intervals

    The `interval` value is in seconds, starting from now.
    """
    jobs.append(Job(interval, function, *job_args, **job_kwargs))

run_all(delay_seconds=0)

Run all jobs at delay_seconds regardless of their schedule

Source code in repos/onionbalance/onionbalance/common/scheduler.py
82
83
84
85
86
87
88
def run_all(delay_seconds=0):
    """
    Run all jobs at `delay_seconds` regardless of their schedule
    """
    for scheduled_job in jobs:
        _run_job(scheduled_job, override_run_time=True)
        time.sleep(delay_seconds)

run_forever(check_interval=1)

Run jobs forever

Source code in repos/onionbalance/onionbalance/common/scheduler.py
 91
 92
 93
 94
 95
 96
 97
 98
 99
100
101
102
103
104
def run_forever(check_interval=1):
    """
    Run jobs forever
    """
    while True:
        if not jobs:
            logger.error("No scheduled jobs found, scheduler exiting.")
            return None

        # Execute every due job, earliest scheduled first.
        for due_job in sorted(job for job in jobs if job.should_run):
            _run_job(due_job)

        time.sleep(check_interval)

signalhandler

SignalHandler

Bases: object

Handle signals sent to the Onionbalance daemon process

Source code in repos/onionbalance/onionbalance/common/signalhandler.py
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
class SignalHandler(object):
    """
    Handle signals sent to the Onionbalance daemon process
    """

    def __init__(self, version, controller, status_socket=None):
        """
        Setup signal handler
        """
        self._version = version
        self._tor_controller = controller
        self._status_socket = status_socket

        # Shutdown handlers are always installed; SIGHUP reload is v3-only.
        for sig in (signal.SIGTERM, signal.SIGINT):
            signal.signal(sig, self._handle_sigint_sigterm)
        if self._version == 'v3':
            signal.signal(signal.SIGHUP, self._handle_sighup)

    def _handle_sigint_sigterm(self, signum, frame):
        """
        Handle SIGINT (Ctrl-C) and SIGTERM

        Disconnect from control port and cleanup the status socket
        """
        logger.info("Signal %d received, exiting", signum)
        self._tor_controller.close()
        if self._status_socket:
            self._status_socket.close()
        logging.shutdown()
        sys.exit(0)

    def _handle_sighup(self, signum, frame):
        """
        Handle SIGHUP (v3 only)

        Reload configuration
        """
        logger.info("Signal SIGHUP received, reloading configuration")
        onionbalance_v3.my_onionbalance.reload_config()
__init__(version, controller, status_socket=None)

Setup signal handler

Source code in repos/onionbalance/onionbalance/common/signalhandler.py
16
17
18
19
20
21
22
23
24
25
26
27
28
def __init__(self, version, controller, status_socket=None):
    """
    Setup signal handler
    """
    self._version = version
    self._tor_controller = controller
    self._status_socket = status_socket

    # Register signal handlers
    signal.signal(signal.SIGTERM, self._handle_sigint_sigterm)
    signal.signal(signal.SIGINT, self._handle_sigint_sigterm)
    if self._version == 'v3':
        signal.signal(signal.SIGHUP, self._handle_sighup)

status

Base class to provide status over a Unix socket. Default path: /var/run/onionbalance/control

BaseStatusSocket

Bases: object

For creating a Unix domain socket which emits a summary of the Onionbalance status when a client connects.

Source code in repos/onionbalance/onionbalance/common/status.py
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
class BaseStatusSocket(object):
    """
    Creates a Unix domain socket which emits a summary of the Onionbalance
    status whenever a client connects.
    """

    def __init__(self, unix_socket_filename):
        # Path of the socket file on disk.
        self.unix_socket_filename = unix_socket_filename

    def cleanup_socket_file(self):
        """
        Try to remove the socket file if it exists already
        """
        try:
            os.unlink(self.unix_socket_filename)
        except OSError as err:
            # A missing file is fine; re-raise anything else.
            if err.errno != errno.ENOENT:
                raise

    def close(self):
        """
        Close the unix domain socket and remove its file
        """
        try:
            self.server.shutdown()
            self.server.server_close()
            self.cleanup_socket_file()
        except AttributeError:
            # The server was never created; nothing to tear down.
            pass
        except OSError:
            logger.exception("Error when removing the status socket")
cleanup_socket_file()

Try to remove the socket file if it exists already

Source code in repos/onionbalance/onionbalance/common/status.py
23
24
25
26
27
28
29
30
31
32
def cleanup_socket_file(self):
    """
    Try to remove the socket file if it exists already
    """
    try:
        os.unlink(self.unix_socket_filename)
    except OSError as err:
        # Ignore a missing file; re-raise any other failure.
        if err.errno != errno.ENOENT:
            raise
close()

Close the unix domain socket and remove its file

Source code in repos/onionbalance/onionbalance/common/status.py
34
35
36
37
38
39
40
41
42
43
44
45
def close(self):
    """
    Close the unix domain socket and remove its file
    """
    try:
        self.server.shutdown()
        self.server.server_close()
        self.cleanup_socket_file()
    except AttributeError:
        # No server was ever created; nothing to clean up.
        pass
    except OSError:
        logger.exception("Error when removing the status socket")

util

reauthenticate(controller, logger, control_password=None)

Tries to authenticate to the controller

Source code in repos/onionbalance/onionbalance/common/util.py
64
65
66
67
68
69
70
71
72
def reauthenticate(controller, logger, control_password=None):
    """
    Tries to authenticate to the controller

    Sleeps first so a restarting Tor has time to come back up before the
    retry. Authentication failures are logged, not raised.
    """
    time.sleep(10)
    try:
        controller.authenticate(password=control_password)
    except stem.connection.AuthenticationFailure:
        logger.error("Failed to re-authenticate controller.")

config_generator

config_generator

ConfigGenerator

Bases: object

Source code in repos/onionbalance/onionbalance/config_generator/config_generator.py
 22
 23
 24
 25
 26
 27
 28
 29
 30
 31
 32
 33
 34
 35
 36
 37
 38
 39
 40
 41
 42
 43
 44
 45
 46
 47
 48
 49
 50
 51
 52
 53
 54
 55
 56
 57
 58
 59
 60
 61
 62
 63
 64
 65
 66
 67
 68
 69
 70
 71
 72
 73
 74
 75
 76
 77
 78
 79
 80
 81
 82
 83
 84
 85
 86
 87
 88
 89
 90
 91
 92
 93
 94
 95
 96
 97
 98
 99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
class ConfigGenerator(object):
    """
    Build an Onionbalance v3 configuration, interactively or from CLI args.

    The constructor drives the whole process: it gathers all parameters
    (output directory, master keys, instance counts), then writes the
    master key file(s) and a 'config.yaml' skeleton to disk.
    """

    def __init__(self, args, interactive):
        # Parsed command-line arguments (argparse namespace).
        self.args = args
        # When True, prompt the operator for each value instead of relying
        # solely on the CLI defaults.
        self.interactive = interactive

        self.output_path = None

        # A dictionary that maps services to their keys and instances:
        # { <service_onion_address> : (<ed25519_key>, instances) , ... }
        self.services = {}

        self.num_instances = None
        self.tag = None
        self.torrc_port_line = None
        self.instances = None
        self.master_dir = None
        self.config_file_path = None
        self.master_key_path = None

        # If this is set, it means that we read our private key from a tor
        # instance and hence we should just copy the private key over instead
        # of recreating the file (so that the Tor private key semantics are
        # maintained (see self.is_priv_key_in_tor_format in service.py)
        self.v3_loaded_key_from_file = False

        # Gather information required to create config file!
        self.gather_information()

        # Create config file!
        self.generate_config()

    def try_make_dir(self, path):
        """
        Try to create a directory (including any parent directories)
        """
        try:
            os.makedirs(path)
        except OSError:
            # Ignore "already exists"; re-raise any other failure.
            if not os.path.isdir(path):
                raise

    def gather_information(self):
        """
        Collect everything needed to generate the config: the output
        directory plus, per service, a master key and its instance list.
        """
        # Check if output directory exists, if not try create it
        self.output_path = self.get_output_path()
        self.master_dir = self.output_path

        # Allow the creation of multiple services for v3
        n_services = self.get_num_services()

        # Gather information for each service
        for i, _ in enumerate(range(n_services), start=1):
            # Load or generate the master key
            master_key, master_onion_address = self.load_master_key(i)

            # Generate keys for each instance
            self.num_instances, self.tag = self.get_num_instances(i)

            instances = self.create_instances()
            self.services[master_onion_address] = (master_key, instances)

    def generate_config(self):
        """
        Write the gathered state to disk: one key file per service plus a
        single 'config.yaml' listing all services and their instances.
        """
        # Write master key for each service
        for onion_address, (master_key, _) in self.services.items():
            self.write_master_key_to_disk(onion_address, master_key)

        # Create the onionbalance config file
        self.create_yaml_config_file()

        logger.info("Done! Successfully generated Onionbalance config.")
        logger.info("Now please edit '%s' with a text editor to add/remove/edit your backend instances.",
                    self.config_file_path)

    def get_output_path(self):
        """
        Get path to output directory and create if needed
        """
        output_path = None
        if self.interactive:
            output_path = input("Enter path to store generated config "
                                "[{}]: ".format(os.path.abspath(self.args.output)))
        output_path = output_path or self.args.output
        try:
            self.try_make_dir(output_path)
        except OSError:
            logger.exception("Problem encountered when trying to create the "
                             "output directory %s.", os.path.abspath(output_path))
            sys.exit(1)
        else:
            logger.debug("Created the output directory '%s'.",
                         os.path.abspath(output_path))

        # Refuse to overwrite an already generated configuration.
        config_path = os.path.join(output_path, 'config.yaml')
        if os.path.isfile(config_path):
            logger.error("The specified output directory '%s' already contains a 'config.yaml' "
                         "file. Please clean the directory before starting config_generator.",
                         output_path)
            sys.exit(1)

        return output_path

    def load_master_key(self, i):
        """
        Return the key and onion address of the frontend service.
        """
        self.master_key_path = self.get_master_key_path(i)

        # master_key_path is now either None (if no key path is specified) or
        # set to the actual path
        return self.load_v3_master_key(self.master_key_path)

    def get_master_key_path(self, i):
        """
        Determine the path of an existing master key for service *i* (from
        the prompt or --key); returns None when a new key should be made.
        """
        # Load master key if specified
        master_key_path = None
        helper = " (i.e. path to 'hs_ed25519_secret_key')"
        if self.interactive:
            # Read key path from user
            master_key_path = input("Service #%d: Enter path to master service private key%s "
                                    "(Leave empty to generate a key): " %
                                    (i, helper))
        master_key_path = self.args.key or master_key_path

        # If a key path was specified make sure it exists
        if master_key_path:
            if not os.path.isfile(master_key_path):
                logger.error("The specified master service private key '%s' "
                             "could not be found. Please confirm the path and "
                             "file permissions are correct.", master_key_path)
                sys.exit(1)

        return master_key_path

    def _load_v3_master_key_from_file(self, master_key_path):
        """
        Load a private key straight from a Tor instance (no OBv3 keys supported)
        and return the private key and onion address.
        """
        try:
            with open(master_key_path, 'rb') as handle:
                pem_key_bytes = handle.read()
        except EnvironmentError as e:
            logger.error("Unable to read service private key file ('%s')", e)
            sys.exit(1)

        try:
            master_private_key = tor_ed25519.load_tor_key_from_disk(pem_key_bytes)
        except ValueError:
            logger.error("Please provide path to a valid Tor master key")
            sys.exit(1)
        identity_pub_key = master_private_key.public_key()
        identity_pub_key_bytes = identity_pub_key.public_bytes(encoding=serialization.Encoding.Raw,
                                                               format=serialization.PublicFormat.Raw)
        master_onion_address = HiddenServiceDescriptorV3.address_from_identity_key(identity_pub_key_bytes)

        # remove the trailing .onion
        master_onion_address = master_onion_address.replace(".onion", "")

        # Remember that this key came from Tor so that it is copied verbatim
        # later instead of being re-serialized (see __init__ comments).
        self.v3_loaded_key_from_file = True

        return master_private_key, master_onion_address

    def load_v3_master_key(self, master_key_path):
        """
        Load the key from 'master_key_path', or generate a brand new Ed25519
        key when no path was given.  Returns (private_key, onion_address).
        """
        if master_key_path: # load key from file
            # here we need to make many of these
            return self._load_v3_master_key_from_file(master_key_path)
        else: # generate new v3 key
            master_private_key = Ed25519PrivateKey.generate()
            master_public_key = master_private_key.public_key()
            master_pub_key_bytes = master_public_key.public_bytes(encoding=serialization.Encoding.Raw,
                                                                  format=serialization.PublicFormat.Raw)
            master_onion_address = HiddenServiceDescriptorV3.address_from_identity_key(master_pub_key_bytes)
            # cut out the onion since that's what the rest of the code expects
            master_onion_address = master_onion_address.replace(".onion", "")

            return master_private_key, master_onion_address

    def get_num_services(self):
        """
        Get the number of services this OnionBalance should support
        """
        num_services = None
        if self.interactive:
            num_services = input("Number of services (frontends) to create (default: %d): " %
                                 self.args.num_services)
            # Cast to int if a number was specified
            try:
                num_services = int(num_services)
            except ValueError:
                num_services = None

        # Blank or non-numeric input falls back to the CLI default.
        num_services = num_services or self.args.num_services
        logger.debug("Creating %d services", num_services)
        return num_services

    def get_num_instances(self, i):
        """
        Get the number of instances and a tag name for them.
        """
        num_instances = None
        if self.interactive:
            limits = " (min: 1, max: 8)"
            num_instances = input("Service #%d: Number of instance services to create (default: %d)%s: " %
                                  (i, self.args.num_instances, limits))
            # Cast to int if a number was specified
            try:
                num_instances = int(num_instances)
            except ValueError:
                num_instances = None
        num_instances = num_instances or self.args.num_instances
        logger.debug("Creating %d service instances.", num_instances)

        tag = None
        if self.interactive:
            tag = input("Service #%d: Provide a tag name to group these instances [%s]:" %
                        (i, self.args.tag))
        tag = tag or self.args.tag

        return num_instances, tag

    def get_torrc_port_line(self):
        """
        Get the HiddenServicePort line for the instance torrc file
        """
        service_virtual_port = None
        if self.interactive:
            service_virtual_port = input("Specify the service virtual port (for "
                                         "client connections) [{}]: ".format(
                                             self.args.service_virtual_port))
        service_virtual_port = service_virtual_port or self.args.service_virtual_port

        service_target = None
        if self.interactive:
            # In interactive mode, change default target to match the specified
            # virtual port
            default_service_target = u'127.0.0.1:{}'.format(service_virtual_port)
            service_target = input("Specify the service target IP and port (where "
                                   "your service is listening) [{}]: ".format(
                                       default_service_target))
            service_target = service_target or default_service_target
        service_target = service_target or self.args.service_target
        torrc_port_line = u'HiddenServicePort {} {}'.format(service_virtual_port,
                                                            service_target)
        return torrc_port_line

    def create_instances(self):
        """
        Return a list of (address, None) placeholder tuples, one per
        instance, for the operator to fill in by editing config.yaml.
        """
        instances = []

        for i in range(0, self.num_instances):
            instances.append(("<Enter the instance onion address here>", None))

        return instances

    def get_master_key_passphrase(self):
        """
        Ask for an optional passphrase for the master key (not yet used for
        v3 -- see the TODO below).
        """
        # Get optional passphrase for master key
        # [TODO: Implement for v3]
        master_passphrase = None
        if self.interactive:
            master_passphrase = getpass.getpass(
                "Provide an optional password to encrypt the master private "
                "key (Not encrypted if no password is specified): ")
        return master_passphrase or self.args.password

    def write_master_key_to_disk(self, onion_address, master_key):
        """
        Persist 'master_key' to '<master_dir>/<onion_address>.key' with
        0600 permissions.
        """
        # Finished reading input, starting to write config files.
        self.try_make_dir(self.master_dir)
        master_key_file = os.path.join(self.master_dir,
                                       '{}.key'.format(onion_address))
        with open(master_key_file, "wb") as key_file:
            os.chmod(master_key_file, 384)  # chmod 0600 in decimal

            if self.v3_loaded_key_from_file:
                # If we loaded a v3 key from a file, copy the file directly
                # (see loaded_key_from_file comments).
                # NOTE(review): copyfile() rewrites the path we hold open via
                # key_file; the unused handle is then closed without writing,
                # which leaves copyfile's content intact -- confirm intended.
                shutil.copyfile(self.master_key_path, master_key_file)
                logger.info("Copied v3 master key from %s to %s.",
                            self.master_key_path, master_key_file)
            else:
                # If we generated our own v3 master key, write it to file. If
                # 'master_key' does not exist, it means that we are loading it
                # from a file, so we dont need to write it to disk.
                master_key_formatted = master_key.private_bytes(encoding=serialization.Encoding.PEM,
                                                                format=serialization.PrivateFormat.PKCS8,
                                                                encryption_algorithm=serialization.NoEncryption())
                key_file.write(master_key_formatted)

            logger.debug("Successfully wrote master key to file %s.",
                         os.path.abspath(master_key_file))

    def create_yaml_config_file(self):
        """
        Serialize self.services into '<master_dir>/config.yaml'.
        """
        services_data = []

        # Create an entry for each service
        for onion_address, (_, instances) in self.services.items():
            # Create YAML Onionbalance settings file for these instances
            service_data = {'key': '{}.key'.format(onion_address)}
            service_data['instances'] = [{'address': address,
                                          'name': '{}{}'.format(self.tag, i + 1)} for
                                         i, (address, _) in enumerate(instances)]
            services_data.append(service_data)

        # Yamlify the config
        settings_data = {'services': services_data}
        config_yaml = yaml.safe_dump(settings_data, default_flow_style=False)

        self.config_file_path = os.path.join(self.master_dir, 'config.yaml')
        with open(self.config_file_path, "w") as config_file:
            config_file.write(u"# Onionbalance Config File\n")
            config_file.write(config_yaml)
            logger.info("Wrote master service config file '%s'.",
                        os.path.abspath(self.config_file_path))
get_num_instances(i)

Get the number of instances and a tag name for them.

Source code in repos/onionbalance/onionbalance/config_generator/config_generator.py
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
def get_num_instances(self, i):
    """
    Ask how many backend instances service *i* should have, plus a tag
    used to name those instances.

    Returns a (num_instances, tag) tuple; blank or non-numeric answers
    fall back to the CLI defaults.
    """
    count = None
    if self.interactive:
        limits = " (min: 1, max: 8)"
        reply = input("Service #%d: Number of instance services to create (default: %d)%s: " %
                      (i, self.args.num_instances, limits))
        # Non-numeric (or empty) input falls through to the default below.
        try:
            count = int(reply)
        except ValueError:
            count = None
    count = count or self.args.num_instances
    logger.debug("Creating %d service instances.", count)

    label = None
    if self.interactive:
        label = input("Service #%d: Provide a tag name to group these instances [%s]:" %
                      (i, self.args.tag))
    label = label or self.args.tag

    return count, label
get_num_services()

Get the number of services this OnionBalance should support

Source code in repos/onionbalance/onionbalance/config_generator/config_generator.py
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
def get_num_services(self):
    """
    Return how many frontend (master) services to generate.

    In interactive mode the operator is prompted; a blank or non-numeric
    reply falls back to the CLI default.
    """
    chosen = None
    if self.interactive:
        reply = input("Number of services (frontends) to create (default: %d): " %
                      self.args.num_services)
        # Only a parseable integer overrides the default.
        try:
            chosen = int(reply)
        except ValueError:
            chosen = None

    chosen = chosen or self.args.num_services
    logger.debug("Creating %d services", chosen)
    return chosen
get_output_path()

Get path to output directory and create if needed

Source code in repos/onionbalance/onionbalance/config_generator/config_generator.py
 94
 95
 96
 97
 98
 99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
def get_output_path(self):
    """
    Determine the directory that generated config and keys are written to.

    Prompts in interactive mode (falling back to --output), creates the
    directory if needed, and aborts the program if directory creation
    fails or the directory already holds a 'config.yaml'.
    """
    path = None
    if self.interactive:
        path = input("Enter path to store generated config "
                     "[{}]: ".format(os.path.abspath(self.args.output)))
    path = path or self.args.output

    try:
        self.try_make_dir(path)
    except OSError:
        logger.exception("Problem encountered when trying to create the "
                         "output directory %s.", os.path.abspath(path))
        sys.exit(1)
    else:
        logger.debug("Created the output directory '%s'.",
                     os.path.abspath(path))

    # Never clobber an existing Onionbalance configuration.
    if os.path.isfile(os.path.join(path, 'config.yaml')):
        logger.error("The specified output directory '%s' already contains a 'config.yaml' "
                     "file. Please clean the directory before starting config_generator.",
                     path)
        sys.exit(1)

    return path
get_torrc_port_line()

Get the HiddenServicePort line for the instance torrc file

Source code in repos/onionbalance/onionbalance/config_generator/config_generator.py
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
def get_torrc_port_line(self):
    """
    Build the HiddenServicePort line for the instance torrc file.

    Returns e.g. "HiddenServicePort 80 127.0.0.1:80"; the virtual port
    and target come from the operator (interactive) or the CLI args.
    """
    virtual_port = None
    if self.interactive:
        virtual_port = input("Specify the service virtual port (for "
                             "client connections) [{}]: ".format(
                                 self.args.service_virtual_port))
    virtual_port = virtual_port or self.args.service_virtual_port

    target = None
    if self.interactive:
        # Interactive default target mirrors the chosen virtual port.
        fallback = u'127.0.0.1:{}'.format(virtual_port)
        target = input("Specify the service target IP and port (where "
                       "your service is listening) [{}]: ".format(
                           fallback))
        target = target or fallback
    target = target or self.args.service_target

    return u'HiddenServicePort {} {}'.format(virtual_port, target)
load_master_key(i)

Return the key and onion address of the frontend service.

Source code in repos/onionbalance/onionbalance/config_generator/config_generator.py
122
123
124
125
126
127
128
129
130
def load_master_key(self, i):
    """
    Return the key and onion address of the frontend service.

    Side effect: self.master_key_path is set to the chosen key path, or
    None when a fresh key is to be generated by load_v3_master_key().
    """
    self.master_key_path = self.get_master_key_path(i)
    return self.load_v3_master_key(self.master_key_path)
try_make_dir(path)

Try to create a directory (including any parent directories)

Source code in repos/onionbalance/onionbalance/config_generator/config_generator.py
53
54
55
56
57
58
59
60
61
def try_make_dir(self, path):
    """
    Create *path* (including any missing parent directories).

    An OSError is swallowed when the directory already exists; any other
    failure is re-raised.
    """
    try:
        os.makedirs(path)
    except OSError:
        # Only ignore the error when the directory is actually in place.
        if not os.path.isdir(path):
            raise

main()

Entry point for interactive config file generation.

Source code in repos/onionbalance/onionbalance/config_generator/config_generator.py
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
def main():
    """
    Entry point for interactive config file generation.
    """
    args = parse_cmd_args().parse_args()

    logger.info("Beginning Onionbalance config generation.")

    # Interactive mode only applies to an (almost) bare command line: any
    # option other than a lone "-v <level>", or an explicit
    # --no-interactive, switches to non-interactive operation.
    verbose = '-v' in sys.argv
    if (len(sys.argv) > 1 and not verbose) or len(sys.argv) > 3 or args.no_interactive:
        interactive = False
        logger.info("Entering non-interactive mode.")
    else:
        interactive = True
        logger.info("No command line arguments found, entering interactive "
                    "mode.")

    logger.setLevel(logging.__dict__[args.verbosity.upper()])

    # Run the generator; a Ctrl-C is reported rather than tracebacked.
    try:
        ConfigGenerator(args, interactive)
    except KeyboardInterrupt:
        logger.warning("\nConfig generator got interrupted! There might be temporary configuration files left over... Bye!")

    sys.exit(0)

parse_cmd_args()

Parses and returns command line arguments for config generator

Source code in repos/onionbalance/onionbalance/config_generator/config_generator.py
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
def parse_cmd_args():
    """
    Parses and returns command line arguments for config generator

    Only help texts are corrected here; every option, default and dest is
    unchanged.  Returns the configured argparse.ArgumentParser.
    """

    parser = argparse.ArgumentParser(
        description="onionbalance-config generates config files and keys for "
        "Onionbalance instances and management servers. Calling without any "
        "options will initiate an interactive mode.")

    parser.add_argument("--hs-version", type=str,
                        default="v3", choices=("v3", ),
                        help="Onion service version (only v3 is supported).")

    # Fixed help: config_generator loads a Tor Ed25519 key (see
    # load_v3_master_key), not an RSA key.
    parser.add_argument("--key", type=str, default=None,
                        help="Ed25519 private key for the master onion service.")

    parser.add_argument("-p", "--password", type=str, default=None,
                        help="Optional password which can be used to encrypt "
                        "the master service private key.")

    parser.add_argument("-n", type=int, default=2, dest="num_instances",
                        help="Number of instances to generate (default: "
                        "%(default)s).")

    parser.add_argument("-s", type=int, default=1, dest="num_services",
                        help="Number of services to generate (default: "
                        "%(default)s).")

    parser.add_argument("-t", "--tag", type=str, default='node',
                        help="Prefix name for the service instances "
                        "(default: %(default)s).")

    parser.add_argument("--output", type=str, default='config/',
                        help="Directory to store generated config files. "
                        "The directory will be created if it does not "
                        "already exist.")

    parser.add_argument("--no-interactive", action='store_true',
                        help="Try to run automatically without prompting for "
                        "user input.")

    parser.add_argument("-v", type=str, default="info", dest='verbosity',
                        help="Minimum verbosity level for logging. Available "
                        "in ascending order: debug, info, warning, error, "
                        "critical. The default is info.")

    parser.add_argument("--service-virtual-port", type=str,
                        default="80",
                        help="Onion service port for external client "
                        "connections (default: %(default)s).")

    # TODO: Add validator to check if the target host:port line makes sense.
    parser.add_argument("--service-target", type=str,
                        default="127.0.0.1:80",
                        help="Target IP and port where your service is "
                        "listening (default: %(default)s).")

    # .. todo:: Add option to specify HS host and port for instance torrc

    parser.add_argument('--version', action='version',
                        version='onionbalance %s' % onionbalance.__version__)

    return parser

hs_v3

consensus

Consensus

Bases: object

This represents a consensus object.

It's initialized once in startup and refreshed during the runtime using the refresh() method to get the latest consensus.

Source code in repos/onionbalance/onionbalance/hs_v3/consensus.py
 16
 17
 18
 19
 20
 21
 22
 23
 24
 25
 26
 27
 28
 29
 30
 31
 32
 33
 34
 35
 36
 37
 38
 39
 40
 41
 42
 43
 44
 45
 46
 47
 48
 49
 50
 51
 52
 53
 54
 55
 56
 57
 58
 59
 60
 61
 62
 63
 64
 65
 66
 67
 68
 69
 70
 71
 72
 73
 74
 75
 76
 77
 78
 79
 80
 81
 82
 83
 84
 85
 86
 87
 88
 89
 90
 91
 92
 93
 94
 95
 96
 97
 98
 99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
class Consensus(object):
    """
    This represents a consensus object.

    It's initialized once in startup and refreshed during the runtime using the
    refresh() method to get the latest consensus.
    """

    def __init__(self, do_refresh_consensus=True):
        # A list of tor_node:Node objects contained in the current consensus
        self.nodes = None
        # A stem NetworkStatusDocumentV3 object representing the current consensus
        self.consensus = None

        if not do_refresh_consensus:
            return

        # Set self.consensus to a NetworkStatusDocumentV3 object
        # and initialize the nodelist
        self.refresh()

    def refresh(self):
        """
        Attempt to refresh the consensus with the latest one available.

        On success, sets self.consensus and rebuilds self.nodes.  If the
        received document does not parse or is not live yet, the method
        returns early and leaves the previous state untouched.
        """
        # Imported here (not at module level) to avoid a circular import.
        from onionbalance.hs_v3.onionbalance import my_onionbalance

        # Fetch the current md consensus from the control port
        md_consensus_str = my_onionbalance.controller.get_md_consensus().encode()
        try:
            self.consensus = NetworkStatusDocumentV3(md_consensus_str)
        except ValueError:
            logger.warning("No valid consensus received. Waiting for one...")
            return

        # Check if it's live
        if not self.is_live():
            logger.info("Loaded consensus is not live. Waiting for a live one.")
            return

        self.nodes = self._initialize_nodes()

    def get_routerstatuses(self):
        """Give access to the routerstatus entries in this consensus"""

        # We should never be asked for routerstatuses with a non-live consensus
        # so make sure this is the case.
        assert (self.is_live())

        return self.consensus.routers

    def is_live(self):
        """
        Return True if the consensus is live.

        This function replicates the behavior of the little-t-tor
        networkstatus_get_reasonably_live_consensus() function.
        """
        if not self.consensus:
            return False

        REASONABLY_LIVE_TIME = 24 * 60 * 60
        now = datetime.datetime.utcnow()

        return now >= self.consensus.valid_after - datetime.timedelta(seconds=REASONABLY_LIVE_TIME) and \
            now <= self.consensus.valid_until + datetime.timedelta(seconds=REASONABLY_LIVE_TIME)

    def _initialize_nodes(self):
        """
        Initialize self.nodes with the list of current nodes.

        Returns a list of tor_node.Node objects built by pairing each
        routerstatus with its microdescriptor.  NOTE: returns None (not an
        empty list) when Tor cannot serve microdescriptors yet.
        """
        # Imported here (not at module level) to avoid a circular import.
        from onionbalance.hs_v3.onionbalance import my_onionbalance

        nodes = []

        try:
            microdescriptors_list = list(my_onionbalance.controller.controller.get_microdescriptors())
        except stem.DescriptorUnavailable:
            logger.warning("Can't get microdescriptors from Tor. Delaying...")
            return

        # Turn the mds into a dictionary indexed by the digest as an
        # optimization while matching them with routerstatuses.
        microdescriptors_dict = {}
        for md in microdescriptors_list:
            microdescriptors_dict[md.digest()] = md

        # Go through the routerstatuses and match them up with
        # microdescriptors, and create a Node object for each match. If there
        # is no match we don't register it as a node.
        for relay_fpr, relay_routerstatus in self.get_routerstatuses().items():
            logger.debug("Checking routerstatus with md digest %s", relay_routerstatus.microdescriptor_digest)

            # Skip routerstatuses for which we cannot find a microdescriptor
            if relay_routerstatus.microdescriptor_digest not in microdescriptors_dict:
                logger.debug("Could not find microdesc for rs with fpr %s", relay_fpr)
                continue

            node_microdescriptor = microdescriptors_dict[relay_routerstatus.microdescriptor_digest]
            node = tor_node.Node(node_microdescriptor, relay_routerstatus)
            nodes.append(node)

        logger.debug("Initialized %d nodes (%d routerstatuses / %d microdescriptors)",
                     len(nodes), len(self.get_routerstatuses()), len(microdescriptors_list))

        return nodes

    def _get_disaster_srv(self, time_period_num):
        """
        Return disaster SRV for 'time_period_num'.
        """
        time_period_length = self.get_time_period_length()

        disaster_body = b"shared-random-disaster" + time_period_length.to_bytes(8, 'big') + time_period_num.to_bytes(8, 'big')
        return hashlib.sha3_256(disaster_body).digest()

    def get_current_srv(self, time_period_num):
        if self.consensus.shared_randomness_current_value:
            return base64.b64decode(self.consensus.shared_randomness_current_value)
        elif time_period_num:
            logger.info("SRV not found so falling back to disaster mode")
            return self._get_disaster_srv(time_period_num)
        else:
            return None

    def get_previous_srv(self, time_period_num):
        if self.consensus.shared_randomness_previous_value:
            return base64.b64decode(self.consensus.shared_randomness_previous_value)
        elif time_period_num:
            logger.info("SRV not found so falling back to disaster mode")
            return self._get_disaster_srv(time_period_num)
        else:
            return None

    def _get_srv_phase_duration(self):
        """
        Return the length of the phase of a shared random protocol run in minutes.
        """
        # An SRV phase spans 12 voting rounds; a round lasts one hour on the
        # live network but only 20 seconds on a chutney test network.
        from onionbalance.hs_v3.onionbalance import my_onionbalance
        if my_onionbalance.is_testnet:
            return (12 * 20) // 60
        return 12 * 60

    def get_time_period_length(self):
        """
        Get the HSv3 time period length in minutes
        """
        from onionbalance.hs_v3.onionbalance import my_onionbalance
        # Chutney test networks run a compressed schedule (mirrors
        # hs_common.c:get_time_period_length() in little-t-tor); the live
        # network uses 24-hour (1440 minute) time periods.
        if my_onionbalance.is_testnet:
            return (24 * 20) // 60
        return 24 * 60

    def get_blinding_param(self, identity_pubkey, time_period_number):
        """
        Calculate the HSv3 blinding parameter as specified in rend-spec-v3.txt
        section A.2:

         h = H(BLIND_STRING | A | s | B | N)
         BLIND_STRING = "Derive temporary signing key" | INT_1(0)
         N = "key-blind" | INT_8(period-number) | INT_8(period_length)
         B = "(1511[...]2202, 4631[...]5960)"

        Use the time period number in 'time_period_number'.
        """
        # B: the ed25519 basepoint, spelled out exactly as in the spec
        basepoint = (b"(15112221349535400772501151409588531511"
                     b"454012693041857206046113283949847762202, "
                     b"463168356949264781694283940034751631413"
                     b"07993866256225615783033603165251855960)")
        blind_string = b"Derive temporary signing key" + b"\x00"

        # N: nonce binding the blinding to this time period and its length
        nonce = (b"key-blind"
                 + time_period_number.to_bytes(8, 'big')
                 + self.get_time_period_length().to_bytes(8, 'big'))

        hasher = hashlib.sha3_256()
        hasher.update(blind_string)
        hasher.update(identity_pubkey)
        hasher.update(basepoint)
        hasher.update(nonce)
        return hasher.digest()

    def get_next_time_period_num(self, valid_after=None):
        """Return the number of the time period right after the one for 'valid_after'."""
        current_tp = self.get_time_period_num(valid_after)
        return current_tp + 1

    def get_time_period_num(self, valid_after=None):
        """
        Get the time period number for this 'valid_after'.

        valid_after is a unix timestamp in seconds (if not set, we derive it
        from the live consensus valid-after time). The time period length
        defaults to 1440 minutes == 1 day on the live network.
        """
        if not valid_after:
            # No timestamp given: take it from the (necessarily live) consensus
            assert (self.is_live())
            valid_after = stem.util.datetime_to_unix(self.consensus.valid_after)

        minutes_since_epoch = valid_after // 60

        # Apply the rotation offset specified in rend-spec-v3.txt [TIME-PERIODS]
        rotation_offset = self._get_srv_phase_duration()
        assert (minutes_since_epoch > rotation_offset)
        minutes_since_epoch -= rotation_offset

        return int(minutes_since_epoch // self.get_time_period_length())

    def get_start_time_of_current_srv_run(self):
        """
        Return the start time of the current SR protocol run using the times from
        the current consensus. For example, if the latest consensus valid-after is
        23/06/2017 23:00:00 and a full SR protocol run is 24 hours, this function
        returns 23/06/2017 00:00:00.

        TODO: unittest
        """
        from onionbalance.hs_v3.onionbalance import my_onionbalance

        assert (self.is_live())

        round_start = stem.util.datetime_to_unix(self.consensus.valid_after)

        # Voting interval is 20 secs in chutney and one hour in real network
        voting_interval_secs = 20 if my_onionbalance.is_testnet else 60 * 60

        # An SR protocol run spans 24 rounds; find our slot within the run and
        # subtract the time taken by the rounds that already passed.
        round_slot = (round_start // voting_interval_secs) % 24
        elapsed_secs = round_slot * voting_interval_secs

        logger.debug("Current SRV proto run: Start of current round: %s. "
                     "Time elapsed: %s (%s)", round_start,
                     elapsed_secs, voting_interval_secs)

        return int(round_start - elapsed_secs)

    def get_start_time_of_previous_srv_run(self):
        """
        Return the start time of the SR protocol run before the current one.
        """
        from onionbalance.hs_v3.onionbalance import my_onionbalance

        # A full run is 24 rounds: 20-second rounds on a chutney test
        # network, hourly rounds on the live network.
        run_duration_secs = 24 * (20 if my_onionbalance.is_testnet else 3600)
        return self.get_start_time_of_current_srv_run() - run_duration_secs

    def get_start_time_of_next_time_period(self, valid_after=None):
        """
        Return the start time (unix timestamp, seconds) of the upcoming time
        period.
        """
        assert (self.is_live())

        # Start of the next time period, expressed in minutes since the epoch
        next_tp_num = self.get_next_time_period_num(valid_after)
        start_of_next_tp_in_mins = next_tp_num * self.get_time_period_length()

        # Apply rotation offset as specified by prop224 section [TIME-PERIODS]
        offset_mins = self._get_srv_phase_duration()

        # Convert minutes to seconds
        return 60 * (start_of_next_tp_in_mins + offset_mins)
get_blinding_param(identity_pubkey, time_period_number)

Calculate the HSv3 blinding parameter as specified in rend-spec-v3.txt section A.2:

h = H(BLIND_STRING | A | s | B | N)
BLIND_STRING = "Derive temporary signing key" | INT_1(0)
N = "key-blind" | INT_8(period-number) | INT_8(period_length)
B = "(1511[...]2202, 4631[...]5960)"

Use the time period number in 'time_period_number'.

Source code in repos/onionbalance/onionbalance/hs_v3/consensus.py
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
def get_blinding_param(self, identity_pubkey, time_period_number):
    """
    Calculate the HSv3 blinding parameter as specified in rend-spec-v3.txt section A.2:

     h = H(BLIND_STRING | A | s | B | N)
     BLIND_STRING = "Derive temporary signing key" | INT_1(0)
     N = "key-blind" | INT_8(period-number) | INT_8(period_length)
     B = "(1511[...]2202, 4631[...]5960)"

    Use the time period number in 'time_period_number'.
    """
    ED25519_BASEPOINT = b"(15112221349535400772501151409588531511" \
        b"454012693041857206046113283949847762202, " \
        b"463168356949264781694283940034751631413" \
        b"07993866256225615783033603165251855960)"
    BLIND_STRING = b"Derive temporary signing key" + bytes([0])

    period_length = self.get_time_period_length()
    N = b"key-blind" + time_period_number.to_bytes(8, 'big') + period_length.to_bytes(8, 'big')

    return hashlib.sha3_256(BLIND_STRING + identity_pubkey + ED25519_BASEPOINT + N).digest()
get_routerstatuses()

Give access to the routerstatus entries in this consensus

Source code in repos/onionbalance/onionbalance/hs_v3/consensus.py
58
59
60
61
62
63
64
65
def get_routerstatuses(self):
    """Give access to the routerstatus entries in this consensus"""

    # We should never be asked for routerstatuses with a non-live consensus
    # so make sure this is the case.
    assert (self.is_live())

    return self.consensus.routers
get_start_time_of_current_srv_run()

Return the start time of the current SR protocol run using the times from the current consensus. For example, if the latest consensus valid-after is 23/06/2017 23:00:00 and a full SR protocol run is 24 hours, this function returns 23/06/2017 00:00:00.

TODO: unittest

Source code in repos/onionbalance/onionbalance/hs_v3/consensus.py
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
def get_start_time_of_current_srv_run(self):
    """
    Return the start time of the current SR protocol run using the times from
    the current consensus. For example, if the latest consensus valid-after is
    23/06/2017 23:00:00 and a full SR protocol run is 24 hours, this function
    returns 23/06/2017 00:00:00.

    TODO: unittest
    """
    from onionbalance.hs_v3.onionbalance import my_onionbalance

    assert (self.is_live())

    beginning_of_current_round = stem.util.datetime_to_unix(self.consensus.valid_after)

    # Voting interval is 20 secs in chutney and one hour in real network
    if my_onionbalance.is_testnet:
        voting_interval_secs = 20
    else:
        voting_interval_secs = 60 * 60

    # Get current SR protocol round (an SR protocol run has 24 rounds)
    curr_round_slot = (beginning_of_current_round // voting_interval_secs) % 24
    time_elapsed_since_start_of_run = curr_round_slot * voting_interval_secs

    logger.debug("Current SRV proto run: Start of current round: %s. "
                 "Time elapsed: %s (%s)", beginning_of_current_round,
                 time_elapsed_since_start_of_run, voting_interval_secs)

    return int(beginning_of_current_round - time_elapsed_since_start_of_run)
get_start_time_of_next_time_period(valid_after=None)

Return the start time of the upcoming time period

Source code in repos/onionbalance/onionbalance/hs_v3/consensus.py
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
def get_start_time_of_next_time_period(self, valid_after=None):
    """
    Return the start time of the upcoming time period
    """
    assert (self.is_live())

    # Get start time of next time period
    time_period_length = self.get_time_period_length()
    next_time_period_num = self.get_next_time_period_num(valid_after)
    start_of_next_tp_in_mins = next_time_period_num * time_period_length

    # Apply rotation offset as specified by prop224 section [TIME-PERIODS]
    time_period_rotation_offset = self._get_srv_phase_duration()

    return (start_of_next_tp_in_mins + time_period_rotation_offset) * 60
get_time_period_length()

Get the HSv3 time period length in minutes

Source code in repos/onionbalance/onionbalance/hs_v3/consensus.py
164
165
166
167
168
169
170
171
172
173
174
175
def get_time_period_length(self):
    """
    Get the HSv3 time period length in minutes
    """
    from onionbalance.hs_v3.onionbalance import my_onionbalance
    if my_onionbalance.is_testnet:
        # This is a chutney network! Use hs_common.c:get_time_period_length()
        # logic to calculate time period length
        return (24 * 20) // 60
    else:
        # This is not a chutney network, so time period length is 1440 minutes (24 hours)
        return 24 * 60
get_time_period_num(valid_after=None)

Get time period number for this 'valid_after'.

valid_after is a datetime (if not set, we get it ourselves) time_period_length set to default value of 1440 minutes == 1 day

Source code in repos/onionbalance/onionbalance/hs_v3/consensus.py
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
def get_time_period_num(self, valid_after=None):
    """
    Get time period number for this 'valid_after'.

    valid_after is a datetime (if not set, we get it ourselves)
    time_period_length set to default value of 1440 minutes == 1 day
    """
    if not valid_after:
        assert (self.is_live())
        valid_after = self.consensus.valid_after
        valid_after = stem.util.datetime_to_unix(valid_after)

    time_period_length = self.get_time_period_length()

    seconds_since_epoch = valid_after
    minutes_since_epoch = seconds_since_epoch // 60

    # Calculate offset as specified in rend-spec-v3.txt [TIME-PERIODS]
    time_period_rotation_offset = self._get_srv_phase_duration()

    assert (minutes_since_epoch > time_period_rotation_offset)
    minutes_since_epoch -= time_period_rotation_offset

    time_period_num = minutes_since_epoch // time_period_length
    return int(time_period_num)
is_live()

Return True if the consensus is live.

This function replicates the behavior of the little-t-tor networkstatus_get_reasonably_live_consensus() function.

Source code in repos/onionbalance/onionbalance/hs_v3/consensus.py
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
def is_live(self):
    """
    Return True if the consensus is live.

    This function replicates the behavior of the little-t-tor
    networkstatus_get_reasonably_live_consensus() function.
    """
    if not self.consensus:
        return False

    REASONABLY_LIVE_TIME = 24 * 60 * 60
    now = datetime.datetime.utcnow()

    return now >= self.consensus.valid_after - datetime.timedelta(seconds=REASONABLY_LIVE_TIME) and \
        now <= self.consensus.valid_until + datetime.timedelta(seconds=REASONABLY_LIVE_TIME)
refresh()

Attempt to refresh the consensus with the latest one available.

Source code in repos/onionbalance/onionbalance/hs_v3/consensus.py
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
def refresh(self):
    """
    Attempt to refresh the consensus with the latest one available.
    """
    from onionbalance.hs_v3.onionbalance import my_onionbalance

    # Fetch the current md consensus from the control port
    md_consensus_str = my_onionbalance.controller.get_md_consensus().encode()
    try:
        self.consensus = NetworkStatusDocumentV3(md_consensus_str)
    except ValueError:
        logger.warning("No valid consensus received. Waiting for one...")
        return

    # Check if it's live
    if not self.is_live():
        logger.info("Loaded consensus is not live. Waiting for a live one.")
        return

    self.nodes = self._initialize_nodes()

descriptor

IntroductionPointSetV3

Bases: IntroductionPointSet

This class represents a set of introduction points (which are actually stem.descriptor.hidden_service.IntroductionPointV3 objects)

It gives us a nice way to compare sets of introduction points between them, to see if they are different.

It also preserves all the functionality of onionbalance.common.intro_point_set.IntroductionPointSet which allows you to sample introduction points out of the set.

Source code in repos/onionbalance/onionbalance/hs_v3/descriptor.py
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
class IntroductionPointSetV3(intro_point_set.IntroductionPointSet):
    """
    This class represents a set of introduction points (which are actually
    stem.descriptor.hidden_service.IntroductionPointV3 objects)

    It gives us a nice way to compare sets of introduction poinst between them,
    to see if they are different.

    It also preserves all the functionality of
    onionbalance.common.intro_point_set.IntroductionPointSet which allows you to
    sample introduction points out of the set.
    """

    def __init__(self, introduction_points):
        """
        'introduction_points' is a list of lists where each internal list contains
        the introduction points of an instance
        """
        for instance_ips in introduction_points:
            for ip in instance_ips:
                if ip.legacy_key_raw:
                    logger.info("Ignoring introduction point with legacy key.")
                    instance_ips.remove(ip)

        super().__init__(introduction_points)

    def get_intro_points_flat(self):
        """
        Flatten the .intro_points list of list into a single list and return it
        """
        return list(itertools.chain(*self.intro_points))

    def __eq__(self, other):
        """
        Compares two IntroductionPointSetV3 objects and returns True
        if they have the same introduction points in them.
        """
        # XXX we are currently using onion_key_raw as the identifier for the
        # intro point. is there a better thing to use?
        intro_set_1 = set(ip.onion_key_raw for ip in other.get_intro_points_flat())
        intro_set_2 = set(ip.onion_key_raw for ip in self.get_intro_points_flat())

        # TODO: unittests
        return intro_set_1 == intro_set_2
__eq__(other)

Compares two IntroductionPointSetV3 objects and returns True if they have the same introduction points in them.

Source code in repos/onionbalance/onionbalance/hs_v3/descriptor.py
55
56
57
58
59
60
61
62
63
64
65
66
def __eq__(self, other):
    """
    Compares two IntroductionPointSetV3 objects and returns True
    if they have the same introduction points in them.
    """
    # XXX we are currently using onion_key_raw as the identifier for the
    # intro point. is there a better thing to use?
    intro_set_1 = set(ip.onion_key_raw for ip in other.get_intro_points_flat())
    intro_set_2 = set(ip.onion_key_raw for ip in self.get_intro_points_flat())

    # TODO: unittests
    return intro_set_1 == intro_set_2
__init__(introduction_points)

'introduction_points' is a list of lists where each internal list contains the introduction points of an instance

Source code in repos/onionbalance/onionbalance/hs_v3/descriptor.py
36
37
38
39
40
41
42
43
44
45
46
47
def __init__(self, introduction_points):
    """
    'introduction_points' is a list of lists where each internal list contains
    the introduction points of an instance
    """
    for instance_ips in introduction_points:
        for ip in instance_ips:
            if ip.legacy_key_raw:
                logger.info("Ignoring introduction point with legacy key.")
                instance_ips.remove(ip)

    super().__init__(introduction_points)
get_intro_points_flat()

Flatten the .intro_points list of list into a single list and return it

Source code in repos/onionbalance/onionbalance/hs_v3/descriptor.py
49
50
51
52
53
def get_intro_points_flat(self):
    """
    Flatten the .intro_points list of list into a single list and return it
    """
    return list(itertools.chain(*self.intro_points))

OBDescriptor

Bases: V3Descriptor

A v3 descriptor created by Onionbalance and meant to be published to the network.

This class supports generating descriptors.

Can raise BadDescriptor if we can't or should not generate a valid descriptor.

Source code in repos/onionbalance/onionbalance/hs_v3/descriptor.py
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
class OBDescriptor(V3Descriptor):
    """
    A v3 descriptor created by Onionbalance and meant to be published to the
    network.

    This class supports generating descriptors.

    Can raise BadDescriptor if we can't or should not generate a valid descriptor.
    """

    def __init__(self, onion_address, identity_priv_key,
                 blinding_param, intro_points, is_first_desc):
        # Timestamp of the last attempt to assemble this descriptor
        self.last_publish_attempt_ts = None
        # Timestamp we last uploaded this descriptor
        self.last_upload_ts = None
        # Set of responsible HSDirs for last time we uploaded this descriptor
        self.responsible_hsdirs = None

        # Start generating descriptor
        desc_signing_key = Ed25519PrivateKey.generate()

        # Get the intro points for this descriptor and recertify them!
        recertified_intro_points = []
        for ip in intro_points:
            recertified_intro_points.append(self._recertify_intro_point(ip, desc_signing_key))

        rev_counter = self._get_revision_counter(identity_priv_key, is_first_desc)

        v3_desc_inner_layer = InnerLayer.create(introduction_points=recertified_intro_points)
        v3_desc = HiddenServiceDescriptorV3.create(
            blinding_nonce=blinding_param,
            identity_key=identity_priv_key,
            signing_key=desc_signing_key,
            inner_layer=v3_desc_inner_layer,
            revision_counter=int(rev_counter),
        )

        # TODO stem should probably initialize it itself so that it has balance
        # between descriptor creation (where this is not inted) and descriptor
        # parsing (where this is inited)
        v3_desc._inner_layer = v3_desc_inner_layer

        # Check max size is within range
        if len(str(v3_desc)) > params.MAX_DESCRIPTOR_SIZE:
            logger.error("Created descriptor is too big (%d intros). Consider "
                         "relaxing number of instances or intro points per instance "
                         "(see N_INTROS_PER_INSTANCE)")
            raise BadDescriptor

        super().__init__(onion_address, v3_desc)

    def set_last_publish_attempt_ts(self, last_publish_attempt_ts):
        self.last_publish_attempt_ts = last_publish_attempt_ts

    def set_last_upload_ts(self, last_upload_ts):
        self.last_upload_ts = last_upload_ts

    def set_responsible_hsdirs(self, responsible_hsdirs):
        self.responsible_hsdirs = responsible_hsdirs

    def _recertify_intro_point(self, intro_point, descriptor_signing_key):
        """
        Given an IntroductionPointV3 object, re-certify it using the
        'descriptor_signing_key' for this new descriptor.

        Return the recertified intro point.
        """
        original_auth_key_cert = intro_point.auth_key_cert
        original_enc_key_cert = intro_point.enc_key_cert

        # We have already removed all the intros with legacy keys. Make sure that
        # no legacy intros sneaks up on us, becausey they would result in
        # unparseable descriptors if we don't recertify them (and we won't).
        assert (not intro_point.legacy_key_cert)

        # Get all the certs we need to recertify
        # [we need to use the _replace method of namedtuples because there is no
        # setter for those attributes due to the way stem sets those fields. If we
        # attempt to normally replace the attributes we get the following
        # exception: AttributeError: can't set attribute]
        recertified_intro_point = intro_point._replace(auth_key_cert=self._recertify_ed_certificate(original_auth_key_cert,
                                                                                                    descriptor_signing_key),
                                                       enc_key_cert=self._recertify_ed_certificate(original_enc_key_cert,
                                                                                                   descriptor_signing_key))

        return recertified_intro_point

    def _recertify_ed_certificate(self, ed_cert, descriptor_signing_key):
        """
        Recertify an HSv3 intro point certificate using the new descriptor signing
        key so that it can be accepted as part of a new descriptor.

        "Recertifying" means taking the certified key and signing it with a new
        key.

        Return the new certificate.
        """
        # pylint: disable=no-member
        extensions = [Ed25519Extension(ExtensionType.HAS_SIGNING_KEY, None, stem.util._pubkey_bytes(descriptor_signing_key))]
        new_cert = Ed25519CertificateV1(cert_type=ed_cert.type,
                                        expiration=ed_cert.expiration,
                                        key_type=ed_cert.key_type,
                                        key=ed_cert.key,
                                        extensions=extensions,
                                        signing_key=descriptor_signing_key)

        return new_cert

    def _get_revision_counter(self, identity_priv_key, is_first_desc):
        """
        Get the revision counter using the order-preserving-encryption scheme from
        rend-spec-v3.txt section F.2.
        """
        from onionbalance.hs_v3.onionbalance import my_onionbalance
        now = int(stem.util.datetime_to_unix(datetime.datetime.utcnow()))

        # TODO: Mention that this is done with the private key instead of the blinded priv key
        # this means that this won't cooperate with normal tor
        privkey_bytes = identity_priv_key.private_bytes(encoding=serialization.Encoding.Raw,
                                                        format=serialization.PrivateFormat.Raw,
                                                        encryption_algorithm=serialization.NoEncryption())
        cipher_key = hashlib.sha3_256(b"rev-counter-generation" + privkey_bytes).digest()

        if is_first_desc:
            srv_start = my_onionbalance.consensus.get_start_time_of_previous_srv_run()
        else:
            srv_start = my_onionbalance.consensus.get_start_time_of_current_srv_run()
        srv_start = int(srv_start)

        seconds_since_srv_start = now - srv_start
        # This must be strictly positive
        seconds_since_srv_start += 1

        ope_result = sum(w for w, _ in zip(self._get_ope_scheme_words(cipher_key),
                                           range(seconds_since_srv_start)))

        logger.debug("Rev counter for %s descriptor (SRV secs %s, OPE %s)",
                     "first" if is_first_desc else "second",
                     seconds_since_srv_start, ope_result)

        return ope_result

    def _get_ope_scheme_words(self, cipher_key):
        IV = b'\x00' * 16

        cipher = Cipher(algorithms.AES(cipher_key), modes.CTR(IV), backend=backend)
        e = cipher.encryptor()
        while True:
            v = e.update(b'\x00\x00')
            yield v[0] + 256 * v[1] + 1

ReceivedDescriptor

Bases: V3Descriptor

An instance v3 descriptor received from the network.

This class supports parsing descriptors.

Source code in repos/onionbalance/onionbalance/hs_v3/descriptor.py
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
class ReceivedDescriptor(V3Descriptor):
    """
    An instance v3 descriptor received from the network.

    This class supports parsing descriptors.
    """

    def __init__(self, desc_text, onion_address):
        """
        Parse a descriptor in 'desc_text' and return an ReceivedDescriptor object.

        Raises BadDescriptor if the descriptor cannot be used.
        """
        try:
            v3_desc = HiddenServiceDescriptorV3.from_str(desc_text)
            v3_desc.decrypt(onion_address)
        except ValueError as err:
            logger.warning("Descriptor is corrupted (%s).", err)
            raise BadDescriptor

        self.received_ts = datetime.datetime.utcnow()

        logger.debug("Successfuly decrypted descriptor for %s!", onion_address)

        super().__init__(onion_address, v3_desc)

    def is_old(self):
        """
        Return True if this received descriptor is old and we should consider the
        instance as offline.
        """
        from onionbalance.hs_v3.onionbalance import my_onionbalance

        received_age = datetime.datetime.utcnow() - self.received_ts
        received_age = received_age.total_seconds()

        if my_onionbalance.is_testnet:
            too_old_threshold = params.INSTANCE_DESCRIPTOR_TOO_OLD_TESTNET
        else:
            too_old_threshold = params.INSTANCE_DESCRIPTOR_TOO_OLD

        if received_age > too_old_threshold:
            return True

        return False
__init__(desc_text, onion_address)

Parse a descriptor in 'desc_text' and return an ReceivedDescriptor object.

Raises BadDescriptor if the descriptor cannot be used.

Source code in repos/onionbalance/onionbalance/hs_v3/descriptor.py
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
def __init__(self, desc_text, onion_address):
    """
    Parse a descriptor in 'desc_text' and return an ReceivedDescriptor object.

    Raises BadDescriptor if the descriptor cannot be used.
    """
    try:
        v3_desc = HiddenServiceDescriptorV3.from_str(desc_text)
        v3_desc.decrypt(onion_address)
    except ValueError as err:
        logger.warning("Descriptor is corrupted (%s).", err)
        raise BadDescriptor

    self.received_ts = datetime.datetime.utcnow()

    logger.debug("Successfuly decrypted descriptor for %s!", onion_address)

    super().__init__(onion_address, v3_desc)
is_old()

Return True if this received descriptor is old and we should consider the instance as offline.

Source code in repos/onionbalance/onionbalance/hs_v3/descriptor.py
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
def is_old(self):
    """
    Return True if this received descriptor is old and we should consider the
    instance as offline.
    """
    from onionbalance.hs_v3.onionbalance import my_onionbalance

    received_age = datetime.datetime.utcnow() - self.received_ts
    received_age = received_age.total_seconds()

    if my_onionbalance.is_testnet:
        too_old_threshold = params.INSTANCE_DESCRIPTOR_TOO_OLD_TESTNET
    else:
        too_old_threshold = params.INSTANCE_DESCRIPTOR_TOO_OLD

    if received_age > too_old_threshold:
        return True

    return False

V3Descriptor

Bases: object

A generic v3 descriptor.

Serves as the base class for OBDescriptor and ReceivedDescriptor which implement more specific functionalities.

Source code in repos/onionbalance/onionbalance/hs_v3/descriptor.py
 69
 70
 71
 72
 73
 74
 75
 76
 77
 78
 79
 80
 81
 82
 83
 84
 85
 86
 87
 88
 89
 90
 91
 92
 93
 94
 95
 96
 97
 98
 99
100
101
102
103
104
class V3Descriptor(object):
    """
    A generic v3 descriptor.

    Serves as the base class for OBDescriptor and ReceivedDescriptor which
    implement more specific functionalities.
    """

    def __init__(self, onion_address, v3_desc):
        self.onion_address = onion_address

        self.v3_desc = v3_desc

        # An IntroductionPointSetV3 object with the intros of this descriptor
        self.intro_set = IntroductionPointSetV3([self.v3_desc._inner_layer.introduction_points])

    def get_intro_points(self):
        """
        Get the raw intro points for this descriptor.
        """
        return self.intro_set.get_intro_points_flat()

    def get_blinded_key(self):
        """
        Extract and return the blinded key from the descriptor
        """

        # The descriptor signing cert, signs the descriptor signing key using
        # the blinded key. So the signing key should be the one we want here.
        return self.v3_desc.signing_cert.signing_key()

    def get_size(self):
        """
        Return size of v3 descriptor in bytes
        """
        return len(str(self.v3_desc))
get_blinded_key()

Extract and return the blinded key from the descriptor

Source code in repos/onionbalance/onionbalance/hs_v3/descriptor.py
91
92
93
94
95
96
97
98
def get_blinded_key(self):
    """
    Extract and return the blinded key from the descriptor
    """

    # The descriptor signing cert, signs the descriptor signing key using
    # the blinded key. So the signing key should be the one we want here.
    return self.v3_desc.signing_cert.signing_key()
get_intro_points()

Get the raw intro points for this descriptor.

Source code in repos/onionbalance/onionbalance/hs_v3/descriptor.py
85
86
87
88
89
def get_intro_points(self):
    """
    Get the raw intro points for this descriptor.
    """
    return self.intro_set.get_intro_points_flat()
get_size()

Return size of v3 descriptor in bytes

Source code in repos/onionbalance/onionbalance/hs_v3/descriptor.py
100
101
102
103
104
def get_size(self):
    """
    Return size of v3 descriptor in bytes
    """
    return len(str(self.v3_desc))

ext

ed25519_exts_ref

Reference implementations for the ed25519 tweaks that Tor uses.

Includes self-tester and test vector generator.

hashring

get_responsible_hsdirs(blinded_pubkey, is_first_descriptor)

Return a list with the responsible HSDirs for a service with 'blinded_pubkey'.

The returned list is a list of fingerprints.

Source code in repos/onionbalance/onionbalance/hs_v3/hashring.py
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
def get_responsible_hsdirs(blinded_pubkey, is_first_descriptor):
    """
    Return a list with the responsible HSDirs for a service with 'blinded_pubkey'.

    The returned list is a list of fingerprints.

    :param blinded_pubkey: the service's blinded public key (bytes)
    :param is_first_descriptor: True when computing the hash ring for the
        first descriptor, False for the second one
    :raises EmptyHashRing: if the hash ring is empty, or (outside testnets)
        if the wrong number of responsible HSDirs was collected
    """
    # Imported here (not at module top) to avoid a circular import.
    from onionbalance.hs_v3.onionbalance import my_onionbalance

    # Always use a live consensus when calculating responsible HSDirs
    assert (my_onionbalance.consensus.is_live())

    responsible_hsdirs = []

    # TODO: Improve representation of hash ring here... No need to go
    # between list and dictionary...

    # dictionary { <node hsdir index> : Node , .... }
    node_hash_ring = _get_hash_ring_for_descriptor(is_first_descriptor)
    if not node_hash_ring:
        raise EmptyHashRing

    sorted_hash_ring_list = sorted(list(node_hash_ring.keys()))

    logger.info("Initialized hash ring of size %d (blinded key: %s)",
                len(node_hash_ring), base64.b64encode(blinded_pubkey))

    # For each descriptor replica: find the service's position on the ring
    # and walk it clockwise, collecting HSDIR_SPREAD_STORE directories.
    for replica_num in range(1, params.HSDIR_N_REPLICAS + 1):
        # The HSDirs that we are gonna store this replica in
        replica_store_hsdirs = []

        hidden_service_index = _get_hidden_service_index(blinded_pubkey, replica_num, is_first_descriptor)

        # Find position of descriptor ID in the HSDir list
        index = bisect.bisect_left(sorted_hash_ring_list, hidden_service_index)

        logger.info("\t Tried with HS index %s got position %d", hidden_service_index.hex(), index)

        while len(replica_store_hsdirs) < params.HSDIR_SPREAD_STORE:
            try:
                hsdir_key = sorted_hash_ring_list[index]
                index += 1
            except IndexError:
                # Wrap around when we reach the end of the HSDir list
                index = 0
                hsdir_key = sorted_hash_ring_list[index]

            hsdir_node = node_hash_ring[hsdir_key]

            # Check if we have already added this node to this
            # replica. This should never happen on the real network but
            # might happen in small testnets like chutney!
            if hsdir_node.get_hex_fingerprint() in replica_store_hsdirs:
                logger.debug("Ignoring already added HSDir to this replica!")
                break

            # Check if we have already added this node to the responsible
            # HSDirs. This can happen in the second replica and we should
            # skip the node
            if hsdir_node.get_hex_fingerprint() in responsible_hsdirs:
                logger.debug("Ignoring already added HSDir!")
                continue

            logger.debug("%d: %s: %s", index, hsdir_node.get_hex_fingerprint(), hsdir_key.hex())

            replica_store_hsdirs.append(hsdir_node.get_hex_fingerprint())

        responsible_hsdirs.extend(replica_store_hsdirs)

    # Do a sanity check
    if my_onionbalance.is_testnet:
        # If we are on chutney it's normal to not have enough nodes to populate the hashring
        assert (len(responsible_hsdirs) <= params.HSDIR_N_REPLICAS * params.HSDIR_SPREAD_STORE)
    else:
        if (len(responsible_hsdirs) != params.HSDIR_N_REPLICAS * params.HSDIR_SPREAD_STORE):
            logger.critical("Got the wrong number of responsible HSDirs: %d. Aborting", len(responsible_hsdirs))
            raise EmptyHashRing

    return responsible_hsdirs

get_srv_and_time_period(is_first_descriptor)

Return SRV and time period based on current consensus time

Source code in repos/onionbalance/onionbalance/hs_v3/hashring.py
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
def get_srv_and_time_period(is_first_descriptor):
    """
    Return SRV and time period based on current consensus time

    :param is_first_descriptor: True when computing values for the first
        descriptor, False for the second one
    :returns: a (srv, tp) tuple where 'srv' is the shared random value
        (or None if the consensus did not provide one) and 'tp' is the
        time period number to use
    """
    # Imported here (not at module top) to avoid a circular import.
    from onionbalance.hs_v3.onionbalance import my_onionbalance

    valid_after = my_onionbalance.consensus.consensus.valid_after

    current_tp = my_onionbalance.consensus.get_time_period_num()
    previous_tp = current_tp - 1
    next_tp = current_tp + 1
    assert (previous_tp > 0)

    # Get the right TP/SRV
    # Four cases, keyed on (which descriptor) x (whether consensus time is
    # between the start of the time period and SRV publication) — this
    # presumably mirrors the rend-spec-v3 descriptor schedule; confirm there.
    if is_first_descriptor:
        if _time_between_tp_and_srv(valid_after):
            srv = my_onionbalance.consensus.get_previous_srv(previous_tp)
            tp = previous_tp
            _case = 1  # just for debugging
        else:
            srv = my_onionbalance.consensus.get_previous_srv(current_tp)
            tp = current_tp
            _case = 2  # just for debugging
    else:
        if _time_between_tp_and_srv(valid_after):
            srv = my_onionbalance.consensus.get_current_srv(current_tp)
            tp = current_tp
            _case = 3  # just for debugging
        else:
            srv = my_onionbalance.consensus.get_current_srv(next_tp)
            tp = next_tp
            _case = 4  # just for debugging

    srv_b64 = base64.b64encode(srv) if srv else None
    logger.debug("For valid_after %s we got SRV %s and TP %s (case: #%d)",
                 valid_after, srv_b64, tp, _case)

    return srv, tp

instance

InstanceV3

Bases: Instance

This is a V3 onionbalance instance

Source code in repos/onionbalance/onionbalance/hs_v3/instance.py
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
class InstanceV3(onionbalance.common.instance.Instance):
    """
    This is a V3 onionbalance instance
    """

    def __init__(self, onion_address):
        """
        :param onion_address: this instance's onion address (may or may not
            carry the ".onion" suffix)
        """
        # Get the controller
        from onionbalance.hs_v3.onionbalance import my_onionbalance
        controller = my_onionbalance.controller.controller

        # Initialize the common Instance class.
        super().__init__(controller, onion_address)

        # Onion address does not contain the '.onion'.
        logger.warning("Loaded instance %s", onion_address)

        # Latest received descriptor for this instance (None until we get one)
        self.descriptor = None

        # When was the intro set of this instance last modified?
        self.intro_set_modified_timestamp = None

    def has_onion_address(self, onion_address):
        """
        Return True if this instance has this onion address
        """
        # Strip the ".onion" part of the address if it exists since some
        # subsystems don't use it (e.g. Tor sometimes omits it from control
        # port responses)
        my_onion_address = self.onion_address.replace(".onion", "")
        their_onion_address = onion_address.replace(".onion", "")

        return my_onion_address == their_onion_address

    def register_descriptor(self, descriptor_text, onion_address):
        """
        We received a descriptor (with 'descriptor_text') for 'onion_address'.
        Register it to this instance.

        :param descriptor_text: raw descriptor contents
        :param onion_address: must equal this instance's onion address
        """
        logger.info("Found instance %s for this new descriptor!", self.onion_address)

        assert (onion_address == self.onion_address)

        # Parse descriptor. If it parsed correctly, we know that this
        # descriptor is truly for this instance (since the onion address
        # matches)
        try:
            new_descriptor = ob_descriptor.ReceivedDescriptor(descriptor_text, onion_address)
        except ob_descriptor.BadDescriptor:
            logger.warning("Received bad descriptor for %s. Ignoring.", self.onion_address)
            return

        # Before replacing the current descriptor with this one, check if the
        # introduction point set changed:

        # If this is the first descriptor for this instance, the intro point set changed
        if not self.descriptor:
            logger.info("This is the first time we see a descriptor for instance %s!", self.onion_address)
            self.intro_set_modified_timestamp = datetime.datetime.utcnow()
            self.descriptor = new_descriptor
            return

        assert (self.descriptor)
        assert (new_descriptor.intro_set)

        # We already have a descriptor but this is a new one. Check the intro points!
        if new_descriptor.intro_set != self.descriptor.intro_set:
            logger.info("We got a new descriptor for instance %s and the intro set changed!", self.onion_address)
            self.intro_set_modified_timestamp = datetime.datetime.utcnow()
        else:
            logger.info("We got a new descriptor for instance %s but the intro set did not change.", self.onion_address)

        self.descriptor = new_descriptor

    def get_intros_for_publish(self):
        """
        Get a list of stem.descriptor.IntroductionPointV3 objects for this descriptor

        Raise :InstanceHasNoDescriptor: if there is no descriptor for this instance
        Raise :InstanceIsOffline: if the instance is offline.
        """
        if not self.descriptor:
            raise InstanceHasNoDescriptor

        # is_old() marks the instance as offline; see the descriptor module.
        if self.descriptor.is_old():
            raise InstanceIsOffline

        return self.descriptor.get_intro_points()
get_intros_for_publish()

Get a list of stem.descriptor.IntroductionPointV3 objects for this descriptor

Raise :InstanceHasNoDescriptor: if there is no descriptor for this instance. Raise :InstanceIsOffline: if the instance is offline.

Source code in repos/onionbalance/onionbalance/hs_v3/instance.py
84
85
86
87
88
89
90
91
92
93
94
95
96
97
def get_intros_for_publish(self):
    """
    Return the stem.descriptor.IntroductionPointV3 objects of this
    instance's descriptor.

    Raise :InstanceHasNoDescriptor: if there is no descriptor for this instance
    Raise :InstanceIsOffline: if the instance is offline.
    """
    descriptor = self.descriptor

    if not descriptor:
        raise InstanceHasNoDescriptor
    if descriptor.is_old():
        raise InstanceIsOffline

    return descriptor.get_intro_points()
has_onion_address(onion_address)

Return True if this instance has this onion address

Source code in repos/onionbalance/onionbalance/hs_v3/instance.py
32
33
34
35
36
37
38
39
40
41
42
def has_onion_address(self, onion_address):
    """
    Return True if this instance has this onion address
    """
    # Some subsystems omit the ".onion" suffix (e.g. Tor control port
    # responses), so compare the addresses with the suffix removed.
    def _bare(addr):
        return addr.replace(".onion", "")

    return _bare(self.onion_address) == _bare(onion_address)
register_descriptor(descriptor_text, onion_address)

We received a descriptor (with 'descriptor_text') for 'onion_address'. Register it to this instance.

Source code in repos/onionbalance/onionbalance/hs_v3/instance.py
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
def register_descriptor(self, descriptor_text, onion_address):
    """
    We received a descriptor (with 'descriptor_text') for 'onion_address'.
    Register it to this instance.

    :param descriptor_text: raw descriptor contents
    :param onion_address: must equal this instance's onion address
    """
    logger.info("Found instance %s for this new descriptor!", self.onion_address)

    assert (onion_address == self.onion_address)

    # Parse descriptor. If it parsed correctly, we know that this
    # descriptor is truly for this instance (since the onion address
    # matches)
    try:
        new_descriptor = ob_descriptor.ReceivedDescriptor(descriptor_text, onion_address)
    except ob_descriptor.BadDescriptor:
        logger.warning("Received bad descriptor for %s. Ignoring.", self.onion_address)
        return

    # Before replacing the current descriptor with this one, check if the
    # introduction point set changed:

    # If this is the first descriptor for this instance, the intro point set changed
    if not self.descriptor:
        logger.info("This is the first time we see a descriptor for instance %s!", self.onion_address)
        self.intro_set_modified_timestamp = datetime.datetime.utcnow()
        self.descriptor = new_descriptor
        return

    assert (self.descriptor)
    assert (new_descriptor.intro_set)

    # We already have a descriptor but this is a new one. Check the intro points!
    if new_descriptor.intro_set != self.descriptor.intro_set:
        logger.info("We got a new descriptor for instance %s and the intro set changed!", self.onion_address)
        self.intro_set_modified_timestamp = datetime.datetime.utcnow()
    else:
        logger.info("We got a new descriptor for instance %s but the intro set did not change.", self.onion_address)

    self.descriptor = new_descriptor

manager

main(args)

This is the entry point of v3 functionality.

Initialize onionbalance, schedule future jobs and let the scheduler do its thing.

Source code in repos/onionbalance/onionbalance/hs_v3/manager.py
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
def main(args):
    """
    This is the entry point of v3 functionality.

    Initialize onionbalance, schedule future jobs and let the scheduler do its thing.

    :param args: parsed command-line arguments
    :returns: 0 (only reached if the scheduler loop ever returns)
    """

    # Override the log level if specified on the command line.
    if args.verbosity:
        params.DEFAULT_LOG_LEVEL = args.verbosity.upper()
    logger.setLevel(logging.__dict__[params.DEFAULT_LOG_LEVEL.upper()])

    # Get the global onionbalance singleton
    my_onionbalance = onionbalance.my_onionbalance

    # A bad config file is fatal: log and exit with status 1.
    try:
        my_onionbalance.init_subsystems(args)
    except onionbalance.ConfigError as err:
        logger.error("%s", err)
        sys.exit(1)

    # Imported late — presumably to avoid an import cycle at module load; confirm.
    from onionbalance.hs_v3 import status
    status_socket = None
    if status_socket_location(my_onionbalance.config_data) is not None:
        status_socket = status.StatusSocket(status_socket_location(my_onionbalance.config_data), my_onionbalance)
    signalhandler.SignalHandler('v3', my_onionbalance.controller.controller, status_socket)

    # Schedule descriptor fetch and upload events
    init_scheduler(my_onionbalance)

    # Begin main loop to poll for HS descriptors
    scheduler.run_forever()

    return 0

onionbalance

Onionbalance

Bases: object

Onionbalance singleton that represents this onionbalance runtime.

Contains various objects that are useful to other onionbalance modules so this is imported from all over the codebase.

Source code in repos/onionbalance/onionbalance/hs_v3/onionbalance.py
 21
 22
 23
 24
 25
 26
 27
 28
 29
 30
 31
 32
 33
 34
 35
 36
 37
 38
 39
 40
 41
 42
 43
 44
 45
 46
 47
 48
 49
 50
 51
 52
 53
 54
 55
 56
 57
 58
 59
 60
 61
 62
 63
 64
 65
 66
 67
 68
 69
 70
 71
 72
 73
 74
 75
 76
 77
 78
 79
 80
 81
 82
 83
 84
 85
 86
 87
 88
 89
 90
 91
 92
 93
 94
 95
 96
 97
 98
 99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
class Onionbalance(object):
    """
    Onionbalance singleton that represents this onionbalance runtime.

    Contains various objects that are useful to other onionbalance modules so
    this is imported from all over the codebase.
    """

    def __init__(self):
        # This is kept minimal so that it's quick (it's executed at program
        # launch because of the onionbalance singleton). The actual init work
        # happens in init_subsystems()

        # True if this onionbalance operates in a testnet (e.g. chutney)
        self.is_testnet = False

    def init_subsystems(self, args):
        """
        Initialize subsystems (this is resource intensive)

        :param args: parsed command-line arguments (Tor control ip/port/socket,
            config path, testnet flag)
        :raises ConfigError: (from load_config_file) if the config file is bad
        """
        self.args = args
        self.config_path = os.path.abspath(self.args.config)
        self.config_data = self.load_config_file()
        self.is_testnet = args.is_testnet

        if self.is_testnet:
            logger.warning("Onionbalance configured on a testnet!")

        # Create stem controller and connect to the Tor network
        self.controller = stem_controller.StemController(address=args.ip, port=args.port, socket=args.socket)
        self.consensus = ob_consensus.Consensus()

        # Initialize our service
        self.services = self.initialize_services_from_config_data()

        # Catch interesting events (like receiving descriptors etc.)
        self.controller.add_event_listeners()

        logger.warning("Onionbalance initialized (stem version: %s) (tor version: %s)!",
                       stem.__version__, self.controller.controller.get_version())
        logger.warning("=" * 80)

    def initialize_services_from_config_data(self):
        """
        Create an OnionbalanceService for every service in the config data.

        Exits the process (status 1) if any service fails to initialize.
        """
        services = []
        try:
            for service in self.config_data['services']:
                services.append(ob_service.OnionbalanceService(service, self.config_path))
        except ob_service.BadServiceInit:
            sys.exit(1)

        return services

    def load_config_file(self):
        """
        Read and validate the config file at self.config_path.

        :returns: the parsed config data
        :raises ConfigError: if mandatory fields are missing or malformed
        """
        config_data = util.read_config_data_from_file(self.config_path)
        logger.debug("Onionbalance config data: %s", config_data)

        # Do some basic validation
        if "services" not in config_data:
            raise ConfigError("Config file is bad. 'services' is missing. Did you make it with onionbalance-config?")

        # More validation
        for service in config_data["services"]:
            if "key" not in service:
                raise ConfigError("Config file is bad. 'key' is missing. Did you make it with onionbalance-config?")

            if "instances" not in service:
                raise ConfigError("Config file is bad. 'instances' is missing. Did you make it with "
                                  "onionbalance-config?")

            if not service["instances"]:
                raise ConfigError("Config file is bad. No backend instances are set. Onionbalance needs at least 1.")

            for instance in service["instances"]:
                if "address" not in instance:
                    raise ConfigError("Config file is wrong. 'address' missing from instance.")

                if not instance["address"]:
                    raise ConfigError("Config file is bad. Address field is not set.")

                # Validate that the onion address is legit
                try:
                    _ = HiddenServiceDescriptorV3.identity_key_from_address(instance["address"])
                except ValueError:
                    raise ConfigError("Cannot load instance with address: '{}'".format(instance["address"]))

        return config_data

    def fetch_instance_descriptors(self):
        """
        Try to fetch fresh descriptors for all instances of all services.

        Does nothing (and just logs) when there is no live consensus yet.
        """
        logger.info("[*] fetch_instance_descriptors() called [*]")

        # TODO: Don't do this here. Instead do it on a specialized function
        self.controller.mark_tor_as_active()

        if not self.consensus.is_live():
            logger.warning("No live consensus. Waiting before fetching descriptors...")
            return

        all_instances = self._get_all_instances()

        onionbalance.common.instance.helper_fetch_all_instance_descriptors(self.controller.controller,
                                                                           all_instances)

    def handle_new_desc_content_event(self, desc_content_event):
        """
        Parse HS_DESC_CONTENT response events for descriptor content

        Update the HS instance object with the data from the new descriptor.

        :param desc_content_event: a stem HS_DESC_CONTENT event (with
            'address', 'directory' and 'descriptor' attributes)
        """
        onion_address = desc_content_event.address
        logger.debug("Received descriptor for %s.onion from %s",
                     onion_address, desc_content_event.directory)

        #  Check that the HSDir returned a descriptor that is not empty
        descriptor_text = str(desc_content_event.descriptor).encode('utf-8')

        # HSDirs provide a HS_DESC_CONTENT response with either one or two
        # CRLF lines when they do not have a matching descriptor. Using
        # len() < 5 should ensure all empty HS_DESC_CONTENT events are matched.
        if len(descriptor_text) < 5:
            logger.debug("Empty descriptor received for %s.onion", onion_address)
            return None

        # OK this descriptor seems plausible: Find the instances that this
        # descriptor belongs to:
        for instance in self._get_all_instances():
            if instance.onion_address == onion_address:
                instance.register_descriptor(descriptor_text, onion_address)

    def publish_all_descriptors(self):
        """
        For each service attempt to publish all descriptors
        """
        logger.info("[*] publish_all_descriptors() called [*]")

        if not self.consensus.is_live():
            logger.info("No live consensus. Waiting before publishing descriptors...")
            return

        for service in self.services:
            service.publish_descriptors()

    def _get_all_instances(self):
        """
        Get all instances for all services
        """
        instances = []

        for service in self.services:
            instances.extend(service.instances)

        return instances

    def handle_new_status_event(self, status_event):
        """
        Parse Tor status events such as "STATUS_GENERAL"
        """
        # pylint: disable=no-member
        if status_event.action == "CONSENSUS_ARRIVED":
            logger.info("Received new consensus!")
            self.consensus.refresh()
            # Call all callbacks in case we just got a live consensus
            # NOTE(review): these use the module-level 'my_onionbalance'
            # singleton rather than 'self' — same object in practice
            # (this class is the singleton), but confirm before relying on it.
            my_onionbalance.publish_all_descriptors()
            my_onionbalance.fetch_instance_descriptors()

    def _address_is_instance(self, onion_address):
        """
        Return True if 'onion_address' is one of our instances.
        """
        for service in self.services:
            for instance in service.instances:
                if instance.has_onion_address(onion_address):
                    return True
        return False

    def _address_is_frontend(self, onion_address):
        """
        Return True if 'onion_address' is one of our frontend services.
        """
        for service in self.services:
            if service.has_onion_address(onion_address):
                return True
        return False

    def handle_new_desc_event(self, desc_event):
        """
        Parse HS_DESC response events
        """
        action = desc_event.action

        if action == "RECEIVED":
            pass  # We already log in HS_DESC_CONTENT so no need to do it here too
        elif action == "UPLOADED":
            logger.debug("Successfully uploaded descriptor for %s to %s", desc_event.address, desc_event.directory)
        elif action == "FAILED":
            # Log level depends on whether the failure concerns one of our
            # instances (fetch) or one of our frontends (upload).
            if self._address_is_instance(desc_event.address):
                logger.info("Descriptor fetch failed for instance %s from %s (%s)",
                            desc_event.address, desc_event.directory, desc_event.reason)
            elif self._address_is_frontend(desc_event.address):
                logger.warning("Descriptor upload failed for frontend %s to %s (%s)",
                               desc_event.address, desc_event.directory, desc_event.reason)
            else:
                logger.warning("Descriptor action failed for unknown service %s to %s (%s)",
                               desc_event.address, desc_event.directory, desc_event.reason)
        elif action == "REQUESTED":
            logger.debug("Requested descriptor for %s from %s...", desc_event.address, desc_event.directory)
        else:
            pass

    def reload_config(self):
        """
        Reload configuration and reset job scheduler

        Exits the process (status 1) if the reloaded config is bad.
        """

        try:
            self.init_subsystems(self.args)
            manager.init_scheduler(self)
        except ConfigError as err:
            logger.error("%s", err)
            sys.exit(1)
handle_new_desc_content_event(desc_content_event)

Parse HS_DESC_CONTENT response events for descriptor content

Update the HS instance object with the data from the new descriptor.

Source code in repos/onionbalance/onionbalance/hs_v3/onionbalance.py
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
def handle_new_desc_content_event(self, desc_content_event):
    """
    Parse HS_DESC_CONTENT response events for descriptor content

    Update the HS instance object with the data from the new descriptor.

    :param desc_content_event: a stem HS_DESC_CONTENT event (with 'address',
        'directory' and 'descriptor' attributes)
    """
    onion_address = desc_content_event.address
    logger.debug("Received descriptor for %s.onion from %s",
                 onion_address, desc_content_event.directory)

    #  Check that the HSDir returned a descriptor that is not empty
    descriptor_text = str(desc_content_event.descriptor).encode('utf-8')

    # HSDirs provide a HS_DESC_CONTENT response with either one or two
    # CRLF lines when they do not have a matching descriptor. Using
    # len() < 5 should ensure all empty HS_DESC_CONTENT events are matched.
    if len(descriptor_text) < 5:
        logger.debug("Empty descriptor received for %s.onion", onion_address)
        return None

    # OK this descriptor seems plausible: Find the instances that this
    # descriptor belongs to:
    for instance in self._get_all_instances():
        if instance.onion_address == onion_address:
            instance.register_descriptor(descriptor_text, onion_address)
handle_new_desc_event(desc_event)

Parse HS_DESC response events

Source code in repos/onionbalance/onionbalance/hs_v3/onionbalance.py
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
def handle_new_desc_event(self, desc_event):
    """
    Parse HS_DESC response events

    Logs UPLOADED/REQUESTED/FAILED events; RECEIVED and unknown actions
    are ignored (content is already logged via HS_DESC_CONTENT).
    """
    action = desc_event.action

    if action == "RECEIVED":
        # We already log in HS_DESC_CONTENT so no need to do it here too
        return

    if action == "UPLOADED":
        logger.debug("Successfully uploaded descriptor for %s to %s", desc_event.address, desc_event.directory)
        return

    if action == "REQUESTED":
        logger.debug("Requested descriptor for %s from %s...", desc_event.address, desc_event.directory)
        return

    if action == "FAILED":
        # Pick the log level based on whether this is one of our instances
        # (fetch failure), one of our frontends (upload failure), or unknown.
        if self._address_is_instance(desc_event.address):
            logger.info("Descriptor fetch failed for instance %s from %s (%s)",
                        desc_event.address, desc_event.directory, desc_event.reason)
        elif self._address_is_frontend(desc_event.address):
            logger.warning("Descriptor upload failed for frontend %s to %s (%s)",
                           desc_event.address, desc_event.directory, desc_event.reason)
        else:
            logger.warning("Descriptor action failed for unknown service %s to %s (%s)",
                           desc_event.address, desc_event.directory, desc_event.reason)
handle_new_status_event(status_event)

Parse Tor status events such as "STATUS_GENERAL"

Source code in repos/onionbalance/onionbalance/hs_v3/onionbalance.py
173
174
175
176
177
178
179
180
181
182
183
def handle_new_status_event(self, status_event):
    """
    Parse Tor status events such as "STATUS_GENERAL"

    On CONSENSUS_ARRIVED, refresh our consensus view and retry publishing
    and fetching descriptors (we may just have become live).
    """
    # pylint: disable=no-member
    if status_event.action == "CONSENSUS_ARRIVED":
        logger.info("Received new consensus!")
        self.consensus.refresh()
        # Call all callbacks in case we just got a live consensus
        # NOTE(review): these use the module-level 'my_onionbalance'
        # singleton rather than 'self' — same object in practice, but
        # confirm before relying on it.
        my_onionbalance.publish_all_descriptors()
        my_onionbalance.fetch_instance_descriptors()
init_subsystems(args)

Initialize subsystems (this is resource intensive)

Source code in repos/onionbalance/onionbalance/hs_v3/onionbalance.py
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
def init_subsystems(self, args):
    """
    Initialize subsystems (this is resource intensive)

    :param args: parsed command-line arguments (Tor control ip/port/socket,
        config path, testnet flag)
    :raises ConfigError: (from load_config_file) if the config file is bad
    """
    self.args = args
    self.config_path = os.path.abspath(self.args.config)
    self.config_data = self.load_config_file()
    self.is_testnet = args.is_testnet

    if self.is_testnet:
        logger.warning("Onionbalance configured on a testnet!")

    # Create stem controller and connect to the Tor network
    self.controller = stem_controller.StemController(address=args.ip, port=args.port, socket=args.socket)
    self.consensus = ob_consensus.Consensus()

    # Initialize our service
    self.services = self.initialize_services_from_config_data()

    # Catch interesting events (like receiving descriptors etc.)
    self.controller.add_event_listeners()

    logger.warning("Onionbalance initialized (stem version: %s) (tor version: %s)!",
                   stem.__version__, self.controller.controller.get_version())
    logger.warning("=" * 80)
publish_all_descriptors()

For each service attempt to publish all descriptors

Source code in repos/onionbalance/onionbalance/hs_v3/onionbalance.py
149
150
151
152
153
154
155
156
157
158
159
160
def publish_all_descriptors(self):
    """
    For each service attempt to publish all descriptors
    """
    logger.info("[*] publish_all_descriptors() called [*]")

    # Descriptors can only be built against a live consensus; retry later.
    if not self.consensus.is_live():
        logger.info("No live consensus. Waiting before publishing descriptors...")
        return

    for svc in self.services:
        svc.publish_descriptors()
reload_config()

Reload configuration and reset job scheduler

Source code in repos/onionbalance/onionbalance/hs_v3/onionbalance.py
226
227
228
229
230
231
232
233
234
235
236
def reload_config(self):
    """
    Reload configuration and reset job scheduler

    Re-runs init_subsystems() with the original command-line args and then
    re-initializes the scheduler. Exits the process (status 1) on a bad config.
    """

    try:
        self.init_subsystems(self.args)
        manager.init_scheduler(self)
    except ConfigError as err:
        logger.error("%s", err)
        sys.exit(1)

service

OnionbalanceService

Bases: object

Service represents a front-facing hidden service which should be load-balanced.

Source code in repos/onionbalance/onionbalance/hs_v3/service.py
 24
 25
 26
 27
 28
 29
 30
 31
 32
 33
 34
 35
 36
 37
 38
 39
 40
 41
 42
 43
 44
 45
 46
 47
 48
 49
 50
 51
 52
 53
 54
 55
 56
 57
 58
 59
 60
 61
 62
 63
 64
 65
 66
 67
 68
 69
 70
 71
 72
 73
 74
 75
 76
 77
 78
 79
 80
 81
 82
 83
 84
 85
 86
 87
 88
 89
 90
 91
 92
 93
 94
 95
 96
 97
 98
 99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
class OnionbalanceService(object):
    """
    Service represents a front-facing hidden service which should
    be load-balanced.
    """

    def __init__(self, service_config_data, config_path):
        """
        With 'config_data' straight out of the config file, create the service and its instances.
        'config_path' is the full path to the config file.

        Raise ValueError if the config file is not well formatted
        """
        # Is our private key in Tor's extended key format?
        self.is_priv_key_in_tor_format = False

        # Load private key and onion address from config
        # (the onion_address also includes the ".onion")
        self.identity_priv_key, self.onion_address = self._load_service_keys(service_config_data, config_path)

        # XXX This is an epic hack! If we are using keys in tor's extended
        # format, we basically override stem's function for signing with
        # blinded keys because it assumes that its keys are in standard
        # non-extended format. To avoid a double key extension we use our own
        # function...  This will prove to be a problem if we ever move to
        # multiple services per onionbalance, or if stem changes its code
        # behind our backs.
        if self.is_priv_key_in_tor_format:
            stem.descriptor.hidden_service._blinded_sign = tor_ed25519._blinded_sign_with_tor_key

        # Now load up the instances
        self.instances = self._load_instances(service_config_data)

        # First descriptor for this service (the one we uploaded last)
        self.first_descriptor = None
        # Second descriptor for this service (the one we uploaded last)
        self.second_descriptor = None

    def has_onion_address(self, onion_address):
        """
        Return True if this service has this onion address
        """
        # Strip the ".onion" part of the address if it exists since some
        # subsystems don't use it (e.g. Tor sometimes omits it from control
        # port responses)
        # NOTE: replace() removes every ".onion" occurrence, not just a
        # trailing suffix; harmless for base32 v3 addresses.
        my_onion_address = self.onion_address.replace(".onion", "")
        their_onion_address = onion_address.replace(".onion", "")

        return my_onion_address == their_onion_address

    def _load_instances(self, service_config_data):
        """
        Create an InstanceV3 object for each backend listed in the config.

        Raises BadServiceInit if the frontend's own onion address was also
        configured as an instance (a common misconfiguration).
        """
        instances = []

        for config_instance in service_config_data['instances']:
            new_instance = onionbalance.hs_v3.instance.InstanceV3(config_instance['address'])
            instances.append(new_instance)

        # Some basic validation
        for instance in instances:
            if self.has_onion_address(instance.onion_address):
                logger.error("Config file error. Did you configure your frontend (%s) as an instance?",
                             self.onion_address)
                raise BadServiceInit

        return instances

    def _load_service_keys(self, service_config_data, config_path):
        """
        Load the service's ed25519 identity key from disk and derive the
        onion address from it.

        Returns a (identity_priv_key, onion_address) tuple. Raises
        BadServiceInit if the key file cannot be read or is in neither the
        OBv3 PEM format nor Tor's binary key format.
        """
        # First of all let's load up the private key
        key_fname = service_config_data['key']
        config_directory = os.path.dirname(config_path)
        # Relative key paths are resolved against the config file's directory
        if not os.path.isabs(key_fname):
            key_fname = os.path.join(config_directory, key_fname)

        try:
            with open(key_fname, 'rb') as handle:
                pem_key_bytes = handle.read()
        except EnvironmentError as e:
            logger.critical("Unable to read service private key file ('%s')", e)
            raise BadServiceInit

        # Get the service private key
        # First try with the OBv3 PEM format
        identity_priv_key = None
        try:
            identity_priv_key = serialization.load_pem_private_key(pem_key_bytes, password=None, backend=default_backend())
        except ValueError as e:
            logger.warning("Service private key not in OBv3 format ('%s'). Trying tor's format...", e)

        # If the key was not in OBv3 PEM format, try the Tor binary format
        if not identity_priv_key:
            try:
                identity_priv_key = tor_ed25519.load_tor_key_from_disk(pem_key_bytes)
                self.is_priv_key_in_tor_format = True
            except ValueError as e:
                logger.warning("Service private key not in Tor format either ('%s'). Aborting.", e)
                raise BadServiceInit

        # Get onion address
        identity_pub_key = identity_priv_key.public_key()
        identity_pub_key_bytes = identity_pub_key.public_bytes(encoding=serialization.Encoding.Raw,
                                                               format=serialization.PublicFormat.Raw)
        onion_address = HiddenServiceDescriptorV3.address_from_identity_key(identity_pub_key_bytes)

        # NOTE(review): warning level is presumably used so the loaded
        # address shows up at the default verbosity -- confirm.
        logger.warning("Loaded onion %s from %s", onion_address, key_fname)

        return identity_priv_key, onion_address

    def _intro_set_modified(self, is_first_desc):
        """
        Check if the introduction point set has changed since last publish.
        """
        if is_first_desc:
            last_upload_ts = self.first_descriptor.last_upload_ts
        else:
            last_upload_ts = self.second_descriptor.last_upload_ts

        if not last_upload_ts:
            logger.info("\t Descriptor never published before. Do it now!")
            return True

        # An instance we have no descriptor for yet cannot count as modified
        for instance in self.instances:
            if not instance.intro_set_modified_timestamp:
                logger.info("\t Still dont have a descriptor for this instance")
                continue

            if instance.intro_set_modified_timestamp > last_upload_ts:
                logger.info("\t Intro set modified")
                return True

        logger.info("\t Intro set not modified")
        return False

    def _get_descriptor_lifetime(self):
        """
        Return the frontend descriptor lifetime (in seconds) appropriate
        for the network we are on (the testnet lifetime differs).
        """
        from onionbalance.hs_v3.onionbalance import my_onionbalance
        if my_onionbalance.is_testnet:
            return params.FRONTEND_DESCRIPTOR_LIFETIME_TESTNET
        else:
            return params.FRONTEND_DESCRIPTOR_LIFETIME

    def _descriptor_has_expired(self, is_first_desc):
        """
        Check if the descriptor has expired (hasn't been uploaded recently).

        If 'is_first_desc' is set then check the first descriptor of the
        service, otherwise the second.
        """
        if is_first_desc:
            last_upload_ts = self.first_descriptor.last_upload_ts
        else:
            last_upload_ts = self.second_descriptor.last_upload_ts

        # Both timestamps are naive UTC datetimes (utcnow() on both sides)
        descriptor_age = (datetime.datetime.utcnow() - last_upload_ts)
        descriptor_age = int(descriptor_age.total_seconds())
        if (descriptor_age > self._get_descriptor_lifetime()):
            logger.info("\t Our %s descriptor has expired (%s seconds old). Uploading new one.",
                        "first" if is_first_desc else "second", descriptor_age)
            return True
        else:
            logger.info("\t Our %s descriptor is still fresh (%s seconds old).",
                        "first" if is_first_desc else "second", descriptor_age)
            return False

    def _hsdir_set_changed(self, is_first_desc):
        """
        Return True if the HSDir has changed between the last upload of this
        descriptor and the current state of things
        """
        from onionbalance.hs_v3.onionbalance import my_onionbalance

        # Derive blinding parameter
        _, time_period_number = hashring.get_srv_and_time_period(is_first_desc)
        blinded_param = my_onionbalance.consensus.get_blinding_param(self._get_identity_pubkey_bytes(),
                                                                     time_period_number)

        # Get blinded key
        # TODO: hoho! this is dirty we are poking into internal stem API. We
        #       should ask atagar to make it public for us! :)
        blinded_key = stem.descriptor.hidden_service._blinded_pubkey(self._get_identity_pubkey_bytes(), blinded_param)

        # Calculate current responsible HSDirs
        try:
            responsible_hsdirs = hashring.get_responsible_hsdirs(blinded_key, is_first_desc)
        except hashring.EmptyHashRing:
            # No hash ring yet (e.g. no consensus): we cannot tell, so
            # report "no change"
            return False

        if is_first_desc:
            previous_responsible_hsdirs = self.first_descriptor.responsible_hsdirs
        else:
            previous_responsible_hsdirs = self.second_descriptor.responsible_hsdirs

        if set(responsible_hsdirs) != set(previous_responsible_hsdirs):
            logger.info("\t HSDir set changed (%s vs %s)",
                        set(responsible_hsdirs), set(previous_responsible_hsdirs))
            return True
        else:
            logger.info("\t HSDir set remained the same")
            return False

    def _should_publish_descriptor_now(self, is_first_desc, force_publish=False):
        """
        Return True if we should publish a descriptor right now
        """
        # If descriptor not yet uploaded, do it now!
        if is_first_desc and not self.first_descriptor:
            return True
        if not is_first_desc and not self.second_descriptor:
            return True

        # OK this is not the first time we publish a descriptor. Check various
        # parameters to see if we should try to publish again:
        # (the list form evaluates -- and logs from -- every check)
        return any([self._intro_set_modified(is_first_desc),
                    self._descriptor_has_expired(is_first_desc),
                    self._hsdir_set_changed(is_first_desc),
                    force_publish])

    def get_all_intros_for_publish(self):
        """
        Return an IntroductionPointSetV3 with all the intros of all the instances
        of this service.
        """
        all_intros = []

        for instance in self.instances:
            try:
                instance_intros = instance.get_intros_for_publish()
            except onionbalance.hs_v3.instance.InstanceHasNoDescriptor:
                logger.info("Entirely missing a descriptor for instance %s. Continuing anyway if possible",
                            instance.onion_address)
                continue
            except onionbalance.hs_v3.instance.InstanceIsOffline:
                logger.info("Instance %s is offline. Ignoring its intro points...",
                            instance.onion_address)
                continue

            all_intros.append(instance_intros)

        return descriptor.IntroductionPointSetV3(all_intros)

    def publish_descriptors(self):
        """
        Publish (if needed) both the first and the second descriptor.
        """
        self._publish_descriptor(is_first_desc=True)
        self._publish_descriptor(is_first_desc=False)

    def _get_intros_for_desc(self):
        """
        Get the intros that should be included in a descriptor for this service.

        Raises NotEnoughIntros when no usable intro points are available.
        """
        all_intros = self.get_all_intros_for_publish()

        # Get number of instances that contributed to final intro point list
        n_instances = len(all_intros.intro_points)
        n_intros_wanted = n_instances * params.N_INTROS_PER_INSTANCE

        final_intros = all_intros.choose(n_intros_wanted)

        if (len(final_intros) == 0):
            logger.info("Got no usable intro points from our instances. Delaying descriptor push...")
            raise NotEnoughIntros

        logger.info("We got %d intros from %d instances. We want %d intros ourselves (got: %d)",
                    len(all_intros.get_intro_points_flat()), n_instances,
                    n_intros_wanted, len(final_intros))

        return final_intros

    def _publish_descriptor(self, is_first_desc):
        """
        Attempt to publish descriptor if needed.

        If 'is_first_desc' is set then attempt to upload the first descriptor
        of the service, otherwise the second.
        """
        from onionbalance.hs_v3.onionbalance import my_onionbalance

        if not self._should_publish_descriptor_now(is_first_desc):
            logger.info("No reason to publish %s descriptor for %s",
                        "first" if is_first_desc else "second",
                        self.onion_address)
            return

        try:
            intro_points = self._get_intros_for_desc()
        except NotEnoughIntros:
            return

        # Derive blinding parameter
        _, time_period_number = hashring.get_srv_and_time_period(is_first_desc)
        blinding_param = my_onionbalance.consensus.get_blinding_param(self._get_identity_pubkey_bytes(),
                                                                      time_period_number)

        try:
            desc = descriptor.OBDescriptor(self.onion_address, self.identity_priv_key,
                                           blinding_param, intro_points, is_first_desc)
        except descriptor.BadDescriptor:
            return

        logger.info("Service %s created %s descriptor (%s intro points) (blinding param: %s) (size: %s bytes). About to publish:",
                    self.onion_address, "first" if is_first_desc else "second",
                    len(desc.intro_set), blinding_param.hex(), len(str(desc.v3_desc)))

        # When we do a v3 HSPOST on the control port, Tor decodes the
        # descriptor and extracts the blinded pubkey to be used when uploading
        # the descriptor. So let's do the same to compute the responsible
        # HSDirs:
        blinded_key = desc.get_blinded_key()

        # Calculate responsible HSDirs for our service
        try:
            responsible_hsdirs = hashring.get_responsible_hsdirs(blinded_key, is_first_desc)
        except hashring.EmptyHashRing:
            logger.warning("Can't publish desc with no hash ring. Delaying...")
            return

        desc.set_last_publish_attempt_ts(datetime.datetime.utcnow())

        logger.info("Uploading %s descriptor for %s to %s",
                    "first" if is_first_desc else "second",
                    self.onion_address, responsible_hsdirs)

        # Upload descriptor
        self._upload_descriptor(my_onionbalance.controller.controller,
                                desc, responsible_hsdirs)

        # It would be better to set last_upload_ts when an upload succeeds and
        # not when an upload is just attempted. Unfortunately the HS_DESC #
        # UPLOADED event does not provide information about the service and
        # so it can't be used to determine when descriptor upload succeeds
        desc.set_last_upload_ts(datetime.datetime.utcnow())
        desc.set_responsible_hsdirs(responsible_hsdirs)

        # Set the descriptor
        if is_first_desc:
            self.first_descriptor = desc
        else:
            self.second_descriptor = desc

    def _upload_descriptor(self, controller, ob_desc, hsdirs):
        """
        Convenience method to upload a descriptor
        Handle some error checking and logging inside the Service class
        """
        if hsdirs and not isinstance(hsdirs, list):
            hsdirs = [hsdirs]

        # Retry after reauthenticating if the control socket closed;
        # give up on any other controller error.
        while True:
            try:
                onionbalance.common.descriptor.upload_descriptor(controller,
                                                                 ob_desc.v3_desc,
                                                                 hsdirs=hsdirs,
                                                                 v3_onion_address=ob_desc.onion_address)
                break
            except stem.SocketClosed:
                logger.error("Error uploading descriptor for service "
                             "%s.onion. Control port socket is closed.",
                             self.onion_address)
                onionbalance.common.util.reauthenticate(controller, logger)
            except stem.ControllerError:
                logger.exception("Error uploading descriptor for service "
                                 "%s.onion.", self.onion_address)
                break

    def _get_identity_pubkey_bytes(self):
        """
        Return the raw bytes of this service's public identity key.
        """
        identity_pub_key = self.identity_priv_key.public_key()
        return identity_pub_key.public_bytes(encoding=serialization.Encoding.Raw,
                                             format=serialization.PublicFormat.Raw)
__init__(service_config_data, config_path)

With 'config_data' straight out of the config file, create the service and its instances. 'config_path' is the full path to the config file.

Raise ValueError if the config file is not well formatted

Source code in repos/onionbalance/onionbalance/hs_v3/service.py
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
def __init__(self, service_config_data, config_path):
    """
    With 'config_data' straight out of the config file, create the service and its instances.
    'config_path' is the full path to the config file.

    Raise ValueError if the config file is not well formatted

    NOTE(review): key/instance loading appears to raise BadServiceInit
    rather than ValueError -- confirm which exception callers expect.
    """
    # Is our private key in Tor's extended key format?
    self.is_priv_key_in_tor_format = False

    # Load private key and onion address from config
    # (the onion_address also includes the ".onion")
    # (may also set is_priv_key_in_tor_format as a side effect)
    self.identity_priv_key, self.onion_address = self._load_service_keys(service_config_data, config_path)

    # XXX This is an epic hack! If we are using keys in tor's extended
    # format, we basically override stem's function for signing with
    # blinded keys because it assumes that its keys are in standard
    # non-extended format. To avoid a double key extension we use our own
    # function...  This will prove to be a problem if we ever move to
    # multiple services per onionbalance, or if stem changes its code
    # behind our backs.
    if self.is_priv_key_in_tor_format:
        stem.descriptor.hidden_service._blinded_sign = tor_ed25519._blinded_sign_with_tor_key

    # Now load up the instances
    self.instances = self._load_instances(service_config_data)

    # First descriptor for this service (the one we uploaded last)
    self.first_descriptor = None
    # Second descriptor for this service (the one we uploaded last)
    self.second_descriptor = None
get_all_intros_for_publish()

Return an IntroductionPointSetV3 with all the intros of all the instances of this service.

Source code in repos/onionbalance/onionbalance/hs_v3/service.py
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
def get_all_intros_for_publish(self):
    """
    Collect the intro points of every instance of this service and
    return them wrapped in an IntroductionPointSetV3.

    Instances with no descriptor, and instances considered offline,
    are skipped: their intro points simply do not appear in the set.
    """
    collected = []

    for inst in self.instances:
        try:
            collected.append(inst.get_intros_for_publish())
        except onionbalance.hs_v3.instance.InstanceHasNoDescriptor:
            logger.info("Entirely missing a descriptor for instance %s. Continuing anyway if possible",
                        inst.onion_address)
        except onionbalance.hs_v3.instance.InstanceIsOffline:
            logger.info("Instance %s is offline. Ignoring its intro points...",
                        inst.onion_address)

    return descriptor.IntroductionPointSetV3(collected)
has_onion_address(onion_address)

Return True if this service has this onion address

Source code in repos/onionbalance/onionbalance/hs_v3/service.py
62
63
64
65
66
67
68
69
70
71
72
def has_onion_address(self, onion_address):
    """
    Return True if 'onion_address' refers to this service.

    The comparison ignores any ".onion" part on either side, since some
    subsystems leave it out (e.g. Tor sometimes omits it from control
    port responses).
    """
    def _bare(addr):
        # Drop every ".onion" occurrence, matching the historical
        # replace() semantics (not just a trailing suffix).
        return addr.replace(".onion", "")

    return _bare(self.onion_address) == _bare(onion_address)

status

Provide status over a Unix socket. Default path: /var/run/onionbalance/control

StatusSocket

Bases: BaseStatusSocket

Create a Unix domain socket which emits a summary of the Onionbalance status when a client connects.

Source code in repos/onionbalance/onionbalance/hs_v3/status.py
 89
 90
 91
 92
 93
 94
 95
 96
 97
 98
 99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
class StatusSocket(BaseStatusSocket):
    """
    Unix domain socket server that emits a summary of the Onionbalance
    status to every client that connects.
    """

    def __init__(self, status_socket_location, balance: Onionbalance):
        """
        Create the Unix domain socket status server and start it in a
        background thread.

        Example::
            socat - unix-connect:/var/run/onionbalance/control

            {"services": [{"instances": [{"introModified": "2020-06-16 19:35:17", "ipsNum": 3, "onionAddress": "vkmiy6biqcyphtx5exswxl5sjus2vn2b6pzir7lz5akudhwbqk5muead.onion"}], "onionAddress": "bvy46sg2b5dokczabwv2pabqlrps3lppweyrebhat6gjieo2avojdvad.onion.onion", "timestamp": "2020-06-16 19:36:01"}]}
        """
        super().__init__(status_socket_location)

        # Remove any stale socket file left over from a previous run
        self.cleanup_socket_file()

        logger.debug("Creating status socket at %s", self.unix_socket_filename)
        try:
            handler_cls = create_status_socket_handler(balance)
            self.server = ThreadingSocketServer(self.unix_socket_filename, handler_cls)

            # Serve connections on a daemon thread so it dies with the
            # main thread
            thread = threading.Thread(target=self.server.serve_forever)
            thread.daemon = True
            thread.start()

        except (OSError, socket.error):
            logger.error("Could not start status socket at %s. Does the path "
                         "exist? Do you have permission?",
                         status_socket_location)
__init__(status_socket_location, balance)

Create the Unix domain socket status server and start in a thread

Example:: socat - unix-connect:/var/run/onionbalance/control

{"services": [{"instances": [{"introModified": "2020-06-16 19:35:17", "ipsNum": 3, "onionAddress": "vkmiy6biqcyphtx5exswxl5sjus2vn2b6pzir7lz5akudhwbqk5muead.onion"}], "onionAddress": "bvy46sg2b5dokczabwv2pabqlrps3lppweyrebhat6gjieo2avojdvad.onion.onion", "timestamp": "2020-06-16 19:36:01"}]}
Source code in repos/onionbalance/onionbalance/hs_v3/status.py
 95
 96
 97
 98
 99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
def __init__(self, status_socket_location, balance: Onionbalance):
    """
    Create the Unix domain socket status server and start in a thread

    Example::
        socat - unix-connect:/var/run/onionbalance/control

        {"services": [{"instances": [{"introModified": "2020-06-16 19:35:17", "ipsNum": 3, "onionAddress": "vkmiy6biqcyphtx5exswxl5sjus2vn2b6pzir7lz5akudhwbqk5muead.onion"}], "onionAddress": "bvy46sg2b5dokczabwv2pabqlrps3lppweyrebhat6gjieo2avojdvad.onion.onion", "timestamp": "2020-06-16 19:36:01"}]}
    """
    super().__init__(status_socket_location)

    # Remove any stale socket file left over from a previous run
    self.cleanup_socket_file()

    logger.debug("Creating status socket at %s", self.unix_socket_filename)
    try:
        self.server = ThreadingSocketServer(self.unix_socket_filename,
                                            create_status_socket_handler(balance))

        # Start running the socket server in a another thread
        server_thread = threading.Thread(target=self.server.serve_forever)
        server_thread.daemon = True  # Exit daemon when main thread stops
        server_thread.start()

    except (OSError, socket.error):
        # Startup failure is logged but deliberately not re-raised:
        # onionbalance keeps running without the status socket.
        logger.error("Could not start status socket at %s. Does the path "
                     "exist? Do you have permission?",
                     status_socket_location)

StatusSocketHandlerImpl

Bases: BaseRequestHandler, StatusSocketHandlerMixin

Handler for new domain socket connections

Source code in repos/onionbalance/onionbalance/hs_v3/status.py
56
57
58
59
60
61
62
63
64
65
66
67
68
69
class StatusSocketHandlerImpl(BaseRequestHandler, StatusSocketHandlerMixin):
    """
    Request handler instantiated for each new status-socket connection.
    """

    def __init__(self, balance: Onionbalance, *args, **kwargs):
        # Initialize the mixin (stores the balance) before the base
        # handler, since BaseRequestHandler.__init__ dispatches handle().
        StatusSocketHandlerMixin.__init__(self, balance)
        BaseRequestHandler.__init__(self, *args, **kwargs)

    def handle(self):
        """
        Send the status summary to the client that connected.
        """
        payload = self._outputString().encode('utf-8')
        self.request.sendall(payload)
handle()

Prepare and output the status summary when a connection is received

Source code in repos/onionbalance/onionbalance/hs_v3/status.py
65
66
67
68
69
def handle(self):
    """
    Build the status summary and send it to the connected client.
    """
    payload = self._outputString().encode('utf-8')
    self.request.sendall(payload)

ThreadingSocketServer

Bases: ThreadingMixIn, UnixStreamServer

Unix socket server with threading

Source code in repos/onionbalance/onionbalance/hs_v3/status.py
82
83
84
85
86
class ThreadingSocketServer(ThreadingMixIn, UnixStreamServer):
    """
    Unix socket server with threading

    ThreadingMixIn makes each client connection be handled in its own
    thread on top of the Unix stream socket server.
    """
    pass

stem_controller

StemController

Bases: object

This class is our interface to the control port

Source code in repos/onionbalance/onionbalance/hs_v3/stem_controller.py
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
class StemController(object):
    """This class is our interface to the control port"""

    def __init__(self, address=None, port=None, socket=None):
        # Connect via unix socket and/or address:port; precedence between
        # them is decided inside connect_to_control_port.
        self.controller = onionbalance.common.util.connect_to_control_port(tor_socket=socket,
                                                                           tor_address=address,
                                                                           tor_port=port)
        assert (self.controller.is_authenticated())

    def mark_tor_as_active(self):
        """
        Send the ACTIVE signal to the control port so that Tor does not become dormant.
        """
        # pylint: disable=no-member
        try:
            self.controller.signal(Signal.ACTIVE)
        except stem.SocketClosed:
            # Best-effort: a lost control connection here is not fatal
            logger.warning("Can't connect to the control port to send ACTIVE signal. Moving on...")

    def get_md_consensus(self):
        """
        Fetch the current microdescriptor consensus over the control port.
        """
        return self.controller.get_info("dir/status-vote/current/consensus-microdesc")

    def add_event_listeners(self):
        """
        Register handlers for the asynchronous control-port events we care
        about: client status, HS descriptor, and HS descriptor content.
        """
        # pylint: disable=no-member

        self.controller.add_event_listener(handle_new_status_event_wrapper, EventType.STATUS_CLIENT)
        self.controller.add_event_listener(handle_new_desc_event_wrapper, EventType.HS_DESC)
        self.controller.add_event_listener(handle_new_desc_content_event_wrapper, EventType.HS_DESC_CONTENT)

    def shutdown(self):
        """
        Close the control port connection.
        """
        self.controller.close()
mark_tor_as_active()

Send the ACTIVE signal to the control port so that Tor does not become dormant.

Source code in repos/onionbalance/onionbalance/hs_v3/stem_controller.py
53
54
55
56
57
58
59
60
61
def mark_tor_as_active(self):
    """
    Send the ACTIVE signal to the control port so that Tor does not become dormant.
    """
    # pylint: disable=no-member
    try:
        self.controller.signal(Signal.ACTIVE)
    except stem.SocketClosed:
        # Best-effort: a lost control connection here is not fatal
        logger.warning("Can't connect to the control port to send ACTIVE signal. Moving on...")

handle_new_desc_content_event_wrapper(desc_content_event)

A wrapper for this control port event (see above)

Source code in repos/onionbalance/onionbalance/hs_v3/stem_controller.py
35
36
37
38
39
40
41
def handle_new_desc_content_event_wrapper(desc_content_event):
    """  A wrapper for this control port event (see above) """
    # Imported lazily to avoid a circular import with the onionbalance module
    from onionbalance.hs_v3.onionbalance import my_onionbalance
    try:
        my_onionbalance.handle_new_desc_content_event(desc_content_event)
    except BaseException:
        # Exceptions on stem's listener thread would otherwise be
        # swallowed silently, so print the traceback ourselves
        print(traceback.format_exc())

handle_new_desc_event_wrapper(desc_event)

A wrapper for this control port event (see above)

Source code in repos/onionbalance/onionbalance/hs_v3/stem_controller.py
26
27
28
29
30
31
32
def handle_new_desc_event_wrapper(desc_event):
    """  A wrapper for this control port event (see above) """
    # Imported lazily to avoid a circular import with the onionbalance module
    from onionbalance.hs_v3.onionbalance import my_onionbalance
    try:
        my_onionbalance.handle_new_desc_event(desc_event)
    except BaseException:
        # Exceptions on stem's listener thread would otherwise be
        # swallowed silently, so print the traceback ourselves
        print(traceback.format_exc())

handle_new_status_event_wrapper(status_event)

A wrapper for this control port event. We need this so that we print tracebacks on the listener thread (also see https://stem.torproject.org/tutorials/tortoise_and_the_hare.html#advanced-listeners)

Source code in repos/onionbalance/onionbalance/hs_v3/stem_controller.py
13
14
15
16
17
18
19
20
21
22
23
def handle_new_status_event_wrapper(status_event):
    """
    A wrapper for this control port event. We need this so that we print
    tracebacks on the listener thread (also see
    https://stem.torproject.org/tutorials/tortoise_and_the_hare.html#advanced-listeners)
    """
    # Imported lazily to avoid a circular import with the onionbalance module
    from onionbalance.hs_v3.onionbalance import my_onionbalance
    try:
        my_onionbalance.handle_new_status_event(status_event)
    except BaseException:
        print(traceback.format_exc())

tor_ed25519

TorEd25519PrivateKey

Bases: object

Represents the private part of a blinded ed25519 key of an onion service and should expose a public_key() method and a sign() method.

Source code in repos/onionbalance/onionbalance/hs_v3/tor_ed25519.py
 75
 76
 77
 78
 79
 80
 81
 82
 83
 84
 85
 86
 87
 88
 89
 90
 91
 92
 93
 94
 95
 96
 97
 98
 99
100
101
102
103
104
105
106
107
class TorEd25519PrivateKey(object):
    """
    Represents the private part of a blinded ed25519 key of an onion service
    and should expose a public_key() method and a sign() method.
    """
    def __init__(self, expanded_sk):
        # 'expanded_sk' is Tor's 64-byte expanded secret key (a || h)
        self.priv_key = expanded_sk
        self.pub_key_bytes = ed25519_exts_ref.publickeyFromESK(self.priv_key)
        self.pub_key = TorEd25519PublicKey(self.pub_key_bytes)

    def public_key(self):
        """Return the matching TorEd25519PublicKey."""
        return self.pub_key

    def private_bytes(self, encoding=None, format=None, encryption_algorithm=None):
        # Arguments are accepted for hazmat API compatibility but ignored;
        # the raw expanded secret key bytes are always returned.
        return self.priv_key

    def sign(self, msg):
        """Sign 'msg' with the expanded secret key and return the signature."""
        return ed25519_exts_ref.signatureWithESK(msg, self.priv_key, self.pub_key_bytes)

    @property
    def __class__(self):
        """
        This is an epic hack to make this class look like a hazmat ed25519
        private key in the eyes of stem:
          https://github.com/asn-d6/onionbalance/issues/10#issuecomment-610425916

        The __class__ attribute is what's being used by unittest.mock and the C
        API to trick isinstance() checks, so as long as stem uses isinstance()
        this is gonna work:
          https://docs.python.org/3/library/unittest.mock.html#unittest.mock.Mock.__class__
          https://docs.python.org/3/c-api/object.html#c.PyObject_IsInstance
        """
        return Ed25519PrivateKey
__class__ property

This is an epic hack to make this class look like a hazmat ed25519 private key in the eyes of stem: https://github.com/asn-d6/onionbalance/issues/10#issuecomment-610425916

The __class__ attribute is what's being used by unittest.mock and the C API to trick isinstance() checks, so as long as stem uses isinstance() this is gonna work: https://docs.python.org/3/library/unittest.mock.html#unittest.mock.Mock.__class__ https://docs.python.org/3/c-api/object.html#c.PyObject_IsInstance

TorEd25519PublicKey

Bases: object

Represents the public blinded ed25519 key of an onion service and should expose a public_bytes() method and a verify() method.

Source code in repos/onionbalance/onionbalance/hs_v3/tor_ed25519.py
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
class TorEd25519PublicKey(object):
    """
    Represents the public blinded ed25519 key of an onion service and should
    expose a public_bytes() method and a verify() method.
    """
    def __init__(self, public_key):
        # 'public_key' holds the raw public key bytes
        self.public_key = public_key

    def public_bytes(self, encoding=None, format=None):
        # Arguments are accepted for hazmat API compatibility but ignored
        return self.public_key

    def verify(self, signature, message):
        """
        raises exception if sig not valid
        """
        slow_ed25519.checkvalid(signature, message, self.public_key)

    @property
    def __class__(self):
        # Same isinstance() masquerade hack as in TorEd25519PrivateKey
        return Ed25519PublicKey
verify(signature, message)

raises exception if sig not valid

Source code in repos/onionbalance/onionbalance/hs_v3/tor_ed25519.py
120
121
122
123
124
def verify(self, signature, message):
    """
    Verify 'signature' over 'message' with this public key.

    Returns None on success and raises (via slow_ed25519.checkvalid)
    if the signature is not valid.
    """
    slow_ed25519.checkvalid(signature, message, self.public_key)

load_tor_key_from_disk(key_bytes)

Load a private identity key from little-t-tor.

Source code in repos/onionbalance/onionbalance/hs_v3/tor_ed25519.py
 9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
def load_tor_key_from_disk(key_bytes):
    """
    Load a private identity key from little-t-tor.

    Raises ValueError if the header tag or the key length is wrong.
    """
    # little-t-tor key files start with this tag, NUL-padded to 32 bytes.
    tor_header = b'== ed25519v1-secret: type0 =='
    if not key_bytes.startswith(tor_header):
        raise ValueError("Tor key does not start with Tor header")

    # Skip the 32-byte padded header; the expanded secret key follows:
    # 32 bytes for secret scalar 'a' + 32 bytes for PRF key 'h'.
    expanded_sk = key_bytes[32:]
    if len(expanded_sk) != 64:
        raise ValueError("Tor private key has the wrong length")

    return TorEd25519PrivateKey(expanded_sk)

tor_node

Node

Bases: object

Represents a Tor node.

A Node instance gets created for each node of a consensus. When we fetch a new consensus, we create new Node instances for the routers found inside.

The 'microdescriptor' and 'routerstatus' fields of this object are immutable: They are set once when we receive the consensus based on the state of the network at that point, and they stay like that until we get a new consensus.

Source code in repos/onionbalance/onionbalance/hs_v3/tor_node.py
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
class Node(object):
    """
    Represents a Tor node.

    A Node instance gets created for each node of a consensus. When we fetch a
    new consensus, we create new Node instances for the routers found inside.

    The 'microdescriptor' and 'routerstatus' fields of this object are
    immutable: They are set once when we receive the consensus based on the
    state of the network at that point, and they stay like that until we get a
    new consensus.
    """

    def __init__(self, microdescriptor, routerstatus):
        """
        :param microdescriptor: stem microdescriptor for this router
        :param routerstatus: stem consensus routerstatus entry for this router
        """
        assert (microdescriptor and routerstatus)

        logger.debug("Initializing node with fpr %s", routerstatus.fingerprint)

        # The microdescriptor of this node
        self.microdescriptor = microdescriptor
        # The consensus routerstatus for this node
        self.routerstatus = routerstatus

    def get_hex_fingerprint(self):
        """
        Return this node's identity fingerprint (hex string from the
        consensus routerstatus).
        """
        return self.routerstatus.fingerprint

    def get_hsdir_index(self, srv, period_num):
        """
        Get the HSDir index for this node:

           hsdir_index(node) = H("node-idx" | node_identity |
                                 shared_random_value |
                                 INT_8(period_num) |
                                 INT_8(period_length) )

        :param srv: shared random value (bytes)
        :param period_num: time period number (int)

        Raises NoHSDir or NoEd25519Identity in case of errors.
        """
        # Local import to avoid a circular import at module load time.
        from onionbalance.hs_v3.onionbalance import my_onionbalance

        # See if this node can be an HSDir (it needs to be supported both in
        # protover and in flags)
        if 'HSDir' not in self.routerstatus.protocols or \
           2 not in self.routerstatus.protocols['HSDir'] or \
           'HSDir' not in self.routerstatus.flags:
            raise NoHSDir

        # See if ed25519 identity is supported for this node
        if 'ed25519' not in self.microdescriptor.identifiers:
            raise NoEd25519Identity

        # In stem the ed25519 identity is an unpadded base64 string; restore
        # exactly the missing padding so base64.b64decode accepts it.
        # (The old 'len % 4' computation over-padded when len % 4 == 3 and
        # only worked because b64decode tolerates excess '=' characters.)
        # TODO: Abstract this into its own function...
        ed25519_node_identity_b64 = self.microdescriptor.identifiers['ed25519']
        ed25519_node_identity_b64 += '=' * (-len(ed25519_node_identity_b64) % 4)
        ed25519_node_identity = base64.b64decode(ed25519_node_identity_b64)

        # INT_8() per the spec: 8-byte big-endian integers.
        period_num_int_8 = period_num.to_bytes(8, 'big')
        period_length = my_onionbalance.consensus.get_time_period_length()
        period_length_int_8 = period_length.to_bytes(8, 'big')

        hash_body = b"%s%s%s%s%s" % (b"node-idx",
                                     ed25519_node_identity,
                                     srv,
                                     period_num_int_8, period_length_int_8)
        hsdir_index = hashlib.sha3_256(hash_body).digest()

        return hsdir_index
get_hsdir_index(srv, period_num)

Get the HSDir index for this node:

hsdir_index(node) = H("node-idx" | node_identity | shared_random_value | INT_8(period_num) | INT_8(period_length) )

Raises NoHSDir or NoEd25519Identity in case of errors.

Source code in repos/onionbalance/onionbalance/hs_v3/tor_node.py
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
def get_hsdir_index(self, srv, period_num):
    """
    Get the HSDir index for this node:

       hsdir_index(node) = H("node-idx" | node_identity |
                             shared_random_value |
                             INT_8(period_num) |
                             INT_8(period_length) )

    Raises NoHSDir or NoEd25519Identity in case of errors.
    """
    # Local import to avoid a circular import at module load time.
    from onionbalance.hs_v3.onionbalance import my_onionbalance

    # See if this node can be an HSDir (it needs to be supported both in
    # protover and in flags)
    if 'HSDir' not in self.routerstatus.protocols or \
       2 not in self.routerstatus.protocols['HSDir'] or \
       'HSDir' not in self.routerstatus.flags:
        raise NoHSDir

    # See if ed25519 identity is supported for this node
    if 'ed25519' not in self.microdescriptor.identifiers:
        raise NoEd25519Identity

    # In stem the ed25519 identity is a base64 string and we need to add
    # the missing padding so that the python base64 module can successfuly
    # decode it.
    # TODO: Abstract this into its own function...
    # NOTE(review): 'len % 4' over-pads when len % 4 == 3 (adds three '='
    # where one is needed); b64decode tolerates the excess padding, but
    # '=' * (-len % 4) would be exact.
    ed25519_node_identity_b64 = self.microdescriptor.identifiers['ed25519']
    missing_padding = len(ed25519_node_identity_b64) % 4
    ed25519_node_identity_b64 += '=' * missing_padding
    ed25519_node_identity = base64.b64decode(ed25519_node_identity_b64)

    # INT_8() per the spec: 8-byte big-endian integers.
    period_num_int_8 = period_num.to_bytes(8, 'big')
    period_length = my_onionbalance.consensus.get_time_period_length()
    period_length_int_8 = period_length.to_bytes(8, 'big')

    hash_body = b"%s%s%s%s%s" % (b"node-idx",
                                 ed25519_node_identity,
                                 srv,
                                 period_num_int_8, period_length_int_8)
    hsdir_index = hashlib.sha3_256(hash_body).digest()

    return hsdir_index

manager

Global entry point