This repository has been archived by the owner on Aug 1, 2023. It is now read-only.

Python3 compatibility #5

Open
wants to merge 8 commits into master
Changes from all commits
7 changes: 4 additions & 3 deletions blockstack_zones/__init__.py
@@ -1,3 +1,4 @@
from parse_zone_file import parse_zone_file
from make_zone_file import make_zone_file
from exceptions import InvalidLineException
# flake8: noqa
from .parse_zone_file import parse_zone_file
from .make_zone_file import make_zone_file
from .exceptions import InvalidLineException
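
Why the dotted imports: Python 3 removed implicit relative imports (PEP 328). On Python 2 the bare "from parse_zone_file import parse_zone_file" is resolved relative to the package; on Python 3 it is treated as an absolute import and raises ImportError. The explicit ".parse_zone_file" form works on both interpreters, and the flake8 noqa marker only silences the unused-import warning for these re-exports. A minimal sketch of the pattern (comments are illustration, not part of the diff):

# blockstack_zones/__init__.py -- the pattern this hunk introduces:
# from parse_zone_file import parse_zone_file   # implicit relative import;
#                                               # ImportError on Python 3
from .parse_zone_file import parse_zone_file    # explicit; Python 2.6+ and 3

# Callers are unaffected either way:
#   from blockstack_zones import parse_zone_file
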
2 changes: 1 addition & 1 deletion blockstack_zones/configs.py
@@ -28,4 +28,4 @@
{spf}\n\
\n\
{uri}\n\
"""
"""
2 changes: 1 addition & 1 deletion blockstack_zones/exceptions.py
@@ -1,2 +1,2 @@
class InvalidLineException(Exception):
pass
pass
2 changes: 1 addition & 1 deletion blockstack_zones/make_zone_file.py
@@ -35,7 +35,7 @@ def make_zone_file(json_zone_file_input, origin=None, ttl=None, template=None):
# careful...
json_zone_file = copy.deepcopy(json_zone_file_input)
if origin is not None:
json_zone_file['$origin'] = origin
json_zone_file['$origin'] = origin

if ttl is not None:
json_zone_file['$ttl'] = ttl
38 changes: 18 additions & 20 deletions blockstack_zones/parse_zone_file.py
@@ -1,4 +1,4 @@
#!/usr/bin/python
#!/usr/bin/python

"""
Known limitations:
@@ -10,13 +10,10 @@
'TXT', 'SRV', 'SPF', 'URI'
"""

import copy
import datetime
import time
import argparse
from collections import defaultdict

from .configs import SUPPORTED_RECORDS, DEFAULT_TEMPLATE
from .configs import SUPPORTED_RECORDS # flake8: noqa
from .exceptions import InvalidLineException


@@ -73,7 +70,8 @@ def make_parser():
make_rr_subparser(subparsers, "MX", [("preference", str), ("host", str)])
make_rr_subparser(subparsers, "TXT", [("txt", str)])
make_rr_subparser(subparsers, "PTR", [("host", str)])
make_rr_subparser(subparsers, "SRV", [("priority", int), ("weight", int), ("port", int), ("target", str)])
make_rr_subparser(subparsers, "SRV", [("priority", int), ("weight", int), ("port", int),
("target", str)])
make_rr_subparser(subparsers, "SPF", [("data", str)])
make_rr_subparser(subparsers, "URI", [("priority", int), ("weight", int), ("target", str)])

@@ -131,11 +129,11 @@ def tokenize_line(line):
continue
elif c == ';':
if not escape:
# comment
# comment
ret.append(tokbuf)
tokbuf = ""
break

# normal character
tokbuf += c
escape = False
@@ -173,7 +171,7 @@ def remove_comments(text):
lines = text.split("\n")
for line in lines:
if len(line) == 0:
continue
continue

line = serialize(tokenize_line(line))
ret.append(line)
@@ -185,18 +183,18 @@ def flatten(text):
"""
Flatten the text:
* make sure each record is on one line.
* remove parenthesis
* remove parenthesis
"""
lines = text.split("\n")

# tokens: sequence of non-whitespace separated by '' where a newline was
tokens = []
for l in lines:
if len(l) == 0:
continue
continue

l = l.replace("\t", " ")
tokens += filter(lambda x: len(x) > 0, l.split(" ")) + ['']
tokens += list(filter(lambda x: len(x) > 0, l.split(" "))) + ['']

# find (...) and turn it into a single line ("capture" it)
capturing = False
@@ -210,7 +208,7 @@ def flatten(text):
if len(captured) > 0:
flattened.append(" ".join(captured))
captured = []
continue
continue

if tok.startswith("("):
# begin grouping
@@ -220,7 +218,7 @@ def flatten(text):
if capturing and tok.endswith(")"):
# end grouping. next end-of-line will turn this sequence into a flat line
tok = tok.rstrip(")")
capturing = False
capturing = False

captured.append(tok)

@@ -257,7 +255,7 @@ def remove_class(text):

def add_default_name(text):
"""
Go through each line of the text and ensure that
Go through each line of the text and ensure that
a name is defined. Use '@' if there is none.
"""
global SUPPORTED_RECORDS
@@ -271,7 +269,7 @@ def add_default_name(text):

if tokens[0] in SUPPORTED_RECORDS and not tokens[0].startswith("$"):
# add back the name
tokens = ['@'] + tokens
tokens = ['@'] + tokens

ret.append(serialize(tokens))

@@ -280,7 +278,7 @@ def add_default_name(text):

def parse_line(parser, record_token, parsed_records):
"""
Given the parser, capitalized list of a line's tokens, and the current set of records
Given the parser, capitalized list of a line's tokens, and the current set of records
parsed so far, parse it into a dictionary.

Return the new set of parsed records.
@@ -303,7 +301,7 @@ def parse_line(parser, record_token, parsed_records):
rr, unmatched = parser.parse_known_args(record_token)
assert len(unmatched) == 0, "Unmatched fields: %s" % unmatched
except (SystemExit, AssertionError, InvalidLineException):
# invalid argument
# invalid argument
raise InvalidLineException(line)

record_dict = rr.__dict__
@@ -320,7 +318,7 @@ def parse_line(parser, record_token, parsed_records):
assert record_type is not None, "Unknown record type in %s" % rr

# clean fields
for field in record_dict.keys():
for field in list(record_dict.keys()):
if record_dict[field] is None:
del record_dict[field]

@@ -329,7 +327,7 @@ def parse_line(parser, record_token, parsed_records):
# special record-specific fix-ups
if record_type == 'PTR':
record_dict['fullname'] = record_dict['name'] + '.' + current_origin

if len(record_dict) > 0:
if record_type.startswith("$"):
# put the value directly
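
Two of the changes in this file are behavioural rather than cosmetic: Python 3's filter() returns a lazy iterator instead of a list, so adding its result to a list fails, and dict.keys() is a live view, so deleting entries while iterating over it raises RuntimeError. A rough standalone sketch of both patterns (illustrative, not code from this package):

tokens = ["www", "", "IN", "A"]
# Python 2: filter() returns a list, so "+ ['']" just works.
# Python 3: filter() returns a lazy filter object and the "+" raises
# TypeError, hence the explicit list() wrapper used in flatten() above:
nonempty = list(filter(lambda x: len(x) > 0, tokens)) + ['']

record = {"name": "@", "ttl": None, "ip": "127.0.0.1"}
# Python 3: deleting entries while iterating the live keys() view raises
# RuntimeError, so parse_line() now iterates over a snapshot instead,
# which is also harmless on Python 2:
for field in list(record.keys()):
    if record[field] is None:
        del record[field]
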
21 changes: 11 additions & 10 deletions blockstack_zones/record_processors.py
@@ -1,4 +1,5 @@
import copy
from builtins import range


def process_origin(data, template):
@@ -30,7 +31,7 @@ def process_soa(data, template):
record = template[:]

if data is not None:

assert len(data) == 1, "Only support one SOA RR at this time"
data = data[0]

@@ -45,8 +46,8 @@ def process_soa(data, template):
soadat.append(data_name)

if data.get('ttl') is not None:
soadat.append( str(data['ttl']) )
soadat.append(str(data['ttl']))

soadat.append("IN")
soadat.append("SOA")

@@ -66,7 +67,7 @@ def process_soa(data, template):
record = record.replace("{soa}", soa_txt)

else:
# clear all SOA fields
# clear all SOA fields
record = record.replace("{soa}", "")

return record
@@ -78,10 +79,10 @@ def quote_field(data, field):
Return the new data records.
"""
if data is None:
return None
return None

data_dup = copy.deepcopy(data)
for i in xrange(0, len(data_dup)):
for i in range(0, len(data_dup)):
data_dup[i][field] = '"%s"' % data_dup[i][field]
data_dup[i][field] = data_dup[i][field].replace(";", "\;")

@@ -107,15 +108,15 @@ def process_rr(data, record_type, record_keys, field, template):
assert type(data) == list, "Data must be a list"

record = ""
for i in xrange(0, len(data)):
for i in range(0, len(data)):

for record_key in record_keys:
assert record_key in data[i].keys(), "Missing '%s'" % record_key

record_data = []
record_data.append( str(data[i].get('name', '@')) )
record_data.append(str(data[i].get('name', '@')))
if data[i].get('ttl') is not None:
record_data.append( str(data[i]['ttl']) )
record_data.append(str(data[i]['ttl']))

record_data.append(record_type)
record_data += [str(data[i][record_key]) for record_key in record_keys]
@@ -193,6 +194,6 @@ def process_uri(data, template):
"""
Replace {uri} in templtae with the serialized URI records
"""
# quote target
# quote target
data_dup = quote_field(data, "target")
return process_rr(data_dup, "URI", ["priority", "weight", "target"], "{uri}", template)
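
xrange() no longer exists in Python 3, which is why these loops switch to range(); the "from builtins import range" line comes from the future package (added to setup.py below) and gives Python 2 the same lazy range object that Python 3 uses. A small standalone sketch, not project code:

data = [{"target": "jabber"}, {"target": "mail"}]
# xrange(len(data)) is Python 2 only -- NameError on Python 3.
# Plain range() exists on both: an eager list on Python 2, a lazy range
# object on Python 3; "from builtins import range" gives Python 2 the lazy
# version as well. Either way the indexing loop behaves the same:
for i in range(0, len(data)):
    data[i]["target"] = '"%s"' % data[i]["target"]
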
4 changes: 4 additions & 0 deletions circle.yml
@@ -0,0 +1,4 @@
dependencies:
override:
- pip install tox tox-pyenv
- pyenv local 2.7.10 3.4.3 3.5.0
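
The CircleCI config installs tox and tox-pyenv and pins the interpreters pyenv should provide, which implies a tox.ini driving the actual test runs; no such file appears in the hunks shown here, so the following is only a hypothetical sketch of what a matching configuration could look like:

[tox]
envlist = py27, py34, py35

[testenv]
# assumed test command; the pull request does not show how the suite is run
commands = python -m unittest discover -s tests
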
1 change: 1 addition & 0 deletions setup.py
@@ -19,6 +19,7 @@
packages=find_packages(),
zip_safe=False,
install_requires=[
'future'
],
classifiers=[
'Intended Audience :: Developers',
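
The single added dependency backs the builtins import introduced in record_processors.py above: Python 2's standard library names the module __builtin__ rather than builtins, so the import would fail without the backport. A quick illustration of the assumed behaviour of the future package:

# Works on Python 2 and 3 alike once 'future' is installed; on a bare
# Python 2 it raises ImportError because the stdlib module is __builtin__.
from builtins import range
print(list(range(3)))   # [0, 1, 2] on either interpreter
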
60 changes: 31 additions & 29 deletions test_sample_data.py → tests/test_sample_data.py
@@ -77,33 +77,35 @@
"minimum": 86400
},
"ns": [
{ "host": "NS1.NAMESERVER.NET." },
{ "host": "NS2.NAMESERVER.NET." }
{"host": "NS1.NAMESERVER.NET."},
{"host": "NS2.NAMESERVER.NET."}
],
"a": [
{ "name": "@", "ip": "127.0.0.1" },
{ "name": "www", "ip": "127.0.0.1" },
{ "name": "mail", "ip": "127.0.0.1" }
{"name": "@", "ip": "127.0.0.1"},
{"name": "www", "ip": "127.0.0.1"},
{"name": "mail", "ip": "127.0.0.1"}
],
"aaaa": [
{ "ip": "::1" },
{ "name": "mail", "ip": "2001:db8::1" }
{"ip": "::1"},
{"name": "mail", "ip": "2001:db8::1"}
],
"cname": [
{ "name": "mail1", "alias": "mail" },
{ "name": "mail2", "alias": "mail" }
{"name": "mail1", "alias": "mail"},
{"name": "mail2", "alias": "mail"}
],
"mx": [
{ "preference": 0, "host": "mail1" },
{ "preference": 10, "host": "mail2" }
{"preference": 0, "host": "mail1"},
{"preference": 10, "host": "mail2"}
],
"txt": [
{ "name": "txt1", "txt": "hello" },
{ "name": "txt2", "txt": "world" }
{"name": "txt1", "txt": "hello"},
{"name": "txt2", "txt": "world"}
],
"srv": [
{ "name": "_xmpp-client._tcp", "target": "jabber", "priority": 10, "weight": 0, "port": 5222 },
{ "name": "_xmpp-server._tcp", "target": "jabber", "priority": 10, "weight": 0, "port": 5269 }
{"name": "_xmpp-client._tcp", "target": "jabber", "priority": 10,
"weight": 0, "port": 5222},
{"name": "_xmpp-server._tcp", "target": "jabber", "priority": 10,
"weight": 0, "port": 5269}
]
},
"sample_3": {
@@ -119,25 +121,25 @@
"minimum": 86400
},
"ns": [
{ "host": "NS1.NAMESERVER.NET." },
{ "host": "NS2.NAMESERVER.NET." }
{"host": "NS1.NAMESERVER.NET."},
{"host": "NS2.NAMESERVER.NET."}
],
"a": [
{ "name": "@", "ip": "127.0.0.1" },
{ "name": "www", "ip": "127.0.0.1" },
{ "name": "mail", "ip": "127.0.0.1" }
{"name": "@", "ip": "127.0.0.1"},
{"name": "www", "ip": "127.0.0.1"},
{"name": "mail", "ip": "127.0.0.1"}
],
"aaaa": [
{ "ip": "::1" },
{ "name": "mail", "ip": "2001:db8::1" }
{"ip": "::1"},
{"name": "mail", "ip": "2001:db8::1"}
],
"cname":[
{ "name": "mail1", "alias": "mail" },
{ "name": "mail2", "alias": "mail" }
"cname": [
{"name": "mail1", "alias": "mail"},
{"name": "mail2", "alias": "mail"}
],
"mx":[
{ "preference": 0, "host": "mail1" },
{ "preference": 10, "host": "mail2" }
"mx": [
{"preference": 0, "host": "mail1"},
{"preference": 10, "host": "mail2"}
]
}
}
}