Commits on Source (1)
    script to compute public interest and classify participation level · 68d6ad9f
    guglielmo authored
    implemented a meta script to launch all upgrade tasks from atoka
    requirements no longer use hashes; the click dependency of flex was fixed upstream, so the normal package is now installed instead of a git branch
    jq added to web container image
    Bump version: 1.1.6 → 1.1.7
......@@ -5,6 +5,13 @@ All notable changes to this project will be documented in this file.
The format is based on [Keep a Changelog](http://keepachangelog.com/en/1.0.0/)
and this project adheres to [Semantic Versioning](http://semver.org/spec/v2.0.0.html).
## [1.1.7]
### Added
- script to compute public interest and classify participation level
- meta script to launch all upgrade tasks from atoka
## [1.1.6]
### Changed
......
......@@ -13,6 +13,7 @@ RUN apt-get -qy update \
python-dev \
libxml2-dev libxslt-dev \
tmux \
jq \
less \
vim \
&& rm -rf /var/lib/apt/lists/*
......
......@@ -17,7 +17,7 @@ fi
# Arguments passed to `pip-compile`
args=$@
# Append default arguments
args+=" --generate-hashes"
# args+=" --generate-hashes"
export CUSTOM_COMPILE_COMMAND="./$(basename $0) --upgrade"
......
......@@ -3,4 +3,4 @@
Openpolis Data Manager service package (backend)
"""
__version__ = '1.1.6'
__version__ = '1.1.7'
......@@ -6,7 +6,12 @@ from taskmanager.utils import LoggingBaseCommand
class Command(LoggingBaseCommand):
help = "Metacommand that executes all etl procedures"
help = "Metacommand that executes all etl procedures, for a given shares level"
verbosity = None
batchsize = None
datapath = None
shares_level = None
clear_cache = False
def add_arguments(self, parser):
parser.add_argument(
......@@ -28,73 +33,96 @@ class Command(LoggingBaseCommand):
default=0,
help="Level of the public share, starting from public institution = 0"
)
parser.add_argument(
"--clear-cache",
dest="clear_cache",
action='store_true',
help="Clear atoka.json and atoka_economics.json files and fetch them anew from atoka"
)
def handle(self, *args, **options):
self.setup_logger(__name__, formatter_key='simple', **options)
batchsize = options['batchsize']
datapath = options['datapath']
shares_level = options['shares_level']
self.batchsize = options['batchsize']
self.datapath = options['datapath']
self.shares_level = options['shares_level']
self.clear_cache = options['clear_cache']
self.logger.info("Start overall procedure")
if shares_level < 0 or shares_level > 2:
if self.shares_level < 0 or self.shares_level > 2:
raise Exception("--shares-level must be between 0 and 2")
verbosity = options.get("verbosity", 1)
self.verbosity = options.get("verbosity", 1)
if self.clear_cache:
if os.path.exists(os.path.join(self.datapath, 'atoka.json')):
os.unlink(os.path.join(self.datapath, 'atoka.json'))
if os.path.exists(os.path.join(self.datapath, 'atoka_economics.json')):
os.unlink(os.path.join(self.datapath, 'atoka_economics.json'))
if not os.path.exists(os.path.join(self.datapath, 'atoka.json')):
self.extract_json()
self.transform_and_load_organizations()
self.transform_and_load_ownerships()
self.transform_and_load_persons_and_memberships()
self.logger.info("End overall procedure")
def extract_json(self):
management.call_command(
'import_atoka_extract_json',
verbosity=verbosity,
json_file=os.path.join(datapath, 'atoka.json'),
batchsize=batchsize,
shares_level=shares_level
verbosity=self.verbosity,
json_file=os.path.join(self.datapath, 'atoka.json'),
batchsize=self.batchsize,
shares_level=self.shares_level,
tax_ids=['02438750586', '00484960588', '80003170661']
)
# transform and load organizations
def transform_and_load_organizations(self):
management.call_command(
'import_atoka_transform_json',
verbosity=verbosity,
json_source_file=os.path.join(datapath, 'atoka.json'),
json_output_path=datapath,
verbosity=self.verbosity,
json_source_file=os.path.join(self.datapath, 'atoka.json'),
json_output_path=self.datapath,
context='organizations'
)
management.call_command(
'import_orgs_from_json', os.path.join(datapath, 'atoka_organizations.json'),
verbosity=verbosity,
'import_orgs_from_json', os.path.join(self.datapath, 'atoka_organizations.json'),
verbosity=self.verbosity,
lookup_strategy='mixed_current',
log_step=100
)
# transform and load ownerships
def transform_and_load_ownerships(self):
management.call_command(
'import_atoka_transform_json',
verbosity=verbosity,
json_source_file=os.path.join(datapath, 'atoka.json'),
json_output_path=datapath,
verbosity=self.verbosity,
json_source_file=os.path.join(self.datapath, 'atoka.json'),
json_output_path=self.datapath,
context='ownerships'
)
management.call_command(
'import_ownerships_from_json', os.path.join(datapath, 'atoka_ownerships.json'),
verbosity=verbosity,
'import_ownerships_from_json', os.path.join(self.datapath, 'atoka_ownerships.json'),
'http://api.atoka.io',
verbosity=self.verbosity,
lookup_strategy='identifier_current',
log_step=100
)
# transform and load persons and memberships
def transform_and_load_persons_and_memberships(self):
management.call_command(
'import_atoka_transform_json',
verbosity=verbosity,
json_source_file=os.path.join(datapath, 'atoka.json'),
json_output_path=datapath,
verbosity=self.verbosity,
json_source_file=os.path.join(self.datapath, 'atoka.json'),
json_output_path=self.datapath,
context='persons_memberships'
)
management.call_command(
'import_persons_memberships_from_json', os.path.join(datapath, 'atoka_persons_memberships.json'),
verbosity=verbosity,
'import_persons_memberships_from_json', os.path.join(self.datapath, 'atoka_persons_memberships.json'),
verbosity=self.verbosity,
check_membership_label=True,
log_step=100, context='atoka',
use_dummy_transformation=True
)
self.logger.info("End overall procedure")
# -*- coding: utf-8 -*-
from django.core import management
from django.db.models import Sum
from models import Classification, Organization
from taskmanager.utils import LoggingBaseCommand
from project.atoka.models import OrganizationEconomics
class Command(LoggingBaseCommand):
help = "Metacommand that executes all etl procedures to import data from atoka"
def add_arguments(self, parser):
parser.add_argument(
"--batch-size",
dest="batchsize", type=int,
default=50,
help="Size of the batch of organizations processed at once",
)
parser.add_argument(
"--data-path",
dest="datapath",
default="./resources/data/atoka",
help="Complete path to json files"
)
parser.add_argument(
"--min-shares-level",
dest="min_shares_level",
type=int,
default=1,
help="Minimum level of the public share"
)
parser.add_argument(
"--max-shares-level",
dest="max_shares_level",
type=int,
default=2,
help="Maximum level of the public share"
)
@staticmethod
def compute_and_update_public_interest(c_level, threshold=25.):
"""Compute and update the total percentage of public partecipation
for all organizations having given level"""
pi = Classification.objects.get(scheme='INTERESSE_PUBBLICO_OP', code='1')
orgs = Organization.objects.filter(
classifications__classification=c_level
).annotate(sum=Sum('ownerships_as_owned__percentage'))
for org in orgs:
if hasattr(org, 'economics'):
OrganizationEconomics.objects.update_or_create(
organization=org,
defaults={
'public_shares_percentage': org.sum
}
)
org.economics.pub_part_percentage = org.sum
org.save()
if org.sum >= threshold:
org.add_classification_rel(pi)
def handle(self, *args, **options):
self.setup_logger(__name__, formatter_key='simple', **options)
batchsize = options['batchsize']
datapath = options['datapath']
max_shares_level = options['max_shares_level']
min_shares_level = options['min_shares_level']
self.logger.info("Start overall procedure")
if min_shares_level < 0 or min_shares_level > 2:
raise Exception("--min-shares-level must be between 1 and 2")
if max_shares_level < 1 or max_shares_level > 3:
raise Exception("--max-shares-level must be between 1 and 3")
if min_shares_level > max_shares_level:
raise Exception("--max-shares-level must be greater or equal to --min-shares-level")
verbosity = options.get("verbosity", 1)
for shares_level in range(min_shares_level, max_shares_level+1):
c = Classification.objects.get(scheme='LIVELLO_PARTECIPAZIONE_OP', code=str(shares_level))
next_c = Classification.objects.get(scheme='LIVELLO_PARTECIPAZIONE_OP', code=str(shares_level+1))
management.call_command(
'import_atoka',
verbosity=verbosity,
datapath=datapath,
batchsize=batchsize,
shares_level=shares_level
)
# fetch level-1 owned organizations (partecipate)
orgs = Organization.objects.filter(
ownerships_as_owned__isnull=False,
).distinct().filter(
ownerships__owner_organization__classifications__classification=c
)
n_orgs = orgs.count()
for n, org in enumerate(orgs, start=1):
org.add_classification_rel(next_c)
if n % 1000 == 0:
self.logger.info(" processed {0}/{1}".format(n, n_orgs))
# compute percentages and determine public interest for the next level
self.compute_and_update_public_interest(next_c)
management.call_command(
'import_atoka_economics',
verbosity=verbosity,
datapath=datapath,
batchsize=batchsize
)
self.logger.info("End overall procedure")
# -*- coding: utf-8 -*-
from django.core import management
from taskmanager.utils import LoggingBaseCommand
class Command(LoggingBaseCommand):
"""This management tasks is a metacommand that executes a complex series of procedures in one single step.
It should be used when upgrading, and called by:
.. code::python
python manage.py import_atoka_meta --batch-size 100 --data-path ./data/atoka --threshold 0.33
It replaces the following sequence of invocations:
.. code:: bash
#
# Setup: tag institutions (level 0)
#
# compute public interest and classify LIVELLO_PARTECIPAZIONE_OP
python manage.py script_compute_public_interest \
--shares-level=0 --threshold=0.33 -v2
#
# First step: institutions (level 0) -> directly owned (level 1)
#
# extract ownerships and roles information
python manage.py import_atoka_extract_json \
--json-file ./data/atoka/atoka.json \
--batch-size=100 --shares-level=0 -v2
# transform and load organizations
python manage.py import_atoka_transform_json\
--json-source-file ./data/atoka/atoka.json\
--json-output-path ./data/atoka\
--contexts organizations -v2
python manage.py import_orgs_from_json \
./data/atoka/atoka_organizations.json \
--lookup-strategy=mixed_current --log-step=100 -v2
# transform and load ownerships
python manage.py import_atoka_transform_json \
--json-source-file ./data/atoka/atoka.json\
--json-output-path ./data/atoka \
--contexts ownerships -v2
python manage.py import_ownerships_from_json \
./data/atoka/atoka_ownerships.json http://api.atoka.io \
--lookup-strategy=identifier_current --log-step=100 -v2
# transform and load persons and memberships
python manage.py import_atoka_transform_json \
--json-source-file ./data/atoka/atoka.json\
--json-output-path ./data/atoka \
--contexts persons_memberships -v2
python manage.py import_persons_memberships_from_json \
./data/atoka/atoka_persons_memberships.json \
--check-membership-label --log-step=100 --context=atoka \
--use-dummy-transformation -v2
# compute public interest and classify LIVELLO_PARTECIPAZIONE_OP
python manage.py script_compute_public_interest \
--shares-level=1 --threshold=0.33 -v2
#
# Second step: directly owned (level 1) -> indirectly owned (level 2)
#
# extract ownerships and roles information
python manage.py import_atoka_extract_json \
--json-file ./data/atoka/atoka.json \
--batch-size=100 --shares-level=1 -v2
# transform and load organizations
python manage.py import_atoka_transform_json\
--json-source-file ./data/atoka/atoka.json\
--json-output-path ./data/atoka\
--contexts organizations -v2
python manage.py import_orgs_from_json \
./data/atoka/atoka_organizations.json \
--lookup-strategy=mixed_current --log-step=100 -v2
# transform and load ownerships
python manage.py import_atoka_transform_json \
--json-source-file ./data/atoka/atoka.json\
--json-output-path ./data/atoka \
--contexts ownerships -v2
python manage.py import_ownerships_from_json \
./data/atoka/atoka_ownerships.json http://api.atoka.io \
--lookup-strategy=identifier_current --log-step=100 -v2
# transform and load persons and memberships
python manage.py import_atoka_transform_json \
--json-source-file ./data/atoka/atoka.json\
--json-output-path ./data/atoka \
--contexts persons_memberships -v2
python manage.py import_persons_memberships_from_json \
./data/atoka/atoka_persons_memberships.json \
--check-membership-label --log-step=100 --context=atoka \
--use-dummy-transformation -v2
# compute public interest and classify LIVELLO_PARTECIPAZIONE_OP
python manage.py script_compute_public_interest \
--shares-level=2 --threshold=0.33 -v2
"""
help = "Metacommand that executes etl procedures for all shares levels (0, 1, 2)"
verbosity = None
batchsize = None
datapath = None
threshold = None
def add_arguments(self, parser):
parser.add_argument(
"--batch-size",
dest="batchsize", type=int,
default=50,
help="Size of the batch of organizations processed at once",
)
parser.add_argument(
"--data-path",
dest="datapath",
default="./resources/data/atoka",
help="Complete path to json files"
)
parser.add_argument(
"--threshold",
dest="threshold", type=float,
default=.25,
help="Threshold for public control",
)
def handle(self, *args, **options):
self.setup_logger(__name__, formatter_key='simple', **options)
self.batchsize = options['batchsize']
self.datapath = options['datapath']
self.threshold = options['threshold']
self.logger.info("Start overall procedure")
self.verbosity = options.get("verbosity", 1)
management.call_command(
'script_compute_public_interest',
verbosity=self.verbosity,
threshold=self.threshold,
shares_level=0
)
for shares_level in range(0, 3):
management.call_command(
'import_atoka',
verbosity=self.verbosity,
datapath=self.datapath,
batchsize=self.batchsize,
shares_level=shares_level,
)
management.call_command(
'script_compute_public_interest',
verbosity=self.verbosity,
threshold=self.threshold,
shares_level=shares_level + 1
)
management.call_command(
'import_atoka_economics',
verbosity=self.verbosity,
datapath=self.datapath,
batchsize=self.batchsize
)
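A programmatic equivalent of the CLI example in the docstring, as a sketch; the keyword names match the dest values declared in add_arguments above:

    # Sketch: run the whole chain in one call, mirroring
    #   python manage.py import_atoka_meta --batch-size 100 --data-path ./data/atoka --threshold 0.33
    from django.core import management

    management.call_command(
        'import_atoka_meta',
        batchsize=100,
        datapath='./data/atoka',
        threshold=0.33,
    )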
# -*- coding: utf-8 -*-
from django.db.models import Sum
from models import Classification, Organization
from taskmanager.utils import LoggingBaseCommand
from project.atoka.models import OrganizationEconomics
class Command(LoggingBaseCommand):
help = "Compute and update the percentage of public partecipation for given organizations"
def add_arguments(self, parser):
parser.add_argument(
"--threshold",
dest="threshold", type=float,
default=.25,
help="Threshold for public control",
)
parser.add_argument(
"--shares-level",
dest="shares_level",
type=int,
default=0,
help="Level of the public share, starting from public institution = 0"
)
def handle(self, *args, **options):
self.setup_logger(__name__, formatter_key='simple', **options)
threshold = options['threshold']
shares_level = options['shares_level']
self.logger.info("Start of procedure")
if shares_level < 0 or shares_level > 2:
raise Exception("--shares-level must be between 1 and 2")
cs = [
Classification.objects.get(scheme='LIVELLO_PARTECIPAZIONE_OP', code='0'),
Classification.objects.get(scheme='LIVELLO_PARTECIPAZIONE_OP', code='1'),
Classification.objects.get(scheme='LIVELLO_PARTECIPAZIONE_OP', code='2')
]
pi = Classification.objects.get(scheme='INTERESSE_PUBBLICO_OP', code='1')
no_pi = Classification.objects.get(scheme='INTERESSE_PUBBLICO_OP', code='0')
if shares_level > 0:
previous_cs = cs[shares_level-1]
c = cs[shares_level]
# tag organizations at level shares_level:
# select all those that appear as owned, but are not yet in the levels
orgs = Organization.objects.filter(
ownerships_as_owned__isnull=False
).distinct().filter(
ownerships_as_owned__owner_organization__classifications__classification=previous_cs
)
n_orgs = orgs.count()
for n, org in enumerate(orgs, start=1):
org.add_classification_rel(c)
if n % 1000 == 0:
print(" processed {0}/{1}".format(n, n_orgs))
# compute public interest for organizations at level shares_level
n_pi = n_nopi = 0
n_orgs = orgs.count()
for n, org in enumerate(orgs.annotate(sum=Sum('ownerships_as_owned__percentage')), start=1):
e, _ = OrganizationEconomics.objects.update_or_create(
organization=org,
defaults={
'pub_part_percentage': org.sum
}
)
if org.sum >= threshold:
org.add_classification_rel(pi)
n_pi += 1
else:
org.add_classification_rel(no_pi)
n_nopi += 1
if n % 100 == 0:
self.logger.info(
"{0}/{1} processed: {2}pi vs {3}nopi".format(n, n_orgs, n_pi, n_nopi)
)
else:
# for Level 0 organizations (institutions)
# start filtering current organizations with a tax_id,
# excluding those classified with a classification FORMA_GIURIDICA_OP
# that refers to a **private** organization
orgs = Organization.objects.filter(
classifications__classification__scheme='FORMA_GIURIDICA_OP'
).current().exclude(
classifications__classification_id__in=[
11, 20, 24, 29, 48, 69, 83, 295, 321, 346, 403, 621, 941, 730, 1182, 1183, 1184, 1185, 1186, 1187,
1188, 1190, 1189, 1191, 1192, 1193, 1194, 1195, 1196, 1197, 1198, 1199, 1200, 1201, 1202
]
).filter(identifier__isnull=False)
n_orgs = orgs.count()
for n, org in enumerate(orgs, start=1):
org.add_classification_rel(cs[0])
if n % 1000 == 0:
print(" processed {0}/{1}".format(n, n_orgs))
self.logger.info("End of procedure")
......@@ -18,10 +18,7 @@ django-filter # QuerySet filtering from URL parameters
drf-yasg # Swagger/OpenAPI generator
drf-rw-serializers # Read and write serializers for DRF
# flex # Swagger/OpenAPI validator
# Use blueyed fork temporarily, due to click version limitation
# (see https://github.com/pipermerriam/flex/pull/207)
-e git+https://github.com/blueyed/flex.git@click#egg=flex
flex # Swagger/OpenAPI validator
# Database drivers
psycopg2-binary # PostgreSQL adapter
......
This diff is collapsed.
......@@ -4,18 +4,7 @@
#
# ./compile-requirements.sh --upgrade
#
flake8==3.5.0 \
--hash=sha256:7253265f7abd8b313e3892944044a365e3f4ac3fcdcfb4298f55ee9ddf188ba0 \
--hash=sha256:c7841163e2b576d435799169b78703ad6ac1bbb0f199994fc05f700b2a90ea37
mccabe==0.6.1 \
--hash=sha256:ab8a6258860da4b6677da4bd2fe5dc2c659cff31b3ee4f7f5d64e79735b80d42 \
--hash=sha256:dd8d182285a0fe56bace7f45b5e7d1a6ebcbf524e8f3bd87eb0f125271b8831f \
# via flake8
pycodestyle==2.3.1 \
--hash=sha256:682256a5b318149ca0d2a9185d365d8864a768a28db66a84a2ea946bcc426766 \
--hash=sha256:6c4245ade1edfad79c3446fadfc96b0de2759662dc29d07d80a6f27ad1ca6ba9 \
# via flake8
pyflakes==1.6.0 \
--hash=sha256:08bd6a50edf8cffa9fa09a463063c425ecaaf10d1eb0335a7e8b1401aef89e6f \
--hash=sha256:8d616a382f243dbf19b54743f280b80198be0bca3a5396f1d2e1fca6223e8805 \
# via flake8
flake8==3.5.0
mccabe==0.6.1 # via flake8
pycodestyle==2.3.1 # via flake8
pyflakes==1.6.0 # via flake8
......@@ -4,105 +4,20 @@
#
# ./compile-requirements.sh --upgrade
#
certifi==2018.11.29 \
--hash=sha256:47f9c83ef4c0c621eaef743f133f09fa8a74a9b75f037e8624f83bd1b6626cb7 \
--hash=sha256:993f830721089fef441cdfeb4b2c8c9df86f0c63239f06bd025a76a7daddb033 \
# via requests
chardet==3.0.4 \
--hash=sha256:84ab92ed1c4d4f16916e05906b6b75a6c0fb5db821cc65e70cbd64a3e2a5eaae \
--hash=sha256:fc323ffcaeaed0e0a02bf4d117757b98aed530d9ed4531e3e15460124c106691 \
# via requests
click==7.0 \
--hash=sha256:2335065e6395b9e67ca716de5f7526736bfa6ceead690adf616d925bdc622b13 \
--hash=sha256:5b94b49521f6456670fdb30cd82a4eca9412788a93fa6dd6df72c94d5a8ff2d7 \
# via safety
coverage==4.5.2 \
--hash=sha256:09e47c529ff77bf042ecfe858fb55c3e3eb97aac2c87f0349ab5a7efd6b3939f \
--hash=sha256:0a1f9b0eb3aa15c990c328535655847b3420231af299386cfe5efc98f9c250fe \
--hash=sha256:0cc941b37b8c2ececfed341444a456912e740ecf515d560de58b9a76562d966d \
--hash=sha256:10e8af18d1315de936d67775d3a814cc81d0747a1a0312d84e27ae5610e313b0 \
--hash=sha256:1b4276550b86caa60606bd3572b52769860a81a70754a54acc8ba789ce74d607 \
--hash=sha256:1e8a2627c48266c7b813975335cfdea58c706fe36f607c97d9392e61502dc79d \
--hash=sha256:2b224052bfd801beb7478b03e8a66f3f25ea56ea488922e98903914ac9ac930b \
--hash=sha256:447c450a093766744ab53bf1e7063ec82866f27bcb4f4c907da25ad293bba7e3 \
--hash=sha256:46101fc20c6f6568561cdd15a54018bb42980954b79aa46da8ae6f008066a30e \
--hash=sha256:4710dc676bb4b779c4361b54eb308bc84d64a2fa3d78e5f7228921eccce5d815 \
--hash=sha256:510986f9a280cd05189b42eee2b69fecdf5bf9651d4cd315ea21d24a964a3c36 \
--hash=sha256:5535dda5739257effef56e49a1c51c71f1d37a6e5607bb25a5eee507c59580d1 \
--hash=sha256:5a7524042014642b39b1fcae85fb37556c200e64ec90824ae9ecf7b667ccfc14 \
--hash=sha256:5f55028169ef85e1fa8e4b8b1b91c0b3b0fa3297c4fb22990d46ff01d22c2d6c \
--hash=sha256:6694d5573e7790a0e8d3d177d7a416ca5f5c150742ee703f3c18df76260de794 \
--hash=sha256:6831e1ac20ac52634da606b658b0b2712d26984999c9d93f0c6e59fe62ca741b \
--hash=sha256:77f0d9fa5e10d03aa4528436e33423bfa3718b86c646615f04616294c935f840 \
--hash=sha256:828ad813c7cdc2e71dcf141912c685bfe4b548c0e6d9540db6418b807c345ddd \
--hash=sha256:85a06c61598b14b015d4df233d249cd5abfa61084ef5b9f64a48e997fd829a82 \
--hash=sha256:8cb4febad0f0b26c6f62e1628f2053954ad2c555d67660f28dfb1b0496711952 \
--hash=sha256:a5c58664b23b248b16b96253880b2868fb34358911400a7ba39d7f6399935389 \
--hash=sha256:aaa0f296e503cda4bc07566f592cd7a28779d433f3a23c48082af425d6d5a78f \
--hash=sha256:ab235d9fe64833f12d1334d29b558aacedfbca2356dfb9691f2d0d38a8a7bfb4 \
--hash=sha256:b3b0c8f660fae65eac74fbf003f3103769b90012ae7a460863010539bb7a80da \
--hash=sha256:bab8e6d510d2ea0f1d14f12642e3f35cefa47a9b2e4c7cea1852b52bc9c49647 \
--hash=sha256:c45297bbdbc8bb79b02cf41417d63352b70bcb76f1bbb1ee7d47b3e89e42f95d \
--hash=sha256:d19bca47c8a01b92640c614a9147b081a1974f69168ecd494687c827109e8f42 \
--hash=sha256:d64b4340a0c488a9e79b66ec9f9d77d02b99b772c8b8afd46c1294c1d39ca478 \
--hash=sha256:da969da069a82bbb5300b59161d8d7c8d423bc4ccd3b410a9b4d8932aeefc14b \
--hash=sha256:ed02c7539705696ecb7dc9d476d861f3904a8d2b7e894bd418994920935d36bb \
--hash=sha256:ee5b8abc35b549012e03a7b1e86c09491457dba6c94112a2482b18589cc2bdb9
dparse==0.4.1 \
--hash=sha256:00a5fdfa900629e5159bf3600d44905b333f4059a3366f28e0dbd13eeab17b19 \
--hash=sha256:cef95156fa0adedaf042cd42f9990974bec76f25dfeca4dc01f381a243d5aa5b \
# via safety
factory-boy==2.11.1 \
--hash=sha256:6f25cc4761ac109efd503f096e2ad99421b1159f01a29dbb917359dcd68e08ca \
--hash=sha256:d552cb872b310ae78bd7429bf318e42e1e903b1a109e899a523293dfa762ea4f
faker==1.0.1 \
--hash=sha256:228419b0a788a7ac867ebfafdd438461559ab1a0975edb607300852d9acaa78d \
--hash=sha256:52a3dcc6a565b15fe1c95090321756d5a8a7c1caf5ab3df2f573ed70936ff518
idna==2.8 \
--hash=sha256:c357b3f628cf53ae2c4c05627ecc484553142ca23264e593d327bcde5e9c3407 \
--hash=sha256:ea8b7f6188e6fa117537c3df7da9fc686d485087abf6ac197f9c46432f7e4a3c \
# via requests
packaging==18.0 \
--hash=sha256:0886227f54515e592aaa2e5a553332c73962917f2831f1b0f9b9f4380a4b9807 \
--hash=sha256:f95a1e147590f204328170981833854229bb2912ac3d5f89e2a8ccd2834800c9 \
# via dparse, safety
pyparsing==2.3.1 \
--hash=sha256:66c9268862641abcac4a96ba74506e594c884e3f57690a696d21ad8210ed667a \
--hash=sha256:f6c5ef0d7480ad048c054c37632c67fca55299990fff127850181659eea33fc3 \
# via packaging
python-dateutil==2.7.5 \
--hash=sha256:063df5763652e21de43de7d9e00ccf239f953a832941e37be541614732cdfc93 \
--hash=sha256:88f9287c0174266bb0d8cedd395cfba9c58e87e5ad86b2ce58859bc11be3cf02 \
# via faker
pyyaml==3.13 \
--hash=sha256:3d7da3009c0f3e783b2c873687652d83b1bbfd5c88e9813fb7e5b03c0dd3108b \
--hash=sha256:3ef3092145e9b70e3ddd2c7ad59bdd0252a94dfe3949721633e41344de00a6bf \
--hash=sha256:40c71b8e076d0550b2e6380bada1f1cd1017b882f7e16f09a65be98e017f211a \
--hash=sha256:558dd60b890ba8fd982e05941927a3911dc409a63dcb8b634feaa0cda69330d3 \
--hash=sha256:a7c28b45d9f99102fa092bb213aa12e0aaf9a6a1f5e395d36166639c1f96c3a1 \
--hash=sha256:aa7dd4a6a427aed7df6fb7f08a580d68d9b118d90310374716ae90b710280af1 \
--hash=sha256:bc558586e6045763782014934bfaf39d48b8ae85a2713117d16c39864085c613 \
--hash=sha256:d46d7982b62e0729ad0175a9bc7e10a566fc07b224d2c79fafb5e032727eaa04 \
--hash=sha256:d5eef459e30b09f5a098b9cea68bebfeb268697f78d647bd255a085371ac7f3f \
--hash=sha256:e01d3203230e1786cd91ccfdc8f8454c8069c91bee3962ad93b87a4b2860f537 \
--hash=sha256:e170a9e6fcfd19021dd29845af83bb79236068bf5fd4df3327c1be18182b2531 \
# via dparse
requests==2.21.0 \
--hash=sha256:502a824f31acdacb3a35b6690b5fbf0bc41d63a24a45c4004352b0242707598e \
--hash=sha256:7bf2a778576d825600030a110f3c0e3e8edc51dfaafe1c146e39a2027784957b \
# via safety
safety==1.8.4 \
--hash=sha256:399511524f47230d5867f1eb75548f9feefb7a2711a4985cb5be0e034f87040f \
--hash=sha256:69b970918324865dcd7b92337e07152a0ea1ceecaf92f4d3b38529ee0ca83441
six==1.12.0 \
--hash=sha256:3350809f0555b11f552448330d0b52d5f24c91a322ea4a15ef22629740f3761c \
--hash=sha256:d16a0141ec1a18405cd4ce8b4613101da75da0e9a7aec5bdd4fa804d0e0eba73 \
# via dparse, faker, packaging, python-dateutil
text-unidecode==1.2 \
--hash=sha256:5a1375bb2ba7968740508ae38d92e1f889a0832913cb1c447d5e2046061a396d \
--hash=sha256:801e38bd550b943563660a91de8d4b6fa5df60a542be9093f7abf819f86050cc \
# via faker
urllib3==1.24.1 \
--hash=sha256:61bf29cada3fc2fbefad4fdf059ea4bd1b4a86d2b6d15e1c7c0b582b9752fe39 \
--hash=sha256:de9529817c93f27c8ccbfead6985011db27bd0ddfcdb2d86f3f663385c6a9c22 \
# via requests
certifi==2018.11.29 # via requests
chardet==3.0.4 # via requests
click==7.0 # via safety
coverage==4.5.2
dparse==0.4.1 # via safety
factory-boy==2.11.1
faker==1.0.1
idna==2.8 # via requests
packaging==18.0 # via dparse, safety
pyparsing==2.3.1 # via packaging
python-dateutil==2.7.5 # via faker
pyyaml==3.13 # via dparse
requests==2.21.0 # via safety
safety==1.8.4
six==1.12.0 # via dparse, faker, packaging, python-dateutil
text-unidecode==1.2 # via faker
urllib3==1.24.1 # via requests
[bumpversion]
current_version = 1.1.6
current_version = 1.1.7
commit = True
tag = True
tag_name = v{new_version}
......