Commits on Source (40)
......@@ -135,6 +135,7 @@ deploy to staging:
cache: {}
variables:
DEBUG: "on"
DEBUG_TOOLBAR: "off"
DOCKER_TLS_VERIFY: "1"
DOCKER_HOST: "tcp://${STAGING_HOST_IP}:2376"
DOCKER_CERT_PATH: "certs"
......@@ -147,8 +148,8 @@ deploy to staging:
- echo "$CLIENT_CERT" > $DOCKER_CERT_PATH/cert.pem
- echo "$CLIENT_KEY" > $DOCKER_CERT_PATH/key.pem
- docker build --compress -t openpolis/opdm/opdm-service:latest .
- docker-compose -f docker-compose.yml -f docker-compose.staging.yml down
- docker-compose -f docker-compose.yml -f docker-compose.staging.yml up -d --build
- docker-compose down
- docker-compose up -d --build
- docker exec opdm-service_web python manage.py makemigrations popolo
- docker exec opdm-service_web python manage.py migrate popolo
- docker exec opdm-service_web python manage.py collectstatic --noinput
......
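A quick way to verify the remote daemon connection this job relies on, sketched here with the docker-py SDK rather than the docker CLI the job actually uses (host IP and cert paths are illustrative):

import docker

# Assumes certs were written as in the job above: ca.pem, cert.pem, key.pem in ./certs
tls_config = docker.tls.TLSConfig(
    client_cert=("certs/cert.pem", "certs/key.pem"),
    ca_cert="certs/ca.pem",
    verify=True,
)
client = docker.DockerClient(base_url="tcp://203.0.113.10:2376", tls=tls_config)
print(client.version()["Version"])  # fails fast on TLS or connectivity problems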
......@@ -7,7 +7,7 @@ root: ./
# socket_name: foo
# Ensure needed daemons are running before start
on_project_start: if [ $(pg_ctl -D /usr/local/var/postgres status | grep 'no server running' | wc -l) == 1 ]; then pg_ctl -D /usr/local/var/postgres start; fi
# on_project_start: if [ $(pg_ctl -D /usr/local/var/postgres status | grep 'no server running' | wc -l) == 1 ]; then pg_ctl -D /usr/local/var/postgres start; fi
# Runs in each window and pane before window/pane specific commands. Useful for setting up interpreter versions.
# pre_window: rbenv shell 2.0.0-p247
......@@ -25,7 +25,7 @@ on_project_start: if [ $(pg_ctl -D /usr/local/var/postgres status | grep 'no ser
# attach: false
# Runs after everything. Use it to attach to tmux with custom options etc.
on_project_exit: if [ $(tmux ls| wc -l) == 0 ]; then pg_ctl -D /usr/local/var/postgres stop; else echo there are $(tmux ls | wc -l) tmux sessions running, could not stop postgres; fi
# on_project_exit: if [ $(tmux ls| wc -l) == 0 ]; then pg_ctl -D /usr/local/var/postgres stop; else echo there are $(tmux ls | wc -l) tmux sessions running, could not stop postgres; fi
windows:
- shell_plus:
......
......@@ -5,10 +5,23 @@ All notable changes to this project will be documented in this file.
The format is based on [Keep a Changelog](<http://keepachangelog.com/en/1.0.0/>)
and this project adheres to [Semantic Versioning](<http://semver.org/spec/v2.0.0.html>).
## [unchanged]
## [1.3.4]
### Fixed
- export_geojson task only extracts current identifiers; this fixes an issue on openpolis/geojson_italy
### Changed
- ``/areas`` endpoint shows identifiers' start and end dates in inlines
## [1.3.3]
### Added
- Task to import world geometries, with dependencies from various sources
- Task to import electoral constituencies' geometries for 2018, from ISTAT
### Changed
``django-popolo`` release upgraded to 3.0.1 in requirements.txt
- ``django-popolo`` release upgraded to 3.0.2 in requirements.txt
## [1.3.2]
......@@ -576,8 +589,10 @@ in the test stage.
[atoka]: https://atoka.io
[unchanged]: https://gitlab.depp.it/openpolis/opdm/opdm-service/compare/v1.3.2...master
[1.3.1]: https://gitlab.depp.it/openpolis/opdm/opdm-service/compare/v1.3.1...v1.3.2
[unchanged]: https://gitlab.depp.it/openpolis/opdm/opdm-service/compare/v1.3.4...master
[1.3.4]: https://gitlab.depp.it/openpolis/opdm/opdm-service/compare/v1.3.3...v1.3.4
[1.3.3]: https://gitlab.depp.it/openpolis/opdm/opdm-service/compare/v1.3.2...v1.3.3
[1.3.2]: https://gitlab.depp.it/openpolis/opdm/opdm-service/compare/v1.3.1...v1.3.2
[1.3.1]: https://gitlab.depp.it/openpolis/opdm/opdm-service/compare/v1.3.0...v1.3.1
[1.3.0]: https://gitlab.depp.it/openpolis/opdm/opdm-service/compare/v1.2.3...v1.3.0
[1.2.3]: https://gitlab.depp.it/openpolis/opdm/opdm-service/compare/v1.2.2...v1.2.3
......
......@@ -14,7 +14,7 @@ $$;
CREATE TEMP VIEW av as
SELECT
'AREA:' || a.id::text as "id:ID(Area-ID)",
a.id as "opdm_id",
a.id as "opdm_id:INT",
CASE
WHEN a.istat_classification = 'COM' THEN 'Comune di ' || a.name || ' (' || ap.identifier || ')'
WHEN a.istat_classification = 'PROV' THEN 'Provincia di ' || a.name
......@@ -34,7 +34,7 @@ SELECT
a.inhabitants as "inhabitants:int",
CASE WHEN a.end_date > current_date::text or a.end_date is null THEN 'true'
ELSE 'false'
END as "is_current:boolean",
END as "is_current:BOOLEAN",
st_y(st_centroid(a.geometry)::geometry) as "lat:float",
st_x(st_centroid(a.geometry)::geometry) as "long:float",
'Area'::text as ":LABEL"
......@@ -48,14 +48,14 @@ FROM popolo_area as a
CREATE TEMP VIEW ov as
SELECT
'ORG:' || o.id::text as "id:ID(Organization-ID)",
o.id as "opdm_id",
o.id as "opdm_id:INT",
o.name,
c.descr as label,
o.classification,
date_part('years', age(to_timestamp(date_2_ts(o.founding_date)))) as "age",
date_part('years', age(to_timestamp(date_2_ts(o.founding_date)))) as "age:int",
CASE WHEN o.dissolution_date > current_date::text or o.dissolution_date is null THEN 'true'
ELSE 'false'
END as "is_current:boolean",
END as "is_current:BOOLEAN",
CASE
WHEN oe.revenue is null THEN null
WHEN oe.revenue between 0 and 250000 THEN 1
......@@ -63,7 +63,7 @@ SELECT
WHEN oe.revenue between 2000001 and 10000000 THEN 3
WHEN oe.revenue between 10000001 and 50000000 THEN 4
ELSE 5
END as "revenue_class",
END as "revenue_class:int",
CASE
WHEN oe.employees is null THEN null
WHEN oe.employees between 0 and 2 THEN 1
......@@ -71,7 +71,7 @@ SELECT
WHEN oe.employees between 11 and 50 THEN 3
WHEN oe.employees between 51 and 250 THEN 4
ELSE 5
END as "employees_class",
END as "employees_class:int",
'Organizzazione'::text AS ":LABEL"
FROM popolo_organization as o
LEFT JOIN popolo_identifier i on i.object_id=o.id and i.content_type_id=28 and i.scheme='CF'
......@@ -82,14 +82,14 @@ WHERE c.scheme = 'OPDM_ORGANIZATION_LABEL'
UNION
SELECT
'ORG:' || o.id::text as "id:ID(Organization-ID)",
o.id as "opdm_id",
o.id as "opdm_id:INT",
o.name,
null as label,
o.classification,
date_part('years', age(to_timestamp(date_2_ts(o.founding_date)))) as "age",
date_part('years', age(to_timestamp(date_2_ts(o.founding_date)))) as "age:int",
CASE WHEN o.dissolution_date > current_date::text or o.dissolution_date is null THEN 'true'
ELSE 'false'
END as "is_current:boolean",
END as "is_current:BOOLEAN",
CASE
WHEN oe.revenue is null THEN null
WHEN oe.revenue between 0 and 250000 THEN 1
......@@ -97,7 +97,7 @@ SELECT
WHEN oe.revenue between 2000001 and 10000000 THEN 3
WHEN oe.revenue between 10000001 and 50000000 THEN 4
ELSE 5
END as "revenue_class",
END as "revenue_class:int",
CASE
WHEN oe.employees is null THEN null
WHEN oe.employees between 0 and 2 THEN 1
......@@ -105,7 +105,7 @@ SELECT
WHEN oe.employees between 11 and 50 THEN 3
WHEN oe.employees between 51 and 250 THEN 4
ELSE 5
END as "employees_class",
END as "employees_class:int",
'Organizzazione'::text AS ":LABEL"
FROM popolo_organization as o
LEFT JOIN popolo_identifier i on i.object_id=o.id and i.content_type_id=28 and i.scheme='CF'
......@@ -124,36 +124,36 @@ WHERE o.id NOT IN
CREATE TEMP VIEW pv as
SELECT
'PERSON:' || p.id::text as "id:ID(Person-ID)",
p.id as "opdm_id",
p.id as "opdm_id:INT",
p.name, p.image,
p.gender,
date_part('years', age(to_timestamp(date_2_ts(p.birth_date)))) as "age",
date_part('years', age(to_timestamp(date_2_ts(p.birth_date)))) as "age:int",
p.birth_location as birth_location_str,
string_agg(c.descr, ', ' order by c.descr) AS "labels",
CASE WHEN p.death_date is null THEN 'true'
ELSE 'false'
END as "is_alive:boolean",
END as "is_alive:BOOLEAN",
'Persona'::text AS ":LABEL"
FROM popolo_person p
LEFT JOIN popolo_classificationrel cr on cr.object_id=p.id and cr.content_type_id=33
LEFT JOIN popolo_classification c on cr.classification_id=c.id
WHERE c.scheme = 'OPDM_PERSON_LABEL'
GROUP BY "opdm_id",
p.name, p.image, p.gender, age,
GROUP BY "opdm_id:INT",
p.name, p.image, p.gender, "age:int",
birth_location_str,
"is_alive:boolean"
"is_alive:BOOLEAN"
UNION
SELECT
'PERSON:' || p.id::text as "id:ID(Person-ID)",
p.id as "opdm_id",
p.id as "opdm_id:INT",
p.name, p.image,
p.gender,
date_part('years', age(to_timestamp(date_2_ts(p.birth_date)))) as "age",
date_part('years', age(to_timestamp(date_2_ts(p.birth_date)))) as "age:int",
p.birth_location as birth_location_str,
null as labels,
CASE WHEN p.death_date is null THEN 'true'
ELSE 'false'
END as "is_alive:boolean",
END as "is_alive:BOOLEAN",
'Persona'::text AS ":LABEL"
FROM popolo_person p
WHERE p.id NOT IN
......@@ -219,7 +219,7 @@ SELECT
m.end_date as "end_date",
CASE WHEN m.end_date > current_date::text or m.end_date is null THEN 'true'
ELSE 'false'
END as "is_current:boolean",
END as "is_current:BOOLEAN",
'INCARICO_IN'::text as ":TYPE"
FROM popolo_membership as m
LEFT JOIN popolo_organization as o on o.id=m.organization_id
......@@ -239,7 +239,7 @@ SELECT
md5(owner_person_id::text || ':' || owned_organization_id::text || ':' || coalesce(start_date, 'NULL')::text)::uuid as "id",
'PERSON:' || owner_person_id::text as ":START_ID(Person-ID)",
'ORG:' || owned_organization_id::text as ":END_ID(Organization-ID)",
percentage,
percentage as "percentage:float",
start_date as "start_date",
end_date as "end_date",
CASE WHEN end_date > current_date::text or end_date is null THEN 'true'
......@@ -258,13 +258,13 @@ SELECT
md5(owner_organization_id::text || ':' || owned_organization_id::text || ':' || coalesce(start_date, 'NULL'))::uuid as "id",
'ORG:' || owner_organization_id::text as ":START_ID(Organization-ID)",
'ORG:' || owned_organization_id::text as ":END_ID(Organization-ID)",
percentage,
percentage as "percentage:float",
start_date as "start_date",
end_date as "end_date",
end_reason,
CASE WHEN end_date > current_date::text or end_date is null THEN 'true'
ELSE 'false'
END as "is_current:boolean",
END as "is_current:BOOLEAN",
'DETIENE_QUOTA_DI'::text as ":TYPE"
FROM popolo_ownership
WHERE owner_person_id is null
......@@ -284,7 +284,7 @@ SELECT
pm.end_reason,
CASE WHEN pm.end_date > current_date::text or pm.end_date is null THEN 'true'
ELSE 'false'
END as "is_current:boolean",
END as "is_current:BOOLEAN",
'HA_NOMINATO'::text as ":TYPE"
from popolo_membership pm, popolo_membership am
where pm.appointed_by_id is not null and pm.appointed_by_id=am.id
......@@ -295,7 +295,7 @@ where pm.appointed_by_id is not null and pm.appointed_by_id=am.id
CREATE TEMP VIEW topics as
SELECT
'TOPIC:' || t.id::text as "id:ID(Topic-ID)",
t.id as "opdm_id",
t.id as "opdm_id:INT",
t.descr as name,
'Tema'::text AS ":LABEL"
FROM popolo_classification as t
......@@ -322,13 +322,13 @@ SELECT
md5(source_organization_id::text || ':' || dest_organization_id::text || ':' || c.descr || ':' || coalesce(r.start_date, 'NULL')::text)::uuid as "id",
'ORG:' || r.source_organization_id::text as ":START_ID(Organization-ID)",
'ORG:' || r.dest_organization_id::text as ":END_ID(Organization-ID)",
r.weight,
r.weight as "weight:int",
c.descr as "classification",
r.start_date,
r.end_date,
CASE WHEN r.end_date > current_date::text or r.end_date is null THEN 'true'
ELSE 'false'
END as "is_current:boolean",
END as "is_current:BOOLEAN",
'IN_RELAZIONE_DI'::text as ":TYPE"
FROM popolo_organizationrelationship r
LEFT JOIN popolo_classification c on r.classification_id=c.id
......
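These views emit neo4j-admin bulk-import headers: the token after the colon (:ID(...), :INT, :BOOLEAN, :float, :LABEL, :START_ID, :END_ID, :TYPE) tells the importer how to type each CSV column, which is what this commit normalizes. A hedged sketch of the downstream import step, assuming the views are first dumped to CSV (file and database names are illustrative):

import subprocess

# Assumes neo4j-admin 4.x on PATH and CSVs exported from the av/ov/pv/... views
subprocess.run(
    [
        "neo4j-admin", "import",
        "--database=opdm",
        "--nodes=areas.csv",                # header: id:ID(Area-ID),opdm_id:INT,...,:LABEL
        "--nodes=organizations.csv",
        "--nodes=persons.csv",
        "--relationships=memberships.csv",  # header: :START_ID(Person-ID),...,:TYPE
    ],
    check=True,
)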
......@@ -472,9 +472,9 @@ UWSGI_TASKMANAGER_NOTIFICATION_HANDLERS = {
# Audit log settings
# -----------------------------------------------------------------------------
DJANGO_EASY_AUDIT_WATCH_MODEL_EVENTS = True
DJANGO_EASY_AUDIT_WATCH_AUTH_EVENTS = True
DJANGO_EASY_AUDIT_WATCH_REQUEST_EVENTS = True
DJANGO_EASY_AUDIT_WATCH_MODEL_EVENTS = env.bool("DJANGO_EASY_AUDIT_WATCH_MODEL_EVENTS", default=True)
DJANGO_EASY_AUDIT_WATCH_AUTH_EVENTS = env.bool("DJANGO_EASY_AUDIT_WATCH_AUTH_EVENTS", default=True)
DJANGO_EASY_AUDIT_WATCH_REQUEST_EVENTS = env.bool("DJANGO_EASY_AUDIT_WATCH_REQUEST_EVENTS", default=True)
DJANGO_EASY_AUDIT_UNREGISTERED_CLASSES_EXTRA = []
DJANGO_EASY_AUDIT_REGISTERED_CLASSES = [
"popolo.Area",
......
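Switching these flags to env.bool lets each audit trail be disabled per deployment without touching code. A sketch of how django-environ resolves them (.env values illustrative):

# .env (illustrative):
#   DJANGO_EASY_AUDIT_WATCH_REQUEST_EVENTS=off
import environ

env = environ.Env()
environ.Env.read_env(".env")

# "off", "false", "no", "0" all parse to False; unset keys fall back to the default
env.bool("DJANGO_EASY_AUDIT_WATCH_REQUEST_EVENTS", default=True)  # -> False
env.bool("DJANGO_EASY_AUDIT_WATCH_MODEL_EVENTS", default=True)    # -> True (unset)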
# nginx overrides to deploy on standard staging docker-compose stack
# use docker-compose -f docker-compose.yml -f docker-compose.staging.yml up -d
version: "3.5"
services:
nginx:
networks:
- default
- gw
web:
networks:
- default
- gw
networks:
default:
external: false
name: opdm
gw:
external: true
......@@ -42,6 +42,9 @@ services:
- data:/app/data
- uwsgi_spooler:/var/lib/uwsgi
- weblogs:/var/log
networks:
- default
- gw
command: /usr/local/bin/uwsgi --socket=:8000 --master --env DJANGO_SETTINGS_MODULE=config.settings --pythonpath=/app --module=config.wsgi --callable=application --processes=4 --spooler=/var/lib/uwsgi --spooler-processes=2
nginx:
......@@ -62,6 +65,9 @@ services:
- "traefik.http.routers.opdmsvc.tls.certresolver=${CERT_RESOLVER}"
volumes:
- public:/app/public
networks:
- default
- gw
links:
- web:web
......@@ -74,8 +80,12 @@ services:
- neo4j_import:/var/lib/postgresql/neo4j_import
environment:
POSTGRES_PASSWORD: ${POSTGRES_DEFAULT_PASSWORD}
expose:
- "5432"
ports:
- "5433:5432"
networks:
- default
- kafka-connect
- metabase-net
graphql:
container_name: opdm-service_graphql
......@@ -97,6 +107,15 @@ services:
VIRTUAL_PORT: 8080
LETSENCRYPT_HOST: ${HASURA_DOMAIN}
LETSENCRYPT_EMAIL: ${LETSENCRYPT_EMAIL}
labels:
- "traefik.enable=true"
- "traefik.docker.network=gw"
- "traefik.http.routers.opdmgraphql.rule=Host(`${HASURA_DOMAIN}`)"
- "traefic.http.routers.opdmgraphql.entrypoint=websecure"
- "traefik.http.routers.opdmgraphql.tls.certresolver=${CERT_RESOLVER}"
networks:
- default
- gw
command:
- graphql-engine
- serve
......@@ -109,13 +128,15 @@ services:
- redis_data:/data
expose:
- "6379"
networks:
- default
solr:
restart: always
container_name: opdm-service_solr
build: compose/solr/
ports:
- "8983"
- "8983:8983"
volumes:
- solr_data:/opt/solr/server/solr/mycores/opdm/data
entrypoint:
......@@ -123,11 +144,13 @@ services:
- solr-precreate
- opdm
- /opt/solr/server/solr/mycores/opdm
networks:
- default
neo4j:
restart: always
container_name: opdm-service_neo4j
image: neo4j:3.5.12-enterprise
image: library/neo4j:4.2.1-enterprise
ports:
- "7474:7474"
- "7687:7687"
......@@ -148,6 +171,9 @@ services:
- neo4j_logs:/logs
- neo4j_import:/var/lib/neo4j/import
- neo4j_plugins:/var/lib/neo4j/plugins
networks:
- default
- kafka-connect
volumes:
public:
......@@ -177,5 +203,11 @@ volumes:
networks:
default:
external:
name: webproxy
external: false
name: opdm
gw:
external: true
kafka-connect:
external: true
metabase-net:
external: true
......@@ -3,7 +3,7 @@ Openpolis Data Manager service package (backend)
"""
from typing import Optional
__version__ = (1, 3, 2)
__version__ = (1, 3, 4)
def get_version_str() -> str:
......
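The body of get_version_str is elided here; a plausible, purely hypothetical sketch of the tuple-to-string conversion:

def get_version_str() -> str:
    # hypothetical implementation: "1.3.4" from (1, 3, 4)
    return ".".join(str(part) for part in __version__)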
......@@ -83,8 +83,6 @@ class IdentifierInlineSerializer(serializers.ModelSerializer):
exclude = (
"content_type",
"object_id",
"start_date",
"end_date",
"end_reason",
"source",
)
......
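Removing start_date and end_date from exclude means inline identifiers now expose their validity window, which is what the ``/areas`` changelog entry above refers to. A sketch of one inline entry after the change (values illustrative):

identifier_inline = {
    "identifier": "ITC4",
    "scheme": "NUTS2_CODE",
    "start_date": "2021-01-01",
    "end_date": None,  # open-ended, i.e. current
}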
......@@ -406,6 +406,7 @@ class AreaSerializer(serializers.HyperlinkedModelSerializer):
source="get_former_parents", many=True, read_only=True
)
new_places = AreaInlineSerializer(many=True, read_only=True)
old_places = AreaInlineSerializer(many=True, read_only=True)
identifiers = IdentifierSerializer(many=True, read_only=True)
other_names = OtherNameSerializer(many=True, read_only=True)
links = LinkRelSerializer(many=True, read_only=True)
......@@ -425,13 +426,14 @@ class AreaSerializer(serializers.HyperlinkedModelSerializer):
"istat_classification",
"is_provincial_capital",
"inhabitants",
"geometry",
# "geometry",
"gps_lat",
"gps_lon",
"parent",
"former_parents",
"related_areas",
"new_places",
"old_places",
"start_date",
"end_date",
"end_reason",
......
from rest_framework.generics import get_object_or_404
class DetailActionsMixin(object):
"""
A mixin which is useful to extend when a viewset needs to generate detail action subviews,
i.e. `/objects/123/subobjects`
"""
def get_object_no_filter(self):
"""
Returns the object the view is displaying, without applying filters.
"""
queryset = self.get_queryset()
# Perform the lookup filtering.
lookup_url_kwarg = self.lookup_url_kwarg or self.lookup_field
assert lookup_url_kwarg in self.kwargs, (
'Expected view %s to be called with a URL keyword argument '
'named "%s". Fix your URL conf, or set the `.lookup_field` '
'attribute on the view correctly.' %
(self.__class__.__name__, lookup_url_kwarg)
)
filter_kwargs = {self.lookup_field: self.kwargs[lookup_url_kwarg]}
obj = get_object_or_404(queryset, **filter_kwargs)
# May raise a permission denied
self.check_object_permissions(self.request, obj)
return obj
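Why this mixin exists: DRF's get_object() passes the queryset through filter_queryset(), so list-filter query parameters aimed at a nested collection can make the parent detail lookup 404. get_object_no_filter() performs only the pk lookup and the permission check. A usage sketch mirroring the viewset changes below:

class AreaViewSet(DetailActionsMixin, rw_viewsets.ModelViewSet):

    @action(detail=True, serializer_class=OrganizationListResultSerializer)
    def organizations(self, request, pk=None, **kwargs):
        # filter params aimed at the sub-list no longer 404 the parent lookup
        instance = self.get_object_no_filter()
        page = self.paginate_queryset(instance.organizations.all().order_by("id"))
        return self.get_paginated_response(self.get_serializer(page, many=True).data)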
......@@ -32,6 +32,7 @@ from rest_framework.decorators import action
from rest_framework.exceptions import APIException, MethodNotAllowed
from rest_framework.response import Response
from api_v1.views.DetailActionsMixin import DetailActionsMixin
from project.akas.models import AKA
from project.api_v1.filters import (
AreaFilterSet,
......@@ -103,7 +104,7 @@ from project.core.exceptions import (
)
class AreaViewSet(IdentifierTypesMixin, rw_viewsets.ModelViewSet):
class AreaViewSet(IdentifierTypesMixin, DetailActionsMixin, rw_viewsets.ModelViewSet):
"""
A ViewSet for viewing and editing Area resources.
......@@ -188,7 +189,7 @@ class AreaViewSet(IdentifierTypesMixin, rw_viewsets.ModelViewSet):
@action(detail=True, serializer_class=OrganizationListResultSerializer)
def organizations(self, request, pk=None, **kwargs):
""" Shows all organizations of a given area"""
instance = self.get_object()
instance = self.get_object_no_filter()
items = instance.organizations.all().order_by("id")
page = self.paginate_queryset(items)
serializer = self.get_serializer(page, many=True)
......@@ -198,7 +199,7 @@ class AreaViewSet(IdentifierTypesMixin, rw_viewsets.ModelViewSet):
def children(self, request, pk=None, **kwargs):
""" Shows all areas children of a given one """
self.serializer_class = AreaListResultSerializer
instance = self.get_object()
instance = self.get_object_no_filter()
items = instance.children.all().order_by("id")
page = self.paginate_queryset(items)
serializer = self.get_serializer(page, many=True)
......@@ -208,7 +209,7 @@ class AreaViewSet(IdentifierTypesMixin, rw_viewsets.ModelViewSet):
def former_children(self, request, pk=None, **kwargs):
""" Shows an area's former children """
self.serializer_class = FormerChildrenSerializer
instance = self.get_object()
instance = self.get_object_no_filter()
items = (
instance.to_relationships.filter(classification="FIP")
.select_related("source_area")
......@@ -335,7 +336,7 @@ class OrganizationRelationshipViewSet(
class OrganizationViewSet(
IdentifierTypesMixin, ClassificationTypesMixin, rw_viewsets.ModelViewSet
IdentifierTypesMixin, ClassificationTypesMixin, DetailActionsMixin, rw_viewsets.ModelViewSet
):
"""
A ViewSet for viewing and editing Organization resources.
......@@ -460,7 +461,7 @@ class OrganizationViewSet(
will only show **current owned organizations** for the organization.
"""
instance = self.get_object()
instance = self.get_object_no_filter()
self.serializer_class = OwnershipListResultSerializer
......@@ -497,7 +498,7 @@ class OrganizationViewSet(
will only show **current owning organizations** for the organization.
"""
instance = self.get_object()
instance = self.get_object_no_filter()
self.serializer_class = OwnershipListResultSerializer
......@@ -533,7 +534,7 @@ class OrganizationViewSet(
will only show **current memberships** in the organization.
"""
instance = self.get_object()
instance = self.get_object_no_filter()
self.serializer_class = MembershipListResultSerializer
......@@ -566,7 +567,7 @@ class OrganizationViewSet(
This is internally used to detect starting and ending dates
of *legislatures* and *governments*.
"""
instance = self.get_object()
instance = self.get_object_no_filter()
start_date_groups = (
instance.memberships.values("start_date")
......@@ -598,7 +599,7 @@ class OrganizationViewSet(
will only show **current posts** in the organization.
"""
instance = self.get_object()
instance = self.get_object_no_filter()
self.serializer_class = PostListResultSerializer
......@@ -634,7 +635,7 @@ class OrganizationViewSet(
will only show **current relations** of the organization.
"""
instance = self.get_object()
instance = self.get_object_no_filter()
or_viewset = OrganizationRelationshipViewSet(request=request)
self.search_fields = or_viewset.search_fields
......@@ -668,7 +669,7 @@ class OrganizationViewSet(
will only show **current relations** of the organization.
"""
instance = self.get_object()
instance = self.get_object_no_filter()
or_viewset = OrganizationRelationshipViewSet(request=request)
self.search_fields = or_viewset.search_fields
......@@ -1018,7 +1019,7 @@ class OwnershipViewSet(rw_viewsets.ModelViewSet):
return super().partial_update(request, *args, **kwargs)
class MembershipViewSet(BulkPartialUpdateMixin, ClassificationTypesMixin, rw_viewsets.ModelViewSet):
class MembershipViewSet(BulkPartialUpdateMixin, ClassificationTypesMixin, DetailActionsMixin, rw_viewsets.ModelViewSet):
"""
A ViewSet for viewing and editing Membership resources.
......@@ -1087,7 +1088,7 @@ class MembershipViewSet(BulkPartialUpdateMixin, ClassificationTypesMixin, rw_vie
will only show **current appointed memberships** in the organization.
"""
instance = self.get_object()
instance = self.get_object_no_filter()
self.serializer_class = MembershipInlineSerializer
......@@ -1236,7 +1237,7 @@ class MembershipViewSet(BulkPartialUpdateMixin, ClassificationTypesMixin, rw_vie
)
class PostViewSet(rw_viewsets.ModelViewSet):
class PostViewSet(DetailActionsMixin, rw_viewsets.ModelViewSet):
"""
A ViewSet for viewing and editing Post resources.
......@@ -1312,7 +1313,7 @@ class PostViewSet(rw_viewsets.ModelViewSet):
will only show **current appointed posts**
"""
instance = self.get_object()
instance = self.get_object_no_filter()
self.serializer_class = PostInlineSerializer
......@@ -1348,7 +1349,7 @@ class PostViewSet(rw_viewsets.ModelViewSet):
will only show **current memberships** in the organization.
"""
instance = self.get_object()
instance = self.get_object_no_filter()
self.serializer_class = MembershipListResultSerializer
......
......@@ -13,11 +13,11 @@ def _classification_to_label(org):
def _roletype_to_label(m):
mapping = m.post.role_type.labels_mapping.first()
if mapping:
return mapping.label_classification
else:
return None
if m.post and m.post.role_type:
mapping = m.post.role_type.labels_mapping.first()
if mapping:
return mapping.label_classification
return None
def add_label_to_org(sender, instance: ClassificationRel, **kwargs):
......
......@@ -45,6 +45,54 @@ class PopoloAreaLoader(PopoloLoader):
super(PopoloAreaLoader, self).load(**kwargs)
def add_nuts_identifier(self, area, scheme, start_year, identifier):
""" add NUTS identifiers, only if not already there
Extract the current identifier and, if the value to add is different, then
closes the validity of the old one and add the new one.
:param area: the area the identifiers refer to
:param scheme: NUTS[123]_CODE
:param start_year: the year identifying the identifier (2006, 2010, 2021)
:param identifier: the identifier value (IT1, ITC, IT23, ...)
:return: void
"""
nuts_current_identifiers = area.identifiers.filter(scheme=scheme, end_date__isnull=True)
n_current_identifiers = nuts_current_identifiers.count()
current_identifier = nuts_current_identifiers.first()
if n_current_identifiers > 1:
self.logger.warn(
"Found multiple current {0} identifiers for {1}".format(scheme, area)
)
return
if current_identifier is None or current_identifier.identifier != identifier:
# close current identifier if started before the new one (replacement)
# delete it if started on the same day, or after (substitution)
# do nothing (return) if the current identifier starts later than the new one
if current_identifier:
if (
# test for None first: comparing None < str raises a TypeError
current_identifier.start_date is None or
current_identifier.start_date < "{0}-01-01".format(start_year)
):
nuts_current_identifiers.update(end_date="{0}-01-01".format(start_year))
elif current_identifier.start_date == "{0}-01-01".format(start_year):
nuts_current_identifiers.delete()
else:
return
try:
area.add_identifier(
scheme=scheme,
identifier=identifier,
source=self.csv_source,
start_date='{0}-01-01'.format(start_year)
)
except Exception as e:
self.logger.warn(
"While importing {0} identifier for {1}, "
"got the following exceptions: {2}".format(scheme, area, e)
)
def load_item(self, area, **kwargs):
self.logger.debug(area['den_full'])
......@@ -63,22 +111,9 @@ class PopoloAreaLoader(PopoloLoader):
}
)
if created:
try:
rip.add_identifiers([
{
'identifier': area['code_nuts1'],
'scheme': 'NUTS1_CODE',
'source': self.csv_source,
'start_date': '2010-01-01'
}
])
except Exception as e:
self.logger.warn(
"While importing NUTS identifiers for {0}, "
"got the following exceptions: {1}".format(rip, e)
)
self.logger.warn(e)
self.add_nuts_identifier(rip, 'NUTS1_CODE', 2010, area['code_nuts1_2010'])
self.add_nuts_identifier(rip, 'NUTS1_CODE', 2021, area['code_nuts1_2021'])
# get_or_create the Regione
reg, created = Area.objects.get_or_create(
......@@ -99,21 +134,9 @@ class PopoloAreaLoader(PopoloLoader):
reg.parent = rip
reg.save()
try:
reg.add_identifiers([
{
'identifier': area['code_nuts2'],
'scheme': 'NUTS2_CODE',
'source': self.csv_source,
'start_date': '2010-01-01'
}
])
except Exception as e:
self.logger.warn(
"While importing NUTS identifiers for {0}, "
"got the following exceptions: {1}".format(reg, e)
)
self.logger.warn(e)
# add NUTS identifiers, only if not already there
self.add_nuts_identifier(reg, 'NUTS2_CODE', 2010, area['code_nuts2_2010'])
self.add_nuts_identifier(reg, 'NUTS2_CODE', 2021, area['code_nuts2_2021'])
# get_or_create the Città Metropolitana or Provincia
den = area['den_prov']
......@@ -165,25 +188,14 @@ class PopoloAreaLoader(PopoloLoader):
"got the following exceptions: {1}".format(cp, e)
)
try:
cp.add_identifiers([
{
'identifier': area['code_prov'],
'scheme': 'ISTAT_CODE_PROV',
'source': self.csv_source,
},
{
'identifier': area['code_nuts3'],
'scheme': 'NUTS3_CODE',
'source': self.csv_source,
'start_date': '2010-01-01'
}
])
except Exception as e:
self.logger.error(
"While importing NUTS identifiers for {0}, "
"got the following exceptions: {1}".format(cp, e)
)
cp.add_identifier(
scheme='ISTAT_CODE_PROV',
identifier=area['code_prov'],
source=self.csv_source
)
self.add_nuts_identifier(cp, 'NUTS3_CODE', 2010, area['code_nuts3_2010'])
self.add_nuts_identifier(cp, 'NUTS3_CODE', 2021, area['code_nuts3_2021'])
# get_or_create the Comune
c, created = Area.objects.get_or_create(
......
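A worked sketch of how add_nuts_identifier behaves (loader and reg objects assumed to exist; codes and dates illustrative):

# current identifier on reg: NUTS2_CODE "ITC4", end_date=None
loader.add_nuts_identifier(reg, "NUTS2_CODE", 2021, "ITC9")
# - "ITC4" started before 2021-01-01 -> closed with end_date="2021-01-01",
#   then "ITC9" is added with start_date="2021-01-01" (replacement)
# - "ITC4" started exactly on 2021-01-01 -> deleted before adding "ITC9" (substitution)
# - "ITC4" started after 2021-01-01 -> method returns, nothing changes
# - same value, or multiple current identifiers -> no-op (with a warning in the latter case)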
......@@ -53,7 +53,7 @@ class SparqlToJsonCommand(BaseCommand):
self.logger.info(f"Got {len(bindings)} bindings. Processing...")
output = self.handle_bindings(bindings, **options)
if output:
path = Path(options["output_dir"]) / self._get_file_name(**options)
path = Path(options["output_dir"]) / self.get_json_name(**options)
path.parent.mkdir(parents=True, exist_ok=True)
with path.open("w") as f:
self.logger.info(f"Dumping JSON as {path.as_uri()}...")
......@@ -62,8 +62,9 @@ class SparqlToJsonCommand(BaseCommand):
else:
self.logger.info(f"Handling of bindings returned {len(output)}.")
def _get_file_name(self, **options) -> str:
return f'{self.json_filename}_{options["legislature"]}.json'
@classmethod
def get_json_name(cls, **options) -> str:
return f'{cls.json_filename}_{options["legislature"]}.json'
def query(self, **options) -> Optional[List[Dict]]:
raise NotImplementedError
......
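The commit turns _get_file_name into the classmethod get_json_name, so callers can compute a dump's file name without instantiating the command. A sketch with a hypothetical subclass:

class MembersCommand(SparqlToJsonCommand):
    json_filename = "members"  # hypothetical subclass attribute

MembersCommand.get_json_name(legislature="18")  # -> "members_18.json", no instance needed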
......@@ -36,8 +36,8 @@ class Command(LoggingBaseCommand):
self.logger.info("Serializing comuni ...")
comuni_areas = Area.objects.comuni().filter(
geometry__isnull=False, end_date__isnull=True
).distinct()
geometry__isnull=False, end_date__isnull=True, identifiers__end_date__isnull=True
).select_related("parent").prefetch_related("identifiers", "i18n_names").distinct()
comuni_json = serialize(
'geojson', comuni_areas,
geometry_field='geometry',
......
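One subtlety worth flagging: filter(identifiers__end_date__isnull=True) narrows which comuni match, but prefetch_related("identifiers") still loads every identifier row for them. If the serializer should also emit only current identifiers, a Prefetch object pins that down; a sketch under that assumption:

from django.db.models import Prefetch
from popolo.models import Area, Identifier

comuni_areas = Area.objects.comuni().filter(
    geometry__isnull=False, end_date__isnull=True, identifiers__end_date__isnull=True
).select_related("parent").prefetch_related(
    # limit the prefetched rows themselves, not only the matched areas
    Prefetch("identifiers", queryset=Identifier.objects.filter(end_date__isnull=True)),
    "i18n_names",
).distinct()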
......@@ -47,9 +47,12 @@ class Istat2PopoloTransformation(Transformation):
"Codice Comune numerico con 103 province (dal 1995 al 2005)": "code_com_103",
"Codice Catastale del comune": "code_catasto",
"Popolazione legale 2011 (09/10/2011)": "pop_2011",
"NUTS1": "code_nuts1",
"NUTS2(3)": "code_nuts2",
"NUTS3": "code_nuts3",
"Codice NUTS1 2010": "code_nuts1_2010",
"Codice NUTS2 2010 (3)": "code_nuts2_2010",
"Codice NUTS3 2010": "code_nuts3_2010",
"Codice NUTS1 2021": "code_nuts1_2021",
"Codice NUTS2 2021 (3)": "code_nuts2_2021",
"Codice NUTS3 2021": "code_nuts3_2021",
},
inplace=True,
)
......@@ -75,9 +78,12 @@ class Istat2PopoloTransformation(Transformation):
"code_com_110",
"code_com_107",
"code_com_103",
"code_nuts1",
"code_nuts2",
"code_nuts3",
"code_nuts1_2010",
"code_nuts2_2010",
"code_nuts3_2010",
"code_nuts1_2021",
"code_nuts2_2021",
"code_nuts3_2021",
]
)
......@@ -122,7 +128,9 @@ class Istat2PopoloTransformation(Transformation):
# (od.den_it == 'Telti') |
# (od.den_it == 'Teltì')
# ]
# od = od[
# (od.den_reg == 'Sardegna')
# ]
# store processed data into the ETL instance
self.etl.processed_data = od
......@@ -194,6 +202,8 @@ class Command(CacheArgumentsCommandMixin, LoggingBaseCommand):
super(Command, self).handle(__name__, *args, formatter_key="simple", **options)
self.handle_cache(*args, **options)
self.logger.info("Cleaning up some data")
self.logger.info("Starting composite ETL process")
CSVDiffCompositeETL(
......
# -*- coding: utf-8 -*-
import os
from typing import Optional
from django.contrib.gis.gdal import DataSource
from django.contrib.gis.geos import Polygon, MultiPolygon
from popolo.models import Area
from taskmanager.management.base import LoggingBaseCommand
class Command(LoggingBaseCommand):
"""This command will import the 2018 electoral constituencies geometries from given shp files.
Data come from https://www.istat.it/it/archivio/208278
and have been transformed into separated, simplified shp files with mapshaper, using:
mapshaper -i shp/COLLEGI_ELETTORALI_2017.shp \
-proj wgs84 -simplify 10% visvalingam weighted -clean \
-o /tmp/collegi_tot.shp
mapshaper -i /tmp/collegi_tot.shp \
-filter-fields CIRCO17_C,CIRCO17_D,COD_REG \
-dissolve CIRCO17_C copy-fields CIRCO17_D,COD_REG \
-rename-layers circoscrizioni target=1 \
-o shp/circoscrizioni_camera_2017.shp
mapshaper -i /tmp/collegi_tot.shp \
-filter-fields CAM17P_COD,CAM17P_DEN,CIRCO17_C \
-dissolve CAM17P_COD copy-fields CAM17P_DEN,CIRCO17_C \
-rename-layers collegi_pluri target=1 \
-o shp/collegi_pluri_camera_2017.shp
mapshaper -i /tmp/collegi_tot.shp \
-filter-fields CAM17U_COD,CAM17U_DEN,CAM17U_NOM,CAM17P_COD \
-dissolve CAM17U_COD copy-fields CAM17U_DEN,CAM17U_NOM,CAM17P_COD \
-rename-layers collegi_uni target=1 \
-o shp/collegi_uni_camera_2017.shp
mapshaper -i /tmp/collegi_tot.shp \
-filter-fields SEN17P_COD,SEN17P_DEN,COD_REG \
-dissolve SEN17P_COD copy-fields SEN17P_DEN,COD_REG \
-rename-layers collegi_pluri target=1 \
-o shp/collegi_pluri_senato_2017.shp
mapshaper -i /tmp/collegi_tot.shp \
-filter-fields SEN17U_COD,SEN17U_DEN,SEN17U_NOM,SEN17P_COD \
-dissolve SEN17U_COD copy-fields SEN17U_DEN,SEN17U_NOM,SEN17P_COD \
-rename-layers collegi_uni target=1 \
-o shp/collegi_uni_senato_2017.shp
"""
help = "Import Areas geographic limits for electoral constituencies in 2018 political elections"
shp_files_path = None
branch = None
def add_arguments(self, parser):
parser.add_argument(
dest='shp_files_path',
help="Absolute path to the compressed shp files directory"
)
parser.add_argument(
dest='branch',
help="The branch to process: camera|senato"
)
def handle(self, *args, **options):
super(Command, self).handle(__name__, *args, formatter_key="simple", **options)
self.shp_files_path = options['shp_files_path']
self.branch = options['branch'].lower()
self.logger.info(f"Starting import process.")
try:
if self.branch == 'camera':
circoscrizioni = self.fetch_features(
f"{self.shp_files_path}/circoscrizioni_{self.branch}_2017.shp"
)
for n, feature in enumerate(circoscrizioni, start=1):
self.update_or_create_area(
feature, 'CIRCO17_C',
lambda x: x.get('CIRCO17_D'),
'ELECT_CIRC',
parent_field='COD_REG'
)
self.logger.info(f"{len(circoscrizioni)} geometries for circoscrizioni updated or created")
branch3 = self.branch[:3].upper()
branch1 = self.branch[0].upper()
if branch1 == 'C':
parent_field = 'CIRCO17_C'
else:
parent_field = 'COD_REG'
collegi_pluri = self.fetch_features(
f"{self.shp_files_path}/collegi_pluri_{self.branch}_2017.shp"
)
for n, feature in enumerate(collegi_pluri, start=1):
self.update_or_create_area(
feature, f'{branch3}17P_COD',
lambda x: x.get(f'{branch3}17P_DEN'),
'ELECT_COLL',
parent_field=parent_field
)
collegi_uni = self.fetch_features(
f"{self.shp_files_path}/collegi_uni_{self.branch}_2017.shp"
)
for n, feature in enumerate(collegi_uni, start=1):
self.update_or_create_area(
feature, f'{branch3}17U_COD',
lambda x: x.get(f'{branch3}17U_DEN') + " " + x.get(f'{branch3}17U_NOM'),
'ELECT_COLL',
parent_field=f"{branch3}17P_COD"
)
except Exception as e:
self.logger.error(e)
exit(-1)
self.logger.info("End of import process")
def update_or_create_area(self, feat, identifier_field, name_field, classification, parent_field=None) -> Area:
"""Update or create an Area from a feature,
identifier and name are extracted from the given feature fields;
the classification is also used as a prefix for the identifier
:param feat: the geometric feature, extracted from the shape file
:param identifier_field: field to use when building the identifier (with classification as prefix)
:param name_field: field used for the area name
:param classification: classification and prefix for the identifier
:param parent_field: field of the parent's identifier
:return: the created or updated Area
"""
area, created = Area.objects.update_or_create(
identifier=f"{classification}_{feat.get(identifier_field)}",
defaults={
'name': name_field(feat),
'classification': classification,
'start_date': '2018-03-01'
}
)
geom = feat.geom.transform(4326, clone=True)
geos = geom.geos
if isinstance(geos, Polygon):
geos = MultiPolygon(geos)
area.geometry = geos
try:
if parent_field == 'COD_REG':
area.parent = Area.objects.get(
identifier=f"{int(feat.get(parent_field)):02}",
classification='ADM1'
)
elif 'CIRCO' in parent_field:
parent_classification = 'ELECT_CIRC'
area.parent = Area.objects.get(
identifier=f"{parent_classification}_{feat.get(parent_field)}",
classification=parent_classification
)
elif '_COD' in parent_field:
parent_classification = 'ELECT_COLL'
area.parent = Area.objects.get(
identifier=f"{parent_classification}_{feat.get(parent_field)}",
classification=parent_classification
)
except Exception as e:
self.logger.error(
f"parent area could not be found: {e}"
)
area.save()
if created:
self.logger.debug(
f"area and geometry created for area {area.name}, "
f"with identifier {area.identifier}, parent: {area.parent.identifier}"
)
else:
self.logger.debug(
f"geometry created for area {area.name}, with identifier {area.identifier}, "
f"parent: {area.parent.identifier}"
)
return area
def fetch_features(self, shp_file) -> Optional[list]:
"""Return list of geometric features from shp file
:returns
"""
if not os.path.exists(shp_file):
self.logger.error(f"File {shp_file} was not found. Terminating.")
return None
# create DataSource instance from shp file
ds = DataSource(shp_file)
self.logger.info(f"geographic features layer read from {shp_file}")
# extract layer
layer = ds[0]
if layer is None:
raise Exception(f"No layer found in shp file. Terminating.")
# transform the layer into a list of features
features_list = list(layer)
return features_list
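A hedged usage sketch; the management command name is hypothetical (the module's file name is not shown in the diff), while the two positional arguments come from add_arguments above:

from django.core.management import call_command

# hypothetical command name; args: shp files directory and branch (camera|senato)
call_command("import_areas_electoral_2018", "/data/shp", "camera")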
# -*- coding: utf-8 -*-
import os
import zipfile
import requests
from django.contrib.gis.db.models import Union
from django.contrib.gis.gdal import DataSource
from django.contrib.gis.geos import Polygon, MultiPolygon
from popolo.models import Area, AreaRelationship
from taskmanager.management.base import LoggingBaseCommand
class Command(LoggingBaseCommand):
"""This command will import the countries geometries from a given shp file, compressed as zip file.
The shapefile is in ./data/shp/world_countries_shp.zip;
it is a WGS 84 projection and has been downloaded from
https://hub.arcgis.com/datasets/a21fdb46d23e4ef896f31475217cbb08_1/data,
simplified to 50%, and repaired through Mapshaper.com
Other details are taken from geonames.org's API.
"""
help = "Import Areas geographic limits for world countries from an archived shp file"
shp_file = None
overwrite = False
def add_arguments(self, parser):
parser.add_argument(
dest='shp_file',
help="Absolute path to the compressed shp file"
)
parser.add_argument(
'--overwrite',
dest='overwrite', action='store_true',
help="Whether to overwrite all geometries or only write missing ones"
)
def handle(self, *args, **options):
super(Command, self).handle(__name__, *args, formatter_key="simple", **options)
self.shp_file = options['shp_file']
self.overwrite = options['overwrite']
self.logger.info(f"Starting import process.")
features = geonames_it = None
try:
geonames_it = self.fetch_geonames_dict()
features = self.fetch_shp_dict()
except Exception as e:
self.logger.error(e)
exit(-1)
if features and geonames_it:
# create earth and continents
continents = {(f['continentName'], f['continent']) for f in geonames_it.values()}
earth, created = Area.objects.get_or_create(
identifier=f"EARTH",
defaults={
'name': 'Terra',
}
)
for c_name, c_code in continents:
Area.objects.get_or_create(
identifier=f"CONT_{c_code}",
defaults={
'name': c_name,
'classification': 'CONT',
'parent': earth
}
)
independent_features = {code: feat for code, feat in features.items() if feat.get('AFF_ISO') == code}
dependent_features = {code: feat for code, feat in features.items() if feat.get('AFF_ISO') != code}
n = 0
for code, feature in independent_features.items():
self.update_or_create_area(code, feature, 'PCL', geonames_it)
n = n + 1
self.logger.info(f"{n} independent geometries updated or created")
n = 0
for code, feature in dependent_features.items():
self.update_or_create_area(code, feature, 'PCLD', geonames_it)
n = n + 1
self.logger.info(f"{n} dependent geometries updated or created")
# generate continents' boundaries
for c_name, c_code in continents:
continent = Area.objects.get(
identifier=f"CONT_{c_code}",
)
continent.geometry = Area.objects.filter(
istat_classification='NAZ', parent=continent
).aggregate(Union('geometry'))['geometry__union']
continent.save()
self.logger.info("End of import process")
def update_or_create_area(self, feat_iso_code, feat, classification, geonames):
"""Insert or update an Area instance, with geometry, identifiers and sources
:param feat_iso_code: the 2-letter ISO 3166 code
:param feat: the complete feature, from the layer
:param classification: the geonames classification (PCL, PCLD)
:param geonames: a dict containing geonames data for the country
:return: the created or updated Area
"""
geo_data = geonames[feat_iso_code]
area, created = Area.objects.update_or_create(
identifier=f"NAZ_{feat.get('ISO')}",
istat_classification='NAZ',
defaults={
'name': geo_data['countryName'],
'classification': classification,
'inhabitants': int(geo_data['population'])
}
)
geom = feat.geom.transform(4326, clone=True)
geos = geom.geos
if isinstance(geos, Polygon):
geos = MultiPolygon(geos)
area.geometry = geos
area.parent = Area.objects.get(identifier=f"CONT_{geo_data['continent']}")
area.add_identifier(geo_data['countryCode'], 'COUNTRY_ISO_ALPHA_2')
area.add_identifier(geo_data['isoAlpha3'], 'COUNTRY_ISO_ALPHA_3')
area.add_identifier(geo_data['fipsCode'], 'COUNTRY_FIPS')
area.add_identifier(geo_data['isoNumeric'], 'COUNTRY_ISO_NUMERIC')
area.add_identifier(geo_data['geonameId'], 'COUNTRY_GEONAMES_ID')
area.add_source("https://hub.arcgis.com/datasets/esri::world-countries-generalized")
area.add_source("http://api.geonames.org/")
if classification == 'PCLD':
mother_area = Area.objects.get(identifier=f"NAZ_{feat.get('AFF_ISO')}")
area.add_relationship(mother_area, AreaRelationship.CLASSIFICATION_TYPES.depends_on)
area.save()
if created:
self.logger.debug(
f"area and geometry created for area {area.name}, with identifier {area.identifier}"
)
else:
self.logger.debug(
f"geometry created for area {area.name}, with identifier {area.identifier}"
)
return area
def fetch_shp_dict(self):
"""Return geo layer from zipped shp file
:return: a DataSource layer (iterable for features)
"""
if not os.path.exists(self.shp_file):
self.logger.error(f"File {self.shp_file} was not found. Terminating.")
return None
# unzip all files into /tmp (needed to create a DataSource)
with zipfile.ZipFile(self.shp_file) as iz:
shp_filename = [n for n in iz.namelist() if n.endswith('.shp')][0]
iz.extractall(path="/tmp")
ds = DataSource(f"/tmp/{shp_filename}")
self.logger.info(f"geographic features layer read from {self.shp_file}")
# extract layer
layer = ds[0]
if layer is None:
raise Exception(f"No layer found in shp file. Terminating.")
elif 'ISO' not in layer.fields:
raise Exception(f"Wrong fields in shp file. Terminating.")
# transform the layer into a dict
features_list = list(layer)
features_dict = {feat.get('ISO'): feat for feat in features_list}
return features_dict
def fetch_geonames_dict(self, language='it'):
"""Read a few country details from geonames.org and return them as a dictionary by 3166 2-digit codes
:param language:
:return: a dictionary, containing details, with ISO 3166 2-digit code as key
"""
r = requests.get(
f"http://api.geonames.org/countryInfoJSON?formatted=true&lang={language}&username=openpolis&style=full"
)
if r.status_code != 200:
raise Exception(f"Could not access geonames API: {r.reason}")
geonames = r.json()
self.logger.info(f"country details read from geonames'API; language: {language}")
return {country['countryCode']: country for country in geonames['geonames']}
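For reference, a sketch of the subset of each geonames countryInfoJSON entry this command consumes (values illustrative, field names taken from the code above):

geonames_entry = {
    "countryCode": "IT",       # dict key; stored as COUNTRY_ISO_ALPHA_2
    "countryName": "Italia",   # becomes Area.name
    "population": "59554023",  # cast to int for Area.inhabitants
    "continent": "EU",         # parent lookup: identifier CONT_EU
    "continentName": "Europa",
    "isoAlpha3": "ITA",        # COUNTRY_ISO_ALPHA_3
    "isoNumeric": "380",       # COUNTRY_ISO_NUMERIC
    "fipsCode": "IT",          # COUNTRY_FIPS
    "geonameId": 3175395,      # COUNTRY_GEONAMES_ID
}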
......@@ -35,12 +35,10 @@ class IstatVariations2PopoloTransformation(Transformation):
"Anno": "anno",
"Tipo variazione": "tipo_variazione",
"Codice Regione": "regional_code",
"Codice Istat del Comune": "city_code_1",
"Codice Comune formato alfanumerico": "city_code_1",
"Denominazione Comune": "city_desc_1",
"Codice Istat del Comune associato alla variazione o "
"nuovo codice Istat del Comune": "city_code_2",
"Denominazione Comune associata alla variazione "
"o nuova denominazione": "city_desc_2",
"Codice del Comune associato alla variazione o nuovo codice Istat del Comune": "city_code_2",
"Denominazione Comune associata alla variazione o nuova denominazione": "city_desc_2",
"Provvedimento e Documento": "doc_provv",
"Contenuto del provvedimento": "descr_provv",
"Data decorrenza validità amministrativa": "validity_date",
......