Merge pull request #2947 from fedspendingtransparency/qat
Sprint 125 Production Deploy
tony-sappe authored Feb 10, 2021
2 parents 2206f31 + 579bff2 commit 67432e7
Showing 111 changed files with 1,529 additions and 1,346 deletions.
2 changes: 1 addition & 1 deletion .pre-commit-config.yaml
@@ -6,7 +6,7 @@ repos:
- id: debug-statements
- id: flake8
- repo: https://github.com/psf/black
rev: 19.10b0
rev: 20.8b1
hooks:
- id: black
language_version: python3.7
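
The black bump from 19.10b0 to 20.8b1 likely explains the many whitespace-only docstring hunks in the files below: 20.8b1 re-indents docstring continuation lines to match the enclosing definition and strips trailing whitespace. A minimal sketch of the effect on a hypothetical function (not from this repo):

    # before (continuation lines over-indented)
    def f():
        """
            Ensure a unique set of
            names is returned
        """

    # after black 20.8b1 (re-indented to the def's own level)
    def f():
        """
        Ensure a unique set of
        names is returned
        """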
59 changes: 18 additions & 41 deletions requirements/requirements-app.txt
@@ -1,62 +1,39 @@
appdirs==1.4.3
asyncpg==0.18.3
asyncpg==0.21.*
atomicwrites==1.3.0
attrs==19.1.0
boto3==1.9.224
botocore==1.12.224
certifi==2019.6.16
chardet==3.0.4
attrs==20.*
boto3==1.16.*
certifi==2020.12.5
dataclasses-json==0.5.*
django_cte==1.1.*
ddtrace==0.37.1
dj-database-url==0.5.0
django-cors-headers==2.5.3
django-debug-toolbar==2.1
django-debug-toolbar==3.2
django-extensions==2.2.5
django-filter==0.14.0
django-partial-index==0.6.0
django-queryset-csv==1.0.1
django-simple-history==2.7.3
django-spaghetti-and-meatballs==0.2.2
django-spaghetti-and-meatballs==0.4.0
Django==2.2.*
djangorestframework==3.11.0
django_cte==1.1.*
djangorestframework==3.12.2
docutils==0.15.2
drf-extensions==0.3.1
drf-tracking==1.4.0
elasticsearch==7.1.0
drf-api-tracking==1.7.8
drf-extensions==0.6.*
elasticsearch-dsl==7.1.0
entrypoints==0.3
elasticsearch==7.1.0
et-xmlfile==1.0.1
filechunkio==1.8
filelock==3.0.12
fiscalyear==0.1.0
idna==2.8
importlib-metadata==0.20
jdcal==1.4.1
jmespath==0.9.4
jsonpickle==1.2
fiscalyear==0.2.0
Markdown<3.0
more-itertools==7.2.0
numpy==1.17.2
numpy==1.19.*
openpyxl==2.4.7
pandas==0.25.1
pandas==1.1.*
pip==20.*
pluggy==0.12.0
psutil==5.6.*
psycopg2-binary==2.7.5
psycopg2-binary==2.8.*
py-gfm==0.1.4
py==1.8.0
pycodestyle==2.5.0
pyflakes==2.1.1
python-dateutil==2.8.0
python-json-logger==0.1.9
pytz==2019.2
requests==2.22.0
requests==2.25.*
retrying==1.3.3
s3transfer==0.2.1
six==1.12.0
sqlparse==0.3.0
unicodecsv==0.14.1
urllib3==1.25.*
xlrd==1.0.0
zipp==0.6.0
urllib3==1.26.*
xlrd3==1.0.0
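
Many exact pins above become ==X.Y.* wildcards, which accept any release in that series while still blocking minor-version jumps. A quick check of what such a pin admits, using the packaging library (versions are illustrative):

    from packaging.specifiers import SpecifierSet

    spec = SpecifierSet("==2.25.*")      # same style as the requests==2.25.* pin
    print("2.25.1" in spec)              # True: any 2.25.x release satisfies it
    print("2.26.0" in spec)              # False: a minor bump still needs a pin change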
18 changes: 7 additions & 11 deletions requirements/requirements-dev.txt
@@ -1,14 +1,10 @@
black==19.10b0
coverage==4.5.4
docker==4.1.0
black==20.8b1
docker==4.4.1
dredd-hooks==0.2.0
flake8==3.7.8
mccabe==0.6.1
flake8==3.8.4
mock==3.0.5
model-mommy==1.6.0
model-mommy==2.0.0
pre-commit==1.20.0
pycodestyle==2.5.0
pyflakes==2.1.1
pytest-cov==2.8.1
pytest-django==3.5.1
pytest==4.3.1
pytest-cov==2.10.1
pytest-django==3.10.0
pytest==6.2.1
6 changes: 3 additions & 3 deletions requirements/requirements-server.txt
@@ -1,7 +1,7 @@
django-elasticache==1.0.2
django-redis==4.8.0
django-elasticache==1.0.*
django-redis==4.12.*
packaging==16.8
pylibmc==1.5.2
pyparsing==2.2.0
supervisor==4.0.2
uwsgi==2.0.17.1
uwsgi==2.0.*
2 changes: 1 addition & 1 deletion setup.cfg
@@ -4,7 +4,7 @@ addopts=--cov=usaspending_api

[flake8]
select=C,E,F,W,B,B950
ignore=E501,W503,E203
ignore=E501,W503,E203,F541
exclude=.venv,usaspending_api/*/migrations/*
max-line-length=120
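
F541 ("f-string is missing placeholders") ships with the pyflakes behind the flake8 3.8.x pin added in requirements-dev.txt above, so ignoring it keeps constant f-strings from failing lint. For example:

    greeting = f"hello"          # would be flagged F541: no placeholders
    name = "world"
    greeting = f"hello {name}"   # fine either way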

@@ -18,8 +18,8 @@
@pytest.mark.django_db
def test_federal_account_spending_by_category_unique_program_activity_names(client):
"""
Test the spending by category endpoint for the federal account profile page to ensure a unique set of
program activity names are returned
Test the spending by category endpoint for the federal account profile page to ensure a unique set of
program activity names are returned
"""

models_to_mock = [
2 changes: 1 addition & 1 deletion usaspending_api/awards/tests/test_transactions.py
@@ -26,7 +26,7 @@ def test_transaction_endpoint_v2_award_fk(client):
)
mommy.make("awards.TransactionNormalized", description="this should match", _fill_optional=True, award=awd)

resp = client.post("/api/v2/transactions/", {"award_id": "10"})
resp = client.post("/api/v2/transactions/", {"award_id": 10})
assert resp.status_code == status.HTTP_200_OK
assert json.loads(resp.content.decode("utf-8"))["results"][0]["description"] == "this should match"

24 changes: 12 additions & 12 deletions usaspending_api/awards/v2/data_layer/orm_utils.py
@@ -26,21 +26,21 @@ def delete_keys_from_dict(dictionary):

def split_mapper_into_qs(mapper):
"""
Django ORM has trouble using .annotate() when the destination field conflicts with an
existing model field, even if it's the same source field (no renaming occurring)
Django ORM has trouble using .annotate() when the destination field conflicts with an
existing model field, even if it's the same source field (no renaming occurring)
Assuming there is a dictionary with model field names as keys and the target field as the value,
Split that into two objects:
values_list: a list of fields which you wish to retrieve without renaming
aka when `key` == `value`
annotate_dict: a dictionary/OrderedDict of target and source fields to rename
Assuming there is a dictionary with model field names as keys and the target field as the value,
Split that into two objects:
values_list: a list of fields which you wish to retrieve without renaming
aka when `key` == `value`
annotate_dict: a dictionary/OrderedDict of target and source fields to rename
parameters
- mapper: dictionary/OrderedDict
parameters
- mapper: dictionary/OrderedDict
return:
- values_list: list
-annotate_dict: OrderedDict
return:
- values_list: list
-annotate_dict: OrderedDict
"""
values_list = [k for k, v in mapper.items() if k == v]
annotate_dict = OrderedDict([(v, F(k)) for k, v in mapper.items() if k != v])
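
A worked example of the split (field names are hypothetical, not from this repo):

    from collections import OrderedDict
    from django.db.models import F

    mapper = OrderedDict([("piid", "piid"), ("type_description", "award_type")])
    values_list = [k for k, v in mapper.items() if k == v]
    # ["piid"] - retrieved as-is, no rename
    annotate_dict = OrderedDict([(v, F(k)) for k, v in mapper.items() if k != v])
    # {"award_type": F("type_description")} - renamed via .annotate()
    # e.g. queryset.values(*values_list).annotate(**annotate_dict)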
16 changes: 8 additions & 8 deletions usaspending_api/awards/v2/filters/filter_helpers.py
@@ -22,12 +22,12 @@

def merge_date_ranges(date_range_list):
"""
Given a list of date ranges (using the defined namedtuple "Range"), combine overlapping date ranges
While adjacent fiscal years do not overlap the desired behavior is to combine them
FY2010 ends on 2010-09-30, FY2011 start on 2010-10-01.
To address this, when comparing ranges 1 day is removed from the start date and 1 day is added to the end date
Then the overlapping ranges must be > 1 instead of > 0
Inspired by Raymond Hettinger [https://stackoverflow.com/a/9044111]
Given a list of date ranges (using the defined namedtuple "Range"), combine overlapping date ranges
While adjacent fiscal years do not overlap the desired behavior is to combine them
FY2010 ends on 2010-09-30, FY2011 start on 2010-10-01.
To address this, when comparing ranges 1 day is removed from the start date and 1 day is added to the end date
Then the overlapping ranges must be > 1 instead of > 0
Inspired by Raymond Hettinger [https://stackoverflow.com/a/9044111]
"""
ordered_list = sorted([sorted(t) for t in date_range_list])
saved_range = Range(start=ordered_list[0][0], end=ordered_list[0][1])
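
A small sketch of the one-day padding described in the docstring (the comparison is illustrative, not the function's exact code):

    from datetime import date, timedelta

    fy2010_end = date(2010, 9, 30)
    fy2011_start = date(2010, 10, 1)
    # Adjacent fiscal years do not overlap, but widening each side by a day
    # makes them touch, so FY2010 and FY2011 combine into one range:
    print(fy2010_end + timedelta(days=1) >= fy2011_start - timedelta(days=1))  # True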
@@ -142,7 +142,7 @@ def total_obligation_queryset(amount_obj, model, filters):

def can_use_month_aggregation(time_period):
"""
time_period is the list of action_date ranges from API
time_period is the list of action_date ranges from API
"""
try:
for v in time_period:
@@ -173,7 +173,7 @@ def can_use_total_obligation_enum(amount_obj):

def only_action_date_type(time_period):
"""
if a date_type is last_modified_date, don't use the matview this applies to
if a date_type is last_modified_date, don't use the matview this applies to
"""
try:
for v in time_period:
@@ -6,7 +6,7 @@
from usaspending_api.accounts.views.federal_accounts_v2 import filter_on
from usaspending_api.awards.models import FinancialAccountsByAwards
from usaspending_api.search.filters.postgres.psc import PSCCodes
from usaspending_api.search.models import AwardSearchView, UniversalTransactionView
from usaspending_api.search.models import AwardSearchView, TransactionSearch
from usaspending_api.awards.v2.filters.filter_helpers import combine_date_range_queryset, total_obligation_queryset
from usaspending_api.awards.v2.filters.location_filter_geocode import geocode_filter_locations
from usaspending_api.common.exceptions import InvalidParameterException, NotImplementedException
@@ -26,8 +26,8 @@ def universal_award_matview_filter(filters):
return matview_search_filter(filters, AwardSearchView, for_downloads=True)


def universal_transaction_matview_filter(filters):
return matview_search_filter(filters, UniversalTransactionView, for_downloads=True)
def transaction_search_filter(filters):
return matview_search_filter(filters, TransactionSearch, for_downloads=True)


def matview_search_filter_determine_award_matview_model(filters):
20 changes: 10 additions & 10 deletions usaspending_api/broker/helpers/award_category_helper.py
@@ -1,16 +1,16 @@
def award_types(row):
"""
"Award Type" for FPDS transactions
if award <> IDV (`pulled_from` <> 'IDV'): use `contract_award_type`
elif `idv_type` == B &`type_of_idc` is present: use "IDV_B_" + `type_of_idc`
elif `idv_type` == B & ("case" for type_of_idc_description for specific IDC type): use IDV_B_*
else use "IDV_" + `idv_type`
"Award Type" for FPDS transactions
if award <> IDV (`pulled_from` <> 'IDV'): use `contract_award_type`
elif `idv_type` == B &`type_of_idc` is present: use "IDV_B_" + `type_of_idc`
elif `idv_type` == B & ("case" for type_of_idc_description for specific IDC type): use IDV_B_*
else use "IDV_" + `idv_type`
"Award Type Description" for FPDS transactions
if award <> IDV (`pulled_from` <> 'IDV'): use `contract_award_type_desc`
elif `idv_type` == B & `type_of_idc_description` <> null/NAN: use `type_of_idc_description`
elif `idv_type` == B: use "INDEFINITE DELIVERY CONTRACT"
else: use `idv_type_description`
"Award Type Description" for FPDS transactions
if award <> IDV (`pulled_from` <> 'IDV'): use `contract_award_type_desc`
elif `idv_type` == B & `type_of_idc_description` <> null/NAN: use `type_of_idc_description`
elif `idv_type` == B: use "INDEFINITE DELIVERY CONTRACT"
else: use `idv_type_description`
"""
pulled_from = row.get("pulled_from", None)
idv_type = row.get("idv_type", None)
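
A condensed, hypothetical sketch of the "Award Type" branches spelled out in the docstring (the real function also derives the description and the IDC-description "case" mapping):

    def award_type_sketch(row):
        pulled_from = row.get("pulled_from")
        idv_type = row.get("idv_type")
        type_of_idc = row.get("type_of_idc")
        if pulled_from != "IDV":
            return row.get("contract_award_type")
        if idv_type == "B" and type_of_idc is not None:
            return "IDV_B_" + type_of_idc
        return "IDV_" + (idv_type or "")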
2 changes: 2 additions & 0 deletions usaspending_api/broker/helpers/delete_stale_fabs.py
@@ -32,11 +32,13 @@ def delete_stale_fabs(ids_to_delete):
if delete_transaction_ids:
fabs = 'DELETE FROM "transaction_fabs" tf WHERE tf."transaction_id" IN ({});'
tn = 'DELETE FROM "transaction_normalized" tn WHERE tn."id" IN ({});'
ts = 'DELETE FROM "transaction_search" ts WHERE ts."transaction_id" IN ({});'
td = "DELETE FROM transaction_delta td WHERE td.transaction_id in ({});"
queries.extend(
[
fabs.format(delete_transaction_str_ids),
tn.format(delete_transaction_str_ids),
ts.format(delete_transaction_str_ids),
td.format(delete_transaction_str_ids),
]
)
@@ -111,7 +111,7 @@ def get_fabs_transaction_ids(ids, afa_ids, start_datetime, end_datetime):

def read_afa_ids_from_file(afa_id_file_path):
with RetrieveFileFromUri(afa_id_file_path).get_file_object() as f:
return {l.decode("utf-8").rstrip() for l in f if l}
return {line.decode("utf-8").rstrip() for line in f if line}
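
The l-to-line rename reads as a lint fix: a bare l is easy to misread and is what pycodestyle's E741 ("ambiguous variable name") warns about, though that motivation is an assumption here. For example:

    for l in rows:       # E741: "l" is easily confused with "1" and "I"
        ...
    for line in rows:    # unambiguous, passes lint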


class Command(BaseCommand):
8 changes: 2 additions & 6 deletions usaspending_api/broker/tests/integration/test_delete_fabs.py
@@ -18,12 +18,8 @@ def test_delete_fabs_success():

# Award kept despite having one of their associated transactions removed
mommy.make(Award, id=2, latest_transaction_id=2, generated_unique_award_id="TEST_AWARD_2")
mommy.make(
TransactionNormalized, id=2, award_id=2, action_date="2019-01-01", unique_award_key="TEST_AWARD_2",
)
mommy.make(
TransactionNormalized, id=3, award_id=2, action_date="2019-01-02", unique_award_key="TEST_AWARD_2",
)
mommy.make(TransactionNormalized, id=2, award_id=2, action_date="2019-01-01", unique_award_key="TEST_AWARD_2")
mommy.make(TransactionNormalized, id=3, award_id=2, action_date="2019-01-02", unique_award_key="TEST_AWARD_2")
mommy.make(
TransactionFABS, transaction_id=2, published_award_financial_assistance_id=302, unique_award_key="TEST_AWARD_2"
)
14 changes: 7 additions & 7 deletions usaspending_api/common/csv_helpers.py
@@ -7,14 +7,14 @@

def count_rows_in_delimited_file(filename, has_header=True, safe=True, delimiter=","):
"""
Simple and efficient utility function to provide the rows in a valid delimited file
If a header is not present, set head_header parameter to False
Simple and efficient utility function to provide the rows in a valid delimited file
If a header is not present, set head_header parameter to False
Added "safe" mode which will handle any NUL BYTE characters in delimited files
It does increase the function runtime by approx 10%.
Example:
"non-safe" counting ~1 million records in a delimited file takes 9s
using "safe mode", it now takes 10s
Added "safe" mode which will handle any NUL BYTE characters in delimited files
It does increase the function runtime by approx 10%.
Example:
"non-safe" counting ~1 million records in a delimited file takes 9s
using "safe mode", it now takes 10s
"""
with codecs.open(filename, "r") as f:
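
A hedged sketch of what the "safe" mode plausibly does: strip NUL bytes before handing lines to the csv reader (names and details here are illustrative, not this module's code):

    import csv

    def count_rows_safe(filename, has_header=True, delimiter=","):
        with open(filename, "r", newline="") as f:
            # Dropping NUL bytes line-by-line keeps csv.reader from raising.
            reader = csv.reader((line.replace("\0", "") for line in f), delimiter=delimiter)
            row_count = sum(1 for _ in reader)
        return row_count - 1 if has_header else row_count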
2 changes: 1 addition & 1 deletion usaspending_api/common/etl/mixins.py
@@ -38,7 +38,7 @@ def _execute_dml_sql_file(self, file_path: [str, Path], timer_message: Optional[
"""
Read in a SQL file and execute it. Assumes the file's path has already been
determined. If no message is provided, the file named will be logged (if logging
is enabled). """
is enabled)."""

file_path = Path(file_path)
return self._execute_dml_sql(file_path.read_text(), timer_message or file_path.stem)
4 changes: 2 additions & 2 deletions usaspending_api/common/experimental_api_flags.py
@@ -29,8 +29,8 @@ def is_experimental_elasticsearch_api(request: Request) -> bool:


def mirror_request_to_elasticsearch(request: Union[HttpRequest, Request]):
""" Duplicate request and send-again against this server, with the ES header attached to mirror
non-elasticsearch load against elasticsearch for load testing
"""Duplicate request and send-again against this server, with the ES header attached to mirror
non-elasticsearch load against elasticsearch for load testing
"""
url = request.build_absolute_uri()
data = json.dumps(request.data)
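
The rest of the function is elided here, but the docstring suggests a fire-and-forget re-POST along these lines (the header name and timeout are assumptions, not confirmed by the visible diff):

    import json
    import requests

    def mirror_sketch(request):
        try:
            requests.post(
                request.build_absolute_uri(),
                data=json.dumps(request.data),
                headers={"X-Experimental-API": "elasticsearch"},  # assumed header
                timeout=1,
            )
        except requests.RequestException:
            pass  # mirroring must never break the real request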
14 changes: 7 additions & 7 deletions usaspending_api/common/helpers/api_helper.py
@@ -35,12 +35,12 @@ def alias_response(field_to_alias_dict, results):
def raise_if_award_types_not_valid_subset(award_type_codes, is_subaward=False):
"""Test to ensure the award types are a subset of only one group.
For Awards: contracts, idvs, direct_payments, loans, grants, other assistance
For Sub-Awards: procurement, assistance
For Awards: contracts, idvs, direct_payments, loans, grants, other assistance
For Sub-Awards: procurement, assistance
If the types are not a valid subset:
Raise API exception with a JSON response describing the award type groupings
"""
If the types are not a valid subset:
Raise API exception with a JSON response describing the award type groupings
"""
msg_head = '"message": "\'award_type_codes\' must only contain types from one group.","award_type_groups": '
if is_subaward:
if not subaward_types_are_valid_groups(award_type_codes):
@@ -75,8 +75,8 @@ def raise_if_award_types_not_valid_subset(award_type_codes, is_subaward=False):
def raise_if_sort_key_not_valid(sort_key, field_list, is_subaward=False):
"""Test to ensure sort key is present for the group of Awards or Sub-Awards
Raise API exception if sort key is not present
"""
Raise API exception if sort key is not present
"""
msg_prefix = ""
if is_subaward:
msg_prefix = "Sub-"
2 changes: 1 addition & 1 deletion usaspending_api/common/helpers/business_logic_helpers.py
@@ -6,7 +6,7 @@

def cfo_presentation_order(agency_list):
cfo_agencies = sorted(
[a for a in agency_list if a["toptier_code"] in CFO_CGACS], key=lambda a: CFO_CGACS.index(a["toptier_code"]),
[a for a in agency_list if a["toptier_code"] in CFO_CGACS], key=lambda a: CFO_CGACS.index(a["toptier_code"])
)
other_agencies = sorted([a for a in agency_list if a["toptier_code"] not in CFO_CGACS], key=lambda a: a["name"])
return {"cfo_agencies": cfo_agencies, "other_agencies": other_agencies}
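
A worked example of the ordering (codes and names are made up):

    CFO_CGACS = ["012", "097"]  # hypothetical canonical CFO order
    agency_list = [
        {"toptier_code": "097", "name": "Defense"},
        {"toptier_code": "123", "name": "Arts"},
        {"toptier_code": "012", "name": "Agriculture"},
    ]
    # cfo_agencies -> Agriculture (012) then Defense (097), per CFO_CGACS;
    # other_agencies -> Arts, sorted alphabetically by name.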
