Commit e3fd4173 authored by Nic

[validation] early-report type-checking issues

parent 59a34371
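
For context, the heart of this change is the `_validate` helper below, whose `check_types` flag filters `jsonschema` errors on their message text so that typing problems can be reported in an early first pass. A minimal standalone sketch of that filtering idea (function and variable names here are illustrative, not the committed code):

    from collections import defaultdict
    import jsonschema

    def collect_errors(document, schema, check_types=None):
        # check_types: None -> report everything, True -> only type errors,
        # False -> everything except type errors
        clue = "is not of type"
        validator = jsonschema.Draft7Validator(schema)
        report = defaultdict(list)
        for error in validator.iter_errors(document):
            is_type_error = clue in error.message
            if check_types is True and not is_type_error:
                continue
            if check_types is False and is_type_error:
                continue
            try:
                # cell-level errors carry a (column, row) path
                col, row, *_ = error.absolute_path
            except ValueError:
                report["general"].append(error.message)
            else:
                report[(col, row)].append(error.message)
        return dict(report)
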
@@ -323,7 +323,31 @@ class DataFrameSchematizer:
def validate_dataframe(self, df):
"""validate dataframe against self.schema()"""
schema = self.build()
intermediate_report = defaultdict(list)  # errors not raised by jsonschema itself
validator = jsonschema.Draft7Validator(schema)
# =====================================================================
# Early validation of typing.
# This is useful because full validation only happens last, after
# units conversion and hole filling; a value of the wrong type would
# make that process fail before validation is ever reached.
# =====================================================================
types_checking = df.copy()
if self._is_units:
# drop the first row, which describes the units
types_checking = types_checking.drop(0)
early_report = _validate(
json.loads(
json.dumps(types_checking.to_dict(orient="list"), ignore_nan=True)
),
validator,
check_types=True,
)
# if len(early_report) > 0:
# # we probably have false-positive here, since an empty cell may
# # be filled later on...
# if self._is_units:
# self._is_units = False
# df = types_checking
# =====================================================================
# builds a multi-level header from the dataframe if schemas are units-aware
# =====================================================================
@@ -360,31 +384,19 @@
if unit in exc.args
]
for colname in colnames:
intermediate_report[(colname, "*")].append(
early_report[(colname, "*")].append(
"undefined units '%s'" % self._source_units[colname]
)
df = df[1:]
except Exception as exc:
self._is_units = False
intermediate_report["uncatched unit error"].extend(list(exc.args))
early_report["uncatched unit error"].extend(list(exc.args))
else:
# -------------------------------------------------------------
# everything was fine, commit temporary `_df` to `df`:
df = _df
del _df
# =====================================================================
# first: fill empty values as requested by schema
# =====================================================================
_fillnas = {
k: schema["properties"][k]["items"].get("default")
for k, v in schema["properties"].items()
}
fillnas = {
k: v for k, v in _fillnas.items() if k in df.columns and v is not None
}
if fillnas:
df = df.fillna(value=fillnas, downcast="infer")
# =====================================================================
# convert read units to schema expected units (if required)
# =====================================================================
if self._is_units:
@@ -400,28 +412,77 @@
except KeyError:
pass
except:
# maybe a string where a number is expected...
# let's investigate it for better user feedback
breakpoint()
df = df.pint.dequantify()
df.columns = [t[0] for t in df.columns.tolist()] # restrict to level 0
# =====================================================================
# second: validate
# fill empty values as requested by schema
# =====================================================================
_fillnas = {
k: schema["properties"][k]["items"].get("default")
for k, v in schema["properties"].items()
}
fillnas = {
k: v for k, v in _fillnas.items() if k in df.columns and v is not None
}
if fillnas:
try:
df = df.fillna(value=fillnas, downcast="infer")
except:
breakpoint()
# =====================================================================
# second validation
# =====================================================================
validator = jsonschema.Draft7Validator(schema)
# df -> dict -> json -> dict to convert NaN to None
document = json.loads(json.dumps(df.to_dict(orient="list"), ignore_nan=True))
report = _validate(
json.loads(json.dumps(df.to_dict(orient="list"), ignore_nan=True)),
validator=validator,
check_types=None,
# report=early_report,
)
# =====================================================================
# the first validation pass may have raised false positives
# (example: NaN in an integer-expected column, with the default
# filled in between the two passes)
# =====================================================================
for k, errors in early_report.items():
filtered_errors = [e for e in errors if "is not of type" not in e]
if filtered_errors:
report[k].extend(filtered_errors)
return df, len(report) == 0, dict(report)
def _validate(document, validator, check_types=None, report=None):
"""if check_types is:
* None (default) report everythong
* True: will only report typing
* False: will NOT report typing
"""
type_checking_clue = "is not of type"
if report is None:
report = defaultdict(list)
for error in validator.iter_errors(document):
try:
col, row, *rows = error.absolute_path
except ValueError:
report["general"].append(error.message)
else:
for error in validator.iter_errors(document):
if type_checking_clue in error.message:
# the error is a type-checking issue: skip it if
# `check_types` is False
if check_types is False:
continue
elif check_types is True:
# otherwise (a non-typing issue): skip it if we **only**
# want typing-related issues
continue
try:
col, row, *rows = error.absolute_path
except ValueError:
report["general"].append(error.message)
else:
if error.message not in report[(col, row)]:
report[(col, row)].append(error.message)
report[(col, row)].extend([e.message for e in error.context])
report = {**dict(report), **intermediate_report}
return df, len(report) == 0, report
return report
class ValidatorMixin:
......
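
For a feel of what the merged report looks like at the end of validate_dataframe: keys are either a (column, row) tuple, (column, "*") for column-wide unit problems, or "general" for document-level errors. The values below are made up; only the shapes and message formats mirror the code above:

    {
        ("mass", 3): ["'heavy' is not of type 'number'"],
        ("length", "*"): ["undefined units 'feets'"],
        "general": ["'mass' is a required property"],
    }

Note that typing messages collected by the early pass are filtered out when the early report is merged into the final one, since defaults filled in between the two passes can turn them into false positives.
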
@@ -247,7 +247,7 @@ def test_IVData_00(datadir):
assert is_ok is True
except:
print("OUPS!")
breakpoint()
pp(errors)
else:
print("OK")
assert errors == {}
......
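
A hypothetical caller-side view of the return value (the `schematizer` and `df` names are assumptions, not taken from this diff): `validate_dataframe` hands back the possibly converted dataframe, a boolean, and the merged report:

    df, is_ok, errors = schematizer.validate_dataframe(df)
    if not is_ok:
        # keys are (column, row), (column, "*") or "general"
        for key, messages in errors.items():
            print(key, messages)
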