Commit 1ff3dbfd authored by Nic

continue working on code robustness

parent 69b7c6ab
@@ -112,27 +112,51 @@ def load_yaml(*files, clean=True, debug=False):
return specs
def quantify_df(df):
def quantify_df(df, target_units, errors):
"""preprocess a dataframe assuming units strings are on first line"""
df = df.copy()
df.columns = pd.MultiIndex.from_tuples(zip(df.columns, df.iloc[0]))
df = df.iloc[1:] # drop first line which has been merged previously
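# illustrative layout (hypothetical values): the raw frame carries its units on
# row 0, e.g. columns ["id", "distA"] with df.iloc[0] == ["", "m"]; the two
# lines above turn the columns into the MultiIndex [("id", ""), ("distA", "m")]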
# units = dict(df.columns.tolist()) # {'id': '', 'distA': 'm',... }
units = {col: u for col, u in df.columns if isinstance(u, str)}
_source_units = {} # {col: u for col, u in df.columns if isinstance(u, str)}
_units_to_col = defaultdict(list)
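# _source_units maps column name -> units string (e.g. {"distA": "m"}) and
# _units_to_col is the reverse lookup (e.g. {"m": ["distA"]}); both are
# filled from the numeric columns further down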
# -------------------------------------------------------------------------
# split df between numeric / non-numeric columns
df_num = pd.DataFrame(index=df.index)
df_nonnum = pd.DataFrame(index=df.index)
# -------------------------------------------------------------------------
# test each column and column units
for col, col_units in df.columns:
if isinstance(col_units, str):
df_num[(col, col_units)] = pd.to_numeric(df[(col, col_units)])
# found a column with specified units
# check that the declared units are known to pint
if col_units.strip() != "":
try:
getattr(ureg, col_units)
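# an unknown unit name makes pint raise UndefinedUnitError, which
# derives from AttributeError, hence the clause below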
except AttributeError:
errors[(col, None)].append("undefined units '%s'" % col_units)
df_nonnum[(col, col_units)] = df[(col, col_units)]
continue
# Try to convert the whole column to numeric:
try:
df_num[(col, col_units)] = pd.to_numeric(df[(col, col_units)])
except ValueError as exc:
errors[(col, "?")] = "cannot convert some values to numeric"
df_nonnum[(col, col_units)] = df[(col, col_units)]
continue
else:
df_nonnum[(col, col_units)] = df[(col, col_units)]
df_num.columns = pd.MultiIndex.from_tuples(df_num.columns)
try:
if col in target_units:
errors[(col, None)].append("no units specified in source file")
# -------------------------------------------------------------------------
# calculate source units
for col, u in df_num.columns:
_source_units[col] = u
_units_to_col[u].append(col)
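# guard against empty frames: pd.MultiIndex.from_tuples() refuses an empty
# list, so only rebuild the columns when there is something to rebuild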
if len(df_num.columns) > 0:
df_num.columns = pd.MultiIndex.from_tuples(df_num.columns)
if len(df_nonnum.columns) > 0:
df_nonnum.columns = pd.MultiIndex.from_tuples(df_nonnum.columns)
except:
pass
return df_num, df_nonnum, units
return df_num, df_nonnum, _source_units, _units_to_col
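# sketch of the intended call (mirrors validate_dataframe below); `errors`
# is assumed to be a defaultdict(list) keyed by (column, row) tuples:
#     errors = defaultdict(list)
#     df_num, df_nonnum, src_units, u2cols = quantify_df(df, target_units, errors)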
# use short units
@@ -346,50 +370,50 @@ class DataFrameSchematizer:
self.columns_specs[name] = ref
def validate_dataframe(self, df):
"""validate dataframe against self.schema()"""
"""validate dataframe against self.schema():
1. type validation
2. units conversion
3. default values are applied
4. final validation
"""
schema = self.build()
validator = jsonschema.Draft7Validator(schema)
# =====================================================================
# Early validation for typing
# 1. Early validation for typing
# This is useful because the full validation should happen last,
# after units conversion and the filling of missing values.
# If a value has the wrong type, those steps would fail before the data
# ever reached validation.
# =====================================================================
types_checking = df.copy()
initial_without_urow = df.copy()
if self._is_units:
# remove the first row, which describes the units
types_checking = types_checking.drop(0)
initial_without_urow = initial_without_urow.drop(0)
# check **types only** before any further DataFrame transformation
# this will catch any type error
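# note: ignore_nan=True is a simplejson option (NaN serialised as null),
# so `json` is presumably simplejson imported under that name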
early_report = _validate(
json.loads(
json.dumps(types_checking.to_dict(orient="list"), ignore_nan=True)
json.dumps(initial_without_urow.to_dict(orient="list"), ignore_nan=True)
),
validator,
check_types=True,
)
# =====================================================================
# builds mult-header from dataframes if schemas are units-aware
# builds multi-header from dataframes if schemas are units-aware
# =====================================================================
if self._is_units:
# recover target units as specified in the validation schema
_props = schema["properties"]
self._target_units = {k: v for k in _props if (v := _props[k].get("units"))}
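# e.g. {"CA": "m", "CF": "kg"} (illustrative, using columns from the test
# suite): only columns whose schema declares target units end up here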
df_num, df_nonnum, self._source_units = quantify_df(df)
# split the dataframe in two parts: one that will be quantified, the other not.
# a column will be quantified if it:
# * has specified units (source units)
# * is pd.to_numeric compliant
df_num, df_nonnum, self._source_units, u2cols = quantify_df(
df, self._target_units, early_report
)
try:
# work around this bug:
# https://github.com/hgrecco/pint-pandas/issues/49
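# .pint.quantify(level=-1) consumes the last (units) level of the column
# MultiIndex and converts each column to a pint-backed dtype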
df_num = df_num.pint.quantify(level=-1)
except pint.errors.UndefinedUnitError as exc:
colnames = [
colname
for colname, unit in self._source_units.items()
if unit in exc.args
]
for colname in colnames:
early_report[(colname, None)].append(
"undefined units '%s'" % self._source_units[colname]
)
self._is_units = False
df = df[1:]
except Exception as exc:
early_report["uncatched unit error"].extend(list(exc.args))
self._is_units = False
@@ -399,19 +423,19 @@ class DataFrameSchematizer:
# =====================================================================
if self._is_units:
# at this point, we still have df_num and df_nonnum
for col, units in self._target_units.copy().items():
try:
df_num[col] = df_num[col].pint.to(units)
except KeyError:
early_report[(col, None)].append("no units specified in source file")
# no units specified in source file
pass
except:
# maybe a string where a number is expected...
# let's investigate for better user feedback
df[col] = pd.Series(
types_checking[col].values,
initial_without_urow[col].values,
dtype="pint[ ]",
index=types_checking.index,
index=initial_without_urow.index,
)
self._target_units[col] = "" # .pop(col)
self._source_units[col] = "" # .pop(col)
@@ -421,7 +445,7 @@ class DataFrameSchematizer:
df = pd.concat((df_num, df_nonnum), axis=1)
# restore the initial column order
df.columns = [t[0] for t in df.columns.tolist()] # restrict to level 0
df.columns = [c for c in types_checking.columns]
df = df[[c for c in initial_without_urow.columns]]
# =====================================================================
# fill empty values as requested by schema
# =====================================================================
@@ -15,6 +15,7 @@ mass:
test_mass_tonne:
types: number
<<: *mass
mandatory: true
test_mass_g:
types: number
<<: *mass
@@ -168,7 +168,6 @@ def test_DFS_02_validation():
"""test DataFrameSchematizer class validation.
specifically testing enums with default
"""
v = DataFrameSchematizer()
columns_specs = """\
---
CA:
@@ -196,6 +195,7 @@ CF:
default: 0
units: kg
"""
v = DataFrameSchematizer()
v.add_columns(columns_specs)
df = pd.DataFrame(
{
@@ -208,30 +208,37 @@ CF:
}
)
df, is_valid, errors = v.validate_dataframe(df)
assert errors == {}
assert (
pd.testing.assert_frame_equal(
df, pd.DataFrame({"da": {1: 0.05, 2: 0.15}, "db": {1: "aft", 2: "fwd"}})
)
is None
)
assert errors == {
("CD", None): ["no units specified in source file"],
("CF", None): ["undefined units 'lbm'"],
}
# let's correct the previous errors
v = DataFrameSchematizer()
v.add_columns(columns_specs)
df = pd.DataFrame(
{
"da": {0: "cm", 2: 15},
"db": {0: "", 2: "fwd"},
"CA": {0: "cm", 1: 5, 2: 15},
"CB": {0: "", 1: 5.3}, # empty units specified, but that's fine
"CC": {1: 5.3}, # no units specified, but that's fine
"CD": {0: "lbf", 1: 54.3}, # no units specified, that's an issue!
"CE": {1: "fwd"},
"CF": {0: "lb", 1: 2}, # "lbm" doesn't exist
}
)
df, is_valid, errors = v.validate_dataframe(df)
assert errors == {}
assert (
pd.testing.assert_frame_equal(
df, pd.DataFrame({"da": {1: 0.05, 2: 0.15}, "db": {1: "aft", 2: "fwd"}})
)
is None
exp = pd.DataFrame(
{
"CA": {1: 0.05, 2: 0.15},
"CB": {1: 5.3, 2: 1.0},
"CC": {1: 5.3, 2: 1.0},
"CD": {1: 241.53843370864516, 2: 0.0},
"CE": {1: "fwd", 2: "aft"},
"CF": {1: 0.9071847400000002, 2: 0.0},
}
)
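# the expected numbers follow from the declared target units (CF's "kg" appears
# in the YAML hunk above; the others are inferred): 5 cm -> 0.05 m,
# 54.3 lbf -> 241.538 N, 2 lb -> 0.907 kg; missing cells in row 2 are filled
# with the schema defaults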
assert pd.testing.assert_frame_equal(df, exp) is None
def test_VData_00():
@@ -550,10 +557,7 @@ def test_IVData_05_failing_units(datadir):
("test_mass_g", 2): ["None is not of type 'number'"],
("test_mass_lbm", None): ["undefined units 'lbm'"],
},
"volume": {
"general": ["'test_volume_foot3' is a required property"],
("test_volume_foot3", None): ["no units specified in source file"],
},
"volume": {"general": ["'test_volume_foot3' is a required property"]},
}
assert expected_report == ret
@@ -682,12 +686,15 @@ def test_IVData_07_failing_units(datadir):
rep = data_si.validate()
assert rep == {
"mass": {
"general": ["'test_mass_g' is a required property"],
("test_mass_lb", None): ["unable to convert this column"],
"general": [
"'test_mass_tonne' is a required property",
"'test_mass_g' is a required property",
],
("test_mass_lb", 0): [
"'a' is not valid under any of the given " "schemas",
"'a' is not of type 'number'",
"'a' is not of type 'null'",
],
("test_mass_lb", "?"): "cannot convert some values to numeric",
}
}