Commit 201cc0e2 authored by Nic

add spec; fix row-number ambiguity in reports; fix tests

parent aca0d8bc
@@ -146,7 +146,13 @@ class VDataAutodoc:
makefile = self.target_dir / "Makefile"
return conf.exists() and makefile.exists()
def dump_data(self, skip_tabs=(), drop_columns=(), rename_columns=(), order_columns=("column",)):
def dump_data(
self,
skip_tabs=(),
drop_columns=(),
rename_columns=(),
order_columns=("column",),
):
# =====================================================================
# index.rst calls master file `input_data.rst`
# =====================================================================
@@ -185,7 +191,11 @@ class VDataAutodoc:
filename = tabdir / f"{tab}.rst"
if not filename.exists():
with open(filename, "w") as fh:
fh.write(_write(f"Please edit ``{filename}`` to provide adequate description"))
fh.write(
_write(
f"Please edit ``{filename}`` to provide adequate description"
)
)
# -----------------------------------------------------------------
# create private tab description file `source/tabs/.<tab>.rst`
_filename = tabdir / f".{tab}.rst"
@@ -223,7 +233,7 @@ class VDataAutodoc:
df["set"] = pd.Series(uniq)
df["set"] = df.set.fillna("")
if mandatory:
df["mandatory"] = pd.Series(mandatory)
df["mandatory"] = pd.Series(mandatory)
df["mandatory"] = df.mandatory.fillna("")
if "type" in df.columns:
df["type"] = df["type"].replace(
@@ -250,7 +260,9 @@ class VDataAutodoc:
df = df.drop(columns=_drop_columns)
# -------------------------------------------------------------
# rename columns
_rename_columns = {prev: new for prev, new in rename_columns.items() if prev in df}
_rename_columns = {
prev: new for prev, new in rename_columns.items() if prev in df
}
if _rename_columns:
df = df.rename(columns=_rename_columns)
# -------------------------------------------------------------
@@ -261,10 +273,14 @@ class VDataAutodoc:
)
fh.write(_write(table))
if _uniq:
fh.write(_write("the following sets of columns combination need to be **unique**:"))
fh.write(
_write(
"the following sets of columns combination need to be **unique**:"
)
)
for uniq, cols in _uniq.items():
columns = ", ".join([f"``{c}``" for c in cols])
fh.write(_write(f" * ``{uniq}``: {columns}"))
fh.write(_write(f" * ``{uniq}``: {columns}"))
if __name__ == "__main__":
......
@@ -297,6 +297,7 @@ class DataFrameSchematizer:
self._source_units = None
self._target_units = None
self.uniqueness_sets = defaultdict(list)
self.xcalling = {}
def build(self):
"""build and return schema"""
@@ -316,6 +317,14 @@ class DataFrameSchematizer:
"type": "array",
"uniqueItems": True,
}
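# expose cross-reference ("must_exist_in") requirements under a dedicated
# "xcalls" key, e.g. {"beam_id": {"tabname": "beam_def", "colname": "id"}}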
for colname, (ext_tabname, ext_colname) in self.xcalling.items():
if "xcalls" not in schema:
schema["xcalls"] = {}
schema["xcalls"][colname] = {
"tabname": ext_tabname,
"colname": ext_colname,
}
return schema
def _add_columns_from_json(self, jsontxt):
@@ -349,6 +358,7 @@ class DataFrameSchematizer:
unique=False,
mandatory=False,
uniqueness_id=None,
must_exist_in=None,
units=None,
**kwargs,
):
@@ -362,6 +372,8 @@ class DataFrameSchematizer:
types.append("null")
if uniqueness_id:
self.uniqueness_sets[uniqueness_id].append(name)
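# must_exist_in declares a cross-tab reference as "tabname::colname",
# e.g. must_exist_in="beam_def::id" stores self.xcalling[name] = ("beam_def", "id")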
if must_exist_in:
self.xcalling[name] = tuple(must_exist_in.split("::"))
# ---------------------------------------------------------
if len(types) > 1:
items = {"anyOf": [{"type": typ} for typ in types]}
@@ -389,7 +401,7 @@ class DataFrameSchematizer:
3. default values are applied
4. final validation
"""
_df_debug = df.copy()
# _df_debug = df.copy()
schema = self.build()
validator = jsonschema.Draft7Validator(schema)
# =====================================================================
@@ -399,9 +411,14 @@ class DataFrameSchematizer:
# If a value has the wrong type, this process would fail before reaching
# validation
# =====================================================================
# ensure line#0 is first line of data
offset = {True: -1, False: 0}
df = df.reset_index(drop=True)
df.index += offset[self._is_units]
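# the first data row is now always index 0; a units row, when present, ends up at index -1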
initial_without_urow = df.copy()
if self._is_units:
initial_without_urow = initial_without_urow.drop(0)
initial_without_urow = initial_without_urow.iloc[1:]
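# drop the units row by position: after the re-indexing above its label is -1, not 0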
# check **types only** before any further DataFrame transformation
# this will catch any type error
early_report = _validate(
@@ -457,7 +474,9 @@ class DataFrameSchematizer:
k: schema["properties"][k]["items"].get("default")
for k, v in schema["properties"].items()
}
padded_columns = [colname for colname, value in _fillnas.items() if value == "_pad_"]
padded_columns = [
colname for colname, value in _fillnas.items() if value == "_pad_"
]
fillnas = {
k: v for k, v in _fillnas.items() if k in df.columns and v is not None
}
@@ -466,7 +485,7 @@ class DataFrameSchematizer:
if padded_columns:
df = df.replace({"_pad_": np.nan})
df[padded_columns] = df[padded_columns].fillna(method="pad")
# df = df.fillna(method="pad",
# df = df.fillna(method="pad",
# =====================================================================
# build dummy columns if required by the multiple-columns uniqueness
# =====================================================================
@@ -648,7 +667,7 @@ class ValidatorMixin:
self._schemas[tabname].add_columns(schema)
def read_schema(self, *filepath):
""" assign a global schema by parsing the given filepath"""
"""assign a global schema by parsing the given filepath"""
if len(filepath) > 1:
return self.read_multiple_yamls_schemas(*filepath)
else:
@@ -674,19 +693,27 @@ class ValidatorMixin:
def _validate_tab(self, tabname):
"""validate a tab using the provided scheme"""
if tabname not in self._schemas:
return None, True, {}
return self._schemas[tabname].validate_dataframe(self._data[tabname])
return (None, True, {}), {}
schema = self._schemas[tabname]
result = schema.validate_dataframe(self._data[tabname])
return result, schema.xcalling
def validate(self):
def validate(self, row_reporting_offset="data"):
"""
iterate through all tabs and validate each one
row_reporting_offset can be used to offset the reported row numbers
"""
# keep initial data before processing them
if not hasattr(self, "_raw_data"):
self._raw_data = {tabname: df.copy() for tabname, df in self._data.items()}
ret = {}
ret = defaultdict(dict)
xcallings = {}
for tabname, df in self._data.copy().items():
df, is_ok, report = self._validate_tab(tabname)
(df, is_ok, report), xcalling = self._validate_tab(tabname)
if tabname in self._schemas:
if xcalling := self._schemas[tabname].xcalling:
xcallings[tabname] = xcalling
if df is None:
# tabname not described in schema; drop it
logging.info('drop tab "%s" which is not described in schema.', tabname)
@@ -695,7 +722,43 @@ class ValidatorMixin:
self._data[tabname] = df # override with filled (fillna) dataframe
if not is_ok:
ret[tabname] = report
return ret
# ---------------------------------------------------------------------
# check cross-reference ("must_exist_in") calls
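# e.g. every beams.beam_id value must also exist in beam_def.id (see the schema spec shipped with this commit)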
for src_tabname, targets in xcallings.items():
for src_colname, (target_tabname, target_colname) in targets.items():
target = self._data[target_tabname][target_colname]
src = self._data[src_tabname][src_colname]
missing = src[~src.isin(target)]
report = ret[src_tabname]
for row_nb, _missing_ref in missing.items():
if (src_colname, row_nb) not in ret[src_tabname]:
ret[src_tabname][(src_colname, row_nb)] = []
ret[src_tabname][(src_colname, row_nb)].append(
f"'value `{_missing_ref}` not found in `{target_tabname}::{target_colname}`"
)
# ---------------------------------------------------------------------
# apply offset
if row_reporting_offset:
offset_ret = defaultdict(dict)
for tabname, report in ret.items():
is_units = len(self.getunits(tabname)) > 0
offset = _get_rows_offset(row_reporting_offset, is_units)
for index, _ in report.items():
try:
(colname, rownb) = index
if rownb in (None, "?"):
raise ValueError("switch...")
except:
offset_ret[tabname][index] = report[
index
] # "general" section, without row clue
else:
offset_ret[tabname][(colname, rownb + offset)] = report[
(colname, rownb)
]
ret = offset_ret
return dict(ret)
def dump_template(self):
"""return list of columns ready to be dumped as XLSX template"""
@@ -705,6 +768,23 @@ class ValidatorMixin:
return dic
def _get_rows_offset(row_reporting_offset, is_units):
"""by default (row_reporting_offset = 0),
row#0 means row#:
| Data 0-based |Data 1-based | Spreadsheet |
with units | 0 |1 | 3 |
witthout units | 0 |1 | 2 |
"""
if isinstance(row_reporting_offset, int):
offset = row_reporting_offset
elif row_reporting_offset == "data":
offset = 1 # equivalent to row_reporting_offset = 1
elif row_reporting_offset == "spreadsheet":
offset = 3 if is_units else 2
return offset
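# worked example (following the docstring table above): with a units row, an error on
# internal row #1 is reported as row 2 with the default "data" offset (1) and as row 4
# with the "spreadsheet" offset (3)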
if __name__ == "__main__":
import doctest
......
---
beam_def:
id:
types: integer
unique: true
mandatory: true
E:
types: number
mandatory: true
units: MPa
nu:
types: number
mandatory: true
beams:
beam_id:
types: integer
mandatory: true
must_exist_in: beam_def::id
length:
types: number
mandatory: true
units: m
color_id:
types: integer
must_exist_in: colors::id
mandatory: true
colors:
id:
types: integer
unique: true
mandatory: true
name:
types: string
mandatory: true
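A minimal sketch of how this spec could be exercised, following the IVData API used by the tests below; the import path and file names are hypothetical:

from vdata import IVData  # hypothetical import path

data = IVData()
data.read_excel("beams.xlsx")          # hypothetical workbook with beam_def, beams and colors tabs
data.read_schema("beams.schema.yaml")  # the spec above
report = data.validate("spreadsheet")  # report rows in spreadsheet numbering
# any beams.beam_id missing from beam_def.id (or color_id missing from colors.id)
# shows up in report["beams"] as "'value `...` not found in `beam_def::id`"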
@@ -149,18 +149,17 @@ dh:
}
assert errors == expected_errors
# check that values have been correctly filled
expected_df = pd.DataFrame(
{
"da": {1: None},
"db": {1: np.nan},
"dc": {1: 1},
"dd": {1: 1},
"de": {1: 2},
"df": {1: 2},
"dg": {1: 2},
"dh": {1: 2},
}
)
expected_data = {
"da": {0: None},
"db": {0: np.nan},
"dc": {0: 1},
"dd": {0: 1},
"de": {0: 2},
"df": {0: 2},
"dg": {0: 2},
"dh": {0: 2},
}
expected_df = pd.DataFrame(expected_data)
assert pd.testing.assert_frame_equal(df, expected_df, check_dtype=False) is None
@@ -230,12 +229,12 @@ CF:
assert errors == {}
exp = pd.DataFrame(
{
"CA": {1: 0.05, 2: 0.15},
"CB": {1: 5.3, 2: 1.0},
"CC": {1: 5.3, 2: 1.0},
"CD": {1: 241.53843370864516, 2: 0.0},
"CE": {1: "fwd", 2: "aft"},
"CF": {1: 0.9071847400000002, 2: 0.0},
"CA": {0: 0.05, 1: 0.15},
"CB": {0: 5.3, 1: 1.0},
"CC": {0: 5.3, 1: 1.0},
"CD": {0: 241.53843370864516, 1: 0.0},
"CE": {0: "fwd", 1: "aft"},
"CF": {0: 0.9071847400000002, 1: 0.0},
}
)
assert pd.testing.assert_frame_equal(df, exp) is None
@@ -291,9 +290,10 @@ def test_VData_00():
("life_nb", 1): ["'hg' is not of type 'integer'"],
("life_nb", 2): ["15 is greater than the maximum of 4"],
}
df, is_valid, errors = data._validate_tab("test")
(df, is_valid, errors), xcalling = data._validate_tab("test")
assert is_valid is False
assert errors == expected_report
assert xcalling == {}
# =============================================================================
@@ -323,15 +323,10 @@ def test_IVData_00(datadir):
data.read_schema(os.path.join(indir, "test_00.schema.yaml"))
for tab in ("names", "cars", "empty"):
print('checking "%s"' % tab, end="... ")
df, is_ok, errors = data._validate_tab(tab)
try:
assert is_ok is True
except:
print("OUPS!")
pp(errors)
else:
print("OK")
(df, is_ok, errors), xcalling = data._validate_tab(tab)
assert is_ok is True
assert errors == {}
assert xcalling == {}
# ------------------------------------------------------------------------
# export and reimport to/from various formats
for extension in (".cfg", ".xlsx", ".ini"):
@@ -347,7 +342,7 @@ def test_IVData_00(datadir):
data_new.read_schema(os.path.join(indir, "test_00.schema.yaml"))
for tab in data._data.keys():
print('checking "%s"' % tab, end="... ")
df, is_ok, errors = data_new._validate_tab(tab)
(df, is_ok, errors), xcalling = data_new._validate_tab(tab)
try:
assert is_ok is True
except:
@@ -371,9 +366,20 @@ def test_IVData_02(datadir):
data = IVData()
data.read_excel(os.path.join(indir, "test_00.xlsx"))
data.read_schema(os.path.join(indir, "test_00.schema2.yaml"))
# default row reporting as "data"
ret = data.validate()
assert ret == {
"cars": {("Year", 2): ["None is not of type 'integer'"]},
"cars": {("Year", 3): ["None is not of type 'integer'"]},
"names": {"general": ["'name' is a required property"]},
}
ret = data.validate(10)
assert ret == {
"cars": {("Year", 12): ["None is not of type 'integer'"]},
"names": {"general": ["'name' is a required property"]},
}
ret = data.validate("spreadsheet")
assert ret == {
"cars": {("Year", 4): ["None is not of type 'integer'"]},
"names": {"general": ["'name' is a required property"]},
}
@@ -384,13 +390,13 @@ def test_IVData_03(datadir):
data = IVData()
data.read_excel(os.path.join(indir, "test_02.xlsx"))
data.read_schema(os.path.join(indir, "test_02.schema.yaml"))
ret = data.validate()
ret = data.validate("data")
print(ret) # TODO: remove me!
assert len(ret) == 2
expected_errors = {
"names": {"general": ["'life_nb' is a required property"]},
"french_cars": {
("brand", 2): [
("brand", 3): [
"'Citroën' is not one of ['Peugeot', 'Toyota', 'Ford', 'Renault']"
]
},
@@ -440,16 +446,17 @@ def test_IVData_04(datadir):
"type": "object",
}
assert data._schemas["geom"].build() == expected_schema
ret = data.validate()
ret = data.validate("spreadsheet")
expected_geom = pd.DataFrame(
{
"id": {1: 1.0, 2: 2.0},
"distA": {1: 0.0010000000000000005, 2: 0.002000000000000001},
"distB": {1: 14.3, 2: 15.0},
"Volume": {1: 1.5064562386943998, 2: 0.8183568665087998},
"id": {0: 1.0, 1: 2.0},
"distA": {0: 0.0010000000000000005, 1: 0.002000000000000001},
"distB": {0: 14.3, 1: 15.0},
"Volume": {0: 1.5064562386943998, 1: 0.8183568665087998},
}
)
assert pd.testing.assert_frame_equal(data._data["geom"], expected_geom) is None
actual_geom = data._data["geom"]
assert pd.testing.assert_frame_equal(actual_geom, expected_geom) is None
def test_IVData_05_failing_units(datadir):
@@ -552,10 +559,10 @@ def test_IVData_05_failing_units(datadir):
}
assert data._schemas["volume"].build() == expected_schema
ret = data.validate()
ret = data.validate("data")
expected_report = {
"mass": {
("test_mass_g", 2): ["None is not of type 'number'"],
("test_mass_g", 3): ["None is not of type 'number'"],
("test_mass_lbm", None): ["undefined units 'lbm'"],
},
"volume": {"general": ["'test_volume_foot3' is a required property"]},
@@ -696,7 +703,7 @@ def test_IVData_07_failing_units(datadir):
"'test_mass_tonne' is a required property",
"'test_mass_g' is a required property",
],
("test_mass_lb", 0): [
("test_mass_lb", 1): [
"'a' is not valid under any of the given " "schemas",
"'a' is not of type 'number'",
"'a' is not of type 'null'",
@@ -734,7 +741,7 @@ def test_IVData_08_multi_cols_uniqueness(datadir):
df = data_si.get("WithPadding_ok")
expected_df = pd.DataFrame(
{
1: {
0: {
"another": 1.0,
"block_label": "RECARO1",
"block_ref_point_id": 2.0,
@@ -742,7 +749,7 @@ def test_IVData_08_multi_cols_uniqueness(datadir):
"rail_tag": "RBL11",
"x_loc": 3500.0,
},
2: {
1: {
"another": 2.0,
"block_label": "RECARO1",
"block_ref_point_id": 2.0,
@@ -750,7 +757,7 @@ def test_IVData_08_multi_cols_uniqueness(datadir):
"rail_tag": "RBL11",
"x_loc": 8500.0,
},
3: {
2: {
"another": 3.0,
"block_label": "s1",
"block_ref_point_id": 2.0,
@@ -758,7 +765,7 @@ def test_IVData_08_multi_cols_uniqueness(datadir):
"rail_tag": "RBL12",
"x_loc": 3800.0,
},
4: {
3: {
"another": 4.0,
"block_label": "s2",
"block_ref_point_id": 2.0,
@@ -766,7 +773,7 @@ def test_IVData_08_multi_cols_uniqueness(datadir):
"rail_tag": "RBL12",
"x_loc": 3800.0,
},
5: {
4: {
"another": 5.0,
"block_label": "s1",
"block_ref_point_id": 2.0,
@@ -774,7 +781,7 @@ def test_IVData_08_multi_cols_uniqueness(datadir):
"rail_tag": "RBL12",
"x_loc": 8500.0,
},
6: {
5: {
"another": 6.0,
"block_label": "s1",
"block_ref_point_id": 2.0,
@@ -786,3 +793,27 @@ def test_IVData_08_multi_cols_uniqueness(datadir):
).T
expected_df = expected_df[[c for c in df]]
assert pd.testing.assert_frame_equal(df, expected_df, check_dtype=False) is None
def test_IVData_09_must_exist_in(datadir):
"""check must exist req"""
indir, outdir = datadir
data = IVData()
data.read_excel(os.path.join(indir, "test_06_must_exist_in.xlsx"))
data.read_schema(os.path.join(indir, "test_06_must_exist_in.yaml"))
ret = data.validate("spreadsheet")
expected_errors = {
"beams": {
("beam_id", 5): ["'value `6.0` not found in `beam_def::id`"],
("color_id", 7): [
"'a' is not of type 'integer'",
"'value `a` not found in `colors::id`",
],
},
"colors": {
("id", 4): ["'tata' is not of type 'integer'"],
("name", 4): ["5 is not of type 'string'"],
},
}
assert ret == expected_errors