Commit 8e425e88 authored by Nic

add multi-columns uniqueness validation

parent f163eebb
@@ -93,7 +93,10 @@ def read_configobj(fpath, sheet_name=None, row_prefix="sniff"):
     config = ConfigObj(fpath, indent_type="    ", unrepr=True, write_empty_values=True)
     # transform sections to tabs and data to columns...
     if row_prefix == "sniff":
-        row_prefix = _sniff_row_prefix(config[config.sections[0]].sections[0])
+        try:
+            row_prefix = _sniff_row_prefix(config[config.sections[0]].sections[0])
+        except IndexError:
+            row_prefix = ROW_PREFIX  # empty tab: nothing to sniff, use the default
     else:
         row_prefix = ROW_PREFIX
     for tabname, data in config.items():
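For context, `config.sections` lists the top-level tabs and each tab's own `.sections` holds the rows the prefix is sniffed from, so an empty file or empty tab is what raises the `IndexError`; a minimal ConfigObj sketch (data is illustrative):

import io
from configobj import ConfigObj

ini = io.StringIO("[asm_ok]\n[[_row_0]]\nrail_tag = ''\n")
config = ConfigObj(ini, unrepr=True)
print(config.sections)                      # ['asm_ok']
print(config[config.sections[0]].sections)  # ['_row_0'] -> prefix sniffed here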
@@ -6,7 +6,7 @@
 import logging
 import os
 import re
-from collections import defaultdict
+from collections import defaultdict, Counter
 from io import StringIO
 from pprint import pprint
@@ -20,6 +20,7 @@ import simplejson as json
 import yaml

 ureg = pint.UnitRegistry()
+UNIQUENESS_SEP = " & "


 def yamlcat(*files):
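The new `UNIQUENESS_SEP` constant is what glues a group of column names into the synthetic column key used throughout the rest of the commit; for instance:

columns = ["rail_tag", "x_loc", "block_label"]
print(" & ".join(columns))  # -> 'rail_tag & x_loc & block_label'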
@@ -295,6 +296,7 @@ class DataFrameSchematizer:
         self._is_units = False
         self._source_units = None
         self._target_units = None
+        self.uniqueness_sets = defaultdict(list)

     def build(self):
         """build and return schema"""
@@ -307,6 +309,13 @@
         for colname, desc in self.columns_specs.items():
             schema["properties"][colname] = desc
         schema["required"] = self.required
+        for uniqueness_tag, columns in self.uniqueness_sets.items():
+            dummy_col = UNIQUENESS_SEP.join(columns)
+            schema["properties"][dummy_col] = {
+                "items": {"type": "string"},
+                "type": "array",
+                "uniqueItems": True,
+            }
         return schema

     def _add_columns_from_json(self, jsontxt):
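Concretely, a group of columns registered under the same `uniqueness_id` ends up as one synthetic array property in the built schema; an illustrative fragment for the `toto` group of the fixture further down (column names taken from it):

{
    "rail_tag & x_loc & block_label": {
        "items": {"type": "string"},
        "type": "array",
        "uniqueItems": True,
    }
}

Validation then reduces multi-column uniqueness to JSON Schema's built-in `uniqueItems` check on that single array.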
@@ -339,6 +348,7 @@
         types=("integer",),
         unique=False,
         mandatory=False,
+        uniqueness_id=None,
         units=None,
         **kwargs,
     ):
@@ -350,6 +360,8 @@
             self.required.append(name)
         else:
             types.append("null")
+        if uniqueness_id:
+            self.uniqueness_sets[uniqueness_id].append(name)
         # ---------------------------------------------------------
         if len(types) > 1:
             items = {"anyOf": [{"type": typ} for typ in types]}
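The bookkeeping is a plain `defaultdict(list)` keyed by the tag; a standalone sketch of the accumulation (function name and columns are illustrative):

from collections import defaultdict

uniqueness_sets = defaultdict(list)

def add_column(name, uniqueness_id=None):
    # group the column if it takes part in a multi-column uniqueness set
    if uniqueness_id:
        uniqueness_sets[uniqueness_id].append(name)

for col in ("rail_tag", "x_loc", "block_label"):
    add_column(col, uniqueness_id="toto")
add_column("another")  # no uniqueness_id: left out of any group

print(dict(uniqueness_sets))  # {'toto': ['rail_tag', 'x_loc', 'block_label']}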
@@ -444,6 +456,16 @@
         if fillnas:
             df = df.fillna(value=fillnas, downcast="infer")
         # =====================================================================
+        # build dummy columns if required by the multiple-columns uniqueness
+        # =====================================================================
+        dummies = []
+        for uniqueness_tag, columns in self.uniqueness_sets.items():
+            _df = df[columns].astype(str)
+            dummy_col = UNIQUENESS_SEP.join(columns)
+            dummies.append(dummy_col)  # to delete them later on
+            df[dummy_col] = _df.apply(UNIQUENESS_SEP.join, axis=1)
+        # =====================================================================
         # second validation
         # =====================================================================
         # df -> dict -> json -> dict to convert NaN to None
@@ -453,6 +475,8 @@
             check_types=False,
             # report=early_report,
         )
+        if dummies:
+            df.drop(columns=dummies, inplace=True)
         report = {**early_report, **report}
         return df, len(report) == 0, dict(report)
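The dummy-column trick can be reproduced in isolation: cast the grouped columns to `str`, join them row-wise, and let the schema's `uniqueItems` flag duplicates; a minimal pandas sketch (data mirrors the failing fixture below):

import pandas as pd

UNIQUENESS_SEP = " & "
columns = ["rail_tag", "x_loc", "block_label"]
df = pd.DataFrame(
    {
        "rail_tag": ["RBL11", "RBL11", "RBL11"],
        "x_loc": [3500, 3500, 8500],
        "block_label": ["RECARO1", "RECARO1", "RECARO1"],
    }
)
df[UNIQUENESS_SEP.join(columns)] = df[columns].astype(str).apply(UNIQUENESS_SEP.join, axis=1)
print(df["rail_tag & x_loc & block_label"].tolist())
# ['RBL11 & 3500 & RECARO1', 'RBL11 & 3500 & RECARO1', 'RBL11 & 8500 & RECARO1']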
@@ -487,13 +511,23 @@ def _validate(document, validator, check_types=None, report=None):
             # want typing-related issues
             continue
         try:
+            # generic regular validation catcher
             col, row, *rows = error.absolute_path
         except ValueError:
-            report["general"].append(error.message)
-        else:
-            if error.message not in report[(col, row)]:
-                report[(col, row)].append(error.message)
-                report[(col, row)].extend([e.message for e in error.context])
+            if error.absolute_schema_path[-1] == "uniqueItems":
+                non_uniques = {k for k, v in Counter(error.instance).items() if v > 1}
+                col = error.absolute_schema_path[-2]
+                row = "?"
+                if len(non_uniques) > 1:
+                    error.message = "values %s are not unique" % non_uniques
+                else:
+                    error.message = "value %s is not unique" % non_uniques
+            else:
+                report["general"].append(error.message)
+                continue
+        if error.message not in report[(col, row)]:
+            report[(col, row)].append(error.message)
+            report[(col, row)].extend([e.message for e in error.context])
     return report
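The `Counter`-based extraction of the offending values is easy to check on its own:

from collections import Counter

instance = [
    "RBL11 & 3500 & RECARO1",
    "RBL11 & 3500 & RECARO1",
    "RBL11 & 8500 & RECARO1",
]
non_uniques = {k for k, v in Counter(instance).items() if v > 1}
print("value %s is not unique" % non_uniques)
# value {'RBL11 & 3500 & RECARO1'} is not unique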
---
asm: &asm
  rail_tag:
    types: string
    unique: false
    uniqueness_id: toto
    mandatory: true
  x_loc:
    <<: *length
    mandatory: true
    uniqueness_id: toto
  block_label:
    types: string
    unique: false
    mandatory: true
    uniqueness_id: toto
  block_ref_point_id:
    mandatory: true
  another:
    unique: true
asm_ok:
  <<: *asm
asm_failing:
  <<: *asm
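Note that the `*length` anchor is expected to come from the companion schema file (`test_05_SI.yaml`, not shown here), since both files are read together by `read_schema`. A self-contained sketch of how the YAML merge keys expand, with a stand-in `length` anchor:

import yaml

doc = """
length: &length
  types: number        # stand-in: the real anchor lives in test_05_SI.yaml
asm: &asm
  x_loc:
    <<: *length
    uniqueness_id: toto
asm_ok:
  <<: *asm
"""
print(yaml.safe_load(doc)["asm_ok"]["x_loc"])
# {'types': 'number', 'uniqueness_id': 'toto'}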
[asm_ok]
    [[_row_0]]
        rail_tag = ''
        x_loc = 'mm'
        block_label = ''
        block_ref_point_id = ''
        another = ''
    [[_row_1]]
        rail_tag = 'RBL11'
        x_loc = 3500
        block_label = 'RECARO1'
        block_ref_point_id = 2.0
        another = 1.0
    [[_row_2]]
        rail_tag = 'RBL11'
        x_loc = 8500
        block_label = 'RECARO1'
        block_ref_point_id = 2.0
        another = 2.0
[asm_failing]
    [[_row_0]]
        rail_tag = ''
        x_loc = 'mm'
        block_label = ''
        block_ref_point_id = ''
        another = ''
    [[_row_1]]
        rail_tag = 'RBL11'
        x_loc = 3500
        block_label = 'RECARO1'
        block_ref_point_id = 2.0
        another = 1.0
    [[_row_2]]
        rail_tag = 'RBL11'
        x_loc = 3500
        block_label = 'RECARO1'
        block_ref_point_id = 2.0
        another = 1.0
    [[_row_3]]
        rail_tag = 'RBL11'
        x_loc = 8500
        block_label = 'RECARO1'
        block_ref_point_id = 3.0
        another = 2.0
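In `asm_failing`, `_row_1` and `_row_2` carry the same `(rail_tag, x_loc, block_label)` triple while `_row_3` differs in `x_loc`, which is exactly what the dummy-column check should flag; a quick pandas cross-check on the fixture values (units row `_row_0` skipped):

import pandas as pd

df = pd.DataFrame(
    {
        "rail_tag": ["RBL11", "RBL11", "RBL11"],
        "x_loc": [3500, 3500, 8500],
        "block_label": ["RECARO1", "RECARO1", "RECARO1"],
        "another": [1.0, 1.0, 2.0],
    }
)
print(df.duplicated(subset=["rail_tag", "x_loc", "block_label"], keep=False).sum())  # 2
print(df.duplicated(subset=["another"], keep=False).sum())  # 2 -> 'another' fails unique: true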
@@ -273,7 +273,7 @@ def test_VData_00():
     # create dummy data
     data._data["test"] = pd.DataFrame(
         {
-            "id": {7: 0, 1: 1, 2: 5},
+            "id": {7: 0, 1: 1, 2: 1},
             "name": {7: "Doe", 1: "Fante", 2: "Mercury"},
             "firstname": {7: "John", 2: "Freddy", 1: "Richard"},
             "age": {7: "42", 1: 22},
@@ -286,6 +286,7 @@ def test_VData_00():
             "'42' is not of type 'integer'",
             "'42' is not of type 'null'",
         ],
+        ("id", "?"): ["value {1} is not unique"],
         ("life_nb", 0): ["5 is greater than the maximum of 4"],
         ("life_nb", 1): ["'hg' is not of type 'integer'"],
         ("life_nb", 2): ["15 is greater than the maximum of 4"],
@@ -698,3 +699,25 @@ def test_IVData_07_failing_units(datadir):
             ("test_mass_lb", "?"): "cannot convert some values to numeric",
         }
     }
+
+
+def test_IVData_08_multi_cols_uniqueness(datadir):
+    """check multi-columns uniqueness validation"""
+    indir, outdir = datadir
+    data_si = IVData()
+    data_si.read(os.path.join(indir, "test_06_multi-columns_uniqueness.ini"))
+    # -------------------------------------------------------------------------
+    # read and check schema
+    data_si.read_schema(
+        os.path.join(indir, "test_05_SI.yaml"),
+        os.path.join(indir, "test_06_data.yaml"),
+    )
+    rep = data_si.validate()
+    assert rep == {
+        "asm_failing": {
+            ("another", "?"): ["value {1.0} is not unique"],
+            ("rail_tag & x_loc & block_label", "?"): [
+                "value {'RBL11 & 3500 & RECARO1'} is not unique"
+            ],
+        }
+    }