Commit ce7d8057 authored by Nic

improve autodoc

parent 03018925
@@ -23,10 +23,13 @@ HEADERS = ["=", "-", '"', "'", "~"]
# =============================================================================
# a few RST helpers
# =============================================================================
def _write(txt="", indent=0, header=None):
def _write(txt="", indent=0, header=None, label=True):
    _txt = txt
    if header:
        txt = "\n%s" % txt
        txt += "\n" + len(txt) * HEADERS[header - 1] + "\n"
    if label:
        txt = "\n" + _make_label(_txt) + txt
    # indenting
    txt = textwrap.indent(txt, prefix=INDENT * indent, predicate=lambda line: True)
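The hunk cuts off before `_write`'s return statement, but the new `label` flag is easy to demonstrate. A doctest-style sketch of an assumed call (assuming `_write` returns the assembled text; note the underline length is computed after a newline is prepended, so it comes out one character longer than the title, which RST tolerates):

>>> print(_write("Fields Naming Conventions", header=2))
<BLANKLINE>
<BLANKLINE>
.. _fields_naming_conventions:
<BLANKLINE>
Fields Naming Conventions
--------------------------
<BLANKLINE>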
@@ -66,6 +69,28 @@ def _include(filename, relative_to=False, relative_prefix=None, literal=False):
    return f"\n.. {include}:: {filename}\n\n"
# =============================================================================
# slugification and label forger
# =============================================================================
ROLES = re.compile(r":\w+:")
DUPLICATED = re.compile(r"(_){2,}")
REMAINS = re.compile(r"\W")
def _slugify(text):
    text = ROLES.sub("", text)
    text = REMAINS.sub("_", text)
    text = DUPLICATED.sub("_", text)
    return text.strip("_").strip("=").lower()


def _make_label(text, prefix=""):
    text = _slugify(text)
    if prefix:
        return f"\n.. _{prefix}_{text}:\n"
    return f"\n.. _{text}:\n"
def _directive(name="", arg=None, fields=None, content=None, indent=0):
    """
    :param name: the directive itself to use
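For reference, a few traced, doctest-style calls showing what the new slugifier and label forger return (inputs are made up; outputs follow directly from the code above):

>>> _slugify("A :term:`Sheet` name, with (extras)!")
'a_sheet_name_with_extras'
>>> _make_label("Fields Naming Conventions")
'\n.. _fields_naming_conventions:\n'
>>> _make_label("Glossary", prefix="tab")
'\n.. _tab_glossary:\n'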
@@ -188,21 +213,58 @@ class VDataAutodoc:
        # =====================================================================
        # updating expected mastertoctree with head and tail chapters
        # =====================================================================
        mastertoctree = ["head.rst"] + kwargs.pop("mastertoctree") + ["tail.rst"]
        mastertoctree = (
            ["head.rst"] + kwargs.pop("mastertoctree") + ["tail.rst", "glossary.rst"]
        )
        mastertoctree = _indent("\n".join(mastertoctree), 1)
        cmd += f"-d mastertoctree='{mastertoctree}' "
        cmd += " ".join([f"-d {k}={v}" for k, v in kwargs.items()])
        print(40 * "*")
        print(cmd)
        print(40 * "*")
        args = shlex.split(cmd)
        ret = sp.run(args)
        sp.run(shlex.split(cmd))
        self.src_dir = self.target_dir / "source"
        # ---------------------------------------------------------------------
        # create head and tail files
        for chapter in ("head.rst", "tail.rst"):
            path = self.src_dir / Path(chapter)
            _new_public_file(path, titles=(("Change Me", 1),))
            title = chapter.split(".")[0].title() + " Main Title: Change me!"
            _new_public_file(path, titles=((f"{title}", 1),))
            with open(path, "a") as fh:
                fh.write("\nExample of a link to a :term:`environment`")
        with open(self.src_dir / "head.rst", "a") as fh:
            fh.write(_write("Fields Naming Conventions", header=2))
            p = "Each sheet's columns (or fields) will be presented as follows:\n\n"
            p += "* :gray:`<SHEET>`-:orange:`<field>`: Mandatory field\n"
            p += "* :gray:`<SHEET>`-:green:`<field>`: Padded field\n"
            p += "* :gray:`<SHEET>`-<field>: Optional field\n"
            p += "\nWhere ``<SHEET>`` is the tab featuring the field, and ``<field>`` is the field name."
            fh.write(_write(p))
        # create glossary
        glossary_file = self.src_dir / "glossary.rst"
        _new_public_file(glossary_file, titles=(("Glossary", 1),))
        with open(glossary_file, "a") as fh:
            # fh.write((
            #     ".. Glossary\n"
            #     "   (cf. https://www.sphinx-doc.org/en/master/usage"
            #     "/restructuredtext/directives.html#glossary\n"))
            fh.write(
                """
.. glossary::
   :sorted:

   root directory
   source directory
      The directory which, including its subdirectories, contains all
      source files for one Sphinx project.

   environment
      A structure where information about all documents under the root is
      saved, and used for cross-referencing. The environment is pickled
      after the parsing stage, so that successive runs only need to read
      and parse new and changed documents.
"""
            )
    def exists(self):
        if not self.target_dir.exists():
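RST is indentation-sensitive, so the glossary terms must stay indented under the directive once the template hits disk. A hedged alternative sketch (not the commit's code) that keeps the template visually aligned with the surrounding Python instead of flush-left, using textwrap.dedent:

import textwrap

# Dedenting on write strips the common leading whitespace while
# preserving the relative indentation Sphinx needs under ".. glossary::".
GLOSSARY = textwrap.dedent(
    """\
    .. glossary::
       :sorted:

       environment
          A structure where information about all documents under the
          root is saved, and used for cross-referencing.
    """
)

with open("glossary.rst", "a") as fh:  # path shortened for the sketch
    fh.write("\n" + GLOSSARY)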
@@ -221,11 +283,13 @@ class VDataAutodoc:
        tabs_chapters=((None, "*"),),
    ):
        processed_tabs = set()
        for tab_chapter, tabnames in tabs_chapters:
            slugged = tab_chapter.lower().replace(" ", "_") + ".rst"
            master_file = self.src_dir / f"{slugged}"
            _master_file = self.src_dir / f".{slugged}"
            _master_file.touch()
            slugged = tab_chapter.lower().replace(" ", "_")
            master_file = self.src_dir / f"{slugged}.rst"
            _master_file = self.src_dir / f".{slugged}.inc"
            with open(_master_file, "w") as fh:
                pass
            # -----------------------------------------------------------------
            # Public master file is called "input_data.rst" and is created only
            # if it doesn't exist
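The two-file pattern here is worth spelling out: the hidden `.<slug>.inc` file is rewritten (truncated) on every run, while the public `<slug>.rst` stub is only created when missing, so user edits survive regeneration. A minimal standalone sketch of that convention (names are illustrative, not from the commit):

from pathlib import Path

def refresh_pair(src_dir: Path, slug: str) -> None:
    # generated include: truncated on every run, like open(_master_file, "w")
    (src_dir / f".{slug}.inc").write_text("")
    # public stub: created once, then left to the user
    public = src_dir / f"{slug}.rst"
    if not public.exists():
        public.write_text(f"{slug}\n{'=' * len(slug)}\n")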
@@ -246,17 +310,8 @@ class VDataAutodoc:
            root_fielddir = root_tabdir / "fields"
            root_tabdir.mkdir(exist_ok=True)
            root_fielddir.mkdir(exist_ok=True)
            processed_tabs = set()
            # -----------------------------------------------------------------
            tab_header_level = 3
            # update master field with chapter
            with open(_master_file, "a") as fh:
                fh.write(_write("Conventions", header=2))
                p = "Each sheet Columns (or Fields) can be either:\n"
                p += "* :orange:`<field name>`: Mandatory field\n"
                p += "* :green:`<field name>`: Padded field\n"
                p += "* <**field name**>: Optional field\n"
                fh.write(_write(p))
            tab_header_level = 2
            if tabnames == "*":
                # retrieve remaining tabs
                tabnames = set(self.schemas.keys()) - processed_tabs
@@ -269,7 +324,7 @@ class VDataAutodoc:
                schema = self.schemas[tab]
                # -------------------------------------------------------------
                # create public tab description file `source/tabs/<tab>.rst`
                filename = root_tabdir / f"{tab}.rst"
                filename = root_tabdir / f"{tab}.inc"
                if not filename.exists():
                    _new_public_file(filename)
                # -------------------------------------------------------------
@@ -278,66 +333,61 @@ class VDataAutodoc:
                    fh.write(_write(f'Sheet "``{tab}``"', header=tab_header_level))
                    # include public tab description file `source/tabs/<tab>.rst`
                    fh.write(_include(filename, relative_to=self.src_dir))
                    # ---------------------------------------------------------
                    # fh.write(
                    #     _write(
                    #         f'"``{tab}``" Columns Description',
                    #         header=tab_header_level + 1,
                    #     )
                    # )
                    # =========================================================
                    # columns description
                    # =========================================================
                    fh.write(_write(".. glossary::\n\n"))
                    columns = {k: v["items"] for k, v in schema.columns_specs.items()}
                    for colname, colspecs in columns.items():
                        col_status = field_status(colname, schema)
                        # add label
                        if col_status == "padded":
                            fh.write(_write(f"\n{tab}/:green:`{colname}`"))
                            _col_status = ":green:`padded`"
                            definition_key = f":green:`{colname}`"
                        elif col_status == "mandatory":
                            fh.write(_write(f"\n{tab}/:orange:`{colname}`"))
                            _col_status = ":orange:`mandatory`"
                            definition_key = f":orange:`{colname}`"
                        else:
                            fh.write(_write(f"\n{tab}/**{colname}**"))
                            _col_status = ""
                            definition_key = f"{colname}"
                        definition_key = f"\n:gray:`{tab.upper()}`-" + definition_key
                        fh.write(_write(definition_key, 1))
                        if "anyOf" in colspecs:
                            anyOf = [d["type"] for d in colspecs.pop("anyOf")]
                            colspecs["type"] = ", ".join(anyOf)
                        avoid = ("example",)
                        if "default" in colspecs and colspecs["default"] == "":
                            colspecs["default"] = '""'
                        # if colname == "repeat_discard_nth":
                        #     breakpoint()
                        avoid = ("example",)
                        colspecs = {k: v for k, v in colspecs.items() if k not in avoid}
                        definition = (
                            " | ".join([f"{k}: ``{v}``" for k, v in colspecs.items()])
                            + "\n"
                        )
                        field_public_file = root_fielddir / f"{tab}::{colname}.rst"
                        colspecs["status"] = _col_status
                        field_public_file = root_fielddir / f"{tab}::{colname}.inc"
                        _new_public_file(field_public_file, clue_message=False)
                        definition_include = _include(
                            field_public_file,
                            relative_to=self.src_dir,
                        )
                        fh.write(_write(definition + definition_include, indent=1))
                        fh.write(_write(definition + definition_include, indent=2))
                        columns[colname] = colspecs.copy()
                    # ---------------------------------------------------------
                    # columns description summary
                    df = pd.DataFrame.from_dict(columns, orient="index")
                    # ---------------------------------------------------------
                    # make anyOf more readable
                    if "anyOf" in df:
                        anyOf = (
                            df.anyOf.explode().dropna().apply(lambda row: row["type"])
                        )
                        anyOf = anyOf.replace("null", np.NaN).dropna()
                        anyOf = anyOf.groupby(level=0).apply(lambda x: "/".join(x))
                        df.loc[anyOf.index, "type"] = anyOf
                        df.drop(columns="anyOf", inplace=True)
                    df = df.fillna("")
                    df.index.names = ["column"]
                    if "type" in df.columns:
                        df["type"] = df["type"].replace(
                            {
                                "number": "``float``",
                                "integer": "``int``",
                                "string": "``str``",
                            }
                        )
                    # ---------------------------------------------------------
                    # process special columns
                    mandatory = dict(
                        zip(schema.required, [True] * len(schema.required))
                    )
                    _uniq = dict(schema.uniqueness_sets)
                    if _uniq:
                        _uniq = {f"Set ({id})": v for id, v in _uniq.items()}
@@ -347,17 +397,6 @@ class VDataAutodoc:
                            uniq[col] = id
                        df["set"] = pd.Series(uniq)
                        df["set"] = df.set.fillna("")
                    if mandatory:
                        df["mandatory"] = pd.Series(mandatory)
                        df["mandatory"] = df.mandatory.fillna("")
                    if "type" in df.columns:
                        df["type"] = df["type"].replace(
                            {
                                "number": "``float``",
                                "integer": "``int``",
                                "string": "``str``",
                            }
                        )
                    # ---------------------------------------------------------
                    # bold index
                    df.reset_index(inplace=True)
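For completeness, the `mandatory` column follows the same align-then-blank idiom as the `set` column; a toy version with invented field names (where `dict.fromkeys(schema.required, True)` would be a slightly more idiomatic spelling of the `dict(zip(...))` call):

import pandas as pd

required = ["id", "name"]  # stand-in for schema.required
mandatory = dict(zip(required, [True] * len(required)))
df = pd.DataFrame(index=["id", "name", "comment"])
df["mandatory"] = pd.Series(mandatory)     # aligns on index, NaN elsewhere
df["mandatory"] = df.mandatory.fillna("")  # blank out non-required rows
print(df)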