Commit 20ff2749 authored by Nic

improve autodoc

parent 093eec45
......@@ -7,6 +7,7 @@ import subprocess as sp
from pathlib import Path
import string
import re
import textwrap
import numpy as np
import pandas as pd
......@@ -15,7 +16,7 @@ import tabulate
from gridsource import Data as IVData
from gridsource.validation import load_yaml
INDENT = " "
INDENT = " "
HEADERS = ["=", "-", '"', "'", "~"]
......@@ -26,7 +27,10 @@ def _write(txt="", indent=0, header=None):
if header:
txt = "\n%s" % txt
txt += "\n" + len(txt) * HEADERS[header - 1] + "\n"
return INDENT * indent + txt + "\n"
# indenting
txt = textwrap.indent(txt, prefix=INDENT * indent, predicate=lambda line: True)
return txt + "\n"
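# Behavior sketch: unlike the previous INDENT-prefix concatenation,
# textwrap.indent with predicate=lambda line: True prefixes every line,
# blank ones included, so multi-line bodies stay aligned inside RST
# directives. For example:
#   textwrap.indent("a\n\nb", "    ", predicate=lambda line: True)
#   -> "    a\n    \n    b"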
def _indent(txt, indent):
......@@ -50,6 +54,8 @@ def _comment(txt):
def _include(filename, relative_to=False):
if isinstance(filename, str):
filename = Path(filename)
if relative_to:
filename = filename.relative_to(relative_to)
return f"\n.. include:: {filename}\n\n"
......@@ -94,6 +100,36 @@ def _directive(name="", arg=None, fields=None, content=None, indent=0):
return "\n".join(o)
def _new_public_file(
filename,
titles=(),
includes=(),
):
"""create a file at `filename` path structured as follows:
titles[0][0], leveled at level titlels[0][1]
titles[1][0], leveled at level titlels[2][1]
...
Please edit {filename} to provide adequate description.
includes[0]
includes[1]
...
"""
if not filename.exists():
with open(filename, "w") as fh:
# write titles
for title, title_level in titles:
fh.write(_write(title, header=title_level))
fh.write(_write(f"Please edit Me (``{filename}``)"))
for include_target, relative_to in includes:
fh.write(_include(include_target, relative_to=relative_to))
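# Resulting stub (sketch, assuming the HEADERS underlining above and the
# arguments used later for the public master file):
#
#   User Input
#   ==========
#
#   Introduction
#   ------------
#
#   Please edit Me (``source/input_data.rst``)
#
#   .. include:: .input_data.rst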
class VDataAutodoc:
"""Document a set of columns (aka `tab`)"""
......@@ -115,7 +151,9 @@ class VDataAutodoc:
else:
self.src_dir = self.target_dir / "source"
def create(self, project_name, author, version, lang="en", exist_ok=False):
def create(
self, project_name, author, version, lang="en", exist_ok=False, templatedir=None
):
if self.target_dir.exists():
if not exist_ok:
raise FileExistsError(f"target {self.target_dir} exists")
......@@ -123,7 +161,12 @@ class VDataAutodoc:
shutil.rmtree(self.target_dir)
# -----------------------------------------------------------------
# create project structure
cmd = f"sphinx-quickstart --sep -p {project_name} -a {author} -v {version} -l {lang} {self.target_dir} -q"
cmd = f"sphinx-quickstart --sep -p {project_name} -a {author} -v {version} -l {lang} {self.target_dir} -q --ext-mathjax"
if templatedir:
cmd += f" --templatedir {templatedir}"
print(40 * "*")
print(cmd)
print(40 * "*")
args = shlex.split(cmd)
ret = sp.run(args)
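# Note: sp.run returns a CompletedProcess; a stricter variant could verify
# that sphinx-quickstart succeeded before writing index.rst, e.g.
#   ret.check_returncode()  # raises CalledProcessError on non-zero exit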
index_content = (
......@@ -131,7 +174,7 @@ class VDataAutodoc:
"===========================================\n"
"\n"
".. toctree::\n"
" :maxdepth: 2\n"
" :maxdepth: 3\n"
" :caption: Contents:\n"
"\n"
" input_data.rst\n"
......@@ -154,6 +197,7 @@ class VDataAutodoc:
drop_columns=(),
rename_columns=(),
order_columns=("column",),
tabs_chapters=((None, "*"),),
):
# =====================================================================
# index.rst call master file `input_data.rst`
......@@ -161,128 +205,178 @@ class VDataAutodoc:
# Private master file `.input_data.rst` is created from scratch
_master_file = self.src_dir / ".input_data.rst"
with open(_master_file, "w") as fh:
fh.write(_write("Expected Input Data", header=2))
fh.write(_write("Input Data Specifications", header=1))
fh.write(_write("The following sections describe the expected data."))
# ---------------------------------------------------------------------
# Public master file is called "input_data.rst" and is created only
# if it doesn't exist
master_file = self.src_dir / "input_data.rst"
if not master_file.exists():
with open(master_file, "w") as fh:
fh.write(_write("User Input", header=1))
fh.write(_write("Introduction", header=2))
fh.write(_write("Write your static introduction here..."))
fh.write(_include(_master_file, relative_to=self.src_dir))
_new_public_file(
master_file,
titles=(("User Input", 1), ("Introduction", 2)),
includes=((_master_file, self.src_dir),),
)
# =====================================================================
# processing tabs
# intended file structure is:
# self.src_dir ("source")
# + root_tabdir ("source/tabs")
# + tabname.rst
# + .tabname.rst
# =====================================================================
tabdir = self.src_dir / "tabs"
tabdir.mkdir(exist_ok=True)
for tab, schema in self.schemas.items():
# if tab == "dimensionalities":
# self._write_dimensionalities(schema)
# continue
if tab in skip_tabs:
continue
# -----------------------------------------------------------------
# update private master file
with open(_master_file, "a") as fh:
fh.write(_include(tabdir / f".{tab}.rst", relative_to=self.src_dir))
# -----------------------------------------------------------------
# create public tab description file `source/tabs/<tab>.rst`
filename = tabdir / f"{tab}.rst"
if not filename.exists():
with open(filename, "w") as fh:
root_tabdir = self.src_dir / "tabs"
root_tabdir.mkdir(exist_ok=True)
processed_tabs = set()
for tab_chapter, tabnames in tabs_chapters:
if tab_chapter is None:
tab_header_level = 2
else:
tab_header_level = 3
# update master file with chapter
with open(_master_file, "a") as fh:
fh.write(_write(tab_chapter, header=2))
if tabnames == "*":
# retrieve remaining tabs
tabnames = set(self.schemas.keys()) - processed_tabs
for tab in tabnames:
processed_tabs.add(tab)
if tab in skip_tabs:
continue
schema = self.schemas[tab]
# -----------------------------------------------------------------
# update private master file
with open(_master_file, "a") as fh:
fh.write(
_write(
f"Please edit ``{filename}`` to provide adequate description"
)
_include(root_tabdir / f".{tab}.rst", relative_to=self.src_dir)
)
# -----------------------------------------------------------------
# create private tab description file `source/tabs/.<tab>.rst`
_filename = tabdir / f".{tab}.rst"
with open(_filename, "w") as fh:
fh.write(_write(f"Tab ``{tab}``", header=2))
fh.write(_write(f"Specifications", header=3))
# include public tab description file `source/tabs/<tab>.rst`
fh.write(_include(filename, relative_to=self.src_dir))
fh.write(_write("Data Types", header=3))
# =================================================================
# columns description
# =================================================================
columns = {k: v["items"] for k, v in schema.columns_specs.items()}
df = pd.DataFrame.from_dict(columns, orient="index")
fh.write(_write("-------------------------"))
# -----------------------------------------------------------------
# make anyOf more readable
if "anyOf" in df:
anyOf = df.anyOf.explode().dropna().apply(lambda row: row["type"])
anyOf = anyOf.replace("null", np.NaN).dropna()
anyOf = anyOf.groupby(level=0).apply(lambda x: "/".join(x))
df.loc[anyOf.index, "type"] = anyOf
df.drop(columns="anyOf", inplace=True)
df = df.fillna("")
df.index.names = ["column"]
# -------------------------------------------------------------
# process special columns
mandatory = dict(zip(schema.required, [True] * len(schema.required)))
_uniq = dict(schema.uniqueness_sets)
if _uniq:
uniq = {}
for id, cols in _uniq.items():
for col in cols:
uniq[col] = id
df["set"] = pd.Series(uniq)
df["set"] = df.set.fillna("")
if mandatory:
df["mandatory"] = pd.Series(mandatory)
df["mandatory"] = df.mandatory.fillna("")
if "type" in df.columns:
df["type"] = df["type"].replace(
{
"number": "``float``",
"integer": "``int``",
"string": "``str``",
}
)
# -------------------------------------------------------------
# bold index
df.reset_index(inplace=True)
df["column"] = "**" + df["column"].astype(str) + "**"
# -------------------------------------------------------------
# order columns
cols = list(order_columns)
cols += [c for c in df if c not in cols] # append remaining columns
cols = [c for c in cols if c in df] # keep only existing columns
df = df[cols]
# -------------------------------------------------------------
# drop columns
_drop_columns = [c for c in drop_columns if c in df]
if _drop_columns:
df = df.drop(columns=_drop_columns)
# -------------------------------------------------------------
# rename columns
_rename_columns = {
prev: new for prev, new in rename_columns.items() if prev in df
}
if _rename_columns:
df = df.rename(columns=_rename_columns)
# -------------------------------------------------------------
# clean and order
df = df.fillna("")
table = tabulate.tabulate(
df, headers="keys", tablefmt="rst", showindex=False
)
fh.write(_write(table))
if _uniq:
# create public tab description file `source/tabs/<tab>.rst`
filename = root_tabdir / f"{tab}.rst"
if not filename.exists():
_new_public_file(filename)
# -----------------------------------------------------------------
# create private tab description file `source/tabs/.<tab>.rst`
_filename = root_tabdir / f".{tab}.rst"
with open(_filename, "w") as fh:
fh.write(_write(f'Sheet "``{tab}``"', header=tab_header_level))
# fh.write(_write(f"Specifications", header=tab_header_level + 1))
# include public tab description file `source/tabs/<tab>.rst`
fh.write(_include(filename, relative_to=self.src_dir))
fh.write(
_write(
"the following sets of columns combination need to be **unique**:"
f'"``{tab}``" Columns Specifications',
header=tab_header_level + 1,
)
)
for uniq, cols in _uniq.items():
columns = ", ".join([f"``{c}``" for c in cols])
fh.write(_write(f" * ``{uniq}``: {columns}"))
# =================================================================
# columns description
# =================================================================
columns = {k: v["items"] for k, v in schema.columns_specs.items()}
df = pd.DataFrame.from_dict(columns, orient="index")
# -----------------------------------------------------------------
# make anyOf more readable
if "anyOf" in df:
anyOf = (
df.anyOf.explode().dropna().apply(lambda row: row["type"])
)
anyOf = anyOf.replace("null", np.NaN).dropna()
anyOf = anyOf.groupby(level=0).apply(lambda x: "/".join(x))
df.loc[anyOf.index, "type"] = anyOf
df.drop(columns="anyOf", inplace=True)
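# Example (hypothetical cell): [{"type": "string"}, {"type": "null"}]
# explodes to one row per dict; "null" entries are dropped and the
# level-0 groupby joins the survivors, e.g. "string" or "string/integer".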
df = df.fillna("")
df.index.names = ["column"]
# -------------------------------------------------------------
# process special columns
mandatory = dict(
zip(schema.required, [True] * len(schema.required))
)
_uniq = dict(schema.uniqueness_sets)
if _uniq:
uniq = {}
for id, cols in _uniq.items():
for col in cols:
uniq[col] = id
df["set"] = pd.Series(uniq)
df["set"] = df.set.fillna("")
if mandatory:
df["mandatory"] = pd.Series(mandatory)
df["mandatory"] = df.mandatory.fillna("")
if "type" in df.columns:
df["type"] = df["type"].replace(
{
"number": "``float``",
"integer": "``int``",
"string": "``str``",
}
)
# -------------------------------------------------------------
# bold index
df.reset_index(inplace=True)
df["column"] = "**" + df["column"].astype(str) + "**"
# -------------------------------------------------------------
# order columns
cols = list(order_columns)
cols += [c for c in df if c not in cols] # append remaining columns
cols = [
    c for c in cols if c in df
]  # keep only existing columns
df = df[cols]
# -------------------------------------------------------------
# drop columns
_drop_columns = [c for c in drop_columns if c in df]
if _drop_columns:
df = df.drop(columns=_drop_columns)
# -------------------------------------------------------------
# rename columns
_rename_columns = {
prev: new for prev, new in rename_columns.items() if prev in df
}
if _rename_columns:
df = df.rename(columns=_rename_columns)
# -------------------------------------------------------------
# clean and order
df = df.fillna("")
table = tabulate.tabulate(
df, headers="keys", tablefmt="rst", showindex=False
)
# ---------------------------------------------------------
# table caption
fh.write(_write(f"\n.. table:: {tab} columns specifications\n"))
fh.write(_write(table, indent=1))
if _uniq:
fh.write(
_write(
"\nthe following sets of columns combination need to be **unique**:"
)
)
for uniq, cols in _uniq.items():
columns = ", ".join([f"``{c}``" for c in cols])
fh.write(_write(f" * ``{uniq}``: {columns}"))
# ---------------------------------------------------------
# xref
if schema.xcalling:
df = pd.DataFrame(
schema.xcalling, index=["Xref sheet", "Xref column"]
).T
df.index.names = ["column"]
df.reset_index(inplace=True)
table = tabulate.tabulate(
df, headers="keys", tablefmt="rst", showindex=False
)
fh.write(
_write(
"**The following columns need to refer to existing values from other sheet(s):**"
)
)
# ---------------------------------------------------------
# XREF table caption
fh.write(
_write(f"\n.. table:: {tab} columns cross-references\n")
)
fh.write(_write(table, indent=1))
# =============================================================================
......
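For reference, a minimal sketch of the RST grid that `tabulate` emits for the column-specification tables above (column names and values are illustrative; output shown roughly):

    import tabulate

    rows = [["**name**", "``str``", True], ["**age**", "``int``", ""]]
    print(tabulate.tabulate(rows, headers=["column", "type", "mandatory"], tablefmt="rst"))
    # ========  =======  ===========
    # column    type     mandatory
    # ========  =======  ===========
    # **name**  ``str``  True
    # **age**   ``int``
    # ========  =======  ===========

Sphinx parses this "simple table" syntax directly, which is why the tables can be written verbatim under the `.. table::` directives.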