# -*- coding: utf-8 -*-
# vim: ai ts=4 sts=4 et sw=4 nu

import os
import json
import shutil
import itertools

import requests

from data import content_file, mirror
from backend.catalog import get_catalogs
from backend.download import get_content_cache, unarchive
from util import get_temp_folder, get_checksum, ONE_GiB, ONE_MiB, CLILogger

# prepare CONTENTS from JSON file
with open(content_file, "r") as fp:
    CONTENTS = json.load(fp)
    for key, dl_data in CONTENTS.items():
        if "url" in dl_data.keys():
            CONTENTS[key]["url"] = CONTENTS[key]["url"].format(mirror=mirror)
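
# illustrative CONTENTS entry (a sketch ; field values are hypothetical,
# the real ones live in the JSON file at `content_file`):
#   "nomad_apk": {"url": "http://{mirror}/nomad/NomadEducation.apk",
#                 "name": "NomadEducation.apk", "checksum": "<checksum>",
#                 "archive_size": 12345678, "expanded_size": 12345678}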


def get_content(key):
    if key not in CONTENTS:
        raise KeyError("requested content `{}` is not in CONTENTS".format(key))
    return CONTENTS.get(key)
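
# e.g. get_content("nomad_apk") returns that entry's dict ; unknown keys
# fail early with KeyError instead of mid-download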


def isremote(path_or_url):
    return path_or_url.startswith("http")


def isarchive(fpath):
    # NB: os.path.splitext() keeps only the last suffix (".bz2" for
    # "foo.tar.bz2"), so check full endings instead
    return fpath.endswith((".zip", ".tar", ".tar.bz2", ".tar.gz", ".tar.xz"))


def get_alien_content(path_or_url):
    return (
        get_remote_content(path_or_url)
        if isremote(path_or_url)
        else get_local_content(path_or_url)
    )
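
# e.g. (sketch): get_alien_content("http://mirror/pack.zip") builds a
# remote content dict ; get_alien_content("/data/pack.zip") a local one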


def get_local_content(fpath):
    """ content-like dict for a user-provided local file

        WARN: file should be copied into cache manually """

    fname = os.path.basename(fpath)
    fsize = os.path.getsize(fpath)
    assert fsize > 0
    return {
        "url": "file://{fpath}".format(fpath=fpath),
        "name": fname,
        "checksum": None,
        "copied_on_destination": False,
        "archive_size": fsize,
        # assumption: archive contents expand to ~120% of the archive size
        "expanded_size": fsize * 1.2 if isarchive(fpath) else fsize,
    }


def get_remote_content(url):
    fname = os.path.basename(url)
    # follow redirects so the final response's Content-Length is used
    fsize = int(requests.head(url, allow_redirects=True).headers["Content-Length"])
    assert fsize > 0
    return {
        "url": url,
        "name": fname,
        "checksum": None,
        "copied_on_destination": False,
        "archive_size": fsize,
        # assumption: archive contents expand to ~120% of the archive size
        "expanded_size": fsize * 1.2 if isarchive(url) else fsize,
    }
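
# both helpers above return the same content-like shape ; e.g. for a
# hypothetical 1 MiB remote .zip:
#   {"url": "http://mirror/pack.zip", "name": "pack.zip", "checksum": None,
#    "copied_on_destination": False, "archive_size": 1048576,
#    "expanded_size": 1258291.2}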


def get_collection(
    edupi=False,
    edupi_resources=None,
    nomad=False,
    mathews=False,
    packages=[],
    kalite_languages=[],
    wikifundi_languages=[],
    aflatoun_languages=[],
):
    """ builds complete list of callbacks and options for selected contents

        returns a list of tuples:
            (project_name, get_content_callback, run_actions_callback, kwargs)

        - project_name: a string describing the project (for progress/UI)

        - kwargs: a dict of arguments to pass to callbacks

        - get_content_callback:
            expects kwargs
            returns a list of contents (get_content)

        - run_actions_callback:
            expects cache_folder, mount_point, logger and kwargs
            runs the action for the project (copy content into mount_point)
            no return value
        """

    collection = []

    if edupi:
        collection.append(
            (
                "EduPi",
                get_edupi_contents,
                run_edupi_actions,
                {"enable": edupi, "resources_path": edupi_resources},
            )
        )

    if nomad:
        collection.append(
            ("NomadEducation", get_nomad_contents, run_nomad_actions, {"enable": nomad})
        )

    if mathews:
        collection.append(
            (
                "MathMathews",
                get_mathews_contents,
                run_mathews_actions,
                {"enable": mathews},
            )
        )

    if packages:
        collection.append(
            (
                "Packages",
                get_packages_contents,
                run_packages_actions,
                {"packages": packages},
            )
        )

    if kalite_languages:
        collection.append(
            (
                "KA-Lite",
                get_kalite_contents,
                run_kalite_actions,
                {"languages": kalite_languages},
            )
        )

    if wikifundi_languages:
        collection.append(
            (
                "Wikifundi",
                get_wikifundi_contents,
                run_wikifundi_actions,
                {"languages": wikifundi_languages},
            )
        )

    if aflatoun_languages:
        collection.append(
            (
                "Aflatoun",
                get_aflatoun_contents,
                run_aflatoun_actions,
                {"languages": aflatoun_languages},
            )
        )

    return collection
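
# usage sketch (hypothetical selection):
#   get_collection(nomad=True, kalite_languages=["fr"]) ->
#   [("NomadEducation", get_nomad_contents, run_nomad_actions,
#     {"enable": True}),
#    ("KA-Lite", get_kalite_contents, run_kalite_actions,
#     {"languages": ["fr"]})]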


def get_all_contents_for(collection):
    """ flat list of contents for the collection """
    return itertools.chain.from_iterable(
        [content_dl_cb(**cb_kwargs) for _, content_dl_cb, _, cb_kwargs in collection]
    )
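
# NB: returns an itertools.chain (single-use iterator), not a list ;
# each caller below requests a fresh one, e.g.:
#   sizes = [item["archive_size"] for item in get_all_contents_for(collection)]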


def get_edupi_contents(enable=False, resources_path=None):
    """ edupi: has no large downloads. might have user-specified one """
    return [get_alien_content(resources_path)] if resources_path else []


def get_nomad_contents(enable=False):
    """ nomad: only contains one APK """
    return [get_content("nomad_apk")]


def get_mathews_contents(enable=False):
    """ mathews: only contains one APK """
    return [get_content("mathews_apk")]


def get_kalite_contents(languages=[]):
    """ kalite: medium lang packs and huge tarball of videos for each lang """

    return [
        get_content("kalite_langpack_{lang}".format(lang=lang)) for lang in languages
    ] + [get_content("kalite_videos_{lang}".format(lang=lang)) for lang in languages]


def get_wikifundi_contents(languages=[]):
    """ wikifundi: small size parsoid + large language pack for each lang """
    return [
        get_content("wikifundi_langpack_{lang}".format(lang=lang)) for lang in languages
    ]


def get_aflatoun_contents(languages=[]):
    """ aflatoun: single large tarball with content + mini lang packs """
    return [get_content("aflatoun_content")] + [
        get_content("aflatoun_langpack_{lang}".format(lang=lang)) for lang in languages
    ]


def get_package_content(package_id):
    """ content-like dict for packages (zim file or static site) """
    for catalog in get_catalogs(CLILogger()):
        try:
            package = catalog["all"][package_id]
            package.update({"ext": "zip" if package["type"] != "zim" else "zim"})
            package.update({"langid": package.get("langid") or package_id})
            return {
                "url": package["url"],
                "name": "{langid}.{ext}".format(**package),
                "checksum": package["sha256sum"],
                "archive_size": package["size"],
                # add a 10% margin for non-zim (zip file mostly)
                "expanded_size": package["size"] * 1.1
                if package["type"] != "zim"
                else package["size"],
            }
        except KeyError:
            continue
    # package_id was not found in any catalog
    return None


def get_packages_contents(packages=[]):
    """ ideacube: ZIM file or ZIP file for each package """
    return [
        get_package_content(package)
        for package in packages
        if get_package_content(package) is not None
    ]
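
# usage sketch (package ID is hypothetical ; real IDs come from the
# catalogs): get_packages_contents(["wikipedia.fr"]) ->
#   [{"url": "...", "name": "wikipedia.fr.zim", "checksum": "...",
#     "archive_size": ..., "expanded_size": ...}]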


def extract_and_move(content, cache_folder, root_path, final_path, logger):
    """ extract compressed archive into mount-point

        moves resulting file or folder to desired location """

    # retrieve archive path
    archive_fpath = get_content_cache(content, cache_folder, True)

    logger.std("Extracting {src} to {dst}".format(src=archive_fpath, dst=final_path))

    # extract to a temp folder on root_path
    extract_folder = get_temp_folder(root_path)
    unarchive(archive_fpath, extract_folder, logger)

    # move useful content to final path
    useful_path = (
        os.path.join(extract_folder, content["folder_name"])
        if "folder_name" in content.keys()
        else extract_folder
    )
    shutil.move(useful_path, final_path)

    # remove temp dir
    shutil.rmtree(extract_folder, ignore_errors=True)
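
# flow sketch: for content {"folder_name": "videos", ...}, the archive is
# unpacked into <root_path>/<temp>/ and <temp>/videos alone is moved to
# final_path before the temp folder is discarded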


def copy(content, cache_folder, final_path, logger):
    """ copy a file from the cache into desired location (on mount point) """

    # retrieve archive path
    archive_fpath = get_content_cache(content, cache_folder, True)

    logger.std("Copying {src} to {dst}".format(src=archive_fpath, dst=final_path))

    # copy file to final path
    shutil.copy(archive_fpath, final_path)


def run_edupi_actions(
    cache_folder, mount_point, logger, enable=False, resources_path=None
):
    """ no action for EduPi ; everything within ansiblecube """
    if not enable or not resources_path:
        return

    extract_and_move(
        content=get_alien_content(resources_path),
        cache_folder=cache_folder,
        root_path=mount_point,
        final_path=os.path.join(mount_point, "edupi_resources"),
        logger=logger,
    )


def run_nomad_actions(cache_folder, mount_point, logger, enable=False):
    """ copy downloaded APK """
    if not enable:
        return

    nomad_apk = get_content("nomad_apk")
    nomad_folder = os.path.join(mount_point, "nomad")
    os.makedirs(nomad_folder, exist_ok=True)
    copy(
        content=nomad_apk,
        cache_folder=cache_folder,
        final_path=os.path.join(nomad_folder, nomad_apk["name"]),
        logger=logger,
    )


def run_mathews_actions(cache_folder, mount_point, logger, enable=False):
    """ copy downloaded APK """
    if not enable:
        return

    mathews_apk = get_content("mathews_apk")
    mathews_folder = os.path.join(mount_point, "mathews")
    os.makedirs(mathews_folder, exist_ok=True)
    copy(
        content=mathews_apk,
        cache_folder=cache_folder,
        final_path=os.path.join(mathews_folder, mathews_apk["name"]),
        logger=logger,
    )


def run_kalite_actions(cache_folder, mount_point, logger, languages=[]):
    """ kalite: copy lang packs (ZIP) as-is and extract videos """
    if not languages:
        return

    for lang in languages:
        # language pack
        lang_key = "kalite_langpack_{lang}".format(lang=lang)
        lang_pack = get_content(lang_key)
        copy(
            content=lang_pack,
            cache_folder=cache_folder,
            final_path=os.path.join(mount_point, lang_pack["name"]),
            logger=logger,
        )

        # videos
        videos = get_content("kalite_videos_{lang}".format(lang=lang))
        extract_and_move(
            content=videos,
            cache_folder=cache_folder,
            root_path=mount_point,
            final_path=os.path.join(mount_point, videos["folder_name"]),
            logger=logger,
        )


def run_wikifundi_actions(cache_folder, mount_point, logger, languages=[]):
    """ wikifundi: extract parsoid and all lang packs """

    if not languages:
        return

    for lang in languages:
        lang_key = "wikifundi_langpack_{lang}".format(lang=lang)
        content = get_content(lang_key)
        extract_and_move(
            content=content,
            cache_folder=cache_folder,
            root_path=mount_point,
            final_path=os.path.join(mount_point, lang_key),
            logger=logger,
        )


def run_aflatoun_actions(cache_folder, mount_point, logger, languages=[]):
    """ aflatoun: copy lang packs (ZIP) as-is and extract content archive """

    if not languages:
        return

    for lang in languages:
        # language pack
        lang_key = "aflatoun_langpack_{lang}".format(lang=lang)
        lang_pack = get_content(lang_key)
        copy(
            content=lang_pack,
            cache_folder=cache_folder,
            final_path=os.path.join(mount_point, lang_pack["name"]),
            logger=logger,
        )

    extract_and_move(
        content=get_content("aflatoun_content"),
        cache_folder=cache_folder,
        root_path=mount_point,
        final_path=os.path.join(mount_point, "aflatoun_content"),
        logger=logger,
    )


def run_packages_actions(cache_folder, mount_point, logger, packages=[]):
    """ ZIM files are used directly by kiwix-serve """

    # ensure packages folder exists: must match `zim_path` in ansiblecube
    packages_folder = os.path.join(mount_point, "packages")
    os.makedirs(packages_folder, exist_ok=True)

    for package in packages:
        content = get_package_content(package)
        logger.std("Copying {p} to {f}".format(p=content["name"], f=packages_folder))

        # retrieve downloaded path
        package_fpath = get_content_cache(content, cache_folder, True)

        # copy to the packages folder
        shutil.copy(package_fpath, os.path.join(packages_folder, content["name"]))


def content_is_cached(content, cache_folder, check_sum=False):
    """ whether a content is already present in cache """
    content_fpath = os.path.join(cache_folder, content.get("name"))
    if not os.path.exists(content_fpath) or os.path.getsize(
        content_fpath
    ) != content.get("archive_size"):
        return False

    if check_sum:
        return get_checksum(content_fpath) == content.get("checksum")

    return True
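
# usage sketch: the default check only compares file size ; pass
# check_sum=True to also hash multi-GiB archives (slower), e.g.:
#   content_is_cached(get_content("nomad_apk"), cache_folder, check_sum=True)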


def get_collection_download_size(collection):
    """ data usage to download all of the collection """
    return sum([item.get("archive_size") for item in get_all_contents_for(collection)])


def get_collection_download_size_using_cache(collection, cache_folder):
    """ data usage to download missing elements of the collection """
    return sum(
        [
            item.get("archive_size")
            for item in get_all_contents_for(collection)
            if not content_is_cached(item, cache_folder)
        ]
    )


def get_expanded_size(collection, add_margin=True):
    """ sum of extracted sizes of all collection with 10%|2GB margin """
    total_size = sum(
        [
            # contents kept on the destination are counted twice
            # (the archive remains alongside its expanded copy)
            item.get("expanded_size") * 2
            if item.get("copied_on_destination", False)
            else item.get("expanded_size")
            for item in get_all_contents_for(collection)
        ]
    )

    # add a 2% margin ; make sure it's at least 2GB
    margin = max([2 * ONE_GiB, total_size * 0.02]) if add_margin else 0
    return total_size + margin
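
# worked example (hypothetical sizes): two contents of 10 GiB expanded,
# one of them copied_on_destination -> total = 10 + 2 * 10 = 30 GiB ;
# margin = max(2 GiB, 2% of 30 GiB) = 2 GiB -> 32 GiB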


def get_required_image_size(collection):
    """ minimal image size to host the master root partition and contents """
    required_size = sum(
        [
            get_content("hotspot_master_image").get("root_partition_size"),
            get_expanded_size(collection),
        ]
    )

    return required_size + ONE_MiB * 256  # make sure we have some free space
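
# worked example (hypothetical sizes): 7 GiB root partition + 20 GiB of
# expanded contents + 256 MiB of slack -> ~27.25 GiB required image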


def get_required_building_space(collection, cache_folder, image_size=None):
    """ total required space to host downlaods and image """

    # the master image
    # we neglect the master's expanded size as it is going to be moved
    # to the image path and resized in-place (never reduced)
    base_image_size = get_content("hotspot_master_image").get("archive_size")

    # the created image
    if image_size is None:
        image_size = get_required_image_size(collection)

    # download cache
    downloads_size = get_collection_download_size_using_cache(collection, cache_folder)

    total_size = sum([base_image_size, image_size, downloads_size])

    # add a 20% margin, capped at 2GiB
    margin = min([2 * ONE_GiB, total_size * 0.2])
    return total_size + margin
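
# worked example (hypothetical sizes): 2 GiB master archive + 16 GiB image
# + 8 GiB of missing downloads = 26 GiB ; margin = min(2 GiB, 20%) = 2 GiB
# -> 28 GiB needed on the build machine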