# -*- coding: utf-8 -*-
# vim: ai ts=4 sts=4 et sw=4 nu

import os
import json
import shutil
import itertools

import requests

from data import content_file, mirror
from backend.catalog import get_catalogs
from backend.download import get_content_cache, unarchive
from util import get_temp_folder, get_checksum, ONE_GiB, ONE_MiB, CLILogger

# prepare CONTENTS from JSON file
with open(content_file, "r") as fp:
    CONTENTS = json.load(fp)
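    # substitute the configured mirror into each download URL template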
    for key, dl_data in CONTENTS.items():
        if "url" in dl_data.keys():
            CONTENTS[key]["url"] = CONTENTS[key]["url"].format(mirror=mirror)


def get_content(key):
    if key not in CONTENTS:
        raise KeyError("requested content `{}` is not in CONTENTS".format(key))
    return CONTENTS.get(key)


def isremote(path_or_url):
    return path_or_url.startswith("http")


def isarchive(fpath):
    # compare full suffixes so double extensions like ".tar.gz" are detected
    return any(
        fpath.endswith(ext)
        for ext in (".zip", ".tar", ".tar.bz2", ".tar.gz", ".tar.xz")
    )


def get_alien_content(path_or_url):
    return (
        get_remote_content(path_or_url)
        if isremote(path_or_url)
        else get_local_content(path_or_url)
    )


def get_local_content(fpath):
    """ content-like dict for a user-provided local file

        WARN: file should be copied into cache manually """

    fname = os.path.basename(fpath)
    fsize = os.path.getsize(fpath)
    assert fsize > 0
    return {
        "url": "file://{fpath}".format(fpath=fpath),
        "name": fname,
        "checksum": None,
        "copied_on_destination": False,
        "archive_size": fsize,
        "expanded_size": fsize * 1.2 if isarchive(fpath) else fsize,
    }


def get_remote_content(url):
    fname = os.path.basename(url)
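    # HEAD request: read the file size from Content-Length without downloading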
    fsize = int(requests.head(url).headers["Content-Length"])
    assert fsize > 0
    return {
        "url": url,
        "name": fname,
        "checksum": None,
        "copied_on_destination": False,
        "archive_size": fsize,
        "expanded_size": fsize * 1.2 if isarchive(url) else fsize,
    }


def get_collection(
    edupi=False,
    edupi_resources=None,
    nomad=False,
    packages=[],
    kalite_languages=[],
    wikifundi_languages=[],
    aflatoun_languages=[],
):
    """ builds complete list of callbacks and options for selected contents

        returns a list of tuples:
            (project_name, get_content_callback, run_actions_callback, kwargs)

        - project_name: a string describing the project (for progress/UI)

        - kwargs: a dict of arguments to pass to callbacks

        - get_content_callback:
            expects kwargs
            returns a list of contents (get_content)

        - run_actions_callback:
            expects cache_folder, mount_point, logger and kwargs
            runs the action for the project (copy content into mount_point)
            no return value
        """

    collection = []

    if edupi:
        collection.append(
            (
                "EduPi",
                get_edupi_contents,
                run_edupi_actions,
                {"enable": edupi, "resources_path": edupi_resources},
            )
        )

    if nomad:
        collection.append(
            ("NomadEducation", get_nomad_contents, run_nomad_actions, {"enable": nomad})
        )

    if len(packages):
        collection.append(
            (
                "Packages",
                get_packages_contents,
                run_packages_actions,
                {"packages": packages},
            )
        )

    if len(kalite_languages):
        collection.append(
            (
                "KA-Lite",
                get_kalite_contents,
                run_kalite_actions,
                {"languages": kalite_languages},
            )
        )

    if len(wikifundi_languages):
        collection.append(
            (
                "Wikifundi",
                get_wikifundi_contents,
                run_wikifundi_actions,
                {"languages": wikifundi_languages},
            )
        )

    if len(aflatoun_languages):
        collection.append(
            (
                "Aflatoun",
                get_aflatoun_contents,
                run_aflatoun_actions,
                {"languages": aflatoun_languages},
            )
        )

    return collection

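# hypothetical usage sketch (the package id and language below are made-up examples):
#   collection = get_collection(kalite_languages=["fr"], packages=["wikipedia.fr"])
#   total_bytes = get_collection_download_size(collection)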

def get_all_contents_for(collection):
    """ flat list of contents for the collection """
    return itertools.chain.from_iterable(
        [content_dl_cb(**cb_kwargs) for _, content_dl_cb, _, cb_kwargs in collection]
    )


def get_edupi_contents(enable=False, resources_path=None):
    """ edupi: has no large downloads. might have user-specified one """
    return [get_alien_content(resources_path)] if resources_path else []


def get_nomad_contents(enable=False):
    """ nomad: only contains one APK """
    return [get_content("nomad_apk")]


def get_kalite_contents(languages=[]):
    """ kalite: medium lang packs and huge tarball of videos for each lang """

    return [
        get_content("kalite_langpack_{lang}".format(lang=lang)) for lang in languages
    ] + [get_content("kalite_videos_{lang}".format(lang=lang)) for lang in languages]


def get_wikifundi_contents(languages=[]):
    """ wikifundi: small size parsoid + large language pack for each lang """
    return [
        get_content("wikifundi_langpack_{lang}".format(lang=lang)) for lang in languages
    ]


def get_aflatoun_contents(languages=[]):
    """ aflatoun: single large tarball with content + mini lang packs """
    return [get_content("aflatoun_content")] + [
        get_content("aflatoun_langpack_{lang}".format(lang=lang)) for lang in languages
    ]


def get_package_content(package_id):
    """ content-like dict for packages (zim file or static site) """
    for catalog in get_catalogs(CLILogger()):
        try:
            package = catalog["all"][package_id]
            package.update({"ext": "zip" if package["type"] != "zim" else "zim"})
            package.update({"langid": package.get("langid") or package_id})
            return {
                "url": package["url"],
                "name": "{langid}.{ext}".format(**package),
                "checksum": package["sha256sum"],
                "archive_size": package["size"],
                # add a 10% margin for non-zim (zip file mostly)
                "expanded_size": package["size"] * 1.1
                if package["type"] != "zim"
                else package["size"],
            }
        except KeyError:
            continue


def get_packages_contents(packages=[]):
    """ ideacube: ZIM file or ZIP file for each package """
    contents = [get_package_content(package) for package in packages]
    return [content for content in contents if content is not None]


def extract_and_move(content, cache_folder, root_path, final_path, logger):
    """ extract compressed archive into mount-point

        moves resulting file or folder to desired location """

    # retrieve archive path
    archive_fpath = get_content_cache(content, cache_folder, True)

    logger.std("Extracting {src} to {dst}".format(src=archive_fpath, dst=final_path))

    # extract to a temp folder on root_path
    extract_folder = get_temp_folder(root_path)
    unarchive(archive_fpath, extract_folder, logger)

    # move useful content to final path
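    # (contents may specify folder_name to keep only that sub-folder of the archive)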
    useful_path = (
        os.path.join(extract_folder, content["folder_name"])
        if "folder_name" in content.keys()
        else extract_folder
    )
    shutil.move(useful_path, final_path)

    # remove temp dir
    shutil.rmtree(extract_folder, ignore_errors=True)


def copy(content, cache_folder, final_path, logger):
    """ copy a file from the cache into desired location (on mount point) """

    # retrieve archive path
    archive_fpath = get_content_cache(content, cache_folder, True)

    logger.std("Copying {src} to {dst}".format(src=archive_fpath, dst=final_path))

    # copy file to final path
    shutil.copy(archive_fpath, final_path)


def run_edupi_actions(
    cache_folder, mount_point, logger, enable=False, resources_path=None
):
    """ no action for EduPi ; everything within ansiblecube """
    if not enable or not resources_path:
        return

    extract_and_move(
        content=get_alien_content(resources_path),
        cache_folder=cache_folder,
        root_path=mount_point,
        final_path=os.path.join(mount_point, "edupi_resources"),
        logger=logger,
    )


def run_nomad_actions(cache_folder, mount_point, logger, enable=False):
    """ copy downloaded APK """
    if not enable:
        return

    nomad_apk = get_content("nomad_apk")
    nomad_folder = os.path.join(mount_point, "nomad")
    os.makedirs(nomad_folder, exist_ok=True)
    copy(
        content=nomad_apk,
        cache_folder=cache_folder,
        final_path=os.path.join(nomad_folder, nomad_apk["name"]),
        logger=logger,
    )


def run_kalite_actions(cache_folder, mount_point, logger, languages=[]):
    """ kalite: copy lang packs (ZIP) as-is and extract videos """
    if not len(languages):
        return

    for lang in languages:
        # language pack
        lang_key = "kalite_langpack_{lang}".format(lang=lang)
        lang_pack = get_content(lang_key)
        copy(
            content=lang_pack,
            cache_folder=cache_folder,
            final_path=os.path.join(mount_point, lang_pack["name"]),
            logger=logger,
        )

        # videos
        videos = get_content("kalite_videos_{lang}".format(lang=lang))
        extract_and_move(
            content=videos,
            cache_folder=cache_folder,
            root_path=mount_point,
            final_path=os.path.join(mount_point, videos["folder_name"]),
            logger=logger,
        )


def run_wikifundi_actions(cache_folder, mount_point, logger, languages=[]):
    """ wikifundi: extract parsoid and all lang packs """

    if not len(languages):
        return

    for lang in languages:
        lang_key = "wikifundi_langpack_{lang}".format(lang=lang)
        content = get_content(lang_key)
        extract_and_move(
            content=content,
            cache_folder=cache_folder,
            root_path=mount_point,
            final_path=os.path.join(mount_point, lang_key),
            logger=logger,
        )


def run_aflatoun_actions(cache_folder, mount_point, logger, languages=[]):
    """ aflatoun: copy lang packs (ZIP) as-is and extract content archive """

    if not len(languages):
        return

    for lang in languages:
        # language pack
        lang_key = "aflatoun_langpack_{lang}".format(lang=lang)
        lang_pack = get_content(lang_key)
        copy(
            content=lang_pack,
            cache_folder=cache_folder,
            final_path=os.path.join(mount_point, lang_pack["name"]),
            logger=logger,
        )

    extract_and_move(
        content=get_content("aflatoun_content"),
        cache_folder=cache_folder,
        root_path=mount_point,
        final_path=os.path.join(mount_point, "aflatoun_content"),
        logger=logger,
    )


def run_packages_actions(cache_folder, mount_point, logger, packages=[]):
    """ ZIM files are used directly by kiwix-serve """

    # ensure packages folder exists: must match `zim_path` in ansiblecube
    packages_folder = os.path.join(mount_point, "packages")
    os.makedirs(packages_folder, exist_ok=True)

    for package in packages:
        content = get_package_content(package)
        logger.std("Copying {p} to {f}".format(p=content["name"], f=packages_folder))

        # retrieve downloaded path
        package_fpath = get_content_cache(content, cache_folder, True)

        # copy to the packages folder
        shutil.copy(package_fpath, os.path.join(packages_folder, content["name"]))


def content_is_cached(content, cache_folder, check_sum=False):
    """ whether a content is already present in cache """
    content_fpath = os.path.join(cache_folder, content.get("name"))
    if not os.path.exists(content_fpath) or os.path.getsize(
        content_fpath
    ) != content.get("archive_size"):
        return False

    if check_sum:
        return get_checksum(content_fpath) == content.get("checksum")

    return True


def get_collection_download_size(collection):
    """ data usage to download all of the collection """
    return sum([item.get("archive_size") for item in get_all_contents_for(collection)])


def get_collection_download_size_using_cache(collection, cache_folder):
    """ data usage to download missing elements of the collection """
    return sum(
        [
            item.get("archive_size")
            for item in get_all_contents_for(collection)
            if not content_is_cached(item, cache_folder)
        ]
    )


def get_expanded_size(collection, add_margin=True):
    """ sum of extracted sizes of all collection with 10%|2GB margin """
    total_size = sum(
        [
            item.get("expanded_size") * 2
            if item.get("copied_on_destination", False)
            else item.get("expanded_size")
            for item in get_all_contents_for(collection)
        ]
    )

    # add a 2% margin ; make sure it's at least 2GB
    margin = max([2 * ONE_GiB, total_size * 0.02]) if add_margin else 0
    return total_size + margin


def get_required_image_size(collection):
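    """ total image size needed to host the master root partition and the collection """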
    required_size = sum(
        [
            get_content("hotspot_master_image").get("root_partition_size"),
            get_expanded_size(collection),
        ]
    )

    return required_size + ONE_MiB * 256  # make sure we have some free space


def get_required_building_space(collection, cache_folder, image_size=None):
    """ total required space to host downlaods and image """

    # the master image
    # we neglect the master's expanded size as it is going to be moved
    # to the image path and resized in-place (never reduced)
    base_image_size = get_content("hotspot_master_image").get("archive_size")

    # the created image
    if image_size is None:
        image_size = get_required_image_size(collection)

    # download cache
    downloads_size = get_collection_download_size_using_cache(collection, cache_folder)

    total_size = sum([base_image_size, image_size, downloads_size])

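    # safety margin: 20% of the total, capped at 2 GiB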
    margin = min([2 * ONE_GiB, total_size * 0.2])
    return total_size + margin