ideascube / ARCHIVED Kiwix Hotspot

Commit 604e167e, authored Sep 21, 2018 by rgaudin
Code formatting: now using the black formatting style and tool

black enforces a coding style, removing that concern from dev tasks.
https://black.readthedocs.io/
Parent: a186ef71 · Changes: 27 files
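Before the per-file diffs, here is a minimal before/after sketch of the kind of rewrite black applies throughout this commit, drawn from the partition_boundaries.py hunks below: hand-wrapped backslash continuations become single expressions when they fit in black's default 88-column limit, and string quotes are normalized to double quotes. The `round_bound = 8` value is a placeholder used only for this illustration; the real value comes from code outside the diffed hunks.

```python
# Illustration only (not part of the commit): before black, the helper was
# wrapped by hand with a backslash:
#
#     def roundup(sector):
#         return rounddown(sector) + round_bound \
#             if sector % round_bound != 0 else sector
#
# After black, the conditional expression sits on one line and strings use
# double quotes.

round_bound = 8  # placeholder value; the real one is defined outside the diff


def rounddown(sector):
    # largest multiple of round_bound that is <= sector
    return sector - (sector % round_bound) if sector % round_bound != 0 else sector


def roundup(sector):
    # smallest multiple of round_bound that is >= sector
    return rounddown(sector) + round_bound if sector % round_bound != 0 else sector


if __name__ == "__main__":
    print(rounddown(13), roundup(13))  # 8 16
```

Running `black --check --diff .`, as in the README hook below, reports these rewrites without touching the files; running `black .` applies them in place.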
README.md

@@ -83,6 +83,17 @@ see [appveyor.yml](appveyor.yml) for windows and [.travis.yml](.travis.yml) for
 ## Contribute
 
+We now use [black](https://black.readthedocs.io) Coding Style and
+Formatting tool. Please make sure your contributions passes `black`.
+
+Sample `pre-commit` git hook:
+
+```sh
+#!/bin/sh
+#
+# check black coding style compliance and display offending lines
+exec black --check --diff .
+```
 
 presentation of the projet at Potsdam [Slides](http://wiki.kiwix.org/w/images/4/43/Pibox_installer_potsdam_2017_presentation.pdf)
 
 some notes about how the project is structured:
additional-hooks/hook-iso639.py

 from PyInstaller.utils.hooks import collect_data_files
 
-datas = collect_data_files('iso639')
+datas = collect_data_files("iso639")
ansiblecube/partition_boundaries.py

@@ -2,7 +2,7 @@
 # -*- coding: utf-8 -*-
 # vim: ai ts=4 sts=4 et sw=4 nu
-'''
+"""
 compute and print new partition boundaries for / dans /data
 
 Used by providing an image's fdisk output and requested size for partitions
 
 fdisk -l <disk> | partition_boundaries.py <root_size> <disk_size>

@@ -17,17 +17,16 @@
 - data partition end
 
 output format: <root_start> <root_end> <data_start> <data_end>
-'''
+"""
 
-from __future__ import (unicode_literals, absolute_import, division, print_function)
+from __future__ import unicode_literals, absolute_import, division, print_function
 import re
 import sys
 
 try:
     text_type = unicode  # Python 2
 except NameError:
     text_type = str  # Python 3
 
 ONE_GB = int(1e9)

@@ -35,7 +34,7 @@ ONE_GB = int(1e9)
 def main(root_size=7, disk_size=8):
 
     # sanitize input
-    if disk_size == '-':
+    if disk_size == "-":
         disk_size = None
     elif not isinstance(disk_size, int):
         disk_size = int(disk_size)

@@ -46,7 +45,9 @@ def main(root_size=7, disk_size=8):
     try:
         data = get_partitions_boundaries(
             lines=sys.stdin.read().splitlines(),
-            root_size=root_size, disk_size=disk_size)
+            root_size=root_size,
+            disk_size=disk_size,
+        )
 
         print(" ".join([text_type(x) for x in data]))

@@ -62,24 +63,26 @@ def get_partitions_boundaries(lines, root_size, disk_size=None):
     end_margin = 4194304  # 4MiB
 
     def roundup(sector):
-        return rounddown(sector) + round_bound \
-            if sector % round_bound != 0 else sector
+        return rounddown(sector) + round_bound if sector % round_bound != 0 else sector
 
     def rounddown(sector):
-        return sector - (sector % round_bound) \
-            if sector % round_bound != 0 else sector
+        return sector - (sector % round_bound) if sector % round_bound != 0 else sector
 
     # parse all lines
     number_of_sector_match = []
     second_partition_match = []
-    target_reg = r'[0-9a-zA-Z\.\-\_]+\.img' \
-        if '.img' in "\n".join(lines) else r'\/dev\/[0-9a-z]+'
+    target_reg = (
+        r"[0-9a-zA-Z\.\-\_]+\.img"
+        if ".img" in "\n".join(lines)
+        else r"\/dev\/[0-9a-z]+"
+    )
     for line in lines:
         number_of_sector_match += re.findall(
-            r"^Disk {}:.*, (\d+) sectors$".format(target_reg), line)
+            r"^Disk {}:.*, (\d+) sectors$".format(target_reg), line
+        )
         second_partition_match += re.findall(
-            r"^{}\d +(\d+) +(\d+) +\d+ +\S+ +\d+ +Linux$".format(target_reg), line)
+            r"^{}\d +(\d+) +(\d+) +\d+ +\S+ +\d+ +Linux$".format(target_reg), line
+        )
 
     # ensure we retrieved nb of sectors correctly
     if len(number_of_sector_match) != 1:

@@ -88,8 +91,7 @@ def get_partitions_boundaries(lines, root_size, disk_size=None):
     # ensure we retrieved the start of the root partition correctly
     if len(second_partition_match) != 1:
-        raise ValueError(
-            "cannot find start and/or end of root partition of disk")
+        raise ValueError("cannot find start and/or end of root partition of disk")
 
     second_partition_start = int(second_partition_match[0][0])
     second_partition_end = int(second_partition_match[0][1])

@@ -118,5 +120,5 @@ def get_partitions_boundaries(lines, root_size, disk_size=None):
     return root_start, root_end, data_start, data_end
 
 
-if __name__ == '__main__':
+if __name__ == "__main__":
     main(*sys.argv[1:])
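As a reading aid for the usage line in the docstring above (`fdisk -l <disk> | partition_boundaries.py <root_size> <disk_size>`), here is a hedged sketch of driving the script from Python. The fdisk lines are invented and only shaped to satisfy the two regular expressions visible in the hunks; real fdisk output looks different, and the script is assumed to sit in the current directory.

```python
# Sketch only: piping fdisk-style text into partition_boundaries.py, mirroring
# the usage line in the module docstring above. The sample lines are made up
# and merely match the "Disk ...: ..., N sectors" and "<image>N start end ... Linux"
# patterns from the diff.
import subprocess

fdisk_output = "\n".join(
    [
        "Disk hotspot.img: 7.5 GiB, 8053063680 bytes, 15728640 sectors",
        "hotspot.img2      137216 15728639 15591424 7.4G 83 Linux",
    ]
)

# equivalent of: fdisk -l hotspot.img | partition_boundaries.py 7 8
result = subprocess.run(
    ["python", "partition_boundaries.py", "7", "8"],
    input=fdisk_output,
    stdout=subprocess.PIPE,
    universal_newlines=True,
)
# expected shape of result.stdout: "<root_start> <root_end> <data_start> <data_end>"
print(result.stdout)
```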
ansiblecube/roles/clock/files/clock.py

 #!/usr/bin/env python
 # -*- coding: utf-8 -*-
-'''
+"""
 hardware clock management UI
 
 - set HW clock using system time
 - set HW clock manually
 - set system time using HW clock
-'''
+"""
 
 # https://linux.die.net/man/8/hwclock
 # https://afterthoughtsoftware.com/products/rasclock

@@ -50,14 +50,14 @@ body = u"""<body>
 footer = u"</html>"
 
-date_bin = '/bin/date'
-hwclock_bin = '/sbin/hwclock'
-tdctl_bin = '/usr/bin/timedatectl'
+date_bin = "/bin/date"
+hwclock_bin = "/sbin/hwclock"
+tdctl_bin = "/usr/bin/timedatectl"
 
 
 def get_output(command):
     try:
-        return subprocess.check_output(['sudo'] + command).strip()
+        return subprocess.check_output(["sudo"] + command).strip()
     except Exception as exp:
         return "ERROR: {}".format(exp)

@@ -65,36 +65,33 @@ def get_output(command):
 def application(env, start_response):
     output = ""
 
-    if env['REQUEST_URI'] == '/sys2hw':
+    if env["REQUEST_URI"] == "/sys2hw":
         # write system datetime into hardware clock
-        output = get_output([hwclock_bin, '-w'])
+        output = get_output([hwclock_bin, "-w"])
 
-    if env['REQUEST_URI'] == '/hw2sys':
+    if env["REQUEST_URI"] == "/hw2sys":
         # set system datetime using hardware clock
-        output = get_output([hwclock_bin, '-s'])
+        output = get_output([hwclock_bin, "-s"])
 
-    if env['REQUEST_URI'].startswith('/manual2hw'):
+    if env["REQUEST_URI"].startswith("/manual2hw"):
         # write a manual datetime into hardware clock
         try:
-            dt = urllib.unquote_plus(env['QUERY_STRING']) \
-                .split('datetime=')[1]
+            dt = urllib.unquote_plus(env["QUERY_STRING"]).split("datetime=")[1]
 
             # disable ntp otherwise we can't set manual date
-            get_output([tdctl_bin, 'set-ntp', 'no'])
+            get_output([tdctl_bin, "set-ntp", "no"])
 
             # set manual date
-            output = get_output([tdctl_bin, 'set-time', dt])
+            output = get_output([tdctl_bin, "set-time", dt])
 
             # re-enable ntp. if online, will overwrite our manual datetime
-            get_output([tdctl_bin, 'set-ntp', 'yes'])
+            get_output([tdctl_bin, "set-ntp", "yes"])
         except Exception as exp:
             output = "ERROR: {}".format(exp)
 
     context = {
-        'output': '<p style="color: blue; font-weight: bold;">{}</p>'.format(output),
-        'system_time': get_output(
-            [date_bin, '+"%Y-%m-%d %H:%M:%S.000000%z"'])[1:-1],
-        'hardware_time': get_output([hwclock_bin, '-r']),
+        "output": '<p style="color: blue; font-weight: bold;">{}</p>'.format(output),
+        "system_time": get_output([date_bin, '+"%Y-%m-%d %H:%M:%S.000000%z"'])[1:-1],
+        "hardware_time": get_output([hwclock_bin, "-r"]),
     }
 
-    start_response('200 OK', [('Content-Type', 'text/html')])
+    start_response("200 OK", [("Content-Type", "text/html")])
     page = header + body.format(**context) + footer
-    return [page.encode('utf-8')]
+    return [page.encode("utf-8")]
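The routing above is driven entirely by `REQUEST_URI` and, for `/manual2hw`, a `datetime=` query parameter. Below is a hedged sketch of how a client would call the three endpoints, assuming the app is served over HTTP somewhere; the address is hypothetical and the uwsgi/nginx wiring is not part of this diff.

```python
# Sketch only: exercising the three endpoints visible in clock.py above.
# BASE is a made-up address; the commit does not show how the app is served.
try:
    from urllib.parse import quote_plus  # Python 3
    from urllib.request import urlopen
except ImportError:  # Python 2, which clock.py itself targets (urllib.unquote_plus)
    from urllib import quote_plus, urlopen

BASE = "http://192.168.2.1:8000"

urlopen(BASE + "/sys2hw")  # hwclock -w: write system time into the hardware clock
urlopen(BASE + "/hw2sys")  # hwclock -s: set system time from the hardware clock
# timedatectl set-time: push a manual datetime (read back from QUERY_STRING)
urlopen(BASE + "/manual2hw?datetime=" + quote_plus("2018-09-21 12:00:00"))
```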
ansiblecube/roles/wikifundi_setup/files/add_mw_extension.py

@@ -19,41 +19,48 @@ EXTENSION_API = "https://www.mediawiki.org/w/api.php?action=query&list=extdistbr
 extension_version = "REL1_31"
 mediawiki_path = "/var/www/html"
 
-if (len(sys.argv) > 1):
+if len(sys.argv) > 1:
     extension_name = sys.argv[1]
 else:
     print("extension name needed")
 
-if (len(sys.argv) > 2):
+if len(sys.argv) > 2:
     extension_version = sys.argv[2]
 
-if (len(sys.argv) > 3):
+if len(sys.argv) > 3:
     mediawiki_path = sys.argv[3]
 
 try:
     url = urllib.request.urlopen(EXTENSION_API % extension_name)
     data = json.loads(url.read().decode())
     if "extensions" in data["query"]["extdistbranches"]:
-        if extension_version in data["query"]["extdistbranches"]["extensions"][extension_name]:
-            url = data["query"]["extdistbranches"]["extensions"][extension_name][extension_version]
+        if (
+            extension_version
+            in data["query"]["extdistbranches"]["extensions"][extension_name]
+        ):
+            url = data["query"]["extdistbranches"]["extensions"][extension_name][
+                extension_version
+            ]
         else:
-            url = data["query"]["extdistbranches"]["extensions"][extension_name]["master"]
+            url = data["query"]["extdistbranches"]["extensions"][extension_name][
+                "master"
+            ]
     else:
         print("extension %s not found" % extension_name)
         sys.exit(1)
 except URLError as e:
     print("error fetch extension url extension %s" % e)
     sys.exit(2)
 
 try:
     filename = "/tmp/extension.tgz"
     urllib.request.urlretrieve(url, filename)
 except URLError as e:
     print("error to download extension %s" % e)
     sys.exit(3)
 
-if (call(["tar", "-xf", filename, "-C", mediawiki_path + "/extensions"]) > 0):
+if call(["tar", "-xf", filename, "-C", mediawiki_path + "/extensions"]) > 0:
     print("error to extract extension tarbal")
     sys.exit(4)
 
 call(["rm", filename])
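For context, the script above takes its parameters positionally; a hedged usage sketch follows. The extension name "Cite" is purely illustrative.

```python
# Sketch only: invoking add_mw_extension.py as its argv handling above implies.
# "Cite" is a hypothetical extension name; the last two arguments fall back to
# REL1_31 and /var/www/html when omitted.
import subprocess
import sys

subprocess.check_call(
    [
        sys.executable,
        "add_mw_extension.py",
        "Cite",  # sys.argv[1]: extension name (required)
        "REL1_31",  # sys.argv[2]: branch/version (optional)
        "/var/www/html",  # sys.argv[3]: MediaWiki root (optional)
    ]
)
```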
ansiblecube/tests/conftest.py

@@ -2,25 +2,25 @@ import os
 
 def get_files(extension):
-    for root, dirnames, filenames in os.walk('.'):
+    for root, dirnames, filenames in os.walk("."):
         for filename in filenames:
             if filename.endswith(extension):
                 yield os.path.join(root, filename)
 
 
 def get_roles():
-    return sorted(os.listdir('./roles'))
+    return sorted(os.listdir("./roles"))
 
 
 def pytest_generate_tests(metafunc):
-    if 'jinja2_file' in metafunc.fixturenames:
-        metafunc.parametrize('jinja2_file', get_files('.j2'))
+    if "jinja2_file" in metafunc.fixturenames:
+        metafunc.parametrize("jinja2_file", get_files(".j2"))
 
-    if 'json_file' in metafunc.fixturenames:
-        metafunc.parametrize('json_file', get_files('.json'))
+    if "json_file" in metafunc.fixturenames:
+        metafunc.parametrize("json_file", get_files(".json"))
 
-    if 'ini_file' in metafunc.fixturenames:
-        metafunc.parametrize('ini_file', get_files('.fact'))
+    if "ini_file" in metafunc.fixturenames:
+        metafunc.parametrize("ini_file", get_files(".fact"))
 
-    if 'role' in metafunc.fixturenames:
-        metafunc.parametrize('role', get_roles())
+    if "role" in metafunc.fixturenames:
+        metafunc.parametrize("role", get_roles())
ansiblecube/tests/test_ansible.py

@@ -5,27 +5,28 @@ import pytest
 
 def test_ansible_playbook_syntax(tmpdir, role):
-    hosts = os.path.join(os.getcwd(), 'hosts')
-    roles = os.path.join(os.getcwd(), 'roles')
+    hosts = os.path.join(os.getcwd(), "hosts")
+    roles = os.path.join(os.getcwd(), "roles")
 
-    config = tmpdir.join('ansible.cfg')
-    config.write('[defaults]\n'
-                 'inventory = {hosts}\n'
-                 'roles_path = {roles}\n'.format(hosts=hosts, roles=roles))
+    config = tmpdir.join("ansible.cfg")
+    config.write(
+        "[defaults]\n"
+        "inventory = {hosts}\n"
+        "roles_path = {roles}\n".format(hosts=hosts, roles=roles)
+    )
 
-    playbook = tmpdir.join('playbook.yml')
-    playbook.write('---\n'
-                   '- hosts: localhost\n'
-                   '  roles:\n'
-                   '    - role: {role}'.format(role=role))
+    playbook = tmpdir.join("playbook.yml")
+    playbook.write(
+        "---\n"
+        "- hosts: localhost\n"
+        "  roles:\n"
+        "    - role: {role}".format(role=role)
+    )
 
     proc = subprocess.Popen(
-        ['ansible-playbook', '--syntax-check', '-vvv', str(playbook)],
-        stdout=subprocess.PIPE, stderr=subprocess.STDOUT,
-        env={'ANSIBLE_CONFIG': str(config), 'PATH': os.environ['PATH']})
+        ["ansible-playbook", "--syntax-check", "-vvv", str(playbook)],
+        stdout=subprocess.PIPE,
+        stderr=subprocess.STDOUT,
+        env={"ANSIBLE_CONFIG": str(config), "PATH": os.environ["PATH"]},
+    )
     out, _ = proc.communicate()
 
     if proc.returncode != 0:
-        pytest.fail('%s is not a valid role:\n%s' % (role, out))
+        pytest.fail("%s is not a valid role:\n%s" % (role, out))
ansiblecube/tests/test_syntax.py

@@ -6,20 +6,20 @@ from jinja2 import Template
 
 def test_jinja2_file(jinja2_file):
-    with open(jinja2_file, 'rb') as f:
-        template_text = f.read().decode('utf-8')
+    with open(jinja2_file, "rb") as f:
+        template_text = f.read().decode("utf-8")
 
     try:
         Template(template_text)
     except Exception as e:
-        pytest.fail('%s is not valid Jinja2:\n%s' % (jinja2_file, e))
+        pytest.fail("%s is not valid Jinja2:\n%s" % (jinja2_file, e))
 
 
 def test_json_file(json_file):
     try:
-        with open(json_file, mode='r') as f:
-            json.load(f, encoding='utf-8')
+        with open(json_file, mode="r") as f:
+            json.load(f, encoding="utf-8")
     except Exception as e:
-        pytest.fail('%s is not valid JSON:\n%s' % (json_file, e))
+        pytest.fail("%s is not valid JSON:\n%s" % (json_file, e))
insert_id_to_class_glade.py

@@ -3,8 +3,10 @@ import os
 import re
 import argparse
 
-parser = argparse.ArgumentParser(description="insert id to class in glade file for gtk3.10 compatibility")
-parser.add_argument('file', type=str, help="glade file")
+parser = argparse.ArgumentParser(
+    description="insert id to class in glade file for gtk3.10 compatibility"
+)
+parser.add_argument("file", type=str, help="glade file")
 args = parser.parse_args()
 
 with open(args.file, "r") as config:

@@ -21,5 +23,9 @@ with open(args.file, "w") as config:
         replaced_pattern = r"<object class=\"(\w*)\"(>|/>)"
         if len(re.findall(replaced_pattern, line)) == 1:
             max_identifier += 1
-            line = re.sub(replaced_pattern, "<object class=\"\g<1>\" id=\"no_id_{}\"\g<2>".format(max_identifier), line)
+            line = re.sub(
+                replaced_pattern,
+                '<object class="\g<1>" id="no_id_{}"\g<2>'.format(max_identifier),
+                line,
+            )
         config.write(line)
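A worked example of the substitution the script performs, using the pattern and replacement from the hunk above on a single illustrative glade line. `max_identifier` is initialized outside the diffed hunk; 0 is assumed here.

```python
# Worked example of the re.sub above on one illustrative glade line.
# max_identifier is initialized outside the diffed hunk; 0 is assumed here.
import re

replaced_pattern = r"<object class=\"(\w*)\"(>|/>)"
max_identifier = 0
line = '      <object class="GtkWindow">'

if len(re.findall(replaced_pattern, line)) == 1:
    max_identifier += 1
    line = re.sub(
        replaced_pattern,
        '<object class="\\g<1>" id="no_id_{}"\\g<2>'.format(max_identifier),
        line,
    )

print(line)  # ->       <object class="GtkWindow" id="no_id_1">
```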
make-vexpress-boot/__main__.py

@@ -5,8 +5,8 @@ import shutil
 import fileinput
 import subprocess
 import urllib.request
-from netfilter_conf import NETFILTER_CONF
 from zipfile import ZipFile
+from netfilter_conf import NETFILTER_CONF
 
 os.makedirs("build", exist_ok=True)
 os.chdir("build")

@@ -24,7 +24,7 @@ exfat_zip = "exfat-nofuse-master.zip"
 print("--> make vexpress boot")
 if os.path.isdir(boot_zip):
     print("nothing to do")
-    exit(0)
+    sys.exit(0)
 
 print("--> download linux")
 if os.path.isdir(linux_folder):

@@ -44,17 +44,16 @@ zipFile.extractall()
 print("--> insert exfat module into kernel tree")
 
 # move downloaded code to fs/exfat
-exfat_folder = os.path.join(linux_folder, 'fs', 'exfat')
+exfat_folder = os.path.join(linux_folder, "fs", "exfat")
 shutil.rmtree(exfat_folder, ignore_errors=True)
 shutil.move("exfat-nofuse-master", exfat_folder)
 
 # add a reference to fs/exfat in the fs/ Makefile
-with open(os.path.join(linux_folder, 'fs', 'Makefile'), "a") as fs_makefile:
+with open(os.path.join(linux_folder, "fs", "Makefile"), "a") as fs_makefile:
     fs_makefile.write("\nobj-$(CONFIG_EXFAT_FS) += exfat/\n")
 
 # add a reference to fs/exfat in the fs/ MenuConfig
-for line in fileinput.input(os.path.join(linux_folder, 'fs', 'Kconfig'),
-                            inplace=True):
+for line in fileinput.input(os.path.join(linux_folder, "fs", "Kconfig"), inplace=True):
     sys.stdout.write(line)
     if 'source "fs/fat/Kconfig"' in line:
         new_line = 'source "fs/exfat/Kconfig"\n'

@@ -63,7 +62,9 @@ for line in fileinput.input(os.path.join(linux_folder, 'fs', 'Kconfig'),
 os.chdir(linux_folder)
 
 print("--> set linux configuration")
-subprocess.check_call("make ARCH=arm CROSS_COMPILE=arm-linux-gnueabihf- vexpress_defconfig", shell=True)
+subprocess.check_call(
+    "make ARCH=arm CROSS_COMPILE=arm-linux-gnueabihf- vexpress_defconfig", shell=True
+)
 
 # Modify configuration
 with open(".config", "r") as config:

@@ -89,20 +90,41 @@ with open(".config", "w") as config:
     # Enable netfilter
     config.write(NETFILTER_CONF)
 
-subprocess.check_call("make -j 2 ARCH=arm CROSS_COMPILE=arm-linux-gnueabihf- olddefconfig", shell=True)
+subprocess.check_call(
+    "make -j 2 ARCH=arm CROSS_COMPILE=arm-linux-gnueabihf- olddefconfig", shell=True
+)
 
 print("--> compile linux")
-subprocess.check_call("make -j 2 ARCH=arm CROSS_COMPILE=arm-linux-gnueabihf- all", shell=True)
+subprocess.check_call(
+    "make -j 2 ARCH=arm CROSS_COMPILE=arm-linux-gnueabihf- all", shell=True
+)
 
 print("--> create vexpress boot directory")
 os.mkdir("../{}".format(boot_dir))
-subprocess.check_call("cp .config arch/arm/boot/zImage arch/arm/boot/dts/vexpress-v2p-ca15-tc1.dtb ../{}".format(boot_dir), shell=True)
-subprocess.check_call("cp .config arch/arm/boot/zImage arch/arm/boot/dts/vexpress-v2p-ca15_a7.dtb ../{}".format(boot_dir), shell=True)
-subprocess.check_call("cp .config arch/arm/boot/zImage arch/arm/boot/dts/vexpress-v2p-ca9.dtb ../{}".format(boot_dir), shell=True)
+subprocess.check_call(
+    "cp .config arch/arm/boot/zImage arch/arm/boot/dts/vexpress-v2p-ca15-tc1.dtb ../{}".format(
+        boot_dir
+    ),
+    shell=True,
+)
+subprocess.check_call(
+    "cp .config arch/arm/boot/zImage arch/arm/boot/dts/vexpress-v2p-ca15_a7.dtb ../{}".format(
+        boot_dir
+    ),
+    shell=True,
+)
+subprocess.check_call(
+    "cp .config arch/arm/boot/zImage arch/arm/boot/dts/vexpress-v2p-ca9.dtb ../{}".format(
+        boot_dir
+    ),
+    shell=True,
+)
 
 os.chdir("..")
 
 with open("{}/README.txt".format(boot_dir), "w+") as readme:
-    readme.write("""This is the kernel used by kiwix-hotspot to boot the vexpress machine in QEMU
-It has been generated by make-vexpress-boot script at https://framagit.org/ideascube/pibox-installer""")
+    readme.write(
+        """This is the kernel used by kiwix-hotspot to boot the vexpress machine in QEMU
+It has been generated by make-vexpress-boot script at https://framagit.org/ideascube/pibox-installer"""
+    )
     readme.flush()
 
 print("--> create vexpress boot zip archive")
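One step above that may be unfamiliar is the `fileinput` in-place edit used to register the exfat Kconfig right after the fat one. Below is a self-contained sketch of that pattern; the path is assumed, the real script edits `<linux_folder>/fs/Kconfig`, and the original assigns the inserted line to `new_line` in code partly outside the shown hunk.

```python
# Sketch only: the fileinput inplace pattern used in make-vexpress-boot above.
# With inplace=True, stdout is redirected into the file being read, so echoing
# each line keeps it and writing an extra line inserts new content after a match.
import fileinput
import sys

kconfig_path = "fs/Kconfig"  # assumed path for illustration

for line in fileinput.input(kconfig_path, inplace=True):
    sys.stdout.write(line)  # keep the original line
    if 'source "fs/fat/Kconfig"' in line:
        sys.stdout.write('source "fs/exfat/Kconfig"\n')  # insert right after it
```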
pibox-installer/__main__.py

 #!/usr/bin/env python
 # -*- coding: utf-8 -*-
 # vim: ai ts=4 sts=4 et sw=4 nu
 
 import argparse
 import sys
 import runpy
 ...
pibox-installer/backend/ansiblecube.py

 #!/usr/bin/env python
 # -*- coding: utf-8 -*-
 # vim: ai ts=4 sts=4 et sw=4 nu
 ...

@@ -16,152 +15,180 @@ ansiblecube_path = "/var/lib/ansible/local"
 
 def run(machine, tags, extra_vars={}, secret_keys=[]):
-    '''
+    """
     run ansiblecube in given machine with specified tags and extra-vars
-    '''
+    """
 
     # predefined defaults we want to superseed whichever in ansiblecube
     ansible_vars = {
-        'mirror': mirror,
-        'catalogs': CATALOGS,
-        'kernel_version': get_content('raspbian_image').get('kernel_version'),
+        "mirror": mirror,
+        "catalogs": CATALOGS,
+        "kernel_version": get_content("raspbian_image").get("kernel_version"),
     }
     ansible_vars.update(extra_vars)
 
     # save extra_vars to a file on guest
     extra_vars_path = posixpath.join(ansiblecube_path, "extra_vars.json")
-    with tempfile.NamedTemporaryFile('w', delete=False) as fp:
+    with tempfile.NamedTemporaryFile("w", delete=False) as fp:
         json.dump(ansible_vars, fp, indent=4)
         fp.close()
         machine.put_file(fp.name, extra_vars_path)
         os.unlink(fp.name)
 
     # prepare ansible command
-    ansible_cmd = ['/usr/local/bin/ansible-playbook',
-                   '--inventory hosts',
-                   '--tags {}'.format(",".join(tags)),
-                   '--extra-vars="@{}"'.format(extra_vars_path),
-                   'main.yml']
+    ansible_cmd = [
+        "/usr/local/bin/ansible-playbook",
+        "--inventory hosts",
+        "--tags {}".format(",".join(tags)),
+        '--extra-vars="@{}"'.format(extra_vars_path),
+        "main.yml",
+    ]
 
     # display sent configuration to logger
     machine._logger.std("ansiblecube extra_vars")
     machine._logger.raw_std(
-        json.dumps({k: '****' if k in secret_keys else v
-                    for k, v in ansible_vars.items()}, indent=4))
+        json.dumps(
+            {k: "****" if k in secret_keys else v for k, v in ansible_vars.items()},
+            indent=4,
+        )
+    )
 
     # review the list of tasks so the logger can use it to track progression
-    tasks_cmd = ansible_cmd[0:1] + ['--list-tasks'] + ansible_cmd[1:]
-    machine.exec_cmd(
-        "sh -c 'cd {path} && tasks=$({cmd} | paste -sd \"^\" -) "
-        "&& echo \"### TASKS ### $tasks\"'"
-        .format(path=ansiblecube_path, cmd=" ".join(tasks_cmd)))
+    tasks_cmd = ansible_cmd[0:1] + ["--list-tasks"] + ansible_cmd[1:]
+    machine.exec_cmd(
+        'sh -c \'cd {path} && tasks=$({cmd} | paste -sd "^" -) '
+        '&& echo "### TASKS ### $tasks"\''.format(
+            path=ansiblecube_path, cmd=" ".join(tasks_cmd)
+        )
+    )
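The secret-masking dump near the top of `run()` is easy to miss in the reflowed call; a worked example with made-up values:

```python
# Worked example of the masking comprehension used in run() above.
# The values are invented; only keys listed in secret_keys are hidden.
import json

ansible_vars = {"mirror": "http://mirror.example", "admin_password": "hunter2"}
secret_keys = ["admin_password"]

print(
    json.dumps(
        {k: "****" if k in secret_keys else v for k, v in ansible_vars.items()},
        indent=4,
    )
)
# {
#     "mirror": "http://mirror.example",
#     "admin_password": "****"
# }
```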