zeus
3 years ago
41 changed files with 4482 additions and 0 deletions
@ -1,2 +1,40 @@

# microservice-ansible

## LabInstance ansible

![alt text](images/swarmlab-network.png "")

## Quickstart

This is a quickstart guide on how to use this *LabInstance to deploy Vue.js applications*.

### HowTo use it

### Default Configuration

- Working Directory

> /home/docker/project

- Default user

> docker

- Default password

> docker

- Default password4root

> pass
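A minimal first-login sketch, assuming the instance publishes SSH on localhost port 2222 (the `SSH_PORT` value used elsewhere in this project); adjust host and port to your own deployment:

```bash
# Log in with the default user/password listed above (docker/docker);
# the port is an assumption taken from the project's .env (SSH_PORT=2222).
ssh -p 2222 docker@localhost

# Inside the instance, the project files live in the default working directory:
cd /home/docker/project
```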
@ -0,0 +1,15 @@

# Pick a free TCP port from the ephemeral range (49152-65535).
function EPHEMERAL_PORT() {
    LOW_BOUND=49152
    RANGE=16384
    while true; do
        CANDIDATE=$((LOW_BOUND + (RANDOM % RANGE)))
        # Try to connect; if nothing is listening, the port is free.
        (echo "" >/dev/tcp/127.0.0.1/${CANDIDATE}) >/dev/null 2>&1
        if [ $? -ne 0 ]; then
            echo "$CANDIDATE"
            break
        fi
    done
}

port=$(EPHEMERAL_PORT)
echo "$port"
(new binary image file added: 80 KiB)
@ -0,0 +1,30 @@

#!/usr/bin/env bash

# Complete the first argument of swarmlab-mpi from a commands file.
_mpi()
{
    local commands_number=${DOTHIS_COMPLETION_COMMANDS_NUMBER:-50}
    local IFS=$'\n'
    local suggestions=($(compgen -W "$(cat $1 | sed 's/\t//')" -- "${COMP_WORDS[1]}"))
    local sug=1
    if [ "${#suggestions[@]}" == "$sug" ]; then
        # Only one match: keep just the command name and complete it.
        local number="${suggestions[0]/%\ */}"
        COMPREPLY=("$number")
    else
        # Several matches: pad each suggestion to the terminal width so
        # bash displays one suggestion per line.
        for i in "${!suggestions[@]}"; do
            suggestions[$i]="$(printf '%*s' "-$COLUMNS" "${suggestions[$i]}")"
        done

        COMPREPLY=("${suggestions[@]}")
    fi
}

_mpi_completions()
{
    local LAB_files=${LAB_PATH}
    if [ "${#COMP_WORDS[@]}" == "2" ]; then
        local file2="/usr/share/swarmlab.io/mpi/commands"
        _mpi "$file2" 1
    fi
}

complete -F _mpi_completions swarmlab-mpi
@ -0,0 +1 @@

export PATH=$PATH:/usr/share/swarmlab.io/mpi
@ -0,0 +1,127 @@

# ~/.bashrc: executed by bash(1) for non-login shells.
# see /usr/share/doc/bash/examples/startup-files (in the package bash-doc)
# for examples

# If not running interactively, don't do anything
case $- in
    *i*) ;;
      *) return;;
esac

# don't put duplicate lines or lines starting with space in the history.
# See bash(1) for more options
HISTCONTROL=ignoreboth

# append to the history file, don't overwrite it
shopt -s histappend

# for setting history length see HISTSIZE and HISTFILESIZE in bash(1)
HISTSIZE=1000
HISTFILESIZE=2000

# check the window size after each command and, if necessary,
# update the values of LINES and COLUMNS.
shopt -s checkwinsize

# If set, the pattern "**" used in a pathname expansion context will
# match all files and zero or more directories and subdirectories.
#shopt -s globstar

# make less more friendly for non-text input files, see lesspipe(1)
[ -x /usr/bin/lesspipe ] && eval "$(SHELL=/bin/sh lesspipe)"

# set variable identifying the chroot you work in (used in the prompt below)
if [ -z "${debian_chroot:-}" ] && [ -r /etc/debian_chroot ]; then
    debian_chroot=$(cat /etc/debian_chroot)
fi

# set a fancy prompt (non-color, unless we know we "want" color)
case "$TERM" in
    xterm-color|*-256color) color_prompt=yes;;
esac

# uncomment for a colored prompt, if the terminal has the capability; turned
# off by default to not distract the user: the focus in a terminal window
# should be on the output of commands, not on the prompt
#force_color_prompt=yes

if [ -n "$force_color_prompt" ]; then
    if [ -x /usr/bin/tput ] && tput setaf 1 >&/dev/null; then
        # We have color support; assume it's compliant with Ecma-48
        # (ISO/IEC-6429). (Lack of such support is extremely rare, and such
        # a case would tend to support setf rather than setaf.)
        color_prompt=yes
    else
        color_prompt=
    fi
fi

if [ "$color_prompt" = yes ]; then
    PS1='${debian_chroot:+($debian_chroot)}\[\033[01;32m\]\u@\h\[\033[00m\]:\[\033[01;34m\]\w\[\033[00m\]\$ '
else
    PS1='${debian_chroot:+($debian_chroot)}\u@\h:\w\$ '
fi
unset color_prompt force_color_prompt

# If this is an xterm set the title to user@host:dir
case "$TERM" in
xterm*|rxvt*)
    PS1="\[\e]0;${debian_chroot:+($debian_chroot)}\u@\h: \w\a\]$PS1"
    ;;
*)
    ;;
esac

# enable color support of ls and also add handy aliases
if [ -x /usr/bin/dircolors ]; then
    test -r ~/.dircolors && eval "$(dircolors -b ~/.dircolors)" || eval "$(dircolors -b)"
    alias ls='ls --color=auto'
    #alias dir='dir --color=auto'
    #alias vdir='vdir --color=auto'

    alias grep='grep --color=auto'
    alias fgrep='fgrep --color=auto'
    alias egrep='egrep --color=auto'
fi

# colored GCC warnings and errors
#export GCC_COLORS='error=01;31:warning=01;35:note=01;36:caret=01;32:locus=01:quote=01'

# some more ls aliases
alias ll='ls -alF'
alias la='ls -A'
alias l='ls -CF'

# Add an "alert" alias for long running commands.  Use like so:
#   sleep 10; alert
alias alert='notify-send --urgency=low -i "$([ $? = 0 ] && echo terminal || echo error)" "$(history|tail -n1|sed -e '\''s/^\s*[0-9]\+\s*//;s/[;&|]\s*alert$//'\'')"'

# Alias definitions.
# You may want to put all your additions into a separate file like
# ~/.bash_aliases, instead of adding them here directly.
# See /usr/share/doc/bash-doc/examples in the bash-doc package.

if [ -f ~/.bash_aliases ]; then
    . ~/.bash_aliases
fi

if [ -d /etc/profile.d ]; then
    for i in /etc/profile.d/*.sh; do
        if [ -r $i ]; then
            . $i
        fi
    done
    unset i
fi

# enable programmable completion features (you don't need to enable
# this, if it's already enabled in /etc/bash.bashrc and /etc/profile
# sources /etc/bash.bashrc).
if ! shopt -oq posix; then
    if [ -f /usr/share/bash-completion/bash_completion ]; then
        . /usr/share/bash-completion/bash_completion
    elif [ -f /etc/bash_completion ]; then
        . /etc/bash_completion
    fi
fi
@ -0,0 +1,7 @@

REGISTRY_ADDR=localhost
REGISTRY_PORT=5000
IMAGE_NAME=sec
SSH_PORT=2222
WEB_PORT=80
WEB_PORT1=443
WEB_PORT2=8080
@ -0,0 +1,15 @@

map <C-e> :NERDTreeToggle<CR>

autocmd BufNewFile,BufRead *.vue,*.js set syntax=verilog tabstop=2|set shiftwidth=2|set noexpandtab autoindent
augroup remember_folds
  autocmd!
  autocmd BufWinLeave * mkview
  autocmd BufWinEnter * silent! loadview
augroup END
" Useful for my Quick Notes feature in my tmuxrc
augroup QuickNotes
  au BufWrite,VimLeave NOTES.otl mkview
  au BufRead NOTES.otl silent loadview
augroup END
set swapfile
set dir=~/tmp
@ -0,0 +1,614 @@

GNU AFFERO GENERAL PUBLIC LICENSE

Version 3, 19 November 2007

Copyright (C) 2007 Free Software Foundation, Inc. <https://fsf.org/>

Everyone is permitted to copy and distribute verbatim copies of this license
document, but changing it is not allowed.

[Full, unmodified text of the GNU AGPL v3; see <https://www.gnu.org/licenses/agpl-3.0.txt>.]
@ -0,0 +1 @@

ROOT_PASSWORD="pass"
@ -0,0 +1,11 @@

#!/bin/sh

# Write the current host list to the file given as $1 and refresh it
# every 2 seconds, rewriting the file only when the list changes.
hosts=$(get_hosts)
printf "%s" "$hosts" > "$1"

while sleep 2
do
    current_hosts=$(get_hosts)
    [ "$hosts" != "$current_hosts" ] && printf "%s" "$current_hosts" > "$1"
    hosts=$current_hosts
done
@ -0,0 +1,13 @@

create    create project        (swarmlab-sec create)
up        start swarmlab-sec    (swarmlab-sec up size=10)
scale     resize swarmlab-sec   (swarmlab-sec scale size=30)
reload    rebuild image         (swarmlab-sec reload size=15)
login     login swarmlab-sec    (swarmlab-sec login)
exec      execute command       (swarmlab-sec exec [SHELL COMMAND])
down      stop swarmlab-sec     (swarmlab-sec down)
clean     clean project         (swarmlab-sec clean)
list      show instances        (swarmlab-sec list)
help      show help             (swarmlab-sec help)
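A typical session with the commands listed above might look like the following sketch (the size value is only an example):

    swarmlab-sec create            # create the project
    swarmlab-sec up size=10        # start it with 10 instances
    swarmlab-sec login             # open a shell in the running lab
    swarmlab-sec exec uname -a     # run a one-off command inside it
    swarmlab-sec down              # stop it
    swarmlab-sec clean             # remove the project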
@ -0,0 +1,8 @@

#!/bin/sh

# Include the variables that store the Docker service names
# shellcheck disable=SC1091
. /etc/opt/service_names

localip=$(ip addr show dev eth0 | grep "inet " | cut -d ' ' -f 6 | cut -f 1 -d '/')
nmap -sP "$localip/24" | grep Nmap | cut -d' ' -f5 | grep "_$MPI_WORKER_SERVICE_NAME_" > /project/hosts
@ -0,0 +1,74 @@

#! /usr/bin/env sh

start_dir=$(pwd)
bin_string="export PATH=\"${PATH}:${HOME}/.vimpkg/bin\""

# Download the apt-vim files
curl -fSsLo ${HOME}/apt-vim/apt-vim --create-dirs \
    https://raw.githubusercontent.com/egalpin/apt-vim/master/apt-vim

curl -fSsLo ${HOME}/apt-vim/vim_config.json \
    https://raw.githubusercontent.com/egalpin/apt-vim/master/vim_config.json

# Add vimrc if there isn't one already
[ -f ${HOME}/.vimrc ] || touch ${HOME}/.vimrc

# Make sure vimrc is using pathogen
if [ $(grep -c "execute pathogen#infect()" ${HOME}/.vimrc) -eq 0 ]; then
    echo "execute pathogen#infect()" >> ${HOME}/.vimrc
fi
if [ $(grep -c "call pathogen#helptags()" ${HOME}/.vimrc) -eq 0 ]; then
    echo "call pathogen#helptags()" >> ${HOME}/.vimrc
fi

# Update path for executing shell
eval "$bin_string"

added_to_profile=false
already_present=false
for rc in bashrc zshrc bash_profile; do
    if [ -s "$HOME/.$rc" ]; then
        if grep -q "$bin_string" "$HOME/.$rc"; then
            already_present=true
        else
            printf "\n$bin_string\n" >> "$HOME/.$rc"
            printf "== Added apt-vim PATH to '~/.$rc'\n"
            added_to_profile=true
        fi
    fi
done

# Execute apt-vim init
cd ${HOME}/apt-vim
python - <<EOF
import imp, os
print('apt-vim setup starting')
HOME = os.path.expanduser("~")
APT_VIM_DIR = os.path.abspath(os.path.join(HOME, 'apt-vim'))
SCRIPT_ROOT_DIR = os.path.abspath(os.path.join(HOME, '.vimpkg'))
BIN_DIR = os.path.abspath(os.path.join(SCRIPT_ROOT_DIR, 'bin'))
os.environ['PATH'] += os.pathsep + BIN_DIR
os.chdir(APT_VIM_DIR)

aptvim = imp.load_source("aptvim", "./apt-vim")
av = aptvim.aptvim(ASSUME_YES=True, VIM_CONFIG='', INSTALL_TARGET='')
av.first_run()
av.handle_install(None, None, None)
EOF
python_result=$?

cd $start_dir

echo
if [ "$python_result" -ne 0 ]; then
    echo "== Error:"
    echo "   Installation failed."
elif [ "$added_to_profile" = false ] && [ "$already_present" = false ]; then
    echo "== Error:"
    echo "   Found no profile to add apt-vim PATH to."
    echo "   Add the following line to your shell profile and source it to install manually:"
    printf "   $bin_string\n"
else
    echo "== apt-vim installation succeeded! Run 'source ~/.bashrc || source ~/.bash_profile' or 'source ~/.zshrc'"
    echo "   to access the executable script."
fi
@ -0,0 +1,2 @@

#!/bin/sh
/usr/bin/supervisord -n -c /etc/supervisor/supervisord.conf
@ -0,0 +1,6 @@

#!/bin/sh

#ip=`nslookup hybrid-mpi_master_1.hybrid-mpi_hybrid-mpi | grep Addr | cut -d':' -f2 | grep -v 127.0.`
ip=`nslookup $NODENAME | grep Addr | cut -d':' -f2 | grep -v 127.0.`
nmap -sn -oG - $ip/24 | grep Up | grep $NODENETWORK | cut -d ' ' -f 2
@ -0,0 +1,44 @@

server {
    listen       80;
    server_name  localhost;

    #charset koi8-r;
    #access_log  /var/log/nginx/log/host.access.log  main;

    location / {
        root   /data/www;
        index  index.html index.htm;
    }

    #error_page  404              /404.html;

    # redirect server error pages to the static page /50x.html
    #
    error_page   500 502 503 504  /50x.html;
    location = /50x.html {
        root   /data/www;
    }

    # proxy the PHP scripts to Apache listening on 127.0.0.1:80
    #
    #location ~ \.php$ {
    #    proxy_pass   http://127.0.0.1;
    #}

    # pass the PHP scripts to FastCGI server listening on 127.0.0.1:9000
    #
    #location ~ \.php$ {
    #    root           html;
    #    fastcgi_pass   127.0.0.1:9000;
    #    fastcgi_index  index.php;
    #    fastcgi_param  SCRIPT_FILENAME  /scripts$fastcgi_script_name;
    #    include        fastcgi_params;
    #}

    # deny access to .htaccess files, if Apache's document root
    # concurs with nginx's one
    #
    #location ~ /\.ht {
    #    deny  all;
    #}
}
@ -0,0 +1,29 @@

user  www-data;
worker_processes  1;

error_log  /var/log/nginx/error.log warn;
pid        /var/run/nginx.pid;

events {
    worker_connections  1024;
}

http {
    include       /etc/nginx/mime.types;
    default_type  application/octet-stream;

    log_format  main  '[$time_local] $remote_user:$remote_addr "$request" '
                      '$status $body_bytes_sent "$http_referer" '
                      '"$http_user_agent" "$http_x_forwarded_for"';

    access_log  /var/log/nginx/access.log  main;

    sendfile        on;
    #tcp_nopush     on;

    keepalive_timeout  65;

    #gzip  on;

    include /etc/nginx/conf.d/*.conf;
}
@ -0,0 +1,28 @@

[unix_http_server]
file=/dev/shm/supervisor.sock       ; (the path to the socket file)

[supervisord]
logfile=/var/log/supervisord.log    ; (main log file;default $CWD/supervisord.log)
logfile_maxbytes=50MB               ; (max main logfile bytes b4 rotation;default 50MB)
logfile_backups=10                  ; (num of main logfile rotation backups;default 10)
loglevel=info                       ; (log level;default info; others: debug,warn,trace)
pidfile=/tmp/supervisord.pid        ; (supervisord pidfile;default supervisord.pid)
nodaemon=false                      ; (start in foreground if true;default false)
minfds=1024                         ; (min. avail startup file descriptors;default 1024)
minprocs=200                        ; (min. avail process descriptors;default 200)
user=root                           ;

[rpcinterface:supervisor]
supervisor.rpcinterface_factory = supervisor.rpcinterface:make_main_rpcinterface

[supervisorctl]
serverurl=unix:///dev/shm/supervisor.sock ; use a unix:// URL for a unix socket

[include]
files = /etc/supervisor/conf.d/*.conf

[program:nginx]
command=/usr/sbin/nginx
numprocs=1
autostart=true
autorestart=true
@ -0,0 +1 @@

Course examples
@ -0,0 +1,31 @@

// Minimal socket.io client: connects to the local server and emits one 'log' message.
const app = require('express')();
const http = require('http').Server(app);
var path = require('path');

var options = {
  secure: true,
  reconnect: true,
  rejectUnauthorized: false
};

var io2 = require('socket.io-client');
var socket = io2.connect('http://localhost:8084', options);

var msg2 = "c= 120";
socket.emit('log', msg2);

/*
var io = require('socket.io')(http);

app.get('/log', (req, res) => {
  socket.emit('log', 'send from get');
  res.send('<h1>send</h1>');
});

http.listen(8085, () => {
  console.log('listening on *:8085');
});
*/
@ -0,0 +1,109 @@ |
|||||
|
var path = require('path'); |
||||
|
var app = require('express')(); |
||||
|
var http = require('http').Server(app); |
||||
|
var io = require('socket.io')(http); |
||||
|
const MongoClient = require('mongodb').MongoClient; |
||||
|
|
||||
|
|
||||
|
app.get('/test', (req, res) => { |
||||
|
|
||||
|
var user="swarmlab" |
||||
|
var pass="swarmlab" |
||||
|
|
||||
|
/* |
||||
|
use admin |
||||
|
db.createUser( |
||||
|
{ |
||||
|
user: "test1", |
||||
|
pwd: 'newpass', // Or "<cleartext password>"
|
||||
|
roles: [ { role: "readWrite", db: "app_swarmlab" } ], |
||||
|
authenticationRestrictions: [ { |
||||
|
clientSource: ["192.168.1.7"], |
||||
|
serverAddress: ["192.168.80.2", "192.168.80.3", "192.168.80.4"] |
||||
|
} ] |
||||
|
} |
||||
|
) |
||||
|
*/ |
||||
|
|
||||
|
var mongourl = "mongodb://"+user+":"+pass+"@swarmlabmongo1:27017,swarmlabmongo2:27017,swarmlabmongo3:27017/app_swarmlab?replicaSet=rs0&authSource=admin&w=1"
||||
|
const OPTS = { |
||||
|
useNewUrlParser: true, |
||||
|
useUnifiedTopology: true, |
||||
|
//poolSize: 10,
|
||||
|
tls: false |
||||
|
}; |
||||
|
|
||||
|
const client = new MongoClient(mongourl,OPTS); |
||||
|
|
||||
|
client.on('serverDescriptionChanged', function(event) { |
||||
|
console.log('received serverDescriptionChanged'); |
||||
|
console.log(JSON.stringify(event, null, 2)); |
||||
|
}); |
||||
|
|
||||
|
client.on('serverHeartbeatStarted', function(event) { |
||||
|
console.log('received serverHeartbeatStarted'); |
||||
|
console.log(JSON.stringify(event, null, 2)); |
||||
|
}); |
||||
|
|
||||
|
client.on('serverHeartbeatSucceeded', function(event) { |
||||
|
console.log('received serverHeartbeatSucceeded'); |
||||
|
console.log(JSON.stringify(event, null, 2)); |
||||
|
}); |
||||
|
|
||||
|
client.on('serverHeartbeatFailed', function(event) { |
||||
|
console.log('received serverHeartbeatFailed'); |
||||
|
console.log(JSON.stringify(event, null, 2)); |
||||
|
}); |
||||
|
|
||||
|
client.on('serverOpening', function(event) { |
||||
|
console.log('received serverOpening'); |
||||
|
console.log(JSON.stringify(event, null, 2)); |
||||
|
}); |
||||
|
|
||||
|
client.on('serverClosed', function(event) { |
||||
|
console.log('received serverClosed'); |
||||
|
console.log(JSON.stringify(event, null, 2)); |
||||
|
}); |
||||
|
|
||||
|
client.on('topologyOpening', function(event) { |
||||
|
console.log('received topologyOpening'); |
||||
|
console.log(JSON.stringify(event, null, 2)); |
||||
|
}); |
||||
|
|
||||
|
client.on('topologyClosed', function(event) { |
||||
|
console.log('received topologyClosed'); |
||||
|
console.log(JSON.stringify(event, null, 2)); |
||||
|
}); |
||||
|
|
||||
|
client.on('topologyDescriptionChanged', function(event) { |
||||
|
console.log('received topologyDescriptionChanged'); |
||||
|
console.log(JSON.stringify(event, null, 2)); |
||||
|
}); |
||||
|
|
||||
|
client.connect(function(err, client) { |
||||
|
if(err) throw err; |
||||
|
|
||||
|
const db = client.db('app_swarmlab'); |
||||
|
db.collection('logs').find({}).toArray() |
||||
|
.then(item => { |
||||
|
console.log('item '+JSON.stringify(item)) |
||||
|
for (let i in item) { |
||||
|
console.log(JSON.stringify('items' + item[i])) |
||||
|
} |
||||
|
}) |
||||
|
}); |
||||
|
|
||||
|
res.send('<h1>Hello world!</h1>'); |
||||
|
}); |
||||
|
|
||||
|
io.on('connection', s => { |
||||
|
console.error('socket connection'); |
||||
|
|
||||
|
s.on('log', (data, room) => { |
||||
|
console.log('broadcast', data); |
||||
|
}); |
||||
|
|
||||
|
}); |
||||
|
|
||||
|
http.listen(8084, () => console.error('listening on http://localhost:8084/')); |
||||
|
console.error('socket.io example'); |
@ -0,0 +1,24 @@ |
var path = require('path');
var app = require('express')();
var http = require('http').Server(app);
var io = require('socket.io')(http);

app.get('/', (req, res) => {
  res.send('<h1>Hello world!</h1>');
});

io.on('connection', s => {
  console.error('socket connection');

  s.on('log', (data, room) => {
    console.log('broadcast', data);
  });
});

http.listen(8084, () => console.error('listening on http://localhost:8084/'));
console.error('socket.io example');
@ -0,0 +1,8 @@ |
{
  "dependencies": {
    "express": "^4.17.1",
    "mongodb": "^3.6.5",
    "socket.io": "^4.0.0",
    "socket.io-client": "^4.0.0"
  }
}
@ -0,0 +1,107 @@ |
|||||
|
var express = require('express'); |
||||
|
var http = require('http'); |
||||
|
const MongoClient = require('mongodb').MongoClient; |
||||
|
|
||||
|
var PORT = 8085; |
||||
|
|
||||
|
var app = express(); |
||||
|
app.get('/', function(req, res) { |
||||
|
var RES ={} |
||||
|
var message = req.query["log"] |
||||
|
|
||||
|
// Connection URL
|
||||
|
var database = "app_swarmlab" |
||||
|
var user = "swarmlab" |
||||
|
var password = "swarmlab" |
||||
|
var collection = "logs" |
||||
|
var replica_set = "rs0" |
||||
|
var nodes = "swarmlabmongo1:27017,swarmlabmongo2:27017,swarmlabmongo3:27017" |
||||
|
//var url = `mongodb://${user}:${password}@${nodes}/${database}?replicaSet=${replica_set}&authSource=admin`
|
||||
|
|
||||
|
var mongourl = "mongodb://"+user+":"+password+"@swarmlabmongo1:27017,swarmlabmongo2:27017,swarmlabmongo3:27017/app_swarmlab?replicaSet=rs0&authSource=admin&w=1" |
||||
|
const OPTS = { |
||||
|
useNewUrlParser: true, |
||||
|
useUnifiedTopology: true, |
||||
|
//poolSize: 10,
|
||||
|
tls: false |
||||
|
}; |
||||
|
|
||||
|
const client = new MongoClient(mongourl,OPTS); |
||||
|
|
||||
|
client.on('serverDescriptionChanged', function(event) { |
||||
|
console.log('received serverDescriptionChanged'); |
||||
|
console.log(JSON.stringify(event, null, 2)); |
||||
|
}); |
||||
|
|
||||
|
client.on('serverHeartbeatStarted', function(event) { |
||||
|
console.log('received serverHeartbeatStarted'); |
||||
|
console.log(JSON.stringify(event, null, 2)); |
||||
|
}); |
||||
|
|
||||
|
client.on('serverHeartbeatSucceeded', function(event) { |
||||
|
console.log('received serverHeartbeatSucceeded'); |
||||
|
console.log(JSON.stringify(event, null, 2)); |
||||
|
}); |
||||
|
|
||||
|
client.on('serverHeartbeatFailed', function(event) { |
||||
|
console.log('received serverHeartbeatFailed'); |
||||
|
console.log(JSON.stringify(event, null, 2)); |
||||
|
}); |
||||
|
|
||||
|
client.on('serverOpening', function(event) { |
||||
|
console.log('received serverOpening'); |
||||
|
console.log(JSON.stringify(event, null, 2)); |
||||
|
}); |
||||
|
|
||||
|
client.on('serverClosed', function(event) { |
||||
|
console.log('received serverClosed'); |
||||
|
console.log(JSON.stringify(event, null, 2)); |
||||
|
}); |
||||
|
|
||||
|
client.on('topologyOpening', function(event) { |
||||
|
console.log('received topologyOpening'); |
||||
|
console.log(JSON.stringify(event, null, 2)); |
||||
|
}); |
||||
|
||||
|
|
||||
|
client.on('topologyClosed', function(event) { |
||||
|
console.log('received topologyClosed'); |
||||
|
console.log(JSON.stringify(event, null, 2)); |
||||
|
}); |
||||
|
|
||||
|
client.on('topologyDescriptionChanged', function(event) { |
||||
|
console.log('received topologyDescriptionChanged'); |
||||
|
console.log(JSON.stringify(event, null, 2)); |
||||
|
}); |
||||
|
|
||||
|
client.connect(function(err, client) { |
||||
|
if(err) throw err; |
||||
|
|
||||
|
const db = client.db('app_swarmlab'); |
||||
|
db.collection('logs').find({}).toArray() |
||||
|
.then(item => { |
||||
|
console.log('item '+JSON.stringify(item)) |
||||
|
for (let i in item) { |
||||
|
console.log(JSON.stringify('items' + item[i])) |
||||
|
} |
||||
|
res.send({message: message, data:item}); |
||||
|
|
||||
|
}) |
||||
|
}); |
||||
|
}); |
||||
|
|
||||
|
app.post('/', function(req, res) { |
||||
|
var message = req.body["log"] |
||||
|
console.log(JSON.stringify(message)) |
||||
|
//console.log(req)
|
||||
|
res.send({message: message}); |
||||
|
}); |
||||
|
|
||||
|
http.Server(app).listen(PORT, function() { |
||||
|
console.log("HTTP server listening on port %s", PORT); |
||||
|
}); |
||||
|
|
File diff suppressed because it is too large
@ -0,0 +1,7 @@ |
{
  "main": "app.js",
  "dependencies": {
    "express": "^4.17.1",
    "mongodb": "^3.6.6"
  }
}
@ -0,0 +1,121 @@ |
|||||
|
<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.1//EN" "http://www.w3.org/TR/xhtml11/DTD/xhtml11.dtd"> |
||||
|
|
||||
|
<html xmlns="http://www.w3.org/1999/xhtml" xml:lang="en"> |
||||
|
<head> |
||||
|
<title>Test Page for the Nginx HTTP Server on Swarmlab.io</title> |
||||
|
<meta http-equiv="Content-Type" content="text/html; charset=UTF-8" /> |
||||
|
<style type="text/css"> |
||||
|
/*<![CDATA[*/ |
||||
|
body { |
||||
|
background-color: #fff; |
||||
|
color: #000; |
||||
|
font-size: 0.9em; |
||||
|
font-family: sans-serif,helvetica; |
||||
|
margin: 0; |
||||
|
padding: 0; |
||||
|
} |
||||
|
:link { |
||||
|
color: #c00; |
||||
|
} |
||||
|
:visited { |
||||
|
color: #c00; |
||||
|
} |
||||
|
a:hover { |
||||
|
color: #f50; |
||||
|
} |
||||
|
h1 { |
||||
|
text-align: center; |
||||
|
margin: 0; |
||||
|
padding: 0.6em 2em 0.4em; |
||||
|
background-color: #294172; |
||||
|
color: #fff; |
||||
|
font-weight: normal; |
||||
|
font-size: 1.75em; |
||||
|
border-bottom: 2px solid #000; |
||||
|
} |
||||
|
h1 strong { |
||||
|
font-weight: bold; |
||||
|
font-size: 1.5em; |
||||
|
} |
||||
|
h2 { |
||||
|
text-align: center; |
||||
|
background-color: #3C6EB4; |
||||
|
font-size: 1.1em; |
||||
|
font-weight: bold; |
||||
|
color: #fff; |
||||
|
margin: 0; |
||||
|
padding: 0.5em; |
||||
|
border-bottom: 2px solid #294172; |
||||
|
} |
||||
|
hr { |
||||
|
display: none; |
||||
|
} |
||||
|
.content { |
||||
|
padding: 1em 5em; |
||||
|
} |
||||
|
.alert { |
||||
|
border: 2px solid #000; |
||||
|
} |
||||
|
|
||||
|
img { |
||||
|
border: 2px solid #fff; |
||||
|
padding: 2px; |
||||
|
margin: 2px; |
||||
|
} |
||||
|
a:hover img { |
||||
|
border: 2px solid #294172; |
||||
|
} |
||||
|
.logos { |
||||
|
margin: 1em; |
||||
|
text-align: center; |
||||
|
} |
||||
|
/*]]>*/ |
||||
|
</style> |
||||
|
</head> |
||||
|
|
||||
|
<body> |
||||
|
<h1>Welcome to <strong>nginx</strong> on Swarmlab.io!</h1> |
||||
|
|
||||
|
<div class="content"> |
||||
|
<p>This page is used to test the proper operation of the |
||||
|
<strong>nginx</strong> HTTP server after it has been |
||||
|
installed. If you can read this page, it means that the |
||||
|
web server installed at this site is working |
||||
|
properly.</p> |
||||
|
|
||||
|
<div class="alert"> |
||||
|
<h2>Website Administrator</h2> |
||||
|
<div class="content"> |
||||
|
<p>This is the default <tt>index.html</tt> page that |
||||
|
is distributed with <strong>nginx</strong> on |
||||
|
Swarmlab.io. It is located in |
||||
|
<tt>/data/www</tt>.</p> |
||||
|
|
||||
|
<p>You should now put your content in a location of |
||||
|
your choice and edit the <tt>root</tt> configuration |
||||
|
directive in the <strong>nginx</strong> |
||||
|
configuration file |
||||
|
<tt>/etc/nginx/nginx.conf</tt>.</p> |
||||
|
|
||||
|
<a href="https://docs.nginx.com/nginx/admin-guide/web-server/serving-static-content/"> |
||||
|
<img |
||||
|
src="nginx-logo.png" |
||||
|
alt="[ Powered by nginx ]" |
||||
|
width="121" height="32" />More info here</a> |
||||
|
</div> |
||||
|
</div> |
||||
|
|
||||
|
<div class="logos"> |
||||
|
<a href="http://nginx.com/"><img |
||||
|
src="nginx-logo.png" |
||||
|
alt="[ Powered by nginx ]" |
||||
|
width="121" height="32" /></a> |
||||
|
|
||||
|
<a href="http://swarmlab.io"><img |
||||
|
src="poweredby.png" |
||||
|
alt="[ Powered by Swarmlab.io ]" |
||||
|
width="88" height="31" /></a> |
||||
|
</div> |
||||
|
</div> |
||||
|
</body> |
||||
|
</html> |
@ -0,0 +1,476 @@ |
|||||
|
# config file for ansible -- https://ansible.com/ |
||||
|
# =============================================== |
||||
|
|
||||
|
# nearly all parameters can be overridden in ansible-playbook |
||||
|
# or with command line flags. ansible will read ANSIBLE_CONFIG, |
||||
|
# ansible.cfg in the current working directory, .ansible.cfg in |
||||
|
# the home directory or /etc/ansible/ansible.cfg, whichever it |
||||
|
# finds first |
||||
|
|
||||
|
[defaults] |
||||
|
|
||||
|
# some basic default values... |
||||
|
|
||||
|
#inventory = /etc/ansible/hosts |
||||
|
#library = /usr/share/my_modules/ |
||||
|
#module_utils = /usr/share/my_module_utils/ |
||||
|
remote_tmp = /tmp/.ansible-${USER}/tmp |
||||
|
#local_tmp = ~/.ansible/tmp |
||||
|
#plugin_filters_cfg = /etc/ansible/plugin_filters.yml |
||||
|
#forks = 5 |
||||
|
#poll_interval = 15 |
||||
|
#sudo_user = root |
||||
|
#ask_sudo_pass = True |
||||
|
#ask_pass = True |
||||
|
#transport = smart |
||||
|
#remote_port = 22 |
||||
|
#module_lang = C |
||||
|
#module_set_locale = False |
||||
|
|
||||
|
# plays will gather facts by default, which contain information about |
||||
|
# the remote system. |
||||
|
# |
||||
|
# smart - gather by default, but don't regather if already gathered |
||||
|
# implicit - gather by default, turn off with gather_facts: False |
||||
|
# explicit - do not gather by default, must say gather_facts: True |
||||
|
#gathering = implicit |
||||
|
|
||||
|
# This only affects the gathering done by a play's gather_facts directive, |
||||
|
# by default gathering retrieves all facts subsets |
||||
|
# all - gather all subsets |
||||
|
# network - gather min and network facts |
||||
|
# hardware - gather hardware facts (longest facts to retrieve) |
||||
|
# virtual - gather min and virtual facts |
||||
|
# facter - import facts from facter |
||||
|
# ohai - import facts from ohai |
||||
|
# You can combine them using comma (ex: network,virtual) |
||||
|
# You can negate them using ! (ex: !hardware,!facter,!ohai) |
||||
|
# A minimal set of facts is always gathered. |
||||
|
#gather_subset = all |
||||
|
|
||||
|
# some hardware related facts are collected |
||||
|
# with a maximum timeout of 10 seconds. This |
||||
|
# option lets you increase or decrease that |
||||
|
# timeout to something more suitable for the |
||||
|
# environment. |
||||
|
# gather_timeout = 10 |
||||
|
|
||||
|
# additional paths to search for roles in, colon separated |
||||
|
#roles_path = /etc/ansible/roles |
||||
|
|
||||
|
# uncomment this to disable SSH key host checking |
||||
|
#host_key_checking = False |
||||
|
host_key_checking = False |
||||
|
|
||||
|
# change the default callback, you can only have one 'stdout' type enabled at a time. |
||||
|
#stdout_callback = skippy |
||||
|
|
||||
|
|
||||
|
## Ansible ships with some plugins that require whitelisting, |
||||
|
## this is done to avoid running all of a type by default. |
||||
|
## These settings list those that you want enabled for your system.
||||
|
## Custom plugins should not need this unless plugin author specifies it. |
||||
|
|
||||
|
# enable callback plugins, they can output to stdout but cannot be 'stdout' type. |
||||
|
#callback_whitelist = timer, mail |
||||
|
|
||||
|
# Determine whether includes in tasks and handlers are "static" by |
||||
|
# default. As of 2.0, includes are dynamic by default. Setting these |
||||
|
# values to True will make includes behave more like they did in the |
||||
|
# 1.x versions. |
||||
|
#task_includes_static = False |
||||
|
#handler_includes_static = False |
||||
|
|
||||
|
# Controls if a missing handler for a notification event is an error or a warning |
||||
|
#error_on_missing_handler = True |
||||
|
|
||||
|
# change this for alternative sudo implementations |
||||
|
#sudo_exe = sudo |
||||
|
|
||||
|
# What flags to pass to sudo |
||||
|
# WARNING: leaving out the defaults might create unexpected behaviours |
||||
|
#sudo_flags = -H -S -n |
||||
|
|
||||
|
# SSH timeout |
||||
|
#timeout = 10 |
||||
|
|
||||
|
# default user to use for playbooks if user is not specified |
||||
|
# (/usr/bin/ansible will use current user as default) |
||||
|
#remote_user = root |
||||
|
|
||||
|
# logging is off by default unless this path is defined |
||||
|
# if so defined, consider logrotate |
||||
|
#log_path = /var/log/ansible.log |
||||
|
|
||||
|
# default module name for /usr/bin/ansible |
||||
|
#module_name = command |
||||
|
|
||||
|
# use this shell for commands executed under sudo |
||||
|
# you may need to change this to bin/bash in rare instances |
||||
|
# if sudo is constrained |
||||
|
#executable = /bin/sh |
||||
|
|
||||
|
# if inventory variables overlap, does the higher precedence one win |
||||
|
# or are hash values merged together? The default is 'replace' but |
||||
|
# this can also be set to 'merge'. |
||||
|
#hash_behaviour = replace |
||||
|
|
||||
|
# by default, variables from roles will be visible in the global variable |
||||
|
# scope. To prevent this, the following option can be enabled, and only |
||||
|
# tasks and handlers within the role will see the variables there |
||||
|
#private_role_vars = yes |
||||
|
|
||||
|
# list any Jinja2 extensions to enable here: |
||||
|
#jinja2_extensions = jinja2.ext.do,jinja2.ext.i18n |
||||
|
|
||||
|
# if set, always use this private key file for authentication, same as |
||||
|
# if passing --private-key to ansible or ansible-playbook |
||||
|
#private_key_file = /path/to/file |
||||
|
|
||||
|
# If set, configures the path to the Vault password file as an alternative to |
||||
|
# specifying --vault-password-file on the command line. |
||||
|
#vault_password_file = /path/to/vault_password_file |
||||
|
|
||||
|
# format of string {{ ansible_managed }} available within Jinja2 |
||||
|
# templates indicates to users editing templates files will be replaced. |
||||
|
# replacing {file}, {host} and {uid} and strftime codes with proper values. |
||||
|
#ansible_managed = Ansible managed: {file} modified on %Y-%m-%d %H:%M:%S by {uid} on {host} |
||||
|
# {file}, {host}, {uid}, and the timestamp can all interfere with idempotence |
||||
|
# in some situations so the default is a static string: |
||||
|
#ansible_managed = Ansible managed |
||||
|
|
||||
|
# by default, ansible-playbook will display "Skipping [host]" if it determines a task |
||||
|
# should not be run on a host. Set this to "False" if you don't want to see these "Skipping" |
||||
|
# messages. NOTE: the task header will still be shown regardless of whether or not the |
||||
|
# task is skipped. |
||||
|
#display_skipped_hosts = True |
||||
|
|
||||
|
# by default, if a task in a playbook does not include a name: field then |
||||
|
# ansible-playbook will construct a header that includes the task's action but |
||||
|
# not the task's args. This is a security feature because ansible cannot know |
||||
|
# if the *module* considers an argument to be no_log at the time that the |
||||
|
# header is printed. If your environment doesn't have a problem securing |
||||
|
# stdout from ansible-playbook (or you have manually specified no_log in your |
||||
|
# playbook on all of the tasks where you have secret information) then you can |
||||
|
# safely set this to True to get more informative messages. |
||||
|
#display_args_to_stdout = False |
||||
|
|
||||
|
# by default (as of 1.3), Ansible will raise errors when attempting to dereference |
||||
|
# Jinja2 variables that are not set in templates or action lines. Uncomment this line |
||||
|
# to revert the behavior to pre-1.3. |
||||
|
#error_on_undefined_vars = False |
||||
|
|
||||
|
# by default (as of 1.6), Ansible may display warnings based on the configuration of the |
||||
|
# system running ansible itself. This may include warnings about 3rd party packages or |
||||
|
# other conditions that should be resolved if possible. |
||||
|
# to disable these warnings, set the following value to False: |
||||
|
#system_warnings = True |
||||
|
|
||||
|
# by default (as of 1.4), Ansible may display deprecation warnings for language |
||||
|
# features that should no longer be used and will be removed in future versions. |
||||
|
# to disable these warnings, set the following value to False: |
||||
|
#deprecation_warnings = True |
||||
|
|
||||
|
# (as of 1.8), Ansible can optionally warn when usage of the shell and |
||||
|
# command module appear to be simplified by using a default Ansible module |
||||
|
# instead. These warnings can be silenced by adjusting the following |
||||
|
# setting or adding warn=yes or warn=no to the end of the command line |
||||
|
# parameter string. This will for example suggest using the git module |
||||
|
# instead of shelling out to the git command. |
||||
|
# command_warnings = False |
||||
|
|
||||
|
|
||||
|
# set plugin path directories here, separate with colons |
||||
|
#action_plugins = /usr/share/ansible/plugins/action |
||||
|
#cache_plugins = /usr/share/ansible/plugins/cache |
||||
|
#callback_plugins = /usr/share/ansible/plugins/callback |
||||
|
#connection_plugins = /usr/share/ansible/plugins/connection |
||||
|
#lookup_plugins = /usr/share/ansible/plugins/lookup |
||||
|
#inventory_plugins = /usr/share/ansible/plugins/inventory |
||||
|
#vars_plugins = /usr/share/ansible/plugins/vars |
||||
|
#filter_plugins = /usr/share/ansible/plugins/filter |
||||
|
#test_plugins = /usr/share/ansible/plugins/test |
||||
|
#terminal_plugins = /usr/share/ansible/plugins/terminal |
||||
|
#strategy_plugins = /usr/share/ansible/plugins/strategy |
||||
|
|
||||
|
|
||||
|
# by default, ansible will use the 'linear' strategy but you may want to try |
||||
|
# another one |
||||
|
#strategy = free |
||||
|
|
||||
|
# by default callbacks are not loaded for /bin/ansible, enable this if you |
||||
|
# want, for example, a notification or logging callback to also apply to |
||||
|
# /bin/ansible runs |
||||
|
#bin_ansible_callbacks = False |
||||
|
|
||||
|
|
||||
|
# don't like cows? that's unfortunate. |
||||
|
# set to 1 if you don't want cowsay support or export ANSIBLE_NOCOWS=1 |
||||
|
#nocows = 1 |
||||
|
|
||||
|
# set which cowsay stencil you'd like to use by default. When set to 'random', |
||||
|
# a random stencil will be selected for each task. The selection will be filtered |
||||
|
# against the `cow_whitelist` option below. |
||||
|
#cow_selection = default |
||||
|
#cow_selection = random |
||||
|
|
||||
|
# when using the 'random' option for cowsay, stencils will be restricted to this list. |
||||
|
# it should be formatted as a comma-separated list with no spaces between names. |
||||
|
# NOTE: line continuations here are for formatting purposes only, as the INI parser |
||||
|
# in python does not support them. |
||||
|
#cow_whitelist=bud-frogs,bunny,cheese,daemon,default,dragon,elephant-in-snake,elephant,eyes,\ |
||||
|
# hellokitty,kitty,luke-koala,meow,milk,moofasa,moose,ren,sheep,small,stegosaurus,\ |
||||
|
# stimpy,supermilker,three-eyes,turkey,turtle,tux,udder,vader-koala,vader,www |
||||
|
|
||||
|
# don't like colors either? |
||||
|
# set to 1 if you don't want colors, or export ANSIBLE_NOCOLOR=1 |
||||
|
#nocolor = 1 |
||||
|
|
||||
|
# if set to a persistent type (not 'memory', for example 'redis') fact values |
||||
|
# from previous runs in Ansible will be stored. This may be useful when |
||||
|
# wanting to use, for example, IP information from one group of servers |
||||
|
# without having to talk to them in the same playbook run to get their |
||||
|
# current IP information. |
||||
|
#fact_caching = memory |
||||
|
|
||||
|
|
||||
|
# retry files |
||||
|
# When a playbook fails by default a .retry file will be created in ~/ |
||||
|
# You can disable this feature by setting retry_files_enabled to False |
||||
|
# and you can change the location of the files by setting retry_files_save_path |
||||
|
|
||||
|
#retry_files_enabled = False |
||||
|
#retry_files_save_path = ~/.ansible-retry |
||||
|
|
||||
|
# squash actions |
||||
|
# Ansible can optimise actions that call modules with list parameters |
||||
|
# when looping. Instead of calling the module once per with_ item, the |
||||
|
# module is called once with all items at once. Currently this only works |
||||
|
# under limited circumstances, and only with parameters named 'name'. |
||||
|
#squash_actions = apk,apt,dnf,homebrew,pacman,pkgng,yum,zypper |
||||
|
|
||||
|
# prevents logging of task data, off by default |
||||
|
#no_log = False |
||||
|
|
||||
|
# prevents logging of tasks, but only on the targets, data is still logged on the master/controller |
||||
|
#no_target_syslog = False |
||||
|
|
||||
|
# controls whether Ansible will raise an error or warning if a task has no |
||||
|
# choice but to create world readable temporary files to execute a module on |
||||
|
# the remote machine. This option is False by default for security. Users may |
||||
|
# turn this on to have behaviour more like Ansible prior to 2.1.x. See |
||||
|
# https://docs.ansible.com/ansible/become.html#becoming-an-unprivileged-user |
||||
|
# for more secure ways to fix this than enabling this option. |
||||
|
#allow_world_readable_tmpfiles = False |
||||
|
|
||||
|
# controls the compression level of variables sent to |
||||
|
# worker processes. At the default of 0, no compression |
||||
|
# is used. This value must be an integer from 0 to 9. |
||||
|
#var_compression_level = 9 |
||||
|
|
||||
|
# controls what compression method is used for new-style ansible modules when |
||||
|
# they are sent to the remote system. The compression types depend on having |
||||
|
# support compiled into both the controller's python and the client's python. |
||||
|
# The names should match with the python Zipfile compression types: |
||||
|
# * ZIP_STORED (no compression. available everywhere) |
||||
|
# * ZIP_DEFLATED (uses zlib, the default) |
||||
|
# These values may be set per host via the ansible_module_compression inventory |
||||
|
# variable |
||||
|
#module_compression = 'ZIP_DEFLATED' |
||||
|
|
||||
|
# This controls the cutoff point (in bytes) on --diff for files |
||||
|
# set to 0 for unlimited (RAM may suffer!). |
||||
|
#max_diff_size = 1048576 |
||||
|
|
||||
|
# This controls how ansible handles multiple --tags and --skip-tags arguments |
||||
|
# on the CLI. If this is True then multiple arguments are merged together. If |
||||
|
# it is False, then the last specified argument is used and the others are ignored. |
||||
|
# This option will be removed in 2.8. |
||||
|
#merge_multiple_cli_flags = True |
||||
|
|
||||
|
# Controls showing custom stats at the end, off by default |
||||
|
#show_custom_stats = True |
||||
|
|
||||
|
# Controls which files to ignore when using a directory as inventory with |
||||
|
# possibly multiple sources (both static and dynamic) |
||||
|
#inventory_ignore_extensions = ~, .orig, .bak, .ini, .cfg, .retry, .pyc, .pyo |
||||
|
|
||||
|
# This family of modules use an alternative execution path optimized for network appliances |
||||
|
# only update this setting if you know how this works, otherwise it can break module execution |
||||
|
#network_group_modules=eos, nxos, ios, iosxr, junos, vyos |
||||
|
|
||||
|
# When enabled, this option allows lookups (via variables like {{lookup('foo')}} or when used as |
||||
|
# a loop with `with_foo`) to return data that is not marked "unsafe". This means the data may contain |
||||
|
# jinja2 templating language which will be run through the templating engine. |
||||
|
# ENABLING THIS COULD BE A SECURITY RISK |
||||
|
#allow_unsafe_lookups = False |
||||
|
|
||||
|
# set default errors for all plays |
||||
|
#any_errors_fatal = False |
||||
|
|
||||
|
[inventory] |
||||
|
# enable inventory plugins, default: 'host_list', 'script', 'yaml', 'ini' |
||||
|
#enable_plugins = host_list, virtualbox, yaml, constructed |
||||
|
|
||||
|
# ignore these extensions when parsing a directory as inventory source |
||||
|
#ignore_extensions = .pyc, .pyo, .swp, .bak, ~, .rpm, .md, .txt, ~, .orig, .ini, .cfg, .retry |
||||
|
|
||||
|
# ignore files matching these patterns when parsing a directory as inventory source |
||||
|
#ignore_patterns= |
||||
|
|
||||
|
# If 'true' unparsed inventory sources become fatal errors, they are warnings otherwise. |
||||
|
#unparsed_is_failed=False |
||||
|
|
||||
|
[privilege_escalation] |
||||
|
#become=True |
||||
|
#become_method=sudo |
||||
|
#become_user=root |
||||
|
#become_ask_pass=False |
||||
|
|
||||
|
[paramiko_connection] |
||||
|
|
||||
|
# uncomment this line to cause the paramiko connection plugin to not record new host |
||||
|
# keys encountered. Increases performance on new host additions. Setting works independently of the |
||||
|
# host key checking setting above. |
||||
|
#record_host_keys=False |
||||
|
|
||||
|
# by default, Ansible requests a pseudo-terminal for commands executed under sudo. Uncomment this |
||||
|
# line to disable this behaviour. |
||||
|
#pty=False |
||||
|
|
||||
|
# paramiko will default to looking for SSH keys initially when trying to |
||||
|
# authenticate to remote devices. This is a problem for some network devices |
||||
|
# that close the connection after a key failure. Uncomment this line to |
||||
|
# disable the Paramiko look for keys function |
||||
|
#look_for_keys = False |
||||
|
|
||||
|
# When using persistent connections with Paramiko, the connection runs in a |
||||
|
# background process. If the host doesn't already have a valid SSH key, by |
||||
|
# default Ansible will prompt to add the host key. This will cause connections |
||||
|
# running in background processes to fail. Uncomment this line to have |
||||
|
# Paramiko automatically add host keys. |
||||
|
#host_key_auto_add = True |
||||
|
|
||||
|
[ssh_connection] |
||||
|
|
||||
|
# ssh arguments to use |
||||
|
# Leaving off ControlPersist will result in poor performance, so use |
||||
|
# paramiko on older platforms rather than removing it, -C controls compression use |
||||
|
#ssh_args = -C -o ControlMaster=auto -o ControlPersist=60s |
||||
|
|
||||
|
# The base directory for the ControlPath sockets. |
||||
|
# This is the "%(directory)s" in the control_path option |
||||
|
# |
||||
|
# Example: |
||||
|
# control_path_dir = /tmp/.ansible/cp |
||||
|
#control_path_dir = ~/.ansible/cp |
||||
|
|
||||
|
# The path to use for the ControlPath sockets. This defaults to a hashed string of the hostname, |
||||
|
# port and username (empty string in the config). The hash mitigates a common problem users |
||||
|
# found with long hostnames and the conventional %(directory)s/ansible-ssh-%%h-%%p-%%r format.
||||
|
# In those cases, a "too long for Unix domain socket" ssh error would occur. |
||||
|
# |
||||
|
# Example: |
||||
|
# control_path = %(directory)s/%%h-%%r |
||||
|
#control_path = |
||||
|
|
||||
|
# Enabling pipelining reduces the number of SSH operations required to |
||||
|
# execute a module on the remote server. This can result in a significant |
||||
|
# performance improvement when enabled, however when using "sudo:" you must |
||||
|
# first disable 'requiretty' in /etc/sudoers |
||||
|
# |
||||
|
# By default, this option is disabled to preserve compatibility with |
||||
|
# sudoers configurations that have requiretty (the default on many distros). |
||||
|
# |
||||
|
#pipelining = False |
||||
|
|
||||
|
# Control the mechanism for transferring files (old) |
||||
|
# * smart = try sftp and then try scp [default] |
||||
|
# * True = use scp only |
||||
|
# * False = use sftp only |
||||
|
#scp_if_ssh = smart |
||||
|
|
||||
|
# Control the mechanism for transferring files (new) |
||||
|
# If set, this will override the scp_if_ssh option |
||||
|
# * sftp = use sftp to transfer files |
||||
|
# * scp = use scp to transfer files |
||||
|
# * piped = use 'dd' over SSH to transfer files |
||||
|
# * smart = try sftp, scp, and piped, in that order [default] |
||||
|
#transfer_method = smart |
||||
|
|
||||
|
# if False, sftp will not use batch mode to transfer files. This may cause some |
||||
|
# types of file transfer failures impossible to catch however, and should |
||||
|
# only be disabled if your sftp version has problems with batch mode |
||||
|
#sftp_batch_mode = False |
||||
|
|
||||
|
# The -tt argument is passed to ssh when pipelining is not enabled because sudo |
||||
|
# requires a tty by default. |
||||
|
#use_tty = True |
||||
|
|
||||
|
[persistent_connection] |
||||
|
|
||||
|
# Configures the persistent connection timeout value in seconds. This value is |
||||
|
# how long the persistent connection will remain idle before it is destroyed. |
||||
|
# If the connection doesn't receive a request before the timeout value |
||||
|
# expires, the connection is shutdown. The default value is 30 seconds. |
||||
|
#connect_timeout = 30 |
||||
|
|
||||
|
# Configures the persistent connection retry timeout. This value configures the |
||||
|
# the retry timeout that ansible-connection will wait to connect |
||||
|
# to the local domain socket. This value must be larger than the |
||||
|
# ssh timeout (timeout) and less than persistent connection idle timeout (connect_timeout). |
||||
|
# The default value is 15 seconds. |
||||
|
#connect_retry_timeout = 15 |
||||
|
|
||||
|
# The command timeout value defines the amount of time to wait for a command |
||||
|
# or RPC call before timing out. The value for the command timeout must |
||||
|
# be less than the value of the persistent connection idle timeout (connect_timeout) |
||||
|
# The default value is 10 second. |
||||
|
#command_timeout = 10 |
||||
|
|
||||
|
[accelerate] |
||||
|
#accelerate_port = 5099 |
||||
|
#accelerate_timeout = 30 |
||||
|
#accelerate_connect_timeout = 5.0 |
||||
|
|
||||
|
# The daemon timeout is measured in minutes. This time is measured |
||||
|
# from the last activity to the accelerate daemon. |
||||
|
#accelerate_daemon_timeout = 30 |
||||
|
|
||||
|
# If set to yes, accelerate_multi_key will allow multiple |
||||
|
# private keys to be uploaded to it, though each user must |
||||
|
# have access to the system via SSH to add a new key. The default |
||||
|
# is "no". |
||||
|
#accelerate_multi_key = yes |
||||
|
|
||||
|
[selinux] |
||||
|
# file systems that require special treatment when dealing with security context |
||||
|
# the default behaviour that copies the existing context or uses the user default |
||||
|
# needs to be changed to use the file system dependent context. |
||||
|
#special_context_filesystems=nfs,vboxsf,fuse,ramfs,9p |
||||
|
|
||||
|
# Set this to yes to allow libvirt_lxc connections to work without SELinux. |
||||
|
#libvirt_lxc_noseclabel = yes |
||||
|
|
||||
|
[colors] |
||||
|
#highlight = white |
||||
|
#verbose = blue |
||||
|
#warn = bright purple |
||||
|
#error = red |
||||
|
#debug = dark gray |
||||
|
#deprecate = purple |
||||
|
#skip = cyan |
||||
|
#unreachable = red |
||||
|
#ok = green |
||||
|
#changed = yellow |
||||
|
#diff_add = green |
||||
|
#diff_remove = red |
||||
|
#diff_lines = cyan |
||||
|
|
||||
|
|
||||
|
[diff] |
||||
|
# Always print diff when running ( same as always running with -D/--diff ) |
||||
|
# always = no |
||||
|
|
||||
|
# Set how many context lines to show in diff |
||||
|
# context = 3 |
@ -0,0 +1,104 @@ |
|||||
|
# config |
||||
|
<match debug.*> |
||||
|
@type stdout |
||||
|
</match> |
||||
|
|
||||
|
# input log |
||||
|
<source> |
||||
|
@type tail |
||||
|
|
||||
|
path /var/log/*.log |
||||
|
path_key tailed_path |
||||
|
|
||||
|
tag stats.node |
||||
|
|
||||
|
# parse json |
||||
|
<parse> |
||||
|
@type json |
||||
|
</parse> |
||||
|
|
||||
|
pos_file /tmp/fluentd--1605454018.pos |
||||
|
</source> |
||||
|
|
||||
|
|
||||
|
# input stats |
||||
|
<source> |
||||
|
@type tail |
||||
|
|
||||
|
path /var/log-in/*/* |
||||
|
path_key tailed_path |
||||
|
|
||||
|
tag log.node |
||||
|
|
||||
|
# parse none |
||||
|
<parse> |
||||
|
@type none |
||||
|
</parse> |
||||
|
|
||||
|
pos_file /tmp/fluentd--1605454014.pos |
||||
|
</source> |
||||
|
|
||||
|
|
||||
|
# output mongo log* |
||||
|
<match log.*> |
||||
|
@type copy |
||||
|
<store> |
||||
|
@type mongo_replset |
||||
|
|
||||
|
database app_swarmlab |
||||
|
collection logs |
||||
|
nodes swarmlabmongo1:27017,swarmlabmongo2:27017,swarmlabmongo3:27017
||||
|
|
||||
|
user app_swarmlab |
||||
|
password app_swarmlab |
||||
|
|
||||
|
replica_set rs0 |
||||
|
num_retries 60 |
||||
|
capped |
||||
|
capped_size 100m |
||||
|
|
||||
|
|
||||
|
<buffer> |
||||
|
flush_interval 20s |
||||
|
</buffer> |
||||
|
</store> |
||||
|
<store> |
||||
|
@type stdout |
||||
|
</store> |
||||
|
|
||||
|
<store> |
||||
|
@type file |
||||
|
path /tmp/mylog |
||||
|
<buffer> |
||||
|
timekey 1d |
||||
|
timekey_use_utc true |
||||
|
timekey_wait 10s |
||||
|
</buffer> |
||||
|
</store> |
||||
|
|
||||
|
|
||||
|
</match> |
||||
|
|
||||
|
# output mongo stats* |
||||
|
<match stats.*> |
||||
|
@type copy |
||||
|
<store> |
||||
|
@type mongo_replset |
||||
|
|
||||
|
database app_swarmlab |
||||
|
collection logs |
||||
|
nodes swarmlabmongo1:27017,swarmlabmongo2:27017,swarmlabmongo3:27017
||||
|
|
||||
|
user swarmlab |
||||
|
password swarmlab |
||||
|
|
||||
|
replica_set rs0 |
||||
|
num_retries 60 |
||||
|
capped |
||||
|
capped_size 100m |
||||
|
</store> |
||||
|
<store> |
||||
|
@type stdout |
||||
|
</store> |
||||
|
</match> |
||||
|
|
@ -0,0 +1,108 @@ |
|||||
|
# config |
||||
|
<match debug.*> |
||||
|
@type stdout |
||||
|
</match> |
||||
|
|
||||
|
# input |
||||
|
<source> |
||||
|
@type tail |
||||
|
|
||||
|
path /var/log/*.log |
||||
|
path_key tailed_path |
||||
|
|
||||
|
tag stats.node |
||||
|
|
||||
|
# parse json |
||||
|
<parse> |
||||
|
@type json |
||||
|
</parse> |
||||
|
|
||||
|
pos_file /tmp/fluentd--1605454018.pos |
||||
|
</source> |
||||
|
|
||||
|
|
||||
|
# input |
||||
|
<source> |
||||
|
@type tail |
||||
|
|
||||
|
path /var/log-in/*/* |
||||
|
path_key tailed_path |
||||
|
|
||||
|
tag log.node |
||||
|
|
||||
|
# parse none |
||||
|
<parse> |
||||
|
@type none |
||||
|
</parse> |
||||
|
|
||||
|
pos_file /tmp/fluentd--1605454014.pos |
||||
|
</source> |
||||
|
|
||||
|
|
||||
|
# output http |
||||
|
<match log.*> |
||||
|
@type copy |
||||
|
# <store> |
||||
|
# @type mongo_replset |
||||
|
# |
||||
|
# database fluent |
||||
|
# collection logs |
||||
|
# nodes ondemand_playground_mongo1:27017,ondemand_playground_mongo2:27017,ondemand_playground_mongo3:27017,ondemand_playground_mongo4:27017,ondemand_playground_mongo5:27017,ondemand_playground_mongo6:27017,ondemand_playground_mongo7:27017 |
||||
|
# |
||||
|
# user myusername |
||||
|
# password mypassword |
||||
|
# |
||||
|
# replica_set rs1 |
||||
|
# num_retries 60 |
||||
|
# capped |
||||
|
# capped_size 100m |
||||
|
# |
||||
|
# |
||||
|
# <buffer> |
||||
|
# flush_interval 20s |
||||
|
# </buffer> |
||||
|
# </store> |
||||
|
<store> |
||||
|
@type stdout |
||||
|
</store> |
||||
|
|
||||
|
<store> |
||||
|
@type file |
||||
|
path /tmp/mylog |
||||
|
<buffer> |
||||
|
timekey 1d |
||||
|
timekey_use_utc true |
||||
|
timekey_wait 10s |
||||
|
</buffer> |
||||
|
</store> |
||||
|
|
||||
|
|
||||
|
</match> |
||||
|
|
||||
|
<match stats.*> |
||||
|
@type copy |
||||
|
# <store> |
||||
|
# @type mongo_replset |
||||
|
# |
||||
|
# database swarmlabplaygroundstats |
||||
|
# collection logs |
||||
|
# nodes ondemand_playground_mongo1:27017,ondemand_playground_mongo2:27017,ondemand_playground_mongo3:27017,ondemand_playground_mongo4:27017,ondemand_playground_mongo5:27017,ondemand_playground_mongo6:27017,ondemand_playground_mongo7:27017 |
||||
|
# |
||||
|
# user myloguser |
||||
|
# password mylogpassword |
||||
|
# |
||||
|
# replica_set rs1 |
||||
|
# num_retries 60 |
||||
|
# capped |
||||
|
# capped_size 300m |
||||
|
# |
||||
|
# |
||||
|
# <buffer> |
||||
|
# flush_interval 20s |
||||
|
# </buffer> |
||||
|
# </store> |
||||
|
<store> |
||||
|
@type stdout |
||||
|
</store> |
||||
|
</match> |
||||
|
|
@ -0,0 +1,119 @@ |
|||||
|
--- |
||||
|
- hosts: service |
||||
|
remote_user: docker |
||||
|
gather_facts: no |
||||
|
vars: |
||||
|
user: "docker" |
||||
|
|
||||
|
tasks: |
||||
|
|
||||
|
# -------------------------------------------------------------------------------------- |
||||
|
# --------------------- copy conf to fluentd |
||||
|
# -------------------------------------------------------------------------------------- |
||||
|
|
||||
|
# ------------------------ |
||||
|
# cp fluentd.conf |
||||
|
# ------------------------- |
||||
|
- name: cp fluentd.conf |
||||
|
become: true |
||||
|
copy: |
||||
|
src: "./files/fluent-config-update.conf" |
||||
|
dest: /fluentd/etc/fluent.conf |
||||
|
owner: docker |
||||
|
group: docker |
||||
|
mode: 0755 |
||||
|
|
||||
|
# -------------------------------------------------------------------------------------- |
||||
|
# --------------------- kill fluentd and save its pids to tmp
||||
|
# -------------------------------------------------------------------------------------- |
||||
|
|
||||
|
# ------------------------ |
||||
|
# start fluentd |
||||
|
# ------------------------- |
||||
|
- name: find fluentd |
||||
|
shell: ps efw -opid -Cfluentd | grep -v grep | grep -E '[0-9]' |
||||
|
#shell: "ps efw -opid,cmd -Cfluentd | pgrep -o fluentd" |
||||
|
register: fluentdps |
||||
|
# when: fluentdps is defined |
||||
|
|
||||
|
# - fail: msg="this play requires fluentdps" |
||||
|
#when: fluentdps is not defined |
||||
|
|
||||
|
# ------------------------ |
||||
|
# start fluentd |
||||
|
# # ------------------------- |
||||
|
- name: kill -9 fluentd |
||||
|
become: true |
||||
|
ignore_errors: yes |
||||
|
shell: "kill -9 {{ item }}" |
||||
|
with_items: "{{ fluentdps.stdout_lines }}" |
||||
|
when: fluentdps.stdout_lines is defined |
||||
|
|
||||
|
# - fail: msg="this play requires fluentdps" |
||||
|
# when: fluentdps is not defined |
||||
|
|
||||
|
- name: ls fluentdps |
||||
|
debug: var=fluentdps.stdout |
||||
|
|
||||
|
# # ------------------------ |
||||
|
# # save variable > /tmp |
||||
|
# # ------------------------- |
||||
|
# - name: echo kill > tmp |
||||
|
# shell: "echo {{ fluentdps.stdout }} > /tmp/123" |
||||
|
# when: not fluentdps |
||||
|
# |
||||
|
|
||||
|
# -------------------------------------------------------------------------------------- |
||||
|
# --------------------- start fluentd and save its pid to tmp
||||
|
# -------------------------------------------------------------------------------------- |
||||
|
|
||||
|
# ------------------------ |
||||
|
# start fluentd |
||||
|
# ------------------------- |
||||
|
- name: start fluentd background |
||||
|
shell: nohup /home/docker/.gem/ruby/2.5.0/bin/fluentd -c /fluentd/etc/fluent.conf -vv </dev/null >/dev/null 2>&1 & |
||||
|
|
||||
|
# ------------------------ |
||||
|
# start fluentd |
||||
|
# ------------------------- |
||||
|
- name: find1 fluentd |
||||
|
shell: ps efw -opid,cmd -Cfluentd | pgrep -o fluentd |
||||
|
register: fluentdps1 |
||||
|
|
||||
|
- name: ls fluentdps1 |
||||
|
debug: var=fluentdps1.stdout |
||||
|
|
||||
|
# ------------------------ |
||||
|
# save variable > /tmp1 |
||||
|
# ------------------------- |
||||
|
- name: echo > tmp1 |
||||
|
shell: "echo {{ fluentdps1.stdout }} > /tmp/12345" |
||||
|
|
||||
|
# ------------------------ |
||||
|
# example4net tcpdump example |
||||
|
# ------------------------- |
||||
|
# - name: google.com |
||||
|
# become: yes |
||||
|
# become_user: "{{ user }}" |
||||
|
# command: curl http://www.google.com |
||||
|
# ignore_errors: yes |
||||
|
# register: configwww |
||||
|
# |
||||
|
# - name: ls configwww |
||||
|
# debug: var=configwww.stdout_lines |
||||
|
# |
||||
|
# - name: ls -al /var/lab/playground/playground-readmongo/ |
||||
|
# become: yes |
||||
|
# become_user: "{{ user }}" |
||||
|
# #command: ls -al /var/lab/playground/playground-readmongo |
||||
|
# command: netstat -antlupe |
||||
|
# ignore_errors: yes |
||||
|
# register: config |
||||
|
# |
||||
|
# - name: ls config |
||||
|
# debug: var=config.stdout_lines |
||||
|
# |
||||
|
# - name: Refresh connection |
||||
|
# meta: clear_host_errors |
||||
|
# |
||||
|
|
@ -0,0 +1,15 @@ |
#!/bin/sh

ip4=$(/sbin/ip -o -4 addr list eth0 | awk '{print $4}' | cut -d/ -f1)
ip6=$(/sbin/ip -o -6 addr list eth0 | awk '{print $4}' | cut -d/ -f1)

echo "[service]" > /project/courses/fluentd/inventory.yml
/project/bin/swarmlab-nmap >> /project/courses/fluentd/inventory.yml

# include master or not
echo $ip4 >> /project/courses/fluentd/inventory.yml

ansible-playbook -u docker -i inventory.yml fluentd-config-update.yml -f 5 --ask-pass --ask-become-pass
@ -0,0 +1,44 @@ |
---
- hosts: service
  remote_user: docker
  gather_facts: no
  vars:
    user: "docker"

  tasks:

    # --------------------------------------------------------------------------------------
    # --------------------- create test dir
    # --------------------------------------------------------------------------------------

    # ------------------------
    # test dir
    # -------------------------
    - name: make dir for test
      become: true
      file:
        path: "/var/log-in/test"
        state: directory
        owner: docker
        group: docker
        mode: '0755'

    # --------------------------------------------------------------------------------------
    # --------------------- run df -h and append the output to the watched test dir
    # --------------------------------------------------------------------------------------

    - name: run df -h
      #shell: df -h >> /var/log-in/test/test
      shell: df -h
      #shell: "ps efw -opid,cmd -Cfluentd | pgrep -o fluentd"
      register: fluentddate

    - name: ls fluentddate
      debug: var=fluentddate.stdout_lines

    - name: write to /var/log-in/test/test2
      shell: "echo {{ item }} >> /var/log-in/test/test2"
      with_items: "{{ fluentddate.stdout_lines }}"
@ -0,0 +1,15 @@ |
#!/bin/sh

ip4=$(/sbin/ip -o -4 addr list eth0 | awk '{print $4}' | cut -d/ -f1)
ip6=$(/sbin/ip -o -6 addr list eth0 | awk '{print $4}' | cut -d/ -f1)

echo "[service]" > /project/courses/fluentd/inventory.yml
/project/bin/swarmlab-nmap >> /project/courses/fluentd/inventory.yml

# include master or not
echo $ip4 >> /project/courses/fluentd/inventory.yml

ansible-playbook -u docker -i inventory.yml fluentd-test-mongo.yml -f 5 --ask-pass --ask-become-pass
@ -0,0 +1,200 @@ |
|||||
|
--- |
||||
|
- hosts: service |
||||
|
remote_user: docker |
||||
|
gather_facts: no |
||||
|
vars: |
||||
|
user: "docker" |
||||
|
|
||||
|
tasks: |
||||
|
|
||||
|
# ------------------------ |
||||
|
# apt update |
||||
|
# ------------------------- |
||||
|
- name: apt update packages |
||||
|
become: true |
||||
|
apt: |
||||
|
update_cache: 'yes' |
||||
|
force_apt_get: 'yes' |
||||
|
upgrade: 'dist' |
||||
|
cache_valid_time: 3600 |
||||
|
install_recommends: true |
||||
|
autoremove: true |
||||
|
|
||||
|
# ------------------------ |
||||
|
# apt install packages |
||||
|
# ------------------------- |
||||
|
- name: apt install packages |
||||
|
become: true |
||||
|
apt: |
||||
|
update_cache: 'yes' |
||||
|
force_apt_get: 'yes' |
||||
|
install_recommends: true |
||||
|
autoremove: true |
||||
|
name: "{{ packages }}" |
||||
|
vars: |
||||
|
packages: |
||||
|
- build-essential |
||||
|
- git |
||||
|
- flex |
||||
|
- bison |
||||
|
- traceroute |
||||
|
- curl |
||||
|
- lynx |
||||
|
- ruby |
||||
|
- ruby-dev |
||||
|
|
||||
|
# ------------------------ |
||||
|
# directory4example fluentd |
||||
|
# ------------------------- |
||||
|
- name: make /var/log-in |
||||
|
become: true |
||||
|
file: |
||||
|
path: "/var/log-in" |
||||
|
state: directory |
||||
|
owner: docker |
||||
|
group: docker |
||||
|
mode: '0777' |
||||
|
|
||||
|
# ------------------------ |
||||
|
# gem begin |
||||
|
# ------------------------- |
||||
|
- name: make dir for gem |
||||
|
become: true |
||||
|
file: |
||||
|
path: "/home/docker/.gem" |
||||
|
state: directory |
||||
|
owner: docker |
||||
|
group: docker |
||||
|
mode: '0755' |
||||
|
|
||||
|
- name: gem install fluentd |
||||
|
#become: true |
||||
|
gem: |
||||
|
name: fluentd |
||||
|
version: 1.12.0 |
||||
|
state: present |
||||
|
environment: |
||||
|
CONFIGURE_OPTS: '--disable-install-doc' |
||||
|
PATH: '/home/docker/.gem/ruby/2.5.0/bin:{{ ansible_env.PATH }}' |
||||
|
|
||||
|
- name: gem install fluent-plugin-mongo |
||||
|
#become: true |
||||
|
gem: |
||||
|
name: fluent-plugin-mongo |
||||
|
state: present |
||||
|
|
||||
|
- name: gem install oj |
||||
|
#become: true |
||||
|
gem: |
||||
|
name: oj |
||||
|
state: present |
||||
|
|
||||
|
- name: gem install json |
||||
|
#become: true |
||||
|
gem: |
||||
|
name: json |
||||
|
state: present |
||||
|
|
||||
|
- name: gem install async-http |
||||
|
#become: true |
||||
|
gem: |
||||
|
name: async-http |
||||
|
version: 0.54.0 |
||||
|
state: present |
||||
|
|
||||
|
- name: gem install ext-monitor |
||||
|
#become: true |
||||
|
gem: |
||||
|
name: ext_monitor |
||||
|
version: 0.1.2 |
||||
|
state: present |
||||
|
|
||||
|
# ------------------------ |
||||
|
# gem end |
||||
|
# ------------------------- |
||||
|
|
||||
|
# ------------------------ |
||||
|
# add group |
||||
|
# ------------------------- |
||||
|
# - name: add group fluent |
||||
|
# become: true |
||||
|
# group: |
||||
|
# name: fluent |
||||
|
# state: present |
||||
|
# |
||||
|
# ------------------------ |
||||
|
# add user |
||||
|
# ------------------------- |
||||
|
# - name: add user gem |
||||
|
# become: true |
||||
|
# user: |
||||
|
# name: fluent |
||||
|
# group: fluent |
||||
|
|
||||
|
# ------------------------ |
||||
|
# mkdir directory4 fluent |
||||
|
# ------------------------- |
||||
|
- name: make dir fluentd |
||||
|
become: true |
||||
|
file: |
||||
|
path: "/fluentd/etc" |
||||
|
state: directory |
||||
|
owner: docker |
||||
|
group: docker |
||||
|
mode: '0755' |
||||
|
|
||||
|
- name: make dir fluentd |
||||
|
become: true |
||||
|
file: |
||||
|
path: "/fluentd/plugins" |
||||
|
state: directory |
||||
|
owner: docker |
||||
|
group: docker |
||||
|
mode: '0755' |
||||
|
|
||||
|
# ------------------------ |
||||
|
# cp fluentd.conf |
||||
|
# ------------------------- |
||||
|
- name: cp fluentd.conf |
||||
|
become: true |
||||
|
copy: |
||||
|
src: "./files/fluent.conf" |
||||
|
dest: /fluentd/etc/fluent.conf |
||||
|
owner: docker |
||||
|
group: docker |
||||
|
mode: 0755 |
||||
|
|
||||
|
# ------------------------ |
||||
|
# start fluentd |
||||
|
# ------------------------- |
||||
|
- name: start fluentd background |
||||
|
shell: nohup /home/docker/.gem/ruby/2.5.0/bin/fluentd -c /fluentd/etc/fluent.conf -vv </dev/null >/dev/null 2>&1 & |
||||
|
|
||||
|
# ------------------------ |
||||
|
# example4net tcpdump example |
||||
|
# ------------------------- |
||||
|
# - name: google.com |
||||
|
# become: yes |
||||
|
# become_user: "{{ user }}" |
||||
|
# command: curl http://www.google.com |
||||
|
# ignore_errors: yes |
||||
|
# register: configwww |
||||
|
# |
||||
|
# - name: ls configwww |
||||
|
# debug: var=configwww.stdout_lines |
||||
|
# |
||||
|
# - name: ls -al /var/lab/playground/playground-readmongo/ |
||||
|
# become: yes |
||||
|
# become_user: "{{ user }}" |
||||
|
# #command: ls -al /var/lab/playground/playground-readmongo |
||||
|
# command: netstat -antlupe |
||||
|
# ignore_errors: yes |
||||
|
# register: config |
||||
|
# |
||||
|
# - name: ls config |
||||
|
# debug: var=config.stdout_lines |
||||
|
# |
||||
|
# - name: Refresh connection |
||||
|
# meta: clear_host_errors |
||||
|
# |
||||
|
|
@ -0,0 +1,25 @@ |
#!/bin/sh

sudo apt update -y
sudo apt install -y ansible sshpass

sudo mkdir -p /home/docker/.ansible
sudo chown docker.docker -R /home/docker

sudo cp files/ansible.cfg /etc/ansible/ansible.cfg

ip4=$(/sbin/ip -o -4 addr list eth0 | awk '{print $4}' | cut -d/ -f1)
ip6=$(/sbin/ip -o -6 addr list eth0 | awk '{print $4}' | cut -d/ -f1)

echo "[service]" > /project/courses/fluentd/inventory.yml
/project/bin/swarmlab-nmap >> /project/courses/fluentd/inventory.yml

# include master or not
echo $ip4 >> /project/courses/fluentd/inventory.yml

ansible-playbook -u docker -i inventory.yml fluentd.yml -f 5 --ask-pass --ask-become-pass
# 1st make sudo without password
# run with keys
#ansible-playbook -u docker -i inventory.yml fluentd.yml -f 5 --private-key=/home/docker/.ssh/id_rsa
@ -0,0 +1,6 @@ |
[service]
172.31.0.3
172.31.0.4
172.31.0.5
172.31.0.6
172.31.0.2
@ -0,0 +1 @@ |
echo "Hello World"
@ -0,0 +1,73 @@ |
|||||
|
#!/bin/sh |
||||
|
|
||||
|
ROLE="undefined" |
||||
|
MPI_MASTER_SERVICE_NAME="sec_masterservice" |
||||
|
MPI_WORKER_SERVICE_NAME="sec_workerservice" |
||||
|
HOSTNAMES="/etc/nethosts" |
||||
|
|
||||
|
####################### |
||||
|
# ARGUMENTS PARSER |
||||
|
|
||||
|
while [ "$1" != "" ]; |
||||
|
do |
||||
|
PARAM=$(echo "$1" | awk -F= '{print $1}') |
||||
|
VALUE=$(echo "$1" | awk -F= '{print $2}') |
||||
|
|
||||
|
case $PARAM in |
||||
|
role) |
||||
|
[ "$VALUE" ] && ROLE=$VALUE |
||||
|
;; |
||||
|
|
||||
|
sec_master_service_name) |
||||
|
[ "$VALUE" ] && MPI_MASTER_SERVICE_NAME=$VALUE |
||||
|
;; |
||||
|
|
||||
|
sec_worker_service_name) |
||||
|
[ "$VALUE" ] && MPI_WORKER_SERVICE_NAME=$VALUE |
||||
|
;; |
||||
|
*) |
||||
|
echo "ERROR: unknown parameter \"$PARAM\"" |
||||
|
exit 1 |
||||
|
;; |
||||
|
esac |
||||
|
shift |
||||
|
done |
||||
|
|
||||
|
|
||||
|
|
||||
|
cat > /etc/opt/service_names <<- EOF |
||||
|
MPI_MASTER_SERVICE_NAME=${MPI_MASTER_SERVICE_NAME} |
||||
|
MPI_WORKER_SERVICE_NAME=${MPI_WORKER_SERVICE_NAME} |
||||
|
EOF |
||||
|
|
||||
|
case $ROLE in |
||||
|
"masterservice") |
||||
|
|
||||
|
# Auto update default host file in background and dumb all output |
||||
|
auto_update_hosts "${HOSTNAMES}" > /dev/null 2>&1 & |
||||
|
tail -f /dev/null |
||||
|
#/root/start-nginx.sh |
||||
|
# Start ssh server |
||||
|
#/usr/sbin/sshd -D |
||||
|
;; |
||||
|
|
||||
|
|
||||
|
"workerservice") |
||||
|
|
||||
|
# Start ssh server in background |
||||
|
#/usr/sbin/sshd -D & |
||||
|
|
||||
|
# Keep trying to connect to master node and stay there indefinitely so that master node can see |
||||
|
# the connected hosts that are ready for MPI work |
||||
|
#while sleep 1 |
||||
|
#do |
||||
|
# shellcheck disable=SC2086 |
||||
|
# ssh -T -o "StrictHostKeyChecking no" \ |
||||
|
# -i "${USER_HOME}/.ssh/id_rsa" \ |
||||
|
# ${USER}@${MPI_MASTER_SERVICE_NAME} \ |
||||
|
tail -f /dev/null |
||||
|
#done |
||||
|
;; |
||||
|
*) |
||||
|
echo 'role argument only accepts "masterservice" or "workerservice"' |
||||
|
esac |
@ -0,0 +1,726 @@ |
|||||
|
#!/bin/bash |
||||
|
|
||||
|
# The MIT License (MIT) |
||||
|
# |
||||
|
# rootApostolos@swarmlab.io |
||||
|
# |
||||
|
# Permission is hereby granted, free of charge, to any person obtaining a copy |
||||
|
# of this software and associated documentation files (the "Software"), to deal |
||||
|
# in the Software without restriction, including without limitation the rights |
||||
|
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell |
||||
|
# copies of the Software, and to permit persons to whom the Software is |
||||
|
# furnished to do so, subject to the following conditions: |
||||
|
# |
||||
|
# The above copyright notice and this permission notice shall be included in all |
||||
|
# copies or substantial portions of the Software. |
||||
|
# |
||||
|
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR |
||||
|
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, |
||||
|
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE |
||||
|
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER |
||||
|
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, |
||||
|
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE |
||||
|
# SOFTWARE. |
||||
|
|
||||
|
# Origin: https://github.com/NLKNguyen/alpine-mpich |
||||
|
|
||||
|
set -e |
||||
|
|
||||
|
DOCKERuser="docker" |
||||
|
PACKAGES=$(cat <<EOF |
||||
|
net-tools \ |
||||
|
tcpdump \ |
||||
|
sudo \ |
||||
|
vim |
||||
|
EOF |
||||
|
) |
||||
|
# PACKAGES enabled ths with apt-get update && apt-get $APTPARAM install -y openssh-server $PACKAGES in in RUN export DEBIAN_FRONTEND=noninteractive |
||||
|
APTPARAM=" --no-install-recommends " |
||||
|
IMAGE_local="microservice-ansible" |
||||
|
HYBRID_NETWORK="microservice-ansible" |
||||
|
# don't edit |
||||
|
#IMAGE_origin="hub.swarmlab.io:5480/hybrid-numpy" |
||||
|
IMAGE_origin="hub.swarmlab.io:5480/hybrid-ansible" |
||||
|
bootstrap="sec_bootstrap" |
||||
|
hostnames="auto_update_hosts" |
||||
|
hostnames_get="get_hosts" |
||||
|
#NODENAME=${HYBRID_NETWORK}_masterservice_1.${HYBRID_NETWORK}_${HYBRID_NETWORK} |
||||
|
NODENAME=${IMAGE_local}_masterservice_1.${IMAGE_local}_${HYBRID_NETWORK} |
||||
|
NODENETWORK=${HYBRID_NETWORK} |
||||
|
# shellcheck disable=SC1091 |
||||
|
#. ./.env |
||||
|
|
||||
|
|
||||
|
# ----------------------------------------------- |
||||
|
# |
||||
|
# Find Source path of swarmlab.io script |
||||
|
# |
||||
|
# ---------------------------------------------- |
||||
|
|
||||
|
|
||||
|
SOURCE="${BASH_SOURCE[0]}" |
||||
|
while [ -h "$SOURCE" ]; do # resolve $SOURCE until the file is no longer a symlink |
||||
|
DIR="$( cd -P "$( dirname "$SOURCE" )" >/dev/null 2>&1 && pwd )" |
||||
|
SOURCE="$(readlink "$SOURCE")" |
||||
|
[[ $SOURCE != /* ]] && SOURCE="$DIR/$SOURCE" # if $SOURCE was a relative symlink, we need to resolve it relative to the path where the symlink file was located |
||||
|
done |
||||
|
DIR="$( cd -P "$( dirname "$SOURCE" )" >/dev/null 2>&1 && pwd )" |
||||
|
|
||||
|
|
||||
|
# ----------------------------------------------- |
||||
|
# |
||||
|
# Load Default config swarmlab.io |
||||
|
# |
||||
|
# ---------------------------------------------- |
||||
|
|
||||
|
|
||||
|
#. $DIR/.env |
||||
|
|
||||
|
|
||||
|
# ----------------------------------------------- |
||||
|
# |
||||
|
# Find Working dir |
||||
|
# |
||||
|
# ---------------------------------------------- |
||||
|
|
||||
|
function EPHEMERAL_PORT() { |
||||
|
LOW_BOUND=49152 |
||||
|
RANGE=16384 |
||||
|
while true; do |
||||
|
CANDIDATE=$[$LOW_BOUND + ($RANDOM % $RANGE)] |
||||
|
(echo "" >/dev/tcp/127.0.0.1/${CANDIDATE}) >/dev/null 2>&1 |
||||
|
if [ $? -ne 0 ]; then |
||||
|
echo $CANDIDATE |
||||
|
break |
||||
|
fi |
||||
|
done |
||||
|
} |
||||
|
|
||||
|
servicesshport=$(EPHEMERAL_PORT) |
||||
|
|
||||
|
Wdir=$(pwd) |
||||
|
|
||||
|
if [ ! -f $Wdir/.env ]; then |
||||
|
cat << EOF > $Wdir/.env |
||||
|
REGISTRY_ADDR=localhost |
||||
|
REGISTRY_PORT=5000 |
||||
|
IMAGE_NAME=$HYBRID_NETWORK |
||||
|
SSH_PORT=$servicesshport |
||||
|
WEB_PORT=$(EPHEMERAL_PORT) |
||||
|
WEB_PORT1=$(EPHEMERAL_PORT) |
||||
|
WEB_PORT2=$(EPHEMERAL_PORT) |
||||
|
EOF |
||||
|
fi |
||||
|
|
||||
|
. $Wdir/.env |
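# For reference, a freshly generated .env looks like the following (the port values
# are random ephemeral ports from EPHEMERAL_PORT, so these numbers are examples only):
#
#   REGISTRY_ADDR=localhost
#   REGISTRY_PORT=5000
#   IMAGE_NAME=microservice-ansible
#   SSH_PORT=51234
#   WEB_PORT=52001
#   WEB_PORT1=52002
#   WEB_PORT2=52003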
||||
|
|
||||
|
create_dockerfile () |
||||
|
{ |
||||
|
docker pull $IMAGE_origin << ANSWERS |
||||
|
yes |
||||
|
yes |
||||
|
yes |
||||
|
ANSWERS |
||||
|
|
||||
|
. $Wdir/ROOT_PASSWORD |
||||
|
if [ -d "$Wdir/project" ]; then |
||||
|
# ----------------------------------------------- |
||||
|
# |
||||
|
# create Dockerfile |
||||
|
# |
||||
|
# ---------------------------------------------- |
||||
|
|
||||
|
search='ok' |
||||
|
|
||||
|
if [ $search == 'ok' ] |
||||
|
then |
||||
|
echo "" |
||||
|
echo ">>> Load Origin " |
||||
|
cat << EOF > $Wdir/Dockerfile |
||||
|
FROM $IMAGE_origin |
||||
|
# |
||||
|
USER root |
||||
|
|
||||
|
COPY $bootstrap /usr/bin |
||||
|
COPY $hostnames_get /usr/bin |
||||
|
COPY $hostnames /usr/bin |
||||
|
COPY install-vim-plugin.sh . |
||||
|
|
||||
|
ENV NOTVISIBLE "in users profile" |
||||
|
ENV USER1 docker |
||||
|
ENV USER_HOME /home/docker |
||||
|
ENV SSHDIR \${USER_HOME}/.ssh |
||||
|
COPY ssh/ \${SSHDIR}/ |
||||
|
|
||||
|
RUN export DEBIAN_FRONTEND=noninteractive \ |
||||
|
&& addgroup -S docker && adduser -S docker -G docker \ |
||||
|
&& rm -rf /usr/share/doc \ |
||||
|
&& rm -rf /usr/share/man \ |
||||
|
&& rm -rf /usr/share/locale \ |
||||
|
&& mkdir -p /var/run/sshd \ |
||||
|
&& echo 'root:pass' | chpasswd \ |
||||
|
&& echo "export VISIBLE=now" >> /etc/profile \ |
||||
|
&& mkdir -p /home/docker/project \ |
||||
|
&& mkdir -p /etc/opt \ |
||||
|
&& echo "docker:docker" | chpasswd \ |
||||
|
&& echo "StrictHostKeyChecking no" > \${SSHDIR}/config \ |
||||
|
&& cat \${SSHDIR}/*.pub >> \${SSHDIR}/authorized_keys \ |
||||
|
&& chmod -R 600 \${SSHDIR}/* \ |
||||
|
&& chown -R \${USER1}:\${USER1} \${SSHDIR} |
||||
|
|
||||
|
COPY .vimrc /home/docker |
||||
|
USER root |
||||
|
EOF |
||||
|
fi |
||||
|
else |
||||
|
echo "" |
||||
|
echo "Not in Project Directory" |
||||
|
echo "A project directory should look like this" |
||||
|
echo "" |
||||
|
|
||||
|
cat <<EOF |
||||
|
├── docker-compose.yml |
||||
|
├── Dockerfile |
||||
|
├── out.log |
||||
|
├── project |
||||
|
│ └── hello_world.sh |
||||
|
└── ssh |
||||
|
├── id_rsa |
||||
|
└── id_rsa.pub |
||||
|
EOF |
||||
|
echo "Change to your Project Directory And Try Again" |
||||
|
echo "" |
||||
|
exit 0 |
||||
|
fi |
||||
|
} |
||||
|
|
||||
|
create_project () |
||||
|
{ |
||||
|
|
||||
|
docker pull $IMAGE_origin << ANSWERS |
||||
|
yes |
||||
|
yes |
||||
|
yes |
||||
|
ANSWERS |
||||
|
if [ "$Wdir" == "$HOME" ]; then |
||||
|
echo"" |
||||
|
echo "You are in Your Home directory" |
||||
|
echo "Please create a project directory" |
||||
|
echo "mkdir myproject; cd myproject; swarmlab-sec create" |
||||
|
echo"" |
||||
|
else |
||||
|
echo "# -----------------------------------------------" |
||||
|
echo "##### Create project #####" |
||||
|
echo "# -----------------------------------------------" |
||||
|
|
||||
|
|
||||
|
search='ok' |
||||
|
if [ $search == 'ok' ] |
||||
|
then |
||||
|
echo ">>> Load Origin " |
||||
|
cat << EOF > $Wdir/Dockerfile |
||||
|
FROM $IMAGE_origin |
||||
|
# |
||||
|
USER root |
||||
|
COPY $bootstrap /usr/bin |
||||
|
COPY $hostnames_get /usr/bin |
||||
|
COPY $hostnames /usr/bin |
||||
|
COPY install-vim-plugin.sh . |
||||
|
|
||||
|
ENV NOTVISIBLE "in users profile" |
||||
|
ENV USER1 docker |
||||
|
ENV USER_HOME /home/docker |
||||
|
|
||||
|
RUN export DEBIAN_FRONTEND=noninteractive \ |
||||
|
&& rm -rf /usr/share/doc \ |
||||
|
&& rm -rf /usr/share/man \ |
||||
|
&& rm -rf /usr/share/locale \ |
||||
|
&& addgroup -S docker && adduser -S docker -G docker \ |
||||
|
&& mkdir -p /var/run/sshd \ |
||||
|
&& echo 'root:pass' | chpasswd \ |
||||
|
&& echo "export VISIBLE=now" >> /etc/profile \ |
||||
|
&& mkdir -p /home/docker/project \ |
||||
|
&& mkdir -p /etc/opt \ |
||||
|
&& echo "docker:docker" | chpasswd \ |
||||
|
&& echo "StrictHostKeyChecking no" > \${SSHDIR}/config \ |
||||
|
&& cat \${SSHDIR}/*.pub >> \${SSHDIR}/authorized_keys \ |
||||
|
&& chmod -R 600 \${SSHDIR}/* \ |
||||
|
&& chown -R \${USER1}:\${USER1} \${SSHDIR} |
||||
|
|
||||
|
COPY .vimrc /home/docker |
||||
|
USER root |
||||
|
EOF |
||||
|
fi |
||||
|
|
||||
|
|
||||
|
/bin/mkdir -p $Wdir/project |
||||
|
/bin/cp -rf $DIR/project/bin $Wdir/project |
||||
|
/bin/cp -rf $DIR/project/courses $Wdir/project |
||||
|
/bin/cp -rf $DIR/project/config $Wdir/project |
||||
|
/bin/cp -rf $DIR/project/data-www $Wdir/project |
||||
|
/bin/cp -f $DIR/project/hello_world.sh $Wdir/project |
||||
|
/bin/cp -f $DIR/$bootstrap $Wdir/$bootstrap |
||||
|
/bin/cp -f $DIR/$hostnames $Wdir/$hostnames |
||||
|
/bin/cp -f $DIR/$hostnames_get $Wdir/$hostnames_get |
||||
|
/bin/cp -f $DIR/ROOT_PASSWORD $Wdir/ROOT_PASSWORD |
||||
|
/bin/cp -rf $DIR/.vimrc $Wdir/.vimrc |
||||
|
/bin/cp -rf $DIR/install-vim-plugin.sh $Wdir/install-vim-plugin.sh |
||||
|
|
||||
|
|
||||
|
cat << EOF > $Wdir/docker-compose.yml |
||||
|
version: "3" |
||||
|
|
||||
|
services: |
||||
|
|
||||
|
masterservice: |
||||
|
image: $IMAGE_NAME |
||||
|
privileged: true |
||||
|
environment: |
||||
|
- NODENAME=${NODENAME} |
||||
|
- NODENETWORK=${NODENETWORK} |
||||
|
- DISPLAY=\${DISPLAY} |
||||
|
cap_add: |
||||
|
- NET_ADMIN |
||||
|
user: root |
||||
|
entrypoint: ["sec_bootstrap", "role=masterservice", "sec_master_service_name=masterservice", "sec_worker_service_name=workerservice"] |
||||
|
#ports: |
||||
|
# - "\${R_PORT1}:8001" |
||||
|
# - "\${R_PORT2}:3080" |
||||
|
networks: |
||||
|
- ${HYBRID_NETWORK} |
||||
|
volumes: |
||||
|
- $Wdir/project:/home/docker/project |
||||
|
- $Wdir/$bootstrap:/usr/bin/$bootstrap |
||||
|
|
||||
|
|
||||
|
# workerservice: |
||||
|
# image: $IMAGE_NAME |
||||
|
# privileged: true |
||||
|
# environment: |
||||
|
# - NODENAME=${NODENAME} |
||||
|
# - NODENETWORK=${NODENETWORK} |
||||
|
# - DISPLAY=\${DISPLAY} |
||||
|
# cap_add: |
||||
|
# - NET_ADMIN |
||||
|
# user: root |
||||
|
# entrypoint: ["sec_bootstrap", "role=workerservice", "sec_master_service_name=masterservice", "sec_worker_service_name=workerservice"] |
||||
|
# #ports: |
||||
|
# # - "\${SSH_PORT}:22" |
||||
|
# networks: |
||||
|
# - ${HYBRID_NETWORK} |
||||
|
# volumes: |
||||
|
# - $Wdir/project:/home/docker/project |
||||
|
# - $Wdir/project/data-www:/data-www |
||||
|
# - $Wdir/project/config/nginx.conf:/etc/nginx/nginx.conf |
||||
|
# - $Wdir/project/config/default.conf:/etc/nginx/conf.d/default.conf |
||||
|
# - $Wdir/project/config/supervisord.conf:/etc/supervisor/supervisord.conf |
||||
|
|
||||
|
networks: |
||||
|
${HYBRID_NETWORK}: |
||||
|
EOF |
||||
|
|
||||
|
#/bin/cp -rf $DIR/ssh $Wdir |
||||
|
|
||||
|
cat << EOF > $Wdir/stop.sh |
||||
|
../install/usr/share/swarmlab.io/sec/swarmlab-sec down |
||||
|
EOF |
||||
|
|
||||
|
cat << EOF > $Wdir/container-stop.sh |
||||
|
docker stop \$1 |
||||
|
docker container rm \$1 |
||||
|
EOF |
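# Usage sketch for the helper scripts written above (the container name is an example
# following the ${IMAGE_local}_masterservice_ naming used by clear_all):
#
#   ./stop.sh
#   ./container-stop.sh microservice-ansible_masterservice_1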
||||
|
|
||||
|
cat <<EOF |
||||
|
|
||||
|
Project is Ready |
||||
|
├── docker-compose.yml |
||||
|
├── Dockerfile |
||||
|
├── out.log |
||||
|
├── project |
||||
|
│ └── hello_world.sh |
||||
|
└── ssh |
||||
|
├── id_rsa |
||||
|
└── id_rsa.pub |
||||
|
|
||||
|
Run: swarmlab-sec up size=5 |
||||
|
|
||||
|
or run swarmlab-sec help for USAGE |
||||
|
|
||||
|
EOF |
||||
|
|
||||
|
/bin/chmod -R 777 $Wdir |
||||
|
/bin/chown -R sec.sec $Wdir |
||||
|
|
||||
|
fi |
||||
|
} |
||||
|
|
||||
|
####################### |
||||
|
# TASK INDICATORS |
||||
|
COMMAND_UP=0 |
||||
|
COMMAND_CREATE=0 |
||||
|
COMMAND_DOWN=0 |
||||
|
COMMAND_RELOAD=0 |
||||
|
COMMAND_SCALE=0 |
||||
|
COMMAND_LOGIN=0 |
||||
|
COMMAND_EXEC=0 |
||||
|
COMMAND_LIST=0 |
||||
|
COMMAND_CLEAN=0 |
||||
|
|
||||
|
# Default values if providing empty |
||||
|
SIZE=4 |
||||
|
|
||||
|
############################################# |
||||
|
usage () |
||||
|
{ |
||||
|
echo "" |
||||
|
echo "--------------" |
||||
|
echo " SwarmLab.io " |
||||
|
echo "--------------" |
||||
|
echo "" |
||||
|
echo " USAGE: ./swarmlab-sec [COMMAND] [OPTIONS]" |
||||
|
echo "" |
||||
|
echo " Examples of [COMMAND] can be:" |
||||
|
echo " create: create swarmlab-sec " |
||||
|
echo " mkdir WORKdir; cd WORKdir; swarmlab-sec create" |
||||
|
echo "" |
||||
|
echo " up: start swarmlab-sec" |
||||
|
echo " swarmlab-sec up size=10" |
||||
|
echo "" |
||||
|
echo " scale: resize the swarmlab-sec" |
||||
|
echo " swarmlab-sec scale size=30" |
||||
|
echo "" |
||||
|
echo " reload: rebuild image and distribute to nodes" |
||||
|
echo " swarmlab-sec reload size=15" |
||||
|
echo "" |
||||
|
echo " login: login to Docker container of MPI master node for interactive usage" |
||||
|
echo " swarmlab-sec login" |
||||
|
echo "" |
||||
|
echo " exec: execute shell command at the MPI master node" |
||||
|
echo " swarmlab-sec exec [SHELL COMMAND]" |
||||
|
echo "" |
||||
|
echo " down: shutdown swarmlab-sec" |
||||
|
echo " swarmlab-sec down" |
||||
|
echo "" |
||||
|
echo " clean: remove images in the system" |
||||
|
echo " swarmlab-sec clean" |
||||
|
echo "" |
||||
|
echo " list: show running containers of swarmlab-sec" |
||||
|
echo " swarmlab-sec list" |
||||
|
echo "" |
||||
|
echo " help: show this message" |
||||
|
echo " swarmlab-sec help" |
||||
|
echo "" |
||||
|
echo " " |
||||
|
} |
||||
|
|
||||
|
HEADER=" |
||||
|
SwarmLab.io" |
||||
|
|
||||
|
clear_all () |
||||
|
{ |
||||
|
$(docker stop $(docker ps -a | grep "${IMAGE_local}_masterservice_" | awk '{print $1}')) || true |
||||
|
$(docker stop $(docker ps -a | grep "${IMAGE_local}_workerservice_" | awk '{print $1}')) || true |
||||
|
|
||||
|
#$(docker stop $(docker ps -a | grep "_registry_" | awk '{print $1}')) || true # remove for microservices |
||||
|
#docker ps -a | grep "_registry_" | awk '{print $1}' | xargs docker container rm || true # remove for microservices |
||||
|
docker ps -a | grep "${IMAGE_local}_workerservice_" | awk '{print $1}' | xargs docker container rm || true |
||||
|
docker ps -a | grep "${IMAGE_local}_masterservice_" | awk '{print $1}' | xargs docker container rm || true |
||||
|
docker rmi $IMAGE_local -f |
||||
|
} |
||||
|
|
||||
|
down_all () |
||||
|
{ |
||||
|
printf "\\n\\n===> CLEAN UP SWARMLAB" |
||||
|
|
||||
|
printf "\\n%s\\n" "$HEADER" |
||||
|
echo "$ docker-compose down" |
||||
|
printf "\\n" |
||||
|
|
||||
|
docker-compose down |
||||
|
} |
||||
|
|
||||
|
up_registry () |
||||
|
{ |
||||
|
printf "\\n\\n===> SPIN UP REGISTRY" |
||||
|
|
||||
|
printf "\\n%s\\n" "$HEADER" |
||||
|
echo "$ docker-compose up -d registry" |
||||
|
printf "\\n" |
||||
|
|
||||
|
#docker stop swarmlab-registry || true && docker rm swarmlab-registry || true # remove for microservices |
||||
|
docker container prune --force |
||||
|
docker-compose up --no-recreate -d registry |
||||
|
} |
||||
|
|
||||
|
generate_ssh_keys () |
||||
|
{ |
||||
|
if [ -f ssh/id_rsa ] && [ -f ssh/id_rsa.pub ]; then |
||||
|
return 0 |
||||
|
fi |
||||
|
|
||||
|
printf "\\n\\n===> GENERATE SSH KEYS \\n\\n" |
||||
|
|
||||
|
echo "$ mkdir -p ssh/ " |
||||
|
printf "\\n" |
||||
|
mkdir -p ssh/ |
||||
|
|
||||
|
echo "$ ssh-keygen -f ssh/id_rsa -t rsa -N ''" |
||||
|
printf "\\n" |
||||
|
ssh-keygen -f ssh/id_rsa -t rsa -N '' |
||||
|
} |
||||
|
|
||||
|
build_and_push_image () |
||||
|
{ |
||||
|
printf "\\n\\n===> BUILD IMAGE" |
||||
|
printf "\\n%s\\n" "$HEADER" |
||||
|
echo "$ docker build -t \"$IMAGE_NAME\" ." |
||||
|
printf "\\n" |
||||
|
#docker build -t "$REGISTRY_ADDR:$REGISTRY_PORT/$IMAGE_NAME" . |
||||
|
docker build --force-rm --pull -t "$IMAGE_NAME" . |
||||
|
|
||||
|
} |
||||
|
|
||||
|
up_master () |
||||
|
{ |
||||
|
printf "\\n\\n===> SPIN UP MASTER NODE" |
||||
|
printf "\\n%s\\n" "$HEADER" |
||||
|
echo "$ docker-compose up -d masterservice" |
||||
|
printf "\\n" |
||||
|
echo "$ $IMAGE_local -d $IMAGE_origin" |
||||
|
printf "\\n" |
||||
|
|
||||
|
docker-compose rm -f -s -v |
||||
|
docker-compose up --build --remove-orphans --force-recreate -d masterservice << ANSWERS |
||||
|
yes |
||||
|
yes |
||||
|
yes |
||||
|
ANSWERS |
||||
|
#docker-compose up --force-recreate -d masterservice |
||||
|
} |
||||
|
|
||||
|
|
||||
|
up_workers () |
||||
|
{ |
||||
|
printf "\\n\\n===> SPIN UP WORKER NODES" |
||||
|
printf "\\n%s\\n" "$HEADER" |
||||
|
echo "$ docker-compose up -d worker" |
||||
|
printf "\\n" |
||||
|
docker-compose rm -f -s -v |
||||
|
docker-compose up --build --force-recreate --renew-anon-volumes --remove-orphans -d workerservice |
||||
|
#docker-compose up --force-recreate -d workerservice |
||||
|
|
||||
|
printf "\\n" |
||||
|
printf "\\n%s\\n" "$HEADER" |
||||
|
|
||||
|
NUM_WORKER=$((SIZE - 1)) |
||||
|
echo "$ docker-compose scale workerservice=$NUM_WORKER" |
||||
|
printf "\\n" |
||||
|
docker-compose scale workerservice=${NUM_WORKER} |
||||
|
} |
||||
|
|
||||
|
down_master () |
||||
|
{ |
||||
|
printf "\\n\\n===> TORN DOWN MASTER NODE" |
||||
|
printf "\\n%s\\n" "$HEADER" |
||||
|
|
||||
|
echo "$ docker-compose stop masterservice && docker-compose rm -f masterservice" |
||||
|
printf "\\n" |
||||
|
docker-compose stop masterservice && docker-compose rm -f masterservice |
||||
|
} |
||||
|
|
||||
|
down_workers () |
||||
|
{ |
||||
|
printf "\\n\\n===> TORN DOWN WORKER NODES" |
||||
|
printf "\\n%s\\n" "$HEADER" |
||||
|
echo "$ docker-compose stop worker && docker-compose rm -f worker" |
||||
|
printf "\\n" |
||||
|
docker-compose stop workerservice && docker-compose rm -f workerservice |
||||
|
} |
||||
|
|
||||
|
list () |
||||
|
{ |
||||
|
printf "\\n\\n===> LIST CONTAINERS" |
||||
|
printf "\\n%s\\n" "$HEADER" |
||||
|
echo "$ docker-compose ps" |
||||
|
printf "\\n" |
||||
|
docker-compose ps |
||||
|
} |
||||
|
|
||||
|
|
||||
|
exec_on_mpi_master_container () |
||||
|
{ |
||||
|
# shellcheck disable=SC2046 |
||||
|
docker exec -it -u $DOCKERuser $(docker-compose ps | grep 'masterservice'| awk 'NR==1{print $1}') "$@" |
||||
|
} |
||||
|
|
||||
|
prompt_ready () |
||||
|
{ |
||||
|
printf "\\n\\n===> SWARMLAB READY \\n\\n" |
||||
|
} |
||||
|
|
||||
|
show_instruction () |
||||
|
{ |
||||
|
echo ' ## . ' |
||||
|
echo ' ## ## ## == ' |
||||
|
echo ' ## ## ## ## ## === ' |
||||
|
echo ' /"""""""""""""""""\___/ === ' |
||||
|
echo ' ~~~ {~~ ~~~~ ~~~ ~~~~ ~~~ ~ / ===- ~~~ ' |
||||
|
echo ' \______ o __/ ' |
||||
|
echo ' \ \ __/ ' |
||||
|
echo ' \____\_______/ ' |
||||
|
echo ' ' |
||||
|
echo ' Swarmlab.io Hybrid ' |
||||
|
echo '' |
||||
|
echo '==============================================================' |
||||
|
echo '' |
||||
|
|
||||
|
echo "To run SEC programs in an interative shell:" |
||||
|
echo " 1. Login to masterservice node:" |
||||
|
echo " Using Container->connect Menou:" |
||||
|
echo " copy/paste and Run command" |
||||
|
echo "" |
||||
|
echo " Or using SSH with keys through exposed port:" |
||||
|
echo " $ ssh -o \"StrictHostKeyChecking no\" -i ssh/id_rsa -p $SSH_PORT docker@localhost" |
||||
|
echo ' where [localhost] can be replaced with the host IP of the masterservice node' |
||||
|
echo "" |
||||
|
echo " 2. Execute programs inside masterservice node, for example:" |
||||
|
echo " $ sudo su" |
||||
|
echo " # apk update" |
||||
|
echo " *----------------------------------------------------*" |
||||
|
echo " | Default hostfile of connected nodes in the swarmlab |" |
||||
|
echo " | To obtain hostfile manually: $ ./bin/swarmlab-nmap: > hosts |" |
||||
|
echo " * ---------------------------------------------------*" |
||||
|
echo "" |
||||
|
echo "" |
||||
|
} |
||||
|
|
||||
|
|
||||
|
|
||||
|
############################################# |
||||
|
|
||||
|
while [ "$1" != "" ]; |
||||
|
do |
||||
|
PARAM=$(echo "$1" | awk -F= '{print $1}') |
||||
|
VALUE=$(echo "$1" | awk -F= '{print $2}') |
||||
|
|
||||
|
case $PARAM in |
||||
|
help) |
||||
|
usage |
||||
|
exit |
||||
|
;; |
||||
|
-i) |
||||
|
show_instruction |
||||
|
exit |
||||
|
;; |
||||
|
|
||||
|
login) |
||||
|
COMMAND_LOGIN=1 |
||||
|
;; |
||||
|
|
||||
|
exec) |
||||
|
COMMAND_EXEC=1 |
||||
|
shift # the rest is the shell command to run in the node |
||||
|
SHELL_COMMAND="$*" |
||||
|
break # end while loop |
||||
|
;; |
||||
|
|
||||
|
up) |
||||
|
COMMAND_UP=1 |
||||
|
;; |
||||
|
|
||||
|
create) |
||||
|
COMMAND_CREATE=1 |
||||
|
;; |
||||
|
|
||||
|
down) |
||||
|
COMMAND_DOWN=1 |
||||
|
;; |
||||
|
|
||||
|
reload) |
||||
|
COMMAND_RELOAD=1 |
||||
|
;; |
||||
|
|
||||
|
scale) |
||||
|
COMMAND_SCALE=1 |
||||
|
;; |
||||
|
|
||||
|
list) |
||||
|
COMMAND_LIST=1 |
||||
|
;; |
||||
|
|
||||
|
clean) |
||||
|
COMMAND_CLEAN=1 |
||||
|
;; |
||||
|
|
||||
|
size) |
||||
|
[ "$VALUE" ] && SIZE=$VALUE |
||||
|
;; |
||||
|
|
||||
|
*) |
||||
|
echo "ERROR: unknown parameter \"$PARAM\"" |
||||
|
usage |
||||
|
exit 1 |
||||
|
;; |
||||
|
esac |
||||
|
shift |
||||
|
done |
||||
|
|
||||
|
|
||||
|
if [ $COMMAND_UP -eq 1 ]; then |
||||
|
create_dockerfile |
||||
|
down_all |
||||
|
clear_all |
||||
|
#up_registry # remove for microservices |
||||
|
generate_ssh_keys |
||||
|
build_and_push_image # remove for microservices |
||||
|
up_master |
||||
|
#up_workers |
||||
|
|
||||
|
prompt_ready |
||||
|
show_instruction |
||||
|
|
||||
|
elif [ $COMMAND_CREATE -eq 1 ]; then |
||||
|
create_project |
||||
|
|
||||
|
elif [ $COMMAND_DOWN -eq 1 ]; then |
||||
|
down_all |
||||
|
clear_all |
||||
|
|
||||
|
elif [ $COMMAND_CLEAN -eq 1 ]; then |
||||
|
clear_all |
||||
|
|
||||
|
|
||||
|
elif [ $COMMAND_SCALE -eq 1 ]; then |
||||
|
create_dockerfile |
||||
|
down_master |
||||
|
down_workers |
||||
|
up_master |
||||
|
#up_workers |
||||
|
|
||||
|
prompt_ready |
||||
|
show_instruction |
||||
|
|
||||
|
elif [ $COMMAND_RELOAD -eq 1 ]; then |
||||
|
create_dockerfile |
||||
|
down_master |
||||
|
down_workers |
||||
|
build_and_push_image |
||||
|
up_master |
||||
|
#up_workers |
||||
|
|
||||
|
prompt_ready |
||||
|
show_instruction |
||||
|
|
||||
|
elif [ $COMMAND_LOGIN -eq 1 ]; then |
||||
|
exec_on_mpi_master_container /bin/bash |
||||
|
|
||||
|
elif [ $COMMAND_EXEC -eq 1 ]; then |
||||
|
create_dockerfile |
||||
|
exec_on_mpi_master_container ash -c "${SHELL_COMMAND}" |
||||
|
|
||||
|
elif [ $COMMAND_LIST -eq 1 ]; then |
||||
|
list |
||||
|
else |
||||
|
usage |
||||
|
fi |
||||
|
|