I'm trying to deploy my Mezzanine site and have been following this tutorial using CASE 1 (to a new server).
Steps 1-3 all seem fine (obviously not, as it turns out) until I get to step 4:
4- Run fab all
The process installs the virtualenv, and in it a directory with my project name, and then I get this error:
[1xx.xx.xxx.xx] out: New python executable in /home/~/.virtualenvs/blog_iq/bin/python
[1xx.xx.xxx.xx] out: Installing setuptools, pip, wheel...done.
[1xx.xx.xxx.xx] rsync_project: rsync --exclude "*.pyc" --exclude "*.pyo" --exclude "*.db" --exclude ".DS_Store" --exclude ".coverage" --exclude "local_settings.py" --exclude "/static" --exclude "/.git" --exclude "/.hg" -pthrvz --rsh='ssh -p 22 ' C:\Users\~\Root\2blog\blog_iq\ user@1xx.xxx.xx:/home/user/mezzanine/blog_iq
[localhost] local: rsync --exclude "*.pyc" --exclude "*.pyo" --exclude "*.db" --exclude ".DS_Store" --exclude ".coverage" --exclude "local_settings.py" --exclude "/static" --exclude "/.git" --exclude "/.hg" -pthrvz --rsh='ssh -p 22 ' C:\Users\~\Root\2blog\blog_iq\ user@1xx.xxx.xx:/home/user/mezzanine/blog_iq
The source and destination cannot both be remote.
rsync error: syntax or usage error (code 1) at main.c(1292) [Receiver=3.1.2]
Fatal error: local() encountered an error (return code 1) while executing 'rsync --exclude "*.pyc" --exclude "*.pyo" --exclude "*.db" --exclude ".DS_Store" --exclude ".coverage" --exclude "local_settings.py" --exclude "/static" --exclude "/.git" --exclude "/.hg" -pthrvz --rsh='ssh -p 22 ' C:\Users\~\Root\2blog\blog_iq\ user@1xx.xxx.xx:/home/user/mezzanine/blog_iq'
Aborting.
Disconnecting from 1xx.xxx.xx... done.
I'll gladly post the entire fabfile.py that Mezzanine generated on installation, but it's lengthy and I never touched it. Looking for the code that generates this output, it seems to be here:
################
# Config setup #
################
if not hasattr(env, "proj_app"):
    env.proj_app = real_project_name("blog_iq")

conf = {}
if sys.argv[0].split(os.sep)[-1] in ("fab", "fab-script.py"):
    # Ensure we import settings from the current dir
    try:
        conf = import_module("%s.settings" % env.proj_app).FABRIC
        try:
            conf["HOSTS"][0]
        except (KeyError, ValueError):
            raise ImportError
    except (ImportError, AttributeError):
        print("Aborting, no hosts defined.")
        exit()

env.db_pass = conf.get("DB_PASS", None)
env.admin_pass = conf.get("ADMIN_PASS", None)
env.user = conf.get("SSH_USER", getuser())
env.password = conf.get("SSH_PASS", None)
env.key_filename = conf.get("SSH_KEY_PATH", None)
env.hosts = conf.get("HOSTS", [""])
env.proj_name = conf.get("PROJECT_NAME", env.proj_app)
env.venv_home = conf.get("VIRTUALENV_HOME", "/home/%s/.virtualenvs" % env.user)
env.venv_path = join(env.venv_home, env.proj_name)
env.proj_path = "/home/%s/mezzanine/%s" % (env.user, env.proj_name)
env.manage = "%s/bin/python %s/manage.py" % (env.venv_path, env.proj_path)
env.domains = conf.get("DOMAINS", [conf.get("LIVE_HOSTNAME", env.hosts[0])])
env.domains_nginx = " ".join(env.domains)
env.domains_regex = "|".join(env.domains)
env.domains_python = ", ".join(["'%s'" % s for s in env.domains])
env.ssl_disabled = "#" if len(env.domains) > 1 else ""
env.vcs_tools = ["git", "hg"]
env.deploy_tool = conf.get("DEPLOY_TOOL", "rsync")
env.reqs_path = conf.get("REQUIREMENTS_PATH", None)
env.locale = conf.get("LOCALE", "en_US.UTF-8")
env.num_workers = conf.get("NUM_WORKERS",
                           "multiprocessing.cpu_count() * 2 + 1")
env.secret_key = conf.get("SECRET_KEY", "")
env.nevercache_key = conf.get("NEVERCACHE_KEY", "")
if not env.secret_key:
    print("Aborting, no SECRET_KEY setting defined.")
    exit()

# Remote git repos need to be "bare" and reside separated from the project
if env.deploy_tool == "git":
    env.repo_path = "/home/%s/git/%s.git" % (env.user, env.proj_name)
else:
    env.repo_path = env.proj_path
and here
def rsync_upload():
    """
    Uploads the project with rsync excluding some files and folders.
    """
    excludes = ["*.pyc", "*.pyo", "*.db", ".DS_Store", ".coverage",
                "local_settings.py", "/static", "/.git", "/.hg"]
    local_dir = os.getcwd() + os.sep
    return rsync_project(remote_dir=env.proj_path, local_dir=local_dir,
                         exclude=excludes)
Unfortunately, rsync interprets C: as a remote host, because of the colon.
I suppose you had to install Cygwin along with rsync; in that case, use Cygwin-style paths: /cygdrive/c/...
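If patching the generated fabfile is acceptable, here is a minimal, untested sketch of rsync_upload with a helper (the cygwin_path function is my own addition, not part of Mezzanine's generated fabfile) that rewrites the Windows drive path into the /cygdrive form rsync expects:

import os

from fabric.api import env                      # Fabric 1.x
from fabric.contrib.project import rsync_project

def cygwin_path(path):
    """Map a Windows drive path (C:...) to a Cygwin path (/cygdrive/c/...)."""
    drive, rest = os.path.splitdrive(path)
    if not drive:
        return path.replace("\\", "/")
    return "/cygdrive/%s%s" % (drive[0].lower(), rest.replace("\\", "/"))

def rsync_upload():
    """
    Uploads the project with rsync excluding some files and folders.
    """
    excludes = ["*.pyc", "*.pyo", "*.db", ".DS_Store", ".coverage",
                "local_settings.py", "/static", "/.git", "/.hg"]
    local_dir = cygwin_path(os.getcwd()) + "/"
    return rsync_project(remote_dir=env.proj_path, local_dir=local_dir,
                         exclude=excludes)

The local side then reads /cygdrive/c/... and rsync no longer parses the drive letter as a remote host.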
I added a dependency to an existing .bb file, e.g. a DEPENDS = "AAA" line. When I build the recipe it fails with XXX rdepends on AAA-dev [dev-deps], and when I search Google almost every answer says to add INSANE_SKIP_${PN} += "dev-deps" or RDEPENDS_${PN}_remove = "AAA-dev" to the .bb file.
But my question is why? Why, when one package DEPENDS on the AAA package, should it also RDEPENDS on AAA-dev? Is there any other way to fix this problem?
The bb source file is:
inherit autotools qcommon
DESCRIPTION = "Daemon to handle AT commands"
DEPENDS = "glib-2.0 qmi qmi-framework qmi-client-helper ocean-link"
SRC_DIR = "${WORKSPACE}/atfwd-daemon/"
S = "${WORKDIR}/atfwd-daemon/"
PR = "r3"
EXTRA_OECONF += "--with-glib --with-common-includes=${STAGING_INCDIR}"
do_configure_append() {
    echo "/*This is compiled to generate, only look don't try*/" > ${S}atfwd_config.h
    echo "#ifndef _ATFWD_CONFIG_H_" >> ${S}atfwd_config.h
    echo "#define _ATFWD_CONFIG_H_" >> ${S}atfwd_config.h
    #//<!-- ODM feature caogang@2015-07-13
    if [ "${PRJ_NAU8810}" = "NAU8810_CODEC" ]; then
        echo "#define NAU8810_CODEC" >> ${S}atfwd_config.h
    fi
    if [ "${FEATURE_ACDB_ENABLE}" = "true" ]; then
        echo "#define FEATURE_ACDB_ENABLE 1" >> ${S}atfwd_config.h
    fi
    if [ "${PRJ_XXX}" != "" ]; then
        echo "#define ${PRJ_XXX}" >> ${S}atfwd_config.h
    fi
    #//end-->
}
I added a DEPENDS on the onenet package:
inherit autotools qcommon
DESCRIPTION = "Daemon to handle AT commands"
DEPENDS = "glib-2.0 qmi qmi-framework qmi-client-helper ocean-link onenet"
SRC_DIR = "${WORKSPACE}/atfwd-daemon/"
S = "${WORKDIR}/atfwd-daemon/"
PR = "r3"
EXTRA_OECONF += "--with-glib --with-common-includes=${STAGING_INCDIR}"
do_configure_append() {
    echo "/*This is compiled to generate, only look don't try*/" > ${S}atfwd_config.h
    echo "#ifndef _ATFWD_CONFIG_H_" >> ${S}atfwd_config.h
    echo "#define _ATFWD_CONFIG_H_" >> ${S}atfwd_config.h
    #//<!-- ODM feature caogang@2015-07-13
    if [ "${PRJ_NAU8810}" = "NAU8810_CODEC" ]; then
        echo "#define NAU8810_CODEC" >> ${S}atfwd_config.h
    fi
    if [ "${FEATURE_ACDB_ENABLE}" = "true" ]; then
        echo "#define FEATURE_ACDB_ENABLE 1" >> ${S}atfwd_config.h
    fi
    if [ "${PRJ_XXX}" != "" ]; then
        echo "#define ${PRJ_XXX}" >> ${S}atfwd_config.h
    fi
    #//end-->
}
The onenet.bb is:
inherit pkgconfig cmake
DESCRIPTION = "onenet sdk"
LICENSE = "PD"
PR = "r0"
LIC_FILES_CHKSUM = "file://${WORKDIR}/git/LICENSE;md5=bae84cdd023be37582157d865da54cc6"
SRCREV = "065d98dd8de91544315d6167ce73626ce739666d"
SRC_URI = "git://github.com/cm-heclouds/MQTT.git;protocol=https"
S = "${WORKDIR}/git/mqtt_sdk"
do_install() {
    install -d ${D}/usr/lib
    install -d ${D}/usr/include/onenet
    install -m 0644 ${B}/bin/libmqtt.so -D ${D}/usr/lib/
    for inc in $(find ${S} -name *.h ! -name 'cJSON.h'); do
        install -m 0644 ${inc} -D ${D}/usr/include/onenet
    done
}
The sanity check documentation explains this:
dev-deps: Checks that all packages except -dev or -staticdev packages
do not depend on -dev packages, which would be a packaging bug.
It's telling you that in your current recipe "XXX" runtime-depends on "AAA-dev", and that this is normally an error. You need to find out how and why this dependency is added before you can decide what the correct solution is.
Based on the added recipes: the issue seems to be that the onenet build produces an unversioned ".so" file. This is typically a mistake (the actual library file should be e.g. "libmqtt.so.1.1" and the unversioned file should just be a symlink to the versioned one). I'm very surprised that you are not getting a fatal error on this when you build onenet. Are you suppressing the QA error for it?
Since you've managed to build onenet somehow, you now probably have an onenet-dev package that erroneously contains the actual library: the build system notices this during the atfwd-daemon build, adds a runtime dependency on onenet-dev (because that's where the library is), and then the QA error triggers, because normal packages should not depend on -dev packages.
Possible fixes:
Either fix the onenet build system so it produces a versioned library, or
Force the .so file to be packaged into the actual onenet package instead of onenet-dev, like this:
FILES_${PN}-dev = "${includedir}/"
FILES_${PN} += "${libdir}/libmqtt.so"
A bonus suggestion: using directory variables like ${libdir} and ${includedir} instead of literal paths like /usr/include and /usr/lib (as I did above) is good practice.
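For illustration, here is a hedged sketch of onenet's do_install rewritten with those directory variables, plus the FILES tweak from above. It assumes the build really does produce only the unversioned libmqtt.so; the cleaner fix is still to version the library in onenet's CMake, e.g. via set_target_properties with VERSION/SOVERSION.

do_install() {
    install -d ${D}${libdir}
    install -d ${D}${includedir}/onenet
    # Install the (unversioned) library and the public headers.
    install -m 0644 ${B}/bin/libmqtt.so ${D}${libdir}/
    for inc in $(find ${S} -name '*.h' ! -name 'cJSON.h'); do
        install -m 0644 ${inc} ${D}${includedir}/onenet/
    done
}

# Keep headers in -dev, but force the unversioned .so into the main package.
FILES_${PN}-dev = "${includedir}/"
FILES_${PN} += "${libdir}/libmqtt.so"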
Using a makefile with RStudio, I generate an HTML report from an Rmd file in my analysis/ subdirectory using the variable $(RENDER), and then move the HTML report to the reports/ subdirectory in the next line of the make rule.
# define VARIABLES for Makefile
RENDER = Rscript -e "require(rmarkdown); render('$<')"
DAT = ./data
ANL = ./analysis
REP = ./reports
VPATH = $(DAT) $(ANL) $(REP)

# generate a report in html
foo.html: foo.Rmd bar.rds
	$(RENDER)
	mv foo.html $(REP)
How can I combine these two steps into one variable command?
# Attempt:
RENDREP = $(RENDER); mv $($< : .Rmd=.html) $(REP)

# generate a report in html
foo.html: foo.Rmd bar.rds
	$(RENDREP)
My attempt at the RENDREP variable yields this error message:
Output created: foo.html
usage: mv [-f | -i | -n] [-v] source target
mv [-f | -i | -n] [-v] source ... directory
make: *** [foo.html] Error 64
The syntax of $($< : .Rmd=.html) is wrong; it should be
RENDREP = $(RENDER); mv $(<:.Rmd=.html) $(REP)
That said, it'd be simpler to just use the automatic variable $@, which expands to the name of the target:
RENDREP = $(RENDER); mv $@ $(REP)
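Putting that together, a minimal sketch of the corrected Makefile (same names as in the question; note the recipe line must be indented with a tab):

RENDER  = Rscript -e "require(rmarkdown); render('$<')"
REP     = ./reports
# $@ expands to the target name (here foo.html), so no suffix
# substitution is needed before the move.
RENDREP = $(RENDER); mv $@ $(REP)

foo.html: foo.Rmd bar.rds
	$(RENDREP)

Because RENDREP is a recursively expanded variable, $< and $@ are only expanded when the recipe runs, so they refer to foo.Rmd and foo.html respectively.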
I'm trying to modify an existing zsh prompt to work with zsh 5.0 and 4.3, because those are the versions on the systems that I use. How would I make a zsh script be aware of the current working directory instead of the directory that the file is in?
For context, this is the function in the script that checks whether we're currently in a git directory and adds to the prompt if we are:
# Git status.
# Collect indicators, git branch and print string.
spaceship_git_status() {
  [[ $SPACESHIP_GIT_SHOW == false ]] && return

  # Check if the current directory is in a Git repository.
  command git rev-parse --is-inside-work-tree &>/dev/null || return

  # Check if the current directory is in .git before running git checks.
  if [[ "$(git rev-parse --is-inside-git-dir 2> /dev/null)" == 'false' ]]; then
    # Ensure the index is up to date.
    git update-index --really-refresh -q &>/dev/null

    # String of indicators
    local indicators=''
    indicators+="$(spaceship_git_uncomitted)"
    indicators+="$(spaceship_git_unstaged)"
    indicators+="$(spaceship_git_untracked)"
    indicators+="$(spaceship_git_stashed)"
    indicators+="$(spaceship_git_unpushed_unpulled)"

    [ -n "${indicators}" ] && indicators=" [${indicators}]";

    echo -n " %Bon%b "
    echo -n "%{$fg_bold[magenta]%}"
    echo -n "$(git_current_branch)"
    echo -n "%{$reset_color%}"
    echo -n "%{$fg_bold[red]%}"
    echo -n "$indicators"
    echo -n "%{$reset_color%}"
  fi
}
However, based on my debugging, the function always believes it is in the directory from which the script was sourced. In other words, as I change directories, the script keeps referencing the directory where the script itself is located.
The spaceship_git_status function is called here:
# Build prompt line
spaceship_build_prompt() {
  spaceship_host
  spaceship_current_dir
  spaceship_git_status
  spaceship_nvm_status
  spaceship_ruby_version
  spaceship_venv_status
}
And this is how the PROMPT variable is composed:
# Compose PROMPT
PROMPT=''
[[ $SPACESHIP_PROMPT_ADD_NEWLINE == true ]] && PROMPT="$PROMPT$NEWLINE"
PROMPT="$PROMPT $(spaceship_build_prompt) "
[[ $SPACESHIP_PROMPT_SEPARATE_LINE == true ]] && PROMPT="$PROMPT$NEWLINE"
PROMPT="$PROMPT $(spaceship_return_status) "
I think this is an issue with zsh versions < 5.2 because the prompt renders fine on my other computer with 5.2.
Full code: https://github.com/denysdovhan/spaceship-zsh-theme/blob/master/spaceship.zsh
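Not a confirmed diagnosis, but one mechanism worth checking: in PROMPT="$PROMPT $(spaceship_build_prompt) ", the command substitution runs once, at the moment the theme is sourced, so all the git checks execute in whatever directory you sourced it from. To have zsh re-evaluate it for every prompt, the substitution has to stay literal in the string and promptsubst must be set. A minimal sketch:

setopt promptsubst  # let zsh expand $(...) each time the prompt is drawn

PROMPT=''
[[ $SPACESHIP_PROMPT_ADD_NEWLINE == true ]] && PROMPT="$PROMPT$NEWLINE"
# Single quotes keep $(spaceship_build_prompt) unexpanded in the variable;
# with promptsubst, it is evaluated freshly for every prompt, in the
# current working directory.
PROMPT="$PROMPT"' $(spaceship_build_prompt) '
[[ $SPACESHIP_PROMPT_SEPARATE_LINE == true ]] && PROMPT="$PROMPT$NEWLINE"
PROMPT="$PROMPT"' $(spaceship_return_status) '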
I'm trying to make a recycle bin for UNIX, so I have two scripts: one to delete a file and move it to the bin, the other to restore the file back to its original location.
My restore script only works if the person gives the path to the deleted file, e.g.:
sh restore ~/trashbin/filename
How do I hardcode into my script that it should already know to look in the trashbin for the file, so that I don't need to give the path to the deleted file?
#!/bin/bash

rlink=$(readlink -e "$1")
rname=$(basename "$rlink")

function restoreFile() {
    rlink=$(readlink -e "$1")
    rname=$(basename "$rlink")
    rorgpath=$(grep "$rname" ~/.restore.info | cut -d":" -f2)
    rdirect=$(dirname "$rorgpath")
    #echo $orgpath
    if [ ! -d "$rdirect" ]
    then
        mkdir -p "$rdirect"
        #echo $var
        mv "$rlink" "$rorgpath"
    else
        mv "$rlink" "$rorgpath"
    fi
}

if [ -z "$1" ]
then
    echo "Error no filename provided."
    exit 1
elif [ ! -f "$1" ]
then
    echo "Error file does not exist."
    exit 1
elif [ -f "$rorgpath" ]
then
    echo "File already exists in original path."
    read -p "Would you like to overwrite it? (y/n)" ovr
    if [[ $ovr = y || $ovr = Y || $ovr = yes ]]
    then
        echo "Restoring File and overwriting."
        restoreFile "$1"
        grep -v "$rname" ~/.restore.info > ~/.restorebackup.info
        mv ~/.restorebackup.info ~/.restore.info
    fi
else
    echo "Restoring file into original path."
    restoreFile "$1"
    grep -v "$rname" ~/.restore.info > ~/.restorebackup.info
    mv ~/.restorebackup.info ~/.restore.info
fi
When you "remove" the file from the file-system to your trash-bin, move it so that its path is remembered. Example: removing file /home/user/file.txt should mean moving this file to ~/.trash/home/user/file.txt. That way, you'll be able to restore files to the original location, and you'll have auto-complete work, since you can do: sh restore ~/.trash/<TAB><TAB>
I would like some help creating a loop that will take each of my .tar.gz files, unzip and untar it, and search the files inside (with extension .tlg) using grep -a >> output.text.
In output.text I need the matching data as well as the name of the file and the parent tar it came from.
Once this search has been performed I would like the untarred files to be deleted and the process to continue with the next tar file until all tars have been checked.
I can't untar them all at once as I don't have the disk space for this.
Can anyone help? Thanks.
To avoid creating temporary files, you can use GNU tar's --to-stdout option.
The code below is careful about spaces and other characters in paths that may confuse the shell:
#! /usr/bin/perl

use warnings;
use strict;

sub usage { "Usage: $0 pattern tar-gz-file ..\n" }

sub output_from {
  my($cmd,@args) = @_;

  my $pid = open my $fh, "-|";
  warn("$0: fork: $!"), return unless defined $pid;

  if ($pid) {
    my @lines = <$fh>;
    close $fh or warn "$0: $cmd @args exited " . ($? >> 8);
    wantarray ? @lines : join "" => @lines;
  }
  else {
    exec $cmd, @args or die "$0: exec $cmd @args: $!\n";
  }
}

die usage unless @ARGV >= 2;

my $pattern = shift;
foreach my $tgz (@ARGV) {
  chomp(my @toc = output_from "tar", "-ztf", $tgz);
  foreach my $tlg (grep /\.tlg\z/, @toc) {
    my $line = 0;
    for (output_from "tar", "--to-stdout", "-zxf", $tgz, $tlg) {
      ++$line;
      print "$tlg:$line: $_" if /$pattern/o;
    }
  }
}
Sample runs:
$ ./grep-tlgs hello tlgs.tar.gz
tlgs/another.tlg:2: hello
tlgs/file1.tlg:2: hello
tlgs/file1.tlg:3: hello
tlgs/third.tlg:1: hello
$ ./grep-tlgs ^ tlgs.tar.gz
tlgs/another.tlg:1: blah blah
tlgs/another.tlg:2: hello
tlgs/another.tlg:3: howdy
tlgs/file1.tlg:1: whoah
tlgs/file1.tlg:2: hello
tlgs/file1.tlg:3: hello
tlgs/file1.tlg:4: good-bye
tlgs/third.tlg:1: hello
tlgs/third.tlg:2: howdy
$ ./grep-tlgs ^ xtlgs.tar.gz
tar: xtlgs.tar.gz: Cannot open: No such file or directory
tar: Error is not recoverable: exiting now
tar: Child returned status 2
tar: Exiting with failure status due to previous errors
./grep-tlgs: tar -ztf xtlgs.tar.gz exited 2 at ./grep-tlgs line 14.
You could loop over the tars, extract them, then grep them; something like this should work:
match="somestring"
mkdir out/
for i in *.tar.gz; do
mkdir out/${i} # create outdir
tar -C out/${i} -xf ${i} # extract to sub-dir with same name as tar;
# this will show up in grep output
cd out
grep -r ${match} ${i} >> ../output.text
cd ..
rm -rf out/${i} # delete untarred files
done
Be careful: the contents of the $i variable are passed to rm -rf, and that has the power to delete things for good.
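For completeness, the streaming idea from the first answer can also be done in plain shell, so nothing is ever extracted to disk (a sketch assuming GNU tar; match and output.text as in the question):

match="somestring"
for t in *.tar.gz; do
    # List the .tlg members, then stream each one through grep
    # without writing it to disk.
    tar -ztf "$t" | grep '\.tlg$' | while IFS= read -r member; do
        tar -zxf "$t" --to-stdout "$member" \
            | grep -a -- "$match" \
            | sed "s|^|$t:$member: |" >> output.text
    done
done

Each match is prefixed with the tar file and member name, and no cleanup step is needed since nothing is written outside output.text.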