常用Shell命令
# Resolve the directory containing this script and make it the cwd.
# (The original fused the assignment and the cd onto one line with no
# separator, so the cd never executed as intended.)
BASEDIR=$(cd "$(dirname "$0")" && pwd)
cd "$BASEDIR" >/dev/null
选取两个文件中相同的部分:(Must sort first)
comm -12 <(sort /tmp/gcs.calix.com-wifi-pm-per-radio_50|uniq) <(sort /tmp/gcs.calix.com-wifi-pm-per-radio_dupcheck_50|uniq)
CURL:
curl -H "Accept: application/json" -H "Content-type: application/json" -X POST -d '{"id":100}' http://localhost/test/adduser
变量处理:
# Indirect variable lookup: read the variable whose NAME is built from $env
# (e.g. psql_dev_pass). Bash's ${!name} indirection avoids eval entirely.
pass_var="psql_${env}_pass"
pass=${!pass_var}
cmd_var="psql_${env}"
cmd=${!cmd_var}
数值处理:
# Check whether $org is an integer: the -eq self-comparison only succeeds
# for numeric values; its error output is discarded.
if [ -n "$org" ] && [ "$org" -eq "$org" ] 2>/dev/null; then
  echo "$org need process"
  grep "$org" result.txt > "_result_${org}"
fi
# Increment a counter (arithmetic expansion instead of the external expr).
step=$((step + 1))
输入参数:
# Parse command-line options. -o/--org is mandatory; the rest are optional.
usage="Usage: $0 -o/--org orgId[Must] -p/--prepare t[Option] -f/--force t[Option] -d/--dry t[Option] -k/--kafka t[Option]"
while [ "$1" != "" ]; do
  case $1 in
    -o|--org)
      shift
      orgId=$1
      ;;
    -f|--force)
      shift
      # Tolerate "-f -p" given back-to-back with no value in between.
      if [ "$1" = "-p" ]; then
        prepare=1
      fi
      force=1
      ;;
    -p|--prepare)
      shift
      if [ "$1" = "-f" ]; then
        force=1
      fi
      prepare=1
      ;;
    -d|--dry)
      shift
      dry=1
      ;;
    -k|--kafka)
      shift
      kafka_flg=$1
      ;;
    *)
      echo "$usage"
      exit 1
      ;;
  esac
  shift
done
if [ -z "$orgId" ]; then
  echo -e "$RED[Error] Missing orgId!$NC\r\n$usage"
  # Was a bare 'exit' (status 0); a missing mandatory option is an error.
  exit 1
fi
确认函数:
# Run one numbered step, optionally asking for interactive confirmation.
#   $1 - step name (used in prompts and the per-step log file)
#   $2 - a function to call; it is expected to fill the global $cmd
# Globals read: step, force, dry, log, logs, logsdir, BLUE/GREEN/NC.
# When $dry is 1 the command is printed instead of executed.
check_and_commit() {
  cmd=""
  step=$((step + 1))
  echo "" | tee -a "$logs"  # NOTE(review): '$logs' here vs '$log' below — confirm both names are intended
  echo -e "$BLUE[$(date +'%Y-%m-%d %H:%M:%S')][Step $step] exec $1 $NC" | tee -a "$log"
  if [ "$force" -eq 0 ]; then
    while true; do
      read -p "Do you confirm to execute step $1? [y/n]" yn
      case $yn in
        [Yy]* )
          $2
          if [ "$dry" -eq 1 ]; then
            echo -e "$GREEN [Dry Run]$NC $cmd" | tee -a "$log"
          else
            echo $cmd | tee -a "$log"
            $cmd | tee "$logsdir/$1.log"
          fi
          break
          ;;
        [Nn]* )
          echo "ignore step $1" | tee -a "$log"
          break
          ;;
      esac
    done
  else
    $2
    if [ "$dry" -eq 1 ]; then
      echo -e "$GREEN [Dry Run]$NC $cmd" | tee -a "$log"
    else
      echo $cmd | tee -a "$log"
      $cmd | tee -a "$logsdir/$1.log"
    fi
  fi
}
# Show a pre-flight checklist and wait for confirmation unless --force was given.
# Globals read: RED, NC, force, orgId.
prepare_message_confirm() {
  echo "Please make sure next items be done"
  echo -e "${RED} 1.env.sh use correct environment information ${NC}"
  echo -e "${RED} 2.all gcs vm had added the onecloud replay URL and restarted${NC}"
  echo -e "${RED} 3.make sure this vm can connect to brown field mongo/redshift/CMC gateway ${NC}"
  echo -e "${RED} 4.had startup cloud-subscriber with correct version and expose port 3424 ${NC}"
  echo -e "${RED} 5.brown field subscrbier-sync pod had patched ${NC}"
  if [ "$force" -eq 0 ]; then
    while true; do
      read -p "Do you confirm ? [y/n]" yn
      case $yn in
        [Yy]* ) echo "will continue to execute for org :$orgId"; break ;;
        [Nn]* ) exit 1 ;;  # was 'exit -1' — negative statuses are not portable
      esac
    done
  fi
}
时间函数:
#根据时区显示时间:
TZ=:Asia/Hong_Kong date +'%Y-%m-%d %H:%M:%S'
#根据long型时间获取字符值
date_str=$(date -d @$time_long +'%Y-%m-%d')
#计算时间差值:
# Compute the epoch-second span from a start date ($1, defaulting to 7 days
# ago) up to today. Requires GNU date (-d / --date).
start_date=$1
if [ -z "$start_date" ]; then
  start_date=$(date --date="-7 day" +%Y-%m-%d)
fi
#end_date=$(date -d "$start_date 1 day" +%Y-%m-%d)
#end_date=$(date --date="-1 day" +%Y-%m-%d)
end_date=$(date +%Y-%m-%d)
start_time=$(date -d "$start_date" +%s)
end_time=$(date -d "$end_date" +%s)
字符处理:
#去除文件后缀
# Parse file names of the form <device>_<epoch-seconds>.csv
# (assumes $f is set by the surrounding loop — TODO confirm).
fnocsv=${f%%.csv}                                       # strip the .csv suffix
cdevice=$(echo "$fnocsv" | awk -F '_' '{print $1}')     # part before the first '_'
time_long=$(echo "$fnocsv" | awk -F '_' '{print $2}')   # epoch seconds
date_str=$(date -d "@$time_long" +'%Y-%m-%d')
#orgs截取掉最后的orgId
left="${orgs#*$orgId}"
统计网络连接状态:
#netstat -n | awk '/^tcp/ {++state[$NF]} END {for(key in state) print key,"\t",state[key]}'
ESTABLISHED 21
FIN_WAIT1 3
SYN_SENT 1
#ss -s
Total: 181 (kernel 0)
TCP: 19 (estab 13, closed 0, orphaned 1, synrecv 0, timewait 0/0), ports 0
Transport Total IP IPv6
* 0 - -
RAW 0 0 0
UDP 6 6 0
TCP 19 17 2
INET 25 23 2
FRAG 0 0 0
数组:
IFS=',' read -ra org_array <<< "$orgs"
for orgId in "${org_array[@]}"; do
While 循环
# Feed each _result_* file to the mover script, at most 3 movers at a time.
for s in _result_*; do
  # Throttle: block while more than 3 movers are already running.
  while [ "$(ps -ef | grep pm_missing_data_move.py | grep -v grep | wc -l)" -gt 3 ]; do
    sleep 1s
  done
  if [ -f "$s" ]; then
    # Skip files already being processed by a running mover.
    if [ "$(ps -ef | grep -v grep | grep "$s" | wc -l)" -eq 0 ]; then
      mv "$s" "run_$s"
      # SECURITY NOTE(review): plaintext password on the command line is
      # visible to every user via `ps` — prefer an env var or stdin.
      nohup python pm_missing_data_move.py -e tony.ben@calix.com --password FA200417# --filename "run_$s" && mv "run_$s" "done_${s}" &
    fi
  fi
done
sed函数
#普通替换 sed -i "s/ENV/$env/g" $sql
#读取行 sed -n '1,3p' xxx.txt //读取第一到3行
#删除 sed -i '/xx/d' xxx.txt
# Export (customId,_id) pairs from Mongo, then rewrite each CSV row into a
# cloud_subscribers UPDATE statement.
query="'{\"orgId\" : \"$orgid\"}'"
echo "$query"
mongoexport -h 199.71.143.62 -d sxa -c sxa-subscribers -f customId,_id -q '{"orgId" : "145079"}' --csv -o customId.csv
# Drop the CSV header line.
sed -i '/customId/d' customId.csv
# "a","b" -> update ... set subscriber_location_id='a' where subscriber_id ='b' and org_id = '145079';
sed -i "s/^\"/update cloud_subscribers set subscriber_location_id='/" customId.csv
sed -i "s/\",\"/' where subscriber_id ='/" customId.csv
sed -i "s/\"$/' and org_id = '145079';/" customId.csv
source:
customId,_id
"005960","e14f6837-a66b-46a1-84c8-82b1c7e53fa9"
"006280","ce3f714b-c335-46ed-8481-5b5c15eaf5a3"
result:
update cloud_subscribers set subscriber_location_id=\'005960\' where subscriber_id =\'e14f6837-a66b-46a1-84c8-82b1c7e53fa9\' and org_id = \'145079\';
update cloud_subscribers set subscriber_location_id=\'006280\' where subscriber_id =\'ce3f714b-c335-46ed-8481-5b5c15eaf5a3\' and org_id = \'145079\';
#去除颜色字符
sed -i 's/\x1b\[[0-9;]*m//g' $log_file
postgres command:
#!/bin/bash
# Dump only the schema (-s) of the "cloud" database, skipping backup/scratch tables.
export PGPASSWORD='xxx'
pg_dump -h localhost -d cloud -s \
  --exclude-table=cloud_subscriber_devices_0227,calixcalldisposition_backup,cloud_subscriber_devices_0227,cloud_subscribers_0227,david_billing,dv2,sxacc_devices_backup,sxaimsubscriber_next_endpoint_id_bak,sxaimsubscriber_next_endpoint_id_old,tblsizestats,csc_site_scan_results_* \
  -U calixcloud -f schma.sql
#pg_dump -h localhost -d cloud -s -F c \
#--exclude-table cloud_subscriber_devices_0227 calixcalldisposition_backup cloud_subscriber_devices_0227 cloud_subscribers_0227 david_billing dv2 sxacc_devices_backup sxaimsubscriber_next_endpoint_id_bak sxaimsubscriber_next_endpoint_id_old tblsizestats csc_site_scan_results_* \
#-U calixcloud -f schma.sql
打印颜色
# ANSI color codes for echo -e.
# Fix: the original swapped the values — 1;32 is bright green, 1;34 bright blue.
RED='\033[0;31m'
BLUE='\033[1;34m'
GREEN='\033[1;32m'
NC='\033[0m' # No Color
echo -e "$RED[Error] Missing orgId!$NC\r\n$usage"
mongo
# Compare per-org document counts between Mongo collections and their Postgres
# tables; exits 1 on any mismatch.
# Globals read: source_sxa_mongo, onecloud_postgres_*, orgId, RED, NC.
compare_mongo_postgres() {
  # Resolve the replica-set primary host (strip the :27017 port suffix).
  source_sxa_mongo=$(mongo $source_sxa_mongo --eval "db.isMaster()['primary']" | grep 27017 | awk -F ':' '{print $1}')
  export PGPASSWORD=$onecloud_postgres_password
  flg=0
  for c in sxacc-devices sxacc-provisioning-records; do
    mnum=$(mongo $source_sxa_mongo/sxa --eval "db.getCollection('$c').find({'orgId':'$orgId'}).count()" | awk 'END {print}')
    # Instantiate the SQL template for this collection/org.
    rm -rf check_collection_num.sql
    cp check_collection_num.sql.tmp check_collection_num.sql
    sed -i "s/TABLE/$c/g" check_collection_num.sql
    sed -i "s/ORGID/$orgId/g" check_collection_num.sql
    # psql output line 3 carries the count value.
    pnum=$(psql -h $onecloud_postgres_host -d $onecloud_postgres_db -U $onecloud_postgres_username -f check_collection_num.sql | sed -n '3p')
    echo "$c mongo: $mnum postgres: $pnum"
    if [ "$mnum" -ne "$pnum" ]; then
      flg=1
      echo -e "$RED[Error] c: $c org: $orgId not match, Mongo: $mnum Post: $pnum $NC\r\n"
    fi
  done
  if [ "$flg" -eq 1 ]; then
    exit 1
  fi
}
# 选取master节点
target_sxa_mongo=$(mongo $target_sxa_mongo --eval "db.isMaster()['primary']" | grep 27017 | awk -F ':' '{print $1}')
# export导出
mongoexport --host $source_sxa_mongo --port $source_sxa_mongo_port -d $source_sxa_mongo_db
# import导入
mongoimport --host $target_sxa_mongo --port $target_sxa_mongo_port -d $target_sxa_mongo_db
#获取collection大小
// mongo shell: list every collection's data size and storage size in GB,
// largest first. Run against the current `db`.
var collectionNames = db.getCollectionNames(), stats = [];
collectionNames.forEach(function (n) { stats.push(db[n].stats()); });
stats = stats.sort(function (a, b) { return b['size'] - a['size']; });
for (var c in stats) { print(stats[c]['ns'] + ": " + stats[c]['size']/1024/1024/1024 + " GB (" + stats[c]['storageSize']/1024/1024/1024 + " GB)"); }
#method2
var mgo = new Mongo()
// Convert a byte count into a human-readable string with one decimal place.
// Divides by 1024 at least once, so the smallest unit produced is kB.
function getReadableFileSizeString(fileSizeInBytes) {
  var i = -1;
  var byteUnits = [' kB', ' MB', ' GB', ' TB', 'PB', 'EB', 'ZB', 'YB'];
  do {
    fileSizeInBytes = fileSizeInBytes / 1024;
    i++;
  } while (fileSizeInBytes > 1024);
  // Clamp tiny values to 0.1 so sub-kB sizes still render sensibly.
  return Math.max(fileSizeInBytes, 0.1).toFixed(1) + byteUnits[i];
};
// Print "ns: size (storageSize)" for every collection in db, largest first.
// Relies on the sibling getReadableFileSizeString helper.
function getStatsFor(db) {
  var collectionNames = db.getCollectionNames(), stats = [];
  collectionNames.forEach(function (n) { stats.push(db.getCollection(n).stats()); });
  stats = stats.sort(function (a, b) { return b['size'] - a['size']; });
  for (var c in stats) { print(stats[c]['ns'] + ": " + getReadableFileSizeString(stats[c]['size']) + " (" + getReadableFileSizeString(stats[c]['storageSize']) + ")"); }
}
// Iterate every database on the mgo connection and dump its collection stats.
function getAllStats() {
  mgo.getDBNames().forEach(function (name) {
    var db = mgo.getDB(name);
    print('\n ' + db + '\n');
    getStatsFor(db);
  });
}
getAllStats()
#get database size
cmdctr:PRIMARY> show databases;
cloud 0.031GB
local 23.020GB
meteor 0.031GB
sxa 509.251GB
sxa_archive 64.469GB
test 0.031GB
定制登录欢迎消息
# Install a custom login banner. The QUOTED 'EOF' delimiter writes the script
# literally — the original unquoted heredoc expanded the awk fields $1/$2/$7
# (and any backticks) at write time, corrupting the generated file.
cd /etc/update-motd.d/
cat <<'EOF' >100-custom
#!/bin/sh
echo ""
echo "\033[1;32m----------------------------------------------------\033[0m"
echo "\033[0;31mTony.Ben's ACS Develop Server.\nAny questions please connect with tony.ben@calix.com\033[0m"
echo "\033[1;32m----------------------------------------------------\033[0m"
echo "\033[1;32m+++++++++++++++++++++++++++++++++++++++++++++++++++++\033[0m"
echo "\033[0;31mLast Login Info\033[0m"
last
echo "\033[1;32m+++++++++++++++++++++++++++++++++++++++++++++++++++++\033[0m"
echo ""
echo "\033[1;32m-----------------------------------------------------\033[0m"
ifconfig -a | awk 'BEGIN {FS="\n"; RS=""} {print $1,$2}' | grep -v 'lo' | awk '{print "\t\t"$1,$7}'
echo "\033[1;32m------------------------------------------------------\033[0m"
EOF
chmod 751 100-custom
EOF是END Of File的缩写,表示自定义终止符.既然自定义,那么EOF就不是固定的,可以随意设置别名,在linux按ctrl-d就代表EOF. EOF一般会配合cat能够多行文本输出. 通过cat配合重定向能够生成文件并追加操作,在它之前先熟悉几个特殊符号: < :输入重定向 > :输出重定向 >> :输出重定向,进行追加,不会覆盖之前内容 << :标准输入来自命令行的一对分隔号的中间内容. 其用法如下: <<EOF //开始 .... EOF //结束 还可以自定义,比如自定义: <<BBB //开始 .... BBB //结束 1)向文件test.sh里输入内容。 [root@slave-server opt]# cat << EOF >test.sh > 123123123 > 3452354345 > asdfasdfs > EOF [root@slave-server opt]# cat test.sh 123123123 3452354345 asdfasdfs 追加内容 [root@slave-server opt]# cat << EOF >>test.sh > 7777 > 8888 > EOF [root@slave-server opt]# cat test.sh 123123123 3452354345 asdfasdfs 7777 8888 覆盖 [root@slave-server opt]# cat << EOF >test.sh > 55555 > EOF [root@slave-server opt]# cat test.sh 55555 ———————————————— 版权声明:本文为CSDN博主「jaryle」的原创文章,遵循 CC 4.0 BY-SA 版权协议,转载请附上原文出处链接及本声明。 原文链接:https://blog.csdn.net/jaryle/java/article/details/77880500
Rancher/K8S 相关
全量清除Rancher/K8S 节点
# Fully wipe Rancher/K8S state from this node. DESTRUCTIVE: removes every
# container, volume, image, and all cluster data directories.
docker stop $(docker ps -aq)
docker system prune -f
docker volume rm $(docker volume ls -q)
docker image rm $(docker image ls -q)
rm -rf /etc/ceph \
  /etc/cni \
  /etc/kubernetes \
  /opt/cni \
  /opt/rke \
  /run/secrets/kubernetes.io \
  /run/calico \
  /run/flannel \
  /var/lib/calico \
  /var/lib/etcd \
  /var/lib/cni \
  /var/lib/kubelet \
  /var/lib/rancher/rke/log \
  /var/log/containers \
  /var/log/pods \
  /var/run/calico
获取Rancher dockerid
docker ps | grep -E "rancher/rancher:|rancher/rancher " | awk '{ print $1 }'
启动Rancher
sudo docker run -d --name rancher --restart=unless-stopped -p 80:80 -p 443:443 rancher/rancher:latest
获取dockerId
docker ps -aq
手工Save/Load images
Save image到文件
sudo docker save -o ubuntu.tar ubuntu:precise ubuntu:unicorn
docker load --input api-clear-cache-server.tar.gz
Load Image
docker load --input api-clear-cache-server.tar.gz
1.awk command
1.1 Purpose 1: want to distinct and then count and sort by num
1.1.1 Command: awk '{print $1}' result.txt | sort | uniq -c | sort -k 1n   (uniq only collapses adjacent duplicates, so sort before it)
Sort parameters:-k: sort by key (in this case column, pairs with -t)
-n: sort as a number
-r: reverse order
(optional) -t: in case you want to change the key separator (default: space)
Uniq parameter:
-w: choose the first N characters
Explanation:
In your problem, we need to first sort the first column and then the second one. So there is a -k 1,1 followed by -k 2,2. But, the second key (ONLY) must be sorted as a number and in the reverse order. Thus, it should be -k 2nr,2.
Note that if the -n or -r sort parameters are outside -k parameter, they are applied to the whole input instead of specific keys.
Lastly, we must find the unique lines, but matching only the first 4 chars. Thus, uniq -w 4
2. Ubuntu 安装ping curl
apt-get update
apt-get install -y iputils-ping
apt-get install -y curl