[Orca-users] Giving something back
Sean O'Neill
sean at seanoneill.info
Thu Oct 17 14:17:48 PDT 2002
This is a template script written in Perl that can be used to collect
data. The meat of the script is how it handles log files: I tried to make
it roll, create, compress, etc. the logs similarly to how orcallator.se
does. I think I'm pretty close :) Read the text at the top of the script
for more details.
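
To make that concrete, here's roughly what the output directory ends up
looking like over time (filenames assume the default basename "myallator";
the dates are only an example):

  myallator-2002-10-17-000        <- opened at first start
  myallator-2002-10-17-000.gz     <- same file, compressed when the script
                                     is restarted later that day
  myallator-2002-10-17-001        <- new log opened by the restarted script
  myallator-2002-10-17-001.gz     <- compressed when the day rolls over
  myallator-2002-10-18-000        <- new day's log, increment back at 000
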
Enjoy :)
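
P.S. In case it helps, below is a rough sketch of the Orca config additions
I'd expect to pair with this. The directive names come from the stock
orcallator.cfg, but the group name, paths, and the column name
(widgets_in_use) are placeholders for whatever you actually collect, so
treat it as a starting point rather than something known to work as-is:

group myallator {
find_files /local/home/perfboy/orca/var/orca/myallator/(.*)/myallator-\d{4}-\d{2}-\d{2}-\d{3}(\.(Z|gz|bz2))?
column_description first_line
date_source column_name timestamp
interval 60
reopen 1
}

plot {
title %g widgets in use
source myallator
data widgets_in_use
legend Widgets in use
}
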
--
........................................................
......... ..- -. .. -..- .-. ..- .-.. . ... ............
.-- .. -. -... .-.. --- .-- ... -.. .-. --- --- .-.. ...
Sean O'Neill
-------------- next part --------------
#!/usr/local/bin/perl
# -*- mode: Perl -*-
# Author: Sean O'Neill
# Date: Oct, 2002
# Commerce One, Inc. (currently)
# sean.oneill at deletethistoemail.commerceone.com.deletethistoemail
# sean at deletethistoemail.seanoneill.info.deletethistoemail
# License: It's yours. Do with it as you will. If you break anything, that's
# yours too - I'm absolved of any responsibility should you use this. There isn't
# anything in here that should break anything though. Now that that's said ...
# pats on the shoulder are always appreciated :)
# I wrote this template after writing a script to collect SNMP statistics
# from Weblogic and realized it could be used for anything. So I tried to
# mark the primary areas that you need to update for whatever you are going
# to use it for. This template was stripped from the actual utility I
# originally wrote. I hope someone finds this useful.
# This script is nothing more than a template for collecting data (however you
# want - SNMP, vmstat, whatever) and writing it to a log file in a format that
# Orca will correctly process for graphing. So this script can be used to
# create a data collection capability for just about anything, assuming you can
# get the data.
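#
# As a rough illustration (the column names below are made up), the log format
# Orca consumes is simply a whitespace-separated header line followed by one
# data row per sample:
#
#   timestamp locltime widgets_in_use widget_errors
#   1034889468 14:17:48 42 0
#   1034889528 14:18:48 43 1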
# The meat of this script is primarily how it handles the log files. The
# collection, processing, and printing of the data is your problem.
# This script tries to simulate how the orcallator.se script handles log
# files for Orca, e.g. opening a new log file at midnight (or thereabouts),
# compressing the previous day's log, and compressing the previous log file
# after a restart. I think it comes pretty close to how orcallator.se does it :)
# The become_daemon and open_pid_file subroutines came from the "Network
# Programming with Perl" book by Lincoln Stein. Very nice book - get it.
# The recursive_mkdir subroutine came from a posting I found on the net.
# Thanks whoever you are.
#
# This is my first attempt at using Perl packages and objects. I was having
# a TERRIBLE time with scoping when I first wrote this thing and using
# packages and objects made all that a bad memory - for the most part. So
# be nice if you have comments on how I did this ;> I'm not a programmer - by
# trade anyway.
#
use IO::File;
use Getopt::Std;
use POSIX 'setsid';
#use strict;
###### SUBROUTINES DEFINITIONS for OutputLog PACKAGE ########
package OutputLog;
#
# Create new OutputLog class object
#
sub new {
use constant COMPRESS => "/usr/local/bin/gzip";
use constant COMPRESSARG => "-2";
use constant COMPRESSEXT => ".gz";
my ($outputdir, $basename, $year, $month, $day) = @_;
my $r_outputlog = {
"outputdir" => $outputdir,
"basename" => $basename,
"year" => $year,
"month" => $month,
"day" => $day,
"fileincrement" => 0,
"compressutil" => COMPRESS,
"compressarg" => COMPRESSARG,
"compressext" => COMPRESSEXT,
"FD" => ""
};
bless $r_outputlog, 'OutputLog';
return $r_outputlog;
}
#
# This does nothing more than return the fully qualified filename as
# currently defined within the OutputLog object.
#
sub getoutputlogfilename {
my $r_outputlog = shift;
return sprintf("%s/%s-%04d-%02d-%02d-%03d", $r_outputlog->{'outputdir'}, $r_outputlog->{'basename'}, $r_outputlog->{'year'}, $r_outputlog->{'month'}, $r_outputlog->{'day'}, $r_outputlog->{'fileincrement'});
}
#
# This figures out what the OutputLog object {'fileincrement'} value should
# basically be. Also, if current day files already exist that aren't
# compressed, it compresses them.
#
sub determineoutputlogfilename {
my $r_outputlog = shift;
my $flag = 1;
my $filename;
if( $r_outputlog->{'outputdir'} eq "STDOUT" ) {
return "STDOUT";
} else {
while( $flag ) {
$filename = getoutputlogfilename($r_outputlog);
#
# If current filename AND current filename COMPRESSED already exists,
# delete the compressed file, recompress, and increment
# {'fileincrement'}
#
if( -f $filename && -f $filename . $r_outputlog->{'compressext'} ) {
unlink($filename . $r_outputlog->{'compressext'});
compressoutputlog( $r_outputlog->{'compressutil'}, $r_outputlog->{'compressarg'}, $filename );
$r_outputlog->{'fileincrement'}++;
next;
}
#
# If current filename already exists, compress it, and increment
# {'fileincrement'}
#
if( -f $filename ) {
compressoutputlog( $r_outputlog->{'compressutil'}, $r_outputlog->{'compressarg'}, $filename );
$r_outputlog->{'fileincrement'}++;
next;
}
#
# If current filename COMPRESS already exists, simply increment
# {'fileincrement'}
#
if( -f $filename . $r_outputlog->{'compressext'} ) {
$r_outputlog->{'fileincrement'}++;
next;
}
$flag = 0;
}
}
}
#
# I think the title says nuff ...
#
sub compressoutputlog {
#
# @_ is passed in the correct order for system():
# Basically: command argument argument
# gzip -2 filename
#
system(@_) == 0 or die "system @_ failed";
}
#
# I think the title says nuff ...
# This subroutine passes back a reference to the file descriptor.
#
sub openoutputlog {
my $r_outputlog = shift;
local *OUTFH;
#
# When the output directory is "STDOUT" there is no real filename to build,
# so dup STDOUT instead of constructing one.
#
if( $r_outputlog->{'outputdir'} eq "STDOUT" ) {
open(OUTFH, ">&STDOUT") or die "Can't dup STDOUT: $!\n";
} else {
my $filename = getoutputlogfilename($r_outputlog);
open(OUTFH, ">$filename") or die "Can't create $filename: $!\n";
}
$r_outputlog->{'FD'} = *OUTFH;
return *OUTFH;
}
#
# I think the title says nuff ...
#
sub closeoutputlog {
my $r_outputlog = shift;
close $r_outputlog->{'FD'};
}
###### SUBROUTINES DEFINITIONS for MAIN PACKAGE ########
package main;
#
# Put script into background
#
sub become_daemon() {
die "Can't fork" unless defined (my $child = fork);
exit(0) if $child; # Parent dies
setsid(); # Become session leader
open(STDIN, "</dev/null");
if( $verbose ) {
open(STDERR, ">&STDOUT");
} else {
open(STDERR, ">/dev/null");
}
chdir '/local/var/log'; # Change working directory
$ENV{PATH} = '/bin:/usr/sbin:/usr/bin:/usr/local/bin';
return $$;
}
#
# Title says it all ...
#
sub print_usage() {
print <<EOF;
USAGE: myallator.pl [-options] community\\\@server
EXAMPLES: Output 5 iterations of stats and sleep 5 seconds between each.
The following outputs to the screen:
myallator.pl -D STDOUT -c 5 -S 5 public\\\@staging-app01
The following goes into background mode and outputs to
/local/home/perfboy/orca/var/orca/myallator/staging-app01/myallator-YYYY-MM-DD-###
myallator.pl -d staging-app01
OPTIONS:
-b <basename> Basename of outputfile. Defaults to "myallator".
-c <count> Number of times to run through main loop
-D <directory> Directory to write data into - Defaults to:
/local/home/perfboy/orca/var/orca/myallator
Value can also be STDOUT for output to the screen. STDOUT cannot
be specified in daemon mode.
-d Become daemon
-h Display this help text
-P <pid filename> filename to use for storing the child process pid value
Useful if you want to run multiple instances of this
script on a machine and avoid pid filename collisions.
-S <seconds> Sleep time in seconds - defaults to 60
-v Verbose - gives a little more output during processing
EOF
exit(0);
}
sub generate_outputdir($$) {
my ($outputdir, $hostname) = @_;
return sprintf("%s/%s", $outputdir, $hostname);
}
#
# Recursively create the directory passed e.g. mkdir -p <dir>
#
sub recursive_mkdir($) {
my $path = shift;
my $tmp = ""; # accumulates the path one component at a time
my @dirs = split "/" => $path;
foreach my $dir (@dirs) {
$tmp .= "$dir/";
unless ( -e $tmp and -d _ ) {
mkdir($tmp, 0755) || die "Cannot make $tmp: $!";
}
next;
}
}
sub open_pid_file($) {
my $file = shift;
if (-e $file) {
my $fh = IO::File->new($file) || return;
my $pid = <$fh>;
die "Server already running with PID $pid" if kill 0 => $pid;
warn "Removing PID file for defunct server process $pid.\n" if ( $verbose );
die "Can't unlink PID file $file" unless -w $ file && unlink $file;
}
warn "Opening pid file: $file\n" if ( $verbose );
my $fh = IO::File->new($file, O_CREAT|O_EXCL|O_WRONLY, 0644)
or die "Can't create $file: $!\n";
return $fh;
}
###### MAIN LOOP ######
getopts("c:dD:hP:S:v");
print_usage() if $opt_h;
# $hostname is used primarily to create the resultant data directory.
# $ARGV[0] may be given as "community@server" (handy for SNMP collectors)
# or as just "server".
#
my ($community, $hostname) = split /\@/, $ARGV[0];
($hostname, $community) = ($community, undef) unless defined $hostname;
print_usage() unless $hostname;
my $timenow = time();
my ($sec, $min, $hour, $day, $month, $year) = (localtime($timenow))[0,1,2,3,4,5];
my $count = 0;
my $basename = ($opt_b ? $opt_b : "myallator");
my $outputdir = ($opt_D ? $opt_D : "/local/home/perfboy/orca/var/orca/myallator");
$outputdir =~ s/\/$//g; # Strip trailing slash assuming there is one
my $logfileobj;
if( $outputdir eq "STDOUT" ) {
$logfileobj = OutputLog::new($outputdir, "", "", "", "");
} else {
$outputdir = generate_outputdir($outputdir, $hostname );
recursive_mkdir($outputdir) if( ! -d $outputdir );
$logfileobj = OutputLog::new($outputdir, $basename, $year + 1900, $month + 1, $day);
$logfileobj->determineoutputlogfilename();
}
my $maxcount = ($opt_c ? $opt_c : -1);
my $pid_file = ($opt_P ? $opt_P : '/var/tmp/myallator.pid');
my $sleeptime = ($opt_S ? $opt_S : 60);
local $verbose = ($opt_v ? $opt_v : 0);
if( $opt_d ) {
die "Demon mode specified - you cannot specify STDOUT in daemon mode." if( $outputdir eq "STDOUT");
print "Going daemon mode ... L8R !!!!\n";
my $pidfh = open_pid_file( $pid_file );
my $pid = become_daemon();
print $pidfh $pid;
close $pidfh;
} else {
warn "Output to " . $logfileobj->getoutputlogfilename() . "\n" if( $verbose );
}
my $outfh = $logfileobj->openoutputlog();
#
# Select $outfh so all the print statements don't need
# filehandle specified.
#
select $outfh;
$| = 1; # Disable output buffering on $outfh
# ***** You need to define your print headers here
##printf "timestamp locltime serverUptime serverMaxHeapSpace serverHeapUsedPct serverQueueThroughput jdbcMaxCapacity jdbcInitCapacity jdbcCurrentPoolSize jdbcCurrentInUse jdbcTotalPendingConnections jdbcHightwaterPendingConnections jdbcHighwaterWaitTime\n";
#
# ***** If you are computing averages, rates, throughputs, whatever, you need
# ***** to collect your first batch of data here. If you are collecting what
# ***** I call gauge data, you don't need this step.
#
while (1) {
my $timenow = time();
my ($sec, $min, $hour, $day, $month, $year) = (localtime($timenow))[0,1,2,3,4,5];
my $timestring = sprintf("%02d:%02d:%02d", $hour, $min, $sec);
sleep $sleeptime; # Sleep must come before data collection if
# computations are being done. If there are no computations,
# the sleep can be moved to the bottom of this while loop.
# That lets you get one row of data immediately,
# but ONLY if no computations are being done on the data.
#
# ***** Collect Data Here
# ***** and perform any computations if necessary
#
# Example of data collection below
##my($serverUptime, $serverMaxHeapSpace, $serverHeapUsedPct, $serverQueueThroughput, $jdbcMaxCapacity, $jdbcInitCapacity, $jdbcCurrentPoolSize, $jdbcCurrentInUse, $jdbcTotalPendingConnections, $jdbcHightwaterPendingConnections, $jdbcHighwaterWaitTime) = snmpget($hostname, $community, $port, 'serverUptime','serverMaxHeapSpace', 'serverHeapUsedPct', 'serverQueueThroughput', 'jdbcMaxCapacity', 'jdbcInitCapacity', 'jdbcCurrentPoolSize', 'jdbcCurrentInUse', 'jdbcTotalPendingConnections', 'jdbcHighwaterPendingConnections', 'jdbcHighwaterWaitTime');
##$serverUptime =~ s/ /-/g;
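#
# A second, purely illustrative sketch for non-SNMP collection: shelling out
# to vmstat for simple gauge data. The field positions below are an
# assumption and vary by platform, so adjust them before uncommenting.
##my @vmfields = split ' ', (split /\n/, `vmstat 1 2`)[-1];
##my ($runqueue, $freemem, $cpu_idle) = @vmfields[0, 4, -1];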
#
# If the day changes (e.g. the 12th becomes the 13th), it's time to roll the
# current log file and compress it.
#
if( $logfileobj->{'outputdir'} ne "STDOUT" && $day != $logfileobj->{'day'} ) {
$logfileobj->closeoutputlog;
$logfileobj->determineoutputlogfilename;
$logfileobj->{'year'} = $year + 1900;
$logfileobj->{'month'} = $month + 1;
$logfileobj->{'day'} = $day;
$logfileobj->{'fileincrement'} = 0;
warn "Changing log to " . $logfileobj->getoutputlogfilename() . "\n" if( $verbose );
$outfh = $logfileobj->openoutputlog();
select $outfh;
$| = 1; # Disable output buffering on $outfh
# ***** You need to define your print headers here
##printf "timestamp locltime serverUptime serverMaxHeapSpace serverHeapUsedPct serverQueueThroughput jdbcMaxCapacity jdbcInitCapacity jdbcCurrentPoolSize jdbcCurrentInUse jdbcTotalPendingConnections jdbcHightwaterPendingConnections jdbcHighwaterWaitTime\n";
}
# ***** You need to print your data here
##printf "$timenow $timestring $serverUptime $serverMaxHeapSpace $serverHeapUsedPct $serverQueueThroughput $jdbcMaxCapacity $jdbcInitCapacity $jdbcCurrentPoolSize $jdbcCurrentInUse $jdbcTotalPendingConnections $jdbcHightwaterPendingConnections $jdbcHighwaterWaitTime\n"
;
$count++ if( $maxcount != -1 );
if( $count == $maxcount ) {
$logfileobj->closeoutputlog;
exit(0);
}
}
$logfileobj->closeoutputlog;
exit(0);